From eea5bd29ecc6c3766df1129ca36791a759c7f574 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 10 Mar 2025 11:15:36 -0700 Subject: [PATCH 001/231] perf: Extend per event callchain limit to branch stack commit c53e14f1ea4a8f8ddd9b2cd850fcbc0d934b79f5 upstream. The commit 97c79a38cd45 ("perf core: Per event callchain limit") introduced a per-event term to allow finer tuning of the depth of callchains to save space. It should be applied to the branch stack as well. For example, autoFDO collections require maximum LBR entries. In the meantime, other system-wide LBR users may only be interested in the latest a few number of LBRs. A per-event LBR depth would save the perf output buffer. The patch simply drops the uninterested branches, but HW still collects the maximum branches. There may be a model-specific optimization that can reduce the HW depth for some cases to reduce the overhead further. But it isn't included in the patch set. Because it's not useful for all cases. For example, ARCH LBR can utilize the PEBS and XSAVE to collect LBRs. The depth should have less impact on the collecting overhead. The model-specific optimization may be implemented later separately. 
Intel-SIG: commit c53e14f1ea4a perf: Extend per event callchain limit to branch stack Backport CWF PMU support and dependency Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20250310181536.3645382-1-kan.liang@linux.intel.com Signed-off-by: Jason Zeng --- include/linux/perf_event.h | 3 +++ include/uapi/linux/perf_event.h | 2 ++ 2 files changed, 5 insertions(+) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a069a42d5488..78c75e8e6e5d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1329,6 +1329,9 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, if (branch_sample_hw_index(event)) size += sizeof(u64); + + brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr); + size += brs->nr * sizeof(struct perf_branch_entry); /* diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4842c36fdf80..008403fe0b0b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -385,6 +385,8 @@ enum perf_event_read_format { * * @sample_max_stack: Max number of frame pointers in a callchain, * should be < /proc/sys/kernel/perf_event_max_stack + * Max number of entries of branch stack + * should be < hardware limit */ struct perf_event_attr { -- Gitee From 034b08a1156807d461a6159987497350ccd419ab Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:24 -0700 Subject: [PATCH 002/231] tools headers UAPI: Sync include/uapi/linux/perf_event.h header with the kernel mainline inclusion from mainline-v6.8-rc1 commit 76db7aab1fca6688ddf9f388157521c442e0ffb8 category: feature bugzilla: https://gitee.com/openeuler/intel-kernel/issues/ICZHEB CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=76db7aab1fca6688ddf9f388157521c442e0ffb8 --------------------------- Sync the new sample type for the branch counters feature. 
Intel-SIG: commit 76db7aab1fca tools headers UAPI: Sync include/uapi/linux/perf_event.h header with the kernel Signed-off-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Bayduraev Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tinghao Zhang Link: https://lore.kernel.org/r/20231025201626.3000228-6-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jason Zeng --- tools/include/uapi/linux/perf_event.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 39c6a250dd1b..3a64499b0f5d 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. 
+ * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) -- Gitee From e955816692a4ac60b75e9d0b730bef8be12f8e09 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 6 Aug 2024 12:07:50 -0700 Subject: [PATCH 003/231] tools/include: Sync uapi/linux/perf.h with the kernel sources commit 8ec9497d3ef34fab216e277eca5035811f06b421 upstream. To pick up changes from: 608f6976c309 perf/x86/intel: Support new data source for Lunar Lake This should be used to beautify perf syscall arguments and it addresses these tools/perf build warnings: Warning: Kernel ABI header differences: diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h Please see tools/include/uapi/README for details (it's in the first patch of this series). 
Intel-SIG: commit 8ec9497d3ef3 tools/include: Sync uapi/linux/perf.h with the kernel sources Backport CWF PMU support and dependency Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Ian Rogers Cc: Adrian Hunter Cc: "Liang, Kan" Cc: linux-perf-users@vger.kernel.org Signed-off-by: Namhyung Kim Signed-off-by: Jason Zeng --- tools/include/uapi/linux/perf_event.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 3a64499b0f5d..4842c36fdf80 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1349,12 +1349,14 @@ union perf_mem_data_src { #define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ #define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ #define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -/* 5-0x7 available */ +#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ +/* 0x7 available */ #define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ #define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ #define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ #define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ -#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */ +#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ #define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ #define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ #define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ -- Gitee From 7d07108c2725157ef831af7fce161a43b0d2ad24 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 9 Apr 2025 17:11:18 -0700 Subject: [PATCH 004/231] tools headers: Update the uapi/linux/perf_event.h copy with the kernel sources commit ae62977331fcbf5c9a4260c88d9f94450db2d99a upstream. 
To pick up the changes in: c53e14f1ea4a8f8d perf: Extend per event callchain limit to branch stack Addressing this perf tools build warning: Warning: Kernel ABI header differences: diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h Please see tools/include/uapi/README for further details. Intel-SIG: commit ae62977331fc tools headers: Update the uapi/linux/perf_event.h copy with the kernel sources Backport CWF PMU support and dependency Acked-by: Ingo Molnar Tested-by: Venkat Rao Bagalkote Link: https://lore.kernel.org/r/20250410001125.391820-4-namhyung@kernel.org Signed-off-by: Namhyung Kim Signed-off-by: Jason Zeng --- tools/include/uapi/linux/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 4842c36fdf80..008403fe0b0b 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -385,6 +385,8 @@ enum perf_event_read_format { * * @sample_max_stack: Max number of frame pointers in a callchain, * should be < /proc/sys/kernel/perf_event_max_stack + * Max number of entries of branch stack + * should be < hardware limit */ struct perf_event_attr { -- Gitee From e0b6faf9b24f557bd987c7145ba2686fd089da74 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 22 May 2025 09:51:22 +0200 Subject: [PATCH 005/231] perf/uapi: Clean up a bit commit 44889ff67cee7b9ee2d305690ce7a5488b137a66 upstream. When applying a recent commit to the header I noticed that we have accumulated quite a bit of historic noise in this header, so do a bit of spring cleaning: - Define bitfields in a vertically aligned fashion, like perf_event_mmap_page::capabilities already does. This makes it easier to see the distribution and sizing of bits within a word, at a glance. 
The following is much more readable: __u64 cap_bit0 : 1, cap_bit0_is_deprecated : 1, cap_user_rdpmc : 1, cap_user_time : 1, cap_user_time_zero : 1, cap_user_time_short : 1, cap_____res : 58; Than: __u64 cap_bit0:1, cap_bit0_is_deprecated:1, cap_user_rdpmc:1, cap_user_time:1, cap_user_time_zero:1, cap_user_time_short:1, cap_____res:58; So convert all bitfield definitions from the latter style to the former style. - Fix typos and grammar - Fix capitalization - Remove whitespace noise - Harmonize the definitions of various generations and groups of PERF_MEM_ ABI values. - Vertically align all definitions and assignments to the same column (48), as the first definition (enum perf_type_id), throughout the entire header. - And in general make the code and comments to be more in sync with each other and to be more readable overall. No change in functionality. Copy the changes over to tools/include/uapi/linux/perf_event.h. Intel-SIG: commit 44889ff67cee perf/uapi: Clean up a bit Backport CWF PMU support and dependency Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Alexander Shishkin Cc: Mark Rutland Cc: Namhyung Kim Cc: Ian Rogers Link: https://lore.kernel.org/r/20250521221529.2547099-1-irogers@google.com Signed-off-by: Jason Zeng --- include/uapi/linux/perf_event.h | 652 +++++++++++++------------- tools/include/uapi/linux/perf_event.h | 652 +++++++++++++------------- 2 files changed, 662 insertions(+), 642 deletions(-) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 008403fe0b0b..365184d931ad 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -39,18 +39,21 @@ enum perf_type_id { /* * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA * AA: hardware event ID * EEEEEEEE: PMU type ID + * * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB * BB: hardware cache ID * CC: hardware cache op ID * DD: hardware 
cache op result ID * EEEEEEEE: PMU type ID - * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + * + * If the PMU type ID is 0, PERF_TYPE_RAW will be applied. */ -#define PERF_PMU_TYPE_SHIFT 32 -#define PERF_HW_EVENT_MASK 0xffffffff +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff /* * Generalized performance event event_id types, used by the @@ -112,7 +115,7 @@ enum perf_hw_cache_op_result_id { /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various - * physical and sw events of the kernel (and allow the profiling of them as + * physical and SW events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { @@ -167,8 +170,9 @@ enum perf_event_sample_format { }; #define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) + /* - * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set. * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do @@ -178,20 +182,20 @@ enum perf_event_sample_format { * of branches and therefore it supersedes all the other types. 
*/ enum perf_branch_sample_type_shift { - PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ - PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ - PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ - - PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ - PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ - PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ - PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ - PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ - PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ - PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ - PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* CALL/RET stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ @@ -210,96 +214,95 @@ enum perf_branch_sample_type_shift { }; enum perf_branch_sample_type { - PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, - PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, - PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, + PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, + PERF_SAMPLE_BRANCH_KERNEL = 1U << 
PERF_SAMPLE_BRANCH_KERNEL_SHIFT, + PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, - PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, - PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, - PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, - PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, - PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, - PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, + PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, + PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, + PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, + PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, + PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, - PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, - PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, - PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, + PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, - PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, - PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, - PERF_SAMPLE_BRANCH_TYPE_SAVE = - 1U << 
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, - PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, - PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* - * Common flow change classification + * Common control flow change classifications: */ enum { - PERF_BR_UNKNOWN = 0, /* unknown */ - PERF_BR_COND = 1, /* conditional */ - PERF_BR_UNCOND = 2, /* unconditional */ - PERF_BR_IND = 3, /* indirect */ - PERF_BR_CALL = 4, /* function call */ - PERF_BR_IND_CALL = 5, /* indirect function call */ - PERF_BR_RET = 6, /* function return */ - PERF_BR_SYSCALL = 7, /* syscall */ - PERF_BR_SYSRET = 8, /* syscall return */ - PERF_BR_COND_CALL = 9, /* conditional function call */ - PERF_BR_COND_RET = 10, /* conditional function return */ - PERF_BR_ERET = 11, /* exception return */ - PERF_BR_IRQ = 12, /* irq */ - PERF_BR_SERROR = 13, /* system error */ - PERF_BR_NO_TX = 14, /* not in transaction */ - PERF_BR_EXTEND_ABI = 15, /* extend ABI */ + PERF_BR_UNKNOWN = 0, /* Unknown */ + PERF_BR_COND = 1, /* Conditional */ + PERF_BR_UNCOND = 2, /* Unconditional */ + PERF_BR_IND = 3, /* Indirect */ + PERF_BR_CALL = 4, /* Function call */ + PERF_BR_IND_CALL = 5, /* Indirect function call */ + PERF_BR_RET = 6, /* Function return */ + PERF_BR_SYSCALL = 7, /* Syscall */ + PERF_BR_SYSRET = 8, /* Syscall return */ + PERF_BR_COND_CALL = 9, /* Conditional function call */ + PERF_BR_COND_RET = 10, /* Conditional function return */ + PERF_BR_ERET = 11, /* Exception return */ + 
PERF_BR_IRQ = 12, /* IRQ */ + PERF_BR_SERROR = 13, /* System error */ + PERF_BR_NO_TX = 14, /* Not in transaction */ + PERF_BR_EXTEND_ABI = 15, /* Extend ABI */ PERF_BR_MAX, }; /* - * Common branch speculation outcome classification + * Common branch speculation outcome classifications: */ enum { - PERF_BR_SPEC_NA = 0, /* Not available */ - PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ - PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ - PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ + PERF_BR_SPEC_NA = 0, /* Not available */ + PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ + PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ + PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ PERF_BR_SPEC_MAX, }; enum { - PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ - PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ - PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ - PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ - PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ - PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ - PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ - PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ + PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ + PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ + PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ + PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ + PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ + PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ + PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ + PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ PERF_BR_NEW_MAX, }; enum { - PERF_BR_PRIV_UNKNOWN = 0, - PERF_BR_PRIV_USER = 1, - PERF_BR_PRIV_KERNEL = 2, - PERF_BR_PRIV_HV = 3, + PERF_BR_PRIV_UNKNOWN = 0, + PERF_BR_PRIV_USER = 1, + PERF_BR_PRIV_KERNEL = 2, + PERF_BR_PRIV_HV = 3, }; -#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 -#define PERF_BR_ARM64_DEBUG_HALT 
PERF_BR_NEW_ARCH_2 -#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 -#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 -#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 +#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 +#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 +#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 +#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 +#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ @@ -310,9 +313,9 @@ enum { * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { - PERF_SAMPLE_REGS_ABI_NONE = 0, - PERF_SAMPLE_REGS_ABI_32 = 1, - PERF_SAMPLE_REGS_ABI_64 = 2, + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, }; /* @@ -320,21 +323,21 @@ enum perf_sample_regs_abi { * abort events. Multiple bits can be set. */ enum { - PERF_TXN_ELISION = (1 << 0), /* From elision */ - PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ - PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ - PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ - PERF_TXN_RETRY = (1 << 4), /* Retry possible */ - PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ - PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ - PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ + PERF_TXN_ELISION = (1 << 0), /* From elision */ + PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ + PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ + PERF_TXN_ASYNC = (1 << 3), /* Instruction is not related */ + PERF_TXN_RETRY = (1 << 4), /* Retry possible */ + PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ + PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ + PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ - PERF_TXN_MAX = (1 << 8), /* non-ABI */ + PERF_TXN_MAX = (1 << 8), /* non-ABI */ - /* bits 32..63 are reserved for the abort code */ + /* Bits 32..63 are reserved for the abort 
code */ - PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), - PERF_TXN_ABORT_SHIFT = 32, + PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), + PERF_TXN_ABORT_SHIFT = 32, }; /* @@ -369,24 +372,22 @@ enum perf_event_read_format { PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; -#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ -#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ -#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ -#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ - /* add: sample_stack_user */ -#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ -#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ -#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ -#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ -#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ +#define PERF_ATTR_SIZE_VER0 64 /* Size of first published 'struct perf_event_attr' */ +#define PERF_ATTR_SIZE_VER1 72 /* Add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* Add: branch_sample_type */ +#define PERF_ATTR_SIZE_VER3 96 /* Add: sample_regs_user */ + /* Add: sample_stack_user */ +#define PERF_ATTR_SIZE_VER4 104 /* Add: sample_regs_intr */ +#define PERF_ATTR_SIZE_VER5 112 /* Add: aux_watermark */ +#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ /* - * Hardware event_id to monitor via a performance monitoring event: - * - * @sample_max_stack: Max number of frame pointers in a callchain, - * should be < /proc/sys/kernel/perf_event_max_stack - * Max number of entries of branch stack - * should be < hardware limit + * 'struct perf_event_attr' contains various attributes that define + * a performance event - most of them hardware related configuration + * details, but also a lot of behavioral switches and values implemented + * by the kernel. 
*/ struct perf_event_attr { @@ -396,7 +397,7 @@ struct perf_event_attr { __u32 type; /* - * Size of the attr structure, for fwd/bwd compat. + * Size of the attr structure, for forward/backwards compatibility. */ __u32 size; @@ -451,21 +452,21 @@ struct perf_event_attr { comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ - write_backward : 1, /* Write ring buffer from end to beginning */ + write_backward : 1, /* write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ - bpf_event : 1, /* include bpf events */ + bpf_event : 1, /* include BPF events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ - build_id : 1, /* use build id in mmap2 events */ + build_id : 1, /* use build ID in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ sigtrap : 1, /* send synchronous SIGTRAP on event */ __reserved_1 : 26; union { - __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_events; /* wake up every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; @@ -474,13 +475,13 @@ struct perf_event_attr { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ - __u64 config1; /* extension of config */ + __u64 config1; /* extension of config */ }; union { __u64 bp_len; - __u64 kprobe_addr; /* when kprobe_func == NULL */ + __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ - __u64 config2; /* extension of config1 */ + __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ @@ -510,7 +511,16 @@ struct perf_event_attr { * Wakeup watermark for AUX area */ 
__u32 aux_watermark; + + /* + * Max number of frame pointers in a callchain, should be + * lower than /proc/sys/kernel/perf_event_max_stack. + * + * Max number of entries of branch stack should be lower + * than the hardware limit. + */ __u16 sample_max_stack; + __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; @@ -528,7 +538,7 @@ struct perf_event_attr { /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command - * to query bpf programs attached to the same perf tracepoint + * to query BPF programs attached to the same perf tracepoint * as the given perf event. */ struct perf_event_query_bpf { @@ -550,21 +560,21 @@ struct perf_event_query_bpf { /* * Ioctls that can be done on a perf event fd: */ -#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) -#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) -#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) -#define PERF_EVENT_IOC_RESET _IO ('$', 3) -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) -#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) -#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) -#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) -#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) -#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW ('$', 4, __u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW ('$', 6, char *) +#define PERF_EVENT_IOC_ID _IOR ('$', 7, __u64 *) +#define PERF_EVENT_IOC_SET_BPF _IOW ('$', 8, __u32) +#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW ('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) -#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) +#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW ('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { - 
PERF_IOC_FLAG_GROUP = 1U << 0, + PERF_IOC_FLAG_GROUP = 1U << 0, }; /* @@ -575,7 +585,7 @@ struct perf_event_mmap_page { __u32 compat_version; /* lowest version this is compat with */ /* - * Bits needed to read the hw events in user-space. + * Bits needed to read the HW events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; @@ -613,7 +623,7 @@ struct perf_event_mmap_page { __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ - __u64 time_running; /* time event on cpu */ + __u64 time_running; /* time event on CPU */ union { __u64 capabilities; struct { @@ -641,7 +651,7 @@ struct perf_event_mmap_page { /* * If cap_usr_time the below fields can be used to compute the time - * delta since time_enabled (in ns) using rdtsc or similar. + * delta since time_enabled (in ns) using RDTSC or similar. * * u64 quot, rem; * u64 delta; @@ -714,7 +724,7 @@ struct perf_event_mmap_page { * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data, after issueing + * written by user-space to reflect the last read data, after issuing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * @@ -730,7 +740,7 @@ struct perf_event_mmap_page { /* * AUX area is defined by aux_{offset,size} fields that should be set - * by the userspace, so that + * by the user-space, so that * * aux_offset >= data_offset + data_size * @@ -804,7 +814,7 @@ struct perf_event_mmap_page { * Indicates that thread was preempted in TASK_RUNNING state. * * PERF_RECORD_MISC_MMAP_BUILD_ID: - * Indicates that mmap2 event carries build id data. + * Indicates that mmap2 event carries build ID data. 
*/ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) @@ -815,26 +825,26 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { - __u32 type; - __u16 misc; - __u16 size; + __u32 type; + __u16 misc; + __u16 size; }; struct perf_ns_link_info { - __u64 dev; - __u64 ino; + __u64 dev; + __u64 ino; }; enum { - NET_NS_INDEX = 0, - UTS_NS_INDEX = 1, - IPC_NS_INDEX = 2, - PID_NS_INDEX = 3, - USER_NS_INDEX = 4, - MNT_NS_INDEX = 5, - CGROUP_NS_INDEX = 6, - - NR_NAMESPACES, /* number of available namespaces */ + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + + NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { @@ -850,11 +860,11 @@ enum perf_event_type { * optional fields being ignored. * * struct sample_id { - * { u32 pid, tid; } && PERF_SAMPLE_TID - * { u64 time; } && PERF_SAMPLE_TIME - * { u64 id; } && PERF_SAMPLE_ID - * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID - * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * @@ -865,7 +875,7 @@ enum perf_event_type { /* * The MMAP events record the PROT_EXEC mappings so that we can - * correlate userspace IPs to code. They have the following structure: + * correlate user-space IPs to code. 
They have the following structure: * * struct { * struct perf_event_header header; @@ -875,7 +885,7 @@ enum perf_event_type { * u64 len; * u64 pgoff; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, @@ -885,7 +895,7 @@ enum perf_event_type { * struct perf_event_header header; * u64 id; * u64 lost; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, @@ -896,7 +906,7 @@ enum perf_event_type { * * u32 pid, tid; * char comm[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, @@ -907,7 +917,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, @@ -918,7 +928,7 @@ enum perf_event_type { * u64 time; * u64 id; * u64 stream_id; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, @@ -930,7 +940,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, @@ -941,7 +951,7 @@ enum perf_event_type { * u32 pid, tid; * * struct read_format values; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, @@ -996,12 +1006,12 @@ enum perf_event_type { * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * - * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * - * { u64 size; - * char data[size]; - * u64 dyn_size; } && PERF_SAMPLE_STACK_USER + * { u64 size; + * char data[size]; + * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { union perf_sample_weight * { @@ -1061,7 +1071,7 @@ enum perf_event_type { * }; * u32 prot, flags; * char filename[]; - * struct 
sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, @@ -1070,12 +1080,12 @@ enum perf_event_type { * Records that new data landed in the AUX buffer part. * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u64 aux_offset; - * u64 aux_size; + * u64 aux_offset; + * u64 aux_size; * u64 flags; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, @@ -1158,7 +1168,7 @@ enum perf_event_type { PERF_RECORD_KSYMBOL = 17, /* - * Record bpf events: + * Record BPF events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, @@ -1236,181 +1246,181 @@ enum perf_record_ksymbol_type { #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { - PERF_BPF_EVENT_UNKNOWN = 0, - PERF_BPF_EVENT_PROG_LOAD = 1, - PERF_BPF_EVENT_PROG_UNLOAD = 2, - PERF_BPF_EVENT_MAX, /* non-ABI */ + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX, /* non-ABI */ }; -#define PERF_MAX_STACK_DEPTH 127 -#define PERF_MAX_CONTEXTS_PER_STACK 8 +#define PERF_MAX_STACK_DEPTH 127 +#define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { - PERF_CONTEXT_HV = (__u64)-32, - PERF_CONTEXT_KERNEL = (__u64)-128, - PERF_CONTEXT_USER = (__u64)-512, + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, - PERF_CONTEXT_GUEST = (__u64)-2048, - PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, - PERF_CONTEXT_GUEST_USER = (__u64)-2560, + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, - PERF_CONTEXT_MAX = (__u64)-4095, + PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ -#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ -#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ -#define PERF_AUX_FLAG_PARTIAL 0x04 /* 
record contains gaps */ -#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_TRUNCATED 0x0001 /* Record was truncated to fit */ +#define PERF_AUX_FLAG_OVERWRITE 0x0002 /* Snapshot from overwrite mode */ +#define PERF_AUX_FLAG_PARTIAL 0x0004 /* Record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x0008 /* Sample collided with another */ #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ /* CoreSight PMU AUX buffer formats */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ -#define PERF_FLAG_FD_NO_GROUP (1UL << 0) -#define PERF_FLAG_FD_OUTPUT (1UL << 1) -#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ -#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ +#define PERF_FLAG_FD_NO_GROUP (1UL << 0) +#define PERF_FLAG_FD_OUTPUT (1UL << 1) +#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup ID, per-CPU mode only */ +#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_op:5, /* type of opcode */ - mem_lvl:14, /* memory hierarchy level */ - mem_snoop:5, /* snoop mode */ - mem_lock:2, /* lock instr */ - mem_dtlb:7, /* tlb access */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_remote:1, /* remote */ - mem_snoopx:2, /* snoop mode, ext */ - mem_blk:3, /* access blocked */ - mem_hops:3, /* hop level */ - mem_rsvd:18; + __u64 mem_op : 5, /* Type of opcode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_snoop : 5, /* Snoop mode */ + mem_lock : 2, /* Lock instr */ + mem_dtlb : 7, /* TLB access */ + mem_lvl_num : 4, /* Memory hierarchy level 
number */ + mem_remote : 1, /* Remote */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_blk : 3, /* Access blocked */ + mem_hops : 3, /* Hop level */ + mem_rsvd : 18; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:18, - mem_hops:3, /* hop level */ - mem_blk:3, /* access blocked */ - mem_snoopx:2, /* snoop mode, ext */ - mem_remote:1, /* remote */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_dtlb:7, /* tlb access */ - mem_lock:2, /* lock instr */ - mem_snoop:5, /* snoop mode */ - mem_lvl:14, /* memory hierarchy level */ - mem_op:5; /* type of opcode */ + __u64 mem_rsvd : 18, + mem_hops : 3, /* Hop level */ + mem_blk : 3, /* Access blocked */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_remote : 1, /* Remote */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_dtlb : 7, /* TLB access */ + mem_lock : 2, /* Lock instr */ + mem_snoop : 5, /* Snoop mode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_op : 5; /* Type of opcode */ }; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif -/* type of opcode (load/store/prefetch,code) */ -#define PERF_MEM_OP_NA 0x01 /* not available */ -#define PERF_MEM_OP_LOAD 0x02 /* load instruction */ -#define PERF_MEM_OP_STORE 0x04 /* store instruction */ -#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ -#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ -#define PERF_MEM_OP_SHIFT 0 +/* Type of memory opcode: */ +#define PERF_MEM_OP_NA 0x0001 /* Not available */ +#define PERF_MEM_OP_LOAD 0x0002 /* Load instruction */ +#define PERF_MEM_OP_STORE 0x0004 /* Store instruction */ +#define PERF_MEM_OP_PFETCH 0x0008 /* Prefetch */ +#define PERF_MEM_OP_EXEC 0x0010 /* Code (execution) */ +#define PERF_MEM_OP_SHIFT 0 /* - * PERF_MEM_LVL_* namespace being depricated to some extent in the + * The PERF_MEM_LVL_* namespace is being deprecated to some extent in * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields. 
- * Supporting this namespace inorder to not break defined ABIs. + * We support this namespace in order to not break defined ABIs. * - * memory hierarchy (memory level, hit or miss) + * Memory hierarchy (memory level, hit or miss) */ -#define PERF_MEM_LVL_NA 0x01 /* not available */ -#define PERF_MEM_LVL_HIT 0x02 /* hit level */ -#define PERF_MEM_LVL_MISS 0x04 /* miss level */ -#define PERF_MEM_LVL_L1 0x08 /* L1 */ -#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ -#define PERF_MEM_LVL_L2 0x20 /* L2 */ -#define PERF_MEM_LVL_L3 0x40 /* L3 */ -#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ -#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ -#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ -#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ -#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ -#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ -#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ -#define PERF_MEM_LVL_SHIFT 5 - -#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ -#define PERF_MEM_REMOTE_SHIFT 37 - -#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ -#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ -#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ -#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ -/* 0x7 available */ -#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ -#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ -#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ -#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ -#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ -#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ -#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ - -#define PERF_MEM_LVLNUM_SHIFT 33 - -/* snoop mode */ -#define PERF_MEM_SNOOP_NA 0x01 /* not available */ -#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ -#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ 
-#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ -#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ -#define PERF_MEM_SNOOP_SHIFT 19 - -#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ -#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */ -#define PERF_MEM_SNOOPX_SHIFT 38 - -/* locked instruction */ -#define PERF_MEM_LOCK_NA 0x01 /* not available */ -#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ -#define PERF_MEM_LOCK_SHIFT 24 +#define PERF_MEM_LVL_NA 0x0001 /* Not available */ +#define PERF_MEM_LVL_HIT 0x0002 /* Hit level */ +#define PERF_MEM_LVL_MISS 0x0004 /* Miss level */ +#define PERF_MEM_LVL_L1 0x0008 /* L1 */ +#define PERF_MEM_LVL_LFB 0x0010 /* Line Fill Buffer */ +#define PERF_MEM_LVL_L2 0x0020 /* L2 */ +#define PERF_MEM_LVL_L3 0x0040 /* L3 */ +#define PERF_MEM_LVL_LOC_RAM 0x0080 /* Local DRAM */ +#define PERF_MEM_LVL_REM_RAM1 0x0100 /* Remote DRAM (1 hop) */ +#define PERF_MEM_LVL_REM_RAM2 0x0200 /* Remote DRAM (2 hops) */ +#define PERF_MEM_LVL_REM_CCE1 0x0400 /* Remote Cache (1 hop) */ +#define PERF_MEM_LVL_REM_CCE2 0x0800 /* Remote Cache (2 hops) */ +#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ +#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ +#define PERF_MEM_LVL_SHIFT 5 + +#define PERF_MEM_REMOTE_REMOTE 0x0001 /* Remote */ +#define PERF_MEM_REMOTE_SHIFT 37 + +#define PERF_MEM_LVLNUM_L1 0x0001 /* L1 */ +#define PERF_MEM_LVLNUM_L2 0x0002 /* L2 */ +#define PERF_MEM_LVLNUM_L3 0x0003 /* L3 */ +#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ +#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ +/* 0x007 available */ +#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */ +#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ +#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */ +#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */ +#define PERF_MEM_LVLNUM_LFB 0x000c /* LFB / L1 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_RAM 0x000d /* RAM */ +#define PERF_MEM_LVLNUM_PMEM 
0x000e /* PMEM */ +#define PERF_MEM_LVLNUM_NA 0x000f /* N/A */ + +#define PERF_MEM_LVLNUM_SHIFT 33 + +/* Snoop mode */ +#define PERF_MEM_SNOOP_NA 0x0001 /* Not available */ +#define PERF_MEM_SNOOP_NONE 0x0002 /* No snoop */ +#define PERF_MEM_SNOOP_HIT 0x0004 /* Snoop hit */ +#define PERF_MEM_SNOOP_MISS 0x0008 /* Snoop miss */ +#define PERF_MEM_SNOOP_HITM 0x0010 /* Snoop hit modified */ +#define PERF_MEM_SNOOP_SHIFT 19 + +#define PERF_MEM_SNOOPX_FWD 0x0001 /* Forward */ +#define PERF_MEM_SNOOPX_PEER 0x0002 /* Transfer from peer */ +#define PERF_MEM_SNOOPX_SHIFT 38 + +/* Locked instruction */ +#define PERF_MEM_LOCK_NA 0x0001 /* Not available */ +#define PERF_MEM_LOCK_LOCKED 0x0002 /* Locked transaction */ +#define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ -#define PERF_MEM_TLB_NA 0x01 /* not available */ -#define PERF_MEM_TLB_HIT 0x02 /* hit level */ -#define PERF_MEM_TLB_MISS 0x04 /* miss level */ -#define PERF_MEM_TLB_L1 0x08 /* L1 */ -#define PERF_MEM_TLB_L2 0x10 /* L2 */ -#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ -#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ -#define PERF_MEM_TLB_SHIFT 26 +#define PERF_MEM_TLB_NA 0x0001 /* Not available */ +#define PERF_MEM_TLB_HIT 0x0002 /* Hit level */ +#define PERF_MEM_TLB_MISS 0x0004 /* Miss level */ +#define PERF_MEM_TLB_L1 0x0008 /* L1 */ +#define PERF_MEM_TLB_L2 0x0010 /* L2 */ +#define PERF_MEM_TLB_WK 0x0020 /* Hardware Walker*/ +#define PERF_MEM_TLB_OS 0x0040 /* OS fault handler */ +#define PERF_MEM_TLB_SHIFT 26 /* Access blocked */ -#define PERF_MEM_BLK_NA 0x01 /* not available */ -#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ -#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ -#define PERF_MEM_BLK_SHIFT 40 - -/* hop level */ -#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ -#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ -#define PERF_MEM_HOPS_3 0x04 /* remote board */ +#define PERF_MEM_BLK_NA 0x0001 
/* Not available */ +#define PERF_MEM_BLK_DATA 0x0002 /* Data could not be forwarded */ +#define PERF_MEM_BLK_ADDR 0x0004 /* Address conflict */ +#define PERF_MEM_BLK_SHIFT 40 + +/* Hop level */ +#define PERF_MEM_HOPS_0 0x0001 /* Remote core, same node */ +#define PERF_MEM_HOPS_1 0x0002 /* Remote node, same socket */ +#define PERF_MEM_HOPS_2 0x0003 /* Remote socket, same board */ +#define PERF_MEM_HOPS_3 0x0004 /* Remote board */ /* 5-7 available */ -#define PERF_MEM_HOPS_SHIFT 43 +#define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* - * single taken branch record layout: + * Layout of single taken branch records: * * from: source instruction (may not always be a branch insn) * to: branch target @@ -1429,37 +1439,37 @@ union perf_mem_data_src { struct perf_branch_entry { __u64 from; __u64 to; - __u64 mispred:1, /* target mispredicted */ - predicted:1,/* target predicted */ - in_tx:1, /* in transaction */ - abort:1, /* transaction abort */ - cycles:16, /* cycle count to last branch */ - type:4, /* branch type */ - spec:2, /* branch speculation info */ - new_type:4, /* additional branch type */ - priv:3, /* privilege level */ - reserved:31; + __u64 mispred : 1, /* target mispredicted */ + predicted : 1, /* target predicted */ + in_tx : 1, /* in transaction */ + abort : 1, /* transaction abort */ + cycles : 16, /* cycle count to last branch */ + type : 4, /* branch type */ + spec : 2, /* branch speculation info */ + new_type : 4, /* additional branch type */ + priv : 3, /* privilege level */ + reserved : 31; }; /* Size of used info bits in struct perf_branch_entry */ #define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 union perf_sample_weight { - __u64 full; + __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) struct { - __u32 var1_dw; - __u16 var2_w; - __u16 var3_w; + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; }; #elif defined(__BIG_ENDIAN_BITFIELD) struct { - __u16 var3_w; - __u16 var2_w; - __u32 var1_dw; + 
__u16 var3_w; + __u16 var2_w; + __u32 var1_dw; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif }; diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 008403fe0b0b..365184d931ad 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -39,18 +39,21 @@ enum perf_type_id { /* * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE + * * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA * AA: hardware event ID * EEEEEEEE: PMU type ID + * * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB * BB: hardware cache ID * CC: hardware cache op ID * DD: hardware cache op result ID * EEEEEEEE: PMU type ID - * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied. + * + * If the PMU type ID is 0, PERF_TYPE_RAW will be applied. */ -#define PERF_PMU_TYPE_SHIFT 32 -#define PERF_HW_EVENT_MASK 0xffffffff +#define PERF_PMU_TYPE_SHIFT 32 +#define PERF_HW_EVENT_MASK 0xffffffff /* * Generalized performance event event_id types, used by the @@ -112,7 +115,7 @@ enum perf_hw_cache_op_result_id { /* * Special "software" events provided by the kernel, even if the hardware * does not support performance events. These events measure various - * physical and sw events of the kernel (and allow the profiling of them as + * physical and SW events of the kernel (and allow the profiling of them as * well): */ enum perf_sw_ids { @@ -167,8 +170,9 @@ enum perf_event_sample_format { }; #define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT) + /* - * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set + * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set. * * If the user does not pass priv level information via branch_sample_type, * the kernel uses the event's priv level. Branch and event priv levels do @@ -178,20 +182,20 @@ enum perf_event_sample_format { * of branches and therefore it supersedes all the other types. 
*/ enum perf_branch_sample_type_shift { - PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ - PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ - PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ - - PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ - PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ - PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ - PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ - PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ - PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ - PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ + PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */ + PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */ + PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */ + + PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */ + PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */ + PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */ + PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */ + PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */ + PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */ + PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */ - PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */ + PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* CALL/RET stack */ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */ @@ -210,96 +214,95 @@ enum perf_branch_sample_type_shift { }; enum perf_branch_sample_type { - PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, - PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT, - PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, + PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT, + PERF_SAMPLE_BRANCH_KERNEL = 1U << 
PERF_SAMPLE_BRANCH_KERNEL_SHIFT, + PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT, - PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, - PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, - PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, - PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, - PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, - PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, - PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, + PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT, + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT, + PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT, + PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT, + PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT, + PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT, + PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT, - PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, - PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, - PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, + PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT, + PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT, + PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT, - PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, - PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT, - PERF_SAMPLE_BRANCH_TYPE_SAVE = - 1U << 
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, + PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT, - PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, - PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, - PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; /* - * Common flow change classification + * Common control flow change classifications: */ enum { - PERF_BR_UNKNOWN = 0, /* unknown */ - PERF_BR_COND = 1, /* conditional */ - PERF_BR_UNCOND = 2, /* unconditional */ - PERF_BR_IND = 3, /* indirect */ - PERF_BR_CALL = 4, /* function call */ - PERF_BR_IND_CALL = 5, /* indirect function call */ - PERF_BR_RET = 6, /* function return */ - PERF_BR_SYSCALL = 7, /* syscall */ - PERF_BR_SYSRET = 8, /* syscall return */ - PERF_BR_COND_CALL = 9, /* conditional function call */ - PERF_BR_COND_RET = 10, /* conditional function return */ - PERF_BR_ERET = 11, /* exception return */ - PERF_BR_IRQ = 12, /* irq */ - PERF_BR_SERROR = 13, /* system error */ - PERF_BR_NO_TX = 14, /* not in transaction */ - PERF_BR_EXTEND_ABI = 15, /* extend ABI */ + PERF_BR_UNKNOWN = 0, /* Unknown */ + PERF_BR_COND = 1, /* Conditional */ + PERF_BR_UNCOND = 2, /* Unconditional */ + PERF_BR_IND = 3, /* Indirect */ + PERF_BR_CALL = 4, /* Function call */ + PERF_BR_IND_CALL = 5, /* Indirect function call */ + PERF_BR_RET = 6, /* Function return */ + PERF_BR_SYSCALL = 7, /* Syscall */ + PERF_BR_SYSRET = 8, /* Syscall return */ + PERF_BR_COND_CALL = 9, /* Conditional function call */ + PERF_BR_COND_RET = 10, /* Conditional function return */ + PERF_BR_ERET = 11, /* Exception return */ + 
PERF_BR_IRQ = 12, /* IRQ */ + PERF_BR_SERROR = 13, /* System error */ + PERF_BR_NO_TX = 14, /* Not in transaction */ + PERF_BR_EXTEND_ABI = 15, /* Extend ABI */ PERF_BR_MAX, }; /* - * Common branch speculation outcome classification + * Common branch speculation outcome classifications: */ enum { - PERF_BR_SPEC_NA = 0, /* Not available */ - PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ - PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ - PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ + PERF_BR_SPEC_NA = 0, /* Not available */ + PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */ + PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */ + PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */ PERF_BR_SPEC_MAX, }; enum { - PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ - PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ - PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ - PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ - PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ - PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ - PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ - PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ + PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */ + PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */ + PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */ + PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */ + PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */ + PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */ + PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */ + PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */ PERF_BR_NEW_MAX, }; enum { - PERF_BR_PRIV_UNKNOWN = 0, - PERF_BR_PRIV_USER = 1, - PERF_BR_PRIV_KERNEL = 2, - PERF_BR_PRIV_HV = 3, + PERF_BR_PRIV_UNKNOWN = 0, + PERF_BR_PRIV_USER = 1, + PERF_BR_PRIV_KERNEL = 2, + PERF_BR_PRIV_HV = 3, }; -#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 -#define PERF_BR_ARM64_DEBUG_HALT 
PERF_BR_NEW_ARCH_2 -#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 -#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 -#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 +#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1 +#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2 +#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3 +#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4 +#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5 #define PERF_SAMPLE_BRANCH_PLM_ALL \ (PERF_SAMPLE_BRANCH_USER|\ @@ -310,9 +313,9 @@ enum { * Values to determine ABI of the registers dump. */ enum perf_sample_regs_abi { - PERF_SAMPLE_REGS_ABI_NONE = 0, - PERF_SAMPLE_REGS_ABI_32 = 1, - PERF_SAMPLE_REGS_ABI_64 = 2, + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, }; /* @@ -320,21 +323,21 @@ enum perf_sample_regs_abi { * abort events. Multiple bits can be set. */ enum { - PERF_TXN_ELISION = (1 << 0), /* From elision */ - PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ - PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ - PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */ - PERF_TXN_RETRY = (1 << 4), /* Retry possible */ - PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ - PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ - PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ + PERF_TXN_ELISION = (1 << 0), /* From elision */ + PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */ + PERF_TXN_SYNC = (1 << 2), /* Instruction is related */ + PERF_TXN_ASYNC = (1 << 3), /* Instruction is not related */ + PERF_TXN_RETRY = (1 << 4), /* Retry possible */ + PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */ + PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */ + PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */ - PERF_TXN_MAX = (1 << 8), /* non-ABI */ + PERF_TXN_MAX = (1 << 8), /* non-ABI */ - /* bits 32..63 are reserved for the abort code */ + /* Bits 32..63 are reserved for the abort 
code */ - PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), - PERF_TXN_ABORT_SHIFT = 32, + PERF_TXN_ABORT_MASK = (0xffffffffULL << 32), + PERF_TXN_ABORT_SHIFT = 32, }; /* @@ -369,24 +372,22 @@ enum perf_event_read_format { PERF_FORMAT_MAX = 1U << 5, /* non-ABI */ }; -#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ -#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ -#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ -#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ - /* add: sample_stack_user */ -#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ -#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */ -#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */ -#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */ -#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */ +#define PERF_ATTR_SIZE_VER0 64 /* Size of first published 'struct perf_event_attr' */ +#define PERF_ATTR_SIZE_VER1 72 /* Add: config2 */ +#define PERF_ATTR_SIZE_VER2 80 /* Add: branch_sample_type */ +#define PERF_ATTR_SIZE_VER3 96 /* Add: sample_regs_user */ + /* Add: sample_stack_user */ +#define PERF_ATTR_SIZE_VER4 104 /* Add: sample_regs_intr */ +#define PERF_ATTR_SIZE_VER5 112 /* Add: aux_watermark */ +#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */ +#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ +#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ /* - * Hardware event_id to monitor via a performance monitoring event: - * - * @sample_max_stack: Max number of frame pointers in a callchain, - * should be < /proc/sys/kernel/perf_event_max_stack - * Max number of entries of branch stack - * should be < hardware limit + * 'struct perf_event_attr' contains various attributes that define + * a performance event - most of them hardware related configuration + * details, but also a lot of behavioral switches and values implemented + * by the kernel. 
*/ struct perf_event_attr { @@ -396,7 +397,7 @@ struct perf_event_attr { __u32 type; /* - * Size of the attr structure, for fwd/bwd compat. + * Size of the attr structure, for forward/backwards compatibility. */ __u32 size; @@ -451,21 +452,21 @@ struct perf_event_attr { comm_exec : 1, /* flag comm events that are due to an exec */ use_clockid : 1, /* use @clockid for time fields */ context_switch : 1, /* context switch data */ - write_backward : 1, /* Write ring buffer from end to beginning */ + write_backward : 1, /* write ring buffer from end to beginning */ namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ - bpf_event : 1, /* include bpf events */ + bpf_event : 1, /* include BPF events */ aux_output : 1, /* generate AUX records instead of events */ cgroup : 1, /* include cgroup events */ text_poke : 1, /* include text poke events */ - build_id : 1, /* use build id in mmap2 events */ + build_id : 1, /* use build ID in mmap2 events */ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */ remove_on_exec : 1, /* event is removed from task on exec */ sigtrap : 1, /* send synchronous SIGTRAP on event */ __reserved_1 : 26; union { - __u32 wakeup_events; /* wakeup every n events */ + __u32 wakeup_events; /* wake up every n events */ __u32 wakeup_watermark; /* bytes before wakeup */ }; @@ -474,13 +475,13 @@ struct perf_event_attr { __u64 bp_addr; __u64 kprobe_func; /* for perf_kprobe */ __u64 uprobe_path; /* for perf_uprobe */ - __u64 config1; /* extension of config */ + __u64 config1; /* extension of config */ }; union { __u64 bp_len; - __u64 kprobe_addr; /* when kprobe_func == NULL */ + __u64 kprobe_addr; /* when kprobe_func == NULL */ __u64 probe_offset; /* for perf_[k,u]probe */ - __u64 config2; /* extension of config1 */ + __u64 config2; /* extension of config1 */ }; __u64 branch_sample_type; /* enum perf_branch_sample_type */ @@ -510,7 +511,16 @@ struct perf_event_attr { * Wakeup watermark for AUX area */ 
__u32 aux_watermark; + + /* + * Max number of frame pointers in a callchain, should be + * lower than /proc/sys/kernel/perf_event_max_stack. + * + * Max number of entries of branch stack should be lower + * than the hardware limit. + */ __u16 sample_max_stack; + __u16 __reserved_2; __u32 aux_sample_size; __u32 __reserved_3; @@ -528,7 +538,7 @@ struct perf_event_attr { /* * Structure used by below PERF_EVENT_IOC_QUERY_BPF command - * to query bpf programs attached to the same perf tracepoint + * to query BPF programs attached to the same perf tracepoint * as the given perf event. */ struct perf_event_query_bpf { @@ -550,21 +560,21 @@ struct perf_event_query_bpf { /* * Ioctls that can be done on a perf event fd: */ -#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) -#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) -#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) -#define PERF_EVENT_IOC_RESET _IO ('$', 3) -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) -#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) -#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *) -#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) -#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) -#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) +#define PERF_EVENT_IOC_ENABLE _IO ('$', 0) +#define PERF_EVENT_IOC_DISABLE _IO ('$', 1) +#define PERF_EVENT_IOC_REFRESH _IO ('$', 2) +#define PERF_EVENT_IOC_RESET _IO ('$', 3) +#define PERF_EVENT_IOC_PERIOD _IOW ('$', 4, __u64) +#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) +#define PERF_EVENT_IOC_SET_FILTER _IOW ('$', 6, char *) +#define PERF_EVENT_IOC_ID _IOR ('$', 7, __u64 *) +#define PERF_EVENT_IOC_SET_BPF _IOW ('$', 8, __u32) +#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW ('$', 9, __u32) #define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) -#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *) +#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW ('$', 11, struct perf_event_attr *) enum perf_event_ioc_flags { - 
PERF_IOC_FLAG_GROUP = 1U << 0, + PERF_IOC_FLAG_GROUP = 1U << 0, }; /* @@ -575,7 +585,7 @@ struct perf_event_mmap_page { __u32 compat_version; /* lowest version this is compat with */ /* - * Bits needed to read the hw events in user-space. + * Bits needed to read the HW events in user-space. * * u32 seq, time_mult, time_shift, index, width; * u64 count, enabled, running; @@ -613,7 +623,7 @@ struct perf_event_mmap_page { __u32 index; /* hardware event identifier */ __s64 offset; /* add to hardware event value */ __u64 time_enabled; /* time event active */ - __u64 time_running; /* time event on cpu */ + __u64 time_running; /* time event on CPU */ union { __u64 capabilities; struct { @@ -641,7 +651,7 @@ struct perf_event_mmap_page { /* * If cap_usr_time the below fields can be used to compute the time - * delta since time_enabled (in ns) using rdtsc or similar. + * delta since time_enabled (in ns) using RDTSC or similar. * * u64 quot, rem; * u64 delta; @@ -714,7 +724,7 @@ struct perf_event_mmap_page { * after reading this value. * * When the mapping is PROT_WRITE the @data_tail value should be - * written by userspace to reflect the last read data, after issueing + * written by user-space to reflect the last read data, after issuing * an smp_mb() to separate the data read from the ->data_tail store. * In this case the kernel will not over-write unread data. * @@ -730,7 +740,7 @@ struct perf_event_mmap_page { /* * AUX area is defined by aux_{offset,size} fields that should be set - * by the userspace, so that + * by the user-space, so that * * aux_offset >= data_offset + data_size * @@ -804,7 +814,7 @@ struct perf_event_mmap_page { * Indicates that thread was preempted in TASK_RUNNING state. * * PERF_RECORD_MISC_MMAP_BUILD_ID: - * Indicates that mmap2 event carries build id data. + * Indicates that mmap2 event carries build ID data. 
*/ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) #define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) @@ -815,26 +825,26 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_EXT_RESERVED (1 << 15) struct perf_event_header { - __u32 type; - __u16 misc; - __u16 size; + __u32 type; + __u16 misc; + __u16 size; }; struct perf_ns_link_info { - __u64 dev; - __u64 ino; + __u64 dev; + __u64 ino; }; enum { - NET_NS_INDEX = 0, - UTS_NS_INDEX = 1, - IPC_NS_INDEX = 2, - PID_NS_INDEX = 3, - USER_NS_INDEX = 4, - MNT_NS_INDEX = 5, - CGROUP_NS_INDEX = 6, - - NR_NAMESPACES, /* number of available namespaces */ + NET_NS_INDEX = 0, + UTS_NS_INDEX = 1, + IPC_NS_INDEX = 2, + PID_NS_INDEX = 3, + USER_NS_INDEX = 4, + MNT_NS_INDEX = 5, + CGROUP_NS_INDEX = 6, + + NR_NAMESPACES, /* number of available namespaces */ }; enum perf_event_type { @@ -850,11 +860,11 @@ enum perf_event_type { * optional fields being ignored. * * struct sample_id { - * { u32 pid, tid; } && PERF_SAMPLE_TID - * { u64 time; } && PERF_SAMPLE_TIME - * { u64 id; } && PERF_SAMPLE_ID - * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID - * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU * { u64 id; } && PERF_SAMPLE_IDENTIFIER * } && perf_event_attr::sample_id_all * @@ -865,7 +875,7 @@ enum perf_event_type { /* * The MMAP events record the PROT_EXEC mappings so that we can - * correlate userspace IPs to code. They have the following structure: + * correlate user-space IPs to code. 
They have the following structure: * * struct { * struct perf_event_header header; @@ -875,7 +885,7 @@ enum perf_event_type { * u64 len; * u64 pgoff; * char filename[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP = 1, @@ -885,7 +895,7 @@ enum perf_event_type { * struct perf_event_header header; * u64 id; * u64 lost; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_LOST = 2, @@ -896,7 +906,7 @@ enum perf_event_type { * * u32 pid, tid; * char comm[]; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_COMM = 3, @@ -907,7 +917,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_EXIT = 4, @@ -918,7 +928,7 @@ enum perf_event_type { * u64 time; * u64 id; * u64 stream_id; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_THROTTLE = 5, @@ -930,7 +940,7 @@ enum perf_event_type { * u32 pid, ppid; * u32 tid, ptid; * u64 time; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_FORK = 7, @@ -941,7 +951,7 @@ enum perf_event_type { * u32 pid, tid; * * struct read_format values; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_READ = 8, @@ -996,12 +1006,12 @@ enum perf_event_type { * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * - * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER * - * { u64 size; - * char data[size]; - * u64 dyn_size; } && PERF_SAMPLE_STACK_USER + * { u64 size; + * char data[size]; + * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * * { union perf_sample_weight * { @@ -1061,7 +1071,7 @@ enum perf_event_type { * }; * u32 prot, flags; * char filename[]; - * struct 
sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_MMAP2 = 10, @@ -1070,12 +1080,12 @@ enum perf_event_type { * Records that new data landed in the AUX buffer part. * * struct { - * struct perf_event_header header; + * struct perf_event_header header; * - * u64 aux_offset; - * u64 aux_size; + * u64 aux_offset; + * u64 aux_size; * u64 flags; - * struct sample_id sample_id; + * struct sample_id sample_id; * }; */ PERF_RECORD_AUX = 11, @@ -1158,7 +1168,7 @@ enum perf_event_type { PERF_RECORD_KSYMBOL = 17, /* - * Record bpf events: + * Record BPF events: * enum perf_bpf_event_type { * PERF_BPF_EVENT_UNKNOWN = 0, * PERF_BPF_EVENT_PROG_LOAD = 1, @@ -1236,181 +1246,181 @@ enum perf_record_ksymbol_type { #define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0) enum perf_bpf_event_type { - PERF_BPF_EVENT_UNKNOWN = 0, - PERF_BPF_EVENT_PROG_LOAD = 1, - PERF_BPF_EVENT_PROG_UNLOAD = 2, - PERF_BPF_EVENT_MAX, /* non-ABI */ + PERF_BPF_EVENT_UNKNOWN = 0, + PERF_BPF_EVENT_PROG_LOAD = 1, + PERF_BPF_EVENT_PROG_UNLOAD = 2, + PERF_BPF_EVENT_MAX, /* non-ABI */ }; -#define PERF_MAX_STACK_DEPTH 127 -#define PERF_MAX_CONTEXTS_PER_STACK 8 +#define PERF_MAX_STACK_DEPTH 127 +#define PERF_MAX_CONTEXTS_PER_STACK 8 enum perf_callchain_context { - PERF_CONTEXT_HV = (__u64)-32, - PERF_CONTEXT_KERNEL = (__u64)-128, - PERF_CONTEXT_USER = (__u64)-512, + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, - PERF_CONTEXT_GUEST = (__u64)-2048, - PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, - PERF_CONTEXT_GUEST_USER = (__u64)-2560, + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, - PERF_CONTEXT_MAX = (__u64)-4095, + PERF_CONTEXT_MAX = (__u64)-4095, }; /** * PERF_RECORD_AUX::flags bits */ -#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */ -#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */ -#define PERF_AUX_FLAG_PARTIAL 0x04 /* 
record contains gaps */ -#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */ +#define PERF_AUX_FLAG_TRUNCATED 0x0001 /* Record was truncated to fit */ +#define PERF_AUX_FLAG_OVERWRITE 0x0002 /* Snapshot from overwrite mode */ +#define PERF_AUX_FLAG_PARTIAL 0x0004 /* Record contains gaps */ +#define PERF_AUX_FLAG_COLLISION 0x0008 /* Sample collided with another */ #define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */ /* CoreSight PMU AUX buffer formats */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ -#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */ +#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */ -#define PERF_FLAG_FD_NO_GROUP (1UL << 0) -#define PERF_FLAG_FD_OUTPUT (1UL << 1) -#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */ -#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ +#define PERF_FLAG_FD_NO_GROUP (1UL << 0) +#define PERF_FLAG_FD_OUTPUT (1UL << 1) +#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup ID, per-CPU mode only */ +#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */ #if defined(__LITTLE_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_op:5, /* type of opcode */ - mem_lvl:14, /* memory hierarchy level */ - mem_snoop:5, /* snoop mode */ - mem_lock:2, /* lock instr */ - mem_dtlb:7, /* tlb access */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_remote:1, /* remote */ - mem_snoopx:2, /* snoop mode, ext */ - mem_blk:3, /* access blocked */ - mem_hops:3, /* hop level */ - mem_rsvd:18; + __u64 mem_op : 5, /* Type of opcode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_snoop : 5, /* Snoop mode */ + mem_lock : 2, /* Lock instr */ + mem_dtlb : 7, /* TLB access */ + mem_lvl_num : 4, /* Memory hierarchy level 
number */ + mem_remote : 1, /* Remote */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_blk : 3, /* Access blocked */ + mem_hops : 3, /* Hop level */ + mem_rsvd : 18; }; }; #elif defined(__BIG_ENDIAN_BITFIELD) union perf_mem_data_src { __u64 val; struct { - __u64 mem_rsvd:18, - mem_hops:3, /* hop level */ - mem_blk:3, /* access blocked */ - mem_snoopx:2, /* snoop mode, ext */ - mem_remote:1, /* remote */ - mem_lvl_num:4, /* memory hierarchy level number */ - mem_dtlb:7, /* tlb access */ - mem_lock:2, /* lock instr */ - mem_snoop:5, /* snoop mode */ - mem_lvl:14, /* memory hierarchy level */ - mem_op:5; /* type of opcode */ + __u64 mem_rsvd : 18, + mem_hops : 3, /* Hop level */ + mem_blk : 3, /* Access blocked */ + mem_snoopx : 2, /* Snoop mode, ext */ + mem_remote : 1, /* Remote */ + mem_lvl_num : 4, /* Memory hierarchy level number */ + mem_dtlb : 7, /* TLB access */ + mem_lock : 2, /* Lock instr */ + mem_snoop : 5, /* Snoop mode */ + mem_lvl : 14, /* Memory hierarchy level */ + mem_op : 5; /* Type of opcode */ }; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif -/* type of opcode (load/store/prefetch,code) */ -#define PERF_MEM_OP_NA 0x01 /* not available */ -#define PERF_MEM_OP_LOAD 0x02 /* load instruction */ -#define PERF_MEM_OP_STORE 0x04 /* store instruction */ -#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */ -#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */ -#define PERF_MEM_OP_SHIFT 0 +/* Type of memory opcode: */ +#define PERF_MEM_OP_NA 0x0001 /* Not available */ +#define PERF_MEM_OP_LOAD 0x0002 /* Load instruction */ +#define PERF_MEM_OP_STORE 0x0004 /* Store instruction */ +#define PERF_MEM_OP_PFETCH 0x0008 /* Prefetch */ +#define PERF_MEM_OP_EXEC 0x0010 /* Code (execution) */ +#define PERF_MEM_OP_SHIFT 0 /* - * PERF_MEM_LVL_* namespace being depricated to some extent in the + * The PERF_MEM_LVL_* namespace is being deprecated to some extent in * favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields. 
- * Supporting this namespace inorder to not break defined ABIs. + * We support this namespace in order to not break defined ABIs. * - * memory hierarchy (memory level, hit or miss) + * Memory hierarchy (memory level, hit or miss) */ -#define PERF_MEM_LVL_NA 0x01 /* not available */ -#define PERF_MEM_LVL_HIT 0x02 /* hit level */ -#define PERF_MEM_LVL_MISS 0x04 /* miss level */ -#define PERF_MEM_LVL_L1 0x08 /* L1 */ -#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */ -#define PERF_MEM_LVL_L2 0x20 /* L2 */ -#define PERF_MEM_LVL_L3 0x40 /* L3 */ -#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */ -#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */ -#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */ -#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */ -#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */ -#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ -#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ -#define PERF_MEM_LVL_SHIFT 5 - -#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */ -#define PERF_MEM_REMOTE_SHIFT 37 - -#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */ -#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */ -#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */ -#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */ -#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */ -/* 0x7 available */ -#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */ -#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */ -#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */ -#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */ -#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */ -#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */ -#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */ -#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */ - -#define PERF_MEM_LVLNUM_SHIFT 33 - -/* snoop mode */ -#define PERF_MEM_SNOOP_NA 0x01 /* not available */ -#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */ -#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */ 
-#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */ -#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */ -#define PERF_MEM_SNOOP_SHIFT 19 - -#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ -#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */ -#define PERF_MEM_SNOOPX_SHIFT 38 - -/* locked instruction */ -#define PERF_MEM_LOCK_NA 0x01 /* not available */ -#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */ -#define PERF_MEM_LOCK_SHIFT 24 +#define PERF_MEM_LVL_NA 0x0001 /* Not available */ +#define PERF_MEM_LVL_HIT 0x0002 /* Hit level */ +#define PERF_MEM_LVL_MISS 0x0004 /* Miss level */ +#define PERF_MEM_LVL_L1 0x0008 /* L1 */ +#define PERF_MEM_LVL_LFB 0x0010 /* Line Fill Buffer */ +#define PERF_MEM_LVL_L2 0x0020 /* L2 */ +#define PERF_MEM_LVL_L3 0x0040 /* L3 */ +#define PERF_MEM_LVL_LOC_RAM 0x0080 /* Local DRAM */ +#define PERF_MEM_LVL_REM_RAM1 0x0100 /* Remote DRAM (1 hop) */ +#define PERF_MEM_LVL_REM_RAM2 0x0200 /* Remote DRAM (2 hops) */ +#define PERF_MEM_LVL_REM_CCE1 0x0400 /* Remote Cache (1 hop) */ +#define PERF_MEM_LVL_REM_CCE2 0x0800 /* Remote Cache (2 hops) */ +#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */ +#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */ +#define PERF_MEM_LVL_SHIFT 5 + +#define PERF_MEM_REMOTE_REMOTE 0x0001 /* Remote */ +#define PERF_MEM_REMOTE_SHIFT 37 + +#define PERF_MEM_LVLNUM_L1 0x0001 /* L1 */ +#define PERF_MEM_LVLNUM_L2 0x0002 /* L2 */ +#define PERF_MEM_LVLNUM_L3 0x0003 /* L3 */ +#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ +#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ +/* 0x007 available */ +#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */ +#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ +#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */ +#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */ +#define PERF_MEM_LVLNUM_LFB 0x000c /* LFB / L1 Miss Handling Buffer */ +#define PERF_MEM_LVLNUM_RAM 0x000d /* RAM */ +#define PERF_MEM_LVLNUM_PMEM 
0x000e /* PMEM */ +#define PERF_MEM_LVLNUM_NA 0x000f /* N/A */ + +#define PERF_MEM_LVLNUM_SHIFT 33 + +/* Snoop mode */ +#define PERF_MEM_SNOOP_NA 0x0001 /* Not available */ +#define PERF_MEM_SNOOP_NONE 0x0002 /* No snoop */ +#define PERF_MEM_SNOOP_HIT 0x0004 /* Snoop hit */ +#define PERF_MEM_SNOOP_MISS 0x0008 /* Snoop miss */ +#define PERF_MEM_SNOOP_HITM 0x0010 /* Snoop hit modified */ +#define PERF_MEM_SNOOP_SHIFT 19 + +#define PERF_MEM_SNOOPX_FWD 0x0001 /* Forward */ +#define PERF_MEM_SNOOPX_PEER 0x0002 /* Transfer from peer */ +#define PERF_MEM_SNOOPX_SHIFT 38 + +/* Locked instruction */ +#define PERF_MEM_LOCK_NA 0x0001 /* Not available */ +#define PERF_MEM_LOCK_LOCKED 0x0002 /* Locked transaction */ +#define PERF_MEM_LOCK_SHIFT 24 /* TLB access */ -#define PERF_MEM_TLB_NA 0x01 /* not available */ -#define PERF_MEM_TLB_HIT 0x02 /* hit level */ -#define PERF_MEM_TLB_MISS 0x04 /* miss level */ -#define PERF_MEM_TLB_L1 0x08 /* L1 */ -#define PERF_MEM_TLB_L2 0x10 /* L2 */ -#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/ -#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */ -#define PERF_MEM_TLB_SHIFT 26 +#define PERF_MEM_TLB_NA 0x0001 /* Not available */ +#define PERF_MEM_TLB_HIT 0x0002 /* Hit level */ +#define PERF_MEM_TLB_MISS 0x0004 /* Miss level */ +#define PERF_MEM_TLB_L1 0x0008 /* L1 */ +#define PERF_MEM_TLB_L2 0x0010 /* L2 */ +#define PERF_MEM_TLB_WK 0x0020 /* Hardware Walker*/ +#define PERF_MEM_TLB_OS 0x0040 /* OS fault handler */ +#define PERF_MEM_TLB_SHIFT 26 /* Access blocked */ -#define PERF_MEM_BLK_NA 0x01 /* not available */ -#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */ -#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */ -#define PERF_MEM_BLK_SHIFT 40 - -/* hop level */ -#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ -#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ -#define PERF_MEM_HOPS_3 0x04 /* remote board */ +#define PERF_MEM_BLK_NA 0x0001 
/* Not available */ +#define PERF_MEM_BLK_DATA 0x0002 /* Data could not be forwarded */ +#define PERF_MEM_BLK_ADDR 0x0004 /* Address conflict */ +#define PERF_MEM_BLK_SHIFT 40 + +/* Hop level */ +#define PERF_MEM_HOPS_0 0x0001 /* Remote core, same node */ +#define PERF_MEM_HOPS_1 0x0002 /* Remote node, same socket */ +#define PERF_MEM_HOPS_2 0x0003 /* Remote socket, same board */ +#define PERF_MEM_HOPS_3 0x0004 /* Remote board */ /* 5-7 available */ -#define PERF_MEM_HOPS_SHIFT 43 +#define PERF_MEM_HOPS_SHIFT 43 #define PERF_MEM_S(a, s) \ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT) /* - * single taken branch record layout: + * Layout of single taken branch records: * * from: source instruction (may not always be a branch insn) * to: branch target @@ -1429,37 +1439,37 @@ union perf_mem_data_src { struct perf_branch_entry { __u64 from; __u64 to; - __u64 mispred:1, /* target mispredicted */ - predicted:1,/* target predicted */ - in_tx:1, /* in transaction */ - abort:1, /* transaction abort */ - cycles:16, /* cycle count to last branch */ - type:4, /* branch type */ - spec:2, /* branch speculation info */ - new_type:4, /* additional branch type */ - priv:3, /* privilege level */ - reserved:31; + __u64 mispred : 1, /* target mispredicted */ + predicted : 1, /* target predicted */ + in_tx : 1, /* in transaction */ + abort : 1, /* transaction abort */ + cycles : 16, /* cycle count to last branch */ + type : 4, /* branch type */ + spec : 2, /* branch speculation info */ + new_type : 4, /* additional branch type */ + priv : 3, /* privilege level */ + reserved : 31; }; /* Size of used info bits in struct perf_branch_entry */ #define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 union perf_sample_weight { - __u64 full; + __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) struct { - __u32 var1_dw; - __u16 var2_w; - __u16 var3_w; + __u32 var1_dw; + __u16 var2_w; + __u16 var3_w; }; #elif defined(__BIG_ENDIAN_BITFIELD) struct { - __u16 var3_w; - __u16 var2_w; - __u32 var1_dw; + 
__u16 var3_w; + __u16 var2_w; + __u32 var1_dw; }; #else -#error "Unknown endianness" +# error "Unknown endianness" #endif }; -- Gitee From ac34bf5f2ff479eb930e67ab5688bc9584c4cbff Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 24 Jul 2025 00:06:12 -0400 Subject: [PATCH 006/231] perf/x86/intel: Fix crash in icl_update_topdown_event() commit b0823d5fbacb1c551d793cbfe7af24e0d1fa45ed upstream. [ Upstream commit b0823d5fbacb1c551d793cbfe7af24e0d1fa45ed ] The perf_fuzzer found a hard-lockup crash on a RaptorLake machine: Oops: general protection fault, maybe for address 0xffff89aeceab400: 0000 CPU: 23 UID: 0 PID: 0 Comm: swapper/23 Tainted: [W]=WARN Hardware name: Dell Inc. Precision 9660/0VJ762 RIP: 0010:native_read_pmc+0x7/0x40 Code: cc e8 8d a9 01 00 48 89 03 5b cd cc cc cc cc 0f 1f ... RSP: 000:fffb03100273de8 EFLAGS: 00010046 .... Call Trace: icl_update_topdown_event+0x165/0x190 ? ktime_get+0x38/0xd0 intel_pmu_read_event+0xf9/0x210 __perf_event_read+0xf9/0x210 CPUs 16-23 are E-core CPUs that don't support the perf metrics feature. The icl_update_topdown_event() should not be invoked on these CPUs. It's a regression of commit: f9bdf1f95339 ("perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read") The bug introduced by that commit is that the is_topdown_event() function is mistakenly used to replace the is_topdown_count() call to check if the topdown functions for the perf metrics feature should be invoked. Fix it. 
Intel-SIG: commit b0823d5fbacb perf/x86/intel: Fix crash in icl_update_topdown_event() Backport CWF PMU support and dependency Fixes: f9bdf1f95339 ("perf/x86/intel: Avoid disable PMU if !cpuc->enabled in sample read") Closes: https://lore.kernel.org/lkml/352f0709-f026-cd45-e60c-60dfd97f73f3@maine.edu/ Reported-by: Vince Weaver Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Tested-by: Vince Weaver Cc: stable@vger.kernel.org # v6.15+ Link: https://lore.kernel.org/r/20250612143818.2889040-1-kan.liang@linux.intel.com [ omitted PEBS check ] Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a6b5259d4404..6d99c2e7c867 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2811,7 +2811,7 @@ static void intel_pmu_read_event(struct perf_event *event) * If the PEBS counters snapshotting is enabled, * the topdown event is available in PEBS records. */ - if (is_topdown_event(event) && !is_pebs_counter_event_group(event)) + if (is_topdown_count(event) && !is_pebs_counter_event_group(event)) static_call(intel_pmu_update_topdown_event)(event, NULL); else intel_pmu_drain_pebs_buffer(); -- Gitee From f9c4c936145a9af322dd37b8ab500cb06a3bd91d Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 8 May 2025 16:44:52 +0300 Subject: [PATCH 007/231] perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq commit 99bcd91fabada0dbb1d5f0de44532d8008db93c6 upstream. Currently, using PEBS-via-PT with a sample frequency instead of a sample period, causes a segfault. For example: BUG: kernel NULL pointer dereference, address: 0000000000000195 ? __die_body.cold+0x19/0x27 ? page_fault_oops+0xca/0x290 ? exc_page_fault+0x7e/0x1b0 ? asm_exc_page_fault+0x26/0x30 ? intel_pmu_pebs_event_update_no_drain+0x40/0x60 ? 
intel_pmu_pebs_event_update_no_drain+0x32/0x60 intel_pmu_drain_pebs_icl+0x333/0x350 handle_pmi_common+0x272/0x3c0 intel_pmu_handle_irq+0x10a/0x2e0 perf_event_nmi_handler+0x2a/0x50 That happens because intel_pmu_pebs_event_update_no_drain() assumes all the pebs_enabled bits represent counter indexes, which is not always the case. In this particular case, bits 60 and 61 are set for PEBS-via-PT purposes. The behaviour of PEBS-via-PT with sample frequency is questionable because although a PMI is generated (PEBS_PMI_AFTER_EACH_RECORD), the period is not adjusted anyway. Putting that aside, fix intel_pmu_pebs_event_update_no_drain() by passing the mask of counter bits instead of 'size'. Note, prior to the Fixes commit, 'size' would be limited to the maximum counter index, so the issue was not hit. Intel-SIG: commit 99bcd91fabad perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq Backport CWF PMU support and dependency Fixes: 722e42e45c2f1 ("perf/x86: Support counter mask") Signed-off-by: Adrian Hunter Signed-off-by: Ingo Molnar Reviewed-by: Kan Liang Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Namhyung Kim Cc: Ian Rogers Cc: linux-perf-users@vger.kernel.org Link: https://lore.kernel.org/r/20250508134452.73960-1-adrian.hunter@intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index cd8634165bde..332b5fc9dd22 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2428,8 +2428,9 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_ setup_pebs_fixed_sample_data); } -static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) +static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask) { + u64 pebs_enabled = cpuc->pebs_enabled & mask; struct perf_event 
*event; int bit; @@ -2440,7 +2441,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int * It needs to call intel_pmu_save_and_restart_reload() to * update the event->count for this case. */ - for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { + for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) { event = cpuc->events[bit]; if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); @@ -2475,7 +2476,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d } if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, size); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; } @@ -2589,7 +2590,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); if (unlikely(base >= top)) { - intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); + intel_pmu_pebs_event_update_no_drain(cpuc, mask); return; } -- Gitee From 80be0ab73994a8659650b5730e283abaefdce759 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 24 Apr 2025 06:47:18 -0700 Subject: [PATCH 008/231] perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting commit 7da9960b59fb7e590eb8538c9428db55a4ea2d23 upstream. The counter backwards may be observed in the PMI handler when counters-snapshotting some non-precise events in the freq mode. For the non-precise events, it's possible the counters-snapshotting records a positive value for an overflowed PEBS event. Then the HW auto-reload mechanism reset the counter to 0 immediately. Because the pebs_event_reset is cleared in the freq mode, which doesn't set the PERF_X86_EVENT_AUTO_RELOAD. In the PMI handler, 0 will be read rather than the positive value recorded in the counters-snapshotting record. The counters-snapshotting case has to be specially handled. 
Since the event value has been updated when processing the counters-snapshotting record, only needs to set the new period for the counter via x86_pmu_set_period(). Intel-SIG: commit 7da9960b59fb perf/x86/intel/ds: Fix counter backwards of non-precise events counters-snapshotting Backport CWF PMU support and dependency Fixes: e02e9b0374c3 ("perf/x86/intel: Support PEBS counters snapshotting") Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20250424134718.311934-6-kan.liang@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 332b5fc9dd22..36b2a9dd8399 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2359,8 +2359,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event, */ intel_pmu_save_and_restart_reload(event, count); } - } else - intel_pmu_save_and_restart(event); + } else { + /* + * For a non-precise event, it's possible the + * counters-snapshotting records a positive value for the + * overflowed event. Then the HW auto-reload mechanism + * reset the counter to 0 immediately, because the + * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD + * is not set. The counter backwards may be observed in a + * PMI handler. + * + * Since the event value has been updated when processing the + * counters-snapshotting record, only needs to set the new + * period for the counter. + */ + if (is_pebs_counter_event_group(event)) + static_call(x86_pmu_set_period)(event); + else + intel_pmu_save_and_restart(event); + } } static __always_inline void -- Gitee From 42d34db84a7339b807578b29ae06893b061d2fbc Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 19 Feb 2025 06:10:05 -0800 Subject: [PATCH 009/231] perf/x86/intel: Fix event constraints for LNC commit 782cffeec9ad96daa64ffb2d527b2a052fb02552 upstream. 
According to the latest event list, update the event constraint tables for Lion Cove core. The general rule (the event codes < 0x90 are restricted to counters 0-3.) has been removed. There is no restriction for most of the performance monitoring events. Intel-SIG: commit 782cffeec9ad perf/x86/intel: Fix event constraints for LNC Backport CWF PMU support and dependency Fixes: a932aa0e868f ("perf/x86: Add Lunar Lake and Arrow Lake support") Reported-by: Amiri Khalil Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20250219141005.2446823-1-kan.liang@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 20 +++++++------------- arch/x86/events/intel/ds.c | 2 +- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6d99c2e7c867..8243a9dda8b8 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -387,34 +387,28 @@ static struct event_constraint intel_lnc_event_constraints[] = { METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), + INTEL_EVENT_CONSTRAINT(0x20, 0xf), + + INTEL_UEVENT_CONSTRAINT(0x012a, 0xf), + INTEL_UEVENT_CONSTRAINT(0x012b, 0xf), INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), INTEL_UEVENT_CONSTRAINT(0x0175, 0x4), INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff), INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff), - /* - * Generally event codes < 0x90 are restricted to counters 0-3. - * The 0x2E and 0x3C are exception, which has no restriction. 
- */ - INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf), - INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf), - INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8), + INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc), INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3), - INTEL_EVENT_CONSTRAINT(0xce, 0x1), INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), - /* - * Generally event codes >= 0x90 are likely to have no restrictions. - * The exception are defined as above. - */ - INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff), + + INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf), EVENT_CONSTRAINT_END }; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 36b2a9dd8399..3010a90a339a 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1177,7 +1177,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), - INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff), + INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc), INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ -- Gitee From acd8e058990e4abcd75c2de2c55f11ec97dab22c Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 16 Dec 2024 08:02:52 -0800 Subject: [PATCH 010/231] perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC commit aa5d2ca7c179c40669edb5e96d931bf9828dea3d upstream. The released OCR and FRONTEND events utilized more bits on Lunar Lake p-core. The corresponding mask in the extra_regs has to be extended to unblock the extra bits. Add a dedicated intel_lnc_extra_regs. 
Intel-SIG: commit aa5d2ca7c179 perf/x86/intel: Fix bitmask of OCR and FRONTEND events for LNC Backport CWF PMU support and dependency Fixes: a932aa0e868f ("perf/x86: Add Lunar Lake and Arrow Lake support") Reported-by: Andi Kleen Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20241216160252.430858-1-kan.liang@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 8243a9dda8b8..d5504e969d57 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -413,6 +413,16 @@ static struct event_constraint intel_lnc_event_constraints[] = { EVENT_CONSTRAINT_END }; +static struct extra_reg intel_lnc_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE), + INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE), + INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE), + INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE), + EVENT_EXTRA_END +}; EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); @@ -6748,7 +6758,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu) intel_pmu_init_glc(pmu); hybrid(pmu, event_constraints) = intel_lnc_event_constraints; hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints; - hybrid(pmu, extra_regs) = intel_rwc_extra_regs; + hybrid(pmu, extra_regs) = intel_lnc_extra_regs; } static __always_inline void intel_pmu_init_skt(struct pmu *pmu) -- Gitee From eeca4cb246428fff442a847e776b01cd90a5ad75 Mon Sep 17 00:00:00 2001 From: 
Dapeng Mi Date: Thu, 29 May 2025 08:02:36 +0000 Subject: [PATCH 011/231] perf/x86/intel: Fix incorrect MSR index calculations in intel_pmu_config_acr() commit 86aa94cd50b138be0dd872b0779fa3036e641881 upstream. The MSR offset calculations in intel_pmu_config_acr() are buggy. To calculate fixed counter MSR addresses in intel_pmu_config_acr(), the HW counter index "idx" is subtracted by INTEL_PMC_IDX_FIXED. This leads to the ACR mask value of fixed counters to be incorrectly saved to the positions of GP counters in acr_cfg_b[], e.g. For fixed counter 0, its ACR counter mask should be saved to acr_cfg_b[32], but it's saved to acr_cfg_b[0] incorrectly. Fix this issue. [ mingo: Clarified & improved the changelog. ] Intel-SIG: commit 86aa94cd50b1 perf/x86/intel: Fix incorrect MSR index calculations in intel_pmu_config_acr() Backport CWF PMU support and dependency Fixes: ec980e4facef ("perf/x86/intel: Support auto counter reload") Signed-off-by: Dapeng Mi Signed-off-by: Ingo Molnar Reviewed-by: Kan Liang Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20250529080236.2552247-2-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d5504e969d57..95a8650b7518 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2889,6 +2889,7 @@ static void intel_pmu_config_acr(int idx, u64 mask, u32 reload) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); int msr_b, msr_c; + int msr_offset; if (!mask && !cpuc->acr_cfg_b[idx]) return; @@ -2896,19 +2897,20 @@ static void intel_pmu_config_acr(int idx, u64 mask, u32 reload) if (idx < INTEL_PMC_IDX_FIXED) { msr_b = MSR_IA32_PMC_V6_GP0_CFG_B; msr_c = MSR_IA32_PMC_V6_GP0_CFG_C; + msr_offset = x86_pmu.addr_offset(idx, false); } else { msr_b = MSR_IA32_PMC_V6_FX0_CFG_B; msr_c = MSR_IA32_PMC_V6_FX0_CFG_C; - idx -= INTEL_PMC_IDX_FIXED; + 
msr_offset = x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false); } if (cpuc->acr_cfg_b[idx] != mask) { - wrmsrl(msr_b + x86_pmu.addr_offset(idx, false), mask); + wrmsrl(msr_b + msr_offset, mask); cpuc->acr_cfg_b[idx] = mask; } /* Only need to update the reload value when there is a valid config value. */ if (mask && cpuc->acr_cfg_c[idx] != reload) { - wrmsrl(msr_c + x86_pmu.addr_offset(idx, false), reload); + wrmsrl(msr_c + msr_offset, reload); cpuc->acr_cfg_c[idx] = reload; } } -- Gitee From a57fbfa95d6192935d52270091ebd1955690e9f0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 9 Apr 2025 22:28:49 +0200 Subject: [PATCH 012/231] x86/msr: Standardize on u64 in commit f4138de5e41fae1a0b406f0d354a3028dc46bf1f upstream. Also fix some nearby whitespace damage while at it. Intel-SIG: commit f4138de5e41f x86/msr: Standardize on u64 in Backport CWF PMU support and dependency Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Cc: Juergen Gross Cc: H. Peter Anvin Cc: Dave Hansen Cc: Xin Li Cc: Linus Torvalds Signed-off-by: Jason Zeng --- arch/x86/include/asm/msr-index.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 334a284615ef..6cf19d001e58 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -513,7 +513,7 @@ #define MSR_HWP_CAPABILITIES 0x00000771 #define MSR_HWP_REQUEST_PKG 0x00000772 #define MSR_HWP_INTERRUPT 0x00000773 -#define MSR_HWP_REQUEST 0x00000774 +#define MSR_HWP_REQUEST 0x00000774 #define MSR_HWP_STATUS 0x00000777 /* CPUID.6.EAX */ @@ -530,16 +530,16 @@ #define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff) /* IA32_HWP_REQUEST */ -#define HWP_MIN_PERF(x) (x & 0xff) -#define HWP_MAX_PERF(x) ((x & 0xff) << 8) +#define HWP_MIN_PERF(x) (x & 0xff) +#define HWP_MAX_PERF(x) ((x & 0xff) << 8) #define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) -#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24) 
+#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24) #define HWP_EPP_PERFORMANCE 0x00 #define HWP_EPP_BALANCE_PERFORMANCE 0x80 #define HWP_EPP_BALANCE_POWERSAVE 0xC0 #define HWP_EPP_POWERSAVE 0xFF -#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32) -#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42) +#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32) +#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) << 42) /* IA32_HWP_STATUS */ #define HWP_GUARANTEED_CHANGE(x) (x & 0x1) -- Gitee From a7706182262e6ef045ca8d4720d4b1cf202d7c8c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 12 Jun 2025 17:58:05 -0300 Subject: [PATCH 013/231] tools arch x86: Sync the msr-index.h copy with the kernel sources commit 6143374c6dc874d301dc16612aed144bf4544ba upstream. To pick up the changes from these csets: 159013a7ca18c271 ("x86/its: Enumerate Indirect Target Selection (ITS) bug") f4138de5e41fae1a ("x86/msr: Standardize on u64 in ") ec980e4facef8110 ("perf/x86/intel: Support auto counter reload") That cause no changes to tooling as it doesn't include a new MSR to be captured by the tools/perf/trace/beauty/tracepoints/x86_msr.sh script. 
Just silences this perf build warning: Warning: Kernel ABI header differences: diff -u tools/arch/x86/include/asm/msr-index.h arch/x86/include/asm/msr-index.h Intel-SIG: commit 6143374c6dc8 tools arch x86: Sync the msr-index.h copy with the kernel sources Backport CWF PMU support and dependency Cc: Adrian Hunter Cc: Dave Hansen Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Cc: Pawan Gupta Cc: Peter Zijlstra Link: https://lore.kernel.org/r/aEtAUg83OQGx8Kay@x1 Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jason Zeng --- tools/arch/x86/include/asm/msr-index.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 38d71379a781..b9972c5c14ad 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -513,7 +513,7 @@ #define MSR_HWP_CAPABILITIES 0x00000771 #define MSR_HWP_REQUEST_PKG 0x00000772 #define MSR_HWP_INTERRUPT 0x00000773 -#define MSR_HWP_REQUEST 0x00000774 +#define MSR_HWP_REQUEST 0x00000774 #define MSR_HWP_STATUS 0x00000777 /* CPUID.6.EAX */ @@ -530,16 +530,16 @@ #define HWP_LOWEST_PERF(x) (((x) >> 24) & 0xff) /* IA32_HWP_REQUEST */ -#define HWP_MIN_PERF(x) (x & 0xff) -#define HWP_MAX_PERF(x) ((x & 0xff) << 8) +#define HWP_MIN_PERF(x) (x & 0xff) +#define HWP_MAX_PERF(x) ((x & 0xff) << 8) #define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) -#define HWP_ENERGY_PERF_PREFERENCE(x) (((unsigned long long) x & 0xff) << 24) +#define HWP_ENERGY_PERF_PREFERENCE(x) (((u64)x & 0xff) << 24) #define HWP_EPP_PERFORMANCE 0x00 #define HWP_EPP_BALANCE_PERFORMANCE 0x80 #define HWP_EPP_BALANCE_POWERSAVE 0xC0 #define HWP_EPP_POWERSAVE 0xFF -#define HWP_ACTIVITY_WINDOW(x) ((unsigned long long)(x & 0xff3) << 32) -#define HWP_PACKAGE_CONTROL(x) ((unsigned long long)(x & 0x1) << 42) +#define HWP_ACTIVITY_WINDOW(x) ((u64)(x & 0xff3) << 32) +#define HWP_PACKAGE_CONTROL(x) ((u64)(x & 0x1) 
<< 42) /* IA32_HWP_STATUS */ #define HWP_GUARANTEED_CHANGE(x) (x & 0x1) -- Gitee From c9f887a3d564f4eb953b88a40ffa235d06c416e7 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 20 Aug 2025 10:30:27 +0800 Subject: [PATCH 014/231] perf/x86/intel: Fix IA32_PMC_x_CFG_B MSRs access error commit 43796f30507802d93ead2dc44fc9637f34671a89 upstream. When running perf_fuzzer on PTL, sometimes the below "unchecked MSR access error" is seen when accessing IA32_PMC_x_CFG_B MSRs. [ 55.611268] unchecked MSR access error: WRMSR to 0x1986 (tried to write 0x0000000200000001) at rIP: 0xffffffffac564b28 (native_write_msr+0x8/0x30) [ 55.611280] Call Trace: [ 55.611282] [ 55.611284] ? intel_pmu_config_acr+0x87/0x160 [ 55.611289] intel_pmu_enable_acr+0x6d/0x80 [ 55.611291] intel_pmu_enable_event+0xce/0x460 [ 55.611293] x86_pmu_start+0x78/0xb0 [ 55.611297] x86_pmu_enable+0x218/0x3a0 [ 55.611300] ? x86_pmu_enable+0x121/0x3a0 [ 55.611302] perf_pmu_enable+0x40/0x50 [ 55.611307] ctx_resched+0x19d/0x220 [ 55.611309] __perf_install_in_context+0x284/0x2f0 [ 55.611311] ? __pfx_remote_function+0x10/0x10 [ 55.611314] remote_function+0x52/0x70 [ 55.611317] ? __pfx_remote_function+0x10/0x10 [ 55.611319] generic_exec_single+0x84/0x150 [ 55.611323] smp_call_function_single+0xc5/0x1a0 [ 55.611326] ? __pfx_remote_function+0x10/0x10 [ 55.611329] perf_install_in_context+0xd1/0x1e0 [ 55.611331] ? __pfx___perf_install_in_context+0x10/0x10 [ 55.611333] __do_sys_perf_event_open+0xa76/0x1040 [ 55.611336] __x64_sys_perf_event_open+0x26/0x30 [ 55.611337] x64_sys_call+0x1d8e/0x20c0 [ 55.611339] do_syscall_64+0x4f/0x120 [ 55.611343] entry_SYSCALL_64_after_hwframe+0x76/0x7e On PTL, GP counter 0 and 1 doesn't support auto counter reload feature, thus it would trigger a #GP when trying to write 1 on bit 0 of CFG_B MSR which requires to enable auto counter reload on GP counter 0. 
The root cause of this issue is that the check for the auto counter reload (ACR) counter mask from user space is incorrect in the intel_pmu_acr_late_setup() helper. It allows an invalid ACR counter mask from user space to be set into hw.config1 and then written into the CFG_B MSRs, triggering the MSR access warning. e.g., A user may create a perf event with an ACR counter mask (config2=0xcb), and there is only 1 event created, so "cpuc->n_events" is 1. The correct check condition should be "i + idx >= cpuc->n_events" instead of "i + idx > cpuc->n_events" (it looks like a typo). Otherwise, the counter mask would traverse twice and an invalid "cpuc->assign[1]" bit (bit 0) is set into hw.config1 and causes an MSR access error. Besides, also check whether the events corresponding to the ACR counter mask are ACR events. If not, filter out those counter mask bits. If an event is not an ACR event, it could be scheduled to an HW counter which doesn't support ACR, so it's invalid to add its counter index to the ACR counter mask. Furthermore, remove the WARN_ON_ONCE() since it's easily triggered: a user could set any invalid ACR counter mask, and the warning message could mislead users. 
Intel-SIG: commit 43796f305078 perf/x86/intel: Fix IA32_PMC_x_CFG_B MSRs access error Backport CWF PMU support and dependency Fixes: ec980e4facef ("perf/x86/intel: Support auto counter reload") Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20250820023032.17128-3-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 95a8650b7518..501e83fe08bc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2986,7 +2986,8 @@ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc) if (event->group_leader != leader->group_leader) break; for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) { - if (WARN_ON_ONCE(i + idx > cpuc->n_events)) + if (i + idx >= cpuc->n_events || + !is_acr_event_group(cpuc->event_list[i + idx])) return; __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1); } -- Gitee From fdd086c79440395e11cfef1cddd6063d9f3ab939 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 9 Nov 2023 18:28:49 -0800 Subject: [PATCH 015/231] KVM: x86/pmu: Add common define to capture fixed counters offset commit be6b067dae1573cf4d53c8b08175d8872d82f030 upstream. Add a common define to "officially" solidify KVM's split of counters, i.e. to commit to using bits 31:0 to track general purpose counters and bits 63:32 to track fixed counters (which only Intel supports). KVM already bleeds this behavior all over common PMU code, and adding a KVM- defined macro allows clarifying that the value is a _base_, as oppposed to the _flag_ that is used to access fixed PMCs via RDPMC (which perf confusingly calls INTEL_PMC_FIXED_RDPMC_BASE). No functional change intended. 
Intel-SIG: commit be6b067dae15 KVM: x86/pmu: Add common define to capture fixed counters offset Backport CWF PMU support and dependency Link: https://lore.kernel.org/r/20231110022857.1273836-3-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Jason Zeng --- arch/x86/kvm/pmu.c | 8 ++++---- arch/x86/kvm/pmu.h | 4 +++- arch/x86/kvm/vmx/pmu_intel.c | 12 ++++++------ 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index c424fb2bcbd8..a2b2b358db12 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -67,7 +67,7 @@ static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = { * all perf counters (both gp and fixed). The mapping relationship * between pmc and perf counters is as the following: * * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters - * [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed + * [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed * * AMD: [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H * and later, [0 .. 
AMD64_NUM_COUNTERS_CORE-1] <=> gp counters */ @@ -411,7 +411,7 @@ static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f, static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter, int idx) { - int fixed_idx = idx - INTEL_PMC_IDX_FIXED; + int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX; if (filter->action == KVM_PMU_EVENT_DENY && test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap)) @@ -465,7 +465,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) if (pmc_is_fixed(pmc)) { fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, - pmc->idx - INTEL_PMC_IDX_FIXED); + pmc->idx - KVM_FIXED_PMC_BASE_IDX); if (fixed_ctr_ctrl & 0x1) eventsel |= ARCH_PERFMON_EVENTSEL_OS; if (fixed_ctr_ctrl & 0x2) @@ -857,7 +857,7 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc) select_user = config & ARCH_PERFMON_EVENTSEL_USR; } else { config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, - pmc->idx - INTEL_PMC_IDX_FIXED); + pmc->idx - KVM_FIXED_PMC_BASE_IDX); select_os = config & 0x1; select_user = config & 0x2; } diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 51bbb01b21c8..e8c6a1f4b8e8 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -18,6 +18,8 @@ #define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME 0x10002 +#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED + struct kvm_pmu_ops { struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu, @@ -130,7 +132,7 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) if (pmc_is_fixed(pmc)) return fixed_ctrl_field(pmu->fixed_ctr_ctrl, - pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3; + pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3; return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 7307f0bbddc0..46056bdee7e6 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -50,18 
+50,18 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); - __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); + __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use); kvm_pmu_request_counter_reprogram(pmc); } } static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) { - if (pmc_idx < INTEL_PMC_IDX_FIXED) { + if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) { return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, MSR_P6_EVNTSEL0); } else { - u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED; + u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX; return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); } @@ -516,7 +516,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) for (i = 0; i < pmu->nr_arch_fixed_counters; i++) pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | - (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED)); + (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX)); pmu->global_ctrl_mask = counter_mask; /* @@ -560,7 +560,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { pmu->fixed_ctr_ctrl_mask &= - ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4)); + ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4)); } pmu->pebs_data_cfg_mask = ~0xff00000full; } else { @@ -586,7 +586,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu) for (i = 0; i < KVM_PMC_MAX_FIXED; i++) { pmu->fixed_counters[i].type = KVM_PMC_FIXED; pmu->fixed_counters[i].vcpu = vcpu; - pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; + pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX; pmu->fixed_counters[i].current_config = 0; pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i); } -- Gitee From be354aeda0c2385e7d3b144bf1b10d83ae4ea827 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 30 Apr 2024 08:52:38 +0800 
Subject: [PATCH 016/231] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu commit 0e102ce3d4133194a26060fe987315133736c37b upstream. Several '_mask' suffixed variables such as, global_ctrl_mask, are defined in kvm_pmu structure. However the _mask suffix is ambiguous and misleading since it's not a real mask with positive logic. On the contrary it represents the reserved bits of corresponding MSRs and these bits should not be accessed. Intel-SIG: commit 0e102ce3d413 KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu Backport CWF PMU support and dependency Suggested-by: Sean Christopherson Signed-off-by: Dapeng Mi Link: https://lore.kernel.org/r/20240430005239.13527-2-dapeng1.mi@linux.intel.com Signed-off-by: Sean Christopherson [jz: an extra rename for commit aafae78e6d79 ("KVM: SVM: Emulate PERF_CNTR_GLOBAL_STATUS_SET for PerfMonV2")] Signed-off-by: Jason Zeng --- arch/x86/include/asm/kvm_host.h | 10 +++++----- arch/x86/kvm/pmu.c | 18 +++++++++--------- arch/x86/kvm/pmu.h | 2 +- arch/x86/kvm/svm/pmu.c | 4 ++-- arch/x86/kvm/vmx/pmu_intel.c | 26 +++++++++++++------------- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b5097170f25e..471d7b82ca15 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -540,12 +540,12 @@ struct kvm_pmu { unsigned nr_arch_fixed_counters; unsigned available_event_types; u64 fixed_ctr_ctrl; - u64 fixed_ctr_ctrl_mask; + u64 fixed_ctr_ctrl_rsvd; u64 global_ctrl; u64 global_status; u64 counter_bitmask[2]; - u64 global_ctrl_mask; - u64 global_status_mask; + u64 global_ctrl_rsvd; + u64 global_status_rsvd; u64 reserved_bits; u64 raw_event_mask; struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC]; @@ -565,9 +565,9 @@ struct kvm_pmu { u64 ds_area; u64 pebs_enable; - u64 pebs_enable_mask; + u64 pebs_enable_rsvd; u64 pebs_data_cfg; - u64 pebs_data_cfg_mask; + u64 pebs_data_cfg_rsvd; /* * If a guest 
counter is cross-mapped to host counter with different diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index a2b2b358db12..0c86aeddba86 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -677,13 +677,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) break; - if (data & pmu->global_status_mask) + if (data & pmu->global_status_rsvd) return 1; pmu->global_status = data; break; case MSR_AMD64_PERF_CNTR_GLOBAL_CTL: - data &= ~pmu->global_ctrl_mask; + data &= ~pmu->global_ctrl_rsvd; fallthrough; case MSR_CORE_PERF_GLOBAL_CTRL: if (!kvm_valid_perf_global_ctrl(pmu, data)) @@ -700,7 +700,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in * GLOBAL_STATUS, and so the set of reserved bits is the same. */ - if (data & pmu->global_status_mask) + if (data & pmu->global_status_rsvd) return 1; fallthrough; case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR: @@ -709,7 +709,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET: if (!msr_info->host_initiated) - pmu->global_status |= data & ~pmu->global_status_mask; + pmu->global_status |= data & ~pmu->global_status_rsvd; break; default: kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index); @@ -772,11 +772,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu) pmu->counter_bitmask[KVM_PMC_FIXED] = 0; pmu->reserved_bits = 0xffffffff00200000ull; pmu->raw_event_mask = X86_RAW_EVENT_MASK; - pmu->global_ctrl_mask = ~0ull; - pmu->global_status_mask = ~0ull; - pmu->fixed_ctr_ctrl_mask = ~0ull; - pmu->pebs_enable_mask = ~0ull; - pmu->pebs_data_cfg_mask = ~0ull; + pmu->global_ctrl_rsvd = ~0ull; + pmu->global_status_rsvd = ~0ull; + pmu->fixed_ctr_ctrl_rsvd = ~0ull; + pmu->pebs_enable_rsvd = ~0ull; + pmu->pebs_data_cfg_rsvd = ~0ull; bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX); if (!vcpu->kvm->arch.enable_pmu) diff --git 
a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index e8c6a1f4b8e8..63464440cd2e 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -91,7 +91,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc) static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data) { - return !(pmu->global_ctrl_mask & data); + return !(pmu->global_ctrl_rsvd & data); } /* returns general purpose PMC with the specified MSR. Note that it can be diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 41ac4cee11b8..819623b95fd9 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -200,8 +200,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu) kvm_pmu_cap.num_counters_gp); if (pmu->version > 1) { - pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1); - pmu->global_status_mask = pmu->global_ctrl_mask; + pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1); + pmu->global_status_rsvd = pmu->global_ctrl_rsvd; } pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 46056bdee7e6..4bbbf6ba67b1 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -360,14 +360,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: - if (data & pmu->fixed_ctr_ctrl_mask) + if (data & pmu->fixed_ctr_ctrl_rsvd) return 1; if (pmu->fixed_ctr_ctrl != data) reprogram_fixed_counters(pmu, data); break; case MSR_IA32_PEBS_ENABLE: - if (data & pmu->pebs_enable_mask) + if (data & pmu->pebs_enable_rsvd) return 1; if (pmu->pebs_enable != data) { @@ -383,7 +383,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) pmu->ds_area = data; break; case MSR_PEBS_DATA_CFG: - if (data & pmu->pebs_data_cfg_mask) + if (data & pmu->pebs_data_cfg_rsvd) return 1; pmu->pebs_data_cfg = data; @@ -468,7 +468,7 @@ static void intel_pmu_refresh(struct kvm_vcpu 
*vcpu) union cpuid10_eax eax; union cpuid10_edx edx; u64 perf_capabilities; - u64 counter_mask; + u64 counter_rsvd; int i; memset(&lbr_desc->records, 0, sizeof(lbr_desc->records)); @@ -514,21 +514,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) } for (i = 0; i < pmu->nr_arch_fixed_counters; i++) - pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4)); - counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | + pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4)); + counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX)); - pmu->global_ctrl_mask = counter_mask; + pmu->global_ctrl_rsvd = counter_rsvd; /* * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET) * share reserved bit definitions. The kernel just happens to use * OVF_CTRL for the names. */ - pmu->global_status_mask = pmu->global_ctrl_mask + pmu->global_status_rsvd = pmu->global_ctrl_rsvd & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF | MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD); if (vmx_pt_mode_is_host_guest()) - pmu->global_status_mask &= + pmu->global_status_rsvd &= ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI; entry = kvm_find_cpuid_entry_index(vcpu, 7, 0); @@ -556,15 +556,15 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) if (perf_capabilities & PERF_CAP_PEBS_FORMAT) { if (perf_capabilities & PERF_CAP_PEBS_BASELINE) { - pmu->pebs_enable_mask = counter_mask; + pmu->pebs_enable_rsvd = counter_rsvd; pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { - pmu->fixed_ctr_ctrl_mask &= + pmu->fixed_ctr_ctrl_rsvd &= ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4)); } - pmu->pebs_data_cfg_mask = ~0xff00000full; + pmu->pebs_data_cfg_rsvd = ~0xff00000full; } else { - pmu->pebs_enable_mask = + pmu->pebs_enable_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1); } } -- Gitee From 0dc203af1f7cc0bfb7608b7ad244045bd4585fdc Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 30 Apr 2024 
08:52:39 +0800 Subject: [PATCH 017/231] KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros commit 75430c412a3139c29404459ab1216a07d1280428 upstream. Magic numbers are used to manipulate the bit fields of FIXED_CTR_CTRL MSR. This makes reading code become difficult, so use pre-defined macros to replace these magic numbers. Intel-SIG: commit 75430c412a31 KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros Backport CWF PMU support and dependency Signed-off-by: Dapeng Mi Link: https://lore.kernel.org/r/20240430005239.13527-3-dapeng1.mi@linux.intel.com [sean: drop unnecessary curly braces] Signed-off-by: Sean Christopherson Signed-off-by: Jason Zeng --- arch/x86/kvm/pmu.c | 10 +++++----- arch/x86/kvm/pmu.h | 6 ++++-- arch/x86/kvm/vmx/pmu_intel.c | 12 ++++++++---- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 0c86aeddba86..6984463a20ba 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -466,11 +466,11 @@ static void reprogram_counter(struct kvm_pmc *pmc) if (pmc_is_fixed(pmc)) { fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, pmc->idx - KVM_FIXED_PMC_BASE_IDX); - if (fixed_ctr_ctrl & 0x1) + if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL) eventsel |= ARCH_PERFMON_EVENTSEL_OS; - if (fixed_ctr_ctrl & 0x2) + if (fixed_ctr_ctrl & INTEL_FIXED_0_USER) eventsel |= ARCH_PERFMON_EVENTSEL_USR; - if (fixed_ctr_ctrl & 0x8) + if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI) eventsel |= ARCH_PERFMON_EVENTSEL_INT; new_config = (u64)fixed_ctr_ctrl; } @@ -858,8 +858,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc) } else { config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, pmc->idx - KVM_FIXED_PMC_BASE_IDX); - select_os = config & 0x1; - select_user = config & 0x2; + select_os = config & INTEL_FIXED_0_KERNEL; + select_user = config & INTEL_FIXED_0_USER; } return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? 
select_os : select_user; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 63464440cd2e..40c3e4c00eea 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -12,7 +12,8 @@ MSR_IA32_MISC_ENABLE_BTS_UNAVAIL) /* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */ -#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf) +#define fixed_ctrl_field(ctrl_reg, idx) \ + (((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK) #define VMWARE_BACKDOOR_PMC_HOST_TSC 0x10000 #define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001 @@ -132,7 +133,8 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc) if (pmc_is_fixed(pmc)) return fixed_ctrl_field(pmu->fixed_ctr_ctrl, - pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3; + pmc->idx - KVM_FIXED_PMC_BASE_IDX) & + (INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER); return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 4bbbf6ba67b1..9edb2ade3a9d 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -514,7 +514,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) } for (i = 0; i < pmu->nr_arch_fixed_counters; i++) - pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4)); + pmu->fixed_ctr_ctrl_rsvd &= + ~intel_fixed_bits_by_idx(i, + INTEL_FIXED_0_KERNEL | + INTEL_FIXED_0_USER | + INTEL_FIXED_0_ENABLE_PMI); + counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX)); pmu->global_ctrl_rsvd = counter_rsvd; @@ -558,10 +563,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) if (perf_capabilities & PERF_CAP_PEBS_BASELINE) { pmu->pebs_enable_rsvd = counter_rsvd; pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE; - for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) pmu->fixed_ctr_ctrl_rsvd &= - ~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4)); - } + 
~intel_fixed_bits_by_idx(i, ICL_FIXED_0_ADAPTIVE); pmu->pebs_data_cfg_rsvd = ~0xff00000full; } else { pmu->pebs_enable_rsvd = -- Gitee From e570a3da402ccd29b58418082c3576fdba6cd643 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 20 Aug 2025 10:30:31 +0800 Subject: [PATCH 018/231] perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK commit 2676dbf9f4fb7f6739d1207c0f1deaf63124642a ICL_FIXED_0_ADAPTIVE is missed to be added into INTEL_FIXED_BITS_MASK, add it. With help of this new INTEL_FIXED_BITS_MASK, intel_pmu_enable_fixed() can be optimized. The old fixed counter control bits can be unconditionally cleared with INTEL_FIXED_BITS_MASK and then set new control bits base on new configuration. Intel-SIG: commit 2676dbf9f4fb perf/x86/intel: Add ICL_FIXED_0_ADAPTIVE bit into INTEL_FIXED_BITS_MASK Backport CWF PMU support and dependency Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yi Lai Link: https://lore.kernel.org/r/20250820023032.17128-7-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 10 +++------- arch/x86/include/asm/perf_event.h | 6 +++++- arch/x86/kvm/pmu.h | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 501e83fe08bc..3c4df7533947 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2834,8 +2834,8 @@ static void intel_pmu_enable_fixed(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - u64 mask, bits = 0; int idx = hwc->idx; + u64 bits = 0; if (is_topdown_idx(idx)) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -2874,14 +2874,10 @@ static void intel_pmu_enable_fixed(struct perf_event *event) idx -= INTEL_PMC_IDX_FIXED; bits = intel_fixed_bits_by_idx(idx, bits); - mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); - - if 
(x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); - mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE); - } - cpuc->fixed_ctrl_val &= ~mask; + cpuc->fixed_ctrl_val &= ~intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK); cpuc->fixed_ctrl_val |= bits; } diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index a79155360549..35459437dad5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -35,7 +35,6 @@ #define ARCH_PERFMON_EVENTSEL_EQ (1ULL << 36) #define ARCH_PERFMON_EVENTSEL_UMASK2 (0xFFULL << 40) -#define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 #define INTEL_FIXED_0_KERNEL (1ULL << 0) #define INTEL_FIXED_0_USER (1ULL << 1) @@ -48,6 +47,11 @@ #define ICL_EVENTSEL_ADAPTIVE (1ULL << 34) #define ICL_FIXED_0_ADAPTIVE (1ULL << 32) +#define INTEL_FIXED_BITS_MASK \ + (INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER | \ + INTEL_FIXED_0_ANYTHREAD | INTEL_FIXED_0_ENABLE_PMI | \ + ICL_FIXED_0_ADAPTIVE) + #define intel_fixed_bits_by_idx(_idx, _bits) \ ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE)) diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 40c3e4c00eea..2b9b4dd70e45 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -11,7 +11,7 @@ #define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \ MSR_IA32_MISC_ENABLE_BTS_UNAVAIL) -/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */ +/* retrieve a fixed counter bits out of IA32_FIXED_CTR_CTRL */ #define fixed_ctrl_field(ctrl_reg, idx) \ (((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK) -- Gitee From b9ca4816daca39cc468c8642fca2b144f5e2cd81 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 20 Aug 2025 10:30:32 +0800 Subject: [PATCH 019/231] perf/x86: Print PMU counters bitmap in x86_pmu_show_pmu_cap() commit 
f49e1be19542487921e82b29004908966cb99d7c upstream. Along with the introduction Perfmon v6, pmu counters could be incontinuous, like fixed counters on CWF, only fixed counters 0-3 and 5-7 are supported, there is no fixed counter 4 on CWF. To accommodate this change, archPerfmonExt CPUID (0x23) leaves are introduced to enumerate the true-view of counters bitmap. Current perf code already supports archPerfmonExt CPUID and uses counters-bitmap to enumerate HW really supported counters, but x86_pmu_show_pmu_cap() still only dumps the absolute counter number instead of true-view bitmap, it's out-dated and may mislead readers. So dump counters true-view bitmap in x86_pmu_show_pmu_cap() and opportunistically change the dump sequence and words. Intel-SIG: commit f49e1be19542 perf/x86: Print PMU counters bitmap in x86_pmu_show_pmu_cap() Backport CWF PMU support and dependency Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20250820023032.17128-8-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/core.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d5b3b19b8719..0d291865b4bc 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2058,13 +2058,15 @@ static void _x86_pmu_read(struct perf_event *event) void x86_pmu_show_pmu_cap(struct pmu *pmu) { - pr_info("... version: %d\n", x86_pmu.version); - pr_info("... bit width: %d\n", x86_pmu.cntval_bits); - pr_info("... generic registers: %d\n", x86_pmu_num_counters(pmu)); - pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); - pr_info("... max period: %016Lx\n", x86_pmu.max_period); - pr_info("... fixed-purpose events: %d\n", x86_pmu_num_counters_fixed(pmu)); - pr_info("... event mask: %016Lx\n", hybrid(pmu, intel_ctrl)); + pr_info("... version: %d\n", x86_pmu.version); + pr_info("... 
bit width: %d\n", x86_pmu.cntval_bits); + pr_info("... generic counters: %d\n", x86_pmu_num_counters(pmu)); + pr_info("... generic bitmap: %016llx\n", hybrid(pmu, cntr_mask64)); + pr_info("... fixed-purpose counters: %d\n", x86_pmu_num_counters_fixed(pmu)); + pr_info("... fixed-purpose bitmap: %016llx\n", hybrid(pmu, fixed_cntr_mask64)); + pr_info("... value mask: %016llx\n", x86_pmu.cntval_mask); + pr_info("... max period: %016llx\n", x86_pmu.max_period); + pr_info("... global_ctrl mask: %016llx\n", hybrid(pmu, intel_ctrl)); } static int __init init_hw_perf_events(void) -- Gitee From 8cc0c155a4612c30b67eab3708ea6648bd978db8 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Thu, 26 Sep 2024 09:40:40 -0500 Subject: [PATCH 020/231] perf mem: Fix printing PERF_MEM_LVLNUM_{L2_MHB|MSC} commit 4f23fc34cc68812c68c3a3dec15e26e87565f430 upstream. With commit 8ec9497d3ef34 ("tools/include: Sync uapi/linux/perf.h with the kernel sources"), 'perf mem report' gives an incorrect memory access string. ... 0.02% 1 3644 L5 hit [.] 0x0000000000009b0e mlc [.] 0x00007fce43f59480 ... This occurs because, if no entry exists in mem_lvlnum, perf_mem__lvl_scnprintf will default to 'L%d, lvl', which in this case for PERF_MEM_LVLNUM_L2_MHB is 0x05. Add entries for PERF_MEM_LVLNUM_L2_MHB and PERF_MEM_LVLNUM_MSC to mem_lvlnum, so that the correct strings are printed. ... 0.02% 1 3644 L2 MHB hit [.] 0x0000000000009b0e mlc [.] 0x00007fce43f59480 ... 
Intel-SIG: commit 4f23fc34cc68 perf mem: Fix printing PERF_MEM_LVLNUM_{L2_MHB|MSC} Backport CWF PMU support and dependency Fixes: 8ec9497d3ef34 ("tools/include: Sync uapi/linux/perf.h with the kernel sources") Suggested-by: Kan Liang Signed-off-by: Thomas Falcon Reviewed-by: Leo Yan Link: https://lore.kernel.org/r/20240926144040.77897-1-thomas.falcon@intel.com Signed-off-by: Namhyung Kim Signed-off-by: Jason Zeng --- tools/perf/util/mem-events.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index a6996d3e32c0..16e276402df8 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -297,6 +297,12 @@ static const char * const mem_lvl[] = { }; static const char * const mem_lvlnum[] = { + [PERF_MEM_LVLNUM_L1] = "L1", + [PERF_MEM_LVLNUM_L2] = "L2", + [PERF_MEM_LVLNUM_L3] = "L3", + [PERF_MEM_LVLNUM_L4] = "L4", + [PERF_MEM_LVLNUM_L2_MHB] = "L2 MHB", + [PERF_MEM_LVLNUM_MSC] = "Memory-side Cache", [PERF_MEM_LVLNUM_UNC] = "Uncached", [PERF_MEM_LVLNUM_CXL] = "CXL", [PERF_MEM_LVLNUM_IO] = "I/O", @@ -379,7 +385,7 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info) if (mem_lvlnum[lvl]) l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]); else - l += scnprintf(out + l, sz - l, "L%d", lvl); + l += scnprintf(out + l, sz - l, "Unknown level %d", lvl); l += scnprintf(out + l, sz - l, " %s", hit_miss); return l; -- Gitee From 8cffaa044e9761f69f7d88d3bfa965e2e570e1d4 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 15 Apr 2025 11:44:10 +0000 Subject: [PATCH 021/231] perf/x86/intel: Decouple BTS initialization from PEBS initialization commit d971342d38bf228ea4c137249501eb5be38ee958 upstream. Move x86_pmu.bts flag initialization into bts_init() from intel_ds_init() and rename intel_ds_init() to intel_pebs_init() since it fully initializes PEBS now after removing the x86_pmu.bts initialization. 
It's safe to move x86_pmu.bts into bts_init() since all x86_pmu.bts flag are called after bts_init() execution. Intel-SIG: commit d971342d38bf perf/x86/intel: Decouple BTS initialization from PEBS initialization Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20250415114428.341182-5-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/bts.c | 6 +++++- arch/x86/events/intel/core.c | 2 +- arch/x86/events/intel/ds.c | 5 ++--- arch/x86/events/perf_event.h | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 974e917e65b2..d3a4b6630368 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -587,7 +587,11 @@ static void bts_event_read(struct perf_event *event) static __init int bts_init(void) { - if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) + if (!boot_cpu_has(X86_FEATURE_DTES64)) + return -ENODEV; + + x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); + if (!x86_pmu.bts) return -ENODEV; if (boot_cpu_has(X86_FEATURE_PTI)) { diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 3c4df7533947..d726f96d15ca 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6849,7 +6849,7 @@ __init int intel_pmu_init(void) if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) intel_pmu_arch_lbr_init(); - intel_ds_init(); + intel_pebs_init(); x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3010a90a339a..b3c191fd98da 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2650,10 +2650,10 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d } /* - * BTS, PEBS probe and setup + * PEBS probe and setup */ -void __init intel_ds_init(void) +void __init intel_pebs_init(void) { /* * No support for 32bit 
formats @@ -2661,7 +2661,6 @@ void __init intel_ds_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64)) return; - x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; if (x86_pmu.version <= 4) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index e5237c126e4b..320595fe0c48 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1672,7 +1672,7 @@ void intel_pmu_drain_pebs_buffer(void); void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); -void intel_ds_init(void); +void intel_pebs_init(void); void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, struct cpu_hw_events *cpuc, -- Gitee From cb3fcce5620588cfaeee65b97f131ce4630ddfe8 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 15 Apr 2025 11:44:11 +0000 Subject: [PATCH 022/231] perf/x86/intel: Rename x86_pmu.pebs to x86_pmu.ds_pebs commit acb727e0956a2424f22e5ab8c1ff9a39d1acb150 upstream. Since architectural PEBS would be introduced in subsequent patches, rename x86_pmu.pebs to x86_pmu.ds_pebs to distinguish it from the upcoming architectural PEBS. Besides, restrict the reserve_ds_buffers() helper to work only for the legacy DS based PEBS and avoid it corrupting the pebs_active flag and releasing the PEBS buffer incorrectly for arch-PEBS since a later patch would reuse these flags and alloc/release_pebs_buffer() helpers for arch-PEBS.
Intel-SIG: commit acb727e0956a perf/x86/intel: Rename x86_pmu.pebs to x86_pmu.ds_pebs Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20250415114428.341182-6-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 6 +++--- arch/x86/events/intel/ds.c | 32 ++++++++++++++++++-------------- arch/x86/events/perf_event.h | 2 +- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d726f96d15ca..90fa14635cf7 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4566,7 +4566,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, }; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return arr; /* @@ -5729,7 +5729,7 @@ static __init void intel_clovertown_quirk(void) * these chips. */ pr_warn("PEBS disabled due to CPU errata\n"); - x86_pmu.pebs = 0; + x86_pmu.ds_pebs = 0; x86_pmu.pebs_constraints = NULL; } @@ -6234,7 +6234,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) static umode_t pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return x86_pmu.pebs ? attr->mode : 0; + return x86_pmu.ds_pebs ? 
attr->mode : 0; } static umode_t diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b3c191fd98da..3891d1324000 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -602,7 +602,7 @@ static int alloc_pebs_buffer(int cpu) int max, node = cpu_to_node(cpu); void *buffer, *insn_buff, *cea; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return 0; buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); @@ -637,7 +637,7 @@ static void release_pebs_buffer(int cpu) struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); void *cea; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) return; kfree(per_cpu(insn_buffer, cpu)); @@ -712,7 +712,7 @@ void release_ds_buffers(void) { int cpu; - if (!x86_pmu.bts && !x86_pmu.pebs) + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; for_each_possible_cpu(cpu) @@ -728,7 +728,8 @@ void release_ds_buffers(void) } for_each_possible_cpu(cpu) { - release_pebs_buffer(cpu); + if (x86_pmu.ds_pebs) + release_pebs_buffer(cpu); release_bts_buffer(cpu); } } @@ -739,15 +740,17 @@ void reserve_ds_buffers(void) int cpu; x86_pmu.bts_active = 0; - x86_pmu.pebs_active = 0; - if (!x86_pmu.bts && !x86_pmu.pebs) + if (x86_pmu.ds_pebs) + x86_pmu.pebs_active = 0; + + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; if (!x86_pmu.bts) bts_err = 1; - if (!x86_pmu.pebs) + if (!x86_pmu.ds_pebs) pebs_err = 1; for_each_possible_cpu(cpu) { @@ -759,7 +762,8 @@ void reserve_ds_buffers(void) if (!bts_err && alloc_bts_buffer(cpu)) bts_err = 1; - if (!pebs_err && alloc_pebs_buffer(cpu)) + if (x86_pmu.ds_pebs && !pebs_err && + alloc_pebs_buffer(cpu)) pebs_err = 1; if (bts_err && pebs_err) @@ -771,7 +775,7 @@ void reserve_ds_buffers(void) release_bts_buffer(cpu); } - if (pebs_err) { + if (x86_pmu.ds_pebs && pebs_err) { for_each_possible_cpu(cpu) release_pebs_buffer(cpu); } @@ -783,7 +787,7 @@ void reserve_ds_buffers(void) if (x86_pmu.bts && !bts_err) x86_pmu.bts_active = 1; - if (x86_pmu.pebs && !pebs_err) + if (x86_pmu.ds_pebs && !pebs_err) 
x86_pmu.pebs_active = 1; for_each_possible_cpu(cpu) { @@ -2661,12 +2665,12 @@ void __init intel_pebs_init(void) if (!boot_cpu_has(X86_FEATURE_DTES64)) return; - x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); + x86_pmu.ds_pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; if (x86_pmu.version <= 4) x86_pmu.pebs_no_isolation = 1; - if (x86_pmu.pebs) { + if (x86_pmu.ds_pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; char *pebs_qual = ""; int format = x86_pmu.intel_cap.pebs_format; @@ -2750,7 +2754,7 @@ void __init intel_pebs_init(void) default: pr_cont("no PEBS fmt%d%c, ", format, pebs_type); - x86_pmu.pebs = 0; + x86_pmu.ds_pebs = 0; } } } @@ -2759,7 +2763,7 @@ void perf_restore_debug_store(void) { struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); - if (!x86_pmu.bts && !x86_pmu.pebs) + if (!x86_pmu.bts && !x86_pmu.ds_pebs) return; wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 320595fe0c48..00c7c34273b9 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -908,7 +908,7 @@ struct x86_pmu { */ unsigned int bts :1, bts_active :1, - pebs :1, + ds_pebs :1, pebs_active :1, pebs_broken :1, pebs_prec_dist :1, -- Gitee From ed80d4a0c1594861793536dd2699ac56299d7afc Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 15 Apr 2025 11:44:12 +0000 Subject: [PATCH 023/231] perf/x86/intel: Introduce pairs of PEBS static calls commit 4a3fd13054a98c43dfcfcbdb93deb43c7b1b9c34 upstream. Arch-PEBS retires IA32_PEBS_ENABLE and MSR_PEBS_DATA_CFG MSRs, so intel_pmu_pebs_enable/disable() and intel_pmu_pebs_enable/disable_all() do not need to be called for arch-PEBS. To make the code cleaner, introduce static calls x86_pmu_pebs_enable/disable() and x86_pmu_pebs_enable/disable_all() instead of adding "x86_pmu.arch_pebs" check directly in these helpers.
Intel-SIG: commit 4a3fd13054a9 perf/x86/intel: Introduce pairs of PEBS static calls Suggested-by: Peter Zijlstra Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Link: https://lkml.kernel.org/r/20250415114428.341182-7-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/core.c | 10 ++++++++++ arch/x86/events/intel/core.c | 8 ++++---- arch/x86/events/intel/ds.c | 5 +++++ arch/x86/events/perf_event.h | 8 ++++++++ 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 0d291865b4bc..e163fc7c5428 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -96,6 +96,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter); DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all); +DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all); + /* * This one is magic, it will get called even when PMU init fails (because * there is no PMU), in which case it should simply return NULL. 
@@ -2049,6 +2054,11 @@ static void x86_pmu_static_call_update(void) static_call_update(x86_pmu_filter, x86_pmu.filter); static_call_update(x86_pmu_late_setup, x86_pmu.late_setup); + + static_call_update(x86_pmu_pebs_enable, x86_pmu.pebs_enable); + static_call_update(x86_pmu_pebs_disable, x86_pmu.pebs_disable); + static_call_update(x86_pmu_pebs_enable_all, x86_pmu.pebs_enable_all); + static_call_update(x86_pmu_pebs_disable_all, x86_pmu.pebs_disable_all); } static void _x86_pmu_read(struct perf_event *event) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 90fa14635cf7..ffc5b1ef9a60 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2296,7 +2296,7 @@ static __always_inline void __intel_pmu_disable_all(bool bts) static __always_inline void intel_pmu_disable_all(void) { __intel_pmu_disable_all(true); - intel_pmu_pebs_disable_all(); + static_call_cond(x86_pmu_pebs_disable_all)(); intel_pmu_lbr_disable_all(); } @@ -2328,7 +2328,7 @@ static void __intel_pmu_enable_all(int added, bool pmi) static void intel_pmu_enable_all(int added) { - intel_pmu_pebs_enable_all(); + static_call_cond(x86_pmu_pebs_enable_all)(); __intel_pmu_enable_all(added, false); } @@ -2585,7 +2585,7 @@ static void intel_pmu_disable_event(struct perf_event *event) * so we don't trigger the event without PEBS bit set. */ if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); + static_call(x86_pmu_pebs_disable)(event); } static void intel_pmu_assign_event(struct perf_event *event, int idx) @@ -2936,7 +2936,7 @@ static void intel_pmu_enable_event(struct perf_event *event) int idx = hwc->idx; if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_enable(event); + static_call(x86_pmu_pebs_enable)(event); switch (idx) { case 0 ... 
INTEL_PMC_IDX_FIXED - 1: diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3891d1324000..237bdb01fbaa 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2678,6 +2678,11 @@ void __init intel_pebs_init(void) if (format < 4) x86_pmu.intel_cap.pebs_baseline = 0; + x86_pmu.pebs_enable = intel_pmu_pebs_enable; + x86_pmu.pebs_disable = intel_pmu_pebs_disable; + x86_pmu.pebs_enable_all = intel_pmu_pebs_enable_all; + x86_pmu.pebs_disable_all = intel_pmu_pebs_disable_all; + switch (format) { case 0: pr_cont("PEBS fmt0%c, ", pebs_type); diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 00c7c34273b9..9c99c4ff4892 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -818,6 +818,10 @@ struct x86_pmu { int (*hw_config)(struct perf_event *event); int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); void (*late_setup)(void); + void (*pebs_enable)(struct perf_event *event); + void (*pebs_disable)(struct perf_event *event); + void (*pebs_enable_all)(void); + void (*pebs_disable_all)(void); unsigned eventsel; unsigned perfctr; unsigned fixedctr; @@ -1137,6 +1141,10 @@ DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); DECLARE_STATIC_CALL(x86_pmu_late_setup, *x86_pmu.late_setup); +DECLARE_STATIC_CALL(x86_pmu_pebs_enable, *x86_pmu.pebs_enable); +DECLARE_STATIC_CALL(x86_pmu_pebs_disable, *x86_pmu.pebs_disable); +DECLARE_STATIC_CALL(x86_pmu_pebs_enable_all, *x86_pmu.pebs_enable_all); +DECLARE_STATIC_CALL(x86_pmu_pebs_disable_all, *x86_pmu.pebs_disable_all); static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { -- Gitee From 0a3a5503847cdc598ea1e71a67b0b177b80a0108 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 20 Aug 2025 10:30:26 +0800 Subject: [PATCH 024/231] perf/x86/intel: Use early_initcall() to 
hook bts_init() commit d9cf9c6884d21e01483c4e17479d27636ea4bb50 upstream. After the commit 'd971342d38bf ("perf/x86/intel: Decouple BTS initialization from PEBS initialization")' is introduced, x86_pmu.bts would be initialized in bts_init() which is hooked by arch_initcall(). Whereas init_hw_perf_events() is hooked by early_initcall(). Once the core PMU is initialized, nmi watchdog initialization is called immediately before bts_init() is called. This leads to the BTS buffer not really being initialized since bts_init() is not called and x86_pmu.bts is still false at that time. Worse, the BTS buffer would never be initialized then unless all core PMU events are freed and reserve_ds_buffers() is called again. Thus aligning with init_hw_perf_events(), use early_initcall() to hook bts_init() to ensure x86_pmu.bts is initialized before nmi watchdog initialization. Intel-SIG: commit d9cf9c6884d2 perf/x86/intel: Use early_initcall() to hook bts_init() Fixes: d971342d38bf ("perf/x86/intel: Decouple BTS initialization from PEBS initialization") Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Link: https://lore.kernel.org/r/20250820023032.17128-2-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/bts.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index d3a4b6630368..94f81ead16b4 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -626,4 +626,4 @@ static __init int bts_init(void) return perf_pmu_register(&bts_pmu, "intel_bts", -1); } -arch_initcall(bts_init); +early_initcall(bts_init); -- Gitee From 1866025786c12e690f8c6ce8d3e798b2e76d083f Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 20 Aug 2025 10:30:29 +0800 Subject: [PATCH 025/231] perf/x86: Add PERF_CAP_PEBS_TIMING_INFO flag commit 0c5caea762de31a85cbcce65d978cec83449f699 upstream.
IA32_PERF_CAPABILITIES.PEBS_TIMING_INFO[bit 17] is introduced to indicate whether timed PEBS is supported. Timed PEBS adds a new "retired latency" field in basic info group to show the timing info. Please find detailed information about timed PEBS in section 8.4.1 "Timed Processor Event Based Sampling" of "Intel Architecture Instruction Set Extensions and Future Features". This patch adds PERF_CAP_PEBS_TIMING_INFO flag and KVM module leverages this flag to expose timed PEBS feature to guest. Moreover, opportunistically refine the indents and make the macros share consistent indents. Intel-SIG: commit 0c5caea762de perf/x86: Add PERF_CAP_PEBS_TIMING_INFO flag Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yi Lai Link: https://lore.kernel.org/r/20250820023032.17128-5-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/include/asm/msr-index.h | 14 ++++++++------ tools/arch/x86/include/asm/msr-index.h | 14 ++++++++------ 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 6cf19d001e58..de19c1af434c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -300,12 +300,14 @@ #define PERF_CAP_PT_IDX 16 #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 -#define PERF_CAP_PEBS_TRAP BIT_ULL(6) -#define PERF_CAP_ARCH_REG BIT_ULL(7) -#define PERF_CAP_PEBS_FORMAT 0xf00 -#define PERF_CAP_PEBS_BASELINE BIT_ULL(14) -#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \ - PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE) +#define PERF_CAP_PEBS_TRAP BIT_ULL(6) +#define PERF_CAP_ARCH_REG BIT_ULL(7) +#define PERF_CAP_PEBS_FORMAT 0xf00 +#define PERF_CAP_PEBS_BASELINE BIT_ULL(14) +#define PERF_CAP_PEBS_TIMING_INFO BIT_ULL(17) +#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \ + PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE | \ + PERF_CAP_PEBS_TIMING_INFO) #define MSR_IA32_RTIT_CTL 
0x00000570 #define RTIT_CTL_TRACEEN BIT(0) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index b9972c5c14ad..ad5066f2e0b5 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -300,12 +300,14 @@ #define PERF_CAP_PT_IDX 16 #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 -#define PERF_CAP_PEBS_TRAP BIT_ULL(6) -#define PERF_CAP_ARCH_REG BIT_ULL(7) -#define PERF_CAP_PEBS_FORMAT 0xf00 -#define PERF_CAP_PEBS_BASELINE BIT_ULL(14) -#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \ - PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE) +#define PERF_CAP_PEBS_TRAP BIT_ULL(6) +#define PERF_CAP_ARCH_REG BIT_ULL(7) +#define PERF_CAP_PEBS_FORMAT 0xf00 +#define PERF_CAP_PEBS_BASELINE BIT_ULL(14) +#define PERF_CAP_PEBS_TIMING_INFO BIT_ULL(17) +#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \ + PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE | \ + PERF_CAP_PEBS_TIMING_INFO) #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) -- Gitee From 989c988ea013ed556d87f27b71b5a9ba55c32f79 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 6 May 2025 17:49:07 +0800 Subject: [PATCH 026/231] perf/x86/intel/ds: Remove redundant assignments to sample.period commit 75a9001bab36f0456f6aae1ab0aa487db456464a upstream. The perf_sample_data_init() has already set the period of sample, so no need to do it again. 
Intel-SIG: commit 75a9001bab36 perf/x86/intel/ds: Remove redundant assignments to sample.period Signed-off-by: Changbin Du Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20250506094907.2724-1-changbin.du@huawei.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 237bdb01fbaa..6435543a7e6a 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1809,8 +1809,6 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, perf_sample_data_init(data, 0, event->hw.last_period); - data->period = event->hw.last_period; - /* * Use latency for weight (only avail with PEBS-LL) */ @@ -2065,7 +2063,6 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, sample_type = event->attr.sample_type; format_group = basic->format_group; perf_sample_data_init(data, 0, event->hw.last_period); - data->period = event->hw.last_period; setup_pebs_time(event, data, basic->tsc); -- Gitee From 283cb35f773818273b2ec36885ced6c3dc04ae32 Mon Sep 17 00:00:00 2001 From: Yabin Cui Date: Wed, 15 May 2024 12:36:08 -0700 Subject: [PATCH 027/231] perf/core: Check sample_type in perf_sample_save_callchain commit f226805bc5f60adf03783d8e4cbfe303ccecd64e upstream. Check sample_type in perf_sample_save_callchain() to prevent saving callchain data when it isn't required. 
Intel-SIG: commit f226805bc5f6 perf/core: Check sample_type in perf_sample_save_callchain Suggested-by: Namhyung Kim Signed-off-by: Yabin Cui Signed-off-by: Ingo Molnar Reviewed-by: Ian Rogers Acked-by: Namhyung Kim Link: https://lore.kernel.org/r/20240515193610.2350456-3-yabinc@google.com Signed-off-by: Jason Zeng --- arch/x86/events/amd/ibs.c | 3 +-- arch/x86/events/intel/ds.c | 6 ++---- include/linux/perf_event.h | 5 +++++ 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index fac3d97111b0..ec1b8a193e61 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -1135,8 +1135,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) * recorded as part of interrupt regs. Thus we need to use rip from * interrupt regs while unwinding call stack. */ - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) - perf_sample_save_callchain(&data, event, iregs); + perf_sample_save_callchain(&data, event, iregs); throttle = perf_event_overflow(event, &data, ®s); out: diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 6435543a7e6a..24ed189e1f25 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1831,8 +1831,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, * previous PMI context or an (I)RET happened between the record and * PMI. */ - if (sample_type & PERF_SAMPLE_CALLCHAIN) - perf_sample_save_callchain(data, event, iregs); + perf_sample_save_callchain(data, event, iregs); /* * We use the interrupt regs as a base because the PEBS record does not @@ -2072,8 +2071,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, * previous PMI context or an (I)RET happened between the record and * PMI. 
*/ - if (sample_type & PERF_SAMPLE_CALLCHAIN) - perf_sample_save_callchain(data, event, iregs); + perf_sample_save_callchain(data, event, iregs); *regs = *iregs; /* The ip in basic is EventingIP */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 78c75e8e6e5d..d62b0cb2412f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1284,6 +1284,11 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data, { int size = 1; + if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) + return; + if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) + return; + data->callchain = perf_callchain(event, regs); size += data->callchain->nr; -- Gitee From 07c4f03710f0c361a150a49192ff0a49e1e4ac70 Mon Sep 17 00:00:00 2001 From: Yabin Cui Date: Wed, 15 May 2024 12:36:09 -0700 Subject: [PATCH 028/231] perf/core: Check sample_type in perf_sample_save_brstack commit faac6f105ef169e2e5678c14e1ffebf2a7d780b6 upstream. Check sample_type in perf_sample_save_brstack() to prevent saving branch stack data when it isn't required. 
Intel-SIG: commit faac6f105ef1 perf/core: Check sample_type in perf_sample_save_brstack Suggested-by: Namhyung Kim Signed-off-by: Yabin Cui Signed-off-by: Ingo Molnar Reviewed-by: Ian Rogers Acked-by: Namhyung Kim Link: https://lore.kernel.org/r/20240515193610.2350456-4-yabinc@google.com Conflicts: include/linux/perf_event.h [jz: resolve simple context conflict] Signed-off-by: Jason Zeng --- arch/x86/events/amd/core.c | 3 +-- arch/x86/events/core.c | 3 +-- arch/x86/events/intel/ds.c | 3 +-- include/linux/perf_event.h | 15 ++++++++++----- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 16283da28b00..5c93241b4262 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -976,8 +976,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; - if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e163fc7c5428..7202eff9b4e6 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1723,8 +1723,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); - if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 24ed189e1f25..765af942fb90 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1930,8 +1930,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, if (x86_pmu.intel_cap.pebs_format >= 3) setup_pebs_time(event, data, pebs->tsc); - if 
(has_branch_stack(event)) - perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } static void adaptive_pebs_save_regs(struct pt_regs *regs, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index d62b0cb2412f..7418e970dc2d 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1325,6 +1325,11 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, data->sample_flags |= PERF_SAMPLE_RAW; } +static inline bool has_branch_stack(struct perf_event *event) +{ + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; +} + static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, struct perf_branch_stack *brs, @@ -1332,6 +1337,11 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, { int size = sizeof(u64); /* nr */ + if (!has_branch_stack(event)) + return; + if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK)) + return; + if (branch_sample_hw_index(event)) size += sizeof(u64); @@ -1718,11 +1728,6 @@ extern void perf_bp_event(struct perf_event *event, void *data); # define perf_arch_bpf_user_pt_regs(regs) regs #endif -static inline bool has_branch_stack(struct perf_event *event) -{ - return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; -} - static inline bool needs_branch_stack(struct perf_event *event) { return event->attr.branch_sample_type != 0; -- Gitee From 4d8c35e373416cf1dec752ba61eeb16abc6c911a Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 28 Oct 2025 14:42:14 +0800 Subject: [PATCH 029/231] perf/x86/intel: Fix KASAN global-out-of-bounds warning commit 0ba6502ce167fc3d598c08c2cc3b4ed7ca5aa251 upstream. When running "perf mem record" command on CWF, the below KASAN global-out-of-bounds warning is seen. 
================================================================== BUG: KASAN: global-out-of-bounds in cmt_latency_data+0x176/0x1b0 Read of size 4 at addr ffffffffb721d000 by task dtlb/9850 Call Trace: kasan_report+0xb8/0xf0 cmt_latency_data+0x176/0x1b0 setup_arch_pebs_sample_data+0xf49/0x2560 intel_pmu_drain_arch_pebs+0x577/0xb00 handle_pmi_common+0x6c4/0xc80 The issue is caused by below code in __grt_latency_data(). The code tries to access x86_hybrid_pmu structure which doesn't exist on non-hybrid platform like CWF. WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big) So add is_hybrid() check before calling this WARN_ON_ONCE to fix the global-out-of-bounds access issue. Intel-SIG: commit 0ba6502ce167 perf/x86/intel: Fix KASAN global-out-of-bounds warning Fixes: 090262439f66 ("perf/x86/intel: Rename model-specific pebs_latency_data functions") Reported-by: Xudong Hao Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Zide Chen Cc: stable@vger.kernel.org Link: https://patch.msgid.link/20251028064214.1451968-1-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 765af942fb90..da55bf957b00 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -304,7 +304,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status, { u64 val; - WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); + WARN_ON_ONCE(is_hybrid() && + hybrid_pmu(event->pmu)->pmu_type == hybrid_big); dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK; val = hybrid_var(event->pmu, pebs_data_source)[dse]; -- Gitee From 673bd998f1a777246de9982e441db8ee97b728bb Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:25 +0800 Subject: [PATCH 030/231] perf/x86: Remove redundant is_x86_event() prototype commit c7f69dc073e51f1c448713320ccd2e2be63fb1f6 upstream. 
2 is_x86_event() prototypes are defined in perf_event.h. Remove the redundant one. Intel-SIG: commit c7f69dc073e5 perf/x86: Remove redundant is_x86_event() prototype Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-2-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/perf_event.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 9c99c4ff4892..ae5fcd2ee684 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1133,7 +1133,6 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\ .pmu_type = _pmu, \ } -int is_x86_event(struct perf_event *event); struct pmu *x86_get_pmu(unsigned int cpu); extern struct x86_pmu x86_pmu __read_mostly; -- Gitee From 17d7540da43a119203cc7556e986a152935faaf5 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:26 +0800 Subject: [PATCH 031/231] perf/x86: Fix NULL event access and potential PEBS record loss commit 7e772a93eb61cb6265bdd1c5bde17d0f2718b452 upstream. When intel_pmu_drain_pebs_icl() is called to drain PEBS records, the perf_event_overflow() could be called to process the last PEBS record. While perf_event_overflow() could trigger the interrupt throttle and stop all events of the group, like what the below call-chain shows. perf_event_overflow() -> __perf_event_overflow() ->__perf_event_account_interrupt() -> perf_event_throttle_group() -> perf_event_throttle() -> event->pmu->stop() -> x86_pmu_stop() The side effect of stopping the events is that all corresponding event pointers in cpuc->events[] array are cleared to NULL. Assume there are two PEBS events (event a and event b) in a group. When intel_pmu_drain_pebs_icl() calls perf_event_overflow() to process the last PEBS record of PEBS event a, interrupt throttle is triggered and all pointers of event a and event b are cleared to NULL. 
Then intel_pmu_drain_pebs_icl() tries to process the last PEBS record of event b and encounters NULL pointer access. To avoid this issue, move cpuc->events[] clearing from x86_pmu_stop() to x86_pmu_del(). It's safe since cpuc->active_mask or cpuc->pebs_enabled is always checked before access the event pointer from cpuc->events[]. Intel-SIG: commit 7e772a93eb61 perf/x86: Fix NULL event access and potential PEBS record loss Closes: https://lore.kernel.org/oe-lkp/202507042103.a15d2923-lkp@intel.com Fixes: 9734e25fbf5a ("perf: Fix the throttle logic for a group") Reported-by: kernel test robot Suggested-by: Peter Zijlstra Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-3-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 7202eff9b4e6..d809f4c8378f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1344,6 +1344,7 @@ static void x86_pmu_enable(struct pmu *pmu) hwc->state |= PERF_HES_ARCH; x86_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[hwc->idx] = NULL; } /* @@ -1365,6 +1366,7 @@ static void x86_pmu_enable(struct pmu *pmu) * if cpuc->enabled = 0, then no wrmsr as * per x86_pmu_enable_event() */ + cpuc->events[hwc->idx] = event; x86_pmu_start(event, PERF_EF_RELOAD); } cpuc->n_added = 0; @@ -1531,7 +1533,6 @@ static void x86_pmu_start(struct perf_event *event, int flags) event->hw.state = 0; - cpuc->events[idx] = event; __set_bit(idx, cpuc->active_mask); static_call(x86_pmu_enable)(event); perf_event_update_userpage(event); @@ -1608,7 +1609,6 @@ void x86_pmu_stop(struct perf_event *event, int flags) if (test_bit(hwc->idx, cpuc->active_mask)) { static_call(x86_pmu_disable)(event); __clear_bit(hwc->idx, cpuc->active_mask); - cpuc->events[hwc->idx] = NULL; WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; } @@ 
-1646,6 +1646,7 @@ static void x86_pmu_del(struct perf_event *event, int flags) * Not a TXN, therefore cleanup properly. */ x86_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[event->hw.idx] = NULL; for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) -- Gitee From 41a30621961d48d4bceecf0e309cd6903609cb9b Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:27 +0800 Subject: [PATCH 032/231] perf/x86/intel: Replace x86_pmu.drain_pebs calling with static call commit ee98b8bfc7c4baca69a6852c4ecc399794f7e53b upstream. Use x86_pmu_drain_pebs static call to replace calling x86_pmu.drain_pebs function pointer. Intel-SIG: commit ee98b8bfc7c4 perf/x86/intel: Replace x86_pmu.drain_pebs calling with static call Suggested-by: Peter Zijlstra Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-4-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index ffc5b1ef9a60..d3cc0469e82b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3258,7 +3258,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) * The PEBS buffer has to be drained before handling the A-PMI */ if (is_pebs_counter_event_group(event)) - x86_pmu.drain_pebs(regs, &data); + static_call(x86_pmu_drain_pebs)(regs, &data); if (!intel_pmu_save_and_restart(event)) continue; -- Gitee From 8243396fe8085e828c9d0bc7495e6ae4360b5cd0 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:28 +0800 Subject: [PATCH 033/231] perf/x86/intel: Correct large PEBS flag check commit 5e4e355ae7cdeb0fef5dbe908866e1f895abfacc upstream. current large PEBS flag check only checks if sample_regs_user contains unsupported GPRs but doesn't check if sample_regs_intr contains unsupported GPRs. 
Of course, currently PEBS HW supports to sample all perf supported GPRs, the missed check doesn't cause real issue. But it won't be true any more after the subsequent patches support to sample SSP register. SSP sampling is not supported by adaptive PEBS HW and it would be supported until arch-PEBS HW. So correct this issue. Intel-SIG: commit 5e4e355ae7cd perf/x86/intel: Correct large PEBS flag check Fixes: a47ba4d77e12 ("perf/x86: Enable free running PEBS for REGS_USER/INTR") Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-5-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d3cc0469e82b..7290b03dbb42 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4017,7 +4017,9 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) if (!event->attr.exclude_kernel) flags &= ~PERF_SAMPLE_REGS_USER; if (event->attr.sample_regs_user & ~PEBS_GP_REGS) - flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); + flags &= ~PERF_SAMPLE_REGS_USER; + if (event->attr.sample_regs_intr & ~PEBS_GP_REGS) + flags &= ~PERF_SAMPLE_REGS_INTR; return flags; } -- Gitee From d8c3e9db45f62725ba505f8ef67f2795c7959113 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:29 +0800 Subject: [PATCH 034/231] perf/x86/intel: Initialize architectural PEBS commit d243d0bb64af1e90ec18ac2fa6e7cadfe8895913 upstream. arch-PEBS leverages CPUID.23H.4/5 sub-leaves enumerate arch-PEBS supported capabilities and counters bitmap. This patch parses these 2 sub-leaves and initializes arch-PEBS capabilities and corresponding structures. Since IA32_PEBS_ENABLE and MSR_PEBS_DATA_CFG MSRs are no longer existed for arch-PEBS, arch-PEBS doesn't need to manipulate these MSRs. 
Thus add a simple pair of __intel_pmu_pebs_enable/disable() callbacks for arch-PEBS. Intel-SIG: commit d243d0bb64af perf/x86/intel: Initialize architectural PEBS Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-6-dapeng1.mi@linux.intel.com Conflicts: arch/x86/events/intel/ds.c arch/x86/events/perf_event.h [jz: resolve context conflict, use wrmsrl(), instead of wrmsrq()] Signed-off-by: Jason Zeng --- arch/x86/events/core.c | 21 ++++++++--- arch/x86/events/intel/core.c | 60 ++++++++++++++++++++++--------- arch/x86/events/intel/ds.c | 52 ++++++++++++++++++++++----- arch/x86/events/perf_event.h | 25 +++++++++++-- arch/x86/include/asm/perf_event.h | 7 +++- 5 files changed, 132 insertions(+), 33 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d809f4c8378f..5aa93e547036 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -554,14 +554,22 @@ static inline int precise_br_compat(struct perf_event *event) return m == b; } -int x86_pmu_max_precise(void) +int x86_pmu_max_precise(struct pmu *pmu) { int precise = 0; - /* Support for constant skid */ if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { - precise++; + /* arch PEBS */ + if (x86_pmu.arch_pebs) { + precise = 2; + if (hybrid(pmu, arch_pebs_cap).pdists) + precise++; + + return precise; + } + /* legacy PEBS - support for constant skid */ + precise++; /* Support for IP fixup */ if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) precise++; @@ -569,13 +577,14 @@ int x86_pmu_max_precise(void) if (x86_pmu.pebs_prec_dist) precise++; } + return precise; } int x86_pmu_hw_config(struct perf_event *event) { if (event->attr.precise_ip) { - int precise = x86_pmu_max_precise(); + int precise = x86_pmu_max_precise(event->pmu); if (event->attr.precise_ip > precise) return -EOPNOTSUPP; @@ -2628,7 +2637,9 @@ static ssize_t max_precise_show(struct device *cdev, struct device_attribute *attr, char *buf) { - return 
snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); + struct pmu *pmu = dev_get_drvdata(cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise(pmu)); } static DEVICE_ATTR_RO(max_precise); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7290b03dbb42..7485a64d9d9e 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5233,34 +5233,59 @@ static inline bool intel_pmu_broken_perf_cap(void) return false; } +#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED)) + static void update_pmu_cap(struct pmu *pmu) { - unsigned int cntr, fixed_cntr, ecx, edx; - union cpuid35_eax eax; - union cpuid35_ebx ebx; + unsigned int eax, ebx, ecx, edx; + union cpuid35_eax eax_0; + union cpuid35_ebx ebx_0; + u64 cntrs_mask = 0; + u64 pebs_mask = 0; + u64 pdists_mask = 0; - cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx); + cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx); - if (ebx.split.umask2) + if (ebx_0.split.umask2) hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2; - if (ebx.split.eq) + if (ebx_0.split.eq) hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ; - if (eax.split.cntr_subleaf) { + if (eax_0.split.cntr_subleaf) { cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF, - &cntr, &fixed_cntr, &ecx, &edx); - hybrid(pmu, cntr_mask64) = cntr; - hybrid(pmu, fixed_cntr_mask64) = fixed_cntr; + &eax, &ebx, &ecx, &edx); + hybrid(pmu, cntr_mask64) = eax; + hybrid(pmu, fixed_cntr_mask64) = ebx; + cntrs_mask = counter_mask(eax, ebx); } - if (eax.split.acr_subleaf) { + if (eax_0.split.acr_subleaf) { cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF, - &cntr, &fixed_cntr, &ecx, &edx); + &eax, &ebx, &ecx, &edx); /* The mask of the counters which can be reloaded */ - hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED); - + hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx); /* The mask of the counters 
which can cause a reload of reloadable counters */ - hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED); + hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx); + } + + /* Bits[5:4] should be set simultaneously if arch-PEBS is supported */ + if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) { + cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF, + &eax, &ebx, &ecx, &edx); + hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32; + + cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF, + &eax, &ebx, &ecx, &edx); + pebs_mask = counter_mask(eax, ecx); + pdists_mask = counter_mask(ebx, edx); + hybrid(pmu, arch_pebs_cap).counters = pebs_mask; + hybrid(pmu, arch_pebs_cap).pdists = pdists_mask; + + if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) + x86_pmu.arch_pebs = 0; + } else { + WARN_ON(x86_pmu.arch_pebs == 1); + x86_pmu.arch_pebs = 0; } if (!intel_pmu_broken_perf_cap()) { @@ -6236,7 +6261,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) static umode_t pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return x86_pmu.ds_pebs ? attr->mode : 0; + return intel_pmu_has_pebs() ? 
attr->mode : 0; } static umode_t @@ -7612,6 +7637,9 @@ __init int intel_pmu_init(void) if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) update_pmu_cap(NULL); + if (x86_pmu.arch_pebs) + pr_cont("Architectural PEBS, "); + intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, &x86_pmu.fixed_cntr_mask64, &x86_pmu.intel_ctrl); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index da55bf957b00..b0d1692cd925 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1509,6 +1509,15 @@ static inline void intel_pmu_drain_large_pebs(struct cpu_hw_events *cpuc) intel_pmu_drain_pebs_buffer(); } +static void __intel_pmu_pebs_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; + cpuc->pebs_enabled |= 1ULL << hwc->idx; +} + void intel_pmu_pebs_enable(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -1517,9 +1526,7 @@ void intel_pmu_pebs_enable(struct perf_event *event) struct debug_store *ds = cpuc->ds; unsigned int idx = hwc->idx; - hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; - - cpuc->pebs_enabled |= 1ULL << hwc->idx; + __intel_pmu_pebs_enable(event); if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); @@ -1581,14 +1588,22 @@ void intel_pmu_pebs_del(struct perf_event *event) pebs_update_state(needed_cb, cpuc, event, false); } -void intel_pmu_pebs_disable(struct perf_event *event) +static void __intel_pmu_pebs_disable(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; intel_pmu_drain_large_pebs(cpuc); - cpuc->pebs_enabled &= ~(1ULL << hwc->idx); + hwc->config |= ARCH_PERFMON_EVENTSEL_INT; +} + +void intel_pmu_pebs_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + 
struct hw_perf_event *hwc = &event->hw; + + __intel_pmu_pebs_disable(event); if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) @@ -1600,8 +1615,6 @@ void intel_pmu_pebs_disable(struct perf_event *event) if (cpuc->enabled) wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); - - hwc->config |= ARCH_PERFMON_EVENTSEL_INT; } void intel_pmu_pebs_enable_all(void) @@ -2648,11 +2661,26 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d } } +static void __init intel_arch_pebs_init(void) +{ + /* + * Current hybrid platforms always both support arch-PEBS or not + * on all kinds of cores. So directly set x86_pmu.arch_pebs flag + * if boot cpu supports arch-PEBS. + */ + x86_pmu.arch_pebs = 1; + x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; + x86_pmu.pebs_capable = ~0ULL; + + x86_pmu.pebs_enable = __intel_pmu_pebs_enable; + x86_pmu.pebs_disable = __intel_pmu_pebs_disable; +} + /* * PEBS probe and setup */ -void __init intel_pebs_init(void) +static void __init intel_ds_pebs_init(void) { /* * No support for 32bit formats @@ -2759,6 +2787,14 @@ void __init intel_pebs_init(void) } } +void __init intel_pebs_init(void) +{ + if (x86_pmu.intel_cap.pebs_format == 0xf) + intel_arch_pebs_init(); + else + intel_ds_pebs_init(); +} + void perf_restore_debug_store(void) { struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index ae5fcd2ee684..32c91b5b9f47 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -710,6 +710,12 @@ enum hybrid_pmu_type { #define X86_HYBRID_NUM_PMUS 2 +struct arch_pebs_cap { + u64 caps; + u64 counters; + u64 pdists; +}; + struct x86_hybrid_pmu { struct pmu pmu; const char *name; @@ -754,6 +760,8 @@ struct x86_hybrid_pmu { mid_ack :1, enabled_ack :1; + struct arch_pebs_cap arch_pebs_cap; + u64 pebs_data_source[PERF_PEBS_DATA_SOURCE_MAX]; }; @@ -908,7 +916,7 @@ struct x86_pmu { union perf_capabilities 
intel_cap; /* - * Intel DebugStore bits + * Intel DebugStore and PEBS bits */ unsigned int bts :1, bts_active :1, @@ -919,7 +927,8 @@ struct x86_pmu { pebs_no_tlb :1, pebs_no_isolation :1, pebs_block :1, - pebs_ept :1; + pebs_ept :1, + arch_pebs :1; int pebs_record_size; int pebs_buffer_size; u64 pebs_events_mask; @@ -931,6 +940,11 @@ struct x86_pmu { u64 rtm_abort_event; u64 pebs_capable; + /* + * Intel Architectural PEBS + */ + struct arch_pebs_cap arch_pebs_cap; + /* * Intel LBR */ @@ -1225,7 +1239,7 @@ int x86_reserve_hardware(void); void x86_release_hardware(void); -int x86_pmu_max_precise(void); +int x86_pmu_max_precise(struct pmu *pmu); void hw_perf_lbr_event_destroy(struct perf_event *event); @@ -1782,6 +1796,11 @@ static inline int intel_pmu_max_num_pebs(struct pmu *pmu) return fls((u32)hybrid(pmu, pebs_events_mask)); } +static inline bool intel_pmu_has_pebs(void) +{ + return x86_pmu.ds_pebs || x86_pmu.arch_pebs; +} + #else /* CONFIG_CPU_SUP_INTEL */ static inline void reserve_ds_buffers(void) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 35459437dad5..76cd943db796 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -217,6 +217,8 @@ union cpuid10_edx { #define ARCH_PERFMON_EXT_LEAF 0x00000023 #define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1 #define ARCH_PERFMON_ACR_LEAF 0x2 +#define ARCH_PERFMON_PEBS_CAP_LEAF 0x4 +#define ARCH_PERFMON_PEBS_COUNTER_LEAF 0x5 union cpuid35_eax { struct { @@ -227,7 +229,10 @@ union cpuid35_eax { unsigned int acr_subleaf:1; /* Events Sub-Leaf */ unsigned int events_subleaf:1; - unsigned int reserved:28; + /* arch-PEBS Sub-Leaves */ + unsigned int pebs_caps_subleaf:1; + unsigned int pebs_cnts_subleaf:1; + unsigned int reserved:26; } split; unsigned int full; }; -- Gitee From 5e7b88dce352716cdbba016f093d5cc34cdf81b0 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:30 +0800 Subject: [PATCH 035/231] perf/x86/intel/ds: Factor out PEBS 
record processing code to functions commit 8807d922705f0a137d8de5f636b50e7b4fbef155 upstream. Beside some PEBS record layout difference, arch-PEBS can share most of PEBS record processing code with adaptive PEBS. Thus, factor out these common processing code to independent inline functions, so they can be reused by subsequent arch-PEBS handler. Intel-SIG: commit 8807d922705f perf/x86/intel/ds: Factor out PEBS record processing code to functions Suggested-by: Kan Liang Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-7-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 83 ++++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 25 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b0d1692cd925..ba386b15181b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2593,6 +2593,57 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d } } +static __always_inline void +__intel_pmu_handle_pebs_record(struct pt_regs *iregs, + struct pt_regs *regs, + struct perf_sample_data *data, + void *at, u64 pebs_status, + short *counts, void **last, + setup_fn setup_sample) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_event *event; + int bit; + + for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX) { + event = cpuc->events[bit]; + + if (WARN_ON_ONCE(!event) || + WARN_ON_ONCE(!event->attr.precise_ip)) + continue; + + if (counts[bit]++) { + __intel_pmu_pebs_event(event, iregs, regs, data, + last[bit], setup_sample); + } + + last[bit] = at; + } +} + +static __always_inline void +__intel_pmu_handle_last_pebs_record(struct pt_regs *iregs, + struct pt_regs *regs, + struct perf_sample_data *data, + u64 mask, short *counts, void **last, + setup_fn setup_sample) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct 
perf_event *event; + int bit; + + for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { + if (!counts[bit]) + continue; + + event = cpuc->events[bit]; + + __intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit], + counts[bit], setup_sample); + } + +} + static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data) { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; @@ -2602,9 +2653,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d struct x86_perf_regs perf_regs; struct pt_regs *regs = &perf_regs.regs; struct pebs_basic *basic; - struct perf_event *event; void *base, *at, *top; - int bit; u64 mask; if (!x86_pmu.pebs_active) @@ -2617,6 +2666,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d mask = hybrid(cpuc->pmu, pebs_events_mask) | (hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED); + mask &= cpuc->pebs_enabled; if (unlikely(base >= top)) { intel_pmu_pebs_event_update_no_drain(cpuc, mask); @@ -2634,31 +2684,14 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d if (basic->format_size != cpuc->pebs_record_size) continue; - pebs_status = basic->applicable_counters & cpuc->pebs_enabled & mask; - for_each_set_bit(bit, (unsigned long *)&pebs_status, X86_PMC_IDX_MAX) { - event = cpuc->events[bit]; - - if (WARN_ON_ONCE(!event) || - WARN_ON_ONCE(!event->attr.precise_ip)) - continue; - - if (counts[bit]++) { - __intel_pmu_pebs_event(event, iregs, regs, data, last[bit], - setup_pebs_adaptive_sample_data); - } - last[bit] = at; - } + pebs_status = mask & basic->applicable_counters; + __intel_pmu_handle_pebs_record(iregs, regs, data, at, + pebs_status, counts, last, + setup_pebs_adaptive_sample_data); } - for_each_set_bit(bit, (unsigned long *)&mask, X86_PMC_IDX_MAX) { - if (!counts[bit]) - continue; - - event = cpuc->events[bit]; - - __intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit], - 
counts[bit], setup_pebs_adaptive_sample_data); - } + __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last, + setup_pebs_adaptive_sample_data); } static void __init intel_arch_pebs_init(void) -- Gitee From 4ea69ed716cd2a28888de8a7ce9e217518e00487 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:31 +0800 Subject: [PATCH 036/231] perf/x86/intel/ds: Factor out PEBS group processing code to functions commit 167cde7dc9b36b7a88f3c29d836fabce13023327 upstream. Adaptive PEBS and arch-PEBS share lots of same code to process these PEBS groups, like basic, GPR and meminfo groups. Extract these shared code to generic functions to avoid duplicated code. Intel-SIG: commit 167cde7dc9b3 perf/x86/intel/ds: Factor out PEBS group processing code to functions Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-8-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 170 +++++++++++++++++++++++-------------- 1 file changed, 104 insertions(+), 66 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index ba386b15181b..996d21222513 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2050,6 +2050,90 @@ static inline void __setup_pebs_counter_group(struct cpu_hw_events *cpuc, #define PEBS_LATENCY_MASK 0xffff +static inline void __setup_perf_sample_data(struct perf_event *event, + struct pt_regs *iregs, + struct perf_sample_data *data) +{ + perf_sample_data_init(data, 0, event->hw.last_period); + + /* + * We must however always use iregs for the unwinder to stay sane; the + * record BP,SP,IP can point into thin air when the record is from a + * previous PMI context or an (I)RET happened between the record and + * PMI. 
+ */ + perf_sample_save_callchain(data, event, iregs); +} + +static inline void __setup_pebs_basic_group(struct perf_event *event, + struct pt_regs *regs, + struct perf_sample_data *data, + u64 sample_type, u64 ip, + u64 tsc, u16 retire) +{ + /* The ip in basic is EventingIP */ + set_linear_ip(regs, ip); + regs->flags = PERF_EFLAGS_EXACT; + setup_pebs_time(event, data, tsc); + + if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) + data->weight.var3_w = retire; +} + +static inline void __setup_pebs_gpr_group(struct perf_event *event, + struct pt_regs *regs, + struct pebs_gprs *gprs, + u64 sample_type) +{ + if (event->attr.precise_ip < 2) { + set_linear_ip(regs, gprs->ip); + regs->flags &= ~PERF_EFLAGS_EXACT; + } + + if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) + adaptive_pebs_save_regs(regs, gprs); +} + +static inline void __setup_pebs_meminfo_group(struct perf_event *event, + struct perf_sample_data *data, + u64 sample_type, u64 latency, + u16 instr_latency, u64 address, + u64 aux, u64 tsx_tuning, u64 ax) +{ + if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { + u64 tsx_latency = intel_get_tsx_weight(tsx_tuning); + + data->weight.var2_w = instr_latency; + + /* + * Although meminfo::latency is defined as a u64, + * only the lower 32 bits include the valid data + * in practice on Ice Lake and earlier platforms. 
+ */ + if (sample_type & PERF_SAMPLE_WEIGHT) + data->weight.full = latency ?: tsx_latency; + else + data->weight.var1_dw = (u32)latency ?: tsx_latency; + + data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; + } + + if (sample_type & PERF_SAMPLE_DATA_SRC) { + data->data_src.val = get_data_src(event, aux); + data->sample_flags |= PERF_SAMPLE_DATA_SRC; + } + + if (sample_type & PERF_SAMPLE_ADDR_TYPE) { + data->addr = address; + data->sample_flags |= PERF_SAMPLE_ADDR; + } + + if (sample_type & PERF_SAMPLE_TRANSACTION) { + data->txn = intel_get_tsx_transaction(tsx_tuning, ax); + data->sample_flags |= PERF_SAMPLE_TRANSACTION; + } +} + /* * With adaptive PEBS the layout depends on what fields are configured. */ @@ -2059,12 +2143,14 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, struct pt_regs *regs) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 sample_type = event->attr.sample_type; struct pebs_basic *basic = __pebs; void *next_record = basic + 1; - u64 sample_type, format_group; struct pebs_meminfo *meminfo = NULL; struct pebs_gprs *gprs = NULL; struct x86_perf_regs *perf_regs; + u64 format_group; + u16 retire; if (basic == NULL) return; @@ -2072,31 +2158,17 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, perf_regs = container_of(regs, struct x86_perf_regs, regs); perf_regs->xmm_regs = NULL; - sample_type = event->attr.sample_type; format_group = basic->format_group; - perf_sample_data_init(data, 0, event->hw.last_period); - setup_pebs_time(event, data, basic->tsc); - - /* - * We must however always use iregs for the unwinder to stay sane; the - * record BP,SP,IP can point into thin air when the record is from a - * previous PMI context or an (I)RET happened between the record and - * PMI. 
- */ - perf_sample_save_callchain(data, event, iregs); + __setup_perf_sample_data(event, iregs, data); *regs = *iregs; - /* The ip in basic is EventingIP */ - set_linear_ip(regs, basic->ip); - regs->flags = PERF_EFLAGS_EXACT; - if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { - if (x86_pmu.flags & PMU_FL_RETIRE_LATENCY) - data->weight.var3_w = basic->retire_latency; - else - data->weight.var3_w = 0; - } + /* basic group */ + retire = x86_pmu.flags & PMU_FL_RETIRE_LATENCY ? + basic->retire_latency : 0; + __setup_pebs_basic_group(event, regs, data, sample_type, + basic->ip, basic->tsc, retire); /* * The record for MEMINFO is in front of GP @@ -2112,54 +2184,20 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, gprs = next_record; next_record = gprs + 1; - if (event->attr.precise_ip < 2) { - set_linear_ip(regs, gprs->ip); - regs->flags &= ~PERF_EFLAGS_EXACT; - } - - if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) - adaptive_pebs_save_regs(regs, gprs); + __setup_pebs_gpr_group(event, regs, gprs, sample_type); } if (format_group & PEBS_DATACFG_MEMINFO) { - if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { - u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ? - meminfo->cache_latency : meminfo->mem_latency; - - if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) - data->weight.var2_w = meminfo->instr_latency; - - /* - * Although meminfo::latency is defined as a u64, - * only the lower 32 bits include the valid data - * in practice on Ice Lake and earlier platforms. 
- */ - if (sample_type & PERF_SAMPLE_WEIGHT) { - data->weight.full = latency ?: - intel_get_tsx_weight(meminfo->tsx_tuning); - } else { - data->weight.var1_dw = (u32)latency ?: - intel_get_tsx_weight(meminfo->tsx_tuning); - } - - data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE; - } - - if (sample_type & PERF_SAMPLE_DATA_SRC) { - data->data_src.val = get_data_src(event, meminfo->aux); - data->sample_flags |= PERF_SAMPLE_DATA_SRC; - } - - if (sample_type & PERF_SAMPLE_ADDR_TYPE) { - data->addr = meminfo->address; - data->sample_flags |= PERF_SAMPLE_ADDR; - } - - if (sample_type & PERF_SAMPLE_TRANSACTION) { - data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, - gprs ? gprs->ax : 0); - data->sample_flags |= PERF_SAMPLE_TRANSACTION; - } + u64 latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ? + meminfo->cache_latency : meminfo->mem_latency; + u64 instr_latency = x86_pmu.flags & PMU_FL_INSTR_LATENCY ? + meminfo->instr_latency : 0; + u64 ax = gprs ? gprs->ax : 0; + + __setup_pebs_meminfo_group(event, data, sample_type, latency, + instr_latency, meminfo->address, + meminfo->aux, meminfo->tsx_tuning, + ax); } if (format_group & PEBS_DATACFG_XMMS) { -- Gitee From 54683bf4c6bbe0497eaf3b7c6bdc19baf8c53d5e Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:32 +0800 Subject: [PATCH 037/231] perf/x86/intel: Process arch-PEBS records or record fragments commit d21954c8a0ffbc94ffdd65106fb6da5b59042e0a upstream. A significant difference with adaptive PEBS is that arch-PEBS record supports fragments which means an arch-PEBS record could be split into several independent fragments which have its own arch-PEBS header in each fragment. This patch defines architectural PEBS record layout structures and add helpers to process arch-PEBS records or fragments. Only legacy PEBS groups like basic, GPR, XMM and LBR groups are supported in this patch, the new added YMM/ZMM/OPMASK vector registers capturing would be supported in the future. 
Intel-SIG: commit d21954c8a0ff perf/x86/intel: Process arch-PEBS records or record fragments Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-9-dapeng1.mi@linux.intel.com [jz: use rdmsrl/wrmsrl instead of rdmsrq/wrmsrq] Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 13 +++ arch/x86/events/intel/ds.c | 184 ++++++++++++++++++++++++++++++ arch/x86/include/asm/msr-index.h | 6 + arch/x86/include/asm/perf_event.h | 96 ++++++++++++++++ 4 files changed, 299 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7485a64d9d9e..1094ae7f0b3d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3205,6 +3205,19 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT; } + /* + * Arch PEBS sets bit 54 in the global status register + */ + if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT, + (unsigned long *)&status)) { + handled++; + static_call(x86_pmu_drain_pebs)(regs, &data); + + if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] && + is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS])) + status &= ~GLOBAL_STATUS_PERF_METRICS_OVF_BIT; + } + /* * Intel PT */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 996d21222513..43f9429da4e7 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2248,6 +2248,117 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, format_group); } +static inline bool arch_pebs_record_continued(struct arch_pebs_header *header) +{ + /* Continue bit or null PEBS record indicates fragment follows. 
*/ + return header->cont || !(header->format & GENMASK_ULL(63, 16)); +} + +static void setup_arch_pebs_sample_data(struct perf_event *event, + struct pt_regs *iregs, + void *__pebs, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 sample_type = event->attr.sample_type; + struct arch_pebs_header *header = NULL; + struct arch_pebs_aux *meminfo = NULL; + struct arch_pebs_gprs *gprs = NULL; + struct x86_perf_regs *perf_regs; + void *next_record; + void *at = __pebs; + + if (at == NULL) + return; + + perf_regs = container_of(regs, struct x86_perf_regs, regs); + perf_regs->xmm_regs = NULL; + + __setup_perf_sample_data(event, iregs, data); + + *regs = *iregs; + +again: + header = at; + next_record = at + sizeof(struct arch_pebs_header); + if (header->basic) { + struct arch_pebs_basic *basic = next_record; + u16 retire = 0; + + next_record = basic + 1; + + if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) + retire = basic->valid ? basic->retire : 0; + __setup_pebs_basic_group(event, regs, data, sample_type, + basic->ip, basic->tsc, retire); + } + + /* + * The record for MEMINFO is in front of GP + * But PERF_SAMPLE_TRANSACTION needs gprs->ax. + * Save the pointer here but process later. + */ + if (header->aux) { + meminfo = next_record; + next_record = meminfo + 1; + } + + if (header->gpr) { + gprs = next_record; + next_record = gprs + 1; + + __setup_pebs_gpr_group(event, regs, + (struct pebs_gprs *)gprs, + sample_type); + } + + if (header->aux) { + u64 ax = gprs ? 
gprs->ax : 0; + + __setup_pebs_meminfo_group(event, data, sample_type, + meminfo->cache_latency, + meminfo->instr_latency, + meminfo->address, meminfo->aux, + meminfo->tsx_tuning, ax); + } + + if (header->xmm) { + struct pebs_xmm *xmm; + + next_record += sizeof(struct arch_pebs_xer_header); + + xmm = next_record; + perf_regs->xmm_regs = xmm->xmm; + next_record = xmm + 1; + } + + if (header->lbr) { + struct arch_pebs_lbr_header *lbr_header = next_record; + struct lbr_entry *lbr; + int num_lbr; + + next_record = lbr_header + 1; + lbr = next_record; + + num_lbr = header->lbr == ARCH_PEBS_LBR_NUM_VAR ? + lbr_header->depth : + header->lbr * ARCH_PEBS_BASE_LBR_ENTRIES; + next_record += num_lbr * sizeof(struct lbr_entry); + + if (has_branch_stack(event)) { + intel_pmu_store_pebs_lbrs(lbr); + intel_pmu_lbr_save_brstack(data, cpuc, event); + } + } + + /* Parse followed fragments if there are. */ + if (arch_pebs_record_continued(header)) { + at = at + header->size; + goto again; + } +} + static inline void * get_next_pebs_record_by_bit(void *base, void *top, int bit) { @@ -2732,6 +2843,78 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d setup_pebs_adaptive_sample_data); } +static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs, + struct perf_sample_data *data) +{ + short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + union arch_pebs_index index; + struct x86_perf_regs perf_regs; + struct pt_regs *regs = &perf_regs.regs; + void *base, *at, *top; + u64 mask; + + rdmsrl(MSR_IA32_PEBS_INDEX, index.whole); + + if (unlikely(!index.wr)) { + intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); + return; + } + + base = cpuc->ds_pebs_vaddr; + top = (void *)((u64)cpuc->ds_pebs_vaddr + + (index.wr << ARCH_PEBS_INDEX_WR_SHIFT)); + + index.wr = 0; + index.full = 0; + wrmsrl(MSR_IA32_PEBS_INDEX, index.whole); + 
+ mask = hybrid(cpuc->pmu, arch_pebs_cap).counters & cpuc->pebs_enabled; + + if (!iregs) + iregs = &dummy_iregs; + + /* Process all but the last event for each counter. */ + for (at = base; at < top;) { + struct arch_pebs_header *header; + struct arch_pebs_basic *basic; + u64 pebs_status; + + header = at; + + if (WARN_ON_ONCE(!header->size)) + break; + + /* 1st fragment or single record must have basic group */ + if (!header->basic) { + at += header->size; + continue; + } + + basic = at + sizeof(struct arch_pebs_header); + pebs_status = mask & basic->applicable_counters; + __intel_pmu_handle_pebs_record(iregs, regs, data, at, + pebs_status, counts, last, + setup_arch_pebs_sample_data); + + /* Skip non-last fragments */ + while (arch_pebs_record_continued(header)) { + if (!header->size) + break; + at += header->size; + header = at; + } + + /* Skip last fragment or the single record */ + at += header->size; + } + + __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, + counts, last, + setup_arch_pebs_sample_data); +} + static void __init intel_arch_pebs_init(void) { /* @@ -2741,6 +2924,7 @@ static void __init intel_arch_pebs_init(void) */ x86_pmu.arch_pebs = 1; x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; + x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs; x86_pmu.pebs_capable = ~0ULL; x86_pmu.pebs_enable = __intel_pmu_pebs_enable; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index de19c1af434c..ad80cdfe1dcc 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -309,6 +309,12 @@ PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE | \ PERF_CAP_PEBS_TIMING_INFO) +/* Arch PEBS */ +#define MSR_IA32_PEBS_BASE 0x000003f4 +#define MSR_IA32_PEBS_INDEX 0x000003f5 +#define ARCH_PEBS_OFFSET_MASK 0x7fffff +#define ARCH_PEBS_INDEX_WR_SHIFT 4 + #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) #define RTIT_CTL_CYCLEACC BIT(1) diff --git a/arch/x86/include/asm/perf_event.h 
b/arch/x86/include/asm/perf_event.h index 76cd943db796..ed2609f7b985 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -452,6 +452,8 @@ static inline bool is_topdown_idx(int idx) #define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT) #define GLOBAL_STATUS_TRACE_TOPAPMI_BIT 55 #define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT) +#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT 54 +#define GLOBAL_STATUS_ARCH_PEBS_THRESHOLD BIT_ULL(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT) #define GLOBAL_STATUS_PERF_METRICS_OVF_BIT 48 #define GLOBAL_CTRL_EN_PERF_METRICS 48 @@ -522,6 +524,100 @@ struct pebs_cntr_header { #define INTEL_CNTR_METRICS 0x3 +/* + * Arch PEBS + */ +union arch_pebs_index { + struct { + u64 rsvd:4, + wr:23, + rsvd2:4, + full:1, + en:1, + rsvd3:3, + thresh:23, + rsvd4:5; + }; + u64 whole; +}; + +struct arch_pebs_header { + union { + u64 format; + struct { + u64 size:16, /* Record size */ + rsvd:14, + mode:1, /* 64BIT_MODE */ + cont:1, + rsvd2:3, + cntr:5, + lbr:2, + rsvd3:7, + xmm:1, + ymmh:1, + rsvd4:2, + opmask:1, + zmmh:1, + h16zmm:1, + rsvd5:5, + gpr:1, + aux:1, + basic:1; + }; + }; + u64 rsvd6; +}; + +struct arch_pebs_basic { + u64 ip; + u64 applicable_counters; + u64 tsc; + u64 retire :16, /* Retire Latency */ + valid :1, + rsvd :47; + u64 rsvd2; + u64 rsvd3; +}; + +struct arch_pebs_aux { + u64 address; + u64 rsvd; + u64 rsvd2; + u64 rsvd3; + u64 rsvd4; + u64 aux; + u64 instr_latency :16, + pad2 :16, + cache_latency :16, + pad3 :16; + u64 tsx_tuning; +}; + +struct arch_pebs_gprs { + u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di; + u64 r8, r9, r10, r11, r12, r13, r14, r15, ssp; + u64 rsvd; +}; + +struct arch_pebs_xer_header { + u64 xstate; + u64 rsvd; +}; + +#define ARCH_PEBS_LBR_NAN 0x0 +#define ARCH_PEBS_LBR_NUM_8 0x1 +#define ARCH_PEBS_LBR_NUM_16 0x2 +#define ARCH_PEBS_LBR_NUM_VAR 0x3 +#define ARCH_PEBS_BASE_LBR_ENTRIES 8 +struct arch_pebs_lbr_header { + u64 rsvd; + u64 ctl; + 
u64 depth; + u64 ler_from; + u64 ler_to; + u64 ler_info; +}; + /* * AMD Extended Performance Monitoring and Debug cpuid feature detection */ -- Gitee From ce1f9752e06e465f8e0b174555053e1e6aeb8ce3 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:33 +0800 Subject: [PATCH 038/231] perf/x86/intel: Allocate arch-PEBS buffer and initialize PEBS_BASE MSR commit 2721e8da2de7271533ac36285332219f700d16ca upstream. Arch-PEBS introduces a new MSR IA32_PEBS_BASE to store the arch-PEBS buffer physical address. This patch allocates arch-PEBS buffer and then initialize IA32_PEBS_BASE MSR with the buffer physical address. Intel-SIG: commit 2721e8da2de7 perf/x86/intel: Allocate arch-PEBS buffer and initialize PEBS_BASE MSR Co-developed-by: Kan Liang Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-10-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 11 ++++- arch/x86/events/intel/ds.c | 82 ++++++++++++++++++++++++++++----- arch/x86/events/perf_event.h | 11 ++++- arch/x86/include/asm/intel_ds.h | 3 +- 4 files changed, 92 insertions(+), 15 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 1094ae7f0b3d..f3ef5c3107bc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5189,7 +5189,13 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) static int intel_pmu_cpu_prepare(int cpu) { - return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); + int ret; + + ret = intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); + if (ret) + return ret; + + return alloc_arch_pebs_buf_on_cpu(cpu); } static void flip_smm_bit(void *data) @@ -5418,6 +5424,7 @@ static void intel_pmu_cpu_starting(int cpu) return; init_debug_store_on_cpu(cpu); + init_arch_pebs_on_cpu(cpu); /* * Deal with CPUs that don't clear their LBRs on power-up, and that may * even boot with LBRs enabled. 
@@ -5515,6 +5522,7 @@ static void free_excl_cntrs(struct cpu_hw_events *cpuc) static void intel_pmu_cpu_dying(int cpu) { fini_debug_store_on_cpu(cpu); + fini_arch_pebs_on_cpu(cpu); } void intel_cpuc_finish(struct cpu_hw_events *cpuc) @@ -5535,6 +5543,7 @@ static void intel_pmu_cpu_dead(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + release_arch_pebs_buf_on_cpu(cpu); intel_cpuc_finish(cpuc); if (is_hybrid() && cpuc->pmu) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 43f9429da4e7..4f7a5bc733a0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -603,13 +603,18 @@ static int alloc_pebs_buffer(int cpu) int max, node = cpu_to_node(cpu); void *buffer, *insn_buff, *cea; - if (!x86_pmu.ds_pebs) + if (!intel_pmu_has_pebs()) return 0; buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); if (unlikely(!buffer)) return -ENOMEM; + if (x86_pmu.arch_pebs) { + hwev->pebs_vaddr = buffer; + return 0; + } + /* * HSW+ already provides us the eventing ip; no need to allocate this * buffer then. 
@@ -622,7 +627,7 @@ static int alloc_pebs_buffer(int cpu) } per_cpu(insn_buffer, cpu) = insn_buff; } - hwev->ds_pebs_vaddr = buffer; + hwev->pebs_vaddr = buffer; /* Update the cpu entry area mapping */ cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; ds->pebs_buffer_base = (unsigned long) cea; @@ -638,17 +643,20 @@ static void release_pebs_buffer(int cpu) struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); void *cea; - if (!x86_pmu.ds_pebs) + if (!intel_pmu_has_pebs()) return; - kfree(per_cpu(insn_buffer, cpu)); - per_cpu(insn_buffer, cpu) = NULL; + if (x86_pmu.ds_pebs) { + kfree(per_cpu(insn_buffer, cpu)); + per_cpu(insn_buffer, cpu) = NULL; - /* Clear the fixmap */ - cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; - ds_clear_cea(cea, x86_pmu.pebs_buffer_size); - dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); - hwev->ds_pebs_vaddr = NULL; + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds_clear_cea(cea, x86_pmu.pebs_buffer_size); + } + + dsfree_pages(hwev->pebs_vaddr, x86_pmu.pebs_buffer_size); + hwev->pebs_vaddr = NULL; } static int alloc_bts_buffer(int cpu) @@ -801,6 +809,56 @@ void reserve_ds_buffers(void) } } +inline int alloc_arch_pebs_buf_on_cpu(int cpu) +{ + if (!x86_pmu.arch_pebs) + return 0; + + return alloc_pebs_buffer(cpu); +} + +inline void release_arch_pebs_buf_on_cpu(int cpu) +{ + if (!x86_pmu.arch_pebs) + return; + + release_pebs_buffer(cpu); +} + +void init_arch_pebs_on_cpu(int cpu) +{ + struct cpu_hw_events *cpuc = per_cpu_ptr(&cpu_hw_events, cpu); + u64 arch_pebs_base; + + if (!x86_pmu.arch_pebs) + return; + + if (!cpuc->pebs_vaddr) { + WARN(1, "Fail to allocate PEBS buffer on CPU %d\n", cpu); + x86_pmu.pebs_active = 0; + return; + } + + /* + * 4KB-aligned pointer of the output buffer + * (__alloc_pages_node() return page aligned address) + * Buffer Size = 4KB * 2^SIZE + * contiguous physical buffer (__alloc_pages_node() with order) + */ + 
arch_pebs_base = virt_to_phys(cpuc->pebs_vaddr) | PEBS_BUFFER_SHIFT; + wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, (u32)arch_pebs_base, + (u32)(arch_pebs_base >> 32)); + x86_pmu.pebs_active = 1; +} + +inline void fini_arch_pebs_on_cpu(int cpu) +{ + if (!x86_pmu.arch_pebs) + return; + + wrmsr_on_cpu(cpu, MSR_IA32_PEBS_BASE, 0, 0); +} + /* * BTS */ @@ -2862,8 +2920,8 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs, return; } - base = cpuc->ds_pebs_vaddr; - top = (void *)((u64)cpuc->ds_pebs_vaddr + + base = cpuc->pebs_vaddr; + top = (void *)((u64)cpuc->pebs_vaddr + (index.wr << ARCH_PEBS_INDEX_WR_SHIFT)); index.wr = 0; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 32c91b5b9f47..d76d30fa37e8 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -282,8 +282,9 @@ struct cpu_hw_events { * Intel DebugStore bits */ struct debug_store *ds; - void *ds_pebs_vaddr; void *ds_bts_vaddr; + /* DS based PEBS or arch-PEBS buffer address */ + void *pebs_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; @@ -1610,6 +1611,14 @@ extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); int intel_pmu_init(void); +int alloc_arch_pebs_buf_on_cpu(int cpu); + +void release_arch_pebs_buf_on_cpu(int cpu); + +void init_arch_pebs_on_cpu(int cpu); + +void fini_arch_pebs_on_cpu(int cpu); + void init_debug_store_on_cpu(int cpu); void fini_debug_store_on_cpu(int cpu); diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index 5dbeac48a5b9..023c2883f9f3 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -4,7 +4,8 @@ #include #define BTS_BUFFER_SIZE (PAGE_SIZE << 4) -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SHIFT 4 +#define PEBS_BUFFER_SIZE (PAGE_SIZE << PEBS_BUFFER_SHIFT) /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS_FMT4 8 -- Gitee From 533d0de1485614d11f3c3faba5c33ce8ddf14be0 Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 
29 Oct 2025 18:21:34 +0800 Subject: [PATCH 039/231] perf/x86/intel: Update dyn_constraint base on PEBS event precise level commit e89c5d1f290e8915e0aad10014f2241086ea95e4 upstream. arch-PEBS provides CPUIDs to enumerate which counters support PEBS sampling and precise distribution PEBS sampling. Thus PEBS constraints should be dynamically configured base on these counter and precise distribution bitmap instead of defining them statically. Update event dyn_constraint base on PEBS event precise level. Intel-SIG: commit e89c5d1f290e perf/x86/intel: Update dyn_constraint base on PEBS event precise level Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-11-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 11 +++++++++++ arch/x86/events/intel/ds.c | 1 + 2 files changed, 12 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f3ef5c3107bc..6bf7356b07a4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4240,6 +4240,8 @@ static int intel_pmu_hw_config(struct perf_event *event) } if (event->attr.precise_ip) { + struct arch_pebs_cap pebs_cap = hybrid(event->pmu, arch_pebs_cap); + if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) return -EINVAL; @@ -4253,6 +4255,15 @@ static int intel_pmu_hw_config(struct perf_event *event) } if (x86_pmu.pebs_aliases) x86_pmu.pebs_aliases(event); + + if (x86_pmu.arch_pebs) { + u64 cntr_mask = hybrid(event->pmu, intel_ctrl) & + ~GLOBAL_CTRL_EN_PERF_METRICS; + u64 pebs_mask = event->attr.precise_ip >= 3 ? 
+ pebs_cap.pdists : pebs_cap.counters; + if (cntr_mask != pebs_mask) + event->hw.dyn_constraint &= pebs_mask; + } } if (needs_branch_stack(event) && is_sampling_event(event)) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 4f7a5bc733a0..d7897ebec56b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2984,6 +2984,7 @@ static void __init intel_arch_pebs_init(void) x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; x86_pmu.drain_pebs = intel_pmu_drain_arch_pebs; x86_pmu.pebs_capable = ~0ULL; + x86_pmu.flags |= PMU_FL_PEBS_ALL; x86_pmu.pebs_enable = __intel_pmu_pebs_enable; x86_pmu.pebs_disable = __intel_pmu_pebs_disable; -- Gitee From 8c6e974823631ea9ca175accb27a5d0a12ac679b Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:35 +0800 Subject: [PATCH 040/231] perf/x86/intel: Setup PEBS data configuration and enable legacy groups commit 52448a0a739002eca3d051a6ec314a0b178949a1 upstream. Different with legacy PEBS, arch-PEBS provides per-counter PEBS data configuration by programing MSR IA32_PMC_GPx/FXx_CFG_C MSRs. This patch obtains PEBS data configuration from event attribute and then writes the PEBS data configuration to MSR IA32_PMC_GPx/FXx_CFG_C and enable corresponding PEBS groups. Please notice this patch only enables XMM SIMD regs sampling for arch-PEBS, the other SIMD regs (OPMASK/YMM/ZMM) sampling on arch-PEBS would be supported after PMI based SIMD regs (OPMASK/YMM/ZMM) sampling is supported. 
Intel-SIG: commit 52448a0a7390 perf/x86/intel: Setup PEBS data configuration and enable legacy groups Co-developed-by: Kan Liang Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-12-dapeng1.mi@linux.intel.com [jz: use rdmsrl/wrmsrl instead of rdmsrq/wrmsrq] Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 136 ++++++++++++++++++++++++++++++- arch/x86/events/intel/ds.c | 17 ++++ arch/x86/events/perf_event.h | 4 + arch/x86/include/asm/intel_ds.h | 7 ++ arch/x86/include/asm/msr-index.h | 8 ++ 5 files changed, 171 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6bf7356b07a4..cac9df69038c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2552,6 +2552,45 @@ static void intel_pmu_disable_fixed(struct perf_event *event) cpuc->fixed_ctrl_val &= ~mask; } +static inline void __intel_pmu_update_event_ext(int idx, u64 ext) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u32 msr; + + if (idx < INTEL_PMC_IDX_FIXED) { + msr = MSR_IA32_PMC_V6_GP0_CFG_C + + x86_pmu.addr_offset(idx, false); + } else { + msr = MSR_IA32_PMC_V6_FX0_CFG_C + + x86_pmu.addr_offset(idx - INTEL_PMC_IDX_FIXED, false); + } + + cpuc->cfg_c_val[idx] = ext; + wrmsrl(msr, ext); +} + +static void intel_pmu_disable_event_ext(struct perf_event *event) +{ + if (!x86_pmu.arch_pebs) + return; + + /* + * Only clear CFG_C MSR for PEBS counter group events, + * it avoids the HW counter's value to be added into + * other PEBS records incorrectly after PEBS counter + * group events are disabled. + * + * For other events, it's unnecessary to clear CFG_C MSRs + * since CFG_C doesn't take effect if counter is in + * disabled state. That helps to reduce the WRMSR overhead + * in context switches. 
+ */ + if (!is_pebs_counter_event_group(event)) + return; + + __intel_pmu_update_event_ext(event->hw.idx, 0); +} + static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -2560,9 +2599,12 @@ static void intel_pmu_disable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: intel_clear_masks(event, idx); + intel_pmu_disable_event_ext(event); x86_pmu_disable_event(event); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: + intel_pmu_disable_event_ext(event); + fallthrough; case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: intel_pmu_disable_fixed(event); break; @@ -2929,6 +2971,66 @@ static void intel_pmu_enable_acr(struct perf_event *event) DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr); +static void intel_pmu_enable_event_ext(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + union arch_pebs_index old, new; + struct arch_pebs_cap cap; + u64 ext = 0; + + if (!x86_pmu.arch_pebs) + return; + + cap = hybrid(cpuc->pmu, arch_pebs_cap); + + if (event->attr.precise_ip) { + u64 pebs_data_cfg = intel_get_arch_pebs_data_config(event); + + ext |= ARCH_PEBS_EN; + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) + ext |= (-hwc->sample_period) & ARCH_PEBS_RELOAD; + + if (pebs_data_cfg && cap.caps) { + if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) + ext |= ARCH_PEBS_AUX & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_GP) + ext |= ARCH_PEBS_GPR & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_XMMS) + ext |= ARCH_PEBS_VECR_XMM & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_LBRS) + ext |= ARCH_PEBS_LBR & cap.caps; + } + + if (cpuc->n_pebs == cpuc->n_large_pebs) + new.thresh = ARCH_PEBS_THRESH_MULTI; + else + new.thresh = ARCH_PEBS_THRESH_SINGLE; + + rdmsrl(MSR_IA32_PEBS_INDEX, old.whole); + if (new.thresh != old.thresh || !old.en) { + if (old.thresh == ARCH_PEBS_THRESH_MULTI 
&& old.wr > 0) { + /* + * Large PEBS was enabled. + * Drain PEBS buffer before applying the single PEBS. + */ + intel_pmu_drain_pebs_buffer(); + } else { + new.wr = 0; + new.full = 0; + new.en = 1; + wrmsrl(MSR_IA32_PEBS_INDEX, new.whole); + } + } + } + + if (cpuc->cfg_c_val[hwc->idx] != ext) + __intel_pmu_update_event_ext(hwc->idx, ext); +} + static void intel_pmu_enable_event(struct perf_event *event) { u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; @@ -2944,10 +3046,12 @@ static void intel_pmu_enable_event(struct perf_event *event) enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); static_call_cond(intel_pmu_enable_acr_event)(event); + intel_pmu_enable_event_ext(event); __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: static_call_cond(intel_pmu_enable_acr_event)(event); + intel_pmu_enable_event_ext(event); fallthrough; case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); @@ -5263,6 +5367,30 @@ static inline bool intel_pmu_broken_perf_cap(void) return false; } +static inline void __intel_update_pmu_caps(struct pmu *pmu) +{ + struct pmu *dest_pmu = pmu ? 
pmu : x86_get_pmu(smp_processor_id()); + + if (hybrid(pmu, arch_pebs_cap).caps & ARCH_PEBS_VECR_XMM) + dest_pmu->capabilities |= PERF_PMU_CAP_EXTENDED_REGS; +} + +static inline void __intel_update_large_pebs_flags(struct pmu *pmu) +{ + u64 caps = hybrid(pmu, arch_pebs_cap).caps; + + x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; + if (caps & ARCH_PEBS_LBR) + x86_pmu.large_pebs_flags |= PERF_SAMPLE_BRANCH_STACK; + + if (!(caps & ARCH_PEBS_AUX)) + x86_pmu.large_pebs_flags &= ~PERF_SAMPLE_DATA_SRC; + if (!(caps & ARCH_PEBS_GPR)) { + x86_pmu.large_pebs_flags &= + ~(PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER); + } +} + #define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED)) static void update_pmu_cap(struct pmu *pmu) @@ -5311,8 +5439,12 @@ static void update_pmu_cap(struct pmu *pmu) hybrid(pmu, arch_pebs_cap).counters = pebs_mask; hybrid(pmu, arch_pebs_cap).pdists = pdists_mask; - if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) + if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask)) { x86_pmu.arch_pebs = 0; + } else { + __intel_update_pmu_caps(pmu); + __intel_update_large_pebs_flags(pmu); + } } else { WARN_ON(x86_pmu.arch_pebs == 1); x86_pmu.arch_pebs = 0; @@ -5474,6 +5606,8 @@ static void intel_pmu_cpu_starting(int cpu) } } + __intel_update_pmu_caps(cpuc->pmu); + if (!cpuc->shared_regs) return; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d7897ebec56b..dae34a22aac2 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1506,6 +1506,18 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, } } +u64 intel_get_arch_pebs_data_config(struct perf_event *event) +{ + u64 pebs_data_cfg = 0; + + if (WARN_ON(event->hw.idx < 0 || event->hw.idx >= X86_PMC_IDX_MAX)) + return 0; + + pebs_data_cfg |= pebs_update_adaptive_cfg(event); + + return pebs_data_cfg; +} + void intel_pmu_pebs_add(struct perf_event *event) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); @@ -2926,6 
+2938,11 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs, index.wr = 0; index.full = 0; + index.en = 1; + if (cpuc->n_pebs == cpuc->n_large_pebs) + index.thresh = ARCH_PEBS_THRESH_MULTI; + else + index.thresh = ARCH_PEBS_THRESH_SINGLE; wrmsrl(MSR_IA32_PEBS_INDEX, index.whole); mask = hybrid(cpuc->pmu, arch_pebs_cap).counters & cpuc->pebs_enabled; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index d76d30fa37e8..569b9295571e 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -303,6 +303,8 @@ struct cpu_hw_events { /* Intel ACR configuration */ u64 acr_cfg_b[X86_PMC_IDX_MAX]; u64 acr_cfg_c[X86_PMC_IDX_MAX]; + /* Cached CFG_C values */ + u64 cfg_c_val[X86_PMC_IDX_MAX]; /* * Intel LBR bits @@ -1773,6 +1775,8 @@ void intel_pmu_pebs_data_source_cmt(void); void intel_pmu_pebs_data_source_lnl(void); +u64 intel_get_arch_pebs_data_config(struct perf_event *event); + int intel_pmu_setup_lbr_filter(struct perf_event *event); void intel_pt_interrupt(void); diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index 023c2883f9f3..695f87efbeb8 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -7,6 +7,13 @@ #define PEBS_BUFFER_SHIFT 4 #define PEBS_BUFFER_SIZE (PAGE_SIZE << PEBS_BUFFER_SHIFT) +/* + * The largest PEBS record could consume a page, ensure + * a record at least can be written after triggering PMI. 
+ */ +#define ARCH_PEBS_THRESH_MULTI ((PEBS_BUFFER_SIZE - PAGE_SIZE) >> PEBS_BUFFER_SHIFT) +#define ARCH_PEBS_THRESH_SINGLE 1 + /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS_FMT4 8 #define MAX_PEBS_EVENTS 32 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ad80cdfe1dcc..f039b085155a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -315,6 +315,14 @@ #define ARCH_PEBS_OFFSET_MASK 0x7fffff #define ARCH_PEBS_INDEX_WR_SHIFT 4 +#define ARCH_PEBS_RELOAD 0xffffffff +#define ARCH_PEBS_LBR_SHIFT 40 +#define ARCH_PEBS_LBR (0x3ull << ARCH_PEBS_LBR_SHIFT) +#define ARCH_PEBS_VECR_XMM BIT_ULL(49) +#define ARCH_PEBS_GPR BIT_ULL(61) +#define ARCH_PEBS_AUX BIT_ULL(62) +#define ARCH_PEBS_EN BIT_ULL(63) + #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) #define RTIT_CTL_CYCLEACC BIT(1) -- Gitee From 20a8798d451af1bf9cd113711f42c42166a7198b Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Wed, 29 Oct 2025 18:21:36 +0800 Subject: [PATCH 041/231] perf/x86/intel: Add counter group support for arch-PEBS commit bb5f13df3c455110c4468a31a5b21954268108c9 upstream. Base on previous adaptive PEBS counter snapshot support, add counter group support for architectural PEBS. Since arch-PEBS shares same counter group layout with adaptive PEBS, directly reuse __setup_pebs_counter_group() helper to process arch-PEBS counter group. 
Intel-SIG: commit bb5f13df3c45 perf/x86/intel: Add counter group support for arch-PEBS Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20251029102136.61364-13-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 38 ++++++++++++++++++++++++++++--- arch/x86/events/intel/ds.c | 29 ++++++++++++++++++++--- arch/x86/include/asm/msr-index.h | 6 +++++ arch/x86/include/asm/perf_event.h | 13 ++++++++--- 4 files changed, 77 insertions(+), 9 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index cac9df69038c..e1a585bae5cb 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3003,6 +3003,17 @@ static void intel_pmu_enable_event_ext(struct perf_event *event) if (pebs_data_cfg & PEBS_DATACFG_LBRS) ext |= ARCH_PEBS_LBR & cap.caps; + + if (pebs_data_cfg & + (PEBS_DATACFG_CNTR_MASK << PEBS_DATACFG_CNTR_SHIFT)) + ext |= ARCH_PEBS_CNTR_GP & cap.caps; + + if (pebs_data_cfg & + (PEBS_DATACFG_FIX_MASK << PEBS_DATACFG_FIX_SHIFT)) + ext |= ARCH_PEBS_CNTR_FIXED & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_METRICS) + ext |= ARCH_PEBS_CNTR_METRICS & cap.caps; } if (cpuc->n_pebs == cpuc->n_large_pebs) @@ -3027,6 +3038,9 @@ static void intel_pmu_enable_event_ext(struct perf_event *event) } } + if (is_pebs_counter_event_group(event)) + ext |= ARCH_PEBS_CNTR_ALLOW; + if (cpuc->cfg_c_val[hwc->idx] != ext) __intel_pmu_update_event_ext(hwc->idx, ext); } @@ -4311,6 +4325,20 @@ static bool intel_pmu_is_acr_group(struct perf_event *event) return false; } +static inline bool intel_pmu_has_pebs_counter_group(struct pmu *pmu) +{ + u64 caps; + + if (x86_pmu.intel_cap.pebs_format >= 6 && x86_pmu.intel_cap.pebs_baseline) + return true; + + caps = hybrid(pmu, arch_pebs_cap).caps; + if (x86_pmu.arch_pebs && (caps & ARCH_PEBS_CNTR_MASK)) + return true; + + return false; +} + static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event, u64 
*cause_mask, int *num) { @@ -4455,8 +4483,7 @@ static int intel_pmu_hw_config(struct perf_event *event) } if ((event->attr.sample_type & PERF_SAMPLE_READ) && - (x86_pmu.intel_cap.pebs_format >= 6) && - x86_pmu.intel_cap.pebs_baseline && + intel_pmu_has_pebs_counter_group(event->pmu) && is_sampling_event(event) && event->attr.precise_ip) event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR; @@ -5382,6 +5409,8 @@ static inline void __intel_update_large_pebs_flags(struct pmu *pmu) x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; if (caps & ARCH_PEBS_LBR) x86_pmu.large_pebs_flags |= PERF_SAMPLE_BRANCH_STACK; + if (caps & ARCH_PEBS_CNTR_MASK) + x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ; if (!(caps & ARCH_PEBS_AUX)) x86_pmu.large_pebs_flags &= ~PERF_SAMPLE_DATA_SRC; @@ -7057,8 +7086,11 @@ __init int intel_pmu_init(void) * Many features on and after V6 require dynamic constraint, * e.g., Arch PEBS, ACR. */ - if (version >= 6) + if (version >= 6) { x86_pmu.flags |= PMU_FL_DYN_CONSTRAINT; + x86_pmu.late_setup = intel_pmu_late_setup; + } + /* * Install the hw-cache-events table: */ diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index dae34a22aac2..231c3bda375f 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1508,13 +1508,20 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, u64 intel_get_arch_pebs_data_config(struct perf_event *event) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); u64 pebs_data_cfg = 0; + u64 cntr_mask; if (WARN_ON(event->hw.idx < 0 || event->hw.idx >= X86_PMC_IDX_MAX)) return 0; pebs_data_cfg |= pebs_update_adaptive_cfg(event); + cntr_mask = (PEBS_DATACFG_CNTR_MASK << PEBS_DATACFG_CNTR_SHIFT) | + (PEBS_DATACFG_FIX_MASK << PEBS_DATACFG_FIX_SHIFT) | + PEBS_DATACFG_CNTR | PEBS_DATACFG_METRICS; + pebs_data_cfg |= cpuc->pebs_data_cfg & cntr_mask; + return pebs_data_cfg; } @@ -2422,6 +2429,24 @@ static void setup_arch_pebs_sample_data(struct perf_event *event, } } + if 
(header->cntr) { + struct arch_pebs_cntr_header *cntr = next_record; + unsigned int nr; + + next_record += sizeof(struct arch_pebs_cntr_header); + + if (is_pebs_counter_event_group(event)) { + __setup_pebs_counter_group(cpuc, event, + (struct pebs_cntr_header *)cntr, next_record); + data->sample_flags |= PERF_SAMPLE_READ; + } + + nr = hweight32(cntr->cntr) + hweight32(cntr->fixed); + if (cntr->metrics == INTEL_CNTR_METRICS) + nr += 2; + next_record += nr * sizeof(u64); + } + /* Parse followed fragments if there are. */ if (arch_pebs_record_continued(header)) { at = at + header->size; @@ -3073,10 +3098,8 @@ static void __init intel_ds_pebs_init(void) break; case 6: - if (x86_pmu.intel_cap.pebs_baseline) { + if (x86_pmu.intel_cap.pebs_baseline) x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ; - x86_pmu.late_setup = intel_pmu_late_setup; - } fallthrough; case 5: x86_pmu.pebs_ept = 1; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index f039b085155a..a9c99cfaddec 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -316,12 +316,18 @@ #define ARCH_PEBS_INDEX_WR_SHIFT 4 #define ARCH_PEBS_RELOAD 0xffffffff +#define ARCH_PEBS_CNTR_ALLOW BIT_ULL(35) +#define ARCH_PEBS_CNTR_GP BIT_ULL(36) +#define ARCH_PEBS_CNTR_FIXED BIT_ULL(37) +#define ARCH_PEBS_CNTR_METRICS BIT_ULL(38) #define ARCH_PEBS_LBR_SHIFT 40 #define ARCH_PEBS_LBR (0x3ull << ARCH_PEBS_LBR_SHIFT) #define ARCH_PEBS_VECR_XMM BIT_ULL(49) #define ARCH_PEBS_GPR BIT_ULL(61) #define ARCH_PEBS_AUX BIT_ULL(62) #define ARCH_PEBS_EN BIT_ULL(63) +#define ARCH_PEBS_CNTR_MASK (ARCH_PEBS_CNTR_GP | ARCH_PEBS_CNTR_FIXED | \ + ARCH_PEBS_CNTR_METRICS) #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index ed2609f7b985..e64eaaab96e7 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -158,16 +158,16 @@ #define 
ARCH_PERFMON_EVENTS_COUNT 7 #define PEBS_DATACFG_MEMINFO BIT_ULL(0) -#define PEBS_DATACFG_GP BIT_ULL(1) +#define PEBS_DATACFG_GP BIT_ULL(1) #define PEBS_DATACFG_XMMS BIT_ULL(2) #define PEBS_DATACFG_LBRS BIT_ULL(3) -#define PEBS_DATACFG_LBR_SHIFT 24 #define PEBS_DATACFG_CNTR BIT_ULL(4) +#define PEBS_DATACFG_METRICS BIT_ULL(5) +#define PEBS_DATACFG_LBR_SHIFT 24 #define PEBS_DATACFG_CNTR_SHIFT 32 #define PEBS_DATACFG_CNTR_MASK GENMASK_ULL(15, 0) #define PEBS_DATACFG_FIX_SHIFT 48 #define PEBS_DATACFG_FIX_MASK GENMASK_ULL(7, 0) -#define PEBS_DATACFG_METRICS BIT_ULL(5) /* Steal the highest bit of pebs_data_cfg for SW usage */ #define PEBS_UPDATE_DS_SW BIT_ULL(63) @@ -618,6 +618,13 @@ struct arch_pebs_lbr_header { u64 ler_info; }; +struct arch_pebs_cntr_header { + u32 cntr; + u32 fixed; + u32 metrics; + u32 reserved; +}; + /* * AMD Extended Performance Monitoring and Debug cpuid feature detection */ -- Gitee From f6fcb452b5b66d92ed69792d18995a65718268dd Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 May 2025 10:55:42 -0700 Subject: [PATCH 042/231] perf/x86/intel: Add a check for dynamic constraints commit bd24f9beed591422f45fa6d8d0d3bd3a755b8a48 upstream. The current event scheduler has a limit. If the counter constraint of an event is not a subset of any other counter constraint with an equal or higher weight. The counters may not be fully utilized. To workaround it, the commit bc1738f6ee83 ("perf, x86: Fix event scheduler for constraints with overlapping counters") introduced an overlap flag, which is hardcoded to the event constraint that may trigger the limit. It only works for static constraints. Many features on and after Intel PMON v6 require dynamic constraints. An event constraint is decided by both static and dynamic constraints at runtime. See commit 4dfe3232cc04 ("perf/x86: Add dynamic constraint"). The dynamic constraints are from CPUID enumeration. It's impossible to hardcode it in advance. It's not practical to set the overlap flag to all events. 
It's harmful to the scheduler. For the existing Intel platforms, the dynamic constraints don't trigger the limit. A real fix is not required. However, for virtualization, VMM may give a weird CPUID enumeration to a guest. It's impossible to indicate what the weird enumeration is. A check is introduced, which can list the possible breaks if a weird enumeration is used. Check the dynamic constraints enumerated for normal, branch counters logging, and auto-counter reload. Check both PEBS and non-PEBS constraints. Intel-SIG: commit bd24f9beed59 perf/x86/intel: Add a check for dynamic constraints Closes: https://lore.kernel.org/lkml/20250416195610.GC38216@noisy.programming.kicks-ass.net/ Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://patch.msgid.link/20250512175542.2000708-1-kan.liang@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 156 +++++++++++++++++++++++++++++++++-- 1 file changed, 148 insertions(+), 8 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index e1a585bae5cb..453778ae502a 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5382,6 +5382,151 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con u64 fixed_cntr_mask, u64 intel_ctrl); +enum dyn_constr_type { + DYN_CONSTR_NONE, + DYN_CONSTR_BR_CNTR, + DYN_CONSTR_ACR_CNTR, + DYN_CONSTR_ACR_CAUSE, + + DYN_CONSTR_MAX, +}; + +static const char * const dyn_constr_type_name[] = { + [DYN_CONSTR_NONE] = "a normal event", + [DYN_CONSTR_BR_CNTR] = "a branch counter logging event", + [DYN_CONSTR_ACR_CNTR] = "an auto-counter reload event", + [DYN_CONSTR_ACR_CAUSE] = "an auto-counter reload cause event", +}; + +static void __intel_pmu_check_dyn_constr(struct event_constraint *constr, + enum dyn_constr_type type, u64 mask) +{ + struct event_constraint *c1, *c2; + int new_weight, check_weight; + u64 new_mask, check_mask; + + for_each_event_constraint(c1, constr) { + 
new_mask = c1->idxmsk64 & mask; + new_weight = hweight64(new_mask); + + /* ignore topdown perf metrics event */ + if (c1->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) + continue; + + if (!new_weight && fls64(c1->idxmsk64) < INTEL_PMC_IDX_FIXED) { + pr_info("The event 0x%llx is not supported as %s.\n", + c1->code, dyn_constr_type_name[type]); + } + + if (new_weight <= 1) + continue; + + for_each_event_constraint(c2, c1 + 1) { + bool check_fail = false; + + check_mask = c2->idxmsk64 & mask; + check_weight = hweight64(check_mask); + + if (c2->idxmsk64 & INTEL_PMC_MSK_TOPDOWN || + !check_weight) + continue; + + /* The same constraints or no overlap */ + if (new_mask == check_mask || + (new_mask ^ check_mask) == (new_mask | check_mask)) + continue; + + /* + * A scheduler issue may be triggered in the following cases. + * - Two overlap constraints have the same weight. + * E.g., A constraints: 0x3, B constraints: 0x6 + * event counter failure case + * B PMC[2:1] 1 + * A PMC[1:0] 0 + * A PMC[1:0] FAIL + * - Two overlap constraints have different weight. + * The constraint has a low weight, but has high last bit. 
+ * E.g., A constraints: 0x7, B constraints: 0xC + * event counter failure case + * B PMC[3:2] 2 + * A PMC[2:0] 0 + * A PMC[2:0] 1 + * A PMC[2:0] FAIL + */ + if (new_weight == check_weight) { + check_fail = true; + } else if (new_weight < check_weight) { + if ((new_mask | check_mask) != check_mask && + fls64(new_mask) > fls64(check_mask)) + check_fail = true; + } else { + if ((new_mask | check_mask) != new_mask && + fls64(new_mask) < fls64(check_mask)) + check_fail = true; + } + + if (check_fail) { + pr_info("The two events 0x%llx and 0x%llx may not be " + "fully scheduled under some circumstances as " + "%s.\n", + c1->code, c2->code, dyn_constr_type_name[type]); + } + } + } +} + +static void intel_pmu_check_dyn_constr(struct pmu *pmu, + struct event_constraint *constr, + u64 cntr_mask) +{ + enum dyn_constr_type i; + u64 mask; + + for (i = DYN_CONSTR_NONE; i < DYN_CONSTR_MAX; i++) { + mask = 0; + switch (i) { + case DYN_CONSTR_NONE: + mask = cntr_mask; + break; + case DYN_CONSTR_BR_CNTR: + if (x86_pmu.flags & PMU_FL_BR_CNTR) + mask = x86_pmu.lbr_counters; + break; + case DYN_CONSTR_ACR_CNTR: + mask = hybrid(pmu, acr_cntr_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); + break; + case DYN_CONSTR_ACR_CAUSE: + if (hybrid(pmu, acr_cntr_mask64) == hybrid(pmu, acr_cause_mask64)) + continue; + mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); + break; + default: + pr_warn("Unsupported dynamic constraint type %d\n", i); + } + + if (mask) + __intel_pmu_check_dyn_constr(constr, i, mask); + } +} + +static void intel_pmu_check_event_constraints_all(struct pmu *pmu) +{ + struct event_constraint *event_constraints = hybrid(pmu, event_constraints); + struct event_constraint *pebs_constraints = hybrid(pmu, pebs_constraints); + u64 cntr_mask = hybrid(pmu, cntr_mask64); + u64 fixed_cntr_mask = hybrid(pmu, fixed_cntr_mask64); + u64 intel_ctrl = hybrid(pmu, intel_ctrl); + + intel_pmu_check_event_constraints(event_constraints, cntr_mask, + 
fixed_cntr_mask, intel_ctrl); + + if (event_constraints) + intel_pmu_check_dyn_constr(pmu, event_constraints, cntr_mask); + + if (pebs_constraints) + intel_pmu_check_dyn_constr(pmu, pebs_constraints, cntr_mask); +} + static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); static inline bool intel_pmu_broken_perf_cap(void) @@ -5504,10 +5649,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) else pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT; - intel_pmu_check_event_constraints(pmu->event_constraints, - pmu->cntr_mask64, - pmu->fixed_cntr_mask64, - pmu->intel_ctrl); + intel_pmu_check_event_constraints_all(&pmu->pmu); intel_pmu_check_extra_regs(pmu->extra_regs); } @@ -7847,10 +7989,8 @@ __init int intel_pmu_init(void) if (x86_pmu.intel_cap.anythread_deprecated) x86_pmu.format_attrs = intel_arch_formats_attr; - intel_pmu_check_event_constraints(x86_pmu.event_constraints, - x86_pmu.cntr_mask64, - x86_pmu.fixed_cntr_mask64, - x86_pmu.intel_ctrl); + intel_pmu_check_event_constraints_all(NULL); + /* * Access LBR MSR may cause #GP under certain circumstances. * Check all LBR MSR here. -- Gitee From dec8bacfb683577ca3b8c5896843f3439d7037b8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 7 Nov 2025 14:50:16 +0100 Subject: [PATCH 043/231] perf/x86/intel: Check PEBS dyn_constraints commit 02da693f6658b9f73b97fce3695358ef3f13d0d1 upstream. Handle the interaction between ("perf/x86/intel: Update dyn_constraint base on PEBS event precise level") and ("perf/x86/intel: Add a check for dynamic constraints"). 
Intel-SIG: commit 02da693f6658 perf/x86/intel: Check PEBS dyn_constraints Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 453778ae502a..088a73d14c51 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5387,6 +5387,8 @@ enum dyn_constr_type { DYN_CONSTR_BR_CNTR, DYN_CONSTR_ACR_CNTR, DYN_CONSTR_ACR_CAUSE, + DYN_CONSTR_PEBS, + DYN_CONSTR_PDIST, DYN_CONSTR_MAX, }; @@ -5396,6 +5398,8 @@ static const char * const dyn_constr_type_name[] = { [DYN_CONSTR_BR_CNTR] = "a branch counter logging event", [DYN_CONSTR_ACR_CNTR] = "an auto-counter reload event", [DYN_CONSTR_ACR_CAUSE] = "an auto-counter reload cause event", + [DYN_CONSTR_PEBS] = "a PEBS event", + [DYN_CONSTR_PDIST] = "a PEBS PDIST event", }; static void __intel_pmu_check_dyn_constr(struct event_constraint *constr, @@ -5500,6 +5504,14 @@ static void intel_pmu_check_dyn_constr(struct pmu *pmu, continue; mask = hybrid(pmu, acr_cause_mask64) & GENMASK_ULL(INTEL_PMC_MAX_GENERIC - 1, 0); break; + case DYN_CONSTR_PEBS: + if (x86_pmu.arch_pebs) + mask = hybrid(pmu, arch_pebs_cap).counters; + break; + case DYN_CONSTR_PDIST: + if (x86_pmu.arch_pebs) + mask = hybrid(pmu, arch_pebs_cap).pdists; + break; default: pr_warn("Unsupported dynamic constraint type %d\n", i); } -- Gitee From b01394047e792d28cd6c4ae986eef7aa7402cfbb Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 7 Nov 2025 14:50:20 +0100 Subject: [PATCH 044/231] perf/x86/intel: Optimize PEBS extended config commit 2093d8cf80fa5552d1025a78a8f3a10bf3b6466e upstream. Similar to enable_acr_event, avoid the branch. 
Intel-SIG: commit 2093d8cf80fa perf/x86/intel: Optimize PEBS extended config Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 088a73d14c51..7fe4a72fdb70 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2571,9 +2571,6 @@ static inline void __intel_pmu_update_event_ext(int idx, u64 ext) static void intel_pmu_disable_event_ext(struct perf_event *event) { - if (!x86_pmu.arch_pebs) - return; - /* * Only clear CFG_C MSR for PEBS counter group events, * it avoids the HW counter's value to be added into @@ -2591,6 +2588,8 @@ static void intel_pmu_disable_event_ext(struct perf_event *event) __intel_pmu_update_event_ext(event->hw.idx, 0); } +DEFINE_STATIC_CALL_NULL(intel_pmu_disable_event_ext, intel_pmu_disable_event_ext); + static void intel_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -2599,11 +2598,11 @@ static void intel_pmu_disable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: intel_clear_masks(event, idx); - intel_pmu_disable_event_ext(event); + static_call_cond(intel_pmu_disable_event_ext)(event); x86_pmu_disable_event(event); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: - intel_pmu_disable_event_ext(event); + static_call_cond(intel_pmu_disable_event_ext)(event); fallthrough; case INTEL_PMC_IDX_METRIC_BASE ... 
INTEL_PMC_IDX_METRIC_END: intel_pmu_disable_fixed(event); @@ -2979,9 +2978,6 @@ static void intel_pmu_enable_event_ext(struct perf_event *event) struct arch_pebs_cap cap; u64 ext = 0; - if (!x86_pmu.arch_pebs) - return; - cap = hybrid(cpuc->pmu, arch_pebs_cap); if (event->attr.precise_ip) { @@ -3045,6 +3041,8 @@ static void intel_pmu_enable_event_ext(struct perf_event *event) __intel_pmu_update_event_ext(hwc->idx, ext); } +DEFINE_STATIC_CALL_NULL(intel_pmu_enable_event_ext, intel_pmu_enable_event_ext); + static void intel_pmu_enable_event(struct perf_event *event) { u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; @@ -3060,12 +3058,12 @@ static void intel_pmu_enable_event(struct perf_event *event) enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); static_call_cond(intel_pmu_enable_acr_event)(event); - intel_pmu_enable_event_ext(event); + static_call_cond(intel_pmu_enable_event_ext)(event); __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: static_call_cond(intel_pmu_enable_acr_event)(event); - intel_pmu_enable_event_ext(event); + static_call_cond(intel_pmu_enable_event_ext)(event); fallthrough; case INTEL_PMC_IDX_METRIC_BASE ... 
INTEL_PMC_IDX_METRIC_END: intel_pmu_enable_fixed(event); @@ -7990,8 +7988,13 @@ __init int intel_pmu_init(void) if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) update_pmu_cap(NULL); - if (x86_pmu.arch_pebs) + if (x86_pmu.arch_pebs) { + static_call_update(intel_pmu_disable_event_ext, + intel_pmu_disable_event_ext); + static_call_update(intel_pmu_enable_event_ext, + intel_pmu_enable_event_ext); pr_cont("Architectural PEBS, "); + } intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64, &x86_pmu.fixed_cntr_mask64, -- Gitee From 4e14d39516e1b16fea8cf84577ee80c6e06239aa Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 12 Nov 2025 10:40:26 +0100 Subject: [PATCH 045/231] perf/x86/intel: Fix and clean up intel_pmu_drain_arch_pebs() type use commit 9929dffce5ed7e2988e0274f4db98035508b16d9 upstream. The following commit introduced a build failure on x86-32: d21954c8a0ff ("perf/x86/intel: Process arch-PEBS records or record fragments") ... arch/x86/events/intel/ds.c:2983:24: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast] The forced type conversion to 'u64' and 'void *' are not 32-bit clean, but they are also entirely unnecessary: ->pebs_vaddr is 'void *' already, and integer-compatible pointer arithmetics will work just fine on it. Fix & simplify the code. 
Intel-SIG: commit 9929dffce5ed perf/x86/intel: Fix and clean up intel_pmu_drain_arch_pebs() type use Reported-by: Stephen Rothwell Fixes: d21954c8a0ff ("perf/x86/intel: Process arch-PEBS records or record fragments") Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra (Intel) Acked-by: Dapeng Mi Cc: Kan Liang Link: https://patch.msgid.link/20251029102136.61364-10-dapeng1.mi@linux.intel.com Signed-off-by: Jason Zeng --- arch/x86/events/intel/ds.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 231c3bda375f..c26dd4275111 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2958,8 +2958,7 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs, } base = cpuc->pebs_vaddr; - top = (void *)((u64)cpuc->pebs_vaddr + - (index.wr << ARCH_PEBS_INDEX_WR_SHIFT)); + top = cpuc->pebs_vaddr + (index.wr << ARCH_PEBS_INDEX_WR_SHIFT); index.wr = 0; index.full = 0; -- Gitee From cbd7e60c1a5dbaf4f3f34c88ce6c4a1bb9f0d946 Mon Sep 17 00:00:00 2001 From: Evan Li Date: Fri, 12 Dec 2025 16:49:43 +0800 Subject: [PATCH 046/231] perf/x86/intel: Fix NULL event dereference crash in handle_pmi_common() commit 9415f749d34b926b9e4853da1462f4d941f89a0d upstream. handle_pmi_common() may observe an active bit set in cpuc->active_mask while the corresponding cpuc->events[] entry has already been cleared, which leads to a NULL pointer dereference. This can happen when interrupt throttling stops all events in a group while PEBS processing is still in progress. perf_event_overflow() can trigger perf_event_throttle_group(), which stops the group and clears the cpuc->events[] entry, but the active bit may still be set when handle_pmi_common() iterates over the events. The following recent fix: 7e772a93eb61 ("perf/x86: Fix NULL event access and potential PEBS record loss") moved the cpuc->events[] clearing from x86_pmu_stop() to x86_pmu_del() and relied on cpuc->active_mask/pebs_enabled checks. 
However, handle_pmi_common() can still encounter a NULL cpuc->events[] entry despite the active bit being set. Add an explicit NULL check on the event pointer before using it, to cover this legitimate scenario and avoid the NULL dereference crash. Intel-SIG: commit 9415f749d34b perf/x86/intel: Fix NULL event dereference crash in handle_pmi_common() Fixes: 7e772a93eb61 ("perf/x86: Fix NULL event access and potential PEBS record loss") Reported-by: kitta Co-developed-by: kitta Signed-off-by: Evan Li Signed-off-by: Ingo Molnar Link: https://patch.msgid.link/20251212084943.2124787-1-evan.li@linux.alibaba.com Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220855 Signed-off-by: Jason Zeng --- arch/x86/events/intel/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7fe4a72fdb70..ba4672b2dee0 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3367,6 +3367,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) if (!test_bit(bit, cpuc->active_mask)) continue; + /* Event may have already been cleared: */ + if (!event) + continue; /* * There may be unprocessed PEBS records in the PEBS buffer, -- Gitee From 65d382a4243462d79c6bfbc3e4e5396f153ec663 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 22 Dec 2025 14:57:12 -0800 Subject: [PATCH 047/231] tools headers: Sync x86 headers with kernel sources commit 369e91bd201d15a711f952ee9ac253a8b91628a3 upstream. 
To pick up changes from: 54de197c9a5e8f52 ("Merge tag 'x86_sgx_for_6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") 679fcce0028bf101 ("Merge tag 'kvm-x86-svm-6.19' of https://github.com/kvm-x86/linux into HEAD") 3767def18f4cc394 ("x86/cpufeatures: Add support for L3 Smart Data Cache Injection Allocation Enforcement") f6106d41ec84e552 ("x86/bugs: Use an x86 feature to track the MMIO Stale Data mitigation") 7baadd463e147fdc ("x86/cpufeatures: Enumerate the LASS feature bits") 47955b58cf9b97fe ("x86/cpufeatures: Correct LKGS feature flag description") 5d0316e25defee47 ("x86/cpufeatures: Add X86_FEATURE_X2AVIC_EXT") 6ffdb49101f02313 ("x86/cpufeatures: Add X86_FEATURE_SGX_EUPDATESVN feature flag") 4793f990ea152330 ("KVM: x86: Advertise EferLmsleUnsupported to userspace") bb5f13df3c455110 ("perf/x86/intel: Add counter group support for arch-PEBS") 52448a0a739002ec ("perf/x86/intel: Setup PEBS data configuration and enable legacy groups") d21954c8a0ffbc94 ("perf/x86/intel: Process arch-PEBS records or record fragments") bffeb2fd0b9c99d8 ("x86/microcode/intel: Enable staging when available") 740144bc6bde9d44 ("x86/microcode/intel: Establish staging control logic") This should address these tools/perf build warnings: Warning: Kernel ABI header differences: diff -u tools/arch/x86/include/asm/cpufeatures.h arch/x86/include/asm/cpufeatures.h diff -u tools/arch/x86/include/asm/msr-index.h arch/x86/include/asm/msr-index.h Please see tools/include/uapi/README. 
Intel-SIG: commit 369e91bd201d tools headers: Sync x86 headers with kernel sources Cc: x86@kernel.org Signed-off-by: Namhyung Kim [jz: only following 3 relevant commits are sync'ed: bb5f13df3c455110 ("perf/x86/intel: Add counter group support for arch-PEBS") 52448a0a739002ec ("perf/x86/intel: Setup PEBS data configuration and enable legacy groups") d21954c8a0ffbc94 ("perf/x86/intel: Process arch-PEBS records or record fragments")] Signed-off-by: Jason Zeng --- tools/arch/x86/include/asm/msr-index.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index ad5066f2e0b5..fc0bb2f6b4a3 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -309,6 +309,26 @@ PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE | \ PERF_CAP_PEBS_TIMING_INFO) +/* Arch PEBS */ +#define MSR_IA32_PEBS_BASE 0x000003f4 +#define MSR_IA32_PEBS_INDEX 0x000003f5 +#define ARCH_PEBS_OFFSET_MASK 0x7fffff +#define ARCH_PEBS_INDEX_WR_SHIFT 4 + +#define ARCH_PEBS_RELOAD 0xffffffff +#define ARCH_PEBS_CNTR_ALLOW BIT_ULL(35) +#define ARCH_PEBS_CNTR_GP BIT_ULL(36) +#define ARCH_PEBS_CNTR_FIXED BIT_ULL(37) +#define ARCH_PEBS_CNTR_METRICS BIT_ULL(38) +#define ARCH_PEBS_LBR_SHIFT 40 +#define ARCH_PEBS_LBR (0x3ull << ARCH_PEBS_LBR_SHIFT) +#define ARCH_PEBS_VECR_XMM BIT_ULL(49) +#define ARCH_PEBS_GPR BIT_ULL(61) +#define ARCH_PEBS_AUX BIT_ULL(62) +#define ARCH_PEBS_EN BIT_ULL(63) +#define ARCH_PEBS_CNTR_MASK (ARCH_PEBS_CNTR_GP | ARCH_PEBS_CNTR_FIXED | \ + ARCH_PEBS_CNTR_METRICS) + #define MSR_IA32_RTIT_CTL 0x00000570 #define RTIT_CTL_TRACEEN BIT(0) #define RTIT_CTL_CYCLEACC BIT(1) -- Gitee From 842a2cf3627a544825fb9012f52579a3186fe383 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Wed, 4 Mar 2026 16:39:52 +0800 Subject: [PATCH 048/231] x86/mce: Update Centaur MCA support zhaoxin inclusion category: feature ------------------- Supplement MCA support for some Zhaoxin CPUs which 
use X86_VENDOR_CENTAUR as vendor ID Signed-off-by: LeoLiu-oc --- arch/x86/kernel/cpu/mce/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 79fe73dfb78b..cb99e2866228 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2217,6 +2217,7 @@ void mca_bsp_init(struct cpuinfo_x86 *c) case X86_VENDOR_INTEL: intel_apply_global_quirks(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: zhaoxin_apply_global_quirks(c); break; -- Gitee From 412617b71ee617e54ec2e29a64ff7281c8f5114b Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Wed, 4 Mar 2026 18:36:40 +0800 Subject: [PATCH 049/231] crypto: x86: zhaoxin-aes: Switch to X86_MATCH macro for CPU ID matching This patch modernizes CPU ID matching for AES algorithm support by replacing the legacy {} list format with the standardized X86_MATCH_VENDOR_FAM_FEATURE macro. The changes are applied across two files: - In drivers/crypto/padlock-aes.c, the old static { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT } entry is replaced with X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XCRYPT, NULL) to align with modern x86 CPU ID matching conventions. - In drivers/crypto/zhaoxin-aes.c, the CPU ID array is renamed to zhaoxin_aes_cpu_ids for clarity, and all entries (including support for Zhaoxin family 6 CPUs and both ZHAOXIN/CENTAUR vendor IDs for relevant Zhaoxin processors) are converted to use the X86_MATCH_VENDOR_FAM_FEATURE macro. This update ensures consistent and maintainable CPU ID matching while enabling proper detection of the Zhaoxin Advanced Cryptography Engine (ACE) on processors that use either ZHAOXIN or CENTAUR vendor IDs, thereby supporting fast AES cryptographic operations. 
Signed-off-by: LeoLiu-oc --- drivers/crypto/padlock-aes.c | 2 +- drivers/crypto/zhaoxin-aes.c | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index f0c3127941ae..eb5f9ede1090 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XCRYPT, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c index e1d029fa9d1a..5b24a6536129 100644 --- a/drivers/crypto/zhaoxin-aes.c +++ b/drivers/crypto/zhaoxin-aes.c @@ -460,18 +460,19 @@ static struct skcipher_alg cbc_aes_alg = { .decrypt = cbc_aes_decrypt, }; -static const struct x86_cpu_id zhaoxin_cpu_id[] = { - { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, - { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, +static const struct x86_cpu_id zhaoxin_aes_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XCRYPT, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XCRYPT, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XCRYPT, NULL), {} }; -MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_aes_cpu_ids); static int __init padlock_init(void) { int ret; - if (!x86_match_cpu(zhaoxin_cpu_id)) + if (!x86_match_cpu(zhaoxin_aes_cpu_ids)) return -ENODEV; if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { -- Gitee From a47f16ffc1b6cef0f19860fa3da5f1aa891f9fa0 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Wed, 4 Mar 2026 18:37:49 +0800 Subject: [PATCH 050/231] crypto: x86: zhaoxin-sha: Switch to X86_MATCH macro for CPU ID matching This patch modernizes CPU ID matching for SHA algorithm support by 
replacing the legacy {} list format with the standardized X86_MATCH_VENDOR_FAM_FEATURE macro. The changes are applied across two files: - In drivers/crypto/padlock-sha.c, the old static { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE } entry is replaced with X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_PHE, NULL) to align with modern x86 CPU ID matching conventions. - In drivers/crypto/zhaoxin-sha.c, the CPU ID array is renamed to zhaoxin_sha_cpu_ids for clarity, and all entries (including support for Zhaoxin family 6 CPUs and both ZHAOXIN/CENTAUR vendor IDs for relevant Zhaoxin processors) are converted to use the X86_MATCH_VENDOR_FAM_FEATURE macro. This update ensures consistent and maintainable CPU ID matching while enabling proper detection of the Zhaoxin Advanced Cryptography Engine (ACE) on processors that use either ZHAOXIN or CENTAUR vendor IDs, thereby supporting fast SHA1/SHA256 cryptographic operations. Signed-off-by: LeoLiu-oc --- drivers/crypto/padlock-sha.c | 2 +- drivers/crypto/zhaoxin-sha.c | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 04858dc8b597..2e82c5e77f7a 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -491,7 +491,7 @@ static struct shash_alg sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_PHE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c index 840805f36838..37828774a9f0 100644 --- a/drivers/crypto/zhaoxin-sha.c +++ b/drivers/crypto/zhaoxin-sha.c @@ -245,12 +245,13 @@ static struct shash_alg sha256_alg_zhaoxin = { } }; -static const struct x86_cpu_id zhaoxin_sha_ids[] = { - { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, - { 
X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, +static const struct x86_cpu_id zhaoxin_sha_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_PHE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_PHE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_PHE, NULL), {} }; -MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_cpu_ids); static int __init padlock_init(void) { @@ -258,7 +259,7 @@ static int __init padlock_init(void) struct shash_alg *sha1; struct shash_alg *sha256; - if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + if (!x86_match_cpu(zhaoxin_sha_cpu_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) return -ENODEV; sha1 = &sha1_alg_zhaoxin; -- Gitee From 5e44c5d659d9c275dcbb6d72812090c36136dc17 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Thu, 5 Mar 2026 10:46:36 +0800 Subject: [PATCH 051/231] cpufreq: ACPI: Update ITMT support when CPPC enabled This patch updates ITMT (Intel Turbo Max Technology) support in the ACPI cpufreq driver when CPPC (Collaborative Processor Performance Control) is enabled. Key changes include: Moves sched_set_itmt() and related work queue logic from arch/x86/kernel/itmt.c to drivers/cpufreq/acpi-cpufreq.c for better integration with cpufreq operations. Adds error handling for cppc_get_highest_perf() to gracefully handle failures when retrieving performance capabilities. Fixes the vendor check to use the current CPU's vendor (c->x86_vendor) instead of the boot CPU's vendor, ensuring correct ITMT initialization for all CPUs. Adds inline attribute to core_set_itmt_prio() when CPPC is disabled, and initializes variables for better code consistency. Exports sched_set_itmt_support() in itmt.c to maintain compatibility with the moved functionality. These changes ensure proper ITMT support for Centaur and Zhaoxin CPUs when using CPPC, improving performance scaling and reliability. 
Signed-off-by: LeoLiu-oc --- arch/x86/include/asm/topology.h | 3 --- arch/x86/kernel/itmt.c | 15 +------------- drivers/cpufreq/acpi-cpufreq.c | 35 +++++++++++++++++++++++---------- 3 files changed, 26 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 5119b7e170c4..76b1d87f1531 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -246,9 +246,6 @@ extern unsigned int __read_mostly sysctl_sched_itmt_enabled; /* Interface to set priority of a cpu */ void sched_set_itmt_core_prio(int prio, int core_cpu); -/* Interface to enable ITMT settings in the scheduler */ -void sched_set_itmt(void); - /* Interface to notify scheduler that system supports ITMT */ int sched_set_itmt_support(void); diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c index be88e98a9966..b49ac8ecbbd6 100644 --- a/arch/x86/kernel/itmt.c +++ b/arch/x86/kernel/itmt.c @@ -122,6 +122,7 @@ int sched_set_itmt_support(void) return 0; } +EXPORT_SYMBOL_GPL(sched_set_itmt_support); /** * sched_clear_itmt_support() - Revoke platform's support of ITMT @@ -182,17 +183,3 @@ void sched_set_itmt_core_prio(int prio, int cpu) per_cpu(sched_core_priority, cpu) = prio; } EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio); - -/* The work item is needed to avoid CPU hotplug locking issues */ -static void sched_itmt_work_fn(struct work_struct *work) -{ - sched_set_itmt_support(); -} - -static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn); - -void sched_set_itmt(void) -{ - schedule_work(&sched_itmt_work); -} -EXPORT_SYMBOL_GPL(sched_set_itmt); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index cef5fd935253..40209ebe5227 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -21,15 +21,12 @@ #include #include #include - #include #include #include #include - #include #include - #include #include #include @@ -627,6 +624,19 @@ static int acpi_cpufreq_blacklist(struct 
cpuinfo_x86 *c) } #endif +/* The work item is needed to avoid CPU hotplug locking issues */ +static void sched_itmt_work_fn(struct work_struct *work) +{ + sched_set_itmt_support(); +} + +static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn); + +static void sched_set_itmt(void) +{ + schedule_work(&sched_itmt_work); +} + #ifdef CONFIG_ACPI_CPPC_LIB /* * get_max_boost_ratio: Computes the max_boost_ratio as the ratio @@ -680,12 +690,18 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) static bool cppc_highest_perf_diff; static struct cpumask core_prio_cpumask; + static void core_set_itmt_prio(int cpu) { - u64 highest_perf; + u64 highest_perf = 0; + int ret = 0; static u64 max_highest_perf = 0, min_highest_perf = U64_MAX; - cppc_get_highest_perf(cpu, &highest_perf); + ret = cppc_get_highest_perf(cpu, &highest_perf); + if (ret) { + pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret); + return; + } sched_set_itmt_core_prio(highest_perf, cpu); cpumask_set_cpu(cpu, &core_prio_cpumask); @@ -714,7 +730,7 @@ static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) { return 0; } -static void core_set_itmt_prio(int cpu) {} +static inline void core_set_itmt_prio(int cpu) {} #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -727,7 +743,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) u64 max_boost_ratio, nominal_freq = 0; unsigned int valid_states = 0; unsigned int result = 0; - unsigned int i, j; + unsigned int i, j = 0; #ifdef CONFIG_SMP static int blacklisted; #endif @@ -791,11 +807,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_info_once("overriding BIOS provided _PSD data\n"); } #endif - - if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || - boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) { for_each_cpu(j, policy->cpus) core_set_itmt_prio(j); + } /* capability 
check */ if (perf->state_count <= 1) { -- Gitee From e8693b57b8ba19130460ce35ee8c4fb54791872b Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:08:19 +0800 Subject: [PATCH 052/231] efi: cper: Update Zhaoxin/Centaur ZDI/ZPI error decode This patch restricts ZDI and ZPI error handling code to X86 architecture and Zhaoxin/Centaur CPUs. Key changes include: - Wrapping the zdi_zpi_err_type_strs array, cper_zdi_zpi_err_type_str(), and its header declaration with #ifdef CONFIG_X86 to limit compilation to X86 systems. - Moving the vendor check (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||X86_VENDOR_CENTAUR) from cper_print_proc_generic_zdi_zpi() to cper_print_proc_generic(), ensuring ZDI/ZPI-specific error printing only runs for relevant CPUs. - Cleaning up redundant conditional compilation in cper_print_proc_generic_zdi_zpi() while maintaining the same error reporting logic for ZDI/ZPI errors. These changes ensure that ZDI/ZPI error handling is properly scoped to supported architectures and vendors, improving code maintainability and reducing unnecessary code paths on non-relevant systems. 
Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 40 ++++++++++++++++++------------------- include/linux/cper.h | 2 ++ 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index bcbcf4d37cce..8c2343c3cb28 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -141,7 +141,8 @@ static const char * const proc_flag_strs[] = { "corrected", }; -static const char *const zdi_zpi_err_type_strs[] = { +#ifdef CONFIG_X86 +static const char * const zdi_zpi_err_type_strs[] = { "No Error", "Training Error Status (PHY)", "Data Link Protocol Error Status (DLL)", @@ -171,28 +172,23 @@ const char *cper_zdi_zpi_err_type_str(unsigned int etype) EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str); static void cper_print_proc_generic_zdi_zpi(const char *pfx, - const struct cper_sec_proc_generic *zdi_zpi) -{ -#if IS_ENABLED(CONFIG_X86) + const struct cper_sec_proc_generic *zdi_zpi) { u8 etype = zdi_zpi->responder_id; - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || - boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) { - if ((zdi_zpi->requestor_id & 0xff) == 7) { - pr_info("%s general processor error(zpi error)\n", pfx); - } else if ((zdi_zpi->requestor_id & 0xff) == 6) { - pr_info("%s general processor error(zdi error)\n", pfx); - } else { - pr_info("%s general processor error(unknown error)\n", pfx); - return; - } - pr_info("%s bus number %llx device number %llx function number 0\n", pfx, - ((zdi_zpi->requestor_id)>>8) & 0xff, zdi_zpi->requestor_id & 0xff); - pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id, - cper_zdi_zpi_err_type_str(etype)); + if ((zdi_zpi->requestor_id & 0xff) == 7) { + pr_info("%s general processor error(zpi error)\n", pfx); + } else if ((zdi_zpi->requestor_id & 0xff) == 6) { + pr_info("%s general processor error(zdi error)\n", pfx); + } else { + pr_info("%s general processor error(unknown error)\n", pfx); + return; } -#endif + pr_info("%s bus number 
%llx device number %llx function number 0\n", + pfx, ((zdi_zpi->requestor_id) >> 8) & 0xff, zdi_zpi->requestor_id & 0xff); + pr_info("%s apic id %lld error_type: %s\n", + pfx, zdi_zpi->proc_id, cper_zdi_zpi_err_type_str(etype)); } +#endif static void cper_print_proc_generic(const char *pfx, const struct cper_sec_proc_generic *proc) @@ -238,7 +234,11 @@ static void cper_print_proc_generic(const char *pfx, if (proc->validation_bits & CPER_PROC_VALID_IP) printk("%s""IP: 0x%016llx\n", pfx, proc->ip); - cper_print_proc_generic_zdi_zpi(pfx, proc); +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + cper_print_proc_generic_zdi_zpi(pfx, proc); +#endif } static const char * const mem_err_type_strs[] = { diff --git a/include/linux/cper.h b/include/linux/cper.h index d48bc91a612c..926b9700bd78 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -596,5 +596,7 @@ void cper_estatus_print(const char *pfx, int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); int cper_estatus_check(const struct acpi_hest_generic_status *estatus); +#ifdef CONFIG_X86 const char *cper_zdi_zpi_err_type_str(unsigned int etype); +#endif /* CONFIG_X86 */ #endif -- Gitee From 7901e4fcdd598453de9db0407e52231181cd7063 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:12:57 +0800 Subject: [PATCH 053/231] efi/cper: Add Zhaoxin ZDI ZPI error decode for KH-50000 The Zhaoxin KH-50000 processor's ZDI and ZPI support reporting additional error types with more detailed analysis, hence the addition of KH-50000 ZDI and ZPI error parsing. 
Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 83 +++++++++++++++++++++++++++++++++++-- 1 file changed, 80 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 8c2343c3cb28..e248f24f9e57 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -162,17 +162,40 @@ static const char * const zdi_zpi_err_type_strs[] = { "ZPI Gen2 Link Speed Unreliable Status", "ZDI Gen3 Link Speed Unreliable Status", "ZDI Gen4 Link Speed Unreliable Status", + "SL_NA_CYCLE Status", + "8.0GT/s Link Speed Unreliable Status", + "16.0GT/s Link Speed Unreliable Status", + "32.0GT/s Link Speed Unreliable Status", + "X2 Link Width Unreliable Status", + "X4 Link Width Unreliable Status", + "X8 Link Width Unreliable Status", + "X16X12 Link Width Unreliable Status", + "X32X24 Link Width Unreliable Status", }; const char *cper_zdi_zpi_err_type_str(unsigned int etype) { + switch (boot_cpu_data.x86_model) { + case 0x5b: + if (etype >= 0x13) + return "unknown error"; + break; + case 0x7b: + if (etype == 0x6 || (etype >= 0xb && etype <= 0x12)) + return "unknown error"; + break; + default: + return "unknown error"; + } return etype < ARRAY_SIZE(zdi_zpi_err_type_strs) ? 
- zdi_zpi_err_type_strs[etype] : "unknown error"; + zdi_zpi_err_type_strs[etype] : + "unknown error"; } EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str); -static void cper_print_proc_generic_zdi_zpi(const char *pfx, - const struct cper_sec_proc_generic *zdi_zpi) { +static void cper_print_proc_generic_zdi_zpi_kh40000(const char *pfx, + const struct cper_sec_proc_generic *zdi_zpi) +{ u8 etype = zdi_zpi->responder_id; if ((zdi_zpi->requestor_id & 0xff) == 7) { @@ -188,6 +211,60 @@ static void cper_print_proc_generic_zdi_zpi(const char *pfx, pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id, cper_zdi_zpi_err_type_str(etype)); } + +static void cper_print_proc_generic_zdi_zpi_kh50000(const char *pfx, + const struct cper_sec_proc_generic *zdi_zpi) +{ + u8 etype = (zdi_zpi->requestor_id & 0xff) >> 4; + const char *zdi_etype_str; + + if (!(zdi_zpi->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) || + !(zdi_zpi->validation_bits & CPER_PROC_VALID_RESPONDER_ID)) + return; + + if (etype == 0xf) { + pr_info("%s general processor error(zpi port 0x%llx error)\n", + pfx, zdi_zpi->requestor_id & 0xf); + } else if (etype >= 0x0 && etype <= 0xb) { + switch (zdi_zpi->requestor_id & 0xf) { + case 0x0: + zdi_etype_str = "ccdzdi"; + break; + case 0x1: + zdi_etype_str = "iodzdi"; + break; + default: + zdi_etype_str = "unknown"; + } + pr_info("%s general processor error(zdi port 0x%x %s error)\n", + pfx, etype, zdi_etype_str); + } else { + pr_info("%s general processor error(unknown error)\n", pfx); + return; + } + + pr_info("%s socket id %lld error_type: %s\n", + pfx, + (zdi_zpi->requestor_id & 0xfff) >> 8, + cper_zdi_zpi_err_type_str(zdi_zpi->responder_id)); +} + +static void cper_print_proc_generic_zdi_zpi(const char *pfx, + const struct cper_sec_proc_generic *zdi_zpi) +{ + if (boot_cpu_data.x86 == 0x7) { + switch (boot_cpu_data.x86_model) { + case 0x5b: + cper_print_proc_generic_zdi_zpi_kh40000(pfx, zdi_zpi); + break; + case 0x7b: + 
cper_print_proc_generic_zdi_zpi_kh50000(pfx, zdi_zpi); + break; + default: + return; + } + } +} #endif static void cper_print_proc_generic(const char *pfx, -- Gitee From fb372196b34fea2e1f40aee1ddf90a1b5504c9be Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:14:02 +0800 Subject: [PATCH 054/231] efi/cper: Add Zhaoxin non standard cper error decode zhaoxin inclusion category: feature -------------------- The Zhaoxin processor's HIF and SVID error types are not defined in the UEFI spec and are reported via a non-standard CPER structure. This patch adds support to decode these proprietary error records. Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 131 ++++++++++++++++++++++++++++++++++++ include/linux/cper.h | 45 +++++++++++++ 2 files changed, 176 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index e248f24f9e57..c51a35484e66 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -634,6 +634,121 @@ static void cper_print_fw_err(const char *pfx, print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true); } +static const char *const svid_error_type_strs[] = { + "reserved", + "SVID resend fail error", + "VRM over current error", + "VRM over temperature error", + "VRM parity error", +}; + +static void cper_print_svid_err(const char *pfx, const struct cper_sec_svid *svid) +{ + if (svid->validation_bits & CPER_SVID_VALID_SOCKET_ID) + pr_info("%s socket id : %d\n", pfx, svid->socket_id); + if (svid->validation_bits & CPER_SVID_VALID_SVID_ID) + pr_info("%s svid id : %d\n", pfx, svid->svid_id); + if (svid->validation_bits & CPER_SVID_VALID_VRM_NUM) + pr_info("%s vrm number : %d\n", pfx, svid->vrm_number); + if (svid->validation_bits & CPER_SVID_VALID_ERROR_TYPE) + pr_info("%s error type: %d, %s\n", pfx, svid->error_type, + svid->error_type < ARRAY_SIZE(svid_error_type_strs) ? 
+ svid_error_type_strs[svid->error_type] : "unknown"); +} + +static const char *const cxl_error_type_strs[] = { + "reserved", + "reserved", + "reserved", + "reserved", + "decode poison UC error", + "decode poison CE error", + "parity error", + "reserved", + "timeout error", +}; + +static const char *const snt_error_type_strs[] = { + "no error", + "correctable error", + "uncorrectable error", + "multi correctable error", + "multi Uncorrectable error", + "uncorrectable + correctable error", +}; + +static void dump_cxl_error_type(const char *pfx, u8 port, u64 decode_addr, u8 type) +{ + pr_info("%s CXL Port%d Error Type: %d, %s\n", + pfx, + port, + type, + type < ARRAY_SIZE(cxl_error_type_strs) ? + cxl_error_type_strs[type] : + "unknown"); + if (type == 0x4 || type == 0x5) + pr_info("%s CXL Decode Error Address: 0x%llx\n", pfx, decode_addr); +} + +static void dump_hif_error_type(const char *pfx, const struct cper_sec_hif *hif) +{ + u64 data, addr; + + data = (u64)hif->snt_error_data[0] | + (u64)hif->snt_error_data[1] << 8 | + (u64)hif->snt_error_data[2] << 16 | + (u64)hif->snt_error_data[3] << 24 | + (u64)hif->snt_error_data[4] << 32; + addr = (u64)hif->snt_error_addr[0] | + (u64)hif->snt_error_addr[1] << 8 | + (u64)hif->snt_error_addr[2] << 16 | + (u64)hif->snt_error_addr[3] << 24 | + (u64)hif->snt_error_addr[4] << 32; + pr_info("%s SNT Location, Way: %d Bank Number: %d Channel: %d\n", + pfx, + (hif->snt_location >> 4) & 0x7, + (hif->snt_location >> 2) & 0x3, + hif->snt_location & 0x3); + pr_info("%s SNT Error Type: %d, %s\n", + pfx, + hif->snt_error_type, + hif->snt_error_type < ARRAY_SIZE(snt_error_type_strs) ? 
+ snt_error_type_strs[hif->snt_error_type] : + "unknown"); + pr_info("%s SNT Error Data: 0x%llx, Error Address: 0x%llx\n", pfx, data, addr); +} + +static void cper_print_hif_err(const char *pfx, const struct cper_sec_hif *hif) +{ + pr_info("%s Error occurred at Socket %d, Hnode %d\n", pfx, hif->socket_id, hif->hnod_id); + + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL0) + pr_info("%s Sub channel 0 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[0]); + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL1) + pr_info("%s Sub channel 1 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[1]); + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL2) + pr_info("%s Sub channel 2 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[2]); + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL3) + pr_info("%s Sub channel 3 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[3]); + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL4) + pr_info("%s Sub channel 4 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[4]); + if (hif->validation_bits & CPER_HIF_VALID_DVAD_CHANNEL5) + pr_info("%s Sub channel 5 DVAD Error Address : %llx\n", + pfx, hif->dvad_error_addr[5]); + if (hif->validation_bits & CPER_HIF_VALID_SNT) + dump_hif_error_type(pfx, hif); + + if (hif->validation_bits & CPER_HIF_VALID_CXL_PORT0) + dump_cxl_error_type(pfx, 0, hif->cxl_decode_error_addr[0], hif->cxl_error_type[0]); + if (hif->validation_bits & CPER_HIF_VALID_CXL_PORT1) + dump_cxl_error_type(pfx, 1, hif->cxl_decode_error_addr[1], hif->cxl_error_type[1]); +} + static void cper_print_tstamp(const char *pfx, struct acpi_hest_generic_data_v300 *gdata) { @@ -739,6 +854,22 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata cper_print_prot_err(newpfx, prot_err); else goto err_section_too_small; + } else if (guid_equal(sec_type, &CPER_SEC_SVID)) { + struct cper_sec_svid *svid_err = acpi_hest_get_payload(gdata); + + 
printk("%ssection_type: SVID Error\n", newpfx); + if (gdata->error_data_length >= sizeof(*svid_err)) + cper_print_svid_err(newpfx, svid_err); + else + goto err_section_too_small; + } else if (guid_equal(sec_type, &CPER_SEC_HIF)) { + struct cper_sec_hif *hif_err = acpi_hest_get_payload(gdata); + + printk("%ssection_type: HIF Error\n", newpfx); + if (gdata->error_data_length >= sizeof(*hif_err)) + cper_print_hif_err(newpfx, hif_err); + else + goto err_section_too_small; } else { const void *err = acpi_hest_get_payload(gdata); diff --git a/include/linux/cper.h b/include/linux/cper.h index 926b9700bd78..c07d512cac3c 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -202,6 +202,14 @@ enum { GUID_INIT(0x74D255B0, 0x73E8, 0x488A, 0xB8, 0x67, 0x90, 0x65, \ 0x4A, 0x35, 0x86, 0x5E) +#define CPER_SEC_SVID \ + GUID_INIT(0x3B1E4A7C, 0x9F82, 0x4E3D, 0x8A, 0x5B, 0x1C, 0x7D, \ + 0x3F, 0x6E, 0x8A, 0x2C) + +#define CPER_SEC_HIF \ + GUID_INIT(0x137B5703, 0xFD9C, 0x4C3F, 0xA2, 0x75, 0x3E, 0x00, \ + 0x2A, 0xDF, 0x7B, 0x04) + #define CPER_PROC_VALID_TYPE 0x0001 #define CPER_PROC_VALID_ISA 0x0002 #define CPER_PROC_VALID_ERROR_TYPE 0x0004 @@ -258,6 +266,21 @@ enum { #define CPER_PCIE_SLOT_SHIFT 3 +#define CPER_SVID_VALID_SOCKET_ID 0x1 +#define CPER_SVID_VALID_SVID_ID 0x2 +#define CPER_SVID_VALID_VRM_NUM 0x4 +#define CPER_SVID_VALID_ERROR_TYPE 0x8 + +#define CPER_HIF_VALID_DVAD_CHANNEL0 0x1 +#define CPER_HIF_VALID_DVAD_CHANNEL1 0x2 +#define CPER_HIF_VALID_DVAD_CHANNEL2 0x4 +#define CPER_HIF_VALID_DVAD_CHANNEL3 0x8 +#define CPER_HIF_VALID_DVAD_CHANNEL4 0x10 +#define CPER_HIF_VALID_DVAD_CHANNEL5 0x20 +#define CPER_HIF_VALID_SNT 0x40 +#define CPER_HIF_VALID_CXL_PORT0 0x80 +#define CPER_HIF_VALID_CXL_PORT1 0x100 + #define CPER_ARM_VALID_MPIDR BIT(0) #define CPER_ARM_VALID_AFFINITY_LEVEL BIT(1) #define CPER_ARM_VALID_RUNNING_STATE BIT(2) @@ -559,6 +582,28 @@ struct cper_sec_ubus { u32 err_info[72]; }; +struct cper_sec_svid { + u8 validation_bits; + u8 socket_id; + u8 svid_id; + u8 
vrm_number; + u16 error_type; + u16 reserved; +}; + +struct cper_sec_hif { + u16 validation_bits; + u8 socket_id; + u8 hnod_id; + u8 snt_location; + u8 snt_error_type; + u8 snt_error_data[5]; + u8 snt_error_addr[5]; + u64 dvad_error_addr[6]; + u64 cxl_decode_error_addr[2]; + u8 cxl_error_type[2]; +}; + /* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */ struct cper_sec_fw_err_rec_ref { u8 record_type; -- Gitee From 4dab5e55d0e5f21e67534aface6c38d3558a0ad5 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:14:49 +0800 Subject: [PATCH 055/231] efi/cper: Add Zhaoxin micro-architectural error decode zhaoxin inclusion category: feature -------------------- Zhaoxin processors report detailed micro-architectural error classifications by re-purposing the Responder ID and Requestor ID fields. This patch adds support to decode these proprietary error types into human-readable messages. Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 71 ++++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index c51a35484e66..69f23a2edc00 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -265,6 +265,75 @@ static void cper_print_proc_generic_zdi_zpi(const char *pfx, } } } + +static const char * const zx_micro_arch_cpu_err_type_strs[] = { + "unknown error", + "unknown error", + "machine hang", + "undefined ucode address", +}; + +static const char * const zx_micro_arch_shutdown_err_type_strs[] = { + "unknown error", + "#PF happened again when handle #DF caused by #PF", + "MCE happened when handle #DF", + "#GP happened again when handle #DF caused by #GP", + "exit smm mode if already shutdown before enter smm mode", + "exit smm mode if CR0.CD bit is 0 and CR0.NW bit is 1 stored in SMRAM and in smm mode", + "exit smm mode if CR0.PE bit is 0 and CR0.PG bit is 1 stored in SMRAM and in smm mode", + "exit smm mode if the reserved bits in 
CR4 are writed to 1 in smm mode", + "exit smm mode if CR4.VMXE bit is writed to 1 in smm mode", + "exit smm mode if CR4.PCIDE bit is writed to 1 and EFER.LMA bit is writed to 0 in smm mode", + "software inject an UC error after MCE changed to SMI happened", + "reserved", + "MCE happened when CR4 MCE is 0", + "MCE happened again when didn't clear MCIP in the first MCE handler", + "using vmentry to enter guest mode", + "configuration for VM-exit MSR-store and load address is wrong when vmexit caused by VMX guest mode", + "TXT check fail", +}; + +static void cper_print_proc_generic_zx_micro_arch(const char *pfx, + const struct cper_sec_proc_generic *arch) +{ + u8 etype = arch->responder_id; + + if (!(arch->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) || + !(arch->validation_bits & CPER_PROC_VALID_RESPONDER_ID)) + return; + + if (arch->requestor_id == 0x1) + pr_info("%s CPU Error, error type: %d, %s\n", + pfx, + etype, + etype < ARRAY_SIZE(zx_micro_arch_cpu_err_type_strs) ? + zx_micro_arch_cpu_err_type_strs[etype] : + "unknown error"); + else if (arch->requestor_id == 0x2) + pr_info("%s Shutdown Error, error type: %d, %s\n", + pfx, + etype, + etype < ARRAY_SIZE(zx_micro_arch_shutdown_err_type_strs) ? 
+ zx_micro_arch_shutdown_err_type_strs[etype] : + "unknown error"); +} + +static void cper_print_proc_generic_zx(const char *pfx, + const struct cper_sec_proc_generic *proc) +{ + if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) { + switch (proc->proc_error_type) { + case 0x4: + cper_print_proc_generic_zdi_zpi(pfx, proc); + break; + case 0x8: + cper_print_proc_generic_zx_micro_arch(pfx, proc); + break; + default: + break; + } + } +} #endif static void cper_print_proc_generic(const char *pfx, @@ -314,7 +383,7 @@ static void cper_print_proc_generic(const char *pfx, #ifdef CONFIG_X86 if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) - cper_print_proc_generic_zdi_zpi(pfx, proc); + cper_print_proc_generic_zx(pfx, proc); #endif } -- Gitee From 35abb97a98412034ee62f20b9c9ef31dc90d8d50 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:15:27 +0800 Subject: [PATCH 056/231] efi/cper: Add Zhaoxin cache error decode zhaoxin inclusion category: feature -------------------- Zhaoxin processors report detailed cache error types by re-purposing the Responder ID field. This patch adds a decoder to translate these numeric values into human-readable strings. 
Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 69f23a2edc00..1f73c092b27e 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -318,11 +318,39 @@ static void cper_print_proc_generic_zx_micro_arch(const char *pfx, "unknown error"); } +static const char * const zx_cache_err_type_strs[] = { + "unknown error", + "single bit ECC for data part in the same line", + "single bit ECC for different line", + "multi bit ECC for data part", +}; + +static void cper_print_proc_generic_zx_cache(const char *pfx, + const struct cper_sec_proc_generic *cache) +{ + u8 etype = cache->responder_id; + const char *etype_str = etype < ARRAY_SIZE(zx_cache_err_type_strs) ? + zx_cache_err_type_strs[etype] : + "unknown error"; + + if (!(cache->validation_bits & CPER_PROC_VALID_LEVEL) || + !(cache->validation_bits & CPER_PROC_VALID_RESPONDER_ID)) + return; + + if (cache->level == 0x2) + pr_info("%s PL2 Error, error type: %d, %s\n", pfx, etype, etype_str); + else if (cache->level == 0x3) + pr_info("%s LLC Error, error type: %d, %s\n", pfx, etype, etype_str); +} + static void cper_print_proc_generic_zx(const char *pfx, const struct cper_sec_proc_generic *proc) { if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) { switch (proc->proc_error_type) { + case 0x1: + cper_print_proc_generic_zx_cache(pfx, proc); + break; case 0x4: cper_print_proc_generic_zdi_zpi(pfx, proc); break; -- Gitee From fc5bac1186d046465e2d03c0b7e615cf923aafeb Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:15:50 +0800 Subject: [PATCH 057/231] efi/cper: Add Zhaoxin mem error decode for KH-50000 zhaoxin inclusion category: feature -------------------- Some memory error types on the Zhaoxin KH-50000 do not fit the UEFI-standard Memory Error Type classification. These errors are reported by repurposing the Requestor ID field. 
Add parsing logic to decode this non-standard usage and present human-readable error messages. Signed-off-by: LeoLiu-oc --- drivers/firmware/efi/cper.c | 49 +++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 1f73c092b27e..f043c5988799 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -362,6 +362,49 @@ static void cper_print_proc_generic_zx(const char *pfx, } } } + +static const char * const zx_mem_err_type_strs[] = { + "reserved", + "reserved", + "correctable write CRC error", + "uncorrectable write CRC error", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "reserved", + "single device error", + "multi devices error", + "correctable read CRC error", + "uncorrectable read CRC error", + "rank counter overflow", + "UC occurred after mirror broken", + "sPPR done", +}; + +static void cper_print_mem_zx(const char *pfx, const struct cper_sec_mem_err *mem) +{ + u8 etype; + + if (boot_cpu_data.x86 != 0x7 || boot_cpu_data.x86_model != 0x7b) + return; + + if (!(mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)) + return; + + etype = mem->requestor_id; + if (etype < 0x2 || (etype > 0x3 && etype < 0x10) || etype > 0x16) + return; + + pr_info("%s Specific error type: %d, %s\n", pfx, etype, zx_mem_err_type_strs[etype]); +} #endif static void cper_print_proc_generic(const char *pfx, @@ -612,6 +655,12 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, } if (cper_dimm_err_location(&cmem, rcd_decode_str)) printk("%s%s\n", pfx, rcd_decode_str); + +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + cper_print_mem_zx(pfx, mem); +#endif } static const char * const pcie_port_type_strs[] = { -- Gitee From 8e5650f7976560b004bebd81fe1657ee97a49c2c Mon Sep 
17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:27:34 +0800 Subject: [PATCH 058/231] x86/cpu: detect extended topology for Zhaoxin CPUs zhaoxin inclusion category: feature -------------------- Zhaoxin CPUs support extended topology enumeration CPUID leaf 0xb/0x1f. Zhaoxin's new product, the KH-50000, features a single socket with 96 cores and support 2/4 sockets interconnects. Additionally, the Zhaoxin KH-50000 requires the use of extended CPUID leaf 0xb/0x1f to retrieve cputopo information. Right now the kernel uses the legacy CPUID leaf 0x1/0x4 for topology detection for Zhaoxin CPUs. So add extended topology detection support for Zhaoxin CPUs. Signed-off-by: Tony W Wang-oc Signed-off-by: LeoLiu-oc --- arch/x86/kernel/cpu/topology_common.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c index 9a6069e7133c..0da37096cde5 100644 --- a/arch/x86/kernel/cpu/topology_common.c +++ b/arch/x86/kernel/cpu/topology_common.c @@ -126,8 +126,12 @@ static void parse_topology(struct topo_scan *tscan, bool early) cpu_parse_topology_amd(tscan); break; case X86_VENDOR_CENTAUR: + if (!IS_ENABLED(CONFIG_CPU_SUP_CENTAUR) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); + break; case X86_VENDOR_ZHAOXIN: - parse_legacy(tscan); + if (!IS_ENABLED(CONFIG_CPU_SUP_ZHAOXIN) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); break; case X86_VENDOR_INTEL: if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan)) -- Gitee From bfab5460456f814bc9af5f3563f3e8ab10db7ce8 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 11:36:47 +0800 Subject: [PATCH 059/231] x86/mce: Set bios_cmci_threshold for CMCI threshold zhaoxin inclusion category: feature ------------------- In the Linux kernel, the CMCI threshold is set to 1 by default. This patch prevents Linux from overwriting the CMCI threshold set by the bios. 
With this patch, the CMCI threshold can be set through the BIOS, which can also avoid CMCI storms, on Zhaoxin/Centaur CPUs. Signed-off-by: LeoLiu-oc --- arch/x86/kernel/cpu/mce/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 79fe73dfb78b..2ac918f14956 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1985,6 +1985,7 @@ static void zhaoxin_apply_global_quirks(struct cpuinfo_x86 *c) if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) { if (mca_cfg.monarch_timeout < 0) mca_cfg.monarch_timeout = USEC_PER_SEC; + mca_cfg.bios_cmci_threshold = 1; } } -- Gitee From b080f30a61f21d54dd7af32ad56764f36fca7d3c Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:04 +0800 Subject: [PATCH 060/231] iommu/vt-d: fix NULL pointer issue in acpi_rmrr_andd_probe zhaoxin inclusion category: feature ------------------- When the dev->iommu was NULL for the RMRR ANDD device, the corresponding (&pci_device->dev)->iommu could be used instead. 
Signed-off-by: leoliu-oc --- drivers/iommu/dma-iommu.c | 6 ++++++ drivers/iommu/intel/iommu.c | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index ba6ab611a791..61c1bfdc8d5c 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -594,6 +594,12 @@ int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_ unsigned long lo, hi; + if (!iovad->granule) { + unsigned long order = __ffs(domain->pgsize_bitmap); + + iovad->granule = 1UL << order; + } + lo = iova_pfn(iovad, start); hi = iova_pfn(iovad, end); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a2bcfaf61c58..e4a24f74c102 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3657,6 +3657,10 @@ static inline int acpi_rmrr_andd_probe(struct device *dev) pr_err("cannot get acpi devie corresponding pci_device\n"); return -EINVAL; } + + if (!dev->iommu) + dev->iommu = (&pci_device->dev)->iommu; + ret = acpi_rmrr_device_create_direct_mappings(iommu_get_domain_for_dev(&pci_device->dev), dev); -- Gitee From 13bb796ea3453cad45a52a6c8d26522514a1f934 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:07 +0800 Subject: [PATCH 061/231] Optimize VIA CPU Temp Monitoring During Suspend/Resume zhaoxin inclusion category: feature -------------------- This patch improves the VIA CPU temperature driver's behavior during suspend and resume. It adds conditions in 'via_cputemp_online' and 'via_cputemp_down_prep' functions to skip processes when the system is suspending or resuming, using 'cpuhp_tasks_frozen'. This prevents potential lock-ups and ensures smoother temperature monitoring during power state transitions. 
Signed-off-by: leoliu-oc --- drivers/hwmon/via-cputemp.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 0a5057dbe51a..c79fa618efaf 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -216,6 +216,13 @@ static int via_cputemp_online(unsigned int cpu) struct platform_device *pdev; struct pdev_entry *pdev_entry; + /* + * Don't execute this on suspend as the device remove locks + * up the machine. + */ + if (cpuhp_tasks_frozen) + return 0; + pdev = platform_device_alloc(DRVNAME, cpu); if (!pdev) { err = -ENOMEM; @@ -255,6 +262,13 @@ static int via_cputemp_down_prep(unsigned int cpu) { struct pdev_entry *p; + /* + * Don't execute this on suspend as the device remove locks + * up the machine. + */ + if (cpuhp_tasks_frozen) + return 0; + mutex_lock(&pdev_list_mutex); list_for_each_entry(p, &pdev_list, list) { if (p->cpu == cpu) { -- Gitee From bfc33d715e88c4942c8c6083d043163f5123c932 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:07 +0800 Subject: [PATCH 062/231] Optimize Zhaoxin CPU Temp Monitoring During Suspend/Resume zhaoxin inclusion category: feature -------------------- Modified 'zhaoxin_cputemp_down_prep' to avoid system freezes during suspend by skipping device removal when 'cpuhp_tasks_frozen' is true. Signed-off-by: leoliu-oc --- drivers/hwmon/zhaoxin-cputemp.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c index 751d2c5a868a..ae1c6f066324 100644 --- a/drivers/hwmon/zhaoxin-cputemp.c +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -189,6 +189,13 @@ static int zhaoxin_cputemp_online(unsigned int cpu) struct platform_device *pdev; struct pdev_entry *pdev_entry; + /* + * Don't execute this on suspend as the device remove locks + * up the machine. 
+ */ + if (cpuhp_tasks_frozen) + return 0; + pdev = platform_device_alloc(DRVNAME, cpu); if (!pdev) { err = -ENOMEM; @@ -228,6 +235,13 @@ static int zhaoxin_cputemp_down_prep(unsigned int cpu) { struct pdev_entry *p; + /* + * Don't execute this on suspend as the device remove locks + * up the machine. + */ + if (cpuhp_tasks_frozen) + return 0; + mutex_lock(&pdev_list_mutex); list_for_each_entry(p, &pdev_list, list) { if (p->cpu == cpu) { -- Gitee From 8cf9306520d9db54de61c69191dd58f2ec6228a3 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:08 +0800 Subject: [PATCH 063/231] i2c: zhaoxin: Enhancements and Modifications to SMBus Driver zhaoxin inclusion category: feature -------------------- 1. Remove IIC access. 2. Enablement of I2C Block Read/Write Function. 3. Returning IRQ_NONE When Host is Busy. Signed-off-by: leoliu-oc --- drivers/i2c/busses/i2c-zhaoxin-smbus.c | 125 ++++++++++++++----------- 1 file changed, 71 insertions(+), 54 deletions(-) diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c index 52c689e928af..e2a420ee128d 100644 --- a/drivers/i2c/busses/i2c-zhaoxin-smbus.c +++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Zhaoxin SMBus controller driver + * i2c-zhaoxin-smbus.c - Zhaoxin SMBus controller driver * - * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation. - * All rights reserved. + * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. 
*/ #include @@ -15,7 +15,7 @@ #include #include -#define DRIVER_VERSION "3.1.0" +#define DRIVER_VERSION "3.4.0" #define ZXSMB_NAME "smbus_zhaoxin" @@ -23,40 +23,40 @@ * registers */ /* SMBus MMIO address offsets */ -#define ZXSMB_STS 0x00 -#define ZXSMB_BUSY BIT(0) -#define ZXSMB_CMD_CMPLET BIT(1) -#define ZXSMB_DEV_ERR BIT(2) -#define ZXSMB_BUS_CLSI BIT(3) -#define ZXSMB_FAIL_TRANS BIT(4) -#define ZXSMB_STS_MASK GENMASK(4, 0) -#define ZXSMB_NSMBSRST BIT(5) -#define ZXSMB_CTL 0x02 -#define ZXSMB_CMPLT_EN BIT(0) -#define ZXSMB_KILL_PRG BIT(1) -#define ZXSMB_START BIT(6) -#define ZXSMB_PEC_EN BIT(7) -#define ZXSMB_CMD 0x03 -#define ZXSMB_ADD 0x04 -#define ZXSMB_DAT0 0x05 -#define ZXSMB_DAT1 0x06 -#define ZXSMB_BLKDAT 0x07 +#define ZXSMB_STS 0x00 +#define ZXSMB_BUSY BIT(0) +#define ZXSMB_CMD_CMPLET BIT(1) +#define ZXSMB_DEV_ERR BIT(2) +#define ZXSMB_BUS_CLSI BIT(3) +#define ZXSMB_FAIL_TRANS BIT(4) +#define ZXSMB_STS_MASK GENMASK(4, 0) +#define ZXSMB_NSMBSRST BIT(5) +#define ZXSMB_CTL 0x02 +#define ZXSMB_CMPLT_EN BIT(0) +#define ZXSMB_KILL_PRG BIT(1) +#define ZXSMB_START BIT(6) +#define ZXSMB_PEC_EN BIT(7) +#define ZXSMB_CMD 0x03 +#define ZXSMB_ADD 0x04 +#define ZXSMB_DAT0 0x05 +#define ZXSMB_DAT1 0x06 +#define ZXSMB_BLKDAT 0x07 /* * platform related information */ - /* protocol cmd constants */ -#define ZXSMB_QUICK 0x00 -#define ZXSMB_BYTE 0x04 -#define ZXSMB_BYTE_DATA 0x08 -#define ZXSMB_WORD_DATA 0x0C -#define ZXSMB_PROC_CALL 0x10 -#define ZXSMB_BLOCK_DATA 0x14 -#define ZXSMB_I2C_10_BIT_ADDR 0x18 -#define ZXSMB_I2C_PROC_CALL 0x30 -#define ZXSMB_I2C_BLOCK_DATA 0x34 -#define ZXSMB_I2C_7_BIT_ADDR 0x38 -#define ZXSMB_UNIVERSAL 0x3C +/* protocol cmd constants */ +#define ZXSMB_QUICK 0x00 +#define ZXSMB_BYTE 0x04 +#define ZXSMB_BYTE_DATA 0x08 +#define ZXSMB_WORD_DATA 0x0C +#define ZXSMB_PROC_CALL 0x10 +#define ZXSMB_BLOCK_DATA 0x14 +#define ZXSMB_I2C_10_BIT_ADDR 0x18 +#define ZXSMB_I2C_PROC_CALL 0x30 +#define ZXSMB_I2C_BLOCK_DATA 0x34 +#define ZXSMB_I2C_7_BIT_ADDR 0x38 
+#define ZXSMB_UNIVERSAL 0x3C #define ZXSMB_TIMEOUT 500 @@ -74,11 +74,13 @@ struct zxsmb { static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id) { struct zxsmb *smb = (struct zxsmb *)dev_id; + u8 status; - smb->status = inb(smb->base + ZXSMB_STS); - if ((smb->status & ZXSMB_STS_MASK) == 0) + status = inb(smb->base + ZXSMB_STS); + if ((status & ZXSMB_STS_MASK) == 0 || (status & ZXSMB_BUSY)) return IRQ_NONE; + smb->status = status; /* clear status */ outb(smb->status, smb->base + ZXSMB_STS); complete(&smb->complete); @@ -138,7 +140,14 @@ static int zxsmb_wait_polling_finish(struct zxsmb *smb) } while ((status & ZXSMB_BUSY) && (--time_left)); if (time_left == 0) { - dev_dbg(smb->dev, "polling timeout\n"); + if (status & ZXSMB_BUSY) { + /* kill process and try again */ + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + outb(status, smb->base + ZXSMB_STS); + dev_err(smb->dev, "timeout and Bus still busy\n"); + return -EAGAIN; + } + dev_err(smb->dev, "polling timeout\n"); return -EIO; } @@ -200,7 +209,7 @@ static int zxsmb_transaction(struct zxsmb *smb) } static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command, - int size, union i2c_smbus_data *data) + int size, union i2c_smbus_data *data) { int i; int err; @@ -232,25 +241,20 @@ static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char outb(data->word & 0xff, base + ZXSMB_DAT0); outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1); } - size = (size == I2C_SMBUS_PROC_CALL) ? - ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; + size = (size == I2C_SMBUS_PROC_CALL) ? 
ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; break; case I2C_SMBUS_I2C_BLOCK_DATA: case I2C_SMBUS_BLOCK_DATA: len = data->block[0]; - if (read && size == I2C_SMBUS_I2C_BLOCK_DATA) - outb(len, base + ZXSMB_DAT1); + outb(len, base + ZXSMB_DAT0); outb(command, base + ZXSMB_CMD); /* Reset ZXSMB_BLKDAT */ inb(base + ZXSMB_CTL); if (!read) { - outb(len, base + ZXSMB_DAT0); - outb(0, base + ZXSMB_DAT1); for (i = 1; i <= len; i++) outb(data->block[i], base + ZXSMB_BLKDAT); } - size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? - ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; + size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; break; default: goto exit_unsupported; @@ -280,6 +284,11 @@ static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8); break; case ZXSMB_I2C_BLOCK_DATA: + inb(base + ZXSMB_CTL); + data->block[0] = len; + for (i = 1; i <= len; i++) + data->block[i] = inb(base + ZXSMB_BLKDAT); + break; case ZXSMB_BLOCK_DATA: data->block[0] = inb(base + ZXSMB_DAT0); if (data->block[0] > I2C_SMBUS_BLOCK_MAX) @@ -300,12 +309,12 @@ static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char static u32 zxsmb_func(struct i2c_adapter *adapter) { - return I2C_FUNC_SMBUS_EMUL; + return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = zxsmb_smbus_xfer, - .functionality = zxsmb_func, + .functionality = zxsmb_func, }; static int zxsmb_probe(struct platform_device *pdev) @@ -313,6 +322,7 @@ static int zxsmb_probe(struct platform_device *pdev) struct zxsmb *smb; struct resource *res; struct i2c_adapter *adap; + int err; smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL); if (!smb) @@ -329,11 +339,12 @@ static int zxsmb_probe(struct platform_device *pdev) smb->irq = platform_get_irq(pdev, 0); if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED, - 
pdev->name, smb)) { + pdev->name, smb)) { dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq); smb->irq = 0; - } else + } else { init_completion(&smb->complete); + } smb->dev = &pdev->dev; platform_set_drvdata(pdev, (void *)smb); @@ -345,10 +356,16 @@ static int zxsmb_probe(struct platform_device *pdev) adap->dev.parent = &pdev->dev; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), - dev_name(smb->dev)); + dev_name(smb->dev)); i2c_set_adapdata(&smb->adap, smb); - return i2c_add_adapter(&smb->adap); + err = i2c_add_adapter(&smb->adap); + if (err) + return err; + + dev_info(smb->dev, "adapter /dev/i2c-%d registered. version %s\n", adap->nr, + DRIVER_VERSION); + return 0; } static int zxsmb_remove(struct platform_device *pdev) @@ -373,13 +390,13 @@ static struct platform_driver zxsmb_driver = { .remove = zxsmb_remove, .driver = { .name = ZXSMB_NAME, - .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), + .acpi_match_table = zxsmb_acpi_match, }, }; module_platform_driver(zxsmb_driver); MODULE_AUTHOR("hanshu@zhaoxin.com"); -MODULE_DESCRIPTION("Zhaoxin SMBus driver"); +MODULE_DESCRIPTION("ZHAOXIN SMBus driver"); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); -- Gitee From 553c5ad8a1d5a18c04def671142dc3983290a416 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:09 +0800 Subject: [PATCH 064/231] PCI: Supplement ACS quirk for more Zhaoxin Root Ports zhaoxin inclusion category: feature -------------------- To adapt to more new Zhaoxin platforms, further supplement more Root Port Device IDs to the pci_quirk_zhaoxin_pcie_ports_acs() function. 
Signed-off-by: leoliu-oc --- drivers/pci/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 58fbf229f546..fa6ec8f6893c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4799,7 +4799,7 @@ static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) switch (dev->device) { case 0x0710 ... 0x071e: case 0x0721: - case 0x0723 ... 0x0752: + case 0x0723 ... 0x075e: return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } -- Gitee From 529ca24ed071fe695cc401bcf5269e51673ef0a0 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:10 +0800 Subject: [PATCH 065/231] ALSA: HDA: Roll back the old version for consistency with the mainline zhaoxin inclusion category: feature -------------------- The official version has already been merged into the mainline. Therefore, in order to ensure the consistency of the HDAC code with the mainline, it is necessary to roll back the current version so as to merge the mainline version subsequently. 
Signed-off-by: leoliu-oc --- sound/pci/hda/hda_controller.c | 17 +---------- sound/pci/hda/hda_controller.h | 3 -- sound/pci/hda/hda_intel.c | 56 ---------------------------------- sound/pci/hda/patch_hdmi.c | 32 ------------------- 4 files changed, 1 insertion(+), 107 deletions(-) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index b69e7b94673c..406779625fb5 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1061,16 +1061,6 @@ static void stream_update(struct hdac_bus *bus, struct hdac_stream *s) } } -static void azx_rirb_zxdelay(struct azx *chip, int enable) -{ - if (chip->remap_diu_addr) { - if (!enable) - writel(0x0, (char *)chip->remap_diu_addr + 0x490a8); - else - writel(0x1000000, (char *)chip->remap_diu_addr + 0x490a8); - } -} - irqreturn_t azx_interrupt(int irq, void *dev_id) { struct azx *chip = dev_id; @@ -1113,14 +1103,9 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { - if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) || - (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) { - azx_rirb_zxdelay(chip, 1); + if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); - } snd_hdac_bus_update_rirb(bus); - if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) - azx_rirb_zxdelay(chip, 0); } } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 9db89f4c7b3f..8556031bcd68 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -45,7 +45,6 @@ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ -#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 31) /* Put a delay before read */ enum { AZX_SNOOP_TYPE_NONE, @@ -144,8 +143,6 @@ struct azx 
{ unsigned int disabled:1; /* disabled by vga_switcheroo */ unsigned int pm_prepared:1; - void __iomem *remap_diu_addr; - /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 11d8d0134a26..1872a53852e2 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -238,7 +238,6 @@ enum { AZX_DRIVER_CTHDA, AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, - AZX_DRIVER_ZXHDMI, AZX_DRIVER_LOONGSON, AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, @@ -352,7 +351,6 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CTHDA] = "HDA Creative", [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", - [AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin HDMI", [AZX_DRIVER_LOONGSON] = "HDA Loongson", [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", @@ -376,31 +374,6 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg, pci_write_config_byte(pci, reg, data); } -static int azx_init_pci_zx(struct azx *chip) -{ - struct snd_card *card = chip->card; - unsigned int diu_reg; - struct pci_dev *diu_pci = NULL; - - azx_bus(chip)->polling_mode = 1; - diu_pci = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x3a03, NULL); - if (!diu_pci) { - dev_info(card->dev, "zx_hda no KX-5000 device.\n"); - return -ENXIO; - } - pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg); - chip->remap_diu_addr = ioremap(diu_reg, 0x50000); - pci_dev_put(diu_pci); - dev_info(card->dev, "zx_hda %x %p\n", diu_reg, chip->remap_diu_addr); - return 0; -} - -static void azx_free_pci_zx(struct azx *chip) -{ - if (chip->remap_diu_addr) - iounmap(chip->remap_diu_addr); -} - static void azx_init_pci(struct azx *chip) { int snoop_type = azx_get_snoop_type(chip); @@ -1390,9 +1363,6 @@ static void azx_free(struct azx *chip) hda->init_failed = 1; /* to be sure */ complete_all(&hda->probe_wait); - if (chip->driver_type == AZX_DRIVER_ZXHDMI) - azx_free_pci_zx(chip); - if (use_vga_switcheroo(hda)) { if 
(chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); @@ -1800,8 +1770,6 @@ static int default_bdl_pos_adj(struct azx *chip) case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: return 1; - case AZX_DRIVER_ZXHDMI: - return 128; default: return 32; } @@ -1931,11 +1899,6 @@ static int azx_first_init(struct azx *chip) chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) bus->hygon_dword_access = 1; - chip->remap_diu_addr = NULL; - - if (chip->driver_type == AZX_DRIVER_ZXHDMI) - azx_init_pci_zx(chip); - err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -2037,7 +2000,6 @@ static int azx_first_init(struct azx *chip) chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; case AZX_DRIVER_GFHDMI: - case AZX_DRIVER_ZXHDMI: case AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2787,15 +2749,6 @@ static const struct pci_device_id azx_ids[] = { { PCI_VDEVICE(VIA, 0x9170), .driver_data = AZX_DRIVER_GENERIC }, /* VIA GFX VT6122/VX11 */ { PCI_VDEVICE(VIA, 0x9140), .driver_data = AZX_DRIVER_GENERIC }, - { PCI_VDEVICE(VIA, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, - { PCI_VDEVICE(VIA, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(VIA, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(VIA, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(VIA, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* SIS966 */ { PCI_VDEVICE(SI, 0x7502), .driver_data = AZX_DRIVER_SIS }, /* ULI M5461 */ @@ -2851,15 +2804,6 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, 
/* Zhaoxin */ { PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, - { PCI_VDEVICE(ZHAOXIN, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, - { PCI_VDEVICE(ZHAOXIN, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(ZHAOXIN, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(ZHAOXIN, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, - { PCI_VDEVICE(ZHAOXIN, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | - AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* Loongson HDAudio*/ { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA), .driver_data = AZX_DRIVER_LOONGSON | AZX_DCAPS_NO_TCSEL}, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 6be7c186276b..80c015af09ef 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4497,20 +4497,6 @@ static int patch_via_hdmi(struct hda_codec *codec) return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID); } -/* Zhaoxin HDMI Implementation */ -static int patch_zhaoxin_hdmi(struct hda_codec *codec) -{ - int err; - - err = patch_generic_hdmi(codec); - codec->no_sticky_stream = 1; - - if (err) - return err; - - return 0; -} - static int patch_gf_hdmi(struct hda_codec *codec) { int err; @@ -4653,15 +4639,6 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x11069f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8a, "KX-6000 HDMI/DP", 
patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x11069f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), @@ -4695,15 +4672,6 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), -HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), /* special ID for generic HDMI */ HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi), {} /* terminator */ -- Gitee From 1456a6cfb3fa8f2a0f02c252b587ed9ccbb0c22f Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:10 +0800 Subject: [PATCH 066/231] ALSA: HDA: Add Zhaoxin HDMI Controller and Codec support mainline inclusion from v6.16-rc1 commit f28aa371b73af941dc3a676cb875fe285b5c0d40 upstream. 
category: feature ------------------- Add newer Zhaoxin HD Audio PCI IDs, and HDMI codec vendor IDs. Because Zhaoxin hardware limitation, set BDL position to 128 to increase interrupt interval. To fix response write request not synced to memory when handle HDAC interrupt, set bus->polling_mode = 1. Signed-off-by: Joanne Bao Signed-off-by: Tony W Wang-oc Link: https://patch.msgid.link/20250423102851.57997-2-TonyWWang-oc@zhaoxin.com Signed-off-by: Takashi Iwai Signed-off-by: leoliu-oc --- sound/pci/hda/hda_intel.c | 23 +++++++++++++++++++++++ sound/pci/hda/patch_hdmi.c | 11 +++++++++++ 2 files changed, 34 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1872a53852e2..ecdfb75cd700 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -238,6 +238,7 @@ enum { AZX_DRIVER_CTHDA, AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, + AZX_DRIVER_ZHAOXINHDMI, AZX_DRIVER_LOONGSON, AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, @@ -351,6 +352,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CTHDA] = "HDA Creative", [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", + [AZX_DRIVER_ZHAOXINHDMI] = "HDA Zhaoxin HDMI", [AZX_DRIVER_LOONGSON] = "HDA Loongson", [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", @@ -1770,6 +1772,8 @@ static int default_bdl_pos_adj(struct azx *chip) case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: return 1; + case AZX_DRIVER_ZHAOXINHDMI: + return 128; default: return 32; } @@ -1899,6 +1903,9 @@ static int azx_first_init(struct azx *chip) chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) bus->hygon_dword_access = 1; + if (chip->driver_type == AZX_DRIVER_ZHAOXINHDMI) + bus->polling_mode = 1; + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -2000,6 +2007,7 @@ static int azx_first_init(struct azx *chip) chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; case AZX_DRIVER_GFHDMI: + case AZX_DRIVER_ZHAOXINHDMI: case 
AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2804,6 +2812,21 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, /* Zhaoxin */ { PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, + { PCI_VDEVICE(ZHAOXIN, 0x9141), + .driver_data = AZX_DRIVER_ZHAOXINHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9142), + .driver_data = AZX_DRIVER_ZHAOXINHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9144), + .driver_data = AZX_DRIVER_ZHAOXINHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9145), + .driver_data = AZX_DRIVER_ZHAOXINHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9146), + .driver_data = AZX_DRIVER_ZHAOXINHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, /* Loongson HDAudio*/ { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA), .driver_data = AZX_DRIVER_LOONGSON | AZX_DCAPS_NO_TCSEL}, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 80c015af09ef..3d1a47965d74 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4639,6 +4639,17 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f86, "ZX-100S HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f87, "ZX-100S HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP", patch_gf_hdmi), 
+HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP", patch_gf_hdmi), HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), -- Gitee From 7bff5058e5d57b0cbce7c80835f77e11b801696d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:11 +0800 Subject: [PATCH 067/231] PCI/P2PDMA: Add Zhaoxin bridges to the whitelist zhaoxin inclusion category: feature -------------------- Add Zhaoxin host bridges to the whitelist of p2pdma. Signed-off-by: leoliu-oc --- drivers/pci/p2pdma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 0f1e431bbfc2..9e1a0f13ec4d 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -442,6 +442,12 @@ static const struct pci_p2pdma_whitelist_entry { {PCI_VENDOR_ID_INTEL, 0x2033, 0}, {PCI_VENDOR_ID_INTEL, 0x2020, 0}, {PCI_VENDOR_ID_INTEL, 0x09a2, 0}, + /* KX-6000/KX-6000G/KX-7000/KH-40000/KH-50000 */ + {PCI_VENDOR_ID_ZHAOXIN, 0x1003, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_ZHAOXIN, 0x1005, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_ZHAOXIN, 0x1006, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_ZHAOXIN, 0x1007, REQ_SAME_HOST_BRIDGE}, + {PCI_VENDOR_ID_ZHAOXIN, 0x1008, 0}, {} }; -- Gitee From 701b734b70ed7f5f51f396f03796fa188b07083d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 27 Feb 2026 17:53:13 +0800 Subject: [PATCH 068/231] perf/zhaoxin/uncore: Enhance uncore support and fix related bugs zhaoxin inclusion category: feature -------------------- This patch makes several improvements to Zhaoxin PMU uncore. 1. 
Zhaoxin uncore PMC driver control: Adds kernel parameters zhaoxin_pmc_uncore={0|off} or zhaoxin_pmc_uncore={1|on}. These parameters can be used to enable the Zhaoxin uncore PMC driver during Linux kernel boot. By default, the driver is disabled. 2. Fix PCI device conflict bug: Resolves the previous PCI device conflict bug. For the PCI/MC/PXPTRF devices which are based on PCI access, the method of accessing them has been changed from probe to pci_get_device. 3. Fix MC bug on KX-7000: Identifies and fixes a bug related to the MC on KX-7000. When running the stream test and repeatedly executing the command perf stat -e uncore_mc_a0/umask=0xf, event=0x1/ sleep 10 2~3 times, a large abnormal value occurs. Debugging reveals that the read_counter function has an issue when reading high-order data. This patch corrects this problem to ensure accurate performance measurement and operation of the MC on KX-7000. Signed-off-by: leoliu-oc --- arch/x86/events/zhaoxin/uncore.c | 1328 +++++++++++++++--------------- arch/x86/events/zhaoxin/uncore.h | 80 +- 2 files changed, 721 insertions(+), 687 deletions(-) diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c index 30a51324f41a..2663ab537fbd 100644 --- a/arch/x86/events/zhaoxin/uncore.c +++ b/arch/x86/events/zhaoxin/uncore.c @@ -2,13 +2,17 @@ #include #include "uncore.h" -static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static int uncore_enabled; + +static struct zhaoxin_uncore_type *empty_uncore[] = { + NULL, +}; static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; static bool pcidrv_registered; -static struct pci_driver *uncore_pci_driver; +static const struct pci_device_id *uncore_pci_ids; /* mask of cpus that collect uncore events */ static cpumask_t uncore_cpu_mask; @@ -24,204 +28,195 @@ static int clusters_per_subnode; static 
int subnodes_per_die; static int dies_per_socket; -#define KH40000_MAX_SUBNODE_NUMBER 8 +#define KH40000_MAX_SUBNODE_NUMBER 8 static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; /* get CPU topology register */ -#define BJ_GLOBAL_STATUS_MSR 0x1610 -#define BJ_HDW_CONFIG_MSR 0X1628 +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0X1628 /* KX5000/KX6000 event control */ -#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff -#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 -#define KX5000_UNC_CTL_EDGE_DET (1 << 18) -#define KX5000_UNC_CTL_EN (1 << 22) -#define KX5000_UNC_CTL_INVERT (1 << 23) -#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 -#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) - -#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ - KX5000_UNC_CTL_UMASK_MASK | \ - KX5000_UNC_CTL_EDGE_DET | \ - KX5000_UNC_CTL_INVERT | \ - KX5000_UNC_CTL_CMASK_MASK) +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK \ + (KX5000_UNC_CTL_EV_SEL_MASK | KX5000_UNC_CTL_UMASK_MASK | KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | KX5000_UNC_CTL_CMASK_MASK) /* KX5000/KX6000 uncore global register */ -#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 -#define KX5000_UNC_FIXED_CTR 0x394 -#define KX5000_UNC_FIXED_CTR_CTRL 0x395 +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 /* KX5000/KX6000 uncore global control */ -#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) -#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) /* KX5000/KX6000 uncore register */ -#define KX5000_UNC_PERFEVTSEL0 0x3c0 -#define 
KX5000_UNC_UNCORE_PMC0 0x3b0 +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 /* KH40000 event control */ -#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff -#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 -#define KH40000_PMON_CTL_RST (1 << 17) -#define KH40000_PMON_CTL_EDGE_DET (1 << 18) -#define KH40000_PMON_CTL_EN (1 << 22) -#define KH40000_PMON_CTL_INVERT (1 << 23) -#define KH40000_PMON_CTL_THRESH_MASK 0xff000000 -#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ - KH40000_PMON_CTL_UMASK_MASK | \ - KH40000_PMON_CTL_EDGE_DET | \ - KH40000_PMON_CTL_INVERT | \ - KH40000_PMON_CTL_THRESH_MASK) +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_THRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK \ + (KH40000_PMON_CTL_EV_SEL_MASK | KH40000_PMON_CTL_UMASK_MASK | KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | KH40000_PMON_CTL_THRESH_MASK) /* KH40000 LLC register*/ -#define KH40000_LLC_MSR_PMON_CTL0 0x1660 -#define KH40000_LLC_MSR_PMON_CTR0 0x165c -#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 /* KH40000 HIF register*/ -#define KH40000_HIF_MSR_PMON_CTL0 0x1656 -#define KH40000_HIF_MSR_PMON_CTR0 0x1651 -#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 -#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 -#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b /* KH40000 ZZI(ZPI+ZOI+INI) register*/ -#define KH40000_ZZI_MSR_PMON_CTL0 0x166A -#define 
KH40000_ZZI_MSR_PMON_CTR0 0x1666 -#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f /* KH40000 MC register*/ -#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 -#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 -#define KH40000_MC0_CHy_PMON_CTR0 0xf00 -#define KH40000_MC0_CHy_PMON_CTL0 0xf28 -#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 - -#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 -#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 -#define KH40000_MC1_CHy_PMON_CTR0 0xf50 -#define KH40000_MC1_CHy_PMON_CTL0 0xf78 -#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 +#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 /* KH40000 PCI register*/ -#define KH40000_PCI_PMON_CTR0 0xf00 -#define KH40000_PCI_PMON_CTL0 0xf28 -#define KH40000_PCI_PMON_BLK_CTL 0xf44 +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 /* KH40000 ZPI_DLL register*/ -#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 -#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 -#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 -#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 -#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 /* KH40000 ZDI_DLL register*/ -#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 -#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 -#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 -#define 
KH40000_ZDI_DLL_PMON_CTL0 0xf28 -#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 /* KH40000 PXPTRF register*/ -#define KH40000_PXPTRF_PMON_CTR0 0xf00 -#define KH40000_PXPTRF_PMON_CTL0 0xf28 -#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 /* KH40000 Box level control */ -#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) -#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) -#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) -#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) - -#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ - KH40000_PMON_BOX_CTL_RST_CTRS) +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) -#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ - KH40000_PMON_BOX_CTL_RST_CTRS | \ - KH40000_PMON_PCI_BOX_PMON_EN) +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | KH40000_PMON_BOX_CTL_RST_CTRS) +#define KH40000_PMON_PCI_BOX_CTL_INT \ + (KH40000_PMON_BOX_CTL_RST_CTRL | KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) /* KX7000 event control */ -#define KX7000_PMON_CTL_EV_SEL_MASK 0x000000ff -#define KX7000_PMON_CTL_UMASK_MASK 0x0000ff00 -#define KX7000_PMON_CTL_RST (1 << 17) -#define KX7000_PMON_CTL_EDGE_DET (1 << 18) -#define KX7000_PMON_CTL_LOGIC_OP0 (1 << 19) -#define KX7000_PMON_CTL_LOGIC_OP1 (1 << 21) -#define KX7000_PMON_CTL_EN (1 << 22) -#define KX7000_PMON_CTL_INVERT (1 << 23) -#define KX7000_PMON_CTL_THRESH_MASK 0xff000000 -#define KX7000_PMON_RAW_EVENT_MASK (KX7000_PMON_CTL_EV_SEL_MASK | \ - KX7000_PMON_CTL_UMASK_MASK | \ - 
KX7000_PMON_CTL_EDGE_DET | \ - KX7000_PMON_CTL_LOGIC_OP0 | \ - KX7000_PMON_CTL_LOGIC_OP1 | \ - KX7000_PMON_CTL_INVERT | \ - KX7000_PMON_CTL_THRESH_MASK) +#define KX7000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KX7000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KX7000_PMON_CTL_RST (1 << 17) +#define KX7000_PMON_CTL_EDGE_DET (1 << 18) +#define KX7000_PMON_CTL_LOGIC_OP0 (1 << 19) +#define KX7000_PMON_CTL_LOGIC_OP1 (1 << 21) +#define KX7000_PMON_CTL_EN (1 << 22) +#define KX7000_PMON_CTL_INVERT (1 << 23) +#define KX7000_PMON_CTL_THRESH_MASK 0xff000000 +#define KX7000_PMON_RAW_EVENT_MASK \ + (KX7000_PMON_CTL_EV_SEL_MASK | KX7000_PMON_CTL_UMASK_MASK | KX7000_PMON_CTL_EDGE_DET | \ + KX7000_PMON_CTL_LOGIC_OP0 | KX7000_PMON_CTL_LOGIC_OP1 | KX7000_PMON_CTL_INVERT | \ + KX7000_PMON_CTL_THRESH_MASK) /* KX7000 LLC register*/ -#define KX7000_LLC_MSR_PMON_CTL0 0x1979 -#define KX7000_LLC_MSR_PMON_CTR0 0x1975 -#define KX7000_LLC_MSR_PMON_BLK_CTL 0x197e +#define KX7000_LLC_MSR_PMON_CTL0 0x1979 +#define KX7000_LLC_MSR_PMON_CTR0 0x1975 +#define KX7000_LLC_MSR_PMON_BLK_CTL 0x197e /* KX7000 MESH register*/ -#define KX7000_MESH_MSR_PMON_CTL0 0x1983 -#define KX7000_MESH_MSR_PMON_CTR0 0x197f -#define KX7000_MESH_MSR_PMON_BLK_CTL 0x1987 +#define KX7000_MESH_MSR_PMON_CTL0 0x1983 +#define KX7000_MESH_MSR_PMON_CTR0 0x197f +#define KX7000_MESH_MSR_PMON_BLK_CTL 0x1987 /* KX7000 HOMESTOP register*/ -#define KX7000_HOMESTOP_MSR_PMON_CTL0 0x196a -#define KX7000_HOMESTOP_MSR_PMON_CTR0 0x1966 -#define KX7000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e -#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 -#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 +#define KX7000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX7000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX7000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 /* KX7000 CCDie ZDI_PL register*/ -#define KX7000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 -#define KX7000_CCD_ZDI_PL_MSR_PMON_CTR0 
0x195c -#define KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 /* KX7000 cIODie ZDI_PL register*/ -#define KX7000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 -#define KX7000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 -#define KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 -#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A -#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B /* KX7000 MC register*/ -#define KX7000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 -#define KX7000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 -#define KX7000_MC_A0_CHy_PMON_CTR0 0xe00 -#define KX7000_MC_A0_CHy_PMON_CTL0 0xe20 -#define KX7000_MC_A0_CHy_PMON_BLK_CTL 0xe34 - -#define KX7000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 -#define KX7000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 -#define KX7000_MC_A1_CHy_PMON_CTR0 0xe40 -#define KX7000_MC_A1_CHy_PMON_CTL0 0xe60 -#define KX7000_MC_A1_CHy_PMON_BLK_CTL 0xe74 - -#define KX7000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 -#define KX7000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 -#define KX7000_MC_B0_CHy_PMON_CTR0 0xe80 -#define KX7000_MC_B0_CHy_PMON_CTL0 0xea0 -#define KX7000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 - -#define KX7000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 -#define KX7000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 -#define KX7000_MC_B1_CHy_PMON_CTR0 0xec0 -#define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 -#define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 - -#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 -#define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 -#define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 -#define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 -#define KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 -#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff -#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff 
-#define KX7000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 -#define KX7000_ZDI_DL_MMIO_SIZE 0x1000 +#define KX7000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX7000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX7000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX7000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX7000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX7000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX7000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX7000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX7000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX7000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX7000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX7000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX7000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX7000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX7000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX7000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX7000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX7000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX7000_ZDI_DL_MMIO_SIZE 0x1000 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); @@ -249,8 +244,6 @@ static void get_global_status_msr(void *status) /*topology number : get max packages/subnode/clusters number*/ static void get_topology_number(void) { - int clusters; - int subnodes; int dies; int packages; u64 data; @@ -276,18 +269,10 @@ static void get_topology_number(void) dies_per_socket = 1; /* check subnodes_per_die */ - subnodes = (data >> 32) & 0x3; - if (subnodes == 0x3) - subnodes_per_die = 2; - else - subnodes_per_die = 1; + subnodes_per_die = 2; /* 
check clusters_per_subnode */ - clusters = (data >> 6) & 0x3; - if (clusters == 0x3) - clusters_per_subnode = 2; - else - clusters_per_subnode = 1; + clusters_per_subnode = 2; max_subnodes = max_packages * dies_per_socket * subnodes_per_die; max_clusters = clusters_per_subnode * max_subnodes; @@ -321,6 +306,8 @@ static int get_pcibus_limit(void) } } + pci_dev_put(dev); + return 0; } @@ -398,7 +385,7 @@ DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); static void zx_gen_core_map(void) { - int cpu, i; + int i, cpu; int cluster_id, subnode_id; for_each_present_cpu(cpu) { @@ -430,20 +417,10 @@ static struct cpumask *topology_subnode_core_cpumask(int cpu) return &per_cpu(zx_subnode_core_bits, cpu); } -static void uncore_free_pcibus_map(void) +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct uncore_event_desc *event = container_of(attr, struct uncore_event_desc, attr); -} - -static int kh40000_pci2node_map_init(void) -{ - return 0; -} - -ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct uncore_event_desc *event = - container_of(attr, struct uncore_event_desc, attr); return sprintf(buf, "%s", event->config); } @@ -483,7 +460,7 @@ static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, struct perf_e } hwc->config_base = uncore_event_ctl(box, hwc->idx); - hwc->event_base = uncore_perf_ctr(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); } void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) @@ -524,7 +501,7 @@ static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) { wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, - KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); } static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, struct 
perf_event *event) @@ -556,27 +533,27 @@ static struct uncore_event_desc kx5000_uncore_events[] = { }; static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { - .disable_box = kx5000_uncore_msr_disable_box, - .enable_box = kx5000_uncore_msr_enable_box, - .disable_event = kx5000_uncore_msr_disable_event, - .enable_event = kx5000_uncore_msr_enable_event, - .read_counter = uncore_msr_read_counter, + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, }; static struct zhaoxin_uncore_type kx5000_uncore_box = { - .name = "", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX5000_UNC_PERFEVTSEL0, - .perf_ctr = KX5000_UNC_UNCORE_PMC0, - .fixed_ctr = KX5000_UNC_FIXED_CTR, - .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, - .event_mask = KX5000_UNC_RAW_EVENT_MASK, - .event_descs = kx5000_uncore_events, - .ops = &kx5000_uncore_msr_ops, - .format_group = &kx5000_uncore_format_group, + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, }; static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { @@ -665,57 +642,57 @@ static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { }; static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { - .init_box = kh40000_uncore_msr_init_box, - .disable_box = kh40000_uncore_msr_disable_box, - .enable_box = kh40000_uncore_msr_enable_box, - .disable_event = kh40000_uncore_msr_disable_event, - .enable_event = kh40000_uncore_msr_enable_event, - 
.read_counter = uncore_msr_read_counter, + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, }; static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { - .name = "llc", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = KH40000_LLC_MSR_PMON_CTL0, - .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, - .event_descs = kh40000_uncore_llc_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { - .name = "hif", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KH40000_HIF_MSR_PMON_CTL0, - .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, - .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, - .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, - .event_descs = kh40000_uncore_hif_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + 
.event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { - .name = "zzi", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, - .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, - .event_descs = kh40000_uncore_zzi_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { @@ -810,102 +787,102 @@ static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { }; static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { - .init_box = kh40000_uncore_pci_init_box, - .disable_box = kh40000_uncore_pci_disable_box, - .enable_box = kh40000_uncore_pci_enable_box, - .disable_event = kh40000_uncore_pci_disable_event, - .enable_event = kh40000_uncore_pci_enable_event, - .read_counter = kh40000_uncore_pci_read_counter + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter }; static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { - .name = "mc0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - 
.fixed_ctr_bits = 48, - .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, - .event_descs = kh40000_uncore_imc_events, - .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, - .event_ctl = KH40000_MC0_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { - .name = "mc1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, - .event_descs = kh40000_uncore_imc_events, - .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, - .event_ctl = KH40000_MC1_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type 
kh40000_uncore_pci = { - .name = "pci", - .num_counters = 4, - .num_boxes = 10, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pci_events, - .perf_ctr = KH40000_PCI_PMON_CTR0, - .event_ctl = KH40000_PCI_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PCI_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "pci", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { - .name = "zpi_dll", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_zpi_dll_events, - .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, - .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { - .name = "zdi_dll", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_zdi_dll_events, - .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, - .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = 
&kh40000_uncore_format_group + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { - .name = "pxptrf", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pxptrf_events, - .perf_ctr = KH40000_PXPTRF_PMON_CTR0, - .event_ctl = KH40000_PXPTRF_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; enum { @@ -918,49 +895,66 @@ enum { }; static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { - [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, - [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, - [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, - [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, - [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, - [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, NULL, }; static const 
struct pci_device_id kh40000_uncore_pci_ids[] = { - { /* MC Channe0/1 */ + { + /* MC Channe0/1 */ PCI_DEVICE(0x1D17, 0x31b2), .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), }, - { /* ZPI_DLL */ + /* + * PEXC_A: D2F0 D2F1 D3F0 D3F1 D3F2 all use D2F0 to access, + * with different eventcode. + */ + { + /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), + }, + + /* + * PEXC_B: D4F0 D4F1 D5F0 D5F1 D5F2 all use D4F0 to access, + * with different eventcode. + */ + { + /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), + }, + + { + /* ZPI_DLL */ PCI_DEVICE(0x1D17, 0x91c1), .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), }, - { /* ZDI_DLL */ + { + /* ZDI_DLL */ PCI_DEVICE(0x1D17, 0x3b03), .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), }, - { /* PXPTRF */ + { + /* PXPTRF */ PCI_DEVICE(0x1D17, 0x31B4), .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), }, { /* end: all zeroes */ } }; - -static struct pci_driver kh40000_uncore_pci_driver = { - .name = "kh40000_uncore", - .id_table = kh40000_uncore_pci_ids, -}; /*KH40000 pci ops end*/ /*KX7000 msr ops start*/ -static unsigned int kx7000_uncore_msr_offsets[] = { - 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b -}; +static unsigned int kx7000_uncore_msr_offsets[] = { 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b }; static struct attribute *kx7000_uncore_formats_attr[] = { &format_attr_event.attr, @@ -979,96 +973,95 @@ static struct attribute_group kx7000_uncore_format_group = { }; static struct zhaoxin_uncore_type kx7000_uncore_mesh_box = { - .name = "mesh", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 48, - .event_ctl = KX7000_MESH_MSR_PMON_CTL0, - .perf_ctr = KX7000_MESH_MSR_PMON_CTR0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_MESH_MSR_PMON_BLK_CTL, - .msr_offsets = kx7000_uncore_msr_offsets, - .ops = 
&kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX7000_MESH_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_llc_box = { - .name = "llc", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 48, - .event_ctl = KX7000_LLC_MSR_PMON_CTL0, - .perf_ctr = KX7000_LLC_MSR_PMON_CTR0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_LLC_MSR_PMON_BLK_CTL, - .msr_offsets = kx7000_uncore_msr_offsets, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX7000_LLC_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_hif_box = { - .name = "hif", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KH40000_HIF_MSR_PMON_CTL0, - .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, - .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, - .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + 
.fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_homestop = { - .name = "homestop", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX7000_HOMESTOP_MSR_PMON_CTL0, - .perf_ctr = KX7000_HOMESTOP_MSR_PMON_CTR0, - .fixed_ctr = KX7000_HOMESTOP_MSR_PMON_FIXED_CTR, - .fixed_ctl = KX7000_HOMESTOP_MSR_PMON_FIXED_CTL, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_HOMESTOP_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX7000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX7000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_pl = { - .name = "ccd_zdi_pl", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_CTL0, - .perf_ctr = KX7000_CCD_ZDI_PL_MSR_PMON_CTR0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, 
+ .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_pl = { - .name = "iod_zdi_pl", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_CTL0, - .perf_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_CTR0, - .fixed_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, - .fixed_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kx7000_uncore_format_group, + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, }; - static struct zhaoxin_uncore_type *kx7000_msr_uncores[] = { &kx7000_uncore_llc_box, &kx7000_uncore_mesh_box, @@ -1081,9 +1074,7 @@ static struct zhaoxin_uncore_type *kx7000_msr_uncores[] = { /*KX7000 msr ops end*/ /*KX7000 pci ops start*/ -static unsigned int kx7000_mc_ctr_lh_offsets[] = { - 0xc, 0xe, 0x10, 0x12, 0x14 -}; +static unsigned int kx7000_mc_ctr_lh_offsets[] = { 0xc, 0xe, 0x10, 0x12, 0x14 }; static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) @@ -1092,7 +1083,7 @@ static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, struct hw_perf_event *hwc = &event->hw; u64 count = 0; - pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 2); pci_read_config_dword(pdev, hwc->event_base + kx7000_mc_ctr_lh_offsets[hwc->idx], (u32 *)&count); @@ -1100,103 
+1091,103 @@ static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, } static struct zhaoxin_uncore_ops kx7000_uncore_pci_mc_ops = { - .init_box = kh40000_uncore_pci_init_box, - .disable_box = kh40000_uncore_pci_disable_box, - .enable_box = kh40000_uncore_pci_enable_box, - .disable_event = kh40000_uncore_pci_disable_event, - .enable_event = kh40000_uncore_pci_enable_event, - .read_counter = kx7000_uncore_pci_mc_read_counter + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx7000_uncore_pci_mc_read_counter }; static struct zhaoxin_uncore_type kx7000_uncore_mc_a0 = { - .name = "mc_a0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX7000_MC_A0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX7000_MC_A0_CHy_PMON_FIXED_CTL, - .perf_ctr = KX7000_MC_A0_CHy_PMON_CTR0, - .event_ctl = KX7000_MC_A0_CHy_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_MC_A0_CHy_PMON_BLK_CTL, - .ops = &kx7000_uncore_pci_mc_ops, - .format_group = &kx7000_uncore_format_group, + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_mc_a1 = { - .name = "mc_a1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX7000_MC_A1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX7000_MC_A1_CHy_PMON_FIXED_CTL, - .perf_ctr = 
KX7000_MC_A1_CHy_PMON_CTR0, - .event_ctl = KX7000_MC_A1_CHy_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_MC_A1_CHy_PMON_BLK_CTL, - .ops = &kx7000_uncore_pci_mc_ops, - .format_group = &kx7000_uncore_format_group, + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_mc_b0 = { - .name = "mc_b0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX7000_MC_B0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX7000_MC_B0_CHy_PMON_FIXED_CTL, - .perf_ctr = KX7000_MC_B0_CHy_PMON_CTR0, - .event_ctl = KX7000_MC_B0_CHy_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_MC_B0_CHy_PMON_BLK_CTL, - .ops = &kx7000_uncore_pci_mc_ops, - .format_group = &kx7000_uncore_format_group, + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_mc_b1 = { - .name = "mc_b1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX7000_MC_B1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX7000_MC_B1_CHy_PMON_FIXED_CTL, - .perf_ctr = KX7000_MC_B1_CHy_PMON_CTR0, - .event_ctl = 
KX7000_MC_B1_CHy_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_MC_B1_CHy_PMON_BLK_CTL, - .ops = &kx7000_uncore_pci_mc_ops, - .format_group = &kx7000_uncore_format_group, + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_pci = { - .name = "pci", - .num_counters = 4, - .num_boxes = 17, - .perf_ctr_bits = 48, - .perf_ctr = KH40000_PCI_PMON_CTR0, - .event_ctl = KH40000_PCI_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PCI_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kx7000_uncore_format_group, + .name = "pci", + .num_counters = 4, + .num_boxes = 2, + .perf_ctr_bits = 48, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_pxptrf = { - .name = "pxptrf", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pxptrf_events, - .perf_ctr = KH40000_PXPTRF_PMON_CTR0, - .event_ctl = KH40000_PXPTRF_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kx7000_uncore_format_group, + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = 
KH40000_PXPTRF_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, }; enum { @@ -1209,33 +1200,50 @@ enum { }; static struct zhaoxin_uncore_type *kx7000_pci_uncores[] = { - [KX7000_PCI_UNCORE_MC_A0] = &kx7000_uncore_mc_a0, - [KX7000_PCI_UNCORE_MC_A1] = &kx7000_uncore_mc_a1, - [KX7000_PCI_UNCORE_MC_B0] = &kx7000_uncore_mc_b0, - [KX7000_PCI_UNCORE_MC_B1] = &kx7000_uncore_mc_b1, - [KX7000_PCI_UNCORE_PCI] = &kx7000_uncore_pci, - [KX7000_PCI_UNCORE_PXPTRF] = &kx7000_uncore_pxptrf, + [KX7000_PCI_UNCORE_MC_A0] = &kx7000_uncore_mc_a0, + [KX7000_PCI_UNCORE_MC_A1] = &kx7000_uncore_mc_a1, + [KX7000_PCI_UNCORE_MC_B0] = &kx7000_uncore_mc_b0, + [KX7000_PCI_UNCORE_MC_B1] = &kx7000_uncore_mc_b1, + [KX7000_PCI_UNCORE_PCI] = &kx7000_uncore_pci, + [KX7000_PCI_UNCORE_PXPTRF] = &kx7000_uncore_pxptrf, NULL, }; static const struct pci_device_id kx7000_uncore_pci_ids[] = { - { /* MC Channe A0/A1/B0/B1 */ + { + /* MC Channe A0/A1/B0/B1 */ PCI_DEVICE(0x1D17, 0x31B2), .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_MC_A0, 0), }, - { /* PXPTRF */ + /* + * PEXC_A: D2F0 D2F1 D2F2 D2F3 D2F4 D3F0 D3F1 D3F2 D3F3 all + * use D2F0 to access, with different eventcode + */ + { + /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 0), + }, + + /* + * PEXC_B: D4F0 D4F1 D4F2 D4F3 D4F4 D5F0 D5F1 D5F2 D5F3 all + * use D4F0 to access, with different eventcode + */ + { + /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 1), + }, + + { + /* PXPTRF */ PCI_DEVICE(0x1D17, 0x31B4), .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PXPTRF, 0), }, { /* end: all zeroes */ } }; - -static struct pci_driver kx7000_uncore_pci_driver = { - .name = "kx7000_uncore", - .id_table = kx7000_uncore_pci_ids, -}; /*KX7000 pci ops end*/ /*KX7000 mmio ops start*/ @@ -1262,6 +1270,8 @@ static 
void kx7000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword); addr |= pci_dword & KX7000_ZDI_DL_MMIO_MEM0_MASK; + pci_dev_put(pdev); + box->io_addr = ioremap(addr, KX7000_ZDI_DL_MMIO_SIZE); if (!box->io_addr) return; @@ -1340,41 +1350,41 @@ static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_ } static struct zhaoxin_uncore_ops kx7000_uncore_mmio_ops = { - .init_box = kx7000_uncore_mmio_init_box, - .exit_box = uncore_mmio_exit_box, - .disable_box = kx7000_uncore_mmio_disable_box, - .enable_box = kx7000_uncore_mmio_enable_box, - .disable_event = kx7000_uncore_mmio_disable_event, - .enable_event = kx7000_uncore_mmio_enable_event, - .read_counter = uncore_mmio_read_counter, + .init_box = kx7000_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = kx7000_uncore_mmio_disable_box, + .enable_box = kx7000_uncore_mmio_enable_box, + .disable_event = kx7000_uncore_mmio_disable_event, + .enable_event = kx7000_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, }; static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_dl = { - .name = "iod_zdi_dl", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, - .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, - .ops = &kx7000_uncore_mmio_ops, - .format_group = &kx7000_uncore_format_group, + .name = "iod_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_dl = { - .name = "ccd_zdi_dl", - 
.num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, - .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, - .event_mask = KX7000_PMON_RAW_EVENT_MASK, - .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, - .ops = &kx7000_uncore_mmio_ops, - .format_group = &kx7000_uncore_format_group, + .name = "ccd_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, }; static struct zhaoxin_uncore_type *kx7000_mmio_uncores[] = { @@ -1419,8 +1429,7 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) { - hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), - HRTIMER_MODE_REL_PINNED); + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), HRTIMER_MODE_REL_PINNED); } static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) @@ -1491,8 +1500,7 @@ static int uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_eve return n; for_each_sibling_event(event, leader) { - if (!is_box_event(box, event) || - event->state <= PERF_EVENT_STATE_OFF) + if (!is_box_event(box, event) || event->state <= PERF_EVENT_STATE_OFF) continue; if (n >= max_count) @@ -1574,8 +1582,7 @@ static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], in } /* slow path */ if (i != n) - ret = perf_assign_events(box->event_constraint, n, - wmin, wmax, n, assign); + ret = perf_assign_events(box->event_constraint, n, wmin, wmax, n, assign); if (!assign || ret) { for (i = 0; i < n; i++) @@ -1589,7 +1596,6 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags) struct zhaoxin_uncore_box *box = 
uncore_event_to_box(event); int idx = event->hw.idx; - if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) return; @@ -1662,8 +1668,7 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags) event = box->event_list[i]; hwc = &event->hw; - if (hwc->idx == assign[i] && - hwc->last_tag == box->tags[assign[i]]) + if (hwc->idx == assign[i] && hwc->last_tag == box->tags[assign[i]]) continue; /* * Ensure we don't accidentally enable a stopped @@ -1680,8 +1685,7 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags) event = box->event_list[i]; hwc = &event->hw; - if (hwc->idx != assign[i] || - hwc->last_tag != box->tags[assign[i]]) + if (hwc->idx != assign[i] || hwc->last_tag != box->tags[assign[i]]) uncore_assign_hw_event(box, event, assign[i]); else if (i < box->n_events) continue; @@ -1813,7 +1817,7 @@ static int uncore_pmu_event_init(struct perf_event *event) hwc->config = 0ULL; } else { hwc->config = event->attr.config & - (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); if (pmu->type->ops->hw_config) { ret = pmu->type->ops->hw_config(box, event); if (ret) @@ -1880,7 +1884,6 @@ static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, c } else { active_mask = &uncore_cpu_mask; } - return cpumap_print_to_pagebuf(true, buf, active_mask); } static DEVICE_ATTR_RO(cpumask); @@ -1899,19 +1902,19 @@ static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) int ret; if (!pmu->type->pmu) { - pmu->pmu = (struct pmu) { - .attr_groups = pmu->type->attr_groups, - .task_ctx_nr = perf_invalid_context, - .pmu_enable = uncore_pmu_enable, - .pmu_disable = uncore_pmu_disable, - .event_init = uncore_pmu_event_init, - .add = uncore_pmu_event_add, - .del = uncore_pmu_event_del, - .start = uncore_pmu_event_start, - .stop = uncore_pmu_event_stop, - .read = uncore_pmu_event_read, - .module = THIS_MODULE, - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 
+ pmu->pmu = (struct pmu){ + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; } else { pmu->pmu = *pmu->type->pmu; @@ -1924,14 +1927,12 @@ static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) else sprintf(pmu->name, "uncore"); } else { - sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, - pmu->pmu_idx); + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, pmu->pmu_idx); } ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (!ret) pmu->registered = true; - return ret; } @@ -2004,18 +2005,17 @@ static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) } for (i = 0; i < type->num_boxes; i++) { - pmus[i].func_id = setid ? i : -1; - pmus[i].pmu_idx = i; - pmus[i].type = type; - pmus[i].boxes = kzalloc(size, GFP_KERNEL); + pmus[i].func_id = setid ? 
i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); if (!pmus[i].boxes) goto err; } type->pmus = pmus; - type->unconstrainted = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, - 0, type->num_counters, 0, 0); + type->unconstrainted = (struct event_constraint)__EVENT_CONSTRAINT( + 0, (1ULL << type->num_counters) - 1, 0, type->num_counters, 0, 0); if (type->event_descs) { struct { @@ -2065,7 +2065,7 @@ static int __init uncore_types_init(struct zhaoxin_uncore_type **types, bool set /* * add a pci uncore device */ -static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int uncore_pci_type_init(struct pci_dev *pdev, const struct pci_device_id *id) { struct zhaoxin_uncore_type *type; struct zhaoxin_uncore_pmu *pmu; @@ -2073,7 +2073,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id struct zhaoxin_uncore_box **boxes; char mc_dev[10]; int loop = 1; - int i, j = 0; + int i; int subnode_id = 0; int ret = 0; @@ -2096,15 +2096,11 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id if (!boxes) return -ENOMEM; + pci_set_drvdata(pdev, boxes); + for (i = 0; i < loop; i++) { - type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + i]; - if (!type) - continue; - /* - * for performance monitoring unit with multiple boxes, - * each box has a different function id. 
- */ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) @@ -2126,91 +2122,116 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id uncore_box_init(box); boxes[i] = box; - pci_set_drvdata(pdev, boxes); pmu->boxes[subnode_id] = box; if (atomic_inc_return(&pmu->activeboxes) > 1) { if (!strcmp(type->name, mc_dev)) - goto next_loop; + continue; else return 0; } + /* First active box registers the pmu */ ret = uncore_pmu_register(pmu); - if (ret) { - pci_set_drvdata(pdev, NULL); - pmu->boxes[subnode_id] = NULL; - uncore_box_exit(box); - kfree(box); - } -next_loop: - j++; + if (ret) + return ret; } - return ret; + return 0; } -static void uncore_pci_remove(struct pci_dev *pdev) +static void uncore_pci_type_exit(void) { + struct zhaoxin_uncore_type **types; + struct zhaoxin_uncore_type *type; struct zhaoxin_uncore_box **boxes; struct zhaoxin_uncore_box *box; struct zhaoxin_uncore_pmu *pmu; - int subnode_id = 0; - int i = 0; - int loop = 1; + int i, j; + int max; + struct pci_dev *pdev = NULL; - boxes = pci_get_drvdata(pdev); + for (types = uncore_pci_uncores; *types; types++) { + type = *types; + pmu = type->pmus; - if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { - if (!strcmp(boxes[0]->pmu->type->name, "mc0")) - loop = 2; - else - loop = 1; - } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { - if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) - loop = 4; - else - loop = 1; - } + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } - for (i = 0; i < loop; i++) { - box = boxes[i]; - pmu = box->pmu; - if (WARN_ON_ONCE(subnode_id != box->subnode_id)) - return; - - pci_set_drvdata(pdev, NULL); - pmu->boxes[subnode_id] = NULL; - if (atomic_dec_return(&pmu->activeboxes) == 0) - uncore_pmu_unregister(pmu); + for (i = 0; i < type->num_boxes; i++, pmu++) { + 
if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); - uncore_box_exit(box); - kfree(box); - } + for (j = 0; j < max; j++) { + box = pmu->boxes[j]; + /* check if device exist */ + if (!box) + continue; - kfree(boxes); + pdev = box->pci_dev; + + uncore_box_exit(box); + kfree(box); + + /* + * MC use one PCI device for mc0/mc1 mc_a0/mc_a1/mc_b0/mc_b1 + * So just put and free once: only put mc0 and mc_a0. + */ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + if (strncmp(type->name, "mc", 2) == 0 && + strcmp(type->name, "mc0") != 0) + break; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) + if (strncmp(type->name, "mc", 2) == 0 && + strcmp(type->name, "mc_a0") != 0) + break; + + boxes = pci_get_drvdata(pdev); + kfree(boxes); + pci_set_drvdata(pdev, NULL); + pci_dev_put(pdev); + } + } + } } static int __init uncore_pci_init(void) { int ret; + const struct pci_device_id *id = uncore_pci_ids; + struct pci_dev *pdev = NULL; ret = uncore_types_init(uncore_pci_uncores, false); if (ret) - goto errtype; + goto err; - uncore_pci_driver->probe = uncore_pci_probe; - uncore_pci_driver->remove = uncore_pci_remove; + while (id && id->vendor) { + pdev = pci_get_device(id->vendor, id->device, NULL); + while (pdev) { + ret = uncore_pci_type_init(pdev, id); + if (ret) { + pci_dev_put(pdev); + goto err; + } - ret = pci_register_driver(uncore_pci_driver); - if (ret) - goto errtype; + pdev = pci_get_device(id->vendor, id->device, pdev); + } + + id++; + } pcidrv_registered = true; return 0; -errtype: +err: + uncore_pci_type_exit(); uncore_types_exit(uncore_pci_uncores); - uncore_free_pcibus_map(); uncore_pci_uncores = empty_uncore; return ret; } @@ -2219,9 +2240,8 @@ static void uncore_pci_exit(void) { if (pcidrv_registered) { pcidrv_registered = false; - pci_unregister_driver(uncore_pci_driver); + uncore_pci_type_exit(); uncore_types_exit(uncore_pci_uncores); - uncore_free_pcibus_map(); } } @@ -2255,7 +2275,6 @@ static void 
uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu } if (old_cpu < 0) { - WARN_ON_ONCE(box->cpu != -1); box->cpu = new_cpu; continue; @@ -2662,9 +2681,9 @@ static int __init uncore_mmio_init(void) } struct zhaoxin_uncore_init_fun { - void (*cpu_init)(void); - int (*pci_init)(void); - void (*mmio_init)(void); + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); }; void kx5000_uncore_cpu_init(void) @@ -2683,12 +2702,11 @@ void kh40000_uncore_cpu_init(void) int kh40000_uncore_pci_init(void) { - int ret = kh40000_pci2node_map_init();/*pci_bus to package mapping, do nothing*/ - - if (ret) - return ret; + /* Register pci driver will conflict with other PCI device use pci_get_device instead */ uncore_pci_uncores = kh40000_pci_uncores; - uncore_pci_driver = &kh40000_uncore_pci_driver; + + uncore_pci_ids = kh40000_uncore_pci_ids; + return 0; } @@ -2714,8 +2732,10 @@ void kx7000_uncore_cpu_init(void) int kx7000_uncore_pci_init(void) { + /* Register pci driver will conflict with other PCI device use pci_get_device instead */ uncore_pci_uncores = kx7000_pci_uncores; - uncore_pci_driver = &kx7000_uncore_pci_driver; + + uncore_pci_ids = kx7000_uncore_pci_ids; return 0; } @@ -2744,12 +2764,34 @@ static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { }; MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); +/* + * Process kernel command-line parameter at boot time. + * zhaoxin_pmc_uncore={0|off} or zhaoxin_pmc_uncore={1|on} + */ +static int __init zhaoxin_uncore_enable(char *str) +{ + if (!strcasecmp(str, "off") || !strcmp(str, "0")) + uncore_enabled = 0; + else if (!strcasecmp(str, "on") || !strcmp(str, "1")) + uncore_enabled = 1; + else + pr_err("zhaoxin_pmc_uncore: invalid parameter value (%s)\n", str); + + pr_info("Zhaoxin PMC uncore %s\n", uncore_enabled ? 
"enabled" : "disabled"); + + return 1; +} +__setup("zhaoxin_pmc_uncore=", zhaoxin_uncore_enable); + static int __init zhaoxin_uncore_init(void) { const struct x86_cpu_id *id = NULL; struct zhaoxin_uncore_init_fun *uncore_init; int pret = 0, cret = 0, mret = 0, ret; + if (!uncore_enabled) + return 0; + id = x86_match_cpu(zhaoxin_uncore_match); if (!id) return -ENODEV; @@ -2788,10 +2830,8 @@ static int __init zhaoxin_uncore_init(void) if (cret && pret && mret) return -ENODEV; - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, - "perf/x86/zhaoxin/uncore:online", - uncore_event_cpu_online, - uncore_event_cpu_offline); + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, "perf/x86/zhaoxin/uncore:online", + uncore_event_cpu_online, uncore_event_cpu_offline); if (ret) goto err; pr_info("uncore init success!\n"); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h index 43ea06364175..fb4a48154b29 100644 --- a/arch/x86/events/zhaoxin/uncore.h +++ b/arch/x86/events/zhaoxin/uncore.h @@ -7,24 +7,24 @@ #include #include "../perf_event.h" -#define ZHAOXIN_FAM7_KX5000 0x1b -#define ZHAOXIN_FAM7_KX6000 0x3b -#define ZHAOXIN_FAM7_KH40000 0x5b -#define ZHAOXIN_FAM7_KX7000 0x6b +#define ZHAOXIN_FAM7_KX5000 0x1b +#define ZHAOXIN_FAM7_KX6000 0x3b +#define ZHAOXIN_FAM7_KH40000 0x5b +#define ZHAOXIN_FAM7_KX7000 0x6b -#define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) +#define UNCORE_PMU_NAME_LEN 32 +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) -#define UNCORE_FIXED_EVENT 0xff -#define UNCORE_PMC_IDX_MAX_GENERIC 4 -#define UNCORE_PMC_IDX_MAX_FIXED 1 -#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC +#define UNCORE_FIXED_EVENT 0xff +#define UNCORE_PMC_IDX_MAX_GENERIC 4 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC -#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) -#define 
UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) -#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) -#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) +#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) +#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) +#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) struct zhaoxin_uncore_ops; struct zhaoxin_uncore_pmu; @@ -98,12 +98,12 @@ struct zhaoxin_uncore_extra_reg { struct zhaoxin_uncore_box { int pci_phys_id; - int package_id; /*Package ID */ + int package_id; /*Package ID */ int cluster_id; int subnode_id; - int n_active; /* number of active events */ + int n_active; /* number of active events */ int n_events; - int cpu; /* cpu to collect events */ + int cpu; /* cpu to collect events */ unsigned long flags; atomic_t refcnt; struct perf_event *events[UNCORE_PMC_IDX_MAX]; @@ -135,22 +135,20 @@ struct hw_info { ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf); -#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ -{ \ - .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ - .config = _config, \ -} +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ + { \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), .config = _config, \ + } -#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ -static ssize_t __uncore_##_var##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *page) \ -{ \ - BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ - return sprintf(page, _format "\n"); \ -} \ -static struct device_attribute format_attr_##_var = \ - __ATTR(_name, 0444, __uncore_##_var##_show, NULL) +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ + static ssize_t __uncore_##_var##_show(struct device *dev, struct device_attribute *attr, \ + char *page) \ + { \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ + } \ + static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, 
__uncore_##_var##_show, NULL) static inline bool uncore_pmc_fixed(int idx) { @@ -159,8 +157,7 @@ static inline bool uncore_pmc_fixed(int idx) static inline unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) { - return box->pmu->type->box_ctl + - box->pmu->type->mmio_offset * box->pmu->pmu_idx; + return box->pmu->type->box_ctl + box->pmu->type->mmio_offset * box->pmu->pmu_idx; } static inline unsigned int uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) @@ -195,9 +192,8 @@ static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) { struct zhaoxin_uncore_pmu *pmu = box->pmu; - return pmu->type->msr_offsets ? - pmu->type->msr_offsets[pmu->pmu_idx] : - pmu->type->msr_offset * pmu->pmu_idx; + return pmu->type->msr_offsets ? pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; } static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) @@ -221,16 +217,14 @@ static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) static inline unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) { - return box->pmu->type->event_ctl + - (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - uncore_msr_box_offset(box); + return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); } static inline unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) { - return box->pmu->type->perf_ctr + - (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - uncore_msr_box_offset(box); + return box->pmu->type->perf_ctr + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); } static inline unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) -- Gitee From 41456b77faad77bddcddd430c54452fe1fee5132 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 17:42:10 +0800 Subject: [PATCH 069/231] Revert the HOST and KVM ZXPAUSE patches zhaoxin inclusion category: feature ------------------- The original zxpause instruction has been updated with the official name pauseopt. Moreover, the original patches have been optimized and updated. Therefore, the original patches need to be withdrawn so that they can be replaced with the new ones later. Signed-off-by: LeoLiu-oc --- arch/x86/include/asm/cpufeature.h | 7 +- arch/x86/include/asm/cpufeatures.h | 5 +- arch/x86/include/asm/delay.h | 1 - arch/x86/include/asm/disabled-features.h | 3 +- arch/x86/include/asm/msr-index.h | 18 -- arch/x86/include/asm/mwait.h | 21 -- arch/x86/include/asm/required-features.h | 3 +- arch/x86/include/asm/vmx.h | 8 - arch/x86/include/asm/vmxfeatures.h | 10 +- arch/x86/kernel/cpu/Makefile | 1 - arch/x86/kernel/cpu/centaur.c | 3 - arch/x86/kernel/cpu/feat_ctl.c | 8 - arch/x86/kernel/cpu/zhaoxin.c | 3 - arch/x86/kernel/cpu/zxpause.c | 238 ----------------------- arch/x86/kernel/time.c | 3 - arch/x86/kvm/cpuid.c | 13 +- arch/x86/kvm/reverse_cpuid.h | 1 - arch/x86/kvm/vmx/capabilities.h | 7 - arch/x86/kvm/vmx/vmcs.h | 2 - arch/x86/kvm/vmx/vmx.c | 66 +------ arch/x86/kvm/vmx/vmx.h | 19 -- arch/x86/kvm/x86.c | 5 - arch/x86/lib/delay.c | 27 --- tools/arch/x86/include/asm/cpufeatures.h | 5 +- tools/arch/x86/include/asm/msr-index.h | 13 -- 25 files changed, 12 insertions(+), 478 deletions(-) delete mode 100644 arch/x86/kernel/cpu/zxpause.c diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 0193a9d8c2be..e7afc9c36bb2 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -49,7 +49,6 @@ enum cpuid_leafs * CPUID_C000_0006_EAX must keep at the 30th 
position (count from 0)! * NR_CPUID_WORDS can not bigger than 31 (the cpuid_leafs only 32 bits!). */ - CPUID_C000_0006_EAX, /* 30 */ NR_CPUID_WORDS, }; @@ -118,9 +117,8 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 27, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 28, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 29, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 30, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 31)) + BUILD_BUG_ON_ZERO(NCAPINTS != 30)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -153,9 +151,8 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 27, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 28, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 29, feature_bit) || \ - CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 30, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 31)) + BUILD_BUG_ON_ZERO(NCAPINTS != 30)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index c06a62851611..c67d87eaac22 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 31 /* N 32-bit words worth of info */ +#define NCAPINTS 30 /* N 32-bit words worth of info */ #define NBUGINTS 4 /* N 32-bit bug flags */ /* @@ -494,9 +494,6 @@ #define X86_FEATURE_HYGON_SM3 (29*32 + 1) /* "sm3" SM3 instructions */ #define X86_FEATURE_HYGON_SM4 (29*32 + 2) /* "sm4" SM4 instructions */ -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 21 */ -#define X86_FEATURE_ZXPAUSE (30*32 + 0) /* ZHAOXIN ZXPAUSE */ - /* * Extended auxiliary flags: Linux defined - for features scattered in various diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 4dbb3fea67fb..630891d25819 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,7 +7,6 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); -void __init use_zxpause_delay(void); void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index c7f91989a16f..5082faf136be 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -158,7 +158,6 @@ #define DISABLED_MASK27 0 #define DISABLED_MASK28 0 #define DISABLED_MASK29 0 -#define DISABLED_MASK30 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 31) +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 30) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 334a284615ef..11ab6780d160 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -75,23 +75,12 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define 
MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) - -#define MSR_ZX_PAUSE_CONTROL 0x187f -#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) -#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) - /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) -/* - * The time field is bit[31:2], but representing a 32bit value with - * bit[1:0] zero. - */ -#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) - /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -836,13 +825,6 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 -/* - * Zhaoxin extend VMCS capabilities: - * bit 0: exec-cntl3 VMCS field. - */ -#define MSR_ZX_EXT_VMCS_CAPS 0x1675 -#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) - /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index bb293f9f9a2c..ae7a83e3f743 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,8 +26,6 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 -#define ZXPAUSE_C01_STATE 1 - static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -153,23 +151,4 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } -/* - * Caller can specify whether to enter C0.1 (low latency, less - * power saving) or C0.2 state (saves more power, but longer wakeup - * latency). This may be overridden by the ZX_PAUSE_CONTROL MSR - * which can force requests for C0.2 to be downgraded to C0.1. 
- */ -static inline void __zxpause(u32 ecx, u32 edx, u32 eax) -{ - /* "zxpause %ecx, %edx, %eax;" */ - #ifdef CONFIG_AS_ZXPAUSE - asm volatile("zxpause %%ecx\n" - : - : "c"(ecx), "d"(edx), "a"(eax)); - #else - asm volatile(".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" - : - : "c"(ecx), "d"(edx), "a"(eax)); - #endif -} #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 9abee54484e6..5d2017c24101 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -108,7 +108,6 @@ #define REQUIRED_MASK27 0 #define REQUIRED_MASK28 0 #define REQUIRED_MASK29 0 -#define REQUIRED_MASK30 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 31) +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 30) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 32dc7414b83b..0e73616b82f3 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,12 +84,6 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) -/* - * Definitions of Zhaoxin Tertiary Processor-Based VM-Execution Controls. 
- */ -#define ZX_TERTIARY_EXEC_GUEST_ZXPAUSE VMCS_CONTROL_BIT(GUEST_ZXPAUSE) - - #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -241,7 +235,6 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, - ZXPAUSE_VMEXIT_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -291,7 +284,6 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, - ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index ff4b573dd191..c6a7eed03914 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 6 /* N 32-bit words worth of info */ +#define NVMXINTS 5 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -87,10 +87,6 @@ #define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */ #define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* VM-Exit when no event windows after notify window */ -/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 3 */ -#define VMX_FEATURE_GUEST_ZXPAUSE (3*32 + 0) /* zxpause instruction in guest mode */ - -/* Tertiary Processor-Based VM-Execution Controls, word 4 */ -#define VMX_FEATURE_IPI_VIRT (4*32 + 4) /* Enable IPI virtualization */ - +/* Tertiary Processor-Based VM-Execution Controls, word 3 */ +#define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/kernel/cpu/Makefile 
b/arch/x86/kernel/cpu/Makefile index 09bb2e72b7a3..eb4dbcdf41f1 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -26,7 +26,6 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y += umwait.o -obj-y += zxpause.o obj-y += capflags.o powerflags.o obj-$(CONFIG_X86_LOCAL_APIC) += topology.o diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 09e3d7f57204..5bd0fab944db 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -116,9 +116,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c) */ if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); - - if (cpuid_eax(0xC0000000) >= 0xC0000006) - c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); } static void init_centaur(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index f16f19b06527..03851240c3e3 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -15,7 +15,6 @@ enum vmx_feature_leafs { MISC_FEATURES = 0, PRIMARY_CTLS, SECONDARY_CTLS, - ZX_TERTIARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, NR_VMX_FEATURE_WORDS, @@ -98,13 +97,6 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); - /* - * Initialize Zhaoxin Tertiary Exec Control feature flags. 
- */ - rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &supported, &ign); - if (supported & MSR_ZX_VMCS_EXEC_CTL3) - c->vmx_capability[ZX_TERTIARY_CTLS] |= VMX_F(GUEST_ZXPAUSE); - } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 3180e2026c40..6efe3c0aafda 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -72,9 +72,6 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) */ if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); - - if (cpuid_eax(0xC0000000) >= 0xC0000006) - c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); } static void init_zhaoxin(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/zxpause.c b/arch/x86/kernel/cpu/zxpause.c deleted file mode 100644 index 7f55f5d9e8c0..000000000000 --- a/arch/x86/kernel/cpu/zxpause.c +++ /dev/null @@ -1,238 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include - -#include -#include - -#define ZXPAUSE_C02_ENABLE 0 - -#define ZXPAUSE_CTRL_VAL(max_time, c02_disable) \ - (((max_time) & MSR_ZX_PAUSE_CONTROL_TIME_MASK) | \ - ((c02_disable) & MSR_ZX_PAUSE_CONTROL_C02_DISABLE)) - -/* - * Cache ZX_PAUSE_CONTROL MSR. This is a systemwide control. By default, - * zxpause max time is 100000 in TSC-quanta and C0.2 is enabled - */ -static u32 zxpause_control_cached = ZXPAUSE_CTRL_VAL(100000, ZXPAUSE_C02_ENABLE); - -/* - * Cache the original ZX_PAUSE_CONTROL MSR value which is configured by - * hardware or BIOS before kernel boot. - */ -static u32 orig_zxpause_control_cached __ro_after_init; - -/* - * Serialize access to zxpause_control_cached and ZX_PAUSE_CONTROL MSR in - * the sysfs write functions. 
- */ -static DEFINE_MUTEX(zxpause_lock); - -static void zxpause_update_control_msr(void *unused) -{ - lockdep_assert_irqs_disabled(); - wrmsr(MSR_ZX_PAUSE_CONTROL, READ_ONCE(zxpause_control_cached), 0); -} - -/* - * The CPU hotplug callback sets the control MSR to the global control - * value. - * - * Disable interrupts so the read of zxpause_control_cached and the WRMSR - * are protected against a concurrent sysfs write. Otherwise the sysfs - * write could update the cached value after it had been read on this CPU - * and issue the IPI before the old value had been written. The IPI would - * interrupt, write the new value and after return from IPI the previous - * value would be written by this CPU. - * - * With interrupts disabled the upcoming CPU either sees the new control - * value or the IPI is updating this CPU to the new control value after - * interrupts have been reenabled. - */ -static int zxpause_cpu_online(unsigned int cpu) -{ - local_irq_disable(); - zxpause_update_control_msr(NULL); - local_irq_enable(); - return 0; -} - -/* - * The CPU hotplug callback sets the control MSR to the original control - * value. - */ -static int zxpause_cpu_offline(unsigned int cpu) -{ - /* - * This code is protected by the CPU hotplug already and - * orig_zxpause_control_cached is never changed after it caches - * the original control MSR value in zxpause_init(). So there - * is no race condition here. - */ - wrmsr(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached, 0); - - return 0; -} - -/* - * On resume, restore ZX_PAUSE_CONTROL MSR on the boot processor which - * is the only active CPU at this time. The MSR is set up on the APs via the - * CPU hotplug callback. - * - * This function is invoked on resume from suspend and hibernation. On - * resume from suspend the restore should be not required, but we neither - * trust the firmware nor does it matter if the same value is written - * again. 
- */ -static void zxpause_syscore_resume(void) -{ - zxpause_update_control_msr(NULL); -} - -static struct syscore_ops zxpause_syscore_ops = { - .resume = zxpause_syscore_resume, -}; - -/* sysfs interface */ - -/* - * When bit 0 in ZX_PAUSE_CONTROL MSR is 1, C0.2 is disabled. - * Otherwise, C0.2 is enabled. - */ -static inline bool zxpause_ctrl_c02_enabled(u32 ctrl) -{ - return !(ctrl & MSR_ZX_PAUSE_CONTROL_C02_DISABLE); -} - -static inline u32 zxpause_ctrl_max_time(u32 ctrl) -{ - return ctrl & MSR_ZX_PAUSE_CONTROL_TIME_MASK; -} - -static inline void zxpause_update_control(u32 maxtime, bool c02_enable) -{ - u32 ctrl = maxtime & MSR_ZX_PAUSE_CONTROL_TIME_MASK; - - if (!c02_enable) - ctrl |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE; - - WRITE_ONCE(zxpause_control_cached, ctrl); - /* Propagate to all CPUs */ - on_each_cpu(zxpause_update_control_msr, NULL, 1); -} - -static ssize_t -enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - u32 ctrl = READ_ONCE(zxpause_control_cached); - - return sprintf(buf, "%d\n", zxpause_ctrl_c02_enabled(ctrl)); -} - -static ssize_t enable_c02_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - bool c02_enable; - u32 ctrl; - int ret; - - ret = kstrtobool(buf, &c02_enable); - if (ret) - return ret; - - mutex_lock(&zxpause_lock); - - ctrl = READ_ONCE(zxpause_control_cached); - if (c02_enable != zxpause_ctrl_c02_enabled(ctrl)) - zxpause_update_control(ctrl, c02_enable); - - mutex_unlock(&zxpause_lock); - - return count; -} -static DEVICE_ATTR_RW(enable_c02); - -static ssize_t -max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) -{ - u32 ctrl = READ_ONCE(zxpause_control_cached); - - return sprintf(buf, "%u\n", zxpause_ctrl_max_time(ctrl)); -} - -static ssize_t max_time_store(struct device *kobj, - struct device_attribute *attr, - const char *buf, size_t count) -{ - u32 max_time, ctrl; - int ret; - - ret = kstrtou32(buf, 0, &max_time); - if (ret) - 
return ret; - - /* bits[1:0] must be zero */ - if (max_time & ~MSR_ZX_PAUSE_CONTROL_TIME_MASK) - return -EINVAL; - - mutex_lock(&zxpause_lock); - - ctrl = READ_ONCE(zxpause_control_cached); - if (max_time != zxpause_ctrl_max_time(ctrl)) - zxpause_update_control(max_time, zxpause_ctrl_c02_enabled(ctrl)); - - mutex_unlock(&zxpause_lock); - - return count; -} -static DEVICE_ATTR_RW(max_time); - -static struct attribute *zxpause_attrs[] = { - &dev_attr_enable_c02.attr, - &dev_attr_max_time.attr, - NULL -}; - -static struct attribute_group zxpause_attr_group = { - .attrs = zxpause_attrs, - .name = "zxpause_control", -}; - -static int __init zxpause_init(void) -{ - struct device *dev; - int ret; - - if (!boot_cpu_has(X86_FEATURE_ZXPAUSE)) - return -ENODEV; - - /* - * Cache the original control MSR value before the control MSR is - * changed. This is the only place where orig_zxpause_control_cached - * is modified. - */ - rdmsrl(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached); - - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "zxpause:online", - zxpause_cpu_online, zxpause_cpu_offline); - if (ret < 0) { - /* - * On failure, the control MSR on all CPUs has the - * original control value. - */ - return ret; - } - - register_syscore_ops(&zxpause_syscore_ops); - - /* - * Add zxpause control interface. Ignore failure, so at least the - * default values are set up in case the machine manages to boot. 
- */ - dev = bus_get_dev_root(&cpu_subsys); - return sysfs_create_group(&dev->kobj, &zxpause_attr_group); -} -device_initcall(zxpause_init); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 6a6c8bd7843c..52e1f3f0b361 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -84,9 +84,6 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); - - if (static_cpu_has(X86_FEATURE_ZXPAUSE)) - use_zxpause_delay(); } /* diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 697cdc1c57ad..c9f27a4df96f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -853,10 +853,6 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); - /* Zhaoxin 0xC0000006 leaf */ - kvm_cpu_cap_mask(CPUID_C000_0006_EAX, 0 /* bit0: zxpause */ | 0 /* bit1 HMAC */); - - /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and @@ -1416,22 +1412,17 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /* Extended to 0xC0000006 */ - entry->eax = min(entry->eax, 0xC0000006); + /*Just support up to 0xC0000004 now*/ + entry->eax = min(entry->eax, 0xC0000004); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; - case 0xC0000006: - cpuid_entry_override(entry, CPUID_C000_0006_EAX); - break; - case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: - case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 867d8ee96c50..74ef96cb2752 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -104,7 +104,6 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 
7, 2, CPUID_EDX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, - [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, }; diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 631e65a21228..41a4533f9989 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -60,7 +60,6 @@ struct vmcs_config { u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; - u32 zx_cpu_based_3rd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; @@ -256,12 +255,6 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } -static inline bool cpu_has_vmx_zxpause(void) -{ - return vmcs_config.zx_cpu_based_3rd_exec_ctrl & - ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; -} - static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 4eabed8e5813..7c1996b433e2 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -50,9 +50,7 @@ struct vmcs_controls_shadow { u32 pin; u32 exec; u32 secondary_exec; - u32 zx_tertiary_exec; u64 tertiary_exec; - u64 zx_vmexit_tsc; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8725a059c4c6..247bdad01cda 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -220,7 +220,6 @@ int __read_mostly pt_mode = PT_MODE_SYSTEM; module_param(pt_mode, int, S_IRUGO); #endif -static u32 zx_ext_vmcs_cap; struct x86_pmu_lbr __ro_after_init vmx_lbr_caps; static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); @@ -2021,11 +2020,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) return 1; - msr_info->data = vmx->msr_ia32_umwait_control; - break; - case MSR_ZX_PAUSE_CONTROL: - if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) - return 1; + 
msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: @@ -2294,15 +2289,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) return 1; - vmx->msr_ia32_umwait_control = data; - break; - case MSR_ZX_PAUSE_CONTROL: - if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) - return 1; - /* The reserved bit 1 and non-32 bit [63:32] should be zero */ - if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) - return 1; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: @@ -2758,10 +2745,6 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; - /* Setup Zhaoxin exec-cntl3 VMCS field. */ - if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) - vmcs_conf->zx_cpu_based_3rd_exec_ctrl |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; - #if IS_ENABLED(CONFIG_HYPERV) if (enlightened_vmcs) evmcs_sanitize_exec_ctrls(vmcs_conf); @@ -4562,28 +4545,6 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx) return exec_control; } -static u32 vmx_zx_tertiary_exec_control(struct vcpu_vmx *vmx) -{ - struct kvm_vcpu *vcpu = &vmx->vcpu; - u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl; - - /* - * Show errors if Qemu wants to enable guest_zxpause while - * vmx not support it. - */ - if (guest_cpuid_has(vcpu, X86_FEATURE_ZXPAUSE)) { - if (!cpu_has_vmx_zxpause()) - pr_err("VMX not support guest_zxpause!\n"); - else - exec_control |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; - } else - exec_control &= ~ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; - - /* enable other features here */ - - return exec_control; -} - /* * Adjust a single secondary execution control bit to intercept/allow an * instruction in the guest. 
This is usually done based on whether or not a @@ -4791,11 +4752,6 @@ static void init_vmcs(struct vcpu_vmx *vmx) if (cpu_has_secondary_exec_ctrls()) secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); - if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { - zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); - zx_vmexit_tsc_controls_set(vmx, 0); - } - if (cpu_has_tertiary_exec_ctrls()) tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx)); @@ -6357,13 +6313,6 @@ void dump_vmcs(struct kvm_vcpu *vcpu) else tertiary_exec_control = 0; - pr_err("*** Zhaoxin Specific Fields ***\n"); - if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { - pr_err("Zhaoxin TertiaryExec Cntl = 0x%016x\n", - vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL)); - pr_err("ZXPAUSE Saved TSC = 0x%016llx\n", vmcs_read64(ZXPAUSE_VMEXIT_TSC)); - } - pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); pr_err("*** Guest State ***\n"); @@ -7919,11 +7868,6 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); - if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { - zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); - zx_vmexit_tsc_controls_set(vmx, 0); - } - if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -8082,10 +8026,6 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); - - if (cpu_has_vmx_zxpause()) - kvm_cpu_cap_check_and_set(X86_FEATURE_ZXPAUSE); - } static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, @@ -8652,10 +8592,6 @@ static __init int hardware_setup(void) unsigned long host_bndcfgs; struct desc_ptr dt; int r; - u32 ign; - - /* Caches Zhaoxin extend VMCS capabilities. 
*/ - rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); store_idt(&dt); host_idt_base = dt.address; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 7c997b07c85d..30c86e88eb84 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -602,17 +602,6 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) -#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 -#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL \ - (ZX_TERTIARY_EXEC_GUEST_ZXPAUSE) - -/* - * We shouldn't rw zxpause_vmexit_tsc vmcs field in this - * way, try to use another way in the future. - */ -#define KVM_REQUIRED_VMX_ZXPAUSE_VMEXIT_TSC 0 -#define KVM_OPTIONAL_VMX_ZXPAUSE_VMEXIT_TSC 1 - #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -645,8 +634,6 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) -BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) -BUILD_CONTROLS_SHADOW(zx_vmexit_tsc, ZXPAUSE_VMEXIT_TSC, 64) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -749,12 +736,6 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } -static inline bool vmx_guest_zxpause_enabled(struct vcpu_vmx *vmx) -{ - return zx_tertiary_exec_controls_get(vmx) & - ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; -} - static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 21855275e867..0e6d18764a4e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -345,7 +345,6 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, 
MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, - MSR_ZX_PAUSE_CONTROL, MSR_IA32_XFD, MSR_IA32_XFD_ERR, }; @@ -7257,10 +7256,6 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; - case MSR_ZX_PAUSE_CONTROL: - if (!kvm_cpu_cap_has(X86_FEATURE_ZXPAUSE)) - return; - break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 3946badbd78f..0e65d00e2339 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,27 +117,6 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } -/* - * On ZHAOXIN the ZXPAUSE instruction waits until any of: - * 1) the delta of TSC counter exceeds the value provided in EDX:EAX - * 2) global timeout in ZX_PAUSE_CONTROL is exceeded - * 3) an external interrupt occurs - */ -static void delay_halt_zxpause(u64 unused, u64 cycles) -{ - u64 until = cycles; - u32 eax, edx; - - eax = lower_32_bits(until); - edx = upper_32_bits(until); - - /* - * Hard code the deeper (C0.1) sleep state because exit latency is - * small compared to the "microseconds" that usleep() will delay. - */ - __zxpause(ZXPAUSE_C01_STATE, edx, eax); -} - /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. 
The input value is the number of TSC cycles @@ -204,12 +183,6 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } -void __init use_zxpause_delay(void) -{ - delay_halt_fn = delay_halt_zxpause; - delay_fn = delay_halt; -} - void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index b8e6840fb997..5053119fb488 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 31 /* N 32-bit words worth of info */ +#define NCAPINTS 30 /* N 32-bit words worth of info */ #define NBUGINTS 4 /* N 32-bit bug flags */ /* @@ -496,9 +496,6 @@ #define X86_FEATURE_HYGON_SM3 (29*32 + 1) /* "sm3" SM3 instructions */ #define X86_FEATURE_HYGON_SM4 (29*32 + 2) /* "sm4" SM4 instructions */ -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 21 */ -#define X86_FEATURE_ZXPAUSE (30*32 + 0) /* ZHAOXIN ZXPAUSE */ - #define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */ /* diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 38d71379a781..42e13e6e3980 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -86,12 +86,6 @@ */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) -/* - * The time field is bit[31:2], but representing a 32bit value with - * bit[1:0] zero. - */ -#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) - /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -835,13 +829,6 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 -/* - * Zhaoxin extend VMCS capabilities: - * bit 0: exec-cntl3 VMCS field. 
- */ -#define MSR_ZX_EXT_VMCS_CAPS 0x1675 -#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) - /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 -- Gitee From 6ca21ee78409eb475e4f4cbb31d1ab993322ff91 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 17:47:00 +0800 Subject: [PATCH 070/231] x86/delay: add support for Zhaoxin PAUSEOPT instruction zhaoxin inclusion category: feature ------------------- PAUSEOPT instructs the processor to enter an implementation-dependent optimized state. The instruction execution wakes up when the time-stamp counter reaches or exceeds the implicit EDX:EAX 64-bit input value. The instruction execution also wakes up due to the expiration of the operating system time-limit or by an external interrupt. PAUSEOPT is available on processors with X86_FEATURE_PAUSEOPT. PAUSEOPT allows the processor to enter a light-weight power/performance optimized state (C0.1 state) for a period specified by the instruction or until the system time limit. MSR_PAUSEOPT_CONTROL MSR register allows the OS to enable/disable C0.2 on the processor and to set the maximum time the processor can reside in C0.1 or C0.2. By default C0.2 is disabled. A sysfs interface to adjust the time and the C0.2 enablement is provided in a follow up change. 
Signed-off-by: LeoLiu-oc --- arch/x86/Kconfig.assembler | 5 + arch/x86/include/asm/cpufeature.h | 7 +- arch/x86/include/asm/cpufeatures.h | 5 +- arch/x86/include/asm/delay.h | 1 + arch/x86/include/asm/disabled-features.h | 3 +- arch/x86/include/asm/msr-index.h | 7 + arch/x86/include/asm/mwait.h | 21 +++ arch/x86/include/asm/required-features.h | 3 +- arch/x86/kernel/cpu/Makefile | 1 + arch/x86/kernel/cpu/centaur.c | 3 + arch/x86/kernel/cpu/pauseopt.c | 208 +++++++++++++++++++++++ arch/x86/kernel/cpu/zhaoxin.c | 3 + arch/x86/kernel/time.c | 3 + arch/x86/lib/delay.c | 23 +++ tools/arch/x86/include/asm/cpufeatures.h | 5 +- tools/arch/x86/include/asm/msr-index.h | 8 + 16 files changed, 300 insertions(+), 6 deletions(-) create mode 100644 arch/x86/kernel/cpu/pauseopt.c diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler index 16d0b022d6ff..1195554bb92c 100644 --- a/arch/x86/Kconfig.assembler +++ b/arch/x86/Kconfig.assembler @@ -29,3 +29,8 @@ config AS_WRUSS def_bool $(as-instr64,wrussq %rax$(comma)(%rbx)) help Supported by binutils >= 2.31 and LLVM integrated assembler + +config AS_PAUSEOPT + def_bool $(as-instr,pauseopt) + help + Supported by binutils >= xxx-TBD and LLVM integrated assembler xxx-TBD diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e7afc9c36bb2..564b678fbbbd 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -49,6 +49,7 @@ enum cpuid_leafs * CPUID_C000_0006_EAX must keep at the 30th position (count from 0)! * NR_CPUID_WORDS can not bigger than 31 (the cpuid_leafs only 32 bits!). 
*/ + CPUID_C000_0006_EAX, NR_CPUID_WORDS, }; @@ -117,8 +118,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 27, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 28, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 29, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 30, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 30)) + BUILD_BUG_ON_ZERO(NCAPINTS != 31)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -151,8 +153,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 27, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 28, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 29, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 30, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 30)) + BUILD_BUG_ON_ZERO(NCAPINTS != 31)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index c67d87eaac22..671e82b5ee74 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 30 /* N 32-bit words worth of info */ +#define NCAPINTS 31 /* N 32-bit words worth of info */ #define NBUGINTS 4 /* N 32-bit bug flags */ /* @@ -514,6 +514,9 @@ #define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */ #define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */ +/* Zhaoxin/Centaur-defined CPU features, CPUID level 0xC0000006, word 30 */ +#define X86_FEATURE_PAUSEOPT (30*32+ 0) /* ZHAOXIN PAUSEOPT */ + /* * BUG word(s) */ diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 630891d25819..c844077f19b6 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,6 +7,7 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); +void __init use_pauseopt_delay(void); void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 5082faf136be..c7f91989a16f 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -158,6 +158,7 @@ #define DISABLED_MASK27 0 #define DISABLED_MASK28 0 #define DISABLED_MASK29 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 30) +#define DISABLED_MASK30 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 31) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 11ab6780d160..62d22086a2d9 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -81,6 +81,13 @@ */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +#define 
MSR_PAUSEOPT_CONTROL 0x187f +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_PAUSEOPT_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index ae7a83e3f743..d480006b6360 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,6 +26,8 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 +#define PAUSEOPT_P01_STATE 1 + static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -151,4 +153,23 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } +/* + * Caller can specify to enter P0.1 (low latency, less power saving). + */ +static inline void __pauseopt(u32 ecx, u32 edx, u32 eax) +{ + /* "pauseopt %ecx, %edx, %eax;" */ +#ifdef CONFIG_AS_PAUSEOPT + asm volatile( + "pauseopt\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +#else + asm volatile( + ".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +#endif +} + #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 5d2017c24101..9abee54484e6 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -108,6 +108,7 @@ #define REQUIRED_MASK27 0 #define REQUIRED_MASK28 0 #define REQUIRED_MASK29 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 30) +#define REQUIRED_MASK30 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 31) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index eb4dbcdf41f1..1acb62a04312 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -26,6 +26,7 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y 
+= umwait.o +obj-y += pauseopt.o obj-y += capflags.o powerflags.o obj-$(CONFIG_X86_LOCAL_APIC) += topology.o diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 5bd0fab944db..09e3d7f57204 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -116,6 +116,9 @@ static void early_init_centaur(struct cpuinfo_x86 *c) */ if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); } static void init_centaur(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/pauseopt.c b/arch/x86/kernel/cpu/pauseopt.c new file mode 100644 index 000000000000..58a490da7b3e --- /dev/null +++ b/arch/x86/kernel/cpu/pauseopt.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#define PAUSEOPT_CTRL_VAL(max_time) (((max_time) & MSR_PAUSEOPT_CONTROL_TIME_MASK)) + +/* + * Cache PAUSEOPT_CONTROL MSR. This is a systemwide control. By default, + * pauseopt max time is 100000 in TSC-quanta and P0.1 is enabled. + */ +static u32 pauseopt_control_cached = PAUSEOPT_CTRL_VAL(100000); + +/* + * Cache the original PAUSEOPT_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_pauseopt_control_cached __ro_after_init; + +/* + * Serialize access to pauseopt_control_cached and PAUSEOPT_CONTROL MSR in + * the sysfs write functions. + */ +static DEFINE_MUTEX(pauseopt_lock); + +static void pauseopt_update_control_msr(void *unused) +{ + lockdep_assert_irqs_disabled(); + wrmsr(MSR_PAUSEOPT_CONTROL, READ_ONCE(pauseopt_control_cached), 0); +} + +/* + * The CPU hotplug callback sets the control MSR to the global control + * value. + * + * Disable interrupts so the read of pauseopt_control_cached and the WRMSR + * are protected against a concurrent sysfs write. 
Otherwise the sysfs + * write could update the cached value after it had been read on this CPU + * and issue the IPI before the old value had been written. The IPI would + * interrupt, write the new value and after return from IPI the previous + * value would be written by this CPU. + * + * With interrupts disabled the upcoming CPU either sees the new control + * value or the IPI is updating this CPU to the new control value after + * interrupts have been reenabled. + */ +static int pauseopt_cpu_online(unsigned int cpu) +{ + local_irq_disable(); + pauseopt_update_control_msr(NULL); + local_irq_enable(); + return 0; +} + +/* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int pauseopt_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_pauseopt_control_cached is never changed after it caches + * the original control MSR value in pauseopt_init(). So there + * is no race condition here. + */ + wrmsr(MSR_PAUSEOPT_CONTROL, orig_pauseopt_control_cached, 0); + + return 0; +} + +/* + * On resume, restore PAUSEOPT_CONTROL MSR on the boot processor which + * is the only active CPU at this time. The MSR is set up on the APs via the + * CPU hotplug callback. + * + * This function is invoked on resume from suspend and hibernation. On + * resume from suspend the restore should be not required, but we neither + * trust the firmware nor does it matter if the same value is written + * again. 
+ */ +static void pauseopt_syscore_resume(void) +{ + pauseopt_update_control_msr(NULL); +} + +static struct syscore_ops pauseopt_syscore_ops = { + .resume = pauseopt_syscore_resume, +}; + +/* sysfs interface */ + +static inline u32 pauseopt_ctrl_max_time(u32 ctrl) +{ + return ctrl & MSR_PAUSEOPT_CONTROL_TIME_MASK; +} + +static inline void pauseopt_update_control(u32 maxtime) +{ + u32 ctrl = maxtime & MSR_PAUSEOPT_CONTROL_TIME_MASK; + + WRITE_ONCE(pauseopt_control_cached, ctrl); + /* Propagate to all CPUs */ + on_each_cpu(pauseopt_update_control_msr, NULL, 1); +} + +static ssize_t +enable_p01_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 ret; + + if (boot_cpu_has(X86_FEATURE_PAUSEOPT)) + ret = 1; + else + ret = 0; + + return sprintf(buf, "%d\n", ret); +} +static DEVICE_ATTR_RO(enable_p01); + +static ssize_t +max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(pauseopt_control_cached); + + return sprintf(buf, "%u\n", pauseopt_ctrl_max_time(ctrl)); +} + +static ssize_t max_time_store(struct device *kobj, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 max_time, ctrl; + int ret; + + ret = kstrtou32(buf, 0, &max_time); + if (ret) + return ret; + + /* bits[1:0] must be zero */ + if (max_time & ~MSR_PAUSEOPT_CONTROL_TIME_MASK) + return -EINVAL; + + mutex_lock(&pauseopt_lock); + + ctrl = READ_ONCE(pauseopt_control_cached); + if (max_time != pauseopt_ctrl_max_time(ctrl)) + pauseopt_update_control(max_time); + + mutex_unlock(&pauseopt_lock); + + return count; +} +static DEVICE_ATTR_RW(max_time); + +static struct attribute *pauseopt_attrs[] = { + &dev_attr_enable_p01.attr, + &dev_attr_max_time.attr, + NULL +}; + +static struct attribute_group pauseopt_attr_group = { + .attrs = pauseopt_attrs, + .name = "pauseopt_control", +}; + +static int __init pauseopt_init(void) +{ + struct device *dev; + int ret; + + if (!boot_cpu_has(X86_FEATURE_PAUSEOPT)) + return -ENODEV; + + /* 
+ * Cache the original control MSR value before the control MSR is + * changed. This is the only place where orig_pauseopt_control_cached + * is modified. + */ + rdmsrl(MSR_PAUSEOPT_CONTROL, orig_pauseopt_control_cached); + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pauseopt:online", + pauseopt_cpu_online, pauseopt_cpu_offline); + if (ret < 0) { + /* + * On failure, the control MSR on all CPUs has the + * original control value. + */ + return ret; + } + + register_syscore_ops(&pauseopt_syscore_ops); + + /* + * Add pauseopt control interface. Ignore failure, so at least the + * default values are set up in case the machine manages to boot. + */ + dev = bus_get_dev_root(&cpu_subsys); + if (dev) { + ret = sysfs_create_group(&dev->kobj, &pauseopt_attr_group); + put_device(dev); + } + return ret; +} +device_initcall(pauseopt_init); diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 6efe3c0aafda..3180e2026c40 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -72,6 +72,9 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) */ if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); } static void init_zhaoxin(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 52e1f3f0b361..99355d27415e 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -84,6 +84,9 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); + + if (static_cpu_has(X86_FEATURE_PAUSEOPT)) + use_pauseopt_delay(); } /* diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 0e65d00e2339..96bf5b3baacd 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,6 +117,23 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } +/* 
+ * On ZHAOXIN the PAUSEOPT instruction waits until any of: + * 1) the delta of TSC counter exceeds the value provided in EDX:EAX + * 2) global timeout in PAUSEOPT_CONTROL is exceeded + * 3) an external interrupt occurs + */ +static void delay_halt_pauseopt(u64 unused, u64 cycles) +{ + u64 until = cycles; + u32 eax, edx; + + eax = lower_32_bits(until); + edx = upper_32_bits(until); + + __pauseopt(PAUSEOPT_P01_STATE, edx, eax); +} + /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. The input value is the number of TSC cycles @@ -183,6 +200,12 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } +void __init use_pauseopt_delay(void) +{ + delay_halt_fn = delay_halt_pauseopt; + delay_fn = delay_halt; +} + void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 5053119fb488..cd4af761cdde 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 30 /* N 32-bit words worth of info */ +#define NCAPINTS 31 /* N 32-bit words worth of info */ #define NBUGINTS 4 /* N 32-bit bug flags */ /* @@ -496,6 +496,9 @@ #define X86_FEATURE_HYGON_SM3 (29*32 + 1) /* "sm3" SM3 instructions */ #define X86_FEATURE_HYGON_SM4 (29*32 + 2) /* "sm4" SM4 instructions */ +/* Zhaoxin/Centaur-defined CPU features, CPUID level 0xC0000006, word 30 */ +#define X86_FEATURE_PAUSEOPT (30*32+ 0) /* ZHAOXIN PAUSEOPT */ + #define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */ /* diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 42e13e6e3980..eb2157f552b6 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -86,6 +86,14 @@ */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +#define 
MSR_PAUSEOPT_CONTROL 0x187f + +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_PAUSEOPT_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 -- Gitee From 5f755ee0f5fe5196fa4f7cf1a052722436d4949f Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Fri, 6 Mar 2026 17:49:03 +0800 Subject: [PATCH 071/231] KVM: x86: Introduce support for Zhaoxin PAUSEOPT instruction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for the PAUSEOPT instruction, a Zhaoxin-specific counterpart to Intel’s TPAUSE. Two key differences distinguish PAUSEOPT from TPAUSE: 1. PAUSEOPT uses a delta TSC, calculated as the smaller of (MSR_PAUSEOPT_CONTROL[31:2] << 2) and the EDX:EAX input to PAUSEOPT, subtracted from the current TSC. TPAUSE, by contrast, uses a target TSC computed from the smaller of (MSR_IA32_UMWAIT_CONTROL[31:2] << 2) and its EDX:EAX input. 2. PAUSEOPT currently supports only the C0.1 optimization state, whereas TPAUSE may support both C0.1 and C0.2 states. This feature depends on a pending QEMU patch to recognize PAUSEOPT, as well as the preceding patch in this series that adds Linux kernel support for PAUSEOPT. Guest Behavioral notes: - Writes to the PAUSEOPT/TPAUSE CONTROL MSR are ignored (WRMSR is nopped). - Executing PAUSEOPT or TPAUSE succeeds in entering the optimized state for the duration specified by EDX:EAX. - External interrupts and other defined events will break out of this optimized state. - On any VM exit that interrupts PAUSEOPT, such as an external-interrupt VM exit, if the VMM will resume execution at the instruction following PAUSEOPT, the software must clear the PAUSEOPT_TARGET_TSC field in the VMCS. This cleanup is implemented in this patch. 
Signed-off-by: LeoLiu-oc --- arch/x86/include/asm/msr-index.h | 10 ++ arch/x86/include/asm/vmx.h | 7 + arch/x86/include/asm/vmxfeatures.h | 5 +- arch/x86/include/uapi/asm/vmx.h | 1 + arch/x86/kernel/cpu/feat_ctl.c | 28 ++++ arch/x86/kvm/cpuid.c | 14 +- arch/x86/kvm/reverse_cpuid.h | 1 + arch/x86/kvm/vmx/capabilities.h | 12 ++ arch/x86/kvm/vmx/vmcs.h | 1 + arch/x86/kvm/vmx/vmx.c | 205 ++++++++++++++++++++++++++ arch/x86/kvm/vmx/vmx.h | 21 +++ arch/x86/kvm/x86.c | 5 + tools/arch/x86/include/uapi/asm/vmx.h | 1 + 13 files changed, 308 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 62d22086a2d9..f7d2e6306ea0 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -832,6 +832,13 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3_EN BIT(0) + /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 @@ -1168,6 +1175,9 @@ #define MSR_IA32_VMX_VMFUNC 0x00000491 #define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492 +/* Zhaoxin VT MSRs */ +#define MSR_ZX_VMX_PROCBASED_CTLS3 0x12A7 + /* VMX_BASIC bits and bitmasks */ #define VMX_BASIC_VMCS_SIZE_SHIFT 32 #define VMX_BASIC_TRUE_CTLS (1ULL << 55) diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0e73616b82f3..ebd9e13c5f59 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,6 +84,11 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) +/* + * Definitions of Zhaoxin Tertiary Processor-Based VM-Execution Controls. 
+ */ +#define ZX_TERTIARY_EXEC_GUEST_PAUSEOPT VMCS_CONTROL_BIT(GUEST_PAUSEOPT) + #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -235,6 +240,7 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, + PAUSEOPT_TARGET_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -284,6 +290,7 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, + ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index c6a7eed03914..eaafc0fabf61 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 5 /* N 32-bit words worth of info */ +#define NVMXINTS 6 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -89,4 +89,7 @@ /* Tertiary Processor-Based VM-Execution Controls, word 3 */ #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ + +/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 3 */ +#define VMX_FEATURE_GUEST_PAUSEOPT ( 4*32+ 0) /* pauseopt instruction in guest mode */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index a5faf6d88f1b..afab76af3e43 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -90,6 +90,7 @@ #define EXIT_REASON_XRSTORS 64 #define EXIT_REASON_UMWAIT 67 #define EXIT_REASON_TPAUSE 68 +#define EXIT_REASON_PAUSEOPT 68 #define EXIT_REASON_BUS_LOCK 74 #define 
EXIT_REASON_NOTIFY 75 diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 03851240c3e3..964f067f0b2d 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -17,11 +17,37 @@ enum vmx_feature_leafs { SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, + ZX_TERTIARY_CTLS, NR_VMX_FEATURE_WORDS, }; #define VMX_F(x) BIT(VMX_FEATURE_##x & 0x1f) +static void init_zhaoxin_ext_capabilities(struct cpuinfo_x86 *c) +{ + u32 ext_vmcs_cap = 0; + u32 proc_based_ctls3_high = 0; + u32 ign, msr_high; + int err; + + if (!(boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)) + return; + + err = rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &ext_vmcs_cap, &ign); + + if (!(ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3_EN)) + return; + + err = rdmsr_safe(MSR_ZX_VMX_PROCBASED_CTLS3, &ign, &msr_high); + if (!(msr_high & 0x1)) /* CTLS3 MSR doesn't exist */ + proc_based_ctls3_high = 0x1; /* set PAUSEOPT(bit0) */ + else + proc_based_ctls3_high = msr_high; + + c->vmx_capability[ZX_TERTIARY_CTLS] = proc_based_ctls3_high; +} + static void init_vmx_capabilities(struct cpuinfo_x86 *c) { u32 supported, funcs, ept, vpid, ign, low, high; @@ -97,6 +123,8 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); + + init_zhaoxin_ext_capabilities(c); } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c9f27a4df96f..b21c921fc9c0 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -867,6 +867,12 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); kvm_cpu_cap_clear(X86_FEATURE_RDPID); } + + /* + * Do not hide any features supported by this leaf, allow the guest to see + * the original information. Now leaf 0xC000_0006 EAX only supports PAUSEOPT. 
+ */ + kvm_cpu_cap_mask(CPUID_C000_0006_EAX, F(PAUSEOPT)); } EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); @@ -1412,17 +1418,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /*Just support up to 0xC0000004 now*/ - entry->eax = min(entry->eax, 0xC0000004); + /* Extended to 0xC0000006 */ + entry->eax = min(entry->eax, 0xC0000006); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; + case 0xC0000006: + cpuid_entry_override(entry, CPUID_C000_0006_EAX); + break; case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: + case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 74ef96cb2752..5fe7761bb97c 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -86,6 +86,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX}, [CPUID_1_ECX] = { 1, 0, CPUID_ECX}, [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX}, + [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX}, [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX}, [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX}, diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f9989..f287396720a9 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -61,6 +61,7 @@ struct vmcs_config { u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; + u32 zx_cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; u64 misc; @@ -138,6 +139,11 @@ static inline bool cpu_has_tertiary_exec_ctrls(void) CPU_BASED_ACTIVATE_TERTIARY_CONTROLS; } +static inline bool cpu_has_zx_tertiary_exec_ctrls(void) +{ + return !!vmcs_config.zx_cpu_based_3rd_exec_ctrl; +} + 
static inline bool cpu_has_vmx_virtualize_apic_accesses(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -255,6 +261,12 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } +static inline bool cpu_has_vmx_pauseopt(void) +{ + return vmcs_config.zx_cpu_based_3rd_exec_ctrl & + ZX_TERTIARY_EXEC_GUEST_PAUSEOPT; +} + static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 7c1996b433e2..0e07394f02dd 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -51,6 +51,7 @@ struct vmcs_controls_shadow { u32 exec; u32 secondary_exec; u64 tertiary_exec; + u32 zx_tertiary_exec; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 247bdad01cda..5280e3dd4f51 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1987,6 +1987,24 @@ int vmx_get_feature_msr(u32 msr, u64 *data) } } +static int zx_vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (!is_zhaoxin_cpu()) + return KVM_MSR_RET_UNHANDLED; + + switch (msr_info->index) { + case MSR_PAUSEOPT_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_pauseopt_enabled(vmx)) + return 1; + msr_info->data = vmx->msr_pauseopt_control; + return 0; + default: + return KVM_MSR_RET_UNHANDLED; /* Non-zhaoxin MSRs */ + } +} + /* * Reads an msr value (of 'msr_info->index') into 'msr_info->data'. * Returns 0 on success, non-0 otherwise. 
@@ -1997,6 +2015,17 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct vcpu_vmx *vmx = to_vmx(vcpu); struct vmx_uret_msr *msr; u32 index; + int ret = 0; + + ret = zx_vmx_get_msr(vcpu, msr_info); + switch (ret) { + case 0: + case 1: + return ret; + case KVM_MSR_RET_UNHANDLED: + ret = 0; + break; + } switch (msr_info->index) { #ifdef CONFIG_X86_64 @@ -2182,6 +2211,31 @@ bool vmx_is_valid_debugctl(struct kvm_vcpu *vcpu, u64 data, bool host_initiated) return !invalid; } +static int zx_vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 msr_index = msr_info->index; + u64 data = msr_info->data; + + if (!is_zhaoxin_cpu()) + return KVM_MSR_RET_UNHANDLED; + + switch (msr_index) { + case MSR_PAUSEOPT_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_pauseopt_enabled(vmx)) + return 1; + + /* The reserved bit 1 and non-32 bit [63:32] should be zero */ + if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) + return 1; + + vmx->msr_pauseopt_control = data; + return 0; + default: + return KVM_MSR_RET_UNHANDLED; /* Non-zhaoxin MSRs*/ + } +} + /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. 
@@ -2196,6 +2250,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) u64 data = msr_info->data; u32 index; + ret = zx_vmx_set_msr(vcpu, msr_info); + switch (ret) { + case 0: + case 1: + return ret; + case KVM_MSR_RET_UNHANDLED: + ret = 0; + break; + } + switch (msr_index) { case MSR_EFER: ret = kvm_set_msr_common(vcpu, msr_info); @@ -2586,6 +2650,44 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr) return ctl_opt & allowed; } +static int setup_zhaoxin_vmcs_controls(struct vmcs_config *vmcs_conf) +{ + u32 zx_ext_vmcs_cap, msr_high, ign; + u32 zx_ctl3 = 0; + int ret; + + if (!is_zhaoxin_cpu()) + return 0; + + /* + * Zhaoxin uses MSR_ZX_EXT_VMCS_CAPS to enumerate the 3rd CPU-based + * control, rather than a bit in the 2nd CPU-based control. + */ + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); + if (!(zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3_EN)) + return 0; + + ret = rdmsr_safe(MSR_ZX_VMX_PROCBASED_CTLS3, &ign, &msr_high); + if (msr_high & 0x1) { + /* ZX CPU with ZX_VMX_PROCBASED_CTLS3 support */ + ret = adjust_vmx_controls(KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL, + KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL, + MSR_ZX_VMX_PROCBASED_CTLS3, &zx_ctl3); + if (ret) + return -EIO; + } else { + /* ZX CPU without ZX_VMX_PROCBASED_CTLS3 support: + * assume PAUSEOPT is supported and set that bit + */ + zx_ctl3 |= ZX_TERTIARY_EXEC_GUEST_PAUSEOPT; + } + + /* Will be extended in the future for more 3rd controls */ + vmcs_conf->zx_cpu_based_3rd_exec_ctrl = zx_ctl3; + + return 0; +} + static int setup_vmcs_config(struct vmcs_config *vmcs_conf, struct vmx_capability *vmx_cap) { @@ -2714,6 +2816,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, _vmexit_control &= ~x_ctrl; } + if (setup_zhaoxin_vmcs_controls(vmcs_conf)) + return -EIO; + rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB.
*/ @@ -4545,6 +4650,26 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx) return exec_control; } +/* + * We might need to modify the way the third level control corrections + * are handled here in the future by introducing a check using the + * CTLS3 MSR. The current hardware does not include the design for CTLS3, + * but the designer is attempting to add this MSR implementation + * through ucode. + */ +static u32 zx_vmx_tertiary_exec_control(struct vcpu_vmx *vmx) +{ + struct kvm_vcpu *vcpu = &vmx->vcpu; + u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl; + + if (!guest_cpuid_has(vcpu, X86_FEATURE_PAUSEOPT)) + exec_control &= ~ZX_TERTIARY_EXEC_GUEST_PAUSEOPT; + + /* Adjust other features here */ + + return exec_control; +} + /* * Adjust a single secondary execution control bit to intercept/allow an * instruction in the guest. This is usually done based on whether or not a @@ -4731,6 +4856,25 @@ static int vmx_vcpu_precreate(struct kvm *kvm) #define VMX_XSS_EXIT_BITMAP 0 +static void zx_setup_3rd_ctrls(struct vcpu_vmx *vmx) +{ + if (cpu_has_zx_tertiary_exec_ctrls()) { + zx_tertiary_exec_controls_set(vmx, zx_vmx_tertiary_exec_control(vmx)); + /* + * Regardless of whether the guest has PAUSEOPT support or not, + * as long as there is a 3rd control, we need to initialize this + * field to 0 + */ + if (cpu_has_vmx_pauseopt()) + vmcs_write64(PAUSEOPT_TARGET_TSC, 0); + } +} + +static void zx_init_vmcs(struct vcpu_vmx *vmx) +{ + zx_setup_3rd_ctrls(vmx); +} + static void init_vmcs(struct vcpu_vmx *vmx) { struct kvm *kvm = vmx->vcpu.kvm; @@ -4847,6 +4991,7 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmcs_write32(TPR_THRESHOLD, 0); } + zx_init_vmcs(vmx); vmx_setup_uret_msrs(vmx); } @@ -4888,6 +5033,9 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx->rmode.vm86_active = 0; vmx->spec_ctrl = 0; + vmx->msr_pauseopt_control = 0; + vmx->pauseopt_in_progress = false; + vmx->pauseopt_rip = 0; vmx->msr_ia32_umwait_control = 0; @@ -6315,6 
+6463,18 @@ void dump_vmcs(struct kvm_vcpu *vcpu) pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); + + if (cpu_has_zx_tertiary_exec_ctrls()) { + /* + * Now zhaoxin only supports specific vmcs fields on 3rd exec control, + * may extend in the future. + */ + pr_err("*** Zhaoxin Specific Fields ***\n"); + pr_err("Zhaoxin TertiaryExec Cntl = 0x%016x\n", + vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL)); + pr_err("PAUSEOPT Saved TSC = 0x%016llx\n", vmcs_read64(PAUSEOPT_TARGET_TSC)); + } + pr_err("*** Guest State ***\n"); pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n", vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW), @@ -7362,6 +7522,37 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, guest_state_exit_irqoff(); } +static void zx_vmx_vcpu_run_pre(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + unsigned long new_rip; + + if (vmx->pauseopt_in_progress) { + new_rip = kvm_rip_read(vcpu); + if (new_rip != vmx->pauseopt_rip) { + /* + * When the execution of PAUSEOPT in the guest is interrupted by + * other events, causing a vmexit, the pauseopt target tsc should be + * cleared to zero before the next vmentry if guest rip changed, + * avoiding re-enter pauseopt optimized state after enter guest.
+ */ + vmcs_write64(PAUSEOPT_TARGET_TSC, 0); + vmx->pauseopt_in_progress = false; + vmx->pauseopt_rip = 0; + } + } +} + +static void zx_vmx_vcpu_run_post(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (cpu_has_vmx_pauseopt() && vmcs_read64(PAUSEOPT_TARGET_TSC)) { + vmx->pauseopt_in_progress = true; + vmx->pauseopt_rip = kvm_rip_read(vcpu); + } +} + static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) { bool force_immediate_exit = run_flags & KVM_RUN_FORCE_IMMEDIATE_EXIT; @@ -7408,6 +7599,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); vcpu->arch.regs_dirty = 0; + if (is_zhaoxin_cpu()) + zx_vmx_vcpu_run_pre(vcpu); if (run_flags & KVM_RUN_LOAD_GUEST_DR6) set_debugreg(vcpu->arch.dr6, 6); @@ -7513,6 +7706,8 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) return EXIT_FASTPATH_NONE; vmx->loaded_vmcs->launched = 1; + if (is_zhaoxin_cpu()) + zx_vmx_vcpu_run_post(vcpu); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); @@ -7868,6 +8063,8 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); + zx_setup_3rd_ctrls(vmx); + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -7978,6 +8175,12 @@ static __init u64 vmx_get_perf_capabilities(void) return perf_cap; } +static void zx_vmx_set_cpu_caps(void) +{ + if (cpu_has_vmx_pauseopt()) + kvm_cpu_cap_check_and_set(X86_FEATURE_PAUSEOPT); +} + static __init void vmx_set_cpu_caps(void) { kvm_set_cpu_caps(); @@ -8026,6 +8229,8 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + + zx_vmx_set_cpu_caps(); } static int vmx_check_intercept_io(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/vmx/vmx.h 
b/arch/x86/kvm/vmx/vmx.h index 30c86e88eb84..2404c7957d7b 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -279,6 +279,9 @@ struct vcpu_vmx { u64 spec_ctrl; u32 msr_ia32_umwait_control; + u32 msr_pauseopt_control; + bool pauseopt_in_progress; + unsigned long pauseopt_rip; /* * loaded_vmcs points to the VMCS currently used in this vcpu. For a @@ -602,6 +605,9 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) +#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 +#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL (ZX_TERTIARY_EXEC_GUEST_PAUSEOPT) + #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -634,6 +640,7 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) +BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -736,6 +743,12 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } +static inline bool vmx_guest_pauseopt_enabled(struct vcpu_vmx *vmx) +{ + return zx_tertiary_exec_controls_get(vmx) & + ZX_TERTIARY_EXEC_GUEST_PAUSEOPT; +} + static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) @@ -779,4 +792,12 @@ static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu) to_vmx(vcpu)->nested.enlightened_vmcs_enabled; } +static inline bool is_zhaoxin_cpu(void) +{ + /* Now zhaoxin owns 2 x86 vendor brands, Zhaoxin and Centaur */ + return (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR); +} + +#define KVM_MSR_RET_UNHANDLED 2 #endif /* __KVM_X86_VMX_H */ diff --git 
a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0e6d18764a4e..ef8a9684500d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -345,6 +345,7 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, + MSR_PAUSEOPT_CONTROL, MSR_IA32_XFD, MSR_IA32_XFD_ERR, }; @@ -7256,6 +7257,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; + case MSR_PAUSEOPT_CONTROL: + if (!kvm_cpu_cap_has(X86_FEATURE_PAUSEOPT)) + return; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index a5faf6d88f1b..afab76af3e43 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -90,6 +90,7 @@ #define EXIT_REASON_XRSTORS 64 #define EXIT_REASON_UMWAIT 67 #define EXIT_REASON_TPAUSE 68 +#define EXIT_REASON_PAUSEOPT 68 #define EXIT_REASON_BUS_LOCK 74 #define EXIT_REASON_NOTIFY 75 -- Gitee From d993793fd43e6af5a56789ef77a85c1c183a01ea Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:21:57 +0800 Subject: [PATCH 072/231] KVM: arm64: vgic-v3: Upgrade AP1Rn to 64bit. commit d46e52452afc8f097ccc91555a429928ddfdc8a7 openEuler With the advent of FEAT_GIC_NMI, ICH_AP1R0_EL2 is now a 64bit register, as the NMI priority is encoded in bit 63. Upgrade the whole of the AP1Rn array to 64bit, leaving the Group0 equivalent to 32bit. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 11 ++++++++--- include/kvm/arm_vgic.h | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 6cb638b184b1..05955e369c51 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -130,7 +130,11 @@ static void __vgic_v3_write_ap0rn(u32 val, int n) } } -static void __vgic_v3_write_ap1rn(u32 val, int n) +/* + * Contrary to ICH_AP0Rn_EL2, ICH_AP1R0_EL2 is 64bit, thanks to the + * NMI bit stuck at [63]. Isn't that fun? + */ +static void __vgic_v3_write_ap1rn(u64 val, int n) { switch (n) { case 0: @@ -172,9 +176,10 @@ static u32 __vgic_v3_read_ap0rn(int n) return val; } -static u32 __vgic_v3_read_ap1rn(int n) +/* Same remark about the 64bit-ness of AP1R0 */ +static u64 __vgic_v3_read_ap1rn(int n) { - u32 val; + u64 val; switch (n) { case 0: diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index acb521b6bfca..757e403c80d1 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -329,7 +329,7 @@ struct vgic_v3_cpu_if { u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ u32 vgic_ap0r[4]; - u32 vgic_ap1r[4]; + u64 vgic_ap1r[4]; u64 vgic_lr[VGIC_V3_MAX_LRS]; /* -- Gitee From fa3d9c688e0fbaf8d45145c33175ab2e4fe1d77f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:21:58 +0800 Subject: [PATCH 073/231] KVM: arm64: vgic-v3: Allow the NMI state to make it into the LRs commit 7b2984c427395f40cfc3154d7484f2f11052a9e2 openEuler Add the new NMI state to the vgic IRQ state, and allow it to make it into the LRs. Nothing can set it yet, so no impact is expected from this change. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/vgic/vgic-v3.c | 5 ++++- include/kvm/arm_vgic.h | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 307b0dcefa43..e54ab2ed6d4e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -956,6 +956,7 @@ #define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1) #define ICH_LR_EOI (1ULL << 41) +#define ICH_LR_NMI (1ULL << 59) #define ICH_LR_GROUP (1ULL << 60) #define ICH_LR_HW (1ULL << 61) #define ICH_LR_STATE (3ULL << 62) diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 2a03c16b3bb3..91dc67e87057 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -183,7 +183,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) if (irq->group) val |= ICH_LR_GROUP; - val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT; + if (irq->nmi) + val |= ICH_LR_NMI; + else + val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT; vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val; } diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 757e403c80d1..2042495f86b6 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -155,6 +155,7 @@ struct vgic_irq { bool active; /* not used for LPIs */ bool enabled; bool hw; /* Tied to HW IRQ */ + bool nmi; /* Configured as NMI */ struct kref refcount; /* Used for LPIs */ u32 hwintid; /* HW INTID number */ unsigned int host_irq; /* linux irq corresponding to hwintid */ -- Gitee From 35e6fe5864c4e51946fd04e266c16f05c77fdffb Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:21:59 +0800 Subject: [PATCH 074/231] KVM: arm64: vgic-v3: Make NMI priority RES0 commit 49c1f33275e1e0afd37d2b2a1c3faae1baa2202f openEuler The priority of an NMI is always RES0. 
Let's enforce it when the guest accesses the priority MMIO range. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-mmio.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index ff558c05e990..77709dae0e47 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -683,13 +683,17 @@ unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len) { u32 intid = VGIC_ADDR_TO_INTID(addr, 8); + unsigned long flags; int i; u64 val = 0; for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - val |= (u64)irq->priority << (i * 8); + raw_spin_lock_irqsave(&irq->irq_lock, flags); + if (!irq->nmi) + val |= (u64)irq->priority << (i * 8); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -716,10 +720,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); raw_spin_lock_irqsave(&irq->irq_lock, flags); - /* Narrow the priority range to what we actually support */ - irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); - if (irq->hw && vgic_irq_is_sgi(irq->intid)) - vgic_update_vsgi(irq); + if (!irq->nmi) { + /* + * Narrow the priority range to what we + * actually support + */ + irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); + if (irq->hw && vgic_irq_is_sgi(irq->intid)) + vgic_update_vsgi(irq); + } raw_spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); -- Gitee From 9600d10acca13075d1138270ef776a758fc223fa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:00 +0800 Subject: [PATCH 075/231] KVM: arm64: vgic-v4: Propagate the NMI state into the GICv4.1 VSGI configuration commit 
c35e09506c28e7cf17b0a699f72c490d377343b6 openEuler Just as we now allow the NMI state to make it into the LRs, allow the same state to be propagated into the VSGI configuration. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-mmio.c | 6 ++++-- arch/arm64/kvm/vgic/vgic-v4.c | 1 + arch/arm64/kvm/vgic/vgic.h | 1 + drivers/irqchip/irq-gic-v3-its.c | 9 +++++++++ drivers/irqchip/irq-gic-v4.c | 3 ++- include/linux/irqchip/arm-gic-v4.h | 4 +++- 6 files changed, 20 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 77709dae0e47..559a1f0effe5 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -61,9 +61,11 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, return value; } -static void vgic_update_vsgi(struct vgic_irq *irq) +void vgic_update_vsgi(struct vgic_irq *irq) { - WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group)); + WARN_ON(its_prop_update_vsgi(irq->host_irq, + irq->nmi ? 
0 : irq->priority, + irq->group, irq->nmi)); } void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 339a55194b2c..0bcdf68e14be 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -110,6 +110,7 @@ static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq) vpe->sgi_config[irq->intid].enabled = irq->enabled; vpe->sgi_config[irq->intid].group = irq->group; vpe->sgi_config[irq->intid].priority = irq->priority; + vpe->sgi_config[irq->intid].nmi = irq->nmi; } static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 3fa68827dc89..ce72e26cba34 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -363,6 +363,7 @@ bool vgic_supports_direct_msis(struct kvm *kvm); int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); +void vgic_update_vsgi(struct vgic_irq *irq); void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 294c853beb88..bf050d3bdba5 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -740,6 +740,7 @@ struct its_cmd_desc { u8 sgi; u8 priority; bool enable; + bool nmi; bool group; bool clear; } its_vsgi_cmd; @@ -901,6 +902,11 @@ static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); } +static void its_encode_sgi_nmi(struct its_cmd_block *cmd, bool nmi) +{ + its_mask_encode(&cmd->raw_cmd[0], nmi, 11, 11); +} + static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) { its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); @@ -1362,6 +1368,7 @@ static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, 
its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_nmi(cmd, desc->its_vsgi_cmd.nmi); its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); @@ -4639,6 +4646,7 @@ static void its_configure_sgi(struct irq_data *d, bool clear) desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.nmi = vpe->sgi_config[d->hwirq].nmi; desc.its_vsgi_cmd.clear = clear; /* @@ -4762,6 +4770,7 @@ static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) case PROP_UPDATE_VSGI: vpe->sgi_config[d->hwirq].priority = info->priority; vpe->sgi_config[d->hwirq].group = info->group; + vpe->sgi_config[d->hwirq].nmi = info->nmi; its_configure_sgi(d, false); return 0; diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c index 94d56a03b175..963c57bac4cd 100644 --- a/drivers/irqchip/irq-gic-v4.c +++ b/drivers/irqchip/irq-gic-v4.c @@ -359,13 +359,14 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv) return irq_set_vcpu_affinity(irq, &info); } -int its_prop_update_vsgi(int irq, u8 priority, bool group) +int its_prop_update_vsgi(int irq, u8 priority, bool group, bool nmi) { struct its_cmd_info info = { .cmd_type = PROP_UPDATE_VSGI, { .priority = priority, .group = group, + .nmi = nmi, }, }; diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index bf9e0640288d..5e8cc8a31329 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -57,6 +57,7 @@ struct its_vpe { u8 priority; bool enabled; bool group; + bool nmi; } sgi_config[16]; }; }; @@ -126,6 +127,7 @@ struct its_cmd_info { struct { u8 priority; bool group; + bool 
nmi; }; }; }; @@ -140,7 +142,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map); int its_get_vlpi(int irq, struct its_vlpi_map *map); int its_unmap_vlpi(int irq); int its_prop_update_vlpi(int irq, u8 config, bool inv); -int its_prop_update_vsgi(int irq, u8 priority, bool group); +int its_prop_update_vsgi(int irq, u8 priority, bool group, bool nmi); struct irq_domain_ops; int its_init_v4(struct irq_domain *domain, -- Gitee From d162f467fed497cbe6c53470c2784bd82cb74e5a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:01 +0800 Subject: [PATCH 076/231] KVM: arm64: vgic-v3: Use the NMI attribute as part of the AP-list sorting commit efd9735b630ca7d7708294de4ff93bbe00106e7d openEuler Since we want NMIs to make it quicker into the LRs, add them to the priority sorting. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 0650a5b57ed9..36a815c31751 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -261,6 +261,7 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) * * Otherwise things should be sorted by the priority field and the GIC * hardware support will take care of preemption of priority groups etc. + * NMI acts as a super-priority. * * Return negative if "a" sorts before "b", 0 to preserve order, and positive * to sort "b" before "a". 
@@ -296,7 +297,12 @@ static int vgic_irq_cmp(void *priv, const struct list_head *a, goto out; } - /* Both pending and enabled, sort by priority */ + /* Both pending and enabled, sort by NMI and then priority */ + if (irqa->nmi != irqb->nmi) { + ret = (int)irqb->nmi - (int)irqa->nmi; + goto out; + } + ret = irqa->priority - irqb->priority; out: raw_spin_unlock(&irqb->irq_lock); -- Gitee From ef80f819006513b3a6902c955eee00fa1be50aa8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:02 +0800 Subject: [PATCH 077/231] KVM: arm64: vgic-v3: Add support for GIC{D,R}_INMIR registers commit 620365f70542a72df1a146b75b6447fa6bb79a7a openEuler Plumb the distributor and redistributor NMI configuration registers into the MMIO spaces. The update to the registers is gated by a distributor flag which is never set, so there is still no observable behaviour change... Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 55 ++++++++++++++++++++++++++++++ include/kvm/arm_vgic.h | 1 + 2 files changed, 56 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index cea9745fa2c5..0d78577b4080 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -595,6 +595,55 @@ static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu, vgic_set_rdist_busy(vcpu, false); } +static unsigned long vgic_mmio_read_nmi(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + u32 value = 0; + int i; + + /* Loop over all IRQs affected by this read */ + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + + if (irq->nmi) + value |= (1U << i); + + vgic_put_irq(vcpu->kvm, irq); + } + + return value; +} + +static void vgic_mmio_write_nmi(struct kvm_vcpu *vcpu, gpa_t addr, + unsigned int len, unsigned long val) +{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1); + unsigned long flags; + int i; + + if (!vcpu->kvm->arch.vgic.has_nmi) + return; + + for (i = 0; i < len * 8; i++) { + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); + bool was_nmi; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); + + was_nmi = irq->nmi; + irq->nmi = (val & BIT(i)); + + if (irq->hw && vgic_irq_is_sgi(irq->intid) && + was_nmi != irq->nmi) + vgic_update_vsgi(irq); + + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + + vgic_put_irq(vcpu->kvm, irq); + } +} + /* * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the * redistributors, while SPIs are covered by registers in the distributor @@ -668,6 +717,9 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR, vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_INMIR, + vgic_mmio_read_nmi, vgic_mmio_write_nmi, NULL, NULL, 1, + VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER, vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64, VGIC_ACCESS_64bit | VGIC_ACCESS_32bit), @@ -752,6 +804,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR, vgic_mmio_read_raz, vgic_mmio_write_wi, 4, VGIC_ACCESS_32bit), + REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_INMIR0, + vgic_mmio_read_nmi, vgic_mmio_write_nmi, 4, + VGIC_ACCESS_32bit), }; unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 2042495f86b6..1b6a4bbc4a19 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -280,6 +280,7 @@ struct vgic_dist { struct vgic_io_device dist_iodev; + bool has_nmi; bool has_its; bool table_write_in_progress; -- Gitee From 7c448d992040f9823204264d128528ee6b790f24 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 
09:22:03 +0800 Subject: [PATCH 078/231] KVM: arm64: vgic-v3: Add userspace selection for GICv3.3 NMI commit ba88649d31ff542c4b05616accc8d431338bf266 openEuler In order to allow the GIC NMI support to be selected from userspace, add userspace access to GICD_TYPER, which allows the NMI support bit to be set. This is gated by a global capability that nobody can set yet, so no observable change is expected. Again. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-init.c | 10 +++++++++- arch/arm64/kvm/vgic/vgic-mmio-v2.c | 1 + arch/arm64/kvm/vgic/vgic-mmio-v3.c | 15 ++++++++++++++- include/kvm/arm_vgic.h | 6 +++++- 4 files changed, 29 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 35699f10790a..0f299f87cae2 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -336,8 +336,16 @@ int vgic_init(struct kvm *kvm) * If userspace didn't set the GIC implementation revision, * default to the latest and greatest. You know want it. */ - if (!dist->implementation_rev) + if (!dist->implementation_rev) { dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST; + /* + * Advertise NMI if available. Userspace that explicitly + * doesn't want NMI will have written to GICD_{IIDR,TYPER} + * to set the implementation and the NMI support status. 
+ */ + dist->has_nmi = kvm_vgic_global_state.has_nmi; + } + dist->initialized = true; out: diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index e070cda86e12..365d83b0f263 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -95,6 +95,7 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu, switch (reg) { case KVM_VGIC_IMP_REV_2: case KVM_VGIC_IMP_REV_3: + case KVM_VGIC_IMP_REV_4: vcpu->kvm->arch.vgic.v2_groups_user_writable = true; dist->implementation_rev = reg; return 0; diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 0d78577b4080..9f2ab25d4972 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -78,6 +78,8 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu, case GICD_TYPER: value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS; value = (value >> 5) - 1; + if (vgic->has_nmi) + value |= GICD_TYPER_NMI; if (vgic_has_its(vcpu->kvm)) { value |= (INTERRUPT_ID_BITS_ITS - 1) << 19; value |= GICD_TYPER_LPIS; @@ -158,6 +160,13 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, u32 reg; switch (addr & 0x0c) { + case GICD_TYPER: + if (dist->implementation_rev >= KVM_VGIC_IMP_REV_4 && + kvm_vgic_global_state.has_nmi) + dist->has_nmi = val & GICD_TYPER_NMI; + else if (val & GICD_TYPER_NMI) + return -EINVAL; + return 0; case GICD_TYPER2: if (val != vgic_mmio_read_v3_misc(vcpu, addr, len)) return -EINVAL; @@ -171,6 +180,10 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, switch (reg) { case KVM_VGIC_IMP_REV_2: case KVM_VGIC_IMP_REV_3: + /* Disable NMI on selecting an older revision */ + dist->has_nmi = false; + fallthrough; + case KVM_VGIC_IMP_REV_4: dist->implementation_rev = reg; return 0; default: @@ -186,7 +199,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu, return 0; } - vgic_mmio_write_v3_misc(vcpu, addr, len, val); + /* Not 
reachable... */ return 0; } diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 1b6a4bbc4a19..3ec28b94e6d1 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -91,6 +91,9 @@ struct vgic_global { bool has_gicv4; bool has_gicv4_1; + /* NMI */ + bool has_nmi; + /* Pseudo GICv3 from outer space */ bool no_hw_deactivation; @@ -251,7 +254,8 @@ struct vgic_dist { u32 implementation_rev; #define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */ #define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */ -#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3 +#define KVM_VGIC_IMP_REV_4 4 /* GICv3 NMI */ +#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_4 /* Userspace can write to GICv2 IGROUPR */ bool v2_groups_user_writable; -- Gitee From ebaa9af7491defb0125b25b8344564a7fba43cf5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:04 +0800 Subject: [PATCH 079/231] KVM: arm64: vgic-debug: Add the NMI field to the debug output commit 059de40b831b8bffd7f017d81223aaedfd4d1455 openEuler Add the per-INTID NMI state to the vgic-state file. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-debug.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index 85606a531dc3..a51f6ce7c637 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -155,7 +155,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist) seq_printf(s, "P=pending_latch, L=line_level, A=active\n"); seq_printf(s, "E=enabled, H=hw, C=config (level=1, edge=0)\n"); - seq_printf(s, "G=group\n"); + seq_printf(s, "G=group, N=NMI\n"); } static void print_header(struct seq_file *s, struct vgic_irq *irq, @@ -170,8 +170,8 @@ static void print_header(struct seq_file *s, struct vgic_irq *irq, } seq_printf(s, "\n"); - seq_printf(s, "%s%2d TYP ID TGT_ID PLAEHCG HWID TARGET SRC PRI VCPU_ID\n", hdr, id); - seq_printf(s, "----------------------------------------------------------------\n"); + seq_printf(s, "%s%2d TYP ID TGT_ID PLAEHCGN HWID TARGET SRC PRI VCPU_ID\n", hdr, id); + seq_printf(s, "-----------------------------------------------------------------\n"); } static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, @@ -204,7 +204,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, seq_printf(s, " %s %4d " " %2d " - "%d%d%d%d%d%d%d " + "%d%d%d%d%d%d%d%d " "%8d " "%8x " " %2x " @@ -220,6 +220,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, irq->hw, irq->config == VGIC_CONFIG_LEVEL, irq->group, + irq->nmi, irq->hwintid, irq->mpidr, irq->source, -- Gitee From 07d8a0d142b11c48f677139bf8ffb0299073e58f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:05 +0800 Subject: [PATCH 080/231] KVM: arm64: Allow userspace to control ID_AA64PFR1_EL1.NMI commit 395d99d8b0448f995cf3adfd81330525ede3929a openEuler In order for userspace to be 
able to control the CPU side of the NMI distribution (just like we have it on the GIC side), allow it to set/clear ID_AA64PFR1_EL1.NMI. This relies on a per-VM property that defaults to the host value. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/arm.c | 3 +++ arch/arm64/kvm/sys_regs.c | 9 +++++++++ 3 files changed, 14 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5d43bd341450..739a633710f3 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -234,6 +234,8 @@ struct kvm_arch { /* VTCR_EL2 value for this VM */ u64 vtcr; + u8 pfr1_nmi; + /* Interrupt controller */ struct vgic_dist vgic; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5631f03be55c..921b85ac0ca7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -302,6 +302,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) /* The maximum number of VCPUs is limited by the host's GIC model */ kvm->max_vcpus = kvm_arm_default_max_vcpus(); + if (system_uses_nmi()) + kvm->arch.pfr1_nmi = ID_AA64PFR1_EL1_NMI_IMP; + kvm_arm_init_hypercalls(kvm); bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6238698e095e..85a1c6264031 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1399,6 +1399,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); + val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI), vcpu->kvm->arch.pfr1_nmi); val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); break; case SYS_ID_AA64ISAR1_EL1: @@ -1680,6 +1681,13 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, { u64 hw_val = 
read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK; + u8 nmi; + + nmi = cpuid_feature_extract_unsigned_field(user_val, ID_AA64PFR1_EL1_NMI_SHIFT); + if (nmi > ID_AA64PFR1_EL1_NMI_IMP || (nmi && !system_uses_nmi())) + return -EINVAL; + + vcpu->kvm->arch.pfr1_nmi = nmi; /* See set_id_aa64pfr0_el1 for comment about MPAM */ if ((hw_val & mpam_mask) == (user_val & mpam_mask)) @@ -1688,6 +1696,7 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, return set_id_reg(vcpu, rd, user_val); } + /* * cpufeature ID register user accessors * -- Gitee From 245be6825c1c3329288f96486d8a690d2ba016de Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:06 +0800 Subject: [PATCH 081/231] KVM: arm64: Don't trap ALLINT accesses if the vcpu has FEAT_NMI commit e2895896e01f2afc3da67eab497f875a127cf4d1 openEuler This is counter-productive, so let it rip. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hyp/include/hyp/switch.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index b9a26ff76211..8d0c2bb09524 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -224,7 +224,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); } - if (cpus_have_final_cap(ARM64_HAS_NMI)) + if (cpus_have_final_cap(ARM64_HAS_NMI) && + !kern_hyp_va(vcpu->kvm)->arch.pfr1_nmi) sysreg_clear_set_s(SYS_HCRX_EL2, 0, HCRX_EL2_TALLINT); *host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2); @@ -258,7 +259,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) { write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2); - if (cpus_have_final_cap(ARM64_HAS_NMI)) + if (cpus_have_final_cap(ARM64_HAS_NMI) && + 
!kern_hyp_va(vcpu->kvm)->arch.pfr1_nmi) sysreg_clear_set_s(SYS_HCRX_EL2, HCRX_EL2_TALLINT, 0); write_sysreg(0, hstr_el2); -- Gitee From c16f5f710945dd5284d3b37a965c4c042ccd76ff Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:07 +0800 Subject: [PATCH 082/231] KVM: arm64: vgic-v3: Don't inject an NMI if the vcpu doesn't have FEAT_NMI commit 8b487c6098bb58981d0c05aea3661e5bde5fc7b8 openEuler Since it is allowed to have any combination of CPU and GIC supporting NMIs or not, let's drop the NMI feature at the point where it is injected in the LR if the vcpu doesn't have FEAT_NMI. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-v3.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 91dc67e87057..654a37e0311e 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -183,7 +183,8 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) if (irq->group) val |= ICH_LR_GROUP; - if (irq->nmi) + if (vcpu->kvm->arch.pfr1_nmi == ID_AA64PFR1_EL1_NMI_IMP && + irq->nmi) val |= ICH_LR_NMI; else val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT; -- Gitee From d5a8648fe68f31943445289e900ca34453eec884 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:08 +0800 Subject: [PATCH 083/231] KVM: arm64: Allow GICv3.3 NMI if the host supports it commit e5c5bbf9871230a3497e6ad44a49877eef6a1444 openEuler Let the GICv3 driver populate a has_nmi flag in the kvm_gic_info structure, and communicate it to the rest of KVM. We disallow the use of NMI if trapping of the CPU interface is enabled, because life is definitely too short to write more emulation. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/sys_regs.c | 4 ++-- arch/arm64/kvm/vgic/vgic-v3.c | 6 ++++++ drivers/irqchip/irq-gic-v3.c | 1 + include/linux/irqchip/arm-vgic-info.h | 2 ++ 5 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 921b85ac0ca7..0653cf47f6c6 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -302,7 +302,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) /* The maximum number of VCPUs is limited by the host's GIC model */ kvm->max_vcpus = kvm_arm_default_max_vcpus(); - if (system_uses_nmi()) + if (system_uses_nmi() && !static_branch_unlikely(&vgic_v3_cpuif_trap)) kvm->arch.pfr1_nmi = ID_AA64PFR1_EL1_NMI_IMP; kvm_arm_init_hypercalls(kvm); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 85a1c6264031..9802fac7222c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1684,7 +1684,8 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, u8 nmi; nmi = cpuid_feature_extract_unsigned_field(user_val, ID_AA64PFR1_EL1_NMI_SHIFT); - if (nmi > ID_AA64PFR1_EL1_NMI_IMP || (nmi && !system_uses_nmi())) + if (nmi > ID_AA64PFR1_EL1_NMI_IMP || + (nmi && (!system_uses_nmi() || static_branch_unlikely(&vgic_v3_cpuif_trap)))) return -EINVAL; vcpu->kvm->arch.pfr1_nmi = nmi; @@ -1696,7 +1697,6 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, return set_id_reg(vcpu, rd, user_val); } - /* * cpufeature ID register user accessors * diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 654a37e0311e..b5fbda382f3e 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -716,6 +716,12 @@ int vgic_v3_probe(const struct gic_kvm_info *info) static_branch_enable(&vgic_v3_cpuif_trap); } + if (info->has_nmi) { + kvm_vgic_global_state.has_nmi = 
!static_branch_unlikely(&vgic_v3_cpuif_trap); + kvm_info("GICv3 NMI support %s\n", + kvm_vgic_global_state.has_nmi ? "enabled" : "disabled due to trapping"); + } + kvm_vgic_global_state.vctrl_base = NULL; kvm_vgic_global_state.type = VGIC_V3; kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2eb6ae23f3fb..c2e45fb4a74d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -2726,6 +2726,7 @@ static void __init gic_acpi_setup_kvm_info(void) gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + gic_v3_kvm_info.has_nmi = has_v3_3_nmi(); vgic_set_kvm_info(&gic_v3_kvm_info); } diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h index a75b2c7de69d..919f71c76477 100644 --- a/include/linux/irqchip/arm-vgic-info.h +++ b/include/linux/irqchip/arm-vgic-info.h @@ -32,6 +32,8 @@ struct gic_kvm_info { bool has_v4; /* rvpeid support */ bool has_v4_1; + /* NMI support */ + bool has_nmi; /* Deactivation impared, subpar stuff */ bool no_hw_deactivation; }; -- Gitee From 71da5448d87edc81381bafd99f21b398fa840151 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:09 +0800 Subject: [PATCH 084/231] KVM: arm64: Handle traps of ALLINT commit dfe028fe605ad0dd165d7283767cd42a8e290a8f openEuler Although we do force a trap of the ALLINT system register when FEAT_NMI isn't exposed to the guest, we don't provide any handler. That's bad. Let's fix that. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/sysreg.h | 2 ++ arch/arm64/kvm/sys_regs.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e54ab2ed6d4e..280f47bb483e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -292,6 +292,8 @@ #define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0) #define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1) +#define SYS_ALLINT sys_reg(3, 0, 4, 3, 0) + #define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) #define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 9802fac7222c..ddd9195ddcb8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2061,6 +2061,9 @@ static bool access_spsr(struct kvm_vcpu *vcpu, * guest... */ static const struct sys_reg_desc sys_reg_descs[] = { + { SYS_DESC(SYS_ALLINT_CLR), undef_access }, + { SYS_DESC(SYS_ALLINT_SET), undef_access }, + { SYS_DESC(SYS_DC_ISW), access_dcsw }, { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, @@ -2279,6 +2282,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SPSR_EL1), access_spsr}, { SYS_DESC(SYS_ELR_EL1), access_elr}, + { SYS_DESC(SYS_ALLINT), undef_access }, + { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, -- Gitee From 7d19a5099ac25044d4ac51dbe27966ca1086771a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:10 +0800 Subject: [PATCH 085/231] arm64: Decouple KVM from CONFIG_ARM64_NMI commit b8c8255e1d74937638bca666264404952c60395a openEuler Even if the host isn't using NMIs, that isn't a reason for preventing the feature being available to guests. Decouple the two and let the 
capability be available irrespective of the host NMI level of support. Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/cpufeature.h | 3 ++- arch/arm64/kernel/cpufeature.c | 13 +++++++++---- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/sys_regs.c | 2 +- drivers/irqchip/irq-gic-v3.c | 11 ++++++++++- 5 files changed, 23 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 4163c2728bef..f60261834a2e 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -845,7 +845,8 @@ static __always_inline bool system_uses_irq_prio_masking(void) static __always_inline bool system_uses_nmi(void) { return IS_ENABLED(CONFIG_ARM64_NMI) && - cpus_have_const_cap(ARM64_USES_NMI); + cpus_have_const_cap(ARM64_USES_NMI) && + !system_uses_irq_prio_masking(); } static inline bool system_supports_mte(void) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index edcbe54044d1..10717ed14159 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2314,20 +2314,24 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry } #endif -#ifdef CONFIG_ARM64_NMI static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope) { if (!has_cpuid_feature(entry, scope)) return false; /* + * NMI support was not enabled in the kernel, but can still be + * used by guests. Let the world know. + * * Having both real and pseudo NMIs enabled simultaneously is * likely to cause confusion. Since pseudo NMIs must be * enabled with an explicit command line option, if the user * has set that option on a system with real NMIs for some * reason assume they know what they're doing. 
*/ - if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) { + if (!IS_ENABLED(CONFIG_ARM64_NMI)) + pr_info("CONFIG_ARM64_NMI disabled, using NMIs for guests only\n"); + else if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) { pr_info("Pseudo NMI enabled, not using architected NMI\n"); return false; } @@ -2335,6 +2339,7 @@ static bool use_nmi(const struct arm64_cpu_capabilities *entry, int scope) return true; } +#ifdef CONFIG_ARM64_NMI static void nmi_enable(const struct arm64_cpu_capabilities *__unused) { /* @@ -3033,7 +3038,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_fpmr, ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP) }, -#ifdef CONFIG_ARM64_NMI { .desc = "Non-maskable Interrupts present", .capability = ARM64_HAS_NMI, @@ -3055,9 +3059,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .field_width = 4, .min_field_value = ID_AA64PFR1_EL1_NMI_IMP, .matches = use_nmi, +#ifdef CONFIG_ARM64_NMI .cpu_enable = nmi_enable, - }, #endif + }, #ifdef CONFIG_ARM64_LS64 { .desc = "LS64", diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0653cf47f6c6..e901dc61ee28 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -302,7 +302,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) /* The maximum number of VCPUs is limited by the host's GIC model */ kvm->max_vcpus = kvm_arm_default_max_vcpus(); - if (system_uses_nmi() && !static_branch_unlikely(&vgic_v3_cpuif_trap)) + if (cpus_have_const_cap(ARM64_HAS_NMI) && !static_branch_unlikely(&vgic_v3_cpuif_trap)) kvm->arch.pfr1_nmi = ID_AA64PFR1_EL1_NMI_IMP; kvm_arm_init_hypercalls(kvm); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ddd9195ddcb8..103744878c60 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1685,7 +1685,7 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu, nmi = cpuid_feature_extract_unsigned_field(user_val, ID_AA64PFR1_EL1_NMI_SHIFT); if (nmi 
> ID_AA64PFR1_EL1_NMI_IMP || - (nmi && (!system_uses_nmi() || static_branch_unlikely(&vgic_v3_cpuif_trap)))) + (nmi && (!cpus_have_const_cap(ARM64_HAS_NMI) || static_branch_unlikely(&vgic_v3_cpuif_trap)))) return -EINVAL; vcpu->kvm->arch.pfr1_nmi = nmi; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index c2e45fb4a74d..9e3932e232c3 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -165,11 +165,19 @@ static inline bool has_v3_3_nmi(void) { return gic_data.has_nmi && system_uses_nmi(); } +static bool system_is_nmi_capable(void) +{ + return gic_data.has_nmi && cpus_have_const_cap(ARM64_HAS_NMI); +} #else static inline bool has_v3_3_nmi(void) { return false; } +static bool system_is_nmi_capable(void) +{ + return false; +} #endif static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) @@ -2384,6 +2392,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + gic_v3_kvm_info.has_nmi = system_is_nmi_capable(); vgic_set_kvm_info(&gic_v3_kvm_info); } @@ -2726,7 +2735,7 @@ static void __init gic_acpi_setup_kvm_info(void) gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; - gic_v3_kvm_info.has_nmi = has_v3_3_nmi(); + gic_v3_kvm_info.has_nmi = system_is_nmi_capable(); vgic_set_kvm_info(&gic_v3_kvm_info); } -- Gitee From d3e4a45c7a2878168bf798b328e958a74529d768 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Apr 2024 09:22:11 +0800 Subject: [PATCH 086/231] KVM: arm64: vgic-v3: Handle traps of ICV_NMIAR1_EL1 commit 3adab6c497a4153129fecf181f539a66df1956ee openEuler Even if we do not plan to deal with the GICv3.3 NMI feature in our in-kernel emulation of the CPU interface for terminally broken systems, the actual system register may still exist and raise its ugly head. Hit it with an UNDEF-sized hammer. 
Signed-off-by: Marc Zyngier Signed-off-by: Xiang Chen Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hyp/vgic-v3-sr.c | 3 +++ arch/arm64/kvm/sys_regs.c | 1 + 2 files changed, 4 insertions(+) diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index 05955e369c51..7a9e132f68db 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -1048,6 +1048,9 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu) return 0; fn = __vgic_v3_read_iar; break; + case SYS_ICC_NMIAR1_EL1: + /* Here's an UNDEF for you */ + return 0; case SYS_ICC_EOIR0_EL1: case SYS_ICC_EOIR1_EL1: if (unlikely(is_read)) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 103744878c60..41a1f480b30b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2342,6 +2342,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only }, { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only }, { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only }, + { SYS_DESC(SYS_ICC_NMIAR1_EL1), undef_access }, { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only }, { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only }, { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, -- Gitee From 713c9f9e8fac63a274e202a7224e62be19b5ae2b Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:23 +0800 Subject: [PATCH 087/231] kvm: hisi_virt: Allocate VM table and save vpeid in it commit ab4dbc657c10f8368aba4577b0c72a64d5455d2a openEuler When guest access register ICC_SGI1R_EL1, GIC will access VM table to get the vpeid of vcpu for IPIV feature. So when IPIV feature is enabled, allocate VM table and save vpeid in it. The index of the entries in VM table is vcpu id. 
Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- drivers/irqchip/irq-gic-v3-its.c | 31 ++++++++++++++++++++++++++++-- drivers/irqchip/irq-gic-v3.c | 3 +++ include/linux/irqchip/arm-gic-v4.h | 1 + 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 294c853beb88..c6ad37d29963 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -430,6 +430,8 @@ void build_devid_pools(void) } #endif +extern struct static_key_false ipiv_enable; + static struct page *its_alloc_pages_node(int node, gfp_t gfp, unsigned int order) { @@ -4919,6 +4921,10 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); + if (static_branch_unlikely(&ipiv_enable)) { + free_pages((unsigned long)page_address(vm->vpeid_page), + get_order(nr_irqs * 2)); + } } } @@ -4928,8 +4934,10 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page; + struct page *vprop_page, *vpeid_page; int base, nr_ids, i, err = 0; + void *vpeid_table_va; + u16 *vpeid_entry; bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) @@ -4951,14 +4959,33 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; - if (gic_rdists->has_rvpeid) + if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; + if (static_branch_unlikely(&ipiv_enable)) { + /* + * The vpeid's size is 2 bytes, so we need to allocate 2 * + * (num of vcpus). nr_irqs is equal to the number of vCPUs. 
+ */ + vpeid_page = alloc_pages(GFP_KERNEL, get_order(nr_irqs * 2)); + if (!vpeid_page) { + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + return -ENOMEM; + } + vm->vpeid_page = vpeid_page; + vpeid_table_va = page_address(vpeid_page); + } + } for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); if (err) break; + if (static_branch_unlikely(&ipiv_enable)) { + vpeid_entry = (u16 *)vpeid_table_va + i; + *vpeid_entry = vm->vpes[i]->vpe_id; + } err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2eb6ae23f3fb..4a43d741fbb1 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -112,6 +112,9 @@ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); +DEFINE_STATIC_KEY_FALSE(ipiv_enable); +EXPORT_SYMBOL(ipiv_enable); + /* * When the Non-secure world has access to group 0 interrupts (as a * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index bf9e0640288d..606020aefac0 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -20,6 +20,7 @@ struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; + struct page *vpeid_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; -- Gitee From 75fa3b164f2eb3959ae77536f196d9e1f3fec936 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Wed, 23 Apr 2025 17:16:24 +0800 Subject: [PATCH 088/231] kvm: arm64: avoid sending multi-SGIs in IPIV commit 3b4266e88e78b97a50be29130cd4a29a10b34f3c openEuler IPIV optimizes unicast scenarios and does not support multicast or broadcast. 
To prevent GuestOS from generating multicast and broadcast, the MPIDR of vCPUs is modified to ensure that the [aff3, aff2, aff1] fields are uniquely assigned for each vCPU within a virtual machine, while all aff0 fields are set to zero. This configuration guarantees the uniqueness of vCPU affinity identifiers at the architecture level, thereby suppressing the generation of SGI multicast and broadcast signals by the GuestOS. Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/sys_regs.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6238698e095e..cba662dc7c5f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -711,6 +711,8 @@ static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return actlr; } +extern struct static_key_false ipiv_enable; + static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; @@ -725,6 +727,15 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); + + if (static_branch_unlikely(&ipiv_enable)) { + /* + * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique + */ + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); + } + mpidr |= (1ULL << 31); vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); -- Gitee From caf364c77700ae6c37628c582eef0a38d6f73036 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:25 +0800 Subject: [PATCH 089/231] irqchip: gicv3-its: Set base address of vm table and targe ITS when vpe schedule and deschedule commit cb87b7ec77ebabe1d5d3fd4a0eb63a6974b61559 openEuler Set the base address of vm table and target ITS when vpe schedule and 
deschedule. Also need to make sure IPIV is not busy before setting them. Disable ICC_SGI1R_EL1 trap when vpe schedule, and enable the trap when vpe deschedule. Only disable ICC_SGI1R_EL1 trap when enabled ipiv and set GICD_CTLR_nASSGIreq for register GICD_CTLR of virtual machine. Signed-off-by: Xiang Chen Signed-off-by: Nianyao Tang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 1 + drivers/irqchip/irq-gic-v3-its.c | 70 ++++++++++++++++++++++++++++++ include/linux/irqchip/arm-gic-v3.h | 15 +++++++ include/linux/irqchip/arm-gic-v4.h | 1 + 4 files changed, 87 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index cea9745fa2c5..f3c14508ed05 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -130,6 +130,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; + dist->its_vm.nassgireq = dist->nassgireq; if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c6ad37d29963..dd0b8d064c91 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -4522,11 +4522,69 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +/* IPIV private register */ +#define CPU_SYS_TRAP_EL2 sys_reg(3, 4, 15, 7, 2) +#define CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT 0 +#define CPU_SYS_TRAP_EL2_IPIV_ENABLE \ + (1ULL << CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT) + +/* + * ipiv_disable_vsgi_trap and ipiv_enable_vsgi_trap run only + * in VHE mode and in EL2. 
+ */ +static void ipiv_disable_vsgi_trap(void) +{ +#ifdef CONFIG_ARM64 + u64 val; + + /* disable guest access ICC_SGI1R_EL1 trap, enable ipiv */ + val = read_sysreg_s(CPU_SYS_TRAP_EL2); + val |= CPU_SYS_TRAP_EL2_IPIV_ENABLE; + write_sysreg_s(val, CPU_SYS_TRAP_EL2); +#endif +} + +static void ipiv_enable_vsgi_trap(void) +{ +#ifdef CONFIG_ARM64 + u64 val; + + /* enable guest access ICC_SGI1R_EL1 trap, disable ipiv */ + val = read_sysreg_s(CPU_SYS_TRAP_EL2); + val &= ~CPU_SYS_TRAP_EL2_IPIV_ENABLE; + write_sysreg_s(val, CPU_SYS_TRAP_EL2); +#endif +} + static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; + unsigned long vpeid_page_addr; + u64 ipiv_val = 0; u64 val = 0; + u32 nr_vpes; + + if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, + ipiv_val, !(ipiv_val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); + vpeid_page_addr = virt_to_phys(page_address(vm->vpeid_page)); + writel_relaxed(lower_32_bits(vpeid_page_addr), + vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed(upper_32_bits(vpeid_page_addr), + vlpi_base + GICR_VM_TABLE_BAR_H); + + /* setup gicr_vcpu_entry_num_max and gicr_ipiv_its_ta_sel */ + nr_vpes = vpe->its_vm->nr_vpes; + ipiv_val = ((nr_vpes - 1) << GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT) | + (0 << GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT); + writel_relaxed(ipiv_val, vlpi_base + GICR_IPIV_CTRL); + + ipiv_disable_vsgi_trap(); + } /* Schedule the VPE */ val |= GICR_VPENDBASER_Valid; @@ -4541,6 +4599,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; u64 val; if (info->req_db) { @@ -4572,6 +4631,17 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, GICR_VPENDBASER_PendingLast); vpe->pending_last = true; } + + 
if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, + val, !(val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_H); + + ipiv_enable_vsgi_trap(); + } } static void its_vpe_4_1_invall(struct its_vpe *vpe) diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index a109c3212aad..8ff961c49c59 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -360,6 +360,21 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +/* IPIV VM table address */ +#define GICR_VM_TABLE_BAR_L 0x140 +#define GICR_VM_TABLE_BAR_H 0x144 + +#define GICR_IPIV_CTRL 0x148 +#define GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT 8 +/* + * Select ITS to determine the ITS through which the IPI is sent. + */ +#define GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT 4 + +#define GICR_IPIV_ST 0x14c +#define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 +#define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) + /* * ITS registers, offsets from ITS_base */ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 606020aefac0..bab2f125dfc8 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -27,6 +27,7 @@ struct its_vm { unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; + bool nassgireq; }; /* Embedded in kvm_vcpu.arch */ -- Gitee From cace4d3e54e151297abd25cdccfbace1a34a7873 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:26 +0800 Subject: [PATCH 090/231] kvm: hisi_virt: Register ipiv exception interrupt commit 292d513a1364499f57be56245ffa31a5fc4cee55 openEuler When one of the following conditions occurs: 1. The index of VM table exceeds the supported range. 2. Guest sends SGI with IRM=1. 3. 
Guest sends multicast. it triggers an exception interrupt (PPI interrupt). Just printk exception info in interrupt handler. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-init.c | 36 +++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 35699f10790a..0a5cd0859ae4 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -3,6 +3,7 @@ * Copyright (C) 2015, 2016 ARM Ltd. */ +#include #include #include #include @@ -506,17 +507,30 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } +extern struct static_key_false ipiv_enable; +static int ipiv_irq; + /* GENERIC PROBE */ void kvm_vgic_cpu_up(void) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); + if (static_branch_unlikely(&ipiv_enable)) + enable_percpu_irq(ipiv_irq, 0); } void kvm_vgic_cpu_down(void) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); + if (static_branch_unlikely(&ipiv_enable)) + disable_percpu_irq(ipiv_irq); +} + +static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) +{ + kvm_info("IPIV irq handler!\n"); + return IRQ_HANDLED; } static irqreturn_t vgic_maintenance_handler(int irq, void *data) @@ -626,5 +640,27 @@ int kvm_vgic_hyp_init(void) } kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); + + if (static_branch_unlikely(&ipiv_enable)) { + ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, + ACPI_ACTIVE_HIGH); + if (ipiv_irq < 0) { + kvm_err("No ipiv exception irq\n"); + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); + return -ENXIO; + } + + ret = request_percpu_irq(ipiv_irq, vgic_ipiv_irq_handler, + "ipiv exception", kvm_get_running_vcpus()); + if (ret) { + kvm_err("Cannot register interrupt %d\n", ipiv_irq); + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); + acpi_unregister_gsi(18); 
+ return ret; + } + } + return 0; } -- Gitee From 73bbc8d0cd32469d4f1f60639e80cb30bcb6607a Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:27 +0800 Subject: [PATCH 091/231] kvm: arm64: Add interface KVM_CAP_ARM_IPIV_MODE commit 5242aefd5d2efe6d902e7cfacf7cff2fe440a7f5 openEuler Before IPIV feature, it gets mpidr from vcpu id, but after the feature, we need to know if IPIV mode is enabled. And new IPIV modes may be added later. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/arm.c | 8 ++++++++ include/uapi/linux/kvm.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5631f03be55c..21f397c4540e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -353,6 +353,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_destroy_realm(kvm); } +extern struct static_key_false ipiv_enable; + int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; @@ -479,6 +481,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif + case KVM_CAP_ARM_IPIV_MODE: + if (static_branch_unlikely(&ipiv_enable)) + r = 1; + else + r = 0; + break; default: r = 0; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 76428118589d..7b8b26c24a95 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1256,6 +1256,8 @@ struct kvm_ppc_resize_hpt { #ifdef KVM_CAP_IRQ_ROUTING +#define KVM_CAP_ARM_IPIV_MODE 503 + struct kvm_irq_routing_irqchip { __u32 irqchip; __u32 pin; -- Gitee From 1a7d5dcaa077e27d72731184e107c0f80d7d5a2f Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:28 +0800 Subject: [PATCH 092/231] kvm: hisi_virt: Probe and configure IPIV capacity on HIP12 commit 5634a3dfafb71ede4922b523f77389ef2c006b61 openEuler IPIV is an virtualization extension on HIP12, which allows IPIs on guest directly sending by hardware to other vcpu stead of 
trapping to EL2. It will bring IPI interrupt optimization on guest. Introduce the method to detect and enable the feature, and also add a kernel command parameter "kvm-arm.ipiv_enabled" (default is 0) so that users can disable or enable the feature. The feature is based on GICv4p1. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- .../admin-guide/kernel-parameters.txt | 3 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 7 ++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 37 +++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 11 ++++++ drivers/irqchip/irq-gic-v3.c | 30 +++++++++++++++ include/linux/irqchip/arm-gic-v3.h | 12 ++++++ 7 files changed, 101 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index ba5b925952ab..ebe256d4bf04 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2695,6 +2695,9 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs. + kvm-arm.ipiv_enabled= + [KVM,ARM] Allow use of HiSilicon ipiv on GICv4.1 + kvm-arm.dvmbm_enabled= [KVM,ARM] Allow use of HiSilicon DVMBM capability. 
Default: 0 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5d43bd341450..4c6baa1822ab 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1258,5 +1258,6 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; +extern bool kvm_ipiv_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 21f397c4540e..8f8ef0802e5c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -68,6 +68,8 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; +/* Capability of IPIV */ +bool kvm_ipiv_support; static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); @@ -2812,8 +2814,13 @@ static __init int kvm_arm_init(void) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); + kvm_ipiv_support = hisi_ipiv_supported(); kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); + kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? 
"enabled" : "disabled"); + + if (kvm_ipiv_support) + ipiv_gicd_init(); if (kvm_dvmbm_support) kvm_get_pg_cfg(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index cf1e8fe3f4c4..b3d82b273f42 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -13,6 +13,7 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; +static bool ipiv_enabled; static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -158,6 +159,42 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +static int __init early_ipiv_enable(char *buf) +{ + return strtobool(buf, &ipiv_enabled); +} +early_param("kvm-arm.ipiv_enabled", early_ipiv_enable); + +bool hisi_ipiv_supported(void) +{ + if (cpu_type != HI_IP12) + return false; + + /* Determine whether IPIV is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_IPIV_MASK)) { + kvm_info("Hisi ipiv not supported by the hardware\n"); + return false; + } + + /* User provided kernel command-line parameter */ + if (!ipiv_enabled || !is_kernel_in_hyp_mode()) + return false; + + /* Enable IPIV feature if necessary */ + if (!is_gicv4p1()) { + kvm_info("Hisi ipiv needs to enable GICv4p1!\n"); + return false; + } + + kvm_info("Enable Hisi ipiv, do not support vSGI broadcast\n"); + return true; +} + +void ipiv_gicd_init(void) +{ + gic_dist_enable_ipiv(); +} + bool hisi_dvmbm_supported(void) { if (cpu_type != HI_IP10 && cpu_type != HI_IP10C && diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c57ca65970de..ce0d8e7b7ec6 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -18,6 +18,8 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +/* HIP12 */ +#define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -90,7 
+92,9 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +bool hisi_ipiv_supported(void); void kvm_get_pg_cfg(void); +void ipiv_gicd_init(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -109,7 +113,12 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +static inline bool hisi_ipiv_supported(void) +{ + return false; +} static inline void kvm_get_pg_cfg(void) {} +static inline void ipiv_gicd_init(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { @@ -126,4 +135,6 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +extern bool gic_dist_enable_ipiv(void); +extern bool is_gicv4p1(void); #endif /* __HISI_VIRT_H__ */ diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 4a43d741fbb1..2262f97e175e 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1406,6 +1406,36 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } +bool is_gicv4p1(void) +{ + if (!gic_data.rdists.has_rvpeid) + return false; + + return true; +} +EXPORT_SYMBOL(is_gicv4p1); + +void gic_dist_enable_ipiv(void) +{ + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); + val |= GICD_MISC_CTRL_CFG_IPIV_EN; + writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL); + static_branch_enable(&ipiv_enable); + + val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | + (0 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | + (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); + writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); + + /* Set target ITS address of IPIV feature */ + writel_relaxed(0x4880, gic_data.dist_base + 
GICD_IPIV_ITS_TA_BASE); +} +EXPORT_SYMBOL(gic_dist_enable_ipiv); + static void gic_cpu_init(void) { void __iomem *rbase; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 8ff961c49c59..ba07564dcd20 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -111,6 +111,18 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#define GICD_MISC_CTRL 0x2084 +#define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) + +/* IPIV private register */ +#define GICD_IPIV_CTRL 0xc05c +#define GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT 4 +#define GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT 8 +#define GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT 12 +#define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 +#define GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 +#define GICD_IPIV_ITS_TA_BASE 0xc010 + /* * Re-Distributor registers, offsets from RD_base */ -- Gitee From 390127c93f7850a491874b748e3183a87be9ecb4 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Wed, 23 Apr 2025 17:16:29 +0800 Subject: [PATCH 093/231] kabi: Use KABI_EXTEND to perform kabi repair for IPIV commit 68d4f5dfc8755de6376bab774a997d3310d0515e openEuler Follow the kabi repair method of openeuler and use KABI_EXTEND. Its essence is to use GENKSYMS shielding. 
Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- include/linux/irqchip/arm-gic-v4.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index bab2f125dfc8..20f7532ce75b 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -20,14 +20,14 @@ struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; - struct page *vpeid_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; - bool nassgireq; + KABI_EXTEND(struct page *vpeid_page) + KABI_EXTEND(bool nassgireq) }; /* Embedded in kvm_vcpu.arch */ -- Gitee From 6cd64e272d1adfd1bdc1656baaff1da3febe8433 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:25 +0800 Subject: [PATCH 094/231] arm64/config: add config to control whether enable IPIV feature commit e318754d42ce9bab007a22b1034e98d70ff1a8c8 openEuler Add ARM64_HISI_IPIV config to control whether enable the IPIV feature. The IPIV feature optimizes vSGI performance based on GICv4.1 and is a feature of HIP12. 
Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/Kconfig | 13 +++++++++++ arch/arm64/configs/tencent.config | 1 + arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/arm.c | 11 ++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 5 +++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 12 +++++++++-- arch/arm64/kvm/sys_regs.c | 4 ++++ arch/arm64/kvm/vgic/vgic-init.c | 10 +++++++++ arch/arm64/kvm/vgic/vgic-mmio-v3.c | 2 ++ drivers/irqchip/irq-gic-v3-its.c | 32 ++++++++++++++++++++++------ drivers/irqchip/irq-gic-v3.c | 4 ++++ include/linux/irqchip/arm-gic-v3.h | 4 ++++ include/linux/irqchip/arm-gic-v4.h | 2 ++ 13 files changed, 93 insertions(+), 9 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0bfae487845c..a608718055a1 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2354,6 +2354,19 @@ config ARM64_HDBSS endmenu # "ARMv9.5 architectural features" +config ARM64_HISI_IPIV + bool "Enable support for IPIV" + default y + depends on ACPI + depends on ARM64 + help + IPIV optimizes vSGI on the basis of GICv4.1. The vCPU on the sending + side of vSGI needs to trap to Hypervisor. IPIv sends vSGI without + traping, improving performance. + + The feature will only be enabled if CPU in the system and Guest OS + support this feature. If unsure, say Y. 
+ config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index e6977bb1af81..ca842e8ad2a2 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1873,3 +1873,4 @@ CONFIG_FWCTL=m # UB_FWCTL CONFIG_FWCTL_UB=m # end of UB_FWCTL +CONFIG_ARM64_HISI_IPIV=y \ No newline at end of file diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4c6baa1822ab..982f8f1daf1c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1258,6 +1258,8 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; +#ifdef CONFIG_ARM64_HISI_IPIV extern bool kvm_ipiv_support; +#endif #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 8f8ef0802e5c..d34e847f39f9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -68,8 +68,10 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; +#ifdef CONFIG_ARM64_HISI_IPIV /* Capability of IPIV */ bool kvm_ipiv_support; +#endif /* CONFIG_ARM64_HISI_IPIV */ static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); @@ -355,7 +357,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_destroy_realm(kvm); } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { @@ -483,12 +487,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif +#ifdef CONFIG_ARM64_HISI_IPIV case KVM_CAP_ARM_IPIV_MODE: if (static_branch_unlikely(&ipiv_enable)) r = 1; else r = 0; break; +#endif default: r = 0; } @@ -2814,13 +2820,18 @@ static __init int kvm_arm_init(void) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); +#ifdef CONFIG_ARM64_HISI_IPIV kvm_ipiv_support = 
hisi_ipiv_supported(); +#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); + +#ifdef CONFIG_ARM64_HISI_IPIV kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? "enabled" : "disabled"); if (kvm_ipiv_support) ipiv_gicd_init(); +#endif if (kvm_dvmbm_support) kvm_get_pg_cfg(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index b3d82b273f42..37eff94b7f33 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -13,7 +13,10 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; + +#ifdef CONFIG_ARM64_HISI_IPIV static bool ipiv_enabled; +#endif static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -159,6 +162,7 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +#ifdef CONFIG_ARM64_HISI_IPIV static int __init early_ipiv_enable(char *buf) { return strtobool(buf, &ipiv_enabled); @@ -194,6 +198,7 @@ void ipiv_gicd_init(void) { gic_dist_enable_ipiv(); } +#endif /* CONFIG_ARM64_HISI_IPIV */ bool hisi_dvmbm_supported(void) { diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index ce0d8e7b7ec6..027ac323bdde 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -19,7 +19,9 @@ enum hisi_cpu_type { }; /* HIP12 */ +#ifdef CONFIG_ARM64_HISI_IPIV #define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) +#endif /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -92,9 +94,11 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +#ifdef CONFIG_ARM64_HISI_IPIV bool hisi_ipiv_supported(void); -void kvm_get_pg_cfg(void); void ipiv_gicd_init(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ +void kvm_get_pg_cfg(void); int 
kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -113,12 +117,14 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +#ifdef CONFIG_ARM64_HISI_IPIV static inline bool hisi_ipiv_supported(void) { return false; } -static inline void kvm_get_pg_cfg(void) {} static inline void ipiv_gicd_init(void) {} +#endif /* CONFIG_ARM64_HISI_IPIV */ +static inline void kvm_get_pg_cfg(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { @@ -135,6 +141,8 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +#ifdef CONFIG_ARM64_HISI_IPIV extern bool gic_dist_enable_ipiv(void); extern bool is_gicv4p1(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ #endif /* __HISI_VIRT_H__ */ diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index cba662dc7c5f..24d1bc0d2814 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -711,7 +711,9 @@ static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return actlr; } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -728,6 +730,7 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { /* * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique @@ -735,6 +738,7 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); } +#endif mpidr |= (1ULL << 31); vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); diff 
--git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 0a5cd0859ae4..950d890be7cc 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -507,31 +507,39 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; static int ipiv_irq; +#endif /* GENERIC PROBE */ void kvm_vgic_cpu_up(void) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) enable_percpu_irq(ipiv_irq, 0); +#endif } void kvm_vgic_cpu_down(void) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) disable_percpu_irq(ipiv_irq); +#endif } +#ifdef CONFIG_ARM64_HISI_IPIV static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) { kvm_info("IPIV irq handler!\n"); return IRQ_HANDLED; } +#endif static irqreturn_t vgic_maintenance_handler(int irq, void *data) { @@ -641,6 +649,7 @@ int kvm_vgic_hyp_init(void) kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); @@ -661,6 +670,7 @@ int kvm_vgic_hyp_init(void) return ret; } } +#endif return 0; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index f3c14508ed05..41308c69555d 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -130,7 +130,9 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? 
*/ dist->nassgireq = val & GICD_CTLR_nASSGIreq; +#ifdef CONFIG_ARM64_HISI_IPIV dist->its_vm.nassgireq = dist->nassgireq; +#endif if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index dd0b8d064c91..60b54a4c0ba3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -430,7 +430,9 @@ void build_devid_pools(void) } #endif +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif static struct page *its_alloc_pages_node(int node, gfp_t gfp, unsigned int order) @@ -4522,6 +4524,7 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV private register */ #define CPU_SYS_TRAP_EL2 sys_reg(3, 4, 15, 7, 2) #define CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT 0 @@ -4534,36 +4537,35 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) */ static void ipiv_disable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* disable guest access ICC_SGI1R_EL1 trap, enable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val |= CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } static void ipiv_enable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* enable guest access ICC_SGI1R_EL1 trap, disable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val &= ~CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } +#endif /* CONFIG_ARM64_HISI_IPIV */ static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + +#ifdef CONFIG_ARM64_HISI_IPIV struct its_vm *vm = vpe->its_vm; unsigned long vpeid_page_addr; u64 ipiv_val = 0; - u64 val = 0; u32 nr_vpes; if (static_branch_unlikely(&ipiv_enable) && @@ -4585,6 +4587,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, ipiv_disable_vsgi_trap(); } +#endif /* CONFIG_ARM64_HISI_IPIV */ /* 
Schedule the VPE */ val |= GICR_VPENDBASER_Valid; @@ -4599,9 +4602,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - struct its_vm *vm = vpe->its_vm; u64 val; +#ifdef CONFIG_ARM64_HISI_IPIV + struct its_vm *vm = vpe->its_vm; +#endif + if (info->req_db) { unsigned long flags; @@ -4632,6 +4638,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, vpe->pending_last = true; } +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable) && vm->nassgireq) { /* wait gicr_ipiv_busy */ @@ -4642,6 +4649,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, ipiv_enable_vsgi_trap(); } +#endif } static void its_vpe_4_1_invall(struct its_vpe *vpe) @@ -4991,10 +4999,12 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { free_pages((unsigned long)page_address(vm->vpeid_page), get_order(nr_irqs * 2)); } +#endif } } @@ -5004,10 +5014,14 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page, *vpeid_page; + struct page *vprop_page; int base, nr_ids, i, err = 0; + +#ifdef CONFIG_ARM64_HISI_IPIV + struct page *vpeid_page; void *vpeid_table_va; u16 *vpeid_entry; +#endif bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) @@ -5031,6 +5045,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { /* * The vpeid's size is 2 bytes, so we need to allocate 2 * @@ -5045,6 +5060,7 @@ static int 
its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->vpeid_page = vpeid_page; vpeid_table_va = page_address(vpeid_page); } +#endif } for (i = 0; i < nr_irqs; i++) { @@ -5052,10 +5068,12 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq err = its_vpe_init(vm->vpes[i]); if (err) break; +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { vpeid_entry = (u16 *)vpeid_table_va + i; *vpeid_entry = vm->vpes[i]->vpe_id; } +#endif err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2262f97e175e..4ce6872ed252 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -112,8 +112,10 @@ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); +#ifdef CONFIG_ARM64_HISI_IPIV DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); +#endif /* * When the Non-secure world has access to group 0 interrupts (as a @@ -1406,6 +1408,7 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } +#ifdef CONFIG_ARM64_HISI_IPIV bool is_gicv4p1(void) { if (!gic_data.rdists.has_rvpeid) @@ -1435,6 +1438,7 @@ void gic_dist_enable_ipiv(void) writel_relaxed(0x4880, gic_data.dist_base + GICD_IPIV_ITS_TA_BASE); } EXPORT_SYMBOL(gic_dist_enable_ipiv); +#endif /* CONFIG_ARM64_HISI_IPIV */ static void gic_cpu_init(void) { diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index ba07564dcd20..3abfd32d71e3 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -111,6 +111,7 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#ifdef CONFIG_ARM64_HISI_IPIV #define GICD_MISC_CTRL 0x2084 #define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) @@ -122,6 +123,7 @@ #define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 #define 
GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 #define GICD_IPIV_ITS_TA_BASE 0xc010 +#endif /* * Re-Distributor registers, offsets from RD_base @@ -372,6 +374,7 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV VM table address */ #define GICR_VM_TABLE_BAR_L 0x140 #define GICR_VM_TABLE_BAR_H 0x144 @@ -386,6 +389,7 @@ #define GICR_IPIV_ST 0x14c #define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 #define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) +#endif /* CONFIG_ARM64_HISI_IPIV */ /* * ITS registers, offsets from ITS_base diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 20f7532ce75b..e40b1e9a9227 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -26,8 +26,10 @@ struct its_vm { unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; +#ifdef CONFIG_ARM64_HISI_IPIV KABI_EXTEND(struct page *vpeid_page) KABI_EXTEND(bool nassgireq) +#endif }; /* Embedded in kvm_vcpu.arch */ -- Gitee From 6b80f3dc4595ace6dda2b7099d027b892ed4f273 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:26 +0800 Subject: [PATCH 095/231] KVM: arm64: Introduce ipiv enable ioctl commit 47782a5586e0e212aa1dbe884da54b908474aedb openEuler IPIV uses ioctl to be enabled. Users (such as qemu) can invoke the ioctl to enable IPIV. 
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/arm.c | 15 ++++++++++++++- include/linux/irqchip/arm-gic-v4.h | 1 + include/uapi/linux/kvm.h | 2 +- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d34e847f39f9..e62c08241b04 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -174,6 +174,14 @@ static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, } #endif +#ifdef CONFIG_ARM64_HISI_IPIV +static int kvm_hisi_ipiv_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + kvm->arch.vgic.its_vm.enable_ipiv_from_vmm = true; + return 0; +} +#endif + int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -230,6 +238,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = kvm_cap_arm_enable_hdbss(kvm, cap); break; +#endif +#ifdef CONFIG_ARM64_HISI_IPIV + case KVM_CAP_ARM_HISI_IPIV: + r = kvm_hisi_ipiv_enable_cap(kvm, cap); + break; #endif default: r = -EINVAL; @@ -488,7 +501,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #endif #ifdef CONFIG_ARM64_HISI_IPIV - case KVM_CAP_ARM_IPIV_MODE: + case KVM_CAP_ARM_HISI_IPIV: if (static_branch_unlikely(&ipiv_enable)) r = 1; else diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index e40b1e9a9227..23e88aea2f50 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -29,6 +29,7 @@ struct its_vm { #ifdef CONFIG_ARM64_HISI_IPIV KABI_EXTEND(struct page *vpeid_page) KABI_EXTEND(bool nassgireq) + bool enable_ipiv_from_vmm; #endif }; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7b8b26c24a95..7f2ce8ccd76e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1256,7 +1256,7 @@ struct kvm_ppc_resize_hpt { #ifdef KVM_CAP_IRQ_ROUTING -#define KVM_CAP_ARM_IPIV_MODE 503 +#define KVM_CAP_ARM_HISI_IPIV 798 
struct kvm_irq_routing_irqchip { __u32 irqchip; -- Gitee From abb1635544a6b5c5cfed5fc60ae800069eec10f1 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:27 +0800 Subject: [PATCH 096/231] KVM: arm64: Document PV-sgi interface commit 11c4602610ddc6079e34f3c6ce3d4ec0d752efbe openEuler Introduce a paravirtualization interface for KVM/arm64 to PV-sgi. A hypercall interface is provided for the guest to interrogate the hypervisor's support for IPIV. In the previous IPIV implementation, the MPIDR value of the vCPU needs to be changed to prevent guests from sending multicast and broadcast. This series of bugfix patches provide a method: Add the SMCCC interface to the kernel so that the guest OS can control the enabling of IPIV. When IPIV is enabled, the guest OS uses multiple unicast to implement multicast. (Broadcasting cannot be implemented due to hardware limitations. If a guest sends a broadcast, an exception message is displayed on the host.) In this way, do not need to modify the MPIDR. Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- Documentation/virt/kvm/arm/hypercalls.rst | 4 +++ Documentation/virt/kvm/arm/pvsgi.rst | 33 +++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 Documentation/virt/kvm/arm/pvsgi.rst diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst index 3e23084644ba..67bd102cf43b 100644 --- a/Documentation/virt/kvm/arm/hypercalls.rst +++ b/Documentation/virt/kvm/arm/hypercalls.rst @@ -127,6 +127,10 @@ The pseudo-firmware bitmap register are as follows: Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP: The bit represents the Precision Time Protocol KVM service. + Bit-2: KVM_REG_ARM_VENDOR_HYP_BIT_IPIV: + The bit represents the ARM_SMCCC_VENDOR_PV_SGI_FEATURES and + ARM_SMCCC_VENDOR_PV_SGI_ENABLE function-ids. 
+ Errors: ======= ============================================================= diff --git a/Documentation/virt/kvm/arm/pvsgi.rst b/Documentation/virt/kvm/arm/pvsgi.rst new file mode 100644 index 000000000000..5f12a3aaccd2 --- /dev/null +++ b/Documentation/virt/kvm/arm/pvsgi.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Paravirtualized SGI support for HiSilicon +========================================== + +KVM/arm64 provides some hypervisor service calls to support a paravirtualized +SGI(software generated interrupt) in HiSilicon Hip12 SoC. + +Some SMCCC compatible hypercalls are defined: + +* PV_SGI_FEATURES: 0xC6000090 +* PV_SGI_ENABLE: 0xC6000091 + +The existence of the PV_SGI hypercall should be probed using the SMCCC 1.1 +ARCH_FEATURES mechanism before calling it. + +PV_SGI_FEATURES + + ============= ======== ========== + Function ID: (uint32) 0xC6000090 + PV_call_id: (uint32) The function to query for support. + Currently only PV_SGI_ENABLE is supported. + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant + PV-sgi feature is supported by the hypervisor. + ============= ======== ========== + +PV_SGI_ENABLE + + ============= ======== ========== + Function ID: (uint32) 0xC6000091 + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if this feature + has been enabled. + ============= ======== ========== -- Gitee From b29728a129c62e6449107a2c170fcd07acfabc8b Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:28 +0800 Subject: [PATCH 097/231] KVM: arm64: Implement PV_SGI related calls commit cd77c03a1ee1bcae1ffbde74afcc32a0a1bb0273 openEuler This provides a mechanism for querying whether IPIV are available in this hypervisor. Add some SMCCC compatible hypercalls for PV SGI: PV_SGI_FEATURES: 0xC6000090 PV_SGI_ENABLE: 0xC6000091 ipiv_enable is a global variable, indicating whether the hardware supports IPIV. enable_ipiv_from_vmm indicates whether the VMM (such as QEMU) enables IPIV through ioctl. 
enable_ipiv_from_guest indicates whether the guest OS enables IPIV through the SMCCC interface. Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/uapi/asm/kvm.h | 5 ++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 25 ++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 7 +++++ arch/arm64/kvm/hypercalls.c | 26 +++++++++++++++++++ arch/arm64/kvm/vgic/vgic-its.c | 9 +++++++ arch/arm64/kvm/vgic/vgic-mmio-v3.c | 3 --- drivers/irqchip/irq-gic-v3-its.c | 15 ++++------- include/linux/arm-smccc.h | 15 +++++++++++ include/linux/irqchip/arm-gic-v4.h | 2 +- tools/arch/arm64/include/uapi/asm/kvm.h | 5 ++++ .../selftests/kvm/aarch64/hypercalls.c | 2 +- 11 files changed, 99 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index ba1557d7a449..62a84c90cf05 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -376,6 +376,11 @@ enum { enum { KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, + /* + * If the mainline conflicts, do not change the + * current sequence, add in sequence. 
+ */ + KVM_REG_ARM_VENDOR_HYP_BIT_IPIV = 2, #ifdef __KERNEL__ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT, #endif diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 37eff94b7f33..fb9ba7eccb5f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -194,6 +194,31 @@ bool hisi_ipiv_supported(void) return true; } +extern struct static_key_false ipiv_enable; + +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + /* IPIV is supported by the hardware */ + if (!static_branch_unlikely(&ipiv_enable)) + return false; + + /* vSGI passthrough is configured */ + if (!vcpu->kvm->arch.vgic.nassgireq) + return false; + + /* IPIV is enabled by the user */ + if (!vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) + return false; + + return true; +} + +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) +{ + /* Enable IPIV feature */ + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; +} + void ipiv_gicd_init(void) { gic_dist_enable_ipiv(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 027ac323bdde..0fbefabf4f39 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -96,6 +96,8 @@ bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); #ifdef CONFIG_ARM64_HISI_IPIV bool hisi_ipiv_supported(void); +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu); +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu); void ipiv_gicd_init(void); #endif /* CONFIG_ARM64_HISI_IPIV */ void kvm_get_pg_cfg(void); @@ -122,6 +124,11 @@ static inline bool hisi_ipiv_supported(void) { return false; } +static bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + return false; +} +static void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) {} static inline void ipiv_gicd_init(void) {} #endif /* CONFIG_ARM64_HISI_IPIV */ static inline void kvm_get_pg_cfg(void) {} diff --git a/arch/arm64/kvm/hypercalls.c 
b/arch/arm64/kvm/hypercalls.c index c06e9352a80a..a85bf604b9ac 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -9,6 +9,10 @@ #include #include +#ifdef CONFIG_ARM64_HISI_IPIV +#include "hisilicon/hisi_virt.h" +#endif + #define KVM_ARM_SMCCC_STD_FEATURES \ GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0) #define KVM_ARM_SMCCC_STD_HYP_FEATURES \ @@ -116,6 +120,12 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP, &smccc_feat->vendor_hyp_bmap); +#ifdef CONFIG_ARM64_HISI_IPIV + case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: + case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: + return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_IPIV, + &smccc_feat->vendor_hyp_bmap); +#endif default: return false; } @@ -342,6 +352,22 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) if (gpa != INVALID_GPA) val[0] = gpa; break; +#ifdef CONFIG_ARM64_HISI_IPIV + case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: + if (hisi_ipiv_supported_per_vm(vcpu)) + val[0] = SMCCC_RET_SUCCESS; + else + val[0] = SMCCC_RET_NOT_SUPPORTED; + break; + case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: + if (hisi_ipiv_supported_per_vm(vcpu)) { + hisi_ipiv_enable_per_vm(vcpu); + val[0] = SMCCC_RET_SUCCESS; + } else { + val[0] = SMCCC_RET_NOT_SUPPORTED; + } + break; +#endif case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID: val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0; val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1; diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index dc43b51cc373..34b2bcc64c72 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2746,6 +2746,15 @@ static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its) its->enabled = 0; vgic_its_free_device_list(kvm, its); vgic_its_free_collection_list(kvm, its); + +#ifdef CONFIG_ARM64_HISI_IPIV + /* + * For the para-virtualization feature IPIV, ensure that + * the flag of the guest OS is reset when the guest OS 
is + * reset. + */ + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = false; +#endif } static int vgic_its_has_attr(struct kvm_device *dev, diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 41308c69555d..cea9745fa2c5 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -130,9 +130,6 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; -#ifdef CONFIG_ARM64_HISI_IPIV - dist->its_vm.nassgireq = dist->nassgireq; -#endif if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 60b54a4c0ba3..425ad89c1cfd 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -430,9 +430,6 @@ void build_devid_pools(void) } #endif -#ifdef CONFIG_ARM64_HISI_IPIV -extern struct static_key_false ipiv_enable; -#endif static struct page *its_alloc_pages_node(int node, gfp_t gfp, unsigned int order) @@ -4568,8 +4565,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, u64 ipiv_val = 0; u32 nr_vpes; - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, ipiv_val, !(ipiv_val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); @@ -4639,8 +4635,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, } #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, val, !(val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); @@ -5000,7 +4995,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); #ifdef 
CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { free_pages((unsigned long)page_address(vm->vpeid_page), get_order(nr_irqs * 2)); } @@ -5046,7 +5041,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { /* * The vpeid's size is 2 bytes, so we need to allocate 2 * * (num of vcpus). nr_irqs is equal to the number of vCPUs. @@ -5069,7 +5064,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { vpeid_entry = (u16 *)vpeid_table_va + i; *vpeid_entry = vm->vpes[i]->vpe_id; } diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 374ff338755c..6aea1bf82a84 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -551,5 +551,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, method; \ }) +#ifdef CONFIG_ARM64_HISI_IPIV +/* HiSilicon paravirtualised sgi calls */ +#define ARM_SMCCC_VENDOR_PV_SGI_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x90) + +#define ARM_SMCCC_VENDOR_PV_SGI_ENABLE \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x91) +#endif /* CONFIG_ARM64_HISI_IPIV */ + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 23e88aea2f50..ae3def6ce7d5 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -28,8 +28,8 @@ struct its_vm { u32 vlpi_count[GICv4_ITS_LIST_MAX]; #ifdef CONFIG_ARM64_HISI_IPIV KABI_EXTEND(struct page *vpeid_page) - KABI_EXTEND(bool 
nassgireq) bool enable_ipiv_from_vmm; + bool enable_ipiv_from_guest; #endif }; diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 56562793812b..66f8f498ca69 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -375,6 +375,11 @@ enum { enum { KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, + /* + * If the mainline conflicts, do not change the + * current sequence, add in sequence. + */ + KVM_REG_ARM_VENDOR_HYP_BIT_IPIV = 2, #ifdef __KERNEL__ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT, #endif diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c index 31f66ba97228..f433c809b803 100644 --- a/tools/testing/selftests/kvm/aarch64/hypercalls.c +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -20,7 +20,7 @@ /* Last valid bits of the bitmapped firmware registers */ #define KVM_REG_ARM_STD_BMAP_BIT_MAX 0 #define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0 -#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1 +#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 2 struct kvm_fw_reg_info { uint64_t reg; /* Register definition */ -- Gitee From dd99f82440ef0f2fe3b2a8484fc7f5d0c1155752 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:29 +0800 Subject: [PATCH 098/231] irqchip/gic: Add HiSilicon PV SGI support commit 09d8e921869585f29270896ad604662bda3db077 openEuler Use the smccc interface to enable ipiv for the guest OS, modify the guest kernel, and use multiple unicasts to implement group broadcast. In this way, there is no need to modify the MPIDR. In addition, the MPIDR modification is deleted, and the GICD configuration is modified. The hardware uses the mpidr to calculate the corresponding vCPU ID to look up the vpeid table.
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/sys_regs.c | 14 ------------ drivers/irqchip/irq-gic-v3.c | 42 ++++++++++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 24d1bc0d2814..21317e5ada57 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -711,10 +711,6 @@ static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return actlr; } -#ifdef CONFIG_ARM64_HISI_IPIV -extern struct static_key_false ipiv_enable; -#endif - static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; @@ -730,16 +726,6 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); -#ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { - /* - * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique - */ - mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); - mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); - } -#endif - mpidr |= (1ULL << 31); vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 4ce6872ed252..543cf3256027 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -113,8 +113,12 @@ DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); #ifdef CONFIG_ARM64_HISI_IPIV +/* indicate if host supports IPIv */ DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); + +/* indicate if guest is using IPIv */ +static bool hisi_pv_sgi_enabled; #endif /* @@ -1428,8 +1432,8 @@ void gic_dist_enable_ipiv(void) static_branch_enable(&ipiv_enable); val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | - (0 << 
GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | - (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (4 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (12 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); @@ -1548,7 +1552,15 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); u16 tlist; +#ifdef CONFIG_ARM64_HISI_IPIV + if (!hisi_pv_sgi_enabled) + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + else + tlist = 1 << (gic_cpu_to_affinity(cpu) & 0xf); +#else tlist = gic_compute_target_list(&cpu, mask, cluster_id); +#endif + gic_send_sgi(cluster_id, tlist, d->hwirq); } @@ -2773,6 +2785,28 @@ static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) return gsi_domain_handle; } +#ifdef CONFIG_ARM64_HISI_IPIV +static void hisi_pv_sgi_init(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_FEATURES, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_info("Not Support HiSilicon PV SGI!\n"); + return; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_info("Disable HiSilicon PV SGI!\n"); + return; + } + + hisi_pv_sgi_enabled = true; + pr_info("Enable HiSilicon PV SGI!\n"); +} +#endif + static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { @@ -2825,6 +2859,10 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) if (static_branch_likely(&supports_deactivate_key)) gic_acpi_setup_kvm_info(); +#ifdef CONFIG_ARM64_HISI_IPIV + hisi_pv_sgi_init(); +#endif + return 0; out_fwhandle_free: -- Gitee From abf4ecfc68fd5f1cc27a69025259f377425635b1 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:30 +0800 Subject: [PATCH 099/231] KVM: arm64: fix live migration bug 
of IPIv commit b599aaac9ec7d76f36d7cb5d3e6e3e6d6c325f51 openEuler GITS_IIDR bit7 is used to store whether IPIV is enabled for Guest OS to ensure that enable_ipiv_from_guest are the same before and after live migration. Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-its.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 34b2bcc64c72..2bf8b4828930 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -504,6 +504,14 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, return extract_bytes(reg, addr & 7, len); } +#ifdef CONFIG_ARM64_HISI_IPIV +/* + * Use bit7 not used by GITS_IIDR to indicate whether IPIV is + * enabled for guest OS. + */ +#define HISI_GUEST_ENABLE_IPIV_SHIFT 7 +#endif + static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len) @@ -524,6 +532,12 @@ static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm, if (rev >= NR_ITS_ABIS) return -EINVAL; + +#ifdef CONFIG_ARM64_HISI_IPIV + if (val & (1UL << HISI_GUEST_ENABLE_IPIV_SHIFT)) + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; +#endif + return vgic_its_set_abi(its, rev); } @@ -2114,6 +2128,11 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, region->its_write(dev->kvm, its, addr, len, *reg); } else { *reg = region->its_read(dev->kvm, its, addr, len); +#ifdef CONFIG_ARM64_HISI_IPIV + if (dev->kvm->arch.vgic.its_vm.enable_ipiv_from_guest && + offset == GITS_IIDR) + *reg |= 1UL << HISI_GUEST_ENABLE_IPIV_SHIFT; +#endif } out: mutex_unlock(&dev->kvm->arch.config_lock); -- Gitee From ad3a2f4ad64698f2e87d03a54c65eefa3fd0b16b Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:31 +0800 Subject: [PATCH 100/231] kvm: hisi: make sure vcpu_id and vcpu_idx have same value in IPIv commit 
075449783bf2716aba7a7a977b5a7be0f65c074a openEuler When the VM's vgic is initialized, vpeids are written to the vpeid table in the sequence of vcpu_idx. However, the actual hardware looks up the vpeid_table in the sequence of vcpu_id. Therefore, ensure that vcpu_idx and vcpu_id are the same for IPIV. Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/arm.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e62c08241b04..e08474428fd3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1530,6 +1530,16 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, if (test_bit(KVM_ARM_VCPU_REC, &features)) return -EINVAL; +#ifdef CONFIG_ARM64_HISI_IPIV + if (static_branch_unlikely(&ipiv_enable) && + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm && + vcpu->vcpu_id != vcpu->vcpu_idx) { + kvm_err("IPIV ERROR: vcpu_id %d != vcpu_idx %d\n", + vcpu->vcpu_id, vcpu->vcpu_idx); + return -EINVAL; + } +#endif + return 0; } -- Gitee From 1391d04b582fda7324469dcd1cfb26d21a127a5d Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:32 +0800 Subject: [PATCH 101/231] kvm: hisi: Don't allow to change mpidr in IPIv commit a68af58d270de2b9c0622704252efed84ff09cf4 openEuler IPIV uses the MPIDR value to transmit vpeid to hardware, so don't allow to change it.
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/sys_regs.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 21317e5ada57..9aaf44929e70 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1630,6 +1630,25 @@ static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, return val; } +#ifdef CONFIG_ARM64_HISI_IPIV +extern struct static_key_false ipiv_enable; +static int set_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + u64 val) +{ + if (static_branch_unlikely(&ipiv_enable) && + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) { + if (val != __vcpu_sys_reg(vcpu, rd->reg)) { + kvm_err("IPIV ERROR: MPIDR changed\n"); + return -EINVAL; + } + } + + __vcpu_sys_reg(vcpu, rd->reg) = val; + + return 0; +} +#endif + static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 val) @@ -2147,7 +2166,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, +#ifdef CONFIG_ARM64_HISI_IPIV + { SYS_DESC(SYS_MPIDR_EL1), + .reset = reset_mpidr, .reg = MPIDR_EL1, .set_user = set_mpidr}, +#else { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, +#endif /* * ID regs: all ID_SANITISED() entries here must have corresponding -- Gitee From e034c647f2d52186f4555523a39605e3ce767fcd Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:33 +0800 Subject: [PATCH 102/231] KVM: arm64: using kvm_vgic_global_state for ipiv commit 730562db1d901a6d175a76c5a35c4d4a867d150d openEuler when kvm-arm.vgic_v4_enable=0 kvm-arm.ipiv_enabled=1 is configured in the cmdline, the Host KVM displays "ipiv enabled", but ipiv is not enabled. gic_data.rdists.has_rvpeid is hardware-level information, which does not indicate that GICv4.1 is enabled for KVM. 
So whether the host supports IPIV depends on whether KVM enables GICv4.1, instead of whether the hardware supports GICv4.1. Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 3 --- arch/arm64/kvm/arm.c | 14 -------------- arch/arm64/kvm/hisilicon/hisi_virt.c | 2 +- arch/arm64/kvm/hisilicon/hisi_virt.h | 1 - arch/arm64/kvm/vgic/vgic-init.c | 11 +++++++++++ drivers/irqchip/irq-gic-v3.c | 9 --------- 6 files changed, 12 insertions(+), 28 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 982f8f1daf1c..5d43bd341450 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1258,8 +1258,5 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; -#ifdef CONFIG_ARM64_HISI_IPIV -extern bool kvm_ipiv_support; -#endif #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e08474428fd3..809e60e1ba16 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -68,11 +68,6 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; -#ifdef CONFIG_ARM64_HISI_IPIV -/* Capability of IPIV */ -bool kvm_ipiv_support; -#endif /* CONFIG_ARM64_HISI_IPIV */ - static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); bool is_kvm_arm_initialised(void) @@ -2843,18 +2838,9 @@ static __init int kvm_arm_init(void) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); -#ifdef CONFIG_ARM64_HISI_IPIV - kvm_ipiv_support = hisi_ipiv_supported(); -#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); -#ifdef CONFIG_ARM64_HISI_IPIV - kvm_info("KVM ipiv %s\n", kvm_ipiv_support ?
"enabled" : "disabled"); - - if (kvm_ipiv_support) - ipiv_gicd_init(); -#endif if (kvm_dvmbm_support) kvm_get_pg_cfg(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index fb9ba7eccb5f..3fffbbc280f0 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -185,7 +185,7 @@ bool hisi_ipiv_supported(void) return false; /* Enable IPIV feature if necessary */ - if (!is_gicv4p1()) { + if (!kvm_vgic_global_state.has_gicv4_1) { kvm_info("Hisi ipiv needs to enable GICv4p1!\n"); return false; } diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 0fbefabf4f39..98f7e82c1ad3 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -150,6 +150,5 @@ static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #ifdef CONFIG_ARM64_HISI_IPIV extern bool gic_dist_enable_ipiv(void); -extern bool is_gicv4p1(void); #endif /* CONFIG_ARM64_HISI_IPIV */ #endif /* __HISI_VIRT_H__ */ diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 950d890be7cc..43fc801cf682 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -13,6 +13,10 @@ #include #include "vgic.h" +#ifdef CONFIG_ARM64_HISI_IPIV +#include "hisilicon/hisi_virt.h" +#endif + /* * Initialization rules: there are multiple stages to the vgic * initialization, both for the distributor and the CPU interfaces. 
The basic @@ -650,6 +654,13 @@ int kvm_vgic_hyp_init(void) kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); #ifdef CONFIG_ARM64_HISI_IPIV + if (hisi_ipiv_supported()) { + ipiv_gicd_init(); + kvm_info("KVM ipiv enabled\n"); + } else { + kvm_info("KVM ipiv disabled\n"); + } + if (static_branch_unlikely(&ipiv_enable)) { ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 543cf3256027..ffe74da733a8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1413,15 +1413,6 @@ static int gic_dist_supports_lpis(void) } #ifdef CONFIG_ARM64_HISI_IPIV -bool is_gicv4p1(void) -{ - if (!gic_data.rdists.has_rvpeid) - return false; - - return true; -} -EXPORT_SYMBOL(is_gicv4p1); - void gic_dist_enable_ipiv(void) { u32 val; -- Gitee From 407a2e22fad7d6a9ac90d501b7712d24dcc899d9 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:34 +0800 Subject: [PATCH 103/231] kvm: hisi: print error for IPIV commit 8207b85ea72073cc2f75884bfad29b41d805ef24 openEuler Displays detailed IPIV error causes based on hardware information. 1. Guest sends SGI with IRM=1. 2. Guest sends multicast. 3. The index of VM table exceeds the supported range. 
Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-init.c | 21 ++++++++++++++++++++- drivers/irqchip/irq-gic-v3-its.c | 8 ++++++++ include/linux/irqchip/arm-gic-v3.h | 6 ++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 43fc801cf682..2d97816299d4 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -14,6 +14,7 @@ #include "vgic.h" #ifdef CONFIG_ARM64_HISI_IPIV +#include #include "hisilicon/hisi_virt.h" #endif @@ -538,9 +539,27 @@ void kvm_vgic_cpu_down(void) } #ifdef CONFIG_ARM64_HISI_IPIV +extern void __iomem *gic_data_rdist_get_vlpi_base(void); static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) { - kvm_info("IPIV irq handler!\n"); + void __iomem *vlpi_base = gic_data_rdist_get_vlpi_base(); + u32 gicr_ipiv_st; + bool broadcast_err, grpbrd_err, vcpuidx_err; + + gicr_ipiv_st = readl_relaxed(vlpi_base + GICR_IPIV_ST); + + broadcast_err = !!(gicr_ipiv_st & GICR_IPIV_ST_IRM_ERR); + if (broadcast_err) + kvm_err("IPIV error: IRM=1 Guest broadcast error\n"); + + grpbrd_err = !!(gicr_ipiv_st & GICR_IPIV_ST_BRPBRD_ERR); + if (grpbrd_err) + kvm_err("IPIV error: Guest group broadcast error\n"); + + vcpuidx_err = !!(gicr_ipiv_st & GICR_IPIV_ST_VCPUIDX_ERR); + if (vcpuidx_err) + kvm_err("IPIV error: The VCPU index is out of range\n"); + return IRQ_HANDLED; } #endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 425ad89c1cfd..cdf90dc201da 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -431,6 +431,14 @@ void build_devid_pools(void) #endif +#ifdef CONFIG_ARM64_HISI_IPIV +void __iomem *gic_data_rdist_get_vlpi_base(void) +{ + return gic_data_rdist_vlpi_base(); +} +EXPORT_SYMBOL(gic_data_rdist_get_vlpi_base); +#endif + static struct page *its_alloc_pages_node(int node, gfp_t gfp, unsigned int order) { diff --git 
a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 3abfd32d71e3..d6b5dad67b1f 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -389,6 +389,12 @@ #define GICR_IPIV_ST 0x14c #define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 #define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) +#define GICR_IPIV_ST_IRM_ERR_ST_SHIFT 1 +#define GICR_IPIV_ST_IRM_ERR (1 << GICR_IPIV_ST_IRM_ERR_ST_SHIFT) +#define GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT 2 +#define GICR_IPIV_ST_BRPBRD_ERR (1 << GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT) +#define GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT 3 +#define GICR_IPIV_ST_VCPUIDX_ERR (1 << GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT) #endif /* CONFIG_ARM64_HISI_IPIV */ /* -- Gitee From 94fd823957e57189e634af6fa421b5e71f7e7d89 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:35 +0800 Subject: [PATCH 104/231] arm64/kabi: use KABI_EXTEND to skip KABI check commit 5a25f1236baea2de6e5382bb619f60a9b4a91107 openEuler Use KABI_EXTEND to skip KABI check of IPIV feature. 
Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- include/linux/irqchip/arm-gic-v4.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index ae3def6ce7d5..429236cb32b4 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -28,8 +28,8 @@ struct its_vm { u32 vlpi_count[GICv4_ITS_LIST_MAX]; #ifdef CONFIG_ARM64_HISI_IPIV KABI_EXTEND(struct page *vpeid_page) - bool enable_ipiv_from_vmm; - bool enable_ipiv_from_guest; + KABI_EXTEND(bool enable_ipiv_from_vmm) + KABI_EXTEND(bool enable_ipiv_from_guest) #endif }; -- Gitee From 40ba26f82f4e7a5086ef00004398c02502e802df Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Tue, 17 Jun 2025 19:31:02 +0800 Subject: [PATCH 105/231] KVM: arm64: check if IPIV is enabled in BIOS commit 4c1b2c9b61b21a0183f345eb7076db7ac054ae6c openEuler GICD_MISC_CTRL bit19(cfg_ipiv_en) is read-only in EL2. The write of GICD_MISC_CTRL bit19=1 is done by BIOS. Therefore, need to check whether the BIOS has enabled ipiv in OS. 
Fixes: ("kvm: hisi_virt: Probe and configure IPIV capacity on HIP12") Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 5 +++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 1 + drivers/irqchip/irq-gic-v3.c | 16 ++++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 3fffbbc280f0..cf22f247f00f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -180,6 +180,11 @@ bool hisi_ipiv_supported(void) return false; } + if (!gic_get_ipiv_status()) { + kvm_info("Hisi ipiv is disabled by BIOS\n"); + return false; + } + /* User provided kernel command-line parameter */ if (!ipiv_enabled || !is_kernel_in_hyp_mode()) return false; diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 98f7e82c1ad3..2a995acd1c18 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -150,5 +150,6 @@ static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #ifdef CONFIG_ARM64_HISI_IPIV extern bool gic_dist_enable_ipiv(void); +extern bool gic_get_ipiv_status(void); #endif /* CONFIG_ARM64_HISI_IPIV */ #endif /* __HISI_VIRT_H__ */ diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index ffe74da733a8..d91acee4ba39 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1417,11 +1417,7 @@ void gic_dist_enable_ipiv(void) { u32 val; - val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); - val |= GICD_MISC_CTRL_CFG_IPIV_EN; - writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL); static_branch_enable(&ipiv_enable); - val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | (4 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | (12 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | @@ -1433,6 +1429,18 @@ void gic_dist_enable_ipiv(void) writel_relaxed(0x4880, gic_data.dist_base + 
GICD_IPIV_ITS_TA_BASE); } EXPORT_SYMBOL(gic_dist_enable_ipiv); + +bool gic_get_ipiv_status(void) +{ + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); + if (val & GICD_MISC_CTRL_CFG_IPIV_EN) + return true; + + return false; +} +EXPORT_SYMBOL(gic_get_ipiv_status); #endif /* CONFIG_ARM64_HISI_IPIV */ static void gic_cpu_init(void) -- Gitee From d7e5a241f80786a7e5ca51fb084bbe5c3e419615 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Thu, 20 Nov 2025 16:45:17 +0800 Subject: [PATCH 106/231] KVM: arm64: ipiv: fix bug in live migration commit 22f1e3b38ca09cbc655a606e6404b996710841a1 openEuler If an environment has IPIV enabled and a VM also has IPIV enabled, when this VM is migrated to a host environment where IPIV is not enabled, the host will continuously print "vSGI trap! IPIV disabled!" This is because the destination host enabled the VM's IPIV without checking for IPIV support. Fixes: ("KVM: arm64: fix live migration bug of IPIv") Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/vgic/vgic-its.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 2bf8b4828930..8fa86a43966d 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -22,6 +22,7 @@ #include "vgic.h" #include "vgic-mmio.h" +#include "hisilicon/hisi_virt.h" static int vgic_its_save_tables_v0(struct vgic_its *its); static int vgic_its_restore_tables_v0(struct vgic_its *its); @@ -534,7 +535,8 @@ static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm, return -EINVAL; #ifdef CONFIG_ARM64_HISI_IPIV - if (val & (1UL << HISI_GUEST_ENABLE_IPIV_SHIFT)) + if (hisi_ipiv_supported_per_vm(kvm) && + val & (1UL << HISI_GUEST_ENABLE_IPIV_SHIFT)) kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; #endif -- Gitee From 65a666dcc2f817a7d829b1ece6b9a0fdd5d1786e Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Thu, 20 Nov 2025 16:45:16 +0800 
Subject: [PATCH 107/231] KVM: arm64: ipiv: Change parameter from vcpu to kvm commit cbd165f529bd32b586342c0a2d11c3ca50b9fdd3 openEuler IPIV is a per-VM feature. When checking whether IPIV is supported, it is more appropriate to use kvm as the input parameter. Fixes: ("KVM: arm64: Implement PV_SGI related calls") Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 10 +++++----- arch/arm64/kvm/hisilicon/hisi_virt.h | 4 ++-- arch/arm64/kvm/hypercalls.c | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index cf22f247f00f..5731d337b228 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -201,27 +201,27 @@ bool hisi_ipiv_supported(void) extern struct static_key_false ipiv_enable; -bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +bool hisi_ipiv_supported_per_vm(struct kvm *kvm) { /* IPIV is supported by the hardware */ if (!static_branch_unlikely(&ipiv_enable)) return false; /* vSGI passthrough is configured */ - if (!vcpu->kvm->arch.vgic.nassgireq) + if (!kvm->arch.vgic.nassgireq) return false; /* IPIV is enabled by the user */ - if (!vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) + if (!kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) return false; return true; } -void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) +void hisi_ipiv_enable_per_vm(struct kvm *kvm) { /* Enable IPIV feature */ - vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; } void ipiv_gicd_init(void) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 2a995acd1c18..4ae192e42045 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -96,8 +96,8 @@ bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); #ifdef CONFIG_ARM64_HISI_IPIV bool 
hisi_ipiv_supported(void); -bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu); -void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu); +bool hisi_ipiv_supported_per_vm(struct kvm *kvm); +void hisi_ipiv_enable_per_vm(struct kvm *kvm); void ipiv_gicd_init(void); #endif /* CONFIG_ARM64_HISI_IPIV */ void kvm_get_pg_cfg(void); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index a85bf604b9ac..3a6700344341 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -354,14 +354,14 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) break; #ifdef CONFIG_ARM64_HISI_IPIV case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: - if (hisi_ipiv_supported_per_vm(vcpu)) + if (hisi_ipiv_supported_per_vm(vcpu->kvm)) val[0] = SMCCC_RET_SUCCESS; else val[0] = SMCCC_RET_NOT_SUPPORTED; break; case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: - if (hisi_ipiv_supported_per_vm(vcpu)) { - hisi_ipiv_enable_per_vm(vcpu); + if (hisi_ipiv_supported_per_vm(vcpu->kvm)) { + hisi_ipiv_enable_per_vm(vcpu->kvm); val[0] = SMCCC_RET_SUCCESS; } else { val[0] = SMCCC_RET_NOT_SUPPORTED; -- Gitee From 707d277d2f9724bf1a15edea20ab1307d42f7d79 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Mon, 24 Nov 2025 10:25:35 +0800 Subject: [PATCH 108/231] KVM: arm64: ipiv: change declaration argument commit 0a93f3e2d545356fb8b996eeab182562a6d9a274 openEuler Add the missed changes. When CONFIG_KVM_HISI_VIRT is not set, a compilation error will occur. 
Fixes: ("KVM: arm64: ipiv: Change parameter from vcpu to kvm") Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202511221815.EIAnEfb9-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202511221643.H3LgffBo-lkp@intel.com/ --- arch/arm64/kvm/hisilicon/hisi_virt.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 4ae192e42045..ea3f20ffec70 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -124,11 +124,11 @@ static inline bool hisi_ipiv_supported(void) { return false; } -static bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +static inline bool hisi_ipiv_supported_per_vm(struct kvm *kvm) { return false; } -static void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) {} +static inline void hisi_ipiv_enable_per_vm(struct kvm *kvm) {} static inline void ipiv_gicd_init(void) {} #endif /* CONFIG_ARM64_HISI_IPIV */ static inline void kvm_get_pg_cfg(void) {} -- Gitee From cd89e6e976894d3aa1353ee0623c3e04f681e3a4 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:15 +0800 Subject: [PATCH 109/231] KVM: x86: allow CPUID 0xC000_0000 to proceed on Zhaoxin CPUs mainline inclusion from v6.18-rc3 commit 1f0654dc75b8b4945988f802e4e532ff722ecce7 upstream. category: feature -------------------- Bypass the Centaur-only filter for the CPUID signature leaf so that processing continues when the CPU vendor is Zhaoxin. 
Signed-off-by: Ewan Hai Link: https://lore.kernel.org/r/20250818083034.93935-1-ewanhai-oc@zhaoxin.com Signed-off-by: Sean Christopherson Signed-off-by: leoliu-oc --- arch/x86/kvm/cpuid.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 697cdc1c57ad..bf9fbbde3f45 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1463,7 +1463,8 @@ static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, int r; if (func == CENTAUR_CPUID_SIGNATURE && - boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; r = do_cpuid_func(array, func, type); -- Gitee From 95aa528cd7909d648752750d99bf7612b675d073 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:16 +0800 Subject: [PATCH 110/231] x86/split_lock: Move Split and Bus lock code to a dedicated file mainline inclusion from v6.11-rc2 commit 350afa8a1101f62ce31bc4ed6f69cf4b90ec4fa2 upstream. category: feature ------------------- Bus Lock Detect functionality on AMD platforms works identical to Intel. Move split_lock and bus_lock specific code from intel.c to a dedicated file so that it can be compiled and supported on non-Intel platforms. Also, introduce CONFIG_X86_BUS_LOCK_DETECT, make it dependent on CONFIG_CPU_SUP_INTEL and add compilation dependency of the new bus_lock.c file on CONFIG_X86_BUS_LOCK_DETECT. 
Signed-off-by: Ravi Bangoria Signed-off-by: Thomas Gleixner Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/all/20240808062937.1149-2-ravi.bangoria@amd.com Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- arch/x86/Kconfig | 8 + arch/x86/include/asm/cpu.h | 12 +- arch/x86/kernel/cpu/Makefile | 2 + arch/x86/kernel/cpu/bus_lock.c | 406 ++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/intel.c | 419 --------------------------------- include/linux/sched.h | 2 +- kernel/fork.c | 2 +- 7 files changed, 428 insertions(+), 423 deletions(-) create mode 100644 arch/x86/kernel/cpu/bus_lock.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7c9809925f19..611f43f60a3f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2503,6 +2503,14 @@ config STRICT_SIGALTSTACK_SIZE source "kernel/livepatch/Kconfig" +config X86_BUS_LOCK_DETECT + bool "Split Lock Detect and Bus Lock Detect support" + depends on CPU_SUP_INTEL + default y + help + Enable Split Lock Detect and Bus Lock Detect functionalities. + See for more information. 
+ endmenu config CC_HAS_SLS diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 0942bcfa9a5f..de6ce6286053 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -30,12 +30,13 @@ int mwait_usable(const struct cpuinfo_x86 *); unsigned int x86_family(unsigned int sig); unsigned int x86_model(unsigned int sig); unsigned int x86_stepping(unsigned int sig); -#ifdef CONFIG_CPU_SUP_INTEL +#ifdef CONFIG_X86_BUS_LOCK_DETECT extern void __init sld_setup(struct cpuinfo_x86 *c); extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); extern void handle_bus_lock(struct pt_regs *regs); -u8 get_this_hybrid_cpu_type(void); +void split_lock_init(void); +void bus_lock_init(void); #else static inline void __init sld_setup(struct cpuinfo_x86 *c) {} static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) @@ -49,6 +50,13 @@ static inline bool handle_guest_split_lock(unsigned long ip) } static inline void handle_bus_lock(struct pt_regs *regs) {} +static inline void split_lock_init(void) {} +static inline void bus_lock_init(void) {} +#endif + +#ifdef CONFIG_CPU_SUP_INTEL +u8 get_this_hybrid_cpu_type(void); +#else static inline u8 get_this_hybrid_cpu_type(void) { diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 09bb2e72b7a3..49f934b179b0 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -60,6 +60,8 @@ obj-$(CONFIG_ACRN_GUEST) += acrn.o obj-$(CONFIG_DEBUG_FS) += debugfs.o +obj-$(CONFIG_X86_BUS_LOCK_DETECT) += bus_lock.o + quiet_cmd_mkcapflags = MKCAP $@ cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $@ $^ diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c new file mode 100644 index 000000000000..704e9241b964 --- /dev/null +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "x86/split 
lock detection: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +enum split_lock_detect_state { + sld_off = 0, + sld_warn, + sld_fatal, + sld_ratelimit, +}; + +/* + * Default to sld_off because most systems do not support split lock detection. + * sld_state_setup() will switch this to sld_warn on systems that support + * split lock/bus lock detect, unless there is a command line override. + */ +static enum split_lock_detect_state sld_state __ro_after_init = sld_off; +static u64 msr_test_ctrl_cache __ro_after_init; + +/* + * With a name like MSR_TEST_CTL it should go without saying, but don't touch + * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it + * on CPUs that do not support SLD can cause fireworks, even when writing '0'. + */ +static bool cpu_model_supports_sld __ro_after_init; + +static const struct { + const char *option; + enum split_lock_detect_state state; +} sld_options[] __initconst = { + { "off", sld_off }, + { "warn", sld_warn }, + { "fatal", sld_fatal }, + { "ratelimit:", sld_ratelimit }, +}; + +static struct ratelimit_state bld_ratelimit; + +static unsigned int sysctl_sld_mitigate = 1; +static DEFINE_SEMAPHORE(buslock_sem, 1); + +#ifdef CONFIG_PROC_SYSCTL +static struct ctl_table sld_sysctls[] = { + { + .procname = "split_lock_mitigate", + .data = &sysctl_sld_mitigate, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +}; + +static int __init sld_mitigate_sysctl_init(void) +{ + register_sysctl_init("kernel", sld_sysctls); + return 0; +} + +late_initcall(sld_mitigate_sysctl_init); +#endif + +static inline bool match_option(const char *arg, int arglen, const char *opt) +{ + int len = strlen(opt), ratelimit; + + if (strncmp(arg, opt, len)) + return false; + + /* + * Min ratelimit is 1 bus lock/sec. + * Max ratelimit is 1000 bus locks/sec. 
+ */ + if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 && + ratelimit > 0 && ratelimit <= 1000) { + ratelimit_state_init(&bld_ratelimit, HZ, ratelimit); + ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE); + return true; + } + + return len == arglen; +} + +static bool split_lock_verify_msr(bool on) +{ + u64 ctrl, tmp; + + if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl)) + return false; + if (on) + ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; + else + ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; + if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) + return false; + rdmsrl(MSR_TEST_CTRL, tmp); + return ctrl == tmp; +} + +static void __init sld_state_setup(void) +{ + enum split_lock_detect_state state = sld_warn; + char arg[20]; + int i, ret; + + if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && + !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) + return; + + ret = cmdline_find_option(boot_command_line, "split_lock_detect", + arg, sizeof(arg)); + if (ret >= 0) { + for (i = 0; i < ARRAY_SIZE(sld_options); i++) { + if (match_option(arg, ret, sld_options[i].option)) { + state = sld_options[i].state; + break; + } + } + } + sld_state = state; +} + +static void __init __split_lock_setup(void) +{ + if (!split_lock_verify_msr(false)) { + pr_info("MSR access failed: Disabled\n"); + return; + } + + rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); + + if (!split_lock_verify_msr(true)) { + pr_info("MSR access failed: Disabled\n"); + return; + } + + /* Restore the MSR to its cached value. */ + wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); + + setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); +} + +/* + * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking + * is not implemented as one thread could undo the setting of the other + * thread immediately after dropping the lock anyway. 
+ */ +static void sld_update_msr(bool on) +{ + u64 test_ctrl_val = msr_test_ctrl_cache; + + if (on) + test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; + + wrmsrl(MSR_TEST_CTRL, test_ctrl_val); +} + +void split_lock_init(void) +{ + /* + * #DB for bus lock handles ratelimit and #AC for split lock is + * disabled. + */ + if (sld_state == sld_ratelimit) { + split_lock_verify_msr(false); + return; + } + + if (cpu_model_supports_sld) + split_lock_verify_msr(sld_state != sld_off); +} + +static void __split_lock_reenable_unlock(struct work_struct *work) +{ + sld_update_msr(true); + up(&buslock_sem); +} + +static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock); + +static void __split_lock_reenable(struct work_struct *work) +{ + sld_update_msr(true); +} +static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); + +/* + * If a CPU goes offline with pending delayed work to re-enable split lock + * detection then the delayed work will be executed on some other CPU. That + * handles releasing the buslock_sem, but because it executes on a + * different CPU probably won't re-enable split lock detection. This is a + * problem on HT systems since the sibling CPU on the same core may then be + * left running with split lock detection disabled. + * + * Unconditionally re-enable detection here. + */ +static int splitlock_cpu_offline(unsigned int cpu) +{ + sld_update_msr(true); + + return 0; +} + +static void split_lock_warn(unsigned long ip) +{ + struct delayed_work *work; + int cpu; + + if (!current->reported_split_lock) + pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", + current->comm, current->pid, ip); + current->reported_split_lock = 1; + + if (sysctl_sld_mitigate) { + /* + * misery factor #1: + * sleep 10ms before trying to execute split lock. + */ + if (msleep_interruptible(10) > 0) + return; + /* + * Misery factor #2: + * only allow one buslocked disabled core at a time. 
+ */ + if (down_interruptible(&buslock_sem) == -EINTR) + return; + work = &sl_reenable_unlock; + } else { + work = &sl_reenable; + } + + cpu = get_cpu(); + schedule_delayed_work_on(cpu, work, 2); + + /* Disable split lock detection on this CPU to make progress */ + sld_update_msr(false); + put_cpu(); +} + +bool handle_guest_split_lock(unsigned long ip) +{ + if (sld_state == sld_warn) { + split_lock_warn(ip); + return true; + } + + pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", + current->comm, current->pid, + sld_state == sld_fatal ? "fatal" : "bogus", ip); + + current->thread.error_code = 0; + current->thread.trap_nr = X86_TRAP_AC; + force_sig_fault(SIGBUS, BUS_ADRALN, NULL); + return false; +} +EXPORT_SYMBOL_GPL(handle_guest_split_lock); + +void bus_lock_init(void) +{ + u64 val; + + if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) + return; + + rdmsrl(MSR_IA32_DEBUGCTLMSR, val); + + if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && + (sld_state == sld_warn || sld_state == sld_fatal)) || + sld_state == sld_off) { + /* + * Warn and fatal are handled by #AC for split lock if #AC for + * split lock is supported. + */ + val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; + } else { + val |= DEBUGCTLMSR_BUS_LOCK_DETECT; + } + + wrmsrl(MSR_IA32_DEBUGCTLMSR, val); +} + +bool handle_user_split_lock(struct pt_regs *regs, long error_code) +{ + if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) + return false; + split_lock_warn(regs->ip); + return true; +} + +void handle_bus_lock(struct pt_regs *regs) +{ + switch (sld_state) { + case sld_off: + break; + case sld_ratelimit: + /* Enforce no more than bld_ratelimit bus locks/sec. */ + while (!__ratelimit(&bld_ratelimit)) + msleep(20); + /* Warn on the bus lock. 
*/ + fallthrough; + case sld_warn: + pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n", + current->comm, current->pid, regs->ip); + break; + case sld_fatal: + force_sig_fault(SIGBUS, BUS_ADRALN, NULL); + break; + } +} + +/* + * CPU models that are known to have the per-core split-lock detection + * feature even though they do not enumerate IA32_CORE_CAPABILITIES. + */ +static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { + X86_MATCH_VFM(INTEL_ICELAKE_X, 0), + X86_MATCH_VFM(INTEL_ICELAKE_L, 0), + X86_MATCH_VFM(INTEL_ICELAKE_D, 0), + {} +}; + +static void __init split_lock_setup(struct cpuinfo_x86 *c) +{ + const struct x86_cpu_id *m; + u64 ia32_core_caps; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return; + + /* Check for CPUs that have support but do not enumerate it: */ + m = x86_match_cpu(split_lock_cpu_ids); + if (m) + goto supported; + + if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) + return; + + /* + * Not all bits in MSR_IA32_CORE_CAPS are architectural, but + * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set + * it have split lock detection. 
+ */ + rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); + if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) + goto supported; + + /* CPU is not in the model list and does not have the MSR bit: */ + return; + +supported: + cpu_model_supports_sld = true; + __split_lock_setup(); +} + +static void sld_state_show(void) +{ + if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && + !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) + return; + + switch (sld_state) { + case sld_off: + pr_info("disabled\n"); + break; + case sld_warn: + if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { + pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); + if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "x86/splitlock", NULL, splitlock_cpu_offline) < 0) + pr_warn("No splitlock CPU offline handler\n"); + } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { + pr_info("#DB: warning on user-space bus_locks\n"); + } + break; + case sld_fatal: + if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { + pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n"); + } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { + pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n", + boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ? 
+ " from non-WB" : ""); + } + break; + case sld_ratelimit: + if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) + pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst); + break; + } +} + +void __init sld_setup(struct cpuinfo_x86 *c) +{ + split_lock_setup(c); + sld_state_setup(); + sld_state_show(); +} diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 09ae0a18f441..de2631b9401b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -7,13 +7,9 @@ #include #include #include -#include #include #include #include -#include -#include -#include #include #include @@ -24,8 +20,6 @@ #include #include #include -#include -#include #include #include #include @@ -41,28 +35,6 @@ #include #endif -enum split_lock_detect_state { - sld_off = 0, - sld_warn, - sld_fatal, - sld_ratelimit, -}; - -/* - * Default to sld_off because most systems do not support split lock detection. - * sld_state_setup() will switch this to sld_warn on systems that support - * split lock/bus lock detect, unless there is a command line override. - */ -static enum split_lock_detect_state sld_state __ro_after_init = sld_off; -static u64 msr_test_ctrl_cache __ro_after_init; - -/* - * With a name like MSR_TEST_CTL it should go without saying, but don't touch - * MSR_TEST_CTL unless the CPU is one of the whitelisted models. Writing it - * on CPUs that do not support SLD can cause fireworks, even when writing '0'. - */ -static bool cpu_model_supports_sld __ro_after_init; - /* * Processors which have self-snooping capability can handle conflicting * memory type across CPUs by snooping its own cache. 
However, there exists @@ -596,9 +568,6 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c) wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); } -static void split_lock_init(void); -static void bus_lock_init(void); - static void init_intel(struct cpuinfo_x86 *c) { early_init_intel(c); @@ -971,394 +940,6 @@ static const struct cpu_dev intel_cpu_dev = { cpu_dev_register(intel_cpu_dev); -#undef pr_fmt -#define pr_fmt(fmt) "x86/split lock detection: " fmt - -static const struct { - const char *option; - enum split_lock_detect_state state; -} sld_options[] __initconst = { - { "off", sld_off }, - { "warn", sld_warn }, - { "fatal", sld_fatal }, - { "ratelimit:", sld_ratelimit }, -}; - -static struct ratelimit_state bld_ratelimit; - -static unsigned int sysctl_sld_mitigate = 1; -static DEFINE_SEMAPHORE(buslock_sem, 1); - -#ifdef CONFIG_PROC_SYSCTL -static struct ctl_table sld_sysctls[] = { - { - .procname = "split_lock_mitigate", - .data = &sysctl_sld_mitigate, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_douintvec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE, - }, - {} -}; - -static int __init sld_mitigate_sysctl_init(void) -{ - register_sysctl_init("kernel", sld_sysctls); - return 0; -} - -late_initcall(sld_mitigate_sysctl_init); -#endif - -static inline bool match_option(const char *arg, int arglen, const char *opt) -{ - int len = strlen(opt), ratelimit; - - if (strncmp(arg, opt, len)) - return false; - - /* - * Min ratelimit is 1 bus lock/sec. - * Max ratelimit is 1000 bus locks/sec. 
- */ - if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 && - ratelimit > 0 && ratelimit <= 1000) { - ratelimit_state_init(&bld_ratelimit, HZ, ratelimit); - ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE); - return true; - } - - return len == arglen; -} - -static bool split_lock_verify_msr(bool on) -{ - u64 ctrl, tmp; - - if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl)) - return false; - if (on) - ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; - else - ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT; - if (wrmsrl_safe(MSR_TEST_CTRL, ctrl)) - return false; - rdmsrl(MSR_TEST_CTRL, tmp); - return ctrl == tmp; -} - -static void __init sld_state_setup(void) -{ - enum split_lock_detect_state state = sld_warn; - char arg[20]; - int i, ret; - - if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && - !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) - return; - - ret = cmdline_find_option(boot_command_line, "split_lock_detect", - arg, sizeof(arg)); - if (ret >= 0) { - for (i = 0; i < ARRAY_SIZE(sld_options); i++) { - if (match_option(arg, ret, sld_options[i].option)) { - state = sld_options[i].state; - break; - } - } - } - sld_state = state; -} - -static void __init __split_lock_setup(void) -{ - if (!split_lock_verify_msr(false)) { - pr_info("MSR access failed: Disabled\n"); - return; - } - - rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); - - if (!split_lock_verify_msr(true)) { - pr_info("MSR access failed: Disabled\n"); - return; - } - - /* Restore the MSR to its cached value. */ - wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache); - - setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT); -} - -/* - * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking - * is not implemented as one thread could undo the setting of the other - * thread immediately after dropping the lock anyway. 
- */ -static void sld_update_msr(bool on) -{ - u64 test_ctrl_val = msr_test_ctrl_cache; - - if (on) - test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT; - - wrmsrl(MSR_TEST_CTRL, test_ctrl_val); -} - -static void split_lock_init(void) -{ - /* - * #DB for bus lock handles ratelimit and #AC for split lock is - * disabled. - */ - if (sld_state == sld_ratelimit) { - split_lock_verify_msr(false); - return; - } - - if (cpu_model_supports_sld) - split_lock_verify_msr(sld_state != sld_off); -} - -static void __split_lock_reenable_unlock(struct work_struct *work) -{ - sld_update_msr(true); - up(&buslock_sem); -} - -static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock); - -static void __split_lock_reenable(struct work_struct *work) -{ - sld_update_msr(true); -} -/* - * In order for each CPU to schedule its delayed work independently of the - * others, delayed work struct must be per-CPU. This is not required when - * sysctl_sld_mitigate is enabled because of the semaphore that limits - * the number of simultaneously scheduled delayed works to 1. - */ -static DEFINE_PER_CPU(struct delayed_work, sl_reenable); - -/* - * If a CPU goes offline with pending delayed work to re-enable split lock - * detection then the delayed work will be executed on some other CPU. That - * handles releasing the buslock_sem, but because it executes on a - * different CPU probably won't re-enable split lock detection. This is a - * problem on HT systems since the sibling CPU on the same core may then be - * left running with split lock detection disabled. - * - * Unconditionally re-enable detection here. 
- */ -static int splitlock_cpu_offline(unsigned int cpu) -{ - sld_update_msr(true); - - return 0; -} - -static void split_lock_warn(unsigned long ip) -{ - struct delayed_work *work = NULL; - int cpu; - - if (!current->reported_split_lock) - pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", - current->comm, current->pid, ip); - current->reported_split_lock = 1; - - if (sysctl_sld_mitigate) { - /* - * misery factor #1: - * sleep 10ms before trying to execute split lock. - */ - if (msleep_interruptible(10) > 0) - return; - /* - * Misery factor #2: - * only allow one buslocked disabled core at a time. - */ - if (down_interruptible(&buslock_sem) == -EINTR) - return; - work = &sl_reenable_unlock; - } - - cpu = get_cpu(); - - if (!work) { - work = this_cpu_ptr(&sl_reenable); - /* Deferred initialization of per-CPU struct */ - if (!work->work.func) - INIT_DELAYED_WORK(work, __split_lock_reenable); - } - - schedule_delayed_work_on(cpu, work, 2); - - /* Disable split lock detection on this CPU to make progress */ - sld_update_msr(false); - put_cpu(); -} - -bool handle_guest_split_lock(unsigned long ip) -{ - if (sld_state == sld_warn) { - split_lock_warn(ip); - return true; - } - - pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", - current->comm, current->pid, - sld_state == sld_fatal ? "fatal" : "bogus", ip); - - current->thread.error_code = 0; - current->thread.trap_nr = X86_TRAP_AC; - force_sig_fault(SIGBUS, BUS_ADRALN, NULL); - return false; -} -EXPORT_SYMBOL_GPL(handle_guest_split_lock); - -static void bus_lock_init(void) -{ - u64 val; - - if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) - return; - - rdmsrl(MSR_IA32_DEBUGCTLMSR, val); - - if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) && - (sld_state == sld_warn || sld_state == sld_fatal)) || - sld_state == sld_off) { - /* - * Warn and fatal are handled by #AC for split lock if #AC for - * split lock is supported. 
- */ - val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT; - } else { - val |= DEBUGCTLMSR_BUS_LOCK_DETECT; - } - - wrmsrl(MSR_IA32_DEBUGCTLMSR, val); -} - -bool handle_user_split_lock(struct pt_regs *regs, long error_code) -{ - if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal) - return false; - split_lock_warn(regs->ip); - return true; -} - -void handle_bus_lock(struct pt_regs *regs) -{ - switch (sld_state) { - case sld_off: - break; - case sld_ratelimit: - /* Enforce no more than bld_ratelimit bus locks/sec. */ - while (!__ratelimit(&bld_ratelimit)) - msleep(20); - /* Warn on the bus lock. */ - fallthrough; - case sld_warn: - pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n", - current->comm, current->pid, regs->ip); - break; - case sld_fatal: - force_sig_fault(SIGBUS, BUS_ADRALN, NULL); - break; - } -} - -/* - * CPU models that are known to have the per-core split-lock detection - * feature even though they do not enumerate IA32_CORE_CAPABILITIES. - */ -static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = { - X86_MATCH_VFM(INTEL_ICELAKE_X, 0), - X86_MATCH_VFM(INTEL_ICELAKE_L, 0), - X86_MATCH_VFM(INTEL_ICELAKE_D, 0), - {} -}; - -static void __init split_lock_setup(struct cpuinfo_x86 *c) -{ - const struct x86_cpu_id *m; - u64 ia32_core_caps; - - if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) - return; - - /* Check for CPUs that have support but do not enumerate it: */ - m = x86_match_cpu(split_lock_cpu_ids); - if (m) - goto supported; - - if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES)) - return; - - /* - * Not all bits in MSR_IA32_CORE_CAPS are architectural, but - * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is. All CPUs that set - * it have split lock detection. 
- */ - rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps); - if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT) - goto supported; - - /* CPU is not in the model list and does not have the MSR bit: */ - return; - -supported: - cpu_model_supports_sld = true; - __split_lock_setup(); -} - -static void sld_state_show(void) -{ - if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) && - !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) - return; - - switch (sld_state) { - case sld_off: - pr_info("disabled\n"); - break; - case sld_warn: - if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { - pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n"); - if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, - "x86/splitlock", NULL, splitlock_cpu_offline) < 0) - pr_warn("No splitlock CPU offline handler\n"); - } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { - pr_info("#DB: warning on user-space bus_locks\n"); - } - break; - case sld_fatal: - if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) { - pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n"); - } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) { - pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n", - boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ? 
- " from non-WB" : ""); - } - break; - case sld_ratelimit: - if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) - pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst); - break; - } -} - -void __init sld_setup(struct cpuinfo_x86 *c) -{ - split_lock_setup(c); - sld_state_setup(); - sld_state_show(); -} - #define X86_HYBRID_CPU_TYPE_ID_SHIFT 24 /** diff --git a/include/linux/sched.h b/include/linux/sched.h index f95f0e8b7839..c449361b583f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1095,7 +1095,7 @@ struct task_struct { #ifdef CONFIG_ARCH_HAS_CPU_PASID unsigned pasid_activated:1; #endif -#ifdef CONFIG_CPU_SUP_INTEL +#ifdef CONFIG_X86_BUS_LOCK_DETECT unsigned reported_split_lock:1; #endif #ifdef CONFIG_TASK_DELAY_ACCT diff --git a/kernel/fork.c b/kernel/fork.c index 9a2e5f5b4e98..c7240d32daea 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1252,7 +1252,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->active_memcg = NULL; #endif -#ifdef CONFIG_CPU_SUP_INTEL +#ifdef CONFIG_X86_BUS_LOCK_DETECT tsk->reported_split_lock = 0; #endif -- Gitee From 857bf848e3fc35a8839b722fd8f8e403cb406994 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:17 +0800 Subject: [PATCH 111/231] x86/bus_lock: Add support for AMD mainline inclusion from v6.11-rc2 commit 408eb7417a92c5354c7be34f7425b305dfe30ad9 upstream. category: feature ------------------- Add Bus Lock Detect (called Bus Lock Trap in AMD docs) support for AMD platforms. Bus Lock Detect is enumerated with CPUID Fn0000_0007_ECX_x0 bit [24 / BUSLOCKTRAP]. It can be enabled through MSR_IA32_DEBUGCTLMSR. When enabled, hardware clears DR6[11] and raises a #DB exception on occurrence of Bus Lock if CPL > 0. More detail about the feature can be found in AMD APM[1]. [1]: AMD64 Architecture Programmer's Manual Pub. 40332, Rev. 
4.07 - June 2023, Vol 2, 13.1.3.6 Bus Lock Trap https://bugzilla.kernel.org/attachment.cgi?id=304653 Signed-off-by: Ravi Bangoria Signed-off-by: Thomas Gleixner Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/all/20240808062937.1149-3-ravi.bangoria@amd.com Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- Documentation/arch/x86/buslock.rst | 3 ++- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/common.c | 2 ++ arch/x86/kernel/cpu/intel.c | 1 - 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Documentation/arch/x86/buslock.rst b/Documentation/arch/x86/buslock.rst index 4c5a4822eeb7..31f1bfdff16f 100644 --- a/Documentation/arch/x86/buslock.rst +++ b/Documentation/arch/x86/buslock.rst @@ -26,7 +26,8 @@ Detection ========= Intel processors may support either or both of the following hardware -mechanisms to detect split locks and bus locks. +mechanisms to detect split locks and bus locks. Some AMD processors also +support bus lock detect. #AC exception for split lock detection -------------------------------------- diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 611f43f60a3f..a74c9667b155 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2505,7 +2505,7 @@ source "kernel/livepatch/Kconfig" config X86_BUS_LOCK_DETECT bool "Split Lock Detect and Bus Lock Detect support" - depends on CPU_SUP_INTEL + depends on CPU_SUP_INTEL || CPU_SUP_AMD default y help Enable Split Lock Detect and Bus Lock Detect functionalities. 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7b640c720c10..6075d6bd94d5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1996,6 +1996,8 @@ static void identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_init) this_cpu->c_init(c); + bus_lock_init(); + /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index de2631b9401b..4605e6f73093 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -656,7 +656,6 @@ static void init_intel(struct cpuinfo_x86 *c) init_intel_misc_features(c); split_lock_init(); - bus_lock_init(); intel_init_thermal(c); } -- Gitee From 1453c69bb87e363b591aa076197caa5dc53f405b Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:17 +0800 Subject: [PATCH 112/231] x86/split_lock: Fix the delayed detection logic mainline inclusion from v6.14-rc3 commit c929d08df8bee855528b9d15b853c892c54e1eee upstream. category: feature ------------------- If the warning mode with disabled mitigation mode is used, then on each CPU where the split lock occurred detection will be disabled in order to make progress and delayed work will be scheduled, which then will enable detection back. Now it turns out that all CPUs use one global delayed work structure. This leads to the fact that if a split lock occurs on several CPUs at the same time (within 2 jiffies), only one CPU will schedule delayed work, but the rest will not. The return value of schedule_delayed_work_on() would have shown this, but it is not checked in the code. A diagram that can help to understand the bug reproduction: - sld_update_msr() enables/disables SLD on both CPUs on the same core - schedule_delayed_work_on() internally checks WORK_STRUCT_PENDING_BIT. 
If a work has the 'pending' status, then schedule_delayed_work_on() will return an error code and, most importantly, the work will not be placed in the workqueue. Let's say we have a multicore system on which split_lock_mitigate=0 and a multithreaded application is running that calls splitlock in multiple threads. Due to the fact that sld_update_msr() affects the entire core (both CPUs), we will consider 2 CPUs from different cores. Let the 2 threads of this application schedule to CPU0 (core 0) and to CPU 2 (core 1), then: | || | | CPU 0 (core 0) || CPU 2 (core 1) | |_________________________________||___________________________________| | || | | 1) SPLIT LOCK occurred || | | || | | 2) split_lock_warn() || | | || | | 3) sysctl_sld_mitigate == 0 || | | (work = &sl_reenable) || | | || | | 4) schedule_delayed_work_on() || | | (reenable will be called || | | after 2 jiffies on CPU 0) || | | || | | 5) disable SLD for core 0 || | | || | | ------------------------- || | | || | | || 6) SPLIT LOCK occurred | | || | | || 7) split_lock_warn() | | || | | || 8) sysctl_sld_mitigate == 0 | | || (work = &sl_reenable, | | || the same address as in 3) ) | | || | | 2 jiffies || 9) schedule_delayed_work_on() | | || fails because the work is in | | || the pending state since 4). | | || The work wasn't placed to the | | || workqueue. reenable won't be | | || called on CPU 2 | | || | | || 10) disable SLD for core 1 | | || | | || From now on SLD will | | || never be reenabled on core 1 | | || | | ------------------------- || | | || | | 11) enable SLD for core 0 by || | | __split_lock_reenable || | | || | If the application threads can be scheduled to all processor cores, then over time there will be only one core left, on which SLD will be enabled and split lock will be able to be detected; and on all other cores SLD will be disabled all the time. 
Most likely, this bug has not been noticed for so long because sysctl_sld_mitigate default value is 1, and in this case a semaphore is used that does not allow 2 different cores to have SLD disabled at the same time, that is, strictly only one work is placed in the workqueue. In order to fix the warning mode with disabled mitigation mode, delayed work has to be per-CPU. Implement it. Fixes: 727209376f49 ("x86/split_lock: Add sysctl to control the misery mode") Signed-off-by: Maksim Davydov Signed-off-by: Ingo Molnar Tested-by: Guilherme G. Piccoli Cc: Thomas Gleixner Cc: Ravi Bangoria Cc: Tom Lendacky Link: https://lore.kernel.org/r/20250115131704.132609-1-davydov-max@yandex-team.ru Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- arch/x86/kernel/cpu/bus_lock.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 704e9241b964..994eae8ad860 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -192,7 +192,13 @@ static void __split_lock_reenable(struct work_struct *work) { sld_update_msr(true); } -static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); +/* + * In order for each CPU to schedule its delayed work independently of the + * others, delayed work struct must be per-CPU. This is not required when + * sysctl_sld_mitigate is enabled because of the semaphore that limits + * the number of simultaneously scheduled delayed works to 1. 
+ */ +static DEFINE_PER_CPU(struct delayed_work, sl_reenable); /* * If a CPU goes offline with pending delayed work to re-enable split lock @@ -213,7 +219,7 @@ static int splitlock_cpu_offline(unsigned int cpu) static void split_lock_warn(unsigned long ip) { - struct delayed_work *work; + struct delayed_work *work = NULL; int cpu; if (!current->reported_split_lock) @@ -235,11 +241,17 @@ static void split_lock_warn(unsigned long ip) if (down_interruptible(&buslock_sem) == -EINTR) return; work = &sl_reenable_unlock; - } else { - work = &sl_reenable; } cpu = get_cpu(); + + if (!work) { + work = this_cpu_ptr(&sl_reenable); + /* Deferred initialization of per-CPU struct */ + if (!work->work.func) + INIT_DELAYED_WORK(work, __split_lock_reenable); + } + schedule_delayed_work_on(cpu, work, 2); /* Disable split lock detection on this CPU to make progress */ -- Gitee From 9a5a3846fca2f63e7aefbe845e4222ff0d71b7a0 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:18 +0800 Subject: [PATCH 113/231] x86/split_lock: Simplify reenabling mainline inclusion from v6.14 commit 0e1ff67d164be45e8ddfea5aaf5803224ede0805 upstream. category: feature ------------------- When split_lock_mitigate is disabled, each CPU needs its own delayed_work structure. They are used to reenable split lock detection after its disabling. But delayed_work structure must be correctly initialized after its allocation. Current implementation uses deferred initialization that makes the split lock handler code unclear. The code can be simplified a bit if the initialization is moved to the appropriate initcall. sld_setup() is called before setup_per_cpu_areas(), thus it can't be used for this purpose, so introduce an independent initcall for the initialization. [ mingo: Simplified the 'work' assignment line a bit more. 
] Signed-off-by: Maksim Davydov Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20250325085807.171885-1-davydov-max@yandex-team.ru Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- arch/x86/kernel/cpu/bus_lock.c | 35 +++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c index 994eae8ad860..4e5cbafa0aac 100644 --- a/arch/x86/kernel/cpu/bus_lock.c +++ b/arch/x86/kernel/cpu/bus_lock.c @@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work) */ static DEFINE_PER_CPU(struct delayed_work, sl_reenable); +/* + * Per-CPU delayed_work can't be statically initialized properly because + * the struct address is unknown. Thus per-CPU delayed_work structures + * have to be initialized during kernel initialization and after calling + * setup_per_cpu_areas(). + */ +static int __init setup_split_lock_delayed_work(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu); + + INIT_DELAYED_WORK(work, __split_lock_reenable); + } + + return 0; +} +pure_initcall(setup_split_lock_delayed_work); + /* * If a CPU goes offline with pending delayed work to re-enable split lock * detection then the delayed work will be executed on some other CPU. That @@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu) static void split_lock_warn(unsigned long ip) { - struct delayed_work *work = NULL; + struct delayed_work *work; int cpu; + unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate); if (!current->reported_split_lock) pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", current->comm, current->pid, ip); current->reported_split_lock = 1; - if (sysctl_sld_mitigate) { + if (saved_sld_mitigate) { /* * misery factor #1: * sleep 10ms before trying to execute split lock. 
@@ -240,18 +261,10 @@ static void split_lock_warn(unsigned long ip) */ if (down_interruptible(&buslock_sem) == -EINTR) return; - work = &sl_reenable_unlock; } cpu = get_cpu(); - - if (!work) { - work = this_cpu_ptr(&sl_reenable); - /* Deferred initialization of per-CPU struct */ - if (!work->work.func) - INIT_DELAYED_WORK(work, __split_lock_reenable); - } - + work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu); schedule_delayed_work_on(cpu, work, 2); /* Disable split lock detection on this CPU to make progress */ -- Gitee From 505c66ff2e42c075fe687589c5efa2e4b984dd75 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:19 +0800 Subject: [PATCH 114/231] x86/split_lock: Add support for Zhaoxin zhaoxin inclusion category: feature -------------------- Add Split Lock Detect support for Zhaoxin platforms. Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- Documentation/arch/x86/buslock.rst | 3 ++- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/centaur.c | 2 ++ arch/x86/kernel/cpu/zhaoxin.c | 2 ++ 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/arch/x86/buslock.rst b/Documentation/arch/x86/buslock.rst index 31f1bfdff16f..a5daebfa0a58 100644 --- a/Documentation/arch/x86/buslock.rst +++ b/Documentation/arch/x86/buslock.rst @@ -27,7 +27,8 @@ Detection Intel processors may support either or both of the following hardware mechanisms to detect split locks and bus locks. Some AMD processors also -support bus lock detect. +support bus lock detect. Some Zhaoxin processors also support split lock +detect. 
#AC exception for split lock detection -------------------------------------- diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a74c9667b155..090d56de9419 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2505,7 +2505,7 @@ source "kernel/livepatch/Kconfig" config X86_BUS_LOCK_DETECT bool "Split Lock Detect and Bus Lock Detect support" - depends on CPU_SUP_INTEL || CPU_SUP_AMD + depends on CPU_SUP_INTEL || CPU_SUP_AMD || CPU_SUP_ZHAOXIN || CPU_SUP_CENTAUR default y help Enable Split Lock Detect and Bus Lock Detect functionalities. diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 09e3d7f57204..be09838720d0 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -221,6 +221,8 @@ static void init_centaur(struct cpuinfo_x86 *c) #endif init_ia32_feat_ctl(c); + + split_lock_init(); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 3180e2026c40..cb01eccc0975 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -101,6 +101,8 @@ static void init_zhaoxin(struct cpuinfo_x86 *c) #endif init_ia32_feat_ctl(c); + + split_lock_init(); } #ifdef CONFIG_X86_32 -- Gitee From 03623e43d21bdf5f78e169ee4aaa25133db93855 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:20 +0800 Subject: [PATCH 115/231] pinctrl: Zhaoxin: Add KH-50000 support zhaoxin inclusion category: feature -------------------- This patch adds GPIO and pinctrl driver support for the Zhaoxin KH-50000 chipset. It includes: A new driver file `pinctrl-kh50000.c` which defines the pin configuration and GPIO functionality for the KH-50000 platform, covering a wide range of pins from general-purpose GPIOs to specialized functions like USB, SMBus, and LPC. Updates to `Kconfig` to introduce the `PINCTRL_KH50000` configuration option, enabling build support for the new driver. 
Modifications to `Makefile` to compile the new `pinctrl-kh50000.o` module when `CONFIG_PINCTRL_KH50000` is enabled. Enhancements to `pinctrl-zhaoxin.h` to introduce new macros, data structures, and enumerations necessary for supporting the KH-50000's specific pin configurations and GPIO types. Refinements in `pinctrl-zhaoxin.c` to improve the generic Zhaoxin pinctrl/GPIO driver, including updates to interrupt handling and probe functions to accommodate the new hardware. Improvements to `pinctrl-kx7000.c` to enhance code quality and maintainability. This change enables full GPIO functionality support for the Zhaoxin KH-50000 platform. Signed-off-by: leoliu-oc --- drivers/pinctrl/zhaoxin/Kconfig | 19 +- drivers/pinctrl/zhaoxin/Makefile | 3 +- drivers/pinctrl/zhaoxin/pinctrl-kh50000.c | 443 ++++++++++++++++++++++ drivers/pinctrl/zhaoxin/pinctrl-kx7000.c | 232 +++++------ drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c | 277 ++++++++------ drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h | 89 ++++- 6 files changed, 815 insertions(+), 248 deletions(-) create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-kh50000.c diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig index 65f95ca80d5c..0a6ed551ec1f 100644 --- a/drivers/pinctrl/zhaoxin/Kconfig +++ b/drivers/pinctrl/zhaoxin/Kconfig @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -# Intel pin control drivers +# Zhaoxin pin control drivers if (X86 || COMPILE_TEST) @@ -10,19 +10,24 @@ config PINCTRL_ZHAOXIN select GENERIC_PINCONF select GPIOLIB select GPIOLIB_IRQCHIP + default m config PINCTRL_KX7000 tristate "Zhaoxin KX7000 pinctrl and GPIO driver" - depends on ACPI && X86 - default m + depends on ACPI select PINCTRL_ZHAOXIN + default m help This pinctrl driver provides an interface that allows configuring of Zhaoxin KX7000 chipset pins and using them as GPIOs. - To compile this driver as a module, choose M here: the - module will be called pinctrl-kx7000. - - If unsure, say Y. 
+config PINCTRL_KH50000 + tristate "Zhaoxin KH50000 pinctrl and GPIO driver" + depends on ACPI + select PINCTRL_ZHAOXIN + default m + help + This pinctrl driver provides an interface that allows configuring + of Zhaoxin KH50000 chipset pins and using them as GPIOs. endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile index a3acfa66f196..d307d578666e 100644 --- a/drivers/pinctrl/zhaoxin/Makefile +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -1,4 +1,5 @@ -# zhaoxin pin control drivers +# Zhaoxin pin control drivers obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o +obj-$(CONFIG_PINCTRL_KH50000) += pinctrl-kh50000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kh50000.c b/drivers/pinctrl/zhaoxin/pinctrl-kh50000.c new file mode 100644 index 000000000000..5495ea5eed71 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kh50000.c @@ -0,0 +1,443 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KH50000 pinctrl/GPIO driver + * + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include + +#include "pinctrl-zhaoxin.h" + +#define CHX007_SOCKET_PINS(sock) \ + SOCKET_PINCTRL_PIN(sock, 0, "IOD_CLK27M_G0"), \ + SOCKET_PINCTRL_PIN(sock, 1, "IOD_CLK27M_G1"), \ + SOCKET_PINCTRL_PIN(sock, 2, "IOD_CLK27M_G2"), \ + SOCKET_PINCTRL_PIN(sock, 3, "IOD_CLK27M_G3"), \ + SOCKET_PINCTRL_PIN(sock, 4, "IOD_CPURST_G0"), \ + SOCKET_PINCTRL_PIN(sock, 5, "IOD_CPURST_G1"), \ + SOCKET_PINCTRL_PIN(sock, 6, "IOD_CPURST_G2"), \ + SOCKET_PINCTRL_PIN(sock, 7, "IOD_CPURST_G3"), \ + SOCKET_PINCTRL_PIN(sock, 8, "IOD_RSMRST_G0"), \ + SOCKET_PINCTRL_PIN(sock, 9, "IOD_RSMRST_G1"), \ + SOCKET_PINCTRL_PIN(sock, 10, "IOD_RSMRST_G2"), \ + SOCKET_PINCTRL_PIN(sock, 11, "IOD_RSMRST_G3"), \ + SOCKET_PINCTRL_PIN(sock, 12, "IOD_PWROK_G0"), \ + SOCKET_PINCTRL_PIN(sock, 13, "IOD_PWROK_G1"), \ + SOCKET_PINCTRL_PIN(sock, 14, "IOD_PWROK_G2"), \ + SOCKET_PINCTRL_PIN(sock, 15, "IOD_PWROK_G3"), \ + SOCKET_PINCTRL_PIN(sock, 16, "IOD_THRMTRIP_G0"), \ + SOCKET_PINCTRL_PIN(sock, 17, "IOD_THRMTRIP_G1"), \ + SOCKET_PINCTRL_PIN(sock, 18, "IOD_THRMTRIP_G2"), \ + SOCKET_PINCTRL_PIN(sock, 19, "IOD_THRMTRIP_G3"), \ + SOCKET_PINCTRL_PIN(sock, 20, "IOD_CLK50M_G0"), \ + SOCKET_PINCTRL_PIN(sock, 21, "IOD_CLK50M_G1"), \ + SOCKET_PINCTRL_PIN(sock, 22, "IOD_CLK50M_G2"), \ + SOCKET_PINCTRL_PIN(sock, 23, "IOD_CLK50M_G3"), \ + /*GPIO range 0 */ \ + SOCKET_PINCTRL_PIN(sock, 24, "USBHOC0"), /*PGPIO0------gpio36*/ \ + SOCKET_PINCTRL_PIN(sock, 25, "USBHOC1"), /*PGPIO1------gpio37*/ \ + SOCKET_PINCTRL_PIN(sock, 26, "USBHOC2"), /*PGPIO2------gpio38*/ \ + SOCKET_PINCTRL_PIN(sock, 27, "USBHOC3"), /*PGPIO3------gpio39*/ \ + SOCKET_PINCTRL_PIN(sock, 28, "I3C0DT"), \ + SOCKET_PINCTRL_PIN(sock, 29, "I3C0CK"), \ + SOCKET_PINCTRL_PIN(sock, 30, "I3C1DT"), \ + SOCKET_PINCTRL_PIN(sock, 31, "I3C1CK"), \ + SOCKET_PINCTRL_PIN(sock, 32, "I3C2DT"), \ + SOCKET_PINCTRL_PIN(sock, 33, "I3C2CK"), \ + SOCKET_PINCTRL_PIN(sock, 34, "I3C3DT"), \ + 
SOCKET_PINCTRL_PIN(sock, 35, "I3C3CK"), \ + SOCKET_PINCTRL_PIN(sock, 36, "SMBDT0"), \ + /*GPIO range 1*/ \ + SOCKET_PINCTRL_PIN(sock, 37, "SMBCK0"), /*PGPIO11------gpio47*/ \ + SOCKET_PINCTRL_PIN(sock, 38, "SMBDT1"), /*PGPIO12------gpio48*/ \ + SOCKET_PINCTRL_PIN(sock, 39, "SMBCK1"), /*PGPIO13------gpio49*/ \ + SOCKET_PINCTRL_PIN(sock, 40, "SMBDT2"), /*PGPIO7------gpio43*/ \ + SOCKET_PINCTRL_PIN(sock, 41, "SMBCK2"), /*PGPIO8------gpio44*/ \ + SOCKET_PINCTRL_PIN(sock, 42, "SMBALRT"), /*PGPIO14------gpio50*/ \ + SOCKET_PINCTRL_PIN(sock, 43, "SME_I2CDT_S"), \ + SOCKET_PINCTRL_PIN(sock, 44, "SME_I2CCK_S"), \ + /*GPIO range 2*/ \ + SOCKET_PINCTRL_PIN(sock, 45, "GPIO0"), /*GPIO0--------gpio0*/ \ + SOCKET_PINCTRL_PIN(sock, 46, "GPIO1"), /*GPIO1--------gpio1*/ \ + SOCKET_PINCTRL_PIN(sock, 47, "GPIO2"), /*GPIO2--------gpio2*/ \ + SOCKET_PINCTRL_PIN(sock, 48, "GPIO3"), /*GPIO3--------gpio3*/ \ + SOCKET_PINCTRL_PIN(sock, 49, "GPIO4"), /*GPIO4--------gpio4*/ \ + SOCKET_PINCTRL_PIN(sock, 50, "GPIO5"), /*GPIO5--------gpio5*/ \ + SOCKET_PINCTRL_PIN(sock, 51, "GPIO6"), /*GPIO6--------gpio6*/ \ + SOCKET_PINCTRL_PIN(sock, 52, "GPIO7"), /*GPIO7--------gpio7*/ \ + SOCKET_PINCTRL_PIN(sock, 53, "GPIO8"), /*GPIO8--------gpio8*/ \ + SOCKET_PINCTRL_PIN(sock, 54, "GPIO9"), /*GPIO9--------gpio9*/ \ + SOCKET_PINCTRL_PIN(sock, 55, "GPIO10"), /*GPIO10-------gpio10*/ \ + SOCKET_PINCTRL_PIN(sock, 56, "GPIO11"), /*GPIO11-------gpio11*/ \ + SOCKET_PINCTRL_PIN(sock, 57, "GPIO12"), /*GPIO12-------gpio12*/ \ + SOCKET_PINCTRL_PIN(sock, 58, "GPIO13"), /*GPIO13-------gpio13*/ \ + SOCKET_PINCTRL_PIN(sock, 59, "GPIO14"), /*GPIO14-------gpio14*/ \ + SOCKET_PINCTRL_PIN(sock, 60, "GPIO15"), /*GPIO15-------gpio15*/ \ + SOCKET_PINCTRL_PIN(sock, 61, "GPIO16"), /*GPIO16-------gpio16*/ \ + SOCKET_PINCTRL_PIN(sock, 62, "GPIO17"), /*GPIO17-------gpio17*/ \ + SOCKET_PINCTRL_PIN(sock, 63, "GPIO18"), /*GPIO18-------gpio18*/ \ + SOCKET_PINCTRL_PIN(sock, 64, "GPIO19"), /*GPIO19-------gpio19*/ \ + SOCKET_PINCTRL_PIN(sock, 
65, "GPIO20"), /*GPIO20-------gpio20*/ \ + SOCKET_PINCTRL_PIN(sock, 66, "GPIO21"), /*GPIO21-------gpio21*/ \ + SOCKET_PINCTRL_PIN(sock, 67, "GPIO22"), /*GPIO22-------gpio22*/ \ + SOCKET_PINCTRL_PIN(sock, 68, "GPIO23"), /*GPIO23-------gpio23*/ \ + SOCKET_PINCTRL_PIN(sock, 69, "GPIO24"), /*GPIO24-------gpio24*/ \ + SOCKET_PINCTRL_PIN(sock, 70, "GPIO25"), /*GPIO25-------gpio25*/ \ + SOCKET_PINCTRL_PIN(sock, 71, "GPIO26"), /*GPIO26-------gpio26*/ \ + SOCKET_PINCTRL_PIN(sock, 72, "GPIO27"), /*GPIO27-------gpio27*/ \ + SOCKET_PINCTRL_PIN(sock, 73, "GPIO28"), /*GPIO28-------gpio28*/ \ + SOCKET_PINCTRL_PIN(sock, 74, "GPIO29"), /*GPIO29-------gpio29*/ \ + SOCKET_PINCTRL_PIN(sock, 75, "GPIO30"), /*GPIO30-------gpio30*/ \ + SOCKET_PINCTRL_PIN(sock, 76, "GPIO31"), /*GPIO31-------gpio31*/ \ + SOCKET_PINCTRL_PIN(sock, 77, "GPIO32"), /*GPIO32-------gpio32*/ \ + SOCKET_PINCTRL_PIN(sock, 78, "GPIO33"), /*GPIO33-------gpio33*/ \ + SOCKET_PINCTRL_PIN(sock, 79, "GPIO34"), /*GPIO34-------gpio34*/ \ + SOCKET_PINCTRL_PIN(sock, 80, "GPIO35"), /*GPIO35-------gpio35*/ \ + /*GPIO range 3*/ \ + SOCKET_PINCTRL_PIN(sock, 81, "LPCCLK"), /*PGPIO16------gpio52*/ \ + SOCKET_PINCTRL_PIN(sock, 82, "LPCDRQ1"), /*PGPIO17------gpio53*/ \ + SOCKET_PINCTRL_PIN(sock, 83, "LPCDRQ0"), /*PGPIO18------gpio54*/ \ + SOCKET_PINCTRL_PIN(sock, 84, "LPCFRAME"), /*PGPIO19------gpio55*/ \ + SOCKET_PINCTRL_PIN(sock, 85, "LPCAD3"), /*PGPIO20------gpio56*/ \ + SOCKET_PINCTRL_PIN(sock, 86, "LPCAD2"), /*PGPIO21------gpio57*/ \ + SOCKET_PINCTRL_PIN(sock, 87, "LPCAD1"), /*PGPIO22------gpio58*/ \ + SOCKET_PINCTRL_PIN(sock, 88, "LPCAD0"), /*PGPIO23------gpio59*/ \ + SOCKET_PINCTRL_PIN(sock, 89, "SERIRQ"), /*PGPIO24------gpio60*/ \ + /*GPIO range 4*/ \ + SOCKET_PINCTRL_PIN(sock, 90, "ESPICLK"), /*PGPIO15------gpio51*/ \ + /*GPIO range 5*/ \ + SOCKET_PINCTRL_PIN(sock, 91, "ESPIRST"), /*PGPIO29------gpio65*/ \ + SOCKET_PINCTRL_PIN(sock, 92, "ESPICS"), /*PGPIO30------gpio66*/ \ + SOCKET_PINCTRL_PIN(sock, 93, "ESPIIO3"), 
/*PGPIO31------gpio67*/ \ + /*GPIO range 6*/ \ + SOCKET_PINCTRL_PIN(sock, 94, "ESPIIO2"), /*PGPIO4------gpio40*/ \ + SOCKET_PINCTRL_PIN(sock, 95, "ESPIIO1"), /*PGPIO5------gpio41*/ \ + SOCKET_PINCTRL_PIN(sock, 96, "ESPIIO0"), /*PGPIO6------gpio42*/ \ + /* jump */ \ + SOCKET_PINCTRL_PIN(sock, 97, "SPIDI"), \ + SOCKET_PINCTRL_PIN(sock, 98, "SPIDO"), \ + SOCKET_PINCTRL_PIN(sock, 99, "SPICLK"), \ + SOCKET_PINCTRL_PIN(sock, 100, "SPISS"), \ + SOCKET_PINCTRL_PIN(sock, 101, "TPMRST"), \ + SOCKET_PINCTRL_PIN(sock, 102, "TPMIRQ"), \ + SOCKET_PINCTRL_PIN(sock, 103, "MSPIDI"), \ + SOCKET_PINCTRL_PIN(sock, 104, "MSPIDO"), \ + SOCKET_PINCTRL_PIN(sock, 105, "MSPIIO2"), \ + SOCKET_PINCTRL_PIN(sock, 106, "MSPIIO3"), \ + SOCKET_PINCTRL_PIN(sock, 107, "MSPICLK"), \ + SOCKET_PINCTRL_PIN(sock, 108, "MSPISS0"), \ + /*GPIO range 7*/ \ + SOCKET_PINCTRL_PIN(sock, 109, "MSPISS1"), /*PGPIO9------gpio45*/ \ + /*GPIO range 8 */ \ + SOCKET_PINCTRL_PIN(sock, 110, "MSPISS2"), /*PGPIO22------gpio58*/ \ + /*GPIO range 9*/ \ + SOCKET_PINCTRL_PIN(sock, 111, "SPIDEVINT"), /*PGPIO25------gpio61*/ \ + /*jump*/ \ + SOCKET_PINCTRL_PIN(sock, 112, "ZLSDATA_TX_P0"), \ + SOCKET_PINCTRL_PIN(sock, 113, "ZLSDATA_RX_P0"), \ + SOCKET_PINCTRL_PIN(sock, 114, "ZLSDATA_TX_P1"), \ + SOCKET_PINCTRL_PIN(sock, 115, "ZLSDATA_RX_P1"), \ + SOCKET_PINCTRL_PIN(sock, 116, "ZLSDATA_TX_P2"), \ + SOCKET_PINCTRL_PIN(sock, 117, "ZLSDATA_RX_P2"), \ + SOCKET_PINCTRL_PIN(sock, 118, "BOOT_EN"), \ + SOCKET_PINCTRL_PIN(sock, 119, "BOOT_DONE"), \ + SOCKET_PINCTRL_PIN(sock, 120, "MST_SKT"), \ + SOCKET_PINCTRL_PIN(sock, 121, "HRX_BEVO_CLK"), \ + SOCKET_PINCTRL_PIN(sock, 122, "HRX_BEVO_DATA"), \ + SOCKET_PINCTRL_PIN(sock, 123, "HTX_BEVO_CLK"), \ + SOCKET_PINCTRL_PIN(sock, 124, "HTX_BEVO_DATA"), \ + SOCKET_PINCTRL_PIN(sock, 125, "THRMTRIP_I"), \ + SOCKET_PINCTRL_PIN(sock, 126, "CLK50M_I"), \ + SOCKET_PINCTRL_PIN(sock, 127, "CLK50M_O"), \ + SOCKET_PINCTRL_PIN(sock, 128, "PCIRST_IO"), \ + SOCKET_PINCTRL_PIN(sock, 129, "RSMRST_IO"), \ + 
SOCKET_PINCTRL_PIN(sock, 130, "PWRGD_IO"), \ + SOCKET_PINCTRL_PIN(sock, 131, "CLK32K_IO"), \ + SOCKET_PINCTRL_PIN(sock, 132, "BIOSSEL"), \ + SOCKET_PINCTRL_PIN(sock, 133, "THRMRIP"), \ + /*GPIO range 10 */ \ + SOCKET_PINCTRL_PIN(sock, 134, "THRM"), /*PGPIO26------gpio62*/ \ + /*GPIO range 11*/ \ + SOCKET_PINCTRL_PIN(sock, 135, "PEXWAKE"), /*PGPIO10------gpio46*/ \ + /*jump*/ \ + SOCKET_PINCTRL_PIN(sock, 136, "PWRBTN"), \ + SOCKET_PINCTRL_PIN(sock, 137, "PCIRST"), \ + /*GPIO range 12*/ \ + SOCKET_PINCTRL_PIN(sock, 138, "SPKR"), /*PGPIO27------gpio63*/ \ + SOCKET_PINCTRL_PIN(sock, 139, "PME"), /*PGPIO28------gpio64*/ \ + SOCKET_PINCTRL_PIN(sock, 140, "SUSA"), \ + SOCKET_PINCTRL_PIN(sock, 141, "SUSB"), \ + SOCKET_PINCTRL_PIN(sock, 142, "SUSC"), \ + SOCKET_PINCTRL_PIN(sock, 143, "SVID0_VREN"), \ + SOCKET_PINCTRL_PIN(sock, 144, "SVID1_VREN"), + +/* kh50000 pin define */ +static const struct pinctrl_pin_desc kh50000_pins_0[] = { + CHX007_SOCKET_PINS(0) +}; + +static const struct pinctrl_pin_desc kh50000_pins_1[] = { + CHX007_SOCKET_PINS(1) +}; + +static const struct pinctrl_pin_desc kh50000_pins_2[] = { + CHX007_SOCKET_PINS(2) +}; + +static const struct pinctrl_pin_desc kh50000_pins_3[] = { + CHX007_SOCKET_PINS(3) +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, +}; + +static int calibrate_status[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, +}; + +static const struct reg_cal_array kh50000_int_cal[] = { + ZX_CAL_ARRAY((0xCC - 0xCC), 16), /* GPIO0-15 
*/ + ZX_CAL_ARRAY((0xCE - 0xCC), 16), /* GPIO15-31 */ + ZX_CAL_ARRAY((0xD4 - 0xCC), 4), /* GPIO32-35 */ + ZX_CAL_ARRAY((0xD0 - 0xCC), 16), /* PGPIO0-PGPIO15 */ + ZX_CAL_ARRAY((0xD2 - 0xCC), 16), /* PGPIO16-PGPIO31 */ +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kh50000_int_cal, + .reg_cal_size = ARRAY_SIZE(kh50000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + .is_pmio = false, + } +}; + +static const struct reg_cal_array kh50000_status_cal[] = { + ZX_CAL_ARRAY((0xE4 - 0xCC), 16), + ZX_CAL_ARRAY((0xE6 - 0xCC), 16), + ZX_CAL_ARRAY((0xEC - 0xCC), 4), + ZX_CAL_ARRAY((0xE8 - 0xCC), 16), + ZX_CAL_ARRAY((0xEA - 0xCC), 16), +}; + +static const struct reg_calibrate status_cal[] = { { + .reg = kh50000_status_cal, + .reg_cal_size = ARRAY_SIZE(kh50000_status_cal), + .cal_array = calibrate_status, + .size = ARRAY_SIZE(calibrate_status), +} }; + +static const struct reg_cal_array kh50000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0xD8 - 0xCC), 16), + ZX_CAL_ARRAY((0xDA - 0xCC), 16), + ZX_CAL_ARRAY((0xE0 - 0xCC), 4), + ZX_CAL_ARRAY((0xDC - 0xCC), 16), + ZX_CAL_ARRAY((0xDE - 0xCC), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kh50000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kh50000_mod_sel_cal), + .cal_array = calibrate_status, + .size = ARRAY_SIZE(calibrate_status), + } +}; + +static const struct index_cal_array kh50000_gpio_in_cal[] = { + ZX_CAL_INDEX_ARRAY(0xC8, NULL, 68), +}; + +static const struct index_cal_array kh50000_gpio_out_cal[] = { + ZX_CAL_INDEX_ARRAY(0xC0, NULL, 68), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67 +}; + +static const struct index_cal_array kh50000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY_MASK(0xD0, 
calibrate_trigger, 68, 3, 0x7), +}; + +static const struct zhaoxin_pin_topology kh50000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kh50000_gpio_in_cal, + .gpio_out_cal = kh50000_gpio_out_cal, + .trigger_cal = kh50000_trigger_cal, + } +}; + +static const struct zhaoxin_pin_map2_gpio kh50000_pinmap_gpps[] = { + ZHAOXIN_GPP(0, 23, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(24, 27, 10), /* gpio range 0 */ + ZHAOXIN_GPP(28, 36, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(37, 42, 47), /* gpio range 1 */ + ZHAOXIN_GPP(43, 44, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(45, 80, 0), /* gpio range 2 */ + ZHAOXIN_GPP(81, 89, 52), /* gpio range 3 */ + ZHAOXIN_GPP(90, 90, 51), /* gpio range 4 */ + ZHAOXIN_GPP(91, 93, 65), /* gpio range 5 */ + ZHAOXIN_GPP(94, 96, 40), /* gpio range 6 */ + ZHAOXIN_GPP(97, 108, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(109, 109, 45), /* gpio range 7 */ + ZHAOXIN_GPP(110, 110, 58), /* gpio range 8 */ + ZHAOXIN_GPP(111, 111, 61), /* gpio range 9 */ + ZHAOXIN_GPP(112, 133, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(134, 134, 62), /* gpio range 10 */ + ZHAOXIN_GPP(135, 135, 46), /* gpio range 11 */ + ZHAOXIN_GPP(136, 137, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ + ZHAOXIN_GPP(138, 139, 63), /* gpio range 12 */ + ZHAOXIN_GPP(140, 144, ZHAOXIN_GPIO_BASE_NOMAP), /* no range */ +}; + +static zx_gpio_type kh50000_gpio_type(struct zhaoxin_pinctrl *pctrl, unsigned int pin) +{ + if (pin >= 24 && pin <= 27) + return ZX_TYPE_PGPIO; + else if (pin >= 37 && pin <= 42) + return ZX_TYPE_PGPIO; + else if (pin >= 45 && pin <= 80) + return ZX_TYPE_GPIO; + else if (pin >= 81 && pin <= 96) + return ZX_TYPE_PGPIO; + else if (pin >= 109 && pin <= 111) + return ZX_TYPE_PGPIO; + else if (pin >= 134 && pin <= 135) + return ZX_TYPE_PGPIO; + else if (pin >= 138 && pin <= 139) + return ZX_TYPE_PGPIO; + else + return ZX_TYPE_ERROR; +} + +static 
void kh50000_gpio_init(struct zhaoxin_pinctrl *pctrl) +{ + struct resource *res_pmio; + struct platform_device *pdev = to_platform_device(pctrl->dev); + + res_pmio = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res_pmio) { + dev_err(&pdev->dev, "can't fetch device pmio resource info\n"); + return; + } + + if (!request_region(res_pmio->start, resource_size(res_pmio), pdev->name)) { + dev_err(&pdev->dev, "can't request region\n"); + return; + } + + pctrl->pmio_base = res_pmio->start; + pctrl->pmio_rx90 = 4; + pctrl->pmio_rx8c = 0; + zx_pad_write16(pctrl, 0xF8, 0x7F); + dev_info(pctrl->dev, "KH50000 private init\n"); +} + +static const struct zhaoxin_pinctrl_soc_data socket_0_soc_data = { + .uid = "0", + .pins = kh50000_pins_0, + .npins = ARRAY_SIZE(kh50000_pins_0), + .pin_topologys = kh50000_pin_topologys, + .gpio_type = kh50000_gpio_type, + .private_init = kh50000_gpio_init, + .zhaoxin_pin_maps = kh50000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kh50000_pinmap_gpps), +}; +static const struct zhaoxin_pinctrl_soc_data socket_1_soc_data = { + .uid = "1", + .pins = kh50000_pins_1, + .npins = ARRAY_SIZE(kh50000_pins_1), + .pin_topologys = kh50000_pin_topologys, + .gpio_type = kh50000_gpio_type, + .private_init = kh50000_gpio_init, + .zhaoxin_pin_maps = kh50000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kh50000_pinmap_gpps), +}; +static const struct zhaoxin_pinctrl_soc_data socket_2_soc_data = { + .uid = "2", + .pins = kh50000_pins_2, + .npins = ARRAY_SIZE(kh50000_pins_2), + .pin_topologys = kh50000_pin_topologys, + .gpio_type = kh50000_gpio_type, + .private_init = kh50000_gpio_init, + .zhaoxin_pin_maps = kh50000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kh50000_pinmap_gpps), +}; +static const struct zhaoxin_pinctrl_soc_data socket_3_soc_data = { + .uid = "3", + .pins = kh50000_pins_3, + .npins = ARRAY_SIZE(kh50000_pins_3), + .pin_topologys = kh50000_pin_topologys, + .gpio_type = kh50000_gpio_type, + .private_init = kh50000_gpio_init, + .zhaoxin_pin_maps = 
kh50000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kh50000_pinmap_gpps), +}; + +static const struct zhaoxin_pinctrl_soc_data *kh50000_soc_data[] = { + &socket_0_soc_data, + &socket_1_soc_data, + &socket_2_soc_data, + &socket_3_soc_data, + NULL, +}; + +static const struct acpi_device_id kh50000_pinctrl_acpi_match[] = { + { "KH8344B", (kernel_ulong_t)&kh50000_soc_data }, + {} +}; +MODULE_DEVICE_TABLE(acpi, kh50000_pinctrl_acpi_match); + +static const struct dev_pm_ops kh50000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kh50000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_uid, + .driver = { + .name = "kh50000-pinctrl", + .acpi_match_table = kh50000_pinctrl_acpi_match, + .pm = &kh50000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kh50000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c index f249dd369e7c..df5bc278b740 100644 --- a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -1,42 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* - * zhaoxin KX7000 pinctrl/GPIO driver + * Zhaoxin KX7000 pinctrl/GPIO driver * - * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
* */ -#define DRIVER_VERSION "1.0.0" +#define DRIVER_VERSION "1.0.1" #include #include #include - #include #include "pinctrl-zhaoxin.h" -#define ZX_CAL_ARRAY(a, b) \ -{ \ - .pmio_offset = (a), \ - .size = (b), \ -} - -#define PMIO_RX90 100 -#define PMIO_RX8C 200 - -#define ZX_CAL_INDEX_ARRAY(a, b, c) \ -{ \ - .reg_port_base = (PMIO_RX90), \ - .reg_data_base = (PMIO_RX8C), \ - .index = (a), \ - .cal_array = (b), \ - .size = (c), \ -} - /* kx7000 pin define */ static const struct pinctrl_pin_desc kx7000_pins[] = { - PINCTRL_PIN(0, "IOD_CPUTCK"), PINCTRL_PIN(1, "IOD_CPUTMS"), PINCTRL_PIN(2, "IOD_CPUTRST"), @@ -51,7 +31,7 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(11, "IOD_PWORK"), PINCTRL_PIN(12, "IOD_RSMRST"), PINCTRL_PIN(13, "IOD_THRMTRIP"), - //GPIO range 0 + /* GPIO range 0 */ PINCTRL_PIN(14, "USBHOC0"), PINCTRL_PIN(15, "USBHOC1"), PINCTRL_PIN(16, "USBHOC2"), @@ -60,33 +40,33 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(19, "USBHOC5"), PINCTRL_PIN(20, "USBHOC6"), PINCTRL_PIN(21, "USBHOC7"), - //gpio range 1 + /* GPIO range 1 */ PINCTRL_PIN(22, "USB4SBTX0"), PINCTRL_PIN(23, "USB4SBRX0"), PINCTRL_PIN(24, "USB4SBTX1"), PINCTRL_PIN(25, "USB4SBRX1"), - //gpio range 2 + /* GPIO range 2 */ PINCTRL_PIN(26, "I2C1DT"), PINCTRL_PIN(27, "I2C1CK"), PINCTRL_PIN(28, "I2C1INT"), - //gpio range 3 + /* GPIO range 3 */ PINCTRL_PIN(29, "I2C2DT"), PINCTRL_PIN(30, "I2C2CK"), - //gpio range 4 + /* GPIO range 4 */ PINCTRL_PIN(31, "I2C2INT"), - //gpio range 5 + /* GPIO range 5 */ PINCTRL_PIN(32, "SMBDT1"), PINCTRL_PIN(33, "SMBCK1"), PINCTRL_PIN(34, "SMBDT2"), PINCTRL_PIN(35, "SMBCK2"), PINCTRL_PIN(36, "SMBALRT"), - //gpio range 6 + /* GPIO range 6 */ PINCTRL_PIN(37, "SME_I2CDT"), PINCTRL_PIN(38, "SME_I2CCK"), - //gpio range 7 + /* GPIO range 7 */ PINCTRL_PIN(39, "PWM"), PINCTRL_PIN(40, "TACH"), - //gpio range 8 + /* GPIO range 8 */ PINCTRL_PIN(41, "GPIO0"), PINCTRL_PIN(42, "GPIO1"), PINCTRL_PIN(43, "GPIO2"), @@ -99,14 +79,14 @@ static 
const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(50, "GPIO9"), PINCTRL_PIN(51, "LPCCLK"), PINCTRL_PIN(52, "LPCDRQ1"), - //gpio range 9 + /* GPIO range 9 */ PINCTRL_PIN(53, "LPCDRQ0"), PINCTRL_PIN(54, "LPCFRAME"), PINCTRL_PIN(55, "LPCAD3"), PINCTRL_PIN(56, "LPCAD2"), PINCTRL_PIN(57, "LPCAD1"), PINCTRL_PIN(58, "LPCAD0"), - //gpio range 10 + /* GPIO range 10 */ PINCTRL_PIN(59, "SERIRQ"), PINCTRL_PIN(60, "AZRST"), PINCTRL_PIN(61, "AZBITCLK"), @@ -114,14 +94,14 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(63, "AZSDIN1"), PINCTRL_PIN(64, "AZSDOUT"), PINCTRL_PIN(65, "AZSYNC"), - //gpio range 11 + /* GPIO range 11 */ PINCTRL_PIN(66, "I2S1_SCLK"), PINCTRL_PIN(67, "I2S1_TXD"), PINCTRL_PIN(68, "I2S1_WS"), PINCTRL_PIN(69, "I2S1_MCLK"), - //gpio range 12 + /* GPIO range 12 */ PINCTRL_PIN(70, "I2S1_RXD"), - //gpio range 13 + /* GPIO range 13 */ PINCTRL_PIN(71, "I2S1_INT"), PINCTRL_PIN(72, "MSPIDI"), PINCTRL_PIN(73, "MSPIDO"), @@ -129,20 +109,20 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(75, "MSPIIO3"), PINCTRL_PIN(76, "MSPICLK"), PINCTRL_PIN(77, "MSPISS0"), - //gpio range 14 + /* GPIO range 14 */ PINCTRL_PIN(78, "MSPISS1"), PINCTRL_PIN(79, "MSPISS2"), - //gpio range 15 + /* GPIO range 15 */ PINCTRL_PIN(80, "SPIDEVINT"), PINCTRL_PIN(81, "BIOSSEL"), - //gpio range 16 + /* GPIO range 16 */ PINCTRL_PIN(82, "THRM"), PINCTRL_PIN(83, "PEXWAKE"), PINCTRL_PIN(84, "PWRBTN"), - //gpio range 17 + /* GPIO range 17 */ PINCTRL_PIN(85, "SPKR"), PINCTRL_PIN(86, "PME"), - //gpio range 18 + /* GPIO range 18 */ PINCTRL_PIN(87, "BATLOW"), PINCTRL_PIN(88, "EXTSMI"), PINCTRL_PIN(89, "SUSA"), @@ -154,9 +134,9 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(95, "SLPS0"), PINCTRL_PIN(96, "PCIRST"), PINCTRL_PIN(97, "SVID_VREN"), - //gpio range 19 + /* GPIO range 19 */ PINCTRL_PIN(98, "INTRUDER"), - //gpio range 20 + /* GPIO range 20 */ PINCTRL_PIN(99, "GFX_I2CCLK0"), PINCTRL_PIN(100, "GFX_I2CDAT0"), PINCTRL_PIN(101, 
"GFX_I2CCLK1"), @@ -173,22 +153,22 @@ static const struct pinctrl_pin_desc kx7000_pins[] = { PINCTRL_PIN(112, "CRTVSYNC"), }; -#define NOT_DEFINE -30000 +#define NOT_DEFINE -30000 static int calibrate_int[] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 63, 64, 65, 66, 67, 68, - 69, 70, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, 69, 70, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 }; -static int calibrate_sattus[] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 63, 64, 65, 66, 67, 68, - 69, 70, - 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +static int calibrate_status[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, 69, 70, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 }; static const struct reg_cal_array kx7000_int_cal[] = { @@ -204,6 +184,7 @@ static const struct reg_calibrate int_cal[] = { .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), .cal_array = calibrate_int, .size = ARRAY_SIZE(calibrate_int), + .is_pmio = true, } }; @@ -218,8 +199,8 @@ static const struct reg_calibrate status_cal[] = { { .reg = kx7000_status_cal, .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), - .cal_array = calibrate_sattus, - .size = ARRAY_SIZE(calibrate_sattus), + .cal_array = calibrate_status, + .size = ARRAY_SIZE(calibrate_status), } }; @@ -234,8 +215,8 @@ static const struct reg_calibrate mod_sel_cal[] = { { .reg = kx7000_mod_sel_cal, .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), - .cal_array = calibrate_sattus, - .size = ARRAY_SIZE(calibrate_sattus), + .cal_array = calibrate_status, + .size = ARRAY_SIZE(calibrate_status), } }; @@ -248,22 +229,15 @@ static const struct index_cal_array 
kx7000_gpio_out_cal[] = { }; static int calibrate_trigger[] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 18, 19, - 20, 21, 22, 23, - 24, 25, 26, 27, - 28, 29, 30, 31, - 32, 33, 34, 35, - 36, 50, 51, 52, - 53, 54, 55, 56, - 57, 58, 59, 60, - 61, 62, 63, 64, - 65, 66, 67, 68, - 69, 70 + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70 }; static const struct index_cal_array kx7000_trigger_cal[] = { - ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), + ZX_CAL_INDEX_ARRAY_MASK(0xA0, calibrate_trigger, 50, 4, 0x7), }; static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { @@ -274,62 +248,89 @@ static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { .gpio_in_cal = kx7000_gpio_in_cal, .gpio_out_cal = kx7000_gpio_out_cal, .trigger_cal = kx7000_trigger_cal, - } + }, }; -#define KX7000_GPP(s, e, g) \ -{ \ - .zhaoxin_range_pin_base = (s), \ - .zhaoxin_range_pin_size = ((e) - (s) + 1), \ - .zhaoxin_range_gpio_base = (g), \ -} - static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { - KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(14, 19, 10), - KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(22, 25, 65), - KX7000_GPP(26, 28, 43), - KX7000_GPP(29, 30, 41), - KX7000_GPP(31, 31, 49), - KX7000_GPP(32, 36, 16), - KX7000_GPP(37, 38, 69), - KX7000_GPP(39, 40, 67), - KX7000_GPP(41, 50, 0), - KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(53, 53, 39), - KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(59, 59, 40), - KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(66, 69, 35), - KX7000_GPP(70, 70, 46), - KX7000_GPP(71, 71, 64), - KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(78, 78, 50), - KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(80, 80, 51), - KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(82, 82, 52), - KX7000_GPP(83, 84, 
ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(85, 85, 53), - KX7000_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(87, 95, 54), - KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), - KX7000_GPP(98, 98, 63), - KX7000_GPP(99, 112, 21), + ZHAOXIN_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(14, 19, 10), + ZHAOXIN_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(22, 25, 65), + ZHAOXIN_GPP(26, 28, 43), + ZHAOXIN_GPP(29, 30, 41), + ZHAOXIN_GPP(31, 31, 49), + ZHAOXIN_GPP(32, 36, 16), + ZHAOXIN_GPP(37, 38, 69), + ZHAOXIN_GPP(39, 40, 67), + ZHAOXIN_GPP(41, 50, 0), + ZHAOXIN_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(53, 53, 39), + ZHAOXIN_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(59, 59, 40), + ZHAOXIN_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(66, 69, 35), + ZHAOXIN_GPP(70, 70, 46), + ZHAOXIN_GPP(71, 71, 64), + ZHAOXIN_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(78, 78, 50), + ZHAOXIN_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(80, 80, 51), + ZHAOXIN_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(82, 82, 52), + ZHAOXIN_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(85, 85, 53), + ZHAOXIN_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(87, 95, 54), + ZHAOXIN_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + ZHAOXIN_GPP(98, 98, 63), + ZHAOXIN_GPP(99, 112, 21), }; +static zx_gpio_type kx7000_gpio_type(struct zhaoxin_pinctrl *pctrl, unsigned int pin) +{ + if (pin >= 0xE && pin <= 0x13) + return ZX_TYPE_PGPIO; + else if (pin >= 0x16 && pin <= 0x28) + return ZX_TYPE_PGPIO; + else if (pin >= 0x29 && pin <= 0x32) + return ZX_TYPE_GPIO; + else if (pin == 0x35 || pin == 0x3B) + return ZX_TYPE_PGPIO; + else if (pin >= 0x42 && pin <= 0x47) + return ZX_TYPE_PGPIO; + else if (pin == 0x4E || pin == 0x50) + return ZX_TYPE_PGPIO; + else if (pin == 0x52 || pin == 0x55) + return ZX_TYPE_PGPIO; + else if (pin >= 0x57 && pin <= 0x5F) + return ZX_TYPE_PGPIO; + else if (pin >= 0x62 && pin <= 0x70) + return ZX_TYPE_PGPIO; + else + return 
ZX_TYPE_ERROR; +} + +static void kx7000_gpio_init(struct zhaoxin_pinctrl *pctrl) +{ + pctrl->pmio_base = 0x800; + pctrl->pmio_rx90 = 0x90; + pctrl->pmio_rx8c = 0x8c; + dev_info(pctrl->dev, "kx7000 private init\n"); +} + static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { .pins = kx7000_pins, .npins = ARRAY_SIZE(kx7000_pins), .pin_topologys = kx7000_pin_topologys, + .gpio_type = kx7000_gpio_type, + .private_init = kx7000_gpio_init, .zhaoxin_pin_maps = kx7000_pinmap_gpps, .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), }; static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, - { } + {} }; MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); @@ -345,10 +346,9 @@ static struct platform_driver kx7000_pinctrl_driver = { .pm = &kx7000_pinctrl_pm_ops, }, }; - module_platform_driver(kx7000_pinctrl_driver); MODULE_AUTHOR("www.zhaoxin.com"); MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c index 1e434869d3dd..f488de118d92 100644 --- a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -6,7 +6,7 @@ * */ -#define DRIVER_VERSION "1.0.0" +#define DRIVER_VERSION "1.0.1" #include #include @@ -26,30 +26,19 @@ #include "../core.h" #include "pinctrl-zhaoxin.h" -static int pin_to_hwgpio(struct pinctrl_gpio_range *range, unsigned int pin) +u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) { - int offset = 0; - - if (range->pins) { - for (offset = 0; offset < range->npins; offset++) - if (pin == range->pins[offset]) - break; - return range->base+offset-range->gc->base; - } else - return pin-range->pin_base+range->base-range->gc->base; -} - -static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) -{ - outb(index, pctrl->pmio_rx90+pctrl->pmio_base); - return 
inw(pctrl->pmio_rx8c+pctrl->pmio_base); + outb(index, pctrl->pmio_rx90 + pctrl->pmio_base); + return inw(pctrl->pmio_rx8c + pctrl->pmio_base); } +EXPORT_SYMBOL_GPL(zx_pad_read16); -static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) { - outb(index, pctrl->pmio_rx90+pctrl->pmio_base); - outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); + outb(index, pctrl->pmio_rx90 + pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c + pctrl->pmio_base); } +EXPORT_SYMBOL_GPL(zx_pad_write16); static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) { @@ -66,7 +55,7 @@ static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned } static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, - const unsigned int **pins, unsigned int *npins) + const unsigned int **pins, unsigned int *npins) { struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); @@ -103,7 +92,7 @@ static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsign } static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, - const char * const **groups, unsigned int *const ngroups) + const char *const **groups, unsigned int *const ngroups) { struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); @@ -114,36 +103,32 @@ static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int } static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, - unsigned int group) + unsigned int group) { - struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - - dev_dbg(pctrl->dev, "%s,group=%d,func=%d\n", __func__, group, function); return 0; } -#define ZHAOXIN_PULL_UP_20K 0x80 -#define ZHAOXIN_PULL_UP_10K 0x40 -#define ZHAOXIN_PULL_UP_47K 0x20 -#define ZHAOXIN_PULL_DOWN 0x10 +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 
0x20 +#define ZHAOXIN_PULL_DOWN 0x10 -#define ZHAOXIN_PULL_UP 0xe0 +#define ZHAOXIN_PULL_UP 0xe0 static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, - bool isup) + bool isup) { u16 tmp = 0; u16 value; u16 value_back = 0; if (isup) - tmp = ZHAOXIN_PULL_UP_10K|1; + tmp = ZHAOXIN_PULL_UP_10K | 1; else - tmp = ZHAOXIN_PULL_DOWN|1; - value = zx_pad_read16(pctrl, pin); + tmp = ZHAOXIN_PULL_DOWN | 1; - /* for gpio */ - if (pin <= 0x32 && pin >= 0x29) { + if (pctrl->gpio_type(pctrl, pin) == ZX_TYPE_GPIO) { + value = zx_pad_read16(pctrl, pin); if (isup) { value &= (~(ZHAOXIN_PULL_DOWN)); value |= tmp; @@ -151,10 +136,12 @@ static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, u value &= (~(ZHAOXIN_PULL_UP)); value |= tmp; } - value &= ~(0x1); + + value &= (~(0xf)); zx_pad_write16(pctrl, pin, value); value_back = zx_pad_read16(pctrl, pin); - } else {/* for pgpio */ + } else if (pctrl->gpio_type(pctrl, pin) == ZX_TYPE_PGPIO) { + value = zx_pad_read16(pctrl, pin); if (isup) { value &= (~(ZHAOXIN_PULL_DOWN)); value |= tmp; @@ -162,20 +149,26 @@ static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, u value &= (~(ZHAOXIN_PULL_UP)); value |= tmp; } - value |= 0x1; + + value &= (~(0xf)); + value |= 1; zx_pad_write16(pctrl, pin, value); value_back = zx_pad_read16(pctrl, pin); + } else { + dev_info(pctrl->dev, "pin %d is not a gpio or pgpio\n", pin); } } static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev, - struct pinctrl_gpio_range *range, unsigned int pin) + struct pinctrl_gpio_range *range, unsigned int pin) { struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - int hwgpio = pin_to_hwgpio(range, pin); + unsigned long flags; - dev_dbg(pctrl->dev, "%s, hwgpio=%d, pin=%d\n", __func__, hwgpio, pin); + raw_spin_lock_irqsave(&pctrl->lock, flags); zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + return 0; 
} @@ -193,7 +186,7 @@ static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, uns } static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, - unsigned int nconfigs) + unsigned int nconfigs) { return 0; } @@ -212,8 +205,8 @@ static const struct pinctrl_desc zhaoxin_pinctrl_desc = { }; static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset, - const struct zhaoxin_pin_topology **community, - const struct zhaoxin_pin_map2_gpio **padgrp) + const struct zhaoxin_pin_topology **community, + const struct zhaoxin_pin_map2_gpio **padgrp) { int i; @@ -223,7 +216,7 @@ static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offse if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) continue; if (offset >= map->zhaoxin_range_gpio_base && - offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) { + offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) { int pin; pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base; @@ -250,15 +243,18 @@ static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); const struct index_cal_array *gpio_in_cal; - int gap = offset/16; - int bit = offset%16; + unsigned long flags; + int gap = offset / 16; + int bit = offset % 16; int pin; int value; gpio_in_cal = pctrl->pin_topologys->gpio_in_cal; pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); - value = zx_pad_read16(pctrl, gpio_in_cal->index+gap); - value &= (1<lock, flags); + value = zx_pad_read16(pctrl, gpio_in_cal->index + gap); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + value &= (1 << bit); return !!value; } @@ -278,12 +274,12 @@ static void zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int va raw_spin_lock_irqsave(&pctrl->lock, flags); - org = zx_pad_read16(pctrl, gpio_out_cal->index+gap); + org = zx_pad_read16(pctrl, 
gpio_out_cal->index + gap); if (value) - org |= (1<index+gap, org); + org &= (~(1 << bit)); + zx_pad_write16(pctrl, gpio_out_cal->index + gap, org); raw_spin_unlock_irqrestore(&pctrl->lock, flags); } @@ -301,7 +297,6 @@ static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset) { return gpiochip_generic_request(gc, offset); } - static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset) { gpiochip_generic_free(gc, offset); @@ -335,7 +330,6 @@ static void zhaoxin_gpio_irq_ack(struct irq_data *d) int base_offset = 0; int bit_off = 0; u16 value; - u16 value_read; status_cal = pctrl->pin_topologys->status_cal; if (gpio >= 0) { @@ -347,13 +341,13 @@ static void zhaoxin_gpio_irq_ack(struct irq_data *d) break; offset += status_cal->reg[j].size; } - reg_off = &status_cal->reg[j-1]; - bit_off = i-(offset-reg_off->size); + reg_off = &status_cal->reg[j - 1]; + bit_off = i - (offset - reg_off->size); base_offset = reg_off->pmio_offset; - value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset); - value_read = value; - value |= (1<pm_pmio_base+reg_off->pmio_offset); + value = (1 << bit_off); + raw_spin_lock(&pctrl->lock); + writew(value, pctrl->pm_pmio_base + reg_off->pmio_offset); + raw_spin_unlock(&pctrl->lock); } } @@ -371,10 +365,10 @@ static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask) int bit_off = 0; u16 value; u16 value1; + unsigned long flags; int_cal = pctrl->pin_topologys->int_cal; mod_sel_cal = pctrl->pin_topologys->mod_sel_cal; - if (gpio >= 0) { for (i = 0; i < int_cal->size; i++) if (gpio == int_cal->cal_array[i]) @@ -384,26 +378,39 @@ static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask) break; offset += int_cal->reg[j].size; } - reg_off = &(int_cal->reg[j-1]); - mod = &(mod_sel_cal->reg[j-1]); - bit_off = i-(offset-reg_off->size); + reg_off = &(int_cal->reg[j - 1]); + mod = &(mod_sel_cal->reg[j - 1]); + bit_off = i - (offset - reg_off->size); base_offset = reg_off->pmio_offset; - value = 
inw(pctrl->pmio_base+reg_off->pmio_offset); - if (mask) - value &= (~(1<pmio_base+reg_off->pmio_offset); + raw_spin_lock_irqsave(&pctrl->lock, flags); + if (!int_cal->is_pmio) { + value = readw(pctrl->pm_pmio_base + reg_off->pmio_offset); + if (mask) + value &= (~(1 << bit_off)); + else + value |= (1 << bit_off); + + writew(value, pctrl->pm_pmio_base + reg_off->pmio_offset); + } else { + value = inw(pctrl->pmio_base + reg_off->pmio_offset); + if (mask) + value &= (~(1 << bit_off)); + else + value |= (1 << bit_off); + + outw(value, pctrl->pmio_base + reg_off->pmio_offset); + } if (mask) { - value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); - value1 |= (1<pm_pmio_base+mod->pmio_offset); + value1 = readw(pctrl->pm_pmio_base + mod->pmio_offset); + value1 |= (1 << bit_off); + writew(value1, pctrl->pm_pmio_base + mod->pmio_offset); } else { - value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); - value1 |= (1<pm_pmio_base+mod->pmio_offset); + value1 = readw(pctrl->pm_pmio_base + mod->pmio_offset); + value1 |= (1 << bit_off); + writew(value1, pctrl->pm_pmio_base + mod->pmio_offset); } + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } } @@ -433,13 +440,20 @@ static irqreturn_t zhaoxin_gpio_irq(int irq, void *data) int ret = 0; int subirq; unsigned int hwirq; + unsigned long flags; init = pctrl->pin_topologys->int_cal; stat_cal = pctrl->pin_topologys->status_cal; for (i = 0; i < init->reg_cal_size; i++) { pending = 0; + raw_spin_lock_irqsave(&pctrl->lock, flags); status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset); - enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset); + + if (!init->is_pmio) + enable = readw(pctrl->pm_pmio_base + init->reg[i].pmio_offset); + else + enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); enable &= status; pending = enable; for_each_set_bit(bit_offset, &pending, init->reg[i].size) { @@ -467,6 +481,10 @@ static int zhaoxin_gpio_irq_type(struct irq_data *d, 
unsigned int type) int position, point; u16 value; bool isup = true; + bool high_bit = false; + u16 mask = 0; + int bits_num = 0; + u16 test_mask; trigger_cal = pctrl->pin_topologys->trigger_cal; pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); @@ -484,26 +502,63 @@ static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type) for (position = 0; position < trigger_cal->size; position++) if (trigger_cal->cal_array[position] == gpio) break; - - index = trigger_cal->index + ALIGN(position+1, 4)/4-1; + mask = trigger_cal->mask; + bits_num = trigger_cal->bits_num; + index = trigger_cal->index + ALIGN(position + 1, 4) / 4 - 1; point = position % 4; + if (point > 1) + high_bit = true; raw_spin_lock_irqsave(&pctrl->lock, flags); value = zx_pad_read16(pctrl, index); - if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) - value |= TRIGGER_BOTH_EDGE << (point*4); - else if (type & IRQ_TYPE_EDGE_FALLING) - value |= TRIGGER_FALL_EDGE << (point*4); - else if (type & IRQ_TYPE_EDGE_RISING) - value |= TRIGGER_RISE_EDGE << (point*4); - else if (type & IRQ_TYPE_LEVEL_LOW) - value |= TRIGGER_LOW_LEVEL << (point*4); - else if (type & IRQ_TYPE_LEVEL_HIGH) - value |= TRIGGER_HIGH_LEVEL << (point*4); - else - dev_dbg(pctrl->dev, "%s wrang type\n", __func__); + if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { + if (high_bit) { + point = point - 2; + position = 8; + } else + position = 0; + test_mask = (~(mask << (point * bits_num + position))); + value &= (~(mask << (point * bits_num + position))); + value |= TRIGGER_BOTH_EDGE << (point * bits_num + position); + } else if (type & IRQ_TYPE_EDGE_FALLING) { + if (high_bit) { + point = point - 2; + position = 8; + } else + position = 0; + test_mask = (~(mask << (point * bits_num + position))); + value &= (~(mask << (point * bits_num + position))); + value |= TRIGGER_FALL_EDGE << (point * bits_num + position); + } else if (type & IRQ_TYPE_EDGE_RISING) { + if (high_bit) { + point = point - 2; + position = 8; + 
} else + position = 0; + test_mask = (~(mask << (point * bits_num + position))); + value &= (~(mask << (point * bits_num + position))); + value |= TRIGGER_RISE_EDGE << (point * bits_num + position); + } else if (type & IRQ_TYPE_LEVEL_LOW) { + if (high_bit) { + point = point - 2; + position = 8; + } else + position = 0; + test_mask = (~(mask << (point * bits_num + position))); + value &= (~(mask << (point * bits_num + position))); + value |= TRIGGER_LOW_LEVEL << (point * bits_num + position); + } else if (type & IRQ_TYPE_LEVEL_HIGH) { + if (high_bit) { + point = point - 2; + position = 8; + } else + position = 0; + test_mask = (~(mask << (point * bits_num + position))); + value &= (~(mask << (point * bits_num + position))); + value |= TRIGGER_HIGH_LEVEL << (point * bits_num + position); + } zx_pad_write16(pctrl, index, value); @@ -523,6 +578,7 @@ static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on) unsigned int pin; pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (pin) { if (on) enable_irq_wake(pctrl->irq); @@ -544,8 +600,9 @@ static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc) if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) continue; ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), - map->zhaoxin_range_gpio_base, map->zhaoxin_range_pin_base, - map->zhaoxin_range_pin_size); + map->zhaoxin_range_gpio_base, + map->zhaoxin_range_pin_base, + map->zhaoxin_range_pin_size); if (ret) { dev_err(pctrl->dev, "failed to add GPIO pin range\n"); return ret; @@ -567,7 +624,7 @@ static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl) continue; if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio) ngpio = pin_maps->zhaoxin_range_gpio_base + - pin_maps->zhaoxin_range_pin_size; + pin_maps->zhaoxin_range_pin_size; } return ngpio; @@ -595,9 +652,11 @@ static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq) pctrl->irqchip.irq_set_type = 
zhaoxin_gpio_irq_type; pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake; pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; - + /* + * father domain irq + */ ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, - dev_name(pctrl->dev), pctrl); + dev_name(pctrl->dev), pctrl); if (ret) { dev_err(pctrl->dev, "failed to request interrupt\n"); return ret; @@ -624,10 +683,10 @@ static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl) } static int zhaoxin_pinctrl_probe(struct platform_device *pdev, - const struct zhaoxin_pinctrl_soc_data *soc_data) + const struct zhaoxin_pinctrl_soc_data *soc_data) { struct zhaoxin_pinctrl *pctrl; - int ret, i, irq; + int ret, i, irq; struct resource *res; void __iomem *regs; @@ -638,9 +697,11 @@ static int zhaoxin_pinctrl_probe(struct platform_device *pdev, pctrl->soc = soc_data; raw_spin_lock_init(&pctrl->lock); pctrl->pin_topologys = pctrl->soc->pin_topologys; + pctrl->gpio_type = pctrl->soc->gpio_type; + pctrl->private_init = pctrl->soc->private_init; pctrl->pin_map_size = pctrl->soc->pin_map_size; - pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size, - sizeof(*pctrl->pin_maps), GFP_KERNEL); + pctrl->pin_maps = + devm_kcalloc(&pdev->dev, pctrl->pin_map_size, sizeof(*pctrl->pin_maps), GFP_KERNEL); if (!pctrl->pin_maps) return -ENOMEM; for (i = 0; i < pctrl->pin_map_size; i++) { @@ -651,11 +712,10 @@ static int zhaoxin_pinctrl_probe(struct platform_device *pdev, regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); + if (pctrl->private_init) + pctrl->private_init(pctrl); pctrl->pm_pmio_base = regs; - pctrl->pmio_base = 0x800; - pctrl->pmio_rx90 = 0x90; - pctrl->pmio_rx8c = 0x8c; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -673,7 +733,6 @@ static int zhaoxin_pinctrl_probe(struct platform_device *pdev, return PTR_ERR(pctrl->pctldev); } ret = zhaoxin_gpio_probe(pctrl, irq); - if (ret) return ret; platform_set_drvdata(pdev, pctrl); @@ 
-753,6 +812,6 @@ EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq); #endif MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); MODULE_VERSION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Zhaoxin pinctrl/GPIO core driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h index cebea382dbe9..344bfe6a31a0 100644 --- a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +// SPDX-License-Identifier: GPL-2.0-or-later + /* - * zhaoxin pinctrl common code - * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * Zhaoxin pinctrl common code + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. */ #ifndef PINCTRL_zhaoxin_H @@ -18,8 +19,47 @@ struct platform_device; struct device; +struct zhaoxin_pinctrl; + +#define SOCKET_PINCTRL_PIN(sock, a, b) PINCTRL_PIN(a, b"_"#sock) + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_ARRAY(a, b) \ + { \ + .pmio_offset = (a), \ + .size = (b), \ + } + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ + { \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ + } + +#define ZX_CAL_INDEX_ARRAY_MASK(a, b, c, d, e) \ + { \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ + .bits_num = (d), \ + .mask = (e), \ + } + +#define ZHAOXIN_GPP(s, e, g) \ + { \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ + } -/** +/* * struct zhaoxin_pingroup pin define */ struct zhaoxin_pingroup { @@ -30,12 +70,12 @@ struct zhaoxin_pingroup { const unsigned int *modes; }; -/** +/* * struct zhaoxin_function */ struct zhaoxin_function { const char *name; - const char * const 
*groups; + const char *const *groups; size_t ngroups; }; @@ -63,6 +103,7 @@ struct reg_calibrate { const int reg_cal_size; const int *cal_array; const int size; + bool is_pmio; }; struct index_cal_array { @@ -71,6 +112,8 @@ struct index_cal_array { int index; int *cal_array; int size; + int bits_num; + u16 mask; }; struct zhaoxin_pin_topology { @@ -83,13 +126,19 @@ struct zhaoxin_pin_topology { const struct index_cal_array *trigger_cal; }; -#define TRIGGER_FALL_EDGE 0 -#define TRIGGER_RISE_EDGE 1 -#define TRIGGER_BOTH_EDGE 2 -#define TRIGGER_LOW_LEVEL 3 -#define TRIGGER_HIGH_LEVEL 4 +#define TRIGGER_FALL_EDGE 0 +#define TRIGGER_RISE_EDGE 1 +#define TRIGGER_BOTH_EDGE 2 +#define TRIGGER_LOW_LEVEL 3 +#define TRIGGER_HIGH_LEVEL 4 + +#define ZHAOXIN_GPIO_BASE_NOMAP -1 -#define ZHAOXIN_GPIO_BASE_NOMAP -1 +typedef enum { + ZX_TYPE_ERROR = 0, + ZX_TYPE_GPIO, + ZX_TYPE_PGPIO, +} zx_gpio_type; struct zhaoxin_pinctrl_soc_data { const char *uid; @@ -100,13 +149,15 @@ struct zhaoxin_pinctrl_soc_data { const struct zhaoxin_function *functions; size_t nfunctions; const struct zhaoxin_pin_topology *pin_topologys; + + zx_gpio_type (*gpio_type)(struct zhaoxin_pinctrl *pctrl, unsigned int pin); + + void (*private_init)(struct zhaoxin_pinctrl *pctrl); + const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps; size_t pin_map_size; }; -const struct zhaoxin_pinctrl_soc_data * - zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev); - struct zhaoxin_pinctrl { struct device *dev; raw_spinlock_t lock; @@ -117,6 +168,10 @@ struct zhaoxin_pinctrl { const struct zhaoxin_pinctrl_soc_data *soc; const struct zhaoxin_pin_topology *pin_topologys; struct zhaoxin_pin_map2_gpio *pin_maps; + + zx_gpio_type (*gpio_type)(struct zhaoxin_pinctrl *pctrl, unsigned int pin); + + void (*private_init)(struct zhaoxin_pinctrl *pctrl); size_t pin_map_size; int irq; int pmio_base; @@ -125,6 +180,10 @@ struct zhaoxin_pinctrl { int pmio_rx8c; }; +const struct zhaoxin_pinctrl_soc_data 
*zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev); +void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value); +u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index); + int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev); int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev); -- Gitee From 98ca6b4a9b9d4a77fe1682026c781cb0a9c4597e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:21 +0800 Subject: [PATCH 116/231] ata: ahci: Fix Zhaoxin SATA LED quirk for KH-50000 zhaoxin inclusion category: feature -------------------- This patch addresses a quirk in the SATA Enclosure Management (EM) LED functionality on Zhaoxin processors for the controller with device ID 0x9083 and revision 0x40. It ensures proper LED functionality by correctly managing memory-mapped I/O addresses and applying controller-specific quirks. Signed-off-by: leoliu-oc --- drivers/ata/ahci.c | 114 ++++++++++++++++++++++++++++++++++++++++++ drivers/ata/ahci.h | 9 ++++ drivers/ata/libahci.c | 17 +++++++ 3 files changed, 140 insertions(+) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7b61915a141e..699d77871345 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1793,6 +1793,117 @@ static ssize_t remapped_nvme_show(struct device *dev, static DEVICE_ATTR_RO(remapped_nvme); +#ifdef CONFIG_X86 +static void ahci_zx_led_remove_quirk(struct pci_dev *pdev) +{ + struct ata_host *host = pci_get_drvdata(pdev); + struct ahci_host_priv *hpriv = host->private_data; + struct pci_dev *sata_pdev = NULL; + struct ata_host *sata_host = NULL; + struct ahci_host_priv *sata_hpriv = NULL; + void __iomem *p1_mmio_tmp = NULL; + struct pci_dev *target_p0_dev = NULL; + + if (!hpriv->px_index || !hpriv->has_p0_p1) + return; + + while ((sata_pdev = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, sata_pdev)) != NULL) { + sata_host = pci_get_drvdata(sata_pdev); + sata_hpriv = sata_host ? 
sata_host->private_data : NULL; + if (!sata_hpriv) + continue; + if (sata_hpriv->sx_index == hpriv->sx_index) { + if (sata_hpriv->px_index == 1 && + PCI_FUNC(pdev->devfn) != PCI_FUNC(sata_pdev->devfn)) + p1_mmio_tmp = sata_hpriv->mmio; + else if (sata_hpriv->px_index == 0) + target_p0_dev = sata_pdev; + } + + if (target_p0_dev && p1_mmio_tmp) + break; + } + + if (sata_pdev) + pci_dev_put(sata_pdev); + + if (target_p0_dev) { + sata_host = pci_get_drvdata(target_p0_dev); + sata_hpriv = sata_host ? sata_host->private_data : NULL; + if (sata_hpriv) + sata_hpriv->p1_mmio = p1_mmio_tmp; + } +} + +static void ahci_zx_led_init_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv) +{ + int i, err; + u8 p0_bus_number, p1_bus_number, target_px_index; + u64 val; + struct pci_dev *sata_pdev = NULL; + struct ata_host *sata_host = NULL; + struct ahci_host_priv *sata_hpriv = NULL; + + if (pdev->vendor != PCI_VENDOR_ID_ZHAOXIN || pdev->device != 0x9083 || + pdev->revision != 0x40) + return; + + val = native_read_msr_safe(ZX_GET_BUS_NUMBER_QUIRK, &err); + if (err) /* MSR read failed */ + return; + + pr_info("ahci: zx led quirk init\n"); + + hpriv->sx_index = 0xFF; + hpriv->px_index = 0xFF; + hpriv->p1_mmio = NULL; + hpriv->has_p0_p1 = false; + for (i = 0; i < 4; i++) { + p0_bus_number = val & 0xFF; + p1_bus_number = (val >> 8) & 0xFF; + if (pdev->bus->number == p0_bus_number) { + hpriv->sx_index = i; + hpriv->px_index = 0; + break; + } + if (pdev->bus->number == p1_bus_number) { + hpriv->sx_index = i; + hpriv->px_index = 1; + break; + } + val >>= 16; + } + /* Exit if no matching bus number found */ + if (hpriv->px_index == 0xFF || hpriv->sx_index == 0xFF) + return; + + target_px_index = !hpriv->px_index; + while ((sata_pdev = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, sata_pdev)) != NULL) { + sata_host = pci_get_drvdata(sata_pdev); + sata_hpriv = sata_host ? 
sata_host->private_data : NULL; + if (!sata_hpriv) + continue; + + if (sata_hpriv->sx_index == hpriv->sx_index && + sata_hpriv->px_index == target_px_index) { + if (hpriv->px_index == 0) + hpriv->p1_mmio = sata_hpriv->mmio; + else + sata_hpriv->p1_mmio = hpriv->mmio; + hpriv->has_p0_p1 = true; + sata_hpriv->has_p0_p1 = true; + break; + } + } + + if (sata_pdev) + pci_dev_put(sata_pdev); +} +#else +static inline void ahci_zx_led_remove_quirk(struct pci_dev *pdev) { } +static inline void ahci_zx_led_init_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { } +#endif + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_id = ent->driver_data; @@ -1915,6 +2026,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); + ahci_zx_led_init_quirk(pdev, hpriv); + /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) { pi.flags |= ATA_FLAG_NCQ; @@ -2066,6 +2179,7 @@ static void ahci_shutdown_one(struct pci_dev *pdev) static void ahci_remove_one(struct pci_dev *pdev) { + ahci_zx_led_remove_quirk(pdev); sysfs_remove_file_from_group(&pdev->dev.kobj, &dev_attr_remapped_nvme.attr, NULL); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index df8f8a1a3a34..8ba831020d77 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -36,6 +36,10 @@ #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 #define EM_MSG_LED_VALUE_OFF 0xfff80000 #define EM_MSG_LED_VALUE_ON 0x00010000 +#ifdef CONFIG_X86 +/*fix zhaoxin Enclosure Management quirk*/ +#define ZX_GET_BUS_NUMBER_QUIRK 0x000012B0 +#endif enum { AHCI_MAX_PORTS = 32, @@ -379,6 +383,11 @@ struct ahci_host_priv { /* only required for per-port MSI(-X) support */ int (*get_irq_vector)(struct ata_host *host, int port); + /* fix zhaoxin Enclosure Management quirk */ + void __iomem *p1_mmio; + u8 sx_index; + u8 px_index; + bool has_p0_p1; }; extern int ahci_ignore_sss; diff --git 
a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 3bd0d82f4c2a..eb7117e2117f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -367,6 +367,18 @@ static ssize_t ahci_read_em_buffer(struct device *dev, return i; } +static void __iomem *zx_led_get_mmio(struct ata_port *ap, struct ahci_host_priv *hpriv) +{ +#ifdef CONFIG_X86 + if (hpriv->has_p0_p1 && hpriv->px_index == 0) { + if (hpriv->p1_mmio) + return hpriv->p1_mmio; + dev_warn_ratelimited(ap->host->dev, "P1 removed, LED mode unavailable\n"); + } +#endif + return hpriv->mmio; +} + static ssize_t ahci_store_em_buffer(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -390,6 +402,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev, ahci_rpm_get_port(ap); spin_lock_irqsave(ap->lock, flags); + mmio = zx_led_get_mmio(ap, hpriv); em_ctl = readl(mmio + HOST_EM_CTL); if (em_ctl & EM_CTL_TM) { spin_unlock_irqrestore(ap->lock, flags); @@ -1138,6 +1151,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, * if we are still busy transmitting a previous message, * do not allow */ + + mmio = zx_led_get_mmio(ap, hpriv); em_ctl = readl(mmio + HOST_EM_CTL); if (em_ctl & EM_CTL_TM) { spin_unlock_irqrestore(ap->lock, flags); @@ -1145,6 +1160,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, return -EBUSY; } + mmio = hpriv->mmio; if (hpriv->em_msg_type & EM_MSG_TYPE_LED) { /* * create message header - this is all zero except for @@ -1162,6 +1178,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, /* * tell hardware to transmit the message */ + mmio = zx_led_get_mmio(ap, hpriv); writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); } -- Gitee From 70a3eae3a3bc2230687f9c8210eca6762277aa2a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:22 +0800 Subject: [PATCH 117/231] perf/zhaoxin/core: Tailor FMS for KH-50000 zhaoxin inclusion category: feature -------------------- This patch adds 
support for CPU model 0x7b in the Zhaoxin PMU driver. It extends the existing switch statement in `zhaoxin_pmu_init` to handle this new CPU model, which is similar to the 0x6b model. Reviewed-by: Tony W. Wang Tested-by: Lyle Li Signed-off-by: leoliu-oc --- arch/x86/events/zhaoxin/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 687c2d8a25ff..61cb6582d8c2 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -646,6 +646,7 @@ __init int zhaoxin_pmu_init(void) break; case 0x5b: case 0x6b: + case 0x7b: zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, .cmask = 0x01); @@ -661,7 +662,7 @@ __init int zhaoxin_pmu_init(void) if (boot_cpu_data.x86_model == 0x5b) pr_cont("Yongfeng events, "); - if (boot_cpu_data.x86_model == 0x6b) + if (boot_cpu_data.x86_model == 0x6b || boot_cpu_data.x86_model == 0x7b) pr_cont("Shijidadao events, "); break; -- Gitee From d8b62f7562bc0de5b70cb2b1a693f747c841b98a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:23 +0800 Subject: [PATCH 118/231] hwmon: zhaoxin-cputemp: Update for KH-50000 zhaoxin inclusion category: feature -------------------- Adds support for KH-50000 CPU with model 0x7b in the zhaoxin-cputemp driver. It extends the existing `x86_cpu_id` array to include this new CPU model. Reviewed-by: Tony W. 
Wang Tested-by: Lyle Li Signed-off-by: leoliu-oc --- drivers/hwmon/zhaoxin-cputemp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c index 751d2c5a868a..3c4eeda7b40f 100644 --- a/drivers/hwmon/zhaoxin-cputemp.c +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -122,7 +122,7 @@ static int zhaoxin_cputemp_probe(struct platform_device *pdev) data->id = pdev->id; data->name = "zhaoxin_cputemp"; data->msr_temp = 0x1423; - if (c->x86_model == 0x6b) { + if (c->x86_model == 0x6b || c->x86_model == 0x7b) { data->msr_crit = 0x175b; data->msr_max = 0x175a; } else { @@ -249,6 +249,8 @@ static const struct x86_cpu_id cputemp_ids[] __initconst = { X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL), X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x7b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x7b, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); -- Gitee From 47e7a273bb1a395f7c9eb4bfb32218db8e8e4cf2 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:24 +0800 Subject: [PATCH 119/231] Add platform-specific quirks for ZX-200 device zhaoxin inclusion category: feature -------------------- Adds platform-specific handling for Zhaoxin ZX-200 in both AHCI and xHCI drivers. For non-Zhaoxin CPU platforms, it applies the following quirks: 1. In AHCI driver (ahci.c): - Sets AHCI_HFLAG_32BIT_ONLY flag to restrict DMA operations to 32-bit addressing 2. In xHCI driver (xhci-pci.c): - Sets XHCI_RESET_ON_RESUME quirk to handle device resume issues - Sets XHCI_NO_64BIT_SUPPORT quirk to disable 64-bit DMA addressing These changes ensure proper functionality and compatibility of Zhaoxin ZX-200 on third-party platforms. Reviewed-by: Tony W. 
Wang Tested-by: Lyle Li Signed-off-by: leoliu-oc --- drivers/ata/ahci.c | 13 +++++++++++++ drivers/usb/host/xhci-pci.c | 16 ++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 7b61915a141e..793915689544 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -81,6 +81,16 @@ enum board_ids { board_ahci_mcp79 = board_ahci_mcp77, }; +static bool is_zhaoxin_cpu(void) +{ +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + return true; +#endif + return false; +} + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void ahci_remove_one(struct pci_dev *dev); static void ahci_shutdown_one(struct pci_dev *dev); @@ -1889,6 +1899,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (ahci_sb600_enable_64bit(pdev)) hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && !is_zhaoxin_cpu()) + hpriv->flags |= AHCI_HFLAG_32BIT_ONLY; + hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; /* detect remapped nvme devices */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7cdb1ce5104f..a338420980b9 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -307,6 +307,16 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev) return 0; } +static bool is_zhaoxin_cpu(void) +{ +#ifdef CONFIG_X86 + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + return true; +#endif + return false; +} + static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(dev); @@ -577,6 +587,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_CADENCE_SSP) xhci->quirks |= XHCI_CDNS_SCTX_QUIRK; + if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && !is_zhaoxin_cpu()) { 
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT; + if (pdev->device == 0x9203) + xhci->quirks |= XHCI_RESET_ON_RESUME; + } + /* xHC spec requires PCI devices to support D3hot and D3cold */ if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; -- Gitee From b73771d2650e93625c62c9d1ce932aa4f3f4a70a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:25 +0800 Subject: [PATCH 120/231] ACPI: processor: idle: Set timer broadcast state on C3 for KH-50000 zhaoxin inclusion category: feature -------------------- For avoid broadcast timer interrupt at C2 state on KH-50000 platform, to increace performance and low interrupt count, so set timer broadcast state on C3 state. Reviewed-by: Alan Song Tested-by: Lyle Li Signed-off-by: leoliu-oc --- drivers/acpi/processor_idle.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b524cf27213d..86e026c49aeb 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -130,6 +130,12 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr, if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) return; + /* On the KH-50000 platform, the local APIC stops in C3 state */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 == 0x7 && boot_cpu_data.x86_model == 0x7b)) + type = ACPI_STATE_C3; + if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) type = ACPI_STATE_C1; -- Gitee From 2d45d59b28dc8fd6e5cca08ffe9b0753be01ca8e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:25 +0800 Subject: [PATCH 121/231] x86/hpet: Set dynamic IRQ feature for HPET for KH-50000 zhaoxin inclusion category: feature version: v6.0.8 -------------------- To avoid unnecessary wakeups of core 0 caused by HPET broadcast timer interrupts, enable the dynamic IRQ feature for HPET on the KH-50000 platform. 
Reviewed-by: Alan Song Tested-by: Lyle Li Signed-off-by: leoliu-oc --- arch/x86/kernel/hpet.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 2626fa052b45..e1dcb241b2e1 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -457,6 +457,15 @@ static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc) hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC; hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic; + /* + * On the KH-50000 platform, enable dynamic HPET interrupts + * to prevent unnecessary wakeups of core 0 caused by broadcast timer events. + */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 == 0x7 && boot_cpu_data.x86_model == 0x7b)) + hc->evt.features |= CLOCK_EVT_FEAT_DYNIRQ; + /* Start HPET legacy interrupts */ hpet_enable_legacy_int(); -- Gitee From ea95096d003bb48b0c84e35814c4e3f358a38338 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 10:56:13 +0800 Subject: [PATCH 122/231] x86/microcode: Add Zhaoxin cpu microcode update driver zhaoxin inclusion category: feature -------------------- Add support for Zhaoxin CPU in the x86 microcode loading facility to enable microcode loading during the OS stage. Currently, Zhaoxin CPU only support early microcode loading. 
Signed-off-by: leoliu-oc --- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/microcode/Makefile | 3 + arch/x86/kernel/cpu/microcode/core.c | 40 +- arch/x86/kernel/cpu/microcode/internal.h | 27 + arch/x86/kernel/cpu/microcode/zhaoxin.c | 604 +++++++++++++++++++++++ 5 files changed, 669 insertions(+), 7 deletions(-) create mode 100644 arch/x86/kernel/cpu/microcode/zhaoxin.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7c9809925f19..5044c094be3b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1323,7 +1323,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y - depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON || CPU_SUP_ZHAOXIN || CPU_SUP_CENTAUR select CRYPTO_LIB_SHA256 if CPU_SUP_AMD config MICROCODE_INITRD32 diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile index 193d98b33a0a..f4d31d8d10f8 100644 --- a/arch/x86/kernel/cpu/microcode/Makefile +++ b/arch/x86/kernel/cpu/microcode/Makefile @@ -3,3 +3,6 @@ microcode-y := core.o obj-$(CONFIG_MICROCODE) += microcode.o microcode-$(CONFIG_CPU_SUP_INTEL) += intel.o microcode-$(CONFIG_CPU_SUP_AMD) += amd.o +ifneq ($(CONFIG_CPU_SUP_ZHAOXIN)$(CONFIG_CPU_SUP_CENTAUR),) +microcode-y += zhaoxin.o +endif diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 58ecfa41cf0f..b7a16dd21cc9 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -136,7 +136,7 @@ bool __init microcode_loader_disabled(void) void __init load_ucode_bsp(void) { unsigned int cpuid_1_eax; - bool intel = true; + unsigned int x86_vendor; if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0) dis_ucode_ldr = true; @@ -145,8 +145,9 @@ void __init load_ucode_bsp(void) return; cpuid_1_eax = native_cpuid_eax(1); + x86_vendor = x86_cpuid_vendor(); - switch (x86_cpuid_vendor()) { + switch (x86_vendor) { case X86_VENDOR_INTEL: if (x86_family(cpuid_1_eax) 
< 6) return; @@ -155,21 +156,36 @@ void __init load_ucode_bsp(void) case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) < 0x10) return; - intel = false; break; case X86_VENDOR_HYGON: - intel = false; + break; + + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + if ((cpuid_eax(0xC0000000) < 0xC0000004) || !(cpuid_edx(0xC0000004) & 0x1)) + return; break; default: return; } - if (intel) + switch (x86_vendor) { + case X86_VENDOR_INTEL: load_ucode_intel_bsp(&early_data); - else + break; + case X86_VENDOR_AMD: + case X86_VENDOR_HYGON: load_ucode_amd_bsp(&early_data, cpuid_1_eax); + break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + load_ucode_zhaoxin_bsp(&early_data); + break; + default: + return; + } } void load_ucode_ap(void) @@ -198,6 +214,11 @@ void load_ucode_ap(void) case X86_VENDOR_HYGON: load_ucode_amd_ap(cpuid_1_eax); break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + if ((cpuid_eax(0xC0000000) >= 0xC0000004) && (cpuid_edx(0xC0000004) & 0x1)) + load_ucode_zhaoxin_ap(); + break; default: break; } @@ -260,6 +281,10 @@ static void reload_early_microcode(unsigned int cpu) case X86_VENDOR_HYGON: reload_ucode_amd(cpu); break; + case X86_VENDOR_ZHAOXIN: + case X86_VENDOR_CENTAUR: + reload_ucode_zhaoxin(); + break; default: break; } @@ -849,6 +874,9 @@ static int __init microcode_init(void) microcode_ops = init_amd_microcode(); else if (c->x86_vendor == X86_VENDOR_HYGON) microcode_ops = init_hygon_microcode(); + else if (c->x86_vendor == X86_VENDOR_ZHAOXIN || + c->x86_vendor == X86_VENDOR_CENTAUR) + microcode_ops = init_zhaoxin_microcode(); else pr_err("no support for this CPU vendor\n"); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 3a6cc9efe80d..aefe30aa8a82 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -58,6 +58,12 @@ struct cpio_data find_microcode_in_initrd(const char *path); #define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') 
#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') #define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') +#define CPUID_ZHAOXIN1 QCHAR(' ', ' ', 'S', 'h') +#define CPUID_ZHAOXIN2 QCHAR('a', 'n', 'g', 'h') +#define CPUID_ZHAOXIN3 QCHAR('a', 'i', ' ', ' ') +#define CPUID_CENTAUR1 QCHAR('C', 'e', 'n', 't') +#define CPUID_CENTAUR2 QCHAR('a', 'u', 'r', 'H') +#define CPUID_CENTAUR3 QCHAR('a', 'u', 'l', 's') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -87,6 +93,12 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) return X86_VENDOR_HYGON; + if (CPUID_IS(CPUID_ZHAOXIN1, CPUID_ZHAOXIN2, CPUID_ZHAOXIN3, ebx, ecx, edx)) + return X86_VENDOR_ZHAOXIN; + + if (CPUID_IS(CPUID_CENTAUR1, CPUID_CENTAUR2, CPUID_CENTAUR3, ebx, ecx, edx)) + return X86_VENDOR_CENTAUR; + return X86_VENDOR_UNKNOWN; } @@ -136,4 +148,19 @@ static inline void reload_ucode_intel(void) { } static inline struct microcode_ops *init_intel_microcode(void) { return NULL; } #endif /* !CONFIG_CPU_SUP_INTEL */ +#if defined(CONFIG_CPU_SUP_ZHAOXIN) || defined(CONFIG_CPU_SUP_CENTAUR) +void load_ucode_zhaoxin_bsp(struct early_load_data *ed); +void load_ucode_zhaoxin_ap(void); +void reload_ucode_zhaoxin(void); +struct microcode_ops *init_zhaoxin_microcode(void); +#else /* CONFIG_CPU_SUP_ZHAOXIN || CONFIG_CPU_SUP_CENTAUR */ +static inline void load_ucode_zhaoxin_bsp(struct early_load_data *ed) { } +static inline void load_ucode_zhaoxin_ap(void) { } +static inline void reload_ucode_zhaoxin(void) { } +static inline struct microcode_ops *init_zhaoxin_microcode(void) +{ + return NULL; +} +#endif /* !CONFIG_CPU_SUP_ZHAOXIN && !CONFIG_CPU_SUP_CENTAUR */ + #endif /* _X86_MICROCODE_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/microcode/zhaoxin.c b/arch/x86/kernel/cpu/microcode/zhaoxin.c new file mode 100644 index 000000000000..9ddffe40439d --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/zhaoxin.c @@ -0,0 +1,604 @@ +// 
SPDX-License-Identifier: GPL-2.0-or-later +/* + * Zhaoxin CPU Microcode Update Driver for Linux + * + * Author: Lyle Li + * + */ +#define pr_fmt(fmt) "microcode: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "internal.h" + +struct microcode_header_zhaoxin { + u32 signature; + u32 reserved1; + u32 year:16; + u32 day:8; + u32 month:8; + u32 applicable_processor; + u32 checksum; + u32 ldr_rev; + u8 chip_pf; + u8 sku_flag; + u16 update_rev_small_low; + u32 data_size; + u32 total_size; + u16 reserved2; + u32 update_rev; + u16 reserved3; + u16 signed_flag; + u16 update_rev_small_high; +} __packed; + +struct microcode_zhaoxin { + struct microcode_header_zhaoxin hdr; + unsigned int data[]; +}; + +static pgd_t microcode_pgd_entry; +static pud_t *microcode_pud_page; +static p4d_t *microcode_p4d_page; +static const char ucode_path[] = "kernel/x86/microcode/Zhaoxinucode.bin"; + +#define ZHAOXIN_MICROCODE_HEADER 0x53415252 +#define ZHAOXIN_MC_HEADER_SIZE sizeof(struct microcode_header_zhaoxin) +#define UCODE_BSP_LOADED ((struct microcode_zhaoxin *)0x1UL) +#define ZHAOXIN_MSR_PF 0x1631 +#define ZHAOXIN_MSR_FCR5_PATCH_ERROR_CODE 0x1205 +#define IS_HEX(c) \ + (((c) >= '0' && (c) <= '9') || ((c) >= 'A' && (c) <= 'F') || ((c) >= 'a' && (c) <= 'f')) + +/* + * Current microcode patch used on both the BSP and APs + * during the resume phase + */ +static struct microcode_zhaoxin *zhaoxin_ucode_patch __read_mostly; + +static inline u32 get_microcode_revision(bool small) +{ + u32 rev, rev_small; + + native_wrmsrl(MSR_IA32_UCODE_REV, 0); + + /* As documented in the SDM: Do a CPUID 1 here */ + native_cpuid_eax(1); + + /* get the current revision from MSR 0x8B */ + native_rdmsr(MSR_IA32_UCODE_REV, rev_small, rev); + + return small ? 
rev_small : rev; +} + +static int zhaoxin_collect_cpu_info(int cpu_num, struct cpu_signature *sig) +{ + sig->sig = cpuid_eax(1); + sig->rev = get_microcode_revision(true); + + sig->pf = get_microcode_revision(false); + + return 0; +} + +static inline u32 get_microcode_header_update_revision(struct microcode_header_zhaoxin *mc_hdr) +{ + const u8 *bytes = (const u8 *)&mc_hdr->update_rev; + u32 byte0, byte1, byte2, byte3; + + if (!IS_HEX(bytes[0]) || !IS_HEX(bytes[1]) || !IS_HEX(bytes[2]) || !IS_HEX(bytes[3])) + return 0; + + byte0 = (bytes[0] <= '9') ? (bytes[0] - '0') : ((bytes[0] & 0xDF) - 'A' + 0xA); + byte1 = (bytes[1] <= '9') ? (bytes[1] - '0') : ((bytes[1] & 0xDF) - 'A' + 0xA); + byte2 = (bytes[2] <= '9') ? (bytes[2] - '0') : ((bytes[2] & 0xDF) - 'A' + 0xA); + byte3 = (bytes[3] <= '9') ? (bytes[3] - '0') : ((bytes[3] & 0xDF) - 'A' + 0xA); + + return (byte0 << 12) | (byte1 << 8) | (byte2 << 4) | byte3; +} + +static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2, + unsigned int pf2) +{ + return s1->sig == sig2 && s1->pf == pf2; +} + +static bool zhaoxin_find_matching_signature(void *mc, struct cpu_signature *sig) +{ + struct microcode_header_zhaoxin *mc_hdr = mc; + u32 chip_pf, dummy, sku_flag; + + /* verfiy cpu signature and revision */ + if (!cpu_signatures_match(sig, mc_hdr->applicable_processor, + get_microcode_header_update_revision(mc_hdr))) + return false; + + native_rdmsr(ZHAOXIN_MSR_PF, dummy, chip_pf); + chip_pf &= ((0x1 << 8) - 1); + if (mc_hdr->chip_pf != chip_pf) + return false; + + native_rdmsr(ZHAOXIN_MSR_FCR5_PATCH_ERROR_CODE, sku_flag, dummy); + sku_flag = (sku_flag & ((0x1 << 21) - 1)) >> 13; + + return mc_hdr->sku_flag == sku_flag; +} + +static int zhaoxin_microcode_sanity_check(void *mc, bool print_err, int hdr_type) +{ + struct microcode_header_zhaoxin *mc_header = mc; + u32 check_sum = 0, i; + + /* verify microcode data size */ + if (mc_header->data_size + ZHAOXIN_MC_HEADER_SIZE > mc_header->total_size) { + if 
(print_err) + pr_err("Error: bad microcode data file size.\n"); + return -EINVAL; + } + + /* verify loader_version and header signature */ + if (mc_header->ldr_rev != 1 || mc_header->signature != hdr_type) { + if (print_err) + pr_err("Error: invalid/unknown microcode update format. Header type %d\n", + mc_header->signature); + return -EINVAL; + } + + /* Calculate the checksum of update data and header. */ + check_sum = 0; + i = mc_header->total_size / sizeof(u32); + while (i--) + check_sum += ((u32 *)mc)[i]; + + if (check_sum) { + if (print_err) + pr_err("Bad microcode data checksum, aborting.\n"); + return -EINVAL; + } + + return 0; +} + +static void save_microcode_patch(struct microcode_zhaoxin *patch) +{ + unsigned int size = patch->hdr.total_size; + struct page *pg = NULL; + void *dst = NULL; + + /* + * Due to hardware limitations, the ucode must reside within the 4G + * address space. Therefore, the GFP_DMA32 flag is used to restrict + * the memory allocation to this range. + */ + pg = alloc_pages(GFP_DMA32 | GFP_KERNEL, get_order(size)); + if (!pg) { + pr_err("Unable to allocate microcode memory size: %u\n", size); + return; + } + + dst = page_address(pg); + memcpy(dst, patch, size); + zhaoxin_ucode_patch = dst; +} + +static inline u32 +get_microcode_header_update_revision_small(struct microcode_header_zhaoxin *mc_hdr) +{ + const u8 *bytes_low = (const u8 *)&mc_hdr->update_rev_small_low; + const u8 *bytes_high = (const u8 *)&mc_hdr->update_rev_small_high; + u32 byte0, byte1, byte2, byte3; + + if (!IS_HEX(bytes_low[0]) || !IS_HEX(bytes_low[1]) || !IS_HEX(bytes_high[0]) || + !IS_HEX(bytes_high[1])) + return 0; + + byte0 = (bytes_low[0] <= '9') ? (bytes_low[0] - '0') : ((bytes_low[0] & 0xDF) - 'A' + 0xA); + byte1 = (bytes_low[1] <= '9') ? (bytes_low[1] - '0') : ((bytes_low[1] & 0xDF) - 'A' + 0xA); + byte2 = (bytes_high[0] <= '9') ? (bytes_high[0] - '0') : + ((bytes_high[0] & 0xDF) - 'A' + 0xA); + byte3 = (bytes_high[1] <= '9') ? 
(bytes_high[1] - '0') : + ((bytes_high[1] & 0xDF) - 'A' + 0xA); + + return (byte0 << 12) | (byte1 << 8) | (byte2 << 4) | byte3; +} + +/* Scan blob for microcode matching the boot CPUs family, model, stepping */ +static struct microcode_zhaoxin *scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, + bool save) +{ + struct microcode_header_zhaoxin *mc_header; + struct microcode_zhaoxin *patch = NULL; + unsigned int mc_size; + u32 cur_rev = uci->cpu_sig.rev; + u32 update_rev; + + for (; size >= sizeof(struct microcode_header_zhaoxin); size -= mc_size, data += mc_size) { + mc_header = (struct microcode_header_zhaoxin *)data; + + mc_size = mc_header->total_size; + if (!mc_size || mc_size > size || + zhaoxin_microcode_sanity_check(data, false, ZHAOXIN_MICROCODE_HEADER) < 0) + break; + + if (!zhaoxin_find_matching_signature(data, &uci->cpu_sig)) + continue; + + update_rev = get_microcode_header_update_revision_small(mc_header); + if (!update_rev) + continue; + /* + * For saving the early microcode, find the matching revision + * which was loaded on both the BSP and APs. + * + * On both the BSP and APs during early boot, find a newer + * revision than actually loaded in the CPU. + */ + if (save) { + if (cur_rev != update_rev) + continue; + } else if (cur_rev >= update_rev) { + continue; + } + + patch = data; + cur_rev = update_rev; + } + + return size ? 
NULL : patch; +} + +static bool verify_patch_load(void) +{ + u32 err_code, dummy; + + /* get ucode update return code from msr 0x1205 */ + native_rdmsr(ZHAOXIN_MSR_FCR5_PATCH_ERROR_CODE, err_code, dummy); + + err_code &= (1 << 8) - 1; + + switch (err_code) { + case 0: + pr_err("no update has been attempted since reset\n"); + break; + case 1: + return true; + case 2: + pr_err("patch mechanism disable\n"); + break; + case 3: + pr_err("bad patch header data\n"); + break; + case 4: + pr_err("bad patch header checksum\n"); + break; + case 5: + pr_err("bad immediate patch data checksum\n"); + break; + case 6: + pr_err("bad main patch data checksum\n"); + break; + case 7: + pr_err("bad overlay patch data checksum\n"); + break; + case 8: + pr_err("patch too big for pram\n"); + break; + } + + return false; +} + +static inline void clear_zhaoxin_err_code_reg(void) +{ + u32 err_code, dummy; + + native_rdmsr(ZHAOXIN_MSR_FCR5_PATCH_ERROR_CODE, err_code, dummy); + + err_code &= ~((1 << 8) - 1); + native_wrmsr(ZHAOXIN_MSR_FCR5_PATCH_ERROR_CODE, err_code, dummy); +} + +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +{ + struct microcode_zhaoxin *mc = uci->mc; + u32 rev; + + if (!mc) + return UCODE_NFOUND; + + clear_zhaoxin_err_code_reg(); + + /* write microcode via MSR 0x79 */ + native_wrmsrl(MSR_IA32_UCODE_WRITE, __pa((unsigned long)mc->data)); + + if (!verify_patch_load()) + return UCODE_ERROR; + + rev = get_microcode_revision(true); + if (rev != get_microcode_header_update_revision_small(&mc->hdr)) + return UCODE_ERROR; + + uci->cpu_sig.rev = rev; + + return UCODE_UPDATED; +} + +static __init struct microcode_zhaoxin *get_microcode_blob(struct ucode_cpu_info *uci, bool save) +{ + struct cpio_data cp; + + zhaoxin_collect_cpu_info(smp_processor_id(), &uci->cpu_sig); + + cp = find_microcode_in_initrd(ucode_path); + + if (!(cp.data && cp.size)) + return NULL; + + return scan_microcode(cp.data, cp.size, uci, save); +} + +static __init pud_t 
*find_pud_entry(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + + pgd = (pgd_t *)__va(read_cr3_pa()); + + pgd = pgd_offset_pgd(pgd, addr); + if (pgd_none(*pgd) || pgd_bad(*pgd)) { + pr_err("Invalid PGD entry for 0x%lx\n", addr); + return NULL; + } + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d) || p4d_bad(*p4d)) { + pr_err("Invalid P4D entry for 0x%lx\n", addr); + return NULL; + } + + return pud_offset(p4d, addr); +} + +/* + * Due to hardware limitations, when loading the microcode, + * it is necessary to ensure that the physical address is + * equal to the virtual address. When the APs core loads + * the ucode, the PAGE_OFFSET has already been randomized. + * Therefore, this function is called during early initialization + * to set up a 4G identity-mapped pgd entry. + */ +static __init void init_microcode_pages(void) +{ + pud_t *pud; + unsigned int i; + pgdval_t pgd_flags = _PAGE_PRESENT | _PAGE_RW | _PAGE_USER; + + microcode_pud_page = (pud_t *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!microcode_pud_page) { + pr_err("Failed to allocate PUD page\n"); + goto exit; + } + memset(microcode_pud_page, 0, PAGE_SIZE); + + if (pgtable_l5_enabled()) { + microcode_p4d_page = (p4d_t *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!microcode_pud_page) { + pr_err("Failed to allocate P4D page\n"); + goto fail_pud; + } + memset(microcode_p4d_page, 0, PAGE_SIZE); + } + + for (i = 0; i < 4; i++) { + pud = find_pud_entry(__PAGE_OFFSET + (unsigned long)(i << 30)); + if (pud == NULL) + goto fail_p4d; + microcode_pud_page[i] = *pud; + } + + if (pgtable_l5_enabled()) { + microcode_p4d_page[0] = __p4d(__pa(microcode_pud_page) | pgd_flags); + microcode_pgd_entry = __pgd(__pa(microcode_p4d_page) | pgd_flags); + } else + microcode_pgd_entry = __pgd(__pa(microcode_pud_page) | pgd_flags); + + goto exit; + +fail_p4d: + if (pgtable_l5_enabled()) + free_page((unsigned long)microcode_p4d_page); + +fail_pud: + free_page((unsigned long)microcode_pud_page); + pr_err("Microcode PUD 
initialized failed.\n"); + microcode_pgd_entry.pgd = 0; +exit: + return; +} + +/* + * Invoked from an early init call to save the microcode blob which was + * selected during early boot when mm was not usable. The microcode must be + * saved because initrd is going away. It's an early init call so the APs + * just can use the pointer and do not have to scan initrd/builtin firmware + * again. + */ +static __init int save_builtin_microcode(void) +{ + struct ucode_cpu_info uci; + + if (IS_ENABLED(CONFIG_X86_32)) + return 0; + + if (xchg(&zhaoxin_ucode_patch, NULL) != UCODE_BSP_LOADED) + return 0; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) + return 0; + + if (microcode_loader_disabled()) + return 0; + + uci.mc = get_microcode_blob(&uci, true); + if (uci.mc) { + save_microcode_patch(uci.mc); + if (zhaoxin_ucode_patch) { + init_microcode_pages(); + return 0; + } + pr_err("%s: CPU %d save microcode patch failed due to page allocation failure\n", + __func__, smp_processor_id()); + } + return 0; +} +early_initcall(save_builtin_microcode); + +/* Load microcode on BSP from initrd */ +void __init load_ucode_zhaoxin_bsp(struct early_load_data *ed) +{ + struct ucode_cpu_info uci; + + if (IS_ENABLED(CONFIG_X86_32)) + return; + + uci.mc = get_microcode_blob(&uci, false); + ed->old_rev = uci.cpu_sig.rev; + + /* + * Due to hardware limitations, when loading the microcode, + * it is necessary to ensure that the physical address is + * equal to the virtual address. Therefore, an identity mapping + * is created for the physical address where the ucode data resides. 
+ */ + early_top_pgt[0] = early_top_pgt[pgd_index(__PAGE_OFFSET)]; + __native_tlb_flush_global(this_cpu_read(cpu_tlbstate.cr4)); + + if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) { + zhaoxin_ucode_patch = UCODE_BSP_LOADED; + x86_cpuinit.parallel_bringup = false; + ed->new_rev = uci.cpu_sig.rev; + } else if (uci.mc) { + pr_debug("%s: BSP CPU %d early update failed due to application failure\n", + __func__, smp_processor_id()); + } +} + +/* Load microcode on AP cores */ +void load_ucode_zhaoxin_ap(void) +{ + struct ucode_cpu_info uci; + pgd_t *pgd_pgt; + + if (IS_ENABLED(CONFIG_X86_32)) + return; + + if (!zhaoxin_ucode_patch) + return; + + if (!microcode_pgd_entry.pgd) { + pr_err("%s: CPU %d early update failed due to page initialization failure\n", + __func__, smp_processor_id()); + return; + } + + uci.mc = zhaoxin_ucode_patch; + + /* + * Due to hardware limitations, the ucode loading + * must use identity mapping. + */ + pgd_pgt = __va(read_cr3_pa()); + pgd_pgt[0] = microcode_pgd_entry; + __flush_tlb_all(); + + if (apply_microcode_early(&uci) != UCODE_UPDATED) + pr_debug("%s: CPU %d early update failed due to microcode application failure\n", + __func__, smp_processor_id()); +} + +/* Reload microcode on resume */ +void reload_ucode_zhaoxin(void) +{ + struct ucode_cpu_info uci = { + .mc = zhaoxin_ucode_patch, + }; + pgd_t old_pgd, *pgd_pgt; + + if (!zhaoxin_ucode_patch) + return; + + if (!microcode_pgd_entry.pgd) { + pr_err("%s: BSP CPU %d reload update failed due to page initialization failure\n", + __func__, smp_processor_id()); + return; + } + + /* + * Due to hardware limitations, the ucode loading must + * use identity mapping. 
+ */ + pgd_pgt = __va(read_cr3_pa()); + old_pgd = pgd_pgt[0]; + pgd_pgt[0] = microcode_pgd_entry; + __flush_tlb_all(); + + if (uci.mc) + if (apply_microcode_early(&uci) != UCODE_UPDATED) + pr_debug("%s: BSP CPU %d reload update failed due to application failure\n", + __func__, smp_processor_id()); + + /* + * Since the page table in use at this point might be that of + * a user-space process, it needs to be restored. + */ + pgd_pgt[0] = old_pgd; + __flush_tlb_all(); +} + +static enum ucode_state apply_microcode_late(int cpu) +{ + /* Zhaoxin CPUs currently do not support runtime microcode updates. */ + return UCODE_NFOUND; +} + +static enum ucode_state request_microcode_fw(int cpu, struct device *device) +{ + /* Zhaoxin CPUs currently do not support runtime microcode updates. */ + return UCODE_NFOUND; +} + +static struct microcode_ops microcode_zhaoxin_ops = { + .request_microcode_fw = request_microcode_fw, + .collect_cpu_info = zhaoxin_collect_cpu_info, + .apply_microcode = apply_microcode_late, +}; + +struct microcode_ops *__init init_zhaoxin_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (IS_ENABLED(CONFIG_X86_32)) + return NULL; + + if ((cpuid_eax(0xC0000000) < 0xC0000004) || !(cpuid_edx(0xC0000004) & 0x1)) { + pr_info("Zhaoxin CPU family 0x%x model 0x%x not supported\n", c->x86, c->x86_model); + return NULL; + } + + return µcode_zhaoxin_ops; +} -- Gitee From 5897d25867c99a23e18f3e486877416ddfd25e0b Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:32:55 +0800 Subject: [PATCH 123/231] efi/cper: Print correctable AER information mainline inclusion from v6.12-rc1 commit d7171eb494353e03f3cde1a6f665e19c243c98e8 upstream. category: feature ------------------- Currently, cper_print_pcie() only logs Uncorrectable Error Status, Mask and Severity registers along with the TLP header. 
If a correctable error is received immediately preceding or following an Uncorrectable Fatal Error, its information is lost since Correctable Error Status and Mask registers are not logged. As such, to avoid skipping any possible error information, Correctable Error Status and Mask registers should also be logged. Additionally, ensure that AER information is also available through cper_print_pcie() for Correctable and Uncorrectable Non-Fatal Errors. Signed-off-by: Yazen Ghannam Tested-by: Avadhut Naik Signed-off-by: Avadhut Naik Signed-off-by: Ard Biesheuvel Signed-off-by: leoliu-oc --- drivers/firmware/efi/cper.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index bcbcf4d37cce..536d638237db 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -489,12 +489,17 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", pfx, pcie->bridge.secondary_status, pcie->bridge.control); - /* Fatal errors call __ghes_panic() before AER handler prints this */ - if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) && - (gdata->error_severity & CPER_SEV_FATAL)) { + /* + * Print all valid AER info. Record may be from BERT (boot-time) or GHES (run-time). + * + * Fatal errors call __ghes_panic() before AER handler prints this. 
+ */ + if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) { struct aer_capability_regs *aer; aer = (struct aer_capability_regs *)pcie->aer_info; + printk("%saer_cor_status: 0x%08x, aer_cor_mask: 0x%08x\n", + pfx, aer->cor_status, aer->cor_mask); printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n", pfx, aer->uncor_status, aer->uncor_mask); printk("%saer_uncor_severity: 0x%08x\n", -- Gitee From de350b1f1c1e7902869f7d3cbd3206a86880c797 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:32:56 +0800 Subject: [PATCH 124/231] x86/cpu: detect extended topology for Zhaoxin CPUs zhaoxin inclusion category: feature -------------------- Zhaoxin CPUs support extended topology enumeration CPUID leaf 0xb/0x1f. Zhaoxin's new product, the KH-50000, features a single socket with 96 cores and support 2/4 sockets interconnects. Additionally, the Zhaoxin KH-50000 requires the use of extended CPUID leaf 0xb/0x1f to retrieve cputopo information. Right now the kernel uses the legacy CPUID leaf 0x1/0x4 for topology detection for Zhaoxin CPUs. So add extended topology detection support for Zhaoxin CPUs. 
Signed-off-by: Tony W Wang-oc Signed-off-by: leoliu-oc --- arch/x86/kernel/cpu/topology_common.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c index 9a6069e7133c..0da37096cde5 100644 --- a/arch/x86/kernel/cpu/topology_common.c +++ b/arch/x86/kernel/cpu/topology_common.c @@ -126,8 +126,12 @@ static void parse_topology(struct topo_scan *tscan, bool early) cpu_parse_topology_amd(tscan); break; case X86_VENDOR_CENTAUR: + if (!IS_ENABLED(CONFIG_CPU_SUP_CENTAUR) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); + break; case X86_VENDOR_ZHAOXIN: - parse_legacy(tscan); + if (!IS_ENABLED(CONFIG_CPU_SUP_ZHAOXIN) || !cpu_parse_topology_ext(tscan)) + parse_legacy(tscan); break; case X86_VENDOR_INTEL: if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan)) -- Gitee From 3a8c5782232ac90d89794ddbaba05c5e08902b00 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:32:57 +0800 Subject: [PATCH 125/231] apei/ghes: Add Zhaoxin platform support for ghes_edac zhaoxin inclusion category: feature -------------------- Add ghes_edac driver platform support for Zhaoxin-based systems by extending the platform detection list in ghes.c. 
Signed-off-by: Tony W.Wang Signed-off-by: Lyle Li Signed-off-by: leoliu-oc --- drivers/acpi/apei/ghes.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 465e8dff71d6..669a5012fc96 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1625,6 +1625,8 @@ void __init acpi_ghes_init(void) */ static struct acpi_platform_list plat_list[] = { {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions}, + {"__ZX__", "EDK2 ", 3, ACPI_SIG_FADT, greater_than_or_equal}, + {"_BYO_ ", "BYOSOFT ", 3, ACPI_SIG_FADT, greater_than_or_equal}, { } /* End */ }; -- Gitee From 6da6dba3730fbd278812759cd46b4789454f1d83 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:33:03 +0800 Subject: [PATCH 126/231] hwmon: zhaoxin-cputemp: Update for KX-8000 zhaoxin inclusion category: feature -------------------- This patch extends temperature monitoring support to include the new Zhaoxin KX-8000 FMS CPU family by: 1. Adding model 0x8b to the MSR register mapping condition, so it uses the same temperature critical and maximum MSR addresses (0x175b and 0x175a) as the existing 0x6b and 0x7b models. 2. Registering both CENTAUR and ZHAOXIN vendor variants of the 0x8b model in the CPU ID matching table to enable driver probe on these systems. 
Signed-off-by: leoliu-oc --- drivers/hwmon/zhaoxin-cputemp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c index 3c4eeda7b40f..6351a5a59600 100644 --- a/drivers/hwmon/zhaoxin-cputemp.c +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -122,7 +122,7 @@ static int zhaoxin_cputemp_probe(struct platform_device *pdev) data->id = pdev->id; data->name = "zhaoxin_cputemp"; data->msr_temp = 0x1423; - if (c->x86_model == 0x6b || c->x86_model == 0x7b) { + if (c->x86_model == 0x6b || c->x86_model == 0x7b || c->x86_model == 0x8b) { data->msr_crit = 0x175b; data->msr_max = 0x175a; } else { @@ -251,6 +251,8 @@ static const struct x86_cpu_id cputemp_ids[] __initconst = { X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x7b, NULL), X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x7b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x8b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x8b, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); -- Gitee From 93f9a9a7c8cea6abd85ada92ea537c9b2b2bcf00 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:33:03 +0800 Subject: [PATCH 127/231] cpufreq: Add CPU frequency policy change notification support zhaoxin inclusion category: feature -------------------- Implemented CPUFREQ_CHANGE_POLICY notification in cpufreq subsystem. This enhancement allows kernel modules to be notified when a CPU frequency policy is modified, facilitating more integrated system management. The notification is dispatched after a successful governor switch during policy updates. 
Signed-off-by: Tony W Wang-oc Signed-off-by: leoliu-oc --- drivers/cpufreq/cpufreq.c | 2 ++ include/linux/cpufreq.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3151929c6239..91297ea3e526 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2701,6 +2701,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, ret = cpufreq_start_governor(policy); if (!ret) { pr_debug("governor change\n"); + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_CHANGE_POLICY, policy); sched_cpufreq_governor_change(policy, old_gov); return 0; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1f94fe8559a9..d170b3139c13 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -508,6 +508,7 @@ static inline void cpufreq_resume(void) {} /* Policy Notifiers */ #define CPUFREQ_CREATE_POLICY (0) #define CPUFREQ_REMOVE_POLICY (1) +#define CPUFREQ_CHANGE_POLICY (2) #ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); -- Gitee From 41eb60aac686b2f24cd96a1f81dc5c7a093e726f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9F=A9=E6=88=90=E9=A3=9E10333708?= Date: Tue, 10 Mar 2026 11:49:46 +0800 Subject: [PATCH 128/231] net: Add driver support for ZTE Dinghai E310/E312/E316 series NICs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds support for ZTE Dinghai E310/E312/E316 series standard network cards. These cards support SR-IOV virtualization technology and network function offloading including checksum offloading, VLAN offloading, and RDMA functionality. 
The driver includes both network and RDMA components:
Network driver: drivers/net/ethernet/dinghai
RDMA driver: drivers/infiniband/hw/zrdma

Signed-off-by: 韩成飞10333708
---
 drivers/infiniband/Kconfig | 1 +
 drivers/infiniband/hw/Makefile | 1 +
 drivers/infiniband/hw/zrdma/Kconfig | 11 +
 drivers/infiniband/hw/zrdma/Makefile | 49 +
 drivers/infiniband/hw/zrdma/cm.c | 348 +
 drivers/infiniband/hw/zrdma/cm.h | 339 +
 drivers/infiniband/hw/zrdma/configfs.c | 1488 +
 drivers/infiniband/hw/zrdma/ctrl.c | 5336 +
 drivers/infiniband/hw/zrdma/dbgfs.c | 1407 +
 drivers/infiniband/hw/zrdma/dbgfs.h | 230 +
 drivers/infiniband/hw/zrdma/defs.h | 2882 +
 drivers/infiniband/hw/zrdma/hmc.c | 1007 +
 drivers/infiniband/hw/zrdma/hmc.h | 296 +
 drivers/infiniband/hw/zrdma/hw.c | 2798 +
 drivers/infiniband/hw/zrdma/icrdma_hw.c | 401 +
 drivers/infiniband/hw/zrdma/icrdma_hw.h | 865 +
 drivers/infiniband/hw/zrdma/iidc.h | 309 +
 drivers/infiniband/hw/zrdma/linux_kcompat.h | 81 +
 drivers/infiniband/hw/zrdma/main.c | 1797 +
 drivers/infiniband/hw/zrdma/main.h | 865 +
 drivers/infiniband/hw/zrdma/manager.c | 1564 +
 drivers/infiniband/hw/zrdma/manager.h | 481 +
 drivers/infiniband/hw/zrdma/osdep.h | 111 +
 drivers/infiniband/hw/zrdma/pble.c | 466 +
 drivers/infiniband/hw/zrdma/pble.h | 135 +
 .../infiniband/hw/zrdma/private_verbs_cmd.c | 5571 +
 .../infiniband/hw/zrdma/private_verbs_cmd.h | 354 +
 drivers/infiniband/hw/zrdma/protos.h | 187 +
 drivers/infiniband/hw/zrdma/puda.c | 207 +
 drivers/infiniband/hw/zrdma/puda.h | 156 +
 drivers/infiniband/hw/zrdma/restrack.c | 158 +
 drivers/infiniband/hw/zrdma/restrack.h | 12 +
 drivers/infiniband/hw/zrdma/slib.c | 181 +
 drivers/infiniband/hw/zrdma/slib.h | 35 +
 .../hw/zrdma/smmu/kernel/adk_mmu600.c | 313 +
 .../hw/zrdma/smmu/kernel/adk_mmu600.h | 145 +
 .../infiniband/hw/zrdma/smmu/kernel/cmdk.h | 54 +
 .../hw/zrdma/smmu/kernel/cmdk_mmu600.c | 96 +
 .../hw/zrdma/smmu/kernel/cmdk_mmu600.h | 54 +
 .../hw/zrdma/smmu/kernel/cmdk_mmu600_inner.h | 38 +
.../hw/zrdma/smmu/kernel/cmdk_pagetable.c | 1207 + .../hw/zrdma/smmu/kernel/common_define.h | 53 + .../hw/zrdma/smmu/kernel/hal_smmu.h | 149 + .../hw/zrdma/smmu/kernel/ioctl_mmu600.c | 0 .../hw/zrdma/smmu/kernel/ioctl_mmu600.h | 0 .../hw/zrdma/smmu/kernel/pub_print.h | 90 + .../hw/zrdma/smmu/kernel/pub_return.h | 44 + drivers/infiniband/hw/zrdma/srq.c | 1116 + drivers/infiniband/hw/zrdma/srq.h | 304 + drivers/infiniband/hw/zrdma/status.h | 6 + drivers/infiniband/hw/zrdma/tc_hmcdma.c | 413 + drivers/infiniband/hw/zrdma/tc_hmcdma.h | 17 + drivers/infiniband/hw/zrdma/trace.c | 112 + drivers/infiniband/hw/zrdma/trace.h | 4 + drivers/infiniband/hw/zrdma/trace_cm.h | 65 + drivers/infiniband/hw/zrdma/type.h | 1816 + drivers/infiniband/hw/zrdma/uda.c | 371 + drivers/infiniband/hw/zrdma/uda.h | 87 + drivers/infiniband/hw/zrdma/uda_d.h | 230 + drivers/infiniband/hw/zrdma/uk.c | 1947 + drivers/infiniband/hw/zrdma/user.h | 519 + drivers/infiniband/hw/zrdma/utils.c | 2896 + drivers/infiniband/hw/zrdma/verbs.c | 4069 + drivers/infiniband/hw/zrdma/verbs.h | 382 + drivers/infiniband/hw/zrdma/vf.c | 52 + drivers/infiniband/hw/zrdma/vf.h | 20 + drivers/infiniband/hw/zrdma/virtchnl.c | 81 + drivers/infiniband/hw/zrdma/virtchnl.h | 84 + drivers/infiniband/hw/zrdma/ws.h | 29 + drivers/infiniband/hw/zrdma/zrdma-abi.h | 147 + drivers/infiniband/hw/zrdma/zrdma.h | 208 + drivers/infiniband/hw/zrdma/zrdma_kcompat.c | 2714 + drivers/infiniband/hw/zrdma/zrdma_kcompat.h | 479 + .../infiniband/hw/zrdma/zxdh_auxiliary_bus.h | 87 + .../hw/zrdma/zxdh_user_ioctl_cmds.h | 142 + .../hw/zrdma/zxdh_user_ioctl_verbs.h | 212 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/dinghai/Kconfig | 97 + drivers/net/ethernet/dinghai/Makefile | 49 + .../net/ethernet/dinghai/bonding/rdma_ops.c | 58 + .../net/ethernet/dinghai/bonding/rdma_ops.h | 21 + .../net/ethernet/dinghai/bonding/zxdh_lag.c | 1989 + .../net/ethernet/dinghai/bonding/zxdh_lag.h | 103 + 
drivers/net/ethernet/dinghai/cmd.c | 39 + .../net/ethernet/dinghai/cmd/msg_chan_lock.c | 314 + .../net/ethernet/dinghai/cmd/msg_chan_lock.h | 67 + .../net/ethernet/dinghai/cmd/msg_chan_priv.h | 311 + .../net/ethernet/dinghai/cmd/msg_chan_test.c | 151 + .../net/ethernet/dinghai/cmd/msg_chan_ver.h | 16 + drivers/net/ethernet/dinghai/cmd/msg_main.c | 31 + drivers/net/ethernet/dinghai/devlink.c | 40 + drivers/net/ethernet/dinghai/dh_cmd.c | 1358 + drivers/net/ethernet/dinghai/dh_procfs.c | 76 + drivers/net/ethernet/dinghai/dh_procfs.h | 26 + drivers/net/ethernet/dinghai/en_aux.c | 6705 ++ drivers/net/ethernet/dinghai/en_aux.h | 806 + .../ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c | 854 + .../ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h | 256 + .../dinghai/en_aux/dcbnl/en_dcbnl_api.c | 1486 + .../dinghai/en_aux/dcbnl/en_dcbnl_api.h | 45 + .../net/ethernet/dinghai/en_aux/drs_sec_dtb.c | 1092 + .../net/ethernet/dinghai/en_aux/drs_sec_dtb.h | 534 + .../dinghai/en_aux/en_1588_pkt_proc.c | 617 + .../dinghai/en_aux/en_1588_pkt_proc.h | 41 + .../dinghai/en_aux/en_1588_pkt_proc_func.c | 663 + .../dinghai/en_aux/en_1588_pkt_proc_func.h | 198 + .../net/ethernet/dinghai/en_aux/en_aux_cmd.c | 3176 + .../net/ethernet/dinghai/en_aux/en_aux_cmd.h | 276 + .../net/ethernet/dinghai/en_aux/en_aux_eq.c | 360 + .../net/ethernet/dinghai/en_aux/en_aux_eq.h | 24 + .../ethernet/dinghai/en_aux/en_aux_events.c | 1511 + .../ethernet/dinghai/en_aux/en_aux_events.h | 50 + .../ethernet/dinghai/en_aux/en_aux_ioctl.c | 1904 + .../ethernet/dinghai/en_aux/en_aux_ioctl.h | 102 + .../net/ethernet/dinghai/en_aux/priv_queue.c | 788 + .../net/ethernet/dinghai/en_aux/priv_queue.h | 192 + drivers/net/ethernet/dinghai/en_aux/queue.c | 3832 + drivers/net/ethernet/dinghai/en_aux/queue.h | 916 + drivers/net/ethernet/dinghai/en_aux/selq.c | 376 + drivers/net/ethernet/dinghai/en_auxiliary.c | 329 + .../net/ethernet/dinghai/en_ethtool/ethtool.c | 3991 + .../net/ethernet/dinghai/en_ethtool/ethtool.h | 159 + 
drivers/net/ethernet/dinghai/en_mpf.c | 297 + drivers/net/ethernet/dinghai/en_mpf.h | 32 + .../ethernet/dinghai/en_mpf/en_mpf_cfg_sf.c | 60 + .../ethernet/dinghai/en_mpf/en_mpf_cfg_sf.h | 27 + .../ethernet/dinghai/en_mpf/en_mpf_devlink.c | 126 + .../ethernet/dinghai/en_mpf/en_mpf_devlink.h | 14 + .../net/ethernet/dinghai/en_mpf/en_mpf_eq.c | 239 + .../net/ethernet/dinghai/en_mpf/en_mpf_eq.h | 19 + .../ethernet/dinghai/en_mpf/en_mpf_events.c | 144 + .../ethernet/dinghai/en_mpf/en_mpf_events.h | 18 + .../net/ethernet/dinghai/en_mpf/en_mpf_irq.c | 165 + .../net/ethernet/dinghai/en_mpf/en_mpf_irq.h | 20 + drivers/net/ethernet/dinghai/en_mpf/tc.c | 0 .../net/ethernet/dinghai/en_np/.clang-format | 176 + drivers/net/ethernet/dinghai/en_np/Makefile | 24 + .../ethernet/dinghai/en_np/Makefile.include | 60 + .../dinghai/en_np/agent/Kbuild.include | 4 + .../en_np/agent/include/dpp_agent_channel.h | 464 + .../en_np/agent/include/dpp_agent_se_res.h | 250 + .../dinghai/en_np/agent/source/Kbuild.include | 2 + .../en_np/agent/source/dpp_agent_channel.c | 1361 + .../ethernet/dinghai/en_np/cmd/Kbuild.include | 4 + .../dinghai/en_np/cmd/include/dpp_cmd_init.h | 16 + .../dinghai/en_np/cmd/include/dpp_cmd_shell.h | 395 + .../dinghai/en_np/cmd/source/Kbuild.include | 2 + .../dinghai/en_np/cmd/source/dpp_cmd_init.c | 36 + .../dinghai/en_np/cmd/source/dpp_cmd_shell.c | 306 + .../dinghai/en_np/comm/Kbuild.include | 4 + .../en_np/comm/include/zxic_comm_avl_tree.h | 89 + .../comm/include/zxic_comm_double_link.h | 108 + .../comm/include/zxic_comm_doublelink_index.h | 147 + .../en_np/comm/include/zxic_comm_index_ctrl.h | 83 + .../en_np/comm/include/zxic_comm_index_fill.h | 65 + .../comm/include/zxic_comm_index_fill_type.h | 90 + .../comm/include/zxic_comm_index_reserve.h | 79 + .../en_np/comm/include/zxic_comm_liststack.h | 88 + .../en_np/comm/include/zxic_comm_rb_tree.h | 117 + .../en_np/comm/include/zxic_comm_socket.h | 145 + .../en_np/comm/include/zxic_comm_thread.h | 89 + 
.../dinghai/en_np/comm/include/zxic_common.h | 3548 + .../dinghai/en_np/comm/include/zxic_private.h | 114 + .../en_np/comm/include/zxic_private_top.h | 198 + .../dinghai/en_np/comm/include/zxic_slt.h | 781 + .../dinghai/en_np/comm/source/Kbuild.include | 2 + .../en_np/comm/source/zxic_comm_double_link.c | 544 + .../en_np/comm/source/zxic_comm_index_fill.c | 442 + .../en_np/comm/source/zxic_comm_liststack.c | 394 + .../en_np/comm/source/zxic_comm_mutex_lock.c | 180 + .../en_np/comm/source/zxic_comm_print.c | 554 + .../en_np/comm/source/zxic_comm_rb_tree.c | 1009 + .../dinghai/en_np/comm/source/zxic_common.c | 1052 + .../en_np/comm/source/zxic_private_top.c | 146 + .../dinghai/en_np/driver/Kbuild.include | 4 + .../en_np/driver/include/dpp_drv_acl.h | 119 + .../en_np/driver/include/dpp_drv_eram.h | 323 + .../en_np/driver/include/dpp_drv_hash.h | 110 + .../en_np/driver/include/dpp_drv_init.h | 32 + .../en_np/driver/include/dpp_drv_sdt.h | 73 + .../en_np/driver/source/Kbuild.include | 2 + .../dinghai/en_np/driver/source/dpp_drv_acl.c | 477 + .../en_np/driver/source/dpp_drv_eram.c | 825 + .../en_np/driver/source/dpp_drv_hash.c | 430 + .../en_np/driver/source/dpp_drv_init.c | 276 + .../ethernet/dinghai/en_np/fc/Kbuild.include | 4 + .../dinghai/en_np/fc/include/dpp_drv_fc.h | 85 + .../dinghai/en_np/fc/source/Kbuild.include | 2 + .../dinghai/en_np/fc/source/dpp_drv_fc.c | 187 + .../dinghai/en_np/flow/Kbuild.include | 4 + .../ethernet/dinghai/en_np/flow/README.txt | 9 + .../dinghai/en_np/flow/api/Kbuild.include | 4 + .../en_np/flow/api/include/dpp_tbl_fd_cfg.h | 57 + .../en_np/flow/api/source/Kbuild.include | 2 + .../en_np/flow/api/source/dpp_fd_cfg_api.c | 133 + .../en_np/flow/api/source/dpp_flow_struct.c | 57 + .../dinghai/en_np/flow/common/Kbuild.include | 4 + .../en_np/flow/common/include/dpp_flow_comm.h | 73 + .../en_np/flow/common/source/Kbuild.include | 2 + .../en_np/flow/common/source/dpp_flow_comm.c | 821 + .../net/ethernet/dinghai/en_np/flow/tool.py | 577 + 
.../dinghai/en_np/flow/zxdh_flow_attr_api.xml | 44 + .../en_np/flow/zxdh_flow_attr_demo.xml | 86 + .../dinghai/en_np/init/Kbuild.include | 4 + .../dinghai/en_np/init/include/dpp_np_init.h | 13 + .../dinghai/en_np/init/source/Kbuild.include | 2 + .../dinghai/en_np/init/source/dpp_np_init.c | 235 + .../dinghai/en_np/netlink/Kbuild.include | 4 + .../en_np/netlink/include/dpp_netlink.h | 30 + .../en_np/netlink/source/Kbuild.include | 2 + .../en_np/netlink/source/dpp_netlink.c | 198 + .../ethernet/dinghai/en_np/qos/Kbuild.include | 4 + .../dinghai/en_np/qos/include/dpp_drv_qos.h | 520 + .../dinghai/en_np/qos/source/Kbuild.include | 2 + .../dinghai/en_np/qos/source/dpp_drv_qos.c | 1119 + .../ethernet/dinghai/en_np/sdk/Kbuild.include | 4 + .../en_np/sdk/include/api/dpp_apt_se_api.h | 620 + .../en_np/sdk/include/api/dpp_dtb_table_api.h | 339 + .../en_np/sdk/include/api/dpp_pbu_api.h | 130 + .../en_np/sdk/include/api/dpp_pktrx_api.h | 86 + .../en_np/sdk/include/api/dpp_ppu_api.h | 602 + .../en_np/sdk/include/api/dpp_reg_api.h | 191 + .../en_np/sdk/include/api/dpp_se_api.h | 1466 + .../en_np/sdk/include/api/dpp_stat_api.h | 513 + .../en_np/sdk/include/api/dpp_tm_api.h | 1134 + .../en_np/sdk/include/api/dpp_type_api.h | 424 + .../en_np/sdk/include/dev/chip/dpp_dev.h | 262 + .../en_np/sdk/include/dev/chip/dpp_init.h | 79 + .../sdk/include/dev/init/dpp_kernel_init.h | 19 + .../sdk/include/dev/module/dma/dpp_dtb.h | 732 + .../sdk/include/dev/module/dma/dpp_dtb_cfg.h | 414 + .../sdk/include/dev/module/nppu/dpp_pbu.h | 444 + .../include/dev/module/nppu/dpp_pktrx_cfg.h | 35 + .../sdk/include/dev/module/ppu/dpp_ppu.h | 109 + .../sdk/include/dev/module/se/dpp_apt_se.h | 144 + .../sdk/include/dev/module/se/dpp_etcam.h | 160 + .../en_np/sdk/include/dev/module/se/dpp_se.h | 777 + .../sdk/include/dev/module/se/dpp_stat_car.h | 999 + .../sdk/include/dev/module/se/dpp_stat_cfg.h | 187 + .../include/dev/module/table/sdt/dpp_sdt.h | 167 + .../dev/module/table/sdt/dpp_sdt_def.h | 129 + 
.../dev/module/table/sdt/dpp_sdt_mgr.h | 142 + .../sdk/include/dev/module/table/se/dpp_acl.h | 112 + .../dev/module/table/se/dpp_dtb_table.h | 843 + .../include/dev/module/table/se/dpp_hash.h | 428 + .../dev/module/table/se/dpp_hash_crc.h | 17 + .../include/dev/module/table/se/dpp_se_cfg.h | 280 + .../en_np/sdk/include/dev/module/tm/dpp_tm.h | 4793 + .../en_np/sdk/include/dev/reg/dpp_axi_reg.h | 42 + .../en_np/sdk/include/dev/reg/dpp_cfg_reg.h | 1000 + .../en_np/sdk/include/dev/reg/dpp_dtb4k_reg.h | 40 + .../en_np/sdk/include/dev/reg/dpp_dtb_reg.h | 484 + .../en_np/sdk/include/dev/reg/dpp_etm_reg.h | 4056 + .../en_np/sdk/include/dev/reg/dpp_mem_info.h | 60 + .../en_np/sdk/include/dev/reg/dpp_module.h | 212 + .../en_np/sdk/include/dev/reg/dpp_nppu_reg.h | 3281 + .../en_np/sdk/include/dev/reg/dpp_pci.h | 39 + .../en_np/sdk/include/dev/reg/dpp_ppu4k_reg.h | 37 + .../en_np/sdk/include/dev/reg/dpp_ppu_reg.h | 3653 + .../en_np/sdk/include/dev/reg/dpp_ptptm_reg.h | 729 + .../en_np/sdk/include/dev/reg/dpp_reg.h | 53 + .../en_np/sdk/include/dev/reg/dpp_reg_info.h | 4456 + .../sdk/include/dev/reg/dpp_reg_struct.h | 81 + .../en_np/sdk/include/dev/reg/dpp_se4k_reg.h | 279 + .../en_np/sdk/include/dev/reg/dpp_se_reg.h | 2778 + .../en_np/sdk/include/dev/reg/dpp_smmu0_reg.h | 6035 + .../sdk/include/dev/reg/dpp_smmu14k_reg.h | 204 + .../en_np/sdk/include/dev/reg/dpp_smmu1_reg.h | 1076 + .../sdk/include/dev/reg/dpp_stat4k_reg.h | 37 + .../en_np/sdk/include/dev/reg/dpp_stat_reg.h | 2064 + .../en_np/sdk/include/dev/reg/dpp_trpg_reg.h | 276 + .../en_np/sdk/include/dev/reg/dpp_tsn_reg.h | 101 + .../en_np/sdk/include/diag/dpp_se_diag.h | 36 + .../dinghai/en_np/sdk/source/Kbuild.include | 4 + .../en_np/sdk/source/dev/Kbuild.include | 4 + .../en_np/sdk/source/dev/chip/Kbuild.include | 2 + .../en_np/sdk/source/dev/chip/dpp_dev.c | 1463 + .../en_np/sdk/source/dev/chip/dpp_init.c | 65 + .../en_np/sdk/source/dev/init/Kbuild.include | 2 + .../sdk/source/dev/init/dpp_kernel_init.c | 252 + 
.../sdk/source/dev/module/Kbuild.include | 4 + .../sdk/source/dev/module/dma/Kbuild.include | 2 + .../en_np/sdk/source/dev/module/dma/dpp_dtb.c | 2453 + .../sdk/source/dev/module/dma/dpp_dtb_cfg.c | 1254 + .../sdk/source/dev/module/nppu/Kbuild.include | 2 + .../sdk/source/dev/module/nppu/dpp_pbu.c | 292 + .../source/dev/module/nppu/dpp_pktrx_cfg.c | 253 + .../sdk/source/dev/module/ppu/Kbuild.include | 2 + .../en_np/sdk/source/dev/module/ppu/dpp_ppu.c | 344 + .../sdk/source/dev/module/se/Kbuild.include | 2 + .../sdk/source/dev/module/se/dpp_etcam.c | 784 + .../en_np/sdk/source/dev/module/se/dpp_se.c | 1166 + .../sdk/source/dev/module/se/dpp_stat_car.c | 4544 + .../sdk/source/dev/module/se/dpp_stat_cfg.c | 288 + .../source/dev/module/se_apt/Kbuild.include | 2 + .../source/dev/module/se_apt/dpp_apt_se_acl.c | 426 + .../dev/module/se_apt/dpp_apt_se_comm.c | 391 + .../source/dev/module/se_apt/dpp_apt_se_ddr.c | 59 + .../dev/module/se_apt/dpp_apt_se_eram.c | 214 + .../dev/module/se_apt/dpp_apt_se_hash.c | 594 + .../source/dev/module/se_apt/dpp_apt_se_res.c | 1323 + .../source/dev/module/table/Kbuild.include | 4 + .../dev/module/table/sdt/Kbuild.include | 2 + .../sdk/source/dev/module/table/sdt/dpp_sdt.c | 611 + .../source/dev/module/table/sdt/dpp_sdt_mgr.c | 328 + .../source/dev/module/table/se/Kbuild.include | 2 + .../sdk/source/dev/module/table/se/dpp_acl.c | 703 + .../dev/module/table/se/dpp_dtb_table.c | 8359 ++ .../dev/module/table/se/dpp_dtb_table_api.c | 1368 + .../sdk/source/dev/module/table/se/dpp_hash.c | 3716 + .../source/dev/module/table/se/dpp_hash_crc.c | 376 + .../source/dev/module/table/se/dpp_se_cfg.c | 284 + .../sdk/source/dev/module/tm/Kbuild.include | 2 + .../en_np/sdk/source/dev/module/tm/dpp_tm.c | 20004 ++++ .../en_np/sdk/source/dev/reg/Kbuild.include | 2 + .../en_np/sdk/source/dev/reg/dpp_module.c | 311 + .../en_np/sdk/source/dev/reg/dpp_pci.c | 90 + .../en_np/sdk/source/dev/reg/dpp_reg_api.c | 571 + .../en_np/sdk/source/dev/reg/dpp_reg_info.c | 
98226 ++++++++++++++++ .../dinghai/en_np/table/Kbuild.include | 4 + .../dinghai/en_np/table/include/dpp_tbl_api.h | 565 + .../dinghai/en_np/table/include/dpp_tbl_bc.h | 37 + .../dinghai/en_np/table/include/dpp_tbl_cfg.h | 26 + .../en_np/table/include/dpp_tbl_comm.h | 95 + .../en_np/table/include/dpp_tbl_diag.h | 518 + .../dinghai/en_np/table/include/dpp_tbl_fd.h | 26 + .../en_np/table/include/dpp_tbl_ipsec.h | 26 + .../dinghai/en_np/table/include/dpp_tbl_lag.h | 29 + .../dinghai/en_np/table/include/dpp_tbl_mac.h | 26 + .../dinghai/en_np/table/include/dpp_tbl_mc.h | 41 + .../en_np/table/include/dpp_tbl_pkt_cap.h | 251 + .../en_np/table/include/dpp_tbl_plcr.h | 28 + .../en_np/table/include/dpp_tbl_port.h | 26 + .../en_np/table/include/dpp_tbl_promisc.h | 38 + .../dinghai/en_np/table/include/dpp_tbl_ptp.h | 26 + .../dinghai/en_np/table/include/dpp_tbl_qid.h | 28 + .../en_np/table/include/dpp_tbl_rdma.h | 33 + .../en_np/table/include/dpp_tbl_stat.h | 193 + .../dinghai/en_np/table/include/dpp_tbl_tm.h | 60 + .../en_np/table/include/dpp_tbl_uplink.h | 26 + .../en_np/table/include/dpp_tbl_vlan.h | 29 + .../dinghai/en_np/table/include/dpp_tbl_vqm.h | 26 + .../dinghai/en_np/table/source/Kbuild.include | 2 + .../dinghai/en_np/table/source/dpp_tbl_bc.c | 227 + .../dinghai/en_np/table/source/dpp_tbl_cfg.c | 256 + .../dinghai/en_np/table/source/dpp_tbl_comm.c | 429 + .../dinghai/en_np/table/source/dpp_tbl_diag.c | 4539 + .../dinghai/en_np/table/source/dpp_tbl_fd.c | 322 + .../en_np/table/source/dpp_tbl_ipsec.c | 125 + .../dinghai/en_np/table/source/dpp_tbl_lag.c | 371 + .../dinghai/en_np/table/source/dpp_tbl_mac.c | 809 + .../dinghai/en_np/table/source/dpp_tbl_mc.c | 1267 + .../en_np/table/source/dpp_tbl_pkt_cap.c | 1315 + .../dinghai/en_np/table/source/dpp_tbl_plcr.c | 147 + .../dinghai/en_np/table/source/dpp_tbl_port.c | 428 + .../en_np/table/source/dpp_tbl_promisc.c | 193 + .../dinghai/en_np/table/source/dpp_tbl_ptp.c | 53 + .../dinghai/en_np/table/source/dpp_tbl_qid.c | 235 
+ .../dinghai/en_np/table/source/dpp_tbl_rdma.c | 173 + .../dinghai/en_np/table/source/dpp_tbl_stat.c | 1087 + .../en_np/table/source/dpp_tbl_stream.c | 125 + .../dinghai/en_np/table/source/dpp_tbl_tm.c | 208 + .../en_np/table/source/dpp_tbl_uplink.c | 137 + .../dinghai/en_np/table/source/dpp_tbl_vhca.c | 98 + .../dinghai/en_np/table/source/dpp_tbl_vlan.c | 188 + .../dinghai/en_np/table/source/dpp_tbl_vqm.c | 207 + .../ethernet/dinghai/en_np/tools/dpp_tools.py | 136 + .../ethernet/dinghai/en_np/tools/readme.txt | 2 + drivers/net/ethernet/dinghai/en_pf.c | 4339 + drivers/net/ethernet/dinghai/en_pf.h | 362 + .../ethernet/dinghai/en_pf/en_pf_devlink.c | 126 + .../ethernet/dinghai/en_pf/en_pf_devlink.h | 14 + drivers/net/ethernet/dinghai/en_pf/en_pf_eq.c | 606 + drivers/net/ethernet/dinghai/en_pf/en_pf_eq.h | 65 + .../net/ethernet/dinghai/en_pf/en_pf_events.c | 549 + .../net/ethernet/dinghai/en_pf/en_pf_events.h | 17 + .../net/ethernet/dinghai/en_pf/en_pf_irq.c | 190 + .../net/ethernet/dinghai/en_pf/en_pf_irq.h | 27 + drivers/net/ethernet/dinghai/en_pf/msg_func.c | 3207 + drivers/net/ethernet/dinghai/en_pf/msg_func.h | 33 + .../net/ethernet/dinghai/en_ptp/tod_driver.c | 525 + .../net/ethernet/dinghai/en_ptp/tod_driver.h | 23 + .../ethernet/dinghai/en_ptp/tod_driver_stub.c | 62 + .../net/ethernet/dinghai/en_ptp/zxdh_ptp.c | 1862 + .../net/ethernet/dinghai/en_ptp/zxdh_ptp.h | 148 + .../ethernet/dinghai/en_ptp/zxdh_ptp_common.h | 11 + .../ethernet/dinghai/en_ptp/zxdh_ptp_regs.h | 473 + drivers/net/ethernet/dinghai/en_sf.c | 1158 + drivers/net/ethernet/dinghai/en_sf.h | 108 + .../ethernet/dinghai/en_sf/en_sf_devlink.c | 126 + .../ethernet/dinghai/en_sf/en_sf_devlink.h | 14 + drivers/net/ethernet/dinghai/en_sf/en_sf_eq.c | 414 + drivers/net/ethernet/dinghai/en_sf/en_sf_eq.h | 77 + .../net/ethernet/dinghai/en_sf/en_sf_irq.c | 4 + .../net/ethernet/dinghai/en_sf/en_sf_irq.h | 24 + .../net/ethernet/dinghai/en_tsn/zxdh_tsn.c | 73 + .../net/ethernet/dinghai/en_tsn/zxdh_tsn.h | 
104 + .../ethernet/dinghai/en_tsn/zxdh_tsn_comm.h | 223 + .../ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.c | 837 + .../ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.h | 49 + .../ethernet/dinghai/en_tsn/zxdh_tsn_reg.c | 472 + .../ethernet/dinghai/en_tsn/zxdh_tsn_reg.h | 113 + drivers/net/ethernet/dinghai/en_vf.c | 0 drivers/net/ethernet/dinghai/eq.c | 120 + drivers/net/ethernet/dinghai/events.c | 12 + drivers/net/ethernet/dinghai/health.c | 1226 + drivers/net/ethernet/dinghai/irq_affinity.c | 319 + drivers/net/ethernet/dinghai/lag/lag.c | 869 + drivers/net/ethernet/dinghai/lag/lag.h | 114 + drivers/net/ethernet/dinghai/lag/lag_procfs.c | 58 + drivers/net/ethernet/dinghai/log.c | 4 + drivers/net/ethernet/dinghai/msg_common.h | 1147 + drivers/net/ethernet/dinghai/pci_irq.c | 331 + drivers/net/ethernet/dinghai/plcr.c | 2849 + drivers/net/ethernet/dinghai/plcr.h | 413 + drivers/net/ethernet/dinghai/slib.c | 168 + drivers/net/ethernet/dinghai/slib.h | 25 + drivers/net/ethernet/dinghai/sriov_sysfs.c | 1839 + drivers/net/ethernet/dinghai/xarray.c | 1933 + .../zf_mpf/bar_chan_user/normal_send_eg.c | 113 + .../zf_mpf/bar_chan_user/pci_res_query_eg.c | 201 + .../net/ethernet/dinghai/zf_mpf/epc/Makefile | 11 + .../ethernet/dinghai/zf_mpf/epc/dmaengine.c | 1632 + .../ethernet/dinghai/zf_mpf/epc/dmaengine.h | 205 + .../dinghai/zf_mpf/epc/pcie-zte-zf-epc.c | 1854 + .../dinghai/zf_mpf/epc/pcie-zte-zf-epc.h | 267 + .../dinghai/zf_mpf/epc/pcie-zte-zf-hdma.c | 752 + .../dinghai/zf_mpf/epc/pcie-zte-zf-hdma.h | 157 + .../dinghai/zf_mpf/epc/pcie-zte-zf-json.c | 196 + .../dinghai/zf_mpf/epc/pcie-zte-zf-json.h | 40 + .../ethernet/dinghai/zf_mpf/epc/virt-dma.c | 137 + .../ethernet/dinghai/zf_mpf/epc/virt-dma.h | 236 + .../dinghai/zf_mpf/epf/pcie-zte-zf-epf.c | 991 + .../dinghai/zf_mpf/epf/pcie-zte-zf-epf.h | 142 + .../dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.c | 228 + .../dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.h | 62 + .../zf_mpf/fuc_hotplug/fuc_hotplug_commom.h | 77 + 
.../zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.c | 72 + .../zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.h | 20 + .../zf_mpf/fuc_hotplug/fuc_hp_app/build.sh | 5 + .../fuc_hotplug/fuc_hp_app/fuc_hp_app.c | 257 + .../fuc_hotplug/fuc_hp_app/fuc_hp_app.h | 48 + drivers/net/ethernet/dinghai/zf_mpf/gdma.c | 551 + drivers/net/ethernet/dinghai/zf_mpf/gdma.h | 63 + .../ethernet/dinghai/zf_mpf/zf_chan_ioctl.c | 549 + .../ethernet/dinghai/zf_mpf/zf_chan_ioctl.h | 114 + .../net/ethernet/dinghai/zf_mpf/zf_events.c | 220 + .../net/ethernet/dinghai/zf_mpf/zf_events.h | 24 + drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.c | 510 + drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.h | 57 + .../ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.c | 59 + .../ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.h | 27 + .../ethernet/dinghai/zf_mpf/zf_mpf_devlink.c | 126 + .../ethernet/dinghai/zf_mpf/zf_mpf_devlink.h | 14 + .../net/ethernet/dinghai/zf_mpf/zf_mpf_eq.c | 517 + .../net/ethernet/dinghai/zf_mpf/zf_mpf_eq.h | 19 + .../net/ethernet/dinghai/zf_mpf/zf_mpf_irq.c | 162 + .../net/ethernet/dinghai/zf_mpf/zf_mpf_irq.h | 49 + .../dinghai/zf_mpf/zf_reset_finish_flag.c | 108 + .../ethernet/dinghai/zf_mpf/zxdh_reset_zf.c | 265 + .../dinghai/zxdh_tools/zxdh_tools_ioctl.c | 2500 + .../dinghai/zxdh_tools/zxdh_tools_ioctl.h | 237 + .../dinghai/zxdh_tools/zxdh_tools_netlink.c | 259 + .../dinghai/zxdh_tools/zxdh_tools_netlink.h | 62 + include/linux/dinghai/device.h | 183 + include/linux/dinghai/devlink.h | 28 + include/linux/dinghai/dh_cmd.h | 316 + include/linux/dinghai/dh_ifc.h | 51 + include/linux/dinghai/dinghai_irq.h | 35 + include/linux/dinghai/driver.h | 416 + include/linux/dinghai/en_aux.h | 71 + include/linux/dinghai/en_sf.h | 118 + include/linux/dinghai/eq.h | 117 + include/linux/dinghai/events.h | 35 + include/linux/dinghai/helper.h | 81 + include/linux/dinghai/kcompat.h | 7330 ++ include/linux/dinghai/kcompat_vfd.h | 189 + include/linux/dinghai/lag.h | 55 + include/linux/dinghai/log.h | 45 + include/linux/dinghai/pci_irq.h | 81 + 
include/linux/dinghai/queue.h | 78 + include/linux/dinghai/xarray.h | 1733 + include/linux/dinghai/zxdh_auxiliary_bus.h | 140 + 484 files changed, 375731 insertions(+) create mode 100644 drivers/infiniband/hw/zrdma/Kconfig create mode 100644 drivers/infiniband/hw/zrdma/Makefile create mode 100644 drivers/infiniband/hw/zrdma/cm.c create mode 100644 drivers/infiniband/hw/zrdma/cm.h create mode 100644 drivers/infiniband/hw/zrdma/configfs.c create mode 100644 drivers/infiniband/hw/zrdma/ctrl.c create mode 100644 drivers/infiniband/hw/zrdma/dbgfs.c create mode 100644 drivers/infiniband/hw/zrdma/dbgfs.h create mode 100644 drivers/infiniband/hw/zrdma/defs.h create mode 100644 drivers/infiniband/hw/zrdma/hmc.c create mode 100644 drivers/infiniband/hw/zrdma/hmc.h create mode 100644 drivers/infiniband/hw/zrdma/hw.c create mode 100644 drivers/infiniband/hw/zrdma/icrdma_hw.c create mode 100644 drivers/infiniband/hw/zrdma/icrdma_hw.h create mode 100644 drivers/infiniband/hw/zrdma/iidc.h create mode 100644 drivers/infiniband/hw/zrdma/linux_kcompat.h create mode 100644 drivers/infiniband/hw/zrdma/main.c create mode 100644 drivers/infiniband/hw/zrdma/main.h create mode 100644 drivers/infiniband/hw/zrdma/manager.c create mode 100644 drivers/infiniband/hw/zrdma/manager.h create mode 100644 drivers/infiniband/hw/zrdma/osdep.h create mode 100644 drivers/infiniband/hw/zrdma/pble.c create mode 100644 drivers/infiniband/hw/zrdma/pble.h create mode 100644 drivers/infiniband/hw/zrdma/private_verbs_cmd.c create mode 100644 drivers/infiniband/hw/zrdma/private_verbs_cmd.h create mode 100644 drivers/infiniband/hw/zrdma/protos.h create mode 100644 drivers/infiniband/hw/zrdma/puda.c create mode 100644 drivers/infiniband/hw/zrdma/puda.h create mode 100644 drivers/infiniband/hw/zrdma/restrack.c create mode 100644 drivers/infiniband/hw/zrdma/restrack.h create mode 100644 drivers/infiniband/hw/zrdma/slib.c create mode 100644 drivers/infiniband/hw/zrdma/slib.h create mode 100644 
drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.c create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/cmdk.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.c create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600_inner.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_pagetable.c create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/common_define.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/hal_smmu.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.c create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/pub_print.h create mode 100644 drivers/infiniband/hw/zrdma/smmu/kernel/pub_return.h create mode 100644 drivers/infiniband/hw/zrdma/srq.c create mode 100644 drivers/infiniband/hw/zrdma/srq.h create mode 100644 drivers/infiniband/hw/zrdma/status.h create mode 100644 drivers/infiniband/hw/zrdma/tc_hmcdma.c create mode 100644 drivers/infiniband/hw/zrdma/tc_hmcdma.h create mode 100644 drivers/infiniband/hw/zrdma/trace.c create mode 100644 drivers/infiniband/hw/zrdma/trace.h create mode 100644 drivers/infiniband/hw/zrdma/trace_cm.h create mode 100644 drivers/infiniband/hw/zrdma/type.h create mode 100644 drivers/infiniband/hw/zrdma/uda.c create mode 100644 drivers/infiniband/hw/zrdma/uda.h create mode 100644 drivers/infiniband/hw/zrdma/uda_d.h create mode 100644 drivers/infiniband/hw/zrdma/uk.c create mode 100644 drivers/infiniband/hw/zrdma/user.h create mode 100644 drivers/infiniband/hw/zrdma/utils.c create mode 100644 drivers/infiniband/hw/zrdma/verbs.c create mode 100644 drivers/infiniband/hw/zrdma/verbs.h create mode 100644 drivers/infiniband/hw/zrdma/vf.c create mode 100644 drivers/infiniband/hw/zrdma/vf.h create mode 100644 
drivers/infiniband/hw/zrdma/virtchnl.c create mode 100644 drivers/infiniband/hw/zrdma/virtchnl.h create mode 100644 drivers/infiniband/hw/zrdma/ws.h create mode 100644 drivers/infiniband/hw/zrdma/zrdma-abi.h create mode 100644 drivers/infiniband/hw/zrdma/zrdma.h create mode 100644 drivers/infiniband/hw/zrdma/zrdma_kcompat.c create mode 100644 drivers/infiniband/hw/zrdma/zrdma_kcompat.h create mode 100644 drivers/infiniband/hw/zrdma/zxdh_auxiliary_bus.h create mode 100644 drivers/infiniband/hw/zrdma/zxdh_user_ioctl_cmds.h create mode 100644 drivers/infiniband/hw/zrdma/zxdh_user_ioctl_verbs.h create mode 100644 drivers/net/ethernet/dinghai/Kconfig create mode 100644 drivers/net/ethernet/dinghai/Makefile create mode 100644 drivers/net/ethernet/dinghai/bonding/rdma_ops.c create mode 100644 drivers/net/ethernet/dinghai/bonding/rdma_ops.h create mode 100644 drivers/net/ethernet/dinghai/bonding/zxdh_lag.c create mode 100644 drivers/net/ethernet/dinghai/bonding/zxdh_lag.h create mode 100644 drivers/net/ethernet/dinghai/cmd.c create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_chan_test.c create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h create mode 100644 drivers/net/ethernet/dinghai/cmd/msg_main.c create mode 100644 drivers/net/ethernet/dinghai/devlink.c create mode 100644 drivers/net/ethernet/dinghai/dh_cmd.c create mode 100644 drivers/net/ethernet/dinghai/dh_procfs.c create mode 100644 drivers/net/ethernet/dinghai/dh_procfs.h create mode 100644 drivers/net/ethernet/dinghai/en_aux.c create mode 100644 drivers/net/ethernet/dinghai/en_aux.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h create mode 100644 
drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_eq.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_eq.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_events.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_events.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/priv_queue.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/priv_queue.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/queue.c create mode 100644 drivers/net/ethernet/dinghai/en_aux/queue.h create mode 100644 drivers/net/ethernet/dinghai/en_aux/selq.c create mode 100644 drivers/net/ethernet/dinghai/en_auxiliary.c create mode 100644 drivers/net/ethernet/dinghai/en_ethtool/ethtool.c create mode 100644 drivers/net/ethernet/dinghai/en_ethtool/ethtool.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf.c create mode 100644 drivers/net/ethernet/dinghai/en_mpf.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.c create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.c create mode 
100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.c create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.c create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.c create mode 100644 drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.h create mode 100644 drivers/net/ethernet/dinghai/en_mpf/tc.c create mode 100644 drivers/net/ethernet/dinghai/en_np/.clang-format create mode 100644 drivers/net/ethernet/dinghai/en_np/Makefile create mode 100644 drivers/net/ethernet/dinghai/en_np/Makefile.include create mode 100644 drivers/net/ethernet/dinghai/en_np/agent/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_channel.h create mode 100644 drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_se_res.h create mode 100644 drivers/net/ethernet/dinghai/en_np/agent/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/agent/source/dpp_agent_channel.c create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_init.h create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_shell.h create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_init.c create mode 100644 drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_shell.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_avl_tree.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_double_link.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_doublelink_index.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_ctrl.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill_type.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_reserve.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_liststack.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_rb_tree.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_socket.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_thread.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_common.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private_top.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/include/zxic_slt.h create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_double_link.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_index_fill.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_liststack.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_mutex_lock.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_print.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_rb_tree.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_common.c create mode 100644 drivers/net/ethernet/dinghai/en_np/comm/source/zxic_private_top.c create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_acl.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_eram.h create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_hash.h create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_init.h create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_sdt.h create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_acl.c create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_eram.c create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_hash.c create mode 100644 drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_init.c create mode 100644 drivers/net/ethernet/dinghai/en_np/fc/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/fc/include/dpp_drv_fc.h create mode 100644 drivers/net/ethernet/dinghai/en_np/fc/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/fc/source/dpp_drv_fc.c create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/README.txt create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/api/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/api/include/dpp_tbl_fd_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/api/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/api/source/dpp_fd_cfg_api.c create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/api/source/dpp_flow_struct.c create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/common/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/common/include/dpp_flow_comm.h create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/common/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/common/source/dpp_flow_comm.c create mode 100644 
drivers/net/ethernet/dinghai/en_np/flow/tool.py create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_api.xml create mode 100644 drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_demo.xml create mode 100644 drivers/net/ethernet/dinghai/en_np/init/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/init/include/dpp_np_init.h create mode 100644 drivers/net/ethernet/dinghai/en_np/init/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c create mode 100644 drivers/net/ethernet/dinghai/en_np/netlink/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/netlink/include/dpp_netlink.h create mode 100644 drivers/net/ethernet/dinghai/en_np/netlink/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/netlink/source/dpp_netlink.c create mode 100644 drivers/net/ethernet/dinghai/en_np/qos/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/qos/include/dpp_drv_qos.h create mode 100644 drivers/net/ethernet/dinghai/en_np/qos/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/qos/source/dpp_drv_qos.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_apt_se_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_dtb_table_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pbu_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pktrx_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_ppu_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_reg_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_se_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_stat_api.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_tm_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_type_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_dev.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_init.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/init/dpp_kernel_init.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pbu.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pktrx_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/ppu/dpp_ppu.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_apt_se.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_etcam.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_se.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_car.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_def.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_mgr.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_acl.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_dtb_table.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash_crc.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_se_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/tm/dpp_tm.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_axi_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_cfg_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb4k_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_etm_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_mem_info.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_module.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_nppu_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_pci.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu4k_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ptptm_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_info.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_struct.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se4k_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu0_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu14k_reg.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu1_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat4k_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_trpg_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_tsn_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/include/diag/dpp_se_diag.h create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_dev.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_init.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/dpp_kernel_init.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb_cfg.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pbu.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pktrx_cfg.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/dpp_ppu.c create mode 100644 
drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_etcam.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_se.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_car.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_cfg.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_acl.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_comm.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_ddr.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_eram.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_hash.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_res.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt_mgr.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_acl.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table_api.c create mode 100644 
drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash_crc.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_se_cfg.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/dpp_tm.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_module.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_pci.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_api.c create mode 100644 drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_info.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_api.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_bc.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_cfg.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_comm.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_diag.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_fd.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ipsec.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_lag.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mac.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mc.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_pkt_cap.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_plcr.h create mode 100644 
drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_port.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_promisc.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ptp.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_qid.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_rdma.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_stat.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_tm.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_uplink.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vlan.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vqm.h create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/Kbuild.include create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_bc.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_cfg.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_comm.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_diag.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_fd.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ipsec.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_lag.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mac.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mc.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_pkt_cap.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_plcr.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_port.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_promisc.c create mode 100644 
drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ptp.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_qid.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_rdma.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stat.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stream.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_tm.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_uplink.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vhca.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vlan.c create mode 100644 drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vqm.c create mode 100644 drivers/net/ethernet/dinghai/en_np/tools/dpp_tools.py create mode 100644 drivers/net/ethernet/dinghai/en_np/tools/readme.txt create mode 100644 drivers/net/ethernet/dinghai/en_pf.c create mode 100644 drivers/net/ethernet/dinghai/en_pf.h create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.c create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.h create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_eq.c create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_eq.h create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_events.c create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_events.h create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_irq.c create mode 100644 drivers/net/ethernet/dinghai/en_pf/en_pf_irq.h create mode 100644 drivers/net/ethernet/dinghai/en_pf/msg_func.c create mode 100644 drivers/net/ethernet/dinghai/en_pf/msg_func.h create mode 100644 drivers/net/ethernet/dinghai/en_ptp/tod_driver.c create mode 100644 drivers/net/ethernet/dinghai/en_ptp/tod_driver.h create mode 100644 drivers/net/ethernet/dinghai/en_ptp/tod_driver_stub.c create mode 100644 
drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c create mode 100644 drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.h create mode 100644 drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_common.h create mode 100644 drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_regs.h create mode 100644 drivers/net/ethernet/dinghai/en_sf.c create mode 100644 drivers/net/ethernet/dinghai/en_sf.h create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.c create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.h create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_eq.c create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_eq.h create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_irq.c create mode 100644 drivers/net/ethernet/dinghai/en_sf/en_sf_irq.h create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.h create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_comm.h create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.c create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.h create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.c create mode 100644 drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.h create mode 100644 drivers/net/ethernet/dinghai/en_vf.c create mode 100644 drivers/net/ethernet/dinghai/eq.c create mode 100644 drivers/net/ethernet/dinghai/events.c create mode 100644 drivers/net/ethernet/dinghai/health.c create mode 100644 drivers/net/ethernet/dinghai/irq_affinity.c create mode 100644 drivers/net/ethernet/dinghai/lag/lag.c create mode 100644 drivers/net/ethernet/dinghai/lag/lag.h create mode 100644 drivers/net/ethernet/dinghai/lag/lag_procfs.c create mode 100644 drivers/net/ethernet/dinghai/log.c create mode 100644 drivers/net/ethernet/dinghai/msg_common.h create mode 100644 drivers/net/ethernet/dinghai/pci_irq.c create mode 100644 drivers/net/ethernet/dinghai/plcr.c create mode 100644 
drivers/net/ethernet/dinghai/plcr.h create mode 100644 drivers/net/ethernet/dinghai/slib.c create mode 100644 drivers/net/ethernet/dinghai/slib.h create mode 100644 drivers/net/ethernet/dinghai/sriov_sysfs.c create mode 100644 drivers/net/ethernet/dinghai/xarray.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/normal_send_eg.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/pci_res_query_eg.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/Makefile create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_commom.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.h create mode 100755 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/build.sh create mode 100644 
drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/gdma.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/gdma.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_events.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_events.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.h create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zf_reset_finish_flag.c create mode 100644 drivers/net/ethernet/dinghai/zf_mpf/zxdh_reset_zf.c create mode 100644 drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.c create mode 100644 drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.h create mode 100644 drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.c create mode 100644 drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.h create mode 100755 include/linux/dinghai/device.h create mode 100755 include/linux/dinghai/devlink.h create mode 100644 include/linux/dinghai/dh_cmd.h create mode 100755 include/linux/dinghai/dh_ifc.h create mode 100755 include/linux/dinghai/dinghai_irq.h create mode 100755 
include/linux/dinghai/driver.h create mode 100644 include/linux/dinghai/en_aux.h create mode 100755 include/linux/dinghai/en_sf.h create mode 100755 include/linux/dinghai/eq.h create mode 100755 include/linux/dinghai/events.h create mode 100755 include/linux/dinghai/helper.h create mode 100644 include/linux/dinghai/kcompat.h create mode 100644 include/linux/dinghai/kcompat_vfd.h create mode 100644 include/linux/dinghai/lag.h create mode 100644 include/linux/dinghai/log.h create mode 100755 include/linux/dinghai/pci_irq.h create mode 100644 include/linux/dinghai/queue.h create mode 100644 include/linux/dinghai/xarray.h create mode 100644 include/linux/dinghai/zxdh_auxiliary_bus.h diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index a5827d11e934..b6b89ac9c7df 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -111,5 +111,6 @@ source "drivers/infiniband/ulp/isert/Kconfig" source "drivers/infiniband/ulp/rtrs/Kconfig" source "drivers/infiniband/ulp/opa_vnic/Kconfig" +source "drivers/infiniband/hw/zrdma/Kconfig" endif # INFINIBAND diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1211f4317a9f..9b6ea9f7564a 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_INFINIBAND_HNS) += hns/ obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/ obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/ +obj-m += zrdma/ diff --git a/drivers/infiniband/hw/zrdma/Kconfig b/drivers/infiniband/hw/zrdma/Kconfig new file mode 100644 index 000000000000..974397e4247a --- /dev/null +++ b/drivers/infiniband/hw/zrdma/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +config INFINIBAND_ZRDMA + tristate "ZTE Ethernet Protocol Driver for RDMA" + depends on NET_VENDOR_DINGHAI + depends on (X86 || ARM64) + help + Say Y or M here to enable support for the ZXDH ZRDMA RDMA device driver. 
+ + If you choose to build this driver as a module, it will be built as + a module named zrdma. diff --git a/drivers/infiniband/hw/zrdma/Makefile b/drivers/infiniband/hw/zrdma/Makefile new file mode 100644 index 000000000000..5735f4e185bd --- /dev/null +++ b/drivers/infiniband/hw/zrdma/Makefile @@ -0,0 +1,49 @@ +ccflags-y += -I$(zrdma) +ccflags-y += -I$(zrdma)/smmu/kernel +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/table/include +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/comm/include +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/driver/include +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/se +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/api +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/chip +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/reg +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/dma +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/ppu +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/table +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/dev/module/tm +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/sdk/include/diag +subdir-ccflags-y += -I$(src)/../../../net/ethernet/dinghai/en_np/agent/include + +obj-$(CONFIG_INFINIBAND_ZRDMA) += zrdma.o + +zrdma-y := \ + slib.o \ + main.o \ + manager.o \ + verbs.o \ + private_verbs_cmd.o \ + ctrl.o \ + hw.o \ + icrdma_hw.o \ + hmc.o \ + pble.o \ + utils.o \ + uk.o \ + dbgfs.o \ + configfs.o \ + cm.o \ + srq.o \ + puda.o \ + uda.o \ + vf.o \ + virtchnl.o \ + 
restrack.o \ + trace.o \ + tc_hmcdma.o \ + zrdma_kcompat.o \ + smmu/kernel/adk_mmu600.o \ + smmu/kernel/cmdk_mmu600.o \ + smmu/kernel/cmdk_pagetable.o \ + smmu/kernel/ioctl_mmu600.o diff --git a/drivers/infiniband/hw/zrdma/cm.c b/drivers/infiniband/hw/zrdma/cm.c new file mode 100644 index 000000000000..bfb986df53b6 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/cm.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "trace.h" + +/** + * zxdh_copy_ip_htonl - copy IP address from host to network order + * @dst: IP address in network order (big endian) + * @src: IP address in host order + */ +void zxdh_copy_ip_htonl(__be32 *dst, u32 *src) +{ + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst = htonl(*src); +} + +/** + * zxdh_netdev_vlan_ipv6 - Gets the netdev and mac + * @addr: local IPv6 address + * @vlan_id: vlan id for the given IPv6 address + * @mac: mac address for the given IPv6 address + * + * Returns the net_device of the IPv6 address and also sets the + * vlan id and mac for that address. 
+ */ +struct net_device *zxdh_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac) +{ + struct net_device *ip_dev = NULL; + struct in6_addr laddr6; + + if (!IS_ENABLED(CONFIG_IPV6)) + return NULL; + + zxdh_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr); + if (vlan_id) + *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */ + if (mac) + eth_zero_addr(mac); + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + if (ip_dev->dev_addr && mac) + ether_addr_copy(mac, ip_dev->dev_addr); + break; + } + } + rcu_read_unlock(); + + return ip_dev; +} + +/** + * zxdh_get_vlan_ipv4 - Returns the vlan_id for IPv4 address + * @addr: local IPv4 address + */ +u16 zxdh_get_vlan_ipv4(u32 *addr) +{ + struct net_device *netdev; + u16 vlan_id = 0xFFFF; + + netdev = ip_dev_find(&init_net, htonl(addr[0])); + if (netdev) { + vlan_id = rdma_vlan_dev_vlan_id(netdev); + dev_put(netdev); + } + + return vlan_id; +} + +/** + * zxdh_ipv4_is_lpb - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +bool zxdh_ipv4_is_lpb(u32 loc_addr, u32 rem_addr) +{ + return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); +} + +/** + * zxdh_ipv6_is_lpb - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +bool zxdh_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr) +{ + struct in6_addr raddr6; + + zxdh_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); + + return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); +} + +/** + * zxdh_aeq_qp_event - called by worker thread to disconnect qp + * @iwqp: associate qp for the connection + */ +static void zxdh_aeq_qp_event(struct zxdh_qp *iwqp) +{ + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + unsigned long flags; + struct ib_qp_attr attr; + + spin_lock_irqsave(&iwqp->lock, flags); + + if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) { + 
spin_unlock_irqrestore(&iwqp->lock, flags); + return; + } + spin_unlock_irqrestore(&iwqp->lock, flags); + + attr.qp_state = IB_QPS_ERR; + zxdh_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + zxdh_ib_qp_event(iwqp, qp->event_type); +} + +/** + * zxdh_aeq_qp_worker - worker for aeq handle qp + * @work: points or disconn structure + */ +static void zxdh_aeq_qp_worker(struct work_struct *work) +{ + struct aeq_qp_work *dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + + kfree(dwork); + zxdh_aeq_qp_event(iwqp); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_qp_disconn - when a connection is being closed + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_qp_disconn(struct zxdh_qp *iwqp) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct aeq_qp_work *work; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + if (!iwdev->rf->qp_table[iwqp->sc_qp.qp_ctx_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", iwqp->sc_qp.qp_ctx_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_qp_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} + +/** + * zxdh_aeq_entry_err_worker - worker for aeq 8f5 handle qpc + * @work: work task structure + */ +static void zxdh_aeq_entry_err_worker(struct work_struct *work) +{ + struct aeq_qp_work *dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_dma_mem qpc_buf = {}; + u64 temp; + u32 tx_last_ack_psn; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = 
dma_alloc_coherent(iwqp->iwdev->rf->sc_dev.hw->device, + qpc_buf.size, &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("no memory\n"); + return; + } + + kfree(dwork); + zxdh_query_qpc(qp, &qpc_buf); + get_64bit_val((__le64 *)qpc_buf.va, 0, &temp); + tx_last_ack_psn = FIELD_GET(RDMAQPC_TX_LAST_ACK_PSN, temp); + if (tx_last_ack_psn != qp->aeq_entry_err_last_psn) { + // qp restart success + qp->entry_err_cnt = 0; + } + qp->aeq_entry_err_last_psn = tx_last_ack_psn; + + if (qp->entry_err_cnt >= ZXDH_AEQ_RETRY_LIMIT) { + // AEQ reported. counts out of limit. + zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + } else { + // AEQ not reported + pr_info("8f5 entry_err_cnt: %d\n", qp->entry_err_cnt); + qp->entry_err_cnt++; + } + + dma_free_coherent(iwqp->iwdev->rf->sc_dev.hw->device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_process_entry_err - query qpc when aeq 8f5 is triggered + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_process_entry_err(struct zxdh_qp *iwqp) +{ + struct aeq_qp_work *work; + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + + if (!iwdev->rf->qp_table[iwqp->sc_qp.qp_ctx_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", iwqp->sc_qp.qp_ctx_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_entry_err_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} + +/** + * zxdh_aeq_retry_err_worker - worker for aeq 8f3 handle qpc + * @work: work task structure + */ +static void zxdh_aeq_retry_err_worker(struct work_struct *work) +{ + struct aeq_qp_work 
*dwork = + container_of(work, struct aeq_qp_work, work); + struct zxdh_qp *iwqp = dwork->iwqp; + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_dma_mem qpc_buf = {}; + u64 temp; + u32 ack_err_flag, tx_last_ack_psn, retry_cqe_sq_opcode, recv_err_flag; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwqp->iwdev->rf->sc_dev.hw->device, + qpc_buf.size, &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("no memory\n"); + return; + } + + kfree(dwork); + zxdh_query_qpc(qp, &qpc_buf); + get_64bit_val((__le64 *)qpc_buf.va, 0, &temp); + tx_last_ack_psn = FIELD_GET(RDMAQPC_TX_LAST_ACK_PSN, temp); + get_64bit_val((__le64 *)qpc_buf.va, 56, &temp); + retry_cqe_sq_opcode = + FIELD_GET(RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG, temp); + get_64bit_val((__le64 *)qpc_buf.va, 48, &temp); + recv_err_flag = FIELD_GET(RDMAQPC_TX_RECV_ERR_FLAG, temp); + get_64bit_val((__le64 *)qpc_buf.va, 40, &temp); + ack_err_flag = FIELD_GET(BIT_ULL(48), temp); + + if (ack_err_flag != 1) { + pr_info("qp %d has been restarted!\n", qp->qp_uk.qp_id); + goto free_rsrc; + } + + if (!((retry_cqe_sq_opcode >= 32) && + (recv_err_flag == 1 || recv_err_flag == 2))) { + pr_info("Timeout! 800f3 aeq reported!\n"); + zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + goto free_rsrc; + } + + if (tx_last_ack_psn != qp->aeq_retry_err_last_psn) { + // qp restart success + pr_info("retry_err_cnt reset\n"); + qp->retry_err_cnt = 0; + } + qp->aeq_retry_err_last_psn = tx_last_ack_psn; + + if (qp->retry_err_cnt >= ZXDH_AEQ_RETRY_LIMIT) { + // AEQ reported. counts out of limit. 
+ zxdh_ib_qp_event(iwqp, ZXDH_QP_EVENT_CATASTROPHIC); + } else { + // AEQ not reported + pr_info("8f3 retry_err_cnt: %d\n", qp->retry_err_cnt); + qp->retry_err_cnt++; + } +free_rsrc: + dma_free_coherent(iwqp->iwdev->rf->sc_dev.hw->device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_aeq_process_retry_err - query qpc when aeq 8f3 is triggered + * @iwqp: associated qp for the connection + */ +void zxdh_aeq_process_retry_err(struct zxdh_qp *iwqp) +{ + struct aeq_qp_work *work; + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("kzalloc work failed!\n"); + return; + } + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + + if (!iwdev->rf->qp_table[iwqp->sc_qp.qp_ctx_num - + iwdev->rf->sc_dev.base_qpn]) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + zxdh_dbg(iwdev_to_idev(iwdev), + "CM: qp_id %d is already freed\n", iwqp->sc_qp.qp_ctx_num); + kfree(work); + return; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, zxdh_aeq_retry_err_worker); + queue_work(iwdev->cleanup_wq, &work->work); +} diff --git a/drivers/infiniband/hw/zrdma/cm.h b/drivers/infiniband/hw/zrdma/cm.h new file mode 100644 index 000000000000..48018955c98a --- /dev/null +++ b/drivers/infiniband/hw/zrdma/cm.h @@ -0,0 +1,339 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_CM_H +#define ZXDH_CM_H + +#define ZXDH_MPA_REQUEST_ACCEPT 1 +#define ZXDH_MPA_REQUEST_REJECT 2 + +/* IETF MPA -- defines */ +#define IEFT_MPA_KEY_REQ "MPA ID Req Frame" +#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" +#define IETF_MPA_KEY_SIZE 16 +#define IETF_MPA_VER 1 +#define IETF_MAX_PRIV_DATA_LEN 512 +#define IETF_MPA_FRAME_SIZE 20 +#define IETF_RTR_MSG_SIZE 4 +#define IETF_MPA_V2_FLAG 0x10 +#define SNDMARKER_SEQNMASK 
0x000001ff +#define ZXDH_MAX_IETF_SIZE 32 + +/* IETF RTR MSG Fields */ +#define IETF_PEER_TO_PEER 0x8000 +#define IETF_FLPDU_ZERO_LEN 0x4000 +#define IETF_RDMA0_WRITE 0x8000 +#define IETF_RDMA0_READ 0x4000 +#define IETF_NO_IRD_ORD 0x3fff + +#define MAX_PORTS 65536 + +#define ZXDH_PASSIVE_STATE_INDICATED 0 +#define ZXDH_DO_NOT_SEND_RESET_EVENT 1 +#define ZXDH_SEND_RESET_EVENT 2 + +#define MAX_ZXDH_IFS 4 + +#define SET_ACK 1 +#define SET_SYN 2 +#define SET_FIN 4 +#define SET_RST 8 + +#define TCP_OPTIONS_PADDING 3 + +#define ZXDH_DEFAULT_RETRYS 64 +#define ZXDH_DEFAULT_RETRANS 8 +#define ZXDH_DEFAULT_TTL 0x40 +#define ZXDH_DEFAULT_RTT_VAR 6 +#define ZXDH_DEFAULT_SS_THRESH 0x3fffffff +#define ZXDH_DEFAULT_REXMIT_THRESH 8 + +#define ZXDH_RETRY_TIMEOUT HZ +#define ZXDH_SHORT_TIME 10 +#define ZXDH_LONG_TIME (2 * HZ) +#define ZXDH_MAX_TIMEOUT ((unsigned long)(12 * HZ)) + +#define ZXDH_CM_HASHTABLE_SIZE 1024 +#define ZXDH_CM_TCP_TIMER_INTERVAL 3000 +#define ZXDH_CM_DEFAULT_MTU 1540 +#define ZXDH_CM_DEFAULT_FRAME_CNT 10 +#define ZXDH_CM_THREAD_STACK_SIZE 256 +#define ZXDH_CM_DEFAULT_RCV_WND 64240 +#define ZXDH_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC +#define ZXDH_CM_DEFAULT_RCV_WND_SCALE 2 +#define ZXDH_CM_DEFAULT_FREE_PKTS 10 +#define ZXDH_CM_FREE_PKT_LO_WATERMARK 2 +#define ZXDH_CM_DEFAULT_MSS 536 +#define ZXDH_CM_DEFAULT_MPA_VER 2 +#define ZXDH_CM_DEFAULT_SEQ 0x159bf75f +#define ZXDH_CM_DEFAULT_LOCAL_ID 0x3b47 +#define ZXDH_CM_DEFAULT_SEQ2 0x18ed5740 +#define ZXDH_CM_DEFAULT_LOCAL_ID2 0xb807 +#define ZXDH_MAX_CM_BUF (ZXDH_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN) + +/* cm node transition states */ +enum zxdh_cm_node_state { + ZXDH_CM_STATE_UNKNOWN, + ZXDH_CM_STATE_INITED, + ZXDH_CM_STATE_LISTENING, + ZXDH_CM_STATE_SYN_RCVD, + ZXDH_CM_STATE_SYN_SENT, + ZXDH_CM_STATE_ONE_SIDE_ESTABLISHED, + ZXDH_CM_STATE_ESTABLISHED, + ZXDH_CM_STATE_ACCEPTING, + ZXDH_CM_STATE_MPAREQ_SENT, + ZXDH_CM_STATE_MPAREQ_RCVD, + ZXDH_CM_STATE_MPAREJ_RCVD, + ZXDH_CM_STATE_OFFLOADED, + ZXDH_CM_STATE_FIN_WAIT1, 
+ ZXDH_CM_STATE_FIN_WAIT2, + ZXDH_CM_STATE_CLOSE_WAIT, + ZXDH_CM_STATE_TIME_WAIT, + ZXDH_CM_STATE_LAST_ACK, + ZXDH_CM_STATE_CLOSING, + ZXDH_CM_STATE_LISTENER_DESTROYED, + ZXDH_CM_STATE_CLOSED, +}; + +enum mpa_frame_ver { + IETF_MPA_V1 = 1, + IETF_MPA_V2 = 2, +}; + +enum mpa_frame_key { + MPA_KEY_REQUEST, + MPA_KEY_REPLY, +}; + +enum send_rdma0 { + SEND_RDMA_READ_ZERO = 1, + SEND_RDMA_WRITE_ZERO = 2, +}; + +enum zxdh_tcpip_pkt_type { + ZXDH_PKT_TYPE_UNKNOWN, + ZXDH_PKT_TYPE_SYN, + ZXDH_PKT_TYPE_SYNACK, + ZXDH_PKT_TYPE_ACK, + ZXDH_PKT_TYPE_FIN, + ZXDH_PKT_TYPE_RST, +}; + +enum zxdh_cm_listener_state { + ZXDH_CM_LISTENER_PASSIVE_STATE = 1, + ZXDH_CM_LISTENER_ACTIVE_STATE = 2, + ZXDH_CM_LISTENER_EITHER_STATE = 3, +}; + +/* CM event codes */ +enum zxdh_cm_event_type { + ZXDH_CM_EVENT_UNKNOWN, + ZXDH_CM_EVENT_ESTABLISHED, + ZXDH_CM_EVENT_MPA_REQ, + ZXDH_CM_EVENT_MPA_CONNECT, + ZXDH_CM_EVENT_MPA_ACCEPT, + ZXDH_CM_EVENT_MPA_REJECT, + ZXDH_CM_EVENT_MPA_ESTABLISHED, + ZXDH_CM_EVENT_CONNECTED, + ZXDH_CM_EVENT_RESET, + ZXDH_CM_EVENT_ABORTED, +}; + +struct ietf_mpa_v1 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + u8 priv_data[]; +}; + +struct ietf_rtr_msg { + __be16 ctrl_ird; + __be16 ctrl_ord; +}; + +struct ietf_mpa_v2 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + struct ietf_rtr_msg rtr_msg; + u8 priv_data[]; +}; + +struct option_base { + u8 optionnum; + u8 len; +}; + +struct option_mss { + u8 optionnum; + u8 len; + __be16 mss; +}; + +struct option_windowscale { + u8 optionnum; + u8 len; + u8 shiftcount; +}; + +union all_known_options { + char eol; + struct option_base base; + struct option_mss mss; + struct option_windowscale windowscale; +}; + +struct zxdh_timer_entry { + struct list_head list; + unsigned long timetosend; /* jiffies */ + struct zxdh_puda_buf *sqbuf; + u32 type; + u32 retrycount; + u32 retranscount; + u32 context; + u32 send_retrans; + int close_when_complete; +}; + +/* CM context params */ 
+struct zxdh_cm_tcp_context { + u8 client; + u32 loc_seq_num; + u32 loc_ack_num; + u32 rem_ack_num; + u32 rcv_nxt; + u32 loc_id; + u32 rem_id; + u32 snd_wnd; + u32 max_snd_wnd; + u32 rcv_wnd; + u32 mss; + u8 snd_wscale; + u8 rcv_wscale; +}; + +struct zxdh_apbvt_entry { + struct hlist_node hlist; + u32 use_cnt; + u16 port; +}; + +struct zxdh_cm_listener { + struct list_head list; + struct iw_cm_id *cm_id; + struct zxdh_cm_core *cm_core; + struct zxdh_device *iwdev; + struct list_head child_listen_list; + struct zxdh_apbvt_entry *apbvt_entry; + enum zxdh_cm_listener_state listener_state; + refcount_t refcnt; + atomic_t pend_accepts_cnt; + u32 loc_addr[4]; + u32 reused_node; + int backlog; + u16 loc_port; + u16 vlan_id; + u8 loc_mac[ETH_ALEN]; + u8 user_pri; + u8 tos; + u8 qhash_set : 1; + u8 ipv4 : 1; +}; + +struct zxdh_kmem_info { + void *addr; + u32 size; +}; + +struct zxdh_mpa_priv_info { + const void *addr; + u32 size; +}; + +struct zxdh_cm_node { + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct zxdh_cm_tcp_context tcp_cntxt; + struct zxdh_cm_core *cm_core; + struct zxdh_timer_entry *send_entry; + struct zxdh_timer_entry *close_entry; + struct zxdh_cm_listener *listener; + struct list_head timer_entry; + struct list_head reset_entry; + struct list_head teardown_entry; + struct zxdh_apbvt_entry *apbvt_entry; + struct rcu_head rcu_head; + struct zxdh_mpa_priv_info pdata; + struct zxdh_sc_ah *ah; + struct ietf_mpa_v2 mpa_v2_frame; + struct zxdh_kmem_info mpa_hdr; + struct iw_cm_id *cm_id; + struct hlist_node list; + struct completion establish_comp; + spinlock_t retrans_list_lock; /* protect CM node rexmit updates*/ + atomic_t passive_state; + refcount_t refcnt; + enum zxdh_cm_node_state state; + enum send_rdma0 send_rdma0_op; + enum mpa_frame_ver mpa_frame_rev; + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + int apbvt_set; + int accept_pend; + u16 vlan_id; + u16 ird_size; + u16 ord_size; + u16 mpav2_ird_ord; + u16 
lsmm_size; + u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN]; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + u8 user_pri; + u8 tos; + u8 ack_rcvd : 1; + u8 qhash_set : 1; + u8 ipv4 : 1; + u8 snd_mark_en : 1; + u8 rcv_mark_en : 1; + u8 do_lpb : 1; + u8 accelerated : 1; +}; + +struct zxdh_cm_core { + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct list_head listen_list; + DECLARE_HASHTABLE(cm_hash_tbl, 8); + DECLARE_HASHTABLE(apbvt_hash_tbl, 8); + struct timer_list tcp_timer; + struct workqueue_struct *event_wq; + spinlock_t ht_lock; /* protect CM node (active side) list */ + spinlock_t listen_list_lock; /* protect listener list */ + spinlock_t apbvt_lock; /*serialize apbvt add/del entries*/ + u64 stats_nodes_created; + u64 stats_nodes_destroyed; + u64 stats_listen_created; + u64 stats_listen_destroyed; + u64 stats_listen_nodes_created; + u64 stats_listen_nodes_destroyed; + u64 stats_lpbs; + u64 stats_accepts; + u64 stats_rejects; + u64 stats_connect_errs; + u64 stats_passive_errs; + u64 stats_pkt_retrans; + u64 stats_backlog_drops; + struct zxdh_puda_buf *(*form_cm_frame)(struct zxdh_cm_node *cm_node, + struct zxdh_kmem_info *options, + struct zxdh_kmem_info *hdr, + struct zxdh_mpa_priv_info *pdata, + u8 flags); + int (*cm_create_ah)(struct zxdh_cm_node *cm_node, bool wait); + void (*cm_free_ah)(struct zxdh_cm_node *cm_node); +}; + +bool zxdh_ipv4_is_lpb(u32 loc_addr, u32 rem_addr); +bool zxdh_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr); +#endif /* ZXDH_CM_H */ diff --git a/drivers/infiniband/hw/zrdma/configfs.c b/drivers/infiniband/hw/zrdma/configfs.c new file mode 100644 index 000000000000..e150a2ee323a --- /dev/null +++ b/drivers/infiniband/hw/zrdma/configfs.c @@ -0,0 +1,1488 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include +#include +#include +#include +#include "main.h" +#ifdef __OFED_4_8__ +#include +#include +#endif + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +enum zxdh_configfs_attr_type { + 
ZXDH_ATTR_IW_DCTCP, + ZXDH_ATTR_IW_TIMELY, + ZXDH_ATTR_IW_ECN, + ZXDH_ATTR_ROCE_TIMELY, + ZXDH_ATTR_ROCE_DCQCN, + ZXDH_ATTR_ROCE_DCTCP, + ZXDH_ATTR_ROCE_ENABLE, + ZXDH_ATTR_IW_OOO, + ZXDH_ATTR_ROCE_NO_ICRC, + ZXDH_ATTR_ENABLE_UP_MAP, +}; + +struct zxdh_vsi_grp { + struct config_group group; + struct zxdh_device *iwdev; +}; + +/** + * zxdh_find_device_by_name - find a vsi device given a name + * @name: name of iwdev + */ +static struct zxdh_device *zxdh_find_device_by_name(const char *name) +{ + struct zxdh_handler *hdl; + struct zxdh_device *iwdev; + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_for_each_entry(hdl, &zxdh_handlers, list) { + iwdev = hdl->iwdev; + if (!strcmp(name, iwdev->ibdev.name)) { + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + return iwdev; + } + } + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + + return NULL; +} + +#ifdef __OFED_4_8__ +static int zxdh_configfs_set_vsi_attr(struct zxdh_vsi_grp *grp, const char *buf, + enum zxdh_configfs_attr_type attr_type) +{ +#else +/* + * zxdh_configfs_set_vsi_attr - set vsi configfs attribute + * @item_name: config item name + * @buf: buffer + * @zxdh_configfs_type_attr: vsi attribute type to set + */ +static int zxdh_configfs_set_vsi_attr(struct config_item *item, const char *buf, + enum zxdh_configfs_attr_type attr_type) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + struct zxdh_up_info up_map_info = {}; + bool enable; + int ret = 0; + + if (strtobool(buf, &enable)) { + ret = -EINVAL; + goto done; + } + + switch (attr_type) { + case ZXDH_ATTR_IW_DCTCP: + iwdev->iwarp_dctcp_en = enable; + iwdev->iwarp_ecn_en = !enable; + break; + case ZXDH_ATTR_IW_TIMELY: + iwdev->iwarp_timely_en = enable; + break; + case ZXDH_ATTR_IW_ECN: + iwdev->iwarp_ecn_en = enable; + break; + case ZXDH_ATTR_ENABLE_UP_MAP: + iwdev->up_map_en = enable; + if (enable) { + *((u64 
*)up_map_info.map) = iwdev->up_up_map; + up_map_info.use_cnp_up_override = true; + up_map_info.cnp_up_override = iwdev->cnp_up_override; + } else { + *((u64 *)up_map_info.map) = ZXDH_DEFAULT_UP_UP_MAP; + up_map_info.use_cnp_up_override = false; + } + up_map_info.hmc_fcn_idx = iwdev->rf->sc_dev.hmc_fn_id; + zxdh_cqp_up_map_cmd(&iwdev->rf->sc_dev, ZXDH_OP_SET_UP_MAP, + &up_map_info); + break; + case ZXDH_ATTR_ROCE_NO_ICRC: + iwdev->roce_no_icrc_en = enable; + break; + case ZXDH_ATTR_ROCE_TIMELY: + iwdev->roce_timely_en = enable; + break; + case ZXDH_ATTR_ROCE_DCQCN: + iwdev->roce_dcqcn_en = enable; + break; + case ZXDH_ATTR_ROCE_DCTCP: + iwdev->roce_dctcp_en = enable; + break; + case ZXDH_ATTR_ROCE_ENABLE: + //rf->roce_en = enable; FIXME: Add when roce/iwarp in configFS + break; + case ZXDH_ATTR_IW_OOO: + iwdev->iw_ooo = enable; + iwdev->override_ooo = true; + break; + default: + ret = -EINVAL; + } + +done: + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_push_mode(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * push_mode_show - Show the value of push_mode for device + * @item: config item + * @buf: buffer to write to + */ +static ssize_t push_mode_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->push_mode); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_push_mode(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * push_mode_store - Store value for push_mode + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t push_mode_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + bool enable; + + if 
+/** + * roce_rd_fence_rate_show - Show RoCE read fence rate
size_t count) +{ +#else +/** + * roce_rd_fence_rate_store - Store RoCE read fence rate + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_rd_fence_rate_store(struct config_item *item, + const char *buf, size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rd_fence_rate; + + if (kstrtou32(buf, 0, &rd_fence_rate)) + return -EINVAL; + + if (rd_fence_rate > 256) + return -EINVAL; + + iwdev->rd_fence_rate = rd_fence_rate; + iwdev->override_rd_fence_rate = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_ackcreds(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * roce_ackcreds_show - Show the value of RoCE ack_creds + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_ackcreds_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_ackcreds); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_ackcreds(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_ackcreds_store - Store value for roce_ackcreds + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_ackcreds_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rsrc_ackcreds; + + if (kstrtou32(buf, 0, &rsrc_ackcreds)) + return -EINVAL; + + if (!rsrc_ackcreds || rsrc_ackcreds > 0x1E) + return -EINVAL; + + iwdev->roce_ackcreds = rsrc_ackcreds; + iwdev->override_ackcreds = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t 
zxdh_store_attr_cnp_up_override(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * cnp_up_override_store - Store value for CNP override + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t cnp_up_override_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 cnp_override; + + if (kstrtou8(buf, 0, &cnp_override)) + return -EINVAL; + + if (cnp_override > 0x3F) + return -EINVAL; + + iwdev->cnp_up_override = cnp_override; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_cnp_up_override(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * cnp_up_override_show - Show value of CNP UP override + * @item: config item + * @buf: buffer to write to + */ +static ssize_t cnp_up_override_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->cnp_up_override); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_ceq_itr(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * ceq_itr_store - Set interrupt Throttling(ITR) value + * @item: config item + * @buf: buffer to read from + * @count: size of buffer + */ +static ssize_t ceq_itr_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 itr; + + if (kstrtou32(buf, 0, &itr)) + return -EINVAL; + +#define ZXDH_MAX_ITR 8160 + if (itr > 8160) + return -EINVAL; + + iwdev->rf->sc_dev.ceq_itr = itr; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_ceq_itr(struct 
zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * ceq_itr_show - Show interrupt Throttling(ITR) value + * @item: config item + * @buf: buffer to write to + */ +static ssize_t ceq_itr_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rf->sc_dev.ceq_itr); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_ceq_intrl(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * ceq_intrl_store - Set the interrupt rate limit value + * @item: config item + * @buf: buffer to read from + * @count: size of buffer + */ +static ssize_t ceq_intrl_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + struct zxdh_msix_vector *msix_vec; + u32 intrl, interval = 0; + int i; + + if (kstrtou32(buf, 0, &intrl)) + return -EINVAL; + +#define ZXDH_MIN_INT_RATE_LIMIT 4237 +#define ZXDH_MAX_INT_RATE_LIMIT 250000 +#define ZXDH_USECS_PER_SEC 1000000 +#define ZXDH_USECS_PER_UNIT 4 +#define ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL 59 /* 59 * 4 = 236 us */ + + if (intrl && intrl < ZXDH_MIN_INT_RATE_LIMIT) + intrl = ZXDH_MIN_INT_RATE_LIMIT; + if (intrl > ZXDH_MAX_INT_RATE_LIMIT) + intrl = ZXDH_MAX_INT_RATE_LIMIT; + + iwdev->ceq_intrl = intrl; + if (intrl) { + interval = (ZXDH_USECS_PER_SEC / intrl) / ZXDH_USECS_PER_UNIT; + + ibdev_info( + &iwdev->ibdev, + "CEQ Interrupt rate Limit enabled with interval = %d\n", + interval); + } else { + ibdev_info(&iwdev->ibdev, + "CEQ Interrupt rate Limit disabled\n"); + } + msix_vec = &iwdev->rf->iw_msixtbl[2]; + for (i = 1; i < iwdev->rf->ceqs_count; i++, msix_vec++) + zxdh_set_irq_rate_limit(&iwdev->rf->sc_dev, msix_vec->idx, + interval); + + return count; +} + +#ifdef 
+ * up_up_map_show - Show value of UP-UP map
container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rcv_wnd); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_rcv_wnd(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * rcv_wnd_store - Store value for rcv_wnd + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t rcv_wnd_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u32 rsrc_rcv_wnd; + + if (kstrtou32(buf, 0, &rsrc_rcv_wnd)) + return -EINVAL; + + if (rsrc_rcv_wnd < 65536) + return -EINVAL; + + iwdev->rcv_wnd = rsrc_rcv_wnd; + iwdev->override_rcv_wnd = true; + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_rcv_wscale(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * rcv_wscale_show - Show value of TCP receive window scale + * @item: config item + * @buf: buffer to write to + */ +static ssize_t rcv_wscale_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->rcv_wscale); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_rcv_wscale(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * rcv_wscale_store - Store value for recv_wscale + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t rcv_wscale_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rsrc_rcv_wscale; + + if 
(kstrtou8(buf, 0, &rsrc_rcv_wscale)) + return -EINVAL; + + if (rsrc_rcv_wscale > 16) + return -EINVAL; + + iwdev->rcv_wscale = rsrc_rcv_wscale; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_dctcp_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * iw_dctcp_enable_show - Show the value of dctcp_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_dctcp_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_dctcp_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_dctcp_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_dctcp_enable_store - Store value of dctcp_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_dctcp_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_DCTCP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_DCTCP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_ecn_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_ecn_enable_show - Show the value of ecn_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_ecn_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_ecn_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_ecn_enable(struct zxdh_vsi_grp *grp, + const 
char *buf, size_t count) +#else +/** + * iw_ecn_enable_store - Store value of ecn_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_ecn_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_ECN); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_ECN); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_timely_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * iw_timely_enable_show - Show value of iwarp_timely_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_timely_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_timely_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_timely_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_timely_enable_store - Store value of iwarp_timely_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_timely_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_TIMELY); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_TIMELY); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_rtomin(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_rtomin_show - Show the value of rtomin for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t iw_rtomin_show(struct config_item *item, char 
*buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iwarp_rtomin); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_rtomin(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * iw_rtomin_store - Store value of iwarp_rtomin for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_rtomin_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rtomin; + + if (kstrtou8(buf, 0, &rtomin)) + return -EINVAL; + + iwdev->iwarp_rtomin = rtomin; + iwdev->override_rtomin = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_rtomin(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * roce_rtomin_show - Show the value of roce_rtomin for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_rtomin_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_rtomin); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_rtomin(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +{ +#else +/** + * roce_rtomin_store - Store value of roce_rtomin for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_rtomin_store(struct config_item *item, const char *buf, + size_t count) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + u8 rtomin; + + if 
(kstrtou8(buf, 0, &rtomin)) + return -EINVAL; + + iwdev->roce_rtomin = rtomin; + iwdev->override_rtomin = true; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_timely_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_timely_enable_show - Show value of roce_timely_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_timely_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_timely_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_timely_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_timely_enable_store - Store value of roce_timely_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_timely_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_TIMELY); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_TIMELY); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_no_icrc_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_no_icrc_enable_show - Show value of no_icrc for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_no_icrc_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_no_icrc_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_no_icrc_enable(struct 
zxdh_vsi_grp *grp, + const char *buf, + size_t count) +#else +/** + * roce_no_icrc_enable_store - Store value of roce_no_icrc for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_no_icrc_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_NO_ICRC); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_NO_ICRC); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_up_map_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * up_map_enable_show - Show value of up_map_enable for PF + * @item: config item + * @buf: buffer to write to + */ +static ssize_t up_map_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->up_map_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_up_map_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * up_map_enable_store - Store value of up_map_enable for PF + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t up_map_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ENABLE_UP_MAP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ENABLE_UP_MAP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_iw_ooo_enable(struct zxdh_vsi_grp *grp, char *buf) +{ +#else +/** + * iw_ooo_enable_show - Show the value of iw_ooo_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t 
iw_ooo_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->iw_ooo); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_iw_ooo_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * iw_ooo_enable_store - Store value of iw_ooo_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t iw_ooo_enable_store(struct config_item *item, const char *buf, + size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_IW_OOO); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_IW_OOO); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_dcqcn_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/** + * roce_dcqcn_enable_show - Show the value of roce_dcqcn_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_dcqcn_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_dcqcn_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_dcqcn_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_dcqcn_enable_store - Store value of roce_dcqcn_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_dcqcn_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_DCQCN); +#else + ret = 
zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_DCQCN); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_show_attr_roce_dctcp_enable(struct zxdh_vsi_grp *grp, + char *buf) +{ +#else +/* roce_dctcp_enable_show - Show the value of roce_dctcp_enable for vsi + * @item: config item + * @buf: buffer to write to + */ +static ssize_t roce_dctcp_enable_show(struct config_item *item, char *buf) +{ + struct zxdh_vsi_grp *grp = + container_of(to_config_group(item), struct zxdh_vsi_grp, group); +#endif + struct zxdh_device *iwdev = grp->iwdev; + ssize_t ret; + + ret = sprintf(buf, "%d\n", iwdev->roce_dctcp_en); + + return ret; +} + +#ifdef __OFED_4_8__ +static ssize_t zxdh_store_attr_roce_dctcp_enable(struct zxdh_vsi_grp *grp, + const char *buf, size_t count) +#else +/** + * roce_dctcp_enable_store - Store value of roce_dctcp_enable for vsi + * @item: config item + * @buf: buf to read from + * @count: size of buf + */ +static ssize_t roce_dctcp_enable_store(struct config_item *item, + const char *buf, size_t count) +#endif +{ + int ret; + +#ifdef __OFED_4_8__ + ret = zxdh_configfs_set_vsi_attr(grp, buf, ZXDH_ATTR_ROCE_DCTCP); +#else + ret = zxdh_configfs_set_vsi_attr(item, buf, ZXDH_ATTR_ROCE_DCTCP); +#endif + + if (ret) + return ret; + + return count; +} + +#ifdef __OFED_4_8__ +CONFIGFS_EATTR_STRUCT(zrdma, zxdh_vsi_grp); +#define CFG_CONFIG_DESC_ITEM_ATTR(name) \ + static struct zxdh_attribute name = __CONFIGFS_EATTR( \ + name, S_IRUGO | S_IWUSR, zxdh_show_attr_##name, \ + zxdh_store_attr_##name) + +CFG_CONFIG_DESC_ITEM_ATTR(iw_dctcp_enable); +CFG_CONFIG_DESC_ITEM_ATTR(push_mode); +CFG_CONFIG_DESC_ITEM_ATTR(iw_timely_enable); +CFG_CONFIG_DESC_ITEM_ATTR(iw_ecn_enable); +CFG_CONFIG_DESC_ITEM_ATTR(iw_rtomin); +CFG_CONFIG_DESC_ITEM_ATTR(rcv_wnd); +CFG_CONFIG_DESC_ITEM_ATTR(rcv_wscale); +CFG_CONFIG_DESC_ITEM_ATTR(iw_ooo_enable); +CFG_CONFIG_DESC_ITEM_ATTR(cnp_up_override); +CFG_CONFIG_DESC_ITEM_ATTR(up_map_enable); 
+CFG_CONFIG_DESC_ITEM_ATTR(up_up_map); +CFG_CONFIG_DESC_ITEM_ATTR(ceq_itr); +CFG_CONFIG_DESC_ITEM_ATTR(ceq_intrl); +CFG_CONFIG_DESC_ITEM_ATTR(roce_cwnd); +CFG_CONFIG_DESC_ITEM_ATTR(roce_rd_fence_rate); +CFG_CONFIG_DESC_ITEM_ATTR(roce_ackcreds); +CFG_CONFIG_DESC_ITEM_ATTR(roce_timely_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_no_icrc_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_dcqcn_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_dctcp_enable); +CFG_CONFIG_DESC_ITEM_ATTR(roce_rtomin); + +CONFIGFS_EATTR_OPS(zrdma, zxdh_vsi_grp, group); + +static struct configfs_attribute *zxdh_gen1_iw_vsi_attrs[] = { + &rcv_wnd.attr, + &rcv_wscale.attr, + NULL, +}; + +static struct configfs_attribute *zxdh_iw_vsi_attrs[] = { + &push_mode.attr, + &iw_dctcp_enable.attr, + &iw_timely_enable.attr, + &iw_ecn_enable.attr, + &iw_rtomin.attr, + &rcv_wnd.attr, + &rcv_wscale.attr, + &iw_ooo_enable.attr, + &cnp_up_override.attr, + &up_map_enable.attr, + &up_up_map.attr, + &ceq_itr.attr, + &ceq_intrl.attr, + NULL, +}; + +static struct configfs_attribute *zxdh_roce_vsi_attrs[] = { + &push_mode.attr, + &roce_cwnd.attr, + &roce_rd_fence_rate.attr, + &roce_ackcreds.attr, + &roce_timely_enable.attr, + &roce_no_icrc_enable.attr, + &roce_dcqcn_enable.attr, + &roce_dctcp_enable.attr, + &roce_rtomin.attr, + &cnp_up_override.attr, + &up_map_enable.attr, + &up_up_map.attr, + &ceq_itr.attr, + &ceq_intrl.attr, + NULL, +}; +#else /* OFED_4_8 */ +CONFIGFS_ATTR(, push_mode); +CONFIGFS_ATTR(, iw_dctcp_enable); +CONFIGFS_ATTR(, iw_timely_enable); +CONFIGFS_ATTR(, iw_ecn_enable); +CONFIGFS_ATTR(, iw_rtomin); +CONFIGFS_ATTR(, rcv_wnd); +CONFIGFS_ATTR(, rcv_wscale); +CONFIGFS_ATTR(, iw_ooo_enable); +CONFIGFS_ATTR(, up_map_enable); +CONFIGFS_ATTR(, cnp_up_override); +CONFIGFS_ATTR(, up_up_map); +CONFIGFS_ATTR(, ceq_itr); +CONFIGFS_ATTR(, ceq_intrl); +CONFIGFS_ATTR(, roce_timely_enable); +CONFIGFS_ATTR(, roce_no_icrc_enable); +CONFIGFS_ATTR(, roce_dcqcn_enable); +CONFIGFS_ATTR(, roce_dctcp_enable); +CONFIGFS_ATTR(, roce_cwnd); 
+CONFIGFS_ATTR(, roce_rd_fence_rate); +CONFIGFS_ATTR(, roce_ackcreds); +CONFIGFS_ATTR(, roce_rtomin); + +static struct configfs_attribute *zxdh_gen1_iw_vsi_attrs[] = { + &attr_rcv_wnd, + &attr_rcv_wscale, + NULL, +}; + +static struct configfs_attribute *zxdh_iw_vsi_attrs[] = { + &attr_push_mode, + &attr_iw_dctcp_enable, + &attr_iw_timely_enable, + &attr_iw_ecn_enable, + &attr_iw_rtomin, + &attr_rcv_wnd, + &attr_rcv_wscale, + &attr_iw_ooo_enable, + &attr_cnp_up_override, + &attr_up_map_enable, + &attr_up_up_map, + &attr_ceq_itr, + &attr_ceq_intrl, + NULL, +}; + +static struct configfs_attribute *zxdh_roce_vsi_attrs[] = { + &attr_push_mode, + &attr_roce_cwnd, + &attr_roce_rd_fence_rate, + &attr_roce_ackcreds, + &attr_roce_timely_enable, + &attr_roce_no_icrc_enable, + &attr_roce_dcqcn_enable, + &attr_roce_dctcp_enable, + &attr_roce_rtomin, + &attr_cnp_up_override, + &attr_up_map_enable, + &attr_up_up_map, + &attr_ceq_itr, + &attr_ceq_intrl, + NULL, +}; +#endif /* OFED_4_8 */ + +static void zxdh_release_vsi_grp(struct config_item *item) +{ + struct config_group *group = + container_of(item, struct config_group, cg_item); + struct zxdh_vsi_grp *vsi_grp = + container_of(group, struct zxdh_vsi_grp, group); + + kfree(vsi_grp); +} + +static struct configfs_item_operations zxdh_vsi_ops = { +#ifdef __OFED_4_8__ + .show_attribute = zxdh_attr_show, + .store_attribute = zxdh_attr_store, +#endif + .release = zxdh_release_vsi_grp +}; + +static struct config_item_type zxdh_iw_vsi_type = { + .ct_attrs = zxdh_iw_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type zxdh_roce_vsi_type = { + .ct_attrs = zxdh_roce_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_item_type zxdh_gen1_iw_vsi_type = { + .ct_attrs = zxdh_gen1_iw_vsi_attrs, + .ct_item_ops = &zxdh_vsi_ops, + .ct_owner = THIS_MODULE, +}; + +/** + * zxdh_vsi_make_group - Creation of subsystem groups + * @group: config group + * 
@name: name of the group + */ +static struct config_group *zxdh_vsi_make_group(struct config_group *group, + const char *name) +{ + struct zxdh_vsi_grp *vsi_grp; + struct zxdh_device *iwdev; + u8 hw_ver; + + iwdev = zxdh_find_device_by_name(name); + if (!iwdev) + return ERR_PTR(-ENODEV); + + hw_ver = iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev; + + vsi_grp = kzalloc(sizeof(*vsi_grp), GFP_KERNEL); + if (!vsi_grp) + return ERR_PTR(-ENOMEM); + + vsi_grp->iwdev = iwdev; + + config_group_init(&vsi_grp->group); + + if (hw_ver == ZXDH_GEN_1) { + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_gen1_iw_vsi_type); + } else { + if (iwdev->rf->protocol_used == ZXDH_ROCE_PROTOCOL_ONLY) + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_roce_vsi_type); + else + config_group_init_type_name(&vsi_grp->group, name, + &zxdh_iw_vsi_type); + } + + return &vsi_grp->group; +} + +static struct configfs_group_operations zxdh_vsi_group_ops = { + .make_group = zxdh_vsi_make_group, +}; + +static struct config_item_type zxdh_subsys_type = { + .ct_group_ops = &zxdh_vsi_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem cfs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "zrdma", + .ci_type = &zxdh_subsys_type, + }, + }, +}; + +int zxdh_configfs_init(void) +{ + config_group_init(&cfs_subsys.su_group); + mutex_init(&cfs_subsys.su_mutex); + return configfs_register_subsystem(&cfs_subsys); +} + +void zxdh_configfs_exit(void) +{ + configfs_unregister_subsystem(&cfs_subsys); +} +#endif /* CONFIG_CONFIGFS_FS */ diff --git a/drivers/infiniband/hw/zrdma/ctrl.c b/drivers/infiniband/hw/zrdma/ctrl.c new file mode 100644 index 000000000000..af2cc40de09f --- /dev/null +++ b/drivers/infiniband/hw/zrdma/ctrl.c @@ -0,0 +1,5336 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "ws.h" +#include 
"protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" +#include "srq.h" + +/** + * zxdh_get_qp_from_list - get next qp from a list + * @head: Listhead of qp's + * @qp: current qp + */ +struct zxdh_sc_qp *zxdh_get_qp_from_list(struct list_head *head, + struct zxdh_sc_qp *qp) +{ + struct list_head *lastentry; + struct list_head *entry = NULL; + + if (list_empty(head)) + return NULL; + + if (!qp) { + entry = head->next; + } else { + lastentry = &qp->list; + entry = lastentry->next; + if (entry == head) + return NULL; + } + + return container_of(entry, struct zxdh_sc_qp, list); +} + +#ifdef Z_CONFIG_RDMA_VSI +/** + * zxdh_qp_rem_qos - remove qp from qos lists during destroy qp + * @qp: qp to be removed from qos + */ +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_vsi *vsi = qp->vsi; + + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + if (qp->on_qoslist) { + qp->on_qoslist = false; + list_del(&qp->list); + } + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +} + +/** + * zxdh_qp_add_qos - called during setctx for qp to be added to qos + * @qp: qp to be added to qos + */ +void zxdh_qp_add_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_vsi *vsi = qp->vsi; + + mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); + if (!qp->on_qoslist) { + list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); + qp->on_qoslist = true; + qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; + } + mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); +} +#else +/** + * zxdh_qp_rem_qos - remove qp from qos lists during destroy qp + * @qp: qp to be removed from qos + */ +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_dev *dev = qp->dev; + + mutex_lock(&dev->qos[qp->user_pri].qos_mutex); + if (qp->on_qoslist) { + qp->on_qoslist = false; + list_del(&qp->list); + } + mutex_unlock(&dev->qos[qp->user_pri].qos_mutex); +} + +/** + * zxdh_qp_add_qos - called during setctx for qp to be added to qos + * @qp: qp to be added to qos + */ +void 
zxdh_qp_add_qos(struct zxdh_sc_qp *qp) +{ + struct zxdh_sc_dev *dev = qp->dev; + + mutex_lock(&dev->qos[qp->user_pri].qos_mutex); + if (!qp->on_qoslist) { + list_add(&qp->list, &dev->qos[qp->user_pri].qplist); + qp->on_qoslist = true; + qp->qs_handle = dev->qos[qp->user_pri].qs_handle; + } + mutex_unlock(&dev->qos[qp->user_pri].qos_mutex); +} +#endif + +/** + * zxdh_sc_pd_init - initialize sc pd struct + * @dev: sc device struct + * @pd: sc pd ptr + * @pd_id: pd_id for allocated pd + * @abi_ver: User/Kernel ABI version + */ +void zxdh_sc_pd_init(struct zxdh_sc_dev *dev, struct zxdh_sc_pd *pd, u32 pd_id, + int abi_ver) +{ + pd->pd_id = pd_id; + pd->abi_ver = abi_ver; + pd->dev = dev; +} + +/** + * zxdh_sc_add_arp_cache_entry - cqp wqe add arp cache entry + * @cqp: struct for cqp hw + * @info: arp entry information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int +zxdh_sc_add_arp_cache_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_add_arp_cache_entry_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 temp, hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + set_64bit_val(wqe, 8, info->reach_max); + + temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); + set_64bit_val(wqe, 16, temp); + + hdr = info->arp_index | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_ARP) | + FIELD_PREP(ZXDH_CQPSQ_MAT_PERMANENT, info->permanent) | + FIELD_PREP(ZXDH_CQPSQ_MAT_ENTRYVALID, true) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * 
zxdh_sc_del_arp_cache_entry - delete arp cache entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_del_arp_cache_entry(struct zxdh_sc_cqp *cqp, u64 scratch, + u16 arp_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = arp_index | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_ARP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_apbvt_entry - for adding and deleting apbvt entries + * @cqp: struct for cqp hw + * @info: info for apbvt entry to add or delete + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_apbvt_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_apbvt_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->port); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_APBVT) | + FIELD_PREP(ZXDH_CQPSQ_MAPT_ADDPORT, info->add) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_qhash_table_entry - manage quad hash entries + * @cqp: struct for cqp hw + * @info: info for quad hash to manage + * @scratch: u64 
saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + * + * This is called before connection establishment is started. + * For passive connections, when listener is created, it will + * call with entry type of ZXDH_QHASH_TYPE_TCP_SYN with local + * ip address and tcp port. When SYN is received (passive + * connections) or sent (active connections), this routine is + * called with entry type of ZXDH_QHASH_TYPE_TCP_ESTABLISHED + * and quad is passed in info. + * + * When iwarp connection is done and its state moves to RTS, the + * quad hash entry in the hardware will point to iwarp's qp + * number and requires no calls from the driver. + */ +static int zxdh_sc_manage_qhash_table_entry(struct zxdh_sc_cqp *cqp, + struct zxdh_qhash_table_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 qw1 = 0; + u64 qw2 = 0; + u64 temp; + struct zxdh_sc_vsi *vsi = info->vsi; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); + set_64bit_val(wqe, 0, temp); + + qw1 = FIELD_PREP(ZXDH_CQPSQ_QHASH_QPN, info->qp_num) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_DEST_PORT, info->dest_port); + if (info->ipv4_valid) { + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->dest_ip[0])); + } else { + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR0, + info->dest_ip[0]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR1, + info->dest_ip[1])); + + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR2, + info->dest_ip[2]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->dest_ip[3])); + } + qw2 = FIELD_PREP(ZXDH_CQPSQ_QHASH_QS_HANDLE, + vsi->qos[info->user_pri].qs_handle); + if (info->vlan_valid) + qw2 |= FIELD_PREP(ZXDH_CQPSQ_QHASH_VLANID, info->vlan_id); + set_64bit_val(wqe, 16, qw2); + if (info->entry_type == 
ZXDH_QHASH_TYPE_TCP_ESTABLISHED) { + qw1 |= FIELD_PREP(ZXDH_CQPSQ_QHASH_SRC_PORT, info->src_port); + if (!info->ipv4_valid) { + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR0, + info->src_ip[0]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR1, + info->src_ip[1])); + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR2, + info->src_ip[2]) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->src_ip[3])); + } else { + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_QHASH_ADDR3, + info->src_ip[0])); + } + } + + set_64bit_val(wqe, 8, qw1); + temp = FIELD_PREP(ZXDH_CQPSQ_QHASH_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_OPCODE, + ZXDH_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_MANAGE, info->manage) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_VLANVALID, info->vlan_valid) | + FIELD_PREP(ZXDH_CQPSQ_QHASH_ENTRYTYPE, info->entry_type); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_init - initialize qp + * @qp: sc qp + * @info: initialization qp info + */ +int zxdh_sc_qp_init(struct zxdh_sc_qp *qp, struct zxdh_qp_init_info *info) +{ + int ret_code; + u32 pble_obj_cnt; + u16 wqe_size; + struct zxdh_qp *iwqp = container_of(qp, struct zxdh_qp, sc_qp); + + if (iwqp->is_srq == false) { + if (info->qp_uk_init_info.max_sq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags || + info->qp_uk_init_info.max_rq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + } else { + if (info->qp_uk_init_info.max_sq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + } + + qp->dev = info->dev; +#ifdef Z_CONFIG_RDMA_VSI + qp->vsi = info->vsi; +#endif + qp->sq_pa = info->sq_pa; + if 
(iwqp->is_srq == false) + qp->rq_pa = info->rq_pa; + qp->hw_host_ctx_pa = info->host_ctx_pa; + qp->shadow_area_pa = info->shadow_area_pa; + qp->pd = info->pd; + qp->hw_host_ctx = info->host_ctx; + info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db; + qp->is_nvmeof_ioq = false; + qp->is_nvmeof_tgt = false; + qp->nvmeof_qid = 0xffff; + qp->entry_err_cnt = 0; + qp->retry_err_cnt = 0; + qp->aeq_entry_err_last_psn = 0; + qp->aeq_retry_err_last_psn = 0; + + ret_code = zxdh_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info); + if (ret_code) + return ret_code; + + qp->virtual_map = info->virtual_map; + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (iwqp->is_srq == false) { + if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) || + (info->virtual_map && info->rq_pa >= pble_obj_cnt)) + return -EINVAL; + } else { + if ((info->virtual_map && info->sq_pa >= pble_obj_cnt)) + return -EINVAL; + } + + qp->hw_sq_size = zxdh_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, + ZXDH_QUEUE_TYPE_SQ_RQ); + + ret_code = zxdh_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, + &wqe_size); + if (ret_code) + return ret_code; + + if (iwqp->is_srq == false) { + qp->hw_rq_size = zxdh_get_encoded_wqe_size( + qp->qp_uk.rq_size, ZXDH_QUEUE_TYPE_SQ_RQ); + } + + return 0; +} + +/** + * zxdh_sc_qp_create - create qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_create(struct zxdh_sc_qp *qp, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + + cqp = qp->dev->cqp; + + if (qp->qp_ctx_num < qp->dev->base_qpn || + qp->qp_ctx_num > + (qp->dev->base_qpn + + cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].max_cnt - 1)) + return -EINVAL; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 24, RDMAQPC_MASK_INIT); + 
set_64bit_val(wqe, 32, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 40, RDMAQPC_MASK_INIT); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_modify - modify qp cqp wqe + * @qp: sc qp + * @info: modify qp info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_modify(struct zxdh_sc_qp *qp, struct zxdh_modify_qp_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, info->qpc_tx_mask_low); + set_64bit_val(wqe, 24, info->qpc_tx_mask_high); + set_64bit_val(wqe, 32, info->qpc_rx_mask_low); + set_64bit_val(wqe, 40, info->qpc_rx_mask_high); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_destroy - cqp destroy qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @ignore_mw_bnd: memory window bind flag + * @post_sq: flag for cqp db to ring + */ +int 
zxdh_sc_qp_destroy(struct zxdh_sc_qp *qp, u64 scratch, bool ignore_mw_bnd, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 16, RDMAQPC_TX_MASKL_DESTROY); + set_64bit_val(wqe, 24, RDMAQPC_TX_MASKH_QP_STATE); + set_64bit_val(wqe, 32, RDMAQPC_RX_MASKL_DESTROY); + set_64bit_val(wqe, 40, RDMAQPC_RX_MASKH_DEST_IP); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, qp->qp_ctx_num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_get_encoded_ird_size - + * @ird_size: IRD size + */ +static u8 zxdh_sc_get_encoded_ird_size(u16 ird_size) +{ + u8 encoded_size = 0; + + while (ird_size >>= 1) + encoded_size++; + + return encoded_size; +} + +/** + * zxdh_sc_qp_resetctx_roce - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + */ +void zxdh_sc_qp_resetctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx) +{ + memset(qp_ctx, 0, ZXDH_QP_CTX_SIZE); + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_HIGH, + RS_64_1(IRDMAQPC_HW_SQ_TAIL_INIT, 11))); + set_64bit_val(qp_ctx, 280, FIELD_PREP(RDMAQPC_RX_IRD_RXNUM, 511)); + set_64bit_val(qp_ctx, 384, + FIELD_PREP(RDMAQPC_RX_VHCA_ID, qp->dev->vhca_id)); +} + +u16 zxdh_get_rc_gqp_id(u16 qp_8k_index, u16 vhca_gqp_start, u16 vhca_gqp_cnt) +{ + u16 gqp_offset = 0; + + gqp_offset = qp_8k_index % vhca_gqp_cnt; + + return (vhca_gqp_start + gqp_offset); +} + +void zxdh_sc_qp_modify_ctx_udp_sport(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct 
zxdh_qp_host_ctx_info *info) +{ + struct zxdh_udp_offload_info *udp; + u64 hdr; + + udp = info->udp_info; + + hdr = FIELD_PREP(RDMAQPC_TX_SRC_PORTNUM, udp->src_port); + set_64bit_val(qp_ctx, 96, hdr); + dma_wmb(); + set_64bit_val(qp_ctx, 368, + FIELD_PREP(RDMAQPC_RX_SRC_PORTNUM, udp->src_port)); + dma_wmb(); + print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +void zxdh_sc_qp_modify_private_cmd_qpc(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_modify_qpc_item *info) +{ + u64 hdr; + + hdr = FIELD_PREP(RDMAQPC_TX_CUR_RETRY_CNT, info->cur_retry_count) | + FIELD_PREP(RDMAQPC_TX_READ_RETRY_FLAG, info->read_retry_flag) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_PSN, info->tx_last_ack_psn); + set_64bit_val(qp_ctx, 0, hdr); + dma_wmb(); + set_64bit_val(qp_ctx, 8, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_FLAG, + info->rnr_retry_flag) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_TIME_L, + info->rnr_retry_time_l) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_THRESHOLD, + info->rnr_retry_threshold)); + set_64bit_val(qp_ctx, 16, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_TIME_H, + info->rnr_retry_time_h)); + dma_wmb(); + + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_RETRY_FLAG, info->retry_flag)); + dma_wmb(); + + set_64bit_val(qp_ctx, 40, + FIELD_PREP(RDMAQPC_TX_ERR_FLAG, info->err_flag) | + FIELD_PREP(RDMAQPC_TX_ACK_ERR_FLAG, + info->ack_err_flag) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_WQE_OFFSET, + info->last_ack_wqe_offset) | + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_UNA, + info->hw_sq_tail_una) | + FIELD_PREP(RDMAQPC_TX_RDWQE_PYLD_LENGTH_L, + info->rdwqe_pyld_length_l) | + FIELD_PREP(RDMAQPC_TX_RDWQE_PYLD_LENGTH_H, + info->rdwqe_pyld_length_h)); + + dma_wmb(); + + set_64bit_val(qp_ctx, 48, + FIELD_PREP(RDMAQPC_TX_PACKAGE_ERR_FLAG, + info->package_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_READ_FLAG, + info->recv_read_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_ERR_FLAG, + info->recv_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_FLAG, + 
info->recv_rd_msg_loss_err_flag) | + FIELD_PREP(RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_CNT, + info->recv_rd_msg_loss_err_cnt) | + FIELD_PREP(RDMAQPC_TX_RD_MSG_LOSS_ERR_FLAG, + info->rd_msg_loss_err_flag) | + FIELD_PREP(RDMAQPC_TX_PKTCHK_RD_MSG_LOSS_ERR_CNT, + info->pktchk_rd_msg_loss_err_cnt)); + dma_wmb(); + + set_64bit_val(qp_ctx, 56, + FIELD_PREP(RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG, + info->retry_cqe_sq_opcode)); + dma_wmb(); + + print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +/** + * zxdh_sc_qp_setctx_roce - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + * @info: ctx info + */ +void zxdh_sc_qp_setctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info) +{ + struct zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp; + u64 mac; + u64 dmac; + u64 hdr; + u8 service_type; + u16 header_len; + u16 gqp_id; + + roce_info = info->roce_info; + udp = info->udp_info; + + if (roce_info->dcqcn_en || roce_info->dctcp_en) { + udp->tos &= ~ECN_CODE_PT_MASK; + udp->tos |= ECN_CODE_PT_VAL; + } + + mac = LS_64_1(roce_info->mac_addr[5], 0) | + LS_64_1(roce_info->mac_addr[4], 8) | + LS_64_1(roce_info->mac_addr[3], 16) | + LS_64_1(roce_info->mac_addr[2], 24) | + LS_64_1(roce_info->mac_addr[1], 32) | + LS_64_1(roce_info->mac_addr[0], 40); + + dmac = LS_64_1(udp->dest_mac[5], 0) | LS_64_1(udp->dest_mac[4], 8) | + LS_64_1(udp->dest_mac[3], 16) | LS_64_1(udp->dest_mac[2], 24) | + LS_64_1(udp->dest_mac[1], 32) | LS_64_1(udp->dest_mac[0], 40); + + qp->user_pri = info->user_pri; + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) { + service_type = ZXDH_QP_SERVICE_TYPE_RC; + gqp_id = zxdh_get_rc_gqp_id(qp->qp_uk.qp_8k_index, + qp->dev->vhca_gqp_start, + qp->dev->vhca_gqp_cnt); + } else { + service_type = ZXDH_QP_SERVICE_TYPE_UD; + gqp_id = qp->dev->vhca_ud_gqp; + } + + if (qp->dev->chip_version < 2) { + if (udp->ipv4) + header_len = udp->insert_vlan_tag ? 
46 : 42; + else + header_len = udp->insert_vlan_tag ? 66 : 62; + } else { + qp->is_credit_en = 1; + if (udp->ipv4) + header_len = udp->insert_vlan_tag ? 70 : 66; + else + header_len = udp->insert_vlan_tag ? 90 : 86; + } + + roce_info->is_qp1 = qp->qp_uk.qp_id == 1 ? 1 : 0; + + set_64bit_val(qp_ctx, 0, + FIELD_PREP(RDMAQPC_TX_RETRY_CNT, udp->rexmit_thresh) | + FIELD_PREP(RDMAQPC_TX_CUR_RETRY_CNT, + udp->rexmit_thresh) | + FIELD_PREP(RDMAQPC_TX_LAST_ACK_PSN, + udp->psn_max) | + FIELD_PREP(RDMAQPC_TX_LSN_LOW1, udp->lsn)); + set_64bit_val(qp_ctx, 8, + FIELD_PREP(RDMAQPC_TX_LSN_HIGH23, RS_64_1(udp->lsn, 1)) | + FIELD_PREP(RDMAQPC_TX_ACKCREDITS, + (info->use_srq || qp->is_nvmeof_ioq || !qp->is_credit_en) ? 0x1f : roce_info->ack_credits) | + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_THRESHOLD, + udp->rnr_nak_thresh)); + set_64bit_val(qp_ctx, 16, FIELD_PREP(RDMAQPC_TX_SSN, 1)); + set_64bit_val(qp_ctx, 24, + FIELD_PREP(RDMAQPC_TX_PSN_MAX, udp->psn_max) | + FIELD_PREP(RDMAQPC_TX_PSN_NEXT, udp->psn_nxt)); + set_64bit_val(qp_ctx, 32, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_HIGH, + RS_64_1(IRDMAQPC_HW_SQ_TAIL_INIT, 11)) | + FIELD_PREP(RDMAQPC_TX_LOCAL_ACK_TIMEOUT, + udp->timeout)); + set_64bit_val(qp_ctx, 40, + FIELD_PREP(RDMAQPC_TX_HW_SQ_TAIL_UNA, + IRDMAQPC_HW_SQ_TAIL_INIT)); + set_64bit_val(qp_ctx, 48, 0); + set_64bit_val(qp_ctx, 56, + FIELD_PREP(RDMAQPC_TX_RNR_RETRY_CNT, + udp->rnr_nak_thresh) | + FIELD_PREP(RDMAQPC_TX_RNR_CUR_RETRY_CNT, + udp->rnr_nak_thresh)); + hdr = FIELD_PREP(RDMAQPC_TX_SERVICE_TYPE, service_type) | + FIELD_PREP(RDMAQPC_TX_SQ_VMAP, qp->virtual_map) | + FIELD_PREP(RDMAQPC_TX_SQ_LPBL_SIZE, qp->virtual_map ? 
1 : 0) | + FIELD_PREP(RDMAQPC_TX_IS_QP1, roce_info->is_qp1) | + FIELD_PREP(RDMAQPC_TX_IPV4, udp->ipv4) | + FIELD_PREP(RDMAQPC_TX_FAST_REG_EN, roce_info->fast_reg_en) | + FIELD_PREP(RDMAQPC_TX_BIND_EN, roce_info->bind_en) | + FIELD_PREP(RDMAQPC_TX_INSERT_VLANTAG, udp->insert_vlan_tag) | + FIELD_PREP(RDMAQPC_TX_VLANTAG, udp->vlan_tag) | + FIELD_PREP(RDMAQPC_TX_PD_INDEX, roce_info->pd_id) | + FIELD_PREP(RDMAQPC_TX_RSV_LKEY_EN, roce_info->priv_mode_en) | + FIELD_PREP(RDMAQPC_TX_ECN_EN, roce_info->ecn_en); + dma_wmb(); + + set_64bit_val(qp_ctx, 64, hdr); + set_64bit_val(qp_ctx, 72, qp->sq_pa); + set_64bit_val(qp_ctx, 80, + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR3, + udp->dest_ip_addr[3]) | + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR2, + udp->dest_ip_addr[2])); + set_64bit_val(qp_ctx, 88, + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR1, + udp->dest_ip_addr[1]) | + FIELD_PREP(RDMAQPC_TX_DEST_IPADDR0, + udp->dest_ip_addr[0])); + hdr = FIELD_PREP(RDMAQPC_TX_SRC_PORTNUM, udp->src_port) | + FIELD_PREP(RDMAQPC_TX_DEST_PORTNUM, udp->dst_port) | + FIELD_PREP(RDMAQPC_TX_FLOWLABEL, udp->flow_label) | + FIELD_PREP(RDMAQPC_TX_TTL, udp->ttl) | + FIELD_PREP(RDMAQPC_TX_ROCE_TVER, roce_info->roce_tver); + dma_wmb(); + + set_64bit_val(qp_ctx, 96, hdr); + set_64bit_val( + qp_ctx, 104, + FIELD_PREP(RDMAQPC_TX_QKEY, roce_info->qkey) | + FIELD_PREP(RDMAQPC_TX_DEST_QP, roce_info->dest_qp) | + FIELD_PREP(RDMAQPC_TX_ORD_SIZE, roce_info->ord_size)); + set_64bit_val(qp_ctx, 112, + FIELD_PREP(RDMAQPC_TX_DEST_MAC, dmac) | + FIELD_PREP(RDMAQPC_TX_PKEY, roce_info->p_key)); + set_64bit_val(qp_ctx, 120, info->qp_compl_ctx); + set_64bit_val(qp_ctx, 128, + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR3, + udp->local_ipaddr[3]) | + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR2, + udp->local_ipaddr[2])); + set_64bit_val(qp_ctx, 136, + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR1, + udp->local_ipaddr[1]) | + FIELD_PREP(RDMAQPC_TX_LOCAL_IPADDR0, + udp->local_ipaddr[0])); + set_64bit_val(qp_ctx, 144, + FIELD_PREP(RDMAQPC_TX_SRC_MAC, mac) | + 
FIELD_PREP(RDMAQPC_TX_PMTU, udp->pmtu) | + FIELD_PREP(RDMAQPC_TX_ACK_TIMEOUT, udp->timeout) | + FIELD_PREP(RDMAQPC_TX_LOG_SQSIZE, + qp->hw_sq_size)); + hdr = FIELD_PREP(RDMAQPC_TX_CQN, info->send_cq_num) | + FIELD_PREP(RDMAQPC_TX_NVMEOF_QID, qp->nvmeof_qid) | + FIELD_PREP(RDMAQPC_TX_IS_NVMEOF_TGT, qp->is_nvmeof_tgt) | + FIELD_PREP(RDMAQPC_TX_IS_NVMEOF_IOQ, qp->is_nvmeof_ioq) | + FIELD_PREP(RDMAQPC_TX_DCQCN_ID, + gqp_id) | + FIELD_PREP(RDMAQPC_TX_DCQCN_EN, roce_info->dcqcn_en) | + FIELD_PREP(RDMAQPC_TX_QUEUE_TC, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + ZXDH_QP_UD_QUEUE_TC : + qp->qp_uk.user_pri); + dma_wmb(); + + set_64bit_val(qp_ctx, 152, hdr); + set_64bit_val( + qp_ctx, 160, + FIELD_PREP(RDMAQPC_TX_QPN, qp->qp_uk.qp_id) | + FIELD_PREP(RDMAQPC_TX_TOS, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + ZXDH_QP_UD_TOS : + udp->tos) | + FIELD_PREP(RDMAQPC_TX_VHCA_ID_LOW6, qp->dev->vhca_id)); + set_64bit_val( + qp_ctx, 168, + FIELD_PREP(RDMAQPC_TX_VHCA_ID_HIGH4, + RS_64_1(qp->dev->vhca_id, 6)) | + FIELD_PREP(RDMAQPC_TX_QP_FLOW_SET, qp->qp_uk.qp_8k_index) | + FIELD_PREP(RDMAQPC_TX_QPSTATE, info->next_qp_state) | + FIELD_PREP(RDMAQPC_TX_DEBUG_SET, qp->dev->vhca_id)); + + set_64bit_val(qp_ctx, 256, FIELD_PREP(RDMAQPC_RX_LAST_OPCODE, 4)); + set_64bit_val(qp_ctx, 264, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? 
+ roce_info->qkey : + 0); + set_64bit_val(qp_ctx, 272, FIELD_PREP(RDMAQPC_RX_EPSN, udp->epsn)); + set_64bit_val(qp_ctx, 280, FIELD_PREP(RDMAQPC_RX_IRD_RXNUM, 511)); + set_64bit_val(qp_ctx, 288, 0); + set_64bit_val(qp_ctx, 296, 0); + set_64bit_val(qp_ctx, 304, 0); + set_64bit_val(qp_ctx, 312, 0); + set_64bit_val(qp_ctx, 320, + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR3, + udp->local_ipaddr[3]) | + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR2, + udp->local_ipaddr[2])); + set_64bit_val(qp_ctx, 328, + FIELD_PREP(RDMAQPC_RX_SRC_MAC_HIGH16, RS_64_1(mac, 32)) | + FIELD_PREP(RDMAQPC_RX_DEST_MAC, dmac)); + + hdr = FIELD_PREP(RDMAQPC_RX_IS_NVMEOF_IOQ, qp->is_nvmeof_ioq) | + FIELD_PREP(RDMAQPC_RX_INSERT_VLANTAG, udp->insert_vlan_tag) | + FIELD_PREP(RDMAQPC_RX_PMTU, udp->pmtu) | + FIELD_PREP(RDMAQPC_RX_SERVICE_TYPE, service_type) | + FIELD_PREP(RDMAQPC_RX_IPV4, udp->ipv4) | + FIELD_PREP(RDMAQPC_RX_PD_INDEX, roce_info->pd_id) | + FIELD_PREP(RDMAQPC_RX_QPSTATE, info->next_qp_state) | + FIELD_PREP(RDMAQPC_RX_SRC_MAC_LOW32, mac); + dma_wmb(); + + set_64bit_val(qp_ctx, 336, hdr); + hdr = FIELD_PREP(RDMAQPC_RX_DEST_QP_HIGH12, + RS_64_1(roce_info->dest_qp, 12)) | + FIELD_PREP(RDMAQPC_RX_FLOWLABEL, udp->flow_label) | + FIELD_PREP(RDMAQPC_RX_TTL, udp->ttl) | + FIELD_PREP(RDMAQPC_RX_TOS, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? + ZXDH_QP_UD_TOS : + udp->tos) | + FIELD_PREP(RDMAQPC_RX_VLANTAG, udp->vlan_tag); + dma_wmb(); + + set_64bit_val(qp_ctx, 344, hdr); + + if (info->use_srq) { + set_64bit_val(qp_ctx, 352, + FIELD_PREP(RDMAQPC_RX_SRQN, + qp->srq->srq_uk.srq_id)); + } else if (qp->is_nvmeof_ioq) { + set_64bit_val(qp_ctx, 352, + FIELD_PREP(RDMAQPC_RX_NVMEOF_QID, + qp->nvmeof_qid) | + FIELD_PREP(RDMAQPC_RX_IS_NVMEOF_TGT, + qp->nvme_flush_qp ? 
1 : qp->is_nvmeof_tgt)); + } else { + set_64bit_val(qp_ctx, 352, qp->rq_pa); + } + + set_64bit_val(qp_ctx, 360, qp->shadow_area_pa); + set_64bit_val(qp_ctx, 368, + FIELD_PREP(RDMAQPC_RX_HDR_LEN, header_len) | + FIELD_PREP(RDMAQPC_RX_PKEY, roce_info->p_key) | + FIELD_PREP(RDMAQPC_RX_SRC_PORTNUM, + udp->src_port)); + hdr = FIELD_PREP(RDMAQPC_RX_WQE_SIGN_EN, 0) | + FIELD_PREP(RDMAQPC_RX_RQ_VMAP, qp->virtual_map) | + FIELD_PREP(RDMAQPC_RX_IRD_SIZE, + zxdh_sc_get_encoded_ird_size(roce_info->ird_size)) | + FIELD_PREP(RDMAQPC_RX_LOG_RQSIZE, qp->hw_rq_size) | + FIELD_PREP(RDMAQPC_RX_SEND_EN, 1) | + FIELD_PREP(RDMAQPC_RX_WRITE_EN, roce_info->wr_rdresp_en) | + FIELD_PREP(RDMAQPC_RX_READ_EN, roce_info->rd_en) | + FIELD_PREP(RDMAQPC_RX_LOG_RQE_SIZE, qp->qp_uk.rq_wqe_size) | + FIELD_PREP(RDMAQPC_RX_USE_SRQ, info->use_srq) | + FIELD_PREP(RDMAQPC_RX_CQN, info->rcv_cq_num) | + FIELD_PREP(RDMAQPC_RX_DEST_QP_LOW12, roce_info->dest_qp) | + FIELD_PREP(RDMAQPC_RX_RQ_LPBL_SIZE, qp->virtual_map ? 1 : 0) | + FIELD_PREP(RDMAQPC_RX_RSV_LKEY_EN, roce_info->priv_mode_en) | + FIELD_PREP(RDMAQPC_RX_RNR_TIMER, udp->min_rnr_timer) | + FIELD_PREP(RDMAQPC_RX_ACK_CREDITS, + (info->use_srq || qp->is_nvmeof_ioq || !qp->is_credit_en) ? 1 : 0); + dma_wmb(); + + set_64bit_val(qp_ctx, 376, hdr); + set_64bit_val( + qp_ctx, 384, + FIELD_PREP(RDMAQPC_RX_QP_GROUP_NUM, + gqp_id) | + FIELD_PREP(RDMAQPC_RX_QP_FLOW_SET, qp->qp_uk.qp_8k_index) | + FIELD_PREP(RDMAQPC_RX_DEBUG_SET, qp->dev->vhca_id) | + FIELD_PREP(RDMAQPC_RX_VHCA_ID, qp->dev->vhca_id) | + FIELD_PREP(RDMAQPC_RX_QUEUE_TC, + (service_type == ZXDH_QP_SERVICE_TYPE_UD) ? 
+ ZXDH_QP_UD_QUEUE_TC : + qp->qp_uk.user_pri)); + set_64bit_val(qp_ctx, 392, info->qp_compl_ctx); + set_64bit_val(qp_ctx, 400, + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR1, + udp->dest_ip_addr[1]) | + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR0, + udp->dest_ip_addr[0])); + set_64bit_val(qp_ctx, 408, + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR3, + udp->dest_ip_addr[3]) | + FIELD_PREP(RDMAQPC_RX_DEST_IPADDR2, + udp->dest_ip_addr[2])); + set_64bit_val(qp_ctx, 416, + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR1, + udp->local_ipaddr[1]) | + FIELD_PREP(RDMAQPC_RX_LOCAL_IPADDR0, + udp->local_ipaddr[0])); + + print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16, 8, + qp_ctx, ZXDH_QP_CTX_SIZE, false); +} + +/** + * zxdh_sc_alloc_stag - mr stag alloc + * @dev: sc device struct + * @info: stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_alloc_stag(struct zxdh_sc_dev *dev, + struct zxdh_allocate_stag_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + u32 pd_h, pd_l; + enum zxdh_page_size page_size; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else + page_size = ZXDH_PAGE_SIZE_4K; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + if (info->chunk_size) + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX, + info->first_pm_pbl_idx)); + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l) | + FIELD_PREP(ZXDH_CQPSQ_STAG_STAGLEN, + info->total_len)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_ALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + 
FIELD_PREP(ZXDH_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_ARIGHTS, info->access_rights) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HPAGESIZE, page_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_INVALID_EN, 1); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_mr_reg_non_shared - non-shared mr registration + * @dev: sc device struct + * @info: mr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_mr_reg_non_shared(struct zxdh_sc_dev *dev, + struct zxdh_reg_ns_stag_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 fbo; + struct zxdh_sc_cqp *cqp; + u64 hdr; + u32 pble_obj_cnt, pd_h, pd_l; + u8 addr_type; + enum zxdh_page_size page_size; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else if (info->page_size == 0x1000) + page_size = ZXDH_PAGE_SIZE_4K; + else + return -EINVAL; + + pble_obj_cnt = dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt; + if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt) + return -EINVAL; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + fbo = info->va & (info->page_size - 1); + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + set_64bit_val(wqe, 8, + (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED ? 
info->va : + fbo)); + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_KEY, info->stag_key) | + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + if (!info->chunk_size) { + set_64bit_val(wqe, 32, info->reg_addr_pa); + } else { + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX, + info->first_pm_pbl_index)); + } + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STAG_STAGLEN, info->total_len) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + addr_type = (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED) ? 1 : 0; + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_REG_MR) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_VABASEDTO, addr_type) | + FIELD_PREP(ZXDH_CQPSQ_STAG_SHARED, 0) | + FIELD_PREP(ZXDH_CQPSQ_STAG_ARIGHTS, info->access_rights) | + FIELD_PREP(ZXDH_CQPSQ_STAG_HPAGESIZE, page_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_LPBLSIZE, info->chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 1) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_INVALID_EN, 0) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_FORCE_DEL, 0); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_dealloc_stag - deallocate stag + * @dev: sc device struct + * @info: dealloc stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_dealloc_stag(struct zxdh_sc_dev *dev, + struct zxdh_dealloc_stag_info *info, + u64 scratch, bool post_sq) +{ + u64 hdr; + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u32 pd_h, pd_l; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + 
return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->stag_idx) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DEALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, info->mr) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_FORCE_DEL, 0); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_mw_alloc - mw allocate + * @dev: sc device struct + * @info: memory window allocation information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_mw_alloc(struct zxdh_sc_dev *dev, + struct zxdh_mw_alloc_info *info, u64 scratch, + bool post_sq) +{ + u64 hdr; + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u32 pd_h, pd_l; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + pd_l = info->pd_id & 0x3FFFF; + pd_h = (info->pd_id >> 18) & 0x03; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_ALLOC_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MWTYPE, info->mw_wide) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY, + info->mw1_bind_dont_vldt_key) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR, 0); + + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_CQPSQ_STAG_IDX, info->mw_stag_index) | + FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_HIG, pd_h)); + + set_64bit_val(wqe, 40, FIELD_PREP(ZXDH_CQPSQ_STAG_MR_PDID_LOW, pd_l)); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: 
MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp + * @qp: sc qp struct + * @info: fast mr info + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_mr_fast_register(struct zxdh_sc_qp *qp, + struct zxdh_fast_reg_stag_info *info, bool post_sq) +{ + u64 temp, hdr; + __le64 *wqe; + u32 wqe_idx; + bool local_fence = true; + enum zxdh_page_size page_size; + struct zxdh_post_sq_info sq_info = {}; + + if (info->page_size == 0x40000000) + page_size = ZXDH_PAGE_SIZE_1G; + else if (info->page_size == 0x200000) + page_size = ZXDH_PAGE_SIZE_2M; + else + page_size = ZXDH_PAGE_SIZE_4K; + + sq_info.wr_id = info->wr_id; + sq_info.signaled = info->signaled; + + wqe = zxdh_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, + ZXDH_QP_WQE_MIN_QUANTA, 0, &sq_info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(&qp->qp_uk, wqe_idx); + + temp = (info->addr_type == ZXDH_ADDR_TYPE_VA_BASED) ? 
+ (uintptr_t)info->va : + info->fbo; + set_64bit_val(wqe, 8, temp); + + set_64bit_val(wqe, 16, + info->total_len | FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, + info->first_pm_pbl_index)); + + temp = info->first_pm_pbl_index >> 16; + + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) | + FIELD_PREP(IRDMAQPSQ_PBLADDR, + info->reg_addr_pa >> + ZXDH_HW_PAGE_SHIFT)); + + hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) | + FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) | + FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) | + FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) | + FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) | + FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_FAST_REG_MR) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_QP_WQE_MIN_SIZE, false); + + if (post_sq) + zxdh_uk_qp_post_wr(&qp->qp_uk); + + return 0; +} + +/** + * zxdh_sc_dev_qplist_init - Init the qos qplist + * @dev: pointer to dev + */ +void zxdh_sc_dev_qplist_init(struct zxdh_sc_dev *dev) +{ + u8 i; + + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + mutex_init(&dev->qos[i].qos_mutex); + INIT_LIST_HEAD(&dev->qos[i].qplist); + } +} + +/** + * zxdh_get_encoded_wqe_size - given wq size, returns hardware encoded size + * @wqsize: size of the wq (sq, rq) to encoded_size + * @queue_type: queue type selected for the calculation algorithm + */ +u8 zxdh_get_encoded_wqe_size(u32 wqsize, enum zxdh_queue_type queue_type) +{ + u8 encoded_size = 0; + + /* cqp sq's hw coded value starts from 1 for size of 4 + * while it starts from 0 for qp' wq's. 
+ */ + if (queue_type == ZXDH_QUEUE_TYPE_CQP) + encoded_size = 1; + while (wqsize >>= 1) + encoded_size++; + + return encoded_size; +} + +/** + * zxdh_sc_gather_stats - collect the statistics + * @cqp: struct for cqp hw + * @info: gather stats info structure + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_gather_stats(struct zxdh_sc_cqp *cqp, + struct zxdh_stats_gather_info *info, + u64 scratch) +{ + __le64 *wqe; + u64 temp; + + if (info->stats_buff_mem.size < ZXDH_GATHER_STATS_BUF_SIZE) + return -ENOSPC; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STATS_HMC_FCN_INDEX, + info->hmc_fcn_index)); + set_64bit_val(wqe, 32, info->stats_buff_mem.pa); + + temp = FIELD_PREP(ZXDH_CQPSQ_STATS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_INST, info->use_stats_inst) | + FIELD_PREP(ZXDH_CQPSQ_STATS_INST_INDEX, info->stats_inst_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX, + info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_OP, ZXDH_CQP_OP_GATHER_STATS); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_stats_inst - allocate or free stats instance + * @cqp: struct for cqp hw + * @info: stats info structure + * @alloc: alloc vs. 
delete flag + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_manage_stats_inst(struct zxdh_sc_cqp *cqp, + struct zxdh_stats_inst_info *info, + bool alloc, u64 scratch) +{ + __le64 *wqe; + u64 temp; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_STATS_HMC_FCN_INDEX, + info->hmc_fn_id)); + temp = FIELD_PREP(ZXDH_CQPSQ_STATS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_STATS_ALLOC_INST, alloc) | + FIELD_PREP(ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX, + info->use_hmc_fcn_index) | + FIELD_PREP(ZXDH_CQPSQ_STATS_INST_INDEX, info->stats_idx) | + FIELD_PREP(ZXDH_CQPSQ_STATS_OP, ZXDH_CQP_OP_MANAGE_STATS); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_set_up_map - set the up map table + * @cqp: struct for cqp hw + * @info: User priority map info + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_set_up_map(struct zxdh_sc_cqp *cqp, + struct zxdh_up_info *info, u64 scratch) +{ + __le64 *wqe; + u64 temp; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = info->map[0] | LS_64_1(info->map[1], 8) | + LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) | + LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) | + LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56); + + set_64bit_val(wqe, 0, temp); + set_64bit_val( + wqe, 40, + FIELD_PREP(ZXDH_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) | + FIELD_PREP(ZXDH_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx)); + + temp = FIELD_PREP(ZXDH_CQPSQ_UP_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_UP_USEVLAN, info->use_vlan) | + FIELD_PREP(ZXDH_CQPSQ_UP_USEOVERRIDE, + info->use_cnp_up_override) | + 
FIELD_PREP(ZXDH_CQPSQ_UP_OP, ZXDH_CQP_OP_UP_MAP); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_ws_node - create/modify/destroy WS node + * @cqp: struct for cqp hw + * @info: node info structure + * @node_op: 0 for add 1 for modify, 2 for delete + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_manage_ws_node(struct zxdh_sc_cqp *cqp, + struct zxdh_ws_node_info *info, + enum zxdh_ws_node_op node_op, u64 scratch) +{ + __le64 *wqe; + u64 temp = 0; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_CQPSQ_WS_VSI, info->vsi) | + FIELD_PREP(ZXDH_CQPSQ_WS_WEIGHT, info->weight)); + + temp = FIELD_PREP(ZXDH_CQPSQ_WS_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODEOP, node_op) | + FIELD_PREP(ZXDH_CQPSQ_WS_ENABLENODE, info->enable) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODETYPE, info->type_leaf) | + FIELD_PREP(ZXDH_CQPSQ_WS_PRIOTYPE, info->prio_type) | + FIELD_PREP(ZXDH_CQPSQ_WS_TC, info->tc) | + FIELD_PREP(ZXDH_CQPSQ_WS_OP, ZXDH_CQP_OP_WORK_SCHED_NODE) | + FIELD_PREP(ZXDH_CQPSQ_WS_PARENTID, info->parent_id) | + FIELD_PREP(ZXDH_CQPSQ_WS_NODEID, info->id); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_qp_flush_wqes - flush qp's wqe + * @qp: sc qp + * @info: dlush information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_qp_flush_wqes(struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, u64 scratch, + bool post_sq) +{ + u64 temp = 0; + 
__le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + bool flush_sq = false, flush_rq = false; + + if (info->rq && !qp->flush_rq) + flush_rq = true; + if (info->sq && !qp->flush_sq) + flush_sq = true; + qp->flush_sq |= flush_sq; + qp->flush_rq |= flush_rq; + + if (!flush_sq && !flush_rq) { + pr_err("CQP: Additional flush request ignored for qp %x\n", + qp->qp_uk.qp_id); + return -EALREADY; + } + + cqp = qp->pd->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + if (info->userflushcode) { + if (flush_rq) + temp |= FIELD_PREP(ZXDH_CQPSQ_FWQE_RQMNERR, + info->rq_minor_code) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_RQMJERR, + info->rq_major_code); + if (flush_sq) + temp |= FIELD_PREP(ZXDH_CQPSQ_FWQE_SQMNERR, + info->sq_minor_code) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_SQMJERR, + info->sq_major_code); + } + set_64bit_val(wqe, 8, temp); + + temp = (info->generate_ae) ? + info->ae_code | FIELD_PREP(ZXDH_CQPSQ_FWQE_AESOURCE, + info->ae_src) : + 0; + set_64bit_val(wqe, 16, temp); + + hdr = qp->qp_uk.qp_id | + FIELD_PREP(ZXDH_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_USERFLCODE, info->userflushcode) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_FLUSHSQ, flush_sq) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_FLUSHRQ, flush_rq) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_FLUSH_WQES); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_gen_ae - generate AE, uses flush WQE CQP OP + * @qp: sc qp + * @info: gen ae information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_gen_ae(struct zxdh_sc_qp *qp, struct zxdh_gen_ae_info *info, + u64 scratch, bool post_sq) +{ + u64 temp; + __le64 *wqe; + 
struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = qp->pd->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = info->ae_code | + FIELD_PREP(ZXDH_CQPSQ_FWQE_AESOURCE, info->ae_src); + set_64bit_val(wqe, 8, temp); + + hdr = qp->qp_uk.qp_id | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_GEN_AE) | + FIELD_PREP(ZXDH_CQPSQ_FWQE_GENERATE_AE, 1) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/*** zxdh_sc_qp_upload_context - upload qp's context + * @dev: sc device struct + * @info: upload context info ptr for return + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_qp_upload_context(struct zxdh_sc_dev *dev, + struct zxdh_upload_context_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->buf_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_UCTX_QPID, info->qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_UPLOAD_QPC) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_QPTYPE, info->qp_type) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_RAWFORMAT, info->raw_format) | + FIELD_PREP(ZXDH_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_manage_push_page - Handle push page + * @cqp: struct for cqp hw + * @info: push page info + * 
@scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_push_page(struct zxdh_sc_cqp *cqp, + struct zxdh_cqp_manage_push_page_info *info, + u64 scratch, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, info->qs_handle); + hdr = FIELD_PREP(ZXDH_CQPSQ_MPP_PPTYPE, info->push_page_type) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_PUSH_PAGES) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_MPP_FREE_PAGE, info->free_page); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_suspend_qp - suspend qp for param change + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_suspend_qp(struct zxdh_sc_cqp *cqp, struct zxdh_sc_qp *qp, + u64 scratch) +{ + u64 hdr; + __le64 *wqe; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_SUSPEND_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_resume_qp - resume qp after suspend + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_resume_qp(struct zxdh_sc_cqp *cqp, struct zxdh_sc_qp *qp, + u64 scratch) +{ + u64 
hdr; + __le64 *wqe; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_RESUME_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_init - initialize completion q + * @cq: cq struct + * @info: cq initialization info + */ +int zxdh_sc_cq_init(struct zxdh_sc_cq *cq, struct zxdh_cq_init_info *info) +{ + u32 pble_obj_cnt; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cq->cq_pa = info->cq_base_pa; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + cq->ceq_index = info->ceq_index; + info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; + zxdh_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); + + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->ceqe_mask = info->ceqe_mask; + cq->cq_type = (info->type) ? 
info->type : ZXDH_CQ_TYPE_IO; + cq->shadow_area_pa = info->shadow_area_pa; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + return 0; +} + +/** + * zxdh_sc_cq_create - create completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_cq_create(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 temp; + u64 hdr; + struct zxdh_sc_ceq *ceq; + int ret_code = 0; + + cqp = cq->dev->cqp; + if (cq->cq_uk.cq_id > + (cqp->dev->base_cqn + + cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].max_cnt - 1)) + return -EINVAL; + + if (cq->ceq_index > (cq->dev->max_ceqs - 1)) + return -EINVAL; + + ceq = cq->dev->ceq[cq->ceq_index]; + if (ceq && ceq->reg_cq) + ret_code = zxdh_sc_add_cq_ctx(ceq, cq); + + if (ret_code) + return ret_code; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) { + if (ceq && ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, cq); + return -ENOSPC; + } + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 1) | + FIELD_PREP(ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, cq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | + FIELD_PREP(ZXDH_CQPSQ_CQ_DEBUG_SET, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VHCAID, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(cq->shadow_area_pa, 6)); + + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CEQ_ID, + 
(cq->ceq_id_valid ? cq->ceq_id : 0)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ST, cq->cq_st) | + FIELD_PREP(ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT, cq->is_in_list_cnt) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, cq->cq_uk.cq_log_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + cq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); // hw self-maintenance field + set_64bit_val(wqe, 48, + cq->virtual_map ? cq->first_pm_pbl_idx : + RS_64_1(cq->cq_pa, 8)); + set_64bit_val(wqe, 56, RS_64_1(cq, 1)); + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_destroy - destroy completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_cq_destroy(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + struct zxdh_sc_ceq *ceq; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + ceq = cq->dev->ceq[cq->ceq_index]; + if (ceq && ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, cq); + + if (cq->cq_overflow_locked_flag) + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + else + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_CQ_STATE)); + + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, 0); + set_64bit_val(wqe, 32, 0); + set_64bit_val(wqe, 40, 0); + set_64bit_val(wqe, 48, 0); + set_64bit_val(wqe, 56, 0); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(cq->dev, 
cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cq_resize - set resized cq buffer info + * @cq: resized cq + * @info: resized cq buffer info + */ +void zxdh_sc_cq_resize(struct zxdh_sc_cq *cq, struct zxdh_modify_cq_info *info) +{ + cq->virtual_map = info->virtual_map; + cq->cq_pa = info->cq_pa; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->pbl_chunk_size = info->pbl_chunk_size; + zxdh_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); +} + +/** + * zxdh_sc_cq_modify - modify a Completion Queue + * @cq: cq struct + * @info: modification info struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag to post to sq + */ +static int zxdh_sc_cq_modify(struct zxdh_sc_cq *cq, + struct zxdh_modify_cq_info *info, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + u64 temp; + u32 pble_obj_cnt; + + pble_obj_cnt = cq->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + if (info->cq_resize && info->virtual_map && + info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_RESIZE)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 1) | + FIELD_PREP(ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, cq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) | + FIELD_PREP(ZXDH_CQPSQ_CQ_DEBUG_SET, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VHCAID, cq->dev->vhca_id) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + 
FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(cq->shadow_area_pa, 6)); + + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CEQ_ID, + (cq->ceq_id_valid ? cq->ceq_id : 0)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ST, cq->cq_st) | + FIELD_PREP(ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT, cq->is_in_list_cnt) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, + zxdh_num_to_log(info->cq_size)) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + cq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); // hw self-maintenance field + set_64bit_val(wqe, 48, + info->virtual_map ? info->first_pm_pbl_idx : + RS_64_1(info->cq_pa, 8)); + set_64bit_val(wqe, 56, RS_64_1(cq, 1)); + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_CQ_MODIFY_SIZE, 1) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_modify_cq_moderation - modify cq_count and cq_period of a Completion Queue + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag to post to sq + */ +static int zxdh_sc_modify_cq_moderation(struct zxdh_sc_cq *cq, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + u64 temp; + + cqp = cq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_MODIFY)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQMAX, cq->cq_max) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQPERIOD, cq->cq_period) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN, + cq->scqe_break_moderation_en); + + dma_wmb(); 
+ set_64bit_val(wqe, 16, temp); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_CQ_MODIFY_SIZE, 0) | + FLD_LS_64(cq->dev, cq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_check_cqp_progress - check cqp processing progress + * @timeout: timeout info struct + * @dev: sc device struct + */ +void zxdh_check_cqp_progress(struct zxdh_cqp_timeout *timeout, + struct zxdh_sc_dev *dev) +{ + if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]) { + timeout->compl_cqp_cmds = dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]; + timeout->count = 0; + } else { + if (dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS] != + timeout->compl_cqp_cmds) + timeout->count++; + } +} + +/** + * zxdh_get_cqp_reg_info - get head and tail for cqp using registers + * @cqp: struct for cqp hw + * @val: cqp tail register value + * @tail: wqtail register value + * @error: cqp processing err + */ +static inline void zxdh_get_cqp_reg_info(struct zxdh_sc_cqp *cqp, u32 *val, + u32 *tail, u32 *error) +{ + *val = readl(cqp->dev->hw->hw_addr + C_RDMA_CQP_TAIL); + *tail = (u32)FIELD_GET(ZXDH_CQPTAIL_WQTAIL, *val); + *error = readl(cqp->dev->hw->hw_addr + C_RDMA_CQP_ERROR); +} + +/** + * zxdh_cqp_poll_registers - poll cqp registers + * @cqp: struct for cqp hw + * @tail: wqtail register value + * @count: how many times to try for completion + */ +int zxdh_cqp_poll_registers(struct zxdh_sc_cqp *cqp, u32 tail, u32 count) +{ + u32 i = 0; + u32 newtail, error, val; + struct zxdh_pci_f *rf = + container_of(cqp->dev, struct zxdh_pci_f, sc_dev); + + while (i++ < count) { + zxdh_get_cqp_reg_info(cqp, &val, &newtail, &error); + if (error) { + error = readl(cqp->dev->hw->hw_addr + + C_RDMA_CQP_ERRCODE); + if 
(cqp->dev->hw_attrs.self_health == false) + pr_err("CQP: CQPERRCODES error_code[x%08X]\n", error); + return -EIO; + } + if (newtail != tail) { + /* SUCCESS */ + if (cqp->sq_ring.head == cqp->sq_ring.tail) + pr_info("[%s] cqp_err init_state:%d vhca_id:%d head:%d tail:%d\n", __func__, + rf->init_state, cqp->dev->vhca_id, cqp->sq_ring.head, cqp->sq_ring.tail); + + ZXDH_RING_MOVE_TAIL(cqp->sq_ring); + cqp->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]++; + return 0; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + } + + return -ETIMEDOUT; +} + +/** + * zxdh_sc_find_reg_cq - find cq ctx index + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +static u32 zxdh_sc_find_reg_cq(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + u32 i; + + for (i = 0; i < ceq->reg_cq_size; i++) { + if (cq == ceq->reg_cq[i]) + return i; + } + + return ZXDH_INVALID_CQ_IDX; +} + +/** + * zxdh_sc_add_cq_ctx - add cq ctx tracking for ceq + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +int zxdh_sc_add_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + unsigned long flags; + + spin_lock_irqsave(&ceq->req_cq_lock, flags); + + if (ceq->reg_cq_size == ceq->elem_cnt) { + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + return -ENOSPC; + } + + ceq->reg_cq[ceq->reg_cq_size++] = cq; + + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + + return 0; +} + +/** + * zxdh_sc_remove_cq_ctx - remove cq ctx tracking for ceq + * @ceq: ceq sc structure + * @cq: cq sc structure + */ +void zxdh_sc_remove_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq) +{ + unsigned long flags; + u32 cq_ctx_idx; + + spin_lock_irqsave(&ceq->req_cq_lock, flags); + cq_ctx_idx = zxdh_sc_find_reg_cq(ceq, cq); + if (cq_ctx_idx == ZXDH_INVALID_CQ_IDX) + goto exit; + + ceq->reg_cq_size--; + if (cq_ctx_idx != ceq->reg_cq_size) + ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; + ceq->reg_cq[ceq->reg_cq_size] = NULL; + +exit: + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); +} + +/** + * 
zxdh_sc_cqp_init - Initialize buffers for a control Queue Pair + * @cqp: IWARP control queue pair pointer + * @info: IWARP control queue pair init info pointer + * + * Initializes the object and context buffers for a control Queue Pair. + */ +int zxdh_sc_cqp_init(struct zxdh_sc_cqp *cqp, struct zxdh_cqp_init_info *info) +{ + u8 hw_sq_size; + + if (info->sq_size > ZXDH_CQP_SW_SQSIZE_2048 || + info->sq_size < ZXDH_CQP_SW_SQSIZE_4 || + ((info->sq_size & (info->sq_size - 1)))) + return -EINVAL; + + hw_sq_size = + zxdh_get_encoded_wqe_size(info->sq_size, ZXDH_QUEUE_TYPE_CQP); + cqp->size = sizeof(*cqp); + cqp->sq_size = info->sq_size; + cqp->hw_sq_size = hw_sq_size; + cqp->sq_base = info->sq; + cqp->sq_pa = info->sq_pa; + cqp->dev = info->dev; + cqp->struct_ver = info->struct_ver; + cqp->hw_maj_ver = info->hw_maj_ver; + cqp->hw_min_ver = info->hw_min_ver; + cqp->scratch_array = info->scratch_array; + cqp->polarity = 0; + cqp->en_datacenter_tcp = info->en_datacenter_tcp; + cqp->ena_vf_count = info->ena_vf_count; + cqp->hmc_profile = info->hmc_profile; + cqp->ceqs_per_vf = info->ceqs_per_vf; + cqp->disable_packed = info->disable_packed; + cqp->rocev2_rto_policy = info->rocev2_rto_policy; + cqp->protocol_used = info->protocol_used; + cqp->state_cfg = true; // CQP Create: true, CQP Destroy: false + memcpy(&cqp->dcqcn_params, &info->dcqcn_params, + sizeof(cqp->dcqcn_params)); + info->dev->cqp = cqp; + + ZXDH_RING_INIT(cqp->sq_ring, cqp->sq_size); + cqp->dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS] = 0; + cqp->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS] = 0; + /* for the cqp commands backlog. 
*/ + INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); + + writel(ZXDH_CQPDB_INIT_VALUE, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_DB)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_BASE_HIGH)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_BASE_LOW)); + writel(0, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_AH_CACHE_ID)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_MGC_INDICATE_ID)); + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + return 0; +} + +/** + * zxdh_sc_cqp_create - create cqp during bringup + * @cqp: struct for cqp hw + * @maj_err: If error, major err number + * @min_err: If error, minor err number + */ +int zxdh_sc_cqp_create(struct zxdh_sc_cqp *cqp, u16 *maj_err, u16 *min_err) +{ + u32 temp; + u32 cnt = 0, val = 0, err_code; + int ret_code; + struct zxdh_pci_f *rf = + container_of(cqp->dev, struct zxdh_pci_f, sc_dev); + + spin_lock_init(&cqp->dev->cqp_lock); + + //reset CQP status + writel(0, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONFIG_DONE)); + mdelay(5); + + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + pr_info("%s reset cqp timeout!\n", __func__); + break; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (val & 0x01); + cnt = 0; + + // VF_PF_ID + temp = (u32)(FIELD_PREP(ZXDH_CQP_CREATE_EPID, + (rf->ep_id + ZXDH_HOST_EP0_ID)) | + FIELD_PREP(ZXDH_CQP_CREATE_VFID, rf->vf_id) | + FIELD_PREP(ZXDH_CQP_CREATE_PFID, rf->pf_id) | + FIELD_PREP(ZXDH_CQP_CREATE_VFUNC_ACTIVE, rf->ftype)); + writel(temp, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_PF_VF_ID(cqp->dev->vhca_id))); + + // CQP_Context_0 + temp = (u32)(FIELD_PREP(ZXDH_CQP_CREATE_STATE_CFG, cqp->state_cfg) | + FIELD_PREP(ZXDH_CQP_CREATE_SQSIZE, cqp->sq_size) | + FIELD_PREP(ZXDH_CQP_CREATE_QPC_OBJ_IDX, 11) | + 
FIELD_PREP(ZXDH_CQP_CREATE_QPC_INDICATE_IDX, 2) | + FIELD_PREP(ZXDH_CQP_CREATE_OBJ_IDX, 11) | + FIELD_PREP(ZXDH_CQP_CREATE_INDICATE_IDX, 2)); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_0)); + // CQP_Context_1 + writel(cqp->dev->base_qpn, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_1)); + // CQP_Context_2 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_HIGH, cqp->sq_pa); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_2)); + // CQP_Context_3 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_LOW, cqp->sq_pa); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_3)); + // CQP_Context_4 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_HIGH, (uintptr_t)cqp); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_4)); + // CQP_Context_5 + temp = (u32)FIELD_GET(ZXDH_CQPADDR_LOW, (uintptr_t)cqp); + writel(temp, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_5)); + + // CQP_CQ_NUM INIT + writel(ZXDH_CCQN_INIT_VALUE, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + wmb(); /* make sure WQE is populated before valid bit is set */ + // CQP_Config_Done + writel(1, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CONFIG_DONE)); + +#ifdef ZXDH_DEBUG + writel(1, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_STATUS)); +#endif + + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + goto err; + } + udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (!val); + + if (FLD_RS_32(cqp->dev, val, ZXDH_CCQPSTATUS_CCQP_ERR)) { + ret_code = -EOPNOTSUPP; + goto err; + } + + cqp->process_config_pte_table = zxdh_sc_config_pte_table; + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + return 0; +err: + err_code = readl( + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_ERRCODE)); + *min_err = 
(u16)FIELD_GET(ZXDH_CQPERRCODES_CQP_MINOR_CODE, err_code); + *maj_err = (u16)FIELD_GET(ZXDH_CQPERRCODES_CQP_MAJOR_CODE, err_code); + return ret_code; +} + +/** + * zxdh_sc_cqp_post_sq - post of cqp's sq + * @cqp: struct for cqp hw + */ +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp) +{ + u32 hdr; + u8 polarity = 0; + + polarity = ((ZXDH_RING_CURRENT_HEAD(cqp->sq_ring) == 0) ? + !cqp->polarity : + cqp->polarity); + hdr = FIELD_PREP(ZXDH_CQPSQ_DBPOLARITY, polarity) | + FIELD_PREP(ZXDH_CQPSQ_DBRINGHEAD, + ZXDH_RING_CURRENT_HEAD(cqp->sq_ring)); + + dma_wmb(); + + writel(hdr, cqp->dev->cqp_db); +} +/** + * zxdh_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq + * and pass back index + * @cqp: CQP HW structure + * @scratch: private data for CQP WQE + * @wqe_idx: WQE index of CQP SQ + */ +__le64 *zxdh_sc_cqp_get_next_send_wqe_idx(struct zxdh_sc_cqp *cqp, u64 scratch, + u32 *wqe_idx) +{ + __le64 *wqe = NULL; + int ret_code; + + if (ZXDH_RING_FULL_ERR(cqp->sq_ring)) { + if (cqp->dev->hw_attrs.self_health == false) + pr_err("WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", + cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size); + return NULL; + } + ZXDH_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + cqp->dev->cqp_cmd_stats[ZXDH_OP_REQ_CMDS]++; + if (!*wqe_idx) + cqp->polarity = !cqp->polarity; + wqe = cqp->sq_base[*wqe_idx].elem; + cqp->scratch_array[*wqe_idx] = scratch; + + memset(&wqe[0], 0, 24); + memset(&wqe[4], 0, 32); + + return wqe; +} + +/** + * zxdh_sc_cqp_destroy - destroy cqp during close + * @cqp: struct for cqp hw + * @free_hwcqp: true for regular cqp destroy; false for reset path + */ +int zxdh_sc_cqp_destroy(struct zxdh_sc_cqp *cqp, bool free_hwcqp) +{ + u32 cnt = 0, val; + int ret_code = 0; + + if (free_hwcqp) { + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_CONFIG_DONE)); + do { + if (cnt++ > cqp->dev->hw_attrs.max_done_count) { + ret_code = -ETIMEDOUT; + break; + } + 
udelay(cqp->dev->hw_attrs.max_sleep_count); + val = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_STATUS)); + } while (FLD_RS_32(cqp->dev, val, ZXDH_CCQPSTATUS_CCQP_DONE)); + } + return ret_code; +} + +/** + * zxdh_sc_ccq_arm - enable intr for control cq + * @ccq: ccq sc struct + */ +void zxdh_sc_ccq_arm(struct zxdh_sc_cq *ccq) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_seq_num; + u32 cqe_index; + u32 hdr; + + get_64bit_val(ccq->cq_uk.shadow_area, 0, &temp_val); + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, temp_val); + arm_seq_num++; + cqe_index = (u32)FIELD_GET(ZXDH_CQ_DBSA_CQEIDX, temp_val); + + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, 0) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cqe_index); + + set_64bit_val(ccq->cq_uk.shadow_area, 0, temp_val); + + hdr = FIELD_PREP(ZXDH_CQ_ARM_DBSA_VLD, 0) | + FIELD_PREP(ZXDH_CQ_ARM_CQ_ID, ccq->cq_uk.cq_id); + + dma_wmb(); /* make sure shadow area is updated before arming */ + + writel(hdr, ccq->dev->cq_arm_db); +} + +/** + * zxdh_sc_ccq_get_cqe_info - get ccq's cq entry + * @ccq: ccq sc struct + * @info: completion q entry to return + */ +int zxdh_sc_ccq_get_cqe_info(struct zxdh_sc_cq *ccq, + struct zxdh_ccq_cqe_info *info) +{ + u64 qp_ctx, temp, temp1, cq_shadow_temp; + __le64 *cqe; + struct zxdh_sc_cqp *cqp; + u32 wqe_idx; + u32 error; + u8 polarity; + u8 mailbox_cqe = 0; + int ret_code = 0; + + cqe = ZXDH_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); + get_64bit_val(cqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, temp); + if (polarity != ccq->cq_uk.polarity) + return -ENOENT; + mailbox_cqe = (u8)FIELD_GET(ZXDH_CQ_MAILBOXCQE, temp); + + get_64bit_val(cqe, 8, &qp_ctx); + //cqp = (struct zxdh_sc_cqp *)(unsigned long)qp_ctx; + cqp = ccq->dev->cqp; + info->error = (bool)FIELD_GET(ZXDH_CQ_ERROR, temp); + info->maj_err_code = 
ZXDH_CQPSQ_MAJ_NO_ERROR; + info->min_err_code = (u16)FIELD_GET(ZXDH_CQ_MINERR, temp); + if (info->error) { + info->maj_err_code = (u16)FIELD_GET(ZXDH_CQ_MAJERR, temp); + cqp = ccq->dev->cqp; + error = readl((u32 __iomem *)(cqp->dev->hw->hw_addr + + C_RDMA_CQP_ERRCODE)); + if (ccq->dev->hw_attrs.self_health == false) + pr_err("CQP: CQPERRCODES error_code[x%08X]\n", error); + } + + wqe_idx = (u32)FIELD_GET(ZXDH_CQ_WQEIDX, temp); + + if (info->error) + wqe_idx = ZXDH_RING_CURRENT_TAIL(cqp->sq_ring); + + info->scratch = cqp->scratch_array[wqe_idx]; + + get_64bit_val(cqe, 16, &temp1); + info->op_ret_val = (u32)FIELD_GET(ZXDH_CCQ_OPRETVAL, temp1); + get_64bit_val(cqp->sq_base[wqe_idx].elem, 0, &temp1); + info->op_code = (u8)FIELD_GET(ZXDH_CQPSQ_OPCODE, temp1); + info->cqp = cqp; + info->mailbox_cqe = mailbox_cqe; + + if (mailbox_cqe == 1) { + get_64bit_val(cqe, 24, &temp1); + info->addrbuf[0] = temp1; + get_64bit_val(cqe, 32, &temp1); + info->addrbuf[1] = temp1; + get_64bit_val(cqe, 40, &temp1); + info->addrbuf[2] = temp1; + get_64bit_val(cqe, 48, &temp1); + info->addrbuf[3] = temp1; + get_64bit_val(cqe, 56, &temp1); + info->addrbuf[4] = temp1; + } else if (info->op_code == ZXDH_CQP_OP_WQE_DMA_READ_USECQE) { + get_64bit_val(cqe, 24, &temp1); + info->addrbuf[0] = temp1; + get_64bit_val(cqe, 32, &temp1); + info->addrbuf[1] = temp1; + get_64bit_val(cqe, 40, &temp1); + info->addrbuf[2] = temp1; + get_64bit_val(cqe, 48, &temp1); + info->addrbuf[3] = temp1; + get_64bit_val(cqe, 56, &temp1); + info->addrbuf[4] = temp1; + } + + /* move the head for cq */ + ZXDH_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); + if (!ZXDH_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) + ccq->cq_uk.polarity ^= 1; + + /* update cq tail in cq shadow memory also */ + ZXDH_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); + get_64bit_val(ccq->cq_uk.shadow_area, 0, &cq_shadow_temp); + cq_shadow_temp &= ~ZXDH_CQ_DBSA_CQEIDX; + cq_shadow_temp |= + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, + ZXDH_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); + 
set_64bit_val(ccq->cq_uk.shadow_area, 0, cq_shadow_temp); + + dma_wmb(); /* make sure shadow area is updated before moving tail */ + if ((mailbox_cqe != 1)) { + if (cqp->sq_ring.head == cqp->sq_ring.tail) + pr_info("[%s] cqp_err op_code:%d vhca_id:%d head:%d tail:%d\n", __func__, + info->op_code, cqp->dev->vhca_id, cqp->sq_ring.head, cqp->sq_ring.tail); + + ZXDH_RING_MOVE_TAIL(cqp->sq_ring); + ccq->dev->cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]++; + } + + return ret_code; +} + +/** + * zxdh_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ + * @cqp: struct for cqp hw + * @op_code: cqp opcode for completion + * @compl_info: completion q entry to return + */ +int zxdh_sc_poll_for_cqp_op_done(struct zxdh_sc_cqp *cqp, u8 op_code, + struct zxdh_ccq_cqe_info *compl_info) +{ + struct zxdh_ccq_cqe_info info = {}; + struct zxdh_sc_cq *ccq; + int ret_code = 0; + u32 cnt = 0; + u8 cqe_valid = false; + + ccq = cqp->dev->ccq; + while (1) { + if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count) + return -ETIMEDOUT; + + if (zxdh_sc_ccq_get_cqe_info(ccq, &info)) { + udelay(cqp->dev->hw_attrs.max_sleep_count); + continue; + } + if (info.error && info.op_code != ZXDH_CQP_OP_QUERY_MKEY) { + ret_code = -EIO; + break; + } + cqe_valid = true; + + /* make sure op code matches*/ + if (op_code == info.op_code) + break; + pr_err("WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n", + op_code, info.op_code); + } + + if (compl_info) + memcpy(compl_info, &info, sizeof(*compl_info)); + + if ((cqe_valid == true) && (cqp->dev->ceq_0_ok == true)) + zxdh_sc_ccq_arm(ccq); + + return ret_code; +} + +/** + * zxdh_sc_manage_hmc_pm_func_table - manage of function table + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @info: info for the manage function table operation + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_manage_hmc_pm_func_table(struct zxdh_sc_cqp *cqp, + struct zxdh_hmc_fcn_info *info, + u64 scratch, bool post_sq) 
+{ + __le64 *wqe; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_MHMC_VFIDX, info->vf_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, + ZXDH_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) | + FIELD_PREP(ZXDH_CQPSQ_MHMC_FREEPMFN, info->free_fcn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE", + DUMP_PREFIX_OFFSET, 16, 8, wqe, + ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_commit_fpm_val_done - wait for cqp eqe completion + * for fpm commit + * @cqp: struct for cqp hw + */ +static int zxdh_sc_commit_fpm_val_done(struct zxdh_sc_cqp *cqp) +{ + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_WQE_DMA_WRITE_32, + NULL); +} + +/** + * zxdh_sc_ceq_init - initialize ceq + * @ceq: ceq sc structure + * @info: ceq initialization info + */ +int zxdh_sc_ceq_init(struct zxdh_sc_ceq *ceq, struct zxdh_ceq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size || + info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) + return -EINVAL; + + if (info->ceq_index > (info->dev->max_ceqs - 1)) + return -EINVAL; + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + ceq->size = sizeof(*ceq); + ceq->ceqe_base = (struct zxdh_ceqe *)info->ceqe_base; + ceq->ceq_id = info->ceq_id; + ceq->ceq_index = info->ceq_index; + ceq->dev = info->dev; + ceq->elem_cnt = info->elem_cnt; + ceq->log2_elem_size = info->log2_elem_size; + ceq->ceq_elem_pa = info->ceqe_pa; + ceq->virtual_map = info->virtual_map; + ceq->itr_no_expire = info->itr_no_expire; + ceq->reg_cq = info->reg_cq; + ceq->reg_cq_size = 0; + spin_lock_init(&ceq->req_cq_lock); + ceq->pbl_chunk_size = 
(ceq->virtual_map ? info->pbl_chunk_size : 0); + ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); + ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); + ceq->tph_en = info->tph_en; + ceq->tph_val = info->tph_val; + ceq->msix_idx = info->msix_idx; + ceq->polarity = 1; + ZXDH_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); + ceq->dev->ceq[info->ceq_index] = ceq; + + return 0; +} + +/** + * zxdh_sc_ceq_create - create ceq wqe + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_ceq_create(struct zxdh_sc_ceq *ceq, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + + cqp = ceq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CEQC_PERIOD_L, 0) | + FIELD_PREP(ZXDH_CEQC_VHCA, ceq->dev->vhca_id) | + FIELD_PREP(ZXDH_CEQC_INTR_IDX, ceq->msix_idx) | + FIELD_PREP(ZXDH_CEQC_INT_TYPE, ZXDH_IRQ_TYPE_MSIX) | + FIELD_PREP(ZXDH_CEQC_CEQ_HEAD, 0) | + FIELD_PREP(ZXDH_CEQC_CEQE_VALID, ceq->polarity) | + FIELD_PREP(ZXDH_CEQC_LEAF_PBL_SIZE, ceq->pbl_chunk_size) | + // FIELD_PREP(ZXDH_CEQC_VIRTUALLY_MAPPED, ceq->virtual_map) | + FIELD_PREP(ZXDH_CEQC_CEQ_SIZE, ZXDH_CEQE_SIZE_16_BYTE) | + FIELD_PREP(ZXDH_CEQC_LOG_CEQ_NUM, ceq->log2_elem_size) | + FIELD_PREP(ZXDH_CEQC_CEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + + set_64bit_val(wqe, 8, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_ADDRESS, + ceq->virtual_map ? 
+ ceq->first_pm_pbl_idx : + RS_64_1(ceq->ceq_elem_pa, 7)) | //右移7bit + FIELD_PREP(ZXDH_CEQC_PERIOD_H, 0); + dma_wmb(); + set_64bit_val(wqe, 16, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_MAX_CNT, IRMDA_CEQ_AGGREGATION_CNT_0) | + FIELD_PREP(ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG, 0); + dma_wmb(); + set_64bit_val(wqe, 24, hdr); + + hdr = FIELD_PREP(ZXDH_CQPSQ_CEQ_CEQID, ceq->ceq_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_cceq_create_done - poll for control ceq wqe to complete + * @ceq: ceq sc structure + */ +static int zxdh_sc_cceq_create_done(struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_CREATE_CEQ, NULL); +} + +/** + * zxdh_sc_cceq_destroy_done - poll for destroy cceq to complete + * @ceq: ceq sc structure + */ +int zxdh_sc_cceq_destroy_done(struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cqp *cqp; + + if (ceq->reg_cq) + zxdh_sc_remove_cq_ctx(ceq, ceq->dev->ccq); + + cqp = ceq->dev->cqp; + + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_DESTROY_CEQ, NULL); +} + +/** + * zxdh_sc_cceq_create - create cceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + */ +int zxdh_sc_cceq_create(struct zxdh_sc_ceq *ceq, u64 scratch) +{ + int ret_code; + + if (ceq->reg_cq) { + ret_code = zxdh_sc_add_cq_ctx(ceq, ceq->dev->ccq); + if (ret_code) + return ret_code; + } + + ret_code = zxdh_sc_ceq_create(ceq, scratch, true); + if (!ret_code) + return zxdh_sc_cceq_create_done(ceq); + + return ret_code; +} + +/** + * zxdh_sc_ceq_destroy - destroy ceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used 
during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ceq_destroy(struct zxdh_sc_ceq *ceq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + + cqp = ceq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CEQC_PERIOD_L, 0) | + FIELD_PREP(ZXDH_CEQC_VHCA, ceq->dev->vhca_id) | + FIELD_PREP(ZXDH_CEQC_INTR_IDX, ceq->msix_idx) | + FIELD_PREP(ZXDH_CEQC_INT_TYPE, ZXDH_IRQ_TYPE_PIN) | + FIELD_PREP(ZXDH_CEQC_CEQ_HEAD, 0) | + FIELD_PREP(ZXDH_CEQC_CEQE_VALID, ceq->polarity) | + FIELD_PREP(ZXDH_CEQC_LEAF_PBL_SIZE, ceq->pbl_chunk_size) | + // FIELD_PREP(ZXDH_CEQC_VIRTUALLY_MAPPED, ceq->virtual_map) | + FIELD_PREP(ZXDH_CEQC_CEQ_SIZE, ZXDH_CEQE_SIZE_64_BYTE) | + FIELD_PREP(ZXDH_CEQC_LOG_CEQ_NUM, ceq->log2_elem_size) | + FIELD_PREP(ZXDH_CEQC_CEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + + set_64bit_val(wqe, 8, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_ADDRESS, ceq->virtual_map ? 
+ ceq->first_pm_pbl_idx : + ceq->ceq_elem_pa) | + FIELD_PREP(ZXDH_CEQC_PERIOD_H, 0); + dma_wmb(); + set_64bit_val(wqe, 16, hdr); + + hdr = FIELD_PREP(ZXDH_CEQC_CEQ_MAX_CNT, IRMDA_CEQ_AGGREGATION_CNT_0) | + FIELD_PREP(ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG, 0); + dma_wmb(); + set_64bit_val(wqe, 24, hdr); + + hdr = FIELD_PREP(ZXDH_CQPSQ_CEQ_CEQID, ceq->ceq_id) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_sc_process_ceq - process ceq + * @dev: sc device struct + * @ceq: ceq sc structure + * + * It is expected caller serializes this function with cleanup_ceqes() + * because these functions manipulate the same ceq + */ +void *zxdh_sc_process_ceq(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *ceq) +{ + u64 temp; + __le64 *ceqe; + struct zxdh_sc_cq *cq = NULL; + struct zxdh_sc_cq *temp_cq; + u8 polarity; + u32 cq_idx; + unsigned long flags; + + do { + if (ceq->valid_ceq == false) { + return NULL; + } + cq_idx = 0; + ceqe = ZXDH_GET_CURRENT_CEQ_ELEM(ceq); + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CEQE_VALID, temp); + if (polarity != ceq->polarity) + return NULL; + + temp_cq = (struct zxdh_sc_cq *)(unsigned long)LS_64_1(temp, 1); + if (!temp_cq) { + cq_idx = ZXDH_INVALID_CQ_IDX; + ZXDH_RING_MOVE_TAIL(ceq->ceq_ring); + + if (!ZXDH_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + continue; + } + + cq = temp_cq; + if (ceq->reg_cq) { + spin_lock_irqsave(&ceq->req_cq_lock, flags); + cq_idx = zxdh_sc_find_reg_cq(ceq, cq); + spin_unlock_irqrestore(&ceq->req_cq_lock, flags); + } + + ZXDH_RING_MOVE_TAIL(ceq->ceq_ring); + if (!ZXDH_RING_CURRENT_TAIL(ceq->ceq_ring)) + ceq->polarity ^= 1; + } while (cq_idx == 
ZXDH_INVALID_CQ_IDX); + + return cq; +} + +/** + * zxdh_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq + * @cq: cq for which the ceqes need to be cleaned up + * @ceq: ceq ptr + * + * The function is called after the cq is destroyed to cleanup + * its pending ceqe entries. It is expected caller serializes this + * function with process_ceq() in interrupt context. + */ +void zxdh_sc_cleanup_ceqes(struct zxdh_sc_cq *cq, struct zxdh_sc_ceq *ceq) +{ + struct zxdh_sc_cq *next_cq; + u8 ceq_polarity = ceq->polarity; + __le64 *ceqe; + u8 polarity; + u64 temp; + int next; + u32 i; + + next = ZXDH_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); + + for (i = 1; i <= ZXDH_RING_SIZE(*ceq); i++) { + ceqe = ZXDH_GET_CEQ_ELEM_AT_POS(ceq, next); + + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)FIELD_GET(ZXDH_CEQE_VALID, temp); + if (polarity != ceq_polarity) + return; + + next_cq = (struct zxdh_sc_cq *)(unsigned long)LS_64_1(temp, 1); + if (cq == next_cq) + set_64bit_val(ceqe, 0, temp & ZXDH_CEQE_VALID); + + next = ZXDH_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); + if (!next) + ceq_polarity ^= 1; + } +} + +/** + * zxdh_sc_aeq_init - initialize aeq + * @aeq: aeq structure ptr + * @info: aeq initialization info + */ +int zxdh_sc_aeq_init(struct zxdh_sc_aeq *aeq, struct zxdh_aeq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size || + info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size) + return -EINVAL; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + aeq->size = sizeof(*aeq); + aeq->polarity = 1; + aeq->get_polarity_flag = 0; + aeq->aeqe_base = (struct zxdh_sc_aeqe *)info->aeqe_base; + aeq->dev = info->dev; + aeq->elem_cnt = info->elem_cnt; + aeq->aeq_elem_pa = info->aeq_elem_pa; + ZXDH_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); + aeq->virtual_map = info->virtual_map; + aeq->pbl_list = (aeq->virtual_map ? 
info->pbl_list : NULL); + aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); + aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); + aeq->msix_idx = info->msix_idx; + info->dev->aeq = aeq; + + return 0; +} + +/** + * zxdh_sc_aeq_create - create aeq + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static int zxdh_sc_aeq_create(struct zxdh_sc_aeq *aeq, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = aeq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, 0); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? 
aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_cqp_aeq_create - create aeq + * @aeq: aeq structure ptr + */ +int zxdh_cqp_aeq_create(struct zxdh_sc_aeq *aeq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + u64 hdr; + u64 scratch = 0; + u32 tail = 0, val = 0, error = 0; + int ret_code; + + cqp = aeq->dev->cqp; + dev = aeq->dev; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, 0); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? 
aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(cqp); + + ret_code = zxdh_cqp_poll_registers(cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + return 0; +} + +/** + * zxdh_sc_aeq_destroy - destroy aeq during close + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_aeq_destroy(struct zxdh_sc_aeq *aeq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + u64 hdr; + + dev = aeq->dev; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, ZXDH_QUEUE_STATE_OK); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? 
aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_aeq_requestor_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + */ +static void zxdh_aeq_requestor_msg_cfg(struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_REQ_AXI_RSP_ERR: + case ZXDH_AE_REQ_WQE_FLUSH: + break; + default: + info->qp = true; + info->sq = true; + break; + } +} + +/** + * zxdh_aeq_responder_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + */ +static void zxdh_aeq_responder_msg_cfg(struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_RSP_WQE_FLUSH: + info->qp = true; + break; + case ZXDH_AE_RSP_SRQ_WATER_SIG: + info->srq = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW: + info->cq = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW_QP: + info->qp = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_STATE: + info->qp = true; + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP: + info->cq = true; + break; + case ZXDH_AE_RSP_SRQ_AXI_RSP_SIG: + info->srq = true; + break; + default: + info->qp = true; + info->rq = true; + break; + } +} + +/** + * zxdh_ae_src_msg_cfg - ae src msg cfg + *@info: aeqe info to be cfg + *@ae_src: ae msg source + */ +static void zxdh_ae_src_msg_cfg(struct zxdh_aeqe_info *info, u8 ae_src) +{ + if (ae_src == ZXDH_AE_REQUESTER) { //requestor + zxdh_aeq_requestor_msg_cfg(info); + } else if (ae_src == ZXDH_AE_RESPONDER) { //responder + zxdh_aeq_responder_msg_cfg(info); + } else { + pr_err("aeq src msg cfg, bad ae_src!\n"); + } +} + +/** + * zxdh_sc_get_next_aeqe - get next aeq entry + * @aeq: aeq structure ptr + * @info: aeqe 
info to be returned + */ +int zxdh_sc_get_next_aeqe(struct zxdh_sc_aeq *aeq, struct zxdh_aeqe_info *info) +{ + u64 temp, temp1, compl_ctx; + __le64 *aeqe; + u16 wqe_idx; + u8 ae_src; + u8 polarity; + + aeqe = ZXDH_GET_CURRENT_AEQ_ELEM(aeq); + get_64bit_val(aeqe, 16, &compl_ctx); + get_64bit_val(aeqe, 0, &temp); + get_64bit_val(aeqe, 8, &temp1); + polarity = (u8)FIELD_GET(ZXDH_AEQE_VALID, temp); + info->ae_id = (u16)FIELD_GET(ZXDH_AEQE_AECODE, temp); + if ((aeq->get_polarity_flag == 0) && (info->ae_id)) { + aeq->polarity = polarity; + aeq->get_polarity_flag = 1; + } + + if (aeq->polarity != polarity) + return -ENOENT; + + if (info->ae_id == 0) + return -ENOENT; + + print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, + aeqe, 16, false); + + ae_src = (u8)FIELD_GET(ZXDH_AEQE_AESRC, temp); + wqe_idx = (u16)FIELD_GET(ZXDH_AEQE_WQDESCIDX, temp1); + info->qp_cq_id = (u32)FIELD_GET(ZXDH_AEQE_QPCQID, temp1); + info->iwarp_state = (u8)FIELD_GET(ZXDH_AEQE_IWSTATE, temp); + info->aeqe_overflow = (bool)FIELD_GET(ZXDH_AEQE_OVERFLOW, temp); + info->vhca_id = (u8)FIELD_GET(ZXDH_AEQE_VHCA_ID, temp); + info->compl_ctx = compl_ctx; + info->ae_src = ae_src; + zxdh_ae_src_msg_cfg(info, ae_src); + if ((info->ae_id != 257) && (info->ae_id != 18)) { + pr_info("%s ae_src:%d wqe_idx:%d qp_cq_id:%d ae_id:%d vhca_id:%d\n", + __func__, ae_src, wqe_idx, info->qp_cq_id, info->ae_id, + info->vhca_id); + } + + ZXDH_RING_MOVE_TAIL(aeq->aeq_ring); + if (!ZXDH_RING_CURRENT_TAIL(aeq->aeq_ring)) + aeq->polarity ^= 1; + + return 0; +} + +/** + * zxdh_sc_repost_aeq_tail - repost aeq valid idx + * @dev: sc device struct + * @idx: valid location + */ +int zxdh_sc_repost_aeq_tail(struct zxdh_sc_dev *dev, u32 idx) +{ + writel(idx, dev->aeq_tail_pointer); + return 0; +} + +int zxdh_sc_dma_read(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + 
u64 src_path_index = 0, dest_path_index = 0; + + if (!cqp) + return -ENOMEM; + + src_path_index = zxdh_get_path_index(spath_index); + dest_path_index = zxdh_get_path_index(dpath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, dest_path_index); + set_64bit_val(wqe, 16, src_dest->dest); + set_64bit_val(wqe, 24, src_dest->src); + set_64bit_val(wqe, 32, src_dest->len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRCPATHINDEX, src_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_READ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_read_usecqe(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_dam_read_bycqe *readbuf, + struct zxdh_path_index *spath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 src_path_index = 0; + u8 i = 0; + + if (!cqp) + return -ENOMEM; + + if (readbuf->num > 5) + return -ENOMEM; + + src_path_index = zxdh_get_path_index(spath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, readbuf->valuetype); + for (i = 0; i < readbuf->num; i++) + set_64bit_val(wqe, 16 + i * 8, readbuf->addrbuf[i]); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRCPATHINDEX, src_path_index) | + FIELD_PREP(ZXDH_CQPSQ_DATABITWIDTH, readbuf->bitwidth) | + FIELD_PREP(ZXDH_CQPSQ_DATAINCQENUM, readbuf->num) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_READ_USECQE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_write64(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write64_date *dma_data, bool post_sq) +{ + __le64 *wqe; + u64 
hdr; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 3) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + set_64bit_val(wqe, 40 + i * 8, dma_data->databuf[i]); + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_64) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_clear_nof_ioq(struct zxdh_sc_dev *dev, u64 size, u64 ioq_pa) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + if (!dev) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + dev->nof_clear_dpu_mem.size = NOF_IOQ_SQ_WQE_SIZE * NOF_IOQ_SQ_SIZE; + dev->nof_clear_dpu_mem.va = + dma_alloc_coherent(dev->hw->device, dev->nof_clear_dpu_mem.size, + &dev->nof_clear_dpu_mem.pa, GFP_KERNEL); + if (!dev->nof_clear_dpu_mem.va) + return -ENOMEM; + memset(dev->nof_clear_dpu_mem.va, 0, dev->nof_clear_dpu_mem.size); + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = dev->nof_clear_dpu_mem.pa; + cqp_info->in.u.dma_writeread.src_dest.len = dev->nof_clear_dpu_mem.size; + cqp_info->in.u.dma_writeread.src_dest.dest = ioq_pa; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + 
cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_DPU_DDR; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + pr_info("clear nof ioq pa=%llx size=%d\n", ioq_pa, + dev->nof_clear_dpu_mem.size); + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +int zxdh_clear_dpuddr(struct zxdh_sc_dev *dev, bool clear) +{ + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0, loop = 0, i = 0; + int ret_code = 0; + u64 scratch = 0; + u64 src_path_index = 0, dest_path_index = 0, remain_leg = 0; + u64 size = 0; + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + struct zxdh_src_copy_dest src_dest = {}; + + if (!dev) + return -ENOMEM; + + if ((false == clear) || (dev->hmc_use_dpu_ddr == false)) + return 0; + + dev->clear_dpu_mem.size = ZXDH_HMC_DIRECT_BP_SIZE; + dev->clear_dpu_mem.va = + dma_alloc_coherent(dev->hw->device, dev->clear_dpu_mem.size, + &dev->clear_dpu_mem.pa, GFP_KERNEL); + if (!dev->clear_dpu_mem.va) + return -ENOMEM; + memset(dev->clear_dpu_mem.va, 0, dev->clear_dpu_mem.size); + + size = dev->hmc_pf_manager_info.hmc_size; + loop = size / ZXDH_HMC_DIRECT_BP_SIZE; + remain_leg = size % ZXDH_HMC_DIRECT_BP_SIZE; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_DPU_DDR; // L2D + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; // L2D + dpath_index.vhca_id = dev->vhca_id; + dest_path_index = zxdh_get_path_index(&dpath_index); + + spath_index.inter_select = 
ZXDH_INTERFACE_NOTCACHE; // not pass cache + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = dev->vhca_id; + src_path_index = zxdh_get_path_index(&spath_index); + src_dest.src = dev->clear_dpu_mem.pa; + src_dest.len = dev->clear_dpu_mem.size; + + for (i = 0; i < loop; i++) { + src_dest.dest = dev->hmc_pf_manager_info.hmc_base + + i * ZXDH_HMC_DIRECT_BP_SIZE; + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); + set_64bit_val(wqe, 24, src_dest.src); + set_64bit_val(wqe, 32, src_dest.len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers( + dev->cqp, tail, dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + } + + if (remain_leg != 0) { + src_dest.dest = dev->hmc_pf_manager_info.hmc_base + + i * ZXDH_HMC_DIRECT_BP_SIZE; + src_dest.len = remain_leg; + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); + set_64bit_val(wqe, 24, src_dest.src); + set_64bit_val(wqe, 32, src_dest.len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers( 
+ dev->cqp, tail, dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + } + + return ret_code; +} + +int zxdh_sc_dma_write32(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 4) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + if (i == 0) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 1) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 2) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } else { // if (i == 3) + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_32) | + FIELD_PREP(ZXDH_CQPSQ_InterSourSel, dma_data->inter_sour_sel) | + FIELD_PREP(ZXDH_CQPSQ_NeedInter, dma_data->need_inter) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_dma_write(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 src_path_index = 0, dest_path_index = 0; + + if (!cqp) + return -ENOMEM; + + src_path_index = 
zxdh_get_path_index(spath_index); + dest_path_index = zxdh_get_path_index(dpath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest->dest); + set_64bit_val(wqe, 24, src_dest->src); + set_64bit_val(wqe, 32, src_dest->len); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_qpc(struct zxdh_sc_dev *dev, u32 qpn, u64 qpc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_QP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_QPC_ID, qpn); + set_64bit_val(wqe, 8, qpc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_cqc(struct zxdh_sc_dev *dev, u32 cqn, u64 cqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, cqn); + set_64bit_val(wqe, 8, cqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_ceqc(struct zxdh_sc_dev *dev, u32 ceqn, u64 ceqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = dev->cqp; + u64 hdr; + + wqe = 
zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_CEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, ceqn); + set_64bit_val(wqe, 8, ceqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_sc_query_aeqc(struct zxdh_sc_dev *dev, u16 aeqn, u64 aeqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_CQC_ID, aeqn); + set_64bit_val(wqe, 8, aeqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +u32 zxdh_num_to_log(u32 size_num) +{ + u32 size_log = 0; + u32 temp = size_num; + + while (size_num > 1) { + size_num >>= 1; + size_log++; + } + if (temp != (1 << size_log)) + size_log += 1; + + return size_log; +} + +int zxdh_sc_mb_create(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_mailboxhead_data *mbhead_data, bool post_sq, + u32 dst_vf_id) +{ + __le64 *wqe; + u64 hdr; + struct zxdh_sc_dev *dev = NULL; + struct zxdh_pci_f *rf = NULL; + bool ftype = false; + + if (!cqp) + return -ENOMEM; + + dev = cqp->dev; + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + ftype = rf->ftype; // ftype==0 ->PF + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, mbhead_data->msg0); + set_64bit_val(wqe, 16, mbhead_data->msg1); + set_64bit_val(wqe, 24, mbhead_data->msg2); + set_64bit_val(wqe, 32, mbhead_data->msg3); + set_64bit_val(wqe, 40, mbhead_data->msg4); + + hdr = FIELD_PREP(ZXDH_CQPSQ_DSTVFID, dst_vf_id) | + FIELD_PREP(ZXDH_CQPSQ_SRCPFVFID, + ((ftype == 0) ? 
rf->pf_id : rf->vf_id)) | + FIELD_PREP(ZXDH_CQPSQ_PFVALID, !ftype) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_SEND_MAILBOX) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * zxdh_sc_ccq_init - initialize control cq + * @cq: sc's cq ctruct + * @info: info for control cq initialization + */ +int zxdh_sc_ccq_init(struct zxdh_sc_cq *cq, struct zxdh_ccq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size || + info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) + return -EINVAL; + + if (info->ceq_index > (info->dev->max_ceqs - 1)) + return -EINVAL; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) + return -EINVAL; + + cq->cq_pa = info->cq_pa; + cq->cq_uk.cq_base = info->cq_base; + cq->shadow_area_pa = info->shadow_area_pa; + cq->cq_uk.shadow_area = info->shadow_area; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + cq->ceq_index = info->ceq_index; + cq->cq_uk.cq_size = info->num_elem; + cq->cq_uk.cq_log_size = zxdh_num_to_log(info->num_elem); + cq->cq_type = ZXDH_CQ_TYPE_CQP; + cq->ceqe_mask = info->ceqe_mask; + ZXDH_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); + cq->cq_uk.cq_id = info->cq_num; + cq->ceq_id_valid = info->ceq_id_valid; + cq->cq_uk.cqe_size = info->cqe_size; + cq->pbl_list = info->pbl_list; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->cq_uk.polarity = true; + cq->cq_max = info->cq_max; + cq->cq_period = info->cq_period; + cq->scqe_break_moderation_en = info->scqe_break_moderation_en; + cq->cq_st = info->cq_st; + cq->is_in_list_cnt = info->is_in_list_cnt; + + /* Only 
applicable to CQs other than CCQ so initialize to zero */ + cq->cq_uk.cqe_alloc_db = NULL; + + info->dev->ccq = cq; + writel(cq->cq_uk.cq_id, + (u32 __iomem *)(cq->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + + return 0; +} + +/** + * zxdh_sc_ccq_create_done - poll cqp for ccq create + * @ccq: ccq sc struct + */ +static inline int zxdh_sc_ccq_create_done(struct zxdh_sc_cq *ccq) +{ + struct zxdh_sc_cqp *cqp; + + cqp = ccq->dev->cqp; + + return zxdh_sc_poll_for_cqp_op_done(cqp, ZXDH_CQP_OP_CREATE_CQ, NULL); +} + +/** + * zxdh_sc_ccq_create - create control cq + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ccq_create(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq) +{ + int ret_code; + + ret_code = zxdh_sc_cq_create(ccq, scratch, post_sq); + if (ret_code) + return ret_code; + + if (post_sq) { + ret_code = zxdh_sc_ccq_create_done(ccq); + if (ret_code) + return ret_code; + } + + ccq->dev->cqp->process_config_pte_table = zxdh_cqp_config_pte_table_cmd; + + return 0; +} + +/** + * zxdh_sc_ccq_destroy - destroy ccq during close + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_ccq_destroy(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 temp; + u64 hdr; + int ret_code = 0; + u32 tail, val, error; + + cqp = ccq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + writel(0, (u32 __iomem *)(cqp->dev->hw->hw_addr + C_RDMA_CQP_CQ_NUM)); + dma_wmb(); + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_CQPSQ_CQ_CQC_SET_MASK, + ZXDH_CQC_SET_FIELD_ALL)); + temp = FIELD_PREP(ZXDH_CQPSQ_CQ_CQSTATE, 0) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQESIZE, ccq->cq_uk.cqe_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_VIRTMAP, ccq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_CQ_LPBLSIZE, ccq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_ENCEQEMASK, 
ccq->ceqe_mask); + + dma_wmb(); + set_64bit_val(wqe, 16, temp); + set_64bit_val(wqe, 24, RS_64_1(ccq->shadow_area_pa, 6)); + temp = FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0), + ZXDH_CQPSQ_CQ_CEQID) | + FIELD_PREP(ZXDH_CQPSQ_CQ_CQSIZE, ccq->cq_uk.cq_size) | + FIELD_PREP(ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD, + ccq->shadow_read_threshold); + + dma_wmb(); + set_64bit_val(wqe, 32, temp); + set_64bit_val(wqe, 40, 0); + set_64bit_val(wqe, 48, + (ccq->virtual_map ? ccq->first_pm_pbl_idx : + RS_64_1(ccq->cq_pa, 8))); + set_64bit_val(wqe, 56, RS_64_1(ccq, 0)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_CQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FLD_LS_64(ccq->dev, ccq->cq_uk.cq_id, ZXDH_CQPSQ_CQ_CQID); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (post_sq) { + zxdh_sc_cqp_post_sq(cqp); + ret_code = zxdh_cqp_poll_registers( + cqp, tail, cqp->dev->hw_attrs.max_done_count); + } + + return ret_code; +} + +/** + * zxdh_cqp_ring_full - check if cqp ring is full + * @cqp: struct for cqp hw + */ +static bool zxdh_cqp_ring_full(struct zxdh_sc_cqp *cqp) +{ + return ZXDH_RING_FULL_ERR(cqp->sq_ring); +} + +/** + * zxdh_sc_query_rdma_features - query RDMA features and FW ver + * @cqp: struct for cqp hw + * @buf: buffer to hold query info + * @scratch: u64 saved to be used during cqp completion + */ +static int zxdh_sc_query_rdma_features(struct zxdh_sc_cqp *cqp, + struct zxdh_dma_mem *buf, u64 scratch) +{ + __le64 *wqe; + u64 temp; + u32 tail, val, error; + int status; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = buf->pa; + set_64bit_val(wqe, 32, temp); + + temp = FIELD_PREP(ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID, + cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) | + 
FIELD_PREP(ZXDH_CQPSQ_UP_OP, ZXDH_CQP_OP_QUERY_RDMA_FEATURES); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, temp); + + print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET, 16, + 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(cqp); + status = zxdh_cqp_poll_registers(cqp, tail, + cqp->dev->hw_attrs.max_done_count); + if (error || status) + status = -EIO; + + return status; +} + +u64 zxdh_get_hmc_align_2M(u64 paaddr) +{ + u64 pa = paaddr; + + if (paaddr % 0x200000 == 0) + return pa; + + pa = pa + 0x200000; + pa = pa & (~GENMASK_ULL(20, 0)); + + return pa; +} + +u64 zxdh_get_hmc_align_512(u64 paaddr) +{ + u64 pa = paaddr; + + if (paaddr % 512 == 0) + return pa; + + pa = pa + 512; + pa = pa & (~GENMASK_ULL(8, 0)); + + return pa; +} + +u64 zxdh_get_hmc_align_4K(u64 paaddr) +{ + u64 pa = paaddr; + + if (paaddr % 4096 == 0) + return pa; + + pa = pa + 4096; + pa = pa & (~GENMASK_ULL(11, 0)); + + return pa; +} + +u16 zxdh_txwind_ddr_size(u8 num) // 每个qp配置的tx window的条目,配置为2~9 +{ + u8 i = 0; + u16 result = 1; + + if (num > 9 || num < 2) { + result = 4; + return result; + } + + for (i = 0; i < num; i++) + result = result * 2; + + return result; +} + +void zxdh_hmc_dpu_capability(struct zxdh_sc_dev *dev) +{ + u32 val = 0; + struct zxdh_hmc_obj_info *obj_info = NULL; + u8 txwindo_ddr_reg = 9; + + //txwindo_ddr_reg = readl(dev->hw->hw_addr+ TXWINDOW_DDR_SIZE); + + obj_info = dev->hmc_info->hmc_obj; + + obj_info[ZXDH_HMC_IW_QP].cnt = obj_info[ZXDH_HMC_IW_QP].max_cnt; + obj_info[ZXDH_HMC_IW_QP].size = 512; + + obj_info[ZXDH_HMC_IW_CQ].cnt = obj_info[ZXDH_HMC_IW_CQ].max_cnt; + obj_info[ZXDH_HMC_IW_CQ].size = 64; + + obj_info[ZXDH_HMC_IW_SRQ].cnt = obj_info[ZXDH_HMC_IW_SRQ].max_cnt; + obj_info[ZXDH_HMC_IW_SRQ].size = 64; + + obj_info[ZXDH_HMC_IW_MR].cnt = obj_info[ZXDH_HMC_IW_MR].max_cnt; + obj_info[ZXDH_HMC_IW_MR].size = 64; + + 
obj_info[ZXDH_HMC_IW_AH].cnt = obj_info[ZXDH_HMC_IW_AH].max_cnt; + obj_info[ZXDH_HMC_IW_AH].size = 64; + + obj_info[ZXDH_HMC_IW_IRD].cnt = obj_info[ZXDH_HMC_IW_IRD].max_cnt; + obj_info[ZXDH_HMC_IW_IRD].size = 64 * 2 * (dev->ird_size); + + obj_info[ZXDH_HMC_IW_TXWINDOW].cnt = + obj_info[ZXDH_HMC_IW_TXWINDOW].max_cnt; + obj_info[ZXDH_HMC_IW_TXWINDOW].size = + 64 * zxdh_txwind_ddr_size(txwindo_ddr_reg); + + obj_info[ZXDH_HMC_IW_PBLE_MR].cnt = + obj_info[ZXDH_HMC_IW_PBLE_MR].max_cnt; + obj_info[ZXDH_HMC_IW_PBLE_MR].size = 8; + + obj_info[ZXDH_HMC_IW_PBLE].cnt = obj_info[ZXDH_HMC_IW_PBLE].max_cnt; + obj_info[ZXDH_HMC_IW_PBLE].size = 8; + + val = obj_info[ZXDH_HMC_IW_MR].cnt; + + writel(val, (u32 __iomem *)(dev->hw->hw_addr + C_TX_MRTE_INDEX_CFG)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_ACK_PCI_MAX_MRTE_INDEX_PARA_CFG)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_PCI_MAX_MRTE_INDEX_RAM)); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_LOCAL_MRTE_PARENT_PARA_CFG)); +} + +int zxdh_create_vf_pblehmc_entry(struct zxdh_sc_dev *dev) +{ + u32 sd_lmt, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info = NULL; + struct zxdh_hmc_obj_info *obj_info = NULL; + struct zxdh_virt_mem virt_mem = {}; + + hmc_info = dev->hmc_info; + obj_info = hmc_info->hmc_obj; + for (k = ZXDH_HMC_IW_PBLE; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + if ((fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) == 0) { + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + } else { + sd_lmt = (u32)((fpm_limit - 1) / + ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + } + + if (sd_lmt == 1) + hmc_entry_total++; + else { + for (j = 0; j < sd_lmt - 1; j++) + 
hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + + return 0; +} + +int zxdh_sc_commit_hmc_register_val(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, + bool post_sq, u8 wait_type) +{ + __le64 *wqe; + u64 hdr; + u32 tail, val, error; + int ret_code = 0; + u64 dest_path_index = 0; + int i, loop; + + if (!cqp) + return -ENOMEM; + + loop = dma_data->num; + if (loop > 4) + return -ENOMEM; + + dest_path_index = zxdh_get_path_index(dpath_index); + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + for (i = 0; i < loop; i++) { + set_64bit_val(wqe, 16 + i * 8, dma_data->addrbuf[i]); + if (i == 0) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 1) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 48, hdr); + } else if (i == 2) { + hdr = FIELD_PREP(ZXDH_CQPSQ_DATALOW, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } else { //if (i == 3) + hdr = FIELD_PREP(ZXDH_CQPSQ_DATAHIGH, + dma_data->databuf[i]); + set_64bit_val(wqe, 56, hdr); + } + } + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE_32) | + FIELD_PREP(ZXDH_CQPSQ_InterSourSel, dma_data->inter_sour_sel) | + FIELD_PREP(ZXDH_CQPSQ_NeedInter, dma_data->need_inter) | + FIELD_PREP(ZXDH_CQPSQ_DATAINWQENUM, dma_data->num) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + 
set_64bit_val(wqe, 0, hdr); + + zxdh_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (post_sq) { + zxdh_sc_cqp_post_sq(cqp); + if (wait_type == ZXDH_CQP_WAIT_POLL_REGS) + ret_code = zxdh_cqp_poll_registers( + cqp, tail, cqp->dev->hw_attrs.max_done_count); + else if (wait_type == ZXDH_CQP_WAIT_POLL_CQ) + ret_code = zxdh_sc_commit_fpm_val_done(cqp); + } + + return ret_code; +} + +u32 zxdh_hmc_register_config_comval(struct zxdh_sc_dev *dev, u32 rsrc_type) +{ + u32 tmp = 0, val = 0; + + if ((rsrc_type == ZXDH_HMC_IW_QP) || (rsrc_type == ZXDH_HMC_IW_CQ) || + (rsrc_type == ZXDH_HMC_IW_SRQ)) { + tmp = 0; // not use default 0 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else if ((rsrc_type == ZXDH_HMC_IW_IRD) && (dev->cache_id != 0)) { + tmp = 2; // ird cacheid is 2 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else if ((rsrc_type == ZXDH_HMC_IW_TXWINDOW) && + (dev->cache_id != 0)) { + tmp = 3; // tx_wind cacheid is 3 + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } else { + tmp = dev->cache_id; // cacheid + tmp &= GENMASK_ULL(1, 0); + val |= tmp; + } + + if ((rsrc_type == ZXDH_HMC_IW_QP) || (rsrc_type == ZXDH_HMC_IW_CQ) || + (rsrc_type == ZXDH_HMC_IW_SRQ)) { + if (dev->hmc_use_dpu_ddr) + tmp = ZXDH_INDICATE_DPU_DDR << 2; // indicateid + else + tmp = ZXDH_INDICATE_HOST_SMMU << 2; // indicateid + } else { + tmp = 0; // not used, Default is 0 + } + tmp &= GENMASK_ULL(3, 2); + val |= tmp; + + if (dev->hmc_use_dpu_ddr) + tmp = ZXDH_AXID_DPUDDR << 4; // AXID,DPU为1 + else + tmp = dev->hmc_epid << 4; + + tmp &= GENMASK_ULL(6, 4); // HOST is ep_id + val |= tmp; + + tmp = 0 << 7; // way_partition temp is 0 + tmp &= GENMASK_ULL(9, 7); + val |= tmp; + + tmp = 0 << 10; // rev is 0 + tmp &= GENMASK_ULL(31, 10); + val |= tmp; + + return val; +} + +u32 zxdh_hmc_register_config_cqpval(struct zxdh_sc_dev *dev, u32 max_cnt, + u32 rsrc_type) +{ + u32 tmp = 0, val = 0; + + if ((rsrc_type == ZXDH_HMC_IW_MR) || (rsrc_type == ZXDH_HMC_IW_AH)) { + tmp = dev->cache_id; // cacheid + tmp &= 
GENMASK_ULL(1, 0); + val |= tmp; + + tmp = 0 << 2; // way_partition temp is 0 + tmp &= GENMASK_ULL(4, 2); + val |= tmp; + + tmp = max_cnt << 5; + tmp &= GENMASK_ULL(28, 5); // max index + val |= tmp; + } + return val; +} + +/** + * zxdh_cfg_fpm_val - configure HMC objects + * @dev: sc device struct + */ +int zxdh_cfg_fpm_val(struct zxdh_sc_dev *dev) +{ + struct zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_info *hmc_info = NULL; + int ret_code = 0; + u32 sd_lmt = 0, hmc_entry_total = 0, i = 0, j = 0, mem_size = 0, + cnt = 0, k = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_obj_info *obj_info = NULL; + + hmc_info = dev->hmc_info; + zxdh_hmc_dpu_capability(dev); + + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register(dev, hmc_info->hmc_obj, k, + dev->vhca_id); + } + + obj_info = hmc_info->hmc_obj; + for (i = 0; i < ZXDH_HMC_IW_MAX; i++) { + switch (i) { + case ZXDH_HMC_IW_QP: + cnt = dev->hmc_pf_manager_info.total_qp_cnt; + break; + case ZXDH_HMC_IW_CQ: + cnt = dev->hmc_pf_manager_info.total_cq_cnt; + break; + case ZXDH_HMC_IW_SRQ: + cnt = dev->hmc_pf_manager_info.total_srq_cnt; + break; + case ZXDH_HMC_IW_AH: + cnt = dev->hmc_pf_manager_info.total_ah_cnt; + break; + case ZXDH_HMC_IW_MR: + cnt = dev->hmc_pf_manager_info.total_mrte_cnt; + break; + default: + cnt = obj_info[i].cnt; + break; + } + + fpm_limit = obj_info[i].size * cnt; + fpm_limit = ALIGN(fpm_limit, ZXDH_HMC_DIRECT_BP_SIZE); + + if (fpm_limit == 0) + continue; + + if (i == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (i == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + + for (j = 0; j < sd_lmt; j++) + hmc_entry_total++; + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + 
+ hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + return ret_code; +} + +/** + * zxdh_exec_cqp_cmd - execute cqp cmd when wqe are available + * @dev: rdma device + * @pcmdinfo: cqp command info + */ +static int zxdh_exec_cqp_cmd(struct zxdh_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo) +{ + int status; + bool alloc = false; + + dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; + if (dev->hw_attrs.self_health == true) { + status = zxdh_check_cqp_cmd(pcmdinfo); + if (status) + return status; + } + switch (pcmdinfo->cqp_cmd) { + case ZXDH_OP_CEQ_DESTROY: + status = zxdh_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, + pcmdinfo->in.u.ceq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AEQ_DESTROY: + status = zxdh_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, + pcmdinfo->in.u.aeq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CEQ_CREATE: + status = zxdh_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, + pcmdinfo->in.u.ceq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AEQ_CREATE: + status = zxdh_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, + pcmdinfo->in.u.aeq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_UPLOAD_CONTEXT: + status = zxdh_sc_qp_upload_context( + pcmdinfo->in.u.qp_upload_context.dev, + &pcmdinfo->in.u.qp_upload_context.info, + pcmdinfo->in.u.qp_upload_context.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_CREATE: + status = zxdh_sc_cq_create(pcmdinfo->in.u.cq_create.cq, + pcmdinfo->in.u.cq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_MODIFY: + status = zxdh_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq, + &pcmdinfo->in.u.cq_modify.info, + pcmdinfo->in.u.cq_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_MODIFY_MODERATION: + status = zxdh_sc_modify_cq_moderation( + pcmdinfo->in.u.cq_modify.cq, + pcmdinfo->in.u.cq_modify.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_CQ_DESTROY: + status = 
zxdh_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, + pcmdinfo->in.u.cq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_FLUSH_WQES: + status = zxdh_sc_qp_flush_wqes( + pcmdinfo->in.u.qp_flush_wqes.qp, + &pcmdinfo->in.u.qp_flush_wqes.info, + pcmdinfo->in.u.qp_flush_wqes.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_GEN_AE: + status = zxdh_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp, + &pcmdinfo->in.u.gen_ae.info, + pcmdinfo->in.u.gen_ae.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_PUSH_PAGE: + status = zxdh_sc_manage_push_page( + pcmdinfo->in.u.manage_push_page.cqp, + &pcmdinfo->in.u.manage_push_page.info, + pcmdinfo->in.u.manage_push_page.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE: + /* switch to calling through the call table */ + status = zxdh_sc_manage_hmc_pm_func_table( + pcmdinfo->in.u.manage_hmc_pm.dev->cqp, + &pcmdinfo->in.u.manage_hmc_pm.info, + pcmdinfo->in.u.manage_hmc_pm.scratch, true); + break; + case ZXDH_OP_SUSPEND: + status = zxdh_sc_suspend_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case ZXDH_OP_RESUME: + status = zxdh_sc_resume_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case ZXDH_OP_MANAGE_VF_PBLE_BP: + status = zxdh_manage_vf_pble_bp( + pcmdinfo->in.u.manage_vf_pble_bp.cqp, + &pcmdinfo->in.u.manage_vf_pble_bp.info, + pcmdinfo->in.u.manage_vf_pble_bp.scratch, true); + break; + case ZXDH_OP_STATS_ALLOCATE: + alloc = true; + fallthrough; + case ZXDH_OP_STATS_FREE: + status = zxdh_sc_manage_stats_inst( + pcmdinfo->in.u.stats_manage.cqp, + &pcmdinfo->in.u.stats_manage.info, alloc, + pcmdinfo->in.u.stats_manage.scratch); + break; + case ZXDH_OP_STATS_GATHER: + status = zxdh_sc_gather_stats( + pcmdinfo->in.u.stats_gather.cqp, + &pcmdinfo->in.u.stats_gather.info, + pcmdinfo->in.u.stats_gather.scratch); + break; + 
case ZXDH_OP_WS_MODIFY_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_MODIFY_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_WS_DELETE_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_DEL_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_WS_ADD_NODE: + status = zxdh_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, + &pcmdinfo->in.u.ws_node.info, + ZXDH_ADD_NODE, + pcmdinfo->in.u.ws_node.scratch); + break; + case ZXDH_OP_SET_UP_MAP: + status = zxdh_sc_set_up_map(pcmdinfo->in.u.up_map.cqp, + &pcmdinfo->in.u.up_map.info, + pcmdinfo->in.u.up_map.scratch); + break; + case ZXDH_OP_QUERY_RDMA_FEATURES: + status = zxdh_sc_query_rdma_features( + pcmdinfo->in.u.query_rdma.cqp, + &pcmdinfo->in.u.query_rdma.query_buff_mem, + pcmdinfo->in.u.query_rdma.scratch); + break; + case ZXDH_OP_DELETE_ARP_CACHE_ENTRY: + status = zxdh_sc_del_arp_cache_entry( + pcmdinfo->in.u.del_arp_cache_entry.cqp, + pcmdinfo->in.u.del_arp_cache_entry.scratch, + pcmdinfo->in.u.del_arp_cache_entry.arp_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_APBVT_ENTRY: + status = zxdh_sc_manage_apbvt_entry( + pcmdinfo->in.u.manage_apbvt_entry.cqp, + &pcmdinfo->in.u.manage_apbvt_entry.info, + pcmdinfo->in.u.manage_apbvt_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY: + status = zxdh_sc_manage_qhash_table_entry( + pcmdinfo->in.u.manage_qhash_table_entry.cqp, + &pcmdinfo->in.u.manage_qhash_table_entry.info, + pcmdinfo->in.u.manage_qhash_table_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_MODIFY: + status = zxdh_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp, + &pcmdinfo->in.u.qp_modify.info, + pcmdinfo->in.u.qp_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QP_CREATE: + status = zxdh_sc_qp_create(pcmdinfo->in.u.qp_create.qp, + pcmdinfo->in.u.qp_create.scratch, + pcmdinfo->post_sq); + break; + 
case ZXDH_OP_QP_DESTROY: + status = zxdh_sc_qp_destroy( + pcmdinfo->in.u.qp_destroy.qp, + pcmdinfo->in.u.qp_destroy.scratch, + pcmdinfo->in.u.qp_destroy.ignore_mw_bnd, + pcmdinfo->post_sq); + break; + case ZXDH_OP_ALLOC_STAG: + status = zxdh_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev, + &pcmdinfo->in.u.alloc_stag.info, + pcmdinfo->in.u.alloc_stag.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_MR_REG_NON_SHARED: + status = zxdh_sc_mr_reg_non_shared( + pcmdinfo->in.u.mr_reg_non_shared.dev, + &pcmdinfo->in.u.mr_reg_non_shared.info, + pcmdinfo->in.u.mr_reg_non_shared.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DEALLOC_STAG: + status = zxdh_sc_dealloc_stag( + pcmdinfo->in.u.dealloc_stag.dev, + &pcmdinfo->in.u.dealloc_stag.info, + pcmdinfo->in.u.dealloc_stag.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_MW_ALLOC: + status = zxdh_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev, + &pcmdinfo->in.u.mw_alloc.info, + pcmdinfo->in.u.mw_alloc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_ADD_ARP_CACHE_ENTRY: + status = zxdh_sc_add_arp_cache_entry( + pcmdinfo->in.u.add_arp_cache_entry.cqp, + &pcmdinfo->in.u.add_arp_cache_entry.info, + pcmdinfo->in.u.add_arp_cache_entry.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_AH_CREATE: + status = zxdh_sc_create_ah(pcmdinfo->in.u.ah_create.cqp, + &pcmdinfo->in.u.ah_create.info, + pcmdinfo->in.u.ah_create.scratch); + break; + case ZXDH_OP_AH_DESTROY: + status = zxdh_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp, + &pcmdinfo->in.u.ah_destroy.info, + pcmdinfo->in.u.ah_destroy.scratch); + break; + case ZXDH_OP_MC_CREATE: + status = zxdh_sc_create_mcast_grp( + pcmdinfo->in.u.mc_create.cqp, + pcmdinfo->in.u.mc_create.info, + pcmdinfo->in.u.mc_create.scratch); + break; + case ZXDH_OP_MC_DESTROY: + status = zxdh_sc_destroy_mcast_grp( + pcmdinfo->in.u.mc_destroy.cqp, + pcmdinfo->in.u.mc_destroy.info, + pcmdinfo->in.u.mc_destroy.scratch); + break; + case ZXDH_OP_MC_MODIFY: + status = zxdh_sc_modify_mcast_grp( + 
pcmdinfo->in.u.mc_modify.cqp, + pcmdinfo->in.u.mc_modify.info, + pcmdinfo->in.u.mc_modify.scratch); + break; + case ZXDH_OP_CONFIG_PTE_TAB: + case ZXDH_OP_CONFIG_PBLE_TAB: + case ZXDH_OP_DMA_WRITE: + status = zxdh_sc_dma_write( + pcmdinfo->in.u.dma_writeread.cqp, + pcmdinfo->in.u.dma_writeread.scratch, + &pcmdinfo->in.u.dma_writeread.src_dest, + &pcmdinfo->in.u.dma_writeread.src_path_index, + &pcmdinfo->in.u.dma_writeread.dest_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_PTE_TAB: + case ZXDH_OP_QUERY_HW_OBJECT_INFO: + case ZXDH_OP_DMA_READ: + status = zxdh_sc_dma_read( + pcmdinfo->in.u.dma_writeread.cqp, + pcmdinfo->in.u.dma_writeread.scratch, + &pcmdinfo->in.u.dma_writeread.src_dest, + &pcmdinfo->in.u.dma_writeread.src_path_index, + &pcmdinfo->in.u.dma_writeread.dest_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_CONFIG_MAILBOX: + status = zxdh_sc_mb_create(pcmdinfo->in.u.hmc_mb.cqp, + pcmdinfo->in.u.hmc_mb.scratch, + &pcmdinfo->in.u.hmc_mb.mbhead_data, + pcmdinfo->post_sq, + pcmdinfo->in.u.hmc_mb.dst_vf_id); + break; + case ZXDH_OP_DMA_READ_USE_CQE: + status = zxdh_sc_dma_read_usecqe( + pcmdinfo->in.u.dma_read_cqe.cqp, + pcmdinfo->in.u.dma_read_cqe.scratch, + &pcmdinfo->in.u.dma_read_cqe.dma_rcqe, + &pcmdinfo->in.u.dma_read_cqe.src_path_index, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DMA_WRITE32: + status = zxdh_sc_dma_write32( + pcmdinfo->in.u.dma_write32data.cqp, + pcmdinfo->in.u.dma_write32data.scratch, + &pcmdinfo->in.u.dma_write32data.dest_path_index, + &pcmdinfo->in.u.dma_write32data.dma_data, + pcmdinfo->post_sq); + break; + case ZXDH_OP_DMA_WRITE64: + status = zxdh_sc_dma_write64( + pcmdinfo->in.u.dma_write64data.cqp, + pcmdinfo->in.u.dma_write64data.scratch, + &pcmdinfo->in.u.dma_write64data.dest_path_index, + &pcmdinfo->in.u.dma_write64data.dma_data, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_QPC: + status = zxdh_sc_query_qpc(pcmdinfo->in.u.query_qpc.dev, + pcmdinfo->in.u.query_qpc.qpn, + 
pcmdinfo->in.u.query_qpc.qpc_buf_pa, + pcmdinfo->in.u.query_qpc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_CQC: + status = zxdh_sc_query_cqc(pcmdinfo->in.u.query_cqc.dev, + pcmdinfo->in.u.query_cqc.cqn, + pcmdinfo->in.u.query_cqc.cqc_buf_pa, + pcmdinfo->in.u.query_cqc.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_CEQC: + status = zxdh_sc_query_ceqc(pcmdinfo->in.u.query_ceqc.dev, + pcmdinfo->in.u.query_ceqc.ceqn, + pcmdinfo->in.u.query_ceqc.ceqc_buf_pa, + pcmdinfo->in.u.query_ceqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_AEQC: + status = zxdh_sc_query_aeqc(pcmdinfo->in.u.query_aeqc.dev, + pcmdinfo->in.u.query_aeqc.aeqn, + pcmdinfo->in.u.query_aeqc.aeqc_buf_pa, + pcmdinfo->in.u.query_aeqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_SRQC: + status = zxdh_sc_query_srqc(pcmdinfo->in.u.query_srqc.dev, + pcmdinfo->in.u.query_srqc.srqn, + pcmdinfo->in.u.query_srqc.srqc_buf_pa, + pcmdinfo->in.u.query_srqc.scratch, pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_MODIFY: + status = zxdh_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq, + &pcmdinfo->in.u.srq_modify.info, + pcmdinfo->in.u.srq_modify.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_CREATE: + status = zxdh_sc_srq_create(pcmdinfo->in.u.srq_create.srq, + &pcmdinfo->in.u.srq_create.info, + pcmdinfo->in.u.srq_create.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_SRQ_DESTROY: + status = zxdh_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq, + pcmdinfo->in.u.srq_destroy.scratch, + pcmdinfo->post_sq); + break; + case ZXDH_OP_QUERY_MKEY: + status = zxdh_sc_query_mkey(pcmdinfo->in.u.query_mkey.cqp, + pcmdinfo->in.u.query_mkey.mkeyindex, + pcmdinfo->in.u.query_mkey.scratch, + pcmdinfo->post_sq); + break; + default: + status = -EOPNOTSUPP; + break; + } + + return status; +} + +/** + * zxdh_process_cqp_cmd - process all cqp commands + * @dev: sc device struct + * @pcmdinfo: cqp command info + */ +int zxdh_process_cqp_cmd(struct zxdh_sc_dev *dev, + 
struct cqp_cmds_info *pcmdinfo) +{ + int status = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + if (list_empty(&dev->cqp_cmd_head) && !zxdh_cqp_ring_full(dev->cqp)) + status = zxdh_exec_cqp_cmd(dev, pcmdinfo); + else + list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * zxdh_process_bh - called from tasklet for cqp list + * @dev: sc device struct + */ +int zxdh_process_bh(struct zxdh_sc_dev *dev) +{ + int status = 0; + struct cqp_cmds_info *pcmdinfo; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + while (!list_empty(&dev->cqp_cmd_head) && + !zxdh_cqp_ring_full(dev->cqp)) { + pcmdinfo = (struct cqp_cmds_info *)zxdh_remove_cqp_head(dev); + if (!pcmdinfo) { /* must not return with cqp_lock held */ + status = -ENOMEM; break; } + status = zxdh_exec_cqp_cmd(dev, pcmdinfo); + if (status) + break; + } + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +/** + * zxdh_set_irq_rate_limit- Configure interrupt rate limit + * @dev: pointer to the device structure + * @idx: vector index + * @interval: Time interval in 4 usec units. Zero for no limit. 
+ */ +void zxdh_set_irq_rate_limit(struct zxdh_sc_dev *dev, u32 idx, u32 interval) +{ + u32 reg_val = 0; + + if (interval) { +#define ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL 59 /* 59 * 4 = 236 us */ + if (interval > ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL) + interval = ZXDH_MAX_SUPPORTED_INT_RATE_INTERVAL; + reg_val = interval & ZXDH_GLINT_RATE_INTERVAL; + reg_val |= FIELD_PREP(ZXDH_GLINT_RATE_INTRL_ENA, 1); + } + writel(reg_val, dev->hw_regs[ZXDH_GLINT_RATE] + idx); +} + +#endif +/** + * zxdh_cfg_aeq- Configure AEQ interrupt + * @dev: pointer to the device structure + * @irq_idx: vector index + */ +void zxdh_cfg_aeq(struct zxdh_sc_dev *dev, u32 irq_idx) +{ + struct zxdh_pci_f *rf; + u32 hdr = 0; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + hdr = FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VECTOR, irq_idx) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_TC, 0) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VF_ACTIVE, rf->ftype) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_VF_ID, rf->vf_id) | + FIELD_PREP(ZXDH_AEQ_MSIX_DATA_PF_ID, rf->pf_id); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_msix_data); + + hdr = FIELD_PREP(ZXDH_AEQ_MSIX_CONFIG_IRQ, 0) | + FIELD_PREP(ZXDH_AEQ_MSIX_CONFIG_EPID, rf->ep_id); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_msix_config); +} + +int zxdh_sc_config_pte_table(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest src_dest) +{ + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + u64 src_path_index = 0, dest_path_index = 0; + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + + if (!dev) + return -ENOMEM; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_L2D; // L2D + dpath_index.obj_id = ZXDH_L2D_OBJ_ID; // L2D + dpath_index.vhca_id = dev->vhca_id; + dest_path_index = zxdh_get_path_index(&dpath_index); + + 
spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = dev->vhca_id; + src_path_index = zxdh_get_path_index(&spath_index); + + wqe = zxdh_sc_cqp_get_next_send_wqe(dev->cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, src_path_index); + set_64bit_val(wqe, 16, src_dest.dest); // L2D Address + set_64bit_val(wqe, 24, src_dest.src); // Physical_Buffer_Address + set_64bit_val(wqe, 32, src_dest.len); // PTE_Length + + hdr = FIELD_PREP(ZXDH_CQPSQ_DESTPATHINDEX, dest_path_index) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_WQE_DMA_WRITE) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, dev->cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: CONFIG_PTE_TABLE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + return ret_code; +} +static int zxdh_query_flr_flag(struct zxdh_pci_f *rf) +{ + u32 cnt = 0, val = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + if (rf->sc_dev.flr_query == ZXDH_FLR_QUERY_FLAG) { + do { + val = readl(dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_6); + if (val != ZXDH_FLR_OP_FLAG) + return 0; + if (cnt++ > ZXDH_FLR_QUERY_CNT) { + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMA_CQP_CONTEXT_6)); + writel(1, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_QUEUE_VHCA_FLAG)); + pr_err("[%s] val:0x%x vhca_id:%d timeout!\n", __func__, val, rf->sc_dev.vhca_id); + break; + } + msleep(ZXDH_FLR_QUERY_TIME); + } while (val == ZXDH_FLR_OP_FLAG); + } + return 0; +} +static int zxdh_wait_fw_done(struct zxdh_pci_f *rf) +{ + u32 cnt = 0, val = 0, status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + + do { + val = readl((u32 __iomem 
*)(dev->hw->hw_addr + RDMARX_WAIT_FW_DONE)); + if (cnt++ > FW_TIME_WAIT_CNT) { + status = -ETIMEDOUT; + break; + } + if (val) + break; + mdelay(FW_TIME_WAIT_1S); + } while (!val); + pr_info("[%s] val:0x%x wait time: %ds\n", __func__, val, cnt); + + return status; +} +/** + * zxdh_sc_dev_init - Initialize control part of device + * @ver: version + * @dev: Device pointer + * @info: Device init info + */ +int zxdh_sc_dev_init(enum zxdh_rdma_vers ver, struct zxdh_sc_dev *dev, + struct zxdh_device_init_info *info) +{ + struct zxdh_pci_f *rf; + int status = 0; + int ret = 0; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */ + mutex_init(&dev->ws_mutex); + dev->privileged = info->privileged; + dev->num_vfs = info->max_vfs; + dev->cache_id = 1; + dev->ird_size = ICRDMA_MAX_IRD_SIZE; + + dev->hw = info->hw; + dev->hw->hw_addr = info->bar0; + dev->hmc_epid = (ZXDH_AXID_HOST_EP0 + dev->ep_id); + /* Setup the hardware limits, hmc may limit further */ + dev->hw_attrs.min_hw_qp_id = ZXDH_MIN_IW_QP_ID; + dev->hw_attrs.min_hw_aeq_size = ZXDH_MIN_AEQ_ENTRIES; + dev->hw_attrs.max_hw_aeq_size = ZXDH_MAX_AEQ_ENTRIES; + dev->hw_attrs.min_hw_ceq_size = ZXDH_MIN_CEQ_ENTRIES; + dev->hw_attrs.max_hw_ceq_size = ZXDH_MAX_CEQ_ENTRIES; + dev->hw_attrs.uk_attrs.min_hw_cq_size = ZXDH_MIN_CQ_SIZE; + dev->hw_attrs.uk_attrs.max_hw_cq_size = ZXDH_MAX_CQ_SIZE; + dev->hw_attrs.max_hw_outbound_msg_size = ZXDH_MAX_OUTBOUND_MSG_SIZE; + dev->hw_attrs.max_mr_size = ZXDH_MAX_MR_SIZE; + dev->hw_attrs.max_hw_inbound_msg_size = ZXDH_MAX_INBOUND_MSG_SIZE; + dev->hw_attrs.uk_attrs.max_hw_inline = ZXDH_MAX_INLINE_DATA_SIZE; + dev->hw_attrs.max_hw_wqes = ZXDH_MAX_WQ_ENTRIES; + dev->hw_attrs.max_qp_wr = ZXDH_MAX_QP_WRS(ZXDH_MAX_QUANTA_PER_WR); + dev->hw_attrs.max_srq_wr = ZXDH_MAX_SRQ_WRS; + + dev->hw_attrs.uk_attrs.max_hw_srq_wr = ZXDH_MAX_SRQ_WRS; + dev->hw_attrs.uk_attrs.max_hw_rq_quanta = ZXDH_QP_SW_MAX_RQ_QUANTA; + 
dev->hw_attrs.uk_attrs.max_hw_srq_quanta = ZXDH_QP_SW_MAX_SRQ_QUANTA; + dev->hw_attrs.uk_attrs.max_hw_wq_quanta = ZXDH_QP_SW_MAX_WQ_QUANTA; + dev->hw_attrs.max_hw_pds = ZXDH_MAX_PDS; + dev->hw_attrs.max_hw_ena_vf_count = ZXDH_MAX_PE_ENA_VF_COUNT; + + dev->hw_attrs.max_done_count = ZXDH_DONE_COUNT; + dev->hw_attrs.max_sleep_count = ZXDH_SLEEP_COUNT; + dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS; + dev->hw_attrs.cqp_timeout_threshold = CQP_TIMEOUT_THRESHOLD; + dev->hw_attrs.self_health = false; + + dev->hw_attrs.uk_attrs.hw_rev = (u8)ver; + if (!rf->ftype) { + status = zxdh_wait_fw_done(rf); + if (status) { + pr_info("[%s] FW undone! FW may have not been fully loaded after host is started.\n", __func__); + } + } + ret = zxdh_query_flr_flag(rf); + spin_lock_init(&dev->vf_dev_lock); + zxdh_init_hw(dev); + return ret; +} + +u16 zxdh_get_tc_8k_index_offset(u32 total_vhca, u16 vhca_8k_index_cnt, + u8 traffic_class, u16 *tc_8k_index_num) +{ + u16 tc_8k_index_offset = 0; + + if (total_vhca <= 34) { + *tc_8k_index_num = vhca_8k_index_cnt / 8; + tc_8k_index_offset = (*tc_8k_index_num) * traffic_class; + } else if (total_vhca <= 66) { + *tc_8k_index_num = vhca_8k_index_cnt / 8; + tc_8k_index_offset = (*tc_8k_index_num) * traffic_class; + } else if (total_vhca <= 130) { + *tc_8k_index_num = vhca_8k_index_cnt / 4; + traffic_class /= 2; + tc_8k_index_offset = (*tc_8k_index_num) * traffic_class; + } else if (total_vhca <= 258) { + *tc_8k_index_num = vhca_8k_index_cnt / 2; + traffic_class /= 4; + tc_8k_index_offset = (*tc_8k_index_num) * traffic_class; + } + + return tc_8k_index_offset; +} + +u16 zxdh_get_8k_index(struct zxdh_sc_qp *qp, u32 dest_ip) +{ + u16 tc_8k_index_offset, tc_8k_index_num; + u16 dip_8k_index_offset; + u16 qp_8k_index; + + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_UD) { + return qp->dev->vhca_ud_8k_index; + } + + tc_8k_index_offset = zxdh_get_tc_8k_index_offset(qp->dev->total_vhca, + qp->dev->vhca_8k_index_cnt, qp->user_pri, 
&tc_8k_index_num); + dip_8k_index_offset = dest_ip % tc_8k_index_num; + qp_8k_index = qp->dev->vhca_8k_index_start + + tc_8k_index_offset + dip_8k_index_offset; + return qp_8k_index; +} + +/** + * zxdh_init_destroy_aeq - destroy aeq + * @rf: RDMA PCI function + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +int zxdh_init_destroy_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, 0); + + set_64bit_val(wqe, 16, 0); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + return 0; +} + +/** + * zxdh_create_cqp_qp - create cqp qp + * @rf: RDMA PCI function + * + * Issue a create cqp qp request and + * create the resources associated with the cqp qp + * The function is called during driver load + */ +int zxdh_create_cqp_qp(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + struct zxdh_dma_mem *cqp_host_ctx; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + cqp_host_ctx = &rf->cqp_host_ctx; + + cqp_host_ctx->va = NULL; + cqp_host_ctx->size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + 
cqp_host_ctx->va = dma_alloc_coherent(dev->hw->device, + cqp_host_ctx->size, + &cqp_host_ctx->pa, GFP_KERNEL); + + if (!cqp_host_ctx->va) + return -ENOMEM; + + memset(cqp_host_ctx->va, 0, cqp_host_ctx->size); + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) { + dma_free_coherent(dev->hw->device, cqp_host_ctx->size, + cqp_host_ctx->va, cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + return -ENOSPC; + } + + hdr = FIELD_PREP(RDMAQPC_TX_CQN, dev->base_cqn); + set_64bit_val((__le64 *)cqp_host_ctx->va, 152, hdr); + + set_64bit_val( + (__le64 *)cqp_host_ctx->va, 160, + FIELD_PREP(RDMAQPC_TX_QPN, dev->base_qpn) | + FIELD_PREP(RDMAQPC_TX_VHCA_ID_LOW6, dev->vhca_id)); + + set_64bit_val( + (__le64 *)cqp_host_ctx->va, 168, + FIELD_PREP(RDMAQPC_TX_VHCA_ID_HIGH4, RS_64_1(dev->vhca_id, 6)) | + FIELD_PREP(RDMAQPC_TX_QPSTATE, ZXDH_QPS_RTS)); + dma_wmb(); + + hdr = FIELD_PREP(RDMAQPC_RX_CQN, dev->base_cqn); + set_64bit_val((__le64 *)cqp_host_ctx->va, 376, hdr); + dma_wmb(); + + hdr = FIELD_PREP(RDMAQPC_RX_VHCA_ID, dev->vhca_id); + set_64bit_val((__le64 *)cqp_host_ctx->va, 384, hdr); + dma_wmb(); + + set_64bit_val(wqe, 8, cqp_host_ctx->pa); + set_64bit_val(wqe, 16, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 24, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 32, RDMAQPC_MASK_INIT); + set_64bit_val(wqe, 40, RDMAQPC_MASK_INIT); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) { + 
dma_free_coherent(dev->hw->device, cqp_host_ctx->size, + cqp_host_ctx->va, cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + return ret_code; + } + return 0; +} + +/** + * zxdh_destroy_cqp_qp - destroy cqp qp + * @rf: RDMA PCI function + * + * Issue a destroy cqp qp request and + * free the resources associated with the cqp qp + * The function is called during driver unload + */ +int zxdh_destroy_cqp_qp(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_dev *dev; + struct zxdh_dma_mem *cqp_host_ctx; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0, error = 0; + int ret_code = 0; + u64 scratch = 0; + + dev = &rf->sc_dev; + cqp = dev->cqp; + cqp_host_ctx = &rf->cqp_host_ctx; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, cqp_host_ctx->pa); + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, RDMAQPC_TX_MASKH_QP_STATE); + set_64bit_val(wqe, 32, RDMAQPC_MASK_RESET); + set_64bit_val(wqe, 40, RDMAQPC_MASK_RESET); + hdr = FIELD_PREP(ZXDH_CQPSQ_QP_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_QP_CONTEXT_ID, cqp->dev->base_qpn) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_QP); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + zxdh_get_cqp_reg_info(dev->cqp, &val, &tail, &error); + + zxdh_sc_cqp_post_sq(dev->cqp); + + ret_code = zxdh_cqp_poll_registers(dev->cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + dma_free_coherent(dev->hw->device, cqp_host_ctx->size, cqp_host_ctx->va, + cqp_host_ctx->pa); + cqp_host_ctx->va = NULL; + + return 0; +} + +int zxdh_sc_query_mkey(struct zxdh_sc_cqp *cqp, u32 mkeyindex, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u64 tmp = 0; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, 
scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_MKEY) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + set_64bit_val(wqe, 24, FIELD_PREP(ZXDH_CQPSQ_QUERY_MKEY, mkeyindex)); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + get_64bit_val(wqe, 24, &tmp); + + return 0; +} + +/** + * zxdh_copy_ip_ntohl - copy IP address from network to host + * @dst: IP address in host order + * @src: IP address in network order (big endian) + */ +void zxdh_copy_ip_ntohl(u32 *dst, __be32 *src) +{ + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst = ntohl(*src); +} diff --git a/drivers/infiniband/hw/zrdma/dbgfs.c b/drivers/infiniband/hw/zrdma/dbgfs.c new file mode 100644 index 000000000000..815200cea2fa --- /dev/null +++ b/drivers/infiniband/hw/zrdma/dbgfs.c @@ -0,0 +1,1407 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "icrdma_hw.h" + +struct mutex zrdma_debugfs_mutex; +struct dentry *zrdma_debugfs_root; +EXPORT_SYMBOL(zrdma_debugfs_root); + +#define ZXDH_NP_PSN_WRAPAROUND_ENABLE_BIT 30 +#define ZXDH_NP_PMTU_START_BIT 0 +#define ZXDH_NP_PMTU_END_BIT 2 +#define SET_32_REG_VAL(rf, reg, offset, var) \ + do { \ + u32 tmp = rd32((rf)->sc_dev.hw, (reg)) & ~(offset); \ + wr32((rf)->sc_dev.hw, (reg), tmp | FIELD_PREP(offset, var)); \ + } while (0) + +int read_np_cnp_dscp(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_DSCP); + *var = FIELD_GET(ZXDH_DCQCN_NP_CNP_DSCP, tmp); + return 0; +} + +int write_np_cnp_dscp(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_DSCP) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_DSCP, ZXDH_DCQCN_NP_CNP_DSCP, var); + return 0; +} + +int read_np_cnp_prio(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_PRIO); + 
*var = FIELD_GET(ZXDH_DCQCN_NP_CNP_PRIO, tmp); + return 0; +} + +int write_np_cnp_prio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_PRIO) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_PRIO, ZXDH_DCQCN_NP_CNP_PRIO, var); + return 0; +} + +int read_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_CNP_PRIO_MODE); + *var = FIELD_GET(ZXDH_DCQCN_NP_CNP_PRIO_MODE, tmp); + return 0; +} + +int write_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_NP_CNP_PRIO_MODE) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_CNP_PRIO_MODE, + ZXDH_DCQCN_NP_CNP_PRIO_MODE, var); + return 0; +} + +int read_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp_x = 0; + u32 tmp_y = 0; + + tmp_x = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X); + tmp_y = rd32(rf->sc_dev.hw, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y); + *var = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, tmp_x) * + FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y, tmp_y); + return 0; +} + +int write_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 var) +{ + u32 y = 0; + u32 y_ex = 0; + u16 x = 0; + + if (var > RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS || + (var < RDMA_FLOW_MIN_NP_MIN_TIME_BETWEEN_CNPS)) { + return -EINVAL; + } + y = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y, + rd32(rf->sc_dev.hw, + RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y)); + y_ex = FIELD_GET(ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX, + rd32(rf->sc_dev.hw, + RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX)); + if (y != RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y || + y_ex != RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y_EX) { + return -EPERM; + } + x = var / RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y; + SET_32_REG_VAL(rf, RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, + ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X, x); + return 0; +} + +int read_prg_time_reset(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, 
MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_TIME_RESET, var); +} + +int write_prg_time_reset(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_PRG_TIME_RESET || + var < RDMA_FLOW_MIN_PRG_TIME_RESET) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_TIME_RESET, var); +} + +int read_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_CLAMP_TGT_RAGE, var); +} + +int write_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_CLAMP_TGT_RAGE, var); +} + +int read_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + var); +} + +int write_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + var); +} + +int read_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_DCE_TCP_RTT, var); +} + +int write_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RP_DCE_TCP_RTT || + var < RDMA_FLOW_MIN_RP_DCE_TCP_RTT) { + return -EINVAL; + } + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_DCE_TCP_RTT, var); +} + +int read_dce_tcp_g(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, E_PARA_DCQCN_DCE_TCP_G, + var); +} + +int write_dce_tcp_g(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_DCE_TCP_G) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, E_PARA_DCQCN_DCE_TCP_G, + var); +} + +int read_rpg_gd(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, 
E_PARA_DCQCN_RPG_GD, + var); +} + +int write_rpg_gd(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_GD) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, E_PARA_DCQCN_RPG_GD, + var); +} + +int read_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, var); +} + +int write_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_INITIAL_ALPHA_VALUE) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, var); +} + +int read_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_MIN_DEC_FAC, var); +} + +int write_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_MIN_DEC_FAC) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_MIN_DEC_FAC, var); +} + +int read_rpg_threshold(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_THRESHOLD, var); +} + +int write_rpg_threshold(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_THRESHOLD) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_THRESHOLD, var); +} + +int read_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_RATIO_INCREASE, var); +} + +int write_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 var) +{ + if (var != 1 && var != 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_RATIO_INCREASE, var); +} + +int read_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_AI_RATIO, var); +} + +int write_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > 
RDMA_FLOW_MAX_RPG_AI_RATIO) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_AI_RATIO, var); +} + +int read_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_HAI_RATIO, var); +} + +int write_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_RPG_HAI_RATIO) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, MCODE_TYPE_DCQCN, + E_PARA_DCQCN_RPG_HAI_RATIO, var); +} + +int read_rpg_byte_reset(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_BYTE_RESET); + *var = FIELD_GET(ZXDH_DCQCN_RPG_BYTE_RESET, tmp); + return 0; +} + +int write_rpg_byte_reset(struct zxdh_pci_f *rf, u32 var) +{ + if (var < RDMA_FLOW_BYTE_RESET_THRESHOLD) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_BYTE_RESET, ZXDH_DCQCN_RPG_BYTE_RESET, + var); + return 0; +} + +int read_rpg_ai_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_AI_RATE); + *var = FIELD_GET(ZXDH_DCQCN_RPG_AI_RATE, tmp); + return 0; +} + +int write_rpg_ai_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_CONTROL_RATE_1G) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_AI_RATE, ZXDH_DCQCN_RPG_AI_RATE, var); + return 0; +} + +int read_rpg_hai_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_DCQCN_RPG_HAI_RATE); + *var = FIELD_GET(ZXDH_DCQCN_RPG_HAI_RATE, tmp); + return 0; +} + +int write_rpg_hai_rate(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_CONTROL_RATE_10G) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_DCQCN_RPG_HAI_RATE, ZXDH_DCQCN_RPG_HAI_RATE, + var); + return 0; +} + +int read_rpg_max_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_MAX_RATE); + *var = FIELD_GET(ZXDH_RPG_MAX_RATE, tmp); + return 0; +} + +int write_rpg_max_rate(struct zxdh_pci_f *rf, u32 var) +{ + 
int ret; + u32 tmp = 0; + + if (var < RDMA_FLOW_CONTROL_RATE_10M || + var > RDMA_FLOW_CONTROL_RATE_200G) { + return -EINVAL; + } + ret = read_rpg_min_rate(rf, &tmp); + if (ret || tmp > var) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_RPG_MAX_RATE, ZXDH_RPG_MAX_RATE, var); + return 0; +} + +int read_rpg_min_rate(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_MIN_RATE); + *var = FIELD_GET(ZXDH_RPG_MIN_RATE, tmp); + return 0; +} + +int write_rpg_min_rate(struct zxdh_pci_f *rf, u32 var) +{ + int ret; + u32 tmp = 0; + + if (var < RDMA_FLOW_CONTROL_RATE_10M || + var > RDMA_FLOW_CONTROL_RATE_200G) { + return -EINVAL; + } + ret = read_rpg_max_rate(rf, &tmp); + if (ret || tmp < var) + return -EINVAL; + + SET_32_REG_VAL(rf, RDMA_RPG_MIN_RATE, ZXDH_RPG_MIN_RATE, var); + return 0; +} + +struct parameter_t zrdma_dcqcn_params[] = { + { "np_cnp_dscp", ZRDMA_DBG_DCQCN_NP_CNP_DSCP, &read_np_cnp_dscp, + &write_np_cnp_dscp }, + { "np_cnp_prio", ZRDMA_DBG_DCQCN_NP_CNP_PRIO, &read_np_cnp_prio, + &write_np_cnp_prio }, + { "np_cnp_prio_mode", ZRDMA_DBG_DCQCN_NP_CNP_PRIO_MODE, + &read_np_cnp_prio_mode, &write_np_cnp_prio_mode }, + { "np_min_time_between_cnps", ZRDMA_DBG_DCQCN_NP_MIN_TIME_BETWEEN_CNPS, + &read_np_min_time_between_cnps, &write_np_min_time_between_cnps }, + { "prg_time_reset", ZRDMA_DBG_DCQCN_PRG_TIME_RESET, + &read_prg_time_reset, &write_prg_time_reset }, + { "clamp_tgt_rate", ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE, + &read_rpg_clamp_tgt_rate, &write_rpg_clamp_tgt_rate }, + { "clamp_tgt_rate_after_time_inc", + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC, + &read_rpg_clamp_tgt_rate_after_time_inc, + &write_rpg_clamp_tgt_rate_after_time_inc }, + { "dce_tcp_rtt", ZRDMA_DBG_DCQCN_RP_DCE_TCP_RTT, &read_rp_dce_tcp_rtt, + &write_rp_dce_tcp_rtt }, + { "dce_tcp_g", ZRDMA_DBG_DCQCN_DCE_TCP_G, &read_dce_tcp_g, + &write_dce_tcp_g }, + { "rpg_gd", ZRDMA_DBG_DCQCN_RPG_GD, &read_rpg_gd, &write_rpg_gd }, + { "initial_alpha_value", 
ZRDMA_DBG_DCQCN_RPG_INITIAL_ALPHA_VALUE, + &read_rpg_initial_alpha_value, &write_rpg_initial_alpha_value }, + { "min_dec_fac", ZRDMA_DBG_DCQCN_RPG_MIN_DEC_FAC, &read_rpg_min_dec_fac, + &write_rpg_min_dec_fac }, + { "rpg_threshold", ZRDMA_DBG_DCQCN_RPG_THRESHOLD, &read_rpg_threshold, + &write_rpg_threshold }, + { "rpg_ratio_increase", ZRDMA_DBG_DCQCN_RPG_RATIO_INCREASE, + &read_rpg_ratio_increase, &write_rpg_ratio_increase }, + { "rpg_ai_ratio", ZRDMA_DBG_DCQCN_RPG_AI_RATIO, &read_rpg_ai_ratio, + &write_rpg_ai_ratio }, + { "rpg_hai_ratio", ZRDMA_DBG_DCQCN_RPG_HAI_RATIO, &read_rpg_hai_ratio, + &write_rpg_hai_ratio }, + { "rpg_byte_reset", ZRDMA_DBG_DCQCN_RPG_BYTE_RESET, + &read_rpg_byte_reset, &write_rpg_byte_reset }, + { "rpg_ai_rate", ZRDMA_DBG_DCQCN_RPG_AI_RATE, &read_rpg_ai_rate, + &write_rpg_ai_rate }, + { "rpg_hai_rate", ZRDMA_DBG_DCQCN_RPG_HAI_RATE, &read_rpg_hai_rate, + &write_rpg_hai_rate }, + { "rpg_max_rate", ZRDMA_DBG_DCQCN_RPG_MAX_RATE, &read_rpg_max_rate, + &write_rpg_max_rate }, + { "rpg_min_rate", ZRDMA_DBG_DCQCN_RPG_MIN_RATE, &read_rpg_min_rate, + &write_rpg_min_rate }, +}; + +int read_alpha(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_ALPHA, var); +} + +int write_alpha(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_ALPHA_VALUE || var == 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_ALPHA, var); +} + +int read_tlow(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_TLOW, var); +} + +int write_tlow(struct zxdh_pci_f *rf, u32 var) +{ + u32 tmp = 0; + int ret; + + if (var > RDMA_FLOW_MAX_TLOW_VALUE || var == 0) + return -EINVAL; + + ret = read_thigh(rf, &tmp); + if (ret || tmp < var) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_TLOW, var); +} + +int read_thigh(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_THIGH, 
var); +} + +int write_thigh(struct zxdh_pci_f *rf, u32 var) +{ + u32 tmp = 0; + int ret; + + if (var > RDMA_FLOW_MAX_THIGH_VALUE || var == 0) + return -EINVAL; + + ret = read_tlow(rf, &tmp); + if (ret || tmp > var) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_THIGH, var); +} + +int read_ai_num(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_AI_NUM, + var); +} + +int write_ai_num(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_AI_NUM_VALUE || var == 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_AI_NUM, + var); +} + +int read_thred_gradient(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, + E_PARA_RTT_THRED_GRADIENT, var); +} + +int write_thred_gradient(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_THRED_GRADIENT) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, + E_PARA_RTT_THRED_GRADIENT, var); +} + +int read_hai_n(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_HAI_N, var); +} + +int write_hai_n(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_HAI_N_VALUE || var == 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_HAI_N, var); +} + +int read_ai_n(struct zxdh_pci_f *rf, u32 *var) +{ + return zxdh_mp_dtcm_para_get(rf, rf->mcode_type, E_PARA_RTT_AI_N, var); +} + +int write_ai_n(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_AI_N_VALUE || var == 0) + return -EINVAL; + + return zxdh_mp_dtcm_para_set(rf, rf->mcode_type, E_PARA_RTT_AI_N, var); +} + +int read_vf_delta(struct zxdh_pci_f *rf, u32 *var) +{ + u32 tmp = 0; + + tmp = rd32(rf->sc_dev.hw, RDMA_RPG_VF_DELTA); + *var = FIELD_GET(ZXDH_RTT_VF_DELTA, tmp); + return 0; +} + +int write_vf_delta(struct zxdh_pci_f *rf, u32 var) +{ + if (var > RDMA_FLOW_MAX_VF_DELTA_VALUE || var == 0) + return -EINVAL; + 
+ SET_32_REG_VAL(rf, RDMA_RPG_VF_DELTA, ZXDH_RTT_VF_DELTA, var); + return 0; +} + +struct parameter_t zrdma_rtt_params[] = { + { "alpha", ZRDMA_DBG_RTT_ALPHA, &read_alpha, &write_alpha }, + { "tlow", ZRDMA_DBG_RTT_TLOW, &read_tlow, &write_tlow }, + { "thigh", ZRDMA_DBG_RTT_THIGH, &read_thigh, &write_thigh }, + { "ai_num", ZRDMA_DBG_RTT_AI_NUM, &read_ai_num, &write_ai_num }, + { "thred_gradient", ZRDMA_DBG_RTT_THRED_GRADIENT, &read_thred_gradient, + &write_thred_gradient }, + { "hai_n", ZRDMA_DBG_RTT_HAI_N, &read_hai_n, &write_hai_n }, + { "ai_n", ZRDMA_DBG_RTT_AI_N, &read_ai_n, &write_ai_n }, + { "rpg_max_rate", ZRDMA_DBG_RTT_RPG_MAX_RATE, &read_rpg_max_rate, + &write_rpg_max_rate }, + { "rpg_min_rate", ZRDMA_DBG_RTT_RPG_MIN_RATE, &read_rpg_min_rate, + &write_rpg_min_rate }, + { "delta", ZRDMA_DBG_RTT_VF_DELTA, &read_vf_delta, &write_vf_delta }, +}; + +static bool check_psn_wraparound_enable_input(u32 input, u32 *output) +{ + if (input <= 1) { + *output = input; + return false; + } + return true; +} + +static bool check_pmtu_input(u32 input, u32 *output) +{ + if (input <= IB_MTU_4096) { + *output = input; + return false; + } + return true; +} + +int read_psn_wraparound_enable(struct zxdh_pci_f *rf, u32 *var) +{ + u32 glb_cfg_data_1 = 0, np_ret = 0, psn_wraparound_enable = 0; + struct iidc_core_dev_info *cdev_info; + DPP_PF_INFO_T pf_info = { 0 }; + cdev_info = rf->cdev; + if (!cdev_info) { + *var = 0; + pr_err("read_psn_wraparound_enable: cdev_info is null!\n"); + return -EIO; + } + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + np_ret = dpp_glb_cfg_get_1(&pf_info, &glb_cfg_data_1); + if (np_ret) { + *var = 0; + pr_err("read_psn_wraparound_enable: query dpp_glb_cfg_get_1 failed!\n"); + return -EIO; + } + psn_wraparound_enable = FIELD_GET( + ZXDH_NP_PSN_WRAPAROUND_PSN_WRAPAROUND_ENABLE, glb_cfg_data_1); + if (check_psn_wraparound_enable_input(psn_wraparound_enable, var)) + return -EINVAL; + + return 0; +} + +int 
write_psn_wraparound_enable(struct zxdh_pci_f *rf, u32 var) +{ + u32 dpp_glb_cfg_psn_wraparound_enable = 0, + np_ret = 0; + struct iidc_core_dev_info *cdev_info; + DPP_PF_INFO_T pf_info = { 0 }; + cdev_info = rf->cdev; + if (!cdev_info) { + pr_err("write_psn_wraparound_enable: cdev_info is null!\n"); + return -EIO; + } + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + if (check_psn_wraparound_enable_input( + var, &dpp_glb_cfg_psn_wraparound_enable)) + return -EINVAL; + + np_ret = dpp_pktrx_mcode_glb_cfg_write( + &pf_info, ZXDH_NP_PSN_WRAPAROUND_ENABLE_BIT, + ZXDH_NP_PSN_WRAPAROUND_ENABLE_BIT, + dpp_glb_cfg_psn_wraparound_enable); + if (np_ret) { + pr_err("write_psn_wraparound_enable: query dpp_pktrx_mcode_glb_cfg_write failed!\n"); + return -EIO; + } + return 0; +} + +int read_pmtu(struct zxdh_pci_f *rf, u32 *var) +{ + u32 glb_cfg_data_1 = 0, np_ret = 0, pmtu_in_table = 0; + struct iidc_core_dev_info *cdev_info; + DPP_PF_INFO_T pf_info = { 0 }; + cdev_info = rf->cdev; + if (!cdev_info) { + *var = 0; + pr_err("read_pmtu: cdev_info is null!\n"); + return -EIO; + } + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + np_ret = dpp_glb_cfg_get_1(&pf_info, &glb_cfg_data_1); + if (np_ret) { + *var = 0; + pr_err("read_pmtu: query dpp_glb_cfg_get_1 failed!\n"); + return -EIO; + } + pmtu_in_table = FIELD_GET(ZXDH_NP_PMTU, glb_cfg_data_1); + if (check_pmtu_input(pmtu_in_table, var)) + return -EINVAL; + return 0; +} + +int write_pmtu(struct zxdh_pci_f *rf, u32 var) +{ + u32 dpp_glb_cfg_pmtu = 0, np_ret = 0; + struct iidc_core_dev_info *cdev_info; + DPP_PF_INFO_T pf_info = { 0 }; + cdev_info = rf->cdev; + if (!cdev_info) { + pr_err("write_pmtu: cdev_info is null!\n"); + return -EIO; + } + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + if (check_pmtu_input(var, &dpp_glb_cfg_pmtu)) + return -EINVAL; + + np_ret = dpp_pktrx_mcode_glb_cfg_write(&pf_info, ZXDH_NP_PMTU_START_BIT, + 
ZXDH_NP_PMTU_END_BIT, + dpp_glb_cfg_pmtu); + if (np_ret) { + pr_err("write_pmtu: query dpp_pktrx_mcode_glb_cfg_write failed!\n"); + return -EIO; + } + return 0; +} + +struct parameter_t zrdma_np_psn_wraparound_params[] = { + { "psn_wraparound_enable", ZRDMA_DBG_NP_PSN_WRAPAROUND_ENABLE_PARA, &read_psn_wraparound_enable, &write_psn_wraparound_enable }, + { "pmtu", ZRDMA_DBG_NP_PMTU_PARA, &read_pmtu, &write_pmtu }, +}; + +int zrdma_ib_write_rtt_params(struct zxdh_pci_f *rf, int offset, u32 var) +{ + int ret; + + if (offset >= ZRDMA_DBG_RTT_MAX || offset < 0) + return -EINVAL; + + ret = zrdma_rtt_params[offset].wfunc(rf, var); + return ret; +} + +int zrdma_ib_read_rtt_params(struct zxdh_pci_f *rf, int offset, u32 *var) +{ + int ret; + + if (offset >= ZRDMA_DBG_RTT_MAX || offset < 0) + return -EINVAL; + + ret = zrdma_rtt_params[offset].rfunc(rf, var); + return ret; +} + +int zrdma_ib_write_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 var) +{ + int ret; + + if (offset >= ZRDMA_DBG_DCQCN_MAX || offset < 0) + return -EINVAL; + + ret = zrdma_dcqcn_params[offset].wfunc(rf, var); + return ret; +} + +int zrdma_ib_read_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 *var) +{ + int ret; + + if (offset >= ZRDMA_DBG_DCQCN_MAX || offset < 0) + return -EINVAL; + + ret = zrdma_dcqcn_params[offset].rfunc(rf, var); + return ret; +} + +int zrdma_ib_write_np_psn_wraparound_params(struct zxdh_pci_f *rf, int offset, u32 var) +{ + int ret; + if (offset >= ZRDMA_DBG_NP_PARA_MAX || offset < 0) + return -EINVAL; + ret = zrdma_np_psn_wraparound_params[offset].wfunc(rf, var); + return ret; +} + +int zrdma_ib_read_np_psn_wraparound_params(struct zxdh_pci_f *rf, int offset, u32 *var) +{ + int ret; + if (offset >= ZRDMA_DBG_NP_PARA_MAX || offset < 0) + return -EINVAL; + ret = zrdma_np_psn_wraparound_params[offset].rfunc(rf, var); + return ret; +} + +static ssize_t check_write_param(const char __user *buf, size_t count, u32 *var) +{ + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + + if 
(count > sizeof(lbuf)) + return -EINVAL; + if (copy_from_user(lbuf, buf, count)) + return -EFAULT; + lbuf[sizeof(lbuf) - 1] = '\0'; + if (kstrtou32(lbuf, 0, var)) + return -EINVAL; + return 0; +} + +static ssize_t dcqcn_write_param(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) + +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + int ret; + u32 var = 0; + + ret = check_write_param(buf, count, &var); + if (ret) + return ret; + + ret = zrdma_ib_write_dcqcn_params(param->dev, offset, var); + return ret ? ret : count; +} + +static ssize_t dcqcn_read_param(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + u32 var = 0; + int ret; + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + + ret = zrdma_ib_read_dcqcn_params(param->dev, offset, &var); + if (ret) + return ret; + + ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var); + if (ret < 0) + return ret; + + return simple_read_from_buffer(buf, count, pos, lbuf, ret); +} + +static ssize_t np_psn_wraparound_params_write_param(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) + +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + int ret; + u32 var = 0; + ret = check_write_param(buf, count, &var); + if (ret) + return ret; + ret = zrdma_ib_write_np_psn_wraparound_params(param->dev, offset, var); + return ret ? 
ret : count; +} + +static ssize_t np_psn_wraparound_params_read_param(struct file *filp, char __user *buf, + size_t count, loff_t *pos) +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + u32 var = 0; + int ret; + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + ret = zrdma_ib_read_np_psn_wraparound_params(param->dev, offset, &var); + if (ret) + return ret; + ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var); + if (ret < 0) + return ret; + return simple_read_from_buffer(buf, count, pos, lbuf, ret); +} + +static ssize_t rtt_write_param(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) + +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + int ret; + u32 var = 0; + + ret = check_write_param(buf, count, &var); + if (ret) + return ret; + + ret = zrdma_ib_write_rtt_params(param->dev, offset, var); + return ret ? ret : count; +} + +static ssize_t rtt_read_param(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct zrdma_dbg_param *param = filp->private_data; + int offset = param->offset; + u32 var = 0; + int ret; + char lbuf[ZRDMA_DEBUGFS_MAX_BUF_LEN] = { 0 }; + + ret = zrdma_ib_read_rtt_params(param->dev, offset, &var); + if (ret) + return ret; + + ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var); + if (ret < 0) + return ret; + + return simple_read_from_buffer(buf, count, pos, lbuf, ret); +} + +static const struct file_operations dbg_dcqcn_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dcqcn_write_param, + .read = dcqcn_read_param, +}; + +static const struct file_operations dbg_rtt_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = rtt_write_param, + .read = rtt_read_param, +}; + +static const struct file_operations dbg_np_psn_wraparound_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = np_psn_wraparound_params_write_param, + .read = np_psn_wraparound_params_read_param, +}; + +void 
zrdma_cleanup_np_psn_wraparound_params_debugfs_entry(struct zxdh_pci_f *rf) +{ + int ret; + struct dentry *dentry_pci_board_bdf = NULL; + struct dentry *dentry = NULL; + char pci_board_bdf[64] = { 0 }; + + if (!rf) { + pr_info("zrdma_cleanup_debugfs rf is null\n"); + return; + } + if (!zrdma_debugfs_root) + return; + if (rf->debugfs_entry.board_root && !rf->ftype) { + mutex_lock(&zrdma_debugfs_mutex); + ret = get_pci_board_bdf(pci_board_bdf, rf); + if (!ret) { + dentry_pci_board_bdf = debugfs_lookup( + pci_board_bdf, zrdma_debugfs_root); + if (dentry_pci_board_bdf) { + dentry = debugfs_lookup( + ZRDMA_DEBUGFS_NP_PSN_WRAPAROUND, + dentry_pci_board_bdf); + if (dentry) { + debugfs_remove_recursive(dentry); + rf->debugfs_entry + .board_np_psn_wraparound_root = + NULL; + } else { + rf->debugfs_entry + .board_np_psn_wraparound_root = + NULL; + } + } + } + mutex_unlock(&zrdma_debugfs_mutex); + } + if (rf->debugfs_entry.board_params.board_np_psn_wraparound_params && !rf->ftype) { + kfree(rf->debugfs_entry.board_params.board_np_psn_wraparound_params); + rf->debugfs_entry.board_params.board_np_psn_wraparound_params = NULL; + } +} + +static void cleanup_board_debugfs_entry(struct zxdh_pci_f *rf, int type, char *pci_board_bdf) +{ + const char *sub_dir = NULL; + struct dentry **board_mcode_root_ptr = NULL; + struct dentry *dentry_pci_board_bdf = NULL; + struct dentry *dentry = NULL; + + if (type == MCODE_TYPE_DCQCN) { + sub_dir = ZRDMA_DEBUGFS_DCQCN_DIR; + board_mcode_root_ptr = &rf->debugfs_entry.board_dcqcn_root; + } else if (type == MCODE_TYPE_WUMENG) { + sub_dir = ZRDMA_DEBUGFS_WUMENG_DIR; + board_mcode_root_ptr = &rf->debugfs_entry.board_dcqcn_root; + } else { + sub_dir = ZRDMA_DEBUGFS_RTT_DIR; + board_mcode_root_ptr = &rf->debugfs_entry.board_rtt_root; + } + + dentry_pci_board_bdf = debugfs_lookup(pci_board_bdf, zrdma_debugfs_root); + if (dentry_pci_board_bdf) { + dentry = debugfs_lookup(sub_dir, dentry_pci_board_bdf); + if (dentry) { + debugfs_remove_recursive(dentry); + 
} + *board_mcode_root_ptr = NULL; + } +} + +void zrdma_cleanup_mcode_type_debugfs_entry(struct zxdh_pci_f *rf, int type) +{ + int ret; + char pci_board_bdf[64] = { 0 }; + + if (!rf) { + pr_info("zrdma_cleanup_debugfs rf is null\n"); + return; + } + if (!zrdma_debugfs_root || !rf->debugfs_entry.vhca_root) + return; + if (rf->debugfs_entry.board_root && !rf->ftype) { + mutex_lock(&zrdma_debugfs_mutex); + ret = get_pci_board_bdf(pci_board_bdf, rf); + if (!ret) { + cleanup_board_debugfs_entry(rf, type, pci_board_bdf); + } + + mutex_unlock(&zrdma_debugfs_mutex); + } + debugfs_remove_recursive(rf->debugfs_entry.vhca_root); + rf->debugfs_entry.vhca_root = NULL; + if (rf->debugfs_entry.board_params.mcode_board_params.base && !rf->ftype) { + kfree(rf->debugfs_entry.board_params.mcode_board_params.base); + rf->debugfs_entry.board_params.mcode_board_params.base = NULL; + } + if (rf->debugfs_entry.vhca_params.mcode_vhca_params.base) { + kfree(rf->debugfs_entry.vhca_params.mcode_vhca_params.base); + rf->debugfs_entry.vhca_params.mcode_vhca_params.base = NULL; + } +} + +void zrdma_cleanup_debugfs_entry(struct zxdh_pci_f *rf) +{ + int ret; + struct dentry *dentry = NULL; + char pci_board_bdf[64] = { 0 }; + + if (!rf) { + pr_info("zrdma_cleanup_debugfs rf is null\n"); + return; + } + if (!zrdma_debugfs_root || !rf->debugfs_entry.vhca_root) + return; + if (rf->debugfs_entry.board_root && !rf->ftype) { + mutex_lock(&zrdma_debugfs_mutex); + ret = get_pci_board_bdf(pci_board_bdf, rf); + if (!ret) { + dentry = debugfs_lookup(pci_board_bdf, + zrdma_debugfs_root); + if (dentry) { + debugfs_remove_recursive(dentry); + rf->debugfs_entry.board_root = NULL; + } else { + rf->debugfs_entry.board_root = NULL; + } + } + mutex_unlock(&zrdma_debugfs_mutex); + } + debugfs_remove_recursive(rf->debugfs_entry.vhca_root); + rf->debugfs_entry.vhca_root = NULL; + if (rf->debugfs_entry.board_params.mcode_board_params.base && !rf->ftype) { + kfree(rf->debugfs_entry.board_params.mcode_board_params.base); + 
rf->debugfs_entry.board_params.mcode_board_params.base = NULL; + } + if (rf->debugfs_entry.board_params.board_np_psn_wraparound_params && !rf->ftype) { + kfree(rf->debugfs_entry.board_params.board_np_psn_wraparound_params); + rf->debugfs_entry.board_params.board_np_psn_wraparound_params = NULL; + } + if (rf->debugfs_entry.vhca_params.mcode_vhca_params.base) { + kfree(rf->debugfs_entry.vhca_params.mcode_vhca_params.base); + rf->debugfs_entry.vhca_params.mcode_vhca_params.base = NULL; + } +} + +int zrdma_create_board_root_debugfs(struct zxdh_pci_f *rf, const char *pci_bdf, enum zrdma_debugfs_mode mode) +{ + struct dentry *dentry; + + switch (mode) { + case ZRDMA_DEBUGFS_MODE_NORMAL: + if (!rf->debugfs_entry.board_root && !rf->ftype) { + dentry = debugfs_lookup(pci_bdf, zrdma_debugfs_root); + if (!dentry) { + rf->debugfs_entry.board_root = + debugfs_create_dir(pci_bdf, zrdma_debugfs_root); + } else { + rf->debugfs_entry.board_root = dentry; + } + } + break; + case ZRDMA_DEBUGFS_MODE_BOND: + dentry = debugfs_lookup(pci_bdf, zrdma_debugfs_root); + if (!dentry) { + rf->debugfs_entry.board_root = + debugfs_create_dir(pci_bdf, zrdma_debugfs_root); + } else { + rf->debugfs_entry.board_root = dentry; + } + break; + default: + break; + } + + return 0; +} + +int zrdma_create_board_subdir_debugfs(struct zxdh_pci_f *rf, + const char *subdir_name, + struct dentry **board_subdir_ptr, + int (*create_file_func)(struct zxdh_pci_f *), + enum zrdma_debugfs_mode mode) +{ + struct dentry *dentry; + int ret; + + switch (mode) { + case ZRDMA_DEBUGFS_MODE_NORMAL: + if (!*board_subdir_ptr && !rf->ftype) { + dentry = debugfs_lookup(subdir_name, rf->debugfs_entry.board_root); + if (!dentry) { + *board_subdir_ptr = debugfs_create_dir(subdir_name, + rf->debugfs_entry.board_root); + ret = create_file_func(rf); + if (ret) { + return ret; + } + } else { + *board_subdir_ptr = dentry; + } + } + break; + case ZRDMA_DEBUGFS_MODE_BOND: + dentry = debugfs_lookup(subdir_name, 
rf->debugfs_entry.board_root); + if (!dentry) { + *board_subdir_ptr = debugfs_create_dir(subdir_name, + rf->debugfs_entry.board_root); + ret = create_file_func(rf); + if (ret) { + return ret; + } + } else { + *board_subdir_ptr = dentry; + } + break; + default: + break; + } + + return 0; +} + +static int zrdma_create_vhca_debugfs(struct zxdh_pci_f *rf, + const char *subdir_name, + struct dentry **vhca_subdir_ptr, + int (*create_file_func)(struct zxdh_pci_f *), + int mcode_type) +{ + int ret; + + rf->debugfs_entry.vhca_root = debugfs_create_dir( + dev_name(&rf->pcidev->dev), zrdma_debugfs_root); + *vhca_subdir_ptr = debugfs_create_dir(subdir_name, rf->debugfs_entry.vhca_root); + ret = create_file_func(rf); + if (ret) + return ret; + return 0; +} + +int create_debugfs_file_vhca_dcqcn(struct zxdh_pci_f *rf) +{ + int i; + int offset; + struct zrdma_dbg_vhca_dcqcn_params *dbg_cc_params; + + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.vhca_params.mcode_vhca_params.vhca_dcqcn_params = dbg_cc_params; + for (i = 0, offset = ZRDMA_DBG_DCQCN_RPG_BYTE_RESET; + i < ZRDMA_VHCA_DCQCN_CC_MAX; i++, offset++) { + dbg_cc_params->params[i].offset = offset; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_dcqcn_params[offset].name, 0600, + rf->debugfs_entry.vhca_dcqcn_root, + &dbg_cc_params->params[i], &dbg_dcqcn_fops); + } + return 0; +} + +int create_debugfs_file_board_dcqcn(struct zxdh_pci_f *rf) +{ + int i; + struct zrdma_dbg_board_dcqcn_params *dbg_cc_params; + + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.board_params.mcode_board_params.board_dcqcn_params = dbg_cc_params; + for (i = 0; i < ZRDMA_BOARD_DCQCN_CC_MAX; i++) { + dbg_cc_params->params[i].offset = i; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_dcqcn_params[i].name, 0600, + rf->debugfs_entry.board_dcqcn_root, + 
&dbg_cc_params->params[i], &dbg_dcqcn_fops); + } + return 0; +} + +int create_debugfs_file_board_np_psn_wraparound(struct zxdh_pci_f *rf) +{ + int i; + struct zrdma_dbg_board_np_psn_wraparound_params *dbg_cc_params; + + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.board_params.board_np_psn_wraparound_params = dbg_cc_params; + for (i = 0; i < ZRDMA_BOARD_NP_PSN_WRAPAROUND_CC_MAX; i++) { + dbg_cc_params->params[i].offset = i; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_np_psn_wraparound_params[i].name, 0600, + rf->debugfs_entry.board_np_psn_wraparound_root, + &dbg_cc_params->params[i], &dbg_np_psn_wraparound_fops); + } + return 0; +} + +int create_debugfs_file_vhca_rtt(struct zxdh_pci_f *rf) +{ + int i = 0; + int offset = 0; + struct zrdma_dbg_vhca_rtt_params *dbg_cc_params; + + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.vhca_params.mcode_vhca_params.vhca_rtt_params = dbg_cc_params; + for (i = 0, offset = ZRDMA_DBG_RTT_RPG_MAX_RATE; + i < ZRDMA_VHCA_RTT_CC_MAX; i++, offset++) { + dbg_cc_params->params[i].offset = offset; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_rtt_params[offset].name, 0600, + rf->debugfs_entry.vhca_rtt_root, + &dbg_cc_params->params[i], &dbg_rtt_fops); + } + return 0; +} + +int create_debugfs_file_board_rtt(struct zxdh_pci_f *rf) +{ + int i = 0; + struct zrdma_dbg_board_rtt_params *dbg_cc_params; + + dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL); + if (!dbg_cc_params) + return -ENOMEM; + rf->debugfs_entry.board_params.mcode_board_params.board_rtt_params = dbg_cc_params; + for (i = 0; i < ZRDMA_BOARD_RTT_CC_MAX; i++) { + dbg_cc_params->params[i].offset = i; + dbg_cc_params->params[i].dev = rf; + debugfs_create_file(zrdma_rtt_params[i].name, 0600, + rf->debugfs_entry.board_rtt_root, + &dbg_cc_params->params[i], &dbg_rtt_fops); + } + return 0; 
+} + +void create_debugfs_dcqcn_entry(const char *pci_bdf, struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode) +{ + int ret; + const char *debugfs_dir = NULL; + + mutex_lock(&zrdma_debugfs_mutex); + if (rf->mcode_type == MCODE_TYPE_DCQCN) { + debugfs_dir = ZRDMA_DEBUGFS_DCQCN_DIR; + } else { + debugfs_dir = ZRDMA_DEBUGFS_WUMENG_DIR; + } + ret = zrdma_create_board_root_debugfs(rf, pci_bdf, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + + ret = zrdma_create_board_subdir_debugfs(rf, debugfs_dir, + &rf->debugfs_entry.board_dcqcn_root, + create_debugfs_file_board_dcqcn, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + mutex_unlock(&zrdma_debugfs_mutex); + + if (mode == ZRDMA_DEBUGFS_MODE_NORMAL) { + ret = zrdma_create_vhca_debugfs(rf, debugfs_dir, + &rf->debugfs_entry.vhca_dcqcn_root, + create_debugfs_file_vhca_dcqcn, + MCODE_TYPE_DCQCN); + if (ret) + goto err; + } + return; +err: + zrdma_cleanup_mcode_type_debugfs_entry(rf, rf->mcode_type); +} + +void create_debugfs_rtt_entry(const char *pci_bdf, struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode) +{ + int ret; + + mutex_lock(&zrdma_debugfs_mutex); + ret = zrdma_create_board_root_debugfs(rf, pci_bdf, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + + ret = zrdma_create_board_subdir_debugfs(rf, ZRDMA_DEBUGFS_RTT_DIR, + &rf->debugfs_entry.board_rtt_root, + create_debugfs_file_board_rtt, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + mutex_unlock(&zrdma_debugfs_mutex); + + if (mode == ZRDMA_DEBUGFS_MODE_NORMAL) { + ret = zrdma_create_vhca_debugfs(rf, ZRDMA_DEBUGFS_RTT_DIR, + &rf->debugfs_entry.vhca_rtt_root, + create_debugfs_file_vhca_rtt, + MCODE_TYPE_RTT); + if (ret) + goto err; + } + return; +err: + zrdma_cleanup_mcode_type_debugfs_entry(rf, MCODE_TYPE_RTT); +} + +void create_debugfs_np_psn_wraparound_entry(const char *pci_bdf, + struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode) +{ + int ret; + if 
(rf->ftype) { + return; + } + mutex_lock(&zrdma_debugfs_mutex); + ret = zrdma_create_board_root_debugfs(rf, pci_bdf, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + + ret = zrdma_create_board_subdir_debugfs(rf, ZRDMA_DEBUGFS_NP_PSN_WRAPAROUND, + &rf->debugfs_entry.board_np_psn_wraparound_root, + create_debugfs_file_board_np_psn_wraparound, mode); + if (ret) { + mutex_unlock(&zrdma_debugfs_mutex); + goto err; + } + mutex_unlock(&zrdma_debugfs_mutex); + return; +err: + zrdma_cleanup_np_psn_wraparound_params_debugfs_entry(rf); +} + +void create_debugfs_default_entry(struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode) +{ + int ret; + char pci_board_bdf[64] = { 0 }; + + if (!zrdma_debugfs_root) { + pr_info("%s debugfs zrdma_debugfs_root is null\n", __func__); + return; + } + ret = get_pci_board_bdf(pci_board_bdf, rf); + if (ret) { + pr_info("create_debugfs_entry debugfs pci_board_bdf is null\n"); + return; + } + switch (rf->mcode_type) { + case MCODE_TYPE_DCQCN: + case MCODE_TYPE_WUMENG: + create_debugfs_dcqcn_entry(pci_board_bdf, rf, mode); + break; + case MCODE_TYPE_RTT: + create_debugfs_rtt_entry(pci_board_bdf, rf, mode); + break; + default: + break; + } + create_debugfs_np_psn_wraparound_entry(pci_board_bdf, rf, mode); +} + +void create_debugfs_entry(struct zxdh_pci_f *rf) +{ + create_debugfs_default_entry(rf, ZRDMA_DEBUGFS_MODE_NORMAL); +} + +void zrdma_register_debugfs(void) +{ + zrdma_debugfs_root = debugfs_create_dir("zrdma", NULL); + mutex_init(&zrdma_debugfs_mutex); +} + +void zrdma_unregister_debugfs(void) +{ + debugfs_remove(zrdma_debugfs_root); +} diff --git a/drivers/infiniband/hw/zrdma/dbgfs.h b/drivers/infiniband/hw/zrdma/dbgfs.h new file mode 100644 index 000000000000..c16270270794 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/dbgfs.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_DEBUGFS_H +#define ZXDH_DEBUGFS_H +#include +#include 
"type.h" + +enum zrdma_debugfs_mode { + ZRDMA_DEBUGFS_MODE_NORMAL = 0, + ZRDMA_DEBUGFS_MODE_BOND, +}; + +#define ZRDMA_DEBUGFS_MAX_BUF_LEN 35 + +#define ZRDMA_BOARD_DCQCN_CC_MAX 16 +#define ZRDMA_VHCA_DCQCN_CC_MAX 5 +#define ZRDMA_BOARD_RTT_CC_MAX 7 +#define ZRDMA_VHCA_RTT_CC_MAX 3 +#define ZRDMA_BOARD_NP_PSN_WRAPAROUND_CC_MAX 2 + +#define ZRDMA_DEBUGFS_DCQCN_DIR "dcqcn" +#define ZRDMA_DEBUGFS_RTT_DIR "rtt" +#define ZRDMA_DEBUGFS_WUMENG_DIR "wumeng" +#define ZRDMA_DEBUGFS_NP_PSN_WRAPAROUND "wrap_params" + +enum zrdma_dbg_dcqcn_types { + ZRDMA_DBG_DCQCN_NP_CNP_DSCP = 0, + ZRDMA_DBG_DCQCN_NP_CNP_PRIO, + ZRDMA_DBG_DCQCN_NP_CNP_PRIO_MODE, + ZRDMA_DBG_DCQCN_NP_MIN_TIME_BETWEEN_CNPS, + ZRDMA_DBG_DCQCN_PRG_TIME_RESET, + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE, + ZRDMA_DBG_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC, + ZRDMA_DBG_DCQCN_RP_DCE_TCP_RTT, + ZRDMA_DBG_DCQCN_DCE_TCP_G, + ZRDMA_DBG_DCQCN_RPG_GD, + ZRDMA_DBG_DCQCN_RPG_INITIAL_ALPHA_VALUE, + ZRDMA_DBG_DCQCN_RPG_MIN_DEC_FAC, + ZRDMA_DBG_DCQCN_RPG_THRESHOLD, + ZRDMA_DBG_DCQCN_RPG_RATIO_INCREASE, + ZRDMA_DBG_DCQCN_RPG_AI_RATIO, + ZRDMA_DBG_DCQCN_RPG_HAI_RATIO, + ZRDMA_DBG_DCQCN_RPG_BYTE_RESET, + ZRDMA_DBG_DCQCN_RPG_AI_RATE, + ZRDMA_DBG_DCQCN_RPG_HAI_RATE, + ZRDMA_DBG_DCQCN_RPG_MAX_RATE, + ZRDMA_DBG_DCQCN_RPG_MIN_RATE, + ZRDMA_DBG_DCQCN_MAX, +}; + +enum zrdma_dbg_rtt_types { + ZRDMA_DBG_RTT_ALPHA = 0, + ZRDMA_DBG_RTT_TLOW, + ZRDMA_DBG_RTT_THIGH, + ZRDMA_DBG_RTT_AI_NUM, + ZRDMA_DBG_RTT_THRED_GRADIENT, + ZRDMA_DBG_RTT_HAI_N, + ZRDMA_DBG_RTT_AI_N, + ZRDMA_DBG_RTT_RPG_MAX_RATE, + ZRDMA_DBG_RTT_RPG_MIN_RATE, + ZRDMA_DBG_RTT_VF_DELTA, + ZRDMA_DBG_RTT_MAX, +}; + +enum zrdma_dbg_np_psn_wraparound_types { + ZRDMA_DBG_NP_PSN_WRAPAROUND_ENABLE_PARA = 0, + ZRDMA_DBG_NP_PMTU_PARA, + ZRDMA_DBG_NP_PARA_MAX, +}; + +struct parameter_t { + const char *name; + uint8_t types; + int (*rfunc)(struct zxdh_pci_f *, u32 *); + int (*wfunc)(struct zxdh_pci_f *, u32); +}; + +struct zrdma_dbg_param { + int offset; + struct zxdh_pci_f *dev; +}; + +struct 
zrdma_dbg_board_dcqcn_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_BOARD_DCQCN_CC_MAX]; +}; + +struct zrdma_dbg_vhca_dcqcn_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_VHCA_DCQCN_CC_MAX]; +}; + +struct zrdma_dbg_board_np_psn_wraparound_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_BOARD_NP_PSN_WRAPAROUND_CC_MAX]; +}; + +struct zrdma_dbg_board_rtt_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_BOARD_RTT_CC_MAX]; +}; + +struct zrdma_dbg_vhca_rtt_params { + struct dentry *root; + struct zrdma_dbg_param params[ZRDMA_VHCA_RTT_CC_MAX]; +}; + +struct zrdma_board_params { + union { + void *base; + struct zrdma_dbg_board_dcqcn_params *board_dcqcn_params; + struct zrdma_dbg_board_rtt_params *board_rtt_params; + } mcode_board_params; + struct zrdma_dbg_board_np_psn_wraparound_params + *board_np_psn_wraparound_params; +}; + +struct zrdma_vhca_params { + union { + void *base; + struct zrdma_dbg_vhca_dcqcn_params *vhca_dcqcn_params; + struct zrdma_dbg_vhca_rtt_params *vhca_rtt_params; + } mcode_vhca_params; +}; + +struct zrdma_debugfs_entries { + struct dentry *board_root; + struct dentry *vhca_root; + struct dentry *board_dcqcn_root; + struct dentry *board_rtt_root; + struct dentry *board_np_psn_wraparound_root; + struct dentry *vhca_dcqcn_root; + struct dentry *vhca_rtt_root; + struct zrdma_board_params board_params; + struct zrdma_vhca_params vhca_params; +}; + +void create_debugfs_entry(struct zxdh_pci_f *rf); +void zrdma_register_debugfs(void); +void zrdma_unregister_debugfs(void); +void zrdma_cleanup_debugfs_entry(struct zxdh_pci_f *rf); +void zrdma_cleanup_mcode_type_debugfs_entry(struct zxdh_pci_f *rf, int type); +void zrdma_cleanup_np_psn_wraparound_params_debugfs_entry(struct zxdh_pci_f *rf); +int zrdma_ib_write_rtt_params(struct zxdh_pci_f *rf, int offset, u32 var); +int zrdma_ib_read_rtt_params(struct zxdh_pci_f *rf, int offset, u32 *var); +int 
zrdma_ib_write_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 var); +int zrdma_ib_read_dcqcn_params(struct zxdh_pci_f *rf, int offset, u32 *var); +int zrdma_ib_write_np_psn_wraparound_params(struct zxdh_pci_f *rf, int offset, u32 var); +int zrdma_ib_read_np_psn_wraparound_params(struct zxdh_pci_f *rf, int offset, u32 *var); +int create_debugfs_file_vhca_dcqcn(struct zxdh_pci_f *rf); +int create_debugfs_file_board_dcqcn(struct zxdh_pci_f *rf); +int create_debugfs_file_vhca_rtt(struct zxdh_pci_f *rf); +int create_debugfs_file_board_rtt(struct zxdh_pci_f *rf); +int create_debugfs_file_board_np_psn_wraparound(struct zxdh_pci_f *rf); +void create_debugfs_dcqcn_entry(const char *pci_bdf, struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode); +void create_debugfs_rtt_entry(const char *pci_bdf, struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode); +void create_debugfs_np_psn_wraparound_entry(const char *pci_bdf, struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode); +int zrdma_create_board_root_debugfs(struct zxdh_pci_f *rf, const char *pci_bdf, enum zrdma_debugfs_mode mode); +int zrdma_create_board_subdir_debugfs(struct zxdh_pci_f *rf, + const char *subdir_name, + struct dentry **board_subdir_ptr, + int (*create_file_func)(struct zxdh_pci_f *), + enum zrdma_debugfs_mode mode); +void create_debugfs_default_entry(struct zxdh_pci_f *rf, enum zrdma_debugfs_mode mode); + +int read_np_cnp_dscp(struct zxdh_pci_f *rf, u32 *var); +int read_np_cnp_prio(struct zxdh_pci_f *rf, u32 *var); +int read_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 *var); +int read_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 *var); +int read_prg_time_reset(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 *var); +int read_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 *var); +int read_dce_tcp_g(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_gd(struct zxdh_pci_f *rf, u32 *var); +int 
read_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_threshold(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_byte_reset(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_ai_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_hai_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_max_rate(struct zxdh_pci_f *rf, u32 *var); +int read_rpg_min_rate(struct zxdh_pci_f *rf, u32 *var); +int read_alpha(struct zxdh_pci_f *rf, u32 *var); +int read_tlow(struct zxdh_pci_f *rf, u32 *var); +int read_thigh(struct zxdh_pci_f *rf, u32 *var); +int read_ai_num(struct zxdh_pci_f *rf, u32 *var); +int read_thred_gradient(struct zxdh_pci_f *rf, u32 *var); +int read_hai_n(struct zxdh_pci_f *rf, u32 *var); +int read_ai_n(struct zxdh_pci_f *rf, u32 *var); +int read_vf_delta(struct zxdh_pci_f *rf, u32 *var); +int read_psn_wraparound_enable(struct zxdh_pci_f *rf, u32 *var); +int read_pmtu(struct zxdh_pci_f *rf, u32 *var); + +int write_np_cnp_dscp(struct zxdh_pci_f *rf, u32 var); +int write_np_cnp_prio(struct zxdh_pci_f *rf, u32 var); +int write_np_cnp_prio_mode(struct zxdh_pci_f *rf, u32 var); +int write_np_min_time_between_cnps(struct zxdh_pci_f *rf, u32 var); +int write_prg_time_reset(struct zxdh_pci_f *rf, u32 var); +int write_rpg_clamp_tgt_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_clamp_tgt_rate_after_time_inc(struct zxdh_pci_f *rf, u32 var); +int write_rp_dce_tcp_rtt(struct zxdh_pci_f *rf, u32 var); +int write_dce_tcp_g(struct zxdh_pci_f *rf, u32 var); +int write_rpg_gd(struct zxdh_pci_f *rf, u32 var); +int write_rpg_initial_alpha_value(struct zxdh_pci_f *rf, u32 var); +int write_rpg_min_dec_fac(struct zxdh_pci_f *rf, u32 var); +int write_rpg_threshold(struct zxdh_pci_f *rf, u32 var); +int 
write_rpg_ratio_increase(struct zxdh_pci_f *rf, u32 var); +int write_rpg_ai_ratio(struct zxdh_pci_f *rf, u32 var); +int write_rpg_hai_ratio(struct zxdh_pci_f *rf, u32 var); +int write_rpg_byte_reset(struct zxdh_pci_f *rf, u32 var); +int write_rpg_ai_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_hai_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_max_rate(struct zxdh_pci_f *rf, u32 var); +int write_rpg_min_rate(struct zxdh_pci_f *rf, u32 var); +int write_alpha(struct zxdh_pci_f *rf, u32 var); +int write_tlow(struct zxdh_pci_f *rf, u32 var); +int write_thigh(struct zxdh_pci_f *rf, u32 var); +int write_ai_num(struct zxdh_pci_f *rf, u32 var); +int write_thred_gradient(struct zxdh_pci_f *rf, u32 var); +int write_hai_n(struct zxdh_pci_f *rf, u32 var); +int write_ai_n(struct zxdh_pci_f *rf, u32 var); +int write_vf_delta(struct zxdh_pci_f *rf, u32 var); +int write_psn_wraparound_enable(struct zxdh_pci_f *rf, u32 var); +int write_pmtu(struct zxdh_pci_f *rf, u32 var); + +#endif diff --git a/drivers/infiniband/hw/zrdma/defs.h b/drivers/infiniband/hw/zrdma/defs.h new file mode 100644 index 000000000000..a2ce0f8e69e8 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/defs.h @@ -0,0 +1,2882 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_DEFS_H +#define ZXDH_DEFS_H + +#define ZXDH_FIRST_USER_QP_ID 3 + +#define ECN_CODE_PT_MASK 3 +#define ECN_CODE_PT_VAL 2 + +#define ZXDH_PUSH_OFFSET (8 * 1024 * 1024) +#define ZXDH_PF_FIRST_PUSH_PAGE_INDEX 16 +#define ZXDH_PF_BAR_RSVD (60 * 1024) +#define ZXDH_VF_PUSH_OFFSET ((8 + 64) * 1024) +#define ZXDH_VF_FIRST_PUSH_PAGE_INDEX 2 +#define ZXDH_VF_BAR_RSVD 4096 + +#define ZXDH_AE_REQUESTER 0x0 +#define ZXDH_AE_RESPONDER 0x1 + +#define ZXDH_RISCV_IDX 1023 +#define ZXDH_MAX_EP_NUM 5 +#define ZXDH_CQE_ERR_MAX 0xFFFF +#define ZXDH_AEQ_RETRY_LIMIT 5 + +#define ZXDH_IRD_HW_SIZE_4 0 +#define ZXDH_IRD_HW_SIZE_16 1 +#define ZXDH_IRD_HW_SIZE_64 2 +#define 
ZXDH_IRD_HW_SIZE_128 3 +#define ZXDH_IRD_HW_SIZE_256 4 + +#define ZXDH_RAM_REPEAT_READ_CNT 10 //5 +#define ZXDH_RAM_DATA_REPEAT_READ_CNT 10 +#define ZXDH_RAM_DELAY_MS 30 +#define ZXDH_RAM_CFG_READ_CNT 100 +#define ZXDH_RAM_READ_DELAY_TIME 200 + +#define ZXDH_RAM_WIDTH_LEN_UNIT_1 1 +#define ZXDH_RAM_WIDTH_LEN_UNIT_2 2 +#define ZXDH_RAM_WIDTH_LEN_UNIT_3 3 +#define ZXDH_RAM_WIDTH_LEN_UNIT_4 4 + +#define ZXDH_RAM_WIDTH_32_BIT 32 +#define ZXDH_RAM_WIDTH_64_BIT 64 +#define ZXDH_RAM_WIDTH_96_BIT 96 +#define ZXDH_RAM_WIDTH_128_BIT 128 +#define ZXDH_RAM_WIDTH_160_BIT 160 +#define ZXDH_RAM_WIDTH_192_BIT 192 +#define ZXDH_RAM_WIDTH_224_BIT 224 +#define ZXDH_RAM_WIDTH_256_BIT 256 +#define ZXDH_RAM_WIDTH_288_BIT 288 +#define ZXDH_RAM_WIDTH_320_BIT 320 +#define ZXDH_RAM_WIDTH_352_BIT 352 +#define ZXDH_RAM_WIDTH_384_BIT 384 +#define ZXDH_RAM_WIDTH_416_BIT 416 +#define ZXDH_RAM_WIDTH_448_BIT 448 +#define ZXDH_RAM_WIDTH_480_BIT 480 + +#define ZXDH_RAM_32_BIT_IDX_0 0 +#define ZXDH_RAM_32_BIT_IDX_1 1 +#define ZXDH_RAM_32_BIT_IDX_2 2 +#define ZXDH_RAM_32_BIT_IDX_3 3 +#define ZXDH_RAM_32_BIT_IDX_4 4 +#define ZXDH_RAM_32_BIT_IDX_5 5 +#define ZXDH_RAM_32_BIT_IDX_6 6 +#define ZXDH_RAM_32_BIT_IDX_7 7 +#define ZXDH_RAM_32_BIT_IDX_8 8 +#define ZXDH_RAM_32_BIT_IDX_9 9 +#define ZXDH_RAM_32_BIT_IDX_10 10 +#define ZXDH_RAM_32_BIT_IDX_11 11 +#define ZXDH_RAM_32_BIT_IDX_12 12 +#define ZXDH_RAM_32_BIT_IDX_13 13 +#define ZXDH_RAM_32_BIT_IDX_14 14 + +#define ZXDH_32_BIT_MASK_0_3 0x07UL +#define ZXDH_32_BIT_MASK_0_7 0xFFUL +#define ZXDH_32_BIT_MASK_0_15 0xFFFFUL +#define ZXDH_32_BIT_MASK_8_15 0xFF00UL +#define ZXDH_32_BIT_MASK_8_24 0xFFFF00UL +#define ZXDH_32_BIT_MASK_16_23 0xFF0000UL +#define ZXDH_32_BIT_MASK_24_31 0xFF000000UL +#define ZXDH_32_BIT_MASK_16_31 0xFFFF0000UL + +#define IRMDA_BIT_WIDTH_8 8 +#define IRMDA_BIT_WIDTH_11 11 +#define IRMDA_BIT_WIDTH_12 12 +#define IRMDA_BIT_WIDTH_16 16 +#define IRMDA_BIT_WIDTH_24 24 +#define IRMDA_BIT_WIDTH_32 32 + +#define ZXDH_RAM_H11 0x11 +#define 
ZXDH_RAM_H12 0x12 +#define ZXDH_RAM_H13 0x13 +#define ZXDH_RAM_H14 0x14 +#define ZXDH_RAM_H15 0x15 +#define ZXDH_RAM_H25 0x25 +#define ZXDH_RAM_H26 0x26 +#define ZXDH_RAM_H28 0x28 +#define ZXDH_RAM_H29 0x29 +#define ZXDH_RAM_H35 0x35 +#define ZXDH_RAM_H61 0x61 +#define ZXDH_RAM_H62 0x62 +#define ZXDH_RAM_H63 0x63 +#define ZXDH_RAM_H64 0x64 +#define ZXDH_RAM_H100 0x100 +#define ZXDH_RAM_H104 0x104 +#define ZXDH_RAM_H105 0x105 +#define ZXDH_RAM_H106 0x106 +#define ZXDH_RAM_H19D 0x19D + +#define RDMARX_MAX_MSG_SIZE 0x80000000 + +enum zxdh_protocol_used { + ZXDH_ANY_PROTOCOL = 0, + ZXDH_IWARP_PROTOCOL_ONLY = 1, + ZXDH_ROCE_PROTOCOL_ONLY = 2, +}; + +#define ZXDH_QPC_PF_REQ_ENA_S 30 +#define ZXDH_QPC_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_QPC_PF_REQ_BASEQPN_S 10 +#define ZXDH_QPC_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_QPC_PF_REQ_VHCAID_S 0 +#define ZXDH_QPC_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_CQ_PF_REQ_ENA_S 30 +#define ZXDH_CQ_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_CQ_PF_REQ_BASEQPN_S 10 +#define ZXDH_CQ_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_CQ_PF_REQ_VHCAID_S 0 +#define ZXDH_CQ_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_SRQ_PF_REQ_ENA_S 30 +#define ZXDH_SRQ_PF_REQ_ENA BIT_ULL(30) +#define ZXDH_SRQ_PF_REQ_BASEQPN_S 10 +#define ZXDH_SRQ_PF_REQ_BASEQPN GENMASK_ULL(29, 10) +#define ZXDH_SRQ_PF_REQ_VHCAID_S 0 +#define ZXDH_SRQ_PF_REQ_VHCAID GENMASK_ULL(9, 0) + +#define ZXDH_QP_STATE_INVALID 0 +#define ZXDH_QP_STATE_IDLE 1 +#define ZXDH_QP_STATE_RTS 2 +#define ZXDH_QP_STATE_CLOSING 3 +#define ZXDH_QP_STATE_SQD 3 +#define ZXDH_QP_STATE_RTR 4 +#define ZXDH_QP_STATE_TERMINATE 5 +#define ZXDH_QP_STATE_ERROR 6 + +//DPU QP state +#define ZXDH_QPS_RESET 0 +#define ZXDH_QPS_INIT 1 +#define ZXDH_QPS_RTR 2 +#define ZXDH_QPS_RTS 3 +#define ZXDH_QPS_SQE 4 +#define ZXDH_QPS_SQD 5 +#define ZXDH_QPS_ERR 6 +#define ZXDH_QPS_RSV 7 + +#define ZXDH_MAX_USER_PRIORITY 8 +#define ZXDH_DSCP_NUM_VAL 64 +#define IEEE_8021QAZ_MAX_TCS 8 +#define 
ZXDH_MAX_STATS_COUNT_GEN1 12 +#define ZXDH_MAX_STATS_COUNT 128 +#define ZXDH_FIRST_NON_PF_STAT 4 + +#define ZXDH_MIN_MTU_IPV4 576 +#define ZXDH_MIN_MTU_IPV6 1280 +#define ZXDH_MTU_TO_MSS_IPV4 40 +#define ZXDH_MTU_TO_MSS_IPV6 60 +#define ZXDH_DEFAULT_MTU 1500 + +#define ZXDH_INDICATE_ID_HOST 2 + +#define Q2_FPSN_OFFSET 64 +#define TERM_DDP_LEN_TAGGED 14 +#define TERM_DDP_LEN_UNTAGGED 18 +#define TERM_RDMA_LEN 28 +#define RDMA_OPCODE_M 0x0f +#define RDMA_READ_REQ_OPCODE 1 +#define Q2_BAD_FRAME_OFFSET 72 +#define CQE_MAJOR_DRV 0x8000 + +#define ZXDH_TERM_SENT 1 +#define ZXDH_TERM_RCVD 2 +#define ZXDH_TERM_DONE 4 +#define ZXDH_MAC_HLEN 14 + +#define ZXDH_CQP_WAIT_POLL_REGS 1 +#define ZXDH_CQP_WAIT_POLL_CQ 2 +#define ZXDH_CQP_WAIT_EVENT 3 + +#define ZXDH_AE_SOURCE_RSVD 0x0 +#define ZXDH_AE_SOURCE_RQ 0x1 +#define ZXDH_AE_SOURCE_RQ_0011 0x3 + +#define ZXDH_AE_SOURCE_CQ 0x2 +#define ZXDH_AE_SOURCE_CQ_0110 0x6 +#define ZXDH_AE_SOURCE_CQ_1010 0xa +#define ZXDH_AE_SOURCE_CQ_1110 0xe + +#define ZXDH_AE_SOURCE_SQ 0x5 +#define ZXDH_AE_SOURCE_SQ_0111 0x7 + +#define ZXDH_AE_SOURCE_IN_WR 0x9 +#define ZXDH_AE_SOURCE_IN_RR 0xb +#define ZXDH_AE_SOURCE_OUT_RR 0xd +#define ZXDH_AE_SOURCE_OUT_RR_1111 0xf + +#define ZXDH_TCP_STATE_NON_EXISTENT 0 +#define ZXDH_TCP_STATE_CLOSED 1 +#define ZXDH_TCP_STATE_LISTEN 2 +#define ZXDH_STATE_SYN_SEND 3 +#define ZXDH_TCP_STATE_SYN_RECEIVED 4 +#define ZXDH_TCP_STATE_ESTABLISHED 5 +#define ZXDH_TCP_STATE_CLOSE_WAIT 6 +#define ZXDH_TCP_STATE_FIN_WAIT_1 7 +#define ZXDH_TCP_STATE_CLOSING 8 +#define ZXDH_TCP_STATE_LAST_ACK 9 +#define ZXDH_TCP_STATE_FIN_WAIT_2 10 +#define ZXDH_TCP_STATE_TIME_WAIT 11 +#define ZXDH_TCP_STATE_RESERVED_1 12 +#define ZXDH_TCP_STATE_RESERVED_2 13 +#define ZXDH_TCP_STATE_RESERVED_3 14 +#define ZXDH_TCP_STATE_RESERVED_4 15 + +#define ZXDH_CQP_SW_SQSIZE_4 4 +#define ZXDH_CQP_SW_SQSIZE_2048 2048 + +#define ZXDH_CQ_TYPE_IO 1 +#define ZXDH_CQ_TYPE_ILQ 2 +#define ZXDH_CQ_TYPE_IEQ 3 +#define ZXDH_CQ_TYPE_CQP 4 + +#define ZXDH_DONE_COUNT 
1000 +#define ZXDH_MIN_DONE_COUNT 1 +#define ZXDH_SLEEP_COUNT 10 +#define ZXDH_MAILBOX_SLEEP_TIME 100 +#define ZXDH_MAILBOX_CYC_NUM 120 +#define ZXDH_BAR_MSG_RETRY_NUM 4 +#define ZXDH_BAR_MSG_DEFAULT_NUM 1 +#define ZXDH_BAR_ERR_TIME_OUT 6 +#define ZXDH_BAR_ERR_LOCK_FAILED 20 + +#define ZXDH_FLR_QUERY_FLAG 1 +#define ZXDH_FLR_OP_FLAG 1 +#define ZXDH_FLR_QUERY_TIME 50 +#define ZXDH_FLR_QUERY_CNT 1200 + +#define ZXDH_PCIE_LINK_DOWN 0 +#define ZXDH_PCIE_LINK_UP 1 + +#define ZXDH_UPDATE_SD_BUFF_SIZE 128 +#define ZXDH_FEATURE_BUF_SIZE (8 * ZXDH_MAX_FEATURES) + +#define ZXDH_MAX_QUANTA_PER_WR 16 +#define ZXDH_MAX_SQ_WQES_PER_PAGE 128 +#define ZXDH_MAX_SQ_DEPTH 32768 + +#define ZXDH_MAX_SQ_FRAG 31 +#define ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM 210 + +#define INLINE_DATASIZE_7BYTES 7 +#define INLINE_DATASIZE_24BYTES 24 + +#define INLINE_DATA_OFFSET_7BYTES 7 +#define WQE_OFFSET_7BYTES 7 +#define WQE_OFFSET_8BYTES 8 +#define WQE_OFFSET_24BYTES 24 + +#define ZXDH_CQPDB_INIT_VALUE 0x800 +#define ZXDH_CCQN_INIT_VALUE 0x3 + +#define ZXDH_QP_SW_MAX_WQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_SQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_RQ_QUANTA 32768 +#define ZXDH_QP_SW_MAX_SRQ_QUANTA 32768 +#define ZXDH_MAX_QP_WRS(max_quanta_per_wr) \ + ((ZXDH_QP_SW_MAX_WQ_QUANTA - ZXDH_SQ_RSVD) / (max_quanta_per_wr)) +#define ZXDH_MAX_SRQ_WRS 32768 +#define ZXDH_PF_MAX_SRQ_NUM_USE_L2D 512 +#define ZXDH_VF_MAX_SRQ_NUM_USE_L2D 0 + +#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0 +#define IRDMAQP_TERM_SEND_TERM_ONLY 1 +#define IRDMAQP_TERM_SEND_FIN_ONLY 2 +#define IRDMAQP_TERM_DONOT_SEND_TERM_OR_FIN 3 + +#define ZXDH_QP_TYPE_IWARP 3 +#define ZXDH_QP_TYPE_UDA 4 +#define ZXDH_QP_TYPE_CQP 0 +#define ZXDH_QP_TYPE_ROCE_RC 1 +#define ZXDH_QP_TYPE_ROCE_UD 2 + +#define ZXDH_QP_SERVICE_TYPE_RC 0 +#define ZXDH_QP_SERVICE_TYPE_UD 2 + +#define ZXDH_QP_UD_WS_IDX_START 8097 +#define ZXDH_QP_UD_TOS 0x62 +#define ZXDH_QP_UD_QUEUE_TC 3 + +#define ZXDH_QP_MODIFY_NVMEOF_FLUSH (1 << 31) +#define ZXDH_QP_MODIFY_NVMEOF_FLR (1 << 21) 
+#define ZXDH_QP_MODIFY_NVMEOF_IOQ (1 << 20) +#define ZXDH_QP_MODIFY_NVMEOF_TGT (1 << 19) +#define ZXDH_QP_MODIFY_NVMEOF_QID (1 << 18) +#define ZXDH_QP_NVMEOF_IOQ_MASK_S 17 +#define ZXDH_QP_NVMEOF_IOQ_MASK BIT_ULL(17) +#define ZXDH_QP_NVMEOF_TGT_MASK_S 16 +#define ZXDH_QP_NVMEOF_TGT_MASK BIT_ULL(16) +#define ZXDH_QP_NVMEOF_QID_MASK_S 0 +#define ZXDH_QP_NVMEOF_QID_MASK GENMASK_ULL(15, 0) + +#define ZXDH_HW_PAGE_SIZE 4096 +#define ZXDH_HW_PAGE_SHIFT 12 +#define ZXDH_CQE_QTYPE_RQ 0 +#define ZXDH_CQE_QTYPE_SQ 1 + +#define ZXDH_QP_SW_MIN_WQSIZE 64u /* in WRs*/ +#define ZXDH_QP_WQE_MIN_SIZE 32 +#define ZXDH_QP_SQ_WQE_MIN_SIZE 32 +#define ZXDH_QP_RQ_WQE_MIN_SIZE 16 +#define ZXDH_QP_WQE_MAX_SIZE 256 +#define ZXDH_QP_WQE_MIN_QUANTA 1 +#define ZXDH_MAX_RQ_WQE_SHIFT_GEN1 2 +#define ZXDH_MAX_RQ_WQE_SHIFT_GEN2 3 +#define ZXDH_SRQ_FRAG_BYTESIZE 16 +#define ZXDH_QP_FRAG_BYTESIZE 16 +#define ZXDH_SQ_WQE_BYTESIZE 32 +#define ZXDH_SRQ_WQE_MIN_SIZE 16 + +#define ZXDH_SQ_RSVD 258 +#define ZXDH_RQ_RSVD 1 +#define ZXDH_SRQ_RSVD 1 + +#define ZXDH_FEATURE_RTS_AE 1ULL +#define ZXDH_FEATURE_CQ_RESIZE 2ULL +#define ZXDH_FEATURE_64_BYTE_CQE 128ULL +#define IRDMAQP_OP_RDMA_WRITE 0x00 +#define IRDMAQP_OP_RDMA_READ 0x01 +#define IRDMAQP_OP_RDMA_SEND 0x03 +#define IRDMAQP_OP_RDMA_SEND_INV 0x04 +#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05 +#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06 +#define IRDMAQP_OP_BIND_MW 0x08 +#define IRDMAQP_OP_FAST_REGISTER 0x09 +#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a +#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b +#define IRDMAQP_OP_NOP 0x0c +#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d +#define IRDMAQP_OP_GEN_RTS_AE 0x30 + +#define IRDMAQPC_HW_SQ_TAIL_INIT 0x8000 + +#define ZXDH_SOC_TXRXCQP_IND_ACC_DPU_INTERNAL 0x0 +#define ZXDH_SOC_TXRXCQP_IND_ACC_RSV 0x1 +#define ZXDH_SOC_TXRXCQP_IND_ACC_HOST_NOT_THROUGH_SMMU 0x2 +#define ZXDH_SOC_TXRXCQP_IND_ACC_HOST_THROUGH_SMMU 0x3 + +#define ZXDH_SOC_TXRXCQP_AXID_DEST_L2D 0x0 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_DPU_DDR 0x1 +#define 
ZXDH_SOC_TXRXCQP_AXID_DEST_EP5 0x2 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP6 0x3 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP7 0x4 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP8 0x5 +#define ZXDH_SOC_TXRXCQP_AXID_DEST_EP9 0x6 + +#define ZXDH_SOC_TXRXCQP_CACHE_ID_0 0x0 +#define ZXDH_SOC_TXRXCQP_CACHE_ID_1 0x1 + +#define ZXDH_SOC_RDMAIO_IND_ACC_DPU_INTERNAL 0x0 +#define ZXDH_SOC_RDMAIO_IND_ACC_RSV 0x1 +#define ZXDH_SOC_RDMAIO_IND_ACC_HOST_NOT_THROUGH_SMMU 0x2 +#define ZXDH_SOC_RDMAIO_IND_ACC_HOST_THROUGH_SMMU 0x3 + +enum zxdh_cqp_op_type { + ZXDH_OP_CEQ_DESTROY = 1, + ZXDH_OP_AEQ_DESTROY = 2, + ZXDH_OP_DELETE_ARP_CACHE_ENTRY = 3, + ZXDH_OP_MANAGE_APBVT_ENTRY = 4, + ZXDH_OP_CEQ_CREATE = 5, + ZXDH_OP_AEQ_CREATE = 6, + ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY = 7, + ZXDH_OP_QP_MODIFY = 8, + ZXDH_OP_QP_UPLOAD_CONTEXT = 9, + ZXDH_OP_CQ_CREATE = 10, + ZXDH_OP_CQ_DESTROY = 11, + ZXDH_OP_QP_CREATE = 12, + ZXDH_OP_QP_DESTROY = 13, + ZXDH_OP_ALLOC_STAG = 14, + ZXDH_OP_MR_REG_NON_SHARED = 15, + ZXDH_OP_DEALLOC_STAG = 16, + ZXDH_OP_MW_ALLOC = 17, + ZXDH_OP_QP_FLUSH_WQES = 18, + ZXDH_OP_ADD_ARP_CACHE_ENTRY = 19, + ZXDH_OP_MANAGE_PUSH_PAGE = 20, + ZXDH_OP_UPDATE_PE_SDS = 21, + ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE = 22, + ZXDH_OP_SUSPEND = 23, + ZXDH_OP_RESUME = 24, + ZXDH_OP_MANAGE_VF_PBLE_BP = 25, + ZXDH_OP_QUERY_FPM_VAL = 26, + ZXDH_OP_COMMIT_FPM_VAL = 27, + ZXDH_OP_REQ_CMDS = 28, + ZXDH_OP_CMPL_CMDS = 29, + ZXDH_OP_AH_CREATE = 30, + ZXDH_OP_AH_MODIFY = 31, + ZXDH_OP_AH_DESTROY = 32, + ZXDH_OP_MC_CREATE = 33, + ZXDH_OP_MC_DESTROY = 34, + ZXDH_OP_MC_MODIFY = 35, + ZXDH_OP_STATS_ALLOCATE = 36, + ZXDH_OP_STATS_FREE = 37, + ZXDH_OP_STATS_GATHER = 38, + ZXDH_OP_WS_ADD_NODE = 39, + ZXDH_OP_WS_MODIFY_NODE = 40, + ZXDH_OP_WS_DELETE_NODE = 41, + ZXDH_OP_WS_FAILOVER_START = 42, + ZXDH_OP_WS_FAILOVER_COMPLETE = 43, + ZXDH_OP_SET_UP_MAP = 44, + ZXDH_OP_GEN_AE = 45, + ZXDH_OP_QUERY_RDMA_FEATURES = 46, + ZXDH_OP_ADD_LOCAL_MAC_ENTRY = 48, + ZXDH_OP_DELETE_LOCAL_MAC_ENTRY = 49, + ZXDH_OP_CQ_MODIFY = 50, + 
ZXDH_OP_CONFIG_PTE_TAB = 51, + ZXDH_OP_QUERY_PTE_TAB = 52, + ZXDH_OP_CONFIG_PBLE_TAB = 53, + ZXDH_OP_CONFIG_MAILBOX = 54, + ZXDH_OP_DMA_WRITE = 55, + ZXDH_OP_DMA_WRITE32 = 56, + ZXDH_OP_DMA_WRITE64 = 57, + ZXDH_OP_DMA_READ = 58, + ZXDH_OP_DMA_READ_USE_CQE = 59, + ZXDH_OP_QUERY_QPC = 60, + ZXDH_OP_QUERY_CQC = 61, + ZXDH_OP_QUERY_SRQC = 62, + ZXDH_OP_QUERY_CEQC = 63, + ZXDH_OP_QUERY_AEQC = 64, + ZXDH_OP_SRQ_CREATE = 65, + ZXDH_OP_SRQ_DESTROY = 66, + ZXDH_OP_SRQ_MODIFY = 67, + ZXDH_OP_QUERY_MKEY = 68, + ZXDH_OP_CQ_MODIFY_MODERATION = 69, + ZXDH_OP_QP_MODIFY_UDP_SPORT = 70, + ZXDH_OP_QUERY_HW_OBJECT_INFO = 71, + /* Must be last entry*/ + ZXDH_MAX_CQP_OPS = 72, +}; + +/* CQP SQ WQES */ +#define ZXDH_CQP_OP_NOP 0 +#define ZXDH_CQP_OP_CREATE_QP 0x01 +#define ZXDH_CQP_OP_MODIFY_QP 0x02 +#define ZXDH_CQP_OP_DESTROY_QP 0x03 +#define ZXDH_CQP_OP_QUERY_QP 0x04 +#define ZXDH_CQP_OP_CREATE_CQ 0x05 +#define ZXDH_CQP_OP_MODIFY_CQ 0x06 +#define ZXDH_CQP_OP_DESTROY_CQ 0x07 +#define ZXDH_CQP_OP_QUERY_CQ 0x08 +#define ZXDH_CQP_OP_CREATE_CEQ 0x09 +#define ZXDH_CQP_OP_DESTROY_CEQ 0x0b +#define ZXDH_CQP_OP_QUERY_CEQ 0x0c +#define ZXDH_CQP_OP_CREATE_AEQ 0x0d +#define ZXDH_CQP_OP_DESTROY_AEQ 0x0f +#define ZXDH_CQP_OP_QUERY_AEQ 0x10 +#define ZXDH_CQP_OP_ALLOC_MKEY 0x12 +#define ZXDH_CQP_OP_DEALLOC_MKEY 0x13 +#define ZXDH_CQP_OP_REG_MR 0x14 +#define ZXDH_CQP_OP_QUERY_MKEY 0x16 +#define ZXDH_CQP_OP_CREATE_AH 0x17 +#define ZXDH_CQP_OP_MODIFY_AH 0x18 +#define ZXDH_CQP_OP_DESTROY_AH 0x19 +#define ZXDH_CQP_OP_QUERY_BASE_REG 0x1b +#define ZXDH_CQP_OP_COMMIT_BASE_REG 0x1c +#define ZXDH_CQP_OP_FLUSH_WQES 0x1d +#define ZXDH_CQP_OP_SEND_MAILBOX 0x1e +#define ZXDH_CQP_OP_UPLOAD_QPC 0x1f +#define ZXDH_CQP_OP_CREATE_MCAST_GRP 0x20 +#define ZXDH_CQP_OP_MODIFY_MCAST_GRP 0x21 +#define ZXDH_CQP_OP_DESTROY_MCAST_GRP 0x22 +#define ZXDH_CQP_OP_CREATE_SRQ 0x24 +#define ZXDH_CQP_OP_MODIFY_SRQ 0x25 +#define ZXDH_CQP_OP_DESTROY_SRQ 0x26 +#define ZXDH_CQP_OP_QUERY_SRQ 0x27 +#define ZXDH_CQP_OP_WQE_DMA_WRITE 0x28 
+#define ZXDH_CQP_OP_WQE_DMA_WRITE_32 0x29 +#define ZXDH_CQP_OP_WQE_DMA_WRITE_64 0x2a +#define ZXDH_CQP_OP_WQE_DMA_READ 0x2b +#define ZXDH_CQP_OP_WQE_DMA_READ_USECQE 0x2c +#define ZXDH_CQP_OP_SEND_MAILBOX 0x1e + +/*DELETED CQP SQ WQES*/ +#define ZXDH_CQP_OP_MANAGE_LOC_MAC_TABLE 0 +#define ZXDH_CQP_OP_MANAGE_ARP 0 +#define ZXDH_CQP_OP_MANAGE_VF_PBLE_BP 0 +#define ZXDH_CQP_OP_MANAGE_PUSH_PAGES 0 +#define ZXDH_CQP_OP_QUERY_RDMA_FEATURES 0 +#define ZXDH_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0 +#define ZXDH_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0 +#define ZXDH_CQP_OP_UPDATE_PE_SDS 0 +#define ZXDH_CQP_OP_GEN_AE 0 +#define ZXDH_CQP_OP_MANAGE_APBVT 0 +#define ZXDH_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0 +#define ZXDH_CQP_OP_SUSPEND_QP 0 +#define ZXDH_CQP_OP_RESUME_QP 0 +#define ZXDH_CQP_OP_SHMC_PAGES_ALLOCATED 0 +#define ZXDH_CQP_OP_WORK_SCHED_NODE 0 +#define ZXDH_CQP_OP_MANAGE_STATS 0 +#define ZXDH_CQP_OP_GATHER_STATS 0 +#define ZXDH_CQP_OP_UP_MAP 0 + +/* Async Events codes */ +#define ZXDH_AE_AMP_UNALLOCATED_STAG 0x0102 +#define ZXDH_AE_AMP_INVALID_STAG 0x0103 +#define ZXDH_AE_AMP_BAD_QP 0x0104 +#define ZXDH_AE_AMP_BAD_PD 0x0105 +#define ZXDH_AE_AMP_BAD_STAG_KEY 0x0106 +#define ZXDH_AE_AMP_BAD_STAG_INDEX 0x0107 +#define ZXDH_AE_AMP_BOUNDS_VIOLATION 0x0108 +#define ZXDH_AE_AMP_RIGHTS_VIOLATION 0x0109 +#define ZXDH_AE_AMP_TO_WRAP 0x010a +#define ZXDH_AE_AMP_FASTREG_VALID_STAG 0x010c +#define ZXDH_AE_AMP_FASTREG_MW_STAG 0x010d +#define ZXDH_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e +#define ZXDH_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 +#define ZXDH_AE_AMP_INVALIDATE_SHARED 0x0111 +#define ZXDH_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 +#define ZXDH_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 +#define ZXDH_AE_AMP_MWBIND_VALID_STAG 0x0114 +#define ZXDH_AE_AMP_MWBIND_OF_MR_STAG 0x0115 +#define ZXDH_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 +#define ZXDH_AE_AMP_MWBIND_TO_MW_STAG 0x0117 +#define ZXDH_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 +#define ZXDH_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 
+#define ZXDH_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a +#define ZXDH_AE_AMP_MWBIND_BIND_DISABLED 0x011b +#define ZXDH_AE_PRIV_OPERATION_DENIED 0x011c +#define ZXDH_AE_AMP_INVALIDATE_TYPE1_MW 0x011d +#define ZXDH_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e +#define ZXDH_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f +#define ZXDH_AE_AMP_MWBIND_WRONG_TYPE 0x0120 +#define ZXDH_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121 +#define ZXDH_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132 +#define ZXDH_AE_UDA_XMIT_BAD_PD 0x0133 +#define ZXDH_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134 +#define ZXDH_AE_UDA_L4LEN_INVALID 0x0135 +#define ZXDH_AE_BAD_CLOSE 0x0201 +#define ZXDH_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202 +#define ZXDH_AE_CQ_OPERATION_ERROR 0x0203 +#define ZXDH_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205 +#define ZXDH_AE_STAG_ZERO_INVALID 0x0206 +#define ZXDH_AE_IB_RREQ_AND_Q1_FULL 0x0207 +#define ZXDH_AE_IB_INVALID_REQUEST 0x0208 +#define ZXDH_AE_WQE_UNEXPECTED_OPCODE 0x020a +#define ZXDH_AE_WQE_INVALID_PARAMETER 0x020b +#define ZXDH_AE_WQE_INVALID_FRAG_DATA 0x020c +#define ZXDH_AE_IB_REMOTE_ACCESS_ERROR 0x020d +#define ZXDH_AE_IB_REMOTE_OP_ERROR 0x020e +#define ZXDH_AE_WQE_LSMM_TOO_LONG 0x0220 +#define ZXDH_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 +#define ZXDH_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 +#define ZXDH_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 +#define ZXDH_AE_DDP_UBE_INVALID_MO 0x0305 +#define ZXDH_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306 +#define ZXDH_AE_DDP_UBE_INVALID_QN 0x0307 +#define ZXDH_AE_DDP_NO_L_BIT 0x0308 +#define ZXDH_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311 +#define ZXDH_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312 +#define ZXDH_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313 +#define ZXDH_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314 +#define ZXDH_AE_ROCE_RSP_LENGTH_ERROR 0x0316 +#define ZXDH_AE_ROCE_EMPTY_MCG 0x0380 +#define ZXDH_AE_ROCE_BAD_MC_IP_ADDR 0x0381 +#define ZXDH_AE_ROCE_BAD_MC_QPID 0x0382 +#define ZXDH_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383 +#define ZXDH_AE_INVALID_ARP_ENTRY 
0x0401 +#define ZXDH_AE_INVALID_TCP_OPTION_RCVD 0x0402 +#define ZXDH_AE_STALE_ARP_ENTRY 0x0403 +#define ZXDH_AE_INVALID_AH_ENTRY 0x0406 +#define ZXDH_AE_LLP_CLOSE_COMPLETE 0x0501 +#define ZXDH_AE_LLP_CONNECTION_RESET 0x0502 +#define ZXDH_AE_LLP_FIN_RECEIVED 0x0503 +#define ZXDH_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504 +#define ZXDH_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505 +#define ZXDH_AE_LLP_SEGMENT_TOO_SMALL 0x0507 +#define ZXDH_AE_LLP_SYN_RECEIVED 0x0508 +#define ZXDH_AE_LLP_TERMINATE_RECEIVED 0x0509 +#define ZXDH_AE_LLP_TOO_MANY_RETRIES 0x050a +#define ZXDH_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b +#define ZXDH_AE_LLP_DOUBT_REACHABILITY 0x050c +#define ZXDH_AE_LLP_CONNECTION_ESTABLISHED 0x050e +#define ZXDH_AE_RESOURCE_EXHAUSTION 0x0520 +#define ZXDH_AE_RESET_SENT 0x0601 +#define ZXDH_AE_TERMINATE_SENT 0x0602 +#define ZXDH_AE_RESET_NOT_SENT 0x0603 +#define ZXDH_AE_LCE_QP_CATASTROPHIC 0x0700 +#define ZXDH_AE_LCE_FUNCTION_CATASTROPHIC 0x0701 +#define ZXDH_AE_LCE_CQ_CATASTROPHIC 0x0702 +#define ZXDH_AE_QP_SUSPEND_COMPLETE 0x0900 + +/* Async Events codes request*/ +#define ZXDH_AE_REQ_AXI_RSP_ERR 0x01 + +#define ZXDH_AE_REQ_WQE_FLUSH 0x101 +#define ZXDH_AE_REQ_PSN_OPCODE_ERR 0x110 + +#define ZXDH_AE_REQ_WR_ORD_ERR 0x220 +#define ZXDH_AE_REQ_WR_INV_OPCODE 0x221 +#define ZXDH_AE_REQ_WR_CQP_QP_STATE 0x222 +#define ZXDH_AE_REQ_WR_LEN_ERR 0x223 +#define ZXDH_AE_REQ_WR_INLINE_LEN_ERR 0x224 +#define ZXDH_AE_REQ_WR_AH_VALID_ERR 0x225 +#define ZXDH_AE_REQ_WR_UD_PD_IDX_ERR 0x226 +#define IRMDA_AE_REQ_WR_QP_STATE_ERR 0x227 +#define ZXDH_AE_REQ_WR_SERVER_TYPE_MISMATCH_OPCODE 0x228 +#define ZXDH_AE_REQ_WR_UD_PAYLOAD_OUT_OF_PMTU 0x229 +#define ZXDH_AE_REQ_WR_PRE_READ_MOD_WQE_LEN_ZERO 0x22a +#define ZXDH_AE_REQ_WR_ADDL_SGE_NOT_READ_BACK 0x22b +#define ZXDH_AE_REQ_WR_IMM_OPCODE_MISMATCH_FLAG 0x22c +#define ZXDH_AE_REQ_HAD_SEND_MSG_OUT_OF_RANGE 0x22d +#define ZXDH_AE_REQ_WR_WQE_ZERO_LEN_SGE 0x99f + +#define ZXDH_AE_REQ_NVME_IDX_ERR 0x330 +#define 
ZXDH_AE_REQ_NVME_NOF_QID_ERR 0x331 +#define ZXDH_AE_REQ_NVME_PD_IDX_ERR 0x332 +#define ZXDH_AE_REQ_NVME_LEN_ERR 0x333 +#define ZXDH_AE_REQ_NVME_KEY_ERR 0x334 +#define ZXDH_AE_REQ_NVME_ACC_ERR 0x335 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_IDX_ERR 0x336 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_NOF_QID_ERR 0x337 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_PD_IDX_ERR 0x338 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_LEN_ERR 0x339 +#define ZXDH_AE_REQ_NVME_TX_ROUTE_KEY_ERR 0x33a +#define ZXDH_AE_REQ_NVME_TX_ROUTE_ACC_ERR 0x33b + +#define ZXDH_AE_REQ_MW_MR_MISMATCH_OPCODE 0x450 +#define ZXDH_AE_REQ_MW_INV_LKEY_ERR 0x451 +#define ZXDH_AE_REQ_MW_INV_TYPE_ERR 0x452 +#define ZXDH_AE_REQ_MW_INV_STATE_INV 0x453 +#define ZXDH_AE_REQ_MW_INV_PD_IDX_ERR 0x454 +#define ZXDH_AE_REQ_MW_INV_SHARE_MEM_ERR 0x455 +#define ZXDH_AE_REQ_MW_INV_PARENT_STATE_INV 0x456 +#define ZXDH_AE_REQ_MW_INV_MW_NUM_ZERO 0x458 +#define ZXDH_AE_REQ_MW_INV_MW_STAG_31_8_ZERO 0x459 +#define ZXDH_AE_REQ_MW_INV_QP_NUM_ERR 0x45A +#define ZXDH_AE_REQ_MR_INV_INV_LKEY_ERR 0x45B +#define ZXDH_AE_REQ_MR_INV_MW_NUM_ZERO 0x45C +#define ZXDH_AE_REQ_MR_INV_STATE_ERR 0x45D +#define ZXDH_AE_REQ_MR_INV_EN_ERR 0x45E +#define ZXDH_AE_REQ_MR_INV_SHARE_MEM_ERR 0x45F +#define ZXDH_AE_REQ_MR_INV_PD_IDX_ERR 0x460 +#define ZXDH_AE_REQ_MR_INV_MW_STAG_31_8_ZERO 0x461 +#define ZXDH_AE_REQ_MWBIND_WRITE_ACC_ERR 0x462 +#define ZXDH_AE_REQ_MWBIND_VA_BIND_ERR 0x463 +#define ZXDH_AE_REQ_MWBIND_PD_IDX_ERR 0x464 +#define ZXDH_AE_REQ_MWBIND_MRTE_STATE_TYPE_ERR 0x465 +#define ZXDH_AE_REQ_MWBIND_VA_LEN_ERR 0x466 +#define ZXDH_AE_REQ_MWBIND_TYPE_VA_ERR 0x467 +#define ZXDH_AE_REQ_MWBIND_TYPE_IDX_ERR 0x468 +#define ZXDH_AE_REQ_MWBIND_MRTE_MR_ERR 0x469 +#define ZXDH_AE_REQ_MWBIND_TYPE2_LEN_ERR 0x46A +#define ZXDH_AE_REQ_MWBIND_MRTE_STATE_ERR 0x46B +#define ZXDH_AE_REQ_MWBIND_QPC_EN_ERR 0x46C +#define ZXDH_AE_REQ_MWBIND_PARENT_MR_ERR 0x46D +#define ZXDH_AE_REQ_MWBIND_ACC_BIT4_ERR 0x46E +#define ZXDH_AE_REQ_MWBIND_MW_STAG_ERR 0x470 +#define ZXDH_AE_REQ_MWBIND_IDX_OUT_RANGE 0x471 
+#define ZXDH_AE_REQ_MR_FASTREG_ACC_ERR 0x472 +#define ZXDH_AE_REQ_MR_FASTREG_PD_IDX_ERR 0x473 +#define ZXDH_AE_REQ_MR_FASTREG_MRTE_STATE_ERR 0x474 +#define ZXDH_AE_REQ_MR_FASTREG_MR_IS_NOT_1 0x475 +#define ZXDH_AE_REQ_MR_FASTREG_QPC_EN_ERR 0x476 +#define ZXDH_AE_REQ_MR_FASTREG_STAG_LEN_ERR 0x477 +#define ZXDH_AE_REQ_MR_FASTREG_SHARE_MR_ERR 0x478 +#define ZXDH_AE_REQ_MR_FASTREG_MW_STAG_ERR 0x479 +#define ZXDH_AE_REQ_MR_FASTREG_IDX_OUT_RANGE 0x47A +#define ZXDH_AE_REQ_MR_FASTREG_MR_EN_ERR 0x47B +#define ZXDH_AE_REQ_MW_BIND_PD_IDX_ERR 0x47C + +#define ZXDH_AE_REQ_MRTE_STATE_FREE 0x590 +#define ZXDH_AE_REQ_MRTE_STATE_INVALID 0x591 +#define ZXDH_AE_REQ_MRTE_MW_QP_ID_ERR 0x592 +#define ZXDH_AE_REQ_MRTE_PD_IDX_ERR 0x593 +#define ZXDH_AE_REQ_MRTE_KEY_ERR 0x594 +#define ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_ERR 0x595 +#define ZXDH_AE_REQ_MRTE_VIRT_ADDR_AND_LEN_ERR 0x596 +#define ZXDH_AE_REQ_MRTE_ACC_ERR 0x597 +#define ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_RSV_ERR 0x598 + +#define ZXDH_AE_REQ_LOC_LEN_READ_REP_ERR 0x6c0 + +#define ZXDH_AE_REQ_REM_INV_OPCODE 0x7d0 +#define ZXDH_AE_REQ_REM_INV_RKEY 0x7d1 +#define ZXDH_AE_REQ_REM_OPERATIONAL_ERR 0x7d2 +#define ZXDH_AE_REQ_RETURN_NAK 0x7d3 + +#define ZXDH_AE_REQ_RETRY_EXC_PSN_OUT_RANGE 0x8f1 +#define ZXDH_AE_REQ_RETRY_EXC_ACK_PSN_OUT_RANGE 0x8f2 +#define ZXDH_AE_REQ_RETRY_EXC_LOC_ACK_OUT_RANGE 0x8f3 +#define ZXDH_AE_REQ_RETRY_EXC_RNR_NAK_OUT_RANGE 0x8f4 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_GET_ENTRY_ERR 0x8f5 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_MSN_FALLBACK 0x8f6 +#define ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_MSN_LITTLE 0x8f7 +#define ZXDH_AE_REQ_PSN_LESS_THAN_START_PSN 0x8fe +#define ZXDH_AE_REQ_LOG_SQ_SIZE_MISMATCH_WQE_POINTER 0x8e0 +#define ZXDH_AE_REQ_OFED_INVALID_SQ_OPCODE 0x8e1 +#define ZXDH_AE_REQ_NVME_INVALID_SQ_OPCODE 0x8e2 + +#define ZXDH_AE_REQ_WQE_MRTE_STATE_FREE 0x990 +#define ZXDH_AE_REQ_WQE_MRTE_STATE_INV 0x991 +#define ZXDH_AE_REQ_WQE_MRTE_MW_QP_ID_ERR 0x992 +#define ZXDH_AE_REQ_WQE_MRTE_PD_IDX_ERR 0x993 +#define 
ZXDH_AE_REQ_WQE_MRTE_KEY_ERR 0x994 +#define ZXDH_AE_REQ_WQE_MRTE_STAG_IDX_ERR 0x995 +#define ZXDH_AE_REQ_WQE_MRTE_VIRT_ADDR_AND_LEN_CHK_ERR 0x996 +#define ZXDH_AE_REQ_WQE_MRTE_ACC_ERR 0x997 +#define ZXDH_AE_REQ_WQE_MRTE_RSV_LKEY_EN_ERR 0x998 + +/* Async Events codes respond */ +#define ZXDH_AE_RSP_WQE_FLUSH 0x12 + +#define ZXDH_AE_RSP_PRIFIELD_CHK_INV_OPCODE 0x50 +#define ZXDH_AE_RSP_PRIFIELD_CHK_OUT_OF_ORDER 0x51 +#define ZXDH_AE_RSP_PRIFIELD_CHK_LEN_ERR 0x52 +//qp context,qp err +#define ZXDH_AE_RSP_SRQ_CHK_SRQ_STA_ERR 0x53 +#define ZXDH_AE_RSP_WQE_CHK_FORMAT_ERR 0x54 +#define ZXDH_AE_RSP_WQE_CHK_LEN_ERR 0x55 + +//srq context +#define ZXDH_AE_RSP_SRQ_WATER_SIG 0x80 + +#define ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW 0x76 +#define ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW_QP 0x78 +#define ZXDH_AE_RSP_PKT_TYPE_CQ_STATE 0x7A +#define ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP 0x7B +#define ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP_QP 0x7C +#define ZXDH_AE_RSP_PKT_TYPE_NOF_IOQ_ERR 0x70 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_PD_IDX_ERR 0x71 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_LEN_ERR 0x72 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_RKEY_ERR 0x73 +#define ZXDH_AE_RSP_PKT_TYPE_NOF_ACC_ERR 0x74 +//qp context, srq err +#define ZXDH_AE_RSP_SRQ_AXI_RSP_SIG 0xB1 +#define ZXDH_AE_RSP_PKT_TYPE_IRD_OVERFLOW_ERR 0x77 + +#define ZXDH_AE_RSP_PKT_TYPE_MR_DISTRIBUTE_ERR 0x90 +#define ZXDH_AE_RSP_PKT_TYPE_MR_INV_ERR 0x91 +#define ZXDH_AE_RSP_PKT_TYPE_MR_QP_CHK_ERR 0x92 +#define ZXDH_AE_RSP_PKT_TYPE_MR_PD_CHK_ERR 0x93 +#define ZXDH_AE_RSP_PKT_TYPE_MR_KEY_CHK_ERR 0x94 +#define ZXDH_AE_RSP_PKT_TYPE_MR_STAG_IDX_ERR 0x95 +#define ZXDH_AE_RSP_PKT_TYPE_MR_BOUNDARY_ERR 0x96 +#define ZXDH_AE_RSP_PKT_TYPE_MR_ACC_ERR 0x97 +#define ZXDH_AE_RSP_PKT_TYPE_MR_STAG0_ERR 0x98 +#define ZXDH_AE_RSP_PKT_TYPE_MW_STATE_ERR 0x99 +#define ZXDH_AE_RSP_PKT_TYPE_MW_PD_ERR 0x9A +#define ZXDH_AE_RSP_PKT_TYPE_MW_KEY_ERR 0x9B +#define ZXDH_AE_RSP_PKT_TYPE_MW_TYPE2B_QPN_ERR 0x9C +#define ZXDH_AE_RSP_PKT_TYPE_MW_KEY_IDX_ERR 0x9D +#define 
ZXDH_AE_RSP_PKT_TYPE_MW_SHARE_MR 0x9E +#define ZXDH_AE_RSP_PKT_TYPE_MW_TYPE_ERR 0x9F +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_PD_ERR 0xA0 +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_KEY_ERR 0xA1 +#define ZXDH_AE_RSP_PKT_TYPE_REM_INV_ACC_ERR 0xA2 +#define ZXDH_AE_RSP_CHK_ERR_SHARE_MR 0xA4 +#define ZXDH_AE_RSP_MW_NUM_ERR 0xA5 +#define ZXDH_AE_RSP_INV_EN_ERR 0xA6 +#define ZXDH_AE_RSP_QP_AXI_RSP_ERR 0xB0 + +#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits)) +#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits)) +#define LS_32_1(val, bits) ((u32)((val) << (bits))) +#define RS_32_1(val, bits) ((u32)((val) >> (bits))) + +#define FLD_LS_64(dev, val, field) \ + (((u64)(val) << (dev)->hw_shifts[field##_S]) & \ + (dev)->hw_masks[field##_M]) +#define FLD_RS_64(dev, val, field) \ + ((u64)((val) & (dev)->hw_masks[field##_M]) >> \ + (dev)->hw_shifts[field##_S]) +#define FLD_LS_32(dev, val, field) \ + (((val) << (dev)->hw_shifts[field##_S]) & (dev)->hw_masks[field##_M]) +#define FLD_RS_32(dev, val, field) \ + ((u64)((val) & (dev)->hw_masks[field##_M]) >> \ + (dev)->hw_shifts[field##_S]) + +#define ZXDH_MAX_STATS_12 0xfffULL +#define ZXDH_MAX_STATS_16 0xffffULL +#define ZXDH_MAX_STATS_24 0xffffffULL +#define ZXDH_MAX_STATS_28 0x0fffffffULL +#define ZXDH_MAX_STATS_32 0xffffffffULL +#define ZXDH_MAX_STATS_48 0xffffffffffffULL +#define ZXDH_MAX_STATS_56 0xffffffffffffffULL +#define ZXDH_MAX_STATS_64 0xffffffffffffffffULL + +#define ZXDH_VCHNL_RESP_DEFAULT_SIZE (sizeof(struct zxdh_virtchnl_resp_buf)) + +#define ZXDH_MAX_CQ_READ_THRESH 0x3FFFF +#define ZXDH_CQPSQ_QHASH_VLANID_S 32 +#define ZXDH_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32) +#define ZXDH_CQPSQ_QHASH_QPN_S 32 +#define ZXDH_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32) +#define ZXDH_CQPSQ_QHASH_QS_HANDLE_S 0 +#define ZXDH_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_QHASH_SRC_PORT_S 16 +#define ZXDH_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_QHASH_DEST_PORT_S 0 +#define ZXDH_CQPSQ_QHASH_DEST_PORT 
GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_QHASH_ADDR0_S 32 +#define ZXDH_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_QHASH_ADDR1_S 0 +#define ZXDH_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QHASH_ADDR2_S 32 +#define ZXDH_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_QHASH_ADDR3_S 0 +#define ZXDH_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QHASH_WQEVALID_S 63 +#define ZXDH_CQPSQ_QHASH_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_QHASH_OPCODE_S 32 +#define ZXDH_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QHASH_MANAGE_S 61 +#define ZXDH_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61) +#define ZXDH_CQPSQ_QHASH_IPV4VALID_S 60 +#define ZXDH_CQPSQ_QHASH_IPV4VALID BIT_ULL(60) +#define ZXDH_CQPSQ_QHASH_VLANVALID_S 59 +#define ZXDH_CQPSQ_QHASH_VLANVALID BIT_ULL(59) +#define ZXDH_CQPSQ_QHASH_ENTRYTYPE_S 42 +#define ZXDH_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42) +#define ZXDH_CQPSQ_STATS_WQEVALID_S 63 +#define ZXDH_CQPSQ_STATS_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_STATS_ALLOC_INST_S 62 +#define ZXDH_CQPSQ_STATS_ALLOC_INST BIT_ULL(62) +#define ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX_S 60 +#define ZXDH_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60) +#define ZXDH_CQPSQ_STATS_USE_INST_S 61 +#define ZXDH_CQPSQ_STATS_USE_INST BIT_ULL(61) +#define ZXDH_CQPSQ_STATS_OP_S 32 +#define ZXDH_CQPSQ_STATS_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_STATS_INST_INDEX_S 0 +#define ZXDH_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0) +#define ZXDH_CQPSQ_STATS_HMC_FCN_INDEX_S 0 +#define ZXDH_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_WS_WQEVALID_S 63 +#define ZXDH_CQPSQ_WS_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_WS_NODEOP_S 52 +#define ZXDH_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52) + +#define ZXDH_CQPSQ_WS_ENABLENODE_S 62 +#define ZXDH_CQPSQ_WS_ENABLENODE BIT_ULL(62) +#define ZXDH_CQPSQ_WS_NODETYPE_S 61 +#define ZXDH_CQPSQ_WS_NODETYPE BIT_ULL(61) +#define ZXDH_CQPSQ_WS_PRIOTYPE_S 59 +#define ZXDH_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59) +#define 
ZXDH_CQPSQ_WS_TC_S 56 +#define ZXDH_CQPSQ_WS_TC GENMASK_ULL(58, 56) +#define ZXDH_CQPSQ_WS_VMVFTYPE_S 54 +#define ZXDH_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54) +#define ZXDH_CQPSQ_WS_VMVFNUM_S 42 +#define ZXDH_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42) +#define ZXDH_CQPSQ_WS_OP_S 32 +#define ZXDH_CQPSQ_WS_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_WS_PARENTID_S 16 +#define ZXDH_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16) +#define ZXDH_CQPSQ_WS_NODEID_S 0 +#define ZXDH_CQPSQ_WS_NODEID GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_WS_VSI_S 48 +#define ZXDH_CQPSQ_WS_VSI GENMASK_ULL(57, 48) +#define ZXDH_CQPSQ_WS_WEIGHT_S 32 +#define ZXDH_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32) + +#define ZXDH_CQPSQ_UP_WQEVALID_S 63 +#define ZXDH_CQPSQ_UP_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_UP_USEVLAN_S 62 +#define ZXDH_CQPSQ_UP_USEVLAN BIT_ULL(62) +#define ZXDH_CQPSQ_UP_USEOVERRIDE_S 61 +#define ZXDH_CQPSQ_UP_USEOVERRIDE BIT_ULL(61) +#define ZXDH_CQPSQ_UP_OP_S 32 +#define ZXDH_CQPSQ_UP_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_UP_HMCFCNIDX_S 0 +#define ZXDH_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_UP_CNPOVERRIDE_S 32 +#define ZXDH_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID_S 63 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN_S 0 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_OP_S 32 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED_S 32 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION_S 16 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16) +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION_S 0 +#define ZXDH_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0) +#define ZXDH_CQPHC_SQSIZE_S 8 +#define ZXDH_CQPHC_SQSIZE GENMASK_ULL(11, 8) 
+#define ZXDH_CQPHC_DISABLE_PFPDUS_S 1 +#define ZXDH_CQPHC_DISABLE_PFPDUS BIT_ULL(1) +#define ZXDH_CQPHC_ROCEV2_RTO_POLICY_S 2 +#define ZXDH_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2) +#define ZXDH_CQPHC_PROTOCOL_USED_S 3 +#define ZXDH_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3) +#define ZXDH_CQPHC_MIN_RATE_S 48 +#define ZXDH_CQPHC_MIN_RATE GENMASK_ULL(51, 48) +#define ZXDH_CQPHC_MIN_DEC_FACTOR_S 56 +#define ZXDH_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56) +#define ZXDH_CQPHC_DCQCN_T_S 0 +#define ZXDH_CQPHC_DCQCN_T GENMASK_ULL(15, 0) +#define ZXDH_CQPHC_HAI_FACTOR_S 32 +#define ZXDH_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32) +#define ZXDH_CQPHC_RAI_FACTOR_S 48 +#define ZXDH_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48) +#define ZXDH_CQPHC_DCQCN_B_S 0 +#define ZXDH_CQPHC_DCQCN_B GENMASK_ULL(24, 0) +#define ZXDH_CQPHC_DCQCN_F_S 25 +#define ZXDH_CQPHC_DCQCN_F GENMASK_ULL(27, 25) +#define ZXDH_CQPHC_CC_CFG_VALID_S 31 +#define ZXDH_CQPHC_CC_CFG_VALID BIT_ULL(31) +#define ZXDH_CQPHC_RREDUCE_MPERIOD_S 32 +#define ZXDH_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32) +#define ZXDH_CQPHC_HW_MINVER_S 0 +#define ZXDH_CQPHC_HW_MINVER GENMASK_ULL(15, 0) + +#define ZXDH_CQPHC_HW_MAJVER_GEN_1 0 +#define ZXDH_CQPHC_HW_MAJVER_GEN_2 1 +#define ZXDH_CQPHC_HW_MAJVER_GEN_3 2 +#define ZXDH_CQPHC_HW_MAJVER_S 16 +#define ZXDH_CQPHC_HW_MAJVER GENMASK_ULL(31, 16) +#define ZXDH_CQPHC_CEQPERVF_S 32 +#define ZXDH_CQPHC_CEQPERVF GENMASK_ULL(39, 32) + +#define ZXDH_CQPHC_EN_REM_ENDPOINT_TRK_S 3 +#define ZXDH_CQPHC_EN_REM_ENDPOINT_TRK BIT_ULL(3) + +#define ZXDH_CQPHC_ENABLED_VFS_S 32 +#define ZXDH_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32) + +#define ZXDH_CQPHC_HMC_PROFILE_S 0 +#define ZXDH_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0) +#define ZXDH_CQPHC_SVER_S 24 +#define ZXDH_CQPHC_SVER GENMASK_ULL(31, 24) +#define ZXDH_CQPHC_SQBASE_S 9 +#define ZXDH_CQPHC_SQBASE GENMASK_ULL(63, 9) + +#define ZXDH_CQPHC_QPCTX_S 0 +#define ZXDH_CQPHC_QPCTX GENMASK_ULL(63, 0) +#define ZXDH_QP_DBSA_HW_SQ_TAIL_S 0 +#define ZXDH_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(17, 0) 
+#define ZXDH_CQ_DBSA_CQEIDX_S 0 +#define ZXDH_CQ_DBSA_CQEIDX GENMASK_ULL(22, 0) +#define ZXDH_CQ_DBSA_SW_CQ_SELECT_S 23 +#define ZXDH_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(28, 23) +#define ZXDH_CQ_DBSA_ARM_NEXT_S 31 +#define ZXDH_CQ_DBSA_ARM_NEXT BIT_ULL(31) +// #define ZXDH_CQ_DBSA_ARM_NEXT_SE_S 15 +// #define ZXDH_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15) +#define ZXDH_CQ_DBSA_ARM_SEQ_NUM_S 29 +#define ZXDH_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(30, 29) + +/* RDMA TX DDR Access REG Masks */ +#define ZXDH_TX_CACHE_ID_S 0 +#define ZXDH_TX_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_TX_INDICATE_ID_S 2 +#define ZXDH_TX_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_TX_AXI_ID_S 4 +#define ZXDH_TX_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_TX_WAY_PARTITION_S 7 +#define ZXDH_TX_WAY_PARTITION GENMASK_ULL(9, 7) + +/* RDMA RX REG Masks */ +#define ZXDH_CQ_CQE_AXI_ID_S 4 +#define ZXDH_CQ_CQE_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_CQ_CQE_INDICATE_ID_S 2 +#define ZXDH_CQ_CQE_INDICATE_ID GENMASK_ULL(3, 2) + +#define ZXDH_CQ_ARM_DBSA_ARM_SEQ_NUM_S 0 +#define IZXDH_CQ_ARM_DBSA_ARM_SEQ_NUM GENMASK_ULL(1, 0) +#define ZXDH_CQ_ARM_DBSA_ARM_NXT_S 2 +#define ZXDH_CQ_ARM_DBSA_ARM_NXT BIT_ULL(2) +#define ZXDH_CQ_ARM_CQ_ID_S 10 +#define ZXDH_CQ_ARM_CQ_ID GENMASK_ULL(29, 10) +#define ZXDH_CQ_ARM_DBSA_VLD_S 30 +#define ZXDH_CQ_ARM_DBSA_VLD BIT_ULL(30) + +/* RDMA RX DDR Access REG Masks */ +#define ZXDH_RX_CACHE_ID_S 0 +#define ZXDH_RX_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_RX_INDICATE_ID_S 2 +#define ZXDH_RX_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_RX_AXI_ID_S 4 +#define ZXDH_RX_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_RX_WAY_PARTITION_S 7 +#define ZXDH_RX_WAY_PARTITION GENMASK_ULL(9, 7) + +/* RDMA IO REG Masks */ +#define ZXDH_IOTABLE2_SID_S 0 +#define ZXDH_IOTABLE2_SID GENMASK_ULL(5, 0) + +#define ZXDH_IOTABLE4_EPID_S 11 +#define ZXDH_IOTABLE4_EPID GENMASK_ULL(14, 11) +#define ZXDH_IOTABLE4_VFID_S 3 +#define ZXDH_IOTABLE4_VFID GENMASK_ULL(10, 3) +#define ZXDH_IOTABLE4_PFID_S 0 +#define ZXDH_IOTABLE4_PFID 
GENMASK_ULL(2, 0) + +#define ZXDH_IOTABLE7_PFID_S 2 +#define ZXDH_IOTABLE7_PFID GENMASK_ULL(4, 2) +#define ZXDH_IOTABLE7_EPID_S 5 +#define ZXDH_IOTABLE7_EPID GENMASK_ULL(8, 5) + +/* CQP Create Masks */ +#define ZXDH_CQP_CREATE_EPID_S 12 +#define ZXDH_CQP_CREATE_EPID GENMASK_ULL(15, 12) +#define ZXDH_CQP_CREATE_VFID_S 4 +#define ZXDH_CQP_CREATE_VFID GENMASK_ULL(11, 4) +#define ZXDH_CQP_CREATE_PFID_S 1 +#define ZXDH_CQP_CREATE_PFID GENMASK_ULL(3, 1) +#define ZXDH_CQP_CREATE_VFUNC_ACTIVE_S 0 +#define ZXDH_CQP_CREATE_VFUNC_ACTIVE BIT_ULL(0) + +#define ZXDH_CQP_CREATE_STATE_CFG_S 31 +#define ZXDH_CQP_CREATE_STATE_CFG BIT_ULL(31) +#define ZXDH_CQP_CREATE_SQSIZE_S 16 +#define ZXDH_CQP_CREATE_SQSIZE GENMASK_ULL(27, 16) +#define ZXDH_CQP_CREATE_QPC_OBJ_IDX_S 10 +#define ZXDH_CQP_CREATE_QPC_OBJ_IDX GENMASK_ULL(14, 10) +#define ZXDH_CQP_CREATE_QPC_INDICATE_IDX_S 8 +#define ZXDH_CQP_CREATE_QPC_INDICATE_IDX GENMASK_ULL(9, 8) +#define ZXDH_CQP_CREATE_OBJ_IDX_S 2 +#define ZXDH_CQP_CREATE_OBJ_IDX GENMASK_ULL(6, 2) +#define ZXDH_CQP_CREATE_INDICATE_IDX_S 0 +#define ZXDH_CQP_CREATE_INDICATE_IDX GENMASK_ULL(1, 0) + +#define ZXDH_CQP_CREATE_ENA_PFVF_NUM_S 8 +#define ZXDH_CQP_CREATE_ENA_PFVF_NUM GENMASK_ULL(15, 8) +#define ZXDH_CQP_CREATE_CEQPERVF_S 0 +#define ZXDH_CQP_CREATE_CEQPERVF GENMASK_ULL(7, 0) + +/* CQP and iWARP Completion Queue */ +#define ZXDH_CQ_QPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQ_QPCTX ZXDH_CQPHC_QPCTX + +#define ZXDH_CCQ_OPRETVAL_S 0 +#define ZXDH_CCQ_OPRETVAL GENMASK_ULL(33, 0) +#define ZXDH_SRC_VHCA_ID_S 0 +#define ZXDH_SRC_VHCA_ID GENMASK_ULL(9, 0) +#define ZXDH_SRC_PFVF_ID_S 10 +#define ZXDH_SRC_PFVF_ID GENMASK_ULL(17, 10) +#define ZXDH_SRC_DSTVHCA_ID_S 18 +#define ZXDH_SRC_DSTVHCA_ID GENMASK_ULL(27, 18) + +#define ZXDH_CQ_MINERR_S 7 +#define ZXDH_CQ_MINERR GENMASK_ULL(22, 7) +#define ZXDH_CQ_MAJERR_S 23 +#define ZXDH_CQ_MAJERR GENMASK_ULL(38, 23) +#define ZXDH_CQ_WQEIDX_S 40 +#define ZXDH_CQ_WQEIDX GENMASK_ULL(54, 40) +#define ZXDH_CQ_EXTCQE_S 50 +#define 
ZXDH_CQ_EXTCQE BIT_ULL(50) +#define ZXDH_OOO_CMPL_S 54 +#define ZXDH_OOO_CMPL BIT_ULL(54) +#define ZXDH_CQ_ERROR_S 39 +#define ZXDH_CQ_ERROR BIT_ULL(39) +#define ZXDH_CQ_SQ_S 4 +#define ZXDH_CQ_SQ BIT_ULL(4) + +#define ZXDH_CQ_TYPE_S 0 +#define ZXDH_CQ_TYPE GENMASK_ULL(1, 0) + +#define ZXDH_CQ_VALID_S 5 +#define ZXDH_CQ_VALID BIT_ULL(5) +#define ZXDH_CQ_IMMVALID_S 0 +#define ZXDH_CQ_IMMVALID BIT_ULL(0) +#define ZXDH_CQ_UDSMACVALID_S 26 +#define ZXDH_CQ_UDSMACVALID BIT_ULL(26) +#define ZXDH_CQ_UDVLANVALID_S 27 +#define ZXDH_CQ_UDVLANVALID BIT_ULL(27) +#define ZXDH_CQ_UDSMAC_S 0 +#define ZXDH_CQ_UDSMAC GENMASK_ULL(47, 0) +#define ZXDH_CQ_UDVLAN_S 48 +#define ZXDH_CQ_UDVLAN GENMASK_ULL(63, 48) +#define ZXDH_CQ_IMMDATA_S 0 +#define ZXDH_CQ_IMMDATA GENMASK_ULL(31, 0) +#define IRDMACQ_PAYLDLEN_S 32 +#define IRDMACQ_PAYLDLEN GENMASK_ULL(63, 32) +#define ZXDH_CQ_MAILBOXCQE_S 3 +#define ZXDH_CQ_MAILBOXCQE BIT_ULL(3) + +#define ZXDH_CQ_IMMDATALOW32_S 0 +#define ZXDH_CQ_IMMDATALOW32 GENMASK_ULL(31, 0) +#define ZXDH_CQ_IMMDATAUP32_S 32 +#define ZXDH_CQ_IMMDATAUP32 GENMASK_ULL(63, 32) +#define IRDMACQ_TCPSEQNUMRTT_S 32 +#define IRDMACQ_TCPSEQNUMRTT GENMASK_ULL(63, 32) +#define IRDMACQ_INVSTAG_S 11 +#define IRDMACQ_INVSTAG GENMASK_ULL(42, 11) +#define IRDMACQ_QPID_S 44 +#define IRDMACQ_QPID GENMASK_ULL(63, 44) + +#define IRDMACQ_UDSRCQPN_S 1 +#define IRDMACQ_UDSRCQPN GENMASK_ULL(24, 1) +#define IRDMACQ_PSHDROP_S 51 +#define IRDMACQ_PSHDROP BIT_ULL(51) +#define IRDMACQ_STAG_S 43 +#define IRDMACQ_STAG BIT_ULL(43) +#define IRDMACQ_IPV4_S 25 +#define IRDMACQ_IPV4 BIT_ULL(25) +#define IRDMACQ_SOEVENT_S 6 +#define IRDMACQ_SOEVENT BIT_ULL(6) +#define IRDMACQ_OP_S 58 +#define IRDMACQ_OP GENMASK_ULL(63, 58) + +#define ZXDH_CEQE_CQCTX_S 0 +#define ZXDH_CEQE_CQCTX GENMASK_ULL(62, 0) +#define ZXDH_CEQE_VALID_S 63 +#define ZXDH_CEQE_VALID BIT_ULL(63) + +/* AEQE format */ +#define ZXDH_AEQE_COMPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_AEQE_COMPCTX ZXDH_CQPHC_QPCTX +#define ZXDH_AEQE_QPCQID_S 0 
+#define ZXDH_AEQE_QPCQID GENMASK_ULL(20, 0) +#define ZXDH_AEQE_WQDESCIDX_S 37 +#define ZXDH_AEQE_WQDESCIDX GENMASK_ULL(37, 21) +#define ZXDH_AEQE_OVERFLOW_S 62 +#define ZXDH_AEQE_OVERFLOW BIT_ULL(62) +#define ZXDH_AEQE_AECODE_S 50 +#define ZXDH_AEQE_AECODE GENMASK_ULL(61, 50) +#define ZXDH_AEQE_AESRC_S 45 +#define ZXDH_AEQE_AESRC BIT_ULL(45) +#define ZXDH_AEQE_VHCA_ID_S 35 +#define ZXDH_AEQE_VHCA_ID GENMASK_ULL(44, 35) +#define ZXDH_AEQE_IWSTATE_S 46 +#define ZXDH_AEQE_IWSTATE GENMASK_ULL(49, 46) +#define ZXDH_AEQE_VALID_S 63 +#define ZXDH_AEQE_VALID BIT_ULL(63) +#define ZXDH_AEQE_MINOR_ERROR_S 50 +#define ZXDH_AEQE_MINOR_ERROR GENMASK_ULL(57, 50) +#define ZXDH_AEQE_MAJOR_ERROR_S 58 +#define ZXDH_AEQE_MAJOR_ERROR GENMASK_ULL(61, 58) + +#define ZXDH_UDA_QPSQ_NEXT_HDR_S 16 +#define ZXDH_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16) +#define ZXDH_UDA_QPSQ_OPCODE_S 32 +#define ZXDH_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_UDA_QPSQ_L4LEN_S 42 +#define ZXDH_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42) +#define ZXDH_GEN1_UDA_QPSQ_L4LEN_S 24 +#define ZXDH_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24) +#define ZXDH_UDA_QPSQ_AHIDX_S 0 +#define ZXDH_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0) +#define ZXDH_UDA_QPSQ_VALID_S 63 +#define ZXDH_UDA_QPSQ_VALID BIT_ULL(63) +#define ZXDH_UDA_QPSQ_SIGCOMPL_S 62 +#define ZXDH_UDA_QPSQ_SIGCOMPL BIT_ULL(62) +#define ZXDH_UDA_QPSQ_MACLEN_S 56 +#define ZXDH_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56) +#define ZXDH_UDA_QPSQ_IPLEN_S 48 +#define ZXDH_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48) +#define ZXDH_UDA_QPSQ_L4T_S 30 +#define ZXDH_UDA_QPSQ_L4T GENMASK_ULL(31, 30) +#define ZXDH_UDA_QPSQ_IIPT_S 28 +#define ZXDH_UDA_QPSQ_IIPT GENMASK_ULL(29, 28) +#define ZXDH_UDA_PAYLOADLEN_S 0 +#define ZXDH_UDA_PAYLOADLEN GENMASK_ULL(13, 0) +#define ZXDH_UDA_HDRLEN_S 16 +#define ZXDH_UDA_HDRLEN GENMASK_ULL(24, 16) +#define ZXDH_VLAN_TAG_VALID_S 50 +#define ZXDH_VLAN_TAG_VALID BIT_ULL(50) +#define ZXDH_UDA_L3PROTO_S 0 +#define ZXDH_UDA_L3PROTO GENMASK_ULL(1, 0) +#define ZXDH_UDA_L4PROTO_S 16 
+#define ZXDH_UDA_L4PROTO GENMASK_ULL(17, 16) +#define ZXDH_UDA_QPSQ_DOLOOPBACK_S 44 +#define ZXDH_UDA_QPSQ_DOLOOPBACK BIT_ULL(44) +#define ZXDH_CQPSQ_BUFSIZE_S 0 +#define ZXDH_CQPSQ_BUFSIZE GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_OPCODE_S 58 +#define ZXDH_CQPSQ_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_CQPSQ_WQEVALID_S 57 +#define ZXDH_CQPSQ_WQEVALID BIT_ULL(57) +#define ZXDH_CQPSQ_TPHVAL_S 0 +#define ZXDH_CQPSQ_TPHVAL GENMASK_ULL(7, 0) + +// DMA OP +#define ZXDH_CQPSQ_DESTPATHINDEX_S 0 +#define ZXDH_CQPSQ_DESTPATHINDEX GENMASK_ULL(39, 0) + +#define ZXDH_CQPSQ_SRCPATHINDEX_S 0 +#define ZXDH_CQPSQ_SRCPATHINDEX GENMASK_ULL(39, 0) + +#define ZXDH_CQPSQ_InterSourSel_S 41 +#define ZXDH_CQPSQ_InterSourSel GENMASK_ULL(45, 41) + +#define ZXDH_CQPSQ_NeedInter_S 40 +#define ZXDH_CQPSQ_NeedInter BIT_ULL(40) + +#define ZXDH_CQPSQ_DATAINWQENUM_S 54 +#define ZXDH_CQPSQ_DATAINWQENUM GENMASK_ULL(56, 54) + +#define ZXDH_CQPSQ_DATAHIGH_S 32 +#define ZXDH_CQPSQ_DATAHIGH GENMASK_ULL(63, 32) + +#define ZXDH_CQPSQ_DATALOW_S 31 +#define ZXDH_CQPSQ_DATALOW GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_DATABITWIDTH_S 53 +#define ZXDH_CQPSQ_DATABITWIDTH BIT_ULL(53) + +#define ZXDH_CQPSQ_INTERSOURCESEL_S 41 +#define ZXDH_CQPSQ_INTERSOURCESEL GENMASK_ULL(45, 41) + +#define ZXDH_CQPSQ_NEEDINTER_S 40 +#define ZXDH_CQPSQ_NEEDINTER BIT_ULL(40) + +#define ZXDH_CQPSQ_DATAINCQENUM_S 54 +#define ZXDH_CQPSQ_DATAINCQENUM GENMASK_ULL(56, 54) +// DMA OP + +// MB +#define ZXDH_CQPSQ_PFVALID_S 56 +#define ZXDH_CQPSQ_PFVALID BIT_ULL(56) + +#define ZXDH_CQPSQ_SRCPFVFID_S 8 +#define ZXDH_CQPSQ_SRCPFVFID GENMASK_ULL(15, 8) + +#define ZXDH_CQPSQ_DSTVFID_S 0 +#define ZXDH_CQPSQ_DSTVFID GENMASK_ULL(7, 0) +// MB + +#define ZXDH_CQPSQ_DBPOLARITY_S 11 +#define ZXDH_CQPSQ_DBPOLARITY BIT_ULL(11) +#define ZXDH_CQPSQ_DBRINGHEAD_S 0 +#define ZXDH_CQPSQ_DBRINGHEAD GENMASK_ULL(10, 0) + +#define ZXDH_CQPSQ_VSIIDX_S 8 +#define ZXDH_CQPSQ_VSIIDX GENMASK_ULL(17, 8) +#define ZXDH_CQPSQ_TPHEN_S 60 +#define ZXDH_CQPSQ_TPHEN 
BIT_ULL(60) + +#define ZXDH_CQPSQ_PBUFADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_PBUFADDR ZXDH_CQPHC_QPCTX + +/* Create/Modify/Destroy QP */ + +#define ZXDH_CQPSQ_QP_NEWMSS_S 32 +#define ZXDH_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32) +#define ZXDH_CQPSQ_QP_TERMLEN_S 48 +#define ZXDH_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 48) + +#define ZXDH_CQPSQ_QP_QPCTX_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_QP_QPCTX ZXDH_CQPHC_QPCTX + +#define ZXDH_CQPSQ_QP_QPID_S 0 +#define ZXDH_CQPSQ_QP_QPID_M (0xFFFFFFUL) + +#define ZXDH_CQPSQ_QP_OP_S 32 +#define ZXDH_CQPSQ_QP_OP_M IRDMACQ_OP_M +#define ZXDH_CQPSQ_QP_ORDVALID_S 42 +#define ZXDH_CQPSQ_QP_ORDVALID BIT_ULL(42) +#define ZXDH_CQPSQ_QP_TOECTXVALID_S 43 +#define ZXDH_CQPSQ_QP_TOECTXVALID BIT_ULL(43) +#define ZXDH_CQPSQ_QP_CACHEDVARVALID_S 44 +#define ZXDH_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44) +#define ZXDH_CQPSQ_QP_VQ_S 45 +#define ZXDH_CQPSQ_QP_VQ BIT_ULL(45) +#define ZXDH_CQPSQ_QP_FORCELOOPBACK_S 46 +#define ZXDH_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46) +#define ZXDH_CQPSQ_QP_CQNUMVALID_S 47 +#define ZXDH_CQPSQ_QP_CQNUMVALID BIT_ULL(47) +#define ZXDH_CQPSQ_QP_QPTYPE_S 48 +#define ZXDH_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48) +#define ZXDH_CQPSQ_QP_MACVALID_S 51 +#define ZXDH_CQPSQ_QP_MACVALID BIT_ULL(51) +#define ZXDH_CQPSQ_QP_MSSCHANGE_S 52 +#define ZXDH_CQPSQ_QP_MSSCHANGE BIT_ULL(52) + +#define ZXDH_CQPSQ_QP_IGNOREMWBOUND_S 54 +#define ZXDH_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54) +#define ZXDH_CQPSQ_QP_REMOVEHASHENTRY_S 55 +#define ZXDH_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55) +#define ZXDH_CQPSQ_QP_TERMACT_S 56 +#define ZXDH_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56) +#define ZXDH_CQPSQ_QP_RESETCON_S 58 +#define ZXDH_CQPSQ_QP_RESETCON BIT_ULL(58) +#define ZXDH_CQPSQ_QP_ARPTABIDXVALID_S 59 +#define ZXDH_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59) +#define ZXDH_CQPSQ_QP_NEXTIWSTATE_S 60 +#define ZXDH_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60) + +#define ZXDH_CQPSQ_QP_ID_S 0 +#define ZXDH_CQPSQ_QP_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_QP_CONTEXT_ID_S 20 +#define 
ZXDH_CQPSQ_QP_CONTEXT_ID GENMASK_ULL(39, 20) + +#define ZXDH_CQPSQ_QP_DBSHADOWADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_QP_DBSHADOWADDR ZXDH_CQPHC_QPCTX + +#define ZXDH_CQPSQ_CQ_OP_S 32 +#define ZXDH_CQPSQ_CQ_OP GENMASK_ULL(37, 32) + +#define ZXDH_CQPSQ_CQ_CEQIDVALID_S 54 +#define ZXDH_CQPSQ_CQ_CEQIDVALID BIT_ULL(54) +#define ZXDH_CQPSQ_CQ_CQRESIZE_S 53 +#define ZXDH_CQPSQ_CQ_CQRESIZE BIT_ULL(53) +#define ZXDH_CQPSQ_CQ_CQADDRVALID_S 52 +#define ZXDH_CQPSQ_CQ_CQADDRVALID BIT_ULL(52) + +#define ZXDH_CQPSQ_CQ_CQSTATE_S 60 +#define ZXDH_CQPSQ_CQ_CQSTATE GENMASK_ULL(63, 60) +#define ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG_S 59 +#define ZXDH_CQPSQ_CQ_OVERFLOW_LOCKED_FLAG BIT_ULL(59) +#define ZXDH_CQPSQ_CQ_CQESIZE_S 58 +#define ZXDH_CQPSQ_CQ_CQESIZE BIT_ULL(58) +#define ZXDH_CQPSQ_CQ_VIRTMAP_S 57 +#define ZXDH_CQPSQ_CQ_VIRTMAP BIT_ULL(57) +#define ZXDH_CQPSQ_CQ_LPBLSIZE_S 55 +#define ZXDH_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(56, 55) +#define ZXDH_CQPSQ_CQ_ENCEQEMASK_S 54 +#define ZXDH_CQPSQ_CQ_ENCEQEMASK BIT_ULL(54) +#define ZXDH_CQPSQ_CQ_DEBUG_SET_S 44 +#define ZXDH_CQPSQ_CQ_DEBUG_SET GENMASK_ULL(53, 44) +#define ZXDH_CQPSQ_CQ_VHCAID_S 34 +#define ZXDH_CQPSQ_CQ_VHCAID GENMASK_ULL(43, 34) +#define ZXDH_CQPSQ_CQ_CQMAX_S 18 +#define ZXDH_CQPSQ_CQ_CQMAX GENMASK_ULL(33, 18) +#define ZXDH_CQPSQ_CQ_CQPERIOD_S 7 +#define ZXDH_CQPSQ_CQ_CQPERIOD GENMASK_ULL(17, 7) +#define ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN_S 6 +#define ZXDH_CQPSQ_CQ_SCQE_BREAK_MODERATION_EN BIT_ULL(6) + +#define ZXDH_CQPSQ_CQ_CEQ_ID_S 48 +#define ZXDH_CQPSQ_CQ_CEQ_ID GENMASK_ULL(59, 48) +#define ZXDH_CQPSQ_CQ_ST_S 46 +#define ZXDH_CQPSQ_CQ_ST GENMASK_ULL(47, 46) +#define ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT_S 33 +#define ZXDH_CQPSQ_CQ_IS_IN_LIST_CNT GENMASK_ULL(45, 33) +#define ZXDH_CQPSQ_CQ_CQSIZE_S 24 +#define ZXDH_CQPSQ_CQ_CQSIZE GENMASK_ULL(28, 24) +#define ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD_S 0 +#define ZXDH_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(15, 0) +#define ZXDH_CQPSQ_CQ_FIRSTPMPBLIDX_S 0 +#define 
ZXDH_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) +#define ZXDH_CQPSQ_CQ_CQC_SET_MASK_S 0 +#define ZXDH_CQPSQ_CQ_CQC_SET_MASK GENMASK_ULL(63, 0) +#define ZXDH_CQPSQ_CQ_MODIFY_SIZE_S 20 +#define ZXDH_CQPSQ_CQ_MODIFY_SIZE BIT_ULL(20) + +/* Allocate/Register/Register Shared/Deallocate Stag */ +#define ZXDH_CQPSQ_STAG_VA_FBO_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_STAG_VA_FBO ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_STAG_STAGLEN_S 0 +#define ZXDH_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0) +#define ZXDH_CQPSQ_STAG_KEY_S 0 +#define ZXDH_CQPSQ_STAG_KEY GENMASK_ULL(7, 0) +#define ZXDH_CQPSQ_STAG_IDX_S 8 +#define ZXDH_CQPSQ_STAG_IDX GENMASK_ULL(31, 8) +#define ZXDH_CQPSQ_STAG_PARENTSTAGIDX_S 32 +#define ZXDH_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32) +#define ZXDH_CQPSQ_STAG_MR_S 31 +#define ZXDH_CQPSQ_STAG_MR BIT_ULL(31) +#define ZXDH_CQPSQ_STAG_MWTYPE_S 30 +#define ZXDH_CQPSQ_STAG_MWTYPE BIT_ULL(30) +#define ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY_S 29 +#define ZXDH_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(29) +#define ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN_S 28 +#define ZXDH_CQPSQ_STAG_FAST_REGISTER_MR_EN BIT_ULL(28) +#define ZXDH_CQPSQ_STAG_MR_INVALID_EN_S 27 +#define ZXDH_CQPSQ_STAG_MR_INVALID_EN BIT_ULL(27) +#define ZXDH_CQPSQ_STAG_MR_FORCE_DEL_S 26 +#define ZXDH_CQPSQ_STAG_MR_FORCE_DEL BIT_ULL(26) +#define ZXDH_CQPSQ_STAG_MR_PDID_HIG_S 56 +#define ZXDH_CQPSQ_STAG_MR_PDID_HIG GENMASK_ULL(57, 56) +#define ZXDH_CQPSQ_STAG_MR_PDID_LOW_S 46 +#define ZXDH_CQPSQ_STAG_MR_PDID_LOW GENMASK_ULL(63, 46) +#define ZXDH_CQPSQ_QUERY_MKEY_S 8 +#define ZXDH_CQPSQ_QUERY_MKEY GENMASK_ULL(31, 8) + +#define ZXDH_CQPSQ_STAG_LPBLSIZE_S 32 +#define ZXDH_CQPSQ_STAG_LPBLSIZE GENMASK_ULL(33, 32) +#define ZXDH_CQPSQ_STAG_HPAGESIZE_S 34 +#define ZXDH_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(38, 34) +#define ZXDH_CQPSQ_STAG_ARIGHTS_S 39 +#define ZXDH_CQPSQ_STAG_ARIGHTS GENMASK_ULL(43, 39) +// #define ZXDH_CQPSQ_STAG_REMACCENABLED_S 53 +// #define ZXDH_CQPSQ_STAG_REMACCENABLED BIT_ULL(53) +#define 
ZXDH_CQPSQ_STAG_VABASEDTO_S 45 +#define ZXDH_CQPSQ_STAG_VABASEDTO BIT_ULL(45) +#define ZXDH_CQPSQ_STAG_USEHMCFNIDX_S 56 +#define ZXDH_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(56) +#define ZXDH_CQPSQ_STAG_FCN_INDEX_S 46 +#define ZXDH_CQPSQ_STAG_FCN_INDEX GENMASK_ULL(55, 46) +// #define ZXDH_CQPSQ_STAG_USEPFRID_S 61 +// #define ZXDH_CQPSQ_STAG_USEPFRID BIT_ULL(61) +#define ZXDH_CQPSQ_STAG_SHARED_S 44 +#define ZXDH_CQPSQ_STAG_SHARED BIT_ULL(44) + +#define ZXDH_CQPSQ_STAG_PBA_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_STAG_PBA ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_STAG_HMCFNIDX_S 0 +#define ZXDH_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0) + +#define ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +#define ZXDH_CQPSQ_QUERYSTAG_IDX_S ZXDH_CQPSQ_STAG_IDX_S +#define ZXDH_CQPSQ_QUERYSTAG_IDX ZXDH_CQPSQ_STAG_IDX +#define ZXDH_CQPSQ_MLM_TABLEIDX_S 0 +#define ZXDH_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_MLM_FREEENTRY_S 62 +#define ZXDH_CQPSQ_MLM_FREEENTRY BIT_ULL(62) +#define ZXDH_CQPSQ_MLM_IGNORE_REF_CNT_S 61 +#define ZXDH_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61) +#define ZXDH_CQPSQ_MLM_MAC0_S 0 +#define ZXDH_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0) +#define ZXDH_CQPSQ_MLM_MAC1_S 8 +#define ZXDH_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8) +#define ZXDH_CQPSQ_MLM_MAC2_S 16 +#define ZXDH_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16) +#define ZXDH_CQPSQ_MLM_MAC3_S 24 +#define ZXDH_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24) +#define ZXDH_CQPSQ_MLM_MAC4_S 32 +#define ZXDH_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32) +#define ZXDH_CQPSQ_MLM_MAC5_S 40 +#define ZXDH_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40) +#define ZXDH_CQPSQ_MAT_REACHMAX_S 0 +#define ZXDH_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_MAT_MACADDR_S 0 +#define ZXDH_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0) +#define ZXDH_CQPSQ_MAT_ARPENTRYIDX_S 0 +#define ZXDH_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0) +#define ZXDH_CQPSQ_MAT_ENTRYVALID_S 42 +#define ZXDH_CQPSQ_MAT_ENTRYVALID BIT_ULL(42) +#define ZXDH_CQPSQ_MAT_PERMANENT_S 43 
+#define ZXDH_CQPSQ_MAT_PERMANENT BIT_ULL(43) +#define ZXDH_CQPSQ_MAT_QUERY_S 44 +#define ZXDH_CQPSQ_MAT_QUERY BIT_ULL(44) +#define ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT_S 0 +#define ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MVPBP_FIRST_PD_INX_S 16 +#define ZXDH_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16) +#define ZXDH_CQPSQ_MVPBP_SD_INX_S 32 +#define ZXDH_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32) +#define ZXDH_CQPSQ_MVPBP_INV_PD_ENT_S 62 +#define ZXDH_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62) +#define ZXDH_CQPSQ_MVPBP_PD_PLPBA_S 3 +#define ZXDH_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3) + +/* Manage Push Page - MPP */ +#define ZXDH_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff +#define ZXDH_INVALID_PUSH_PAGE_INDEX 0xffffffff + +#define ZXDH_CQPSQ_MPP_QS_HANDLE_S 0 +#define ZXDH_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MPP_PPIDX_S 0 +#define ZXDH_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_MPP_PPTYPE_S 60 +#define ZXDH_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60) + +#define ZXDH_CQPSQ_MPP_FREE_PAGE_S 62 +#define ZXDH_CQPSQ_MPP_FREE_PAGE BIT_ULL(62) + +/* Upload Context - UCTX */ +#define ZXDH_CQPSQ_UCTX_QPCTXADDR_S ZXDH_CQPHC_QPCTX_S +#define ZXDH_CQPSQ_UCTX_QPCTXADDR ZXDH_CQPHC_QPCTX +#define ZXDH_CQPSQ_UCTX_QPID_S 0 +#define ZXDH_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_UCTX_QPTYPE_S 48 +#define ZXDH_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48) + +#define ZXDH_CQPSQ_UCTX_RAWFORMAT_S 61 +#define ZXDH_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61) +#define ZXDH_CQPSQ_UCTX_FREEZEQP_S 62 +#define ZXDH_CQPSQ_UCTX_FREEZEQP BIT_ULL(62) + +#define ZXDH_CQPSQ_MHMC_VFIDX_S 0 +#define ZXDH_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_MHMC_FREEPMFN_S 62 +#define ZXDH_CQPSQ_MHMC_FREEPMFN BIT_ULL(62) + +#define ZXDH_CQPSQ_SHMCRP_HMC_PROFILE_S 0 +#define ZXDH_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0) +#define ZXDH_CQPSQ_SHMCRP_VFNUM_S 32 +#define ZXDH_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32) +#define ZXDH_CQPSQ_CEQ_CEQSIZE_S 0 +#define ZXDH_CQPSQ_CEQ_CEQSIZE 
GENMASK_ULL(21, 0) +#define ZXDH_CQPSQ_CEQ_CEQID_S 0 +#define ZXDH_CQPSQ_CEQ_CEQID GENMASK_ULL(11, 0) + +#define ZXDH_CQPSQ_CEQ_LPBLSIZE_S ZXDH_CQPSQ_CQ_LPBLSIZE_S +#define ZXDH_CQPSQ_CEQ_LPBLSIZE_M ZXDH_CQPSQ_CQ_LPBLSIZE_M +#define ZXDH_CQPSQ_CEQ_LPBLSIZE ZXDH_CQPSQ_CQ_LPBLSIZE +#define ZXDH_CQPSQ_CEQ_VMAP_S 47 +#define ZXDH_CQPSQ_CEQ_VMAP BIT_ULL(47) +#define ZXDH_CQPSQ_CEQ_ITRNOEXPIRE_S 46 +#define ZXDH_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46) +#define ZXDH_CQPSQ_CEQ_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +/*CEQC format*/ +#define ZXDH_CEQC_PERIOD_L_S 0 +#define ZXDH_CEQC_PERIOD_L GENMASK_ULL(2, 0) +#define ZXDH_CEQC_VHCA_S 3 +#define ZXDH_CEQC_VHCA GENMASK_ULL(12, 3) +#define ZXDH_CEQC_INTR_IDX_S 13 +#define ZXDH_CEQC_INTR_IDX GENMASK_ULL(30, 13) +#define ZXDH_CEQC_INT_TYPE_S 31 +#define ZXDH_CEQC_INT_TYPE BIT_ULL(31) +#define ZXDH_CEQC_CEQ_HEAD_S 32 +#define ZXDH_CEQC_CEQ_HEAD GENMASK_ULL(52, 32) +#define ZXDH_CEQC_CEQE_VALID_S 53 +#define ZXDH_CEQC_CEQE_VALID BIT_ULL(53) +#define ZXDH_CEQC_LEAF_PBL_SIZE_S 54 +#define ZXDH_CEQC_LEAF_PBL_SIZE GENMASK_ULL(55, 54) +#define ZXDH_CEQC_CEQ_SIZE_S 56 +#define ZXDH_CEQC_CEQ_SIZE GENMASK_ULL(57, 56) +#define ZXDH_CEQC_LOG_CEQ_NUM_S 58 +#define ZXDH_CEQC_LOG_CEQ_NUM GENMASK_ULL(62, 58) +#define ZXDH_CEQC_CEQ_STATE_S 63 +#define ZXDH_CEQC_CEQ_STATE BIT_ULL(63) + +#define ZXDH_CEQC_CEQ_ADDRESS_S 0 +#define ZXDH_CEQC_CEQ_ADDRESS GENMASK_ULL(56, 0) +#define ZXDH_CEQC_PERIOD_H_S 57 +#define ZXDH_CEQC_PERIOD_H GENMASK_ULL(63, 57) + +#define ZXDH_CEQC_CEQ_MAX_CNT_S 0 +#define ZXDH_CEQC_CEQ_MAX_CNT GENMASK_ULL(15, 0) +#define ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG_S 16 +#define ZXDH_CEQC_CEQ_AXI_RSP_ERR_FLAG BIT_ULL(16) + +#define ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID_S 2 +#define ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID_S 4 +#define ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID GENMASK_ULL(6, 4) + +#define ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID_S 0 +#define 
ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_CEQ_PBLE_AXI_INFO_AXI_ID_S 4 +#define ZXDH_CEQ_PBLEE_AXI_INFO_AXI_ID GENMASK_ULL(6, 4) + +#define ZXDH_CQPSQ_AEQ_AEQECNT_S 0 +#define ZXDH_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0) + +#define ZXDH_CQPSQ_AEQ_LPBLSIZE_S ZXDH_CQPSQ_CQ_LPBLSIZE_S +#define ZXDH_CQPSQ_AEQ_LPBLSIZE_M ZXDH_CQPSQ_CQ_LPBLSIZE_M +#define ZXDH_CQPSQ_AEQ_LPBLSIZE ZXDH_CQPSQ_CQ_LPBLSIZE +#define ZXDH_CQPSQ_AEQ_VMAP_S 47 +#define ZXDH_CQPSQ_AEQ_VMAP BIT_ULL(47) +#define ZXDH_CQPSQ_AEQ_FIRSTPMPBLIDX_S 0 +#define ZXDH_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0) + +#define ZXDH_CEQ_ARM_VHCA_ID_S 0 +#define ZXDH_CEQ_ARM_VHCA_ID GENMASK_ULL(9, 0) +#define ZXDH_CEQ_ARM_CEQ_ID_S 10 +#define ZXDH_CEQ_ARM_CEQ_ID GENMASK_ULL(21, 10) + +#define ZXDH_CEQ_INT_PCIE_DBI_EN_S 0 +#define ZXDH_CEQ_INT_PCIE_DBI_EN BIT_ULL(0) +#define ZXDH_CEQ_INT_EP_ID_S 1 +#define ZXDH_CEQ_INT_EP_ID GENMASK_ULL(3, 1) +#define ZXDH_CEQ_INT_PF_NUM_S 4 +#define ZXDH_CEQ_INT_PF_NUM GENMASK_ULL(8, 4) +#define ZXDH_CEQ_INT_VF_NUM_S 9 +#define ZXDH_CEQ_INT_VF_NUM GENMASK_ULL(16, 9) +#define ZXDH_CEQ_INT_VF_ACTIVE_S 17 +#define ZXDH_CEQ_INT_VF_ACTIVE BIT_ULL(17) + +/*AEQC format*/ +#define ZXDH_AEQC_INTR_IDX_S 0 +#define ZXDH_AEQC_INTR_IDX GENMASK_ULL(11, 0) +#define ZXDH_AEQC_AEQ_HEAD_S 13 +#define ZXDH_AEQC_AEQ_HEAD GENMASK_ULL(34, 13) +#define ZXDH_AEQC_LEAF_PBL_SIZE_S 35 +#define ZXDH_AEQC_LEAF_PBL_SIZE GENMASK_ULL(36, 35) +#define ZXDH_AEQC_VIRTUALLY_MAPPED_S 37 +#define ZXDH_AEQC_VIRTUALLY_MAPPED BIT_ULL(37) +#define ZXDH_AEQC_AEQ_SIZE_S 38 +#define ZXDH_AEQC_AEQ_SIZE GENMASK_ULL(59, 38) +#define ZXDH_AEQC_AEQ_STATE_S 60 +#define ZXDH_AEQC_AEQ_STATE GENMASK_ULL(63, 60) +#define ZXDH_AEQC_AEQ_ADDRESS_S 0 +#define ZXDH_AEQC_AEQ_ADDRESS GENMASK_ULL(63, 0) + +#define ZXDH_AEQ_MSIX_DATA_VECTOR_S 0 +#define ZXDH_AEQ_MSIX_DATA_VECTOR GENMASK_ULL(10, 0) +#define ZXDH_AEQ_MSIX_DATA_TC_S 12 +#define ZXDH_AEQ_MSIX_DATA_TC GENMASK_ULL(14, 12) +#define ZXDH_AEQ_MSIX_DATA_VF_ACTIVE_S 15 
+#define ZXDH_AEQ_MSIX_DATA_VF_ACTIVE BIT_ULL(15) +#define ZXDH_AEQ_MSIX_DATA_VF_ID_S 16 +#define ZXDH_AEQ_MSIX_DATA_VF_ID GENMASK_ULL(23, 16) +#define ZXDH_AEQ_MSIX_DATA_PF_ID_S 24 +#define ZXDH_AEQ_MSIX_DATA_PF_ID GENMASK_ULL(28, 24) + +#define ZXDH_AEQ_MSIX_CONFIG_IRQ_S 0 +#define ZXDH_AEQ_MSIX_CONFIG_IRQ GENMASK_ULL(2, 0) +#define ZXDH_AEQ_MSIX_CONFIG_EPID_S 3 +#define ZXDH_AEQ_MSIX_CONFIG_EPID GENMASK_ULL(7, 3) + +#define ZXDH_AEQ_CACHE_ID_S 0 +#define ZXDH_AEQ_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_AEQ_AXI_ID_S 2 +#define ZXDH_AEQ_AXI_ID GENMASK_ULL(4, 2) +#define ZXDH_AEQ_WAY_PARTITION_S 5 +#define ZXDH_AEQ_WAY_PATITION GENMASK_ULL(7, 5) + +#define ZXDH_AEQ_INDICIATE_ID_S 0 +#define ZXDH_AEQ_INDICIATE_ID GENMASK_ULL(1, 0) + +#define ZXDH_COMMIT_FPM_QPCNT_S 0 +#define ZXDH_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0) + +#define ZXDH_COMMIT_FPM_BASE_S 32 +#define ZXDH_CQPSQ_CFPM_HMCFNID_S 0 +#define ZXDH_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0) + +#define ZXDH_CQPSQ_FWQE_AECODE_S 0 +#define ZXDH_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_FWQE_AESOURCE_S 32 +#define ZXDH_CQPSQ_FWQE_AESOURCE GENMASK_ULL(35, 32) +#define ZXDH_CQPSQ_FWQE_RQMNERR_S 0 +#define ZXDH_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_FWQE_RQMJERR_S 16 +#define ZXDH_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_FWQE_SQMNERR_S 32 +#define ZXDH_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32) +#define ZXDH_CQPSQ_FWQE_SQMJERR_S 48 +#define ZXDH_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48) +#define ZXDH_CQPSQ_FWQE_QPID_S 0 +#define ZXDH_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_FWQE_GENERATE_AE_S 53 +#define ZXDH_CQPSQ_FWQE_GENERATE_AE BIT_ULL(53) +#define ZXDH_CQPSQ_FWQE_USERFLCODE_S 54 +#define ZXDH_CQPSQ_FWQE_USERFLCODE BIT_ULL(54) +#define ZXDH_CQPSQ_FWQE_FLUSHSQ_S 55 +#define ZXDH_CQPSQ_FWQE_FLUSHSQ BIT_ULL(55) +#define ZXDH_CQPSQ_FWQE_FLUSHRQ_S 56 +#define ZXDH_CQPSQ_FWQE_FLUSHRQ BIT_ULL(56) +#define ZXDH_CQPSQ_MAPT_PORT_S 0 +#define ZXDH_CQPSQ_MAPT_PORT 
GENMASK_ULL(15, 0) +#define ZXDH_CQPSQ_MAPT_ADDPORT_S 62 +#define ZXDH_CQPSQ_MAPT_ADDPORT BIT_ULL(62) +#define ZXDH_CQPSQ_UPESD_SDCMD_S 0 +#define ZXDH_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_UPESD_SDDATALOW_S 0 +#define ZXDH_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0) +#define ZXDH_CQPSQ_UPESD_SDDATAHI_S 32 +#define ZXDH_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32) +#define ZXDH_CQPSQ_UPESD_HMCFNID_S 0 +#define ZXDH_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0) +#define ZXDH_CQPSQ_UPESD_ENTRY_VALID_S 63 +#define ZXDH_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63) + +#define ZXDH_CQPSQ_UPESD_BM_PF 0 +#define ZXDH_CQPSQ_UPESD_BM_CP_LM 1 +#define ZXDH_CQPSQ_UPESD_BM_AXF 2 +#define ZXDH_CQPSQ_UPESD_BM_LM 4 +#define ZXDH_CQPSQ_UPESD_BM_S 32 +#define ZXDH_CQPSQ_UPESD_BM GENMASK_ULL(34, 32) +#define ZXDH_CQPSQ_UPESD_ENTRY_COUNT_S 0 +#define ZXDH_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0) +#define ZXDH_CQPSQ_UPESD_SKIP_ENTRY_S 7 +#define ZXDH_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7) + +/* Suspend QP */ +#define ZXDH_CQPSQ_SUSPENDQP_QPID_S 0 +#define ZXDH_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0) +#define ZXDH_CQPSQ_RESUMEQP_QSHANDLE_S 0 +#define ZXDH_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0) + +/* Query hw Context OP */ +#define ZXDH_CQPSQ_QUERY_QPC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_QPC_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_QUERY_CQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_CQC_ID GENMASK_ULL(20, 0) +#define ZXDH_CQPSQ_QUERY_CEQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_CEQC_ID GENMASK_ULL(10, 0) +#define ZXDH_CQPSQ_QUERY_AEQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_AEQC_ID GENMASK_ULL(9, 0) +#define ZXDH_CQPSQ_QUERY_SRQC_ID_S 0 +#define ZXDH_CQPSQ_QUERY_SRQC_ID GENMASK_ULL(19, 0) + +#define ZXDH_CQPSQ_RESUMEQP_QPID_S ZXDH_CQPSQ_SUSPENDQP_QPID_S +#define ZXDH_CQPSQ_RESUMEQP_QPID_M ZXDH_CQPSQ_SUSPENDQP_QPID_M +#define ZXDH_CQPSQ_RESUMEQP_QPID ZXDH_CQPSQ_SUSPENDQP_QPID + +#define ZXDH_CQPSQ_MIN_STAG_INVALID 0x0001 +#define ZXDH_CQPSQ_MIN_SUSPEND_PND 0x0005 + +#define ZXDH_CQPSQ_MAJ_NO_ERROR 0x0000 +#define 
ZXDH_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000 +#define ZXDH_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001 +#define ZXDH_CQPSQ_MAJ_ERROR 0xFFFF + +//NVME OF IOQ SQ +#define NOF_IOQ_SQ_WQE_SIZE 32 +#define NOF_IOQ_SQ_SIZE 512 +#define NOF_IOQ_SQ_LOG_SIZE 9 + +//CQC FIELD MASK +#define RDMACPC_LEAF_PBL_SIZE GENMASK_ULL(56, 55) +#define RDMACPC_DOORBELL_SHADOW_ADDR GENMASK_ULL(57, 0) +#define RDMACPC_LOG_CQE_NUM GENMASK_ULL(28, 24) +#define RDMACPC_HW_CQ_HEAD GENMASK_ULL(63, 41) +#define RDMACPC_CQ_ADDRESS GENMASK_ULL(55, 0) +#define RDMACPC_ROOT_PBLE GENMASK_ULL(51, 0) + +//CQC TABLE INDEX +#define ZXDH_CQC_LEAF_PBLE_SIZE_BYTE_OFFSET 0x00 +#define ZXDH_CQC_DOORBELL_SHADOW_ADDR_BYTE_OFFSET 0x08 +#define ZXDH_CQC_LOG_CQE_NUM 0x10 +#define ZXDH_CQC_HW_CQ_HEAD_BYTE_OFFSET 0x18 +#define ZXDH_CQC_CQ_ADDRESS_BYTE_OFFSET 0x20 +#define ZXDH_CQC_HW_CQ_ROOT_PBLE_BYTE_OFFSET 0x38 + +//CEQC ABLE INDEX +#define ZXDH_CEQC_LEAF_PBL_SIZE_OFFSET 0x00 +#define ZXDH_CEQC_CEQ_ADDRESS_OFFSET 0x08 + +//CEQC +#define RDMACEQC_LOG_CEQ_NUM GENMASK_ULL(62, 58) +#define RDMACEQC_LEAF_PBL_SIZE GENMASK_ULL(55, 54) +#define RDMACEQC_CEQE_HEAD GENMASK_ULL(52, 32) +#define RDMACEQC_CEQ_ADDRESS GENMASK_ULL(56, 0) + +//AEQC TABLE INDEX +#define ZXDH_AEQC_AEQ_HEAD_OFFSET 0x00 +#define ZXDH_AEQC_AEQ_ADDRESS_OFFSET 0x08 + +//QPC_FIELD_MASK +#define RDMAQPC_MASK_INIT 0xFFFFFFFFFFFFFFFFUL +#define RDMAQPC_MASK_RESET 0xFFFFFFFFFFFFFFFFUL +#define RDMAQPC_TX_MASKL_DESTROY 0x146800UL +#define RDMAQPC_RX_MASKL_DESTROY 0x81C0000000UL + +#define RDMAQPC_TX_MASKL_RETRY_CNT (0x1UL << 1) +#define RDMAQPC_TX_MASKL_CUR_RETRY_CNT (0x1UL << 2) +#define RDMAQPC_TX_MASKL_READ_RETRY_FLAG (0x1UL << 3) +#define RDMAQPC_TX_MASKL_LAST_ACK_PSN (0x1UL << 4) +#define RDMAQPC_TX_MASKL_LSN (0x1UL << 6) +#define RDMAQPC_TX_MASKL_ACK_CREDITS (0x1UL << 7) +#define RDMAQPC_TX_MASKL_RNR_RETRY_FLAG (0x1UL << 8) +#define RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD (0x1UL << 9) +#define RDMAQPC_TX_MASKL_RNR_RETRY_TIME (0x1UL << 10) +#define RDMAQPC_TX_MASKL_PSN_MAX 
(0x1UL << 16) +#define RDMAQPC_TX_MASKL_PSN_NXT (0x1UL << 17) +#define RDMAQPC_TX_MASKL_LOCAL_ACK_TIMEOUT (0x1UL << 21) +#define RDMAQPC_TX_MASKL_RETRY_FLAG (0x1UL << 22) +#define RDMAQPC_TX_MASKL_HW_SQ_TAIL_UNA (0x1UL << 23) +#define RDMAQPC_TX_MASKL_LAST_ACK_WQE_OFFSET (0x1UL << 24) +#define RDMAQPC_TX_MASKL_SQ_VMAP (0x1UL << 26) +#define RDMAQPC_TX_MASKL_SQ_LPBL_SIZE (0x1UL << 27) +#define RDMAQPC_TX_MASKL_IPV4 (0x1UL << 29) +#define RDMAQPC_TX_MASKL_INSERT_VLANTAG (0x1UL << 32) +#define RDMAQPC_TX_MASKL_VLANTAG (0x1UL << 33) +#define RDMAQPC_TX_MASKL_PD_ID (0x1UL << 34) +#define RDMAQPC_TX_MASKL_SQ_PA (0x1UL << 38) +#define RDMAQPC_TX_MASKL_DEST_IP_LOW (0x1UL << 39) +#define RDMAQPC_TX_MASKL_DEST_IP_HIGH (0x1UL << 40) +#define RDMAQPC_TX_MASKL_SRC_PORT (0x1UL << 41) +#define RDMAQPC_TX_MASKL_FLOWLABLE (0x1UL << 43) +#define RDMAQPC_TX_MASKL_TTL (0x1UL << 44) +#define RDMAQPC_TX_MASKL_QKEY (0x1UL << 46) +#define RDMAQPC_TX_MASKL_DEST_QPN (0x1UL << 47) +#define RDMAQPC_TX_MASKL_ORD_SIZE (0x1UL << 48) +#define RDMAQPC_TX_MASKL_PKEY (0x1UL << 49) +#define RDMAQPC_TX_MASKL_DEST_MAC (0x1UL << 50) +#define RDMAQPC_TX_MASKL_LOCAL_IP_LOW (0x1UL << 52) +#define RDMAQPC_TX_MASKL_LOCAL_IP_HIGH (0x1UL << 53) +#define RDMAQPC_TX_MASKL_PMTU (0x1UL << 55) +#define RDMAQPC_TX_MASKL_ACK_TIMEOUT (0x1UL << 56) +#define RDMAQPC_TX_MASKL_LOG_SQ_SIZE (0x1UL << 57) +#define RDMAQPC_TX_MASKL_NVMEOF_QID (0x1UL << 59) +#define RDMAQPC_TX_MASKL_NVMEOF_TGT (0x1UL << 60) +#define RDMAQPC_TX_MASKL_NVMEOF_IOQ (0x1UL << 61) +#define RDMAQPC_TX_MASKL_GQP_ID (0x1ULL << 62) + +#define RDMAQPC_TX_MASKH_QUEUE_TC (0x1UL << 0) +#define RDMAQPC_TX_MASKH_TOS (0x1UL << 5) +#define RDMAQPC_TX_MASKH_WS_IDX (0x1UL << 7) +#define RDMAQPC_TX_MASKH_QP_STATE (0x1UL << 8) +#define RDMAQPC_TX_MASKH_RNR_RETRY_CNT (0x1UL << 26) +#define RDMAQPC_TX_MASKH_RNR_CUR_RETRY_CNT (0x1UL << 27) + +#define RDMAQPC_TX_MASKH_ERR_FLAG (0x1UL << 13) +#define RDMAQPC_TX_MASKH_ACK_ERR_FLAG (0x1UL << 14) +#define 
RDMAQPC_TX_MASKH_RDWQE_PYLD_LENGTH (0x1UL << 16) + +#define RDMAQPC_TX_MASKH_RD_MSG_LOSS_ERR_FLAG (0x1UL << 19) +#define RDMAQPC_TX_MASKH_PKTCHK_RD_MSG_LOSS_ERR_CNT (0x1UL << 20) +#define RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_CNT (0x1UL << 21) +#define RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_FLAG (0x1UL << 22) +#define RDMAQPC_TX_MASKH_RECV_ERR_FLAG (0x1UL << 23) +#define RDMAQPC_TX_MASKH_RECV_READ_FLAG (0x1UL << 24) +#define RDMAQPC_TX_MASKH_RETRY_CQE_SQ_OPCODE (0x1UL << 25) +#define RDMAQPC_TX_MASKH_PACKAGE_ERR_FLAG (0x1UL << 17) + +#define RDMAQPC_RX_MASKL_QKEY (0x1UL << 3) +#define RDMAQPC_RX_MASKL_EPSN (0x1UL << 6) +#define RDMAQPC_RX_MASKL_ACK_CREDITS (0x1UL << 7) +#define RDMAQPC_RX_MASKL_RNR_TIMER (0x1UL << 22) +#define RDMAQPC_RX_MASKL_LOCAL_IP (0x1UL << 30) +#define RDMAQPC_RX_MASKL_DEST_MAC (0x1ULL << 32) +#define RDMAQPC_RX_MASKL_NVMEOF_IOQ (0x1ULL << 33) +#define RDMAQPC_RX_MASKL_INSERT_VLANTAG (0x1ULL << 34) +#define RDMAQPC_RX_MASKL_PMTU (0x1ULL << 35) +#define RDMAQPC_RX_MASKL_IPV4 (0x1ULL << 37) +#define RDMAQPC_RX_MASKL_PD_ID (0x1ULL << 38) +#define RDMAQPC_RX_MASKL_QP_STATE (0x1ULL << 39) +#define RDMAQPC_RX_MASKL_DEST_QPN (0x1ULL << 40) +#define RDMAQPC_RX_MASKL_FLOWLABLE (0x1ULL << 41) +#define RDMAQPC_RX_MASKL_TTL (0x1ULL << 42) +#define RDMAQPC_RX_MASKL_TOS (0x1ULL << 43) +#define RDMAQPC_RX_MASKL_VLANTAG (0x1ULL << 44) +#define RDMAQPC_RX_MASKL_NVMEOF_QID (0x1ULL << 45) +#define RDMAQPC_RX_MASKL_NVMEOF_TGT (0x1ULL << 45) +#define RDMAQPC_RX_MASKL_HDR_LEN (0x1ULL << 47) +#define RDMAQPC_RX_MASKL_PKEY (0x1ULL << 48) +#define RDMAQPC_RX_MASKL_SRC_PORT (0x1ULL << 49) +#define RDMAQPC_RX_MASKL_IRD_SIZE (0x1ULL << 52) +#define RDMAQPC_RX_MASKL_WRITE_EN (0x1ULL << 55) +#define RDMAQPC_RX_MASKL_READ_EN (0x1ULL << 56) +#define RDMAQPC_RX_MASKL_GQP_ID (0x1ULL << 62) +#define RDMAQPC_RX_MASKL_WS_IDX (0x1ULL << 63) + +#define RDMAQPC_RX_MASKH_DEST_IP (0x1UL << 4) +#define RDMAQPC_RX_MASKH_QUEUE_TC (0x1UL << 10) + +//QPC_TX stucture of DPU +#define 
RDMAQPC_TX_RETRY_CNT_S 8 +#define RDMAQPC_TX_RETRY_CNT GENMASK_ULL(10, 8) +#define RDMAQPC_TX_CUR_RETRY_CNT_S 11 +#define RDMAQPC_TX_CUR_RETRY_CNT GENMASK_ULL(13, 11) +#define RDMAQPC_TX_LAST_ACK_PSN_S 15 +#define RDMAQPC_TX_LAST_ACK_PSN GENMASK_ULL(38, 15) +#define RDMAQPC_TX_LSN_LOW1_S 63 +#define RDMAQPC_TX_LSN_LOW1 BIT_ULL(63) + +#define RDMAQPC_TX_LSN_HIGH23_S 0 +#define RDMAQPC_TX_LSN_HIGH23 GENMASK_ULL(22, 0) +#define RDMAQPC_TX_ACKCREDITS_S 23 +#define RDMAQPC_TX_ACKCREDITS GENMASK_ULL(27, 23) +#define RDMAQPC_TX_RNR_RETRY_THRESHOLD_S 29 +#define RDMAQPC_TX_RNR_RETRY_THRESHOLD GENMASK_ULL(33, 29) + +#define RDMAQPC_TX_SSN_S 44 +#define RDMAQPC_TX_SSN GENMASK_ULL(63, 44) + +#define RDMAQPC_TX_PSN_MAX_S 5 +#define RDMAQPC_TX_PSN_MAX GENMASK_ULL(28, 5) +#define RDMAQPC_TX_PSN_NEXT_S 29 +#define RDMAQPC_TX_PSN_NEXT GENMASK_ULL(52, 29) + +#define RDMAQPC_TX_HW_SQ_TAIL_HIGH_S 0 +#define RDMAQPC_TX_HW_SQ_TAIL_HIGH GENMASK_ULL(6, 0) +#define RDMAQPC_TX_LOCAL_ACK_TIMEOUT_S 57 +#define RDMAQPC_TX_LOCAL_ACK_TIMEOUT GENMASK_ULL(61, 57) + +#define RDMAQPC_TX_RNR_RETRY_CNT_S 6 +#define RDMAQPC_TX_RNR_RETRY_CNT GENMASK_ULL(8, 6) +#define RDMAQPC_TX_RNR_CUR_RETRY_CNT_S 9 +#define RDMAQPC_TX_RNR_CUR_RETRY_CNT GENMASK_ULL(11, 9) + +#define RDMAQPC_TX_RETRY_FLAG BIT_ULL(62) +#define RDMAQPC_TX_RNR_RETRY_FLAG BIT_ULL(28) +#define RDMAQPC_TX_READ_RETRY_FLAG BIT_ULL(14) +#define RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG GENMASK_ULL(5, 0) +#define RDMAQPC_TX_ERR_FLAG BIT_ULL(47) +#define RDMAQPC_TX_ACK_ERR_FLAG BIT_ULL(48) +#define RDMAQPC_TX_PACKAGE_ERR_FLAG BIT_ULL(27) +#define RDMAQPC_TX_RECV_ERR_FLAG GENMASK_ULL(47, 46) +#define RDMAQPC_TX_WIN_RADDR GENMASK_ULL(38, 30) +#define RDMAQPC_TX_RNR_RETRY_TIME_L GENMASK_ULL(63, 34) +#define RDMAQPC_TX_RNR_RETRY_TIME_H GENMASK(1, 0) +#define RDMAQPC_TX_LAST_ACK_WQE_OFFSET GENMASK_ULL(46, 16) +#define RDMAQPC_TX_HW_SQ_TAIL_UNA GENMASK_ULL(15, 0) +#define RDMAQPC_TX_RDWQE_PYLD_LENGTH_H GENMASK_ULL(26, 0) +#define 
RDMAQPC_TX_RDWQE_PYLD_LENGTH_L GENMASK_ULL(63, 59) +#define RDMAQPC_TX_RECV_READ_FLAG BIT_ULL(48) +#define RDMAQPC_TX_RECV_ERR_FLAG GENMASK_ULL(47, 46) +#define RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_FLAG BIT_ULL(45) +#define RDMAQPC_TX_RECV_RD_MSG_LOSS_ERR_CNT GENMASK_ULL(44, 43) +#define RDMAQPC_TX_RD_MSG_LOSS_ERR_FLAG BIT_ULL(40) +#define RDMAQPC_TX_PKTCHK_RD_MSG_LOSS_ERR_CNT GENMASK_ULL(42, 41) + +#define RDMAQPC_TX_HW_SQ_TAIL_UNA_S 0 +#define RDMAQPC_TX_HW_SQ_TAIL_UNA GENMASK_ULL(15, 0) + +#define RDMAQPC_TX_SERVICE_TYPE_S 0 +#define RDMAQPC_TX_SERVICE_TYPE GENMASK_ULL(2, 0) +#define RDMAQPC_TX_SQ_VMAP_S 3 +#define RDMAQPC_TX_SQ_VMAP BIT_ULL(3) +#define RDMAQPC_TX_SQ_LPBL_SIZE_S 4 +#define RDMAQPC_TX_SQ_LPBL_SIZE GENMASK_ULL(5, 4) +#define RDMAQPC_TX_IS_QP1_S 6 +#define RDMAQPC_TX_IS_QP1 BIT_ULL(6) +#define RDMAQPC_TX_IPV4_S 7 +#define RDMAQPC_TX_IPV4 BIT_ULL(7) +#define RDMAQPC_TX_FAST_REG_EN_S 8 +#define RDMAQPC_TX_FAST_REG_EN BIT_ULL(8) +#define RDMAQPC_TX_BIND_EN_S 9 +#define RDMAQPC_TX_BIND_EN BIT_ULL(9) +#define RDMAQPC_TX_INSERT_VLANTAG_S 10 +#define RDMAQPC_TX_INSERT_VLANTAG BIT_ULL(10) +#define RDMAQPC_TX_VLANTAG_S 11 +#define RDMAQPC_TX_VLANTAG GENMASK_ULL(26, 11) +#define RDMAQPC_TX_PD_INDEX_S 27 +#define RDMAQPC_TX_PD_INDEX GENMASK_ULL(50, 27) +#define RDMAQPC_TX_RSV_LKEY_EN_S 51 +#define RDMAQPC_TX_RSV_LKEY_EN BIT_ULL(51) +#define RDMAQPC_TX_ECN_EN_S 63 +#define RDMAQPC_TX_ECN_EN BIT_ULL(63) + +#define RDMAQPC_TX_DEST_IPADDR3_S 0 +#define RDMAQPC_TX_DEST_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_IPADDR2_S 32 +#define RDMAQPC_TX_DEST_IPADDR2 GENMASK_ULL(63, 32) +#define RDMAQPC_TX_DEST_IPADDR1_S 0 +#define RDMAQPC_TX_DEST_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_IPADDR0_S 32 +#define RDMAQPC_TX_DEST_IPADDR0 GENMASK_ULL(63, 32) + +#define RDMAQPC_TX_SRC_PORTNUM_S 0 +#define RDMAQPC_TX_SRC_PORTNUM GENMASK_ULL(15, 0) +#define RDMAQPC_TX_DEST_PORTNUM_S 16 +#define RDMAQPC_TX_DEST_PORTNUM GENMASK_ULL(31, 16) +#define RDMAQPC_TX_FLOWLABEL_S 
32 +#define RDMAQPC_TX_FLOWLABEL GENMASK_ULL(51, 32) +#define RDMAQPC_TX_TTL_S 52 +#define RDMAQPC_TX_TTL GENMASK_ULL(59, 52) +#define RDMAQPC_TX_ROCE_TVER_S 60 +#define RDMAQPC_TX_ROCE_TVER GENMASK_ULL(63, 60) + +#define RDMAQPC_TX_QKEY_S 0 +#define RDMAQPC_TX_QKEY GENMASK_ULL(31, 0) +#define RDMAQPC_TX_DEST_QP_S 32 +#define RDMAQPC_TX_DEST_QP GENMASK_ULL(55, 32) +#define RDMAQPC_TX_ORD_SIZE_S 56 +#define RDMAQPC_TX_ORD_SIZE GENMASK_ULL(63, 56) + +#define RDMAQPC_TX_PKEY_S 0 +#define RDMAQPC_TX_PKEY GENMASK_ULL(15, 0) +#define RDMAQPC_TX_DEST_MAC_S 16 +#define RDMAQPC_TX_DEST_MAC GENMASK_ULL(63, 16) + +#define RDMAQPC_TX_LOCAL_IPADDR3_S 0 +#define RDMAQPC_TX_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_LOCAL_IPADDR2_S 32 +#define RDMAQPC_TX_LOCAL_IPADDR2 GENMASK_ULL(63, 32) +#define RDMAQPC_TX_LOCAL_IPADDR1_S 0 +#define RDMAQPC_TX_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_TX_LOCAL_IPADDR0_S 32 +#define RDMAQPC_TX_LOCAL_IPADDR0 GENMASK_ULL(63, 32) + +#define RDMAQPC_TX_SRC_MAC_S 0 +#define RDMAQPC_TX_SRC_MAC GENMASK_ULL(47, 0) +#define RDMAQPC_TX_PMTU_S 48 +#define RDMAQPC_TX_PMTU GENMASK_ULL(50, 48) +#define RDMAQPC_TX_ACK_TIMEOUT_S 51 +#define RDMAQPC_TX_ACK_TIMEOUT GENMASK_ULL(55, 51) +#define RDMAQPC_TX_LOG_SQSIZE_S 56 +#define RDMAQPC_TX_LOG_SQSIZE GENMASK_ULL(59, 56) + +#define RDMAQPC_TX_CQN_S 0 +#define RDMAQPC_TX_CQN GENMASK_ULL(20, 0) +#define RDMAQPC_TX_NVMEOF_QID_S 21 +#define RDMAQPC_TX_NVMEOF_QID GENMASK_ULL(30, 21) +#define RDMAQPC_TX_IS_NVMEOF_TGT_S 31 +#define RDMAQPC_TX_IS_NVMEOF_TGT BIT_ULL(31) +#define RDMAQPC_TX_IS_NVMEOF_IOQ_S 32 +#define RDMAQPC_TX_IS_NVMEOF_IOQ BIT_ULL(32) +#define RDMAQPC_TX_DCQCN_ID_S 33 +#define RDMAQPC_TX_DCQCN_ID GENMASK_ULL(43, 33) +#define RDMAQPC_TX_DCQCN_EN_S 49 +#define RDMAQPC_TX_DCQCN_EN BIT_ULL(49) +#define RDMAQPC_TX_QUEUE_TC_S 50 +#define RDMAQPC_TX_QUEUE_TC GENMASK_ULL(52, 50) + +#define RDMAQPC_TX_QPN_S 0 +#define RDMAQPC_TX_QPN GENMASK_ULL(19, 0) +#define RDMAQPC_TX_TOS_S 50 +#define 
RDMAQPC_TX_TOS GENMASK_ULL(57, 50) +#define RDMAQPC_TX_VHCA_ID_LOW6_S 58 +#define RDMAQPC_TX_VHCA_ID_LOW6 GENMASK_ULL(63, 58) + +#define RDMAQPC_TX_VHCA_ID_HIGH4_S 0 +#define RDMAQPC_TX_VHCA_ID_HIGH4 GENMASK_ULL(3, 0) +#define RDMAQPC_TX_QP_FLOW_SET_S 4 +#define RDMAQPC_TX_QP_FLOW_SET GENMASK_ULL(16, 4) +#define RDMAQPC_TX_QPSTATE_S 17 +#define RDMAQPC_TX_QPSTATE GENMASK_ULL(19, 17) +#define RDMAQPC_TX_DEBUG_SET_S 20 +#define RDMAQPC_TX_DEBUG_SET GENMASK_ULL(29, 20) +#define RDMAQPC_TX_QP_GROUP_NUM_S 20 +#define RDMAQPC_TX_QP_GROUP_NUM GENMASK_ULL(30, 20) + +//QPC_RX stucture of DPU +#define RDMAQPC_RX_LAST_OPCODE_S 56 +#define RDMAQPC_RX_LAST_OPCODE GENMASK_ULL(63, 56) + +#define RDMAQPC_RX_EPSN_S 40 +#define RDMAQPC_RX_EPSN GENMASK_ULL(63, 40) + +#define RDMAQPC_RX_IRD_RXNUM_S 46 +#define RDMAQPC_RX_IRD_RXNUM GENMASK_ULL(54, 46) + +#define RDMAQPC_RX_LOCAL_IPADDR3_S 0 +#define RDMAQPC_RX_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_LOCAL_IPADDR2_S 32 +#define RDMAQPC_RX_LOCAL_IPADDR2 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_SRC_MAC_HIGH16_S 0 +#define RDMAQPC_RX_SRC_MAC_HIGH16 GENMASK_ULL(15, 0) +#define RDMAQPC_RX_DEST_MAC_S 16 +#define RDMAQPC_RX_DEST_MAC GENMASK_ULL(63, 16) + +#define RDMAQPC_RX_IS_NVMEOF_IOQ_S 0 +#define RDMAQPC_RX_IS_NVMEOF_IOQ BIT_ULL(0) +#define RDMAQPC_RX_INSERT_VLANTAG_S 1 +#define RDMAQPC_RX_INSERT_VLANTAG BIT_ULL(1) +#define RDMAQPC_RX_PMTU_S 2 +#define RDMAQPC_RX_PMTU GENMASK_ULL(4, 2) +#define RDMAQPC_RX_SERVICE_TYPE_S 5 +#define RDMAQPC_RX_SERVICE_TYPE GENMASK_ULL(7, 5) +#define RDMAQPC_RX_IPV4_S 8 +#define RDMAQPC_RX_IPV4 BIT_ULL(8) +#define RDMAQPC_RX_PD_INDEX_S 9 +#define RDMAQPC_RX_PD_INDEX GENMASK_ULL(28, 9) +#define RDMAQPC_RX_QPSTATE_S 29 +#define RDMAQPC_RX_QPSTATE GENMASK_ULL(31, 29) +#define RDMAQPC_RX_SRC_MAC_LOW32_S 32 +#define RDMAQPC_RX_SRC_MAC_LOW32 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_DEST_QP_HIGH12_S 0 +#define RDMAQPC_RX_DEST_QP_HIGH12 GENMASK_ULL(11, 0) +#define RDMAQPC_RX_FLOWLABEL_S 12 +#define 
RDMAQPC_RX_FLOWLABEL GENMASK_ULL(31, 12) +#define RDMAQPC_RX_TTL_S 32 +#define RDMAQPC_RX_TTL GENMASK_ULL(39, 32) +#define RDMAQPC_RX_TOS_S 40 +#define RDMAQPC_RX_TOS GENMASK_ULL(47, 40) +#define RDMAQPC_RX_VLANTAG_S 48 +#define RDMAQPC_RX_VLANTAG GENMASK_ULL(63, 48) + +#define RDMAQPC_RX_SRQN_S 0 +#define RDMAQPC_RX_SRQN GENMASK_ULL(18, 0) +#define RDMAQPC_RX_NVMEOF_QID_S 0 +#define RDMAQPC_RX_NVMEOF_QID GENMASK_ULL(9, 0) +#define RDMAQPC_RX_IS_NVMEOF_TGT_S 10 +#define RDMAQPC_RX_IS_NVMEOF_TGT BIT_ULL(10) + +#define RDMAQPC_RX_HDR_LEN_S 0 +#define RDMAQPC_RX_HDR_LEN GENMASK_ULL(9, 0) +#define RDMAQPC_RX_PKEY_S 32 +#define RDMAQPC_RX_PKEY GENMASK_ULL(47, 32) +#define RDMAQPC_RX_SRC_PORTNUM_S 48 +#define RDMAQPC_RX_SRC_PORTNUM GENMASK_ULL(63, 48) + +#define RDMAQPC_RX_WQE_SIGN_EN_S 1 +#define RDMAQPC_RX_WQE_SIGN_EN BIT_ULL(1) +#define RDMAQPC_RX_RQ_VMAP_S 2 +#define RDMAQPC_RX_RQ_VMAP BIT_ULL(2) +#define RDMAQPC_RX_IRD_SIZE_S 3 +#define RDMAQPC_RX_IRD_SIZE GENMASK_ULL(6, 3) +#define RDMAQPC_RX_LOG_RQSIZE_S 7 +#define RDMAQPC_RX_LOG_RQSIZE GENMASK_ULL(10, 7) +#define RDMAQPC_RX_SEND_EN_S 11 +#define RDMAQPC_RX_SEND_EN BIT_ULL(11) +#define RDMAQPC_RX_WRITE_EN_S 12 +#define RDMAQPC_RX_WRITE_EN BIT_ULL(12) +#define RDMAQPC_RX_READ_EN_S 13 +#define RDMAQPC_RX_READ_EN BIT_ULL(13) +#define RDMAQPC_RX_LOG_RQE_SIZE_S 14 +#define RDMAQPC_RX_LOG_RQE_SIZE GENMASK_ULL(16, 14) +#define RDMAQPC_RX_USE_SRQ_S 17 +#define RDMAQPC_RX_USE_SRQ BIT_ULL(17) +#define RDMAQPC_RX_CQN_S 18 +#define RDMAQPC_RX_CQN GENMASK_ULL(38, 18) +#define RDMAQPC_RX_DEST_QP_LOW12_S 39 +#define RDMAQPC_RX_DEST_QP_LOW12 GENMASK_ULL(50, 39) +#define RDMAQPC_RX_RQ_LPBL_SIZE_S 51 +#define RDMAQPC_RX_RQ_LPBL_SIZE GENMASK_ULL(52, 51) +#define RDMAQPC_RX_RSV_LKEY_EN_S 53 +#define RDMAQPC_RX_RSV_LKEY_EN BIT_ULL(53) +#define RDMAQPC_RX_RNR_TIMER_S 58 +#define RDMAQPC_RX_RNR_TIMER GENMASK_ULL(62, 58) +#define RDMAQPC_RX_ACK_CREDITS_S 63 +#define RDMAQPC_RX_ACK_CREDITS BIT_ULL(63) + +#define RDMAQPC_RX_QP_GROUP_NUM_S 
0 +#define RDMAQPC_RX_QP_GROUP_NUM GENMASK_ULL(10, 0) +#define RDMAQPC_RX_QP_FLOW_SET_S 11 +#define RDMAQPC_RX_QP_FLOW_SET GENMASK_ULL(23, 11) +#define RDMAQPC_RX_DEBUG_SET_S 40 +#define RDMAQPC_RX_DEBUG_SET GENMASK_ULL(49, 40) +#define RDMAQPC_RX_VHCA_ID_S 50 +#define RDMAQPC_RX_VHCA_ID GENMASK_ULL(59, 50) +#define RDMAQPC_RX_QUEUE_TC_S 60 +#define RDMAQPC_RX_QUEUE_TC GENMASK_ULL(62, 60) + +#define RDMAQPC_RX_DEST_IPADDR1_S 0 +#define RDMAQPC_RX_DEST_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_DEST_IPADDR0_S 32 +#define RDMAQPC_RX_DEST_IPADDR0 GENMASK_ULL(63, 32) +#define RDMAQPC_RX_DEST_IPADDR3_S 0 +#define RDMAQPC_RX_DEST_IPADDR3 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_DEST_IPADDR2_S 32 +#define RDMAQPC_RX_DEST_IPADDR2 GENMASK_ULL(63, 32) + +#define RDMAQPC_RX_LOCAL_IPADDR1_S 0 +#define RDMAQPC_RX_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define RDMAQPC_RX_LOCAL_IPADDR0_S 32 +#define RDMAQPC_RX_LOCAL_IPADDR0 GENMASK_ULL(63, 32) + +//QPC stucture +#define IRDMAQPC_DDP_VER_S 0 +#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0) +#define IRDMAQPC_IBRDENABLE_S 2 +#define IRDMAQPC_IBRDENABLE BIT_ULL(2) +#define IRDMAQPC_IPV4_S 3 +#define IRDMAQPC_IPV4 BIT_ULL(3) +#define IRDMAQPC_NONAGLE_S 4 +#define IRDMAQPC_NONAGLE BIT_ULL(4) +#define IRDMAQPC_INSERTVLANTAG_S 5 +#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5) +#define IRDMAQPC_ISQP1_S 6 +#define IRDMAQPC_ISQP1 BIT_ULL(6) +#define IRDMAQPC_TIMESTAMP_S 7 +#define IRDMAQPC_TIMESTAMP BIT_ULL(7) +#define IRDMAQPC_RQWQESIZE_S 8 +#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8) +#define IRDMAQPC_INSERTL2TAG2_S 11 +#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11) +#define IRDMAQPC_LIMIT_S 12 +#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12) + +#define IRDMAQPC_ECN_EN_S 14 +#define IRDMAQPC_ECN_EN BIT_ULL(14) +#define IRDMAQPC_DROPOOOSEG_S 15 +#define IRDMAQPC_DROPOOOSEG BIT_ULL(15) +#define IRDMAQPC_DUPACK_THRESH_S 16 +#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16) +#define IRDMAQPC_ERR_RQ_IDX_VALID_S 19 +#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19) 
+#define IRDMAQPC_DIS_VLAN_CHECKS_S 19 +#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19) +#define IRDMAQPC_DC_TCP_EN_S 25 +#define IRDMAQPC_DC_TCP_EN BIT_ULL(25) +#define IRDMAQPC_RCVTPHEN_S 28 +#define IRDMAQPC_RCVTPHEN BIT_ULL(28) +#define IRDMAQPC_XMITTPHEN_S 29 +#define IRDMAQPC_XMITTPHEN BIT_ULL(29) +#define IRDMAQPC_RQTPHEN_S 30 +#define IRDMAQPC_RQTPHEN BIT_ULL(30) +#define IRDMAQPC_SQTPHEN_S 31 +#define IRDMAQPC_SQTPHEN BIT_ULL(31) +#define IRDMAQPC_PPIDX_S 32 +#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32) +#define IRDMAQPC_PMENA_S 47 +#define IRDMAQPC_PMENA BIT_ULL(47) +#define IRDMAQPC_RDMAP_VER_S 62 +#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62) +#define IRDMAQPC_ROCE_TVER_S 60 +#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60) + +#define IRDMAQPC_SQADDR_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_SQADDR ZXDH_CQPHC_QPCTX + +#define IRDMAQPC_RQADDR_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_RQADDR ZXDH_CQPHC_QPCTX +#define IRDMAQPC_TTL_S 0 +#define IRDMAQPC_TTL GENMASK_ULL(7, 0) +#define IRDMAQPC_RQSIZE_S 8 +#define IRDMAQPC_RQSIZE GENMASK_ULL(11, 8) +#define IRDMAQPC_SQSIZE_S 12 +#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12) +#define IRDMAQPC_GEN1_SRCMACADDRIDX_S 16 +#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16) +#define IRDMAQPC_AVOIDSTRETCHACK_S 23 +#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23) +#define IRDMAQPC_TOS_S 24 +#define IRDMAQPC_TOS GENMASK_ULL(31, 24) +#define IRDMAQPC_SRCPORTNUM_S 32 +#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32) +#define IRDMAQPC_DESTPORTNUM_S 48 +#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48) +#define IRDMAQPC_DESTIPADDR0_S 32 +#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTIPADDR1_S 0 +#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0) +#define IRDMAQPC_DESTIPADDR2_S 32 +#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTIPADDR3_S 0 +#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDMSS_S 16 +#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16) +#define 
IRDMAQPC_SYN_RST_HANDLING_S 30 +#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30) +#define IRDMAQPC_VLANTAG_S 32 +#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32) +#define IRDMAQPC_ARPIDX_S 48 +#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48) +#define IRDMAQPC_FLOWLABEL_S 0 +#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0) +#define IRDMAQPC_WSCALE_S 20 +#define IRDMAQPC_WSCALE BIT_ULL(20) +#define IRDMAQPC_KEEPALIVE_S 21 +#define IRDMAQPC_KEEPALIVE BIT_ULL(21) +#define IRDMAQPC_IGNORE_TCP_OPT_S 22 +#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22) +#define IRDMAQPC_IGNORE_TCP_UNS_OPT_S 23 +#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23) +#define IRDMAQPC_TCPSTATE_S 28 +#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28) +#define IRDMAQPC_RCVSCALE_S 32 +#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32) +#define IRDMAQPC_SNDSCALE_S 40 +#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40) +#define IRDMAQPC_PDIDX_S 48 +#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48) +#define IRDMAQPC_PDIDXHI_S 20 +#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20) +#define IRDMAQPC_PKEY_S 32 +#define IRDMAQPC_PKEY GENMASK_ULL(47, 32) +#define IRDMAQPC_ACKCREDITS_S 20 +#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20) +#define IRDMAQPC_QKEY_S 32 +#define IRDMAQPC_QKEY GENMASK_ULL(63, 32) +#define IRDMAQPC_DESTQP_S 0 +#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0) +#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES_S 16 +#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16) +#define IRDMAQPC_KEEPALIVE_INTERVAL_S 24 +#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24) +#define IRDMAQPC_TIMESTAMP_RECENT_S 0 +#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0) +#define IRDMAQPC_TIMESTAMP_AGE_S 32 +#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32) +#define IRDMAQPC_SNDNXT_S 0 +#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0) +#define IRDMAQPC_ISN_S 32 +#define IRDMAQPC_ISN GENMASK_ULL(55, 32) +#define IRDMAQPC_PSNNXT_S 0 +#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0) +#define IRDMAQPC_LSN_S 32 +#define IRDMAQPC_LSN GENMASK_ULL(55, 32) +#define 
IRDMAQPC_SNDWND_S 32 +#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32) +#define IRDMAQPC_RCVNXT_S 0 +#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0) +#define IRDMAQPC_EPSN_S 0 +#define IRDMAQPC_EPSN GENMASK_ULL(23, 0) +#define IRDMAQPC_RCVWND_S 32 +#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32) +#define IRDMAQPC_SNDMAX_S 0 +#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDUNA_S 32 +#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32) +#define IRDMAQPC_PSNMAX_S 0 +#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0) +#define IRDMAQPC_PSNUNA_S 32 +#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32) +#define IRDMAQPC_SRTT_S 0 +#define IRDMAQPC_SRTT GENMASK_ULL(31, 0) +#define IRDMAQPC_RTTVAR_S 32 +#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32) +#define IRDMAQPC_SSTHRESH_S 0 +#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0) +#define IRDMAQPC_CWND_S 32 +#define IRDMAQPC_CWND GENMASK_ULL(63, 32) +#define IRDMAQPC_CWNDROCE_S 32 +#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32) +#define IRDMAQPC_SNDWL1_S 0 +#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0) +#define IRDMAQPC_SNDWL2_S 32 +#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32) +#define IRDMAQPC_ERR_RQ_IDX_S 32 +#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32) +#define IRDMAQPC_RTOMIN_S 57 +#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57) +#define IRDMAQPC_MAXSNDWND_S 0 +#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0) +#define IRDMAQPC_REXMIT_THRESH_S 48 +#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48) +#define IRDMAQPC_RNRNAK_THRESH_S 54 +#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54) +#define IRDMAQPC_TXCQNUM_S 0 +#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0) +#define IRDMAQPC_RXCQNUM_S 32 +#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32) +#define IRDMAQPC_STAT_INDEX_S 0 +#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0) +#define IRDMAQPC_Q2ADDR_S 8 +#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8) +#define IRDMAQPC_LASTBYTESENT_S 0 +#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0) +#define IRDMAQPC_MACADDRESS_S 16 +#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16) +#define 
IRDMAQPC_ORDSIZE_S 0 +#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0) + +#define IRDMAQPC_IRDSIZE_S 16 +#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16) + +#define IRDMAQPC_UDPRIVCQENABLE_S 19 +#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19) +#define IRDMAQPC_WRRDRSPOK_S 20 +#define IRDMAQPC_WRRDRSPOK BIT_ULL(20) +#define IRDMAQPC_RDOK_S 21 +#define IRDMAQPC_RDOK BIT_ULL(21) +#define IRDMAQPC_SNDMARKERS_S 22 +#define IRDMAQPC_SNDMARKERS BIT_ULL(22) +#define IRDMAQPC_DCQCNENABLE_S 22 +#define IRDMAQPC_DCQCNENABLE BIT_ULL(22) +#define IRDMAQPC_FW_CC_ENABLE_S 28 +#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28) +#define IRDMAQPC_RCVNOICRC_S 31 +#define IRDMAQPC_RCVNOICRC BIT_ULL(31) +#define IRDMAQPC_BINDEN_S 23 +#define IRDMAQPC_BINDEN BIT_ULL(23) +#define IRDMAQPC_FASTREGEN_S 24 +#define IRDMAQPC_FASTREGEN BIT_ULL(24) +#define IRDMAQPC_PRIVEN_S 25 +#define IRDMAQPC_PRIVEN BIT_ULL(25) +#define IRDMAQPC_TIMELYENABLE_S 27 +#define IRDMAQPC_TIMELYENABLE BIT_ULL(27) +#define IRDMAQPC_THIGH_S 52 +#define IRDMAQPC_THIGH GENMASK_ULL(63, 52) +#define IRDMAQPC_TLOW_S 32 +#define IRDMAQPC_TLOW GENMASK_ULL(39, 32) +#define IRDMAQPC_REMENDPOINTIDX_S 0 +#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0) +#define IRDMAQPC_USESTATSINSTANCE_S 26 +#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26) +#define IRDMAQPC_IWARPMODE_S 28 +#define IRDMAQPC_IWARPMODE BIT_ULL(28) +#define IRDMAQPC_RCVMARKERS_S 29 +#define IRDMAQPC_RCVMARKERS BIT_ULL(29) +#define IRDMAQPC_ALIGNHDRS_S 30 +#define IRDMAQPC_ALIGNHDRS BIT_ULL(30) +#define IRDMAQPC_RCVNOMPACRC_S 31 +#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31) +#define IRDMAQPC_RCVMARKOFFSET_S 32 +#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32) +#define IRDMAQPC_SNDMARKOFFSET_S 48 +#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48) + +#define IRDMAQPC_QPCOMPCTX_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPC_QPCOMPCTX ZXDH_CQPHC_QPCTX +#define IRDMAQPC_SQTPHVAL_S 0 +#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0) +#define IRDMAQPC_RQTPHVAL_S 8 +#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8) 
+#define IRDMAQPC_QSHANDLE_S 16 +#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16) +#define IRDMAQPC_EXCEPTION_LAN_QUEUE_S 32 +#define IRDMAQPC_EXCEPTION_LAN_QUEUE GENMASK_ULL(43, 32) +#define IRDMAQPC_LOCAL_IPADDR3_S 0 +#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0) +#define IRDMAQPC_LOCAL_IPADDR2_S 32 +#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32) +#define IRDMAQPC_LOCAL_IPADDR1_S 0 +#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0) +#define IRDMAQPC_LOCAL_IPADDR0_S 32 +#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32) +#define ZXDH_FW_VER_MINOR_S 0 +#define ZXDH_FW_VER_MINOR GENMASK_ULL(15, 0) +#define ZXDH_FW_VER_MAJOR_S 16 +#define ZXDH_FW_VER_MAJOR GENMASK_ULL(31, 16) +#define ZXDH_FEATURE_INFO_S 0 +#define ZXDH_FEATURE_INFO GENMASK_ULL(47, 0) +#define ZXDH_FEATURE_CNT_S 32 +#define ZXDH_FEATURE_CNT GENMASK_ULL(47, 32) +#define ZXDH_FEATURE_TYPE_S 48 +#define ZXDH_FEATURE_TYPE GENMASK_ULL(63, 48) +#define ZXDH_RSVD_S 41 +#define ZXDH_RSVD GENMASK_ULL(55, 41) + +#define IRDMAQPSQ_OPCODE_S 57 +#define IRDMAQPSQ_OPCODE GENMASK_ULL(62, 57) +#define IRDMAQPSQ_COPY_HOST_PBL_S 43 +#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43) +#define IRDMAQPSQ_ADDFRAGCNT_S 32 +#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(39, 32) +#define IRDMAQPSQ_UD_ADDFRAGCNT_S 29 +#define IRDMAQPSQ_UD_ADDFRAGCNT GENMASK_ULL(36, 29) +#define IRDMAQPSQ_PUSHWQE_S 56 +#define IRDMAQPSQ_PUSHWQE BIT_ULL(56) +#define IRDMAQPSQ_STREAMMODE_S 58 +#define IRDMAQPSQ_STREAMMODE BIT_ULL(58) +#define IRDMAQPSQ_WAITFORRCVPDU_S 59 +#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59) +#define IRDMAQPSQ_READFENCE_S 54 +#define IRDMAQPSQ_READFENCE BIT_ULL(54) +#define IRDMAQPSQ_LOCALFENCE_S 55 +#define IRDMAQPSQ_LOCALFENCE BIT_ULL(55) +#define IRDMAQPSQ_UDPHEADER_S 61 +#define IRDMAQPSQ_UDPHEADER BIT_ULL(61) +#define IRDMAQPSQ_L4LEN_S 42 +#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42) +#define IRDMAQPSQ_SIGCOMPL_S 56 +#define IRDMAQPSQ_SIGCOMPL BIT_ULL(56) +#define IRDMAQPSQ_SOLICITED_S 53 +#define IRDMAQPSQ_SOLICITED 
BIT_ULL(53) +#define IRDMAQPSQ_VALID_S 63 +#define IRDMAQPSQ_VALID BIT_ULL(63) + +#define IRDMAQPSQ_FRAG_TO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_FRAG_TO ZXDH_CQPHC_QPCTX +#define IRDMAQPSQ_FRAG_VALID_S 63 +#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63) +#define IRDMAQPSQ_FIRST_FRAG_VALID_S 0 +#define IRDMAQPSQ_FIRST_FRAG_VALID BIT_ULL(0) +#define IRDMAQPSQ_FIRST_FRAG_LEN_S 1 +#define IRDMAQPSQ_FIRST_FRAG_LEN GENMASK_ULL(31, 1) +#define IRDMAQPSQ_FIRST_FRAG_STAG_S 32 +#define IRDMAQPSQ_FIRST_FRAG_STAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_FRAG_LEN_S 32 +#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32) +#define IRDMAQPSQ_FRAG_STAG_S 0 +#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0) +#define IRDMAQPSQ_GEN1_FRAG_LEN_S 0 +#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0) +#define IRDMAQPSQ_GEN1_FRAG_STAG_S 32 +#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_REMSTAGINV_S 0 +#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0) +#define IRDMAQPSQ_DESTQKEY_S 0 +#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0) +#define IRDMAQPSQ_DESTQPN_S 32 +#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32) +#define IRDMAQPSQ_AHID_S 0 +#define IRDMAQPSQ_AHID GENMASK_ULL(18, 0) +#define IRDMAQPSQ_INLINEDATAFLAG_S 63 +#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(63) +#define IRDMAQPSQ_UD_INLINEDATAFLAG_S 50 +#define IRDMAQPSQ_UD_INLINEDATAFLAG BIT_ULL(50) +#define IRDMAQPSQ_WRITE_INLINEDATAFLAG_S 48 +#define IRDMAQPSQ_WRITE_INLINEDATAFLAG BIT_ULL(48) + +#define ZXDH_INLINE_VALID_S 7 +#define IRDMAQPSQ_INLINE_VALID_S 63 +#define IRDMAQPSQ_INLINE_VALID BIT_ULL(63) +#define IRDMAQPSQ_INLINEDATALEN_S 50 +#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(62, 55) +#define IRDMAQPSQ_UD_INLINEDATALEN_S 42 +#define IRDMAQPSQ_UD_INLINEDATALEN GENMASK_ULL(49, 42) +#define IRDMAQPSQ_WRITE_INLINEDATALEN_S 40 +#define IRDMAQPSQ_WRITE_INLINEDATALEN GENMASK_ULL(47, 40) +#define IRDMAQPSQ_IMMDATAFLAG_S 52 +#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(52) +#define IRDMAQPSQ_REPORTRTT_S 46 +#define IRDMAQPSQ_REPORTRTT 
BIT_ULL(46) + +#define IRDMAQPSQ_IMMDATA_VALID_S 63 +#define IRDMAQPSQ_IMMDATA_VALID BIT_ULL(63) +#define IRDMAQPSQ_IMMDATA_S 0 +#define IRDMAQPSQ_IMMDATA GENMASK_ULL(31, 0) +#define IRDMAQPSQ_REMSTAG_S 0 +#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_REMTO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_REMTO ZXDH_CQPHC_QPCTX + +#define IRDMAQPSQ_STAGRIGHTS_S 47 +#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(51, 47) +#define IRDMAQPSQ_VABASEDTO_S 53 +#define IRDMAQPSQ_VABASEDTO BIT_ULL(53) +#define IRDMAQPSQ_MEMWINDOWTYPE_S 52 +#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(52) + +#define IRDMAQPSQ_MWLEN_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_MWLEN ZXDH_CQPHC_QPCTX +#define IRDMAQPSQ_PARENTMRSTAG_S 32 +#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32) +#define IRDMAQPSQ_MWSTAG_S 0 +#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_BASEVA_TO_FBO_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPSQ_BASEVA_TO_FBO ZXDH_CQPHC_QPCTX + +#define IRDMAQPSQ_LOCSTAG_S 0 +#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0) + +#define IRDMAQPSQ_STAGKEY_S 0 +#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0) +#define IRDMAQPSQ_STAGINDEX_S 8 +#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8) +#define IRDMAQPSQ_COPYHOSTPBLS_S 43 +#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43) +#define IRDMAQPSQ_LPBLSIZE_S 40 +#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(41, 40) +#define IRDMAQPSQ_HPAGESIZE_S 43 +#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(46, 43) +#define IRDMAQPSQ_STAGLEN_S 0 +#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0) +#define IRDMAQPSQ_FIRSTPMPBLIDXLO_S 46 +#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(61, 46) +#define IRDMAQPSQ_FIRSTPMPBLIDXHI_S 52 +#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(63, 52) +#define IRDMAQPSQ_PBLADDR_S 51 +#define IRDMAQPSQ_PBLADDR GENMASK_ULL(51, 0) + +//QP RQ WQE common fields +#define IRDMAQPRQ_SIGNATURE_S 16 +#define IRDMAQPRQ_SIGNATURE GENMASK_ULL(31, 16) + +#define IRDMAQPRQ_ADDFRAGCNT_S IRDMAQPSQ_ADDFRAGCNT_S +#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT + 
+#define IRDMAQPRQ_VALID_S IRDMAQPSQ_VALID_S +#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID + +#define IRDMAQPRQ_COMPLCTX_S ZXDH_CQPHC_QPCTX_S +#define IRDMAQPRQ_COMPLCTX ZXDH_CQPHC_QPCTX + +#define IRDMAQPRQ_FRAG_LEN_S IRDMAQPSQ_FRAG_LEN_S +#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN + +#define IRDMAQPRQ_STAG_S IRDMAQPSQ_FRAG_STAG_S +#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG + +#define IRDMAQPRQ_TO_S IRDMAQPSQ_FRAG_TO_S +#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO + +#define IRDMAQPSRQ_RSV GENMASK_ULL(63, 40) +#define IRDMAQPSRQ_VALID_SGE_NUM GENMASK_ULL(39, 32) +#define IRDMAQPSRQ_SIGNATURE GENMASK_ULL(31, 24) +#define IRDMAQPSRQ_NEXT_WQE_INDEX GENMASK_ULL(15, 0) +#define IRDMAQPSRQ_START_PADDING BIT_ULL(63) +#define IRDMAQPSRQ_FRAG_LEN GENMASK_ULL(62, 32) +#define IRDMAQPSRQ_FRAG_STAG GENMASK_ULL(31, 0) + +//QP RQ DBSA fields +#define IRDMAQPDBSA_RQ_POLARITY_S 15 +#define IRDMAQPDBSA_RQ_POLARITY BIT_ULL(15) +#define IRDMAQPDBSA_RQ_SW_HEAD_S 0 +#define IRDMAQPDBSA_RQ_SW_HEAD GENMASK_ULL(14, 0) + +#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26) +#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27) +#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28) + +#define ZXDH_QUERY_FPM_MAX_QPS_S 0 +#define ZXDH_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0) +#define ZXDH_QUERY_FPM_MAX_CQS_S 0 +#define ZXDH_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0) +#define ZXDH_QUERY_FPM_FIRST_PE_SD_INDEX_S 0 +#define ZXDH_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0) +#define ZXDH_QUERY_FPM_MAX_PE_SDS_S 32 +#define ZXDH_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32) + +#define ZXDH_QUERY_FPM_MAX_CEQS_S 0 +#define ZXDH_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0) +#define ZXDH_QUERY_FPM_XFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_Q1BLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_HTMULTIPLIER_S 16 +#define ZXDH_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16) +#define ZXDH_QUERY_FPM_TIMERBUCKET_S 32 +#define ZXDH_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32) 
+#define ZXDH_QUERY_FPM_RRFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_RRFFLBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_QUERY_FPM_OOISCFBLOCKSIZE_S 32 +#define ZXDH_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32) +#define ZXDH_SHMC_PAGE_ALLOCATED_HMC_FN_ID_S 0 +#define ZXDH_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(5, 0) + +#define IRDMATX_WIN_START_PSN GENMASK_ULL(23, 0) + +//qpc table index +#define ZXDH_QPC_RETY_COUNT_BYTE_OFFSET 0x00 +#define ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET 0x00 +#define ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET 0x00 +#define ZXDH_QPC_READ_RETRY_FALG_BYTE_OFFSET 0x00 +#define ZXDH_QPC_RNR_RETRY_FALG_BYTE_OFFSET 0x08 +#define ZXDH_QPC_SEND_PSN_BYTE_OFFSET 0x18 +#define ZXDH_QPC_RETRY_FALG_BYTE_OFFSET 0x20 +#define ZXDH_QPC_ACK_ERR_FLAG_BYTE_OFFSET 0x28 +#define ZXDH_QPC_ERR_FLAG_BYTE_OFFSET 0x28 +#define ZXDH_QPC_PACKAGE_ERR_FLAG_BYTE_OFFSET 0x30 +#define ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET 0x38 +#define ZXDH_QPC_SEND_EPSN_BYTE_OFFSET 0x110 +#define ZXDH_QPC_RECV_ERR_FLAG_BYTE_OFFSET 0x30 +#define ZXDH_QPC_TX_WIN_RADDR_BYTE_OFFSET 0x30 +#define ZXDH_QPC_RNR_RETRY_TIME_L_BYTE_OFFSET 0x08 +#define ZXDH_QPC_RNR_RETRY_TIME_H_BYTE_OFFSET 0x10 +#define ZXDH_QPC_RNR_RETRY_THRESHOLD_BYTE_OFFSET 0x08 + +#define ZXDH_TX_WIN_START_PSN_BYTE_OFFSET 0x00 + +//Flow Control Algorithms +/*DCQCN*/ +#define ZXDH_DCQCN_NP_CNP_DSCP GENMASK_ULL(7, 2) +#define ZXDH_DCQCN_NP_CNP_PRIO GENMASK_ULL(2, 0) +#define ZXDH_DCQCN_NP_CNP_PRIO_MODE BIT_ULL(0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X GENMASK_ULL(15, 0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y GENMASK_ULL(7, 0) +#define ZXDH_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX GENMASK_ULL(8, 0) +#define ZXDH_DCQCN_PRG_TIME_RESET GENMASK_ULL(31, 16) +#define ZXDH_DCQCN_RPG_CLAMP_TGT_RATE BIT_ULL(0) +#define ZXDH_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC BIT_ULL(0) +#define ZXDH_DCQCN_RP_DCE_TCP_RTT GENMASK_ULL(31, 16) 
+#define ZXDH_DCQCN_DCE_TCP_G GENMASK_ULL(31, 16) +#define ZXDH_DCQCN_RPG_GD GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_INITIAL_ALPHA_VALUE GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_MIN_DEC_FAC GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_THRESHOLD GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_RATIO_INCREASE BIT_ULL(0) +#define ZXDH_DCQCN_RPG_AI_RATIO GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_HAI_RATIO GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_BYTE_RESET GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_AI_RATE GENMASK_ULL(31, 0) +#define ZXDH_DCQCN_RPG_HAI_RATE GENMASK_ULL(31, 0) +#define ZXDH_RPG_MAX_RATE GENMASK_ULL(31, 0) +#define ZXDH_RPG_MIN_RATE GENMASK_ULL(31, 0) +/*RTT*/ +#define ZXDH_RTT_VF_DELTA GENMASK_ULL(31, 0) +/*NP_PSN_WRAPAROUND*/ +#define ZXDH_NP_PSN_WRAPAROUND_PSN_WRAPAROUND_ENABLE BIT_ULL(30) +#define ZXDH_NP_PMTU GENMASK_ULL(2, 0) + +#define ZXDH_GET_CURRENT_AEQ_ELEM(_aeq) \ + ((_aeq)->aeqe_base[ZXDH_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf) + +#define ZXDH_GET_CURRENT_CEQ_ELEM(_ceq) \ + ((_ceq)->ceqe_base[ZXDH_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf) + +#define ZXDH_GET_CEQ_ELEM_AT_POS(_ceq, _pos) ((_ceq)->ceqe_base[_pos].buf) + +#define ZXDH_RING_GET_NEXT_TAIL(_ring, _idx) \ + (((_ring).tail + (_idx)) % (_ring).size) + +#define ZXDH_GET_CURRENT_CQ_ELEM(_cq) \ + ((_cq)->cq_base[ZXDH_RING_CURRENT_HEAD((_cq)->cq_ring)].buf) +#define ZXDH_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \ + (((struct zxdh_extended_cqe \ + *)((_cq)->cq_base))[ZXDH_RING_CURRENT_HEAD((_cq)->cq_ring)] \ + .buf) + +#define ZXDH_RING_INIT(_ring, _size) \ + { \ + (_ring).head = 0; \ + (_ring).tail = 0; \ + (_ring).size = (_size); \ + } +#define ZXDH_RING_SIZE(_ring) ((_ring).size) +#define ZXDH_RING_CURRENT_HEAD(_ring) ((_ring).head) +#define ZXDH_RING_CURRENT_TAIL(_ring) ((_ring).tail) + +#define ZXDH_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!ZXDH_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + 
} else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < size) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_SQ_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!ZXDH_SQ_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < \ + (size - 256)) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = -ENOSPC; \ + } \ + } +#define ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \ + (_ring).head = ((_ring).head + (_count)) % (_ring).size + +#define ZXDH_RING_MOVE_TAIL(_ring) \ + (_ring).tail = ((_ring).tail + 1) % (_ring).size + +#define ZXDH_RING_MOVE_HEAD_NOCHECK(_ring) \ + (_ring).head = ((_ring).head + 1) % (_ring).size + +#define ZXDH_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \ + (_ring).tail = ((_ring).tail + (_count)) % (_ring).size + +#define ZXDH_RING_SET_TAIL(_ring, _pos) (_ring).tail = (_pos) % (_ring).size + +#define ZXDH_RING_FULL_ERR(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 1))) + +#define ZXDH_ERR_RING_FULL2(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 2))) + +#define ZXDH_ERR_RING_FULL3(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 3))) + +#define ZXDH_SQ_RING_FULL_ERR(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 257))) + +#define ZXDH_ERR_SQ_RING_FULL2(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 258))) +#define ZXDH_ERR_SQ_RING_FULL3(_ring) \ + ((ZXDH_RING_USED_QUANTA(_ring) 
== ((_ring).size - 259))) +#define ZXDH_RING_MORE_WORK(_ring) ((ZXDH_RING_USED_QUANTA(_ring) != 0)) + +#define ZXDH_RING_USED_QUANTA(_ring) \ + ((((_ring).head + (_ring).size - (_ring).tail) % (_ring).size)) + +#define ZXDH_RING_FREE_QUANTA(_ring) \ + (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 1)) + +#define ZXDH_SQ_RING_FREE_QUANTA(_ring) \ + (((_ring).size - ZXDH_RING_USED_QUANTA(_ring) - 257)) + +#define ZXDH_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \ + { \ + index = ZXDH_RING_CURRENT_HEAD(_ring); \ + ZXDH_RING_MOVE_HEAD(_ring, _retcode); \ + } + +#define ZXDH_GET_QPC_ITEM(type, qp_ctx, index, name) \ + ({ \ + type ___t; \ + u64 temp; \ + get_64bit_val(qp_ctx, index, &temp); \ + ___t = (type)FIELD_GET(name, temp); \ + ___t; \ + }) + +enum zxdh_qp_wqe_size { + ZXDH_WQE_SIZE_16 = 16, + ZXDH_WQE_SIZE_32 = 32, + ZXDH_WQE_SIZE_64 = 64, + ZXDH_WQE_SIZE_96 = 96, + ZXDH_WQE_SIZE_128 = 128, + ZXDH_WQE_SIZE_256 = 256, + ZXDH_WQE_SIZE_512 = 512, +}; + +enum zxdh_cq_wqe_size { + ZXDH_CQE_SIZE_64 = 0, + ZXDH_CQE_SIZE_128 = 1, + ZXDH_CQE_SIZE_RESV = 2, +}; + +enum zxdh_cqc_set_field_mask { + ZXDH_CQC_SET_LPBLE_SIZE = 1 << 5, + ZXDH_CQC_SET_CQ_STATE = 1 << 8, + ZXDH_CQC_SET_CQ_SIZE = 1 << 11, + ZXDH_CQC_SET_CQ_ADDR = 1 << 13, +}; + +enum zxdh_cqc_set_cq_moderation { + ZXDH_CQC_SET_CQ_COUNT_AND_PERIOD = 1 << 1, +}; + +#define ZXDH_CQC_SET_FIELD_ALL 0xffff +#define ZXDH_CQC_SET_FIELD_RESIZE \ + (ZXDH_CQC_SET_LPBLE_SIZE | ZXDH_CQC_SET_CQ_SIZE | ZXDH_CQC_SET_CQ_ADDR) +#define ZXDH_CQC_SET_FIELD_MODIFY ZXDH_CQC_SET_CQ_COUNT_AND_PERIOD + +enum zxdh_ws_node_op { + ZXDH_ADD_NODE = 0, + ZXDH_MODIFY_NODE, + ZXDH_DEL_NODE, +}; + +enum { + ZXDH_Q_ALIGNMENT_M = (128 - 1), + ZXDH_AEQ_ALIGNMENT_M = (256 - 1), + ZXDH_Q2_ALIGNMENT_M = (256 - 1), + ZXDH_CEQ_ALIGNMENT_M = (256 - 1), + ZXDH_CQ0_ALIGNMENT_M = (256 - 1), + ZXDH_HOST_CTX_ALIGNMENT_M = (4 - 1), + ZXDH_SHADOWAREA_M = (128 - 1), + ZXDH_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1), + ZXDH_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1), +}; + 
+enum zxdh_alignment { + ZXDH_CQP_ALIGNMENT = 0x200, + ZXDH_AEQ_ALIGNMENT = 0x100, + ZXDH_CEQ_ALIGNMENT = 0x100, + ZXDH_CQ0_ALIGNMENT = 0x100, + ZXDH_SD_BUF_ALIGNMENT = 0x80, + ZXDH_FEATURE_BUF_ALIGNMENT = 0x10, + ZXDH_QPC_ALIGNMENT = 0x20, + ZXDH_CQC_ALIGNMENT = 0x20, + ZXDH_CEQC_ALIGNMENT = 0x20, + ZXDH_AEQC_ALIGNMENT = 0x20, + ZXDH_SRQC_ALIGNMENT = 0x20, +}; + +enum icrdma_protocol_used { + ICRDMA_ANY_PROTOCOL = 0, + ICRDMA_IWARP_PROTOCOL_ONLY = 1, + ICRDMA_ROCE_PROTOCOL_ONLY = 2, +}; + +/** + * set_64bit_val - set 64 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val) +{ + wqe_words[byte_index >> 3] = cpu_to_le64(val); +} + +/** + * set_32bit_val - set 32 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val) +{ + wqe_words[byte_index >> 2] = cpu_to_le32(val); +} + +/** + * set_16bit_val - set 16 bit value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @val: value to write + **/ +static inline void set_16bit_val(__le16 *wqe_words, u32 byte_index, u16 val) +{ + wqe_words[byte_index >> 1] = cpu_to_le16(val); +} + +/** + * get_64bit_val - read 64 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to read from + * @val: read value + **/ +static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val) +{ + *val = le64_to_cpu(wqe_words[byte_index >> 3]); +} + +/** + * get_32bit_val - read 32 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to read from + * @val: return 32 bit value + **/ +static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val) +{ + *val = le32_to_cpu(wqe_words[byte_index >> 2]); +} +#endif /* ZXDH_DEFS_H */ diff --git a/drivers/infiniband/hw/zrdma/hmc.c 
b/drivers/infiniband/hw/zrdma/hmc.c new file mode 100644 index 000000000000..a3befc293ab4 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/hmc.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" +#include "smmu/kernel/adk_mmu600.h" + +extern enum zxdh_hmc_rsrc_type iw_hmc_obj_types[ZXDH_HMC_IW_TXWINDOW + 1]; + +int zxdh_sc_create_date_cap_obj(struct zxdh_sc_dev *dev) +{ + struct zxdh_hmc_sd_entry *sd_entry = NULL; + u32 sd_lmt = 0; + u32 i = 0, ret = 0; + u32 hmc_entry_total = 0; + u64 fpm_addr = 0, fpm_limit = 0; + struct zxdh_hw *hw = dev->hw; + struct zxdh_dma_mem dma_mem = {}; + u64 alloc_len = 0; + struct smmu_pte_request smmu_pte_cfg = {}; + + memset(&smmu_pte_cfg, 0, sizeof(smmu_pte_cfg)); + + fpm_addr = dev->data_cap_sd.data_cap_base; + fpm_limit = dev->data_cap_sd.data_len; + fpm_limit = ALIGN(fpm_limit, ZXDH_HMC_DIRECT_BP_SIZE); + + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + if (sd_lmt == 1) { + hmc_entry_total++; + } else { + hmc_entry_total = sd_lmt - 1; + } + sd_entry = kzalloc(sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total, + GFP_KERNEL); + if (!sd_entry) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + dev->data_cap_sd.entry = sd_entry; + dev->data_cap_sd.sd_cnt = hmc_entry_total; + pr_err("HMC: zxdh_sc_create_date_cap_obj sd_lmt:%d\n", sd_lmt); + for (i = 0; i < sd_lmt - 1; i++) { // 满足2M空间 + alloc_len = ZXDH_HMC_DIRECT_BP_SIZE; //按实际2M分配空间 + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + + if (!dma_mem.va) + return -ENOMEM; + + memset(dma_mem.va, 0, alloc_len); + // 
********************调用SMMU接口*********************** + smmu_pte_cfg.phy_addr = dma_mem.pa; + smmu_pte_cfg.vir_addr = fpm_addr; + smmu_pte_cfg.size = alloc_len; + smmu_pte_cfg.stream_id = dev->hmc_fn_id; + // bspSmmuSetPTE(&smmu_pte_cfg,dev); // for Crash + smmu_pte_cfg.access_perm = 0x03; + ret = zxdh_smmu_set_pte(&smmu_pte_cfg, dev); + if (ret) + return ret; + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + sd_entry->valid = true; + fpm_addr = fpm_addr + alloc_len; + sd_entry++; + } + + return 0; +} + +/** + * zxdh_sc_create_hmc_obj - allocate backing store for hmc objects + * @dev: pointer to the device structure + * @info: pointer to zxdh_hmc_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. + */ +int zxdh_sc_create_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_create_obj_info *info) +{ + struct zxdh_hmc_sd_entry *sd_entry; + u32 sd_lmt = 0; + u32 i = 0, cnt = 0, ret = 0; + u64 fpm_addr = 0, fpm_limit = 0; + struct zxdh_hw *hw = dev->hw; + struct zxdh_dma_mem dma_mem = {}; + u64 alloc_len = 0; + struct smmu_pte_request smmu_pte_cfg = {}; + + memset(&smmu_pte_cfg, 0, sizeof(smmu_pte_cfg)); + + fpm_addr = info->hmc_info->hmc_obj[info->rsrc_type].base; + switch (info->rsrc_type) { + case ZXDH_HMC_IW_QP: + cnt = dev->hmc_pf_manager_info.total_qp_cnt; + break; + case ZXDH_HMC_IW_CQ: + cnt = dev->hmc_pf_manager_info.total_cq_cnt; + break; + case ZXDH_HMC_IW_SRQ: + cnt = dev->hmc_pf_manager_info.total_srq_cnt; + break; + case ZXDH_HMC_IW_AH: + cnt = dev->hmc_pf_manager_info.total_ah_cnt; + break; + case ZXDH_HMC_IW_MR: + cnt = dev->hmc_pf_manager_info.total_mrte_cnt; + break; + default: + cnt = info->hmc_info->hmc_obj[info->rsrc_type].cnt; + break; + } + fpm_limit = info->hmc_info->hmc_obj[info->rsrc_type].size * cnt; + fpm_limit = ALIGN(fpm_limit, ZXDH_HMC_DIRECT_BP_SIZE); + + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + + for (i = 0; i < sd_lmt; i++) { // 满足2M空间 
+ sd_entry = &info->hmc_info->sd_table.sd_entry[info->add_sd_cnt]; + + alloc_len = ZXDH_HMC_DIRECT_BP_SIZE; //按实际2M分配空间 + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + + if (!dma_mem.va) + return -ENOMEM; + + memset(dma_mem.va, 0, alloc_len); + // ********************调用SMMU接口*********************** + smmu_pte_cfg.phy_addr = dma_mem.pa; + smmu_pte_cfg.vir_addr = fpm_addr; + smmu_pte_cfg.size = alloc_len; + smmu_pte_cfg.stream_id = dev->hmc_fn_id; + // bspSmmuSetPTE(&smmu_pte_cfg,dev); // for Crash + smmu_pte_cfg.access_perm = 0x03; + ret = zxdh_smmu_set_pte(&smmu_pte_cfg, dev); + if (ret) + return ret; + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + sd_entry->u.bp.sd_pd_index = info->add_sd_cnt; + info->hmc_info->sd_indexes[info->add_sd_cnt] = + (u16)info->add_sd_cnt; + sd_entry->valid = true; + fpm_addr = fpm_addr + alloc_len; + info->add_sd_cnt++; + } + + return 0; +} + +static int zxdh_pf2vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_vfdev *vf_dev, u32 rsrc_type) +{ + struct zxdh_hmc_sd_entry *sd_entry = NULL; + struct zxdh_dma_mem dma_mem = {}; + u32 pble_hmc_comm_index = 0, pages = 0; + u32 unallocated_pble = 0, ret = 0; + u64 alloc_len = 0, size = 0; + u64 next_fpm_addr = 0, fpm_base_addr = 0; + u32 pd_idx = 0, rel_pd_idx = 0; + struct zxdh_hmc_info *hmc_info = &vf_dev->hmc_info; + + struct smmu_pte_request smmu_pte_cfg = {}; + + if (rsrc_type == ZXDH_HMC_IW_PBLE) { + pble_hmc_comm_index = hmc_info->pble_hmc_index; + unallocated_pble = vf_dev->pbleq_unallocated_pble; + fpm_base_addr = vf_dev->pbleq_fpm_base_addr; + next_fpm_addr = vf_dev->pbleq_next_fpm_addr; + } else if (rsrc_type == ZXDH_HMC_IW_PBLE_MR) { + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + unallocated_pble = vf_dev->pblemr_unallocated_pble; + fpm_base_addr = vf_dev->pblemr_fpm_base_addr; + next_fpm_addr = vf_dev->pblemr_next_fpm_addr; + } + 
+ if (unallocated_pble < PBLE_PER_PAGE) + return -ENOMEM; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + pd_idx = (u32)((next_fpm_addr - fpm_base_addr) / + ZXDH_HMC_PAGED_BP_SIZE); //4096 + rel_pd_idx = (pd_idx % ZXDH_HMC_PD_CNT_IN_SD); // 512 + pages = (rel_pd_idx) ? (ZXDH_HMC_PD_CNT_IN_SD - rel_pd_idx) : + ZXDH_HMC_PD_CNT_IN_SD; + + pages = min(pages, + unallocated_pble >> PBLE_512_SHIFT); // PBLE_512_SHIFT==9 + + if (!sd_entry->valid) { + alloc_len = pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(dev->hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) { + pr_info("%s %d failed to alloc mem\n", __func__, __LINE__); + return -ENOMEM; + } + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + smmu_pte_cfg.phy_addr = dma_mem.pa; + smmu_pte_cfg.vir_addr = next_fpm_addr; + smmu_pte_cfg.size = alloc_len; + smmu_pte_cfg.stream_id = dev->hmc_fn_id; // 这个SID后续需要修改 + smmu_pte_cfg.access_perm = 0x03; + // 这里调用SMMU接口 + ret = zxdh_smmu_set_pte(&smmu_pte_cfg, dev); + if (ret) { + pr_info("%s %d set pte failed ret:%d\n", __func__, __LINE__, ret); + return ret; + } + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + hmc_info->sd_table.use_cnt = pble_hmc_comm_index; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + + sd_entry->valid = true; + size = pages << HMC_PAGED_BP_SHIFT; + if (rsrc_type == ZXDH_HMC_IW_PBLE) { + vf_dev->pbleq_next_fpm_addr += size; + vf_dev->pbleq_unallocated_pble -= (u32)(size >> 3); + } else { + vf_dev->pblemr_next_fpm_addr += size; + vf_dev->pblemr_unallocated_pble -= (u32)(size >> 3); + } + + return 0; +} + +int zxdh_vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages) +{ + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem dma_mem = {}; + struct zxdh_pci_f *rf; + u64 alloc_len; + u32 
pble_hmc_comm_index = 0, cnt = 0, val = 0; + struct zxdh_hw *hw = pble_rsrc->dev->hw; + int status = 0; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + if (pble_rsrc->pble_type == PBLE_QUEUE) + pble_hmc_comm_index = hmc_info->pble_hmc_index; + else + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + + if (!sd_entry->valid) { + if (!dev->hmc_use_dpu_ddr) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + if (pble_rsrc->pble_type == PBLE_QUEUE) { + zxdh_sc_send_mailbox_cmd( + dev, ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE, 0, + 0, 0, rf->vf_id); + } else { + zxdh_sc_send_mailbox_cmd( + dev, ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE, + 0, 0, 0, rf->vf_id); + } + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + status = -ETIMEDOUT; + pr_info("vhca_id:%d waiting completed PBLE mailbox too long time,timeout!\n", dev->vhca_id); + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + } + + alloc_len = (u64)pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + if (pble_rsrc->pble_type == PBLE_QUEUE) + hmc_info->pble_hmc_index++; + else + hmc_info->pble_mr_hmc_index++; + + hmc_info->sd_table.use_cnt++; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + return status; +} + +int zxdh_add_pble_hmc_obj(struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages) +{ + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem dma_mem = {}; + + struct smmu_pte_request 
smmu_pte_cfg = {}; + + u64 alloc_len; + u32 pble_hmc_comm_index; + u32 ret = 0; + struct zxdh_hw *hw = pble_rsrc->dev->hw; + + memset(&smmu_pte_cfg, 0, sizeof(smmu_pte_cfg)); + + if (pble_rsrc->pble_type == PBLE_QUEUE) + pble_hmc_comm_index = hmc_info->pble_hmc_index; + else + pble_hmc_comm_index = hmc_info->pble_mr_hmc_index; + + sd_entry = &hmc_info->sd_table.sd_entry[pble_hmc_comm_index]; + + if (!sd_entry->valid) { + alloc_len = (u64)pages * ZXDH_HMC_PAGED_BP_SIZE; + dma_mem.size = ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + + memset(dma_mem.va, 0, dma_mem.size); + + memcpy(&sd_entry->u.bp.addr, &dma_mem, + sizeof(sd_entry->u.bp.addr)); + + if (false == pble_rsrc->dev->hmc_use_dpu_ddr) { // is HOST DDR + memset(&dma_mem, 0, sizeof(struct zxdh_dma_mem)); + dma_mem.size = + ALIGN(alloc_len, ZXDH_HMC_PD_BP_BUF_ALIGNMENT); + dma_mem.va = + dma_alloc_coherent(hw->device, dma_mem.size, + &dma_mem.pa, GFP_KERNEL); + if (!dma_mem.va) + return -ENOMEM; + memset(dma_mem.va, 0, dma_mem.size); + + memcpy(&sd_entry->u.bp.addr_hardware, &dma_mem, + sizeof(sd_entry->u.bp.addr_hardware)); + + smmu_pte_cfg.phy_addr = dma_mem.pa; + smmu_pte_cfg.vir_addr = pble_rsrc->next_fpm_addr; + smmu_pte_cfg.size = alloc_len; + smmu_pte_cfg.stream_id = + pble_rsrc->dev->hmc_fn_id; // 这个SID后续需要修改 + smmu_pte_cfg.access_perm = 0x03; + // 这里调用SMMU接口 + ret = zxdh_smmu_set_pte(&smmu_pte_cfg, pble_rsrc->dev); + if (ret) + return ret; + } + + sd_entry->u.bp.sd_pd_index = pble_hmc_comm_index; + if (pble_rsrc->pble_type == PBLE_QUEUE) + hmc_info->pble_hmc_index++; + else + hmc_info->pble_mr_hmc_index++; + + hmc_info->sd_table.use_cnt++; + hmc_info->sd_table.sd_entry->entry_type = ZXDH_SD_TYPE_DIRECT; + } + + return 0; +} + +/** + * zxdh_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information 
structure + * @idx: the page index + */ +int zxdh_prep_remove_sd_bp(struct zxdh_hmc_info *hmc_info, u32 idx) +{ + struct zxdh_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + hmc_info->sd_table.use_cnt--; + sd_entry->valid = false; + + return 0; +} + +/** + * zxdh_get_next_vf_idx - return the next vf_idx available + * @dev: pointer to RDMA dev structure + */ +static u16 zxdh_get_next_vf_idx(struct zxdh_sc_dev *dev) +{ + u16 vf_idx; + + for (vf_idx = 0; vf_idx < dev->num_vfs; vf_idx++) { + if (!dev->vf_dev[vf_idx]) + break; + } + + return vf_idx < dev->num_vfs ? vf_idx : ZXDH_VCHNL_INVALID_VF_IDX; +} + +static int zxdh_get_vf_hmc_baseinfo(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *hmc_obj, + u16 iw_vf_idx, u16 vf_id) +{ + u16 i = 0; + + for (i = 0; i < ZXDH_HMC_IW_MAX; i++) { + if ((i == ZXDH_HMC_IW_IRD) || (i == ZXDH_HMC_IW_TXWINDOW)) { + hmc_obj[i].max_cnt = dev->hmc_pf_manager_info.vf_qp_cnt; + hmc_obj[i].cnt = dev->hmc_pf_manager_info.vf_qp_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + (dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } else if (i == ZXDH_HMC_IW_PBLE_MR) { + hmc_obj[i].max_cnt = + dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[i].cnt = dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + (dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } else if (i == ZXDH_HMC_IW_PBLE) { + hmc_obj[i].max_cnt = + dev->hmc_pf_manager_info.vf_pblequeue_cnt; + hmc_obj[i].cnt = + dev->hmc_pf_manager_info.vf_pblequeue_cnt; + hmc_obj[i].size = dev->hmc_info->hmc_obj[i].size; + hmc_obj[i].type = dev->hmc_info->hmc_obj[i].type; + hmc_obj[i].base = dev->hmc_info->hmc_obj[i].base + + 
(dev->hmc_info->hmc_obj[i].cnt + + hmc_obj[i].cnt * vf_id) * + hmc_obj[i].size; + } + } + + return 0; +} + +struct zxdh_vfdev *zxdh_pf_get_vf_hmc_res(struct zxdh_sc_dev *dev, u16 vf_id) +{ + struct zxdh_virt_mem virt_mem; + struct zxdh_vfdev *vf_dev; + u16 iw_vf_idx = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->vf_dev_lock, flags); + iw_vf_idx = zxdh_get_next_vf_idx(dev); + if (iw_vf_idx == ZXDH_VCHNL_INVALID_VF_IDX || + iw_vf_idx >= ZXDH_MAX_PE_ENA_VF_COUNT) { + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); + return NULL; + } + + virt_mem.size = sizeof(struct zxdh_vfdev) + + sizeof(struct zxdh_hmc_obj_info) * ZXDH_HMC_IW_MAX; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + + if (!virt_mem.va) { + pr_err("VIRT: VF%u Unable to allocate a VF device structure.\n", + vf_id); + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); + return NULL; + } + + vf_dev = virt_mem.va; + vf_dev->pf_dev = dev; + vf_dev->vf_id = vf_id; + vf_dev->iw_vf_idx = iw_vf_idx; + vf_dev->pf_hmc_initialized = false; + vf_dev->hmc_info.hmc_obj = (struct zxdh_hmc_obj_info *)(&vf_dev[1]); + zxdh_get_vf_hmc_baseinfo(dev, vf_dev->hmc_info.hmc_obj, iw_vf_idx, + vf_id); + + refcount_set(&vf_dev->refcnt, 1); + dev->vf_dev[iw_vf_idx] = vf_dev; + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); + return vf_dev; +} + +/** + * zxdh_mailbox_worker - process mailbox message + * @work: work task structure + */ +static void zxdh_mailbox_worker(struct work_struct *work) +{ + u32 ret = 0; + int resp_code = 0; + u32 i = 0; + u16 srcvhcaid = 0, mb_vfid = 0, iw_vf_idx = 0, vf_vhca_id = 0; + u8 opt = 0xff; // 避免与mailbox消息类型宏定义同值. 
+ struct zxdh_vfdev *vf_dev = NULL; + struct zxdh_hmc_create_obj_info obj_info = {}; + DPP_PF_INFO_T pf_info = { 0 }; + void *vf_dev_mac_addr = NULL; + void *vf_dev_addr = NULL; + u64 op_ret_val; + __le64 addrbuf[ZXDH_MAILBOX_ADDR_BUF_LEN]; + struct zxdh_sc_dev *dev; + struct zxdh_pci_f *rf; + struct iidc_core_dev_info *cdev_info; + struct mailbox_work *dwork = container_of(work, struct mailbox_work, work); + + dev = dwork->dev; + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + cdev_info = rf->cdev; + for (i = 0; i < ZXDH_MAILBOX_ADDR_BUF_LEN; i++) { + addrbuf[i] = dwork->addrbuf[i]; + } + op_ret_val = dwork->op_ret_val; + opt = (u8)dwork->addrbuf[0]; //避免opt默认为0情况,ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN修改为1 + mb_vfid = FIELD_GET(ZXDH_SRC_PFVF_ID, op_ret_val); + srcvhcaid = FIELD_GET(ZXDH_SRC_VHCA_ID, op_ret_val); + vf_dev = zxdh_find_vf_dev(dev, mb_vfid); + kfree(dwork); + + switch (opt) { + case ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN: // fix is 1, + if (!vf_dev) { + vf_dev = zxdh_pf_get_vf_hmc_res(dev, mb_vfid); + if (!vf_dev) { + resp_code = -ENODEV; + pr_err("%s vhca_id:%d get vf hmc res failed!\n", __func__, dev->vhca_id); + break; + } + refcount_inc(&vf_dev->refcnt); + } + vf_dev->vhca_id = srcvhcaid; + obj_info.hmc_info = &vf_dev->hmc_info; + obj_info.add_sd_cnt = 0; + zxdh_vfhmc_enter(dev, obj_info.hmc_info); + + vf_dev->hmc_info.pble_hmc_index = + vf_dev->hmc_info.hmc_first_entry_pble; + vf_dev->pbleq_unallocated_pble = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + vf_dev->pbleq_fpm_base_addr = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].base; + vf_dev->pbleq_next_fpm_addr = vf_dev->pbleq_fpm_base_addr; + + vf_dev->hmc_info.pble_mr_hmc_index = + vf_dev->hmc_info.hmc_first_entry_pble_mr; + vf_dev->pblemr_unallocated_pble = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt; + vf_dev->pblemr_fpm_base_addr = + obj_info.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base; + vf_dev->pblemr_next_fpm_addr = vf_dev->pblemr_fpm_base_addr; + + for (i = ZXDH_HMC_IW_IRD; i < 
ZXDH_HMC_IW_TXWINDOW + 1; i++) { + zxdh_create_vf_hmc_objs(dev, &vf_dev->hmc_info, i, + &obj_info); + } + + break; + case ZTE_ZXDH_OP_REQ_NP_CONFIG: + pf_info.vport = addrbuf[2]; + + pf_info.slot = cdev_info->slot_id; + dpp_vport_vhca_id_add(&pf_info, srcvhcaid); + ret = dpp_vport_attr_set(&pf_info, EGR_FLAG_VHCA, srcvhcaid); + if (ret != 0) { + pr_err("%s[%d]: dpp vport attr set EGR_FLAG_VHCA fail! ret=%u!\n", __func__, __LINE__, ret); + return; + } + ret = dpp_vport_attr_set(&pf_info, EGR_FLAG_RDMA_OFFLOAD_EN_OFF, + EGR_RDMA_OFFLOAD_EN); + if (ret != 0) { + pr_err("%s[%d]: dpp vport attr set EGR_FLAG_RDMA_OFFLOAD_EN_OFF fail! ret=%u!\n", __func__, __LINE__, ret); + return; + } + + break; + case ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE: + if (!vf_dev) { + resp_code = -ENODEV; + pr_info("VF[%d] remove failed by mailbox!\n", mb_vfid); + break; + } + iw_vf_idx = vf_dev->iw_vf_idx; + vf_vhca_id = vf_dev->vhca_id; + zxdh_del_hmc_objects(dev, + &rf->sc_dev.vf_dev[iw_vf_idx]->hmc_info); + zxdh_put_vfdev(dev, rf->sc_dev.vf_dev[iw_vf_idx]); + zxdh_remove_vf_dev(dev, rf->sc_dev.vf_dev[iw_vf_idx]); + vf_dev = NULL; + break; + case ZTE_ZXDH_OP_REQ_NP_MAC_DEL: + pf_info.vport = addrbuf[2]; + vf_dev_mac_addr = (void *)addrbuf[3]; + pf_info.slot = cdev_info->slot_id; + pr_info("%s[%d]: dpp del rdma trans item\n", __func__, __LINE__); + dpp_del_rdma_trans_item(&pf_info, vf_dev_mac_addr); + + break; + case ZTE_ZXDH_OP_REQ_NP_MAC_ADD: + pf_info.vport = addrbuf[2]; + vf_dev_addr = (void *)addrbuf[3]; + pf_info.slot = cdev_info->slot_id; + pr_info("%s[%d]: dpp add rdma trans item\n", __func__, __LINE__); + dpp_add_rdma_trans_item(&pf_info, vf_dev_addr, srcvhcaid); + + break; + case ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE: + case ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE: + if (!vf_dev) { + resp_code = -ENODEV; + pr_err("%s vhca_id:%d get vf_dev failed!\n", __func__, dev->vhca_id); + break; + } + if (!dev->hmc_use_dpu_ddr) { + if (opt == ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE) + i = ZXDH_HMC_IW_PBLE; /* code */ + else 
+ i = ZXDH_HMC_IW_PBLE_MR; + + if (vf_dev->hmc_info.hmc_obj[i].cnt) { + resp_code = zxdh_pf2vf_add_pble_hmc_obj( + dev, vf_dev, i); + if (i == ZXDH_HMC_IW_PBLE) + vf_dev->hmc_info.pble_hmc_index++; + else + vf_dev->hmc_info.pble_mr_hmc_index++; + } + } + break; + case ZTE_ZXDH_OP_SET_SMMU_INVALID: + if (!dev->hmc_use_dpu_ddr) + zxdh_smmu_invalidate_tlb(dev); + vf_vhca_id = srcvhcaid; + break; + default: + break; + } + + if (!vf_dev) { + if ((opt != ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE) && (opt != ZTE_ZXDH_OP_SET_SMMU_INVALID)) { + resp_code = -ENODEV; + pr_err("%s vhca_id:%d vf_dev is NULL!\n", __func__, dev->vhca_id); + } else + resp_code = zxdh_rdma_reg_write(rf, C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE(vf_vhca_id), 1); + return; + } + if (opt != ZTE_ZXDH_OP_SET_SMMU_INVALID) + vf_vhca_id = vf_dev->vhca_id; + zxdh_put_vfdev(dev, vf_dev); + resp_code = zxdh_rdma_reg_write(rf, C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE(vf_vhca_id), 1); + if (resp_code) { + pr_err("%s failed msg, resp_code:%d\n", __func__, resp_code); + } +} + +int zxdh_recv_mb(struct zxdh_sc_dev *dev, struct zxdh_ccq_cqe_info *info) +{ + struct mailbox_work *work; + struct zxdh_pci_f *rf; + struct zxdh_device *iwdev; + int i = 0; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + pr_err("%s kzalloc work failed!\n", __func__); + return -ENODEV; + } + work->dev = dev; + work->op_ret_val = info->op_ret_val; + for (i = 0; i < ZXDH_MAILBOX_ADDR_BUF_LEN; i++) { + work->addrbuf[i] = info->addrbuf[i]; + } + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + iwdev = rf->iwdev; + INIT_WORK(&work->work, zxdh_mailbox_worker); + queue_work(iwdev->cleanup_wq, &work->work); + return 0; +} + +int zxdh_create_vf_hmc_objs(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, u8 type, + struct zxdh_hmc_create_obj_info *obj_info) +{ + int status = 0; + + if (hmc_info->hmc_obj[type].cnt) { + obj_info->rsrc_type = type; + obj_info->count = hmc_info->hmc_obj[obj_info->rsrc_type].cnt; + status = 
zxdh_sc_create_hmc_obj(dev, obj_info); + if (status) { + zxdh_del_hmc_objects(dev, hmc_info); + pr_err("ERR: create obj type %d status = %d\n", + iw_hmc_obj_types[obj_info->rsrc_type], status); + } + } + + return status; +} + +int zxdh_vfhmc_enter(struct zxdh_sc_dev *dev, struct zxdh_hmc_info *vf_hmc_info) +{ + u32 sd_lmt = 0, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, + cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info = NULL; + struct zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_obj_info *obj_info = NULL; + + hmc_info = vf_hmc_info; + obj_info = hmc_info->hmc_obj; + + for (k = ZXDH_HMC_IW_IRD; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + if ((fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) == 0) { + sd_lmt = fpm_limit / ZXDH_HMC_DIRECT_BP_SIZE; + sd_lmt += 1; + } else { + sd_lmt = (u32)((fpm_limit - 1) / + ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + } + + if (sd_lmt == 1) + hmc_entry_total++; + else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + pr_err("HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + return 0; +} + +int zxdh_sc_write_hmc_register(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *obj_info, + u32 rsrc_type, u16 vhca_id) +{ + u32 base_low = 0, base_high = 0, val = 0; + u64 base = 0; + struct zxdh_sc_cqp *cqp = dev->cqp; + + if (dev->cache_id > 3) { + pr_info("cache id is error!!!\n"); + return -EACCES; + } + + 
base = obj_info[rsrc_type].base; + + base = base / 512; + base_low = (u32)(base & 0x00000000ffffffff); + base_high = (u32)((base & 0xffffffff00000000) >> 32); + + switch (rsrc_type) { + case ZXDH_HMC_IW_PBLE_MR: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_TX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RX2)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_INDICATE)); + } + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEMR_RDMAIO_BASE_HIGH)); + break; + case ZXDH_HMC_IW_PBLE: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_TX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_TX2)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + RDMATX_DB_PBLE_ID_CFG)); + + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX1)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX2)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX3)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX4)); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RX5)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_INDICATE)); + } + writel(base_low, + (u32 __iomem 
*)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_PBLEQUEUE_RDMAIO_BASE_HIGH)); + break; + case ZXDH_HMC_IW_MR: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_TX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_TX3)); + + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_RX2)); + + val = zxdh_hmc_register_config_cqpval( + dev, obj_info[rsrc_type].max_cnt, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_MRTE_CQP)); + + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_INDICATE)); + } + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_MRTE_RDMAIO_BASE_HIGH)); + break; + case ZXDH_HMC_IW_AH: + + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_AH_TX)); + + val = zxdh_hmc_register_config_cqpval( + dev, obj_info[rsrc_type].max_cnt, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_AH_CQP)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_BASE_HIGH)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_AH_RDMAIO_INDICATE)); + } + break; + case ZXDH_HMC_IW_IRD: + + val = 
zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX2)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_IRD_RX3)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_BASE_HIGH)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_IRD_RDMAIO_INDICATE)); + } + break; + case ZXDH_HMC_IW_TXWINDOW: + + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_TX)); + if (dev->hmc_use_dpu_ddr == true) { + writel(ZXDH_INDICATE_DPU_DDR, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_INDICATE)); + } else { + writel(ZXDH_INDICATE_HOST_SMMU, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_INDICATE)); + } + writel(base_low, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_BASE_LOW)); + writel(base_high, + (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_TX_WINDOW_RDMAIO_BASE_HIGH)); + break; + case ZXDH_HMC_IW_QP: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_QPC_RX)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_RX_BASE_HIGH)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_QPC_TX)); + + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_TX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_QPC_TX_BASE_HIGH)); + break; + case ZXDH_HMC_IW_SRQ: + val = 
zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_SRQC_RX)); + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_SRQC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_SRQC_RX_BASE_HIGH)); + break; + case ZXDH_HMC_IW_CQ: + val = zxdh_hmc_register_config_comval(dev, rsrc_type); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_CQC_RX1)); + writel(val, + (u32 __iomem *)(cqp->dev->hw->hw_addr + C_HMC_CQC_RX2)); + writel(base_low, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_CQC_RX_BASE_LOW)); + writel(base_high, (u32 __iomem *)(cqp->dev->hw->hw_addr + + C_HMC_CQC_RX_BASE_HIGH)); + break; + default: + break; + } + return 0; +} diff --git a/drivers/infiniband/hw/zrdma/hmc.h b/drivers/infiniband/hw/zrdma/hmc.h new file mode 100644 index 000000000000..d137b4a092e4 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/hmc.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#ifndef ZXDH_HMC_H +#define ZXDH_HMC_H + +#include "defs.h" +#include "pble.h" + +/* Forward declarations for SMMU */ +struct smmu_pte_request; +struct zxdh_sc_dev; + +#define ZXDH_HMC_MAX_BP_COUNT 512 +#define ZXDH_MAX_SD_ENTRIES 11 +#define ZXDH_HW_DBG_HMC_INVALID_BP_MARK 0xca +#define ZXDH_HMC_INFO_SIGNATURE 0x484d5347 +#define ZXDH_HMC_PD_CNT_IN_SD 512 +#define ZXDH_HMC_DIRECT_BP_SIZE 0x200000 +#define ZXDH_HMC_MAX_SD_COUNT 8192 +#define ZXDH_HMC_PAGED_BP_SIZE 4096 +#define ZXDH_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define ZXDH_FIRST_VF_FPM_ID 8 +#define FPM_MULTIPLIER 1024 + +#define ZXDH_MIN_GLOBAL_CQPN 2 +#define ZXDH_MAX_GLOBAL_CQPN 1025 +#define ZXDH_MIN_GLOBAL_QPN (1 + ZXDH_MAX_GLOBAL_CQPN) +#define ZXDH_MIN_GLOBAL_CQN 1 +#define ZXDH_MIN_GLOBAL_SRQN 1 +#define ZXDH_MIN_GLOBAL_CEQN 1 + +//修改为结构体 +// #define ZXDH_HMC_CNT_DEBUG 64 +// #define ZXDH_HMC_1K 1024 +// #define ZXDH_HMC_1M 
(1024*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG) +// #define ZXDH_HMC_HOST_QPC_MAX_QUANTITY (ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG) +// #define ZXDH_HMC_HOST_CQC_MAX_QUANTITY 2*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_SRQC_MAX_QUANTITY 512*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_MRTE_MAX_QUANTITY 16*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_AH_MAX_QUANTITY 512*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY 2 //There is no multicast scenario. The number can be set to a small value. +// #define ZXDH_HMC_HOST_MGC_MAX_QUANTITY 8*ZXDH_HMC_1K/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_PBLEMR_MAX_QUANTITY 512*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_PBLEOTHER_MAX_QUANTITY 512*ZXDH_HMC_1M/ZXDH_HMC_CNT_DEBUG +// #define ZXDH_HMC_HOST_CEQC_MAX_QUANTITY 4 + +// smmu使用参数 +#define ZXDH_HMC_CNT_DEBUG 64 +#define ZXDH_HMC_1K 1024 +#define ZXDH_HMC_1M (1024 * ZXDH_HMC_1K / ZXDH_HMC_CNT_DEBUG) +#define ZXDH_HMC_HOST_QPC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_CQC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_SRQC_MAX_QUANTITY (1024 * 8) +#define ZXDH_HMC_HOST_MRTE_MAX_QUANTITY (1024 * 32) +#define ZXDH_HMC_HOST_AH_MAX_QUANTITY (1024 * 32) +// #define ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY 600 +// #define ZXDH_HMC_HOST_MGC_MAX_QUANTITY (8192) +#define ZXDH_HMC_HOST_PBLEMR_MAX_QUANTITY (1024 * 1024 * 4) +#define ZXDH_HMC_HOST_PBLEOTHER_MAX_QUANTITY (1024 * 1024 * 4) +#define ZXDH_HMC_HOST_CEQC_MAX_QUANTITY 4 + +enum zxdh_hmc_rsrc_type { + ZXDH_HMC_IW_QP = 0, + ZXDH_HMC_IW_CQ = 1, + ZXDH_HMC_IW_SRQ = 2, + ZXDH_HMC_IW_AH = 3, + ZXDH_HMC_IW_MR = 4, + ZXDH_HMC_IW_IRD = 5, + ZXDH_HMC_IW_TXWINDOW = 6, + ZXDH_HMC_IW_PBLE = 7, + ZXDH_HMC_IW_PBLE_MR = 8, + ZXDH_HMC_IW_MAX, /* Must be last entry */ +}; + +enum zxdh_indicate_id { + ZXDH_INDICATE_L2D = 0, + ZXDH_INDICATE_DPU_DDR = ZXDH_INDICATE_L2D, // 外挂 + ZXDH_INDICATE_REGISTER = ZXDH_INDICATE_L2D, + ZXDH_INDICATE_RESERVED = 1, + ZXDH_INDICATE_HOST_NOSMMU 
= 2, + ZXDH_INDICATE_HOST_SMMU = 3, +}; + +enum zxdh_axid_type { + ZXDH_AXID_L2D = 0, + ZXDH_AXID_DPUDDR = 1, + ZXDH_AXID_HOST_EP0 = 2, + ZXDH_AXID_HOST_EP1 = 3, + ZXDH_AXID_HOST_EP2 = 4, + ZXDH_AXID_HOST_EP3 = 5, + ZXDH_AXID_HOST_EP4 = 6, +}; + +enum zxdh_interface_type { + ZXDH_INTERFACE_CACHE = 0, + ZXDH_INTERFACE_NOTCACHE = 1, +}; + +enum zxdh_object_id { + ZXDH_PBLE_MR_OBJ_ID = 0, + ZXDH_PBLE_QUEUE_OBJ_ID = 1, + ZXDH_MR_OBJ_ID = 2, + ZXDH_AH_OBJ_ID = 3, + ZXDH_IRD_OBJ_ID = 4, + ZXDH_TX_WINDOW_OBJ_ID = 5, + ZXDH_SRQC_OBJ_ID = 6, + ZXDH_CQC_OBJ_ID = 7, + ZXDH_MG_PAYLOAD_OBJ_ID = 8, + ZXDH_MG_OBJ_ID = 9, + ZXDH_RW_PAYLOAD = 10, + ZXDH_SQ = 11, + ZXDH_SQ_SHADOW_AREA = 12, + ZXDH_RQ = 13, + ZXDH_RQ_SHADOW_AREA = 14, + ZXDH_SRQP = 15, + ZXDH_SRQ = 16, + ZXDH_SRQ_SHADOW_AREA = 17, + ZXDH_CQ = 18, + ZXDH_CQ_SHADOW_AREA = 19, + ZXDH_CEQ = 20, + ZXDH_AEQ = 21, + ZXDH_MG_QPN = 22, + ZXDH_CPU_DDR = 24, + ZXDH_QPC_OBJ_ID = 29, + ZXDH_DMA_OBJ_ID = 30, + ZXDH_L2D_OBJ_ID = 31, + ZXDH_REG_OBJ_ID = ZXDH_L2D_OBJ_ID, +}; + +enum zxdh_sd_entry_type { + ZXDH_SD_TYPE_INVALID = 0, + ZXDH_SD_TYPE_PAGED = 1, + ZXDH_SD_TYPE_DIRECT = 2, +}; + +enum zxdh_mb_opt_type { + ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN = 1, + ZTE_ZXDH_OP_REQ_NP_CONFIG = 2, + ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE = 3, + ZTE_ZXDH_OP_REQ_NP_MAC_DEL = 4, + ZTE_ZXDH_OP_REQ_NP_MAC_ADD = 5, + ZTE_ZXDH_OP_GET_PBLE_HMC_BASEINFO = 6, + ZTE_ZXDH_OP_REPLY_PBLE_HMC_BASEINFO = 7, + ZTE_ZXDH_OP_ADD_QPBLE_HMC_RANGE = 8, + ZTE_ZXDH_OP_ADD_MRPBLE_HMC_RANGE = 9, + ZTE_ZXDH_OP_SET_SMMU_INVALID = 10, +}; + +enum function_type { + FUNCTION_TYPE_PF = 0, + FUNCTION_TYPE_VF = 1, +}; + +struct zxdh_hmc_obj_manage { + u64 hmc_base; + u64 hmc_size; + u32 total_qp_cnt; + u32 total_cq_cnt; + u32 total_srq_cnt; + u32 total_mrte_cnt; + u32 total_ah_cnt; + u32 pf_pblemr_cnt; + u32 pf_pblequeue_cnt; + u32 vf_qp_cnt; + u32 vf_pblemr_cnt; + u32 vf_pblequeue_cnt; +}; + +struct zxdh_hmc_obj_info { + u64 base; + u32 max_cnt; + u32 cnt; + u64 size; + u8 type; +}; + 
+struct zxdh_vf_hmc_obj_info { + struct zxdh_hmc_obj_info hmc_objinfo[ZXDH_HMC_IW_MAX]; + u16 vf_id; + u8 valid : 1; +}; + +struct zxdh_hmc_bp { + enum zxdh_sd_entry_type entry_type; + struct zxdh_dma_mem addr; + struct zxdh_dma_mem addr_hardware; // for hardware + u32 sd_pd_index; + u32 use_cnt; +}; + +struct zxdh_hmc_pd_entry { + struct zxdh_hmc_bp bp; + u32 sd_index; + u8 rsrc_pg : 1; + u8 valid : 1; +}; + +struct zxdh_hmc_pd_table { + struct zxdh_dma_mem pd_page_addr; + struct zxdh_hmc_pd_entry *pd_entry; + struct zxdh_virt_mem pd_entry_virt_mem; + u32 use_cnt; + u32 sd_index; +}; + +struct zxdh_hmc_sd_entry { + enum zxdh_sd_entry_type entry_type; + bool valid; + union { + struct zxdh_hmc_pd_table pd_table; + struct zxdh_hmc_bp bp; + } u; +}; + +struct zxdh_hmc_sd_table { + struct zxdh_virt_mem addr; + u32 sd_cnt; + u32 use_cnt; + struct zxdh_hmc_sd_entry *sd_entry; +}; + +struct zxdh_hmc_info { + u32 signature; + u8 hmc_fn_id; + u16 first_sd_index; + u32 pble_hmc_index; + u32 pble_mr_hmc_index; + u32 hmc_entry_total; + u32 hmc_first_entry_pble; + u32 hmc_first_entry_pble_mr; + struct zxdh_hmc_obj_info *hmc_obj; + struct zxdh_virt_mem hmc_obj_virt_mem; + struct zxdh_hmc_sd_table sd_table; + u16 sd_indexes[ZXDH_HMC_MAX_SD_COUNT]; +}; + +struct zxdh_update_sd_entry { + u64 cmd; + u64 data; +}; + +struct zxdh_update_sds_info { + u32 cnt; + u8 hmc_fn_id; + struct zxdh_update_sd_entry entry[ZXDH_MAX_SD_ENTRIES]; +}; + +struct zxdh_ccq_cqe_info; +struct zxdh_hmc_fcn_info { + u32 vf_id; + u8 free_fcn; +}; + +struct zxdh_hmc_create_obj_info { + struct zxdh_hmc_info *hmc_info; + struct zxdh_virt_mem add_sd_virt_mem; + u32 rsrc_type; + u32 count; + u32 add_sd_cnt; + enum zxdh_sd_entry_type entry_type; + bool privileged; +}; + +struct zxdh_hmc_del_obj_info { + struct zxdh_hmc_info *hmc_info; + struct zxdh_virt_mem del_sd_virt_mem; + u32 rsrc_type; + u32 count; + u32 del_sd_cnt; + bool privileged; +}; + +int zxdh_sc_create_hmc_obj(struct zxdh_sc_dev *dev, + struct 
zxdh_hmc_create_obj_info *info); +int zxdh_sc_create_date_cap_obj(struct zxdh_sc_dev *dev); +int zxdh_add_pble_hmc_obj(struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages); +int zxdh_vf_add_pble_hmc_obj(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, + struct zxdh_hmc_pble_rsrc *pble_rsrc, u32 pages); + +int zxdh_prep_remove_sd_bp(struct zxdh_hmc_info *hmc_info, u32 idx); + +int zxdh_recv_mb(struct zxdh_sc_dev *dev, struct zxdh_ccq_cqe_info *info); + +struct zxdh_vfdev *zxdh_pf_get_vf_hmc_res(struct zxdh_sc_dev *dev, u16 vf_id); +int zxdh_sc_write_hmc_register(struct zxdh_sc_dev *dev, + struct zxdh_hmc_obj_info *obj_info, + u32 rsrc_type, u16 vhca_id); +int zxdh_vfhmc_enter(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *vf_hmc_info); + +int zxdh_create_vf_hmc_objs(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info, u8 type, + struct zxdh_hmc_create_obj_info *obj_info); +#endif /* ZXDH_HMC_H */ diff --git a/drivers/infiniband/hw/zrdma/hw.c b/drivers/infiniband/hw/zrdma/hw.c new file mode 100644 index 000000000000..34723b7d70b5 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/hw.c @@ -0,0 +1,2798 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "main.h" +#include "user.h" +#include "icrdma_hw.h" +#include "hmc.h" +#include "smmu/kernel/adk_mmu600.h" + +/* types of hmc objects */ +enum zxdh_hmc_rsrc_type iw_hmc_obj_types[ZXDH_HMC_IW_TXWINDOW + 1] = { + ZXDH_HMC_IW_QP, + ZXDH_HMC_IW_CQ, + ZXDH_HMC_IW_SRQ, + ZXDH_HMC_IW_AH, + ZXDH_HMC_IW_MR, + ZXDH_HMC_IW_IRD, + ZXDH_HMC_IW_TXWINDOW, +}; + +/** + * zxdh_iwarp_ce_handler - handle iwarp completions + * @iwcq: iwarp cq receiving event + */ +static void zxdh_iwarp_ce_handler(struct zxdh_sc_cq *iwcq) +{ + struct zxdh_cq *cq = iwcq->back_cq; + + if (cq != NULL) { + if (!cq->user_mode) + cq->armed = false; + if (cq->ibcq.comp_handler && (iwcq->cq_uk.valid_cq == true)) + cq->ibcq.comp_handler(&cq->ibcq, 
cq->ibcq.cq_context); + } +} + +static void zxdh_ceq_ena_intr(struct zxdh_sc_dev *dev, u32 ceq_id); +int zxdh_vf_init_np_tbl(struct zxdh_pci_f *rf); +int zxdh_vf_init_hmc(struct zxdh_pci_f *rf); + +/** + * zxdh_process_ceq - handle ceq for completions + * @rf: RDMA PCI function + * @ceq: ceq having cq for completion + */ +static void zxdh_process_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *ceq) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_sc_ceq *sc_ceq; + struct zxdh_sc_cq *cq; + unsigned long flags; + + sc_ceq = &ceq->sc_ceq; + do { + spin_lock_irqsave(&ceq->ce_lock, flags); + cq = zxdh_sc_process_ceq(dev, sc_ceq); + if (!cq) { + spin_unlock_irqrestore(&ceq->ce_lock, flags); + break; + } + if (cq->cq_type == ZXDH_CQ_TYPE_IO) + zxdh_iwarp_ce_handler(cq); + spin_unlock_irqrestore(&ceq->ce_lock, flags); + + if (cq->cq_type == ZXDH_CQ_TYPE_CQP) { + rf->sc_dev.ceq_interrupt = true; + queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); + } + } while (1); +} + +static void zxdh_set_flush_fields_requester(struct zxdh_sc_qp *qp, + struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_REQ_NVME_IDX_ERR: + case ZXDH_AE_REQ_NVME_PD_IDX_ERR: + case ZXDH_AE_REQ_NVME_KEY_ERR: + case ZXDH_AE_REQ_NVME_ACC_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_IDX_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_PD_IDX_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_KEY_ERR: + case ZXDH_AE_REQ_NVME_TX_ROUTE_ACC_ERR: + case ZXDH_AE_REQ_MW_INV_LKEY_ERR: + case ZXDH_AE_REQ_MW_INV_TYPE_ERR: + case ZXDH_AE_REQ_MW_INV_STATE_INV: + case ZXDH_AE_REQ_MW_INV_PD_IDX_ERR: + case ZXDH_AE_REQ_MW_INV_SHARE_MEM_ERR: + case ZXDH_AE_REQ_MW_INV_PARENT_STATE_INV: + case ZXDH_AE_REQ_MW_INV_MW_NUM_ZERO: + case ZXDH_AE_REQ_MW_INV_MW_STAG_31_8_ZERO: + case ZXDH_AE_REQ_MW_INV_QP_NUM_ERR: + case ZXDH_AE_REQ_MR_INV_INV_LKEY_ERR: + case ZXDH_AE_REQ_MR_INV_MW_NUM_ZERO: + case ZXDH_AE_REQ_MR_INV_STATE_ERR: + case ZXDH_AE_REQ_MR_INV_EN_ERR: + case ZXDH_AE_REQ_MR_INV_SHARE_MEM_ERR: + case ZXDH_AE_REQ_MR_INV_PD_IDX_ERR: + 
case ZXDH_AE_REQ_MR_INV_MW_STAG_31_8_ZERO: + case ZXDH_AE_REQ_MWBIND_WRITE_ACC_ERR: + case ZXDH_AE_REQ_MWBIND_VA_BIND_ERR: + case ZXDH_AE_REQ_MWBIND_PD_IDX_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_STATE_TYPE_ERR: + case ZXDH_AE_REQ_MWBIND_VA_LEN_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE_VA_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE_IDX_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_MR_ERR: + case ZXDH_AE_REQ_MWBIND_TYPE2_LEN_ERR: + case ZXDH_AE_REQ_MWBIND_MRTE_STATE_ERR: + case ZXDH_AE_REQ_MWBIND_QPC_EN_ERR: + case ZXDH_AE_REQ_MWBIND_PARENT_MR_ERR: + case ZXDH_AE_REQ_MWBIND_ACC_BIT4_ERR: + case ZXDH_AE_REQ_MWBIND_MW_STAG_ERR: + case ZXDH_AE_REQ_MWBIND_IDX_OUT_RANGE: + case ZXDH_AE_REQ_MR_FASTREG_ACC_ERR: + case ZXDH_AE_REQ_MR_FASTREG_PD_IDX_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MRTE_STATE_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MR_IS_NOT_1: + case ZXDH_AE_REQ_MR_FASTREG_QPC_EN_ERR: + case ZXDH_AE_REQ_MR_FASTREG_STAG_LEN_ERR: + case ZXDH_AE_REQ_MR_FASTREG_SHARE_MR_ERR: + case ZXDH_AE_REQ_MR_FASTREG_MW_STAG_ERR: + case ZXDH_AE_REQ_MR_FASTREG_IDX_OUT_RANGE: + case ZXDH_AE_REQ_MR_FASTREG_MR_EN_ERR: + case ZXDH_AE_REQ_MW_BIND_PD_IDX_ERR: + case ZXDH_AE_REQ_MRTE_STATE_FREE: + case ZXDH_AE_REQ_MRTE_STATE_INVALID: + case ZXDH_AE_REQ_MRTE_MW_QP_ID_ERR: + case ZXDH_AE_REQ_MRTE_PD_IDX_ERR: + case ZXDH_AE_REQ_MRTE_KEY_ERR: + case ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_ERR: + case ZXDH_AE_REQ_MRTE_VIRT_ADDR_AND_LEN_ERR: + case ZXDH_AE_REQ_MRTE_ACC_ERR: + case ZXDH_AE_REQ_MRTE_STAG_IDX_RANGE_RSV_ERR: + case ZXDH_AE_REQ_REM_INV_RKEY: + case ZXDH_AE_REQ_WQE_MRTE_STATE_FREE: + case ZXDH_AE_REQ_WQE_MRTE_STATE_INV: + case ZXDH_AE_REQ_WQE_MRTE_MW_QP_ID_ERR: + case ZXDH_AE_REQ_WQE_MRTE_PD_IDX_ERR: + case ZXDH_AE_REQ_WQE_MRTE_KEY_ERR: + case ZXDH_AE_REQ_WQE_MRTE_STAG_IDX_ERR: + case ZXDH_AE_REQ_WQE_MRTE_VIRT_ADDR_AND_LEN_CHK_ERR: + case ZXDH_AE_REQ_WQE_MRTE_ACC_ERR: + case ZXDH_AE_REQ_WQE_MRTE_RSV_LKEY_EN_ERR: + qp->event_type = ZXDH_QP_EVENT_ACCESS_ERR; + break; + case ZXDH_AE_REQ_REM_INV_OPCODE: + case 
ZXDH_AE_REQ_OFED_INVALID_SQ_OPCODE: + case ZXDH_AE_REQ_NVME_INVALID_SQ_OPCODE: + qp->event_type = ZXDH_QP_EVENT_REQ_ERR; + break; + default: + qp->event_type = ZXDH_QP_EVENT_CATASTROPHIC; + break; + } +} + +static void zxdh_set_flush_fields_responder(struct zxdh_sc_qp *qp, + struct zxdh_aeqe_info *info) +{ + switch (info->ae_id) { + case ZXDH_AE_RSP_PRIFIELD_CHK_INV_OPCODE: + qp->event_type = ZXDH_QP_EVENT_REQ_ERR; + break; + case ZXDH_AE_RSP_PKT_TYPE_NOF_PD_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_NOF_RKEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_NOF_ACC_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_DISTRIBUTE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_INV_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_QP_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_PD_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_KEY_CHK_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_STAG_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_BOUNDARY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_ACC_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MR_STAG0_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_STATE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_PD_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_KEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_TYPE2B_QPN_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_KEY_IDX_ERR: + case ZXDH_AE_RSP_PKT_TYPE_MW_SHARE_MR: + case ZXDH_AE_RSP_PKT_TYPE_MW_TYPE_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_PD_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_KEY_ERR: + case ZXDH_AE_RSP_PKT_TYPE_REM_INV_ACC_ERR: + case ZXDH_AE_RSP_CHK_ERR_SHARE_MR: + case ZXDH_AE_RSP_MW_NUM_ERR: + case ZXDH_AE_RSP_INV_EN_ERR: + qp->event_type = ZXDH_QP_EVENT_ACCESS_ERR; + break; + default: + qp->event_type = ZXDH_QP_EVENT_CATASTROPHIC; + break; + } +} + +int zxdh_set_smmu_invalid(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cnt = 0, val = 0, status = 0; + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_OP_SET_SMMU_INVALID, 0x12, 0x13, + 0x15, dev->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ 
> ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + status = -ETIMEDOUT; + pr_info("vhca_id:%d waiting completed SET_SMMU_INVALID mailbox too long time,timeout!\n", dev->vhca_id); + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + return status; +} + +int zxdh_vf_init_hmc(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cnt = 0, val = 0, status = 0; + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_VCHNL_OP_GET_HMC_FCN, 0x12, 0x13, + 0x15, dev->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + status = -ETIMEDOUT; + pr_info("vhca_id:%d waiting completed GET_HMC mailbox too long time,timeout!\n", dev->vhca_id); + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + return status; +} + +int zxdh_vf_init_np_tbl(struct zxdh_pci_f *rf) +{ + struct iidc_core_dev_info *cdev_info = rf->cdev; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cnt = 0, val = 0, status = 0; + + if (!rf->sc_dev.np_mode_low_lat) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_OP_REQ_NP_CONFIG, + cdev_info->vport_id, 0, 0, dev->vf_id); + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed NP_CONFIG mailbox too long time,timeout!\n", dev->vhca_id); + status = -ETIMEDOUT; + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + } else { + if (rf->iwdev->netdev->dev_addr == NULL) { + pr_err("[%s] dev_addr is 
null!\n", __func__); + status = -EINVAL; + return status; + } + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_OP_REQ_NP_MAC_ADD, + cdev_info->vport_id, + (u64)rf->iwdev->netdev->dev_addr, 0, + dev->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed NP_MAC_ADD mailbox too long time,timeout!\n", dev->vhca_id); + status = -ETIMEDOUT; + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + ether_addr_copy(rf->iwdev->mac_addr, + rf->iwdev->netdev->dev_addr); + } + + return status; +} + +void zxdh_stop_cap_worker(struct work_struct *work) +{ + uint32_t reg_val = 0; + struct aeq_stop_cap_work *aeq_stop_cap_work = + container_of(work, struct aeq_stop_cap_work, work); + struct zxdh_pci_f *rf = aeq_stop_cap_work->rf; + + kfree(aeq_stop_cap_work); + if (rf->sc_dev.tx_stop_on_aeq == 1) { + if (zxdh_rdma_reg_read(rf, RDMATX_DATA_START_CAP, ®_val)) + pr_err("zxdh_rdma_reg_read RDMATX_DATA_START_CAP failed\n"); + if (reg_val != 0 && zxdh_rdma_reg_write(rf, RDMATX_DATA_START_CAP, 0)) + pr_err("zxdh_rdma_reg_write RDMATX_DATA_START_CAP failed\n"); + rf->sc_dev.tx_stop_on_aeq = 0; + } + + if (rf->sc_dev.rx_stop_on_aeq == 1) { + if (zxdh_rdma_reg_read(rf, RDMARX_DATA_START_CAP, ®_val)) + pr_err("zxdh_rdma_reg_read RDMARX_DATA_START_CAP failed\n"); + if (reg_val != 0 && zxdh_rdma_reg_write(rf, RDMARX_DATA_START_CAP, 0)) + pr_err("zxdh_rdma_reg_write RDMARX_DATA_START_CAP failed\n"); + rf->sc_dev.rx_stop_on_aeq = 0; + } +} + +void zxdh_aeq_process_stop_cap(struct zxdh_pci_f *rf) +{ + struct aeq_stop_cap_work *stop_cap_work; + + stop_cap_work = kzalloc(sizeof(*stop_cap_work), GFP_ATOMIC); + if (!stop_cap_work) { + pr_err("kzalloc stop_cap_work failed!\n"); + return; + } + + 
stop_cap_work->rf = rf; + INIT_WORK(&stop_cap_work->work, zxdh_stop_cap_worker); + queue_work(rf->iwdev->cleanup_wq, &stop_cap_work->work); +} + +void zrdma_cleanup_rdma_tools_cfg(struct zxdh_pci_f *rf) +{ + struct zxdh_cap_addr_info *cap_addr_info = NULL; + int i; + struct zxdh_device *iwdev = rf->iwdev; + + if (rf->sc_dev.tx_stop_on_aeq != 0) + rf->sc_dev.tx_stop_on_aeq = 0; + if (rf->sc_dev.rx_stop_on_aeq != 0) + rf->sc_dev.rx_stop_on_aeq = 0; + if (rf->sc_dev.hw_attrs.self_health == false) { + if (zxdh_rdma_reg_write(rf, RDMATX_DATA_START_CAP, 0)) + pr_err("zrdma_cleanup_rdma_tools_cfg write RDMATX_DATA_START_CAP failed\n"); + if (zxdh_rdma_reg_write(rf, RDMARX_DATA_START_CAP, 0)) + pr_err("zrdma_cleanup_rdma_tools_cfg write RDMARX_DATA_START_CAP failed\n"); + } + for (i = 0; i < CAP_NODE_NUM; i++) { + free_cap_addr(iwdev, &iwdev->hw_data_cap.cap_tx_use_direct_dma[i]); + free_cap_addr(iwdev, &iwdev->hw_data_cap.cap_rx_use_direct_dma[i]); + cap_addr_info = &iwdev->hw_data_cap.cap_txrx_use_iova[i]; + if (cap_addr_info->entry_info.cap_mmap_entry != NULL) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + cap_addr_info->entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + cap_addr_info->entry_info.cap_mmap_entry); +#endif + cap_addr_info->entry_info.cap_mmap_entry = NULL; + } + if (iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr != 0) + iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr = 0; + } +} + +/** + * zxdh_process_aeq - handle aeq events + * @rf: RDMA PCI function + */ +static void zxdh_process_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + struct zxdh_sc_aeq *sc_aeq = &aeq->sc_aeq; + struct zxdh_aeqe_info aeinfo; + struct zxdh_aeqe_info *info = &aeinfo; + int ret; + struct zxdh_qp *iwqp = NULL; + struct zxdh_cq *iwcq = NULL; + struct zxdh_srq *iwsrq = NULL; + struct zxdh_sc_qp *qp = NULL; + unsigned long flags; + struct ib_event ibevent; 
+ + u32 aeqcnt = 0; + + if (!sc_aeq->size) + return; + + do { + memset(info, 0, sizeof(*info)); + ret = zxdh_sc_get_next_aeqe(sc_aeq, info); + if (ret) + break; + + aeqcnt++; + zxdh_dbg( + dev, + "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n", + info->ae_id, info->qp, info->qp_cq_id, info->tcp_state, + info->iwarp_state, info->ae_src); + + if (info->qp) { + spin_lock_irqsave(&rf->qptable_lock, flags); + if (info->qp_cq_id < dev->base_qpn) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("qp information is valid,qpn < base_qpn, qpn:%d\n", + info->qp_cq_id); + continue; + } else if (info->qp_cq_id >= (dev->base_qpn + dev->max_qp)) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("qp information is valid,qpn >= (base_qpn + max_qp), qpn:%d\n", + info->qp_cq_id); + continue; + } + iwqp = rf->qp_table[info->qp_cq_id - dev->base_qpn]; + if (!iwqp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + zxdh_dbg(dev, + "AEQ: qp_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_qp_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&rf->qptable_lock, flags); + qp = &iwqp->sc_qp; + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_iwarp_state = info->iwarp_state; + iwqp->last_aeq = info->ae_id; + spin_unlock_irqrestore(&iwqp->lock, flags); + } else { + if (info->ae_id == ZXDH_AE_REQ_WQE_FLUSH) + continue; + else if (info->ae_id == ZXDH_AE_RSP_WQE_FLUSH) + continue; + else if (info->ae_id == ZXDH_AE_REQ_WR_CQP_QP_STATE) { + pr_info("[%s] cqp qp state err!\n", __func__); + continue; + } + } + + if ((rf->sc_dev.tx_stop_on_aeq != 0 || + rf->sc_dev.rx_stop_on_aeq != 0) && + info->ae_id != ZXDH_AE_RSP_SRQ_WATER_SIG) { + zxdh_aeq_process_stop_cap(rf); + } + + switch (info->ae_id) { + case ZXDH_AE_RSP_SRQ_WATER_SIG: + spin_lock_irqsave(&rf->srqtable_lock, flags); + if (info->qp_cq_id < dev->base_srqn) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + pr_err("aeq srq water limit event,srqn < 
base_srqn, srqn:%d\n", + info->qp_cq_id); + continue; + } else if (info->qp_cq_id >= (dev->base_srqn + dev->max_srq)) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + pr_err("aeq srq water limit event,srqn >= (base_srqn + max_srq), srqn:%d\n", + info->qp_cq_id); + continue; + } + iwsrq = rf->srq_table[info->qp_cq_id - dev->base_srqn]; + if (!iwsrq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_srq_add_ref(&iwsrq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + if (iwsrq->ibsrq.event_handler) { + ibevent.device = iwsrq->ibsrq.device; + ibevent.event = IB_EVENT_SRQ_LIMIT_REACHED; + ibevent.element.srq = &iwsrq->ibsrq; + iwsrq->ibsrq.event_handler( + &ibevent, iwsrq->ibsrq.srq_context); + } + zxdh_srq_rem_ref(&iwsrq->ibsrq); + break; + case ZXDH_AE_RSP_PKT_TYPE_CQ_OVERFLOW: + case ZXDH_AE_RSP_PKT_TYPE_CQ_TWO_PBLE_RSP: + dev_err(idev_to_dev(dev), + "Processing CQ[0x%x] op error, AE 0x%04X\n", + info->qp_cq_id, info->ae_id); + spin_lock_irqsave(&rf->cqtable_lock, flags); + if (info->qp_cq_id < dev->base_cqn) { + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + pr_err("aeq cq err, cqn < base_cqn cqn:%d\n", + info->qp_cq_id); + continue; + } else if (info->qp_cq_id >= (dev->base_cqn + dev->max_cq)) { + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + pr_err("aeq cq err, cqn >= (base_cqn + max_cq) cqn:%d\n", + info->qp_cq_id); + continue; + } + iwcq = rf->cq_table[info->qp_cq_id - dev->base_cqn]; + if (!iwcq) { + spin_unlock_irqrestore(&rf->cqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: cq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_cq_add_ref(&iwcq->ibcq); + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + if (iwcq->ibcq.event_handler) { + ibevent.device = iwcq->ibcq.device; + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &iwcq->ibcq; + iwcq->ibcq.event_handler(&ibevent, + 
iwcq->ibcq.cq_context); + } + zxdh_cq_rem_ref(&iwcq->ibcq); + break; + case ZXDH_AE_RSP_SRQ_AXI_RSP_SIG: + + spin_lock_irqsave(&rf->qptable_lock, flags); + if (info->qp_cq_id < dev->base_qpn) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("aeq srq axi err, qpn < base_qpn qpn:%d\n", + info->qp_cq_id); + continue; + } else if (info->qp_cq_id >= (dev->base_qpn + dev->max_qp)) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + pr_err("aeq srq axi err, qpn >= (base_qpn + max_qp) qpn:%d\n", + info->qp_cq_id); + continue; + } + iwqp = rf->qp_table[info->qp_cq_id - dev->base_qpn]; + if (!iwqp) { + spin_unlock_irqrestore(&rf->qptable_lock, + flags); + zxdh_dbg(dev, + "AEQ: qp_id %d is already freed\n", + info->qp_cq_id); + continue; + } + spin_unlock_irqrestore(&rf->qptable_lock, flags); + + if (iwqp->is_srq == false) { + pr_err("aeq srq axi err, qp is not bound to srq\n"); + continue; + } + iwsrq = iwqp->iwsrq; + + spin_lock_irqsave(&rf->srqtable_lock, flags); + if (!iwsrq) { + spin_unlock_irqrestore(&rf->srqtable_lock, + flags); + zxdh_dbg(dev, + "AEQ: srq_id %d is already freed\n", + info->qp_cq_id); + continue; + } + zxdh_srq_add_ref(&iwsrq->ibsrq); + spin_unlock_irqrestore(&rf->srqtable_lock, flags); + if (iwsrq->ibsrq.event_handler) { + ibevent.device = iwsrq->ibsrq.device; + ibevent.event = IB_EVENT_SRQ_ERR; + ibevent.element.srq = &iwsrq->ibsrq; + iwsrq->ibsrq.event_handler( + &ibevent, iwsrq->ibsrq.srq_context); + } + zxdh_srq_rem_ref(&iwsrq->ibsrq); + break; + case ZXDH_AE_RSP_WQE_FLUSH: + if (iwqp && iwqp->is_srq == true) { + if (iwqp->ibqp.event_handler) { + ibevent.device = iwqp->ibqp.device; + ibevent.event = IB_EVENT_QP_LAST_WQE_REACHED; + ibevent.element.qp = &iwqp->ibqp; + iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); + } + } + break; + case ZXDH_AE_REQ_RETRY_EXC_LOC_ACK_OUT_RANGE: + // 0x8f3�������� + if (iwqp) + zxdh_aeq_process_retry_err(iwqp); + + break; + case ZXDH_AE_REQ_RETRY_EXC_TX_WINDOW_GET_ENTRY_ERR: + // 
0x8f5�������� + if (iwqp) + zxdh_aeq_process_entry_err(iwqp); + + break; + default: + if (qp == NULL) + break; + + if (info->ae_src == ZXDH_AE_REQUESTER) { //requestor + zxdh_set_flush_fields_requester(qp, info); + } else if (info->ae_src == + ZXDH_AE_RESPONDER) { //responder + zxdh_set_flush_fields_responder(qp, info); + } else { + pr_err("bad ae_src, ae_src:%d\n", info->ae_src); + break; + } + if (iwqp) + zxdh_aeq_qp_disconn(iwqp); + + break; + } + + if (info->qp) + zxdh_qp_rem_ref(&iwqp->ibqp); + } while (1); + + if (aeqcnt) + zxdh_sc_repost_aeq_tail(dev, sc_aeq->aeq_ring.tail); +} + +/** + * zxdh_ceq_ena_intr - set up device interrupts + * @dev: hardware control device structure + * @ceq_id: ceq of the interrupt to be enabled + */ +static void zxdh_ceq_ena_intr(struct zxdh_sc_dev *dev, u32 ceq_id) +{ + dev->irq_ops->zxdh_ceq_en_irq(dev, ceq_id); +} + +/** + * zxdh_aeq_ena_intr - set up device interrupts + * @dev: hardware control device structure + * @enable: aeq of the interrupt to be enabled + */ +static void zxdh_aeq_ena_intr(struct zxdh_sc_dev *dev, bool enable) +{ + dev->irq_ops->zxdh_aeq_en_irq(dev, enable); +} + +/** + * zxdh_dpc - tasklet for aeq and ceq 0 + * @t: tasklet_struct ptr + */ +static void zxdh_dpc(struct tasklet_struct *t) +{ + struct zxdh_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); + + zxdh_process_aeq(rf); + zxdh_aeq_ena_intr(&rf->sc_dev, true); +} + +/** + * zxdh_ceq_dpc - dpc handler for CEQ + * @t: tasklet_struct ptr + */ +static void zxdh_ceq_dpc(struct tasklet_struct *t) +{ + struct zxdh_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet); + struct zxdh_pci_f *rf = iwceq->rf; + + zxdh_process_ceq(rf, iwceq); + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); +} + +/** + * zxdh_save_msix_info - copy msix vector information to iwarp device + * @rf: RDMA PCI function + * + * Allocate iwdev msix table and copy the msix info to the table + * Return 0 if successful, otherwise return error + */ +static int zxdh_save_msix_info(struct 
zxdh_pci_f *rf) +{ + struct zxdh_qvlist_info *iw_qvlist; + struct zxdh_qv_info *iw_qvinfo; +#ifdef MSIX_DEBUG + struct msix_entry *pmsix; +#else + u32 vector; + u16 entry; +#endif + u32 ceq_idx; + u32 i; + u32 size; + u32 online_cpus_num; + + if (!rf->msix_count) + return -EINVAL; + + size = sizeof(struct zxdh_msix_vector) * rf->msix_count; + size += sizeof(struct zxdh_qvlist_info); + size += sizeof(struct zxdh_qv_info) * rf->msix_count - 1; + rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); + if (!rf->iw_msixtbl) + return -ENOMEM; + + rf->iw_qvlist = + (struct zxdh_qvlist_info *)(&rf->iw_msixtbl[rf->msix_count]); + iw_qvlist = rf->iw_qvlist; + iw_qvinfo = iw_qvlist->qv_info; + iw_qvlist->num_vectors = rf->msix_count; + online_cpus_num = num_online_cpus(); +#ifdef MSIX_DEBUG + pmsix = rf->msix_entries; +#else + entry = rf->msix_entries->entry; +#endif + +#ifdef MSIX_SUPPORT + for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { +#ifdef MSIX_DEBUG + rf->iw_msixtbl[i].idx = pmsix->entry; + rf->iw_msixtbl[i].irq = pmsix->vector; +#else + rf->iw_msixtbl[i].idx = entry + i; + vector = pci_irq_vector(rf->pcidev, (entry + i)); + rf->iw_msixtbl[i].irq = vector; +#endif + if (rf->msix_count <= (online_cpus_num + 1)) + rf->iw_msixtbl[i].cpu_affinity = ceq_idx; + else + rf->iw_msixtbl[i].cpu_affinity = + (ceq_idx % online_cpus_num); + if (!i) { + iw_qvinfo->aeq_idx = 0; + iw_qvinfo->ceq_idx = ZXDH_Q_INVALID_IDX; + } else { + iw_qvinfo->aeq_idx = ZXDH_Q_INVALID_IDX; + iw_qvinfo->ceq_idx = ceq_idx++; + } + iw_qvinfo->itr_idx = ZXDH_IDX_NOITR; + iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; +#ifdef MSIX_DEBUG + pmsix++; +#endif + } +#endif + return 0; +} + +/** + * zxdh_aeq_handler - interrupt handler for aeq + * @irq: Interrupt request number + * @data: RDMA PCI function + */ +static irqreturn_t zxdh_aeq_handler(int irq, void *data) +{ + struct zxdh_pci_f *rf = data; + + tasklet_schedule(&rf->dpc_tasklet); + + return IRQ_HANDLED; +} + +/** + * zxdh_ceq_handler - interrupt 
handler for ceq + * @irq: interrupt request number + * @data: ceq pointer + */ +static irqreturn_t zxdh_ceq_handler(int irq, void *data) +{ + struct zxdh_ceq *iwceq = data; + + if (iwceq->irq != irq) + dev_err(idev_to_dev(&iwceq->rf->sc_dev), + "expected irq = %d received irq = %d\n", iwceq->irq, + irq); + tasklet_schedule(&iwceq->dpc_tasklet); + + return IRQ_HANDLED; +} + +/** + * zxdh_destroy_irq - destroy device interrupts + * @msix_vec: msix vector to disable irq + * @dev_id: parameter to pass to free_irq (used during irq setup) + * + * The function is called when destroying aeq/ceq + */ +static void zxdh_destroy_irq(struct zxdh_msix_vector *msix_vec, void *dev_id) +{ + irq_set_affinity_hint(msix_vec->irq, NULL); + free_irq(msix_vec->irq, dev_id); +} + +/** + * zxdh_destroy_cqp - destroy control qp + * @rf: RDMA PCI function + * @free_hwcqp: 1 if hw cqp should be freed + * + * Issue destroy cqp request and + * free the resources associated with the cqp + */ +static void zxdh_destroy_cqp(struct zxdh_pci_f *rf, bool free_hwcqp) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp *cqp = &rf->cqp; + int status = 0; + + if (rf->cqp_cmpl_wq) + destroy_workqueue(rf->cqp_cmpl_wq); + status = zxdh_sc_cqp_destroy(dev->cqp, free_hwcqp); + if (status) + zxdh_dbg(dev, "ERR: Destroy CQP failed %d\n", status); + + zxdh_cleanup_pending_cqp_op(rf); + dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, + cqp->sq.pa); + cqp->sq.va = NULL; + kfree(cqp->scratch_array); + cqp->scratch_array = NULL; + kfree(cqp->cqp_requests); + cqp->cqp_requests = NULL; +} + +static void zxdh_destroy_virt_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_aeq *aeq = &rf->aeq; + u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); + dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; + + zxdh_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); + zxdh_free_pble(rf->pble_rsrc, &aeq->palloc); + vfree(aeq->mem.va); +} + +static int zxdh_destroy_aeq_reg_cmd(struct zxdh_sc_dev *dev, struct 
zxdh_sc_aeq *aeq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + u32 tail = 0, val = 0; + int ret_code = 0; + u64 scratch = 0; + + cqp = dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_AEQC_INTR_IDX, aeq->msix_idx) | + FIELD_PREP(ZXDH_AEQC_AEQ_HEAD, 0) | + FIELD_PREP(ZXDH_AEQC_LEAF_PBL_SIZE, aeq->pbl_chunk_size) | + FIELD_PREP(ZXDH_AEQC_VIRTUALLY_MAPPED, aeq->virtual_map) | + FIELD_PREP(ZXDH_AEQC_AEQ_SIZE, aeq->elem_cnt) | + FIELD_PREP(ZXDH_AEQC_AEQ_STATE, 1); + dma_wmb(); + set_64bit_val(wqe, 8, hdr); + + set_64bit_val(wqe, 16, + aeq->virtual_map ? aeq->first_pm_pbl_idx : + aeq->aeq_elem_pa); + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_AEQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + val = readl(dev->hw->hw_addr + C_RDMA_CQP_TAIL); + tail = (u32)FIELD_GET(ZXDH_CQPTAIL_WQTAIL, val); + + zxdh_sc_cqp_post_sq(cqp); + + ret_code = zxdh_cqp_poll_registers(cqp, tail, + dev->hw_attrs.max_done_count); + + if (ret_code) + return ret_code; + + return 0; +} + +/** + * zxdh_destroy_aeq_reg - destroy aeq + * @rf: RDMA PCI function + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +static void zxdh_destroy_aeq_reg(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + int status = -EBUSY; +#ifdef MSIX_SUPPORT + zxdh_destroy_irq(rf->iw_msixtbl, rf); +#endif + aeq->sc_aeq.size = 0; + status = zxdh_destroy_aeq_reg_cmd(dev, &aeq->sc_aeq); + if (status) + zxdh_dbg(dev, "ERR: Destroy AEQ failed %d\n", status); + + if (aeq->virtual_map) + zxdh_destroy_virt_aeq(rf); + else { + dma_free_coherent(dev->hw->device, aeq->mem.size, 
aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } +} + +/** + * zxdh_destroy_aeq - destroy aeq + * @rf: RDMA PCI function + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +static void zxdh_destroy_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + int status = -EBUSY; +#ifdef MSIX_SUPPORT + if (aeq->irq_sta == true) { + aeq->irq_sta = false; + zxdh_destroy_irq(rf->iw_msixtbl, rf); + } +#endif + aeq->sc_aeq.size = 0; + status = zxdh_cqp_aeq_cmd(dev, &aeq->sc_aeq, ZXDH_OP_AEQ_DESTROY); + if (status) + zxdh_dbg(dev, "ERR: Destroy AEQ failed %d\n", status); + + if (aeq->virtual_map) + zxdh_destroy_virt_aeq(rf); + else { + dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } +} + +/** + * zxdh_destroy_ceq - destroy ceq + * @rf: RDMA PCI function + * @iwceq: ceq to be destroyed + * + * Issue a destroy ceq request and + * free the resources associated with the ceq + */ +static void zxdh_destroy_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + unsigned long flags; + + if (rf->reset) + goto exit; + + status = zxdh_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1); + if (status) { + zxdh_dbg(dev, "ERR: CEQ destroy command failed %d\n", status); + goto exit; + } + + status = zxdh_sc_cceq_destroy_done(&iwceq->sc_ceq); + if (status) + zxdh_dbg(dev, "ERR: CEQ destroy completion failed %d\n", + status); +exit: + spin_lock_irqsave(&iwceq->ce_lock, flags); + iwceq->sc_ceq.valid_ceq = false; + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va, + iwceq->mem.pa); + iwceq->mem.va = NULL; +} + +/** + * zxdh_del_ceq_0 - destroy ceq 0 + * @rf: RDMA PCI function + * + * Disable the ceq 0 interrupt and destroy the ceq 0 + */ +static void zxdh_del_ceq_0(struct zxdh_pci_f *rf) 
+{ + struct zxdh_ceq *iwceq = rf->ceqlist; + struct zxdh_msix_vector *msix_vec; + + msix_vec = &rf->iw_msixtbl[1]; + +#ifdef MSIX_SUPPORT + if (iwceq->irq_sta == true) { + iwceq->irq_sta = false; + zxdh_destroy_irq(msix_vec, iwceq); + } +#endif + zxdh_destroy_ceq(rf, iwceq); + rf->sc_dev.ceq_valid = false; + rf->ceqs_count = 0; +} + +/** + * zxdh_del_ceqs - destroy all ceq's except CEQ 0 + * @rf: RDMA PCI function + * + * Go through all of the device ceq's, except 0, and for each + * ceq disable the ceq interrupt and destroy the ceq + */ +static void zxdh_del_ceqs(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq = &rf->ceqlist[1]; + + struct zxdh_msix_vector *msix_vec; + u32 i = 0; + unsigned long flags; + + msix_vec = &rf->iw_msixtbl[2]; + for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { +#ifdef MSIX_SUPPORT + if (iwceq->irq_sta == true) { + iwceq->irq_sta = false; + zxdh_destroy_irq(msix_vec, iwceq); + } +#endif + zxdh_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, + ZXDH_OP_CEQ_DESTROY); + spin_lock_irqsave(&iwceq->ce_lock, flags); + iwceq->sc_ceq.valid_ceq = false; + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, + iwceq->mem.va, iwceq->mem.pa); + iwceq->mem.va = NULL; + } + + rf->ceqs_count = 1; +} + +/** + * zxdh_destroy_ccq - destroy control cq + * @rf: RDMA PCI function + * + * Issue destroy ccq request and + * free the resources associated with the ccq + */ +static void zxdh_destroy_ccq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_ccq *ccq = &rf->ccq; + int status = 0; + + if (!rf->reset) + status = zxdh_sc_ccq_destroy(dev->ccq, 0, true); + if (status) + zxdh_dbg(dev, "ERR: CCQ destroy failed %d\n", status); + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va, + ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + dma_free_coherent(dev->hw->device, ccq->shadow_area.size, + ccq->shadow_area.va, ccq->shadow_area.pa); + ccq->shadow_area.va = NULL; 
+ zxdh_free_rsrc(rf, rf->allocated_cqs, + ccq->sc_cq.cq_uk.cq_id - dev->base_cqn); +} + +void zxdh_del_data_cap_objects(struct zxdh_sc_dev *dev) +{ + unsigned int i; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem *mem = NULL; + + for (i = 0; i < dev->data_cap_sd.sd_cnt; i++) { + if (!dev->data_cap_sd.entry[i].valid) + continue; + + sd_entry = &dev->data_cap_sd.entry[i]; + mem = &sd_entry->u.bp.addr; + if (!mem || !mem->va) + pr_err("HMC: error cqp sd mem\n"); + else { + dma_free_coherent(dev->hw->device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + } + } +} + +void zxdh_del_hmc_objects(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info) +{ + unsigned int i, sd_idx; + u32 del_sd_cnt = 0; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_dma_mem *mem = NULL; + struct zxdh_dma_mem *mem_harware = NULL; + + for (i = 0; i < hmc_info->hmc_entry_total; i++) { + if (!hmc_info->sd_table.sd_entry[i].valid) + continue; + zxdh_prep_remove_sd_bp(hmc_info, i); + hmc_info->sd_indexes[del_sd_cnt] = (u16)i; + del_sd_cnt++; + } + + for (i = 0; i < del_sd_cnt; i++) { + sd_idx = hmc_info->sd_indexes[i]; + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + mem = &sd_entry->u.bp.addr; + if (!mem || !mem->va) + pr_err("HMC: error cqp sd mem\n"); + else { + dma_free_coherent(dev->hw->device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + } + + mem_harware = &sd_entry->u.bp.addr_hardware; + if (mem_harware && mem_harware->va) { + dma_free_coherent(dev->hw->device, mem_harware->size, + mem_harware->va, mem_harware->pa); + mem_harware->va = NULL; + } + } +} + +/** + * zxdh_create_hmc_objs - create all hmc objects for the device + * @rf: RDMA PCI function + * @privileged: permission to create HMC objects + * + * Create the device hmc objects and allocate hmc pages + * Return 0 if successful, otherwise clean up and return error + */ +static int zxdh_create_hmc_objs(struct zxdh_pci_f *rf, bool privileged) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct 
zxdh_hmc_create_obj_info info = {}; + int i, status = 0; + + info.hmc_info = dev->hmc_info; + info.privileged = privileged; + info.add_sd_cnt = 0; + + for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { + if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) { + info.rsrc_type = iw_hmc_obj_types[i]; + info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; + status = zxdh_sc_create_hmc_obj(dev, &info); + if (status) { + zxdh_del_hmc_objects(&rf->sc_dev, + rf->sc_dev.hmc_info); + zxdh_dbg( + dev, + "ERR: create obj type %d status = %d\n", + iw_hmc_obj_types[i], status); + break; + } + } + } + + return status; +} + +static int zxdh_create_hmcobjs_dpuddr(struct zxdh_pci_f *rf) +{ + u32 sd_lmt, hmc_entry_total = 0, j = 0, k = 0, mem_size = 0, cnt = 0; + u64 fpm_limit = 0; + struct zxdh_hmc_info *hmc_info; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_virt_mem virt_mem = {}; + struct zxdh_hmc_obj_info *obj_info; + + hmc_info = dev->hmc_info; + + zxdh_hmc_dpu_capability(dev); + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register(dev, hmc_info->hmc_obj, k, + dev->vhca_id); + } + + obj_info = hmc_info->hmc_obj; + for (k = ZXDH_HMC_IW_PBLE; k < ZXDH_HMC_IW_MAX; k++) { + cnt = obj_info[k].cnt; + + fpm_limit = obj_info[k].size * cnt; + + if (fpm_limit == 0) + continue; + + if (k == ZXDH_HMC_IW_PBLE) + hmc_info->hmc_first_entry_pble = hmc_entry_total; + + if (k == ZXDH_HMC_IW_PBLE_MR) + hmc_info->hmc_first_entry_pble_mr = hmc_entry_total; + + sd_lmt = (u32)((fpm_limit - 1) / ZXDH_HMC_DIRECT_BP_SIZE); + sd_lmt += 1; + + if (sd_lmt == 1) { + hmc_entry_total++; + } else { + for (j = 0; j < sd_lmt - 1; j++) + hmc_entry_total++; + + if (fpm_limit % ZXDH_HMC_DIRECT_BP_SIZE) + hmc_entry_total++; + } + } + + mem_size = sizeof(struct zxdh_hmc_sd_entry) * hmc_entry_total; + virt_mem.size = mem_size; + virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL); + if (!virt_mem.va) { + zxdh_dbg( + dev, + "HMC: failed to allocate memory for sd_entry buffer\n"); + return -ENOMEM; + 
} + hmc_info->sd_table.sd_entry = virt_mem.va; + hmc_info->hmc_entry_total = hmc_entry_total; + + return 0; +} + +/** + * zxdh_create_cqp - create control qp + * @rf: RDMA PCI function + * + * Return 0, if the cqp and all the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_cqp(struct zxdh_pci_f *rf) +{ + u32 sqsize = ZXDH_CQP_SW_SQSIZE_2048; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp_init_info cqp_init_info = {}; + struct zxdh_cqp *cqp = + &rf->cqp; // this struct will be transferred to CQE. + u16 maj_err, min_err; + int i, status; + + cqp->cqp_requests = + kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); + if (!cqp->cqp_requests) + return -ENOMEM; + + cqp->scratch_array = + kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); + if (!cqp->scratch_array) { + status = -ENOMEM; + goto err_scratch; + } + + dev->cqp = &cqp->sc_cqp; + dev->cqp->dev = dev; + cqp->sq.size = ALIGN(sizeof(struct zxdh_cqp_sq_wqe) * sqsize, + ZXDH_CQP_ALIGNMENT); + cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size, + &cqp->sq.pa, GFP_KERNEL); + if (!cqp->sq.va) { + status = -ENOMEM; + goto err_sq; + } + + // populate the cqp init info + cqp_init_info.dev = dev; + cqp_init_info.sq_size = sqsize; + cqp_init_info.sq = cqp->sq.va; + cqp_init_info.sq_pa = cqp->sq.pa; + if (dev->privileged) { + cqp_init_info.hmc_profile = rf->rsrc_profile; + cqp_init_info.ena_vf_count = rf->max_rdma_vfs; + } + cqp_init_info.scratch_array = cqp->scratch_array; + cqp_init_info.protocol_used = rf->protocol_used; + memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params, + sizeof(cqp_init_info.dcqcn_params)); + + cqp_init_info.hw_maj_ver = ZXDH_CQPHC_HW_MAJVER_GEN_2; + status = zxdh_sc_cqp_init(dev->cqp, &cqp_init_info); + if (status) { + pr_err("ERR: cqp init status %d\n", status); + goto err_ctx; + } + + spin_lock_init(&cqp->req_lock); + spin_lock_init(&cqp->compl_lock); + + status = zxdh_sc_cqp_create(dev->cqp, 
&maj_err, &min_err); + if (status) { + zxdh_dbg( + dev, + "ERR: cqp create failed - status %d maj_err %d min_err %d\n", + status, maj_err, min_err); + goto err_create; + } + + INIT_LIST_HEAD(&cqp->cqp_avail_reqs); + INIT_LIST_HEAD(&cqp->cqp_pending_reqs); + + /* init the waitqueue of the cqp_requests and add them to the list */ + for (i = 0; i < sqsize; i++) { + init_waitqueue_head(&cqp->cqp_requests[i].waitq); + list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); + } + init_waitqueue_head(&cqp->remove_wq); + return 0; + +err_create: +err_ctx: + dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va, + cqp->sq.pa); + cqp->sq.va = NULL; +err_sq: + kfree(cqp->scratch_array); + cqp->scratch_array = NULL; +err_scratch: + kfree(cqp->cqp_requests); + cqp->cqp_requests = NULL; + + return status; +} + +/** + * zxdh_create_ccq - create control cq + * @rf: RDMA PCI function + * + * Return 0, if the ccq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_ccq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_ccq_init_info info = {}; + struct zxdh_ccq *ccq = &rf->ccq; + u32 cq_num = 0; + int status; + + status = zxdh_alloc_rsrc( + rf, rf->allocated_cqs, rf->max_cq, &cq_num, + &rf->next_cq); /* cq_num is the allocated cq_id. 
*/ + if (status) + return status; + cq_num += dev->base_cqn; + info.cq_num = cq_num; + dev->ccq = &ccq->sc_cq; + dev->ccq->dev = dev; + info.dev = dev; + ccq->shadow_area.size = sizeof(struct zxdh_cq_shadow_area); + ccq->mem_cq.size = ALIGN(sizeof(struct zxdh_cqe) * IW_CCQ_SIZE, + ZXDH_CQ0_ALIGNMENT); + ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size, + &ccq->mem_cq.pa, GFP_KERNEL); + if (!ccq->mem_cq.va) + return -ENOMEM; + + ccq->shadow_area.va = + dma_alloc_coherent(dev->hw->device, ccq->shadow_area.size, + &ccq->shadow_area.pa, GFP_KERNEL); + if (!ccq->shadow_area.va) { + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, + ccq->mem_cq.va, ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + zxdh_free_rsrc(rf, rf->allocated_cqs, cq_num - dev->base_cqn); + return -ENOMEM; + } + + ccq->sc_cq.back_cq = ccq; + /* populate the ccq init info */ + info.cq_base = ccq->mem_cq.va; + info.cq_pa = ccq->mem_cq.pa; + info.num_elem = IW_CCQ_SIZE; + info.shadow_area = ccq->shadow_area.va; + info.shadow_area_pa = ccq->shadow_area.pa; + info.ceqe_mask = false; + info.ceq_id_valid = true; + info.ceq_id = dev->base_ceqn; + info.ceq_index = 0; + info.shadow_read_threshold = 16; + info.cqe_size = ZXDH_CQE_SIZE_64; + info.cq_max = 0; + info.cq_period = 0; + info.scqe_break_moderation_en = false; + info.cq_st = 0; + info.is_in_list_cnt = 0; + + status = zxdh_sc_ccq_init(dev->ccq, &info); + if (status) + goto exit; + + status = zxdh_sc_ccq_create(dev->ccq, 0, true); +exit: + if (status) { + dma_free_coherent(dev->hw->device, ccq->mem_cq.size, + ccq->mem_cq.va, ccq->mem_cq.pa); + ccq->mem_cq.va = NULL; + dma_free_coherent(dev->hw->device, ccq->shadow_area.size, + ccq->shadow_area.va, ccq->shadow_area.pa); + ccq->shadow_area.va = NULL; + zxdh_free_rsrc(rf, rf->allocated_cqs, cq_num - dev->base_cqn); + } + + return status; +} + +/** + * zxdh_cfg_ceq_vector - set up the msix interrupt vector for + * ceq + * @rf: RDMA PCI function + * @iwceq: ceq associated with the vector + 
* @ceq_id: the id number of the iwceq + * @msix_vec: interrupt vector information + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static int zxdh_cfg_ceq_vector(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq, + u32 ceq_id, struct zxdh_msix_vector *msix_vec) +{ +#ifndef MSIX_SUPPORT + return 0; +#endif + int status; + + tasklet_setup(&iwceq->dpc_tasklet, zxdh_ceq_dpc); + status = request_irq(msix_vec->irq, zxdh_ceq_handler, 0, "CEQ", iwceq); + cpumask_clear(&msix_vec->mask); + cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); + irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); + if (status) { + pr_err("ERR: ceq irq config fail\n"); + return status; + } + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + iwceq->irq_sta = true; + msix_vec->ceq_id = ceq_id; + return 0; +} + +/** + * zxdh_cfg_aeq_vector - set up the msix vector for aeq + * @rf: RDMA PCI function + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static int zxdh_cfg_aeq_vector(struct zxdh_pci_f *rf) +{ +#ifndef MSIX_SUPPORT + return 0; +#endif + struct zxdh_msix_vector *msix_vec = rf->iw_msixtbl; + u32 ret = 0; + + tasklet_setup(&rf->dpc_tasklet, zxdh_dpc); + ret = request_irq(msix_vec->irq, zxdh_aeq_handler, 0, "AEQ", rf); + if (ret) { + pr_err("ERR: aeq irq config fail\n"); + return -EINVAL; + } + rf->sc_dev.irq_ops->zxdh_cfg_aeq(&rf->sc_dev, msix_vec->idx); + rf->aeq.irq = msix_vec->irq; + rf->aeq.msix_idx = msix_vec->idx; + rf->aeq.irq_sta = true; + return 0; +} + +/** + * zxdh_create_ceq - create completion event queue + * @rf: RDMA PCI function + * @iwceq: pointer to the ceq resources to be created + * @ceq_id: the id number of the iwceq + * + * Return 0, if the ceq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_ceq(struct zxdh_pci_f *rf, struct zxdh_ceq *iwceq, + 
u32 ceq_id) +{ + int status; + struct zxdh_ceq_init_info info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u64 scratch; + u32 ceq_size; + u32 log2_ceq_size; + + info.ceq_id = ceq_id; + info.ceq_index = ceq_id - dev->base_ceqn; + iwceq->rf = rf; + ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt, + dev->hw_attrs.max_hw_ceq_size); + ceq_size = roundup_pow_of_two(ceq_size); + log2_ceq_size = order_base_2(ceq_size); + + iwceq->mem.size = + ALIGN(sizeof(struct zxdh_ceqe) * ceq_size, ZXDH_CEQ_ALIGNMENT); + iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size, + &iwceq->mem.pa, GFP_KERNEL); + if (!iwceq->mem.va) + return -ENOMEM; + + info.ceqe_base = iwceq->mem.va; + info.ceqe_pa = iwceq->mem.pa; + info.elem_cnt = ceq_size; + info.log2_elem_size = log2_ceq_size; + info.msix_idx = iwceq->msix_idx; + iwceq->sc_ceq.ceq_id = ceq_id; + iwceq->sc_ceq.valid_ceq = true; + info.dev = dev; + scratch = (uintptr_t)&rf->cqp.sc_cqp; + status = zxdh_sc_ceq_init(&iwceq->sc_ceq, &info); + + if (!status) { + if (dev->ceq_valid) + status = zxdh_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, + ZXDH_OP_CEQ_CREATE); + else + status = zxdh_sc_cceq_create(&iwceq->sc_ceq, scratch); + } + + if (status) { + dma_free_coherent(dev->hw->device, iwceq->mem.size, + iwceq->mem.va, iwceq->mem.pa); + iwceq->mem.va = NULL; + } + + return status; +} + +/** + * zxdh_setup_ceq_0 - create CEQ 0 and it's interrupt resource + * @rf: RDMA PCI function + * + * Allocate a list for all device completion event queues + * Create the ceq 0 and configure it's msix interrupt vector + * Return 0, if successfully set up, otherwise return error + */ +static int zxdh_setup_ceq_0(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq; + struct zxdh_msix_vector *msix_vec; + int status = 0; + u32 num_ceqs; + + num_ceqs = min(rf->msix_count, rf->sc_dev.max_ceqs); + rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); + if (!rf->ceqlist) { + status = -ENOMEM; + goto exit; + } + + iwceq = 
&rf->ceqlist[0]; + //0 is aeq, 1~xx is ceq + msix_vec = &rf->iw_msixtbl[1]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = zxdh_create_ceq(rf, iwceq, rf->sc_dev.base_ceqn); + if (status) { + pr_err("ERR: create ceq status = %d\n", status); + goto exit; + } + + spin_lock_init(&iwceq->ce_lock); + status = zxdh_cfg_ceq_vector(rf, iwceq, rf->sc_dev.base_ceqn, msix_vec); + if (status) { + zxdh_destroy_ceq(rf, iwceq); + goto exit; + } + + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); + rf->ceqs_count++; + +exit: + if (status && !rf->ceqs_count) { + kfree(rf->ceqlist); + rf->ceqlist = NULL; + return status; + } + rf->sc_dev.ceq_valid = true; + + return 0; +} + +/** + * zxdh_setup_ceqs - manage the device ceq's and their interrupt resources + * @rf: RDMA PCI function + * + * Allocate a list for all device completion event queues + * Create the ceq's and configure their msix interrupt vectors + * Return 0, if ceqs are successfully set up, otherwise return error + */ +static int zxdh_setup_ceqs(struct zxdh_pci_f *rf) +{ + u32 i; + u32 ceq_id; + u32 ceq_id_offset; + struct zxdh_ceq *iwceq; + struct zxdh_msix_vector *msix_vec; + int status; + u32 num_ceqs; + + num_ceqs = min(rf->msix_count, rf->sc_dev.max_ceqs); + i = 2; + for (ceq_id_offset = 1; ceq_id_offset < num_ceqs; + i++, ceq_id_offset++) { + iwceq = &rf->ceqlist[ceq_id_offset]; + ceq_id = rf->sc_dev.base_ceqn + ceq_id_offset; + msix_vec = &rf->iw_msixtbl[i]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = zxdh_create_ceq(rf, iwceq, ceq_id); + if (status) { + pr_err("ERR: create ceq status = %d\n", status); + goto del_ceqs; + } + spin_lock_init(&iwceq->ce_lock); + status = zxdh_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); + if (status) { + zxdh_destroy_ceq(rf, iwceq); + goto del_ceqs; + } + + zxdh_ceq_ena_intr(&rf->sc_dev, iwceq->sc_ceq.ceq_id); + rf->ceqs_count++; + } + + return 0; + +del_ceqs: + zxdh_del_ceqs(rf); + + return status; +} + +#if 0 +static 
int zxdh_create_virt_aeq(struct zxdh_pci_f *rf, u32 size) +{ + struct zxdh_aeq *aeq = &rf->aeq; + dma_addr_t *pg_arr; + u32 pg_cnt; + int status; + + if (rf->rdma_ver < ZXDH_GEN_2) + return -EOPNOTSUPP; + + aeq->mem.size = sizeof(struct zxdh_sc_aeqe) * size; + aeq->mem.va = vzalloc(aeq->mem.size); + + if (!aeq->mem.va) + return -ENOMEM; + + pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE); + status = zxdh_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); + if (status) { + vfree(aeq->mem.va); + return status; + } + + pg_arr = (dma_addr_t *)aeq->palloc.level1.addr; + status = zxdh_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); + if (status) { + zxdh_free_pble(rf->pble_rsrc, &aeq->palloc); + vfree(aeq->mem.va); + return status; + } + + return 0; +} +#endif + +/** + * zxdh_create_aeq - create async event queue + * @rf: RDMA PCI function + * + * Return 0, if the aeq and the resources associated with it + * are successfully created, otherwise return error + */ +static int zxdh_create_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_aeq_init_info info = {}; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_aeq *aeq = &rf->aeq; + struct zxdh_hmc_info *hmc_info = rf->sc_dev.hmc_info; + u32 aeq_size; + u8 multiplier = (rf->protocol_used == ZXDH_IWARP_PROTOCOL_ONLY) ? 2 : 1; + int status; + + aeq_size = multiplier * hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt + + hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt + + hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt; + aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size); + + aeq->mem.size = ALIGN(sizeof(struct zxdh_sc_aeqe) * aeq_size, + ZXDH_AEQ_ALIGNMENT); + aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size, + &aeq->mem.pa, + GFP_KERNEL | __GFP_NOWARN); + + if (aeq->mem.va) + goto skip_virt_aeq; + + pr_err("aeq_size out of range, failed to apply for physical memory!\n"); + return -ENOMEM; + +#if 0 + /* physically mapped aeq failed. 
setup virtual aeq */ + status = zxdh_create_virt_aeq(rf, aeq_size); + if (status) + return status; + + info.virtual_map = true; + aeq->virtual_map = info.virtual_map; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = aeq->palloc.level1.idx; +#endif + +skip_virt_aeq: + info.aeqe_base = aeq->mem.va; + info.aeq_elem_pa = aeq->mem.pa; + info.elem_cnt = aeq_size; + info.dev = dev; + info.msix_idx = rf->iw_msixtbl->idx; + status = zxdh_sc_aeq_init(&aeq->sc_aeq, &info); + if (status) + goto err; + + status = zxdh_cqp_aeq_create(&aeq->sc_aeq); + if (status) + goto err; + + return 0; + +err: + if (aeq->virtual_map) + zxdh_destroy_virt_aeq(rf); + else { + dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va, + aeq->mem.pa); + aeq->mem.va = NULL; + } + return status; +} + +/** + * zxdh_setup_aeq - set up the device aeq + * @rf: RDMA PCI function + * + * Create the aeq and configure its msix interrupt vector + * Return 0 if successful, otherwise return error + */ +static int zxdh_setup_aeq(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + + status = zxdh_create_aeq(rf); + if (status) + return status; + status = zxdh_cfg_aeq_vector(rf); + if (status) { + zxdh_init_destroy_aeq(rf); + return status; + } + zxdh_aeq_ena_intr(dev, true); + return 0; +} + +/** + * zxdh_hmc_setup - create hmc objects for the device + * @rf: RDMA PCI function + * + * Set up the device private memory space for the number and size of + * the hmc objects and create the objects + * Return 0 if successful, otherwise return error + */ +static int zxdh_hmc_setup(struct zxdh_pci_f *rf) +{ + int status; + struct zxdh_sc_dev *dev = &rf->sc_dev; + + status = zxdh_cfg_fpm_val(dev); + if (status) + return status; + + status = zxdh_create_hmc_objs(rf, true); + + return status; +} + +static int zxdh_data_cap_setup(struct zxdh_pci_f *rf) +{ + int status; + struct zxdh_sc_dev *dev = &rf->sc_dev; + + status = zxdh_sc_create_date_cap_obj(dev); + if (status) { + 
zxdh_del_data_cap_objects(&rf->sc_dev); + zxdh_dbg(dev, "ERR: create data cap status = %d\n", status); + } + return status; +} + +/** + * zxdh_del_init_mem - deallocate memory resources + * @rf: RDMA PCI function + */ +static void zxdh_del_init_mem(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + + kfree(dev->hmc_info->sd_table.sd_entry); + dev->hmc_info->sd_table.sd_entry = NULL; + vfree(rf->mem_rsrc); + rf->mem_rsrc = NULL; + + if (rf->ceqlist) { + kfree(rf->ceqlist); + rf->ceqlist = NULL; + } + if (rf->iw_msixtbl) { + kfree(rf->iw_msixtbl); + rf->iw_msixtbl = NULL; + } + kfree(rf->hmc_info_mem); + rf->hmc_info_mem = NULL; +} + +/** + * zxdh_initialize_dev - initialize device + * @rf: RDMA PCI function + * + * Allocate memory for the hmc objects and initialize iwdev + * Return 0 if successful, otherwise clean up the resources + * and return error + */ +static int zxdh_initialize_dev(struct zxdh_pci_f *rf) +{ + struct zxdh_device_init_info info = {}; + int ret = 0; + + info.bar0 = rf->hw.hw_addr; + info.privileged = !rf->ftype; + info.max_vfs = rf->max_rdma_vfs; + info.hw = &rf->hw; + rf->vlan_parse_en = 1; + ret = zxdh_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); + + return ret; +} + +/** + * zxdh_rt_deinit_hw - clean up the zrdma device resources + * @iwdev: zrdma device + * + * remove the mac ip entry and ipv4/ipv6 addresses, destroy the + * device queues and free the pble and the hmc objects + */ +void zxdh_rt_deinit_hw(struct zxdh_device *iwdev) +{ + switch (iwdev->init_state) { + case AEQ_CREATED: + case PBLE_CHUNK_MEM: + case CEQS_CREATED: + default: + dev_warn(idev_to_dev(&iwdev->rf->sc_dev), + "rt bad init_state = %d\n", iwdev->init_state); + break; + } + + if (iwdev->cleanup_wq) + destroy_workqueue(iwdev->cleanup_wq); +} + +static int zxdh_setup_init_state(struct zxdh_pci_f *rf) +{ + int status; + + status = zxdh_save_msix_info(rf); + if (status) + return status; + rf->hw.device = &rf->pcidev->dev; + + 
mutex_init(&rf->sc_dev.vchnl_mutex); + status = zxdh_initialize_dev(rf); + if (status) + goto clean_msixtbl; + + return 0; + +clean_msixtbl: + kfree(rf->iw_msixtbl); + rf->iw_msixtbl = NULL; + return status; +} + +/** + * zxdh_get_used_rsrc - determine resources used internally + * @iwdev: zrdma device + * + * Called at the end of open to get all internal allocations + */ +static void zxdh_get_used_rsrc(struct zxdh_device *iwdev) +{ + iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, + iwdev->rf->max_pd, 0); + iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, + iwdev->rf->max_qp, 0); + iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, + iwdev->rf->max_cq, 0); + iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, + iwdev->rf->max_mr, 0); + iwdev->rf->used_srqs = find_next_zero_bit(iwdev->rf->allocated_srqs, + iwdev->rf->max_srq, 0); +} + +static void zxdh_shutdown_vhca(struct zxdh_pci_f *rf) +{ + u32 invalid_sid = 63; + u32 qpc_axi_info; + writel(invalid_sid, (u32 __iomem *)(rf->sc_dev.hw->hw_addr + C_RDMAIO_TABLE2)); + qpc_axi_info = readl((u32 __iomem *)(rf->sc_dev.hw->hw_addr + C_HMC_QPC_RX)); + qpc_axi_info |= (3 << 2); + writel(qpc_axi_info, + (u32 __iomem *)(rf->sc_dev.hw->hw_addr + C_HMC_QPC_RX)); +} + +void zxdh_ctrl_deinit_hw(struct zxdh_pci_f *rf) +{ + uint16_t vf_id; + struct zxdh_vfdev *vf_dev = NULL; + enum init_completion_state state = rf->init_state; + + rf->init_state = INVALID_STATE; + if (state > AEQ_CREATED) + zxdh_destroy_aeq(rf); + else if (state == AEQ_CREATED) + zxdh_destroy_aeq_reg(rf); + if (rf->rsrc_created) { + zxdh_destroy_pble_prm(rf->pble_rsrc); + zxdh_destroy_pble_prm(rf->pble_mr_rsrc); + zxdh_del_ceqs(rf); + rf->rsrc_created = false; + } + + switch (state) { + case CEQ0_CREATED: + zxdh_del_ceq_0(rf); + fallthrough; + case CCQ_CREATED: + zxdh_destroy_ccq(rf); + fallthrough; + case HW_RSRC_INITIALIZED: + case HMC_OBJS_CREATED: + zxdh_del_hmc_objects(&rf->sc_dev, 
rf->sc_dev.hmc_info); + fallthrough; + case DATA_CAP_CREATED: + zxdh_del_data_cap_objects(&rf->sc_dev); + fallthrough; + case CQP_QP_CREATED: + zxdh_destroy_cqp_qp(rf); + fallthrough; + case SMMU_PAGETABLE_INITIALIZED: + if (!rf->ftype) + zxdh_smmu_pagetable_exit(&rf->sc_dev); + fallthrough; + case CQP_CREATED: + zxdh_destroy_cqp(rf, !rf->reset); + fallthrough; + case INITIAL_STATE: + zxdh_del_init_mem(rf); + break; + case INVALID_STATE: + default: + pr_warn("ctrl bad init_state = %d\n", rf->init_state); + break; + } + + if (rf->ftype == 0) { + for (vf_id = 0; vf_id < rf->max_rdma_vfs; vf_id++) { + vf_dev = zxdh_find_vf_dev(&rf->sc_dev, vf_id); + if (vf_dev) { + zxdh_del_hmc_objects( + &rf->sc_dev, + &rf->sc_dev.vf_dev[vf_id]->hmc_info); + zxdh_put_vfdev(&rf->sc_dev, + rf->sc_dev.vf_dev[vf_id]); + zxdh_remove_vf_dev(&rf->sc_dev, + rf->sc_dev.vf_dev[vf_id]); + } + } + } + zxdh_shutdown_vhca(rf); +} + +/** + * zxdh_rt_init_hw - Initializes runtime portion of HW + * @iwdev: zrdma device + * + * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup zrdma + * device resource objects. 
+ */ +int zxdh_rt_init_hw(struct zxdh_device *iwdev) +{ + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_dev *dev = &rf->sc_dev; + int status; + + zxdh_sc_dev_qplist_init(dev); + do { + if (!rf->rsrc_created) { + status = zxdh_setup_ceqs(rf); + if (status) + break; + + iwdev->init_state = CEQS_CREATED; + + rf->pble_rsrc->fpm_base_addr = + rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE] + .base; + rf->sc_dev.hmc_info->pble_hmc_index = + rf->sc_dev.hmc_info->hmc_first_entry_pble; + status = zxdh_hmc_init_pble(&rf->sc_dev, rf->pble_rsrc, + PBLE_QUEUE); + if (status) { + zxdh_del_ceqs(rf); + break; + } + rf->pble_mr_rsrc->fpm_base_addr = + rf->sc_dev.hmc_info + ->hmc_obj[ZXDH_HMC_IW_PBLE_MR] + .base; + rf->sc_dev.hmc_info->pble_mr_hmc_index = + rf->sc_dev.hmc_info->hmc_first_entry_pble_mr; + status = zxdh_hmc_init_pble(&rf->sc_dev, + rf->pble_mr_rsrc, PBLE_MR); + if (status) { + zxdh_destroy_pble_prm(rf->pble_rsrc); + zxdh_del_ceqs(rf); + break; + } + + iwdev->init_state = PBLE_CHUNK_MEM; + rf->rsrc_created = true; + } + + iwdev->device_cap_flags = + IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS | + IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ; + + iwdev->cleanup_wq = alloc_workqueue( + "zrdma-cleanup-wq", WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + if (!iwdev->cleanup_wq) + return -ENOMEM; + + zxdh_get_used_rsrc(iwdev); + init_waitqueue_head(&iwdev->suspend_wq); + + return 0; + } while (0); + + dev_err(idev_to_dev(dev), + "HW runtime init FAIL status = %d last cmpl = %d\n", status, + iwdev->init_state); + zxdh_rt_deinit_hw(iwdev); + + return status; +} + +static void zxdh_config_tx_regs(struct zxdh_sc_dev *dev) +{ + u32 temp; + + temp = FIELD_PREP(ZXDH_TX_CACHE_ID, 0) | + FIELD_PREP(ZXDH_TX_INDICATE_ID, ZXDH_INDICATE_HOST_NOSMMU) | + FIELD_PREP(ZXDH_TX_AXI_ID, (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_TX_WAY_PARTITION, 0); + + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + 
RDMATX_ACK_SQWQE_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_ACK_DDR_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_DB_SQWQE_ID_CFG)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_SQWQE_PARA_CFG)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_PAYLOAD_PARA_CFG)); + + if (dev->hmc_use_dpu_ddr) { + temp = FIELD_PREP(ZXDH_TX_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_TX_INDICATE_ID, ZXDH_INDICATE_DPU_DDR) | + FIELD_PREP(ZXDH_TX_AXI_ID, + (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_TX_WAY_PARTITION, 0); + } else { + temp = FIELD_PREP(ZXDH_TX_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_TX_INDICATE_ID, + ZXDH_INDICATE_HOST_SMMU) | + FIELD_PREP(ZXDH_TX_AXI_ID, + (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_TX_WAY_PARTITION, 0); + } + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + C_HMC_MRTE_TX2)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + C_HMC_PBLEMR_TX2)); + + writel((ZXDH_AXID_HOST_EP0 + dev->ep_id), + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_HOSTID_CFG)); + + /*adding token config to 200Gbps, equal to time(us)*size(Byte)*/ + writel(0x1, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_ADD_TOKEN_CHANGE_EN)); + writel(0x1900, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_TIME_ADD_TOKEN_CFG)); + writel(0x132d7, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_SIZE_ADD_TOKEN_CFG)); + writel(0x3FFFFFF, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_TOKEN_MAX_CFG)); +} + +static void zxdh_config_rx_regs(struct zxdh_sc_dev *dev) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + u32 temp; + + temp = FIELD_PREP(ZXDH_RX_CACHE_ID, 0) | + FIELD_PREP(ZXDH_RX_INDICATE_ID, ZXDH_INDICATE_HOST_NOSMMU) | + FIELD_PREP(ZXDH_RX_AXI_ID, (ZXDH_AXID_HOST_EP0 + dev->ep_id)) | + FIELD_PREP(ZXDH_RX_WAY_PARTITION, 0); + + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_PLD_WR_AXIID_RAM)); + writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RQ_AXI_RAM)); + 
writel(temp, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_SRQ_AXI_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_ACK_RQDB_AXI_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_CQ_CQE_AXI_INFO_RAM)); + writel(temp, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_CQ_DBSA_AXI_INFO_RAM)); + writel(dev->hmc_fn_id, (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_MUL_CACHE_CFG_SIDN_RAM)); + writel((ZXDH_AXID_HOST_EP0 + dev->ep_id), + (u32 __iomem *)(dev->hw->hw_addr + + RDMARX_MUL_COPY_QPN_INDICATE)); + writel(RDMARX_MAX_MSG_SIZE, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_VHCA_MAX_SIZE_RAM)); + + if (rf->ftype == 0) { + // writel(ZXDH_HMC_HOST_MGCPAYLOAD_MAX_QUANTITY, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_INDEX_SUM_RAM)); + // writel(1, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_VLD_RAM)); + // writel(dev->vhca_id, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_MUL_CACHE_CFG_VHCA_RAM)); + } +} + +static void zxdh_config_io_regs(struct zxdh_sc_dev *dev) +{ + u32 temp0, temp1, temp2; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + temp0 = FIELD_PREP(ZXDH_IOTABLE2_SID, dev->hmc_fn_id); + writel(temp0, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE2)); + + temp1 = FIELD_PREP(ZXDH_IOTABLE4_EPID, + (ZXDH_HOST_EP0_ID + dev->ep_id)) | + FIELD_PREP(ZXDH_IOTABLE4_VFID, dev->vf_id) | + FIELD_PREP(ZXDH_IOTABLE4_PFID, rf->pf_id); + writel(temp1, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE4)); + + temp0 = 0x10000; + writel(temp0, (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE3)); + for (temp0 = 0; temp0 < 32; temp0++) { + if (temp0 < ZXDH_RW_PAYLOAD || temp0 == ZXDH_QPC_OBJ_ID) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMAIO_TABLE5_0 + + (temp0 * 4))); + } else { + writel((rf->ftype), (u32 __iomem *)(dev->hw->hw_addr + + C_RDMAIO_TABLE5_0 + + (temp0 * 4))); + } + } + + if (rf->ftype == 0) { + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_0)); + writel(0, + (u32 
__iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_1)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_2)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_3)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_4)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_5)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_6)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_7)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_8)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_9)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_10)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_11)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_12)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_13)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_14)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE6_15)); + + temp2 = FIELD_PREP(ZXDH_IOTABLE7_PFID, rf->pf_id) | + FIELD_PREP(ZXDH_IOTABLE7_EPID, + (ZXDH_HOST_EP0_ID + rf->ep_id)); + writel(temp2, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE7)); + } else { + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_0)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_1)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_2)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_3)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_4)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_5)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_6)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_7)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_8)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_9)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + 
C_RDMAIO_TABLE5_10)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_11)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_12)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_13)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_14)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_15)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_16)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_17)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_18)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_19)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_20)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_21)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_22)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_23)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_24)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_25)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_26)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_27)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_28)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_29)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_30)); + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + C_RDMAIO_TABLE5_31)); + } +} + +static void zxdh_config_hw_regs(struct zxdh_sc_dev *dev) +{ + zxdh_config_tx_regs(dev); + zxdh_config_rx_regs(dev); + zxdh_config_io_regs(dev); +} +/** + * zxdh_ctrl_init_hw - Initializes control portion of HW + * @rf: RDMA PCI function + * + * Create admin queues, HMC obejcts and RF resource objects + */ +int zxdh_ctrl_init_hw(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 k = 0; + int status = 0; + + do { + status = zxdh_setup_init_state(rf); + 
if (status) + break; + rf->init_state = INITIAL_STATE; + + zxdh_config_hw_regs(dev); + + status = zxdh_create_cqp(rf); + if (status) + break; + rf->init_state = CQP_CREATED; + zxdh_init_destroy_aeq(rf); + if (!rf->ftype) { + status = zxdh_smmu_pagetable_init(dev); + if (status) + break; + rf->init_state = SMMU_PAGETABLE_INITIALIZED; + if (rf->sc_dev.ep_id != ZXDH_ZF_EPID || dev->hmc_use_dpu_ddr) { + status = zxdh_data_cap_setup(rf); + if (status) + break; + rf->init_state = DATA_CAP_CREATED; + } + if (dev->hmc_use_dpu_ddr) { + status = zxdh_clear_dpuddr( + dev, true); //TODO:VF clear dpu ddr + if (status) { + if (dev->clear_dpu_mem.va) { + dma_free_coherent( + dev->hw->device, + dev->clear_dpu_mem.size, + dev->clear_dpu_mem.va, + dev->clear_dpu_mem.pa); + dev->clear_dpu_mem.va = NULL; + } + break; + } + status = zxdh_create_hmcobjs_dpuddr(rf); + } else + status = zxdh_hmc_setup(rf); + + if (dev->clear_dpu_mem.va) { + dma_free_coherent(dev->hw->device, + dev->clear_dpu_mem.size, + dev->clear_dpu_mem.va, + dev->clear_dpu_mem.pa); + dev->clear_dpu_mem.va = NULL; + } + + for (k = 0; k < rf->max_rdma_vfs; k++) + zxdh_pf_get_vf_hmc_res(dev, k); + + } else if (rf->ftype == 1) { + zxdh_hmc_dpu_capability(dev); + for (k = 0; k < ZXDH_HMC_IW_MAX; k++) { + zxdh_sc_write_hmc_register( + dev, dev->hmc_info->hmc_obj, k, + dev->vhca_id); + } + zxdh_create_vf_pblehmc_entry(dev); + } else { + pr_info("ftype is error!!\n"); + status = EINVAL; + } + + if (status) + break; + rf->init_state = HMC_OBJS_CREATED; + + status = zxdh_initialize_hw_rsrc(rf); + if (status) + break; + rf->init_state = HW_RSRC_INITIALIZED; + status = zxdh_create_cqp_qp(rf); + if (status) + break; + rf->init_state = CQP_QP_CREATED; + + status = zxdh_setup_aeq(rf); + if (status) + break; + rf->init_state = AEQ_CREATED; + + status = zxdh_create_ccq(rf); + if (status) + break; + rf->init_state = CCQ_CREATED; + + status = zxdh_setup_ceq_0(rf); + if (status) + break; + + rf->sc_dev.ceq_0_ok = true; + 
rf->sc_dev.ceq_interrupt = false; + rf->init_state = CEQ0_CREATED; + /* Handles processing of CQP completions */ + rf->cqp_cmpl_wq = alloc_ordered_workqueue( + "cqp_cmpl_wq", WQ_HIGHPRI | WQ_UNBOUND); + if (!rf->cqp_cmpl_wq) { + status = -ENOMEM; + break; + } + INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); +#ifdef MSIX_SUPPORT + zxdh_sc_ccq_arm(dev->ccq); +#endif + + if (rf->ftype == 1 && !dev->hmc_use_dpu_ddr) { + zxdh_set_smmu_invalid(rf); + status = zxdh_vf_init_hmc(rf); + if (status) + break; + } + + if (rf->ftype) { + status = zxdh_vf_init_np_tbl(rf); + if (status) + break; + } + + return 0; + } while (0); + + pr_err("ZRDMA hardware initialization FAILED init_state=%d status=%d\n", + rf->init_state, status); + zxdh_ctrl_deinit_hw(rf); + return status; +} + +/** + * zxdh_set_hw_rsrc - set hw memory resources. + * @rf: RDMA PCI function + */ +static void zxdh_set_hw_rsrc(struct zxdh_pci_f *rf) +{ +#ifdef Z_CONFIG_RDMA_ARP + rf->allocated_srqs = + (void *)(rf->mem_rsrc + + (sizeof(struct zxdh_arp_entry) * rf->arp_table_size)); +#else + rf->allocated_srqs = (void *)(rf->mem_rsrc); +#endif + rf->allocated_qps = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)]; + rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; + rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; + rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; + rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; + rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; +#ifdef Z_CONFIG_RDMA_ARP + rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; + rf->qp_table = (struct zxdh_qp **)(&rf->allocated_arps[BITS_TO_LONGS( + rf->arp_table_size)]); + +#else + rf->qp_table = + (struct zxdh_qp * + *)(&rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]); +#endif + rf->cq_table = (struct zxdh_cq **)(&rf->qp_table[rf->max_qp]); + rf->srq_table = (struct zxdh_srq **)(&rf->cq_table[rf->max_cq]); + + spin_lock_init(&rf->rsrc_lock); 
+#ifdef Z_CONFIG_RDMA_ARP + spin_lock_init(&rf->arp_lock); +#endif + spin_lock_init(&rf->qptable_lock); + spin_lock_init(&rf->cqtable_lock); + spin_lock_init(&rf->srqtable_lock); +} + +/** + * zxdh_calc_mem_rsrc_size - calculate memory resources size. + * @rf: RDMA PCI function + */ +static u32 zxdh_calc_mem_rsrc_size(struct zxdh_pci_f *rf) +{ + u32 rsrc_size; + +#ifdef Z_CONFIG_RDMA_ARP + rsrc_size = sizeof(struct zxdh_arp_entry) * rf->arp_table_size; + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); +#else + rsrc_size = sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); +#endif + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); +#ifdef Z_CONFIG_RDMA_ARP + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); +#endif + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); + rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); + rsrc_size += sizeof(struct zxdh_qp **) * rf->max_qp; + rsrc_size += sizeof(struct zxdh_cq **) * rf->max_cq; + rsrc_size += sizeof(struct zxdh_srq **) * rf->max_srq; + + return rsrc_size; +} + +/** + * zxdh_initialize_hw_rsrc - initialize hw resource tracking array + * @rf: RDMA PCI function + */ +u32 zxdh_initialize_hw_rsrc(struct zxdh_pci_f *rf) +{ + u32 rsrc_size; + u32 mrdrvbits; + u32 ret; + + rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; + rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt; + rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_MR].cnt; + rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt; + rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt; + rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; + rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_AH].cnt; + rf->max_mcg = rf->max_qp; + + rsrc_size = 
zxdh_calc_mem_rsrc_size(rf); + rf->mem_rsrc = vzalloc(rsrc_size); + if (!rf->mem_rsrc) { + ret = -ENOMEM; + goto mem_rsrc_vmalloc_fail; + } +#ifdef Z_CONFIG_RDMA_ARP + rf->arp_table = (struct zxdh_arp_entry *)rf->mem_rsrc; +#endif + + zxdh_set_hw_rsrc(rf); + + set_bit(0, rf->allocated_mrs); + set_bit(1, rf->allocated_mrs); + set_bit(0, rf->allocated_pds); + set_bit(0, rf->allocated_qps); +#ifdef Z_CONFIG_RDMA_ARP + set_bit(0, rf->allocated_arps); +#endif + set_bit(0, rf->allocated_ahs); + set_bit(0, rf->allocated_mcgs); + set_bit(0, rf->allocated_srqs); + + /* stag index mask has a minimum of 14 bits */ + mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); + rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); + + return 0; + +mem_rsrc_vmalloc_fail: + return ret; +} + +/** + * zxdh_cqp_ce_handler - handle cqp completions + * @rf: RDMA PCI function + * @cq: cq for cqp completions + */ +void zxdh_cqp_ce_handler(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq) +{ + struct zxdh_cqp_request *cqp_request; + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 cqe_count = 0; + struct zxdh_ccq_cqe_info info; + unsigned long flags; + int ret = 0; + + do { + memset(&info, 0, sizeof(info)); + spin_lock_irqsave(&rf->cqp.compl_lock, flags); + ret = zxdh_sc_ccq_get_cqe_info(cq, &info); + spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); + if (ret) { + if (dev->hw_attrs.self_health == true) + return; + break; + } + + cqp_request = + (struct zxdh_cqp_request *)(unsigned long)info.scratch; + if (info.error && + zxdh_cqp_crit_err(dev, cqp_request->info.cqp_cmd, + info.maj_err_code, info.min_err_code)) + pr_err("cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", + info.op_code, info.maj_err_code, + info.min_err_code); + if (cqp_request && (info.mailbox_cqe != 1)) { + cqp_request->compl_info.maj_err_code = + info.maj_err_code; + cqp_request->compl_info.min_err_code = + info.min_err_code; + cqp_request->compl_info.op_ret_val = info.op_ret_val; + 
cqp_request->compl_info.error = info.error; + + if (info.op_code == ZXDH_CQP_OP_WQE_DMA_READ_USECQE) { + cqp_request->compl_info.addrbuf[0] = + info.addrbuf[0]; + cqp_request->compl_info.addrbuf[1] = + info.addrbuf[1]; + cqp_request->compl_info.addrbuf[2] = + info.addrbuf[2]; + cqp_request->compl_info.addrbuf[3] = + info.addrbuf[3]; + cqp_request->compl_info.addrbuf[4] = + info.addrbuf[4]; + } + + if (cqp_request->waiting) { + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + } else { + if (cqp_request->callback_fcn) + cqp_request->callback_fcn(cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + } + } else if (info.mailbox_cqe == 1) { + if (rf->ftype == 0) { + ret = zxdh_recv_mb(dev, &info); + if (ret != 0) + pr_info("pf recv mb failed\n"); + } + } + + cqe_count++; + } while (1); + + if (cqe_count) { + zxdh_sc_ccq_arm(dev->ccq); + dev->ceq_interrupt = false; + zxdh_process_bh(dev); + } + if (dev->ceq_interrupt == true) { + zxdh_sc_ccq_arm(dev->ccq); + dev->ceq_interrupt = false; + } +} + +/** + * cqp_compl_worker - Handle cqp completions + * @work: Pointer to work structure + */ +void cqp_compl_worker(struct work_struct *work) +{ + struct zxdh_pci_f *rf = + container_of(work, struct zxdh_pci_f, cqp_cmpl_work); + struct zxdh_sc_cq *cq = &rf->ccq.sc_cq; + + zxdh_cqp_ce_handler(rf, cq); +} + +/** + * zxdh_hw_flush_wqes - flush qp's wqe + * @rf: RDMA PCI function + * @qp: hardware control qp + * @info: info for flush + * @wait: flag wait for completion + */ +int zxdh_hw_flush_wqes(struct zxdh_pci_f *rf, struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, bool wait) +{ + int status; + struct zxdh_qp_flush_info *hw_info; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; + 
memcpy(hw_info, info, sizeof(*hw_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_FLUSH_WQES; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_flush_wqes.qp = qp; + cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + if (status) { + qp->qp_uk.sq_flush_complete = true; + qp->qp_uk.rq_flush_complete = true; + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; + } + + if (!wait || cqp_request->compl_info.maj_err_code) + goto put_cqp; + + if (info->rq) { + if (cqp_request->compl_info.min_err_code == + ZXDH_CQP_COMPL_SQ_WQE_FLUSHED || + cqp_request->compl_info.min_err_code == 0) { + /* RQ WQE flush was requested but did not happen */ + qp->qp_uk.rq_flush_complete = true; + } + } + if (info->sq) { + if (cqp_request->compl_info.min_err_code == + ZXDH_CQP_COMPL_RQ_WQE_FLUSHED || + cqp_request->compl_info.min_err_code == 0) { + /* SQ WQE flush was requested but did not happen */ + qp->qp_uk.sq_flush_complete = true; + } + } + +put_cqp: + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_flush_wqes(struct zxdh_qp *iwqp, u32 flush_mask) +{ + struct zxdh_qp_flush_info info = {}; + struct zxdh_pci_f *rf = iwqp->iwdev->rf; + u8 flush_code = iwqp->sc_qp.flush_code; + + if (!(flush_mask & ZXDH_FLUSH_SQ) && !(flush_mask & ZXDH_FLUSH_RQ)) + return; + + if (iwqp->sc_qp.is_nvmeof_ioq) + return; + + if (iwqp->ibqp.qp_num == 1) + return; + + /* Set flush info fields*/ + info.sq = flush_mask & ZXDH_FLUSH_SQ; + info.rq = flush_mask & ZXDH_FLUSH_RQ; + + /* Generate userflush errors in CQE */ + info.sq_major_code = ZXDH_FLUSH_MAJOR_ERR; + info.sq_minor_code = FLUSH_GENERAL_ERR; + info.rq_major_code = ZXDH_FLUSH_MAJOR_ERR; + info.rq_minor_code = FLUSH_GENERAL_ERR; + info.userflushcode = true; + + if (flush_mask & ZXDH_REFLUSH) { + if (info.sq) + iwqp->sc_qp.flush_sq = false; + if (info.rq) + iwqp->sc_qp.flush_rq = false; + } else { + if (flush_code) { + if (info.sq && iwqp->sc_qp.sq_flush_code) + 
info.sq_minor_code = flush_code; + if (info.rq && iwqp->sc_qp.rq_flush_code) + info.rq_minor_code = flush_code; + } + } + + /* Issue flush */ + (void)zxdh_hw_flush_wqes(rf, &iwqp->sc_qp, &info, + flush_mask & ZXDH_FLUSH_WAIT); + iwqp->flush_issued = true; +} diff --git a/drivers/infiniband/hw/zrdma/icrdma_hw.c b/drivers/infiniband/hw/zrdma/icrdma_hw.c new file mode 100644 index 000000000000..89ed2d7335e6 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/icrdma_hw.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +#include "osdep.h" +#include "type.h" +#include "icrdma_hw.h" +#include "main.h" + +static u64 icrdma_masks[ZXDH_MAX_MASKS] = { + ICRDMA_CCQPSTATUS_CCQP_DONE, + ICRDMA_CCQPSTATUS_CCQP_ERR, + ICRDMA_CQPSQ_STAG_PDID, + ICRDMA_CQPSQ_CQ_CEQID, + ICRDMA_CQPSQ_CQ_CQID, + ICRDMA_COMMIT_FPM_CQCNT, +}; + +static u8 icrdma_shifts[ZXDH_MAX_SHIFTS] = { + ICRDMA_CCQPSTATUS_CCQP_DONE_S, + ICRDMA_CCQPSTATUS_CCQP_ERR_S, + ICRDMA_CQPSQ_STAG_PDID_S, + ICRDMA_CQPSQ_CQ_CEQID_S, + ICRDMA_CQPSQ_CQ_CQID_S, + ICRDMA_COMMIT_FPM_CQCNT_S, +}; + +static unsigned int zxdh_dbi_en = 1; +module_param(zxdh_dbi_en, uint, 0444); +MODULE_PARM_DESC(zxdh_dbi_en, "zxdh_dbi_en =1, enable dbi module"); + +static unsigned int zxdh_ep_addr = 0x948; +module_param(zxdh_ep_addr, uint, 0444); +MODULE_PARM_DESC(zxdh_ep_addr, + "zxdh_ep_addr = 0x948, dbi model ,0x948 is register addr"); + +static unsigned int zxdh_ep_id; +module_param(zxdh_ep_id, uint, 0444); +MODULE_PARM_DESC(zxdh_ep_id, + "zxdh_ep_id 0 is 5, 1 is 6, 2 is 7, 3 is 8, 4 is 9"); + +/** + * zxdh_rdma_ena_ceq_irq - Enable ceq interrupt + * @dev: pointer to the device structure + * @ceq_id: ceq id + */ +static void zxdh_rdma_ena_ceq_irq(struct zxdh_sc_dev *dev, u32 ceq_id) +{ + u32 hdr; + + hdr = FIELD_PREP(ZXDH_CEQ_ARM_VHCA_ID, dev->vhca_id) | + FIELD_PREP(ZXDH_CEQ_ARM_CEQ_ID, ceq_id); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, 
dev->ceq_itr_enable); + // printk("%s hdr:0x%x\n",__func__,hdr); +} + +/** + * zxdh_rdma_ena_aeq_irq - Enable aeq interrupt + * @dev: pointer to the device structure + * @enable: enable value + */ +static void zxdh_rdma_ena_aeq_irq(struct zxdh_sc_dev *dev, bool enable) +{ + writel(enable, dev->aeq_itr_enable); +} + +static const struct zxdh_irq_ops zxdh_rdma_irq_ops = { + .zxdh_cfg_aeq = zxdh_cfg_aeq, + .zxdh_ceq_en_irq = zxdh_rdma_ena_ceq_irq, + .zxdh_aeq_en_irq = zxdh_rdma_ena_aeq_irq, +}; + +static void zxdh_init_ceq_hw(struct zxdh_sc_dev *dev) +{ + struct zxdh_pci_f *rf; + u32 hdr; + u8 __iomem *hw_addr; + + hw_addr = dev->hw->hw_addr; + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + dev->ceq_itr_enable = (u32 __iomem *)(hw_addr + C_CEQ_EQARM_RAM); + dev->ceq_axi.ceqe_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_CEQE_AXI_INFO_RAM); + dev->ceq_axi.rpble_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_RPBLE_AXI_INFO_RAM); + dev->ceq_axi.lpble_axi_info = + (u32 __iomem *)(hw_addr + C_CEQ_LPBLE_AXI_INFO_RAM); + dev->ceq_axi.int_info = (u32 __iomem *)(hw_addr + C_CEQ_INT_INFO_RAM); + + hdr = FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_INDICATE_ID, + dev->soc_tx_rx_cqp_ind) | //�1�7�1�7�1�7�1�7smmu + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.ceqe_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.rpble_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_PBLE_AXI_INFO_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_CEQ_CEQE_AXI_INFO_AXI_ID, + dev->soc_tx_rx_cqp_axid); //ep5 + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.lpble_axi_info); + + hdr = FIELD_PREP(ZXDH_CEQ_INT_PCIE_DBI_EN, zxdh_dbi_en) | + 
FIELD_PREP(ZXDH_CEQ_INT_EP_ID, rf->ep_id) | + FIELD_PREP(ZXDH_CEQ_INT_PF_NUM, rf->pf_id) | + FIELD_PREP(ZXDH_CEQ_INT_VF_NUM, rf->vf_id) | + FIELD_PREP(ZXDH_CEQ_INT_VF_ACTIVE, rf->ftype); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->ceq_axi.int_info); +} + +static void zxdh_init_aeq_hw(struct zxdh_sc_dev *dev) +{ + u8 __iomem *hw_addr; + u32 hdr; + + hw_addr = dev->hw->hw_addr; + + dev->aeq_itr_enable = (u32 __iomem *)(hw_addr + C_RDMA_CPU_AEQ_ARM); + dev->aeq_tail_pointer = + (u32 __iomem *)(hw_addr + C_RDMA_CPU_SOFTWARE_TAIL); + dev->aeq_vhca_pfvf.aeq_msix_data = + (u32 __iomem *)(hw_addr + RDMA_CPU_MSIX_DATA); + dev->aeq_vhca_pfvf.aeq_msix_config = + (u32 __iomem *)(hw_addr + RDMA_CPU_MSIX_CONFIG); + dev->aeq_vhca_pfvf.aeq_root_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_ROOT_AXI_DATA); + dev->aeq_vhca_pfvf.aeq_leaf_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_LEAF_AXI_DATA); + dev->aeq_vhca_pfvf.aeq_wr_axi_data = + (u32 __iomem *)(hw_addr + AEQ_REPORT_WR_AXI_DATA); + dev->aeq_vhca_pfvf.aeq_aee_flag = + (u32 __iomem *)(hw_addr + AEQ_AEQC_AEE_FLAG); + + writel(0, dev->aeq_tail_pointer); + //soc hmc config + hdr = FIELD_PREP(ZXDH_AEQ_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_root_axi_data); + + hdr = FIELD_PREP(ZXDH_AEQ_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->aeq_vhca_pfvf.aeq_leaf_axi_data); + //soc data config + hdr = FIELD_PREP(ZXDH_AEQ_INDICIATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_AEQ_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_AEQ_WAY_PATITION, 0); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + 
writel(hdr, dev->aeq_vhca_pfvf.aeq_wr_axi_data); + //clear 0 after reading values during maintenance + writel(0, dev->aeq_vhca_pfvf.aeq_aee_flag); +} + +void zxdh_init_hw(struct zxdh_sc_dev *dev) +{ + int i; + u32 hdr; + u8 __iomem *hw_addr; + struct zxdh_pci_f *rf = dev_to_rf(dev); + + dev->ceq_0_ok = false; + dev->soc_tx_rx_cqp_ind = ZXDH_SOC_TXRXCQP_IND_ACC_HOST_NOT_THROUGH_SMMU; + dev->soc_tx_rx_cqp_axid = ZXDH_SOC_TXRXCQP_AXID_DEST_EP5; + dev->soc_rdma_io_ind = ZXDH_SOC_RDMAIO_IND_ACC_HOST_NOT_THROUGH_SMMU; + + hw_addr = dev->hw->hw_addr; + + dev->wqe_alloc_db = (u32 __iomem *)(hw_addr + C_RDMA_SQ_DBINFO_LOW_DIN); + dev->cq_arm_db = (u32 __iomem *)(hw_addr + RDMARX_CQ_CQARM); + dev->cqp_db = (u32 __iomem *)(hw_addr + C_RDMA_CQP_DB); + + zxdh_init_ceq_hw(dev); + zxdh_init_aeq_hw(dev); + dev->hw_attrs.max_hw_vf_fpm_id = ZXDH_MAX_VF_FPM_ID; + dev->hw_attrs.first_hw_vf_fpm_id = ZXDH_FIRST_VF_FPM_ID; + + for (i = 0; i < ZXDH_MAX_SHIFTS; ++i) + dev->hw_shifts[i] = icrdma_shifts[i]; + + for (i = 0; i < ZXDH_MAX_MASKS; ++i) + dev->hw_masks[i] = icrdma_masks[i]; + + dev->srq_axi_ram.db = (u32 __iomem *)(hw_addr + C_DB_AXI_RAM); + dev->srq_axi_ram.srql = (u32 __iomem *)(hw_addr + C_SRQL_AXI_RAM); + + dev->irq_ops = &zxdh_rdma_irq_ops; + + dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; + dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; + dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; + dev->hw_attrs.max_stat_idx = ZXDH_HW_STAT_INDEX_MAX; + + dev->hw_attrs.uk_attrs.max_hw_sq_chunk = ZXDH_MAX_QUANTA_PER_WR; + dev->hw_attrs.uk_attrs.feature_flags |= + ZXDH_FEATURE_RTS_AE | ZXDH_FEATURE_CQ_RESIZE | + ZXDH_FEATURE_64_BYTE_CQE; /* RC UD both set to 64 Bytes*/ + + if (rf->srq_l2d_base_paddr != 0 && rf->srq_l2d_size != 0) { + hdr = FIELD_PREP(ZXDH_SRQ_DB_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_SRQ_DB_INDICATE_ID, ZXDH_INDICATE_L2D) | + FIELD_PREP(ZXDH_SRQ_DB_AXI_ID, ZXDH_AXID_L2D) | + FIELD_PREP(ZXDH_SRQ_DB_WAY_PATION, 0); + } else { + hdr = 
FIELD_PREP(ZXDH_SRQ_DB_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_SRQ_DB_INDICATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_SRQ_DB_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_SRQ_DB_WAY_PATION, 0); + } + wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->srq_axi_ram.db); + + hdr = FIELD_PREP(ZXDH_SRQ_DSRQL_CACHE_ID, dev->cache_id) | + FIELD_PREP(ZXDH_SRQ_SRQL_INDICATE_ID, dev->soc_tx_rx_cqp_ind) | + FIELD_PREP(ZXDH_SRQ_SRQL_AXI_ID, dev->soc_tx_rx_cqp_axid) | + FIELD_PREP(ZXDH_SRQ_SRQL_WAY_PATION, 0); + wmb(); /* make sure WQE is populated before valid bit is set */ + writel(hdr, dev->srq_axi_ram.srql); + + writel(IRDMARX_RD_TIME_LIMIT_VALUE, + (u32 __iomem *)(hw_addr + RDMATX_RD_TIME_LIMIT)); + writel(IRDMARX_RD_TIME_LIMIT_VALUE, + (u32 __iomem *)(hw_addr + RDMARX_RD_TIME_LIMIT)); +} + +void zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle) +{ + cc->config_ok = false; + cc->traffic_class = traffic_class; + cc->qs_handle = qs_handle; + cc->lfc_set = 0; + cc->pfc_set = 0; +} + +static bool zxdh_is_lfc_set(struct zxdh_config_check *cc, + struct zxdh_sc_vsi *vsi) +{ + u32 lfc = 1; + u8 fn_id = vsi->dev->hmc_fn_id; + + lfc &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> + 8); + lfc &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> + 8); + lfc &= rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id); + + if (lfc) + return true; + return false; +} + +static bool zxdh_check_tc_has_pfc(struct zxdh_sc_vsi *vsi, u64 reg_offset, + u16 traffic_class) +{ + u32 value, pfc = 0; + u32 i; + + value = rd32(vsi->dev->hw, reg_offset); + for (i = 0; i < 4; i++) + pfc |= (value >> (8 * i + traffic_class)) & 0x1; + + if (pfc) + return true; + return false; +} + +static bool zxdh_is_pfc_set(struct zxdh_config_check *cc, + struct zxdh_sc_vsi *vsi) +{ + u32 pause; + u8 fn_id = vsi->dev->hmc_fn_id; + + pause = (rd32(vsi->dev->hw, 
+ PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> + cc->traffic_class) & + BIT(0); + pause &= (rd32(vsi->dev->hw, + PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> + cc->traffic_class) & + BIT(0); + + return zxdh_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) && + pause; +} + +bool zxdh_is_config_ok(struct zxdh_config_check *cc, struct zxdh_sc_vsi *vsi) +{ + cc->lfc_set = zxdh_is_lfc_set(cc, vsi); + cc->pfc_set = zxdh_is_pfc_set(cc, vsi); + + cc->config_ok = cc->lfc_set || cc->pfc_set; + + return cc->config_ok; +} + +#define ZXDH_RCV_WND_NO_FC 0x1FFFC +#define ZXDH_RCV_WND_FC 0x3FFFC + +#define ZXDH_CWND_NO_FC 0x20 +#define ZXDH_CWND_FC 0x400 + +#define ZXDH_RTOMIN_NO_FC 0x5 +#define ZXDH_RTOMIN_FC 0x32 + +#define ZXDH_ACKCREDS_NO_FC 0x02 +#define ZXDH_ACKCREDS_FC 0x1E + +static void zxdh_check_flow_ctrl(struct zxdh_sc_vsi *vsi, u8 user_prio, + u8 traffic_class) +{ +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[user_prio]; + struct zxdh_device *iwdev = vsi->back_vsi; + + if (!zxdh_is_config_ok(cfg_chk, vsi)) { + if (!iwdev->override_rcv_wnd) + iwdev->rcv_wnd = ZXDH_RCV_WND_NO_FC; + if (!iwdev->override_cwnd) + iwdev->roce_cwnd = ZXDH_CWND_NO_FC; + if (!iwdev->override_rtomin) + iwdev->roce_rtomin = ZXDH_RTOMIN_NO_FC; + if (!iwdev->override_ackcreds) + iwdev->roce_ackcreds = ZXDH_ACKCREDS_NO_FC; +#define ZXDH_READ_FENCE_RATE_NO_FC 4 + if (iwdev->roce_mode && !iwdev->override_rd_fence_rate) + iwdev->rd_fence_rate = ZXDH_READ_FENCE_RATE_NO_FC; + if (vsi->tc_print_warning[traffic_class]) { + pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", + traffic_class); + vsi->tc_print_warning[traffic_class] = false; + } + } else { + if (!iwdev->override_rcv_wnd) + iwdev->rcv_wnd = ZXDH_RCV_WND_FC; + if (!iwdev->override_cwnd) + iwdev->roce_cwnd = ZXDH_CWND_FC; + if (!iwdev->override_rtomin) + iwdev->roce_rtomin = ZXDH_RTOMIN_FC; + if (!iwdev->override_ackcreds) + iwdev->roce_ackcreds = 
ZXDH_ACKCREDS_FC; +#define ZXDH_READ_FENCE_RATE_FC 0 + if (!iwdev->override_rd_fence_rate) + iwdev->rd_fence_rate = ZXDH_READ_FENCE_RATE_FC; + if (vsi->tc_print_warning[traffic_class]) { + pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", + traffic_class); + vsi->tc_print_warning[traffic_class] = false; + } + } +#endif +} + +void zxdh_check_fc_for_tc_update(struct zxdh_sc_vsi *vsi, + struct zxdh_l2params *l2params) +{ + u8 i; + + if (!vsi->dev->privileged) + return; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + vsi->tc_print_warning[i] = true; + + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[i]; + u8 tc = l2params->up2tc[i]; + + cfg_chk->traffic_class = tc; + cfg_chk->qs_handle = vsi->qos[i].qs_handle; + zxdh_check_flow_ctrl(vsi, i, tc); + } +} + +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp) +{ + u8 i; + + if (!vsi->dev->privileged) + return; + for (i = 0; i < ZXDH_MAX_USER_PRIORITY; i++) { + struct zxdh_config_check *cfg_chk = &vsi->cfg_check[i]; + + zxdh_init_config_check(cfg_chk, vsi->qos[i].traffic_class, + vsi->qos[i].qs_handle); + if (sc_qp->qs_handle == cfg_chk->qs_handle) + zxdh_check_flow_ctrl(vsi, i, cfg_chk->traffic_class); + } +} diff --git a/drivers/infiniband/hw/zrdma/icrdma_hw.h b/drivers/infiniband/hw/zrdma/icrdma_hw.h new file mode 100644 index 000000000000..782b9855d188 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/icrdma_hw.h @@ -0,0 +1,865 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ICRDMA_HW_H +#define ICRDMA_HW_H + +#include "zrdma.h" + +#define VFPE_CQPTAIL1 0x0000a000 +#define VFPE_CQPDB1 0x0000bc00 +#define VFPE_CCQPSTATUS1 0x0000b800 +#define VFPE_CCQPHIGH1 0x00009800 +#define VFPE_CCQPLOW1 0x0000ac00 +#define VFPE_CQARM1 0x0000b400 +#define VFPE_CQARM1 0x0000b400 +#define VFPE_CQACK1 0x0000b000 +#define VFPE_AEQALLOC1 0x0000a400 +#define 
VFPE_CQPERRCODES1 0x00009c00 +#define VFPE_WQEALLOC1 0x0000c000 +#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i)*4)) /* _i=0...63 */ + +#define PFPE_CQPTAIL 0x801C //0x00500880 +#define PFPE_CQPDB 0x8014 //0x00500800 +#define PFPE_CCQPSTATUS 0x8044 //0x0050a000 +#define PFPE_CCQPHIGH 0x8054 //0x0050a100 +#define PFPE_CCQPLOW 0x804C //0x0050a080 +#define PFPE_CQARM 0x8024 //0x00502c00 +#define PFPE_CQACK 0x8034 //0x00502c80 +#define PFPE_AEQALLOC 0x802C //0x00502d00 +#define GLINT_DYN_CTL(_INT) \ + (0x6004 + ((_INT)*4)) //(0x00160000 + ((_INT) * 4)) /* _i=0...2047 */ +#define GLPCI_LBARCTRL 0x800C //0x0009de74 +#define GLPE_CPUSTATUS0 0x8064 //0x0050ba5c +#define GLPE_CPUSTATUS1 0x806C //0x0050ba60 +#define GLPE_CPUSTATUS2 0x8074 //0x0050ba64 +#define PFINT_AEQCTL 0x8004 //0x0016cb00 +#define PFPE_CQPERRCODES 0x805C //0x0050a200 +#define PFPE_WQEALLOC 0x803C //0x00504400 +#define GLINT_CEQCTL(_INT) \ + (0x4004 + ((_INT)*4)) //(0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */ +#define VSIQF_PE_CTL1(_VSI) \ + (0x807C + ((_VSI)*4)) //(0x00414000 + ((_VSI) * 4)) /* _i=0...767 */ +#define PFHMC_PDINV 0x8C7C //0x00520300 +#define GLHMC_VFPDINV(_i) \ + (0x8C84 + ((_i)*4)) //(0x00528300 + ((_i) * 4)) /* _i=0...31 */ +#define GLPE_CRITERR 0x8D04 //0x00534000 +#define GLINT_RATE(_INT) \ + (0x2004 + \ + ((_INT)*4)) //(0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */ + +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 0x001e3180 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_1 0x001e3184 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_2 0x001e3188 +#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_3 0x001e318c + +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 0x001e31a0 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_1 0x001e31a4 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_2 0x001e31a8 +#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_3 0x001e31aC + +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 0x001e34c0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_1 0x001e34c4 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_2 0x001e34c8 +#define 
PRTMAC_HSEC_CTL_RX_ENABLE_GPP_3 0x001e34cC + +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_0 0x001e35c0 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_1 0x001e35c4 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_2 0x001e35c8 +#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_3 0x001e35cC + +#define GLDCB_TC2PFC 0x001d2694 +#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001e31c0 + +#define ICRDMA_DB_ADDR_OFFSET (8 * 1024) //(8 * 1024 * 1024 - 64 * 1024) + +#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024) + +#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0 +#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0) +#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31 +#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31) +#define ICRDMA_CQPSQ_STAG_PDID_S 46 +#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46) +#define ICRDMA_CQPSQ_CQ_CEQID_S 48 +#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(59, 48) +#define ICRDMA_CQPSQ_CQ_CQID_S 0 +#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(20, 0) +#define ICRDMA_COMMIT_FPM_CQCNT_S 0 +#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0) + +#define ZXDH_PRI_BASE_RD_BAD_QKEY_COUNTER GENMASK(31, 24) + +/**************** Start of ZTE RDMA Registers ***************/ +#define C_RDMA_BASE_ADDRESS (0x6204000000u) +#define C_RDMA_HW_BAR_PAGE_NUM 31 +#define C_RDMA_HW_BAR_PAGE_SIZE 0x1000 +extern u64 zxdh_hw_bar_pages[C_RDMA_HW_BAR_PAGE_NUM]; + +/****** RDMA REG BASE Address******/ +#define C_RDMA_CQP_PUBLIC_PAGE1 (zxdh_hw_bar_pages[0]) /* hw addr:0x6204402000 */ +#define C_RDMA_CQP_PUBLIC_PAGE2 (zxdh_hw_bar_pages[1]) //hw addr:0x6204403000 +#define C_RDMA_CQP_VHCA_PAGE (zxdh_hw_bar_pages[2]) //hw addr:0x6204800000 + vhca_id * 0x1000 +#define C_RDMA_CQP_VHCA_PF_PAGE (zxdh_hw_bar_pages[3]) //hw addr:0x6204C00000 + vhca_id * 0x1000 + +#define C_RDMA_RX_PKT_PROC_PAGE (zxdh_hw_bar_pages[4]) //hw addr:0x6205400000 +#define C_RDMA_RX_PUBLIC_PAGE1 (zxdh_hw_bar_pages[5]) //hw addr:0x6205420000 +#define C_RDMA_RX_PUBLIC_PAGE2 (zxdh_hw_bar_pages[6]) //hw addr:0x6205440000 +#define C_RDMA_RX_CNP_GEN_PAGE (zxdh_hw_bar_pages[7]) //hw 
addr:0x6205460000 +#define C_RDMA_RX_RAM_SCHE_PAGE1 (zxdh_hw_bar_pages[8]) //hw addr:0x6205467000 +#define C_RDMA_RX_RAM_SCHE_PAGE2 (zxdh_hw_bar_pages[9]) //hw addr:0x6205468000 +#define C_RDMA_RX_RAM_SCHE_PAGE3 (zxdh_hw_bar_pages[10]) //hw addr:0x6205469000 +#define C_RDMA_RX_RAM_SCHE_PAGE4 (zxdh_hw_bar_pages[11]) //hw addr:0x620546A000 +#define C_RDMA_RX_RAM_SCHE_PAGE5 (zxdh_hw_bar_pages[12]) //hw addr:0x620546B000 +#define C_RDMA_RX_VHCA_PAGE (zxdh_hw_bar_pages[13]) //hw addr:0x6205800000 + vhca_id * 0x1000 +#define C_RDMA_RX_VHCA_PF_PAGE (zxdh_hw_bar_pages[14]) //hw addr:0x6205C00000 + vhca_id * 0x1000 + +#define C_RDMA_TX_ACK_RECV_PAGE (zxdh_hw_bar_pages[15]) //hw addr:0x62065E8000 +#define C_RDMA_TX_WQE_PARSE_PAGE (zxdh_hw_bar_pages[16]) //hw addr:0x62065F0000 + +#define C_RDMA_TX_RAM_SCHE_PAGE1 (zxdh_hw_bar_pages[17]) //hw addr:0x6206700000 +#define C_RDMA_TX_RAM_SCHE_PAGE2 (zxdh_hw_bar_pages[18]) //hw addr:0x6206701000 +#define C_RDMA_TX_RAM_SCHE_PAGE3 (zxdh_hw_bar_pages[19]) //hw addr:0x6206702000 +#define C_RDMA_TX_RAM_SCHE_PAGE4 (zxdh_hw_bar_pages[20]) //hw addr:0x6206703000 +#define C_RDMA_TX_RAM_SCHE_PAGE5 (zxdh_hw_bar_pages[21]) //hw addr:0x6206704000 +#define C_RDMA_TX_VHCA_PAGE (zxdh_hw_bar_pages[22]) //hw addr:0x6206800000 + vhca_id * 0x1000 +#define C_RDMA_TX_VHCA_PF_PAGE (zxdh_hw_bar_pages[23]) //hw addr:0x6206C00000 + vhca_id * 0x1000 + +#define C_RDMA_IO_VHCA_PAGE (zxdh_hw_bar_pages[24]) //hw addr:0x6207801000 + vhca_id * 0x2000 size:0x1000 + +#define C_RDMA_NOF_L2D_PAGES (zxdh_hw_bar_pages[25]) //hw addr:0x62008C2000 size:0x2000 + // 0x6206705000 not used +#define C_RDMA_TX_RTT_PAGE (zxdh_hw_bar_pages[27]) //hw addr:0x62065F8000 size:0x1000 +#define C_RDMA_RX_SIDN_PAGE (zxdh_hw_bar_pages[28]) //hw addr:0x6205600000 + sid * 0x10000 size:0x1000 +#define C_RDMA_TX_SIDN_PAGE (zxdh_hw_bar_pages[29]) //hw addr:0x6206600000 size:0x1000 +#define C_RDMA_IO_SIDN_PAGE (zxdh_hw_bar_pages[30]) //hw addr:0x6207600000 + sid * 0x10000 size:0x1000 + +/****** 
CQP Module Register ******/ +#define C_RDMA_CQP_CONTEXT_0 (C_RDMA_CQP_VHCA_PAGE + 0x004u) +#define C_RDMA_CQP_CONTEXT_1 (C_RDMA_CQP_VHCA_PAGE + 0x008u) +#define C_RDMA_CQP_CONTEXT_2 (C_RDMA_CQP_VHCA_PAGE + 0x00Cu) +#define C_RDMA_CQP_CONTEXT_3 (C_RDMA_CQP_VHCA_PAGE + 0x010u) +#define C_RDMA_CQP_CONTEXT_4 (C_RDMA_CQP_VHCA_PAGE + 0x014u) +#define C_RDMA_CQP_CONTEXT_5 (C_RDMA_CQP_VHCA_PAGE + 0x018u) +#define C_RDMA_CQP_CONTEXT_6 (C_RDMA_CQP_VHCA_PAGE + 0x01Cu) +#define C_RDMA_CQP_CONTEXT_7 (C_RDMA_CQP_VHCA_PAGE + 0x020u) +#define C_RDMA_CQP_CONFIG_DONE (C_RDMA_CQP_VHCA_PAGE + 0x024u) +#define C_RDMA_CQP_DB (C_RDMA_CQP_VHCA_PAGE + 0x028u) +#define C_RDMA_CQP_TAIL (C_RDMA_CQP_VHCA_PAGE + 0x02Cu) +#define C_RDMA_CQP_STATUS (C_RDMA_CQP_VHCA_PAGE + 0x030u) +#define C_RDMA_CQP_ERROR (C_RDMA_CQP_VHCA_PAGE + 0x034u) +#define C_RDMA_CQP_ERRCODE (C_RDMA_CQP_VHCA_PAGE + 0x038u) +#define C_RDMA_CQP_CQ_NUM (C_RDMA_CQP_VHCA_PAGE + 0x03Cu) +#define C_RDMA_CQP_CQ_DISTRIBUTE_DONE (C_RDMA_CQP_VHCA_PAGE + 0x040u) +#define C_RDMA_CQP_STATE_RSV0 (C_RDMA_CQP_VHCA_PAGE + 0x058u) +#define C_RDMA_CQP_STATE_RSV1 (C_RDMA_CQP_VHCA_PAGE + 0x05Cu) + +#define C_RDMA_CQP_STATUS_PHY_ADDR 0x6204800030u + +#define C_RDMA_CQP_PF_VF_ID(_i) \ + (C_RDMA_CQP_PUBLIC_PAGE1 + 4 * (_i)) /* i= 0...1023 */ +#define C_RDMA_CQP_PF_VF_ID_INVLD(_i) \ + (C_RDMA_CQP_PUBLIC_PAGE2 + 0x1000u + 4 * (_i)) /* i= 0...1023 */ + +#define C_RDMA_CQP_MGC_BASE_HIGH (C_RDMA_CQP_VHCA_PF_PAGE + 0x004u) +#define C_RDMA_CQP_MGC_BASE_LOW (C_RDMA_CQP_VHCA_PF_PAGE + 0x008u) +#define C_RDMA_CQP_MRTE_CACHE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x00Cu) +#define C_RDMA_CQP_AH_CACHE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x010u) +#define C_RDMA_CQP_MGC_INDICATE_ID (C_RDMA_CQP_VHCA_PF_PAGE + 0x014u) +#define C_RDMA_CQP_STATE_PF_RSV0 (C_RDMA_CQP_VHCA_PF_PAGE + 0x018u) + +/****** RDMA Flow Control Algorithms Register ******/ +/* DCQCN */ +#define RDMA_DCQCN_NP_CNP_DSCP (C_RDMA_RX_CNP_GEN_PAGE + 0x10u) +#define RDMA_DCQCN_NP_CNP_PRIO_MODE (C_RDMA_RX_CNP_GEN_PAGE + 
0x14u) +#define RDMA_DCQCN_NP_CNP_PRIO (C_RDMA_RX_CNP_GEN_PAGE + 0x18u) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_X (C_RDMA_RX_CNP_GEN_PAGE + 0x28u) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y (C_RDMA_RX_CNP_GEN_PAGE + 0xcu) +#define RDMA_DCQCN_NP_MIN_TIME_BETWEEN_CNPS_Y_EX \ + (C_RDMA_RX_CNP_GEN_PAGE + 0x2cu) +#define RDMA_DCQCN_PRG_TIME_RESET (0x6206008084u) +#define RDMA_DCQCN_RPG_CLAMP_TGT_RATE (0x6206008084u) +#define RDMA_DCQCN_RPG_CLAMP_TGT_RATE_AFTER_TIME_INC (0x6206008080u) +#define RDMA_DCQCN_RP_DCE_TCP_RTT (0x6206008080u) +#define RDMA_DCQCN_DCE_TCP_G (0x6206008088u) +#define RDMA_DCQCN_RPG_GD (0x6206008090u) +#define RDMA_DCQCN_RPG_INITIAL_ALPHA_VALUE (0x620600808cu) +#define RDMA_DCQCN_RPG_MIN_DEC_FAC (0x6206008094u) +#define RDMA_DCQCN_RPG_THRESHOLD (0x6206008098u) +#define RDMA_DCQCN_RPG_RATIO_INCREASE (0x6206008088u) +#define RDMA_DCQCN_RPG_AI_RATIO (0x620600809cu) +#define RDMA_DCQCN_RPG_HAI_RATIO (0x62060080a0u) +#define RDMA_DCQCN_RPG_BYTE_RESET (C_RDMA_TX_VHCA_PAGE + 0x0900u) +#define RDMA_DCQCN_RPG_AI_RATE (C_RDMA_TX_VHCA_PAGE + 0x0908u) +#define RDMA_DCQCN_RPG_HAI_RATE (C_RDMA_TX_VHCA_PAGE + 0x090cu) +#define RDMA_RPG_MAX_RATE (C_RDMA_TX_VHCA_PAGE + 0x0910u) +#define RDMA_RPG_MIN_RATE (C_RDMA_TX_VHCA_PAGE + 0x0914u) +/* RTT */ +#define RDMA_RPG_VF_DELTA (C_RDMA_TX_VHCA_PAGE + 0x091cu) +/****** RDMA Flow Control Algorithms Parameters ******/ +#define RDMA_FLOW_CONTROL_RATE_200G 0x4C4B4000 +#define RDMA_FLOW_CONTROL_RATE_10M 0xFA00 +#define RDMA_FLOW_CONTROL_RATE_10G 0x3D09000 +#define RDMA_FLOW_CONTROL_RATE_1G 0x61A800 +#define RDMA_FLOW_BYTE_RESET_THRESHOLD 125 +#define RDMA_FLOW_MAX_RPG_HAI_RATIO 4096 +#define RDMA_FLOW_MAX_RPG_AI_RATIO 2048 +#define RDMA_FLOW_MAX_RPG_THRESHOLD 31 +#define RDMA_FLOW_MAX_RPG_MIN_DEC_FAC 32768 +#define RDMA_FLOW_MAX_RPG_INITIAL_ALPHA_VALUE 32768 +#define RDMA_FLOW_MAX_RPG_GD 15 +#define RDMA_FLOW_MAX_DCE_TCP_G 15 +#define RDMA_FLOW_MAX_RP_DCE_TCP_RTT 100 +#define RDMA_FLOW_MIN_RP_DCE_TCP_RTT 5 +#define 
RDMA_FLOW_MAX_PRG_TIME_RESET 200 +#define RDMA_FLOW_MIN_PRG_TIME_RESET 5 +#define RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y 10 +#define RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y_EX \ + (2 * RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y - 1) +#define RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS_X 65535 +#define RDMA_FLOW_MIN_MIN_TIME_BETWEEN_CNPS_X 8 +#define RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS \ + (RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y * \ + RDMA_FLOW_MAX_NP_MIN_TIME_BETWEEN_CNPS_X) +#define RDMA_FLOW_MIN_NP_MIN_TIME_BETWEEN_CNPS \ + (RDMA_FLOW_NP_MIN_TIME_BETWEEN_CNPS_Y * \ + RDMA_FLOW_MIN_MIN_TIME_BETWEEN_CNPS_X) +#define RDMA_FLOW_MAX_NP_CNP_PRIO_MODE 7 +#define RDMA_FLOW_MAX_NP_CNP_PRIO 7 +#define RDMA_FLOW_MAX_NP_CNP_DSCP 63 +#define RDMA_FLOW_MAX_ALPHA_VALUE 32768 +#define RDMA_FLOW_MAX_TLOW_VALUE 65535 +#define RDMA_FLOW_MAX_THIGH_VALUE 65535 +#define RDMA_FLOW_MAX_AI_NUM_VALUE 65535 +#define RDMA_FLOW_MAX_HAI_N_VALUE 65534 +#define RDMA_FLOW_MAX_AI_N_VALUE 65534 +#define RDMA_FLOW_MAX_VF_DELTA_VALUE 65534 +#define RDMA_FLOW_MAX_THRED_GRADIENT 32768 + +/****** REQ Module Register ******/ +/* rdmatx_ack_recv */ +#define RDMATX_ACK_RSV_RO_REG_0 (C_RDMA_TX_ACK_RECV_PAGE + 0xA0u) +#define RDMATX_ACK_RSV_RO_REG_5 (C_RDMA_TX_ACK_RECV_PAGE + 0xB4u) +#define RDMATX_ACK_RSV_RO_REG_14 (C_RDMA_TX_ACK_RECV_PAGE + 0xD8u) +#define RDMATX_ACK_RSV_RO_REG_20 (C_RDMA_TX_ACK_RECV_PAGE + 0xF0u) +#define RDMATX_ACK_ERR_CQE_OUT_TASK_CNT (C_RDMA_TX_ACK_RECV_PAGE + 0x3C4u) +#define RDMATX_ACK_FLUSH_CQE_OUT_TASK_CNT (C_RDMA_TX_ACK_RECV_PAGE + 0x3C8u) + +/* rdmatx_wqe_parse */ +#define RDMATX_PKT_TIME_IN_CNT (C_RDMA_TX_WQE_PARSE_PAGE + 0x674u) +#define RDMATX_PKT_TIME_OUT_CNT (C_RDMA_TX_WQE_PARSE_PAGE + 0x678u) +#define RDMATX_HOST3_ERR_INFO_FIFO_OVERFLOW_CNT \ + (C_RDMA_TX_WQE_PARSE_PAGE + 0x8b0u) + +/* rdmatx_ram_scheduling */ +#define RDMATX_RAM_READ_FLAG (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x0u) +#define RDMATX_RAM_ADDR (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x04u) +#define RDMATX_RAM_READ_LENGTH 
(C_RDMA_TX_RAM_SCHE_PAGE1 + 0x08u) +#define RDMATX_RAM_NUM (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x0Cu) +#define RDMATX_RAM_WIDTH (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x10u) +#define RDMATX_RAM_MAINTENANCE_RAM(_i) \ + (C_RDMA_TX_RAM_SCHE_PAGE1 + 0x14u + (_i)*0x4) //i=0... 14 +#define RDMATX_RD_TIME_LIMIT (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C0u) +#define RDMATX_READ_ERROR_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C4u) +#define RDMATX_ERROR_RAM_NUM (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0C8u) +#define RDMATX_ERROR_RAM_ADDR (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0CCu) +#define RDMATX_READ_CNT_ERROR (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x0DCu) +#define RDMATX_RAM_REDUN_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x110u) +#define RDMATX_DOUBLE_VLD_FLAG (C_RDMA_TX_RAM_SCHE_PAGE5 + 0x114u) + +/* rdmatx_ack_recv_vhca_pfvf */ +#define RDMATX_ACK_SQWQE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x004u) +#define RDMATX_ACK_DDR_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x010u) +#define RDMATX_ACK_PCI_MAX_MRTE_INDEX_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0x014u) + +/* rdmatx_doorbell_mgr_vhca_pfvf */ +#define RDMATX_DB_PBLE_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x400u) +#define RDMATX_DB_SQWQE_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x40Cu) +#define RDMATX_QPN_BASEQPN_CFG (C_RDMA_TX_VHCA_PAGE + 0x424u) +#define RDMATX_QPN_CONTEXT_ID_CFG (C_RDMA_TX_VHCA_PAGE + 0x428u) +#define RDMATX_QUEUE_VHCA_FLAG (C_RDMA_TX_VHCA_PAGE + 0x448u) + +/* rdmatx_doorbell_mgr_vhca_pf */ +#define C_RDMA_SQ_DBINFO_LOW_DIN (C_RDMA_TX_VHCA_PF_PAGE + 0x404u) +#define C_RDMA_SQ_DBINFO_HIGH_DIN (C_RDMA_TX_VHCA_PF_PAGE + 0x42Cu) //not used + +/* rdmatx_wqe_parse_vhca_pfvf */ +#define RDMATX_SQWQE_PBLE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC00u) //hmc +#define RDMATX_SQWQE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC04u) +#define RDMATX_AH_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC08u) //hmc +#define RDMATX_LOCAL_MRTE_PARENT_PARA_CFG \ + (C_RDMA_TX_VHCA_PAGE + 0xC0Cu) //not used +#define RDMATX_LOCAL_MRTE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC10u) //hmc +#define RDMATX_SGETRAN_MRTE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC14u) //hmc +#define 
RDMATX_SGETRAN_PBLE_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC18u) //hmc +#define RDMATX_PAYLOAD_PARA_CFG (C_RDMA_TX_VHCA_PAGE + 0xC1Cu) +#define RDMATX_HOSTID_CFG (C_RDMA_TX_VHCA_PAGE + 0xC20u) + +/* rdmatx_cm_vhca_pfvf */ +#define RDMA_CPU_MSIX_DATA (C_RDMA_TX_VHCA_PAGE + 0x00000804u) /* i=0...1023 */ +#define RDMA_CPU_MSIX_CONFIG \ + (C_RDMA_TX_VHCA_PAGE + 0x00000808u) /* i=0...1023 */ +#define AEQ_REPORT_ROOT_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x00000814u) /* i=0...1023 */ +#define AEQ_REPORT_LEAF_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x00000818u) /* i=0...1023 */ +#define AEQ_REPORT_WR_AXI_DATA \ + (C_RDMA_TX_VHCA_PAGE + 0x0000081Cu) /* i=0...1023 */ +#define AEQ_AEQC_AEE_FLAG (C_RDMA_TX_VHCA_PAGE + 0x00000820u) /* i=0...1023 */ +#define RDMATX_TXWINDOW_QPN_BASE (C_RDMA_TX_VHCA_PAGE + 0x810u) + +/* rdmatx_cm_vhca_pf */ +#define C_RDMA_CPU_SOFTWARE_TAIL (C_RDMA_TX_VHCA_PF_PAGE + 0x00000804u) +#define C_RDMA_CPU_AEQ_ARM (C_RDMA_TX_VHCA_PF_PAGE + 0x00000808u) + +/* rdmatx_sub_vhca_pfvf*/ +#define RDMATX_ADD_TOKEN_CHANGE_EN (C_RDMA_TX_VHCA_PAGE + 0x928u) +#define RDMATX_TIME_ADD_TOKEN_CFG (C_RDMA_TX_VHCA_PAGE + 0x92Cu) +#define RDMATX_SIZE_ADD_TOKEN_CFG (C_RDMA_TX_VHCA_PAGE + 0x930u) +#define RDMATX_TOKEN_MAX_CFG (C_RDMA_TX_VHCA_PAGE + 0x934u) + +/****** RES Module Register ******/ +/* rdmarx_pkt_proc */ +#define C_ICRC_CHECK_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x7cu) +#define C_ICRC_CHECK_SOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x8cu) +#define C_ICRC_PROC_SOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x84u) +#define C_ICRC_PROC_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x88u) +#define C_NHD_CHECK_ETH_DISGARD_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x4e4u) +#define C_NHD_CHECK_ICRC_REMOVAL_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x4f8u) +#define C_PLD_CACHE_PKT_PLD_PROC_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0xc64u) +#define C_PLD_CACHE_PLD_CACHE_CTRL_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0xc6cu) +#define C_TRPG_NP_RX_EOP_CNT (C_RDMA_RX_PKT_PROC_PAGE + 0x68u) + +/* rdmarx_cnp_gen */ +#define C_STATE_ERR_CFG 
(C_RDMA_RX_CNP_GEN_PAGE + 0x84u) + +/* rdma_tx_rtt_cfg*/ +#define RDMATX_RTT_CFG (C_RDMA_TX_RTT_PAGE + 0x468u) +/* rdma tx cap cfg*/ +#define RDMA_TX_SEL_NODE_MODULE_NUM 6 +#define RDMA_TX_SEL_NODE_MODULE_ACK 0 +#define RDMA_TX_SEL_NODE_MODULE_DB 1 +#define RDMA_TX_SEL_NODE_MODULE_AEQ 2 +#define RDMA_TX_SEL_NODE_MODULE_NONE 3 +#define RDMA_TX_SEL_NODE_MODULE_TXWINDOW 4 +#define RDMA_TX_SEL_NODE_MODULE_WQE 5 +#define RDMA_TX_CAP_WQE_MOD_NUM 3 +#define RDMA_TX_CAP_WQE_PRE_READ 0 +#define RDMA_TX_CAP_WQE_HANDLE 1 +#define RDMA_TX_CAP_WQE_PACK 2 + +#define C_RDMA_TX_CAP_BASE 0x62065E0000u +#define C_RDMA_TX_ACK_RECV_BASE 0x62065E8000u +#define C_RDMA_TX_SIDN_BASE 0x6206600000u +#define RDMATX_CAP_CHL_SEL_NODE0 (C_RDMA_TX_CAP_BASE + 0x224u) +#define RDMATX_CAP_CHL_OPEN_NODE0 (C_RDMA_TX_CAP_BASE + 0x228u) +#define RDMATX_CAP_NODE0_SEL (C_RDMA_TX_WQE_PARSE_PAGE + 0x144u) +#define RDMATX_CAP_NODE0_ACK (C_RDMA_TX_ACK_RECV_BASE + 0xF8u) +#define RDMATX_CAP_NODE0_DB (C_RDMA_TX_SIDN_BASE + 0x32Cu) +#define RDMATX_CAP_NODE0_AEQ (C_RDMA_TX_CAP_BASE + 0x214u) +#define RDMATX_CAP_NODE0_TXWINDOW (C_RDMA_TX_CAP_BASE + 0x21Cu) +#define RDMATX_CAP_NODE0_WQE_PRE_READ (C_RDMA_TX_WQE_PARSE_PAGE + 0x14Cu) +#define RDMATX_CAP_NODE0_WQE_HANDLE (C_RDMA_TX_WQE_PARSE_PAGE + 0x154u) +#define RDMATX_CAP_NODE0_PACKAGE (C_RDMA_TX_WQE_PARSE_PAGE + 0x15Cu) +#define RDMATX_CAP_COMPARE_BIT_EN0_NODE0 (C_RDMA_TX_CAP_BASE + 0x2FCu) +#define RDMATX_CAP_COMPARE_BIT_EN1_NODE0 (C_RDMA_TX_CAP_BASE + 0x300u) +#define RDMATX_CAP_COMPARE_BIT_EN2_NODE0 (C_RDMA_TX_CAP_BASE + 0x304u) +#define RDMATX_CAP_COMPARE_BIT_EN3_NODE0 (C_RDMA_TX_CAP_BASE + 0x308u) +#define RDMATX_CAP_COMPARE_BIT_EN4_NODE0 (C_RDMA_TX_CAP_BASE + 0x30Cu) +#define RDMATX_CAP_COMPARE_BIT_EN5_NODE0 (C_RDMA_TX_CAP_BASE + 0x310u) +#define RDMATX_CAP_COMPARE_BIT_EN6_NODE0 (C_RDMA_TX_CAP_BASE + 0x314u) +#define RDMATX_CAP_COMPARE_BIT_EN7_NODE0 (C_RDMA_TX_CAP_BASE + 0x318u) +#define RDMATX_CAP_COMPARE_BIT_EN8_NODE0 (C_RDMA_TX_CAP_BASE + 0x31Cu) 
+#define RDMATX_CAP_COMPARE_BIT_EN9_NODE0 (C_RDMA_TX_CAP_BASE + 0x320u) +#define RDMATX_CAP_COMPARE_BIT_EN10_NODE0 (C_RDMA_TX_CAP_BASE + 0x324u) +#define RDMATX_CAP_COMPARE_BIT_EN11_NODE0 (C_RDMA_TX_CAP_BASE + 0x328u) +#define RDMATX_CAP_COMPARE_BIT_EN12_NODE0 (C_RDMA_TX_CAP_BASE + 0x32Cu) +#define RDMATX_CAP_COMPARE_BIT_EN13_NODE0 (C_RDMA_TX_CAP_BASE + 0x330u) +#define RDMATX_CAP_COMPARE_BIT_EN14_NODE0 (C_RDMA_TX_CAP_BASE + 0x334u) +#define RDMATX_CAP_COMPARE_BIT_EN15_NODE0 (C_RDMA_TX_CAP_BASE + 0x338u) +#define RDMATX_CAP_COMPARE_DATA0_NODE0 (C_RDMA_TX_CAP_BASE + 0x234u) +#define RDMATX_CAP_COMPARE_DATA1_NODE0 (C_RDMA_TX_CAP_BASE + 0x238u) +#define RDMATX_CAP_COMPARE_DATA2_NODE0 (C_RDMA_TX_CAP_BASE + 0x23Cu) +#define RDMATX_CAP_COMPARE_DATA3_NODE0 (C_RDMA_TX_CAP_BASE + 0x240u) +#define RDMATX_CAP_COMPARE_DATA4_NODE0 (C_RDMA_TX_CAP_BASE + 0x244u) +#define RDMATX_CAP_COMPARE_DATA5_NODE0 (C_RDMA_TX_CAP_BASE + 0x248u) +#define RDMATX_CAP_COMPARE_DATA6_NODE0 (C_RDMA_TX_CAP_BASE + 0x24Cu) +#define RDMATX_CAP_COMPARE_DATA7_NODE0 (C_RDMA_TX_CAP_BASE + 0x250u) +#define RDMATX_CAP_COMPARE_DATA8_NODE0 (C_RDMA_TX_CAP_BASE + 0x254u) +#define RDMATX_CAP_COMPARE_DATA9_NODE0 (C_RDMA_TX_CAP_BASE + 0x258u) +#define RDMATX_CAP_COMPARE_DATA10_NODE0 (C_RDMA_TX_CAP_BASE + 0x25Cu) +#define RDMATX_CAP_COMPARE_DATA11_NODE0 (C_RDMA_TX_CAP_BASE + 0x260u) +#define RDMATX_CAP_COMPARE_DATA12_NODE0 (C_RDMA_TX_CAP_BASE + 0x264u) +#define RDMATX_CAP_COMPARE_DATA13_NODE0 (C_RDMA_TX_CAP_BASE + 0x268u) +#define RDMATX_CAP_COMPARE_DATA14_NODE0 (C_RDMA_TX_CAP_BASE + 0x26Cu) +#define RDMATX_CAP_COMPARE_DATA15_NODE0 (C_RDMA_TX_CAP_BASE + 0x270u) +#define RDMATX_CAP_AXI_WR_ADDR_LOW_NODE0 (C_RDMA_TX_CAP_BASE + 0x274u) +#define RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE0 (C_RDMA_TX_CAP_BASE + 0x278u) +#define RDMATX_CAP_AXI_WR_LEN_LOW_NODE0 (C_RDMA_TX_CAP_BASE + 0x27Cu) +#define RDMATX_CAP_AXI_WR_LEN_HIGH_NODE0 (C_RDMA_TX_CAP_BASE + 0x4ECu) +#define RDMATX_CAP_TIME_WRL2D_NODE0 (C_RDMA_TX_CAP_BASE + 0x2F4u) +#define 
RDMATX_CAP_VHCA_NUM_NODE0 (C_RDMA_TX_CAP_BASE + 0x22Cu) +#define RDMATX_CAP_AXI_ID_NODE0 (C_RDMA_TX_CAP_BASE + 0x2DCu) +#define RDMATX_CAP_CAP_ID_NODE0 (C_RDMA_TX_CAP_BASE + 0x2E4u) +#define RDMATX_CAP_CHL_SEL_NODE1 (C_RDMA_TX_CAP_BASE + 0x280u) +#define RDMATX_CAP_CHL_OPEN_NODE1 (C_RDMA_TX_CAP_BASE + 0x284u) +#define RDMATX_CAP_NODE1_SEL (C_RDMA_TX_WQE_PARSE_PAGE + 0x148u) +#define RDMATX_CAP_NODE1_ACK (C_RDMA_TX_ACK_RECV_BASE + 0xFCu) +#define RDMATX_CAP_NODE1_DB (C_RDMA_TX_SIDN_BASE + 0x330u) +#define RDMATX_CAP_NODE1_AEQ (C_RDMA_TX_CAP_BASE + 0x218u) +#define RDMATX_CAP_NODE1_TXWINDOW (C_RDMA_TX_CAP_BASE + 0x220u) +#define RDMATX_CAP_NODE1_WQE_PRE_READ (C_RDMA_TX_WQE_PARSE_PAGE + 0x150u) +#define RDMATX_CAP_NODE1_WQE_HANDLE (C_RDMA_TX_WQE_PARSE_PAGE + 0x158u) +#define RDMATX_CAP_NODE1_PACKAGE (C_RDMA_TX_WQE_PARSE_PAGE + 0x160u) +#define RDMATX_CAP_COMPARE_BIT_EN0_NODE1 (C_RDMA_TX_CAP_BASE + 0x33Cu) +#define RDMATX_CAP_COMPARE_BIT_EN1_NODE1 (C_RDMA_TX_CAP_BASE + 0x340u) +#define RDMATX_CAP_COMPARE_BIT_EN2_NODE1 (C_RDMA_TX_CAP_BASE + 0x344u) +#define RDMATX_CAP_COMPARE_BIT_EN3_NODE1 (C_RDMA_TX_CAP_BASE + 0x348u) +#define RDMATX_CAP_COMPARE_BIT_EN4_NODE1 (C_RDMA_TX_CAP_BASE + 0x34Cu) +#define RDMATX_CAP_COMPARE_BIT_EN5_NODE1 (C_RDMA_TX_CAP_BASE + 0x350u) +#define RDMATX_CAP_COMPARE_BIT_EN6_NODE1 (C_RDMA_TX_CAP_BASE + 0x354u) +#define RDMATX_CAP_COMPARE_BIT_EN7_NODE1 (C_RDMA_TX_CAP_BASE + 0x358u) +#define RDMATX_CAP_COMPARE_BIT_EN8_NODE1 (C_RDMA_TX_CAP_BASE + 0x35Cu) +#define RDMATX_CAP_COMPARE_BIT_EN9_NODE1 (C_RDMA_TX_CAP_BASE + 0x360u) +#define RDMATX_CAP_COMPARE_BIT_EN10_NODE1 (C_RDMA_TX_CAP_BASE + 0x364u) +#define RDMATX_CAP_COMPARE_BIT_EN11_NODE1 (C_RDMA_TX_CAP_BASE + 0x368u) +#define RDMATX_CAP_COMPARE_BIT_EN12_NODE1 (C_RDMA_TX_CAP_BASE + 0x36Cu) +#define RDMATX_CAP_COMPARE_BIT_EN13_NODE1 (C_RDMA_TX_CAP_BASE + 0x370u) +#define RDMATX_CAP_COMPARE_BIT_EN14_NODE1 (C_RDMA_TX_CAP_BASE + 0x374u) +#define RDMATX_CAP_COMPARE_BIT_EN15_NODE1 (C_RDMA_TX_CAP_BASE + 
0x378u) +#define RDMATX_CAP_COMPARE_DATA0_NODE1 (C_RDMA_TX_CAP_BASE + 0x290u) +#define RDMATX_CAP_COMPARE_DATA1_NODE1 (C_RDMA_TX_CAP_BASE + 0x294u) +#define RDMATX_CAP_COMPARE_DATA2_NODE1 (C_RDMA_TX_CAP_BASE + 0x298u) +#define RDMATX_CAP_COMPARE_DATA3_NODE1 (C_RDMA_TX_CAP_BASE + 0x29Cu) +#define RDMATX_CAP_COMPARE_DATA4_NODE1 (C_RDMA_TX_CAP_BASE + 0x2A0u) +#define RDMATX_CAP_COMPARE_DATA5_NODE1 (C_RDMA_TX_CAP_BASE + 0x2ACu) +#define RDMATX_CAP_COMPARE_DATA6_NODE1 (C_RDMA_TX_CAP_BASE + 0x2A8u) +#define RDMATX_CAP_COMPARE_DATA7_NODE1 (C_RDMA_TX_CAP_BASE + 0x2ACu) +#define RDMATX_CAP_COMPARE_DATA8_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B0u) +#define RDMATX_CAP_COMPARE_DATA9_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B4u) +#define RDMATX_CAP_COMPARE_DATA10_NODE1 (C_RDMA_TX_CAP_BASE + 0x2B8u) +#define RDMATX_CAP_COMPARE_DATA11_NODE1 (C_RDMA_TX_CAP_BASE + 0x2BCu) +#define RDMATX_CAP_COMPARE_DATA12_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C0u) +#define RDMATX_CAP_COMPARE_DATA13_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C4u) +#define RDMATX_CAP_COMPARE_DATA14_NODE1 (C_RDMA_TX_CAP_BASE + 0x2C8u) +#define RDMATX_CAP_COMPARE_DATA15_NODE1 (C_RDMA_TX_CAP_BASE + 0x2CCu) +#define RDMATX_CAP_AXI_WR_ADDR_LOW_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D0u) +#define RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D4u) +#define RDMATX_CAP_AXI_WR_LEN_LOW_NODE1 (C_RDMA_TX_CAP_BASE + 0x2D8u) +#define RDMATX_CAP_AXI_WR_LEN_HIGH_NODE1 (C_RDMA_TX_CAP_BASE + 0x4F0u) +#define RDMATX_CAP_TIME_WRL2D_NODE1 (C_RDMA_TX_CAP_BASE + 0x2F8u) +#define RDMATX_CAP_VHCA_NUM_NODE1 (C_RDMA_TX_CAP_BASE + 0x288u) +#define RDMATX_CAP_AXI_ID_NODE1 (C_RDMA_TX_CAP_BASE + 0x2E0u) +#define RDMATX_CAP_CAP_ID_NODE1 (C_RDMA_TX_CAP_BASE + 0x2E8u) +#define RDMATX_DATA_START_CAP (C_RDMA_TX_CAP_BASE + 0x230u) + +/* rdmarx_pri_base_rd */ +#define RDMARX_PRI_BASE_RD (C_RDMA_RX_PUBLIC_PAGE2 + 0x418u) + +/* rdmarx_cq_period_cfg*/ +#define RDMARX_CQ_PERIOD_CFG (C_RDMA_RX_CNP_GEN_PAGE + 0x0f7cu) + +/* rdmarx_ram_scheduling */ +#define RDMARX_RAM_NUM 
(C_RDMA_RX_RAM_SCHE_PAGE1 + 0x50cu) +#define RDMARX_RAM_WIDTH (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x510u) +#define RDMARX_RAM_ADDR (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x504u) +#define RDMARX_RAM_READ_LENGTH (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x508u) +#define RDMARX_RD_TIME_LIMIT (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5c0u) +#define RDMARX_RAM_READ_FLAG (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x500u) +#define RDMARX_READ_ERROR_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5c4u) +#define RDMARX_READ_CNT_ERROR (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5dcu) +#define RDMARX_RAM_REDUN_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x610u) +#define RDMARX_DOUBLE_VLD_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x614u) +#define RDMARX_RAM_MAINTENANCE_RAM(_i) \ + (C_RDMA_RX_RAM_SCHE_PAGE1 + 0x514u + (_i)*0x4) /* i=0...14 */ +#define RDMARX_RAM_USE_FLAG (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E0u) +#define RDMARX_RAM_USE_VHCA_ID (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E4u) +#define RDMARX_RAM_USE_CNT (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5E8u) + +/* rdmarx rsv fw done */ +#define RDMARX_WAIT_FW_DONE (C_RDMA_RX_RAM_SCHE_PAGE5 + 0x5F0u) + +/* rdma rx cap cfg*/ +#define RDMA_RX_SEL_NODE_MODULE_NUM 14 +#define RDMA_RX_SEL_NODE_MODULE_RTT_T4 0 +#define RDMA_RX_SEL_NODE_MODULE_PKT_PROC 1 +#define RDMA_RX_SEL_NODE_MODULE_HD_CACHE 2 +#define RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR 3 +#define RDMA_RX_SEL_NODE_MODULE_PSN_CHECK 4 +#define RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK 5 +#define RDMA_RX_SEL_NODE_MODULE_READ_SRQC 6 +#define RDMA_RX_SEL_NODE_MODULE_READ_WQE 7 +#define RDMA_RX_SEL_NODE_MODULE_CNP_GEN 8 +#define RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO 9 +#define RDMA_RX_SEL_NODE_MODULE_CEQ 10 +#define RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE 11 +#define RDMA_RX_SEL_NODE_MODULE_NOF 12 +#define RDMA_RX_SEL_NODE_MODULE_TX_SUB 13 + +#define C_RDMA_RX_CAP_BASE 0x6205478000u +#define C_RDMA_RX_PKT_PROC_BASE 0x6205400000u +#define C_RDMA_RX_PUBLIC_BASE1 0x6205420000u +#define C_RDMA_RX_PUBLIC_BASE2 0x6205440000u +#define C_RDMA_RX_CNP_GEN_BASE 0x6205460000u +#define C_RDMA_RTT_BASE 0x62065F8000u +#define 
C_RDMA_RX_NOF_BASE 0x6205465000u +#define RDMARX_CAP_CHL_SEL_NODE0 (C_RDMA_RX_CAP_BASE + 0x604u) +#define RDMARX_CAP_CHL_OPEN_NODE0 (C_RDMA_RX_CAP_BASE + 0x60Cu) +#define RDMARX_CAP_NODE0_SEL_RTT_T4 (C_RDMA_RX_CAP_BASE + 0x774u) +#define RDMARX_CAP_NODE0_SEL_PKT_PROC (C_RDMA_RX_PKT_PROC_BASE + 0x10u) +#define RDMARX_CAP_NODE_SEL_HD_CACHE (C_RDMA_RX_PUBLIC_BASE1 + 0x610u) +#define RDMARX_CAP_NODE_SEL_VAPA_DDRWR (C_RDMA_RX_PUBLIC_BASE1 + 0xA00u) +#define RDMARX_CAP_NODE0_SEL_PRIFIELD_CHECK (C_RDMA_RX_PUBLIC_BASE2 + 0x404u) +#define RDMARX_CAP_NODE0_SEL_READ_SRQC (C_RDMA_RX_PUBLIC_BASE2 + 0x870u) +#define RDMARX_CAP_NODE0_SEL_READ_WQE (C_RDMA_RX_PUBLIC_BASE2 + 0xC14u) +#define RDMARX_CAP_NODE0_SEL_CNP_GEN (C_RDMA_RX_CNP_GEN_BASE + 0x1Cu) +#define RDMARX_CAP_NODE_SEL_ACKNAKFIFO (C_RDMA_RX_CNP_GEN_BASE + 0x3CCu) +#define RDMARX_CAP_NODE0_SEL_CQE (C_RDMA_RX_CNP_GEN_BASE + 0x814u) +#define RDMARX_CAP_NODE0_SEL_COMPLQUEUE (C_RDMA_RX_CNP_GEN_BASE + 0xC70u) +#define RDMARX_CAP_NODE_SEL_NOF (C_RDMA_RX_NOF_BASE + 0x400u) +#define RDMARX_CAP_NODE0_SEL_TXSUB (C_RDMA_RTT_BASE + 0x460u) + +#define RDMARX_CAP_COMPARE_BIT_EN0_NODE0 (C_RDMA_RX_CAP_BASE + 0x6DCu) +#define RDMARX_CAP_COMPARE_BIT_EN1_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E0u) +#define RDMARX_CAP_COMPARE_BIT_EN2_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E4u) +#define RDMARX_CAP_COMPARE_BIT_EN3_NODE0 (C_RDMA_RX_CAP_BASE + 0x6E8u) +#define RDMARX_CAP_COMPARE_BIT_EN4_NODE0 (C_RDMA_RX_CAP_BASE + 0x6ECu) +#define RDMARX_CAP_COMPARE_BIT_EN5_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F0u) +#define RDMARX_CAP_COMPARE_BIT_EN6_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F4u) +#define RDMARX_CAP_COMPARE_BIT_EN7_NODE0 (C_RDMA_RX_CAP_BASE + 0x6F8u) +#define RDMARX_CAP_COMPARE_BIT_EN8_NODE0 (C_RDMA_RX_CAP_BASE + 0x6FCu) +#define RDMARX_CAP_COMPARE_BIT_EN9_NODE0 (C_RDMA_RX_CAP_BASE + 0x700u) +#define RDMARX_CAP_COMPARE_BIT_EN10_NODE0 (C_RDMA_RX_CAP_BASE + 0x704u) +#define RDMARX_CAP_COMPARE_BIT_EN11_NODE0 (C_RDMA_RX_CAP_BASE + 0x708u) +#define RDMARX_CAP_COMPARE_BIT_EN12_NODE0 
(C_RDMA_RX_CAP_BASE + 0x70Cu) +#define RDMARX_CAP_COMPARE_BIT_EN13_NODE0 (C_RDMA_RX_CAP_BASE + 0x710u) +#define RDMARX_CAP_COMPARE_BIT_EN14_NODE0 (C_RDMA_RX_CAP_BASE + 0x714u) +#define RDMARX_CAP_COMPARE_BIT_EN15_NODE0 (C_RDMA_RX_CAP_BASE + 0x718u) +#define RDMARX_CAP_COMPARE_DATA0_NODE0 (C_RDMA_RX_CAP_BASE + 0x61Cu) +#define RDMARX_CAP_COMPARE_DATA1_NODE0 (C_RDMA_RX_CAP_BASE + 0x620u) +#define RDMARX_CAP_COMPARE_DATA2_NODE0 (C_RDMA_RX_CAP_BASE + 0x624u) +#define RDMARX_CAP_COMPARE_DATA3_NODE0 (C_RDMA_RX_CAP_BASE + 0x628u) +#define RDMARX_CAP_COMPARE_DATA4_NODE0 (C_RDMA_RX_CAP_BASE + 0x62Cu) +#define RDMARX_CAP_COMPARE_DATA5_NODE0 (C_RDMA_RX_CAP_BASE + 0x630u) +#define RDMARX_CAP_COMPARE_DATA6_NODE0 (C_RDMA_RX_CAP_BASE + 0x634u) +#define RDMARX_CAP_COMPARE_DATA7_NODE0 (C_RDMA_RX_CAP_BASE + 0x638u) +#define RDMARX_CAP_COMPARE_DATA8_NODE0 (C_RDMA_RX_CAP_BASE + 0x63Cu) +#define RDMARX_CAP_COMPARE_DATA9_NODE0 (C_RDMA_RX_CAP_BASE + 0x640u) +#define RDMARX_CAP_COMPARE_DATA10_NODE0 (C_RDMA_RX_CAP_BASE + 0x644u) +#define RDMARX_CAP_COMPARE_DATA11_NODE0 (C_RDMA_RX_CAP_BASE + 0x648u) +#define RDMARX_CAP_COMPARE_DATA12_NODE0 (C_RDMA_RX_CAP_BASE + 0x64Cu) +#define RDMARX_CAP_COMPARE_DATA13_NODE0 (C_RDMA_RX_CAP_BASE + 0x650u) +#define RDMARX_CAP_COMPARE_DATA14_NODE0 (C_RDMA_RX_CAP_BASE + 0x654u) +#define RDMARX_CAP_COMPARE_DATA15_NODE0 (C_RDMA_RX_CAP_BASE + 0x658u) +#define RDMARX_CAP_AXI_WR_ADDR_LOW_NODE0 (C_RDMA_RX_CAP_BASE + 0x6ACu) +#define RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B0u) +#define RDMARX_CAP_AXI_WR_LEN_LOW_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B4u) +#define RDMARX_CAP_AXI_WR_LEN_HIGH_NODE0 (C_RDMA_RX_CAP_BASE + 0x898u) +#define RDMARX_CAP_TIME_WRL2D_NODE0 (C_RDMA_RX_CAP_BASE + 0x6D4u) +#define RDMARX_CAP_VHCA_NUM_NODE0 (C_RDMA_RX_CAP_BASE + 0x69Cu) +#define RDMARX_CAP_AXI_ID_NODE0 (C_RDMA_RX_CAP_BASE + 0x6A4u) +#define RDMARX_CAP_CAP_ID_NODE0 (C_RDMA_RX_CAP_BASE + 0x6B8u) + +#define RDMARX_CAP_CHL_SEL_NODE1 (C_RDMA_RX_CAP_BASE + 0x608u) +#define 
RDMARX_CAP_CHL_OPEN_NODE1 (C_RDMA_RX_CAP_BASE + 0x610u) +#define RDMARX_CAP_NODE1_SEL_RTT_T4 (C_RDMA_RX_CAP_BASE + 0x778u) +#define RDMARX_CAP_NODE1_SEL_PKT_PROC (C_RDMA_RX_PKT_PROC_BASE + 0x14u) +#define RDMARX_CAP_NODE1_SEL_PRIFIELD_CHECK (C_RDMA_RX_PUBLIC_BASE2 + 0x408u) +#define RDMARX_CAP_NODE1_SEL_READ_SRQC (C_RDMA_RX_PUBLIC_BASE2 + 0x874u) +#define RDMARX_CAP_NODE1_SEL_READ_WQE (C_RDMA_RX_PUBLIC_BASE2 + 0xC18u) +#define RDMARX_CAP_NODE1_SEL_CNP_GEN (C_RDMA_RX_CNP_GEN_BASE + 0x20u) +#define RDMARX_CAP_NODE1_SEL_CQE (C_RDMA_RX_CNP_GEN_BASE + 0x818u) +#define RDMARX_CAP_NODE1_SEL_COMPLQUEUE (C_RDMA_RX_CNP_GEN_BASE + 0xC74u) +#define RDMARX_CAP_NODE1_SEL_TXSUB (C_RDMA_RTT_BASE + 0x464u) + +#define RDMARX_CAP_COMPARE_BIT_EN0_NODE1 (C_RDMA_RX_CAP_BASE + 0x71Cu) +#define RDMARX_CAP_COMPARE_BIT_EN1_NODE1 (C_RDMA_RX_CAP_BASE + 0x720u) +#define RDMARX_CAP_COMPARE_BIT_EN2_NODE1 (C_RDMA_RX_CAP_BASE + 0x724u) +#define RDMARX_CAP_COMPARE_BIT_EN3_NODE1 (C_RDMA_RX_CAP_BASE + 0x728u) +#define RDMARX_CAP_COMPARE_BIT_EN4_NODE1 (C_RDMA_RX_CAP_BASE + 0x72Cu) +#define RDMARX_CAP_COMPARE_BIT_EN5_NODE1 (C_RDMA_RX_CAP_BASE + 0x730u) +#define RDMARX_CAP_COMPARE_BIT_EN6_NODE1 (C_RDMA_RX_CAP_BASE + 0x734u) +#define RDMARX_CAP_COMPARE_BIT_EN7_NODE1 (C_RDMA_RX_CAP_BASE + 0x738u) +#define RDMARX_CAP_COMPARE_BIT_EN8_NODE1 (C_RDMA_RX_CAP_BASE + 0x73Cu) +#define RDMARX_CAP_COMPARE_BIT_EN9_NODE1 (C_RDMA_RX_CAP_BASE + 0x740u) +#define RDMARX_CAP_COMPARE_BIT_EN10_NODE1 (C_RDMA_RX_CAP_BASE + 0x744u) +#define RDMARX_CAP_COMPARE_BIT_EN11_NODE1 (C_RDMA_RX_CAP_BASE + 0x748u) +#define RDMARX_CAP_COMPARE_BIT_EN12_NODE1 (C_RDMA_RX_CAP_BASE + 0x74Cu) +#define RDMARX_CAP_COMPARE_BIT_EN13_NODE1 (C_RDMA_RX_CAP_BASE + 0x750u) +#define RDMARX_CAP_COMPARE_BIT_EN14_NODE1 (C_RDMA_RX_CAP_BASE + 0x754u) +#define RDMARX_CAP_COMPARE_BIT_EN15_NODE1 (C_RDMA_RX_CAP_BASE + 0x758u) +#define RDMARX_CAP_COMPARE_DATA0_NODE1 (C_RDMA_RX_CAP_BASE + 0x65Cu) +#define RDMARX_CAP_COMPARE_DATA1_NODE1 (C_RDMA_RX_CAP_BASE + 0x660u) 
+#define RDMARX_CAP_COMPARE_DATA2_NODE1 (C_RDMA_RX_CAP_BASE + 0x664u) +#define RDMARX_CAP_COMPARE_DATA3_NODE1 (C_RDMA_RX_CAP_BASE + 0x668u) +#define RDMARX_CAP_COMPARE_DATA4_NODE1 (C_RDMA_RX_CAP_BASE + 0x66Cu) +#define RDMARX_CAP_COMPARE_DATA5_NODE1 (C_RDMA_RX_CAP_BASE + 0x670u) +#define RDMARX_CAP_COMPARE_DATA6_NODE1 (C_RDMA_RX_CAP_BASE + 0x674u) +#define RDMARX_CAP_COMPARE_DATA7_NODE1 (C_RDMA_RX_CAP_BASE + 0x678u) +#define RDMARX_CAP_COMPARE_DATA8_NODE1 (C_RDMA_RX_CAP_BASE + 0x67Cu) +#define RDMARX_CAP_COMPARE_DATA9_NODE1 (C_RDMA_RX_CAP_BASE + 0x680u) +#define RDMARX_CAP_COMPARE_DATA10_NODE1 (C_RDMA_RX_CAP_BASE + 0x684u) +#define RDMARX_CAP_COMPARE_DATA11_NODE1 (C_RDMA_RX_CAP_BASE + 0x688u) +#define RDMARX_CAP_COMPARE_DATA12_NODE1 (C_RDMA_RX_CAP_BASE + 0x68Cu) +#define RDMARX_CAP_COMPARE_DATA13_NODE1 (C_RDMA_RX_CAP_BASE + 0x690u) +#define RDMARX_CAP_COMPARE_DATA14_NODE1 (C_RDMA_RX_CAP_BASE + 0x694u) +#define RDMARX_CAP_COMPARE_DATA15_NODE1 (C_RDMA_RX_CAP_BASE + 0x698u) +#define RDMARX_CAP_AXI_WR_ADDR_LOW_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C0u) +#define RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C4u) +#define RDMARX_CAP_AXI_WR_LEN_LOW_NODE1 (C_RDMA_RX_CAP_BASE + 0x6C8u) +#define RDMARX_CAP_AXI_WR_LEN_HIGH_NODE1 (C_RDMA_RX_CAP_BASE + 0x89Cu) +#define RDMARX_CAP_TIME_WRL2D_NODE1 (C_RDMA_RX_CAP_BASE + 0x6D8u) +#define RDMARX_CAP_VHCA_NUM_NODE1 (C_RDMA_RX_CAP_BASE + 0x6A0u) +#define RDMARX_CAP_AXI_ID_NODE1 (C_RDMA_RX_CAP_BASE + 0x6A8u) +#define RDMARX_CAP_CAP_ID_NODE1 (C_RDMA_RX_CAP_BASE + 0x6BCu) +#define RDMARX_DATA_START_CAP (C_RDMA_RX_CAP_BASE + 0x614u) + +/* rdmarx_ceq_pf */ +#define RDMARX_CEQ_EQARM_RAM (C_RDMA_RX_VHCA_PF_PAGE + 0x684u) //CEQ +#define C_CEQ_EQARM_RAM (C_RDMA_RX_VHCA_PF_PAGE + 0x00000684u) + +/* rdmarx_completion_queue_pf */ +#define RDMARX_CQ_CQARM (C_RDMA_RX_VHCA_PF_PAGE + 0x588u) + +/* rdmarx_pkt_proc_pfvf */ +#define RDMARX_MUL_CACHE_CFG_SIDN_RAM (C_RDMA_RX_VHCA_PAGE + 0x108u) +#define RDMARX_MUL_COPY_QPN_INDICATE 
(C_RDMA_RX_VHCA_PAGE + 0x214u) + +/* rdma_rdmarx_hd_cache_top_pfvf */ +#define RDMARX_LIST_CACHE_BASE_QPN (C_RDMA_RX_VHCA_PAGE + 0x28Cu) +#define RDMARX_PLD_WR_AXIID_RAM (C_RDMA_RX_VHCA_PAGE + 0x304u) + +/* rdmarx_prifield_check_pfvf */ +#define RDMARX_VHCA_MAX_SIZE_RAM (C_RDMA_RX_VHCA_PAGE + 0x400u) + +/* rdmarx_read_srqc_top_pfvf */ +#define RDMARX_SRQN_BASE_RAM (C_RDMA_RX_VHCA_PAGE + 0x49Cu) +#define C_DB_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x48Cu) /* i=0...1023 */ +#define C_SRQL_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x494u) /* i=0...1023 */ + +/* rdmarx_completion_queue_pf */ +#define RDMARX_RQ_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x500u) +#define RDMARX_SRQ_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x508u) +#define RDMARX_PCI_MAX_MRTE_INDEX_RAM (C_RDMA_RX_VHCA_PAGE + 0x50Cu) + +/* rdmarx_ceq_pfvf */ +#define C_CEQ_CEQE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000680u) /* i=0...1023 */ +#define C_CEQ_RPBLE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000684u) /* i=0...1023 */ +#define C_CEQ_LPBLE_AXI_INFO_RAM \ + (C_RDMA_RX_VHCA_PAGE + 0x00000688u) /* i=0...1023 */ +#define C_CEQ_INT_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x0000068Cu) /* i=0...1023 */ +#define RDMARX_ACK_RQDB_AXI_RAM (C_RDMA_RX_VHCA_PAGE + 0x600u) + +/* rdmarx_completion_queue_pfvf */ +#define RDMARX_CQ_CQN_BASE_OFFSET_RAM (C_RDMA_RX_VHCA_PAGE + 0x580u) +#define RDMARX_CQ_CQE_AXI_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x594u) +#define RDMARX_CQ_DBSA_AXI_INFO_RAM (C_RDMA_RX_VHCA_PAGE + 0x598u) + +/****** IO Module Register ******/ +#define C_RDMAIO_TABLE2 (C_RDMA_IO_VHCA_PAGE + 0x060u) +#define C_RDMAIO_TABLE4 (C_RDMA_IO_VHCA_PAGE + 0x080u) +#define C_RDMAIO_TABLE3 (C_RDMA_IO_VHCA_PAGE + 0x070u) + +#define C_RDMAIO_TABLE5_0 (C_RDMA_IO_VHCA_PAGE + 0x090u) +#define C_RDMAIO_TABLE5_1 (C_RDMA_IO_VHCA_PAGE + 0x094u) +#define C_RDMAIO_TABLE5_2 (C_RDMA_IO_VHCA_PAGE + 0x098u) +#define C_RDMAIO_TABLE5_3 (C_RDMA_IO_VHCA_PAGE + 0x09cu) + +#define C_RDMAIO_TABLE5_4 (C_RDMA_IO_VHCA_PAGE + 0x0a0u) +#define C_RDMAIO_TABLE5_5 (C_RDMA_IO_VHCA_PAGE + 
0x0a4u) +#define C_RDMAIO_TABLE5_6 (C_RDMA_IO_VHCA_PAGE + 0x0a8u) +#define C_RDMAIO_TABLE5_7 (C_RDMA_IO_VHCA_PAGE + 0x0acu) + +#define C_RDMAIO_TABLE5_8 (C_RDMA_IO_VHCA_PAGE + 0x0b0u) +#define C_RDMAIO_TABLE5_9 (C_RDMA_IO_VHCA_PAGE + 0x0b4u) +#define C_RDMAIO_TABLE5_10 (C_RDMA_IO_VHCA_PAGE + 0x0b8u) +#define C_RDMAIO_TABLE5_11 (C_RDMA_IO_VHCA_PAGE + 0x0bcu) + +#define C_RDMAIO_TABLE5_12 (C_RDMA_IO_VHCA_PAGE + 0x0c0u) +#define C_RDMAIO_TABLE5_13 (C_RDMA_IO_VHCA_PAGE + 0x0c4u) +#define C_RDMAIO_TABLE5_14 (C_RDMA_IO_VHCA_PAGE + 0x0c8u) +#define C_RDMAIO_TABLE5_15 (C_RDMA_IO_VHCA_PAGE + 0x0ccu) + +#define C_RDMAIO_TABLE5_16 (C_RDMA_IO_VHCA_PAGE + 0x0d0u) +#define C_RDMAIO_TABLE5_17 (C_RDMA_IO_VHCA_PAGE + 0x0d4u) +#define C_RDMAIO_TABLE5_18 (C_RDMA_IO_VHCA_PAGE + 0x0d8u) +#define C_RDMAIO_TABLE5_19 (C_RDMA_IO_VHCA_PAGE + 0x0dcu) + +#define C_RDMAIO_TABLE5_20 (C_RDMA_IO_VHCA_PAGE + 0x0e0u) +#define C_RDMAIO_TABLE5_21 (C_RDMA_IO_VHCA_PAGE + 0x0e4u) +#define C_RDMAIO_TABLE5_22 (C_RDMA_IO_VHCA_PAGE + 0x0e8u) +#define C_RDMAIO_TABLE5_23 (C_RDMA_IO_VHCA_PAGE + 0x0ecu) + +#define C_RDMAIO_TABLE5_24 (C_RDMA_IO_VHCA_PAGE + 0x0f0u) +#define C_RDMAIO_TABLE5_25 (C_RDMA_IO_VHCA_PAGE + 0x0f4u) +#define C_RDMAIO_TABLE5_26 (C_RDMA_IO_VHCA_PAGE + 0x0f8u) +#define C_RDMAIO_TABLE5_27 (C_RDMA_IO_VHCA_PAGE + 0x0fcu) + +#define C_RDMAIO_TABLE5_28 (C_RDMA_IO_VHCA_PAGE + 0x100u) +#define C_RDMAIO_TABLE5_29 (C_RDMA_IO_VHCA_PAGE + 0x104u) +#define C_RDMAIO_TABLE5_30 (C_RDMA_IO_VHCA_PAGE + 0x108u) +#define C_RDMAIO_TABLE5_31 (C_RDMA_IO_VHCA_PAGE + 0x10cu) + +#define C_RDMAIO_TABLE7 (C_RDMA_IO_SIDN_PAGE + 0x000u) +#define C_RDMAIO_TABLE6_0 (C_RDMA_IO_SIDN_PAGE + 0x010u) +#define C_RDMAIO_TABLE6_1 (C_RDMA_IO_SIDN_PAGE + 0x014u) +#define C_RDMAIO_TABLE6_2 (C_RDMA_IO_SIDN_PAGE + 0x018u) +#define C_RDMAIO_TABLE6_3 (C_RDMA_IO_SIDN_PAGE + 0x01Cu) +#define C_RDMAIO_TABLE6_4 (C_RDMA_IO_SIDN_PAGE + 0x020u) +#define C_RDMAIO_TABLE6_5 (C_RDMA_IO_SIDN_PAGE + 0x024u) +#define C_RDMAIO_TABLE6_6 
(C_RDMA_IO_SIDN_PAGE + 0x028u) +#define C_RDMAIO_TABLE6_7 (C_RDMA_IO_SIDN_PAGE + 0x02Cu) +#define C_RDMAIO_TABLE6_8 (C_RDMA_IO_SIDN_PAGE + 0x030u) +#define C_RDMAIO_TABLE6_9 (C_RDMA_IO_SIDN_PAGE + 0x034u) +#define C_RDMAIO_TABLE6_10 (C_RDMA_IO_SIDN_PAGE + 0x038u) +#define C_RDMAIO_TABLE6_11 (C_RDMA_IO_SIDN_PAGE + 0x03Cu) +#define C_RDMAIO_TABLE6_12 (C_RDMA_IO_SIDN_PAGE + 0x040u) +#define C_RDMAIO_TABLE6_13 (C_RDMA_IO_SIDN_PAGE + 0x044u) +#define C_RDMAIO_TABLE6_14 (C_RDMA_IO_SIDN_PAGE + 0x048u) +#define C_RDMAIO_TABLE6_15 (C_RDMA_IO_SIDN_PAGE + 0x04Cu) + +/**************************NVME OF L2D *************************/ +#define NOF_IOQ_VHCA_ID(_i) \ + (C_RDMA_NOF_L2D_PAGES + 0x000u + (_i)*0x8) /* i=nof_qid : 0...1023 */ +#define NOF_IOQ_PD_ID(_i) \ + (C_RDMA_NOF_L2D_PAGES + 0x004u + (_i)*0x8) /* i=nof_qid : 0...1023 */ + +/**************************PF HMC REGISTER *************************/ +/******************************PBLEMR***************************/ + +#define C_HMC_PBLEMR_RX1 (C_RDMA_RX_VHCA_PAGE + 0x308u) // ram num:0x35 +#define C_HMC_PBLEMR_RX2 (C_RDMA_RX_VHCA_PAGE + 0x610u) + +#define C_HMC_PBLEMR_TX1 (C_RDMA_TX_VHCA_PAGE + 0x00Cu) +#define C_HMC_PBLEMR_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC18u) + +#define C_HMC_PBLEMR_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x040u) +#define C_HMC_PBLEMR_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x000u) +#define C_HMC_PBLEMR_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x004u) + +/******************************PBLEQUEUE***************************/ + +#define C_HMC_PBLEQUEUE_RX1 (C_RDMA_RX_VHCA_PAGE + 0x404u) +#define C_HMC_PBLEQUEUE_RX2 (C_RDMA_RX_VHCA_PAGE + 0x688u) +#define C_HMC_PBLEQUEUE_RX3 (C_RDMA_RX_VHCA_PAGE + 0x684u) +#define C_HMC_PBLEQUEUE_RX4 (C_RDMA_RX_VHCA_PAGE + 0x5A0u) +#define C_HMC_PBLEQUEUE_RX5 (C_RDMA_RX_VHCA_PAGE + 0x59Cu) + +#define C_HMC_PBLEQUEUE_TX1 (C_RDMA_TX_VHCA_PAGE + 0x000u) +#define C_HMC_PBLEQUEUE_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC00u) + +#define C_HMC_PBLEQUEUE_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x044u) 
+#define C_HMC_PBLEQUEUE_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x008u) +#define C_HMC_PBLEQUEUE_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x00Cu) + +/******************************MRTE***************************/ +#define C_HMC_MRTE_RX1 (C_RDMA_RX_VHCA_PAGE + 0x300u) +#define C_HMC_MRTE_RX2 (C_RDMA_RX_VHCA_PAGE + 0x60Cu) + +#define C_HMC_MRTE_TX1 (C_RDMA_TX_VHCA_PAGE + 0x008u) +#define C_HMC_MRTE_TX2 (C_RDMA_TX_VHCA_PAGE + 0xC14u) +#define C_HMC_MRTE_TX3 (C_RDMA_TX_VHCA_PAGE + 0xC10u) + +#define C_HMC_MRTE_CQP (C_RDMA_CQP_VHCA_PF_PAGE + 0x00Cu) + +#define C_HMC_MRTE_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x048u) +#define C_HMC_MRTE_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x010u) +#define C_HMC_MRTE_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x014u) + +/******************************AH***************************/ + +#define C_HMC_AH_TX (C_RDMA_TX_VHCA_PAGE + 0xC08u) + +#define C_HMC_AH_CQP (C_RDMA_CQP_VHCA_PF_PAGE + 0x010u) + +#define C_HMC_AH_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x04Cu) +#define C_HMC_AH_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x018u) +#define C_HMC_AH_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x01Cu) + +/******************************IRD***************************/ + +#define C_HMC_IRD_RX1 (C_RDMA_RX_VHCA_PAGE + 0x604u) +#define C_HMC_IRD_RX2 (C_RDMA_RX_VHCA_PAGE + 0x608u) +#define C_HMC_IRD_RX3 (C_RDMA_RX_VHCA_PAGE + 0x614u) + +#define C_HMC_IRD_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x050u) +#define C_HMC_IRD_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x020u) +#define C_HMC_IRD_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x024u) + +/******************************TX_WINDOW***************************/ +#define C_HMC_TX_WINDOW_TX (C_RDMA_TX_VHCA_PAGE + 0x80Cu) +// #define TXWINDOW_DDR_SIZE 0x62065e0100 +#define C_HMC_TX_WINDOW_RDMAIO_INDICATE (C_RDMA_IO_VHCA_PAGE + 0x054u) +#define C_HMC_TX_WINDOW_RDMAIO_BASE_LOW (C_RDMA_IO_VHCA_PAGE + 0x028u) +#define C_HMC_TX_WINDOW_RDMAIO_BASE_HIGH (C_RDMA_IO_VHCA_PAGE + 0x02Cu) + 
+/******************************QPC***************************/ +#define C_HMC_QPC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x280u) +#define C_HMC_QPC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x284u) +#define C_HMC_QPC_RX (C_RDMA_RX_VHCA_PAGE + 0x288u) + +#define C_HMC_QPC_TX_BASE_LOW (C_RDMA_TX_VHCA_PAGE + 0x41Cu) +#define C_HMC_QPC_TX_BASE_HIGH (C_RDMA_TX_VHCA_PAGE + 0x438u) + +#define C_HMC_QPC_TX (C_RDMA_TX_VHCA_PAGE + 0x414u) + +/******************************SRQC***************************/ +#define C_HMC_SRQC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x480u) +#define C_HMC_SRQC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x484u) +#define C_HMC_SRQC_RX (C_RDMA_RX_VHCA_PAGE + 0x488u) + +/******************************CQC***************************/ +#define C_HMC_CQC_RX_BASE_LOW (C_RDMA_RX_VHCA_PAGE + 0x584u) +#define C_HMC_CQC_RX_BASE_HIGH (C_RDMA_RX_VHCA_PAGE + 0x588u) +#define C_HMC_CQC_RX1 (C_RDMA_RX_VHCA_PAGE + 0x58Cu) +#define C_HMC_CQC_RX2 (C_RDMA_RX_VHCA_PAGE + 0x590u) + +/*******************************************MR Related Registers*************************************/ +#define C_TX_MRTE_INDEX_CFG (C_RDMA_TX_VHCA_PAGE + 0xC24u) + +#define C_RDMA_VF_HMC_CQP_CQ_DISTRIBUTE_DONE(_i) \ + (0x6204800040u + (0x1000 * (_i))) + +/*******************************************DATA CAP Related *************************************/ +#define C_HMC_DATA_CAP_IOVA_BASE (0x7C0000000u) +#define C_HMC_DATA_CAP_IOVA_LEN (0x40000000u) +/**************** End of ZTE RDMA Registers ***************/ +#define C_QPC_IRD_SIZE 16 + +/********************************CQP PUBLIC********************)****/ +#define C_RDMACQP_PUBLIC_BASE (0x6204400000u) +#define C_RDMACQP_RDRAM_READ_FLAG (C_RDMACQP_PUBLIC_BASE + 0x34u) +#define C_RDMACQP_RDRAM_ADDR (C_RDMACQP_PUBLIC_BASE + 0x38u) +#define C_RDMACQP_RDRAM_READ_LENGTH (C_RDMACQP_PUBLIC_BASE + 0x3Cu) +#define C_RDMACQP_RDRAM_NUM (C_RDMACQP_PUBLIC_BASE + 0x40u) +#define C_RDMACQP_RDRAM_DATA_WIDTH (C_RDMACQP_PUBLIC_BASE + 0x48u) +#define C_RDMACQP_RDRAM_TIME_LIMIT 
(C_RDMACQP_PUBLIC_BASE + 0x4Cu) + +#define C_RDMACQP_RDRAM_RD_FINISH (C_RDMACQP_PUBLIC_BASE + 0x220u) +#define C_RDMACQP_RDRAM_RD_ERROR (C_RDMACQP_PUBLIC_BASE + 0x224u) +#define C_RDMACQP_RDRAM_RD_CNT_ERR (C_RDMACQP_PUBLIC_BASE + 0x23Cu) + +#define C_RDMACQP_VCHA_BASE (0x6204800000u) +#define C_RDMACQP_RDRAM_RD_MAINTENANCE_RAM(_i) \ + (C_RDMACQP_VCHA_BASE + 0x44 + (_i)*0x1000) + +enum icrdma_device_caps_const { + //ICRDMA_MAX_WQ_FRAGMENT_COUNT = 30, + //ICRDMA_MAX_SGE_RD = 30, + ICRDMA_MAX_STATS_COUNT = 128, + + ICRDMA_MAX_IRD_SIZE = 16, + ICRDMA_MAX_ORD_SIZE = 16, + +}; + +void zxdh_init_hw(struct zxdh_sc_dev *dev); +void zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle); +bool zxdh_is_config_ok(struct zxdh_config_check *cc, struct zxdh_sc_vsi *vsi); +void zxdh_check_fc_for_tc_update(struct zxdh_sc_vsi *vsi, + struct zxdh_l2params *l2params); +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); +#endif /* ICRDMA_HW_H*/ diff --git a/drivers/infiniband/hw/zrdma/iidc.h b/drivers/infiniband/hw/zrdma/iidc.h new file mode 100644 index 000000000000..23553f024bd8 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/iidc.h @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _IIDC_H_ +#define _IIDC_H_ + +#include +#include +#include +#include +#include +#include "zxdh_auxiliary_bus.h" +//#include "../../../..//zxdh_kernel/include/linux/dinghai/auxiliary_bus.h" + +/* This major and minor version represent IDC API version information. + * + * The concept of passing an API version should be incorporated into the + * auxiliary drivers' probe handlers to check if they can communicate with the + * core PCI driver. During auxiliary driver probe, auxiliary driver should + * check major and minor version information (via iidc_core_dev_info:ver). 
If + * the version check fails, the auxiliary driver should fail the probe and log + * an appropriate message. + */ +#define IIDC_MAJOR_VER 10 +#define IIDC_MINOR_VER 1 + +#define ZXDH_NET_MAJOR_IDX GENMASK_ULL(7, 0) +#define ZXDH_NET_MINOR_IDX GENMASK_ULL(15, 8) +#define ZXDH_RDMA_MINOR_IDX GENMASK_ULL(23, 16) + +enum iidc_event_type { + IIDC_EVENT_BEFORE_MTU_CHANGE, + IIDC_EVENT_AFTER_MTU_CHANGE, + IIDC_EVENT_BEFORE_TC_CHANGE, + IIDC_EVENT_AFTER_TC_CHANGE, + IIDC_EVENT_VF_RESET, + IIDC_EVENT_LINK_CHNG, + IIDC_EVENT_CRIT_ERR, + IIDC_EVENT_NBITS /* must be last */ +}; + +enum iidc_reset_type { + IIDC_PFR, + IIDC_CORER, + IIDC_GLOBR, +}; + +enum iidc_rdma_protocol { + IIDC_RDMA_PROTOCOL_IWARP = BIT(0), + IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1), +}; + +enum iidc_rdma_gen { + IIDC_RDMA_GEN_1, + IIDC_RDMA_GEN_2, + IIDC_RDMA_GEN_3, + IIDC_RDMA_GEN_4, +}; + +struct iidc_rdma_caps { + u8 gen; /* Hardware generation */ + u8 protocols; /* bitmap of supported protocols */ +}; +/* This information is needed to handle auxiliary driver probe */ +struct iidc_ver_info { + u16 major; + u16 minor; + u64 support; //0~7:net_major,8~15:net_minor,16~23:rdma_minor,24~63:rscv +}; + +/* Struct to hold per DCB APP info */ +struct iidc_dcb_app_info { + u8 priority; + u8 selector; + u16 prot_id; +}; + +struct iidc_core_dev_info; + +#define IIDC_MAX_USER_PRIORITY 8 +#define IIDC_MAX_APPS 64 +#define IIDC_MAX_DSCP_MAPPING 64 +#define IIDC_VLAN_PFC_MODE 0x0 +#define IIDC_DSCP_PFC_MODE 0x1 + +/* Struct to hold per RDMA Qset info */ +struct iidc_rdma_qset_params { + u32 teid; /* qset TEID */ + u16 qs_handle; /* RDMA driver provides this */ + u16 vport_id; /* VSI index */ + u8 tc; /* TC branch the QSet should belong to */ +}; + +struct iidc_qos_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +/* Struct to hold QoS info */ +struct iidc_qos_params { + // struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS]; + // u8 up2tc[IIDC_MAX_USER_PRIORITY]; + // u8 
vport_relative_bw; + // u8 vport_priority_type; + // u32 num_apps; + // u8 pfc_mode; + // struct iidc_dcb_app_info apps[IIDC_MAX_APPS]; + // u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; + u8 num_tc; +}; + +union iidc_event_info { + /* IIDC_EVENT_AFTER_TC_CHANGE */ + struct iidc_qos_params port_qos; + /* IIDC_EVENT_LINK_CHNG */ + bool link_up; + /* IIDC_EVENT_VF_RESET */ + u32 vf_id; + /* IIDC_EVENT_CRIT_ERR */ + u32 reg; +}; + +struct iidc_event { + DECLARE_BITMAP(type, IIDC_EVENT_NBITS); + union iidc_event_info info; +}; + +/* RDMA queue vector map info */ +struct iidc_qv_info { + u32 v_idx; + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct iidc_qvlist_info { + u32 num_vectors; + struct iidc_qv_info qv_info[]; +}; + +struct iidc_vf_port_info { + u16 vf_id; + u16 vport_id; + u16 port_vlan_id; + u16 port_vlan_tpid; +}; + +/* Following APIs are implemented by core PCI driver */ +struct iidc_core_ops { + /* APIs to allocate resources such as VEB, VSI, Doorbell queues, + * completion queues, Tx/Rx queues, etc... + */ + int (*request_reset)(struct iidc_core_dev_info *cdev_info, + enum iidc_reset_type reset_type); +}; + +#define IIDC_RDMA_ROCE_NAME "roce" +#define IIDC_RDMA_IWARP_NAME "iwarp" +#define IIDC_RDMA_ID 0x00000010 +#define IIDC_IEPS_NAME "ieps" +#define IIDC_IEPS_ID 0x00000015 +#define IIDC_MAX_NUM_AUX 5 + +/* The const struct that instantiates cdev_info_id needs to be initialized + * in the .c with the macro ASSIGN_IIDC_INFO. 
+ * For example: + * static const struct cdev_info_id cdev_info_ids[] = ASSIGN_IIDC_INFO; + */ +struct cdev_info_id { + char *name; + int id; +}; + +#define IIDC_RDMA_INFO { .name = IIDC_RDMA_ROCE_NAME, .id = IIDC_RDMA_ID }, +#define IIDC_IEPS_INFO { .name = IIDC_IEPS_NAME, .id = IIDC_IEPS_ID }, + +#define ASSIGN_IIDC_INFO \ + { \ + IIDC_IEPS_INFO \ + IIDC_RDMA_INFO \ + } + +enum iidc_function_type { + IIDC_FUNCTION_TYPE_PF, + IIDC_FUNCTION_TYPE_VF, +}; + +/* Structure representing auxiliary driver tailored information about the core + * PCI dev, each auxiliary driver using the IIDC interface will have an + * instance of this struct dedicated to it. + */ +struct iidc_core_dev_info { + struct pci_dev *pdev; /* PCI device of corresponding to main function */ + struct zxdh_auxiliary_device *adev; + /* KVA / Linear address corresponding to BAR0 of underlying + * pci_device. + */ + u8 __iomem *hw_addr; + int cdev_info_id; + struct iidc_ver_info ver; + + /* Opaque pointer for aux driver specific data tracking. This memory + * will be alloc'd and freed by the auxiliary driver and used for + * private data accessible only to the specific auxiliary driver. + * It is stored here so that when this struct is passed to the + * auxiliary driver via an IIDC call, the data can be accessed + * at that time. 
+ */ + void *auxiliary_priv; + + enum iidc_function_type ftype; + u16 vport_id; + u16 slot_id; + /* Current active RDMA protocol */ + enum iidc_rdma_protocol rdma_protocol; + + struct iidc_qos_params qos_info; + struct net_device *netdev; + + struct msix_entry *msix_entries; + u16 msix_count; /* How many vectors are reserved for this device */ + // struct iidc_rdma_caps rdma_caps; + /* Following struct contains function pointers to be initialized + * by core PCI driver and called by auxiliary driver + */ + const struct iidc_core_ops *ops; + u8 pf_id; +}; + +struct iidc_auxiliary_dev { + struct zxdh_auxiliary_device adev; + struct iidc_core_dev_info *cdev_info; +}; + +struct zxdh_core_dev_info { + struct pci_dev *pdev; /* PCI device of corresponding to main function */ + struct zxdh_auxiliary_device *adev; + /* KVA / Linear address corresponding to BAR0 of underlying + * pci_device. + */ + u8 __iomem *hw_addr; + int cdev_info_id; + struct iidc_ver_info ver; + + /* Opaque pointer for aux driver specific data tracking. This memory + * will be alloc'd and freed by the auxiliary driver and used for + * private data accessible only to the specific auxiliary driver. + * It is stored here so that when this struct is passed to the + * auxiliary driver via an IIDC call, the data can be accessed + * at that time. 
+ */ + void *auxiliary_priv; + + enum iidc_function_type ftype; + u16 vport_id; + u16 slot_id; + /* Current active RDMA protocol */ + enum iidc_rdma_protocol rdma_protocol; + + struct iidc_qos_params qos_info; + //struct net_device *netdev; + + struct msix_entry msix_entries; + u16 msix_count; /* How many vectors are reserved for this device */ + // struct iidc_rdma_caps rdma_caps; + /* Following struct contains function pointers to be initialized + * by core PCI driver and called by auxiliary driver + */ + const struct iidc_core_ops *ops; + u8 pf_id; +}; + +struct zxdh_rdma_if { + void *(*get_rdma_netdev)(void *dh_dev); +}; + +struct zxdh_auxiliary_dev { + struct zxdh_auxiliary_device adev; + struct zxdh_core_dev_info *zxdh_info; + + struct zxdh_rdma_if *rdma_ops; + void *ops; + void *parent; + int32_t aux_id; + void *auxiliary_ops[18]; //max support 20 auxiliary devices +}; + +/* structure representing the auxiliary driver. This struct is to be + * allocated and populated by the auxiliary driver's owner. The core PCI + * driver will access these ops by performing a container_of on the + * auxiliary_device->dev.driver. + */ +struct iidc_auxiliary_drv { + struct zxdh_auxiliary_driver adrv; + /* This event_handler is meant to be a blocking call. For instance, + * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not + * return until the auxiliary driver is ready for the MTU change to + * happen. 
+ */ + void (*event_handler)(struct iidc_core_dev_info *cdev_info, + struct iidc_event *event); + int (*vc_receive)(struct iidc_core_dev_info *cdev_info, u32 vf_id, + u8 *msg, u16 len); +}; + +#endif /* _IIDC_H_*/ diff --git a/drivers/infiniband/hw/zrdma/linux_kcompat.h b/drivers/infiniband/hw/zrdma/linux_kcompat.h new file mode 100644 index 000000000000..40781d379fbc --- /dev/null +++ b/drivers/infiniband/hw/zrdma/linux_kcompat.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef LINUX_KCOMPAT_H +#define LINUX_KCOMPAT_H + +#define IB_DEV_OPS_FILL_ENTRY +#define IB_DEV_CAPS_VER_2 +#define CREATE_AH_VER_5 +#define PROCESS_MAD_VER_3 +#define ZRDMA_CREATE_SRQ_VER_2 +#define ZRDMA_DESTROY_SRQ_VER_3 +#define DESTROY_AH_VER_4 +#define CREATE_QP_VER_2 +#define GLOBAL_QP_MEM +#define DESTROY_QP_VER_2 +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp, udata) +#define CREATE_CQ_VER_3 +#define COPY_USER_PGADDR_VER_4 +#define ALLOC_UCONTEXT_VER_2 +#define DEALLOC_UCONTEXT_VER_2 +#define DEALLOC_PD_VER_4 +#define ALLOC_PD_VER_3 +#define ALLOC_HW_STATS_STRUCT_V2 +#define ALLOC_HW_STATS_V3 +#define QUERY_GID_ROCE_V2 +#define MODIFY_PORT_V2 +#define QUERY_PKEY_V2 +#define ROCE_PORT_IMMUTABLE_V2 +#define GET_HW_STATS_V2 +#define GET_LINK_LAYER_V2 +#define IW_PORT_IMMUTABLE_V2 +#define QUERY_GID_V2 +#define QUERY_PORT_V2 +#define GET_ETH_SPEED_AND_WIDTH_V2 +#define RDMA_MMAP_DB_SUPPORT +#define RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT +#define ZXDH_ALLOC_MW_VER_2 +#define ZXDH_ALLOC_MR_VER_0 +#define ZXDH_DESTROY_CQ_VER_4 +#define set_max_sge(props, rf) \ + do { \ + ((props)->max_send_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + ((props)->max_recv_sge = \ + (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags); \ + } while (0) +#define kc_set_props_ip_gid_caps(props) ((props)->ip_gids = true) +#define kc_rdma_gid_attr_network_type(sgid_attr, gid_type, gid) \ + 
rdma_gid_attr_network_type(sgid_attr) +#define kc_deref_sgid_attr(sgid_attr) ((sgid_attr)->ndev) + +#define kc_typeq_ib_wr const + +#define kc_ib_register_device(device, name, dev) \ + ib_register_device(device, name, dev) +#define HAS_IB_SET_DEVICE_OP +#define kc_set_ibdev_add_del_gid(ibdev) +#define kc_ib_modify_qp_is_ok(cur_state, next_state, type, mask, ll) \ + ib_modify_qp_is_ok(cur_state, next_state, type, mask) +#define SET_BEST_PAGE_SZ_V2 +#define kc_rdma_udata_to_drv_context(ibpd, udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define USE_QP_ATTRS_STANDARD +#define NETDEV_TO_IBDEV_SUPPORT +#define IB_DEALLOC_DRIVER_SUPPORT +#define kc_get_ucontext(udata) \ + rdma_udata_to_drv_context(udata, struct zxdh_ucontext, ibucontext) +#define IN_IFADDR +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +void zxdh_dealloc_ucontext(struct ib_ucontext *context); +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#define set_ibdev_dma_device(ibdev, dev) +#define ah_attr_to_dmac(attr) ((attr).roce.dmac) +#define SET_ROCE_CM_INFO_VER_3 +#define IB_UMEM_GET_V3 +#define DEREG_MR_VER_2 +#define REREG_MR_VER_2 + +#endif /* LINUX_KCOMPAT_H */ diff --git a/drivers/infiniband/hw/zrdma/main.c b/drivers/infiniband/hw/zrdma/main.c new file mode 100644 index 000000000000..7e866a89fa0e --- /dev/null +++ b/drivers/infiniband/hw/zrdma/main.c @@ -0,0 +1,1797 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "main.h" +/* TODO: Adding this here is not ideal. Can we remove this warning now? */ +#include "icrdma_hw.h" +#include +#include +#include "zrdma_kcompat.h" +#include "slib.h" + +#define DRV_VER_MAJOR 1 +#define DRV_VER_MINOR 8 +#define DRV_VER_BUILD 46 +#define DRV_VER \ + __stringify(DRV_VER_MAJOR) "." __stringify( \ + DRV_VER_MINOR) "." 
__stringify(DRV_VER_BUILD) + +#define FW_MAJOR_VER 0 +#define FW_MINOR_FW_VER 0 +#define FW_MINOR_DRV_VER 0 + +#define DRV_MAJOR_VER 0 +#define DRV_NET_MINOR_VER 0 +#define DRV_RDMA_MINOR_VER 0 +#define ZXDH_FW_VER_OFFSET 0x5400 +#define MODULE_RDMA_ID 11 + +struct rdma_sriov_glb_info pf_sriov_glb_info[HOST_RDMA_MAX_PF] = { 0 }; + +static u8 resource_profile; +module_param(resource_profile, byte, 0444); +MODULE_PARM_DESC( + resource_profile, + "Resource Profile: 0=PF only(default), 1=Weighted VF, 2=Even Distribution"); + +static u8 max_rdma_vfs = 1; +module_param(max_rdma_vfs, byte, 0444); +MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, default=1"); + +bool zxdh_upload_context; +module_param(zxdh_upload_context, bool, 0444); +MODULE_PARM_DESC(zxdh_upload_context, "Upload QP context, default=false"); + +static unsigned int limits_sel = 3; +module_param(limits_sel, uint, 0444); +MODULE_PARM_DESC(limits_sel, "Resource limits selector, Range: 0-7, default=3"); + +static unsigned int gen1_limits_sel = 1; +module_param(gen1_limits_sel, uint, 0444); +MODULE_PARM_DESC(gen1_limits_sel, + "x722 resource limits selector, Range: 0-5, default=1"); + +static unsigned int roce_ena = 1; +module_param(roce_ena, uint, 0444); +MODULE_PARM_DESC( + roce_ena, + "RoCE enable: 1=enable RoCEv2 on all ports (not supported on x722), 0=iWARP(default)"); + +static ulong roce_port_cfg; +module_param(roce_port_cfg, ulong, 0444); +MODULE_PARM_DESC( + roce_port_cfg, + "RoCEv2 per port enable: 1=port0 RoCEv2 all others iWARP, 2=port1 RoCEv2 etc. 
not supported on X722"); + +static bool en_rem_endpoint_trk; +module_param(en_rem_endpoint_trk, bool, 0444); +MODULE_PARM_DESC( + en_rem_endpoint_trk, + "Remote Endpoint Tracking: 1=enabled (not supported on x722), 0=disabled(default)"); + +static u8 fragment_count_limit = 6; +module_param(fragment_count_limit, byte, 0444); +MODULE_PARM_DESC( + fragment_count_limit, + "adjust maximum values for queue depth and inline data size, default=4, Range: 2-13"); + +/******************Advanced RoCEv2 congestion knobs***********************************************/ +static bool dcqcn_enable; +module_param(dcqcn_enable, bool, 0444); +MODULE_PARM_DESC( + dcqcn_enable, + "enables DCQCN algorithm for RoCEv2 on all ports, default=false "); + +static bool dcqcn_cc_cfg_valid; +module_param(dcqcn_cc_cfg_valid, bool, 0444); +MODULE_PARM_DESC(dcqcn_cc_cfg_valid, + "set DCQCN parameters to be valid, default=false"); + +static u8 dcqcn_min_dec_factor = 1; +module_param(dcqcn_min_dec_factor, byte, 0444); +MODULE_PARM_DESC( + dcqcn_min_dec_factor, + "set minimum percentage factor by which tx rate can be changed for CNP, Range: 1-100, default=1"); + +static u8 dcqcn_min_rate_MBps; +module_param(dcqcn_min_rate_MBps, byte, 0444); +MODULE_PARM_DESC(dcqcn_min_rate_MBps, + "set minimum rate limit value, in MBits per second, default=0"); + +static u8 dcqcn_F; +module_param(dcqcn_F, byte, 0444); +MODULE_PARM_DESC( + dcqcn_F, + "set number of times to stay in each stage of bandwidth recovery, default=0"); + +static unsigned short dcqcn_T; +module_param(dcqcn_T, ushort, 0444); +MODULE_PARM_DESC( + dcqcn_T, + "set number of usecs that should elapse before increasing the CWND in DCQCN mode, default=0"); + +static unsigned int dcqcn_B; +module_param(dcqcn_B, uint, 0444); +MODULE_PARM_DESC( + dcqcn_B, + "set number of MSS to add to the congestion window in additive increase mode, default=0"); + +static unsigned short dcqcn_rai_factor; +module_param(dcqcn_rai_factor, ushort, 0444); +MODULE_PARM_DESC( + 
dcqcn_rai_factor, + "set number of MSS to add to the congestion window in additive increase mode, default=0"); + +static unsigned short dcqcn_hai_factor; +module_param(dcqcn_hai_factor, ushort, 0444); +MODULE_PARM_DESC( + dcqcn_hai_factor, + "set number of MSS to add to the congestion window in hyperactive increase mode, default=0"); + +static unsigned int dcqcn_rreduce_mperiod; +module_param(dcqcn_rreduce_mperiod, uint, 0444); +MODULE_PARM_DESC( + dcqcn_rreduce_mperiod, + "set minimum time between 2 consecutive rate reductions for a single flow, default=0"); + +static u8 display_drv_side_fw_ver; +module_param(display_drv_side_fw_ver, byte, 0444); +MODULE_PARM_DESC(display_drv_side_fw_ver, "display fw ver, display=1, not display=0"); + +static u8 display_drv_side_net_ver; +module_param(display_drv_side_net_ver, byte, 0444); +MODULE_PARM_DESC(display_drv_side_net_ver, "display drv ver, display=1, not display=0"); + +static void zxdh_destory_eth_info_hlist(struct zxdh_device *iwdev); + +/****************************************************************************************************************/ + +MODULE_ALIAS("zrdma"); +MODULE_AUTHOR("ZTE Corporation"); +MODULE_DESCRIPTION("ZTE(R) Ethernet Protocol Driver for RDMA"); +MODULE_LICENSE("Dual BSD/GPL"); +#ifdef RDMA_VERSION +#define STRINGIFY(x) #x +#define TOSTRING(x) STRINGIFY(x) +MODULE_VERSION(TOSTRING(RDMA_VERSION)); +#else +MODULE_VERSION(DRV_VER); +#endif +int zxdh_vf_update_np_tbl(struct zxdh_pci_f *rf); + +int zxdh_vf_update_np_tbl(struct zxdh_pci_f *rf) +{ + u32 cnt = 0, val = 0, status = 0; + struct iidc_core_dev_info *cdev_info = rf->cdev; + struct zxdh_sc_dev *dev = &rf->sc_dev; + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_OP_REQ_NP_MAC_DEL, + cdev_info->vport_id, + ether_addr_to_u64(rf->iwdev->mac_addr), + 0, rf->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > 
ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed NP_MAC_DEL mailbox too long time,timeout!\n", dev->vhca_id); + status = -ETIMEDOUT; + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + if (rf->iwdev->netdev->dev_addr == NULL) { + pr_err("[%s] dev_addr is null!\n", __func__); + status = -EINVAL; + return status; + } + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd(dev, ZTE_ZXDH_OP_REQ_NP_MAC_ADD, + cdev_info->vport_id, + ether_addr_to_u64(rf->iwdev->netdev->dev_addr), + 0, rf->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > ZXDH_MAILBOX_CYC_NUM * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed NP_MAC_ADD mailbox too long time,timeout!\n", dev->vhca_id); + status = -ETIMEDOUT; + break; + } + if (dev->hw_attrs.self_health == true) { + status = -ETIMEDOUT; + break; + } + udelay(ZXDH_MAILBOX_SLEEP_TIME); + } while (!val); + + return status; +} + +void zxdh_update_dpp_mac_tbl(struct zxdh_device *iwdev, struct iidc_core_dev_info *cdev_info) +{ + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_pci_f *rf = iwdev->rf; + + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + if (iwdev->rf->sc_dev.np_mode_low_lat) { + if (iwdev->netdev->dev_addr == NULL) { + pr_err("[%s] dev_addr is null!\n", __func__); + return; + } + + if (ether_addr_cmp(iwdev->mac_addr, iwdev->netdev->dev_addr) == 0) { + pr_warn("%s[%d]: mac_addr is equal to dev_addr(%02x:%02x:%02x:%02x:%02x:%02x)\n", __func__, __LINE__, + iwdev->mac_addr[0], iwdev->mac_addr[1], iwdev->mac_addr[2], + iwdev->mac_addr[3], iwdev->mac_addr[4], iwdev->mac_addr[5]); + } + + if (!iwdev->rf->ftype) { + pr_info("%s[%d]: dpp del/add rdma trans item\n", __func__, __LINE__); + dpp_del_rdma_trans_item(&pf_info, iwdev->mac_addr); + 
dpp_add_rdma_trans_item(&pf_info, + iwdev->netdev->dev_addr, + iwdev->rf->sc_dev.vhca_id); + } else { + zxdh_vf_update_np_tbl(rf); + } + ether_addr_copy(iwdev->mac_addr, iwdev->netdev->dev_addr); + } +} + +static enum ib_port_state get_port_state(struct net_device *netdev) +{ + if (netif_carrier_ok(netdev) && netif_running(netdev)) + return IB_PORT_ACTIVE; + + return IB_PORT_DOWN; +} + +static int zxdh_netdevice_event(struct notifier_block *not_blk, + unsigned long event, void *arg) +{ + struct ib_device *ibdev; + struct zxdh_device *iwdev; + enum ib_port_state state; + struct net_device *netdev = netdev_notifier_info_to_dev(arg); + + ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_ZXDH); + if (!ibdev) + return NOTIFY_DONE; + iwdev = to_iwdev(ibdev); + + switch (event) { + case NETDEV_CHANGE: + case NETDEV_UP: + case NETDEV_DOWN: + if (!refcount_read(&iwdev->trace_switch.t_switch)) + break; + state = get_port_state(netdev); + if (state == IB_PORT_ACTIVE) + ibdev_notice(ibdev, "IB port up\n"); + else + ibdev_notice(ibdev, "IB port down\n"); + break; + case NETDEV_CHANGEMTU: + pr_info("%s changed mtu to %d\n", netdev->name, netdev->mtu); + break; + case NETDEV_CHANGEADDR: + pr_info("%s[%d]: process NETDEV_CHANGEADDR event, update dpp mac tbl\n", __func__, __LINE__); + zxdh_update_dpp_mac_tbl(iwdev, iwdev->rf->cdev); + break; + default: + pr_info("%s[%d]: ignoring netdev event=%ld for %s\n", __func__, __LINE__, event, netdev->name); + break; + } + + ib_device_put(ibdev); + return NOTIFY_DONE; +} + +static struct notifier_block zxdh_netdevice_notifier = { + .notifier_call = zxdh_netdevice_event +}; + +static void zxdh_register_notifiers(void) +{ + register_netdevice_notifier(&zxdh_netdevice_notifier); +} + +static void zxdh_unregister_notifiers(void) +{ + unregister_netdevice_notifier(&zxdh_netdevice_notifier); +} + +extern struct zxdh_rdma_hb_if hwbond_ops; +/** + * set_protocol_used - set protocol_used against HW generation and roce_ena flag + * @rf: RDMA PCI 
function + * @roce_ena: RoCE enabled bit flag + */ +static inline void set_protocol_used(struct zxdh_pci_f *rf, uint roce_ena) +{ + switch (rf->rdma_ver) { + case ZXDH_GEN_2: + rf->protocol_used = + roce_ena & BIT(PCI_FUNC(rf->pcidev->devfn)) ? + ZXDH_ROCE_PROTOCOL_ONLY : + ZXDH_IWARP_PROTOCOL_ONLY; + + break; + case ZXDH_GEN_1: + rf->protocol_used = ZXDH_IWARP_PROTOCOL_ONLY; + break; + } +} + +/** + * zxdh_set_rf_user_cfg_params - Setup RF configurations from module parameters + * @rf: RDMA PCI function + */ +void zxdh_set_rf_user_cfg_params(struct zxdh_pci_f *rf) +{ + /*TODO: Fixup range checks on all integer module params */ + if (limits_sel > 7) + limits_sel = 7; + + if (gen1_limits_sel > 5) + gen1_limits_sel = 5; + + rf->limits_sel = (rf->rdma_ver == ZXDH_GEN_1) ? gen1_limits_sel : + limits_sel; + if (roce_ena) + pr_warn_once( + "zrdma: Because roce_ena is ENABLED, roce_port_cfg will be ignored."); + set_protocol_used(rf, roce_ena ? 0xFFFFFFFF : roce_port_cfg); + rf->rsrc_profile = + (resource_profile < ZXDH_HMC_PROFILE_EQUAL) ? + (u8)resource_profile + ZXDH_HMC_PROFILE_DEFAULT : + ZXDH_HMC_PROFILE_DEFAULT; + if (max_rdma_vfs > ZXDH_MAX_PE_ENA_VF_COUNT) { + pr_warn_once( + "zrdma: Requested VF count [%d] is above max supported. Setting to %d.", + max_rdma_vfs, ZXDH_MAX_PE_ENA_VF_COUNT); + max_rdma_vfs = ZXDH_MAX_PE_ENA_VF_COUNT; + } + //rf->max_rdma_vfs = (rf->rsrc_profile != ZXDH_HMC_PROFILE_DEFAULT)? 
+ //max_rdma_vfs : 0; + rf->en_rem_endpoint_trk = en_rem_endpoint_trk; + rf->fragcnt_limit = fragment_count_limit; + if (rf->fragcnt_limit > 13 || rf->fragcnt_limit < 2) { + rf->fragcnt_limit = 6; + pr_warn_once( + "zrdma: Requested [%d] fragment count limit out of range (2-13), setting to default=6.", + fragment_count_limit); + } + rf->dcqcn_ena = dcqcn_enable; + + /* Skip over all checking if no dcqcn */ + if (!dcqcn_enable) + return; + + rf->dcqcn_params.cc_cfg_valid = dcqcn_cc_cfg_valid; + rf->dcqcn_params.dcqcn_b = dcqcn_B; + +#define DCQCN_B_MAX GENMASK(25, 0) + if (rf->dcqcn_params.dcqcn_b > DCQCN_B_MAX) { + rf->dcqcn_params.dcqcn_b = DCQCN_B_MAX; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_b value too high, setting to %d.", + dcqcn_B, rf->dcqcn_params.dcqcn_b); + } + +#define DCQCN_F_MAX 8 + rf->dcqcn_params.dcqcn_f = dcqcn_F; + if (dcqcn_F > DCQCN_F_MAX) { + rf->dcqcn_params.dcqcn_f = DCQCN_F_MAX; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_f value too high, setting to %d.", + dcqcn_F, DCQCN_F_MAX); + } + + rf->dcqcn_params.dcqcn_t = dcqcn_T; + rf->dcqcn_params.hai_factor = dcqcn_hai_factor; + rf->dcqcn_params.min_dec_factor = dcqcn_min_dec_factor; + if (dcqcn_min_dec_factor < 1 || dcqcn_min_dec_factor > 100) { + rf->dcqcn_params.dcqcn_b = 1; + pr_warn_once( + "zrdma: Requested [%d] dcqcn_min_dec_factor out of range (1-100) , setting to default=1", + dcqcn_min_dec_factor); + } + + rf->dcqcn_params.min_rate = dcqcn_min_rate_MBps; + rf->dcqcn_params.rai_factor = dcqcn_rai_factor; + rf->dcqcn_params.rreduce_mperiod = dcqcn_rreduce_mperiod; +} + +static void zxdh_iidc_event_handler(struct iidc_core_dev_info *cdev_info, + struct iidc_event *event) +{ +} + +/** + * zxdh_request_reset - Request a reset + * @rf: RDMA PCI function + */ +static void zxdh_request_reset(struct zxdh_pci_f *rf) +{ + struct iidc_core_dev_info *cdev_info = rf->cdev; + if (rf->sc_dev.hw_attrs.self_health == false) + dev_warn(idev_to_dev(&rf->sc_dev), "Requesting a reset\n"); + 
rf->sc_dev.vchnl_up = false; + cdev_info->ops->request_reset(rf->cdev, IIDC_PFR); +} + +/** + * zxdh_dev_ibevent - indicate dev event + * @iwdev: zrdma device + */ +static void zxdh_dev_ibevent(struct zxdh_device *iwdev) +{ + struct ib_event event; + + event.device = &iwdev->ibdev; + event.element.port_num = 1; + event.event = IB_EVENT_DEVICE_FATAL; + ib_dispatch_event(&event); +} + +static int rdma_get_rp_link_status(struct pci_dev *pdev) +{ + struct pci_dev *rp_dev = NULL; + int pcie_cap = 0; + u16 data = 0; + rp_dev = pcie_find_root_port(pdev); + if (!rp_dev) { + pr_err("rdma can not find RP\n"); + return -ENODEV; + } + pcie_cap = pci_find_capability(rp_dev, PCI_CAP_ID_EXP); + if (!pcie_cap) { + pr_err("rdma can not find PCI Express CAP\n"); + return -ENXIO; + } + pci_read_config_word(rp_dev, pcie_cap + PCI_EXP_LNKSTA, &data); + return (data & PCI_EXP_LNKSTA_DLLLA) ? ZXDH_PCIE_LINK_UP : ZXDH_PCIE_LINK_DOWN; +} + +static int rdma_get_upstream_port_link_status(struct pci_dev *pdev) +{ + struct pci_dev *up_stream_dev = NULL; + int pcie_cap = 0; + u16 data = 0; + + up_stream_dev = pci_upstream_bridge(pdev); + if (!up_stream_dev) { + pr_err("rdma can not find RP\n"); + return -ENODEV; + } + pcie_cap = pci_find_capability(up_stream_dev, PCI_CAP_ID_EXP); + if (!pcie_cap) { + pr_err("rdma can not find PCI Express CAP\n"); + return -ENXIO; + } + pci_read_config_word(up_stream_dev, pcie_cap + PCI_EXP_LNKSTA, &data); + return (data & PCI_EXP_LNKSTA_DLLLA) ? 
ZXDH_PCIE_LINK_UP : ZXDH_PCIE_LINK_DOWN; +} + +static int zxdh_rdma_check_remove_state(struct pci_dev *pdev) +{ + if (!rdma_get_rp_link_status(pdev)) + return ZXDH_PCIE_LINK_DOWN; + + return rdma_get_upstream_port_link_status(pdev); +} + +static int zxdh_rdma_hotplug_event(struct zxdh_pci_f *rf) +{ + struct zxdh_ceq *iwceq; + u32 i; + + if (!rf) { + return -ENODEV; + } + + rf->sc_dev.hw_attrs.cqp_timeout_threshold = CQP_MIN_TIMEOUT_THRESHOLD; + rf->sc_dev.hw_attrs.max_done_count = ZXDH_MIN_DONE_COUNT; + rf->sc_dev.hw_attrs.self_health = true; + + if (rf->aeq.irq_sta == true) { + rf->aeq.irq_sta = false; + irq_set_affinity_hint(rf->aeq.irq, NULL); + free_irq(rf->aeq.irq, rf); + } + + for (i = 0; i < rf->ceqs_count; i++) { + iwceq = &rf->ceqlist[i]; + if (iwceq->irq_sta == true) { + iwceq->irq_sta = false; + irq_set_affinity_hint(iwceq->irq, NULL); + free_irq(iwceq->irq, iwceq); + } + } + pr_info("vhca_id:%d rdma quick remove start\n", rf->sc_dev.vhca_id); + return 0; +} +static int process_rdma_health_event(struct net_device *netdev) +{ + struct zxdh_device *iwdev; + struct zxdh_pci_f *rf; + struct zxdh_ceq *iwceq; + u32 i; + + iwdev = zxdh_device_get_by_source_netdev(netdev); + if (!iwdev) { + return -ENODEV; + } + rf = iwdev->rf; + if (!rf) { + return -ENODEV; + } + + rf->sc_dev.hw_attrs.cqp_timeout_threshold = CQP_MIN_TIMEOUT_THRESHOLD; + rf->sc_dev.hw_attrs.max_done_count = ZXDH_MIN_DONE_COUNT; + rf->sc_dev.hw_attrs.self_health = true; + + if (rf->aeq.irq_sta == true) { + rf->aeq.irq_sta = false; + irq_set_affinity_hint(rf->aeq.irq, NULL); + free_irq(rf->aeq.irq, rf); + } + + for (i = 0; i < rf->ceqs_count; i++) { + iwceq = &rf->ceqlist[i]; + if (iwceq->irq_sta == true) { + iwceq->irq_sta = false; + irq_set_affinity_hint(iwceq->irq, NULL); + free_irq(iwceq->irq, iwceq); + } + } + zxdh_dev_ibevent(iwdev); + + pr_info("vhca_id:%d rdma_self_health\n", rf->sc_dev.vhca_id); + return 0; +} + +static struct zxdh_pci_f *zxdh_get_rf_from_pdev(struct pci_dev *pdev) +{ + 
struct zxdh_pci_f *rf = NULL; + int i; + + for (i = 0; i < HOST_RDMA_MAX_PF; i++) { + if (pf_sriov_glb_info[i].rdma_pf_enable && pf_sriov_glb_info[i].pdev == pdev) { + rf = pf_sriov_glb_info[i].rf; + break; + } else { + continue; + } + } + + return rf; +} + +static void update_vf_pblem_info(struct zxdh_sc_dev *dev, u16 num_vfs, u64 vf_pblem_cnt) +{ + struct zxdh_vfdev *vf_dev = NULL; + struct zxdh_hmc_obj_info *hmc_obj; + u16 vf_id; + + dev->hmc_pf_manager_info.vf_pblemr_cnt = vf_pblem_cnt; + pr_info("%s %d Update vf pblem cnt to 0x%llx\n", __func__, __LINE__, vf_pblem_cnt); + + for (vf_id = 0; vf_id < num_vfs; vf_id++) { + vf_dev = zxdh_find_vf_dev(dev, vf_id); + if (vf_dev) { + hmc_obj = vf_dev->hmc_info.hmc_obj; + hmc_obj[ZXDH_HMC_IW_PBLE_MR].max_cnt = + dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt = dev->hmc_pf_manager_info.vf_pblemr_cnt; + hmc_obj[ZXDH_HMC_IW_PBLE_MR].size = dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].size; + hmc_obj[ZXDH_HMC_IW_PBLE_MR].type = dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].type; + hmc_obj[ZXDH_HMC_IW_PBLE_MR].base = dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base + + (dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt + + hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt * vf_id) * + hmc_obj[ZXDH_HMC_IW_PBLE_MR].size; + zxdh_put_vfdev(dev, vf_dev); + vf_dev = NULL; + } else { + continue; + } + } + + return; +} + +static int process_rdma_sriov_event(void *data) +{ + struct zxdh_rdma_sriov_event_info *sriov_info; + struct zxdh_pci_f *rf; + u64 vf_pblem_cnt; + int ret = 0; + + if (NULL == data) + return -EINVAL; + + sriov_info = (struct zxdh_rdma_sriov_event_info *)data; + rf = zxdh_get_rf_from_pdev(sriov_info->pdev); + if (rf && rf->sc_dev.active_vfs_num == sriov_info->num_vfs) { + return 0; + } + + ret = set_rdma_vf_num(sriov_info, &vf_pblem_cnt); + if (ret) { + pr_err("%s set_rdma_vf_num failed, ret=%d\n", __func__, ret); + return ret; + } + + if (rf) { + rf->sc_dev.active_vfs_num = sriov_info->num_vfs; + 
update_vf_pblem_info(&rf->sc_dev, sriov_info->num_vfs, vf_pblem_cnt); + } + + return 0; +} + +/*** + * @brief zxdh_rdma event handler + * + * @param netdev zxdh_net device,Netdev is the network structure pointer corresponding to the PF or VF that needs to release resources + * @param event_type,Types of events handled + * @param data,Incoming parameters, default NULL + * @return Return value 0 is OK, error is another value + */ +static int zxdh_rdma_event_handler(struct net_device *netdev, u8 event_type, void *data) +{ + int ret = 0; + + switch (event_type) { + case ZXDH_RDMA_HEALTH_EVENT: + ret = process_rdma_health_event(netdev); + break; + case ZXDH_RDMA_SRIOV_EVENT: + ret = process_rdma_sriov_event(data); + break; + default: + return -EINVAL; + } + + return ret; +} + +static u32 zxdh_get_sq_delta(u32 head, u32 tail, u32 size) +{ + u32 delta = 0; + + if ((head > size) || (tail > size)) + return delta; + if (head > tail) { + delta = (head - tail); + } else if (head < tail) { + delta = (head + size - tail); + } + return delta; +} + +static void zxdh_self_health_wait_res_free(u32 delta) +{ + if (delta <= 10) + mdelay(5); + else if (delta <= 26) + mdelay(15); + else if (delta <= 50) + mdelay(30); + else if (delta <= 100) + mdelay(100); + else if (delta <= 150) + mdelay(150); + else + mdelay(200); +} + +void zxdh_handle_internal_error(struct zxdh_pci_f *rf) +{ + struct zxdh_qp *qp; + struct zxdh_cq *send_cq; + struct zxdh_sc_dev *dev; + __le64 *cqe; + unsigned long flags_qp; + unsigned long flags_cp; + u32 wqe_idx; + u64 hdr; + bool wait_flag = false; + struct zxdh_ring temp_sq_ring; + struct zxdh_cq_uk temp_cq; + u32 delta = 0; + + qp = rf->iwdev->qp1; + if (qp == NULL) + return; + + dev = &rf->sc_dev; + if (dev == NULL) + return; + + if (dev->hw_attrs.self_health == false) + return; + spin_lock_irqsave(&qp->lock, flags_qp); + if (ZXDH_RING_CURRENT_HEAD(qp->sc_qp.qp_uk.sq_ring) != ZXDH_RING_CURRENT_TAIL(qp->sc_qp.qp_uk.sq_ring)) { + delta = 
zxdh_get_sq_delta(qp->sc_qp.qp_uk.sq_ring.head, qp->sc_qp.qp_uk.sq_ring.tail, + qp->sc_qp.qp_uk.sq_ring.size); + wait_flag = true; + temp_sq_ring.head = qp->sc_qp.qp_uk.sq_ring.head; + temp_sq_ring.tail = qp->sc_qp.qp_uk.sq_ring.tail; + temp_sq_ring.size = qp->sc_qp.qp_uk.sq_ring.size; + pr_info("%s vhca_id:%d\n", __func__, dev->vhca_id); + send_cq = qp->iwscq; + temp_cq.cq_base = send_cq->sc_cq.cq_uk.cq_base; + temp_cq.cq_ring.head = send_cq->sc_cq.cq_uk.cq_ring.head; + temp_cq.cq_ring.tail = send_cq->sc_cq.cq_uk.cq_ring.tail; + temp_cq.cq_ring.size = send_cq->sc_cq.cq_uk.cq_ring.size; + temp_cq.polarity = send_cq->sc_cq.cq_uk.polarity; + spin_lock_irqsave(&send_cq->lock, flags_cp); + if (!send_cq->user_mode) + send_cq->armed = false; + if (send_cq->ibcq.comp_handler && (send_cq->sc_cq.cq_uk.valid_cq == true)) { + do { + cqe = ZXDH_GET_CURRENT_EXTENDED_CQ_ELEM(&temp_cq); + set_64bit_val(cqe, 8, qp->ctx_info.qp_compl_ctx); + set_64bit_val(cqe, 24, 0); + hdr = FIELD_PREP(IRDMACQ_QPID, qp->ibqp.qp_num); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(cqe, 16, hdr); + wqe_idx = ZXDH_RING_CURRENT_TAIL(temp_sq_ring); + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_OP_TYPE_UD_SEND) | + FIELD_PREP(ZXDH_CQ_WQEIDX, wqe_idx) | + FIELD_PREP(ZXDH_CQ_ERROR, 1) | + FIELD_PREP(ZXDH_CQ_MAJERR, ZXDH_FLUSH_MAJOR_ERR) | + FIELD_PREP(ZXDH_CQ_MINERR, FLUSH_GENERAL_ERR) | + FIELD_PREP(IRDMACQ_SOEVENT, 1) | + FIELD_PREP(ZXDH_CQ_VALID, temp_cq.polarity) | + FIELD_PREP(ZXDH_CQ_SQ, ZXDH_CQE_QTYPE_SQ) | + FIELD_PREP(ZXDH_CQ_TYPE, 0); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(cqe, 0, hdr); + pr_info("%s vhca_id:%d wqe_idx:%d sq_head:%d sq_tail:%d cq_head:%d\n", + __func__, dev->vhca_id, wqe_idx, temp_sq_ring.head, temp_sq_ring.tail, temp_cq.cq_ring.head); + ZXDH_RING_SET_TAIL( + temp_sq_ring, + wqe_idx + qp->sc_qp.qp_uk.sq_wrtrk_array[wqe_idx].quanta); + ZXDH_RING_MOVE_HEAD_NOCHECK(temp_cq.cq_ring); + if 
(!ZXDH_RING_CURRENT_HEAD(temp_cq.cq_ring)) + temp_cq.polarity ^= 1; + } while (temp_sq_ring.head != temp_sq_ring.tail); + send_cq->ibcq.comp_handler(&send_cq->ibcq, send_cq->ibcq.cq_context); + } + spin_unlock_irqrestore(&send_cq->lock, flags_cp); + } + spin_unlock_irqrestore(&qp->lock, flags_qp); + if (wait_flag == true) { + cancel_delayed_work_sync(&qp->dwork_flush); + zxdh_self_health_wait_res_free(delta); + } +} + +static void zxdh_store_rdma_pf_glb(struct zxdh_pci_f *rf) +{ + int i; + + for (i = 0; i < HOST_RDMA_MAX_PF; i++) { + if (pf_sriov_glb_info[i].rdma_pf_enable) { + continue; + } else { + pf_sriov_glb_info[i].pdev = rf->pcidev; + pf_sriov_glb_info[i].rf = rf; + pf_sriov_glb_info[i].rdma_pf_enable = true; + break; + } + } + + if (i >= HOST_RDMA_MAX_PF) { + pr_err("rdma_pf_num over limit:%d\n", HOST_RDMA_MAX_PF); + } + + return; +} + +static void zxdh_delete_rdma_pf_glb(struct zxdh_pci_f *rf) +{ + int i; + + for (i = 0; i < HOST_RDMA_MAX_PF; i++) { + if (pf_sriov_glb_info[i].rdma_pf_enable && pf_sriov_glb_info[i].rf == rf) { + pf_sriov_glb_info[i].pdev = NULL; + pf_sriov_glb_info[i].rf = NULL; + pf_sriov_glb_info[i].rdma_pf_enable = false; + break; + } else { + continue; + } + } + + return; +} + +static int zxdh_remove(struct zxdh_auxiliary_device *aux_dev) +{ + u32 cnt = 0, val = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct iidc_auxiliary_dev *iidc_adev = + container_of(aux_dev, struct iidc_auxiliary_dev, adev); + struct iidc_core_dev_info *cdev_info = iidc_adev->cdev_info; + struct zxdh_device *iwdev = dev_get_drvdata(&aux_dev->dev); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + uint64_t cqp_status_phy_addr = 0; + uint32_t cqp_status = 0xFFFF; + int ret = 0; + + if (zxdh_rdma_check_remove_state(iwdev->rf->pcidev) == ZXDH_PCIE_LINK_DOWN) { + zxdh_rdma_hotplug_event(iwdev->rf); + } + dev->driver_load = false; + if ((iwdev->rf->ftype == 0) && (dev->hw_attrs.self_health == false)) { + pf_info.vport = cdev_info->vport_id; + pf_info.slot = 
cdev_info->slot_id; + pr_info("%s[%d]: dpp del rdma trans item\n", __func__, __LINE__); + dpp_del_rdma_trans_item(&pf_info, iwdev->mac_addr); + } + + if ((iwdev->rf->ftype == 1) && (dev->hw_attrs.self_health == false)) { + cqp_status_phy_addr = C_RDMA_CQP_STATUS_PHY_ADDR + iwdev->rf->sc_dev.vhca_id_pf * 0x1000; + ret = zxdh_rdma_reg_read(iwdev->rf, cqp_status_phy_addr, &cqp_status); + if (ret) { + pr_err("%s[%d]: rdma reg read failed!\n", __func__, __LINE__); + goto clean_ib_resource; + } + + pr_info("vf rdma remove: ep_id=%d, pf_id=%d, vf_id=%d, vhca_id=%d, vhca_id_pf=%d, cqp_status=%d\n", + iwdev->rf->ep_id, iwdev->rf->pf_id, iwdev->rf->vf_id, iwdev->rf->sc_dev.vhca_id, iwdev->rf->sc_dev.vhca_id_pf, cqp_status); + if (cqp_status != 1) { + pr_err("vf rdma remove: The RDMA device for EP%d PF%d corresponding to VF%d does not exist!\n", iwdev->rf->ep_id, iwdev->rf->pf_id, iwdev->rf->vf_id); + goto clean_ib_resource; + } + + if (!dev->hmc_use_dpu_ddr) { + pr_info("[%s] hmc_use_dpu_ddr: %d\n", __func__, dev->hmc_use_dpu_ddr); + zxdh_set_smmu_invalid(iwdev->rf); + } + + if (iwdev->rf->sc_dev.np_mode_low_lat) { + writel(0, + (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd( + &iwdev->rf->sc_dev, + ZTE_ZXDH_OP_REQ_NP_MAC_DEL, + cdev_info->vport_id, + ether_addr_to_u64(iwdev->mac_addr), + 0, iwdev->rf->vf_id); + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > 200 * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed NP_MAC_DEL mailbox too long time,timeout!\n", dev->vhca_id); + break; + } + if (dev->hw_attrs.self_health == true) { + break; + } + udelay(dev->hw_attrs.max_sleep_count); + } while (!val); + } + + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE)); + + zxdh_sc_send_mailbox_cmd( + &iwdev->rf->sc_dev, ZTE_ZXDH_OP_DEL_HMC_OBJ_RANGE, + cdev_info->vport_id, + ether_addr_to_u64(iwdev->mac_addr), 0, + iwdev->rf->vf_id); // 
VF给PF发mailbox消息,PF释放VF HMC资源 + + do { + val = readl(dev->hw->hw_addr + + C_RDMA_CQP_CQ_DISTRIBUTE_DONE); + if (cnt++ > 200 * dev->hw_attrs.max_done_count) { + pr_info("vhca_id:%d waiting completed DEL_HMC mailbox too long time,timeout!\n", dev->vhca_id); + break; + } + if (dev->hw_attrs.self_health == true) { + break; + } + udelay(dev->hw_attrs.max_sleep_count); + } while (!val); + } + + zxdh_handle_internal_error(iwdev->rf); +clean_ib_resource: + zrdma_cleanup_rdma_tools_cfg(iwdev->rf); + zrdma_cleanup_debugfs_entry(iwdev->rf); + zxdh_ib_unregister_device(iwdev); + zxdh_destory_eth_info_hlist(iwdev); + + if (!iwdev->rf->ftype) { + zxdh_delete_rdma_pf_glb(iwdev->rf); + } + +#ifndef IB_DEALLOC_DRIVER_SUPPORT + /* In newer kernels core issues callback zxdh_ib_dealloc_device to cleanup on ib unregister + * Older kernels require cleanup here + */ + + zxdh_rt_deinit_hw(iwdev); + zxdh_ctrl_deinit_hw(iwdev->rf); + zxdh_del_handler(iwdev->hdl); +#ifdef MSIX_DEBUG + pci_free_irq_vectors(cdev_info->pdev); +#endif + if (iwdev->rf->iw_msixtbl) { + kfree(iwdev->rf->iw_msixtbl); + iwdev->rf->iw_msixtbl = NULL; + } + kfree(iwdev->hdl); + kfree(iwdev->rf); + + ib_dealloc_device(&iwdev->ibdev); + +#endif /* IB_DEALLOC_DRIVER_SUPPORT */ + pr_info("INIT: Gen2 PF[%d] device remove success\n", + PCI_FUNC(cdev_info->pdev->devfn)); + return 0; +} + +/** + * zxdh_shutdown - trigger when reboot + * @aux_dev: auxiliary device ptr + */ +static void zxdh_shutdown(struct zxdh_auxiliary_device *aux_dev) +{ + zxdh_remove(aux_dev); +} + +#ifdef MSIX_DEBUG + +static int ft_debug_msix_interrupt(struct pci_dev *pdev, + struct msix_entry *msix, u32 msix_num) +{ + struct msix_entry *temp_msix; + int ret; + int i; + + temp_msix = msix; + if (pci_enable_device(pdev)) { + pr_info("%s enable pcie msix failed!\n", __func__); + return -1; + } + ret = pci_alloc_irq_vectors_affinity(pdev, msix_num, msix_num, + PCI_IRQ_MSIX, NULL); + if (ret < 0) { + pr_info("%s alloc irq vectors failed!\n", __func__); + 
return -1; + } + pr_info("%s alloc irq vectors ret:%d\n", __func__, ret); + + for (i = 0; i < msix_num; i++) { + temp_msix->vector = pci_irq_vector(pdev, i); + temp_msix->entry = i; + pr_info("%s vector:%d entry:%d\n", __func__, temp_msix->vector, + temp_msix->entry); + temp_msix++; + } + + return 0; +} +#endif +static void zxdh_cfg_dpp(struct zxdh_device *iwdev, + struct iidc_core_dev_info *cdev_info) +{ + DPP_PF_INFO_T pf_info = { 0 }; + u32 ret = 0; + + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + if (!iwdev->rf->sc_dev.np_mode_low_lat) { + dpp_vport_vhca_id_add(&pf_info, iwdev->rf->sc_dev.vhca_id); + ret = dpp_vport_attr_set(&pf_info, EGR_FLAG_VHCA, + iwdev->rf->sc_dev.vhca_id); + if (ret != 0) { + pr_err("%s[%d]: dpp vport attr set EGR_FLAG_VHCA fail! ret=%u!\n", __func__, __LINE__, ret); + return; + } + ret = dpp_vport_attr_set(&pf_info, EGR_FLAG_RDMA_OFFLOAD_EN_OFF, + EGR_RDMA_OFFLOAD_EN); + if (ret != 0) { + pr_err("%s[%d]: dpp vport attr set EGR_FLAG_RDMA_OFFLOAD_EN_OFF fail! 
ret=%u!\n", __func__, __LINE__, ret); + return; + } + } else { + if (iwdev->netdev->dev_addr == NULL) { + pr_err("[%s] netdev dev_addr is null!\n", __func__); + return; + } + + pr_info("%s[%d]: dpp add rdma trans item\n", __func__, __LINE__); + dpp_add_rdma_trans_item(&pf_info, iwdev->netdev->dev_addr, + iwdev->rf->sc_dev.vhca_id); + ether_addr_copy(iwdev->mac_addr, iwdev->netdev->dev_addr); + } +} + +static void zxdh_init_eth_info_hlist(struct zxdh_device *iwdev) +{ + int i = 0; + + iwdev->eth_info_hlist = (struct hlist_head *)kmalloc(sizeof(struct hlist_head) * ETH_INFO_HASH_COUNT, GFP_ATOMIC); + if (!iwdev->eth_info_hlist) { + pr_err("%s[%d]: kmalloc return NULL!", __func__, __LINE__); + return; + } + for (i = 0; i < ETH_INFO_HASH_COUNT; i++) // hash数组动态初始化 + INIT_HLIST_HEAD(&iwdev->eth_info_hlist[i]); + mutex_init(&iwdev->eth_info_list_mtx_lock); +} + +static void zxdh_destory_eth_info_hlist(struct zxdh_device *iwdev) +{ + struct zxdh_eth_info *hnode = NULL; + struct hlist_node *hlist = NULL; + int i = 0; + int valid_hlist_num = 0; + + for (i = 0; i < ETH_INFO_HASH_COUNT; i++) { + // 遍历每一个槽,有结点就删除 + hlist_for_each_entry_safe(hnode, hlist, &iwdev->eth_info_hlist[i], list) { + valid_hlist_num++; + hlist_del(&hnode->list); + kfree(hnode); // kmalloc in zxdh_eth_info_hlist_add + } + } + kfree(iwdev->eth_info_hlist); + iwdev->eth_info_hlist = NULL; + + pr_info("%s[%d]: valid_hlist_num=%d\n", __func__, __LINE__, valid_hlist_num); +} + +void zxdh_eth_info_hlist_display(struct zxdh_device *iwdev) +{ + struct zxdh_eth_info *hnode = NULL; + struct hlist_node *hlist = NULL; + int i = 0; + int valid_hlist_num = 0; + int valid_node_num = 0; + int ip_cfg_ref_num = 0; + + for (i = 0; i < ETH_INFO_HASH_COUNT; i++) { + if (!hlist_empty(&iwdev->eth_info_hlist[i])) { + valid_hlist_num++; + } + + hlist_for_each_entry_safe(hnode, hlist, &iwdev->eth_info_hlist[i], list) { + valid_node_num++; + ip_cfg_ref_num += hnode->ip_cfg_ref_cnt; + pr_info("%s[%d]: hlist key=%d, 
src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, name=%s, ip_cfg_ref_cnt=%d\n", __func__, __LINE__, i, + hnode->rdma_to_eth_ip_para.src_ip[0], hnode->rdma_to_eth_ip_para.src_ip[1], hnode->rdma_to_eth_ip_para.src_ip[2], hnode->rdma_to_eth_ip_para.src_ip[3], + hnode->rdma_to_eth_ip_para.dst_ip[0], hnode->rdma_to_eth_ip_para.dst_ip[1], hnode->rdma_to_eth_ip_para.dst_ip[2], hnode->rdma_to_eth_ip_para.dst_ip[3], hnode->rdma_to_eth_ip_para.ifname, hnode->ip_cfg_ref_cnt); + } + } + pr_info("%s[%d]: valid_hlist_num=%d, valid_node_num=%d, ip_cfg_ref_num=%d\n", __func__, __LINE__, valid_hlist_num, valid_node_num, ip_cfg_ref_num); +} + +static int zxdh_eth_info_cmp(struct zxdh_rdma_to_eth_ip_para *ip_para, struct zxdh_eth_info *info2) +{ + if (ip_para->src_ip[0] == info2->rdma_to_eth_ip_para.src_ip[0] && ip_para->src_ip[1] == info2->rdma_to_eth_ip_para.src_ip[1] && ip_para->src_ip[2] == info2->rdma_to_eth_ip_para.src_ip[2] && ip_para->src_ip[3] == info2->rdma_to_eth_ip_para.src_ip[3] && + ip_para->dst_ip[0] == info2->rdma_to_eth_ip_para.dst_ip[0] && ip_para->dst_ip[1] == info2->rdma_to_eth_ip_para.dst_ip[1] && ip_para->dst_ip[2] == info2->rdma_to_eth_ip_para.dst_ip[2] && ip_para->dst_ip[3] == info2->rdma_to_eth_ip_para.dst_ip[3] && + memcmp(ip_para->ifname, info2->rdma_to_eth_ip_para.ifname, strlen(ip_para->ifname)) == 0) { + return 0; + } + + return 1; +} + +static u32 src_dst_ipv4_hash(const struct zxdh_rdma_to_eth_ip_para *ip_para) +{ + u32 hash = jhash(&ip_para->src_ip[3], sizeof(ip_para->src_ip[3]), 0); + u32 key = jhash(&ip_para->dst_ip[3], sizeof(ip_para->dst_ip[3]), hash); + + return key % ETH_INFO_HASH_COUNT; +} + +static u32 src_dst_ipv6_hash(const struct zxdh_rdma_to_eth_ip_para *ip_para) +{ + u32 hash = jhash(ip_para->src_ip, sizeof(ip_para->src_ip), 0); + u32 key = jhash(ip_para->dst_ip, sizeof(ip_para->dst_ip), hash); + + return key % ETH_INFO_HASH_COUNT; +} + +int zxdh_eth_info_hlist_add(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para 
*ip_para) +{ + struct zxdh_eth_info *hnode = NULL; + struct hlist_node *hlist = NULL; + char s_straddr[INET6_ADDRSTRLEN + 20]; + char d_straddr[INET6_ADDRSTRLEN + 20]; + u32 key = 0; + + if (ip_para->ipv4 == true) { + key = src_dst_ipv4_hash(ip_para); + sprintf(s_straddr, "%pI4", &ip_para->src_ip[3]); + sprintf(d_straddr, "%pI4", &ip_para->dst_ip[3]); + } else { + key = src_dst_ipv6_hash(ip_para); + sprintf(s_straddr, "%pI6", ip_para->src_ip); + sprintf(d_straddr, "%pI6", ip_para->dst_ip); + } + + hlist_for_each_entry_safe(hnode, hlist, &iwdev->eth_info_hlist[key], list) { + if (zxdh_eth_info_cmp(ip_para, hnode) == 0) { + hnode->ip_cfg_ref_cnt += 1; + pr_debug("%s[%d]: hlist add ref success, key=%u, ipv4=%d, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx, ip_cfg_ref_cnt=%d\n", + __func__, __LINE__, key, ip_para->ipv4, ip_para->ifname, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac, hnode->ip_cfg_ref_cnt); + goto finish; + } + } + + // 分配结点 + hnode = (struct zxdh_eth_info *)kmalloc(sizeof(struct zxdh_eth_info), GFP_ATOMIC); // kfree in zxdh_eth_info_hlist_delete/zxdh_destory_eth_info_hlist + if (!hnode) { + pr_err("%s[%d]: kmalloc fail, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x\n", __func__, __LINE__, ip_para->ifname, + ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3]); + return -1; + } + + INIT_HLIST_NODE(&hnode->list); + memcpy(&hnode->rdma_to_eth_ip_para, ip_para, sizeof(struct zxdh_rdma_to_eth_ip_para)); + hnode->netdev = iwdev->netdev; + hnode->ip_cfg_ref_cnt = 1; + hlist_add_head(&hnode->list, &iwdev->eth_info_hlist[key]); // 添加到链表首部 + + pr_debug("%s[%d]: hlist add node success, key=%u, ipv4=%d, name=%s, s_straddr=%s, d_straddr=%s, 
src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx, ip_cfg_ref_cnt=%d\n", + __func__, __LINE__, key, ip_para->ipv4, ip_para->ifname, s_straddr, d_straddr, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac, hnode->ip_cfg_ref_cnt); + + rdma_update_remote_ip(ip_para); + zxdh_eth_info_hlist_display(iwdev); + +finish: + return 0; +} + +int zxdh_eth_info_hlist_delete(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para *ip_para) +{ + struct zxdh_eth_info *hnode = NULL; + struct hlist_node *hlist = NULL; + char s_straddr[INET6_ADDRSTRLEN + 20]; + char d_straddr[INET6_ADDRSTRLEN + 20]; + u32 key; + + if (ip_para->ipv4 == true) { + key = src_dst_ipv4_hash(ip_para); + sprintf(s_straddr, "%pI4", &ip_para->src_ip[3]); + sprintf(d_straddr, "%pI4", &ip_para->dst_ip[3]); + } else { + key = src_dst_ipv6_hash(ip_para); + sprintf(s_straddr, "%pI6", ip_para->src_ip); + sprintf(d_straddr, "%pI6", ip_para->dst_ip); + } + + if (hlist_empty(&iwdev->eth_info_hlist[key])) { + pr_err("%s[%d]: hlist key(%d) not exit, ipv4=%d, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx\n", + __func__, __LINE__, key, ip_para->ipv4, ip_para->ifname, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac); + return -1; + } else { + // 遍历对应的槽,匹配值就删除 + hlist_for_each_entry_safe(hnode, hlist, &iwdev->eth_info_hlist[key], list) { + if (zxdh_eth_info_cmp(ip_para, hnode) == 0) { + hnode->ip_cfg_ref_cnt -= 1; + if (hnode->ip_cfg_ref_cnt == 0) { + pr_debug("%s[%d]: hlist delete node success, key=%u, ipv4=%d, name=%s, s_straddr=%s, d_straddr=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx, 
ip_cfg_ref_cnt=%d\n", + __func__, __LINE__, key, ip_para->ipv4, ip_para->ifname, s_straddr, d_straddr, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac, hnode->ip_cfg_ref_cnt); + hlist_del(&hnode->list); + kfree(hnode); // kmalloc in zxdh_eth_info_hlist_add + rdma_update_remote_ip(ip_para); + zxdh_eth_info_hlist_display(iwdev); + } else { + pr_debug("%s[%d]: hlist delete ref success, key=%u, ipv4=%d, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx, ip_cfg_ref_cnt=%d\n", + __func__, __LINE__, key, ip_para->ipv4, hnode->netdev->name, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac, hnode->ip_cfg_ref_cnt); + } + return 0; + } + } + } + + pr_err("%s[%d]: delete data fail, key=%u, ipv4=%d, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx\n", + __func__, __LINE__, key, ip_para->ipv4, ip_para->ifname, ip_para->src_ip[0], ip_para->src_ip[1], ip_para->src_ip[2], ip_para->src_ip[3], + ip_para->dst_ip[0], ip_para->dst_ip[1], ip_para->dst_ip[2], ip_para->dst_ip[3], ip_para->src_mac, ip_para->dst_mac); + return -1; +} + +static void zxdh_fill_device_info(struct zxdh_device *iwdev, + struct iidc_core_dev_info *cdev_info) +{ + struct zxdh_pci_f *rf = iwdev->rf; + + rf->ftype = (cdev_info->vport_id >> 11) & 0x1; + rf->pf_id = (cdev_info->vport_id >> 8) & 0x7; + rf->sc_dev.ep_id = (cdev_info->vport_id >> 12) & 0x7; + rf->ep_id = rf->sc_dev.ep_id; + rf->sc_dev.driver_load = true; + rf->sc_dev.last_time = 0; + + rf->cdev = cdev_info; + rf->pcidev = cdev_info->pdev; + rf->hw.pci_hw_addr = cdev_info->hw_addr; + + rf->msix_count = cdev_info->msix_count; +#ifdef MSIX_DEBUG + 
ft_debug_msix_interrupt(cdev_info->pdev, cdev_info->msix_entries, + rf->msix_count); +#endif + rf->msix_entries = cdev_info->msix_entries; + rf->sc_dev.max_ceqs = (rf->msix_count - 1); + rf->protocol_used = cdev_info->rdma_protocol == + IIDC_RDMA_PROTOCOL_ROCEV2 ? + ZXDH_ROCE_PROTOCOL_ONLY : + ZXDH_IWARP_PROTOCOL_ONLY; + rf->rdma_ver = ZXDH_GEN_2; + rf->rsrc_profile = ZXDH_HMC_PROFILE_DEFAULT; + rf->rst_to = ZXDH_RST_TIMEOUT_HZ; + rf->gen_ops.request_reset = zxdh_request_reset; + rf->check_fc = zxdh_check_fc_for_qp; + rf->qp_index = 0; + + /* Can override limits_sel, protocol_used */ + zxdh_set_rf_user_cfg_params(rf); + rf->iwdev = iwdev; + + INIT_LIST_HEAD(&iwdev->ah_list); + mutex_init(&iwdev->ah_list_lock); + iwdev->netdev = cdev_info->netdev; + iwdev->source_netdev = cdev_info->netdev; + iwdev->init_state = INITIAL_STATE; + iwdev->roce_cwnd = ZXDH_ROCE_CWND_DEFAULT; + iwdev->roce_ackcreds = ZXDH_ROCE_ACKCREDS_DEFAULT; + iwdev->rcv_wnd = ZXDH_CM_DEFAULT_RCV_WND_SCALED; + iwdev->rcv_wscale = ZXDH_CM_DEFAULT_RCV_WND_SCALE; + iwdev->qp1 = NULL; +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + iwdev->iwarp_ecn_en = true; + iwdev->iwarp_rtomin = 5; + iwdev->up_up_map = ZXDH_DEFAULT_UP_UP_MAP; +#endif + if (rf->protocol_used == ZXDH_ROCE_PROTOCOL_ONLY) { +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + iwdev->roce_rtomin = 5; +#endif + //iwdev->roce_dcqcn_en = iwdev->rf->dcqcn_ena; + iwdev->roce_dcqcn_en = true; //dcqcn/ecn is set to default on + iwdev->roce_mode = true; + } + + zxdh_init_eth_info_hlist(iwdev); +} + +/*zxdh_auxiliary_dev中的netdev字段上移,此处重新赋值*/ +static void zxdh_to_iidc(struct iidc_core_dev_info *cdev_info, + struct zxdh_auxiliary_dev *iidc_adev) +{ + cdev_info->pdev = iidc_adev->zxdh_info->pdev; + cdev_info->adev = iidc_adev->zxdh_info->adev; + cdev_info->hw_addr = iidc_adev->zxdh_info->hw_addr; + cdev_info->cdev_info_id = iidc_adev->zxdh_info->cdev_info_id; + cdev_info->ver = iidc_adev->zxdh_info->ver; + cdev_info->auxiliary_priv = iidc_adev->zxdh_info->auxiliary_priv; + 
cdev_info->vport_id = iidc_adev->zxdh_info->vport_id; + cdev_info->slot_id = iidc_adev->zxdh_info->slot_id; + cdev_info->rdma_protocol = iidc_adev->zxdh_info->rdma_protocol; + cdev_info->qos_info = iidc_adev->zxdh_info->qos_info; + cdev_info->msix_entries = &iidc_adev->zxdh_info->msix_entries; + cdev_info->msix_count = iidc_adev->zxdh_info->msix_count; + cdev_info->ops = iidc_adev->zxdh_info->ops; + cdev_info->netdev = + iidc_adev->rdma_ops->get_rdma_netdev(iidc_adev->parent); +} + +static int zxdh_fw_ver_check(struct iidc_core_dev_info *cdev_info) +{ + struct zxdh_fw_compat *ver = NULL; + u64 addr_offset = 0; + u8 fw_minor_fw_ver = FW_MINOR_FW_VER; + u8 fw_minor_drv_ver = FW_MINOR_DRV_VER; + + addr_offset = ZXDH_FW_VER_OFFSET + (MODULE_RDMA_ID - 2) * sizeof(struct zxdh_fw_compat); + ver = (struct zxdh_fw_compat *)((void __iomem *)cdev_info->hw_addr + addr_offset); + if ((MODULE_RDMA_ID - 1) == ver->module_id) { + addr_offset = ZXDH_FW_VER_OFFSET + (MODULE_RDMA_ID - 3) * sizeof(struct zxdh_fw_compat); + ver = (struct zxdh_fw_compat *)((void __iomem *)cdev_info->hw_addr + addr_offset); + if ((MODULE_RDMA_ID - 2) == ver->module_id) { + addr_offset = ZXDH_FW_VER_OFFSET + (MODULE_RDMA_ID - 1) * sizeof(struct zxdh_fw_compat); + ver = (struct zxdh_fw_compat *)((void __iomem *)cdev_info->hw_addr + addr_offset); + if (MODULE_RDMA_ID == ver->module_id) { + if (ver->major != FW_MAJOR_VER) { + pr_err("fw major rdma side ver:%u-%u-%u is not match fw side ver:%u-%u-%u\n", + FW_MAJOR_VER, FW_MINOR_FW_VER, FW_MINOR_DRV_VER, ver->major, ver->fw_minor, ver->drv_minor); + return -EINVAL; + } + + if (fw_minor_fw_ver > ver->fw_minor) { + pr_err("fw minor rdma side ver:%u-%u-%u is higher than fw side ver:%u-%u-%u\n", + FW_MAJOR_VER, FW_MINOR_FW_VER, FW_MINOR_DRV_VER, ver->major, ver->fw_minor, ver->drv_minor); + return -EINVAL; + } + + if (fw_minor_drv_ver < ver->drv_minor) { + pr_err("fw rdma minor rdma side ver:%u-%u-%u is lower than fw side ver:%u-%u-%u\n", + FW_MAJOR_VER, 
FW_MINOR_FW_VER, FW_MINOR_DRV_VER, ver->major, ver->fw_minor, ver->drv_minor); + return -EINVAL; + } + pr_info("[%s] fw ver match success!\n", __func__); + } + } + } + return 0; +} + +static int zxdh_drv_ver_check(struct iidc_core_dev_info *cdev_info) +{ + u8 net_major = 0; + u8 net_minor = 0; + u8 rdma_minor = 0; + u8 drv_net_minor_ver = DRV_NET_MINOR_VER; + u8 drv_rdma_minor_ver = DRV_RDMA_MINOR_VER; + + net_major = (u8)FIELD_GET(ZXDH_NET_MAJOR_IDX, cdev_info->ver.support); + net_minor = (u8)FIELD_GET(ZXDH_NET_MINOR_IDX, cdev_info->ver.support); + rdma_minor = (u8)FIELD_GET(ZXDH_RDMA_MINOR_IDX, cdev_info->ver.support); + + if (net_major != DRV_MAJOR_VER) { + pr_err("drv major rdma side ver:%u-%u-%u is not match net side ver:%u-%u-%u\n", + DRV_MAJOR_VER, DRV_NET_MINOR_VER, DRV_RDMA_MINOR_VER, net_major, net_minor, rdma_minor); + return -EINVAL; + } + + if (drv_net_minor_ver > net_minor) { + pr_err("drv net minor rdma side ver:%u-%u-%u is higher than net side ver:%u-%u-%u\n", + DRV_MAJOR_VER, DRV_NET_MINOR_VER, DRV_RDMA_MINOR_VER, net_major, net_minor, rdma_minor); + return -EINVAL; + } + + if (drv_rdma_minor_ver < rdma_minor) { + pr_err("drv rdma minor rdma side ver:%u-%u-%u is lower than net side ver:%u-%u-%u\n", + DRV_MAJOR_VER, DRV_NET_MINOR_VER, DRV_RDMA_MINOR_VER, net_major, net_minor, rdma_minor); + return -EINVAL; + } + return 0; +} + +static int zxdh_compat_ver_check(struct iidc_core_dev_info *cdev_info) +{ + if (zxdh_fw_ver_check(cdev_info)) + return -EINVAL; + + if (zxdh_drv_ver_check(cdev_info)) + return -EINVAL; + return 0; +} + +static void zxdh_rdma_init_sriov(struct zxdh_pci_f *rf) +{ + struct zxdh_rdma_sriov_event_info sriov_info; + u64 vf_pblem_cnt; + int active_vf_num = pci_num_vf(rf->pcidev); + struct iidc_core_dev_info *cdev_info = rf->cdev; + int ret = 0; + + if (active_vf_num <= 0) { + return; + } + + sriov_info.pdev = cdev_info->pdev; + sriov_info.bar0_virt_addr = (u64)(uintptr_t)cdev_info->hw_addr; + sriov_info.vport_id = 
cdev_info->vport_id; + sriov_info.num_vfs = active_vf_num; + + if (set_rdma_vf_num(&sriov_info, &vf_pblem_cnt)) { + pr_err("%s set_rdma_vf_num failed, ret=%d\n", __func__, ret); + return; + } + + rf->sc_dev.active_vfs_num = active_vf_num; + pr_info("%s active_vf_num:%d vf_pblem_cnt:0x%llx\n", __func__, active_vf_num, vf_pblem_cnt); + + return; +} + +static int zxdh_clear_l2d(struct zxdh_sc_dev *dev, u64 l2d_pa, u64 size) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + if (!dev) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + dev->nof_clear_dpu_mem.size = size; + dev->nof_clear_dpu_mem.va = + dma_alloc_coherent(dev->hw->device, dev->nof_clear_dpu_mem.size, + &dev->nof_clear_dpu_mem.pa, GFP_KERNEL); + if (!dev->nof_clear_dpu_mem.va) + return -ENOMEM; + zte_memset_s(dev->nof_clear_dpu_mem.va, 0, dev->nof_clear_dpu_mem.size); + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = dev->nof_clear_dpu_mem.pa; + cqp_info->in.u.dma_writeread.src_dest.len = dev->nof_clear_dpu_mem.size; + cqp_info->in.u.dma_writeread.src_dest.dest = l2d_pa; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.src_path_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = ZXDH_INDICATE_DPU_DDR; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + 
pr_info("%s[%d]: clear l2d pa=0x%llx size=0x%x\n", __func__, __LINE__, l2d_pa, dev->nof_clear_dpu_mem.size); + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +static int zxdh_get_srq_l2d_info(struct zxdh_pci_f *rf, struct dh_get_srq_l2d_addr_resp *srq_l2d_info) +{ + int ret = 0; + u32 function_id = 0; + u8 rep_valid = 0; + u16 rep_len = 0; + u8 *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct dh_get_srq_l2d_addr_req get_cmd = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t recv_len; + void *recv_buffer; + struct dh_get_srq_l2d_addr_resp *get_resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf) { + pr_err("%s[%d]: rf is null\n", __func__, __LINE__); + return -EINVAL; + } + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + function_id = DH_FUNC_ID_GEN(rf->ftype, rf->ep_id, 0, rf->pf_id, rf->vf_id); + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_get_srq_l2d_addr_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + get_cmd.op_code = GET_SRQ_L2D_ADDR; + get_cmd.function_id = function_id; + pr_info("%s[%d]: function_id=0x%x ftype=%d ep_id=%d pf_id=%d vf_id=%d\n", __func__, __LINE__, + function_id, rf->ftype, rf->ep_id, rf->pf_id, rf->vf_id); + + // get message preparation + in.payload_addr = (void *)&get_cmd; + in.payload_len = sizeof(struct dh_get_srq_l2d_addr_req); + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != (recv_len - ZXDH_CHAN_REPS_LEN)) { + pr_err("[%s] response length invalid, rep_len=0x%x recv_len=0x%zx\n", __func__, rep_len, recv_len); + kfree(recv_buffer); + return -EPROTO; + } + + get_resp = (struct dh_get_srq_l2d_addr_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (get_resp->status_code != BAR_MSG_STATUS_OK) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", __func__, get_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + zte_memcpy_s(srq_l2d_info, get_resp, sizeof(struct dh_get_srq_l2d_addr_resp)); + pr_info("%s[%d]: resp srq_l2d_paddr=0x%llx srq_l2d_size=0x%x\n", __func__, __LINE__, get_resp->srq_l2d_paddr, get_resp->srq_l2d_size); + + // *outdata = get_resp->val; + kfree(recv_buffer); + recv_buffer = NULL; + return 0; +} + +static void zxdh_set_srq_l2d_info(struct zxdh_pci_f *rf) +{ + struct dh_get_srq_l2d_addr_resp srq_l2d_info = { 0 }; + int ret = 0; + // u32 *srq_l2d_vaddr = NULL; + // u32 val = 0; + + if (!rf) { + pr_err("%s[%d] error: rf is null\n", __func__, __LINE__); + return; + } + + ret = zxdh_get_srq_l2d_info(rf, 
&srq_l2d_info); + if (ret) { + rf->srq_l2d_base_paddr = 0; + rf->srq_l2d_size = 0; + rf->rdma_ext_bar_offset = 0; + pr_warn("%s: get srq l2d failed, use ddr! ret=%d srq_l2d_paddr=0x%llx srq_l2d_size=0x%x rdma_ext_bar_offset=0x%x status_code=%d\n", __func__, + ret, srq_l2d_info.srq_l2d_paddr, srq_l2d_info.srq_l2d_size, srq_l2d_info.rdma_ext_bar_offset, srq_l2d_info.status_code); + } else { + rf->srq_l2d_base_paddr = srq_l2d_info.srq_l2d_paddr; + rf->srq_l2d_size = srq_l2d_info.srq_l2d_size; + rf->rdma_ext_bar_offset = srq_l2d_info.rdma_ext_bar_offset; + pr_debug("%s: get srq l2d success! srq_l2d_paddr=0x%llx srq_l2d_size=0x%x rdma_ext_bar_offset=0x%x status_code=%d\n", __func__, + srq_l2d_info.srq_l2d_paddr, srq_l2d_info.srq_l2d_size, srq_l2d_info.rdma_ext_bar_offset, srq_l2d_info.status_code); + } +} + +static int zxdh_probe(struct zxdh_auxiliary_device *aux_dev, + const struct zxdh_auxiliary_device_id *id) +{ + struct zxdh_auxiliary_dev *iidc_adev = + container_of(aux_dev, struct zxdh_auxiliary_dev, adev); + struct zxdh_device *iwdev; + struct zxdh_pci_f *rf; + int err; + struct zxdh_handler *hdl; + struct iidc_core_dev_info *cdev_info = + kzalloc(sizeof(struct iidc_core_dev_info), GFP_KERNEL); + + if (!cdev_info) + return -ENOMEM; + zxdh_to_iidc(cdev_info, iidc_adev); + + if (zxdh_compat_ver_check(cdev_info)) { + kfree(cdev_info); + return -EINVAL; + } + + if (cdev_info->ver.major != IIDC_MAJOR_VER) { + pr_err("version mismatch:\n"); + pr_err("expected major ver %d, caller specified major ver %d\n", + IIDC_MAJOR_VER, cdev_info->ver.major); + pr_err("expected minor ver %d, caller specified minor ver %d\n", + IIDC_MINOR_VER, cdev_info->ver.minor); + kfree(cdev_info); + return -EINVAL; + } + if (cdev_info->ver.minor != IIDC_MINOR_VER) + pr_info("probe: minor version mismatch: expected %0d.%0d caller specified %0d.%0d\n", + IIDC_MAJOR_VER, IIDC_MINOR_VER, cdev_info->ver.major, + cdev_info->ver.minor); + + iwdev = ib_alloc_device(zxdh_device, ibdev); + if (!iwdev) { + 
kfree(cdev_info); + return -ENOMEM; + } + iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL); + if (!iwdev->rf) { + ib_dealloc_device(&iwdev->ibdev); + kfree(cdev_info); + return -ENOMEM; + } + zxdh_fill_device_info(iwdev, cdev_info); + zxdh_req_cmd_ver(iwdev->rf); + + zxdh_set_srq_l2d_info(iwdev->rf); + + if (!iwdev->rf->ftype) { + zxdh_rdma_init_sriov(iwdev->rf); + } + + err = zxdh_manager_init(iwdev->rf, cdev_info); + if (err != 0) { + pr_warn("zxdh_manager_init failed!\n"); + goto err_mgr_init; + } + + if (!iwdev->rf->ftype) { + zxdh_cfg_dpp(iwdev, cdev_info); + } + + rf = iwdev->rf; + + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) { + kfree(iwdev->rf); + ib_dealloc_device(&iwdev->ibdev); + return -ENOMEM; + } + + hdl->iwdev = iwdev; + iwdev->hdl = hdl; + + err = zxdh_ctrl_init_hw(rf); + if (err) + goto err_ctrl_init; + + err = zxdh_rt_init_hw(iwdev); + if (err) + goto err_rt_init; + + if (rf->srq_l2d_base_paddr != 0 && rf->srq_l2d_size != 0) { + zxdh_clear_l2d(&rf->sc_dev, rf->srq_l2d_base_paddr, rf->srq_l2d_size); + } + + err = zxdh_ib_register_device(iwdev); + if (err) + goto err_ibreg; + + zxdh_add_handler(hdl); + refcount_set(&iwdev->trace_switch.t_switch, 0); + dev_set_drvdata(&aux_dev->dev, iwdev); + if (!rf->ftype) { + zxdh_store_rdma_pf_glb(rf); + } + + create_debugfs_entry(rf); + if (!rf->ftype) + zxdh_hwbond_register_rdma_ops(&hwbond_ops); + pr_info("INIT: device[%d] probe success\n", rf->sc_dev.vhca_id); + return 0; + +err_ibreg: + zxdh_rt_deinit_hw(iwdev); +err_rt_init: + zxdh_ctrl_deinit_hw(rf); +#ifdef MSIX_DEBUG + pci_free_irq_vectors(cdev_info->pdev); +#endif +err_ctrl_init: + kfree(hdl); +err_mgr_init: + kfree(iwdev->rf); + ib_dealloc_device(&iwdev->ibdev); + kfree(cdev_info); + + return err; +} + +static const struct zxdh_auxiliary_device_id zxdh_auxiliary_id_table[] = { + { + .name = ZXDH_PF_NAME "." 
ZXDH_RDMA_DEV_NAME, + }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, zxdh_auxiliary_id_table); + +static struct iidc_auxiliary_drv zxdh_auxiliary_drv = { + .adrv = { + .name = ZXDH_RDMA_DEV_NAME, + .id_table = zxdh_auxiliary_id_table, + .probe = zxdh_probe, + .remove = zxdh_remove, + .shutdown = zxdh_shutdown, + }, + .event_handler = zxdh_iidc_event_handler, +}; + +static void zxdh_show_ver(void) +{ + if (display_drv_side_fw_ver == 1) + pr_info("zrdma driver side fw version: %d.%d.%d\n", FW_MAJOR_VER, + FW_MINOR_FW_VER, FW_MINOR_DRV_VER); + if (display_drv_side_net_ver == 1) + pr_info("zrdma driver side network version: %d.%d.%d\n", DRV_MAJOR_VER, + DRV_NET_MINOR_VER, DRV_RDMA_MINOR_VER); +} + +static int __init zxdh_init_module(void) +{ + int ret; + +#ifdef RDMA_VERSION + pr_info("zrdma driver version: %s\n", TOSTRING(RDMA_VERSION)); +#else + pr_info("zrdma driver version: %d.%d.%d\n", DRV_VER_MAJOR, DRV_VER_MINOR, DRV_VER_BUILD); +#endif + zxdh_show_ver(); + + zrdma_register_debugfs(); + ret = zxdh_auxiliary_driver_register(&zxdh_auxiliary_drv.adrv); + if (ret) + return ret; + + pr_info("[%s] install hwbond callback function\n", __func__); + zxdh_hwbond_register_rdma_ops(&hwbond_ops); + zxdh_rdma_events_register(&zxdh_rdma_event_handler); + zxdh_register_notifiers(); + + return 0; +} + +static void __exit zxdh_exit_module(void) +{ + zxdh_unregister_notifiers(); + + pr_info("[%s] remove hwbond callback function\n", __func__); + hwbond_ops.cfg_rdma_hb_master = NULL; + hwbond_ops.cfg_rdma_hb_speed = NULL; + zxdh_hwbond_unregister_rdma_ops(); + zxdh_rdma_events_unregister(); + zxdh_auxiliary_driver_unregister(&zxdh_auxiliary_drv.adrv); + zrdma_unregister_debugfs(); +} + +module_init(zxdh_init_module); +module_exit(zxdh_exit_module); diff --git a/drivers/infiniband/hw/zrdma/main.h b/drivers/infiniband/hw/zrdma/main.h new file mode 100644 index 000000000000..af1eafefdd27 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/main.h @@ -0,0 +1,865 @@ +/* 
SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_MAIN_H +#define ZXDH_MAIN_H +#define Z_DH_DEBUG +#define MSIX_SUPPORT + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dpp_tbl_api.h" +#ifndef CONFIG_64BIT +#include +#endif +#include "zxdh_auxiliary_bus.h" +//#include "../../../../zxdh_kernel/include/linux/dinghai/auxiliary_bus.h" +#ifndef RDMA_MMAP_DB_SUPPORT +#include +#endif +#include +#ifdef __OFED_4_8__ +#include +#endif /* __OFED_4_8__ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "status.h" +#include "osdep.h" +#include "defs.h" +#include "hmc.h" +#include "type.h" +#include "ws.h" +#include "protos.h" +#include "pble.h" +#include "cm.h" +#include "iidc.h" +#include "zrdma_kcompat.h" +#include "zrdma-abi.h" +#include "verbs.h" +#include "user.h" +#include "puda.h" +#include "srq.h" +#include "manager.h" +#include "dbgfs.h" +#include + +extern struct list_head zxdh_handlers; +extern spinlock_t zxdh_handler_lock; +extern bool zxdh_upload_context; + +struct zxdh_fw_compat { + u8 module_id; + u8 major; + u8 fw_minor; + u8 drv_minor; + u16 patch; + u16 rsv; +} __attribute__((packed)); + +// typedef struct dpp_pf_info { +// u16 slot; +// u16 vport; +// } DPP_PF_INFO_T; + +struct zxdh_vport_t { + u32 tpid /* : 16; */; + u32 vhca /* : 10; */; + u32 uplink_port /* : 6; */; + + u32 rss_hash_factor /* : 8; */; + u32 hash_alg /* : 4; */; + u32 panel_id /* : 4; */; + + u32 lag_id /* : 3; */; + u32 pf_vqm_vfid /* : 11; */; + u32 ingress_tm_enable /* : 2; */; + u32 egress_tm_enable /* : 1; */; + + u32 mtu /* : 16; */; + + u32 port_base_qid /* : 12; */; + u32 hash_search_index /* : 3; */; + u32 rsv1 /* : 1; */; + + u32 tm_enable /* : 1; */; + u32 ingress_meter_enable /* : 1; */; + u32 
egress_meter_enable /* : 1; */; + u32 ingress_meter_mode /* : 1; */; + u32 egress_meter_mode /* : 1; */; + u32 fd_enable /* : 1; */; + u32 vepa_enable /* : 1; */; + u32 spoof_check_enable /* : 1; */; + + u32 inline_sec_offload /* : 1; */; + u32 ovs_enable /* : 1; */; + u32 lag_enable /* : 1; */; + u32 is_passthrough /* : 1; */; + u32 is_vf /* : 1; */; + u32 virtion_version /* : 2; */; + u32 virtio_enable /* : 1; */; + + u32 accelerator_offload_flag /* : 1; */; + u32 lro_offload /* : 1; */; + u32 ip_fragment_offload /* : 1; */; + u32 tcp_udp_checksum_offload /* : 1; */; + u32 ip_checksum_offload /* : 1; */; + u32 outer_ip_checksum_offload /* : 1; */; + u32 is_up /* : 1; */; + u32 allmulticast_enable /* : 1; */; + + u32 hw_bond_enable /* : 1; */; + u32 rdma_offload_enable /* : 1; */; + u32 vlan_filter_enable /* : 1; */; + u32 vlan_strip_offload /* : 1; */; + u32 qinq_vlan_strip_offload /* : 1; */; + u32 rss_enable /* : 1; */; + u32 mtu_offload_enable /* : 1; */; + u32 hit_flag /*: 1; */; +}; + +#define ZXDH_PF_NAME "dinghai10e" +#define ZXDH_RDMA_DEV_NAME "rdma_aux" + +#define EGR_FLAG_VHCA ((u32)(offsetof(struct zxdh_vport_t, vhca) / sizeof(u32))) +#define EGR_FLAG_RDMA_OFFLOAD_EN_OFF \ + ((u32)(offsetof(struct zxdh_vport_t, rdma_offload_enable) / \ + sizeof(u32))) + +#define EGR_RDMA_OFFLOAD_EN 0x1 +u32 dpp_vport_vhca_id_add(DPP_PF_INFO_T *pf_info, u32 vhca_id); +u32 dpp_vport_attr_set(DPP_PF_INFO_T *pf_info, u32 mode, u32 value); +u32 dpp_add_rdma_trans_item(DPP_PF_INFO_T *pf_info, const void *mac, u16 vhca_id); +u32 dpp_del_rdma_trans_item(DPP_PF_INFO_T *pf_info, const void *mac); +u32 dpp_glb_cfg_get_1(DPP_PF_INFO_T *pf_info, u32 *glb_cfg_data_1); +u32 dpp_pktrx_mcode_glb_cfg_write(DPP_PF_INFO_T *pf_info, u32 start_bit_no, u32 end_bit_no, u32 glb_cfg_data_1); +void zxdh_rdma_events_unregister(void); +typedef int32_t (*zxdh_rdma_event_func)(struct net_device *netdev, uint8_t event_type, void *data); +void zxdh_rdma_events_register(zxdh_rdma_event_func callback); 
+ +enum { + ZXDH_RDMA_HEALTH_EVENT = 1, + ZXDH_RDMA_SRIOV_EVENT = 2, +}; + +#define ZXDH_RDMA_VER_LEN 60 +#define ZXDH_RDMA_QP_BUF_LEN 100 +#define ZXDH_RDMA_QP_NOT_EXIST 0 +#define ZXDH_RDMA_QP_EXIST 1 + +#define ZXDH_MAX_IRQ_COUNT 4 +#define ZXDH_CEQ_IRQ_COUNT 3 + +#define ZXDH_FW_VER_DEFAULT 2 +#define ZXDH_HW_VER 2 + +#define ZXDH_ARP_ADD 1 +#define ZXDH_ARP_DELETE 2 +#define ZXDH_ARP_RESOLVE 3 + +#define ZXDH_MACIP_ADD 1 +#define ZXDH_MACIP_DELETE 2 + +#define IW_CCQ_SIZE ZXDH_CQP_SW_SQSIZE_2048 +#define IW_CEQ_SIZE 2048 +#define IW_AEQ_SIZE 2048 + +#define RX_BUF_SIZE (1536 + 8) +#define IW_REG0_SIZE (4 * 1024) +#define IW_TX_TIMEOUT (6 * HZ) +#define IW_FIRST_QPN 1 + +#define IW_SW_CONTEXT_ALIGN 1024 + +#define MAX_DPC_ITERATIONS 128 + +#define ZXDH_EVENT_TIMEOUT_MS 5000 +#define ZXDH_VCHNL_EVENT_TIMEOUT_MS 10000 +#define ZXDH_RST_TIMEOUT_HZ 4 + +#define ZXDH_NO_QSET 0xffff + +#define IW_CFG_FPM_QP_COUNT 32768 +#define ZXDH_MAX_PAGES_PER_FMR 512 +#define ZXDH_MIN_PAGES_PER_FMR 1 +#define ZXDH_CQP_COMPL_RQ_WQE_FLUSHED 2 +#define ZXDH_CQP_COMPL_SQ_WQE_FLUSHED 3 + +#define ZXDH_Q_TYPE_PE_AEQ 0x80 +#define ZXDH_Q_INVALID_IDX 0xffff +#define ZXDH_REM_ENDPOINT_TRK_QPID 3 + +#define ZXDH_DRV_OPT_ENA_MPA_VER_0 0x00000001 +#define ZXDH_DRV_OPT_DISABLE_MPA_CRC 0x00000002 +#define ZXDH_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004 +#define ZXDH_DRV_OPT_DISABLE_INTF 0x00000008 +#define ZXDH_DRV_OPT_ENA_MSI 0x00000010 +#define ZXDH_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020 +#define ZXDH_DRV_OPT_NO_INLINE_DATA 0x00000080 +#define ZXDH_DRV_OPT_DISABLE_INT_MOD 0x00000100 +#define ZXDH_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 +#define ZXDH_DRV_OPT_ENA_PAU 0x00000400 +#define ZXDH_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800 + +#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) +#define ZXDH_ROCE_CWND_DEFAULT 0x400 +#define ZXDH_ROCE_RTOMIN_DEFAULT 0x5 +#define ZXDH_ROCE_ACKCREDS_DEFAULT 0x1E +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +#define ZXDH_DEFAULT_UP_UP_MAP 0x0706050403020100l +#endif + 
+#define ZXDH_FLUSH_SQ BIT(0) +#define ZXDH_FLUSH_RQ BIT(1) +#define ZXDH_REFLUSH BIT(2) +#define ZXDH_FLUSH_WAIT BIT(3) + +#define SINGLE_EP0 1 +#define MULTI_EP_NO_ZF 0 +#define MULTI_EP_WITH_ZF 0 + +#define ETH_INFO_HASH_COUNT 256 + +#define ZXDH_DUAL_TOR_SWITCH_OFFSET 0x5780 +#define ZXDH_DUAL_TOR_SWITCH_OPEN 0xaaaaaaaa + +#define ZXDH_HW_SCHEDULE_OFF 0 +#define ZXDH_HW_SCHEDULE_ON 1 + +#define FW_TIME_WAIT_1S 1000 +#define FW_TIME_WAIT_CNT 20 + +#define HOST_RDMA_MAX_PF 256 + +struct dev_log_trace { + refcount_t t_switch; +}; + +enum init_completion_state { + INVALID_STATE = 0, + INITIAL_STATE, + CQP_CREATED, + SMMU_PAGETABLE_INITIALIZED, + DATA_CAP_CREATED, + HMC_OBJS_CREATED, + HW_RSRC_INITIALIZED, + CQP_QP_CREATED, + AEQ_CREATED, + CCQ_CREATED, + CEQ0_CREATED, /* Last state of probe */ + ILQ_CREATED, + IEQ_CREATED, + REM_ENDPOINT_TRK_CREATED, + CEQS_CREATED, + PBLE_CHUNK_MEM, + IP_ADDR_REGISTERED, /* Last state of open */ +}; + +enum { + MCODE_TYPE_DCQCN = 1, + MCODE_TYPE_RTT = 2, + MCODE_TYPE_WUMENG = 6, +}; + +struct zxdh_cqp_err_info { + u16 maj; + u16 min; + const char *desc; +}; + +struct zxdh_cqp_compl_info { + u64 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + bool error; + u8 op_code; + __le64 addrbuf[5]; +}; + +struct zxdh_cqp_request { + struct cqp_cmds_info info; + wait_queue_head_t waitq; + struct list_head list; + refcount_t refcnt; + void (*callback_fcn)(struct zxdh_cqp_request *cqp_request); + void *param; + struct zxdh_cqp_compl_info compl_info; + u8 waiting : 1; + u8 request_done : 1; + u8 dynamic : 1; +}; + +struct zxdh_cqp { + struct zxdh_sc_cqp sc_cqp; + spinlock_t req_lock; /* protect CQP request list */ + spinlock_t compl_lock; /* protect CQP completion processing */ + wait_queue_head_t waitq; + wait_queue_head_t remove_wq; + struct zxdh_dma_mem sq; + struct zxdh_dma_mem host_ctx; + u64 *scratch_array; + struct zxdh_cqp_request *cqp_requests; + struct list_head cqp_avail_reqs; + struct list_head cqp_pending_reqs; +}; + +struct 
zxdh_ccq { + struct zxdh_sc_cq sc_cq; + struct zxdh_dma_mem mem_cq; + struct zxdh_dma_mem shadow_area; +}; + +struct zxdh_ceq { + struct zxdh_sc_ceq sc_ceq; + struct zxdh_dma_mem mem; + u32 irq; + u32 msix_idx; + bool irq_sta; + struct zxdh_pci_f *rf; + struct tasklet_struct dpc_tasklet; + + spinlock_t + ce_lock; /* sync cq destroy with cq completion event notification */ +}; + +struct zxdh_aeq { + struct zxdh_sc_aeq sc_aeq; + struct zxdh_dma_mem mem; + struct zxdh_pble_alloc palloc; + bool virtual_map; + u32 irq; + u32 msix_idx; + bool irq_sta; +}; + +struct zxdh_arp_entry { + u32 ip_addr[4]; + u8 mac_addr[ETH_ALEN]; +}; + +struct zxdh_msix_vector { + u32 idx; + u32 irq; + u32 cpu_affinity; + u32 ceq_id; + cpumask_t mask; +}; + +struct zxdh_mc_table_info { + u32 mgn; + u32 dest_ip[4]; + u8 lan_fwd : 1; + u8 ipv4_valid : 1; +}; + +struct mc_table_list { + struct list_head list; + struct zxdh_mc_table_info mc_info; + struct zxdh_mcast_grp_info mc_grp_ctx; +}; + +struct zxdh_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct zxdh_qvlist_info { + u32 num_vectors; + struct zxdh_qv_info qv_info[1]; +}; + +struct zxdh_gen_ops { + void (*request_reset)(struct zxdh_pci_f *rf); + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); +}; + +struct zxdh_pci_f { + u8 reset : 1; + u8 rsrc_created : 1; + u8 ftype : 1; + u8 rsrc_profile; + u8 max_rdma_vfs; + u8 *hmc_info_mem; + u8 *mem_rsrc; + u8 rdma_ver; + u8 rst_to; + /* Not used in SRIOV VF mode */ + u8 pf_id; + u8 vf_id; + u8 ep_id; + u8 fragcnt_limit; + enum zxdh_protocol_used protocol_used; + u8 en_rem_endpoint_trk : 1; + u8 dcqcn_ena : 1; + u32 sd_type; + u32 msix_count; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 max_ah; + u32 next_ah; + u32 max_mcg; + u32 next_mcg; + u32 max_pd; + u32 next_qp; + u32 next_cq; + u32 next_pd; + u32 next_mr; + u32 max_mr_size; + u32 
max_cqe; + u32 mr_stagmask; + u32 used_pds; + u32 used_cqs; + u32 used_mrs; + u32 used_qps; + u32 max_srq; + u32 next_srq; + u32 used_srqs; +#ifdef Z_CONFIG_RDMA_ARP + u32 arp_table_size; + u32 next_arp_index; + unsigned long *allocated_arps; + struct zxdh_arp_entry *arp_table; + spinlock_t arp_lock; /*protect ARP table access*/ +#endif + u32 ceqs_count; + u32 limits_sel; + u64 base_bar_offset; + + unsigned long *allocated_qps; + unsigned long *allocated_cqs; + unsigned long *allocated_mrs; + unsigned long *allocated_pds; + unsigned long *allocated_mcgs; + unsigned long *allocated_ahs; + unsigned long *allocated_srqs; + + enum init_completion_state init_state; + struct zxdh_sc_dev sc_dev; + struct zxdh_handler *hdl; + struct pci_dev *pcidev; + void *cdev; + struct zxdh_hw hw; + struct zxdh_cqp cqp; + struct zxdh_ccq ccq; + struct zxdh_aeq aeq; + struct zxdh_ceq *ceqlist; + struct zxdh_hmc_pble_rsrc *pble_rsrc; + struct zxdh_hmc_pble_rsrc *pble_mr_rsrc; + struct zxdh_dma_mem cqp_host_ctx; + + spinlock_t rsrc_lock; /* protect HW resource array access */ + spinlock_t qptable_lock; /*protect QP table access*/ + spinlock_t cqtable_lock; /*protect CQ table access*/ + struct zxdh_qp **qp_table; + struct zxdh_cq **cq_table; + struct zxdh_msix_vector *iw_msixtbl; + struct zxdh_qvlist_info *iw_qvlist; + spinlock_t srqtable_lock; /*protect SRQ table access*/ + struct zxdh_srq **srq_table; + struct tasklet_struct dpc_tasklet; + struct msix_entry *msix_entries; + struct workqueue_struct *cqp_cmpl_wq; + struct work_struct cqp_cmpl_work; + struct zxdh_gen_ops gen_ops; + void (*check_fc)(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); + struct zxdh_dcqcn_cc_params dcqcn_params; + struct zxdh_device *iwdev; + struct zrdma_debugfs_entries debugfs_entry; + u8 vlan_parse_en; + u8 mcode_type; + u16 pcie_id; + u8 ver_buf[ZXDH_RDMA_VER_LEN]; + u32 qp_buf[ZXDH_RDMA_QP_BUF_LEN]; + u16 qp_index; + u8 rdma_srq_mem_type; + u32 rdma_ext_bar_offset; + u32 srq_l2d_size; + u64 
srq_l2d_base_paddr; +}; + +struct zxdh_cap_entry_info { +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *cap_mmap_entry; +#else + struct zxdh_user_mmap_entry *cap_mmap_entry; +#endif +}; + +struct zxdh_dma_addr { + dma_addr_t cap_dma_addr; + void *cap_cpu_addr; +}; + +struct zxdh_cap_addr_info { + union { + u64 cap_iova_addr; + struct zxdh_dma_addr cap_direct_dma_addr; + } addr_info; + struct zxdh_cap_entry_info entry_info; +}; + +struct zxdh_hw_data_cap_info { + struct zxdh_cap_addr_info cap_txrx_use_iova[CAP_NODE_NUM]; + struct zxdh_cap_addr_info cap_tx_use_direct_dma[CAP_NODE_NUM]; + struct zxdh_cap_addr_info cap_rx_use_direct_dma[CAP_NODE_NUM]; + struct zxdh_cap_addr_info mp_cap; + struct zxdh_cap_addr_info hw_object_mmap; + u64 mp_cap_media_addr_base; + u32 object_buffer_size; +}; + +struct zxdh_eth_info { + struct zxdh_rdma_to_eth_ip_para rdma_to_eth_ip_para; + struct net_device *netdev; + struct hlist_node list; + u32 ip_cfg_ref_cnt; +}; + +struct aeq_stop_cap_work { + struct work_struct work; + struct zxdh_pci_f *rf; +}; + +struct zxdh_device { + struct ib_device ibdev; +#ifndef ZXDH_UAPI_DEF + const struct uverbs_object_tree_def *driver_trees[6]; +#endif + struct zxdh_pci_f *rf; + struct net_device *netdev; + struct net_device *source_netdev; + struct zxdh_handler *hdl; + struct workqueue_struct *cleanup_wq; + struct zxdh_sc_vsi vsi; + struct zxdh_cm_core cm_core; + struct list_head ah_list; + struct mutex ah_list_lock; + struct dev_log_trace trace_switch; + struct zxdh_qp *qp1; + u32 ah_list_cnt; + u32 ah_list_hwm; + u32 roce_cwnd; + u32 roce_ackcreds; + u32 vendor_id; + u32 vendor_part_id; + u32 device_cap_flags; + u32 push_mode; + u32 rcv_wnd; + u16 mac_ip_table_idx; + u16 vsi_num; + u8 mac_addr[ETH_ALEN]; + u8 rcv_wscale; + u8 iw_status; + u8 rd_fence_rate; +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + u64 up_up_map; + u8 cnp_up_override; + u8 iwarp_rtomin; + u32 ceq_intrl; /* Interrupt rate limit per second: 0-disabled, 4237 - 250,000 */ + u8 
up_map_en : 1; + u8 iwarp_dctcp_en : 1; + u8 iwarp_timely_en : 1; + u8 iwarp_bolt_en : 1; + u8 iwarp_ecn_en : 1; + u8 override_rcv_wnd : 1; + u8 override_cwnd : 1; + u8 override_ackcreds : 1; + u8 override_ooo : 1; + u8 override_rtomin : 1; + u8 override_rd_fence_rate : 1; + u8 roce_rtomin; + u8 roce_ecn_en : 1; + u8 roce_timely_en : 1; + u8 roce_no_icrc_en : 1; + u8 roce_dctcp_en : 1; +#endif /* CONFIG_CONFIGFS_FS */ + u8 roce_mode : 1; + u8 roce_dcqcn_en : 1; + u8 dcb_vlan_mode : 1; + u8 iw_ooo : 1; + enum init_completion_state init_state; + struct zxdh_hw_data_cap_info hw_data_cap; + wait_queue_head_t suspend_wq; + struct hlist_head *eth_info_hlist; + struct mutex eth_info_list_mtx_lock; +}; + +struct zxdh_handler { + struct list_head list; + struct zxdh_device *iwdev; + bool shared_res_created; +}; + +struct rdma_sriov_glb_info { + struct pci_dev *pdev; + struct zxdh_pci_f *rf; + u16 rdma_pf_enable; +}; + +static inline struct zxdh_device *to_iwdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct zxdh_device, ibdev); +} + +static inline struct zxdh_ucontext *to_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct zxdh_ucontext, ibucontext); +} + +#ifdef RDMA_MMAP_DB_SUPPORT +static inline struct zxdh_user_mmap_entry * +to_zxdh_mmap_entry(struct rdma_user_mmap_entry *rdma_entry) +{ + return container_of(rdma_entry, struct zxdh_user_mmap_entry, + rdma_entry); +} + +#endif +static inline struct zxdh_pd *to_iwpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct zxdh_pd, ibpd); +} + +static inline struct zxdh_ah *to_iwah(struct ib_ah *ibah) +{ + return container_of(ibah, struct zxdh_ah, ibah); +} + +static inline struct zxdh_mr *to_iwmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct zxdh_mr, ibmr); +} + +static inline struct zxdh_mr *to_iwmw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct zxdh_mr, ibmw); +} + +static inline struct zxdh_cq *to_iwcq(struct ib_cq *ibcq) +{ + return 
container_of(ibcq, struct zxdh_cq, ibcq); +} + +static inline struct zxdh_qp *to_iwqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct zxdh_qp, ibqp); +} + +static inline struct zxdh_pci_f *dev_to_rf(struct zxdh_sc_dev *dev) +{ + return container_of(dev, struct zxdh_pci_f, sc_dev); +} + +/** + * zxdh_alloc_resource_qp - allocate a qp resource + * @iwdev: device pointer + * @resource_array: resource bit array: + * @max_resources: maximum resource number + * @req_resources_num: Allocated resource number + * @next: next free id + * @ret: qp sta value + **/ +static inline int zxdh_alloc_rsrc_qp(struct zxdh_pci_f *rf, + unsigned long *rsrc_array, u32 max_rsrc, + u32 *req_rsrc_num, u32 *next, u8 *ret) +{ + u32 rsrc_num; + u16 i; + unsigned long flags; + + *ret = ZXDH_RDMA_QP_NOT_EXIST; + spin_lock_irqsave(&rf->rsrc_lock, flags); + rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next); + if (rsrc_num >= max_rsrc) { + rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc); + if (rsrc_num >= max_rsrc) { + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + pr_err("ERR: resource [%d] allocation failed\n", + rsrc_num); + return -EOVERFLOW; + } + } + __set_bit(rsrc_num, rsrc_array); + *next = rsrc_num + 1; + if (*next == max_rsrc) + *next = 0; + *req_rsrc_num = rsrc_num; + for (i = 0; i < ZXDH_RDMA_QP_BUF_LEN; i++) { + if (rf->qp_buf[i] == rsrc_num) { + *ret = ZXDH_RDMA_QP_EXIST; + break; + } + } + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + + return 0; +} + +/** + * zxdh_alloc_resource - allocate a resource + * @iwdev: device pointer + * @resource_array: resource bit array: + * @max_resources: maximum resource number + * @req_resources_num: Allocated resource number + * @next: next free id + **/ +static inline int zxdh_alloc_rsrc(struct zxdh_pci_f *rf, + unsigned long *rsrc_array, u32 max_rsrc, + u32 *req_rsrc_num, u32 *next) +{ + u32 rsrc_num; + unsigned long flags; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + rsrc_num = find_next_zero_bit(rsrc_array, 
max_rsrc, *next); + if (rsrc_num >= max_rsrc) { + rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc); + if (rsrc_num >= max_rsrc) { + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + pr_err("ERR: resource [%d] allocation failed\n", + rsrc_num); + return -EOVERFLOW; + } + } + __set_bit(rsrc_num, rsrc_array); + *next = rsrc_num + 1; + if (*next == max_rsrc) + *next = 0; + *req_rsrc_num = rsrc_num; + spin_unlock_irqrestore(&rf->rsrc_lock, flags); + + return 0; +} + +/** + * zxdh_free_resource - free a resource + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to free + **/ +static inline void zxdh_free_rsrc(struct zxdh_pci_f *rf, + unsigned long *rsrc_array, u32 rsrc_num) +{ + unsigned long flags; + + spin_lock_irqsave(&rf->rsrc_lock, flags); + __clear_bit(rsrc_num, rsrc_array); + spin_unlock_irqrestore(&rf->rsrc_lock, flags); +} + +int zxdh_ctrl_init_hw(struct zxdh_pci_f *rf); +void zxdh_ctrl_deinit_hw(struct zxdh_pci_f *rf); +int zxdh_rt_init_hw(struct zxdh_device *iwdev); +void zxdh_rt_deinit_hw(struct zxdh_device *iwdev); +void zxdh_qp_add_ref(struct ib_qp *ibqp); +void zxdh_qp_rem_ref(struct ib_qp *ibqp); +void zxdh_flush_wqes(struct zxdh_qp *iwqp, u32 flush_mask); +struct zxdh_cqp_request *zxdh_alloc_and_get_cqp_request(struct zxdh_cqp *cqp, + bool wait); +void zxdh_free_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request); +void zxdh_put_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request); +u32 zxdh_initialize_hw_rsrc(struct zxdh_pci_f *rf); +void zxdh_port_ibevent(struct zxdh_device *iwdev); +void zxdh_aeq_qp_disconn(struct zxdh_qp *qp); +void zxdh_aeq_process_retry_err(struct zxdh_qp *iwqp); +void zxdh_aeq_process_entry_err(struct zxdh_qp *iwqp); + +bool zxdh_cqp_crit_err(struct zxdh_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, + u16 min_err_code); +int zxdh_check_cqp_cmd(struct cqp_cmds_info *info); +int zxdh_handle_cqp_op(struct zxdh_pci_f *rf, + 
struct zxdh_cqp_request *cqp_request); +int zxdh_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +void zxdh_cq_add_ref(struct ib_cq *ibcq); +void zxdh_cq_rem_ref(struct ib_cq *ibcq); +void zxdh_cq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq); + +void zxdh_cleanup_pending_cqp_op(struct zxdh_pci_f *rf); +int zxdh_hw_modify_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_modify_qp_info *info, bool wait); +int zxdh_qp_suspend_resume(struct zxdh_sc_qp *qp, bool suspend); +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp); +int zxdh_hw_flush_wqes(struct zxdh_pci_f *rf, struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, bool wait); +void zxdh_copy_ip_ntohl(u32 *dst, __be32 *src); +void zxdh_copy_ip_htonl(__be32 *dst, u32 *src); +u16 zxdh_get_vlan_ipv4(u32 *addr); +struct net_device *zxdh_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac); +struct ib_mr *zxdh_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size, int acc, + u64 *iova_start); +int zxdh_upload_qp_context(struct zxdh_qp *iwqp, bool freeze, bool raw); +void zxdh_del_hmc_objects(struct zxdh_sc_dev *dev, + struct zxdh_hmc_info *hmc_info); +void zxdh_del_data_cap_objects(struct zxdh_sc_dev *dev); +void zxdh_cqp_ce_handler(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq); +int zxdh_ah_cqp_op(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, u8 cmd, + bool wait, + void (*callback_fcn)(struct zxdh_cqp_request *cqp_request), + void *cb_param); +void zxdh_gsi_ud_qp_ah_cb(struct zxdh_cqp_request *cqp_request); +bool zxdh_cq_empty(struct zxdh_cq *iwcq); +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +struct zxdh_device *zxdh_get_device_by_name(const char *name); +#endif + +void zxdh_set_rf_user_cfg_params(struct zxdh_pci_f *rf); +void zxdh_handle_internal_error(struct zxdh_pci_f *rf); +void zxdh_add_handler(struct zxdh_handler *hdl); +void zxdh_del_handler(struct zxdh_handler *hdl); +void cqp_compl_worker(struct work_struct *work); +void 
zxdh_stop_cap_worker(struct work_struct *work); +void zxdh_aeq_process_stop_cap(struct zxdh_pci_f *rf); +void zrdma_cleanup_rdma_tools_cfg(struct zxdh_pci_f *rf); +void free_cap_addr(struct zxdh_device *iwdev, struct zxdh_cap_addr_info *cap_addr_info); +int zxdh_manager_init(struct zxdh_pci_f *rf, + struct iidc_core_dev_info *cdev_info); +void zxdh_update_dpp_mac_tbl(struct zxdh_device *iwdev, struct iidc_core_dev_info *cdev_info); +int zxdh_eth_info_hlist_add(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para *ip_para); +int zxdh_eth_info_hlist_delete(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para *ip_para); +void zxdh_eth_info_hlist_display(struct zxdh_device *iwdev); + +struct zxdh_rdma_hb_if { + int32_t (*cfg_rdma_hb_master)(struct net_device *primary_netdev, struct net_device *linux_bond_netdev, bool hb_enable); + int32_t (*cfg_rdma_hb_speed)(struct net_device *netdev, uint32_t bps); +}; + +extern void zxdh_hwbond_register_rdma_ops(struct zxdh_rdma_hb_if *ops); +extern void zxdh_hwbond_unregister_rdma_ops(void); + +int zxdh_set_smmu_invalid(struct zxdh_pci_f *rf); + +#endif /* ZRDMA_MAIN_H */ diff --git a/drivers/infiniband/hw/zrdma/manager.c b/drivers/infiniband/hw/zrdma/manager.c new file mode 100644 index 000000000000..2cb83796447a --- /dev/null +++ b/drivers/infiniband/hw/zrdma/manager.c @@ -0,0 +1,1564 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include + +//#include "/home/chenhuan/code/rdma_dev/zxdh_kernel/incldue/linux/dinghai/dh_cmd.h" +//#include "dh_cmd.h" +#include "iidc.h" +#include "main.h" +#include "manager.h" +#include "icrdma_hw.h" + +u64 zxdh_hw_bar_pages[C_RDMA_HW_BAR_PAGE_NUM] = { 0 }; + +struct zxdh_rdma_hb_if hwbond_ops = { + .cfg_rdma_hb_master = switch_bound_master_netdev, + .cfg_rdma_hb_speed = set_rdma_firmware_speed, +}; + +int dh_rdma_pf_pcie_id_get(struct zxdh_mgr *mgr) +{ + u32 pos = 0; + u8 type = 0; + u16 padding = 0; + struct pci_dev *pdev = 
mgr->pdev; + + for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) { + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), + &type); + + if (type == ZXDH_PCI_CAP_PCI_CFG) { + pci_read_config_word( + pdev, + pos + offsetof(struct zxdh_pf_pci_cap, + padding[0]), + &padding); + mgr->pcie_id = padding; + return 0; + } + } + return -1; +} + +int zxdh_chan_sync_send(struct zxdh_mgr *pmgr, struct zxdh_chan_msg *pmsg, + u32 *pdata, u32 rep_len) +{ + u16 buffer_len = 0; + void *recv_buffer = NULL; + int ret = 0; + u8 *reply_ptr = NULL; + u16 reply_msg_len = 0; + u32 cnt = 0; + + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + if (pmgr == NULL || pmsg == NULL || pdata == NULL) + return -1; + + buffer_len = rep_len + ZXDH_CHAN_REPS_LEN; + recv_buffer = (void *)kmalloc(buffer_len, GFP_KERNEL); + if (recv_buffer == NULL) + return -1; + + in.virt_addr = + (u64)pmgr->pci_hw_addr + ZXDH_BAR_CHAN_OFFSET; //bar空间偏移? 
+ in.payload_addr = pmsg->msg; + in.payload_len = pmsg->msg_len; + + if (!pmgr->ftype) + in.src = MSG_CHAN_END_PF; + else + in.src = MSG_CHAN_END_VF; + + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + + if (0 == dh_rdma_pf_pcie_id_get(pmgr)) + in.src_pcieid = pmgr->pcie_id; + else { + kfree(recv_buffer); + return -1; + } + + result.buffer_len = buffer_len; + result.recv_buffer = recv_buffer; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < ZXDH_BAR_MSG_RETRY_NUM); + + if (ret != 0) { + pr_err("zxdh_bar_chan_sync_msg_send faile, ret=%d cnt=%d\n", ret, cnt); + kfree(recv_buffer); + return -1; + } + + reply_ptr = (u8 *)result.recv_buffer; + if (*reply_ptr == MSG_REP_VALID) { + reply_msg_len = *(u16 *)(reply_ptr + MSG_REP_LEN_OFFSET); + memcpy(pdata, reply_ptr + ZXDH_CHAN_REPS_LEN, + ((reply_msg_len > rep_len) ? rep_len : reply_msg_len)); + kfree(recv_buffer); + return 0; + } + + kfree(recv_buffer); + return 0; +} + +int zxdh_mgr_par_get(struct zxdh_mgr *dh_mgr) +{ + int ret = 0; + + struct zxdh_mgr_msg *cmd = + kzalloc(sizeof(struct zxdh_mgr_msg), GFP_KERNEL); + struct zxdh_chan_msg *pmsg = + kzalloc(sizeof(struct zxdh_chan_msg), GFP_KERNEL); + struct zxdh_mgr_par param; + + if (pmsg == NULL) { + kfree(cmd); + return -ENOMEM; + } + + if (cmd == NULL) { + kfree(pmsg); + return -ENOMEM; + } + + cmd->op_code = 0; + cmd->pf_id = dh_mgr->pf_id; + cmd->vport_vf_id = dh_mgr->vport_vf_id; + cmd->ftype = dh_mgr->ftype; + cmd->ep_id = dh_mgr->ep_id; + + pmsg->msg_len = sizeof(struct zxdh_mgr_msg); + pmsg->msg = (void *)cmd; + + ret = zxdh_chan_sync_send(dh_mgr, pmsg, (void *)&dh_mgr->param, + sizeof(struct zxdh_mgr_par)); + param = dh_mgr->param; + pr_info("mgr cfg param:"); + pr_info("ftype=%d, ep_id=%d, pf_id=%d, max_vf_num=%d, vhca_id=%d, bar_offset=0x%x.\n", + param.ftype, param.ep_id, param.pf_id, param.max_vf_num, + param.vhca_id, 
param.bar_offset); + pr_info("l2d_smmu_addr=0x%llx, vf_id=%d, vhca_id_pf=%d, l2d_smmu_l2_offset=%d.\n", + param.l2d_smmu_addr, param.vf_id, param.vhca_id_pf, + param.l2d_smmu_l2_offset); + pr_info("qp_cnt=%d, cq_cnt=%d, srq_cnt=%d, ceq_cnt=%d, ah_cnt=%d, mr_cnt=%d, pbleq_cnt=%d, pblem_cnt=%d.\n", + param.qp_cnt, param.cq_cnt, param.srq_cnt, param.ceq_cnt, + param.ah_cnt, param.mr_cnt, param.pbleq_cnt, param.pblem_cnt); + pr_info("base_qpn=%d, base_cqn=%d, base_srqn=%d, base_ceqn=%d.\n", + param.base_qpn, param.base_cqn, param.base_srqn, + param.base_ceqn); + pr_info("qp_hmc_base=0x%llx, cq_hmc_base=0x%llx, srq_hmc_base=0x%llx, txwindow_hmc_base=0x%llx.\n", + param.qp_hmc_base, param.cq_hmc_base, param.srq_hmc_base, + param.txwindow_hmc_base); + pr_info("ird_hmc_base=0x%llx,ah_hmc_base=0x%llx,mr_hmc_base=0x%llx,pbleq_hmc_base=0x%llx,pblem_hmc_base=0x%llx.\n", + param.ird_hmc_base, param.ah_hmc_base, param.mr_hmc_base, + param.pbleq_hmc_base, param.pblem_hmc_base); + pr_info("mcode_type=%d, chip_version=%d, max_hw_wq_frags=%d, max_hw_read_sges=%d\n", + param.mcode_type, param.chip_version, param.max_hw_wq_frags, + param.max_hw_read_sges); + + if (ret != 0) { + pr_info("get pf param faile, ret=%d.\n", ret); + kfree(cmd); + kfree(pmsg); + return -EPIPE; + } + + if (param.ftype != dh_mgr->ftype || param.ep_id != dh_mgr->ep_id || + param.pf_id != dh_mgr->pf_id) { + kfree(cmd); + kfree(pmsg); + return -EPIPE; + } + + kfree(cmd); + kfree(pmsg); + + return 0; +} + +static int zxdh_sc_init_hmccnt(struct zxdh_pci_f *rf, + struct zxdh_mgr_par *param) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + u32 hmc_info_mem_size; + + hmc_info_mem_size = + sizeof(struct zxdh_hmc_pble_rsrc) * 2 + + sizeof(struct zxdh_hmc_info) + + (sizeof(struct zxdh_hmc_obj_info) * ZXDH_HMC_IW_MAX); + + rf->hmc_info_mem = kzalloc(hmc_info_mem_size, GFP_KERNEL); + if (!rf->hmc_info_mem) + return -ENOMEM; + + rf->pble_mr_rsrc = (struct zxdh_hmc_pble_rsrc *)rf->hmc_info_mem; + rf->pble_rsrc = (struct 
zxdh_hmc_pble_rsrc *)(rf->pble_mr_rsrc + 1); + dev->hmc_info = &rf->hw.hmc; + dev->hmc_info->hmc_obj = + (struct zxdh_hmc_obj_info *)(rf->pble_rsrc + 1); + + rf->max_rdma_vfs = param->max_vf_num; + dev->hmc_use_dpu_ddr = param->hmc_use_dpu_ddr; + dev->l2d_smmu_addr = param->l2d_smmu_addr; + dev->l2d_smmu_l2_offset = param->l2d_smmu_l2_offset; + + dev->hmc_pf_manager_info.hmc_base = param->qp_hmc_base; + dev->hmc_pf_manager_info.hmc_size = param->pf_hmc_size; + + rf->max_qp = param->qp_cnt; + rf->max_cq = param->cq_cnt; + rf->max_srq = param->srq_cnt; + rf->max_ah = param->ah_cnt; + rf->max_mr = param->mr_cnt; + + if (rf->srq_l2d_base_paddr != 0 && rf->srq_l2d_size != 0) { + if (rf->ftype == FUNCTION_TYPE_PF) { + rf->max_srq = ZXDH_PF_MAX_SRQ_NUM_USE_L2D; + } else { + rf->max_srq = ZXDH_VF_MAX_SRQ_NUM_USE_L2D; + } + } else { + rf->max_srq = 0; + pr_warn("%s[%d]: warning SRQ can not use DDR memory! ep_id=%d pf_id=%d vf_id=%d ftype=%d\n", __func__, __LINE__, + rf->ep_id, rf->pf_id, rf->vf_id, rf->ftype); + } + + dev->max_qp = rf->max_qp; + dev->max_cq = rf->max_cq; + dev->max_srq = rf->max_srq; + dev->base_qpn = param->base_qpn; + dev->base_cqn = param->base_cqn; + dev->base_srqn = param->base_srqn; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].max_cnt = rf->max_cq; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].max_cnt = rf->max_srq; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_IRD].max_cnt = rf->max_qp; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].max_cnt = rf->max_ah; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].max_cnt = param->mr_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].max_cnt = param->pbleq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].max_cnt = param->pblem_cnt; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].base = param->qp_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].base = param->cq_hmc_base; + 
dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].base = param->srq_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW].base = + param->txwindow_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_IRD].base = param->ird_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].base = param->ah_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].base = param->mr_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].base = param->pbleq_hmc_base; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base = + param->pblem_hmc_base; + + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_QP].cnt = param->qp_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt = param->cq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].cnt = param->srq_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_AH].cnt = param->ah_cnt; + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_MR].cnt = param->mr_cnt; + if (rf->sc_dev.ep_id != ZXDH_ZF_EPID || dev->hmc_use_dpu_ddr) { + dev->data_cap_sd.data_cap_base = C_HMC_DATA_CAP_IOVA_BASE; + dev->data_cap_sd.data_len = C_HMC_DATA_CAP_IOVA_LEN; + } + + if (!rf->ftype) { + dev->hmc_pf_manager_info.total_qp_cnt = + param->qp_cnt + param->max_vf_num * param->vf_qp_cnt; + dev->hmc_pf_manager_info.total_cq_cnt = + param->cq_cnt + param->max_vf_num * param->vf_cq_cnt; + dev->hmc_pf_manager_info.total_srq_cnt = + param->srq_cnt + param->max_vf_num * param->vf_srq_cnt; + dev->hmc_pf_manager_info.total_ah_cnt = + param->ah_cnt + param->max_vf_num * param->vf_ah_cnt; + dev->hmc_pf_manager_info.total_mrte_cnt = + param->mr_cnt + param->max_vf_num * param->vf_mr_cnt; + + dev->hmc_pf_manager_info.pf_pblemr_cnt = param->pblem_cnt; + dev->hmc_pf_manager_info.pf_pblequeue_cnt = param->pbleq_cnt; + + dev->hmc_pf_manager_info.vf_qp_cnt = param->vf_qp_cnt; + dev->hmc_pf_manager_info.vf_pblemr_cnt = param->vf_pblem_cnt; + dev->hmc_pf_manager_info.vf_pblequeue_cnt = param->vf_pbleq_cnt; + } + return 0; +} + +static void zxdh_init_hw_bar_pages(u8 ep_id, u64 *base_bar_offset) +{ + int i; + u64 page_bar_offset; + u64 bar_offset_low; + u64 
bar_offset_high; + + if (ep_id == ZXDH_ZF_EPID) { + page_bar_offset = *base_bar_offset; + bar_offset_low = page_bar_offset & 0xFFFF; + bar_offset_high = page_bar_offset & 0xF0000; + *base_bar_offset = bar_offset_low + (bar_offset_high << 4); + } else + page_bar_offset = 0; + + for (i = 0; i < C_RDMA_HW_BAR_PAGE_NUM; i++) { + if (ep_id == ZXDH_ZF_EPID) { + bar_offset_low = page_bar_offset & 0xFFFF; + bar_offset_high = page_bar_offset & 0xF0000; + zxdh_hw_bar_pages[i] = + bar_offset_low + (bar_offset_high << 4); + zxdh_hw_bar_pages[i] -= *base_bar_offset; + } else + zxdh_hw_bar_pages[i] = page_bar_offset; + + page_bar_offset += C_RDMA_HW_BAR_PAGE_SIZE; + } +} + +static int zxdh_pf_dev_exist_for_vf(struct zxdh_pci_f *rf) +{ + uint64_t cqp_status_phy_addr = 0; + uint32_t cqp_status = 0xFFFF; + int ret = 0; + + if (rf->ftype == 1) { + cqp_status_phy_addr = C_RDMA_CQP_STATUS_PHY_ADDR + rf->sc_dev.vhca_id_pf * 0x1000; + ret = zxdh_rdma_reg_read(rf, cqp_status_phy_addr, &cqp_status); + if (ret) { + pr_err("%s[%d]: rdma reg read failed!\n", __func__, __LINE__); + return ret; + } + + pr_info("vf rdma probe: ep_id=%d, pf_id=%d, vf_id=%d, vhca_id=%d, vhca_id_pf=%d, cqp_status=%d\n", + rf->ep_id, rf->pf_id, rf->vf_id, rf->sc_dev.vhca_id, rf->sc_dev.vhca_id_pf, cqp_status); + if (cqp_status != 1) { + pr_err("vf rdma probe: The RDMA device for EP%d PF%d corresponding to VF%d does not exist!\n", rf->ep_id, rf->pf_id, rf->vf_id); + return -ENODEV; + } + } + + return 0; +} + +int zxdh_manager_init(struct zxdh_pci_f *rf, + struct iidc_core_dev_info *cdev_info) +{ + int ret = 0; + struct zxdh_mgr *dh_mgr = kzalloc(sizeof(struct zxdh_mgr), GFP_KERNEL); + + if (dh_mgr == NULL) + return -ENOMEM; + + dh_mgr->pdev = cdev_info->pdev; + dh_mgr->pf_id = rf->pf_id; + dh_mgr->vport_vf_id = (cdev_info->vport_id) & 0xFF; + dh_mgr->ftype = rf->ftype; + dh_mgr->ep_id = rf->sc_dev.ep_id; + + dh_mgr->device_id = cdev_info->pdev->subsystem_device; + dh_mgr->pci_hw_addr = cdev_info->hw_addr; + + ret = 
zxdh_mgr_par_get(dh_mgr); + if (ret != 0) { + kfree(dh_mgr); + pr_info("dh_rdma_mgr_par_get faile.\n"); + return ret; + } + + pr_info("manager pcie_id=0x%x, device_id=0x%x, slot_id=0x%x\n", dh_mgr->pcie_id, dh_mgr->device_id, cdev_info->slot_id); + rf->pcie_id = dh_mgr->pcie_id; + rf->vf_id = dh_mgr->param.vf_id; + rf->sc_dev.vf_id = dh_mgr->param.vf_id; + rf->sc_dev.vhca_id = dh_mgr->param.vhca_id; + rf->sc_dev.vhca_id_pf = dh_mgr->param.vhca_id_pf; + if (rf->sc_dev.vhca_id == 1023) { + kfree(dh_mgr); + pr_info("vhca_id:1023 invalid\n"); + return -1; + } + + ret = zxdh_pf_dev_exist_for_vf(rf); + if (ret != 0) { + kfree(dh_mgr); + return -1; + } + + rf->sc_dev.hmc_fn_id = dh_mgr->param.hmc_sid; + rf->sc_dev.total_vhca = dh_mgr->param.dh_total_vhca; + rf->sc_dev.np_mode_low_lat = dh_mgr->param.np_mode_low_lat; + rf->sc_dev.chip_version = dh_mgr->param.chip_version; + + rf->sc_dev.nof_ioq_ddr_addr = dh_mgr->param.nof_ioq_ddr_addr; + rf->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags = + dh_mgr->param.max_hw_wq_frags; + rf->sc_dev.hw_attrs.uk_attrs.max_hw_read_sges = + dh_mgr->param.max_hw_read_sges; + //VHCA_RC_UD_GQP_MAX_CNT + rf->sc_dev.vhca_gqp_start = dh_mgr->param.vhca_gqp_start; + rf->sc_dev.vhca_gqp_cnt = dh_mgr->param.vhca_gqp_cnt; + rf->sc_dev.vhca_8k_index_start = dh_mgr->param.vhca_8k_index_start; + //VHCA_RC_UD_8K_MAX_CNT + rf->sc_dev.vhca_8k_index_cnt = dh_mgr->param.vhca_8k_index_cnt; + rf->sc_dev.vhca_ud_gqp = dh_mgr->param.vhca_ud_gqp; + rf->sc_dev.vhca_ud_8k_index = dh_mgr->param.vhca_ud_8k_index; + + pr_info("vhca_gqp_start:0x%x\n", rf->sc_dev.vhca_gqp_start); + pr_info("vhca_gqp_cnt:0x%x\n", rf->sc_dev.vhca_gqp_cnt); + pr_info("vhca_8k_index_start:0x%x\n", rf->sc_dev.vhca_8k_index_start); + pr_info("vhca_8k_index_cnt:0x%x\n", rf->sc_dev.vhca_8k_index_cnt); + pr_info("vhca_ud_gqp:0x%x\n", rf->sc_dev.vhca_ud_gqp); + pr_info("vhca_ud_8k_index:0x%x\n", rf->sc_dev.vhca_ud_8k_index); + + rf->base_bar_offset = dh_mgr->param.bar_offset; + 
zxdh_init_hw_bar_pages(rf->sc_dev.ep_id, &rf->base_bar_offset); + rf->hw.hw_addr = cdev_info->hw_addr + rf->base_bar_offset; + pr_info("rf->hw.hw_addr=0x%llx, cdev_info->hw_addr=0x%llx, rf->base_bar_offset=0x%llx\n", + (u64)(uintptr_t)rf->hw.hw_addr, + (u64)(uintptr_t)cdev_info->hw_addr, + rf->base_bar_offset); + + ret = zxdh_sc_init_hmccnt(rf, &dh_mgr->param); + if (ret != 0) { + kfree(dh_mgr); + pr_info("init_hmccnt faile.\n"); + return ret; + } + + rf->sc_dev.max_ceqs = dh_mgr->param.ceq_cnt; + rf->sc_dev.base_ceqn = dh_mgr->param.base_ceqn; + + rf->msix_count = min(rf->msix_count, (rf->sc_dev.max_ceqs + 1)); + if (rf->msix_count > 1) + rf->sc_dev.max_ceqs = (rf->msix_count - 1); + else + rf->sc_dev.max_ceqs = rf->msix_count; + if (rf->msix_count == 0) { + kfree(dh_mgr); + pr_info("misx_count is 0\n"); + return -EINVAL; + } + rf->mcode_type = dh_mgr->param.mcode_type; + kfree(dh_mgr); + + return 0; +} + +/*** + * @brief send general rdma message to firmware + * + * @param rf + * @param para includes infos of buffers to send and receive + * @return int + * @retval 0 on success + * @retval -EINVAL when input arguments are invalid + * @retval -ENOMEM when alloc buffer fails + * @retval -EPROTO when bar message send/recv fails + */ +int rdma_chan_msg_send(struct zxdh_pci_f *rf, struct rdma_chan_msg_para *para) +{ + int ret = 0; + uint8_t *rep_ptr; + uint16_t rep_len = 0; + uint8_t rep_valid = 0; + size_t recv_len = 0; + void *recv_buffer = NULL; + + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct iidc_core_dev_info *cdev_info = NULL; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!para || !rf) + return -EINVAL; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + if (!(para->in_buf) || !(para->out_buf)) + return -EINVAL; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = 
dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + // malloc recv buffer with extra ZXDH_CHAN_REPS_LEN size + recv_len = ZXDH_CHAN_REPS_LEN + para->out_size; + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // get message preparation + in.payload_addr = (void *)para->in_buf; + in.payload_len = para->in_size; + + in.src = rf->ftype == 0 ? MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (uint64_t)(cdev_info->hw_addr) + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != para->out_size) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + memcpy(para->out_buf, rep_ptr + ZXDH_CHAN_REPS_LEN, rep_len); + kfree(recv_buffer); + return 0; +} + +/*** + * @brief read register value from rdma + * + * @param rf for accessing hardware info + * @param phy_addr physical address on rdma. 
registuer width is uint32_t + * @param outdata + * @return int + * - 0: ok + * - -1: failed + */ +int zxdh_rdma_reg_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + struct zxdh_reg_read_cmd *read_cmd; + size_t recv_len; + void *recv_buffer; + struct dh_rdma_reg_read_resp *read_resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf || !outdata) + return -EINVAL; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + read_cmd = (struct zxdh_reg_read_cmd *)kzalloc( + sizeof(struct zxdh_reg_read_cmd), GFP_KERNEL); + if (!read_cmd) + return -ENOMEM; + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_reg_read_resp) + + 1 * sizeof(uint32_t); // data + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) { + kfree(read_cmd); + return -ENOMEM; + } + + // commnad preparation + read_cmd->op_code = RDMA_REG_READ; + read_cmd->req.phy_addr = phy_addr; + read_cmd->req.reg_num = 1; + + // send message preparation + in.payload_addr = (void *)read_cmd; + in.payload_len = sizeof(struct zxdh_reg_read_cmd); + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + kfree(read_cmd); + + if (ret) { + pr_err("[%s] message send failed, ret=%d\n", __func__, ret); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + read_resp = + (struct dh_rdma_reg_read_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (read_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, read_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + *outdata = read_resp->data[0]; + + kfree(recv_buffer); + return 0; +} + +int zxdh_rdma_regs_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata, uint32_t num) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + struct zxdh_reg_read_cmd *read_cmd; + size_t recv_len; + void *recv_buffer; + struct dh_rdma_reg_read_resp *read_resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf || !outdata) + return -EINVAL; + if 
(rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + read_cmd = (struct zxdh_reg_read_cmd *)kzalloc( + sizeof(struct zxdh_reg_read_cmd), GFP_KERNEL); + if (!read_cmd) + return -ENOMEM; + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_reg_read_resp) + + num * sizeof(uint32_t); // data + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) { + kfree(read_cmd); + return -ENOMEM; + } + + // commnad preparation + read_cmd->op_code = RDMA_REG_READ; + read_cmd->req.phy_addr = phy_addr; + read_cmd->req.reg_num = num; + + // send message preparation + in.payload_addr = (void *)read_cmd; + in.payload_len = sizeof(struct zxdh_reg_read_cmd); + in.src = rf->ftype == 0 ? MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + kfree(read_cmd); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", 
__func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + read_resp = + (struct dh_rdma_reg_read_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (read_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, read_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + memcpy(outdata, read_resp->data, num * sizeof(uint32_t)); + + kfree(recv_buffer); + return 0; +} + +int zxdh_rdma_reg_write(struct zxdh_pci_f *rf, uint64_t phy_addr, uint32_t val) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t write_cmd_len; + size_t recv_len; + void *recv_buffer; + struct zxdh_reg_write_cmd *write_cmd; + struct dh_rdma_reg_write_resp *write_resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf) + return -EINVAL; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + write_cmd_len = + sizeof(struct zxdh_reg_write_cmd) + 1 * sizeof(uint32_t); + write_cmd = + (struct zxdh_reg_write_cmd *)kzalloc(write_cmd_len, GFP_KERNEL); + if (!write_cmd) + return -ENOMEM; + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_reg_write_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) { + kfree(write_cmd); + return -ENOMEM; + } + + // commnad preparation + write_cmd->op_code = RDMA_REG_WRITE; + write_cmd->req.phy_addr = phy_addr; + write_cmd->req.reg_num = 1; + write_cmd->req.data[0] = val; + + // send message preparation + in.payload_addr = (void *)write_cmd; + in.payload_len = write_cmd_len; + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + pr_info("[%s] cnt:%d ret:%d\n", __func__, cnt, ret); + } while (cnt < cnt_num); + + kfree(write_cmd); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + write_resp = + (struct dh_rdma_reg_write_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (write_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, write_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + kfree(recv_buffer); + return 0; +} + +int zxdh_mp_dtcm_para_get(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t *outdata) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mp_dtcm_para_get_cmd get_cmd = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t recv_len; + void *recv_buffer; + struct dh_mp_dtcm_para_get_resp *get_resp; + u32 cnt = 0; + u32 cnt_num = 
ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf || !outdata) + return -EINVAL; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_mp_dtcm_para_get_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + get_cmd.op_code = RDMA_MP_DTCM_PARA_GET; + get_cmd.req.mcode_type = mcode_type; + get_cmd.req.para_id = para_id; + + // get message preparation + in.payload_addr = (void *)&get_cmd; + in.payload_len = sizeof(struct zxdh_mp_dtcm_para_get_cmd); + in.src = rf->ftype == 0 ? MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + get_resp = (struct dh_mp_dtcm_para_get_resp *)(rep_ptr + + 
ZXDH_CHAN_REPS_LEN); + if (get_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, get_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: para_id=%d val=%d\n", get_resp->para_id, get_resp->val); + + *outdata = get_resp->val; + kfree(recv_buffer); + return 0; +} + +int zxdh_mp_dtcm_para_set(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t val) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint8_t *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_mp_dtcm_para_set_cmd set_cmd = { 0 }; + struct iidc_core_dev_info *cdev_info; + size_t recv_len; + void *recv_buffer; + struct dh_mp_dtcm_para_set_resp *set_resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf) + return -EINVAL; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_mp_dtcm_para_set_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + set_cmd.op_code = RDMA_MP_DTCM_PARA_SET; + set_cmd.req.mcode_type = mcode_type; + set_cmd.req.para_id = para_id; + set_cmd.req.val = val; + + // get message preparation + in.payload_addr = (void *)&set_cmd; + in.payload_len = sizeof(struct zxdh_mp_dtcm_para_set_cmd); + in.src = rf->ftype == 0 ? 
MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)(uintptr_t)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + set_resp = (struct dh_mp_dtcm_para_set_resp *)(rep_ptr + + ZXDH_CHAN_REPS_LEN); + if (set_resp->status_code != 200) { + pr_err("[%s] response status invalid, statuc_code=0x%x\n", + __func__, set_resp->status_code); + kfree(recv_buffer); + return -EPROTO; + } + + pr_info("resp: para_id=%d\n", para_id); + kfree(recv_buffer); + + return 0; +} + +static void clean_bond_old_gid(struct ib_device *ibdev, struct net_device *primary_netdev) +{ + struct ib_port_attr attr; + const struct ib_gid_attr *gid_attr; + struct net_device *ndev; + int err; + int i; + err = ib_query_port(ibdev, 1, &attr); + if (err) + return; + + for (i = 0; i < attr.gid_tbl_len; i++) { + gid_attr = rdma_get_gid_attr(ibdev, 1, i); + if (IS_ERR(gid_attr)) + continue; +#ifndef IB_READ_GID_ATTRIBUTE_NETDEVICE_NOT_DEFINE + rcu_read_lock(); + ndev = rdma_read_gid_attr_ndev_rcu(gid_attr); + if (IS_ERR(ndev)) { + rcu_read_unlock(); + continue; + } + 
rcu_read_unlock(); +#else + ndev = gid_attr->ndev; +#endif + + if (ndev != NULL && ndev == primary_netdev) { + pr_info("[rdma_bond] clean bond old gid ndev name=%s primary_netdev name=%s i=%d", ndev->name, primary_netdev->name, i); + pr_info("[rdma_bond] gid.subnet_prefix=0x%llx, gid=%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x", gid_attr->gid.global.subnet_prefix, + gid_attr->gid.raw[0], gid_attr->gid.raw[1], gid_attr->gid.raw[2], gid_attr->gid.raw[3], + gid_attr->gid.raw[4], gid_attr->gid.raw[5], gid_attr->gid.raw[6], gid_attr->gid.raw[7], + gid_attr->gid.raw[8], gid_attr->gid.raw[9], gid_attr->gid.raw[10], gid_attr->gid.raw[11], + gid_attr->gid.raw[12], gid_attr->gid.raw[13], gid_attr->gid.raw[14], gid_attr->gid.raw[15]); + rdma_put_gid_attr(gid_attr); + } else if (ndev != NULL && primary_netdev != NULL) { + pr_info("[rdma_bond] ndev and primary_netdev not equal: ndev name=%s primary_netdev name=%s i=%d", ndev->name, primary_netdev->name, i); + } + rdma_put_gid_attr(gid_attr); + } +} + +int32_t switch_bound_master_netdev(struct net_device *primary_netdev, struct net_device *linux_bond_netdev, bool hb_enable) +{ + struct zxdh_device *iwdev = NULL; + struct ib_device *ibdev; + struct zxdh_pci_f *rf; + struct net_device *old_netdev; + struct net_device *new_netdev; +#ifdef NETDEV_TO_IBDEV_SUPPORT + int ret; +#endif + if (!primary_netdev || !linux_bond_netdev) { + pr_err("[rdma_bond] primary_netdev or linux_bond_netdev is NULL.\n"); + return -1; + } + + pr_info("[rdma_bond] primary:%s bond:%s hb_enable:%d\n", primary_netdev->name, linux_bond_netdev->name, hb_enable); + + if (hb_enable) { + old_netdev = primary_netdev; + new_netdev = linux_bond_netdev; + } else { + old_netdev = linux_bond_netdev; + new_netdev = primary_netdev; + } + + ibdev = ib_device_get_by_netdev(old_netdev, RDMA_DRIVER_ZXDH); + if (!ibdev) { + pr_err("[rdma_bond] get ib device by netdev failed.\n"); + return -1; + } + iwdev = to_iwdev(ibdev); + if (!iwdev) { + 
pr_err("[rdma_bond] ibdev to iwdev failed.\n"); + ib_device_put(ibdev); + return -1; + } + clean_bond_old_gid(ibdev, old_netdev); + + rf = iwdev->rf; + if (!rf) { + pr_err("[rdma_bond] rf is NULL\n"); + ib_device_put(ibdev); + return -1; + } + + if (rf->sc_dev.hw_attrs.self_health == true) { + pr_err("[rdma_bond] self_health is true\n"); + ib_device_put(ibdev); + return -1; + } + + pr_info("[rdma_bond] %s ==> %s\n", old_netdev->name, new_netdev->name); +#ifdef NETDEV_TO_IBDEV_SUPPORT + pr_info("[rdma_bond] NETDEV_TO_IBDEV_SUPPORT is defined\n"); + ret = ib_device_set_netdev(ibdev, new_netdev, 1); + if (ret) { + pr_err("[rdma_bond] ib device set netdev error, ret=%d\n", ret); + ib_device_put(ibdev); + return -1; + } + iwdev->netdev = new_netdev; +#else + pr_info("[rdma_bond] NETDEV_TO_IBDEV_SUPPORT is not defined\n"); + iwdev->netdev = new_netdev; +#endif + rdma_roce_rescan_device(ibdev); + + zxdh_update_dpp_mac_tbl(iwdev, iwdev->rf->cdev); + pr_info("[rdma_bond] update dpp mac tbl\n"); + create_debugfs_default_entry(rf, ZRDMA_DEBUGFS_MODE_BOND); + ib_device_put(ibdev); + return 0; +} + +/*** + * @brief Set the rdma speed to firmware, triggering speed reconfiguration. 
+ * + * @param netdev zxdh_net device + * @param bps for speed + * @param speed_valid boolean + * @return int32_t + */ +int32_t set_rdma_firmware_speed(struct net_device *netdev, uint32_t bps) +{ + int ret = 0; + uint32_t status_code = 0; + struct zxdh_device *iwdev; + struct ib_device *ibdev; + struct zxdh_pci_f *rf; + struct zxdh_hwbond_speed_set_cmd cmd = { 0 }; + struct rdma_chan_msg_para para = { 0 }; + + pr_info("[rdma_bond] new speed: %d\n", bps); + ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_ZXDH); + if (!ibdev) + return -1; + iwdev = to_iwdev(ibdev); + if (!iwdev) { + ib_device_put(ibdev); + return -1; + } + rf = iwdev->rf; + if (!rf) { + ib_device_put(ibdev); + return -1; + } + + if (rf->sc_dev.hw_attrs.self_health == true) { + ib_device_put(ibdev); + return -1; + } + + cmd.op_code = RDMA_HWBOND_SPEED_SET; + cmd.req.speed = bps; + cmd.req.speed_valid = TRUE; + + para.in_buf = (uint8_t *)&cmd; + para.in_size = sizeof(cmd); + para.out_buf = (uint8_t *)&status_code; + para.out_size = sizeof(status_code); + + ret = rdma_chan_msg_send(rf, ¶); + if (ret) { + pr_info("[%s] send msg failed, ret:%d", __func__, ret); + ib_device_put(ibdev); + return -1; + } + + if (status_code != 200) { + pr_info("[%s] status code not ok, status:%d", __func__, + status_code); + ib_device_put(ibdev); + return -1; + } + + ib_device_put(ibdev); + return 0; +} + +int set_rdma_vf_num(struct zxdh_rdma_sriov_event_info *sriov_info, u64 *vf_pblem_cnt) +{ + int ret = 0; + uint8_t *rep_ptr; + uint16_t rep_len = 0; + uint8_t rep_valid = 0; + size_t recv_len = 0; + void *recv_buffer = NULL; + + struct zxdh_rdma_vf_num_set_cmd cmd = { 0 }; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct dh_rdma_vf_num_set_resp *resp; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + mgr.pdev = sriov_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, 
ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct dh_rdma_vf_num_set_resp); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + cmd.op_code = RDMA_VFS_NUM_SET; + cmd.req.ep_id = (sriov_info->vport_id >> 12) & 0x7; + cmd.req.pf_id = (sriov_info->vport_id >> 8) & 0x7; + cmd.req.num_vfs = sriov_info->num_vfs; + + in.payload_addr = (void *)&cmd; + in.payload_len = sizeof(cmd); + + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = sriov_info->bar0_virt_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -EPROTO; + } + + resp = (struct dh_rdma_vf_num_set_resp *)(rep_ptr + ZXDH_CHAN_REPS_LEN); + if (resp->status_code != 200) { + pr_info("[%s] status code not ok, status:%d", __func__, + resp->status_code); + return -EPROTO; + } + + *vf_pblem_cnt = resp->vf_pblem_cnt; + kfree(recv_buffer); + return 0; +} + +static int zxdh_data_check_sum(u8 *buf, u8 check_sum, u32 buf_len) +{ + u8 sum = 0; + u32 i; + + if (buf == NULL) + return -ENOMEM; + for (i = 0; i < buf_len; i++) + sum += buf[i]; + + if (sum 
!= check_sum) + return -EINVAL; + + return 0; +} + +static int zxdh_resp_msg_check(u8 *buf, u32 buf_len) +{ + u32 len; + + if (buf == NULL) + return -ENOMEM; + if ((buf_len > ZXDH_RESP_MSG_LEN) || (buf_len < ZXDH_MSG_MIN_LEN)) + return -ERANGE; + if (buf[0] != ZXDH_VER_HEADER_H) + return -EINVAL; + if (buf[1] != ZXDH_VER_HEADER_L) + return -EINVAL; + + len = buf[2] + 3; + return zxdh_data_check_sum(&buf[3], buf[len], buf[2]); +} + +int zxdh_req_cmd_ver(struct zxdh_pci_f *rf) +{ + int ret = 0; + uint8_t rep_valid = 0; + uint16_t rep_len = 0; + uint32_t msg_len = 0; + uint8_t *rep_ptr; + struct zxdh_mgr mgr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct iidc_core_dev_info *cdev_info; + struct zxdh_req_msg req_msg = { 0 }; + struct zxdh_resp_msg *resp_msg; + size_t recv_len; + void *recv_buffer; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (!rf) + return -ENOMEM; + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + rf->sc_dev.flr_query = 0; + /*The default version number is 0, and subsequent modified versions will be incremented by 1. 
+ The storage location of the version number is defined by oneself.*/ + memset(rf->ver_buf, 0, ZXDH_RDMA_VER_LEN * sizeof(u8)); + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + + // query pcie id + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + recv_len = ZXDH_CHAN_REPS_LEN + sizeof(struct zxdh_resp_msg); + recv_buffer = (void *)kzalloc(recv_len, GFP_KERNEL); + if (!recv_buffer) + return -ENOMEM; + + // commnad preparation + req_msg.op_code = RDMA_REQ_VER; + req_msg.buf[0] = ZXDH_VER_HEADER_H; + req_msg.buf[1] = ZXDH_VER_HEADER_L; + req_msg.buf[2] = 1; + req_msg.buf[3] = 1; + req_msg.buf[4] = 1; + + // get message preparation + in.payload_addr = (void *)&req_msg; + in.payload_len = sizeof(struct zxdh_req_msg); + in.src = rf->ftype == 0 ? MSG_CHAN_END_PF : MSG_CHAN_END_VF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RDMA; + in.virt_addr = (u64)cdev_info->hw_addr + ZXDH_BAR_CHAN_OFFSET; + in.src_pcieid = mgr.pcie_id; + + // resv buffer preparation + result.recv_buffer = recv_buffer; + result.buffer_len = recv_len; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret) { + pr_err("[%s] message send failed, ret=%d cnt=%d\n", __func__, ret, cnt); + kfree(recv_buffer); + return -EPROTO; + } + + rep_ptr = (uint8_t *)recv_buffer; + rep_valid = *rep_ptr; + if (rep_valid != MSG_REP_VALID) { + pr_err("[%s] response message invalid, rep_valid=0x%x\n", + __func__, rep_valid); + kfree(recv_buffer); + return -EPROTO; + } + + rep_len = *(uint16_t *)(rep_ptr + MSG_REP_LEN_OFFSET); + if (rep_len != recv_len - ZXDH_CHAN_REPS_LEN) { + pr_err("[%s] response length invalid, rep_len=0x%x\n", __func__, + rep_len); + kfree(recv_buffer); + return -ERANGE; + } + + resp_msg = (struct zxdh_resp_msg *)(rep_ptr + 
ZXDH_CHAN_REPS_LEN); + if (resp_msg->op_code != RDMA_RESP_VER) { + pr_err("[%s] response op code invalid, op_code=0x%x\n", + __func__, resp_msg->op_code); + kfree(recv_buffer); + return -EPROTO; + } + + msg_len = resp_msg->buf[2] + 4; + ret = zxdh_resp_msg_check(resp_msg->buf, msg_len); + if (ret == 0) { + pr_info("[%s] rdma get ver cfg success!\n", __func__); + if (resp_msg->buf[2] > ZXDH_RDMA_VER_LEN) + memcpy(rf->ver_buf, &resp_msg->buf[3], ZXDH_RDMA_VER_LEN * sizeof(u8)); + else + memcpy(rf->ver_buf, &resp_msg->buf[3], resp_msg->buf[2] * sizeof(u8)); + rf->sc_dev.flr_query = rf->ver_buf[0]; + } + kfree(recv_buffer); + + return ret; +} + +notify_remote_ip_update remote_ip_update_hook = NULL; +int register_remote_ip_event_handler(notify_remote_ip_update handler) +{ + if (!handler) { + pr_err("Failed to register: handler is NULL.\n"); + return -EINVAL; + } + + remote_ip_update_hook = handler; + pr_info("Event handler registered successfully.\n"); + return 0; +} +EXPORT_SYMBOL(register_remote_ip_event_handler); + +void unregister_remote_ip_event_handler(void) +{ + pr_info("double plane: unregister_remote_ip_event_handler\n"); + remote_ip_update_hook = NULL; +} +EXPORT_SYMBOL(unregister_remote_ip_event_handler); + +void rdma_update_remote_ip(struct zxdh_rdma_to_eth_ip_para *info) +{ + char s_straddr[INET6_ADDRSTRLEN + 20]; + char d_straddr[INET6_ADDRSTRLEN + 20]; + + if (info->ipv4 == true) { + sprintf(s_straddr, "%pI4", &info->src_ip[3]); + sprintf(d_straddr, "%pI4", &info->dst_ip[3]); + } else { + sprintf(s_straddr, "%pI6", info->src_ip); + sprintf(d_straddr, "%pI6", info->dst_ip); + } + + pr_info("%s[%d]: ipv4=%d, name=%s, s_straddr=%s, d_straddr=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_mac=0x%llx, dst_mac=0x%llx, mode=%d, linked_fid=0x%x\n", + __func__, __LINE__, info->ipv4, info->ifname, s_straddr, d_straddr, info->src_ip[0], info->src_ip[1], info->src_ip[2], info->src_ip[3], + info->dst_ip[0], info->dst_ip[1], info->dst_ip[2], 
info->dst_ip[3], info->src_mac, info->dst_mac, info->mode, info->linked_fid); + if (remote_ip_update_hook) + remote_ip_update_hook(info); +} diff --git a/drivers/infiniband/hw/zrdma/manager.h b/drivers/infiniband/hw/zrdma/manager.h new file mode 100644 index 000000000000..b05134c225c0 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/manager.h @@ -0,0 +1,481 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _MGR_H +#define _MGR_H + +/* pcie function_id */ +#define VF_ID_START_BIT 0 +#define PF_ID_START_BIT 13 +#define BAR_NUM_START_BIT 20 +#define EP_ID_START_BIT 23 +#define FUNCTION_TYPE_START_BIT 27 +#define SCENE_CODE_START_BIT 28 + +#define VF_ID_OFFSET (PF_ID_START_BIT - VF_ID_START_BIT) // 13 +#define PF_ID_OFFSET (BAR_NUM_START_BIT - PF_ID_START_BIT) // 7 +#define BAR_NUM_OFFSET (EP_ID_START_BIT - BAR_NUM_START_BIT) // 3 +#define EP_ID_OFFSET (FUNCTION_TYPE_START_BIT - EP_ID_START_BIT) // 4 +#define FUNCTION_TYPE_OFFSET (SCENE_CODE_START_BIT - FUNCTION_TYPE_START_BIT) // 1 +#define SCENE_CODE_OFFSET (32 - SCENE_CODE_START_BIT) // 4 + +/* filed: VF_ID/PF_ID/BAR_NUM/EP_ID/FUNCTION_TYPE/SCENE_CODE */ +#define DH_FUNC_ID_EXTRACT(data, filed) \ + (((data & (~((1UL << filed##_START_BIT) - 1))) & \ + ((1UL << (filed##_START_BIT + filed##_OFFSET)) - 1)) >> \ + filed##_START_BIT) + +#define DH_FUNC_ID_GEN(type, ep, bar, pf, vf) \ + (((type & ((1 << FUNCTION_TYPE_OFFSET) - 1)) << FUNCTION_TYPE_START_BIT) | ((ep & ((1 << EP_ID_OFFSET) - 1)) << EP_ID_START_BIT) | ((bar & ((1 << BAR_NUM_OFFSET) - 1)) << BAR_NUM_START_BIT) | ((pf & ((1 << PF_ID_OFFSET) - 1)) << PF_ID_START_BIT) | ((vf & ((1 << VF_ID_OFFSET) - 1)))) + +/* bar msg status */ +#define BAR_MSG_STATUS_OK (200) +#define BAR_MSG_STATUS_REQ_ERR (400) +#define BAR_MSG_STATUS_RESP_ERR (500) + +/* Common configuration */ +#define ZXDH_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define ZXDH_PCI_CAP_NOTIFY_CFG 2 +/* ISR access */ +#define 
ZXDH_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define ZXDH_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define ZXDH_PCI_CAP_PCI_CFG 5 + +#define ZXDH_ZF_EPID 4 +#define ZXDH_BAR_CHAN_OFFSET 0x2000 +#define ZXDH_CHAN_REPS_LEN 4 +#define MSG_REP_VALID 0xff +#define MSG_REP_LEN_OFFSET 1 + +// #define MSG_CHAN_END_PF 1 +// #define MSG_CHAN_END_VF 2 +// #define MSG_CHAN_END_RISC 3 + +#define MODULE_RDMA 4 + +#define RDMA_MGR_INIT (0) +#define RDMA_REG_READ (1) +#define RDMA_REG_WRITE (2) +#define RDMA_MP_DTCM_PARA_GET (3) +#define RDMA_MP_DTCM_PARA_SET (4) +#define RDMA_HWBOND_SPEED_SET (5) +#define RDMA_REQ_VER (6) +#define RDMA_RESP_VER (7) +#define GET_SRQ_L2D_ADDR (8) +#define RDMA_VFS_NUM_SET (9) + +#define ZXDH_REQ_MSG_LEN 15 +#define ZXDH_RESP_MSG_LEN 80 +#define ZXDH_MSG_MIN_LEN 5 +#define ZXDH_VER_HEADER_H 0xAA +#define ZXDH_VER_HEADER_L 0x55 + +#define RDMA_DEL_REMOTE_IP 0 +#define RDMA_ADD_REMOTE_IP 1 + +enum BAR_DRIVER_TYPE { + MSG_CHAN_END_MPF = 0, + MSG_CHAN_END_PF, + MSG_CHAN_END_VF, + MSG_CHAN_END_RISC, + MSG_CHAN_END_ERR, +}; + +struct zxdh_pci_bar_msg { + uint64_t virt_addr; /**< 4k空间地址, 若src为MPF该参数不生效>**/ + void *payload_addr; /**< 消息净荷地址>**/ + uint16_t payload_len; /**< 消息净荷长度>**/ + uint16_t emec; /**< 消息紧急类型>**/ + uint16_t src; /**< 消息发送源,参考BAR_DRIVER_TYPE>**/ + uint16_t dst; /**< 消息接收者,参考BAR_DRIVER_TYPE>**/ + uint32_t event_id; /**< 事件id>**/ + uint16_t src_pcieid; /**< 源 pcie_id>**/ + uint16_t dst_pcieid; /**< 目的pcie_id>**/ +}; + +struct zxdh_msg_recviver_mem { + void *recv_buffer; /**< 消息接收缓存>**/ + uint16_t buffer_len; /**< 消息缓存长度>**/ +}; + +/* This is the PCI capability header: */ +struct zxdh_pf_pci_cap { + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. */ + __u8 bar; /* Where to find it. 
*/ + __u8 id; /* Multiple capabilities of the same type */ + __u8 padding[2]; /* Pad to full dword. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length of the structure, in bytes. */ +}; + +struct dh_rdma_board_glb_cfg { + u32 cqp_size; //cqp队列深度 + u32 qp_size; //qp队列深度 + u32 cq_size; //cq队列深度 + u32 ceq_size; //ceq队列深度 + u32 srq_size; //srq队列深度 + u32 wr_cnt; //workrequest个数 + u32 sge_cnt; //sge个数 +}; + +struct dh_rdma_vf_param { + u32 vf_id; + u32 vf_vhca_id; + u32 pf_id; + u32 pf_vhca_id; + + u32 vf_bar_offset; //bar空间地址偏移 + + u32 qp_cnt; // 每个vhca的QP数 + u32 cq_cnt; // 每个vhca的CQ数 + u32 srq_cnt; // 每个vhca的SRQ数 + u32 ceq_cnt; // 每个vhca的CEQ数 + u32 ah_cnt; // 每个vhca的AH数 + + u32 qp_id_min; // QP最小队列编号 + u32 cq_id_min; // CQ最小队列编号 + u32 ceq_id_min; // CEQ最小队列编号 + u32 srq_id_min; // SRQ最小队列编号 +}; + +//pf需要知道自己实际能用的最大队列,已经需要分配的最大队列数(包括vf需要用的队列) +struct dh_rdma_pf_param { + u8 pf_id; // 当前EP下该PF的ID + u32 max_vf_num; // 当前PF下VF的最大数量 + u8 sid; // PF的SID(0~31) + //u8 has_vf; // 判断该PF有没有VF,没有为0,有为1 + u32 vhca_id; // vhca ID + + u32 pf_bar_offset; //bar空间地址偏移 + + u32 qp_cnt; // 每个vhca的QP数 + u32 cq_cnt; // 每个vhca的CQ数 + u32 srq_cnt; // 每个vhca的SRQ数 + u32 ceq_cnt; // 每个vhca的CEQ数 + //u32 aeq_cnt; // 每个vhca的AEQ数 + u32 ah_cnt; // 每个vhca的AH数 + + u32 qp_id_min; // QP最小队列编号 + u32 cq_id_min; // CQ最小队列编号 + u32 ceq_id_min; // CEQ最小队列编号 + u32 srq_id_min; // SRQ最小队列编号 + + u32 assign_qp_cnt; //pf分配使用的QP数 + u32 assign_cq_cnt; //pf分配使用的CQ数 + u32 assign_ceq_cnt; //pf分配使用的CEQ数 + u32 assign_srq_cnt; //pf分配使用的SRQ数 + + u32 qp_size; // QP队列深度 + u32 cq_size; // CQ队列深度 + u32 ceq_size; // CEQ队列深度 + u32 aeq_size; // AEQ队列深度 + u32 srq_size; // SRQ队列深度 +}; + +struct zxdh_mgr_par { + u16 ftype; + u16 ep_id; + u16 pf_id; + u16 vf_id; + u32 bar_offset; + u32 l2d_smmu_l2_offset; + u64 l2d_smmu_addr; + u64 nof_ioq_ddr_addr; + + u16 vhca_id; + u16 vhca_id_pf; + u32 max_vf_num; + + u32 qp_cnt; + u32 cq_cnt; + u32 srq_cnt; + u32 ceq_cnt; + u32 ah_cnt; + u32 mr_cnt; + u32 pbleq_cnt; + u32 
pblem_cnt; + + u32 vf_qp_cnt; + u32 vf_cq_cnt; + u32 vf_srq_cnt; + u32 vf_ceq_cnt; + u32 vf_ah_cnt; + u32 vf_mr_cnt; + u32 vf_pbleq_cnt; + u32 vf_pblem_cnt; + + u32 base_qpn; + u32 base_cqn; + u32 base_srqn; + u32 base_ceqn; + + u64 pf_hmc_size; + u64 qp_hmc_base; + u64 cq_hmc_base; + u64 srq_hmc_base; + u64 txwindow_hmc_base; + u64 ird_hmc_base; + u64 ah_hmc_base; + u64 mr_hmc_base; + u64 pbleq_hmc_base; + u64 pblem_hmc_base; + + u8 hmc_sid; + u8 hmc_use_dpu_ddr; + u8 np_mode_low_lat; + u8 mcode_type; + u8 chip_version; + + u32 max_hw_read_sges; + u32 max_hw_wq_frags; + u32 dh_total_vhca; + u16 vhca_gqp_start; + u16 vhca_gqp_cnt; + u16 vhca_8k_index_start; + u16 vhca_8k_index_cnt; + u16 vhca_ud_gqp; + u16 vhca_ud_8k_index; +} __attribute__((__packed__)); + +struct zxdh_chan_msg { + u32 msg_len; + void *msg; +}; + +enum chan_cmd_type { + GET_PF_PARAM = 1, + GET_VF_PARAM = 2, +}; + +struct zxdh_mgr_msg { + u32 op_code; + u8 ep_id; + u8 pf_id; + u16 vport_vf_id; + u8 ftype; // 判断为vf,0是pf, 1是vf + u8 rsv[3]; +}; + +struct zxdh_mgr { + //struct irdma_device *iwdev; + struct pci_dev *pdev; + u32 pf_id; + u32 vport_vf_id; + u32 ep_id; + u8 ftype; // 判断为vf,0是pf, 1是vf + u16 pcie_id; + u16 device_id; + u8 __iomem *pci_hw_addr; + struct zxdh_mgr_par param; +}; + +enum e_dtcm_para_id_dcqcn { + E_PARA_DCQCN_RPG_TIME_RESET, + E_PARA_DCQCN_CLAMP_TGT_RAGE, + E_PARA_DCQCN_CLAMP_TGT_RATE_AFTER_TIME_INC, + E_PARA_DCQCN_DCE_TCP_RTT, + E_PARA_DCQCN_DCE_TCP_G, + E_PARA_DCQCN_RPG_GD, + E_PARA_DCQCN_INITIAL_ALPHA_VALUE, + E_PARA_DCQCN_MIN_DEC_FAC, + E_PARA_DCQCN_RPG_THRESHOLD, + E_PARA_DCQCN_RPG_RATIO_INCREASE, + E_PARA_DCQCN_RPG_AI_RATIO, + E_PARA_DCQCN_RPG_HAI_RATIO, + E_PARA_DCQCN_NUM +}; + +enum e_dtcm_para_id_rtt { + E_PARA_RTT_ALPHA, + E_PARA_RTT_TLOW, + E_PARA_RTT_THIGH, + E_PARA_RTT_MINRTT, + E_PARA_RTT_BETA, + E_PARA_RTT_AI_NUM, + E_PARA_RTT_THRED_GRADIENT, + E_PARA_RTT_HAI_N, + E_PARA_RTT_AI_N, + E_PARA_RTT_NUM +}; + +struct rdma_chan_msg_para { + uint8_t *in_buf; + uint8_t 
*out_buf; + size_t in_size; + size_t out_size; +}; + +struct dh_rdma_reg_read_req { + u64 phy_addr; + u32 reg_num; +} __attribute__((__packed__)); + +struct dh_rdma_reg_read_resp { + u64 phy_addr; + u32 reg_num; + u32 status_code; + u32 data[]; +} __attribute__((__packed__)); + +struct dh_rdma_reg_write_req { + u64 phy_addr; + u32 reg_num; + u32 data[]; +} __attribute__((__packed__)); + +struct dh_rdma_reg_write_resp { + u64 phy_addr; + u32 reg_num; + u32 status_code; +} __attribute__((__packed__)); + +// mp dtcm para set/get messages +struct dh_mp_dtcm_para_set_req { + u16 mcode_type; + u16 para_id; + u32 val; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_set_resp { + u16 para_id; + u32 status_code; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_get_req { + u16 mcode_type; + u16 para_id; +} __attribute__((__packed__)); + +struct dh_mp_dtcm_para_get_resp { + u16 para_id; + u32 status_code; + u32 val; +} __attribute__((__packed__)); + +struct dh_rdma_vf_num_set_resp { + u64 vf_pblem_cnt; + u32 status_code; +} __attribute__((__packed__)); + +struct dh_hwbond_speed_set_req { + u32 speed; + u8 speed_valid; +} __attribute__((__packed__)); + +struct dh_rdma_vf_num_set_req { + u16 ep_id; + u16 pf_id; + u16 num_vfs; +} __attribute__((__packed__)); + +// channel message struct +struct zxdh_reg_read_cmd { + u32 op_code; + struct dh_rdma_reg_read_req req; +} __attribute__((__packed__)); +struct zxdh_reg_write_cmd { + u32 op_code; + struct dh_rdma_reg_write_req req; +} __attribute__((__packed__)); + +struct zxdh_mp_dtcm_para_get_cmd { + u32 op_code; + struct dh_mp_dtcm_para_get_req req; +} __attribute__((__packed__)); +struct zxdh_mp_dtcm_para_set_cmd { + u32 op_code; + struct dh_mp_dtcm_para_set_req req; +} __attribute__((__packed__)); + +struct zxdh_hwbond_speed_set_cmd { + u32 op_code; + struct dh_hwbond_speed_set_req req; +} __attribute__((__packed__)); + +struct zxdh_rdma_vf_num_set_cmd { + u32 op_code; + struct dh_rdma_vf_num_set_req req; +} 
__attribute__((__packed__)); + +struct zxdh_req_msg { + u8 op_code; + u8 buf[ZXDH_REQ_MSG_LEN]; +} __attribute__((__packed__)); + +struct zxdh_resp_msg { + u8 op_code; + u8 buf[ZXDH_RESP_MSG_LEN]; +} __attribute__((__packed__)); + +struct zxdh_rdma_to_eth_ip_para { + char *ifname; + u32 src_ip[4]; + u32 dst_ip[4]; + u64 src_mac; + u64 dst_mac; + u32 linked_fid; + u8 ipv4 : 1; + u8 mode : 1; +}; + +struct dh_get_srq_l2d_addr_req { + u32 op_code; + u32 function_id; +} __attribute__((__packed__)); + +struct dh_get_srq_l2d_addr_resp { + u64 srq_l2d_paddr; + u32 srq_l2d_size; + u32 rdma_ext_bar_offset; + u32 status_code; +} __attribute__((__packed__)); + +struct zxdh_rdma_sriov_event_info { + struct pci_dev *pdev; + uint64_t bar0_virt_addr; + uint16_t vport_id; + uint16_t num_vfs; +}; + +typedef void (*notify_remote_ip_update)(struct zxdh_rdma_to_eth_ip_para *info); + +int rdma_chan_msg_send(struct zxdh_pci_f *rf, struct rdma_chan_msg_para *para); + +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); +int zxdh_chan_sync_send(struct zxdh_mgr *pmgr, struct zxdh_chan_msg *pmsg, + u32 *pdata, u32 rep_len); +int zxdh_mgr_par_get(struct zxdh_mgr *dh_mgr); + +int zxdh_rdma_reg_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata); + +int zxdh_rdma_regs_read(struct zxdh_pci_f *rf, uint64_t phy_addr, + uint32_t *outdata, uint32_t num); + +int zxdh_rdma_reg_write(struct zxdh_pci_f *rf, uint64_t phy_addr, uint32_t val); + +int zxdh_mp_dtcm_para_get(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t *outdata); + +int zxdh_mp_dtcm_para_set(struct zxdh_pci_f *rf, uint16_t mcode_type, + uint16_t para_id, uint32_t val); + +int dh_rdma_pf_pcie_id_get(struct zxdh_mgr *mgr); + +// callback installed to net +int32_t switch_bound_master_netdev(struct net_device *primary_netdev, struct net_device *linux_bond_netdev, bool hb_enable); +int32_t set_rdma_firmware_speed(struct net_device *netdev, uint32_t bps); 
+// int32_t set_rdma_hwbond_status(struct net_device *netdev, bool hb_enable); +int zxdh_req_cmd_ver(struct zxdh_pci_f *rf); +int register_remote_ip_event_handler(notify_remote_ip_update handler); +void unregister_remote_ip_event_handler(void); +void rdma_update_remote_ip(struct zxdh_rdma_to_eth_ip_para *info); +int set_rdma_vf_num(struct zxdh_rdma_sriov_event_info *sriov_info, u64 *vf_pblem_cnt); +#endif diff --git a/drivers/infiniband/hw/zrdma/osdep.h b/drivers/infiniband/hw/zrdma/osdep.h new file mode 100644 index 000000000000..ccda6d220656 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/osdep.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_OSDEP_H +#define ZXDH_OSDEP_H + +#include +#include +#include +#include +#include +#if defined(__OFED_4_8__) +#define refcount_t atomic_t +#define refcount_inc atomic_inc +#define refcount_dec_and_test atomic_dec_and_test +#define refcount_set atomic_set +#else +#include +#endif /* OFED_4_8 */ +#define STATS_TIMER_DELAY 60000 + +/* + * See include/linux/compiler_attributes.h in kernel >=5.4 for fallthrough. + * This code really should be in zxdh_kcompat.h but to cover shared code + * it had to be here. + * The two #if checks implements fallthrough definition for kernels < 5.4 + * The first check is for new compiler, GCC >= 5.0. If code in compiler_attributes.h + * is not invoked and compiler supports __has_attribute. + * If fallthrough is not defined after the first check, the second check against fallthrough + * will define the macro for the older compiler. + */ +#if !defined(fallthrough) && !defined(__GCC4_has_attribute___noclone__) && \ + defined(__has_attribute) +#define fallthrough __attribute__((__fallthrough__)) +#endif +#ifndef fallthrough +#define fallthrough \ + do { \ + } while (0) +#endif +#define idev_to_dev(ptr) (((ptr)->hw->device)) +#ifndef ibdev_dbg +#define zxdh_dbg(idev, fmt, ...) 
dev_dbg(idev_to_dev(idev), fmt, ##__VA_ARGS__) +#define ibdev_err(ibdev, fmt, ...) dev_err(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_warn(ibdev, fmt, ...) \ + dev_warn(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_info(ibdev, fmt, ...) \ + dev_info(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#define ibdev_notice(ibdev, fmt, ...) \ + dev_notice(&((ibdev)->dev), fmt, ##__VA_ARGS__) +#else +#define zxdh_dbg(idev, fmt, ...) \ + do { \ + struct ib_device *ibdev = zxdh_get_ibdev(idev); \ + if (ibdev) \ + ibdev_dbg(ibdev, fmt, ##__VA_ARGS__); \ + else \ + dev_dbg(idev_to_dev(idev), fmt, ##__VA_ARGS__); \ + } while (0) +#endif + +struct zxdh_dma_info { + dma_addr_t *dmaaddrs; +}; + +struct zxdh_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +struct zxdh_virt_mem { + void *va; + u32 size; +} __packed; + +struct zxdh_sc_vsi; +struct zxdh_sc_dev; +struct zxdh_sc_qp; +struct zxdh_puda_buf; +struct zxdh_puda_cmpl_info; +struct zxdh_update_sds_info; +struct zxdh_hmc_fcn_info; +struct zxdh_manage_vf_pble_info; +struct zxdh_hw; +struct zxdh_pci_f; +struct zxdh_virtchnl_req; + +#if defined(__OFED_4_8__) +/* Special handling for 7.2/OFED. 
The GENMASK macros need to be updated */ +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) +#endif + +struct ib_device *zxdh_get_ibdev(struct zxdh_sc_dev *dev); +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev); +void zxdh_terminate_del_timer(struct zxdh_sc_qp *qp); +void zxdh_hw_stats_start_timer(struct zxdh_sc_vsi *vsi); +void zxdh_hw_stats_stop_timer(struct zxdh_sc_vsi *vsi); +void wr32(struct zxdh_hw *hw, u32 reg, u32 val); +u32 rd32(struct zxdh_hw *hw, u32 reg); +u64 rd64(struct zxdh_hw *hw, u32 reg); +int zxdh_map_vm_page_list(struct zxdh_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt); +void zxdh_unmap_vm_page_list(struct zxdh_hw *hw, dma_addr_t *pg_dma, + u32 pg_cnt); +#endif /* ZXDH_OSDEP_H */ diff --git a/drivers/infiniband/hw/zrdma/pble.c b/drivers/infiniband/hw/zrdma/pble.c new file mode 100644 index 000000000000..bdc11cb244ab --- /dev/null +++ b/drivers/infiniband/hw/zrdma/pble.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "pble.h" +#include "main.h" + +static int add_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc); + +/** + * zxdh_destroy_pble_prm - destroy prm during module unload + * @pble_rsrc: pble resources + */ +void zxdh_destroy_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc) +{ + struct zxdh_chunk *chunk; + struct zxdh_pble_prm *pinfo = &pble_rsrc->pinfo; + + while (!list_empty(&pinfo->clist)) { + chunk = (struct zxdh_chunk *)pinfo->clist.next; + list_del(&chunk->list); + if (chunk->type == PBLE_SD_PAGED) + zxdh_pble_free_paged_mem(chunk); + if (chunk->bitmapbuf) + kfree(chunk->bitmapmem.va); + kfree(chunk->chunkmem.va); + } 
+} + +/** + * zxdh_hmc_init_pble - Initialize pble resources during module load + * @dev: zxdh_sc_dev struct + * @pble_rsrc: pble resources + * @mr: Queue or Memory area + */ +int zxdh_hmc_init_pble(struct zxdh_sc_dev *dev, + struct zxdh_hmc_pble_rsrc *pble_rsrc, int mr) +{ + struct zxdh_hmc_info *hmc_info; + int status = 0; + + hmc_info = dev->hmc_info; + pble_rsrc->dev = dev; + pble_rsrc->pble_copy = true; + pble_rsrc->pble_type = mr; + + if (mr == PBLE_QUEUE) { + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + } else { + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].cnt; + } + + pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr; + + pble_rsrc->pinfo.pble_shift = PBLE_SHIFT; + + mutex_init(&pble_rsrc->pble_mutex_lock); + + spin_lock_init(&pble_rsrc->pinfo.prm_lock); + INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); + if (add_pble_prm(pble_rsrc)) { + zxdh_destroy_pble_prm(pble_rsrc); + status = -ENOMEM; + } + + return status; +} + +/** + * add_sd_direct - add sd direct for pble + * @pble_rsrc: pble resource ptr + * @info: page info for sd + */ +static int add_sd_direct(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_add_page_info *info) +{ + int ret_code = 0; + struct sd_pd_idx *idx = &info->idx; + struct zxdh_chunk *chunk = info->chunk; + struct zxdh_hmc_info *hmc_info = info->hmc_info; + struct zxdh_hmc_sd_entry *sd_entry = info->sd_entry; + u32 offset = 0; + struct zxdh_pci_f *rf = (struct zxdh_pci_f *)container_of( + pble_rsrc->dev, struct zxdh_pci_f, sc_dev); + + if (rf->ftype == 1) { + if (pble_rsrc->pble_type == PBLE_QUEUE) { + if (!sd_entry->valid && + (hmc_info->pble_hmc_index < + hmc_info->hmc_first_entry_pble_mr)) { + ret_code = zxdh_vf_add_pble_hmc_obj( + pble_rsrc->dev, hmc_info, pble_rsrc, + info->pages); + } + } else { + if (!sd_entry->valid && + (hmc_info->pble_mr_hmc_index < + hmc_info->hmc_entry_total + 1)) { + ret_code = zxdh_vf_add_pble_hmc_obj( + pble_rsrc->dev, hmc_info, pble_rsrc, + 
info->pages); + } + } + } else { + if (pble_rsrc->pble_type == PBLE_QUEUE) { + if (!sd_entry->valid && + (hmc_info->pble_hmc_index < + hmc_info->hmc_first_entry_pble_mr)) { + ret_code = zxdh_add_pble_hmc_obj( + hmc_info, pble_rsrc, info->pages); + } + } else { + if (!sd_entry->valid && + (hmc_info->pble_mr_hmc_index < + hmc_info->hmc_entry_total + 1)) { + ret_code = zxdh_add_pble_hmc_obj( + hmc_info, pble_rsrc, info->pages); + } + } + } + + if (ret_code) + return ret_code; + + chunk->type = PBLE_SD_CONTIGOUS; + + offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT; + chunk->size = info->pages << HMC_PAGED_BP_SHIFT; + + chunk->vaddr = sd_entry->u.bp.addr.va + offset; + chunk->pa = sd_entry->u.bp.addr.pa + offset; // + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + + return 0; +} + +/** + * fpm_to_idx - given fpm address, get pble index + * @pble_rsrc: pble resource management + * @addr: fpm address for index + */ +static u32 fpm_to_idx(struct zxdh_hmc_pble_rsrc *pble_rsrc, u64 addr) +{ + u64 idx; + + idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3; + + return (u32)idx; +} + +/** + * add_pble_prm - add a sd entry for pble resoure + * @pble_rsrc: pble resource management + */ +static int add_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc) +{ + struct zxdh_sc_dev *dev = pble_rsrc->dev; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_hmc_info *hmc_info; + struct zxdh_chunk *chunk; + struct zxdh_add_page_info info; + struct sd_pd_idx *idx = &info.idx; + int ret_code = 0; + struct zxdh_virt_mem chunkmem; + u32 pages; + + if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) + return -ENOMEM; + + chunkmem.size = sizeof(*chunk); + chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL); + if (!chunkmem.va) + return -ENOMEM; + + chunk = chunkmem.va; + chunk->chunkmem = chunkmem; + hmc_info = dev->hmc_info; + chunk->dev = dev; + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + + if (pble_rsrc->pble_type == PBLE_QUEUE) { + sd_entry = + &hmc_info->sd_table + .sd_entry[hmc_info->pble_hmc_index]; 
/* code */ + } else { + sd_entry = &hmc_info->sd_table + .sd_entry[hmc_info->pble_mr_hmc_index]; + } + + idx->pd_idx = + (u32)((pble_rsrc->next_fpm_addr - pble_rsrc->fpm_base_addr) / + ZXDH_HMC_PAGED_BP_SIZE); //4096 + idx->rel_pd_idx = (idx->pd_idx % ZXDH_HMC_PD_CNT_IN_SD); // 512 + pages = (idx->rel_pd_idx) ? (ZXDH_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) : + ZXDH_HMC_PD_CNT_IN_SD; + + pages = (u32)min(pages, pble_rsrc->unallocated_pble >> + PBLE_512_SHIFT); // PBLE_512_SHIFT==9 + + info.chunk = chunk; + info.hmc_info = hmc_info; + info.pages = pages; + info.sd_entry = sd_entry; + + ret_code = add_sd_direct(pble_rsrc, &info); + + if (ret_code) + goto error; + + ret_code = zxdh_prm_add_pble_mem(&pble_rsrc->pinfo, chunk); + if (ret_code) + goto error; + + pble_rsrc->next_fpm_addr += chunk->size; + pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3); + + sd_entry->valid = true; + list_add(&chunk->list, &pble_rsrc->pinfo.clist); + + return 0; + +error: + if (chunk->bitmapbuf) + kfree(chunk->bitmapmem.va); + kfree(chunk->chunkmem.va); + + return ret_code; +} + +/** + * free_lvl2 - fee level 2 pble + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static void free_lvl2(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + u32 i; + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *root = &lvl2->root; + struct zxdh_pble_info *leaf = lvl2->leaf; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + if (leaf->addr) + zxdh_prm_return_pbles(&pble_rsrc->pinfo, + &leaf->chunkinfo); + else + break; + } + + if (root->addr) + zxdh_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo); + + vfree(lvl2->leafmem.va); + lvl2->leaf = NULL; + lvl2->leafmem.va = NULL; +} + +/** + * get_lvl2_pble - get level 2 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static int get_lvl2_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + u32 
lf4k, lflast, total, i; + u32 pblcnt = PBLE_PER_PAGE; + u64 *addr; + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *root = &lvl2->root; + struct zxdh_pble_info *leaf; + int ret_code; + u64 fpm_addr; + dma_addr_t paaddr; + + /* number of full 512 (4K) leafs) */ + lf4k = palloc->total_cnt >> 9; + lflast = palloc->total_cnt % PBLE_PER_PAGE; + total = (lflast == 0) ? lf4k : lf4k + 1; + lvl2->leaf_cnt = total; + + lvl2->leafmem.size = (sizeof(*leaf) * total); + lvl2->leafmem.va = vzalloc(lvl2->leafmem.size); + if (!lvl2->leafmem.va) { + pr_info("%s %d failed to alloc lvl2->leafmem size:0x%x \n", __func__, __LINE__, lvl2->leafmem.size); + return -ENOMEM; + } + + lvl2->leaf = lvl2->leafmem.va; + leaf = lvl2->leaf; + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo, + total << 3, &root->addr, &fpm_addr, + &paaddr); + if (ret_code) { + vfree(lvl2->leafmem.va); + lvl2->leaf = NULL; + pr_info("%s %d faile to get lvl2 pble\n", __func__, __LINE__); + return -ENOMEM; + } + + root->smmu_fpm_addr = fpm_addr; + root->pa = paaddr; + root->idx = fpm_to_idx(pble_rsrc, fpm_addr); + root->cnt = total; + addr = root->addr; + for (i = 0; i < total; i++, leaf++) { + pblcnt = (lflast && ((i + 1) == total)) ? 
lflast : + PBLE_PER_PAGE; + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, + &leaf->chunkinfo, pblcnt << 3, + &leaf->addr, &fpm_addr, &paaddr); + if (ret_code) + goto error; + + leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr); + leaf->smmu_fpm_addr = fpm_addr; + leaf->pa = paaddr; + leaf->cnt = pblcnt; + *addr = (u64)leaf->idx; + addr++; + } + + if (pble_rsrc->pble_copy) { + zxdh_cqp_config_pble_table_cmd(pble_rsrc->dev, root, total << 3, + pble_rsrc->pble_type); + } + + palloc->level = PBLE_LEVEL_2; + pble_rsrc->stats_lvl2++; + return 0; + +error: + free_lvl2(pble_rsrc, palloc); + + return -ENOMEM; +} + +/** + * get_lvl1_pble - get level 1 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 1 pble allocation + */ +static int get_lvl1_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + int ret_code; + u64 fpm_addr; + dma_addr_t paaddr; + struct zxdh_pble_info *lvl1 = &palloc->level1; + + ret_code = zxdh_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo, + palloc->total_cnt << 3, &lvl1->addr, + &fpm_addr, &paaddr); + if (ret_code) + return -ENOMEM; + + palloc->level = PBLE_LEVEL_1; + lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr); + lvl1->cnt = palloc->total_cnt; + lvl1->smmu_fpm_addr = fpm_addr; + lvl1->pa = paaddr; + pble_rsrc->stats_lvl1++; + + return 0; +} + +/** + * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @level1_only: flag for a level 1 PBLE + */ +static int get_lvl1_lvl2_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc, bool level1_only) +{ + int status = 0; + + status = get_lvl1_pble(pble_rsrc, palloc); + if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) + return status; + + status = get_lvl2_pble(pble_rsrc, palloc); + + return status; +} + +/** + * zxdh_get_pble - allocate pbles from the prm + * @pble_rsrc: pble resources + * @palloc: contains 
all inforamtion regarding pble (idx + pble addr) + * @pble_cnt: #of pbles requested + * @level1_only: true if only pble level 1 to acquire + */ +int zxdh_get_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc, u32 pble_cnt, + bool level1_only) +{ + int status = 0; + int max_sds = 0; + int i; + + palloc->total_cnt = pble_cnt; + palloc->level = PBLE_LEVEL_0; + + mutex_lock(&pble_rsrc->pble_mutex_lock); + + /*check first to see if we can get pble's without acquiring + * additional sd's + */ + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + if (!status) + goto exit; + + max_sds = (palloc->total_cnt >> 18) + 1; + for (i = 0; i < max_sds; i++) { + status = add_pble_prm(pble_rsrc); + if (status) { + pr_info("%s %d failed to add pble_chunck\n", __func__, __LINE__); + break; + } + + status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); + /* if level1_only, only go through it once */ + if (!status || level1_only) + break; + } + +exit: + if (!status) { + pble_rsrc->allocdpbles += pble_cnt; + pble_rsrc->stats_alloc_ok++; + } else { + pble_rsrc->stats_alloc_fail++; + } + mutex_unlock(&pble_rsrc->pble_mutex_lock); + + return status; +} + +/** + * zxdh_free_pble - put pbles back into prm + * @pble_rsrc: pble resources + * @palloc: contains all information regarding pble resource being freed + */ +void zxdh_free_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc) +{ + pble_rsrc->freedpbles += palloc->total_cnt; + + if (palloc->level == PBLE_LEVEL_2) + free_lvl2(pble_rsrc, palloc); + else + zxdh_prm_return_pbles(&pble_rsrc->pinfo, + &palloc->level1.chunkinfo); + pble_rsrc->stats_alloc_freed++; +} diff --git a/drivers/infiniband/hw/zrdma/pble.h b/drivers/infiniband/hw/zrdma/pble.h new file mode 100644 index 000000000000..e04d03cdb3bf --- /dev/null +++ b/drivers/infiniband/hw/zrdma/pble.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + 
+#ifndef ZXDH_PBLE_H +#define ZXDH_PBLE_H + +#define PBLE_SHIFT 6 +#define PBLE_PER_PAGE 512 +#define HMC_PAGED_BP_SHIFT 12 +#define PBLE_512_SHIFT 9 +#define PBLE_INVALID_IDX 0xffffffff + +enum zxdh_pble_level { + PBLE_LEVEL_0 = 0, + PBLE_LEVEL_1 = 1, + PBLE_LEVEL_2 = 2, +}; + +enum zxdh_alloc_type { + PBLE_NO_ALLOC = 0, + PBLE_SD_CONTIGOUS = 1, + PBLE_SD_PAGED = 2, +}; + +struct zxdh_chunk; + +struct zxdh_pble_chunkinfo { + struct zxdh_chunk *pchunk; + u64 bit_idx; + u64 bits_used; +}; + +struct zxdh_pble_info { + u64 *addr; + dma_addr_t pa; + u64 smmu_fpm_addr; + u32 idx; + u32 cnt; + struct zxdh_pble_chunkinfo chunkinfo; + bool pble_copy; +}; + +struct zxdh_pble_level2 { + struct zxdh_pble_info root; + struct zxdh_pble_info *leaf; + struct zxdh_virt_mem leafmem; + u32 leaf_cnt; +}; + +struct zxdh_pble_alloc { + u32 total_cnt; + enum zxdh_pble_level level; + union { + struct zxdh_pble_info level1; + struct zxdh_pble_level2 level2; + }; +}; + +struct sd_pd_idx { + u32 sd_idx; + u32 pd_idx; + u32 rel_pd_idx; +}; + +struct zxdh_add_page_info { + struct zxdh_chunk *chunk; + struct zxdh_hmc_sd_entry *sd_entry; + struct zxdh_hmc_info *hmc_info; + struct sd_pd_idx idx; + u32 pages; +}; + +struct zxdh_chunk { + struct list_head list; + struct zxdh_dma_info dmainfo; + void *bitmapbuf; + + u32 sizeofbitmap; + u64 size; + void *vaddr; + dma_addr_t pa; + u64 fpm_addr; + u32 pg_cnt; + enum zxdh_alloc_type type; + struct zxdh_sc_dev *dev; + struct zxdh_virt_mem bitmapmem; + struct zxdh_virt_mem chunkmem; +}; + +struct zxdh_pble_prm { + struct list_head clist; + spinlock_t prm_lock; /* protect prm bitmap */ + u64 total_pble_alloc; + u64 free_pble_cnt; + u8 pble_shift; +}; + +struct zxdh_hmc_pble_rsrc { + u32 unallocated_pble; + struct mutex pble_mutex_lock; /* protect PBLE resource */ + struct zxdh_sc_dev *dev; + u64 fpm_base_addr; + u64 next_fpm_addr; + struct zxdh_pble_prm pinfo; + u64 allocdpbles; + u64 freedpbles; + u32 stats_direct_sds; + u32 stats_paged_sds; + u64 
stats_alloc_ok; + u64 stats_alloc_fail; + u64 stats_alloc_freed; + u64 stats_lvl1; + u64 stats_lvl2; + u32 pble_type; + bool pble_copy; +}; + +void zxdh_destroy_pble_prm(struct zxdh_hmc_pble_rsrc *pble_rsrc); +int zxdh_hmc_init_pble(struct zxdh_sc_dev *dev, + struct zxdh_hmc_pble_rsrc *pble_rsrc, int mr); + +void zxdh_free_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc); +int zxdh_get_pble(struct zxdh_hmc_pble_rsrc *pble_rsrc, + struct zxdh_pble_alloc *palloc, u32 pble_cnt, + bool level1_only); +int zxdh_prm_add_pble_mem(struct zxdh_pble_prm *pprm, + struct zxdh_chunk *pchunk); +int zxdh_prm_get_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr, dma_addr_t *paaddr); +void zxdh_prm_return_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo); +void zxdh_pble_free_paged_mem(struct zxdh_chunk *chunk); +#endif /* ZXDH_PBLE_H */ diff --git a/drivers/infiniband/hw/zrdma/private_verbs_cmd.c b/drivers/infiniband/hw/zrdma/private_verbs_cmd.c new file mode 100644 index 000000000000..9c57497eeb77 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/private_verbs_cmd.c @@ -0,0 +1,5571 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#define UVERBS_MODULE_NAME zxdh_ib +#include +#include +#include "zxdh_user_ioctl_cmds.h" +#include "private_verbs_cmd.h" +#include "main.h" +#include "zxdh_user_ioctl_verbs.h" +#include "icrdma_hw.h" + +#define DATA_ADDR_BASE 0x620610E0B4u +#define READ_RAM_REG_BASE 0x620610E0B8u +#define MP_DATA_NUM_GEG 0x620610E0BCu +#define BYPASS_REG 0x620740000Cu +#define REPLACE_REG 0x62074000A0u +#define BASE_FOR_LITTLE_GQP 0x6206008000u +#define BASE_FOR_BIG_GQP 0x6206108000u +#define RAM_ADDR 0xA1F40u +#define MP_OFFSET 0x200u +#define REG_BYTE 0x4u +#define CAP_ENABLE_REG_IDX 0x2Du +#define WRITE_RAM_REG_IDX 0x2Eu +#define GQP_MOD 0x14u +#define MP_MOD 0x37u +#define 
GQP_OFFSET 0x4u +#define GQP_ID_1023 0x3FF +#define GQP_ID_1103 0x44F +#define GQP_ID_2047 0x7FF +#define MP_IDX_INC 0x1u +#define MP_DATA_BYTE 0x40u +#define DDR_MP_DATA_NUM 0x30D3Fu +#define DDR_ADDR_BASE 0x3C0000000u +#define DDR_SIZE 0x3200000u +#define REPLACE_VALUE 0x20000000u +#define FREE_TYPE_MP 1 +#define FREE_TYPE_TX 2 +#define FREE_TYPE_RX 3 +#define FREE_TYPE_IOVA 4 +#define FREE_TYPE_HW_OBJ_DATA 5 +#define MAX_COPY_SIZE 32 +#define MAX_COPY_SIZE_EX 32 +#define MAX_READ_REG_SIZE 32 +#define MAX_SMMU_READ_REG_SIZE 16 + +#define PBLE_QUEUE_CACHE_ID_BASE 0x6206800000 +#define AH_CACHE_ID_BASE 0x6206800C08 +#define TX_WINDOW_CACHE_ID_BASE 0x620680080C +#define TX_WINDOW_DDR_SIZE_REG 0x62065e0100 +#define CQ_DOORBELL_SHADOW_BASE 0x6205800598 +#define CQ_INDICATE_ID_BASE 0x6205800594 +#define CEQ_INDICATE_ID_BASE 0x6205800680 +#define AEQ_INDICATE_ID_BASE 0x620680081c +#define ROUTE_ID_REG_SIZE 0x1000 +#define LAST_15_WQE 15 + +#define VHCA_RC_UD_GQP_MAX_CNT 49 +#define VHCA_RC_UD_8K_MAX_CNT 193 +#define PCIE_PF_NUM_MAX 31 +#define VHCA_NUM_MAX 257 +#define CUSTOM_ERROR_CODE_BASE 200 + +#define ZXDH_QPN_ERROR (CUSTOM_ERROR_CODE_BASE + 1) +#define ZXDH_QP_NOT_AVALIABLE (CUSTOM_ERROR_CODE_BASE + 2) + +#define ZXDH_READ_RAM_MAX_OFFSET 1024 + +int write_cap_tx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_tx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_rx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +int write_cap_rx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg); +static int write_mp_cap_regs(struct zxdh_device *iwdev, bool is_l2d_used, + struct zxdh_mp_cap_resp *resp); +static int hw_object_query_info(struct zxdh_pci_f *rf, struct hw_object_wqe_context *object_wqe_ctx); + +extern u16 zxdh_get_rc_gqp_id(u16 qp_8k_index, u16 vhca_gqp_start, + u16 vhca_gqp_cnt); + +#define WRITE_REGISTER_AND_CHECK(rf, reg, value) \ + do { \ + int ok = 
zxdh_rdma_reg_write(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define READ_REGISTER_AND_CHECK(rf, reg, value) \ + do { \ + int ok = zxdh_rdma_reg_read(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define READ_REGISTERS_AND_CHECK(rf, reg, value, num) \ + do { \ + int ok = zxdh_rdma_regs_read(rf, reg, value, num); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define REG_OP_AND_CHECK(opfunc, rf, reg, value) \ + do { \ + int ok = opfunc(rf, reg, value); \ + if (ok != 0) \ + return ok; \ + } while (0) + +#define GET_REG_AND_WRITE_TO_USE(addr, index) \ + do { \ + reg_value[i].reg_addr = addr << index; \ + READ_REGISTER_AND_CHECK(rf, addr, ®_value[i].value); \ + (*count)++; \ + i++; \ + } while (0) + +#define COPY_TO_USER_SAFE(dest, src, size) \ + do { \ + if (copy_to_user((void __user *)(uintptr_t)(dest), \ + (const void *)(src), (size))) \ + return -EFAULT; \ + } while (0) + +static int process_hw_modify_qpc_cmd(struct zxdh_qp *iwqp, + struct zxdh_modify_qpc_item *modify_item, + u64 modify_mask) +{ + unsigned long flags; + struct zxdh_device *iwdev; + struct zxdh_modify_qp_info info = { 0 }; + u64 qpc_tx_mask_low = 0; + u64 qpc_tx_mask_high = 0; + iwdev = iwqp->iwdev; + + if (modify_mask & ZXDH_TX_READ_RETRY_FLAG_SET) { + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RETRY_FLAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_CUR_RETRY_CNT; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_READ_RETRY_FLAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_FLAG; + } + if (modify_mask & ZXDH_ERR_FLAG_SET) { + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_ACK_ERR_FLAG; + } + if (modify_mask & ZXDH_RETRY_CQE_SQ_OPCODE) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RETRY_CQE_SQ_OPCODE; + + if (modify_mask & ZXDH_PACKAGE_ERR_FLAG) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_PACKAGE_ERR_FLAG; + + if (modify_mask & ZXDH_TX_LAST_ACK_PSN) + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_PSN; + + if (modify_mask & 
ZXDH_TX_LAST_ACK_WQE_OFFSET_SET) { + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_WQE_OFFSET; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_HW_SQ_TAIL_UNA; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_TIME; + } + if (modify_mask & ZXDH_TX_RDWQE_PYLD_LENGTH) + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RDWQE_PYLD_LENGTH; + + if (modify_mask & ZXDH_TX_RECV_READ_FLAG_SET) { + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_CNT; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_RD_MSG_LOSS_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RECV_READ_FLAG; + } + if (modify_mask & ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET) { + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RD_MSG_LOSS_ERR_FLAG; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_PKTCHK_RD_MSG_LOSS_ERR_CNT; + } + + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_tx_mask_high = qpc_tx_mask_high; + spin_lock_irqsave(&iwqp->lock, flags); + zxdh_sc_qp_modify_private_cmd_qpc(&iwqp->sc_qp, iwqp->host_ctx.va, + modify_item); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + + return 0; +} + +static u16 get_tx_wqe_pointer(uint8_t *buf) +{ + __le16 ddd = ((*(__le32 *)(buf + 7)) & 0x1FFFE) >> 1; + + return le16_to_cpu(ddd); +} + +void copy_tx_window_to_win_item(void *va, struct zxdh_qp_tx_win_item *info) +{ + info->start_psn = ZXDH_GET_QPC_ITEM(u32, va, + ZXDH_TX_WIN_START_PSN_BYTE_OFFSET, + IRDMATX_WIN_START_PSN); + info->wqe_pointer = get_tx_wqe_pointer(va); +} + +static void copy_qpc_to_tx_retry_item(void *va, + struct zxdh_reset_qp_retry_tx_item *info) +{ + info->tx_win_raddr = + ZXDH_GET_QPC_ITEM(u16, va, ZXDH_QPC_TX_WIN_RADDR_BYTE_OFFSET, + RDMAQPC_TX_WIN_RADDR); + info->tx_last_ack_psn = + ZXDH_GET_QPC_ITEM(u32, va, ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET, + RDMAQPC_TX_LAST_ACK_PSN); + info->rnr_retry_time_l = ZXDH_GET_QPC_ITEM( + u32, va, 
ZXDH_QPC_RNR_RETRY_TIME_L_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_TIME_L); + info->rnr_retry_time_h = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RNR_RETRY_TIME_H_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_TIME_H); + info->rnr_retry_threshold = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RNR_RETRY_THRESHOLD_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_THRESHOLD); + info->cur_retry_count = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET, + RDMAQPC_TX_CUR_RETRY_CNT); + info->retry_cqe_sq_opcode = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET, + RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG); +} + +static int zxdh_query_tx_window_info(struct zxdh_device *iwdev, u64 tx_addr, + struct zxdh_dma_mem *qpc_buf) +{ + int err_code; + struct zxdh_src_copy_dest src_dest = { 0 }; + + src_dest.src = tx_addr; + src_dest.dest = qpc_buf->pa; + src_dest.len = qpc_buf->size; + err_code = + zxdh_cqp_rdma_read_tx_window_cmd(&iwdev->rf->sc_dev, &src_dest); + if (err_code) { + pr_err("zxdh query tx window info failed:%d\n", err_code); + return err_code; + } + return 0; +} + +void set_retry_modify_qpc_item( + struct zxdh_modify_qpc_item *modify_qpc_item, + struct zxdh_reset_qp_retry_tx_item *retry_item_info, + struct zxdh_qp_tx_win_item *tx_win_item_info, u64 *modify_mask) +{ + modify_qpc_item->tx_last_ack_psn = tx_win_item_info->start_psn - 1; + *modify_mask |= ZXDH_TX_LAST_ACK_PSN; + + modify_qpc_item->last_ack_wqe_offset = 0; + modify_qpc_item->hw_sq_tail_una = tx_win_item_info->wqe_pointer; + modify_qpc_item->rnr_retry_time_l = retry_item_info->rnr_retry_time_l; + modify_qpc_item->rnr_retry_time_h = retry_item_info->rnr_retry_time_h; + modify_qpc_item->rnr_retry_threshold = + retry_item_info->rnr_retry_threshold; + *modify_mask |= ZXDH_TX_LAST_ACK_WQE_OFFSET_SET; + + modify_qpc_item->retry_flag = 0; + modify_qpc_item->rnr_retry_flag = 0; + modify_qpc_item->read_retry_flag = 0; + modify_qpc_item->cur_retry_count = retry_item_info->cur_retry_count; + *modify_mask |= 
ZXDH_TX_READ_RETRY_FLAG_SET; + + modify_qpc_item->rdwqe_pyld_length_l = 0; + modify_qpc_item->rdwqe_pyld_length_h = 0; + *modify_mask |= ZXDH_TX_RDWQE_PYLD_LENGTH; + + modify_qpc_item->recv_read_flag = 0; + modify_qpc_item->recv_err_flag = 0; + modify_qpc_item->recv_rd_msg_loss_err_cnt = 0; + modify_qpc_item->recv_rd_msg_loss_err_flag = 0; + *modify_mask |= ZXDH_TX_RECV_READ_FLAG_SET; + + modify_qpc_item->rd_msg_loss_err_flag = 0; + modify_qpc_item->pktchk_rd_msg_loss_err_cnt = 0; + *modify_mask |= ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET; + + modify_qpc_item->ack_err_flag = 0; + modify_qpc_item->err_flag = 0; + *modify_mask |= ZXDH_ERR_FLAG_SET; + + modify_qpc_item->package_err_flag = 0; + *modify_mask |= ZXDH_PACKAGE_ERR_FLAG; + + modify_qpc_item->retry_cqe_sq_opcode = + retry_item_info->retry_cqe_sq_opcode & + ZXDH_RESET_RETRY_CQE_SQ_OPCODE_ERR; + *modify_mask |= ZXDH_RETRY_CQE_SQ_OPCODE; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_RESET_QP)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_RESET_QP)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_dma_mem qpc_buf = { 0 }; + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct ib_ucontext *ucontext; + struct zxdh_pci_f *rf; + struct zxdh_sc_dev *dev; + struct zxdh_reset_qp_retry_tx_item retry_item_info = { 0 }; + struct zxdh_modify_qpc_item modify_qpc_item = { 0 }; + struct zxdh_qp_tx_win_item tx_win_item_info = { 0 }; + int ret; + int err_code = 0; + u64 tx_addr; + u64 modify_mask = 0; + u64 reset_opcode; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_RESET_QP_HANDLE); +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + iwqp = to_iwqp(qp); + rf = iwdev->rf; + dev = 
&rf->sc_dev; + ret = uverbs_copy_from(&reset_opcode, attrs, + ZXDH_IB_ATTR_QP_RESET_OP_CODE); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (reset_opcode <= 0) + return -EINVAL; + + switch (reset_opcode) { + case ZXDH_RESET_RETRY_TX_ITEM_FLAG: + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, + qpc_buf.size, &qpc_buf.pa, + GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(dev, iwqp->sc_qp.qp_ctx_num, &qpc_buf); + if (err_code) { + pr_err("reset qp fill qpc failed:%d\n", err_code); + goto free_exit; + } + copy_qpc_to_tx_retry_item(qpc_buf.va, &retry_item_info); + tx_addr = (qp->qp_num - dev->base_qpn) * + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_TXWINDOW] + .size + + retry_item_info.tx_win_raddr * 64; + + memset(qpc_buf.va, 0, qpc_buf.size); + qpc_buf.size = 16; + err_code = zxdh_query_tx_window_info(iwdev, tx_addr, &qpc_buf); + if (err_code) { + pr_err("reset qp dma read tx window failed:%d\n", + err_code); + goto free_exit; + } + copy_tx_window_to_win_item(qpc_buf.va, &tx_win_item_info); + set_retry_modify_qpc_item(&modify_qpc_item, &retry_item_info, + &tx_win_item_info, &modify_mask); + err_code = process_hw_modify_qpc_cmd(iwqp, &modify_qpc_item, + modify_mask); + if (err_code) { + pr_err("reset qp process modify qpc cmd failed:%d\n", + err_code); + goto free_exit; + } + break; + default: + pr_err("reset qp unknow opcode:%lld\n", reset_opcode); + err_code = EINVAL; + break; + } +free_exit: + if (qpc_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, + ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT), + qpc_buf.va, qpc_buf.pa); + qpc_buf.va = NULL; + } + return err_code; +} + +static void copy_qpc_to_resp(void *va, struct zxdh_query_qpc_resp *resp) +{ + resp->retry_flag = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_FALG_BYTE_OFFSET, RDMAQPC_TX_RETRY_FLAG); + resp->rnr_retry_flag = + 
ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RNR_RETRY_FALG_BYTE_OFFSET, + RDMAQPC_TX_RNR_RETRY_FLAG); + resp->read_retry_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_READ_RETRY_FALG_BYTE_OFFSET, + RDMAQPC_TX_READ_RETRY_FLAG); + resp->cur_retry_count = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_CUR_RETRY_COUNT_BYTE_OFFSET, + RDMAQPC_TX_CUR_RETRY_CNT); + resp->retry_cqe_sq_opcode = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETRY_CQE_SQ_OPCODE_BYTE_OFFSET, + RDMAQPC_TX_RETRY_CQE_SQ_OPCODE_FLAG); + resp->err_flag = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_ERR_FLAG_BYTE_OFFSET, RDMAQPC_TX_ERR_FLAG); + resp->ack_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_ACK_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_ACK_ERR_FLAG); + resp->package_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_PACKAGE_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_PACKAGE_ERR_FLAG); + resp->recv_err_flag = + ZXDH_GET_QPC_ITEM(u8, va, ZXDH_QPC_RECV_ERR_FLAG_BYTE_OFFSET, + RDMAQPC_TX_RECV_ERR_FLAG); + resp->tx_last_ack_psn = + ZXDH_GET_QPC_ITEM(u32, va, ZXDH_QPC_TX_LAST_ACK_PSN_BYTE_OFFSET, + RDMAQPC_TX_LAST_ACK_PSN); + resp->retry_count = ZXDH_GET_QPC_ITEM( + u8, va, ZXDH_QPC_RETY_COUNT_BYTE_OFFSET, RDMAQPC_TX_RETRY_CNT); +} + +#ifndef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT +static int zxdh_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, + size_t idx, const void *from, + size_t size) +{ + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + if (size < attr->ptr_attr.len) { + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, + attr->ptr_attr.len - size)) + return -EFAULT; + } + return uverbs_copy_to(bundle, idx, from, size); +} +#endif + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_QUERY_QPC)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_QUERY_QPC)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_qp *iwqp; + struct zxdh_device *iwdev; + struct ib_device 
*ib_dev; + struct ib_ucontext *ucontext; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + struct zxdh_query_qpc_resp resp = { 0 }; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_QUERY_HANDLE); +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + iwqp = to_iwqp(qp); + + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("query qpc alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(&iwdev->rf->sc_dev, iwqp->sc_qp.qp_ctx_num, &qpc_buf); + if (err_code) { + pr_err("query qpc fill qpc failed:%d\n", err_code); + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + return -EFAULT; + } + copy_qpc_to_resp(qpc_buf.va, &resp); + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + return uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_QP_QUERY_RESP, + &resp, sizeof(resp)); +#else + return zxdh_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_QP_QUERY_RESP, + &resp, sizeof(resp)); +#endif +} + +static void +transfer_modify_qpc_req_to_item(const struct zxdh_modify_qpc_req *req, + struct zxdh_modify_qpc_item *modify_item) +{ + modify_item->retry_flag = req->retry_flag; + modify_item->rnr_retry_flag = req->rnr_retry_flag; + modify_item->read_retry_flag = req->read_retry_flag; + modify_item->cur_retry_count = req->cur_retry_count; + modify_item->retry_cqe_sq_opcode = req->retry_cqe_sq_opcode; + modify_item->err_flag = req->err_flag; + modify_item->ack_err_flag = req->ack_err_flag; + modify_item->package_err_flag = req->package_err_flag; +} + +#ifdef ZXDH_UAPI_DEF +static int + 
UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_QPC)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_QPC)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_qp *iwqp; + struct ib_ucontext *ucontext; + struct zxdh_modify_qpc_req req = { 0 }; + int ret; + struct zxdh_modify_qpc_item modify_item = { 0 }; + u64 modify_mask; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE); +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + iwqp = to_iwqp(qp); + ret = uverbs_copy_from(&modify_mask, attrs, + ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + ret = uverbs_copy_from_or_zero(&req, attrs, + ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ); + if (ret) + return ret; + transfer_modify_qpc_req_to_item(&req, &modify_item); + ret = process_hw_modify_qpc_cmd(iwqp, &modify_item, modify_mask); + if (ret) { + pr_err("modify qpc process modify qpc cmd failed:%d\n", ret); + return ret; + } + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_pd *iwpd; + struct zxdh_device *iwdev; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + struct zxdh_qp *iwqp = NULL; + struct zxdh_udp_offload_info *udp_info; + struct zxdh_qp_host_ctx_info *ctx_info; + struct zxdh_pci_f *rf; + struct zxdh_sc_dev *dev; + struct zxdh_modify_qp_info info = {}; + u64 qpc_tx_mask_low = 0; + u64 qpc_rx_mask_low = 0; + unsigned long flags; + u16 udp_sport = 0; + u32 qpn = 0; + int ret; + +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); 
+#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&udp_sport, attrs, ZXDH_IB_ATTR_QP_UDP_PORT); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + ret = uverbs_copy_from(&qpn, attrs, ZXDH_IB_ATTR_QP_QPN); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + rf = iwdev->rf; + dev = &rf->sc_dev; + if (qpn < (dev->base_qpn + 1) || qpn > (dev->base_qpn + rf->max_qp - 1)) + return -EINVAL; + + iwqp = iwdev->rf->qp_table[qpn - dev->base_qpn]; + if (iwqp == NULL) + return -EINVAL; + + if (iwqp->ibqp.qp_type != IB_QPT_RC || + !(iwqp->ibqp_state == IB_QPS_RTR || iwqp->ibqp_state == IB_QPS_RTS)) + return -EOPNOTSUPP; + + iwpd = to_iwpd(iwqp->ibqp.pd); + udp_info = &iwqp->udp_info; + ctx_info = &iwqp->ctx_info; + ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; + + udp_info->src_port = udp_sport; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SRC_PORT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_SRC_PORT; + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_rx_mask_low = qpc_rx_mask_low; + spin_lock_irqsave(&iwqp->lock, flags); + zxdh_sc_qp_modify_ctx_udp_sport(&iwqp->sc_qp, iwqp->host_ctx.va, + ctx_info); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + + if (!refcount_read(&iwdev->trace_switch.t_switch)) + return 0; + + if (udp_info->ipv4) { + struct sockaddr_in saddr_in4 = { 0 }; + struct sockaddr_in daddr_in4 = { 0 }; + + saddr_in4.sin_addr.s_addr = htonl(udp_info->local_ipaddr[3]); + daddr_in4.sin_addr.s_addr = htonl(udp_info->dest_ip_addr[3]); + + ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP udp sport, type %d, ib qpn 0x%X, state: %s, dest_qpn:%d, src_port:%d, src_ip:%pI4, dest_ip:%pI4\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(iwqp->ibqp_state), + iwqp->roce_info.dest_qp, udp_sport, &saddr_in4.sin_addr, + &daddr_in4.sin_addr); + } else { + struct sockaddr_in6 saddr_in6 = { 0 }; + 
struct sockaddr_in6 daddr_in6 = { 0 }; + + zxdh_copy_ip_htonl(saddr_in6.sin6_addr.in6_u.u6_addr32, + udp_info->local_ipaddr); + zxdh_copy_ip_htonl(daddr_in6.sin6_addr.in6_u.u6_addr32, + udp_info->dest_ip_addr); + + ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP udp sport, type %d, ib qpn 0x%X, state: %s, dest_qpn:%d, src_port:%d, src_ip:%pI6, dest_ip:%pI6\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(iwqp->ibqp_state), + iwqp->roce_info.dest_qp, udp_sport, + &saddr_in6.sin6_addr, &daddr_in6.sin6_addr); + } + return 0; +} + +static int zxdh_modify_qp_credit_flag(struct zxdh_qp *iwqp, u64 credit_flag) +{ + __le64 *qp_ctx = iwqp->host_ctx.va; + struct zxdh_modify_qp_info info = {}; + u64 hdr; + u64 mask; + + get_64bit_val(qp_ctx, 8, &hdr); + mask = FIELD_PREP(RDMAQPC_TX_ACKCREDITS, 0x1f); + hdr &= ~mask; + /* 0x1e is on, 0x1f is off*/ + hdr |= FIELD_PREP(RDMAQPC_TX_ACKCREDITS, credit_flag ? 0x1e : 0x1f); + set_64bit_val(qp_ctx, 8, hdr); + + get_64bit_val(qp_ctx, 376, &hdr); + mask = FIELD_PREP(RDMAQPC_RX_ACK_CREDITS, 0x1); + hdr &= ~mask; + hdr |= FIELD_PREP(RDMAQPC_RX_ACK_CREDITS, credit_flag ? 0x0 : 0x1); + + set_64bit_val(qp_ctx, 376, hdr); + + info.qpc_tx_mask_low = (0x1UL << 7); + info.qpc_tx_mask_high = 0; + info.qpc_rx_mask_low = (1 << 7); + info.qpc_rx_mask_high = 0; + + info.qpc_tx_mask_low |= 0x1FFFFFF; + info.qpc_tx_mask_high |= 0x1UL << 18; + info.qpc_rx_mask_low |= 0xDA3CE8081E7FFCF0; + info.qpc_rx_mask_high |= 0x1E9; + + if (zxdh_hw_modify_qp(iwqp->iwdev, iwqp, &info, true)) + return -EINVAL; + + iwqp->sc_qp.is_credit_en = credit_flag ? 
1 : 0; + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_SET_CREDIT_FLAG)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_QP_SET_CREDIT_FLAG)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_qp *iwqp; + struct ib_ucontext *ucontext; + int ret; + u64 credity_flag; + struct ib_qp *qp = + uverbs_attr_get_obj(attrs, ZXDH_IB_ATTR_QP_SET_CREDIT_FLAG_HANDLE); +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + + iwqp = to_iwqp(qp); + ret = uverbs_copy_from(&credity_flag, attrs, + ZXDH_IB_ATTR_QP_CREDIT_FLAG); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + ret = zxdh_modify_qp_credit_flag(iwqp, credity_flag); + return ret; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_LOG_TRACE)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_LOG_TRACE)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct zxdh_device *iwdev; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + u8 trace_switch; + int ret; + +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + trace_switch = refcount_read(&iwdev->trace_switch.t_switch); + ret = uverbs_copy_to(attrs, ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH, + &trace_switch, sizeof(trace_switch)); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_SET_LOG_TRACE)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_SET_LOG_TRACE)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + 
struct ib_ucontext *ucontext; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 trace_switch; + int ret; + +#ifdef ZXDH_UAPI_DEF + ucontext = ib_uverbs_get_ucontext(attrs); +#else + ucontext = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&trace_switch, attrs, + ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (trace_switch >= SWITCH_ERROR) + return -EINVAL; + + refcount_set(&iwdev->trace_switch.t_switch, trace_switch); + return 0; +} + +int write_cap_tx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 node_select, node_choose, comapre_loop; + u64 wqe_offset[RDMA_TX_CAP_WQE_MOD_NUM] = { + RDMATX_CAP_NODE0_WQE_PRE_READ, RDMATX_CAP_NODE0_WQE_HANDLE, + RDMATX_CAP_NODE0_PACKAGE + }; + u64 node0offset[RDMA_TX_SEL_NODE_MODULE_NUM - 1] = { + RDMATX_CAP_NODE0_ACK, RDMATX_CAP_NODE0_DB, RDMATX_CAP_NODE0_AEQ, + 0, RDMATX_CAP_NODE0_TXWINDOW + }; + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_BIT_EN0_NODE0, + RDMATX_CAP_COMPARE_BIT_EN1_NODE0, + RDMATX_CAP_COMPARE_BIT_EN2_NODE0, + RDMATX_CAP_COMPARE_BIT_EN3_NODE0, + RDMATX_CAP_COMPARE_BIT_EN4_NODE0, + RDMATX_CAP_COMPARE_BIT_EN5_NODE0, + RDMATX_CAP_COMPARE_BIT_EN6_NODE0, + RDMATX_CAP_COMPARE_BIT_EN7_NODE0, + RDMATX_CAP_COMPARE_BIT_EN8_NODE0, + RDMATX_CAP_COMPARE_BIT_EN9_NODE0, + RDMATX_CAP_COMPARE_BIT_EN10_NODE0, + RDMATX_CAP_COMPARE_BIT_EN11_NODE0, + RDMATX_CAP_COMPARE_BIT_EN12_NODE0, + RDMATX_CAP_COMPARE_BIT_EN13_NODE0, + RDMATX_CAP_COMPARE_BIT_EN14_NODE0, + RDMATX_CAP_COMPARE_BIT_EN15_NODE0 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_DATA0_NODE0, + RDMATX_CAP_COMPARE_DATA1_NODE0, + RDMATX_CAP_COMPARE_DATA2_NODE0, + RDMATX_CAP_COMPARE_DATA3_NODE0, + RDMATX_CAP_COMPARE_DATA4_NODE0, + RDMATX_CAP_COMPARE_DATA5_NODE0, + 
RDMATX_CAP_COMPARE_DATA6_NODE0, + RDMATX_CAP_COMPARE_DATA7_NODE0, + RDMATX_CAP_COMPARE_DATA8_NODE0, + RDMATX_CAP_COMPARE_DATA9_NODE0, + RDMATX_CAP_COMPARE_DATA10_NODE0, + RDMATX_CAP_COMPARE_DATA11_NODE0, + RDMATX_CAP_COMPARE_DATA12_NODE0, + RDMATX_CAP_COMPARE_DATA13_NODE0, + RDMATX_CAP_COMPARE_DATA14_NODE0, + RDMATX_CAP_COMPARE_DATA15_NODE0 + }; + + u32 node0_mask, val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE0]; + if (chl_sel_idx >= RDMA_TX_SEL_NODE_MODULE_NUM) + return -EINVAL; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_SEL_NODE0, + (cap_cfg->channel_select[NODE0] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_OPEN_NODE0, + (cap_cfg->channel_open[NODE0] & 0xF)); + + node_choose = cap_cfg->node_choose[NODE0] & 0xFF; + node0_mask = ~(0xff); + if (chl_sel_idx == RDMA_TX_SEL_NODE_MODULE_WQE) { + node_select = (cap_cfg->node_select[NODE0] & 0xFF); + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + val = ((val & node0_mask) | node_select); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + val = ((val & node0_mask) | node_choose); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + } else { + if (chl_sel_idx != RDMA_TX_SEL_NODE_MODULE_NONE) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE0_SEL)); + + READ_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], + &val); + val = ((val & node0_mask) | node_choose); + WRITE_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], + val); + } + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE0]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE0]); + } + + 
WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_TIME_WRL2D_NODE0, + cap_cfg->rdma_time_wrl2d[NODE0]); + return 0; +} + +int write_cap_tx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 node_select, node_choose, comapre_loop; + u64 wqe_offset[RDMA_TX_CAP_WQE_MOD_NUM] = { + RDMATX_CAP_NODE1_WQE_PRE_READ, RDMATX_CAP_NODE1_WQE_HANDLE, + RDMATX_CAP_NODE1_PACKAGE + }; + u64 node1offset[RDMA_TX_SEL_NODE_MODULE_NUM - 1] = { + RDMATX_CAP_NODE1_ACK, RDMATX_CAP_NODE1_DB, RDMATX_CAP_NODE1_AEQ, + 0, RDMATX_CAP_NODE1_TXWINDOW + }; + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_BIT_EN0_NODE1, + RDMATX_CAP_COMPARE_BIT_EN1_NODE1, + RDMATX_CAP_COMPARE_BIT_EN2_NODE1, + RDMATX_CAP_COMPARE_BIT_EN3_NODE1, + RDMATX_CAP_COMPARE_BIT_EN4_NODE1, + RDMATX_CAP_COMPARE_BIT_EN5_NODE1, + RDMATX_CAP_COMPARE_BIT_EN6_NODE1, + RDMATX_CAP_COMPARE_BIT_EN7_NODE1, + RDMATX_CAP_COMPARE_BIT_EN8_NODE1, + RDMATX_CAP_COMPARE_BIT_EN9_NODE1, + RDMATX_CAP_COMPARE_BIT_EN10_NODE1, + RDMATX_CAP_COMPARE_BIT_EN11_NODE1, + RDMATX_CAP_COMPARE_BIT_EN12_NODE1, + RDMATX_CAP_COMPARE_BIT_EN13_NODE1, + RDMATX_CAP_COMPARE_BIT_EN14_NODE1, + RDMATX_CAP_COMPARE_BIT_EN15_NODE1 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMATX_CAP_COMPARE_DATA0_NODE1, + RDMATX_CAP_COMPARE_DATA1_NODE1, + RDMATX_CAP_COMPARE_DATA2_NODE1, + RDMATX_CAP_COMPARE_DATA3_NODE1, + RDMATX_CAP_COMPARE_DATA4_NODE1, + RDMATX_CAP_COMPARE_DATA5_NODE1, + RDMATX_CAP_COMPARE_DATA6_NODE1, + RDMATX_CAP_COMPARE_DATA7_NODE1, + RDMATX_CAP_COMPARE_DATA8_NODE1, + RDMATX_CAP_COMPARE_DATA9_NODE1, + RDMATX_CAP_COMPARE_DATA10_NODE1, + RDMATX_CAP_COMPARE_DATA11_NODE1, + RDMATX_CAP_COMPARE_DATA12_NODE1, + RDMATX_CAP_COMPARE_DATA13_NODE1, + RDMATX_CAP_COMPARE_DATA14_NODE1, + RDMATX_CAP_COMPARE_DATA15_NODE1 + }; + u32 node1_mask, val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE1]; + if (chl_sel_idx >= RDMA_TX_SEL_NODE_MODULE_NUM) + return -EINVAL; + + rf = container_of(dev, struct 
zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_SEL_NODE1, + (cap_cfg->channel_select[NODE1] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_CHL_OPEN_NODE1, + (cap_cfg->channel_open[NODE1] & 0xF)); + node_choose = cap_cfg->node_choose[NODE1] & 0xFF; + node1_mask = ~(0xff); + if (chl_sel_idx == RDMA_TX_SEL_NODE_MODULE_WQE) { + node_select = (cap_cfg->node_select[NODE1] & 0xFF); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + val = ((val & node1_mask) | node_select); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + val = ((val & node1_mask) | node_choose); + writel(val, (u32 __iomem *)(dev->hw->hw_addr + + wqe_offset[node_select])); + } else { + if (chl_sel_idx != RDMA_TX_SEL_NODE_MODULE_NONE) { + writel(0, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_CAP_NODE1_SEL)); + + READ_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], + &val); + val = ((val & node1_mask) | node_choose); + WRITE_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], + val); + } + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE1]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE1]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMATX_CAP_TIME_WRL2D_NODE1, + cap_cfg->rdma_time_wrl2d[NODE1]); + return 0; +} + +int write_cap_rx_reg_node0(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 comapre_loop; + u64 node0offset[RDMA_RX_SEL_NODE_MODULE_NUM] = { + RDMARX_CAP_NODE0_SEL_RTT_T4, + RDMARX_CAP_NODE0_SEL_PKT_PROC, + RDMARX_CAP_NODE_SEL_HD_CACHE, + RDMARX_CAP_NODE_SEL_VAPA_DDRWR, + 0, + RDMARX_CAP_NODE0_SEL_PRIFIELD_CHECK, + RDMARX_CAP_NODE0_SEL_READ_SRQC, + RDMARX_CAP_NODE0_SEL_READ_WQE, + 
RDMARX_CAP_NODE0_SEL_CNP_GEN, + RDMARX_CAP_NODE_SEL_ACKNAKFIFO, + RDMARX_CAP_NODE0_SEL_CQE, + RDMARX_CAP_NODE0_SEL_COMPLQUEUE, + RDMARX_CAP_NODE_SEL_NOF, + RDMARX_CAP_NODE0_SEL_TXSUB + }; + + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_BIT_EN0_NODE0, + RDMARX_CAP_COMPARE_BIT_EN1_NODE0, + RDMARX_CAP_COMPARE_BIT_EN2_NODE0, + RDMARX_CAP_COMPARE_BIT_EN3_NODE0, + RDMARX_CAP_COMPARE_BIT_EN4_NODE0, + RDMARX_CAP_COMPARE_BIT_EN5_NODE0, + RDMARX_CAP_COMPARE_BIT_EN6_NODE0, + RDMARX_CAP_COMPARE_BIT_EN7_NODE0, + RDMARX_CAP_COMPARE_BIT_EN8_NODE0, + RDMARX_CAP_COMPARE_BIT_EN9_NODE0, + RDMARX_CAP_COMPARE_BIT_EN10_NODE0, + RDMARX_CAP_COMPARE_BIT_EN11_NODE0, + RDMARX_CAP_COMPARE_BIT_EN12_NODE0, + RDMARX_CAP_COMPARE_BIT_EN13_NODE0, + RDMARX_CAP_COMPARE_BIT_EN14_NODE0, + RDMARX_CAP_COMPARE_BIT_EN15_NODE0 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_DATA0_NODE0, + RDMARX_CAP_COMPARE_DATA1_NODE0, + RDMARX_CAP_COMPARE_DATA2_NODE0, + RDMARX_CAP_COMPARE_DATA3_NODE0, + RDMARX_CAP_COMPARE_DATA4_NODE0, + RDMARX_CAP_COMPARE_DATA5_NODE0, + RDMARX_CAP_COMPARE_DATA6_NODE0, + RDMARX_CAP_COMPARE_DATA7_NODE0, + RDMARX_CAP_COMPARE_DATA8_NODE0, + RDMARX_CAP_COMPARE_DATA9_NODE0, + RDMARX_CAP_COMPARE_DATA10_NODE0, + RDMARX_CAP_COMPARE_DATA11_NODE0, + RDMARX_CAP_COMPARE_DATA12_NODE0, + RDMARX_CAP_COMPARE_DATA13_NODE0, + RDMARX_CAP_COMPARE_DATA14_NODE0, + RDMARX_CAP_COMPARE_DATA15_NODE0 + }; + u32 node0_mask = 0; + u32 node0_value = 0; + u32 val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE0]; + if (chl_sel_idx >= RDMA_RX_SEL_NODE_MODULE_NUM) { + return CAP_WRITE_NODE0_REGS_ERROR; + }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_SEL_NODE0, + (cap_cfg->channel_select[NODE0] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_OPEN_NODE0, + (cap_cfg->channel_open[NODE0] & 0xF)); + + switch (chl_sel_idx) { + case RDMA_RX_SEL_NODE_MODULE_RTT_T4: + case 
RDMA_RX_SEL_NODE_MODULE_PKT_PROC: + case RDMA_RX_SEL_NODE_MODULE_CEQ: + case RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE: + case RDMA_RX_SEL_NODE_MODULE_TX_SUB: + node0_mask = ~(0xffffffff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_HD_CACHE: + case RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR: + node0_mask = ~(0xf); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK: + case RDMA_RX_SEL_NODE_MODULE_READ_SRQC: + case RDMA_RX_SEL_NODE_MODULE_READ_WQE: + node0_mask = ~(0xff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_CNP_GEN: + node0_mask = ~(0x7); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO: + node0_mask = ~(0xffff); + node0_value = cap_cfg->node_select[NODE0]; + break; + case RDMA_RX_SEL_NODE_MODULE_NOF: + node0_mask = ~(0xffff << 16); + node0_value = cap_cfg->node_select[NODE0] << 16; + break; + default: + break; + } + if (chl_sel_idx != RDMA_RX_SEL_NODE_MODULE_PSN_CHECK) { + READ_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], &val); + val = ((val & node0_mask) | node0_value); + WRITE_REGISTER_AND_CHECK(rf, node0offset[chl_sel_idx], val); + + pr_info("val=%u, node0_value=%u, channel_select=%u, node0_select val= 0x%08llx\n", + val, node0_value, chl_sel_idx, + node0offset[chl_sel_idx]); + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE0]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE0]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_TIME_WRL2D_NODE0, + cap_cfg->rdma_time_wrl2d[NODE0]); + return 0; +} + +int write_cap_rx_reg_node1(struct zxdh_sc_dev *dev, + struct zxdh_cap_cfg *cap_cfg) +{ + struct zxdh_pci_f *rf; + u8 comapre_loop; + u64 node1offset[RDMA_RX_SEL_NODE_MODULE_NUM] = 
{ + RDMARX_CAP_NODE1_SEL_RTT_T4, + RDMARX_CAP_NODE1_SEL_PKT_PROC, + RDMARX_CAP_NODE_SEL_HD_CACHE, + RDMARX_CAP_NODE_SEL_VAPA_DDRWR, + 0, + RDMARX_CAP_NODE1_SEL_PRIFIELD_CHECK, + RDMARX_CAP_NODE1_SEL_READ_SRQC, + RDMARX_CAP_NODE1_SEL_READ_WQE, + RDMARX_CAP_NODE1_SEL_CNP_GEN, + RDMARX_CAP_NODE_SEL_ACKNAKFIFO, + RDMARX_CAP_NODE1_SEL_CQE, + RDMARX_CAP_NODE1_SEL_COMPLQUEUE, + RDMARX_CAP_NODE_SEL_NOF, + RDMARX_CAP_NODE1_SEL_TXSUB + }; + + u64 compare_bit_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_BIT_EN0_NODE1, + RDMARX_CAP_COMPARE_BIT_EN1_NODE1, + RDMARX_CAP_COMPARE_BIT_EN2_NODE1, + RDMARX_CAP_COMPARE_BIT_EN3_NODE1, + RDMARX_CAP_COMPARE_BIT_EN4_NODE1, + RDMARX_CAP_COMPARE_BIT_EN5_NODE1, + RDMARX_CAP_COMPARE_BIT_EN6_NODE1, + RDMARX_CAP_COMPARE_BIT_EN7_NODE1, + RDMARX_CAP_COMPARE_BIT_EN8_NODE1, + RDMARX_CAP_COMPARE_BIT_EN9_NODE1, + RDMARX_CAP_COMPARE_BIT_EN10_NODE1, + RDMARX_CAP_COMPARE_BIT_EN11_NODE1, + RDMARX_CAP_COMPARE_BIT_EN12_NODE1, + RDMARX_CAP_COMPARE_BIT_EN13_NODE1, + RDMARX_CAP_COMPARE_BIT_EN14_NODE1, + RDMARX_CAP_COMPARE_BIT_EN15_NODE1 + }; + u64 compare_data_en_offset[EN_32bit_GROUP_NUM] = { + RDMARX_CAP_COMPARE_DATA0_NODE1, + RDMARX_CAP_COMPARE_DATA1_NODE1, + RDMARX_CAP_COMPARE_DATA2_NODE1, + RDMARX_CAP_COMPARE_DATA3_NODE1, + RDMARX_CAP_COMPARE_DATA4_NODE1, + RDMARX_CAP_COMPARE_DATA5_NODE1, + RDMARX_CAP_COMPARE_DATA6_NODE1, + RDMARX_CAP_COMPARE_DATA7_NODE1, + RDMARX_CAP_COMPARE_DATA8_NODE1, + RDMARX_CAP_COMPARE_DATA9_NODE1, + RDMARX_CAP_COMPARE_DATA10_NODE1, + RDMARX_CAP_COMPARE_DATA11_NODE1, + RDMARX_CAP_COMPARE_DATA12_NODE1, + RDMARX_CAP_COMPARE_DATA13_NODE1, + RDMARX_CAP_COMPARE_DATA14_NODE1, + RDMARX_CAP_COMPARE_DATA15_NODE1 + }; + u32 node1_mask = 0; + u32 node1_value = 0; + u32 val, chl_sel_idx; + + chl_sel_idx = cap_cfg->channel_select[NODE1]; + if (chl_sel_idx >= RDMA_RX_SEL_NODE_MODULE_NUM) + return CAP_WRITE_NODE1_REGS_ERROR; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_SEL_NODE1, + 
(cap_cfg->channel_select[NODE1] & 0xF)); + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_CHL_OPEN_NODE1, + (cap_cfg->channel_open[NODE1] & 0xF)); + + switch (chl_sel_idx) { + case RDMA_RX_SEL_NODE_MODULE_RTT_T4: + case RDMA_RX_SEL_NODE_MODULE_PKT_PROC: + case RDMA_RX_SEL_NODE_MODULE_CEQ: + case RDMA_RX_SEL_NODE_MODULE_COMPLQUEUE: + case RDMA_RX_SEL_NODE_MODULE_TX_SUB: + node1_mask = ~(0xffffffff); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_HD_CACHE: + case RDMA_RX_SEL_NODE_MODULE_VAPA_DDRWR: + node1_mask = ~(0xf << 16); + node1_value = cap_cfg->node_select[NODE1] << 16; + break; + case RDMA_RX_SEL_NODE_MODULE_PRIFIELD_CHECK: + case RDMA_RX_SEL_NODE_MODULE_READ_SRQC: + case RDMA_RX_SEL_NODE_MODULE_READ_WQE: + case RDMA_RX_SEL_NODE_MODULE_NOF: + node1_mask = ~(0xff); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_CNP_GEN: + node1_mask = ~(0x7); + node1_value = cap_cfg->node_select[NODE1]; + break; + case RDMA_RX_SEL_NODE_MODULE_ACKNAKFIFO: + node1_mask = ~(0xffff << 16); + node1_value = cap_cfg->node_select[NODE1] << 16; + break; + default: + break; + } + if (chl_sel_idx != RDMA_RX_SEL_NODE_MODULE_PSN_CHECK) { + READ_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], &val); + val = (val & node1_mask) | node1_value; + WRITE_REGISTER_AND_CHECK(rf, node1offset[chl_sel_idx], val); + + pr_info("val=%u, node1_value=%u, channel_select_node1=%u, node1_select val= 0x%08llx\n", + val, node1_value, chl_sel_idx, + node1offset[chl_sel_idx]); + } + + for (comapre_loop = 0; comapre_loop < EN_32bit_GROUP_NUM; + comapre_loop++) { + WRITE_REGISTER_AND_CHECK( + rf, compare_bit_en_offset[comapre_loop], + cap_cfg->compare_bit_en[comapre_loop][NODE1]); + WRITE_REGISTER_AND_CHECK( + rf, compare_data_en_offset[comapre_loop], + cap_cfg->compare_data[comapre_loop][NODE1]); + } + + WRITE_REGISTER_AND_CHECK(rf, RDMARX_CAP_TIME_WRL2D_NODE1, + cap_cfg->rdma_time_wrl2d[NODE1]); + return 0; +} + +static bool 
check_cap_cfg(struct zxdh_cap_cfg *cap_cfg) +{ + if (cap_cfg->cap_data_start_cap == 0x0) { + pr_err("zxdh cap_data_start_cap cfg err!\n"); + return false; + } + + if (cap_cfg->cap_data_start_cap == 0x1) { + if (cap_cfg->cap_position == CAP_TX && + (cap_cfg->channel_select[NODE0] == + RDMA_TX_SEL_NODE_MODULE_NONE || + cap_cfg->channel_select[NODE0] > + RDMA_TX_SEL_NODE_MODULE_WQE)) { + pr_err("zxdh cap_data_start_cap cfg tx node0 channel_select:%u err!\n", + cap_cfg->channel_select[NODE0]); + return false; + } + + if (cap_cfg->cap_position == CAP_RX && + cap_cfg->channel_select[NODE0] >= + RDMA_RX_SEL_NODE_MODULE_NUM) { + pr_err("zxdh cap_data_start_cap cfg rx node0 channel_select:%u err!\n", + cap_cfg->channel_select[NODE0]); + return false; + } + } + + if (cap_cfg->cap_data_start_cap == 0x2) { + if (cap_cfg->cap_position == CAP_TX && + (cap_cfg->channel_select[NODE1] == + RDMA_TX_SEL_NODE_MODULE_NONE || + cap_cfg->channel_select[NODE1] > + RDMA_TX_SEL_NODE_MODULE_WQE)) { + pr_err("zxdh cap_data_start_cap cfg tx node1 channel_select:%u err!\n", + cap_cfg->channel_select[NODE1]); + return false; + } + + if (cap_cfg->cap_position == CAP_RX && + cap_cfg->channel_select[NODE1] >= + RDMA_RX_SEL_NODE_MODULE_NUM) { + pr_err("zxdh cap_data_start_cap cfg rx node1 channel_select:%u err!\n", + cap_cfg->channel_select[NODE1]); + return false; + } + } + return true; +} + +static void clean_data_cap_buff(struct zxdh_sc_dev *dev, u64 size) +{ + u64 numbufs; + u64 i; + u64 j; + j = 0; + numbufs = size / ZXDH_HMC_DIRECT_BP_SIZE; + for (i = 0; i < numbufs; i++, j++) { + memset(dev->data_cap_sd.entry[j].u.bp.addr.va, 0, + ZXDH_HMC_DIRECT_BP_SIZE); + } +} +static int allocate_addr_for_data_cap(struct zxdh_device *iwdev, struct zxdh_ucontext *ucontext, struct zxdh_cap_addr_info *cap_addr_info, __u64 *cap_pa) +{ + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr = + zxdh_zalloc_mapped(iwdev, &cap_addr_info->addr_info.cap_direct_dma_addr.cap_dma_addr, + 
ZXDH_CAP_DATA_HOST_MEM_SIZE, DMA_BIDIRECTIONAL); + if (cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr == NULL) { + pr_err("allocate_addr_for_data_cap fail!\n"); + return -ENOMEM; + } + +#ifdef RDMA_MMAP_DB_SUPPORT + cap_addr_info->entry_info.cap_mmap_entry = + zxdh_cap_mmap_entry_insert(ucontext, + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr, + ZXDH_CAP_DATA_HOST_MEM_SIZE, + ZXDH_MMAP_PFN, + cap_pa); +#else + cap_addr_info->entry_info.cap_mmap_entry = + zxdh_user_mmap_entry_add_hash( + ucontext, + virt_to_phys(cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr), + ZXDH_MMAP_PFN, cap_pa); + +#endif + if (!cap_addr_info->entry_info.cap_mmap_entry) { + pr_err("cap_mmap_entry insert err!\n"); + return -ENOMEM; + } + return 0; +} + +static int prepare_addr_for_data_cap(struct ib_ucontext *ib_uctx, struct zxdh_device *iwdev, + struct zxdh_cap_cfg *cap_cfg, struct zxdh_cap_start_resp *cap_resp) +{ + struct zxdh_ucontext *ucontext; + struct zxdh_sc_dev *dev; + u64 mmap_len; + int ret = 0; + int i; + u64 *cap_iova_addr[CAP_NODE_NUM]; + __u64 cap_pa[CAP_NODE_NUM] = { 0 }; + u8 cap_position = cap_cfg->cap_position; + + ucontext = to_ucontext(ib_uctx); + dev = &iwdev->rf->sc_dev; + + if (cap_cfg->size > ZXDH_CAP_DATA_HOST_MEM_SIZE) { + mmap_len = cap_cfg->size; + cap_iova_addr[NODE0] = &iwdev->hw_data_cap.cap_txrx_use_iova[NODE0].addr_info.cap_iova_addr; + cap_iova_addr[NODE1] = &iwdev->hw_data_cap.cap_txrx_use_iova[NODE1].addr_info.cap_iova_addr; + if ((cap_cfg->cap_data_start_cap & 0x3) == 0x3) { + if (mmap_len > (ZXDH_CAP_DATA_HMC_MEM_SIZE / 2)) { + pr_err("prepare_addr_for_data_cap mmap_len:%llu too big!\n", mmap_len); + return -EINVAL; + } + *cap_iova_addr[NODE0] = dev->data_cap_sd.data_cap_base; + *cap_iova_addr[NODE1] = dev->data_cap_sd.data_cap_base + mmap_len; + clean_data_cap_buff(dev, mmap_len * 2); + } else if ((cap_cfg->cap_data_start_cap & 0x1) == 0x1) { + if (mmap_len > ZXDH_CAP_DATA_HMC_MEM_SIZE) { + 
pr_err("prepare_addr_for_data_cap node0 mmap_len:%llu too big!\n", mmap_len); + return -EINVAL; + } + *cap_iova_addr[NODE0] = dev->data_cap_sd.data_cap_base; + clean_data_cap_buff(dev, mmap_len); + } else if ((cap_cfg->cap_data_start_cap & 0x2) == 0x2) { + if (mmap_len > ZXDH_CAP_DATA_HMC_MEM_SIZE) { + pr_err("prepare_addr_for_data_cap node1 mmap_len:%llu too big!\n", mmap_len); + return -EINVAL; + } + *cap_iova_addr[NODE1] = dev->data_cap_sd.data_cap_base; + clean_data_cap_buff(dev, mmap_len); + } + + for (i = 0; i < CAP_NODE_NUM; i++) { + if (*cap_iova_addr[i] != 0) { +#ifdef RDMA_MMAP_DB_SUPPORT + iwdev->hw_data_cap.cap_txrx_use_iova[i].entry_info.cap_mmap_entry = + zxdh_cap_mmap_entry_insert( + ucontext, + (void *)(uintptr_t)(*cap_iova_addr[i]), + mmap_len, ZXDH_MMAP_HMC, + &cap_pa[i]); +#else + iwdev->hw_data_cap.cap_txrx_use_iova[i].entry_info.cap_mmap_entry = + zxdh_user_mmap_entry_add_hash(ucontext, + *cap_iova_addr[i], + ZXDH_MMAP_HMC, + &cap_pa[i]); + +#endif + if (i == 0) { + cap_resp->cap_pa_node0 = cap_pa[NODE0]; + } else { + cap_resp->cap_pa_node1 = cap_pa[NODE1]; + } + + if (!iwdev->hw_data_cap.cap_txrx_use_iova[i].entry_info.cap_mmap_entry) { + pr_err("cap_mmap_entry_node0 insert err!\n"); + return -ENOMEM; + } + } + } + + } else { + if (cap_cfg->size != ZXDH_CAP_DATA_HOST_MEM_SIZE) { + cap_cfg->size = ZXDH_CAP_DATA_HOST_MEM_SIZE; + } + + for (i = 0; i < CAP_NODE_NUM; i++) { + if (cap_cfg->cap_data_start_cap & (1 << i)) { + if (cap_position == CAP_TX) { + if (allocate_addr_for_data_cap(iwdev, ucontext, &iwdev->hw_data_cap.cap_tx_use_direct_dma[i], &cap_pa[i]) != 0) { + pr_err("zxdh_zalloc_mapped for tx node%u fail!\n", i); + return -ENOMEM; + } + } else if (cap_position == CAP_RX) { + if (allocate_addr_for_data_cap(iwdev, ucontext, &iwdev->hw_data_cap.cap_rx_use_direct_dma[i], &cap_pa[i]) != 0) { + pr_err("zxdh_zalloc_mapped for rx node%u fail!\n", i); + return -ENOMEM; + } + } else { + pr_err("zxdh_zalloc_mapped for cap_position:%u err!\n", 
cap_position); + return -EINVAL; + } + } + if (i == 0) { + cap_resp->cap_pa_node0 = cap_pa[NODE0]; + } else { + cap_resp->cap_pa_node1 = cap_pa[NODE1]; + } + } + } + + return ret; +} + +void free_cap_addr(struct zxdh_device *iwdev, struct zxdh_cap_addr_info *cap_addr_info) +{ + if (cap_addr_info->entry_info.cap_mmap_entry != NULL) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + cap_addr_info->entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + cap_addr_info->entry_info.cap_mmap_entry); +#endif + cap_addr_info->entry_info.cap_mmap_entry = NULL; + } + + if (cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr != NULL) { + zxdh_free_mapped(iwdev, cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr, + cap_addr_info->addr_info.cap_direct_dma_addr.cap_dma_addr, + ZXDH_CAP_DATA_HOST_MEM_SIZE, + DMA_BIDIRECTIONAL); + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr = NULL; + } +} + +static void cap_free_for_data_cap(u8 free_type, struct zxdh_device *iwdev) +{ + struct zxdh_cap_addr_info *cap_addr_info; + int i; + for (i = 0; i < CAP_NODE_NUM; i++) { + if (free_type == FREE_TYPE_TX) { + free_cap_addr(iwdev, &iwdev->hw_data_cap.cap_tx_use_direct_dma[i]); + } + + if (free_type == FREE_TYPE_RX) { + free_cap_addr(iwdev, &iwdev->hw_data_cap.cap_rx_use_direct_dma[i]); + } + + pr_info("free_cap_addr for free_type:%d (0:None,1:MP,2:TX,3:RX,4:IOVA) node %d!\n", free_type, i); + if (free_type == FREE_TYPE_IOVA) { + cap_addr_info = &iwdev->hw_data_cap.cap_txrx_use_iova[i]; + if (cap_addr_info->entry_info.cap_mmap_entry != NULL) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + cap_addr_info->entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + cap_addr_info->entry_info.cap_mmap_entry); +#endif + cap_addr_info->entry_info.cap_mmap_entry = NULL; + } + if (iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr != 0) + iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr = 0; + } + } 
+} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_START)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_START)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct ib_device *ib_dev; + struct zxdh_cap_cfg cap_cfg = { 0 }; + struct zxdh_cap_start_resp cap_resp = { 0 }; + int ret; + u32 dma_addr_low, dma_addr_high, cap_id; + u32 reg_val = 0; + bool is_host_dyn_mem_used = true; + dma_addr_t *dma_addr[CAP_NODE_NUM] = { NULL }; + u64 *cap_iova_addr[CAP_NODE_NUM] = { NULL }; + u8 free_type; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + dev = &iwdev->rf->sc_dev; + + ret = uverbs_copy_from(&cap_cfg, attrs, ZXDH_IB_ATTR_DEV_CAP_START); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (cap_cfg.size > ZXDH_CAP_DATA_HOST_MEM_SIZE) { + is_host_dyn_mem_used = false; + free_type = FREE_TYPE_IOVA; + if (iwdev->hw_data_cap.cap_txrx_use_iova[NODE0].addr_info.cap_iova_addr != 0 || + iwdev->hw_data_cap.cap_txrx_use_iova[NODE1].addr_info.cap_iova_addr != 0) { + pr_err("vhca_id:%u,cap_position:%u iova cap already start.\n", dev->vhca_id, cap_cfg.cap_position); + return CAP_ALREADY_START; + } + } else { + if (cap_cfg.cap_position == CAP_TX) { + READ_REGISTER_AND_CHECK(iwdev->rf, RDMATX_DATA_START_CAP, ®_val); + dma_addr[NODE0] = &iwdev->hw_data_cap.cap_tx_use_direct_dma[NODE0].addr_info.cap_direct_dma_addr.cap_dma_addr; + dma_addr[NODE1] = &iwdev->hw_data_cap.cap_tx_use_direct_dma[NODE1].addr_info.cap_direct_dma_addr.cap_dma_addr; + free_type = FREE_TYPE_TX; + } else if (cap_cfg.cap_position == CAP_RX) { + READ_REGISTER_AND_CHECK(iwdev->rf, RDMARX_DATA_START_CAP, ®_val); + dma_addr[NODE0] = 
&iwdev->hw_data_cap.cap_rx_use_direct_dma[NODE0].addr_info.cap_direct_dma_addr.cap_dma_addr; + dma_addr[NODE1] = &iwdev->hw_data_cap.cap_rx_use_direct_dma[NODE1].addr_info.cap_direct_dma_addr.cap_dma_addr; + free_type = FREE_TYPE_RX; + } else { + pr_err("vhca_id:%u,cap_position:%u error.\n", dev->vhca_id, cap_cfg.cap_position); + return CAP_CFG_ERROR; + } + if (reg_val != 0) { + pr_err("vhca_id:%u,cap_position:%u already start.\n", dev->vhca_id, cap_cfg.cap_position); + return CAP_ALREADY_START; + } + } + + if (!check_cap_cfg(&cap_cfg)) + return CAP_CFG_ERROR; + cap_iova_addr[NODE0] = &iwdev->hw_data_cap.cap_txrx_use_iova[NODE0].addr_info.cap_iova_addr; + cap_iova_addr[NODE1] = &iwdev->hw_data_cap.cap_txrx_use_iova[NODE1].addr_info.cap_iova_addr; + ret = prepare_addr_for_data_cap(ib_uctx, iwdev, &cap_cfg, &cap_resp); + if (ret != 0) { + ret = CAP_ALLOC_ADDR_ERROR; + goto free; + } + + if ((cap_cfg.cap_data_start_cap & 0x1) == 0x1) { + if (is_host_dyn_mem_used) { + dma_addr_low = (u32)((u64)(*dma_addr[NODE0]) & 0xFFFFFFFF); + dma_addr_high = + (u32)(((u64)(*dma_addr[NODE0]) >> 32) & 0xFFFFFFFF); + // access host, smmu not used + cap_id = (ZXDH_INDICATE_HOST_NOSMMU << 5 | ZXDH_CPU_DDR); + } else { + dma_addr_low = (u32)(*cap_iova_addr[NODE0] & 0xFFFFFFFF); + dma_addr_high = + (u32)((*cap_iova_addr[NODE0] >> 32) & 0xFFFFFFFF); + // access host, smmu iova used + cap_id = (ZXDH_INDICATE_HOST_SMMU << 5 | ZXDH_CPU_DDR); + } + + pr_info("is_host_dyn_mem_used:%d,dma_addr_low:0x%X,dma_addr_high:0x%X.\n", + is_host_dyn_mem_used, dma_addr_low, dma_addr_high); + pr_info("vhca_id:%u,cap_id:%X.\n", dev->vhca_id, cap_id); + + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_LOW_NODE0, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE0, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_LOW_NODE0, + cap_cfg.size); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, 
RDMATX_CAP_AXI_WR_LEN_HIGH_NODE0, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_VHCA_NUM_NODE0, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_AXI_ID_NODE0, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_CAP_ID_NODE0, cap_id); + ret = write_cap_tx_reg_node0(dev, &cap_cfg); + if (ret != 0) + goto free; + } + + if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_LOW_NODE0, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE0, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_LOW_NODE0, + cap_cfg.size); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_VHCA_NUM_NODE0, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_AXI_ID_NODE0, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_CAP_ID_NODE0, cap_id); + ret = write_cap_rx_reg_node0(dev, &cap_cfg); + if (ret != 0) + goto free; + } + } + + if ((cap_cfg.cap_data_start_cap & 0x2) == 0x2) { + if (is_host_dyn_mem_used) { + dma_addr_low = (u32)(*dma_addr[NODE1] & 0xFFFFFFFF); + dma_addr_high = + (u32)((*dma_addr[NODE1] >> 32) & 0xFFFFFFFF); + // access host, smmu not used + cap_id = (ZXDH_INDICATE_HOST_NOSMMU << 5 | ZXDH_CPU_DDR); + } else { + dma_addr_low = (u32)(*cap_iova_addr[NODE1] & 0xFFFFFFFF); + dma_addr_high = + (u32)(((u64)(*cap_iova_addr[NODE1]) >> 32) & 0xFFFFFFFF); + // access host, smmu iova used + cap_id = (ZXDH_INDICATE_HOST_SMMU << 5 | ZXDH_CPU_DDR); + } + pr_info("node1 is_host_dyn_mem_used:%d, dma_addr_low:0x%X,dma_addr_high:0x%X.\n", + is_host_dyn_mem_used, dma_addr_low, dma_addr_high); + pr_info("vhca_id:%u,cap_id:%X.\n", dev->vhca_id, cap_id); + + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_LOW_NODE1, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_ADDR_HIGH_NODE1, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, 
RDMATX_CAP_AXI_WR_LEN_LOW_NODE1, + cap_cfg.size); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_AXI_WR_LEN_HIGH_NODE1, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_VHCA_NUM_NODE1, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMATX_CAP_AXI_ID_NODE1, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMATX_CAP_CAP_ID_NODE1, cap_id); + ret = write_cap_tx_reg_node1(dev, &cap_cfg); + if (ret != 0) + goto free; + } + + if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_LOW_NODE1, + dma_addr_low); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_ADDR_HIGH_NODE1, + dma_addr_high); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_LOW_NODE1, + cap_cfg.size); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_AXI_WR_LEN_HIGH_NODE1, 0); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_VHCA_NUM_NODE1, + dev->vhca_id); + WRITE_REGISTER_AND_CHECK(iwdev->rf, + RDMARX_CAP_AXI_ID_NODE1, 0); + WRITE_REGISTER_AND_CHECK( + iwdev->rf, RDMARX_CAP_CAP_ID_NODE1, cap_id); + ret = write_cap_rx_reg_node1(dev, &cap_cfg); + if (ret != 0) + goto free; + } + } + + if (cap_cfg.cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMATX_DATA_START_CAP, + cap_cfg.cap_data_start_cap); + } else if (cap_cfg.cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMARX_DATA_START_CAP, + cap_cfg.cap_data_start_cap); + } + + pr_info("cap_start msg:cap_position=%u,cap_pa_node0:%llu,cap_pa_node1:%llu\n", + cap_cfg.cap_position, cap_resp.cap_pa_node0, cap_resp.cap_pa_node1); +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_CAP_START_RESP, + &cap_resp, sizeof(cap_resp)); +#else + ret = zxdh_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_CAP_START_RESP, + &cap_resp, sizeof(cap_resp)); +#endif + +free: + if (ret != 0) { + cap_free_for_data_cap(free_type, iwdev); + pr_err("cap_start fail for %d!\n", ret); + return -EFAULT; + } + + 
return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_STOP)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_STOP)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 cap_position; + int ret; +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&cap_position, attrs, ZXDH_IB_ATTR_DEV_CAP_STOP); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + if (cap_position == CAP_TX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMATX_DATA_START_CAP, 0); + } else if (cap_position == CAP_RX) { + WRITE_REGISTER_AND_CHECK(iwdev->rf, RDMARX_DATA_START_CAP, 0); + } else { + pr_info("cap %u stop err!\n", cap_position); + return -EINVAL; + } + + pr_info("cap %u stop!1:tx,2:rx\n", cap_position); + return 0; +} + +static void free_mmap_addr(struct zxdh_device *iwdev, u32 length, struct zxdh_cap_addr_info *cap_addr_info) +{ + if (cap_addr_info->entry_info.cap_mmap_entry != NULL) { + pr_info("free_mmap_addr rdma_user_mmap_entry_remove!\n"); +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + cap_addr_info->entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + cap_addr_info->entry_info.cap_mmap_entry); +#endif + cap_addr_info->entry_info.cap_mmap_entry = NULL; + } + + if (cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr != NULL) { + dma_free_coherent(iwdev->rf->sc_dev.hw->device, + length, + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr, + cap_addr_info->addr_info.cap_direct_dma_addr.cap_dma_addr); + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr = NULL; + pr_info("free_mmap_addr,length:%u!\n", length); + } +} + +#ifdef ZXDH_UAPI_DEF +static 
int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_FREE)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CAP_FREE)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + u8 free_type; + int ret = 0; + u32 length; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&free_type, attrs, ZXDH_IB_ATTR_DEV_CAP_FREE); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + switch (free_type) { + case FREE_TYPE_MP: + length = ALIGN(ZXDH_L2D_MPCAP_BUFF_SIZE, ZXDH_HW_PAGE_SIZE); + free_mmap_addr(iwdev, length, &iwdev->hw_data_cap.mp_cap); + break; + case FREE_TYPE_HW_OBJ_DATA: + length = iwdev->hw_data_cap.object_buffer_size; + free_mmap_addr(iwdev, length, &iwdev->hw_data_cap.hw_object_mmap); + if (iwdev->hw_data_cap.object_buffer_size) + iwdev->hw_data_cap.object_buffer_size = 0; + break; + case FREE_TYPE_IOVA: + case FREE_TYPE_TX: + case FREE_TYPE_RX: + cap_free_for_data_cap(free_type, iwdev); + break; + default: + pr_err("ZXDH_IB_METHOD_DEV_CAP_FREE free_type:%u err!\n", free_type); + ret = -EINVAL; + break; + } + return ret; +} + +static int write_mp_cap_regs(struct zxdh_device *iwdev, bool is_l2d_used, + struct zxdh_mp_cap_resp *resp) +{ + int i; + uint8_t mp_idx, gqp_idx; + uint64_t reg_addr, cap_size, addr_val; + uint32_t reg_val; + uint32_t read_reg_val = 0; + + if (is_l2d_used) { + iwdev->hw_data_cap.mp_cap_media_addr_base = + iwdev->rf->sc_dev.l2d_smmu_addr + + ZXDH_SMMU_OFFSET + ZXDH_MP_BASERTT_OFFSET + + ZXDH_SMMU_CMDQ_OFFSET; + cap_size = ZXDH_L2D_MPCAP_BUFF_SIZE / resp->cap_gqp_num; + } else { + iwdev->hw_data_cap.mp_cap_media_addr_base = DDR_ADDR_BASE; + cap_size = DDR_SIZE; + } + + for (i = 0; i < resp->cap_gqp_num 
&& i < MAX_CAP_QPS; i++) { + reg_addr = READ_RAM_REG_BASE + i * MP_OFFSET; + reg_val = RAM_ADDR + i * MP_OFFSET; + /* e0b8:read ram address bak*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, reg_addr, &read_reg_val); + if (reg_val == read_reg_val) { + pr_err("reg_addr:0x%llx, mp cap for cap_gqpid:%u is working!\n", reg_addr, resp->cap_gqpid[i]); + return EINVAL; + } + mp_idx = 0; + gqp_idx = 0; + if (resp->cap_gqpid[i] <= GQP_ID_1023) { + mp_idx = resp->cap_gqpid[i] / GQP_MOD; + gqp_idx = resp->cap_gqpid[i] % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + reg_val = (uint32_t)(RAM_ADDR + (i * MP_OFFSET)); + /* 80b4:a1f40*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + /* 80b8:gqp_idx*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else if (resp->cap_gqpid[i] > GQP_ID_1023 && + resp->cap_gqpid[i] <= GQP_ID_1103) { + mp_idx = ((resp->cap_gqpid[i] - GQP_OFFSET) / GQP_MOD) + + MP_IDX_INC; + gqp_idx = (resp->cap_gqpid[i] - GQP_OFFSET) % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (4 * CAP_ENABLE_REG_IDX); + reg_val = RAM_ADDR + i * MP_OFFSET; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + } else if (resp->cap_gqpid[i] <= GQP_ID_2047) { + mp_idx = ((resp->cap_gqpid[i] - GQP_OFFSET) / GQP_MOD) - + MP_MOD; + gqp_idx = (resp->cap_gqpid[i] - GQP_OFFSET) % GQP_MOD; + + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * CAP_ENABLE_REG_IDX); + reg_val = RAM_ADDR + i * MP_OFFSET; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + + reg_addr = (mp_idx * MP_OFFSET) + 
BASE_FOR_BIG_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + reg_val = gqp_idx; + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, reg_val); + } else { + pr_err("gqpid:%u err!\n", resp->cap_gqpid[i]); + return -EINVAL; + } + + reg_addr = MP_DATA_NUM_GEG + i * MP_OFFSET; + if (is_l2d_used) { + reg_val = ((cap_size / MP_DATA_BYTE) - 1); + } else { + reg_val = DDR_MP_DATA_NUM; /* 20w MP data */ + } + /* data num */ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, reg_val); + + reg_addr = DATA_ADDR_BASE + i * MP_OFFSET; + addr_val = iwdev->hw_data_cap.mp_cap_media_addr_base + (i * cap_size); + if ((addr_val >> 32) != 0) { + reg_val = (REPLACE_VALUE | (addr_val & 0x0FFFFFFF)); + } else { + reg_val = addr_val; + } + /* e0b4:ram data address */ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, reg_val); + + reg_addr = READ_RAM_REG_BASE + i * MP_OFFSET; + reg_val = RAM_ADDR + i * MP_OFFSET; + /* e0b8:read ram address bak*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, reg_val); + } + + return 0; +} + +static int allocate_addr_for_mmap(struct zxdh_device *iwdev, struct zxdh_ucontext *ucontext, + uint32_t length, struct zxdh_cap_addr_info *cap_addr_info, __u64 *cap_pa) +{ + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr = dma_alloc_coherent( + iwdev->rf->sc_dev.hw->device, length, + &cap_addr_info->addr_info.cap_direct_dma_addr.cap_dma_addr, GFP_KERNEL); + if (cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr == NULL) + return -1; + +#ifdef RDMA_MMAP_DB_SUPPORT + cap_addr_info->entry_info.cap_mmap_entry = + zxdh_cap_mmap_entry_insert(ucontext, + cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr, + length, + ZXDH_MMAP_PFN, + cap_pa); +#else + cap_addr_info->entry_info.cap_mmap_entry = + zxdh_user_mmap_entry_add_hash( + ucontext, virt_to_phys(cap_addr_info->addr_info.cap_direct_dma_addr.cap_cpu_addr), + ZXDH_MMAP_PFN, cap_pa); +#endif + if (!cap_addr_info->entry_info.cap_mmap_entry) { + 
pr_err("allocate_addr_for_mmap cap_mmap_entry insert err!\n"); + return -ENOMEM; + } + return 0; +} +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret, i, j; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_mp_cap_cfg mp_cap_cfg = { 0 }; + struct zxdh_qp *iwqp; + struct zxdh_ucontext *ucontext; + uint16_t gqp_id; + uint64_t mp_reg_addrs[MAX_CAP_QPS]; + bool same_gqp_exist; + struct zxdh_mp_cap_resp mp_cap_resp; + uint32_t length; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&mp_cap_cfg, attrs, ZXDH_IB_ATTR_DEV_MP_CAP); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + pr_info("qpn_num:%u\n", mp_cap_cfg.qpn_num); + if (mp_cap_cfg.qpn_num == 0 || mp_cap_cfg.qpn_num > MAX_CAP_QPS) + return -EINVAL; + + memset(&mp_cap_resp, 0, sizeof(struct zxdh_mp_cap_resp)); + if (mp_cap_cfg.cap_use_l2d) { + iwdev->hw_data_cap.mp_cap.addr_info.cap_direct_dma_addr.cap_cpu_addr = NULL; + ucontext = to_ucontext(ib_uctx); + length = ZXDH_L2D_MPCAP_BUFF_SIZE; + if (allocate_addr_for_mmap(iwdev, ucontext, length, &iwdev->hw_data_cap.mp_cap, &mp_cap_resp.cap_pa)) { + pr_err("allocate_addr_for_mmap err!length:%u.\n", length); + return -ENOMEM; + } + } + + memset(mp_reg_addrs, 0, sizeof(mp_reg_addrs)); + for (i = 0; i < mp_cap_cfg.qpn_num; i++) { + if (mp_cap_cfg.qpn[i] < (iwdev->rf->sc_dev.base_qpn + 1) || + mp_cap_cfg.qpn[i] > (iwdev->rf->sc_dev.base_qpn + + iwdev->rf->max_qp - 1)) { + pr_err("qpn:%u, base_qpn:%u overload", + mp_cap_cfg.qpn[i], iwdev->rf->sc_dev.base_qpn); + return -EINVAL; + } + + iwqp = NULL; + iwqp = 
iwdev->rf->qp_table[mp_cap_cfg.qpn[i] - + iwdev->rf->sc_dev.base_qpn]; + if (iwqp == NULL) + return -EINVAL; + + if (iwqp->sc_qp.qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) { + gqp_id = zxdh_get_rc_gqp_id( + iwqp->sc_qp.qp_uk.qp_8k_index, + iwqp->sc_qp.dev->vhca_gqp_start, + iwqp->sc_qp.dev->vhca_gqp_cnt); + } else { + gqp_id = iwqp->sc_qp.dev->vhca_ud_gqp; + } + + pr_info("mp cap qp_type:%u (1:RC,2:UD),qpn:%u,gqp_id:%u,vhcaid:%u!\n", + iwqp->sc_qp.qp_uk.qp_type, mp_cap_cfg.qpn[i], gqp_id, + iwqp->sc_qp.dev->vhca_id); + + same_gqp_exist = false; + for (j = 0; j < mp_cap_resp.cap_gqp_num; j++) { + if (mp_cap_resp.cap_gqpid[i] == gqp_id) { + same_gqp_exist = true; + pr_info("same gqp_id:%u for qpn:%u vhcaid:%u!\n", + gqp_id, mp_cap_cfg.qpn[i], + iwqp->sc_qp.dev->vhca_id); + break; + } + } + if (same_gqp_exist) + continue; + mp_cap_resp.cap_gqpid[mp_cap_resp.cap_gqp_num] = gqp_id; + mp_cap_resp.cap_gqp_num += 1; + } + + ret = write_mp_cap_regs(iwdev, mp_cap_cfg.cap_use_l2d, &mp_cap_resp); + if (ret != 0) { + pr_err("write_mp_cap_regs err! 
gqp_num:%u\n", + mp_cap_resp.cap_gqp_num); + return ret; + } + mp_cap_resp.mcode_type = iwdev->rf->mcode_type; + pr_info("zxdh_rdma mp cap ib_copy_to_udata gqpid:%u,gqp_num:%u,cap_pa:%llx.mcode_type:%u!\n", + mp_cap_resp.cap_gqpid[0], mp_cap_resp.cap_gqp_num, + mp_cap_resp.cap_pa, mp_cap_resp.mcode_type); +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_MP_CAP_RESP, + &mp_cap_resp, sizeof(mp_cap_resp)); +#else + ret = zxdh_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_MP_CAP_RESP, + &mp_cap_resp, sizeof(mp_cap_resp)); +#endif + if (ret) { + pr_err("zxdh_rdma mp cap ib_copy_to_udata failed!\n"); +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + iwdev->hw_data_cap.mp_cap.entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + iwdev->hw_data_cap.mp_cap.entry_info.cap_mmap_entry); +#endif + iwdev->hw_data_cap.mp_cap.entry_info.cap_mmap_entry = NULL; + return -EFAULT; + } + + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_GET_DATA)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_GET_DATA)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret = 0; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_src_copy_dest src_dest = {}; + int status; + u8 param; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(¶m, attrs, ZXDH_IB_ATTR_DEV_MP_GET_DATA); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + src_dest.src = iwdev->hw_data_cap.mp_cap_media_addr_base; + src_dest.dest = iwdev->hw_data_cap.mp_cap.addr_info.cap_direct_dma_addr.cap_dma_addr; + src_dest.len = ZXDH_L2D_MPCAP_BUFF_SIZE; + status = 
zxdh_dpuddr_to_host_cmd(&iwdev->rf->sc_dev, &src_dest); + if (status != 0) { + pr_info("status:%d\n", status); + return -EFAULT; + } + + // pr_info("zxdh_dpuddr_to_host_cmd succ!\n"); + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret = 0; + uint8_t i; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_cap_gqp cap_gqp; + uint64_t reg_addr; + uint8_t mp_idx, gqp_idx; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ret = uverbs_copy_from(&cap_gqp, attrs, ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + if (cap_gqp.gqp_num == 0 || cap_gqp.gqp_num > MAX_CAP_QPS) + return -EINVAL; + gqp_idx = 0xff; + for (i = 0; i < cap_gqp.gqp_num; i++) { + mp_idx = 0; + if (cap_gqp.gqpid[i] <= GQP_ID_1023) { + mp_idx = cap_gqp.gqpid[i] / GQP_MOD; + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, gqp_idx); + } else if (cap_gqp.gqpid[i] > GQP_ID_1023 && + cap_gqp.gqpid[i] <= GQP_ID_1103) { + mp_idx = ((cap_gqp.gqpid[i] - GQP_OFFSET) / GQP_MOD) + MP_IDX_INC; + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, gqp_idx); + + } else if (cap_gqp.gqpid[i] <= GQP_ID_2047) { + mp_idx = ((cap_gqp.gqpid[i] - GQP_OFFSET) / GQP_MOD) - MP_MOD; + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, 
gqp_idx); + } else { + pr_err("gqpid:%u err!\n", cap_gqp.gqpid[i]); + return EINVAL; + } + + reg_addr = READ_RAM_REG_BASE + i * MP_OFFSET; + /* e0b8:read ram address bak*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, 0); + } + + return ret; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_ACT_VHCA_GQPS)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_ACT_VHCA_GQPS)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret, gqp_id; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct zxdh_sc_dev *dev; + struct ib_device *ib_dev; + struct zxdh_active_vhca_gqps get_active_vhca_gqps_resp = { 0 }; + u16 vhca_id, gqp_start, gqp_cnt; + uint32_t read_reg_val = 0; + uint32_t qps_act_bit = 0x80000000; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + dev = &iwdev->rf->sc_dev; + + vhca_id = dev->vhca_id; + if (vhca_id > 257) { + pr_err("vhca_id:%u bigger ths 257,err!\n", vhca_id); + return -EINVAL; + } + gqp_start = dev->vhca_gqp_start; + gqp_cnt = dev->vhca_gqp_cnt; + pr_info("zxdh_get_active_vhca_gqps vhca_id:%u, gqp_start:%u, gqp_cnt:%u\n", + vhca_id, gqp_start, gqp_cnt); + for (gqp_id = gqp_start; gqp_id < gqp_start + gqp_cnt; gqp_id++) { + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, 0x62065f81d4, + gqp_id); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, 0x62065f84c0, + &read_reg_val); + if ((read_reg_val & qps_act_bit) != 0) { + get_active_vhca_gqps_resp + .gqp_id[get_active_vhca_gqps_resp.gqp_num] = + gqp_id; + get_active_vhca_gqps_resp.gqp_num++; + } + } + if (get_active_vhca_gqps_resp.gqp_num > 0) + get_active_vhca_gqps_resp.vhca_id = vhca_id; + pr_info("zxdh_get_active_vhca_gqps vhca_id:%u,gqp_num:%u\n", + get_active_vhca_gqps_resp.vhca_id, 
+ get_active_vhca_gqps_resp.gqp_num); +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero( + attrs, ZXDH_IB_ATTR_DEV_GET_ACT_VHCA_GQPS_RESP, + &get_active_vhca_gqps_resp, sizeof(get_active_vhca_gqps_resp)); +#else + ret = zxdh_copy_to_struct_or_zero( + attrs, ZXDH_IB_ATTR_DEV_GET_ACT_VHCA_GQPS_RESP, + &get_active_vhca_gqps_resp, sizeof(get_active_vhca_gqps_resp)); +#endif + if (ret) { + pr_err("zxdh_get_active_vhca_gqps ib_copy_to_udata failed!\n"); + return -EFAULT; + } + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_CC_BASIC_INFO)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_CC_BASIC_INFO)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + uint8_t i; + uint32_t read_reg_val = 0; + uint32_t read_reg_val_ex = 0; + uint64_t read_value; + struct zxdh_cc_basic_info resp = { 0 }; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + for (i = 0; i < 8; i++) { + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_RDMA_TX_SUB_RW_RSV0, i); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_RDMA_TX_SUB_RO_RSV5, &read_reg_val); + resp.active_gqp_cnt += read_reg_val; + } + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_SQ_CPU_MAINTAIN_RESERVE1, &read_reg_val); + resp.active_vhca_sq_cnt = EXTRACT_BITS(read_reg_val, 10, 22); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_RQ_CPU_MAINTAIN_RESERVE1, &read_reg_val); + resp.active_vhca_read_cnt = EXTRACT_BITS(read_reg_val, 10, 22); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_ACK_CPU_MAINTAIN_RESERVE1, &read_reg_val); + resp.active_vhca_ack_cnt = EXTRACT_BITS(read_reg_val, 
10, 22); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_DB_AXI_INTERFACW_STATE_REG2, &read_reg_val); + resp.active_qp_sq_cur_cnt = EXTRACT_BITS(read_reg_val, 16, 31); + resp.active_qp_rq_cur_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_WQE_PREFETCH_TOP_FIFO_WE_RD_CNT0, &read_reg_val); + resp.task_prefetch_recv_com_cnt = EXTRACT_BITS(read_reg_val, 16, 31); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_RDMATX_ARBITRATION_DIN_0, &read_reg_val); + resp.flight_pkt_cnt = EXTRACT_BITS(read_reg_val, 0, 8); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + HOST3_ERR_INFO_FIFO_OVERFLOW_CNT, &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_PKT_TIME_OUT_CNT, + &read_reg_val_ex); + read_value = EXTRACT_BITS(read_reg_val_ex, 16, 31); + resp.tx_pkt_cnt = ((read_value << 32) | read_reg_val); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ICRC_PROC_EOP_CNT_HW, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ICRC_PROC_SOP_CNT_HW, + &read_reg_val_ex); + read_value = EXTRACT_BITS(read_reg_val_ex, 0, 15); + resp.rx_pkt_cnt = ((read_value << 32) | read_reg_val); + resp.backpres_rx = EXTRACT_BITS(read_reg_val_ex, 23, 26); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + RDMATX_ACK_RSV_RO_REG_0_HW, &read_reg_val); + resp.retry_timeout_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + resp.retry_read_cnt = EXTRACT_BITS(read_reg_val, 16, 31); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, RDMATX_ACK_RSV_RO_REG_1, + &read_reg_val); + resp.retry_nak_cnt = EXTRACT_BITS(read_reg_val, 16, 31); + resp.retry_rnr_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + RDMATX_ACK_RD_MSG_LOSS_FLAG_CNT, &read_reg_val); + resp.drop_read_msg_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_MUL_CACHE_ARBITER_D2B_SOP_CNT, + &read_reg_val); + resp.rx_pkt_ecn_cnt = EXTRACT_BITS(read_reg_val, 16, 31); 
+ + read_reg_val = readl((u32 __iomem *)(iwdev->rf->sc_dev.hw->hw_addr + + C_STATE_ERR_CFG)); + resp.tx_pkt_cnp_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_NHD_CHECK_ICRC_REMOVAL_EOP_CNT_HW, &read_reg_val); + resp.rx_pkt_cnp_cnt = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, PKT_RTT_T1_GEN_SOP_CNT, + &read_reg_val); + resp.tx_pkt_rtt_t1_cnt = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_NHD_CHECK_RTT_PROC_SOP_CNT, &read_reg_val); + resp.rx_pkt_rtt_t2_cnt = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_RAM_TEST_RSV_1, + &read_reg_val); + resp.tx_pkt_rtt_t4_cnt = EXTRACT_BITS(read_reg_val, 10, 25); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_NHD_CHECK_RTT_PROC_EOP_CNT, &read_reg_val); + resp.rx_pkt_rtt_t5_cnt = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_SQ_CPU_FIFO_OVERFLOW_CNT, &read_reg_val); + resp.limit_tx_sq_cnt = EXTRACT_BITS(read_reg_val, 6, 21); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_RQ_CPU_FIFO_OVERFLOW_CNT, &read_reg_val); + resp.limit_tx_read_cnt = EXTRACT_BITS(read_reg_val, 6, 21); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_ACK_CPU_FIFO_OVERFLOW_CNT, &read_reg_val); + resp.limit_tx_ack_cnt = EXTRACT_BITS(read_reg_val, 6, 21); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_SQ_CPU_MAINTAIN_RESERVE2, &read_reg_val); + resp.backpres_tx_pfc_flg_pyh0_3 = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, + C_SQ_CPU_MAINTAIN_RESERVE3, &read_reg_val); + resp.backpres_tx_pfc_flg_pyh4_7 = read_reg_val; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_NP_RDY_TEST, + &read_reg_val); + resp.backpres_tx_pfc_cnt = EXTRACT_BITS(read_reg_val, 0, 15); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ICRC_CHECK_SOP_CNT_HW, + &read_reg_val); + resp.backpres_rx_pfc_cnt = EXTRACT_BITS(read_reg_val, 28, 31); + +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + 
ret = uverbs_copy_to_struct_or_zero( + attrs, ZXDH_IB_ATTR_DEV_GET_CC_BASIC_INFO_RESP, &resp, + sizeof(resp)); +#else + ret = zxdh_copy_to_struct_or_zero( + attrs, ZXDH_IB_ATTR_DEV_GET_CC_BASIC_INFO_RESP, &resp, + sizeof(resp)); +#endif + if (ret) { + pr_err("zxdh_get_cc_basic_info ib_copy_to_udata failed!\n"); + return -EFAULT; + } + return 0; +} + +static int +fill_qpc( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_sc_dev *dev; + int err_code = 0; + __u32 qpn; + + dev = &rf->sc_dev; + qpn = context_req->resource_id; + if (qpn == 1) + qpn = dev->base_qpn + 1; + if (qpn < (dev->base_qpn + 1) || qpn > (dev->base_qpn + rf->max_qp - 1)) { + pr_err("get qpc err, qpn is out of boundary\n"); + pr_err("qpn boundary :[%d,%d]\n", dev->base_qpn + 1, dev->base_qpn + rf->max_qp - 1); + return -EOVERFLOW; + } + err_code = zxdh_fill_qpc(dev, qpn, resource_buf); + return err_code; +} + +static int +fill_cqc( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_sc_dev *dev; + int err_code = 0; + __u32 cqn; + + dev = &rf->sc_dev; + cqn = context_req->resource_id; + if (cqn < (dev->base_cqn + 1) || cqn > (dev->base_cqn + rf->max_cq - 1)) { + pr_err("get cqc err, cqn is out of boundary\n"); + pr_err("cqn boundary :[%d,%d]\n", dev->base_cqn + 1, dev->base_cqn + rf->max_cq - 1); + return -EOVERFLOW; + } + err_code = zxdh_fill_cqc(dev, cqn, resource_buf); + return err_code; +} + +static int +fill_ceqc( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_sc_dev *dev; + int err_code = 0; + __u32 ceqn; + + dev = &rf->sc_dev; + ceqn = context_req->resource_id; + if (ceqn < dev->base_ceqn || ceqn > (dev->base_ceqn + rf->max_cqe - 1)) { + pr_err("get ceqc err, ceqn is out of boundary\n"); + pr_err("ceqn boundary :[%d,%d]\n", dev->base_ceqn, dev->base_ceqn + rf->max_cqe - 1); + return 
-EOVERFLOW; + } + err_code = zxdh_fill_ceqc(dev, ceqn, resource_buf); + return err_code; +} + +static int +fill_aeqc( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_sc_dev *dev; + int err_code = 0; + + dev = &rf->sc_dev; + err_code = zxdh_fill_aeqc(dev, resource_buf); + return err_code; +} + +static int +fill_srqc( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_sc_dev *dev; + int err_code = 0; + __u32 srqn; + + dev = &rf->sc_dev; + srqn = context_req->resource_id; + if (srqn < dev->base_srqn || srqn > (dev->base_srqn + rf->max_srq - 1)) { + pr_err("get srqc err, srqn is out of boundary\n"); + pr_err("srqn boundary :[%d,%d]\n", dev->base_srqn, dev->base_srqn + rf->max_srq - 1); + return -EOVERFLOW; + } + err_code = zxdh_fill_srqc(dev, srqn, resource_buf); + return err_code; +} + +static int +fill_mrte( + struct zxdh_pci_f *rf, + struct zxdh_context_req *context_req, + struct zxdh_dma_mem *resource_buf) +{ + struct zxdh_src_copy_dest src_dest = { 0 }; + int err_code = 0; + __u32 stag, stag_index; + + stag = context_req->resource_id; + stag_index = stag >> ZXDH_CQPSQ_STAG_IDX_S; + if (stag_index > (rf->max_mr - 1)) { + pr_err("get mrte err, stag is out of boundary\n"); + pr_err("stag_index boundary :[0,%d]\n", rf->max_mr - 1); + return -EOVERFLOW; + } + src_dest.src = 64 * stag_index; + src_dest.dest = resource_buf->pa; + src_dest.len = resource_buf->size; + err_code = zxdh_cqp_rdma_read_mrte_cmd(&rf->sc_dev, &src_dest); + if (err_code) { + pr_err("res mrte entry raw fill qpc failed:%d\n", err_code); + return err_code; + } + return 0; +} + +static int +get_buf_info(struct zxdh_context_req *context_req, + int *buf_size, int *data_size, int *buf_alignment, + int (**fill_ctx)(struct zxdh_pci_f *, struct zxdh_context_req *, struct zxdh_dma_mem *)) +{ + switch (context_req->type) { + case ZXDH_RX_READ_QPC: + *buf_size = 
ZXDH_QP_CTX_SIZE; + *data_size = ZXDH_RX_READ_QPC_SIZE; + *buf_alignment = ZXDH_QPC_ALIGNMENT; + *fill_ctx = fill_qpc; + break; + + case ZXDH_TX_READ_QPC: + *buf_size = ZXDH_QP_CTX_SIZE; + *data_size = ZXDH_TX_READ_QPC_SIZE; + *buf_alignment = ZXDH_QPC_ALIGNMENT; + *fill_ctx = fill_qpc; + break; + + case ZXDH_READ_CQC: + *buf_size = ZXDH_CQ_CTX_SIZE; + *data_size = ZXDH_READ_CQC_SIZE; + *buf_alignment = ZXDH_CQC_ALIGNMENT; + *fill_ctx = fill_cqc; + break; + + case ZXDH_READ_CEQC: + *buf_size = ZXDH_CEQ_CTX_SIZE; + *data_size = ZXDH_READ_CEQC_SIZE; + *buf_alignment = ZXDH_CEQC_ALIGNMENT; + *fill_ctx = fill_ceqc; + break; + + case ZXDH_READ_AEQC: + *buf_size = ZXDH_AEQ_CTX_SIZE; + *data_size = ZXDH_READ_AEQC_SIZE; + *buf_alignment = ZXDH_AEQC_ALIGNMENT; + *fill_ctx = fill_aeqc; + break; + + case ZXDH_RX_READ_SRQC: + *buf_size = ZXDH_SRQ_CTX_SIZE; + *data_size = ZXDH_RX_READ_SRQC_SIZE; + *buf_alignment = ZXDH_SRQC_ALIGNMENT; + *fill_ctx = fill_srqc; + break; + + case ZXDH_READ_MRTE: + *buf_size = ZXDH_READ_MRTE_SIZE; + *data_size = ZXDH_READ_MRTE_SIZE; + *buf_alignment = ZXDH_QPC_ALIGNMENT; + *fill_ctx = fill_mrte; + break; + default: + return -EINVAL; + } + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_HMC)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_HMC)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + int ret = 0, buf_size = 0, data_size = 0, buf_alignment = 0, err_code = 0; + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_pci_f *rf; + struct zxdh_context_req context_req = { 0 }; + struct zxdh_dma_mem resource_buf = { 0 }; + struct zxdh_context_resp context_resp; + int (*fill_ctx)(struct zxdh_pci_f *, struct zxdh_context_req *, struct zxdh_dma_mem *) = NULL; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if 
(IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + rf = iwdev->rf; + + ret = uverbs_copy_from(&context_req, attrs, ZXDH_IB_ATTR_DEV_GET_HMC); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + memset(&context_resp, 0, sizeof(struct zxdh_context_resp)); + err_code = get_buf_info(&context_req, &buf_size, &data_size, &buf_alignment, &fill_ctx); + if (err_code) { + pr_err("req type not exist: %d\n", err_code); + return err_code; + } + context_resp.context_size = buf_size / 8; + resource_buf.va = NULL; + resource_buf.size = ALIGN(buf_size, buf_alignment); + resource_buf.va = dma_alloc_coherent(rf->hw.device, + resource_buf.size, &resource_buf.pa, + GFP_KERNEL); + if (!resource_buf.va) { + pr_err("get qpc alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + + err_code = fill_ctx(rf, &context_req, &resource_buf); + if (err_code) { + pr_err("get ctx fill buf failed:%d\n", err_code); + goto free_exit; + } + + if (context_req.type == ZXDH_RX_READ_QPC) { + memcpy(context_resp.context_info, (void *)((__u8 *)resource_buf.va + ZXDH_RX_QPC_SHIFT), data_size); + } else { + memcpy(context_resp.context_info, resource_buf.va, data_size); + } +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_GET_HMC_RESP, + &context_resp, sizeof(context_resp)); +#else + ret = zxdh_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_GET_HMC_RESP, + &context_resp, sizeof(context_resp)); +#endif + if (ret) { + pr_err("zxdh_get_ctx ib_copy_to_udata failed!\n"); + return -EFAULT; + } +free_exit: + if (resource_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, + ALIGN(buf_size, buf_alignment), + resource_buf.va, resource_buf.pa); + resource_buf.va = NULL; + } + return err_code; +} + +static int validate_hw_object_id(struct zxdh_get_object_data_req *object_req) +{ + switch (object_req->object_id) { + case ZXDH_PBLE_MR_OBJ_ID: + case ZXDH_PBLE_QUEUE_OBJ_ID: + case ZXDH_AH_OBJ_ID: + case ZXDH_IRD_OBJ_ID: + 
case ZXDH_TX_WINDOW_OBJ_ID: + case ZXDH_CQ_SHADOW_AREA: + case ZXDH_CQ: + case ZXDH_AEQ: + case ZXDH_RQ: + case ZXDH_RQ_SHADOW_AREA: + case ZXDH_SRQP: + case ZXDH_SRQ: + case ZXDH_SRQ_SHADOW_AREA: + case ZXDH_SQ: + case ZXDH_CEQ: + return 0; + default: + return -ZXDH_NOT_SUPPORT_OBJECT_ID; + } +} + +const int object_interface_type[] = { + [ZXDH_PBLE_MR_OBJ_ID] = ZXDH_INTERFACE_CACHE, + [ZXDH_PBLE_QUEUE_OBJ_ID] = ZXDH_INTERFACE_CACHE, + [ZXDH_AH_OBJ_ID] = ZXDH_INTERFACE_CACHE, + [ZXDH_IRD_OBJ_ID] = ZXDH_INTERFACE_CACHE, + [ZXDH_TX_WINDOW_OBJ_ID] = ZXDH_INTERFACE_CACHE, + [ZXDH_CQ] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_CQ_SHADOW_AREA] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_AEQ] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_SQ] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_RQ] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_RQ_SHADOW_AREA] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_SRQP] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_SRQ] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_SRQ_SHADOW_AREA] = ZXDH_INTERFACE_NOTCACHE, + [ZXDH_CEQ] = ZXDH_INTERFACE_NOTCACHE, +}; + +static int +pre_validate_hw_object_request(struct zxdh_get_object_data_req *object_req) +{ + int ret = 0; + ret = validate_hw_object_id(object_req); + if (ret) { + pr_err("query hw object, validate request object id err:%d, object_id: %d", + ret, object_req->object_id); + return ret; + } + return ret; +} + +static void hw_object_wqe_init(struct hw_object_wqe_context *object_wqe_ctx, + struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req) +{ + object_wqe_ctx->op_code = ZXDH_OP_QUERY_HW_OBJECT_INFO; + object_wqe_ctx->src_vhca_Index = dev->vhca_id; + object_wqe_ctx->src_object_id = object_req->object_id; + object_wqe_ctx->src_waypartition = 0; + object_wqe_ctx->src_interface_select = + object_interface_type[object_req->object_id]; + + object_wqe_ctx->dest_vhca_index = dev->vhca_id; + object_wqe_ctx->dest_object_id = ZXDH_DMA_OBJ_ID; + object_wqe_ctx->dest_waypartition = 0; + object_wqe_ctx->dest_path_select = ZXDH_INDICATE_HOST_NOSMMU; + 
object_wqe_ctx->dest_interface_select = ZXDH_INTERFACE_NOTCACHE; + + object_wqe_ctx->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_MAX; + object_wqe_ctx->req = object_req; + object_wqe_ctx->dev = dev; +} + +static void +set_cache_object_wqe_ctx(struct hw_object_wqe_context *object_context, + struct zxdh_get_object_data_req *object_req, + u32 object_size) +{ + object_context->src_address = object_req->entry_idx * object_size; + object_context->data_length = object_req->object_num * object_size; + object_context->object_size = object_size; +} + +static int query_route_id_from_ram(struct zxdh_sc_dev *dev, u32 ram_num, + u8 *route_id) +{ + int ret = 0; + u32 ram_value = 0; + ret = zxdh_read_ram_32bit_value(dev, ram_num, ZXDH_RAM_WIDTH_32_BIT, + ZXDH_RAM_WIDTH_LEN_UNIT_1, + ZXDH_RAM_32_BIT_IDX_0, &ram_value); + if (ret) { + pr_err("query hw object, query object cache id err:%d", ret); + return ret; + } + *route_id = ram_value & 0x3; + return ret; +} + +static int query_route_id_from_reg(struct zxdh_sc_dev *dev, u64 reg_base, + u8 bit_low, u8 bit_higth, u8 *route_id) +{ + uint64_t address; + uint32_t read_reg_val = 0; + struct zxdh_pci_f *rf = dev_to_rf(dev); + address = reg_base + dev->vhca_id * ROUTE_ID_REG_SIZE; + REG_OP_AND_CHECK(zxdh_rdma_reg_read, rf, address, &read_reg_val); + *route_id = EXTRACT_BITS(read_reg_val, bit_low, bit_higth); + return 0; +} + +static int +prepare_query_pble_mr_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 cache_id = 0; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE_MR; + set_cache_object_wqe_ctx(object_context, object_req, + ZXDH_PBLE_MR_QUADRUPLE_SIZE); + ret = query_route_id_from_ram(dev, ZXDH_RAM_H35, &cache_id); + if (ret) { + pr_err("query hw object, query pble mr route id err:%d", ret); + return ret; + } + object_context->src_path_select = cache_id; + return ret; +} + +static int prepare_query_pble_queue_wqe_context( + struct 
zxdh_sc_dev *dev, struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 cache_id = 0; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + set_cache_object_wqe_ctx(object_context, object_req, + ZXDH_PBLE_QUEUE_QUADRUPLE_SIZE); + ret = query_route_id_from_reg(dev, PBLE_QUEUE_CACHE_ID_BASE, 0, 1, + &cache_id); + if (ret) { + pr_err("query hw object, query pble queue route id err:%d\n", + ret); + return ret; + } + object_context->src_path_select = cache_id; + return ret; +} + +static int +prepare_query_ah_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 cache_id = 0; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_AH; + set_cache_object_wqe_ctx(object_context, object_req, ZXDH_AH_SIZE); + ret = query_route_id_from_reg(dev, AH_CACHE_ID_BASE, 0, 1, &cache_id); + if (ret) { + pr_err("query hw object, query ah route id err:%d\n", ret); + return ret; + } + object_context->src_path_select = cache_id; + return ret; +} + +static int +prepare_query_ird_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 cache_id = 0; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_IRD; + set_cache_object_wqe_ctx(object_context, object_req, ZXDH_IRD_SIZE); + ret = query_route_id_from_ram(dev, ZXDH_RAM_H100, &cache_id); + if (ret) { + pr_err("query hw object, query ird cache id err:%d", ret); + return ret; + } + object_context->src_path_select = cache_id; + return ret; +} + +static bool validate_qpn(struct zxdh_pci_f *rf, + struct zxdh_get_object_data_req *object_req) +{ + struct zxdh_sc_dev *dev = NULL; + dev = &rf->sc_dev; + if (object_req->queue_id == 1) + object_req->queue_id = dev->base_qpn + 1; + if (object_req->queue_id < (dev->base_qpn + 1) || + object_req->queue_id > (dev->base_qpn + rf->max_qp - 1)) { + 
pr_err("qpn is out of boundary\n"); + pr_err("qpn boundary :[%d,%d]\n", dev->base_qpn + 1, + dev->base_qpn + rf->max_qp - 1); + return false; + } + return true; +} + +static int +prepare_query_tx_window_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 cache_id = 0; + uint32_t tx_window_ddr_size = 0; + struct zxdh_pci_f *rf = dev_to_rf(dev); + + if (!validate_qpn(rf, object_req)) + return -ZXDH_QUEUE_ID_ERROR; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, rf, TX_WINDOW_DDR_SIZE_REG, + &tx_window_ddr_size); + pr_info("tx window rdd size 0x%x", tx_window_ddr_size); + object_req->entry_idx = + ((object_req->queue_id - dev->base_qpn) << tx_window_ddr_size) + + object_req->entry_idx; + set_cache_object_wqe_ctx(object_context, object_req, + ZXDH_TX_WINDOW_SIZE); + ret = query_route_id_from_reg(dev, TX_WINDOW_CACHE_ID_BASE, 0, 1, + &cache_id); + if (ret) { + pr_err("query hw object, query tx window route id err:%d\n", + ret); + return ret; + } + object_context->src_path_select = cache_id; + return ret; +} + +static int query_hw_object_cqc(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_cqc_item *cqc_item) +{ + int ret = 0; + struct zxdh_context_req context_req = { 0 }; + struct zxdh_dma_mem resource_buf = { 0 }; + struct zxdh_pci_f *rf = dev_to_rf(dev); + context_req.resource_id = object_req->queue_id; + resource_buf.va = NULL; + resource_buf.size = ZXDH_READ_CQC_SIZE; + resource_buf.va = dma_alloc_coherent(rf->hw.device, resource_buf.size, + &resource_buf.pa, GFP_KERNEL); + if (!resource_buf.va) { + pr_err("query cq shadow area, dma allocate err: ENOMEM\n"); + return -ENOMEM; + } + ret = fill_cqc(rf, &context_req, &resource_buf); + if (ret) { + pr_err("query cq shadow area, query cqc err: %d\n", ret); + goto free_exit; + } + + cqc_item->leaf_pbl_size = ZXDH_GET_QPC_ITEM( + u8, resource_buf.va, ZXDH_CQC_LEAF_PBLE_SIZE_BYTE_OFFSET, + 
RDMACPC_LEAF_PBL_SIZE); + cqc_item->doorbell_shadow_addr = ZXDH_GET_QPC_ITEM( + u64, resource_buf.va, ZXDH_CQC_DOORBELL_SHADOW_ADDR_BYTE_OFFSET, + RDMACPC_DOORBELL_SHADOW_ADDR); + cqc_item->log_cqe_num = ZXDH_GET_QPC_ITEM( + u8, resource_buf.va, ZXDH_CQC_LOG_CQE_NUM, RDMACPC_LOG_CQE_NUM); + cqc_item->hw_cq_head = ZXDH_GET_QPC_ITEM( + u32, resource_buf.va, ZXDH_CQC_HW_CQ_HEAD_BYTE_OFFSET, + RDMACPC_HW_CQ_HEAD); + cqc_item->cq_address = ZXDH_GET_QPC_ITEM( + u64, resource_buf.va, ZXDH_CQC_CQ_ADDRESS_BYTE_OFFSET, + RDMACPC_CQ_ADDRESS); + cqc_item->root_pble = ZXDH_GET_QPC_ITEM( + u64, resource_buf.va, ZXDH_CQC_HW_CQ_ROOT_PBLE_BYTE_OFFSET, + RDMACPC_ROOT_PBLE); + pr_info("query hw object, cqc leaf_pbl_size: 0x%x, doorbell_shadow_addr: 0x%llx, log_cqe_num: 0x%x, hw_cq_head: 0x%x, cq_address: 0x%llx, root_pble: 0x%llx", + cqc_item->leaf_pbl_size, cqc_item->doorbell_shadow_addr, + cqc_item->log_cqe_num, cqc_item->hw_cq_head, + cqc_item->cq_address, cqc_item->root_pble); + +free_exit: + if (resource_buf.va) { + dma_free_coherent(rf->hw.device, resource_buf.size, + resource_buf.va, resource_buf.pa); + resource_buf.va = NULL; + } + return ret; +} + +static bool validate_cqn(struct hw_object_wqe_context *object_context) +{ + u32 cqn = object_context->req->queue_id; + u32 base_cqn = object_context->dev->base_cqn; + u32 max_cqn = + base_cqn + + object_context->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_CQ].cnt - 1; + if (cqn >= base_cqn && cqn <= max_cqn) { + return true; + } + + pr_err("query hw object, validate cqn error, cqn range:[%u,%u], current cqn: %u\n", + base_cqn, max_cqn, cqn); + return false; +} + +static int prepare_query_cq_doorbell_wqe_context( + struct zxdh_sc_dev *dev, struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_cqc_item cqc_item = { 0 }; + u8 indicate_id = 0; + if (!validate_cqn(object_context)) { + pr_err("query hw object, validate cqp id err:%d", ret); + return -ZXDH_QUEUE_ID_ERROR; + } 
+ ret = query_hw_object_cqc(dev, object_req, &cqc_item); + if (ret) + return ret; + + ret = query_route_id_from_reg(dev, CQ_DOORBELL_SHADOW_BASE, 2, 3, + &indicate_id); + if (ret) { + pr_err("query hw object, query tx window route id err:%d\n", + ret); + return ret; + } + object_context->data_length = ZXDH_CQ_SHADOW_AREA_SIZE; + object_context->src_address = cqc_item.doorbell_shadow_addr << 6; + object_context->src_path_select = indicate_id; + return ret; +} + +static int +compute_cq_src_address_and_length(struct hw_object_wqe_context *object_context, + struct zxdh_cqc_item *cqc_item) +{ + u32 cqe_num = 0; + int ret = 0; + u32 mask = 0; + object_context->object_size = ZXDH_CQ_SIZE; + switch (cqc_item->leaf_pbl_size) { + case PBLE_LEVEL_0: + mask = GENMASK_ULL(cqc_item->log_cqe_num - 1, 0); + cqe_num = mask & cqc_item->hw_cq_head; + if (cqe_num < LAST_15_WQE) { + object_context->data_length = + (cqe_num + 1) * ZXDH_CQ_SIZE; + object_context->src_address = + (cqc_item->cq_address << 8); + } else { + object_context->data_length = + (LAST_15_WQE + 1) * ZXDH_CQ_SIZE; + object_context->src_address = + (cqc_item->cq_address << 8) + + (cqe_num - LAST_15_WQE) * ZXDH_CQ_SIZE; + } + break; + case PBLE_LEVEL_1: + cqe_num = + (u32)FIELD_GET(GENMASK_ULL(5, 0), cqc_item->hw_cq_head); + object_context->data_length = (cqe_num + 1) * ZXDH_CQ_SIZE; + object_context->src_address = cqc_item->root_pble; + break; + default: + return -ZXDH_NOT_SUPPORT_TWO_LEVEL_PBLE_CODE; + } + return ret; +} + +static int +prepare_query_cq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_cqc_item cqc_item = { 0 }; + u8 indicate_id = 0; + if (!validate_cqn(object_context)) { + pr_err("query hw object, validate cqp id err:%d", ret); + return -ZXDH_QUEUE_ID_ERROR; + } + ret = query_hw_object_cqc(dev, object_req, &cqc_item); + if (ret) { + pr_err("query hw object, query cqc err:%d", ret); + 
return ret; + } + ret = compute_cq_src_address_and_length(object_context, &cqc_item); + if (ret) { + pr_err("query hw object, compute cq src address and length err:%d", + ret); + return ret; + } + ret = query_route_id_from_reg(dev, CQ_INDICATE_ID_BASE, 2, 3, + &indicate_id); + if (ret) { + pr_err("query hw object, query indicate id err:%d", ret); + return ret; + } + object_context->src_path_select = indicate_id; + return ret; +} + +static int query_hw_object_ceqc(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_ceqc_item *ceqc_item) +{ + int ret = 0; + struct zxdh_context_req context_req = { 0 }; + struct zxdh_dma_mem resource_buf = { 0 }; + struct zxdh_pci_f *rf = dev_to_rf(dev); + context_req.resource_id = object_req->queue_id; + resource_buf.va = NULL; + resource_buf.size = ZXDH_READ_CEQC_SIZE; + resource_buf.va = dma_alloc_coherent(rf->hw.device, resource_buf.size, + &resource_buf.pa, GFP_KERNEL); + if (!resource_buf.va) { + pr_err("query ceqc, dma allocate err: ENOMEM\n"); + return -ENOMEM; + } + ret = fill_ceqc(rf, &context_req, &resource_buf); + if (ret) { + pr_err("query ceqc, query ceqc err: %d\n", ret); + goto free_exit; + } + + ceqc_item->leaf_pbl_size = ZXDH_GET_QPC_ITEM( + u8, resource_buf.va, ZXDH_CEQC_LEAF_PBL_SIZE_OFFSET, + RDMACEQC_LEAF_PBL_SIZE); + ceqc_item->ceqe_head = ZXDH_GET_QPC_ITEM(u32, resource_buf.va, + ZXDH_CEQC_LEAF_PBL_SIZE_OFFSET, + RDMACEQC_CEQE_HEAD); + ceqc_item->log_ceq_num = ZXDH_GET_QPC_ITEM( + u64, resource_buf.va, ZXDH_CEQC_LEAF_PBL_SIZE_OFFSET, + RDMACEQC_LOG_CEQ_NUM); + ceqc_item->ceq_address = ZXDH_GET_QPC_ITEM(u64, resource_buf.va, + ZXDH_CEQC_CEQ_ADDRESS_OFFSET, + RDMACEQC_CEQ_ADDRESS); + + pr_info("query hw object, ceqc leaf_pbl_size: 0x%x, ceqe_head: 0x%x, ceq_address: 0x%llx", + ceqc_item->leaf_pbl_size, ceqc_item->ceqe_head, + ceqc_item->ceq_address); + +free_exit: + if (resource_buf.va) { + dma_free_coherent(rf->hw.device, resource_buf.size, + resource_buf.va, resource_buf.pa); + 
resource_buf.va = NULL; + } + return ret; +} + +static int +compute_ceq_src_address_and_length(struct hw_object_wqe_context *object_context, + struct zxdh_ceqc_item *ceqc_item) +{ + u32 ceqe_num = 0; + int ret = 0; + u32 mask = 0; + object_context->object_size = ZXDH_CEQ_SIZE; + switch (ceqc_item->leaf_pbl_size) { + case PBLE_LEVEL_0: + mask = GENMASK_ULL(ceqc_item->log_ceq_num - 1, 0); + ceqe_num = mask & ceqc_item->ceqe_head; + if (ceqe_num < LAST_15_WQE) { + object_context->data_length = + (LAST_15_WQE + 1) * ZXDH_CEQ_SIZE; + object_context->src_address = + (ceqc_item->ceq_address << 7); + } else { + if ((ceqe_num - LAST_15_WQE) % 2 == 1) { + object_context->data_length = + 15 * ZXDH_CEQ_SIZE; + object_context->src_address = + (ceqc_item->ceq_address << 7) + + (ceqe_num - LAST_15_WQE + 1) * + ZXDH_CEQ_SIZE; + } else { + object_context->data_length = + (LAST_15_WQE + 1) * ZXDH_CEQ_SIZE; + object_context->src_address = + (ceqc_item->ceq_address << 7) + + (ceqe_num - LAST_15_WQE) * + ZXDH_CEQ_SIZE; + } + } + break; + default: + return -ZXDH_NOT_SUPPORT_VIRTUAL_ADDRESS; + } + return ret; +} + +static int +prepare_query_ceq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_ceqc_item ceqc_item = { 0 }; + u8 indicate_id = 0; + + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + + ret = query_hw_object_ceqc(dev, object_req, &ceqc_item); + if (ret) { + pr_err("query hw object, query indicate id err:%d", ret); + return ret; + } + ret = compute_ceq_src_address_and_length(object_context, &ceqc_item); + if (ret) + return ret; + ret = query_route_id_from_reg(dev, CEQ_INDICATE_ID_BASE, 2, 3, + &indicate_id); + if (ret) { + pr_err("query hw object, query indicate id err:%d", ret); + return ret; + } + object_context->src_path_select = indicate_id; + return ret; +} + +static int query_hw_object_aeqc(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req 
*object_req, + struct zxdh_aeqc_item *aeqc_item) +{ + int ret = 0; + struct zxdh_context_req context_req = { 0 }; + struct zxdh_dma_mem resource_buf = { 0 }; + struct zxdh_pci_f *rf = dev_to_rf(dev); + + context_req.resource_id = object_req->queue_id; + resource_buf.va = NULL; + resource_buf.size = ZXDH_READ_AEQC_SIZE; + resource_buf.va = dma_alloc_coherent(rf->hw.device, resource_buf.size, + &resource_buf.pa, GFP_KERNEL); + if (!resource_buf.va) { + pr_err("query cq shadow area, dma allocate err: ENOMEM\n"); + return -ENOMEM; + } + ret = fill_aeqc(rf, &context_req, &resource_buf); + if (ret) { + pr_err("query hw object aeq, query err: %d\n", ret); + goto free_exit; + } + + aeqc_item->aeq_head = ZXDH_GET_QPC_ITEM(u32, resource_buf.va, + ZXDH_AEQC_AEQ_HEAD_OFFSET, + ZXDH_AEQC_AEQ_HEAD); + aeqc_item->leaf_pbl_size = ZXDH_GET_QPC_ITEM(u8, resource_buf.va, + ZXDH_AEQC_AEQ_HEAD_OFFSET, + ZXDH_AEQC_LEAF_PBL_SIZE); + aeqc_item->virtually_mapped = ZXDH_GET_QPC_ITEM( + u8, resource_buf.va, ZXDH_AEQC_AEQ_HEAD_OFFSET, + ZXDH_AEQC_VIRTUALLY_MAPPED); + aeqc_item->aeq_size = ZXDH_GET_QPC_ITEM(u32, resource_buf.va, + ZXDH_AEQC_AEQ_HEAD_OFFSET, + ZXDH_AEQC_AEQ_SIZE); + aeqc_item->aeq_address = ZXDH_GET_QPC_ITEM(u64, resource_buf.va, + ZXDH_AEQC_AEQ_ADDRESS_OFFSET, + ZXDH_AEQC_AEQ_ADDRESS); + + pr_info("query hw object, aeqc aeq_head: 0x%x, leaf_pbl_size: 0x%x, virtually_mapped: 0x%x, aeq_size: 0x%x, aeq_address: 0x%llx", + aeqc_item->aeq_head, aeqc_item->leaf_pbl_size, + aeqc_item->virtually_mapped, aeqc_item->aeq_size, + aeqc_item->aeq_address); + +free_exit: + if (resource_buf.va) { + dma_free_coherent(rf->hw.device, resource_buf.size, + resource_buf.va, resource_buf.pa); + resource_buf.va = NULL; + } + return ret; +} + +static int +compute_aeq_src_address_and_length(struct hw_object_wqe_context *object_context, + struct zxdh_aeqc_item *aeqc_item) +{ + switch (aeqc_item->virtually_mapped) { + case 0: + object_context->data_length = + (aeqc_item->aeq_head + 1) * ZXDH_AEQ_SIZE; + 
object_context->src_address = aeqc_item->aeq_address; + object_context->object_size = ZXDH_AEQ_SIZE; + return 0; + default: + return -ZXDH_NOT_SUPPORT_VIRTUAL_ADDRESS; + } +} + +static int +prepare_query_aeq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_aeqc_item aeqc_item = { 0 }; + u8 indicate_id = 0; + ret = query_hw_object_aeqc(dev, object_req, &aeqc_item); + if (ret) + return ret; + ret = compute_aeq_src_address_and_length(object_context, &aeqc_item); + if (ret) + return ret; + ret = query_route_id_from_reg(dev, AEQ_INDICATE_ID_BASE, 2, 3, + &indicate_id); + if (ret) { + pr_err("query hw object, query indicate id err:%d", ret); + return ret; + } + object_context->src_path_select = indicate_id; + return ret; +} + +static int query_hw_object_qpc(struct zxdh_sc_dev *dev, struct zxdh_get_object_data_req *object_req, struct zxdh_qpc_item *qpc_item) +{ + int err_code = 0; + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_device *iwdev = rf->iwdev; + struct zxdh_dma_mem qpc_buf; + struct zxdh_context_req context_req = { 0 }; + u64 temp; + + context_req.resource_id = object_req->queue_id; + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("query qpc alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = fill_qpc(rf, &context_req, &qpc_buf); + if (err_code) { + pr_err("query qpc fill qpc failed:%d\n", err_code); + goto free_exit; + } + + get_64bit_val(qpc_buf.va, 64, &temp); + qpc_item->sq_leaf_pbl_size = FIELD_GET(GENMASK_ULL(5, 4), temp); + + get_64bit_val(qpc_buf.va, 72, &temp); + qpc_item->sq_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(qpc_buf.va, 144, &temp); + qpc_item->log_sq_size = FIELD_GET(GENMASK_ULL(59, 56), temp); + + get_64bit_val(qpc_buf.va, 352, 
&temp); + qpc_item->rq_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(qpc_buf.va, 360, &temp); + qpc_item->db_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(qpc_buf.va, 376, &temp); + qpc_item->rq_leaf_pbl_size = FIELD_GET(GENMASK_ULL(52, 51), temp); + qpc_item->log_rq_wqe_size = FIELD_GET(GENMASK_ULL(16, 14), temp); + qpc_item->log_rq_size = FIELD_GET(GENMASK_ULL(10, 7), temp); + +free_exit: + if (qpc_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, qpc_buf.pa); + qpc_buf.va = NULL; + } + return err_code; +} + +static int get_rq_size(__u8 log_rq_wqe_size, int *log_rq_wqe_real_size) +{ + switch (log_rq_wqe_size) { + case 0: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_16; + break; + case 1: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_32; + break; + case 2: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_64; + break; + case 3: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_128; + break; + case 4: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_256; + break; + case 5: + *log_rq_wqe_real_size = ZXDH_WQE_SIZE_512; + break; + default: + return 1; + } + return 0; +} + +static int get_srq_size(__u8 log_srq_stride, int *log_srq_stride_wqe_real_size) +{ + switch (log_srq_stride) { + case 1: + *log_srq_stride_wqe_real_size = ZXDH_WQE_SIZE_32; + break; + case 2: + *log_srq_stride_wqe_real_size = ZXDH_WQE_SIZE_64; + break; + case 3: + *log_srq_stride_wqe_real_size = ZXDH_WQE_SIZE_128; + break; + case 4: + *log_srq_stride_wqe_real_size = ZXDH_WQE_SIZE_256; + break; + case 5: + *log_srq_stride_wqe_real_size = ZXDH_WQE_SIZE_512; + break; + default: + return 1; + } + return 0; +} + +#define MAX_BUFFER_SIZE (2 * 1024 * 1024) +static int check_object_index_range(int min_index, int max_index, int index) +{ + return (index >= min_index && index <= max_index); +} + +static int check_object_buffer_size(uint32_t wqe_size, uint32_t wqe_num) +{ + return (wqe_size * wqe_num >= MAX_BUFFER_SIZE); +} + +static void query_32_byte_aligned_address(u64 idx, u64 
*aligned_address, + u64 *aligned_offset) +{ + *aligned_address = idx & ~0x1F; + *aligned_offset = idx & 0x1F; +} + +static void +hw_object_pble_wqe_init(struct hw_object_wqe_context *object_context, + struct zxdh_sc_dev *dev, u64 entry_idx, + u64 *aligned_offset) +{ + u64 pble_aligned_address = 0; + object_context->op_code = ZXDH_OP_QUERY_HW_OBJECT_INFO; + object_context->src_vhca_Index = dev->vhca_id; + object_context->src_object_id = ZXDH_PBLE_QUEUE_OBJ_ID; + object_context->src_waypartition = 0; + + query_32_byte_aligned_address((entry_idx << 3), &pble_aligned_address, + aligned_offset); + object_context->src_address = pble_aligned_address; + object_context->src_interface_select = + object_interface_type[ZXDH_PBLE_QUEUE_OBJ_ID]; + + object_context->dest_vhca_index = dev->vhca_id; + object_context->dest_object_id = ZXDH_DMA_OBJ_ID; + object_context->dest_waypartition = 0; + object_context->dest_path_select = ZXDH_INDICATE_HOST_NOSMMU; + object_context->dest_interface_select = ZXDH_INTERFACE_NOTCACHE; + + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + object_context->dev = dev; + + object_context->data_length = ZXDH_PBLE_QUEUE_QUADRUPLE_SIZE; + object_context->object_size = ZXDH_PBLE_QUEUE_QUADRUPLE_SIZE; +} + +static int query_hw_object_pble_queue(struct zxdh_sc_dev *dev, u64 entry_idx, + u64 *pble_src_addr) +{ + int ret = 0; + u64 aligned_offset = 0; + u8 route_id = 0; + struct hw_object_wqe_context object_context = { 0 }; + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_device *iwdev = rf->iwdev; + struct zxdh_dma_mem qpc_buf; + u64 temp; + + hw_object_pble_wqe_init(&object_context, dev, entry_idx, + &aligned_offset); + ret = query_route_id_from_reg(dev, PBLE_QUEUE_CACHE_ID_BASE, 0, 1, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + object_context.src_path_select = route_id; + + qpc_buf.va = NULL; + qpc_buf.size = object_context.data_length; + qpc_buf.va = 
dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("query qpc alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + object_context.dest_address = qpc_buf.pa; + + ret = hw_object_query_info(rf, &object_context); + if (ret) { + pr_err("query hw object, query object info err:%d\n", ret); + goto free_exit; + } + + get_64bit_val(qpc_buf.va, aligned_offset, &temp); + *pble_src_addr = FIELD_GET(GENMASK_ULL(63, 0), temp); + +free_exit: + if (qpc_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, + qpc_buf.va, qpc_buf.pa); + qpc_buf.va = NULL; + } + return ret; +} + +static __u64 physical_addressing(__u64 qp_address, int qp_wqe_size, + int wqe_index) +{ + return (qp_address & ZXDH_MAX_STATS_64) + qp_wqe_size * wqe_index; +} + +static __u64 single_level_virtual_addressing(struct zxdh_sc_dev *dev, + __u64 qp_address, int qp_wqe_size, + int wqe_index) +{ + __u32 pble_index = (qp_address & ZXDH_MAX_STATS_28) + + (qp_wqe_size * wqe_index) / 4096; + __u64 pble_addr; + int ret = query_hw_object_pble_queue(dev, pble_index, &pble_addr); + if (ret) { + pr_err("query hw object, query pble queue address for qp info err:%d\n", + ret); + return -EINVAL; + } + return (pble_addr & ZXDH_MAX_STATS_64) + + ((qp_wqe_size * wqe_index) & ZXDH_MAX_STATS_12); +} + +static int calculate_qp_src_address(struct zxdh_sc_dev *dev, + struct zxdh_qp_addr_context qp_addr_ctx, + u64 *src_addr) +{ + switch (qp_addr_ctx.addr_mode) { + case 0: + *src_addr = physical_addressing(qp_addr_ctx.qp_base_addr, + qp_addr_ctx.wqe_size, + qp_addr_ctx.wqe_index); + break; + case 1: + *src_addr = single_level_virtual_addressing( + dev, qp_addr_ctx.qp_base_addr, qp_addr_ctx.wqe_size, + qp_addr_ctx.wqe_index); + break; + default: + pr_err("query address mode is invalid %d\n", + qp_addr_ctx.addr_mode); + return 1; + } + return 0; +} + +static int +query_hw_object_qpc_for_rq(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + 
struct zxdh_qp_addr_context *qp_addr_ctx) +{ + int err_code = 0; + struct zxdh_qpc_item qpc_item = { 0 }; + err_code = query_hw_object_qpc(dev, object_req, &qpc_item); + if (err_code) { + pr_err("query qpc failed:%u\n", err_code); + return err_code; + } + + if (get_rq_size(qpc_item.log_rq_wqe_size, + &qpc_item.log_rq_wqe_real_size)) { + pr_err("query qpc, invalid log_rq_wqe_size:%d\n", + qpc_item.log_rq_wqe_size); + return -EINVAL; + } + + if (!check_object_index_range(0, (1U << qpc_item.log_rq_size) - 1, + object_req->entry_idx)) { + pr_err("query entry idx is out of index.entry_idx:%u\n", + object_req->entry_idx); + return -ZXDH_ENTRY_IDX_ERROR; + } + if (check_object_buffer_size(qpc_item.log_rq_wqe_real_size, + object_req->object_num)) { + pr_err("query buffer size is more than 2M.\n"); + return -ZXDH_DMA_MEMORY_OVER_2M; + } + + qp_addr_ctx->wqe_size = qpc_item.log_rq_wqe_real_size; + qp_addr_ctx->wqe_index = object_req->entry_idx; + qp_addr_ctx->addr_mode = qpc_item.rq_leaf_pbl_size; + qp_addr_ctx->qp_base_addr = qpc_item.rq_address; + + return err_code; +} + +static int +prepare_query_rq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_qp_addr_context qp_addr_ctx = { 0 }; + u64 src_addr = 0; + u8 route_id = 0; + ret = query_hw_object_qpc_for_rq(dev, object_req, &qp_addr_ctx); + if (ret) + return ret; + if (qp_addr_ctx.addr_mode != 0 && object_req->object_num != 1) { + pr_err("query qpc, When the addressing mode is virtual addressing, object_num can only be 1.object_num:%d\n", + object_req->object_num); + return -ZXDH_PBLE_ADDRESSING_ONLY_SUPPORTS_OBJECT_NUMBER_1; + } + if (calculate_qp_src_address(dev, qp_addr_ctx, &src_addr)) { + pr_err("query qpc, invalid addressing mode\n"); + return -EINVAL; + } + ret = query_route_id_from_reg(dev, C_RQ_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg 
value\n"); + return -EINVAL; + } + object_context->src_address = src_addr; + object_context->src_path_select = route_id; + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + object_context->data_length = + qp_addr_ctx.wqe_size * object_req->object_num; + object_context->object_size = qp_addr_ctx.wqe_size; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + return ret; +} + +static int +query_hw_object_qpc_for_rq_db(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int err_code = 0; + struct zxdh_qpc_item qpc_item = { 0 }; + err_code = query_hw_object_qpc(dev, object_req, &qpc_item); + if (err_code) { + pr_err("query qpc failed:%u\n", err_code); + return err_code; + } + object_context->src_address = qpc_item.db_address; + return err_code; +} + +static int prepare_query_rq_doorbell_wqe_context( + struct zxdh_sc_dev *dev, struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 route_id = 0; + ret = query_hw_object_qpc_for_rq_db(dev, object_req, object_context); + if (ret) + return ret; + + ret = query_route_id_from_reg(dev, C_RQDB_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + object_context->data_length = ZXDH_RQ_SHADOW_AREA_SIZE; + object_context->src_path_select = route_id; + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + return ret; +} + +static int query_hw_object_srqc(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_srqc_item *srqc_item) +{ + int err_code = 0; + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_device *iwdev = rf->iwdev; + struct zxdh_dma_mem srqc_buf; + struct zxdh_context_req context_req = { 0 }; + u64 temp; + + context_req.resource_id = object_req->queue_id; + srqc_buf.va = NULL; + srqc_buf.size = ALIGN(ZXDH_SRQ_CTX_SIZE, 
ZXDH_SRQC_ALIGNMENT); + srqc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, srqc_buf.size, + &srqc_buf.pa, GFP_KERNEL); + if (!srqc_buf.va) { + pr_err("query qpc alloc dma failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = fill_srqc(rf, &context_req, &srqc_buf); + if (err_code) { + pr_err("query qpc fill qpc failed:%d\n", err_code); + goto free_exit; + } + + get_64bit_val(srqc_buf.va, 0, &temp); + srqc_item->leaf_pbl_size = FIELD_GET(GENMASK_ULL(61, 60), temp); + srqc_item->log_srq_size = FIELD_GET(GENMASK_ULL(59, 56), temp); + srqc_item->log_srq_stride = FIELD_GET(GENMASK_ULL(26, 24), temp); + srqc_item->list_leaf_pbl_size = FIELD_GET(GENMASK_ULL(23, 22), temp); + + get_64bit_val(srqc_buf.va, 8, &temp); + srqc_item->srq_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(srqc_buf.va, 16, &temp); + srqc_item->srq_list_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(srqc_buf.va, 24, &temp); + srqc_item->dbr_address = FIELD_GET(GENMASK_ULL(63, 0), temp); + + get_64bit_val(srqc_buf.va, 32, &temp); + srqc_item->hw_wqe_cnt = FIELD_GET(GENMASK_ULL(15, 0), temp); + +free_exit: + if (srqc_buf.va) { + dma_free_coherent(iwdev->rf->hw.device, srqc_buf.size, + srqc_buf.va, srqc_buf.pa); + srqc_buf.va = NULL; + } + return err_code; +} + +static int +query_hw_object_srqc_for_srqp(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_qp_addr_context *qp_addr_ctx) +{ + int err_code = 0; + struct zxdh_srqc_item srqc_item = { 0 }; + err_code = query_hw_object_srqc(dev, object_req, &srqc_item); + if (err_code) + return err_code; + qp_addr_ctx->addr_mode = srqc_item.list_leaf_pbl_size == 0 ? 
ZXDH_ADDR_TYPE_ZERO_BASED : ZXDH_ADDR_TYPE_VA_BASED; + qp_addr_ctx->wqe_size = ZXDH_SRQP_INDEX_SIZE; + qp_addr_ctx->wqe_index = object_req->entry_idx; + qp_addr_ctx->qp_base_addr = srqc_item.srq_list_address; + + if (check_object_buffer_size(qp_addr_ctx->wqe_size, + object_req->object_num)) { + pr_err("query buffer size is more than 2M.\n"); + return -ZXDH_DMA_MEMORY_OVER_2M; + } + + return err_code; +} + +static int +prepare_query_srqp_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_qp_addr_context qp_addr_ctx = { 0 }; + u64 src_addr = 0, aligned_offset = 0, phy_src_addr = 0; + u8 route_id = 0; + ret = query_hw_object_srqc_for_srqp(dev, object_req, &qp_addr_ctx); + if (ret) + return ret; + + if (calculate_qp_src_address(dev, qp_addr_ctx, &phy_src_addr)) { + pr_err("query qpc, invalid addressing mode\n"); + return -EINVAL; + } + query_32_byte_aligned_address(phy_src_addr, &src_addr, &aligned_offset); + ret = query_route_id_from_reg(dev, C_SRQP_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + object_context->data_length = ZXDH_PBLE_QUEUE_QUADRUPLE_SIZE; + object_context->src_path_select = route_id; + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + object_context->src_address = src_addr; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + object_context->srqp_aligned_offset = aligned_offset; + return ret; +} + +static int +query_hw_object_srqc_for_srq(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_qp_addr_context *qp_addr_ctx) +{ + int err_code = 0; + struct zxdh_srqc_item srqc_item = { 0 }; + err_code = query_hw_object_srqc(dev, object_req, &srqc_item); + if (err_code) + return err_code; + if (get_srq_size(srqc_item.log_srq_stride, + &srqc_item.log_srq_stride_wqe_real_size)) { + pr_err("query srqc, 
invalid log_rq_wqe_size:%d\n", + srqc_item.log_srq_stride); + return -EINVAL; + } + + if (srqc_item.leaf_pbl_size != 0 && object_req->object_num != 1) { + pr_err("query qpc, When the addressing mode is virtual addressing, object_num can only be 1.object_num:%d\n", + object_req->object_num); + return -ZXDH_PBLE_ADDRESSING_ONLY_SUPPORTS_OBJECT_NUMBER_1; + } + + if (!check_object_index_range(0, (1U << srqc_item.log_srq_size) - 1, + object_req->entry_idx)) { + pr_err("query entry idx is out of index.entry_idx:%u\n", + object_req->entry_idx); + return -ZXDH_ENTRY_IDX_ERROR; + } + if (check_object_buffer_size(srqc_item.log_srq_stride_wqe_real_size, + object_req->object_num)) { + pr_err("query buffer size is more than 2M.\n"); + return -ZXDH_DMA_MEMORY_OVER_2M; + } + qp_addr_ctx->addr_mode = srqc_item.leaf_pbl_size; + qp_addr_ctx->wqe_size = srqc_item.log_srq_stride_wqe_real_size; + qp_addr_ctx->wqe_index = object_req->entry_idx; + qp_addr_ctx->qp_base_addr = srqc_item.srq_address; + return err_code; +} + +static int +prepare_query_srq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_qp_addr_context qp_addr_ctx = { 0 }; + u64 src_addr = 0; + u8 route_id = 0; + ret = query_hw_object_srqc_for_srq(dev, object_req, &qp_addr_ctx); + if (ret) + return ret; + if (calculate_qp_src_address(dev, qp_addr_ctx, &src_addr)) { + pr_err("query srqc, invalid addressing mode\n"); + return -EINVAL; + } + + ret = query_route_id_from_reg(dev, C_SRQ_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + object_context->src_address = src_addr; + object_context->src_path_select = route_id; + + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + object_context->data_length = + qp_addr_ctx.wqe_size * object_req->object_num; + object_context->object_size = qp_addr_ctx.wqe_size; + 
object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + return ret; +} + +static int +query_hw_object_srqc_for_srq_db(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int err_code = 0; + struct zxdh_srqc_item srqc_item = { 0 }; + err_code = query_hw_object_srqc(dev, object_req, &srqc_item); + if (err_code) + return err_code; + object_context->src_address = srqc_item.dbr_address; + return err_code; +} + +static int prepare_query_srq_doorbell_wqe_context( + struct zxdh_sc_dev *dev, struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + u8 route_id = 0; + ret = query_hw_object_srqc_for_srq_db(dev, object_req, object_context); + if (ret) + return ret; + + ret = query_route_id_from_reg(dev, C_SRQDB_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + object_context->data_length = ZXDH_SRQ_SHADOW_AREA_SIZE; + object_context->src_path_select = route_id; + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + return ret; +} + +static int +query_hw_object_qpc_for_sq(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct zxdh_qp_addr_context *qp_addr_ctx) +{ + int err_code = 0; + struct zxdh_qpc_item qpc_item = { 0 }; + err_code = query_hw_object_qpc(dev, object_req, &qpc_item); + if (err_code) { + pr_err("query qpc failed:%u\n", err_code); + return err_code; + } + + if (qpc_item.sq_leaf_pbl_size != 0 && object_req->object_num != 1) { + pr_err("query qpc, When the addressing mode is virtual addressing, object_num can only be 1.object_num:%d\n", + object_req->object_num); + return -ZXDH_PBLE_ADDRESSING_ONLY_SUPPORTS_OBJECT_NUMBER_1; + } + + if (!check_object_index_range(0, (1U << qpc_item.log_sq_size) - 1, + object_req->entry_idx)) { + pr_err("query entry idx is out of index.entry_idx:%u\n", + object_req->entry_idx); + 
return -ZXDH_ENTRY_IDX_ERROR; + } + + if (check_object_buffer_size(ZXDH_SQ_UNIT_SIZE, object_req->object_num)) { + pr_err("query buffer size is more than 2M.\n"); + return -ZXDH_DMA_MEMORY_OVER_2M; + } + qp_addr_ctx->wqe_size = ZXDH_SQ_UNIT_SIZE; + qp_addr_ctx->wqe_index = object_req->entry_idx; + qp_addr_ctx->addr_mode = qpc_item.sq_leaf_pbl_size; + qp_addr_ctx->qp_base_addr = qpc_item.sq_address; + + return err_code; +} + +static int +prepare_query_sq_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + int ret = 0; + struct zxdh_qp_addr_context qp_addr_ctx = { 0 }; + u64 src_addr = 0; + u8 route_id = 0; + ret = query_hw_object_qpc_for_sq(dev, object_req, &qp_addr_ctx); + if (ret) + return ret; + if (calculate_qp_src_address(dev, qp_addr_ctx, &src_addr)) { + pr_err("query qpc, invalid addressing mode\n"); + return -EINVAL; + } + + ret = query_route_id_from_reg(dev, C_SQ_INDICATE_ID_REG_CHECK, 2, 3, + &route_id); + if (ret) { + pr_err("query route id failed, invalid reg value\n"); + return -EINVAL; + } + + object_context->src_address = src_addr; + object_context->src_path_select = route_id; + object_context->src_interface_select = ZXDH_INTERFACE_NOTCACHE; + object_context->data_length = + qp_addr_ctx.wqe_size * object_req->object_num; + object_context->zxdh_hmc_rsrc_type = ZXDH_HMC_IW_PBLE; + return ret; +} + +static int +hw_object_calculate_wqe_context(struct zxdh_sc_dev *dev, + struct zxdh_get_object_data_req *object_req, + struct hw_object_wqe_context *object_context) +{ + switch (object_req->object_id) { + case ZXDH_PBLE_MR_OBJ_ID: + return prepare_query_pble_mr_wqe_context(dev, object_req, + object_context); + case ZXDH_PBLE_QUEUE_OBJ_ID: + return prepare_query_pble_queue_wqe_context(dev, object_req, + object_context); + case ZXDH_AH_OBJ_ID: + return prepare_query_ah_wqe_context(dev, object_req, + object_context); + case ZXDH_IRD_OBJ_ID: + return 
prepare_query_ird_wqe_context(dev, object_req, + object_context); + case ZXDH_TX_WINDOW_OBJ_ID: + return prepare_query_tx_window_context(dev, object_req, + object_context); + case ZXDH_CQ_SHADOW_AREA: + return prepare_query_cq_doorbell_wqe_context(dev, object_req, + object_context); + case ZXDH_CQ: + return prepare_query_cq_wqe_context(dev, object_req, + object_context); + case ZXDH_CEQ: + return prepare_query_ceq_wqe_context(dev, object_req, + object_context); + case ZXDH_AEQ: + return prepare_query_aeq_wqe_context(dev, object_req, + object_context); + case ZXDH_RQ: + return prepare_query_rq_wqe_context(dev, object_req, object_context); + case ZXDH_RQ_SHADOW_AREA: + return prepare_query_rq_doorbell_wqe_context(dev, object_req, object_context); + case ZXDH_SRQP: + return prepare_query_srqp_wqe_context(dev, object_req, object_context); + case ZXDH_SRQ: + return prepare_query_srq_wqe_context(dev, object_req, object_context); + case ZXDH_SRQ_SHADOW_AREA: + return prepare_query_srq_doorbell_wqe_context(dev, object_req, object_context); + case ZXDH_SQ: + return prepare_query_sq_wqe_context(dev, object_req, object_context); + default: + return -ZXDH_NOT_SUPPORT_OBJECT_ID; + } +} + +static bool validate_address_32_byte_align(u64 address) +{ + return !(address & 0x1F); +} + +static bool +validate_hw_object_cache_id(struct hw_object_wqe_context *hw_object_wqe_ctx) +{ + int use_cache = 0; + use_cache = !object_interface_type[hw_object_wqe_ctx->src_object_id]; + if (!use_cache) { + return true; + } + switch (hw_object_wqe_ctx->src_object_id) { + case ZXDH_IRD_OBJ_ID: + return hw_object_wqe_ctx->src_path_select == 2; + case ZXDH_TX_WINDOW_OBJ_ID: + return hw_object_wqe_ctx->src_path_select == 3; + default: + return hw_object_wqe_ctx->src_path_select == 1; + } +} + +static bool +validate_allocate_buffer_size(struct hw_object_wqe_context *hw_object_wqe_ctx) +{ + return hw_object_wqe_ctx->data_length <= ZXDH_CAP_DATA_HOST_MEM_SIZE; +} + +static bool 
+validate_require_data_length(struct hw_object_wqe_context *hw_object_wqe_ctx) +{ + u32 max_cnt = 0; + u64 size = 0; + u64 relative_address = 0; + if (hw_object_wqe_ctx->zxdh_hmc_rsrc_type == ZXDH_HMC_IW_MAX) { + return true; + } + max_cnt = hw_object_wqe_ctx->dev->hmc_info + ->hmc_obj[hw_object_wqe_ctx->zxdh_hmc_rsrc_type] + .max_cnt; + size = hw_object_wqe_ctx->dev->hmc_info + ->hmc_obj[hw_object_wqe_ctx->zxdh_hmc_rsrc_type] + .size; + relative_address = hw_object_wqe_ctx->req->entry_idx * + (u64)hw_object_wqe_ctx->object_size; + return max_cnt * size >= + relative_address + hw_object_wqe_ctx->data_length; +} + +static int post_validate_hw_object_request( + struct hw_object_wqe_context *hw_object_wqe_ctx, + struct zxdh_get_object_data_req *object_data_req) +{ + int ret = 0; + if (!validate_address_32_byte_align(hw_object_wqe_ctx->src_address)) { + pr_err("query hw object, dma read src address not 32 bit aligned, 0x%llx\n", + hw_object_wqe_ctx->src_address); + return -ZXDH_DMA_READ_NOT_32_ALIGN; + } + + if (!validate_hw_object_cache_id(hw_object_wqe_ctx)) { + pr_err("query hw object, validate cache id err: invalid cache id %d \n", + hw_object_wqe_ctx->src_path_select); + return -ZXDH_CACHE_ID_CHECK_ERROR; + } + + if (!validate_allocate_buffer_size(hw_object_wqe_ctx)) { + pr_err("query hw object, validate buffer: buffer size more than 2M 0x%x\n", + hw_object_wqe_ctx->data_length); + return -ZXDH_DMA_MEMORY_OVER_2M; + } + if (!validate_require_data_length(hw_object_wqe_ctx)) { + pr_err("query hw object, validate data_length: invalid entry_idx %d and object_num %d \n", + object_data_req->entry_idx, object_data_req->object_num); + return -ZXDH_DATA_ENTRY_IDX_OVER_LIMIT; + } + return ret; +}; + +static int hw_object_query_info(struct zxdh_pci_f *rf, + struct hw_object_wqe_context *object_wqe_ctx) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_sc_dev *dev; + int ret = 0; + dev = &rf->sc_dev; + + cqp_request = 
zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + pr_err("query hw object, get cqp resquest err"); + return -ENOMEM; + } + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = object_wqe_ctx->op_code; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + pr_info("dma read hw object, opcode %d:\n", cqp_info->cqp_cmd); + + cqp_info->in.u.dma_writeread.src_dest.src = object_wqe_ctx->src_address; + cqp_info->in.u.dma_writeread.src_dest.len = object_wqe_ctx->data_length; + cqp_info->in.u.dma_writeread.src_dest.dest = + object_wqe_ctx->dest_address; + pr_info("dma read hw object, src addr: %llu, dest addr: %llu, len: %u\n", + cqp_info->in.u.dma_writeread.src_dest.src, + cqp_info->in.u.dma_writeread.src_dest.dest, + cqp_info->in.u.dma_writeread.src_dest.len); + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = + object_wqe_ctx->src_vhca_Index; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + object_wqe_ctx->src_object_id; + cqp_info->in.u.dma_writeread.src_path_index.waypartion = + object_wqe_ctx->src_waypartition; + cqp_info->in.u.dma_writeread.src_path_index.path_select = + object_wqe_ctx->src_path_select; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + object_wqe_ctx->src_interface_select; + pr_info("dma read hw object, src path index: vhca_id: %u, object id: %d, waypartion %d, path select %d, interface select %d\n", + cqp_info->in.u.dma_writeread.src_path_index.vhca_id, + cqp_info->in.u.dma_writeread.src_path_index.obj_id, + cqp_info->in.u.dma_writeread.src_path_index.waypartion, + cqp_info->in.u.dma_writeread.src_path_index.path_select, + cqp_info->in.u.dma_writeread.src_path_index.inter_select); + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = + object_wqe_ctx->dest_vhca_index; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = + object_wqe_ctx->dest_object_id; + cqp_info->in.u.dma_writeread.dest_path_index.waypartion = + object_wqe_ctx->dest_waypartition; + 
cqp_info->in.u.dma_writeread.dest_path_index.path_select = + object_wqe_ctx->dest_path_select; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + object_wqe_ctx->dest_interface_select; + pr_info("dma read hw object, dest path index: vhca_id: %u, object id: %d, waypartion %d, path select %d, interface select %d\n", + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id, + cqp_info->in.u.dma_writeread.dest_path_index.obj_id, + cqp_info->in.u.dma_writeread.dest_path_index.waypartion, + cqp_info->in.u.dma_writeread.dest_path_index.path_select, + cqp_info->in.u.dma_writeread.dest_path_index.inter_select); + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + ret = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (ret) { + pr_err("query hw object, handle query hw object cpq request err:%d\n", + ret); + return ret; + } + return ret; +} + +static int hw_object_fill_object_data_resp( + struct zxdh_get_object_data_resp *object_data_resp, + struct hw_object_wqe_context *object_wqe_ctx) +{ + object_data_resp->length = object_wqe_ctx->data_length; + object_data_resp->vhca_id = object_wqe_ctx->src_vhca_Index; + object_data_resp->route_id = object_wqe_ctx->src_path_select; + object_data_resp->object_size = object_wqe_ctx->object_size; + object_data_resp->srqp_aligned_offset = object_wqe_ctx->srqp_aligned_offset; + pr_info("query hw object response, object_mmap_offset: %llu, length %u, vhca_id: %u, route_id: %d, wqe_size: %u, srqp_aligned_offset: %llu\n", + object_data_resp->object_mmap_offset, object_data_resp->length, + object_data_resp->vhca_id, object_data_resp->route_id, + object_data_resp->object_size, object_wqe_ctx->srqp_aligned_offset); + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_OBJ_DATA)( + struct uverbs_attr_bundle *attrs) +#else +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_GET_OBJ_DATA)( + struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) 
+#endif +{ + int ret = 0; + struct ib_ucontext *ib_uctx; + struct zxdh_ucontext *ucontext; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_pci_f *rf; + struct zxdh_get_object_data_req object_data_req = { 0 }; + struct zxdh_get_object_data_resp object_data_resp; + struct hw_object_wqe_context object_wqe_ctx = { 0 }; + struct zxdh_sc_dev *dev; +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + rf = iwdev->rf; + dev = &rf->sc_dev; + ucontext = to_ucontext(ib_uctx); + + if (iwdev->hw_data_cap.hw_object_mmap.addr_info.cap_direct_dma_addr + .cap_cpu_addr != NULL || + iwdev->hw_data_cap.hw_object_mmap.entry_info.cap_mmap_entry != + NULL) { + pr_err("query hw object, cpu addr or object mmap entry already exist\n"); + return -ENOMEM; + } + + ret = uverbs_copy_from(&object_data_req, attrs, + ZXDH_IB_ATTR_DEV_GET_OBJ_DATA); + if (IS_UVERBS_COPY_ERR(ret)) { + pr_err("query hw object, uverbs copy err: %d\n", ret); + return ret; + } + + ret = pre_validate_hw_object_request(&object_data_req); + if (ret) { + pr_err("query hw object, pre validate hw object request err: %d\n", + ret); + return ret; + } + + hw_object_wqe_init(&object_wqe_ctx, dev, &object_data_req); + + ret = hw_object_calculate_wqe_context(dev, &object_data_req, + &object_wqe_ctx); + if (ret) { + pr_err("query hw object, caculate wqe context err: %d\n", ret); + return ret; + } + + ret = post_validate_hw_object_request(&object_wqe_ctx, + &object_data_req); + if (ret) { + pr_err("query hw object, pre validate hw object request err: %d\n", + ret); + return ret; + } + + if (allocate_addr_for_mmap(iwdev, ucontext, object_wqe_ctx.data_length, + &iwdev->hw_data_cap.hw_object_mmap, + &object_data_resp.object_mmap_offset)) { + pr_err("query hw object, obj_mmap_entry insert err!buf_size:%u\n", + 
object_wqe_ctx.data_length); + goto free_exit; + } + + iwdev->hw_data_cap.object_buffer_size = object_wqe_ctx.data_length; + object_wqe_ctx.dest_address = + iwdev->hw_data_cap.hw_object_mmap.addr_info.cap_direct_dma_addr + .cap_dma_addr; + + ret = hw_object_query_info(rf, &object_wqe_ctx); + if (ret) { + pr_err("query hw object, query object info err:%d\n", ret); + goto free_exit; + } + hw_object_fill_object_data_resp(&object_data_resp, &object_wqe_ctx); +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_GET_OBJ_DATA_RESP, + &object_data_resp, + sizeof(object_data_resp)); +#else + ret = zxdh_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_GET_OBJ_DATA_RESP, + &object_data_resp, + sizeof(object_data_resp)); +#endif + if (ret) { + pr_err("query hw object, copy response to user err:%d\n", ret); + goto free_exit; + } + return ret; +free_exit: + pr_err("query hw object failed, free allocated memory!\n"); + free_mmap_addr(iwdev, iwdev->hw_data_cap.object_buffer_size, + &iwdev->hw_data_cap.hw_object_mmap); + return ret; +} + +static u32 get_reg_value_by_addr(struct zxdh_pci_f *rf, + u64 reg_va, u16 *count_ret, u32 *reg_list, u64 *base_reg) +{ + u32 value = 0, base_addr = 0; + long offset = 0; + switch (reg_va >> 12) { + case 0x6205400: + base_addr = C_RDMA_RX_PKT_PROC_PAGE; + break; + case 0x6205420: + base_addr = C_RDMA_RX_PUBLIC_PAGE1; + break; + case 0x6205440: + base_addr = C_RDMA_RX_PUBLIC_PAGE2; + break; + case 0x6205460: + base_addr = C_RDMA_RX_CNP_GEN_PAGE; + break; + case 0x6330200: + if (zxdh_rdma_reg_read(rf, reg_va, &value)) { + pr_err("[check_health_err] read 0x633 reg value falied, reg_va: %llx\n", reg_va); + (*count_ret)--; + } + break; + default: + offset = (reg_va - *base_reg) / 4; + if (*base_reg == 0 || offset < 0 || offset >= MAX_READ_REG_SIZE) { + if (zxdh_rdma_regs_read(rf, reg_va, reg_list, MAX_READ_REG_SIZE)) { + pr_err("[check_health_err] read reg value falied, reg_va: %llx\n", reg_va); + 
(*count_ret)--; + return 0; + } + *base_reg = reg_va; + value = reg_list[0]; + } else { + value = reg_list[offset]; + } + break; + } + if (base_addr) { + value = readl((u32 __iomem *)(rf->sc_dev.hw->hw_addr + base_addr + (reg_va & 0xfff))); + } + return value; +} + +static int get_reg_value(struct zxdh_pci_f *rf, + u64 reg_va, u64 value_va, u16 count, u16 *count_ret) +{ + u64 regs[MAX_COPY_SIZE]; + u32 values[MAX_COPY_SIZE]; + u32 reg_list[MAX_READ_REG_SIZE]; + u64 base_reg = 0; + int i = 0, j = 0, len = 0; + + if (reg_va == 0 || value_va == 0 || + count == 0 || count > 512) { + pr_err("invalid reg_va or value_va or count!\n"); + return -EINVAL; + } + *count_ret = count; + while (i < count) { + if (count - i >= MAX_COPY_SIZE) + len = MAX_COPY_SIZE; + else + len = count - i; + i += len; + if (copy_from_user((void *)regs, + (const void __user *)(uintptr_t)reg_va, + sizeof(u64) * len)) + return -EFAULT; + + reg_va += sizeof(u64) * len; + j = 0; + while (j < len) { + values[j] = get_reg_value_by_addr(rf, regs[j], count_ret, reg_list, &base_reg); + j++; + } + + if (copy_to_user((void __user *)(uintptr_t)value_va, + (const void *)values, + sizeof(u32) * len)) + return -EFAULT; + + value_va += sizeof(u32) * len; + } + return 0; +} + +static int get_reg_value_ex(struct zxdh_pci_f *rf, + u64 va, u16 *count) +{ + struct zxdh_reg_value reg_value[MAX_COPY_SIZE_EX]; + u32 reg1, reg2; + int i = 0; + + if (va == 0) { + pr_err("invalid reg_value_va_ex!\n"); + return -EINVAL; + } + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c0b); + GET_REG_AND_WRITE_TO_USE(0x620660b240, 0); + + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c0d); + GET_REG_AND_WRITE_TO_USE(0x620660b248, 0); + + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c0f); + GET_REG_AND_WRITE_TO_USE(0x620660b250, 0); + + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c11); + GET_REG_AND_WRITE_TO_USE(0x620660b258, 0); + + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c13); + 
GET_REG_AND_WRITE_TO_USE(0x620660b26c, 0); + + WRITE_REGISTER_AND_CHECK(rf, 0x620660b3d0, 0xa0202c00); + GET_REG_AND_WRITE_TO_USE(0x620660b26c, 1); + + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®2); + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®1); + reg1 |= 1; + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg1); + GET_REG_AND_WRITE_TO_USE(0x62065F0b58, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0ae0, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F07b8, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0824, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0880, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F08dc, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0938, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0994, 0); + GET_REG_AND_WRITE_TO_USE(0x62065F0a68, 0); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg2); + + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®2); + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®1); + reg1 |= (1 << 4); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg1); + GET_REG_AND_WRITE_TO_USE(0x62065f0f1c, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f0f24, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f0f0c, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f0f14, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f0f18, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f0f28, 0); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg2); + + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®2); + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®1); + reg1 |= (1 << 5); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg1); + GET_REG_AND_WRITE_TO_USE(0x62065f10F8, 0); + GET_REG_AND_WRITE_TO_USE(0x62065f10FC, 0); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg2); + + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®2); + READ_REGISTER_AND_CHECK(rf, 0x62065F0100, ®1); + reg1 &= ~(1 << 4); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg1); + GET_REG_AND_WRITE_TO_USE(0x62065f0f0c, 1); + GET_REG_AND_WRITE_TO_USE(0x62065f0f10, 0); + WRITE_REGISTER_AND_CHECK(rf, 0x62065F0100, reg2); + + if (copy_to_user((void __user *)(uintptr_t)va, + (const void *)reg_value, + sizeof(struct 
zxdh_reg_value) * i)) + return -EFAULT; + + return 0; +} + +static int compare_SMMU_reg(struct zxdh_pci_f *rf, + u64 va, u16 *count) +{ + u32 flag = 0; + u16 i = 0, j, k, index = 0; + u64 regs[MAX_SMMU_READ_REG_SIZE]; + while (i < 512 / MAX_SMMU_READ_REG_SIZE) { + j = 0; + READ_REGISTERS_AND_CHECK(rf, 0x4ec02000 + i * MAX_SMMU_READ_REG_SIZE * 0x8, (u32 *)regs, MAX_SMMU_READ_REG_SIZE * 2); + while (j < MAX_SMMU_READ_REG_SIZE) { + k = 0; + flag = 0; + while (k < 4) { + if (regs[j + k] != 0) { + flag = 1; + break; + } + k++; + } + if (flag == 1) { + for (k = 0; k < 4; k++) + if (regs[j + k] != 0) + pr_err("SMMU reg 0x%x is not zero, reg value is 0x%llx\n", 0x4ec02000 + i * MAX_SMMU_READ_REG_SIZE * 0x8 + (j + k) * 0x8, regs[j + k]); + if (copy_to_user((void *)va, &index, sizeof(index))) + return -EFAULT; + va += sizeof(index); + (*count)++; + } + j += 4; + index++; + } + i++; + } + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_HEALTH_CHECK)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_HEALTH_CHECK)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_pci_f *rf; + struct zxdh_health_check_req health_check_req = { 0 }; + struct zxdh_health_check_resp health_check_resp = { 0 }; + int ret = 0; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + rf = iwdev->rf; + + ret = uverbs_copy_from(&health_check_req, attrs, ZXDH_IB_ATTR_DEV_HEALTH_CHECK); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + switch (health_check_req.reg_type) { + case ZXDH_WRITE_FIRST_REG: + ret = get_reg_value_ex(rf, health_check_req.reg_value_va_ex, &health_check_resp.count_ex); + if (ret) { + pr_err("get reg value 
ex failed!\n"); + return ret; + } + fallthrough; + case ZXDH_NORMAL_REG: + ret = get_reg_value(rf, health_check_req.reg_va, health_check_req.value_va, + health_check_req.count, &health_check_resp.count); + break; + + case ZXDH_SMMU_REG: + ret = compare_SMMU_reg(rf, health_check_req.reg_value_va_ex, &health_check_resp.count_ex); + break; + + default: + pr_err("errro reg_type is %d, not support!\n", health_check_req.reg_type); + return -EFAULT; + } + if (ret) { + pr_err("get reg value failed!\n"); + return ret; + } + +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_HEALTH_CHECK_RESP, + &health_check_resp, sizeof(health_check_resp)); +#else + ret = zxdh_copy_to_struct_or_zero(attrs, ZXDH_IB_ATTR_DEV_HEALTH_CHECK_RESP, + &health_check_resp, sizeof(health_check_resp)); +#endif + if (ret) { + pr_err("ib_copy_to_udata failed!\n"); + return -EFAULT; + } + return 0; +} + +static int clean_cc_basic_cnt_info(struct zxdh_device *iwdev) +{ + uint32_t read_reg_val = 0; + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ACTIVE_VHCA_SQ_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_SQ_CNT_CLEAN, read_reg_val |= C_ACTIVE_VHCA_SQ_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_SQ_CNT_CLEAN, read_reg_val &= (~C_ACTIVE_VHCA_SQ_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ACTIVE_VHCA_READ_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_READ_CNT_CLEAN, read_reg_val |= C_ACTIVE_VHCA_READ_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_READ_CNT_CLEAN, read_reg_val &= (~C_ACTIVE_VHCA_READ_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_ACTIVE_VHCA_ACK_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_ACK_CNT_CLEAN, read_reg_val |= C_ACTIVE_VHCA_ACK_CNT_CLEAN_MASK); + 
REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_ACTIVE_VHCA_ACK_CNT_CLEAN, read_reg_val &= (~C_ACTIVE_VHCA_ACK_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_TASK_PREFETCH_RECV_COM_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TASK_PREFETCH_RECV_COM_CNT_CLEAN, read_reg_val |= C_TASK_PREFETCH_RECV_COM_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TASK_PREFETCH_RECV_COM_CNT_CLEAN, read_reg_val &= (~C_TASK_PREFETCH_RECV_COM_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_TX_PKT_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_CNT_CLEAN, read_reg_val |= C_TX_PKT_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_CNT_CLEAN, read_reg_val &= (~C_TX_PKT_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_RX_PKT_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_RX_PKT_CNT_CLEAN, read_reg_val |= C_RX_PKT_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_RX_PKT_CNT_CLEAN, read_reg_val &= (~C_RX_PKT_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_RETRY_TIMEOUTE_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_RETRY_TIMEOUTE_CNT_CLEAN, read_reg_val |= C_RETRY_TIMEOUTE_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_RETRY_TIMEOUTE_CNT_CLEAN, read_reg_val &= (~C_RETRY_TIMEOUTE_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_TX_PKT_CNP_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_CNP_CNT_CLEAN, read_reg_val |= C_TX_PKT_CNP_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_CNP_CNT_CLEAN, read_reg_val &= (~C_TX_PKT_CNP_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_TX_PKT_RTT_T1_CNT_CLEAN, + &read_reg_val); + 
REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_RTT_T1_CNT_CLEAN, read_reg_val |= C_TX_PKT_RTT_T1_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_RTT_T1_CNT_CLEAN, read_reg_val &= (~C_TX_PKT_RTT_T1_CNT_CLEAN_MASK)); + + REG_OP_AND_CHECK(zxdh_rdma_reg_read, iwdev->rf, C_TX_PKT_RTT_T4_CNT_CLEAN, + &read_reg_val); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_RTT_T4_CNT_CLEAN, read_reg_val |= C_TX_PKT_RTT_T4_CNT_CLEAN_MASK); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + C_TX_PKT_RTT_T4_CNT_CLEAN, read_reg_val &= (~C_TX_PKT_RTT_T4_CNT_CLEAN_MASK)); + + return 0; +} + +static int clean_all_gqps_mp_cap(struct zxdh_device *iwdev) +{ + struct zxdh_sc_dev *dev; + u16 gqpid[VHCA_RC_UD_GQP_MAX_CNT]; + u16 gqp_start, gqp_cnt, vhca_ud_gqp, gqp_num, gqp_idx, mp_idx; + u64 reg_addr; + u8 i, j; + + dev = &iwdev->rf->sc_dev; + gqp_start = dev->vhca_gqp_start; + gqp_cnt = dev->vhca_gqp_cnt; + vhca_ud_gqp = dev->vhca_ud_gqp; + if (gqp_cnt >= VHCA_RC_UD_GQP_MAX_CNT) { + pr_err("gqp_cnt:%u bigger than 48,err!\n", gqp_cnt); + return -EINVAL; + } + gqp_num = gqp_cnt + 1; + for (i = 0; i < gqp_cnt; i++) { + gqpid[i] = gqp_start + i; + } + gqpid[gqp_cnt] = vhca_ud_gqp; + pr_info("clean_all_gqps_mp_cap gqp_num:%u", gqp_num); + gqp_idx = 0xff; + for (i = 0; i < gqp_num; i++) { + mp_idx = 0; + if (gqpid[i] <= GQP_ID_1023) { + mp_idx = gqpid[i] / GQP_MOD; + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, gqp_idx); + } else if (gqpid[i] > GQP_ID_1023 && gqpid[i] <= GQP_ID_1103) { + mp_idx = ((gqpid[i] - GQP_OFFSET) / GQP_MOD) + + MP_IDX_INC; + reg_addr = (mp_idx * MP_OFFSET) + BASE_FOR_LITTLE_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, gqp_idx); + } else if (gqpid[i] <= GQP_ID_2047) { + mp_idx = ((gqpid[i] - GQP_OFFSET) / GQP_MOD) - MP_MOD; + reg_addr = (mp_idx * 
MP_OFFSET) + BASE_FOR_BIG_GQP + + (REG_BYTE * WRITE_RAM_REG_IDX); + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, + reg_addr, gqp_idx); + } else { + pr_err("gqpid:%u err!\n", gqpid[i]); + return -EINVAL; + } + } + + for (j = 0; j < MAX_CAP_QPS; j++) { + reg_addr = READ_RAM_REG_BASE + j * MP_OFFSET; + /* e0b8:read ram address bak*/ + REG_OP_AND_CHECK(zxdh_rdma_reg_write, iwdev->rf, reg_addr, 0); + } + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CFG_PARAMETER)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_CFG_PARAMETER)(struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_cfg_dev_parameter_req req = { 0 }; + int ret = 0; + int i; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + + ret = uverbs_copy_from(&req, attrs, ZXDH_IB_ATTR_DEV_CFG_PARAMETER); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + switch (req.type) { + case TX_STOP_ON_AEQ: + iwdev->rf->sc_dev.tx_stop_on_aeq = 1; + break; + case RX_STOP_ON_AEQ: + iwdev->rf->sc_dev.rx_stop_on_aeq = 1; + break; + case TXRX_STOP_IOVA_CAP: + for (i = 0; i < CAP_NODE_NUM; i++) { + if (iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr != 0) + iwdev->hw_data_cap.cap_txrx_use_iova[i].addr_info.cap_iova_addr = 0; + } + break; + case CLEAR_ALL_CC_BASIC_CNT: + ret = clean_cc_basic_cnt_info(iwdev); + break; + case CLEAR_ALL_GQPS_MP_CAP: + ret = clean_all_gqps_mp_cap(iwdev); + break; + default: + pr_err("not support type %d!\n", req.type); + return -EINVAL; + } + return ret; +} + +static int +get_pf_qpn_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp 
*show_res_map_resp) +{ + u64 reg[2]; + u32 value[2], idx[2], pf_id_u32; + u8 pf_id = rf->pf_id; + + if (rf->ftype) + return 0; + if (pf_id > PCIE_PF_NUM_MAX) { + pr_err("pf_id:%u bigger than 31,err!\n", pf_id); + return -EINVAL; + } + pf_id_u32 = (u32)pf_id; + idx[0] = pf_id_u32; + idx[1] = pf_id_u32; + reg[0] = C_DB_SHOW_PF_START_QPN_MAP + pf_id_u32 * 0x8; + reg[1] = C_DB_SHOW_PF_END_VHCA_MAP + pf_id_u32 * 0x8; + + COPY_TO_USER_SAFE(show_res_map_req->idx_va, idx, sizeof(u32) * 2); + COPY_TO_USER_SAFE(show_res_map_req->reg_va, reg, sizeof(u64) * 2); + READ_REGISTERS_AND_CHECK(rf, reg[0], value, 2); + COPY_TO_USER_SAFE(show_res_map_req->value_va, value, sizeof(u32) * 2); + show_res_map_resp->count = 2; + return 0; +} + +static int +get_pf_vhca_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + u64 reg; + u32 value, pf_id_u32; + u8 pf_id = rf->pf_id; + + if (rf->ftype) + return 0; + if (pf_id > PCIE_PF_NUM_MAX) { + pr_err("pf_id:%u bigger than 31,err!\n", pf_id); + return -EINVAL; + } + pf_id_u32 = (u32)pf_id; + reg = C_DB_SHOW_PF_VHCA_MAP + pf_id_u32 * 0x4; + + COPY_TO_USER_SAFE(show_res_map_req->reg_va, ®, sizeof(u64)); + READ_REGISTER_AND_CHECK(rf, reg, &value); + COPY_TO_USER_SAFE(show_res_map_req->value_va, &value, sizeof(u32)); + COPY_TO_USER_SAFE(show_res_map_req->idx_va, &pf_id_u32, sizeof(u32)); + show_res_map_resp->count = 1; + return 0; +} + +static int +get_vhca_physical_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + u64 reg; + u32 value, vhca_id_u32; + struct zxdh_sc_dev *dev; + u16 vhca_id; + + dev = &rf->sc_dev; + vhca_id = dev->vhca_id; + if (vhca_id > VHCA_NUM_MAX) { + pr_err("vhca_id:%u bigger than 257,err!\n", vhca_id); + return -EINVAL; + } + + vhca_id_u32 = (u32)vhca_id; + reg = C_DB_SHOW_VHCA_PHYSICAL_MAP + vhca_id_u32 * 0x1000; + + 
COPY_TO_USER_SAFE(show_res_map_req->reg_va, ®, sizeof(u64)); + READ_REGISTER_AND_CHECK(rf, reg, &value); + COPY_TO_USER_SAFE(show_res_map_req->value_va, &value, sizeof(u32)); + COPY_TO_USER_SAFE(show_res_map_req->idx_va, &vhca_id_u32, sizeof(u32)); + show_res_map_resp->count = 1; + return 0; +} + +static int +get_8k_gqp_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + u64 *reg_array, reg; + u32 *idx, *value_array, i, j, chunk_size, ud_value; + void *base_memory; + struct zxdh_sc_dev *dev; + u16 vhca_id, start_8k, cnt_8k, vhca_ud_8k_index; + int ret; + size_t total_size = 0; + uint32_t regs_num; + + dev = &rf->sc_dev; + vhca_id = dev->vhca_id; + if (vhca_id > VHCA_NUM_MAX) { + pr_err("vhca_id:%u bigger than 257,err!\n", vhca_id); + return -EINVAL; + } + + start_8k = dev->vhca_8k_index_start; + cnt_8k = dev->vhca_8k_index_cnt; + vhca_ud_8k_index = dev->vhca_ud_8k_index; + regs_num = cnt_8k + 1; + + if (regs_num > VHCA_RC_UD_8K_MAX_CNT) { + pr_err("cnt_8k:%u bigger than 192,err!\n", cnt_8k); + return -EINVAL; + } + total_size = sizeof(u64) * regs_num + 2 * sizeof(u32) * regs_num; + base_memory = kmalloc(total_size, GFP_KERNEL); + if (!base_memory) { + pr_err("Memory allocation failed\n"); + return -ENOMEM; + } + memset(base_memory, 0, total_size); + reg_array = (u64 *)base_memory; + idx = (u32 *)((char *)base_memory + sizeof(u64) * regs_num); + value_array = (u32 *)((char *)idx + sizeof(u32) * regs_num); + + for (i = start_8k; i < start_8k + cnt_8k; i++) { + reg = C_DB_SHOW_8K_2K_MAP + i * 0x4; + reg_array[i - start_8k] = reg; + idx[i - start_8k] = i; + } + + for (j = 0; j < cnt_8k; j += MAX_READ_REG_SIZE) { + chunk_size = (cnt_8k - j) < MAX_READ_REG_SIZE ? 
+ (cnt_8k - j) : + MAX_READ_REG_SIZE; + ret = zxdh_rdma_regs_read(rf, reg_array[j], &value_array[j], + chunk_size); + if (ret) { + kfree(base_memory); + pr_err("[get_8k_gqp_reg_value] zxdh_rdma_regs_read failed at chunk %d", + j / MAX_READ_REG_SIZE); + return -EFAULT; + } + } + + idx[cnt_8k] = vhca_ud_8k_index; + reg_array[cnt_8k] = C_DB_SHOW_8K_2K_MAP + vhca_ud_8k_index * 0x4; + + ret = zxdh_rdma_reg_read(rf, reg_array[cnt_8k], &ud_value); + if (ret) { + kfree(base_memory); + pr_err("[get_8k_gqp_reg_value] zxdh_rdma_reg_read falied"); + return -EFAULT; + } + + value_array[cnt_8k] = ud_value & 0x7FF; + if (copy_to_user((void *)show_res_map_req->reg_va, (void *)reg_array, + sizeof(u64) * regs_num)) { + kfree(base_memory); + return -EFAULT; + } + if (copy_to_user((void *)show_res_map_req->value_va, + (void *)value_array, sizeof(u32) * regs_num)) { + kfree(base_memory); + return -EFAULT; + } + if (copy_to_user((void *)show_res_map_req->idx_va, (void *)idx, + sizeof(u32) * regs_num)) { + kfree(base_memory); + return -EFAULT; + } + show_res_map_resp->count = regs_num; + + kfree(base_memory); + + return 0; +} + +static int +get_gqp_8k_create_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + u64 reg, reg_array[VHCA_RC_UD_GQP_MAX_CNT]; + u32 idx[VHCA_RC_UD_GQP_MAX_CNT], value_array[VHCA_RC_UD_GQP_MAX_CNT], i, j, chunk_size; + struct zxdh_sc_dev *dev; + u16 vhca_id, gqp_start, gqp_cnt, vhca_ud_gqp; + + dev = &rf->sc_dev; + vhca_id = dev->vhca_id; + if (vhca_id > VHCA_NUM_MAX) { + pr_err("vhca_id:%u bigger than 257,err!\n", vhca_id); + return -EINVAL; + } + gqp_start = dev->vhca_gqp_start; + gqp_cnt = dev->vhca_gqp_cnt; + vhca_ud_gqp = dev->vhca_ud_gqp; + if (gqp_cnt >= VHCA_RC_UD_GQP_MAX_CNT) { + pr_err("gqp_cnt:%u bigger than 48,err!\n", gqp_cnt); + return -EINVAL; + } + for (i = gqp_start; i < gqp_start + gqp_cnt; i++) { + reg = C_DB_SHOW_GQP_VHCA_MAP + i * 0x4; + reg_array[i - 
gqp_start] = reg; + idx[i - gqp_start] = i; + } + for (j = 0; j < gqp_cnt; j += MAX_READ_REG_SIZE) { + chunk_size = (gqp_cnt - j) < MAX_READ_REG_SIZE ? + (gqp_cnt - j) : + MAX_READ_REG_SIZE; + READ_REGISTERS_AND_CHECK(rf, reg_array[j], &value_array[j], + chunk_size); + } + + idx[gqp_cnt] = vhca_ud_gqp; + reg_array[gqp_cnt] = C_DB_SHOW_GQP_VHCA_MAP + vhca_ud_gqp * 0x4; + READ_REGISTER_AND_CHECK(rf, reg_array[gqp_cnt], &value_array[gqp_cnt]); + + COPY_TO_USER_SAFE(show_res_map_req->reg_va, reg_array, + sizeof(u64) * (gqp_cnt + 1)); + COPY_TO_USER_SAFE(show_res_map_req->value_va, value_array, + sizeof(u32) * (gqp_cnt + 1)); + COPY_TO_USER_SAFE(show_res_map_req->idx_va, idx, + sizeof(u32) * (gqp_cnt + 1)); + + show_res_map_resp->count = gqp_cnt + 1; + return 0; +} + +static int process_vhca_register(u64 *reg_array, u32 *idx, u32 *value_array, + u32 *num, u32 index, void *rf) +{ + u32 read_reg_val; + u64 reg; + uint32_t qps_act_bit = 0x80000000; + + REG_OP_AND_CHECK(zxdh_rdma_reg_write, rf, C_CHECK_GQP_ACTIVE_WRITE, + index); + REG_OP_AND_CHECK(zxdh_rdma_reg_read, rf, C_CHECK_GQP_ACTIVE_READ, + &read_reg_val); + + if ((read_reg_val & qps_act_bit) != 0) { + reg = C_DB_SHOW_GQP_VHCA_MAP + index * 0x4; + reg_array[*num] = reg; + idx[*num] = index; + READ_REGISTER_AND_CHECK(rf, reg, &value_array[*num]); + (*num)++; + return 0; + } + + return 0; +} + +static int +get_gqp_8k_active_reg_value(struct zxdh_pci_f *rf, + struct zxdh_db_show_res_map_req *show_res_map_req, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + u64 reg_array[VHCA_RC_UD_GQP_MAX_CNT]; + u32 idx[VHCA_RC_UD_GQP_MAX_CNT], i; + u32 value_array[VHCA_RC_UD_GQP_MAX_CNT]; + struct zxdh_sc_dev *dev; + u16 vhca_id, gqp_start, gqp_cnt, vhca_ud_gqp; + u32 num = 0; + + dev = &rf->sc_dev; + vhca_id = dev->vhca_id; + if (vhca_id > VHCA_NUM_MAX) { + pr_err("vhca_id:%u bigger than 257,err!\n", vhca_id); + return -EINVAL; + } + gqp_start = dev->vhca_gqp_start; + gqp_cnt = dev->vhca_gqp_cnt; + vhca_ud_gqp = 
dev->vhca_ud_gqp; + if (gqp_cnt >= VHCA_RC_UD_GQP_MAX_CNT) { + pr_err("gqp_cnt:%u bigger than 48,err!\n", gqp_cnt); + return -EINVAL; + } + for (i = gqp_start; i < gqp_start + gqp_cnt; i++) { + process_vhca_register(reg_array, idx, value_array, &num, i, rf); + } + + process_vhca_register(reg_array, idx, value_array, &num, vhca_ud_gqp, rf); + COPY_TO_USER_SAFE(show_res_map_req->reg_va, reg_array, + sizeof(u64) * num); + COPY_TO_USER_SAFE(show_res_map_req->value_va, value_array, + sizeof(u32) * num); + COPY_TO_USER_SAFE(show_res_map_req->idx_va, idx, sizeof(u32) * num); + + show_res_map_resp->count = num; + return 0; +} + +static int validate_show_map_qpn(struct zxdh_pci_f *rf, u32 *qpn, + u32 requested_qpn) +{ + struct zxdh_sc_dev *dev; + u32 min_qpn, max_qpn; + + if (!rf) { + pr_err("Invalid rf pointer\n"); + return -EINVAL; + } + + dev = &rf->sc_dev; + min_qpn = dev->base_qpn + 1; + max_qpn = dev->base_qpn + rf->max_qp - 1; + + if ((requested_qpn != 1) && + (requested_qpn < min_qpn || requested_qpn > max_qpn)) { + pr_err("Requested qpn (%u) out of valid range [%u, %u]\n", + requested_qpn, min_qpn, max_qpn); + return -ZXDH_QPN_ERROR; + } + + if (requested_qpn == 1) { + *qpn = min_qpn; + } else { + *qpn = requested_qpn; + } + + return 0; +} + +static int +get_qp_8k_reg_value(struct zxdh_pci_f *rf, u32 qpn, + struct zxdh_db_show_res_map_resp *show_res_map_resp) +{ + struct zxdh_sc_dev *dev; + struct zxdh_qp *iwqp = NULL; + dev = &rf->sc_dev; + + iwqp = rf->qp_table[qpn - dev->base_qpn]; + if (!iwqp) { + pr_err("get_qp_8k_reg_value: iwqp is null!\n"); + return -ZXDH_QP_NOT_AVALIABLE; + } + show_res_map_resp->qp_8k_index = iwqp->sc_qp.qp_uk.qp_8k_index; + + return 0; +} + +#ifdef ZXDH_UAPI_DEF +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_SHOW_RES_MAP)( + struct uverbs_attr_bundle *attrs) +#else +static int UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_SHOW_RES_MAP)( + struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct 
zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_pci_f *rf; + struct zxdh_db_show_res_map_req show_res_map_req = { 0 }; + struct zxdh_db_show_res_map_resp show_res_map_resp = { 0 }; + int ret = 0; + u32 qpn; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + rf = iwdev->rf; + + ret = uverbs_copy_from(&show_res_map_req, attrs, + ZXDH_IB_ATTR_DEV_SHOW_RES_MAP); + if (IS_UVERBS_COPY_ERR(ret)) + return ret; + + switch (show_res_map_req.type) { + case ZXDH_SHOW_RES_MAP_PF_TO_QPN: + ret = get_pf_qpn_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_PF_TO_VHCA: + ret = get_pf_vhca_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_VHCA_TO_PF: + ret = get_vhca_physical_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_8K_TO_GQP: + ret = get_8k_gqp_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_GQP_TO_VHCA_CREATED: + ret = get_gqp_8k_create_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_GQP_TO_VHCA_ACTIVE: + ret = get_gqp_8k_active_reg_value(rf, &show_res_map_req, + &show_res_map_resp); + break; + case ZXDH_SHOW_RES_MAP_QP_TO_8K: + ret = validate_show_map_qpn(rf, &qpn, show_res_map_req.qp_id); + if (ret) + break; + ret = get_qp_8k_reg_value(rf, qpn, &show_res_map_resp); + break; + default: + pr_err("errro reg_type is %d, not support!\n", + show_res_map_req.type); + return -EFAULT; + } + if (ret) { + pr_err("zxdh_db_show_res_map: get reg value failed!\n"); + return ret; + } + +#ifdef RDMA_COPY_TO_STRUCT_OR_ZERO_SUPPORT + ret = uverbs_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_SHOW_RES_MAP_RESP, + &show_res_map_resp, + sizeof(show_res_map_resp)); +#else + ret = 
zxdh_copy_to_struct_or_zero(attrs, + ZXDH_IB_ATTR_DEV_SHOW_RES_MAP_RESP, + &show_res_map_resp, + sizeof(show_res_map_resp)); +#endif + if (ret) { + pr_err("ib_copy_to_udata failed!\n"); + return -EFAULT; + } + return 0; +} + +static int read_ram_data(struct zxdh_device *iwdev, + struct zxdh_read_ram_req *req, u32 ram_max_index, + u32 *reg_values) +{ + struct read_ram_info ram_info = { 0 }; + ram_info.ram_num = req->ram_id; + ram_info.ram_width = req->ram_width; + ram_info.ram_read_cnt = req->read_count; + ram_info.ram_addr = req->ram_addr; + ram_info.offset_idx = ram_max_index; + + if (req->hw_module == HW_MODULE_TX) { + return zxdh_read_ram_tx_values(&iwdev->rf->sc_dev, &ram_info, + reg_values); + } + if (req->hw_module == HW_MODULE_RX) { + return zxdh_read_ram_rx_values(&iwdev->rf->sc_dev, &ram_info, + reg_values); + } + if (req->hw_module == HW_MODULE_CQP) { + return zxdh_read_ram_cqp_values(&iwdev->rf->sc_dev, &ram_info, + reg_values); + } + return -1; +} + +#ifdef ZXDH_UAPI_DEF +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_READ_RAM)(struct uverbs_attr_bundle *attrs) +#else +static int + UVERBS_HANDLER(ZXDH_IB_METHOD_DEV_READ_RAM)(struct ib_uverbs_file *file, + struct uverbs_attr_bundle *attrs) +#endif +{ + struct ib_ucontext *ib_uctx; + struct zxdh_device *iwdev; + struct ib_device *ib_dev; + struct zxdh_read_ram_req req = { 0 }; + u32 *reg_values = NULL; + int ret = 0; + +#ifdef ZXDH_UAPI_DEF + ib_uctx = ib_uverbs_get_ucontext(attrs); +#else + ib_uctx = ib_uverbs_get_ucontext(attrs->ufile); +#endif + if (IS_ERR(ib_uctx)) + return PTR_ERR(ib_uctx); + ib_dev = ib_uctx->device; + iwdev = to_iwdev(ib_dev); + ret = uverbs_copy_from(&req, attrs, ZXDH_IB_ATTR_DEV_READ_RAM); + if (IS_UVERBS_COPY_ERR(ret)) + return -ZXDH_COPY_USER_PARAM_ERROR; + reg_values = + kmalloc(sizeof(u32) * ZXDH_READ_RAM_MAX_OFFSET, GFP_KERNEL); + if (!reg_values) { + pr_err("zxdh_read_ram error: allocate memory failed!\n"); + return -ENOMEM; + } + ret = read_ram_data(iwdev, &req, 
ZXDH_READ_RAM_MAX_OFFSET, reg_values); + if (ret) { + ret = -ZXDH_READ_RAM_ERROR; + pr_err("zxdh_read_ram error: read ram reg failed!\n"); + goto cleanup; + } + ret = copy_to_user((void __user *)(uintptr_t)req.value_va, + (const void *)reg_values, + sizeof(u32) * ZXDH_READ_RAM_MAX_OFFSET); + if (ret) { + ret = -ZXDH_COPY_DATA_TO_USER_ERROR; + pr_err("zxdh_read_ram error: copy ram data to user failed!\n"); + } +cleanup: + if (reg_values) { + kfree(reg_values); + } + return ret; +} + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_QP_RESET_QP, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_RESET_QP_HANDLE, + UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_RESET_OP_CODE, + UVERBS_ATTR_TYPE(u64), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_QP_MODIFY_QPC, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE, UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ, + UVERBS_ATTR_STRUCT(struct zxdh_modify_qpc_req, + package_err_flag), + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK, + UVERBS_ATTR_TYPE(u64), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_QP_QUERY_QPC, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_QUERY_HANDLE, UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_QP_QUERY_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_query_qpc_resp, + tx_last_ack_psn), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_UDP_PORT, + UVERBS_ATTR_TYPE(u16), + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_QPN, + UVERBS_ATTR_TYPE(u32), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_QP_SET_CREDIT_FLAG, + UVERBS_ATTR_IDR(ZXDH_IB_ATTR_QP_SET_CREDIT_FLAG_HANDLE, + UVERBS_OBJECT_QP, + UVERBS_ACCESS_READ, UA_MANDATORY), + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_QP_CREDIT_FLAG, + UVERBS_ATTR_TYPE(u64), + UA_MANDATORY)); + 
+DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_LOG_TRACE, + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH, + UVERBS_ATTR_TYPE(u8), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_SET_LOG_TRACE, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH, + UVERBS_ATTR_TYPE(u8), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_CAP_START, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_START, + UVERBS_ATTR_STRUCT(struct zxdh_cap_cfg, + cap_data_start_cap), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_CAP_START_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_cap_start_resp, + cap_pa_node1), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_CAP_STOP, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_STOP, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_CAP_FREE, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CAP_FREE, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_MP_CAP, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_CAP, + UVERBS_ATTR_STRUCT(struct zxdh_mp_cap_cfg, qpn_num), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_MP_CAP_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_mp_cap_resp, cap_pa), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD(ZXDH_IB_METHOD_DEV_MP_GET_DATA, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_GET_DATA, + UVERBS_ATTR_TYPE(u8), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR, + UVERBS_ATTR_STRUCT(struct zxdh_cap_gqp, gqp_num), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_ACT_VHCA_GQPS, + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_ACT_VHCA_GQPS_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_active_vhca_gqps, + gqp_num), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_CC_BASIC_INFO, + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_CC_BASIC_INFO_RESP, + UVERBS_ATTR_STRUCT(struct 
zxdh_cc_basic_info, + backpres_rx), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_HMC, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_GET_HMC, + UVERBS_ATTR_STRUCT(struct zxdh_context_req, resource_id), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_HMC_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_context_resp, context_size), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_GET_OBJ_DATA, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_GET_OBJ_DATA, + UVERBS_ATTR_STRUCT(struct zxdh_get_object_data_req, + object_num), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_GET_OBJ_DATA_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_get_object_data_resp, + route_id), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_HEALTH_CHECK, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_HEALTH_CHECK, + UVERBS_ATTR_STRUCT(struct zxdh_health_check_req, reg_value_va_ex), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_HEALTH_CHECK_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_health_check_resp, count_ex), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_CFG_PARAMETER, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_CFG_PARAMETER, + UVERBS_ATTR_STRUCT(struct zxdh_cfg_dev_parameter_req, reserved2), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_SHOW_RES_MAP, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_SHOW_RES_MAP, + UVERBS_ATTR_STRUCT(struct zxdh_db_show_res_map_req, type), + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(ZXDH_IB_ATTR_DEV_SHOW_RES_MAP_RESP, + UVERBS_ATTR_STRUCT(struct zxdh_db_show_res_map_resp, count), + UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + ZXDH_IB_METHOD_DEV_READ_RAM, + UVERBS_ATTR_PTR_IN(ZXDH_IB_ATTR_DEV_READ_RAM, + UVERBS_ATTR_STRUCT(struct zxdh_read_ram_req, reserved2), + UA_MANDATORY)); + +DECLARE_UVERBS_GLOBAL_METHODS(ZXDH_IB_OBJECT_DEV, + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_LOG_TRACE), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_SET_LOG_TRACE), + 
&UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_START), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_STOP), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CAP_FREE), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_CAP), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_GET_DATA), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR)); + +DECLARE_UVERBS_GLOBAL_METHODS( + ZXDH_IB_OBJECT_DEVICE_EX, + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_ACT_VHCA_GQPS), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_CC_BASIC_INFO), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_HMC), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_GET_OBJ_DATA), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_HEALTH_CHECK), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_CFG_PARAMETER), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_SHOW_RES_MAP), + &UVERBS_METHOD(ZXDH_IB_METHOD_DEV_READ_RAM)); + +DECLARE_UVERBS_GLOBAL_METHODS(ZXDH_IB_OBJECT_QP_OBJ, + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_QUERY_QPC), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_MODIFY_QPC), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_RESET_QP), + &UVERBS_METHOD(ZXDH_IB_METHOD_QP_SET_CREDIT_FLAG)); + +#ifdef ZXDH_UAPI_DEF +const struct uapi_definition zxdh_ib_dev_defs[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(ZXDH_IB_OBJECT_DEV), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(ZXDH_IB_OBJECT_QP_OBJ), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(ZXDH_IB_OBJECT_DEVICE_EX), + {}, +}; +#else +DECLARE_UVERBS_OBJECT_TREE(devx_objects, + &UVERBS_OBJECT(ZXDH_IB_OBJECT_DEV), + &UVERBS_OBJECT(ZXDH_IB_OBJECT_QP_OBJ), + &UVERBS_OBJECT(ZXDH_IB_OBJECT_DEVICE_EX)); + +const struct uverbs_object_tree_def *zxdh_ib_get_devx_tree(void) +{ + return &devx_objects; +} +#endif diff --git a/drivers/infiniband/hw/zrdma/private_verbs_cmd.h b/drivers/infiniband/hw/zrdma/private_verbs_cmd.h new file mode 100644 index 000000000000..103ab659ab44 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/private_verbs_cmd.h @@ -0,0 +1,354 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PRIVATE_VERBS_CMD_H +#define 
ZXDH_PRIVATE_VERBS_CMD_H +#include + +#define EXTRACT_BITS(value, low, high) \ + (((value) >> (low)) & ((1U << ((high) - (low) + 1)) - 1)) +#define C_RDMA_TX_SUB_RW_RSV0 0x62065f84f0u +#define C_RDMA_TX_SUB_RO_RSV5 0x62065f84b4 +#define C_SQ_CPU_MAINTAIN_RESERVE1 0x6206623284u +#define C_RQ_CPU_MAINTAIN_RESERVE1 0x62066232a0u +#define C_ACK_CPU_MAINTAIN_RESERVE1 0x62066232bcu +#define C_DB_AXI_INTERFACW_STATE_REG2 0x620660b214u +#define C_WQE_PREFETCH_TOP_FIFO_WE_RD_CNT0 0x62065F0FBCu +#define C_RDMATX_ARBITRATION_DIN_0 0x62065f061cu +#define HOST3_ERR_INFO_FIFO_OVERFLOW_CNT 0x62065F08b0u +#define C_PKT_TIME_OUT_CNT 0x62065F0678u +#define C_ICRC_PROC_SOP_CNT_HW 0x6205400084u +#define C_ICRC_PROC_EOP_CNT_HW 0x6205400088u +#define RDMATX_ACK_RSV_RO_REG_0_HW 0x62065e80a0u +#define RDMATX_ACK_RD_MSG_LOSS_FLAG_CNT 0x62065e83dcu +#define C_NHD_CHECK_ICRC_REMOVAL_EOP_CNT_HW 0x62054004f8u +#define PKT_RTT_T1_GEN_SOP_CNT 0x62065f8494u +#define C_NHD_CHECK_RTT_PROC_SOP_CNT 0x62054004f0u +#define C_RAM_TEST_RSV_1 0x6205478800u +#define C_NHD_CHECK_RTT_PROC_EOP_CNT 0x62054004f4u +#define C_SQ_CPU_FIFO_OVERFLOW_CNT 0x6206623290u +#define C_RQ_CPU_FIFO_OVERFLOW_CNT 0x62066232acu +#define RDMATX_ACK_RSV_RO_REG_1 0x62065e80a4u +#define C_ACK_CPU_FIFO_OVERFLOW_CNT 0x62066232c8u +#define C_SQ_CPU_MAINTAIN_RESERVE2 0x6206623288u +#define C_SQ_CPU_MAINTAIN_RESERVE3 0x620662328cu +#define C_NP_RDY_TEST 0x62065F0ec8u +#define C_ICRC_CHECK_SOP_CNT_HW 0x620540008cu +#define C_MUL_CACHE_ARBITER_D2B_SOP_CNT 0x62054008A8u + +#define ZXDH_RESET_RETRY_CQE_SQ_OPCODE_ERR 0x1f + +#define C_RQ_INDICATE_ID_REG_CHECK 0x6205800500 +#define C_RQDB_INDICATE_ID_REG_CHECK 0x6205800600 +#define C_SRQ_INDICATE_ID_REG_CHECK 0x6205800508 +#define C_SRQP_INDICATE_ID_REG_CHECK 0x6205800494 +#define C_SRQDB_INDICATE_ID_REG_CHECK 0x620580048c +#define C_SQ_INDICATE_ID_REG_CHECK 0x6206800c04 + +#define C_DB_SHOW_PF_START_QPN_MAP 0x6206600228 +#define C_DB_SHOW_PF_END_VHCA_MAP 0x620660022c +#define 
C_DB_SHOW_PF_VHCA_MAP 0x6206600400 +#define C_DB_SHOW_VHCA_PHYSICAL_MAP 0x6206800430 +#define C_DB_SHOW_8K_2K_MAP 0x6206602000 +#define C_DB_SHOW_GQP_VHCA_MAP 0x62065fc000 +#define C_CHECK_GQP_ACTIVE_WRITE 0x62065f81d4 +#define C_CHECK_GQP_ACTIVE_READ 0x62065f84c0 + +#define C_ACTIVE_VHCA_SQ_CNT_CLEAN 0x620662327c +#define C_ACTIVE_VHCA_READ_CNT_CLEAN 0x6206623298 +#define C_ACTIVE_VHCA_ACK_CNT_CLEAN 0x62066232b4 +#define C_TASK_PREFETCH_RECV_COM_CNT_CLEAN 0x62065F0FB4 +#define C_TX_PKT_CNT_CLEAN 0x62065F0680 +#define C_RX_PKT_CNT_CLEAN 0x6205400008 +#define C_RETRY_TIMEOUTE_CNT_CLEAN 0x62065e809c +#define C_TX_PKT_CNP_CNT_CLEAN 0x620546008c +#define C_TX_PKT_RTT_T1_CNT_CLEAN 0x62065f81dc +#define C_TX_PKT_RTT_T4_CNT_CLEAN 0x6205478880 + +#define C_ACTIVE_VHCA_SQ_CNT_CLEAN_MASK BIT(4) +#define C_ACTIVE_VHCA_READ_CNT_CLEAN_MASK BIT(4) +#define C_ACTIVE_VHCA_ACK_CNT_CLEAN_MASK BIT(4) +#define C_TASK_PREFETCH_RECV_COM_CNT_CLEAN_MASK BIT(2) +#define C_TX_PKT_CNT_CLEAN_MASK BIT(0) +#define C_RX_PKT_CNT_CLEAN_MASK BIT(0) +#define C_RETRY_TIMEOUTE_CNT_CLEAN_MASK BIT(0) +#define C_TX_PKT_CNP_CNT_CLEAN_MASK BIT(3) +#define C_TX_PKT_RTT_T1_CNT_CLEAN_MASK BIT(0) +#define C_TX_PKT_RTT_T4_CNT_CLEAN_MASK BIT(2) + +enum switch_status { + SWITCH_CLOSE = 0, + SWITCH_OPEN = 1, + SWITCH_ERROR, +}; + +enum zxdh_qp_modify_qpc_mask { + ZXDH_RETRY_CQE_SQ_OPCODE = 1 << 0, + ZXDH_ERR_FLAG_SET = 1 << 1, + ZXDH_PACKAGE_ERR_FLAG = 1 << 2, + ZXDH_TX_LAST_ACK_PSN = 1 << 3, + ZXDH_TX_LAST_ACK_WQE_OFFSET_SET = 1 << 4, + ZXDH_TX_READ_RETRY_FLAG_SET = 1 << 5, + ZXDH_TX_RDWQE_PYLD_LENGTH = 1 << 6, + ZXDH_TX_RECV_READ_FLAG_SET = 1 << 7, + ZXDH_TX_RD_MSG_LOSS_ERR_FLAG_SET = 1 << 8, +}; + +enum zxdh_qp_reset_qp_code { + ZXDH_RESET_RETRY_TX_ITEM_FLAG = 1, +}; + +enum zxdh_read_context_size_const { + ZXDH_RX_READ_QPC_SIZE = 168, + ZXDH_RX_QPC_SHIFT = 256, + ZXDH_TX_READ_QPC_SIZE = 176, + ZXDH_READ_CQC_SIZE = 64, + ZXDH_READ_CEQC_SIZE = 24, + ZXDH_READ_AEQC_SIZE = 16, + ZXDH_RX_READ_SRQC_SIZE = 64, + 
ZXDH_READ_MRTE_SIZE = 64, +}; + +enum zxdh_obj_size_const { + ZXDH_PBLE_MR_QUADRUPLE_SIZE = 32, // quadruple pble mr size + ZXDH_PBLE_QUEUE_QUADRUPLE_SIZE = 32, // quadruple pble queue size + ZXDH_PBLE_RQ_SIZE = 8, + ZXDH_AH_SIZE = 64, + ZXDH_IRD_SIZE = 64, + ZXDH_TX_WINDOW_SIZE = 64, + ZXDH_CQ_SHADOW_AREA_SIZE = 8, + ZXDH_RQ_SHADOW_AREA_SIZE = 8, + ZXDH_SRQ_SHADOW_AREA_SIZE = 8, + ZXDH_SQ_UNIT_SIZE = 32, + ZXDH_CQ_SIZE = 64, + ZXDH_CEQ_SIZE = 16, + ZXDH_AEQ_SIZE = 16, + ZXDH_SRQP_INDEX_SIZE = 2, +}; + +struct zxdh_srqc_item { + u32 list_leaf_pbl_size; + long log_srq_size; + u64 srq_address; + u64 srq_list_address; + u32 leaf_pbl_size; + u32 log_srq_stride; + u32 log_srq_stride_wqe_real_size; + u64 dbr_address; + u32 hw_wqe_cnt; +}; + +struct zxdh_qpc_item { + u32 rq_leaf_pbl_size; + u64 rq_address; + u32 log_rq_wqe_size; + u32 log_rq_wqe_real_size; + long log_rq_size; + u64 db_address; + u32 sq_leaf_pbl_size; + u64 sq_address; + long log_sq_size; +}; + +struct zxdh_qp_addr_context { + u32 qp_type; + u32 addr_mode; + u32 wqe_size; + u32 wqe_index; + u64 qp_base_addr; +}; + +struct zxdh_reset_qp_retry_tx_item { + u16 tx_win_raddr; + u32 tx_last_ack_psn; + u32 last_ack_wqe_offset; + u16 hw_sq_tail_una; + u32 rnr_retry_time_l; + u8 rnr_retry_time_h; + u8 rnr_retry_threshold; + u8 read_retry_flag; + u8 rnr_retry_flag; + u8 retry_flag; + u8 cur_retry_count; + u32 rdwqe_pyld_length; + u8 recv_read_flag; + u8 recv_err_flag; + u8 recv_rd_msg_loss_err_flag; + u8 recv_rd_msg_loss_err_cnt; + u8 rd_msg_loss_err_flag; + u8 pktchk_rd_msg_loss_err_cnt; + u8 ack_err_flag; + u8 err_flag; + u8 package_err_flag; + u8 retry_cqe_sq_opcode; +}; + +struct zxdh_qp_tx_win_item { + u32 start_psn; + u16 wqe_pointer; +}; + +struct zxdh_modify_qpc_item { + u32 tx_last_ack_psn; + u32 last_ack_wqe_offset; + u16 hw_sq_tail_una; + u32 rnr_retry_time_l; + u8 rnr_retry_time_h; + u8 rnr_retry_threshold; + u8 read_retry_flag; + u8 rnr_retry_flag; + u8 retry_flag; + u8 cur_retry_count; + u8 
rdwqe_pyld_length_l; + u32 rdwqe_pyld_length_h; + u8 recv_read_flag; + u8 recv_err_flag; + u8 recv_rd_msg_loss_err_flag; + u8 recv_rd_msg_loss_err_cnt; + u8 rd_msg_loss_err_flag; + u8 pktchk_rd_msg_loss_err_cnt; + u8 ack_err_flag; + u8 err_flag; + u8 package_err_flag; + u8 retry_cqe_sq_opcode; +}; + +struct hw_object_wqe_context { + u8 op_code; + u8 wqe_valid; + u16 src_vhca_Index; + u8 src_object_id; + u8 src_waypartition; + u8 src_path_select; + u8 src_interface_select; + u16 dest_vhca_index; + u8 dest_object_id; + u8 dest_waypartition; + u8 dest_path_select; + u8 dest_interface_select; + u32 data_length; + u32 object_size; + u64 src_address; + u64 dest_address; + u64 srqp_aligned_offset; + int zxdh_hmc_rsrc_type; + struct zxdh_sc_dev *dev; + struct zxdh_get_object_data_req *req; +}; + +enum zxdh_error_code_const { + ZXDH_NOT_SUPPORT_OBJECT_ID = 100, + ZXDH_DMA_MEMORY_OVER_2M = 101, + ZXDH_DMA_READ_NOT_32_ALIGN = 102, + ZXDH_CACHE_ID_CHECK_ERROR = 103, + ZXDH_ENTRY_IDX_ERROR = 104, + ZXDH_PBLE_ADDRESSING_ONLY_SUPPORTS_OBJECT_NUMBER_1 = 105, + ZXDH_NOT_SUPPORT_TWO_LEVEL_PBLE_CODE = 106, + ZXDH_NOT_SUPPORT_VIRTUAL_ADDRESS = 107, + ZXDH_DATA_ENTRY_IDX_OVER_LIMIT = 108, + ZXDH_QUEUE_ID_ERROR = 109, +}; + +struct zxdh_cqc_item { + u8 leaf_pbl_size; + u64 doorbell_shadow_addr; + u8 log_cqe_num; + u32 hw_cq_head; + u64 cq_address; + u64 root_pble; +}; + +struct zxdh_aeqc_item { + u32 aeq_size; + u8 virtually_mapped; + u8 leaf_pbl_size; + u32 aeq_head; + u64 aeq_address; +}; + +struct zxdh_ceqc_item { + u8 leaf_pbl_size; + u32 ceqe_head; + u8 log_ceq_num; + u64 ceq_address; +}; + +#define CAP_NODE_NUM 2 +#define NODE1 1 +#define NODE0 0 +#define EN_32bit_GROUP_NUM 16 +#define BIT_O_31 0 +#define BIT_32_63 1 +#define BIT_64_95 2 +#define BIT_96_127 3 +#define BIT_128_159 4 +#define BIT_160_191 5 +#define BIT_192_223 6 +#define BIT_224_255 7 +#define BIT_256_287 8 +#define BIT_288_319 9 +#define BIT_320_351 10 +#define BIT_352_383 11 +#define BIT_384_415 12 +#define 
BIT_416_447 13 +#define BIT_448_479 14 +#define BIT_480_511 15 +#define CAP_TX 1 +#define CAP_RX 2 +#define CAP_CFG_ERROR 0x1 +#define CAP_ALLOC_ADDR_ERROR 0x2 +#define CAP_WRITE_NODE0_REGS_ERROR 0x4 +#define CAP_WRITE_NODE1_REGS_ERROR 0x8 +#define CAP_ALREADY_START 0x10 + +struct zxdh_cap_cfg { + uint8_t cap_position; + uint64_t size; + uint32_t channel_select[CAP_NODE_NUM]; + uint32_t channel_open[CAP_NODE_NUM]; + uint32_t node_choose[CAP_NODE_NUM]; + uint32_t node_select[CAP_NODE_NUM]; + uint32_t compare_bit_en[EN_32bit_GROUP_NUM][CAP_NODE_NUM]; + uint32_t compare_data[EN_32bit_GROUP_NUM][CAP_NODE_NUM]; + uint32_t rdma_time_wrl2d[CAP_NODE_NUM]; + uint32_t extra[CAP_NODE_NUM][EN_32bit_GROUP_NUM]; + uint32_t cap_data_start_cap; +}; +#define MAX_CAP_QPS 4 +struct zxdh_mp_cap_cfg { + bool cap_use_l2d; + uint32_t qpn[MAX_CAP_QPS]; + uint8_t qpn_num; +}; + +struct zxdh_cap_gqp { + uint16_t gqpid[MAX_CAP_QPS]; + uint8_t gqp_num; +}; + +/* ZXDH Devices ID */ +#define ZXDH_DEV_ID_ADAPTIVE_EVB_PF 0x8040 /* ZXDH EVB PF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_EVB_VF 0x8041 /* ZXDH EVB VF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_E312_PF 0x8049 /* ZXDH E312 PF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_E312_VF 0x8060 /* ZXDH E312 VF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_X512_PF 0x806B /* ZXDH X512 PF DEVICE ID*/ +#define ZXDH_DEV_ID_ADAPTIVE_X512_VF 0x806C /* ZXDH X512 VF DEVICE ID*/ + +#define ZXDH_SMMU_OFFSET 0x40000u +#define ZXDH_MP_BASERTT_OFFSET 0x8000u +#define ZXDH_SMMU_CMDQ_OFFSET 0x3000u +#define ZXDH_L2D_MPCAP_BUFF_SIZE 0x14000u +#define ZXDH_CAP_DATA_HOST_MEM_SIZE (2 * 1024 * 1024) +#define ZXDH_CAP_DATA_HMC_MEM_SIZE (1024 * 1024 * 1024) +#define ZXDH_LOG_BUF_SIZE 4096 + +#ifndef ZXDH_UAPI_DEF +const struct uverbs_object_tree_def *zxdh_ib_get_devx_tree(void); +#endif + +void copy_tx_window_to_win_item(void *va, struct zxdh_qp_tx_win_item *info); +void set_retry_modify_qpc_item( + struct zxdh_modify_qpc_item *modify_qpc_item, + struct 
zxdh_reset_qp_retry_tx_item *retry_item_info, + struct zxdh_qp_tx_win_item *tx_win_item_info, u64 *modify_mask); +#endif diff --git a/drivers/infiniband/hw/zrdma/protos.h b/drivers/infiniband/hw/zrdma/protos.h new file mode 100644 index 000000000000..3da97d1cb942 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/protos.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PROTOS_H +#define ZXDH_PROTOS_H +#include + +#define PAUSE_TIMER_VAL 0xffff +#define REFRESH_THRESHOLD 0x7fff +#define HIGH_THRESHOLD 0x800 +#define LOW_THRESHOLD 0x200 +#define ALL_TC2PFC 0xff +#define CQP_COMPL_WAIT_TIME_MS 6 +#define CQP_TIMEOUT_THRESHOLD 20000 +#define CQP_MIN_TIMEOUT_THRESHOLD 1 + +struct read_ram_info { + u32 ram_num; + u32 ram_width; + u32 ram_read_cnt; + u32 ram_addr; + u32 offset_idx; +}; + +/* init operations */ +int zxdh_sc_dev_init(enum zxdh_rdma_vers ver, struct zxdh_sc_dev *dev, + struct zxdh_device_init_info *info); +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp); +__le64 *zxdh_sc_cqp_get_next_send_wqe(struct zxdh_sc_cqp *cqp, u64 scratch); +int zxdh_sc_mr_fast_register(struct zxdh_sc_qp *qp, + struct zxdh_fast_reg_stag_info *info, + bool post_sq); +void zxdh_init_config_check(struct zxdh_config_check *cc, u8 traffic_class, + u16 qs_handle); +/* HMC/FPM functions */ + +/* stats misc */ +int zxdh_rdma_stats_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats); + +int zxdh_process_pma_cmd(struct zxdh_sc_dev *dev, u8 port, + const struct ib_mad *in_mad, struct ib_mad *out_mad); +void zxdh_hw_stats_read_all(struct zxdh_vsi_pestat *stats, + const u64 *hw_stats_regs); +int zxdh_cqp_up_map_cmd(struct zxdh_sc_dev *dev, u8 cmd, + struct zxdh_up_info *map_info); +int zxdh_cqp_ceq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *sc_ceq, + u8 op); +int zxdh_cqp_aeq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_aeq *sc_aeq, + u8 op); +void zxdh_sc_dev_qplist_init(struct 
zxdh_sc_dev *dev); +int zxdh_sc_add_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq); +void zxdh_sc_remove_cq_ctx(struct zxdh_sc_ceq *ceq, struct zxdh_sc_cq *cq); +/* misc L2 param change functions */ +void zxdh_qp_add_qos(struct zxdh_sc_qp *qp); +void zxdh_qp_rem_qos(struct zxdh_sc_qp *qp); +struct zxdh_sc_qp *zxdh_get_qp_from_list(struct list_head *head, + struct zxdh_sc_qp *qp); +/* dynamic memory allocation */ +/* misc */ +u8 zxdh_get_encoded_wqe_size(u32 wqsize, enum zxdh_queue_type queue_type); +void zxdh_modify_qp_to_err(struct zxdh_sc_qp *sc_qp); +int zxdh_cfg_fpm_val(struct zxdh_sc_dev *dev); +void free_sd_mem(struct zxdh_sc_dev *dev); +int zxdh_process_cqp_cmd(struct zxdh_sc_dev *dev, + struct cqp_cmds_info *pcmdinfo); +int zxdh_process_bh(struct zxdh_sc_dev *dev); +extern void dump_ctx(struct zxdh_sc_dev *dev, u32 pf_num, u32 qp_num); +void dumpCSR(struct zxdh_sc_dev *dev); +void dumpCSRx(struct zxdh_sc_dev *dev); +void dumpcls(struct zxdh_sc_dev *dev); +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev); + +int zxdh_sc_config_pte_table(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest); +int zxdh_cqp_config_pte_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest); + +void zxdh_hmc_dpu_capability(struct zxdh_sc_dev *dev); +u32 zxdh_hmc_register_config_comval(struct zxdh_sc_dev *dev, u32 rsrc_type); +u32 zxdh_hmc_register_config_cqpval(struct zxdh_sc_dev *dev, u32 max_cnt, + u32 rsrc_type); +u64 zxdh_get_path_index(struct zxdh_path_index *path_index); +int zxdh_cqp_config_pble_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_pble_info *pbleinfo, u32 len, + bool pbletype); +u16 zxdh_get_8k_index(struct zxdh_sc_qp *qp, u32 dest_ip); +u16 zxdh_get_tc_8k_index_offset(u32 total_vhca, u16 vhca_8k_index_cnt, + u8 traffic_class, u16 *tc_8k_index_num); + +int zxdh_sc_send_mailbox_cmd(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, + u64 msg3, u64 msg4, u16 dst_vf_id); +int zxdh_sc_commit_hmc_register_val(struct zxdh_sc_cqp *cqp, 
u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, + bool post_sq, u8 wait_type); + +int zxdh_sc_dma_read_usecqe(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_dam_read_bycqe *readbuf, + struct zxdh_path_index *spath_index, bool post_sq); + +int zxdh_sc_dma_read(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq); + +int zxdh_sc_dma_write64(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write64_date *dma_data, bool post_sq); + +int zxdh_sc_dma_write32(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_path_index *dpath_index, + struct zxdh_dma_write32_date *dma_data, bool post_sq); + +int zxdh_sc_dma_write(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_src_copy_dest *src_dest, + struct zxdh_path_index *spath_index, + struct zxdh_path_index *dpath_index, bool post_sq); + +int zxdh_sc_mb_create(struct zxdh_sc_cqp *cqp, u64 scratch, + struct zxdh_mailboxhead_data *mbhead_data, bool post_sq, + u32 dst_vf_id); +int zxdh_sc_query_mkey_cmd(struct zxdh_sc_dev *dev, u32 mekyindex); + +int zxdh_clear_dpuddr(struct zxdh_sc_dev *dev, bool clear); +int zxdh_vf_clear_dpuddr(struct zxdh_sc_dev *dev, u64 size, bool clear); + +int zxdh_clear_nof_ioq(struct zxdh_sc_dev *dev, u64 size, u64 ioq_pa); + +int zxdh_dpuddr_to_host_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_write_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir); +int zxdh_cqp_rdma_read_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir); +int zxdh_cqp_damreadbycqe_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dam_read_bycqe *dmadata, + struct zxdh_path_index *src_path_index, u64 *arr); +int zxdh_cqp_rdma_write32_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dma_write32_date 
*dma_data); +int zxdh_cqp_rdma_readreg_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_read_mrte_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); +int zxdh_cqp_rdma_read_tx_window_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest); + +int zxdh_read_ram_32bit_value(struct zxdh_sc_dev *dev, u32 ram_num, + u32 ram_width, u32 ram_read_cnt, + u32 offset_idx, u32 *value); + +int zxdh_read_ram_tx_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value); +int zxdh_read_ram_rx_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value); +int zxdh_read_ram_cqp_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value); + +u64 zxdh_get_hmc_align_512(u64 paaddr); +u16 zxdh_txwind_ddr_size(u8 num); + +u64 zxdh_get_hmc_align_2M(u64 paaddr); +u64 zxdh_get_hmc_align_4K(u64 paaddr); +int zxdh_create_vf_pblehmc_entry(struct zxdh_sc_dev *dev); +int zxdh_sc_query_mkey(struct zxdh_sc_cqp *cqp, u32 mkeyindex, u64 scratch, + bool post_sq); + +int zxdh_sc_query_qpc(struct zxdh_sc_dev *dev, u32 qpn, u64 qpc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_cqc(struct zxdh_sc_dev *dev, u32 cqn, u64 cqc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_ceqc(struct zxdh_sc_dev *dev, u32 ceqn, u64 ceqc_buf_pa, u64 scratch, + bool post_sq); +int zxdh_sc_query_aeqc(struct zxdh_sc_dev *dev, u16 aeqn, u64 aeqc_buf_pa, u64 scratch, + bool post_sq); + +int zxdh_cq_round_up(u32 wqdepth); + +int zxdh_cqp_aeq_create(struct zxdh_sc_aeq *aeq); +int zxdh_init_destroy_aeq(struct zxdh_pci_f *rf); +int zxdh_create_cqp_qp(struct zxdh_pci_f *rf); +int zxdh_destroy_cqp_qp(struct zxdh_pci_f *rf); +const char *zxdh_qp_state_to_string(enum ib_qp_state state); +int get_pci_board_bdf(char *pci_board_bdf, struct zxdh_pci_f *rf); +#endif /* ZXDH_PROTOS_H */ diff --git a/drivers/infiniband/hw/zrdma/puda.c b/drivers/infiniband/hw/zrdma/puda.c new file mode 100644 index 
000000000000..c6dacb3dfc25 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/puda.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "puda.h" +#include "ws.h" + +/** + * zxdh_puda_get_listbuf - get buffer from puda list + * @list: list to use for buffers (ILQ or IEQ) + */ +static struct zxdh_puda_buf *zxdh_puda_get_listbuf(struct list_head *list) +{ + struct zxdh_puda_buf *buf = NULL; + + if (!list_empty(list)) { + buf = (struct zxdh_puda_buf *)list->next; + list_del((struct list_head *)&buf->list); + } + + return buf; +} + +/** + * zxdh_puda_ret_bufpool - return buffer to rsrc list + * @rsrc: resource to use for buffer + * @buf: buffer to return to resource + */ +void zxdh_puda_ret_bufpool(struct zxdh_puda_rsrc *rsrc, + struct zxdh_puda_buf *buf) +{ + unsigned long flags; + + buf->do_lpb = false; + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + list_add(&buf->list, &rsrc->bufpool); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->avail_buf_count++; +} + +/** + * zxdh_puda_get_next_send_wqe - return next wqe for processing + * @qp: puda qp for wqe + * @wqe_idx: wqe index for caller + */ +static __le64 *zxdh_puda_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx) +{ + __le64 *wqe = NULL; + int ret_code = 0; + + *wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + ZXDH_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return wqe; + + wqe = qp->sq_base[*wqe_idx].elem; + + return wqe; +} + +/** + * zxdh_puda_send - complete send wqe for transmit + * @qp: puda qp for send + * @info: buffer information for transmit + */ +int zxdh_puda_send(struct zxdh_sc_qp *qp, struct zxdh_puda_send_info *info) +{ + __le64 *wqe; + u32 iplen, l4len; + u64 hdr[2]; + u32 wqe_idx; + u8 iipt; + + /* number of 32 
bits DWORDS in header */ + l4len = info->tcplen >> 2; + if (info->ipv4) { + iipt = 3; + iplen = 5; + } else { + iipt = 1; + iplen = 10; + } + + wqe = zxdh_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); + if (!wqe) + return -ENOSPC; + + qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; + /* Third line of WQE descriptor */ + /* maclen is in words */ + + if (qp->dev->hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) { + hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */ + hdr[1] = FIELD_PREP(ZXDH_UDA_QPSQ_OPCODE, ZXDH_OP_TYPE_SEND) | + FIELD_PREP(ZXDH_UDA_QPSQ_L4LEN, l4len) | + FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) | + FIELD_PREP(ZXDH_UDA_QPSQ_SIGCOMPL, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity); + + /* Forth line of WQE descriptor */ + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity)); + } else { + hdr[0] = FIELD_PREP(ZXDH_UDA_QPSQ_MACLEN, info->maclen >> 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_IPLEN, iplen) | + FIELD_PREP(ZXDH_UDA_QPSQ_L4T, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_IIPT, iipt) | + FIELD_PREP(ZXDH_GEN1_UDA_QPSQ_L4LEN, l4len); + + hdr[1] = FIELD_PREP(ZXDH_UDA_QPSQ_OPCODE, ZXDH_OP_TYPE_SEND) | + FIELD_PREP(ZXDH_UDA_QPSQ_SIGCOMPL, 1) | + FIELD_PREP(ZXDH_UDA_QPSQ_DOLOOPBACK, info->do_lpb) | + FIELD_PREP(ZXDH_UDA_QPSQ_VALID, + qp->qp_uk.swqe_polarity); + + /* Forth line of WQE descriptor */ + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len)); + } + + set_64bit_val(wqe, 16, hdr[0]); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 24, hdr[1]); + + print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, 32, false); + zxdh_uk_qp_post_wr(&qp->qp_uk); + return 0; +} + +/** + * zxdh_puda_send_buf - transmit puda buffer + * @rsrc: resource to use for buffer + * @buf: puda buffer to transmit + */ +void 
zxdh_puda_send_buf(struct zxdh_puda_rsrc *rsrc, struct zxdh_puda_buf *buf) +{ + struct zxdh_puda_send_info info = {}; + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + /* if no wqe available or not from a completion and we have + * pending buffers, we must queue new buffer + */ + if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { + list_add_tail(&buf->list, &rsrc->txpend); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->stats_sent_pkt_q++; + if (rsrc->type == ZXDH_PUDA_RSRC_TYPE_ILQ) + pr_err("PUDA: adding to txpend\n"); + return; + } + rsrc->tx_wqe_avail_cnt--; + /* if we are coming from a completion and have pending buffers + * then Get one from pending list + */ + if (!buf) { + buf = zxdh_puda_get_listbuf(&rsrc->txpend); + if (!buf) + goto done; + } + + info.scratch = buf; + info.paddr = buf->mem.pa; + info.len = buf->totallen; + info.tcplen = buf->tcphlen; + info.ipv4 = buf->ipv4; + + if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) { + info.ah_id = buf->ah_id; + } else { + info.maclen = buf->maclen; + info.do_lpb = buf->do_lpb; + } + + /* Synch buffer for use by device */ + dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa, + buf->mem.size, DMA_BIDIRECTIONAL); + ret = zxdh_puda_send(&rsrc->qp, &info); + if (ret) { + rsrc->tx_wqe_avail_cnt++; + rsrc->stats_sent_pkt_q++; + list_add(&buf->list, &rsrc->txpend); + if (rsrc->type == ZXDH_PUDA_RSRC_TYPE_ILQ) + pr_info("PUDA: adding to puda_send\n"); + } else { + rsrc->stats_pkt_sent++; + } +done: + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); +} diff --git a/drivers/infiniband/hw/zrdma/puda.h b/drivers/infiniband/hw/zrdma/puda.h new file mode 100644 index 000000000000..ea292937c276 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/puda.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_PUDA_H +#define ZXDH_PUDA_H + +#define 
ZXDH_IEQ_MPA_FRAMING 6 +#define ZXDH_TCP_OFFSET 40 +#define ZXDH_IPV4_PAD 20 +#define ZXDH_MRK_BLK_SZ 512 + +enum puda_rsrc_type { + ZXDH_PUDA_RSRC_TYPE_ILQ = 1, + ZXDH_PUDA_RSRC_TYPE_IEQ, + ZXDH_PUDA_RSRC_TYPE_MAX, /* Must be last entry */ +}; + +enum puda_rsrc_complete { + PUDA_CQ_CREATED = 1, + PUDA_QP_CREATED, + PUDA_TX_COMPLETE, + PUDA_RX_COMPLETE, + PUDA_HASH_CRC_COMPLETE, +}; + +struct zxdh_sc_dev; +struct zxdh_sc_qp; +struct zxdh_sc_cq; + +struct zxdh_puda_cmpl_info { + struct zxdh_qp_uk *qp; + u8 q_type; + u8 l3proto; + u8 l4proto; + u16 vlan; + u32 payload_len; + u32 compl_error; /* No_err=0, else major and minor err code */ + u32 qp_id; + u32 wqe_idx; + u8 ipv4 : 1; + u8 smac_valid : 1; + u8 vlan_valid : 1; + u8 smac[ETH_ALEN]; +}; + +struct zxdh_puda_send_info { + u64 paddr; /* Physical address */ + u32 len; + u32 ah_id; + u8 tcplen; + u8 maclen; + u8 ipv4 : 1; + u8 do_lpb : 1; + void *scratch; +}; + +struct zxdh_puda_buf { + struct list_head list; /* MUST be first entry */ + struct zxdh_dma_mem mem; /* DMA memory for the buffer */ + struct zxdh_puda_buf *next; /* for alloclist in rsrc struct */ + struct zxdh_virt_mem buf_mem; /* Buffer memory for this buffer */ + void *scratch; + u8 *iph; + u8 *tcph; + u8 *data; + u16 datalen; + u16 vlan_id; + u8 tcphlen; /* tcp length in bytes */ + u8 maclen; /* mac length in bytes */ + u32 totallen; /* machlen+iphlen+tcphlen+datalen */ + refcount_t refcount; + u8 hdrlen; + u8 ipv4 : 1; + u8 vlan_valid : 1; + u8 do_lpb : 1; /* Loopback buffer */ + u8 smac_valid : 1; + u32 seqnum; + u32 ah_id; + u8 smac[ETH_ALEN]; + struct zxdh_sc_vsi *vsi; +}; + +struct zxdh_puda_rsrc_info { + void (*receive)(struct zxdh_sc_vsi *vsi, struct zxdh_puda_buf *buf); + void (*xmit_complete)(struct zxdh_sc_vsi *vsi, void *sqwrid); + enum puda_rsrc_type type; /* ILQ or IEQ */ + u32 count; + u32 pd_id; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */ + u16 
buf_size; + u8 stats_idx; + u8 stats_idx_valid : 1; + int abi_ver; +}; + +struct zxdh_puda_rsrc { + struct zxdh_sc_cq cq; + struct zxdh_sc_qp qp; + struct zxdh_sc_pd sc_pd; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + struct zxdh_dma_mem cqmem; + struct zxdh_dma_mem qpmem; + struct zxdh_virt_mem ilq_mem; + enum puda_rsrc_complete cmpl; + enum puda_rsrc_type type; + u16 buf_size; /*buf must be max datalen + tcpip hdr + mac */ + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 cq_size; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 compl_rxwqe_idx; + u32 rx_wqe_idx; + u32 rxq_invalid_cnt; + u32 tx_wqe_avail_cnt; + struct shash_desc *hash_desc; + struct list_head txpend; + struct list_head bufpool; /* free buffers pool list for recv and xmit */ + u32 alloc_buf_count; + u32 avail_buf_count; /* snapshot of currently available buffers */ + spinlock_t bufpool_lock; + struct zxdh_puda_buf *alloclist; + void (*receive)(struct zxdh_sc_vsi *vsi, struct zxdh_puda_buf *buf); + void (*xmit_complete)(struct zxdh_sc_vsi *vsi, void *sqwrid); + /* puda stats */ + u64 stats_buf_alloc_fail; + u64 stats_pkt_rcvd; + u64 stats_pkt_sent; + u64 stats_rcvd_pkt_err; + u64 stats_sent_pkt_q; + u64 stats_bad_qp_id; + /* IEQ stats */ + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 pmode_count; + u64 partials_handled; + u8 stats_idx; + u8 check_crc : 1; + u8 stats_idx_valid : 1; +}; + +void zxdh_puda_ret_bufpool(struct zxdh_puda_rsrc *rsrc, + struct zxdh_puda_buf *buf); +void zxdh_puda_send_buf(struct zxdh_puda_rsrc *rsrc, struct zxdh_puda_buf *buf); +int zxdh_puda_send(struct zxdh_sc_qp *qp, struct zxdh_puda_send_info *info); + +int zxdh_cqp_qp_destroy_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_qp *qp); +#endif /*ZXDH_PROTOS_H */ diff --git a/drivers/infiniband/hw/zrdma/restrack.c b/drivers/infiniband/hw/zrdma/restrack.c new file mode 100644 index 000000000000..d648198d7acd --- /dev/null +++ 
b/drivers/infiniband/hw/zrdma/restrack.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#ifndef RDMA_MMAP_DB_SUPPORT +#include +#endif +#include +#include +#include "restrack.h" +#include "main.h" + +#ifdef IB_DEV_OPS_FILL_ENTRY +static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr) +{ + struct zxdh_mr *mr = to_iwmr(ibmr); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + goto err; + switch (mr->type) { + case ZXDH_MEMREG_TYPE_MEM: + if (rdma_nl_put_driver_string(msg, "type", "mem")) + goto err; + break; + case ZXDH_MEMREG_TYPE_QP: + if (rdma_nl_put_driver_string(msg, "type", "qp")) + goto err; + break; + case ZXDH_MEMREG_TYPE_CQ: + if (rdma_nl_put_driver_string(msg, "type", "cq")) + goto err; + break; + case ZXDH_MEMREG_TYPE_SRQ: + if (rdma_nl_put_driver_string(msg, "type", "srq")) + goto err; + break; + default: + goto err; + } + nla_nest_end(msg, table_attr); + return 0; + +err: + pr_err("res mr entry failed\n"); + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_device *iwdev = to_iwdev(ibmr->device); + struct zxdh_src_copy_dest src_dest = { 0 }; + struct zxdh_dma_mem qpc_buf = { 0 }; + int err_code = 0; + + qpc_buf.size = 64; + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res mr entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + err_code = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, 0, qpc_buf.va); + goto free_buff; + } + src_dest.src = 64 * (iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S); + src_dest.dest = qpc_buf.pa; + src_dest.len = qpc_buf.size; + err_code = zxdh_cqp_rdma_read_mrte_cmd(&iwdev->rf->sc_dev, &src_dest); + if (err_code) { + 
pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, qpc_buf.size, qpc_buf.va); +free_buff: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + + qpc_buf.va = NULL; + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(&iwdev->rf->sc_dev, iwqp->sc_qp.qp_ctx_num, &qpc_buf); + if (err_code) { + pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, qpc_buf.size, qpc_buf.va); +free_buff: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_device *iwdev = to_iwdev(ibcq->device); + struct zxdh_dma_mem cqc_buf; + int err_code = 0; + + cqc_buf.va = NULL; + cqc_buf.size = ALIGN(ZXDH_CQ_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + cqc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, cqc_buf.size, + &cqc_buf.pa, GFP_KERNEL); + if (!cqc_buf.va) { + pr_err("res cq entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_cqc(&iwdev->rf->sc_dev, iwcq->sc_cq.cq_uk.cq_id, &cqc_buf); + if (err_code) { + pr_err("res cq entry raw fill cqc failed:%d\n", err_code); + goto free_buff; + } + err_code = + nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, cqc_buf.size, cqc_buf.va); +free_buff: + 
dma_free_coherent(iwdev->rf->hw.device, cqc_buf.size, cqc_buf.va, + cqc_buf.pa); + cqc_buf.va = NULL; + return err_code; +} + +static const struct ib_device_ops restrack_ops = { + .fill_res_cq_entry_raw = fill_res_cq_entry_raw, + .fill_res_mr_entry = fill_res_mr_entry, + .fill_res_qp_entry_raw = fill_res_qp_entry_raw, + .fill_res_mr_entry_raw = fill_res_mr_entry_raw, + +}; + +int zxdh_set_restrack_ops(struct ib_device *ibdev) +{ + ib_set_device_ops(ibdev, &restrack_ops); + return 0; +} +#endif diff --git a/drivers/infiniband/hw/zrdma/restrack.h b/drivers/infiniband/hw/zrdma/restrack.h new file mode 100644 index 000000000000..38439f2ef4fc --- /dev/null +++ b/drivers/infiniband/hw/zrdma/restrack.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_ZRDMA_H +#define ZXDH_ZRDMA_H + +#ifndef ZXDH_UAPI_DEF +#include +#endif + +int zxdh_set_restrack_ops(struct ib_device *ibdev); +#endif diff --git a/drivers/infiniband/hw/zrdma/slib.c b/drivers/infiniband/hw/zrdma/slib.c new file mode 100644 index 000000000000..bd205f804aad --- /dev/null +++ b/drivers/infiniband/hw/zrdma/slib.c @@ -0,0 +1,181 @@ +#include +#include "slib.h" + +void *zte_memcpy_s(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +char *zte_strncpy_s(char *dest, const char *src, size_t dest_size) +{ + if (!dest || !src || dest_size == 0) { + printk("error: dest=%p src=%p dest_size=%zd\n", dest, src, dest_size); + return NULL; + } + + return strncpy(dest, src, dest_size); +} + +void *zte_memset_s(void *s, int c, size_t n) +{ + return memset(s, c, n); +} + +int zte_snprintf_s(char *buf, size_t size, const char *format, ...) +{ + va_list args; + int i; + + if (buf == NULL || size == 0) { + return 0; + } + + va_start(args, format); + i = vsnprintf(buf, size, format, args); + va_end(args); + + return i; +} + +int zte_sprintf_s(char *buf, const char *format, ...) 
{
	va_list args;
	int i;

	if (buf == NULL) {
		return 0;
	}

	va_start(args, format);
	i = vsprintf(buf, format, args);
	va_end(args);
	return i;
}

/* zte_sscanf_s - vsscanf wrapper; returns the number of converted items. */
int zte_sscanf_s(const char *buf, const char *format, ...)
{
	va_list args;
	int i;

	va_start(args, format);
	i = vsscanf(buf, format, args);
	va_end(args);

	return i;
}

/* zte_strlen_s - plain strlen wrapper; @s must be NUL-terminated. */
size_t zte_strlen_s(const char *s)
{
	return strlen(s);
}

/* zte_strncat_s - plain strncat wrapper; caller guarantees space in @dest. */
char *zte_strncat_s(char *dest, const char *src, size_t n)
{
	return strncat(dest, src, n);
}

/*
 * Test cases. Self-checks for the wrappers above; each logs a "success"
 * line via printk when the wrapper behaves as expected.
 * NOTE(review): all printk calls here lack a KERN_* level prefix.
 */
void test_zte_memcpy_s(void)
{
	char src[] = "Hello";
	char dest[10];
	size_t src_len = zte_strlen_s(src);

	/* Guard: dest must fit the string plus its terminator. */
	if (sizeof(dest) <= src_len) {
		printk("error: dest buffer is not enough\n");
		return;
	}

	zte_memcpy_s(dest, src, zte_strlen_s(src) + 1);
	dest[src_len] = '\0';
	if (strcmp(dest, src) == 0) {
		printk("test_zte_memcpy_s success\n");
	}
}

void test_zte_memset_s(void)
{
	char buf[10];
	zte_memset_s(buf, 'A', 5);
	buf[5] = '\0';
	if (strcmp(buf, "AAAAA") == 0) {
		printk("test_zte_memset_s success\n");
	}
}

void test_zte_snprintf_s(void)
{
	char buf[20];
	int result = zte_snprintf_s(buf, sizeof(buf), "Number: %d", 123);
	if (result > 0) {
		if (strcmp(buf, "Number: 123") == 0) {
			printk("test_zte_snprintf_s success\n");
		}
	}
}

void test_zte_sprintf_s(void)
{
	char buf[20];
	int result = zte_sprintf_s(buf, "Text: %s", "Test");
	if (result > 0) {
		if (strcmp(buf, "Text: Test") == 0) {
			printk("test_zte_sprintf_s success\n");
		}
	}
}

void test_zte_strlen_s(void)
{
	char str[] = "Length";
	size_t len = zte_strlen_s(str);
	/* NOTE(review): size_t printed with %ld; %zu is the correct specifier. */
	printk("test_zte_strlen_s success, len = %ld\n", len);
}

void test_zte_strncat_s(void)
{
	char dest[20] = "Hello";
	char src[] = " World";
	/* sizeof(src) includes the NUL, so the result is terminated. */
	zte_strncat_s(dest, src, sizeof(src));
	if (strcmp(dest, "Hello World") == 0) {
		printk("test_zte_strncat success\n");
	}
}

void test_zte_strncpy_s(void)
{
	char src[] = "World";
	char dest[10];
	unsigned int src_len;
	src_len = strlen(src);
	/* Copy including the terminator, then terminate defensively anyway
	 * (strncpy does not guarantee termination in general). */
	zte_strncpy_s(dest, src, src_len + 1);
	dest[src_len] = '\0';
	if (strcmp(dest, src) == 0) {
		printk("test_zte_strncpy_s success\n");
	}
}

void test_zte_sscanf_s(void)
{
	char input[] = "123";
	int value;
	int result = zte_sscanf_s(input, "%d", &value);
	if (result == 1 && value == 123) {
		printk("test_zte_sscanf_s success\n");
	}
}

/*
 * recording_not_safe_func - run every self-test above in sequence.
 * NOTE(review): prints "All tests passed!" unconditionally — the individual
 * tests only log on success and never report failure upward.
 */
void recording_not_safe_func(void)
{
	test_zte_memcpy_s();
	test_zte_memset_s();
	test_zte_snprintf_s();
	test_zte_sprintf_s();
	test_zte_strlen_s();
	test_zte_strncat_s();
	test_zte_strncpy_s();
	test_zte_sscanf_s();

	printk("All tests passed!\n");
}
\ No newline at end of file
diff --git a/drivers/infiniband/hw/zrdma/slib.h b/drivers/infiniband/hw/zrdma/slib.h
new file mode 100644
index 000000000000..a65822a9fcc5
--- /dev/null
+++ b/drivers/infiniband/hw/zrdma/slib.h
@@ -0,0 +1,35 @@
/* NOTE(review): no SPDX license identifier, unlike the other new headers
 * in this series. */
#ifndef __SLIB_H__
#define __SLIB_H__
#ifdef __cplusplus
extern "C" {
#endif

/* NOTE(review): include targets lost in extraction (bare "#include"). */
#include
#include

/* "Safe library" wrapper prototypes — see slib.c for semantics/caveats. */
void *zte_memcpy_s(void *dest, const void *src, size_t n);
void *zte_memset_s(void *s, int c, size_t n);
int zte_snprintf_s(char *buf, size_t size, const char *format, ...) __attribute__((format(gnu_printf, 3, 4)));
int zte_sprintf_s(char *buf, const char *format, ...) __attribute__((format(gnu_printf, 2, 3)));
size_t zte_strlen_s(const char *s);
char *zte_strncat_s(char *dest, const char *src, size_t n);
char *zte_strncpy_s(char *dest, const char *src, size_t count);
int zte_sscanf_s(const char *buf, const char *format, ...)
__attribute__((format(gnu_scanf, 2, 3))); +void recording_not_safe_func(void); + +/* 测试用例 */ +void test_zte_memcpy_s(void); +void test_zte_memset_s(void); +void test_zte_snprintf_s(void); +void test_zte_sprintf_s(void); +void test_zte_strlen_s(void); +void test_zte_strncat_s(void); +void test_zte_strncpy_s(void); +void test_zte_sscanf_s(void); +void recording_not_safe_func(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.c b/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.c new file mode 100644 index 000000000000..8e0f1136e587 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.c @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#include +#include +#include +#include + +#include "common_define.h" +#include "cmdk_mmu600.h" +#include "adk_mmu600.h" +#include "../../main.h" + +/* CMA page allocation size - 64MB total */ +#define ZXDH_CMA_PAGE_COUNT (16 * 1024) /* 16K pages * 4KB = 64MB */ + +/* SMMU page table size definitions */ +#define ZXDH_SMMU_L1_ENTRY_SIZE 0x100 /* 256 bytes per L1 entry */ +#define ZXDH_SMMU_L1_ALIGN_SIZE 0x100 /* 256 byte alignment */ +#define ZXDH_SMMU_L1_PT_NUM 32 /* 32 L1 page tables */ +#define ZXDH_SMMU_L1_PT_SIZE (ZXDH_SMMU_L1_PT_NUM * ZXDH_SMMU_L1_ENTRY_SIZE) + +#define ZXDH_SMMU_L2_ENTRY_SIZE 0x1000 /* 4KB per L2 entry */ +#define ZXDH_SMMU_L2_ALIGN_SIZE 0x1000 /* 4KB alignment */ +#define ZXDH_SMMU_L2_PT_NUM 32 /* 32 L2 page tables */ +#define ZXDH_SMMU_L2_PT_SIZE (ZXDH_SMMU_L2_PT_NUM * ZXDH_SMMU_L2_ENTRY_SIZE) + +#define ZXDH_SMMU_L3_ENTRY_SIZE 0x1000 /* 4KB per L3 entry */ +#define ZXDH_SMMU_L3_ALIGN_SIZE 0x1000 /* 4KB alignment */ +#define ZXDH_SMMU_L3_PT_NUM 0x3DE /* 990 L3 page tables */ +#define ZXDH_SMMU_L3_PT_SIZE (ZXDH_SMMU_L3_PT_NUM * ZXDH_SMMU_L3_ENTRY_SIZE) + +#define ZXDH_SMMU_PT_TOTAL_SIZE (ZXDH_SMMU_L1_PT_SIZE + \ + ZXDH_SMMU_L2_PT_SIZE + \ + 
ZXDH_SMMU_L3_PT_SIZE) + +/* L2D SMMU base physical address */ +#define ZXDH_PTE_L2D_START_PA 0x6200630000ULL + +/** + * zxdh_smmu_set_pte - Set page table entry + * @pte_req: PTE request configuration + * @dev: Device context + * + * Configure page table entry for the specified virtual-to-physical mapping. + * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_set_pte(struct smmu_pte_request *pte_req, struct zxdh_sc_dev *dev) +{ + if (!pte_req || !dev) + return -EINVAL; + + return zxdh_smmu_mmap(pte_req, dev); +} +EXPORT_SYMBOL(zxdh_smmu_set_pte); + +/** + * zxdh_smmu_enable_stream_bypass - Enable stream bypass mode + * @stream_id: Stream ID to configure + * + * Configure the specified stream to bypass SMMU translation. + * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_enable_stream_bypass(u32 stream_id) +{ + /* 需要向risc-v的smmu驱动发送命令,TODO */ + return 0; +} +EXPORT_SYMBOL(zxdh_smmu_enable_stream_bypass); + +/** + * zxdh_smmu_delete_pte - Delete a page table entry + * @stream_id: Stream ID + * @virt_addr: Virtual address + * @dev: Device context + * + * Delete the specified page table entry for the given stream and address. + * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_delete_pte(u32 stream_id, u64 virt_addr, struct zxdh_sc_dev *dev) +{ + return zxdh_smmu_cmd_tlb_sync(); +} + +/** + * zxdh_smmu_alloc_cma_memory - Allocate CMA memory for SMMU + * @dev: Device context + * + * Allocate coherent DMA memory for SMMU page tables using CMA. 
+ * + * Return: 0 on success, negative error code on failure + */ +static int zxdh_smmu_alloc_cma_memory(struct zxdh_sc_dev *dev) +{ + struct smmu_pte_address *pte_addr = dev->pte_address; + struct device *device = dev->hw->device; + + pte_addr->cma_page_addr = dma_alloc_coherent(device, + ZXDH_SMMU_PT_TOTAL_SIZE, + &pte_addr->cma_page_mem_base_pa, + GFP_KERNEL); + if (!pte_addr->cma_page_addr) + return -ENOMEM; + + pte_addr->cma_page_mem_base_va = (u64)pte_addr->cma_page_addr; + + return 0; +} + +/** + * zxdh_smmu_pagetable_init - Initialize SMMU page tables + * @dev: Device context + * + * Initialize SMMU page table structures and allocate necessary memory. + * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_pagetable_init(struct zxdh_sc_dev *dev) +{ + /* Initialize page table parameters using new structure */ + struct smmu_pagetable_param pgt_param = {}; + int ret; + + if (!dev) + return -EINVAL; + + /* Send TLB invalidation command first */ + zxdh_smmu_invalidate_tlb(dev); + + /* Set L2D start physical address */ + dev->pte_l2d_startpa = dev->l2d_smmu_addr; + + /* Allocate PTE address structure */ + dev->pte_address = kzalloc(sizeof(*dev->pte_address), GFP_KERNEL); + if (!dev->pte_address) + return -ENOMEM; + + /* Allocate CMA memory for page tables */ + ret = zxdh_smmu_alloc_cma_memory(dev); + if (ret) + goto err_free_pte_addr; + + /* Initialize page table parameters using new structure */ + pgt_param.pagetable_phy_addr = dev->pte_address->cma_page_mem_base_pa; + pgt_param.pagetable_vir_addr = dev->pte_address->cma_page_mem_base_va; + pgt_param.pagetable_size = ZXDH_SMMU_PT_TOTAL_SIZE; + pgt_param.l1_pagetable_num = 32; + pgt_param.l2_pagetable_num = 32; + pgt_param.l3_pagetable_num = 990; + + /* Set L2D SMMU L2 offset */ + dev->pte_address->l2d_smmu_l2_offset = dev->l2d_smmu_l2_offset; + + /* Initialize SMMU structures */ + ret = zxdh_smmu_struct_init(&pgt_param, dev->pte_address, dev->hw->device); + if (ret) + goto 
err_free_cma_mem; + + return 0; + +err_free_cma_mem: + dma_free_coherent(dev->hw->device, ZXDH_SMMU_PT_TOTAL_SIZE, + (void *)dev->pte_address->cma_page_mem_base_va, + dev->pte_address->cma_page_mem_base_pa); +err_free_pte_addr: + kfree(dev->pte_address); + dev->pte_address = NULL; + return ret; +} + +/** + * zxdh_smmu_pagetable_exit - Cleanup SMMU page tables + * @dev: Device context + * + * Release all SMMU related memory and resources. + * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_pagetable_exit(struct zxdh_sc_dev *dev) +{ + struct smmu_pte_address *pte_addr = dev->pte_address; + struct device *device = dev->hw->device; + + if (!pte_addr) + return -EINVAL; + + /* Free CMA memory */ + if (pte_addr->cma_page_addr) { + dma_free_coherent(device, ZXDH_SMMU_PT_TOTAL_SIZE, + pte_addr->cma_page_addr, + pte_addr->cma_page_mem_base_pa); + } + + /* Free map management memory */ + if (pte_addr->map_manage_addr) + kfree((void *)pte_addr->map_manage_addr); + + /* Free PTE records */ + if (pte_addr->pte_records) + kfree(pte_addr->pte_records); + + /* Free temporary PTE buffer */ + if (pte_addr->pte_temp_vir_addr) { + dma_free_coherent(device, ZXDH_SMMU_L1_ENTRY_SIZE * 4, + (void *)pte_addr->pte_temp_vir_addr, + pte_addr->pte_temp_phy_addr); + } + + /* Free PTE address structure */ + kfree(dev->pte_address); + dev->pte_address = NULL; + + return 0; +} + +/** + * zxdh_smmu_invalidate_tlb - Invalidate SMMU TLB + * @dev: Device context + * + * Send TLB invalidation command to SMMU hardware. 
+ * + * Return: 0 on success, negative error code on failure + */ +int zxdh_smmu_invalidate_tlb(struct zxdh_sc_dev *dev) +{ + int ret = 0; + u64 recv_buffer = 0; + u8 *reply_ptr = NULL; + u8 *risc_smmu_back_result = NULL; + u16 *risc_smmu_back_len = NULL; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct smmu_msg_info smmu_info = { 0 }; + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_mgr mgr = { 0 }; + struct iidc_core_dev_info *cdev_info; + + ktime_t current_time; + ktime_t last_time; + ktime_t delta_ms; + u32 cnt = 0; + u32 cnt_num = ZXDH_BAR_MSG_RETRY_NUM; + + if (rf->sc_dev.driver_load == false) + cnt_num = ZXDH_BAR_MSG_DEFAULT_NUM; + + last_time = dev->last_time; + current_time = ktime_get_real(); + if (last_time != 0) { + delta_ms = ktime_ms_delta(current_time, last_time); + if (delta_ms < 100) { /* 100ms timeout */ + return 0; + } + } + dev->last_time = ktime_get_real(); + + cdev_info = (struct iidc_core_dev_info *)rf->cdev; + /* query pcie id */ + mgr.pdev = cdev_info->pdev; + ret = dh_rdma_pf_pcie_id_get(&mgr); + if (ret) { + pr_err("[%s] get pf pcie_id failed, ret=%d\n", __func__, ret); + return -EINVAL; + } + + result.recv_buffer = &recv_buffer; + result.buffer_len = sizeof(u64); + + smmu_info.is_tlb_invalid = 1; + smmu_info.tlb_cfg.cmd = 0x2; /* TLBI_NSNH_ALL command */ + + in.payload_addr = (uint8_t *)&smmu_info; + in.payload_len = sizeof(struct smmu_msg_info); + + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.virt_addr = (u64)dev->hw->pci_hw_addr + 0x2000; /* bar空间偏移 */ + + in.event_id = 0x5; /* SMMU event ID */ + in.src_pcieid = mgr.pcie_id; + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if ((ret != ZXDH_BAR_ERR_TIME_OUT) && (ret != ZXDH_BAR_ERR_LOCK_FAILED)) { + break; + } + cnt++; + } while (cnt < cnt_num); + + if (ret != 0) { + pr_err("zxdh_bar_chan_sync_msg_send error, ret = %d cnt=%d\n", + ret, cnt); + } + + reply_ptr = (u8 *)result.recv_buffer; /* common 通道处理状态信息 
*/ + if (*reply_ptr == 0xFF) { + risc_smmu_back_result = (u8 *)(reply_ptr + 4); + risc_smmu_back_len = (u16 *)(reply_ptr + 1); + + pr_err("risc_back_result = 0x%x, risc_smmu_back_len = 0x%x\n", + *(u8 *)risc_smmu_back_result, + *(u8 *)risc_smmu_back_len); + } + + return 0; +} + +MODULE_AUTHOR("ZTE Corporation"); +MODULE_LICENSE("GPL v2"); \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.h b/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.h new file mode 100644 index 000000000000..282450d26c3e --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/adk_mmu600.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _ZXDH_SMMU_H_ +#define _ZXDH_SMMU_H_ + +#include +#include +#include "cmdk_mmu600.h" + +/* Forward declarations */ +struct smmu_pagetable_param; +struct smmu_pte_request; +struct zxdh_sc_dev; +struct dentry; + +/* Maximum stream ID number */ +#define ZXDH_SMMU_MAX_STREAM_NUM 64 + +/* Page Table Entry Access Permission values */ +#define SMMU_PTE_AP_EL1_RW 0 /* EL1+ R/W, EL0 none */ +#define SMMU_PTE_AP_ALL_RW 1 /* R/W in all EL */ +#define SMMU_PTE_AP_EL1_RO 2 /* EL1+ RO, EL0 none */ +#define SMMU_PTE_AP_ALL_RO 3 /* RO in all EL */ + +/* Memory Attribute values */ +#define SMMU_PTE_MEMATTR_DEVICE 0 +#define SMMU_PTE_MEMATTR_NORMAL_WB_WA 1 +#define SMMU_PTE_MEMATTR_NON_CACHEABLE 2 + +/* Shareability values */ +#define SMMU_PAGETABLE_NON_SHAREABLE 0 /* Non-shareable */ +#define SMMU_PAGETABLE_OUTER_SHAREABLE 2 /* Outer shareable */ +#define SMMU_PAGETABLE_INNER_SHAREABLE 3 /* Inner shareable */ + +/* SMMU command and message definitions */ +#define ZXDH_SMMU_MSG_EVENT_ID 6 +#define SMMU_CMDQ_OP_TLBI_NSNH_ALL 0x30 +#define ZXDH_SMMU_INVALID_TLB_TIMEOUT_MS 15000 + +/** + * struct smmu_tlb_invalidate_cfg - TLB invalidation configuration + * @cmd: Command type + * @scale: Scale factor + * @num: Number of entries + * @tg: Translation 
granule + * @leaf: Leaf level + * @ttl: Translation table level + * @vmid: Virtual machine ID + * @asid: Address space ID + * @addr: Target address + */ +struct smmu_tlb_invalidate_cfg { + u32 cmd; + u32 scale; + u32 num; + u32 tg; + u32 leaf; + u32 ttl; + u32 vmid; + u32 asid; + u64 addr; +}; + +/** + * struct smmu_msg_info - SMMU message information + * @is_tlb_invalid: TLB invalidation flag + * @tlb_cfg: TLB invalidation configuration + */ +struct smmu_msg_info { + u32 is_tlb_invalid; + struct smmu_tlb_invalidate_cfg tlb_cfg; +}; + +/** + * struct smmu_pte_record - Page table entry record + * @valid: Entry validity + * @stream_id: Stream ID + * @virt_addr: Virtual address + * @phy_addr: Physical address + * @size: Size + */ +struct smmu_pte_record { + u32 valid; + u32 stream_id; + u64 virt_addr; + u64 phy_addr; + u64 size; +}; + +/* Structure definitions */ + +/** + * struct smmu_pte_address - SMMU page table address management + * @cma_page_mem_base_pa: CMA page memory base physical address + * @cma_page_mem_base_va: CMA page memory base virtual address + * @cma_mem_base_va_pte: CMA memory base virtual address for PTE + * @pagetable_cfg: Page table configuration + * @pagetable_vir_base_addr: Page table virtual base address + * @map_manage_addr: Map management address for L2/L3 + * @pte_records: PTE records array + * @cma_page_addr: CMA page address + * @pte_temp_vir_addr: Temporary PTE virtual address + * @pte_temp_phy_addr: Temporary PTE physical address + * @l1_pagetable_num: L1 page table number + * @l2_pagetable_num: L2 page table number + * @l3_pagetable_num: L3 page table number + * @pte_record_num: PTE record number + * @pte_fail_record_num: PTE fail record number + * @l2d_smmu_l2_offset: L2D SMMU L2 offset + */ +struct smmu_pte_address { + u64 cma_page_mem_base_pa; + u64 cma_page_mem_base_va; + u64 cma_mem_base_va_pte; + struct smmu_pagetable_param pagetable_cfg; + u64 pagetable_vir_base_addr; + u64 map_manage_addr; + struct smmu_pte_record 
*pte_records; + struct page *cma_page_addr; + u64 pte_temp_vir_addr; + u64 pte_temp_phy_addr; + u32 l1_pagetable_num; + u32 l2_pagetable_num; + u32 l3_pagetable_num; + u32 pte_record_num; + u32 pte_fail_record_num; + struct dentry *dbg_dentry; + u32 l2d_smmu_l2_offset; +}; + +/* Function prototypes */ +int zxdh_smmu_pagetable_init(struct zxdh_sc_dev *dev); +int zxdh_smmu_pagetable_exit(struct zxdh_sc_dev *dev); +int zxdh_smmu_enable_stream_stage2(u32 stream_id); +int zxdh_smmu_enable_stream_bypass(u32 stream_id); + +int zxdh_smmu_set_pte(struct smmu_pte_request *pte_req, + struct zxdh_sc_dev *dev); +int zxdh_smmu_delete_pte(u32 stream_id, u64 virt_addr, + struct zxdh_sc_dev *dev); + +int zxdh_smmu_invalidate_tlb(struct zxdh_sc_dev *dev); + +#endif /* _ZXDH_SMMU_H_ */ \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk.h b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk.h new file mode 100644 index 000000000000..b0bd7b6c32dd --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +/** + * @file cmdk_mmu600.h + * @brief SMMU MMU600 command definitions and structures + */ + +#ifndef CMDK_MMU600_H +#define CMDK_MMU600_H + +#include +#include + +/* Forward declarations */ +struct smmu_pte_address; +struct zxdh_sc_dev; + +/* SMMU structures following Linux kernel naming conventions */ +struct smmu_pagetable_param { + u64 pagetable_phy_addr; + u64 pagetable_vir_addr; + u32 pagetable_size; + u64 ex_pagetable_phy_addr; + u32 ex_pagetable_size; + u32 l1_pagetable_num; + u32 l2_pagetable_num; + u32 l3_pagetable_num; +}; + +struct smmu_pte_request { + u32 stream_id; + u64 phy_addr; + u64 vir_addr; + u64 size; + u32 access_perm; + u32 mem_attr; + u32 shareability; +}; + +/* Function prototypes */ +u32 uswap_32(u32 v); +u64 uswap_64(u64 v); +u32 memset_8byte(u64 *p, u64 data, u64 size); +u32 
zxdh_smmu_cmd_tlb_sync(void);
u32 zxdh_smmu_set_print_level(u32 print_level);
u8 zxdh_smmu_get_print_level(void);
u32 zxdh_smmu_struct_init(const struct smmu_pagetable_param *pgt_param,
			  struct smmu_pte_address *pte_address,
			  struct device *dmadev);

u32 zxdh_smmu_mmap(struct smmu_pte_request *pte_request, struct zxdh_sc_dev *dev);

#endif /* CMDK_MMU600_H */
\ No newline at end of file
diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.c b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.c
new file mode 100644
index 000000000000..d6415affc657
--- /dev/null
+++ b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.c
@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2023 - 2024 ZTE Corporation */

/* NOTE(review): include targets lost in extraction (bare "#include"). */
#include
#include
#include

#include "cmdk_mmu600.h"
#include "common_define.h"

/* Global SMMU print/verbosity level; see the set/get accessors below.
 * NOTE(review): not protected by any lock — assumed write-rarely/debug-only. */
u8 smmu_print_module_id;
EXPORT_SYMBOL(smmu_print_module_id);

/**
 * uswap_32 - Swap bytes in 32-bit value
 * @v: Value to swap
 *
 * Open-coded endianness swap (equivalent to swab32()).
 *
 * Return: Byte-swapped value
 */
u32 uswap_32(u32 v)
{
	return ((v & 0x000000ff) << 24) | ((v & 0x0000ff00) << 8) |
	       ((v & 0x00ff0000) >> 8) | ((v & 0xff000000) >> 24);
}
EXPORT_SYMBOL(uswap_32);

/**
 * uswap_64 - Swap bytes in 64-bit value
 * @v: Value to swap
 *
 * Swaps each 32-bit half with uswap_32() and exchanges the halves
 * (equivalent to swab64()).
 *
 * Return: Byte-swapped value
 */
u64 uswap_64(u64 v)
{
	return ((u64)uswap_32((u32)v) << 32) | uswap_32((u32)(v >> 32));
}
EXPORT_SYMBOL(uswap_64);

/**
 * memset_8byte - Set memory with 64-bit pattern
 * @p: Pointer to memory (must be 8-byte aligned)
 * @data: 64-bit pattern to set
 * @size: Size in bytes
 *
 * Writes @data into @p in 8-byte strides. Any trailing @size % 8 bytes
 * are left untouched (size is truncated by the integer division).
 *
 * Return: 0 on success
 */
u32 memset_8byte(u64 *p, u64 data, u64 size)
{
	u64 count = size / 8;
	u64 i;

	for (i = 0; i < count; i++)
		p[i] = data;

	return 0;
}
EXPORT_SYMBOL(memset_8byte);

/**
 * zxdh_smmu_cmd_tlb_sync - Synchronize TLB
 *
 * Placeholder: currently a no-op that always reports success.
 *
 * Return: 0 on success
 */
u32 zxdh_smmu_cmd_tlb_sync(void)
{
	/* TODO: Implement TLB sync command */
	return 0;
}
EXPORT_SYMBOL(zxdh_smmu_cmd_tlb_sync);

/**
 * zxdh_smmu_set_print_level - Set SMMU print level
 * @print_level: Print level to set
 *
 * Stores @print_level into the global smmu_print_module_id.
 * NOTE(review): the global is u8, so values above 255 are silently
 * truncated — confirm the valid level range.
 *
 * Return: 0 on success
 */
u32 zxdh_smmu_set_print_level(u32 print_level)
{
	smmu_print_module_id = print_level;
	return 0;
}
EXPORT_SYMBOL(zxdh_smmu_set_print_level);

/**
 * zxdh_smmu_get_print_level - Get current SMMU print level
 *
 * Return: Current print level
 */
u8 zxdh_smmu_get_print_level(void)
{
	return smmu_print_module_id;
}
EXPORT_SYMBOL(zxdh_smmu_get_print_level);

MODULE_AUTHOR("ZTE Corporation");
MODULE_LICENSE("GPL v2");
\ No newline at end of file
diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.h b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.h
new file mode 100644
index 000000000000..b0bd7b6c32dd
--- /dev/null
+++ b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600.h
@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2023 - 2024 ZTE Corporation */

/**
 * @file cmdk_mmu600.h
 * @brief SMMU MMU600 command definitions and structures
 *
 * NOTE(review): this header appears to be a byte-for-byte duplicate of
 * cmdk.h added earlier in this patch, sharing the same CMDK_MMU600_H
 * guard — only one copy can ever be included; consider dropping one file.
 */

#ifndef CMDK_MMU600_H
#define CMDK_MMU600_H

/* NOTE(review): include targets lost in extraction (bare "#include"). */
#include
#include

/* Forward declarations */
struct smmu_pte_address;
struct zxdh_sc_dev;

/* SMMU structures following Linux kernel naming conventions */
struct smmu_pagetable_param {
	u64 pagetable_phy_addr;
	u64 pagetable_vir_addr;
	u32 pagetable_size;
	u64 ex_pagetable_phy_addr;
	u32 ex_pagetable_size;
	u32 l1_pagetable_num;
	u32 l2_pagetable_num;
	u32 l3_pagetable_num;
};

struct smmu_pte_request {
	u32 stream_id;
	u64 phy_addr;
	u64 vir_addr;
	u64 size;
	u32 access_perm;
	u32 mem_attr;
	u32 shareability;
};

/* Function prototypes */
u32 uswap_32(u32 v);
u64 uswap_64(u64 v);
u32 memset_8byte(u64 *p, u64 data, u64 size);
u32 zxdh_smmu_cmd_tlb_sync(void);
u32 zxdh_smmu_set_print_level(u32 print_level);
u8 zxdh_smmu_get_print_level(void);
u32 zxdh_smmu_struct_init(const struct
smmu_pagetable_param *pgt_param, + struct smmu_pte_address *pte_address, + struct device *dmadev); + +u32 zxdh_smmu_mmap(struct smmu_pte_request *pte_request, struct zxdh_sc_dev *dev); + +#endif /* CMDK_MMU600_H */ \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600_inner.h b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600_inner.h new file mode 100644 index 000000000000..c75baed6abea --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_mmu600_inner.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _CMDK_MMU600_INNER_H_ +#define _CMDK_MMU600_INNER_H_ + +#include "cmdk.h" +/************************************************************************** + * Macro * + **************************************************************************/ +/* TTFmt value */ +#define PAGE_FORMAT_V8 (1) +#define PAGE_FORMAT_V7_LPAE (0) + +/* udEndian value */ +#define SMMU_TT_BIGENDIAN (1) +#define SMMU_TT_LITTLEENDIAN (0) + +// TG +#define SMMU_CD_TG0_4K (0x0) +#define SMMU_CD_TG0_16K (0x2) +#define SMMU_CD_TG0_64K (0x1) +// ips +#define SMMU_CD_IPS_32 (0x0) +#define SMMU_CD_IPS_36 (0x1) +#define SMMU_CD_IPS_40 (0x2) +#define SMMU_CD_IPS_42 (0x3) +#define SMMU_CD_IPS_44 (0x4) +#define SMMU_CD_IPS_48 (0x5) + +// manage L2/L3 pte +struct t_Map_Manage { + u64 uddMaskedVa; /* L2 mask 2M, L3 mask 4k */ + u64 uddTTBaseAddr; /* Translation table base address */ + u32 udMapValid; /* map udValid */ + u32 udSteamIndex; /* belongs to stream */ +}; +#endif diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_pagetable.c b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_pagetable.c new file mode 100644 index 000000000000..581ea8bff785 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/cmdk_pagetable.c @@ -0,0 +1,1207 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include 
"common_define.h" +#include +#include +#include +#include +#include "hal_smmu.h" +#include "cmdk_mmu600.h" +#include "cmdk_mmu600_inner.h" +#include "pub_print.h" +#include "adk_mmu600.h" +#include "../../main.h" + +/* Define missing macros for backward compatibility */ +#define MAX_PTE_RECORDS_NUM (2000) + +/* SMMU page table definitions */ +#define SMMU_L1_PER_PT_SIZE 0x100 +#define SMMU_L1_PT_ALIGN_SIZE 0x100 +#define SMMU_L1_PT_NUM 32 +#define SMMU_L1_PT_SIZE (SMMU_L1_PT_NUM * SMMU_L1_PER_PT_SIZE) + +#define SMMU_L2_PER_PT_SIZE 0x1000 +#define SMMU_L2_PT_ALIGN_SIZE 0x1000 +#define SMMU_L2_PT_NUM 32 +#define SMMU_L2_PT_SIZE (SMMU_L2_PT_NUM * SMMU_L2_PER_PT_SIZE) + +#define SMMU_L3_PER_PT_SIZE 0x1000 +#define SMMU_L3_PT_ALIGN_SIZE 0x1000 +#define SMMU_L3_PT_NUM 0x3DE +#define SMMU_L3_PT_SIZE (SMMU_L3_PT_NUM * SMMU_L3_PER_PT_SIZE) + +#define SMMU_PT_TOTAL (SMMU_L1_PT_SIZE + SMMU_L2_PT_SIZE + SMMU_L3_PT_SIZE) + +#define PTE_L2D_START_PA 0x6200630000ULL + +/* Page table masks */ +#define PAGE_MASK_4K 0xfffffffff000ULL +#define PAGE_MASK_2M 0xffffffe00000ULL +#define PAGE_MASK_1G 0xffffc0000000ULL + +#define REV_PAGE_MASK_4K 0x0000000fffULL +#define REV_PAGE_MASK_2M 0x00001fffffULL +#define REV_PAGE_MASK_1G 0x003fffffffULL + +/* Page size constants */ +#define PAGE_SIZE_4K 0x1000 +#define PAGE_SIZE_2M 0x200000 +#define PAGE_SIZE_1G 0x40000000 + +/* Page size compatibility */ +#define SMMU_PAGETABLE_PAGESIZE_1G SMMU_PAGETABLE_PAGESIZE_1GB + +/* Page table descriptor types */ +#define L1_LONG_DESCRIPTOR_FOR_BLOCK 0x1 +#define L1_LONG_DESCRIPTOR_FOR_TABLE 0x3 +#define L2_LONG_DESCRIPTOR_FOR_BLOCK 0x1 +#define L2_LONG_DESCRIPTOR_FOR_TABLE 0x3 +#define L3_LONG_DESCRIPTOR_FOR_PAGE 0x3 + +/* Page table descriptor masks and positions */ +#define L1_LONG_DESCRIPTOR_BLOCK_PA_MASK 0xFFFFC0000000ULL +#define L1_LONG_DESCRIPTOR_BLOCK_XN_POS 54 +#define L1_LONG_DESCRIPTOR_BLOCK_XN_MASK (1ULL << 54) +#define L1_LONG_DESCRIPTOR_BLOCK_S2AP_POS 6 +#define 
L1_LONG_DESCRIPTOR_BLOCK_S2AP_MASK (3ULL << 6) +#define L1_LONG_DESCRIPTOR_BLOCK_AF_POS 10 +#define L1_LONG_DESCRIPTOR_BLOCK_AF_MASK (1ULL << 10) +#define L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS 8 +#define L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK (3ULL << 8) +#define L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS 2 +#define L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK (0xFULL << 2) +#define L1_LONG_DESCRIPTOR_TABLE_PA_MASK 0xFFFFFFFFF000ULL + +#define L2_LONG_DESCRIPTOR_BLOCK_PA_MASK 0xFFFFFFE00000ULL +#define L2_LONG_DESCRIPTOR_BLOCK_XN_POS 54 +#define L2_LONG_DESCRIPTOR_BLOCK_XN_MASK (1ULL << 54) +#define L2_LONG_DESCRIPTOR_BLOCK_S2AP_POS 6 +#define L2_LONG_DESCRIPTOR_BLOCK_S2AP_MASK (3ULL << 6) +#define L2_LONG_DESCRIPTOR_BLOCK_AF_POS 10 +#define L2_LONG_DESCRIPTOR_BLOCK_AF_MASK (1ULL << 10) +#define L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS 8 +#define L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK (3ULL << 8) +#define L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS 2 +#define L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK (0xFULL << 2) +#define L2_LONG_DESCRIPTOR_TABLE_PA_MASK 0xFFFFFFFFF000ULL + +#define L3_LONG_DESCRIPTOR_BLOCK_PA_MASK 0xFFFFFFFFF000ULL +#define L3_LONG_DESCRIPTOR_BLOCK_XN_POS 54 +#define L3_LONG_DESCRIPTOR_BLOCK_XN_MASK (1ULL << 54) +#define L3_LONG_DESCRIPTOR_BLOCK_S2AP_POS 6 +#define L3_LONG_DESCRIPTOR_BLOCK_S2AP_MASK (3ULL << 6) +#define L3_LONG_DESCRIPTOR_BLOCK_AF_POS 10 +#define L3_LONG_DESCRIPTOR_BLOCK_AF_MASK (1ULL << 10) +#define L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS 8 +#define L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK (3ULL << 8) +#define L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS 2 +#define L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK (0xFULL << 2) + +/* RACFG and WACFG related macros */ +#define LONG_DESCRIPTOR_RACFG_POS 59 +#define LONG_DESCRIPTOR_RACFG_MASK (7ULL << 59) +#define LONG_DESCRIPTOR_WACFG_POS 56 +#define LONG_DESCRIPTOR_WACFG_MASK (7ULL << 56) + +/* Missing macro definitions for compatibility */ +#define MEMSET(ptr, size, val, len) memset(ptr, val, len) +#define MEMCPY(dst, size, src, len) 
memcpy(dst, src, len) +#define SMMU_POINTER_CHECK(ptr) \ + do { \ + if (!(ptr)) \ + return -EINVAL; \ + } while (0) + +/* Map manage structure sizes using existing definition */ +#define SMMU_L2_MAP_MANAGE_SIZE (SMMU_L2_PT_NUM * sizeof(struct t_Map_Manage)) +#define SMMU_L3_MAP_MANAGE_SIZE (SMMU_L3_PT_NUM * sizeof(struct t_Map_Manage)) + +/* TTB management structure */ +struct smmu_ttb_manage { + u32 stream_id; + u32 valid; + u64 phy_ttb; +}; + +/* Forward declarations */ +static u64 zxdh_smmu_host_pa_to_l2d_pa(u64 host_pa, struct zxdh_sc_dev *dev); +u32 zxdh_smmu_show_pagetable_info(struct smmu_pte_address *pte_address); +u32 zxdh_smmu_show_pte_record(u32 stream_id, u64 virt_addr, + struct smmu_pte_address *pte_address); + +/* Static variables for page table management */ +static u32 s_udV8NumL3Pta __maybe_unused; +static struct smmu_ttb_manage *g_ptTtbMng; + +/** + * zxdh_smmu_get_ttb - Get translation table base address + * @sid: Stream ID + * @pte_address: PTE address management structure + * + * Return: TTB physical address + */ +static u64 zxdh_smmu_get_ttb(u32 sid, struct smmu_pte_address *pte_address) +{ + return pte_address->cma_page_mem_base_pa + sid * SMMU_L1_PER_PT_SIZE; +} + +/** + * zxdh_smmu_get_pte_size - Get PTE size based on request + * @request_va: Requested virtual address + * @request_size: Requested size + * @ppte_size: Output PTE size + * + * Determine the optimal page table entry size (4K, 2M, or 1G blocks). 
+ * + * Return: 0 on success, negative error code on failure + */ +static u32 zxdh_smmu_get_pte_size(u64 request_va, u64 request_size, u32 *ppte_size) +{ + /* Check for 1G block alignment and size */ + if (((request_va & REV_PAGE_MASK_1G) == 0) && + (request_size >= PAGE_SIZE_1G)) { + *ppte_size = PAGE_SIZE_1G; + return 0; + } + + /* Check for 2M block alignment and size */ + if (((request_va & REV_PAGE_MASK_2M) == 0) && + (request_size >= PAGE_SIZE_2M)) { + *ppte_size = PAGE_SIZE_2M; + return 0; + } + + /* Default to 4K pages */ + *ppte_size = PAGE_SIZE_4K; + return 0; +} + +/* 把传下来的配置信息填入到 struct smmu_pte_cfg *tlb_entry_cfg 中 */ +static u32 zxdh_smmu_request_to_pte_cfg(const u32 pte_size, + const struct smmu_pte_request *pte_request, + struct smmu_pte_cfg *tlb_entry_cfg) +{ + u64 request_phy_addr = 0; + + /* param check */ + SMMU_POINTER_CHECK(tlb_entry_cfg); + SMMU_POINTER_CHECK(pte_request); + + request_phy_addr = pte_request->phy_addr; + + tlb_entry_cfg->execute_never = SMMU_PAGETABLE_EXECUTE; + tlb_entry_cfg->shareable = pte_request->shareability; + tlb_entry_cfg->access_permission = pte_request->access_perm; + tlb_entry_cfg->memory_attribute = pte_request->mem_attr; + + /* 默认设为0 */ + tlb_entry_cfg->read_allocate_cfg = 0; + tlb_entry_cfg->write_allocate_cfg = 0; + if (READ_NOALLOCATE == + (READ_NOALLOCATE & tlb_entry_cfg->memory_attribute)) { + tlb_entry_cfg->read_allocate_cfg = 3; + } + if (WRITE_NOALLOCATE == + (WRITE_NOALLOCATE & tlb_entry_cfg->memory_attribute)) { + tlb_entry_cfg->write_allocate_cfg = 3; + } + + switch (pte_size) { + case PAGE_SIZE_4K: { + tlb_entry_cfg->pa_base_addr = request_phy_addr & PAGE_MASK_4K; + tlb_entry_cfg->page_type = SMMU_PAGETABLE_PAGESIZE_4KB; /* */ + break; + } + case PAGE_SIZE_2M: { + tlb_entry_cfg->pa_base_addr = request_phy_addr & PAGE_MASK_2M; + tlb_entry_cfg->page_type = SMMU_PAGETABLE_PAGESIZE_2MB; /* */ + break; + } + case PAGE_SIZE_1G: { + tlb_entry_cfg->pa_base_addr = request_phy_addr & PAGE_MASK_1G; + 
tlb_entry_cfg->page_type = SMMU_PAGETABLE_PAGESIZE_1G; /* */ + break; + } + default: /* 默认按4k处理 */ + { + tlb_entry_cfg->pa_base_addr = request_phy_addr & PAGE_MASK_4K; + tlb_entry_cfg->page_type = SMMU_PAGETABLE_PAGESIZE_4KB; /* */ + break; + } + } + + return 0; +} + +static u64 zxdh_smmu_sram_pagetable_v2p(u64 virt_addr, + struct smmu_pte_address *pte_address) +{ + u64 phy_addr = 0; + + if ((pte_address->pagetable_vir_base_addr == 0) || + (pte_address->pagetable_cfg.pagetable_phy_addr == 0)) { + return -1; + } + + phy_addr = pte_address->pagetable_cfg.pagetable_phy_addr + virt_addr - + pte_address->pagetable_vir_base_addr; + + return phy_addr; +} + +static u64 zxdh_smmu_sram_pagetable_p2v(u64 phy_addr, + struct smmu_pte_address *pte_address) +{ + u64 virt_addr = 0; + + if ((pte_address->pagetable_vir_base_addr == 0) || + (pte_address->pagetable_cfg.pagetable_phy_addr == 0)) { + return -1; + } + + virt_addr = pte_address->pagetable_vir_base_addr + phy_addr - + pte_address->pagetable_cfg.pagetable_phy_addr; + + return virt_addr; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_get_l1_page_base_addr + * 功能描述: 获取L1 pte base address + * 输入参数: + * u64 uddPgTblAddr : sid对应的页表基地址 + * u64 virt_addr : VA + * 输出参数: + * 返 回 值:L1 PTE 地址 + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +static u64 zxdh_smmu_get_l1_descriptor_va(u64 udd_l1_ttb_va, u64 request_va) +{ + return (udd_l1_ttb_va + ((request_va & 0xffc0000000ULL) >> 27)); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_get_l1_page_base_addr + * 功能描述: 获取L2 pte base address,即获取L2 descriptor + * 输入参数: + * u64 uddPgTblAddr : sid对应的页表基地址 + * u64 request_va : VA + * 输出参数: + * 返 回 值:L1 PTE 地址 (VA) + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 
2023/05/29 V1.0 guoll + ***************************************************************************/ +static u64 +zxdh_smmu_get_l2_descriptor_va(struct zxdh_sc_dev *dev, u32 sid, u64 request_va, + struct smmu_pte_address *pte_address) +{ + u32 i = 0; + u64 level_mask = 0; + u32 level_offset = 0; + u64 l2_nth_ttb_va = 0; + u64 udd_l2_start_ttb_va = 0; + struct t_Map_Manage *l2_map_manage = NULL; + u32 *used_l2_ttb_num = NULL; + /* 记录 l2 已申请使用的 ttb 数量 */ + + /* check param */ + SMMU_POINTER_CHECK(pte_address); + SMMU_POINTER_CHECK(pte_address->map_manage_addr); + + // 1G-1: 11 1111 1111 1111 1111 1111 1111 1111 + // 2M-1: 1 1111 1111 1111 1111 1111 + // ~(2M-1): 11 1111 1110 0000 0000 0000 0000 0000 + level_mask = 0x3fe00000ull; /* (1G-1)&(~(2M-1)) */ + level_offset = 18; /* div by 2M, mul 8 */ + + /* l2 map manage struct */ + l2_map_manage = + (struct t_Map_Manage *)(pte_address->map_manage_addr); + /* l2 start ttb 的内存起始地址 */ + udd_l2_start_ttb_va = pte_address->pagetable_vir_base_addr + + SMMU_L1_PT_SIZE + + pte_address->l2d_smmu_l2_offset; + + used_l2_ttb_num = &dev->s_udV8NumL2Pta; + + // 先在已用的L2页表中找,是否在已存在的页表中,如果有,就不用再申请新的页表了,直接返回对应页表项的地址 + // L1 的每一个页表项能够映射1G的空间 + /* if the 1G which this va corresponds to has been allocated, find the existing address */ + for (i = 0; i < *used_l2_ttb_num; i++) { + if (((request_va & PAGE_MASK_1G) == + l2_map_manage[i].uddMaskedVa) && + l2_map_manage[i].udMapValid && + (sid == l2_map_manage[i].udSteamIndex)) { + break; + } + } + + /* if not, allocate 4K space used for L2 page table for this 1G */ + if (i == *used_l2_ttb_num) { + /* 使用一个新的 L2 ttb */ + if (*used_l2_ttb_num < SMMU_L2_PT_NUM) { + /* 得到第 n 个L2页表的起始地址 即得到该1G对应的2M页表的基地址 */ + l2_nth_ttb_va = + udd_l2_start_ttb_va + i * SMMU_L2_PER_PT_SIZE; + } else { + return 0; + } + + l2_map_manage[i].udMapValid = 1; + l2_map_manage[i].udSteamIndex = sid; + l2_map_manage[i].uddTTBaseAddr = l2_nth_ttb_va; + l2_map_manage[i].uddMaskedVa = request_va & PAGE_MASK_1G; + + *used_l2_ttb_num 
+= 1; + pte_address->l2_pagetable_num = *used_l2_ttb_num; + } + + /* 返回第 n 张 l2 ttb 的 pte base address,即获取 l2 descriptor */ + return (l2_map_manage[i].uddTTBaseAddr + + (u64)((request_va & level_mask) >> level_offset)); +} + +static u64 zxdh_smmu_get_l3_descriptor(u32 sid, u64 request_va, + struct smmu_pte_address *pte_address) +{ + u32 i = 0; + u64 level_mask = 0; + u32 level_offset = 0; + u64 l3_nth_ttb_va = 0; + u64 udd_l3_start_ttb_va = 0; + struct t_Map_Manage *l3_manage_map = NULL; + u32 *used_l3_ttb_num = NULL; + /* 记录 l3 已申请使用的 ttb 数量 */ + static u32 s_udV8NumL3Pta; + + /* check param */ + SMMU_POINTER_CHECK(pte_address); + SMMU_POINTER_CHECK(pte_address->map_manage_addr); + + // 2M-1: 1 1111 1111 1111 1111 1111 + // 4K-1: 1111 1111 1111 + //~(4K-1): 1 1111 1111 0000 0000 0000 + level_mask = 0x001ff000ull; /* (2M-1)&(~(4K-1)) */ + level_offset = 9; /* div 4K, mul 8 */ + + /* l3 map manage struct */ + l3_manage_map = + (struct t_Map_Manage *)(pte_address->map_manage_addr + + SMMU_L2_MAP_MANAGE_SIZE); + /* l3 start ttb 的内存起始地址 */ + udd_l3_start_ttb_va = pte_address->pagetable_vir_base_addr + + SMMU_L1_PT_SIZE + SMMU_L2_PT_SIZE; + + used_l3_ttb_num = &s_udV8NumL3Pta; + + /* the same logic as get L2 */ + for (i = 0; i < *used_l3_ttb_num; i++) { + // 此 L3 页表(每块4K)是给哪个 L2 的 2M 使用的 + if (((request_va & PAGE_MASK_2M) == + l3_manage_map[i].uddMaskedVa) && + l3_manage_map[i].udMapValid && + (sid == l3_manage_map[i].udSteamIndex)) { + break; + } + } + + if (i == *used_l3_ttb_num) { + /* 使用一个新的 l3 ttb */ + if (*used_l3_ttb_num < SMMU_L3_PT_NUM) { + /* 得到第 n 个 l3 页表的起始地址 即得到该 2M 对应的 4k 页表的基地址 */ + l3_nth_ttb_va = + udd_l3_start_ttb_va + i * SMMU_L3_PER_PT_SIZE; + } else { + return 0; + } + + l3_manage_map[i].udMapValid = 1; + l3_manage_map[i].udSteamIndex = sid; + l3_manage_map[i].uddTTBaseAddr = l3_nth_ttb_va; + l3_manage_map[i].uddMaskedVa = request_va & PAGE_MASK_2M; + + *used_l3_ttb_num += 1; + pte_address->l3_pagetable_num = *used_l3_ttb_num; + } + /* 返回第 n 张 l3 
ttb 的 pte base address,即获取 l3 descriptor */ + return (l3_manage_map[i].uddTTBaseAddr + + (u64)((request_va & level_mask) >> level_offset)); +} + +/************************************************************************** + * 函数名称: zxdh_smmu_host_pa_to_l2d_pa + * 功能描述: + * 根据偏移,转换成risc_v l2d 上的 pa + * 输入参数: + * host_pa : host上的pa + * dev + * 输出参数: + * 返 回 值: + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +u64 zxdh_smmu_host_pa_to_l2d_pa(u64 host_pa, struct zxdh_sc_dev *dev) +{ + u64 udd_offset = 0; + u64 udd_l2d_pa = 0; + + /* check param */ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(dev->pte_address); + SMMU_POINTER_CHECK(dev->pte_address->cma_page_mem_base_pa); + + if (host_pa < dev->pte_address->cma_page_mem_base_pa) + return -1; + + udd_offset = host_pa - dev->pte_address->cma_page_mem_base_pa; + udd_l2d_pa = dev->pte_l2d_startpa + udd_offset; + return udd_l2d_pa; +} + +/************************************************************************** + * 函数名称: zxdh_smmu_write_l1_page_table_entry + * 功能描述: 配置 L1 PTE表项 + * 如果是块类型页表项: + * 配置最终的输出地址的高位; + * 配置高位属性 + * 配置低位属性 + * 如果是页表类型的页表项: + * 配置下一级页表的地址; + * 配置为页表类型的页表项 + * 输入参数: + * + * 输出参数: + * 返 回 值: + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/05/29 V1.0 guoll + ***************************************************************************/ +static u32 zxdh_smmu_write_l1_pagetable_entry( + const u64 udd_l1_descriptor_va, + const struct smmu_pte_cfg *const mmu_pte_cfg, + struct zxdh_sc_dev *dev) +{ + u64 udd_l2d_pa = 0; + u64 physical_address = 0; + u64 l1_pte_offset = 0; + u64 *l1_desc_vaddr = NULL; + u64 *tmp_desc_vaddr = NULL; + u64 udd_tmp_l1_descriptor_value = 0; + u64 udd_l2d_tmp_l1_descriptor_value = 0; + + struct zxdh_src_copy_dest copy_dest = {}; + + /* check param */ + SMMU_POINTER_CHECK(dev); + 
SMMU_POINTER_CHECK(mmu_pte_cfg); + + if (mmu_pte_cfg->page_format != PAGE_FORMAT_V8) + return -1; + + /* pte base address */ + l1_desc_vaddr = (u64 *)udd_l1_descriptor_va; + *l1_desc_vaddr = 0; + + /* physical block base address or next level page table address */ + physical_address = mmu_pte_cfg->pa_base_addr; + + /* block descriptor */ + if (mmu_pte_cfg->page_type == SMMU_PAGETABLE_PAGESIZE_1G) { + udd_tmp_l1_descriptor_value = + ((physical_address & + L1_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((mmu_pte_cfg->execute_never + << L1_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L1_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((mmu_pte_cfg->access_permission) + << L1_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L1_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L1_LONG_DESCRIPTOR_BLOCK_AF_POS) & + L1_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((mmu_pte_cfg->shareable) + << L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L1_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((mmu_pte_cfg->memory_attribute) + << L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L1_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L1_LONG_DESCRIPTOR_FOR_BLOCK) | + (((mmu_pte_cfg->read_allocate_cfg) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((mmu_pte_cfg->write_allocate_cfg) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + + udd_l2d_tmp_l1_descriptor_value = udd_tmp_l1_descriptor_value; + + } + /* page table */ + else if (SMMU_PAGETABLE_PAGESIZE_2MB == + mmu_pte_cfg->page_type || + SMMU_PAGETABLE_PAGESIZE_4KB == + mmu_pte_cfg->page_type) { + udd_tmp_l1_descriptor_value = + ((physical_address & + L1_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L1_LONG_DESCRIPTOR_FOR_TABLE)); + + udd_l2d_pa = + zxdh_smmu_host_pa_to_l2d_pa(physical_address, dev); + udd_l2d_tmp_l1_descriptor_value = + ((udd_l2d_pa & L1_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L1_LONG_DESCRIPTOR_FOR_TABLE)); + } + + /* default little endian */ + if (mmu_pte_cfg->endian == SMMU_TT_BIGENDIAN) { + udd_tmp_l1_descriptor_value = + uswap_64(udd_tmp_l1_descriptor_value); + 
udd_l2d_tmp_l1_descriptor_value = + uswap_64(udd_l2d_tmp_l1_descriptor_value); + } + + *l1_desc_vaddr = udd_tmp_l1_descriptor_value; + + memset((void *)dev->pte_address->pte_temp_vir_addr, 0, 8); + tmp_desc_vaddr = (u64 *)dev->pte_address->pte_temp_vir_addr; + *tmp_desc_vaddr = udd_l2d_tmp_l1_descriptor_value; + + l1_pte_offset = + udd_l1_descriptor_va - dev->pte_address->cma_page_mem_base_va; + + /* cpy data from host to l2d */ + copy_dest.src = dev->pte_address->pte_temp_phy_addr; + copy_dest.len = 8; + copy_dest.dest = dev->pte_l2d_startpa + l1_pte_offset; + + dev->cqp->process_config_pte_table(dev, copy_dest); + + return 0; +} + +static u32 zxdh_smmu_write_l2_pagetable_entry( + u32 sid, const u64 l2_desc_va, + const struct smmu_pte_cfg *const mmu_pte_cfg, + struct zxdh_sc_dev *dev) +{ + u64 physical_address = 0; + u64 l2_desc_value = 0; + u64 l2d_l2_desc_offset = 0; + u64 *pull_tmp_l2_descriptor_va = NULL; + u64 *pull_to_l2d_descriptor_va = NULL; + u64 udd_l2d_l2_descriptor_value = 0; + + static u64 dma_to_l2d_count; + + struct zxdh_src_copy_dest copy_dest = {}; + + /* param check */ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(mmu_pte_cfg); + SMMU_POINTER_CHECK(dev->pte_address->pte_temp_vir_addr); + + if (mmu_pte_cfg->page_format != PAGE_FORMAT_V8) + return -1; + + /* page table base address */ + pull_tmp_l2_descriptor_va = (u64 *)l2_desc_va; + *pull_tmp_l2_descriptor_va = 0; + + /* block base physical address, or next level page table base address */ + physical_address = mmu_pte_cfg->pa_base_addr; + + /* block descriptor */ + if (mmu_pte_cfg->page_type == SMMU_PAGETABLE_PAGESIZE_2MB) { + l2_desc_value = + ((physical_address & + L2_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((mmu_pte_cfg->execute_never + << L2_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L2_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((mmu_pte_cfg->access_permission) + << L2_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L2_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L2_LONG_DESCRIPTOR_BLOCK_AF_POS) & + 
L2_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((mmu_pte_cfg->shareable) + << L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L2_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((mmu_pte_cfg->memory_attribute) + << L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L2_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L2_LONG_DESCRIPTOR_FOR_BLOCK) | + (((mmu_pte_cfg->read_allocate_cfg) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((mmu_pte_cfg->write_allocate_cfg) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + + udd_l2d_l2_descriptor_value = l2_desc_value; + } + /* page table */ + else if (SMMU_PAGETABLE_PAGESIZE_4KB == + mmu_pte_cfg->page_type) { + l2_desc_value = ((physical_address & + L2_LONG_DESCRIPTOR_TABLE_PA_MASK) | + (L2_LONG_DESCRIPTOR_FOR_TABLE)); + + udd_l2d_l2_descriptor_value = ( + // 新版本方案 + (physical_address & 0x3FFFFFFFFF) // bit[37:0] + | ((sid & 0xFULL) << 42) // bit[46:42] + | (1ULL << 47) // bit[51:47] + | (L2_LONG_DESCRIPTOR_FOR_TABLE)); + } + + /* default little endian */ + if (mmu_pte_cfg->endian == SMMU_TT_BIGENDIAN) { + l2_desc_value = uswap_64(l2_desc_value); + udd_l2d_l2_descriptor_value = + uswap_64(udd_l2d_l2_descriptor_value); + } + + *pull_tmp_l2_descriptor_va = l2_desc_value; + + memset((void *)dev->pte_address->pte_temp_vir_addr, 0, 8); + pull_to_l2d_descriptor_va = (u64 *)dev->pte_address->pte_temp_vir_addr; + *pull_to_l2d_descriptor_va = udd_l2d_l2_descriptor_value; + + dma_to_l2d_count++; + + // ======================================================================= + // 计算偏移量 + // ======================================================================= + l2d_l2_desc_offset = + l2_desc_va - dev->pte_address->cma_page_mem_base_va; + + /* cpy data from host to l2d */ + copy_dest.src = dev->pte_address->pte_temp_phy_addr; + copy_dest.len = 8; + copy_dest.dest = dev->pte_l2d_startpa + l2d_l2_desc_offset; + dev->cqp->process_config_pte_table(dev, copy_dest); + + return 0; +} + +static u32 zxdh_smmu_write_l3_pagetable_entry( + const 
u64 l3_desc_va, + const struct smmu_pte_cfg *const mmu_pte_cfg, + struct smmu_pte_address *pte_address) +{ + u64 physical_address = 0; + u64 *l3_desc_vaddr = NULL; + u64 udd_tmp_l3_descriptor_value = 0; + + /* param check */ + SMMU_POINTER_CHECK(pte_address); + SMMU_POINTER_CHECK(mmu_pte_cfg); + + if (mmu_pte_cfg->page_format != PAGE_FORMAT_V8) + return -1; + + /* pte address */ + l3_desc_vaddr = (u64 *)l3_desc_va; + *l3_desc_vaddr = 0; + + physical_address = mmu_pte_cfg->pa_base_addr; + + if (mmu_pte_cfg->page_type == SMMU_PAGETABLE_PAGESIZE_4KB) { + udd_tmp_l3_descriptor_value = + ((physical_address & + L3_LONG_DESCRIPTOR_BLOCK_PA_MASK) | + ((mmu_pte_cfg->execute_never + << L3_LONG_DESCRIPTOR_BLOCK_XN_POS) & + L3_LONG_DESCRIPTOR_BLOCK_XN_MASK) | + (((mmu_pte_cfg->access_permission) + << L3_LONG_DESCRIPTOR_BLOCK_S2AP_POS) & + L3_LONG_DESCRIPTOR_BLOCK_S2AP_MASK) | + (((0x1) << L3_LONG_DESCRIPTOR_BLOCK_AF_POS) & + L3_LONG_DESCRIPTOR_BLOCK_AF_MASK) | + (((mmu_pte_cfg->shareable) + << L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_POS) & + L3_LONG_DESCRIPTOR_BLOCK_SH1SH0_MASK) | + (((mmu_pte_cfg->memory_attribute) + << L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_POS) & + L3_LONG_DESCRIPTOR_BLOCK_MEMATTR_MASK) | + (L3_LONG_DESCRIPTOR_FOR_PAGE) | + (((mmu_pte_cfg->read_allocate_cfg) + << LONG_DESCRIPTOR_RACFG_POS) & + LONG_DESCRIPTOR_RACFG_MASK) | + (((mmu_pte_cfg->write_allocate_cfg) + << LONG_DESCRIPTOR_WACFG_POS) & + LONG_DESCRIPTOR_WACFG_MASK)); + } + + /* default little endian */ + if (mmu_pte_cfg->endian == SMMU_TT_BIGENDIAN) { + udd_tmp_l3_descriptor_value = + uswap_64(udd_tmp_l3_descriptor_value); + } + + *l3_desc_vaddr = udd_tmp_l3_descriptor_value; + + return 0; +} + +static u32 zxdh_smmu_set_l1_pte_entry(u64 udd_l1_descriptor_va, + struct smmu_pte_cfg *tlb_entry_cfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(tlb_entry_cfg); + + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, tlb_entry_cfg, + dev); + + return 0; +} + +static u32 
zxdh_smmu_set_l2_pte_entry(u64 udd_l1_descriptor_va, + u64 l2_desc_va, u32 sid, + struct smmu_pte_cfg *tlb_entry_cfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(tlb_entry_cfg); + + /* write L2 block descriptor */ + zxdh_smmu_write_l2_pagetable_entry(sid, l2_desc_va, + tlb_entry_cfg, dev); + + /* create Level1 page table config struct, get L2 pagetable base phyaddr */ + tlb_entry_cfg->pa_base_addr = zxdh_smmu_sram_pagetable_v2p( + l2_desc_va, dev->pte_address); + if (tlb_entry_cfg->pa_base_addr == 0) + return -1; + + /* write L1 page table entry */ + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, tlb_entry_cfg, + dev); + + return 0; +} + +static u32 zxdh_smmu_set_l3_pte_entry(u64 udd_l1_descriptor_va, + u64 l2_desc_va, + u64 l3_desc_va, u64 sid, + u64 request_va, + struct smmu_pte_cfg *tlb_entry_cfg, + struct zxdh_sc_dev *dev) +{ + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(tlb_entry_cfg); + + /* write L3 page table descriptor */ + zxdh_smmu_write_l3_pagetable_entry(l3_desc_va, tlb_entry_cfg, + dev->pte_address); + + /* structure L2 page table descriptor config */ + tlb_entry_cfg->pa_base_addr = zxdh_smmu_sram_pagetable_v2p( + l3_desc_va, dev->pte_address); + + /* write L2 page table descriptor */ + // 因为L2 PTE中要写L3页表的基地址,所以,这里应该拿L3页表地址算L2 PTE偏移 + zxdh_smmu_write_l2_pagetable_entry(sid, l2_desc_va, + tlb_entry_cfg, dev); + + /* structure L1 page table descriptor config */ + tlb_entry_cfg->pa_base_addr = zxdh_smmu_sram_pagetable_v2p( + l2_desc_va, dev->pte_address); + + /* write L1 page table descriptor */ + zxdh_smmu_write_l1_pagetable_entry(udd_l1_descriptor_va, tlb_entry_cfg, + dev); + + return 0; +} + +static u32 zxdh_smmu_set_pte_entry(u64 udd_l1_ttb_va, u64 request_va, + u64 request_pa, u32 sid, + struct smmu_pte_cfg *tlb_entry_cfg, + struct zxdh_sc_dev *dev) +{ + u64 udd_l1_descriptor_va = 0; + u64 l2_desc_va = 0; + u64 l3_desc_va = 0; + + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(tlb_entry_cfg); + + if 
(dev->pte_address->pagetable_vir_base_addr == 0) { + pr_info("dev->pte_address->pagetable_vir_base_addr == 0\n"); + return -1; + } + + switch (tlb_entry_cfg->page_type) { + case SMMU_PAGETABLE_PAGESIZE_4KB: { + l3_desc_va = zxdh_smmu_get_l3_descriptor( + sid, request_va, dev->pte_address); + l2_desc_va = zxdh_smmu_get_l2_descriptor_va( + dev, sid, request_va, dev->pte_address); + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, request_va); + + if (!l3_desc_va || !l2_desc_va || + !udd_l1_descriptor_va) { + pr_info("l3_desc_va|l2_desc_va|udd_l1_descriptor_va == 0\n"); + return -1; + } + + zxdh_smmu_set_l3_pte_entry(udd_l1_descriptor_va, + l2_desc_va, + l3_desc_va, sid, + request_va, tlb_entry_cfg, dev); + break; + } + case SMMU_PAGETABLE_PAGESIZE_2MB: { + l3_desc_va = 0; + l2_desc_va = zxdh_smmu_get_l2_descriptor_va( + dev, sid, request_va, dev->pte_address); + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, request_va); + + if (!l2_desc_va || !udd_l1_descriptor_va) { + pr_info("l2_desc_va|udd_l1_descriptor_va == 0\n"); + return -1; + } + + zxdh_smmu_set_l2_pte_entry(udd_l1_descriptor_va, + l2_desc_va, sid, + tlb_entry_cfg, dev); + break; + } + case SMMU_PAGETABLE_PAGESIZE_1G: { + l3_desc_va = 0; + l2_desc_va = 0; + udd_l1_descriptor_va = zxdh_smmu_get_l1_descriptor_va( + udd_l1_ttb_va, request_va); + + if (!udd_l1_descriptor_va) { + pr_info("udd_l1_descriptor_va == 0\n"); + return -1; + } + + zxdh_smmu_set_l1_pte_entry(udd_l1_descriptor_va, tlb_entry_cfg, + dev); + break; + } + default: { + return -1; + } + } + + return 0; +} + +u32 zxdh_smmu_show_pagetable_info(struct smmu_pte_address *pte_address) +{ + pr_info("pagetable info: -------------------------------------------------------------------\n"); + pr_info("pagetable config.pagetable_phy_addr = 0x%llx\n", + pte_address->pagetable_cfg.pagetable_phy_addr); + pr_info("pagetable config.pagetable_vir_addr = 0x%llx\n", + pte_address->pagetable_cfg.pagetable_vir_addr); + 
pr_info("pagetable config.pagetable_size = 0x%x\n", + pte_address->pagetable_cfg.pagetable_size); + pr_info("pagetable config.ex_pagetable_phy_addr = 0x%llx\n", + pte_address->pagetable_cfg.ex_pagetable_phy_addr); + pr_info("pagetable config.ex_pagetable_size = 0x%x\n", + pte_address->pagetable_cfg.ex_pagetable_size); + pr_info("max L1 pagetable num = %d, used = %d\n", + SMMU_L1_PT_NUM, pte_address->l1_pagetable_num); + pr_info("max L2 pagetable num = %d, used = %d\n", + SMMU_L2_PT_NUM, pte_address->l2_pagetable_num); + pr_info("max L3 pagetable num = %d, used = %d\n", + SMMU_L3_PT_NUM, pte_address->l3_pagetable_num); + pr_info("pte records num = %d, fail record = %d, max capacity = %d\n", + pte_address->pte_record_num, + pte_address->pte_fail_record_num, MAX_PTE_RECORDS_NUM); + + return 0; +} +EXPORT_SYMBOL(zxdh_smmu_show_pagetable_info); + +u32 zxdh_smmu_show_pte_record(u32 stream_id, u64 virt_addr, + struct smmu_pte_address *pte_address) +{ + u32 i = 0; + u64 virt_addr_tmp; + u64 ttb_addr; + + for (; i < pte_address->pte_record_num; i++) { + if (pte_address->pte_records[i].valid) { + // print all records + if (virt_addr == 0xffffffffffffffff) { + virt_addr_tmp = pte_address->pte_records[i].virt_addr; + } else if (virt_addr >= pte_address->pte_records[i] + .virt_addr && + virt_addr < (pte_address->pte_records[i].virt_addr + + pte_address->pte_records[i] + .size)) { + virt_addr_tmp = virt_addr; + } else { + continue; + } + + ttb_addr = zxdh_smmu_get_ttb(stream_id, pte_address); + if (ttb_addr == -1 || ttb_addr == 0) + return -1; + //zxdh_smmu_get_l1_descriptor_va( + // zxdh_smmu_sram_pagetable_p2v(ttb_addr, + // pte_address), + // virt_addr_tmp); + if (pte_address->pte_records[i].size == + PAGE_SIZE_2M) { + //zxdh_smmu_get_l2_descriptor_va(dev, stream_id, virt_addr_tmp, + // pte_address);这里没有用到,暂时注释掉 + } + + if (pte_address->pte_records[i].size == + PAGE_SIZE_4K) { + //zxdh_smmu_get_l2_descriptor_va(dev, stream_id, virt_addr_tmp, + // pte_address);这里没有用到,暂时注释掉 + + 
zxdh_smmu_get_l3_descriptor(stream_id, virt_addr_tmp, + pte_address); + } + } + } + return 0; +} +EXPORT_SYMBOL(zxdh_smmu_show_pte_record); + +struct zxdh_smmu_host_risc_msgs { + u32 sid; + u32 va; +}; + +/************************************************************************** + * 函数名称: zxdh_smmu_mmap + * 功能描述: 在host上 + * 写入pte,实现smmu虚实地址映射 + * 输入参数:pte_request 地址映射信息 + * dev 设备信息 + * 输出参数: + * 返 回 值: 0 / -1 + * 其它说明: + * + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +u32 zxdh_smmu_mmap(struct smmu_pte_request *pte_request, struct zxdh_sc_dev *dev) +{ + u32 ret = 0; + u32 pte_size = 0; + u64 l1_ttb_pa = 0; + u64 request_va = 0; + u64 request_pa = 0; + u64 request_size = 0; + struct smmu_pte_cfg tlb_entry_cfg = { 0 }; + u32 mmap_count = 0; + + SMMU_POINTER_CHECK(dev); + SMMU_POINTER_CHECK(pte_request); + + request_va = pte_request->vir_addr; + request_pa = pte_request->phy_addr; + request_size = pte_request->size; + + if ((request_pa & REV_PAGE_MASK_4K) || + (request_va & REV_PAGE_MASK_4K) || + (request_size & REV_PAGE_MASK_4K)) { + return -1; + } + + tlb_entry_cfg.endian = SMMU_TT_LITTLEENDIAN; /* endian cfg */ + tlb_entry_cfg.page_format = PAGE_FORMAT_V8; + + /* pa */ + l1_ttb_pa = + zxdh_smmu_get_ttb(pte_request->stream_id, dev->pte_address); + if (l1_ttb_pa == -1) + return -1; + + while (request_size > 0) { + mmap_count++; + + // if (10 == mmap_count) + // { + // g_ucMmu600PrintModuleId = 8; + // } + + // 判断是否可以使用块类型的页表项(优先使用块类型的页表项) + // 1G 2M 4k + pte_size = PAGE_SIZE_4K; // Default to 4K + zxdh_smmu_get_pte_size(request_va, request_size, &pte_size); + + // pte_size = PAGE_SIZE_4K; + + zxdh_smmu_request_to_pte_cfg(pte_size, pte_request, + &tlb_entry_cfg); + + ret = zxdh_smmu_set_pte_entry( + zxdh_smmu_sram_pagetable_p2v(l1_ttb_pa, + dev->pte_address), + request_va, request_pa, + pte_request->stream_id, &tlb_entry_cfg, dev); + + if 
(ret != 0) + return -1; + + request_va += pte_size; + request_pa += pte_size; + pte_request->phy_addr = request_pa; + if (request_size < pte_size) { + /* avoid negative value */ + request_size = 0; + } else { + request_size -= pte_size; + } + } +#ifndef BSP_IS_PC_UT + wmb(); +#endif + + return 0; +} +EXPORT_SYMBOL(zxdh_smmu_mmap); + +/************************************************************************** + * 函数名称: zxdh_smmu_struct_init + * 功能描述: 初始化mmu600页表相关数据结构 + * + * 输入参数: struct stPagetableParam *pgt_param : 页表初始化参数 + * struct zxdh_sc_dev * + * 输出参数: + * 返 回 值: 0 / -1 + * 其它说明: + * 修改日期 版本号 修改人 + * ----------------------------------------------- + * 2023/04/26 V1.0 guoll + ***************************************************************************/ +u32 zxdh_smmu_struct_init(const struct smmu_pagetable_param *pgt_param, + struct smmu_pte_address *pte_address, + struct device *dma_dev) +{ + void *vaddr = NULL; + u32 size = 0; + u32 l1_pt_index = 0; + u32 page_table_size = 0; + + SMMU_POINTER_CHECK(pgt_param); + SMMU_POINTER_CHECK(pte_address); + + // ========== ========== ========== ========== + // 页表初始化参数值校验 + // ========== ========== ========== ========== + if (pgt_param->pagetable_size == 0 || + pgt_param->l1_pagetable_num == 0 || + pgt_param->l2_pagetable_num == 0 || + pgt_param->l3_pagetable_num == 0) { + return -1; + } + + page_table_size = pgt_param->l1_pagetable_num * SMMU_L1_PER_PT_SIZE + + pgt_param->l2_pagetable_num * SMMU_L2_PER_PT_SIZE + + pgt_param->l3_pagetable_num * SMMU_L3_PER_PT_SIZE; + if (page_table_size > pgt_param->pagetable_size) + return -1; + + memcpy(&(pte_address->pagetable_cfg), pgt_param, sizeof(struct smmu_pagetable_param)); + + if (pte_address->cma_page_mem_base_va == 0) { + // ========== ========== ========== ========== + // use reserve mem + // ========== ========== ========== ========== + + // self: 我的理解,这个是自己打桩测试,正是代码不需要走这个分支 + SMMU_POINTER_CHECK(pgt_param->pagetable_phy_addr); + + // Mmap page table space + vaddr = (void 
*)ioremap(pgt_param->pagetable_phy_addr, + pgt_param->pagetable_size); + + memset_8byte(vaddr, 0, pgt_param->pagetable_size); + + pte_address->pagetable_vir_base_addr = (u64)vaddr; + } else { + // ========== ========== ========== ========== + // use cma mem + // ========== ========== ========== ========== + + // ========== ========== ========== ========== + // 对齐待定 self: 怎么对齐?这里暂时先注销 + // ========== ========== ========== ========== + if (pgt_param->pagetable_phy_addr & + (SMMU_L1_PT_ALIGN_SIZE - 1)) { + return -1; + } + + pte_address->pagetable_vir_base_addr = + pte_address->cma_page_mem_base_va; + } + + // ========== ========== ========== ========== + // allocate g_map_manage_addr + // T_MAP_MANGE共有1+512个,分别用来记录1个同一L1下的L2的首地址,512个同一L2下的L3的首地址。 + // ========== ========== ========== ========== + size = SMMU_L2_MAP_MANAGE_SIZE + SMMU_L3_MAP_MANAGE_SIZE; + pte_address->map_manage_addr = (u64)kmalloc(size, GFP_KERNEL); + SMMU_POINTER_CHECK(pte_address->map_manage_addr); + MEMSET((void *)pte_address->map_manage_addr, size, 0, size); + + // ========== ========== ========== ========== + // allocate g_pte_records + // ========== ========== ========== ========== + size = sizeof(struct smmu_pte_record) * MAX_PTE_RECORDS_NUM; + pte_address->pte_records = + (struct smmu_pte_record *)kmalloc(size, GFP_KERNEL); + SMMU_POINTER_CHECK(pte_address->pte_records); + MEMSET(pte_address->pte_records, size, 0, size); + + // ========== ========== ========== ========== ===== + // 分配8字节空间存储每一次下发PTE的数据,作为中转 + // ========== ========== ========== ========== ===== + // dma对源地址有对齐要求,必须32byte对齐 + // kmalloc申请到的va是根据传入的申请大小决定对齐的 + pte_address->pte_temp_vir_addr = + (u64)dma_alloc_coherent(dma_dev, SMMU_L1_PER_PT_SIZE * 4, + &pte_address->pte_temp_phy_addr, GFP_KERNEL); + SMMU_POINTER_CHECK(pte_address->pte_temp_vir_addr); + MEMSET((void *)pte_address->pte_temp_vir_addr, + SMMU_L1_PER_PT_SIZE * 4, 0, SMMU_L1_PER_PT_SIZE * 4); + + // 
====================================================================== + // init g_ptTtbMng + // self: TtbMmg用来管理L1基地址 + // TtbMng用来管理L1的信息,包括L1的基地址、该L1表是否有效、对应的SID + // ====================================================================== + size = sizeof(struct smmu_ttb_manage) * SMMU_L1_PT_NUM; + g_ptTtbMng = (struct smmu_ttb_manage *)kmalloc(size, GFP_KERNEL); + SMMU_POINTER_CHECK(g_ptTtbMng); + MEMSET(g_ptTtbMng, size, 0, size); + + // 这里只负责把用到的TTB配置好,具体哪个sid使用,在cmdk进行配置,即由用户自己根据业务需求自己配置 + // 只需要把L1的TTB配置了就可以了,因为L1是确定的,L2 L3共用一份 + for (l1_pt_index = 0; l1_pt_index < SMMU_L1_PT_NUM; l1_pt_index++) { + g_ptTtbMng[l1_pt_index].phy_ttb = + (pte_address->pagetable_cfg.pagetable_phy_addr + + l1_pt_index * SMMU_L1_PER_PT_SIZE); + } + + return 0; +} diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/common_define.h b/drivers/infiniband/hw/zrdma/smmu/kernel/common_define.h new file mode 100644 index 000000000000..1e07c734b5a5 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/common_define.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _SMMU_COMMON_DEFINE_H_ +#define _SMMU_COMMON_DEFINE_H_ + +#include +#include +#include +#include + +#include "cmdk.h" + +/* Endianness detection */ +#ifdef __BIG_ENDIAN +#define SMMU_BIG_ENDIAN +#endif + +/* Debug print level control */ +extern u8 smmu_print_module_id; + +#define smmu_print(level, fmt, ...) 
\ + do { \ + if ((level) >= smmu_print_module_id) \ + pr_info(fmt, ##__VA_ARGS__); \ + } while (0) + +/* Pointer validation macro */ +#define SMMU_CHECK_PTR(ptr) \ + do { \ + if (unlikely(!(ptr))) { \ + pr_err("%s: Invalid pointer\n", __func__); \ + return -EINVAL; \ + } \ + } while (0) + +/* Byte swap functions */ +u32 smmu_swap32(u32 val); +u64 smmu_swap64(u64 val); + +#ifdef SMMU_BIG_ENDIAN +#define SMMU_CPU_TO_LE32(x) smmu_swap32(x) +#define SMMU_CPU_TO_LE64(x) smmu_swap64(x) +#define SMMU_LE32_TO_CPU(x) smmu_swap32(x) +#define SMMU_LE64_TO_CPU(x) smmu_swap64(x) +#else +#define SMMU_CPU_TO_LE32(x) (x) +#define SMMU_CPU_TO_LE64(x) (x) +#define SMMU_LE32_TO_CPU(x) (x) +#define SMMU_LE64_TO_CPU(x) (x) +#endif + +#endif /* _SMMU_COMMON_DEFINE_H_ */ \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/hal_smmu.h b/drivers/infiniband/hw/zrdma/smmu/kernel/hal_smmu.h new file mode 100644 index 000000000000..c1e044c451be --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/hal_smmu.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +/** + * @file hal_smmu.h + * @brief SMMU hardware abstraction layer definitions + * @details ARM v8 page table format definitions for MMU600 + */ + +#ifndef _ZXDH_HAL_SMMU_H_ +#define _ZXDH_HAL_SMMU_H_ + +#include + +/* + * Memory attribute definitions for SMMU page table entries + * + * Abbreviations: + * SO: Strongly-ordered memory + * DE: Device memory + * NM: Normal memory + * IWT: Inner cache, write-through + * OWT: Outer cache, write-through + * INC: Inner non-cacheable + * ONC: Outer non-cacheable + * IWB: Inner cache, write-back + * OWB: Outer cache, write-back + */ + +/* Bit width mask definitions */ +#define SMMU_BIT_MASK(bits) ((1ULL << (bits)) - 1) + +/* Common bit masks */ +#define SMMU_BW1 0x00000001ULL +#define SMMU_BW2 0x00000003ULL +#define SMMU_BW3 0x00000007ULL +#define SMMU_BW4 0x0000000fULL +#define SMMU_BW8 
0x000000ffULL +#define SMMU_BW16 0x0000ffffULL +#define SMMU_BW32 0xffffffffULL +#define SMMU_BW48 0x0000ffffffffffffULL +#define SMMU_BW52 0x000fffffffffffffULL + +/* Page table entry field positions and masks */ +#define SMMU_PTE_WACFG_POS 55 +#define SMMU_PTE_WACFG_MASK (SMMU_BW2 << SMMU_PTE_WACFG_POS) +#define SMMU_PTE_RACFG_POS 57 +#define SMMU_PTE_RACFG_MASK (SMMU_BW2 << SMMU_PTE_RACFG_POS) + +/* L1 descriptor definitions */ +#define SMMU_L1_BLOCK_XN_POS 53 +#define SMMU_L1_BLOCK_XN_MASK (SMMU_BW1 << SMMU_L1_BLOCK_XN_POS) +#define SMMU_L1_BLOCK_PA_POS 30 +#define SMMU_L1_BLOCK_PA_MASK (SMMU_BW18 << SMMU_L1_BLOCK_PA_POS) +#define SMMU_L1_BLOCK_AF_POS 10 +#define SMMU_L1_BLOCK_AF_MASK (SMMU_BW1 << SMMU_L1_BLOCK_AF_POS) +#define SMMU_L1_BLOCK_SH_POS 8 +#define SMMU_L1_BLOCK_SH_MASK (SMMU_BW2 << SMMU_L1_BLOCK_SH_POS) +#define SMMU_L1_BLOCK_AP_POS 6 +#define SMMU_L1_BLOCK_AP_MASK (SMMU_BW2 << SMMU_L1_BLOCK_AP_POS) +#define SMMU_L1_BLOCK_MEMATTR_POS 2 +#define SMMU_L1_BLOCK_MEMATTR_MASK (SMMU_BW4 << SMMU_L1_BLOCK_MEMATTR_POS) + +#define SMMU_L1_DESC_BLOCK 1 +#define SMMU_L1_DESC_TABLE 3 + +/* L2 descriptor definitions */ +#define SMMU_L2_BLOCK_XN_POS 53 +#define SMMU_L2_BLOCK_XN_MASK (SMMU_BW1 << SMMU_L2_BLOCK_XN_POS) +#define SMMU_L2_BLOCK_PA_POS 21 +#define SMMU_L2_BLOCK_PA_MASK (SMMU_BW27 << SMMU_L2_BLOCK_PA_POS) +#define SMMU_L2_BLOCK_AF_POS 10 +#define SMMU_L2_BLOCK_AF_MASK (SMMU_BW1 << SMMU_L2_BLOCK_AF_POS) +#define SMMU_L2_BLOCK_SH_POS 8 +#define SMMU_L2_BLOCK_SH_MASK (SMMU_BW2 << SMMU_L2_BLOCK_SH_POS) +#define SMMU_L2_BLOCK_AP_POS 6 +#define SMMU_L2_BLOCK_AP_MASK (SMMU_BW2 << SMMU_L2_BLOCK_AP_POS) +#define SMMU_L2_BLOCK_MEMATTR_POS 2 +#define SMMU_L2_BLOCK_MEMATTR_MASK (SMMU_BW4 << SMMU_L2_BLOCK_MEMATTR_POS) + +#define SMMU_L2_DESC_BLOCK 1 +#define SMMU_L2_DESC_TABLE 3 + +/* L3 descriptor definitions */ +#define SMMU_L3_PAGE_XN_POS 53 +#define SMMU_L3_PAGE_XN_MASK (SMMU_BW1 << SMMU_L3_PAGE_XN_POS) +#define SMMU_L3_PAGE_PA_POS 12 +#define 
SMMU_L3_PAGE_PA_MASK (SMMU_BW36 << SMMU_L3_PAGE_PA_POS) +#define SMMU_L3_PAGE_AF_POS 10 +#define SMMU_L3_PAGE_AF_MASK (SMMU_BW1 << SMMU_L3_PAGE_AF_POS) +#define SMMU_L3_PAGE_SH_POS 8 +#define SMMU_L3_PAGE_SH_MASK (SMMU_BW2 << SMMU_L3_PAGE_SH_POS) +#define SMMU_L3_PAGE_AP_POS 6 +#define SMMU_L3_PAGE_AP_MASK (SMMU_BW2 << SMMU_L3_PAGE_AP_POS) +#define SMMU_L3_PAGE_MEMATTR_POS 2 +#define SMMU_L3_PAGE_MEMATTR_MASK (SMMU_BW4 << SMMU_L3_PAGE_MEMATTR_POS) + +#define SMMU_L3_DESC_PAGE 3 + +/* Memory attribute values */ +#define READ_NOALLOCATE 0x100 +#define WRITE_NOALLOCATE 0x200 + +/* Page table validity flags */ +#define SMMU_PAGETABLE_INVALID 0 +#define SMMU_PAGETABLE_VALID 1 +#define SMMU_PAGETABLE_PAGE_TYPE 3 +#define SMMU_PAGETABLE_BLOCK_TYPE 1 + +/* Execute permission */ +#define SMMU_PAGETABLE_EXECUTE_NEVER 1 /* XN=1, cannot execute */ +#define SMMU_PAGETABLE_EXECUTE 0 /* XN=0, can execute */ + +/* Page size definitions */ +#define SMMU_PAGETABLE_PAGESIZE_4KB 0 /* 4KB small page */ +#define SMMU_PAGETABLE_PAGESIZE_64KB 1 /* 64KB large page */ +#define SMMU_PAGETABLE_PAGESIZE_1MB 2 /* 1MB section */ +#define SMMU_PAGETABLE_PAGESIZE_2MB 3 /* 2MB block */ +#define SMMU_PAGETABLE_PAGESIZE_16MB 4 /* 16MB super-section */ +#define SMMU_PAGETABLE_PAGESIZE_512MB 5 /* 512MB block */ +#define SMMU_PAGETABLE_PAGESIZE_1GB 6 /* 1GB block */ + +/** + * struct smmu_pte_cfg - SMMU page table entry configuration + * @pa_base_addr: Block base physical address + * @execute_never: Execute never flag (XN bit) + * @shareable: Shareability attribute (SH field) + * @access_permission: Access permission (AP field) + * @memory_attribute: Memory attribute (MemAttr field) + * @page_type: Page size type + * @write_allocate_cfg: Write allocate configuration + * @read_allocate_cfg: Read allocate configuration + * @endian: Endianness setting + * @page_format: Page table format version + */ +struct smmu_pte_cfg { + u64 pa_base_addr; + u64 execute_never; + u32 shareable; + u32 access_permission; + 
u32 memory_attribute; + u32 page_type; + u64 write_allocate_cfg; + u64 read_allocate_cfg; + u32 endian; + u32 page_format; +}; + +#endif /* _ZXDH_HAL_SMMU_H_ */ \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.c b/drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.h b/drivers/infiniband/hw/zrdma/smmu/kernel/ioctl_mmu600.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/pub_print.h b/drivers/infiniband/hw/zrdma/smmu/kernel/pub_print.h new file mode 100644 index 000000000000..3c7590844e3d --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/pub_print.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef PUB_PRINT_H +#define PUB_PRINT_H + +#if defined(__KERNEL__) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#endif +#include "cmdk.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define PM_DEBUG ((u8)0x01) /**< 默认仅在debug版本显示 */ +#define PM_INFO ((u8)0x02) +#define PM_WARN ((u8)0x04) +#define PM_ERROR ((u8)0x08) +#define PM_FATAL ((u8)0x10) +#define DEFAULT_LEVEL ((u8)0x1E) /**< 默认不显示debug信息 */ +/** @} 输出控制级别 */ + +#define MAX_LEVEL_MASK ((u8)0x1F) /**< 全级别掩码 */ + +#define MAX_LEVEL_TYPE ((u8)0x05) /**< 定义5级打印 */ +#define INVALID_MODULE_ID 0xFF /**< 无效的模块id */ + +#define MAX_MDL_NAME_LEN 24 /**< 打印模块名称最大长度 */ +#define MAX_MODULE_ID ((u8)0x80) /**< 最大模块号,目前定义了128个模块 */ +#define 
MAX_MDL_PRINT_BUF_LEN 512 /**< 打印最大buffer长度 */ + +#define PM_FLAG_ON 1 /**< 打印flag打开 */ +#define PM_FLAG_OFF 0 /**< 打印flag关闭 */ + +extern u8 g_ucBySelfId; /**< 默认打印模块id */ + +/************************************************************************** + * 宏定义 * + **************************************************************************/ +/** 通用打印封装 */ +#define PUB_PRINTF printk + +#ifdef __cplusplus +} +#endif + +#endif /* PUB_PRINT_H */ diff --git a/drivers/infiniband/hw/zrdma/smmu/kernel/pub_return.h b/drivers/infiniband/hw/zrdma/smmu/kernel/pub_return.h new file mode 100644 index 000000000000..05362f6a1c7e --- /dev/null +++ b/drivers/infiniband/hw/zrdma/smmu/kernel/pub_return.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _PUB_RETURN_H_ +#define _PUB_RETURN_H_ +#include "pub_print.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/************************************************************************** + * 宏定义 * + **************************************************************************/ +#ifdef PUB_ERROR +#undef PUB_ERROR +#define PUB_ERROR (0xffffffff) /*直接定义为0xffffffff*/ +#else +#define PUB_ERROR (0xffffffff) /*0xffffffff*/ +#endif + +/** 检查空指针,返回错误 */ +#define PUB_CHECK_NULL_PTR_RET_ERR(ptr) \ + do { \ + if (!ptr) { \ + pr_info("Null Ptr Err! 
Fuc:%s,Line:%d,File:%s\n", \ + __func__, __LINE__, __FILE__); \ + return PUB_ERROR; \ + } \ + } while (0) + +/************************************************************************** + * 数据类型 * + **************************************************************************/ + +/************************************************************************** + * 全局函数原型 * + **************************************************************************/ + +#ifdef __cplusplus +} +#endif + +#endif /* _PUB_RETURN_H_ */ diff --git a/drivers/infiniband/hw/zrdma/srq.c b/drivers/infiniband/hw/zrdma/srq.c new file mode 100644 index 000000000000..232be1560b6d --- /dev/null +++ b/drivers/infiniband/hw/zrdma/srq.c @@ -0,0 +1,1116 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "ws.h" +#include "protos.h" +#include "vf.h" +#include "virtchnl.h" +#include "icrdma_hw.h" +#include "main.h" +#include "srq.h" + +static unsigned int ft_debug_srq_msg; +module_param(ft_debug_srq_msg, uint, 0444); +MODULE_PARM_DESC(ft_debug_srq_msg, "ft_debug_srq_msg =1, printk srq info"); + +/** + * zxdh_get_srq_wqe_shift - get shift count for maximum srq wqe size + * @uk_attrs: srq HW attributes + * @sge: Maximum Scatter Gather Elements wqe + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the srq wqe size based on number of SGEs. + * For 1 SGE, shift = 1 (wqe size of 2*16 bytes). + * For 2 or 3 SGEs, shift = 2 (wqe size of 4*16 bytes). + * For 4-7 SGE's Shift of 3. + * For 8-15 SGE's Shift of 4 otherwise (wqe size of 512 bytes). 
+ */ +static void zxdh_get_srq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, + u8 *shift) +{ + *shift = 0; //16bytes RQE, need to confirm configuration + if (sge < 2) + *shift = 1; + else if (sge < 4) + *shift = 2; + else if (sge < 8) + *shift = 3; + else if (sge < 16) + *shift = 4; + else + *shift = 5; +} + +/** + * zxdh_srq_round_up - return round up srq wq depth + * @wqdepth: wq depth in quanta to round up + */ +static int zxdh_srq_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +/* + * zxdh_get_srqdepth - get SRQ depth (quanta) + * @max_hw_rq_quanta: HW SRQ size limit + * @srq_size: SRQ size + * @shift: shift which determines size of WQE + * @srqdepth: depth of SRQ + */ +static int zxdh_get_srqdepth(u32 max_hw_srq_quanta, u32 srq_size, u8 shift, + u32 *srqdepth) +{ + *srqdepth = zxdh_srq_round_up((srq_size << shift) + ZXDH_SRQ_RSVD); + + if (*srqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift)) + *srqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift; + else if ((*srqdepth >> shift) > max_hw_srq_quanta) + return -EINVAL; + + return 0; +} + +static __le64 *zxdh_get_srq_wqe(struct zxdh_srq *srq, int wqe_index) +{ + struct zxdh_srq_uk *srq_uk; + __le64 *wqe; + + srq_uk = &srq->sc_srq.srq_uk; + wqe = srq_uk->srq_base[wqe_index * srq_uk->srq_wqe_size_multiplier].elem; + return wqe; +} + +//each srq index occupies 2 Bytes +static __le16 *zxdh_get_srq_list_wqe(struct zxdh_srq *srq, u16 *idx) +{ + struct zxdh_srq_uk *srq_uk; + __le16 *wqe; + u16 wqe_idx; + + srq_uk = &srq->sc_srq.srq_uk; + wqe_idx = ZXDH_RING_CURRENT_TAIL(srq_uk->srq_list_ring); + dma_wmb(); /* make sure shadow area is updated before moving tail */ + ZXDH_RING_MOVE_TAIL(srq_uk->srq_list_ring); + *idx = ZXDH_RING_CURRENT_TAIL(srq_uk->srq_list_ring); + + if (!(*idx)) + srq_uk->srq_list_polarity = !srq_uk->srq_list_polarity; + + wqe = &srq->sc_srq.srq_uk.srq_list_base[wqe_idx]; + + return wqe; +} + +void 
zxdh_free_srq_wqe(struct zxdh_srq_uk *srq, int wqe_index) +{ + struct zxdh_srq *iwsrq; + struct zxdh_sc_srq *sc_srq; + unsigned long flags; + __le64 *wqe; + u64 hdr; + + sc_srq = container_of(srq, struct zxdh_sc_srq, srq_uk); + iwsrq = container_of(sc_srq, struct zxdh_srq, sc_srq); + /* always called with interrupts disabled. */ + spin_lock_irqsave(&iwsrq->lock, flags); + wqe = zxdh_get_srq_wqe(iwsrq, srq->srq_ring.tail); + + srq->srq_ring.tail = wqe_index; + hdr = FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, wqe_index); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + + spin_unlock_irqrestore(&iwsrq->lock, flags); +} + +/** + * zxdh_setup_kmode_srq - setup initialization for kernel mode srq + * @iwdev: iwarp device + * @iwsrq: srq ptr (user or kernel) + * @info: initialize info to return + * @init_attr: Initial SRQ create attributes + */ +static int zxdh_setup_kmode_srq(struct zxdh_device *iwdev, + struct zxdh_srq *iwsrq, + struct zxdh_srq_init_info *info, + struct ib_srq_init_attr *init_attr) +{ + struct zxdh_dma_mem *mem = &iwsrq->kmem; + struct zxdh_dma_mem *mem_list = &iwsrq->kmem_list; + struct zxdh_dma_mem *mem_db = &iwsrq->kmem_db; + u32 srqdepth; + u8 srqshift; + u32 srq_size; + u32 srq_list_size; + u32 db_size; + u32 log2_srq_size; + int status; + struct zxdh_srq_uk_init_info *ukinfo = &info->srq_uk_init_info; + struct zxdh_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + struct zxdh_pci_f *rf = iwdev->rf; + + //get shift count for maximum wqe size + zxdh_get_srq_wqe_shift(uk_attrs, ukinfo->max_srq_frag_cnt, &srqshift); + + //get SRQ depth (quanta) + status = zxdh_get_srqdepth(uk_attrs->max_hw_srq_quanta, + ukinfo->srq_size, srqshift, &srqdepth); + if (status) + return status; + + iwsrq->ksrq.srq_wrid_mem = kcalloc(ukinfo->srq_size, + sizeof(*iwsrq->ksrq.srq_wrid_mem), + GFP_KERNEL); + if (!iwsrq->ksrq.srq_wrid_mem) + return -ENOMEM; + + ukinfo->srq_wrid_array = iwsrq->ksrq.srq_wrid_mem; + srq_size 
= srqdepth * ZXDH_SRQ_WQE_MIN_SIZE; + ukinfo->srq_size = srqdepth >> srqshift; + log2_srq_size = roundup_pow_of_two(ukinfo->srq_size); + log2_srq_size = order_base_2(log2_srq_size); + ukinfo->log2_srq_size = log2_srq_size; + + mem->size = ALIGN(srq_size, ZXDH_HW_PAGE_SIZE); + mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, &mem->pa, + GFP_KERNEL); + if (!mem->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + return -ENOMEM; + } + + srq_list_size = ukinfo->srq_size * sizeof(u16); + ukinfo->srq_list_size = ukinfo->srq_size; + mem_list->size = ALIGN(srq_list_size, 64); + mem_list->va = dma_alloc_coherent(iwdev->rf->hw.device, mem_list->size, + &mem_list->pa, GFP_KERNEL); + if (!mem_list->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + return -ENOMEM; + } + + if (rf->rdma_srq_mem_type != USER_L2D_KERNEL_L2D) { // use ddr memory + db_size = 8; + mem_db->size = ALIGN(db_size, 8); + mem_db->va = dma_alloc_coherent(iwdev->rf->hw.device, mem_db->size, + &mem_db->pa, GFP_KERNEL); + if (!mem_db->va) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem->size, mem->va, + mem->pa); + mem->va = NULL; + dma_free_coherent(iwdev->rf->hw.device, mem_list->size, + mem_list->va, mem_list->pa); + mem_list->va = NULL; + return -ENOMEM; + } + + *(u64 *)mem_db->va = ZXDH_SRQ_DB_INIT_VALUE; + info->srq_db_pa = mem_db->pa; + init_attr->attr.max_wr = (srqdepth - ZXDH_SRQ_RSVD) >> srqshift; + } + + ukinfo->srq_base = mem->va; + info->srq_pa = mem->pa; + ukinfo->srq_list_base = mem_list->va; + info->srq_list_pa = mem_list->pa; + ukinfo->srq_db_base = mem_db->va; + + return 0; +} + +static void zxdh_srq_wqe_init(struct zxdh_srq *srq) +{ + int i; + struct zxdh_srq_uk *srq_uk; + __le64 *wqe; + u64 hdr; + + srq_uk = &srq->sc_srq.srq_uk; + + for (i = srq_uk->srq_ring.head; i 
< srq_uk->srq_ring.tail; i++) { + wqe = zxdh_get_srq_wqe(srq, i); + hdr = FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, (i + 1)); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 0, hdr); + } +} + +static int zxdh_validate_srq_attrs(struct ib_srq_init_attr *init_attr, + struct zxdh_device *iwdev) +{ + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + + if (init_attr->attr.max_sge > uk_attrs->max_hw_wq_frags) + return -EINVAL; + + if (init_attr->attr.max_wr > uk_attrs->max_hw_srq_wr) + return -EINVAL; + + if (init_attr->attr.srq_limit > init_attr->attr.max_wr) + return -EINVAL; + + if (init_attr->srq_type != IB_SRQT_BASIC) + return -EOPNOTSUPP; + + return 0; +} + +/** + * zxdh_free_srq_rsrc - free up memory resources for srq + * @iwsrq: srq ptr (user or kernel) + */ +static void zxdh_free_srq_rsrc(struct zxdh_srq *iwsrq) +{ + struct zxdh_device *iwdev = iwsrq->iwdev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_dev *dev; + u32 srq_num; + + dev = &rf->sc_dev; + srq_num = iwsrq->ibsrq.ext.xrc.srq_num - dev->base_srqn; + zxdh_free_rsrc(rf, rf->allocated_srqs, srq_num); + + if (!iwsrq->user_mode) { + kfree(iwsrq->ksrq.srq_wrid_mem); + iwsrq->ksrq.srq_wrid_mem = NULL; + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem.size, + iwsrq->kmem.va, iwsrq->kmem.pa); + iwsrq->kmem.va = NULL; + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem_list.size, + iwsrq->kmem_list.va, iwsrq->kmem_list.pa); + iwsrq->kmem_list.va = NULL; + if (rf->rdma_srq_mem_type != USER_L2D_KERNEL_L2D) { // use ddr memory + dma_free_coherent(iwdev->rf->hw.device, iwsrq->kmem_db.size, + iwsrq->kmem_db.va, iwsrq->kmem_db.pa); + } + iwsrq->kmem_db.va = NULL; + } +} + +/** + * zxdh_uk_srq_init - initialize srq + * @srq: hw srq (user and kernel) + * @info: srq initialization info + * + * initializes the vars used in both user and kernel mode. + * size of the wqe depends on numbers of max. 
fragements + * allowed. Then size of wqe * the number of wqes should be the + * amount of memory allocated for srq. + */ +static int zxdh_uk_srq_init(struct zxdh_srq_uk *srq, + struct zxdh_srq_uk_init_info *info) +{ + u32 srq_ring_size; + u8 srqshift; + + srq->uk_attrs = info->uk_attrs; + if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags) + return -EINVAL; + + zxdh_get_srq_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, + &srqshift); + srq->srq_base = info->srq_base; + srq->srq_list_base = info->srq_list_base; + srq->srq_db_base = info->srq_db_base; + srq->srq_wrid_array = info->srq_wrid_array; + srq->srq_id = info->srq_id; + srq->srq_size = info->srq_size; + srq->log2_srq_size = info->log2_srq_size; + srq->srq_list_size = info->srq_list_size; + srq->max_srq_frag_cnt = info->max_srq_frag_cnt; + srq_ring_size = srq->srq_size; + srq->srq_wqe_size = srqshift; + srq->srq_wqe_size_multiplier = 1 << srqshift; + ZXDH_RING_INIT(srq->srq_ring, srq_ring_size); + ZXDH_RING_INIT(srq->srq_list_ring, srq->srq_list_size); + srq->srq_ring.tail = srq->srq_size - 1; + //initial value is 0, initial use is 1 + srq->srq_list_polarity = 1; + + return 0; +} + +/** + * zxdh_sc_srq_init - initialize srq + * @srq: sc srq + * @info: initialization srq info + */ +static int zxdh_sc_srq_init(struct zxdh_sc_srq *srq, + struct zxdh_srq_init_info *info) +{ + int ret_code; + u32 pble_obj_cnt; + struct zxdh_sc_dev *dev = srq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + if (info->srq_uk_init_info.max_srq_frag_cnt > + info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) + return -EINVAL; + + srq->srq_pa = info->srq_pa; + srq->srq_list_pa = info->srq_list_pa; + + if (rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + info->srq_db_pa = rf->srq_l2d_base_paddr + ((info->srq_uk_init_info.srq_id - dev->base_srqn) % ZXDH_PF_MAX_SRQ_NUM_USE_L2D) * 8; + pr_debug("%s[%d]: srq use L2D memory! 
l2d_base_paddr=0x%llx srq_db_pa=0x%llx srq_id=%d ep_id=%d pf_id=%d vf_id=%d ftype=%d\n", __func__, __LINE__, + rf->srq_l2d_base_paddr, info->srq_db_pa, info->srq_uk_init_info.srq_id, rf->ep_id, rf->pf_id, rf->vf_id, rf->ftype); + } else { + info->srq_db_pa = 0; + pr_err("%s[%d]: rdma srq can not use ddr! ep_id=%d pf_id=%d vf_id=%d ftype=%d srq_mem_type=%d\n", __func__, __LINE__, + rf->ep_id, rf->pf_id, rf->vf_id, rf->ftype, rf->rdma_srq_mem_type); + return -EPERM; + } + + srq->srq_db_pa = info->srq_db_pa; + srq->pd = info->pd; + srq->virtual_map = info->virtual_map; + srq->list_virtual_map = info->list_virtual_map; + srq->pbl_chunk_size = info->pbl_chunk_size; + srq->list_pbl_chunk_size = info->list_pbl_chunk_size; + srq->first_pm_pbl_idx = info->first_pm_pbl_idx; + srq->list_first_pm_pbl_idx = info->list_first_pm_pbl_idx; + srq->srq_limit = info->srq_limit; + ret_code = zxdh_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info); + if (ret_code) + return ret_code; + + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].cnt; + + if ((info->virtual_map && info->srq_pa >= pble_obj_cnt) || + (info->list_virtual_map && info->srq_list_pa >= pble_obj_cnt)) + return -EINVAL; + srq->hw_srq_size = zxdh_get_encoded_wqe_size(srq->srq_uk.srq_ring.size, + ZXDH_QUEUE_TYPE_SQ_RQ); + + return 0; +} + +static int zxdh_cqp_create_srq_cmd(struct zxdh_srq *iwsrq) +{ + struct zxdh_pci_f *rf = iwsrq->iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_create_srq_info *srq_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_CREATE; + srq_info = &cqp_request->info.in.u.srq_create.info; + memset(srq_info, 0, sizeof(*srq_info)); + srq_info->state = ZXDH_SRQ_STATE_GOOD; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq; + cqp_info->in.u.srq_create.scratch = 
(uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +#ifdef ZRDMA_CREATE_SRQ_VER_2 +/** + * zxdh_create_srq - create srq + * @ibsrq: ptr of srq + * @init_attr: attributes for srq + * @udata: user data for create srq + */ +int zxdh_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ib_pd *ibpd = ibsrq->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_create_srq_req req; + struct zxdh_create_srq_resp uresp = { 0 }; + u32 srq_num = 0; + int ret; + int err_code; + int srq_size; + u32 log2_srq_size; + struct zxdh_sc_srq *srq; + struct zxdh_srq_init_info init_info = { 0 }; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_ucontext *ucontext; + unsigned long flags; + struct zxdh_srq_mr *srqmr; + + if (rf->rdma_srq_mem_type != USER_L2D_KERNEL_L2D) { + pr_err("%s[%d]: rdma srq can not use ddr! 
ep_id=%d pf_id=%d vf_id=%d ftype=%d srq_mem_type=%d\n", __func__, __LINE__, + rf->ep_id, rf->pf_id, rf->vf_id, rf->ftype, rf->rdma_srq_mem_type); + return -ENOMEM; + } + + err_code = zxdh_validate_srq_attrs(init_attr, iwdev); + if (err_code) + return err_code; + + srq_size = init_attr->attr.max_wr; + log2_srq_size = order_base_2(srq_size); + + init_info.srq_uk_init_info.srq_size = srq_size; + init_info.srq_uk_init_info.log2_srq_size = log2_srq_size; + init_info.srq_uk_init_info.max_srq_frag_cnt = init_attr->attr.max_sge; + init_info.srq_uk_init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_limit = init_attr->attr.srq_limit; + init_info.srq_uk_init_info.uk_attrs = uk_attrs; + + err_code = zxdh_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq, + &srq_num, &rf->next_srq); + if (err_code) + goto error; + iwsrq->iwdev = iwdev; + iwsrq->ibsrq.ext.xrc.srq_num = dev->base_srqn + srq_num; + srq = &iwsrq->sc_srq; + srq->dev = dev; + srq->back_srq = iwsrq; + init_info.pd = &iwpd->sc_pd; + init_info.srq_uk_init_info.srq_id = dev->base_srqn + srq_num; + iwsrq->max_wr = srq_size; + iwsrq->max_sge = init_attr->attr.max_sge; + iwsrq->srq_limit = init_attr->attr.srq_limit; + iwsrq->srq_compl_ctx = (uintptr_t)srq; + iwsrq->sc_srq.srq_compl_ctx = iwsrq->srq_compl_ctx; + refcount_set(&iwsrq->refcnt, 1); + spin_lock_init(&iwsrq->lock); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto free_rsrc; + } + iwsrq->user_mode = 1; + init_info.srq_uk_init_info.srq_base = + (void *)((unsigned long)req.user_wqe_bufs); + + ucontext = kc_rdma_udata_to_drv_context(ibpd, udata); + + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + iwsrq->iwpbl = zxdh_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->srq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + + if (!iwsrq->iwpbl) { + err_code = -ENODATA; 
+ zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: no pbl info\n"); + goto free_rsrc; + } + srqmr = &iwsrq->iwpbl->srq_mr; + + //srq wqe addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.virtual_map = true; + init_info.pbl_chunk_size = 1; + init_info.first_pm_pbl_idx = srqmr->srq_pbl.idx; + init_info.srq_pa = srqmr->srq_pbl.idx; + } else { + init_info.srq_pa = srqmr->srq_pbl.addr; + init_info.virtual_map = false; + init_info.pbl_chunk_size = 0; + } + + //srq wqe idx addr configuration + if (iwsrq->iwpbl->pbl_allocated) { + init_info.list_virtual_map = true; + init_info.list_pbl_chunk_size = 1; + init_info.list_first_pm_pbl_idx = + srqmr->srq_list_pbl.idx; + init_info.srq_list_pa = srqmr->srq_list_pbl.idx; + } else { + init_info.srq_list_pa = srqmr->srq_list_pbl.addr; + init_info.list_virtual_map = false; + init_info.list_pbl_chunk_size = 0; + } + + //srq wqe db addr configuration + init_info.srq_db_pa = srqmr->db_addr; + init_info.db_virtual_map = false; + init_info.db_pbl_chunk_size = 0; + } else { + err_code = zxdh_setup_kmode_srq(iwdev, iwsrq, &init_info, + init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup srq failed\n"); + goto free_rsrc; + } + + ret = zxdh_sc_srq_init(srq, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: srq_init fail\n"); + goto free_rsrc; + } + + if (!udata) + zxdh_srq_wqe_init(iwsrq); + + err_code = zxdh_cqp_create_srq_cmd(iwsrq); + if (err_code) + goto free_rsrc; + + if (udata) { + uresp.srq_size = srq_size; + uresp.srq_list_size = srq_size; + uresp.srq_id = dev->base_srqn + srq_num; + uresp.base_srqn = dev->base_srqn; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); + zxdh_destroy_srq(&iwsrq->ibsrq, udata); + goto free_rsrc; + } + } + iwsrq->state = ZXDH_SRQ_STATE_GOOD; + rf->srq_table[srq_num] = iwsrq; + 
init_completion(&iwsrq->free_srq); + + return 0; +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); +error: + return err_code; +} +#endif + +void zxdh_srq_add_ref(struct ib_srq *ibsrq) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + + refcount_inc(&iwsrq->refcnt); +} + +void zxdh_srq_rem_ref(struct ib_srq *ibsrq) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + unsigned long flags; + + spin_lock_irqsave(&iwdev->rf->srqtable_lock, flags); + if (!refcount_dec_and_test(&iwsrq->refcnt)) { + spin_unlock_irqrestore(&iwdev->rf->srqtable_lock, flags); + return; + } + + iwdev->rf->srq_table[iwsrq->ibsrq.ext.xrc.srq_num - + iwdev->rf->sc_dev.base_srqn] = NULL; + spin_unlock_irqrestore(&iwdev->rf->srqtable_lock, flags); + complete(&iwsrq->free_srq); +} + +/** + * zxdh_srq_wq_destroy - send srq destroy cqp + * @rf: RDMA PCI function + * @srq: hardware control srq + */ +static void zxdh_srq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_srq *srq) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_destroy_srq_info *srq_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + srq_info = &cqp_request->info.in.u.srq_destroy.info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_DESTROY; + srq_info->state = ZXDH_SRQ_STATE_ERROR; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_destroy.srq = srq; + cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); +} + +#ifdef ZRDMA_DESTROY_SRQ_VER_3 +/** + * zxdh_destroy_srq - destroy + * @ibsrq: ptr of srq + * @udata: user data for destroy srq + */ +int zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + + if (iwsrq->sc_srq.srq_uk.destroy_pending) + goto free_rsrc; + iwsrq->sc_srq.srq_uk.destroy_pending = true; 
+ + zxdh_srq_rem_ref(&iwsrq->ibsrq); + wait_for_completion(&iwsrq->free_srq); + zxdh_srq_wq_destroy(iwdev->rf, &iwsrq->sc_srq); + +free_rsrc: + zxdh_free_srq_rsrc(iwsrq); + + return 0; +} +#endif + +/** + * zxdh_modify_srq - modify srq + * @ibsrq: ptr of srq + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int zxdh_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_device *iwdev = iwsrq->iwdev; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = iwdev->rf; + /* We don't support resizing SRQs yet */ + if (attr_mask & IB_SRQ_MAX_WR) + return -EINVAL; + + if (attr_mask & IB_SRQ_LIMIT) { + if (attr->srq_limit >= iwsrq->max_wr) + return -EINVAL; + } + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + iwsrq->srq_limit = attr->srq_limit; + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_SRQ_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq; + cqp_info->in.u.srq_modify.info.limit = attr->srq_limit; + cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return 0; +} + +/** + * zxdh_query_srq - query srq + * @ibsrq: ptr of srq + * @srq_attr: srq attributes to return + */ +int zxdh_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + u32 limit; + + zxdh_query_srqc(&iwsrq->sc_srq, &limit); + + srq_attr->max_wr = (iwsrq->max_wr - 1); + srq_attr->max_sge = iwsrq->max_sge; + srq_attr->srq_limit = limit; + + return 0; +} + +/** + * zxdh_post_srq_recv - post srq recv + * @ibsrq: ptr of srq + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +int zxdh_post_srq_recv(struct ib_srq *ibsrq, const struct 
ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr) +{ + struct zxdh_srq *iwsrq = to_iwsrq(ibsrq); + struct zxdh_srq_uk *srq_uk; + __le16 *wqe_16; + __le64 *wqe_64; + u64 temp_val; + unsigned long flags; + int err = 0; + int nreq; + int i; + u16 *buf; + u32 buf_size; + u16 idx = 0; + u64 hdr; + __u32 byte_off; + + srq_uk = &iwsrq->sc_srq.srq_uk; + spin_lock_irqsave(&iwsrq->lock, flags); + buf_size = (iwsrq->max_wr * sizeof(u16)); + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + *bad_wr = ib_wr; + goto out; + } + + if (iwsrq->sc_srq.dev->hw_attrs.self_health == true) { + err = -EINVAL; + *bad_wr = ib_wr; + goto out; + } + + if (iwsrq->state == ZXDH_SRQ_STATE_ERROR) { + err = -EIO; + *bad_wr = ib_wr; + goto out; + } + + for (nreq = 0; ib_wr; nreq++, ib_wr = ib_wr->next) { + if (unlikely(ib_wr->num_sge > iwsrq->max_sge)) { + err = -EINVAL; + *bad_wr = ib_wr; + break; + } + + if (unlikely(srq_uk->srq_ring.head == srq_uk->srq_ring.tail)) { + err = -ENOMEM; + *bad_wr = ib_wr; + break; + } + + srq_uk->srq_wrid_array[srq_uk->srq_ring.head] = ib_wr->wr_id; + buf[nreq] = srq_uk->srq_ring.head; + + wqe_64 = zxdh_get_srq_wqe(iwsrq, srq_uk->srq_ring.head); + get_64bit_val(wqe_64, 0, &temp_val); + srq_uk->srq_ring.head = + (__u16)FIELD_GET(IRDMAQPSRQ_NEXT_WQE_INDEX, temp_val); + + for (i = 0, byte_off = ZXDH_SRQ_FRAG_BYTESIZE; + i < ib_wr->num_sge; i++) { + set_64bit_val(wqe_64, byte_off, ib_wr->sg_list[i].addr); + set_64bit_val( + wqe_64, byte_off + 8, + FIELD_PREP(IRDMAQPSRQ_FRAG_LEN, + ib_wr->sg_list[i].length) | + FIELD_PREP(IRDMAQPSRQ_FRAG_STAG, + ib_wr->sg_list[i].lkey)); + byte_off += ZXDH_SRQ_FRAG_BYTESIZE; + } + + if ((ib_wr->num_sge < iwsrq->max_sge) || + (ib_wr->num_sge == 0)) { + set_64bit_val(wqe_64, byte_off, 0); + set_64bit_val( + wqe_64, byte_off + 8, + FIELD_PREP(IRDMAQPSRQ_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSRQ_FRAG_STAG, + ZXDH_SRQ_INVALID_LKEY)); + } + set_64bit_val(wqe_64, 8, ((u64)srq_uk->srq_id) << 32); + + hdr = 
FIELD_PREP(IRDMAQPSRQ_RSV, 0) | + FIELD_PREP(IRDMAQPSRQ_VALID_SGE_NUM, ib_wr->num_sge) | + FIELD_PREP(IRDMAQPSRQ_SIGNATURE, 0) | + FIELD_PREP(IRDMAQPSRQ_NEXT_WQE_INDEX, + srq_uk->srq_ring.head); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe_64, 0, hdr); + } + + if (err == 0) { + for (i = 0; i < nreq; i++) { + wqe_16 = zxdh_get_srq_list_wqe(iwsrq, &idx); + set_16bit_val(wqe_16, 0, buf[i]); + } + + hdr = FIELD_PREP(ZXDH_SRQ_PARITY_SIGN, + iwsrq->sc_srq.srq_uk.srq_list_polarity) | + FIELD_PREP(ZXDH_SRQ_SW_SRQ_HEAD, idx); + dma_wmb(); + set_64bit_val(iwsrq->sc_srq.srq_uk.srq_db_base, 0, hdr); + } +out: + spin_unlock_irqrestore(&iwsrq->lock, flags); + if (err) + *bad_wr = ib_wr; + if (buf) + kfree(buf); + return err; +} + +/** + * zxdh_sc_srq_create - create srq + * @srq: sc srq + * @info: srq create info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_srq_create(struct zxdh_sc_srq *srq, + struct zxdh_create_srq_info *info, u64 scratch, + bool post_sq) +{ + struct zxdh_sc_cqp *cqp; + __le64 *wqe; + u64 hdr; + struct zxdh_sc_dev *dev; + + dev = srq->dev; + cqp = srq->dev->cqp; + if ((srq->srq_uk.srq_id < dev->base_srqn) || + (srq->srq_uk.srq_id - dev->base_srqn) > (cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].max_cnt - 1)) { + dev_err(idev_to_dev(dev), "srq_id=%d base_srqn=%d max_cnt=%d\n", + srq->srq_uk.srq_id, dev->base_srqn, cqp->dev->hmc_info->hmc_obj[ZXDH_HMC_IW_SRQ].max_cnt); + return -EINVAL; + } + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_SWWQECNT, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LISTVIRTMAP, srq->list_virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE, + srq->list_pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE, + srq->srq_uk.srq_wqe_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQAXIERRSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_REVERSEDLKEY, 0) | + 
FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQVIRTMAP, srq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_CONTSRQ, ZXDH_SRQ_WQE_NOT_CONT) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_WQSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_PD_INDEX, srq->pd->pd_id) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSIZE, srq->srq_uk.log2_srq_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LEAFPBLSIZE, srq->pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_STATE, ZXDH_SRQ_STATE_GOOD); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 16, hdr); + + set_64bit_val(wqe, 24, + srq->virtual_map ? srq->first_pm_pbl_idx : srq->srq_pa); + set_64bit_val(wqe, 32, + srq->list_virtual_map ? srq->list_first_pm_pbl_idx : + srq->srq_list_pa); + + set_64bit_val(wqe, 40, srq->srq_db_pa); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, srq->srq_limit); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 48, hdr); + + set_64bit_val(wqe, 56, srq->srq_compl_ctx); + + //bit0 bit1 bit2 set to 1 + set_64bit_val(wqe, 8, + (RDMA_SRQC_MASK_GENERAL_CFG | + RDMA_SRQC_MASK_LIMIT_WATER_CFG | + RDMA_SRQC_MASK_DEBUG_SET_CFG)); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_CREATE_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + // print_hex_dump(KERN_DEBUG, "srq create ", DUMP_PREFIX_OFFSET, 16, 8, wqe, 9*8, false); + return 0; +} + +/** + * zxdh_sc_srq_modify - modify srq cqp wqe + * @srq: sc srq + * @info: modify srq info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_srq_modify(struct zxdh_sc_srq *srq, + struct zxdh_modify_srq_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + 
u64 hdr; + + cqp = srq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + set_64bit_val(wqe, 8, RDMA_SRQC_MASK_LIMIT_WATER_CFG); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, info->limit); + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 48, hdr); + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MODIFY_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + // print_hex_dump(KERN_DEBUG, "srq modify ", DUMP_PREFIX_OFFSET, 16, 8, wqe, 9*8, false); + + return 0; +} + +/** + * zxdh_sc_srq_destroy - cqp destroy srq + * @srq: sc srq + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +int zxdh_sc_srq_destroy(struct zxdh_sc_srq *srq, u64 scratch, bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp; + u64 hdr; + + cqp = srq->dev->cqp; + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_SWWQECNT, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LISTVIRTMAP, srq->list_virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE, + srq->list_pbl_chunk_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE, + ZXDH_SRQ_WQE_MIN_LEN_32_BYTE) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQAXIERRSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_REVERSEDLKEY, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_SRQVIRTMAP, srq->virtual_map) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_CONTSRQ, ZXDH_SRQ_WQE_NOT_CONT) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_WQSIG, 0) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_PD_INDEX, srq->pd->pd_id) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LOGSRQSIZE, srq->srq_uk.log2_srq_size) | + FIELD_PREP(ZXDH_CQPSQ_SRQ_LEAFPBLSIZE, srq->pbl_chunk_size) | + 
FIELD_PREP(ZXDH_CQPSQ_SRQ_STATE, ZXDH_SRQ_STATE_ERROR); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + set_64bit_val(wqe, 16, hdr); + + //bit0 set to 1 + set_64bit_val(wqe, 8, RDMA_SRQC_MASK_GENERAL_CFG); + + hdr = FIELD_PREP(ZXDH_CQPSQ_SRQ_ID, srq->srq_uk.srq_id) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_DESTROY_SRQ); + + dma_wmb(); /* make sure WQE is written before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +int zxdh_query_srqc(struct zxdh_sc_srq *srq, u32 *limit) +{ + struct zxdh_sc_dev *dev = srq->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_dma_mem srqc_buf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + u64 temp; + + srqc_buf.va = NULL; + srqc_buf.size = ALIGN(ZXDH_SRQ_CTX_SIZE, ZXDH_SRQC_ALIGNMENT); + srqc_buf.va = dma_alloc_coherent(dev->hw->device, srqc_buf.size, + &srqc_buf.pa, GFP_KERNEL); + if (!srqc_buf.va) + return -ENOMEM; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_SRQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_srqc.dev = dev; + cqp_info->in.u.query_srqc.srqn = srq->srq_uk.srq_id; + cqp_info->in.u.query_srqc.srqc_buf_pa = srqc_buf.pa; + cqp_info->in.u.query_srqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + if (limit) { + get_64bit_val(srqc_buf.va, 32, &temp); + *limit = FIELD_GET(ZXDH_CQPSQ_SRQ_LIMITWATERMARK, temp); + } +free_rsrc: + dma_free_coherent(dev->hw->device, 
srqc_buf.size, srqc_buf.va, + srqc_buf.pa); + srqc_buf.va = NULL; + return err_code; +} + +int zxdh_sc_query_srqc(struct zxdh_sc_dev *dev, u32 srqn, u64 srqc_buf_pa, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_sc_cqp *cqp = dev->cqp; + u64 hdr; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + hdr = FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_QUERY_SRQ) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_CQPSQ_QUERY_SRQC_ID, srqn); + set_64bit_val(wqe, 8, srqc_buf_pa); + + dma_wmb(); + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} diff --git a/drivers/infiniband/hw/zrdma/srq.h b/drivers/infiniband/hw/zrdma/srq.h new file mode 100644 index 000000000000..319b5854b20d --- /dev/null +++ b/drivers/infiniband/hw/zrdma/srq.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef RDMA_SRQ_H +#define RDMA_SRQ_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "verbs.h" + +//SRQC_FIELD_MASK +#define RDMA_SRQC_MASK_GENERAL_CFG (0x01UL << 0) +#define RDMA_SRQC_MASK_LIMIT_WATER_CFG (0x01UL << 1) +#define RDMA_SRQC_MASK_DEBUG_SET_CFG (0x01UL << 2) + +#define ZXDH_CQPSQ_SRQ_ID_S 0 +#define ZXDH_CQPSQ_SRQ_ID GENMASK_ULL(19, 0) +#define ZXDH_CQPSQ_SRQ_FILED_VALID_S 0 +#define ZXDH_CQPSQ_SRQ_FILEDVALID GENMASK_ULL(31, 0) + +#define ZXDH_CQPSQ_SRQ_STATE_S 62 +#define ZXDH_CQPSQ_SRQ_STATE GENMASK_ULL(63, 62) +#define ZXDH_CQPSQ_SRQ_LEAFPBLSIZE_S 62 +#define ZXDH_CQPSQ_SRQ_LEAFPBLSIZE GENMASK_ULL(61, 60) +#define ZXDH_CQPSQ_SRQ_LOGSRQSIZE_S 56 +#define ZXDH_CQPSQ_SRQ_LOGSRQSIZE GENMASK_ULL(59, 56) +#define ZXDH_CQPSQ_SRQ_PD_INDEX_S 32 +#define ZXDH_CQPSQ_SRQ_PD_INDEX GENMASK_ULL(51, 32) +#define ZXDH_CQPSQ_SRQ_WQSIG_S 31 +#define ZXDH_CQPSQ_SRQ_WQSIG BIT_ULL(31) +#define ZXDH_CQPSQ_SRQ_CONTSRQ_S 30 
+#define ZXDH_CQPSQ_SRQ_CONTSRQ BIT_ULL(30) +#define ZXDH_CQPSQ_SRQ_SRQVIRTMAP_S 29 +#define ZXDH_CQPSQ_SRQ_SRQVIRTMAP BIT_ULL(29) +#define ZXDH_CQPSQ_SRQ_REVERSEDLKEY_S 28 +#define ZXDH_CQPSQ_SRQ_REVERSEDLKEY BIT_ULL(28) +#define ZXDH_CQPSQ_SRQ_SRQAXIERRSIG_S 27 +#define ZXDH_CQPSQ_SRQ_SRQAXIERRSIG BIT_ULL(27) +#define ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE_S 24 +#define ZXDH_CQPSQ_SRQ_LOGSRQSTRIDE GENMASK_ULL(26, 24) +#define ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE_S 22 +#define ZXDH_CQPSQ_SRQ_LIST_LEAFPBLSIZE GENMASK_ULL(23, 22) +#define ZXDH_CQPSQ_SRQ_LISTVIRTMAP_S 21 +#define ZXDH_CQPSQ_SRQ_LISTVIRTMAP BIT_ULL(21) +#define ZXDH_CQPSQ_SRQ_SWWQECNT_S 0 +#define ZXDH_CQPSQ_SRQ_SWWQECNT GENMASK_ULL(15, 0) + +#define ZXDH_CQPSQ_SRQ_VALIDWQEINDEXPOINT_S 42 +#define ZXDH_CQPSQ_SRQ_VALIDWQEINDEXPOINT GENMASK_ULL(43, 42) +#define ZXDH_CQPSQ_SRQ_DEBUGSET_S 32 +#define ZXDH_CQPSQ_SRQ_DEBUGSET GENMASK_ULL(41, 32) +#define ZXDH_CQPSQ_SRQ_LIMITWATERMARK_S 16 +#define ZXDH_CQPSQ_SRQ_LIMITWATERMARK GENMASK_ULL(31, 16) +#define ZXDH_CQPSQ_SRQ_HWWQECNT_S 0 +#define ZXDH_CQPSQ_SRQ_HWWQECNT GENMASK_ULL(15, 0) + +#define ZXDH_SRQ_PARITY_SIGN_S 15 +#define ZXDH_SRQ_PARITY_SIGN BIT_ULL(15) +#define ZXDH_SRQ_SW_SRQ_HEAD_S 0 +#define ZXDH_SRQ_SW_SRQ_HEAD GENMASK_ULL(14, 0) + +#define ZXDH_SRQ_DB_CACHE_ID_S 0 +#define ZXDH_SRQ_DB_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_SRQ_DB_INDICATE_ID_S 2 +#define ZXDH_SRQ_DB_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_SRQ_DB_AXI_ID_S 4 +#define ZXDH_SRQ_DB_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_SRQ_DB_WAY_PATION_S 7 +#define ZXDH_SRQ_DB_WAY_PATION GENMASK_ULL(9, 7) + +#define ZXDH_SRQ_SRQL_CACHE_ID_S 0 +#define ZXDH_SRQ_DSRQL_CACHE_ID GENMASK_ULL(1, 0) +#define ZXDH_SRQ_SRQL_INDICATE_ID_S 2 +#define ZXDH_SRQ_SRQL_INDICATE_ID GENMASK_ULL(3, 2) +#define ZXDH_SRQ_SRQL_AXI_ID_S 4 +#define ZXDH_SRQ_SRQL_AXI_ID GENMASK_ULL(6, 4) +#define ZXDH_SRQ_SRQL_WAY_PATION_S 7 +#define ZXDH_SRQ_SRQL_WAY_PATION GENMASK_ULL(9, 7) + +#define ZXDH_SRQ_SW_MIN_WQSIZE 32u /* in WRs*/ 
+#define ZXDH_SRQ_WQE_MIN_SIZE 16 +#define ZXDH_SRQ_WQE_MAX_SIZE 512 +#define ZXDH_SRQ_FRAG_BYTESIZE 16 +#define ZXDH_SRQ_WQE_BYTESIZE 32 + +#define ZXDH_SRQE_SIZE 2 + +#define ZXDH_SRQ_STATE_ERROR 0 +#define ZXDH_SRQ_STATE_GOOD 1 + +#define ZXDH_SRQ_WQE_NOT_CONT 0 +#define ZXDH_SRQ_WQE_CONT 1 + +#define ZXDH_SRQ_WQE_MIN_LEN_32_BYTE 1 +#define ZXDH_SRQ_WQE_MIN_LEN_64_BYTE 2 + +#define ZXDH_SRQ_INVALID_LKEY 0x100 +#define ZXDH_SRQ_DB_INIT_VALUE 0x8000 + +struct zxdh_wqe_srq_next_sge { + __le16 next_wqe_index; + __le16 signature; + u8 valid_sge_num; + u8 rsvd[11]; +}; + +struct zxdh_srq_sge { + __le64 addr; + __le32 length; + __le32 lkey; +}; + +struct zxdh_srq_wqe { + __le64 elem[ZXDH_SRQE_SIZE]; +}; + +struct zxdh_srq_uk { + struct zxdh_srq_wqe *srq_base; + struct zxdh_uk_attrs *uk_attrs; + __le16 *srq_list_base; + __le64 *srq_db_base; + u32 srq_id; + u32 srq_size; + u32 log2_srq_size; + u32 srq_list_size; + struct zxdh_ring srq_ring; + struct zxdh_ring srq_list_ring; + u8 srq_list_polarity; + u64 *srq_wrid_array; + u8 srq_wqe_size; + u8 srq_wqe_size_multiplier; + u32 srq_caps; + u32 max_srq_frag_cnt; + u32 srq_type; + spinlock_t *lock; + u8 srq_flush_complete : 1; /* Indicates flush was seen and SRQ was empty after the flush */ + u8 destroy_pending : 1; /* Indicates the SRQ is being destroyed */ + u8 srq_flush_seen; +}; + +struct zxdh_sc_srq { + struct zxdh_srq_uk srq_uk; + struct zxdh_sc_dev *dev; + struct zxdh_sc_pd *pd; + u64 srq_pa; + u64 srq_list_pa; + u64 srq_db_pa; + u32 srq_limit; + u64 srq_compl_ctx; + void *back_srq; + u8 srq_state; + u8 hw_srq_size; + u8 flush_srq; + u8 virtual_map : 1; + u8 list_virtual_map : 1; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 list_pbl_chunk_size; + u32 list_first_pm_pbl_idx; +}; + +struct zxdh_srq { + struct ib_srq ibsrq; + struct zxdh_sc_srq sc_srq; + struct zxdh_device *iwdev; + spinlock_t lock; /* serialize posting WRs to SQ/RQ */ + + u32 max_wr; + u32 max_sge; + u32 srq_limit; + refcount_t refcnt; + struct 
ib_umem *umem; + int wq_sig; + u8 state; + u8 user_mode; + enum ib_srq_type srq_type; + struct zxdh_dma_mem kmem; + struct zxdh_dma_mem kmem_list; + struct zxdh_dma_mem kmem_db; + struct zxdh_pbl *iwpbl; + struct completion free_srq; + int limit; + struct zxdh_srq_kmode ksrq; + + u64 srq_compl_ctx; +}; + +struct zxdh_srq_attr { + u32 type; + u32 flags; + u32 log_size; + u32 wqe_shift; + u32 log_page_size; + u32 wqe_cnt; + u32 srqn; + u32 page_offset; + u32 user_index; + struct ib_umem *umem; +}; + +struct zxdh_srq_uk_init_info { + struct zxdh_srq_wqe *srq_base; + struct zxdh_uk_attrs *uk_attrs; + __le16 *srq_list_base; + __le64 *srq_db_base; + u64 *srq_wrid_array; + u32 srq_id; + u32 srq_caps; + u32 srq_size; + u32 log2_srq_size; + u32 srq_list_size; + u32 max_srq_frag_cnt; + u32 srq_limit; +}; + +struct zxdh_srq_init_info { + struct zxdh_srq_uk_init_info srq_uk_init_info; + struct zxdh_sc_dev *dev; + struct zxdh_sc_pd *pd; + u8 virtual_map : 1; + u8 list_virtual_map : 1; + u8 db_virtual_map : 1; + u64 srq_pa; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 list_pbl_chunk_size; + u32 list_first_pm_pbl_idx; + u8 db_pbl_chunk_size; + u32 db_first_pm_pbl_idx; + u64 srq_list_pa; + u64 srq_db_pa; + u32 srq_limit; +}; + +struct zxdh_create_srq_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; + __aligned_u64 user_wqe_list; + __aligned_u64 user_wqe_db; +}; + +struct zxdh_create_srq_resp { + __u32 srq_id; + __u32 srq_size; + __u32 srq_list_size; + __u32 base_srqn; +}; + +typedef enum { + USER_DDR_KERNEL_DDR, + USER_DDR_KERNEL_L2D, + USER_L2D_KERNEL_L2D, + USER_L2D_KERNEL_DDR +} zxdh_rdma_srq_mem_type; + +static inline struct zxdh_srq *to_iwsrq(struct ib_srq *ibsrq) +{ + return container_of(ibsrq, struct zxdh_srq, ibsrq); +} + +void zxdh_free_srq_wqe(struct zxdh_srq_uk *srq, int wqe_index); +#ifdef ZRDMA_CREATE_SRQ_VER_2 +int zxdh_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#else +struct ib_srq 
*zxdh_create_srq(struct ib_pd *ibpd, + struct ib_srq_init_attr *init_attr, + struct ib_udata *udata); +#endif +void zxdh_srq_add_ref(struct ib_srq *ibsrq); +void zxdh_srq_rem_ref(struct ib_srq *ibsrq); +#ifdef ZRDMA_DESTROY_SRQ_VER_3 +int zxdh_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata); +#endif + +int zxdh_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); +int zxdh_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr); +int zxdh_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr); +int zxdh_sc_srq_create(struct zxdh_sc_srq *srq, + struct zxdh_create_srq_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_srq_modify(struct zxdh_sc_srq *srq, + struct zxdh_modify_srq_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_srq_destroy(struct zxdh_sc_srq *srq, u64 scratch, bool post_sq); + +int zxdh_query_srqc(struct zxdh_sc_srq *srq, u32 *limit); +int zxdh_sc_query_srqc(struct zxdh_sc_dev *dev, u32 srqn, u64 srqc_buf_pa, u64 scratch, + bool post_sq); + +#endif diff --git a/drivers/infiniband/hw/zrdma/status.h b/drivers/infiniband/hw/zrdma/status.h new file mode 100644 index 000000000000..c3e6ef199d62 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/status.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_STATUS_H +#define ZXDH_STATUS_H +#endif /* ZXDH_STATUS_H */ diff --git a/drivers/infiniband/hw/zrdma/tc_hmcdma.c b/drivers/infiniband/hw/zrdma/tc_hmcdma.c new file mode 100644 index 000000000000..bac72483e646 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/tc_hmcdma.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "tc_hmcdma.h" +#include "icrdma_hw.h" +#include "type.h" +#include "protos.h" + +#define L2D_BASE_PA 0x6200900000 + +int 
host_test_dma_write32(struct zxdh_pci_f *rf) +{ + int i = 0, status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_dma_write32_date dma_data = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + u64 hmcreg = 0; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 100; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0, ddrsrc.size); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 4; + for (i = 0; i < dma_data.num; i++) { + dma_data.addrbuf[i] = ddrsrc.pa + 0x04 * i; + dma_data.databuf[i] = 0x55 + i; + } + + zxdh_sc_dma_write32(cqp, 0, &dpath_index, &dma_data, true); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_REGISTER; + dpath_index.obj_id = ZXDH_REG_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 4; + + hmcreg = 0x6204c00010; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x55; + + hmcreg = 0x6204c00010 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x56; + + hmcreg = 0x6204c00010 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x57; + + hmcreg = 0x6204c00010 + 4096 * 3; + dma_data.addrbuf[3] = hmcreg; + dma_data.databuf[3] = 0x58; + + zxdh_sc_dma_write32(cqp, 0, &dpath_index, &dma_data, true); + + return status; +} + +int host_test_dma_write64(struct zxdh_pci_f *rf) +{ + int i = 0, status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_mem ddrsrc = {}; // 外挂DDR + struct zxdh_dma_write64_date dma_data = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + 
u64 hmcreg = 0; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 100; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0, ddrsrc.size); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 3; + for (i = 0; i < dma_data.num; i++) { + dma_data.addrbuf[i] = ddrsrc.pa + 0x08 * i; + dma_data.databuf[i] = 0x66 + i; + } + + zxdh_sc_dma_write64(cqp, 0, &dpath_index, &dma_data, true); + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; // not pass cache + dpath_index.path_select = ZXDH_INDICATE_REGISTER; // L2D + dpath_index.obj_id = ZXDH_REG_OBJ_ID; // L2D + dpath_index.vhca_id = dev->vhca_id; + + dma_data.num = 3; + hmcreg = 0x6204c00008; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x155; + + hmcreg = 0x6204c00008 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x156; + + hmcreg = 0x6204c00008 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x157; + zxdh_sc_dma_write64(cqp, 0, &dpath_index, &dma_data, true); + return status; +} + +int host_test_dma_write(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_dma_mem ddrdest = {}; + + struct zxdh_src_copy_dest src_dest = {}; + + struct zxdh_path_index spath_index = {}; + struct zxdh_path_index dpath_index = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 1024; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return 
status; + } + + memset(ddrsrc.va, 0x00, ddrsrc.size); + + ddrdest.size = 1024; + ddrdest.va = dma_alloc_coherent(rf->hw.device, ddrdest.size, + &ddrdest.pa, GFP_KERNEL); + + if (!ddrdest.va) { + status = -ENOMEM; + return status; + } + memset(ddrdest.va, 0, ddrdest.size); + + addr = (u8 *)(uintptr_t)ddrsrc.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + sizeof(val); + } + + src_dest.src = ddrsrc.pa; + src_dest.dest = ddrdest.pa; + src_dest.len = 5 * 4; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + if (rf->sc_dev.cache_id != 0) { + dpath_index.inter_select = ZXDH_INTERFACE_CACHE; + dpath_index.path_select = rf->sc_dev.cache_id; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + } else { + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + dpath_index.obj_id = ZXDH_DMA_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + } + + zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, &dpath_index, true); + + src_dest.src = ddrsrc.pa; + src_dest.dest = L2D_BASE_PA; + src_dest.len = 5 * 4; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_L2D; + dpath_index.obj_id = ZXDH_L2D_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, &dpath_index, true); + + return status; +} + +int host_test_dma_write_bysmmu(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddrsrc = {}; + struct zxdh_src_copy_dest src_dest = {}; + + struct zxdh_path_index spath_index = {}; + struct 
zxdh_path_index dpath_index = {}; + struct zxdh_sc_cqp *cqp = rf->sc_dev.cqp; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddrsrc.size = 1024; + ddrsrc.va = dma_alloc_coherent(rf->hw.device, ddrsrc.size, &ddrsrc.pa, + GFP_KERNEL); + + if (!ddrsrc.va) { + status = -ENOMEM; + return status; + } + memset(ddrsrc.va, 0x00, ddrsrc.size); + + addr = (u8 *)(uintptr_t)ddrsrc.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + sizeof(val); + } + + src_dest.src = ddrsrc.pa; + src_dest.dest = rf->sc_dev.hmc_info->hmc_obj[ZXDH_HMC_IW_QP].base; + src_dest.len = 512; + + spath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + spath_index.path_select = ZXDH_INDICATE_HOST_NOSMMU; + spath_index.obj_id = ZXDH_DMA_OBJ_ID; + spath_index.vhca_id = rf->sc_dev.vhca_id; + + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + dpath_index.path_select = ZXDH_INDICATE_HOST_SMMU; + dpath_index.obj_id = ZXDH_QPC_OBJ_ID; + dpath_index.vhca_id = rf->sc_dev.vhca_id; + + status = zxdh_sc_dma_write(cqp, 0, &src_dest, &spath_index, + &dpath_index, true); + return status; +} + +int zxdh_sc_dma_wr32_auto(struct zxdh_pci_f *rf) +{ + int status = 0; + u16 i = 0, len = 0x20; + u32 val = 0xff; + u8 *addr; + + struct zxdh_dma_mem ddr1 = {}; + struct zxdh_dma_mem ddr2 = {}; + struct zxdh_dma_mem ddr3 = {}; + + struct zxdh_src_copy_dest src_dest = {}; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + ddr1.size = 1024; + ddr1.va = dma_alloc_coherent(rf->hw.device, ddr1.size, &ddr1.pa, + GFP_KERNEL); + + if (!ddr1.va) { + status = -ENOMEM; + return status; + } + + ddr2.size = 1024; + ddr2.va = dma_alloc_coherent(rf->hw.device, ddr2.size, &ddr2.pa, + GFP_KERNEL); + + if (!ddr2.va) { + status = -ENOMEM; + return status; + } + memset(ddr2.va, 0x00, ddr2.size); + + ddr3.size = 1024; + ddr3.va = dma_alloc_coherent(rf->hw.device, ddr3.size, &ddr3.pa, + GFP_KERNEL); + + if 
(!ddr3.va) { + status = -ENOMEM; + return status; + } + memset(ddr3.va, 0x00, ddr3.size); + + addr = (u8 *)(uintptr_t)ddr1.va; + + for (i = 0; i < 200; i++) { + *addr = val + i; + addr = addr + sizeof(val); + } + + src_dest.src = ddr1.pa; + src_dest.dest = ddr2.pa; + src_dest.len = len; + zxdh_cqp_rdma_write_cmd(&rf->sc_dev, &src_dest, + ZXDH_INDICATE_HOST_NOSMMU, + ZXDH_INDICATE_HOST_NOSMMU); + src_dest.src = ddr2.pa; + src_dest.dest = ddr3.pa; + src_dest.len = len; + zxdh_cqp_rdma_read_cmd(&rf->sc_dev, &src_dest, + ZXDH_INDICATE_HOST_NOSMMU, + ZXDH_INDICATE_HOST_NOSMMU); + + if (!memcmp(ddr1.va, ddr3.va, len)) { + status = 0; + pr_info("CQP Write Read is normal!!!\n"); + } + return status; +} + +int zxdh_sc_dma_w32r32_auto(struct zxdh_pci_f *rf) +{ + int status = 0; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_path_index dpath_index = {}; + struct zxdh_dma_write32_date dma_data = {}; + u64 rarry[5], hmcreg = 0; + struct zxdh_dam_read_bycqe rdmadata = {}; + + if (rf->sc_dev.hmc_use_dpu_ddr == true) { + pr_info("This is use DPU DDR!!!\n"); + return -EPERM; + } + + rdmadata.num = 4; + rdmadata.bitwidth = 1; + rdmadata.valuetype = 1; + rdmadata.addrbuf[0] = 0x6204c00010; + rdmadata.addrbuf[1] = 0x6204c00010 + 4096 * 1; + rdmadata.addrbuf[2] = 0x6204c00010 + 4096 * 2; + rdmadata.addrbuf[3] = 0x6204c00010 + 4096 * 3; + + dma_data.num = 4; + + hmcreg = 0x6204c00010; + + dma_data.addrbuf[0] = hmcreg; + dma_data.databuf[0] = 0x55; + + hmcreg = 0x6204c00010 + 4096 * 1; + dma_data.addrbuf[1] = hmcreg; + dma_data.databuf[1] = 0x56; + + hmcreg = 0x6204c00010 + 4096 * 2; + dma_data.addrbuf[2] = hmcreg; + dma_data.databuf[2] = 0x57; + + hmcreg = 0x6204c00010 + 4096 * 3; + dma_data.addrbuf[3] = hmcreg; + dma_data.databuf[3] = 0x58; + + dpath_index.vhca_id = dev->vhca_id; + dpath_index.obj_id = ZXDH_REG_OBJ_ID; + dpath_index.path_select = ZXDH_INDICATE_REGISTER; + dpath_index.inter_select = ZXDH_INTERFACE_NOTCACHE; + zxdh_cqp_rdma_write32_cmd(dev, &dma_data); + + 
zxdh_cqp_damreadbycqe_cmd(dev, &rdmadata, &dpath_index, rarry); + + if (rarry[0] == 0x55 && rarry[1] == 0x56 && rarry[2] == 0x57 && + rarry[3] == 0x58) { + pr_info("CQP Write32 ReadbyCqe is normal!!!\n"); + status = 0; + } + + return status; +} diff --git a/drivers/infiniband/hw/zrdma/tc_hmcdma.h b/drivers/infiniband/hw/zrdma/tc_hmcdma.h new file mode 100644 index 000000000000..6056da47c048 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/tc_hmcdma.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_TCHMCDMA_H +#define ZXDH_TCHMCDMA_H + +#include "main.h" +#include "protos.h" + +int host_test_dma_write32(struct zxdh_pci_f *rf); +int host_test_dma_write64(struct zxdh_pci_f *rf); +int host_test_dma_write(struct zxdh_pci_f *rf); +int host_test_dma_write_bysmmu(struct zxdh_pci_f *rf); +int zxdh_sc_dma_wr32_auto(struct zxdh_pci_f *rf); +int zxdh_sc_dma_w32r32_auto(struct zxdh_pci_f *rf); + +#endif /* ZXDH_TCHMCDMA_H */ diff --git a/drivers/infiniband/hw/zrdma/trace.c b/drivers/infiniband/hw/zrdma/trace.c new file mode 100644 index 000000000000..1d78b3616ec6 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/trace.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ +// #define CREATE_TRACE_POINTS +#include "trace.h" + +const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (ipv4) { + __be32 myaddr = htonl(*addr); + + trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port)); + } else { + trace_seq_printf(p, "%pI6:%d", addr, htons(port)); + } + trace_seq_putc(p, 0); + + return ret; +} + +const char *parse_iw_event_type(enum iw_cm_event_type iw_type) +{ + switch (iw_type) { + case IW_CM_EVENT_CONNECT_REQUEST: + return "IwRequest"; + case IW_CM_EVENT_CONNECT_REPLY: + return "IwReply"; + case IW_CM_EVENT_ESTABLISHED: + return "IwEstablished"; + case 
IW_CM_EVENT_DISCONNECT: + return "IwDisconnect"; + case IW_CM_EVENT_CLOSE: + return "IwClose"; + } + + return "Unknown"; +} + +const char *parse_cm_event_type(enum zxdh_cm_event_type cm_type) +{ + switch (cm_type) { + case ZXDH_CM_EVENT_ESTABLISHED: + return "CmEstablished"; + case ZXDH_CM_EVENT_MPA_REQ: + return "CmMPA_REQ"; + case ZXDH_CM_EVENT_MPA_CONNECT: + return "CmMPA_CONNECT"; + case ZXDH_CM_EVENT_MPA_ACCEPT: + return "CmMPA_ACCEPT"; + case ZXDH_CM_EVENT_MPA_REJECT: + return "CmMPA_REJECT"; + case ZXDH_CM_EVENT_MPA_ESTABLISHED: + return "CmMPA_ESTABLISHED"; + case ZXDH_CM_EVENT_CONNECTED: + return "CmConnected"; + case ZXDH_CM_EVENT_RESET: + return "CmReset"; + case ZXDH_CM_EVENT_ABORTED: + return "CmAborted"; + case ZXDH_CM_EVENT_UNKNOWN: + return "none"; + } + return "Unknown"; +} + +const char *parse_cm_state(enum zxdh_cm_node_state state) +{ + switch (state) { + case ZXDH_CM_STATE_UNKNOWN: + return "UNKNOWN"; + case ZXDH_CM_STATE_INITED: + return "INITED"; + case ZXDH_CM_STATE_LISTENING: + return "LISTENING"; + case ZXDH_CM_STATE_SYN_RCVD: + return "SYN_RCVD"; + case ZXDH_CM_STATE_SYN_SENT: + return "SYN_SENT"; + case ZXDH_CM_STATE_ONE_SIDE_ESTABLISHED: + return "ONE_SIDE_ESTABLISHED"; + case ZXDH_CM_STATE_ESTABLISHED: + return "ESTABLISHED"; + case ZXDH_CM_STATE_ACCEPTING: + return "ACCEPTING"; + case ZXDH_CM_STATE_MPAREQ_SENT: + return "MPAREQ_SENT"; + case ZXDH_CM_STATE_MPAREQ_RCVD: + return "MPAREQ_RCVD"; + case ZXDH_CM_STATE_MPAREJ_RCVD: + return "MPAREJ_RECVD"; + case ZXDH_CM_STATE_OFFLOADED: + return "OFFLOADED"; + case ZXDH_CM_STATE_FIN_WAIT1: + return "FIN_WAIT1"; + case ZXDH_CM_STATE_FIN_WAIT2: + return "FIN_WAIT2"; + case ZXDH_CM_STATE_CLOSE_WAIT: + return "CLOSE_WAIT"; + case ZXDH_CM_STATE_TIME_WAIT: + return "TIME_WAIT"; + case ZXDH_CM_STATE_LAST_ACK: + return "LAST_ACK"; + case ZXDH_CM_STATE_CLOSING: + return "CLOSING"; + case ZXDH_CM_STATE_LISTENER_DESTROYED: + return "LISTENER_DESTROYED"; + case ZXDH_CM_STATE_CLOSED: + return "CLOSED"; + 
} + return ("Bad state"); +} diff --git a/drivers/infiniband/hw/zrdma/trace.h b/drivers/infiniband/hw/zrdma/trace.h new file mode 100644 index 000000000000..32015f71ab40 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/trace.h @@ -0,0 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "trace_cm.h" diff --git a/drivers/infiniband/hw/zrdma/trace_cm.h b/drivers/infiniband/hw/zrdma/trace_cm.h new file mode 100644 index 000000000000..006321385078 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/trace_cm.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ) +#define __TRACE_CM_H + +#include +#include + +#include "main.h" + +const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ivp4); +const char *parse_iw_event_type(enum iw_cm_event_type iw_type); +const char *parse_cm_event_type(enum zxdh_cm_event_type cm_type); +const char *parse_cm_state(enum zxdh_cm_node_state); +#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4) + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM zxdh_cm + +DECLARE_EVENT_CLASS( + cm_node_ah_template, TP_PROTO(struct zxdh_cm_node *cm_node), + TP_ARGS(cm_node), + TP_STRUCT__entry( + __field(struct zxdh_device *, + iwdev) __field(struct zxdh_cm_node *, + cm_node) __field(struct zxdh_sc_ah *, ah) + __field(u32, refcount) __field(u16, lport) __field( + u16, rport) __field(enum zxdh_cm_node_state, + state) __field(bool, ipv4) + __field(u16, vlan_id) __field(int, accel) + __dynamic_array(u32, laddr, 4) + __dynamic_array(u32, raddr, 4)), + TP_fast_assign(__entry->iwdev = cm_node->iwdev; + __entry->cm_node = cm_node; __entry->ah = cm_node->ah; + __entry->refcount = refcount_read(&cm_node->refcnt); + __entry->lport = cm_node->loc_port; + __entry->rport = cm_node->rem_port; + __entry->state = cm_node->state; + 
__entry->ipv4 = cm_node->ipv4; + __entry->vlan_id = cm_node->vlan_id; + __entry->accel = cm_node->accelerated; + memcpy(__get_dynamic_array(laddr), cm_node->loc_addr, 4); + memcpy(__get_dynamic_array(raddr), cm_node->rem_addr, + 4);), + TP_printk( + "iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s", + __entry->iwdev, __entry->cm_node, __entry->ah, + __entry->refcount, __entry->vlan_id, __entry->accel, + parse_cm_state(__entry->state), + __print_ip_addr(__get_dynamic_array(laddr), __entry->lport, + __entry->ipv4), + __print_ip_addr(__get_dynamic_array(raddr), __entry->rport, + __entry->ipv4))); + +DEFINE_EVENT(cm_node_ah_template, zxdh_create_ah, + TP_PROTO(struct zxdh_cm_node *cm_node), TP_ARGS(cm_node)); + +#endif /* __TRACE_CM_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_cm +#include diff --git a/drivers/infiniband/hw/zrdma/type.h b/drivers/infiniband/hw/zrdma/type.h new file mode 100644 index 000000000000..a26a0cb12787 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/type.h @@ -0,0 +1,1816 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_TYPE_H +#define ZXDH_TYPE_H +#include "status.h" +#include "osdep.h" +#include "zrdma.h" +#include "user.h" +#include "hmc.h" +#include "uda.h" +#include "vf.h" +#include "ws.h" +#include "virtchnl.h" +#include "private_verbs_cmd.h" + +enum zxdh_page_size { + ZXDH_PAGE_SIZE_4K = 0, + ZXDH_PAGE_SIZE_2M = 9, + ZXDH_PAGE_SIZE_1G = 18, +}; + +enum zxdh_hdrct_flags { + DDP_LEN_FLAG = 0x80, + DDP_HDR_FLAG = 0x40, + RDMA_HDR_FLAG = 0x20, +}; + +enum zxdh_term_layers { + LAYER_RDMA = 0, + LAYER_DDP = 1, + LAYER_MPA = 2, +}; + +enum zxdh_pble_type { + PBLE_QUEUE = 0, + PBLE_MR = 1, +}; + +enum zxdh_term_error_types { + RDMAP_REMOTE_PROT = 1, + RDMAP_REMOTE_OP = 2, + DDP_CATASTROPHIC = 0, + DDP_TAGGED_BUF = 1, + DDP_UNTAGGED_BUF = 2, + DDP_LLP = 3, +}; + 
+enum zxdh_term_rdma_errors { + RDMAP_INV_STAG = 0x00, + RDMAP_INV_BOUNDS = 0x01, + RDMAP_ACCESS = 0x02, + RDMAP_UNASSOC_STAG = 0x03, + RDMAP_TO_WRAP = 0x04, + RDMAP_INV_RDMAP_VER = 0x05, + RDMAP_UNEXPECTED_OP = 0x06, + RDMAP_CATASTROPHIC_LOCAL = 0x07, + RDMAP_CATASTROPHIC_GLOBAL = 0x08, + RDMAP_CANT_INV_STAG = 0x09, + RDMAP_UNSPECIFIED = 0xff, +}; + +enum zxdh_term_ddp_errors { + DDP_CATASTROPHIC_LOCAL = 0x00, + DDP_TAGGED_INV_STAG = 0x00, + DDP_TAGGED_BOUNDS = 0x01, + DDP_TAGGED_UNASSOC_STAG = 0x02, + DDP_TAGGED_TO_WRAP = 0x03, + DDP_TAGGED_INV_DDP_VER = 0x04, + DDP_UNTAGGED_INV_QN = 0x01, + DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02, + DDP_UNTAGGED_INV_MSN_RANGE = 0x03, + DDP_UNTAGGED_INV_MO = 0x04, + DDP_UNTAGGED_INV_TOO_LONG = 0x05, + DDP_UNTAGGED_INV_DDP_VER = 0x06, +}; + +enum zxdh_term_mpa_errors { + MPA_CLOSED = 0x01, + MPA_CRC = 0x02, + MPA_MARKER = 0x03, + MPA_REQ_RSP = 0x04, +}; + +enum zxdh_qp_event_type { + ZXDH_QP_EVENT_CATASTROPHIC, + ZXDH_QP_EVENT_ACCESS_ERR, + ZXDH_QP_EVENT_REQ_ERR, +}; + +enum zxdh_hw_stats_index { + /* 32-bit */ + HW_STAT_DUPLICATE_REQUEST = 0, + HW_STAT_NP_CNP_SENT, + HW_STAT_NP_ECN_MARKED_ROCE_PACKETS, + HW_STAT_OUT_OF_SEQUENCE, + HW_STAT_PACKET_SEQ_ERR, + HW_STAT_REQ_CQE_ERROR, + HW_STAT_REQ_REMOTE_ACCESS_ERRORS, + HW_STAT_REQ_REMOTE_INVALID_REQUEST, + HW_STAT_REQ_REMOTE_OPERATION_ERRORS, + HW_STAT_REQ_LOCAL_LENGTH_ERROR, + HW_STAT_RESP_CQE_ERROR, + HW_STAT_RESP_REMOTE_ACCESS_ERRORS, + HW_STAT_RESP_REMOTE_INVALID_REQUEST, + HW_STAT_RESP_REMOTE_OPERATION_ERRORS, + HW_STAT_RESP_RNR_NAK, + HW_STAT_RNR_NAK_RETRY_ERR, + HW_STAT_RP_CNP_HANDLED, + HW_STAT_RX_READ_REQUESTS, + HW_STAT_RX_WRITE_REQUESTS, + HW_STAT_RX_ICRC_ENCAPSULATED, + HW_STAT_ROCE_SLOW_RESTART_CNPS, + HW_STAT_RDMA_TX_PKTS, + HW_STAT_RDMA_TX_BYTES, + HW_STAT_RDMA_RX_PKTS, + HW_STAT_RDMA_RX_BYTES, + ZXDH_HW_STAT_INDEX_MAX, +}; + +enum zxdh_ib_hw_stats_index { + IB_STAT_SYMBOL_ERROR = 0, + IB_STAT_LINK_ERROR_RECOVERY, + IB_STAT_LINK_DOWNED, + IB_STAT_PORT_RCV_ERRORS, + 
IB_STAT_PORT_RCV_REMPHYS_ERRORS, + IB_STAT_PORT_RCV_SWITCH_RELAY_ERRORS, + IB_STAT_PORT_XMIT_DISCARDS, + IB_STAT_PORT_XMIT_CONTRAINT_ERRORS, + IB_STAT_PORT_XMIT_WAIT, + IB_STAT_PORT_RCV_CONSTRAINT_ERRORS, + IB_STAT_LINK_OVERRUN_ERRORS, + IB_STAT_VL15_DROPPED, + IB_STAT_PORT_XMIT_DATA, + IB_STAT_PORT_RCV_DATA, + IB_STAT_PORT_XMIT_PACKETS, + IB_STAT_PORT_RCV_PACKETS, + IB_STAT_PORT_UNICAST_XMIT_PACKETS, + IB_STAT_PORT_UNICAST_RCV_PACKETS, + IB_STAT_PORT_MULTICAST_XMIT_PACKETS, + IB_STAT_PORT_MULTICAST_RCV_PACKETS, + IB_STAT_LOCAL_LINK_INTEGRITY_ERRORS, + IB_STAT_INDEX_MAX, +}; + +enum zxdh_module_type { + ZXDH_IB_STAT = 0, + ZXDH_RDMA_STAT, +}; + +#define ZXDH_MIN_FEATURES 2 + +enum zxdh_feature_type { + ZXDH_FEATURE_FW_INFO = 0, + ZXDH_HW_VERSION_INFO = 1, + ZXDH_QSETS_MAX = 26, + ZXDH_MAX_FEATURES, /* Must be last entry */ +}; + +enum zxdh_sched_prio_type { + ZXDH_PRIO_WEIGHTED_RR = 1, + ZXDH_PRIO_STRICT = 2, + ZXDH_PRIO_WEIGHTED_STRICT = 3, +}; + +enum zxdh_vm_vf_type { + ZXDH_VF_TYPE = 0, + ZXDH_VM_TYPE, + ZXDH_PF_TYPE, +}; + +enum zxdh_cqp_hmc_profile { + ZXDH_HMC_PROFILE_DEFAULT = 1, + ZXDH_HMC_PROFILE_FAVOR_VF = 2, + ZXDH_HMC_PROFILE_EQUAL = 3, +}; + +enum zxdh_quad_entry_type { + ZXDH_QHASH_TYPE_TCP_ESTABLISHED = 1, + ZXDH_QHASH_TYPE_TCP_SYN, + ZXDH_QHASH_TYPE_UDP_UNICAST, + ZXDH_QHASH_TYPE_UDP_MCAST, + ZXDH_QHASH_TYPE_ROCE_MCAST, + ZXDH_QHASH_TYPE_ROCEV2_HW, +}; + +enum zxdh_quad_hash_manage_type { + ZXDH_QHASH_MANAGE_TYPE_DELETE = 0, + ZXDH_QHASH_MANAGE_TYPE_ADD, + ZXDH_QHASH_MANAGE_TYPE_MODIFY, +}; + +enum zxdh_syn_rst_handling { + ZXDH_SYN_RST_HANDLING_HW_TCP_SECURE = 0, + ZXDH_SYN_RST_HANDLING_HW_TCP, + ZXDH_SYN_RST_HANDLING_FW_TCP_SECURE, + ZXDH_SYN_RST_HANDLING_FW_TCP, +}; + +enum zxdh_queue_type { + ZXDH_QUEUE_TYPE_SQ_RQ = 0, + ZXDH_QUEUE_TYPE_CQP, +}; + +enum zxdh_cqe_source_type { + ZXDH_CQE_SOURCE_OTHERQP = 0, + ZXDH_CQE_SOURCE_CQP, +}; + +struct zxdh_sc_dev; +struct zxdh_vsi_pestat; +struct zxdh_src_copy_dest; + +struct zxdh_dcqcn_cc_params { + u8 
cc_cfg_valid; + u8 min_dec_factor; + u8 min_rate; + u8 dcqcn_f; + u16 rai_factor; + u16 hai_factor; + u16 dcqcn_t; + u32 dcqcn_b; + u32 rreduce_mperiod; +}; + +struct zxdh_cqp_init_info { + u64 cqp_compl_ctx; + u64 host_ctx_pa; + u64 sq_pa; + struct zxdh_sc_dev *dev; + struct zxdh_cqp_quanta *sq; + struct zxdh_dcqcn_cc_params dcqcn_params; + __le64 *host_ctx; + u64 *scratch_array; + u32 sq_size; + u16 hw_maj_ver; + u16 hw_min_ver; + u8 struct_ver; + u8 hmc_profile; + u8 ena_vf_count; + u8 ceqs_per_vf; + u8 en_datacenter_tcp : 1; + u8 disable_packed : 1; + u8 rocev2_rto_policy : 1; + u8 en_rem_endpoint_trk : 1; + enum zxdh_protocol_used protocol_used; +}; + +struct zxdh_terminate_hdr { + u8 layer_etype; + u8 error_code; + u8 hdrct; + u8 rsvd; +}; + +struct zxdh_cqp_sq_wqe { + __le64 buf[ZXDH_CQP_WQE_SIZE]; +}; + +struct zxdh_sc_aeqe { + __le64 buf[ZXDH_AEQE_SIZE]; +}; + +struct zxdh_ceqe { + __le64 buf[ZXDH_CEQE_SIZE]; +}; + +struct zxdh_cqp_ctx { + __le64 buf[ZXDH_CQP_CTX_SIZE]; +}; + +struct zxdh_cq_shadow_area { + __le64 buf[ZXDH_SHADOW_AREA_SIZE]; +}; + +struct zxdh_dev_hw_stats_offsets { + u32 stats_offset[ZXDH_HW_STAT_INDEX_MAX]; +}; + +struct zxdh_dev_hw_stats { + u64 stats_val[ZXDH_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct zxdh_gather_stats { + u64 val[ZXDH_GATHER_STATS_BUF_SIZE / sizeof(u64)]; +}; + +struct zxdh_hw_stat_map { + u16 byteoff; + u8 bitoff; + u64 bitmask; +}; + +struct zxdh_stats_gather_info { + u8 use_hmc_fcn_index : 1; + u8 use_stats_inst : 1; + u16 hmc_fcn_index; + u8 stats_inst_index; + struct zxdh_dma_mem stats_buff_mem; + void *gather_stats_va; + void *last_gather_stats_va; +}; + +struct zxdh_vsi_pestat { + struct zxdh_hw *hw; + struct zxdh_dev_hw_stats hw_stats; + struct zxdh_stats_gather_info gather_info; + struct timer_list stats_timer; + struct zxdh_sc_vsi *vsi; + spinlock_t lock; /* rdma stats lock */ +}; + +struct zxdh_hw { + u8 __iomem *hw_addr; + u8 __iomem *priv_hw_addr; + u8 __iomem *pci_hw_addr; + struct device 
*device; + struct zxdh_hmc_info hmc; +}; + +struct zxdh_pfpdu { + struct list_head rxlist; + u32 rcv_nxt; + u32 fps; + u32 max_fpdu_data; + u32 nextseqnum; + u32 rcv_start_seq; + u8 mode : 1; + u8 mpa_crc_err : 1; + u8 marker_len; + u64 total_ieq_bufs; + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 no_tx_bufs; + u64 tx_err; + u64 out_of_order; + u64 pmode_count; + struct zxdh_sc_ah *ah; + struct zxdh_puda_buf *ah_buf; + spinlock_t lock; /* fpdu processing lock */ + struct zxdh_puda_buf *lastrcv_buf; +}; + +struct zxdh_sc_pd { + struct zxdh_sc_dev *dev; + u32 pd_id; + int abi_ver; +}; + +struct zxdh_cqp_quanta { + __le64 elem[ZXDH_CQP_WQE_SIZE]; +}; + +struct zxdh_sc_cqp { + u32 size; + u64 sq_pa; + u64 host_ctx_pa; + void *back_cqp; + struct zxdh_sc_dev *dev; + int (*process_cqp_sds)(struct zxdh_sc_dev *dev, + struct zxdh_update_sds_info *info); + int (*process_config_pte_table)(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest src_dest); + struct zxdh_ring sq_ring; + struct zxdh_cqp_quanta *sq_base; + struct zxdh_dcqcn_cc_params dcqcn_params; + __le64 *host_ctx; + u64 *scratch_array; + u32 cqp_id; + u32 sq_size; + u32 hw_sq_size; + u16 hw_maj_ver; + u16 hw_min_ver; + u8 struct_ver; + u8 polarity; + u8 hmc_profile; + u8 ena_vf_count; + u8 timeout_count; + u8 ceqs_per_vf; + u8 en_datacenter_tcp : 1; + u8 disable_packed : 1; + u8 rocev2_rto_policy : 1; + u8 en_rem_endpoint_trk : 1; + u8 state_cfg : 1; // C_RDMA_CQP_CONTEXT_0 [31] + enum zxdh_protocol_used protocol_used; +}; + +struct zxdh_sc_aeq { + u32 size; + u64 aeq_elem_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_aeqe *aeqe_base; + void *pbl_list; + u32 elem_cnt; + struct zxdh_ring aeq_ring; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u32 msix_idx; + u8 polarity; + u8 get_polarity_flag; + u8 virtual_map : 1; +}; + +struct zxdh_sc_ceq { + u32 size; + u64 ceq_elem_pa; + struct zxdh_sc_dev *dev; + struct zxdh_ceqe *ceqe_base; + void *pbl_list; + bool valid_ceq; + u32 ceq_id; + u32 ceq_index; 
+ u32 elem_cnt; + u32 log2_elem_size; + struct zxdh_ring ceq_ring; + u8 pbl_chunk_size; + u8 tph_val; + u32 first_pm_pbl_idx; + u32 msix_idx; + u8 polarity; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_cq **reg_cq; + u32 reg_cq_size; + spinlock_t req_cq_lock; /* protect access to reg_cq array */ + u8 virtual_map : 1; + u8 tph_en : 1; + u8 itr_no_expire : 1; +}; + +struct zxdh_sc_cq { + struct zxdh_cq_uk cq_uk; + u64 cq_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + void *pbl_list; + void *back_cq; + u32 ceq_id; + u32 ceq_index; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u8 cq_type; + u8 tph_val; + u32 first_pm_pbl_idx; + u8 ceqe_mask : 1; + u8 virtual_map : 1; + u8 ceq_id_valid : 1; + u8 tph_en; + u8 cq_st; + u16 is_in_list_cnt; + u16 cq_max; + u16 cq_period; + u8 scqe_break_moderation_en : 1; + u8 cq_overflow_locked_flag : 1; +}; + +struct zxdh_sc_qp { + struct zxdh_qp_uk qp_uk; + u64 sq_pa; + u64 rq_pa; + u64 hw_host_ctx_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_pd *pd; + struct zxdh_sc_srq *srq; + __le64 *hw_host_ctx; + void *llp_stream_handle; + struct zxdh_pfpdu pfpdu; + u32 ieq_qp; + u8 *q2_buf; + u64 qp_compl_ctx; + u32 qp_ctx_num; + u16 qs_handle; + u16 push_offset; + u8 flush_wqes_count; + u8 sq_tph_val; + u8 rq_tph_val; + u8 qp_state; + u8 hw_sq_size; + u8 hw_rq_size; + u8 src_mac_addr_idx; + + u8 on_qoslist : 1; + u8 ieq_pass_thru : 1; + u8 sq_tph_en : 1; + u8 rq_tph_en : 1; + u8 rcv_tph_en : 1; + u8 xmit_tph_en : 1; + u8 virtual_map : 1; + u8 flush_sq : 1; + + u8 flush_rq : 1; + u8 sq_flush_code : 1; + u8 rq_flush_code : 1; + u8 is_nvmeof_ioq : 1; + u8 is_nvmeof_tgt : 1; + u8 nvme_flush_qp : 1; + u8 is_credit_en : 1; + u8 resv : 1; + + u32 nvmeof_qid; + enum zxdh_flush_opcode flush_code; + enum zxdh_qp_event_type event_type; + u8 term_flags; + u8 user_pri; + struct list_head list; + u8 is_srq; + u32 tx_last_ack_psn; + u32 aeq_entry_err_last_psn; + u32 
aeq_retry_err_last_psn; + u8 entry_err_cnt; + u8 retry_err_cnt; +}; + +struct zxdh_stats_inst_info { + bool use_hmc_fcn_index; + u8 hmc_fn_id; + u8 stats_idx; +}; + +struct zxdh_up_info { + u8 map[8]; + u8 cnp_up_override; + u8 hmc_fcn_idx; + u8 use_vlan : 1; + u8 use_cnp_up_override : 1; +}; + +#define ZXDH_MAX_WS_NODES 0x3FF +#define ZXDH_WS_NODE_INVALID 0xFFFF + +struct zxdh_ws_node_info { + u16 id; + u16 vsi; + u16 parent_id; + u16 qs_handle; + u8 type_leaf : 1; + u8 enable : 1; + u8 prio_type; + u8 tc; + u8 weight; +}; + +#define ZXDH_VCHNL_MAX_VF_MSG_SIZE 512 +#define ZXDH_LEAF_DEFAULT_REL_BW 64 +#define ZXDH_PARENT_DEFAULT_REL_BW 1 + +struct zxdh_qos { + struct list_head qplist; + struct mutex qos_mutex; /* protect QoS attributes per QoS level */ + u64 lan_qos_handle; + u32 l2_sched_node_id; + u16 qs_handle; + u8 traffic_class; + u8 rel_bw; + u8 prio_type; + bool valid; +}; + +struct zxdh_config_check { + u8 config_ok : 1; + u8 lfc_set : 1; + u8 pfc_set : 1; + u8 traffic_class; + u16 qs_handle; +}; + +struct zxdh_vfdev { + struct zxdh_sc_dev *pf_dev; + struct zxdh_sc_vsi *vf_vsi; + u8 *hmc_info_mem; + u8 vf_msg_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + struct zxdh_hmc_info hmc_info; + u32 max_ceqs; + u32 pbleq_unallocated_pble; + u64 pbleq_fpm_base_addr; + u64 pbleq_next_fpm_addr; + u32 pblemr_unallocated_pble; + u64 pblemr_fpm_base_addr; + u64 pblemr_next_fpm_addr; + + refcount_t refcnt; + u16 pmf_index; + u16 vf_id; + u16 vhca_id; + u16 iw_vf_idx; + u8 stats_initialized : 1; + u8 pf_hmc_initialized : 1; + u8 reset_en : 1; + u8 port_vlan_en : 1; +}; + +#define ZXDH_INVALID_STATS_IDX 0xff +struct zxdh_sc_vsi { + u16 vsi_idx; + struct zxdh_sc_dev *dev; + struct zxdh_vfdev *vf_dev; + void *back_vsi; + u32 ilq_count; + struct zxdh_virt_mem ilq_mem; + struct zxdh_puda_rsrc *ilq; + u32 ieq_count; + struct zxdh_virt_mem ieq_mem; + struct zxdh_puda_rsrc *ieq; + u32 exception_lan_q; + u16 mtu; + u16 vf_id; + enum zxdh_vm_vf_type vm_vf_type; + u8 stats_inst_alloc : 1; + u8 
tc_change_pending : 1; + struct zxdh_vsi_pestat *pestat; + atomic_t qp_suspend_reqs; + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + struct zxdh_config_check cfg_check[ZXDH_MAX_USER_PRIORITY]; + bool tc_print_warning[IEEE_8021QAZ_MAX_TCS]; + u8 qos_rel_bw; + u8 qos_prio_type; + u8 stats_idx; + u8 dscp_map[ZXDH_DSCP_NUM_VAL]; + struct zxdh_qos qos[ZXDH_MAX_USER_PRIORITY]; + u64 hw_stats_regs[ZXDH_HW_STAT_INDEX_MAX]; + u8 dscp_mode : 1; +}; +struct zxdh_srq_axi_ram { + u32 __iomem *db; + u32 __iomem *srql; +}; + +struct zxdh_ceq_axi { + u32 __iomem *ceqe_axi_info; + u32 __iomem *rpble_axi_info; + u32 __iomem *lpble_axi_info; + u32 __iomem *int_info; +}; + +struct zxdh_aeq_vhca_pfvf { + u32 __iomem *aeq_msix_data; + u32 __iomem *aeq_msix_config; + u32 __iomem *aeq_root_axi_data; + u32 __iomem *aeq_leaf_axi_data; + u32 __iomem *aeq_wr_axi_data; + u32 __iomem *aeq_aee_flag; +}; + +struct zxdh_hw_stats { + u64 rdma_stats_entry[ZXDH_HW_STAT_INDEX_MAX]; +}; + +struct zxdh_rdma_stats_get { + u64 rdma_stats_entry[ZXDH_HW_STAT_INDEX_MAX]; + u8 rdma_stats_entry_sta[ZXDH_HW_STAT_INDEX_MAX]; +}; +struct zxdh_data_cap_sd { + u64 data_cap_base; + u64 data_len; + u16 sd_cnt; + struct zxdh_hmc_sd_entry *entry; +}; + +struct zxdh_sc_dev { + struct list_head cqp_cmd_head; /* head of the CQP command list */ + spinlock_t cqp_lock; /* protect CQP list access */ + bool stats_idx_array[ZXDH_MAX_STATS_COUNT_GEN1]; + struct zxdh_dma_mem vf_fpm_query_buf[ZXDH_MAX_PE_ENA_VF_COUNT]; + struct zxdh_dma_mem clear_dpu_mem; + struct zxdh_dma_mem nof_clear_dpu_mem; + + u64 pte_l2d_startpa; // PTE L2D PA + u32 pte_l2d_len; // PTE L2D LEN + struct zxdh_hw *hw; + u8 __iomem *db_addr; + u32 __iomem *wqe_alloc_db; + u32 __iomem *cq_arm_db; + u32 __iomem *aeq_alloc_db; + u32 __iomem *cqp_db; + u32 __iomem *cq_ack_db; + u32 __iomem *ceq_itr_mask_db; + u32 __iomem *aeq_itr_mask_db; + u32 
__iomem *hw_regs[ZXDH_MAX_REGS]; + u32 __iomem *ceq_itr_enable; + // u32 __iomem *ceq_ep_addr[ZXDH_MAX_EP_NUM]; + // struct zxdh_ep_addr ceq_ep_addr[ZXDH_MAX_EP_NUM]; + struct zxdh_ceq_axi ceq_axi; + u32 __iomem *aeq_itr_enable; + u32 __iomem *aeq_tail_pointer; + // struct zxdh_ep_addr aeq_ep_addr[ZXDH_MAX_EP_NUM]; + struct zxdh_aeq_vhca_pfvf aeq_vhca_pfvf; + // struct zxdh_cm_aeq_axi aeq_axi; + u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */ + struct zxdh_srq_axi_ram srq_axi_ram; + u64 hw_masks[ZXDH_MAX_MASKS]; + u8 hw_shifts[ZXDH_MAX_SHIFTS]; + struct zxdh_hw_stats stats_entry; + u64 hw_stats_regs[ZXDH_HW_STAT_INDEX_MAX]; + u64 hw_stats_vf_regs[ZXDH_HW_STAT_INDEX_MAX]; + u64 feature_info[ZXDH_MAX_FEATURES]; + u64 cqp_cmd_stats[ZXDH_MAX_CQP_OPS]; + struct zxdh_hw_attrs hw_attrs; + struct zxdh_hmc_info *hmc_info; + struct zxdh_vfdev *vf_dev[ZXDH_MAX_PE_ENA_VF_COUNT]; + u8 vf_recv_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + u16 vf_recv_len; + + spinlock_t vf_dev_lock; /* sync vf_dev usage with async events like reset */ + struct workqueue_struct *vchnl_wq; + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_aeq *aeq; + struct zxdh_sc_ceq *ceq[ZXDH_CEQ_MAX_COUNT]; + struct zxdh_sc_cq *ccq; + const struct zxdh_irq_ops *irq_ops; + u32 max_ceqs; + u32 base_qpn; + u32 base_cqn; + u32 base_srqn; + u32 base_ceqn; + u32 max_qp; + u32 max_cq; + u32 max_srq; + struct zxdh_ws_node *ws_tree_root; + struct mutex ws_mutex; /* ws tree mutex */ + struct zxdh_qos qos[ZXDH_MAX_USER_PRIORITY]; + u16 num_vfs; + u16 active_vfs_num; + u8 hmc_fn_id; + u16 vf_id; + u16 vhca_id; + u16 vhca_id_pf; + u16 cache_id; + u8 ep_id; + u8 hmc_epid; + u8 soc_tx_rx_cqp_ind; + u8 soc_tx_rx_cqp_axid; + u8 soc_rdma_io_ind; + u16 ird_size; + u32 total_vhca; + u16 vhca_gqp_start; + u16 vhca_gqp_cnt; + u16 vhca_8k_index_start; + u16 vhca_8k_index_cnt; + u16 vhca_ud_gqp; + u16 vhca_ud_8k_index; + u64 nof_ioq_ddr_addr; + u8 chip_version; + u64 l2d_smmu_addr; + u32 l2d_smmu_l2_offset; + u32 
s_udV8NumL2Pta; + u8 vchnl_up : 1; + u8 ceq_valid : 1; + u8 privileged : 1; + u8 double_vlan_en : 1; + u8 hmc_use_dpu_ddr : 1; + u8 np_mode_low_lat : 1; + u8 vf_mb_init : 1; + struct mutex vchnl_mutex; + u8 pci_rev; + int (*ws_add)(struct zxdh_sc_vsi *vsi, u8 user_pri); + void (*ws_remove)(struct zxdh_sc_vsi *vsi, u8 user_pri); + void (*ws_reset)(struct zxdh_sc_vsi *vsi); + struct zxdh_hmc_obj_manage hmc_pf_manager_info; + struct smmu_pte_address *pte_address; + struct zxdh_vf_hmc_obj_info vf_hmcobjinfo[256]; + struct zxdh_data_cap_sd data_cap_sd; + u8 ceq_0_ok; + u8 ceq_interrupt; + u8 tx_stop_on_aeq : 1; + u8 rx_stop_on_aeq : 1; + u8 flag3 : 1; + u8 flag4 : 1; + u8 flag5 : 1; + ktime_t last_time; + u8 driver_load; + u8 flr_query; +}; + +struct zxdh_modify_cq_info { + u64 cq_pa; + struct zxdh_cqe *cq_base; + u32 cq_size; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 virtual_map : 1; + u8 cq_resize : 1; +}; + +struct zxdh_create_qp_info { + u8 ord_valid : 1; + u8 tcp_ctx_valid : 1; + u8 cq_num_valid : 1; + u8 arp_cache_idx_valid : 1; + u8 mac_valid : 1; + bool force_lpb; + u8 next_iwarp_state; +}; + +struct zxdh_modify_qp_info { + u64 rx_win0; + u64 rx_win1; + u64 qpc_tx_mask_low; + u64 qpc_tx_mask_high; + u64 qpc_rx_mask_low; + u64 qpc_rx_mask_high; + u16 new_mss; + u8 next_iwarp_state; + u8 curr_iwarp_state; + u8 termlen; + u16 udp_sport; + u8 ord_valid : 1; + u8 tcp_ctx_valid : 1; + u8 udp_ctx_valid : 1; + u8 cq_num_valid : 1; + u8 arp_cache_idx_valid : 1; + u8 reset_tcp_conn : 1; + u8 remove_hash_idx : 1; + u8 dont_send_term : 1; + u8 dont_send_fin : 1; + u8 cached_var_valid : 1; + u8 mss_change : 1; + u8 force_lpb : 1; + u8 mac_valid : 1; +}; + +struct zxdh_modify_srq_info { + int limit; +}; + +struct zxdh_create_srq_info { + u8 state; +}; + +struct zxdh_destroy_srq_info { + u8 state; +}; + +struct zxdh_ccq_cqe_info { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u64 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + u8 
op_code; + u8 mailbox_cqe; + __le64 addrbuf[5]; + bool error; +}; + +struct zxdh_qos_tc_info { + u64 tc_ctx; + u8 rel_bw; + u8 prio_type; + u8 egress_virt_up; + u8 ingress_virt_up; +}; + +struct zxdh_l2params { + struct zxdh_qos_tc_info tc_info[ZXDH_MAX_USER_PRIORITY]; + u32 num_apps; + u16 qs_handle_list[ZXDH_MAX_USER_PRIORITY]; + u16 mtu; + u8 up2tc[ZXDH_MAX_USER_PRIORITY]; + u8 dscp_map[ZXDH_DSCP_NUM_VAL]; + u8 num_tc; + u8 vsi_rel_bw; + u8 vsi_prio_type; + u8 mtu_changed : 1; + u8 tc_changed : 1; + u8 dscp_mode : 1; +}; + +struct zxdh_vsi_init_info { + struct zxdh_sc_dev *dev; + void *back_vsi; + struct zxdh_l2params *params; + u16 exception_lan_q; + u16 pf_data_vsi_num; + enum zxdh_vm_vf_type vm_vf_type; + int (*register_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); + void (*unregister_qset)(struct zxdh_sc_vsi *vsi, + struct zxdh_ws_node *tc_node); +}; + +struct zxdh_vsi_stats_info { + struct zxdh_vsi_pestat *pestat; + u8 fcn_id; + bool alloc_stats_inst; +}; + +struct zxdh_device_init_info { + struct zxdh_hw *hw; + void __iomem *bar0; + struct workqueue_struct *vchnl_wq; + u16 max_vfs; + u8 hmc_fn_id; + bool privileged; +}; + +struct zxdh_ceq_init_info { + u64 ceqe_pa; + struct zxdh_sc_dev *dev; + u64 *ceqe_base; + void *pbl_list; + u32 elem_cnt; + u32 log2_elem_size; + u32 ceq_id; + u32 ceq_index; + u8 virtual_map : 1; + u8 tph_en : 1; + u8 itr_no_expire : 1; + u8 pbl_chunk_size; + u8 tph_val; + u32 first_pm_pbl_idx; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_cq **reg_cq; + u32 reg_cq_idx; + u32 msix_idx; +}; + +struct zxdh_aeq_init_info { + u64 aeq_elem_pa; + struct zxdh_sc_dev *dev; + u32 *aeqe_base; + void *pbl_list; + u32 elem_cnt; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u32 msix_idx; +}; + +struct zxdh_ccq_init_info { + u64 cq_pa; + u64 shadow_area_pa; + struct zxdh_sc_dev *dev; + struct zxdh_cqe *cq_base; + __le64 *shadow_area; + void *pbl_list; + u32 num_elem; + u32 ceq_id; + u32 ceq_index; + u32 cq_num; + 
u32 shadow_read_threshold; + u8 ceqe_mask : 1; + u8 ceq_id_valid : 1; + u8 cqe_size; + u8 virtual_map : 1; + u8 tph_en : 1; + u8 tph_val; + u8 pbl_chunk_size; + u16 cq_max; + u16 cq_period; + u8 scqe_break_moderation_en : 1; + u8 cq_st; + u16 is_in_list_cnt; + u32 first_pm_pbl_idx; + struct zxdh_sc_vsi *vsi; +}; + +struct zxdh_udp_offload_info { + u8 ipv4 : 1; + u8 insert_vlan_tag : 1; + u8 ttl; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr[4]; + u16 pmtu; + u16 vlan_tag; + u8 dest_mac[ETH_ALEN]; + u32 flow_label; + u8 udp_state; + u32 psn_nxt; + u32 lsn; + u32 epsn; + u32 psn_max; + u32 psn_una; + u32 local_ipaddr[4]; + u32 cwnd; + u8 rexmit_thresh; + u8 rnr_nak_thresh; + u8 timeout; + u8 min_rnr_timer; +}; + +struct zxdh_roce_offload_info { + u16 p_key; + u16 err_rq_idx; + u32 qkey; + u32 dest_qp; + u32 local_qp; + u8 roce_tver; + u8 ack_credits; + u8 err_rq_idx_valid; + u32 pd_id; + u16 ord_size; + u16 ird_size; + u8 is_qp1 : 1; + u8 udprivcq_en : 1; + u8 dcqcn_en : 1; + u8 ecn_en : 1; + u8 rcv_no_icrc : 1; + u8 wr_rdresp_en : 1; + u8 bind_en : 1; + u8 fast_reg_en : 1; + u8 priv_mode_en : 1; + u8 rd_en : 1; + u8 timely_en : 1; + u8 dctcp_en : 1; + u8 fw_cc_enable : 1; + u8 use_stats_inst : 1; + u16 t_high; + u16 t_low; + u8 last_byte_sent; + u8 mac_addr[ETH_ALEN]; + u8 rtomin; +}; + +struct zxdh_iwarp_offload_info { + u16 rcv_mark_offset; + u16 snd_mark_offset; + u8 ddp_ver; + u8 rdmap_ver; + u8 iwarp_mode; + u16 err_rq_idx; + u32 pd_id; + u16 ord_size; + u16 ird_size; + u8 ib_rd_en : 1; + u8 align_hdrs : 1; + u8 rcv_no_mpa_crc : 1; + u8 err_rq_idx_valid : 1; + u8 snd_mark_en : 1; + u8 rcv_mark_en : 1; + u8 wr_rdresp_en : 1; + u8 bind_en : 1; + u8 fast_reg_en : 1; + u8 priv_mode_en : 1; + u8 rd_en : 1; + u8 timely_en : 1; + u8 use_stats_inst : 1; + u8 ecn_en : 1; + u8 dctcp_en : 1; + u16 t_high; + u16 t_low; + u8 last_byte_sent; + u8 mac_addr[ETH_ALEN]; + u8 rtomin; +}; + +struct zxdh_tcp_offload_info { + u8 ipv4 : 1; + u8 no_nagle : 1; + u8 
insert_vlan_tag : 1; + u8 time_stamp : 1; + u8 drop_ooo_seg : 1; + u8 avoid_stretch_ack : 1; + u8 wscale : 1; + u8 ignore_tcp_opt : 1; + u8 ignore_tcp_uns_opt : 1; + u8 cwnd_inc_limit; + u8 dup_ack_thresh; + u8 ttl; + u8 src_mac_addr_idx; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr[4]; + //u32 dest_ip_addr0; + //u32 dest_ip_addr1; + //u32 dest_ip_addr2; + //u32 dest_ip_addr3; + u32 snd_mss; + u16 syn_rst_handling; + u16 vlan_tag; + u16 arp_idx; + u32 flow_label; + u8 tcp_state; + u8 snd_wscale; + u8 rcv_wscale; + u32 time_stamp_recent; + u32 time_stamp_age; + u32 snd_nxt; + u32 snd_wnd; + u32 rcv_nxt; + u32 rcv_wnd; + u32 snd_max; + u32 snd_una; + u32 srtt; + u32 rtt_var; + u32 ss_thresh; + u32 cwnd; + u32 snd_wl1; + u32 snd_wl2; + u32 max_snd_window; + u8 rexmit_thresh; + u32 local_ipaddr[4]; +}; + +struct zxdh_qp_host_ctx_info { + u64 qp_compl_ctx; + union { + struct zxdh_tcp_offload_info *tcp_info; + struct zxdh_udp_offload_info *udp_info; + }; + union { + struct zxdh_iwarp_offload_info *iwarp_info; + struct zxdh_roce_offload_info *roce_info; + }; + u32 send_cq_num; + u32 rcv_cq_num; + u32 rem_endpoint_idx; + u8 stats_idx; + u8 srq_valid : 1; + u8 tcp_info_valid : 1; + u8 iwarp_info_valid : 1; + u8 stats_idx_valid : 1; + u8 user_pri; + u8 next_qp_state; + u8 use_srq : 1; +}; + +struct zxdh_aeqe_info { + u64 compl_ctx; + u32 qp_cq_id; + u16 ae_id; + u16 wqe_idx; + u8 tcp_state; + u8 iwarp_state; + u8 qp : 1; + u8 cq : 1; + u8 sq : 1; + u8 rq : 1; + u8 srq : 1; + u8 in_rdrsp_wr : 1; + u8 out_rdrsp : 1; + u8 aeqe_overflow : 1; + u8 q2_data_written; + u8 ae_src; + u32 vhca_id; +}; + +struct zxdh_allocate_stag_info { + u64 total_len; + u64 first_pm_pbl_idx; + u32 chunk_size; + u32 stag_idx; + u32 page_size; + u32 pd_id; + u16 access_rights; + u8 remote_access : 1; + u8 use_hmc_fcn_index : 1; + u8 use_pf_rid : 1; + u16 hmc_fcn_index; +}; + +struct zxdh_mw_alloc_info { + u32 mw_stag_index; + u32 page_size; + u32 pd_id; + u8 remote_access : 1; + u8 
mw_wide : 1; + u8 mw1_bind_dont_vldt_key : 1; +}; + +struct zxdh_reg_ns_stag_info { + u64 reg_addr_pa; + u64 va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum zxdh_addressing_type addr_type; + zxdh_stag_index stag_idx; + u16 access_rights; + u32 pd_id; + zxdh_stag_key stag_key; + u8 use_hmc_fcn_index : 1; + u16 hmc_fcn_index; + u8 use_pf_rid : 1; +}; + +struct zxdh_fast_reg_stag_info { + u64 wr_id; + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum zxdh_addressing_type addr_type; + zxdh_stag_index stag_idx; + u16 access_rights; + u32 pd_id; + zxdh_stag_key stag_key; + u8 local_fence : 1; + u8 read_fence : 1; + u8 signaled : 1; + u8 push_wqe : 1; + u8 use_hmc_fcn_index : 1; + u16 hmc_fcn_index; + u8 use_pf_rid : 1; + u8 defer_flag : 1; +}; + +struct zxdh_dealloc_stag_info { + u32 stag_idx; + u32 pd_id; + u8 mr : 1; + u8 dealloc_pbl : 1; +}; + +struct zxdh_register_shared_stag { + u64 va; + enum zxdh_addressing_type addr_type; + zxdh_stag_index new_stag_idx; + zxdh_stag_index parent_stag_idx; + u32 access_rights; + u32 pd_id; + u32 page_size; + zxdh_stag_key new_stag_key; +}; + +struct zxdh_qp_init_info { + struct zxdh_qp_uk_init_info qp_uk_init_info; + struct zxdh_sc_pd *pd; + struct zxdh_sc_vsi *vsi; + struct zxdh_sc_dev *dev; + __le64 *host_ctx; + u8 *q2; + u64 sq_pa; + u64 rq_pa; + u64 host_ctx_pa; + u64 q2_pa; + u64 shadow_area_pa; + u8 sq_tph_val; + u8 rq_tph_val; + u8 sq_tph_en : 1; + u8 rq_tph_en : 1; + u8 rcv_tph_en : 1; + u8 xmit_tph_en : 1; + u8 virtual_map : 1; +}; + +struct zxdh_cq_init_info { + struct zxdh_sc_dev *dev; + u64 cq_base_pa; + u64 shadow_area_pa; + u32 ceq_id; + u32 ceq_index; + u32 shadow_read_threshold; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 virtual_map : 1; + u8 ceqe_mask : 1; + u8 ceq_id_valid : 1; + u8 tph_en : 1; + u8 tph_val; + u8 type; + struct zxdh_cq_uk_init_info cq_uk_init_info; + struct zxdh_sc_vsi *vsi; 
+}; + +struct zxdh_upload_context_info { + u64 buf_pa; + u32 qp_id; + u8 qp_type; + u8 freeze_qp : 1; + u8 raw_format : 1; +}; + +struct zxdh_local_mac_entry_info { + u8 mac_addr[6]; + u16 entry_idx; +}; + +struct zxdh_add_arp_cache_entry_info { + u8 mac_addr[ETH_ALEN]; + u32 reach_max; + u16 arp_index; + bool permanent; +}; + +struct zxdh_apbvt_info { + u16 port; + bool add; +}; + +struct zxdh_qhash_table_info { + struct zxdh_sc_vsi *vsi; + enum zxdh_quad_hash_manage_type manage; + enum zxdh_quad_entry_type entry_type; + u8 vlan_valid : 1; + u8 ipv4_valid : 1; + u8 mac_addr[ETH_ALEN]; + u16 vlan_id; + u8 user_pri; + u32 qp_num; + u32 dest_ip[4]; + u32 src_ip[4]; + u16 dest_port; + u16 src_port; +}; + +struct zxdh_cqp_manage_push_page_info { + u32 push_idx; + u16 qs_handle; + u8 free_page; + u8 push_page_type; +}; + +struct zxdh_qp_flush_info { + u16 sq_minor_code; + u16 sq_major_code; + u16 rq_minor_code; + u16 rq_major_code; + u16 ae_code; + u8 ae_src; + bool sq : 1; + bool rq : 1; + u8 userflushcode : 1; + u8 generate_ae : 1; +}; + +struct zxdh_gen_ae_info { + u16 ae_code; + u8 ae_src; +}; + +struct zxdh_cqp_timeout { + u64 compl_cqp_cmds; + u32 count; +}; + +struct zxdh_src_copy_dest { + u64 src; + u32 len; + u64 dest; +}; + +struct zxdh_dam_read_bycqe { + u8 num; + u8 bitwidth; // 0:64 1:32 + u8 valuetype; + __le64 addrbuf[5]; +}; + +struct zxdh_dma_write64_date { + u8 num; + __le64 addrbuf[3]; + __le64 databuf[3]; +}; + +struct zxdh_dma_write32_date { + u8 num; + u8 inter_sour_sel; + u8 need_inter; + __le64 addrbuf[4]; + __le64 databuf[4]; +}; + +struct zxdh_path_index { + u16 vhca_id; + u8 obj_id; + u8 waypartion; + u8 path_select; + u8 inter_select; +}; + +struct zxdh_mailboxhead_data { + u64 msg0; + u64 msg1; + u64 msg2; + u64 msg3; + u64 msg4; +}; + +struct zxdh_irq_ops { + void (*zxdh_cfg_aeq)(struct zxdh_sc_dev *dev, u32 irq_idx); + void (*zxdh_ceq_en_irq)(struct zxdh_sc_dev *dev, u32 idx); + void (*zxdh_aeq_en_irq)(struct zxdh_sc_dev *dev, bool 
enable); +}; + +u32 zxdh_num_to_log(u32 size_num); + +void zxdh_sc_ccq_arm(struct zxdh_sc_cq *ccq); +int zxdh_sc_ccq_create(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq); +int zxdh_sc_ccq_destroy(struct zxdh_sc_cq *ccq, u64 scratch, bool post_sq); +int zxdh_sc_ccq_get_cqe_info(struct zxdh_sc_cq *ccq, + struct zxdh_ccq_cqe_info *info); +int zxdh_sc_ccq_init(struct zxdh_sc_cq *ccq, struct zxdh_ccq_init_info *info); + +int zxdh_sc_cceq_create(struct zxdh_sc_ceq *ceq, u64 scratch); +int zxdh_sc_cceq_destroy_done(struct zxdh_sc_ceq *ceq); + +int zxdh_sc_ceq_destroy(struct zxdh_sc_ceq *ceq, u64 scratch, bool post_sq); +int zxdh_sc_ceq_init(struct zxdh_sc_ceq *ceq, struct zxdh_ceq_init_info *info); +void zxdh_sc_cleanup_ceqes(struct zxdh_sc_cq *cq, struct zxdh_sc_ceq *ceq); +void *zxdh_sc_process_ceq(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *ceq); + +int zxdh_sc_aeq_init(struct zxdh_sc_aeq *aeq, struct zxdh_aeq_init_info *info); +int zxdh_sc_get_next_aeqe(struct zxdh_sc_aeq *aeq, struct zxdh_aeqe_info *info); +int zxdh_sc_repost_aeq_tail(struct zxdh_sc_dev *dev, u32 idx); + +void zxdh_sc_pd_init(struct zxdh_sc_dev *dev, struct zxdh_sc_pd *pd, u32 pd_id, + int abi_ver); +void zxdh_cfg_aeq(struct zxdh_sc_dev *dev, u32 irq_idx); +#if IS_ENABLED(CONFIG_CONFIGFS_FS) +void zxdh_set_irq_rate_limit(struct zxdh_sc_dev *dev, u32 idx, u32 interval); +#endif +void zxdh_check_cqp_progress(struct zxdh_cqp_timeout *cqp_timeout, + struct zxdh_sc_dev *dev); +int zxdh_cqp_poll_registers(struct zxdh_sc_cqp *cqp, u32 tail, u32 count); +int zxdh_sc_cqp_create(struct zxdh_sc_cqp *cqp, u16 *maj_err, u16 *min_err); +int zxdh_sc_cqp_destroy(struct zxdh_sc_cqp *cqp, bool free_hwcqp); +int zxdh_sc_cqp_init(struct zxdh_sc_cqp *cqp, struct zxdh_cqp_init_info *info); +void zxdh_sc_cqp_post_sq(struct zxdh_sc_cqp *cqp); +int zxdh_sc_poll_for_cqp_op_done(struct zxdh_sc_cqp *cqp, u8 opcode, + struct zxdh_ccq_cqe_info *cmpl_info); +int zxdh_sc_qp_create(struct zxdh_sc_qp *qp, u64 scratch, bool 
post_sq); +int zxdh_sc_qp_destroy(struct zxdh_sc_qp *qp, u64 scratch, bool ignore_mw_bnd, + bool post_sq); +int zxdh_sc_qp_flush_wqes(struct zxdh_sc_qp *qp, + struct zxdh_qp_flush_info *info, u64 scratch, + bool post_sq); +int zxdh_sc_qp_init(struct zxdh_sc_qp *qp, struct zxdh_qp_init_info *info); +int zxdh_sc_qp_modify(struct zxdh_sc_qp *qp, struct zxdh_modify_qp_info *info, + u64 scratch, bool post_sq); +void zxdh_sc_qp_setctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info); +void zxdh_sc_qp_resetctx_roce(struct zxdh_sc_qp *qp, __le64 *qp_ctx); +u16 zxdh_get_rc_gqp_id(u16 qp_8k_index, u16 vhca_gqp_start, u16 vhca_gqp_cnt); +int zxdh_sc_cq_destroy(struct zxdh_sc_cq *cq, u64 scratch, bool post_sq); +int zxdh_sc_cq_init(struct zxdh_sc_cq *cq, struct zxdh_cq_init_info *info); +void zxdh_sc_cq_resize(struct zxdh_sc_cq *cq, struct zxdh_modify_cq_info *info); +int zxdh_sc_aeq_destroy(struct zxdh_sc_aeq *aeq, u64 scratch, bool post_sq); + +void sc_vsi_update_stats(struct zxdh_sc_vsi *vsi); +void zxdh_sc_qp_modify_ctx_udp_sport(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_qp_host_ctx_info *info); +void zxdh_sc_qp_modify_private_cmd_qpc(struct zxdh_sc_qp *qp, __le64 *qp_ctx, + struct zxdh_modify_qpc_item *info); +struct cqp_info { + union { + struct { + struct zxdh_sc_qp *qp; + struct zxdh_create_qp_info info; + u64 scratch; + } qp_create; + + struct { + struct zxdh_sc_qp *qp; + struct zxdh_modify_qp_info info; + u64 scratch; + } qp_modify; + + struct { + struct zxdh_sc_qp *qp; + u64 scratch; + bool remove_hash_idx; + bool ignore_mw_bnd; + } qp_destroy; + + struct { + struct zxdh_sc_srq *srq; + struct zxdh_create_srq_info info; + u64 scratch; + } srq_create; + + struct { + struct zxdh_sc_srq *srq; + struct zxdh_modify_srq_info info; + u64 scratch; + } srq_modify; + + struct { + struct zxdh_sc_srq *srq; + u64 scratch; + struct zxdh_destroy_srq_info info; + // bool remove_hash_idx; + } srq_destroy; + + struct { + struct zxdh_sc_cq 
*cq; + u64 scratch; + } cq_create; + + struct { + struct zxdh_sc_cq *cq; + struct zxdh_modify_cq_info info; + u64 scratch; + } cq_modify; + + struct { + struct zxdh_sc_cq *cq; + u64 scratch; + } cq_destroy; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_allocate_stag_info info; + u64 scratch; + } alloc_stag; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_mw_alloc_info info; + u64 scratch; + } mw_alloc; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_reg_ns_stag_info info; + u64 scratch; + } mr_reg_non_shared; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_dealloc_stag_info info; + u64 scratch; + } dealloc_stag; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_add_arp_cache_entry_info info; + u64 scratch; + } add_arp_cache_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u16 arp_index; + } del_arp_cache_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_local_mac_entry_info info; + u64 scratch; + } add_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u8 entry_idx; + u8 ignore_ref_count; + } del_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + } alloc_local_mac_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_manage_vf_pble_info info; + u64 scratch; + } manage_vf_pble_bp; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_cqp_manage_push_page_info info; + u64 scratch; + } manage_push_page; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_upload_context_info info; + u64 scratch; + } qp_upload_context; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_hmc_fcn_info info; + u64 scratch; + } manage_hmc_pm; + + struct { + struct zxdh_sc_ceq *ceq; + u64 scratch; + } ceq_create; + + struct { + struct zxdh_sc_ceq *ceq; + u64 scratch; + } ceq_destroy; + + struct { + struct zxdh_sc_aeq *aeq; + u64 scratch; + } aeq_create; + + struct { + struct zxdh_sc_aeq *aeq; + u64 scratch; + } aeq_destroy; + + struct { + struct zxdh_sc_qp *qp; + struct 
zxdh_qp_flush_info info; + u64 scratch; + } qp_flush_wqes; + + struct { + struct zxdh_sc_qp *qp; + struct zxdh_gen_ae_info info; + u64 scratch; + } gen_ae; + + struct { + struct zxdh_sc_cqp *cqp; + void *fpm_val_va; + u64 fpm_val_pa; + u8 hmc_fn_id; + u64 scratch; + } query_fpm_val; + + struct { + struct zxdh_sc_cqp *cqp; + void *fpm_val_va; + u64 fpm_val_pa; + u8 hmc_fn_id; + u64 scratch; + } commit_fpm_val; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_apbvt_info info; + u64 scratch; + } manage_apbvt_entry; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_qhash_table_info info; + u64 scratch; + } manage_qhash_table_entry; + + struct { + struct zxdh_sc_dev *dev; + struct zxdh_update_sds_info info; + u64 scratch; + } update_pe_sds; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_sc_qp *qp; + u64 scratch; + } suspend_resume; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_ah_info info; + u64 scratch; + } ah_create; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_ah_info info; + u64 scratch; + } ah_destroy; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_create; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_destroy; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mcast_grp_info *info; + u64 scratch; + } mc_modify; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_stats_inst_info info; + u64 scratch; + } stats_manage; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_stats_gather_info info; + u64 scratch; + } stats_gather; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_ws_node_info info; + u64 scratch; + } ws_node; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_up_info info; + u64 scratch; + } up_map; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_dma_mem query_buff_mem; + u64 scratch; + } query_rdma; + + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_src_copy_dest src_dest; + struct zxdh_path_index 
src_path_index; + struct zxdh_path_index dest_path_index; + bool host; + u64 scratch; + } dma_writeread; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_mailboxhead_data mbhead_data; + u64 scratch; + u32 dst_vf_id; + } hmc_mb; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_path_index dest_path_index; + struct zxdh_dma_write32_date dma_data; + u64 scratch; + } dma_write32data; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_path_index dest_path_index; + struct zxdh_dma_write64_date dma_data; + u64 scratch; + } dma_write64data; + struct { + struct zxdh_sc_cqp *cqp; + struct zxdh_dam_read_bycqe dma_rcqe; + struct zxdh_path_index src_path_index; + u64 scratch; + } dma_read_cqe; + struct { + struct zxdh_sc_dev *dev; + u32 qpn; + u64 qpc_buf_pa; + u64 scratch; + } query_qpc; + struct { + struct zxdh_sc_dev *dev; + u32 cqn; + u64 cqc_buf_pa; + u64 scratch; + } query_cqc; + struct { + struct zxdh_sc_dev *dev; + u32 ceqn; + u64 ceqc_buf_pa; + u64 scratch; + } query_ceqc; + struct { + struct zxdh_sc_dev *dev; + u16 aeqn; + u64 aeqc_buf_pa; + u64 scratch; + } query_aeqc; + struct { + struct zxdh_sc_dev *dev; + u32 srqn; + u64 srqc_buf_pa; + u64 scratch; + } query_srqc; + + struct { + struct zxdh_sc_cqp *cqp; + u64 scratch; + u32 mkeyindex; + } query_mkey; + + } u; +}; + +struct cqp_cmds_info { + struct list_head cqp_cmd_entry; + u8 cqp_cmd; + u8 post_sq; + struct cqp_info in; +}; + +struct zxdh_virtchnl_work { + struct work_struct work; + u8 vf_msg_buf[ZXDH_VCHNL_MAX_VF_MSG_SIZE]; + struct zxdh_sc_dev *dev; + u16 vf_id; + u16 len; +}; + +__le64 *zxdh_sc_cqp_get_next_send_wqe_idx(struct zxdh_sc_cqp *cqp, u64 scratch, + u32 *wqe_idx); + +/** + * zxdh_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @scratch: private data for CQP WQE + */ +static inline __le64 *zxdh_sc_cqp_get_next_send_wqe(struct zxdh_sc_cqp *cqp, + u64 scratch) +{ + u32 wqe_idx; + + return zxdh_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); +} +#endif /* 
ZXDH_TYPE_H */ diff --git a/drivers/infiniband/hw/zrdma/uda.c b/drivers/infiniband/hw/zrdma/uda.c new file mode 100644 index 000000000000..184b2efaff2d --- /dev/null +++ b/drivers/infiniband/hw/zrdma/uda.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "uda.h" +#include "uda_d.h" +#include "vf.h" +#include "virtchnl.h" +#include "main.h" + +extern notify_remote_ip_update remote_ip_update_hook; +static int ah_remote_ip_info_process(struct zxdh_device *iwdev, struct zxdh_ah_info *ah_info, int op_type) +{ + struct zxdh_rdma_to_eth_ip_para ip_para = { 0 }; + u64 dmac = 0; + u64 smac = 0; + int ret = 0; + + dmac = LS_64_1(ah_info->dmac[5], 0) | LS_64_1(ah_info->dmac[4], 8) | + LS_64_1(ah_info->dmac[3], 16) | LS_64_1(ah_info->dmac[2], 24) | + LS_64_1(ah_info->dmac[1], 32) | LS_64_1(ah_info->dmac[0], 40); + smac = LS_64_1(ah_info->mac_addr[5], 0) | LS_64_1(ah_info->mac_addr[4], 8) | + LS_64_1(ah_info->mac_addr[3], 16) | LS_64_1(ah_info->mac_addr[2], 24) | + LS_64_1(ah_info->mac_addr[1], 32) | LS_64_1(ah_info->mac_addr[0], 40); + + ip_para.ifname = iwdev->netdev->name; + ip_para.ipv4 = ah_info->ipv4_valid; + if (ip_para.ipv4) { + ip_para.src_ip[0] = 0; + ip_para.src_ip[1] = 0; + ip_para.src_ip[2] = 0; + ip_para.src_ip[3] = ah_info->src_ip_addr[0]; + ip_para.dst_ip[0] = 0; + ip_para.dst_ip[1] = 0; + ip_para.dst_ip[2] = 0; + ip_para.dst_ip[3] = ah_info->dest_ip_addr[0]; + } else { + memcpy(ip_para.src_ip, ah_info->src_ip_addr, sizeof(ip_para.src_ip)); + memcpy(ip_para.dst_ip, ah_info->dest_ip_addr, sizeof(ip_para.dst_ip)); + } + ip_para.src_mac = smac; + ip_para.dst_mac = dmac; + ip_para.mode = op_type; + + pr_debug("double plane %s[%d]: ipv4=%d, op_type=%d, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, dst_mac=0x%llx, dmac=%x-%x-%x-%x-%x-%x\n", + __func__, 
__LINE__, ip_para.ipv4, op_type, + ip_para.src_ip[0], ip_para.src_ip[1], ip_para.src_ip[2], ip_para.src_ip[3], + ip_para.dst_ip[0], ip_para.dst_ip[1], ip_para.dst_ip[2], ip_para.dst_ip[3], + ip_para.dst_mac, ah_info->dmac[0], ah_info->dmac[1], ah_info->dmac[2], ah_info->dmac[3], ah_info->dmac[4], ah_info->dmac[5]); + pr_debug("double plane %s[%d]: src_mac=0x%llx, smac=%x-%x-%x-%x-%x-%x\n", + __func__, __LINE__, ip_para.src_mac, + ah_info->mac_addr[0], ah_info->mac_addr[1], ah_info->mac_addr[2], ah_info->mac_addr[3], ah_info->mac_addr[4], ah_info->mac_addr[5]); + + if (op_type == RDMA_ADD_REMOTE_IP || op_type == RDMA_DEL_REMOTE_IP) { + ret = remote_ip_info_process(iwdev, &ip_para); + } else { + pr_info("%s[%d]: error op_type=%d\n", __func__, __LINE__, op_type); + ret = -1; + } + + return ret; +} + +/** + * zxdh_sc_access_ah() - Create, modify or delete AH + * @cqp: struct for cqp hw + * @info: ah information + * @op: Operation + * @scratch: u64 saved to be used during cqp completion + */ +int zxdh_sc_access_ah(struct zxdh_sc_cqp *cqp, struct zxdh_ah_info *info, + u32 op, u64 scratch) +{ + __le64 *wqe; + u64 qw1, qw2; + struct ib_device *ibdev = zxdh_get_ibdev(cqp->dev); + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct iidc_core_dev_info *cdev_info = (struct iidc_core_dev_info *)iwdev->rf->cdev; + u32 dual_tor_switch = 0xFFFF; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + info->tc_tos &= ~ECN_CODE_PT_MASK; + info->tc_tos |= ECN_CODE_PT_VAL; + + qw1 = FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_PDINDEX, info->pd_idx) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_TC, info->tc_tos) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG, + info->insert_vlan_tag) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_OPCODE, op); + + qw2 = FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) | + 
FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl); + set_64bit_val(wqe, 8, qw2); + + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_UDAQPC_VLANTAG, info->vlan_tag) | + LS_64_1(info->mac_addr[5], 16) | + LS_64_1(info->mac_addr[4], 24) | + LS_64_1(info->mac_addr[3], 32) | + LS_64_1(info->mac_addr[2], 40) | + LS_64_1(info->mac_addr[1], 48) | + LS_64_1(info->mac_addr[0], 56)); + + set_64bit_val(wqe, 24, + LS_64_1(info->dmac[5], 16) | LS_64_1(info->dmac[4], 24) | + LS_64_1(info->dmac[3], 32) | + LS_64_1(info->dmac[2], 40) | + LS_64_1(info->dmac[1], 48) | + LS_64_1(info->dmac[0], 56)); + + if (!info->ipv4_valid) { + set_64bit_val(wqe, 32, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1, + info->dest_ip_addr[1]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->dest_ip_addr[0])); + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->dest_ip_addr[3]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2, + info->dest_ip_addr[2])); + set_64bit_val(wqe, 48, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1, + info->src_ip_addr[1]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->src_ip_addr[0])); + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->src_ip_addr[3]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2, + info->src_ip_addr[2])); + } else { + set_64bit_val(wqe, 40, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->dest_ip_addr[0])); + set_64bit_val(wqe, 56, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->src_ip_addr[0])); + } + + dual_tor_switch = readl(cdev_info->hw_addr + ZXDH_DUAL_TOR_SWITCH_OFFSET); + pr_debug("%s[%d]: hw_addr=0x%llx, dual_tor_switch=0x%x\n", + __func__, __LINE__, + (u64)(uintptr_t)cdev_info->hw_addr, dual_tor_switch); + if (remote_ip_update_hook && (dual_tor_switch == ZXDH_DUAL_TOR_SWITCH_OPEN)) { + if (op == ZXDH_CQP_OP_CREATE_AH) { + ah_remote_ip_info_process(iwdev, info, RDMA_ADD_REMOTE_IP); + } else if (op == ZXDH_CQP_OP_DESTROY_AH) { + ah_remote_ip_info_process(iwdev, info, RDMA_DEL_REMOTE_IP); + } + } + + dma_wmb(); /* need write block before writing 
WQE header */ + + set_64bit_val(wqe, 0, qw1); + + print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8, + wqe, ZXDH_CQP_WQE_SIZE * 8, false); + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_create_mg_ctx() - create a mcg context + * @info: multicast group context info + */ +static int zxdh_create_mg_ctx(struct zxdh_mcast_grp_info *info) +{ + struct zxdh_mcast_grp_ctx_entry_info *entry_info = NULL; + u32 idx = 0; /* index in the array */ + u32 ctx_idx = 0; /* index in the MG context */ + + memset(info->dma_mem_mc.va, 0, + ZXDH_MAX_MGS_PER_CTX * sizeof(u32) + sizeof(u64)); + + for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) { + entry_info = &info->mg_ctx_info[idx]; + if (entry_info->valid_entry) { + set_32bit_val((__le32 *)info->dma_mem_mc.va, + sizeof(u64) + ctx_idx * sizeof(u32), + FIELD_PREP(ZXDH_UDA_MGCTX_QPID, + entry_info->qp_id)); + ctx_idx++; + } + } + set_64bit_val((__le64 *)info->dma_mem_mc.va, 0, ctx_idx); + + return 0; +} + +/** + * zxdh_access_mcast_grp() - Access mcast group based on op + * @cqp: Control QP + * @info: multicast group context info + * @op: operation to perform + * @scratch: u64 saved to be used during cqp completion + */ +int zxdh_access_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, u32 op, u64 scratch) +{ + __le64 *wqe; + u64 dmac; + + if (info->mg_id >= ZXDH_UDA_MAX_FSI_MGS) { + pr_err("WQE: mg_id out of range\n"); + return -EINVAL; + } + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) { + pr_err("WQE: ring full\n"); + return -ENOSPC; + } + + zxdh_create_mg_ctx(info); + + dmac = LS_64_1(info->dest_mac_addr[5], 0) | + LS_64_1(info->dest_mac_addr[4], 8) | + LS_64_1(info->dest_mac_addr[3], 16) | + LS_64_1(info->dest_mac_addr[2], 24) | + LS_64_1(info->dest_mac_addr[1], 32) | + LS_64_1(info->dest_mac_addr[0], 40); + + set_64bit_val(wqe, 8, + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_DMAC, dmac) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_VLANID, + info->vlan_id)); + + if (!info->ipv4_valid) { 
+ set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->dest_ip_addr[0]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR1, + info->dest_ip_addr[1])); + set_64bit_val(wqe, 16, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR2, + info->dest_ip_addr[2]) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR3, + info->dest_ip_addr[3])); + } else { + set_64bit_val(wqe, 24, + FIELD_PREP(ZXDH_UDA_CQPSQ_MAV_ADDR0, + info->dest_ip_addr[0])); + } + + set_64bit_val(wqe, 32, info->dma_mem_mc.pa); + + dma_wmb(); /* need write memory block before writing the WQE header. */ + + set_64bit_val(wqe, 0, + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_MGIDX, info->mg_id) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_VLANVALID, + info->vlan_valid) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_IPV4VALID, + info->ipv4_valid) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_WQEVALID, + cqp->polarity) | + FIELD_PREP(ZXDH_UDA_CQPSQ_MG_OPCODE, op)); + + zxdh_sc_cqp_post_sq(cqp); + + return 0; +} + +/** + * zxdh_compare_mgs - Compares two multicast group structures + * @entry1: Multcast group info + * @entry2: Multcast group info in context + */ +static bool zxdh_compare_mgs(struct zxdh_mcast_grp_ctx_entry_info *entry1, + struct zxdh_mcast_grp_ctx_entry_info *entry2) +{ + if (entry1->dest_port == entry2->dest_port && + entry1->qp_id == entry2->qp_id) + return true; + + return false; +} + +/** + * zxdh_sc_add_mcast_grp - Allocates mcast group entry in ctx + * @ctx: Multcast group context + * @mg: Multcast group info + */ +int zxdh_sc_add_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg) +{ + u32 idx; + bool free_entry_found = false; + u32 free_entry_idx = 0; + + /* find either an identical or a free entry for a multicast group */ + for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) { + if (ctx->mg_ctx_info[idx].valid_entry) { + if (zxdh_compare_mgs(&ctx->mg_ctx_info[idx], mg)) { + ctx->mg_ctx_info[idx].use_cnt++; + return 0; + } + continue; + } + if (!free_entry_found) { + free_entry_found = true; + free_entry_idx = idx; + } + } + + if 
(free_entry_found) { + ctx->mg_ctx_info[free_entry_idx] = *mg; + ctx->mg_ctx_info[free_entry_idx].valid_entry = true; + ctx->mg_ctx_info[free_entry_idx].use_cnt = 1; + ctx->no_of_mgs++; + return 0; + } + + return -ENOMEM; +} + +/** + * zxdh_sc_del_mcast_grp - Delete mcast group + * @ctx: Multcast group context + * @mg: Multcast group info + * + * Finds and removes a specific mulicast group from context, all + * parameters must match to remove a multicast group. + */ +int zxdh_sc_del_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg) +{ + u32 idx; + + /* find an entry in multicast group context */ + for (idx = 0; idx < ZXDH_MAX_MGS_PER_CTX; idx++) { + if (!ctx->mg_ctx_info[idx].valid_entry) + continue; + + if (zxdh_compare_mgs(mg, &ctx->mg_ctx_info[idx])) { + ctx->mg_ctx_info[idx].use_cnt--; + + if (!ctx->mg_ctx_info[idx].use_cnt) { + ctx->mg_ctx_info[idx].valid_entry = false; + ctx->no_of_mgs--; + /* Remove gap if element was not the last */ + if (idx != ctx->no_of_mgs && + ctx->no_of_mgs > 0) { + memcpy(&ctx->mg_ctx_info[idx], + &ctx->mg_ctx_info[ctx->no_of_mgs - + 1], + sizeof(ctx->mg_ctx_info[idx])); + ctx->mg_ctx_info[ctx->no_of_mgs - 1] + .valid_entry = false; + } + } + + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/infiniband/hw/zrdma/uda.h b/drivers/infiniband/hw/zrdma/uda.h new file mode 100644 index 000000000000..3e921f2acbbe --- /dev/null +++ b/drivers/infiniband/hw/zrdma/uda.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_UDA_H +#define ZXDH_UDA_H + +#define ZXDH_UDA_MAX_FSI_MGS 8192 +#define ZXDH_UDA_MAX_PFS 16 +#define ZXDH_UDA_MAX_VFS 128 + +struct zxdh_sc_cqp; + +struct zxdh_ah_info { + struct zxdh_sc_vsi *vsi; + u32 pd_idx; + u32 dest_ip_addr[4]; + u32 src_ip_addr[4]; + u32 flow_label; + u32 ah_idx; + u16 vlan_tag; + u8 insert_vlan_tag; + u8 tc_tos; + u8 hop_ttl; + u8 mac_addr[ETH_ALEN]; + u8 
dmac[ETH_ALEN]; + u8 ah_valid : 1; + u8 ipv4_valid : 1; + u8 do_lpbk : 1; +}; + +struct zxdh_sc_ah { + struct zxdh_sc_dev *dev; + struct zxdh_ah_info ah_info; +}; + +int zxdh_sc_add_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg); +int zxdh_sc_del_mcast_grp(struct zxdh_mcast_grp_info *ctx, + struct zxdh_mcast_grp_ctx_entry_info *mg); +int zxdh_sc_access_ah(struct zxdh_sc_cqp *cqp, struct zxdh_ah_info *info, + u32 op, u64 scratch); +int zxdh_access_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, u32 op, + u64 scratch); + +static inline void zxdh_sc_init_ah(struct zxdh_sc_dev *dev, + struct zxdh_sc_ah *ah) +{ + ah->dev = dev; +} + +static inline int zxdh_sc_create_ah(struct zxdh_sc_cqp *cqp, + struct zxdh_ah_info *info, u64 scratch) +{ + return zxdh_sc_access_ah(cqp, info, ZXDH_CQP_OP_CREATE_AH, scratch); +} + +static inline int zxdh_sc_destroy_ah(struct zxdh_sc_cqp *cqp, + struct zxdh_ah_info *info, u64 scratch) +{ + return zxdh_sc_access_ah(cqp, info, ZXDH_CQP_OP_DESTROY_AH, scratch); +} + +static inline int zxdh_sc_create_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_CREATE_MCAST_GRP, + scratch); +} + +static inline int zxdh_sc_modify_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_MODIFY_MCAST_GRP, + scratch); +} + +static inline int zxdh_sc_destroy_mcast_grp(struct zxdh_sc_cqp *cqp, + struct zxdh_mcast_grp_info *info, + u64 scratch) +{ + return zxdh_access_mcast_grp(cqp, info, ZXDH_CQP_OP_DESTROY_MCAST_GRP, + scratch); +} +#endif /* ZXDH_UDA_H */ diff --git a/drivers/infiniband/hw/zrdma/uda_d.h b/drivers/infiniband/hw/zrdma/uda_d.h new file mode 100644 index 000000000000..9458a148fc6e --- /dev/null +++ b/drivers/infiniband/hw/zrdma/uda_d.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ 
+/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_UDA_D_H +#define ZXDH_UDA_D_H +/* L4 packet type */ +#define ZXDH_E_UDA_SQ_L4T_UNKNOWN 0 +#define ZXDH_E_UDA_SQ_L4T_TCP 1 +#define ZXDH_E_UDA_SQ_L4T_SCTP 2 +#define ZXDH_E_UDA_SQ_L4T_UDP 3 +/* Inner IP header type */ +#define ZXDH_E_UDA_SQ_IIPT_UNKNOWN 0 +#define ZXDH_E_UDA_SQ_IIPT_IPV6 1 +#define ZXDH_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2 +#define ZXDH_E_UDA_SQ_IIPT_IPV4_CSUM 3 +#define ZXDH_UDA_QPSQ_PUSHWQE_S 56 +#define ZXDH_UDA_QPSQ_PUSHWQE BIT_ULL(56) +#define ZXDH_UDA_QPSQ_INLINEDATAFLAG_S 57 +#define ZXDH_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57) +#define ZXDH_UDA_QPSQ_INLINEDATALEN_S 48 +#define ZXDH_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48) +#define ZXDH_UDA_QPSQ_ADDFRAGCNT_S 38 +#define ZXDH_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38) +#define ZXDH_UDA_QPSQ_IPFRAGFLAGS_S 42 +#define ZXDH_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42) +#define ZXDH_UDA_QPSQ_NOCHECKSUM_S 45 +#define ZXDH_UDA_QPSQ_NOCHECKSUM BIT_ULL(45) +#define ZXDH_UDA_QPSQ_AHIDXVALID_S 46 +#define ZXDH_UDA_QPSQ_AHIDXVALID BIT_ULL(46) +#define ZXDH_UDA_QPSQ_LOCAL_FENCE_S 61 +#define ZXDH_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61) +#define ZXDH_UDA_QPSQ_AHIDX_S 0 +#define ZXDH_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0) +#define ZXDH_UDA_QPSQ_PROTOCOL_S 16 +#define ZXDH_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16) +#define ZXDH_UDA_QPSQ_EXTHDRLEN_S 32 +#define ZXDH_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32) +#define ZXDH_UDA_QPSQ_MULTICAST_S 63 +#define ZXDH_UDA_QPSQ_MULTICAST BIT_ULL(63) +#define ZXDH_UDA_QPSQ_MACLEN_S 56 +#define ZXDH_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56) +#define ZXDH_UDA_QPSQ_MACLEN_LINE 2 +#define ZXDH_UDA_QPSQ_IPLEN_S 48 +#define ZXDH_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48) +#define ZXDH_UDA_QPSQ_IPLEN_LINE 2 +#define ZXDH_UDA_QPSQ_L4T_S 30 +#define ZXDH_UDA_QPSQ_L4T GENMASK_ULL(31, 30) +#define ZXDH_UDA_QPSQ_L4T_LINE 2 +#define ZXDH_UDA_QPSQ_IIPT_S 28 +#define ZXDH_UDA_QPSQ_IIPT GENMASK_ULL(29, 28) +#define ZXDH_UDA_QPSQ_IIPT_LINE 2 +#define 
ZXDH_UDA_QPSQ_DO_LPB_LINE 3 +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM_S 45 +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45) +#define ZXDH_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3 +#define ZXDH_UDA_QPSQ_IMMDATA_S 0 +#define ZXDH_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0) +/* Byte Offset 0 */ +#define ZXDH_UDAQPC_IPV4_S 3 +#define ZXDH_UDAQPC_IPV4 BIT_ULL(3) +#define ZXDH_UDAQPC_INSERTVLANTAG_S 5 +#define ZXDH_UDAQPC_INSERTVLANTAG BIT_ULL(5) +#define ZXDH_UDAQPC_ISQP1_S 6 +#define ZXDH_UDAQPC_ISQP1 BIT_ULL(6) +#define ZXDH_UDAQPC_RQWQESIZE_S IRDMAQPC_RQWQESIZE_S +#define ZXDH_UDAQPC_RQWQESIZE IRDMAQPC_RQWQESIZE +#define ZXDH_UDAQPC_ECNENABLE_S 14 +#define ZXDH_UDAQPC_ECNENABLE BIT_ULL(14) +#define ZXDH_UDAQPC_PDINDEXHI_S 20 +#define ZXDH_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20) +#define ZXDH_UDAQPC_DCTCPENABLE_S 25 +#define ZXDH_UDAQPC_DCTCPENABLE BIT_ULL(25) +#define ZXDH_UDAQPC_RCVTPHEN_S IRDMAQPC_RCVTPHEN_S +#define ZXDH_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN +#define ZXDH_UDAQPC_XMITTPHEN_S IRDMAQPC_XMITTPHEN_S +#define ZXDH_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN +#define ZXDH_UDAQPC_RQTPHEN_S IRDMAQPC_RQTPHEN_S +#define ZXDH_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN +#define ZXDH_UDAQPC_SQTPHEN_S IRDMAQPC_SQTPHEN_S +#define ZXDH_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN +#define ZXDH_UDAQPC_PPIDX_S IRDMAQPC_PPIDX_S +#define ZXDH_UDAQPC_PPIDX IRDMAQPC_PPIDX +#define ZXDH_UDAQPC_PMENA_S IRDMAQPC_PMENA_S +#define ZXDH_UDAQPC_PMENA IRDMAQPC_PMENA +#define ZXDH_UDAQPC_INSERTTAG2_S 11 +#define ZXDH_UDAQPC_INSERTTAG2 BIT_ULL(11) +#define ZXDH_UDAQPC_INSERTTAG3_S 14 +#define ZXDH_UDAQPC_INSERTTAG3 BIT_ULL(14) +#define ZXDH_UDAQPC_RQSIZE_S IRDMAQPC_RQSIZE_S +#define ZXDH_UDAQPC_RQSIZE IRDMAQPC_RQSIZE +#define ZXDH_UDAQPC_SQSIZE_S IRDMAQPC_SQSIZE_S +#define ZXDH_UDAQPC_SQSIZE IRDMAQPC_SQSIZE +#define ZXDH_UDAQPC_TXCQNUM_S IRDMAQPC_TXCQNUM_S +#define ZXDH_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM +#define ZXDH_UDAQPC_RXCQNUM_S IRDMAQPC_RXCQNUM_S +#define ZXDH_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM +#define ZXDH_UDAQPC_QPCOMPCTX_S 
IRDMAQPC_QPCOMPCTX_S +#define ZXDH_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX +#define ZXDH_UDAQPC_SQTPHVAL_S IRDMAQPC_SQTPHVAL_S +#define ZXDH_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL +#define ZXDH_UDAQPC_RQTPHVAL_S IRDMAQPC_RQTPHVAL_S +#define ZXDH_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL +#define ZXDH_UDAQPC_QSHANDLE_S IRDMAQPC_QSHANDLE_S +#define ZXDH_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE +#define ZXDH_UDAQPC_RQHDRRINGBUFSIZE_S 48 +#define ZXDH_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48) +#define ZXDH_UDAQPC_SQHDRRINGBUFSIZE_S 32 +#define ZXDH_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32) +#define ZXDH_UDAQPC_PRIVILEGEENABLE_S 25 +#define ZXDH_UDAQPC_PRIVILEGEENABLE BIT_ULL(25) +#define ZXDH_UDAQPC_USE_STATISTICS_INSTANCE_S 26 +#define ZXDH_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26) +#define ZXDH_UDAQPC_STATISTICS_INSTANCE_INDEX_S 0 +#define ZXDH_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0) +#define ZXDH_UDAQPC_PRIVHDRGENENABLE_S 0 +#define ZXDH_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0) +#define ZXDH_UDAQPC_RQHDRSPLITENABLE_S 3 +#define ZXDH_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3) +#define ZXDH_UDAQPC_RQHDRRINGBUFENABLE_S 2 +#define ZXDH_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2) +#define ZXDH_UDAQPC_SQHDRRINGBUFENABLE_S 1 +#define ZXDH_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1) +#define ZXDH_UDAQPC_IPID_S 32 +#define ZXDH_UDAQPC_IPID GENMASK_ULL(47, 32) +#define ZXDH_UDAQPC_SNDMSS_S 16 +#define ZXDH_UDAQPC_SNDMSS GENMASK_ULL(29, 16) +#define ZXDH_UDAQPC_VLANTAG_S 0 +#define ZXDH_UDAQPC_VLANTAG GENMASK_ULL(15, 0) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXHI_S 20 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXLO_S 48 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48) +#define ZXDH_UDA_CQPSQ_MAV_PDINDEX_S 0 +#define ZXDH_UDA_CQPSQ_MAV_PDINDEX GENMASK_ULL(19, 0) +#define ZXDH_UDA_CQPSQ_MAV_SRCMACADDRINDEX_S 24 +#define ZXDH_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24) +#define ZXDH_UDA_CQPSQ_MAV_ARPINDEX_S 48 +#define ZXDH_UDA_CQPSQ_MAV_ARPINDEX 
GENMASK_ULL(63, 48) +#define ZXDH_UDA_CQPSQ_MAV_TC_S 47 +#define ZXDH_UDA_CQPSQ_MAV_TC GENMASK_ULL(54, 47) +#define ZXDH_UDA_CQPSQ_MAV_HOPLIMIT_S 32 +#define ZXDH_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32) +#define ZXDH_UDA_CQPSQ_MAV_FLOWLABEL_S 0 +#define ZXDH_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR3_S 0 +#define ZXDH_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR2_S 32 +#define ZXDH_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_MAV_ADDR1_S 0 +#define ZXDH_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_MAV_ADDR0_S 32 +#define ZXDH_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_MAV_WQEVALID_S 57 +#define ZXDH_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(57) +#define ZXDH_UDA_CQPSQ_MAV_OPCODE_S 58 +#define ZXDH_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_UDA_CQPSQ_MAV_DOLOOPBACKK_S 62 +#define ZXDH_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62) +#define ZXDH_UDA_CQPSQ_MAV_IPV4VALID_S 56 +#define ZXDH_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(56) + +#define ZXDH_UDA_CQPSQ_MAV_AVIDX_S 24 +#define ZXDH_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(42, 24) +#define ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG_S 55 +#define ZXDH_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(55) +#define ZXDH_UDA_MGCTX_VFFLAG_S 29 +#define ZXDH_UDA_MGCTX_VFFLAG BIT_ULL(29) +#define ZXDH_UDA_MGCTX_DESTPORT_S 32 +#define ZXDH_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32) +#define ZXDH_UDA_MGCTX_VFID_S 22 +#define ZXDH_UDA_MGCTX_VFID GENMASK_ULL(28, 22) +#define ZXDH_UDA_MGCTX_VALIDENT_S 31 +#define ZXDH_UDA_MGCTX_VALIDENT BIT_ULL(31) +#define ZXDH_UDA_MGCTX_PFID_S 18 +#define ZXDH_UDA_MGCTX_PFID GENMASK_ULL(21, 18) +#define ZXDH_UDA_MGCTX_FLAGIGNOREDPORT_S 30 +#define ZXDH_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30) +#define ZXDH_UDA_MGCTX_QPID_S 0 +#define ZXDH_UDA_MGCTX_QPID GENMASK_ULL(23, 0) + +#define ZXDH_UDA_CQPSQ_MG_WQEVALID_S 57 +#define ZXDH_UDA_CQPSQ_MG_WQEVALID BIT_ULL(57) +#define ZXDH_UDA_CQPSQ_MG_OPCODE_S 58 +#define 
ZXDH_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(63, 58) +#define ZXDH_UDA_CQPSQ_MG_MGIDX_S 0 +#define ZXDH_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(20, 0) +#define ZXDH_UDA_CQPSQ_MG_IPV4VALID_S 56 +#define ZXDH_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(56) +#define ZXDH_UDA_CQPSQ_MG_VLANVALID_S 55 +#define ZXDH_UDA_CQPSQ_MG_VLANVALID BIT_ULL(55) + +#define ZXDH_UDA_CQPSQ_MG_DMAC_S 0 +#define ZXDH_UDA_CQPSQ_MG_DMAC GENMASK_ULL(47, 0) +#define ZXDH_UDA_CQPSQ_MG_VLANID_S 48 +#define ZXDH_UDA_CQPSQ_MG_VLANID GENMASK_ULL(63, 48) + +#define ZXDH_UDA_CQPSQ_MG_HMC_FCN_ID_S 0 +#define ZXDH_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0) + +#define ZXDH_UDA_CQPSQ_QS_HANDLE_S 0 +#define ZXDH_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0) +#define ZXDH_UDA_CQPSQ_QHASH_QPN_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32) +#define ZXDH_UDA_CQPSQ_QHASH__S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ BIT_ULL(0) +#define ZXDH_UDA_CQPSQ_QHASH_SRC_PORT_S 16 +#define ZXDH_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16) +#define ZXDH_UDA_CQPSQ_QHASH_DEST_PORT_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR0_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR1_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR2_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32) +#define ZXDH_UDA_CQPSQ_QHASH_ADDR3_S 0 +#define ZXDH_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0) +#define ZXDH_UDA_CQPSQ_QHASH_WQEVALID_S 63 +#define ZXDH_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63) +#define ZXDH_UDA_CQPSQ_QHASH_OPCODE_S 32 +#define ZXDH_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32) +#define ZXDH_UDA_CQPSQ_QHASH_MANAGE_S 61 +#define ZXDH_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61) +#define ZXDH_UDA_CQPSQ_QHASH_IPV4VALID_S 60 +#define ZXDH_UDA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60) +#define ZXDH_UDA_CQPSQ_QHASH_LANFWD_S 59 +#define ZXDH_UDA_CQPSQ_QHASH_LANFWD BIT_ULL(59) +#define ZXDH_UDA_CQPSQ_QHASH_ENTRYTYPE_S 42 +#define 
ZXDH_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42) +#endif /* ZXDH_UDA_D_H */ diff --git a/drivers/infiniband/hw/zrdma/uk.c b/drivers/infiniband/hw/zrdma/uk.c new file mode 100644 index 000000000000..3bf135ba9be8 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/uk.c @@ -0,0 +1,1947 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "defs.h" +#include "user.h" +#include "zrdma.h" +#include "type.h" +#include "srq.h" + +/** + * zxdh_set_fragment - set fragment in wqe + * @wqe: wqe for setting fragment + * @offset: offset value + * @sge: sge length and stag + * @valid: The wqe valid + */ +static void zxdh_set_fragment(__le64 *wqe, u32 offset, struct zxdh_sge *sge, + u8 valid) +{ + if (sge) { + set_64bit_val(wqe, offset + 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off)); + set_64bit_val(wqe, offset, + FIELD_PREP(IRDMAQPSQ_VALID, valid) | + FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) | + FIELD_PREP(IRDMAQPSQ_FRAG_STAG, + sge->stag)); + } else { + set_64bit_val(wqe, offset + 8, 0); + set_64bit_val(wqe, offset, FIELD_PREP(IRDMAQPSQ_VALID, valid)); + } +} + +/** + * zxdh_nop_1 - insert a NOP wqe + * @qp: hw qp ptr + */ +static int zxdh_nop_1(struct zxdh_qp_uk *qp) +{ + u64 hdr; + __le64 *wqe; + u32 wqe_idx; + bool signaled = false; + + if (!qp->sq_ring.head) + return -EINVAL; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + wqe = qp->sq_base[wqe_idx].elem; + + qp->sq_wrtrk_array[wqe_idx].quanta = ZXDH_QP_WQE_MIN_QUANTA; + + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + set_64bit_val(wqe, 24, 0); + + hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_NOP) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + /* make sure WQE is written before valid bit is set */ + dma_wmb(); + + set_64bit_val(wqe, 0, hdr); + + return 0; +} + +/** + * zxdh_clr_wqes - clear next 128 sq entries + * @qp: hw qp ptr + * @qp_wqe_idx: 
wqe_idx + */ +void zxdh_clr_wqes(struct zxdh_qp_uk *qp, u32 qp_wqe_idx) +{ + __le64 *wqe; + u32 wqe_idx; + + if (!(qp_wqe_idx & 0x7F)) { + wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size; + wqe = qp->sq_base[wqe_idx].elem; + if (wqe_idx) + memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000); + else + memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000); + } +} + +/** + * zxdh_uk_qp_post_wr - ring doorbell + * @qp: hw qp ptr + */ +void zxdh_uk_qp_post_wr(struct zxdh_qp_uk *qp) +{ + dma_wmb(); + + writel(qp->qp_id, qp->wqe_alloc_db); + qp->initial_ring.head = qp->sq_ring.head; +} + +/** + * zxdh_uk_qp_set_shadow_area - fill SW_RQ_Head + * @qp: hw qp ptr + */ +void zxdh_uk_qp_set_shadow_area(struct zxdh_qp_uk *qp) +{ + set_64bit_val(qp->shadow_area, 0, + FIELD_PREP(IRDMAQPDBSA_RQ_POLARITY, qp->rwqe_polarity) | + FIELD_PREP(IRDMAQPDBSA_RQ_SW_HEAD, + ZXDH_RING_CURRENT_HEAD(qp->rq_ring))); +} + +#ifdef Z_CONFIG_RDMA_PUSH_MODE +/** + * zxdh_qp_ring_push_db - ring qp doorbell + * @qp: hw qp ptr + * @wqe_idx: wqe index + */ +static void zxdh_qp_ring_push_db(struct zxdh_qp_uk *qp, u32 wqe_idx) +{ + set_32bit_val(qp->push_db, 0, + FIELD_PREP(ZXDH_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | + qp->qp_id); + qp->initial_ring.head = qp->sq_ring.head; + qp->push_mode = true; + qp->push_dropped = false; +} + +void zxdh_qp_push_wqe(struct zxdh_qp_uk *qp, __le64 *wqe, u16 quanta, + u32 wqe_idx, bool post_sq) +{ + __le64 *push; + + if (ZXDH_RING_CURRENT_HEAD(qp->initial_ring) != + ZXDH_RING_CURRENT_TAIL(qp->sq_ring) && + !qp->push_mode) { + if (post_sq) + zxdh_uk_qp_post_wr(qp); + } else { + push = (__le64 *)((uintptr_t)qp->push_wqe + + (wqe_idx & 0x7) * 0x20); + memcpy(push, wqe, quanta * ZXDH_QP_WQE_MIN_SIZE); + zxdh_qp_ring_push_db(qp, wqe_idx); + } +} +#endif +/** + * zxdh_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go + * @qp: hw qp ptr + * @wqe_idx: return wqe index + * @quanta: size of WR in quanta + * @total_size: size of WR in bytes + * @info: info on 
WR + */ +__le64 *zxdh_qp_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx, + u16 quanta, u32 total_size, + struct zxdh_post_sq_info *info) +{ + __le64 *wqe; + u16 avail_quanta; + u16 i; + + avail_quanta = ZXDH_MAX_SQ_WQES_PER_PAGE - + (ZXDH_RING_CURRENT_HEAD(qp->sq_ring) % + ZXDH_MAX_SQ_WQES_PER_PAGE); + + if (quanta <= avail_quanta) { + /* WR fits in current chunk */ + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + } else { + /* Need to pad with NOP */ + if (quanta + avail_quanta > + ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return NULL; + + for (i = 0; i < avail_quanta; i++) { + zxdh_nop_1(qp); + ZXDH_RING_MOVE_HEAD_NOCHECK(qp->sq_ring); + } + } + + *wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe = qp->sq_base[*wqe_idx].elem; + + qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[*wqe_idx].quanta = quanta; + + return wqe; +} + +/** + * zxdh_qp_get_next_recv_wqe - get next qp's rcv wqe + * @qp: hw qp ptr + * @wqe_idx: return wqe index + */ +__le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx) +{ + __le64 *wqe; + int ret_code; + + if (ZXDH_RING_FULL_ERR(qp->rq_ring)) + return NULL; + + ZXDH_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + + if (!*wqe_idx) + qp->rwqe_polarity = !qp->rwqe_polarity; + /* rq_wqe_size_multiplier is no of 16 byte quanta in one rq wqe */ + wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem; + + return wqe; +} + +/** + * zxdh_uk_rdma_write - rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rdma_write(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + u64 hdr; + __le64 *wqe; + struct zxdh_rdma_write *op_info; + u32 i, wqe_idx; + u32 total_size = 0, 
byte_off; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.rdma_write; + if (op_info->num_lo_sges > qp->max_sq_frag_cnt) + return -EINVAL; + + for (i = 0; i < op_info->num_lo_sges; i++) { + total_size += op_info->lo_sg_list[i].len; + if (0 != i && 0 == op_info->lo_sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + read_fence |= info->read_fence; + + if (imm_data_flag) + frag_cnt = op_info->num_lo_sges ? (op_info->num_lo_sges + 1) : + 2; + else + frag_cnt = op_info->num_lo_sges; + addl_frag_cnt = op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : + 0; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + if (op_info->num_lo_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->lo_sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->lo_sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->lo_sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->lo_sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + set_64bit_val( + wqe, ZXDH_SQ_WQE_BYTESIZE, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE + ZXDH_QP_FRAG_BYTESIZE; + i < op_info->num_lo_sges; i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } else { + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_lo_sges; + i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && frag_cnt) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + dma_wmb(); /* make sure WQE is populated before valid bit is 
set */ + + set_64bit_val(wqe, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rdma_read - rdma read command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rdma_read(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + struct zxdh_rdma_read *op_info; + int ret_code; + u32 i, byte_off, total_size = 0; + bool local_fence = false; + bool ord_fence = false; + u32 addl_frag_cnt; + __le64 *wqe; + u32 wqe_idx; + u16 quanta; + u64 hdr; + + op_info = &info->op.rdma_read; + if (qp->max_sq_frag_cnt < op_info->num_lo_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_lo_sges; i++) { + total_size += op_info->lo_sg_list[i].len; + if (0 != i && 0 == op_info->lo_sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + ret_code = zxdh_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + if (qp->rd_fence_rate && (qp->ord_cnt++ == qp->rd_fence_rate)) { + ord_fence = true; + qp->ord_cnt = 0; + } + + zxdh_clr_wqes(qp, wqe_idx); + + addl_frag_cnt = op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : + 0; + local_fence |= info->local_fence; + + if (op_info->num_lo_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->lo_sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->lo_sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->lo_sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->lo_sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_lo_sges; i++) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->lo_sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + + /* if not an odd number set valid bit in next fragment */ + if (!(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_READ) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, + info->read_fence || ord_fence ? 
1 : 0) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rc_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rc_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_post_send *op_info; + u64 hdr; + u32 i, wqe_idx, total_size = 0, byte_off = ZXDH_SQ_WQE_BYTESIZE; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) { + total_size += op_info->sg_list[i].len; + if (0 != i && 0 == op_info->sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + if (imm_data_flag) + frag_cnt = op_info->num_sges ? (op_info->num_sges + 1) : 2; + else + frag_cnt = op_info->num_sges; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size, info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0; + if (op_info->num_sges) { + set_64bit_val( + wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->sg_list->stag)); + set_64bit_val(wqe, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + set_64bit_val( + wqe, ZXDH_SQ_WQE_BYTESIZE, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + + i = 2; + if (i < op_info->num_sges) { + for (byte_off = ZXDH_SQ_WQE_BYTESIZE + 2 * ZXDH_QP_FRAG_BYTESIZE; + i < op_info->num_sges; i += 2) { + if (i == addl_frag_cnt) { + qp->wqe_ops.iw_set_fragment( + wqe, byte_off, + &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + break; + } + byte_off += ZXDH_QP_FRAG_BYTESIZE; + qp->wqe_ops.iw_set_fragment( + wqe, byte_off, &op_info->sg_list[i + 1], + qp->swqe_polarity); + byte_off -= ZXDH_QP_FRAG_BYTESIZE; + qp->wqe_ops.iw_set_fragment( + wqe, byte_off, &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += 2 * ZXDH_QP_FRAG_BYTESIZE; + } + } + } else { + i = 1; + for (byte_off = ZXDH_SQ_WQE_BYTESIZE; i < op_info->num_sges; + i += 2) { + if (i == addl_frag_cnt) { + qp->wqe_ops.iw_set_fragment( + wqe, byte_off, &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + break; + } + byte_off += ZXDH_QP_FRAG_BYTESIZE; + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->sg_list[i + 1], + qp->swqe_polarity); + byte_off -= ZXDH_QP_FRAG_BYTESIZE; + qp->wqe_ops.iw_set_fragment(wqe, byte_off, + &op_info->sg_list[i], + qp->swqe_polarity); + byte_off += 2 * ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && 
frag_cnt) { + qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL, + qp->swqe_polarity); + } + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 0) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, 0)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_ud_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_ud_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe_base; + __le64 *wqe_ex = NULL; + struct zxdh_post_send *op_info; + u64 hdr; + u32 i, wqe_idx, total_size = 0, byte_off; + int ret_code; + u32 frag_cnt, addl_frag_cnt; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return -EINVAL; + + for (i = 0; i < op_info->num_sges; i++) { + total_size += op_info->sg_list[i].len; + if (0 != i && 0 == op_info->sg_list[i].len) + return -EINVAL; + } + + if (total_size > ZXDH_MAX_SQ_PAYLOAD_SIZE) + return -EINVAL; + + if (imm_data_flag) + frag_cnt = op_info->num_sges ? 
(op_info->num_sges + 1) : 2; + else + frag_cnt = op_info->num_sges; + ret_code = zxdh_fragcnt_to_quanta_sq(frag_cnt, &quanta); + if (ret_code) + return ret_code; + + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return -ENOSPC; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe_base = qp->sq_base[wqe_idx].elem; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[wqe_idx].quanta = quanta; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + addl_frag_cnt = op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATAFLAG, 0) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATALEN, 0) | + FIELD_PREP(IRDMAQPSQ_UD_ADDFRAGCNT, addl_frag_cnt) | + FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id); + + if (op_info->num_sges) { + set_64bit_val( + wqe_base, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, + op_info->sg_list->len == + ZXDH_MAX_SQ_PAYLOAD_SIZE ? 
+ 1 : + 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, + op_info->sg_list->len) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + op_info->sg_list->stag)); + set_64bit_val(wqe_base, 8, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, + op_info->sg_list->tag_off)); + } else { + /*if zero sge,post a special sge with zero lenth*/ + set_64bit_val(wqe_base, 16, + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_VALID, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPSQ_FIRST_FRAG_STAG, + 0x100)); + set_64bit_val(wqe_base, 8, FIELD_PREP(IRDMAQPSQ_FRAG_TO, 0)); + } + + if (imm_data_flag) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + set_64bit_val( + wqe_ex, 0, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + i = 1; + for (byte_off = ZXDH_QP_FRAG_BYTESIZE; i < op_info->num_sges; + i++) { + if (!(i & 0x1)) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + } + qp->wqe_ops.iw_set_fragment( + wqe_ex, byte_off % ZXDH_SQ_WQE_BYTESIZE, + &op_info->sg_list[i], qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } else { + i = 1; + for (byte_off = 0; i < op_info->num_sges; i++) { + if (i & 0x1) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + } + qp->wqe_ops.iw_set_fragment( + wqe_ex, byte_off % ZXDH_SQ_WQE_BYTESIZE, + &op_info->sg_list[i], qp->swqe_polarity); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + } + + /* if not an odd number set valid bit in next fragment */ + if (!(frag_cnt & 0x01) && frag_cnt && wqe_ex) { + qp->wqe_ops.iw_set_fragment(wqe_ex, ZXDH_QP_FRAG_BYTESIZE, NULL, + qp->swqe_polarity); + } + + set_64bit_val(wqe_base, 24, + FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp) | + FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey)); + + 
dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe_base, 0, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_set_mw_bind_wqe - set mw bind in wqe + * @wqe: wqe for setting mw bind + * @op_info: info for setting wqe values + */ +static void zxdh_set_mw_bind_wqe(__le64 *wqe, struct zxdh_bind_window *op_info) +{ + set_64bit_val(wqe, 8, (uintptr_t)op_info->va); + set_64bit_val(wqe, 16, + FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag)); + set_64bit_val(wqe, 24, op_info->bind_len); +} + +/** + * zxdh_copy_inline_data - Copy inline data to wqe + * @dest: pointer to wqe + * @src: pointer to inline data + * @len: length of inline data to copy + * @polarity: polarity of wqe valid bit + * @imm_data_flag: flag to imm_data + */ +static void zxdh_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity, + bool imm_data_flag) +{ + u8 inline_valid = polarity << ZXDH_INLINE_VALID_S; + u32 copy_size; + u8 *inline_valid_addr; + + dest += ZXDH_WQE_SIZE_32; /* point to additional 32 byte quanta */ + + if (len) { + inline_valid_addr = dest + WQE_OFFSET_7BYTES; + if (imm_data_flag) { + copy_size = len < INLINE_DATASIZE_24BYTES ? + len : + INLINE_DATASIZE_24BYTES; + dest += WQE_OFFSET_8BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } else { + if (len <= INLINE_DATASIZE_7BYTES) { + copy_size = len; + memcpy(dest, src, copy_size); + *inline_valid_addr = inline_valid; + return; + } + memcpy(dest, src, INLINE_DATASIZE_7BYTES); + len -= INLINE_DATASIZE_7BYTES; + dest += WQE_OFFSET_8BYTES; + src += INLINE_DATA_OFFSET_7BYTES; + copy_size = len < INLINE_DATASIZE_24BYTES ? 
+ len : + INLINE_DATASIZE_24BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } + *inline_valid_addr = inline_valid; + } + + while (len) { + inline_valid_addr = dest + WQE_OFFSET_7BYTES; + if (len <= INLINE_DATASIZE_7BYTES) { + copy_size = len; + memcpy(dest, src, copy_size); + *inline_valid_addr = inline_valid; + return; + } else { + memcpy(dest, src, INLINE_DATASIZE_7BYTES); + len -= INLINE_DATASIZE_7BYTES; + dest += WQE_OFFSET_8BYTES; + src += INLINE_DATA_OFFSET_7BYTES; + copy_size = len < INLINE_DATASIZE_24BYTES ? + len : + INLINE_DATASIZE_24BYTES; + memcpy(dest, src, copy_size); + len -= copy_size; + dest += WQE_OFFSET_24BYTES; + src += copy_size; + } + *inline_valid_addr = inline_valid; + } +} + +/** + * zxdh_inline_data_size_to_quanta - based on inline data, quanta + * @data_size: data size for inline + * @imm_data_flag: flag to imm_data + * @imm_data_flag: flag for immediate data + * + * Gets the quanta based on inline and immediate data. + */ +static u16 zxdh_inline_data_size_to_quanta(u32 data_size, bool imm_data_flag) +{ + if (imm_data_flag) + data_size += INLINE_DATASIZE_7BYTES; + + return data_size % 31 ? data_size / 31 + 2 : data_size / 31 + 1; +} + +/** + * zxdh_uk_inline_rdma_write - inline rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_inline_rdma_write(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + u8 imm_valid; + struct zxdh_inline_rdma_write *op_info; + u64 hdr = 0; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 
1 : 0; + + op_info = &info->op.inline_rdma_write; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_WRITE_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_WRITE_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off)); + + if (imm_data_flag) { + /* if inline exist, not update imm valid */ + imm_valid = (op_info->len == 0) ? 
qp->swqe_polarity : + (!qp->swqe_polarity); + set_64bit_val(wqe, 32, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, imm_valid) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, + info->imm_data)); + } + + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, + qp->swqe_polarity, imm_data_flag); + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_rc_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_rc_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + u8 imm_valid; + struct zxdh_post_inline_send *op_info; + u64 hdr; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + + op_info = &info->op.inline_send; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv); + set_64bit_val(wqe, 24, + FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, + op_info->len)); + + if (imm_data_flag) { + /* if inline exist, not update imm 
valid */ + imm_valid = (op_info->len == 0) ? qp->swqe_polarity : + (!qp->swqe_polarity); + set_64bit_val(wqe, 32, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, imm_valid) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, + info->imm_data)); + } + + qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len, + qp->swqe_polarity, imm_data_flag); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_ud_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_ud_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe_base; + __le64 *wqe_ex; + struct zxdh_post_inline_send *op_info; + u64 hdr; + u32 wqe_idx; + bool read_fence = false; + u16 quanta; + bool imm_data_flag = info->imm_data_valid ? 1 : 0; + u8 *inline_dest; + u8 *inline_src; + u32 inline_len; + u32 copy_size; + u8 *inline_valid_addr; + + op_info = &info->op.inline_send; + inline_len = op_info->len; + + if (op_info->len > qp->max_inline_data) + return -EINVAL; + if (imm_data_flag && op_info->len > ZXDH_MAX_SQ_INLINE_DATELEN_WITH_IMM) + return -EINVAL; + + quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len, + imm_data_flag); + if (quanta > ZXDH_SQ_RING_FREE_QUANTA(qp->sq_ring)) + return -ENOSPC; + + wqe_idx = ZXDH_RING_CURRENT_HEAD(qp->sq_ring); + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + + ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta); + + wqe_base = qp->sq_base[wqe_idx].elem; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len; + qp->sq_wrtrk_array[wqe_idx].quanta = quanta; + + zxdh_clr_wqes(qp, wqe_idx); + + read_fence |= info->read_fence; + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, 
info->signaled) | + FIELD_PREP(IRDMAQPSQ_SOLICITED, info->solicited) | + FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, imm_data_flag) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATAFLAG, 1) | + FIELD_PREP(IRDMAQPSQ_UD_INLINEDATALEN, op_info->len) | + FIELD_PREP(IRDMAQPSQ_UD_ADDFRAGCNT, quanta - 1) | + FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id); + set_64bit_val(wqe_base, 24, + FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp) | + FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey)); + + if (imm_data_flag) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + + if (inline_len) { + /* imm and inline use the same valid, valid set after inline data updated*/ + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? + inline_len : + INLINE_DATASIZE_24BYTES; + inline_dest = (u8 *)wqe_ex + WQE_OFFSET_8BYTES; + inline_src = (u8 *)op_info->data; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + set_64bit_val( + wqe_ex, 0, + FIELD_PREP(IRDMAQPSQ_IMMDATA_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data)); + + } else if (inline_len) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + inline_dest = (u8 *)wqe_ex; + inline_src = (u8 *)op_info->data; + + if (inline_len <= INLINE_DATASIZE_7BYTES) { + copy_size = inline_len; + memcpy(inline_dest, inline_src, copy_size); + inline_len = 0; + } else { + copy_size = INLINE_DATASIZE_7BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + inline_dest += WQE_OFFSET_8BYTES; + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? 
+ inline_len : + INLINE_DATASIZE_24BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + inline_valid_addr = (u8 *)wqe_ex + WQE_OFFSET_7BYTES; + *inline_valid_addr = qp->swqe_polarity << ZXDH_INLINE_VALID_S; + } + + while (inline_len) { + wqe_idx = (wqe_idx + 1) % qp->sq_ring.size; + if (!wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ex = qp->sq_base[wqe_idx].elem; + inline_dest = (u8 *)wqe_ex; + + if (inline_len <= INLINE_DATASIZE_7BYTES) { + copy_size = inline_len; + memcpy(inline_dest, inline_src, copy_size); + inline_len = 0; + } else { + copy_size = INLINE_DATASIZE_7BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + inline_dest += WQE_OFFSET_8BYTES; + copy_size = inline_len < INLINE_DATASIZE_24BYTES ? + inline_len : + INLINE_DATASIZE_24BYTES; + memcpy(inline_dest, inline_src, copy_size); + inline_len -= copy_size; + inline_src += copy_size; + } + inline_valid_addr = (u8 *)wqe_ex + WQE_OFFSET_7BYTES; + *inline_valid_addr = qp->swqe_polarity << ZXDH_INLINE_VALID_S; + } + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe_base, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_stag_local_invalidate - stag invalidate operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_stag_local_invalidate(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq) +{ + __le64 *wqe; + struct zxdh_inv_local_stag *op_info; + u64 hdr; + u32 wqe_idx; + bool local_fence = true; + + op_info = &info->op.inv_local_stag; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + hdr = FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity) | + FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_LOCAL_INV) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, 
info->signaled) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->target_stag); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_mw_bind - bind Memory Window + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +int zxdh_uk_mw_bind(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq) +{ + __le64 *wqe; + struct zxdh_bind_window *op_info; + u64 hdr; + u32 wqe_idx; + bool local_fence; + + info->push_wqe = qp->push_db ? true : false; + op_info = &info->op.bind_window; + local_fence = info->local_fence; + + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info); + + hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_BIND_MW) | + FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag) | + FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, ((op_info->ena_reads << 2) | + (op_info->ena_writes << 3))) | + FIELD_PREP(IRDMAQPSQ_VABASEDTO, + (op_info->addressing_type == ZXDH_ADDR_TYPE_VA_BASED ? + 1 : + 0)) | + FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE, + (op_info->mem_window_type_1 ? 
1 : 0)) | + FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) | + FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 0, hdr); + + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_uk_post_receive - post receive wqe + * @qp: hw qp ptr + * @info: post rq information + */ +int zxdh_uk_post_receive(struct zxdh_qp_uk *qp, struct zxdh_post_rq_info *info) +{ + u32 wqe_idx, i, byte_off; + __le64 *wqe; + struct zxdh_sge *sge; + + if (qp->max_rq_frag_cnt < info->num_sges) + return -EINVAL; + + wqe = zxdh_qp_get_next_recv_wqe(qp, &wqe_idx); + if (!wqe) + return -ENOSPC; + + qp->rq_wrid_array[wqe_idx] = info->wr_id; + + for (i = 0, byte_off = ZXDH_QP_FRAG_BYTESIZE; i < info->num_sges; i++) { + sge = &info->sg_list[i]; + set_64bit_val(wqe, byte_off, sge->tag_off); + set_64bit_val(wqe, byte_off + 8, + FIELD_PREP(IRDMAQPRQ_FRAG_LEN, sge->len) | + FIELD_PREP(IRDMAQPRQ_STAG, sge->stag)); + byte_off += ZXDH_QP_FRAG_BYTESIZE; + } + + /* + * while info->num_sges < qp->max_rq_frag_cnt, or 0 == info->num_sges, + * fill next fragment with FRAG_LEN=0, FRAG_STAG=0x00000100, + * witch indicates a invalid fragment + */ + if (info->num_sges < qp->max_rq_frag_cnt || 0 == info->num_sges) { + set_64bit_val(wqe, byte_off, 0); + set_64bit_val(wqe, byte_off + 8, + FIELD_PREP(IRDMAQPRQ_FRAG_LEN, 0) | + FIELD_PREP(IRDMAQPRQ_STAG, 0x00000100)); + } + + set_64bit_val(wqe, 0, + FIELD_PREP(IRDMAQPRQ_ADDFRAGCNT, info->num_sges) | + FIELD_PREP(IRDMAQPRQ_SIGNATURE, + qp->rwqe_signature)); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 8, FIELD_PREP(IRDMAQPRQ_VALID, qp->rwqe_polarity)); + + return 0; +} + +/** + * zxdh_uk_cq_resize - reset the cq buffer info + * @cq: cq to resize + * @cq_base: new cq buffer addr + * @cq_size: number of cqes + */ 
+void zxdh_uk_cq_resize(struct zxdh_cq_uk *cq, void *cq_base, int cq_size) +{ + cq->cq_base = cq_base; + cq->cq_size = cq_size; + cq->cq_log_size = zxdh_num_to_log(cq_size); + ZXDH_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; +} + +/** + * zxdh_uk_cq_set_resized_cnt - record the count of the resized buffers + * @cq: cq to resize + * @cq_cnt: the count of the resized cq buffers + */ +void zxdh_uk_cq_set_resized_cnt(struct zxdh_cq_uk *cq, u16 cq_cnt) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next; + u8 arm_seq_num; + + get_64bit_val(cq->shadow_area, 0, &temp_val); + + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + sw_cq_sel += cq_cnt; + + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, temp_val); + arm_next = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_NEXT, temp_val); + cq->cqe_rd_cnt = 0; + + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, arm_next) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cq->cqe_rd_cnt); + + set_64bit_val(cq->shadow_area, 0, temp_val); +} + +/** + * zxdh_uk_cq_request_notification - cq notification request (door bell) + * @cq: hw cq + * @cq_notify: notification type + */ +void zxdh_uk_cq_request_notification(struct zxdh_cq_uk *cq, + enum zxdh_cmpl_notify cq_notify) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next = 0; + u8 arm_seq_num; + u32 cqe_index; + u32 hdr; + + cq->armed = true; + get_64bit_val(cq->shadow_area, 0, &temp_val); + arm_seq_num = (u8)FIELD_GET(ZXDH_CQ_DBSA_ARM_SEQ_NUM, temp_val); + arm_seq_num++; + sw_cq_sel = (u16)FIELD_GET(ZXDH_CQ_DBSA_SW_CQ_SELECT, temp_val); + cqe_index = (u32)FIELD_GET(ZXDH_CQ_DBSA_CQEIDX, temp_val); + + if (cq_notify == ZXDH_CQ_COMPL_SOLICITED) + arm_next = 1; + temp_val = FIELD_PREP(ZXDH_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) | + FIELD_PREP(ZXDH_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) | + FIELD_PREP(ZXDH_CQ_DBSA_ARM_NEXT, arm_next) | + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cqe_index); + + 
set_64bit_val(cq->shadow_area, 0, temp_val); + + hdr = FIELD_PREP(ZXDH_CQ_ARM_DBSA_VLD, 0) | + FIELD_PREP(ZXDH_CQ_ARM_CQ_ID, cq->cq_id); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + writel(hdr, cq->cqe_alloc_db); +} + +/** + * zxdh_uk_cq_poll_cmpl - get cq completion info + * @cq: hw cq + * @info: cq poll information returned + */ +int zxdh_uk_cq_poll_cmpl(struct zxdh_cq_uk *cq, struct zxdh_cq_poll_info *info) +{ + u64 comp_ctx, qword0, qword2, qword3; + __le64 *cqe; + struct zxdh_qp_uk *qp; + struct zxdh_sc_qp *sc_qp; + struct zxdh_sc_srq *sc_srq; + struct zxdh_srq_uk *srq_uk = NULL; + struct zxdh_ring *pring = NULL; + u32 wqe_idx, q_type; + int ret_code; + bool move_cq_head = true; + u8 polarity; + u8 qp_type; + u8 pring_handle = true; + + if (cq->valid_cq == false) { + return -ENOENT; + } + + cqe = ZXDH_GET_CURRENT_EXTENDED_CQ_ELEM(cq); + + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + if (polarity != cq->polarity) + return -ENOENT; + + /* Ensure CQE contents are read after valid bit is checked */ + dma_rmb(); + get_64bit_val(cqe, 8, &comp_ctx); + get_64bit_val(cqe, 16, &qword2); + get_64bit_val(cqe, 24, &qword3); + + qp = (struct zxdh_qp_uk *)(unsigned long)comp_ctx; + if (!qp || qp->destroy_pending) { + ret_code = -EFAULT; + goto exit; + } + info->qp_handle = (zxdh_qp_handle)(unsigned long)qp; + qp_type = qp->qp_type; + q_type = (u8)FIELD_GET(ZXDH_CQ_SQ, qword0); + info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword0); + wqe_idx = (u32)FIELD_GET(ZXDH_CQ_WQEIDX, qword0); + info->error = (bool)FIELD_GET(ZXDH_CQ_ERROR, qword0); + + if (info->error) { + info->major_err = FIELD_GET(ZXDH_CQ_MAJERR, qword0); + info->minor_err = FIELD_GET(ZXDH_CQ_MINERR, qword0); + if (info->major_err == ZXDH_FLUSH_MAJOR_ERR) { + info->comp_status = ZXDH_COMPL_STATUS_FLUSHED; + /* Set the min error to standard flush error code for remaining cqes */ + if (info->minor_err != FLUSH_GENERAL_ERR) { + qword0 &= 
~ZXDH_CQ_MINERR; + qword0 |= FIELD_PREP(ZXDH_CQ_MINERR, + FLUSH_GENERAL_ERR); + set_64bit_val(cqe, 0, qword0); + } + } else { + info->comp_status = ZXDH_COMPL_STATUS_UNKNOWN; + } + } else { + info->comp_status = ZXDH_COMPL_STATUS_SUCCESS; + } + + info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2); + info->imm_valid = false; + info->ud_smac_valid = false; + info->ud_vlan_valid = false; + + info->qp_handle = (zxdh_qp_handle)(unsigned long)qp; + + if (q_type == ZXDH_CQE_QTYPE_RQ) { + u64 qword4; + + if (qp->is_srq == true) { + sc_qp = container_of(qp, struct zxdh_sc_qp, qp_uk); + sc_srq = sc_qp->srq; + srq_uk = &sc_srq->srq_uk; + pring_handle = false; + zxdh_free_srq_wqe(srq_uk, wqe_idx); + } + + if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED || + info->comp_status == ZXDH_COMPL_STATUS_UNKNOWN) { + if (qp->is_srq == false) { + if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) { + ret_code = -ENOENT; + goto exit; + } + + info->wr_id = + qp->rq_wrid_array[qp->rq_ring.tail]; + wqe_idx = qp->rq_ring.tail; + } else { + info->wr_id = srq_uk->srq_wrid_array[wqe_idx]; + } + } else { + if (qp->is_srq == false) + info->wr_id = qp->rq_wrid_array[wqe_idx]; + else + info->wr_id = srq_uk->srq_wrid_array[wqe_idx]; + } + + info->imm_valid = (bool)FIELD_GET(ZXDH_CQ_IMMVALID, qword2); + if (info->imm_valid) { + info->imm_data = + (u32)FIELD_GET(ZXDH_CQ_IMMDATA, qword3); + } + + info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword3); + + if (info->imm_valid) + info->op_type = ZXDH_OP_TYPE_REC_IMM; + else + info->op_type = ZXDH_OP_TYPE_REC; + + if (qp_type == ZXDH_QP_TYPE_ROCE_RC) { + if (qword2 & IRDMACQ_STAG) { + info->stag_invalid_set = true; + info->inv_stag = + (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2); + } else { + info->stag_invalid_set = false; + } + } else if (qp_type == ZXDH_QP_TYPE_ROCE_UD) { + info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword2); + info->ud_src_qpn = + (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2); + + info->ud_smac_valid = + (bool)FIELD_GET(ZXDH_CQ_UDSMACVALID, 
qword2); + info->ud_vlan_valid = + (bool)FIELD_GET(ZXDH_CQ_UDVLANVALID, qword2); + if (info->ud_smac_valid || info->ud_vlan_valid) { + get_64bit_val(cqe, 32, &qword4); + if (info->ud_vlan_valid) + info->ud_vlan = (u16)FIELD_GET( + ZXDH_CQ_UDVLAN, qword4); + if (info->ud_smac_valid) { + info->ud_smac[5] = qword4 & 0xFF; + info->ud_smac[4] = (qword4 >> 8) & 0xFF; + info->ud_smac[3] = (qword4 >> 16) & + 0xFF; + info->ud_smac[2] = (qword4 >> 24) & + 0xFF; + info->ud_smac[1] = (qword4 >> 32) & + 0xFF; + info->ud_smac[0] = (qword4 >> 40) & + 0xFF; + } + } + } + if (qp->is_srq == false) { + ZXDH_RING_SET_TAIL(qp->rq_ring, wqe_idx + 1); + if (info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) { + qp->rq_flush_seen = true; + if (!ZXDH_RING_MORE_WORK(qp->rq_ring)) + qp->rq_flush_complete = true; + else + move_cq_head = false; + } + pring = &qp->rq_ring; + } + } else { /* q_type is ZXDH_CQE_QTYPE_SQ */ + if (info->comp_status != ZXDH_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + if (!info->comp_status) + info->bytes_xfered = + qp->sq_wrtrk_array[wqe_idx].wr_len; + info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword0); + ZXDH_RING_SET_TAIL( + qp->sq_ring, + wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta); + } else { + if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) { + ret_code = -ENOENT; + goto exit; + } + + do { + __le64 *sw_wqe; + u64 wqe_qword; + u8 op_type; + u32 tail; + + tail = qp->sq_ring.tail; + sw_wqe = qp->sq_base[tail].elem; + get_64bit_val(sw_wqe, 0, &wqe_qword); + op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, + wqe_qword); + info->op_type = op_type; + ZXDH_RING_SET_TAIL( + qp->sq_ring, + tail + qp->sq_wrtrk_array[tail].quanta); + if (op_type != ZXDH_OP_TYPE_NOP) { + info->wr_id = + qp->sq_wrtrk_array[tail].wrid; + info->bytes_xfered = + qp->sq_wrtrk_array[tail].wr_len; + break; + } + } while (1); + qp->sq_flush_seen = true; + if (!ZXDH_RING_MORE_WORK(qp->sq_ring)) + qp->sq_flush_complete = true; + } + pring = &qp->sq_ring; + } + + ret_code = 0; + +exit: + 
if (pring_handle == true) { + if (!ret_code && info->comp_status == ZXDH_COMPL_STATUS_FLUSHED) + if (pring && ZXDH_RING_MORE_WORK(*pring)) + move_cq_head = false; + } + + if (move_cq_head) { + u64 cq_shadow_temp; + + ZXDH_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); + if (!ZXDH_RING_CURRENT_HEAD(cq->cq_ring)) + cq->polarity ^= 1; + + ZXDH_RING_MOVE_TAIL(cq->cq_ring); + cq->cqe_rd_cnt++; + get_64bit_val(cq->shadow_area, 0, &cq_shadow_temp); + cq_shadow_temp &= ~ZXDH_CQ_DBSA_CQEIDX; + cq_shadow_temp |= + FIELD_PREP(ZXDH_CQ_DBSA_CQEIDX, cq->cqe_rd_cnt); + set_64bit_val(cq->shadow_area, 0, cq_shadow_temp); + } else { + qword0 &= ~ZXDH_CQ_WQEIDX; + qword0 |= FIELD_PREP(ZXDH_CQ_WQEIDX, pring->tail); + set_64bit_val(cqe, 0, qword0); + } + + return ret_code; +} + +/** + * zxdh_qp_round_up - return round up qp wq depth + * @wqdepth: wq depth in quanta to round up + */ +static int zxdh_qp_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +/** + * zxdh_get_rq_wqe_shift - get shift count for maximum rq wqe size + * @uk_attrs: qp HW attributes + * @sge: Maximum Scatter Gather Elements wqe + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the rq wqe size based on number of SGEs. + * For 1 SGE, shift = 1 (wqe size of 2*16 bytes). + * For 2 or 3 SGEs, shift = 2 (wqe size of 4*16 bytes). + * For 4-7 SGE's Shift of 3. + * For 8-15 SGE's Shift of 4 otherwise (wqe size of 512 bytes). 
+ */
+void zxdh_get_rq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, u8 *shift)
+{
+	*shift = 0; //16bytes RQE, need to confirm configuration
+	if (sge < 2)
+		*shift = 1;
+	else if (sge < 4)
+		*shift = 2;
+	else if (sge < 8)
+		*shift = 3;
+	else if (sge < 16)
+		*shift = 4;
+	else
+		*shift = 5;
+}
+
+/**
+ * zxdh_get_sq_wqe_shift - get shift count for maximum wqe size
+ * @uk_attrs: qp HW attributes
+ * @sge: Maximum Scatter Gather Elements wqe
+ * @inline_data: Maximum inline data size
+ * @shift: Returns the shift needed based on sge
+ *
+ * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
+ * To support WR with imm_data, shift = 1 (wqe size of 2*32 bytes).
+ * For 2-7 SGEs or 24 < inline data <= 86, shift = 2 (wqe size of 4*32 bytes).
+ * Otherwise (wqe size of 256 bytes).
+ */
+void zxdh_get_sq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge,
+			   u32 inline_data, u8 *shift)
+{
+	*shift = 1;
+
+	if (sge > 1 || inline_data > 24) {
+		if (sge < 8 && inline_data <= 86)
+			*shift = 2;
+		else
+			*shift = 3;
+	}
+}
+
+/*
+ * zxdh_get_sqdepth - get SQ depth (quanta)
+ * @max_hw_wq_quanta: HW SQ size limit
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ */
+int zxdh_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *sqdepth)
+{
+	if (sq_size > ZXDH_MAX_SQ_DEPTH)
+		return -EINVAL;
+	*sqdepth = zxdh_qp_round_up((sq_size << shift) + ZXDH_SQ_RSVD);
+
+	if (*sqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift))
+		*sqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift;
+	else if (*sqdepth > max_hw_wq_quanta)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * zxdh_get_rqdepth - get RQ/SRQ depth (quanta)
+ * @max_hw_rq_quanta: HW RQ/SRQ size limit
+ * @rq_size: RQ/SRQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ/SRQ
+ */
+int zxdh_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *rqdepth)
+{
+	*rqdepth = zxdh_qp_round_up((rq_size << shift) + ZXDH_RQ_RSVD);
+
+	if (*rqdepth < (ZXDH_QP_SW_MIN_WQSIZE << shift))
+		*rqdepth = ZXDH_QP_SW_MIN_WQSIZE << shift;
+	else if (*rqdepth > max_hw_rq_quanta)
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct zxdh_wqe_uk_ops iw_wqe_uk_ops = {
+	.iw_copy_inline_data = zxdh_copy_inline_data,
+	.iw_inline_data_size_to_quanta = zxdh_inline_data_size_to_quanta,
+	.iw_set_fragment = zxdh_set_fragment,
+	.iw_set_mw_bind_wqe = zxdh_set_mw_bind_wqe,
+};
+
+/**
+ * zxdh_uk_qp_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * initializes the vars used in both user and kernel mode.
+ * size of the wqe depends on the maximum number of fragments
+ * allowed. Then size of wqe * the number of wqes should be the
+ * amount of memory allocated for sq and rq.
+ */
+int zxdh_uk_qp_init(struct zxdh_qp_uk *qp, struct zxdh_qp_uk_init_info *info)
+{
+	int ret_code = 0;
+	u32 sq_ring_size;
+	u8 sqshift, rqshift;
+
+	qp->uk_attrs = info->uk_attrs;
+	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+		return -EINVAL;
+
+	zxdh_get_sq_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
+			      info->max_inline_data, &sqshift);
+	zxdh_get_rq_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, &rqshift);
+	qp->qp_caps = info->qp_caps;
+	qp->sq_base = info->sq;
+	qp->rq_base = info->rq;
+	qp->qp_type = info->type;
+	qp->shadow_area = info->shadow_area;
+	qp->sq_wrtrk_array = info->sq_wrtrk_array;
+
+	qp->rq_wrid_array = info->rq_wrid_array;
+	qp->wqe_alloc_db = info->wqe_alloc_db;
+	qp->rd_fence_rate = info->rd_fence_rate;
+	qp->qp_id = info->qp_id;
+	qp->sq_size = info->sq_size;
+	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+	sq_ring_size = qp->sq_size << sqshift;
+	ZXDH_RING_INIT(qp->sq_ring, sq_ring_size);
+	ZXDH_RING_INIT(qp->initial_ring, sq_ring_size);
+	qp->swqe_polarity = 0;
+
+	qp->swqe_polarity_deferred = 1;
+	qp->rwqe_polarity = 0;
+	qp->rwqe_signature = 0;
+	qp->rq_size = info->rq_size;
+
qp->max_rq_frag_cnt = info->max_rq_frag_cnt; + qp->max_inline_data = (info->max_inline_data == 0) ? + ZXDH_MAX_INLINE_DATA_SIZE : + info->max_inline_data; + qp->rq_wqe_size = rqshift; + ZXDH_RING_INIT(qp->rq_ring, qp->rq_size); + qp->rq_wqe_size_multiplier = 1 << rqshift; + + qp->wqe_ops = iw_wqe_uk_ops; + return ret_code; +} + +/** + * zxdh_uk_cq_init - initialize shared cq (user and kernel) + * @cq: hw cq + * @info: hw cq initialization info + */ +void zxdh_uk_cq_init(struct zxdh_cq_uk *cq, struct zxdh_cq_uk_init_info *info) +{ + cq->cq_base = info->cq_base; + cq->cq_id = info->cq_id; + cq->cq_size = info->cq_size; + cq->cq_log_size = info->cq_log_size; + cq->cqe_alloc_db = info->cqe_alloc_db; + cq->shadow_area = info->shadow_area; + cq->cqe_size = info->cqe_size; + ZXDH_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; + cq->cqe_rd_cnt = 0; + cq->valid_cq = true; +} + +/** + * zxdh_uk_clean_cq - clean cq entries + * @q: completion context + * @cq: cq to clean + */ +void zxdh_uk_clean_cq(void *q, struct zxdh_cq_uk *cq) +{ + __le64 *cqe; + u64 qword0, comp_ctx; + u32 cq_head; + u8 polarity, temp; + + cq_head = cq->cq_ring.head; + temp = cq->polarity; + do { + if (cq->cqe_size) + cqe = ((struct zxdh_extended_cqe + *)(cq->cq_base))[cq_head] + .buf; + else + cqe = cq->cq_base[cq_head].buf; + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + + if (polarity != temp) + break; + + get_64bit_val(cqe, 8, &comp_ctx); + if ((void *)(unsigned long)comp_ctx == q) + set_64bit_val(cqe, 8, 0); + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + temp ^= 1; + } while (true); +} + +/** + * zxdh_nop - post a nop + * @qp: hw qp ptr + * @wr_id: work request id + * @signaled: signaled for completion + * @post_sq: ring doorbell + */ +int zxdh_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq) +{ + __le64 *wqe; + u64 hdr; + u32 wqe_idx; + struct zxdh_post_sq_info info = {}; + + info.push_wqe = false; + info.wr_id = 
wr_id; + wqe = zxdh_qp_get_next_send_wqe(qp, &wqe_idx, ZXDH_QP_WQE_MIN_QUANTA, 0, + &info); + if (!wqe) + return -ENOSPC; + + zxdh_clr_wqes(qp, wqe_idx); + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, ZXDH_OP_TYPE_NOP) | + FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) | + FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, hdr); + if (post_sq) + zxdh_uk_qp_post_wr(qp); + + return 0; +} + +/** + * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ + * @frag_cnt: number of fragments + * @quanta: quanta for frag_cnt + */ +int zxdh_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta) +{ + if (frag_cnt > ZXDH_MAX_SQ_FRAG) + return -EINVAL; + *quanta = frag_cnt / 2 + 1; + return 0; +} + +/** + * zxdh_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ + * @frag_cnt: number of fragments + * @wqe_size: size in bytes given frag_cnt + */ +int zxdh_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size) +{ + if (frag_cnt < 2) + *wqe_size = 32; + else if (frag_cnt < 4) + *wqe_size = 64; + else if (frag_cnt < 8) + *wqe_size = 128; + else if (frag_cnt < 16) + *wqe_size = 256; + else if (frag_cnt < 32) + *wqe_size = 512; + else + return -EINVAL; + + return 0; +} diff --git a/drivers/infiniband/hw/zrdma/user.h b/drivers/infiniband/hw/zrdma/user.h new file mode 100644 index 000000000000..5b1932ddad91 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/user.h @@ -0,0 +1,519 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_H +#define ZXDH_USER_H + +#define zxdh_handle void * +#define zxdh_adapter_handle zxdh_handle +#define zxdh_qp_handle zxdh_handle +#define zxdh_cq_handle zxdh_handle +#define zxdh_pd_id zxdh_handle +#define zxdh_stag_handle zxdh_handle +#define zxdh_stag_index u32 +#define zxdh_stag u32 
+#define zxdh_stag_key u8 +#define zxdh_tagged_offset u64 +#define zxdh_access_privileges u32 +#define zxdh_physical_fragment u64 +#define zxdh_address_list u64 * +#define zxdh_sgl struct zxdh_sge * + +#define ZXDH_MAX_MR_SIZE 0x200000000000ULL + +#define ZXDH_ACCESS_FLAGS_LOCALREAD 0x01 +#define ZXDH_ACCESS_FLAGS_LOCALWRITE 0x02 +#define ZXDH_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04 +#define ZXDH_ACCESS_FLAGS_REMOTEREAD 0x05 +#define ZXDH_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08 +#define ZXDH_ACCESS_FLAGS_REMOTEWRITE 0x0a +#define ZXDH_ACCESS_FLAGS_BIND_WINDOW 0x10 +#define ZXDH_ACCESS_FLAGS_ZERO_BASED 0x20 +#define ZXDH_ACCESS_FLAGS_ALL 0x3f + +#define ZXDH_OP_TYPE_NOP 0x00 +#define ZXDH_OP_TYPE_SEND 0x01 +#define ZXDH_OP_TYPE_SEND_WITH_IMM 0x02 +#define ZXDH_OP_TYPE_SEND_INV 0x03 +#define ZXDH_OP_TYPE_WRITE 0x04 +#define ZXDH_OP_TYPE_WRITE_WITH_IMM 0x05 +#define ZXDH_OP_TYPE_READ 0x06 +#define ZXDH_OP_TYPE_BIND_MW 0x07 +#define ZXDH_OP_TYPE_FAST_REG_MR 0x08 +#define ZXDH_OP_TYPE_LOCAL_INV 0x09 +#define ZXDH_OP_TYPE_UD_SEND 0x0a +#define ZXDH_OP_TYPE_UD_SEND_WITH_IMM 0x0b + +#define ZXDH_OP_TYPE_REC 0x3e +#define ZXDH_OP_TYPE_REC_IMM 0x3f + +#define ZXDH_FLUSH_MAJOR_ERR 1 + +#define ZXDH_MAX_MSIX_INTERRUPT_SIZE 24 + +#define ZXDH_MIN_ROCE_QP_ID 1 +#define ZXDH_MIN_ROCE_SRQ_ID 1 + +#define ZXDH_SQE_SIZE 4 +#define ZXDH_RQE_SIZE 2 +#define IRDMARX_RD_TIME_LIMIT_VALUE 0x20 + +enum zxdh_hw_stats_state { + ZXDH_HW_STATS_INVALID = 0, + ZXDH_HW_STATS_VALID, +}; + +enum zxdh_cfg_ram_state { + ZXDH_CFG_RAM_FREE = 0, + ZXDH_CFG_RAM_BUSY, +}; + +enum zxdh_stat_rd_clr_mode { + ZXDH_STAT_RD_MODE_UNCLR = 0, //Not reading clearly + ZXDH_STAT_RD_MODE_CLR, // Read Clearly +}; + +enum zxdh_device_caps_const { + ZXDH_WQE_SIZE = 4, + ZXDH_CQP_WQE_SIZE = 8, + ZXDH_CQE_SIZE = 8, + ZXDH_EXTENDED_CQE_SIZE = 8, + ZXDH_AEQE_SIZE = 4, + ZXDH_CEQE_SIZE = 2, + ZXDH_CQP_CTX_SIZE = 8, + ZXDH_SHADOW_AREA_SIZE = 1, + ZXDH_GATHER_STATS_BUF_SIZE = 1024, + ZXDH_MIN_IW_QP_ID = 0, + ZXDH_QUERY_FPM_BUF_SIZE = 176, 
+ ZXDH_COMMIT_FPM_BUF_SIZE = 176, + ZXDH_MAX_IW_QP_ID = 262143, + ZXDH_MIN_CEQID = 0, + ZXDH_MAX_CEQID = 4095, + ZXDH_CEQ_MAX_COUNT = ZXDH_MAX_CEQID + 1, + ZXDH_MIN_CQID = 0, + ZXDH_MAX_CQID = 524287, + ZXDH_MIN_AEQ_ENTRIES = 1, + ZXDH_MAX_AEQ_ENTRIES = 131072, // 64k QP + 32k CQ + 32k SRQ + ZXDH_MIN_CEQ_ENTRIES = 1, + ZXDH_MAX_CEQ_ENTRIES = 32768, // 32k CQ + ZXDH_MIN_CQ_SIZE = 1, + ZXDH_MAX_CQ_SIZE = 2097152, // 2M + ZXDH_DB_ID_ZERO = 0, + ZXDH_MAX_OUTBOUND_MSG_SIZE = 2147483647, + ZXDH_MAX_INBOUND_MSG_SIZE = 2147483647, + ZXDH_MAX_PUSH_PAGE_COUNT = 1024, + ZXDH_MAX_PE_ENA_VF_COUNT = 128, + ZXDH_MAX_VF_FPM_ID = 47, + ZXDH_MAX_SQ_PAYLOAD_SIZE = 2147483648, + ZXDH_MAX_INLINE_DATA_SIZE = 217, + ZXDH_MAX_WQ_ENTRIES = 32768, + ZXDH_Q2_BUF_SIZE = 256, + ZXDH_QP_CTX_SIZE = 512, + ZXDH_CQ_CTX_SIZE = 64, + ZXDH_CEQ_CTX_SIZE = 32, + ZXDH_AEQ_CTX_SIZE = 32, + ZXDH_SRQ_CTX_SIZE = 64, + ZXDH_MAX_PDS = 1048576, // 1M +}; + +enum zxdh_host_epid { + ZXDH_HOST_EP0_ID = 5, + ZXDH_HOST_EP1_ID = 6, + ZXDH_HOST_EP2_ID = 7, + ZXDH_HOST_EP3_ID = 8, + ZXDH_HOST_EP4_ID = 9, +}; + +enum zxdh_addressing_type { + ZXDH_ADDR_TYPE_ZERO_BASED = 0, + ZXDH_ADDR_TYPE_VA_BASED = 1, +}; + +enum zxdh_queue_status { + ZXDH_QUEUE_STATE_INVALID = 0, + ZXDH_QUEUE_STATE_OK, +}; + +enum zxdh_ceqe_size { + ZXDH_CEQE_SIZE_16_BYTE = 0, + ZXDH_CEQE_SIZE_32_BYTE, + ZXDH_CEQE_SIZE_64_BYTE, + ZXDH_CEQE_SIZE_128_BYTE, +}; + +enum zxdh_irq_type { + ZXDH_IRQ_TYPE_MSIX = 0, + ZXDH_IRQ_TYPE_PIN, +}; + +enum zxdh_ceq_aggregation_cnt { + IRMDA_CEQ_AGGREGATION_CNT_0, + IRMDA_CEQ_AGGREGATION_CNT_1 = 1, + ZXDH_CEQ_AGGREGATION_CNT_2 = 2, +}; + +enum zxdh_vf_active_state { + IRMDA_VF_STATE_INVALID = 0, + ZXDH_VF_STATE_VALID, +}; + +enum zxdh_flush_opcode { + FLUSH_INVALID = 0, + FLUSH_GENERAL_ERR, + FLUSH_PROT_ERR, + FLUSH_REM_ACCESS_ERR, + FLUSH_LOC_QP_OP_ERR, + FLUSH_REM_OP_ERR, + FLUSH_LOC_LEN_ERR, + FLUSH_FATAL_ERR, + FLUSH_RETRY_EXC_ERR, + FLUSH_MW_BIND_ERR, + FLUSH_REM_INV_REQ_ERR, + FLUSH_MR_FASTREG_ERR, +}; + +enum 
zxdh_cmpl_status { + ZXDH_COMPL_STATUS_SUCCESS = 0, + ZXDH_COMPL_STATUS_FLUSHED, + ZXDH_COMPL_STATUS_INVALID_WQE, + ZXDH_COMPL_STATUS_QP_CATASTROPHIC, + ZXDH_COMPL_STATUS_REMOTE_TERMINATION, + ZXDH_COMPL_STATUS_INVALID_STAG, + ZXDH_COMPL_STATUS_BASE_BOUND_VIOLATION, + ZXDH_COMPL_STATUS_ACCESS_VIOLATION, + ZXDH_COMPL_STATUS_INVALID_PD_ID, + ZXDH_COMPL_STATUS_WRAP_ERROR, + ZXDH_COMPL_STATUS_STAG_INVALID_PDID, + ZXDH_COMPL_STATUS_RDMA_READ_ZERO_ORD, + ZXDH_COMPL_STATUS_QP_NOT_PRIVLEDGED, + ZXDH_COMPL_STATUS_STAG_NOT_INVALID, + ZXDH_COMPL_STATUS_INVALID_PHYS_BUF_SIZE, + ZXDH_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY, + ZXDH_COMPL_STATUS_INVALID_FBO, + ZXDH_COMPL_STATUS_INVALID_LEN, + ZXDH_COMPL_STATUS_INVALID_ACCESS, + ZXDH_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG, + ZXDH_COMPL_STATUS_INVALID_VIRT_ADDRESS, + ZXDH_COMPL_STATUS_INVALID_REGION, + ZXDH_COMPL_STATUS_INVALID_WINDOW, + ZXDH_COMPL_STATUS_INVALID_TOTAL_LEN, + ZXDH_COMPL_STATUS_UNKNOWN, +}; + +enum zxdh_cmpl_notify { + ZXDH_CQ_COMPL_EVENT = 0, + ZXDH_CQ_COMPL_SOLICITED = 1, +}; + +enum zxdh_qp_caps { + ZXDH_WRITE_WITH_IMM = 1, + ZXDH_SEND_WITH_IMM = 2, + ZXDH_ROCE = 4, + ZXDH_PUSH_MODE = 8, +}; + +struct zxdh_qp_uk; +struct zxdh_cq_uk; +struct zxdh_qp_uk_init_info; +struct zxdh_cq_uk_init_info; + +struct zxdh_sge { + zxdh_tagged_offset tag_off; + u32 len; + zxdh_stag stag; +}; + +struct zxdh_ring { + u32 head; + u32 tail; + u32 size; +}; + +struct zxdh_cqe { + __le64 buf[ZXDH_CQE_SIZE]; +}; + +struct zxdh_extended_cqe { + __le64 buf[ZXDH_EXTENDED_CQE_SIZE]; +}; + +struct zxdh_post_send { + zxdh_sgl sg_list; + u32 num_sges; + u32 qkey; + u32 dest_qp; + u32 ah_id; +}; + +struct zxdh_post_inline_send { + void *data; + u32 len; + u32 qkey; + u32 dest_qp; + u32 ah_id; +}; + +struct zxdh_post_rq_info { + u64 wr_id; + zxdh_sgl sg_list; + u32 num_sges; +}; + +struct zxdh_rdma_write { + zxdh_sgl lo_sg_list; + u32 num_lo_sges; + struct zxdh_sge rem_addr; +}; + +struct zxdh_inline_rdma_write { + void *data; + u32 len; + struct 
zxdh_sge rem_addr; +}; + +struct zxdh_rdma_read { + zxdh_sgl lo_sg_list; + u32 num_lo_sges; + struct zxdh_sge rem_addr; +}; + +struct zxdh_bind_window { + zxdh_stag mr_stag; + u64 bind_len; + void *va; + enum zxdh_addressing_type addressing_type; + u8 ena_reads : 1; + u8 ena_writes : 1; + zxdh_stag mw_stag; + u8 mem_window_type_1 : 1; +}; + +struct zxdh_inv_local_stag { + zxdh_stag target_stag; +}; + +struct zxdh_post_sq_info { + u64 wr_id; + u8 op_type; + u8 l4len; + u8 signaled : 1; + u8 solicited : 1; + u8 read_fence : 1; + u8 local_fence : 1; + u8 inline_data : 1; + u8 imm_data_valid : 1; + u8 push_wqe : 1; + u8 report_rtt : 1; + u8 udp_hdr : 1; + u8 defer_flag : 1; + u32 imm_data; + u32 stag_to_inv; + union { + struct zxdh_post_send send; + struct zxdh_rdma_write rdma_write; + struct zxdh_rdma_read rdma_read; + struct zxdh_bind_window bind_window; + struct zxdh_inv_local_stag inv_local_stag; + struct zxdh_inline_rdma_write inline_rdma_write; + struct zxdh_post_inline_send inline_send; + } op; +}; + +struct zxdh_cq_poll_info { + u64 wr_id; + zxdh_qp_handle qp_handle; + u32 bytes_xfered; + u32 tcp_seq_num_rtt; + u32 qp_id; + u32 ud_src_qpn; + u32 imm_data; + zxdh_stag inv_stag; /* or L_R_Key */ + enum zxdh_cmpl_status comp_status; + u16 major_err; + u16 minor_err; + u16 ud_vlan; + u8 ud_smac[6]; + u8 op_type; + u8 stag_invalid_set : 1; /* or L_R_Key set */ + u8 push_dropped : 1; + u8 error : 1; + u8 solicited_event : 1; + u8 ipv4 : 1; + u8 ud_vlan_valid : 1; + u8 ud_smac_valid : 1; + u8 imm_valid : 1; +}; + +int zxdh_uk_inline_rdma_write(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_rc_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_ud_inline_send(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); +int zxdh_uk_mw_bind(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_post_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool 
signaled, + bool post_sq); +int zxdh_uk_post_receive(struct zxdh_qp_uk *qp, struct zxdh_post_rq_info *info); +void zxdh_uk_qp_post_wr(struct zxdh_qp_uk *qp); +void zxdh_uk_qp_set_shadow_area(struct zxdh_qp_uk *qp); +int zxdh_uk_rdma_read(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_rdma_write(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_rc_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_ud_send(struct zxdh_qp_uk *qp, struct zxdh_post_sq_info *info, + bool post_sq); +int zxdh_uk_stag_local_invalidate(struct zxdh_qp_uk *qp, + struct zxdh_post_sq_info *info, bool post_sq); + +struct zxdh_wqe_uk_ops { + void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity, + bool imm_data_flag); + u16 (*iw_inline_data_size_to_quanta)(u32 data_size, bool imm_data_flag); + void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct zxdh_sge *sge, + u8 valid); + void (*iw_set_mw_bind_wqe)(__le64 *wqe, + struct zxdh_bind_window *op_info); +}; + +int zxdh_uk_cq_poll_cmpl(struct zxdh_cq_uk *cq, struct zxdh_cq_poll_info *info); +void zxdh_uk_cq_request_notification(struct zxdh_cq_uk *cq, + enum zxdh_cmpl_notify cq_notify); +void zxdh_uk_cq_resize(struct zxdh_cq_uk *cq, void *cq_base, int size); +void zxdh_uk_cq_set_resized_cnt(struct zxdh_cq_uk *qp, u16 cnt); +void zxdh_uk_cq_init(struct zxdh_cq_uk *cq, struct zxdh_cq_uk_init_info *info); +int zxdh_uk_qp_init(struct zxdh_qp_uk *qp, struct zxdh_qp_uk_init_info *info); +struct zxdh_sq_uk_wr_trk_info { + u64 wrid; + u32 wr_len; + u16 quanta; + u8 reserved[2]; +}; + +struct zxdh_qp_sq_quanta { + __le64 elem[ZXDH_SQE_SIZE]; +}; + +struct zxdh_qp_rq_quanta { + __le64 elem[ZXDH_RQE_SIZE]; +}; + +struct zxdh_qp_uk { + struct zxdh_qp_sq_quanta *sq_base; + struct zxdh_qp_rq_quanta *rq_base; + struct zxdh_uk_attrs *uk_attrs; + u32 __iomem *wqe_alloc_db; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; 
+ __le64 *shadow_area; + __le32 *push_db; + __le64 *push_wqe; + struct zxdh_ring sq_ring; + struct zxdh_ring rq_ring; + struct zxdh_ring initial_ring; + u32 qp_id; + u32 qp_caps; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + u32 max_inline_data; + struct zxdh_wqe_uk_ops wqe_ops; + u16 conn_wqes; + u8 qp_type; + u8 swqe_polarity; + u8 swqe_polarity_deferred; + u8 rwqe_polarity; + u8 rq_wqe_size; + u8 rq_wqe_size_multiplier; + u8 deferred_flag : 1; + u8 push_mode : 1; /* whether the last post wqe was pushed */ + u8 push_dropped : 1; + u8 first_sq_wq : 1; + u8 sq_flush_complete : 1; /* Indicates flush was seen and SQ was empty after the flush */ + u8 rq_flush_complete : 1; /* Indicates flush was seen and RQ was empty after the flush */ + u8 destroy_pending : 1; /* Indicates the QP is being destroyed */ + void *back_qp; + spinlock_t *lock; + u8 dbg_rq_flushed; + u16 ord_cnt; + u16 qp_8k_index; + u16 rwqe_signature; + u8 sq_flush_seen; + u8 rq_flush_seen; + u8 rd_fence_rate; + u8 user_pri; + u8 pmtu; + u8 is_srq; +}; + +struct zxdh_cq_uk { + struct zxdh_cqe *cq_base; + u32 __iomem *cqe_alloc_db; + u32 __iomem *cq_ack_db; + __le64 *shadow_area; + u32 cq_id; + u32 cq_size; + u32 cq_log_size; + u32 cqe_rd_cnt; + bool valid_cq; + struct zxdh_ring cq_ring; + u8 polarity; + u8 armed : 1; + u8 cqe_size; +}; + +struct zxdh_qp_uk_init_info { + struct zxdh_qp_sq_quanta *sq; + struct zxdh_qp_rq_quanta *rq; + struct zxdh_uk_attrs *uk_attrs; + u32 __iomem *wqe_alloc_db; + __le64 *shadow_area; + struct zxdh_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 qp_id; + u32 qp_caps; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + u32 max_inline_data; + u8 first_sq_wq; + u8 type; + u8 rd_fence_rate; + int abi_ver; + bool legacy_mode; +}; + +struct zxdh_cq_uk_init_info { + u32 __iomem *cqe_alloc_db; + u32 __iomem *cq_ack_db; + struct zxdh_cqe *cq_base; + __le64 *shadow_area; + u32 cq_size; + u32 cq_log_size; + u32 
cq_id; + u8 cqe_size; +}; + +__le64 *zxdh_qp_get_next_send_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx, + u16 quanta, u32 total_size, + struct zxdh_post_sq_info *info); +__le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp_uk *qp, u32 *wqe_idx); +void zxdh_uk_clean_cq(void *q, struct zxdh_cq_uk *cq); +int zxdh_nop(struct zxdh_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq); +int zxdh_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); +int zxdh_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); +void zxdh_get_sq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, + u32 inline_data, u8 *shift); +void zxdh_get_rq_wqe_shift(struct zxdh_uk_attrs *uk_attrs, u32 sge, u8 *shift); +int zxdh_get_sqdepth(u32 max_hw_wq_quanta, u32 sq_size, u8 shift, u32 *wqdepth); +int zxdh_get_rqdepth(u32 max_hw_rq_quanta, u32 rq_size, u8 shift, u32 *wqdepth); +#ifdef Z_CONFIG_PUSH_MODE +void zxdh_qp_push_wqe(struct zxdh_qp_uk *qp, __le64 *wqe, u16 quanta, + u32 wqe_idx, bool post_sq); +#endif +void zxdh_clr_wqes(struct zxdh_qp_uk *qp, u32 qp_wqe_idx); +#endif /* ZXDH_USER_H */ diff --git a/drivers/infiniband/hw/zrdma/utils.c b/drivers/infiniband/hw/zrdma/utils.c new file mode 100644 index 000000000000..d7a9049caee4 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/utils.c @@ -0,0 +1,2896 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "main.h" +#include "icrdma_hw.h" +#include + +#define ZXDH_CHECK_AND_RETURN(func_call) \ + do { \ + int ret = (func_call); \ + if (ret) { \ + return ret; \ + } \ + } while (0) + +u32 dpp_stat_port_RDMA_packet_msg_tx_cnt_get(DPP_PF_INFO_T *pf_info, u32 index, + u32 mode, u64 *p_pkB_cnt, + u64 *p_pk_cnt); + +LIST_HEAD(zxdh_handlers); +DEFINE_SPINLOCK(zxdh_handler_lock); +DEFINE_SPINLOCK(zxdh_rdma_stats_ram_lock); + +/** + * wr32 - write 32 bits to hw register + * @hw: hardware information including registers + * @reg: register offset + * @val: value to write to register + */ +inline void wr32(struct 
zxdh_hw *hw, u32 reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +/** + * rd32 - read a 32 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u32 rd32(struct zxdh_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +/** + * rd64 - read a 64 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u64 rd64(struct zxdh_hw *hw, u32 reg) +{ + return readq(hw->hw_addr + reg); +} + +/** + * zxdh_add_handler - add a handler to the list + * @hdl: handler to be added to the handler list + */ +void zxdh_add_handler(struct zxdh_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_add(&hdl->list, &zxdh_handlers); + spin_unlock_irqrestore(&zxdh_handler_lock, flags); +} + +/** + * zxdh_del_handler - delete a handler from the list + * @hdl: handler to be deleted from the handler list + */ +void zxdh_del_handler(struct zxdh_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_del(&hdl->list); + spin_unlock_irqrestore(&zxdh_handler_lock, flags); +} + +/** + * zxdh_alloc_and_get_cqp_request - get cqp struct + * @cqp: device cqp ptr + * @wait: cqp to be used in wait mode + */ +struct zxdh_cqp_request *zxdh_alloc_and_get_cqp_request(struct zxdh_cqp *cqp, + bool wait) +{ + struct zxdh_cqp_request *cqp_request = NULL; + unsigned long flags; + + spin_lock_irqsave(&cqp->req_lock, flags); + if (!list_empty(&cqp->cqp_avail_reqs)) { + cqp_request = list_entry(cqp->cqp_avail_reqs.next, + struct zxdh_cqp_request, list); + list_del_init(&cqp_request->list); + } + spin_unlock_irqrestore(&cqp->req_lock, flags); + if (!cqp_request) { + cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); + if (cqp_request) { + cqp_request->dynamic = true; + if (wait) + init_waitqueue_head(&cqp_request->waitq); + } + } + if 
(!cqp_request) { + pr_err("ERR: CQP Request Fail: No Memory"); + return NULL; + } + + cqp_request->waiting = wait; + refcount_set(&cqp_request->refcnt, 1); + memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info)); + + return cqp_request; +} + +/** + * zxdh_get_cqp_request - increase refcount for cqp_request + * @cqp_request: pointer to cqp_request instance + */ +static inline void zxdh_get_cqp_request(struct zxdh_cqp_request *cqp_request) +{ + refcount_inc(&cqp_request->refcnt); +} + +/** + * zxdh_free_cqp_request - free cqp request + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void zxdh_free_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + unsigned long flags; + + if (cqp_request->dynamic) { + kfree(cqp_request); + } else { + cqp_request->request_done = false; + cqp_request->callback_fcn = NULL; + cqp_request->waiting = false; + + spin_lock_irqsave(&cqp->req_lock, flags); + list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); + spin_unlock_irqrestore(&cqp->req_lock, flags); + } + wake_up(&cqp->remove_wq); +} + +/** + * zxdh_put_cqp_request - dec ref count and free if 0 + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void zxdh_put_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + if (refcount_dec_and_test(&cqp_request->refcnt)) + zxdh_free_cqp_request(cqp, cqp_request); +} + +/** + * zxdh_free_pending_cqp_request -free pending cqp request objs + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +static void zxdh_free_pending_cqp_request(struct zxdh_cqp *cqp, + struct zxdh_cqp_request *cqp_request) +{ + if (cqp_request->waiting) { + cqp_request->compl_info.error = true; + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + } + wait_event_timeout(cqp->remove_wq, + refcount_read(&cqp_request->refcnt) == 1, 1000); + zxdh_put_cqp_request(cqp, cqp_request); +} + +/** + * zxdh_cleanup_pending_cqp_op - clean-up cqp with no 
+ * completions + * @rf: RDMA PCI function + */ +void zxdh_cleanup_pending_cqp_op(struct zxdh_pci_f *rf) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cqp *cqp = &rf->cqp; + struct zxdh_cqp_request *cqp_request = NULL; + struct cqp_cmds_info *pcmdinfo = NULL; + u32 i, pending_work, wqe_idx; + + pending_work = ZXDH_RING_USED_QUANTA(cqp->sc_cqp.sq_ring); + wqe_idx = ZXDH_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring); + for (i = 0; i < pending_work; i++) { + cqp_request = (struct zxdh_cqp_request *)(unsigned long) + cqp->scratch_array[wqe_idx]; + if (cqp_request) + zxdh_free_pending_cqp_request(cqp, cqp_request); + wqe_idx = (wqe_idx + 1) % ZXDH_RING_SIZE(cqp->sc_cqp.sq_ring); + } + + while (!list_empty(&dev->cqp_cmd_head)) { + pcmdinfo = zxdh_remove_cqp_head(dev); + cqp_request = + container_of(pcmdinfo, struct zxdh_cqp_request, info); + if (cqp_request) + zxdh_free_pending_cqp_request(cqp, cqp_request); + } +} + +/** + * zxdh_wait_event - wait for completion + * @rf: RDMA PCI function + * @cqp_request: cqp request to wait + */ +static int zxdh_wait_event(struct zxdh_pci_f *rf, + struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_cqp_timeout cqp_timeout = {}; + bool cqp_error = false; + int err_code = 0; + + cqp_timeout.compl_cqp_cmds = + rf->sc_dev.cqp_cmd_stats[ZXDH_OP_CMPL_CMDS]; + do { + int wait_time_ms = + rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms; + + zxdh_cqp_ce_handler(rf, &rf->ccq.sc_cq); + if (wait_event_timeout(cqp_request->waitq, + cqp_request->request_done, + msecs_to_jiffies(wait_time_ms))) + break; + + zxdh_check_cqp_progress(&cqp_timeout, &rf->sc_dev); + if (rf->sc_dev.hw_attrs.self_health == true) + return 0; + if (cqp_timeout.count < rf->sc_dev.hw_attrs.cqp_timeout_threshold) + continue; + + if (!rf->reset) { + // rf->reset = true; + rf->gen_ops.request_reset(rf); + } + return -ETIMEDOUT; + } while (1); + + cqp_error = cqp_request->compl_info.error; + if (cqp_error) { + err_code = -EIO; + if (cqp_request->compl_info.maj_err_code == 
0xFFFF) { + if (cqp_request->compl_info.min_err_code == 0x8002) { + err_code = -EBUSY; + } else if (cqp_request->compl_info.min_err_code == + 0x8029) { + if (!rf->reset) { + // rf->reset = true; + //rf->gen_ops.request_reset(rf); + } + } + } + } + + return err_code; +} + +static const char *const zxdh_cqp_cmd_names[ZXDH_MAX_CQP_OPS] = { + [ZXDH_OP_CEQ_DESTROY] = "Destroy CEQ Cmd", + [ZXDH_OP_AEQ_DESTROY] = "Destroy AEQ Cmd", + [ZXDH_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd", + [ZXDH_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd", + [ZXDH_OP_CEQ_CREATE] = "CEQ Create Cmd", + [ZXDH_OP_AEQ_CREATE] = "AEQ Destroy Cmd", + [ZXDH_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd", + [ZXDH_OP_QP_MODIFY] = "Modify QP Cmd", + [ZXDH_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd", + [ZXDH_OP_CQ_CREATE] = "Create CQ Cmd", + [ZXDH_OP_CQ_DESTROY] = "Destroy CQ Cmd", + [ZXDH_OP_QP_CREATE] = "Create QP Cmd", + [ZXDH_OP_QP_DESTROY] = "Destroy QP Cmd", + [ZXDH_OP_ALLOC_STAG] = "Allocate STag Cmd", + [ZXDH_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd", + [ZXDH_OP_DEALLOC_STAG] = "Deallocate STag Cmd", + [ZXDH_OP_MW_ALLOC] = "Allocate Memory Window Cmd", + [ZXDH_OP_QP_FLUSH_WQES] = "Flush QP Cmd", + [ZXDH_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd", + [ZXDH_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd", + [ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd", + [ZXDH_OP_SUSPEND] = "Suspend QP Cmd", + [ZXDH_OP_RESUME] = "Resume QP Cmd", + [ZXDH_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd", + [ZXDH_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd", + [ZXDH_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd", + [ZXDH_OP_AH_CREATE] = "Create Address Handle Cmd", + [ZXDH_OP_AH_MODIFY] = "Modify Address Handle Cmd", + [ZXDH_OP_AH_DESTROY] = "Destroy Address Handle Cmd", + [ZXDH_OP_MC_CREATE] = "Create Multicast Group Cmd", + [ZXDH_OP_MC_DESTROY] = "Destroy Multicast Group Cmd", + [ZXDH_OP_MC_MODIFY] = "Modify Multicast Group Cmd", + 
[ZXDH_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd", + [ZXDH_OP_STATS_FREE] = "Free Statistics Instance Cmd", + [ZXDH_OP_STATS_GATHER] = "Gather Statistics Cmd", + [ZXDH_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd", + [ZXDH_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd", + [ZXDH_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd", + [ZXDH_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd", + [ZXDH_OP_GEN_AE] = "Generate AE Cmd", + [ZXDH_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd", + [ZXDH_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd", + [ZXDH_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd", + [ZXDH_OP_CQ_MODIFY] = "CQ Modify Cmd", + [ZXDH_OP_CONFIG_PTE_TAB] = "Config PTE Tab Cmd", + [ZXDH_OP_QUERY_PTE_TAB] = "Query PTE Tab Cmd", + [ZXDH_OP_CONFIG_PBLE_TAB] = "Config PBLE Tab Cmd", + [ZXDH_OP_CONFIG_MAILBOX] = "Config Mailbox Cmd", + [ZXDH_OP_DMA_WRITE] = "Dma Write Cmd", + [ZXDH_OP_DMA_WRITE32] = "Dma Write32 Cmd", + [ZXDH_OP_DMA_WRITE64] = "Dma Write64 Cmd", + [ZXDH_OP_DMA_READ] = "Dma Read Cmd", + [ZXDH_OP_DMA_READ_USE_CQE] = "Dma Read Use Cqe Cmd", + [ZXDH_OP_QUERY_QPC] = "Query HW QPC Cmd", + [ZXDH_OP_QUERY_CQC] = "Query HW CQC Cmd", + [ZXDH_OP_QUERY_SRQC] = "Query HW SRQC Cmd", + [ZXDH_OP_QUERY_CEQC] = "Query HW CEQC Cmd", + [ZXDH_OP_QUERY_AEQC] = "Query HW AEQC Cmd", + [ZXDH_OP_QUERY_HW_OBJECT_INFO] = "Query HW object data", +}; + +static const struct zxdh_cqp_err_info zxdh_noncrit_err_list[] = { + { 0xffff, 0x8002, "Invalid State" }, + { 0xffff, 0x8006, "Flush No Wqe Pending" }, + { 0xffff, 0x8007, "Modify QP Bad Close" }, + { 0xffff, 0x8009, "LLP Closed" }, + { 0xffff, 0x800a, "Reset Not Sent" } +}; + +/** + * zxdh_cqp_crit_err - check if CQP error is critical + * @dev: pointer to dev structure + * @cqp_cmd: code for last CQP operation + * @maj_err_code: major error code + * @min_err_code: minot error code + */ +bool zxdh_cqp_crit_err(struct zxdh_sc_dev *dev, u8 cqp_cmd, u16 maj_err_code, + u16 min_err_code) +{ + int i; + + for 
(i = 0; i < ARRAY_SIZE(zxdh_noncrit_err_list); ++i) { + if (maj_err_code == zxdh_noncrit_err_list[i].maj && + min_err_code == zxdh_noncrit_err_list[i].min) { + pr_err("CQP: [%s Error][%s] maj=0x%x min=0x%x\n", + zxdh_noncrit_err_list[i].desc, + zxdh_cqp_cmd_names[cqp_cmd], maj_err_code, + min_err_code); + return false; + } + } + return true; +} + +int zxdh_check_cqp_cmd(struct cqp_cmds_info *info) +{ + int status = 0; + switch (info->cqp_cmd) { + case ZXDH_OP_CEQ_CREATE: + case ZXDH_OP_AEQ_CREATE: + case ZXDH_OP_QP_UPLOAD_CONTEXT: + case ZXDH_OP_CQ_CREATE: + case ZXDH_OP_CQ_MODIFY: + case ZXDH_OP_CQ_MODIFY_MODERATION: + case ZXDH_OP_MANAGE_HMC_PM_FUNC_TABLE: + case ZXDH_OP_MANAGE_VF_PBLE_BP: + case ZXDH_OP_QUERY_RDMA_FEATURES: + case ZXDH_OP_QP_MODIFY: + case ZXDH_OP_QP_CREATE: + case ZXDH_OP_ALLOC_STAG: + case ZXDH_OP_MR_REG_NON_SHARED: + case ZXDH_OP_MW_ALLOC: + case ZXDH_OP_ADD_ARP_CACHE_ENTRY: + // case ZXDH_OP_AH_CREATE: + case ZXDH_OP_CONFIG_PTE_TAB: + case ZXDH_OP_CONFIG_PBLE_TAB: + case ZXDH_OP_DMA_WRITE: + case ZXDH_OP_QUERY_PTE_TAB: + case ZXDH_OP_QUERY_HW_OBJECT_INFO: + case ZXDH_OP_DMA_READ: + case ZXDH_OP_CONFIG_MAILBOX: + case ZXDH_OP_DMA_READ_USE_CQE: + case ZXDH_OP_DMA_WRITE32: + case ZXDH_OP_DMA_WRITE64: + case ZXDH_OP_QUERY_QPC: + case ZXDH_OP_QUERY_CQC: + case ZXDH_OP_QUERY_CEQC: + case ZXDH_OP_QUERY_AEQC: + case ZXDH_OP_QUERY_SRQC: + case ZXDH_OP_SRQ_MODIFY: + case ZXDH_OP_SRQ_CREATE: + status = -EBUSY; + break; + default: + status = 0; + break; + } + return status; +} + +/** + * zxdh_handle_cqp_op - process cqp command + * @rf: RDMA PCI function + * @cqp_request: cqp request to process + */ +int zxdh_handle_cqp_op(struct zxdh_pci_f *rf, + struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct cqp_cmds_info *info = &cqp_request->info; + int status; + bool put_cqp_request = true; + + if (rf->reset) + return -EBUSY; + + zxdh_get_cqp_request(cqp_request); + if (rf->sc_dev.hw_attrs.self_health == true) { + status 
= zxdh_check_cqp_cmd(info); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; + } + + status = zxdh_process_cqp_cmd(dev, info); + if (status) + goto err; + + if (cqp_request->waiting) { + put_cqp_request = false; + status = zxdh_wait_event(rf, cqp_request); + if (status) + goto err; + } + + return 0; + +err: + if (zxdh_cqp_crit_err(dev, info->cqp_cmd, + cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code)) + if (dev->hw_attrs.self_health == false) + dev_err(idev_to_dev(dev), + "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n", + zxdh_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, + status, cqp_request->waiting, + cqp_request->compl_info.error, + cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code); + + if (put_cqp_request) + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_qp_add_ref(struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + + refcount_inc(&iwqp->refcnt); +} + +void zxdh_qp_rem_ref(struct ib_qp *ibqp) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + unsigned long flags; + + spin_lock_irqsave(&iwdev->rf->qptable_lock, flags); + if (!refcount_dec_and_test(&iwqp->refcnt)) { + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + return; + } + + iwdev->rf->qp_table[iwqp->sc_qp.qp_ctx_num - iwqp->sc_qp.dev->base_qpn] = + NULL; + spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags); + complete(&iwqp->free_qp); +} + +void zxdh_cq_add_ref(struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + + refcount_inc(&iwcq->refcnt); +} + +void zxdh_cq_rem_ref(struct ib_cq *ibcq) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_pci_f *rf = + container_of(iwcq->sc_cq.dev, struct zxdh_pci_f, sc_dev); + unsigned long flags; + + spin_lock_irqsave(&rf->cqtable_lock, flags); + if (!refcount_dec_and_test(&iwcq->refcnt)) { + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + 
return; + } + + rf->cq_table[iwcq->cq_num - rf->sc_dev.base_cqn] = NULL; + spin_unlock_irqrestore(&rf->cqtable_lock, flags); + complete(&iwcq->free_cq); +} + +struct ib_device *zxdh_get_ibdev(struct zxdh_sc_dev *dev) +{ + return &(container_of(dev, struct zxdh_pci_f, sc_dev))->iwdev->ibdev; +} + +/** + * zxdh_remove_cqp_head - return head entry and remove + * @dev: device + */ +void *zxdh_remove_cqp_head(struct zxdh_sc_dev *dev) +{ + struct list_head *entry; + struct list_head *list = &dev->cqp_cmd_head; + + if (list_empty(list)) + return NULL; + + entry = list->next; + list_del(entry); + + return entry; +} + +/** + * zxdh_terminate_del_timer - delete terminate timeout + * @qp: hardware control qp + */ +void zxdh_terminate_del_timer(struct zxdh_sc_qp *qp) +{ + struct zxdh_qp *iwqp; + int ret; + + iwqp = qp->qp_uk.back_qp; + ret = del_timer(&iwqp->terminate_timer); + if (ret) + zxdh_qp_rem_ref(&iwqp->ibqp); +} + +/** + * zxdh_cq_wq_destroy - send cq destroy cqp + * @rf: RDMA PCI function + * @cq: hardware control cq + */ +void zxdh_cq_wq_destroy(struct zxdh_pci_f *rf, struct zxdh_sc_cq *cq) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_CQ_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_destroy.cq = cq; + cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; + + zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); +} + +/** + * zxdh_hw_modify_qp - setup cqp for modify qp + * @iwdev: RDMA device + * @iwqp: qp ptr (user or kernel) + * @info: info for modify qp + * @wait: flag to wait or not for modify qp completion + */ +int zxdh_hw_modify_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_modify_qp_info *info, bool wait) +{ + int status; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct 
cqp_cmds_info *cqp_info; + struct zxdh_modify_qp_info *m_info; + + wait = true; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, wait); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + m_info = &cqp_info->in.u.qp_modify.info; + memcpy(m_info, info, sizeof(*m_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_cqp_qp_destroy_cmd - destroy the cqp + * @dev: device pointer + * @qp: pointer to qp + */ +int zxdh_cqp_qp_destroy_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_qp *qp) +{ + struct zxdh_pci_f *rf = dev_to_rf(dev); + struct zxdh_cqp *iwcqp = &rf->cqp; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(iwcqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + memset(cqp_info, 0, sizeof(*cqp_info)); + cqp_info->cqp_cmd = ZXDH_OP_QP_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_destroy.qp = qp; + cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +static void zxdh_set_rx_ram_reg(struct zxdh_sc_dev *dev, u32 ram_num, + u32 ram_width, u32 ram_addr, u32 ram_read_cnt) +{ + writel(ram_num, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + writel(ram_width, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_WIDTH)); + writel(ram_addr, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + writel(ram_read_cnt, + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_READ_LENGTH)); + writel(0, (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_READ_FLAG)); +} + +static int zxdh_read_rx_ram_flag(struct zxdh_sc_dev *dev) +{ + u32 val; + + 
udelay(1000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_READ_FLAG)); + if (val != 1) { + udelay(2000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_READ_FLAG)); + if (val != 1) + return -EIO; + } + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_READ_ERROR_FLAG)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_READ_CNT_ERROR)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_REDUN_FLAG)); + val |= readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_DOUBLE_VLD_FLAG)); + if (val != 0) + return -EIO; + + return val; +} + +static u32 zxdh_read_rx_ram_data(struct zxdh_sc_dev *dev, u32 offset_idx) +{ + u32 val; + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMARX_RAM_MAINTENANCE_RAM(offset_idx))); + return val; +} + +static void zxdh_set_tx_ram_reg(struct zxdh_sc_dev *dev, u32 ram_num, + u32 ram_width, u32 ram_addr, u32 ram_read_cnt) +{ + writel(ram_num, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + writel(ram_width, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_WIDTH)); + writel(ram_addr, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + writel(ram_read_cnt, + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_LENGTH)); + writel(0, (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_FLAG)); +} +static int zxdh_read_tx_ram_flag(struct zxdh_sc_dev *dev) +{ + u32 val; + + udelay(1000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_READ_FLAG)); + if (val != 1) { + udelay(2000); //to be modified smaller + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_RAM_READ_FLAG)); + if (val != 1) + return -EIO; + } + val = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_READ_ERROR_FLAG)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_READ_CNT_ERROR)); + val |= readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_REDUN_FLAG)); + val |= readl( + (u32 __iomem *)(dev->hw->hw_addr + 
RDMATX_DOUBLE_VLD_FLAG)); + if (val != 0) + return -EIO; + return val; +} + +static u32 zxdh_read_tx_ram_data(struct zxdh_sc_dev *dev, u32 offset_idx) +{ + u32 val; + + val = readl((u32 __iomem *)(dev->hw->hw_addr + + RDMATX_RAM_MAINTENANCE_RAM(offset_idx))); + return val; +} + +static u32 zxdh_get_vhca_ram(u32 vhca_id) +{ + u32 ram_num; + + if (vhca_id < 255) + ram_num = ZXDH_RAM_H12; + else if (vhca_id < 511) + ram_num = ZXDH_RAM_H13; + else if (vhca_id < 767) + ram_num = ZXDH_RAM_H14; + else + ram_num = ZXDH_RAM_H15; + return ram_num; +} + +static u32 zxdh_get_vhca_ram_addr(u32 vhca_id) +{ + u32 ram_addr = 0; + + if (vhca_id < 255) + ram_addr = vhca_id; + else if (vhca_id < 511) + ram_addr = (vhca_id - 256); + else if (vhca_id < 767) + ram_addr = (vhca_id - 512); + else + ram_addr = (vhca_id - 768); + return ram_addr; +} +static int zxdh_get_ram_msg_h11(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + u32 rtt_cfg; + + rtt_cfg = readl((u32 __iomem *)(dev->hw->hw_addr + RDMATX_RTT_CFG)); + /* if rtt enabled, rp_cnp_handled not count */ + if (rtt_cfg != 0) + return 0; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H11, ZXDH_RAM_WIDTH_64_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H11) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_RP_CNP_HANDLED] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RP_CNP_HANDLED] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} +static void zxdh_get_ram_for_rx_stats(struct zxdh_sc_dev *dev, u64 *p_pkB_cnt, + u64 *p_pk_cnt) +{ + u64 temp_val = 0; + u64 stat_val1, stat_val2; + + // ipv6 unicast + stat_val1 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + stat_val1 = (stat_val1 << IRMDA_BIT_WIDTH_16); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + stat_val1 |= + ((temp_val & ZXDH_32_BIT_MASK_16_31) >> IRMDA_BIT_WIDTH_16); + // ipv4 unicast + stat_val2 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + stat_val2 = ((stat_val2 & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + stat_val2 |= temp_val; + *p_pk_cnt = stat_val1 + stat_val2; + + // ipv6 + stat_val1 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_12); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_13); + stat_val1 |= ((temp_val & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + // ipv4 + stat_val2 = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_9); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_10); + stat_val2 |= ((temp_val & ZXDH_32_BIT_MASK_0_15) << IRMDA_BIT_WIDTH_32); + *p_pkB_cnt = stat_val1 + stat_val2; +} +static int zxdh_get_rx_stat(struct zxdh_sc_dev *dev, u64 *p_pkB_cnt, + u64 *p_pk_cnt) +{ + u32 check_ram_num, check_ram_addr; + u32 ram_num; + u32 ram_addr; + int ret; + int i; + + ram_num = zxdh_get_vhca_ram(dev->vhca_id); + ram_addr = zxdh_get_vhca_ram_addr(dev->vhca_id); + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_num, ZXDH_RAM_WIDTH_480_BIT, + ram_addr, ZXDH_RAM_WIDTH_LEN_UNIT_1); 
+ ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ram_num) || + (check_ram_addr != ram_addr)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + zxdh_get_ram_for_rx_stats(dev, p_pkB_cnt, p_pk_cnt); + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h12_to_h15(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 pkB_cnt, pkts_cnt; + u64 val = 0; + u64 temp_val = 0; + u32 check_ram_num, check_ram_addr; + u32 ram_num; + u32 ram_addr; + int ret; + int i; + + ram_num = zxdh_get_vhca_ram(dev->vhca_id); + ram_addr = zxdh_get_vhca_ram_addr(dev->vhca_id); + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_num, ZXDH_RAM_WIDTH_480_BIT, + ram_addr, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ram_num) || + (check_ram_addr != ram_addr)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + zxdh_get_ram_for_rx_stats(dev, &pkB_cnt, &pkts_cnt); + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_RX_BYTES] = pkB_cnt; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_RX_BYTES] = + ZXDH_HW_STATS_VALID; + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_RX_PKTS] = pkts_cnt; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_RX_PKTS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_4); + val |= ((temp_val & ZXDH_32_BIT_MASK_0_15) + << IRMDA_BIT_WIDTH_32); + rdma_stats->rdma_stats_entry[HW_STAT_RX_ICRC_ENCAPSULATED] = + val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_ICRC_ENCAPSULATED] = + ZXDH_HW_STATS_VALID; + + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h25(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H25, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H25) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats->rdma_stats_entry[HW_STAT_RNR_NAK_RETRY_ERR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RNR_NAK_RETRY_ERR] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h26(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H26, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H26) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_PACKET_SEQ_ERR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_PACKET_SEQ_ERR] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats + ->rdma_stats_entry[HW_STAT_REQ_REMOTE_INVALID_REQUEST] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_REQ_REMOTE_INVALID_REQUEST] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_REMOTE_ACCESS_ERRORS] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_REQ_REMOTE_ACCESS_ERRORS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + rdma_stats + ->rdma_stats_entry[HW_STAT_REQ_REMOTE_OPERATION_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_REQ_REMOTE_OPERATION_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h63(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H63, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H63) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_DUPLICATE_REQUEST] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_DUPLICATE_REQUEST] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h29(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ZXDH_RAM_H29, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_tx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H29) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_tx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_LOCAL_LENGTH_ERROR] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_REQ_LOCAL_LENGTH_ERROR] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} +static int zxdh_get_ram_msg_h61(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H61, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H61) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_RX_WRITE_REQUESTS] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_WRITE_REQUESTS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h62(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H62, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H62) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_RX_READ_REQUESTS] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RX_READ_REQUESTS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h64(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H64, ZXDH_RAM_WIDTH_32_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H64) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_ROCE_SLOW_RESTART_CNPS] = + val; + rdma_stats + ->rdma_stats_entry_sta[HW_STAT_ROCE_SLOW_RESTART_CNPS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h104(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H104, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H104) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats->rdma_stats_entry[HW_STAT_OUT_OF_SEQUENCE] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_OUT_OF_SEQUENCE] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_RNR_NAK] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RESP_RNR_NAK] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_2); + rdma_stats + ->rdma_stats_entry[HW_STAT_RESP_REMOTE_INVALID_REQUEST] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_INVALID_REQUEST] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_3); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_REMOTE_ACCESS_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_ACCESS_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h105(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H105, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H105) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats + ->rdma_stats_entry[HW_STAT_RESP_REMOTE_OPERATION_ERRORS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_RESP_REMOTE_OPERATION_ERRORS] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h106(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H106, ZXDH_RAM_WIDTH_64_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H106) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + rdma_stats + ->rdma_stats_entry[HW_STAT_NP_ECN_MARKED_ROCE_PACKETS] = + val; + rdma_stats->rdma_stats_entry_sta + [HW_STAT_NP_ECN_MARKED_ROCE_PACKETS] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + rdma_stats->rdma_stats_entry[HW_STAT_NP_CNP_SENT] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_NP_CNP_SENT] = + ZXDH_HW_STATS_VALID; + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_get_ram_msg_h19D(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + u64 val = 0; + u64 temp_val = 0; + u32 check_ram_num, check_ram_addr; + int ret; + int i; + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ZXDH_RAM_H19D, ZXDH_RAM_WIDTH_128_BIT, + dev->vhca_id, ZXDH_RAM_WIDTH_LEN_UNIT_1); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ZXDH_RAM_H19D) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + val = (val & ZXDH_32_BIT_MASK_0_15); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_0); + temp_val = ((temp_val & ZXDH_32_BIT_MASK_16_31) >> + IRMDA_BIT_WIDTH_16); + if (val >= temp_val) + val = val - temp_val; + else if (val < temp_val) + val = val + (ZXDH_CQE_ERR_MAX - temp_val); + rdma_stats->rdma_stats_entry[HW_STAT_REQ_CQE_ERROR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_REQ_CQE_ERROR] = + ZXDH_HW_STATS_VALID; + + val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + val = (val & ZXDH_32_BIT_MASK_0_15); + temp_val = zxdh_read_rx_ram_data(dev, ZXDH_RAM_32_BIT_IDX_1); + temp_val = ((temp_val & ZXDH_32_BIT_MASK_16_31) >> + IRMDA_BIT_WIDTH_16); + if (val >= temp_val) + val = val - temp_val; + else if (val < temp_val) + val = val + (ZXDH_CQE_ERR_MAX - temp_val); + rdma_stats->rdma_stats_entry[HW_STAT_RESP_CQE_ERROR] = val; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RESP_CQE_ERROR] = + ZXDH_HW_STATS_VALID; + + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static void zxdh_get_np_tx_stats(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + struct iidc_core_dev_info *cdev_info; + struct zxdh_pci_f *rf = NULL; + u64 tx_pkts = 0; + u64 tx_bytes = 0; + u16 vport = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + cdev_info = rf->cdev; + pf_info.vport = cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + dpp_stat_port_RDMA_packet_msg_tx_cnt_get(&pf_info, dev->vhca_id, + ZXDH_STAT_RD_MODE_UNCLR, + &tx_bytes, &tx_pkts); + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_TX_PKTS] = tx_pkts; + rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_TX_PKTS] = + ZXDH_HW_STATS_VALID; + + rdma_stats->rdma_stats_entry[HW_STAT_RDMA_TX_BYTES] = tx_bytes; + 
rdma_stats->rdma_stats_entry_sta[HW_STAT_RDMA_TX_BYTES] = + ZXDH_HW_STATS_VALID; + + pr_info("%s dev->vhca_id:%d vport:0x%x tx_pkts:%llu tx_bytes:%llu\n", + __func__, dev->vhca_id, vport, tx_pkts, tx_bytes); +} + +static int zxdh_rdma_stats_ram_num_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h11(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h12_to_h15(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h25(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h26(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h63(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h29(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h61(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h62(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h64(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h104(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h105(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h106(dev, rdma_stats)); + ZXDH_CHECK_AND_RETURN(zxdh_get_ram_msg_h19D(dev, rdma_stats)); + zxdh_get_np_tx_stats(dev, rdma_stats); + return 0; +} + +int zxdh_rdma_stats_read(struct zxdh_sc_dev *dev, + struct zxdh_rdma_stats_get *rdma_stats) +{ + int ret = 0; + unsigned long flags; + spin_lock_irqsave(&zxdh_rdma_stats_ram_lock, flags); + ret = zxdh_rdma_stats_ram_num_read(dev, rdma_stats); + spin_unlock_irqrestore(&zxdh_rdma_stats_ram_lock, flags); + return ret; +} + +static int zxdh_get_pma_cnt_ext(struct zxdh_sc_dev *dev, + struct ib_pma_portcounters_ext *pma_cnt_ext) +{ + struct iidc_core_dev_info *cdev_info; + int ret = 0; + u64 val; + u64 rx_pkts = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 tx_bytes = 0; + unsigned long flags; + struct zxdh_pci_f *rf = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + rf = container_of(dev, struct zxdh_pci_f, sc_dev); + cdev_info = rf->cdev; + pf_info.vport = 
cdev_info->vport_id; + pf_info.slot = cdev_info->slot_id; + + spin_lock_irqsave(&zxdh_rdma_stats_ram_lock, flags); + dpp_stat_port_RDMA_packet_msg_tx_cnt_get(&pf_info, dev->vhca_id, + ZXDH_STAT_RD_MODE_UNCLR, + &tx_bytes, &tx_pkts); + ret = zxdh_get_rx_stat(dev, &rx_bytes, &rx_pkts); + spin_unlock_irqrestore(&zxdh_rdma_stats_ram_lock, flags); + if (ret) { + return ret; + } + val = tx_bytes; + /* Total number of data octets, divided by 4 (lanes), transmitted on all VLs. This is 64 bit counter. */ + val = (val / 4); + val = cpu_to_be64(val); + pma_cnt_ext->port_xmit_data = val; + + val = rx_bytes; + /* Total number of data octets, divided by 4 (lanes), received on all VLs. This is 64 bit counter. */ + val = (val / 4); + val = cpu_to_be64(val); + pma_cnt_ext->port_rcv_data = val; + + val = tx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_xmit_packets = val; + + val = rx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_rcv_packets = val; + + val = tx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_unicast_xmit_packets = val; + + val = rx_pkts; + val = cpu_to_be64(val); + pma_cnt_ext->port_unicast_rcv_packets = val; + pma_cnt_ext->port_multicast_xmit_packets = 0; + pma_cnt_ext->port_multicast_rcv_packets = 0; + return 0; +} + +static void zxdh_get_pma_cnt(struct zxdh_sc_dev *dev, + struct ib_pma_portcounters *pma_cnt) +{ + pma_cnt->symbol_error_counter = 0; + pma_cnt->link_error_recovery_counter = 0; + pma_cnt->link_downed_counter = 0; + pma_cnt->port_rcv_errors = 0; + pma_cnt->port_rcv_remphys_errors = 0; + pma_cnt->port_rcv_switch_relay_errors = 0; + pma_cnt->port_xmit_discards = 0; + pma_cnt->port_xmit_constraint_errors = 0; + pma_cnt->port_xmit_wait = 0; + pma_cnt->port_rcv_constraint_errors = 0; + pma_cnt->link_overrun_errors = 0; + pma_cnt->vl15_dropped = 0; +} +/** + * zxdh_process_pma_cmd - process pma cmd + * @dev: pointer to device structure + * @port: the port number this packet came in on + * @in_mad: the incoming MAD + * @out_mad: any outgoing 
MAD reply + */ +int zxdh_process_pma_cmd(struct zxdh_sc_dev *dev, u8 port, + const struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + // *out_mad = *in_mad; + int ret = 0; + pr_debug( + "%s %d vhca_id:%d attr_id:0x%x counters_ext:0x%x counter:0x%x\n", + __func__, __LINE__, dev->vhca_id, in_mad->mad_hdr.attr_id, + IB_PMA_PORT_COUNTERS_EXT, IB_PMA_PORT_COUNTERS); + /* Declaring support of extended counters */ + if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { + struct ib_class_port_info cpi = {}; + + cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; + memcpy((out_mad->data + 40), &cpi, sizeof(cpi)); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + } + + if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) { + struct ib_pma_portcounters_ext *pma_cnt_ext = + (struct ib_pma_portcounters_ext *)(out_mad->data + 40); + ret = zxdh_get_pma_cnt_ext(dev, pma_cnt_ext); + if (ret) { + return IB_MAD_RESULT_FAILURE; + } + + } else if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) { + struct ib_pma_portcounters *pma_cnt = + (struct ib_pma_portcounters *)(out_mad->data + 40); + zxdh_get_pma_cnt(dev, pma_cnt); + } + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +/** + * zxdh_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0 + * @dev: pointer to device info + * @sc_ceq: pointer to ceq structure + * @op: Create or Destroy + */ +int zxdh_cqp_ceq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_ceq *sc_ceq, u8 op) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = op; + cqp_info->in.u.ceq_create.ceq = sc_ceq; + cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * 
zxdh_cqp_aeq_cmd - Create/Destroy AEQ + * @dev: pointer to device info + * @sc_aeq: pointer to aeq structure + * @op: Create or Destroy + */ +int zxdh_cqp_aeq_cmd(struct zxdh_sc_dev *dev, struct zxdh_sc_aeq *sc_aeq, u8 op) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = op; + cqp_info->in.u.aeq_create.aeq = sc_aeq; + cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request; + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_cqp_up_map_cmd - Set the up-up mapping + * @dev: pointer to device structure + * @cmd: map command + * @map_info: pointer to up map info + */ +int zxdh_cqp_up_map_cmd(struct zxdh_sc_dev *dev, u8 cmd, + struct zxdh_up_info *map_info) +{ + return 0; +} + +/** + * zxdh_ah_cqp_op - perform an AH cqp operation + * @rf: RDMA PCI function + * @sc_ah: address handle + * @cmd: AH operation + * @wait: wait if true + * @callback_fcn: Callback function on CQP op completion + * @cb_param: parameter for callback function + * + * returns errno + */ +int zxdh_ah_cqp_op(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, u8 cmd, + bool wait, void (*callback_fcn)(struct zxdh_cqp_request *), + void *cb_param) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + if (cmd != ZXDH_OP_AH_CREATE && cmd != ZXDH_OP_AH_DESTROY) + return -EINVAL; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, wait); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = cmd; + cqp_info->post_sq = 1; + if (cmd == ZXDH_OP_AH_CREATE) { + cqp_info->in.u.ah_create.info = sc_ah->ah_info; + cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request; + 
cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp; + } else if (cmd == ZXDH_OP_AH_DESTROY) { + cqp_info->in.u.ah_destroy.info = sc_ah->ah_info; + cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp; + } + + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + if (status) + return -ENOMEM; + + sc_ah->ah_info.ah_valid = (cmd == ZXDH_OP_AH_CREATE); + + return 0; +} + +/** + * zxdh_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP + * @cqp_request: pointer to cqp_request of create AH + */ +void zxdh_gsi_ud_qp_ah_cb(struct zxdh_cqp_request *cqp_request) +{ + struct zxdh_sc_ah *sc_ah = cqp_request->param; + + if (!cqp_request->compl_info.op_ret_val) + sc_ah->ah_info.ah_valid = true; + else + sc_ah->ah_info.ah_valid = false; +} + +/** + * zxdh_prm_add_pble_mem - add memory to pble resources + * @pprm: pble resource manager + * @pchunk: chunk of memory to add + */ +int zxdh_prm_add_pble_mem(struct zxdh_pble_prm *pprm, struct zxdh_chunk *pchunk) +{ + u64 sizeofbitmap; + + if (pchunk->size & 0xfff) + return -EINVAL; + + sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift; + + pchunk->bitmapmem.size = sizeofbitmap >> 3; + pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL); + + if (!pchunk->bitmapmem.va) + return -ENOMEM; + + pchunk->bitmapbuf = pchunk->bitmapmem.va; + bitmap_zero(pchunk->bitmapbuf, sizeofbitmap); + + pchunk->sizeofbitmap = sizeofbitmap; + /* each pble is 8 bytes hence shift by 3 */ + pprm->total_pble_alloc += pchunk->size >> 3; + pprm->free_pble_cnt += pchunk->size >> 3; + + return 0; +} + +/** + * zxdh_prm_get_pbles - get pble's from prm + * @pprm: pble resource manager + * @chunkinfo: information about chunk where pble's were acquired + * @mem_size: size of pble memory needed + * @vaddr: returns virtual address of pble memory + * @fpm_addr: returns fpm address of pble memory + * @paaddr: returns pa address of pble memory + */ +int 
zxdh_prm_get_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo, u64 mem_size, + u64 **vaddr, u64 *fpm_addr, dma_addr_t *paaddr) +{ + u64 bits_needed; + u64 bit_idx = PBLE_INVALID_IDX; + struct zxdh_chunk *pchunk = NULL; + struct list_head *chunk_entry = pprm->clist.next; + u32 offset; + unsigned long flags; + *vaddr = NULL; + *fpm_addr = 0; + *paaddr = 0; + + bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift)); + + spin_lock_irqsave(&pprm->prm_lock, flags); + while (chunk_entry != &pprm->clist) { + pchunk = (struct zxdh_chunk *)chunk_entry; + bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf, + pchunk->sizeofbitmap, 0, + bits_needed, 0); + if (bit_idx < pchunk->sizeofbitmap) + break; + + /* list.next used macro */ + chunk_entry = pchunk->list.next; + } + + if (!pchunk || bit_idx >= pchunk->sizeofbitmap) { + spin_unlock_irqrestore(&pprm->prm_lock, flags); + return -ENOMEM; + } + + bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed); + offset = bit_idx << pprm->pble_shift; + *vaddr = pchunk->vaddr + offset; + *fpm_addr = pchunk->fpm_addr + offset; + *paaddr = pchunk->pa + offset; + chunkinfo->pchunk = pchunk; + chunkinfo->bit_idx = bit_idx; + chunkinfo->bits_used = bits_needed; + /* 3 is sizeof pble divide */ + pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3); + spin_unlock_irqrestore(&pprm->prm_lock, flags); + + return 0; +} + +/** + * zxdh_prm_return_pbles - return pbles back to prm + * @pprm: pble resource manager + * @chunkinfo: chunk where pble's were acquired and to be freed + */ +void zxdh_prm_return_pbles(struct zxdh_pble_prm *pprm, + struct zxdh_pble_chunkinfo *chunkinfo) +{ + unsigned long flags; + + spin_lock_irqsave(&pprm->prm_lock, flags); + pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3); + bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx, + chunkinfo->bits_used); + spin_unlock_irqrestore(&pprm->prm_lock, flags); +} + +int zxdh_map_vm_page_list(struct 
zxdh_hw *hw, void *va, dma_addr_t *pg_dma, + u32 pg_cnt) +{ + struct page *vm_page; + int i; + u8 *addr; + + addr = (u8 *)(uintptr_t)va; + for (i = 0; i < pg_cnt; i++) { + vm_page = vmalloc_to_page(addr); + if (!vm_page) + goto err; + + pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hw->device, pg_dma[i])) + goto err; + + addr += PAGE_SIZE; + } + + return 0; + +err: + zxdh_unmap_vm_page_list(hw, pg_dma, i); + return -ENOMEM; +} + +void zxdh_unmap_vm_page_list(struct zxdh_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt) +{ + int i; + + for (i = 0; i < pg_cnt; i++) + dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, + DMA_BIDIRECTIONAL); +} + +/** + * zxdh_pble_free_paged_mem - free virtual paged memory + * @chunk: chunk to free with paged memory + */ +void zxdh_pble_free_paged_mem(struct zxdh_chunk *chunk) +{ + if (!chunk->pg_cnt) + goto done; + + zxdh_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs, + chunk->pg_cnt); + +done: + kfree(chunk->dmainfo.dmaaddrs); + chunk->dmainfo.dmaaddrs = NULL; + vfree(chunk->vaddr); + chunk->vaddr = NULL; + chunk->type = 0; +} + +/** + * zxdh_modify_qp_to_err - Modify a QP to error + * @sc_qp: qp structure + */ +void zxdh_modify_qp_to_err(struct zxdh_sc_qp *sc_qp) +{ + struct zxdh_qp *qp = sc_qp->qp_uk.back_qp; + struct ib_qp_attr attr; + + if (qp->iwdev->rf->reset) + return; + attr.qp_state = IB_QPS_ERR; + + zxdh_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); +} + +void zxdh_ib_qp_event(struct zxdh_qp *iwqp, enum zxdh_qp_event_type event) +{ + struct ib_event ibevent; + + if (!iwqp->ibqp.event_handler) + return; + + switch (event) { + case ZXDH_QP_EVENT_CATASTROPHIC: + ibevent.event = IB_EVENT_QP_FATAL; + break; + case ZXDH_QP_EVENT_ACCESS_ERR: + ibevent.event = IB_EVENT_QP_ACCESS_ERR; + break; + case ZXDH_QP_EVENT_REQ_ERR: + ibevent.event = IB_EVENT_QP_REQ_ERR; + break; + } + ibevent.device = iwqp->ibqp.device; + ibevent.element.qp = &iwqp->ibqp; + 
iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); +} + +/** + * zxdh_upload_qp_context - upload raw QP context + * @iwqp: QP pointer + * @freeze: freeze QP + * @raw: raw context flag + */ +int zxdh_upload_qp_context(struct zxdh_qp *iwqp, bool freeze, bool raw) +{ + return 0; +} + +int zxdh_cqp_rdma_read_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = src_dir; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + if (dev->cache_id == 0) { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dest_dir; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + } else { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + } + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int 
zxdh_cqp_damreadbycqe_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dam_read_bycqe *dmadata, + struct zxdh_path_index *src_path_index, u64 *arr) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status, i = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ_USE_CQE; + cqp_info->in.u.dma_read_cqe.cqp = dev->cqp; + cqp_info->in.u.dma_read_cqe.dma_rcqe.num = dmadata->num; + cqp_info->in.u.dma_read_cqe.dma_rcqe.bitwidth = dmadata->bitwidth; + cqp_info->in.u.dma_read_cqe.dma_rcqe.valuetype = dmadata->valuetype; + for (i = 0; i < dmadata->num; i++) { + cqp_info->in.u.dma_read_cqe.dma_rcqe.addrbuf[i] = + dmadata->addrbuf[i]; + } + + cqp_info->in.u.dma_read_cqe.src_path_index.vhca_id = + src_path_index->vhca_id; + cqp_info->in.u.dma_read_cqe.src_path_index.obj_id = + src_path_index->obj_id; + cqp_info->in.u.dma_read_cqe.src_path_index.path_select = + src_path_index->path_select; + cqp_info->in.u.dma_read_cqe.src_path_index.inter_select = + src_path_index->inter_select; + + cqp_info->in.u.dma_read_cqe.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + + for (i = 0; i < 5; i++) + arr[i] = cqp_request->compl_info.addrbuf[i]; + + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_write32_cmd(struct zxdh_sc_dev *dev, + struct zxdh_dma_write32_date *dma_data) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status, i = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE32; + cqp_info->in.u.dma_write32data.cqp = dev->cqp; + 
cqp_info->in.u.dma_write32data.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_write32data.dma_data.num = dma_data->num; + cqp_info->in.u.dma_write32data.dma_data.inter_sour_sel = + dma_data->inter_sour_sel; + cqp_info->in.u.dma_write32data.dma_data.need_inter = + dma_data->need_inter; + for (i = 0; i < dma_data->num; i++) { + cqp_info->in.u.dma_write32data.dma_data.addrbuf[i] = + dma_data->addrbuf[i]; + cqp_info->in.u.dma_write32data.dma_data.databuf[i] = + dma_data->databuf[i]; + } + + cqp_info->in.u.dma_write32data.dest_path_index.obj_id = ZXDH_REG_OBJ_ID; + cqp_info->in.u.dma_write32data.dest_path_index.path_select = + ZXDH_INDICATE_REGISTER; + cqp_info->in.u.dma_write32data.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + cqp_info->in.u.dma_write32data.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_dpuddr_to_host_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_DPU_DDR; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; 
+ cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_write_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest, u8 src_dir, + u8 dest_dir) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_WRITE; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = src_dir; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + if (dev->cache_id == 0) { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dest_dir; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + } else { + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + } + + cqp_info->in.u.dma_writeread.scratch = 
(uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_readreg_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_REG_OBJ_ID; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_REGISTER; + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_read_mrte_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = 
&cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = ZXDH_MR_OBJ_ID; + cqp_info->in.u.dma_writeread.src_path_index.path_select = dev->cache_id; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_rdma_read_tx_window_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest *src_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_DMA_READ; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = src_dest->src; + cqp_info->in.u.dma_writeread.src_dest.len = src_dest->len; + cqp_info->in.u.dma_writeread.src_dest.dest = src_dest->dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_TX_WINDOW_OBJ_ID; + 
cqp_info->in.u.dma_writeread.src_path_index.path_select = dev->cache_id; + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_CACHE; + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = ZXDH_DMA_OBJ_ID; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_cqp_config_pble_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_pble_info *pbleinfo, u32 len, + bool pbletype) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + u64 baseaddr = 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_PBLE_TAB; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = pbleinfo->pa; + cqp_info->in.u.dma_writeread.src_dest.len = len; + cqp_info->in.u.dma_writeread.src_dest.dest = pbleinfo->smmu_fpm_addr; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_DMA_OBJ_ID; // 0 | 1 + cqp_info->in.u.dma_writeread.src_path_index.waypartion = 0; + + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; // 到host不经过SMMU + + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = + (pbletype == true) ? 
ZXDH_PBLE_MR_OBJ_ID : + ZXDH_PBLE_QUEUE_OBJ_ID; // 0 | 1 + cqp_info->in.u.dma_writeread.dest_path_index.waypartion = 0; + + if (dev->cache_id == 0) { + if (dev->hmc_use_dpu_ddr == true) { + cqp_info->in.u.dma_writeread.dest_path_index + .path_select = ZXDH_INDICATE_DPU_DDR; // + } else { + cqp_info->in.u.dma_writeread.dest_path_index + .path_select = ZXDH_INDICATE_HOST_SMMU; + } + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 经过cache + cqp_info->in.u.dma_writeread.src_dest.dest = + pbleinfo->smmu_fpm_addr; + } else { + if (pbletype == true) { + baseaddr = + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE_MR].base; + } else { + baseaddr = + dev->hmc_info->hmc_obj[ZXDH_HMC_IW_PBLE].base; + } + cqp_info->in.u.dma_writeread.src_dest.dest = + pbleinfo->smmu_fpm_addr - baseaddr; + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + dev->cache_id; // + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_CACHE; // 经过cache + } + + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +bool zxdh_cq_empty(struct zxdh_cq *iwcq) +{ + struct zxdh_cq_uk *ukcq; + u64 qword0; + __le64 *cqe; + u8 polarity; + + ukcq = &iwcq->sc_cq.cq_uk; + if (ukcq->valid_cq == false) + return 0; + cqe = ZXDH_GET_CURRENT_CQ_ELEM(ukcq); + get_64bit_val(cqe, 0, &qword0); + polarity = (u8)FIELD_GET(ZXDH_CQ_VALID, qword0); + + return polarity != ukcq->polarity; +} + +void zxdh_remove_cmpls_list(struct zxdh_cq *iwcq) +{ + struct zxdh_cmpl_gen *cmpl_node; + struct list_head *tmp_node, *list_node; + + list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) { + cmpl_node = list_entry(list_node, struct zxdh_cmpl_gen, list); + list_del(&cmpl_node->list); + kfree(cmpl_node); + } +} + +int zxdh_generated_cmpls(struct zxdh_cq *iwcq, + struct zxdh_cq_poll_info *cq_poll_info) +{ + struct zxdh_cmpl_gen *cmpl; + + if 
(!iwcq || list_empty(&iwcq->cmpl_generated)) + return -ENOENT; + cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, + struct zxdh_cmpl_gen, list); + list_del(&cmpl->list); + memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); + kfree(cmpl); + + return 0; +} + +/** + * zxdh_set_cpi_common_values - fill in values for polling info struct + * @cpi: resulting structure of cq_poll_info type + * @qp: QPair + * @qp_num: id of the QP + */ +static void zxdh_set_cpi_common_values(struct zxdh_cq_poll_info *cpi, + struct zxdh_qp_uk *qp, u32 qp_num) +{ + cpi->comp_status = ZXDH_COMPL_STATUS_FLUSHED; + cpi->error = 1; + cpi->major_err = ZXDH_FLUSH_MAJOR_ERR; + cpi->minor_err = FLUSH_GENERAL_ERR; + cpi->qp_handle = (zxdh_qp_handle)(uintptr_t)qp; + cpi->qp_id = qp_num; +} + +static inline void zxdh_comp_handler(struct zxdh_cq *cq) +{ + if (cq->sc_cq.cq_uk.armed && cq->ibcq.comp_handler) + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +/** + * zxdh_generate_flush_completions - generate completion from WRs + * @iwqp: pointer to QP + */ +void zxdh_generate_flush_completions(struct zxdh_qp *iwqp) +{ + struct zxdh_qp_uk *qp = &iwqp->sc_qp.qp_uk; + struct zxdh_ring *sq_ring = &qp->sq_ring; + struct zxdh_ring *rq_ring = &qp->rq_ring; + struct zxdh_cmpl_gen *cmpl; + __le64 *sw_wqe; + u64 wqe_qword; + u32 wqe_idx; + u8 compl_generated = 0; + unsigned long flags; + +#define SQ_COMPL_GENERATED (0x01) +#define RQ_COMPL_GENERATED (0x02) + + spin_lock_irqsave(&iwqp->iwscq->lock, flags); + if (zxdh_cq_empty(iwqp->iwscq)) { + while (ZXDH_RING_MORE_WORK(*sq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->iwscq->lock, + flags); + return; + } + + wqe_idx = sq_ring->tail; + zxdh_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + cmpl->cpi.op_type = + (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword); 
+ /* remove the SQ WR by moving SQ tail*/ + ZXDH_RING_SET_TAIL( + *sq_ring, + sq_ring->tail + + qp->sq_wrtrk_array[sq_ring->tail] + .quanta); + + list_add_tail(&cmpl->list, + &iwqp->iwscq->cmpl_generated); + compl_generated |= SQ_COMPL_GENERATED; + } + } else { + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS / 2); + } + spin_unlock_irqrestore(&iwqp->iwscq->lock, flags); + + spin_lock_irqsave(&iwqp->iwrcq->lock, flags); + if (zxdh_cq_empty(iwqp->iwrcq)) { + while (ZXDH_RING_MORE_WORK(*rq_ring)) { + cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL); + if (!cmpl) { + spin_unlock_irqrestore(&iwqp->iwrcq->lock, + flags); + return; + } + + wqe_idx = rq_ring->tail; + zxdh_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + + cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; + cmpl->cpi.op_type = ZXDH_OP_TYPE_REC; + /* remove the RQ WR by moving RQ tail */ + ZXDH_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); + list_add_tail(&cmpl->list, + &iwqp->iwrcq->cmpl_generated); + + compl_generated |= RQ_COMPL_GENERATED; + } + } else { + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS / 2); + } + spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags); + + if (iwqp->iwscq == iwqp->iwrcq) { + if (compl_generated) + zxdh_comp_handler(iwqp->iwscq); + return; + } + if (compl_generated & SQ_COMPL_GENERATED) + zxdh_comp_handler(iwqp->iwscq); + if (compl_generated & RQ_COMPL_GENERATED) + zxdh_comp_handler(iwqp->iwrcq); + if (compl_generated) + pr_info("VERBS: 0x%X (SQ 0x1, RQ 0x2, both 0x3) completions generated for QP %d\n", + compl_generated, iwqp->ibqp.qp_num); +} + +u64 zxdh_get_path_index(struct zxdh_path_index *path_index) +{ + u64 path_index_result = 0, tmp = 0; + + tmp = path_index->inter_select; + path_index_result |= tmp; + + tmp = path_index->path_select; + tmp <<= 8; + path_index_result |= tmp; + + tmp = path_index->waypartion; + tmp <<= 12; + path_index_result |= tmp; + + tmp = path_index->obj_id; + tmp <<= 16; + 
path_index_result |= tmp; + + tmp = path_index->vhca_id; + tmp <<= 24; + path_index_result |= tmp; + + return path_index_result; +} + +int zxdh_cqp_config_pte_table_cmd(struct zxdh_sc_dev *dev, + struct zxdh_src_copy_dest scr_dest) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_PTE_TAB; + cqp_info->in.u.dma_writeread.cqp = dev->cqp; + cqp_info->in.u.dma_writeread.src_dest.src = scr_dest.src; + cqp_info->in.u.dma_writeread.src_dest.len = scr_dest.len; + cqp_info->in.u.dma_writeread.src_dest.dest = scr_dest.dest; + + cqp_info->in.u.dma_writeread.src_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.src_path_index.obj_id = + ZXDH_DMA_OBJ_ID; // DMA 搬移 使用宏定义 + cqp_info->in.u.dma_writeread.src_path_index.path_select = + ZXDH_INDICATE_HOST_NOSMMU; // 到host不经过SMMU + cqp_info->in.u.dma_writeread.src_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + + cqp_info->in.u.dma_writeread.dest_path_index.vhca_id = dev->vhca_id; + cqp_info->in.u.dma_writeread.dest_path_index.obj_id = + ZXDH_L2D_OBJ_ID; // L2D + cqp_info->in.u.dma_writeread.dest_path_index.path_select = + ZXDH_INDICATE_L2D; // L2D + cqp_info->in.u.dma_writeread.dest_path_index.inter_select = + ZXDH_INTERFACE_NOTCACHE; // 不经过cache + cqp_info->in.u.dma_writeread.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_sc_send_mailbox_cmd(struct zxdh_sc_dev *dev, u8 opt, u64 msg2, + u64 msg3, u64 msg4, u16 dst_vf_id) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, 
true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_CONFIG_MAILBOX; + cqp_info->in.u.hmc_mb.cqp = dev->cqp; + cqp_info->in.u.hmc_mb.dst_vf_id = dst_vf_id; + cqp_info->in.u.hmc_mb.mbhead_data.msg0 = opt; + cqp_info->in.u.hmc_mb.mbhead_data.msg1 = dev->vhca_id; + cqp_info->in.u.hmc_mb.mbhead_data.msg2 = msg2; + cqp_info->in.u.hmc_mb.mbhead_data.msg3 = msg3; + cqp_info->in.u.hmc_mb.mbhead_data.msg4 = msg4; + cqp_info->in.u.hmc_mb.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +int zxdh_sc_query_mkey_cmd(struct zxdh_sc_dev *dev, u32 mekyindex) +{ + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_pci_f *rf = dev_to_rf(dev); + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + cqp_info->post_sq = 1; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_MKEY; + cqp_info->in.u.query_mkey.cqp = dev->cqp; + cqp_info->in.u.query_mkey.mkeyindex = mekyindex; + cqp_info->in.u.query_mkey.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + return status; +} + +static const char *const _zxdh_qp_state_to_string[ZXDH_QPS_RSV] = { + [ZXDH_QPS_RESET] = "RESET", + [ZXDH_QPS_INIT] = "INIT", + [ZXDH_QPS_RTR] = "RTR", + [ZXDH_QPS_RTS] = "RTS", + [ZXDH_QPS_SQE] = "SQE", + [ZXDH_QPS_SQD] = "SQD", + [ZXDH_QPS_ERR] = "ERROR", +}; + +const char *zxdh_qp_state_to_string(enum ib_qp_state state) +{ + return _zxdh_qp_state_to_string[state]; +} + +int get_pci_board_bdf(char *pci_board_bdf, struct zxdh_pci_f *rf) +{ + int domain; + int bus; + int device; + if (rf->pcidev == NULL || rf->pcidev->bus == NULL) { + pr_info("get_pci_board_bdf fail:rf pcidev is null\n"); + return -EIO; + } + domain = 
pci_domain_nr(rf->pcidev->bus); + bus = rf->pcidev->bus->number; + device = PCI_SLOT(rf->pcidev->devfn); + sprintf(pci_board_bdf, "%04d:%02x:%02x", domain, bus, device); + // pr_info("%s succ:%s\n", __func__, pci_board_bdf); + return 0; +} + +int zxdh_read_ram_32bit_value(struct zxdh_sc_dev *dev, u32 ram_num, u32 ram_width, + u32 ram_read_cnt, u32 offset_idx, u32 *value) +{ + u32 check_ram_num, check_ram_addr; + int ret; + int i; + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_num, ram_width, dev->vhca_id, + ram_read_cnt); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + check_ram_num = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_num != ram_num) || + (check_ram_addr != dev->vhca_id)) { + pr_err("%s: get ram data failed! ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_num, check_ram_addr); + return -ERANGE; + } + *value = zxdh_read_rx_ram_data(dev, offset_idx); + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +int zxdh_read_ram_rx_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value) +{ + u32 check_ram_id, check_ram_addr; + int ret; + int i, offset_idx; + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_rx_ram_reg(dev, ram_info->ram_num, ram_info->ram_width, + ram_info->ram_addr, ram_info->ram_read_cnt); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + check_ram_id = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMARX_RAM_ADDR)); + if ((check_ram_id != ram_info->ram_num) || + (check_ram_addr != ram_info->ram_addr)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_id, check_ram_addr); + return -ERANGE; + } + for (offset_idx = 0; offset_idx < ram_info->offset_idx; + offset_idx++) { + *value = zxdh_read_rx_ram_data(dev, offset_idx); + value++; + } + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +int zxdh_read_ram_tx_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value) +{ + u32 check_ram_id, check_ram_addr; + int ret; + int i, offset_idx; + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + zxdh_set_tx_ram_reg(dev, ram_info->ram_num, ram_info->ram_width, + ram_info->ram_addr, ram_info->ram_read_cnt); + ret = zxdh_read_rx_ram_flag(dev); + if (ret) { + udelay(500); + continue; + } + check_ram_id = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_NUM)); + check_ram_addr = readl( + (u32 __iomem *)(dev->hw->hw_addr + RDMATX_RAM_ADDR)); + if ((check_ram_id != ram_info->ram_num) || + (check_ram_addr != ram_info->ram_addr)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_id, check_ram_addr); + return -ERANGE; + } + for (offset_idx = 0; offset_idx < ram_info->offset_idx; + offset_idx++) { + *value = zxdh_read_tx_ram_data(dev, offset_idx); + value++; + } + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} + +static int zxdh_set_cqp_ram_reg(struct zxdh_pci_f *rf, u32 ram_num, + u32 ram_width, u32 ram_addr, u32 ram_read_cnt) +{ + int ret; + size_t i; + struct { + u64 reg; + u32 val; + } reg_vals[] = { { C_RDMACQP_RDRAM_NUM, ram_num }, + { C_RDMACQP_RDRAM_DATA_WIDTH, ram_width }, + { C_RDMACQP_RDRAM_TIME_LIMIT, + IRDMARX_RD_TIME_LIMIT_VALUE }, + { C_RDMACQP_RDRAM_ADDR, ram_addr }, + { C_RDMACQP_RDRAM_READ_LENGTH, ram_read_cnt }, + { C_RDMACQP_RDRAM_READ_FLAG, 0 } }; + for (i = 0; i < sizeof(reg_vals) / sizeof(reg_vals[0]); i++) { + ret = zxdh_rdma_reg_write(rf, reg_vals[i].reg, reg_vals[i].val); + if (ret) { + pr_err("Error: ret=%d, failed to write rdma cqp read ram reg 0x%llx!\n", + ret, reg_vals[i].reg); + return ret; + } + } + return 0; +} + +static int zxdh_read_cqp_ram_flag(struct zxdh_pci_f *rf) +{ + int ret; + u32 val, err_reg, cnt_err_reg; + udelay(1000); + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_RD_FINISH, &val); + if (ret) { + udelay(2000); + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_RD_FINISH, &val); + if (ret) { + pr_err("Error: ret=%d, failed to read rdma cqp ram reg 0x%lx!\n", + ret, C_RDMACQP_RDRAM_RD_FINISH); + return -EIO; + } + } + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_RD_ERROR, &err_reg); + if (ret) { + pr_err("Error: ret=%d, failed to read rdma cqp ram reg 0x%lx!\n", + ret, C_RDMACQP_RDRAM_RD_ERROR); + return -EIO; + } + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_RD_CNT_ERR, &cnt_err_reg); + if (ret) { + pr_err("Error: ret=%d, failed to read rdma cqp ram reg 0x%lx!\n", + ret, C_RDMACQP_RDRAM_RD_CNT_ERR); + return -EIO; + } + if (val == 1 && !err_reg && !cnt_err_reg) + return 0; + pr_err("Error: cqp read ram 
failed, reg 0x%lx, value: 0x%x; reg 0x%lx, value 0x%x", + C_RDMACQP_RDRAM_RD_ERROR, err_reg, C_RDMACQP_RDRAM_RD_CNT_ERR, + cnt_err_reg); + return -1; +} + +int zxdh_read_ram_cqp_values(struct zxdh_sc_dev *dev, + struct read_ram_info *ram_info, u32 *value) +{ + u32 check_ram_id, check_ram_addr; + int ret; + int i, offset_idx; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + + for (i = 0; i < ZXDH_RAM_REPEAT_READ_CNT; i++) { + ret = zxdh_set_cqp_ram_reg(rf, ram_info->ram_num, + ram_info->ram_width, + ram_info->ram_addr, + ram_info->ram_read_cnt); + if (ret) + continue; + ret = zxdh_read_cqp_ram_flag(rf); + if (ret) { + udelay(500); + continue; + } + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_NUM, + &check_ram_id); + if (ret) { + pr_err("Error: read cqp reg 0x%lx failed!\n", + C_RDMACQP_RDRAM_NUM); + return -EIO; + } + ret = zxdh_rdma_reg_read(rf, C_RDMACQP_RDRAM_ADDR, + &check_ram_addr); + if (ret) { + pr_err("Error: read cqp reg 0x%lx failed!\n", + C_RDMACQP_RDRAM_ADDR); + return -EIO; + } + if ((check_ram_id != ram_info->ram_num) || + (check_ram_addr != ram_info->ram_addr)) { + pr_err("%s: get ram data failed! 
ram_num:0x%x, rdma_addr:0x%x\n", + __func__, check_ram_id, check_ram_addr); + return -ERANGE; + } + for (offset_idx = 0; offset_idx < ram_info->offset_idx; + offset_idx++) { + ret = zxdh_rdma_reg_read( + rf, + C_RDMACQP_RDRAM_RD_MAINTENANCE_RAM(offset_idx), + value); + if (ret) + return ret; + value++; + } + return 0; + } + pr_err("%s: get ram data failed !\n", __func__); + return -EIO; +} diff --git a/drivers/infiniband/hw/zrdma/verbs.c b/drivers/infiniband/hw/zrdma/verbs.c new file mode 100644 index 000000000000..49a0d5e56220 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/verbs.c @@ -0,0 +1,4069 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include +#include "main.h" +#include "icrdma_hw.h" +#include "srq.h" +#include "restrack.h" +#include "private_verbs_cmd.h" +#include "manager.h" + +#ifdef ZXDH_UAPI_DEF +extern const struct uapi_definition zxdh_ib_dev_defs[]; +#endif +extern notify_remote_ip_update remote_ip_update_hook; + +void extract_version(const char *input, char *output) +{ + const char *last_dash_pos = strrchr(input, '-'); + + if (last_dash_pos != NULL) { + const char *v_pos = strstr(last_dash_pos, "V"); + + if (v_pos != NULL) { + strncpy(output, v_pos + 1, 10); + output[10] = '\0'; + } else { + output[0] = '\0'; + } + } else { + output[0] = '\0'; + } +} +/** + * zxdh_query_device - get device attributes + * @ibdev: device pointer from stack + * @props: returning device attributes + * @udata: user data + */ +static int zxdh_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_pci_f *rf = iwdev->rf; + struct pci_dev *pcidev = iwdev->rf->pcidev; + struct zxdh_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs; + + struct ethtool_drvinfo info; + int major, sub_major, minor, sub_minor; + + __u32 val; + __u16 unit_period; + char extracted_version[16]; + struct net_device *slave = NULL; + struct 
list_head *iter; + + memset(&info, 0, sizeof(info)); + if ((iwdev->netdev->priv_flags & IFF_BONDING) == 4) { + rcu_read_lock(); + netdev_for_each_lower_dev(iwdev->netdev, slave, iter) { + slave->ethtool_ops->get_drvinfo(slave, &info); + break; + } + rcu_read_unlock(); + if (!slave) { + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + } + } else { + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + } + extract_version(info.fw_version, extracted_version); + sscanf(extracted_version, "%d.%d.%d.%d", &major, &sub_major, &minor, + &sub_minor); + + if (udata->inlen || udata->outlen) + return -EINVAL; + + memset(props, 0, sizeof(*props)); + ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + props->fw_ver = ((u64)major << 48 | (u64)sub_major << 32 | + (u64)minor << 16 | sub_minor); + props->device_cap_flags = + IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS | + IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ; + props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; + props->vendor_id = pcidev->vendor; + props->vendor_part_id = pcidev->device; + props->hw_ver = pcidev->revision; + props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; + props->max_mr_size = hw_attrs->max_mr_size; + props->max_qp = rf->max_qp - rf->used_qps; + props->max_qp_wr = hw_attrs->max_qp_wr; + set_max_sge(props, rf); + props->max_cq = rf->max_cq - rf->used_cqs; + props->max_cqe = rf->max_cqe - 1; + props->max_mr = rf->max_mr - rf->used_mrs; + props->max_mw = props->max_mr; + props->max_pd = rf->max_pd - rf->used_pds; + props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges; + props->max_qp_rd_atom = hw_attrs->max_hw_ird; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_qp_init_rd_atom = hw_attrs->max_hw_ord; + props->max_srq = rf->max_srq - rf->used_srqs; + props->max_srq_wr = hw_attrs->max_srq_wr; + props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags; + 
props->local_ca_ack_delay = 16; + props->hca_core_clock = 1000 * 1000UL; + props->max_wq_type_rq = props->max_qp; + if (rdma_protocol_roce(ibdev, 1)) { + props->max_pkeys = ZXDH_PKEY_TBL_SZ; + props->max_ah = rf->max_ah; + if (hw_attrs->uk_attrs.hw_rev == ZXDH_GEN_2) { + props->max_mcast_grp = 0; + props->max_mcast_qp_attach = 0; + props->max_total_mcast_qp_attach = 0; + } + } + props->max_fast_reg_page_list_len = ZXDH_MAX_PAGES_PER_FMR; + val = readl(rf->sc_dev.hw->hw_addr + RDMARX_CQ_PERIOD_CFG); + unit_period = (__u16)(val & 0xffff); + props->cq_caps.max_cq_moderation_count = ZXDH_MAX_CQ_COUNT; + props->cq_caps.max_cq_moderation_period = + NS_TO_US(unit_period * ZXDH_MAX_CQ_PERIOD); +#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff + if (hw_attrs->uk_attrs.hw_rev >= ZXDH_GEN_2) + props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK; + + return 0; +} + +static int zxdh_mmap_legacy(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma) +{ + u64 pfn; + + if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + vma->vm_private_data = ucontext; + pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev + .hw_regs[ZXDH_DB_ADDR_OFFSET] + + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> + PAGE_SHIFT; + +#ifdef RDMA_MMAP_DB_SUPPORT + return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot), NULL); +#else + return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); +#endif +} + +void *zxdh_zalloc_mapped(struct zxdh_device *dev, dma_addr_t *dma_addr, + size_t size, enum dma_data_direction dir) +{ + void *addr; + + addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (!addr) + return NULL; + *dma_addr = dma_map_single(&dev->rf->pcidev->dev, addr, size, dir); + if (dma_mapping_error(&dev->rf->pcidev->dev, *dma_addr)) { + pr_err("failed to map DMA address\n"); + free_pages_exact(addr, size); + return NULL; + } + return addr; +} + +void 
zxdh_free_mapped(struct zxdh_device *dev, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + enum dma_data_direction dir) +{ + dma_unmap_single(&dev->rf->pcidev->dev, dma_addr, size, dir); + free_pages_exact(cpu_addr, size); +} + +static int zxdh_mmap_for_cap(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma, + struct zxdh_user_mmap_entry *entry) +{ + u64 pfn; + u64 start = vma->vm_start; + u64 size = vma->vm_end - vma->vm_start; + + pfn = entry->bar_offset >> ZXDH_HW_PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) { + pr_info("%s error!\n", __func__); + return -EAGAIN; + } + + pr_info("%s remap_pfn_range end.start:%llx,size:%llx\n", __func__, + start, size); + return 0; +} + +static int zxdh_mmap_hmc_for_cap(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma, + struct zxdh_user_mmap_entry *entry) +{ + u64 size; + u64 i; + struct zxdh_sc_dev *dev; + u64 j = 0; + int ret = 0; + u64 mem_phy; + unsigned long addr; + u64 numbufs; + dev = &ucontext->iwdev->rf->sc_dev; + size = vma->vm_end - vma->vm_start; + if (size > dev->data_cap_sd.data_len) { + return -EINVAL; + } + numbufs = size / ZXDH_HMC_DIRECT_BP_SIZE; + j = (entry->bar_offset - dev->data_cap_sd.data_cap_base) / + ZXDH_HMC_DIRECT_BP_SIZE; + addr = vma->vm_start; + for (i = 0; i < numbufs; i++, j++) { + mem_phy = dev->data_cap_sd.entry[j].u.bp.addr.pa; + ret = remap_pfn_range(vma, addr, mem_phy >> ZXDH_HW_PAGE_SHIFT, + ZXDH_HMC_DIRECT_BP_SIZE, + vma->vm_page_prot); + if (ret < 0) + break; + addr += ZXDH_HMC_DIRECT_BP_SIZE; + } + pr_info("zxdh_mmap_hmc_for_cap remap_pfn_range end.start:%lx,size:%llx\n", + vma->vm_start, size); + return ret; +} + +#ifdef RDMA_MMAP_DB_SUPPORT +static void zxdh_mmap_free(struct rdma_user_mmap_entry *rdma_entry) +{ + struct zxdh_user_mmap_entry *entry = to_zxdh_mmap_entry(rdma_entry); + + kfree(entry); +} + +struct rdma_user_mmap_entry * +zxdh_user_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum 
zxdh_mmap_flag mmap_flag, u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + + entry->bar_offset = bar_offset; + entry->mmap_flag = mmap_flag; + + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, PAGE_SIZE); + if (ret) { + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +struct rdma_user_mmap_entry * +zxdh_mp_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 phy_addr, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + + entry->bar_offset = phy_addr; + entry->mmap_flag = mmap_flag; + pr_info("%s entry->address:%lld\n", __func__, entry->bar_offset); + + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, length); + if (ret) { + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +struct rdma_user_mmap_entry * +zxdh_cap_mmap_entry_insert(struct zxdh_ucontext *ucontext, void *address, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + int ret; + + if (!entry) + return NULL; + if (mmap_flag == ZXDH_MMAP_HMC) { + entry->bar_offset = (uintptr_t)address; + } else { + entry->bar_offset = virt_to_phys(address); + } + entry->mmap_flag = mmap_flag; + pr_info("%s mmap_flag:%d, entry->address:%lld\n", __func__, mmap_flag, entry->bar_offset); + ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext, + &entry->rdma_entry, length); + if (ret) { + pr_err("%s entry insert failed\n", __func__); + kfree(entry); + return NULL; + } + *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); + + return &entry->rdma_entry; +} + +#else 
/* RDMA_MMAP_DB_SUPPORT */ +static inline bool find_key_in_mmap_tbl(struct zxdh_ucontext *ucontext, u64 key) +{ + struct zxdh_user_mmap_entry *entry; + + hash_for_each_possible(ucontext->mmap_hash_tbl, entry, hlist, key) { + if (entry->pgoff_key == key) + return true; + } + + return false; +} + +struct zxdh_user_mmap_entry * +zxdh_user_mmap_entry_add_hash(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset) +{ + struct zxdh_user_mmap_entry *entry = + kzalloc(sizeof(*entry), GFP_KERNEL); + unsigned long flags; + int retry_cnt = 0; + + if (!entry) + return NULL; + + entry->bar_offset = bar_offset; + entry->mmap_flag = mmap_flag; + entry->ucontext = ucontext; + do { + get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key)); + + /* The key is a page offset */ + entry->pgoff_key >>= PAGE_SHIFT; + + /*In the event of a collision in the hash table, retry a new key */ + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) { + hash_add(ucontext->mmap_hash_tbl, &entry->hlist, + entry->pgoff_key); + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + goto hash_add_done; + } + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + } while (retry_cnt++ < 10); + + kfree(entry); + return NULL; + +hash_add_done: + /*libc mmap uses a byte offset */ + *mmap_offset = entry->pgoff_key << PAGE_SHIFT; + + return entry; +} + +static struct zxdh_user_mmap_entry * +zxdh_find_user_mmap_entry(struct zxdh_ucontext *ucontext, + struct vm_area_struct *vma) +{ + struct zxdh_user_mmap_entry *entry; + unsigned long flags; + + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + hash_for_each_possible(ucontext->mmap_hash_tbl, entry, hlist, + vma->vm_pgoff) { + if (entry->pgoff_key == vma->vm_pgoff) { + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + return entry; + } + } + + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + + return NULL; +} + +void 
zxdh_user_mmap_entry_del_hash(struct zxdh_user_mmap_entry *entry) +{ + struct zxdh_ucontext *ucontext = entry->ucontext; + unsigned long flags; + + spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags); + hash_del(&entry->hlist); + spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags); + + kfree(entry); +} + +#endif /* RDMA_MMAP_DB_SUPPORT */ +/** + * zxdh_mmap - user memory map + * @context: context created during alloc + * @vma: kernel info for user memory map + */ +static int zxdh_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *rdma_entry; +#endif + struct zxdh_user_mmap_entry *entry; + struct zxdh_ucontext *ucontext; + u64 pfn; + int ret; + + ucontext = to_ucontext(context); + + /* Legacy support with hard-coded mmap key */ + if (ucontext->legacy_mode) + return zxdh_mmap_legacy(ucontext, vma); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); + if (!rdma_entry) { + pr_err("VERBS: pgoff[0x%lx] does not have valid entry\n", + vma->vm_pgoff); + return -EINVAL; + } + + entry = to_zxdh_mmap_entry(rdma_entry); +#else + entry = zxdh_find_user_mmap_entry(ucontext, vma); + if (!entry) { + pr_err("VERBS: pgoff[0x%lx] does not have valid entry\n", + vma->vm_pgoff); + return -EINVAL; + } +#endif + + pfn = (entry->bar_offset + + pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> + PAGE_SHIFT; + + switch (entry->mmap_flag) { + case ZXDH_MMAP_HMC: + ret = zxdh_mmap_hmc_for_cap(ucontext, vma, entry); + break; + case ZXDH_MMAP_PFN: + ret = zxdh_mmap_for_cap(ucontext, vma, entry); + break; + case ZXDH_MMAP_IO_NC: +#ifdef RDMA_MMAP_DB_SUPPORT + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot), + rdma_entry); +#else + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot)); +#endif + break; + case ZXDH_MMAP_IO_WC: +#ifdef RDMA_MMAP_DB_SUPPORT + ret = 
rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_writecombine(vma->vm_page_prot), + rdma_entry); +#else + ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE, + pgprot_writecombine(vma->vm_page_prot)); +#endif + break; + default: + pr_err("VERBS: unsupported mmap_flag[%d]\n", entry->mmap_flag); + ret = -EINVAL; + } + + if (ret) + pr_err("VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n", + entry->bar_offset, entry->mmap_flag, ret); + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_put(rdma_entry); +#endif + + return ret; +} + +/** + * zxdh_get_pbl - Retrieve pbl from a list given a virtual + * address + * @va: user virtual address + * @pbl_list: pbl list to search in (QP's or CQ's) + */ +struct zxdh_pbl *zxdh_get_pbl(unsigned long va, struct list_head *pbl_list) +{ + struct zxdh_pbl *iwpbl; + + list_for_each_entry(iwpbl, pbl_list, list) { + if (iwpbl->user_base == va) { + list_del(&iwpbl->list); + iwpbl->on_list = false; + return iwpbl; + } + } + + return NULL; +} + +/** + * zxdh_clean_cqes - clean cq entries for qp + * @iwqp: qp ptr (user or kernel) + * @iwcq: cq ptr + */ +void zxdh_clean_cqes(struct zxdh_qp *iwqp, struct zxdh_cq *iwcq) +{ + struct zxdh_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; + unsigned long flags; + + spin_lock_irqsave(&iwcq->lock, flags); + zxdh_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq); + spin_unlock_irqrestore(&iwcq->lock, flags); +} + +/** + * zxdh_setup_virt_qp - setup for allocation of virtual qp + * @iwdev: zrdma device + * @iwqp: qp ptr + * @init_info: initialize info to return + */ +void zxdh_setup_virt_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *init_info) +{ + struct zxdh_pbl *iwpbl = iwqp->iwpbl; + struct zxdh_qp_mr *qpmr = &iwpbl->qp_mr; + + iwqp->page = qpmr->sq_page; + init_info->shadow_area_pa = qpmr->shadow; + if (iwpbl->pbl_allocated) { + init_info->virtual_map = true; + init_info->sq_pa = qpmr->sq_pbl.idx; + if (iwqp->is_srq == false) + init_info->rq_pa = qpmr->rq_pbl.idx; + } else { + 
init_info->sq_pa = qpmr->sq_pbl.addr; + if (iwqp->is_srq == false) + init_info->rq_pa = qpmr->rq_pbl.addr; + } +} + +/** + * zxdh_setup_kmode_qp - setup initialization for kernel mode qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: initialize info to return + * @init_attr: Initial QP create attributes + */ +int zxdh_setup_kmode_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *info, + struct ib_qp_init_attr *init_attr) +{ + struct zxdh_dma_mem *mem = &iwqp->kqp.dma_mem; + u32 sqdepth, rqdepth; + u8 sqshift, rqshift; + u32 size; + int status; + struct zxdh_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; + struct zxdh_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; + + zxdh_get_sq_wqe_shift(uk_attrs, ukinfo->max_sq_frag_cnt, + ukinfo->max_inline_data, &sqshift); + status = zxdh_get_sqdepth(uk_attrs->max_hw_wq_quanta, ukinfo->sq_size, + sqshift, &sqdepth); + if (status) + return status; + if (iwqp->is_srq == false) { + zxdh_get_rq_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, + &rqshift); + + status = zxdh_get_rqdepth(uk_attrs->max_hw_rq_quanta, + ukinfo->rq_size, rqshift, &rqdepth); + } + if (status) + return status; + + ukinfo->sq_size = sqdepth >> sqshift; + iwqp->kqp.sq_wrid_mem = + kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL); + if (!iwqp->kqp.sq_wrid_mem) + return -ENOMEM; + if (iwqp->is_srq == false) { + ukinfo->rq_size = rqdepth >> rqshift; + iwqp->kqp.rq_wrid_mem = kcalloc(ukinfo->rq_size, + sizeof(*iwqp->kqp.rq_wrid_mem), + GFP_KERNEL); + if (!iwqp->kqp.rq_wrid_mem) { + kfree(iwqp->kqp.sq_wrid_mem); + iwqp->kqp.sq_wrid_mem = NULL; + return -ENOMEM; + } + ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem; + } + + ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem; + if (iwqp->is_srq == false) + size = sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE + + rqdepth * ZXDH_QP_RQ_WQE_MIN_SIZE; + else + size = sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE; + size += (ZXDH_SHADOW_AREA_SIZE << 3); + + mem->size = 
ALIGN(size, 4096); + mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size, &mem->pa, + GFP_KERNEL); + if (!mem->va) { + kfree(iwqp->kqp.sq_wrid_mem); + iwqp->kqp.sq_wrid_mem = NULL; + kfree(iwqp->kqp.rq_wrid_mem); + iwqp->kqp.rq_wrid_mem = NULL; + return -ENOMEM; + } + + ukinfo->sq = mem->va; + info->sq_pa = mem->pa; + if (iwqp->is_srq == false) { + ukinfo->rq = (struct zxdh_qp_rq_quanta *)&ukinfo->sq[sqdepth]; + info->rq_pa = info->sq_pa + (sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE); + ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; + info->shadow_area_pa = + info->rq_pa + (rqdepth * ZXDH_QP_RQ_WQE_MIN_SIZE); + } else { + ukinfo->shadow_area = (__le64 *)&ukinfo->sq[sqdepth]; + info->shadow_area_pa = + info->sq_pa + (sqdepth * ZXDH_QP_SQ_WQE_MIN_SIZE); + } + set_64bit_val(ukinfo->shadow_area, 0, 0x8000); + ukinfo->qp_id = iwqp->ibqp.qp_num; + + init_attr->cap.max_send_wr = (sqdepth - ZXDH_SQ_RSVD) >> sqshift; + if (iwqp->is_srq == false) + init_attr->cap.max_recv_wr = (rqdepth - ZXDH_RQ_RSVD) >> + rqshift; + + return 0; +} + +int zxdh_cqp_create_qp_cmd(struct zxdh_qp *iwqp) +{ + struct zxdh_pci_f *rf = iwqp->iwdev->rf; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + + cqp_info->cqp_cmd = ZXDH_OP_QP_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_create.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + + return status; +} + +void zxdh_roce_fill_and_set_qpctx_info(struct zxdh_qp *iwqp, + struct zxdh_qp_host_ctx_info *ctx_info) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp_info; + + udp_info = &iwqp->udp_info; + udp_info->pmtu = 
zxdh_mtu_int_to_enum( + iwdev->netdev->mtu); + udp_info->cwnd = iwdev->roce_cwnd; + udp_info->rexmit_thresh = 2; + udp_info->rnr_nak_thresh = 2; + udp_info->src_port = 0xc000; + udp_info->dst_port = ROCE_V2_UDP_DPORT; + if (iwqp->sc_qp.qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC) + udp_info->timeout = 0x1f; + else + udp_info->timeout = 0x0; + roce_info = &iwqp->roce_info; + ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr); + + roce_info->rd_en = false; + roce_info->wr_rdresp_en = false; + roce_info->bind_en = true; + roce_info->dcqcn_en = true; //dcqcn/ecn is set to default on + roce_info->ecn_en = false; + roce_info->rtomin = 5; + +#if IS_ENABLED(CONFIG_CONFIGFS_FS) + roce_info->dcqcn_en = iwdev->roce_dcqcn_en; + roce_info->timely_en = iwdev->roce_timely_en; + roce_info->dctcp_en = iwdev->roce_dctcp_en; + roce_info->rtomin = iwdev->roce_rtomin; + roce_info->rcv_no_icrc = iwdev->roce_no_icrc_en; +#endif + roce_info->ack_credits = iwdev->roce_ackcreds; + roce_info->ird_size = dev->hw_attrs.max_hw_ird; + roce_info->ord_size = dev->hw_attrs.max_hw_ord; + + if (!iwqp->user_mode) { + roce_info->priv_mode_en = true; + roce_info->fast_reg_en = true; + roce_info->udprivcq_en = true; + } + roce_info->roce_tver = 0; + + ctx_info->roce_info = &iwqp->roce_info; + ctx_info->udp_info = &iwqp->udp_info; + zxdh_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); +} + +int zxdh_validate_qp_attrs(struct ib_qp_init_attr *init_attr, + struct zxdh_device *iwdev) +{ + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + + if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || + init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || + init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) + return -EINVAL; + + if (rdma_protocol_roce(&iwdev->ibdev, 1)) { + if (init_attr->qp_type != IB_QPT_RC && + init_attr->qp_type != IB_QPT_UD && + init_attr->qp_type != IB_QPT_GSI) + return -EOPNOTSUPP; + } else { + 
return -EOPNOTSUPP; + } + + return 0; +} + +void zxdh_flush_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct zxdh_qp *iwqp = container_of(dwork, struct zxdh_qp, dwork_flush); + unsigned long flags; + + spin_lock_irqsave( + &iwqp->lock, + flags); /* Don't allow more posting while generating completions */ + zxdh_generate_flush_completions(iwqp); + spin_unlock_irqrestore(&iwqp->lock, flags); +} + +static int zxdh_get_ib_acc_flags(struct zxdh_qp *iwqp) +{ + int acc_flags = 0; + + if (rdma_protocol_roce(iwqp->ibqp.device, 1)) { + if (iwqp->roce_info.wr_rdresp_en) { + acc_flags |= IB_ACCESS_LOCAL_WRITE; + acc_flags |= IB_ACCESS_REMOTE_WRITE; + } + if (iwqp->roce_info.rd_en) + acc_flags |= IB_ACCESS_REMOTE_READ; + if (iwqp->roce_info.bind_en) + acc_flags |= IB_ACCESS_MW_BIND; + } else { + if (iwqp->iwarp_info.wr_rdresp_en) { + acc_flags |= IB_ACCESS_LOCAL_WRITE; + acc_flags |= IB_ACCESS_REMOTE_WRITE; + } + if (iwqp->iwarp_info.rd_en) + acc_flags |= IB_ACCESS_REMOTE_READ; + if (iwqp->iwarp_info.bind_en) + acc_flags |= IB_ACCESS_MW_BIND; + } + return acc_flags; +} + +/** + * zxdh_query_qp - query qp attributes + * @ibqp: qp pointer + * @attr: attributes pointer + * @attr_mask: Not used + * @init_attr: qp attributes to return + */ +static int zxdh_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_qp_init_attr *init_attr) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_sc_qp *qp = &iwqp->sc_qp; + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_dma_mem qpc_buf; + int err_code = 0; + + memset(attr, 0, sizeof(*attr)); + memset(init_attr, 0, sizeof(*init_attr)); + qpc_buf.va = NULL; + + qpc_buf.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + qpc_buf.va = dma_alloc_coherent(iwdev->rf->hw.device, qpc_buf.size, + &qpc_buf.pa, GFP_KERNEL); + if (!qpc_buf.va) { + pr_err("res qp entry raw failed:ENOMEM\n"); + return -ENOMEM; + } + err_code = zxdh_fill_qpc(&iwdev->rf->sc_dev, 
iwqp->sc_qp.qp_ctx_num, &qpc_buf); + if (err_code) { + pr_err("res qp entry raw fill qpc failed:%d\n", err_code); + goto free_rsrc; + } + attr->path_mig_state = IB_MIG_MIGRATED; + attr->qp_state = iwqp->ibqp_state; + attr->cur_qp_state = iwqp->ibqp_state; + attr->cap.max_send_wr = iwqp->max_send_wr; + attr->cap.max_recv_wr = iwqp->max_recv_wr; + attr->cap.max_inline_data = qp->qp_uk.max_inline_data; + attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt; + attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt; + attr->qp_access_flags = zxdh_get_ib_acc_flags(iwqp); + attr->port_num = 1; + if (iwqp->ibqp.qp_type == IB_QPT_RC) { + attr->ah_attr = iwqp->roce_ah.av.attrs; + attr->ah_attr.grh.sgid_attr = NULL; + } + + if (rdma_protocol_roce(ibqp->device, 1)) { + attr->path_mtu = iwqp->udp_info.pmtu; + attr->qkey = iwqp->roce_info.qkey; + attr->rq_psn = ZXDH_GET_QPC_ITEM(u32, qpc_buf.va, + ZXDH_QPC_SEND_EPSN_BYTE_OFFSET, + RDMAQPC_RX_EPSN); + attr->sq_psn = ZXDH_GET_QPC_ITEM(u32, qpc_buf.va, + ZXDH_QPC_SEND_PSN_BYTE_OFFSET, + RDMAQPC_TX_PSN_NEXT); + attr->dest_qp_num = iwqp->roce_info.dest_qp; + attr->pkey_index = 0; //supported pkey_table size ZXDH_PKEY_TBL_SZ is 1 + attr->timeout = iwqp->udp_info.timeout; + attr->retry_cnt = iwqp->udp_info.rexmit_thresh; + attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh; + attr->max_rd_atomic = iwqp->roce_info.ord_size; + attr->max_dest_rd_atomic = iwqp->roce_info.ird_size; + } + + init_attr->event_handler = iwqp->ibqp.event_handler; + init_attr->qp_context = iwqp->ibqp.qp_context; + init_attr->send_cq = iwqp->ibqp.send_cq; + init_attr->recv_cq = iwqp->ibqp.recv_cq; + init_attr->srq = iwqp->ibqp.srq; + init_attr->cap = attr->cap; +free_rsrc: + dma_free_coherent(iwdev->rf->hw.device, qpc_buf.size, qpc_buf.va, + qpc_buf.pa); + qpc_buf.va = NULL; + return err_code; +} + +#if 0 +#ifdef ZXDH_SET_UDP_SPORT_BYFLOW_LABLE +#define ZRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define ZRDMA_ROCE_UDP_ENCAP_VALID_PORT_OFFSET_MASK (0x3FFF) + +static u16 
rdma_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03FFF; + u32 fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + + return (u16)(fl_low | ZRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN); +} + +#define ZRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF) + +static u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn) +{ + u64 fl = (u64)lqpn * rqpn; + + fl ^= fl >> 20; + fl ^= fl >> 40; + + return (u32)(fl & ZRDMA_GRH_FLOWLABEL_MASK); +} +#endif +#endif + +static u16 zxdh_get_udp_sport(const struct rdma_ah_attr *ah, u32 src_qp_num, u32 dest_qp_num) +{ + u16 flow_label = (u16)ah->grh.flow_label; + static u16 sport_offset; + u16 sport = 0; + +#if 1 + if (flow_label & 0x1) + flow_label++; + sport = (u16)((sport_offset + flow_label) % ZRDMA_UDP_SPORT_NUM + ZRDMA_UDP_SPORT_BASE); + sport_offset = (sport_offset + 1) % ZRDMA_UDP_SPORT_NUM; + pr_debug("%s[%d]: flow_label=%d, sport=%d, sport_offset=%d\n", __func__, __LINE__, flow_label, sport, sport_offset); + return sport; +#else + if (!flow_label) { + // Without flow label + sport = (u16)(sport_offset + ZRDMA_UDP_SPORT_BASE); + sport_offset = (sport_offset + 1) % ZRDMA_UDP_SPORT_NUM; + return sport; + } else { + // With flow label + return rdma_flow_label_to_udp_sport(flow_label); + } +#endif +} + +/* Started by AICoder, pid:2ea6ei8461ec90014d8608e96018d730a1e59409 */ +static void zxdh_init_qp_indices(struct zxdh_qp_uk *qp) +{ + u32 sq_ring_size; + sq_ring_size = ZXDH_RING_SIZE(qp->sq_ring); + ZXDH_RING_INIT(qp->sq_ring, sq_ring_size); + ZXDH_RING_INIT(qp->initial_ring, sq_ring_size); + qp->swqe_polarity = 0; + qp->swqe_polarity_deferred = 1; + qp->rwqe_polarity = 0; + qp->rwqe_signature = 0; + ZXDH_RING_INIT(qp->rq_ring, qp->rq_size); +} + +static int zxdh_modify_qp_to_reset(struct zxdh_qp *iwqp, struct zxdh_modify_qp_info *info) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_qp_host_ctx_info *ctx_info = &iwqp->ctx_info; + + info->qpc_tx_mask_low = RDMAQPC_MASK_INIT; + info->qpc_tx_mask_high = RDMAQPC_MASK_INIT; + 
info->qpc_rx_mask_low = RDMAQPC_MASK_INIT; + info->qpc_rx_mask_high = RDMAQPC_MASK_INIT; + + ctx_info->next_qp_state = ZXDH_QPS_RESET; + zxdh_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); + + if (zxdh_hw_modify_qp(iwdev, iwqp, info, true)) + return -EINVAL; + + iwqp->iwarp_state = ZXDH_QPS_RESET; + + if (!iwqp->user_mode) { + if (iwqp->iwscq) { + zxdh_clean_cqes(iwqp, iwqp->iwscq); + if (iwqp->iwrcq != iwqp->iwscq) + zxdh_clean_cqes(iwqp, iwqp->iwrcq); + } + zxdh_init_qp_indices(&iwqp->sc_qp.qp_uk); + } + + return 0; +} +/* Ended by AICoder, pid:2ea6ei8461ec90014d8608e96018d730a1e59409 */ + +int remote_ip_info_process(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para *ip_para) +{ + int ret = 0; + struct iidc_core_dev_info *cdev_info; + + cdev_info = iwdev->rf->cdev; + if (!cdev_info) { + pr_err("%s[%d]: cdev_info is null!\n", __func__, __LINE__); + return -EIO; + } + + mutex_lock(&iwdev->eth_info_list_mtx_lock); + + ip_para->linked_fid = (cdev_info->slot_id & 0x0000ffff) << 16 | iwdev->rf->pcie_id; + + if (ip_para->mode == RDMA_ADD_REMOTE_IP) { + ret = zxdh_eth_info_hlist_add(iwdev, ip_para); + } else if (ip_para->mode == RDMA_DEL_REMOTE_IP) { + ret = zxdh_eth_info_hlist_delete(iwdev, ip_para); + } else { + ret = -1; + pr_info("%s[%d]: error mode=%d\n", __func__, __LINE__, ip_para->mode); + } + + if (ret) { + pr_err("%s[%d] double plane: hlist process fail\n", __func__, __LINE__); + } + + mutex_unlock(&iwdev->eth_info_list_mtx_lock); + return ret; +} + +int qp_remote_ip_info_process(struct ib_qp *ibqp, int op_type) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_udp_offload_info *udp_info = &iwqp->udp_info; + struct zxdh_rdma_to_eth_ip_para ip_para = { 0 }; + u8 src_mac_addr[ETH_ALEN]; + int ret = 0; + + ip_para.ifname = iwdev->netdev->name; + memcpy(ip_para.src_ip, udp_info->local_ipaddr, sizeof(udp_info->local_ipaddr)); + memcpy(ip_para.dst_ip, udp_info->dest_ip_addr, 
sizeof(udp_info->dest_ip_addr)); + + ether_addr_copy(src_mac_addr, iwdev->netdev->dev_addr); + ip_para.src_mac = LS_64_1(src_mac_addr[5], 0) | LS_64_1(src_mac_addr[4], 8) | + LS_64_1(src_mac_addr[3], 16) | LS_64_1(src_mac_addr[2], 24) | + LS_64_1(src_mac_addr[1], 32) | LS_64_1(src_mac_addr[0], 40); + ip_para.dst_mac = LS_64_1(iwqp->udp_info.dest_mac[5], 0) | LS_64_1(iwqp->udp_info.dest_mac[4], 8) | + LS_64_1(iwqp->udp_info.dest_mac[3], 16) | LS_64_1(iwqp->udp_info.dest_mac[2], 24) | + LS_64_1(iwqp->udp_info.dest_mac[1], 32) | LS_64_1(iwqp->udp_info.dest_mac[0], 40); + ip_para.ipv4 = udp_info->ipv4; + ip_para.mode = op_type; + + pr_debug("%s[%d]: dev_addr=%pM, src_mac_addr=%x-%x-%x-%x-%x-%x\n", __func__, __LINE__, iwdev->netdev->dev_addr, src_mac_addr[0], src_mac_addr[1], src_mac_addr[2], src_mac_addr[3], src_mac_addr[4], src_mac_addr[5]); + pr_debug("%s[%d]: ipv4=%d, name=%s, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_port=0x%x, dst_port=0x%x, src_mac=0x%llx, dst_mac=0x%llx\n", + __func__, __LINE__, ip_para.ipv4, ip_para.ifname, ip_para.src_ip[0], ip_para.src_ip[1], ip_para.src_ip[2], ip_para.src_ip[3], + ip_para.dst_ip[0], ip_para.dst_ip[1], ip_para.dst_ip[2], ip_para.dst_ip[3], udp_info->src_port, udp_info->dst_port, ip_para.src_mac, ip_para.dst_mac); + + if (op_type == RDMA_ADD_REMOTE_IP || op_type == RDMA_DEL_REMOTE_IP) { + ret = remote_ip_info_process(iwdev, &ip_para); + } else { + pr_info("%s[%d]: error op_type=%d\n", __func__, __LINE__, op_type); + ret = -1; + } + + return ret; +} + +/** + * zxdh_modify_qp_roce - modify qp request + * @ibqp: qp's pointer for modify + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int zxdh_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibqp->pd); + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct 
iidc_core_dev_info *cdev_info = (struct iidc_core_dev_info *)iwdev->rf->cdev; + struct zxdh_qp_host_ctx_info *ctx_info; + struct zxdh_roce_offload_info *roce_info; + struct zxdh_udp_offload_info *udp_info; + struct zxdh_modify_qp_info info = {}; + struct zxdh_modify_qp_resp uresp = {}; + struct zxdh_modify_qp_req ureq = {}; + char s_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + char d_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + int buf_size = 0; + char *log_buf = NULL; + enum ib_qp_state tmp_state; + unsigned long flags; + u8 issue_modify_qp = 0; + int ret = 0; + u64 qpc_tx_mask_low = 0; + u64 qpc_tx_mask_high = 0; + u64 qpc_rx_mask_low = 0; + u64 qpc_rx_mask_high = 0; + u32 dual_tor_switch = 0xFFFF; + u16 netdev_pmtu; + + ctx_info = &iwqp->ctx_info; + roce_info = &iwqp->roce_info; + udp_info = &iwqp->udp_info; + tmp_state = iwqp->ibqp_state; + if (attr_mask & IB_QP_RATE_LIMIT) { + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_FLUSH) { + iwqp->sc_qp.nvme_flush_qp = 1; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_IOQ; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_TGT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_QID; + } + + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_FLR) { + writel(1, (u32 __iomem *)(dev->hw->hw_addr + + RDMATX_QUEUE_VHCA_FLAG)); + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_IOQ) { + iwqp->sc_qp.is_nvmeof_ioq = + (attr->rate_limit & ZXDH_QP_NVMEOF_IOQ_MASK) ? + 1 : + 0; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_IOQ; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_IOQ; + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_TGT) { + iwqp->sc_qp.is_nvmeof_tgt = + (attr->rate_limit & ZXDH_QP_NVMEOF_TGT_MASK) ? 
+ 1 : + 0; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_TGT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_TGT; + } + if (attr->rate_limit & ZXDH_QP_MODIFY_NVMEOF_QID) { + iwqp->sc_qp.nvmeof_qid = attr->rate_limit & + ZXDH_QP_NVMEOF_QID_MASK; + writel(dev->vhca_id, + (u32 __iomem *)(dev->hw->hw_addr + + NOF_IOQ_VHCA_ID( + iwqp->sc_qp.nvmeof_qid))); + writel(iwpd->sc_pd.pd_id, + (u32 __iomem *)(dev->hw->hw_addr + + NOF_IOQ_PD_ID( + iwqp->sc_qp.nvmeof_qid))); + iwqp->sc_qp.virtual_map = 0; + iwqp->sc_qp.sq_pa = dev->nof_ioq_ddr_addr + + NOF_IOQ_SQ_WQE_SIZE * + NOF_IOQ_SQ_SIZE * + iwqp->sc_qp.nvmeof_qid; + iwqp->sc_qp.hw_sq_size = NOF_IOQ_SQ_LOG_SIZE; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_ACK_CREDITS; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_NVMEOF_QID; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_VMAP; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_LPBL_SIZE; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SQ_PA; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOG_SQ_SIZE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_ACK_CREDITS; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_NVMEOF_QID; + + ret = zxdh_clear_nof_ioq( + dev, NOF_IOQ_SQ_WQE_SIZE * NOF_IOQ_SQ_SIZE, + iwqp->sc_qp.sq_pa); + if (dev->nof_clear_dpu_mem.va) { + dma_free_coherent(dev->hw->device, + dev->nof_clear_dpu_mem.size, + dev->nof_clear_dpu_mem.va, + dev->nof_clear_dpu_mem.pa); + dev->nof_clear_dpu_mem.va = NULL; + } + if (ret) + return ret; + } + } + + if (refcount_read(&iwdev->trace_switch.t_switch)) { + log_buf = vzalloc(ZXDH_LOG_BUF_SIZE); + if (log_buf == NULL) + ibdev_notice(&iwdev->ibdev, "alloc log buf failed\n"); + } + + if (attr_mask & IB_QP_DEST_QPN) { + roce_info->dest_qp = attr->dest_qp_num; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_QPN; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_DEST_QPN; + if (log_buf) { + char qpn_buf[64] = { 0 }; + + sprintf(qpn_buf, ", dest_qpn:%d", roce_info->dest_qp); + strncat(log_buf, qpn_buf, + ZXDH_LOG_BUF_SIZE - buf_size - 1); + buf_size += strlen(qpn_buf); + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + ret = 
zxdh_query_pkey(ibqp->device, 0, attr->pkey_index, + &roce_info->p_key); + if (ret) + return ret; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PKEY; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PKEY; + } + + if (attr_mask & IB_QP_QKEY) { + roce_info->qkey = attr->qkey; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_QKEY; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_QKEY; + } + + if (attr_mask & IB_QP_PATH_MTU) { + udp_info->pmtu = attr->path_mtu; + iwqp->sc_qp.qp_uk.pmtu = attr->path_mtu; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PMTU; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PMTU; + netdev_pmtu = zxdh_mtu_int_to_enum(iwdev->netdev->mtu); + + if (attr->path_mtu > netdev_pmtu) { + pr_info("WARNING: attr->path_mtu(%d) larger than netdev_pmtu(%d)\n", + attr->path_mtu, netdev_pmtu); + } + } + + if (attr_mask & IB_QP_SQ_PSN) { + udp_info->psn_nxt = attr->sq_psn; + udp_info->psn_una = attr->sq_psn; + udp_info->psn_max = attr->sq_psn - 1; + iwqp->sc_qp.aeq_entry_err_last_psn = attr->sq_psn - 1; + iwqp->sc_qp.aeq_retry_err_last_psn = attr->sq_psn - 1; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LAST_ACK_PSN; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LSN; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PSN_MAX; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PSN_NXT; + } + + if (attr_mask & IB_QP_RQ_PSN) { + udp_info->epsn = attr->rq_psn; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_EPSN; + } + + if (attr_mask & IB_QP_RNR_RETRY) { + udp_info->rnr_nak_thresh = attr->rnr_retry; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RNR_RETRY_THRESHOLD; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RNR_RETRY_CNT; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_RNR_CUR_RETRY_CNT; + } + + if (attr_mask & IB_QP_RETRY_CNT) { + if (attr->retry_cnt == 7) + attr->retry_cnt = 6; + + udp_info->rexmit_thresh = attr->retry_cnt; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_RETRY_CNT; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_CUR_RETRY_CNT; + } + + if (attr_mask & IB_QP_TIMEOUT) { + udp_info->timeout = attr->timeout; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_ACK_TIMEOUT; + qpc_tx_mask_low |= 
RDMAQPC_TX_MASKL_ACK_TIMEOUT; + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + udp_info->min_rnr_timer = attr->min_rnr_timer; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_RNR_TIMER; + } + + ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_PD_ID; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_PD_ID; + + if (attr_mask & IB_QP_AV) { + struct zxdh_av *av = &iwqp->roce_ah.av; + u16 vlan_id = VLAN_N_VID; + u32 local_ip[4] = {}; + + memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah)); + if (attr->ah_attr.ah_flags & IB_AH_GRH) { + udp_info->ttl = attr->ah_attr.grh.hop_limit; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_TTL; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_TTL; + udp_info->flow_label = attr->ah_attr.grh.flow_label; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_FLOWLABLE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_FLOWLABLE; + udp_info->src_port = zxdh_get_udp_sport( + &attr->ah_attr, iwqp->sc_qp.qp_uk.qp_id, + attr->dest_qp_num); + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_SRC_PORT; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_SRC_PORT; + udp_info->tos = attr->ah_attr.grh.traffic_class; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_TOS; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_TOS; + zxdh_qp_rem_qos(&iwqp->sc_qp); + // ctx_info->user_pri = rt_tos2priority(udp_info->tos); //TODO: figure out why they do this + ctx_info->user_pri = (udp_info->tos >> 2) / 8; + iwqp->sc_qp.user_pri = ctx_info->user_pri; + iwqp->sc_qp.qp_uk.user_pri = ctx_info->user_pri; + zxdh_qp_add_qos(&iwqp->sc_qp); + + if (log_buf && udp_info->src_port) { + char port_buf[32] = { 0 }; + + sprintf(port_buf, ", src_port:%d", + udp_info->src_port); + strncat(log_buf, port_buf, + ZXDH_LOG_BUF_SIZE - buf_size - 1); + buf_size += strlen(port_buf); + } + } + ret = kc_zxdh_set_roce_cm_info(iwqp, attr, &vlan_id); + if (ret) + return ret; + + if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode) + vlan_id = 0; + if (vlan_id < VLAN_N_VID) { + udp_info->insert_vlan_tag = true; + udp_info->vlan_tag = + vlan_id | ctx_info->user_pri << 
VLAN_PRIO_SHIFT; + } else { + udp_info->insert_vlan_tag = false; + } + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_INSERT_VLANTAG; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_VLANTAG; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_INSERT_VLANTAG; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_VLANTAG; + + av->attrs = attr->ah_attr; + rdma_gid2ip((struct sockaddr *)&av->dgid_addr, + &attr->ah_attr.grh.dgid); + if (av->sgid_addr.saddr.sa_family == AF_INET6) { + __be32 *daddr = av->dgid_addr.saddr_in6.sin6_addr.in6_u + .u6_addr32; + __be32 *saddr = av->sgid_addr.saddr_in6.sin6_addr.in6_u + .u6_addr32; + + zxdh_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr); + zxdh_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr); + + udp_info->ipv4 = false; + zxdh_copy_ip_ntohl(local_ip, daddr); + sprintf(s_straddr, ", src_ip: %pI6", + &av->sgid_addr.saddr_in6.sin6_addr); + sprintf(d_straddr, ", dest_ip: %pI6", + &av->dgid_addr.saddr_in6.sin6_addr); + } else { + __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr; + __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr; + + local_ip[0] = ntohl(daddr); + + udp_info->ipv4 = true; + udp_info->dest_ip_addr[0] = 0; + udp_info->dest_ip_addr[1] = 0; + udp_info->dest_ip_addr[2] = 0; + udp_info->dest_ip_addr[3] = local_ip[0]; + + udp_info->local_ipaddr[0] = 0; + udp_info->local_ipaddr[1] = 0; + udp_info->local_ipaddr[2] = 0; + udp_info->local_ipaddr[3] = ntohl(saddr); + + sprintf(s_straddr, ", src_ip: %pI4", + &av->sgid_addr.saddr_in.sin_addr); + sprintf(d_straddr, ", dest_ip: %pI4", + &av->dgid_addr.saddr_in.sin_addr); + } + ether_addr_copy(udp_info->dest_mac, + ah_attr_to_dmac(attr->ah_attr)); + + dual_tor_switch = readl(cdev_info->hw_addr + ZXDH_DUAL_TOR_SWITCH_OFFSET); + pr_debug("%s[%d]: hw_addr=0x%llx, dual_tor_switch=0x%x\n", + __func__, __LINE__, + (u64)(uintptr_t)cdev_info->hw_addr, dual_tor_switch); + if (remote_ip_update_hook && (dual_tor_switch == ZXDH_DUAL_TOR_SWITCH_OPEN)) { + ret = qp_remote_ip_info_process(ibqp, RDMA_ADD_REMOTE_IP); + if (ret) { + 
pr_err("%s[%d]: ipv4=%d, name=%s, op_type=%d, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_port=0x%x, dst_port=0x%x\n", + __func__, __LINE__, udp_info->ipv4, iwdev->netdev->name, RDMA_DEL_REMOTE_IP, udp_info->local_ipaddr[0], udp_info->local_ipaddr[1], udp_info->local_ipaddr[2], udp_info->local_ipaddr[3], + udp_info->dest_ip_addr[0], udp_info->dest_ip_addr[1], udp_info->dest_ip_addr[2], udp_info->dest_ip_addr[3], udp_info->src_port, udp_info->dst_port); + } + } + + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_IPV4; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_IP_LOW; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_IP_HIGH; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_IP_LOW; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_LOCAL_IP_HIGH; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_DEST_MAC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_IPV4; + qpc_rx_mask_high |= RDMAQPC_RX_MASKH_DEST_IP; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_LOCAL_IP; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_DEST_MAC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_HDR_LEN; + + if (log_buf) { + strncat(log_buf, s_straddr, + ZXDH_LOG_BUF_SIZE - buf_size - 1); + buf_size += strlen(s_straddr); + strncat(log_buf, d_straddr, + ZXDH_LOG_BUF_SIZE - buf_size - 1); + buf_size += strlen(d_straddr); + } + } + + iwqp->sc_qp.qp_uk.qp_8k_index = + zxdh_get_8k_index(&iwqp->sc_qp, udp_info->dest_ip_addr[3]); + + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_GQP_ID; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_QUEUE_TC; + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_WS_IDX; + qpc_rx_mask_high |= RDMAQPC_RX_MASKH_QUEUE_TC; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_GQP_ID; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WS_IDX; + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) { + ibdev_err(&iwdev->ibdev, + "rd_atomic = %d, above max_hw_ord=%d\n", + attr->max_rd_atomic, + dev->hw_attrs.max_hw_ord); + return -EINVAL; + } + if (attr->max_rd_atomic) { + roce_info->ord_size = attr->max_rd_atomic; + qpc_tx_mask_low |= RDMAQPC_TX_MASKL_ORD_SIZE; + } 
+ info.ord_valid = true; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) { + ibdev_err(&iwdev->ibdev, + "rd_atomic = %d, above max_hw_ird=%d\n", + attr->max_rd_atomic, + dev->hw_attrs.max_hw_ird); + return -EINVAL; + } + if (attr->max_dest_rd_atomic) { + roce_info->ird_size = dev->hw_attrs.max_hw_ird; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_IRD_SIZE; + } + } + + if (attr_mask & IB_QP_ACCESS_FLAGS) { + if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) { + roce_info->wr_rdresp_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WRITE_EN; + } + + if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) { + roce_info->wr_rdresp_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_WRITE_EN; + } + if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) { + roce_info->rd_en = true; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_READ_EN; + } + } + + wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend)); + + spin_lock_irqsave(&iwqp->lock, flags); + if (attr_mask & IB_QP_STATE) { + if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state, + iwqp->ibqp.qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { + ibdev_warn( + &iwdev->ibdev, + "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n", + iwqp->ibqp.qp_num, iwqp->ibqp_state, + attr->qp_state); + ret = -EINVAL; + goto exit; + } + info.curr_iwarp_state = iwqp->iwarp_state; + + qpc_tx_mask_high |= RDMAQPC_TX_MASKH_QP_STATE; + qpc_rx_mask_low |= RDMAQPC_RX_MASKL_QP_STATE; + + switch (attr->qp_state) { + case IB_QPS_INIT: + if (iwqp->iwarp_state > ZXDH_QPS_INIT) { + ret = -EINVAL; + goto exit; + } + + if (iwqp->iwarp_state == ZXDH_QPS_INIT) { + ctx_info->next_qp_state = ZXDH_QPS_INIT; + issue_modify_qp = 1; + } + + if (iwqp->iwarp_state == ZXDH_QPS_RESET) { + ctx_info->next_qp_state = ZXDH_QPS_INIT; + issue_modify_qp = 1; + } + break; + case IB_QPS_RTR: + if (iwqp->iwarp_state > ZXDH_QPS_INIT) { + ret = -EINVAL; + goto exit; + } + ctx_info->next_qp_state = 
ZXDH_QPS_RTR; + issue_modify_qp = 1; + break; + case IB_QPS_RTS: + if (iwqp->ibqp_state < IB_QPS_RTR || + iwqp->ibqp_state == IB_QPS_ERR) { + ret = -EINVAL; + goto exit; + } + + ctx_info->next_qp_state = ZXDH_QPS_RTS; + issue_modify_qp = 1; + break; + case IB_QPS_SQD: + if (iwqp->iwarp_state == ZXDH_QPS_SQD) + goto exit; + + if (iwqp->iwarp_state != ZXDH_QPS_RTS) { + ret = -EINVAL; + goto exit; + } + + ctx_info->next_qp_state = ZXDH_QPS_SQD; + issue_modify_qp = 1; + break; + case IB_QPS_SQE: + case IB_QPS_ERR: + case IB_QPS_RESET: + if (iwqp->iwarp_state == ZXDH_QPS_ERR) { + spin_unlock_irqrestore(&iwqp->lock, flags); + if (udata) { + if (ib_copy_from_udata( + &ureq, udata, + min(sizeof(ureq), + udata->inlen))) + return -EINVAL; + + zxdh_flush_wqes( + iwqp, + (ureq.sq_flush ? ZXDH_FLUSH_SQ : + 0) | + (ureq.rq_flush ? + ZXDH_FLUSH_RQ : + 0) | + ZXDH_REFLUSH); + } + iwqp->ibqp_state = attr->qp_state; + if (attr->qp_state == IB_QPS_RESET) { + if (zxdh_modify_qp_to_reset(iwqp, &info)) + return -EINVAL; + } + return 0; + } + + ctx_info->next_qp_state = ZXDH_QPS_ERR; + issue_modify_qp = 1; + break; + default: + ret = -EINVAL; + goto exit; + } + + iwqp->ibqp_state = attr->qp_state; + } + + zxdh_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (ctx_info->next_qp_state == ZXDH_QPS_ERR) { + info.qpc_tx_mask_low = qpc_tx_mask_low; + info.qpc_tx_mask_high = qpc_tx_mask_high; + info.qpc_rx_mask_low = qpc_rx_mask_low; + info.qpc_rx_mask_high = qpc_rx_mask_high; + } else { + info.qpc_tx_mask_low = 0x1FFFFFF | qpc_tx_mask_low; + info.qpc_tx_mask_high = (0x1UL << 18) | qpc_tx_mask_high; + info.qpc_rx_mask_low = 0xDA3CE8081E7FFCF0 | qpc_rx_mask_low; + info.qpc_rx_mask_high = 0x1E9 | qpc_rx_mask_high; + } + + if (attr_mask & IB_QP_RATE_LIMIT) { + info.qpc_tx_mask_low = 0x1FFFFFF | qpc_tx_mask_low; + info.qpc_tx_mask_high = (0x1UL << 18) | qpc_tx_mask_high; + info.qpc_rx_mask_low = 0xDA3CE8081E7FFCF0 | qpc_rx_mask_low; + 
info.qpc_rx_mask_high = 0x1E9 | qpc_rx_mask_high; + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + } + + if (attr_mask & IB_QP_STATE) { + if (issue_modify_qp) { + if (zxdh_hw_modify_qp(iwdev, iwqp, &info, true)) + return -EINVAL; + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->iwarp_state == info.curr_iwarp_state) { + iwqp->iwarp_state = ctx_info->next_qp_state; + iwqp->ibqp_state = attr->qp_state; + } + if (iwqp->ibqp_state > IB_QPS_RTS && + !iwqp->flush_issued) { + iwqp->flush_issued = 1; + if (!iwqp->user_mode) + queue_delayed_work( + iwqp->iwdev->cleanup_wq, + &iwqp->dwork_flush, + msecs_to_jiffies( + ZXDH_FLUSH_DELAY_MS)); + spin_unlock_irqrestore(&iwqp->lock, flags); + zxdh_flush_wqes(iwqp, ZXDH_FLUSH_SQ | + ZXDH_FLUSH_RQ | + ZXDH_FLUSH_WAIT); + } else { + spin_unlock_irqrestore(&iwqp->lock, flags); + } + + if (attr->qp_state == IB_QPS_RESET) { + if (attr->qp_state == IB_QPS_RESET) { + if (zxdh_modify_qp_to_reset(iwqp, &info)) + return -EINVAL; + } + } + } else { + iwqp->ibqp_state = attr->qp_state; + } + if (udata) { + uresp.rd_fence_rate = iwdev->rd_fence_rate; + ret = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), + udata->outlen)); + if (ret) { + pr_err("VERBS: copy_to_udata failed\n"); + return ret; + } + } + if (log_buf) { + ibdev_notice( + &iwdev->ibdev, + "QP[%u]: modify QP, type %d, ib qpn 0x%X, state: %s => %s%s\n", + iwqp->ibqp.qp_num, iwqp->ibqp.qp_type, + iwqp->ibqp.qp_num, + zxdh_qp_state_to_string(tmp_state), + zxdh_qp_state_to_string(attr->qp_state), + log_buf); + } + } + + if (log_buf) + vfree(log_buf); + + return 0; +exit: + if (log_buf) + vfree(log_buf); + + spin_unlock_irqrestore(&iwqp->lock, flags); + + return ret; +} + +/** + * zxdh_cq_free_rsrc - free up resources for cq + * @rf: RDMA PCI function + * @iwcq: cq ptr + */ +void zxdh_cq_free_rsrc(struct zxdh_pci_f *rf, struct zxdh_cq *iwcq) +{ + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + + if (!iwcq->user_mode) { + dma_free_coherent(rf->sc_dev.hw->device, 
iwcq->kmem.size, + iwcq->kmem.va, iwcq->kmem.pa); + iwcq->kmem.va = NULL; + dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem_shadow.size, + iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa); + iwcq->kmem_shadow.va = NULL; + } + if (cq->dev) { + zxdh_free_rsrc(rf, rf->allocated_cqs, + iwcq->cq_num - cq->dev->base_cqn); + } +} + +/** + * zxdh_free_cqbuf - worker to free a cq buffer + * @work: provides access to the cq buffer to free + */ +static void zxdh_free_cqbuf(struct work_struct *work) +{ + struct zxdh_cq_buf *cq_buf = + container_of(work, struct zxdh_cq_buf, work); + + dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size, + cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa); + cq_buf->kmem_buf.va = NULL; + kfree(cq_buf); +} + +/** + * zxdh_process_resize_list - remove resized cq buffers from the resize_list + * @iwcq: cq which owns the resize_list + * @iwdev: zrdma device + * @lcqe_buf: the buffer where the last cqe is received + */ +int zxdh_process_resize_list(struct zxdh_cq *iwcq, struct zxdh_device *iwdev, + struct zxdh_cq_buf *lcqe_buf) +{ + struct list_head *tmp_node, *list_node; + struct zxdh_cq_buf *cq_buf; + int cnt = 0; + + list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { + cq_buf = list_entry(list_node, struct zxdh_cq_buf, list); + if (cq_buf == lcqe_buf) + return cnt; + + list_del(&cq_buf->list); + queue_work(iwdev->cleanup_wq, &cq_buf->work); + cnt++; + } + + return cnt; +} + +/** + * zxdh_resize_cq - resize cq + * @ibcq: cq to be resized + * @entries: desired cq size + * @udata: user data + */ +static int zxdh_resize_cq(struct ib_cq *ibcq, int entries, + struct ib_udata *udata) +{ + struct zxdh_cq *iwcq = to_iwcq(ibcq); + struct zxdh_sc_dev *dev = iwcq->sc_cq.dev; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_modify_cq_info *m_info; + struct zxdh_modify_cq_info info = {}; + struct zxdh_dma_mem kmem_buf; + struct zxdh_cq_mr *cqmr_buf; + struct zxdh_pbl *iwpbl_buf; + struct zxdh_device *iwdev; + 
struct zxdh_pci_f *rf; + struct zxdh_cq_buf *cq_buf = NULL; + unsigned long flags; + int ret; + + iwdev = to_iwdev(ibcq->device); + rf = iwdev->rf; + + if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE)) + return -EOPNOTSUPP; + + if (entries > rf->max_cqe) + return -EINVAL; + + if (!iwcq->user_mode) { + entries++; + if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= ZXDH_GEN_2) + entries *= 2; + } + + info.cq_size = zxdh_cq_round_up(max(entries, 4)); + + if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1) + return 0; + + if (udata) { + struct zxdh_resize_cq_req req = {}; + struct zxdh_ucontext *ucontext = + kc_rdma_udata_to_drv_context(ibcq, udata); + + /* CQ resize not supported with legacy GEN_1 lib */ + if (ucontext->legacy_mode) + return -EOPNOTSUPP; + + if (ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen))) + return -EINVAL; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl_buf = zxdh_get_pbl((unsigned long)req.user_cq_buffer, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + + if (!iwpbl_buf) + return -ENOMEM; + + cqmr_buf = &iwpbl_buf->cq_mr; + if (iwpbl_buf->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx; + } else { + info.cq_pa = cqmr_buf->cq_pbl.addr; + } + } else { + /* Kmode CQ resize */ + int rsize; + + rsize = info.cq_size * sizeof(struct zxdh_cqe); + kmem_buf.size = ALIGN(round_up(rsize, 256), 256); + kmem_buf.va = dma_alloc_coherent(dev->hw->device, kmem_buf.size, + &kmem_buf.pa, GFP_KERNEL); + if (!kmem_buf.va) + return -ENOMEM; + + info.cq_base = kmem_buf.va; + info.cq_pa = kmem_buf.pa; + cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL); + if (!cq_buf) { + ret = -ENOMEM; + goto error; + } + } + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + ret = -ENOMEM; + goto error; + } + + info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold; 
+	info.cq_resize = true;
+
+	cqp_info = &cqp_request->info;
+	m_info = &cqp_info->in.u.cq_modify.info;
+	memcpy(m_info, &info, sizeof(*m_info));
+
+	cqp_info->cqp_cmd = ZXDH_OP_CQ_MODIFY;
+	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
+	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
+	cqp_info->post_sq = 1;
+	ret = zxdh_handle_cqp_op(rf, cqp_request);
+	zxdh_put_cqp_request(&rf->cqp, cqp_request);
+	if (ret)
+		goto error;
+
+	spin_lock_irqsave(&iwcq->lock, flags);
+	if (cq_buf) {
+		cq_buf->kmem_buf = iwcq->kmem;
+		cq_buf->hw = dev->hw;
+		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk,
+		       sizeof(cq_buf->cq_uk));
+		INIT_WORK(&cq_buf->work, zxdh_free_cqbuf);
+		list_add_tail(&cq_buf->list, &iwcq->resize_list);
+		iwcq->kmem = kmem_buf;
+	}
+
+	zxdh_sc_cq_resize(&iwcq->sc_cq, &info);
+	ibcq->cqe = info.cq_size - 1;
+	spin_unlock_irqrestore(&iwcq->lock, flags);
+
+	return 0;
+error:
+	if (!udata) {
+		dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
+				  kmem_buf.pa);
+		kmem_buf.va = NULL;
+	}
+	kfree(cq_buf);
+
+	return ret;
+}
+
+static int zxdh_modify_cq(struct ib_cq *ibcq, u16 cq_count, u16 cq_period)
+{
+	struct zxdh_device *iwdev = to_iwdev(ibcq->device);
+	struct zxdh_cq *iwcq = to_iwcq(ibcq);
+	struct zxdh_cqp_request *cqp_request;
+	struct cqp_cmds_info *cqp_info;
+	struct zxdh_pci_f *rf;
+	int ret;
+	u32 val = 0;
+	u16 unit_period = 0;
+
+	rf = iwdev->rf;
+	val = readl(rf->sc_dev.hw->hw_addr + RDMARX_CQ_PERIOD_CFG);
+	unit_period = (u16)(val & 0xffff);
+
+	if (!unit_period)
+		return -EINVAL;
+
+	if ((US_TO_NS(cq_period) / unit_period) > ZXDH_MAX_CQ_PERIOD) {
+		pr_info("cq_count and cq_period validate fail\n");
+		return -EINVAL;
+	}
+
+	cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true);
+	if (!cqp_request)
+		return -ENOMEM;
+
+	cqp_info = &cqp_request->info;
+
+	cqp_info->cqp_cmd = ZXDH_OP_CQ_MODIFY_MODERATION;
+	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
+	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
+	cqp_info->post_sq = 1;
+
+
cqp_info->in.u.cq_modify.cq->cq_max = cq_count; + cqp_info->in.u.cq_modify.cq->cq_period = + (uint16_t)(US_TO_NS(cq_period) / unit_period); + cqp_info->in.u.cq_modify.cq->scqe_break_moderation_en = + iwcq->sc_cq.scqe_break_moderation_en; + ret = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (ret) + zxdh_dbg(iwdev_to_idev(iwdev), "MODIFY CQ: modify_cq failed\n"); + + return ret; +} + +/** + * zxdh_get_mr_access - get hw MR access permissions from IB access flags + * @access: IB access flags + */ +static inline u16 zxdh_get_mr_access(int access) +{ + u16 hw_access = 0; + + hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ? + ZXDH_ACCESS_FLAGS_LOCALWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ? + ZXDH_ACCESS_FLAGS_REMOTEWRITE : + 0; + hw_access |= (access & IB_ACCESS_REMOTE_READ) ? + ZXDH_ACCESS_FLAGS_REMOTEREAD : + 0; + hw_access |= (access & IB_ACCESS_MW_BIND) ? + ZXDH_ACCESS_FLAGS_BIND_WINDOW : + 0; + hw_access |= (access & IB_ZERO_BASED) ? 
ZXDH_ACCESS_FLAGS_ZERO_BASED : + 0; + hw_access |= ZXDH_ACCESS_FLAGS_LOCALREAD; + + return hw_access; +} + +/** + * zxdh_free_stag - free stag resource + * @iwdev: zrdma device + * @stag: stag to free + */ +void zxdh_free_stag(struct zxdh_device *iwdev, u32 stag) +{ + u32 stag_idx; + + stag_idx = (stag) >> ZXDH_CQPSQ_STAG_IDX_S; + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx); +} + +/** + * zxdh_create_stag - create random stag + * @iwdev: zrdma device + */ +u32 zxdh_create_stag(struct zxdh_device *iwdev) +{ + u32 stag = 0; + u32 stag_index = 0; + u32 random; + u8 consumer_key; + int ret; + + get_random_bytes(&random, sizeof(random)); + consumer_key = (u8)random; + + ret = zxdh_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, + iwdev->rf->max_mr, &stag_index, + &iwdev->rf->next_mr); + + if (ret) + return stag; + stag = stag_index << ZXDH_CQPSQ_STAG_IDX_S; + stag |= consumer_key; + + return stag; +} + +/** + * zxdh_check_mem_contiguous - check if pbls stored in arr are contiguous + * @arr: lvl1 pbl array + * @npages: page count + * @pg_size: page size + * + */ +static bool zxdh_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) +{ + u32 pg_idx; + + for (pg_idx = 0; pg_idx < npages; pg_idx++) { + if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) + return false; + } + + return true; +} + +/** + * zxdh_check_mr_contiguous - check if MR is physically contiguous + * @palloc: pbl allocation struct + * @pg_size: page size + */ +static bool zxdh_check_mr_contiguous(struct zxdh_pble_alloc *palloc, + u32 pg_size) +{ + struct zxdh_pble_level2 *lvl2 = &palloc->level2; + struct zxdh_pble_info *leaf = lvl2->leaf; + u64 *arr = NULL; + u64 *start_addr = NULL; + int i; + bool ret; + + if (palloc->level == PBLE_LEVEL_1) { + arr = palloc->level1.addr; + ret = zxdh_check_mem_contiguous(arr, palloc->total_cnt, + pg_size); + return ret; + } + + start_addr = leaf->addr; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + arr = leaf->addr; + if ((*start_addr + (i * 
pg_size * PBLE_PER_PAGE)) != *arr) + return false; + ret = zxdh_check_mem_contiguous(arr, leaf->cnt, pg_size); + if (!ret) + return false; + } + + return true; +} + +/** + * zxdh_setup_pbles - copy user pg address to pble's + * @rf: RDMA PCI function + * @iwmr: mr pointer for this memory registration + * @use_pbles: flag if to use pble's + * @pble_type: flag if to pble type(mr or queue) + */ +static int zxdh_setup_pbles(struct zxdh_pci_f *rf, struct zxdh_mr *iwmr, + bool use_pbles, bool pble_type) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_hmc_pble_rsrc *pble_rsrc_com; + u64 *pbl; + int status; + enum zxdh_pble_level level = PBLE_LEVEL_1; + bool b_level1_only = true; + + if (use_pbles) { + if (pble_type == PBLE_QUEUE) { + pble_rsrc_com = rf->pble_rsrc; + b_level1_only = true; + } else { + pble_rsrc_com = rf->pble_mr_rsrc; + b_level1_only = false; + } + + status = zxdh_get_pble(pble_rsrc_com, palloc, iwmr->page_cnt, + b_level1_only); + if (status) { + pr_info("%s %d get pble failed status:%d\n", __func__, __LINE__, status); + return status; + } + + iwpbl->pbl_allocated = true; + level = palloc->level; + pinfo = (level == PBLE_LEVEL_1) ? 
&palloc->level1 : + palloc->level2.leaf; + pbl = pinfo->addr; + pinfo->pble_copy = pble_rsrc_com->pble_copy; + } else { + pbl = iwmr->pgaddrmem; + } + + zxdh_copy_user_pgaddrs(iwmr, pbl, &pinfo, level, use_pbles, pble_type); + + if (use_pbles) + iwmr->pgaddrmem[0] = *pbl; + + return 0; +} + +/** + * zxdh_handle_q_mem - handle memory for qp and cq + * @iwdev: zrdma device + * @req: information for q memory management + * @iwpbl: pble struct + * @use_pbles: flag to use pble + */ +static int zxdh_handle_q_mem(struct zxdh_device *iwdev, + struct zxdh_mem_reg_req *req, + struct zxdh_pbl *iwpbl, bool use_pbles) +{ + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct zxdh_mr *iwmr = iwpbl->iwmr; + struct zxdh_qp_mr *qpmr = &iwpbl->qp_mr; + struct zxdh_cq_mr *cqmr = &iwpbl->cq_mr; + struct zxdh_srq_mr *srqmr = &iwpbl->srq_mr; + struct zxdh_hmc_pble *hmc_p; + u64 *arr = iwmr->pgaddrmem; + u32 pg_size, total; + int err = 0; + bool ret = true; + + pg_size = iwmr->page_size; + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, + PBLE_QUEUE); // queue mr + if (err) + return err; + + if (use_pbles && palloc->level != PBLE_LEVEL_1) { + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + return -ENOMEM; + } + + if (use_pbles) + arr = palloc->level1.addr; + + switch (iwmr->type) { + case ZXDH_MEMREG_TYPE_QP: + total = req->sq_pages + req->rq_pages; + hmc_p = &qpmr->sq_pbl; + qpmr->shadow = (dma_addr_t)arr[total]; + if (use_pbles) { + ret = zxdh_check_mem_contiguous(arr, req->sq_pages, + pg_size); + if (ret) + ret = zxdh_check_mem_contiguous( + &arr[req->sq_pages], req->rq_pages, + pg_size); + } + + if (!ret) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &qpmr->rq_pbl; + hmc_p->idx = palloc->level1.idx + req->sq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &qpmr->rq_pbl; + hmc_p->addr = arr[req->sq_pages]; + } + break; + case ZXDH_MEMREG_TYPE_CQ: + hmc_p = &cqmr->cq_pbl; + + if (!cqmr->split) + cqmr->shadow = 
(dma_addr_t)arr[req->cq_pages]; + + if (use_pbles) + ret = zxdh_check_mem_contiguous(arr, req->cq_pages, + pg_size); + + if (!ret) + hmc_p->idx = palloc->level1.idx; + else + hmc_p->addr = arr[0]; + break; + case ZXDH_MEMREG_TYPE_SRQ: + total = req->srq_pages + req->srq_list_pages; + hmc_p = &srqmr->srq_pbl; + srqmr->db_addr = (dma_addr_t)arr[total]; + + if (use_pbles) { + ret = zxdh_check_mem_contiguous(arr, req->srq_pages, + pg_size); + if (ret) + ret = zxdh_check_mem_contiguous( + &arr[req->srq_pages], + req->srq_list_pages, pg_size); + } + + if (!ret) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &srqmr->srq_list_pbl; + hmc_p->idx = palloc->level1.idx + req->srq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &srqmr->srq_list_pbl; + hmc_p->addr = arr[req->srq_pages]; + } + break; + default: + pr_err("VERBS: MR type error\n"); + err = -EINVAL; + } + + if (use_pbles && ret) { + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + + return err; +} + +/** + * zxdh_hw_alloc_mw - create the hw memory window + * @iwdev: zrdma device + * @iwmr: pointer to memory window info + */ +int zxdh_hw_alloc_mw(struct zxdh_device *iwdev, struct zxdh_mr *iwmr) +{ + struct zxdh_mw_alloc_info *info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.mw_alloc.info; + memset(info, 0, sizeof(*info)); + if (iwmr->ibmw.type == IB_MW_TYPE_1) + info->mw_wide = true; + + info->page_size = PAGE_SIZE; + info->mw_stag_index = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + info->pd_id = iwpd->sc_pd.pd_id; + info->remote_access = true; + cqp_info->cqp_cmd = ZXDH_OP_MW_ALLOC; + cqp_info->post_sq = 1; + cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; + 
status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + return status; +} + +/** + * zxdh_dealloc_mw - Dealloc memory window + * @ibmw: memory window structure. + */ +static int zxdh_dealloc_mw(struct ib_mw *ibmw) +{ + struct ib_pd *ibpd = ibmw->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); + struct zxdh_device *iwdev = to_iwdev(ibmw->device); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_dealloc_stag_info *info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + info->pd_id = iwpd->sc_pd.pd_id; + info->stag_idx = RS_64_1(ibmw->rkey, ZXDH_CQPSQ_STAG_IDX_S); + info->mr = false; + cqp_info->cqp_cmd = ZXDH_OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + zxdh_free_stag(iwdev, iwmr->stag); + + return 0; +} + +/** + * zxdh_hw_alloc_stag - cqp command to allocate stag + * @iwdev: zrdma device + * @iwmr: zrdma mr pointer + */ +int zxdh_hw_alloc_stag(struct zxdh_device *iwdev, struct zxdh_mr *iwmr) +{ + struct zxdh_allocate_stag_info *info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + int status; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.alloc_stag.info; + memset(info, 0, sizeof(*info)); + info->page_size = PAGE_SIZE; + info->stag_idx = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + info->pd_id = iwpd->sc_pd.pd_id; + info->total_len = iwmr->len; 
+ info->remote_access = true; + cqp_info->cqp_cmd = ZXDH_OP_ALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + if (!status) + iwmr->is_hwreg = 1; + + return status; +} + +/** + * zxdh_set_page - populate pbl list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @addr: page dma address fro pbl list + */ +static int zxdh_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + u64 *pbl; + + if (unlikely(iwmr->npages == iwmr->page_cnt)) + return -ENOMEM; + + pbl = palloc->level1.addr; + pbl[iwmr->npages++] = addr; + + return 0; +} + +/** + * zxdh_map_mr_sg - map of sg list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @sg: scatter gather list + * @sg_nents: number of sg pages + * @sg_offset: scatter gather list for fmr + */ +static int zxdh_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct zxdh_mr *iwmr = to_iwmr(ibmr); + struct zxdh_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; + int ret = 0; + + iwmr->npages = 0; + + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, zxdh_set_page); + + if (iwmr->npages > 1) { + zxdh_cqp_config_pble_table_cmd(iwmr->sc_dev, &(palloc->level1), + iwmr->npages << 3, PBLE_MR); + } + + return ret; +} + +/** + * zxdh_hwreg_mr - send cqp command for memory registration + * @iwdev: zrdma device + * @iwmr: zrdma mr pointer + * @access: access for MR + */ +int zxdh_hwreg_mr(struct zxdh_device *iwdev, struct zxdh_mr *iwmr, u16 access) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_reg_ns_stag_info *stag_info; + struct zxdh_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct 
zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int ret; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + stag_info = &cqp_info->in.u.mr_reg_non_shared.info; + memset(stag_info, 0, sizeof(*stag_info)); + stag_info->va = iwpbl->user_base; + stag_info->stag_idx = iwmr->stag >> ZXDH_CQPSQ_STAG_IDX_S; + stag_info->stag_key = (u8)iwmr->stag; + stag_info->total_len = iwmr->len; + stag_info->access_rights = zxdh_get_mr_access(access); + stag_info->pd_id = iwpd->sc_pd.pd_id; + if (stag_info->access_rights & ZXDH_ACCESS_FLAGS_ZERO_BASED) + stag_info->addr_type = ZXDH_ADDR_TYPE_ZERO_BASED; + else + stag_info->addr_type = ZXDH_ADDR_TYPE_VA_BASED; + stag_info->page_size = iwmr->page_size; + + if (iwpbl->pbl_allocated) { + if (palloc->level == PBLE_LEVEL_1) { + stag_info->first_pm_pbl_index = palloc->level1.idx; + stag_info->chunk_size = 1; + } else { + stag_info->first_pm_pbl_index = palloc->level2.root.idx; + stag_info->chunk_size = 3; + } + } else { + stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; + } + + cqp_info->cqp_cmd = ZXDH_OP_MR_REG_NON_SHARED; + cqp_info->post_sq = 1; + cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; + ret = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + if (!ret) + iwmr->is_hwreg = 1; + + return ret; +} + +/** + * zxdh_reg_user_mr - Register a user memory region + * @pd: ptr of pd + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @access: access of mr + * @udata: user data + */ +static struct ib_mr *zxdh_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, + u64 virt, int access, + struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_ucontext *ucontext; + struct zxdh_pble_alloc *palloc; + struct zxdh_pbl *iwpbl; + struct zxdh_mr 
*iwmr; + struct ib_umem *region; + struct zxdh_mem_reg_req req = {}; + struct zxdh_reg_mr_resp resp = {}; + u32 total = 0, stag = 0; + u8 shadow_pgcnt = 1; + bool use_pbles = false; + unsigned long flags; + int err = -EINVAL; + int ret; + + if (!len || len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) { + pr_err("%s[%d]: error size, start=0x%llx, len=0x%llx, access=0x%x, max_mr_size=0x%llx\n", __func__, __LINE__, start, len, access, iwdev->rf->sc_dev.hw_attrs.max_mr_size); + return ERR_PTR(-EINVAL); + } + + region = ib_umem_get(pd->device, start, len, access); + + if (IS_ERR(region)) { + pr_err("%s[%d] VERBS: Failed to create ib_umem region, errno=%ld, start=0x%llx, len=0x%llx, access=0x%x\n", __func__, __LINE__, PTR_ERR(region), start, len, access); + return (struct ib_mr *)region; + } + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { + pr_err("%s[%d]: copy from udata failed, sizeof(req)=0x%lx, inlen=%#zx\n", __func__, __LINE__, sizeof(req), udata->inlen); + ib_umem_release(region); + return ERR_PTR(-EFAULT); + } + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) { + pr_err("%s[%d]: kzalloc failed, size=0x%lx\n", __func__, __LINE__, sizeof(*iwmr)); + ib_umem_release(region); + return ERR_PTR(-ENOMEM); + } + + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->region = region; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwmr->ibmr.iova = virt; + iwmr->ibmr.length = len; + iwmr->page_size = PAGE_SIZE; + +#ifdef SET_BEST_PAGE_SZ_V2 + if (req.reg_type == ZXDH_MEMREG_TYPE_MEM) { + iwmr->page_size = ib_umem_find_best_pgsz( + region, SZ_4K | SZ_2M | SZ_1G, virt); + if (unlikely(!iwmr->page_size)) { + pr_err("%s[%d]: find best pgsz failed, page_size=0x%llx\n", __func__, __LINE__, iwmr->page_size); + kfree(iwmr); + ib_umem_release(region); + return ERR_PTR(-EOPNOTSUPP); + } + } +#endif + iwmr->len = region->length; + iwpbl->user_base = virt; + palloc = &iwpbl->pble_alloc; + iwmr->type = req.reg_type; +#ifdef 
rdma_umem_for_each_dma_block +#ifdef ib_umem_num_dma_blocks + iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif + + switch (req.reg_type) { + case ZXDH_MEMREG_TYPE_QP: + total = req.sq_pages + req.rq_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + pr_err("%s[%d]: page_cnt compare failed, reg_type=%d, total=0x%x, page_cnt=0x%x\n", __func__, __LINE__, req.reg_type, total, iwmr->page_cnt); + goto error; + } + total = req.sq_pages + req.rq_pages; + use_pbles = (total > 2); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) { + pr_err("%s[%d]: handle_q_mem failed, err=%d, reg_type=%d, use_pbles=0x%x\n", __func__, __LINE__, err, req.reg_type, total > 2); + goto error; + } + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_CQ: + if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE) + shadow_pgcnt = 0; + total = req.cq_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + pr_err("%s[%d]: page_cnt compare failed, reg_type=%d, total=0x%x, page_cnt=0x%x\n", __func__, __LINE__, req.reg_type, total, iwmr->page_cnt); + goto error; + } + + use_pbles = (req.cq_pages > 1); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) { + pr_err("%s[%d]: handle_q_mem failed, err=%d, reg_type=%d, use_pbles=0x%x\n", __func__, __LINE__, err, req.reg_type, total > 2); + goto error; + } + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, 
&ucontext->cq_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_SRQ: + total = req.srq_pages + req.srq_list_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; + pr_err("%s[%d]: page_cnt compare failed, reg_type=%d, total=0x%x, page_cnt=0x%x\n", __func__, __LINE__, req.reg_type, total, iwmr->page_cnt); + goto error; + } + + total = req.srq_pages + req.srq_list_pages; + use_pbles = (total > 2); + err = zxdh_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) { + pr_err("%s[%d]: handle_q_mem failed, err=%d, reg_type=%d, use_pbles=0x%x\n", __func__, __LINE__, err, req.reg_type, total > 2); + goto error; + } + + ucontext = kc_rdma_udata_to_drv_context(pd, udata); + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_MEM: + use_pbles = (iwmr->page_cnt != 1); + + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, + PBLE_MR); // mr + if (err) { + pr_err("%s[%d]: setup_pbles failed, err=%d, reg_type=%d, use_pbles=0x%x\n", __func__, __LINE__, err, req.reg_type, total > 2); + goto error; + } + + if (use_pbles) { + ret = zxdh_check_mr_contiguous(palloc, iwmr->page_size); + if (ret) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + } + + stag = zxdh_create_stag(iwdev); + if (!stag) { + err = -ENOMEM; + pr_err("%s[%d]: create_stag failed, err=%d, reg_type=%d, stag=%d\n", __func__, __LINE__, err, req.reg_type, stag); + goto error; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->access = access; + err = zxdh_hwreg_mr(iwdev, iwmr, access); + if (err) { + pr_err("%s[%d]: hwreg_mr failed, err=%d, reg_type=%d, access=0x%x\n", __func__, __LINE__, err, req.reg_type, access); + zxdh_free_stag(iwdev, stag); + 
goto error; + } + + if (iwpbl->pbl_allocated == true) { + if (iwpbl->pble_alloc.level == PBLE_LEVEL_1) { + resp.mr_pa_low = iwpbl->pble_alloc.level1.idx; + resp.mr_pa_hig = 0; + resp.leaf_pbl_size = 1; + } else { + resp.mr_pa_low = + iwpbl->pble_alloc.level2.root.idx; + resp.mr_pa_hig = 0; + resp.leaf_pbl_size = 3; + } + + } else { + resp.mr_pa_low = (u32)(iwmr->pgaddrmem[0] & 0xffffffff); + resp.mr_pa_hig = (u32)((iwmr->pgaddrmem[0] & + 0xffffffff00000000) >> + 32); + resp.leaf_pbl_size = 0; + } + + if (iwmr->page_size == 0x40000000) + resp.host_page_size = ZXDH_PAGE_SIZE_1G; + else if (iwmr->page_size == 0x200000) + resp.host_page_size = ZXDH_PAGE_SIZE_2M; + else if (iwmr->page_size == 0x1000) + resp.host_page_size = ZXDH_PAGE_SIZE_4K; + + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + pr_err("%s[%d]: copy to udata failed, sizeof(resp)=0x%lx, outlen=%#zx\n", __func__, __LINE__, sizeof(resp), udata->outlen); + goto error; + } + + break; + default: + pr_err("%s[%d]: error reg_type=%d\n", __func__, __LINE__, req.reg_type); + goto error; + } + + iwmr->type = req.reg_type; + + return &iwmr->ibmr; + +error: + pr_err("%s process failed: err=%d, reg_type=%d\n", __func__, err, req.reg_type); + if (req.reg_type == ZXDH_MEMREG_TYPE_MEM) { + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + } else { + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) + zxdh_free_pble(iwdev->rf->pble_rsrc, palloc); + } + ib_umem_release(region); + kfree(iwmr); + + return ERR_PTR(err); +} + +int zxdh_hwdereg_mr(struct ib_mr *ib_mr) +{ + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_pd *iwpd = to_iwpd(ib_mr->pd); + struct zxdh_dealloc_stag_info *info; + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + /* Skip HW MR de-register when it is already 
de-registered + * during an MR re-reregister and the re-registration fails + */ + if (!iwmr->is_hwreg) + return 0; + + cqp_request = zxdh_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + info->pd_id = iwpd->sc_pd.pd_id; + info->stag_idx = RS_64_1(ib_mr->rkey, ZXDH_CQPSQ_STAG_IDX_S); + info->mr = true; + if (iwpbl->pbl_allocated) + info->dealloc_pbl = true; + + cqp_info->cqp_cmd = ZXDH_OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(iwdev->rf, cqp_request); + zxdh_put_cqp_request(&iwdev->rf->cqp, cqp_request); + + if (!status) + iwmr->is_hwreg = 0; + + return status; +} + +/* + * zxdh_rereg_mr_trans - Re-register a user MR for a change translation. + * @iwmr: ptr of iwmr + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * + * Re-register a user memory region when a change translation is requested. + * Re-register a new region while reusing the stag from the original registration. 
+ */ +struct ib_mr *zxdh_rereg_mr_trans(struct zxdh_mr *iwmr, u64 start, u64 len, + u64 virt, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(iwmr->ibmr.device); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + struct zxdh_pble_alloc *palloc = &iwpbl->pble_alloc; + struct ib_pd *pd = iwmr->ibmr.pd; + struct ib_umem *region; + bool use_pbles; + int err; + + region = ib_umem_get(pd->device, start, len, iwmr->access); + + if (IS_ERR(region)) { + pr_err("VERBS: Failed to create ib_umem region\n"); + return (struct ib_mr *)region; + } + + iwmr->region = region; + iwmr->ibmr.iova = virt; + iwmr->ibmr.pd = pd; + iwmr->page_size = PAGE_SIZE; + +#ifdef SET_BEST_PAGE_SZ_V2 + iwmr->page_size = + ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M | SZ_1G, virt); + if (unlikely(!iwmr->page_size)) { + ib_umem_release(region); + return ERR_PTR(-EOPNOTSUPP); + } +#endif + iwmr->len = region->length; + iwpbl->user_base = virt; +#ifdef rdma_umem_for_each_dma_block +#ifdef ib_umem_num_dma_blocks + iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif +#else + iwmr->page_cnt = + zxdh_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); +#endif + + use_pbles = (iwmr->page_cnt != 1); + + err = zxdh_setup_pbles(iwdev->rf, iwmr, use_pbles, PBLE_MR); // mr + if (err) + goto error; + + if (use_pbles) { + err = zxdh_check_mr_contiguous(palloc, iwmr->page_size); + if (err) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + } + + err = zxdh_hwreg_mr(iwdev, iwmr, iwmr->access); + if (err) + goto error; + + return &iwmr->ibmr; + +error: + if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + ib_umem_release(region); + iwmr->region = NULL; + + return ERR_PTR(err); +} + +/** + * zxdh_reg_phys_mr - register kernel physical memory + * @pd: ibpd pointer 
+ * @addr: physical address of memory to register + * @size: size of memory to register + * @access: Access rights + * @iova_start: start of virtual address for physical buffers + */ +struct ib_mr *zxdh_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, + u64 *iova_start) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_pbl *iwpbl; + struct zxdh_mr *iwmr; + u32 stag; + int ret; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = ZXDH_MEMREG_TYPE_MEM; + iwpbl->user_base = *iova_start; + stag = zxdh_create_stag(iwdev); + if (!stag) { + ret = -ENOMEM; + goto err; + } + + iwmr->stag = stag; + iwmr->ibmr.iova = *iova_start; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->page_cnt = 1; + iwmr->pgaddrmem[0] = addr; + iwmr->len = size; + iwmr->page_size = SZ_4K; + ret = zxdh_hwreg_mr(iwdev, iwmr, access); + if (ret) { + zxdh_free_stag(iwdev, stag); + goto err; + } + + return &iwmr->ibmr; + +err: + kfree(iwmr); + + return ERR_PTR(ret); +} + +/** + * zxdh_get_dma_mr - register physical mem + * @pd: ptr of pd + * @acc: access for memory + */ +static struct ib_mr *zxdh_get_dma_mr(struct ib_pd *pd, int acc) +{ + u64 kva = 0; + + return zxdh_reg_phys_mr(pd, 0, 0, acc, &kva); +} + +/** + * zxdh_del_memlist - Deleting pbl list entries for CQ/QP + * @iwmr: iwmr for IB's user page addresses + * @ucontext: ptr to user context + */ +void zxdh_del_memlist(struct zxdh_mr *iwmr, struct zxdh_ucontext *ucontext) +{ + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + unsigned long flags; + + switch (iwmr->type) { + case ZXDH_MEMREG_TYPE_CQ: + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_QP: + 
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case ZXDH_MEMREG_TYPE_SRQ: + spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags); + break; + default: + break; + } +} + +/** + * zxdh_copy_sg_list - copy sg list for qp + * @sg_list: copied into sg_list + * @sgl: copy from sgl + * @num_sges: count of sg entries + */ +static void zxdh_copy_sg_list(struct zxdh_sge *sg_list, struct ib_sge *sgl, + int num_sges) +{ + unsigned int i; + + for (i = 0; i < num_sges; i++) { + sg_list[i].tag_off = sgl[i].addr; + sg_list[i].len = sgl[i].length; + sg_list[i].stag = sgl[i].lkey; + } +} + +/** + * zxdh_get_inline_data - get inline_multi_sge data + * @inline_data: uint8_t* + * @ib_wr: work request ptr + * @len: sge total length + */ +static int zxdh_get_inline_data(uint8_t *inline_data, + const struct ib_send_wr *ib_wr, __u32 *len) +{ + int num = 0; + int offset = 0; + + while (num < ib_wr->num_sge) { + *len += ib_wr->sg_list[num].length; + if (*len > ZXDH_MAX_INLINE_DATA_SIZE) { + pr_err("err:inline bytes over max inline length\n"); + return -EINVAL; + } + memcpy(inline_data + offset, + (void *)(uintptr_t)ib_wr->sg_list[num].addr, + ib_wr->sg_list[num].length); + offset += ib_wr->sg_list[num].length; + num++; + } + return 0; +} + +/** + * zxdh_post_send - kernel application wr + * @ibqp: qp ptr for wr + * @ib_wr: work request ptr + * @bad_wr: return of bad wr if err + */ +static int zxdh_post_send(struct ib_qp *ibqp, + kc_typeq_ib_wr struct ib_send_wr *ib_wr, + kc_typeq_ib_wr struct ib_send_wr **bad_wr) +{ + struct zxdh_qp *iwqp; + struct zxdh_qp_uk *ukqp; + struct zxdh_sc_dev *dev; + struct zxdh_post_sq_info info; + int err = 0; + unsigned long flags; + struct 
zxdh_ah *ah; + + iwqp = to_iwqp(ibqp); + ukqp = &iwqp->sc_qp.qp_uk; + dev = &iwqp->iwdev->rf->sc_dev; + + if (iwqp->iwarp_state != ZXDH_QPS_RTS && !iwqp->flush_issued) { + *bad_wr = ib_wr; + pr_info("err:post send at state:%d\n", iwqp->iwarp_state); + return -EINVAL; + } + if (dev->hw_attrs.self_health == true) { + return -EINVAL; + } + + spin_lock_irqsave(&iwqp->lock, flags); + while (ib_wr) { + memset(&info, 0, sizeof(info)); + info.wr_id = (ib_wr->wr_id); + if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) + info.signaled = true; + if (ib_wr->send_flags & IB_SEND_FENCE) + info.read_fence = true; + switch (ib_wr->opcode) { + case IB_WR_SEND_WITH_IMM: + if (ukqp->qp_caps & ZXDH_SEND_WITH_IMM) { + info.imm_data_valid = true; + info.imm_data = ntohl(ib_wr->ex.imm_data); + } else { + err = -EINVAL; + break; + } + fallthrough; + case IB_WR_SEND: + case IB_WR_SEND_WITH_INV: + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.solicited = 1; + + if (ib_wr->opcode == IB_WR_SEND) { + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) + info.op_type = ZXDH_OP_TYPE_UD_SEND; + else + info.op_type = ZXDH_OP_TYPE_SEND; + } else if (ib_wr->opcode == IB_WR_SEND_WITH_IMM) { + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) + info.op_type = + ZXDH_OP_TYPE_UD_SEND_WITH_IMM; + else + info.op_type = + ZXDH_OP_TYPE_SEND_WITH_IMM; + } else { + info.op_type = ZXDH_OP_TYPE_SEND_INV; + info.stag_to_inv = ib_wr->ex.invalidate_rkey; + } + + if ((ib_wr->send_flags & IB_SEND_INLINE) && + (ib_wr->num_sge != 0)) { + err = zxdh_get_inline_data( + iwqp->inline_data, ib_wr, + &info.op.inline_send.len); + if (err) { + pr_err("err: get_inline_data failed\n"); + spin_unlock_irqrestore(&iwqp->lock, + flags); + return -EINVAL; + } + info.op.inline_send.data = iwqp->inline_data; + + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) { + ah = to_iwah(ud_wr(ib_wr)->ah); + info.op.inline_send.ah_id = + 
ah->sc_ah.ah_info.ah_idx; + info.op.inline_send.qkey = + ud_wr(ib_wr)->remote_qkey; + info.op.inline_send.dest_qp = + ud_wr(ib_wr)->remote_qpn; + err = zxdh_uk_ud_inline_send( + ukqp, &info, false); + } else { + err = zxdh_uk_rc_inline_send( + ukqp, &info, false); + } + } else { + info.op.send.num_sges = ib_wr->num_sge; + info.op.send.sg_list = + (struct zxdh_sge *)ib_wr->sg_list; + if (iwqp->ibqp.qp_type == IB_QPT_UD || + iwqp->ibqp.qp_type == IB_QPT_GSI) { + ah = to_iwah(ud_wr(ib_wr)->ah); + info.op.send.ah_id = + ah->sc_ah.ah_info.ah_idx; + info.op.send.qkey = + ud_wr(ib_wr)->remote_qkey; + info.op.send.dest_qp = + ud_wr(ib_wr)->remote_qpn; + err = zxdh_uk_ud_send(ukqp, &info, + false); + } else { + err = zxdh_uk_rc_send(ukqp, &info, + false); + } + } + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + if (ukqp->qp_caps & ZXDH_WRITE_WITH_IMM) { + info.imm_data_valid = true; + info.imm_data = ntohl(ib_wr->ex.imm_data); + } else { + err = -EINVAL; + break; + } + fallthrough; + case IB_WR_RDMA_WRITE: + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.solicited = 1; + + if (ib_wr->opcode == IB_WR_RDMA_WRITE) + info.op_type = ZXDH_OP_TYPE_WRITE; + else + info.op_type = ZXDH_OP_TYPE_WRITE_WITH_IMM; + + if ((ib_wr->send_flags & IB_SEND_INLINE) && + (ib_wr->num_sge != 0)) { + err = zxdh_get_inline_data( + iwqp->inline_data, ib_wr, + &info.op.inline_rdma_write.len); + if (err) { + pr_err("err: get_inline_data failed\n"); + spin_unlock_irqrestore(&iwqp->lock, + flags); + return -EINVAL; + } + info.op.inline_rdma_write.data = + iwqp->inline_data; + + info.op.inline_rdma_write.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.inline_rdma_write.rem_addr.stag = + rdma_wr(ib_wr)->rkey; + err = zxdh_uk_inline_rdma_write(ukqp, &info, + false); + } else { + info.op.rdma_write.lo_sg_list = + (void *)ib_wr->sg_list; + info.op.rdma_write.num_lo_sges = ib_wr->num_sge; + info.op.rdma_write.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.rdma_write.rem_addr.stag = + 
rdma_wr(ib_wr)->rkey; + err = zxdh_uk_rdma_write(ukqp, &info, false); + } + break; + case IB_WR_RDMA_READ: + if (ib_wr->num_sge > + dev->hw_attrs.uk_attrs.max_hw_read_sges) { + err = -EINVAL; + break; + } + info.op_type = ZXDH_OP_TYPE_READ; + info.op.rdma_read.rem_addr.tag_off = + rdma_wr(ib_wr)->remote_addr; + info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; + info.op.rdma_read.num_lo_sges = ib_wr->num_sge; + err = zxdh_uk_rdma_read(ukqp, &info, false); + break; + case IB_WR_LOCAL_INV: + info.op_type = ZXDH_OP_TYPE_LOCAL_INV; + info.op.inv_local_stag.target_stag = + ib_wr->ex.invalidate_rkey; + err = zxdh_uk_stag_local_invalidate(ukqp, &info, true); + break; + case IB_WR_REG_MR: { + struct zxdh_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); + struct zxdh_pble_alloc *palloc = + &iwmr->iwpbl.pble_alloc; + struct zxdh_fast_reg_stag_info stag_info = {}; + + stag_info.signaled = info.signaled; + stag_info.read_fence = info.read_fence; + stag_info.access_rights = + zxdh_get_mr_access(reg_wr(ib_wr)->access); + stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; + stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; + stag_info.page_size = reg_wr(ib_wr)->mr->page_size; + stag_info.wr_id = ib_wr->wr_id; + stag_info.addr_type = ZXDH_ADDR_TYPE_VA_BASED; + stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; + stag_info.total_len = iwmr->ibmr.length; + stag_info.reg_addr_pa = *palloc->level1.addr; + stag_info.first_pm_pbl_index = palloc->level1.idx; + stag_info.local_fence = ib_wr->send_flags & + IB_SEND_FENCE; + if (iwmr->npages > ZXDH_MIN_PAGES_PER_FMR) + stag_info.chunk_size = 1; + err = zxdh_sc_mr_fast_register(&iwqp->sc_qp, &stag_info, + true); + break; + } + default: + err = -EINVAL; + pr_err("VERBS: upost_send bad opcode = 0x%x\n", + ib_wr->opcode); + break; + } + + if (err) + break; + ib_wr = ib_wr->next; + } + + if (!iwqp->flush_issued && iwqp->iwarp_state == ZXDH_QPS_RTS) + zxdh_uk_qp_post_wr(ukqp); + else if 
(iwqp->flush_issued) + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (err) + *bad_wr = ib_wr; + + return err; +} + +/** + * zxdh_post_recv - post receive wr for kernel application + * @ibqp: ib qp pointer + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +static int zxdh_post_recv(struct ib_qp *ibqp, + kc_typeq_ib_wr struct ib_recv_wr *ib_wr, + kc_typeq_ib_wr struct ib_recv_wr **bad_wr) +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_qp_uk *ukqp = &iwqp->sc_qp.qp_uk; + struct zxdh_post_rq_info post_recv = {}; + struct zxdh_sge *sg_list = iwqp->sg_list; + unsigned long flags; + int err = 0; + + if (iwqp->iwarp_state == ZXDH_QPS_RESET || iwqp->is_srq) { + *bad_wr = ib_wr; + return -EINVAL; + } + if (iwqp->sc_qp.dev->hw_attrs.self_health == true) { + return -EINVAL; + } + + spin_lock_irqsave(&iwqp->lock, flags); + + while (ib_wr) { + if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) { + err = -EINVAL; + goto out; + } + post_recv.num_sges = ib_wr->num_sge; + post_recv.wr_id = ib_wr->wr_id; + zxdh_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); + post_recv.sg_list = sg_list; + err = zxdh_uk_post_receive(ukqp, &post_recv); + if (err) { + pr_err("VERBS: post_recv err %d\n", err); + goto out; + } + + ib_wr = ib_wr->next; + } + +out: + if (iwqp->flush_issued) + mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, + ZXDH_FLUSH_DELAY_MS); + else + zxdh_uk_qp_set_shadow_area(ukqp); + spin_unlock_irqrestore(&iwqp->lock, flags); + if (err) + *bad_wr = ib_wr; + + return err; +} + +/** + * zxdh_flush_err_to_ib_wc_status - return change flush error code to IB status + * @opcode: iwarp flush code + */ +static enum ib_wc_status +zxdh_flush_err_to_ib_wc_status(enum zxdh_flush_opcode opcode) +{ + switch (opcode) { + case FLUSH_PROT_ERR: + return IB_WC_LOC_PROT_ERR; + case FLUSH_REM_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case 
FLUSH_LOC_QP_OP_ERR: + return IB_WC_LOC_QP_OP_ERR; + case FLUSH_REM_OP_ERR: + return IB_WC_REM_OP_ERR; + case FLUSH_LOC_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case FLUSH_GENERAL_ERR: + return IB_WC_WR_FLUSH_ERR; + case FLUSH_MW_BIND_ERR: + return IB_WC_MW_BIND_ERR; + case FLUSH_REM_INV_REQ_ERR: + return IB_WC_REM_INV_REQ_ERR; + case FLUSH_RETRY_EXC_ERR: + return IB_WC_RETRY_EXC_ERR; + case FLUSH_FATAL_ERR: + default: + return IB_WC_FATAL_ERR; + } +} + +/** + * zxdh_process_cqe - process cqe info + * @entry: processed cqe + * @cq_poll_info: cqe info + */ +static void zxdh_process_cqe(struct ib_wc *entry, + struct zxdh_cq_poll_info *cq_poll_info) +{ + struct zxdh_qp *iwqp; + struct zxdh_sc_qp *qp; + + entry->wc_flags = 0; + entry->pkey_index = 0; + entry->wr_id = cq_poll_info->wr_id; + + qp = cq_poll_info->qp_handle; + iwqp = qp->qp_uk.back_qp; + entry->qp = qp->qp_uk.back_qp; + + if (cq_poll_info->error) { + entry->status = (cq_poll_info->comp_status == + ZXDH_COMPL_STATUS_FLUSHED) ? + zxdh_flush_err_to_ib_wc_status( + cq_poll_info->minor_err) : + IB_WC_GENERAL_ERR; + + entry->vendor_err = cq_poll_info->major_err << 16 | + cq_poll_info->minor_err; + } else { + entry->status = IB_WC_SUCCESS; + if (cq_poll_info->imm_valid) { + entry->ex.imm_data = htonl(cq_poll_info->imm_data); + entry->wc_flags |= IB_WC_WITH_IMM; + } + if (cq_poll_info->ud_smac_valid) { + ether_addr_copy(entry->smac, cq_poll_info->ud_smac); + entry->wc_flags |= IB_WC_WITH_SMAC; + } + + if (cq_poll_info->ud_vlan_valid && + iwqp->iwdev->rf->vlan_parse_en) { + u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK; + + entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; + if (vlan) { + entry->vlan_id = vlan; + entry->wc_flags |= IB_WC_WITH_VLAN; + } + } else { + entry->sl = 0; + } + } + + switch (cq_poll_info->op_type) { + case ZXDH_OP_TYPE_SEND: + case ZXDH_OP_TYPE_SEND_WITH_IMM: + case ZXDH_OP_TYPE_SEND_INV: + case ZXDH_OP_TYPE_UD_SEND: + case ZXDH_OP_TYPE_UD_SEND_WITH_IMM: + entry->opcode = IB_WC_SEND; + 
break; + case ZXDH_OP_TYPE_WRITE: + case ZXDH_OP_TYPE_WRITE_WITH_IMM: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case ZXDH_OP_TYPE_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case ZXDH_OP_TYPE_FAST_REG_MR: + entry->opcode = IB_WC_REG_MR; + break; + case ZXDH_OP_TYPE_LOCAL_INV: + entry->opcode = IB_WC_LOCAL_INV; + break; + case ZXDH_OP_TYPE_REC_IMM: + case ZXDH_OP_TYPE_REC: + entry->opcode = cq_poll_info->op_type == ZXDH_OP_TYPE_REC_IMM ? + IB_WC_RECV_RDMA_WITH_IMM : + IB_WC_RECV; + if (qp->qp_uk.qp_type != ZXDH_QP_TYPE_ROCE_UD && + cq_poll_info->stag_invalid_set) { + entry->ex.invalidate_rkey = cq_poll_info->inv_stag; + entry->wc_flags |= IB_WC_WITH_INVALIDATE; + } + break; + default: + pr_info("warnning: opcode = %d in CQE\n", cq_poll_info->op_type); + entry->status = IB_WC_GENERAL_ERR; + return; + } + + if (qp->qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_UD) { + entry->src_qp = cq_poll_info->ud_src_qpn; + entry->slid = 0; + entry->wc_flags |= (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); + entry->network_hdr_type = cq_poll_info->ipv4 ? 
+ RDMA_NETWORK_IPV4 : + RDMA_NETWORK_IPV6; + } else { + entry->src_qp = cq_poll_info->qp_id; + } + + entry->byte_len = cq_poll_info->bytes_xfered; +} + +/** + * zxdh_poll_one - poll one entry of the CQ + * @ukcq: ukcq to poll + * @cur_cqe: current CQE info to be filled in + * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ + * + * Returns the internal zrdma device error code or 0 on success + */ +static inline int zxdh_poll_one(struct zxdh_cq_uk *ukcq, + struct zxdh_cq_poll_info *cur_cqe, + struct ib_wc *entry) +{ + int ret = zxdh_uk_cq_poll_cmpl(ukcq, cur_cqe); + + if (ret) + return ret; + + zxdh_process_cqe(entry, cur_cqe); + + return 0; +} + +/** + * __zxdh_poll_cq - poll cq for completion (kernel apps) + * @iwcq: cq to poll + * @num_entries: number of entries to poll + * @entry: wr of a completed entry + */ +static int __zxdh_poll_cq(struct zxdh_cq *iwcq, int num_entries, + struct ib_wc *entry) +{ + struct list_head *tmp_node, *list_node; + struct zxdh_cq_buf *last_buf = NULL; + struct zxdh_cq_poll_info *cur_cqe = &iwcq->cur_cqe; + struct zxdh_cq_buf *cq_buf; + int ret; + struct zxdh_device *iwdev; + struct zxdh_cq_uk *ukcq; + bool cq_new_cqe = false; + int resized_bufs = 0; + int npolled = 0; + + iwdev = to_iwdev(iwcq->ibcq.device); + ukcq = &iwcq->sc_cq.cq_uk; + + /* go through the list of previously resized CQ buffers */ + list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { + cq_buf = container_of(list_node, struct zxdh_cq_buf, list); + while (npolled < num_entries) { + ret = zxdh_poll_one(&cq_buf->cq_uk, cur_cqe, + entry + npolled); + if (!ret) { + ++npolled; + cq_new_cqe = true; + continue; + } + if (ret == -ENOENT) + break; + /* QP using the CQ is destroyed. 
Skip reporting this CQE */ + if (ret == -EFAULT) { + cq_new_cqe = true; + continue; + } + goto error; + } + + /* save the resized CQ buffer which received the last cqe */ + if (cq_new_cqe) + last_buf = cq_buf; + cq_new_cqe = false; + } + + /* check the current CQ for new cqes */ + while (npolled < num_entries) { + ret = zxdh_poll_one(ukcq, cur_cqe, entry + npolled); + if (ret == -ENOENT) { + ret = zxdh_generated_cmpls(iwcq, cur_cqe); + if (!ret) + zxdh_process_cqe(entry + npolled, cur_cqe); + } + if (!ret) { + ++npolled; + cq_new_cqe = true; + continue; + } + + if (ret == -ENOENT) + break; + /* QP using the CQ is destroyed. Skip reporting this CQE */ + if (ret == -EFAULT) { + cq_new_cqe = true; + continue; + } + goto error; + } + + if (cq_new_cqe) + /* all previous CQ resizes are complete */ + resized_bufs = zxdh_process_resize_list(iwcq, iwdev, NULL); + else if (last_buf) + /* only CQ resizes up to the last_buf are complete */ + resized_bufs = zxdh_process_resize_list(iwcq, iwdev, last_buf); + if (resized_bufs) + /* report to the HW the number of complete CQ resizes */ + zxdh_uk_cq_set_resized_cnt(ukcq, resized_bufs); + + return npolled; +error: + pr_err("VERBS: %s: Error polling CQ, zxdh_err: %d\n", __func__, ret); + + return ret; +} + +/** + * zxdh_poll_cq - poll cq for completion (kernel apps) + * @ibcq: cq to poll + * @num_entries: number of entries to poll + * @entry: wr of a completed entry + */ +static int zxdh_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *entry) +{ + struct zxdh_cq *iwcq; + unsigned long flags; + int ret; + + iwcq = to_iwcq(ibcq); + if ((iwcq == NULL) || (iwcq->sc_cq.cq_uk.valid_cq == false)) { + return 0; + } + spin_lock_irqsave(&iwcq->lock, flags); + ret = __zxdh_poll_cq(iwcq, num_entries, entry); + spin_unlock_irqrestore(&iwcq->lock, flags); + + return ret; +} + +/** + * zxdh_req_notify_cq - arm cq kernel application + * @ibcq: cq to arm + * @notify_flags: notofication flags + */ +static int zxdh_req_notify_cq(struct 
ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct zxdh_cq *iwcq; + struct zxdh_cq_uk *ukcq; + unsigned long flags; + enum zxdh_cmpl_notify cq_notify = ZXDH_CQ_COMPL_EVENT; + bool promo_event = false; + int ret = 0; + + iwcq = to_iwcq(ibcq); + ukcq = &iwcq->sc_cq.cq_uk; + + spin_lock_irqsave(&iwcq->lock, flags); + if (ukcq->valid_cq == false) { + spin_unlock_irqrestore(&iwcq->lock, flags); + return 0; + } + if (notify_flags == IB_CQ_SOLICITED) { + cq_notify = ZXDH_CQ_COMPL_SOLICITED; + } else { + if (iwcq->last_notify == ZXDH_CQ_COMPL_SOLICITED) + promo_event = true; + } + + if (!iwcq->armed || promo_event) { + iwcq->armed = true; + iwcq->last_notify = cq_notify; + zxdh_uk_cq_request_notification(ukcq, cq_notify); + } + + if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !zxdh_cq_empty(iwcq)) + ret = 1; + spin_unlock_irqrestore(&iwcq->lock, flags); + + return ret; +} + +#ifdef ALLOC_HW_STATS_STRUCT_V2 +const struct rdma_stat_desc zxdh_hw_stat_descs[] = { + /*32-bit */ + [HW_STAT_DUPLICATE_REQUEST].name = "duplicate_request", + [HW_STAT_NP_CNP_SENT].name = "np_cnp_sent", + [HW_STAT_NP_ECN_MARKED_ROCE_PACKETS].name = + "np_ecn_marked_roce_packets", + [HW_STAT_OUT_OF_SEQUENCE].name = "out_of_sequence", + [HW_STAT_PACKET_SEQ_ERR].name = "packet_seq_err", + [HW_STAT_REQ_CQE_ERROR].name = "req_cqe_error", + [HW_STAT_REQ_REMOTE_ACCESS_ERRORS].name = "req_remote_access_errors", + [HW_STAT_REQ_REMOTE_INVALID_REQUEST].name = + "req_remote_invalid_request", + [HW_STAT_REQ_REMOTE_OPERATION_ERRORS].name = + "req_remote_operation_errors", + [HW_STAT_REQ_LOCAL_LENGTH_ERROR].name = "req_local_length_error", + [HW_STAT_RESP_CQE_ERROR].name = "resp_cqe_error", + [HW_STAT_RESP_REMOTE_ACCESS_ERRORS].name = "resp_remote_access_errors", + [HW_STAT_RESP_REMOTE_INVALID_REQUEST].name = + "resp_remote_invalid_request", + [HW_STAT_RESP_REMOTE_OPERATION_ERRORS].name = + "resp_remote_operation_errors", + [HW_STAT_RESP_RNR_NAK].name = "resp_rnr_nak", + 
[HW_STAT_RNR_NAK_RETRY_ERR].name = "rnr_nak_retry_err", + [HW_STAT_RP_CNP_HANDLED].name = "rp_cnp_handled", + [HW_STAT_RX_READ_REQUESTS].name = "rx_read_requests", + [HW_STAT_RX_WRITE_REQUESTS].name = "rx_write_requests", + [HW_STAT_RX_ICRC_ENCAPSULATED].name = "rx_icrc_encapsulated", + [HW_STAT_ROCE_SLOW_RESTART_CNPS].name = "roce_slow_restart_cnps", + [HW_STAT_RDMA_TX_PKTS].name = "rdma_tx_pkts", + [HW_STAT_RDMA_TX_BYTES].name = "rdma_tx_bytes", + [HW_STAT_RDMA_RX_PKTS].name = "rdma_rx_pkts", + [HW_STAT_RDMA_RX_BYTES].name = "rdma_rx_bytes", +}; + +#endif /* ALLOC_HW_STATS_STRUCT_V2 */ + +/** + * zxdh_query_ah - Query address handle + * @ibah: pointer to address handle + * @ah_attr: address handle attributes + */ +static int zxdh_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + struct zxdh_ah *ah = to_iwah(ibah); + + memset(ah_attr, 0, sizeof(*ah_attr)); + if (ah->av.attrs.ah_flags & IB_AH_GRH) { + ah_attr->ah_flags = IB_AH_GRH; + ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; + ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; + ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; + ah_attr->grh.sgid_index = ah->sgid_index; + ah_attr->grh.sgid_index = ah->sgid_index; + memcpy(&ah_attr->grh.dgid, &ah->dgid, + sizeof(ah_attr->grh.dgid)); + } + + return 0; +} + +static __be64 zxdh_mac_to_guid(struct net_device *ndev) +{ + const unsigned char *mac = ndev->dev_addr; + __be64 guid; + unsigned char *dst = (unsigned char *)&guid; + + dst[0] = mac[0] ^ 2; + dst[1] = mac[1]; + dst[2] = mac[2]; + dst[3] = 0xff; + dst[4] = 0xfe; + dst[5] = mac[3]; + dst[6] = mac[4]; + dst[7] = mac[5]; + + return guid; +} + +static ssize_t hca_type_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct zxdh_device *iwdev = + zxdh_rdma_device_to_drv_device(device, ibdev); + +#if (KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE) + return sysfs_emit(buf, "%d\n", iwdev->rf->pcidev->device); +#else + return sprintf(buf, "%d\n", 
iwdev->rf->pcidev->device); +#endif +} + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct zxdh_device *iwdev = + zxdh_rdma_device_to_drv_device(device, ibdev); + +#if (KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE) + return sysfs_emit(buf, "%x\n", iwdev->rf->pcidev->revision); +#else + return sprintf(buf, "%x\n", iwdev->rf->pcidev->revision); +#endif +} + +#ifdef ZXDH_UAPI_DEF +static DEVICE_ATTR_RO(hca_type); +static DEVICE_ATTR_RO(hw_rev); + +static struct attribute *zxdh_class_attributes[] = { + &dev_attr_hw_rev.attr, + &dev_attr_hca_type.attr, + NULL, +}; + +static const struct attribute_group zxdh_attr_group = { + .attrs = zxdh_class_attributes, +}; + +static inline void +zxdh_set_device_sysfs_group(struct ib_device *dev, + const struct attribute_group *group) +{ + dev->groups[1] = group; +} +#else +static DEVICE_ATTR(hw_rev, S_IRUGO, hw_rev_show, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, hca_type_show, NULL); + +static struct device_attribute *zxdh_class_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type, +}; + +static int zxdh_class_attr_init(struct zxdh_device *iwdev) +{ + int err; + int i; + + for (i = 0; i < ARRAY_SIZE(zxdh_class_attributes); i++) { + err = device_create_file(&iwdev->ibdev.dev, zxdh_class_attributes[i]); + if (err) { + while (i > 0) { + i--; + device_remove_file(&iwdev->ibdev.dev, + zxdh_class_attributes[i]); + } + return err; + } + } + return 0; +} +#endif + +#ifdef IB_GET_NETDEV_OP_NOT_DEPRECATED +static struct net_device *zxdh_get_netdev(struct ib_device *ibdev, u8 port_num) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + if (iwdev->netdev) { + dev_hold(iwdev->netdev); + return iwdev->netdev; + } + + return NULL; +} + +#endif +#ifdef HAS_IB_SET_DEVICE_OP +static struct ib_device_ops zxdh_roce_dev_ops; +static const struct ib_device_ops zxdh_dev_ops = { +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || 
defined(RHEL_8_4) || defined(RHEL_8_5) || defined(KYLIN_V10_4) || \ + ((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_ZXDH, + .uverbs_abi_ver = ZXDH_ABI_VER, +#endif +#if defined(ALLOC_HW_STATS_V3) + .alloc_hw_port_stats = zxdh_alloc_hw_port_stats, +#else + .alloc_hw_stats = zxdh_alloc_hw_stats, +#endif + .alloc_mr = zxdh_alloc_mr, + .alloc_mw = zxdh_alloc_mw, + .alloc_pd = zxdh_alloc_pd, + .alloc_ucontext = zxdh_alloc_ucontext, + .create_cq = zxdh_create_cq, + .create_qp = zxdh_create_qp, + .create_srq = zxdh_create_srq, +#ifdef IB_DEALLOC_DRIVER_SUPPORT + .dealloc_driver = zxdh_ib_dealloc_device, +#endif + .dealloc_mw = zxdh_dealloc_mw, + .dealloc_pd = zxdh_dealloc_pd, + .dealloc_ucontext = zxdh_dealloc_ucontext, + .dereg_mr = zxdh_dereg_mr, + .destroy_cq = zxdh_destroy_cq, + .destroy_qp = zxdh_destroy_qp, + .destroy_srq = zxdh_destroy_srq, + .disassociate_ucontext = zxdh_disassociate_ucontext, + .get_dev_fw_str = zxdh_get_dev_fw_str, + .get_dma_mr = zxdh_get_dma_mr, + .get_hw_stats = zxdh_get_hw_stats, +#ifdef IB_GET_NETDEV_OP_NOT_DEPRECATED + .get_netdev = zxdh_get_netdev, +#endif + .map_mr_sg = zxdh_map_mr_sg, + .mmap = zxdh_mmap, +#ifdef RDMA_MMAP_DB_SUPPORT + .mmap_free = zxdh_mmap_free, +#endif + .poll_cq = zxdh_poll_cq, + .post_recv = zxdh_post_recv, + .post_send = zxdh_post_send, + .post_srq_recv = zxdh_post_srq_recv, + .process_mad = zxdh_process_mad, + .query_device = zxdh_query_device, + .query_port = zxdh_query_port, + .modify_port = zxdh_modify_port, + .query_qp = zxdh_query_qp, + .query_srq = zxdh_query_srq, + .reg_user_mr = zxdh_reg_user_mr, + .rereg_user_mr = zxdh_rereg_user_mr, + .req_notify_cq = zxdh_req_notify_cq, + .resize_cq = zxdh_resize_cq, + .modify_srq = zxdh_modify_srq, + .modify_cq = zxdh_modify_cq, +#if KERNEL_VERSION(5, 14, 0) <= LINUX_VERSION_CODE || defined OFED_USE_DEVICE_GROUP + .device_group = &zxdh_attr_group, +#endif +#ifdef INIT_RDMA_OBJ_SIZE + 
INIT_RDMA_OBJ_SIZE(ib_pd, zxdh_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_ucontext, zxdh_ucontext, ibucontext), + INIT_RDMA_OBJ_SIZE(ib_srq, zxdh_srq, ibsrq), +#if KERNEL_VERSION(5, 2, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || defined(RHEL_8_4) || defined(RHEL_8_5) || defined(KYLIN_V10_4) || \ + ((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + INIT_RDMA_OBJ_SIZE(ib_ah, zxdh_ah, ibah), +#endif /* 5.2.0 */ +#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_2) || \ + defined(RHEL_8_3) || defined(RHEL_8_4) || defined(RHEL_8_5) || defined(KYLIN_V10_4) || \ + ((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + INIT_RDMA_OBJ_SIZE(ib_cq, zxdh_cq, ibcq), +#endif /* 5.3.0 */ +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE || defined(KYLIN_V10_4) || \ + ((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + INIT_RDMA_OBJ_SIZE(ib_mw, zxdh_mr, ibmw), +#endif /* 5.10.0 */ +#ifdef GLOBAL_QP_MEM + INIT_RDMA_OBJ_SIZE(ib_qp, zxdh_qp, ibqp), +#endif /* GLOBAL_QP_MEM */ +#endif /* INIT_RDMA_OBJ_SIZE */ +}; + +#endif /* HAS_IB_SET_DEVICE_OP */ +static void zxdh_set_device_ops(struct ib_device *ibdev) +{ +#ifndef HAS_IB_SET_DEVICE_OP + struct ib_device *dev_ops = ibdev; + +#if defined(RHEL_7_7) || defined(RHEL_7_8) || defined(RHEL_7_9) || \ + defined(RHEL_8_2) || defined(RHEL_8_3) || defined(RHEL_8_4) || \ + defined(RHEL_8_5) || defined(KYLIN_V10_4) + dev_ops->uverbs_abi_ver = ZXDH_ABI_VER; + dev_ops->driver_id = RDMA_DRIVER_ZXDH; + dev_ops->owner = THIS_MODULE; +#endif + dev_ops->alloc_hw_stats = zxdh_alloc_hw_stats; + dev_ops->alloc_mr = zxdh_alloc_mr; + dev_ops->alloc_mw = zxdh_alloc_mw; + dev_ops->alloc_pd = zxdh_alloc_pd; + dev_ops->alloc_ucontext = zxdh_alloc_ucontext; + dev_ops->create_cq = zxdh_create_cq; + dev_ops->create_qp = zxdh_create_qp; + dev_ops->create_srq = zxdh_create_srq; +#ifdef IB_DEALLOC_DRIVER_SUPPORT + dev_ops->dealloc_driver 
= zxdh_ib_dealloc_device, +#endif + dev_ops->dealloc_mw = zxdh_dealloc_mw; + dev_ops->dealloc_pd = zxdh_dealloc_pd; + dev_ops->dealloc_ucontext = zxdh_dealloc_ucontext; + dev_ops->dereg_mr = zxdh_dereg_mr; + dev_ops->destroy_cq = zxdh_destroy_cq; + dev_ops->destroy_qp = zxdh_destroy_qp; + dev_ops->destroy_srq = zxdh_destroy_srq; + dev_ops->disassociate_ucontext = zxdh_disassociate_ucontext; + dev_ops->get_dev_fw_str = zxdh_get_dev_fw_str; + dev_ops->get_dma_mr = zxdh_get_dma_mr; + dev_ops->get_hw_stats = zxdh_get_hw_stats; +#ifndef HAS_IB_SET_DEVICE_OP + dev_ops->get_netdev = zxdh_get_netdev; +#endif + dev_ops->map_mr_sg = zxdh_map_mr_sg; + dev_ops->mmap = zxdh_mmap; +#ifdef RDMA_MMAP_DB_SUPPORT + dev_ops->mmap_free = zxdh_mmap_free; +#endif + dev_ops->poll_cq = zxdh_poll_cq; + dev_ops->post_recv = zxdh_post_recv; + dev_ops->post_send = zxdh_post_send; + dev_ops->post_srq_recv = zxdh_post_srq_recv; + dev_ops->process_mad = zxdh_process_mad; + dev_ops->query_device = zxdh_query_device; + dev_ops->query_port = zxdh_query_port; + dev_ops->modify_port = zxdh_modify_port; + dev_ops->query_qp = zxdh_query_qp; + dev_ops->query_srq = zxdh_query_srq; + dev_ops->reg_user_mr = zxdh_reg_user_mr; + dev_ops->rereg_user_mr = zxdh_rereg_user_mr; + dev_ops->req_notify_cq = zxdh_req_notify_cq; + dev_ops->resize_cq = zxdh_resize_cq; + dev_ops->modify_cq = zxdh_modify_cq; +#else + ib_set_device_ops(ibdev, &zxdh_dev_ops); +#endif +#ifdef ZXDH_UAPI_DEF + zxdh_set_device_sysfs_group(ibdev, &zxdh_attr_group); +#endif +} + +static void zxdh_set_device_roce_ops(struct ib_device *ibdev) +{ +#ifdef HAS_IB_SET_DEVICE_OP + struct ib_device_ops *dev_ops = &zxdh_roce_dev_ops; +#else + struct ib_device *dev_ops = ibdev; +#endif + dev_ops->create_ah = zxdh_create_ah; +#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE || defined(RHEL_8_5) || \ + defined(CREATE_USER_AH) || defined(KYLIN_V10_4) + dev_ops->create_user_ah = zxdh_create_ah; +#endif + dev_ops->destroy_ah = zxdh_destroy_ah; + 
dev_ops->get_link_layer = zxdh_get_link_layer; + dev_ops->get_port_immutable = zxdh_roce_port_immutable; + dev_ops->modify_qp = zxdh_modify_qp_roce; + dev_ops->modify_srq = zxdh_modify_srq; + dev_ops->query_ah = zxdh_query_ah; + dev_ops->query_gid = zxdh_query_gid_roce; + dev_ops->query_pkey = zxdh_query_pkey; + kc_set_ibdev_add_del_gid(ibdev); +#ifdef HAS_IB_SET_DEVICE_OP + ib_set_device_ops(ibdev, &zxdh_roce_dev_ops); +#endif +} + +/** + * zxdh_init_roce_device - initialization of roce rdma device + * @iwdev: zrdma device + */ +static void zxdh_init_roce_device(struct zxdh_device *iwdev) +{ + iwdev->ibdev.node_type = RDMA_NODE_IB_CA; + iwdev->ibdev.node_guid = zxdh_mac_to_guid(iwdev->netdev); + zxdh_set_device_roce_ops(&iwdev->ibdev); +} + +#ifdef ZXDH_UAPI_DEF +static const struct uapi_definition zxdh_ib_defs[] = { + UAPI_DEF_CHAIN(zxdh_ib_dev_defs), + {} +}; +#endif + +/** + * zxdh_init_rdma_device - initialization of rdma device + * @iwdev: zrdma device + */ +static int zxdh_init_rdma_device(struct zxdh_device *iwdev) +{ + struct pci_dev *pcidev = iwdev->rf->pcidev; + +#if KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) && !defined(KYLIN_V10_4) && \ + !((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + iwdev->ibdev.owner = THIS_MODULE; + iwdev->ibdev.uverbs_abi_ver = ZXDH_ABI_VER; +#endif + + if (iwdev->roce_mode) + zxdh_init_roce_device(iwdev); + else + return -EPFNOSUPPORT; + + iwdev->ibdev.phys_port_cnt = 1; + iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; + iwdev->ibdev.dev.parent = &pcidev->dev; + set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev); + zxdh_set_device_ops(&iwdev->ibdev); + +#ifdef IB_DEV_OPS_FILL_ENTRY + zxdh_set_restrack_ops(&iwdev->ibdev); +#endif /* >= 5.9.0 */ +#ifdef ZXDH_UAPI_DEF + iwdev->ibdev.driver_def = zxdh_ib_defs; +#else + zxdh_get_dri_specs(iwdev); +#endif + return 0; +} + +#ifndef ZXDH_UAPI_DEF +int 
zxdh_get_dri_specs(struct zxdh_device *iwdev) +{ + const struct uverbs_object_tree_def **trees = iwdev->driver_trees; + + trees[0] = zxdh_ib_get_devx_tree(); + iwdev->ibdev.driver_specs = trees; + + return 0; +} +#endif + +/** + * zxdh_port_ibevent - indicate port event + * @iwdev: zrdma device + */ +void zxdh_port_ibevent(struct zxdh_device *iwdev) +{ + struct ib_event event; + + event.device = &iwdev->ibdev; + event.element.port_num = 1; + event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : + IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + +/** + * zxdh_ib_unregister_device - unregister rdma device from IB + * core + * @iwdev: zrdma device + */ +void zxdh_ib_unregister_device(struct zxdh_device *iwdev) +{ + iwdev->iw_status = 0; + zxdh_port_ibevent(iwdev); + ib_unregister_device(&iwdev->ibdev); +#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) && !defined(KYLIN_V10_4) && \ + !((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + kfree(iwdev->ibdev.iwcm); + iwdev->ibdev.iwcm = NULL; +#endif +} + +/** + * zxdh_ib_register_device - register zrdma device to IB core + * @iwdev: zrdma device + */ +int zxdh_ib_register_device(struct zxdh_device *iwdev) +{ + int ret; + + ret = zxdh_init_rdma_device(iwdev); + if (ret) + return ret; + + kc_set_driver_id(iwdev->ibdev); +#ifdef NETDEV_TO_IBDEV_SUPPORT + ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1); + if (ret) + goto error; + pr_info("ib register device, update dpp mac tbl\n"); + zxdh_update_dpp_mac_tbl(iwdev, iwdev->rf->cdev); +#endif +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE +#ifdef CONFIG_SUSE_KERNEL +#if SLE_VERSION(15, 0, 0) >= SLE_VERSION_CODE + strlcpy(iwdev->ibdev.name, "zrdma%d", IB_DEVICE_NAME_MAX); +#endif /* SLE_VERSION_CODE */ +#else + strlcpy(iwdev->ibdev.name, "zrdma%d", IB_DEVICE_NAME_MAX); +#endif /* CONFIG_SUSE_KERNEL */ +#endif /* LINUX_VERSION_CODE */ 
+#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE + dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX); +#endif + ret = kc_ib_register_device(&iwdev->ibdev, "zrdma%d", + iwdev->rf->hw.device); + if (ret) + goto error; + + iwdev->iw_status = 1; + zxdh_port_ibevent(iwdev); + +#ifndef ZXDH_UAPI_DEF + ret = zxdh_class_attr_init(iwdev); + if (ret) + goto error; +#endif + return 0; + +error: +#if KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE && !defined(RHEL_8_2) && \ + !defined(RHEL_8_3) && !defined(RHEL_8_4) && !defined(RHEL_8_5) && !defined(KYLIN_V10_4) && \ + !((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) + kfree(iwdev->ibdev.iwcm); + iwdev->ibdev.iwcm = NULL; +#endif + if (ret) + pr_err("VERBS: Register RDMA device fail\n"); + + return ret; +} + +#ifdef IB_DEALLOC_DRIVER_SUPPORT +/** + * zxdh_ib_dealloc_device + * @ibdev: ib device + * + * callback from ibdev dealloc_driver to deallocate resources + * unber zrdma device + */ +void zxdh_ib_dealloc_device(struct ib_device *ibdev) +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + zxdh_rt_deinit_hw(iwdev); + zxdh_ctrl_deinit_hw(iwdev->rf); + zxdh_del_handler(iwdev->hdl); + kfree(iwdev->hdl); + kfree(iwdev->rf); +} +#endif diff --git a/drivers/infiniband/hw/zrdma/verbs.h b/drivers/infiniband/hw/zrdma/verbs.h new file mode 100644 index 000000000000..4891c5d3139a --- /dev/null +++ b/drivers/infiniband/hw/zrdma/verbs.h @@ -0,0 +1,382 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VERBS_H +#define ZXDH_VERBS_H + +/* Forward declarations */ +struct zxdh_rdma_to_eth_ip_para; + +#define ZXDH_MAX_SAVED_PHY_PGADDR 4 +#define ZXDH_FLUSH_DELAY_MS 200 + +#define ZXDH_MAX_CQ_COUNT 0xFFFF +#define ZXDH_MAX_CQ_PERIOD 0x7FF + +#define US_TO_NS(us) ((us)*1000) +#define NS_TO_US(ns) ((ns) / 1000) + +#define ZXDH_PKEY_TBL_SZ 1 +#define ZXDH_DEFAULT_PKEY 0xFFFF +#define ZXDH_MAX_AH 0x7FFFFFFF +#define ZXDH_MAX_AH_LIST 
0x20000 + +#define ZRDMA_UDP_SPORT_BASE (50000) +#define ZRDMA_UDP_SPORT_NUM (15500) + +#define ZXDH_MAILBOX_ADDR_BUF_LEN 5 + +#define iwdev_to_idev(iwdev) (&(iwdev)->rf->sc_dev) + +struct zxdh_ucontext { + struct ib_ucontext ibucontext; + struct zxdh_device *iwdev; +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *sq_db_mmap_entry; + struct rdma_user_mmap_entry *cq_db_mmap_entry; + struct rdma_user_mmap_entry *srq_db_mmap_entry; +#else + struct zxdh_user_mmap_entry *sq_db_mmap_entry; + struct zxdh_user_mmap_entry *cq_db_mmap_entry; + struct zxdh_user_mmap_entry *srq_db_mmap_entry; + DECLARE_HASHTABLE(mmap_hash_tbl, 6); + spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */ +#endif + struct list_head cq_reg_mem_list; + spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */ + struct list_head qp_reg_mem_list; + spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */ + struct list_head srq_reg_mem_list; + spinlock_t srq_reg_mem_list_lock; /* protect QP memory list */ + /* FIXME: Move to kcompat ideally. 
Used < 4.20.0 for old diassasscoaite flow */ + struct list_head vma_list; + struct mutex vma_list_mutex; /* protect the vma_list */ + int abi_ver; + bool legacy_mode; +}; + +struct zxdh_pd { + struct ib_pd ibpd; + struct zxdh_sc_pd sc_pd; +}; + +struct zxdh_av { + u8 macaddr[16]; + struct rdma_ah_attr attrs; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + u8 net_type; +}; + +struct zxdh_ah { + struct ib_ah ibah; + struct zxdh_sc_ah sc_ah; + struct zxdh_pd *pd; + struct zxdh_av av; + u8 sgid_index; + union ib_gid dgid; + struct list_head list; + refcount_t refcnt; + struct zxdh_ah *parent_ah; /* AH from cached list */ +}; + +struct zxdh_hmc_pble { + union { + u32 idx; + dma_addr_t addr; + }; +}; + +struct zxdh_cq_mr { + struct zxdh_hmc_pble cq_pbl; + dma_addr_t shadow; + bool split; +}; + +struct zxdh_qp_mr { + struct zxdh_hmc_pble sq_pbl; + struct zxdh_hmc_pble rq_pbl; + dma_addr_t shadow; + struct page *sq_page; +}; + +struct zxdh_srq_mr { + struct zxdh_hmc_pble srq_pbl; + struct zxdh_hmc_pble srq_list_pbl; + struct page *srq_page; + dma_addr_t db_addr; +}; + +struct zxdh_cq_buf { + struct zxdh_dma_mem kmem_buf; + struct zxdh_cq_uk cq_uk; + struct zxdh_hw *hw; + struct list_head list; + struct work_struct work; +}; + +struct zxdh_pbl { + struct list_head list; + union { + struct zxdh_qp_mr qp_mr; + struct zxdh_cq_mr cq_mr; + struct zxdh_srq_mr srq_mr; + }; + + u8 pbl_allocated : 1; + u8 on_list : 1; + u64 user_base; + struct zxdh_pble_alloc pble_alloc; + struct zxdh_mr *iwmr; +}; + +struct zxdh_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + }; + struct ib_umem *region; + struct zxdh_sc_dev *sc_dev; + int access; + u8 is_hwreg; + u16 type; + u32 page_cnt; + u64 page_size; + u64 page_msk; + u32 npages; + u32 stag; + u64 len; + u64 pgaddrmem[ZXDH_MAX_SAVED_PHY_PGADDR]; + struct zxdh_pbl iwpbl; +}; + +struct zxdh_cq { + struct ib_cq ibcq; + struct zxdh_sc_cq sc_cq; + u16 
cq_head; + u16 cq_size; + u32 cq_num; + bool user_mode; + bool armed; + enum zxdh_cmpl_notify last_notify; + u32 polled_cmpls; + u32 cq_mem_size; + struct zxdh_dma_mem kmem; + struct zxdh_dma_mem kmem_shadow; + struct completion free_cq; + refcount_t refcnt; + spinlock_t lock; /* for poll cq */ + struct zxdh_pbl *iwpbl; + struct zxdh_pbl *iwpbl_shadow; + struct list_head resize_list; + struct zxdh_cq_poll_info cur_cqe; + struct list_head cmpl_generated; +}; + +struct zxdh_cmpl_gen { + struct list_head list; + struct zxdh_cq_poll_info cpi; +}; + +struct mailbox_work { + struct work_struct work; + u64 op_ret_val; + __le64 addrbuf[ZXDH_MAILBOX_ADDR_BUF_LEN]; + struct zxdh_sc_dev *dev; +}; + +struct aeq_qp_work { + struct work_struct work; + struct zxdh_qp *iwqp; +}; + +struct iw_cm_id; + +struct zxdh_qp_kmode { + struct zxdh_dma_mem dma_mem; + struct zxdh_sq_uk_wr_trk_info *sq_wrid_mem; + u64 *rq_wrid_mem; +}; + +struct zxdh_srq_kmode { + struct zxdh_dma_mem dma_mem; + u64 *srq_wrid_mem; +}; + +struct zxdh_qp { + struct ib_qp ibqp; + struct zxdh_sc_qp sc_qp; + struct zxdh_device *iwdev; + struct zxdh_cq *iwscq; + struct zxdh_cq *iwrcq; + struct zxdh_pd *iwpd; + struct zxdh_srq *iwsrq; +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry *push_wqe_mmap_entry; + struct rdma_user_mmap_entry *push_db_mmap_entry; +#else + struct zxdh_user_mmap_entry *push_wqe_mmap_entry; + struct zxdh_user_mmap_entry *push_db_mmap_entry; +#endif + struct zxdh_qp_host_ctx_info ctx_info; + union { + struct zxdh_iwarp_offload_info iwarp_info; + struct zxdh_roce_offload_info roce_info; + }; + + union { + struct zxdh_tcp_offload_info tcp_info; + struct zxdh_udp_offload_info udp_info; + }; + + struct zxdh_ah roce_ah; + struct list_head teardown_entry; + refcount_t refcnt; + struct iw_cm_id *cm_id; + struct zxdh_cm_node *cm_node; + struct delayed_work dwork_flush; + struct ib_mr *lsmm_mr; + atomic_t hw_mod_qp_pend; + enum ib_qp_state ibqp_state; + u32 qp_mem_size; + u32 last_aeq; + int 
max_send_wr; + int max_recv_wr; + atomic_t close_timer_started; + spinlock_t lock; /* serialize posting WRs to SQ/RQ */ + struct zxdh_qp_context *iwqp_context; + void *pbl_vbase; + dma_addr_t pbl_pbase; + struct page *page; + u8 active_conn : 1; + u8 user_mode : 1; + u8 hte_added : 1; + u8 flush_issued : 1; + u8 sig_all : 1; + u8 pau_mode : 1; + u8 rsvd : 1; + u8 iwarp_state; + u16 term_sq_flush_code; + u16 term_rq_flush_code; + u8 hw_iwarp_state; + u8 hw_tcp_state; + u8 is_srq; + struct zxdh_qp_kmode kqp; + struct zxdh_dma_mem host_ctx; + struct timer_list terminate_timer; + struct zxdh_pbl *iwpbl; + struct zxdh_sge *sg_list; + struct zxdh_dma_mem ietf_mem; + struct completion free_qp; + wait_queue_head_t waitq; + wait_queue_head_t mod_qp_waitq; + u8 rts_ae_rcvd; + uint8_t inline_data[ZXDH_MAX_INLINE_DATA_SIZE]; +}; + +enum zxdh_mmap_flag { + ZXDH_MMAP_IO_NC, + ZXDH_MMAP_IO_WC, + ZXDH_MMAP_PFN, + ZXDH_MMAP_HMC, +}; + +struct zxdh_user_mmap_entry { +#ifdef RDMA_MMAP_DB_SUPPORT + struct rdma_user_mmap_entry rdma_entry; +#else + struct zxdh_ucontext *ucontext; + struct hlist_node hlist; + u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */ +#endif + u64 bar_offset; + u8 mmap_flag; +}; + +static inline u16 zxdh_fw_major_ver(struct zxdh_sc_dev *dev) +{ + return (u16)FIELD_GET(ZXDH_FW_VER_MAJOR, + dev->feature_info[ZXDH_FEATURE_FW_INFO]); +} + +static inline u16 zxdh_fw_minor_ver(struct zxdh_sc_dev *dev) +{ + return (u16)FIELD_GET(ZXDH_FW_VER_MINOR, + dev->feature_info[ZXDH_FEATURE_FW_INFO]); +} + +/** + * zxdh_mcast_mac_v4 - Get the multicast MAC for an IP address + * @ip_addr: IPv4 address + * @mac: pointer to result MAC address + * + */ +static inline void zxdh_mcast_mac_v4(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac4[ETH_ALEN] = { 0x01, 0x00, 0x5E, + ip[2] & 0x7F, ip[1], ip[0] }; + + ether_addr_copy(mac, mac4); +} + +/** + * zxdh_mcast_mac_v6 - Get the multicast MAC for an IP address + * @ip_addr: IPv6 
address + * @mac: pointer to result MAC address + * + */ +static inline void zxdh_mcast_mac_v6(u32 *ip_addr, u8 *mac) +{ + u8 *ip = (u8 *)ip_addr; + unsigned char mac6[ETH_ALEN] = { + 0x33, 0x33, ip[3], ip[2], ip[1], ip[0] + }; + + ether_addr_copy(mac, mac6); +} + +void *zxdh_zalloc_mapped(struct zxdh_device *dev, dma_addr_t *dma_addr, + size_t size, enum dma_data_direction dir); +void zxdh_free_mapped(struct zxdh_device *dev, void *cpu_addr, + dma_addr_t dma_addr, size_t size, + enum dma_data_direction dir); + +#ifdef RDMA_MMAP_DB_SUPPORT +struct rdma_user_mmap_entry * +zxdh_user_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset); +struct rdma_user_mmap_entry * +zxdh_cap_mmap_entry_insert(struct zxdh_ucontext *ucontext, void *address, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset); +struct rdma_user_mmap_entry * +zxdh_mp_mmap_entry_insert(struct zxdh_ucontext *ucontext, u64 phy_addr, + size_t length, enum zxdh_mmap_flag mmap_flag, + u64 *mmap_offset); +#else +struct zxdh_user_mmap_entry * +zxdh_user_mmap_entry_add_hash(struct zxdh_ucontext *ucontext, u64 bar_offset, + enum zxdh_mmap_flag mmap_flag, u64 *mmap_offset); +void zxdh_user_mmap_entry_del_hash(struct zxdh_user_mmap_entry *entry); +#endif /* RDMA_MMAP_DB_SUPPORT */ +int zxdh_ib_register_device(struct zxdh_device *iwdev); +void zxdh_ib_unregister_device(struct zxdh_device *iwdev); +void zxdh_ib_dealloc_device(struct ib_device *ibdev); +void zxdh_ib_qp_event(struct zxdh_qp *iwqp, enum zxdh_qp_event_type event); +void zxdh_generate_flush_completions(struct zxdh_qp *iwqp); +void zxdh_remove_cmpls_list(struct zxdh_cq *iwcq); +int zxdh_generated_cmpls(struct zxdh_cq *iwcq, + struct zxdh_cq_poll_info *cq_poll_info); +void zxdh_flush_worker(struct work_struct *work); +void extract_version(const char *input, char *output); +#ifndef ZXDH_UAPI_DEF +int zxdh_get_dri_specs(struct zxdh_device *iwdev); +#endif +int 
remote_ip_info_process(struct zxdh_device *iwdev, struct zxdh_rdma_to_eth_ip_para *ip_para); +int del_qp_remote_ip_info(struct ib_qp *ibqp); +int qp_remote_ip_info_process(struct ib_qp *ibqp, int op_type); +#endif /* ZXDH_VERBS_H */ diff --git a/drivers/infiniband/hw/zrdma/vf.c b/drivers/infiniband/hw/zrdma/vf.c new file mode 100644 index 000000000000..4e75f5326bed --- /dev/null +++ b/drivers/infiniband/hw/zrdma/vf.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "vf.h" + +/** + * zxdh_manage_vf_pble_bp - manage vf pble + * @cqp: cqp for cqp' sq wqe + * @info: pble info + * @scratch: pointer for completion + * @post_sq: to post and ring + */ +int zxdh_manage_vf_pble_bp(struct zxdh_sc_cqp *cqp, + struct zxdh_manage_vf_pble_info *info, u64 scratch, + bool post_sq) +{ + __le64 *wqe; + u64 temp, hdr, pd_pl_pba; + + wqe = zxdh_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return -ENOSPC; + + temp = FIELD_PREP(ZXDH_CQPSQ_MVPBP_PD_ENTRY_CNT, info->pd_entry_cnt) | + FIELD_PREP(ZXDH_CQPSQ_MVPBP_FIRST_PD_INX, info->first_pd_index) | + FIELD_PREP(ZXDH_CQPSQ_MVPBP_SD_INX, info->sd_index); + set_64bit_val(wqe, 16, temp); + + pd_pl_pba = FIELD_PREP(ZXDH_CQPSQ_MVPBP_PD_PLPBA, info->pd_pl_pba >> 3); + set_64bit_val(wqe, 32, pd_pl_pba); + + hdr = FIELD_PREP(ZXDH_CQPSQ_MVPBP_INV_PD_ENT, + info->inv_pd_ent ? 
1 : 0) | + FIELD_PREP(ZXDH_CQPSQ_OPCODE, ZXDH_CQP_OP_MANAGE_VF_PBLE_BP) | + FIELD_PREP(ZXDH_CQPSQ_WQEVALID, cqp->polarity); + + dma_wmb(); /* make sure WQE is populated before valid bit is set */ + set_64bit_val(wqe, 24, hdr); + + print_hex_dump_debug("WQE: MANAGE VF_PBLE_BP WQE", DUMP_PREFIX_OFFSET, + 16, 8, wqe, ZXDH_CQP_WQE_SIZE * 8, false); + + if (post_sq) + zxdh_sc_cqp_post_sq(cqp); + return 0; +} diff --git a/drivers/infiniband/hw/zrdma/vf.h b/drivers/infiniband/hw/zrdma/vf.h new file mode 100644 index 000000000000..571872b938af --- /dev/null +++ b/drivers/infiniband/hw/zrdma/vf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VF_H +#define ZXDH_VF_H + +struct zxdh_sc_cqp; + +struct zxdh_manage_vf_pble_info { + u32 sd_index; + u16 first_pd_index; + u16 pd_entry_cnt; + u8 inv_pd_ent; + u64 pd_pl_pba; +}; + +int zxdh_manage_vf_pble_bp(struct zxdh_sc_cqp *cqp, + struct zxdh_manage_vf_pble_info *info, u64 scratch, + bool post_sq); +#endif diff --git a/drivers/infiniband/hw/zrdma/virtchnl.c b/drivers/infiniband/hw/zrdma/virtchnl.c new file mode 100644 index 000000000000..b13a23b845cc --- /dev/null +++ b/drivers/infiniband/hw/zrdma/virtchnl.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#include "osdep.h" +#include "status.h" +#include "hmc.h" +#include "defs.h" +#include "type.h" +#include "protos.h" +#include "virtchnl.h" +#include "ws.h" + +/** + * zxdh_find_vf_dev - get vf struct pointer + * @dev: shared device pointer + * @vf_id: virtual function id + */ +struct zxdh_vfdev *zxdh_find_vf_dev(struct zxdh_sc_dev *dev, u16 vf_id) +{ + struct zxdh_vfdev *vf_dev = NULL; + u16 iw_vf_idx; + unsigned long flags; + + spin_lock_irqsave(&dev->vf_dev_lock, flags); + for (iw_vf_idx = 0; iw_vf_idx < dev->num_vfs; iw_vf_idx++) { + if (dev->vf_dev[iw_vf_idx] && + dev->vf_dev[iw_vf_idx]->vf_id == vf_id) { + 
vf_dev = dev->vf_dev[iw_vf_idx]; + refcount_inc(&vf_dev->refcnt); + break; + } + } + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); + + return vf_dev; +} + +/** + * zxdh_remove_vf_dev - remove vf_dev + * @dev: shared device pointer + * @vf_dev: vf dev to be removed + */ +void zxdh_remove_vf_dev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev) +{ + u16 iw_vf_idx = 0; + unsigned long flags; + + if (vf_dev) { + iw_vf_idx = vf_dev->iw_vf_idx; + zxdh_put_vfdev(dev, vf_dev); + } else { + pr_err("%s vf_dev is NULL!\n", __func__); + return; + } + spin_lock_irqsave(&dev->vf_dev_lock, flags); + dev->vf_dev[iw_vf_idx] = NULL; + spin_unlock_irqrestore(&dev->vf_dev_lock, flags); +} + +/** + * zxdh_put_vfdev - put vfdev and free memory + * @dev: pointer to RDMA dev structure + * @vf_dev: pointer to RDMA vf dev structure + */ +void zxdh_put_vfdev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev) +{ + if (refcount_dec_and_test(&vf_dev->refcnt)) { + struct zxdh_virt_mem virt_mem; + + if (vf_dev->hmc_info.sd_table.sd_entry) { + virt_mem.va = vf_dev->hmc_info.sd_table.sd_entry; + virt_mem.size = sizeof(struct zxdh_hmc_sd_entry) * + (vf_dev->hmc_info.hmc_entry_total); + kfree(virt_mem.va); + } + + virt_mem.va = vf_dev; + virt_mem.size = sizeof(*vf_dev); + kfree(virt_mem.va); + } +} diff --git a/drivers/infiniband/hw/zrdma/virtchnl.h b/drivers/infiniband/hw/zrdma/virtchnl.h new file mode 100644 index 000000000000..6892f13aaad3 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/virtchnl.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_VIRTCHNL_H +#define ZXDH_VIRTCHNL_H + +#include "hmc.h" + +#pragma pack(push, 1) + +struct zxdh_virtchnl_op_buf { + u16 op_code; + u16 op_ver; + u16 buf_len; + u16 rsvd; + u64 op_ctx; + /* Member alignment MUST be maintained above this location */ + u8 buf[]; +}; + +struct zxdh_virtchnl_resp_buf { + u64 op_ctx; + u16 buf_len; + s16 op_ret_code; + /* Member 
alignment MUST be maintained above this location */ + u16 rsvd[2]; + u8 buf[]; +}; + +enum zxdh_virtchnl_ops { + ZXDH_VCHNL_OP_GET_VER = 0, + ZXDH_VCHNL_OP_GET_HMC_FCN = 1, + ZXDH_VCHNL_OP_PUT_HMC_FCN = 2, + ZXDH_VCHNL_OP_ADD_HMC_OBJ_RANGE = 3, + ZXDH_VCHNL_OP_DEL_HMC_OBJ_RANGE = 4, + ZXDH_VCHNL_OP_GET_STATS = 5, + ZXDH_VCHNL_OP_MANAGE_STATS_INST = 6, + ZXDH_VCHNL_OP_MCG = 7, + ZXDH_VCHNL_OP_UP_MAP = 8, + ZXDH_VCHNL_OP_MANAGE_WS_NODE = 9, + ZXDH_VCHNL_OP_VLAN_PARSING = 12, +}; + +#define ZXDH_VCHNL_CHNL_VER_V0 0 +#define ZXDH_VCHNL_CHNL_VER_V1 1 + +#define ZXDH_VCHNL_OP_GET_VER_V0 0 +#define ZXDH_VCHNL_OP_GET_VER_V1 1 + +#define ZXDH_VCHNL_OP_GET_HMC_FCN_V0 0 +#define ZXDH_VCHNL_OP_PUT_HMC_FCN_V0 0 +#define ZXDH_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0 +#define ZXDH_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0 +#define ZXDH_VCHNL_OP_GET_STATS_V0 0 +#define ZXDH_VCHNL_OP_MANAGE_WS_NODE_V0 0 +#define ZXDH_VCHNL_OP_VLAN_PARSING_V0 0 +#define ZXDH_VCHNL_INVALID_VF_IDX 0xFFFF + +struct zxdh_virtchnl_hmc_obj_range { + u16 obj_type; + u16 rsvd; + u32 start_index; + u32 obj_count; +}; + +struct zxdh_virtchnl_manage_ws_node { + u8 add; + u8 user_pri; +}; + +struct zxdh_vfdev *zxdh_find_vf_dev(struct zxdh_sc_dev *dev, u16 vf_id); +void zxdh_put_vfdev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev); +void zxdh_remove_vf_dev(struct zxdh_sc_dev *dev, struct zxdh_vfdev *vf_dev); +struct zxdh_virtchnl_req { + struct zxdh_virtchnl_op_buf *vchnl_msg; + void *parm; + u32 vf_id; + u16 parm_len; + u16 resp_len; +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/infiniband/hw/zrdma/ws.h b/drivers/infiniband/hw/zrdma/ws.h new file mode 100644 index 000000000000..ff9d786adfaa --- /dev/null +++ b/drivers/infiniband/hw/zrdma/ws.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_WS_H +#define ZXDH_WS_H + +#include "osdep.h" + +struct zxdh_ws_node { + struct list_head siblings; + struct list_head child_list_head; + 
struct zxdh_ws_node *parent; + u64 lan_qs_handle; /* opaque handle used by LAN */ + u32 l2_sched_node_id; + u16 index; + u16 qs_handle; + u16 vsi_index; + u8 traffic_class; + u8 user_pri; + u8 rel_bw; + u8 abstraction_layer; /* used for splitting a TC */ + u8 prio_type; + u8 type_leaf : 1; + u8 enable : 1; +}; + +struct zxdh_sc_vsi; + +#endif /* ZXDH_WS_H */ diff --git a/drivers/infiniband/hw/zrdma/zrdma-abi.h b/drivers/infiniband/hw/zrdma/zrdma-abi.h new file mode 100644 index 000000000000..65329d23120e --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zrdma-abi.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ +/* + * Copyright (c) 2023 - 2024 ZTE Corporation. All rights reserved. + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + */ + +#ifndef ZRDMA_ABI_H +#define ZRDMA_ABI_H + +#include + +/* user-space whose last ABI ver is 5 */ +#define ZXDH_ABI_VER 5 +#define ZXDH_CONTEXT_VER_V1 5 +#define ZXDH_CONTEXT_VER_V2 6 + +enum zxdh_rdma_tool_flags { + ZXDH_QP_EXTEND_OP = 1 << 0, + ZXDH_CAPTURE = 1 << 1, + ZXDH_GET_HW_DATA = 1 << 2, + ZXDH_GET_HW_OBJECT_DATA = 1 << 3, + ZXDH_CHECK_HW_HEALTH = 1 << 4, + ZXDH_RDMA_TOOL_CFG_DEV_PARAM = 1 << 5, + ZXDH_RDMA_TOOL_SHOW_RES_MAP = 1 << 5, + ZXDH_RDMA_TOOL_READ_RAM = 1 << 6, +}; + +enum zxdh_memreg_type { + ZXDH_MEMREG_TYPE_MEM = 0, + ZXDH_MEMREG_TYPE_QP = 1, + ZXDH_MEMREG_TYPE_CQ = 2, + ZXDH_MEMREG_TYPE_SRQ = 3, +}; + +enum zxdh_db_addr_type { + ZXDH_DB_ADDR_PHY = 0, + ZXDH_DB_ADDR_BAR = 1, +}; + +struct zxdh_alloc_ucontext_req { + __u32 rsvd32; + __u8 userspace_ver; + __u8 rsvd8[3]; +}; + +struct zxdh_alloc_ucontext_resp { + __u32 max_pds; + __u32 max_qps; + __u32 wq_size; /* size of the WQs (SQ+RQ) in the mmaped area */ + __u8 kernel_ver; + __u8 db_addr_type; + __u16 rdma_tool_flags; + __aligned_u64 feature_flags; + 
__aligned_u64 sq_db_mmap_key; + __aligned_u64 cq_db_mmap_key; + __aligned_u64 sq_db_pa; + __aligned_u64 cq_db_pa; + __u32 max_hw_wq_frags; + __u32 max_hw_read_sges; + __u32 max_hw_inline; + __u32 max_hw_rq_quanta; + __u32 max_hw_srq_quanta; + __u32 max_hw_wq_quanta; + __u32 max_hw_srq_wr; + __u32 min_hw_cq_size; + __u32 max_hw_cq_size; + __u16 max_hw_sq_chunk; + __u8 hw_rev; + __u8 chip_rev; + __aligned_u64 srq_db_mmap_key; +}; + +struct zxdh_alloc_pd_resp { + __u32 pd_id; + __u8 rsvd[4]; +}; + +struct zxdh_resize_cq_req { + __aligned_u64 user_cq_buffer; +}; + +struct zxdh_create_cq_req { + __aligned_u64 user_cq_buf; + __aligned_u64 user_shadow_area; +}; + +struct zxdh_create_qp_req { + __aligned_u64 user_wqe_bufs; + __aligned_u64 user_compl_ctx; +}; + +struct zxdh_mem_reg_req { + __u32 reg_type; /* enum zxdh_memreg_type */ + __u32 cq_pages; + __u32 rq_pages; + __u32 sq_pages; + __u32 srq_pages; + __u16 srq_list_pages; + __u8 rsvd[2]; +}; + +struct zxdh_reg_mr_resp { + __u32 mr_pa_low; + __u32 mr_pa_hig; + __u16 host_page_size; + __u16 leaf_pbl_size; + __u8 rsvd[4]; +}; + +struct zxdh_modify_qp_req { + __u8 sq_flush; + __u8 rq_flush; + __u8 rsvd[6]; +}; + +struct zxdh_create_cq_resp { + __u32 cq_id; + __u32 cq_size; +}; + +struct zxdh_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 zxdh_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd; + __u32 qp_caps; +}; + +struct zxdh_modify_qp_resp { + __aligned_u64 push_wqe_mmap_key; + __aligned_u64 push_db_mmap_key; + __u16 push_offset; + __u8 push_valid; + __u8 rd_fence_rate; + __u8 rsvd[4]; +}; + +struct zxdh_create_ah_resp { + __u32 ah_id; + __u8 rsvd[4]; +}; +#endif /* ZXDH_ABI_H */ diff --git a/drivers/infiniband/hw/zrdma/zrdma.h b/drivers/infiniband/hw/zrdma/zrdma.h new file mode 100644 index 000000000000..b8f63de735ef --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zrdma.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 
ZTE Corporation */ + +#ifndef ZRDMA_H +#define ZRDMA_H + +#define RDMA_BIT2(type, a) ((u##type)1UL << a) +#define RDMA_MASK3(type, mask, shift) ((u##type)mask << shift) +#define MAKEMASK(m, s) ((m) << (s)) + +#define ZXDH_WQEALLOC_WQE_DESC_INDEX_S 20 +#define ZXDH_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20) + +#define ZXDH_CQPTAIL_WQTAIL_S 0 +#define ZXDH_CQPTAIL_WQTAIL GENMASK(10, 0) +#define ZXDH_CQPTAIL_CQP_OP_ERR_S 31 +#define ZXDH_CQPTAIL_CQP_OP_ERR BIT(31) + +#define ZXDH_CQPERRCODES_CQP_MINOR_CODE_S 0 +#define ZXDH_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0) +#define ZXDH_CQPERRCODES_CQP_MAJOR_CODE_S 16 +#define ZXDH_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16) +// CQP Address Masks +#define ZXDH_CQPADDR_HIGH_S 32 +#define ZXDH_CQPADDR_HIGH GENMASK_ULL(63, 32) +#define ZXDH_CQPADDR_LOW_S 0 +#define ZXDH_CQPADDR_LOW GENMASK_ULL(31, 0) + +#define ZXDH_GLPCI_LBARCTRL_PE_DB_SIZE_S 4 +#define ZXDH_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4) +#define ZXDH_GLINT_RATE_INTERVAL_S 0 +#define ZXDH_GLINT_RATE_INTERVAL GENMASK(4, 0) +#define ZXDH_GLINT_RATE_INTRL_ENA_S 6 +#define ZXDH_GLINT_RATE_INTRL_ENA_M BIT(6) +#define ZXDH_GLINT_RATE_INTRL_ENA BIT(6) + +#define ZXDH_GLINT_DYN_CTL_INTENA_S 0 +#define ZXDH_GLINT_DYN_CTL_INTENA BIT(0) +#define ZXDH_GLINT_DYN_CTL_CLEARPBA_S 1 +#define ZXDH_GLINT_DYN_CTL_CLEARPBA BIT(1) +#define ZXDH_GLINT_DYN_CTL_ITR_INDX_S 3 +#define ZXDH_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3) +#define ZXDH_GLINT_DYN_CTL_INTERVAL_S 5 +#define ZXDH_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5) +#define ZXDH_GLINT_CEQCTL_ITR_INDX_S 11 +#define ZXDH_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11) +#define ZXDH_GLINT_CEQCTL_CAUSE_ENA_S 30 +#define ZXDH_GLINT_CEQCTL_CAUSE_ENA BIT(30) +#define ZXDH_GLINT_CEQCTL_MSIX_INDX_S 0 +#define ZXDH_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0) +#define ZXDH_PFINT_AEQCTL_MSIX_INDX_S 0 +#define ZXDH_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0) +#define ZXDH_PFINT_AEQCTL_ITR_INDX_S 11 +#define ZXDH_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11) +#define 
ZXDH_PFINT_AEQCTL_CAUSE_ENA_S 30 +#define ZXDH_PFINT_AEQCTL_CAUSE_ENA BIT(30) +#define ZXDH_PFHMC_PDINV_PMSDIDX_S 0 +#define ZXDH_PFHMC_PDINV_PMSDIDX GENMASK(11, 0) +#define ZXDH_PFHMC_PDINV_PMSDPARTSEL_S 15 +#define ZXDH_PFHMC_PDINV_PMSDPARTSEL BIT(15) +#define ZXDH_PFHMC_PDINV_PMPDIDX_S 16 +#define ZXDH_PFHMC_PDINV_PMPDIDX GENMASK(24, 16) +#define ZXDH_PFHMC_SDDATALOW_PMSDVALID_S 0 +#define ZXDH_PFHMC_SDDATALOW_PMSDVALID BIT(0) +#define ZXDH_PFHMC_SDDATALOW_PMSDTYPE_S 1 +#define ZXDH_PFHMC_SDDATALOW_PMSDTYPE BIT(1) +#define ZXDH_PFHMC_SDDATALOW_PMSDBPCOUNT_S 2 +#define ZXDH_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2) +#define ZXDH_PFHMC_SDDATALOW_PMSDDATALOW_S 12 +#define ZXDH_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12) +#define ZXDH_PFHMC_SDCMD_PMSDWR_S 31 +#define ZXDH_PFHMC_SDCMD_PMSDWR BIT(31) +#define ZXDH_PFHMC_SDCMD_PMSDPARTSEL_S 15 +#define ZXDH_PFHMC_SDCMD_PMSDPARTSEL BIT(15) + +#define ZXDH_INVALID_CQ_IDX 0xffffffff + +enum zxdh_dyn_idx_t { + ZXDH_IDX_ITR0 = 0, + ZXDH_IDX_ITR1 = 1, + ZXDH_IDX_ITR2 = 2, + ZXDH_IDX_NOITR = 3, +}; + +enum zxdh_registers { + ZXDH_CQPTAIL, + ZXDH_CQPDB, + ZXDH_CCQPSTATUS, + ZXDH_CCQPHIGH, + ZXDH_CCQPLOW, + ZXDH_CQARM, + ZXDH_CQACK, + ZXDH_AEQALLOC, + ZXDH_CQPERRCODES, + ZXDH_WQEALLOC, + ZXDH_GLINT_DYN_CTL, + ZXDH_DB_ADDR_OFFSET, + ZXDH_GLPCI_LBARCTRL, + ZXDH_GLPE_CPUSTATUS0, + ZXDH_GLPE_CPUSTATUS1, + ZXDH_GLPE_CPUSTATUS2, + ZXDH_PFINT_AEQCTL, + ZXDH_GLINT_CEQCTL, + ZXDH_VSIQF_PE_CTL1, + ZXDH_PFHMC_PDINV, + ZXDH_GLHMC_VFPDINV, + ZXDH_GLPE_CRITERR, + ZXDH_GLINT_RATE, + ZXDH_MAX_REGS, /* Must be last entry */ +}; + +enum zxdh_shifts { + ZXDH_CCQPSTATUS_CCQP_DONE_S, + ZXDH_CCQPSTATUS_CCQP_ERR_S, + ZXDH_CQPSQ_STAG_PDID_S, + ZXDH_CQPSQ_CQ_CEQID_S, + ZXDH_CQPSQ_CQ_CQID_S, + ZXDH_COMMIT_FPM_CQCNT_S, + ZXDH_MAX_SHIFTS, +}; + +enum zxdh_masks { + ZXDH_CCQPSTATUS_CCQP_DONE_M, + ZXDH_CCQPSTATUS_CCQP_ERR_M, + ZXDH_CQPSQ_STAG_PDID_M, + ZXDH_CQPSQ_CQ_CEQID_M, + ZXDH_CQPSQ_CQ_CQID_M, + ZXDH_COMMIT_FPM_CQCNT_M, + ZXDH_MAX_MASKS, /* Must be 
last entry */ +}; + +#define ZXDH_MAX_MGS_PER_CTX 1022 + +struct zxdh_mcast_grp_ctx_entry_info { + u32 qp_id; + bool valid_entry; + u16 dest_port; + u32 use_cnt; +}; + +struct zxdh_mcast_grp_info { + u8 dest_mac_addr[ETH_ALEN]; + u16 vlan_id; + u8 hmc_fcn_id; + u8 ipv4_valid : 1; + u8 vlan_valid : 1; + u16 mg_id; + u32 no_of_mgs; + u32 dest_ip_addr[4]; + u16 qs_handle; + struct zxdh_dma_mem dma_mem_mc; + struct zxdh_mcast_grp_ctx_entry_info mg_ctx_info[ZXDH_MAX_MGS_PER_CTX]; +}; + +enum zxdh_rdma_vers { + ZXDH_GEN_RSVD, + ZXDH_GEN_1, + ZXDH_GEN_2, +}; + +struct zxdh_uk_attrs { + u64 feature_flags; + u32 max_hw_wq_frags; + u32 max_hw_read_sges; + u32 max_hw_inline; + u32 max_hw_srq_quanta; + u32 max_hw_rq_quanta; + u32 max_hw_wq_quanta; + u32 min_hw_cq_size; + u32 max_hw_cq_size; + u16 max_hw_sq_chunk; + u32 max_hw_srq_wr; + u8 hw_rev; +}; + +struct zxdh_hw_attrs { + struct zxdh_uk_attrs uk_attrs; + u64 max_hw_outbound_msg_size; + u64 max_hw_inbound_msg_size; + u64 max_mr_size; + u32 min_hw_qp_id; + u32 min_hw_aeq_size; + u32 max_hw_aeq_size; + u32 min_hw_ceq_size; + u32 max_hw_ceq_size; + u32 max_hw_device_pages; + u32 max_hw_vf_fpm_id; + u32 first_hw_vf_fpm_id; + u32 max_hw_ird; + u32 max_hw_ord; + u32 max_hw_wqes; + u32 max_hw_pds; + u32 max_hw_ena_vf_count; + u32 max_qp_wr; + u32 max_srq_wr; + u32 max_pe_ready_count; + u32 max_done_count; + u32 max_sleep_count; + u32 max_cqp_compl_wait_time_ms; + u16 max_stat_inst; + u16 max_stat_idx; + u32 cqp_timeout_threshold; + u8 self_health; +}; + +void zxdh_init_hw(struct zxdh_sc_dev *dev); +void zxdh_check_fc_for_qp(struct zxdh_sc_vsi *vsi, struct zxdh_sc_qp *sc_qp); +#endif /* ZXDH_H*/ diff --git a/drivers/infiniband/hw/zrdma/zrdma_kcompat.c b/drivers/infiniband/hw/zrdma/zrdma_kcompat.c new file mode 100644 index 000000000000..a4116fe4d3e9 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zrdma_kcompat.c @@ -0,0 +1,2714 @@ +// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +/* Copyright (c) 2023 - 2024 ZTE Corporation 
*/ + +#include "main.h" +#include "icrdma_hw.h" +#include + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +extern notify_remote_ip_update remote_ip_update_hook; + +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE || defined(GET_ETH_SPEED_V1) +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width) +#elif KERNEL_VERSION(5, 4, 195) == LINUX_VERSION_CODE +#ifdef __OFED_23_10__ +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width) +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width) +#endif +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width) +#endif +{ + int rc; + u32 netdev_speed; + struct ethtool_link_ksettings lksettings; + + if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET) + return -EINVAL; + + rtnl_lock(); + rc = __ethtool_get_link_ksettings(netdev, &lksettings); + rtnl_unlock(); + + // dev_put(netdev); + + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { + netdev_speed = lksettings.base.speed; + } else { + netdev_speed = SPEED_1000; + } + + if (netdev_speed <= SPEED_1000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_SDR; + } else if (netdev_speed <= SPEED_10000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_FDR10; + } else if (netdev_speed <= SPEED_20000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_DDR; + } else if (netdev_speed <= SPEED_25000) { + *width = IB_WIDTH_1X; + *speed = IB_SPEED_EDR; + } else if (netdev_speed <= SPEED_40000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_FDR10; + } else if (netdev_speed <= SPEED_100000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_EDR; + } else if (netdev_speed <= SPEED_200000) { + *width = IB_WIDTH_4X; + *speed = IB_SPEED_HDR; + } else { + *width = IB_WIDTH_8X; + *speed = IB_SPEED_HDR; + } + + return 0; +} + +#ifdef 
IB_FW_VERSION_NAME_MAX +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str) +{ + struct zxdh_device *iwdev = to_iwdev(dev); + struct ethtool_drvinfo info; + char extracted_version[16]; + struct net_device *slave = NULL; + struct list_head *iter; + memset(&info, 0, sizeof(info)); + if ((iwdev->netdev->priv_flags & IFF_BONDING) == 4) { + rcu_read_lock(); + netdev_for_each_lower_dev(iwdev->netdev, slave, iter) { + slave->ethtool_ops->get_drvinfo(slave, &info); + break; + } + rcu_read_unlock(); + if (!slave) { + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + } + extract_version(info.fw_version, extracted_version); + snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", extracted_version); + return; + } + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + extract_version(info.fw_version, extracted_version); + snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", extracted_version); +} +#else +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len) +{ + struct zxdh_device *iwdev = to_iwdev(dev); + struct ethtool_drvinfo info; + char extracted_version[16]; + struct net_device *slave = NULL; + struct list_head *iter; + memset(&info, 0, sizeof(info)); + if ((iwdev->netdev->priv_flags & IFF_BONDING) == 4) { + rcu_read_lock(); + netdev_for_each_lower_dev(iwdev->netdev, slave, iter) { + slave->ethtool_ops->get_drvinfo(slave, &info); + break; + } + rcu_read_unlock(); + if (!slave) { + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + } + extract_version(info.fw_version, extracted_version); + snprintf(str, str_len, "%s", extracted_version); + return; + } + iwdev->netdev->ethtool_ops->get_drvinfo(iwdev->netdev, &info); + extract_version(info.fw_version, extracted_version); + snprintf(str, str_len, "%s", extracted_version); +} +#endif /* IB_FW_VERSION_NAME_MAX */ + +/** + * zxdh_alloc_mr - register stag for fast memory registration + * @pd: ibpd pointer + * @mr_type: memory for stag registrion + * @max_num_sg: man number of 
pages + */ +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg) +{ + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_pble_alloc *palloc; + struct zxdh_pbl *iwpbl; + struct zxdh_mr *iwmr; + int status; + u32 stag; + int err_code = -ENOMEM; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + stag = zxdh_create_stag(iwdev); + if (!stag) { + err_code = -ENOMEM; + goto err; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = ZXDH_MEMREG_TYPE_MEM; + palloc = &iwpbl->pble_alloc; + iwmr->page_cnt = max_num_sg; + iwmr->sc_dev = &iwdev->rf->sc_dev; + status = zxdh_get_pble(iwdev->rf->pble_mr_rsrc, palloc, iwmr->page_cnt, + true); + if (status) + goto err_get_pble; + + err_code = zxdh_hw_alloc_stag(iwdev, iwmr); + if (err_code) + goto err_alloc_stag; + + iwpbl->pbl_allocated = true; + + return &iwmr->ibmr; +err_alloc_stag: + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, palloc); +err_get_pble: + zxdh_free_stag(iwdev, stag); +err: + kfree(iwmr); + + return ERR_PTR(err_code); +} + +#ifdef ALLOC_UCONTEXT_VER_2 +/** + * zxdh_alloc_ucontext - Allocate the user context data structure + * @uctx: context + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. 
+ */ +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) +{ + struct ib_device *ibdev = uctx->device; + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_alloc_ucontext_req req; + struct zxdh_alloc_ucontext_resp uresp = { 0 }; + struct zxdh_ucontext *ucontext = to_ucontext(uctx); + struct zxdh_uk_attrs *uk_attrs; + u64 sq_db_bar_off, cq_db_bar_off; + struct zxdh_pci_f *rf = NULL; + u64 srq_db_bar_off = 0; + bool kernel_srq_use_l2d = false; + + if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) + return -EINVAL; + + rf = iwdev->rf; + pr_debug("%s[%d]: req.userspace_ver=%d rf=0x%llx srq_l2d_base_paddr=0x%llx srq_l2d_size=0x%x\n", __func__, __LINE__, + req.userspace_ver, (u64)rf, rf->srq_l2d_base_paddr, rf->srq_l2d_size); + if (rf->srq_l2d_base_paddr != 0 && rf->srq_l2d_size != 0) { + kernel_srq_use_l2d = true; + pr_debug("%s[%d]: srq use l2d mem. srq_l2d_base_paddr=0x%llx srq_l2d_size=0x%x\n", __func__, __LINE__, rf->srq_l2d_base_paddr, rf->srq_l2d_size); + } + + if (req.userspace_ver != ZXDH_CONTEXT_VER_V1 && req.userspace_ver != ZXDH_CONTEXT_VER_V2) { + pr_err("%s[%d]: Invalid userspace driver version detected. Detected version %d, should be %d or %d\n", __func__, __LINE__, + req.userspace_ver, (u32)ZXDH_CONTEXT_VER_V1, (u32)ZXDH_CONTEXT_VER_V2); + goto ver_error; + } else if (req.userspace_ver == ZXDH_CONTEXT_VER_V2 && kernel_srq_use_l2d == true) { + rf->rdma_srq_mem_type = USER_L2D_KERNEL_L2D; + pr_debug("%s[%d]: srq use l2d mem!\n", __func__, __LINE__); + } else if (req.userspace_ver == ZXDH_CONTEXT_VER_V2 && kernel_srq_use_l2d == false) { + rf->rdma_srq_mem_type = USER_L2D_KERNEL_DDR; + pr_debug("%s[%d]: rdma kernel srq use ddr, but userspace driver not! 
userspace_ver=%d.\n", __func__, __LINE__, req.userspace_ver); + } else if (req.userspace_ver == ZXDH_CONTEXT_VER_V1 && kernel_srq_use_l2d == true) { + rf->rdma_srq_mem_type = USER_DDR_KERNEL_L2D; + pr_debug("%s[%d]: rdma kernel srq use l2d, but userspace driver not! userspace_ver=%d.\n", __func__, __LINE__, req.userspace_ver); + } else if (req.userspace_ver == ZXDH_CONTEXT_VER_V1 && kernel_srq_use_l2d == false) { + rf->rdma_srq_mem_type = USER_DDR_KERNEL_DDR; + pr_debug("%s[%d]: srq use ddr mem!\n", __func__, __LINE__); + } + + ucontext->iwdev = iwdev; + ucontext->abi_ver = req.userspace_ver; + + uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs; + + sq_db_bar_off = C_RDMA_TX_VHCA_PF_PAGE + rf->base_bar_offset; + cq_db_bar_off = C_RDMA_RX_VHCA_PF_PAGE + rf->base_bar_offset; + if (rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + srq_db_bar_off = rf->rdma_ext_bar_offset; + } + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#else + spin_lock_init(&ucontext->mmap_tbl_lock); + ucontext->sq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, sq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.sq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->sq_db_mmap_entry) + return -ENOMEM; + +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_insert( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#else + ucontext->cq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, cq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.cq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->cq_db_mmap_entry) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); +#endif + return -ENOMEM; + } + + if (rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { +#ifdef RDMA_MMAP_DB_SUPPORT + ucontext->srq_db_mmap_entry = 
zxdh_user_mmap_entry_insert( + ucontext, srq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.srq_db_mmap_key); +#else + ucontext->srq_db_mmap_entry = zxdh_user_mmap_entry_add_hash( + ucontext, srq_db_bar_off, ZXDH_MMAP_IO_NC, + &uresp.srq_db_mmap_key); +#endif /* RDMA_MMAP_DB_SUPPORT */ + if (!ucontext->srq_db_mmap_entry) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); +#endif + pr_err("%s[%d]: srq_db_mmap_entry is NULL!\n", __func__, __LINE__); + return -ENOMEM; + } + } + + uresp.kernel_ver = ZXDH_CONTEXT_VER_V1; + uresp.hw_rev = uk_attrs->hw_rev; + uresp.chip_rev = iwdev->rf->sc_dev.chip_version; + uresp.rdma_tool_flags = + ZXDH_QP_EXTEND_OP | + ZXDH_CAPTURE | + ZXDH_GET_HW_DATA | + ZXDH_GET_HW_OBJECT_DATA | + ZXDH_CHECK_HW_HEALTH | + ZXDH_RDMA_TOOL_CFG_DEV_PARAM | + ZXDH_RDMA_TOOL_READ_RAM; + + uresp.feature_flags = uk_attrs->feature_flags; + uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; + uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; + uresp.max_hw_inline = uk_attrs->max_hw_inline; + uresp.max_hw_srq_wr = uk_attrs->max_hw_srq_wr; + uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; + uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta; + uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; + uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; + uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; + uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; + uresp.db_addr_type = ZXDH_DB_ADDR_BAR; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); + if (rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + rdma_user_mmap_entry_remove(ucontext->srq_db_mmap_entry); + } +#else + 
zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); + if (rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + zxdh_user_mmap_entry_del_hash(ucontext->srq_db_mmap_entry); + } +#endif + return -EFAULT; + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->srq_reg_mem_list); + spin_lock_init(&ucontext->srq_reg_mem_list_lock); +#if KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE + INIT_LIST_HEAD(&ucontext->vma_list); + mutex_init(&ucontext->vma_list_mutex); +#endif + + return 0; + +ver_error: + return -EINVAL; +} +#endif + +static void free_cap_mmap_entry(struct zxdh_cap_addr_info *cap_addr_info) +{ + if (cap_addr_info->entry_info.cap_mmap_entry != NULL) { + pr_info("free_cap_mmap_entry rdma_user_mmap_entry_remove!\n"); +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove( + cap_addr_info->entry_info.cap_mmap_entry); +#else + zxdh_user_mmap_entry_del_hash( + cap_addr_info->entry_info.cap_mmap_entry); +#endif + cap_addr_info->entry_info.cap_mmap_entry = NULL; + } +} + +static void zxdh_cap_data_free(struct zxdh_device *iwdev) +{ + int i; + + if (!iwdev) + return; + free_cap_mmap_entry(&iwdev->hw_data_cap.mp_cap); + free_cap_mmap_entry(&iwdev->hw_data_cap.hw_object_mmap); + + for (i = 0; i < CAP_NODE_NUM; i++) { + free_cap_mmap_entry( + &iwdev->hw_data_cap.cap_tx_use_direct_dma[i]); + free_cap_mmap_entry( + &iwdev->hw_data_cap.cap_rx_use_direct_dma[i]); + free_cap_mmap_entry(&iwdev->hw_data_cap.cap_txrx_use_iova[i]); + } +} + +#ifdef DEALLOC_UCONTEXT_VER_2 +/** + * zxdh_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +void zxdh_dealloc_ucontext(struct ib_ucontext *context) +{ + struct zxdh_ucontext *ucontext = to_ucontext(context); + struct ib_device *ib_dev; + 
struct zxdh_device *iwdev; + + ib_dev = context->device; + if (!ib_dev) + return; + iwdev = to_iwdev(ib_dev); + if (!iwdev) + return; + +#ifdef RDMA_MMAP_DB_SUPPORT + rdma_user_mmap_entry_remove(ucontext->sq_db_mmap_entry); + rdma_user_mmap_entry_remove(ucontext->cq_db_mmap_entry); + if (iwdev->rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + rdma_user_mmap_entry_remove(ucontext->srq_db_mmap_entry); + } +#else + zxdh_user_mmap_entry_del_hash(ucontext->sq_db_mmap_entry); + zxdh_user_mmap_entry_del_hash(ucontext->cq_db_mmap_entry); + if (iwdev->rf->rdma_srq_mem_type == USER_L2D_KERNEL_L2D) { + zxdh_user_mmap_entry_del_hash(ucontext->srq_db_mmap_entry); + } +#endif + zxdh_cap_data_free(iwdev); +} +#endif + +#ifdef ALLOC_PD_VER_3 +/** + * zxdh_alloc_pd - allocate protection domain + * @pd: protection domain + * @udata: user data + */ +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(pd); + struct zxdh_device *iwdev = to_iwdev(pd->device); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_alloc_pd_resp uresp = {}; + struct zxdh_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = zxdh_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, + &rf->next_pd); + if (err) + return err; + + sc_pd = &iwpd->sc_pd; + if (udata) { + struct zxdh_ucontext *ucontext = rdma_udata_to_drv_context( + udata, struct zxdh_ucontext, ibucontext); + + zxdh_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen))) { + err = -EFAULT; + goto error; + } + } else { + zxdh_sc_pd_init(dev, sc_pd, pd_id, ZXDH_ABI_VER); + } + + return 0; + +error: + + zxdh_free_rsrc(rf, rf->allocated_pds, pd_id); + + return err; +} +#endif + +#ifdef DEALLOC_PD_VER_4 +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) +{ + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + + 
zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); + return 0; +} + +#endif + +static void zxdh_fill_ah_info(struct zxdh_ah_info *ah_info, + const struct ib_gid_attr *sgid_attr, + struct sockaddr *sgid_addr, + struct sockaddr *dgid_addr, u8 net_type) +{ + if (net_type == RDMA_NETWORK_IPV4) { + ah_info->ipv4_valid = true; + ah_info->dest_ip_addr[0] = ntohl( + ((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr); + ah_info->src_ip_addr[0] = ntohl( + ((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr); + ah_info->do_lpbk = zxdh_ipv4_is_lpb(ah_info->src_ip_addr[0], + ah_info->dest_ip_addr[0]); + if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr) + ->sin_addr.s_addr)) { + zxdh_mcast_mac_v4(ah_info->dest_ip_addr, ah_info->dmac); + } + } else { + zxdh_copy_ip_ntohl(ah_info->dest_ip_addr, + ((struct sockaddr_in6 *)dgid_addr) + ->sin6_addr.in6_u.u6_addr32); + zxdh_copy_ip_ntohl(ah_info->src_ip_addr, + ((struct sockaddr_in6 *)sgid_addr) + ->sin6_addr.in6_u.u6_addr32); + ah_info->do_lpbk = zxdh_ipv6_is_lpb(ah_info->src_ip_addr, + ah_info->dest_ip_addr); + if (rdma_is_multicast_addr( + &((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) { + zxdh_mcast_mac_v6(ah_info->dest_ip_addr, ah_info->dmac); + } + } +} + +static int zxdh_create_ah_vlan_tag(struct zxdh_device *iwdev, + struct zxdh_ah_info *ah_info, + const struct ib_gid_attr *sgid_attr) +{ + if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) + ah_info->vlan_tag = 0; + + if (ah_info->vlan_tag < VLAN_N_VID) { + ah_info->insert_vlan_tag = true; + ah_info->vlan_tag |= rt_tos2priority(ah_info->tc_tos) + << VLAN_PRIO_SHIFT; + } + return 0; +} + +static int zxdh_create_ah_wait(struct zxdh_pci_f *rf, struct zxdh_sc_ah *sc_ah, + bool sleep) +{ + if (!sleep) { + int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * + rf->sc_dev.hw_attrs.cqp_timeout_threshold; + + do { + zxdh_cqp_ce_handler(rf, &rf->ccq.sc_cq); + udelay(20); + } while (!sc_ah->ah_info.ah_valid && --cnt); + if 
(sc_ah->ah_info.ah_valid) + mdelay(1); + if (!cnt) + return -ETIMEDOUT; + } + return 0; +} + +#ifndef CREATE_AH_VER_0 +static bool zxdh_ah_exists(struct zxdh_device *iwdev, struct zxdh_ah *new_ah) +{ + struct zxdh_ah *ah; + u32 save_ah_id = new_ah->sc_ah.ah_info.ah_idx; + + list_for_each_entry(ah, &iwdev->ah_list, list) { + /* Set ah_id the same so memcp can work */ + new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx; + if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info, + sizeof(ah->sc_ah.ah_info))) { + refcount_inc(&ah->refcnt); + new_ah->parent_ah = ah; + return true; + } + } + new_ah->sc_ah.ah_info.ah_idx = save_ah_id; + /* Add new AH to list */ + if (iwdev->ah_list_cnt >= ZXDH_MAX_AH_LIST) + return false; + ah = kmemdup(new_ah, sizeof(*new_ah), GFP_KERNEL); + if (!ah) + return false; + new_ah->parent_ah = ah; + list_add(&ah->list, &iwdev->ah_list); + iwdev->ah_list_cnt++; + if (iwdev->ah_list_cnt > iwdev->ah_list_hwm) + iwdev->ah_list_hwm = iwdev->ah_list_cnt; + refcount_set(&ah->refcnt, 1); + + return false; +} +#endif + +int zxdh_create_ah_v2(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata) +#if defined(CREATE_AH_VER_2) || defined(CREATE_AH_VER_5) +{ + struct zxdh_pd *pd = to_iwpd(ib_ah->pd); + struct zxdh_ah *ah = container_of(ib_ah, struct zxdh_ah, ibah); + struct zxdh_device *iwdev = to_iwdev(ib_ah->pd->device); + const struct ib_gid_attr *sgid_attr; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp = {}; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + bool sleep = flags & RDMA_CREATE_AH_SLEEPABLE; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return err; + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + 
zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + sgid_attr = attr->grh.sgid_attr; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag, + ah_info->mac_addr); + + if (err) + goto err_gid_l2; + + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + ether_addr_copy(ah_info->dmac, attr->roce.dmac); + + zxdh_fill_ah_info(ah_info, sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + zxdh_create_ah_vlan_tag(iwdev, ah_info, sgid_attr); + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto 
err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + + return 0; +err_ah_create: + if (ah->parent_ah) { + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return err; +} +#endif + +#ifdef CREATE_AH_VER_6 +/** + * zxdh_create_ah - create address handle + * @ib_ah: ptr to AH + * @attr: address handle attributes + * @flags: AH flags to wait + * @udata: user data + * + * returns 0 on success, error otherwise + */ +int zxdh_create_ah(struct ib_ah *ib_ah, struct ib_ah_attr *attr, u32 flags, + struct ib_udata *udata) +{ + struct zxdh_pd *pd = to_iwpd(ib_ah->pd); + struct zxdh_ah *ah = container_of(ib_ah, struct zxdh_ah, ibah); + struct zxdh_device *iwdev = to_iwdev(ib_ah->pd->device); + union ib_gid sgid; + struct ib_gid_attr sgid_attr; + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_sc_ah *sc_ah; + u32 ah_id = 0; + struct zxdh_ah_info *ah_info; + struct zxdh_create_ah_resp uresp; + union { + struct sockaddr saddr; + struct sockaddr_in saddr_in; + struct sockaddr_in6 saddr_in6; + } sgid_addr, dgid_addr; + int err; + bool sleep = flags & RDMA_CREATE_AH_SLEEPABLE; + + err = zxdh_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id, + &rf->next_ah); + + if (err) + return err; + + ah->pd = pd; + sc_ah = &ah->sc_ah; + sc_ah->ah_info.ah_idx = ah_id; + sc_ah->ah_info.vsi = &iwdev->vsi; + zxdh_sc_init_ah(&rf->sc_dev, sc_ah); + ah->sgid_index = attr->grh.sgid_index; + memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); + rcu_read_lock(); + err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, + attr->grh.sgid_index, &sgid, &sgid_attr); + rcu_read_unlock(); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: GID lookup at idx=%d with port=%d failed\n", + attr->grh.sgid_index, attr->port_num); + err = -EINVAL; + goto err_gid_l2; + } + 
rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); + ah->av.attrs = *attr; + ah->av.net_type = kc_rdma_gid_attr_network_type( + sgid_attr, sgid_attr.gid_type, &sgid); + + if (kc_deref_sgid_attr(sgid_attr)) + dev_put(kc_deref_sgid_attr(sgid_attr)); + + ah->av.sgid_addr.saddr = sgid_addr.saddr; + ah->av.dgid_addr.saddr = dgid_addr.saddr; + ah_info = &sc_ah->ah_info; + ah_info->ah_idx = ah_id; + ah_info->pd_idx = pd->sc_pd.pd_id; + ether_addr_copy(ah_info->mac_addr, iwdev->netdev->dev_addr); + + if (attr->ah_flags & IB_AH_GRH) { + ah_info->flow_label = attr->grh.flow_label; + ah_info->hop_ttl = attr->grh.hop_limit; + ah_info->tc_tos = attr->grh.traffic_class; + } + + ether_addr_copy(ah_info->dmac, attr->dmac); + + zxdh_fill_ah_info(ah_info, &sgid_attr, &sgid_addr.saddr, + &dgid_addr.saddr, ah->av.net_type); + + err = zxdh_create_ah_vlan_tag(iwdev, ah_info, &sgid_attr); + if (err) + goto err_gid_l2; + + if (sleep) { + mutex_lock(&iwdev->ah_list_lock); + if (zxdh_ah_exists(iwdev, ah)) { + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah_id); + ah_id = 0; + + goto exit; + } + } + + err = zxdh_ah_cqp_op(iwdev->rf, sc_ah, ZXDH_OP_AH_CREATE, sleep, + zxdh_gsi_ud_qp_ah_cb, sc_ah); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: CQP-OP Create AH fail"); + goto err_ah_create; + } + + err = zxdh_create_ah_wait(rf, sc_ah, sleep); + if (err) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: CQP create AH timed out"); + goto err_gid_l2; + } + +exit: + if (udata) { + uresp.ah_id = ah->sc_ah.ah_info.ah_idx; + err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err) { + if (!ah->parent_ah || + (ah->parent_ah && + refcount_dec_and_test(&ah->parent_ah->refcnt))) { + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, + ZXDH_OP_AH_DESTROY, false, NULL, + ah); + ah_id = ah->sc_ah.ah_info.ah_idx; + goto err_ah_create; + } + goto err_unlock; + } + } + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); + + return 0; 
+err_ah_create: + if (ah->parent_ah) { + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + } +err_unlock: + if (sleep) + mutex_unlock(&iwdev->ah_list_lock); +err_gid_l2: + if (ah_id) + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); + + return err; +} +#endif /* CREATE_AH_VER_6 */ + +#ifdef CREATE_AH_VER_5 +/** + * zxdh_create_ah - create address handle + * @ibah: ptr to AH + * @init_attr: address handle attributes + * @udata: user data + * + * returns a pointer to an address handle + */ +int zxdh_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, + struct ib_udata *udata) +{ + return zxdh_create_ah_v2(ibah, init_attr->ah_attr, init_attr->flags, + udata); +} +#endif + +#if defined(ETHER_COPY_VER_1) +void zxdh_ether_copy(u8 *dmac, struct ib_ah_attr *attr) +{ + ether_addr_copy(dmac, attr->dmac); +} +#endif + +static void zxdh_store_free_qp(struct zxdh_pci_f *rf, u32 qp_num) +{ + unsigned long flags; + + if ((qp_num == 0) || (qp_num >= rf->max_qp)) + return; + spin_lock_irqsave(&rf->rsrc_lock, flags); + rf->qp_buf[rf->qp_index] = qp_num; + rf->qp_index++; + rf->qp_index %= ZXDH_RDMA_QP_BUF_LEN; + spin_unlock_irqrestore(&rf->rsrc_lock, flags); +} +#ifdef CREATE_QP_VER_2 +/** + * zxdh_free_qp_rsrc - free up memory resources for qp + * @iwqp: qp ptr (user or kernel) + */ +void zxdh_free_qp_rsrc(struct zxdh_qp *iwqp) +{ + struct zxdh_device *iwdev = iwqp->iwdev; + struct zxdh_pci_f *rf = iwdev->rf; + int qp_index; + + if (iwqp->ibqp.qp_num <= 1) + qp_index = iwqp->ibqp.qp_num; + else + qp_index = iwqp->ibqp.qp_num - rf->sc_dev.base_qpn; + + if (qp_index > 0 && qp_index < rf->max_qp) { + if (iwqp->sc_qp.dev) + zxdh_qp_rem_qos(&iwqp->sc_qp); + zxdh_free_rsrc(rf, rf->allocated_qps, qp_index); + if (!iwqp->user_mode) { + dma_free_coherent(rf->sc_dev.hw->device, + iwqp->kqp.dma_mem.size, + iwqp->kqp.dma_mem.va, + iwqp->kqp.dma_mem.pa); + iwqp->kqp.dma_mem.va = NULL; + kfree(iwqp->kqp.sq_wrid_mem); + 
kfree(iwqp->kqp.rq_wrid_mem); + } + } + + if (iwqp->host_ctx.va) { + dma_free_coherent(rf->sc_dev.hw->device, iwqp->host_ctx.size, + iwqp->host_ctx.va, iwqp->host_ctx.pa); + iwqp->host_ctx.va = NULL; + } + kfree(iwqp->sg_list); +} + +/** + * zxdh_create_qp - create qp + * @ibqp: ptr of qp + * @init_attr: attributes for qp + * @udata: user data for create qp + */ +int zxdh_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct ib_pd *ibpd = ibqp->pd; + struct zxdh_pd *iwpd = to_iwpd(ibpd); + struct zxdh_device *iwdev = to_iwdev(ibpd->device); + struct zxdh_pci_f *rf = iwdev->rf; + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_create_qp_req req; + struct zxdh_create_qp_resp uresp = {}; + u32 qp_num = 0; + u32 qp_ctx_num = 0; + u8 qp_ret; + int ret; + int err_code; + int sq_size; + int rq_size; + struct zxdh_srq *iwsrq; + struct zxdh_sc_qp *qp; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; + struct zxdh_qp_init_info init_info = {}; + struct zxdh_qp_host_ctx_info *ctx_info; + unsigned long flags; + + err_code = zxdh_validate_qp_attrs(init_attr, iwdev); + if (err_code) + return err_code; + + sq_size = init_attr->cap.max_send_wr; + rq_size = init_attr->cap.max_recv_wr; +#ifdef Z_CONFIG_RDMA_VSI + init_info.vsi = &iwdev->vsi; +#endif + init_info.dev = dev; + init_info.qp_uk_init_info.uk_attrs = uk_attrs; + init_info.qp_uk_init_info.sq_size = sq_size; + init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; + init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; + init_info.qp_uk_init_info.max_inline_data = + init_attr->cap.max_inline_data; + + qp = &iwqp->sc_qp; + qp->dev = NULL; + qp->qp_uk.back_qp = iwqp; + qp->qp_uk.lock = &iwqp->lock; + + iwqp->is_srq = false; + if (init_attr->srq != NULL) { + iwqp->is_srq = true; + iwsrq = to_iwsrq(init_attr->srq); + iwqp->iwsrq = iwsrq; + 
iwqp->sc_qp.srq = &iwsrq->sc_srq; + } + qp->is_srq = iwqp->is_srq; + + iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, + sizeof(*iwqp->sg_list), GFP_KERNEL); + if (!iwqp->sg_list) + return -ENOMEM; + + iwqp->iwdev = iwdev; + iwqp->host_ctx.va = NULL; + iwqp->host_ctx.size = ALIGN(ZXDH_QP_CTX_SIZE, ZXDH_QPC_ALIGNMENT); + iwqp->host_ctx.va = dma_alloc_coherent(dev->hw->device, + iwqp->host_ctx.size, + &iwqp->host_ctx.pa, GFP_KERNEL); + if (!iwqp->host_ctx.va) { + kfree(iwqp->sg_list); + return -ENOMEM; + } + + init_info.host_ctx = iwqp->host_ctx.va; + init_info.host_ctx_pa = iwqp->host_ctx.pa; + + err_code = zxdh_alloc_rsrc_qp(rf, rf->allocated_qps, rf->max_qp, + &qp_ctx_num, &rf->next_qp, &qp_ret); + if (err_code) + goto error; + qp_ctx_num += dev->base_qpn; + if (init_attr->qp_type == IB_QPT_GSI) + qp_num = 1; + else + qp_num = qp_ctx_num; + + iwqp->iwpd = iwpd; + iwqp->ibqp.qp_num = qp_num; + qp = &iwqp->sc_qp; + iwqp->sc_qp.qp_ctx_num = qp_ctx_num; + iwqp->iwscq = to_iwcq(init_attr->send_cq); + iwqp->iwrcq = to_iwcq(init_attr->recv_cq); + + init_info.pd = &iwpd->sc_pd; + init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; + init_waitqueue_head(&iwqp->mod_qp_waitq); + + if (udata) { + err_code = ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: ib_copy_from_data fail\n"); + goto error; + } + + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + iwqp->user_mode = 1; + if (req.user_wqe_bufs) { + struct zxdh_ucontext *ucontext = + kc_rdma_udata_to_drv_context(ibpd, udata); + + init_info.qp_uk_init_info.legacy_mode = + ucontext->legacy_mode; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, + flags); + iwqp->iwpbl = + zxdh_get_pbl((unsigned long)req.user_wqe_bufs, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, + flags); + + if (!iwqp->iwpbl) { + err_code = -ENODATA; + 
zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: no pbl info\n"); + goto error; + } + } + init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; + zxdh_setup_virt_qp(iwdev, iwqp, &init_info); + } else { + INIT_DELAYED_WORK(&iwqp->dwork_flush, zxdh_flush_worker); + init_info.qp_uk_init_info.abi_ver = ZXDH_ABI_VER; + err_code = + zxdh_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); + } + + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: setup qp failed\n"); + goto error; + } + + if (init_attr->qp_type == IB_QPT_RC) { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_RC; + init_info.qp_uk_init_info.qp_caps = + ZXDH_SEND_WITH_IMM | ZXDH_WRITE_WITH_IMM | ZXDH_ROCE; + } else { + init_info.qp_uk_init_info.type = ZXDH_QP_TYPE_ROCE_UD; + init_info.qp_uk_init_info.qp_caps = ZXDH_SEND_WITH_IMM | + ZXDH_ROCE; + } + + ret = zxdh_sc_qp_init(qp, &init_info); + if (ret) { + err_code = -EPROTO; + zxdh_dbg(iwdev_to_idev(iwdev), "VERBS: qp_init fail\n"); + goto error; + } + + ctx_info = &iwqp->ctx_info; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + + if (iwqp->is_srq == true) + ctx_info->use_srq = true; + else + ctx_info->use_srq = false; + + zxdh_roce_fill_and_set_qpctx_info(iwqp, ctx_info); + if (qp_ret == ZXDH_RDMA_QP_EXIST) { + mdelay(2); + } + err_code = zxdh_cqp_create_qp_cmd(iwqp); + if (err_code) + goto error; + + refcount_set(&iwqp->refcnt, 1); + spin_lock_init(&iwqp->lock); + spin_lock_init(&iwqp->sc_qp.pfpdu.lock); + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
1 : 0; + rf->qp_table[qp_ctx_num - dev->base_qpn] = iwqp; + iwqp->max_send_wr = sq_size; + iwqp->max_recv_wr = rq_size; + + zxdh_qp_add_qos(&iwqp->sc_qp); + + if (udata) { + uresp.lsmm = 1; + uresp.actual_sq_size = sq_size; + uresp.actual_rq_size = rq_size; + uresp.qp_id = qp_num; + uresp.qp_caps = qp->qp_uk.qp_caps; + + err_code = ib_copy_to_udata(udata, &uresp, + min(sizeof(uresp), udata->outlen)); + if (err_code) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy_to_udata failed\n"); + kc_zxdh_destroy_qp(&iwqp->ibqp, udata); + return err_code; + } + } + if (refcount_read(&iwdev->trace_switch.t_switch)) { + ibdev_notice( + &iwdev->ibdev, + "create new QP, type %d, ib qpn 0x%X, max_send_wr %d, max_recv_wr %d\n", + iwqp->ibqp.qp_type, iwqp->ibqp.qp_num, + iwqp->max_send_wr, iwqp->max_recv_wr); + } + init_completion(&iwqp->free_qp); + if (init_attr->qp_type == IB_QPT_GSI) + iwdev->qp1 = iwqp; + return 0; + +error: + zxdh_free_qp_rsrc(iwqp); + + return err_code; +} +#endif /* CREATE_QP_VER_2 */ + +/** + * zxdh_destroy_qp - destroy qp + * @ibqp: qp's ib pointer also to get to device's qp address + * @udata: user data + */ +#ifdef DESTROY_QP_VER_2 +int zxdh_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) +#endif +{ + struct zxdh_qp *iwqp = to_iwqp(ibqp); + struct zxdh_device *iwdev = iwqp->iwdev; + u32 qp_index; + struct iidc_core_dev_info *cdev_info = (struct iidc_core_dev_info *)iwdev->rf->cdev; + struct zxdh_av *av = &iwqp->roce_ah.av; + struct zxdh_udp_offload_info *udp_info = &iwqp->udp_info; + char s_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + char d_straddr[INET6_ADDRSTRLEN + 20] = { 0 }; + u32 dual_tor_switch = 0xFFFF; + int ret = 0; + + if (iwqp->sc_qp.qp_uk.destroy_pending) + goto free_rsrc; + iwqp->sc_qp.qp_uk.destroy_pending = true; + + zxdh_modify_qp_to_err(&iwqp->sc_qp); + + dual_tor_switch = readl(cdev_info->hw_addr + ZXDH_DUAL_TOR_SWITCH_OFFSET); + pr_debug("%s[%d]: qp_type=%d, hw_addr=0x%llx, dual_tor_switch=0x%x\n", + __func__, __LINE__, + 
iwqp->sc_qp.qp_uk.qp_type, + (u64)(uintptr_t)cdev_info->hw_addr, dual_tor_switch); + if (remote_ip_update_hook && (dual_tor_switch == ZXDH_DUAL_TOR_SWITCH_OPEN) && (iwqp->sc_qp.qp_uk.qp_type == ZXDH_QP_TYPE_ROCE_RC)) { + if (av->sgid_addr.saddr.sa_family == AF_INET6) { + sprintf(s_straddr, ", src_ip: %pI6", &av->sgid_addr.saddr_in6.sin6_addr); + sprintf(d_straddr, ", dest_ip: %pI6", &av->dgid_addr.saddr_in6.sin6_addr); + } else { + sprintf(s_straddr, ", src_ip: %pI4", &av->sgid_addr.saddr_in.sin_addr); + sprintf(d_straddr, ", dest_ip: %pI4", &av->dgid_addr.saddr_in.sin_addr); + } + ret = qp_remote_ip_info_process(ibqp, RDMA_DEL_REMOTE_IP); + if (ret) { + pr_err("%s[%d]: ipv4=%d, name=%s, op_type=%d, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_port=0x%x, dst_port=0x%x, s_straddr=%s, d_straddr=%s\n", + __func__, __LINE__, udp_info->ipv4, iwdev->netdev->name, RDMA_DEL_REMOTE_IP, udp_info->local_ipaddr[0], udp_info->local_ipaddr[1], udp_info->local_ipaddr[2], udp_info->local_ipaddr[3], + udp_info->dest_ip_addr[0], udp_info->dest_ip_addr[1], udp_info->dest_ip_addr[2], udp_info->dest_ip_addr[3], udp_info->src_port, udp_info->dst_port, s_straddr, d_straddr); + } else { + pr_debug("%s[%d]: ipv4=%d, name=%s, op_type=%d, src_ip=0x%x-0x%x-0x%x-0x%x, dst_ip=0x%x-0x%x-0x%x-0x%x, src_port=0x%x, dst_port=0x%x, s_straddr=%s, d_straddr=%s\n", + __func__, __LINE__, udp_info->ipv4, iwdev->netdev->name, RDMA_DEL_REMOTE_IP, udp_info->local_ipaddr[0], udp_info->local_ipaddr[1], udp_info->local_ipaddr[2], udp_info->local_ipaddr[3], + udp_info->dest_ip_addr[0], udp_info->dest_ip_addr[1], udp_info->dest_ip_addr[2], udp_info->dest_ip_addr[3], udp_info->src_port, udp_info->dst_port, s_straddr, d_straddr); + } + } + + if (!iwqp->user_mode) + cancel_delayed_work_sync(&iwqp->dwork_flush); + + zxdh_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + + zxdh_sc_qp_resetctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va); + + if (!iwdev->rf->reset && + 
zxdh_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp)) + return iwqp->user_mode ? -ENOTRECOVERABLE : 0; +free_rsrc: + if (!iwqp->user_mode) { + if (iwqp->iwscq) { + zxdh_clean_cqes(iwqp, iwqp->iwscq); + if (iwqp->iwrcq != iwqp->iwscq) + zxdh_clean_cqes(iwqp, iwqp->iwrcq); + } + } + if (refcount_read(&iwdev->trace_switch.t_switch)) { + ibdev_notice( + &iwdev->ibdev, + "destroy QP, type %d, ib qpn 0x%X, max_send_wr %d, max_recv_wr %d\n", + iwqp->ibqp.qp_type, iwqp->ibqp.qp_num, + iwqp->max_send_wr, iwqp->max_recv_wr); + } + if (iwqp->ibqp.qp_num <= 1) + qp_index = iwqp->ibqp.qp_num; + else + qp_index = iwqp->ibqp.qp_num - iwdev->rf->sc_dev.base_qpn; + zxdh_store_free_qp(iwdev->rf, qp_index); + zxdh_free_qp_rsrc(iwqp); + return 0; +} + +/** + * zxdh_cq_round_up - return round up cq wq depth + * @wqdepth: wq depth in quanta to round up + */ +int zxdh_cq_round_up(u32 wqdepth) +{ + int scount = 1; + + for (wqdepth--; scount <= 16; scount *= 2) + wqdepth |= wqdepth >> scount; + + return ++wqdepth; +} + +int zxdh_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata) +{ +#ifdef CREATE_CQ_VER_4 + struct ib_udata *udata = &attrs->driver_udata; // attrs->ucore ? 
+#endif +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + struct ib_device *ibdev = ibcq->device; +#endif + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_pci_f *rf = iwdev->rf; +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + struct zxdh_cq *iwcq = to_iwcq(ibcq); +#else + struct zxdh_cq *iwcq = NULL; +#endif + u32 cq_num = 0; + struct zxdh_sc_cq *cq; + struct zxdh_sc_dev *dev = &rf->sc_dev; + struct zxdh_cq_init_info info = {}; + int status; + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + struct zxdh_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; + unsigned long flags; + int err_code; + int entries = attr->cqe; + + if (attr->cqe < ZXDH_MIN_CQ_SIZE || + attr->cqe > ZXDH_MAX_CQ_SIZE) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + err_code = + cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); + if (err_code) + return err_code; +#else + err_code = + cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); + if (err_code) + return ERR_PTR(err_code); + + iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); + if (!iwcq) + return ERR_PTR(-ENOMEM); +#endif + err_code = zxdh_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, + &rf->next_cq); + if (err_code) +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + return err_code; +#else + goto error; +#endif + cq_num += dev->base_cqn; + cq = &iwcq->sc_cq; + cq->back_cq = iwcq; + iwcq->cq_num = cq_num; + refcount_set(&iwcq->refcnt, 1); + spin_lock_init(&iwcq->lock); + INIT_LIST_HEAD(&iwcq->resize_list); + INIT_LIST_HEAD(&iwcq->cmpl_generated); + info.dev = dev; + ukinfo->cq_size = max(entries, 4); /* Depth of CQ */ + ukinfo->cq_size = zxdh_cq_round_up(ukinfo->cq_size); + ukinfo->cq_id = cq_num; + ukinfo->cqe_size = ZXDH_CQE_SIZE_64; + ukinfo->cq_log_size = zxdh_num_to_log(ukinfo->cq_size); + iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; + info.ceq_id = dev->base_ceqn + 1; + 
info.ceq_index = 1; + if (attr->comp_vector < rf->ceqs_count) { + if (attr->comp_vector == 0) { + info.ceq_id = dev->base_ceqn + 1; + } else { + info.ceq_id = + dev->base_ceqn + + attr->comp_vector; /* attr->comp_vector default value is 0 */ + info.ceq_index = attr->comp_vector; + } + } + info.ceq_id_valid = true; + info.ceqe_mask = 1; + info.type = ZXDH_CQ_TYPE_IO; + + if (udata) { + struct zxdh_ucontext *ucontext; + struct zxdh_create_cq_req req = {}; + struct zxdh_cq_mr *cqmr; + struct zxdh_pbl *iwpbl; + struct zxdh_pbl *iwpbl_shadow; + struct zxdh_cq_mr *cqmr_shadow; + + iwcq->user_mode = true; + ucontext = kc_get_ucontext(udata); + if (ib_copy_from_udata(&req, udata, + min(sizeof(req), udata->inlen))) { + err_code = -EFAULT; + goto cq_free_rsrc; + } + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl = zxdh_get_pbl((unsigned long)req.user_cq_buf, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + if (!iwpbl) { + err_code = -EPROTO; + goto cq_free_rsrc; + } + iwcq->iwpbl = iwpbl; + iwcq->cq_mem_size = 0; + cqmr = &iwpbl->cq_mr; + + if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & + ZXDH_FEATURE_CQ_RESIZE && + !ucontext->legacy_mode) { + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, + flags); + iwpbl_shadow = zxdh_get_pbl( + (unsigned long)req.user_shadow_area, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, + flags); + + if (!iwpbl_shadow) { + err_code = -EPROTO; + goto cq_free_rsrc; + } + iwcq->iwpbl_shadow = iwpbl_shadow; + cqmr_shadow = &iwpbl_shadow->cq_mr; + info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; + cqmr->split = true; + } else { + info.shadow_area_pa = cqmr->shadow; + } + if (iwpbl->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = cqmr->cq_pbl.idx; + } else { + info.cq_base_pa = cqmr->cq_pbl.addr; + } + } else { + /* Kmode allocations */ + int rsize; + + if (entries < 1 || entries > 
rf->max_cqe) { + err_code = -EINVAL; + goto cq_free_rsrc; + } + + entries++; + ukinfo->cq_size = zxdh_cq_round_up(entries); + ukinfo->cq_log_size = zxdh_num_to_log(ukinfo->cq_size); + + rsize = info.cq_uk_init_info.cq_size * + sizeof(struct zxdh_extended_cqe); + + iwcq->kmem.size = ALIGN(round_up(rsize, ZXDH_HW_PAGE_SIZE), + ZXDH_HW_PAGE_SIZE); + iwcq->kmem.va = dma_alloc_coherent(dev->hw->device, + iwcq->kmem.size, + &iwcq->kmem.pa, GFP_KERNEL); + if (!iwcq->kmem.va) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + + iwcq->kmem_shadow.size = ALIGN(ZXDH_SHADOW_AREA_SIZE << 3, 64); + iwcq->kmem_shadow.va = dma_alloc_coherent( + dev->hw->device, iwcq->kmem_shadow.size, + &iwcq->kmem_shadow.pa, GFP_KERNEL); + + if (!iwcq->kmem_shadow.va) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + info.shadow_area_pa = iwcq->kmem_shadow.pa; + ukinfo->shadow_area = iwcq->kmem_shadow.va; + ukinfo->cq_base = iwcq->kmem.va; + info.cq_base_pa = iwcq->kmem.pa; + } + + info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, + (u32)ZXDH_MAX_CQ_READ_THRESH); + if (zxdh_sc_cq_init(cq, &info)) { + pr_err("VERBS: init cq fail\n"); + err_code = -EPROTO; + goto cq_free_rsrc; + } + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_CQ_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_create.cq = cq; + cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto cq_free_rsrc; + } + + if (udata) { + struct zxdh_create_cq_resp resp = {}; + + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.cq_size = info.cq_uk_init_info.cq_size; + if (ib_copy_to_udata(udata, &resp, + min(sizeof(resp), udata->outlen))) { + zxdh_dbg(iwdev_to_idev(iwdev), + "VERBS: copy to user data\n"); + err_code = -EPROTO; + goto 
cq_destroy; + } + } + + rf->cq_table[cq_num - dev->base_cqn] = iwcq; + init_completion(&iwcq->free_cq); + +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + return 0; +#else + return &iwcq->ibcq; +#endif +cq_destroy: + zxdh_cq_wq_destroy(rf, cq); +cq_free_rsrc: + zxdh_cq_free_rsrc(rf, iwcq); +#if defined(CREATE_CQ_VER_3) || defined(CREATE_CQ_VER_4) + return err_code; +#else +error: + kfree(iwcq); + return ERR_PTR(err_code); +#endif +} + +/** + * zxdh_copy_user_pgaddrs - copy user page address to pble's os locally + * @iwmr: iwmr for IB's user page addresses + * @pblpar: ple pointer to save 1 level or 0 level pble + * @pbleinfo: pble info + * @level: indicated level 0, 1 or 2 + * @use_pbles: ple pointer to save 1 level or 0 level pble + * @pble_type: ple pointer to save 1 level or 0 level pble + */ +#ifdef COPY_USER_PGADDR_VER_4 +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool use_pbles, + bool pble_type) +{ + struct ib_umem *region = NULL; + struct zxdh_pbl *iwpbl = NULL; + struct ib_block_iter biter; + struct zxdh_pble_alloc *palloc = NULL; + struct zxdh_pble_info *pinfo = NULL; + struct zxdh_sc_dev *dev = NULL; + + u32 idx = 0; + u32 pbl_cnt = 0; + u64 *pbl = NULL; + u32 l2_pinfo_cnt = 0; + int j; + + region = iwmr->region; + iwpbl = &iwmr->iwpbl; + palloc = &iwpbl->pble_alloc; + + if (use_pbles) { + if (!(*pbleinfo)) + return; + dev = (*pbleinfo)->chunkinfo.pchunk->dev; + pbl = (*pbleinfo)->addr; + } else { + pbl = pblpar; + } + + pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; + if (iwmr->type == ZXDH_MEMREG_TYPE_QP) + iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl); + rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) { + *pbl = rdma_block_iter_dma_address(&biter); + if (++pbl_cnt == palloc->total_cnt) + break; + pbl = zxdh_next_pbl_addr(pbl, &pinfo, &idx, &l2_pinfo_cnt); + } + + if (use_pbles) { + if (true == (*pbleinfo)->pble_copy) { + if (level == PBLE_LEVEL_1) { + zxdh_cqp_config_pble_table_cmd( + dev, (*pbleinfo), + palloc->total_cnt << 3, pble_type); + } else if (level == PBLE_LEVEL_2) { + if ((palloc->total_cnt % 512) == 0) { + l2_pinfo_cnt = palloc->total_cnt >> 9; + } else { + l2_pinfo_cnt = + (palloc->total_cnt >> 9) + 1; + } + + pinfo = palloc->level2.leaf; + for (j = 0; j < l2_pinfo_cnt; j++) { + zxdh_cqp_config_pble_table_cmd( + dev, pinfo, pinfo->cnt << 3, + pble_type); + pinfo++; + } + } + } + } +} +#endif + +/** + * zxdh_destroy_ah - Destroy address handle + * @ibah: pointer to address handle + * @ah_flags: destroy flags + */ +#if defined(DESTROY_AH_VER_4) +int zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags) +{ + struct zxdh_device *iwdev = to_iwdev(ibah->device); + struct zxdh_ah *ah = to_iwah(ibah); + + if (ah->parent_ah) { + mutex_lock(&iwdev->ah_list_lock); + if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) { + mutex_unlock(&iwdev->ah_list_lock); + return 0; + } + list_del(&ah->parent_ah->list); + kfree(ah->parent_ah); + iwdev->ah_list_cnt--; + mutex_unlock(&iwdev->ah_list_lock); + } + zxdh_ah_cqp_op(iwdev->rf, &ah->sc_ah, ZXDH_OP_AH_DESTROY, false, NULL, + ah); + + zxdh_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, + ah->sc_ah.ah_info.ah_idx); + + return 0; +} +#endif + +#ifdef DEREG_MR_VER_2 +int zxdh_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) +#else +int zxdh_dereg_mr(struct ib_mr *ib_mr) +#endif +{ + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + 
if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + if (iwmr->region) { + struct zxdh_ucontext *ucontext; +#ifdef DEREG_MR_VER_2 + + ucontext = rdma_udata_to_drv_context( + udata, struct zxdh_ucontext, ibucontext); +#else + struct ib_pd *ibpd = ib_mr->pd; + + ucontext = to_ucontext(ibpd->uobject->context); +#endif + zxdh_del_memlist(iwmr, ucontext); + } + goto done; + } + + ret = zxdh_hwdereg_mr(ib_mr); + if (ret) + return ret; + + zxdh_free_stag(iwdev, iwmr->stag); +done: + if (iwpbl->pbl_allocated) { + if (iwmr->type != ZXDH_MEMREG_TYPE_MEM) { + if (iwmr->region) + zxdh_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + } else { + zxdh_free_pble(iwdev->rf->pble_mr_rsrc, + &iwpbl->pble_alloc); + } + } + + if (iwmr->region) + ib_umem_release(iwmr->region); + + kfree(iwmr); + + return 0; +} + +#ifdef REREG_MR_VER_2 +/* + * zxdh_rereg_user_mr - Re-Register a user memory region + * @ibmr: ib mem to access iwarp mr pointer + * @flags: bit mask to indicate which of the attr's of MR modified + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @new access flags: bit mask of access flags + * @new_pd: ptr of pd + * @udata: user data + */ +struct ib_mr *zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 len, u64 virt, int new_access, + struct ib_pd *new_pd, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_mr->device); + struct zxdh_mr *iwmr = to_iwmr(ib_mr); + struct zxdh_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) + return ERR_PTR(-EINVAL); + + if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) + return ERR_PTR(-EOPNOTSUPP); + + ret = zxdh_hwdereg_mr(ib_mr); + if (ret) + return ERR_PTR(ret); + + if (flags & IB_MR_REREG_ACCESS) + iwmr->access = new_access; + + if (flags & IB_MR_REREG_PD) { + iwmr->ibmr.pd = new_pd; + iwmr->ibmr.device = new_pd->device; + } + + if (flags & IB_MR_REREG_TRANS) { + if (iwpbl->pbl_allocated) { + 
zxdh_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + iwpbl->pbl_allocated = false; + } + if (iwmr->region) { + ib_umem_release(iwmr->region); + iwmr->region = NULL; + } + + ib_mr = zxdh_rereg_mr_trans(iwmr, start, len, virt, udata); + } else { + ret = zxdh_hwreg_mr(iwdev, iwmr, iwmr->access); + if (ret) + return ERR_PTR(ret); + } + + return ib_mr; +} +#endif +#ifdef SET_ROCE_CM_INFO_VER_3 +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id) +{ + const struct ib_gid_attr *sgid_attr; + int ret; + struct zxdh_av *av = &iwqp->roce_ah.av; + + sgid_attr = attr->ah_attr.grh.sgid_attr; + if (kc_deref_sgid_attr(sgid_attr)) { + ret = rdma_read_gid_l2_fields( + sgid_attr, vlan_id, iwqp->ctx_info.roce_info->mac_addr); + if (ret) + return ret; + } + + rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); + return 0; +} +#endif + +#ifdef ZXDH_DESTROY_CQ_VER_4 +/** + * zxdh_destroy_cq - destroy cq + * @ib_cq: cq pointer + * @udata: user data + */ +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ib_cq->device); + struct zxdh_cq *iwcq = to_iwcq(ib_cq); + struct zxdh_sc_cq *cq = &iwcq->sc_cq; + struct zxdh_sc_dev *dev = cq->dev; + struct zxdh_sc_ceq *ceq = dev->ceq[cq->ceq_index]; + struct zxdh_ceq *iwceq = container_of(ceq, struct zxdh_ceq, sc_ceq); + unsigned long flags; + + cq->cq_type = 0; + cq->back_cq = NULL; + + spin_lock_irqsave(&iwcq->lock, flags); + cq->cq_uk.valid_cq = false; + if (!list_empty(&iwcq->cmpl_generated)) + zxdh_remove_cmpls_list(iwcq); + if (!list_empty(&iwcq->resize_list)) + zxdh_process_resize_list(iwcq, iwdev, NULL); + spin_unlock_irqrestore(&iwcq->lock, flags); + + if (ib_cq->comp_wq) { + msleep(5); + cancel_work_sync(&ib_cq->work); + } + + zxdh_cq_rem_ref(ib_cq); + wait_for_completion(&iwcq->free_cq); + + zxdh_cq_wq_destroy(iwdev->rf, cq); + zxdh_cq_free_rsrc(iwdev->rf, iwcq); + + spin_lock_irqsave(&iwceq->ce_lock, flags); + 
zxdh_sc_cleanup_ceqes(cq, ceq); + spin_unlock_irqrestore(&iwceq->ce_lock, flags); + + return 0; +} + +#endif /* ZXDH_DESTROY_CQ_VER_4 */ + +#ifdef ZXDH_ALLOC_MW_VER_2 +/** + * zxdh_alloc_mw - Allocate memory window + * @ibmw: Memory Window + * @udata: user data pointer + */ +int zxdh_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) +{ + struct zxdh_device *iwdev = to_iwdev(ibmw->device); + struct zxdh_mr *iwmr = to_iwmw(ibmw); + int err_code; + u32 stag; + + stag = zxdh_create_stag(iwdev); + if (!stag) + return -ENOMEM; + + iwmr->stag = stag; + ibmw->rkey = stag; + + err_code = zxdh_hw_alloc_mw(iwdev, iwmr); + if (err_code) { + zxdh_free_stag(iwdev, stag); + return err_code; + } + + return 0; +} + +#endif /* ZXDH_ALLOC_MW_VER_2 */ + +/** + * zxdh_disassociate_ucontext - Disassociate user context + * @context: ib user context + */ +void zxdh_disassociate_ucontext(struct ib_ucontext *context) +{ +} + +#ifndef NETDEV_TO_IBDEV_SUPPORT +struct ib_device *ib_device_get_by_netdev(struct net_device *netdev, + int driver_id) +{ + struct zxdh_device *iwdev; + struct zxdh_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_for_each_entry(hdl, &zxdh_handlers, list) { + iwdev = hdl->iwdev; + if (netdev == iwdev->netdev) { + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + return &iwdev->ibdev; + } + } + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + + return NULL; +} + +void ib_unregister_device_put(struct ib_device *device) +{ + ib_unregister_device(device); +} + +#endif /* NETDEV_TO_IBDEV_SUPPORT */ +struct zxdh_device *zxdh_device_get_by_source_netdev(struct net_device *netdev) +{ + struct zxdh_device *iwdev; + struct zxdh_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&zxdh_handler_lock, flags); + list_for_each_entry(hdl, &zxdh_handlers, list) { + iwdev = hdl->iwdev; + if (netdev == iwdev->source_netdev) { + spin_unlock_irqrestore(&zxdh_handler_lock, flags); + return iwdev; + } + } + 
spin_unlock_irqrestore(&zxdh_handler_lock, flags); + + return NULL; +} +/** + * zxdh_query_gid_roce - Query port GID for Roce + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +#ifdef QUERY_GID_ROCE_V2 +int zxdh_query_gid_roce(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +#endif +{ + int ret; + + ret = rdma_query_gid(ibdev, port, index, gid); + if (ret == -EAGAIN) { + memcpy(gid, &zgid, sizeof(*gid)); + return 0; + } + + return ret; +} + +/** + * zxdh_modify_port - modify port attributes + * @ibdev: device pointer from stack + * @port: port number for query + * @mask: Property mask + * @props: returning device attributes + */ +#ifdef MODIFY_PORT_V2 +int zxdh_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props) +#endif +{ + if (port > 1) + return -EINVAL; + + return 0; +} + +/** + * zxdh_query_pkey - Query partition key + * @ibdev: device pointer from stack + * @port: port number + * @index: index of pkey + * @pkey: pointer to store the pkey + */ +#ifdef QUERY_PKEY_V2 +int zxdh_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) +#endif +{ + if (index >= ZXDH_PKEY_TBL_SZ) + return -EINVAL; + + *pkey = ZXDH_DEFAULT_PKEY; + return 0; +} + +#ifdef ROCE_PORT_IMMUTABLE_V2 +int zxdh_roce_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +#endif +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP | RDMA_CORE_CAP_PROT_ROCE; + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + +/** + * zxdh_query_port - get port attributes + * @ibdev: device pointer from stack + * @port: port number for query + * @props: returning device attributes + */ +#ifdef QUERY_PORT_V2 +int 
zxdh_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct net_device *netdev = iwdev->netdev; + u32 val = 0; + /* no need to zero out pros here. done by caller */ + + props->max_mtu = IB_MTU_4096; + props->active_mtu = zxdh_mtu_int_to_enum(netdev->mtu); + props->lid = 0; + props->lmc = 0; + props->sm_lid = 0; + props->sm_sl = 0; + if (netif_carrier_ok(netdev) && netif_running(netdev)) { + props->state = IB_PORT_ACTIVE; + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP; + } else { + props->state = IB_PORT_DOWN; + props->phys_state = IB_PORT_PHYS_STATE_DISABLED; + } + zxdh_get_eth_speed(ibdev, netdev, port, &props->active_speed, + &props->active_width); + if (rdma_protocol_roce(ibdev, 1)) { + props->gid_tbl_len = 255; + kc_set_props_ip_gid_caps(props); + props->pkey_tbl_len = ZXDH_PKEY_TBL_SZ; + } else { + props->gid_tbl_len = 1; + } + props->qkey_viol_cntr = 0; + props->port_cap_flags |= IB_PORT_CM_SUP; + props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size; + val = rd32(iwdev->rf->sc_dev.hw, RDMARX_PRI_BASE_RD); + props->qkey_viol_cntr = + (u32)FIELD_GET(ZXDH_PRI_BASE_RD_BAD_QKEY_COUNTER, val); + return 0; +} + +#ifdef ALLOC_HW_STATS_STRUCT_V2 +extern const struct rdma_stat_desc zxdh_hw_stat_descs[]; + +#endif + +#ifdef ALLOC_HW_STATS_V3 +/** + * zxdh_alloc_hw_port_stats - Allocate a hw stats structure + * @ibdev: device pointer from stack + * @port_num: port number + */ +struct rdma_hw_stats *zxdh_alloc_hw_port_stats(struct ib_device *ibdev, + u32 port_num) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev = &iwdev->rf->sc_dev; + + int num_counters = dev->hw_attrs.max_stat_idx; + unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; + + /* We support only per port stats */ + if (port_num == 0) + return NULL; + + if (!dev->privileged) + lifespan = 1000; +#ifdef ALLOC_HW_STATS_STRUCT_V2 + return 
rdma_alloc_hw_stats_struct(zxdh_hw_stat_descs, num_counters, + lifespan); +#else + return rdma_alloc_hw_stats_struct(zxdh_hw_stat_names, num_counters, + lifespan); +#endif +} + +/** + * zxdh_get_hw_stats - Populates the rdma_hw_stats structure + * @ibdev: device pointer from stack + * @stats: stats pointer from stack + * @port_num: port number + * @index: which hw counter the stack is requesting we update + */ +#ifdef GET_HW_STATS_V2 +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + int ret; + int i; + struct zxdh_sc_dev *dev; + struct zxdh_rdma_stats_get rdma_stats; + struct zxdh_hw_stats *stats_entry; + + stats_entry = &iwdev->rf->sc_dev.stats_entry; + dev = &iwdev->rf->sc_dev; + memset(&rdma_stats, 0, sizeof(struct zxdh_rdma_stats_get)); + + ret = zxdh_rdma_stats_read(dev, &rdma_stats); + if (ret) { + return ret; + } + for (i = 0; i < ZXDH_HW_STAT_INDEX_MAX; i++) { + if (rdma_stats.rdma_stats_entry_sta[i] == ZXDH_HW_STATS_VALID) + stats_entry->rdma_stats_entry[i] = + rdma_stats.rdma_stats_entry[i]; + } + memcpy(&stats->value[0], &stats_entry->rdma_stats_entry, + sizeof(u64) * stats->num_counters); + return stats->num_counters; +} + +/* + * zxdh_process_mad - process an incoming MAD packet + * @ibdev: the infiniband device this packet came in on + * @mad_flags: MAD flags + * @port_num: the port number this packet came in on + * @in_wc: the work completion entry for this packet + * @in_grh: the global route header for this packet + * @in_mad: the incoming MAD + * @out_mad: any outgoing MAD reply + * @out_mad_size:outgoing MAD size + * @out_mad_pkey_index:outgoing MAD pkey index + */ +#if defined(PROCESS_MAD_VER_3) || defined(PROCESS_MAD_VER_4) +#ifdef PROCESS_MAD_VER_3 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad 
*out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +#elif defined PROCESS_MAD_VER_4 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + struct zxdh_sc_dev *dev; + u8 mgmt_class; + int ret; + + ret = IB_MAD_RESULT_FAILURE; + dev = &iwdev->rf->sc_dev; + mgmt_class = in_mad->mad_hdr.mgmt_class; + pr_debug( + "%s %d vhca_id:%d mgmt_class:%d base_version:0x%x method:0x%x\n", + __func__, __LINE__, dev->vhca_id, mgmt_class, + in_mad->mad_hdr.base_version, in_mad->mad_hdr.method); + if (in_mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) + return -EINVAL; + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET) + return -EINVAL; + switch (mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + ret = zxdh_process_pma_cmd(dev, port_num, in_mad, out_mad); + break; + default: + ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + break; + } + return ret; +} +#endif + +/** + * zxdh_query_gid - Query port GID + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +#ifdef QUERY_GID_V2 +int zxdh_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid) +#endif +{ + struct zxdh_device *iwdev = to_iwdev(ibdev); + + memset(gid->raw, 0, sizeof(gid->raw)); + ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); + + return 0; +} + +/** + * zxdh_query_qpc - query qpc + * @qp: points to qp + * @qpc_buf: qpc buffer + */ +int zxdh_query_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf) +{ + struct zxdh_sc_dev *dev = qp->dev; + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if 
(!cqp_request) { + err_code = -ENOMEM; + goto free_rsrc; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_QPC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_qpc.dev = dev; + cqp_info->in.u.query_qpc.qpn = qp->qp_ctx_num; + cqp_info->in.u.query_qpc.qpc_buf_pa = qpc_buf->pa; + cqp_info->in.u.query_qpc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + goto free_rsrc; + } + return 0; + +free_rsrc: + return err_code; +} + +void zxdh_print_hw_qpc(__le64 *qp_ctx) +{ + u64 temp; + u64 lsn_bit0, rnr_retry_time_bits_l30, ssn_bits_low20; + u64 hw_sq_tail_bits_low11, rdwqe_pyld_length_bits_low5; + u64 vhca_id_bits_low6; + + pr_info("******TX Part******\n"); + + get_64bit_val(qp_ctx, 0, &temp); + pr_info("txwindow_waddr[7:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(7, 0), temp)); + pr_info("Retry_Count:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 8), temp)); + pr_info("Cur_Retry_Count:0x%llx\n", + FIELD_GET(GENMASK_ULL(13, 11), temp)); + pr_info("read_retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(14), temp)); + pr_info("tx_Last_Ack_PSN:0x%llx\n", + FIELD_GET(GENMASK_ULL(38, 15), temp)); + pr_info("ACK_MSN:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 39), temp)); + lsn_bit0 = (u64)FIELD_GET(BIT_ULL(63), temp); + + get_64bit_val(qp_ctx, 8, &temp); + pr_info("LSN:0x%llx\n", + (FIELD_GET(GENMASK_ULL(22, 0), temp) << 1) + lsn_bit0); + pr_info("tx_Ack_Credits:0x%llx\n", + FIELD_GET(GENMASK_ULL(27, 23), temp)); + pr_info("rnr_retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(28), temp)); + pr_info("rnr_retry_threshold:0x%llx\n", + FIELD_GET(GENMASK_ULL(33, 29), temp)); + rnr_retry_time_bits_l30 = (u64)FIELD_GET(GENMASK_ULL(63, 34), temp); + + get_64bit_val(qp_ctx, 16, &temp); + pr_info("rnr_retry_time:0x%llx\n", + (FIELD_GET(GENMASK_ULL(1, 0), temp) << 30) + + rnr_retry_time_bits_l30); + pr_info("wqe_offset:0x%llx\n", FIELD_GET(GENMASK_ULL(33, 2), temp)); + pr_info("fence 
flag:0x%llx\n", FIELD_GET(GENMASK_ULL(35, 34), temp)); + pr_info("cur_ord_cnt:0x%llx\n", FIELD_GET(GENMASK_ULL(43, 36), temp)); + ssn_bits_low20 = (u64)FIELD_GET(GENMASK_ULL(63, 44), temp); + + get_64bit_val(qp_ctx, 24, &temp); + pr_info("SSN:0x%llx\n", + (FIELD_GET(GENMASK_ULL(3, 0), temp) << 20) + ssn_bits_low20); + pr_info("first_packet_done_flag:0x%llx\n", FIELD_GET(BIT_ULL(4), temp)); + pr_info("PSN MAX:0x%llx\n", FIELD_GET(GENMASK_ULL(28, 5), temp)); + pr_info("PSN_Next:0x%llx\n", FIELD_GET(GENMASK_ULL(52, 29), temp)); + hw_sq_tail_bits_low11 = (u64)FIELD_GET(GENMASK_ULL(63, 53), temp); + + get_64bit_val(qp_ctx, 32, &temp); + pr_info("HW_SQ_Tail:0x%llx\n", + (FIELD_GET(GENMASK_ULL(6, 0), temp) << 11) + + hw_sq_tail_bits_low11); + pr_info("last_packet_time:0x%llx\n", + FIELD_GET(GENMASK_ULL(38, 7), temp)); + pr_info("incast_fragment_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(56, 39), temp)); + pr_info("local_ack_timeout:0x%llx\n", + FIELD_GET(GENMASK_ULL(61, 57), temp)); + pr_info("retry_flag:0x%llx\n", FIELD_GET(BIT_ULL(62), temp)); + + get_64bit_val(qp_ctx, 40, &temp); + pr_info("HW_SQ_Tail_una:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("last_ack_wqe_offset:0x%llx\n", + FIELD_GET(GENMASK_ULL(46, 16), temp)); + pr_info("err_flag:0x%llx\n", FIELD_GET(BIT_ULL(47), temp)); + pr_info("ack_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(48), temp)); + pr_info("in_flight:0x%llx\n", FIELD_GET(GENMASK_ULL(58, 49), temp)); + rdwqe_pyld_length_bits_low5 = (u64)FIELD_GET(GENMASK_ULL(63, 59), temp); + + get_64bit_val(qp_ctx, 48, &temp); + pr_info("rdwqe_pyld_length:0x%llx\n", + (FIELD_GET(GENMASK_ULL(26, 0), temp) << 5) + + rdwqe_pyld_length_bits_low5); + pr_info("package_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(27), temp)); + pr_info("txwindow_waddr[9:8]:0x%llx\n", + FIELD_GET(GENMASK_ULL(29, 28), temp)); + pr_info("txwindow_raddr:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 30), temp)); + pr_info("rd_msg_loss_err_flag:0x%llx\n", FIELD_GET(BIT_ULL(40), temp)); + 
pr_info("pktchk_rd_msg_loss_err_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(42, 41), temp)); + pr_info("recv_rd_msg_loss_err_cnt:0x%llx\n", + FIELD_GET(GENMASK_ULL(44, 43), temp)); + pr_info("recv_rd_msg_loss_err_flag:0x%llx\n", + FIELD_GET(BIT_ULL(45), temp)); + pr_info("recv_err_flag:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 46), temp)); + pr_info("recv_read_flag:0x%llx\n", FIELD_GET(BIT_ULL(48), temp)); + + get_64bit_val(qp_ctx, 56, &temp); + pr_info("retry_cqe_sq_opcode:0x%llx\n", + FIELD_GET(GENMASK_ULL(5, 0), temp)); + + get_64bit_val(qp_ctx, 64, &temp); + pr_info("Service_Type:0x%llx\n", FIELD_GET(GENMASK_ULL(2, 0), temp)); + pr_info("SQ_Virtually_Mapped:0x%llx\n", FIELD_GET(BIT_ULL(3), temp)); + pr_info("SQ_Leaf_PBL_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(5, 4), temp)); + pr_info("is_QP1:0x%llx\n", FIELD_GET(BIT_ULL(6), temp)); + pr_info("IPv4:0x%llx\n", FIELD_GET(BIT_ULL(7), temp)); + pr_info("FastRegisterEnable:0x%llx\n", FIELD_GET(BIT_ULL(8), temp)); + pr_info("BindEnable:0x%llx\n", FIELD_GET(BIT_ULL(9), temp)); + pr_info("Insert_VLAN_Tag:0x%llx\n", FIELD_GET(BIT_ULL(10), temp)); + pr_info("VLAN_Tag:0x%llx\n", FIELD_GET(GENMASK_ULL(26, 11), temp)); + pr_info("PD_Index:0x%llx\n", FIELD_GET(GENMASK_ULL(50, 27), temp)); + pr_info("rev_l_key_en:0x%llx\n", FIELD_GET(BIT_ULL(51), temp)); + pr_info("ECN_enable:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 72, &temp); + pr_info("SQ_Address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 80, &temp); + pr_info("Dest_IP_Address_lo:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 88, &temp); + pr_info("Dest_IP_Address_hi:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 96, &temp); + pr_info("Source_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(31, 16), temp)); + pr_info("Flow_Label:0x%llx\n", FIELD_GET(GENMASK_ULL(51, 32), temp)); + 
pr_info("Hop_Limit_or_TTL:0x%llx\n", + FIELD_GET(GENMASK_ULL(59, 52), temp)); + pr_info("ROCE_Tver:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 60), temp)); + + get_64bit_val(qp_ctx, 104, &temp); + pr_info("Q_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 0), temp)); + pr_info("Dest_QPN:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("ORD_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 112, &temp); + pr_info("P_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 16), temp)); + + get_64bit_val(qp_ctx, 120, &temp); + pr_info("QP_Completion_Context:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 128, &temp); + pr_info("S_IP_low:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 136, &temp); + pr_info("S_IP_high:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 144, &temp); + pr_info("Src_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 0), temp)); + pr_info("PMTU:0x%llx\n", FIELD_GET(GENMASK_ULL(50, 48), temp)); + pr_info("ack_timeout:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 51), temp)); + pr_info("Log_SQ_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(59, 56), temp)); + + get_64bit_val(qp_ctx, 152, &temp); + pr_info("TxCmpQueueNum:0x%llx\n", FIELD_GET(GENMASK_ULL(20, 0), temp)); + pr_info("NVME_OF_QID:0x%llx\n", FIELD_GET(GENMASK_ULL(30, 21), temp)); + pr_info("Is_NVME_OF_Target:0x%llx\n", FIELD_GET(BIT_ULL(31), temp)); + pr_info("Is_NVME_OF_IOQ:0x%llx\n", FIELD_GET(BIT_ULL(32), temp)); + pr_info("GQP_id:0x%llx\n", FIELD_GET(GENMASK_ULL(43, 33), temp)); + pr_info("DCQCN_enable:0x%llx\n", FIELD_GET(BIT_ULL(49), temp)); + pr_info("queue_Tc:0x%llx\n", FIELD_GET(GENMASK_ULL(52, 50), temp)); + + get_64bit_val(qp_ctx, 160, &temp); + pr_info("QPN:0x%llx\n", FIELD_GET(GENMASK_ULL(19, 0), temp)); + pr_info("rtt_first_index:0x%llx\n", + FIELD_GET(GENMASK_ULL(35, 22), temp)); + pr_info("rtt_last_index:0x%llx\n", + 
FIELD_GET(GENMASK_ULL(49, 36), temp)); + pr_info("Traffic_Class_or_TOS:0x%llx\n", + FIELD_GET(GENMASK_ULL(57, 50), temp)); + vhca_id_bits_low6 = (u64)FIELD_GET(GENMASK_ULL(63, 58), temp); + + get_64bit_val(qp_ctx, 168, &temp); + pr_info("VHCA_ID:0x%llx\n", + (FIELD_GET(GENMASK_ULL(3, 0), temp) << 6) + vhca_id_bits_low6); + pr_info("8k_index:0x%llx\n", FIELD_GET(GENMASK_ULL(16, 4), temp)); + pr_info("RDMA_State:0x%llx\n", FIELD_GET(GENMASK_ULL(19, 17), temp)); + pr_info("debug_set:0x%llx\n", FIELD_GET(GENMASK_ULL(29, 20), temp)); + pr_info("qp_link_in:0x%llx\n", FIELD_GET(BIT_ULL(30), temp)); + pr_info("128k_index:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 31), temp)); + + pr_info("******RX Part******\n"); + + get_64bit_val(qp_ctx, 256, &temp); + pr_info("Wr_Dma_Len:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 0), temp)); + pr_info("Wr_R_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("Last_Opcode:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 264, &temp); + pr_info("Wr_Virt_Addr/Q_Key:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 272, &temp); + pr_info("send_psn:0x%llx\n", FIELD_GET(GENMASK_ULL(23, 0), temp)); + pr_info("HW_RQ_Tail/Rnr_Wqe_Index:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 24), temp)); + pr_info("E_PSN/Rnr_Nak_Psn:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 40), temp)); + + get_64bit_val(qp_ctx, 280, &temp); + pr_info("HW_RQ_Tail_credit[14:14]:0x%llx\n", + FIELD_GET(BIT_ULL(0), temp)); + pr_info("nof_check_state:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("qp_check_state:0x%llx\n", FIELD_GET(BIT_ULL(2), temp)); + pr_info("R_MSN:0x%llx\n", FIELD_GET(GENMASK_ULL(26, 3), temp)); + pr_info("ack_nack_flag:0x%llx\n", FIELD_GET(BIT_ULL(27), temp)); + pr_info("HW_RQ_Tail_credit[15:15]:0x%llx\n", + FIELD_GET(BIT_ULL(28), temp)); + pr_info("nak_syn:0x%llx\n", FIELD_GET(GENMASK_ULL(36, 29), temp)); + pr_info("ird_tx_num0/ird_tx_num1:0x%llx\n", + FIELD_GET(GENMASK_ULL(45, 37), temp)); + 
pr_info("ird_rx_num0/ird_rx_num1:0x%llx\n", + FIELD_GET(GENMASK_ULL(54, 46), temp)); + pr_info("cnp_pending:0x%llx\n", FIELD_GET(BIT_ULL(55), temp)); + pr_info("is_in_list:0x%llx\n", FIELD_GET(BIT_ULL(56), temp)); + pr_info("mr_hit_flag:0x%llx\n", FIELD_GET(BIT_ULL(57), temp)); + pr_info("ack_nak_rsv:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 58), temp)); + pr_info("Rnr_Nak_Signal:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 288, &temp); + pr_info("SW_RQ_Tail:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("psn_seq_error_signal:0x%llx\n", FIELD_GET(BIT_ULL(21), temp)); + pr_info("prifield_check_error_signal:0x%llx\n", + FIELD_GET(BIT_ULL(22), temp)); + pr_info("read_tail[0:0]:0x%llx\n", FIELD_GET(BIT_ULL(23), temp)); + pr_info("tx_send_length:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 24), temp)); + + get_64bit_val(qp_ctx, 296, &temp); + pr_info("read_tail[8:1]:0x%llx\n", FIELD_GET(GENMASK_ULL(7, 0), temp)); + pr_info("last_read_psn:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 8), temp)); + pr_info("ird_send_offset:0x%llx\n", + FIELD_GET(GENMASK_ULL(55, 32), temp)); + pr_info("HW_RQ_Tail_credit[13:6]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 56), temp)); + + get_64bit_val(qp_ctx, 304, &temp); + pr_info("Comm Esta sig:0x%llx\n", FIELD_GET(BIT_ULL(0), temp)); + pr_info("rtt:0x%llx\n", FIELD_GET(GENMASK_ULL(16, 1), temp)); + pr_info("cq_overflow:0x%llx\n", FIELD_GET(BIT_ULL(17), temp)); + pr_info("rq:sec_index[27:12] /\n"); + pr_info("srq:wqe_index[15:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(33, 18), temp)); + pr_info("last_expected_sent_read_psn:0x%llx\n", + FIELD_GET(GENMASK_ULL(57, 34), temp)); + pr_info("HW_RQ_Tail_credit[5:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 58), temp)); + + get_64bit_val(qp_ctx, 312, &temp); + pr_info("rq:sec_index[11:0]+first index[63:12] /\n"); + pr_info("srq:wqe_addr:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 320, &temp); + pr_info("S_IP_low:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + 
get_64bit_val(qp_ctx, 328, &temp); + pr_info("Src_MAC[47:32]:0x%llx\n", FIELD_GET(GENMASK_ULL(15, 0), temp)); + pr_info("Dest_MAC:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 16), temp)); + + get_64bit_val(qp_ctx, 336, &temp); + pr_info("Is_NVME_OF_IOQ:0x%llx\n", FIELD_GET(BIT_ULL(0), temp)); + pr_info("Insert_VLAN_Tag:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("PMTU:0x%llx\n", FIELD_GET(GENMASK_ULL(4, 2), temp)); + pr_info("Service_Type:0x%llx\n", FIELD_GET(GENMASK_ULL(7, 5), temp)); + pr_info("IPv4:0x%llx\n", FIELD_GET(BIT_ULL(8), temp)); + pr_info("PD_Index:0x%llx\n", FIELD_GET(GENMASK_ULL(28, 9), temp)); + pr_info("RDMA_State:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 29), temp)); + pr_info("Src_MAC[31:0]:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 32), temp)); + + get_64bit_val(qp_ctx, 344, &temp); + pr_info("Dest_QPN[23:12]:0x%llx\n", + FIELD_GET(GENMASK_ULL(11, 0), temp)); + pr_info("Flow_Label:0x%llx\n", FIELD_GET(GENMASK_ULL(31, 12), temp)); + pr_info("Hop_Limit_or_TTL:0x%llx\n", + FIELD_GET(GENMASK_ULL(39, 32), temp)); + pr_info("Traffic_Class_or_TOS:0x%llx\n", + FIELD_GET(GENMASK_ULL(47, 40), temp)); + pr_info("VLAN_Tag:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 48), temp)); + + get_64bit_val(qp_ctx, 352, &temp); + pr_info("srqn[18:0]:0x%llx /\n", FIELD_GET(GENMASK_ULL(18, 0), temp)); + pr_info("is_nvme_of_target[10:10]+nvme_of_qid[9:0] /\n"); + pr_info("rq_address[63:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 360, &temp); + pr_info("db_address:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 368, &temp); + pr_info("header length:0x%llx\n", FIELD_GET(GENMASK_ULL(9, 0), temp)); + pr_info("P_Key:0x%llx\n", FIELD_GET(GENMASK_ULL(47, 32), temp)); + pr_info("Source_Port_Number:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 48), temp)); + + get_64bit_val(qp_ctx, 376, &temp); + pr_info("wqe_sign_enbale:0x%llx\n", FIELD_GET(BIT_ULL(1), temp)); + pr_info("RQ_Virtually_Mapped:0x%llx\n", FIELD_GET(BIT_ULL(2), temp)); + 
pr_info("IRD_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(6, 3), temp)); + pr_info("Log_RQ_Size:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 7), temp)); + pr_info("Rse_Enable:0x%llx\n", FIELD_GET(BIT_ULL(11), temp)); + pr_info("Rwr_Enable:0x%llx\n", FIELD_GET(BIT_ULL(12), temp)); + pr_info("Rre_Enable:0x%llx\n", FIELD_GET(BIT_ULL(13), temp)); + pr_info("Log_RQ_WQE_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(16, 14), temp)); + pr_info("rq_type:0x%llx\n", FIELD_GET(BIT_ULL(17), temp)); + pr_info("RxCmpQueueNum:0x%llx\n", FIELD_GET(GENMASK_ULL(38, 18), temp)); + pr_info("Dest_QPN[11:0]:0x%llx\n", + FIELD_GET(GENMASK_ULL(50, 39), temp)); + pr_info("RQ_Leaf_PBL_Size:0x%llx\n", + FIELD_GET(GENMASK_ULL(52, 51), temp)); + pr_info("rsv_lkey_enable:0x%llx\n", FIELD_GET(BIT_ULL(53), temp)); + pr_info("t_ver:0x%llx\n", FIELD_GET(GENMASK_ULL(57, 54), temp)); + pr_info("RQ_Rnr_Nak_Timer:0x%llx\n", + FIELD_GET(GENMASK_ULL(62, 58), temp)); + pr_info("rx_Ack_Credits:0x%llx\n", FIELD_GET(BIT_ULL(63), temp)); + + get_64bit_val(qp_ctx, 384, &temp); + pr_info("global_qp_num:0x%llx\n", FIELD_GET(GENMASK_ULL(10, 0), temp)); + pr_info("8k_qp_index:0x%llx\n", FIELD_GET(GENMASK_ULL(23, 11), temp)); + pr_info("debug_set:0x%llx\n", FIELD_GET(GENMASK_ULL(49, 40), temp)); + pr_info("vHCA:0x%llx\n", FIELD_GET(GENMASK_ULL(59, 50), temp)); + pr_info("queue_tc:0x%llx\n", FIELD_GET(GENMASK_ULL(62, 59), temp)); + + get_64bit_val(qp_ctx, 392, &temp); + pr_info("cq_context:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 400, &temp); + pr_info("Dest_IP_Address_hi:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 408, &temp); + pr_info("Dest_IP_Address_lo:0x%llx\n", + FIELD_GET(GENMASK_ULL(63, 0), temp)); + + get_64bit_val(qp_ctx, 416, &temp); + pr_info("S_IP_high:0x%llx\n", FIELD_GET(GENMASK_ULL(63, 0), temp)); +} + +#ifdef GET_LINK_LAYER_V2 +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u32 port_num) +#endif +{ + return IB_LINK_LAYER_ETHERNET; +} + 
+enum ib_mtu zxdh_mtu_int_to_enum(int mtu) +{ + mtu = mtu - ZXDH_MTU_HEADER_RSV; + + if (mtu >= 4096) + return IB_MTU_4096; + else if (mtu >= 2048) + return IB_MTU_2048; + else if (mtu >= 1024) + return IB_MTU_1024; + else if (mtu >= 512) + return IB_MTU_512; + else + return IB_MTU_256; +} + +int zxdh_fill_qpc(struct zxdh_sc_dev *dev, u32 qpn, struct zxdh_dma_mem *qpc_buf) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_QPC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_qpc.dev = dev; + cqp_info->in.u.query_qpc.qpn = qpn; + cqp_info->in.u.query_qpc.qpc_buf_pa = qpc_buf->pa; + cqp_info->in.u.query_qpc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + return err_code; + } + return 0; +} + +int zxdh_fill_cqc(struct zxdh_sc_dev *dev, u32 cqn, struct zxdh_dma_mem *cqc_buf) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_CQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_cqc.dev = dev; + cqp_info->in.u.query_cqc.cqn = cqn; + cqp_info->in.u.query_cqc.cqc_buf_pa = cqc_buf->pa; + cqp_info->in.u.query_cqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) + err_code = -ENOMEM; + + return err_code; +} + 
+int zxdh_fill_ceqc(struct zxdh_sc_dev *dev, u32 ceqn, struct zxdh_dma_mem *ceqc_buf) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_CEQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_ceqc.dev = dev; + cqp_info->in.u.query_ceqc.ceqn = ceqn; + cqp_info->in.u.query_ceqc.ceqc_buf_pa = ceqc_buf->pa; + cqp_info->in.u.query_ceqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) + err_code = -ENOMEM; + + return err_code; +} + +int zxdh_fill_aeqc(struct zxdh_sc_dev *dev, struct zxdh_dma_mem *aeqc_buf) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_AEQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_aeqc.dev = dev; + cqp_info->in.u.query_aeqc.aeqn = dev->vhca_id; + cqp_info->in.u.query_aeqc.aeqc_buf_pa = aeqc_buf->pa; + cqp_info->in.u.query_aeqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + } + + return err_code; +} + +int zxdh_fill_srqc(struct zxdh_sc_dev *dev, u32 srqn, struct zxdh_dma_mem *srqc_buf) +{ + struct zxdh_pci_f *rf = container_of(dev, struct zxdh_pci_f, sc_dev); + struct zxdh_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int err_code = 0; + int status; + + 
cqp_request = zxdh_alloc_and_get_cqp_request(&rf->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + return err_code; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = ZXDH_OP_QUERY_SRQC; + cqp_info->post_sq = 1; + cqp_info->in.u.query_srqc.dev = dev; + cqp_info->in.u.query_srqc.srqn = srqn; + cqp_info->in.u.query_srqc.srqc_buf_pa = srqc_buf->pa; + cqp_info->in.u.query_srqc.scratch = (uintptr_t)cqp_request; + status = zxdh_handle_cqp_op(rf, cqp_request); + zxdh_put_cqp_request(&rf->cqp, cqp_request); + if (status) { + err_code = -ENOMEM; + } + + return err_code; +} diff --git a/drivers/infiniband/hw/zrdma/zrdma_kcompat.h b/drivers/infiniband/hw/zrdma/zrdma_kcompat.h new file mode 100644 index 000000000000..bbbfd5de43a1 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zrdma_kcompat.h @@ -0,0 +1,479 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZRDMA_KCOMPAT_H +#define ZRDMA_KCOMPAT_H + +#ifndef LINUX_VERSION_CODE +#include +#else +#ifndef KERNEL_VERSION +#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(3, 4, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE +#include +#endif +#if KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE +#include +#else +#include +#endif + +// #include "distro_ver.h" + +// #if defined(__OFED_BUILD__) || defined(__OFED_4_8__) +// #if (defined(__OFED_24_04__) || defined(__OFED_24_10__)) && defined(KYLIN_V10_4) +// #include "kylin_kcompat.h" +// #else +// #include "ofed_kcompat.h" +// #endif +// #elif 
defined(RHEL_RELEASE_CODE) +// #include "rhel_kcompat.h" +// #elif defined(CONFIG_SUSE_KERNEL) +// #include "suse_kcompat.h" +// #elif defined(UTS_UBUNTU_RELEASE_ABI) +// #include "ubuntu_kcompat.h" +// #elif defined(KYLIN_RELEASE_CODE) +// #include "kylin_kcompat.h" +// #else +#include "linux_kcompat.h" +// #endif + +#ifndef RDMA_DRIVER_ZXDH +#define RDMA_DRIVER_ZXDH 50 +#endif + +#ifndef IB_QP_ATTR_STANDARD_BITS +#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0) +#endif + +#if (KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE) +#define TASKLET_DATA_TYPE unsigned long +#define TASKLET_FUNC_TYPE void (*)(TASKLET_DATA_TYPE) + +#define tasklet_setup(tasklet, callback) \ + tasklet_init((tasklet), (TASKLET_FUNC_TYPE)(callback), \ + (TASKLET_DATA_TYPE)(tasklet)) + +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */ + +#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) +#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ + +#if !defined(__OFED_BUILD__) && !defined(__OFED_4_8__) +#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE +#define dma_alloc_coherent dma_zalloc_coherent +#endif +#endif + +#if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE && \ + !(defined(KYLIN_V10_4) && (defined(__OFED_24_10__) || defined(__OFED_24_04__))) && \ + !((KERNEL_VERSION(4, 19, 90) == LINUX_VERSION_CODE) && (defined(__OFED_24_10__))) +#define IB_GET_NETDEV_OP_NOT_DEPRECATED +#endif + +#ifdef USE_KMAP +#define kmap_local_page kmap +#if defined(__OFED_BUILD__) && !(KERNEL_VERSION(5, 14, 0) == 
LINUX_VERSION_CODE) +#define kunmap_local(sq_base) kunmap(iwqp->page) +#endif +#endif + +#define ZXDH_MTU_HEADER_RSV 102 + +// #ifdef IB_IW_PKEY +// static inline int zxdh_iw_query_pkey(struct ib_device *ibdev, u8 port, +// u16 index, u16 *pkey) +// { +// *pkey = 0; +// return 0; +// } +// #endif +/*******************************************************************************/ +struct zxdh_mr; +struct zxdh_cq; +struct zxdh_cq_buf; +struct zxdh_ucontext; +u32 zxdh_create_stag(struct zxdh_device *iwdev); +void zxdh_free_stag(struct zxdh_device *iwdev, u32 stag); +int zxdh_hw_alloc_mw(struct zxdh_device *iwdev, struct zxdh_mr *iwmr); +void zxdh_cq_free_rsrc(struct zxdh_pci_f *rf, struct zxdh_cq *iwcq); +int zxdh_process_resize_list(struct zxdh_cq *iwcq, struct zxdh_device *iwdev, + struct zxdh_cq_buf *lcqe_buf); +#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE || defined(GET_ETH_SPEED_V1) +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width); +#elif KERNEL_VERSION(5, 4, 195) == LINUX_VERSION_CODE +#ifdef __OFED_23_10__ +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u16 *speed, u8 *width); +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width); +#endif +#else +int zxdh_get_eth_speed(struct ib_device *dev, struct net_device *netdev, + u32 port_num, u8 *speed, u8 *width); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) +#define ZXDH_SET_UDP_SPORT_BYFLOW_LABLE +#endif + +#ifdef Z_DH_DEBUG +#endif /* Z_DH_DEBUG */ + +#define kc_set_driver_id(x) +/*****************************************************************************/ + +/*********************************************************/ +#ifndef ether_addr_copy +#define ether_addr_copy(mac_addr, new_mac_addr) \ + memcpy(mac_addr, new_mac_addr, ETH_ALEN) +#endif + +#ifndef ether_addr_cmp +#define ether_addr_cmp(mac_addr, new_mac_addr) \ + 
memcmp(mac_addr, new_mac_addr, ETH_ALEN) +#endif + +#ifndef eth_zero_addr +#define eth_zero_addr(mac_addr) memset(mac_addr, 0x00, ETH_ALEN) +#endif + +#if KERNEL_VERSION(2, 6, 35) <= LINUX_VERSION_CODE +#define zxdh_for_each_ipv6_addr(ifp, tmp, idev) \ + list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) +#else +#define zxdh_for_each_ipv6_addr(ifp, tmp, idev) \ + for (ifp = idev->addr_list; ifp != NULL; ifp = ifp->if_next) +#endif /* >= 2.6.35 */ + +#ifdef IB_FW_VERSION_NAME_MAX +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str); +#else +void zxdh_get_dev_fw_str(struct ib_device *dev, char *str, size_t str_len); +#endif /* IB_FW_VERSION_NAME_MAX */ + +/*****************************************************************************/ +#ifdef CREATE_AH_VER_5 +int zxdh_create_ah_v2(struct ib_ah *ib_ah, struct rdma_ah_attr *attr, u32 flags, + struct ib_udata *udata); +int zxdh_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, + struct ib_udata *udata); +#endif + +#ifdef DESTROY_AH_VER_4 +int zxdh_destroy_ah(struct ib_ah *ibah, u32 ah_flags); +#endif + +#ifdef CREATE_CQ_VER_4 +int zxdh_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct uverbs_attr_bundle *attrs); +#elif defined(CREATE_CQ_VER_3) +int zxdh_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +#endif + +/* functions called by zxdh_create_qp and zxdh_free_qp_rsrc */ +int zxdh_validate_qp_attrs(struct ib_qp_init_attr *init_attr, + struct zxdh_device *iwdev); + +void zxdh_setup_virt_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *init_info); + +int zxdh_setup_kmode_qp(struct zxdh_device *iwdev, struct zxdh_qp *iwqp, + struct zxdh_qp_init_info *info, + struct ib_qp_init_attr *init_attr); + +void zxdh_roce_fill_and_set_qpctx_info(struct zxdh_qp *iwqp, + struct zxdh_qp_host_ctx_info *ctx_info); + +int zxdh_cqp_create_qp_cmd(struct zxdh_qp *iwqp); + +void zxdh_free_qp_rsrc(struct zxdh_qp 
*iwqp); + +#ifdef ZXDH_ALLOC_MW_VER_2 +int zxdh_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); +#endif + +#ifdef CREATE_QP_VER_2 +int zxdh_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +#endif + +int zxdh_hw_alloc_stag(struct zxdh_device *iwdev, struct zxdh_mr *iwmr); + +#ifdef ZXDH_ALLOC_MR_VER_0 +struct ib_mr *zxdh_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); +#endif + +#ifdef ALLOC_UCONTEXT_VER_2 +int zxdh_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +#endif + +#ifdef DEALLOC_UCONTEXT_VER_2 +void zxdh_dealloc_ucontext(struct ib_ucontext *context); +#endif + +#if defined(ETHER_COPY_VER_1) +void zxdh_ether_copy(u8 *dmac, struct ib_ah_attr *attr); +#endif + +#ifdef ALLOC_PD_VER_3 +int zxdh_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#endif + +#ifdef DEALLOC_PD_VER_4 +int zxdh_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); +#endif + +#ifdef ZXDH_DESTROY_CQ_VER_4 +int zxdh_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata); +#endif + +#ifdef DESTROY_QP_VER_2 +int zxdh_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); +#define kc_zxdh_destroy_qp(ibqp, udata) zxdh_destroy_qp(ibqp, udata) +#endif + +#ifdef DEREG_MR_VER_2 +int zxdh_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata); +#endif + +int zxdh_hwdereg_mr(struct ib_mr *ib_mr); + +#ifdef REREG_MR_VER_2 +struct ib_mr *zxdh_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 len, u64 virt, int new_access, + struct ib_pd *new_pd, struct ib_udata *udata); +#endif + +int zxdh_hwreg_mr(struct zxdh_device *iwdev, struct zxdh_mr *iwmr, u16 access); + +struct ib_mr *zxdh_rereg_mr_trans(struct zxdh_mr *iwmr, u64 start, u64 len, + u64 virt, struct ib_udata *udata); + +struct zxdh_pbl *zxdh_get_pbl(unsigned long va, struct list_head *pbl_list); + +void zxdh_copy_user_pgaddrs(struct zxdh_mr *iwmr, u64 *pblpar, + struct zxdh_pble_info **pbleinfo, + enum zxdh_pble_level level, bool 
use_pbles, + bool pble_type); + +void zxdh_del_memlist(struct zxdh_mr *iwmr, struct zxdh_ucontext *ucontext); + +void zxdh_unregister_rdma_device(struct ib_device *ibdev); +#ifndef RDMA_MMAP_DB_SUPPORT +int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, + unsigned long pfn, unsigned long size, pgprot_t prot); +#endif +void zxdh_disassociate_ucontext(struct ib_ucontext *context); +int kc_zxdh_set_roce_cm_info(struct zxdh_qp *iwqp, struct ib_qp_attr *attr, + u16 *vlan_id); +int kc_zxdh_create_sysfs_file(struct ib_device *ibdev); +struct zxdh_device *kc_zxdh_get_device(struct net_device *netdev); +void kc_zxdh_put_device(struct zxdh_device *iwdev); + +#ifdef QUERY_GID_ROCE_V2 +int zxdh_query_gid_roce(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); +#endif + +#ifdef MODIFY_PORT_V2 +int zxdh_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props); +#endif + +#ifdef QUERY_PKEY_V2 +int zxdh_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); +#endif + +#ifdef ROCE_PORT_IMMUTABLE_V2 +int zxdh_roce_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +#endif + +#ifdef IW_PORT_IMMUTABLE_V2 +int zxdh_iw_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable); +#endif + +#ifdef ALLOC_HW_STATS_V3 +struct rdma_hw_stats *zxdh_alloc_hw_port_stats(struct ib_device *ibdev, + u32 port_num); +#endif + +#ifdef GET_HW_STATS_V2 +int zxdh_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, + u32 port_num, int index); +#endif + +#ifdef PROCESS_MAD_VER_4 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); +#endif +#ifdef PROCESS_MAD_VER_3 +int zxdh_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num, + 
const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad, + size_t *out_mad_size, u16 *out_mad_pkey_index); +#endif + +#ifdef QUERY_GID_V2 +int zxdh_query_gid(struct ib_device *ibdev, u32 port, int index, + union ib_gid *gid); +#endif + +int zxdh_query_qpc(struct zxdh_sc_qp *qp, struct zxdh_dma_mem *qpc_buf); +void zxdh_print_hw_qpc(__le64 *qp_ctx); + +#ifdef GET_LINK_LAYER_V2 +enum rdma_link_layer zxdh_get_link_layer(struct ib_device *ibdev, u32 port_num); +#endif + +#ifdef QUERY_PORT_V2 +int zxdh_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); +#endif + +void zxdh_clean_cqes(struct zxdh_qp *iwqp, struct zxdh_cq *iwcq); +#ifndef NETDEV_TO_IBDEV_SUPPORT +struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, + int driver_id); +void ib_unregister_device_put(struct ib_device *device); +#endif +struct zxdh_device *zxdh_device_get_by_source_netdev(struct net_device *netdev); +#if defined(DEREG_MR_VER_2) && defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.ops.dereg_mr((iwqp)->lsmm_mr, NULL)) +#elif defined(DEREG_MR_VER_2) && !defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr, NULL)) +#elif !defined(DEREG_MR_VER_2) && defined(HAS_IB_SET_DEVICE_OP) +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.ops.dereg_mr((iwqp)->lsmm_mr)) +#else +#define kc_free_lsmm_dereg_mr(iwdev, iwqp) \ + ((iwdev)->ibdev.dereg_mr((iwqp)->lsmm_mr)) +#endif + +static inline int cq_validate_flags(u32 flags, u8 hw_rev) +{ + /* GEN1 does not support CQ create flags */ + if (hw_rev == ZXDH_GEN_1) + return flags ? -EOPNOTSUPP : 0; + + return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? 
-EOPNOTSUPP : + 0; +} + +static inline u64 *zxdh_next_pbl_addr(u64 *pbl, struct zxdh_pble_info **pinfo, + u32 *idx, u32 *l2_pinfo_cnt) +{ + *idx += 1; + if (!(*pinfo) || *idx != (*pinfo)->cnt) + return ++pbl; + *idx = 0; + (*pinfo)++; + *l2_pinfo_cnt += 1; + return (*pinfo)->addr; +} + +/* Introduced in this series https://lore.kernel.org/linux-rdma/0-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com/ + * An zrdma version helper doing same for older functions with difference that iova is passed in + * as opposed to derived from umem->iova. + */ +static inline size_t zxdh_ib_umem_num_dma_blocks(struct ib_umem *umem, + unsigned long pgsz, u64 iova) +{ + /* some older OFED distros do not have ALIGN_DOWN */ +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a)-1), (a)) +#endif + + return (size_t)((ALIGN(iova + umem->length, pgsz) - + ALIGN_DOWN(iova, pgsz))) / + pgsz; +} + +int zxdh_fill_qpc(struct zxdh_sc_dev *dev, u32 qpn, struct zxdh_dma_mem *qpc_buf); +int zxdh_fill_cqc(struct zxdh_sc_dev *dev, u32 cqn, struct zxdh_dma_mem *cqc_buf); +int zxdh_fill_ceqc(struct zxdh_sc_dev *dev, u32 ceqn, struct zxdh_dma_mem *ceqc_buf); +int zxdh_fill_aeqc(struct zxdh_sc_dev *dev, struct zxdh_dma_mem *aeqc_buf); +int zxdh_fill_srqc(struct zxdh_sc_dev *dev, u32 srqn, struct zxdh_dma_mem *srqc_buf); + +enum ib_mtu zxdh_mtu_int_to_enum(int mtu); + +/* UAPI */ +#if KERNEL_VERSION(4, 19, 0) == LINUX_VERSION_CODE || defined(KYLIN_V10_4) +#define ZXDH_UAPI_DEF +#endif + +#if KERNEL_VERSION(5, 0, 0) <= LINUX_VERSION_CODE +#define ZXDH_UAPI_DEF +#endif + +/* ZXDH_SW_RDMA_DEVICE */ +#if (KERNEL_VERSION(5, 1, 0) <= LINUX_VERSION_CODE) +#define zxdh_rdma_device_to_drv_device(device, ibdev) \ + rdma_device_to_drv_device(device, struct zxdh_device, ibdev) +#else +#define zxdh_rdma_device_to_drv_device(device, ibdev) \ + container_of(device, struct zxdh_device, ibdev.dev) +#endif + +#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE) +#define IB_READ_GID_ATTRIBUTE_NETDEVICE_NOT_DEFINE 
+#endif + +#endif /* ZRDMA_KCOMPAT_H_ */ diff --git a/drivers/infiniband/hw/zrdma/zxdh_auxiliary_bus.h b/drivers/infiniband/hw/zrdma/zxdh_auxiliary_bus.h new file mode 100644 index 000000000000..caa5bea29832 --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zxdh_auxiliary_bus.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define ZXDH_AUXILIARY_NAME_SIZE 32 + +struct zxdh_auxiliary_device_id { + char name[ZXDH_AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + +struct zxdh_auxiliary_device { + struct device dev; + const char *name; + uint32_t id; +}; + +/** + * struct zxdh_auxiliary_driver - Definition of an auxiliary bus driver + * @probe: Called when a matching device is added to the bus. + * @remove: Called when device is removed from the bus. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called to put the device to sleep mode. Usually to a power state. + * @resume: Called to bring a device from sleep mode. + * @name: Driver name. + * @driver: Core driver structure. + * @id_table: Table of devices this driver should match on the bus. + * + * Auxiliary drivers follow the standard driver model convention, where + * discovery/enumeration is handled by the core, and drivers provide probe() + * and remove() methods. They support power management and shutdown + * notifications using the standard conventions. + * + * Auxiliary drivers register themselves with the bus by calling + * zxdh_auxiliary_driver_register(). The id_table contains the match_names of + * auxiliary devices that a driver can bind with. + * + * .. 
code-block:: c + * + * static const struct zxdh_auxiliary_device_id my_auxiliary_id_table[] = { + * { .name = "foo_mod.foo_dev" }, + * {}, + * }; + * + * MODULE_DEVICE_TABLE(zxdh_auxiliary, my_auxiliary_id_table); + * + * struct zxdh_auxiliary_driver my_drv = { + * .name = "myauxiliarydrv", + * .id_table = my_auxiliary_id_table, + * .probe = my_drv_probe, + * .remove = my_drv_remove + * }; + */ +struct zxdh_auxiliary_driver { + int32_t (*probe)(struct zxdh_auxiliary_device *auxdev, + const struct zxdh_auxiliary_device_id *id); + int32_t (*remove)(struct zxdh_auxiliary_device *auxdev); + void (*shutdown)(struct zxdh_auxiliary_device *auxdev); + int32_t (*suspend)(struct zxdh_auxiliary_device *auxdev, + pm_message_t state); + int32_t (*resume)(struct zxdh_auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct zxdh_auxiliary_device_id *id_table; +}; + +int32_t zxdh_aux_drv_register(struct zxdh_auxiliary_driver *auxdrv, + struct module *owner, const char *modname); +#define zxdh_auxiliary_driver_register(auxdrv) \ + zxdh_aux_drv_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void zxdh_auxiliary_driver_unregister(struct zxdh_auxiliary_driver *auxdrv); + +#ifdef __cplusplus +} +#endif + +#endif /* _AUXILIARY_BUS_H_ */ diff --git a/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_cmds.h b/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_cmds.h new file mode 100644 index 000000000000..71d8d67ca05c --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_cmds.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_IOCTL_CMDS_H +#define ZXDH_USER_IOCTL_CMDS_H + +#include +#include + +enum zxdh_ib_dev_get_log_trace_attrs { + ZXDH_IB_ATTR_DEV_GET_LOG_TARCE_SWITCH = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_set_log_trace_attrs { + ZXDH_IB_ATTR_DEV_SET_LOG_TARCE_SWITCH = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_cap_start { + 
ZXDH_IB_ATTR_DEV_CAP_START = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_CAP_START_RESP, +}; + +enum zxdh_ib_dev_cap_stop { + ZXDH_IB_ATTR_DEV_CAP_STOP = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_cap_free { + ZXDH_IB_ATTR_DEV_CAP_FREE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_mp_cap { + ZXDH_IB_ATTR_DEV_MP_CAP = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_MP_CAP_RESP, +}; + +enum zxdh_ib_dev_mp_get_data { + ZXDH_IB_ATTR_DEV_MP_GET_DATA = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_mp_cap_clear { + ZXDH_IB_ATTR_DEV_MP_CAP_CLEAR = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_get_act_vhca_gqps { + ZXDH_IB_ATTR_DEV_GET_ACT_VHCA_GQPS_RESP = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_get_cc_basic_info { + ZXDH_IB_ATTR_DEV_GET_CC_BASIC_INFO_RESP = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_get_hmc { + ZXDH_IB_ATTR_DEV_GET_HMC = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_GET_HMC_RESP, +}; + +enum zxdh_ib_dev_get_obj_data { + ZXDH_IB_ATTR_DEV_GET_OBJ_DATA = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_GET_OBJ_DATA_RESP, +}; + +enum zxdh_ib_dev_health_check { + ZXDH_IB_ATTR_DEV_HEALTH_CHECK = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_HEALTH_CHECK_RESP, +}; + +enum zxdh_ib_dev_cfg_parameter { + ZXDH_IB_ATTR_DEV_CFG_PARAMETER = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_show_res_map { + ZXDH_IB_ATTR_DEV_SHOW_RES_MAP = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_DEV_SHOW_RES_MAP_RESP, +}; + +enum zxdh_ib_dev_read_ram { + ZXDH_IB_ATTR_DEV_READ_RAM = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum zxdh_ib_dev_methods { + ZXDH_IB_METHOD_DEV_GET_LOG_TRACE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_METHOD_DEV_SET_LOG_TRACE, + ZXDH_IB_METHOD_DEV_CAP_START, + ZXDH_IB_METHOD_DEV_CAP_STOP, + ZXDH_IB_METHOD_DEV_CAP_FREE, + ZXDH_IB_METHOD_DEV_MP_CAP, + ZXDH_IB_METHOD_DEV_MP_GET_DATA, + ZXDH_IB_METHOD_DEV_MP_CAP_CLEAR, +}; + +enum zxdh_ib_device_methods { + ZXDH_IB_METHOD_DEV_GET_ACT_VHCA_GQPS = (1U << 
UVERBS_ID_NS_SHIFT), + ZXDH_IB_METHOD_DEV_GET_CC_BASIC_INFO, + ZXDH_IB_METHOD_DEV_GET_HMC, + ZXDH_IB_METHOD_DEV_GET_OBJ_DATA, + ZXDH_IB_METHOD_DEV_HEALTH_CHECK, + ZXDH_IB_METHOD_DEV_CFG_PARAMETER, + ZXDH_IB_METHOD_DEV_SHOW_RES_MAP, + ZXDH_IB_METHOD_DEV_READ_RAM, +}; + +enum zxdh_ib_qp_modify_udp_sport_attrs { + ZXDH_IB_ATTR_QP_UDP_PORT = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_QPN, +}; + +enum zxdh_ib_qp_query_qpc_attrs { + ZXDH_IB_ATTR_QP_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_QUERY_RESP, +}; + +enum zxdh_ib_qp_modify_qpc_attrs { + ZXDH_IB_ATTR_QP_MODIFY_QPC_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_MODIFY_QPC_REQ, + ZXDH_IB_ATTR_QP_MODIFY_QPC_MASK, +}; + +enum zxdh_ib_qp_reset_qp_attrs { + ZXDH_IB_ATTR_QP_RESET_QP_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_RESET_OP_CODE, +}; + +enum zxdh_ib_qp_credit_flag_attrs { + ZXDH_IB_ATTR_QP_SET_CREDIT_FLAG_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_ATTR_QP_CREDIT_FLAG, +}; + +enum zxdh_ib_qp_methods { + ZXDH_IB_METHOD_QP_MODIFY_UDP_SPORT = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_METHOD_QP_QUERY_QPC, + ZXDH_IB_METHOD_QP_MODIFY_QPC, + ZXDH_IB_METHOD_QP_RESET_QP, + ZXDH_IB_METHOD_QP_SET_CREDIT_FLAG, +}; + +enum zxdh_ib_objects { + ZXDH_IB_OBJECT_DEV = (1U << UVERBS_ID_NS_SHIFT), + ZXDH_IB_OBJECT_QP_OBJ, + ZXDH_IB_OBJECT_DEVICE_EX, +}; + +#endif \ No newline at end of file diff --git a/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_verbs.h b/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_verbs.h new file mode 100644 index 000000000000..66ee98c054ca --- /dev/null +++ b/drivers/infiniband/hw/zrdma/zxdh_user_ioctl_verbs.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* Copyright (c) 2023 - 2024 ZTE Corporation */ + +#ifndef ZXDH_USER_IOCTL_VERBS_H +#define ZXDH_USER_IOCTL_VERBS_H + +#include + +struct zxdh_query_qpc_resp { + __u8 retry_flag; + __u8 rnr_retry_flag; + __u8 read_retry_flag; + __u8 cur_retry_count; + __u8 retry_cqe_sq_opcode; + __u8 
err_flag; + __u8 ack_err_flag; + __u8 package_err_flag; + __u8 recv_err_flag; + __u8 retry_count; + __u32 tx_last_ack_psn; +}; + +struct zxdh_modify_qpc_req { + __u8 retry_flag; + __u8 rnr_retry_flag; + __u8 read_retry_flag; + __u8 cur_retry_count; + __u8 retry_cqe_sq_opcode; + __u8 err_flag; + __u8 ack_err_flag; + __u8 package_err_flag; +}; + +struct zxdh_cap_start_resp { + __u64 cap_pa_node0; + __u64 cap_pa_node1; +}; + +struct zxdh_mp_cap_resp { + __u8 mcode_type; + __u8 cap_gqp_num; + __u16 cap_gqpid[4]; + __u64 cap_pa; +}; + +#define MAX_ACTIVE_GQP_NUM 16 +struct zxdh_active_vhca_gqps { + __u16 vhca_id; + __u16 gqp_id[MAX_ACTIVE_GQP_NUM]; + __u8 gqp_num; +}; + +struct zxdh_cc_basic_info { + __u32 active_gqp_cnt; + __u16 active_vhca_sq_cnt; + __u16 active_vhca_read_cnt; + __u16 active_vhca_ack_cnt; + __u16 active_qp_sq_cur_cnt; + __u16 active_qp_rq_cur_cnt; + __u16 task_prefetch_recv_com_cnt; + __u64 tx_pkt_cnt; + __u64 rx_pkt_cnt; + __u16 flight_pkt_cnt; + __u16 retry_timeout_cnt; + __u16 retry_read_cnt; + __u16 retry_rnr_cnt; + __u16 retry_nak_cnt; + __u16 drop_read_msg_cnt; + __u32 tx_pkt_cnp_cnt; + __u32 rx_pkt_cnp_cnt; + __u32 tx_pkt_rtt_t1_cnt; + __u32 rx_pkt_rtt_t2_cnt; + __u32 tx_pkt_rtt_t4_cnt; + __u32 rx_pkt_rtt_t5_cnt; + __u16 limit_tx_sq_cnt; + __u16 limit_tx_read_cnt; + __u32 backpres_tx_pfc_flg_pyh0_3; + __u32 backpres_tx_pfc_flg_pyh4_7; + __u16 limit_tx_ack_cnt; + __u16 backpres_tx_pfc_cnt; + __u16 rx_pkt_ecn_cnt; + __u8 backpres_rx_pfc_cnt; + __u8 backpres_rx; +}; + +struct zxdh_get_object_data_req { + __u32 queue_id; + __u8 object_id; + __u32 entry_idx; + __u8 object_num; +}; + +struct zxdh_get_object_data_resp { + __u64 object_mmap_offset; + __u32 length; + __u32 object_size; + __u64 srqp_aligned_offset; + __u16 vhca_id; + __u8 route_id; +}; + +struct zxdh_db_show_res_map_req { + __u8 type; + __u32 qp_id; + __u64 reg_va; + __u64 value_va; + __u64 idx_va; + __u32 count; +}; + +struct zxdh_db_show_res_map_resp { + __u32 count; + __u64 
qp_8k_index; +}; + +enum zxdh_show_res_map_type { + ZXDH_SHOW_RES_MAP_PF_TO_QPN, + ZXDH_SHOW_RES_MAP_PF_TO_VHCA, + ZXDH_SHOW_RES_MAP_VHCA_TO_PF, + ZXDH_SHOW_RES_MAP_8K_TO_GQP, + ZXDH_SHOW_RES_MAP_GQP_TO_VHCA_CREATED, + ZXDH_SHOW_RES_MAP_GQP_TO_VHCA_ACTIVE, + ZXDH_SHOW_RES_MAP_QP_TO_8K, + ZXDH_SHOW_RES_MAP_UNKNOWN, +}; + +enum zxdh_context_type { + ZXDH_RX_READ_QPC = 1, + ZXDH_TX_READ_QPC, + ZXDH_READ_CQC, + ZXDH_READ_CEQC, + ZXDH_READ_AEQC, + ZXDH_RX_READ_SRQC, + ZXDH_READ_MRTE, +}; + +struct zxdh_context_req { + enum zxdh_context_type type; + __u32 resource_id; +}; + +#define MAX_CONTEXT_SIZE 22 +struct zxdh_context_resp { + __u64 context_info[MAX_CONTEXT_SIZE]; + __u8 context_size; +}; + +enum zxdh_health_check_reg_type { + ZXDH_NORMAL_REG, + ZXDH_WRITE_FIRST_REG, + ZXDH_SMMU_REG, +}; + +struct zxdh_health_check_req { + __u64 reg_va; + __u64 value_va; + __u64 reg_value_va_ex; + __u16 count; + __u8 reg_type : 2; +}; + +enum zxdh_cfg_dev_parameter_type { + TX_STOP_ON_AEQ = 1, + RX_STOP_ON_AEQ, + TXRX_STOP_IOVA_CAP, + CLEAR_ALL_CC_BASIC_CNT, + CLEAR_ALL_GQPS_MP_CAP, +}; + +struct zxdh_health_check_resp { + __u16 count; + __u16 count_ex; +}; + +struct zxdh_reg_value { + __u64 reg_addr; + __u32 value; +}; + +struct zxdh_cfg_dev_parameter_req { + __u8 type; + __u8 reserved1; + __u16 reserved2; +}; + +enum hw_module { + HW_MODULE_TX, + HW_MODULE_RX, + HW_MODULE_CQP, +}; + +enum zxdh_ram_read_error_code_const { + ZXDH_CUSTOM_READ_RAM_ERROR_BASE = 100, + ZXDH_COPY_USER_PARAM_ERROR, + ZXDH_READ_RAM_ERROR, + ZXDH_COPY_DATA_TO_USER_ERROR, + /* Must be last entry*/ + ZXDH_CUSTOM_ERROR_UNKOWN, +}; + +struct zxdh_read_ram_req { + __u32 ram_id; + __u32 ram_addr; + __u32 ram_width; + __u32 read_count; + __u64 value_va; + __u16 hw_module; + __u16 reserved1; + __u32 reserved2; +}; + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 2f3fa53af598..182562249c4c 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -198,5 
+198,6 @@ source "drivers/net/ethernet/wangxun/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/northlink/Kconfig" +source "drivers/net/ethernet/dinghai/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1de1e6e61291..8b9b8e069395 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -111,3 +111,4 @@ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ obj-$(CONFIG_NET_VENDOR_NORTHLINK) += northlink/ obj-$(CONFIG_THIRDPARTY_TCNIC_NET) += tcnic/ +obj-$(CONFIG_NET_VENDOR_DINGHAI) += dinghai/ diff --git a/drivers/net/ethernet/dinghai/Kconfig b/drivers/net/ethernet/dinghai/Kconfig new file mode 100644 index 000000000000..86831c41842a --- /dev/null +++ b/drivers/net/ethernet/dinghai/Kconfig @@ -0,0 +1,97 @@ +config NET_VENDOR_DINGHAI + tristate "Dinghai Devices" + depends on (X86 || ARM64) + default y + help + If you have a Ethernet DINGHAI device belonging to this + class, say y. + +if NET_VENDOR_DINGHAI + +config DINGHAI_DH_CMD + tristate "Dinghai DH_CMD Support" + depends on (X86 || ARM64) + default m + help + Command interface for Dinghai devices. + +config DINGHAI_EN_AUX + tristate "Dinghai Auxiliary Support" + depends on (X86 || ARM64) + default y + help + Auxiliary functionality for Dinghai devices. + +config DINGHAI_NP + tristate "Dinghai NP Support" + default m + depends on DINGHAI_DH_CMD && (X86 || ARM64) + help + Network Processor (NP) support. Requires DINGHAI_DH_CMD. + +config DINGHAI_PTP + tristate "Dinghai PTP Support" + depends on (X86 || ARM64) + default m + depends on DINGHAI_DH_CMD && PTP_1588_CLOCK && (X86 || ARM64) + help + Precision Time Protocol (PTP) support. Requires DINGHAI_DH_CMD and PTP_1588_CLOCK. + +config DINGHAI_TSN + bool "Dinghai Support TSN Macro" + depends on (X86 || ARM64) + default y + help + Enable TSN-related compile-time macros. 
+ +config DINGHAI_TSN_M + tristate "Dinghai TSN Module Support" + depends on (X86 || ARM64) + default m + help + TSN runtime module support. + +config ZXDH_SF + tristate "Dinghai SF Support" + depends on (X86 || ARM64) + default y + help + Sub-Function (SF) support. + +config ZXDH_MSGQ + tristate "Dinghai MSGQ Support" + depends on (X86 || ARM64) + default m + help + Message queue support. + +config DINGHAI_SEC + tristate "Dinghai SEC Support" + depends on (X86 || ARM64) + default n + help + Security (SEC) module support. + +config DINGHAI_AUXILIARY + tristate "Dinghai AUXILIARY Support" + depends on (X86 || ARM64) + default m + help + Enable Dinghai AUXILIARY module Support + +# Main PF driver +config DINGHAI_PF + tristate "Dinghai PF Support" + default m + depends on DINGHAI_DH_CMD + depends on DINGHAI_AUXILIARY + depends on DINGHAI_NP + depends on DINGHAI_PTP + depends on DINGHAI_TSN_M + depends on (X86 || ARM64) + help + Primary Function (PF) driver for Dinghai devices. + This driver requires CMD, Auxiliary, NP, PTP, and TSN modules. + Note: PTP also requires PTP_1588_CLOCK to be enabled. 
+ +endif # NET_VENDOR_DINGHAI diff --git a/drivers/net/ethernet/dinghai/Makefile b/drivers/net/ethernet/dinghai/Makefile new file mode 100644 index 000000000000..1f0b65ed6694 --- /dev/null +++ b/drivers/net/ethernet/dinghai/Makefile @@ -0,0 +1,49 @@ +subdir-ccflags-y += -I$(src) +subdir-ccflags-y += -I$(srctree)/include/linux/dinghai/ +ccflags-y += -Werror + +EXTRA_CFLAGS += -DZXDH_MSGQ + +ifeq ($(CONFIG_DINGHAI_SEC),m) +EXTRA_CFLAGS += -DZXDH_SEC +endif + +ifeq ($(CONFIG_DINGHAI_PTP),m) +EXTRA_CFLAGS += -DPTP_DRIVER_INTERFACE_EN +endif + +ifeq ($(CONFIG_DRIVER_VERSION),) +EXTRA_CFLAGS += -DDRIVER_VERSION_VAL=\"1.0-1\" +else +EXTRA_CFLAGS += -DDRIVER_VERSION_VAL=\"$(CONFIG_DRIVER_VERSION)\" +endif + +obj-$(CONFIG_DINGHAI_DH_CMD) += dinghai10e_cmd.o +dinghai10e_cmd-y := dh_cmd.o cmd/msg_main.o cmd/msg_chan_lock.o cmd/msg_chan_test.o log.o + +obj-$(CONFIG_DINGHAI_PF) += dinghai10e.o +dinghai10e-y := events.o en_pf.o eq.o pci_irq.o en_pf/en_pf_irq.o en_pf/en_pf_eq.o devlink.o en_pf/en_pf_devlink.o en_pf/en_pf_events.o \ + en_pf/msg_func.o dh_procfs.o lag/lag.o lag/lag_procfs.o plcr.o sriov_sysfs.o xarray.o health.o log.o slib.o + +dinghai10e-$(CONFIG_ZXDH_SF) += irq_affinity.o en_sf.o en_sf/en_sf_eq.o en_sf/en_sf_irq.o en_sf/en_sf_devlink.o + +dinghai10e-$(CONFIG_DINGHAI_EN_AUX) += en_aux.o eq.o pci_irq.o irq_affinity.o en_aux/queue.o en_aux/en_aux_cmd.o en_aux/en_aux_eq.o \ + en_aux/en_aux_events.o en_ethtool/ethtool.o en_aux/en_aux_ioctl.o \ + en_aux/dcbnl/en_dcbnl.o en_aux/dcbnl/en_dcbnl_api.o \ + zxdh_tools/zxdh_tools_ioctl.o zxdh_tools/zxdh_tools_netlink.o \ + en_aux/en_1588_pkt_proc.o en_aux/en_1588_pkt_proc_func.o xarray.o en_aux/selq.o log.o slib.o +dinghai10e-y += en_aux/drs_sec_dtb.o +dinghai10e-y += en_aux/priv_queue.o +dinghai10e-y += bonding/zxdh_lag.o bonding/rdma_ops.o + +obj-$(CONFIG_DINGHAI_AUXILIARY) += dinghai10e_auxiliary.o +dinghai10e_auxiliary-y += en_auxiliary.o + +obj-$(CONFIG_DINGHAI_PTP) += dinghai10e_ptp.o +dinghai10e_ptp-y 
:=en_ptp/tod_driver.o en_ptp/tod_driver_stub.o en_ptp/zxdh_ptp.o log.o + +obj-$(CONFIG_DINGHAI_TSN_M) += dinghai10e_tsn.o +dinghai10e_tsn-y :=en_tsn/zxdh_tsn.o en_tsn/zxdh_tsn_reg.o en_tsn/zxdh_tsn_ioctl.o log.o + +include $(src)/en_np/Makefile + diff --git a/drivers/net/ethernet/dinghai/bonding/rdma_ops.c b/drivers/net/ethernet/dinghai/bonding/rdma_ops.c new file mode 100644 index 000000000000..6b7423dd02a4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/bonding/rdma_ops.c @@ -0,0 +1,58 @@ +#include "rdma_ops.h" + +static struct zxdh_rdma_hb_if *zxdh_rdma_hb_ops; + +void zxdh_hwbond_register_rdma_ops(struct zxdh_rdma_hb_if *ops) +{ + if (zxdh_rdma_hb_ops == NULL) { + zxdh_rdma_hb_ops = ops; + } + LOG_INFO("enter\n"); + zxdh_update_rdma_hwbond_master(); +} +EXPORT_SYMBOL(zxdh_hwbond_register_rdma_ops); + +void zxdh_hwbond_unregister_rdma_ops(void) +{ + zxdh_rdma_hb_ops = NULL; +} +EXPORT_SYMBOL(zxdh_hwbond_unregister_rdma_ops); + +int32_t zxdh_set_rdma_hwbond_master(struct net_device *primary_netdev, + struct net_device *linux_bond_netdev, + bool hb_enable) +{ + if (zxdh_rdma_hb_ops == NULL) { + LOG_DEBUG("zxdh_rdma_hb_ops unregister\n"); + return -1; + } + if (zxdh_rdma_hb_ops->cfg_rdma_hb_master == NULL) { + LOG_ERR("cfg_rdma_hb_master unregister\n"); + return -1; + } + if (primary_netdev == NULL || linux_bond_netdev == NULL) { + LOG_ERR("primary_netdev or linux_bond_netdev is null\n"); + return -1; + } + LOG_INFO("primary_netdev %s linux_bond_netdev %s hb_enable %d\n", + primary_netdev->name, linux_bond_netdev->name, hb_enable); + return zxdh_rdma_hb_ops->cfg_rdma_hb_master( + primary_netdev, linux_bond_netdev, hb_enable); +} + +int32_t zxdh_set_rdma_hwbond_speed(struct net_device *netdev, uint32_t bps) +{ + if (zxdh_rdma_hb_ops == NULL) { + LOG_DEBUG("zxdh_rdma_hb_ops unregister\n"); + return -1; + } + if (zxdh_rdma_hb_ops->cfg_rdma_hb_speed == NULL) { + LOG_ERR("cfg_rdma_hb_speed unregister\n"); + return -1; + } + if (bps == 0 || bps == SPEED_UNKNOWN) { + 
LOG_ERR("speed invalid\n"); + return -1; + } + return zxdh_rdma_hb_ops->cfg_rdma_hb_speed(netdev, bps); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/bonding/rdma_ops.h b/drivers/net/ethernet/dinghai/bonding/rdma_ops.h new file mode 100644 index 000000000000..b2763e820cae --- /dev/null +++ b/drivers/net/ethernet/dinghai/bonding/rdma_ops.h @@ -0,0 +1,21 @@ +#ifndef ZXDH_RDMA_OPS_H +#define ZXDH_RDMA_OPS_H + +#include +#include + +struct zxdh_rdma_hb_if { + int32_t (*cfg_rdma_hb_master)(struct net_device *primary_netdev, + struct net_device *linux_bond_netdev, + bool hb_enable); + int32_t (*cfg_rdma_hb_speed)(struct net_device *netdev, uint32_t bps); +}; + +int32_t zxdh_set_rdma_hwbond_master(struct net_device *primary_netdev, + struct net_device *linux_bond_netdev, + bool hb_enable); +int32_t zxdh_set_rdma_hwbond_speed(struct net_device *netdev, uint32_t bps); + +extern void zxdh_update_rdma_hwbond_master(void); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/bonding/zxdh_lag.c b/drivers/net/ethernet/dinghai/bonding/zxdh_lag.c new file mode 100644 index 000000000000..82a52f7a4bef --- /dev/null +++ b/drivers/net/ethernet/dinghai/bonding/zxdh_lag.c @@ -0,0 +1,1989 @@ +#include +#include +#include +#include +#include +#include "zxdh_lag.h" +#include "rdma_ops.h" +#include +#include + +/* 定义全局变量,记录bond组信息 */ +static LIST_HEAD(zxdh_bond_list); +static LIST_HEAD(zxdh_aux_netdev_list); +static DEFINE_IDA(zxdh_bond_group_ids); +static struct mutex mlock; +extern const struct net_device_ops zxdh_netdev_ops; +#define RDMA_PHY_PORT_0_bit 17 +#define RDMA_PHY_PORT_1_bit 16 + +static int32_t zxdh_changeupper_event_handler(struct zxdh_bond_device *bond_dev, + struct event_node *node); + +void init_bond_dev_hooks(void); +void destroy_bond_dev_hooks(void); + +void zxdh_lag_lock_init(void) +{ + mutex_init(&mlock); + init_bond_dev_hooks(); +} + +void zxdh_lag_lock_deinit(void) +{ + destroy_bond_dev_hooks(); + 
mutex_destroy(&mlock); +} + +static bool netif_is_zxdh_aux(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &zxdh_netdev_ops); +} + +static uint16_t zxdh_convert_pcie_id_2_vfid(uint16_t pcie_id) +{ + u16 pf_id = 0; + u16 ep_id = 0; + + pf_id = (pcie_id >> 8) & 0x7; + ep_id = (pcie_id >> 12) & 0x7; + + return ZXDH_PF_VFID(ep_id, pf_id); +} + +static uint16_t zxdh_covert_netdev_2_vfid(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + return zxdh_convert_pcie_id_2_vfid(en_dev->pcie_id); +} + +static uint32_t zxdh_covert_hash_type(uint32_t hash_type) +{ + u32 np_hash_type = 0; + + switch (hash_type) { + case NETDEV_LAG_HASH_L2: { + np_hash_type = ZXDH_NETDEV_LAG_HASH_L2; + break; + } + case NETDEV_LAG_HASH_L23: { + np_hash_type = ZXDH_NETDEV_LAG_HASH_L23; + break; + } + case NETDEV_LAG_HASH_L34: { + np_hash_type = ZXDH_NETDEV_LAG_HASH_L34; + break; + } + default: { + np_hash_type = ZXDH_NETDEV_LAG_HASH_NONE; + break; + } + } + + return np_hash_type; +} + +static uint32_t zxdh_covert_bond_tx_type(uint32_t tx_type) +{ + u32 np_tx_type = 0; + + switch (tx_type) { + case NETDEV_LAG_TX_TYPE_ACTIVEBACKUP: { + np_tx_type = ZXDH_NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + break; + } + case NETDEV_LAG_TX_TYPE_HASH: { + np_tx_type = ZXDH_NETDEV_LAG_TX_TYPE_HASH; + break; + } + default: { + np_tx_type = ZXDH_NETDEV_LAG_TX_TYPE_UNKNOWN; + break; + } + } + + return np_tx_type; +} + +static int32_t +zxdh_hardware_bond_link(struct zxdh_bond_device *bond_dev, + struct netdev_lag_upper_info *lag_upper_info) +{ + struct zxdh_bond_group *group = bond_dev->group; + + if (!group) { + LOG_ERR("zxdh_hardware_bond_link fail\n"); + return -1; + } + + group->lag_tx_type = (uint8_t)zxdh_covert_bond_tx_type( + (uint32_t)lag_upper_info->tx_type); + group->hash_policy = (uint8_t)zxdh_covert_hash_type( + (uint32_t)lag_upper_info->hash_type); + + return 0; +} + +static struct zxdh_bond_group * 
+zxdh_find_hardware_bond_group(struct net_device *upper) +{ + struct zxdh_bond_group *tmp_group, *bond_group = NULL; + + list_for_each_entry(tmp_group, &zxdh_bond_list, node) { + if (strcmp(dev_name(&upper->dev), + zxdh_bond_group_name(tmp_group)) == 0) { + bond_group = tmp_group; + break; + } + } + + return bond_group; +} + +static void *zxdh_create_hardware_bond_group(struct net_device *upper, + bool is_special_bond) +{ + struct zxdh_bond_group *bond_group = NULL; + + if (!upper) { + return ERR_PTR(-EINVAL); + } + + bond_group = zxdh_find_hardware_bond_group(upper); + if (bond_group) { + goto out; + } + + bond_group = kzalloc(sizeof(*bond_group), GFP_KERNEL); + if (!bond_group) { + return ERR_PTR(-ENOMEM); + } + + /* create bond group id, range [0, 7] */ + if (is_special_bond) { + bond_group->group_ida = ZXDH_SPECIAL_LGA_ID; + } else { + bond_group->group_ida = + ida_alloc_range(&zxdh_bond_group_ids, 1, 7, GFP_KERNEL); + } + if (bond_group->group_ida < 0) { + goto err; + } + + strlcpy(bond_group->name, dev_name(&upper->dev), IFNAMSIZ); + list_add_tail(&bond_group->node, &zxdh_bond_list); + +out: + return bond_group; +err: + kfree(bond_group); + return ERR_PTR(-EINVAL); +} + +static int32_t zxdh_hardware_bond_group_init(struct zxdh_bond_device *bond_dev, + struct net_device *upper) +{ + struct zxdh_bond_group *bond_group = NULL; + + bond_group = zxdh_create_hardware_bond_group( + upper, bond_dev->is_special_bond_dev); + if (IS_ERR(bond_group)) { + return -1; + } + + bond_dev->group = bond_group; + bond_dev->upper_netdev = upper; + + return 0; +} + +static bool +zxdh_is_lower_state_change(struct zxdh_bond_device *bond_dev, + struct netdev_lag_lower_state_info *lag_lower_info) +{ + bool flag = true; + + if (bond_dev->link_up == lag_lower_info->link_up && + bond_dev->tx_enabled == lag_lower_info->tx_enabled) { + flag = false; + } + + return flag; +} + +static uint32_t +zxdh_hardware_bond_set_mac_to_primary(struct zxdh_bond_device *bond_dev, + struct net_device 
*temp_netdev, + struct net_device *primary_netdev) +{ + struct zxdh_en_priv *primary_en_priv, *temp_en_priv; + struct zxdh_en_device *primary_en_dev, *temp_en_dev; + DPP_PF_INFO_T dpp_pf_info; + s32 ret = 0; + u16 sriov_vlan_tpid = 0; + u16 sriov_vlan_id = 0; + u16 current_vport = 0; + struct netdev_hw_addr *ha = NULL; + bool delete_flag = true; + bool add_flag = true; + + if (!netif_is_zxdh_aux(temp_netdev) && + !netif_is_zxdh_aux(primary_netdev)) { + LOG_INFO("that is not zxdh aux netdev\n"); + return -1; + } + + if (!bond_dev->group) { + LOG_INFO("bond_dev->group is NULL\n"); + return -1; + } + + if (bond_dev->group->lag_tx_type != + ZXDH_NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + LOG_DEBUG( + "bond_dev->group->lag_tx_type is not ZXDH_NETDEV_LAG_TX_TYPE_ACTIVEBACKUP\n"); + return 0; + } + + temp_en_priv = netdev_priv(temp_netdev); + temp_en_dev = &temp_en_priv->edev; + if (temp_en_dev->hardware_bond->primary) { //primary口无需给自己更新mac + LOG_DEBUG("primary pf %s don't need update mac for self\n", + temp_netdev->name); + return 0; + } + + if ((!bond_dev->primary) && + (bond_dev->netdev != temp_netdev)) { //非primary口不能处理其余口的mac + LOG_DEBUG("no-primary pf %s can't update mac for other pf %s\n", + bond_dev->netdev->name, temp_netdev->name); + return 0; + } + + primary_en_priv = netdev_priv(primary_netdev); + primary_en_dev = &primary_en_priv->edev; + + dpp_pf_info.slot = primary_en_dev->slot_id; + dpp_pf_info.vport = primary_en_dev->vport; + + if (!memcmp(primary_netdev->dev_addr, temp_netdev->dev_addr, + temp_netdev->addr_len)) { + LOG_INFO( + "primary pf %s netdev mac %pM is same with temp pf %s netdev mac %pM, can't add\n", + primary_netdev->name, primary_netdev->dev_addr, + temp_netdev->name, temp_netdev->dev_addr); + goto err; + } + if (!memcmp(primary_netdev->dev_addr, + temp_en_dev->hardware_bond->last_mac_addr.sa_data, + temp_netdev->addr_len)) { + LOG_INFO( + "primary pf %s netdev mac %pM is same with temp pf %s last add mac %pM, can't del\n", + primary_netdev->name, 
primary_netdev->dev_addr, + temp_netdev->name, + temp_en_dev->hardware_bond->last_mac_addr.sa_data); + goto err; + } + + list_for_each_entry(ha, &primary_netdev->uc.list, + list) { //遍历primary PF通过bridge fdb配置的MAC + if (!memcmp(ha->addr, + temp_en_dev->hardware_bond->last_mac_addr.sa_data, + temp_netdev->addr_len)) { + delete_flag = false; + LOG_INFO( + "%pM is used by uc.list of primary pf %s, can't del\n", + temp_en_dev->hardware_bond->last_mac_addr + .sa_data, + primary_netdev->name); + goto err; + } + if (!memcmp(ha->addr, temp_netdev->dev_addr, + temp_netdev->addr_len)) { + add_flag = false; + LOG_INFO( + "%pM is used by uc.list of primary pf %s, can't add\n", + temp_netdev->dev_addr, primary_netdev->name); + goto err; + } + } + if (delete_flag) { + ret = dpp_unicast_mac_search( + &dpp_pf_info, + temp_en_dev->hardware_bond->last_mac_addr.sa_data, + sriov_vlan_tpid, sriov_vlan_id, + ¤t_vport); //在primary PF的转发域内搜索 + if ((ret == 0) && + (dpp_pf_info.vport == + current_vport)) { //说明当前mac属于primary PF,需要删除 + ret = dpp_del_mac(&dpp_pf_info, + temp_en_dev->hardware_bond + ->last_mac_addr.sa_data, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("pf del mac failed, retval: %d\n", ret); + goto err; + } + } else if ((ret == 0) && + (dpp_pf_info.vport != + current_vport)) { //如果找到mac, 属于primary PF的vf, 则报错返回 + LOG_INFO( + "%pM is used by primary pf(%s)-vf 0x%x, can't del\n", + temp_en_dev->hardware_bond->last_mac_addr + .sa_data, + primary_netdev->name, current_vport); + goto err; + } else if (ret == + DPP_HASH_RC_SRH_FAIL) { //如果没找到mac,则无需删除 + LOG_DEBUG( + "don't find %pM in primary pf(%s), don't need del\n", + temp_netdev->dev_addr, primary_netdev->name); + } else { + LOG_ERR("dpp_unicast_mac_search err ,ret%d\n", ret); + goto err; + } + } + + if (add_flag) { + ret = dpp_unicast_mac_search( + &dpp_pf_info, temp_netdev->dev_addr, sriov_vlan_tpid, + sriov_vlan_id, + ¤t_vport); //在primary PF的转发域内搜索 + if ((ret == 0) && + (dpp_pf_info.vport == + 
current_vport)) { //说明当前mac属于primary PF,不需要添加 + LOG_DEBUG( + "%pM is used by primary pf(%s), don't need add\n", + temp_netdev->dev_addr, primary_netdev->name); + } + + if ((ret == 0) && + (dpp_pf_info.vport != + current_vport)) { //如果找到mac, 属于primary PF的vf, 则报错返回 + LOG_ERR("%pM is used by primary pf(%s) vport %d, can't add\n", + temp_netdev->dev_addr, primary_netdev->name, + current_vport); + goto err; + } else if (ret == DPP_HASH_RC_SRH_FAIL) { //没找到mac。则需要添加 + ret = dpp_add_mac(&dpp_pf_info, temp_netdev->dev_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("pf add mac failed, retval: %d\n", ret); + goto err; + } + } else { + LOG_ERR("dpp_unicast_mac_search err ,ret%d\n", ret); + goto err; + } + } + ether_addr_copy(temp_en_dev->hardware_bond->last_mac_addr.sa_data, + temp_netdev->dev_addr); + ether_addr_copy( + primary_en_dev->hardware_bond->last_mac_addr.sa_data, + temp_netdev + ->dev_addr); //将no-primary PF最后一次下表到primary PF的MAC保存到last_mac_addr中 + return 0; +err: + return -1; +} + +static uint32_t +zxdh_hardware_bond_set_primary_vfid(struct zxdh_bond_device *bond_dev, + struct net_device *temp_netdev, + uint16_t primary_vfid) +{ + struct zxdh_en_priv *en_priv; + struct zxdh_en_device *en_dev; + DPP_PF_INFO_T dpp_pf_info; + + if (!netif_is_zxdh_aux(temp_netdev)) { + LOG_ERR("that is not zxdh aux netdev\n"); + return -1; + } + + if ((!bond_dev->primary) && + (bond_dev->netdev != + temp_netdev)) { //非primary口不能给别的口下表primary_vfid + LOG_DEBUG( + "no-primary pf %s can't update primary_vfid %d to other pf %s\n", + bond_dev->netdev->name, primary_vfid, + temp_netdev->name); + return 0; + } + + en_priv = netdev_priv(temp_netdev); + en_dev = &en_priv->edev; + + dpp_pf_info.slot = en_dev->slot_id; + dpp_pf_info.vport = en_dev->vport; + + // 配置 primary口 的 vfid 到自身NP中 + dpp_uplink_phy_attr_set(&dpp_pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_PRIMARY_PF_VQM_VFID, + primary_vfid); + LOG_INFO("%s set primary pf vqm vfid %hu, phyport %hu\n", + 
temp_netdev->name, primary_vfid, en_dev->phy_port); + + return 0; +} + +static int32_t +zxdh_hardware_bond_get_primary_netdev(struct zxdh_bond_device *bond_dev, + struct net_device **primary_netdev) +{ + struct net_device *ndev_tmp; + struct zxdh_en_priv *en_priv; + struct zxdh_en_device *en_dev; + u32 primary_port_cnt = 0; + + if (!bond_dev->upper_netdev) { + return -1; + } + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond_dev->upper_netdev, ndev_tmp) { + if (!netif_is_zxdh_aux(ndev_tmp)) { + continue; + } + + en_priv = netdev_priv(ndev_tmp); + en_dev = &en_priv->edev; + + if (en_dev->is_primary_port) { + primary_port_cnt++; + *primary_netdev = ndev_tmp; + } + } + rcu_read_unlock(); + + LOG_DEBUG("%s primary port num %u\n", bond_dev->netdev->name, + primary_port_cnt); + + /* if no primary port */ + if ((primary_port_cnt == 0) || (*primary_netdev) || + (primary_port_cnt > 1)) { + return -1; + } + return 0; +} + +static int32_t +zxdh_hardware_bond_prepare_for_vf(struct zxdh_bond_device *bond_dev) +{ + struct net_device *ndev_tmp, *primary_netdev = NULL; + u16 vfid = 0; + s32 ret = 0; + + ret = zxdh_hardware_bond_get_primary_netdev(bond_dev, &primary_netdev); + if (ret != 0) { + LOG_INFO("%s get primary netdev failed\n", + bond_dev->netdev->name); + return -1; + } + + vfid = zxdh_covert_netdev_2_vfid(primary_netdev); + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond_dev->upper_netdev, ndev_tmp) { + zxdh_hardware_bond_set_primary_vfid(bond_dev, ndev_tmp, vfid); + zxdh_hardware_bond_set_mac_to_primary(bond_dev, ndev_tmp, + primary_netdev); + } + rcu_read_unlock(); + + return 0; +} + +static int32_t zxdh_bond_set_dpp_member_port(struct zxdh_bond_device *bond_dev, + bool enable, + struct event_node *node) +{ + u32 lagid = 0; + u8 phy_port = 0; + DPP_PF_INFO_T pf_info = { 0 }; + u32 actual_enable_bit = bond_dev->phy_port == 0 ? 
+ RDMA_PHY_PORT_0_bit : + RDMA_PHY_PORT_1_bit; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(bond_dev->netdev); + en_dev = &en_priv->edev; + if (!bond_dev->group) { + return -1; + } + + lagid = bond_dev->group->group_ida; + + phy_port = bond_dev->phy_port; + pf_info.slot = bond_dev->slot; + pf_info.vport = bond_dev->vport; + + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + if (enable) { + dpp_lag_group_member_add(&pf_info, lagid, phy_port); + } else { + dpp_lag_group_member_del(&pf_info, lagid, phy_port); + } + + /* set panel attribute: BOND_LINK_UP */ + dpp_uplink_phy_attr_set(&pf_info, phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, !!enable); + } + + dpp_pktrx_mcode_glb_cfg_write(&pf_info, actual_enable_bit, + actual_enable_bit, !!enable); + + LOG_INFO( + "%s node %d set members: lagid %hhu, phyport %hhu bond_link_up %s\n", + netdev_name(bond_dev->netdev), node->idx, lagid, phy_port, + enable ? "true" : "false"); + + return 0; +} + +static void zxdh_print_hardware_bond_info(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + struct zxdh_bond_group *group = bond_dev->group; + + LOG_INFO( + "%s node %d event_type %ld bonded: %s txq %hu rxq %hu slot %hu vport 0x%x vfid %hu phyport %hhu linkup %hhu txenable %hhu\n", + bond_dev->netdev->name, node->idx, node->event, + bond_dev->bonded ? "true" : "false", bond_dev->txq, + bond_dev->rxq, bond_dev->slot, bond_dev->vport, bond_dev->vfid, + bond_dev->phy_port, node->link_up, node->tx_enabled); + + if (group) { + LOG_INFO( + "%s node %d event_type %ld master %s: group id %d tx_type %hhu hash_policy %hhu configured %s num_slaves %hu\n", + bond_dev->netdev->name, node->idx, node->event, + group->name, group->group_ida, group->lag_tx_type, + group->hash_policy, + group->configured ? 
"true" : "false", + group->num_slaves); + } +} + +void del_slave_mac(struct zxdh_bond_device *bond_dev) +{ + DPP_PF_INFO_T dpp_pf_info; + u16 sriov_vlan_tpid = 0; + u16 sriov_vlan_id = 0; + s32 ret = 0; + + dpp_pf_info.slot = bond_dev->slot; + dpp_pf_info.vport = bond_dev->vport; + + if (!is_valid_ether_addr(bond_dev->last_mac_addr.sa_data)) { + return; + } + + ret = dpp_del_mac(&dpp_pf_info, bond_dev->last_mac_addr.sa_data, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("pf del mac failed, retval: %d\n", ret); + } else { + LOG_INFO("del MAC %pM\n", bond_dev->last_mac_addr.sa_data); + memset(bond_dev->last_mac_addr.sa_data, 0, ETH_ALEN); + } +} + +static int32_t +zxdh_update_hardware_bond_group(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + s32 group_ida = 0; + s32 ret = 0; + struct zxdh_bond_group *group = bond_dev->group; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + DPP_PF_INFO_T dpp_pf_info = { + .slot = bond_dev->slot, + .vport = bond_dev->vport, + }; + en_priv = netdev_priv(bond_dev->netdev); + en_dev = &en_priv->edev; + LOG_INFO("%s node %d linking %d, event %ld\n", + netdev_name(bond_dev->netdev), node->idx, node->linking, + node->event); + if (!node->linking && + node->event == NETDEV_CHANGEUPPER) { //如果此设备是解除绑定 + bond_dev->group = NULL; + bond_dev->bonded = false; + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + /* vport attr: LAG ID, LAG DISABLE */ + ret = dpp_vport_attr_set(&dpp_pf_info, + SRIOV_VPORT_LAG_ID, 0); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_LAG_ID 0 failed\n"); + return ret; + } + ret = dpp_vport_attr_set(&dpp_pf_info, + SRIOV_VPORT_LAG_EN_OFF, 0); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_LAG_EN_OFF 0 failed\n"); + return ret; + } + /* lag bond attr: disable member */ + dpp_uplink_phy_attr_set( + &dpp_pf_info, bond_dev->phy_port, + UPLINK_PHY_PORT_SRIOV_HD_BOND_EN, + 0); //关闭标卡硬bond标识 + dpp_uplink_phy_attr_set( + 
&dpp_pf_info, bond_dev->phy_port, + UPLINK_PHY_PORT_PRIMARY_PF_VQM_VFID, 0); + /* panel attr: HARDWARE_BOND_ENABLE */ + dpp_uplink_phy_hardware_bond_set(&dpp_pf_info, + bond_dev->phy_port, 0); + if (bond_dev->primary) + del_slave_mac(bond_dev); + } + zxdh_bond_set_dpp_member_port(bond_dev, false, node); + if (bond_dev->primary) { + zxdh_set_rdma_hwbond_master(bond_dev->netdev, + bond_dev->upper_netdev, + false); + zxdh_set_rdma_hwbond_speed(bond_dev->netdev, + en_dev->speed); + } + bond_dev->upper_netdev = NULL; + LOG_INFO("%s hardware bond set slave's group null\n", + netdev_name(bond_dev->netdev)); + ret = -1; + } + /* if no slaves, we need to free bond group */ + if (node->group_slave_num == 0 && node->event == NETDEV_CHANGEUPPER && + !node->linking) { + /* free bond group */ + list_del(&group->node); + ida_free(&zxdh_bond_group_ids, group->group_ida); + kfree(group); + group = NULL; + LOG_INFO("%s hardware bond group disabled\n", + netdev_name(bond_dev->netdev)); + ret = -1; + } + if (ret == -1) { + return ret; + } + + /* if slaves, we check that configured */ + if (!group->configured) { + group_ida = group->group_ida; + dpp_lag_group_create(&dpp_pf_info, group_ida); + dpp_lag_mode_set(&dpp_pf_info, group_ida, group->lag_tx_type); + dpp_lag_group_hash_factor_set(&dpp_pf_info, group_ida, + group->hash_policy); + group->configured = true; + } + + return 0; +} + +static int32_t +zxdh_update_hardware_bond_slave(struct zxdh_bond_device *bond_dev) +{ + s32 ret = 0; + struct zxdh_bond_group *group = bond_dev->group; + DPP_PF_INFO_T dpp_pf_info = { + .slot = bond_dev->slot, + .vport = bond_dev->vport, + }; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(bond_dev->netdev); + en_dev = &en_priv->edev; + + if (bond_dev->bonded) { + return 0; + } + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + return 0; + } + + /* vport attr: LAG ID,LAG ENABLE */ + ret = dpp_vport_attr_set(&dpp_pf_info, 
SRIOV_VPORT_LAG_EN_OFF, 1); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_LAG_EN_OFF 1 failed\n"); + return ret; + } + ret = dpp_vport_attr_set(&dpp_pf_info, SRIOV_VPORT_LAG_ID, + group->group_ida); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_LAG_ID %d failed\n", + group->group_ida); + return ret; + } + dpp_uplink_phy_attr_set(&dpp_pf_info, bond_dev->phy_port, + UPLINK_PHY_PORT_SRIOV_HD_BOND_EN, + 1); //使能标卡硬bond标识 + dpp_uplink_phy_hardware_bond_set(&dpp_pf_info, bond_dev->phy_port, 1); + + if (bond_dev->primary) { + zxdh_set_rdma_hwbond_master(bond_dev->netdev, + bond_dev->upper_netdev, true); + } + + zxdh_hardware_bond_prepare_for_vf(bond_dev); + + bond_dev->bonded = true; + LOG_INFO("bond slave %s enabled\n", netdev_name(bond_dev->netdev)); + + return 0; +} + +static int32_t zxdh_bond_cofig_rdma(struct zxdh_bond_device *bond_dev) +{ + struct net_device *ndev_tmp, *primary_netdev = NULL; + struct zxdh_en_priv *en_priv; + struct zxdh_en_device *en_dev; + u32 primary_port_cnt = 0; + struct ethtool_link_ksettings ks = { 0 }; + + if (!bond_dev->upper_netdev) { + return 0; + } + // 速率计算,通过bond组master,由内核bonding模块计算,不再关心bond模式,协商状态; + // 此处,获取数值,和ethtool bond2 看到的一致 + rtnl_lock(); + if (bond_dev->upper_netdev->ethtool_ops) { + // 获取bond口速率,非自定义设备 + bond_dev->upper_netdev->ethtool_ops->get_link_ksettings( + bond_dev->upper_netdev, &ks); + } + rtnl_unlock(); + + rcu_read_lock(); + // 寻找 primary port,用于给RDMA做配置,; + for_each_netdev_in_bond_rcu(bond_dev->upper_netdev, ndev_tmp) { + if (!netif_is_zxdh_aux(ndev_tmp)) { + continue; + } + en_priv = netdev_priv(ndev_tmp); + en_dev = &en_priv->edev; + if (en_dev->is_primary_port) { + primary_port_cnt++; + primary_netdev = ndev_tmp; + } + } + rcu_read_unlock(); + + /* if no primary port */ + if ((primary_port_cnt == 0) || (!primary_netdev)) { + return 0; + } else if (primary_port_cnt > 1) { + LOG_ERR("no primary port\n"); + return -1; + } + + zxdh_set_rdma_hwbond_speed(bond_dev->upper_netdev, 
ks.base.speed); + + return 0; +} + +/* bond_state*/ +struct bond_port_info { + u8 slave1_state; + u8 slave1_port; + u8 slave2_state; + u8 slave2_port; + u32 slave1_fid; + u32 slave2_fid; +}; + +typedef int (*bond_dev_create_notify_hook_t)(char *ifname, + struct bond_port_info *bond_info, + uint8_t mode); +typedef int (*bond_dev_update_notify_hook_t)(char *ifname, + struct bond_port_info *bond_info); + +/* 全局变量定义 */ +struct bond_dev_hooks { + bond_dev_create_notify_hook_t bond_dev_create_hook; + bond_dev_update_notify_hook_t bond_dev_update_hook; + struct mutex create_hook_lock; + struct mutex update_hook_lock; +}; + +/* 初始化全局变量 */ +static struct bond_dev_hooks bond_hooks = { + .bond_dev_create_hook = NULL, + .bond_dev_update_hook = NULL, + .create_hook_lock = __MUTEX_INITIALIZER(bond_hooks.create_hook_lock), + .update_hook_lock = __MUTEX_INITIALIZER(bond_hooks.update_hook_lock), +}; + +/* 初始化函数 */ +void init_bond_dev_hooks(void) +{ + mutex_init(&bond_hooks.create_hook_lock); + mutex_init(&bond_hooks.update_hook_lock); + bond_hooks.bond_dev_create_hook = NULL; + bond_hooks.bond_dev_update_hook = NULL; + LOG_DEBUG("Bond hooks initialized successfully.\n"); +} + +/* 销毁 bond_dev_hooks 的函数 */ +void destroy_bond_dev_hooks(void) +{ + mutex_lock(&bond_hooks.create_hook_lock); + if (bond_hooks.bond_dev_create_hook) { + bond_hooks.bond_dev_create_hook = NULL; + LOG_DEBUG("bond_dev_create_hook destroyed successfully.\n"); + } + mutex_unlock(&bond_hooks.create_hook_lock); + + mutex_lock(&bond_hooks.update_hook_lock); + if (bond_hooks.bond_dev_update_hook) { + bond_hooks.bond_dev_update_hook = NULL; + LOG_DEBUG("bond_dev_update_hook destroyed successfully.\n"); + } + mutex_unlock(&bond_hooks.update_hook_lock); +} + +/* 注册 bond_dev_create_hook 的函数 */ +int zxdh_register_bond_dev_create_hook(bond_dev_create_notify_hook_t hook) +{ + int ret = 0; + + /* 加锁保护 */ + mutex_lock(&bond_hooks.create_hook_lock); + if (bond_hooks.bond_dev_create_hook && hook) { + LOG_DEBUG(KERN_ERR + "Repeat 
register bond_dev_create_notify_hook_t.\n"); + ret = -1; + } else { + bond_hooks.bond_dev_create_hook = hook; + } + mutex_unlock(&bond_hooks.create_hook_lock); + + return ret; +} +EXPORT_SYMBOL(zxdh_register_bond_dev_create_hook); + +/* 注册 bond_dev_update_hook 的函数 */ +int zxdh_register_bond_dev_update_hook(bond_dev_update_notify_hook_t hook) +{ + int ret = 0; + + /* 加锁保护 */ + mutex_lock(&bond_hooks.update_hook_lock); + if (bond_hooks.bond_dev_update_hook && hook) { + LOG_DEBUG(KERN_ERR + "Repeat register bond_dev_update_notify_hook_t.\n"); + ret = -1; + } else { + bond_hooks.bond_dev_update_hook = hook; + } + mutex_unlock(&bond_hooks.update_hook_lock); + + return ret; +} +EXPORT_SYMBOL(zxdh_register_bond_dev_update_hook); + +/* 调用 bond_dev_create_hook 的函数*/ +void zxdh_bond_dev_create_hook_call(char *ifname, + struct bond_port_info *bond_info, + uint8_t mode) +{ + int ret = 0; + + mutex_lock(&bond_hooks.create_hook_lock); + + if (bond_hooks.bond_dev_create_hook) { + /* 调用钩子函数 */ + ret = bond_hooks.bond_dev_create_hook(ifname, bond_info, mode); + if (ret != 0) { + LOG_DEBUG("Error in bond dev create hook: %d\n", ret); + } else { + LOG_DEBUG( + "bond dev create hook called successfully for %s.\n", + ifname); + } + } else { + LOG_DEBUG("bond dev create hook is not registered.\n"); + } + mutex_unlock(&bond_hooks.create_hook_lock); +} + +/* 调用 bond_dev_update_hook 的函数 */ +void zxdh_bond_dev_update_hook_call(char *ifname, + struct bond_port_info *bond_info) +{ + int ret = 0; + + mutex_lock(&bond_hooks.update_hook_lock); + + if (bond_hooks.bond_dev_update_hook) { + ret = bond_hooks.bond_dev_update_hook(ifname, bond_info); + if (ret != 0) { + LOG_DEBUG("Error in bond dev update hook: %d\n", ret); + } else { + LOG_DEBUG( + "bond dev update hook called successfully for %s.\n", + ifname); + } + } else { + LOG_DEBUG("bond dev update hook is not registered.\n"); + } + mutex_unlock(&bond_hooks.update_hook_lock); +} + +static bool +zxdh_bond_dev_is_support_dualtor(struct 
zxdh_bond_device *hw_bond_dev) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + u64 dual_tor_addr = 0; + + if (!hw_bond_dev) { + return FALSE; + } + en_priv = netdev_priv(hw_bond_dev->netdev); + en_dev = &en_priv->edev; + dual_tor_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_DUALTOR_LABEL_OFFSET; + if (*(uint32_t *)dual_tor_addr != ZXDH_BAR_DUALTOR_LABEL_ON) { + LOG_DEBUG("nic did not support dual tor!.\n"); + return FALSE; + } + return TRUE; +} + +/* 获取面板口对应的面板口以及状态信息*/ +static int32_t zxdh_get_hw_bond_panel_state(struct zxdh_bond_device *bond_dev) +{ + struct net_device *ndev_tmp = NULL; + struct zxdh_en_priv *en_priv; + struct zxdh_en_device *en_dev; + struct bond_port_info bond_info = { 0 }; + + if (!zxdh_bond_dev_is_support_dualtor(bond_dev)) { + return 0; + } + + if (!bond_dev->upper_netdev) { + return 0; + } + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(bond_dev->upper_netdev, ndev_tmp) { + if (!netif_is_zxdh_aux(ndev_tmp)) { + continue; + } + en_priv = netdev_priv(ndev_tmp); + en_dev = &en_priv->edev; + + if (en_dev->panel_id == 0) { + bond_info.slave1_state = + en_dev->hardware_bond->link_up ? 1 : 0; + bond_info.slave1_port = en_dev->phy_port; + LOG_DEBUG( + "slave1 %s states: %u, is_pri: %u, np_port: %u\n", + ndev_tmp->name, bond_info.slave1_state, + en_dev->is_primary_port, bond_info.slave1_port); + } else if (en_dev->panel_id == 1) { + bond_info.slave2_state = + en_dev->hardware_bond->link_up ? 
1 : 0; + bond_info.slave2_port = en_dev->phy_port; + LOG_DEBUG( + "slave2 %s states: %u, is_pri: %u, np_port: %u\n", + ndev_tmp->name, bond_info.slave2_state, + en_dev->is_primary_port, bond_info.slave2_port); + } + } + rcu_read_unlock(); + zxdh_bond_dev_update_hook_call(bond_dev->upper_netdev->name, + &bond_info); + + return 0; +} + +int fid_gen_from_en_dev(struct zxdh_en_device *en_dev, uint32_t *fid_out) +{ + u32 fid = 0; + + if (!en_dev) { + LOG_ERR("err ptr, null ptr en_dev.\n"); + return -1; + } + + fid = ((en_dev->slot_id & 0x0000ffff) << 16); + fid |= en_dev->pcie_id; + + *fid_out = fid; + return 0; +} + +/* 创建操作*/ +static int32_t zxdh_create_hw_bond_panel(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + struct net_device *ndev_tmp = NULL; + struct zxdh_en_priv *en_priv; + struct zxdh_en_device *en_dev; + struct net_device *primary_dev = NULL; + struct bond_port_info bond_info = { 0 }; + + if (!bond_dev->upper_netdev) { + LOG_DEBUG("do not exist uppder_dev.\n"); + return 0; + } + /* 成员口未完全加载成功*/ + if (bond_dev->group->num_slaves != 2) { + LOG_DEBUG("do not have 2 slaves yet.\n"); + return 0; + } + LOG_DEBUG("notify create hook.\n"); + + rcu_read_lock(); + /* 寻找 primary port,用于给RDMA做配置*/ + for_each_netdev_in_bond_rcu(bond_dev->upper_netdev, ndev_tmp) { + if (!netif_is_zxdh_aux(ndev_tmp)) { + continue; + } + en_priv = netdev_priv(ndev_tmp); + en_dev = &en_priv->edev; + if (en_dev->is_primary_port) { + primary_dev = ndev_tmp; + } + + if (en_dev->panel_id == 0) { + bond_info.slave1_state = node->link_up ? 1 : 0; + bond_info.slave1_port = en_dev->phy_port; + fid_gen_from_en_dev(en_dev, &bond_info.slave1_fid); + LOG_DEBUG( + "slave1 fid 0x%x states: %u, is_pri: %u, np_port: %u\n", + bond_info.slave1_fid, bond_info.slave1_state, + en_dev->is_primary_port, bond_info.slave1_port); + } else if (en_dev->panel_id == 1) { + bond_info.slave2_state = node->link_up ? 
1 : 0; + bond_info.slave2_port = en_dev->phy_port; + fid_gen_from_en_dev(en_dev, &bond_info.slave2_fid); + LOG_DEBUG( + "slave2 fid 0x%x states: %u, is_pri: %u, np_port: %u\n", + bond_info.slave2_fid, bond_info.slave2_state, + en_dev->is_primary_port, bond_info.slave2_port); + } + } + rcu_read_unlock(); + + /*如果linking是up表示添加, 检测当前数量是否为2, 找到主口发送*/ + zxdh_bond_dev_create_hook_call(bond_dev->upper_netdev->name, &bond_info, + 1); + LOG_DEBUG( + "add primary dev:%s, slave1_state: %u, port1: %u, slave2_state: %u, port2: %u.\n", + primary_dev->name, bond_info.slave1_state, + bond_info.slave1_port, bond_info.slave2_state, + bond_info.slave2_port); + return 0; +} + +static int32_t zxdh_del_hw_bond_panel(struct zxdh_bond_device *bond_dev) +{ + struct bond_port_info bond_info = { 0 }; + + if (!bond_dev->upper_netdev) { + return 0; + } + + /* 没有完全删除*/ + if (bond_dev->group->num_slaves != 0) { + LOG_DEBUG("not delete all slaves.\n"); + return 0; + } + + zxdh_bond_dev_create_hook_call(bond_dev->upper_netdev->name, &bond_info, + 0); + LOG_DEBUG("del bond_dev : %s.\n", bond_dev->upper_netdev->name); + + return 0; +} + +static void zxdh_do_hardware_bond(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + s32 ret = 0; + bool lagstat = false; + + zxdh_print_hardware_bond_info(bond_dev, node); + if (!bond_dev->group) { + goto out; + } + ret = zxdh_update_hardware_bond_group(bond_dev, node); + if (ret != 0) { + goto out; + } + ret = zxdh_update_hardware_bond_slave(bond_dev); + if (ret != 0) { + LOG_INFO("zxdh_update_hardware_bond_group fail\n"); + } + + /* update: lag bond members、panel link */ + lagstat = node->link_up && node->tx_enabled; + zxdh_bond_set_dpp_member_port(bond_dev, lagstat, node); + + LOG_DEBUG("link_up: %u, tx_enable: %u.\n", node->link_up, + node->tx_enabled); + if (node->link_up == node->tx_enabled) { + /* (link_up = 0, tx_enable = 0) or (link_up = 1, tx_enable = 1) */ + zxdh_get_hw_bond_panel_state(bond_dev); + } + + zxdh_bond_cofig_rdma(bond_dev); 
+out: + return; +} + +static int32_t zxdh_update_special_bond_slave(struct zxdh_bond_device *bond_dev) +{ + s32 ret = 0; + DPP_PF_INFO_T dpp_pf_info = { + .slot = bond_dev->slot, + .vport = bond_dev->vport, + }; + struct zxdh_en_priv *en_priv = netdev_priv(bond_dev->netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + u16 ovs_vfid = en_dev->ops->get_ovs_pf_vfid(en_dev->parent); + + if (bond_dev->bonded || + en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + return 0; + } + + ret = dpp_vport_attr_set(&dpp_pf_info, SRIOV_VPORT_HW_BOND_EN_OFF, 1); + if (ret != 0) { + LOG_ERR("zxdh_update_special_bond_slave dpp_vport_attr_set SRIOV_VPORT_HW_BOND_EN_OFF fail,ret: %d\n", + ret); + return ret; + } + dpp_uplink_phy_hardware_bond_set(&dpp_pf_info, bond_dev->phy_port, 1); + dpp_uplink_phy_attr_set(&dpp_pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_PF_VQM_VFID, ovs_vfid); + bond_dev->bonded = true; + LOG_INFO("bond slave %s enabled\n", netdev_name(bond_dev->netdev)); + + return 0; +} + +static int32_t +zxdh_bond_set_special_bond_member_port(struct zxdh_bond_device *bond_dev, + bool enable, struct event_node *node) +{ + u32 lagid = 0; + u8 phy_port = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(bond_dev->netdev); + en_dev = &en_priv->edev; + + if (!bond_dev->group) { + return -1; + } + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + return 0; + } + lagid = bond_dev->group->group_ida; + + phy_port = bond_dev->phy_port; + pf_info.slot = bond_dev->slot; + pf_info.vport = bond_dev->vport; + + if (enable) { + dpp_lag_group_member_add(&pf_info, lagid, phy_port); + } else { + dpp_lag_group_member_del(&pf_info, lagid, phy_port); + } + + /* set panel attribute: BOND_LINK_UP */ + dpp_uplink_phy_attr_set(&pf_info, phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, !!enable); + + LOG_INFO( + "%s node %d set members: lagid %hhu, phyport %hhu bond_link_up %s\n", + 
netdev_name(bond_dev->netdev), node->idx, lagid, phy_port, + enable ? "true" : "false"); + + return 0; +} + +static int32_t zxdh_update_special_bond_group(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + s32 ret = 0; + struct zxdh_bond_group *group = bond_dev->group; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + DPP_PF_INFO_T dpp_pf_info = { + .slot = bond_dev->slot, + .vport = bond_dev->vport, + }; + en_priv = netdev_priv(bond_dev->netdev); + en_dev = &en_priv->edev; + LOG_INFO("%s node %d linking %d, event %ld\n", + netdev_name(bond_dev->netdev), node->idx, node->linking, + node->event); + + if (!node->linking && + node->event == NETDEV_CHANGEUPPER) { //如果此设备是解除绑定 + bond_dev->group = NULL; + bond_dev->bonded = false; + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + zxdh_bond_set_special_bond_member_port(bond_dev, false, + node); + ret = dpp_vport_attr_set(&dpp_pf_info, + SRIOV_VPORT_HW_BOND_EN_OFF, 0); + if (ret != 0) { + LOG_ERR("zxdh_update_special_bond_group dpp_vport_attr_set SRIOV_VPORT_HW_BOND_EN_OFF fail,ret: %d\n", + ret); + return ret; + } + dpp_uplink_phy_hardware_bond_set(&dpp_pf_info, + bond_dev->phy_port, 0); + dpp_uplink_phy_attr_set( + &dpp_pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_PF_VQM_VFID, + zxdh_convert_pcie_id_2_vfid(en_dev->pcie_id)); + } + bond_dev->upper_netdev = NULL; + LOG_INFO("%s hardware bond set slave's group null\n", + netdev_name(bond_dev->netdev)); + ret = -1; + } + /* if no slaves, we need to free bond group */ + if (node->group_slave_num == 0 && node->event == NETDEV_CHANGEUPPER && + !node->linking) { + /* free bond group */ + list_del(&group->node); + kfree(group); + group = NULL; + LOG_INFO("%s hardware bond group disabled\n", + netdev_name(bond_dev->netdev)); + ret = -1; + } + if (ret == -1) { + return ret; + } + + /* if slaves, we check that configured */ + if (!group->configured) { + dpp_lag_group_create(&dpp_pf_info, group->group_ida); + if 
(group->lag_tx_type == + ZXDH_NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { + dpp_lag_mode_set(&dpp_pf_info, group->group_ida, + group->lag_tx_type); + } else if (group->lag_tx_type == ZXDH_NETDEV_LAG_TX_TYPE_HASH) { + dpp_lag_mode_set(&dpp_pf_info, group->group_ida, + group->lag_tx_type); + dpp_lag_group_hash_factor_set( + &dpp_pf_info, group->group_ida, + ZXDH_NETDEV_LAG_HASH_L34); /* 顾剑总要求,HASH模式直接写死Layer3+4 */ + } + group->configured = true; + } + + return 0; +} + +static void zxdh_do_special_bond(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + s32 ret = 0; + bool lagstat = false; + + zxdh_print_hardware_bond_info(bond_dev, node); + if (!bond_dev->group) { + goto out; + } + ret = zxdh_update_special_bond_group(bond_dev, node); + if (ret != 0) { + goto out; + } + ret = zxdh_update_special_bond_slave(bond_dev); + if (ret != 0) { + LOG_INFO("zxdh_update_hardware_bond_group fail\n"); + } + + /* update: lag bond members、panel link */ + lagstat = node->link_up && node->tx_enabled; + zxdh_bond_set_special_bond_member_port(bond_dev, lagstat, node); +out: + return; +} + +void zxdh_changeupper_dualtor_handler(struct zxdh_bond_device *hw_bond_dev, + struct event_node *node); +static void zxdh_do_hardware_bond_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct event_ctx *ctx = + container_of(delayed_work, struct event_ctx, bond_work); + struct zxdh_bond_device *bond_dev = + container_of(ctx, struct zxdh_bond_device, ctx); + struct event_node *node, *tmp; + s32 changed = 1; + LIST_HEAD(local_list); // 临时链表 + // 原子化转移队列内容到临时链表 + spin_lock(&ctx->lock); + list_splice_init(&ctx->event_list, &local_list); // 切割整个队列 + spin_unlock(&ctx->lock); + LOG_INFO("%s enter zxdh_do_hardware_bond_work\n", + bond_dev->netdev->name); + mutex_lock(&mlock); + LOG_INFO("%s success get mlock\n", bond_dev->netdev->name); + // 按顺序处理临时链表中的每个事件 + list_for_each_entry_safe(node, tmp, &local_list, list) { + list_del(&node->list); // 从临时链表移除 + 
LOG_INFO( + "%s node %d addr %p get from list, event %ld linking %d link_up %d tx_enabled %d node_slave_num %d\n", + bond_dev->netdev->name, node->idx, (void *)node, + node->event, node->linking, node->link_up, + node->tx_enabled, node->group_slave_num); + if (node->event == NETDEV_CHANGEUPPER) { + changed = + zxdh_changeupper_event_handler(bond_dev, node); + if (changed) + zxdh_changeupper_dualtor_handler(bond_dev, + node); + } + if (changed) { + if (bond_dev->is_special_bond_dev) { + zxdh_do_special_bond(bond_dev, node); + } else { + zxdh_do_hardware_bond(bond_dev, node); + } + } + LOG_INFO("%s node %d addr %p has been processed\n", + bond_dev->netdev->name, node->idx, (void *)node); + kfree(node); + } + mutex_unlock(&mlock); + LOG_INFO("%s release mlock\n", bond_dev->netdev->name); +} + +static void zxdh_queue_hardware_bond_work(struct zxdh_bond_device *bond_dev, + struct event_ctx *ctx, + unsigned long delay) +{ + queue_delayed_work(bond_dev->wq, &ctx->bond_work, delay); +} + +static int32_t zxdh_get_hardware_bond_slaves_count(struct net_device *upper) +{ + struct net_device *ndev_tmp; + s32 num_slaves = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + if (!netif_is_zxdh_aux(ndev_tmp)) { + LOG_ERR("%s is not zxdh aux\n", + dev_name(&ndev_tmp->dev)); + continue; + } + + num_slaves++; + } + rcu_read_unlock(); + + LOG_INFO("%s slaves num %d\n", upper->name, num_slaves); + + return num_slaves; +} + +static int32_t zxdh_update_bond_slaves(struct zxdh_bond_device *bond_dev, + struct net_device *upper) +{ + if (!bond_dev->group) { + LOG_ERR("!bond_dev->group\n"); + return -1; + } + + bond_dev->group->num_slaves = + zxdh_get_hardware_bond_slaves_count(upper); + return 0; +} + +static int32_t +zxdh_changeupper_event_pre_handler(struct zxdh_bond_device *bond_dev, + struct net_device *netdev, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = + (struct netdev_notifier_changeupper_info *)ptr; + + if 
(!netif_is_lag_master(info->upper_dev)) { + return 0; + } + bond_dev->upper_info.upper_dev = info->upper_dev; + + LOG_INFO("%s bonding %s\n", netdev->name, + info->linking ? "LINK" : "UNLINK"); + bond_dev->linking = info->linking; + if (info->linking) { + bond_dev->upper_info.lag_upper_info.tx_type = + ((struct netdev_lag_upper_info *)info->upper_info) + ->tx_type; + bond_dev->upper_info.lag_upper_info.hash_type = + ((struct netdev_lag_upper_info *)info->upper_info) + ->hash_type; + } + return 1; +} + +static int32_t zxdh_changeupper_event_handler(struct zxdh_bond_device *bond_dev, + struct event_node *node) +{ + s32 ret = 0; + + ret = zxdh_hardware_bond_group_init(bond_dev, + bond_dev->upper_info.upper_dev); + if (ret != 0) { + LOG_ERR("zxdh init hardware bond group fail\n"); + return 0; + } + + if (node->linking) { + zxdh_hardware_bond_link(bond_dev, + &node->upper_info.lag_upper_info); + } + + zxdh_update_bond_slaves(bond_dev, bond_dev->upper_info.upper_dev); + return 1; +} + +static int32_t +zxdh_changelowerstate_event_handler(struct zxdh_bond_device *bond_dev, + struct net_device *netdev, void *ptr) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct netdev_notifier_changelowerstate_info *info; + s32 change = 0; + + if (!netif_is_lag_port(netdev)) { + return 0; + } + + info = (struct netdev_notifier_changelowerstate_info *)ptr; + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) { + return 0; + } + + /* check if lower device state changed */ + if (zxdh_is_lower_state_change(bond_dev, lag_lower_info)) { + change = 1; + } + + LOG_INFO("%s change: %d, link up: %hhu - %hhu, tx enable %hhu - %hhu\n", + netdev->name, change, lag_lower_info->link_up, + bond_dev->link_up, lag_lower_info->tx_enabled, + bond_dev->tx_enabled); + + bond_dev->link_up = lag_lower_info->link_up; + bond_dev->tx_enabled = lag_lower_info->tx_enabled; + + return change; +} + +static int32_t +zxdh_bonding_info_event_handler(struct zxdh_bond_device *bond_dev, + struct 
net_device *netdev, void *ptr) +{ + return 0; +} + +static int32_t zxdh_changeaddr_event_handler(struct zxdh_bond_device *bond_dev, + struct net_device *netdev) +{ + s32 ret = 0; + struct net_device *primary_netdev = NULL; + + if (!netif_is_lag_port(netdev)) { //如果当前不处于 linux bond 场景,不处理 + LOG_DEBUG( + "zxdh_changeaddr_event_handler failed when netdev %s isn't bond slave\n", + netdev->name); + return 0; + } + + if (bond_dev->is_special_bond_dev) { + LOG_DEBUG( + "don't neet exec zxdh_changeaddr_event_handler when netdev %s is special_bond\n", + netdev->name); + return 0; + } + + ret = zxdh_hardware_bond_get_primary_netdev(bond_dev, &primary_netdev); + if (ret != 0) { + LOG_DEBUG("%s get primary netdev failed\n", + bond_dev->netdev->name); + return -1; + } + + zxdh_hardware_bond_set_mac_to_primary(bond_dev, bond_dev->netdev, + primary_netdev); + + return 0; +} + +/* 发生link状态变化netdev设备和bond设备*/ +void zxdh_changeupper_dualtor_handler(struct zxdh_bond_device *hw_bond_dev, + struct event_node *node) +{ + if (node->linking != 0) { + LOG_DEBUG("create bond detected.\n"); + zxdh_create_hw_bond_panel(hw_bond_dev, node); + } + /* 如果是删除主设备, 向主设备发送删除信息*/ + else { + LOG_DEBUG("del bond detected.\n"); + zxdh_del_hw_bond_panel(hw_bond_dev); + } + return; +} + +void zxdh_bond_update_ctx_node(struct event_node *node, + struct zxdh_bond_device *bond_dev) +{ + // node->upper_info.upper_dev = bond_dev->upper_info.upper_dev + node->linking = bond_dev->linking; + node->link_up = bond_dev->link_up; + node->tx_enabled = bond_dev->tx_enabled; + node->group_slave_num = 0; + if (node->event == NETDEV_CHANGEUPPER) { + node->group_slave_num = zxdh_get_hardware_bond_slaves_count( + bond_dev->upper_info.upper_dev); + if (node->linking) { + node->upper_info.lag_upper_info.tx_type = + bond_dev->upper_info.lag_upper_info.tx_type; + node->upper_info.lag_upper_info.hash_type = + bond_dev->upper_info.lag_upper_info.hash_type; + } + } +} + +static int zxdh_hardware_bond_event_handler(struct 
notifier_block *notif_blk, + unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct zxdh_bond_device *hw_bond_dev; + s32 changed = 0; + struct event_ctx *ctx = NULL; + struct event_node *node = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + hw_bond_dev = + container_of(notif_blk, struct zxdh_bond_device, notif_block); + + if (!hw_bond_dev->netdev) { + return NOTIFY_DONE; + } + + /* Check that the netdev is in the working namespace */ + if (!net_eq(dev_net(netdev), &init_net)) { + return NOTIFY_DONE; + } + + if (netdev != hw_bond_dev->netdev) { + return NOTIFY_DONE; + } + + /* check that the netdev is hardware bond mode */ + if ((!zxdh_netdev_is_hwbond(netdev)) && + (!hw_bond_dev->is_special_bond_dev)) { + return NOTIFY_DONE; + } + en_priv = netdev_priv(hw_bond_dev->netdev); + en_dev = &en_priv->edev; + switch (event) { + case NETDEV_CHANGEUPPER: { + changed = zxdh_changeupper_event_pre_handler(hw_bond_dev, + netdev, ptr); + LOG_INFO("%s node %d NETDEV_CHANGEUPPER, linking:%u\n", + netdev->name, hw_bond_dev->ctx.idx, + hw_bond_dev->linking); + break; + } + case NETDEV_CHANGELOWERSTATE: { + LOG_INFO("%s node %d NETDEV_CHANGELOWERSTATE\n", netdev->name, + hw_bond_dev->ctx.idx); + changed = zxdh_changelowerstate_event_handler(hw_bond_dev, + netdev, ptr); + break; + } + case NETDEV_BONDING_INFO: { + changed = zxdh_bonding_info_event_handler(hw_bond_dev, netdev, + ptr); + break; + } + case NETDEV_CHANGEADDR: { + LOG_INFO("%s NETDEV_CHANGEADDR\n", netdev->name); + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) + zxdh_changeaddr_event_handler(hw_bond_dev, netdev); + break; + } + } + + if (changed) { + ctx = &hw_bond_dev->ctx; + node = kmalloc(sizeof(*node), GFP_ATOMIC); + if (!node) { + LOG_ERR("Failed to allocate event node!\n"); + return NOTIFY_OK; + } + node->idx = ctx->idx; + node->event = event; + ctx->idx++; + zxdh_bond_update_ctx_node(node, hw_bond_dev); + 
// 将节点加入队列 + spin_lock(&ctx->lock); + list_add_tail(&node->list, &ctx->event_list); // 添加到队尾 + LOG_INFO( + "%s node %d addr %p add to list, event %ld linking %d link_up %d tx_enabled %d\n", + netdev->name, node->idx, (void *)node, node->event, + node->linking, node->link_up, node->tx_enabled); + spin_unlock(&ctx->lock); + zxdh_queue_hardware_bond_work(hw_bond_dev, ctx, 0); + } + + return NOTIFY_DONE; +} + +static int32_t zxdh_hardware_bond_device_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_bond_device *bond_dev; + struct notifier_block *notif_blk; + char queue_name[32]; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + + /* rmda auxiliary device plug default */ + bond_dev = en_dev->hardware_bond; + + bond_dev->netdev = netdev; + bond_dev->pf_core_dev = en_dev->parent->parent; + bond_dev->bonded = false; + bond_dev->upper_netdev = NULL; + bond_dev->group = NULL; + bond_dev->tx_enabled = false; + bond_dev->link_up = false; + + bond_dev->primary = en_dev->is_primary_port; + + bond_dev->rxq = en_dev->phy_index[0]; + bond_dev->txq = en_dev->phy_index[1]; + + bond_dev->vport = en_dev->vport; + bond_dev->slot = en_dev->slot_id; + bond_dev->vfid = zxdh_convert_pcie_id_2_vfid(en_dev->pcie_id); + bond_dev->phy_port = en_dev->phy_port; + bond_dev->is_special_bond_dev = + en_dev->ops->is_special_bond(en_dev->parent); + if (!bond_dev->primary) { + ether_addr_copy(bond_dev->last_mac_addr.sa_data, + en_dev->netdev->dev_addr); + } else { + memset(bond_dev->last_mac_addr.sa_data, 0, ETH_ALEN); + } + + notif_blk = &bond_dev->notif_block; + notif_blk->notifier_call = zxdh_hardware_bond_event_handler; + if (register_netdevice_notifier(notif_blk)) { + LOG_ERR("FAIL register bdf %x hardware bond event handler!\n", + en_dev->ep_bdf); + notif_blk->notifier_call = NULL; + return -EINVAL; + } + + LOG_DEBUG("bdf %x hardware bond event handler registered\n", + en_dev->ep_bdf); + + 
snprintf(queue_name, sizeof(queue_name), "bond_work_%x", + en_dev->ep_bdf); + bond_dev->wq = create_singlethread_workqueue(queue_name); + if (!bond_dev->wq) { + LOG_ERR("FAIL register bdf %x hardware bond workqueue!\n", + en_dev->ep_bdf); + unregister_netdevice_notifier(notif_blk); + notif_blk->notifier_call = NULL; + return -ENOMEM; + } + INIT_LIST_HEAD(&bond_dev->ctx.event_list); + INIT_DELAYED_WORK(&bond_dev->ctx.bond_work, zxdh_do_hardware_bond_work); + + return 0; +} + +int32_t zxdh_rdma_bond_dpp_init(struct zxdh_bond_device *bond_dev) +{ + u8 phy_port = bond_dev->phy_port; + s32 ret = 0; + DPP_PF_INFO_T dpp_pf_info = { + .slot = bond_dev->slot, + .vport = bond_dev->vport, + }; + + ret = dpp_uplink_phy_lacp_pf_memport_qid_set(&dpp_pf_info, phy_port, + bond_dev->rxq); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_lacp_pf_memport_qid_set failed: %d\n", + ret); + goto out; + } + ret = dpp_uplink_phy_lacp_pf_vqm_vfid_set(&dpp_pf_info, phy_port, + bond_dev->vfid); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_lacp_pf_vqm_vfid_set failed: %d\n", + ret); + goto out; + } + +out: + return ret; +} + +int32_t zxdh_special_bond_dpp_init(struct zxdh_en_device *en_dev) +{ + s32 ret = 0; + DPP_PF_INFO_T dpp_pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + ret = dpp_uplink_phy_bond_vport(&dpp_pf_info, en_dev->phy_port); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_bond_vport failed: %d\n", ret); + goto out; + } + ret = dpp_uplink_phy_lacp_pf_vqm_vfid_set( + &dpp_pf_info, en_dev->phy_port, en_dev->hardware_bond->vfid); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_lacp_pf_vqm_vfid_set failed: %d\n", + ret); + goto out; + } + ret = dpp_uplink_phy_lacp_pf_memport_qid_set( + &dpp_pf_info, en_dev->phy_port, en_dev->hardware_bond->rxq); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_lacp_pf_memport_qid_set failed: %d\n", + ret); + goto out; + } + +out: + return ret; +} + +int32_t zxdh_hardware_bond_dpp_init(struct zxdh_en_device *en_dev) +{ + s32 ret = 0; + + 
en_dev->hardware_bond->rxq = en_dev->phy_index[0]; + en_dev->hardware_bond->txq = en_dev->phy_index[1]; + if (en_dev->hardware_bond->is_special_bond_dev) { + ret = zxdh_special_bond_dpp_init(en_dev); + } else { + ret = zxdh_rdma_bond_dpp_init(en_dev->hardware_bond); + } + return ret; +} + +int32_t zxdh_hardware_bond_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + s32 ret = 0; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + + /* do nothing if vf */ + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return 0; + } + + /* create hardware bond device */ + en_dev->hardware_bond = + kzalloc(sizeof(struct zxdh_bond_device), GFP_KERNEL); + if (!en_dev->hardware_bond) { + LOG_ERR("zxdh hardware bond device kzalloc fail\n"); + return -ENOMEM; + } + + ret = zxdh_hardware_bond_device_init(netdev); + if (ret != 0) { + LOG_ERR("zxdh hardware bond device init fail\n"); + goto err_bond_dev_init; + } + + ret = zxdh_hardware_bond_dpp_init(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_hardware_bond_dpp_init fail\n"); + goto err_bond_dpp_init; + } + + list_add_tail(&en_dev->hardware_bond->node, &zxdh_aux_netdev_list); + + LOG_INFO("bdf 0x%x success\n", en_dev->ep_bdf); + + return 0; + +err_bond_dpp_init: + unregister_netdevice_notifier(&en_dev->hardware_bond->notif_block); + en_dev->hardware_bond->notif_block.notifier_call = NULL; + cancel_delayed_work_sync(&en_dev->hardware_bond->ctx.bond_work); + destroy_workqueue(en_dev->hardware_bond->wq); +err_bond_dev_init: + kfree(en_dev->hardware_bond); + en_dev->hardware_bond = NULL; + return ret; +} + +void zxdh_hardware_bond_uninit(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_bond_device *hardware_bond; + struct event_node *node, *tmp; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + + if 
((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return; + } + + if (en_dev->hardware_bond) { + list_del(&en_dev->hardware_bond->node); + + hardware_bond = en_dev->hardware_bond; + if (hardware_bond->notif_block.notifier_call) { + unregister_netdevice_notifier( + &hardware_bond->notif_block); + hardware_bond->notif_block.notifier_call = NULL; + } + + cancel_delayed_work_sync(&hardware_bond->ctx.bond_work); + destroy_workqueue(hardware_bond->wq); + + spin_lock(&hardware_bond->ctx.lock); + list_for_each_entry_safe( + node, tmp, &hardware_bond->ctx.event_list, list) { + list_del(&node->list); + LOG_INFO( + "%s node %d addr %p del from list, event %ld linking %d link_up %d tx_enabled %d\n", + netdev->name, node->idx, (void *)node, + node->event, node->linking, node->link_up, + node->tx_enabled); + kfree(node); + } + spin_unlock(&hardware_bond->ctx.lock); + + if (en_dev->hardware_bond->group) { + en_dev->hardware_bond->group->configured = false; + } + + kfree(en_dev->hardware_bond); + en_dev->hardware_bond = NULL; + } +} + +int32_t zxdh_recover_hwbond_in_reload(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_bond_device *hardware_bond; + s32 ret = 0; + u8 hit_flag = 0; + DPP_PF_INFO_T dpp_pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev)) || + (en_dev->ops->is_bond(en_dev->parent))) { + return 0; + } + + if (en_dev->hardware_bond) { + ret = zxdh_hardware_bond_dpp_init(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_hardware_bond_dpp_init failed: %d\n", + ret); + goto out; + } + hardware_bond = en_dev->hardware_bond; + if (en_dev->hardware_bond->group) { + dpp_lag_hit_flag_get( + &dpp_pf_info, + en_dev->hardware_bond->group->group_ida, + &hit_flag); + LOG_INFO("%s check lag_bond %d hist_flag 
%d\n", + netdev->name, + en_dev->hardware_bond->group->group_ida, + hit_flag); + en_dev->hardware_bond->group->configured = + hit_flag == 0 ? false : true; + } + en_dev->hardware_bond->bonded = false; + if (!en_dev->hardware_bond->is_special_bond_dev) { + en_dev->ops->optim_hardware_bond_time( + en_dev->parent, en_dev->is_hwbond); + } + } +out: + return ret; +} + +void zxdh_update_rdma_hwbond_master(void) +{ + struct zxdh_bond_device *bond_dev; + struct net_device *netdev; + struct net_device *uplink_upper; + struct net_device *primary_netdev; + s32 ret = 0; + + list_for_each_entry(bond_dev, &zxdh_aux_netdev_list, node) { + netdev = bond_dev->netdev; + if (!netdev) { + continue; + } + + /* if netdev not hwbond */ + if (!zxdh_netdev_is_hwbond(netdev)) { + continue; + } + + rcu_read_lock(); + uplink_upper = netdev_master_upper_dev_get_rcu(netdev); + rcu_read_unlock(); + if (uplink_upper && netif_is_lag_master(uplink_upper)) { + ret = zxdh_hardware_bond_get_primary_netdev( + bond_dev, &primary_netdev); + if (!ret) { + /* set rdma dev bind netdev and port speed */ + zxdh_set_rdma_hwbond_master(primary_netdev, + uplink_upper, true); + zxdh_bond_cofig_rdma(bond_dev); + continue; + } + + LOG_INFO("%s get primary netdev failed\n", + netdev_name(netdev)); + } + } + + return; +} diff --git a/drivers/net/ethernet/dinghai/bonding/zxdh_lag.h b/drivers/net/ethernet/dinghai/bonding/zxdh_lag.h new file mode 100644 index 000000000000..77fe86c44391 --- /dev/null +++ b/drivers/net/ethernet/dinghai/bonding/zxdh_lag.h @@ -0,0 +1,103 @@ +#ifndef _ZXDH_HARDWARE_BOND_H_ +#define _ZXDH_HARDWARE_BOND_H_ + +#include "../en_aux.h" + +struct zxdh_bond_group; + +#define ZXDH_SPECIAL_LGA_ID 0 + +struct upper_info_struct { + struct net_device *upper_dev; + struct netdev_lag_upper_info lag_upper_info; +}; + +// 每个事件对应一个独立的节点 +struct event_node { + struct list_head list; // 内核链表节点 + unsigned long event; // 事件类型(如NETDEV_XXX) + struct upper_info_struct upper_info; + bool linking; + uint8_t link_up; + 
uint8_t tx_enabled; + uint32_t idx; + int32_t group_slave_num; +}; + +// 全局上下文 +struct event_ctx { + struct delayed_work bond_work; + spinlock_t lock; // 保护队列的锁 + struct list_head event_list; // 事件链表头(FIFO队列) + uint32_t idx; +}; + +struct zxdh_bond_device { + struct dh_core_dev *pf_core_dev; /* backlink to PF core dev struct */ + struct net_device *netdev; /* this PF's netdev */ + struct net_device *upper_netdev; /* upper bonding netdev */ + struct notifier_block notif_block; + + struct event_ctx ctx; + struct workqueue_struct *wq; + + uint8_t bonded : 1; /* currently bonded */ + uint8_t tx_enabled : 1; + uint8_t link_up : 1; + + bool primary; /* this is a primary port */ + bool is_special_bond_dev; + + uint16_t rxq; + uint16_t txq; + + uint16_t slot; + uint16_t vport; + + uint16_t vfid; + uint16_t ovs_pf_vfid; + uint8_t phy_port; + // uint16_t primary_vfid; + bool linking; + struct list_head node; + + struct upper_info_struct upper_info; + struct zxdh_bond_group *group; + struct sockaddr last_mac_addr; +}; + +struct zxdh_bond_group { + char name[IFNAMSIZ]; + + int32_t group_ida; + + uint8_t lag_tx_type; /* enum zxdh_netdev_lag_tx_type */ + uint8_t hash_policy; + uint8_t num_slaves; + + bool configured; + + struct list_head node; +}; + +static inline bool zxdh_netdev_is_hwbond(const struct net_device *netdev) +{ + return (&((struct zxdh_en_priv *)netdev_priv(netdev))->edev)->is_hwbond; +} + +static inline uint16_t +zxdh_bond_device_get_vport(const struct zxdh_bond_device *bond_dev) +{ + return bond_dev->vport; +} + +static inline const char * +zxdh_bond_group_name(const struct zxdh_bond_group *group) +{ + return group->name; +} + +void zxdh_lag_lock_init(void); +void zxdh_lag_lock_deinit(void); + +#endif /* END _ZXDH_HARDWARE_BOND_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/cmd.c b/drivers/net/ethernet/dinghai/cmd.c new file mode 100644 index 000000000000..772da57554d8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd.c @@ -0,0 
+1,39 @@ +#include +#include +#include +#include + +static int32_t cmd_status_err(struct dh_core_dev *dev, int32_t err, + uint16_t opcode, void *out) +{ + u8 status = DH_GET(mbox_out, out, status); + + return err; +} +static int32_t cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size, + void *out, int32_t out_size, zxdh_cmd_cbk_t callback, + void *context, bool force_polling) +{ + return 0; +} + +int32_t zxdh_cmd_do(struct dh_core_dev *dev, void *in, int32_t in_size, + void *out, int32_t out_size) +{ + int32_t err = + cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); + uint16_t opcode = DH_GET(mbox_in, in, opcode); + + err = cmd_status_err(dev, err, opcode, out); + + return err; +} +EXPORT_SYMBOL(zxdh_cmd_do); + +int32_t zxdh_cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size, + void *out, int32_t out_size) +{ + int32_t err = zxdh_cmd_do(dev, in, in_size, out, out_size); + + return zxdh_cmd_check(dev, err, in, out); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c b/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c new file mode 100644 index 000000000000..95eda9175300 --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.c @@ -0,0 +1,314 @@ +#include +#include +#include "msg_chan_lock.h" +#include "msg_chan_priv.h" +/***************************************** +[src/dst]时应该将消息发到硬件锁还是软件所 +src/dst: TO_RISC, TO_PFVF, TO_MPF +MPF: 1 1 1 +PF: 0 0 1 +VF: 0 0 1 +******************************************/ + +/*/PF0-7 DIRECT_CHNA/(PF0)VF0-VF32/(PF1)VF0-VF32/...*/ +struct mutex lock_array[LOCK_ARR_LENGTH] = { 0 }; + +uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = { + { LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD }, + { LOCK_TYPE_HARD, LOCK_TYPE_SOFT, LOCK_TYPE_HARD }, + { LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD } +}; + +/** + * pcieid_to_lockid - 将pcie_id转化成lock_id + * @src_pcieid: pcie_id + * @result: 软件数组的索引值 + */ +uint16_t pcieid_to_lockid(uint16_t src_pcieid, uint8_t 
dst) +{ + uint16_t lock_id = 0; + uint16_t pf_idx = 0; + uint16_t vf_idx = 0; + uint16_t ep_idx = 0; + + pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET; + vf_idx = (src_pcieid & PCIEID_VF_IDX_MASK); + ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET; + switch (dst) { + case MSG_CHAN_END_RISC: { + if (src_pcieid & PCIEID_IS_PF_MASK) { + lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx; + } else { + lock_id = MULTIPLY_BY_256(ep_idx) + + MULTIPLY_BY_32(pf_idx) + vf_idx + + MULTIPLY_BY_32(1); + } + break; + } + case MSG_CHAN_END_VF: { + lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + + MULTIPLY_BY_32(1 + VF_NUM_PER_PF); + break; + } + case MSG_CHAN_END_PF: { + lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + + MULTIPLY_BY_32(2 + VF_NUM_PER_PF); + break; + } + default: { + lock_id = 0; + break; + } + } + + if (lock_id >= LOCK_ARR_LENGTH) { + lock_id = 0; + } + + return lock_id; +} + +/** + * pcie_id_to_hard_lock - 将pcie_id转化为虚拟的硬件锁lock_id + * @src_pcieid: pcie_id + * @result: 软件数组的索引值 + */ +uint16_t pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst) +{ + uint16_t lock_id = 0; + uint16_t pf_idx = 0; + uint16_t vf_idx = 0; + uint16_t ep_idx = 0; + + pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET; + vf_idx = (src_pcieid & PCIEID_VF_IDX_MASK); + ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET; + + switch (dst) { + /* msg to risc*/ + case MSG_CHAN_END_RISC: { + lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx; + break; + } + /* msg to pf/vf*/ + case MSG_CHAN_END_VF: + case MSG_CHAN_END_PF: + + { + lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + + MULTIPLY_BY_8(1 + MAX_EP_NUM); + break; + } + /* default*/ + default: { + lock_id = 0; + break; + } + } + + if (lock_id >= MAX_HARD_SPINLOCK_NUM) { + lock_id = 0; + } + + return lock_id; +} + +static uint8_t spinklock_read(spl_addr_t virt_lock_addr, uint32_t lock_id) +{ + return readb((spl_addr_t)(virt_lock_addr + lock_id)); +} + +static void spinlock_write(spl_addr_t virt_lock_addr, 
uint32_t lock_id, + uint8_t data) +{ + writeb(data, (spl_addr_t)(virt_lock_addr + lock_id)); +} + +static void label_write(spl_addr_t label_lock_addr, uint32_t lock_id, + uint16_t value) +{ + writew(value, (spl_addr_t)(label_lock_addr + lock_id * 2)); +} + +static uint16_t label_read(spl_addr_t label_lock_addr, uint32_t lock_id) +{ + return readw(label_lock_addr + lock_id * 2); +} + +/* 硬件锁加锁接口*/ +int32_t zxdh_spinlock_lock(uint32_t virt_lock_id, uint64_t lock_virt_addr, + uint64_t label_addr, uint16_t masterid) +{ + uint8_t spl_val = 0; + uint32_t lock_rd_cnt = 0; + + do { + /* 读寄存器加锁 */ + spl_val = spinklock_read((spl_addr_t)(uintptr_t)lock_virt_addr, + virt_lock_id); + if (0 == spl_val) { + label_write((spl_addr_t)(uintptr_t)label_addr, + virt_lock_id, masterid); + break; + } + lock_rd_cnt++; + msleep(BAR_CHAN_HARD_LOCK_POLLING_SPAN_MS); + } while (lock_rd_cnt < MAX_HARD_SPINLOCK_ASK_TIMES); + + if (lock_rd_cnt >= MAX_HARD_SPINLOCK_ASK_TIMES) { + BAR_LOG_ERR("spl val: 0x%x lock_id:%u, masterID: 0x%x.\n", + spl_val, virt_lock_id, + label_read((spl_addr_t)(uintptr_t)label_addr, + virt_lock_id)); + return BAR_MSG_ERR_LOCK_FAILED; + } + + return 0; +} + +int32_t zxdh_spinlock_unlock(uint32_t virt_lock_id, uint64_t lock_virt_addr, + uint64_t label_addr) +{ + label_write((spl_addr_t)(uintptr_t)label_addr, virt_lock_id, 0); + spinlock_write((spl_addr_t)(uintptr_t)lock_virt_addr, virt_lock_id, 0); + return 0; +} + +void bar_soft_lock(uint16_t src_pcieid, uint8_t dst) +{ + uint16_t lockid = 0; + + lockid = pcieid_to_lockid(src_pcieid, dst); + mutex_lock(&lock_array[lockid]); +} + +void bar_soft_unlock(uint16_t src_pcieid, uint8_t dst) +{ + uint16_t lockid = 0; + + lockid = pcieid_to_lockid(src_pcieid, dst); + mutex_unlock(&lock_array[lockid]); +} + +int bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t chan_virt_addr) +{ + int ret = 0; + uint16_t lockid = 0; + + lockid = pcie_id_to_hard_lock(src_pcieid, dst); + if (dst == MSG_CHAN_END_RISC) { + ret = 
zxdh_spinlock_lock( + lockid, chan_virt_addr + CHAN_RISC_SPINLOCK_OFFSET, + chan_virt_addr + CHAN_RISC_LABEL_OFFSET, + src_pcieid | LOCK_MASTER_ID_MASK); + } else { + ret = zxdh_spinlock_lock( + lockid, chan_virt_addr + CHAN_PFVF_SPINLOCK_OFFSET, + chan_virt_addr + CHAN_PFVF_LABEL_OFFSET, + src_pcieid | LOCK_MASTER_ID_MASK); + } + return ret; +} + +void bar_hard_unlock(uint16_t src_pcieid, uint8_t dst, uint64_t chan_virt_addr) +{ + uint16_t lockid = 0; + + lockid = pcie_id_to_hard_lock(src_pcieid, dst); + if (dst == MSG_CHAN_END_RISC) { + zxdh_spinlock_unlock(lockid, + chan_virt_addr + CHAN_RISC_SPINLOCK_OFFSET, + chan_virt_addr + CHAN_RISC_LABEL_OFFSET); + } else { + zxdh_spinlock_unlock(lockid, + chan_virt_addr + CHAN_PFVF_SPINLOCK_OFFSET, + chan_virt_addr + CHAN_PFVF_LABEL_OFFSET); + } + return; +} + +/** + * bar_init_lock_arr - 初始化软件锁数组 + * 在msg_chan模块初始化函数中调用软件锁初始化 + */ +void bar_init_lock_arr(void) +{ + int idx = 0; + + for (idx = 0; idx < ARR_LEN(lock_array); idx++) { + mutex_init(&lock_array[idx]); + } +} + +/** + * bar_chan_lock - 通道上锁 + * @src: 消息源类型 + * @dst: 消息对端类型 + * @src_pcieid: 源pcieId + * @return: 0成功, 1失败 + */ +int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid, + uint64_t virt_addr) +{ + int ret = 0; + uint16_t idx = 0; + uint8_t src_index = 0; + uint8_t dst_index = 0; + + src_index = bar_msg_row_index_trans(src); + dst_index = bar_msg_col_index_trans(dst); + if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) { + BAR_LOG_ERR("lock ERR: chan doesn't exist.\n"); + return BAR_MSG_ERR_TYPE; + } + idx = lock_type_tbl[src_index][dst_index]; + if (idx == LOCK_TYPE_SOFT) { + bar_soft_lock(src_pcieid, dst); + } else { + ret = bar_hard_lock(src_pcieid, dst, virt_addr); + } + return ret; +} + +/** + * bar_chan_lock - 通道解锁功能 + * @src: 消息源类型 + * @dst: 消息对端类型 + * @src_pcieid: 源pcieId + * @return: 0成功1失败 + */ +int bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid, + uint64_t virt_addr) +{ + uint16_t idx = 0; + uint8_t 
src_index = 0; + uint8_t dst_index = 0; + + src_index = bar_msg_row_index_trans(src); + dst_index = bar_msg_col_index_trans(dst); + if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) { + BAR_LOG_ERR("unlock ERR: chan doesn't exist.\n"); + return BAR_MSG_ERR_TYPE; + } + idx = lock_type_tbl[src_index][dst_index]; + if (idx == LOCK_TYPE_SOFT) { + bar_soft_unlock(src_pcieid, dst); + } else { + bar_hard_unlock(src_pcieid, dst, virt_addr); + } + return BAR_MSG_OK; +} + +int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr) +{ + int lock_id = 0; + + lock_id = pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_RISC); + zxdh_spinlock_unlock(lock_id, bar_base_addr + BAR0_SPINLOCK_OFFSET, + bar_base_addr + HW_LABEL_OFFSET); + lock_id = pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_VF); + zxdh_spinlock_unlock(lock_id, bar_base_addr + BAR0_SPINLOCK_OFFSET, + bar_base_addr + HW_LABEL_OFFSET); + return 0; +} +EXPORT_SYMBOL(bar_chan_pf_init_spinlock); diff --git a/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h b/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h new file mode 100644 index 000000000000..b81e8fa2a03d --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_chan_lock.h @@ -0,0 +1,67 @@ +#ifndef _ZXDH_MSG_CHAN_LOCK_H_ +#define _ZXDH_MSG_CHAN_LOCK_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define ARR_LEN(arr) (sizeof(arr) / sizeof(arr[0])) + +typedef void __iomem *spl_addr_t; + +/* PCIEID位域掩码*/ +#define PCIEID_IS_PF_MASK (0x0800) +#define PCIEID_PF_IDX_MASK (0x0700) +#define PCIEID_VF_IDX_MASK (0x00ff) +#define PCIEID_EP_IDX_MASK (0x7000) +#define PF0_PCIEID (0x0800) + +/* PCIEID位域偏移*/ +#define PCIEID_PF_IDX_OFFSET (8) +#define PCIEID_EP_IDX_OFFSET (12) + +#define MAX_HARD_SPINLOCK_NUM (511) +#define MAX_HARD_SPINLOCK_ASK_TIMES (1500) +#define BAR_CHAN_HARD_LOCK_POLLING_SPAN_MS (1) + +/* 硬件锁软件锁*/ +#define LOCK_TYPE_HARD 1 +#define LOCK_TYPE_SOFT 0 + +/* bar空间偏移*/ +#define BAR0_CHAN_RISC_OFFSET (0x2000) +#define BAR0_CHAN_PFVF_OFFSET 
(0x3000) +#define BAR0_SPINLOCK_OFFSET (0x4000) + +#define CHAN_RISC_SPINLOCK_OFFSET (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_RISC_OFFSET) +#define CHAN_PFVF_SPINLOCK_OFFSET (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_PFVF_OFFSET) + +#define MAX_EP_NUM 4 +#define PF_NUM_PER_EP 8 +#define VF_NUM_PER_PF 32 + +#define MULTIPLY_BY_8(x) ((x) << 3) +#define MULTIPLY_BY_32(x) ((x) << 5) +#define MULTIPLY_BY_256(x) ((x) << 8) + +#define LOCK_ARR_LENGTH (MAX_EP_NUM * PF_NUM_PER_EP * (3 + VF_NUM_PER_PF)) + +#define FW_SHRD_OFFSET (0x5000) +#define FW_SHRD_INNER_HW_LABEL_PAT (0x800) +#define LOCK_MASTER_ID_MASK (0x8000) +#define HW_LABEL_OFFSET (FW_SHRD_OFFSET + FW_SHRD_INNER_HW_LABEL_PAT) +#define CHAN_RISC_LABEL_OFFSET (HW_LABEL_OFFSET - BAR0_CHAN_RISC_OFFSET) +#define CHAN_PFVF_LABEL_OFFSET (HW_LABEL_OFFSET - BAR0_CHAN_PFVF_OFFSET) + +void bar_init_lock_arr(void); + +int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid, + uint64_t virt_addr); +int bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid, + uint64_t virt_addr); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZXDH_MSG_CHAN_LOCK_H_ */ diff --git a/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h b/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h new file mode 100644 index 000000000000..f5ee5386fbb3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_chan_priv.h @@ -0,0 +1,311 @@ +#ifndef _ZXDH_MSG_CHAN_PRIV_H_ +#define _ZXDH_MSG_CHAN_PRIV_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +/* */ +#define BAR_KFREE_PTR(ptr) \ + { \ + if (ptr != NULL) { \ + kfree(ptr); \ + } \ + ptr = NULL; \ + } + +#define BAR_LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_CMD, fmt, ##arg); +#define BAR_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_CMD, fmt, ##arg); +#define BAR_LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_CMD, fmt, ##arg); +#define BAR_LOG_WARN(fmt, arg...) 
DH_LOG_WARNING(MODULE_CMD, fmt, ##arg); + +#define HOST_OR_ZX 0 + +#define MAX_MSG_BUFF_NUM 0xffff + +#define BAR_ALIGN_WORD_MASK 0xffffffc +#define BAR_MSG_ADDR_CHAN_INTERVAL (1024 * 2) + +/* 消息类型*/ +#define BAR_CHAN_MSG_SYNC 0 +#define BAR_CHAN_MSG_ASYNC 1 +#define BAR_CHAN_MSG_NO_EMEC 0 +#define BAR_CHAN_MSG_EMEC 1 +#define BAR_CHAN_MSG_NO_ACK 0 +#define BAR_CHAN_MSG_ACK 1 + +/* payload, valid和内容的偏移*/ +#define BAR_MSG_PLAYLOAD_OFFSET (sizeof(struct bar_msg_header)) +#define BAR_MSG_LEN_OFFSET 2 +#define BAR_MSG_VALID_OFFSET 0 + +/* valid字段的掩码*/ +#define BAR_MSG_VALID_MASK 1 + +/* reps_buff的偏移*/ +#define REPS_HEADER_VALID_OFFSET 0 +#define REPS_HEADER_LEN_OFFSET 1 +#define REPS_HEADER_PAYLOAD_OFFSET 4 + +#define REPS_HEADER_REPLYED 0xff + +/* 通道状态*/ +#define BAR_MSG_CHAN_USABLE 0 +#define BAR_MSG_CHAN_USED 1 + +/* 超时时间 = 100 us *30000次轮询 = 3s*/ +#define BAR_MSG_POLLING_SPAN_US 100 +#define BAR_MSG_TIMEOUT_TH 30000 + +/* vf,pf,mpf总数*/ +#define BAR_DRIVER_TOTAL_NUM (BAR_MPF_NUM + BAR_PF_NUM + BAR_VF_NUM) + +/* bar的通道偏移*/ +#define BAR_INDEX_TO_RISC 0 +#define BAR_MPF_NUM 1 + +/* 定时器周期宏*/ +#define BAR_MSGID_FREE_THRESHOLD (jiffies + msecs_to_jiffies(2000)) + +/* 管理pf信息*/ +#define BAR_MSG_OFFSET (0x2000) +#define MPF_VENDOR_ID (0x16c3) +#define MPF_DEVICE_ID (0x8045) + +enum { + TYPE_SEND_NP = 0x0, + TYPE_SEND_DRS = 0x01, + TYPE_SEND_DTP = 0x10, + TYPE_END, +}; + +/************************************************************************** + * common.ko会工作在5中场景,不同场景每个mpf/pf/vf可以看到的bar不一样 + * 1、DPU场景下的host中:SCENE_HOST_IN_DPU + * 2、DPU场景下的ZF中: SCENE_ZF_IN_DPU + * 3、智能网卡带ddr: SCENE_NIC_WITH_DDR + * 4、智能网卡不带ddr: SCENE_NIC_NO_DDR + * 5、普卡: SCENE_STD_NIC +**************************************************************************/ +#define SCENE_TEST + +#ifdef SCENE_HOST_IN_DPU +#define BAR_PF_NUM 31 +#define BAR_VF_NUM 1024 +#define BAR_INDEX_PF_TO_VF 1 +#define BAR_INDEX_MPF_TO_MPF 1 +#define BAR_INDEX_MPF_TO_PFVF 0xff +#define BAR_INDEX_PFVF_TO_MPF 0xff +#endif + +#ifdef 
SCENE_ZF_IN_DPU +#define BAR_PF_NUM 7 +#define BAR_VF_NUM 128 +#define BAR_INDEX_PF_TO_VF 0xff +#define BAR_INDEX_MPF_TO_MPF 1 +#define BAR_INDEX_MPF_TO_PFVF 0xff +#define BAR_INDEX_PFVF_TO_MPF 0xff +#endif + +#ifdef SCENE_NIC_WITH_DDR +#define BAR_PF_NUM 31 +#define BAR_VF_NUM 1024 +#define BAR_INDEX_PF_TO_VF 1 +#define BAR_INDEX_MPF_TO_MPF 0xff +#define BAR_INDEX_MPF_TO_PFVF 0xff +#define BAR_INDEX_PFVF_TO_MPF 0xff +#endif + +#ifdef SCENE_NIC_NO_DDR +#define BAR_PF_NUM 31 +#define BAR_VF_NUM 1024 +#define BAR_INDEX_PF_TO_VF 1 +#define BAR_INDEX_MPF_TO_MPF 0xff +#define BAR_INDEX_MPF_TO_PFVF 1 +#define BAR_INDEX_PFVF_TO_MPF 2 +#endif + +#ifdef SCENE_STD_NIC +#define BAR_PF_NUM 7 +#define BAR_VF_NUM 256 +#define BAR_INDEX_PF_TO_VF 1 +#define BAR_INDEX_MPF_TO_MPF 0xff +#define BAR_INDEX_MPF_TO_PFVF 1 +#define BAR_INDEX_PFVF_TO_MPF 2 +#endif + +#ifdef SCENE_TEST +#define BAR_PF_NUM 7 +#define BAR_VF_NUM 256 +#define BAR_INDEX_PF_TO_VF 0 +#define BAR_INDEX_MPF_TO_MPF 0xff +#define BAR_INDEX_MPF_TO_PFVF 0 +#define BAR_INDEX_PFVF_TO_MPF 0 +#endif + +/* 左边通道还是右边通道*/ +#define BAR_SUBCHAN_INDEX_SEND 0 +#define BAR_SUBCHAN_INDEX_RECV 1 + +/* 消息源索引*/ +#define BAR_MSG_SRC_NUM 3 +#define BAR_MSG_SRC_MPF 0 +#define BAR_MSG_SRC_PF 1 +#define BAR_MSG_SRC_VF 2 +#define BAR_MSG_SRC_ERR 0xff + +/* 消息目的索引*/ +#define BAR_MSG_DST_NUM 3 +#define BAR_MSG_DST_RISC 0 +#define BAR_MSG_DST_MPF 2 +#define BAR_MSG_DST_PFVF 1 +#define BAR_MSG_DST_ERR 0xff + +/* msg_id项标志位状态*/ +#define REPS_INFO_FLAG_USABLE 0 +#define REPS_INFO_FLAG_USED 1 + +#define BAR_MSG_PAYLOAD_MAX_LEN \ + (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct bar_msg_header)) + +#define BAR_MSG_POL_MASK (0x10) +#define BAR_MSG_POL_OFFSET (4) + +enum { + CHECK_STATE_OK = 0, + CHECK_STATE_EVENT_EXCEED = 1, + CHECK_STATE_EVENT_NOT_EXIST = 2, + CHECK_STATE_EVENT_ERR_RET = 4, + CHECK_STATE_EVENT_ERR_REPS_LEN = 5, +}; + +struct zxdh_pcie_bar_msg_internal { + uint32_t id; /**< the msg id that passing through */ + uint64_t virt_addr; /**< 
pcie bar mapping virtual addr */ +}; + +/* bar通道消息头 */ +struct bar_msg_header { + uint8_t valid : 1; /* 消息通道状态 */ + uint8_t sync : 1; /* 同步消息or异步消息*/ + uint8_t emec : 1; /* 消息是否紧急 */ + uint8_t ack : 1; /* 是否是回复消息*/ + uint8_t poll : 1; + uint8_t usr : 1; + uint8_t check; + uint16_t event_id; /* 请求的消息处理函数标识 */ + uint16_t len; /* 消息长度 */ + uint16_t msg_id; /* 消息id*/ + uint16_t src_pcieid; + uint16_t dst_pcieid; /* 用于pf给vf发消息*/ +}; + +/* 根据消息的msgid查询回复缓存的地址和长度*/ +struct msgid_reps_info { + void *reps_buffer; /* reps的地址*/ + uint16_t id; /* msg_id*/ + uint16_t buffer_len; /* buffer的最大长度*/ + uint16_t flag; /* 该条目是否被分配,已经非配和未被分配*/ + struct timer_list id_timer; /* 该id对应的定时器*/ +}; + +struct msix_msg { + uint16_t pcie_id; + uint16_t vector_risc; + uint16_t vector_pfvf; + uint16_t vector_mpf; +} __attribute__((packed)); + +struct offset_get_msg { + uint16_t pcie_id; + uint16_t type; +}; + +struct bar_offset_reps { + uint16_t check; + uint16_t rsv; + uint32_t offset; + uint32_t length; +} __attribute__((packed)); + +struct bar_recv_msg { + uint8_t replied; + uint16_t reps_len; + uint8_t rsv1; + union { + struct bar_offset_reps offset_reps; + uint8_t data[BAR_MSG_PAYLOAD_MAX_LEN - 4]; + }; +} __attribute__((packed)); + +struct msgid_ring { + uint16_t msg_id; + spinlock_t lock; + struct msgid_reps_info reps_info_tbl[MAX_MSG_BUFF_NUM]; +}; + +/* 异步消息相关实体*/ +struct async_msg_entity { + struct task_struct *async_proc; /* 异步队列消息线程*/ + struct mutex async_qlock; /* 易怒队列入队锁*/ + struct bar_async_node *noemq_head; /* 非紧急队列头*/ + struct bar_async_node *noemq_tail; /* 非紧急队列尾部*/ + struct bar_async_node *emq_head; /* 紧急队列头*/ + struct bar_async_node *emq_tail; /* 紧急队列尾部*/ +}; + +/* 异步消息队列节点*/ +struct bar_async_node { + uint32_t msg_id; + void *payload_addr; /**< 消息净荷起始地址,有用户创建并填充 */ + uint64_t payload_len; /**< 消息净荷长度. 
*/ + uint64_t subchan_addr; /**< 消息发送到哪个2K, 由virt_addr, src, dst共同决定,计算交给common来做>**/ + uint32_t event_id; /**< 消息发送模块,描述消息哪个模块发送 */ + uint16_t src_pcieid; + uint16_t dst_pcieid; /**< 消息目的的bdf号,适用于PF与VF公用4K的时候用>**/ + uint16_t emec; /**< 消息紧急类型,异步消息可以分为紧急消息和非紧急消息>**/ + uint16_t ack; + uint8_t src; + uint8_t dst; + struct bar_async_node *next; +}; + +struct vqm_qid_reset_msg { + uint32_t qid; +} __attribute__((packed)); +typedef struct { + uint16_t vqm_vfid; /* 设备号 */ + uint16_t opcode; /* get:0, set:1 */ +#define VQM_QUEUE_RSET (14) + uint16_t cmd; /* 控制命令类 mac-1, 多队列-4, feature-5 */ + union { + uint8_t value[8]; /* 如果是set, 附带数据, 目前已知的有mac地址 */ + struct vqm_qid_reset_msg q_reset_msg; + } __attribute__((packed)); +} __attribute__((packed)) OVS_TO_VQM_MSG; + +typedef struct { + uint32_t reps_hdr; +#define VQM_REPS_SUCCESS (0xaa) + uint32_t check_result; + union { + uint8_t rsv[40]; + } __attribute__((packed)); +} __attribute__((packed)) VQM_RSP_OVS_DATA; + +#define VCQ_NOTIFY_EVENT_ID (36) + +uint8_t bar_msg_col_index_trans(uint8_t dst); +uint8_t bar_msg_row_index_trans(uint8_t src); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZXDH_MSG_CHAN_PRIV_H_ */ diff --git a/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c b/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c new file mode 100644 index 000000000000..3c6382a6613e --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_chan_test.c @@ -0,0 +1,151 @@ +#include +#include +#include +#include +#include +#include +#include +#include "cmd/msg_chan_priv.h" + +#ifdef BAR_MSG_TEST + +/*计算方法: risc时间戳 - host时间戳*/ +#define HOST_RISC_DIFF (307762) + +uint64_t print_time(char *str) +{ + ktime_t kt = ktime_get(); + s64 us_since_boot = ktime_to_us(kt); + LOG_INFO("%s timestamp: %llu us\n", str, us_since_boot); + return us_since_boot; +} +struct msg_time_statis_reps { + uint16_t sum_check; + + uint64_t risc_recv_msg_t; + uint64_t risc_push_msg_t; + uint64_t risc_pop_msg_t; + uint64_t risc_msg_proc_t; + uint64_t risc_notice_peer_t; 
+} __attribute__((packed)); + +struct msg_time_host_risc { + uint64_t host_send_msg_t; + uint64_t host_recv_msg_t; + struct msg_time_statis_reps risc_time; +} __attribute__((packed)); + +struct msg_time_host_risc global_time_stat = { 0 }; + +void print_risc_time_stamp(struct msg_time_host_risc *stat) +{ +#if 0 + LOF_INFO("host_send_msg_t: %llu us.\n", stat->host_send_msg_t); + LOF_INFO("risc_recv_msg_t: %llu us.\n", stat->risc_time.risc_recv_msg_t); + LOF_INFO("risc_push_msg_t: %llu us.\n", stat->risc_time.risc_push_msg_t); + LOF_INFO("risc_pop_msg_t: %llu us.\n", stat->risc_time.risc_pop_msg_t); + LOF_INFO("risc_msg_proc_t: %llu us.\n", stat->risc_time.risc_msg_proc_t); + LOF_INFO("risc_notice_peer_t: %llu us.\n", stat->risc_time.risc_notice_peer_t); + LOF_INFO("host_recv_msg_t: %llu us.\n", stat->host_recv_msg_t); +#endif + LOG_INFO("risc recv->msg push: %llu us.\n", + stat->risc_time.risc_push_msg_t - + stat->risc_time.risc_recv_msg_t); + LOG_INFO("risc push->risc pop: %llu us.\n", + stat->risc_time.risc_pop_msg_t - + stat->risc_time.risc_push_msg_t); + LOG_INFO("risc pop->before proc : %llu us.\n", + stat->risc_time.risc_msg_proc_t - + stat->risc_time.risc_pop_msg_t); + LOG_INFO("after proc->risc set valid: %llu us.\n", + stat->risc_time.risc_notice_peer_t - + stat->risc_time.risc_msg_proc_t); +} + +uint16_t sum_func(void *data, uint16_t len) +{ + uint64_t result = 0; + int idx = 0; + uint16_t ret = 0; + + if (data == NULL) { + return 0; + } + + for (idx = 0; idx < len; idx++) { + result += *((uint8_t *)data + idx); + } + + ret = (uint16_t)result; + return ret; +} + +uint16_t test_sync_send(void) +{ + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + uint16_t payload_len = 0; + uint64_t bar_base_addr = 0; + void *payload_addr = NULL; + uint8_t recv_buffer[200] = { 0 }; + uint16_t reps = 0; + uint16_t ret = 0; + + payload_len = 100; + payload_addr = kmalloc(payload_len, GFP_KERNEL); + if (!payload_addr) { + LOG_ERR("malloca 
failed"); + return 0xaa; + } + get_random_bytes(payload_addr, payload_len); + LOG_INFO("sync send msg len: %x", payload_len); + + in.src_pcieid = 0x900; + in.virt_addr = 0; + in.payload_addr = payload_addr; + in.payload_len = payload_len; + in.src = MSG_CHAN_END_MPF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = 50; + + /* 构造result接收参数*/ + result.recv_buffer = recv_buffer; + result.buffer_len = sizeof(recv_buffer); + + /* 调用发送接口*/ + LOG_INFO("start to sync send test."); + global_time_stat.host_send_msg_t = + print_time("before send.") + HOST_RISC_DIFF; + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + global_time_stat.host_recv_msg_t = + print_time("after send.") + HOST_RISC_DIFF; + + if (ret != BAR_MSG_OK) { + LOG_ERR("sync send failed"); + ret = 0xAA; + goto out; + } + + struct msg_time_statis_reps *reps_ptr = + (struct msg_time_statis_reps *)((uint8_t *)result.recv_buffer + + 4); + if (reps_ptr->sum_check == sum_func(payload_addr, payload_len)) { + memcpy(&global_time_stat.risc_time, (void *)reps_ptr, + sizeof(struct msg_time_statis_reps)); + print_risc_time_stamp(&global_time_stat); + ret = 0; + LOG_ERR("reps validate success: %d", reps); + goto out; + } else { + LOG_ERR("reps valide failed: %d", reps); + ret = 0xAA; + } + +out: + if (!payload_addr) { + kfree(payload_addr); + payload_addr = NULL; + } + return ret; +} +#endif diff --git a/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h b/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h new file mode 100644 index 000000000000..f30e899f395d --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_chan_ver.h @@ -0,0 +1,16 @@ +#ifndef _ZXDH_MSG_CHAN_VERSION_H_ +#define _ZXDH_MSG_CHAN_VERSION_H_ + +#ifdef DRIVER_VERSION_VAL +#define DRV_VERSION DRIVER_VERSION_VAL +#else +#define DRV_VERSION "1.0-1" +#endif + +#define DRV_RELDATE "December 1, 2022" +#define DRV_NAME "msg_chan" +#define DRV_DESCRIPTION "DPU MSG Channel Driver" + +#define hbond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + +#endif 
/* _ZXDH_MSG_CHAN_VERSION_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/cmd/msg_main.c b/drivers/net/ethernet/dinghai/cmd/msg_main.c new file mode 100644 index 000000000000..7c1ea5fecedd --- /dev/null +++ b/drivers/net/ethernet/dinghai/cmd/msg_main.c @@ -0,0 +1,31 @@ +#include +#include +#include + +#include "msg_chan_ver.h" +#include "msg_chan_priv.h" + +static int __init msg_chan_init(void) +{ + BAR_LOG_INFO("enter\n"); + zxdh_bar_msg_chan_init(); + +#ifdef TEST + BAR_TestApp(); +#endif + + return 0; +} + +static void __exit msg_chan_exit(void) +{ + zxdh_bar_msg_chan_remove(); + BAR_LOG_INFO("exit\n"); +} + +module_init(msg_chan_init); +module_exit(msg_chan_exit); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); +MODULE_AUTHOR("ZTE Corporation"); diff --git a/drivers/net/ethernet/dinghai/devlink.c b/drivers/net/ethernet/dinghai/devlink.c new file mode 100644 index 000000000000..eac03a5a508d --- /dev/null +++ b/drivers/net/ethernet/dinghai/devlink.c @@ -0,0 +1,40 @@ +#include +#include + +int32_t zxdh_devlink_register(struct devlink *devlink) +{ + struct dh_core_dev *dh_dev = devlink_priv(devlink); + int32_t err = 0; + + devlink_register(devlink); + + err = dh_dev->devlink_ops->params_register(devlink); + if (err != 0) { + LOG_ERR("params_register failed: %d\n", err); + return err; + } + + return err; +} + +struct devlink *zxdh_devlink_alloc(struct device *dev, + struct devlink_ops *dh_devlink_ops, + size_t priv_size) +{ + return devlink_alloc(dh_devlink_ops, + sizeof(struct dh_core_dev) + priv_size, dev); +} + +void zxdh_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +void zxdh_devlink_unregister(struct devlink *devlink) +{ + struct dh_core_dev *dev = devlink_priv(devlink); + + dev->devlink_ops->params_unregister(devlink); + + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/dinghai/dh_cmd.c 
b/drivers/net/ethernet/dinghai/dh_cmd.c new file mode 100644 index 000000000000..5bd24ecd9694 --- /dev/null +++ b/drivers/net/ethernet/dinghai/dh_cmd.c @@ -0,0 +1,1358 @@ +#include +#include +#include +#include +#include +#include +#include "cmd/msg_chan_priv.h" +#include "cmd/msg_chan_lock.h" +#include "en_aux/en_aux_cmd.h" +#include "msg_common.h" + +/***************************************** +[src/dst]时应该将消息发到低2k(0)还是高2K(1) +src/dst: TO_RISC, TO_PFVF, TO_MPF +MPF: 0 0 0 +PF: 0 0 1 +VF: 0 1 1 +******************************************/ +uint8_t subchan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = { + { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, + BAR_SUBCHAN_INDEX_SEND }, + { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, + BAR_SUBCHAN_INDEX_RECV }, + { BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV, + BAR_SUBCHAN_INDEX_RECV } +}; + +uint8_t chan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = { + { BAR_INDEX_TO_RISC, BAR_INDEX_MPF_TO_PFVF, BAR_INDEX_MPF_TO_MPF }, + { BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF }, + { BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF, BAR_INDEX_PFVF_TO_MPF } +}; + +void *internal_addr; + +bool is_mpf_scaned = FALSE; + +static struct msgid_ring g_msgid_ring; + +/* 消息处理函数表*/ +zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[MSG_MODULE_NUM]; + +void bar_chan_check_chan_stats(int ret, uint64_t addr) +{ + struct bar_msg_header *hdr = (struct bar_msg_header *)(uintptr_t)addr; + + if (ret == 0) { + return; + } + /* check bar msg_header*/ + BAR_LOG_ERR( + "bar msg err, ret: %d, valid: %u, msg_id: %u, event_id: %u, " + "ack: %u, src_pcieid: 0x%x, dst_pcieid: 0x%x, chan_addr: 0x%llx.\n", + ret, hdr->valid, hdr->msg_id, hdr->event_id, hdr->ack, + hdr->src_pcieid, hdr->dst_pcieid, addr); +} + +uint16_t bar_msg_src_parse(struct zxdh_pci_bar_msg *in) +{ + if (in == NULL) { + return BAR_MSG_ERR_NULL; + } + + if (in->src == MSG_CHAN_END_MPF) { + if (!is_mpf_scaned) { + return BAR_MSG_ERR_MPF_NOT_SCANED; + } + in->virt_addr = + 
(uint64_t)(uintptr_t)internal_addr + BAR_MSG_OFFSET; + in->src_pcieid = PF0_PCIEID; + } + return BAR_MSG_OK; +} + +void bar_chan_sync_fill_header(uint32_t msg_id, struct zxdh_pci_bar_msg *in, + struct bar_msg_header *msg_header) +{ + memset(msg_header, 0, sizeof(*msg_header)); + msg_header->sync = BAR_CHAN_MSG_SYNC; + msg_header->event_id = in->event_id; + msg_header->len = in->payload_len; + msg_header->msg_id = msg_id; + msg_header->dst_pcieid = in->dst_pcieid; + msg_header->src_pcieid = in->src_pcieid; +} + +int bar_chan_msgid_allocate(uint16_t *msgid) +{ + int ret = BAR_MSG_OK; + uint16_t msg_id = 0; + struct msgid_reps_info *msgid_reps_info = NULL; + uint16_t count = 0; + + spin_lock(&g_msgid_ring.lock); + msg_id = g_msgid_ring.msg_id; + do { + count++; + ++msg_id; + msg_id %= MAX_MSG_BUFF_NUM; + msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id]; + + } while (msgid_reps_info->flag != REPS_INFO_FLAG_USABLE && + (count < MAX_MSG_BUFF_NUM)); + + if (count >= MAX_MSG_BUFF_NUM) { + ret = -1; + goto out; + } + + msgid_reps_info->flag = REPS_INFO_FLAG_USED; + g_msgid_ring.msg_id = msg_id; + *msgid = msg_id; + +out: + spin_unlock(&g_msgid_ring.lock); + return ret; +} + +uint16_t bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result, + uint16_t *msg_id) +{ + int ret = 0; + struct msgid_reps_info *reps_info = NULL; + + ret = bar_chan_msgid_allocate(msg_id); + if (ret == -1) { + return BAR_MSG_ERR_MSGID; + } + reps_info = &g_msgid_ring.reps_info_tbl[*msg_id]; + reps_info->reps_buffer = result->recv_buffer; + reps_info->buffer_len = result->buffer_len; + + return BAR_MSG_OK; +} + +void bar_chan_msgid_free(uint16_t msg_id) +{ + struct msgid_reps_info *msgid_reps_info = NULL; + if (msg_id >= MAX_MSG_BUFF_NUM) { + return; + } + msgid_reps_info = &g_msgid_ring.reps_info_tbl[msg_id]; + spin_lock(&g_msgid_ring.lock); + msgid_reps_info->flag = REPS_INFO_FLAG_USABLE; + spin_unlock(&g_msgid_ring.lock); + return; +} + +uint8_t bar_msg_row_index_trans(uint8_t src) +{ + 
uint8_t src_index = 0; + + switch (src) { + case MSG_CHAN_END_MPF: { + src_index = BAR_MSG_SRC_MPF; + break; + } + case MSG_CHAN_END_PF: { + src_index = BAR_MSG_SRC_PF; + break; + } + case MSG_CHAN_END_VF: { + src_index = BAR_MSG_SRC_VF; + break; + } + default: { + src_index = BAR_MSG_SRC_ERR; + break; + } + } + return src_index; +} + +uint8_t bar_msg_col_index_trans(uint8_t dst) +{ + uint8_t dst_index = 0; + + switch (dst) { + case MSG_CHAN_END_MPF: { + dst_index = BAR_MSG_DST_MPF; + break; + } + case MSG_CHAN_END_PF: { + dst_index = BAR_MSG_DST_PFVF; + break; + } + case MSG_CHAN_END_VF: { + dst_index = BAR_MSG_DST_PFVF; + break; + } + case MSG_CHAN_END_RISC: { + dst_index = BAR_MSG_DST_RISC; + break; + } + default: { + dst_index = BAR_MSG_SRC_ERR; + break; + } + } + return dst_index; +} + +int bar_chan_send_para_check(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result) +{ + uint8_t src_index = 0; + uint8_t dst_index = 0; + + if (in == NULL || result == NULL) { + BAR_LOG_ERR("send para ERR: null para.\n"); + return BAR_MSG_ERR_NULL_PARA; + } + + src_index = bar_msg_row_index_trans((uint8_t)in->src); + dst_index = bar_msg_col_index_trans((uint8_t)in->dst); + if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) { + BAR_LOG_ERR("send para ERR: chan doesn't exist.\n"); + return BAR_MSG_ERR_TYPE; + } + if (in->event_id > MSG_MODULE_NUM) { + BAR_LOG_ERR("send para ERR: invalid event_id: %d.\n", + in->event_id); + return BAR_MSG_ERR_MODULE; + } + if (in->payload_addr == NULL) { + BAR_LOG_ERR("send para ERR: null message.\n"); + return BAR_MSG_ERR_BODY_NULL; + } + if (in->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + BAR_LOG_ERR("send para ERR: len %x is too long.\n", + in->payload_len); + return BAR_MSG_ERR_LEN; + } + if (in->virt_addr == 0 || result->recv_buffer == NULL) { + BAR_LOG_ERR( + "send para ERR: virt_addr or recv_buffer is NULL.\n"); + return BAR_MSG_ERR_VIRTADDR_NULL; + } + if (result->buffer_len < REPS_HEADER_PAYLOAD_OFFSET) { + 
BAR_LOG_ERR( + "recv buffer's len: %d is short than mininal 4 bytes\n", + result->buffer_len); + } + return BAR_MSG_OK; +} + +/* 根据用户提供的src和dst和当前的场景来推算2K的偏移*/ +void bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, + uint64_t *subchan_addr) +{ + uint8_t src_index, dst_index; + uint16_t chan_id, subchan_id; + + src_index = bar_msg_row_index_trans((uint8_t)in->src); + dst_index = bar_msg_col_index_trans((uint8_t)in->dst); + + if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) { + return; + } + + chan_id = chan_id_tbl[src_index][dst_index]; + subchan_id = subchan_id_tbl[src_index][dst_index]; + *subchan_addr = in->virt_addr + + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL; + return; +} + +int bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data) +{ + uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK); + + if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL) { + return -EADDRNOTAVAIL; + } + + writel(data, (volatile void *)(uintptr_t)(subchan_addr + algin_offset)); + return 0; +} + +int bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata) +{ + uint32_t algin_offset = (offset & BAR_ALIGN_WORD_MASK); + + if (algin_offset >= BAR_MSG_ADDR_CHAN_INTERVAL) { + return -EADDRNOTAVAIL; + } + + *pdata = readl((const volatile void *)(uintptr_t)(subchan_addr + + algin_offset)); + return 0; +} + +uint16_t bar_chan_msg_header_set(uint64_t subchan_addr, + struct bar_msg_header *msg_header) +{ + uint32_t *data = (uint32_t *)msg_header; + uint16_t idx = 0; + + for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++) { + bar_chan_reg_write(subchan_addr, idx * 4, *(data + idx)); + } + + return BAR_MSG_OK; +} + +uint16_t bar_chan_msg_header_get(uint64_t subchan_addr, + struct bar_msg_header *msg_header) +{ + uint32_t *data = (uint32_t *)msg_header; + uint16_t idx = 0; + + for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++) { + bar_chan_reg_read(subchan_addr, idx * 4, data + idx); + } + + return BAR_MSG_OK; +} 
+ +uint16_t bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg, + uint16_t len) +{ + uint32_t *data = (uint32_t *)msg; + uint32_t count = (len / sizeof(uint32_t)); + uint32_t remain = (len % sizeof(uint32_t)); + uint32_t ix = 0, remain_data = 0; + + for (ix = 0; ix < count; ix++) { + bar_chan_reg_write(subchan_addr, + 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, + *(data + ix)); + } + for (ix = 0; ix < remain; ix++) { + remain_data |= *((uint8_t *)(msg + (len - remain + ix))) + << (8 * ix); + } + bar_chan_reg_write(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET, + remain_data); + + return BAR_MSG_OK; +} + +uint16_t bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg, + uint16_t len) +{ + uint32_t *data = (uint32_t *)msg; + uint32_t count = (len / sizeof(uint32_t)); + uint32_t remain = (len % sizeof(uint32_t)); + uint32_t ix = 0, remain_data = 0; + + for (ix = 0; ix < count; ix++) { + bar_chan_reg_read(subchan_addr, + 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, + (data + ix)); + } + bar_chan_reg_read(subchan_addr, 4 * count + BAR_MSG_PLAYLOAD_OFFSET, + &remain_data); + for (ix = 0; ix < remain; ix++) { + *((uint8_t *)(msg + (len - remain + ix))) = remain_data >> + (8 * ix); + } + return BAR_MSG_OK; +} + +uint16_t bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label) +{ + uint32_t data = 0; + + bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data); + data &= (~BAR_MSG_VALID_MASK); + data |= (uint32_t)valid_label; + bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data); + + return BAR_MSG_OK; +} + +uint16_t bar_msg_valid_stat_get(uint64_t subchan_addr) +{ + uint32_t data = 0; + + bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data); + if (BAR_MSG_CHAN_USABLE == (data & BAR_MSG_VALID_MASK)) { + return BAR_MSG_CHAN_USABLE; + } + + return BAR_MSG_CHAN_USED; +} + +uint16_t bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label) +{ + uint32_t data = 0; + + bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data); + data &= 
(~(uint32_t)BAR_MSG_POL_MASK); + data |= ((uint32_t)label << BAR_MSG_POL_OFFSET); + bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data); + + return BAR_MSG_OK; +} + +static uint8_t payload_temp_buf[BAR_MSG_ADDR_CHAN_INTERVAL] = { 0 }; +uint16_t bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr, + uint16_t payload_len, + struct bar_msg_header *msg_header) +{ + uint8_t *msg = (uint8_t *)(payload_addr); + struct bar_msg_header hdr_read = { 0 }; + uint16_t valid = 0; + + bar_chan_msg_header_set(subchan_addr, msg_header); + bar_chan_msg_header_get(subchan_addr, &hdr_read); + + bar_chan_msg_payload_set(subchan_addr, msg, payload_len); + bar_chan_msg_payload_get(subchan_addr, payload_temp_buf, payload_len); + + bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USED); + valid = bar_msg_valid_stat_get(subchan_addr); + + return BAR_MSG_OK; +} + +int bar_chan_recv_func_check(uint16_t check) +{ + if (CHECK_STATE_OK == check) { + return BAR_MSG_OK; + } else { + BAR_LOG_ERR("recv func check failed, check field: 0x%x", check); + return BAR_MSG_ERR_USR_RET_ERR; + } +} + +int bar_chan_sync_msg_reps_get(uint64_t subchan_addr, uint64_t recv_buffer, + uint16_t buffer_len, uint16_t send_msg_id, + uint16_t op_code) +{ + int ret = BAR_MSG_OK; + uint16_t recv_msg_id = 0; + uint16_t recv_len = 0; + uint8_t *recv_msg = (uint8_t *)(uintptr_t)recv_buffer; + struct bar_msg_header msg_header; + struct msgid_reps_info *reps_info = NULL; + + /*从消息头中取出消息回复的长度,取出msg_id,如果msg_id对应的usable的话,该条同步回复作废*/ + memset(&msg_header, 0, sizeof(msg_header)); + bar_chan_msg_header_get(subchan_addr, &msg_header); + recv_len = msg_header.len; + recv_msg_id = msg_header.msg_id; + + if (recv_msg_id != send_msg_id) { + BAR_LOG_ERR("send msg id: %d, but get reply msg id: %d.\n", + send_msg_id, recv_msg_id); + ret = BAR_MSG_ERR_REPLY; + goto out; + } + + reps_info = &g_msgid_ring.reps_info_tbl[recv_msg_id]; + if (reps_info->flag != REPS_INFO_FLAG_USED) { + BAR_LOG_ERR("msg_id: %d is release", 
recv_msg_id); + ret = BAR_MSG_ERR_REPLY; + goto out; + } + + if ((op_code < ZXDH_GET_SW_STATS) && + (recv_len > + ZXDH_REPS_MAX_SIZE_BEFORE57)) { //op_code=57之前的PF VF兼容性场景考虑 + recv_len = ZXDH_REPS_MAX_SIZE_BEFORE57; + } + if (recv_len > buffer_len - REPS_HEADER_PAYLOAD_OFFSET) { + BAR_LOG_ERR("reps_buf_len is %d, but reps_msg_len is %d", + buffer_len, recv_len + 4); + ret = BAR_MSG_ERR_REPSBUFF_LEN; + goto out; + } + + /* 从reps_buff + 4的位置拷贝进回复数据*/ + bar_chan_msg_payload_get( + subchan_addr, recv_msg + REPS_HEADER_PAYLOAD_OFFSET, recv_len); + + ret = bar_chan_recv_func_check(msg_header.check); + if (ret != BAR_MSG_OK) { + goto out; + } + + /* 拷贝数据长度*/ + *(uint16_t *)(recv_msg + REPS_HEADER_LEN_OFFSET) = recv_len; + /* reps头valid置位*/ + *recv_msg = REPS_HEADER_REPLYED; + +out: + return ret; +} + +uint64_t subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id, + uint8_t subchan_id) +{ + return virt_addr + + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL; +} + +uint64_t recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr) +{ + uint8_t chan_id = 0; + uint8_t subchan_id = 0; + uint8_t src = bar_msg_col_index_trans(src_type); + uint8_t dst = bar_msg_row_index_trans(dst_type); + + if (src >= BAR_MSG_SRC_NUM || dst >= BAR_MSG_DST_NUM) { + return 0; + } + /* 接收通道id和发送通道id相同*/ + chan_id = chan_id_tbl[dst][src]; + /* 接收子通道id和发送子通道相反*/ + subchan_id = (!!subchan_id_tbl[dst][src]) ? BAR_SUBCHAN_INDEX_SEND : + BAR_SUBCHAN_INDEX_RECV; + return subchan_addr_cal(virt_addr, chan_id, subchan_id); +} + +uint64_t reply_addr_get(uint8_t sync, uint8_t src_type, uint8_t dst_type, + uint64_t virt_addr) +{ + uint8_t chan_id = 0; + uint8_t subchan_id = 0; + uint64_t recv_rep_addr = 0; + uint8_t src = bar_msg_col_index_trans(src_type); + uint8_t dst = bar_msg_row_index_trans(dst_type); + + if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR) { + return 0; + } + + chan_id = chan_id_tbl[dst][src]; + subchan_id = (!!subchan_id_tbl[dst][src]) ? 
BAR_SUBCHAN_INDEX_SEND : + BAR_SUBCHAN_INDEX_RECV; + if (sync == BAR_CHAN_MSG_SYNC) { //同步消息 + recv_rep_addr = + subchan_addr_cal(virt_addr, chan_id, subchan_id); + } else { + recv_rep_addr = + subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id); + } + return recv_rep_addr; +} + +uint16_t bar_chan_msg_header_check(struct bar_msg_header *msg_header) +{ + uint8_t event_id = 0; + uint16_t len = 0; + + if (msg_header == NULL) { + return BAR_MSG_ERR_NULL; + } + if (msg_header->valid != BAR_MSG_CHAN_USED) { + BAR_LOG_ERR("recv header ERR: valid label is not used.\n"); + return BAR_MSG_ERR_MODULE; + } + event_id = msg_header->event_id; + if (event_id >= (uint8_t)MSG_MODULE_NUM) { + BAR_LOG_ERR("recv header ERR: invalid event_id: %d.\n", + event_id); + return BAR_MSG_ERR_MODULE; + } + len = msg_header->len; + if (len > BAR_MSG_PAYLOAD_MAX_LEN) { + BAR_LOG_ERR("recv header ERR: invalid mesg len: %d.\n", len); + return BAR_MSG_ERR_LEN; + } + if (msg_header->ack == BAR_CHAN_MSG_NO_ACK && + msg_recv_func_tbl[msg_header->event_id] == NULL) { + BAR_LOG_DEBUG("recv header ERR: module:%d doesn't register", + event_id); + return BAR_MSG_ERR_MODULE_NOEXIST; + } + return BAR_MSG_OK; +} + +/* 同步消息接收处理*/ +void bar_msg_sync_msg_proc(uint64_t reply_addr, + struct bar_msg_header *msg_header, + uint8_t *reciver_buff, void *dev) +{ + uint16_t reps_len = 0; + uint8_t *reps_buffer = NULL; + zxdh_bar_chan_msg_recv_callback recv_func = NULL; + + reps_buffer = kmalloc(BAR_MSG_PAYLOAD_MAX_LEN, GFP_KERNEL); + if (reps_buffer == NULL) { + return; + } + /* 查询消息处理函数,处理消息,消息处理的结果放到reps_buffer中, 长度放到reps_len中*/ + recv_func = msg_recv_func_tbl[msg_header->event_id]; + recv_func(reciver_buff, msg_header->len, reps_buffer, &reps_len, dev); + msg_header->ack = BAR_CHAN_MSG_ACK; + msg_header->len = reps_len; + /* 计算回复消息2K的地址*/ + bar_chan_msg_header_set(reply_addr, msg_header); + bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len); + bar_chan_msg_valid_set(reply_addr, BAR_MSG_CHAN_USABLE); + + 
BAR_KFREE_PTR(reps_buffer); + return; +} + +zxdh_usr_msg_cache_callback msg_cache_func = NULL; +spinlock_t cache_func_lock; +void zxdh_usr_msg_cache_func_register(zxdh_usr_msg_cache_callback func) +{ + spin_lock(&cache_func_lock); + BAR_LOG_INFO("register push func success.\n"); + msg_cache_func = func; + spin_unlock(&cache_func_lock); +} +EXPORT_SYMBOL(zxdh_usr_msg_cache_func_register); + +void bar_cache_msg_to_usr_queue(uint16_t event_id, void *msg, uint16_t msg_len) +{ + spin_lock(&cache_func_lock); + if (msg_cache_func) { + msg_cache_func(event_id, msg, msg_len); + } + spin_unlock(&cache_func_lock); +} + +/* 统一的中断处理函数*/ +int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev) +{ + uint64_t recv_addr = 0; + uint64_t reps_addr = 0; + struct bar_msg_header msg_header = { 0 }; + uint8_t *recved_msg = NULL; + uint16_t ret = 0; + + /*1 接收消息地址*/ + recv_addr = recv_addr_get(src, dst, virt_addr); + //BAR_LOG_DEBUG("recv_addr: 0x%llx, \nvirt_addr: 0x%llx", recv_addr, virt_addr); + if (recv_addr == 0) { + BAR_LOG_ERR("invalid driver type"); + return BAR_MSG_ERR_NULL; + } + /*2 取消息头并检查是否合法*/ + bar_chan_msg_header_get(recv_addr, &msg_header); + ret = bar_chan_msg_header_check(&msg_header); + if (ret != BAR_MSG_OK) { + bar_chan_check_chan_stats(ret, recv_addr); + return ret; + } + /*3 创建消息payload buf,取出消息暂存*/ + recved_msg = kmalloc(msg_header.len, GFP_KERNEL); + if (recved_msg == NULL) { + BAR_LOG_ERR("create temp buff failed"); + return BAR_MSG_ERR_NULL; + } + bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len); + + if (msg_header.usr == 0) { + /* risc send msg to kernel*/ + reps_addr = + reply_addr_get(msg_header.sync, src, dst, virt_addr); + bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev); + goto out; + } else { + /* risc send msg to user*/ + bar_cache_msg_to_usr_queue(msg_header.event_id, recved_msg, + msg_header.len); + msg_header.len = 0; + msg_header.ack = 1; + bar_chan_msg_header_set(recv_addr, &msg_header); + } + + 
bar_chan_msg_poltag_set(recv_addr, 0); + bar_chan_msg_valid_set(recv_addr, BAR_MSG_CHAN_USABLE); + +out: + kfree(recved_msg); + return BAR_MSG_OK; +} +EXPORT_SYMBOL(zxdh_bar_irq_recv); + +int32_t call_msg_recv_func_tbl(uint16_t event_id, void *pay_load, uint16_t len, + void *reps_buffer, uint16_t *reps_len, void *dev) +{ + zxdh_bar_chan_msg_recv_callback recv_func = NULL; + + recv_func = msg_recv_func_tbl[event_id]; + if (unlikely(recv_func == NULL)) { + BAR_LOG_ERR("event_id[%d] unregister\n", event_id); + return BAR_MSG_ERR_MODULE_NOEXIST; + } + + return recv_func(pay_load, len, reps_buffer, reps_len, dev); +} +EXPORT_SYMBOL(call_msg_recv_func_tbl); + +static void bar_chan_reset_flag_normal(uint64_t subchan_addr, uint8_t dst) +{ + if (dst != MSG_CHAN_END_RISC) { + bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE); + } + return; +} + +#define VALID_FLAG_DETECT_SPAN_MS (10) +#define VALID_FLAG_DETECT_TIMEOUT_MS (6000) +#define US_NUMS_PER_MS (1000) + +static int bar_chan_sync_wait(uint64_t subchan_addr, uint8_t dst, + uint32_t *wait_reps_retry_times) +{ + uint8_t valid = 0; + int retry_cnt = 0; + uint32_t timeout_th_res_ms = 0; + int max_retries = + VALID_FLAG_DETECT_TIMEOUT_MS / VALID_FLAG_DETECT_SPAN_MS / 2; + + if (dst != MSG_CHAN_END_RISC) { + *wait_reps_retry_times = BAR_MSG_TIMEOUT_TH; + return 0; + } + + for (retry_cnt = 0; retry_cnt < max_retries; retry_cnt++) { + valid = bar_msg_valid_stat_get(subchan_addr); + if (valid == BAR_MSG_CHAN_USABLE) { + timeout_th_res_ms = + VALID_FLAG_DETECT_TIMEOUT_MS - + retry_cnt * VALID_FLAG_DETECT_SPAN_MS; + *wait_reps_retry_times = timeout_th_res_ms * + US_NUMS_PER_MS / + BAR_MSG_POLLING_SPAN_US; + return 0; + } + msleep(VALID_FLAG_DETECT_SPAN_MS); + } + return -1; +} + +/** + * zxdh_bar_chan_sync_msg_send - 通过PCIE BAR空间发送同步消息 + * @in: 消息发送信息 + * @result: 消息结果反馈 + * @return: 0 成功,其他失败 + */ +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result) +{ + int ret = 0; + 
uint16_t valid = 0; + uint16_t time_out_cnt = 0; + uint32_t wait_reps_retry_times = 0; + uint16_t msg_id = 0; + uint64_t subchan_addr = 0; + uint16_t op_code = 0; + struct bar_msg_header msg_header = { 0 }; + + ret = bar_msg_src_parse(in); + if (ret != BAR_MSG_OK) { + return ret; + } + + ret = bar_chan_send_para_check(in, result); + if (ret != BAR_MSG_OK) { + BAR_LOG_ERR("para check failed, %d.", ret); + return ret; + } + + /* 申请msg_id,并将缓存信息存放到表中*/ + ret = bar_chan_save_recv_info(result, &msg_id); + if (ret != BAR_MSG_OK) { + BAR_LOG_ERR("msg_id allocated failed."); + return ret; + } + /* 计算2K通道的地址*/ + bar_chan_subchan_addr_get(in, &subchan_addr); + if (*(uint32_t *)(uintptr_t)subchan_addr == 0xffffffff) { + BAR_LOG_ERR("pcie bar abnormal.\n"); + ret = BAR_MSG_ERR_BAR_ABNORMAL; + bar_chan_msgid_free(msg_id); + return ret; + } + /* 填充消息头*/ + bar_chan_sync_fill_header(msg_id, in, &msg_header); + /* 给通道上锁,根据src和dst判断是分配硬件锁还是软件锁*/ + ret = bar_chan_lock(in->src, in->dst, in->src_pcieid, in->virt_addr); + if (ret != BAR_MSG_OK) { + bar_chan_msgid_free(msg_id); + return ret; + } + + ret = bar_chan_sync_wait(subchan_addr, in->dst, &wait_reps_retry_times); + if (ret != 0) { + BAR_LOG_ERR("chan valid flag is used while send msg-%u.\n", + msg_id); + goto free_chan; + } + BAR_LOG_DEBUG("pcie_id-0x%x src-%u, dst-%u get lock.\n", + in->src_pcieid, in->src, in->dst); + + op_code = *(uint8_t *)(in->payload_addr); + /* 消息头、消息体发送到bar空间, valid置位*/ + bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len, + &msg_header); + /* 轮询等待消息回复*/ + do { + usleep_range(BAR_MSG_POLLING_SPAN_US, + BAR_MSG_POLLING_SPAN_US + 10); + valid = bar_msg_valid_stat_get(subchan_addr); + time_out_cnt++; + } while ((time_out_cnt < wait_reps_retry_times) && + (BAR_MSG_CHAN_USED == valid)); + + /* 如果超时恢复标志位*/ + if ((wait_reps_retry_times == time_out_cnt) && + (BAR_MSG_CHAN_USABLE != valid)) { + bar_chan_reset_flag_normal(subchan_addr, in->dst); + bar_chan_msg_poltag_set(subchan_addr, 0); + 
BAR_LOG_ERR("BAR MSG ERR: msg_id: %d time out.\n", + msg_header.msg_id); + ret = BAR_MSG_ERR_TIME_OUT; + } else { + /* 从消息头中取出回复消息的长度len, 从payload中取出消息内容,放到本地缓存reps_buff*/ + ret = bar_chan_sync_msg_reps_get( + subchan_addr, (uint64_t)(uintptr_t)result->recv_buffer, + result->buffer_len, msg_id, op_code); + } +free_chan: + bar_chan_msgid_free(msg_id); + /*通道解锁*/ + bar_chan_check_chan_stats(ret, subchan_addr); + BAR_LOG_DEBUG("pcie_id-0x%x src-%u, dst-%u release lock.\n", + in->src_pcieid, in->src, in->dst); + bar_chan_unlock((uint8_t)in->src, (uint8_t)in->dst, in->src_pcieid, + in->virt_addr); + return ret; +} +EXPORT_SYMBOL(zxdh_bar_chan_sync_msg_send); + +static int +bar_chan_callback_register_check(uint8_t event_id, + zxdh_bar_chan_msg_recv_callback callback) +{ + if (event_id >= (uint8_t)MSG_MODULE_NUM) { + BAR_LOG_ERR("register ERR: invalid event_id: %d.\n", event_id); + return BAR_MSG_ERR_MODULE; + } + if (callback == NULL) { + BAR_LOG_ERR("register ERR: null callback.\n"); + return BAR_MEG_ERR_NULL_FUNC; + } + if (msg_recv_func_tbl[event_id] != NULL) { + BAR_LOG_ERR("register ERR: repeat register.\n"); + return BAR_MSG_ERR_REPEAT_REGISTER; + } + return BAR_MSG_OK; +} + +/** + * zxdh_bar_chan_msg_recv_register - PCIE BAR空间消息方式,注册消息接收回调 + * @event_id: 注册模块id + * @callback: 模块实现的接收处理函数指针 + * @return: 0 成功,其他失败 + * 一般在驱动初始化时调用 + */ +int zxdh_bar_chan_msg_recv_register(uint8_t event_id, + zxdh_bar_chan_msg_recv_callback callback) +{ + int ret = 0; + + ret = bar_chan_callback_register_check(event_id, callback); + + if (BAR_MSG_OK == ret) { + msg_recv_func_tbl[event_id] = callback; + BAR_LOG_DEBUG("register module: %d success.\n", event_id); + } + + return ret; +} +EXPORT_SYMBOL(zxdh_bar_chan_msg_recv_register); + +/** + * zxdh_bar_chan_msg_recv_unregister - PCIE BAR空间消息方式,解注册消息接收回调 + * @event_id: 内核PCIE设备地址 + * @return:0 成功,其他失败 + * 在驱动卸载时需要调用 + */ +int zxdh_bar_chan_msg_recv_unregister(uint8_t event_id) +{ + if (event_id >= (uint8_t)MSG_MODULE_NUM) { + 
BAR_LOG_ERR("unregister ERR: invalid event_id :%d.\n", + event_id); + return BAR_MSG_ERR_MODULE; + } + if (msg_recv_func_tbl[event_id] == NULL) { + BAR_LOG_ERR("unregister ERR: null proccess func.\n"); + return BAR_MSG_ERR_UNGISTER; + } + msg_recv_func_tbl[event_id] = NULL; + BAR_LOG_DEBUG("unregister module %d success.\n", event_id); + return BAR_MSG_OK; +} +EXPORT_SYMBOL(zxdh_bar_chan_msg_recv_unregister); + +int zxdh_bar_callback_register_state(uint16_t event_id) +{ + if (event_id >= (uint16_t)MSG_MODULE_NUM) { + BAR_LOG_ERR("unregister ERR: invalid event_id :%hu.\n", + event_id); + return BAR_MSG_ERR_MODULE; + } + if (msg_recv_func_tbl[event_id] == NULL) { + BAR_LOG_ERR("unregister ERR: null proccess func.\n"); + return BAR_MSG_ERR_UNGISTER; + } + return BAR_MSG_OK; +} +EXPORT_SYMBOL(zxdh_bar_callback_register_state); + +#ifdef BAR_MSG_TEST +int bar_mpf_addr_ioremap(void) +{ + uint64_t addr; + uint64_t len; + struct pci_dev *pdev = NULL; + + pdev = pci_get_device(MPF_VENDOR_ID, MPF_DEVICE_ID, NULL); + + if (pdev == NULL) { + BAR_LOG_DEBUG("not found device: deviceID %x, VendorID: %x", + MPF_DEVICE_ID, MPF_VENDOR_ID); + return -EINVAL; + } + + addr = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + if (addr == 0 || len == 0) { + BAR_LOG_ERR("pci resouce addr or len is 0\n"); + return -EINVAL; + } + + internal_addr = ioremap(addr, len); + if (IS_ERR_OR_NULL(internal_addr)) { + BAR_LOG_ERR("ioremap failed, internal_addr=0x%p\n", + internal_addr); + return -ENOMEM; + } + is_mpf_scaned = TRUE; + + return BAR_MSG_OK; +} + +void bar_mpf_addr_iounmap(void) +{ + if (internal_addr != NULL) { + iounmap(internal_addr); + } + internal_addr = NULL; + is_mpf_scaned = FALSE; + return; +} +#endif + +int bar_msgid_ring_init(void) +{ + uint16_t msg_id = 0; + struct msgid_reps_info *reps_info = NULL; + + spin_lock_init(&cache_func_lock); + spin_lock_init(&g_msgid_ring.lock); + for (msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++) { + reps_info = 
&(g_msgid_ring.reps_info_tbl[msg_id]); + reps_info->id = msg_id; + reps_info->flag = REPS_INFO_FLAG_USABLE; + } + return BAR_MSG_OK; +} + +void bar_msgid_ring_free(void) +{ + uint16_t msg_id = 0; + struct msgid_reps_info *reps_info = NULL; + + for (msg_id = 0; msg_id < MAX_MSG_BUFF_NUM; msg_id++) { + reps_info = &g_msgid_ring.reps_info_tbl[msg_id]; + del_timer_sync(&reps_info->id_timer); + } +} + +extern uint16_t test_sync_send(void); +int zxdh_bar_msg_chan_init(void) +{ + /* 测试pf地址映射*/ +#ifdef BAR_MSG_TEST + int ret = 0; + + ret = bar_mpf_addr_ioremap(); + if (ret != BAR_MSG_OK) { + BAR_LOG_DEBUG( + "mpf do not exit, but do not impact the msg chan.\n"); + } + + test_sync_send(); +#endif + + /* msg_id锁初始化*/ + bar_init_lock_arr(); + bar_msgid_ring_init(); + + return BAR_MSG_OK; +} + +void bar_chan_timer_callback(struct timer_list *timer) +{ + struct msgid_reps_info *reps_info = NULL; + + reps_info = container_of(timer, struct msgid_reps_info, id_timer); + if (reps_info->flag == REPS_INFO_FLAG_USED) { + reps_info->reps_buffer = NULL; + reps_info->buffer_len = 0; + reps_info->flag = REPS_INFO_FLAG_USABLE; + BAR_LOG_ERR( + "RECV ERR: get async reply time out, free msg_id: %u.\n", + reps_info->id); + } else { + BAR_LOG_DEBUG( + "RECV NOTICE: get async reply message success.\n"); + } + return; +} + +int zxdh_bar_msg_chan_remove(void) +{ + bar_msgid_ring_free(); + /* mpf解ioremap*/ +#ifdef BAR_MSG_TEST + bar_mpf_addr_iounmap(); +#endif + /* 消息链表资源释放*/ + + BAR_LOG_DEBUG("zxdh_msg_chan_bar remove success"); + + return 0; +} + +uint16_t bar_get_sum(uint8_t *ptr, uint8_t len) +{ + int idx = 0; + uint64_t sum = 0; + for (idx = 0; idx < len; idx++) { + sum += *(ptr + idx); + } + return (uint16_t)sum; +} + +/** + * zxdh_bar_enable_chan - 驱动使能通道函数 + * @_msix_para: msix中断配置信息 + * @vport: 查询到的vport + * @return: 0 成功,其他失败 + */ +int zxdh_bar_enable_chan(struct msix_para *_msix_para, uint16_t *vport) +{ + int ret = 0; + uint8_t recv_buf[12] = { 0 }; + uint16_t check_token, sum_res; 
+#if 0 + uint32_t domain, bus, devid, function; +#endif + struct msix_msg msix_msg = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + if (!_msix_para || !_msix_para->pdev) { + return -BAR_MSG_ERR_NULL; + } +#if 0 + sscanf(pci_name(_msix_para->pdev), "%x:%x:%x.%u", &domain, &bus, &devid, &function); + msix_msg.bdf = BDF_ECAM(bus, devid, function); +#endif + msix_msg.pcie_id = _msix_para->pcie_id; + msix_msg.vector_risc = _msix_para->vector_risc; + msix_msg.vector_pfvf = _msix_para->vector_pfvf; + msix_msg.vector_mpf = _msix_para->vector_mpf; + + in.payload_addr = &msix_msg; + in.payload_len = sizeof(msix_msg); + in.virt_addr = _msix_para->virt_addr; + in.src = _msix_para->driver_type; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_MSIX; + in.src_pcieid = _msix_para->pcie_id; + + result.recv_buffer = recv_buf; + result.buffer_len = sizeof(recv_buf); + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != BAR_MSG_OK) { + return -ret; + } + + check_token = *(uint16_t *)(recv_buf + 6); + sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg)); + if (check_token != sum_res) { + BAR_LOG_DEBUG("expect token: 0x%x, get token: 0x%x.\n", sum_res, + check_token); + return -BAR_MSG_ERR_NOT_MATCH; + } + *vport = *(uint16_t *)(recv_buf + 8); + BAR_LOG_DEBUG("vport of %s get success.\n", pci_name(_msix_para->pdev)); + return BAR_MSG_OK; +} +EXPORT_SYMBOL(zxdh_bar_enable_chan); + +int zxdh_get_bar_offset(struct bar_offset_params *paras, + struct bar_offset_res *res) +{ + int ret = 0; + uint16_t check_token, sum_res; + struct offset_get_msg send_msg = { 0 }; + struct bar_recv_msg *recv_msg = NULL; + + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + if (!paras || !res) { + return BAR_MSG_ERR_NULL; + } + + send_msg.pcie_id = paras->pcie_id; + send_msg.type = paras->type; + + in.payload_addr = &send_msg; + in.payload_len = sizeof(send_msg); + in.virt_addr = paras->virt_addr; + 
in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_OFFSET_GET; + in.src_pcieid = paras->pcie_id; + + recv_msg = kzalloc(sizeof(struct bar_recv_msg), GFP_KERNEL); + if (recv_msg == NULL) { + LOG_ERR("NULL ptr\n"); + return -1; + } + result.recv_buffer = recv_msg; + result.buffer_len = sizeof(struct bar_recv_msg); + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != BAR_MSG_OK) { + ret = -ret; + goto free_msg; + } + + check_token = recv_msg->offset_reps.check; + sum_res = bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg)); + if (check_token != sum_res) { + BAR_LOG_ERR("expect token: 0x%x, get token: 0x%x.\n", sum_res, + check_token); + ret = -BAR_MSG_ERR_NOT_MATCH; + goto free_msg; + } + res->bar_offset = recv_msg->offset_reps.offset; + res->bar_length = recv_msg->offset_reps.length; + +free_msg: + kfree(recv_msg); + return ret; +} +EXPORT_SYMBOL(zxdh_get_bar_offset); + +void zxdh_bar_reset_valid(uint64_t subchan_addr) +{ + struct bar_msg_header msg_header = { 0 }; + + bar_chan_msg_header_get(subchan_addr, &msg_header); + + subchan_addr += BAR_MSG_ADDR_CHAN_INTERVAL; + bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE); + bar_chan_msg_poltag_set(subchan_addr, 0); +} +EXPORT_SYMBOL(zxdh_bar_reset_valid); + +uint16_t zxdh_get_event_id(uint64_t subchan_addr, uint8_t src_type, + uint8_t dst_type) +{ + uint8_t subchan_id = 0; + struct bar_msg_header msg_header = { 0 }; + uint8_t src = bar_msg_col_index_trans(src_type); + uint8_t dst = bar_msg_row_index_trans(dst_type); + + if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR) { + return 0; + } + /* 接收子通道id和发送子通道相反*/ + subchan_id = (!!subchan_id_tbl[dst][src]) ? 
BAR_SUBCHAN_INDEX_SEND : + BAR_SUBCHAN_INDEX_RECV; + subchan_addr += subchan_id * BAR_MSG_ADDR_CHAN_INTERVAL; + bar_chan_msg_header_get(subchan_addr, &msg_header); + return msg_header.event_id; +} +EXPORT_SYMBOL(zxdh_get_event_id); + +int32_t zxdh_send_command(uint64_t vaddr, uint16_t pcie_id, uint16_t module_id, + void *msg, void *ack, bool is_sync_msg) +{ + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct bar_recv_msg *bar_reps = NULL; + int32_t ret = 0; + + if ((msg == NULL) || (ack == NULL)) { + LOG_ERR("NULL ptr\n"); + return -1; + } + + in.payload_addr = msg; + in.payload_len = sizeof(union zxdh_msg); + + if (((pcie_id >> PFVF_FLAG_OFFSET) & 1) == 1) { + in.src = MSG_CHAN_END_PF; + } else { + in.src = MSG_CHAN_END_VF; + } + + bar_reps = kzalloc(BAR_MSG_PAYLOAD_MAX_LEN, GFP_KERNEL); + if (bar_reps == NULL) { + LOG_ERR("NULL ptr\n"); + return -1; + } + in.dst = MSG_CHAN_END_RISC; + in.event_id = module_id; + in.virt_addr = vaddr; + in.src_pcieid = pcie_id; + result.recv_buffer = bar_reps; + result.buffer_len = sizeof(union zxdh_msg) + REPS_HEADER_PAYLOAD_OFFSET; + + switch (module_id) { + case MODULE_VF_BAR_MSG_TO_PF: { + in.dst = MSG_CHAN_END_PF; + in.dst_pcieid = FIND_PF_PCIE_ID(pcie_id); + in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET; + break; + } + case MODULE_PF_BAR_MSG_TO_VF: { + in.dst = MSG_CHAN_END_VF; + in.dst_pcieid = ((zxdh_msg_info *)msg)->hdr_vf.dst_pcie_id; + in.virt_addr += ZXDH_BAR_PFVF_MSG_OFFSET; + break; + } + case MODULE_TBL: { + in.payload_len = MSG_STRUCT_HD_LEN + + ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes; + break; + } + case MODULE_PF_TIMER_TO_RISC_MSG: { + in.payload_len = MSG_STRUCT_HD_LEN + + ((zxdh_msg_info *)msg)->hdr_to_cmn.write_bytes; + break; + } + case MODULE_PHYPORT_QUERY: { + in.payload_len = sizeof(struct zxdh_port_msg); + break; + } + case MODULE_NPSDK: { + in.payload_len = sizeof(zxdh_cfg_np_msg); + break; + } + } + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret 
!= ZXDH_NET_ACK_OK) { + ret = -ret; + goto free_reps; + } + + if (is_sync_msg && bar_reps->replied != BAR_MSG_REPS_OK) { + LOG_ERR("reps get failed\n"); + ret = -1; + goto free_reps; + } + + if (bar_reps->reps_len > + (BAR_MSG_PAYLOAD_MAX_LEN - REPS_HEADER_PAYLOAD_OFFSET)) { + LOG_ERR("reps len too long\n"); + ret = -1; + goto free_reps; + } + memcpy(ack, bar_reps->data, bar_reps->reps_len); + +free_reps: + kfree(bar_reps); + return ret; +} +EXPORT_SYMBOL(zxdh_send_command); + +int zxdh_bar_send_without_reps_hdr(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result) +{ + int ret = 0; + struct zxdh_msg_recviver_mem res_with_hdr = { 0 }; + uint8_t *temp_recv_buff = NULL; + + temp_recv_buff = kmalloc( + result->buffer_len + REPS_HEADER_PAYLOAD_OFFSET, GFP_KERNEL); + if (!temp_recv_buff) { + BAR_LOG_ERR("malloc temp buffer failed.\n"); + return -1; + } + + res_with_hdr.recv_buffer = temp_recv_buff; + res_with_hdr.buffer_len = + result->buffer_len + REPS_HEADER_PAYLOAD_OFFSET; + + ret = zxdh_bar_chan_sync_msg_send(in, &res_with_hdr); + if (ret != 0) { + goto out; + } + /* 去掉四字节的回复头*/ + memcpy(result->recv_buffer, temp_recv_buff + REPS_HEADER_PAYLOAD_OFFSET, + result->buffer_len); + +out: + kfree(temp_recv_buff); + return ret; +} +EXPORT_SYMBOL(zxdh_bar_send_without_reps_hdr); + +int32_t zxdh_vqm_queue_cfg(uint64_t virt_addr, uint16_t pcie_id, + uint32_t phy_queue_idx) +{ + int32_t ret = 0; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + OVS_TO_VQM_MSG msg = { 0 }; + VQM_RSP_OVS_DATA reps = { 0 }; + + msg.q_reset_msg.qid = phy_queue_idx; + msg.cmd = VQM_QUEUE_RSET; + + in.virt_addr = virt_addr + ZXDH_BAR_MSG_OFFSET; + in.payload_addr = &msg; + in.payload_len = sizeof(msg); + in.src_pcieid = pcie_id; + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = VCQ_NOTIFY_EVENT_ID; + + result.recv_buffer = &reps; + result.buffer_len = sizeof(reps); + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if 
(ret != BAR_MSG_OK) { + return -ret; + } + + if (reps.check_result != VQM_REPS_SUCCESS) { + BAR_LOG_ERR("check result failed.reps.check_result: 0x%x\n", + reps.check_result); + return -1; + } + return BAR_MSG_OK; +} +EXPORT_SYMBOL(zxdh_vqm_queue_cfg); diff --git a/drivers/net/ethernet/dinghai/dh_procfs.c b/drivers/net/ethernet/dinghai/dh_procfs.c new file mode 100644 index 000000000000..88eedf27c4d8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/dh_procfs.c @@ -0,0 +1,76 @@ +#include + +#include "dh_procfs.h" + +#define DRV_NAME "dinghai" + +static struct fs_entry_desc fs_entry_table[] = { { FS_ENTRY_BOND, "lag" }, + { 0 } }; + +uint32_t find_fs_entry(uint32_t type) +{ + uint32_t idx = 0; + + while (fs_entry_table[idx].file_name) { + if (type == fs_entry_table[idx].type) { + break; + } + idx++; + } + + return idx; +} + +void zxdh_create_proc_dir(struct zxdh_proc_fs *procfs) +{ + if (!procfs->proc_dir) { + procfs->proc_dir = proc_mkdir(DRV_NAME, NULL); + if (!procfs->proc_dir) { + pr_warn("Warning: Cannot create /proc/%s\n", DRV_NAME); + } + } +} + +void zxdh_destroy_proc_dir(struct zxdh_proc_fs *procfs) +{ + if (procfs->proc_dir) { + remove_proc_entry(DRV_NAME, NULL); + procfs->proc_dir = NULL; + } +} + +void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type, + struct seq_operations *seq_ops, void *data) +{ + uint32_t idx = 0; + char *file_name = NULL; + + if (procfs->proc_dir) { + idx = find_fs_entry(type); + file_name = fs_entry_table[idx].file_name; + if (file_name && (idx < PROC_ENTRY_MAX)) { + procfs->proc_entry[idx] = proc_create_seq_data( + file_name, 0444, procfs->proc_dir, seq_ops, + data); + if (procfs->proc_entry[idx] == NULL) { + pr_info("Cannot create /proc/%s/%s\n", DRV_NAME, + file_name); + } + } + } +} + +void zxdh_remove_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type) +{ + uint32_t idx = 0; + char *file_name = NULL; + + if (procfs->proc_dir) { + idx = find_fs_entry(type); + file_name = fs_entry_table[idx].file_name; + if 
(file_name && (idx < PROC_ENTRY_MAX)) { + remove_proc_entry(file_name, procfs->proc_dir); + procfs->proc_entry[idx] = NULL; + } + } +} diff --git a/drivers/net/ethernet/dinghai/dh_procfs.h b/drivers/net/ethernet/dinghai/dh_procfs.h new file mode 100644 index 000000000000..504f25aeab17 --- /dev/null +++ b/drivers/net/ethernet/dinghai/dh_procfs.h @@ -0,0 +1,26 @@ +#ifndef _ZXDH_PROC_FS_H_ +#define _ZXDH_PROC_FS_H_ + +#define PROC_ENTRY_MAX (16) + +struct zxdh_proc_fs { + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *proc_entry[PROC_ENTRY_MAX]; +}; + +struct fs_entry_desc { + uint32_t type; + char *file_name; +}; + +enum { + FS_ENTRY_BOND = 0, +}; + +void zxdh_create_proc_dir(struct zxdh_proc_fs *procfs); +void zxdh_destroy_proc_dir(struct zxdh_proc_fs *procfs); +void zxdh_create_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type, + struct seq_operations *seq_ops, void *data); +void zxdh_remove_proc_entry(struct zxdh_proc_fs *procfs, uint32_t type); + +#endif /* _ZXDH_PROC_FS_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux.c b/drivers/net/ethernet/dinghai/en_aux.c new file mode 100644 index 000000000000..ca9cbae4d89a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux.c @@ -0,0 +1,6705 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "en_aux.h" +#include "en_ethtool/ethtool.h" +#include "en_np/table/include/dpp_tbl_api.h" +#include "en_np/table/include/dpp_tbl_comm.h" +#include "en_aux/en_aux_events.h" +#include "en_aux/en_aux_eq.h" +#include "en_aux/en_aux_cmd.h" +#include "msg_common.h" +#include "en_pf.h" +#include "en_aux/en_aux_ioctl.h" +#include "slib.h" +#include +#ifdef ZXDH_MSGQ +#include "en_aux/priv_queue.h" +#endif +#include "en_aux/en_1588_pkt_proc.h" +#include "en_aux/en_aux_cmd.h" +#include "zxdh_tools/zxdh_tools_netlink.h" +#include +#include "en_pf/msg_func.h" +#include "bonding/zxdh_lag.h" +#ifdef ZXDH_DCBNL_OPEN 
+#include "en_aux/dcbnl/en_dcbnl.h" +#endif +#include + +#define IS_1588_MESSAGE 0 +#define IS_NOT_1588_MESSAGE 1 +#define IS_LB_PKT 0 +#define IS_NOT_LB_PKT 1 +#define ETHER_MAC_LEN 6 +#define ETHER_TYPE_LEN 2 +#define IP_PROT_OFFSET 9 /* IP头中protocol字段的偏移 */ +#define IPV4_HDR_LEN 20 +#define UDP_HDR_LEN 8 +#define ETH_LEN 42 + +#define KERNEL_PF_FC 92500 /* 支持VQM PF默认92.5G(94.5)限速 */ +#define KERNEL_PF_FC_1 23750 /* 支持VQM PF默认23.75G(23.75)限速 */ + +struct work_struct work_cfg_del = { 0 }; +uint8_t card_num; +const uint8_t BOND_MCAST_ADDR[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }; +static unsigned int mac_hash(struct zxdh_ipv6_mac_tbl *mac_tbl, + const uint8_t *mac_addr); +extern int32_t zxdh_get_ptp_clock_index(struct zxdh_en_device *en_dev, + uint32_t *ptp_clock_idx); +uint32_t max_pairs = ZXDH_MQ_PAIRS_NUM; +module_param(max_pairs, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_pairs, "Max queue pairs"); + +MODULE_LICENSE("Dual BSD/GPL"); + +int32_t zxdh_port_enable(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->vqmb_port_ctl) + return 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + return zxdh_vf_egr_port_attr_set(en_dev, SRIOV_VPORT_IS_UP, + enable, 0); + } + if (en_dev->is_hwbond || en_dev->ops->is_special_bond(en_dev->parent)) { + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, enable); + } + + /* 标卡/dpu卡link-down-on-close开关打开 */ + if (en_dev->link_down_on_close) { + zxdh_spm_port_enable_cfg(en_dev, enable); + } + return dpp_vport_attr_set(&pf_info, SRIOV_VPORT_IS_UP, enable); +} + +/* WARNING Do not use netif_carrier_on/off(), + it may affect the ethtool function */ +static int32_t en_open(struct net_device *netdev, bool boot) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + int32_t err = 0; + 
DPP_PF_INFO_T pf_info = { 0 }; + + LOG_INFO("zxdh_en_open start\n"); + mutex_lock(&en_priv->lock); + + set_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state); + + for (i = 0; i < en_dev->max_vq_pairs; i++) { + if (i < en_dev->curr_queue_pairs) { + /* Make sure we have some buffers: if oom use wq */ + if (!try_fill_recv(&en_dev->rq[i], GFP_KERNEL)) + schedule_delayed_work(&en_dev->refill, 0); + } + + err = xdp_rxq_info_reg(&en_dev->rq[i].xdp_rxq, netdev, i, + en_dev->rq[i].napi.napi_id); + if (err < 0) { + mutex_unlock(&en_priv->lock); + LOG_ERR("xdp_rxq_info_reg failed\n"); + return err; + } + + err = xdp_rxq_info_reg_mem_model(&en_dev->rq[i].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + mutex_unlock(&en_priv->lock); + LOG_ERR("xdp_rxq_info_reg_mem_model failed\n"); + xdp_rxq_info_unreg(&en_dev->rq[i].xdp_rxq); + return err; + } + + virtnet_napi_enable(en_dev->rq[i].vq, &en_dev->rq[i].napi); + virtnet_napi_tx_enable(netdev, en_dev->sq[i].vq, + &en_dev->sq[i].napi); + } + + mutex_unlock(&en_priv->lock); + + if (boot) { +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + err = zxdh_flow_map_init(en_priv); + if (err) { + LOG_ERR("zxdh_flow_map_init failed\n"); + } +#endif + } + + if (!en_dev->link_up) { + /* link-down-on-close 开关打开 */ + if (!en_dev->link_down_on_close) { + return 0; + } + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (!en_dev->ops->is_bond(en_dev->parent)) { + return zxdh_port_enable(en_dev, TRUE); + } + + /* 智卡的link-down-on-close 开关打开 */ + if (en_dev->link_down_on_close) { + zxdh_spm_port_enable_cfg(en_dev, 1); + } + + /* 给bond-pf的端口属性表配置为up */ + err = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_IS_UP, 1); + if (err != 0) { + LOG_ERR("dpp_vport_attr_set bond pf failed\n"); + return err; + } + + return zxdh_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, 1); +} + +int32_t zxdh_en_open(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = 
&en_priv->edev; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + return en_open(netdev, true); +} + +static int32_t en_phyport_close(struct zxdh_en_device *en_dev) +{ + DPP_PF_INFO_T pf_info = { 0 }; + int32_t err = 0; + + if (!en_dev->link_up) { + /* link-down-on-close 开关打开 */ + if (!en_dev->link_down_on_close) { + return 0; + } + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (!en_dev->ops->is_bond(en_dev->parent)) { + return zxdh_port_enable(en_dev, FALSE); + } + + /* 智卡link-down-on-close 开关打开 */ + if (en_dev->link_down_on_close) { + zxdh_spm_port_enable_cfg(en_dev, 0); + } + + /* 给bond-pf的端口属性表配置为down */ + err = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_IS_UP, 0); + if (err != 0) { + LOG_ERR("dpp_vport_attr_set bond pf failed\n"); + return err; + } + + return zxdh_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, 0); +} + +int32_t zxdh_en_close(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + + LOG_INFO("zxdh_en_close start\n"); + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + if (!test_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state)) { + return 0; + } + mutex_lock(&en_priv->lock); + clear_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state); + + /* Make sure refill_work doesn't re-enable napi! 
*/ + cancel_delayed_work_sync(&en_dev->refill); + + for (i = 0; i < en_dev->max_vq_pairs; i++) { + xdp_rxq_info_unreg(&en_dev->rq[i].xdp_rxq); + napi_disable(&en_dev->rq[i].napi); + virtnet_napi_tx_disable(&en_dev->sq[i].napi); + } + mutex_unlock(&en_priv->lock); + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + zxdh_flow_map_cleanup(en_priv); +#endif + + return en_phyport_close(en_dev); +} + +void pkt_transport_protocol_parse(int8_t next_protocol, + struct zxdh_net_hdr_tx *hdr) +{ + if (next_protocol == IPPROTO_UDP) { + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_code = PCODE_UDP; + } else if (next_protocol == IPPROTO_TCP) { + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_code = PCODE_TCP; + } else { + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_code = PCODE_IP; + } + + return; +} + +void pkt_protocol_parse(struct sk_buff *skb, struct zxdh_net_hdr_tx *hdr, + int32_t flag) +{ + struct ethhdr *mach = NULL; + struct iphdr *ipv4h = NULL; + struct ipv6hdr *ipv6h = NULL; + struct vlan_hdr *vlanhdr = NULL; + struct vlan_hdr vh = { 0 }; + uint16_t l3_protocol = 0; + unsigned int offset = 0; + + if (flag == 0) { + if (skb->protocol == htons(ETH_P_IP)) { + ipv4h = (struct iphdr *)skb_network_header(skb); + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE; + pkt_transport_protocol_parse(ipv4h->protocol, hdr); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + ipv6h = (struct ipv6hdr *)skb_network_header(skb); + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE; + pkt_transport_protocol_parse(ipv6h->nexthdr, hdr); + } else { + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE; + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_code = PCODE_NO_IP; + } + } else { + mach = (struct ethhdr *)skb_inner_mac_header(skb); + if ((mach->h_proto == htons(ETH_P_8021Q)) || + (mach->h_proto == htons(ETH_P_8021AD))) { + offset = skb_inner_network_offset(skb); + vlanhdr = skb_header_pointer(skb, offset - sizeof(vh), + sizeof(vh), &vh); + if (unlikely(vlanhdr == NULL)) { + LOG_ERR("zxdh_en_send error: skb_header_pointer 
failed\n"); + return; + } + l3_protocol = vlanhdr->h_vlan_encapsulated_proto; + } + + if ((mach->h_proto == htons(ETH_P_IP)) || + (l3_protocol == htons(ETH_P_IP))) { + ipv4h = (struct iphdr *)skb_inner_network_header(skb); + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = IPV4_TYPE; + pkt_transport_protocol_parse(ipv4h->protocol, hdr); + } else if ((mach->h_proto == htons(ETH_P_IPV6)) || + (l3_protocol == htons(ETH_P_IPV6))) { + ipv6h = (struct ipv6hdr *)skb_inner_network_header(skb); + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = IPV6_TYPE; + pkt_transport_protocol_parse(ipv6h->nexthdr, hdr); + } else { + hdr->pipd_hdr.pi_hdr.pt.type_ctx.ip_type = NOT_IP_TYPE; + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_code = PCODE_NO_IP; + } + } +} + +int32_t vxlan_tso_cksum(struct sk_buff *skb) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } out_ip, in_ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } out_l4; + unsigned char *out_trans_start = 0; + unsigned short out_udp_len_temp = 0; + struct vlan_hdr *vlanhdr = NULL; + struct vlan_hdr vh = { 0 }; + uint16_t l3_protocol = skb->protocol; + + if ((skb->protocol == htons(ETH_P_8021Q)) || + (skb->protocol == htons(ETH_P_8021AD))) { + vlanhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh); + if (vlanhdr == NULL) { + return -1; + } + l3_protocol = vlanhdr->h_vlan_encapsulated_proto; + } + + out_ip.hdr = skb_network_header(skb); + out_l4.hdr = skb_transport_header(skb); + if (l3_protocol == htons(ETH_P_IP)) { + out_trans_start = (unsigned char *)&out_ip.v4->saddr; + } else if (l3_protocol == htons(ETH_P_IPV6)) { + out_trans_start = (unsigned char *)&out_ip.v6->saddr; + } else { + return -1; + } + + out_l4.udp->check = 0; + out_udp_len_temp = out_l4.udp->len; + out_l4.udp->len = 0; + in_ip.hdr = skb_inner_network_header(skb); + out_l4.udp->check = csum_fold( + csum_partial(out_trans_start, in_ip.hdr - out_trans_start, 0)); + out_l4.udp->len = out_udp_len_temp; + + return 0; 
+} + +int32_t zxdh_tx_checksum_offload(struct zxdh_en_device *en_dev, + struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) { + return 0; + } + + /* 隧道报文只支持vxlan的checksum */ + if (skb->encapsulation == 1) { + hdr->pipd_hdr.pi_hdr.bttl_pi_len = ENABLE_PI_FLAG_32B; + hdr->pipd_hdr.pd_hdr.ol_flag |= + htons(0x1 << OUTER_IP_CHECKSUM_OFFSET); + hdr->pipd_hdr.pd_hdr.ol_flag |= htons(0x1 << NP_IS_VXLAN_FLAG); + pkt_protocol_parse(skb, hdr, 1); + hdr->pipd_hdr.pi_hdr.hdr_l3_offset = + htons(en_dev->hdr_len + skb_inner_network_offset(skb)); + hdr->pipd_hdr.pi_hdr.hdr_l4_offset = htons( + en_dev->hdr_len + skb_inner_transport_offset(skb)); + + if ((skb_shinfo(skb)->gso_size != 0) && + (vxlan_tso_cksum(skb) == 0)) { + hdr->pipd_hdr.pd_hdr.ol_flag |= + htons(0x1 << NP_VXLAN_UDP_CHCKSUM_ENABLE); + } + } + + hdr->pipd_hdr.pi_hdr.pkt_action_flag1 |= + htons(0x1 << INNER_IP_CHECKSUM_OFFSET); + hdr->pipd_hdr.pi_hdr.pkt_action_flag2 |= 0x1 + << INNER_L4_CHECKSUM_OFFSET; + return 0; +} + +static int pipd_hdr_validate_vlan(struct zxdh_en_device *en_dev, + struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr) +{ + if (skb && skb_vlan_tag_present(skb)) { + hdr->pipd_hdr.pd_hdr.ctci = htons(skb_vlan_tag_get(skb)); + hdr->pipd_hdr.pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT); + en_dev->hw_stats.netdev_stats.tx_added_vlan_packets++; + } + return 0; +} + +static int pd_hdr_validate_vlan(struct zxdh_en_device *en_dev, + struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr) +{ + if (skb && skb_vlan_tag_present(skb)) { + hdr->pd_hdr.ctci = htons(skb_vlan_tag_get(skb)); + hdr->pd_hdr.ol_flag |= htons(TXCAP_CTAG_INSERT_EN_BIT); + en_dev->hw_stats.netdev_stats.tx_added_vlan_packets++; + } + return 0; +} + +int32_t pipd_net_hdr_from_skb(struct zxdh_en_device *en_dev, + struct sk_buff *skb, struct zxdh_net_hdr_tx *hdr, + bool is_lb) +{ + uint32_t gso_type = 0; + uint16_t mss = 0; + + hdr->tx_port = TX_PORT_DTP; + hdr->pipd_hdr.pi_hdr.bttl_pi_len = + 
DISABLE_PI_FIELD_PARSE + ENABLE_PI_FLAG_32B; + hdr->pipd_hdr.pi_hdr.pt.type_ctx.pkt_src = PKT_SRC_CPU; + hdr->pipd_hdr.pi_hdr.eth_port_id = INVALID_ETH_PORT_ID; + + pipd_hdr_validate_vlan(en_dev, skb, hdr); + + mss = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + if (gso_type & SKB_GSO_TCPV4) { + mss = (mss > 0) ? + min(skb_shinfo(skb)->gso_size, + (uint16_t)(en_dev->netdev->mtu - + IP_BASE_HLEN - TCP_BASE_HLEN)) : + (uint16_t)(en_dev->netdev->mtu - IP_BASE_HLEN - + TCP_BASE_HLEN); + hdr->pipd_hdr.pi_hdr.pkt_action_flag1 |= + htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG); + hdr->pipd_hdr.pi_hdr.pkt_action_flag2 |= + TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */ + } else if (gso_type & SKB_GSO_TCPV6) { + mss = (mss > 0) ? + min(skb_shinfo(skb)->gso_size, + (uint16_t)(en_dev->netdev->mtu - + IPV6_BASE_HLEN - TCP_BASE_HLEN)) : + (uint16_t)(en_dev->netdev->mtu - IPV6_BASE_HLEN - + TCP_BASE_HLEN); + hdr->pipd_hdr.pi_hdr.pkt_action_flag1 |= + htons((mss / ETH_MTU_4B_UNIT) + NOT_IP_FRG_CSUM_FLAG); + hdr->pipd_hdr.pi_hdr.pkt_action_flag2 |= + TCP_FRG_CSUM_FLAG; /*0x24 bit21,18: 带pi,tso,计算checksum */ + } else if (gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4 | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + hdr->pipd_hdr.pi_hdr.pkt_action_flag1 = htons( + (uint16_t)(en_dev->netdev->mtu / ETH_MTU_4B_UNIT) + + IP_FRG_CSUM_FLAG); + hdr->pipd_hdr.pi_hdr.pkt_action_flag2 |= NOT_TCP_FRG_CSUM_FLAG; + } else { + hdr->pipd_hdr.pi_hdr.pkt_action_flag1 |= + htons((en_dev->netdev->mtu / ETH_MTU_4B_UNIT) + + NOT_IP_FRG_CSUM_FLAG); + hdr->pipd_hdr.pi_hdr.pkt_action_flag2 |= NOT_TCP_FRG_CSUM_FLAG; + } +#ifdef CONFIG_INET + /* 将这个报文打上lb标识 */ + if (is_lb == IS_LB_PKT) { + hdr->pipd_hdr.pd_hdr.ol_flag |= htons(LB_EN); + } +#endif + + if (en_dev->netdev->features & NETIF_F_HW_CSUM) { + zxdh_tx_checksum_offload(en_dev, skb, hdr); + } + + if ((en_dev->ops->is_bond(en_dev->parent) || + en_dev->ops->is_special_bond(en_dev->parent) || + en_dev->is_hwbond) 
&& + (skb->protocol == htons(ETH_P_SLOW) || + skb->protocol == htons(ETH_P_PAUSE))) { + hdr->pipd_hdr.pd_hdr.ol_flag |= htons(PANELID_EN); + hdr->pipd_hdr.pd_hdr.panel_id = en_dev->phy_port; + } + +#ifdef ZXDH_DCBNL_OPEN + if (NULL != skb->sk) { + hdr->pipd_hdr.pd_hdr.ol_flag |= + htons(ZXDH_DCBNL_SET_SK_PRIO(skb->sk->sk_priority)); + } +#endif + + return 0; +} + +int32_t pd_net_hdr_from_skb(struct zxdh_en_device *en_dev, struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr, bool is_lb) +{ + hdr->tx_port = TX_PORT_NP; + + pd_hdr_validate_vlan(en_dev, skb, hdr); + +#ifdef CONFIG_INET + /* 将这个报文打上lb标识 */ + if (is_lb == IS_LB_PKT) { + hdr->pd_hdr.ol_flag |= htons(LB_EN); + } +#endif + + if ((en_dev->ops->is_bond(en_dev->parent) || en_dev->is_hwbond) && + (skb->protocol == htons(ETH_P_SLOW) || + skb->protocol == htons(ETH_P_PAUSE))) { + hdr->pd_hdr.ol_flag |= htons(PANELID_EN); + hdr->pd_hdr.panel_id = en_dev->phy_port; + } + +#ifdef ZXDH_DCBNL_OPEN + if (NULL != skb->sk) { + hdr->pd_hdr.ol_flag |= + htons(ZXDH_DCBNL_SET_SK_PRIO(skb->sk->sk_priority)); + } +#endif + + return 0; +} + +int32_t net_hdr_from_skb(struct zxdh_en_device *en_dev, struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr, bool is_lb) +{ + memset(hdr, 0, en_dev->hdr_len); + hdr->pd_len = en_dev->hdr_len / HDR_2B_UNIT; + + if (en_dev->delay_statistics_enable) { + pkt_delay_statistics_proc(skb, hdr, en_dev); + } + + if (en_dev->dtp_drs_offload == true) { + pipd_net_hdr_from_skb(en_dev, skb, hdr, is_lb); + } else { + pd_net_hdr_from_skb(en_dev, skb, hdr, is_lb); + } + + return 0; +} + +void zxdh_netdev_features_over_dtp(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + en_dev->dtp_drs_offload = false; + + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6) || + (netdev->features & NETIF_F_HW_CSUM) || + (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + en_dev->dtp_drs_offload = true; + } + 
LOG_DEBUG( + "dtp_drs_offload=%d. TSO:%lld, TSO6:%lld, HWCSUM:%lld, GSO_UDP_TUNNEL_CSUM:%lld\n", + en_dev->dtp_drs_offload, netdev->features & NETIF_F_TSO, + netdev->features & NETIF_F_TSO6, + netdev->features & NETIF_F_HW_CSUM, + netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM); + + if (en_dev->dtp_drs_offload == true) { + en_dev->hdr_len = sizeof(struct zxdh_net_hdr_tx); + } else { + en_dev->hdr_len = + sizeof(struct zxdh_net_hdr_tx) - sizeof(struct pi_hdr); + } + + return; +} + +int32_t is_udp_loopback_pkt(uint8_t *pData) +{ + uint16_t eth_type_lay3 = ntohs(*( + (uint16_t *)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */ + uint8_t eth_type_lay4 = 0; + struct zxdh_ehdr *zxdhh = NULL; + + if (eth_type_lay3 != ETH_P_IP) { + return 1; + } + + eth_type_lay4 = *(pData + (2 * ETHER_MAC_LEN) + ETHER_TYPE_LEN + + IP_PROT_OFFSET); + if (eth_type_lay4 != IPPROTO_UDP) { + return 1; + } + + zxdhh = (struct zxdh_ehdr *)(pData + (2 * ETHER_MAC_LEN) + + ETHER_TYPE_LEN + IPV4_HDR_LEN + + UDP_HDR_LEN); + if (zxdhh->magic != cpu_to_be64(ZXDH_TEST_MAGIC)) { + return 1; + } + LOG_DEBUG("it is udp lb pkt\n"); + return 0; +} + +int32_t xmit_skb(struct net_device *netdev, struct send_queue *sq, + struct sk_buff *skb) +{ + struct zxdh_net_hdr_tx *hdr = NULL; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t num_sg = 0; + uint hdr_len = en_dev->hdr_len; + bool can_push = false; + uint8_t *hdr_buf = sq->hdr_buf; + uint8_t *pData = NULL; + uint8_t *ptpHdr = NULL; + uint8_t ts_offset = 0; + struct zxdh_net_1588_hdr *hdr_1588 = NULL; + struct zxdh_net_1588_nopi_hdr *hdr_1588_nopi = NULL; + int32_t ret = 0; + int32_t is_1588_flag = IS_NOT_1588_MESSAGE; + int32_t is_lb = IS_NOT_LB_PKT; + + CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is null\n"); + pData = skb->data; + CHECK_EQUAL_ERR(pData, NULL, -EADDRNOTAVAIL, "skb->data is null\n"); + + /* Determine if it is a 1588 message */ + if (en_dev->enable_1588 == true) { + 
is_1588_flag = get_hdr_point(pData, &ts_offset, &ptpHdr); + if (is_1588_flag == IS_1588_MESSAGE) { + LOG_DEBUG("pkt_1588_proc_xmit dtp offload %d\n", + en_dev->dtp_drs_offload); + if (en_dev->dtp_drs_offload == true) { + hdr_len = sizeof(struct zxdh_net_1588_hdr); + } else { + hdr_len = sizeof(struct zxdh_net_1588_nopi_hdr); + } + } + } + +#ifdef CONFIG_INET + /* Determine if it is a udp lb test message*/ + if (en_dev->local_lb_enable == true) { + if (is_udp_loopback_pkt(pData) == IS_LB_PKT) { + is_lb = IS_LB_PKT; + } + } +#endif + + can_push = en_dev->any_header_sg && + !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && + !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; + /* Even if we can, don't push here yet as this would skew + * csum_start offset below. */ + if (can_push) { + hdr = (struct zxdh_net_hdr_tx *)(skb->data - hdr_len); + } else { + hdr_buf += sq->hdr_idx * HDR_BUFFER_LEN; + memset(hdr_buf, 0, HDR_BUFFER_LEN); + hdr = (struct zxdh_net_hdr_tx *)(hdr_buf); + sq->hdr_idx++; + sq->hdr_idx = sq->hdr_idx % en_dev->eth_config.tx_queue_size; + } + + if (net_hdr_from_skb(en_dev, skb, hdr, is_lb)) { + return -EPROTO; + } + + if (en_dev->enable_1588 == true) { + if (is_1588_flag == IS_1588_MESSAGE) { + if (en_dev->dtp_drs_offload == true) { + hdr->pd_len = sizeof(struct zxdh_net_1588_hdr) / + HDR_2B_UNIT; + hdr_1588 = (struct zxdh_net_1588_hdr *)hdr; + + /* Started by AICoder, pid:ye867r46b9yf81f14cf8095a80a7ac0190011532 */ + memset(&(hdr_1588->pd_1588), 0, + sizeof(struct zxdh_1588_pd_tx)); + /* Ended by AICoder, pid:ye867r46b9yf81f14cf8095a80a7ac0190011532 */ + hdr_1588->pd_1588.ts_offset = ts_offset; + ret = pkt_1588_proc_xmit(skb, + &(hdr_1588->pd_1588), + en_dev->clock_no, + en_dev, ptpHdr); + if (ret != 0) { + DEBUG_1588( + "dev %s vport 0x%x pkt_1588_proc_xmit ERR, ret: %d\n", + en_dev->netdev->name, + en_dev->vport, ret); + } + DEBUG_1588("NET HDR:"); + DEBUG_1588_DATA( + (uint8_t *)hdr_1588, + sizeof(struct zxdh_net_1588_hdr)); + } else { 
+ hdr->pd_len = + sizeof(struct zxdh_net_1588_nopi_hdr) / + HDR_2B_UNIT; + hdr_1588_nopi = + (struct zxdh_net_1588_nopi_hdr *)hdr; + memset(&(hdr_1588_nopi->pd_1588), 0, + sizeof(struct zxdh_1588_pd_tx)); + hdr_1588_nopi->pd_1588.ts_offset = ts_offset; + ret = pkt_1588_proc_xmit( + skb, &(hdr_1588_nopi->pd_1588), + en_dev->clock_no, en_dev, ptpHdr); + if (ret != 0) { + LOG_ERR("dev %s vport 0x%x pkt_1588_proc_nopi_xmit ERR, ret: %d\n", + en_dev->netdev->name, + en_dev->vport, ret); + return ret; + } + DEBUG_1588("NET HDR:"); + DEBUG_1588_DATA( + (uint8_t *)hdr_1588_nopi, + sizeof(struct zxdh_net_1588_nopi_hdr)); + } + } + DEBUG_1588("skb->data:"); + DEBUG_1588_DATA((uint8_t *)skb->data, skb->len); + } + + sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); + if (can_push) { + __skb_push(skb, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + if (unlikely(num_sg < 0)) { + return num_sg; + } + /* Pull header back to avoid skew in tx bytes calculations. */ + __skb_pull(skb, hdr_len); + } else { + sg_set_buf(sq->sg, hdr, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + if (unlikely(num_sg < 0)) { + return num_sg; + } + num_sg++; + } + + return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); +} + +netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t qnum = skb_get_queue_mapping(skb); + struct send_queue *sq = &en_dev->sq[qnum]; + int32_t err = 0; + struct netdev_queue *txq = netdev_get_tx_queue(netdev, qnum); + bool kick = !netdev_xmit_more(); + bool use_napi = sq->napi.weight; + + /* Free up any pending old buffers before queueing new ones. 
*/ + do { + if (use_napi) { + virtqueue_disable_cb(sq->vq); + } + + free_old_xmit_skbs(netdev, sq, false); + + } while (use_napi && kick && + unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + + /* timestamp packet in software */ + skb_tx_timestamp(skb); + + /* Try to transmit */ + err = xmit_skb(netdev, sq, skb); + + /* This should not happen! */ + if (unlikely(err)) { + netdev->stats.tx_fifo_errors++; + netdev->stats.tx_errors++; + if (net_ratelimit()) { + LOG_WARN("unexpected TXQ (%d) queue failure: %d\n", + qnum, err); + } + netdev->stats.tx_dropped++; + en_dev->hw_stats.q_stats[qnum].q_tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* If running out of space, stop queue to avoid getting packets that we + * are then unable to transmit. + * An alternative would be to force queuing layer to requeue the skb by + * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be + * returned in a normal path of operation: it means that driver is not + * maintaining the TX queue stop/start state properly, and causes + * the stack to do a non-trivial amount of useless work. + * Since most packets only take 1 or 2 ring slots, stopping the queue + * early means 16 slots are typically wasted. + */ + if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) { + netif_stop_subqueue(netdev, qnum); + en_dev->hw_stats.q_stats[qnum].q_tx_stopped++; + if (!use_napi && + unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + /* More just got used, free them then recheck. 
*/ + free_old_xmit_skbs(netdev, sq, false); + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { + netif_start_subqueue(netdev, qnum); + virtqueue_disable_cb(sq->vq); + } + } + } + + if (kick || netif_xmit_stopped(txq)) { + if (virtqueue_kick_prepare_packed(sq->vq) && + virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + } + + return NETDEV_TX_OK; +} + +static inline unsigned int +u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp) +{ + return u64_stats_fetch_begin(syncp); +} + +static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, + unsigned int start) +{ + return u64_stats_fetch_retry(syncp, start); +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void zxdh_en_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 * +zxdh_en_get_netdev_stats_struct(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ +#ifdef HAVE_VOID_NDO_GET_STATS64 + struct zxdh_en_device *en_dev = netdev_priv(netdev); + struct receive_queue *rq = NULL; + struct send_queue *sq = NULL; + uint32_t start = 0; + uint32_t i = 0; + uint64_t tpackets = 0; + uint64_t tbytes = 0; + uint64_t rpackets = 0; + uint64_t rbytes = 0; + uint64_t rdrops = 0; + uint32_t loop_cnt = en_dev->max_queue_pairs; + int32_t ret = 0; + struct zxdh_en_vport_np_stats *np_stats = + &en_dev->hw_stats.vport_stats.np_stats; + uint32_t vf_id = GET_VFID(en_dev->vport); + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + ret = zxdh_mac_stats_get(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret); + return; + } + } + + if (en_dev->ops->is_bond(en_dev->parent)) { + stats->rx_packets = en_dev->hw_stats.phy_stats.rx_packets_phy; + stats->rx_bytes = 
en_dev->hw_stats.phy_stats.rx_bytes_phy; + stats->rx_errors = en_dev->hw_stats.phy_stats.rx_error_phy; + stats->rx_dropped = en_dev->hw_stats.phy_stats.rx_drop_phy; + stats->tx_packets = en_dev->hw_stats.phy_stats.tx_packets_phy; + stats->tx_bytes = en_dev->hw_stats.phy_stats.tx_bytes_phy; + stats->tx_errors = en_dev->hw_stats.phy_stats.tx_error_phy; + stats->tx_dropped = en_dev->hw_stats.phy_stats.tx_drop_phy; + return; + } + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + loop_cnt--; +#endif + + for (i = 0; i < loop_cnt; ++i) { + sq = &en_dev->sq[i]; + rq = &en_dev->rq[i]; + + do { + start = u64_stats_fetch_begin_irq(&sq->stats.syncp); + tpackets = sq->stats.packets; + tbytes = sq->stats.bytes; + } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&rq->stats.syncp); + rpackets = rq->stats.packets; + rbytes = rq->stats.bytes; + rdrops = rq->stats.drops; + } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start)); + + stats->rx_packets += rpackets; + stats->rx_bytes += rbytes; + stats->rx_dropped += rdrops; + stats->tx_packets += tpackets; + stats->tx_bytes += tbytes; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR) + dpp_stat_MTU_packet_msg_rx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->rx_vport_mtu_drop_bytes), + &(np_stats->rx_vport_mtu_drop_packets)); + stats->rx_errors = netdev->stats.rx_errors + + en_dev->hw_stats.phy_stats.rx_error_phy + + np_stats->rx_vport_mtu_drop_packets; + stats->tx_errors = netdev->stats.tx_errors + + en_dev->hw_stats.phy_stats.tx_drop_phy; + } else { + stats->rx_errors = netdev->stats.rx_errors; + stats->tx_errors = netdev->stats.tx_errors; + } + stats->tx_dropped = netdev->stats.tx_dropped; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + return; +#else + return stats; +#endif +} +#endif /* HAVE_VOID_NDO_GET_STATS_64 */ + +static void 
zxdh_en_set_rx_mode(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->is_bond(en_dev->parent)) + return; + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + queue_work(en_priv->events->wq, &en_dev->rx_mode_set_work); +} + +bool is_standard_predefined_ipv6_multicast_mac(const uint8_t *mac) +{ + return ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] == 0x00)); +} + +bool zxdh_check_special_addr(const u8 *addr) +{ + if (ether_addr_equal(BOND_MCAST_ADDR, addr) || + is_standard_predefined_ipv6_multicast_mac(addr)) { + return true; + } + return false; +} + +static int zxdh_addr_sync(struct net_device *netdev, const u8 *addr, + mac_queue *add_queue) +{ + if (!zxdh_check_special_addr(addr)) { + return 0; + } + + if (add_queue->count >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("mac num is larger max size\n"); + return -1; + } + + ether_addr_copy(add_queue->addr[add_queue->count], addr); + add_queue->count++; + + return 0; +} + +static int zxdh_addr_unsync(struct net_device *netdev, const u8 *addr, + mac_queue *del_queue) +{ + if (!zxdh_check_special_addr(addr)) { + return 0; + } + + if (del_queue->count >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("mac num is larger max size\n"); + return -1; + } + + ether_addr_copy(del_queue->addr[del_queue->count], addr); + del_queue->count++; + + return 0; +} + +static int __hw_addr_del_entry(struct netdev_hw_addr_list *list, + struct netdev_hw_addr *ha, bool global, + bool sync) +{ + if (global && !ha->global_use) + return -ENOENT; + + if (sync && !ha->synced) + return -ENOENT; + + if (global) + ha->global_use = false; + + if (sync) + ha->synced--; + + if (--ha->refcount) + return 0; + + rb_erase(&ha->node, &list->tree); + list_del_rcu(&ha->list); + kfree_rcu(ha, rcu_head); + list->count--; + return 0; +} + +static int zxdh_hw_addr_sync_dev( + struct netdev_hw_addr_list *list, struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *, 
mac_queue *), + int (*unsync)(struct net_device *, const unsigned char *, mac_queue *), + mac_queue *add_queue, mac_queue *del_queue) +{ + struct netdev_hw_addr *ha, *tmp; + int err; + + list_for_each_entry_safe(ha, tmp, &list->list, list) { + if (!ha->sync_cnt || ha->refcount != 1) + continue; + + if (unsync && unsync(dev, ha->addr, del_queue)) + continue; + + if (zxdh_check_special_addr(ha->addr)) { + ha->sync_cnt--; + __hw_addr_del_entry(list, ha, false, false); + } + } + + list_for_each_entry_safe(ha, tmp, &list->list, list) { + if (ha->sync_cnt) + continue; + + err = sync(dev, ha->addr, add_queue); + if (err) + return err; + + if (zxdh_check_special_addr(ha->addr)) { + ha->sync_cnt++; + ha->refcount++; + } + } + + return 0; +} + +static int zxdh_dev_mc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *, mac_queue *), + int (*unsync)(struct net_device *, + const unsigned char *, mac_queue *), + mac_queue *add_queue, mac_queue *del_queue) +{ + LOG_DEBUG("%s is called", dev->name); + return zxdh_hw_addr_sync_dev(&dev->mc, dev, sync, unsync, add_queue, + del_queue); +} + +static int zxdh_handle_mc_operation(struct zxdh_en_device *en_dev, + DPP_PF_INFO_T *pf_info, const u8 *addr, + bool is_add) +{ + int err = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + if (is_add) { + err = dpp_multi_mac_add_member(pf_info, addr); + if (err != 0) { + LOG_ERR("dpp_multi_mac_add_member failed:0x%x\n", + err); + return err; + } + } else { + err = dpp_multi_mac_del_member(pf_info, addr); + if (err != 0) { + LOG_ERR("dpp_multi_mac_del_member failed:0x%x\n", + err); + return err; + } + } + } else { + if (is_add) { + err = zxdh_vf_dpp_add_lacp_mac(en_dev, addr); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_add_mac failed:0x%x\n", + err); + return err; + } + } else { + err = zxdh_vf_dpp_del_lacp_mac(en_dev, addr); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_del_mac failed:0x%x\n", + err); + return err; + } + } + } + + return 
0; +} + +static int zxdh_dev_mc_proc(struct zxdh_en_device *en_dev, + DPP_PF_INFO_T *pf_info, mac_queue *add_queue, + mac_queue *del_queue) +{ + int32_t err = 0; + uint8_t i = 0; + + for (i = 0; i < add_queue->count; i++) { + err = zxdh_handle_mc_operation(en_dev, pf_info, + add_queue->addr[i], true); + if (err != 0) { + return err; + } + en_dev->curr_multicast_num++; + } + + for (i = 0; i < del_queue->count; i++) { + err = zxdh_handle_mc_operation(en_dev, pf_info, + del_queue->addr[i], false); + if (err != 0) { + return err; + } + en_dev->curr_multicast_num--; + } + + return 0; +} + +int32_t zxdh_dev_promisc_sync(struct zxdh_en_device *en_dev) +{ + bool promisc_changed = !!(en_dev->netdev->flags & IFF_PROMISC); + bool allmulti_changed = !!(en_dev->netdev->flags & IFF_ALLMULTI); + bool is_pf = en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF; + DPP_PF_INFO_T pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + if ((en_dev->promisc_enabled == promisc_changed) && + (en_dev->allmulti_enabled == allmulti_changed)) + return 0; + + LOG_DEBUG("promisc_changed: %d, allmulti_changed: %d\n", + promisc_changed, allmulti_changed); + en_dev->promisc_enabled = promisc_changed; + en_dev->allmulti_enabled = allmulti_changed; + + if (!promisc_changed && allmulti_changed) { + if (is_pf) { + return dpp_vport_mc_promisc_set(&pf_info, 1); + } else { + return zxdh_vf_port_promisc_set( + en_dev, ZXDH_ALLMULTI_MODE, 1, 0); + } + } + + if (is_pf) { + dpp_vport_uc_promisc_set(&pf_info, en_dev->promisc_enabled); + dpp_vport_promisc_en_set(&pf_info, en_dev->promisc_enabled); + dpp_vport_mc_promisc_set(&pf_info, en_dev->promisc_enabled); + } else { + return zxdh_vf_port_promisc_set(en_dev, ZXDH_PROMISC_MODE, + promisc_changed, 1); + } + + return 0; +} + +void rx_mode_set_handler(struct work_struct *work) +{ + struct zxdh_en_device *en_dev = + container_of(work, struct zxdh_en_device, rx_mode_set_work); + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + 
struct mac_queue add_queue = { 0 }; + struct mac_queue del_queue = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + netif_addr_lock_bh(en_dev->netdev); + zxdh_dev_mc_sync(en_dev->netdev, zxdh_addr_sync, zxdh_addr_unsync, + &add_queue, &del_queue); + netif_addr_unlock_bh(en_dev->netdev); + + err = zxdh_dev_mc_proc(en_dev, &pf_info, &add_queue, &del_queue); + if (err != 0) { + LOG_ERR("zxdh_dev_mc_proc err:0x%x\n", err); + } + + err = zxdh_dev_promisc_sync(en_dev); + if (err != 0) { + LOG_ERR("zxdh_dev_promisc_sync err:0x%x\n", err); + } +} + +void zxdh_netdev_addr_set(struct net_device *dev, const u8 *addr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + dev_addr_set(dev, addr); + ether_addr_copy(en_dev->eth_config.dev_addr, dev->dev_addr); +} + +static int zxdh_en_set_mac(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = (struct sockaddr *)p; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + struct netdev_hw_addr *ha = NULL; + bool delete_flag = true; + bool add_flag = true; + int32_t ret = 0; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + uint16_t current_vport = 0; + uint16_t vport = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (!is_valid_ether_addr(addr->sa_data)) { + LOG_INFO("invalid mac address %pM\n", addr->sa_data); + return -EADDRNOTAVAIL; + } + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + LOG_INFO("update %s mac %p in INTERNAL_ERROR\n", netdev->name, + addr->sa_data); + zxdh_netdev_addr_set(netdev, addr->sa_data); + return -ENXIO; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + if ((!en_dev->ops->is_bond(en_dev->parent)) && + (!ether_addr_equal(en_dev->last_np_mac_addr.sa_data, + netdev->dev_addr))) { + zxdh_netdev_addr_set(netdev, + en_dev->last_np_mac_addr.sa_data); + goto continue_run; + } + 
LOG_INFO("already using mac address %pM\n", addr->sa_data); + return 0; + } +continue_run: + list_for_each_entry(ha, &netdev->uc.list, list) { + if (!memcmp(ha->addr, netdev->dev_addr, netdev->addr_len)) { + delete_flag = false; + } + + if (!memcmp(ha->addr, addr->sa_data, netdev->addr_len)) { + add_flag = false; + } + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->is_bond(en_dev->parent)) { + zxdh_netdev_addr_set(netdev, addr->sa_data); + return 0; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + vport = pf_info.vport; + ret = dpp_unicast_mac_search(&pf_info, addr->sa_data, + sriov_vlan_tpid, sriov_vlan_id, + ¤t_vport); + if ((ret == 0) && (vport == current_vport)) { + return 0; + } else if ((ret == 0) && (vport != current_vport)) { + LOG_ERR("Mac already exists\n"); + return -EEXIST; + } else if ((ret != 0) && (ret != DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", ret); + return -1; + } + + if (delete_flag) { + ret = dpp_del_mac(&pf_info, netdev->dev_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("pf del mac failed, retval: %d\n", ret); + return -1; + } + } + + if (add_flag) { + ret = dpp_add_mac(&pf_info, addr->sa_data, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("pf add mac failed: %d\n", ret); + return -1; + } + } + + LOG_DEBUG("set pf new mac address %pM\n", addr->sa_data); + zxdh_netdev_addr_set(netdev, addr->sa_data); + } else { + ret = zxdh_vf_dpp_dump_mac(en_dev, addr->sa_data); + if (ret != 0) { + LOG_ERR("Mac already exists"); + return -EEXIST; + } + + ret = zxdh_vf_dpp_del_mac(en_dev, netdev->dev_addr, + UNFILTER_MAC, delete_flag); + if (ret != 0) { + LOG_ERR("zxdh vf dpp del mac failed: %d\n", ret); + return -1; + } + + if (add_flag) { + ret = zxdh_vf_dpp_add_mac(en_dev, addr->sa_data, + UNFILTER_MAC); + if (ret != 0) { + LOG_ERR("zxdh vf dpp add mac failed: %d\n", + ret); + return -1; + } + 
en_dev->ops->set_mac(en_dev->parent, addr->sa_data); + } + + LOG_DEBUG("set vf new mac address %pM\n", addr->sa_data); + zxdh_netdev_addr_set(netdev, addr->sa_data); + } + ether_addr_copy(en_dev->last_np_mac_addr.sa_data, + en_dev->netdev->dev_addr); + LOG_DEBUG("update last_np_mac_addr %pM\n", + en_dev->last_np_mac_addr.sa_data); + return ret; +} + +int32_t zxdh_en_config_mtu_to_np(struct net_device *netdev, int32_t mtu_value) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (!zxdh_en_is_panel_port(en_dev)) + return ret; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_MTU_OFFLOAD_ENABLE, 1); + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_MTU, mtu_value); + ret = dpp_vport_attr_set(&pf_info, + SRIOV_VPORT_MTU_OFFLOAD_EN_OFF, 1); + if (ret != 0) { + LOG_ERR("zxdh_pf_egr_port_attr_set config mtu enable failed: %d\n", + ret); + return ret; + } + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_MTU, mtu_value); + if (ret != 0) { + LOG_ERR("zxdh_pf_egr_port_attr_set config mtu value failed: %d\n", + ret); + return ret; + } + } else { + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_MTU_OFFLOAD_EN_OFF, 1, 0); + if (ret != 0) { + LOG_ERR("zxdh_vf_egr_port_attr_set config mtu enable failed: %d\n", + ret); + return ret; + } + ret = zxdh_vf_egr_port_attr_set(en_dev, SRIOV_VPORT_MTU, + mtu_value, 0); + if (ret != 0) { + LOG_ERR("zxdh_vf_egr_port_attr_set config mut value failed: %d\n", + ret); + return ret; + } + } + + return 0; +} + +static int zxdh_en_change_mtu(struct net_device *netdev, int new_mtu) +{ + int32_t ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); 
+ if ((new_mtu < ETH_MIN_MTU) || (new_mtu > ZXDH_MAX_MTU)) { + LOG_ERR("changing MTU over %d-%d\n", ETH_MIN_MTU, ZXDH_MAX_MTU); + return -EINVAL; + } + LOG_DEBUG("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + ret = zxdh_en_config_mtu_to_np(netdev, new_mtu); + if (ret != 0) { + LOG_ERR("zxdh_en_config_mtu_to_np failed: %d\n", ret); + return -1; + } + + return 0; +} + +#ifdef HAVE_TX_TIMEOUT_TXQUEUE +static void zxdh_en_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + return; +} +#else +static void zxdh_en_tx_timeout(struct net_device *netdev) +{ + return; +} +#endif + +#ifdef HAVE_VLAN_RX_REGISTER +static void zxdh_en_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + return; +} +#endif + +static int __attribute__((unused)) +vf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + int ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_VLAN_FILTER_ADD; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.rx_vid_add_msg.vlan_id = vid; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0 || msg->reps.flag != ZXDH_REPS_SUCC) { + LOG_ERR("pcieid:0x%x send msg to pf add vlan:%d failed! 
ret = %d, flag = 0x%x\n", + en_dev->pcie_id, vid, ret, msg->reps.flag); + kfree(msg); + return -1; + } + kfree(msg); + return 0; +} + +int vlan_set_bit(uint16_t vlan_id, void *bit_map) +{ + uint8_t *bitmap = 0; + uint16_t byte_index = 0; + uint8_t bit_index = 0; + + LOG_DEBUG("lan_set_bit, id:%d.\n", vlan_id); + if (!bit_map) { + return -1; + } + if (vlan_id >= VLAN_BITMAP_LENGTH) { + return -2; + } + + bitmap = (uint8_t *)bit_map; + byte_index = vlan_id / BIT_NUM_PER_BYTE; + bit_index = vlan_id % BIT_NUM_PER_BYTE; + + bitmap[byte_index] |= (1 << bit_index); + + return 0; +} + +int vlan_reset_bit(uint16_t vlan_id, void *bit_map) +{ + uint8_t *bitmap = 0; + uint16_t byte_index = 0; + uint8_t bit_index = 0; + + LOG_DEBUG("lan_reset_bit, id:%d.\n", vlan_id); + + if (!bit_map) { + return -1; + } + if (vlan_id >= VLAN_BITMAP_LENGTH) { + return -2; + } + + bitmap = (uint8_t *)bit_map; + byte_index = vlan_id / BIT_NUM_PER_BYTE; + bit_index = vlan_id % BIT_NUM_PER_BYTE; + + bitmap[byte_index] &= ~(1 << bit_index); + + return 0; +} + +#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX) +static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + int retval = 0; + + struct zxdh_en_priv *zxdev = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &zxdev->edev; + DPP_PF_INFO_T pf_info = { 0 }; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (vid > MAX_VLAN_ID) { + LOG_ERR("vlan id:%d input is err!\n", vid); + return -EINVAL; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { /* VF */ + retval = vf_vlan_rx_add_vid(netdev, vid); + goto exit; + } + + retval = dpp_add_vlan_filter(&pf_info, vid); + if (0 != retval) { + LOG_ERR("failed to add vlan: %d\n", vid); + goto exit; + } + LOG_INFO("pf add vlan %d succeed, retval %d.\n", vid, retval); + +exit: + if (retval == 0) { + retval = + vlan_set_bit(vid, 
en_dev->eth_config.vlan_trunk_bitmap); + } + + return retval; +} +#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX) +static int zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + return 0; +} +#else +static void zxdh_en_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + return; +} +#endif + +static int vf_vlan_rx_del_vid(struct net_device *netdev, u16 vid) +{ + int ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_VLAN_FILTER_DEL; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.rx_vid_del_msg.vlan_id = vid; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0 || msg->reps.flag != ZXDH_REPS_SUCC) { + LOG_ERR("pcieid:0x%x send msg to pf del vlan:%d failed! 
ret = %d, flag = 0x%x\n", + en_dev->pcie_id, vid, ret, msg->reps.flag); + kfree(msg); + return -1; + } + kfree(msg); + return 0; +} + +#if defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && defined(NETIF_F_HW_VLAN_CTAG_RX) +static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + int retval = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + DPP_PF_INFO_T pf_info = { 0 }; + + LOG_DEBUG("del vid: %d.\n", vid); + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (vid > MAX_VLAN_ID) { + LOG_ERR("vlan id:%d input is err!\n", vid); + return -EINVAL; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { /* VF */ + retval = vf_vlan_rx_del_vid(netdev, vid); + goto exit; + } + + retval = dpp_del_vlan_filter(&pf_info, vid); + if (0 != retval) { + LOG_ERR("failed to del vlan: %d\n", vid); + goto exit; + } + LOG_INFO("pf del vlan %d succeed.\n", vid); + +exit: + if (!retval) { + retval = vlan_reset_bit(vid, + en_dev->eth_config.vlan_trunk_bitmap); + } + + return retval; +} +#elif defined(HAVE_INT_NDO_VLAN_RX_ADD_VID) && !defined(NETIF_F_HW_VLAN_CTAG_RX) +static int zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + return 0; +} +#else +static void zxdh_en_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + return; +} +#endif + +static int zxdh_en_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + unsigned long int max_sz = + PAGE_SIZE - sizeof(struct padded_zxdh_net_hdr); + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct bpf_prog *old_prog = NULL; + uint16_t xdp_qp = 0; + uint16_t curr_qp = 0; + int i = 0; + + if ((dev->features & NETIF_F_GRO_HW) || + (dev->features & NETIF_F_HW_CSUM)) { + LOG_ERR("Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM 
first\n"); + return -EOPNOTSUPP; + } + + if (en_dev->mergeable_rx_bufs && !en_dev->any_header_sg) { + LOG_ERR("XDP expects header/data in single page, any_header_sg required\n"); + return -EINVAL; + } + + if (dev->mtu > max_sz) { + LOG_ERR("MTU too large to enable XDP\n"); + netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz); + return -EINVAL; + } + + curr_qp = en_dev->curr_queue_pairs - en_dev->xdp_queue_pairs; + if (prog) + xdp_qp = 0; //nr_cpu_ids + + /* XDP requires extra queues for XDP_TX */ + if (curr_qp + xdp_qp > en_dev->max_vq_pairs) { + netdev_warn_once( + dev, + "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", + curr_qp + xdp_qp, en_dev->max_vq_pairs); + xdp_qp = 0; + } + + old_prog = rtnl_dereference(en_dev->rq[0].xdp_prog); + if (!prog && !old_prog) + return 0; + + if (prog) { + bpf_prog_add(prog, en_dev->max_vq_pairs - 1); + } + + /* Make sure NAPI is not using any XDP TX queues for RX. */ + if (netif_running(dev)) { + for (i = 0; i < en_dev->max_vq_pairs; i++) { + napi_disable(&en_dev->rq[i].napi); + virtnet_napi_tx_disable(&en_dev->sq[i].napi); + } + } + + en_dev->xdp_enabled = !!prog; + for (i = 0; i < en_dev->max_vq_pairs; i++) { + rcu_assign_pointer(en_dev->rq[i].xdp_prog, prog); + } + + for (i = 0; i < en_dev->max_vq_pairs; i++) { + if (old_prog) + bpf_prog_put(old_prog); + if (netif_running(dev)) { + virtnet_napi_enable(en_dev->rq[i].vq, + &en_dev->rq[i].napi); + virtnet_napi_tx_enable(dev, en_dev->sq[i].vq, + &en_dev->sq[i].napi); + } + } + + return 0; +} + +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7)) || \ + (!RHEL_RELEASE_CODE && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))))) +static uint32_t zxdh_xdp_query(struct net_device *dev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + const struct bpf_prog *xdp_prog = NULL; + int32_t i; + + for (i = 0; i < en_dev->max_vq_pairs; i++) { + xdp_prog = 
rtnl_dereference(en_dev->rq[i].xdp_prog); + if (xdp_prog) + return xdp_prog->aux->id; + } + return 0; +} +#endif + +int zxdh_en_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return zxdh_en_xdp_set(dev, xdp->prog, xdp->extack); +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 7)) || \ + (!RHEL_RELEASE_CODE && \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))))) + case XDP_QUERY_PROG: + xdp->prog_id = zxdh_xdp_query(dev); + return 0; +#endif + default: + return -EINVAL; + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void zxdh_en_netpoll(struct net_device *netdev) +{ + return; +} +#endif + +#ifdef HAVE_SETUP_TC +int zxdh_en_setup_tc(struct net_device *netdev, u8 tc) +{ + return 0; +} + +#ifdef NETIF_F_HW_TC +#ifdef HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +static int __zxdh_en_setup_tc(struct net_device *netdev, + enum tc_setup_type type, void *type_data) +#elif defined(HAVE_NDO_SETUP_TC_CHAIN_INDEX) +static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle, + u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +#else +static int __zxdh_en_setup_tc(struct net_device *netdev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +#endif +{ + return 0; +} +#endif +#endif + +static int32_t zxdh_dtp_offload_set(struct zxdh_en_device *en_dev, + DPP_PF_INFO_T *pf_info) +{ + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + int32_t ret = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_attr_get(pf_info, &port_attr_entry); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_get failed: %d\n", ret); + return ret; + } + + if (!port_attr_entry.lro_offload && + !port_attr_entry.ip_recombine_offload && + !port_attr_entry.ip_checksum_offload && + !port_attr_entry.tcp_udp_checksum_offload) { + ret = dpp_vport_attr_set( + pf_info, SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG, + 0); + } else { + ret = dpp_vport_attr_set( + pf_info, SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG, + 1); 
+ } + + return ret; + } + + ret = zxdh_vf_egr_port_attr_get(en_dev, &port_attr_entry); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_get failed: %d\n", ret); + return ret; + } + + if (!port_attr_entry.lro_offload && + !port_attr_entry.ip_recombine_offload && + !port_attr_entry.ip_checksum_offload && + !port_attr_entry.tcp_udp_checksum_offload) { + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG, 0, 0); + } else { + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG, 1, 0); + } + + return ret; +} + +static int32_t set_feature_rx_checksum(struct zxdh_en_device *en_dev, + bool enable) +{ + int en_value = enable ? 1 : 0; + DPP_PF_INFO_T pf_info = { 0 }; + int32_t ret = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_IP_CHKSUM, + enable); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IP_CHKSUM set failed: %d\n", ret); + return ret; + } + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_TCP_UDP_CHKSUM, + enable); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_TCP_UDP_CHKSUM set failed: %d\n", + ret); + return ret; + } + } else if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { + ret = zxdh_vf_egr_port_attr_set(en_dev, SRIOV_VPORT_IP_CHKSUM, + en_value, 0); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IP_CHKSUM set failed: %d\n", ret); + return ret; + } + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_TCP_UDP_CHKSUM, en_value, 0); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_TCP_UDP_CHKSUM set failed: %d\n", + ret); + return ret; + } + } + + return zxdh_dtp_offload_set(en_dev, &pf_info); +} + +static int set_feature_tx_checksum(struct zxdh_en_device *en_dev, bool enable) +{ + return 0; +} + +static int set_feature_tso(struct zxdh_en_device *en_dev, bool enable) +{ + return 0; +} + +static int set_feature_tso6(struct zxdh_en_device *en_dev, bool enable) +{ + 
return 0; +} + +static int set_feature_vxlan_checksum(struct zxdh_en_device *en_dev, + bool enable) +{ + int ret = 0; + int en_value = enable ? 1 : 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_attr_set(&pf_info, + SRIOV_VPORT_OUTER_IP_CHECKSUM_OFFLOAD, + enable); + if (ret != 0) { + LOG_ERR("zxdh set vxlan rx checksum failed!\n"); + return ret; + } + } else if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_OUTER_IP_CHECKSUM_OFFLOAD, en_value, + 0); + if (ret != 0) { + LOG_ERR("zxdh_vf_egr_port_attr_set vxlan rx checksum failed!\n"); + return ret; + } + } + + return ret; +} + +int32_t set_feature_rxhash(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + return dpp_vport_rss_en_set(&pf_info, enable); + } + + return zxdh_vf_rss_en_set(en_dev, enable); +} + +int32_t set_feature_ntuple(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + return dpp_vport_fd_en_set(&pf_info, enable); + } + + return zxdh_vf_fd_en_set(en_dev, enable); +} + +static int32_t set_vf_cvlan_filter(struct zxdh_en_device *en_dev, bool enable) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_VLAN_FILTER_SET; + 
msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.vlan_filter_set_msg.enable = enable; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0 || msg->reps.flag != ZXDH_REPS_SUCC) { + LOG_ERR("pcieid:0x%x send msg to pf set vlan filter enable:%s failed! ret = %d, flag = 0x%x\n", + en_dev->pcie_id, enable ? "enable" : "disable", ret, + msg->reps.flag); + } + + kfree(msg); + return ret; +} + +/** + * zxdh_pf_switch_business_vlan - 配置bussness vlan子开关 + * @pf_info: pf信息 + * @type: vlan二级开关 + * @wanted_feature: 切换的值 + */ +int zxdh_pf_switch_business_vlan(DPP_PF_INFO_T *pf_info, uint8_t type, + uint32_t wanted_feature) +{ + int ret = 0; + ZXDH_VQM_VFID_VLAN_T vf_vlan_attr = { 0 }; + bool old_vport_bit = 0; + bool wanted_vport_bit = 0; + uint32_t *changed_vlan_attr = NULL; + + /* sizeof(vf_vlan_attr)/sizeof(vf_vlan_attr.rsv): 结构体成员数量*/ + if (type >= sizeof(vf_vlan_attr) / sizeof(vf_vlan_attr.rsv)) { + LOG_ERR("zxdh_pf_switch_business_vlan para type err: %u.\n", + type); + return -1; + } + changed_vlan_attr = (uint32_t *)&vf_vlan_attr + type; + + ret = dpp_vqm_vfid_vlan_get(pf_info, &vf_vlan_attr); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_get failed: %d.\n", ret); + return -1; + } + + old_vport_bit = vf_vlan_attr.sriov_business_qinq_vlan_strip_offload | + vf_vlan_attr.sriov_business_vlan_filter | + vf_vlan_attr.sriov_business_vlan_strip_offload; + + *changed_vlan_attr = wanted_feature; + + wanted_vport_bit = vf_vlan_attr.sriov_business_qinq_vlan_strip_offload | + vf_vlan_attr.sriov_business_vlan_filter | + vf_vlan_attr.sriov_business_vlan_strip_offload; + + /* 先切换二级开关*/ + ret = dpp_vqm_vfid_vlan_set(pf_info, type, wanted_feature); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_set, ret: %d\n", ret); + return -1; + } + + /* 如果一级开关不需要切换则退出*/ + if (!(old_vport_bit ^ wanted_vport_bit)) { + return 0; + } + + ret = dpp_vport_business_vlan_offload_en_set(pf_info, 
wanted_vport_bit); + if (ret != 0) { + LOG_ERR("dpp_vport_business_vlan_offload_en_set, ret: %d\n", + ret); + return -1; + } + return 0; +} + +static int set_feature_cvlan_filter(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if ((en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF)) { /* VF */ + return set_vf_cvlan_filter(en_dev, enable); + } + + return zxdh_pf_switch_business_vlan( + &pf_info, VLAN_SRIOV_BUSINESS_VLAN_FILTER, enable); +} + +static int __attribute__((unused)) +set_feature_svlan_filter(struct zxdh_en_device *en_dev, bool enable) +{ + int ret = 0; +#if 0 //TODO:STAG 暂时没有设置 + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + ret = dpp_vport_vlan_qinq_en_set(en_dev->vport, enable); +#endif + return ret; +} + +int set_vf_qinq_tpid(struct zxdh_en_device *en_dev, uint16_t tpid) +{ + union zxdh_msg *msg = NULL; + int ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.hdr.op_code = ZXDH_SET_TPID; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.tpid_cfg_msg.tpid = tpid; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0 || msg->reps.flag != ZXDH_REPS_SUCC) { + LOG_ERR("pcieid:0x%x send msg to vfs set tpid: 0x%x failed! 
ret = %d.\n", + en_dev->pcie_id, tpid, ret); + kfree(msg); + return -EINVAL; + } + + kfree(msg); + return 0; +} + +static int set_vf_vlan_strip(struct zxdh_en_device *en_dev, bool enable, + uint8_t flag) +{ + union zxdh_msg *msg = NULL; + int ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_VLAN_OFFLOAD_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.vlan_strip_msg.enable = enable; + msg->payload.vlan_strip_msg.flag = flag; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0 || msg->reps.flag != ZXDH_REPS_SUCC) { + LOG_ERR("pcieid:0x%x send msg to vfs set vlan strip enable:%s failed! ret = %d, flag = 0x%x\n", + en_dev->pcie_id, enable ? 
"enable" : "disable", ret, + msg->reps.flag); + ret = -EINVAL; + } + + kfree(msg); + return ret; +} + +static int set_feature_vlan_strip(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + return zxdh_pf_switch_business_vlan( + &pf_info, VLAN_SRIOV_BUSINESS_VLAN_STRIP_OFFLIAD, + enable); + } + + return set_vf_vlan_strip(en_dev, enable, VLAN_STRIP_MSG_TYPE); +} + +static int set_feature_qinq_strip(struct zxdh_en_device *en_dev, bool enable) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + return zxdh_pf_switch_business_vlan( + &pf_info, VLAN_SRIOV_BUSINESS_QINQ_VLAN_STRIP_OFFLOAD, + enable); + } + + return set_vf_vlan_strip(en_dev, enable, QINQ_STRIP_MSG_TYPE); +} + +static int32_t set_feature_lro(struct zxdh_en_device *en_dev, bool enable) +{ + uint32_t en_value = enable ? 
1 : 0; + DPP_PF_INFO_T pf_info = { 0 }; + int32_t ret = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_attr_set( + &pf_info, SRIOV_VPORT_IPV4_TCP_ASSEMBLE, en_value); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IPV4_TCP_ASSEMBLE set failed: %d\n", + ret); + return ret; + } + ret = dpp_vport_attr_set( + &pf_info, SRIOV_VPORT_IPV6_TCP_ASSEMBLE, en_value); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IPV6_TCP_ASSEMBLE set failed: %d\n", + ret); + return ret; + } + } else { + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_IPV4_TCP_ASSEMBLE, en_value, 0); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IPV4_TCP_ASSEMBLE set failed: %d\n", + ret); + return ret; + } + ret = zxdh_vf_egr_port_attr_set( + en_dev, SRIOV_VPORT_IPV6_TCP_ASSEMBLE, en_value, 0); + if (ret != 0) { + LOG_ERR("SRIOV_VPORT_IPV6_TCP_ASSEMBLE set failed: %d\n", + ret); + return ret; + } + } + + return zxdh_dtp_offload_set(en_dev, &pf_info); +} + +static const struct { + netdev_features_t feature; + zxdh_feature_handler handler; +} feature_handlers[] = { + { NETIF_F_RXCSUM, set_feature_rx_checksum }, + { NETIF_F_HW_CSUM, set_feature_tx_checksum }, + { NETIF_F_TSO, set_feature_tso }, + { NETIF_F_TSO6, set_feature_tso6 }, + { NETIF_F_GSO_UDP_TUNNEL_CSUM, set_feature_vxlan_checksum }, + { NETIF_F_RXHASH, set_feature_rxhash }, + { NETIF_F_LRO, set_feature_lro }, + { NETIF_F_NTUPLE, set_feature_ntuple }, + { NETIF_F_HW_VLAN_CTAG_RX, set_feature_vlan_strip }, + { NETIF_F_HW_VLAN_STAG_RX, set_feature_qinq_strip }, + { NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter }, +}; + +int32_t zxdh_en_sync_features(struct zxdh_en_device *en_dev, + netdev_features_t want_features) +{ + int32_t err = 0; + int32_t i = 0; + netdev_features_t feature; + bool enable; + + for (i = 0; i < ARRAY_SIZE(feature_handlers); i++) { + feature = feature_handlers[i].feature; + enable = !!(want_features & feature); + + if 
(feature_handlers[i].handler) + err = feature_handlers[i].handler(en_dev, enable); + if (err) { + LOG_ERR("%s feature %pNF (%#llx) failed %d\n", + enable ? "Enable" : "Disable", &feature, + feature, err); + break; + } + } + + zxdh_netdev_features_over_dtp(en_dev->netdev); + return err; +} + +int32_t zxdh_en_set_features(struct net_device *netdev, + netdev_features_t wanted_features) +{ + const netdev_features_t changes = wanted_features ^ netdev->features; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + int32_t ret = 0; + int32_t i = 0; + netdev_features_t feature; + bool enable; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if (en_dev->xdp_enabled) { + LOG_ERR("XDP is enabled, can't change features\n"); + return -EBUSY; + } + + for (i = 0; i < ARRAY_SIZE(feature_handlers); i++) { + feature = feature_handlers[i].feature; + + if (!(changes & feature)) { + continue; + } + + enable = !!(wanted_features & feature); + + if (feature_handlers[i].handler) { + LOG_DEBUG("%s feature %pNF (%#llx), err %d\n", + enable ? "Enable" : "Disable", &feature, + feature, err); + err = feature_handlers[i].handler(en_dev, enable); + } + if (err) { + ret |= err; + continue; + } + netdev->features = enable ? (netdev->features | feature) : + (netdev->features & ~feature); + } + + zxdh_netdev_features_over_dtp(netdev); + return ret; +} + +static uint32_t list_hw_addr_create(struct netdev_hw_addr_list *list, + const uint8_t *addr, int32_t addr_len, + uint8_t addr_type, bool global, bool sync, + struct rb_node *parent, + struct rb_node **ins_point) +{ + struct netdev_hw_addr *ha = NULL; + + ha = kzalloc(sizeof(struct netdev_hw_addr), GFP_KERNEL); + if (ha == NULL) { + LOG_ERR("Kzalloc struct netdev_hw_addr failed \n"); + return 1; + } + + /* 结构体赋值 */ + memcpy(ha->addr, addr, addr_len); + ha->type = addr_type; + ha->refcount = 1; /* 引用计数 */ + ha->global_use = global; + ha->synced = sync ? 
1 : 0; + ha->sync_cnt = 0; + + rb_link_node(&ha->node, parent, ins_point); + rb_insert_color(&ha->node, &list->tree); + list_add_tail_rcu(&ha->list, &list->list); + list->count++; /* 链表节点加1 */ + + return 0; +} + +static uint32_t list_hw_addr_del(struct netdev_hw_addr_list *list, + struct netdev_hw_addr *ha) +{ + int32_t refcount = ha->refcount; + + /* 引用的计数大于1,则不能删除此mac地址 */ + if (--refcount) { + return 1; + } + + rb_erase(&ha->node, &list->tree); + + /* 从链表中删除此条目 */ + list_del_rcu(&ha->list); + + /* 释放ha结构体占用内存,rcu_head可以安全地释放ha占用的内存*/ + kfree_rcu(ha, rcu_head); + list->count--; + + return 0; +} + +bool is_this_mac_exist(struct net_device *netdev, const uint8_t *addr, + struct netdev_hw_addr **ha, struct rb_node ***ins_point, + struct rb_node **parent) +{ + bool isexist = false; + struct netdev_hw_addr *entry = NULL; + unsigned char addr_type; + int diff; + + if (is_unicast_ether_addr(addr)) { + addr_type = NETDEV_HW_ADDR_T_UNICAST; + *ins_point = &netdev->uc.tree.rb_node; + } else { + addr_type = NETDEV_HW_ADDR_T_MULTICAST; + *ins_point = &netdev->mc.tree.rb_node; + } + + /* 给net_device结构体上锁 */ + netif_addr_lock_bh(netdev); + + while (**ins_point) { + entry = rb_entry(**ins_point, struct netdev_hw_addr, node); + diff = memcmp(addr, entry->addr, netdev->addr_len); + if (diff == 0) { + diff = memcmp(&addr_type, &entry->type, + sizeof(addr_type)); + } + *parent = **ins_point; + if (diff < 0) { + *ins_point = &((*parent)->rb_left); + } else if (diff > 0) { + *ins_point = &((*parent)->rb_right); + } else { //存在此MAC + isexist = true; + *ha = entry; + goto out; + } + } + +out: + if (!isexist) { + *ha = NULL; + } + /* 给net_device结构体释放锁 */ + netif_addr_unlock_bh(netdev); + + return isexist; +} + +/** + * zxdh_dev_list_addr_add - 在地址链表中添加此mac地址 + * @netdev: 网络设备结构体 + * @addr: 要添加的mac地址 + * @addr_type: mac地址类型 + */ +int32_t zxdh_dev_list_addr_add(struct net_device *netdev, const uint8_t *addr, + struct rb_node *parent, + struct rb_node **ins_point) +{ + int32_t err = 0; + + 
/* 给net_device结构体上锁 */ + netif_addr_lock_bh(netdev); + + /* 判断此mac地址类型 */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + /* 将此mac地址添加到地址链表中 */ + err = list_hw_addr_create(&netdev->uc, addr, netdev->addr_len, + NETDEV_HW_ADDR_T_UNICAST, false, + false, parent, ins_point); + if (err != 0) { + LOG_ERR("list_hw_addr_create failed\n"); + } + } else { + err = list_hw_addr_create(&netdev->mc, addr, netdev->addr_len, + NETDEV_HW_ADDR_T_MULTICAST, false, + false, parent, ins_point); + if (err != 0) { + LOG_ERR("list_hw_addr_create failed\n"); + } + } + + /* 给net_device结构体释放锁 */ + netif_addr_unlock_bh(netdev); + + return err; +} + +/** + * zxdh_dev_list_addr_del - 在地址链表中删除此mac地址 + * @netdev: 网络设备结构体 + * @addr: 要删除的mac地址 + * @addr_type: mac地址类型 + */ +int32_t zxdh_dev_list_addr_del(struct net_device *netdev, const uint8_t *addr) +{ + struct netdev_hw_addr *ha = NULL; + int32_t err = 0; + + /* 给net_device上锁 */ + netif_addr_lock_bh(netdev); + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + /* 遍历单播mac地址链表 */ + list_for_each_entry(ha, &netdev->uc.list, list) { + /* 检查该单播地址链表中是否存在此mac,且此mac地址标志为单播 */ + if ((!memcmp(ha->addr, addr, netdev->addr_len)) && + (ha->type == NETDEV_HW_ADDR_T_UNICAST)) { + /* 从单播地址链表中删除此mac */ + err = list_hw_addr_del(&netdev->uc, ha); + if (err != 0) { + LOG_ERR("list_hw_addr_del failed\n"); + } + goto out; + } + } + } else { + /* 遍历组播mac地址链表 */ + list_for_each_entry(ha, &netdev->mc.list, list) { + /* 检查该组播地址链表中是否存在此mac,且此mac地址标志为组播 */ + if ((!memcmp(ha->addr, addr, netdev->addr_len)) && + (ha->type == NETDEV_HW_ADDR_T_MULTICAST)) { + /* 从组播地址链表中删除此mac */ + err = list_hw_addr_del(&netdev->mc, ha); + if (err != 0) { + LOG_ERR("list_hw_addr_del failed\n"); + } + goto out; + } + } + } + +out: + /* 给net_device结构体释放锁 */ + netif_addr_unlock_bh(netdev); + + return err; +} + +#ifdef MAC_CONFIG_DEBUG +int32_t zxdh_pf_dump_all_mac(struct zxdh_en_device *en_dev) +{ + MAC_VPORT_INFO *unicast_mac_arry = NULL; + 
MAC_VPORT_INFO *multicast_mac_arry = NULL; + uint32_t current_unicast_num = 0; + uint32_t current_multicast_num = 0; + int32_t err = 1; + int32_t i = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 开辟单播数组和组播数组*/ + unicast_mac_arry = (MAC_VPORT_INFO *)kzalloc( + sizeof(MAC_VPORT_INFO) * UNICAST_MAX_NUM, GFP_KERNEL); + if (unicast_mac_arry == NULL) { + LOG_ERR("kzalloc unicast_mac_arry failed \n"); + return err; + } + + multicast_mac_arry = (MAC_VPORT_INFO *)kzalloc( + sizeof(MAC_VPORT_INFO) * MULTICAST_MAX_NUM, GFP_KERNEL); + if (multicast_mac_arry == NULL) { + LOG_ERR("kzalloc multicast_mac_arry failed \n"); + goto out1; + } + + /* 从NP中dump所有单播mac地址*/ + err = dpp_unicast_mac_dump(&pf_info, unicast_mac_arry, + ¤t_unicast_num); + if (err != 0) { + LOG_ERR("dpp_unicast_mac_dump failed, ret:%d\n", err); + goto out2; + } + + /* 从NP中dump所有组播mac地址*/ + err = dpp_multicast_mac_dump(&pf_info, multicast_mac_arry, + ¤t_multicast_num); + if (err != 0) { + LOG_ERR("dpp_multicast_mac_dump failed\n"); + goto out2; + } + + for (i = 0; i < current_unicast_num; ++i) { + LOG_INFO("unicast_mac_arry[%d].vport is %#x\n", i, + unicast_mac_arry[i].vport); + LOG_INFO("unicast_mac_arry[%d].mac is %pM\n", i, + unicast_mac_arry[i].addr); + } + for (i = 0; i < current_multicast_num; ++i) { + LOG_INFO("multicast_mac_arry[%d].vport is %#x\n", i, + multicast_mac_arry[i].vport); + LOG_INFO("multicast_mac_arry[%d].mac is %pM\n", i, + multicast_mac_arry[i].addr); + } + +out2: + if (multicast_mac_arry != NULL) { + kfree(multicast_mac_arry); + } + +out1: + if (unicast_mac_arry != NULL) { + kfree(unicast_mac_arry); + } + + return err; +} +#endif /* MAC_CONFIG_DEBUG */ + +int32_t unicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev, + const uint8_t *addr, uint16_t flags) +{ + struct netdev_hw_addr *ha = NULL; + int32_t err = 0; + MAC_VPORT_INFO *p_mac_arr = NULL; + uint32_t p_mac_num = 0; + uint16_t current_vport = 0; + 
uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + struct rb_node **ins_point = NULL, *parent = NULL; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 判断单个设备所配置mac地址数量是否超过32上限 */ + if (en_dev->curr_unicast_num >= DEV_UNICAST_MAX_NUM - 1) { + LOG_ERR("curr_unicast_num is beyond maximum\n"); + return -ENOSPC; + } + + /* 遍历单播地址链表,判断是否此设备存在此单播mac */ + if (is_this_mac_exist(dev, addr, &ha, &ins_point, &parent)) { + LOG_DEBUG("Mac already exists\n"); + if (!(flags & NLM_F_EXCL)) { + return 0; + } + return -EEXIST; + } + + /* 如果待配置mac和本机mac相同,则不配置到NP中, 只将此mac添加到单播地址链表中 */ + if (!memcmp(addr, dev->dev_addr, dev->addr_len)) { + goto out; + } + + /* 将此mac地址配置到np中 */ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + /* dump整个转发域已经配置的单播mac数量 */ + err = dpp_unicast_mac_dump(&pf_info, p_mac_arr, &p_mac_num); + if (err != 0) { + LOG_ERR("dpp_unicast_mac_dump failed, ret:%d\n", err); + return -1; + } + LOG_DEBUG("p_mac_num is %d\n", p_mac_num); + + /* 判断整个转发域配置的单播mac数量是否超过上限 */ + if (p_mac_num >= UNICAST_MAX_NUM) { + LOG_ERR("curr_all_unicast_num is beyond maximum\n"); + return -ENOSPC; + } + + /* 遍历整个转发域,判断此转发域是否存在此单播mac地址 */ + err = dpp_unicast_mac_search(&pf_info, addr, sriov_vlan_tpid, + sriov_vlan_id, ¤t_vport); + if (err == 0) { + LOG_DEBUG("Mac already exists\n"); + return -EEXIST; + } else if (err != DPP_HASH_RC_SRH_FAIL) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", err); + return -1; + } + + err = dpp_add_mac(&pf_info, addr, sriov_vlan_tpid, + sriov_vlan_id); + if (err != 0) { + LOG_ERR("dpp_add_mac failed, ret:%d\n", err); + return -1; + } + } else { + err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC); + if (err != 0) { + if (err == ZXDH_REPS_BEYOND_MAC) { + LOG_ERR("curr_all_unicast_num is beyond maximum\n"); + return -ENOSPC; + } else if (err == ZXDH_REPS_EXIST_MAC) { + LOG_DEBUG("Mac already exists\n"); + return -EEXIST; + } + LOG_ERR("zxdh_vf_dpp_add_mac 
failed, ret:%d\n", err); + return -1; + } + } + +out: + /* 将此单播mac地址添加到地址链表中 */ + err = zxdh_dev_list_addr_add(dev, addr, parent, ins_point); + if (err != 0) { + LOG_ERR("zxdh_dev_list_addr_add failed, ret:%d\n", err); + return -1; + } + en_dev->curr_unicast_num++; + LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num); + return err; +} + +bool is_ipv6_multicast_mac(const uint8_t *mac) +{ + return ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] == 0xff)); +} + +bool ipv6_mac_refcount_get(struct zxdh_en_device *en_dev, const uint8_t *ip6mac, + int32_t *ipv6_mac_refconut) +{ + uint32_t mac_hash_val = 0; + struct zxdh_ipv6_mac_tbl *ip6mac_tbl = + en_dev->ops->get_ip6mac_tbl(en_dev->parent); + struct zxdh_ipv6_mac_entry *ce = NULL; + struct zxdh_ipv6_mac_entry *cte = NULL; + + if (!ip6mac_tbl) { + LOG_ERR("ip6mac_tbl is NULL\n"); + return -ENXIO; + } + + mac_hash_val = mac_hash(ip6mac_tbl, ip6mac); + + mutex_lock(&ip6mac_tbl->mlock); + + list_for_each_entry(cte, &ip6mac_tbl->hash_list[mac_hash_val], list) { + if (memcmp(cte->ipv6_mac, ip6mac, ETH_ALEN) == + 0) { /* MAC已经存在 */ + ce = cte; + *ipv6_mac_refconut = refcount_read(&ce->refcnt); + mutex_unlock(&ip6mac_tbl->mlock); + return true; + } + } + mutex_unlock(&ip6mac_tbl->mlock); + LOG_INFO("ipv6_mac_refcount_get end\n"); + return false; +} + +int32_t multicast_mac_add_operate(struct zxdh_en_device *en_dev, + const uint8_t *addr, uint16_t flags, + struct netdev_hw_addr *ha) +{ + int32_t ipv6_mac_refconut = 0; + + if (!ipv6_mac_refcount_get(en_dev, addr, &ipv6_mac_refconut)) { + goto exist_flag; + } + + if (ipv6_mac_refconut == ha->refcount) { + ++ha->refcount; + LOG_INFO("ipv6_mac_refconut == ha->refcount\n"); + return 0; + } + + if (ipv6_mac_refconut < ha->refcount) { + LOG_INFO("ipv6_mac_refconut < ha->refcount\n"); + goto exist_flag; + } + LOG_ERR("ipv6_mac_refconut[%d] < ha->refcount[%d]\n", ipv6_mac_refconut, + ha->refcount); + return -1; + +exist_flag: + if (!(flags & NLM_F_EXCL)) { + return 0; + } + 
return -EEXIST; +} + +int32_t multicast_mac_add(struct zxdh_en_device *en_dev, struct net_device *dev, + const uint8_t *addr, uint16_t flags) +{ + struct netdev_hw_addr *ha = NULL; + int64_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + struct rb_node **ins_point = NULL, *parent = NULL; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 判断目前所配置组播mac地址数量是否超过32个上限 */ + if (en_dev->curr_multicast_num >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("curr_multicast_num is beyond maximum\n"); + return -ENOSPC; + } + + /* 遍历组播地址链表,判断是否存在此mac */ + if (is_this_mac_exist(dev, addr, &ha, &ins_point, &parent)) { + if (!is_ipv6_multicast_mac(addr)) { /* 非ipv6组播mac地址 */ + LOG_DEBUG("Mac already exists\n"); + if (!(flags & NLM_F_EXCL)) { + return 0; + } + return -EEXIST; + } + + if (ha == NULL) { + LOG_ERR("ha is NULL"); + return -1; + } + /* ipv6组播mac地址 */ + return multicast_mac_add_operate(en_dev, addr, flags, ha); + } + + /* 将此组播mac地址配置到np中 */ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = dpp_multi_mac_add_member(&pf_info, addr); + if (err != 0) { + if (err == + DPP_RC_TBL_IS_FULL) { /*判断整个转发域已配置组播mac数量是否超过上限*/ + LOG_ERR("multicast mac beyond all mac num\n"); + return -ENOSPC; + } + LOG_ERR("dpp_multi_mac_add_member failed\n"); + return -1; + } + } else { + err = zxdh_vf_dpp_add_mac(en_dev, addr, FILTER_MAC); + if (err != 0) { + if (err == + ZXDH_REPS_BEYOND_MAC) { /*判断整个转发域已配置组播mac数量超过上限*/ + LOG_ERR("multicast mac is beyond all mac num\n"); + return -ENOSPC; + } + LOG_ERR("zxdh_vf_dpp_add_mac failed, ret:%lld\n", err); + return -1; + } + } + + /* 将此组播mac地址添加到地址链表中 */ + err = zxdh_dev_list_addr_add(dev, addr, parent, ins_point); + if (err != 0) { + LOG_ERR("zxdh_dev_list_addr_add failed, ret:%lld\n", err); + return -1; + } + en_dev->curr_multicast_num++; + LOG_DEBUG("curr_multicast_num is %d\n", en_dev->curr_multicast_num); + LOG_INFO("multicast_mac_add end\n"); + return 0; +} + +int32_t unicast_mac_del(struct zxdh_en_device 
*en_dev, struct net_device *dev, + const uint8_t *addr) +{ + struct netdev_hw_addr *ha = NULL; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + + struct rb_node **ins_point = NULL, *parent = NULL; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 判断目前所配置mac地址数量是否小于0 */ + if (en_dev->curr_unicast_num <= 0) { + LOG_ERR("curr_unicast_num is less than 0\n"); + return -ENOENT; + } + + /* 遍历单播地址链表,判断是否存在此mac */ + if (!is_this_mac_exist(dev, addr, &ha, &ins_point, &parent)) { + LOG_DEBUG("Mac is not exists\n"); + return -ENOENT; + } + + /* 如果待删除mac和本机mac相同,则不从NP中删除,只从链表中删除 */ + if (!memcmp(addr, dev->dev_addr, dev->addr_len)) { + goto out; + } + + /* 从np中删除此单播mac */ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + /* 此设备为PF */ + err = dpp_del_mac(&pf_info, addr, sriov_vlan_tpid, + sriov_vlan_id); + if (err != 0) { + LOG_ERR("dpp_del_mac failed, ret:%d\n", err); + return -1; + } + LOG_DEBUG("dpp_del_mac succeed\n"); + } else { + /* 此设备为VF */ + err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_del_mac failed, ret:%d\n", err); + return -1; + } + LOG_DEBUG("zxdh_vf_dpp_del_mac succeed\n"); + } + +out: + /* 从链表中删除单播mac */ + err = zxdh_dev_list_addr_del(dev, addr); + if (err != 0) { + LOG_ERR("zxdh_dev_list_addr_del failed, ret:%d\n", err); + return -1; + } + en_dev->curr_unicast_num--; + LOG_DEBUG("curr_unicast_num is %d\n", en_dev->curr_unicast_num); + return err; +} + +int32_t multicast_mac_del_operate(struct zxdh_en_device *en_dev, + const uint8_t *addr, + struct netdev_hw_addr *ha) +{ + int32_t ipv6_mac_refconut = 0; + + if (!ipv6_mac_refcount_get(en_dev, addr, &ipv6_mac_refconut)) { + return 0; + } + + if (ipv6_mac_refconut == ha->refcount) { + LOG_INFO("ipv6_mac_refconut == ha->refcount"); + return -1; + } + + if ((ipv6_mac_refconut < ha->refcount) && (ipv6_mac_refconut > 0)) { + --ha->refcount; + 
LOG_INFO("ipv6_mac_refconut < ha->refcount"); + return -1; + } + LOG_ERR("ipv6_mac_refconut[%d] < ha->refcount[%d]", ipv6_mac_refconut, + ha->refcount); + return -1; +} + +int32_t multicast_mac_del(struct zxdh_en_device *en_dev, struct net_device *dev, + const uint8_t *addr) +{ + struct netdev_hw_addr *ha = NULL; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + struct rb_node **ins_point = NULL, *parent = NULL; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 遍历组播地址链表,判断是否存在此组播mac,如果不存在,则返回报错 */ + if (!is_this_mac_exist(dev, addr, &ha, &ins_point, &parent)) { + LOG_DEBUG("Mac is not exists\n"); + return -ENOENT; + } + + if (ha == NULL) { + LOG_ERR("ha is NULL"); + return -1; + } + + if (is_ipv6_multicast_mac(addr)) { /* ipv6组播mac地址*/ + err = multicast_mac_del_operate(en_dev, addr, ha); + if (err != 0) { + LOG_DEBUG("Mac is not permitted del\n"); + return 0; + } + } + + /* 从np中删除此组播mac */ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + /* 此设备为PF */ + err = dpp_multi_mac_del_member(&pf_info, addr); + if (err != 0) { + LOG_ERR("dpp_multi_mac_del_member failed, ret:%d\n", + err); + return -1; + } + LOG_DEBUG("dpp_multi_mac_del_member succeed\n"); + } else { + /* 此设备为VF */ + err = zxdh_vf_dpp_del_mac(en_dev, addr, FILTER_MAC, true); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_del_mac failed, ret:%d\n", err); + return -1; + } + } + + /* 从链表中删除组播mac */ + err = zxdh_dev_list_addr_del(dev, addr); + if (err != 0) { + LOG_ERR("zxdh_dev_list_addr_del failed, ret:%d\n", err); + return -1; + } + en_dev->curr_multicast_num--; + LOG_DEBUG("curr_multicast_num is %d\n", en_dev->curr_multicast_num); + return err; +} + +static unsigned int mac_hash(struct zxdh_ipv6_mac_tbl *mac_tbl, + const uint8_t *mac_addr) +{ + unsigned int mact_size_half = mac_tbl->ip6mact_size / 2; + uint32_t mac_part1 = (mac_addr[0] << 24) | (mac_addr[1] << 16) | + (mac_addr[2] << 8) | mac_addr[3]; + uint32_t mac_part2 = (mac_addr[4] << 8) | mac_addr[5]; + + 
uint32_t xor = mac_part1 ^ mac_part2; + + return (jhash_1word(xor, 0) % mact_size_half); +} + +int32_t zxdh_ip6mac_to_np(struct zxdh_en_device *en_dev, + struct zxdh_ipv6_mac_tbl *ip6mac_tbl, + const uint8_t *ip6mac, uint8_t action) +{ + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + switch (action) { + case ADD_IP6MAC: { + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { /* PF流程 */ + /* 将此组播mac地址配置到np中 */ + err = dpp_multi_mac_add_member(&pf_info, ip6mac); + if (err != 0) { + LOG_ERR("dpp_multi_mac_add_member failed, err:%d\n", + err); + } + } else { /* VF流程*/ + err = zxdh_vf_dpp_add_ipv6_mac(en_dev, ip6mac); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_add_ipv6_mac failed, err:%d\n", + err); + } + } + break; + } + case DEL_IP6MAC: { + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { /* PF流程 */ + err = dpp_multi_mac_del_member(&pf_info, ip6mac); + if (err != 0) { + LOG_ERR("dpp_multi_mac_del_member failed, err:%d\n", + err); + } + } else { /* VF流程 */ + err = zxdh_vf_dpp_del_ipv6_mac(en_dev, ip6mac); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_del_ipv6_mac failed, err:%d\n", + err); + } + } + break; + } + } + return err; +} + +int32_t zxdh_ip4mac_to_np(struct zxdh_en_device *en_dev, const uint8_t *ip4mac, + uint8_t action) +{ + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + switch (action) { + case NETDEV_UP: { + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { /* PF流程 */ + /* 将此组播mac地址配置到np中 */ + err = dpp_multi_mac_add_member(&pf_info, ip4mac); + if (err != 0) { + LOG_ERR("pf config ipv4 mac failed, err:%d\n", + err); + } + } else { /* VF流程*/ + err = zxdh_vf_dpp_add_ipv6_mac(en_dev, ip4mac); + if (err != 0) { + LOG_ERR("vf config ipv4 mac failed, err:%d\n", + err); + } + } + break; + } + case NETDEV_DOWN: { + if 
(en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { /* PF流程 */ + err = dpp_multi_mac_del_member(&pf_info, ip4mac); + if (err != 0) { + LOG_ERR("vf del ipv4 mac failed, err:%d\n", + err); + } + } else { /* VF流程 */ + err = zxdh_vf_dpp_del_ipv6_mac(en_dev, ip4mac); + if (err != 0) { + LOG_ERR("vf del ipv4 mac failed, err:%d\n", + err); + } + } + break; + } + } + return err; +} + +int32_t zxdh_ip6mac_add(struct zxdh_en_device *en_dev, const uint32_t *addr6, + const uint8_t *ip6mac) +{ + int32_t err = 0; + unsigned int mac_hash_val; + struct zxdh_ipv6_mac_tbl *ip6mac_tbl = + en_dev->ops->get_ip6mac_tbl(en_dev->parent); + struct zxdh_ipv6_mac_entry *ce, *cte; + + if (!ip6mac_tbl) { + LOG_ERR("ip6mac_tbl is NULL\n"); + return -ENXIO; + } + + /* 判断目前所配置组播mac地址数量是否超过32个上限 */ + if (en_dev->curr_multicast_num >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("curr_multicast_num is beyond maximum\n"); + return -ENOSPC; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + mutex_lock(&ip6mac_tbl->mlock); + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, ADD_IP6MAC); + if (err != 0) { + mutex_unlock(&ip6mac_tbl->mlock); + return err; + } + } else { + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, ADD_IP6MAC); + if (err != 0) { + return err; + } + mutex_lock(&ip6mac_tbl->mlock); + } + + mac_hash_val = mac_hash(ip6mac_tbl, ip6mac); + //如果没有报错,则说明MAC已经存在或成功存入NP + list_for_each_entry(cte, &ip6mac_tbl->hash_list[mac_hash_val], list) { + if (memcmp(cte->ipv6_mac, ip6mac, ETH_ALEN) == + 0) { //MAC已经存在 + ce = cte; + refcount_inc(&ce->refcnt); + DH_LOG_DEBUG( + MODULE_PF, + "Eth:%s, Increase Multicast MAC Address(%pM) refcnt:%d\n", + en_dev->netdev->name, ip6mac, + refcount_read(&ce->refcnt)); + mutex_unlock(&ip6mac_tbl->mlock); + return 0; + } + } + + //成功新增MAC至NP,需要更新 ip6mac_tbl + if (list_empty(&ip6mac_tbl->ip6mac_free_head)) { + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, 
ip6mac, + DEL_IP6MAC); + mutex_unlock(&ip6mac_tbl->mlock); + } else { + mutex_unlock(&ip6mac_tbl->mlock); + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, + DEL_IP6MAC); + } + LOG_ERR("ip6mac_tbl overflow, can't add; del mac from NP, ret:%d\n", + err); + return -ENOMEM; + } + ce = list_first_entry(&ip6mac_tbl->ip6mac_free_head, + struct zxdh_ipv6_mac_entry, list); + list_del(&ce->list); + INIT_LIST_HEAD(&ce->list); + spin_lock_init(&ce->lock); + refcount_set(&ce->refcnt, 0); + list_add_tail(&ce->list, &ip6mac_tbl->hash_list[mac_hash_val]); + memcpy(ce->ipv6_mac, ip6mac, ETH_ALEN); + refcount_set(&ce->refcnt, 1); + mutex_unlock(&ip6mac_tbl->mlock); + en_dev->curr_multicast_num++; + DH_LOG_DEBUG(MODULE_PF, "curr_multicast_num is %d\n", + en_dev->curr_multicast_num); + DH_LOG_DEBUG(MODULE_PF, + "Eth:%s, Add New Multicast MAC Address: %pM, refcnt:%d\n", + en_dev->netdev->name, ip6mac, refcount_read(&ce->refcnt)); + + return 0; +} + +int32_t zxdh_ip4mac_add(struct zxdh_en_device *en_dev, const uint8_t *ip4mac, + uint8_t action) +{ + struct net_device *netdev = en_dev->netdev; + struct netdev_hw_addr *entry = NULL; + int32_t err = 0; + + /* 判断目前所配置组播mac地址数量是否超过32个上限 */ + if (en_dev->curr_multicast_num >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("curr_multicast_num is beyond maximum\n"); + return -ENOSPC; + } + + /* 遍历组播mac地址链表,判断此mac地址是否已经配置 */ + list_for_each_entry(entry, &netdev->mc.list, list) { + if ((!memcmp(entry->addr, ip4mac, ETH_ALEN)) && + (entry->type == NETDEV_HW_ADDR_T_MULTICAST)) { + LOG_DEBUG("entry->refcount is %d\n", entry->refcount); + if (entry->refcount > 1) { + LOG_INFO("MAC:%pM already config to np\n", + ip4mac); + return NOTIFY_OK; + } + } + } + + err = zxdh_ip4mac_to_np(en_dev, ip4mac, action); + if (err != 0) { + return err; + } + + /* 如果没有报错,则说明MAC已经存在或成功存入NP */ + en_dev->curr_multicast_num++; + LOG_DEBUG("curr_multicast_num is %d\n", en_dev->curr_multicast_num); + return 0; +} + +/* Work queue handler for IPv6 MAC deletion using dynamic work 
items */ +void zxdh_ip6mac_del_work_handler(struct work_struct *work) +{ + struct zxdh_ip6mac_work_item *work_item = + container_of(work, struct zxdh_ip6mac_work_item, work); + struct zxdh_en_device *en_dev = work_item->en_dev; + zxdh_ip6mac_del(en_dev, work_item->data.addr6, work_item->data.ip6mac); + kfree(work_item); +} + +/* Safe version of zxdh_ip6mac_del that handles atomic context with dynamic work items */ +int32_t zxdh_ip6mac_del_safe(struct zxdh_en_device *en_dev, + const uint32_t *addr6, const uint8_t *ip6mac) +{ + struct zxdh_ip6mac_work_item *work_item; + struct zxdh_en_priv *en_priv = + container_of(en_dev, struct zxdh_en_priv, edev); + /* Allocate work item dynamically - each call gets its own work item with embedded data */ + work_item = kmalloc(sizeof(struct zxdh_ip6mac_work_item), GFP_ATOMIC); + if (!work_item) { + return -ENOMEM; + } + INIT_WORK(&work_item->work, zxdh_ip6mac_del_work_handler); + work_item->en_dev = en_dev; + zte_memcpy_s(work_item->data.addr6, addr6, + sizeof(work_item->data.addr6)); + zte_memcpy_s(work_item->data.ip6mac, ip6mac, + sizeof(work_item->data.ip6mac)); + + if (!queue_work(en_priv->events->wq, &work_item->work)) { + kfree(work_item); + return -EBUSY; + } + + return 0; +} + +/* Work queue handler for IPv6 MAC addition using dynamic work items */ +void zxdh_ip6mac_add_work_handler(struct work_struct *work) +{ + struct zxdh_ip6mac_work_item *work_item = + container_of(work, struct zxdh_ip6mac_work_item, work); + struct zxdh_en_device *en_dev = work_item->en_dev; + zxdh_ip6mac_add(en_dev, work_item->data.addr6, work_item->data.ip6mac); + kfree(work_item); +} + +/* Safe version of zxdh_ip6mac_add that handles atomic context with dynamic work items */ +int32_t zxdh_ip6mac_add_safe(struct zxdh_en_device *en_dev, + const uint32_t *addr6, const uint8_t *ip6mac) +{ + struct zxdh_ip6mac_work_item *work_item; + struct zxdh_en_priv *en_priv = + container_of(en_dev, struct zxdh_en_priv, edev); + work_item = kmalloc(sizeof(struct 
zxdh_ip6mac_work_item), GFP_ATOMIC); + if (!work_item) { + return -ENOMEM; + } + INIT_WORK(&work_item->work, zxdh_ip6mac_add_work_handler); + work_item->en_dev = en_dev; + zte_memcpy_s(work_item->data.addr6, addr6, + sizeof(work_item->data.addr6)); + zte_memcpy_s(work_item->data.ip6mac, ip6mac, + sizeof(work_item->data.ip6mac)); + + if (!queue_work(en_priv->events->wq, &work_item->work)) { + kfree(work_item); + return -EBUSY; + } + + return 0; +} + +int32_t zxdh_ip6mac_del(struct zxdh_en_device *en_dev, const uint32_t *addr6, + const uint8_t *ip6mac) +{ + int32_t err = 0; + struct zxdh_ipv6_mac_tbl *ip6mac_tbl = + en_dev->ops->get_ip6mac_tbl(en_dev->parent); + struct zxdh_ipv6_mac_entry *ce, *cte; + unsigned int mac_hash_val; + int32_t refcnt = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (!ip6mac_tbl) { + LOG_ERR("ip6mac_tbl is NULL"); + return -ENXIO; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + mac_hash_val = mac_hash(ip6mac_tbl, ip6mac); + + mutex_lock(&ip6mac_tbl->mlock); + list_for_each_entry(cte, &ip6mac_tbl->hash_list[mac_hash_val], list) { + if (memcmp(cte->ipv6_mac, ip6mac, ETH_ALEN) == 0) { //MAC存在 + ce = cte; + goto found; + } + } + DH_LOG_DEBUG(MODULE_PF, + "Don't Found Multicast MAC Address: %pM in Hash List\n", + ip6mac); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC); + mutex_unlock(&ip6mac_tbl->mlock); + } else { + mutex_unlock(&ip6mac_tbl->mlock); + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC); + } + return err; + +found: + spin_lock_bh(&ce->lock); + if (!refcount_dec_and_test(&ce->refcnt)) { + DH_LOG_DEBUG( + MODULE_PF, + "Eth:%s, Decrease Multicast MAC Address(%pM) refcnt:%d\n", + en_dev->netdev->name, ip6mac, + refcount_read(&ce->refcnt)); + spin_unlock_bh(&ce->lock); + mutex_unlock(&ip6mac_tbl->mlock); + return err; + } + //如果引用计数减到0 + list_del(&ce->list); + INIT_LIST_HEAD(&ce->list); + 
list_add_tail(&ce->list, &ip6mac_tbl->ip6mac_free_head); + refcnt = refcount_read(&ce->refcnt); + spin_unlock_bh(&ce->lock); + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { //PF设备需要带锁下表 + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC); + mutex_unlock(&ip6mac_tbl->mlock); + } else { //VF设备需要先释放锁再发消息下表,因为PF侧会加锁 + mutex_unlock(&ip6mac_tbl->mlock); + err = zxdh_ip6mac_to_np(en_dev, ip6mac_tbl, ip6mac, DEL_IP6MAC); + } + en_dev->curr_multicast_num--; + DH_LOG_DEBUG(MODULE_PF, "curr_multicast_num is %d\n", + en_dev->curr_multicast_num); + DH_LOG_DEBUG( + MODULE_PF, + "Eth:%s, Del Multicast MAC Address: %pM Completely, refcnt:%d, np ret:%d\n", + en_dev->netdev->name, ip6mac, refcnt, err); + return err; +} + +int32_t zxdh_ip4mac_del(struct zxdh_en_device *en_dev, const uint8_t *ip4mac, + uint8_t action) +{ + int32_t err = 0; + struct netdev_hw_addr *entry = NULL; + struct net_device *netdev = en_dev->netdev; + + /* 遍历组播mac地址链表,判断此mac地址是否已经删除 */ + list_for_each_entry(entry, &netdev->mc.list, list) { + /* 检查该组播地址链表中是否存在此mac,且此mac地址类型为组播 */ + if ((!memcmp(entry->addr, ip4mac, ETH_ALEN)) && + (entry->type == NETDEV_HW_ADDR_T_MULTICAST)) { + LOG_DEBUG("entry->refcount is %d\n", entry->refcount); + if (entry->refcount > 0) { + LOG_INFO( + "MAC:%pM is used by other dev or operation\n", + ip4mac); + return NOTIFY_OK; + } + } + } + + err = zxdh_ip4mac_to_np(en_dev, ip4mac, action); + if (err != 0) { + return err; + } + + en_dev->curr_multicast_num--; + LOG_INFO("curr_multicast_num is %d\n", en_dev->curr_multicast_num); + return err; +} + +int32_t zxdh_pf_add_vf_unicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg) +{ + struct zxdh_vf_item *vf_item = NULL; + uint16_t current_vport = 0; + int32_t ret = 0; + uint32_t i = 0; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + uint32_t p_mac_num = 0; + uint32_t max_unicast_num = 0; + DPP_PF_INFO_T pf_info = { 0 }; + ZXDH_L2_FWD_KEY *l2_entry = NULL; + 
MAC_VPORT_INFO *p_mac_arr = NULL; + + LOG_INFO("zxdh_pf_add_vf_unicast_mac is called\n"); + + /* 判断vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, + msg->mac_config.target_vf)) { + LOG_ERR("vf(%u) is not probed\n", msg->mac_config.target_vf); + return VF_ERROR; + } + + /* 获取vf_item */ + vf_item = en_dev->ops->get_vf_item(en_dev->parent, + msg->mac_config.target_vf); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", msg->mac_config.target_vf); + return MAC_CONFIG_FAILED; + } + + mutex_lock(&vf_item->lock); + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + + /* 判断单播mac是否超过vf的mac上限 */ + LOG_DEBUG("unicast_add_count is %u\n", + msg->mac_config.unicast_add_count); + LOG_DEBUG("vf_item->vf_mac_info.current_unicast_num is %u\n", + vf_item->vf_mac_info.current_unicast_num); + + if ((vf_item->vf_mac_info.current_unicast_num + + msg->mac_config.unicast_add_count) > VF_MAX_UNICAST_MAC) { + LOG_ERR("current mac num beyond 128\n"); + mutex_unlock(&vf_item->lock); + return UNICAST_MAC_NUM_BEYOND_MAXNUM; + } + + /* 遍历整个转发域,获取前pf级已经配置单播mac数量 */ + ret = dpp_unicast_mac_dump(&pf_info, p_mac_arr, &p_mac_num); + if (ret != 0) { + LOG_ERR("dpp_unicast_mac_dump failed\n"); + mutex_unlock(&vf_item->lock); + return ret; + } + LOG_DEBUG("p_mac_num is %d\n", p_mac_num); + + /* 获取当前pf级最大单播mac数量 */ + ret = dpp_unicast_mac_max_get(&pf_info, &max_unicast_num); + if (ret != 0) { + LOG_ERR("dpp_unicast_mac_max_get failed %u\n", max_unicast_num); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + + if ((p_mac_num + msg->mac_config.unicast_add_count) > max_unicast_num) { + LOG_ERR("dpp_unicast_mac_dump failed\n"); + mutex_unlock(&vf_item->lock); + return UNICAST_MAC_NUM_BEYOND_MAXNUM; + } + + /* 获取vlan信息 */ + sriov_vlan_tpid = vf_item->vlan_proto; + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + pf_info.vport = vf_item->vport; + /* 判断此单播mac是否存在 */ + for (i = 0; i < msg->mac_config.unicast_add_count; i++) { 
+ ret = dpp_unicast_mac_search( + &pf_info, msg->mac_config.unicast_mac_array[i].mac_addr, + sriov_vlan_tpid, sriov_vlan_id, ¤t_vport); + if (ret == 0) { /* 找到此单播mac */ + if (current_vport == vf_item->vport) { + continue; + } else { + LOG_ERR("Mac:%pM Already exists in other vf\n", + msg->mac_config.unicast_mac_array[i] + .mac_addr); + mutex_unlock(&vf_item->lock); + return MAC_ALREADY_EXISTS_IN_OTHER_VF; + } + } + } + + /* 构建mac数组 */ + l2_entry = kzalloc(sizeof(ZXDH_L2_FWD_KEY) * + msg->mac_config.unicast_add_count, + GFP_KERNEL); + if (l2_entry == NULL) { + LOG_ERR("l2_entry malloc fialed\n"); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + for (i = 0; i < msg->mac_config.unicast_add_count; i++) { + memcpy(l2_entry[i].dmac_addr, + msg->mac_config.unicast_mac_array[i].mac_addr, ETH_ALEN); + l2_entry[i].sriov_vlan_id = sriov_vlan_id; + l2_entry[i].sriov_vlan_tpid = sriov_vlan_tpid; + } + + /* 配置单播mac */ + ret = dpp_batch_add_unicast_mac( + &pf_info, msg->mac_config.unicast_add_count, l2_entry); + if (ret != 0) { + kfree(l2_entry); + LOG_ERR("config unicast mac failed\n"); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + + /* 将此单播mac地址添加到vf_item */ + for (i = 0; i < msg->mac_config.unicast_add_count; i++) { + zxdh_vf_item_mac_add(vf_item, l2_entry[i].dmac_addr, 1); + } + + LOG_DEBUG("current_unicast_num is %u\n", + vf_item->vf_mac_info.current_unicast_num); + kfree(l2_entry); + mutex_unlock(&vf_item->lock); + return ret; +} + +int32_t zxdh_pf_add_vf_multicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg) +{ + struct zxdh_vf_item *vf_item = NULL; + uint32_t ret = 0; + uint32_t i = 0; + DPP_PF_INFO_T pf_info = { 0 }; + MacAddress *hash_entry = NULL; + + LOG_INFO("zxdh_pf_add_vf_multicast_mac is called\n"); + + pf_info.slot = en_dev->slot_id; + + /* 判断vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, + msg->mac_config.target_vf)) { + LOG_ERR("vf(%u) is not probed\n", msg->mac_config.target_vf); 
+ return VF_ERROR; + } + + /* 获取vf_item */ + vf_item = en_dev->ops->get_vf_item(en_dev->parent, + msg->mac_config.target_vf); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", msg->mac_config.target_vf); + return MAC_CONFIG_FAILED; + } + + mutex_lock(&vf_item->lock); + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + + /* 判断组播mac是否超过vf的mac上限 */ + if ((vf_item->vf_mac_info.current_multicast_num + + msg->mac_config.multicast_add_count) > VF_MAX_MULTICAST_MAC) { + LOG_ERR("current multicast mac num beyond 32\n"); + mutex_unlock(&vf_item->lock); + return MULTICAST_MAC_NUM_BEYOND_MAXNUM; + } + + /* 构建mac数组 */ + hash_entry = kzalloc(sizeof(MacAddress) * + msg->mac_config.multicast_add_count, + GFP_KERNEL); + if (hash_entry == NULL) { + LOG_ERR("hash_entry malloc failed\n"); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + for (i = 0; i < msg->mac_config.multicast_add_count; i++) { + memcpy(hash_entry[i].mac_addr, + msg->mac_config.multicast_mac_array[i].mac_addr, + ETH_ALEN); + } + + /* 配置组播mac */ + ret = dpp_batch_add_multicast_mac( + &pf_info, msg->mac_config.multicast_add_count, hash_entry); + if (ret != 0) { + kfree(hash_entry); + mutex_unlock(&vf_item->lock); + if (ret == + DPP_RC_TBL_IS_FULL) { /* 超过整个pf级转发域组播mac上限 */ + LOG_ERR("current multicast mac num PF beyond mac\n"); + return MULTICAST_MAC_NUM_BEYOND_MAXNUM; + } else { + LOG_ERR("multicast config failed\n"); + return MAC_CONFIG_FAILED; + } + } + + /* 添加到vf_item */ + for (i = 0; i < msg->mac_config.multicast_add_count; i++) { + /* 将此组播mac地址添加到vf_item */ + zxdh_vf_item_mac_add(vf_item, hash_entry[i].mac_addr, 1); + } + + LOG_DEBUG("current_multicast_num is %u\n", + vf_item->vf_mac_info.current_multicast_num); + kfree(hash_entry); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_SUCCESS; +} + +int32_t zxdh_pf_del_vf_unicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg) +{ + ZXDH_L2_FWD_KEY *l2_entry = NULL; + struct 
zxdh_vf_item *vf_item = NULL; + uint16_t current_vport = 0; + int32_t ret = 0; + uint32_t i = 0; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + LOG_INFO("zxdh_pf_del_vf_unicast_mac is called\n"); + + /* 判断vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, + msg->mac_config.target_vf)) { + LOG_ERR("vf(%u) is not probed\n", msg->mac_config.target_vf); + return VF_ERROR; + } + + /* 获取vf_item */ + vf_item = en_dev->ops->get_vf_item(en_dev->parent, + msg->mac_config.target_vf); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", msg->mac_config.target_vf); + return MAC_CONFIG_FAILED; + } + + mutex_lock(&vf_item->lock); + /* 获取vlan信息 */ + sriov_vlan_tpid = vf_item->vlan_proto; + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + /* 判断单播mac是否存在 */ + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + + for (i = 0; i < msg->mac_config.unicast_del_count; i++) { + ret = dpp_unicast_mac_search( + &pf_info, msg->mac_config.unicast_mac_array[i].mac_addr, + sriov_vlan_tpid, sriov_vlan_id, ¤t_vport); + if ((ret == 0) && + (current_vport == + vf_item->vport)) { /* 找到此单播mac,且属于此vf */ + continue; + } else { + LOG_INFO("Mac:%pM not exists\n", + msg->mac_config.unicast_mac_array[i].mac_addr); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_SUCCESS; + } + } + + /* 构建单播mac数组 */ + l2_entry = kzalloc(sizeof(ZXDH_L2_FWD_KEY) * + msg->mac_config.unicast_del_count, + GFP_KERNEL); + if (l2_entry == NULL) { + LOG_ERR("l2_entry kzalloc failed\n"); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + for (i = 0; i < msg->mac_config.unicast_del_count; i++) { + memcpy(l2_entry[i].dmac_addr, + msg->mac_config.unicast_mac_array[i].mac_addr, ETH_ALEN); + l2_entry[i].sriov_vlan_id = sriov_vlan_id; + l2_entry[i].sriov_vlan_tpid = sriov_vlan_tpid; + } + + /* 删除单播mac */ + ret = dpp_batch_del_unicast_mac( + &pf_info, msg->mac_config.unicast_del_count, l2_entry); + if (ret 
!= 0) { + kfree(l2_entry); + mutex_unlock(&vf_item->lock); + LOG_ERR("del unicast failed\n"); + return MAC_CONFIG_FAILED; + } + + /* 从vf_item中删除 */ + for (i = 0; i < msg->mac_config.unicast_del_count; i++) { + zxdh_vf_item_mac_del(vf_item, l2_entry[i].dmac_addr); + } + + LOG_DEBUG("current_unicast_num is %u\n", + vf_item->vf_mac_info.current_unicast_num); + kfree(l2_entry); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_SUCCESS; +} + +int32_t zxdh_pf_del_vf_multicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg) +{ + struct zxdh_vf_item *vf_item = NULL; + int32_t ret = 0; + uint32_t i = 0; + uint32_t j = 0; + uint32_t is_exists = 0; + DPP_PF_INFO_T pf_info = { 0 }; + MacAddress *hash_entry = NULL; + + LOG_INFO("zxdh_pf_del_vf_multicast_mac is called\n"); + + /* 判断vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, + msg->mac_config.target_vf)) { + LOG_ERR("vf(%u) is not probed\n", msg->mac_config.target_vf); + return VF_ERROR; + } + + /* 获取vf_item */ + vf_item = en_dev->ops->get_vf_item(en_dev->parent, + msg->mac_config.target_vf); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", msg->mac_config.target_vf); + return MAC_CONFIG_FAILED; + } + + mutex_lock(&vf_item->lock); + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + + /* 判断此组播mac是否存在 */ + for (i = 0; i < msg->mac_config.multicast_del_count; i++) { + for (j = 0; j < VF_MAX_MULTICAST_MAC; j++) { + if (ether_addr_equal( + msg->mac_config.multicast_mac_array[i] + .mac_addr, + vf_item->vf_mac_info.multicast_mac[j] + .mac_addr)) { + is_exists = 1; + } + } + if (is_exists != 1) { + LOG_INFO( + "multicast mac:%pM not exists\n", + msg->mac_config.multicast_mac_array[i].mac_addr); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_SUCCESS; + } + } + + /* 构建mac数组 */ + hash_entry = kzalloc(sizeof(MacAddress) * + msg->mac_config.multicast_del_count, + GFP_KERNEL); + if (hash_entry == NULL) { + LOG_ERR("hash_entry kzalloc failed\n"); + 
mutex_unlock(&vf_item->lock); + return MAC_CONFIG_FAILED; + } + for (i = 0; i < msg->mac_config.multicast_del_count; i++) { + memcpy(hash_entry[i].mac_addr, + msg->mac_config.multicast_mac_array[i].mac_addr, + ETH_ALEN); + } + + /* 删除组播mac */ + ret = dpp_batch_del_multicast_mac( + &pf_info, msg->mac_config.multicast_del_count, hash_entry); + if (ret != 0) { + kfree(hash_entry); + mutex_unlock(&vf_item->lock); + LOG_ERR("del multicast failed\n"); + return MAC_CONFIG_FAILED; + } + + /* 从vf_item中删除 */ + for (i = 0; i < msg->mac_config.multicast_del_count; i++) { + zxdh_vf_item_mac_del(vf_item, hash_entry[i].mac_addr); + } + + LOG_DEBUG("current_multicast_num is %u\n", + vf_item->vf_mac_info.current_multicast_num); + kfree(hash_entry); + mutex_unlock(&vf_item->lock); + return MAC_CONFIG_SUCCESS; +} + +void vf_item_unicast_mac_transfer(struct zxdh_vf_item *src_vf_item, + struct zxdh_vf_item *dst_vf_item) +{ + int32_t i = 0; + int32_t j = 0; + + /* 迁移单播mac */ + for (i = 0; i < VF_MAX_UNICAST_MAC; i++) { + /* 先找到待迁移的单播mac */ + if (is_zero_ether_addr( + src_vf_item->vf_mac_info.unicast_mac[i].mac_addr)) { + continue; + } + + /* 判断是否有相同mac */ + for (j = 0; j < VF_MAX_UNICAST_MAC; j++) { + if (!is_zero_ether_addr( + dst_vf_item->vf_mac_info.unicast_mac[j] + .mac_addr)) { + if (ether_addr_equal(src_vf_item->vf_mac_info + .unicast_mac[i] + .mac_addr, + dst_vf_item->vf_mac_info + .unicast_mac[j] + .mac_addr)) { + /* src_vf和dst_vf存在相同mac,不做迁移*/ + break; + } + } + } + + /* 添加到dst_vf的vf_item */ + zxdh_vf_item_mac_add( + dst_vf_item, + src_vf_item->vf_mac_info.unicast_mac[i].mac_addr, 1); + } + return; +} + +void vf_item_multicast_mac_transfer(struct zxdh_vf_item *src_vf_item, + struct zxdh_vf_item *dst_vf_item) +{ + int32_t i = 0; + int32_t j = 0; + + /* 迁移单播mac */ + for (i = 0; i < VF_MAX_MULTICAST_MAC; i++) { + /* 先找到待迁移的单播mac */ + if (is_zero_ether_addr(src_vf_item->vf_mac_info.multicast_mac[i] + .mac_addr)) { + continue; + } + + /* 判断是否有相同mac */ + for (j = 0; j < 
VF_MAX_MULTICAST_MAC; j++) { + if (!is_zero_ether_addr( + dst_vf_item->vf_mac_info.multicast_mac[j] + .mac_addr)) { + if (ether_addr_equal(src_vf_item->vf_mac_info + .multicast_mac[i] + .mac_addr, + dst_vf_item->vf_mac_info + .multicast_mac[j] + .mac_addr)) { + /* src_vf和dst_vf存在相同mac,不做迁移*/ + break; + } + } + } + + /* 添加到dst_vf的vf_item */ + zxdh_vf_item_mac_add( + dst_vf_item, + src_vf_item->vf_mac_info.multicast_mac[i].mac_addr, 1); + } + return; +} + +void vf_item_mac_print(struct zxdh_vf_item *vf_item, uint32_t vf_id) +{ + int32_t i = 0; + + for (i = 0; i < VF_MAX_UNICAST_MAC; i++) { + if (!is_zero_ether_addr( + vf_item->vf_mac_info.unicast_mac[i].mac_addr)) { + LOG_DEBUG("the %u unicast mac is %pM\n", i, + vf_item->vf_mac_info.unicast_mac[i].mac_addr); + } + } + + for (i = 0; i < VF_MAX_MULTICAST_MAC; i++) { + if (!is_zero_ether_addr( + vf_item->vf_mac_info.multicast_mac[i].mac_addr)) { + LOG_DEBUG( + "the %u multicast mac is %pM\n", i, + vf_item->vf_mac_info.multicast_mac[i].mac_addr); + } + } + + LOG_INFO("print %u mac success\n", vf_id) + return; +} + +/* 判断MAC地址对应的是组播IPv6还是组播IPv4地址 */ +int32_t MulticastType_get(uint8_t *mac_addr) +{ + if (mac_addr[0] == 0x33 && mac_addr[1] == 0x33) { + return 0; + } else if (mac_addr[0] == 0x01 && mac_addr[1] == 0x00 && + mac_addr[2] == 0x5e) { + return 0; + } + return -1; /* 返回-1表示不是已知类型的组播MAC地址*/ +} + +int32_t ip4_ip6_multicast_mac_del(struct zxdh_en_device *en_dev, + struct zxdh_vf_item *src_vf_item, + uint32_t src_vf) +{ + struct dhtool_set_vf_mac_msg msg = { 0 }; + int32_t i = 0; + int32_t ret = 0; + + msg.mac_config.target_vf = src_vf; + + for (i = 0; i < VF_MAX_MULTICAST_MAC; i++) { + /* 删除条件:非dhtool添加的ipv6和Ipv4组播mac */ + if ((MulticastType_get(src_vf_item->vf_mac_info.multicast_mac[i] + .mac_addr) == 0) && + (src_vf_item->vf_mac_info.multicast_mac[i] + .dhtool_mac_set_flag == 0)) { + memcpy(msg.mac_config + .multicast_mac_array + [msg.mac_config + .multicast_del_count] + .mac_addr, + 
src_vf_item->vf_mac_info.multicast_mac[i] + .mac_addr, + ETH_ALEN); + msg.mac_config.multicast_del_count++; + } + } + + if (msg.mac_config.multicast_del_count != 0) { + ret = zxdh_pf_del_vf_multicast_mac(en_dev, &msg); + if (ret != 0) { + LOG_ERR("zxdh_pf_del_vf_multicast_mac failed before transfer\n"); + return MAC_CONFIG_FAILED; + } + } + return 0; +} + +int32_t local_host_unciast_del(struct zxdh_en_device *en_dev, + struct zxdh_vf_item *src_vf_item, + uint32_t src_vf) +{ + struct dhtool_set_vf_mac_msg msg = { 0 }; + int32_t ret = 0; + + /* 先删除待迁移的vf0的本机mac */ + memcpy(msg.mac_config.unicast_mac_array[0].mac_addr, + src_vf_item->vf_mac_info.unicast_mac[0].mac_addr, ETH_ALEN); + msg.mac_config.target_vf = src_vf; + msg.mac_config.unicast_del_count = 1; + + ret = zxdh_pf_del_vf_unicast_mac(en_dev, &msg); + if (ret != 0) { + LOG_ERR("del mac before transfer failed\n"); + return MAC_CONFIG_FAILED; + } + + /* 删除vf_item中存放的本机mac */ + memset(src_vf_item->vf_mac_info.unicast_mac[0].mac_addr, 0, ETH_ALEN); + src_vf_item->vf_mac_info.current_unicast_num--; + + return 0; +} + +int32_t zxdh_pf_transfer_vf_mac(struct zxdh_en_device *en_dev, uint32_t src_vf, + uint32_t dst_vf) +{ + struct zxdh_vf_item *src_vf_item = NULL; + struct zxdh_vf_item *dst_vf_item = NULL; + DPP_PF_INFO_T src_pf_info = { 0 }; + DPP_PF_INFO_T dst_pf_info = { 0 }; + int32_t ret = 0; + + LOG_INFO("zxdh_pf_transfer_vf_mac is called\n"); + + /* 判断src_vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, src_vf)) { + LOG_ERR("vf(%u) is not probed\n", src_vf); + return VF_ERROR; + } + + /* 判断dst_vf是否probe */ + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, dst_vf)) { + LOG_ERR("vf(%u) is not probed\n", dst_vf); + return VF_ERROR; + } + + /* 获取src_vf的vf_item */ + src_vf_item = en_dev->ops->get_vf_item(en_dev->parent, src_vf); + if (IS_ERR_OR_NULL(src_vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", src_vf); + return MAC_CONFIG_FAILED; + } + src_pf_info.slot = en_dev->slot_id; + src_pf_info.vport 
= src_vf_item->vport; + + /* 获取dst_vf的vf_item */ + dst_vf_item = en_dev->ops->get_vf_item(en_dev->parent, dst_vf); + if (IS_ERR_OR_NULL(dst_vf_item)) { + LOG_ERR("get_vf(%u)_item failed\n", dst_vf); + return MAC_CONFIG_FAILED; + } + dst_pf_info.slot = en_dev->slot_id; + dst_pf_info.vport = dst_vf_item->vport; + + /* 本机mac删除 */ + ret = local_host_unciast_del(en_dev, src_vf_item, src_vf); + if (ret != 0) { + LOG_ERR("del local mac failed\n"); + return MAC_CONFIG_FAILED; + } + + /* 删除ipv4和ipv6对应的组播mac */ + ret = ip4_ip6_multicast_mac_del(en_dev, src_vf_item, src_vf); + if (ret != 0) { + LOG_ERR("ip4_ip6_multicast_mac_del failed\n"); + return MAC_CONFIG_FAILED; + } + + mutex_lock(&src_vf_item->lock); + mutex_lock(&dst_vf_item->lock); + /* 组播mac迁移 */ + if (src_vf_item->vf_mac_info.current_multicast_num != 0) { + ret = dpp_multicast_mac_transfer(&src_pf_info, &dst_pf_info); + if (ret != 0) { + LOG_ERR("dpp_multicast_mac_transfer from %u to %u failed\n", + src_vf, dst_vf); + mutex_unlock(&dst_vf_item->lock); + mutex_unlock(&src_vf_item->lock); + return MULTICAST_MAC_TRANSFER_FAILED; + } + } + + /* 单播mac迁移 */ + if (src_vf_item->vf_mac_info.current_unicast_num != 0) { + ret = dpp_unicast_mac_transfer(&src_pf_info, &dst_pf_info); + if (ret != 0) { + LOG_ERR("dpp_unicast_mac_transfer from %u to %u failed\n", + src_vf, dst_vf); + mutex_unlock(&dst_vf_item->lock); + mutex_unlock(&src_vf_item->lock); + return UNICAST_MAC_TRANSFER_FAILED; + } + } + + /* 将src_vf的vf_item中的的mac迁移到dst_vf中 */ + vf_item_unicast_mac_transfer(src_vf_item, dst_vf_item); + vf_item_multicast_mac_transfer(src_vf_item, dst_vf_item); + + /* 清除src_vf的vf_item中存放的mac数组*/ + memset(&src_vf_item->vf_mac_info, 0, sizeof(src_vf_item->vf_mac_info)); + + LOG_DEBUG("src_vf(%u) current unicast mac num is %u\n", src_vf, + src_vf_item->vf_mac_info.current_unicast_num); + LOG_DEBUG("src_vf(%u) current multicast mac num is %u\n", src_vf, + src_vf_item->vf_mac_info.current_multicast_num); + LOG_DEBUG("dst_vf(%u) current 
unicast mac num is %u\n", dst_vf, + dst_vf_item->vf_mac_info.current_unicast_num); + LOG_DEBUG("dst_vf(%u) current multicast mac num is %u\n", dst_vf, + dst_vf_item->vf_mac_info.current_multicast_num); + + vf_item_mac_print(src_vf_item, src_vf); + vf_item_mac_print(dst_vf_item, dst_vf); + + mutex_unlock(&dst_vf_item->lock); + mutex_unlock(&src_vf_item->lock); + + LOG_INFO("zxdh_pf_transfer_vf_mac success\n"); + return MAC_CONFIG_SUCCESS; +} + +int32_t zxdh_en_set_vepa(struct zxdh_en_device *en_dev, bool setting) +{ + struct zxdh_vf_item *vf_item = NULL; + bool vepa = false; + uint16_t vf_idx = 0; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + vepa = en_dev->ops->get_vepa(en_dev->parent); + if (setting == vepa) { + LOG_ERR("vport(0x%x) is now %s mode\n", en_dev->vport, + vepa ? "vepa" : "veb"); + return 0; + } + + en_dev->ops->set_vepa(en_dev->parent, setting); + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_VEPA_EN_OFF, + (uint32_t)setting); + if (ret != 0) { + LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", + en_dev->vport, setting ? "vepa" : "veb", ret); + return ret; + } + + for (vf_idx = 0; vf_idx < ZXDH_VF_NUM_MAX; vf_idx++) { + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + if (IS_ERR_OR_NULL(vf_item)) { + break; + } + + if (vf_item->is_probed) { + pf_info.vport = vf_item->vport; + ret = dpp_vport_attr_set(&pf_info, + SRIOV_VPORT_VEPA_EN_OFF, + (uint32_t)setting); + if (ret != 0) { + LOG_ERR("Failed to setup vport(0x%x) %s mode, ret: %d\n", + vf_item->vport, + setting ? "vepa" : "veb", ret); + return ret; + } + LOG_DEBUG("Configure vport(0x%x) to %s mode\n", + vf_item->vport, setting ? "vepa" : "veb"); + } + } + + LOG_INFO("Configure vport(0x%x) to %s mode\n", en_dev->vport, + setting ? 
"vepa" : "veb"); + + return ret; +} + +#ifdef HAVE_FDB_OPS +static int zxdh_en_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, u16 flags, + struct netlink_ext_ack *extack) +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = + &en_priv->edev; /*aux层net_device的私有结构体 */ + int32_t err = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + LOG_DEBUG("vport is %#x\n", en_dev->vport); + LOG_DEBUG("addr is %pM\n", addr); + LOG_DEBUG("ndm_state is %u\n", ndm->ndm_state); + + /* 检查这个设备的ndm状态是否是静态的 */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + LOG_ERR("FDB only supports static addresses\n"); + return -EINVAL; + } + + /* 判断mac地址是否全0 */ + if (is_zero_ether_addr(addr)) { + LOG_ERR("Invalid mac\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + err = unicast_mac_add(en_dev, dev, addr, flags); + if (err != 0) { + LOG_ERR("unicast_mac_add failed"); + return err; + } + } else if (is_multicast_ether_addr(addr)) { + err = multicast_mac_add(en_dev, dev, addr, flags); + if (err != 0) { + LOG_ERR("multicast_mac_add failed"); + return err; + } + } else { + err = -EINVAL; + } + +#ifdef MAC_CONFIG_DEBUG + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = zxdh_pf_dump_all_mac(en_dev); + if (err != 0) { + LOG_INFO("zxdh_pf_dump_all_mac failed, ret:%d\n", err); + return -1; + } + } +#endif /* MAC_CONFIG_DEBUG */ + + LOG_DEBUG("zxdh_en_ndo_fdb_add end\n"); + return err; +} + +#ifdef HAVE_NDO_FEATURES_CHECK +static netdev_features_t zxdh_en_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + return features; +} +#endif /* HAVE_NDO_FEATURES_CHECK */ + +static int zxdh_en_ndo_fdb_del(struct ndmsg *ndm, struct nlattr **nla, + struct net_device *dev, + const unsigned char *addr, u16 vid, + struct netlink_ext_ack *ack) +{ + struct zxdh_en_priv *en_priv = 
netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + LOG_DEBUG("the vport is %#x", en_dev->vport); + LOG_DEBUG("the addr is %pM\n", addr); + LOG_DEBUG("ndm_state is %u,\n", ndm->ndm_state); + + /* 检查这个设备的ndm状态是否是静态的 */ + if (!(ndm->ndm_state & NUD_PERMANENT)) { + LOG_ERR("FDB only supports static addresses\n"); + return -EINVAL; + } + + /* 地址是否全为0 */ + if (is_zero_ether_addr(addr)) { + LOG_ERR("Invalid mac address\n"); + return -EINVAL; + } + + /* 根据mac地址类型,对相对应地址链表做删除操作 */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + err = unicast_mac_del(en_dev, dev, addr); + if (err != 0) { + LOG_ERR("unicast_mac_del failed\n"); + return err; + } + } else if (is_multicast_ether_addr(addr)) { + err = multicast_mac_del(en_dev, dev, addr); + if (err != 0) { + LOG_ERR("multicast_mac_del failed\n"); + return err; + } + } else { + return -EINVAL; + } + +#ifdef MAC_CONFIG_DEBUG + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + /*先dump所有mac地址*/ + err = zxdh_pf_dump_all_mac(en_dev); + if (err != 0) { + LOG_ERR("zxdh_pf_dump_all_mac failed, ret:%d\n", err); + return -1; + } + } +#endif /* MAC_CONFIG_DEBUG */ + + LOG_DEBUG("zxdh_en_ndo_fdb_del end\n"); + return err; +} + +#ifdef HAVE_BRIDGE_ATTRIBS +static int zxdh_en_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, u16 flags, + struct netlink_ext_ack *extack) +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct nlattr *attr = NULL; + struct nlattr *br_spec = NULL; + int32_t rem = 0; + uint16_t mode = BRIDGE_MODE_UNDEF; + bool setting = false; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return -EOPNOTSUPP; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (br_spec == NULL) { + return -EINVAL; + } + + nla_for_each_nested(attr, 
br_spec, rem) { + if (nla_type(attr) != IFLA_BRIDGE_MODE) { + continue; + } + + if (nla_len(attr) < sizeof(mode)) { + return -EINVAL; + } + + mode = nla_get_u16(attr); + if (mode > BRIDGE_MODE_VEPA) { + return -EINVAL; + } + break; + } + + if (mode == BRIDGE_MODE_UNDEF) { + return -EINVAL; + } + + setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0; + + return zxdh_en_set_vepa(en_dev, setting); +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask, + int nlflags) +#elif defined(HAVE_BRIDGE_FILTER) +static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int zxdh_en_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* NDO_BRIDGE_STUFF */ +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t mode = 0; + bool vepa = false; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return -EOPNOTSUPP; + } + + vepa = en_dev->ops->get_vepa(en_dev->parent); + mode = vepa ? 
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ + +static int32_t zxdh_pf_notify_vf_set_link_state(struct zxdh_en_device *en_dev, + int vf_idx, bool link_up) +{ + int32_t retval = 0; + uint16_t func_no = 0; + uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id); + uint8_t link_info = 0; + uint8_t link_up_val = 0; + uint8_t phyport_val = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY; + msg->payload.hdr_to_agt.pcie_id = en_dev->pcie_id; + + func_no = GET_FUNC_NO(pf_no, vf_idx); + LOG_DEBUG("vf_idx:%d, func_no=0x%x\n", vf_idx, func_no); + msg->payload.pcie_msix_msg.func_no[msg->payload.pcie_msix_msg.num++] = + func_no; + if (en_dev->ops->is_bond(en_dev->parent)) { + link_up_val = link_up ? 1 : 0; + phyport_val = en_dev->ops->get_pf_phy_port(en_dev->parent); + link_info = (phyport_val & 0x0F) << 4 | (link_up_val & 0x0F); + LOG_DEBUG("phyport and link_up need write to VQM, val: 0x%x\n", + link_info); + en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, + link_info); + } else { + en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, + link_up ? 
1 : 0); + } + LOG_DEBUG("msg->pcie_msix_msg.num:%d\n", + msg->payload.pcie_msix_msg.num); + retval = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (retval != 0) { + LOG_ERR("failed to update VF link info\n"); + } + kfree(msg); + return retval; +} + +static int32_t zxdh_pf_set_vf_link_state(struct zxdh_en_device *en_dev, + int vf_idx, int link_status) +{ + int32_t retval = 0; + struct zxdh_vf_item *vf_item = NULL; + bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent); + + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + switch (link_status) { + case IFLA_VF_LINK_STATE_AUTO: + LOG_DEBUG( + "[SET_VF_LINK_STATE]--NDO set VF %d link state auto\n", + vf_idx); + vf_item->link_forced = FALSE; + vf_item->link_up = pf_link_up; + break; + case IFLA_VF_LINK_STATE_ENABLE: + LOG_DEBUG( + "[SET_VF_LINK_STATE]--NDO set VF %d link state enable\n", + vf_idx); + vf_item->link_forced = TRUE; + vf_item->link_up = TRUE; + break; + case IFLA_VF_LINK_STATE_DISABLE: + LOG_DEBUG( + "[SET_VF_LINK_STATE]--NDO set VF %d link state disable\n", + vf_idx); + vf_item->link_forced = TRUE; + vf_item->link_up = FALSE; + break; + default: + LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF %d - invalid link status %d\n", + vf_idx, link_status); + return -EINVAL; + } + LOG_DEBUG("vf_item->is_probed: %s\n", + vf_item->is_probed ? 
"TRUE" : "FALSE"); + if (vf_item->is_probed) { + /* Notify the VF of its new link state */ + retval = zxdh_pf_notify_vf_set_link_state(en_dev, vf_idx, + vf_item->link_up); + if (0 != retval) { + LOG_ERR("[SET_VF_LINK_STATE]--Failed to set VF %d link state %d\n", + vf_idx, vf_item->link_up); + return retval; + } + } + return retval; +} + +int zxdh_en_ndo_set_vf_link_state(struct net_device *netdev, int vf_idx, + int link_status) +{ + int num_vfs = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct pci_dev *pdev = NULL; + struct dh_core_dev *dh_dev = NULL; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + dh_dev = en_dev->parent; + pdev = en_dev->ops->get_pdev(dh_dev); + num_vfs = pci_num_vf(pdev); + if ((vf_idx < 0) || (vf_idx >= num_vfs)) { + LOG_ERR("[SET_VF_LINK_STATE]--NDO set VF link - invalid VF idx: %d\n", + vf_idx); + return -EINVAL; + } + return zxdh_pf_set_vf_link_state(en_dev, vf_idx, link_status); +} + +static int zxdh_enable_sriov_vlan_tbl(DPP_PF_INFO_T *pf_info, uint16_t vlan_tci, + uint16_t vlan_proto) +{ + int ret = 0; + + ret = dpp_vport_vlan_offload_en_set(pf_info, 1); + if (ret != 0) { + return ret; + } + + dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TCI, vlan_tci); + + dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TPID, vlan_proto); + + return 0; +} + +static int zxdh_disable_sriov_vlan_tbl(DPP_PF_INFO_T *pf_info) +{ + int ret = 0; + + ret = dpp_vport_vlan_offload_en_set(pf_info, 0); + if (ret != 0) { + return ret; + } + + dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TCI, 0); + + dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TPID, 0); + + return 0; +} + +static int32_t zxdh_handle_mac_operations(DPP_PF_INFO_T *pf_info, + MAC_VPORT_INFO *unicast_mac_arry, + uint32_t current_unicast_num, + struct zxdh_vf_item *vf_item, + uint16_t new_vlan_tci, + uint16_t vlan_proto, int add) +{ + int32_t retval = 0; + uint32_t i = 0; + for (i = 0; i < current_unicast_num; i++) { + if (vf_item->vport 
== unicast_mac_arry[i].vport) { + if (add) { + retval = dpp_add_mac(pf_info, + unicast_mac_arry[i].addr, + htons(vlan_proto), + new_vlan_tci); + } else { + retval = dpp_del_mac( + pf_info, unicast_mac_arry[i].addr, + unicast_mac_arry[i].sriov_vlan_tpid, + unicast_mac_arry[i].sriov_vlan_id); + } + + if (retval != 0) { + return retval; + } + } + } + return 0; +} + +static int32_t zxdh_pf_set_vf_port_vlan(struct zxdh_en_device *en_dev, + int vf_idx, u16 vid, u8 qos, + uint16_t vlan_proto) +{ + int32_t retval = 0; + uint32_t i = 0; + struct zxdh_vf_item *vf_item = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + uint16_t new_vlan_tci = ZXDH_VLAN_TCI_GEN(vid, qos); + uint16_t old_vlan_tci = 0; + MAC_VPORT_INFO *unicast_mac_arry = NULL; + uint32_t current_unicast_num = 0; + uint16_t current_vport = 0; + struct zxdh_vf_item *cur_vf_item = NULL; + struct pci_dev *pdev = NULL; + uint16_t num_vfs = 0; + + pdev = en_dev->ops->get_pdev(en_dev->parent); + num_vfs = pci_num_vf(pdev); + if (num_vfs == 0) { + LOG_ERR("vf is disable, vf number:%d\n", num_vfs); + return -ENODEV; + } + + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + old_vlan_tci = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + if (new_vlan_tci == 0) { + vlan_proto = 0; + } + + /* 参数vlan_proto是网络字节序, 转为为主机字节序保存在vf_item中*/ + if (new_vlan_tci == old_vlan_tci && + vf_item->vlan_proto == htons(vlan_proto)) { + return 0; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + unicast_mac_arry = (MAC_VPORT_INFO *)kzalloc( + sizeof(MAC_VPORT_INFO) * UNICAST_MAX_NUM, GFP_KERNEL); + if (unicast_mac_arry == NULL) { + LOG_ERR("kzalloc unicast_mac_arry failed \n"); + return -ENOMEM; + } + + mutex_lock(&vf_item->lock); + retval = dpp_unicast_mac_dump(&pf_info, unicast_mac_arry, + ¤t_unicast_num); + if (retval != 0) { + LOG_ERR("dpp_unicast_mac_dump failed, ret:%d\n", retval); + retval = -1; + goto out_free; + } + + for (i = 0; i < current_unicast_num; i++) { + if (vf_item->vport == 
unicast_mac_arry[i].vport) { + retval = dpp_unicast_mac_search( + &pf_info, unicast_mac_arry[i].addr, + htons(vlan_proto), new_vlan_tci, + ¤t_vport); + if ((retval == 0) && + (vf_item->vport != current_vport)) { + LOG_ERR("modify vlan failed, [Mac]+[vlan] Already exists: current_vport=0x%04x\n", + current_vport); + LOG_ERR("new_vlan_tci=%d; htons(vlan_proto)=%d\n", + new_vlan_tci, htons(vlan_proto)); + LOG_ERR("mac = %x %x %x %x %x %x\n", + unicast_mac_arry[i].addr[0], + unicast_mac_arry[i].addr[1], + unicast_mac_arry[i].addr[2], + unicast_mac_arry[i].addr[3], + unicast_mac_arry[i].addr[4], + unicast_mac_arry[i].addr[5]); + retval = -EEXIST; + goto out_free; + } else if ((retval != 0) && + (retval != DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", + retval); + retval = -1; + goto out_free; + } + } + } + + if (vf_item->pf_set_mac) { + retval = dpp_unicast_mac_search(&pf_info, vf_item->mac, + htons(vlan_proto), new_vlan_tci, + ¤t_vport); + if ((retval == 0) && (current_vport != vf_item->vport)) { + LOG_ERR("modify vlan failed, [Itm Mac]+[vlan] Already exists np: current_vport=0x%04x\n", + current_vport); + LOG_ERR("new_vlan_tci=%d; htons(vlan_proto)=%d\n", + new_vlan_tci, htons(vlan_proto)); + LOG_ERR("mac = %x %x %x %x %x %x\n", vf_item->mac[0], + vf_item->mac[1], vf_item->mac[2], + vf_item->mac[3], vf_item->mac[4], + vf_item->mac[5]); + retval = -EEXIST; + goto out_free; + } else if ((retval != 0) && (retval != DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", + retval); + retval = -1; + goto out_free; + } + + for (i = 0; i < num_vfs; i++) { + if (i == vf_idx) + continue; + cur_vf_item = + en_dev->ops->get_vf_item(en_dev->parent, i); + if (IS_ERR_OR_NULL(cur_vf_item)) { + LOG_ERR("Failed to get vf_item, vf_idx:%d\n", + i); + retval = -EEXIST; + goto out_free; + } + + if (ether_addr_equal(cur_vf_item->mac, vf_item->mac) && + ((ZXDH_VLAN_TCI_GEN(cur_vf_item->vlan, + cur_vf_item->qos) == + new_vlan_tci) && + 
(cur_vf_item->vlan_proto == htons(vlan_proto)))) { + LOG_ERR("modify vlan failed, [Itm Mac]+[vlan] Already exists: current_vport=0x%04x\n", + current_vport); + LOG_ERR("new_vlan_tci=%d; htons(vlan_proto)=%d\n", + new_vlan_tci, htons(vlan_proto)); + LOG_ERR("mac = %x %x %x %x %x %x\n", + vf_item->mac[0], vf_item->mac[1], + vf_item->mac[2], vf_item->mac[3], + vf_item->mac[4], vf_item->mac[5]); + retval = -EEXIST; + goto out_free; + } + } + } + + pf_info.vport = vf_item->vport; + + if (vid) { + if (en_dev->ops->get_vf_is_probe(en_dev->parent, vf_idx)) { + retval = zxdh_handle_mac_operations( + &pf_info, unicast_mac_arry, current_unicast_num, + vf_item, old_vlan_tci, vlan_proto, 0); + if (retval != 0) { + LOG_ERR("del handle mac operations failed: %d\n", + retval); + retval = -1; + goto out_free; + } + + retval = zxdh_handle_mac_operations( + &pf_info, unicast_mac_arry, current_unicast_num, + vf_item, new_vlan_tci, vlan_proto, 1); + if (retval != 0) { + LOG_ERR("add handle mac operations failed: %d\n", + retval); + retval = -1; + goto out_free; + } + } + + retval = zxdh_enable_sriov_vlan_tbl(&pf_info, new_vlan_tci, + htons(vlan_proto)); + if (retval != 0) { + LOG_ERR("zxdh_enable_sriov_vlan_tbl, retval: %d\n", + retval); + retval = -1; + goto out_free; + } + } else { + if (en_dev->ops->get_vf_is_probe(en_dev->parent, vf_idx)) { + retval = zxdh_handle_mac_operations( + &pf_info, unicast_mac_arry, current_unicast_num, + vf_item, old_vlan_tci, vlan_proto, 0); + if (retval != 0) { + LOG_ERR("dpp del all unicast mac failed: %d\n", + retval); + retval = -1; + goto out_free; + } + + retval = zxdh_handle_mac_operations(&pf_info, + unicast_mac_arry, + current_unicast_num, + vf_item, 0, 0, 1); + if (retval != 0) { + LOG_ERR("dpp add all unicast mac failed: %d\n", + retval); + retval = -1; + goto out_free; + } + } + + retval = zxdh_disable_sriov_vlan_tbl(&pf_info); + if (retval != 0) { + LOG_ERR("zxdh_disable_sriov_vlan_tbl failed: %d\n", + retval); + retval = -1; + goto 
out_free; + } + } + + /* 更新pf本地的vf vlan信息,用于ip link show显示, 和vf初始化获取和重配*/ + vf_item->vlan = vid; + vf_item->qos = qos; + vf_item->vlan_proto = htons(vlan_proto); + +out_free: + mutex_unlock(&vf_item->lock); + kfree(unicast_mac_arry); + return retval; +} + +int zxdh_en_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_vf_item *vf_item = NULL; + struct zxdh_vf_item *cur_vf_item = NULL; + int32_t retval = 0; + uint8_t i = 0; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + uint16_t current_vport = 0; + uint16_t num_vfs = 0; + struct pci_dev *pdev = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + LOG_DEBUG("[SET_VF_MAC]--setting MAC %pM on VF %d\n", mac, vf_id); + pdev = en_dev->ops->get_pdev(en_dev->parent); + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + num_vfs = pci_num_vf(pdev); + if (num_vfs == 0) { + LOG_ERR("vf is disable, vf number:%d\n", num_vfs); + return -ENODEV; + } + + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id); + if (IS_ERR_OR_NULL(vf_item)) { + return -ENODEV; + } + + if (is_multicast_ether_addr(mac)) { + LOG_ERR("Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + return -EINVAL; + } + + if (is_zero_ether_addr(mac)) { + vf_item->pf_set_mac = false; + ether_addr_copy(vf_item->mac, mac); + return 0; + } + + mutex_lock(&vf_item->lock); + sriov_vlan_tpid = vf_item->vlan_proto; + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + retval = dpp_unicast_mac_search(&pf_info, mac, sriov_vlan_tpid, + sriov_vlan_id, ¤t_vport); + if ((retval == 0) && (current_vport == vf_item->vport)) { + mutex_unlock(&vf_item->lock); + return 0; + } else if ((retval == 0) && (current_vport != vf_item->vport)) { + LOG_ERR("Mac Already exists\n"); + mutex_unlock(&vf_item->lock); + return -EEXIST; + } else if ((retval != 0) && (retval != 
DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", retval); + mutex_unlock(&vf_item->lock); + return -1; + } + + for (i = 0; i < num_vfs; i++) { + if (i == vf_id) + continue; + cur_vf_item = en_dev->ops->get_vf_item(en_dev->parent, i); + if (IS_ERR_OR_NULL(cur_vf_item)) { + LOG_ERR("Failed to get vf_item, vf_id:%d\n", i); + mutex_unlock(&vf_item->lock); + return -ENODEV; + } + + if (ether_addr_equal(cur_vf_item->mac, mac) && + ((ZXDH_VLAN_TCI_GEN(cur_vf_item->vlan, cur_vf_item->qos) == + sriov_vlan_id) && + (cur_vf_item->vlan_proto == vf_item->vlan_proto))) { + LOG_INFO("Mac already exists vf %d\n", i); + mutex_unlock(&vf_item->lock); + return -EEXIST; + } + } + + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, vf_id)) { + goto set_flag; + } + + en_dev->ops->set_vf_mac(en_dev->parent, mac, vf_id); + +set_flag: + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id); + vf_item->pf_set_mac = true; + ether_addr_copy(vf_item->mac, mac); + mutex_unlock(&vf_item->lock); + return 0; +} + +#ifdef IFLA_VF_VLAN_INFO_MAX +int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, + u16 vlan_id, u8 qos, __be16 vlan_proto) +#else +int zxdh_en_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, + u16 vlan_id, u8 qos) +#endif /* IFLA_VF_VLAN_INFO_MAX */ +{ + int num_vfs = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct pci_dev *pdev = NULL; + struct dh_core_dev *dh_dev = NULL; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + /* Comparing with the mellnox network card, it only supports the configuration of cvlan*/ + if (vlan_proto != htons(ETH_P_8021Q) && + vlan_proto != htons(ETH_P_8021AD)) { + return -EPROTONOSUPPORT; + } + dh_dev = en_dev->parent; + pdev = en_dev->ops->get_pdev(dh_dev); + num_vfs = pci_num_vf(pdev); + if (vf_id >= num_vfs || vlan_id > MAX_VLAN_ID || qos > MAX_QOS_ID) { + LOG_ERR("[SET+VF_VLAN]--NDO set VF vlan - invalid VF idx: %d\n", + vf_id); + 
return -EINVAL; + } + return zxdh_pf_set_vf_port_vlan(en_dev, vf_id, vlan_id, qos, + vlan_proto); +} + +int zxdh_en_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, + int max_tx_rate) +{ + return 0; +} + +int zxdh_en_ndo_get_vf_config(struct net_device *netdev, int vf_idx, + struct ifla_vf_info *ivi) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_vf_item *vf_item = NULL; + + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx); + return PTR_ERR(vf_item); + } + + ivi->vf = vf_idx; + + ether_addr_copy(ivi->mac, vf_item->mac); + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = vf_item->max_tx_rate; + ivi->min_tx_rate = vf_item->min_tx_rate; +#else + ivi->tx_rate = vf_item->max_tx_rate; +#endif + + ivi->vlan = vf_item->vlan; + ivi->qos = vf_item->qos; + ivi->vlan_proto = htons(vf_item->vlan_proto); + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (vf_item->link_forced == false) { + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + } else if (vf_item->link_up == true) { + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + } else { + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + } +#endif + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = vf_item->spoofchk; +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = vf_item->trusted; +#endif + + return 0; +} + +int zxdh_en_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_idx, + bool enable) +{ + int ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_vf_item *vf_item = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx); + return PTR_ERR(vf_item); + } + + pf_info.slot = 
en_dev->slot_id; + pf_info.vport = vf_item->vport; + vf_item->spoofchk = enable; + LOG_DEBUG("vf %d spoof check is %s\n", vf_idx, + vf_item->spoofchk ? "on" : "off"); + if (vf_item->is_probed) { + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_SPOOFCHK_EN_OFF, + enable); + if (0 != ret) { + LOG_ERR("[SET_VF_SPOOFCHK]--Failed to set vf %d spookchk %s\n", + vf_idx, enable ? "on" : "off"); + return -1; + } + } + return ret; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int zxdh_en_ndo_set_vf_trust(struct net_device *netdev, int vf_idx, + bool setting) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_vf_item *vf_item = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_idx); + return PTR_ERR(vf_item); + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + vf_item->trusted = setting; + LOG_DEBUG("VF %u is now %strusted\n", vf_idx, setting ? "" : "un"); + if (vf_item->is_probed && !vf_item->trusted) { + LOG_DEBUG("vport[0x%x] promisc and allmulti off\n", + vf_item->vport); + vf_item->promisc = false; + vf_item->mc_promisc = false; + dpp_vport_uc_promisc_set(&pf_info, vf_item->promisc); + dpp_vport_promisc_en_set(&pf_info, vf_item->promisc); + dpp_vport_mc_promisc_set(&pf_info, vf_item->mc_promisc); + } + + return 0; +} +#endif + +int zxdh_en_ndo_set_tx_maxrate(struct net_device *netdev, int qid, + uint32_t max_rate) +{ + int rtn = 0; + zxdh_plcr_rate_limit_paras rate_limit_paras; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + PLCR_FUNC_DBG_ENTER(); + + /*1. 
入参检测:队列号不能超过vf下实际的队列数*/ + if (qid >= en_dev->curr_queue_pairs) { + LOG_ERR("zxdh_en_ndo_set_tx_maxrate : invalid parameter qid=%d\n", + qid); + return -EINVAL; + } +#if 0 + if (!en_dev->link_up) { + LOG_ERR("[EN SET TX MAXRATE]--PF is not link up.\n"); + return -EINVAL; + } + link_speed = en_dev->link_speed; +#endif + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_QUEUE_BYTE; + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = max_rate; + rate_limit_paras.min_rate = 0; + rate_limit_paras.queue_id = qid; + rate_limit_paras.vf_idx = PLCR_INVALID_PARAM; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); + + PLCR_LOG_INFO("The maxrate of tx-%d has been set to %dMbit/s\n", qid, + max_rate); + + //私有配置命令 + //引入vqm vf限速,提高小流量限速精度; + //条件1,4G以内满足配置能满足vqm限速周期全局共享,ECO回片可放宽限速值 + //条件2,每次配置新值需要将vqm vf限速清空再做配置 + rtn = zxdh_vqm_vf_set_rate_limit(pf_dev, qid, 0); + PLCR_COMM_ASSERT(rtn); + if (max_rate < 4000) { + rtn = zxdh_vqm_vf_set_rate_limit(pf_dev, qid, max_rate); + PLCR_COMM_ASSERT(rtn); + + PLCR_LOG_INFO( + "The Rate of VF item:%d has been set to: Max Tx Rate: %dMbit/s in vqm\n", + qid, max_rate); + } + return rtn; +} + +/**-------------------------------------------------------------------------------------------------------------------@n + * 功能详述: + * - zxdh_en_ndo_set_vf_rate函数属于接口函数, 其功能是: + * - 设置vf端口发送方向,最大速率和最小保证速率 + * - 该接口会挂接到内核的钩子上,函数声明是固定的 + * + * 基于plcr的端口限速背景: + * - 1.一级flowid与vqm的2K个(接收和发送)队列是一一映射的 + * - 2.二级flow id与vf num的映射关系 + * 端口限速,需要将vf下的发送队列(即一级flow id)映射到二级flowid + * 二级flow id的资源是4K,dpu限制vf数量是1K,即二级flow id数量 > vf数量 + * 所以规定固定的映射关系:二级flow id前1K <---> 与1K个vf(发送)一一对应 + * 下面的链接整理了pf下vf转换成全局vf(0-1023)的原理 + * https://i.zte.com.cn/#/space/4e62cb2b730540ff8721c1a8552b2356/wiki/page/ff8178f1304e45dc9457e92ff196cce5/view + * - 3.vf限速的设置 + * 
项目对vf提出了最小保证带宽的需求; + * 二级CAR的限速模板使用:双速率,三色算法,色敏模式 + * - 4.创建vf的其它考虑 + * 参考mlx的做法,vf创建之后,默认关联到vf组0(注意:>>>>>>>>先交付vf端口限速的需求,这一步可以暂时不实现<<<<<<<<); + * vf创建之后,用户设置限速才会调用到这里,用户不设置限速,vf(二级flow id)就不用关联限速模板 + * + * 参数概述: + * - netdev : 网络设备结构体指针 + * - vf_id :pf内vf的编号(从0开始) + * - min_tx_rate : 最小保证速率 + * - max_tx_rate : 最大速率 + * - 返回值类型是INT32, 含义是: 错误码,正确时为S_OK + * + * 引用(类变量,外部变量,接口函数): + * - 无 + * + * 注意:该函数挂接到pf的钩子上,只在pf下执行 + *--------------------------------------------------------------------------------------------------------------------*/ +int zxdh_en_ndo_set_vf_rate(struct net_device *netdev, int vf_id, + int min_tx_rate, int max_tx_rate) +{ + int rtn; + zxdh_plcr_rate_limit_paras rate_limit_paras; + + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + struct zxdh_vf_item *vf_item = NULL; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + PLCR_FUNC_DBG_ENTER(); + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE; + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = max_tx_rate; + rate_limit_paras.min_rate = min_tx_rate; + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = vf_id; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_id); + return PTR_ERR(vf_item); + } + vf_item->min_tx_rate = min_tx_rate; + vf_item->max_tx_rate = max_tx_rate; + + if (!en_dev->ops->get_vf_is_probe(en_dev->parent, vf_id)) { + LOG_INFO("zxdh_en_ndo_set_vf_rate, vf %d is not probed\n", + vf_id); + return 0; + } + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); + + PLCR_LOG_INFO( + "The Rate of VF%d has 
been set to: Min Tx Rate: %dMbit/s, Max Tx Rate: %dMbit/s\n", + vf_id, min_tx_rate, max_tx_rate); + + // vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id); + // if (IS_ERR_OR_NULL(vf_item)) + // { + // LOG_ERR("Failed to get vf_item, vf_idx:%d\n", vf_id); + // return PTR_ERR(vf_item); + // } + // vf_item->min_tx_rate = min_tx_rate; + // vf_item->max_tx_rate = max_tx_rate; + + //引入vqm vf限速,提高小流量限速精度; + //条件1,4G以内满足配置能满足vqm限速周期全局共享,ECO回片可放宽限速值 + //条件2,每次配置新值需要将vqm vf限速清空再做配置 + rtn = zxdh_vqm_vf_set_rate_limit(pf_dev, vf_item->vport, 0); + PLCR_COMM_ASSERT(rtn); + if (max_tx_rate < 4000) { + rtn = zxdh_vqm_vf_set_rate_limit(pf_dev, vf_item->vport, + max_tx_rate); + PLCR_COMM_ASSERT(rtn); + + PLCR_LOG_INFO( + "The Rate of VF item:%d has been set to: Max Tx Rate: %dMbit/s in vqm\n", + vf_item->vport, max_tx_rate); + } + + return rtn; +} + +const struct net_device_ops zxdh_netdev_ops = { + .ndo_open = zxdh_en_open, + .ndo_stop = zxdh_en_close, + .ndo_start_xmit = zxdh_en_xmit, + + .ndo_get_stats64 = zxdh_en_get_netdev_stats_struct, + .ndo_set_rx_mode = zxdh_en_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = zxdh_en_set_mac, + + .ndo_change_mtu = zxdh_en_change_mtu, + + .ndo_eth_ioctl = zxdh_en_ioctl, + .ndo_siocdevprivate = zxdh_en_private_ioctl, + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + .ndo_select_queue = zxdh_en_select_queue, +#endif +#ifdef ZXDH_PLCR_OPEN + .ndo_set_tx_maxrate = zxdh_en_ndo_set_tx_maxrate, +#endif + .ndo_tx_timeout = zxdh_en_tx_timeout, + +#ifdef HAVE_VLAN_RX_REGISTER + .ndo_vlan_rx_register = zxdh_en_vlan_rx_register, +#endif + .ndo_vlan_rx_add_vid = zxdh_en_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = zxdh_en_vlan_rx_kill_vid, + .ndo_bpf = zxdh_en_xdp, + .ndo_xdp_xmit = zxdh_en_xdp_xmit, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = zxdh_en_netpoll, +#endif + +#ifdef HAVE_SETUP_TC +#ifdef NETIF_F_HW_TC + .ndo_setup_tc = __zxdh_en_setup_tc, +#else + .ndo_setup_tc = zxdh_en_setup_tc, +#endif /* 
NETIF_F_HW_TC */ +#endif /* HAVE_SETUP_TC */ + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = zxdh_en_ndo_set_vf_mac, +#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan, +#else + .ndo_set_vf_vlan = zxdh_en_ndo_set_vf_port_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#ifdef ZXDH_PLCR_OPEN + .ndo_set_vf_rate = zxdh_en_ndo_set_vf_rate, +#else + .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw, +#endif +#else + .ndo_set_vf_rate = zxdh_en_ndo_set_vf_bw, +#endif + .ndo_get_vf_config = zxdh_en_ndo_get_vf_config, +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = zxdh_en_ndo_set_vf_spoofchk, +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT + .extended.ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = zxdh_en_ndo_set_vf_trust, +#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ +#endif /* HAVE_NDO_SET_VF_TRUST */ +#endif /* IFLA_VF_MAX */ + +#ifdef HAVE_UDP_ENC_RX_OFFLOAD +#ifdef HAVE_VXLAN_RX_OFFLOAD +#if IS_ENABLED(CONFIG_VXLAN) + .ndo_add_vxlan_port = zxdh_en_add_vxlan_port, + .ndo_del_vxlan_port = zxdh_en_del_vxlan_port, +#endif +#endif /* HAVE_VXLAN_RX_OFFLOAD */ + +#ifdef HAVE_GENEVE_RX_OFFLOAD +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = zxdh_en_add_geneve_port, + .ndo_del_geneve_port = zxdh_en_del_geneve_port, +#endif +#endif /* HAVE_GENEVE_RX_OFFLOAD */ +#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + + .ndo_set_features = zxdh_en_set_features, + +#ifdef HAVE_FDB_OPS + .ndo_fdb_add = zxdh_en_ndo_fdb_add, + .ndo_fdb_del = zxdh_en_ndo_fdb_del, +#ifdef HAVE_NDO_FEATURES_CHECK + .ndo_features_check = zxdh_en_features_check, +#endif /* HAVE_NDO_FEATURES_CHECK */ +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = zxdh_en_ndo_bridge_getlink, + .ndo_bridge_setlink = zxdh_en_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ +#endif /* HAVE_FDB_OPS */ + +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static 
const struct net_device_ops_ext zxdh_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_set_features = zxdh_en_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = zxdh_en_ndo_set_vf_link_state, +#endif +}; + +static void priv_flags_init(struct zxdh_en_priv *priv) +{ + priv->edev.pflags = 0; + + priv->edev.pflags |= BIT(ZXDH_PFLAG_ENABLE_LLDP); /* LLDP默认为开 */ + priv->edev.pflags |= BIT(ZXDH_PFLAG_HARDWARE_BOND_PRIMARY); + priv->edev.pflags |= BIT(ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT); +} + +static int32_t get_max_num_qs(struct zxdh_en_container *en_con) +{ + if (en_con->ops->is_nic(en_con->parent) || + en_con->ops->is_special_bond(en_con->parent)) { + return en_con->ops->get_qpairs(en_con->parent); + } + + return en_con->ops->is_bond(en_con->parent) ? + ZXDH_BOND_ETH_MQ_PAIRS_NUM : + max_pairs; +} + +static int32_t fw_version_init(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + uint8_t fw_version[ETHTOOL_FWVERS_LEN] = { 0 }; + uint8_t fw_version_len = 0; + + ret = zxdh_en_firmware_version_get(en_dev, fw_version, &fw_version_len); + if (ret != 0) { + LOG_ERR("zxdh_en_firmware_version_get err, ret %d!!!!\n", ret); + return ret; + } + if (fw_version_len > ETHTOOL_FWVERS_LEN) { + LOG_ERR("fw_version_len (%d) greater than 31!!!!\n", + fw_version_len); + return -1; + } + + fw_version[ETHTOOL_FWVERS_LEN - 1] = '\0'; + en_dev->fw_version_len = ETHTOOL_FWVERS_LEN; + memcpy(en_dev->fw_version, (uint8_t *)fw_version, + en_dev->fw_version_len); + LOG_INFO("fw_version:%s\n", en_dev->fw_version); + + return 0; +} + +int32_t zxdh_priv_init(struct zxdh_en_priv *priv, struct net_device *netdev) +{ + int32_t ret = 0; + struct zxdh_en_device *en_dev = &priv->edev; + + mutex_init(&priv->lock); + priv_flags_init(priv); + en_dev->msglevel = NETIF_MSG_LINK; + + /* 优先级4,只支持MAGIC WAKE */ + en_dev->wol_support = WAKE_MAGIC; + 
en_dev->wolopts = WAKE_MAGIC; + + ret = fw_version_init(en_dev); + if (ret != 0) { + LOG_ERR("fw_version_init err ret: %d\n", ret); + return ret; + } + + return 0; +} + +struct net_device *zxdh_create_netdev(struct zxdh_en_container *en_con, + uint16_t max_vq_pairs) +{ + struct net_device *netdev = NULL; + struct zxdh_en_priv *en_priv = NULL; + struct dh_core_dev *dh_dev = en_con->parent; + + netdev = alloc_etherdev_mqs(sizeof(struct zxdh_en_priv), max_vq_pairs, + max_vq_pairs); + if (unlikely(netdev == NULL)) { + LOG_ERR("alloc_etherdev_mqs() failed\n"); + return NULL; + } + + en_priv = netdev_priv(netdev); + + en_priv->edev.parent = dh_dev; + en_priv->edev.ops = en_con->ops; + en_priv->edev.netdev = netdev; + + zxdh_priv_init(en_priv, netdev); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + dev_net_set(netdev, dh_core_net(dh_dev)); + + return netdev; +} + +void zxdh_netdev_features_init(struct net_device *netdev) +{ + netdev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_TSO | + NETIF_F_SG | NETIF_F_GSO | + // NETIF_F_LRO | + NETIF_F_TSO6 | NETIF_F_GRO | + NETIF_F_HW_VLAN_STAG_FILTER | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_RXHASH; + + netdev->hw_features |= + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_SG | + NETIF_F_GSO | NETIF_F_LRO | NETIF_F_TSO6 | NETIF_F_GRO | + NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_RXHASH | NETIF_F_NTUPLE; + + netdev->hw_enc_features |= + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_TSO | NETIF_F_TSO6; + + netdev->vlan_features = + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_GRO | NETIF_F_TSO | + NETIF_F_SG | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_LRO | NETIF_F_RXHASH; + return; +} + +extern 
const struct xfrmdev_ops zxdh_xfrmdev_ops; +static void zxdh_build_nic_netdev(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct dh_core_dev *dh_dev = en_priv->edev.parent; + + SET_NETDEV_DEV(netdev, &dh_dev->parent->pdev->dev); + + netdev->netdev_ops = &zxdh_netdev_ops; + +#ifdef ZXDH_SEC + /*内核 sec相关*/ + netdev->features |= NETIF_F_HW_ESP; + netdev->xfrmdev_ops = &zxdh_xfrmdev_ops; +#endif + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT + zxdh_en_set_ethtool_ops_ext(netdev); +#else + zxdh_en_set_ethtool_ops(netdev); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + + zxdh_netdev_features_init(netdev); +} + +int32_t zxdh_en_bond_get_mac(struct net_device *netdev, uint8_t pannel_id, + uint8_t *mac) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.hdr_to_agt.op_code = AGENT_FLASH_MAC_READ; + msg->payload.flash_read_msg.index = pannel_id; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_FLASH, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("en_dev->ops->msg_send_cmd failed: %d\n", ret); + kfree(msg); + return ret; + } + + LOG_INFO("bond get mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + msg->reps.flash_mac_read_msg.mac[0], + msg->reps.flash_mac_read_msg.mac[1], + msg->reps.flash_mac_read_msg.mac[2], + msg->reps.flash_mac_read_msg.mac[3], + msg->reps.flash_mac_read_msg.mac[4], + msg->reps.flash_mac_read_msg.mac[5]); + + ether_addr_copy(mac, msg->reps.flash_mac_read_msg.mac); + kfree(msg); + return ret; +} + +int32_t zxdh_mac_addr_init(struct net_device *netdev) +{ + uint8_t mac[6] = { 0 }; + uint8_t pannel_id = 0; + int32_t ret = 0; + 
struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->is_bond(en_dev->parent)) { + pannel_id = en_dev->pannel_id; + ret = zxdh_en_bond_get_mac(netdev, pannel_id, mac); + if (ret != 0) { + LOG_ERR("zxdh_en_bond_mac_get failed: %d\n", ret); + } + } else { + en_dev->ops->get_mac(en_dev->parent, mac); + } + + if (!is_valid_ether_addr(mac)) { + get_random_bytes(mac, 6); + mac[0] &= 0xfe; + LOG_INFO("set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } + LOG_INFO("set mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", mac[0], mac[1], + mac[2], mac[3], mac[4], mac[5]); + zxdh_netdev_addr_set(netdev, mac); + + return ret; +} + +int32_t zxdh_status_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->if_init(en_dev->parent)) { + zxdh_vp_reset(netdev); + } + + /* Disable VQ/configuration callbacks. 
*/ + zxdh_vp_disable_cbs(netdev); + + zxdh_add_status(netdev, ZXDH_CONFIG_S_ACKNOWLEDGE); + + zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER); + + /* fix features, not set features*/ + zxdh_pf_features_init(netdev); + + might_sleep(); + zxdh_add_status(netdev, ZXDH_CONFIG_S_FEATURES_OK); + if (!zxdh_has_status(netdev, ZXDH_CONFIG_S_FEATURES_OK)) { + LOG_ERR("device refuses features ok\n"); + return -ENODEV; + } + + return 0; +} + +void zxdh_device_ready(struct net_device *netdev) +{ + zxdh_vp_enable_cbs(netdev); + + zxdh_add_status(netdev, ZXDH_CONFIG_S_DRIVER_OK); +} + +void zxdh_link_state_notify_kernel(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->link_up) { + netif_carrier_off(netdev); + udelay(10); + netif_carrier_on(netdev); + } else { + netif_carrier_on(netdev); + udelay(10); + netif_carrier_off(netdev); + } +} + +int32_t aux_get_bond_attrs(struct zxdh_en_device *en_dev, + struct zxdh_lag_attrs *attr) +{ + *attr = (struct zxdh_lag_attrs){ + .pannel_id = en_dev->pannel_id, + .vport = en_dev->vport, + .slot_id = en_dev->slot_id, + .qid[0] = en_dev->phy_index[0], + .qid[1] = en_dev->phy_index[1], + .pcie_id = en_dev->pcie_id, + .phy_port = en_dev->phy_port, + }; + + LOG_INFO( + "bond pf: pannel %hu, vport 0x%hx, phy_qid[0] %u, phy_qid[1] %u, pcie id 0x%x\n", + attr->pannel_id, attr->vport, attr->qid[0], attr->qid[1], + attr->pcie_id); + + return 0; +} + +void aux_set_netdev_name(struct net_device *netdev, uint16_t pannel_id) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + + if (en_dev->ops->is_bond(en_dev->parent)) { + netdev->dev_port = pannel_id + 1; + } else if ((pf_dev->board_type == DH_STDA) || + (pf_dev->board_type == DH_STD_E312S) || + (pf_dev->board_type == DH_STD_E312S_D)) { + if 
(!en_dev->is_multi_ep) + return; + /* 将vf_index插入到bit0-bit7,将panel_id插入到bit8 */ + netdev->dev_id = ((en_dev->pcie_id & 0xFF) | + ((en_dev->panel_id & 0x01) << 8)) & + ~(1 << 9); + /* 将is_pf插入到bit9, 0(VF)/1(PF) */ + netdev->dev_id |= ((en_dev->pcie_id & (1 << 11)) >> 2); + LOG_INFO("board_type: %d,netdev->dev_id: %#x\n", + pf_dev->board_type, netdev->dev_id); + } +} + +int32_t ptp_set_pf_uplink_vfid(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + en_dev->vf_1588_call_np_num = PTP_PORT_VFID_SET; + LOG_INFO("vport: 0x%x, IS_PF: %d VFID %u", en_dev->vport, + IS_PF(en_dev->vport), VQM_VFID(en_dev->vport)); + if (IS_PF(en_dev->vport)) { + ret = dpp_ptp_port_vfid_set(&pf_info, VQM_VFID(en_dev->vport)); + if (ret != 0) { + LOG_ERR("dpp_ptp_port_vfid_set failed!!!\n"); + return -1; + } + } + return 0; +} + +int32_t ptp_set_pf_tc_enable(struct zxdh_en_device *en_dev, uint32_t tc_enable) +{ + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + en_dev->ptp_tc_enable_opt = tc_enable; + LOG_DEBUG("ptp_tc_enable_opt = %u\n", en_dev->ptp_tc_enable_opt); + + en_dev->vf_1588_call_np_num = PTP_TC_ENABLE_SET; + + if (IS_PF(en_dev->vport)) { + ret = dpp_ptp_tc_enable_set(&pf_info, + en_dev->ptp_tc_enable_opt); + if (ret != 0) { + LOG_ERR("dpp_ptp_tc_enable_set failed!!!\n"); + return -1; + } + } + return 0; +} + +int32_t zxdh_en_mtu_init(struct net_device *netdev) +{ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = ZXDH_MAX_MTU; + + return zxdh_en_config_mtu_to_np(netdev, ZXDH_DEFAULT_MTU); +} + +void zxdh_cap_pkt_init(struct zxdh_en_device *en_dev) +{ + en_dev->pkt_save_file.log_file = NULL; + en_dev->pkt_save_file.enable_pkt_num_mode = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + en_dev->pkt_save_file.is_stop = 0; + en_dev->pkt_save_file.pkt_rbuf_idx = 0; 
+ en_dev->pkt_save_file.pkt_ubuf_idx = 0; + en_dev->pkt_save_file.pkt_cur_num = 0; + memset(en_dev->pkt_save_file.file_path, 0, + sizeof(en_dev->pkt_save_file.file_path)); + en_dev->pkt_save_file.file_pos = 0; + en_dev->pkt_save_file.total_written_bytes = 0; + en_dev->pkt_dev_flag = 0; + en_dev->pkt_dev_speed = ZXDH_PKT_INIT_SPEED; + en_dev->pkt_file_num = 0; + en_dev->pkt_cap_switch = 0; + en_dev->pkt_save_file_flag = 0; + en_dev->pkt_addr_marked = 0; +} + +/* Started by AICoder, pid:93575f5d4cs5818140a70aa4c06dfc4f5bd055e2 */ +int32_t zxdh_hash_id_init(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + + if (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_UPF) { + en_dev->hash_search_idx = 2; + } else if (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE0) { + en_dev->hash_search_idx = 0; + } else if (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE1) { + en_dev->hash_search_idx = 1; + } else if (!en_dev->ops->is_bond(en_dev->parent)) { + ret = zxdh_hash_id_get(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_hash_id_get failed: %d\n", ret); + return -1; + } + } + + return 0; +} +/* Ended by AICoder, pid:93575f5d4cs5818140a70aa4c06dfc4f5bd055e2 */ + +uint32_t pcie_id2vfid_for_pf(uint16_t pcie_id) //仅支持PF +{ + uint8_t ep_id = (pcie_id >> 12) & 0x7; + uint8_t pf_id = (pcie_id >> 8) & 0x7; + + return (ep_id * 8 + pf_id + 1152); +} + +void zxdh_cfg_vqm_vf_fc_kbps(struct zxdh_en_device *en_dev, uint32_t pf_fc_val) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(en_dev->parent->parent); + uint32_t vqm_vfid = 0xffff; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + vqm_vfid = pcie_id2vfid_for_pf(pf_dev->pcie_id); + if (vqm_vfid >= 0xffff) { + LOG_ERR("vfid(%u) is invalid!\n", 
vqm_vfid); + kfree(msg); + return; + } + msg->vqm_msg.vqm_vfid = (uint16_t)vqm_vfid; + msg->vqm_msg.opcode = OPCODE_SET; + msg->vqm_msg.cmd = VQM_VF_FC_CMD; + msg->vqm_msg.vqm_vf_fc.pps = 0; + msg->vqm_msg.vqm_vf_fc.kbps = pf_fc_val; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send vf fc msg to riscv failed!\n"); + } else { + if (msg->vqm_reps.check_result != 0xaa) { + LOG_ERR("cfg vqm vf(%u) rate %ukbps failed!\n", + vqm_vfid, pf_fc_val); + } else { + LOG_INFO("cfg vqm vf(%u) rate %ukbps success.\n", + vqm_vfid, pf_fc_val); + } + } + kfree(msg); +} + +void zxdh_set_pf_fc(struct zxdh_en_device *en_dev) +{ + uint32_t pf_fc_val = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != + DH_COREDEV_PF) { // 非PF则跳过 + return; + } + + if (en_dev->ops->is_pf_rate_enable(en_dev->parent, &pf_fc_val)) { + LOG_INFO("pf rate: %uMbps\n", pf_fc_val); + if (pf_fc_val == KERNEL_PF_FC || pf_fc_val == KERNEL_PF_FC_1) { + zxdh_cfg_vqm_vf_fc_kbps(en_dev, pf_fc_val * 1000); + } + } else { + LOG_INFO("pf rate: disable\n"); + } +} + +void zxdh_del_pf_fc(struct zxdh_en_device *en_dev) +{ + uint32_t pf_fc_val = 0; + + if (en_dev->quick_remove) + return; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != + DH_COREDEV_PF) { // 非PF则跳过 + return; + } + if (en_dev->ops->is_pf_rate_enable(en_dev->parent, &pf_fc_val)) { + if (pf_fc_val == KERNEL_PF_FC || pf_fc_val == KERNEL_PF_FC_1) { + zxdh_cfg_vqm_vf_fc_kbps(en_dev, 0); + } + } +} + +static void enable_1588_init(struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + DPP_PF_INFO_T dpp_pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + en_dev->enable_1588 = false; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + 
LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + msg->payload.vf_1588_enable.proc_cmd = ZXDH_VF_1588_ENABLE_SET; + msg->payload.hdr.op_code = ZXDH_VF_1588_ENABLE; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.vf_1588_enable.enable_1588_vf = (uint32_t) false; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret); + kfree(msg); + return; + } + + kfree(msg); + return; + } + + ret = dpp_vport_attr_set(&dpp_pf_info, SRIOV_VPORT_1588_EN, + (uint32_t) false); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_1588_EN failed, ret:%d\n", + ret); + return; + } + return; +} + +static int32_t zxdh_en_dev_probe(struct zxdh_auxiliary_device *adev, + const struct zxdh_auxiliary_device_id *id) +{ + struct zxdh_en_container *en_container = + container_of(adev, struct zxdh_en_container, adev); + struct net_device *netdev = NULL; + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_lag_attrs lag_attrs; + int32_t err = 0; + int32_t vqs_channel_num = 0; + uint32_t phcidx = 0xff; + uint16_t max_vq_pairs = 0; + uint8_t link_up = 0; + uint8_t carrier_status = 0; + + LOG_INFO("aux level start\n"); + mutex_lock(&en_container->parent->lock); + + max_vq_pairs = get_max_num_qs(en_container); + netdev = zxdh_create_netdev(en_container, max_vq_pairs); + if (unlikely(netdev == NULL)) { + LOG_ERR("zxdh_create_netdev is null\n"); + err = -ENOMEM; + goto err_create_netdev; + } + + zxdh_build_nic_netdev(netdev); + + dev_set_drvdata(&adev->dev, netdev_priv(netdev)); + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + en_dev->dmadev = en_dev->ops->get_dma_dev(en_dev->parent); + en_dev->channels_num = en_dev->ops->get_channels_num(en_dev->parent); + en_dev->ops->set_rdma_netdev(en_dev->parent, netdev); + en_dev->curr_unicast_num = 1; + 
en_dev->curr_multicast_num = 0; + en_dev->init_comp_flag = AUX_INIT_INCOMPLETED; + en_dev->delay_statistics_enable = 0; + en_dev->phy_port = INVALID_PHY_PORT; + en_dev->link_down_on_close = false; + en_dev->time_sync_done = false; + + en_dev->max_vq_pairs = max_vq_pairs; +#ifdef CONFIG_INET + en_dev->local_lb_enable = false; +#endif + en_dev->board_type = en_dev->ops->get_board_type(en_dev->parent); + + vqs_channel_num = + en_dev->ops->create_vqs_channels(en_dev->parent, en_dev); + if (vqs_channel_num < 0) { + LOG_ERR("create_vqs_channels failed, vqs_channel_num: %d\n", + vqs_channel_num); + err = vqs_channel_num; + goto err_create_vqs_channels; + } + + err = dh_aux_eq_table_init(en_priv); + if (err != 0) { + LOG_ERR("Failed to alloc IRQs: %d\n", err); + goto err_eq_table_init; + } + + err = dh_aux_events_init(en_priv); + if (err != 0) { + LOG_ERR("dh_aux_events_init failed: %d\n", err); + goto err_events_init; + } + + err = dh_aux_eq_table_create(en_priv); + if (err != 0) { + LOG_ERR("Failed to alloc EQs: %d\n", err); + goto err_eq_table_create; + } + + err = zxdh_status_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_status_init failed: %d\n", err); + goto err_status_init; + } + + en_dev->ep_bdf = en_dev->ops->get_epbdf(en_dev->parent); + en_dev->vport = en_dev->ops->get_vport(en_dev->parent); + en_dev->pcie_id = en_dev->ops->get_pcie_id(en_dev->parent); + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + en_dev->slot_id = en_dev->ops->get_slot_id(en_dev->parent); + } + LOG_INFO( + "ep_bdf: 0x%x, vport: 0x%x, pcie_id: %d, slot_id: %d. 
is_bond %d\n", + en_dev->ep_bdf, en_dev->vport, en_dev->pcie_id, en_dev->slot_id, + en_dev->ops->is_bond(en_dev->parent)); + if (!en_dev->ops->is_bond(en_dev->parent)) { + en_dev->spec_sbdf = en_dev->ops->get_spec_sbdf(en_dev->parent); + en_dev->is_hwbond = en_dev->ops->is_hwbond( + en_dev->parent, en_dev->is_hwbond, FALSE); + en_dev->is_rdma_aux_plug = en_dev->ops->is_rdma_aux_plug( + en_dev->parent, en_dev->is_rdma_aux_plug, FALSE); + en_dev->is_primary_port = en_dev->ops->is_primary_port( + en_dev->parent, en_dev->is_primary_port, FALSE); + en_dev->is_multi_ep = en_dev->ops->is_multi_ep(en_dev->parent); + ZXDH_SET_PFLAG(en_dev->pflags, ZXDH_PFLAG_HARDWARE_BOND, + en_dev->is_hwbond ? 1 : 0); + ZXDH_SET_PFLAG(en_dev->pflags, ZXDH_PFLAG_HARDWARE_BOND_PRIMARY, + en_dev->is_primary_port ? 1 : 0); + } +#ifdef ZXDH_MSGQ + if (IS_MSGQ_DEV(en_dev)) { + en_dev->need_msgq = true; + } +#endif + en_dev->eth_config.rx_queue_size = ZXDH_PF_DEFAULT_DESC_NUM(en_dev); + en_dev->eth_config.tx_queue_size = ZXDH_PF_DEFAULT_DESC_NUM(en_dev); + + // PF92.5特殊限速 + zxdh_set_pf_fc(en_dev); + + err = zxdh_vqs_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_vqs_init failed: %d\n", err); + goto err_vqs_init; + } + + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + err = zxdh_sec_vqs_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_sec_vqs_init failed\n"); + goto err_sec_vqs_init; + } + } + + err = zxdh_hash_id_init(en_dev); + if (err != 0) { + LOG_ERR("zxdh_hash_id_init failed: %d\n", err); + goto err_do_vqs_free; + } + + err = zxdh_panel_id_init(en_dev); + if (err != 0) { + LOG_ERR("zxdh_panel_id_init failed: %d\n", err); + goto err_do_vqs_free; + } + en_dev->eth_config.hash_func = ZXDH_FUNC_CRC32; + en_dev->eth_config.hash_mode = ZXDH_NET_RX_FLOW_HASH_SDFNT; + en_dev->eth_config.curr_combined = en_dev->curr_queue_pairs; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + zxdh_cap_pkt_init(en_dev); + if (en_dev->ops->is_bond(en_dev->parent)) { + err = 
zxdh_aux_alloc_pannel(en_dev); + if (err != 0) { + LOG_ERR("zxdh_aux_alloc_pannel failed: %d\n", + err); + goto err_do_vqs_free; + } + } else if (zxdh_en_is_panel_port(en_dev)) { + err = zxdh_phyport_get(en_dev); + if (err != 0) { + LOG_ERR("zxdh_phyport_get failed: %d\n", err); + goto err_do_vqs_free; + } + } + + err = zxdh_mac_addr_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_mac_addr_init failed: %d\n", err); + goto err_mac_addr_init; + } + + en_dev->wolopts = WAKE_MAGIC; + err = zxdh_pf_port_init(en_dev, true); + if (err != 0) { + LOG_ERR("zxdh_pf_port_init failed: %d\n", err); + goto err_mac_addr_init; + } + } else { + err = zxdh_vf_dpp_port_init(en_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_dpp_port_init failed: %d\n", err); + goto err_mac_addr_init; + } + } + enable_1588_init(en_dev); + + if (!en_dev->ops->is_bond(en_dev->parent)) { + netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; + err = zxdh_num_channels_changed(en_dev, + en_dev->curr_queue_pairs); + if (err != 0) { + LOG_ERR("zxdh_num_channels_changed failed: %d\n", err); + goto err_do_vport_free; + } + + err = zxdh_en_sync_features(en_dev, netdev->features); + ZXDH_CHECK_RET_GOTO_ERR(err, err_do_rxfh_free, + "zxdh_en_sync_features failed: %d\n", + err); + } + + zxdh_device_ready(netdev); + + err = zxdh_en_mtu_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_en_mtu_init failed: %d\n", err); + goto err_do_rxfh_free; + } + + en_dev->hw_stats.q_stats = + kmalloc_array(en_dev->max_vq_pairs, + sizeof(struct zxdh_en_queue_stats), GFP_KERNEL); + if (unlikely(en_dev->hw_stats.q_stats == NULL)) { + LOG_ERR("hw_stats.q_stats kmalloc failed\n"); + goto err_do_rxfh_free; + } + memset(en_dev->hw_stats.q_stats, 0, + en_dev->max_vq_pairs * sizeof(struct zxdh_en_queue_stats)); + + if (!en_dev->ops->is_bond(en_dev->parent)) { + err = zxdh_hardware_bond_init(netdev); + if (err != 0) { + LOG_ERR("zxdh_hardware_bond_init failed, %d\n", err); + goto err_hardware_bond_init; + } + } + + memset(&en_dev->pre_stats, 0, 
sizeof(struct zxdh_en_vport_stats)); + en_dev->last_tx_vport_ssvpc_packets = 0; + + err = zxdh_en_vport_pre_stats_get(en_dev); + if (err != 0) { + LOG_ERR("get vport pre stats failed, %d\n", err); + } + + aux_set_netdev_name(netdev, en_dev->pannel_id); + err = register_netdev(netdev); + if (err != 0) { + LOG_ERR("register_netdev failed, %d\n", err); + goto err_register_netdev; + } + + zxdh_link_state_notify_kernel(netdev); + + if (en_dev->ops->is_bond(en_dev->parent)) { + aux_get_bond_attrs(en_dev, &lag_attrs); + err = zxdh_ldev_add_netdev(en_container->parent, + en_dev->pannel_id, netdev, + &lag_attrs); + if (err != 0) { + goto err_ldev_add_netdev; + } + } + +#ifdef ZXDH_PLCR_OPEN + err = zxdh_plcr_init(en_priv); + if (err != 0) { + LOG_ERR("zxdh_plcr_init failed, %d\n", err); + //TODO:goto? + } +#endif + + err = dh_aux_vxlan_netdev_notifier_init(en_priv); + if (err != 0) { + LOG_ERR("dh_aux_vxlan_netdev_notifier_init failed: %d\n", err); + goto err_vxlan_netdev_notifier_init; + } + + err = dh_aux_ipv6_notifier_init(en_priv); + if (err != 0) { + LOG_ERR("dh_aux_ipv6_notifier_init failed: %d\n", err); + goto err_ipv6_notifier_init; + } +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + err = zxdh_msgq_init(en_dev); + if (err) { + LOG_ERR("zxdh_msgq_init failed: %d\n", err); + goto err_msgq_init; + } + } +#endif + + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) && + (zxdh_en_is_panel_port(en_dev))) { + /* clear mcode gate,successfully build the scheduling tree, and then open it again */ + zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev); +#ifdef ZXDH_DCBNL_OPEN + err = zxdh_dcbnl_initialize(netdev); + if (err != 0) { + LOG_ERR("zxdh_dcbnl_initialize failed: %d\n", err); + //TODO:goto? 
+ } +#endif + } + + en_dev->ops->set_bond_num(en_dev->parent, true); + + en_dev->init_comp_flag = AUX_INIT_COMPLETED; + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + en_dev->autoneg_enable = AUTONEG_ENABLE; + err = zxdh_en_phyport_init(en_dev); + if (err != 0) { + LOG_ERR("zxdh_en_phyport_init failed: %d\n", err); + goto err_phyport_init; + } + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + if (zxdh_get_ptp_clock_index(en_dev, &phcidx)) { + LOG_ERR("%s: aux dev get phc fail\n", netdev->name); + } + en_dev->clock_no = phcidx; + if (!en_dev->ops->is_bond(en_dev->parent)) { + if (ptp_set_pf_uplink_vfid(en_dev)) { + LOG_ERR("%s: set ptp l2 vfid fail\n", + netdev->name); + } + + if (ptp_set_pf_tc_enable(en_dev, 0)) { + LOG_ERR("%s: set tc enable fail\n", + netdev->name); + } + } + } + + en_dev->ops->set_init_comp_flag(en_dev->parent, 1); + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + carrier_status = netif_carrier_ok(en_dev->netdev) ? 1 : 0; + en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up); + link_up = link_up == 0 ? 
0 : 1; + LOG_INFO( + "VF device: %s vqm_link_state %d vs kernel_link_state %d\n", + pci_name(en_dev->ops->get_pdev(en_dev->parent)), + link_up, carrier_status); + if (link_up != carrier_status) { + LOG_INFO( + "VF device: %s vqm_link_state %d not equal to kernel_link_state %d\n", + pci_name(en_dev->ops->get_pdev(en_dev->parent)), + link_up, carrier_status); + dh_eq_async_link_info_int_process(en_priv); + } + } + card_num++; + + dh_ip_mac_init(en_priv); + mutex_unlock(&en_container->parent->lock); + LOG_INFO("%s: aux level completed\n", netdev->name); + + return 0; + +err_phyport_init: + en_dev->ops->set_bond_num(en_dev->parent, false); +#ifdef ZXDH_DCBNL_OPEN + zxdh_dcbnl_ets_uninit(netdev); +#endif +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + zxdh_msgq_exit(en_dev); +err_msgq_init: +#endif + dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier)); +err_ipv6_notifier_init: + dh_vxlan_netdev_change_notifier_unregister(&(en_dev->vxlan_notifier)); +err_vxlan_netdev_notifier_init: +#ifdef ZXDH_PLCR_OPEN + zxdh_plcr_uninit(en_priv); +#endif + if (en_dev->ops->is_bond(en_dev->parent)) { + aux_get_bond_attrs(en_dev, &lag_attrs); + zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs); + } +err_ldev_add_netdev: + unregister_netdev(netdev); +err_register_netdev: + if (!en_dev->ops->is_bond(en_dev->parent)) { + zxdh_hardware_bond_uninit(netdev); + } +err_hardware_bond_init: + kfree(en_dev->hw_stats.q_stats); +err_do_rxfh_free: + if (!en_dev->ops->is_bond(en_dev->parent)) { + zxdh_rxfh_del(en_dev); + } +err_do_vport_free: + zxdh_vport_uninit(netdev); +err_mac_addr_init: + if (en_dev->ops->is_bond(en_dev->parent)) { + en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id); + } +err_do_vqs_free: + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + zxdh_sec_vqs_uninit(netdev, ZXDH_SEC_QUEUES_NUM(en_dev)); + } +err_sec_vqs_init: + zxdh_vqs_uninit(netdev); +err_vqs_init: + zxdh_del_pf_fc(en_dev); + zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED); 
+err_status_init: + dh_aux_eq_table_destroy(en_priv); +err_eq_table_create: + dh_aux_events_uninit(en_priv); +err_events_init: + dh_aux_eq_table_cleanup(en_priv); +err_eq_table_init: + en_dev->ops->destroy_vqs_channels(en_dev->parent); +err_create_vqs_channels: + free_netdev(netdev); +err_create_netdev: + mutex_unlock(&en_container->parent->lock); + return -EPERM; +} + +static int32_t eth_pflags_config_recover(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t i = 0; + int32_t err = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) + return 0; + + ZXDH_SET_PFLAG(en_dev->pflags, ZXDH_PFLAG_ENABLE_SSHD, 0); + ZXDH_SET_PFLAG(en_dev->pflags, ZXDH_PFLAG_IP, 0); + err = zxdh_dual_tor_label_get(en_dev); + if (err == 1) + en_dev->pflags |= BIT(ZXDH_PFLAG_DUAL_TOR_CTRL); + else if (err == 0) + en_dev->pflags &= ~BIT(ZXDH_PFLAG_DUAL_TOR_CTRL); + + for (i = 0; i < ZXDH_NUM_PFLAGS; ++i) { + if (i == ZXDH_PFLAG_ENABLE_SSHD || i == ZXDH_PFLAG_ETS_SWITCH || + i == ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT || + i == ZXDH_PFLAG_PCIE_HP_IRQ_CTRL) + continue; + err = zxdh_pflags_update(netdev, i, en_dev->pflags & BIT(i)); + if (err) { + HEAL_ERR("%s zxdh_pflags_update[%d] failed: %d\n", + netdev->name, i, err); + return err; + } + } + + return 0; +} + +static void zxdh_init_stats(struct zxdh_en_device *en_dev) +{ + int32_t err = 0; + int32_t i = 0; + + memset(&en_dev->hw_stats.netdev_stats, 0, + sizeof(struct zxdh_en_netdev_stats)); + memset(&en_dev->hw_stats.vport_stats, 0, + sizeof(struct zxdh_en_vport_stats)); + memset(&en_dev->hw_stats.phy_stats, 0, + sizeof(struct zxdh_en_phy_stats)); + memset(&en_dev->hw_stats.udp_stats, 0, + sizeof(struct zxdh_en_udp_phy_stats)); + memset(&en_dev->pre_stats, 0, sizeof(struct zxdh_en_vport_stats)); + memset(en_dev->hw_stats.q_stats, 0, + en_dev->max_vq_pairs * sizeof(struct zxdh_en_queue_stats)); + en_dev->last_tx_vport_ssvpc_packets = 0; 
+ + err = zxdh_en_vport_pre_stats_get(en_dev); + if (err != 0) { + LOG_ERR("get vport pre stats failed, %d\n", err); + } + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + memset(&en_dev->rq[i].stats, 0, + sizeof(struct virtnet_rq_stats)); + memset(&en_dev->sq[i].stats, 0, + sizeof(struct virtnet_sq_stats)); + } + return; +} + +int32_t zxdh_aux_load(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct net_device *netdev = en_dev->netdev; + struct zxdh_lag_attrs lag_attrs; + int32_t err = 0; + int32_t vqs_channel_num = 0; + + LOG_INFO("%s aux level load start\n", netdev->name); + + mutex_lock(&en_dev->parent->lock); + if (en_dev->parent->driver_process == ZXDH_REMOVE) + goto unlock; + + vqs_channel_num = + en_dev->ops->create_vqs_channels(en_dev->parent, en_dev); + if (vqs_channel_num < 0) { + HEAL_ERR("%s create_vqs_channels failed, vqs_channel_num: %d\n", + netdev->name, vqs_channel_num); + goto unlock; + } + + err = dh_aux_eq_table_create(en_priv); + if (err != 0) { + HEAL_ERR("%s Failed to alloc EQs: %d\n", netdev->name, err); + goto err_eq_table_create; + } + + err = zxdh_status_init(netdev); + if (err != 0) { + HEAL_ERR("%s zxdh_status_init failed: %d\n", netdev->name, err); + goto err_status_init; + } + + // PF92.5特殊限速 + zxdh_set_pf_fc(en_dev); + err = zxdh_vqs_init(netdev); + if (err != 0) { + HEAL_ERR("%s zxdh_vqs_init failed: %d\n", netdev->name, err); + goto err_vqs_init; + } + + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + err = zxdh_sec_vqs_init(netdev); + if (err != 0) { + HEAL_ERR("%s zxdh_sec_vqs_init failed\n", netdev->name); + goto err_sec_vqs_init; + } + } + + zxdh_device_ready(netdev); + + err = zxdh_port_init(netdev); + if (err != 0) { + HEAL_ERR("%s zxdh_port_init failed: %d\n", netdev->name, err); + goto err_port_init; + } + + if (en_dev->ops->is_bond(en_dev->parent)) { + aux_get_bond_attrs(en_dev, &lag_attrs); + err = zxdh_ldev_add_netdev(en_dev->parent, en_dev->pannel_id, + netdev, 
&lag_attrs); + if (err != 0) { + HEAL_ERR("%s zxdh_ldev_add_netdev failed: %d\n", + netdev->name, err); + goto err_ldev_add_netdev; + } + } + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + err = zxdh_msgq_init(en_dev); + if (err) { + HEAL_ERR("%s zxdh_msgq_init failed: %d\n", netdev->name, + err); + goto err_msgq_init; + } + } +#endif + en_dev->init_comp_flag = AUX_INIT_COMPLETED; + zxdh_init_stats(en_dev); + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + dh_eq_async_link_info_int_process(en_priv); + } + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = zxdh_recover_hwbond_in_reload(en_dev->netdev); + if (err != 0) { + HEAL_ERR("zxdh_recover_hwbond_in_reload failed: %d\n", + err); + goto err_phyport_init; + } + err = zxdh_en_phyport_init(en_dev); + if (err != 0) { + HEAL_ERR("%s zxdh_en_phyport_init failed: %d\n", + netdev->name, err); + goto err_phyport_init; + } + } + en_dev->ops->set_init_comp_flag(en_dev->parent, 1); + en_dev->fast_unload = false; + en_dev->ops->set_bond_num(en_dev->parent, true); + mutex_unlock(&en_dev->parent->lock); + + en_dev->curr_queue_pairs = en_dev->eth_config.curr_combined; + if (netif_running(netdev)) + en_open(netdev, false); + else + en_phyport_close(en_dev); + netif_tx_wake_all_queues(netdev); + en_dev->device_state = ZXDH_DEVICE_STATE_UP; + eth_pflags_config_recover(en_dev->netdev); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + mod_timer(&en_dev->service_riscv_timer, jiffies); + } + return 0; + +err_phyport_init: +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + zxdh_msgq_exit(en_dev); +#endif +err_msgq_init: + if (en_dev->ops->is_bond(en_dev->parent)) { + aux_get_bond_attrs(en_dev, &lag_attrs); + zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs); + } +err_ldev_add_netdev: + zxdh_vport_uninit(netdev); +err_port_init: + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + zxdh_sec_vqs_uninit(netdev, ZXDH_SEC_QUEUES_NUM(en_dev)); + } 
+err_sec_vqs_init: + zxdh_vqs_uninit(netdev); +err_vqs_init: + zxdh_del_pf_fc(en_dev); + zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED); +err_status_init: + dh_aux_eq_table_destroy(en_priv); +err_eq_table_create: + en_dev->ops->destroy_vqs_channels(en_dev->parent); +unlock: + mutex_unlock(&en_dev->parent->lock); + return -EPERM; +} + +static void del_cfg_shell_script(struct work_struct *work) +{ + static const char command[] = "/etc/zxdh_cfg/smart_nic_cfg_proc.sh"; + char *argv[] = { (char *)command, "d", NULL }; + static char *envp[] = { "HOME=/", "TERM=linux", + "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/bin", + NULL }; + int32_t ret = 0; + + ret = call_usermodehelper(command, argv, envp, UMH_WAIT_PROC); + if (ret < 0) { + LOG_ERR("Failed to execute smart_nic_cfg_del.sh(err:%d)\n", + ret); + } else { + LOG_INFO("smart_nic_cfg_del.sh executed successfully,ret:%d\n", + ret); + } +} + +void zxdh_aux_unload(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct net_device *netdev = en_dev->netdev; + struct zxdh_lag_attrs lag_attrs; + uint16_t i = 0; + + if (test_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state)) { + mutex_lock(&en_priv->lock); + cancel_delayed_work_sync(&en_dev->refill); + + for (i = 0; i < en_dev->max_vq_pairs; i++) { + xdp_rxq_info_unreg(&en_dev->rq[i].xdp_rxq); + napi_disable(&en_dev->rq[i].napi); + virtnet_napi_tx_disable(&en_dev->sq[i].napi); + } + mutex_unlock(&en_priv->lock); + } + + mutex_lock(&en_dev->parent->lock); + if (en_dev->parent->driver_process == ZXDH_REMOVE) { + mutex_unlock(&en_dev->parent->lock); + return; + } + en_dev->parent->driver_process = ZXDH_UNLOAD; + en_dev->init_comp_flag = AUX_INIT_INCOMPLETED; + en_dev->ops->set_init_comp_flag(en_dev->parent, 0); + en_dev->ops->set_bond_num(en_dev->parent, false); + en_dev->fast_unload = true; + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + zxdh_msgq_exit(en_dev); +#endif + if (en_dev->ops->is_bond(en_dev->parent)) { + aux_get_bond_attrs(en_dev, &lag_attrs); + 
zxdh_ldev_remove_netdev(en_dev->parent, netdev, &lag_attrs); + } + + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + zxdh_sec_vqs_uninit(netdev, ZXDH_SEC_QUEUES_NUM(en_dev)); + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + zxdh_cap_pkt_uninit(en_dev, false); + } + + synchronize_net(); // 等待所有正在处理的网络操作完成 + zxdh_vqs_uninit(netdev); + + en_dev->ops->destroy_vqs_channels(en_dev->parent); + mutex_unlock(&en_dev->parent->lock); +} + +static int32_t zxdh_en_dev_remove(struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_priv *en_priv = + (struct zxdh_en_priv *)dev_get_drvdata(&adev->dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct net_device *netdev = en_dev->netdev; + struct zxdh_lag_attrs lag_attrs; + + LOG_INFO("%s: aux level start\n", netdev->name); + if (!en_dev->ops->get_rp_link_status(en_dev->parent)) { + en_dev->quick_remove = true; + LOG_INFO("%s: quick_remove start\n", netdev->name); + } + + mutex_lock(&en_dev->parent->lock); + en_dev->ops->set_init_comp_flag(en_dev->parent, 0); + en_dev->parent->driver_process = ZXDH_REMOVE; + en_dev->init_comp_flag = AUX_INIT_INCOMPLETED; + mutex_unlock(&en_dev->parent->lock); + + en_dev->ops->set_bond_num(en_dev->parent, false); +#ifdef ZXDH_DCBNL_OPEN + zxdh_dcbnl_ets_uninit(netdev); +#endif + +#ifdef ZXDH_MSGQ + if (!en_dev->fast_unload) { + if (NEED_MSGQ(en_dev)) + zxdh_msgq_exit(en_dev); + } +#endif + dh_vxlan_netdev_change_notifier_unregister(&(en_dev->vxlan_notifier)); + dh_inet6_addr_change_notifier_unregister(&(en_dev->ipv6_notifier)); + +#ifdef ZXDH_PLCR_OPEN + zxdh_plcr_uninit(en_priv); +#endif + + if (en_dev->ops->is_bond(en_dev->parent)) { + if (!en_dev->fast_unload) { + aux_get_bond_attrs(en_dev, &lag_attrs); + zxdh_ldev_remove_netdev(en_dev->parent, netdev, + &lag_attrs); + } + } else { + zxdh_hardware_bond_uninit(netdev); + } + + unregister_netdev(netdev); + kfree(en_dev->hw_stats.q_stats); + + if (!en_dev->fast_unload) { + if 
(!en_dev->ops->is_bond(en_dev->parent)) + zxdh_rxfh_del(en_dev); + + zxdh_vport_uninit(netdev); + if (en_dev->ops->is_drs_sec_enable(en_dev->parent)) { + zxdh_sec_vqs_uninit(netdev, + ZXDH_SEC_QUEUES_NUM(en_dev)); + } + zxdh_vqs_uninit(netdev); + zxdh_del_pf_fc(en_dev); + zxdh_add_status(netdev, ZXDH_CONFIG_S_FAILED); + dh_aux_eq_table_destroy(en_priv); + } + + dh_aux_events_uninit(en_priv); + dh_aux_eq_table_cleanup(en_priv); + if (!en_dev->fast_unload) + en_dev->ops->destroy_vqs_channels(en_dev->parent); + if (en_dev->ops->is_bond(en_dev->parent)) + en_dev->ops->release_port(en_dev->parent, en_dev->pannel_id); + mutex_destroy(&en_priv->lock); + free_netdev(netdev); + + card_num--; + if (card_num == 0) { + INIT_WORK(&work_cfg_del, del_cfg_shell_script); + queue_work(system_wq, &work_cfg_del); + } + + LOG_INFO("aux level completed\n"); + + return 0; +} + +static void zxdh_en_dev_shutdown(struct zxdh_auxiliary_device *adev) +{ + LOG_INFO("aux level start\n"); + zxdh_en_dev_remove(adev); + LOG_INFO("aux level completed\n"); +}; + +static const struct zxdh_auxiliary_device_id zxdh_en_dev_id_table[] = { + { + .name = ZXDH_PF_NAME "." 
ZXDH_EN_DEV_ID_NAME, + }, + {}, +}; + +MODULE_DEVICE_TABLE(zxdh_auxiliary, zxdh_en_dev_id_table); + +static struct zxdh_auxiliary_driver zxdh_en_driver = { + .name = ZXDH_EN_DEV_ID_NAME, + .probe = zxdh_en_dev_probe, + .remove = zxdh_en_dev_remove, + .shutdown = zxdh_en_dev_shutdown, + .id_table = zxdh_en_dev_id_table, +}; + +int32_t zxdh_en_driver_register(void) +{ + int32_t err = 0; + + if ((max_pairs == 0) || (max_pairs >= ZXDH_MAX_PAIRS_NUM)) { + LOG_INFO( + "max_pairs %u parameter is a invalid value, use the default value %u\n", + max_pairs, ZXDH_MQ_PAIRS_NUM); + max_pairs = ZXDH_MQ_PAIRS_NUM; + } + + err = zxdh_auxiliary_driver_register(&zxdh_en_driver); + if (err != 0) { + LOG_ERR("zxdh_auxiliary_driver_register failed: %d\n", err); + goto err_aux_register; + } + + err = dh_aux_msg_recv_func_register(); + if (err != 0) { + LOG_ERR("dh_aux_msg_recv_func_register failed: %d\n", err); + goto err_msg_recv_register; + } + + err = zxdh_tools_netlink_register(); + if (err != 0) { + LOG_ERR("zxdh_tools_msg_family register error failed: %d\n", + err); + goto err_netlink_register; + } + + zxdh_lag_lock_init(); + LOG_INFO("all driver insmod completed\n"); + + return 0; + +err_netlink_register: + dh_aux_msg_recv_func_unregister(); +err_msg_recv_register: + zxdh_auxiliary_driver_unregister(&zxdh_en_driver); +err_aux_register: + return err; +} + +void zxdh_en_driver_unregister(void) +{ + LOG_INFO("driver rmmod start\n"); + zxdh_lag_lock_deinit(); + zxdh_tools_netlink_unregister(); + dh_aux_msg_recv_func_unregister(); + zxdh_auxiliary_driver_unregister(&zxdh_en_driver); +} diff --git a/drivers/net/ethernet/dinghai/en_aux.h b/drivers/net/ethernet/dinghai/en_aux.h new file mode 100644 index 000000000000..bb6950938b8f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux.h @@ -0,0 +1,806 @@ +#ifndef __ZXDH_EN_AUX_H__ +#define __ZXDH_EN_AUX_H__ + +#include "msg_common.h" +#include "zxdh_tools/zxdh_tools_ioctl.h" +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include 
+#include +#include +#include +#include +#include "./en_aux/queue.h" +#include "./en_aux/en_aux_cmd.h" +#include "./en_pf.h" +#include "./en_aux/dcbnl/en_dcbnl.h" +#include "./en_np/driver/include/dpp_drv_hash.h" +#include "./en_pf/msg_func.h" +#include "./en_ethtool/ethtool.h" + +#define MAX_VLAN_ID (4095) +#define MAX_QOS_ID (7) +#define VLAN_BITMAP_LENGTH (MAX_VLAN_ID + 1) +#define VLAN_BITMAP_BYTE_SIZE (512) +#define BIT_NUM_PER_BYTE (8) + +#define PF_AC_MASK 0x800 +#define FILTER_MAC 0xAA +#define UNFILTER_MAC 0xFF + +#define AUX_INIT_INCOMPLETED 0 +#define AUX_INIT_COMPLETED 1 + +#define IS_DELAY_STATISTICS_PKT 0 +#define IS_NOT_DELAY_STATICTICS_PKT 1 + +#define ADD_IP6MAC 1 +#define DEL_IP6MAC 2 + +#define WAKE_MAGIC (1 << 5) + +/* IPv6 MAC work data structure */ +struct zxdh_ip6mac_work_data { + uint32_t addr6[4]; /* IPv6 address */ + uint8_t ip6mac[ETH_ALEN]; /* MAC address */ +}; + +/* IPv6 MAC work item - each work item has its own data */ +struct zxdh_ip6mac_work_item { + struct work_struct work; + struct zxdh_en_device *en_dev; + struct zxdh_ip6mac_work_data data; +}; + +extern const uint8_t BOND_MCAST_ADDR[ETH_ALEN]; + +#define ZXDH_AUX_INIT_COMP_CHECK(en_dev) \ + do { \ + if (en_dev->init_comp_flag != AUX_INIT_COMPLETED) { \ + return; \ + } \ + } while (0) + +typedef int (*zxdh_feature_handler)(struct zxdh_en_device *en_dev, bool enable); + +extern uint32_t max_pairs; + +struct zxdh_rdma_if; +struct zxdh_en_if; +struct zxdh_sec_if; + +struct zxdh_en_container { + struct zxdh_auxiliary_device adev; + struct zxdh_rdma_dev_info *rdma_infos; + struct zxdh_rdma_if *rdma_ops; + struct zxdh_en_if *ops; + struct dh_core_dev *parent; + int32_t aux_id; + struct zxdh_sec_if *sec_ops; + void *auxiliary_ops[17]; //max support 20 auxiliary devices +}; + +struct zxdh_en_queue_stats { + uint64_t q_rx_pkts; + uint64_t q_tx_pkts; + uint64_t q_rx_bytes; + uint64_t q_tx_bytes; + uint64_t q_tx_stopped; + uint64_t q_tx_wake; + uint64_t q_tx_dropped; +}; + +struct 
zxdh_en_netdev_stats { + uint64_t rx_packets; + uint64_t tx_packets; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t tx_queue_wake; + uint64_t tx_queue_stopped; + uint64_t tx_queue_dropped; + uint64_t rx_removed_vlan_packets; + uint64_t tx_added_vlan_packets; + uint64_t rx_csum_offload_good; + uint64_t rx_csum_offload_error; +}; + +struct zxdh_en_vport_vqm_stats { + uint64_t rx_vport_packets; + uint64_t tx_vport_packets; + uint64_t rx_vport_bytes; + uint64_t tx_vport_bytes; + uint64_t rx_vport_dropped; +}; + +struct zxdh_en_vport_dtp_stats { + uint64_t rx_lro_packets; + uint64_t rx_udp_csum_fail_packets; + uint64_t tx_udp_csum_fail_packets; + uint64_t rx_tcp_csum_fail_packets; + uint64_t tx_tcp_csum_fail_packets; + uint64_t rx_ipv4_csum_fail_packets; + uint64_t tx_ipv4_csum_fail_packets; +}; + +struct zxdh_en_vport_stats { + struct zxdh_en_vport_vqm_stats vqm_stats; + struct zxdh_en_vport_np_stats np_stats; + struct zxdh_en_vport_dtp_stats dtp_stats; +}; + +struct zxdh_en_phy_stats { + uint64_t rx_packets_phy; + uint64_t tx_packets_phy; + uint64_t rx_bytes_phy; + uint64_t tx_bytes_phy; + uint64_t rx_error_phy; + uint64_t tx_error_phy; + uint64_t rx_drop_phy; + uint64_t tx_drop_phy; + uint64_t rx_good_bytes_phy; + uint64_t tx_good_bytes_phy; + uint64_t rx_unicast_phy; + uint64_t tx_unicast_phy; + uint64_t rx_multicast_phy; + uint64_t tx_multicast_phy; + uint64_t rx_broadcast_phy; + uint64_t tx_broadcast_phy; + uint64_t rx_under64_drop; + uint64_t rx_undersize_phy; + uint64_t rx_size_64_phy; + uint64_t rx_size_65_127; + uint64_t rx_size_128_255; + uint64_t rx_size_256_511; + uint64_t rx_size_512_1023; + uint64_t rx_size_1024_1518; + uint64_t rx_size_1519_mru; + uint64_t rx_oversize_phy; + uint64_t tx_undersize_phy; + uint64_t tx_size_64_phy; + uint64_t tx_size_65_127; + uint64_t tx_size_128_255; + uint64_t tx_size_256_511; + uint64_t tx_size_512_1023; + uint64_t tx_size_1024_1518; + uint64_t tx_size_1519_mtu; + uint64_t tx_oversize_phy; + uint64_t rx_pause_phy; 
+ uint64_t tx_pause_phy; + uint64_t rx_crc_errors; + uint64_t tx_crc_errors; + uint64_t rx_mac_control_phy; + uint64_t tx_mac_control_phy; + uint64_t rx_fragment_phy; + uint64_t tx_fragment_phy; + uint64_t rx_jabber_phy; + uint64_t tx_jabber_phy; + uint64_t rx_vlan_phy; + uint64_t tx_vlan_phy; + uint64_t rx_eee_phy; + uint64_t tx_eee_phy; +} __attribute__((packed)); + +struct zxdh_en_udp_phy_stats { + uint64_t rx_arn_phy; + uint64_t tx_psn_phy; + uint64_t rx_psn_phy; + uint64_t tx_psn_ack_phy; + uint64_t rx_psn_ack_phy; +} __attribute__((packed)); + +struct zxdh_en_spm_stats { + uint64_t rx_total; + uint64_t rx_pause; + uint64_t rx_unicast; + uint64_t rx_multicast; + uint64_t rx_broadcast; + uint64_t rx_vlan; + uint64_t rx_size_64; + uint64_t rx_size_65_127; + uint64_t rx_size_128_255; + uint64_t rx_size_256_511; + uint64_t rx_size_512_1023; + uint64_t rx_size_1024_1518; + uint64_t rx_size_1519_mru; + uint64_t rx_undersize; + uint64_t rx_oversize; + uint64_t rx_fragment; + uint64_t rx_jabber; + uint64_t rx_control; + uint64_t rx_eee; + + uint64_t tx_total; + uint64_t tx_pause; + uint64_t tx_unicast; + uint64_t tx_multicast; + uint64_t tx_broadcast; + uint64_t tx_vlan; + uint64_t tx_size_64; + uint64_t tx_size_65_127; + uint64_t tx_size_128_255; + uint64_t tx_size_256_511; + uint64_t tx_size_512_1023; + uint64_t tx_size_1024_1518; + uint64_t tx_size_1519_mtu; + uint64_t tx_undersize; + uint64_t tx_oversize; + uint64_t tx_fragment; + uint64_t tx_jabber; + uint64_t tx_control; + uint64_t tx_eee; + + uint64_t rx_error; + uint64_t rx_fcs_error; + uint64_t rx_drop; + + uint64_t tx_error; + uint64_t tx_fcs_error; + uint64_t tx_drop; +} __attribute__((packed)); + +struct zxdh_en_spm_bytes { + uint64_t rx_total_bytes; + uint64_t rx_good_bytes; + + uint64_t tx_total_bytes; + uint64_t tx_good_bytes; +} __attribute__((packed)); + +struct zxdh_en_hw_stats { + struct zxdh_en_netdev_stats netdev_stats; + struct zxdh_en_vport_stats vport_stats; + struct zxdh_en_phy_stats 
phy_stats; + struct zxdh_en_udp_phy_stats udp_stats; + struct zxdh_en_queue_stats *q_stats; +}; + +struct zxdh_vlan_dev { + uint8_t qos; + uint8_t rsv; + uint16_t protcol; + uint16_t vlan_id; +}; + +/* drs sec */ +typedef struct { + uint64_t SecVAddr; /*每个设备的sec私有内存的虚拟基地址*/ + uint64_t SecPAddr; /*每个设备的sec私有内存的物理基地址*/ + uint32_t SecMemSize; /*每个设备的sec私有内存的大小*/ +} zxdh_sec_pri; + +struct zxdh_sec_info { + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + + uint16_t desc_num; + uint8_t queue_pairs; + uint32_t phy_index; + uint64_t notify_phy_addr; + + uint64_t bar0_phy_addr; + uint64_t bar0_vir_addr; + uint64_t bar0_size; + uint16_t pcie_id; + struct pci_dev *pdev; +}; + +struct zxdh_ethtool_table { + struct ethtool_rx_flow_spec rfs; + uint32_t loc; + uint32_t index; + bool is_used; +}; + +struct zxdh_flow_steering { + struct zxdh_ethtool_table ethtool_fs[ETHTOOL_FD_MAX_NUM]; + uint32_t tot_num_rules; +}; +struct en_device_config { + uint16_t rx_queue_size; + uint16_t tx_queue_size; + uint16_t curr_combined; + uint32_t hash_mode; + uint8_t hash_func; + uint8_t dev_addr[6]; + uint32_t queue_map[ZXDH_INDIR_RQT_SIZE]; + uint8_t vlan_trunk_bitmap[VLAN_BITMAP_BYTE_SIZE]; + struct recover_mac pf_recover_mac; +}; + +struct zxdh_pkt_file_info { + uint8_t *pkt_addr_array; + uint32_t pkt_buf_len; +}; + +struct zxdh_pkt_save_file { + struct file *log_file; + uint8_t enable_pkt_num_mode; + uint32_t pkt_file_size; + uint32_t pkt_set_count; + uint32_t is_stop; + uint32_t pkt_rbuf_idx; + uint32_t pkt_ubuf_idx; + uint32_t pkt_cur_num; + char file_path[150]; + loff_t file_pos; + size_t total_written_bytes; +}; + +struct zxdh_en_device { + struct dh_core_dev *parent; + struct net_device *netdev; + struct device *dmadev; + void *msgq_dev; + struct zxdh_en_if *ops; 
+ struct zxdh_en_hw_stats hw_stats; + struct zxdh_en_vport_stats pre_stats; + struct zxdh_vlan_dev vlan_dev; + + uint32_t device_id; + uint32_t vendor_id; + + uint64_t driver_feature; + uint64_t device_feature; + uint64_t guest_feature; + + struct list_head vqs_list; + spinlock_t vqs_list_lock; + uint32_t indir_rqt[ZXDH_INDIR_RQT_SIZE]; + + int32_t channels_num; + struct zxdh_flow_steering fs; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + /* array of all queues for house-keeping */ + struct zxdh_pci_vq_info **vqs; + + struct send_queue *sq; + struct receive_queue *rq; + uint32_t status; + + /* Max # of queue pairs supported by the device */ + uint16_t curr_queue_pairs; + uint16_t max_queue_pairs; /* max_vq_pairs + msg_qpairs */ + uint16_t max_vq_pairs; + uint16_t xdp_queue_pairs; + + uint16_t old_queue_pairs; /* for selq flow_map attrbuite group */ + + bool xdp_enabled; + + enum zxdh_device_state device_state; + bool need_msgq; + /* Host can handle any s/g split between our header and packet data */ + bool any_header_sg; + bool mergeable_rx_bufs; + /* Packet custom queue header size */ + uint8_t hdr_len; + uint8_t hdr_1588_len; + /* Work struct for refilling if we run low on memory. 
*/ + struct delayed_work refill; + + /* CPU hotplug instances for online & dead */ + struct hlist_node node; + struct hlist_node node_dead; + bool fast_unload; + bool vqmb_port_ctl; + + bool dtp_drs_offload; + + uint32_t phy_index[ZXDH_MAX_QUEUES_NUM]; + + uint8_t link_check_bit; + uint8_t pannel_id; + uint8_t rsv[2]; + + uint16_t ep_bdf; + uint64_t spec_sbdf; /* special bdf, 用于rdma持久化配置文件路径创建 */ + uint16_t pcie_id; + /* vfunc_active */ + uint16_t slot_id; + uint16_t vport; + uint8_t phy_port; + uint8_t panel_id; + uint8_t hash_search_idx; + + uint32_t link_speed; + bool link_up; + uint8_t duplex; + + uint32_t speed; + uint32_t curr_speed_modes; + uint32_t autoneg_enable; + uint32_t supported_speed_modes; + uint32_t advertising_speed_modes; + + bool promisc_enabled; + bool allmulti_enabled; + uint32_t pflags; + uint8_t clock_no; + uint32_t msglevel; + uint32_t wol_support; + uint32_t wolopts; + uint8_t fw_version[ETHTOOL_FWVERS_LEN]; + uint8_t fw_version_len; + uint32_t vf_1588_call_np_num; + uint32_t ptp_tc_enable_opt; + uint32_t delay_statistics_enable; + + struct work_struct vf_link_info_update_work; + struct work_struct link_info_irq_update_vf_work; + struct work_struct link_info_irq_process_work; + struct work_struct link_info_irq_update_np_work; + struct work_struct rx_mode_set_work; + struct work_struct plug_adev_work; + struct work_struct unplug_adev_work; + struct work_struct smart_nic_copy_work; + + uint8_t curr_unicast_num; + uint8_t curr_multicast_num; + struct work_struct pf_notify_vf_link_state_work; + struct work_struct pf2vf_msg_proc_work; + struct work_struct service_task; + struct work_struct service_riscv_task; + struct timer_list service_timer; + struct timer_list service_riscv_timer; + struct work_struct riscv2aux_msg_proc_work; + struct work_struct capture_save_file_work; + /* QoS DCB */ + struct zxdh_dcbnl_para dcb_para; + struct zxdh_dcbnl_ets_switch_info ets_info; + + /* SEC */ + zxdh_sec_pri drs_sec_pri; + struct zxdh_sec_info *sec_info; + 
uint32_t sec_phy_index[256]; + resource_size_t notify_phy_addr; + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + DECLARE_HASHTABLE(flow_map_hash, ilog2(ZXDH_MAX_PAIRS_NUM)); +#endif + /* initialization completion flag */ + uint8_t init_comp_flag; + + struct notifier_block ipv6_notifier; + struct notifier_block vxlan_notifier; + + /* just for hardware bond */ + bool is_hwbond; + bool is_primary_port; + bool is_rdma_aux_plug; + struct zxdh_bond_device *hardware_bond; + uint64_t last_tx_vport_ssvpc_packets; + /* link-down-on-close */ + bool link_down_on_close; + bool enable_1588; +#ifdef CONFIG_INET + bool local_lb_enable; +#endif + unsigned long state; + uint8_t pkt_dev_flag; + uint8_t pkt_cap_switch; + uint8_t pkt_save_file_flag; + uint8_t pkt_file_num; + uint8_t pkt_addr_marked; + uint32_t pkt_dev_speed; + struct zxdh_pkt_file_info *pkt_file_info; + struct zxdh_pkt_save_file pkt_save_file; + struct workqueue_struct *pkt_wq; + + struct en_device_config eth_config; + struct sockaddr last_np_mac_addr; + + uint32_t board_type; + bool is_multi_ep; + bool quick_remove; + bool time_sync_done; +}; + +struct zxdh_en_priv { + struct zxdh_en_device edev; + struct mutex lock; + struct dh_eq_table eq_table; + struct dh_events *events; +}; + +typedef struct { + uint8_t mac_addr[ETH_ALEN]; +} MacAddress; + +typedef struct { + uint32_t mac_num; + uint32_t target_vf; + union { + uint32_t unicast_add_count; + uint32_t unicast_del_count; + }; + union { + uint32_t multicast_add_count; + uint32_t multicast_del_count; + }; + MacAddress unicast_mac_array[128]; /* 用于存储多个单播MAC地址的链表 */ + MacAddress multicast_mac_array[32]; /* 用于存储多个组播MAC地址的链表 */ +} mac_config_info; + +typedef struct { + uint32_t src_vf; /* 被转移的vf */ + uint32_t dst_vf; /* 转移的vf*/ +} mac_transfer_info; + +struct dhtool_set_vf_mac_msg { + enum { MAC_ADD, + MAC_DEL, + MAC_TRANSFER } action; /* 配置mac动作 */ + union { + mac_transfer_info mac_transfer; + mac_config_info mac_config; + }; +}; + +typedef enum { + MAC_CONFIG_SUCCESS = 0, /* 
配置mac成功 */ + MAC_CONFIG_FAILED = 1, /* 配置mac失败*/ + MAC_ALREADY_EXISTS_IN_OTHER_VF = 2, /* mac已经存在 */ + UNICAST_MAC_NUM_BEYOND_MAXNUM = 3, /* 单播mac数量超过上限*/ + MULTICAST_MAC_NUM_BEYOND_MAXNUM = 4, /* 组播mac数量超过上限*/ + UNICAST_MAC_NOT_EXISTS = 5, /* 单播mac不存在 */ + MULTICAST_MAC_NOT_EXISTS = 6, /* 组播mac不存在 */ + UNICAST_MAC_TRANSFER_FAILED = 7, /* 单播迁移异常 */ + MULTICAST_MAC_TRANSFER_FAILED = 8, /* 组播迁移异常 */ + VF_ERROR = 9, /* vf异常 */ +} VF_MAC_SET_RET; + +struct padded_zxdh_net_hdr { + struct zxdh_net_hdr_tx hdr; + /* + * hdr is in a separate sg buffer, and data sg buffer shares same page + * with this header sg. This padding makes next sg 16 byte aligned + * after the header. + */ + char padding[4]; +}; + +#define DEV_UNICAST_MAX_NUM 128 /* 每个PF/VF存储的单播mac转发表上限 */ +#define DEV_MULTICAST_MAX_NUM 32 /* 每个PF/VF存储的组播mac转发表上限 */ +#define UNICAST_MAX_NUM (16 * 257) +#define MULTICAST_MAX_NUM (4 * 257) + +#define EXTRACT_BUS(bdf) ((bdf >> 8) & 0xff) /* 从BDF号中提取bus */ +#define EXTRACT_DEVICE(bdf) ((bdf >> 3) & 0x1f) /* 从BDF号中提取device */ +#define DEVICE_RANGE 31 /* 每个bus下面可以挂载的vf设备数量 */ + +typedef struct mac_queue { + uint8_t addr[DEV_MULTICAST_MAX_NUM][ETH_ALEN]; + uint8_t count; +} mac_queue; + +int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv); +void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv); +int32_t zxdh_ip6mac_add(struct zxdh_en_device *en_dev, const uint32_t *addr6, + const uint8_t *ip6mac); +int32_t zxdh_ip6mac_del(struct zxdh_en_device *en_dev, const uint32_t *addr6, + const uint8_t *ip6mac); +int32_t zxdh_ip6mac_del_safe(struct zxdh_en_device *en_dev, + const uint32_t *addr6, const uint8_t *ip6mac); +void zxdh_ip6mac_del_work_handler(struct work_struct *work); +int32_t zxdh_ip6mac_add_safe(struct zxdh_en_device *en_dev, + const uint32_t *addr6, const uint8_t *ip6mac); +void zxdh_ip6mac_add_work_handler(struct work_struct *work); +int32_t zxdh_ip4mac_add(struct zxdh_en_device *en_dev, const uint8_t *ip4mac, + uint8_t action); +int32_t 
zxdh_ip4mac_del(struct zxdh_en_device *en_dev, const uint8_t *ip4mac, + uint8_t action); +#ifdef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +uint16_t zxdh_en_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev); +#else +uint16_t zxdh_en_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); +#endif +void zxdh_flow_map_cleanup(struct zxdh_en_priv *en_priv); +int32_t zxdh_flow_map_init(struct zxdh_en_priv *en_priv); +int32_t zxdh_flow_map_update_sysfs(struct net_device *netdev); +void zxdh_netdev_addr_set(struct net_device *dev, const u8 *addr); +extern void zxdh_netdev_features_over_dtp(struct net_device *netdev); +int32_t set_feature_rxhash(struct zxdh_en_device *en_dev, bool enable); +int32_t set_feature_ntuple(struct zxdh_en_device *en_dev, bool enable); +int32_t zxdh_pf_add_vf_unicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg); +int32_t zxdh_pf_del_vf_unicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg); +int32_t zxdh_pf_add_vf_multicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg); +int32_t zxdh_pf_del_vf_multicast_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg); +int32_t zxdh_pf_transfer_vf_mac(struct zxdh_en_device *en_dev, uint32_t src_vf, + uint32_t dst_vf); +int32_t zxdh_pflags_update(struct net_device *netdev, uint8_t flag, + bool enable); +int32_t zxdh_port_enable(struct zxdh_en_device *en_dev, bool enable); +int32_t zxdh_en_sync_features(struct zxdh_en_device *en_dev, + netdev_features_t want_features); +int32_t zxdh_en_config_mtu_to_np(struct net_device *netdev, int32_t mtu_value); +int32_t zxdh_vlan_trunk_recover(DPP_PF_INFO_T *pf_info, + uint8_t *vlan_trunk_bitmap); + +#define ZXDH_DEVICE_STATE_CHECK_RTN(en_dev) \ + do { \ + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) \ + return -ENXIO; \ + } while (0) + +struct zxdh_rdma_if 
{ + void *(*get_rdma_netdev)(struct dh_core_dev *dh_dev); +}; + +struct zxdh_sec_if { + void *(*get_sec_info)(struct dh_core_dev *dh_dev); +}; + +struct zxdh_en_if { + uint16_t (*get_channels_num)(struct dh_core_dev *dh_dev); + int32_t (*create_vqs_channels)(struct dh_core_dev *dh_dev, void *data); + void (*destroy_vqs_channels)(struct dh_core_dev *dh_dev); + void (*switch_vqs_channel)(struct dh_core_dev *dh_dev, int32_t channel, + int32_t op); + int32_t (*vqs_channel_bind_handler)(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct dh_vq_handler *handler); + void (*vqs_channel_unbind_handler)(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num); + int32_t (*vq_bind_channel)(struct dh_core_dev *dh_dev, + int32_t channel_num, int32_t queue_index, + uint16_t vq_idx); + void (*vq_unbind_channel)(struct dh_core_dev *dh_dev, + int32_t queue_index); + int32_t (*vqs_bind_eqs)(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct list_head *vq_node); + void (*vqs_unbind_eqs)(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num); + void __iomem *(*vp_modern_map_vq_notify)(struct dh_core_dev *dh_dev, + uint32_t index, + resource_size_t *pa); + void (*vp_modern_unmap_vq_notify)(struct dh_core_dev *dh_dev, + void *priv); + int32_t (*get_vq_lock)(struct dh_core_dev *dh_dev); + int32_t (*find_valid_vqs)(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[]); + int32_t (*write_vqs_bit)(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[]); + int32_t (*write_queue_tlb)(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[], bool need_msgq); + uint16_t (*get_fw_patch)(struct dh_core_dev *dh_dev); + int32_t (*release_vq_lock)(struct dh_core_dev *dh_dev); + void (*activate_phy_vq)(struct dh_core_dev *dh_dev, uint32_t phy_index, + int32_t queue_size, uint64_t desc_addr, + uint64_t driver_addr, uint64_t device_addr); + void (*de_activate_phy_vq)(struct dh_core_dev *dh_dev, + uint32_t phy_index); + void 
(*set_status)(struct dh_core_dev *dh_dev, uint8_t status); + uint8_t (*get_status)(struct dh_core_dev *dh_dev); + uint8_t (*get_cfg_gen)(struct dh_core_dev *dh_dev); + bool (*get_rp_link_status)(struct dh_core_dev *dh_dev); + void (*set_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, + int32_t vf_id); + void (*get_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, + int32_t vf_id); + void (*set_mac)(struct dh_core_dev *dh_dev, uint8_t *mac); + void (*get_mac)(struct dh_core_dev *dh_dev, uint8_t *mac); + uint64_t (*get_features)(struct dh_core_dev *dh_dev); + void (*set_features)(struct dh_core_dev *dh_dev, uint64_t features); + uint16_t (*get_queue_num)(struct dh_core_dev *dh_dev); + uint16_t (*get_queue_size)(struct dh_core_dev *dh_dev, uint32_t index); + void (*set_queue_size)(struct dh_core_dev *dh_dev, uint32_t index, + uint16_t size); + void (*set_queue_enable)(struct dh_core_dev *dh_dev, uint16_t index, + bool enable); + uint16_t (*get_epbdf)(struct dh_core_dev *dh_dev); + uint64_t (*get_spec_sbdf)(struct dh_core_dev *dh_dev); + bool (*is_multi_ep)(struct dh_core_dev *dh_dev); + uint16_t (*get_vport)(struct dh_core_dev *dh_dev); + uint16_t (*get_pcie_id)(struct dh_core_dev *dh_dev); + uint16_t (*get_slot_id)(struct dh_core_dev *dh_dev); + bool (*is_bond)(struct dh_core_dev *dh_dev); + bool (*is_upf)(struct dh_core_dev *dh_dev); + enum dh_coredev_type (*get_coredev_type)(struct dh_core_dev *dh_dev); + struct pci_dev *(*get_pdev)(struct dh_core_dev *dh_dev); + uint64_t (*get_bar_virt_addr)(struct dh_core_dev *dh_dev, + uint8_t bar_num); + uint64_t (*get_bar_phy_addr)(struct dh_core_dev *dh_dev, + uint8_t bar_num); + uint64_t (*get_bar_size)(struct dh_core_dev *dh_dev, uint8_t bar_num); + int32_t (*msg_send_cmd)(struct dh_core_dev *dh_dev, uint16_t module_id, + void *msg, void *ack, + struct zxdh_bar_extra_para *para); + int32_t (*async_eq_enable)(struct dh_core_dev *dh_dev, + struct dh_eq_async *eq, const char *name, + bool attach); + void 
(*aux_nh_attach)(struct dh_core_dev *dh_dev, struct dh_nb *nb, + bool attach); + struct zxdh_vf_item *(*get_vf_item)(struct dh_core_dev *dh_dev, + uint16_t vf_idx); + void (*set_pf_link_up)(struct dh_core_dev *dh_dev, bool link_up); + bool (*get_pf_link_up)(struct dh_core_dev *dh_dev); + void (*update_pf_link_info)(struct dh_core_dev *dh_dev, + struct link_info_struct *link_info_val); + int32_t (*get_pf_drv_msg)(struct dh_core_dev *dh_dev, + uint8_t *drv_version, + uint8_t *drv_version_len); + void (*set_vepa)(struct dh_core_dev *dh_dev, bool setting); + bool (*get_vepa)(struct dh_core_dev *dh_dev); + void (*set_bond_num)(struct dh_core_dev *dh_dev, bool add); + bool (*if_init)(struct dh_core_dev *dh_dev); + int32_t (*request_port)(struct dh_core_dev *dh_dev, void *data); + int32_t (*release_port)(struct dh_core_dev *dh_dev, uint32_t port_id); + void (*get_link_info_from_vqm)(struct dh_core_dev *dh_dev, + uint8_t *link_up); + void (*set_vf_link_info)(struct dh_core_dev *dh_dev, uint16_t vf_idx, + uint8_t link_up); + bool (*get_vf_is_probe)(struct dh_core_dev *dh_dev, uint16_t vf_idx); + void (*set_pf_phy_port)(struct dh_core_dev *dh_dev, uint8_t phy_port); + void (*set_rdma_netdev)(struct dh_core_dev *dh_dev, void *data); + uint8_t (*get_pf_phy_port)(struct dh_core_dev *dh_dev); + void (*set_init_comp_flag)(struct dh_core_dev *dh_dev, uint8_t flag); + struct zxdh_ipv6_mac_tbl *(*get_ip6mac_tbl)(struct dh_core_dev *dh_dev); + struct device *(*get_dma_dev)(struct dh_core_dev *dh_dev); + void (*unplug_adev)(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type); + int32_t (*plug_adev)(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type); + bool (*is_nic)(struct dh_core_dev *dh_dev); + bool (*is_special_bond)(struct dh_core_dev *dh_dev); + uint8_t (*get_qpairs)(struct dh_core_dev *dh_dev); + int32_t (*eth_config_recover)(struct net_device *netdev); + void (*eth_config_show)(struct net_device *netdev); + int32_t (*events_call_chain)(struct dh_core_dev 
*dh_dev, + unsigned long type, void *data); + int32_t (*get_cpl_timeout_if_mask)(struct dh_core_dev *dh_dev); + int32_t (*set_cpl_timeout_mask)(struct dh_core_dev *dh_dev, + uint32_t mask); + int32_t (*get_hp_irq_ctrl_status)(struct dh_core_dev *dh_dev); + int32_t (*set_hp_irq_ctrl_status)(struct dh_core_dev *dh_dev, + uint32_t status); + uint32_t (*get_dev_type)(struct dh_core_dev *dh_dev); + bool (*if_suport_np_ext_stats)(struct dh_core_dev *dh_dev); + struct zxdh_np_ext_stats *(*get_np_ext_stats)( + struct dh_core_dev *dh_dev, uint8_t panel_id); + void (*set_sec_info)(struct dh_core_dev *dh_dev, void *data); + bool (*is_drs_sec_enable)(struct dh_core_dev *dh_dev); + bool (*is_fw_feature_support)(struct dh_core_dev *dh_dev, + uint32_t feature); + bool (*is_pf_rate_enable)(struct dh_core_dev *dh_dev, + uint32_t *pf_fc_val); + uint16_t (*get_ovs_pf_vfid)(struct dh_core_dev *dh_dev); + uint8_t (*get_board_type)(struct dh_core_dev *dh_dev); + bool (*is_hwbond)(struct dh_core_dev *dh_dev, bool is_hwbond, + bool update_pf); + bool (*is_rdma_aux_plug)(struct dh_core_dev *dh_dev, + bool is_rdma_aux_plug, bool update_pf); + bool (*is_primary_port)(struct dh_core_dev *dh_dev, + bool is_primary_port, bool update_pf); + void (*optim_hardware_bond_time)(struct dh_core_dev *dh_dev, + bool enable); + int32_t (*update_hb_file_val)(struct dh_core_dev *dh_dev, + uint64_t spec_sbdf, const char *file_name, + bool flag); + bool (*is_rdma_enable)(struct dh_core_dev *dh_dev); +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c new file mode 100644 index 000000000000..292ce4819772 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c @@ -0,0 +1,854 @@ +//#include +#include "../../en_aux.h" +#include "en_dcbnl.h" +#include "en_np/qos/include/dpp_drv_qos.h" +#include "en_aux/en_aux_cmd.h" +#include "en_dcbnl_api.h" + +uint32_t g_maxrate_num; +static int 
zxdh_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t tc = 0; + uint32_t j = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_ieee_getets: coredev type is not a PF"); + return -EOPNOTSUPP; + } + + ets->willing = 0; + + ets->ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS; + + memcpy(ets->tc_tsa, en_dev->dcb_para.ets_cfg.tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(ets->tc_tx_bw, en_dev->dcb_para.ets_cfg.tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(ets->prio_tc, en_dev->dcb_para.ets_cfg.prio_tc, + sizeof(ets->prio_tc)); + + for (tc = 0; tc < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; tc++) { + if (ets->tc_tsa[tc] != IEEE_8021QAZ_TSA_ETS) { + ets->tc_tx_bw[tc] = 0; + } + } + + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_INFO( + " idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n", + j, ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]); + } + + return 0; +} + +static int zxdh_dcbnl_check_ets_maxtc(struct ieee_ets *ets) +{ + uint32_t i; + + for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) { + if (ets->prio_tc[i] >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS) { + LOG_ERR("dcbnl_check_ets: Failed! TC value greater than max(%d)\n", + ZXDH_DCBNL_MAX_TRAFFIC_CLASS); + return 1; + } + } + return 0; +} + +static int zxdh_dcbnl_check_ets_tcbw(struct ieee_ets *ets) +{ + bool have_ets_tc = false; + uint32_t bw_sum = 0; + uint32_t i; + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + have_ets_tc = true; + bw_sum += ets->tc_tx_bw[i]; + } + } + + if (have_ets_tc && ((bw_sum != 100) && (bw_sum != 0))) { + LOG_ERR("dcbnl_check_ets_tcbw: Failed! 
ETS BW sum is illegal\n"); + return 1; + } + + return 0; +} + +static int zxdh_dcbnl_check_ets_para(struct ieee_ets *ets) +{ + uint32_t err = 0; + + err = zxdh_dcbnl_check_ets_maxtc(ets); + if (err) { + return -EINVAL; + } + + err = zxdh_dcbnl_check_ets_tcbw(ets); + if (err) { + return -EINVAL; + } + LOG_INFO(" end \n"); + return 0; +} + +static int zxdh_dcbnl_ieee_divide_tc_type(struct ieee_ets *ets, + uint8_t *tc_type) +{ + uint32_t i; + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_ETS: + tc_type[i] = ets->tc_tx_bw[i] ? + ZXDH_DCBNL_ETS_TC : + ZXDH_DCBNL_ZEROBW_ETS_TC; + break; + case IEEE_8021QAZ_TSA_STRICT: + tc_type[i] = ZXDH_DCBNL_STRICT_TC; + break; + case IEEE_8021QAZ_TSA_VENDOR: + tc_type[i] = ZXDH_DCBNL_VENDOR_TC; + break; + default: + tc_type[i] = ZXDH_DCBNL_STRICT_TC; + LOG_ERR("dcbnl: %d tsa error, change to strict \n", + ets->tc_tsa[i]); + break; + } + } + + return 0; +} + +static int zxdh_dcbnl_ieee_convert_tc_bw(struct ieee_ets *ets, uint8_t *tc_type, + uint8_t *tc_tx_bw) +{ + uint32_t i; + uint8_t zero_ets_bw = 0; + uint8_t zero_ets_num = 0; + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + if (tc_type[i] == ZXDH_DCBNL_ZEROBW_ETS_TC) { + zero_ets_num++; + } + } + + if (zero_ets_num) { + zero_ets_bw = (uint8_t)ZXDH_DCBNL_MAX_BW_ALLOC / zero_ets_num; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + switch (tc_type[i]) { + case ZXDH_DCBNL_ZEROBW_ETS_TC: + tc_tx_bw[i] = zero_ets_bw; + break; + case ZXDH_DCBNL_ETS_TC: + tc_tx_bw[i] = ets->tc_tx_bw[i]; + break; + case ZXDH_DCBNL_STRICT_TC: + case ZXDH_DCBNL_VENDOR_TC: + tc_tx_bw[i] = ZXDH_DCBNL_MAX_BW_ALLOC; + break; + default: + break; + } + } + /* debug */ + LOG_INFO(" zero_ets_num:%d, zero_ets_bw:%d \n", zero_ets_num, + zero_ets_bw); + + return 0; +} + +static uint32_t zxdh_dcbnl_ieee_set_ets_para(struct zxdh_en_priv *en_priv, + struct ieee_ets *ets) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t 
tc_type[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint32_t err = 0; + uint32_t j = 0; + + zxdh_dcbnl_ieee_divide_tc_type(ets, tc_type); + + zxdh_dcbnl_ieee_convert_tc_bw(ets, tc_type, tc_tx_bw); + + err = zxdh_dcbnl_set_tc_scheduling(en_priv, tc_type, tc_tx_bw); + if (err) { + LOG_ERR("set_tc_scheduling failed \n"); + return err; + } + + err = zxdh_dcbnl_set_ets_up_tc_map(en_priv, ets->prio_tc); + if (err) { + LOG_ERR("set_prio_tc_map failed \n"); + return err; + } + + memcpy(en_dev->dcb_para.ets_cfg.tc_tsa, ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(en_dev->dcb_para.ets_cfg.tc_tx_bw, ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(en_dev->dcb_para.ets_cfg.prio_tc, ets->prio_tc, + sizeof(ets->prio_tc)); + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_DEBUG(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j, + en_dev->dcb_para.ets_cfg.tc_tsa[j], + en_dev->dcb_para.ets_cfg.tc_tx_bw[j], + en_dev->dcb_para.ets_cfg.prio_tc[j]); + + LOG_DEBUG(" idx:%d, tc_type:%d, tc_tx_bw:%d \n", j, tc_type[j], + tc_tx_bw[j]); + } + + return 0; +} + +static int zxdh_dcbnl_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t err; + uint32_t j = 0; + + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_DEBUG( + " idx:%d, ets->tc_tsa:%d, ets->tc_tx_bw:%d, ets->prio_tc:%d \n", + j, ets->tc_tsa[j], ets->tc_tx_bw[j], ets->prio_tc[j]); + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR(" coredev type is not a PF"); + return -EOPNOTSUPP; + } + + err = zxdh_dcbnl_check_ets_para(ets); + if (err) { + return err; + } + + err = zxdh_dcbnl_ieee_set_ets_para(en_priv, ets); + if (err) { + return err; + } + + return 0; +} + +static int zxdh_dcbnl_ieee_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct zxdh_en_priv *en_priv = 
netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t pfc_cur_mac_en = 0; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /*获取端口pfc使能函数*/ + ret = zxdh_en_fc_mode_get(en_dev, &pfc_cur_mac_en); + + if (0 != ret) { + LOG_ERR("zxdh_port_pfc_enable_get failed"); + return ret; + } + + if (pfc_cur_mac_en == BIT(SPM_FC_PFC_FULL)) { + pfc->pfc_en = 255; + } else { + pfc->pfc_en = 0; + } + + /*ieee要的最多8个优先级,最大延迟为7*/ + pfc->pfc_cap = 8; + pfc->delay = 7; + + return ret; +} + +static int zxdh_dcbnl_ieee_setpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t port_mac_en = 0; + uint32_t cur_port_mac_en = 0; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (pfc->pfc_en != 0 && pfc->pfc_en != 0xff) { + LOG_INFO("pfc->pfc_en input invalid: %d", pfc->pfc_en); + return EINVAL; + } + + ret = zxdh_en_fc_mode_get(en_dev, &cur_port_mac_en); + + if (pfc->pfc_en != 0) { + port_mac_en = BIT(SPM_FC_PFC_FULL); + } else if (cur_port_mac_en == BIT(SPM_FC_PFC_FULL)) { + port_mac_en = BIT(SPM_FC_NONE); + } else { + return 0; + } + + /*mac部分端口pfc使能*/ + ret |= zxdh_en_fc_mode_set(en_dev, port_mac_en); + + /*错误判断及打印*/ + if (0 != ret) { + LOG_ERR("zxdh_dcbnl_ieee_setpfc pfc_en:%c failed, %d", + pfc->pfc_en, ret); + } + + return ret; +} + +static int zxdh_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t i = 0; + uint32_t j = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("coredev type is not a PF"); + return -EOPNOTSUPP; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + if (ZXDH_DCBNL_MAXRATE_KBITPS <= + 
en_dev->dcb_para.tc_maxrate[i]) { + maxrate->tc_maxrate[i] = 0; //0 indicates unlimited + } else { + maxrate->tc_maxrate[i] = en_dev->dcb_para.tc_maxrate[i]; + } + } + + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_DEBUG(" tc:%d,tc_maxrate:%lld \n", j, + maxrate->tc_maxrate[j]); + } + + return 0; +} + +static int zxdh_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev; + uint32_t maxrate_kbps[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 }; + uint32_t err, i; + uint32_t j = 0; + uint32_t tc_td_th[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { + ZXDH_DCBNL_FLOW_TDTH_DEFAULT + }; + struct dh_core_dev *dh_dev; + struct zxdh_pf_device *pf_dev; + uint64_t oldmaxrate = 0; + + ZXDH_DCBNL_CHECK_POINT_RET(netdev, ZXDH_DCBNL_INVALID_PARA); + ZXDH_DCBNL_CHECK_POINT_RET(maxrate, ZXDH_DCBNL_INVALID_PARA); + + ZXDH_DCBNL_CHECK_POINT_RET(en_priv, ZXDH_DCBNL_INVALID_PARA); + en_dev = &en_priv->edev; + ZXDH_DCBNL_CHECK_POINT_RET(en_dev, ZXDH_DCBNL_INVALID_PARA); + + dh_dev = en_dev->parent; + ZXDH_DCBNL_CHECK_POINT_RET(dh_dev, ZXDH_DCBNL_INVALID_PARA); + + pf_dev = dh_core_priv(dh_dev->parent); + ZXDH_DCBNL_CHECK_POINT_RET(pf_dev, ZXDH_DCBNL_INVALID_PARA); + + ZXDH_DCBNL_CHECK_POINT_RET(en_dev->ops, ZXDH_DCBNL_INVALID_PARA); + ZXDH_DCBNL_CHECK_POINT_RET(en_dev->ops->get_coredev_type, + ZXDH_DCBNL_INVALID_PARA); + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("coredev type is not a PF"); + return -EOPNOTSUPP; + } + + /* Values are 64 bits and specified in Kbps */ + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + oldmaxrate = en_dev->dcb_para.tc_maxrate[i]; + + if ((maxrate->tc_maxrate[i] == 0) || + (maxrate->tc_maxrate[i] >= ZXDH_DCBNL_MAXRATE_KBITPS)) { + if (pf_dev->board_type == DH_STDB || + pf_dev->board_type == DH_STDA || + pf_dev->board_type == DH_STDC) { + tc_td_th[i] = ZXDH_DCBNL_FLOW_TDTH_DEFAULT; + 
LOG_DEBUG(" old[%u]: maxrate %llu num %u\n", i, + oldmaxrate, g_maxrate_num); + if ((g_maxrate_num > 0) && (oldmaxrate > 0) && + (oldmaxrate < ZXDH_DCBNL_MAXRATE_KBITPS)) { + g_maxrate_num--; + err = zxdh_dcbnl_set_single_td_th( + en_priv, i, tc_td_th[i]); + ZXDH_DCBNL_CHECK_RET_RETURN(err); + } + } + maxrate_kbps[i] = ZXDH_DCBNL_MAXRATE_KBITPS; + + } else if (maxrate->tc_maxrate[i] <= + ZXDH_DCBNL_MINRATE_KBITPS) { + maxrate_kbps[i] = ZXDH_DCBNL_MINRATE_KBITPS; + } else { + maxrate_kbps[i] = (uint32_t)maxrate->tc_maxrate[i]; + + /* Started by AICoder, pid:810b4i0f5ee515a147a5098fc09d1b0ef8947567 */ + if (pf_dev->board_type == DH_STDB || + pf_dev->board_type == DH_STDA || + pf_dev->board_type == DH_STDC) { + LOG_DEBUG( + " old[%u]: maxrate %llu new %u num %u\n", + i, oldmaxrate, maxrate_kbps[i], + g_maxrate_num); + tc_td_th[i] = ZXDH_DCBNL_FLOW_TDTH_DEFAULT; + if ((oldmaxrate == 0) || + (oldmaxrate >= ZXDH_DCBNL_MAXRATE_KBITPS)) { + g_maxrate_num++; + } + + if (g_maxrate_num <= MAX_RATE_LIMITED_NUM) { + tc_td_th[i] = ZXDH_DCBNL_FLOW_TDTH_OPT; + err = zxdh_dcbnl_set_single_td_th( + en_priv, i, tc_td_th[i]); + ZXDH_DCBNL_CHECK_RET_RETURN(err); + } + } + /* Ended by AICoder, pid:810b4i0f5ee515a147a5098fc09d1b0ef8947567 */ + } + } + LOG_DEBUG(" g_maxrate_num %u\n", g_maxrate_num); + + tc_td_th[0] = ZXDH_DCBNL_FLOW_TDTH_UPF; + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_DEBUG(" tc:%d,maxrate->tc_maxrate:%lld,maxrate_kbps:%d \n", + j, maxrate->tc_maxrate[j], maxrate_kbps[j]); + } + + err = zxdh_dcbnl_set_tc_maxrate(en_priv, maxrate_kbps); + if (err) { + return err; + } + + return 0; +} + +static int zxdh_dcbnl_ieee_setapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dcb_app app_old; + bool is_new = false; + int err = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR(" coredev type is not 
a PF"); + return -EOPNOTSUPP; + } + + if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || + (app->protocol >= ZXDH_DCBNL_MAX_DSCP) || + (app->priority >= ZXDH_DCBNL_MAX_PRIORITY)) { + return -EINVAL; + } + /* Save the old entry info */ + app_old.selector = IEEE_8021QAZ_APP_SEL_DSCP; + app_old.protocol = app->protocol; + app_old.priority = en_dev->dcb_para.dscp2prio[app->protocol]; + + LOG_INFO(" protocol:%d, priority:%d \n", app->protocol, app->priority); + + if (!en_dev->dcb_para.dscp_app_num) { + err = zxdh_dcbnl_set_ets_trust(en_priv, + ZXDH_DCBNL_ETS_TRUST_DSCP); + if (err) { + return err; + } + } + + if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol]) { + err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, + app->priority); + if (err) { + zxdh_dcbnl_set_ets_trust(en_priv, + ZXDH_DCBNL_ETS_TRUST_PCP); + return err; + } + } + + /* Delete the old entry if exists */ + err = dcb_ieee_delapp(netdev, &app_old); + if (err) { + is_new = true; + } + /* Add new entry and update counter */ + err = dcb_ieee_setapp(netdev, app); + if (err) { + return err; + } + if (is_new) { + en_dev->dcb_para.dscp_app_num++; + } + LOG_INFO(" dscp_app_num:%d \n", en_dev->dcb_para.dscp_app_num); + + return err; +} + +static int zxdh_dcbnl_ieee_delapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int err = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_ieee_delapp coredev type is not a PF"); + return -EOPNOTSUPP; + } + + if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) || + (app->protocol >= ZXDH_DCBNL_MAX_DSCP)) { + return -EINVAL; + } + + if (!en_dev->dcb_para.dscp_app_num) { + return -ENOENT; + } + + if (app->priority != en_dev->dcb_para.dscp2prio[app->protocol]) { + return -ENOENT; + } + + /* Delete the app entry */ + err = dcb_ieee_delapp(netdev, app); + if (err) { + return err; + } + + /* Restore to default */ 
+ err = zxdh_dcbnl_set_dscp2prio(en_priv, app->protocol, + app->protocol >> 3); + if (err) { + zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP); + return err; + } + en_dev->dcb_para.dscp_app_num--; + LOG_INFO(" protocol:%d, dscp_app_num:%d \n", app->protocol, + en_dev->dcb_para.dscp_app_num); + + if (!en_dev->dcb_para.dscp_app_num) { + err = zxdh_dcbnl_set_ets_trust(en_priv, + ZXDH_DCBNL_ETS_TRUST_PCP); + } + + return err; +} +#ifdef ZXDH_DCBNL_CEE_SUPPORT +static void zxdh_dcbnl_setpgtccfgtx(struct net_device *netdev, int tc, + uint8_t prio_type, uint8_t pgid, + uint8_t bw_pct, uint8_t up_map) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_cee_ets *cee_ets_cfg; + uint32_t i; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_setpgtccfgtx coredev type is not a PF"); + return; + } + + if ((tc < 0) || (tc >= ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) { + return; + } + + cee_ets_cfg = &en_dev->dcb_para.cee_ets_cfg; + for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) { + if (up_map & BIT(i)) { + cee_ets_cfg->prio_tc[i] = tc; + } + } + cee_ets_cfg->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS; +} +static void zxdh_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid, + uint8_t bw_pct) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_setpgbwgcfgtx coredev type is not a PF"); + return; + } + + if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) { + en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[pgid] = bw_pct; + } + LOG_INFO(" tc_tx_bw[%d]:%d \n", pgid, bw_pct); +} + +static void zxdh_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio, + uint8_t *prio_type, uint8_t *pgid, + uint8_t *bw_pct, uint8_t *up_map) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = 
&en_priv->edev; + + /* pf检查 */ + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_getpgtccfgtx coredev type is not a PF"); + return; + } + + if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY)) { + *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio]; + } +} + +static void zxdh_dcbnl_getpgbwgcfgtx(struct net_device *netdev, int pgid, + uint8_t *bw_pct) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_getpgbwgcfgtx coredev type is not a PF"); + return; + } + + if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) { + *bw_pct = en_dev->dcb_para.ets_cfg.tc_tx_bw[pgid]; + } +} + +static void zxdh_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio, + uint8_t prio_type, uint8_t pgid, + uint8_t bw_pct, uint8_t up_map) +{ + LOG_ERR("Rx PG TC Config Not Supported.\n"); +} + +static void zxdh_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid, + uint8_t bw_pct) +{ + LOG_ERR("Rx PG BWG Config Not Supported.\n"); +} + +static void zxdh_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + uint8_t *prio_type, uint8_t *pgid, + uint8_t *bw_pct, uint8_t *up_map) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_getpgtccfgrx coredev type is not a PF"); + return; + } + + if ((prio >= 0) && (prio < ZXDH_DCBNL_MAX_PRIORITY)) { + *pgid = en_dev->dcb_para.ets_cfg.prio_tc[prio]; + } +} + +static void zxdh_dcbnl_getpgbwgcfgrx(struct net_device *netdev, int pgid, + uint8_t *bw_pct) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_getpgbwgcfgrx coredev type is not a PF"); + return; + } 
+ + if ((pgid >= 0) && (pgid < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) { + *bw_pct = 0; + } +} + +static uint8_t zxdh_dcbnl_setall(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct ieee_ets ets = { 0 }; + uint32_t i = 0; + uint32_t err = 0; + uint32_t j = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + LOG_ERR("zxdh_dcbnl_setall coredev type is not a PF"); + return 1; + } + + ets.ets_cap = ZXDH_DCBNL_MAX_TRAFFIC_CLASS; + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + ets.tc_tx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i]; + ets.tc_rx_bw[i] = en_dev->dcb_para.cee_ets_cfg.tc_tx_bw[i]; + ets.tc_tsa[i] = en_dev->dcb_para.cee_ets_cfg.tc_tsa[i]; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) { + ets.prio_tc[i] = en_dev->dcb_para.cee_ets_cfg.prio_tc[i]; + } + /* debug */ + for (j = 0; j < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; j++) { + LOG_INFO(" idx:%d, tc_tsa:%d, tc_tx_bw:%d, prio_tc:%d \n", j, + ets.tc_tx_bw[j], ets.tc_tsa[j], ets.prio_tc[j]); + } + + err = zxdh_dcbnl_check_ets_para(&ets); + if (err) { + return err; + } + + err = zxdh_dcbnl_ieee_set_ets_para(en_priv, &ets); + if (err) { + return err; + } + + return 0; +} + +static uint8_t zxdh_dcbnl_getstate(struct net_device *netdev) +{ + return ZXDH_DCBNL_CEE_STATE_UP; +} + +static uint8_t zxdh_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + return 0; +} +#endif + +static const struct dcbnl_rtnl_ops zxdh_dcbnl_ops = { + .ieee_getets = zxdh_dcbnl_ieee_getets, + .ieee_setets = zxdh_dcbnl_ieee_setets, + .ieee_getpfc = zxdh_dcbnl_ieee_getpfc, + .ieee_setpfc = zxdh_dcbnl_ieee_setpfc, + + .ieee_getmaxrate = zxdh_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = zxdh_dcbnl_ieee_setmaxrate, + + .ieee_setapp = zxdh_dcbnl_ieee_setapp, + .ieee_delapp = zxdh_dcbnl_ieee_delapp, + +#ifdef ZXDH_DCBNL_CEE_SUPPORT + /* CEE not support */ + .setall = zxdh_dcbnl_setall, + + .getstate = zxdh_dcbnl_getstate, + 
.setstate = zxdh_dcbnl_setstate, + + .setpgtccfgtx = zxdh_dcbnl_setpgtccfgtx, + .setpgbwgcfgtx = zxdh_dcbnl_setpgbwgcfgtx, + .getpgtccfgtx = zxdh_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = zxdh_dcbnl_getpgbwgcfgtx, + + .setpgtccfgrx = zxdh_dcbnl_setpgtccfgrx, + .setpgbwgcfgrx = zxdh_dcbnl_setpgbwgcfgrx, + .getpgtccfgrx = zxdh_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = zxdh_dcbnl_getpgbwgcfgrx, +#endif +}; + +uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + uint32_t err = 0; + err = zxdh_dcbnl_set_tm_gate(en_priv, 1); + if (err) { + LOG_ERR(" set_tm_gate close failed \n"); + } + LOG_INFO(" tm mcode gate open "); + return err; +} +EXPORT_SYMBOL(zxdh_dcbnl_set_tm_pport_mcode_gate_open); + +uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + uint32_t err = 0; + err = zxdh_dcbnl_set_tm_gate(en_priv, 0); + if (err) { + LOG_ERR(" set_tm_gate close failed \n"); + } + LOG_INFO(" tm mcode gate close "); + return err; +} +EXPORT_SYMBOL(zxdh_dcbnl_set_tm_pport_mcode_gate_close); + +uint32_t zxdh_dcbnl_initialize(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t err = 0; + + LOG_INFO("%s dcbnl init begin\n", netdev->name); + + err = zxdh_dcbnl_init_port_speed(en_priv); + if (err) { + LOG_INFO("dcbnl_init_ets: init_port_speed failed \n"); + //return err; + } + + err = zxdh_dcbnl_init_ets_scheduling_tree(en_priv); + if (err) { + LOG_ERR("dcbnl_init_ets: init_ets_scheduling_tree failed \n"); + return err; + } + + zxdh_dcbnl_printk_ets_tree(en_priv); + + en_dev->dcb_para.init_flag = ZXDH_DCBNL_INIT_FLAG; + netdev->dcbnl_ops = &zxdh_dcbnl_ops; + + //zxdh_dcbnl_set_tm_pport_mcode_gate_open(netdev); + LOG_INFO("%s dcbnl init ok ", netdev->name); + return 0; +} + +uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev) +{ + struct 
zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if ((en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) || + (!zxdh_en_is_panel_port(en_dev))) { + return 0; + } + LOG_INFO("%s dcbnl uninit begin\n", netdev->name); + + en_dev->dcb_para.init_flag = 0; + netdev->dcbnl_ops = NULL; + zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev); + + zxdh_dcbnl_free_flow_resources(en_priv); + + zxdh_dcbnl_free_se_resources(en_priv); + + LOG_INFO("%s dcbnl uninit ok ", netdev->name); + return 0; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h new file mode 100644 index 000000000000..fd6810133a7b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h @@ -0,0 +1,256 @@ +#ifndef __ZXDH_EN_DCBNL_H__ +#define __ZXDH_EN_DCBNL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* 启用dcb会大幅度增加初始化时间,暂时先注释 */ +#define ZXDH_DCBNL_OPEN + +/* CEE not support */ +//#define ZXDH_DCBNL_CEE_SUPPORT + +#define ZXDH_DCBNL_INIT_FLAG (0x5a5a5a5a) +#define ZXDH_DCBNL_NULL_ID (0xffffffff) + +#define ZXDH_DCBNL_MAX_PRIORITY (8) +#define ZXDH_DCBNL_MAX_TRAFFIC_CLASS (8) + +#define ZXDH_DCBNL_MAX_DSCP (64) + +#define ZXDH_DCBNL_MAX_BW_ALLOC (100) +#define ZXDH_DCBNL_MAX_WEIGHT (512) + +#define ZXDH_DCBNL_RATEUNIT_K (1000) +#define ZXDH_DCBNL_RATEUNIT_M (1000000) +#define ZXDH_DCBNL_RATEUNIT_G (1000000000) +#define ZXDH_DCBNL_MAXRATE_KBITPS (400 * 1000000) +#define ZXDH_DCBNL_MINRATE_KBITPS (64) + +#define ZXDH_DCBNL_INITRATE_KBITPS (400 * 1000000) + +#define ZXDH_DCBNL_FLOW_RATE_CIR (0) + +#define ZXDH_DCBNL_FLOW_RATE_CBS (2000) +#define ZXDH_DCBNL_FLOW_RATE_EBS (4000) +#define ZXDH_DCBNL_PORT_RATE_CBS (4000) + +#define ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH (0) +#define ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH (0) + +#define ZXDH_DCBNL_FLOW_TDTH (200) +#define ZXDH_DCBNL_FLOW_TDTH_UPF (900) //待更改成最优值 +#define ZXDH_DCBNL_FLOW_TDTH_OPT (500) +#define 
ZXDH_DCBNL_FLOW_TDTH_DEFAULT (100) + +#define ZXDH_DCBNL_CEE_STATE_UP (1) + +#define ZXDH_DCBNL_MAX_SE_NODE_NUM (12) +#define ZXDH_DCBNL_MAX_TREE_LEVEL (7) +#define ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL (4) +#define ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL (0) + +#define ZXDH_DCBNL_GSCHID_ID_MASK (0xFFFF) +#define ZXDH_DCBNL_GSCHID_ID_SHIFT (0) + +#define ZXDH_DCBNL_GET_GSCHID_MSG(val, mask, shift) ((val >> shift) & mask) + +#define ZXDH_DCBNL_INVALID_PARA (0xffffffff) + +#define ZXDH_DCBNL_CHECK_MAX_WITH_RETURN(val, max, ret) \ + do { \ + if (val >= max) \ + return ret; \ + } while (0) + +#define ZXDH_DCBNL_CHECK_RANGE_WITH_RETURN(val, min, max, ret) \ + do { \ + if (!(min <= val && val <= max)) \ + return ret; \ + } while (0) + +#define ZXDH_DCBNL_CHECK_POINT_RET(point, ret) \ + do { \ + if (NULL == point) { \ + LOG_ERR("\n %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return ret; \ + } \ + } while (0) + +#define ZXDH_DCBNL_CHECK_POINT(point) \ + do { \ + if (NULL == point) { \ + LOG_ERR("\n %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXDH_DCBNL_CHECK_RET_RETURN(ret) \ + do { \ + if (0 != ret) { \ + LOG_ERR("\n %s:%d error FUNCTION : %s!\n", __FILE__, \ + __LINE__, __FUNCTION__); \ + return ret; \ + } \ + } while (0) + +#define MAX_RATE_LIMITED_NUM (8) +enum zxdh_dcbnl_ets_trust { + ZXDH_DCBNL_ETS_TRUST_PCP = 0, + ZXDH_DCBNL_ETS_TRUST_DSCP = 1, +}; + +enum zxdh_dcbnl_ets_tc_tsa { + ZXDH_DCBNL_VENDOR_TC = 0, + ZXDH_DCBNL_STRICT_TC = 1, + ZXDH_DCBNL_ETS_TC = 2, + ZXDH_DCBNL_ZEROBW_ETS_TC = 3, +}; + +enum zxdh_dcbnl_ets_node_link_point { + ZXDH_DCBNL_ETS_NODE_NULL = 0, + ZXDH_DCBNL_ETS_NODE_VENDOR_C = 1, + ZXDH_DCBNL_ETS_NODE_STRICT_C = 2, + ZXDH_DCBNL_ETS_NODE_ETS_C = 3, + ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C = 4, + ZXDH_DCBNL_ETS_NODE_VENDOR_E = 5, + ZXDH_DCBNL_ETS_NODE_STRICT_E = 6, + ZXDH_DCBNL_ETS_NODE_ETS_E = 7, + ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E = 8, +}; + +enum zxdh_dcbnl_se_flow_node_type { + ZXDH_DCBNL_ETS_NODE_FQ = 0, + ZXDH_DCBNL_ETS_NODE_FQ2 = 1, + ZXDH_DCBNL_ETS_NODE_FQ4 = 2, + ZXDH_DCBNL_ETS_NODE_FQ8 = 3, + ZXDH_DCBNL_ETS_NODE_SP = 4, + ZXDH_DCBNL_ETS_NODE_WFQ = 5, + ZXDH_DCBNL_ETS_NODE_WFQ2 = 6, + ZXDH_DCBNL_ETS_NODE_WFQ4 = 7, + ZXDH_DCBNL_ETS_NODE_WFQ8 = 8, + ZXDH_DCBNL_ETS_NODE_FLOW = 9, +}; + +struct zxdh_dcbnl_ets_se_node { + struct zxdh_dcbnl_ets_se_node *se_next; + uint64_t gsch_id; + uint32_t node_idx; + uint32_t node_type; + uint32_t se_id; + uint32_t se_link_id; + uint32_t se_link_weight; + uint32_t se_link_sp; + uint32_t link_point; +}; + +struct zxdh_dcbnl_ets_flow_node { + struct zxdh_dcbnl_ets_flow_node *flow_next; + uint64_t gsch_id; + uint32_t flow_id; + uint32_t tc_id; + uint32_t tc_type; + uint32_t tc_tx_bw; + uint32_t td_th; + uint32_t c_linkid; + uint32_t c_weight; + uint32_t c_sp; + uint32_t c_rate; + uint32_t mode; + uint32_t e_linkid; + uint32_t e_weight; + uint32_t e_sp; + uint32_t e_rate; +}; + +struct zxdh_dcbnl_ets_node_list_head { + struct 
zxdh_dcbnl_ets_se_node *se_next; + struct zxdh_dcbnl_ets_flow_node *flow_next; + uint32_t node_num; +}; + +struct zxdh_dcbnl_ets_se_flow_resource { + uint32_t numq; + uint32_t level; + uint32_t flags; + uint32_t resource_id; + uint64_t gsch_id; +}; + +struct zxdh_dcbnl_se_tree_config { + uint32_t level; + uint32_t idx; + uint32_t type; + uint32_t link_level; + uint32_t link_idx; + uint32_t link_weight; + uint32_t link_sp; + uint32_t link_point; +}; + +struct zxdh_dcbnl_tc_flow_config { + uint32_t link_level; + uint32_t tc_type; + uint32_t tc_tx_bw; + uint32_t c_rate; + uint32_t e_rate; + uint32_t td_th; +}; + +struct zxdh_dcbnl_tc_flow_shape_para { + uint32_t cir; + uint32_t cbs; + uint32_t db_en; + uint32_t eir; + uint32_t ebs; +}; + +struct zxdh_dcbnl_ieee_ets { + uint8_t willing; + uint8_t ets_cap; + uint8_t cbs; + uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY]; +}; + +struct zxdh_dcbnl_cee_ets { + uint8_t tc_tx_bw[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint8_t tc_tsa[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint8_t prio_tc[ZXDH_DCBNL_MAX_PRIORITY]; +}; + +struct zxdh_dcbnl_para { + uint32_t init_flag; + uint32_t trust; + uint32_t dscp_app_num; + uint8_t dscp2prio[ZXDH_DCBNL_MAX_DSCP]; + uint64_t tc_maxrate[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + struct zxdh_dcbnl_ieee_ets ets_cfg; + struct zxdh_dcbnl_cee_ets cee_ets_cfg; + struct zxdh_dcbnl_ets_node_list_head + ets_node_list_head[ZXDH_DCBNL_MAX_TREE_LEVEL]; +}; + +// 只在切换ets状态时维护 +struct zxdh_dcbnl_ets_switch_info { + uint32_t cur_ets; + uint32_t tc_td_th[ZXDH_DCBNL_MAX_TRAFFIC_CLASS]; + uint32_t switch_flag; +}; + +uint32_t zxdh_dcbnl_initialize(struct net_device *netdev); +uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev); +uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_open(struct net_device *netdev); +uint32_t zxdh_dcbnl_set_tm_pport_mcode_gate_close(struct net_device *netdev); + +#ifdef __cplusplus +} +#endif + +#endif diff --git 
a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c new file mode 100644 index 000000000000..3ac6d1edc6e3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.c @@ -0,0 +1,1486 @@ +//#include +#include "../../en_aux.h" +#include "en_dcbnl.h" +#include "en_dcbnl_api.h" +#include "en_np/qos/include/dpp_drv_qos.h" +#include "en_np/table/include/dpp_tbl_tm.h" +#include "en_np/fc/include/dpp_drv_fc.h" +#include "en_np/sdk/include/api/dpp_pbu_api.h" +#include "en_np/sdk/include/api/dpp_ppu_api.h" + +uint32_t zxdh_dcbnl_get_se_flow_resources( + struct zxdh_en_device *en_dev, + struct zxdh_dcbnl_ets_se_flow_resource *tree_resource) +{ + uint64_t gsch_id = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (tree_resource->level == ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) { + err = dpp_sch_base_node_get(&pf_info, en_dev->phy_port, + &gsch_id); + } else { + err = dpp_cosq_gsch_id_add(&pf_info, en_dev->phy_port, + tree_resource->numq, + tree_resource->level, + tree_resource->flags, &gsch_id); + } + + if (err) { + LOG_ERR("dcbnl_init_ets: get se/flow resources failed, level: %d, type: %d, err:%d \n", + tree_resource->level, tree_resource->flags, err); + return err; + } + tree_resource->gsch_id = gsch_id; + tree_resource->resource_id = ZXDH_DCBNL_GET_GSCHID_MSG( + gsch_id, ZXDH_DCBNL_GSCHID_ID_MASK, ZXDH_DCBNL_GSCHID_ID_SHIFT); + /* debug */ + LOG_INFO(" gsch_id:0x%llx,resource_id:0x%x level:%d, flags:%d\n", + gsch_id, tree_resource->resource_id, tree_resource->level, + tree_resource->flags); + + return 0; +} + +uint32_t zxdh_dcbnl_find_se_link_id(struct zxdh_en_priv *en_priv, + uint32_t level, uint32_t link_level, + uint32_t link_idx, uint32_t link_sp, + uint32_t *link_id) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_se_node *se_link_node = NULL; + struct zxdh_dcbnl_ets_node_list_head 
*ets_node_list_head = + &en_dev->dcb_para.ets_node_list_head[link_level]; + + *link_id = ZXDH_DCBNL_NULL_ID; + + if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) { + if (ets_node_list_head->se_next == NULL) { + LOG_ERR("dcbnl: no nodes in the link_level: %d \n", + link_level); + return 1; + } + + se_link_node = ets_node_list_head->se_next; + + while ((NULL != se_link_node) && + (se_link_node->node_idx != link_idx)) { + se_link_node = se_link_node->se_next; + } + + if (se_link_node != NULL) { + *link_id = se_link_node->se_id + link_sp; + } else { + LOG_ERR("dcbnl: find se link_id failed, link_level: %d, link_idx: %d\n", + link_level, link_idx); + return 1; + } + } + + return 0; +} + +uint32_t +zxdh_dcbnl_save_se_resources(struct zxdh_en_priv *en_priv, + struct zxdh_dcbnl_se_tree_config *tree_node_cfg) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_se_flow_resource tree_resource = { 0 }; + struct zxdh_dcbnl_ets_se_node *new_se_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL; + uint32_t level = 0; + uint32_t link_level = 0; + uint32_t link_idx = 0; + uint32_t link_id = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + level = tree_node_cfg->level; + link_level = tree_node_cfg->link_level; + link_idx = tree_node_cfg->link_idx; + + if ((level == 0) || (level > 4) || (link_level > 5) || + (level >= link_level)) { + LOG_ERR("dcbnl_init_ets: configuration level error, level: %d, link_level: %d\n", + level, link_level); + return 1; //todo:考虑使用标准的错误定义 + } + + ets_node_list_head = &en_dev->dcb_para.ets_node_list_head[level]; + + tree_resource.numq = 1; + tree_resource.level = level; + tree_resource.flags = tree_node_cfg->type; + err = zxdh_dcbnl_get_se_flow_resources(en_dev, &tree_resource); + if (err) { + LOG_ERR("dcbnl_init_ets: get se resources failed, level: %d, idx: %d\n", + tree_resource.level, tree_node_cfg->idx); + return err; + } + + 
err = zxdh_dcbnl_find_se_link_id(en_priv, level, link_level, link_idx, + tree_node_cfg->link_sp, &link_id); + if (err) { + LOG_ERR("dcbnl_init_ets: find se link_id failed, link_level: %d, link_idx: %d\n", + link_level, link_idx); + return err; + } + + new_se_node = + kmalloc(sizeof(struct zxdh_dcbnl_ets_se_node), GFP_KERNEL); + if (NULL == new_se_node) { + LOG_ERR("dcbnl_init_ets: kmalloc se node failed\n"); + return 1; + } + + new_se_node->se_next = NULL; + new_se_node->gsch_id = tree_resource.gsch_id; + new_se_node->node_idx = tree_node_cfg->idx; + new_se_node->node_type = tree_node_cfg->type; + new_se_node->se_id = tree_resource.resource_id; + new_se_node->se_link_id = link_id; + new_se_node->se_link_weight = tree_node_cfg->link_weight; + new_se_node->se_link_sp = tree_node_cfg->link_sp; + new_se_node->link_point = tree_node_cfg->link_point; + + if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) { + err = dpp_crdt_se_link_set(&pf_info, new_se_node->se_id, + new_se_node->se_link_id, + new_se_node->se_link_weight, + new_se_node->se_link_sp); + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_crdt_se_link_set failed, level: %d, idx: %d, err:%d\n", + level, tree_node_cfg->idx, err); + kfree(new_se_node); + return err; + } + } + + new_se_node->se_next = ets_node_list_head->se_next; + ets_node_list_head->se_next = new_se_node; + + ets_node_list_head->node_num += 1; + + LOG_INFO(" level:%d, node_idx:%d, node_num:%d \n", level, + new_se_node->node_idx, ets_node_list_head->node_num); + return 0; +} + +uint32_t zxdh_dcbnl_build_ets_scheduling_tree(struct zxdh_en_priv *en_priv) +{ + uint32_t i = 0; + uint32_t err = 0; + + struct zxdh_dcbnl_se_tree_config + ets_se_config_table[ZXDH_DCBNL_MAX_SE_NODE_NUM + 1] = { + /*level idx type link_level link_idx link_weight link_sp link_point*/ + { 4, 0, ZXDH_DCBNL_ETS_NODE_WFQ, 5, 0, 1, 0, + ZXDH_DCBNL_ETS_NODE_NULL }, + { 3, 0, ZXDH_DCBNL_ETS_NODE_FQ2, 4, 0, 1, 0, + ZXDH_DCBNL_ETS_NODE_NULL }, + { 2, 0, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 0, + 
ZXDH_DCBNL_ETS_NODE_NULL }, + { 2, 1, ZXDH_DCBNL_ETS_NODE_FQ4, 3, 0, 1, 1, + ZXDH_DCBNL_ETS_NODE_NULL }, + { 1, 0, ZXDH_DCBNL_ETS_NODE_FQ, 2, 0, 1, 0, + ZXDH_DCBNL_ETS_NODE_VENDOR_C }, + { 1, 1, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 0, 1, 1, + ZXDH_DCBNL_ETS_NODE_STRICT_C }, + { 1, 2, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 2, + ZXDH_DCBNL_ETS_NODE_ETS_C }, + { 1, 3, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 0, 1, 3, + ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C }, + { 1, 4, ZXDH_DCBNL_ETS_NODE_FQ, 2, 1, 1, 0, + ZXDH_DCBNL_ETS_NODE_VENDOR_E }, + { 1, 5, ZXDH_DCBNL_ETS_NODE_FQ8, 2, 1, 1, 1, + ZXDH_DCBNL_ETS_NODE_STRICT_E }, + { 1, 6, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 2, + ZXDH_DCBNL_ETS_NODE_ETS_E }, + { 1, 7, ZXDH_DCBNL_ETS_NODE_WFQ, 2, 1, 1, 3, + ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E }, + { 0xff } + }; + + for (i = 0; i < ZXDH_DCBNL_MAX_SE_NODE_NUM && + ets_se_config_table[i].level != 0xff; + i++) { + err = zxdh_dcbnl_save_se_resources(en_priv, + &ets_se_config_table[i]); + if (err) { + LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed, entry: %d\n", + i); + return err; + } + } + + return 0; +} + +void zxdh_dcbnl_tc_map_to_link_point(uint32_t tc_type, uint32_t *c_type, + uint32_t *e_type) +{ + switch (tc_type) { + case ZXDH_DCBNL_VENDOR_TC: + *c_type = ZXDH_DCBNL_ETS_NODE_VENDOR_C; + *e_type = ZXDH_DCBNL_ETS_NODE_VENDOR_E; + break; + + case ZXDH_DCBNL_STRICT_TC: + *c_type = ZXDH_DCBNL_ETS_NODE_STRICT_C; + *e_type = ZXDH_DCBNL_ETS_NODE_STRICT_E; + break; + + case ZXDH_DCBNL_ETS_TC: + *c_type = ZXDH_DCBNL_ETS_NODE_ETS_C; + *e_type = ZXDH_DCBNL_ETS_NODE_ETS_E; + break; + + case ZXDH_DCBNL_ZEROBW_ETS_TC: + *c_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_C; + *e_type = ZXDH_DCBNL_ETS_NODE_ZEROBW_ETS_E; + break; + default: + break; + } +} + +void zxdh_dcbnl_get_tc_weight_sp(uint32_t tc_type, uint32_t tc_tx_bw, + uint32_t tc_id, uint32_t *c_weight, + uint32_t *e_weight, uint32_t *c_sp, + uint32_t *e_sp) +{ + if (tc_tx_bw == ZXDH_DCBNL_MAX_BW_ALLOC) { + *c_weight = 1; + *e_weight = 1; + } else { + *c_weight = 
ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / + ZXDH_DCBNL_MAX_BW_ALLOC; + *e_weight = ZXDH_DCBNL_MAX_WEIGHT * tc_tx_bw / + ZXDH_DCBNL_MAX_BW_ALLOC; + } + + if ((tc_type == ZXDH_DCBNL_STRICT_TC) && + (tc_id < ZXDH_DCBNL_MAX_TRAFFIC_CLASS)) { + *c_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id; + *e_sp = ZXDH_DCBNL_MAX_TRAFFIC_CLASS - 1 - tc_id; + } else { + *c_sp = 0; + *e_sp = 0; + } +} + +uint32_t zxdh_dcbnl_find_flow_link_se_id(struct zxdh_en_priv *en_priv, + uint32_t tc_type, uint32_t link_level, + uint32_t *c_linkid, uint32_t *e_linkid, + uint32_t c_sp, uint32_t e_sp) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_se_node *se_node = + en_dev->dcb_para.ets_node_list_head[link_level].se_next; + uint32_t c_type = 0; + uint32_t e_type = 0; + + if (NULL == se_node) { + LOG_ERR("dcbnl: find_flow_link_se_id no nodes \n"); + return 1; + } + + zxdh_dcbnl_tc_map_to_link_point(tc_type, &c_type, &e_type); + + *c_linkid = ZXDH_DCBNL_NULL_ID; + *e_linkid = ZXDH_DCBNL_NULL_ID; + + while ((NULL != se_node) && ((ZXDH_DCBNL_NULL_ID == *c_linkid) || + (ZXDH_DCBNL_NULL_ID == *e_linkid))) { + if (se_node->link_point == c_type) { + *c_linkid = se_node->se_id + c_sp; + } else if (se_node->link_point == e_type) { + *e_linkid = se_node->se_id + e_sp; + } + + se_node = se_node->se_next; + } + + if ((ZXDH_DCBNL_NULL_ID == *c_linkid) || + (ZXDH_DCBNL_NULL_ID == *e_linkid)) { + LOG_ERR("dcbnl: find_flow_link_se_id failed, c_linkid: 0x%x, e_linkid: 0x%x\n", + *c_linkid, *e_linkid); + return 1; + } + return 0; +} + +uint32_t zxdh_dcbnl_get_ieee_tsa(uint32_t tc_type) +{ + uint32_t tsa = 0; + switch (tc_type) { + case ZXDH_DCBNL_ETS_TC: + case ZXDH_DCBNL_ZEROBW_ETS_TC: + tsa = IEEE_8021QAZ_TSA_ETS; + break; + case ZXDH_DCBNL_STRICT_TC: + tsa = IEEE_8021QAZ_TSA_STRICT; + break; + case ZXDH_DCBNL_VENDOR_TC: + tsa = IEEE_8021QAZ_TSA_VENDOR; + break; + default: + tsa = IEEE_8021QAZ_TSA_STRICT; + LOG_ERR("dcbnl:tsa error, change to strict \n"); + break; + } + return tsa; +} + 
+uint32_t zxdh_dcbnl_save_flow_resources( + struct zxdh_en_priv *en_priv, + struct zxdh_dcbnl_tc_flow_config *tc_flow_config, + struct zxdh_dcbnl_ets_se_flow_resource *tree_resource, uint32_t tc_id) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *new_flow_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL; + struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 }; + uint32_t c_linkid = 0; + uint32_t e_linkid = 0; + uint32_t c_weight = 0; + uint32_t e_weight = 0; + uint32_t c_sp = 0; + uint32_t e_sp = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (tc_flow_config->link_level != 1) { + LOG_ERR("dcbnl_init_ets: zxdh_dcbnl_save_flow_resources link_level err\n"); + return 1; + } + + ets_node_list_head = + &en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]; + + if (tc_id == 0) { + tree_resource->numq = ZXDH_DCBNL_MAX_TRAFFIC_CLASS; + tree_resource->level = ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL; + tree_resource->flags = ZXDH_DCBNL_ETS_NODE_FLOW; + err = zxdh_dcbnl_get_se_flow_resources(en_dev, tree_resource); + if (err) { + LOG_ERR("dcbnl_init_ets: get flow resources err\n"); + return err; + } + + err = dpp_tm_flowid_pport_table_set(&pf_info, en_dev->phy_port, + tree_resource->resource_id); + if (err) { + LOG_ERR("dcbnl_init_ets: flowid_pport_table_set failed, port: %d, flowid:%d, err:%d\n", + en_dev->phy_port, tree_resource->resource_id, + err); + return err; + } + } + + zxdh_dcbnl_get_tc_weight_sp(tc_flow_config->tc_type, + tc_flow_config->tc_tx_bw, tc_id, &c_weight, + &e_weight, &c_sp, &e_sp); + + err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_flow_config->tc_type, + tc_flow_config->link_level, + &c_linkid, &e_linkid, c_sp, e_sp); + if (err) { + LOG_ERR("dcbnl_init_ets init ets: find_flow_link_se_id failed, tc_id: %d, tc_type: %d\n", + tc_id, tc_flow_config->tc_type); + return err; + } + + new_flow_node = 
+ kmalloc(sizeof(struct zxdh_dcbnl_ets_flow_node), GFP_KERNEL); + if (new_flow_node == NULL) { + LOG_ERR("dcbnl_init_ets: kmalloc new flow node failed\n"); + return 1; + } + + new_flow_node->flow_next = NULL; + new_flow_node->gsch_id = tree_resource->gsch_id + tc_id; + new_flow_node->flow_id = tree_resource->resource_id + tc_id; + new_flow_node->tc_id = tc_id; + new_flow_node->tc_type = tc_flow_config->tc_type; + new_flow_node->tc_tx_bw = tc_flow_config->tc_tx_bw; + new_flow_node->td_th = tc_flow_config->td_th; + new_flow_node->c_linkid = c_linkid; + new_flow_node->c_weight = c_weight; + new_flow_node->c_sp = c_sp; + new_flow_node->c_rate = tc_flow_config->c_rate; + new_flow_node->mode = 1; + new_flow_node->e_linkid = e_linkid; + new_flow_node->e_weight = e_weight; + new_flow_node->e_sp = e_sp; + new_flow_node->e_rate = tc_flow_config->e_rate; + + err = dpp_flow_map_port_set(&pf_info, new_flow_node->flow_id, + en_dev->phy_port); + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_flow_map_port_set failed, flow_id: %d, phy_port: %d, err:%d\n", + new_flow_node->flow_id, en_dev->phy_port, err); + kfree(new_flow_node); + return err; + } + + err = dpp_crdt_flow_link_set(&pf_info, new_flow_node->flow_id, c_linkid, + c_weight, c_sp, new_flow_node->mode, + e_linkid, e_weight, e_sp); + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_crdt_flow_link_set failed, flow_id: %d, c_linkid: %d, e_linkid: %d, err:%d\n", + new_flow_node->flow_id, c_linkid, e_linkid, err); + kfree(new_flow_node); + return err; + } + + err = dpp_flow_td_th_set(&pf_info, new_flow_node->flow_id, + new_flow_node->td_th); + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: %d, err:%d\n", + en_dev->vport, new_flow_node->flow_id, + new_flow_node->td_th, err); + //kfree(new_flow_node); //The default is 150 + //return err; + } + + p_para.cir = new_flow_node->c_rate; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS; + p_para.db_en = 1; + p_para.eir = new_flow_node->e_rate; + p_para.ebs = 
ZXDH_DCBNL_FLOW_RATE_EBS; + + err = dpp_flow_shape_set(&pf_info, new_flow_node->flow_id, p_para.cir, + p_para.cbs, p_para.db_en, p_para.eir, + p_para.ebs); + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, e_rate: %d, err:%d\n", + en_dev->vport, new_flow_node->flow_id, + new_flow_node->tc_id, new_flow_node->e_rate, err); + } + LOG_DEBUG( + "dcbnl_init_ets dpp_flow_shape_set end vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d,err:%d \n", + en_dev->vport, en_dev->phy_port, new_flow_node->flow_id, + new_flow_node->tc_id, p_para.cir, p_para.cbs, p_para.db_en, + p_para.eir, p_para.ebs, err); + + new_flow_node->flow_next = ets_node_list_head->flow_next; + ets_node_list_head->flow_next = new_flow_node; + ets_node_list_head->node_num += 1; + + en_dev->dcb_para.ets_cfg.tc_tsa[tc_id] = + zxdh_dcbnl_get_ieee_tsa(new_flow_node->tc_type); + en_dev->dcb_para.ets_cfg.tc_tx_bw[tc_id] = tc_flow_config->tc_tx_bw; + en_dev->dcb_para.tc_maxrate[tc_id] = new_flow_node->e_rate; + + LOG_DEBUG(" level:%d, tc_id:%d, flow_id:%d, node_num:%d \n", + tree_resource->level, new_flow_node->tc_id, + new_flow_node->flow_id, ets_node_list_head->node_num); + + return 0; +} + +uint32_t zxdh_dcbnl_scheduling_tree_link_tc(struct zxdh_en_priv *en_priv) +{ + struct zxdh_dcbnl_ets_se_flow_resource tree_resource; + uint32_t i = 0; + uint32_t err = 0; + + struct zxdh_dcbnl_tc_flow_config + ets_tc_config_table[ZXDH_DCBNL_MAX_TRAFFIC_CLASS + 1] = { + /*link_level tc_type tc_tx_bw c_rate e_rate td_th */ + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH_UPF }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, 
ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 1, ZXDH_DCBNL_STRICT_TC, 100, + ZXDH_DCBNL_FLOW_RATE_CIR, ZXDH_DCBNL_INITRATE_KBITPS, + ZXDH_DCBNL_FLOW_TDTH }, + { 0xff } + }; + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && + ets_tc_config_table[i].link_level != 0xff; + i++) { + err = zxdh_dcbnl_save_flow_resources( + en_priv, &ets_tc_config_table[i], &tree_resource, i); + if (err) { + LOG_ERR("dcbnl_init_ets: save_flow_resources failed, entry: %d\n", + i); + return err; + } + } + + return 0; +} + +uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + err = dpp_tm_pport_trust_mode_table_set(&pf_info, en_dev->phy_port, + trust); + if (err) { + LOG_ERR("dcbnl_set_ets: set_ets_trust failed, vport: %d, trust: %d, err:%d\n", + en_dev->vport, trust, err); + return err; + } + en_dev->dcb_para.trust = trust; + LOG_DEBUG(" trust:%d \n", trust); + return 0; +} + +uint32_t zxdh_dcbnl_init_trust_and_table(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t i = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) { + err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, + i, + i); //初始时,配置一一对应 + if (err) { + LOG_ERR("dcbnl_init_ets: dpp_tm_pport_up_map_table_set failed, vport: %d, phy_port: %d, err:%d\n", + en_dev->vport, en_dev->phy_port, err); + 
return err; + } + en_dev->dcb_para.ets_cfg.prio_tc[i] = i; + } + LOG_DEBUG(" vport:%d,phy_port:%d prio2tc ok \n", en_dev->vport, + en_dev->phy_port); + + for (i = 0; i < ZXDH_DCBNL_MAX_DSCP; i++) { + err = dpp_tm_pport_dscp_map_table_set( + &pf_info, en_dev->phy_port, i, i >> 3); + if (err) { + LOG_ERR("dcbnl_init_ets: dscp_map_table_set failed, vport: %d, phy_port: %d, err:%d\n", + en_dev->vport, en_dev->phy_port, err); + return err; + } + + en_dev->dcb_para.dscp2prio[i] = i >> 3; + } + LOG_DEBUG("vport:%d,phy_port:%d,dscp2prio ok \n", en_dev->vport, + en_dev->phy_port); + + err = zxdh_dcbnl_set_ets_trust(en_priv, ZXDH_DCBNL_ETS_TRUST_PCP); + if (err) { + LOG_INFO("set_ets_trust failed \n"); + return err; + } + en_dev->dcb_para.trust = ZXDH_DCBNL_ETS_TRUST_PCP; + LOG_DEBUG(" vport:%d,phy_port:%d,trust:%d \n", en_dev->vport, + en_dev->phy_port, en_dev->dcb_para.trust); + return 0; +} + +uint32_t zxdh_dcbnl_init_ets_list(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t level = 0; + + for (level = 0; level < ZXDH_DCBNL_MAX_TREE_LEVEL; level++) { + en_dev->dcb_para.ets_node_list_head[level].se_next = NULL; + en_dev->dcb_para.ets_node_list_head[level].flow_next = NULL; + en_dev->dcb_para.ets_node_list_head[level].node_num = 0; + } + return 0; +} + +/* Normal release se*/ +uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_se_node *se_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL; + uint32_t err = 0; + uint32_t level = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + LOG_DEBUG(" vport:%d, phy_port:%d \n", en_dev->vport, en_dev->phy_port); + for (level = 1; level <= ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level++) { + ets_node_list_head = + &en_dev->dcb_para.ets_node_list_head[level]; + while (NULL != ets_node_list_head->se_next) { + se_node = 
ets_node_list_head->se_next; + ets_node_list_head->se_next = se_node->se_next; + if (level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL) { + if (!en_dev->quick_remove) { + err = dpp_crdt_del_se_link_set( + &pf_info, se_node->se_id, + se_node->se_id); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_crdt_del_se_link_set failed, se_id: %d, err:%d \n", + se_node->se_id, err); + } + LOG_DEBUG(" dpp_crdt_del_se_link_set"); + + err = dpp_cosq_gsch_id_delete( + &pf_info, en_dev->phy_port, + se_node->gsch_id); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, se_id: %lld, err:%d \n", + se_node->gsch_id, err); + } + LOG_DEBUG( + "del se id dpp_cosq_gsch_id_delete"); + } + } + LOG_DEBUG(" free level:%d se_id:%x \n", level, + se_node->se_id); + kfree(se_node); + ets_node_list_head->node_num -= 1; + LOG_DEBUG("current node_num:%d \n", + ets_node_list_head->node_num); + } + } + + return 0; +} +/* Normal release flow*/ +uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = + &en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]; + uint32_t err = 0; + bool have_flow = false; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + LOG_DEBUG(" vport:%d, phy_port:%d \n", en_dev->vport, en_dev->phy_port); + while (NULL != ets_node_list_head->flow_next) { + have_flow = true; + flow_node = ets_node_list_head->flow_next; + ets_node_list_head->flow_next = flow_node->flow_next; + + p_para.cir = 0; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH; + p_para.db_en = 1; + p_para.eir = 0; + p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH; + if (!en_dev->quick_remove) { + err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, + p_para.cir, p_para.cbs, + p_para.db_en, p_para.eir, + 
p_para.ebs); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, + flow_node->tc_id, p_para.eir, err); + return err; + } + LOG_INFO("clean maxrate"); + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, + 0); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: 0, err:%d\n", + en_dev->vport, flow_node->flow_id, err); + } + LOG_INFO(" clean TD "); + + err = dpp_crdt_del_flow_link_set(&pf_info, + flow_node->flow_id, + flow_node->flow_id); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_crdt_del_flow_link_set failed, flow_id: %d, err:%d \n", + flow_node->flow_id, err); + } + LOG_INFO(" dpp_crdt_del_flow_link_set"); + + err = dpp_cosq_gsch_id_delete( + &pf_info, en_dev->phy_port, flow_node->gsch_id); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_cosq_gsch_id_delete failed, gsch_id: %lld ,err:%d\n", + flow_node->gsch_id, err); + } + LOG_INFO("del id dpp_cosq_gsch_id_delete"); + + LOG_INFO(" free level:0, flow_id:%d, tc:%d\n", + flow_node->flow_id, flow_node->tc_id); + } + + kfree(flow_node); + ets_node_list_head->node_num -= 1; + LOG_INFO("current node_num:%d \n", + ets_node_list_head->node_num); + } + + if (have_flow) { + if (!en_dev->quick_remove) { + err = dpp_tm_flowid_pport_table_del(&pf_info, + en_dev->phy_port); + if (err) { + LOG_ERR("dcbnl_free_ets: dpp_tm_flowid_pport_table_del failed,vport:%d, phy_port: %d \n", + en_dev->vport, en_dev->phy_port); + } + LOG_INFO("del table dpp_tm_flowid_pport_table_del"); + } + } + + return 0; +} + +/* host no reset,risc reset? 
*/ +uint32_t zxdh_dcbnl_check_and_free_node_memory(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = NULL; + struct zxdh_dcbnl_ets_se_node *se_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL; + uint32_t level = 0; + + ets_node_list_head = + &en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]; + while (NULL != ets_node_list_head->flow_next) { + flow_node = ets_node_list_head->flow_next; + ets_node_list_head->flow_next = flow_node->flow_next; + kfree(flow_node); + ets_node_list_head->node_num -= 1; + } + + for (level = 1; level < ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL + 1; level++) { + ets_node_list_head = + &en_dev->dcb_para.ets_node_list_head[level]; + while (NULL != ets_node_list_head->se_next) { + se_node = ets_node_list_head->se_next; + ets_node_list_head->se_next = se_node->se_next; + kfree(se_node); + ets_node_list_head->node_num -= 1; + } + } + + return 0; +} + +uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv, + uint8_t *tc_type, uint8_t *tc_tx_bw) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = + en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + uint32_t tc_id = 0; + uint32_t c_linkid = 0; + uint32_t e_linkid = 0; + uint32_t c_weight = 0; + uint32_t e_weight = 0; + uint32_t c_sp = 0; + uint32_t e_sp = 0; + uint32_t i = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (NULL == flow_node) { + LOG_ERR("dcbnl_set_ets: set_tc_scheduling no flow in the tree\n"); + return 1; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + tc_id = flow_node->tc_id; + if ((flow_node->tc_type == tc_type[tc_id]) && + (flow_node->tc_tx_bw == tc_tx_bw[tc_id])) { + LOG_DEBUG( + "Same 
configuration,tc_id:%d, tc_type:%d, tc_tx_bw:%d\n", + tc_id, tc_type[tc_id], tc_tx_bw[tc_id]); + flow_node = flow_node->flow_next; + continue; + } + + zxdh_dcbnl_get_tc_weight_sp(tc_type[tc_id], tc_tx_bw[tc_id], + tc_id, &c_weight, &e_weight, &c_sp, + &e_sp); + + err = zxdh_dcbnl_find_flow_link_se_id(en_priv, tc_type[tc_id], + 1, &c_linkid, &e_linkid, + c_sp, e_sp); + if (err) { + LOG_ERR("dcbnl_set_ets: find_flow_link_se_id failed, tc_id: %d, tc_type: %d\n", + tc_id, tc_type[tc_id]); + return err; + } + + /* 1、清限速,刷新桶深,断流*/ + p_para.cir = 0; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH; + p_para.db_en = 1; + p_para.eir = 0; + p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH; + + LOG_DEBUG( + "clean maxrate vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n", + en_dev->vport, en_dev->phy_port, flow_node->flow_id, + tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, + p_para.ebs); + + err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, + p_para.cir, p_para.cbs, p_para.db_en, + p_para.eir, p_para.ebs); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, tc_id, + p_para.eir, err); + return err; + } + + /* 2、清TD,断流*/ + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, 0); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th: 0, err:%d\n", + en_dev->vport, flow_node->flow_id, err); + } + + /* 3、删除挂接*/ + err = dpp_crdt_del_flow_link_set(&pf_info, flow_node->flow_id, + flow_node->flow_id); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_crdt_del_flow_link_set failed, vport: %d, flow_id: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, err); + return err; + } + + /* 4、重新挂接*/ + err = dpp_crdt_flow_link_set(&pf_info, flow_node->flow_id, + c_linkid, c_weight, c_sp, 1, + e_linkid, e_weight, e_sp); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_crdt_flow_link_set failed, flow_id: %d, flow_id: 
%d, flow_id: %d, err:%d\n", + flow_node->flow_id, c_linkid, e_linkid, err); + return err; + } + + /* 5、恢复TD*/ + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, + flow_node->td_th); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_td_th_set failed,vport:%d flow_id: %d, td_th:%d, err:%d\n", + en_dev->vport, flow_node->flow_id, + flow_node->td_th, err); + } + + /* 6、恢复限速*/ + p_para.cir = ZXDH_DCBNL_FLOW_RATE_CIR; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS; + p_para.db_en = 1; + p_para.eir = flow_node->e_rate; + p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS; + + LOG_DEBUG( + "dpp_flow_shape_set begin vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n", + en_dev->vport, en_dev->phy_port, flow_node->flow_id, + tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, + p_para.ebs); + + err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, + p_para.cir, p_para.cbs, p_para.db_en, + p_para.eir, p_para.ebs); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, tc_id, + p_para.eir, err); + return err; + } + LOG_INFO(" dpp_flow_shape_set end"); + flow_node->tc_type = tc_type[tc_id]; + flow_node->tc_tx_bw = tc_tx_bw[tc_id]; + + flow_node->c_linkid = c_linkid; + flow_node->c_weight = c_weight; + flow_node->c_sp = c_sp; + + flow_node->e_linkid = e_linkid; + flow_node->e_weight = e_weight; + flow_node->e_sp = e_sp; + + LOG_DEBUG( + " tc_id:%d, tc_type:%d, c_linkid:%x, e_weight:%d, e_sp:%d ,e_linkid:%x, e_weight:%d, e_sp:%d \n", + tc_id, flow_node->tc_type, flow_node->c_linkid, + flow_node->c_weight, flow_node->c_sp, + flow_node->e_linkid, flow_node->e_weight, + flow_node->e_sp); + + flow_node = flow_node->flow_next; + } + + return 0; +} + +uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv, + uint8_t *prio_tc) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t i = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + 
uint32_t tc_td_th[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 }; + + LOG_DEBUG(" begin \n"); + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + for (i = 0; i < ZXDH_DCBNL_MAX_PRIORITY; i++) { + err = dpp_tm_pport_up_map_table_set(&pf_info, en_dev->phy_port, + i, prio_tc[i]); + if (err) { + LOG_ERR("dcbnl_set_ets: failed, vport: %d, prio: %d, tc: %d, err:%d\n", + en_dev->vport, i, prio_tc[i], err); + return err; + } + LOG_DEBUG(" vport:%d, phy_port:%d, prio:%d, tc:%d \n", + en_dev->vport, en_dev->phy_port, i, prio_tc[i]); + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS; i++) { + tc_td_th[i] = ZXDH_DCBNL_FLOW_TDTH; //初值 + } + + if (prio_tc[0] < ZXDH_DCBNL_MAX_TRAFFIC_CLASS) { + tc_td_th[prio_tc[0]] = + ZXDH_DCBNL_FLOW_TDTH_UPF; //prio0对应的TC设置成UPF的TD值 + } + + err = zxdh_dcbnl_set_flow_td_th(en_priv, tc_td_th); + if (err) { + return err; + } + + return 0; +} + +uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv, + uint32_t *maxrate) +{ + struct zxdh_en_device *en_dev; + struct zxdh_dcbnl_ets_flow_node *flow_node; + struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 }; + uint32_t tc_id = 0; + uint32_t i = 0; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + uint32_t temp = 0; + struct dh_core_dev *dh_dev; + struct zxdh_pf_device *pf_dev; + + ZXDH_DCBNL_CHECK_POINT_RET(en_priv, ZXDH_DCBNL_INVALID_PARA); + ZXDH_DCBNL_CHECK_POINT_RET(maxrate, ZXDH_DCBNL_INVALID_PARA); + + en_dev = &en_priv->edev; + ZXDH_DCBNL_CHECK_POINT_RET(en_dev, ZXDH_DCBNL_INVALID_PARA); + + flow_node = en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + dh_dev = en_dev->parent; + ZXDH_DCBNL_CHECK_POINT_RET(dh_dev, ZXDH_DCBNL_INVALID_PARA); + + pf_dev = dh_core_priv(dh_dev->parent); + ZXDH_DCBNL_CHECK_POINT_RET(pf_dev, ZXDH_DCBNL_INVALID_PARA); + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (NULL == flow_node) { + LOG_ERR("dcbnl_set_ets: set_tc_maxrate no flow in the tree\n"); + return 1; + } + + for (i = 
0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + tc_id = flow_node->tc_id; + ZXDH_DCBNL_CHECK_MAX_WITH_RETURN(tc_id, + ZXDH_DCBNL_MAX_TRAFFIC_CLASS, + ZXDH_DCBNL_INVALID_PARA); + if (flow_node->e_rate == maxrate[tc_id]) { + LOG_DEBUG("Same configuration, tc_id:%d, maxrate:%d\n", + tc_id, maxrate[tc_id]); + flow_node = flow_node->flow_next; + continue; + } + /* clean CBS、EBS*/ + p_para.cir = 0; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS_REFRESH; + p_para.db_en = 1; + p_para.eir = 0; + p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS_REFRESH; + LOG_DEBUG(" refresh maxrate "); + err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, + p_para.cir, p_para.cbs, p_para.db_en, + p_para.eir, p_para.ebs); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, tc_id, + p_para.eir, err); + return err; + } + /* 2、set maxrate*/ + p_para.cir = ZXDH_DCBNL_FLOW_RATE_CIR; + p_para.cbs = ZXDH_DCBNL_FLOW_RATE_CBS; + p_para.db_en = 1; + p_para.eir = maxrate[tc_id]; + LOG_DEBUG(" new pf_dev-boardtype %d \n", pf_dev->board_type); + + temp = maxrate[tc_id] / 20; + if ((maxrate[tc_id] != ZXDH_DCBNL_MAXRATE_KBITPS) && + maxrate[tc_id] != 0) { + p_para.eir = maxrate[tc_id] + temp; + } + + p_para.ebs = ZXDH_DCBNL_FLOW_RATE_EBS; + + LOG_DEBUG( + " vport%d,phy_port:%d, flow_id:%d,tc_id:%d, cir:%d, cbs:%d, db_en:%d, eir:%d,ebs:%d \n", + en_dev->vport, en_dev->phy_port, flow_node->flow_id, + tc_id, p_para.cir, p_para.cbs, p_para.db_en, p_para.eir, + p_para.ebs); + + err = dpp_flow_shape_set(&pf_info, flow_node->flow_id, + p_para.cir, p_para.cbs, p_para.db_en, + p_para.eir, p_para.ebs); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_flow_shape_set failed, vport: %d, flow_id: %d, tc_id: %d, eir: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, tc_id, + p_para.eir, err); + return err; + } + + flow_node->e_rate = maxrate[tc_id]; + en_dev->dcb_para.tc_maxrate[tc_id] = maxrate[tc_id]; + + 
flow_node = flow_node->flow_next; + } + + return 0; +} + +uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp, + uint8_t prio) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + err = dpp_tm_pport_dscp_map_table_set(&pf_info, en_dev->phy_port, dscp, + prio); + if (err) { + LOG_ERR("dcbnl_set_ets: set_dscp2prio failed, vport: %d, dscp: %d, prio: %d, err:%d\n", + en_dev->vport, dscp, prio, err); + return err; + } + en_dev->dcb_para.dscp2prio[dscp] = prio; + LOG_DEBUG(" vport:%d, ephy_port:%d,dscp:%d, up:%d \n", en_dev->vport, + en_dev->phy_port, dscp, prio); + + return 0; +} +uint32_t zxdh_dcbnl_set_single_td_th(struct zxdh_en_priv *en_priv, uint32_t tc, + uint32_t tc_td_th) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = + en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + uint32_t err = 0; + uint32_t i = 0; + uint32_t tc_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (flow_node == NULL) { + LOG_ERR("dcbnl_set_ets: set_flow_td_th no flow in the tree\n"); + return 1; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + tc_id = flow_node->tc_id; + if (tc_id == tc) { + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, + tc_td_th); + if (err) { + LOG_ERR("dcbnl_set_ets: set_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, td_th: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, + tc_id, tc_td_th, err); + return err; + } + return 0; + } + flow_node = flow_node->flow_next; + } + + return 0; +} +uint32_t zxdh_dcbnl_set_flow_td_th(struct zxdh_en_priv *en_priv, + uint32_t *tc_td_th) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = + en_dev->dcb_para + 
.ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + uint32_t err = 0; + uint32_t i = 0; + uint32_t tc_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (flow_node == NULL) { + LOG_ERR("dcbnl_set_ets: set_flow_td_th no flow in the tree\n"); + return 1; + } + + if (tc_td_th == NULL) { + LOG_ERR("dcbnl_set_ets: tc_td_th is null \n"); + return 1; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + tc_id = flow_node->tc_id; + if (flow_node->td_th != tc_td_th[tc_id]) { + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, + tc_td_th[tc_id]); + if (err) { + LOG_ERR("dcbnl_set_ets: set_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, td_th: %d, err:%d\n", + en_dev->vport, flow_node->flow_id, + tc_id, tc_td_th[tc_id], err); + return err; + } + flow_node->td_th = tc_td_th[tc_id]; + } + flow_node = flow_node->flow_next; + } + + return 0; +} + +uint32_t zxdh_dcbnl_clear_flow_td_th(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = + en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + uint32_t err = 0; + uint32_t i = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (flow_node == NULL) { + LOG_ERR("dcbnl_set_ets: clear_flow_td_th no flow in the tree\n"); + return 1; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + if (flow_node->td_th != 0) { + err = dpp_flow_td_th_set(&pf_info, flow_node->flow_id, + 0); + if (err) { + LOG_ERR("dcbnl_set_ets: clear_flow_td_th failed, vport: %d, flow_id:%d, err:%d\n", + en_dev->vport, flow_node->flow_id, err); + return err; + } + flow_node->td_th = 0; + } + flow_node = flow_node->flow_next; + } + + LOG_INFO("clear td_th success!\n"); + + return 0; +} + +uint32_t zxdh_dcbnl_get_flow_td_th(struct zxdh_en_priv 
*en_priv, + uint32_t *tc_td_th) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = + en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL] + .flow_next; + uint32_t err = 0; + uint32_t i = 0; + uint32_t tc_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (flow_node == NULL) { + LOG_ERR("get_flow_td_th no flow in the tree\n"); + return 1; + } + + if (tc_td_th == NULL) { + LOG_ERR(" tc_td_th is null \n"); + return 1; + } + + for (i = 0; i < ZXDH_DCBNL_MAX_TRAFFIC_CLASS && flow_node != NULL; + i++) { + tc_id = flow_node->tc_id; + err = dpp_flow_td_th_get(&pf_info, flow_node->flow_id, + &tc_td_th[tc_id]); + if (err) { + LOG_ERR("get_flow_td_th failed, vport: %d, flow_id:%d, tc_id:%d, err:%d\n", + en_dev->vport, flow_node->flow_id, tc_id, err); + return err; + } + flow_node = flow_node->flow_next; + } + + return 0; +} + +uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_tc_flow_shape_para p_para = { 0 }; + uint32_t err = 0; + uint32_t speed = 0; + uint32_t max_speed = ZXDH_DCBNL_MAXRATE_KBITPS / ZXDH_DCBNL_RATEUNIT_K; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + speed = en_dev->speed; + if ((0 == speed) || (speed > max_speed)) { + LOG_INFO("get port speed is : %u ,set to max:%u\n", speed, + max_speed); + speed = max_speed; + } + + p_para.cir = speed * ZXDH_DCBNL_RATEUNIT_K; //Mbps->Kbps + p_para.cbs = ZXDH_DCBNL_PORT_RATE_CBS; + p_para.db_en = 0; + p_para.eir = 0; + p_para.ebs = 0; + + LOG_DEBUG(" vport:%d,phy_port:%d, p_para.cir:%d, speed:%d \n", + en_dev->vport, en_dev->phy_port, p_para.cir, speed); + + err = dpp_port_shape_set(&pf_info, en_dev->phy_port, p_para.cir, + p_para.cbs, 1); + if (err) { + LOG_ERR("dcbnl_set_ets: dpp_port_shape_set failed, port:%d, speed:%d, speed:%d,err:%d \n", + 
en_dev->phy_port, speed, p_para.cir, err); + return err; + } + + return 0; +} + +uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_dcbnl_ets_flow_node *flow_node = NULL; + struct zxdh_dcbnl_ets_se_node *se_node = NULL; + struct zxdh_dcbnl_ets_node_list_head *ets_node_list_head = NULL; + uint32_t level = 0; + + LOG_DEBUG(" ***vport:%d port:%d \n", en_dev->vport, en_dev->phy_port); + + for (level = ZXDH_DCBNL_ETS_TREE_ROOT_LEVEL; level > 0; level--) { + ets_node_list_head = + &en_dev->dcb_para.ets_node_list_head[level]; + se_node = ets_node_list_head->se_next; + while (NULL != se_node) { + LOG_DEBUG( + " se_node *** level:%d, node_idx:%d, se_id:0x%x *** \n", + level, se_node->node_idx, se_node->se_id); + LOG_DEBUG( + " se_node gsch_id:0x%llx, node_type:%d, se_id:0x%x \n", + se_node->gsch_id, se_node->node_type, + se_node->se_id); + LOG_DEBUG( + " se_node se_link_id:0x%x, se_link_weight:%d, se_link_sp:%d, link_point:%d \n", + se_node->se_link_id, se_node->se_link_weight, + se_node->se_link_sp, se_node->link_point); + se_node = se_node->se_next; + } + } + + ets_node_list_head = + &en_dev->dcb_para + .ets_node_list_head[ZXDH_DCBNL_ETS_TREE_FLOW_LEVEL]; + flow_node = ets_node_list_head->flow_next; + while (NULL != flow_node) { + LOG_DEBUG(" flow_node *** tc_id:%d, flow_id:%d *** \n", + flow_node->tc_id, flow_node->flow_id); + LOG_DEBUG(" flow_node gsch_id:0x%llx, tc_type:%d, td_th:%d \n", + flow_node->gsch_id, flow_node->tc_type, + flow_node->td_th); + LOG_DEBUG( + " flow_node c_linkid:0x%x, c_weight:%d, c_sp:%d, c_rate:%d \n", + flow_node->c_linkid, flow_node->c_weight, + flow_node->c_sp, flow_node->c_rate); + LOG_DEBUG( + " flow_node e_linkid:0x%x, e_weight:%d, e_sp:%d, e_rate:%d \n", + flow_node->e_linkid, flow_node->e_weight, + flow_node->e_sp, flow_node->e_rate); + flow_node = flow_node->flow_next; + } + + return 0; +} + +uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct 
zxdh_en_priv *en_priv) +{ + uint32_t err = 0; + + zxdh_dcbnl_init_ets_list(en_priv); + + err = zxdh_dcbnl_build_ets_scheduling_tree(en_priv); + if (err) { + LOG_ERR("dcbnl_init_ets: build_tc_scheduling_tree failed \n"); + goto init_ets_se_error; + } + + err = zxdh_dcbnl_scheduling_tree_link_tc(en_priv); + if (err) { + LOG_ERR("dcbnl_init_ets: scheduling_tree_link_tc failed \n"); + goto init_ets_error; + } + + err = zxdh_dcbnl_init_trust_and_table(en_priv); + if (err) { + LOG_ERR("dcbnl_init_ets: init_trust_and_table failed \n"); + goto init_ets_error; + } + + return 0; + +init_ets_error: + zxdh_dcbnl_free_flow_resources(en_priv); +init_ets_se_error: + zxdh_dcbnl_free_se_resources(en_priv); + LOG_INFO("dcbnl_init_ets failed \n"); + return err; +} + +uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (!en_dev->quick_remove) { + if (mode == 1) { + err = dpp_tm_pport_mcode_switch_set( + &pf_info, en_dev->phy_port, 1); + if (err) { + LOG_ERR(" set_tm_gate open failed \n"); + } + } else if (mode == 0) { + err = dpp_tm_pport_mcode_switch_del(&pf_info, + en_dev->phy_port); + if (err) { + LOG_ERR(" set_tm_gate close failed \n"); + } + } else { + LOG_ERR(" error \n"); + } + } + + return err; +} + +uint32_t zxdh_dcbnl_enable_debug(struct zxdh_en_priv *en_priv) +{ + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_en_device *en_dev = &en_priv->edev; + ZXIC_UINT32 dbg_status = 0; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + err = dpp_ppu_set_debug_mode(&pf_info, &dbg_status); + if (err) { + LOG_ERR(" set debug mode enable failed \n"); + } + if (dbg_status == 0) { + LOG_ERR("many packet into PPU!!!\n"); + return 0; + } + return err; +} +uint32_t zxdh_dcbnl_disable_debug(struct zxdh_en_priv *en_priv) +{ + uint32_t err = 0; + 
DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_en_device *en_dev = &en_priv->edev; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + err = dpp_ppu_close_debug_mode(&pf_info); + if (err) { + LOG_ERR(" set debug mode diable failed \n"); + } + return err; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h new file mode 100644 index 000000000000..4d7e221df7bb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl_api.h @@ -0,0 +1,45 @@ +#ifndef __ZXDH_EN_DCBNL_API_H__ +#define __ZXDH_EN_DCBNL_API_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +struct zxdh_en_priv; + +uint32_t zxdh_dcbnl_init_port_speed(struct zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_init_ets_scheduling_tree(struct zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_printk_ets_tree(struct zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_pfc_init(struct zxdh_en_priv *en_priv); + +uint32_t zxdh_dcbnl_free_flow_resources(struct zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_free_se_resources(struct zxdh_en_priv *en_priv); + +uint32_t zxdh_dcbnl_set_tc_scheduling(struct zxdh_en_priv *en_priv, + uint8_t *tc_type, uint8_t *tc_tx_bw); +uint32_t zxdh_dcbnl_set_ets_up_tc_map(struct zxdh_en_priv *en_priv, + uint8_t *prio_tc); +uint32_t zxdh_dcbnl_set_tc_maxrate(struct zxdh_en_priv *en_priv, + uint32_t *maxrate); +uint32_t zxdh_dcbnl_set_ets_trust(struct zxdh_en_priv *en_priv, uint32_t trust); +uint32_t zxdh_dcbnl_set_dscp2prio(struct zxdh_en_priv *en_priv, uint16_t dscp, + uint8_t prio); + +uint32_t zxdh_dcbnl_set_tm_gate(struct zxdh_en_priv *en_priv, uint32_t mode); +uint32_t zxdh_dcbnl_set_flow_td_th(struct zxdh_en_priv *en_priv, + uint32_t *tc_td_th); +uint32_t zxdh_dcbnl_set_single_td_th(struct zxdh_en_priv *en_priv, uint32_t tc, + uint32_t tc_td_th); +uint32_t zxdh_dcbnl_get_flow_td_th(struct zxdh_en_priv *en_priv, + uint32_t *tc_td_th); +uint32_t zxdh_dcbnl_clear_flow_td_th(struct 
zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_enable_debug(struct zxdh_en_priv *en_priv); +uint32_t zxdh_dcbnl_disable_debug(struct zxdh_en_priv *en_priv); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c b/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c new file mode 100644 index 000000000000..ac9f7e16c1d9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.c @@ -0,0 +1,1092 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "driver.h" +#include "../en_aux.h" +#include "drs_sec_dtb.h" + +UINT32 g_udDownloadSaNum = 1; //sa表的数量 +UINT32 gudTunnelID; +UINT32 gudDtbSaNum = 1; +E_INLINE_TYPE e_gInlineType; //0是inline入境 1是inline出境 +UINT64 guddAntiWindow = 2047; //得配成2047,否则覆盖不到sn为0的情况 + +UINT64 guddSecTestSaDtbPdVirAddr; + +UINT32 gudSecTestSwanSrcIp = 0x0A04B007; +UINT32 gudSecTestSwanDstIp = 0x0AE3656D; + +UINT8 gudIpType = 1; +// 出入境分开下表需要将此字段置0,会影响入境下表 +UINT16 gusOutSaOffset; +UINT32 gudOutSaId; + +UINT64 HalBttlSecRegBaseGet(struct zxdh_en_device *en_dev) +{ + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + return en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + 0x7000; //0x7000是目前sec模块寄存器基地址的固定偏移,包括PF/VF +} + +#if 1 +static int zxdh_ipsec_cipher_id_get(u8 ealgo, char *p_alg_name, + char *p_aead_name, + E_HAL_SEC_IPSEC_CIPHER_ALG *p_zxdh_ealgo_id) +{ + int i = 0; + T_ZXDH_EALGO atZxdhEalgo[] = { + { "rfc7539esp(chacha20,poly1305)", "", + e_HAL_IPSEC_CIPHER_CHACHA }, + }; + + if ((NULL == p_alg_name) || (NULL == p_aead_name)) { + return -1; + } + for (i = 0; i < sizeof(atZxdhEalgo) / sizeof(T_ZXDH_EALGO); i++) { + if ((0 == strcmp(p_alg_name, atZxdhEalgo[i].alg_name)) || + (0 == strcmp(p_alg_name, atZxdhEalgo[i].compat_name))) { + *p_zxdh_ealgo_id = atZxdhEalgo[i].e_zxdh_ealgo_id; + return 
0; + } + if ((0 == strcmp(p_aead_name, atZxdhEalgo[i].alg_name)) || + (0 == strcmp(p_aead_name, atZxdhEalgo[i].compat_name))) { + *p_zxdh_ealgo_id = atZxdhEalgo[i].e_zxdh_ealgo_id; + return 0; + } + } + + switch (ealgo) { + case SADB_EALG_NULL: + case SADB_EALG_NONE: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_NULL; + break; + case SADB_EALG_DESCBC: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_DES_CBC; + break; + case SADB_EALG_3DESCBC: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_3DES_CBC; + break; + case SADB_X_EALG_AESCBC: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_CBC; + break; + case SADB_X_EALG_AESCTR: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_CTR; + break; + case SADB_X_EALG_AES_CCM_ICV8: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_CCM; + break; + case SADB_X_EALG_AES_CCM_ICV12: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_CCM; + break; + case SADB_X_EALG_AES_CCM_ICV16: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_CCM; + break; + case SADB_X_EALG_AES_GCM_ICV8: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_GCM; + break; + case SADB_X_EALG_AES_GCM_ICV12: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_GCM; + break; + case SADB_X_EALG_AES_GCM_ICV16: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_GCM; + break; + case SADB_X_EALG_NULL_AES_GMAC: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_AES_GMAC; + break; +#if 0 + //5.13不支持 + case SADB_X_EALG_SM4CBC: + *p_zxdh_ealgo_id = e_HAL_IPSEC_CIPHER_SM4_CBC; + break; +#endif + default: + return -1; + } + + return 0; +} + +static int zxdh_ipsec_auth_id_get(u8 aalgo, char *p_alg_name, + E_HAL_SEC_IPSEC_AUTH_ALG *p_zxdh_auth_id) +{ + int i = 0; + T_ZXDH_ALGO atZxdhAlgo[] = { + { "cmac(aes)", "", e_HAL_IPSEC_AUTH_AES_CMAC32 }, + }; + + if (NULL == p_alg_name) { + return -1; + } + for (i = 0; i < sizeof(atZxdhAlgo) / sizeof(T_ZXDH_ALGO); i++) { + DH_LOG_INFO(MODULE_SEC, "%s\n", p_alg_name); + DH_LOG_INFO(MODULE_SEC, "%s\n", atZxdhAlgo[i].alg_name); + if ((0 == strcmp(p_alg_name, atZxdhAlgo[i].alg_name)) || + (0 == strcmp(p_alg_name, 
atZxdhAlgo[i].compat_name))) { + *p_zxdh_auth_id = atZxdhAlgo[i].e_zxdh_auth_id; + return 0; + } + } + + switch (aalgo) { + case SADB_X_AALG_NULL: + case SADB_AALG_NONE: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_NULL; + break; + case SADB_AALG_MD5HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_MD5; + break; + case SADB_AALG_SHA1HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_SHA1; + break; + case SADB_X_AALG_SHA2_256HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_SHA256; + break; + case SADB_X_AALG_SHA2_384HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_SHA384; + break; + case SADB_X_AALG_SHA2_512HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_SHA512; + break; + case SADB_X_AALG_AES_XCBC_MAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_AES_XCBCMAC; + break; +#if 0 + //5.13不支持 + case SADB_X_AALG_SM3_256HMAC: + *p_zxdh_auth_id = e_HAL_IPSEC_AUTH_SM3; /*不清楚对不对*/ + break; +#endif + default: + return -1; + } + + return 0; +} + +UINT32 CmdkBttlSecSaParamConstruct(UINT32 udEntryValid, + E_CMDK_SEC_IPSEC_MODE eTunnelMode, + UINT32 udSeqCnterOverflow, + E_CMDK_LIVETIME_TYPES eLiveTimeType, + E_CMDK_SEC_SA_DF_MODE eSaDfMode, + E_CMDK_SEC_ENCRYP_MODE eEncryptionMode, + UINT32 udIcvLen, UINT16 *pusSaParam) +{ + UINT32 udIcvLenNew = udIcvLen / 4; //以4字节为单位 + + BTTL_PUB_ID_CHECK(udEntryValid, BITWIDTH1 + 1); + BTTL_PUB_ID_CHECK(eTunnelMode, e_SEC_IPSEC_MODE_LAST); + BTTL_PUB_ID_CHECK(udSeqCnterOverflow, BITWIDTH1 + 1); + BTTL_PUB_ID_CHECK(eLiveTimeType, e_SEC_SA_LIVETIME_TYPE_LAST); + BTTL_PUB_ID_CHECK(eSaDfMode, e_SEC_SA_DF_MODE_LAST); + BTTL_PUB_ID_CHECK(eEncryptionMode, e_SEC_ENCRYP_MODE_LAST); + BTTL_PUB_ID_CHECK(udIcvLenNew, BITWIDTH6 + 1); + + //BTTL_PUB_NULL_CHECK(pusSaParam); + + *pusSaParam = udIcvLenNew | (eEncryptionMode << 6) | (eSaDfMode << 9) | + (eLiveTimeType << 11) | (udSeqCnterOverflow << 13) | + (eTunnelMode << 14) | (udEntryValid << 15); + + return 0; +} +static int zxdh_ipsec_dtb_out_sa_get(struct xfrm_state *xs, + T_HAL_SA_DTB_HW_OUT *ptDtbOutSa) +{ + int err = -EINVAL; + 
u16 usSaParam = 0; + u32 udIcvLen = 0; + E_HAL_SEC_IPSEC_AUTH_ALG zxdh_auth_id; + E_HAL_SEC_IPSEC_CIPHER_ALG zxdh_ealgo_id; + E_CMDK_SEC_ENCRYP_MODE zxdh_encpy_mode = e_SEC_ENCRYP_MODE_LAST; + char test_alg_name[] = "zxdh_alg_test"; + char *p_aalg_alg_name = test_alg_name; + char *p_ealg_alg_name = test_alg_name; + char *p_aead_alg_name = test_alg_name; + + if (NULL != xs->aalg) { + p_aalg_alg_name = xs->aalg->alg_name; + } + if (NULL != xs->ealg) { + p_ealg_alg_name = xs->ealg->alg_name; + } + if (NULL != xs->aead) { + p_aead_alg_name = xs->aead->alg_name; + } + + /*AH应该提前拦截*/ + /*空加密空认证应该提前拦截*/ + + //DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs); + //DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa:0x%llx\n",ptDtbOutSa); + /*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/ + + err = zxdh_ipsec_auth_id_get(xs->props.aalgo, p_aalg_alg_name, + &zxdh_auth_id); + if (err) { + DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n", + xs->props.aalgo); + return -EINVAL; + } + err = zxdh_ipsec_cipher_id_get(xs->props.ealgo, p_ealg_alg_name, + p_aead_alg_name, &zxdh_ealgo_id); + if (err) { + DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n", + xs->props.aalgo); + return -EINVAL; + } + + //DH_LOG_INFO(MODULE_SEC, "replay_esn 0x%llx\n",xs->replay_esn); + ptDtbOutSa->ucAuthkeyLen = 0; /*默认值*/ + + /*处理单认证算法*/ + if (zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE; + } else { + ptDtbOutSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7) / 8; + udIcvLen = (xs->aalg->alg_trunc_len + 7) / 8; + memcpy((ptDtbOutSa->aucSaAuthKey), xs->aalg->alg_key, + ptDtbOutSa->ucAuthkeyLen); + } + + if ((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL) && + (zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL)) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE; + } + /*这里处理组合算法的4字节的salt*/ + if ((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM) || + (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA) || + (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC)) { + 
zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE; + + ptDtbOutSa->ucCipherkeyLen = + (xs->aead->alg_key_len + 7) / 8 - 4; + udIcvLen = (xs->aead->alg_icv_len + 7) / 8; + memcpy(&(ptDtbOutSa->udSalt), + xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen, + sizeof(ptDtbOutSa->udSalt)); + memcpy((ptDtbOutSa->aucSaCipherKey), xs->aead->alg_key, + ptDtbOutSa->ucCipherkeyLen); + } + /*这里处理组合算法CCM,CCM的salt是3B*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE; + + ptDtbOutSa->ucCipherkeyLen = + (xs->aead->alg_key_len + 7) / 8 - 3; + udIcvLen = (xs->aead->alg_icv_len + 7) / 8; + memcpy(&(ptDtbOutSa->udSalt), + xs->aead->alg_key + ptDtbOutSa->ucCipherkeyLen, + sizeof(ptDtbOutSa->udSalt)); + memcpy((ptDtbOutSa->aucSaCipherKey), xs->aead->alg_key, + ptDtbOutSa->ucCipherkeyLen); + } + /*这里处理有salt的单加密算法CTR,salt是4B*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR) { + ptDtbOutSa->ucCipherkeyLen = + (xs->ealg->alg_key_len + 7) / 8 - 4; + memcpy(&(ptDtbOutSa->udSalt), + xs->ealg->alg_key + ptDtbOutSa->ucCipherkeyLen, + sizeof(ptDtbOutSa->udSalt)); + memcpy((ptDtbOutSa->aucSaCipherKey), xs->ealg->alg_key, + ptDtbOutSa->ucCipherkeyLen); + } + /*空加密算法*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE; + ptDtbOutSa->ucCipherkeyLen = 0; + } + /*单加密算法,且没有salt*/ + else { + ptDtbOutSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8; + memcpy((ptDtbOutSa->aucSaCipherKey), xs->ealg->alg_key, + ptDtbOutSa->ucCipherkeyLen); + } + + ptDtbOutSa->udSN = xs->replay.oseq; + ptDtbOutSa->uddProcessedByteCnt = + xs->curlft + .bytes; //PUB_HTON64(uddProcessedByteCnt); //没法设置,用于构造iv, 一般都是用seq构造的iv + + ptDtbOutSa->udSPI = xs->id.spi; + ptDtbOutSa->udSaId = PUB_HTON32(0x80001); //PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/ + + ptDtbOutSa->ucCiperID = zxdh_ealgo_id; + ptDtbOutSa->ucAuthID = zxdh_auth_id; + + //CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE 
eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam) + //E_CMDK_SEC_ENCRYP_MODE 这个只能根据算法反推 gcm ccm gmac chacha是combine gaucSecSwanIpv6Data + //udIcvLen + //mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE , XFRM_MODE_TRANSPORT + /*这个地方还要根据算法做个转换 e_SEC_ENCRYP_ESP_COMBINED_MODE 暂时用GCM*/ + CmdkBttlSecSaParamConstruct(1, xs->props.mode, 0, + e_SEC_SA_LIVETIME_TIME_TYPE, + e_SEC_SA_DF_BYPASS_MODE, zxdh_encpy_mode, + udIcvLen, &usSaParam); + ptDtbOutSa->usSaParam = PUB_HTON16(usSaParam); + + ptDtbOutSa->usFrag_State = PUB_HTON16(0xd2c8); + + ptDtbOutSa->udLifetimeSecMax = PUB_HTON32(0xc4454766); + ptDtbOutSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff); + + ptDtbOutSa->ucProtocol = xs->id.proto; //50esp协议 51ah + ptDtbOutSa->ucTOS = 0xbb; + + /*esn相关*/ + ptDtbOutSa->ucEsnFlag = 0; /* 默认是非ESN模式 */ + if (xs->props.flags & XFRM_STATE_ESN) { + if (NULL == xs->replay_esn) + return 1; + ptDtbOutSa->ucEsnFlag = + 0xff; //ucEsnFlag; //0xff表示开启ESN,否则不开启 + ptDtbOutSa->udSN = xs->replay_esn->oseq; + ptDtbOutSa->udESN = + xs->replay_esn + ->oseq_hi; /*不需要考虑replay_esn为null的情况?*/ + } + + /*ipv4*/ + if (AF_INET == xs->props.family) { + ptDtbOutSa->ucIpType = + 1 << 6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/ + ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a4; + ptDtbOutSa->udSrcAddress1 = 0x0; + ptDtbOutSa->udSrcAddress2 = 0x0; + ptDtbOutSa->udSrcAddress3 = 0x0; + + ptDtbOutSa->udDstAddress0 = xs->id.daddr.a4; + ptDtbOutSa->udDstAddress1 = 0x0; + ptDtbOutSa->udDstAddress2 = 0x0; + ptDtbOutSa->udDstAddress3 = 0x0; + } + /*ipv4*/ + else if (AF_INET6 == xs->props.family) { + ptDtbOutSa->ucIpType = + 2 << 6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/ + ptDtbOutSa->udSrcAddress0 = xs->props.saddr.a6[0]; + ptDtbOutSa->udSrcAddress1 = xs->props.saddr.a6[1]; + ptDtbOutSa->udSrcAddress2 = xs->props.saddr.a6[2]; + ptDtbOutSa->udSrcAddress3 = xs->props.saddr.a6[3]; + + ptDtbOutSa->udDstAddress0 = 
xs->id.daddr.a6[0]; + ptDtbOutSa->udDstAddress1 = xs->id.daddr.a6[1]; + ptDtbOutSa->udDstAddress2 = xs->id.daddr.a6[2]; + ptDtbOutSa->udDstAddress3 = xs->id.daddr.a6[3]; + } else { + return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/ + } + + ptDtbOutSa->udRSV0 = 0x0; + ptDtbOutSa->udRSV1 = 0x0; + ptDtbOutSa->udRSV2 = 0x0; + + DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthkeyLen:0x%x\n", + ptDtbOutSa->ucAuthkeyLen); + DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCipherkeyLen:0x%x\n", + ptDtbOutSa->ucCipherkeyLen); + DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n", zxdh_encpy_mode); + DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucCiperID:0x%x\n", + ptDtbOutSa->ucCiperID); + DH_LOG_INFO(MODULE_SEC, "ptDtbOutSa->ucAuthID:0x%x\n", + ptDtbOutSa->ucAuthID); + + return 0; +} + +static int zxdh_ipsec_dtb_in_sa_get(struct xfrm_state *xs, + T_HAL_SA_DTB_HW_IN *ptDtbInSa) +{ + int err = -EINVAL; + u16 usSaParam = 0; + u32 udIcvLen = 0; + E_HAL_SEC_IPSEC_AUTH_ALG zxdh_auth_id; + E_HAL_SEC_IPSEC_CIPHER_ALG zxdh_ealgo_id; + E_CMDK_SEC_ENCRYP_MODE zxdh_encpy_mode = e_SEC_ENCRYP_MODE_LAST; + char test_alg_name[] = "zxdh_alg_test"; + char *p_aalg_alg_name = test_alg_name; + char *p_ealg_alg_name = test_alg_name; + char *p_aead_alg_name = test_alg_name; + + /*应该和pcs的思路一样 ,mlx5e_xfrm_validate_state 参数校验里去把sa的赋值做了*/ + if (NULL != xs->aalg) { + p_aalg_alg_name = xs->aalg->alg_name; + } + if (NULL != xs->ealg) { + p_ealg_alg_name = xs->ealg->alg_name; + } + if (NULL != xs->aead) { + p_aead_alg_name = xs->aead->alg_name; + } + err = zxdh_ipsec_auth_id_get(xs->props.aalgo, p_aalg_alg_name, + &zxdh_auth_id); + if (err) { + DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state aalgo:%u\n", + xs->props.aalgo); + return -EINVAL; + } + err = zxdh_ipsec_cipher_id_get(xs->props.ealgo, p_ealg_alg_name, + p_aead_alg_name, &zxdh_ealgo_id); + if (err) { + DH_LOG_INFO(MODULE_SEC, "Cannot offload xfrm state ealgo:%u\n", + xs->props.aalgo); + return -EINVAL; + } + + ptDtbInSa->ucAuthkeyLen = 0; /*默认值*/ + + /*处理单认证算法*/ + if 
(zxdh_auth_id == e_HAL_IPSEC_AUTH_NULL) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_ENCRYP_MODE; + } else { + ptDtbInSa->ucAuthkeyLen = (xs->aalg->alg_key_len + 7) / 8; + udIcvLen = (xs->aalg->alg_trunc_len + 7) / 8; + memcpy((ptDtbInSa->aucSaAuthKey), xs->aalg->alg_key, + ptDtbInSa->ucAuthkeyLen); + } + + if ((zxdh_ealgo_id != e_HAL_IPSEC_CIPHER_NULL) && + (zxdh_auth_id != e_HAL_IPSEC_AUTH_NULL)) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE; + } + /*这里处理组合算法的4字节的salt*/ + if ((zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GCM) || + (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_CHACHA) || + (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_GMAC)) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE; + + ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 4; + udIcvLen = (xs->aead->alg_icv_len + 7) / 8; + memcpy(&(ptDtbInSa->udSalt), + xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen, + sizeof(ptDtbInSa->udSalt)); + memcpy((ptDtbInSa->aucSaCipherKey), xs->aead->alg_key, + ptDtbInSa->ucCipherkeyLen); + } + /*这里处理组合算法CCM,CCM的salt是3B*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CCM) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_COMBINED_MODE; + + ptDtbInSa->ucCipherkeyLen = (xs->aead->alg_key_len + 7) / 8 - 3; + udIcvLen = (xs->aead->alg_icv_len + 7) / 8; + memcpy(&(ptDtbInSa->udSalt), + xs->aead->alg_key + ptDtbInSa->ucCipherkeyLen, + sizeof(ptDtbInSa->udSalt)); + memcpy((ptDtbInSa->aucSaCipherKey), xs->aead->alg_key, + ptDtbInSa->ucCipherkeyLen); + } + /*这里处理有salt的单加密算法CTR,salt是4B*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_AES_CTR) { + ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8 - 4; + memcpy(&(ptDtbInSa->udSalt), + xs->ealg->alg_key + ptDtbInSa->ucCipherkeyLen, + sizeof(ptDtbInSa->udSalt)); + memcpy((ptDtbInSa->aucSaCipherKey), xs->ealg->alg_key, + ptDtbInSa->ucCipherkeyLen); + } + /*空加密算法*/ + else if (zxdh_ealgo_id == e_HAL_IPSEC_CIPHER_NULL) { + zxdh_encpy_mode = e_SEC_ENCRYP_ESP_AUTH_MODE; + ptDtbInSa->ucCipherkeyLen = 0; + } + 
/*单加密算法,且没有salt*/ + else { + ptDtbInSa->ucCipherkeyLen = (xs->ealg->alg_key_len + 7) / 8; + memcpy((ptDtbInSa->aucSaCipherKey), xs->ealg->alg_key, + ptDtbInSa->ucCipherkeyLen); + } + + ptDtbInSa->uddProcessedByteCnt = + xs->curlft + .bytes; //PUB_HTON64(uddProcessedByteCnt); //没法设置,用于构造iv, 一般都是用seq构造的iv + + ptDtbInSa->udSPI = xs->id.spi; + ptDtbInSa->udSaId = PUB_HTON32(0x80000); //PUB_HTON32(udSaId); /*这个要软件自己管理,需要设计一下*/ + + ptDtbInSa->ucCiperID = zxdh_ealgo_id; + ptDtbInSa->ucAuthID = zxdh_auth_id; + + //CmdkBttlSecSaParamConstruct(UINT32 udEntryValid,E_CMDK_SEC_IPSEC_MODE eTunnelMode,UINT32 udSeqCnterOverflow,E_CMDK_LIVETIME_TYPES eLiveTimeType,E_CMDK_SEC_SA_DF_MODE eSaDfMode,E_CMDK_SEC_ENCRYP_MODE eEncryptionMode,UINT32 udIcvLen,UINT16* pusSaParam) + //CmdkBttlSecSaParamConstruct(1,x->props.mode,不确定,e_SEC_SA_LIVETIME_BYTE_TYPE(好像硬件只支持这个),E_CMDK_SEC_SA_DF_MODE(0),) + //E_CMDK_SEC_ENCRYP_MODE 这个只能根据算法反推 gcm ccm gmac chacha是combine gaucSecSwanIpv6Data + //udIcvLen + //mode的定义刚好一样E_CMDK_SEC_IPSEC_MODE , XFRM_MODE_TRANSPORT + CmdkBttlSecSaParamConstruct(1, xs->props.mode, 0, + e_SEC_SA_LIVETIME_TIME_TYPE, + e_SEC_SA_DF_BYPASS_MODE, zxdh_encpy_mode, + udIcvLen, &usSaParam); + ptDtbInSa->usSaParam = PUB_HTON16(usSaParam); + + ptDtbInSa->usFrag_State = PUB_HTON16(0xd2c8); + + ptDtbInSa->udLifetimeSecMax = PUB_HTON32(0xc4454766); + ptDtbInSa->uddLifetimByteCntMax = PUB_HTON64(0xffffffffffffffff); + + ptDtbInSa->ucProtocol = xs->id.proto; //50esp协议 51ah + ptDtbInSa->ucTOS = 0xbb; + + /*esn相关*/ + ptDtbInSa->ucEsnFlag = 0; /* 默认是非ESN模式 */ + if (xs->props.flags & XFRM_STATE_ESN) { + if (NULL == xs->replay_esn) + return 1; + ptDtbInSa->ucEsnFlag = + 0xff; //ucEsnFlag; //0xff表示开启ESN,否则不开启 + ptDtbInSa->udAntiWindowHigh = + PUB_HTON32(xs->replay_esn->seq_hi); /*ESN*/ + ptDtbInSa->udAntiWindowLow = + PUB_HTON32(xs->replay_esn->replay_window - + 1); /*窗口上限sn,这里使用窗口大小-1*/ + memcpy((void *)ptDtbInSa->aucBitmap, + (void *)xs->replay_esn->bmp, + xs->replay_esn->bmp_len * + 
sizeof(__u32)); /*需要提前判断bmp_len不能太大,避免超过64(拦截窗口大小就行)*/ + } + + /*ipv4*/ + if (AF_INET == xs->props.family) { + ptDtbInSa->ucIpType = + 1 << 6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/ + ptDtbInSa->udSrcAddress0 = xs->props.saddr.a4; + ptDtbInSa->udSrcAddress1 = 0x0; + ptDtbInSa->udSrcAddress2 = 0x0; + ptDtbInSa->udSrcAddress3 = 0x0; + + ptDtbInSa->udDstAddress0 = xs->id.daddr.a4; + ptDtbInSa->udDstAddress1 = 0x0; + ptDtbInSa->udDstAddress2 = 0x0; + ptDtbInSa->udDstAddress3 = 0x0; + } + /*ipv4*/ + else if (AF_INET6 == xs->props.family) { + ptDtbInSa->ucIpType = + 2 << 6; //bit[7:6] 1:ivp4 2:ipv6 /*换成宏*/ + ptDtbInSa->udSrcAddress0 = xs->props.saddr.a6[0]; + ptDtbInSa->udSrcAddress1 = xs->props.saddr.a6[1]; + ptDtbInSa->udSrcAddress2 = xs->props.saddr.a6[2]; + ptDtbInSa->udSrcAddress3 = xs->props.saddr.a6[3]; + + ptDtbInSa->udDstAddress0 = xs->id.daddr.a6[0]; + ptDtbInSa->udDstAddress1 = xs->id.daddr.a6[1]; + ptDtbInSa->udDstAddress2 = xs->id.daddr.a6[2]; + ptDtbInSa->udDstAddress3 = xs->id.daddr.a6[3]; + } else { + return -EINVAL; /*不可能走到这里,前面函数已经校验过了*/ + } + + ptDtbInSa->udOutSaId = 0x0; /*内核不需要出入境sa一起下吧,固定为0*/ + ptDtbInSa->usOutSaOffset = 0x0; + + ptDtbInSa->udRSV0 = 0x0; + ptDtbInSa->udRSV1 = 0x0; + + DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthkeyLen:0x%x\n", + ptDtbInSa->ucAuthkeyLen); + DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCipherkeyLen:0x%x\n", + ptDtbInSa->ucCipherkeyLen); + DH_LOG_INFO(MODULE_SEC, "zxdh_encpy_mode:0x%x\n", zxdh_encpy_mode); + DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucCiperID:0x%x\n", + ptDtbInSa->ucCiperID); + DH_LOG_INFO(MODULE_SEC, "ptDtbInSa->ucAuthID:0x%x\n", + ptDtbInSa->ucAuthID); + + return 0; +} +#endif + +VOID RdlSecWrite(UINT64 uddSecBase, UINT32 udRegOff, UINT32 udRegVal) +{ + PUB_WRITE_REG32(uddSecBase + udRegOff, udRegVal); +} + +UINT32 HalSecWrite(struct zxdh_en_device *en_dev, UINT32 udSecEngineId, + UINT32 udRegOff, UINT32 udRegVal) +{ + UINT64 uddBttlSecBase = 0; + UINT32 udSecnBaseOff = 0; + UINT64 uddSecnBase = 0; + + 
PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + + uddBttlSecBase = HalBttlSecRegBaseGet(en_dev); + //udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET; //host不允许操作第二套 + uddSecnBase = uddBttlSecBase + udSecnBaseOff; + //DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet regBase vir:0x%llx\n",uddSecnBase); + //DH_LOG_INFO(MODULE_SEC, "HalBttlSecRegBaseGet regBase pa:0x%llx\n",virt_to_phys((void*)uddSecnBase)); + RdlSecWrite(uddSecnBase, udRegOff, udRegVal); + + return 0; +} + +UINT32 RdlSecRead(UINT64 uddSecBase, UINT32 udRegOff) +{ + return PUB_READ_REG32(uddSecBase + udRegOff); +} + +UINT32 HalSecRead(struct zxdh_en_device *en_dev, UINT32 udSecEngineId, + UINT32 udRegOff) +{ + UINT64 uddBttlSecBase = 0; + UINT32 udSecnBaseOff = 0; + UINT64 uddSecnBase = 0; + + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + uddBttlSecBase = HalBttlSecRegBaseGet(en_dev); + udSecnBaseOff = udSecEngineId * REG_SEC_IDX_OFFSET; + uddSecnBase = uddBttlSecBase + udSecnBaseOff; + + return RdlSecRead(uddSecnBase, udRegOff); +} + +UINT64 HalBttlVaToVpa(struct zxdh_en_device *en_dev, UINT64 pVaAddr) +{ + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + return (UINT64)virt_to_phys((void *)pVaAddr); +} + +UINT64 HalBttlVpaToVa(struct zxdh_en_device *en_dev, UINT64 pVpaAddr) +{ + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + return (UINT64)phys_to_virt(pVpaAddr); +} + +#if 0 +VOID PubDumpBuf(UINT8 *pucBuf, UINT32 udLen) +{ + UINT32 i = 0; + UINT32 j = 0; + UINT8 *pucPtr = NULL; + + pucPtr = pucBuf; + for (j = 0; j < 48; j++) { + PUB_PRINTF("-"); + } + PUB_PRINTF("\n"); + + for (i = 0; i < udLen; i++) { + PUB_PRINTF("%02X ", pucPtr[i]); + if (15 == i % 16) { + PUB_PRINTF(" *"); + PUB_PRINTF("*\n"); + } + } + + if (0 == udLen % 16) { + PUB_PRINTF("\n"); + } else { + for (i = (udLen % 16); i < 16; i++) { + PUB_PRINTF(" "); + } + PUB_PRINTF(" *"); + PUB_PRINTF("\n\n"); + } +} +#endif + +void BttlPubDump(unsigned char *ucBuf, UINT32 udLen) +{ + int i = 0; + int j = 0; + unsigned char *ptr = NULL; + + ptr = ucBuf; + + if (ucBuf == NULL || 
udLen == 0) { + return; + } + + for (j = 0; j < 64; j++) { + printk("-"); + } + printk("\n"); + + for (i = 0; i < udLen; i++) { + if (0 == i % 16) { + printk("0x%08x ", (i / 16) * 16); + } + printk("%02X ", ptr[i]); + if (15 == i % 16) { + printk(" *"); + printk("*\n"); + } + } + + if (0 == udLen % 16) { + printk("\n"); + } else { + for (i = (udLen % 16); i < 16; i++) { + printk(" "); + } + printk(" *"); + printk("\n\n"); + } + + return; +} + +UINT32 CmdkBttlTestSaAckRslGet(UINT64 uddSaVirAddr, + E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType, + UINT32 *pudIsDtbAckFinish, UINT32 *pudDtbAckRsl) +{ + UINT32 udVal = 0; + UINT32 udDtbAckFinish = 0; + + /*入参检查*/ + //BTTL_PUB_ID_CHECK(eDtbSaCmdType, E_DTB_SA_CMD_LAST); + //BTTL_PUB_NULL_CHECK(pudIsDtbAckFinish); + //BTTL_PUB_NULL_CHECK(pudDtbAckRsl); + + *pudIsDtbAckFinish = 0; /* 默认置为响应未完成 */ + + udVal = *((UINT32 *)(uddSaVirAddr)); + udDtbAckFinish = PUB_BIT_FIELD_RIGHT_JUST_GET64(udVal, 0, 24); + *pudDtbAckRsl = PUB_BIT_FIELD_RIGHT_JUST_GET64(udVal, 24, 8); + + if (E_DTB_SA_CMD_FLOW_DOWN == eDtbSaCmdType) { + if (udDtbAckFinish == 0x5a5a5a) { + *pudIsDtbAckFinish = 1; + } + } else { + if (udDtbAckFinish == 0x555555) { + *pudIsDtbAckFinish = 1; + } + } + + return 0; +} + +/*参考NP的函数 dpp_dtb_user_info_set*/ +UINT32 CmdkBttlSecSaDownload(struct zxdh_en_device *en_dev, + UINT32 udSecEngineId, T_QUEUE_DTB_REG *pt, + UINT32 udQueIndex) +{ + UINT32 udRet; + UINT32 udRegVal; + //UINT32 udQueIndex = 1; + UINT32 udEpldVfunNum = 0; + UINT32 udPcieDbiEn = + 1; /*1为dbi中断,0为match中断,NP这里是全局变量,暂时写死*/ + UINT32 udEpid = 5; + UINT32 udVfuncNum = 0; + UINT32 udCfgMsixVector = 2; /*以前host驱动写的是2,暂时写死*/ + UINT32 udFuncNum = 2; + UINT32 udVfuncActive = 0; + UINT16 usVport = 0; + + //BTTL_PUB_ID_CHECK(en_dev, CMDK_BTTL_PUB_CHIP_MAX); + //BTTL_PUB_ID_CHECK(udSecEngineId, HAL_SEC_MAX_ENGINE); + //BTTL_PUB_NULL_CHECK(pt); + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + + usVport = en_dev->ops->get_vport(en_dev->parent); + + 
//写sa的队列锁状态寄存器CFG_DTB_QUEUE_LOCK_STATE,共128个队列,理论上应该查询 + //udRet = HalSecWrite(en_dev, udSecEngineId, REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(0), PUB_BIT_SET(udLockMask,udQueIndex)); + + //暂时沟通是,只需要将epid配置为0,下表模块就会去riscv侧下表,暂时可以不配 + udEpid = EPID(usVport) + 5; + udVfuncNum = VFUNC_NUM(usVport); + udFuncNum = FUNC_NUM(usVport); + udVfuncActive = VF_ACTIVE(usVport); + + DH_LOG_INFO(MODULE_SEC, "udEpid:0x%x,udVfuncNum:0x%x\n", udEpid, + udVfuncNum); + DH_LOG_INFO(MODULE_SEC, "udFuncNum:0x%x,udVfuncActive:0x%x\n", + udFuncNum, udVfuncActive); + + PUB_BIT_FIELD_SET64(udEpldVfunNum, udVfuncActive, 0, 1); + PUB_BIT_FIELD_SET64(udEpldVfunNum, udFuncNum, 5, 3); + PUB_BIT_FIELD_SET64(udEpldVfunNum, udCfgMsixVector, 8, 7); + PUB_BIT_FIELD_SET64(udEpldVfunNum, udVfuncNum, 16, 8); + PUB_BIT_FIELD_SET64(udEpldVfunNum, udEpid, 24, 4); + PUB_BIT_FIELD_SET64(udEpldVfunNum, udPcieDbiEn, 31, 1); + + //return 0; + DH_LOG_INFO(MODULE_SEC, "udEpldVfunNum = 0x%x\n", udEpldVfunNum); + HalSecWrite(en_dev, udSecEngineId, + REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(udQueIndex), + udEpldVfunNum); + + //查询所申请队列剩余空间,如果队列剩余空间大于0则可入队; + udRegVal = + HalSecRead(en_dev, udSecEngineId, + REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(udQueIndex)); + if (udRegVal < 2) { + BTTL_PRINTF("queue:%u buf empty left:%u\n", udQueIndex, + udRegVal); + return 1; + } + if (udRegVal > 0x20) { + BTTL_PRINTF("queue:%u buf left:%u\n", udQueIndex, udRegVal); + return 1; + } + + //先写DTB_ADDR[63:32],接着写DTB_ADDR[31:0],最后写usdtb_len(软件需严格遵守该顺序) + udRet = HalSecWrite(en_dev, udSecEngineId, + REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(udQueIndex), + pt->DtbAddrH); + + //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrH = 0x%x\n",pt->DtbAddrH); + udRet = HalSecWrite(en_dev, udSecEngineId, + REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(udQueIndex), + pt->DtbAddrL); + + //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrL = 0x%x\n",pt->DtbAddrL); + //DH_LOG_INFO(MODULE_SEC, "pt->DtbAddrVir = 0x%llx\n",HalBttlVpaToVa(en_dev,(UINT64)((UINT64)pt->DtbAddrH)<<32)+pt->DtbAddrL); + // CMD寄存器最后配 + 
udRet = HalSecWrite(en_dev, udSecEngineId, + REG_SEC_CFG_QUEUE_DTB_LEN_0_127(udQueIndex), + pt->DtbCmd); + + //DH_LOG_INFO(MODULE_SEC, "pt->DtbCmd = 0x%x\n",pt->DtbCmd); + return 0; +} + +/* + sa下表模块测试 + SA存放地址,第二套L2D uddSaL2DPhyAddr= 0x6201000000;理论上为68位,目前场景为64位 + usdtb_len =30; +*/ +//E_SA_TYPE geSaType; + +UINT32 gudTestCnt; +UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev, + E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType, + E_SA_TYPE eSaType, UINT64 uddSaVirAddr, + UINT32 udDtbSaIsIntEn, UINT32 udDtbLen, + UINT32 udQueIndex) +{ + /* int_en指示是否产生需要中断 第29位,cmd_type=0指示为流表下发命令,cmd_type=1指示为流表dump命令 第30位 一对sa表的大小为480字节,以16字节为单位*/ + T_QUEUE_DTB_REG tDtbReg = { 0 }; + UINT32 udDtbCmd = 0; + UINT64 uddSaPhaAddr = 0; + UINT32 udIsDtbAckFinish = 0; + UINT32 udDtbAckRsl = 0; + UINT32 udRet = 0; + int i = 0; + + /*入参检查*/ + //BTTL_PUB_ID_CHECK(en_dev, CMDK_BTTL_PUB_CHIP_MAX); + //BTTL_PUB_ID_CHECK(eDtbSaCmdType, E_DTB_SA_CMD_LAST); + PUB_CHECK_NULL_PTR_RET_ERR(en_dev); + + uddSaPhaAddr = (UINT64)HalBttlVaToVpa(en_dev, uddSaVirAddr); + //BTTL_PUB_0_CHECK(uddSaPhaAddr); + DH_LOG_INFO(MODULE_SEC, "uddSaVirAddr:0x%llx,uddSaPhaAddr:0x%llx\n", + uddSaVirAddr, uddSaPhaAddr); + + //构造udDtbCmd + PUB_BIT_FIELD_SET64(udDtbCmd, udDtbLen >> 4, 0, 10); + PUB_BIT_FIELD_SET64(udDtbCmd, eSaType, 27, 2); + PUB_BIT_FIELD_SET64(udDtbCmd, udDtbSaIsIntEn, 29, 1); + PUB_BIT_FIELD_SET64(udDtbCmd, eDtbSaCmdType, 30, 1); + + tDtbReg.DtbAddrH = + (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr, 32, 32); + tDtbReg.DtbAddrL = + (UINT32)PUB_BIT_FIELD_RIGHT_JUST_GET64(uddSaPhaAddr, 0, 32); + tDtbReg.DtbCmd = udDtbCmd; + + /* 配置下表寄存器 */ + for (i = 0; i < gudDtbSaNum; i++) { + //gudTestCnt++; + //(*(volatile UINT32*)(uddSaVirAddr + 16)) = PUB_NTOH32(gudTestCnt); + udRet = CmdkBttlSecSaDownload(en_dev, 0, &tDtbReg, udQueIndex); + //PUB_CHECK_RET_VAL_RV(udRet); + } + /* 等待 */ + msleep(1000); + // PubUsDelay(10); //等待多久需要微院确认 + + udRet = CmdkBttlTestSaAckRslGet(uddSaVirAddr, eDtbSaCmdType, + 
&udIsDtbAckFinish, &udDtbAckRsl); + PUB_CHECK_RET_VAL_RV(udRet); + + if ((1 == udIsDtbAckFinish) && (0xff == udDtbAckRsl)) { + return 0; + } else { + BTTL_PRINTF("CmdkBttlTestSa Dtb Ack is error!! udIsDtbAckFinish:%u,udDtbAckRsl:%u\n", + udIsDtbAckFinish, udDtbAckRsl); + BttlPubDump((unsigned char *)uddSaVirAddr, 0x60); + return 1; + } + + return 0; +} + +#if 1 +static int +zxdh_ipsec_add_sa(struct xfrm_state *xs, + __attribute__((unused)) struct netlink_ext_ack *extack) +{ + struct xfrm_dev_offload *xso = &xs->xso; + struct net_device *netdev = xso->dev; + struct zxdh_en_priv *en_priv = NULL; + //struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_device *en_dev = NULL; + dma_addr_t dma_handle; + UINT32 dma_size = 0x1000; //暂定4K,批量下表情况下需要更多 + + UINT64 uddDtbSaVirAddr = 0; + UINT32 udSaTblLen = 0; + int ret = 0; + + en_priv = netdev_priv(netdev); + en_dev = &(en_priv->edev); + + if (unlikely(en_dev->drs_sec_pri.SecVAddr == 0)) { + en_dev->drs_sec_pri.SecVAddr = (uint64_t)dma_alloc_coherent(en_dev->dmadev, dma_size, &dma_handle, GFP_KERNEL); + if (en_dev->drs_sec_pri.SecVAddr == 0) { + DH_LOG_INFO(MODULE_SEC, + "zxdh_ipsec_add_sa dma_alloc_coherent fail\n"); + return -1; + } + en_dev->drs_sec_pri.SecPAddr = dma_handle; + en_dev->drs_sec_pri.SecMemSize = dma_size; + } + uddDtbSaVirAddr = en_dev->drs_sec_pri.SecVAddr; + + DH_LOG_INFO(MODULE_SEC, "uddDtbSaVirAddr:0x%llx\n", uddDtbSaVirAddr); + //DH_LOG_INFO(MODULE_SEC, "xs:0x%llx\n",xs); + + memset((void *)uddDtbSaVirAddr, 0, 1024); + + //else if(1 == xs->xso.dir) + if (xso->flags & XFRM_OFFLOAD_INBOUND) { + ret = zxdh_ipsec_dtb_in_sa_get(xs, (T_HAL_SA_DTB_HW_IN *)(uddDtbSaVirAddr + 16)); + if (ret != 0) { + return 1; + } + BttlPubDump((unsigned char *)uddDtbSaVirAddr, + 0x210); //传入时加了16字节的回写空间 + +#if 1 + udSaTblLen = 512 - 16; + CmdkBttlTestSecDtbSaAdd(en_dev, E_DTB_SA_CMD_FLOW_DOWN, + E_SATYPE_IN, uddDtbSaVirAddr, 0, + udSaTblLen, 2); + +#endif + } + //if(2 == xs->xso.dir) + else { + ret = 
zxdh_ipsec_dtb_out_sa_get(xs, (T_HAL_SA_DTB_HW_OUT *)(uddDtbSaVirAddr + 16)); + if (ret != 0) { + return 1; + } + BttlPubDump((unsigned char *)uddDtbSaVirAddr, + 0x110); //传入时加了16字节的回写空间 + +#if 1 + udSaTblLen = 256 - 16; + CmdkBttlTestSecDtbSaAdd(en_dev, E_DTB_SA_CMD_FLOW_DOWN, + E_SATYPE_OUT, uddDtbSaVirAddr, 0, + udSaTblLen, 2); +#endif + } + + return 0; +} + +void zxdh_ipsec_del_sa(struct xfrm_state *xs) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_del_sa\n"); + return; +} + +bool zxdh_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_offload_ok\n"); + return true; +} + +void zxdh_ipsec_state_advance_esn(struct xfrm_state *x) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_advance_esn\n"); + return; +} +void zxdh_ipsec_state_update_curlft(struct xfrm_state *x) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_state_update_curlft\n"); + return; +} +int zxdh_ipsec_policy_add(struct xfrm_policy *x) +{ +#if 1 + int32_t ret = 0; + UINT8 aucSip[4] = { 0xc8, 0xfe, 0x00, 0x1 }; + UINT8 aucDip[4] = { 0xc8, 0xfe, 0x00, 0x2 }; + UINT8 aucSipMask[4] = { 0xff, 0xff, 0x00, 0x0 }; + UINT8 aucDipMask[4] = { 0xff, 0xff, 0x00, 0x0 }; + /*6.2的内核才有*/ + //struct xfrm_dev_offload *xdo = &x->xdo; + //struct net_device *netdev = xdo->dev; + struct net_device *netdev = NULL; //低版本内核仅编译通过 + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + DPP_PF_INFO_T pf_info = { 0 }; + + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_add\n"); + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /*np下表 inline sec模式 打开*/ + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_INLINE_SEC_OFFLOAD, 1); + if (ret != 0) { + LOG_ERR("Failed to set port_attr SRIOV_VPORT_INLINE_SEC_OFFLOAD !\n"); + } + + /*配置np ipset加密表*/ + ret = dpp_ipsec_enc_entry_add(&pf_info, 0, aucSip, aucDip, aucSipMask, + aucDipMask, 1, 0x80001); + if (ret != 0) { + LOG_ERR("xfrm policy dpp_ipsec_enc_entry_add Failed!\n"); + } +#endif + + 
return 0; +} +void zxdh_ipsec_policy_delete(struct xfrm_policy *x) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_delete\n"); + return; +} +void zxdh_ipsec_policy_free(struct xfrm_policy *x) +{ + DH_LOG_INFO(MODULE_SEC, "zxdh_ipsec_policy_free\n"); + return; +} + +const struct xfrmdev_ops zxdh_xfrmdev_ops = { + .xdo_dev_state_add = zxdh_ipsec_add_sa, + .xdo_dev_state_delete = zxdh_ipsec_del_sa, + .xdo_dev_offload_ok = zxdh_ipsec_offload_ok, + //.xdo_dev_state_advance_esn = zxdh_ipsec_state_advance_esn, + //.xdo_dev_state_update_curlft = zxdh_ipsec_state_update_curlft, + //.xdo_dev_policy_add = zxdh_ipsec_policy_add, + //.xdo_dev_policy_free = zxdh_ipsec_policy_free, +}; +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h b/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h new file mode 100644 index 000000000000..801d263d1555 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/drs_sec_dtb.h @@ -0,0 +1,534 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : drs_sec.dtb.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2024/01/29 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef DRS_SEC_DTB_H +#define DRS_SEC_DTB_H +/*同步pub宏定义*/ +typedef void VOID; +typedef float FLOAT; +typedef double DOUBLE; + +typedef signed char INT8; +typedef unsigned char UINT8; + +typedef char CHAR; + +typedef signed short INT16; +typedef unsigned short UINT16; + +typedef signed int INT32; +typedef unsigned int UINT32; + +typedef signed long long INT64; +typedef unsigned long long UINT64; + +#define BITWIDTH1 ((UINT32)0x00000001) +#define BITWIDTH2 ((UINT32)0x00000003) +#define BITWIDTH3 ((UINT32)0x00000007) +#define BITWIDTH4 ((UINT32)0x0000000f) +#define BITWIDTH5 ((UINT32)0x0000001f) +#define BITWIDTH6 ((UINT32)0x0000003f) +#define BITWIDTH7 ((UINT32)0x0000007f) 
+#define BITWIDTH8 ((UINT32)0x000000ff) +#define BITWIDTH9 ((UINT32)0x000001ff) +#define BITWIDTH10 ((UINT32)0x000003ff) +#define BITWIDTH11 ((UINT32)0x000007ff) +#define BITWIDTH12 ((UINT32)0x00000fff) +#define BITWIDTH13 ((UINT32)0x00001fff) +#define BITWIDTH14 ((UINT32)0x00003fff) +#define BITWIDTH15 ((UINT32)0x00007fff) +#define BITWIDTH16 ((UINT32)0x0000ffff) +#define BITWIDTH17 ((UINT32)0x0001ffff) +#define BITWIDTH18 ((UINT32)0x0003ffff) +#define BITWIDTH19 ((UINT32)0x0007ffff) +#define BITWIDTH20 ((UINT32)0x000fffff) +#define BITWIDTH21 ((UINT32)0x001fffff) +#define BITWIDTH22 ((UINT32)0x003fffff) +#define BITWIDTH23 ((UINT32)0x007fffff) +#define BITWIDTH24 ((UINT32)0x00ffffff) +#define BITWIDTH25 ((UINT32)0x01ffffff) +#define BITWIDTH26 ((UINT32)0x03ffffff) +#define BITWIDTH27 ((UINT32)0x07ffffff) +#define BITWIDTH28 ((UINT32)0x0fffffff) +#define BITWIDTH29 ((UINT32)0x1fffffff) +#define BITWIDTH30 ((UINT32)0x3fffffff) +#define BITWIDTH31 ((UINT32)0x7fffffff) +#define BITWIDTH32 ((UINT32)0xffffffff) + +#define PUB_OK (0) +#define PUB_ERROR (0xffffffff) /*直接定义为0xffffffff*/ + +#define BTTL_PRINTF(fmt, arg...) DH_LOG_INFO(MODULE_SEC, fmt, ##arg) +#define BTTL_PUB_PRINT_ERROR(fmt, arg...) 
DH_LOG_ERR(MODULE_SEC, fmt, ##arg) + +/* 寄存器单bit位操作 */ + +/** 某bit置位,其它bit不变 */ +#define PUB_BIT_SET(reg, bit) ((reg) = ((reg) | (1u << (bit)))) + +/** 某bit清零,其它bit不变 */ +#define PUB_BIT_CLEAR(reg, bit) ((reg) = ((reg) & (~(1u << (bit))))) + +/** 获取某bit的值 (0/1) */ +#define PUB_GET_BIT_VAL(reg, bit) (((reg) >> (bit)) & 1u) + +/** 判断某bit的值是否为1 */ +#define PUB_IS_BIT_SET(reg, pos) (((reg) & (1u << (pos))) != 0x0u) + +/** 判断某bit的值是否为0 */ +#define PUB_IS_BIT_CLEAR(reg, pos) (((reg) & (1u << (pos))) == 0x0u) + +/** 某bit位填写值val,其他bit不变 */ +#define PUB_BIT_INSR(reg, bit, val) \ + ((reg) = (((reg) & (~(1u << (bit)))) | (((val)&1u) << (bit)))) + +#define PUB_BIT_FIELD_MASK_GET64(bitoff, bitfieldlen) \ + ((((UINT64)0x01 << (bitfieldlen)) - 1) << (bitoff)) + +#define PUB_BIT_FIELD_GET64(val, bitoff, bitfieldlen) \ + ((val)&PUB_BIT_FIELD_MASK_GET64(bitoff, bitfieldlen)) + +#define PUB_BIT_FIELD_SET64(var, val, bitoff, bitlen) \ + ((var) = (((var) & (~PUB_BIT_FIELD_MASK_GET64(bitoff, bitlen))) | \ + (((UINT64)val) << (bitoff)))) + +#define PUB_BIT_FIELD_RIGHT_JUST_GET64(val, bitoff, bitfieldlen) \ + (((val) >> (bitoff)) & (((UINT64)0x01 << (bitfieldlen)) - 1)) + +/** 检查空指针,返回错误 */ +#define PUB_CHECK_NULL_PTR_RET_ERR(ptr) \ + do { \ + if (NULL == ptr) { \ + DH_LOG_INFO(MODULE_SEC, \ + "Null Ptr Err! Fuc:%s,Line:%d,File:%s\n", \ + __FUNCTION__, __LINE__, __FILE__); \ + return PUB_ERROR; \ + } \ + } while (0) + +/** 检查空指针,返回VOID */ +#define PUB_CHECK_NULL_PTR_RET_VOID(ptr) \ + do { \ + if (NULL == ptr) { \ + DH_LOG_INFO(MODULE_SEC, \ + "Null Ptr Err! 
Fuc:%s,Line:%d,File:%s\n", \ + __FUNCTION__, __LINE__, __FILE__); \ + return; \ + } \ + } while (0) + +#define PUB_CHECK_RET_VAL_RV(expr) \ + do { \ + UINT32 _ret = (expr); \ + if (PUB_OK != _ret) { \ + DH_LOG_INFO(MODULE_SEC, "%s Error,Line:%d,Ret:0x%x\n", \ + __FUNCTION__, __LINE__, _ret); \ + return _ret; \ + } \ + } while (0) + +#define BTTL_PUB_ID_CHECK(id, cmpid) \ + do { \ + if (cmpid <= (id)) { \ + DH_LOG_INFO( \ + MODULE_SEC, \ + " ID %d <= %d check Err! Fuc:%s,Line:%d,File:%s\n", \ + id, cmpid, __FUNCTION__, __LINE__, __FILE__); \ + return 1; \ + } \ + } while (0) + +#define BTTL_PUB_0_CHECK(value) \ + do { \ + if (0 == (value)) { \ + DH_LOG_INFO( \ + MODULE_SEC, \ + " value %x 0 check Err! Fuc:%s,Line:%d,File:%s\n", \ + value, __FUNCTION__, __LINE__, __FILE__); \ + return E_INVALID_VALUE; \ + } \ + } while (0) + +/* 大小端操作 */ +/** 16位数据大小端转换 */ +#define PUB_SWAP16(x) ((UINT16)((((x) >> 8) & 0xffu) | (((x)&0xffu) << 8))) +/** 32位数据大小端转换 */ +#define PUB_SWAP32(x) \ + ((UINT32)((((UINT32)(x) & (UINT32)0x000000ffUL) << 24) | \ + (((UINT32)(x) & (UINT32)0x0000ff00UL) << 8) | \ + (((UINT32)(x) & (UINT32)0x00ff0000UL) >> 8) | \ + (((UINT32)(x) & (UINT32)0xff000000UL) >> 24))) +/** 64位数据大小端转换 */ +#define PUB_SWAP64(x) \ + ((UINT64)((((UINT64)(x) & (UINT64)0x00000000000000ffUL) << 56) | \ + (((UINT64)(x) & (UINT64)0x000000000000ff00UL) << 40) | \ + (((UINT64)(x) & (UINT64)0x0000000000ff0000UL) << 24) | \ + (((UINT64)(x) & (UINT64)0x00000000ff000000UL) << 8) | \ + (((UINT64)(x) & (UINT64)0x000000ff00000000UL) >> 8) | \ + (((UINT64)(x) & (UINT64)0x0000ff0000000000UL) >> 24) | \ + (((UINT64)(x) & (UINT64)0x00ff000000000000UL) >> 40) | \ + (((UINT64)(x) & (UINT64)0xff00000000000000UL) >> 56))) + +/* 已知数据的大小端,转换为网络序 */ +#define PUB_LE_TO_NET16(x) PUB_SWAP16(x) /**< 将小端数据转换为网络序 */ +#define PUB_LE_TO_NET32(x) PUB_SWAP32(x) /**< 将小端数据转换为网络序 */ +#define PUB_LE_TO_NET64(x) PUB_SWAP64(x) /**< 将小端数据转换为网络序 */ +#define PUB_DE_TO_NET16(x) (x) /**< 将大端数据转换为网络序 */ +#define 
PUB_DE_TO_NET32(x) (x) /**< 将大端数据转换为网络序 */ +#define PUB_DE_TO_NET64(x) (x) /**< 将大端数据转换为网络序 */ + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + +#define PUB_LE_TO_HOST16(x) PUB_SWAP16(x) /**< 小端16位数据转换为主机序 */ +#define PUB_LE_TO_HOST32(x) PUB_SWAP32(x) /**< 小端32位数据转换为主机序 */ +#define PUB_LE_TO_HOST64(x) PUB_SWAP64(x) /**< 小端64位数据转换为主机序 */ +#define PUB_DE_TO_HOST16(x) (x) /**< 大端16位数据转换为主机序 */ +#define PUB_DE_TO_HOST32(x) (x) /**< 大端32位数据转换为主机序 */ +#define PUB_DE_TO_HOST64(x) (x) /**< 大端64位数据转换为主机序 */ +#define PUB_HTON16(x) (x) /**< 16位数据主机序转换为网络序 */ +#define PUB_HTON32(x) (x) /**< 32位数据主机序转换为网络序 */ +#define PUB_HTON64(x) (x) /**< 64位数据主机序转换为网络序 */ +#define PUB_NTOH16(x) (x) /**< 16位数据网络序转换为主机序 */ +#define PUB_NTOH32(x) (x) /**< 32位数据网络序转换为主机序 */ +#define PUB_NTOH64(x) (x) /**< 64位数据网络序转换为主机序 */ + +#else +#define PUB_LE_TO_HOST16(x) (x) +#define PUB_LE_TO_HOST32(x) (x) +#define PUB_LE_TO_HOST64(x) (x) +#define PUB_DE_TO_HOST16(x) PUB_SWAP16(x) +#define PUB_DE_TO_HOST32(x) PUB_SWAP32(x) +#define PUB_DE_TO_HOST64(x) PUB_SWAP64(x) +#define PUB_HTON16(x) PUB_SWAP16(x) +#define PUB_HTON32(x) PUB_SWAP32(x) +#define PUB_HTON64(x) PUB_SWAP64(x) +#define PUB_NTOH16(x) PUB_SWAP16(x) +#define PUB_NTOH32(x) PUB_SWAP32(x) +#define PUB_NTOH64(x) PUB_SWAP64(x) + +#endif + +#if 0 +/*因为SEC下表和NP下表硬件基本一样,这里同步NP关于EPID等定义*/ +#define VF_ACTIVE(VPORT) ((VPORT & 0x0800) >> 11) +#define EPID(VPORT) ((VPORT & 0x7000) >> 12) +#define FUNC_NUM(VPORT) ((VPORT & 0x0700) >> 8) +#define VFUNC_NUM(VPORT) ((VPORT & 0x00FF)) + +#define PF_VQM_VFID_OFFSET (1152) +#define IS_PF(VPORT) (!VF_ACTIVE(VPORT)) +#define VQM_VFID(VPORT) \ + (IS_PF(VPORT) ? 
\ + (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT)) : \ + (EPID(VPORT) * 256 + VFUNC_NUM(VPORT))) + +#define OWNER_PF_VQM_VFID(VPORT) \ + (PF_VQM_VFID_OFFSET + EPID(VPORT) * 8 + FUNC_NUM(VPORT)) +#define OWNER_PF_VPORT(VPORT) (((EPID(VPORT)) << 12) | ((FUNC_NUM(VPORT)) << 8)) + +#define VQM_VFID_MAX_NUM (2048) +#endif + +/*vport格式 +15 |14 13 12 | 11 |10 9 8|7 6 5 4 3 2 1 0| +rsv| ep_id |func_active|func_num| vfunc_num | +*/ +#define VPORT_EPID_BT_START (12) /*EPID起始位*/ +#define VPORT_EPID_BT_LEN (3) /*EPID长度*/ +#define VPORT_FUNC_ACTIVE_BT_START (11) /*FUNC_ACTIVE起始位*/ +#define VPORT_FUNC_ACTIVE_BT_LEN (1) /*FUNC_ACTIVE长度*/ +#define VPORT_FUNC_NUM_BT_START (8) /*FUNC_NUM起始位*/ +#define VPORT_FUNC_NUM_BT_LEN (3) /*FUNC_NUM长度*/ +#define VPORT_VFUNC_NUM_BT_START (0) /*FUNC_NUM起始位*/ +#define VPORT_VFUNC_NUM_BT_LEN (8) /*FUNC_NUM长度*/ + +/** +* @name 通用寄存器操作宏 +* @brief 读寄存器宏定义 +* @{ +*/ +#define PUB_READ_REG8(addr) (*(volatile UINT8 *)(addr)) /**< 读8位寄存器 */ +#define PUB_READ_REG16(addr) (*(volatile UINT16 *)(addr)) /**< 读16位寄存器 */ +#define PUB_READ_REG32(addr) (*(volatile UINT32 *)(addr)) /**< 读32位寄存器 */ +/** @} 通用寄存器操作宏 */ + +/** +* @name 通用寄存器操作宏 +* @brief 写寄存器宏定义 +* @{ +*/ +#define PUB_WRITE_REG8(addr, val_8) \ + (*(volatile UINT8 *)(addr) = val_8) /**< 写8位寄存器 */ +#define PUB_WRITE_REG16(addr, val_16) \ + (*(volatile UINT16 *)(addr) = val_16) /**< 写16位寄存器 */ +#define PUB_WRITE_REG32(addr, val_32) \ + (*(volatile UINT32 *)(addr) = val_32) /**< 写32位寄存器 */ +/** @} 通用寄存器操作宏 */ + +/*寄存器偏移定义*/ +#define REG_SEC_IDX_OFFSET (0x800000) /* SEC内部基地址偏移 */ + +#define REG_SEC_TOP_DTB_OFFSET (0) /*host驱动 这里为0,因为就是从dtb开始映射的*/ +/* CFG_QUEUE_DTB_ADDR_H_0_127 虚机队列入队的高地址寄存器 n=0~127 */ +#define REG_SEC_CFG_QUEUE_DTB_ADDR_H_0_127(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x0000 + n * 32) + +/* CFG_QUEUE_DTB_ADDR_L_0_127 虚机队列入队的低地址寄存器 n=0~127*/ +#define REG_SEC_CFG_QUEUE_DTB_ADDR_L_0_127(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x0004 + n * 32) + +/* CFG_QUEUE_DTB_LEN_0_127 虚机队列入队的长度寄存器 n=0~127*/ 
+#define REG_SEC_CFG_QUEUE_DTB_LEN_0_127(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x0008 + n * 32) + +/* INFO_QUEUE_BUF_SPACE_LEFT_0_127 靠靠靠靠靠靠?n=0~127*/ +#define REG_SEC_INFO_QUEUE_BUF_SPACE_LEFT_0_127(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x000C + n * 32) + +/* CFG_EPID_V_FUNC_NUM_0_127 SOC虚机信息配置寄存器 n=0~127*/ +#define REG_SEC_CFG_EPID_V_FUNC_NUM_0_127(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x0010 + n * 32) + +/* DTB_QUEUE_LOCK_STATE_0_3 队列锁状态寄存器,4个寄存器共128bit,对应队列0~127 n=0~3 */ +#define REG_SEC_DTB_QUEUE_LOCK_STATE_0_3(n) \ + (REG_SEC_TOP_DTB_OFFSET + 0x4080 + n * 4) + +typedef enum { + e_SEC_IPSEC_TRANSPORT_MODE = 0, /*传输模式*/ + e_SEC_IPSEC_TUNNEL_MODE, /*隧道模式*/ + e_SEC_IPSEC_MODE_LAST, +} E_CMDK_SEC_IPSEC_MODE; + +typedef enum { + e_SEC_SA_DF_BYPASS_MODE = 0, /*00 bypass DF bit*/ + e_SEC_SA_DF_CLEAR_MODE, /*01 clear*/ + e_SEC_SA_DF_SET_MODE, /*10 set*/ + e_SEC_SA_DF_COPY_MODE, /*11 copy*/ + e_SEC_SA_DF_MODE_LAST, +} E_CMDK_SEC_SA_DF_MODE; + +typedef enum { + E_DTB_SA_CMD_FLOW_DOWN = 0, + E_DTB_SA_CMD_DUMP, + E_DTB_SA_CMD_LAST, +} E_CMDK_DTB_SA_CMD_TYPE; + +typedef enum { + E_SATYPE_IN = 1, + E_SATYPE_OUT, + E_SATYPE_IN_AND_OUT = 3, +} E_SA_TYPE; + +typedef enum { + E_INLINE_IN, + E_INLINE_OUT, + E_INLINE_IN_AND_OUT, +} E_INLINE_TYPE; + +typedef enum { + e_SEC_ENCRYP_AH_MODE = 0, /*000 AH认证*/ + e_SEC_ENCRYP_ESP_AUTH_MODE, /*001 ESP完整性*/ + e_SEC_ENCRYP_ESP_ENCRYP_MODE, /*010 ESP加密*/ + e_SEC_ENCRYP_ESP_AUTH_AND_ESP_ENCRYP_MODE, /*011 ESP加密+ESP完整*/ + e_SEC_ENCRYP_ESP_COMBINED_MODE, /*100 ESP组合模式*/ + e_SEC_ENCRYP_MODE_LAST, +} E_CMDK_SEC_ENCRYP_MODE; + +typedef enum { + e_SEC_SA_LIVETIME_NONE_TYPE = 0, /*00 none*/ + e_SEC_SA_LIVETIME_TIME_TYPE, /*01 生存时间*/ + e_SEC_SA_LIVETIME_BYTE_TYPE, /*10 byte数*/ + e_SEC_SA_LIVETIME_PKT_TYPE, /*11 pkt数(预留,目前不支持)*/ + e_SEC_SA_LIVETIME_TYPE_LAST, +} E_CMDK_LIVETIME_TYPES; + +#pragma pack(1) +typedef struct IPV4_HEAD { + UINT8 ip_headlen_version; + UINT8 ip_tos; + UINT16 usTotallen; + + UINT16 usIdentify; + UINT16 ip_fragoff; + + UINT8 uclive_time; + 
UINT8 ucProtocal; + UINT16 usHeadChecksum; + + UINT32 udSrcIpAddr; + UINT32 udDstIpAddr; +} T_IPV4_HEAD; +#pragma pack() + +typedef struct { + UINT32 DtbAddrH; /*地址的高32位*/ + UINT32 DtbAddrL; /*地址的低32位,两个地址组成64位然后左移4位得到68位的真实地址*/ + UINT32 DtbCmd; /*研规上的DTB_LEN字段 */ +} T_QUEUE_DTB_REG; + +//SA下表模块使用的结构体 +typedef struct { + UINT32 udSPI; + UINT32 udSaId; + UINT16 usSaParam; + UINT8 ucCiperID; + UINT8 ucAuthID; + UINT8 ucCipherkeyLen; + UINT8 ucAuthkeyLen; + UINT16 usFrag_State; + + UINT32 udESN; + UINT32 udSN; + UINT64 uddProcessedByteCnt; + + UINT32 udSalt; + UINT32 udLifetimeSecMax; + UINT64 uddLifetimByteCntMax; + + UINT8 ucProtocol; + UINT8 ucTOS; + UINT8 ucEsnFlag; + UINT8 ucIpType; + UINT32 udRSV0; + UINT32 udRSV1; + UINT32 udRSV2; + + UINT32 udSrcAddress0; + UINT32 udSrcAddress1; + UINT32 udSrcAddress2; + UINT32 udSrcAddress3; + + UINT32 udDstAddress0; + UINT32 udDstAddress1; + UINT32 udDstAddress2; + UINT32 udDstAddress3; + + UINT8 aucSaCipherKey[32]; + UINT8 aucSaAuthKey[128]; +} __attribute__((packed)) T_HAL_SA_DTB_HW_OUT; + +typedef struct { + UINT32 udSrcAddress0; + UINT32 udSrcAddress1; + UINT32 udSrcAddress2; + UINT32 udSrcAddress3; + + UINT32 udDstAddress0; + UINT32 udDstAddress1; + UINT32 udDstAddress2; + UINT32 udDstAddress3; + + UINT32 udSPI; + UINT32 udSaId; + UINT16 usSaParam; + UINT8 ucCiperID; + UINT8 ucAuthID; + UINT8 ucCipherkeyLen; + UINT8 ucAuthkeyLen; + UINT16 usFrag_State; + + UINT32 udSalt; + UINT32 udLifetimeSecMax; + UINT64 uddLifetimByteCntMax; + + UINT8 ucProtocol; + UINT8 ucTOS; + UINT8 ucEsnFlag; + UINT8 ucIpType; + UINT16 usOutSaOffset; + UINT16 udRSV0; + UINT32 udOutSaId; + UINT32 udRSV1; + + UINT8 aucBitmap[256]; + + UINT32 udAntiWindowHigh; + UINT32 udAntiWindowLow; + UINT64 uddProcessedByteCnt; + + UINT8 aucSaCipherKey[32]; + UINT8 aucSaAuthKey[128]; +} __attribute__((packed)) T_HAL_SA_DTB_HW_IN; + +typedef enum { + e_HAL_IPSEC_CIPHER_NULL = 0x00, + e_HAL_IPSEC_CIPHER_AES_CTR = 0x11, + e_HAL_IPSEC_CIPHER_AES_CBC = 0x12, + 
e_HAL_IPSEC_CIPHER_AES_ECB = 0x13, + e_HAL_IPSEC_CIPHER_AES_GCM = 0x14, + e_HAL_IPSEC_CIPHER_AES_CCM = 0x15, + e_HAL_IPSEC_CIPHER_AES_GMAC = 0x16, + /* 新增SM4算法 */ + e_HAL_IPSEC_CIPHER_SM4_CTR = 0x17, + e_HAL_IPSEC_CIPHER_SM4_CBC = 0x18, + e_HAL_IPSEC_CIPHER_SM4_ECB = 0x19, + /* 新增XTS算法 */ + e_HAL_IPSEC_CIPHER_AES_XTS = 0x1a, + e_HAL_IPSEC_CIPHER_SM4_XTS = 0x1b, + + e_HAL_IPSEC_CIPHER_DES_CBC = 0x31, + e_HAL_IPSEC_CIPHER_3DES_CBC = 0x32, + e_HAL_IPSEC_CIPHER_CHACHA = 0x50, +} E_HAL_SEC_IPSEC_CIPHER_ALG; + +typedef enum { + e_HAL_IPSEC_AUTH_NULL = 0x00, + + /* 新增 */ + e_HAL_IPSEC_AUTH_AES_GMAC = 0x16, /* 1 */ + e_HAL_IPSEC_AUTH_SM4_GMAC = 0x1e, + + e_HAL_IPSEC_AUTH_AES_CMAC32 = 0x22, /* 3 */ + e_HAL_IPSEC_AUTH_AES_CMAC96 = 0x23, + e_HAL_IPSEC_AUTH_AES_XCBCMAC = 0x21, + e_HAL_IPSEC_AUTH_AES_SHA1 = 0x41, /* 6 */ + e_HAL_IPSEC_AUTH_AES_SHA224 = 0x42, + e_HAL_IPSEC_AUTH_AES_SHA256 = 0x44, + e_HAL_IPSEC_AUTH_AES_SHA384 = 0x45, + e_HAL_IPSEC_AUTH_AES_SHA512 = 0x46, + e_HAL_IPSEC_AUTH_AES_MD5 = 0x43, + e_HAL_IPSEC_AUTH_SM3 = 0x47, +} E_HAL_SEC_IPSEC_AUTH_ALG; + +typedef struct { + char alg_name[64]; + char compat_name[64]; + E_HAL_SEC_IPSEC_CIPHER_ALG e_zxdh_ealgo_id; +} T_ZXDH_EALGO; + +typedef struct { + char alg_name[64]; + char compat_name[64]; + E_HAL_SEC_IPSEC_AUTH_ALG e_zxdh_auth_id; +} T_ZXDH_ALGO; + +void BttlPubDump(unsigned char *ucBuf, UINT32 udLen); +UINT32 CmdkBttlTestSecDtbSaAdd(struct zxdh_en_device *en_dev, + E_CMDK_DTB_SA_CMD_TYPE eDtbSaCmdType, + E_SA_TYPE eSaType, UINT64 uddSaVirAddr, + UINT32 udDtbSaIsIntEn, UINT32 udDtbLen, + UINT32 udQueIndex); +void zxdh_ipsec_del_sa(struct xfrm_state *xs); +bool zxdh_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs); +void zxdh_ipsec_state_advance_esn(struct xfrm_state *x); +void zxdh_ipsec_state_update_curlft(struct xfrm_state *x); +int zxdh_ipsec_policy_add(struct xfrm_policy *x); +void zxdh_ipsec_policy_delete(struct xfrm_policy *x); +void zxdh_ipsec_policy_free(struct xfrm_policy *x); + +#endif diff 
--git a/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c new file mode 100644 index 000000000000..698f8a808e5e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.c @@ -0,0 +1,617 @@ +/***************************************************************************** +(C) 2023 ZTE Corporation. 版权所有. + +文件名 : en_1588_pkt_proc.c +内容摘要 : 提供PTP数据包处理相关接口 +作者/日期 : Limin / 2023.10.12 +版本 : 1.0 +*****************************************************************************/ + +#include "en_1588_pkt_proc.h" +#include "en_aux_ioctl.h" +#include "queue.h" +#include "../en_ethtool/ethtool.h" + +#define PTP_MESSAGE_HRD_LEN 34 +#define IPV6_HDR_LEN 40 +#define IPV6_PROT_OFFSET 6 +#define UDP_DEST_PORT_OFFSET 2 +#define VLAN_TPID 0x8100 +#define UDP_PRORT_EVENT_1588 319 +#define UDP_PRORT_GENERAL_1588 320 + +/* pi头中pkt_type字段值 */ +#define PTP_EVENT_TYPE_NOSECURITY 2 +#define PTP_EVENT_TYPE_SECURITY 3 +#define PTP_GENERAL_TYPE 0 +#define PTP_TYPE_OFFSET 4 +/* 下行层四1588微码是否需要查询ipsec表 */ +#define PTP_L4_NEED_QUERY_IPSEC_TABLE 1 +#define PTP_TYPE_L4_SECURITY_OFFSET 3 + +/* L3报文类型 */ +#define ETH_TYPE_PTP 0x88f7 +#define ETH_TYPE_IPV4 0x0800 +#define ETH_TYPE_IPV6 0x86dd + +/* L4报文类型 */ +#define ETH_TYPE_UDP 0x11 +#define ETH_TYPE_TCP 0x06 + +#define UDP_HDR_LEN 0x08 +#define TCP_HDR_LEN 0x14 + +/* 报文中关键字段的长度 */ +#define ETHER_TYPE_LEN 2 +#define ETHER_MAC_LEN 6 +#define L2_PKT_HDR_LEN ((2 * ETHER_MAC_LEN) + ETHER_TYPE_LEN) + +#define IP_PROT_OFFSET 9 /* IP头中protocol字段的偏移 */ + +#define PTP_MSG_ERROR_TYPE 0xff +#define PTPHDR_CF_OFFSET 8 + +#define VLAN_LEN 4 +#ifdef PTP_DRIVER_INTERFACE_EN +extern int get_hw_timestamp(struct zxdh_en_device *en_dev, u32 *hwts); +#endif +/* PTP报文类型和处理函数对应关系结构体 */ +typedef struct { + uint8_t type; + int32_t (*proc_func)(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + uint8_t *ptpHdr, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device 
*en_dev); +} MsgProc_t; + +typedef struct { + uint8_t type; + int32_t (*proc_func)(struct zxdh_1588_pd_rx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, struct time_stamps *tsn, + uint32_t *thw, + struct skb_shared_info *ptSkbSharedInfo, + struct zxdh_en_device *en_dev); +} MsgRcv_t; + +/* PTP报文类型和处理函数对应关系表 */ +MsgProc_t g_MsgProcTable[] = { + { PTP_MSG_TYPE_SYNC, pkt_proc_type_sync }, + { PTP_MSG_TYPE_DELAY_REQ, pkt_proc_type_delay_req }, + { PTP_MSG_TYPE_PDELAY_REQ, pkt_proc_type_pdelay_req }, + { PTP_MSG_TYPE_PDELAY_RESP, pkt_proc_type_pdelay_resp }, + + { PTP_MSG_TYPE_FOLLOW_UP, pkt_proc_type_follow_up }, + { PTP_MSG_TYPE_DELAY_RESP, pkt_proc_type_delay_resp }, + { PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP, + pkt_proc_type_pdelay_resp_follow_up }, + { PTP_MSG_TYPE_ANNOUNCE, pkt_proc_type_announce }, + { PTP_MSG_TYPE_SIGNALING, pkt_proc_type_signaling }, + { PTP_MSG_TYPE_MANAGEMENT, pkt_proc_type_management }, + + { PTP_MSG_ERROR_TYPE, NULL } +}; + +MsgRcv_t g_MsgRcvTable[] = { { PTP_MSG_TYPE_SYNC, pkt_rcv_type_event }, + { PTP_MSG_TYPE_DELAY_REQ, pkt_rcv_type_event }, + { PTP_MSG_TYPE_PDELAY_REQ, pkt_rcv_type_event }, + { PTP_MSG_TYPE_PDELAY_RESP, pkt_rcv_type_event }, + + { PTP_MSG_TYPE_DELAY_RESP, + pkt_rcv_type_delay_resp }, + + { PTP_MSG_ERROR_TYPE, NULL } }; + +/* 判断是否为事件报文 */ +bool is_event_message(const uint8_t msg_type) +{ + if (msg_type <= PTP_MSG_TYPE_PDELAY_RESP) { + return true; + } + return false; +} + +/* 判断是否为普通报文 */ +bool is_general_message(const uint8_t msg_type) +{ + if ((PTP_MSG_TYPE_FOLLOW_UP <= msg_type) && + (msg_type <= PTP_MSG_TYPE_MANAGEMENT)) { + return true; + } + return false; +} + +// 处理扩展首部通用逻辑,返回是否成功处理该扩展首部 +int process_extension_header(const uint8_t **ptr, uint16_t *remaining_len, + uint8_t header_type) +{ + uint16_t ext_len = 0; + if (*remaining_len == 0) { + DEBUG_1588( + "Remaining length is 0, can't process extension header.\n"); + return 0; // 剩余长度为0,无法处理任何扩展首部了 + } + + // 获取扩展首部长度字段(不同扩展首部长度计算方式类似,此处统一处理基础逻辑) + ext_len = 
(uint16_t)((*(*ptr)) + 1) * 8; + DEBUG_1588("Extension header type: %d, declared length: %hu bytes\n", + header_type, ext_len); + if (ext_len > (*remaining_len + 1)) { + DEBUG_1588( + "Extension header length exceeds remaining length, can't process completely.\n"); + return 0; // 扩展首部声明的长度超过剩余可处理长度,无法完整处理 + } + + // 移动指针并更新剩余长度 + *ptr = *ptr + (ext_len - 1); + *remaining_len = *remaining_len - (ext_len - 1); + DEBUG_1588( + "Successfully processed extension header, new pointer position and remaining length updated.\n"); + return 1; // 成功处理该扩展首部 +} + +int is_fragmented_ipv6(struct ipv6hdr *ipv6h) +{ + uint8_t next_header_type = 0; + const uint8_t *ptr = NULL; + uint16_t remaining_len = 0; + uint16_t payload_len = ntohs(ipv6h->payload_len); + + next_header_type = ipv6h->nexthdr; + if (next_header_type == 44) { + DEBUG_1588( + "Encountered fragmentation-related next header in IPv6 header, packet is fragmented.\n"); + return 1; // 如果IPv6报文头的下一报头就是表示分片相关的类型,直接返回1表示是分片 + } + + ptr = (const uint8_t *)ipv6h + sizeof(struct ipv6hdr); + remaining_len = payload_len; + + while (remaining_len > 0) { + next_header_type = *ptr; + ptr++; + remaining_len--; + DEBUG_1588( + "Starting to process extension headers, initial remaining length: %hu bytes\n", + remaining_len); + DEBUG_1588("Processing extension header of type: %d\n", + next_header_type); + + switch (next_header_type) { + case 0: // 逐跳选项首部 + case 60: // 目的选项首部 + case 43: // 路由首部 + if (!process_extension_header(&ptr, &remaining_len, + next_header_type)) { + return 1; // 若无法完整处理扩展首部,直接返回1表示是分片,1588处理流程不处理 + } + break; + case 44: + DEBUG_1588( + "Encountered fragmentation-related extension header, packet is fragmented.\n"); + return 1; // 遇到特定表示分片的扩展首部类型,返回1表示是分片 + default: + return 0; // 其他未知扩展首部类型,暂认为不是分片 + } + } + + return 0; // 遍历完所有扩展首部后没发现分片相关标识,认为不是分片 +} + +// 函数用于判断是否是分片报文,返回1表示是分片报文,0表示不是 +bool is_fragmented_ipv4(struct iphdr *ipv4h) +{ + uint16_t mf_flag = 0; + uint16_t flags_fragment_offset = 0; + + 
flags_fragment_offset = ntohs(ipv4h->frag_off); + + DEBUG_1588("frag_off:%hu\n", flags_fragment_offset); + // 获取标志位中的"更多分片(MF)"位,通过与运算提取出对应位的值 + mf_flag = (flags_fragment_offset >> 13) & 0x01; + DEBUG_1588("mf_flag:%hu\n", mf_flag); + if (mf_flag != 0) { + return true; + } + + // MF位为0,再看分片偏移量是否为0,若不为0也是分片(最后一个分片) + flags_fragment_offset = flags_fragment_offset & 0x1FFF; + DEBUG_1588("flags_fragment_offset:%hu\n", flags_fragment_offset); + if (flags_fragment_offset != 0) { + return true; + } + return false; +} + +/* p得到PTP报文头位置 */ +int32_t get_hdr_point(uint8_t *pData, uint8_t *piTs0ffset, uint8_t **ptpHdr) +{ + uint16_t udp_dest_port_ptp = 0; + uint16_t offset = 0; + uint16_t temp_len = 0; + uint16_t eth_type_lay3 = ntohs(*( + (uint16_t *)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */ + uint8_t eth_type_lay4 = 0; + uint8_t eth_type_lay4_ipv6 = 0; + uint16_t eth_type_vlan_lay3 = + ntohs(*((uint16_t *)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN))); + struct iphdr *ipv4h = NULL; + struct ipv6hdr *ipv6h = NULL; + + /* 计算PTP头的偏移 */ + offset = L2_PKT_HDR_LEN; + + if ((VLAN_TPID == eth_type_lay3) && + (VLAN_TPID != eth_type_vlan_lay3)) { /* 单vlan偏移 */ + offset += VLAN_LEN; + } else if ((VLAN_TPID == eth_type_lay3) && + (VLAN_TPID == eth_type_vlan_lay3)) { /* 双vlan偏移 */ + offset += (VLAN_LEN * 2); + } + + eth_type_lay3 = ntohs(*((uint16_t *)(pData + offset - ETHER_TYPE_LEN))); + eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET); + + eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET); + + if ((ETH_TYPE_PTP != eth_type_lay3) && + (ETH_TYPE_IPV4 != eth_type_lay3) && + (ETH_TYPE_IPV6 != eth_type_lay3)) { + DEBUG_1588("unknown L3 eth type: %d\n", eth_type_lay3); + return IS_NOT_PTP_MSG; + } + + if (ETH_TYPE_IPV4 == eth_type_lay3) { + /* 判断ipv4报文是否分片 */ + ipv4h = (struct iphdr *)(pData + offset); + if (is_fragmented_ipv4(ipv4h)) { + DEBUG_1588("is fragmented ipv4!!\n"); + return IS_NOT_PTP_MSG; + } + + /* IP首部第一字节: 版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位 */ + temp_len = 
*(pData + offset); + temp_len = (temp_len & 0x0f) * 4; + offset += temp_len; + + /* L4类型PTP只有UDP */ + if (ETH_TYPE_UDP == eth_type_lay4) { + udp_dest_port_ptp = + ntohs(*(uint16_t *)(pData + offset + + UDP_DEST_PORT_OFFSET)); + if ((udp_dest_port_ptp != UDP_PRORT_EVENT_1588) && + (udp_dest_port_ptp != UDP_PRORT_GENERAL_1588)) { + DEBUG_1588( + "UDP destination port(%hd) is not 319 or 320!!\n", + udp_dest_port_ptp); + return IS_NOT_PTP_MSG; + } + temp_len = UDP_HDR_LEN; + offset += temp_len; + } else { + DEBUG_1588("eth_type_lay4 = %hhu, is not UDP!!!!!\n", + eth_type_lay4); + return IS_NOT_PTP_MSG; + } + } else if (ETH_TYPE_IPV6 == eth_type_lay3) { + /* 判断ipv6报文是否分片 */ + ipv6h = (struct ipv6hdr *)(pData + offset); + if (is_fragmented_ipv6(ipv6h)) { + DEBUG_1588("is fragmented ipv6!!\n"); + return IS_NOT_PTP_MSG; + } + + temp_len = IPV6_HDR_LEN; + offset += temp_len; + + /* L4类型PTP只有UDP */ + if (ETH_TYPE_UDP == eth_type_lay4_ipv6) { + udp_dest_port_ptp = + ntohs(*(uint16_t *)(pData + offset + + UDP_DEST_PORT_OFFSET)); + if ((udp_dest_port_ptp != UDP_PRORT_EVENT_1588) && + (udp_dest_port_ptp != UDP_PRORT_GENERAL_1588)) { + DEBUG_1588( + "UDP destination port(%hd) is not 319 or 320!!\n", + udp_dest_port_ptp); + return IS_NOT_PTP_MSG; + } + temp_len = UDP_HDR_LEN; + offset += temp_len; + } else { + DEBUG_1588( + "eth_type_lay4_ipv6 = %hhu, is not UDP!!!!!!\n", + eth_type_lay4_ipv6); + return IS_NOT_PTP_MSG; + } + } + + *ptpHdr = pData + offset; + + /* 赋值pd头的ts_offset字段 */ + *piTs0ffset = offset; + + return PTP_SUCCESS; +} + +/* 从PTP报文头中解析出报文类型 */ +uint8_t get_msgtype_from_hrd(uint8_t *hrd, const uint8_t len) +{ + uint8_t msg_type = PTP_MSG_ERROR_TYPE; + + CHECK_UNEQUAL_ERR(len, PTP_MESSAGE_HRD_LEN, -EFAULT, "error len %d!", + len); + + msg_type = hrd[0] & 0x0f; + if (is_event_message(msg_type) || is_general_message(msg_type)) { + return msg_type; + } + + DEBUG_1588("error message type %d", msg_type); + return PTP_MSG_ERROR_TYPE; +} + +/* 
调用PTP模块驱动接口,读取3个时间戳:两个80bit(T1,T2),一个32bit(T3) */ +#ifdef PTP_DRIVER_INTERFACE_EN +extern int get_pkt_timestamp(int32_t clock_no, struct zxdh_en_device *en_dev, + struct time_stamps *ts, u32 *hwts); +#endif /* PTP_DRIVER_INTERFACE_EN */ + +int32_t get_tstamps_from_ptp(int32_t clock_no, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + uint32_t hwts = 0; + struct time_stamps ts[2] = { 0 }; + +#ifdef PTP_DRIVER_INTERFACE_EN + int32_t ret = 0; + ret = get_pkt_timestamp(clock_no, en_dev, ts, &hwts); //todo + if (unlikely(ret != 0)) { + LOG_ERR("netdev %s get tsn clock %d failed!, ret = %d", + en_dev->netdev->name, clock_no, ret); + return -1; + } +#endif /* PTP_DRIVER_INTERFACE_EN */ + + LOG_DEBUG("===GET-PTP===: hwts=%u", hwts); + LOG_DEBUG("===GET-PTP===: ts[0].s=%llu, ts[0].ns=%u", ts[0].s, + ts[0].ns); + LOG_DEBUG("===GET-PTP===: ts[1].s=%llu, ts[1].ns=%u", ts[1].s, + ts[1].ns); + + *t5g = ts[1]; + *tsn = ts[1]; + *thw = hwts; + + return 0; +} + +/* 发送流程中的报文时间戳处理 */ +int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + int32_t clock_no, struct zxdh_en_device *en_dev, + uint8_t *ptpHdr) +{ + struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */ + struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */ + uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */ + uint8_t ret = 0; + uint8_t i = 0; + uint8_t cnt = 0; + uint8_t msg_type = 0xff; + struct ptpHdr_t *ptPtpHdr = NULL; + + memset(&ts_5g, 0, sizeof(struct time_stamps)); + memset(&ts_tsn, 0, sizeof(struct time_stamps)); + + CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n"); + CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n"); + + ptPtpHdr = (struct ptpHdr_t *)ptpHdr; + + /* 解析PTP报文类型 */ + msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN); + if (PTP_MSG_ERROR_TYPE == msg_type) { + DEBUG_1588("unknow PTP msg type!\n"); + return -EFAULT; + } + + LOG_DEBUG("pkt_1588_proc_xmit msg_type %d\n", msg_type); + /* 如果是事件报文,提取时间戳 */ + if 
(is_event_message(msg_type)) { + ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, + en_dev); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "get tstamps from ptp failed!\n"); + + /* ptp_type[2]的低bit2-4表示pkt_type,加密事件报文类型为2,非加密事件报文为3, */ + hdr->ptp_type[2] = + (hdr->ptp_type[2] & 0x8F) + + (PTP_EVENT_TYPE_NOSECURITY << PTP_TYPE_OFFSET); + if (0 != ((ptPtpHdr->flagField) & 0x0080)) { + hdr->ptp_type[2] = + (hdr->ptp_type[2] & 0x8F) + + (PTP_EVENT_TYPE_SECURITY << PTP_TYPE_OFFSET); + } + } else { + /* 普通报文类型为0 */ + hdr->ptp_type[2] = (hdr->ptp_type[2] & 0x8F) + + (PTP_GENERAL_TYPE << PTP_TYPE_OFFSET); + } + /* 层四1588报文,下行微码处理时,是否需要查ipsec表, 加密报文需要 */ + if (0 != ((ptPtpHdr->flagField) & 0x0080)) { + hdr->ptp_type[2] = (hdr->ptp_type[2] & 0xF7) + + (PTP_L4_NEED_QUERY_IPSEC_TABLE + << PTP_TYPE_L4_SECURITY_OFFSET); + } + /* 层二发送方向的出端口需要这里指示 */ + hdr->port = en_dev->phy_port; + + /* 根据不同报文类型做不同处理 */ + cnt = sizeof(g_MsgProcTable) / sizeof(MsgProc_t); + for (i = 0; i < cnt; i++) { + if (g_MsgProcTable[i].type == msg_type) { + if (likely(g_MsgProcTable[i].proc_func != NULL)) { + ret = g_MsgProcTable[i].proc_func( + skb, hdr, ptpHdr, &ts_5g, &ts_tsn, + &ts_thw, en_dev); + } + } + } + + return ret; +} + +/* 接收流程中的报文时间戳处理 */ +int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_1588_pd_rx *hdr, + int32_t clock_no, struct zxdh_en_device *en_dev) +{ + struct time_stamps ts_5g; /* 5G时间戳,有效值80bit */ + struct time_stamps ts_tsn; /* TSN时间戳,有效值80bit */ + uint32_t ts_thw = 0; /* 硬件当前时间戳,有效值32bit */ + uint8_t *pData = NULL; + uint8_t *ptpHdr = NULL; + int32_t ret = 0; + uint8_t i = 0; + uint8_t cnt = 0; + uint8_t msg_type = 0xff; + uint8_t piTsOffset = 0; + + memset(&ts_5g, 0, sizeof(struct time_stamps)); + memset(&ts_tsn, 0, sizeof(struct time_stamps)); + + CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n"); + CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n"); + + pData = skb->data; + + /* 获得ptp报文头指针&赋值pi头ts_offset字段 */ + ret = get_hdr_point(pData, 
&piTsOffset, &ptpHdr); + CHECK_EQUAL_ERR(ptpHdr, NULL, -EADDRNOTAVAIL, "get ptp hdr failed!\n"); + if (ret != 0) { + DEBUG_1588("is not ptp msg or get hdr err!!\n"); + return -EFAULT; + } + + /* 解析PTP报文类型 */ + msg_type = get_msgtype_from_hrd(ptpHdr, PTP_MESSAGE_HRD_LEN); + if (PTP_MSG_ERROR_TYPE == msg_type) { + DEBUG_1588("unknow PTP msg type!\n"); + return -EFAULT; + } + + /* 如果是事件报文,提取时间戳 */ + if (is_event_message(msg_type)) { + ret = get_tstamps_from_ptp(clock_no, &ts_5g, &ts_tsn, &ts_thw, + en_dev); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "get tstamps from ptp failed!\n"); + } + + /* 根据不同报文类型做不同处理 */ + cnt = sizeof(g_MsgRcvTable) / sizeof(MsgRcv_t); + for (i = 0; i < cnt; i++) { + if (g_MsgRcvTable[i].type == msg_type) { + if (likely(g_MsgRcvTable[i].proc_func != NULL)) { + ret = g_MsgRcvTable[i].proc_func( + hdr, ptpHdr, &ts_5g, &ts_tsn, &ts_thw, + skb_shinfo(skb), en_dev); + } + } + } + + return ret; +} + +int32_t is_delay_statistics_pkt(uint8_t *pData) +{ + uint16_t udp_dest_port = 0; + uint16_t offset = 0; + uint16_t temp_len = 0; + uint16_t eth_type_lay3 = ntohs(*( + (uint16_t *)(pData + (2 * ETHER_MAC_LEN)))); /* get Eth Type */ + uint8_t eth_type_lay4 = 0; + uint8_t eth_type_lay4_ipv6 = 0; + uint16_t eth_type_vlan_lay3 = + ntohs(*((uint16_t *)(pData + (2 * ETHER_MAC_LEN) + VLAN_LEN))); + + /* 计算PTP头的偏移 */ + offset = L2_PKT_HDR_LEN; + + if ((VLAN_TPID == eth_type_lay3) && + (VLAN_TPID != eth_type_vlan_lay3)) { /* 单vlan偏移 */ + offset += VLAN_LEN; + } else if ((VLAN_TPID == eth_type_lay3) && + (VLAN_TPID == eth_type_vlan_lay3)) { /* 双vlan偏移 */ + offset += (VLAN_LEN * 2); + } + + eth_type_lay3 = ntohs(*((uint16_t *)(pData + offset - ETHER_TYPE_LEN))); + eth_type_lay4 = *(pData + offset + IP_PROT_OFFSET); + + eth_type_lay4_ipv6 = *(pData + offset + IPV6_PROT_OFFSET); + + if (ETH_TYPE_IPV4 != eth_type_lay3) { + // LOG_ERR("unknown L4 eth type: %d\n", eth_type_lay3); + return IS_NOT_STATISTICS_PKT; + } + + if (ETH_TYPE_IPV4 == eth_type_lay3) { + /* IP首部第一字节: 
版本(4b)+首部长度(4b),这里取低4位,长度是以4字节为单位 */ + temp_len = *(pData + offset); + temp_len = (temp_len & 0x0f) * 4; + offset += temp_len; + + /* L4类型PTP只有UDP */ + if (ETH_TYPE_UDP == eth_type_lay4) { + udp_dest_port = + ntohs(*(uint16_t *)(pData + offset + + UDP_DEST_PORT_OFFSET)); + if (udp_dest_port != 49184) { + // LOG_ERR("UDP destination port(%hd) is not 49184!!\n", udp_dest_port); + return IS_NOT_STATISTICS_PKT; + } + } else { + // LOG_ERR("eth_type_lay4 = %c, is not UDP!!!!!\n", eth_type_lay4); + return IS_NOT_STATISTICS_PKT; + } + } + + return PTP_SUCCESS; +} + +/* delay统计报文发送流程中的时间戳处理 */ +int32_t pkt_delay_statistics_proc(struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr, + struct zxdh_en_device *en_dev) +{ + uint8_t *pData = NULL; + uint8_t ret = 0; + uint32_t ts_thw = 0; + + CHECK_EQUAL_ERR(skb, NULL, -EADDRNOTAVAIL, "skb is NULL!\n"); + CHECK_EQUAL_ERR(hdr, NULL, -EADDRNOTAVAIL, "hdr is NULL!\n"); + + pData = skb->data; + + /* 检查是否是delay统计报文: udp端口号:49184 */ + if (IS_NOT_STATISTICS_PKT == is_delay_statistics_pkt(pData)) { + return DELAY_STATISTICS_FAILED; + } + /* 时延统计使能 */ + if (en_dev->dtp_drs_offload == true) { + hdr->pipd_hdr.pd_hdr.ol_flag |= + htons(DELAY_STATISTICS_INSERT_EN_BIT); + } else { + hdr->pd_hdr.ol_flag |= htons(DELAY_STATISTICS_INSERT_EN_BIT); + } + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = get_hw_timestamp(en_dev, &ts_thw); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get_hw_timestamp failed!\n"); +#endif + /*hw的时间戳,写到 PD头的5~8字节:高29位为ns位,低3bits位为小数ns位 */ + *(uint32_t *)(&(hdr->pd_hdr.tag_idx)) = + htonl(ts_thw << CPU_TX_DECIMAL_NS); /* 大端对齐 */ + + return ret; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h new file mode 100644 index 000000000000..6b55eeacaf0d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc.h @@ -0,0 +1,41 @@ +/***************************************************************************** +(C) 2023 ZTE Corporation. 版权所有. 
+ +文件名 : en_1588_pkt_proc.h +内容摘要 : 提供PTP数据包处理相关接口 +作者/日期 : Limin / 2023.10.12 +版本 : 1.0 +*****************************************************************************/ + +#ifndef _EN_1588_PKT_PROC_H_ +#define _EN_1588_PKT_PROC_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "en_1588_pkt_proc_func.h" + +#define PTP_SUCCESS 0 +#define PTP_FAILED (-1) +#define IS_NOT_PTP_MSG 1 +#define IS_NOT_STATISTICS_PKT 1 +#define DELAY_STATISTICS_FAILED (-1) + +int32_t pkt_1588_proc_xmit(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + int32_t clock_no, struct zxdh_en_device *en_dev, + uint8_t *ptpHdr); +int32_t pkt_1588_proc_rcv(struct sk_buff *skb, struct zxdh_1588_pd_rx *hdr, + int32_t clock_no, struct zxdh_en_device *en_dev); +int32_t pi_1588_net_hdr_add(struct sk_buff *skb, struct zxdh_net_hdr_tx *hdr, + int32_t clock_no, struct zxdh_en_device *en_dev); +int32_t pkt_delay_statistics_proc(struct sk_buff *skb, + struct zxdh_net_hdr_tx *hdr, + struct zxdh_en_device *en_dev); +int32_t get_hdr_point(uint8_t *pData, uint8_t *piTs0ffset, uint8_t **ptpHdr); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EN_1588_PKT_PROC_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c new file mode 100644 index 000000000000..e389ca6329d8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.c @@ -0,0 +1,663 @@ +/***************************************************************************** +(C) 2023 ZTE Corporation. 版权所有. 
+ +文件名 : en_1588_pkt_proc_func.c +内容摘要 : 不同数据类型包的处理接口实现 +作者/日期 : Limin / 2023.10.12 +版本 : 1.0 +*****************************************************************************/ + +#include "en_1588_pkt_proc_func.h" +#include "en_aux_cmd.h" +#include "en_aux_ioctl.h" + +struct ptp_update_buff tGlobalPtpBuff = { 0 }; + +uint64_t htonll(uint64_t u64_host) +{ + uint64_t u64_net = 0; + uint32_t u32_host_h = 0; + uint32_t u32_host_l = 0; + + u32_host_l = u64_host & 0xffffffff; + u32_host_h = (u64_host >> 32) & 0xffffffff; + + u64_net = htonl(u32_host_l); + u64_net = (u64_net << 32) | htonl(u32_host_h); + + return u64_net; +} + +/** +* @brief 计算两时间戳subtraction和minuend之差,并将差值赋值给*ptMinusRet +* @param minuend 高48bit为s位,低32bit为ns位 +*/ +int32_t bits_80_minus(struct time_stamps subtraction, Bits80_t minuend, + struct time_stamps *ptMinusRet) +{ + uint64_t minusHigh48_s = 0; + uint32_t minusLow32_ns = 0; + + /* 取出80bits被减数的ns位值和s位值 */ + memcpy((uint8_t *)(&minusHigh48_s), &minuend, S_SIZE); + memcpy(&minusLow32_ns, (uint8_t *)(&minuend) + S_SIZE, NS_SIZE); + + /* minuend大端 */ + minusHigh48_s = htonll(minusHigh48_s) >> 16; + minusLow32_ns = htonl(minusLow32_ns); + + /* 如果减数值小于被减数值 */ + if ((subtraction.s < minusHigh48_s) || + ((subtraction.s == minusHigh48_s) && + (subtraction.ns < minusLow32_ns))) { + LOG_ERR("The difference between the two times is negative!!"); + return PTP_RET_TIME_ERR; + } + + if (subtraction.ns > minusLow32_ns) { + ptMinusRet->ns = subtraction.ns - minusLow32_ns; /* 赋值ns位 */ + ptMinusRet->s = subtraction.s - minusHigh48_s; /* 赋值s位 */ + } else { + ptMinusRet->ns = S_HOLD - (minusLow32_ns - + subtraction.ns); /* 赋值ns位 */ + ptMinusRet->s = subtraction.s - minusHigh48_s - 1; /* 赋值s位 */ + } + + return PTP_RET_SUCCESS; +} + +int32_t pkt_proc_type_sync(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + uint8_t *ptpHdr, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + struct SkbSharedHwtstamps_t tShhwtstamps; + 
struct time_stamps tMinusRet; + struct skb_shared_hwtstamps tHwtstamps5g; + struct skb_shared_hwtstamps tHwtstampsTsn; + Bits80_t tTsi; + struct ptpHdr_t *ptPtpHdr = NULL; + uint8_t *pOriginTimeStamp = NULL; + uint8_t majorSdoId = 0; + uint32_t t5gNsBig = 0; + uint64_t t5gSBig = 0; + uint32_t tsnNsBig = 0; + uint64_t tsnSBig = 0; + uint32_t frequency = 0; + uint64_t cfAddedVal = 0; + uint8_t *tsiTlv = NULL; + uint32_t cpuTx_ns = 0; + uint32_t cpuTx_frac_ns = 0; + uint64_t cfNs = 0; + + ptPtpHdr = (struct ptpHdr_t *)ptpHdr; + majorSdoId = ((ptPtpHdr->majorType) & 0xf0) >> 4; + pOriginTimeStamp = ptpHdr + sizeof(struct ptpHdr_t); + t5gSBig = (htonll(t5g->s)) >> 16; + t5gNsBig = htonl(t5g->ns); + tsnSBig = (htonll(tsn->s)) >> 16; + tsnNsBig = htonl(tsn->ns); + + memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t)); + memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps)); + memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps)); + memset(&tMinusRet, 0, sizeof(struct time_stamps)); + memset(&tTsi, 0, sizeof(Bits80_t)); + + /* 解析PTP Header的majorSdoId字段,如果是0,表示PTP消息由1588使用 */ + if (0 == majorSdoId) { + /* 解析Flag字段低一字节bit1,如果为0,则是一步法,为1不做处理 */ + if (0 == ((ptPtpHdr->flagField) & 0x0002)) { + memcpy(pOriginTimeStamp, &t5gSBig, S_SIZE); + memcpy(pOriginTimeStamp + S_SIZE, &t5gNsBig, NS_SIZE); + } + } else if (1 == majorSdoId) { /* 如果是1,表示PTP消息由802.1AS协议使用 */ + /* 解析Flag字段低一字节bit1,如果为0,则是一步法 */ + if (0 == ((ptPtpHdr->flagField) & 0x0002)) { + /* tsn时间戳放到Sync报文的originTimestamp字段 */ + memcpy(pOriginTimeStamp, &tsnSBig, S_SIZE); + memcpy(pOriginTimeStamp + S_SIZE, &tsnNsBig, NS_SIZE); + + if (0 != + ((ptPtpHdr->flagField) & + 0x8000)) { /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理*/ + frequency = + *(uint32_t *)(ptpHdr + + PTPHDR_FREQUENCY_OFFSET); + frequency = htonl(frequency); /* 频率比 */ + + memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, + sizeof(Bits80_t)); + + bits_80_minus(*t5g, tTsi, &tMinusRet); + + /* (*t5g-*tsi)*频率比 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */ + 
cfAddedVal = + (tMinusRet.s * S_HOLD + tMinusRet.ns) * + frequency; + memcpy(&cfNs, ptPtpHdr->correctionField, + CF_NS_SIZE); + cfNs = htonll(cfNs) >> 16; + cfNs += cfAddedVal; + cfNs = htonll(cfNs) >> 16; + memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, + CF_NS_SIZE); + + /* flagField字段的高1字节bit7清0 */ + ptPtpHdr->flagField = (ptPtpHdr->flagField) & + 0x7f; + + /* 清除20 byte tTsi TLV为0 */ + tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET; + memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN); + + /* 把Header中的messageLength值减去20 */ + ptPtpHdr->msglen = htons(ptPtpHdr->msglen); + ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN; + ptPtpHdr->msglen = htons(ptPtpHdr->msglen); + } + } else { /* 如果为1,则是两步法 */ + /* 解析Flag字段高一字节bit7,如果为1,做如下处理,为0不做处理 */ + if (0 != ((ptPtpHdr->flagField) & 0x8000)) { + memcpy(&tTsi, ptpHdr + PTPHDR_TSI_OFFSET, + sizeof(Bits80_t)); + + bits_80_minus(*t5g, tTsi, &tMinusRet); + + /* (*t5g-*tsi)*频率比 计算结果叠加到CF字段ns位,(不会出现CF字段ns位值溢出情况) */ + frequency = htonl(ptPtpHdr->msgTypeSpecific); + cfAddedVal = + (tMinusRet.s * S_HOLD + tMinusRet.ns) * + frequency; + memcpy(&cfNs, ptPtpHdr->correctionField, + CF_NS_SIZE); + cfNs = htonll(cfNs) >> 16; + cfNs += cfAddedVal; + cfNs = htonll(cfNs) >> 16; + memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, + CF_NS_SIZE); + + /* 将messagetypespecific清0 */ + memset(&(ptPtpHdr->msgTypeSpecific), 0, + sizeof(uint32_t)); + + /* flagField字段的高1字节bit7清0 */ + ptPtpHdr->flagField = (ptPtpHdr->flagField) & + 0x7f; + + /* 清除20 byte tTsi TLV为0 */ + tsiTlv = ptpHdr + PTPHDR_TSI_TLV_OFFSET_TWO; + memset(tsiTlv, 0, PTPHDR_TSI_TLV_LEN); + + /* 把Header中的messageLength值减去20 */ + ptPtpHdr->msglen = htons(ptPtpHdr->msglen); + ptPtpHdr->msglen -= PTPHDR_TSI_TLV_LEN; + ptPtpHdr->msglen = htons(ptPtpHdr->msglen); + } + } + } + + /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */ + cpuTx_frac_ns = (hdr->cpu_tx) & 0x07; + cpuTx_ns = *thw << CPU_TX_DECIMAL_NS; + hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns); /* 大端对齐 */ + + /* 两个80bit时间戳(T1,T2)放到socket的ERR_QUEUE中 */ + 
tShhwtstamps.ts_5g_t = *t5g; + tShhwtstamps.ts_tsn_t = *tsn; + tHwtstamps5g.hwtstamp = + tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD; + tHwtstampsTsn.hwtstamp = + tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD; + skb_tstamp_tx(skb, &tHwtstamps5g); +#ifdef CGEL_TSTAMP_2_PATCH_EN + skb_tstamp_tx_2(skb, &tHwtstampsTsn); +#endif /* CGEL_TSTAMP_2_PATCH_EN */ + + return PTP_RET_SUCCESS; +} + +int32_t delay_and_pdelay_req_proc(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw) +{ + struct SkbSharedHwtstamps_t tShhwtstamps; + struct skb_shared_hwtstamps tHwtstamps5g; + struct skb_shared_hwtstamps tHwtstampsTsn; + uint32_t cpuTx_ns = 0; + uint32_t cpuTx_frac_ns = 0; + + memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t)); + memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps)); + memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps)); + /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */ + cpuTx_frac_ns = (hdr->cpu_tx) & 0x07; + cpuTx_ns = *thw << CPU_TX_DECIMAL_NS; + hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns); + + tShhwtstamps.ts_5g_t = *t5g; + tShhwtstamps.ts_tsn_t = *tsn; + + /* 2个80bit放到socket error queue中 */ + tHwtstamps5g.hwtstamp = + tShhwtstamps.ts_5g_t.ns + tShhwtstamps.ts_5g_t.s * S_HOLD; + tHwtstampsTsn.hwtstamp = + tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD; + skb_tstamp_tx(skb, &tHwtstamps5g); +#ifdef CGEL_TSTAMP_2_PATCH_EN + skb_tstamp_tx_2(skb, &tHwtstampsTsn); +#endif /* CGEL_TSTAMP_2_PATCH_EN */ + return PTP_RET_SUCCESS; +} + +int32_t pkt_proc_type_delay_req(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + ret = delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw); + return ret; +} + +int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, 
uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + ret = delay_and_pdelay_req_proc(skb, hdr, t5g, tsn, thw); + return ret; +} + +int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + Bits80_t tReqReceTs; + struct SkbSharedHwtstamps_t tShhwtstamps; + struct skb_shared_hwtstamps tHwtstamps5g; + struct skb_shared_hwtstamps tHwtstampsTsn; + struct time_stamps tMinusRet; + struct ptpHdr_t *ptPtpHdr = NULL; + uint64_t MinusVal = 0; + uint32_t cpuTx_ns = 0; + uint32_t cpuTx_frac_ns = 0; + uint64_t cfNs = 0; + + memset(&tReqReceTs, 0, sizeof(Bits80_t)); + memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t)); + memset(&tMinusRet, 0, sizeof(struct time_stamps)); + memset(&tHwtstamps5g, 0, sizeof(struct skb_shared_hwtstamps)); + memset(&tHwtstampsTsn, 0, sizeof(struct skb_shared_hwtstamps)); + ptPtpHdr = (struct ptpHdr_t *)ptpHdr; + + /*PTPM的32bit的时间戳,写到 PI头的cpu_tx字段:高29位为ns位,低3bits位为小数ns位 */ + cpuTx_frac_ns = (hdr->cpu_tx) & 0x07; + cpuTx_ns = *thw << CPU_TX_DECIMAL_NS; + hdr->cpu_tx = htonl(cpuTx_ns + cpuTx_frac_ns); + + /* 解析Header中flagField的低1字节的bit1,如果是0(一步法) */ + if (0 == (ptPtpHdr->flagField & 0x0002)) { + /* 提取requestRecieptTimestamp */ + tReqReceTs = + *(Bits80_t *)(ptpHdr + + sizeof(struct ptpHdr_t)); /* 记为T2 */ + + /* 将*tsn-T2的差值加到CorrectionField字段的高48bit ns位上 */ + bits_80_minus(*tsn, tReqReceTs, &tMinusRet); + MinusVal = tMinusRet.ns + tMinusRet.s * S_HOLD; + memcpy(&cfNs, ptPtpHdr->correctionField, CF_NS_SIZE); + cfNs = htonll(cfNs) >> 16; + cfNs += MinusVal; + cfNs = htonll(cfNs) >> 16; + memcpy(&(ptPtpHdr->correctionField[0]), &cfNs, CF_NS_SIZE); + } + + tShhwtstamps.ts_5g_t = *t5g; + tShhwtstamps.ts_tsn_t = *tsn; + + /* 2个80bit放到socket error queue中 */ + tHwtstamps5g.hwtstamp = + tShhwtstamps.ts_5g_t.ns + 
tShhwtstamps.ts_5g_t.s * S_HOLD; + tHwtstampsTsn.hwtstamp = + tShhwtstamps.ts_tsn_t.ns + tShhwtstamps.ts_tsn_t.s * S_HOLD; + skb_tstamp_tx(skb, &tHwtstamps5g); +#ifdef CGEL_TSTAMP_2_PATCH_EN + skb_tstamp_tx_2(skb, &tHwtstampsTsn); +#endif /* CGEL_TSTAMP_2_PATCH_EN */ + return PTP_RET_SUCCESS; +} + +/* 接收方向的事件报文的时间戳处理函数:在1588驱动中对接收方向的事件报文的时间戳处理是一致的 */ +int32_t pkt_rcv_type_event(struct zxdh_1588_pd_rx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, struct time_stamps *tsn, + uint32_t *thw, + struct skb_shared_info *ptSkbSharedInfo, + struct zxdh_en_device *en_dev) +{ + struct SkbSharedHwtstamps_t tShhwtstamps; + uint32_t tsRx = 0; + uint32_t tsRx_ns = 0; + uint32_t tsRx_frac_ns = 0; + int32_t MinusRetThwCpu = 0; + uint64_t temp = 0x20000000; + uint32_t i = 0; + + memset(&tShhwtstamps, 0, sizeof(struct SkbSharedHwtstamps_t)); + /* cpu_tx高29bits ns位,低3bits小数ns位 */ + tsRx = htonl(hdr->rx_ts); + + tsRx_frac_ns = tsRx & 0x07; + tsRx_ns = tsRx >> 3; + // LOG_DEBUG("hdr->rx_ts = %d, tsRx = %d, tsRx_ns = %d\n", hdr->rx_ts, tsRx, tsRx_ns); + + if (tsRx_frac_ns > 4) { + tsRx_ns += 1; + } + // LOG_DEBUG("thw = %d, tsRx_ns = %d\n", *thw, tsRx_ns); + MinusRetThwCpu = (*thw & 0x1fffffff) - tsRx_ns; + + if (MinusRetThwCpu < 0) { + MinusRetThwCpu += temp; + } + + LOG_DEBUG("MinusRetThwCpu = %d\n", MinusRetThwCpu); + + tShhwtstamps.ts_5g_t = *t5g; + tShhwtstamps.ts_tsn_t = *tsn; + + /* 更新两个80bits时间戳 */ + if (tShhwtstamps.ts_5g_t.ns > MinusRetThwCpu) { + tShhwtstamps.ts_5g_t.ns -= MinusRetThwCpu; + } else { + for (i = 1; i < tShhwtstamps.ts_5g_t.s + 1; i++) { + temp = i * S_HOLD + tShhwtstamps.ts_5g_t.ns; + + if (temp > MinusRetThwCpu) { + tShhwtstamps.ts_5g_t.ns = temp - MinusRetThwCpu; + tShhwtstamps.ts_5g_t.s -= i; + break; + } + } + if (temp < MinusRetThwCpu) { + LOG_ERR("ts_5g_t < MinusRetThwCpu!!!\n"); + } + } + + if (tShhwtstamps.ts_tsn_t.ns > MinusRetThwCpu) { + tShhwtstamps.ts_tsn_t.ns -= MinusRetThwCpu; + } else { + for (i = 1; i < tShhwtstamps.ts_tsn_t.s + 1; i++) { + 
temp = i * S_HOLD + tShhwtstamps.ts_tsn_t.ns; + if (temp > MinusRetThwCpu) { + tShhwtstamps.ts_tsn_t.ns = + temp - MinusRetThwCpu; + tShhwtstamps.ts_tsn_t.s -= i; + break; + } + } + if (temp < MinusRetThwCpu) { + LOG_ERR("ts_tsn_t < MinusRetThwCpu!!!\n"); + } + } + + LOG_DEBUG("enter in pkt_rcv_type_event!!!!\n"); + LOG_DEBUG( + "tShhwtstamps.ts_5g_t.s = %llu, tShhwtstamps.ts_5g_t.ns = %d\n", + tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns); + LOG_DEBUG( + "tShhwtstamps.ts_tsn_t.s = %llu, tShhwtstamps.ts_tsn_t.ns = %d\n", + tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns); + + /* 2个80bit放到socket cmsg中。连同报文返回给应用 */ + ptSkbSharedInfo->hwtstamps.hwtstamp = + ktime_set(tShhwtstamps.ts_5g_t.s, tShhwtstamps.ts_5g_t.ns); +#ifdef CGEL_TSTAMP_2_PATCH_EN + ptSkbSharedInfo->hwtstamps2.hwtstamp = + ktime_set(tShhwtstamps.ts_tsn_t.s, tShhwtstamps.ts_tsn_t.ns); +#endif /* CGEL_TSTAMP_2_PATCH_EN */ + return PTP_RET_SUCCESS; +} + +/** +* @fn read_ts_match_info +* @brief 查询时间戳匹配信息,查询到匹配信息后更新cf字段和本地时间戳信息 +* @param msgType ptp事件报文类型 +* @return 返回值为0表示查询时间戳匹配信息成功 +*/ +int32_t read_ts_match_info(uint32_t msgType, uint8_t *ptpHdr) +{ + uint32_t mssageType = 0; + int32_t cfNum = 0; + uint32_t srcPortIdFifo = 0; + uint32_t sequeIdFifo = 0; + struct ptpHdr_t *ptPtpHdr = NULL; + uint32_t matchInfo = 0; + uint8_t srcPortId = 0; + uint64_t cfVal = 0; + + ptPtpHdr = (struct ptpHdr_t *)ptpHdr; + + CHECK_EQUAL_ERR(ptPtpHdr, NULL, -EADDRNOTAVAIL, "tPtpBuff is NULL\n"); + + srcPortId = *(uint8_t *)(ptPtpHdr->srcPortIdentity + SRCPORTID_LEN - + 1); /* 只取srcPortIdentity最后一字节值 */ + + for (cfNum = 0; cfNum < tGlobalPtpBuff.cfCount; cfNum++) { + matchInfo = tGlobalPtpBuff.ptpRegInfo[cfNum].matchInfo; + mssageType = (matchInfo >> MSGTYPE_OFFSET) & 0xf; + srcPortIdFifo = (matchInfo >> SRCPORTID_OFFSET) & 0xf; + sequeIdFifo = htons(matchInfo & 0xffff); + + if ((mssageType == msgType) && + (srcPortIdFifo == (srcPortId & 0xf)) && + (sequeIdFifo == ptPtpHdr->sequenceId)) { + LOG_DEBUG("read the match 
info successfully!!!\n"); + LOG_DEBUG( + "mssageType: %u, srcPortIdFifo: %u, sequeIdFifo: %u\n", + mssageType, srcPortIdFifo, sequeIdFifo); + memcpy(&cfVal, + &(tGlobalPtpBuff.ptpRegInfo[cfNum].cfVal[0]), + CF_SIZE); + cfVal = htonll(cfVal); + memcpy(&(ptPtpHdr->correctionField[0]), &cfVal, + CF_SIZE); + + /* 将匹配到的信息从本地buff去除 */ + tGlobalPtpBuff.cfCount--; + if (cfNum == MAX_PTP_REG_INFO_NUM - 1) { + memset(&(tGlobalPtpBuff.ptpRegInfo[cfNum]), 0, + sizeof(struct ptp_reg_info)); + return 0; + } + memcpy(&(tGlobalPtpBuff.ptpRegInfo[cfNum]), + &(tGlobalPtpBuff.ptpRegInfo[cfNum + 1]), + (MAX_PTP_REG_INFO_NUM - cfNum - 1) * + sizeof(struct ptp_reg_info)); + memset(&(tGlobalPtpBuff + .ptpRegInfo[MAX_PTP_REG_INFO_NUM - 1]), + 0, sizeof(struct ptp_reg_info)); + + return 0; + } + } + + return -1; +} + +#ifdef PTP_DRIVER_INTERFACE_EN +extern int32_t get_event_ts_info(struct zxdh_en_device *en_dev, + struct ptp_buff *p_tsInfo, int32_t mac_number); +#endif /* PTP_DRIVER_INTERFACE_EN */ + +/** +* @fn general_encrypt_msg_proc +* @brief 使用两步法,获取、存储和处理不同的ptp加密事件报文的时间戳信息 +* @param msgType ptp事件报文类型 +*/ +int32_t general_encrypt_msg_proc(uint32_t msgType, uint8_t *ptpHdr, + struct zxdh_en_device *en_dev) +{ + int32_t num = 0; + int32_t macNum = 0; + int32_t ret = 0; + struct ptpHdr_t *ptPtpHdr = NULL; + struct ptp_buff tempBuff; + + memset(&tempBuff, 0, sizeof(struct ptp_buff)); + ptPtpHdr = (struct ptpHdr_t *)ptpHdr; + + /* 判断报文是否是加密报文 */ + if (!(0x0080 == ((ptPtpHdr->flagField) & 0x0080))) { + return ret; + } + + macNum = zxdh_pf_macpcs_num_get(en_dev); + if (macNum < 0) { + LOG_ERR("get mac num %d err, its value should is 0-2!\n", + macNum); + return -1; + } + + // LOG_INFO("ptp buff:\n "); + // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff)); + + /* 1、从本地buff查询和处理时间戳匹配信息,并更新本地buff */ + ret = read_ts_match_info(msgType, ptpHdr); + + /* 2、从本地没匹配到信息,则读取FIFO中信息,将读取到的信息更新到本地,重新匹配 */ + if (ret != 0) { + // LOG_INFO("cannot read the matchInfo from the 
BUFF!---------------"); + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = get_event_ts_info(en_dev, &tempBuff, macNum); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "read FIFO form ptpDriver failed!!!"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + /* 2.1 将读取到到的FIFO信息,添加到本地全局buff */ + if (tempBuff.cfCount > 0) { + if (tempBuff.cfCount + tGlobalPtpBuff.cfCount < + MAX_PTP_REG_INFO_NUM) { + memcpy(&(tGlobalPtpBuff.ptpRegInfo + [tGlobalPtpBuff.cfCount]), + tempBuff.ptpRegInfo, + sizeof(struct ptp_reg_info) * + tempBuff.cfCount); + + tGlobalPtpBuff.cfCount += tempBuff.cfCount; + // LOG_INFO("tGlobalPtpBuff.cfCount: %u\n", tGlobalPtpBuff.cfCount); + } else { /* 当超过64组时间戳信息时 */ + num = tempBuff.cfCount + + tGlobalPtpBuff.cfCount - + MAX_PTP_REG_INFO_NUM; + + /* 丢弃掉最先存在本地的信息(此信息更大的概率匹配不上) */ + memcpy(&(tGlobalPtpBuff.ptpRegInfo[0]), + &(tGlobalPtpBuff.ptpRegInfo[num]), + sizeof(struct ptp_reg_info) * + (MAX_PTP_REG_INFO_NUM - num)); + tGlobalPtpBuff.cfCount -= num; + + /* 添加新的信息到本地 */ + memcpy(&(tGlobalPtpBuff.ptpRegInfo + [tGlobalPtpBuff.cfCount]), + tempBuff.ptpRegInfo, + sizeof(struct ptp_reg_info) * + tempBuff.cfCount); + tGlobalPtpBuff.cfCount = MAX_PTP_REG_INFO_NUM; + } + + /* 2.2 在更新后的本地全局buff查询和处理匹配信息,并更新本地buff*/ + ret = read_ts_match_info(msgType, ptpHdr); + CHECK_UNEQUAL_ERR( + ret, 0, -EFAULT, + "cannot read the matchInfo from the local BUFF!"); + } + } + + // LOG_INFO("ptp buff:\n "); + // print_data((uint8_t *)&tGlobalPtpBuff, sizeof(struct ptp_update_buff)); + + return ret; +} + +int32_t pkt_proc_type_follow_up(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + ret = general_encrypt_msg_proc(PTP_MSG_TYPE_SYNC, ptpHdr, en_dev); + + return ret; +} + +int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct 
zxdh_en_device *en_dev) +{ + return 0; +} + +int32_t pkt_rcv_type_delay_resp(struct zxdh_1588_pd_rx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct skb_shared_info *ptSkbSharedInfo, + struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + ret = general_encrypt_msg_proc(PTP_MSG_TYPE_DELAY_REQ, ptpHdr, en_dev); + + return ret; +} + +int32_t pkt_proc_type_pdelay_resp_follow_up( + struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + ret = general_encrypt_msg_proc(PTP_MSG_TYPE_PDELAY_RESP, ptpHdr, + en_dev); + + return ret; +} + +int32_t pkt_proc_type_announce(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + uint8_t *ptpHdr, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + /* 驱动不做处理 */ + return 0; +} + +int32_t pkt_proc_type_signaling(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + /* 驱动不做处理 */ + return 0; +} + +int32_t pkt_proc_type_management(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev) +{ + /* 驱动不做处理 */ + return 0; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h new file mode 100644 index 000000000000..97f428cf8eaa --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_1588_pkt_proc_func.h @@ -0,0 +1,198 @@ +/***************************************************************************** +(C) 2023 ZTE Corporation. 版权所有. 
+ +文件名 : en_1588_pkt_proc_func.h +内容摘要 : 不同数据类型包的处理接口实现 +作者/日期 : Limin / 2023.10.12 +版本 : 1.0 +*****************************************************************************/ + +#ifndef _EN_1588_PKT_PROC_FUNC_H_ +#define _EN_1588_PKT_PROC_FUNC_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "../en_aux.h" +#include "queue.h" + +#define PTP_REG_INFO_NUM 32 +#define MAX_PTP_REG_INFO_NUM 64 + +/* MAC FIFO相关定义 */ +#define MSGTYPE_OFFSET 20 +#define SRCPORTID_OFFSET 16 + +/* PTP报文时间戳处理函数返回值 */ +#define PTP_RET_SUCCESS 0 +#define PTP_RET_TIME_ERR (-1) + +/* CF字段ns位和s位长度 */ +#define CF_DECIMAL_NS_SIZE 2 +#define CF_NS_SIZE 6 +#define CF_SIZE 8 + +/* PTP时间戳长度 */ +#define PTP_TS_5G_LEN 10 +#define PTP_TS_TSN_LEN 10 +#define PTP_REQRECE_TS_LEN 10 + +/* PTP报文后缀相关字段偏移和长度 */ +#define PTPHDR_FREQUENCY_OFFSET 54 +#define PTPHDR_TSI_OFFSET 86 +#define PTPHDR_TSI_TLV_OFFSET 76 +#define PTPHDR_TSI_TLV_OFFSET_TWO 44 +#define PTPHDR_TSI_TLV_LEN 20 +#define ORIGINTIMESTAMP_LEN 10 +#define FOLLOWUP_TLV_LEN 32 +#define TSITLV_LEN 20 +#define SRCPORTID_LEN 10 + +/* 80bit时间戳ns位和s位长度 */ +#define S_SIZE 6 /* 高48位 */ +#define NS_SIZE 4 /* 低32位 */ +#define S_HOLD 1000000000L /* 进位阈值,即低32位达到1e9 */ + +/* pi头中cpu_tx字段,高29位为ns位,低3位为小数ns位 */ +#define CPU_TX_DECIMAL_NS 3 +#define CPU_TX_NS 29 + +typedef struct { + uint8_t data[S_SIZE + NS_SIZE]; +} Bits80_t; + +struct time_stamps { + uint64_t s; + uint32_t ns; +}; + +struct SkbSharedHwtstamps_t { + struct time_stamps ts_5g_t; + struct time_stamps ts_tsn_t; +}; + +struct ptpHdr_t { + uint8_t majorType; /* 高4位为majorSdoId,低4位为msgType */ + uint8_t versionPTP; + uint16_t msglen; + uint8_t domainNumber; + uint8_t minorSdoId; + uint16_t flagField; + uint8_t correctionField[CF_SIZE]; /* 高48位为ns位,低16字节为小数ns位 */ + uint32_t msgTypeSpecific; /* 大端 */ + uint8_t srcPortIdentity[SRCPORTID_LEN]; + uint16_t sequenceId; + uint8_t controlField; + uint8_t logMsgInterval; +} __attribute__((packed)); + +struct ptp_reg_info { + uint32_t cfVal + 
[2]; //High寄存器是ns位,Low寄存器的高16bit是ns位,低16bit是ns小数位 + uint32_t matchInfo; //保存的内容是[bit23:0]: {MessageType[23:20], sourcePortIdentity[19:16], sequenceId[15:0]} +}; + +struct ptp_buff { + uint32_t cfCount; + struct ptp_reg_info ptpRegInfo[PTP_REG_INFO_NUM]; +}; + +struct ptp_update_buff { + uint32_t cfCount; + struct ptp_reg_info ptpRegInfo[MAX_PTP_REG_INFO_NUM]; +}; + +/* PTP报文类型枚举 */ +enum { + /* event message types */ + PTP_MSG_TYPE_SYNC = 0, + PTP_MSG_TYPE_DELAY_REQ, + PTP_MSG_TYPE_PDELAY_REQ, + PTP_MSG_TYPE_PDELAY_RESP, + + /* general message types */ + PTP_MSG_TYPE_FOLLOW_UP = 8, + PTP_MSG_TYPE_DELAY_RESP, + PTP_MSG_TYPE_PDELAY_RESP_FOLLOW_UP, + PTP_MSG_TYPE_ANNOUNCE, + PTP_MSG_TYPE_SIGNALING, + PTP_MSG_TYPE_MANAGEMENT +}; + +uint64_t htonll(uint64_t u64_host); + +int32_t pkt_proc_type_sync(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + uint8_t *ptpHdr, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_delay_req(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_pdelay_req(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_pdelay_resp(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_follow_up(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_delay_resp(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct 
zxdh_en_device *en_dev); + +int32_t pkt_proc_type_pdelay_resp_follow_up( + struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_announce(struct sk_buff *skb, struct zxdh_1588_pd_tx *hdr, + uint8_t *ptpHdr, struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_signaling(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_proc_type_management(struct sk_buff *skb, + struct zxdh_1588_pd_tx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct zxdh_en_device *en_dev); + +int32_t pkt_rcv_type_event(struct zxdh_1588_pd_rx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, struct time_stamps *tsn, + uint32_t *thw, + struct skb_shared_info *ptSkbSharedInfo, + struct zxdh_en_device *en_dev); + +int32_t pkt_rcv_type_delay_resp(struct zxdh_1588_pd_rx *hdr, uint8_t *ptpHdr, + struct time_stamps *t5g, + struct time_stamps *tsn, uint32_t *thw, + struct skb_shared_info *ptSkbSharedInfo, + struct zxdh_en_device *en_dev); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _EN_1588_PKT_PROC_FUNC_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.c b/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.c new file mode 100644 index 000000000000..f21766b7b86b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.c @@ -0,0 +1,3176 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "../slib.h" +#include +#include "../en_aux.h" +#include "../en_np/table/include/dpp_tbl_api.h" +#include "../msg_common.h" +#include "en_aux_cmd.h" + +#define UINT64_MAX (0xFFFFFFFFFFFFFFFF) + +static int32_t 
write_queue_index_to_message(struct zxdh_en_device *en_dev, + uint32_t queue_nums, uint32_t field, + uint16_t *bytes, uint16_t *data, + union zxdh_msg *old_msg) +{ + uint32_t ix = 0; + uint16_t old_queue_nums = 0; + + if (OP_CODE_DATA_CHAN == field) { + *bytes = (uint16_t)((queue_nums + 1) * ZXDH_QS_PAIRS); + data[0] = (uint16_t)queue_nums; + + for (ix = 0; ix < queue_nums; ix = ix + ZXDH_QS_PAIRS) { + data[ix + 1] = + en_dev->phy_index + [ix]; //en_dev->rq[ix / ZXDH_QS_PAIRS].vq->phy_index; + data[ix + 2] = + en_dev->phy_index + [ix + + 1]; //en_dev->sq[ix / ZXDH_QS_PAIRS].vq->phy_index; + } + + if (old_msg != NULL) { + LOG_DEBUG( + "old_msg->reps.cmn_vq_msg.queue_nums: %u; queue_nums: %u", + old_msg->reps.cmn_vq_msg.queue_nums, + queue_nums); + if (old_msg->reps.cmn_vq_msg.queue_nums > 0) { + old_queue_nums = + old_msg->reps.cmn_vq_msg.queue_nums; + if ((old_queue_nums + queue_nums) > 256) { + LOG_ERR("Exceeded the maximum number of queues, old_queue_nums(%d)+queue_nums(%d)\n", + old_queue_nums, queue_nums); + return -1; + } + + *bytes = (uint16_t)((queue_nums + + old_queue_nums + 1) * + ZXDH_QS_PAIRS); + data[0] = + (uint16_t)(queue_nums + old_queue_nums); + memcpy(data + queue_nums + 1, + old_msg->reps.cmn_vq_msg.phy_qidx, + old_queue_nums * ZXDH_QS_PAIRS); + + for (ix = 1; + ix <= (queue_nums + old_queue_nums); + ix++) { + LOG_DEBUG("vq phy_qid: %d ", data[ix]); + } + } + } + } +#ifdef ZXDH_MSGQ + else if (OP_CODE_MSGQ_CHAN == field) { + if (en_dev->curr_queue_pairs * 2 > (ZXDH_MAX_QUEUES_NUM - 1)) { + LOG_ERR("curr_queue_pairs out range!\n"); + return -1; + } + *bytes = (uint16_t)(queue_nums * ZXDH_QS_PAIRS); + data[0] = + en_dev->phy_index + [en_dev->curr_queue_pairs * + 2]; //en_dev->rq[en_dev->curr_queue_pairs].vq->phy_index; + data[1] = + en_dev->phy_index + [en_dev->curr_queue_pairs * 2 + + 1]; //en_dev->sq[en_dev->curr_queue_pairs].vq->phy_index; + } +#endif + + return 0; +} + +static int32_t cmd_tbl_messgae_to_riscv_send(struct zxdh_en_device *en_dev, 
+ void *payload, uint32_t pld_len) +{ + int32_t ret = 0; + struct cmd_hdr_recv *hdr_recv; + struct cmd_tbl_ack cmd_tbl_ack = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_TBL, payload, + &cmd_tbl_ack, ¶); + if (0 != ret) { + LOG_ERR("en_dev->ops->msg_send_cmd failed\n"); + goto out; + } + + hdr_recv = (struct cmd_hdr_recv *)&cmd_tbl_ack; + if (hdr_recv->check != OP_CODE_TBL_STAT) { + LOG_ERR("tbl init message recv check failed\n"); + ret = -1; + } +out: + return ret; +} + +static int32_t cmd_common_tbl_init(struct zxdh_en_device *en_dev, + uint32_t queue_nums, uint32_t field, + union zxdh_msg *old_msg) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + if ((2 * ZXDH_MAX_PAIRS_NUM) < queue_nums) { + LOG_ERR("queue pairs %u out of range\n", queue_nums); + kfree(msg); + return -ENOMEM; + } + + msg->payload.hdr_to_cmn.field = field; + msg->payload.hdr_to_cmn.type = OP_CODE_WRITE; + msg->payload.hdr_to_cmn.pcie_id = en_dev->pcie_id; + ret = write_queue_index_to_message(en_dev, queue_nums, field, + &msg->payload.hdr_to_cmn.write_bytes, + msg->payload.cmn_tbl_msg, old_msg); + if (0 != ret) { + LOG_ERR("write_queue_index_to_message failed, ret: %d\n", ret); + kfree(msg); + return ret; + } + + ret = cmd_tbl_messgae_to_riscv_send( + en_dev, msg, + MSG_STRUCT_HD_LEN + msg->payload.hdr_to_cmn.write_bytes); + if (0 != ret) { + LOG_ERR("zxdh_bar_chan_sync_msg_send failed, ret: %d\n", ret); + } + + kfree(msg); + + return ret; +} + +int32_t zxdh_common_tbl_init(struct net_device *netdev, union zxdh_msg *old_msg) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + 
ret = cmd_common_tbl_init(en_dev, ZXDH_QS_PAIRS, + OP_CODE_MSGQ_CHAN, old_msg); + if (0 != ret) { + LOG_ERR("field msgq message failed\n"); + return -1; + } + } +#endif + + ret = cmd_common_tbl_init(en_dev, + en_dev->curr_queue_pairs * ZXDH_QS_PAIRS, + OP_CODE_DATA_CHAN, old_msg); + if (0 != ret) { + LOG_ERR("field data message failed\n"); + return -1; + } + + return 0; +} + +int32_t get_common_table_msg(struct zxdh_en_device *en_dev, uint16_t pcie_id, + uint8_t field, void *ack) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.hdr_to_cmn.type = RISC_TYPE_READ; + msg->payload.hdr_to_cmn.field = field; + msg->payload.hdr_to_cmn.pcie_id = pcie_id; + msg->payload.hdr_to_cmn.write_bytes = 0; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_TBL, msg, ack, + ¶); + + kfree(msg); + + return ret; +} + +int32_t zxdh_hash_id_get(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + ret = get_common_table_msg(en_dev, en_dev->pcie_id, + RISC_FIELD_HASHID_CHANNEL, msg); + if (ret != 0) { + LOG_ERR("get own hash_id failed: %d\n", ret); + kfree(msg); + return ret; + } + + en_dev->hash_search_idx = msg->reps.cmn_recv_msg.value; + LOG_DEBUG("hash_id: %u\n", en_dev->hash_search_idx); + if (en_dev->hash_search_idx > ZXDH_MAX_HASH_INDEX) { + LOG_ERR("hash_id is invalid value: %u\n", + en_dev->hash_search_idx); + kfree(msg); + return -EINVAL; + } + if (en_dev->hash_search_idx == + ZXDH_MAX_HASH_INDEX) { //TODO:if should be delete + en_dev->hash_search_idx = 1; + } + + kfree(msg); + + return 
ret; +} + +int32_t zxdh_phyport_get(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + ret = get_common_table_msg(en_dev, en_dev->pcie_id, + RISC_FIELD_PHYPORT_CHANNEL, msg); + if (ret != 0) { + LOG_ERR("get own phyport failed: %d\n", ret); + kfree(msg); + return ret; + } + + en_dev->phy_port = msg->reps.cmn_recv_msg.value; + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("get phy_port failed\n"); + kfree(msg); + return -EINVAL; + } + en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port); + LOG_DEBUG("0x%x phy_port: %u\n", en_dev->ep_bdf, en_dev->phy_port); + + kfree(msg); + + return ret; +} + +int32_t zxdh_panel_id_init(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + + if (!zxdh_en_is_panel_port(en_dev) || + en_dev->ops->is_bond(en_dev->parent)) { + return ret; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + ret = get_common_table_msg(en_dev, en_dev->pcie_id, RISC_FIELD_PANEL_ID, + msg); + if (ret != 0) { + LOG_ERR("get own phyport failed: %d\n", ret); + kfree(msg); + return ret; + } + + en_dev->panel_id = msg->reps.cmn_recv_msg.value; + if (en_dev->panel_id > MAX_PANEL_ID) { + LOG_ERR("get panel_id failed, panel_id: %u\n", + en_dev->panel_id); + kfree(msg); + return -EINVAL; + } + LOG_DEBUG("panel_id: %u\n", en_dev->panel_id); + + kfree(msg); + + return ret; +} + +int32_t zxdh_pf_macpcs_num_get(struct zxdh_en_device *en_dev) +{ + int32_t phy_port = 0; + int32_t mac_num = 0; //0-2 + + phy_port = en_dev->phy_port; + + if (phy_port < 4) { + mac_num = 0; + } else if (phy_port < 8) { + mac_num = 1; + } else if (phy_port < 10) { + mac_num = 2; + } else { + LOG_ERR("phy_port(%d) err, not in 
0-9!!\n", phy_port); + mac_num = -1; + return mac_num; + } + + LOG_DEBUG("mac_num: %d\n", mac_num); + return mac_num; +} + +int32_t zxdh_lldp_enable_set(struct zxdh_en_device *en_dev, bool lldp_enable) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_SET; + msg->payload.hdr_to_agt.port_id = en_dev->panel_id; + + if (en_dev->ops->is_bond(en_dev->parent)) { + msg->payload.hdr_to_agt.port_id = en_dev->pannel_id; + } + msg->payload.lldp_msg.lldp_enable = lldp_enable; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_DEBUG, msg, msg, + ¶); + kfree(msg); + return err; +} + +static int32_t zxdh_vf_dualtor_label_get(struct zxdh_en_device *en_dev, + uint32_t *dual_tor) +{ + uint64_t dula_label_addr = 0; + + if (!en_dev) { + LOG_ERR("en_dev is null.\n"); + return -1; + } + + dula_label_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_DUALTOR_LABEL_OFFSET; + *dual_tor = + !!((ZXDH_BAR_DUALTOR_LABEL_ON == *(uint32_t *)dula_label_addr)); + return 0; +} + +int32_t zxdh_dual_tor_switch(struct zxdh_en_device *en_dev, bool state) +{ + int ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + uint64_t dula_label_addr = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { /* VF */ + LOG_ERR("vfs do not support dual switch.\n"); + return 0; + } + + ret = dpp_pktrx_mcode_glb_cfg_write(&pf_info, + ZXDH_NP_GLOBAL_PSN_ENABLE_BIT, + ZXDH_NP_GLOBAL_PSN_ENABLE_BIT, + state); + if (ret != 0) { + LOG_ERR("switch dual tor to state: %u failed.\n", state); + return -1; + } + + dula_label_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + 
ZXDH_DUALTOR_LABEL_OFFSET; + *(uint32_t *)dula_label_addr = state ? ZXDH_BAR_DUALTOR_LABEL_ON : 0; + + ret = dpp_l2d_psn_cfg_set(&pf_info, state); + if (ret != 0) { + LOG_ERR("dpp_l2d_psn_cfg_set failed.\n"); + return -1; + } + + LOG_INFO("switch dual tor to state: %u success.\n", state); + return 0; +} + +int32_t zxdh_dual_tor_label_get(struct zxdh_en_device *en_dev) +{ + int ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + uint32_t global_value = 0; + uint32_t psn_cfg = 0; + uint32_t dula_tor = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { /* VF */ + ret = zxdh_vf_dualtor_label_get(en_dev, &dula_tor); + if (ret != 0) { + return -1; + } + goto succ; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ret = dpp_l2d_psn_cfg_get(&pf_info, &psn_cfg); + if (ret != 0) { + LOG_ERR("dpp_l2d_psn_cfg_get failed.\n"); + return -1; + } + + if (psn_cfg != 0) { + return psn_cfg; + } + + ret = dpp_glb_cfg_get_1(&pf_info, &global_value); + if (ret != 0) { + LOG_ERR("dpp_glb_cfg_get_1 failed.\n"); + return -1; + } + dula_tor = !!(global_value & + ((uint32_t)1 << ZXDH_NP_GLOBAL_PSN_ENABLE_BIT)); +succ: + /* 开启为1,关闭为0, 异常为-1*/ + return dula_tor; +} + +int32_t zxdh_sshd_enable_set(struct zxdh_en_device *en_dev, bool sshd_enable) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + if (sshd_enable) { + msg->payload.hdr_to_agt.op_code = AGENT_SSHD_START; + } else { + msg->payload.hdr_to_agt.op_code = AGENT_SSHD_STOP; + } + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_LOGIN_CTRL, msg, + msg, ¶); + kfree(msg); + return err; +} + +int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev, + uint32_t *lldp_enable) +{ + int32_t ret = 0; + union 
zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_DEBUG_LLDP_ENABLE_GET; + msg->payload.hdr_to_agt.port_id = en_dev->panel_id; + + if (en_dev->ops->is_bond(en_dev->parent)) { + msg->payload.hdr_to_agt.port_id = en_dev->pannel_id; + } + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_DEBUG, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("zxdh_lldp_enable_get failed: %d\n", ret); + kfree(msg); + return ret; + } + + *lldp_enable = (uint32_t)(msg->reps.debug_lldp_msg.lldp_status); + kfree(msg); + return ret; +} + +int32_t zxdh_slot_info_send(struct zxdh_en_device *en_dev, uint8_t *slot_info) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_SLOT_INFO_SEND; + msg->payload.debug_ip_send.slot_info = *slot_info; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_LOGIN_CTRL, msg, + msg, ¶); + if (ret != 0) { + LOG_ERR("send slot info to riscv failed: %d\n", ret); + kfree(msg); + return ret; + } + kfree(msg); + return ret; +} + +/* Started by AICoder, pid:f65bbz8feaa0ad114c5a0a4f7052b7297a83eb86 */ +int8_t zxdh_debug_ip_get(struct zxdh_en_device *en_dev, int8_t *ip) +{ + int32_t ret = 0; + uint8_t slot_info = 0; + char ip_address[20] = { 0 }; + + slot_info = + (uint8_t)((en_dev->slot_id) & 0xff); /* 获取槽位号的低8bit */ + slot_info++; /* 防止槽位号是1,生成ip是网关 */ + sprintf(ip_address, "26.20.5.%d", + slot_info); /* 直接将slot_info转换为字符串并添加到IP地址字符串后面 */ + strcpy(ip, 
ip_address); + + /* 将槽位号发送给riscv */ + ret = zxdh_slot_info_send(en_dev, &slot_info); + if (ret != 0) { + LOG_ERR("zxdh_slot_info_send failed: %d\n", ret); + return ret; + } + + LOG_DEBUG("DEBUG IP is: %s\n", ip_address); + + return ret; +} + +int32_t zxdh_spm_port_enable_cfg(struct zxdh_en_device *en_dev, uint32_t enable) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (!zxdh_en_is_panel_port(en_dev)) + return ret; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_SPM_PORT_ENABLE_SET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + msg->payload.spm_port_enable_set.enable = enable; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("set spm port enable failed: %d\n", ret); + } + + kfree(msg); + return ret; +} + +#define FLASH_OPEN_FW +int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev, + uint8_t *fw_version, + uint8_t *fw_version_len) +{ +#ifdef FLASH_OPEN_FW + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_FLASH_FIR_VERSION_GET; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_FLASH, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("en_dev->ops->msg_send_cmd failed: %d\n", ret); + goto free_msg; + } + + memcpy(fw_version, msg->reps.flash_msg.firmware_version, + FW_VERSION_LEN); + *fw_version_len = FW_VERSION_LEN; +#else + uint8_t fw_version_test[] = "V2.24.10.01B4"; + + 
memcpy(fw_version, fw_version_test, sizeof(fw_version_test)); + *fw_version_len = sizeof(fw_version_test); +#endif + +free_msg: + kfree(msg); + return ret; +} + +void do_get_np_ext_stats(struct zxdh_en_device *en_dev, + struct zxdh_en_vport_stats *vport_stats) +{ + struct zxdh_np_ext_stats *ext_stats = NULL; + + if (!en_dev->ops->if_suport_np_ext_stats(en_dev->parent)) { + return; + } + + ext_stats = + en_dev->ops->get_np_ext_stats(en_dev->parent, en_dev->phy_port); + + vport_stats->np_stats.rx_vport_idma_drop_packets = + ext_stats->rx_vport2np_packets; +} + +int32_t do_get_vport_stats(struct zxdh_en_device *en_dev, uint8_t np_mode, + struct zxdh_en_vport_stats *vport_stats, + bool is_init_get) +{ + union zxdh_msg *msg = NULL; + uint32_t vf_id = GET_VFID(en_dev->vport); + uint32_t pf_id_offst = 0; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + //VQM统计 + msg->payload.hdr_to_agt.op_code = AGENT_VQM_DEVICE_STATS_GET; + msg->payload.hdr_to_agt.vf_id = vf_id; + msg->payload.hdr_to_agt.pcie_id = en_dev->pcie_id; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VQM, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_vport_stats_get failed, err: %d\n", err); + goto free_msg; + } + vport_stats->vqm_stats.rx_vport_packets = msg->reps.stats_msg.rx_total; + vport_stats->vqm_stats.tx_vport_packets = msg->reps.stats_msg.tx_total; + vport_stats->vqm_stats.rx_vport_bytes = + msg->reps.stats_msg.rx_total_bytes; + vport_stats->vqm_stats.tx_vport_bytes = + msg->reps.stats_msg.tx_total_bytes; + vport_stats->vqm_stats.rx_vport_dropped = msg->reps.stats_msg.rx_drop; + + //DTP统计 + memset(msg, 0, sizeof(union zxdh_msg)); + 
msg->payload.hdr_to_agt.op_code = AGENT_DTP_STATS_GET; + msg->payload.hdr_to_agt.vf_id = vf_id; + msg->payload.hdr_to_agt.pcie_id = en_dev->pcie_id; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_DTP, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_dtp_stats_get failed, err: %d\n", err); + goto free_msg; + } + vport_stats->dtp_stats.rx_lro_packets = msg->reps.stats_msg.rx_total; + vport_stats->dtp_stats.rx_udp_csum_fail_packets = + msg->reps.stats_msg.tx_total; + vport_stats->dtp_stats.tx_udp_csum_fail_packets = + msg->reps.stats_msg.rx_total_bytes; + vport_stats->dtp_stats.rx_tcp_csum_fail_packets = + msg->reps.stats_msg.tx_total_bytes; + vport_stats->dtp_stats.tx_tcp_csum_fail_packets = + msg->reps.stats_msg.rx_good_bytes; + vport_stats->dtp_stats.rx_ipv4_csum_fail_packets = + msg->reps.stats_msg.tx_good_bytes; + vport_stats->dtp_stats.tx_ipv4_csum_fail_packets = + msg->reps.stats_msg.rx_error; + + //NP & RDMA统计 + memset(msg, 0, sizeof(union zxdh_msg)); + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + msg->payload.hdr.op_code = ZXDH_GET_NP_STATS; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.vf_id = vf_id; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.np_stats_get_msg.clear_mode = np_mode; + msg->payload.np_stats_get_msg.is_init_get = is_init_get; + err = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", err); + goto free_msg; + } + memcpy(&(vport_stats->np_stats), &(msg->reps.np_stats_msg), + sizeof(vport_stats->np_stats)); + } else { + dpp_stat_port_uc_packet_rx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.rx_vport_unicast_bytes), + &(vport_stats->np_stats.rx_vport_unicast_packets)); + dpp_stat_port_uc_packet_tx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.tx_vport_unicast_bytes), + &(vport_stats->np_stats.tx_vport_unicast_packets)); + 
dpp_stat_port_mc_packet_rx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.rx_vport_multicast_bytes), + &(vport_stats->np_stats.rx_vport_multicast_packets)); + dpp_stat_port_mc_packet_tx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.tx_vport_multicast_bytes), + &(vport_stats->np_stats.tx_vport_multicast_packets)); + dpp_stat_port_bc_packet_rx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.rx_vport_broadcast_bytes), + &(vport_stats->np_stats.rx_vport_broadcast_packets)); + dpp_stat_port_bc_packet_tx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.tx_vport_broadcast_bytes), + &(vport_stats->np_stats.tx_vport_broadcast_packets)); + dpp_stat_MTU_packet_msg_rx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.rx_vport_mtu_drop_bytes), + &(vport_stats->np_stats.rx_vport_mtu_drop_packets)); + dpp_stat_MTU_packet_msg_tx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.tx_vport_mtu_drop_bytes), + &(vport_stats->np_stats.tx_vport_mtu_drop_packets)); + dpp_stat_plcr_packet_drop_rx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.rx_vport_plcr_drop_bytes), + &(vport_stats->np_stats.rx_vport_plcr_drop_packets)); + dpp_stat_plcr_packet_drop_tx_cnt_get( + &pf_info, vf_id, np_mode, + &(vport_stats->np_stats.tx_vport_plcr_drop_bytes), + &(vport_stats->np_stats.tx_vport_plcr_drop_packets)); + pf_id_offst = DH_AUX_PF_ID_OFFSET(en_dev->vport); + dpp_stat_spoof_packet_drop_cnt_get( + &pf_info, pf_id_offst, np_mode, + &(vport_stats->np_stats.tx_vport_ssvpc_packets)); + do_get_np_ext_stats(en_dev, vport_stats); + } + +free_msg: + kfree(msg); + return err; +} + +int32_t zxdh_en_vport_pre_stats_get(struct zxdh_en_device *en_dev) +{ + int32_t err = 0; + struct zxdh_en_vport_stats *vport_stats = &en_dev->pre_stats; + + err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats, TRUE); + if (err != 0) { + LOG_ERR("zxdh_en_vport_pre_stat_get failed\n"); + } + 
en_dev->last_tx_vport_ssvpc_packets =
+		en_dev->pre_stats.np_stats.tx_vport_ssvpc_packets;
+	return err;
+}
+
+/*
+ * Read the UDP/PSN physical-port packet counters into
+ * en_dev->hw_stats.udp_stats. Non-PF devices forward the request to the PF
+ * over the BAR mailbox; PFs read the counters directly via dpp_stat_*.
+ * Returns 0 on success, -ENOMEM/-1 or a mailbox error code on failure.
+ */
+int32_t zxdh_en_udp_pkt_stats_get(struct zxdh_en_device *en_dev)
+{
+	union zxdh_msg *msg = NULL;
+	uint32_t vf_id = GET_VFID(en_dev->vport);
+	DPP_PF_INFO_T pf_info = { 0 };
+	int32_t err = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	pf_info.slot = en_dev->slot_id;
+	pf_info.vport = en_dev->vport;
+
+	/* Only panel (front-facing) ports expose these counters. */
+	if (!zxdh_en_is_panel_port(en_dev))
+		return 0;
+
+	if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+		msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+		if (msg == NULL) {
+			LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+				sizeof(union zxdh_msg));
+			return -ENOMEM;
+		}
+
+		msg->payload.hdr.op_code = ZXDH_VF_GET_UDP_STATS;
+		msg->payload.hdr.vport = en_dev->vport;
+		msg->payload.hdr.vf_id = vf_id;
+		msg->payload.hdr.pcie_id = en_dev->pcie_id;
+		err = en_dev->ops->msg_send_cmd(en_dev->parent,
+						MODULE_VF_BAR_MSG_TO_PF, msg,
+						msg, &para);
+		if (err != 0) {
+			LOG_ERR("zxdh_send_command_to_pf failed: %d\n", err);
+			kfree(msg);
+			return err;
+		}
+		zte_memcpy_s(&en_dev->hw_stats.udp_stats,
+			     &msg->reps.udp_phy_stats_msg,
+			     sizeof(udp_phy_stats));
+		kfree(msg);
+		return 0;
+	}
+
+	err = dpp_stat_asn_phyport_rx_pkt_cnt_get(
+		&pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_UNCLR,
+		&en_dev->hw_stats.udp_stats.rx_arn_phy);
+	if (err != 0) {
+		LOG_ERR("dpp_stat_asn_phyport_rx_pkt_cnt_get failed: %d\n",
+			err);
+		return -1;
+	}
+
+	err = dpp_stat_psn_phyport_tx_pkt_cnt_get(
+		&pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_UNCLR,
+		&en_dev->hw_stats.udp_stats.tx_psn_phy);
+	if (err != 0) {
+		LOG_ERR("dpp_stat_psn_phyport_tx_pkt_cnt_get failed: %d\n",
+			err);
+		return -1;
+	}
+
+	err = dpp_stat_psn_phyport_rx_pkt_cnt_get(
+		&pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_UNCLR,
+		&en_dev->hw_stats.udp_stats.rx_psn_phy);
+	if (err != 0) {
+		/* Rejoined string literal that had been split by a raw line
+		 * break (invalid C). */
+		LOG_ERR("dpp_stat_psn_phyport_rx_pkt_cnt_get failed: %d\n",
+			err);
+		return -1;
+	}
+
+	err = dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(
+		&pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_UNCLR,
+		&en_dev->hw_stats.udp_stats.tx_psn_ack_phy);
+	if (err != 0) {
+		LOG_ERR("dpp_stat_psn_ack_phyport_tx_pkt_cnt_get failed: %d\n",
+			err);
+		return -1;
+	}
+
+	err = dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(
+		&pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_UNCLR,
+		&en_dev->hw_stats.udp_stats.rx_psn_ack_phy);
+	if (err != 0) {
+		LOG_ERR("dpp_stat_psn_ack_phyport_rx_pkt_cnt_get failed: %d\n",
+			err);
+		return -1;
+	}
+
+	return err;
+}
+
+/*
+ * Refresh en_dev->hw_stats.vport_stats and convert the absolute hardware
+ * counters into deltas relative to the pre_stats snapshot taken at probe.
+ */
+int32_t zxdh_vport_stats_get(struct zxdh_en_device *en_dev)
+{
+	int32_t err = 0;
+	struct zxdh_en_vport_stats *vport_stats = &en_dev->hw_stats.vport_stats;
+
+	ZXDH_DEVICE_STATE_CHECK_RTN(en_dev);
+	err = do_get_vport_stats(en_dev, NP_GET_PKT_CNT, vport_stats, FALSE);
+	if (err != 0) {
+		LOG_ERR("zxdh_vport_stats_get failed\n");
+		return err;
+	}
+
+	vport_stats->vqm_stats.rx_vport_packets -=
+		en_dev->pre_stats.vqm_stats.rx_vport_packets;
+	vport_stats->vqm_stats.tx_vport_packets -=
+		en_dev->pre_stats.vqm_stats.tx_vport_packets;
+	vport_stats->vqm_stats.rx_vport_bytes -=
+		en_dev->pre_stats.vqm_stats.rx_vport_bytes;
+	vport_stats->vqm_stats.tx_vport_bytes -=
+		en_dev->pre_stats.vqm_stats.tx_vport_bytes;
+	vport_stats->vqm_stats.rx_vport_dropped -=
+		en_dev->pre_stats.vqm_stats.rx_vport_dropped;
+
+	vport_stats->dtp_stats.rx_lro_packets -=
+		en_dev->pre_stats.dtp_stats.rx_lro_packets;
+	vport_stats->dtp_stats.rx_udp_csum_fail_packets -=
+		en_dev->pre_stats.dtp_stats.rx_udp_csum_fail_packets;
+	vport_stats->dtp_stats.tx_udp_csum_fail_packets -=
+		en_dev->pre_stats.dtp_stats.tx_udp_csum_fail_packets;
+	vport_stats->dtp_stats.rx_tcp_csum_fail_packets -=
+		en_dev->pre_stats.dtp_stats.rx_tcp_csum_fail_packets;
+	vport_stats->dtp_stats.tx_tcp_csum_fail_packets -=
+		en_dev->pre_stats.dtp_stats.tx_tcp_csum_fail_packets;
+	vport_stats->dtp_stats.rx_ipv4_csum_fail_packets -=
+
en_dev->pre_stats.dtp_stats.rx_ipv4_csum_fail_packets;
+	vport_stats->dtp_stats.tx_ipv4_csum_fail_packets -=
+		en_dev->pre_stats.dtp_stats.tx_ipv4_csum_fail_packets;
+
+	/* NP counters: same delta-against-snapshot conversion. */
+	vport_stats->np_stats.rx_vport_unicast_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_unicast_packets;
+	vport_stats->np_stats.tx_vport_unicast_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_unicast_packets;
+	vport_stats->np_stats.rx_vport_unicast_bytes -=
+		en_dev->pre_stats.np_stats.rx_vport_unicast_bytes;
+	vport_stats->np_stats.tx_vport_unicast_bytes -=
+		en_dev->pre_stats.np_stats.tx_vport_unicast_bytes;
+	vport_stats->np_stats.rx_vport_multicast_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_multicast_packets;
+	vport_stats->np_stats.tx_vport_multicast_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_multicast_packets;
+	vport_stats->np_stats.rx_vport_multicast_bytes -=
+		en_dev->pre_stats.np_stats.rx_vport_multicast_bytes;
+	vport_stats->np_stats.tx_vport_multicast_bytes -=
+		en_dev->pre_stats.np_stats.tx_vport_multicast_bytes;
+	vport_stats->np_stats.rx_vport_broadcast_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_broadcast_packets;
+	vport_stats->np_stats.tx_vport_broadcast_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_broadcast_packets;
+	vport_stats->np_stats.rx_vport_broadcast_bytes -=
+		en_dev->pre_stats.np_stats.rx_vport_broadcast_bytes;
+	vport_stats->np_stats.tx_vport_broadcast_bytes -=
+		en_dev->pre_stats.np_stats.tx_vport_broadcast_bytes;
+	vport_stats->np_stats.rx_vport_mtu_drop_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_mtu_drop_packets;
+	vport_stats->np_stats.tx_vport_mtu_drop_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_mtu_drop_packets;
+	vport_stats->np_stats.rx_vport_mtu_drop_bytes -=
+		en_dev->pre_stats.np_stats.rx_vport_mtu_drop_bytes;
+	vport_stats->np_stats.tx_vport_mtu_drop_bytes -=
+		en_dev->pre_stats.np_stats.tx_vport_mtu_drop_bytes;
+	vport_stats->np_stats.rx_vport_plcr_drop_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_plcr_drop_packets;
+	vport_stats->np_stats.tx_vport_plcr_drop_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_plcr_drop_packets;
+	vport_stats->np_stats.rx_vport_plcr_drop_bytes -=
+		en_dev->pre_stats.np_stats.rx_vport_plcr_drop_bytes;
+	vport_stats->np_stats.tx_vport_plcr_drop_bytes -=
+		en_dev->pre_stats.np_stats.tx_vport_plcr_drop_bytes;
+	vport_stats->np_stats.tx_vport_ssvpc_packets -=
+		en_dev->pre_stats.np_stats.tx_vport_ssvpc_packets;
+	vport_stats->np_stats.rx_vport_idma_drop_packets -=
+		en_dev->pre_stats.np_stats.rx_vport_idma_drop_packets;
+	return err;
+}
+
+/* bit[12:14] of pcie_id encodes ep_id (0~4); bit14 set marks a ZF device. */
+static inline bool is_zf_dev(struct zxdh_en_device *en_dev)
+{
+	return (en_dev->pcie_id & BIT(14)) != 0;
+}
+
+/*
+ * A port is a "panel" (front-facing) port unless the device is a UPF or one
+ * of the NE0/NE1 device types.
+ */
+bool zxdh_en_is_panel_port(struct zxdh_en_device *en_dev)
+{
+	if ((en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_UPF) ||
+	    (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE0) ||
+	    (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE1)) {
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Copy the SPM MAC counters for this physical port out of BAR 0 into
+ * en_dev->hw_stats.phy_stats. PF only; VFs have no MAC counters.
+ * The BAR offset of the counter block depends on the current speed mode
+ * (1-lane ports use per-port slots 0..3, 2x/4x modes use shared slots).
+ */
+int32_t zxdh_mac_stats_get(struct zxdh_en_device *en_dev)
+{
+	uint64_t virt_addr = 0;
+	uint64_t stats_addr = 0;
+	uint64_t bytes_addr = 0;
+	struct zxdh_en_spm_stats spm_stats;
+	struct zxdh_en_spm_bytes spm_bytes;
+	struct zxdh_en_phy_stats *phy_stats = &en_dev->hw_stats.phy_stats;
+
+	if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) ||
+	    (en_dev->phy_port > ZXDH_PHY_PORT_MAX))
+		return 0;
+
+	switch (en_dev->curr_speed_modes) {
+	case BIT(SPM_SPEED_1X_1G):
+	case BIT(SPM_SPEED_1X_10G):
+	case BIT(SPM_SPEED_1X_25G):
+	case BIT(SPM_SPEED_1X_50G): {
+		stats_addr = ZXDH_SPM_STATS_OFFSET +
+			     (en_dev->phy_port % 4) *
+				     sizeof(struct zxdh_en_spm_stats);
+		bytes_addr = ZXDH_SPM_BYTES_OFFSET +
+			     (en_dev->phy_port % 4) *
+				     sizeof(struct zxdh_en_spm_bytes);
+		break;
+	}
+	case BIT(SPM_SPEED_2X_100G): {
+		stats_addr = ZXDH_SPM_STATS_OFFSET +
+			     (4 + (en_dev->phy_port % 4) / 2) *
+				     sizeof(struct zxdh_en_spm_stats);
+		bytes_addr =
ZXDH_SPM_BYTES_OFFSET +
+			(4 + (en_dev->phy_port % 4) / 2) *
+				sizeof(struct zxdh_en_spm_bytes);
+		break;
+	}
+	case BIT(SPM_SPEED_4X_40G):
+	case BIT(SPM_SPEED_4X_100G):
+	case BIT(SPM_SPEED_4X_200G): {
+		/* 4-lane modes share a single counter slot (index 4). */
+		stats_addr = ZXDH_SPM_STATS_OFFSET +
+			     4 * sizeof(struct zxdh_en_spm_stats);
+		bytes_addr = ZXDH_SPM_BYTES_OFFSET +
+			     4 * sizeof(struct zxdh_en_spm_bytes);
+		break;
+	}
+	default: {
+		/* Unknown speed mode: silently skip the counter refresh. */
+		return 0;
+	}
+	}
+
+	if (is_zf_dev(en_dev)) {
+		stats_addr = TO_ZF_ADDR(stats_addr);
+		bytes_addr = TO_ZF_ADDR(bytes_addr);
+	}
+
+	virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0);
+	memcpy(&spm_stats, (void *)(virt_addr + stats_addr),
+	       sizeof(struct zxdh_en_spm_stats));
+	memcpy(&spm_bytes, (void *)(virt_addr + bytes_addr),
+	       sizeof(struct zxdh_en_spm_bytes));
+
+	/* Work around bogus error counters caused by the BAR reading all-Fs
+	 * while the device is self-healing. */
+	if ((spm_stats.rx_error == UINT64_MAX) ||
+	    (spm_stats.tx_error == UINT64_MAX))
+		return 0;
+
+	phy_stats->rx_packets_phy = spm_stats.rx_total;
+	phy_stats->tx_packets_phy = spm_stats.tx_total;
+	phy_stats->rx_bytes_phy = spm_bytes.rx_total_bytes;
+	phy_stats->tx_bytes_phy = spm_bytes.tx_total_bytes;
+	phy_stats->rx_error_phy = spm_stats.rx_error;
+	phy_stats->tx_error_phy = spm_stats.tx_error;
+	phy_stats->rx_drop_phy = spm_stats.rx_drop;
+	phy_stats->tx_drop_phy = spm_stats.tx_drop;
+	phy_stats->rx_good_bytes_phy = spm_bytes.rx_good_bytes;
+	phy_stats->tx_good_bytes_phy = spm_bytes.tx_good_bytes;
+	phy_stats->rx_unicast_phy = spm_stats.rx_unicast;
+	phy_stats->tx_unicast_phy = spm_stats.tx_unicast;
+	phy_stats->rx_multicast_phy = spm_stats.rx_multicast;
+	phy_stats->tx_multicast_phy = spm_stats.tx_multicast;
+	phy_stats->rx_broadcast_phy = spm_stats.rx_broadcast;
+	phy_stats->tx_broadcast_phy = spm_stats.tx_broadcast;
+	/* NOTE(review): both fields below are fed from rx_undersize — looks
+	 * intentional (two views of the same HW counter) but confirm. */
+	phy_stats->rx_under64_drop = spm_stats.rx_undersize;
+	phy_stats->rx_undersize_phy = spm_stats.rx_undersize;
+	phy_stats->rx_size_64_phy = spm_stats.rx_size_64;
+	phy_stats->rx_size_65_127 = spm_stats.rx_size_65_127;
+	phy_stats->rx_size_128_255 = spm_stats.rx_size_128_255;
+	phy_stats->rx_size_256_511 = spm_stats.rx_size_256_511;
+	phy_stats->rx_size_512_1023 = spm_stats.rx_size_512_1023;
+	phy_stats->rx_size_1024_1518 = spm_stats.rx_size_1024_1518;
+	phy_stats->rx_size_1519_mru = spm_stats.rx_size_1519_mru;
+	phy_stats->rx_oversize_phy = spm_stats.rx_oversize;
+	phy_stats->tx_undersize_phy = spm_stats.tx_undersize;
+	phy_stats->tx_size_64_phy = spm_stats.tx_size_64;
+	phy_stats->tx_size_65_127 = spm_stats.tx_size_65_127;
+	phy_stats->tx_size_128_255 = spm_stats.tx_size_128_255;
+	phy_stats->tx_size_256_511 = spm_stats.tx_size_256_511;
+	phy_stats->tx_size_512_1023 = spm_stats.tx_size_512_1023;
+	phy_stats->tx_size_1024_1518 = spm_stats.tx_size_1024_1518;
+	phy_stats->tx_size_1519_mtu = spm_stats.tx_size_1519_mtu;
+	phy_stats->tx_oversize_phy = spm_stats.tx_oversize;
+	phy_stats->rx_pause_phy = spm_stats.rx_pause;
+	phy_stats->tx_pause_phy = spm_stats.tx_pause;
+	phy_stats->rx_crc_errors = spm_stats.rx_fcs_error;
+	phy_stats->tx_crc_errors = spm_stats.tx_fcs_error;
+	phy_stats->rx_mac_control_phy = spm_stats.rx_control;
+	phy_stats->tx_mac_control_phy = spm_stats.tx_control;
+	phy_stats->rx_fragment_phy = spm_stats.rx_fragment;
+	phy_stats->tx_fragment_phy = spm_stats.tx_fragment;
+	phy_stats->rx_jabber_phy = spm_stats.rx_jabber;
+	phy_stats->tx_jabber_phy = spm_stats.tx_jabber;
+	phy_stats->rx_vlan_phy = spm_stats.rx_vlan;
+	phy_stats->tx_vlan_phy = spm_stats.tx_vlan;
+	phy_stats->rx_eee_phy = spm_stats.rx_eee;
+	phy_stats->tx_eee_phy = spm_stats.tx_eee;
+
+	return 0;
+}
+
+/* Ask the RISC-V agent to clear this port's MAC counters. Panel ports only. */
+int32_t zxdh_mac_stats_clear(struct zxdh_en_device *en_dev)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t err = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	if (!zxdh_en_is_panel_port(en_dev))
+		return err;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+
msg->payload.hdr_to_agt.op_code = AGENT_MAC_STATS_CLEAR;
+	msg->payload.hdr_to_agt.phyport = en_dev->phy_port;
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg,
+					&para);
+	if (err != 0) {
+		LOG_ERR("zxdh_mac_stats_clear failed, err: %d\n", err);
+		kfree(msg);
+		return err;
+	}
+	kfree(msg);
+	return err;
+}
+
+/*
+ * Initialize the physical port through the RISC-V MAC agent and seed the
+ * PF link state. UPF and NE0/NE1 device types bypass the MAC query and get
+ * a fixed link state instead; UPF additionally skips the agent call.
+ */
+int32_t zxdh_en_phyport_init(struct zxdh_en_device *en_dev)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t err = 0;
+	struct link_info_struct link_info_val = { 0 };
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr_to_agt.op_code = AGENT_MAC_PHYPORT_INIT;
+	msg->payload.hdr_to_agt.phyport = en_dev->phy_port;
+	if (en_dev->ops->is_upf(en_dev->parent)) {
+		msg->payload.hdr_to_agt.phyport = 0;
+		msg->payload.hdr_to_agt.is_upf = 1;
+		en_dev->link_up = FALSE;
+		en_dev->speed = SPEED_UNKNOWN;
+		en_dev->ops->set_pf_link_up(en_dev->parent, en_dev->link_up);
+		netif_carrier_off(en_dev->netdev);
+		LOG_INFO("upf link down init\n");
+		kfree(msg);
+		return err;
+	} else if ((en_dev->ops->get_dev_type(en_dev->parent) ==
+		    ZXDH_DEV_NE0) ||
+		   (en_dev->ops->get_dev_type(en_dev->parent) ==
+		    ZXDH_DEV_NE1)) {
+		msg->payload.hdr_to_agt.phyport = 0;
+		msg->payload.hdr_to_agt.is_upf = 1;
+		/* TODO: finalize how to obtain link state for VGCF devices. */
+		en_dev->link_up = true;
+		en_dev->speed = SPEED_100000;
+		en_dev->ops->set_pf_link_up(en_dev->parent, en_dev->link_up);
+		netif_carrier_on(en_dev->netdev);
+	}
+
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg,
+					&para);
+	if (err != 0) {
+		LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n",
+			err);
+		kfree(msg);
+		return err;
+	}
+
+	en_dev->supported_speed_modes = msg->reps.mac_set_msg.speed_modes;
+	en_dev->advertising_speed_modes = msg->reps.mac_set_msg.speed_modes;
+
+	link_info_val.speed = en_dev->speed;
+	link_info_val.autoneg_enable = en_dev->autoneg_enable;
+	link_info_val.supported_speed_modes = en_dev->supported_speed_modes;
+	link_info_val.advertising_speed_modes = en_dev->advertising_speed_modes;
+	link_info_val.duplex = en_dev->duplex;
+	en_dev->ops->update_pf_link_info(en_dev->parent, &link_info_val);
+
+	kfree(msg);
+	return err;
+}
+
+/* Configure autonegotiation (and advertised speed modes) via the MAC agent. */
+int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable,
+			    uint32_t speed_modes)
+{
+	int32_t err = 0;
+	union zxdh_msg *msg = NULL;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	if (!zxdh_en_is_panel_port(en_dev))
+		return err;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr_to_agt.op_code = AGENT_MAC_AUTONEG_SET;
+	msg->payload.hdr_to_agt.phyport = en_dev->phy_port;
+	msg->payload.mac_set_msg.autoneg = enable;
+	msg->payload.mac_set_msg.speed_modes = speed_modes;
+
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg,
+					&para);
+	if (err != 0) {
+		LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n",
+			err);
+	}
+	kfree(msg);
+	return err;
+}
+
+/* Set the FEC mode of this port via the MAC agent. */
+int32_t zxdh_en_fec_mode_set(struct zxdh_en_device *en_dev, uint32_t fec_cfg)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	if (!zxdh_en_is_panel_port(en_dev))
+		return ret;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+			sizeof(union zxdh_msg));
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_SET;
+	msg->payload.hdr_to_agt.phyport = en_dev->phy_port;
+	msg->payload.mac_fec_mode_msg.fec_cfg = fec_cfg;
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg,
+					&para);
+	kfree(msg);
+	return ret;
+}
+
+int32_t
zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap, + uint32_t *fec_cfg, uint32_t *fec_active) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 0; + + if (!zxdh_en_is_panel_port(en_dev)) + return err; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_FEC_MODE_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", + err); + kfree(msg); + return err; + } + + if (fec_cap) + *fec_cap = msg->reps.mac_fec_mode_msg.fec_cap; + if (fec_cfg) + *fec_cfg = msg->reps.mac_fec_mode_msg.fec_cfg; + if (fec_active) + *fec_active = msg->reps.mac_fec_mode_msg.fec_link; + + kfree(msg); + return err; +} + +int32_t zxdh_en_fc_mode_set(struct zxdh_en_device *en_dev, uint32_t fc_mode) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (!zxdh_en_is_panel_port(en_dev)) + return ret; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_SET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + msg->payload.mac_fc_mode_msg.fc_mode = fc_mode; + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + kfree(msg); + return ret; +} + +int32_t zxdh_en_fc_mode_get(struct zxdh_en_device *en_dev, uint32_t *fc_mode) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 
BAR_MSG_RETRY_CNT_MAX; + + if (!zxdh_en_is_panel_port(en_dev)) + return err; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_FC_MODE_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", + err); + kfree(msg); + return err; + } + + if (fc_mode) + *fc_mode = msg->reps.mac_fc_mode_msg.fc_mode; + + kfree(msg); + return err; +} + +uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev, + struct zxdh_en_module_eeprom_param *query, + uint8_t *data) +{ + union zxdh_msg *msg = NULL; + uint8_t length = 0; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (!zxdh_en_is_panel_port(en_dev)) + return err; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_MODULE_EEPROM_READ; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + msg->payload.module_eeprom_msg.i2c_addr = query->i2c_addr; + msg->payload.module_eeprom_msg.bank = query->bank; + msg->payload.module_eeprom_msg.page = query->page; + msg->payload.module_eeprom_msg.offset = query->offset; + msg->payload.module_eeprom_msg.length = query->length; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", + err); + kfree(msg); + return 0; + } + + if (data) + memcpy(data, msg->reps.module_eeprom_msg.data, + msg->reps.module_eeprom_msg.length); + + length = msg->reps.module_eeprom_msg.length; + kfree(msg); + return length; +} + +int32_t 
zxdh_vf_1588_call_np_interface(struct zxdh_en_device *en_dev)
+{
+	/* Forward the VF's 1588/PTP NP-interface settings to the PF. */
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+			sizeof(union zxdh_msg));
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_VF_1588_CALL_NP;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	msg->payload.vf_1588_call_np.vfid = VQM_VFID(msg->payload.hdr.vport);
+	msg->payload.vf_1588_call_np.call_np_interface_num =
+		en_dev->vf_1588_call_np_num;
+	msg->payload.vf_1588_call_np.ptp_tc_enable_opt =
+		en_dev->ptp_tc_enable_opt;
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
+		kfree(msg);
+		return ret;
+	}
+
+	kfree(msg);
+	return ret;
+}
+
+/*
+ * Ask the PF to initialize this VF port and mirror the returned settings
+ * (MAC address, speed, autoneg, VLAN, link state, phy_port) into en_dev.
+ */
+int32_t zxdh_vf_port_create(struct zxdh_en_device *en_dev)
+{
+	int32_t ret = 0;
+	union zxdh_msg *msg = NULL;
+	uint8_t link_up = 0;
+	bool is_upf = false;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+			sizeof(union zxdh_msg));
+		return -ENOMEM;
+	}
+
+	if (!zxdh_en_is_panel_port(en_dev)) {
+		is_upf = true;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_VF_PORT_INIT;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	msg->payload.vf_init_msg.base_qid = en_dev->phy_index[0];
+	msg->payload.vf_init_msg.hash_search_idx = en_dev->hash_search_idx;
+	msg->payload.vf_init_msg.rss_enable = 1;
+	msg->payload.vf_init_msg.is_upf = is_upf;
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret);
+		kfree(msg);
+		return ret;
+	}
+
+	if (is_upf) {
+		en_dev->link_up = msg->reps.vf_init_msg.link_up;
+	} else {
+		/* Panel VFs read the live link state from the VQM instead. */
+		en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up);
+		en_dev->link_up = link_up;
+		LOG_DEBUG("vf read link_up: %d from vqm\n", link_up);
+	}
+
+	zxdh_netdev_addr_set(en_dev->netdev, msg->reps.vf_init_msg.mac_addr);
+	ether_addr_copy(en_dev->last_np_mac_addr.sa_data,
+			en_dev->netdev->dev_addr);
+	en_dev->netdev->addr_assign_type =
+		msg->reps.vf_init_msg.addr_assign_type;
+	en_dev->speed = msg->reps.vf_init_msg.speed;
+	en_dev->autoneg_enable = msg->reps.vf_init_msg.autoneg_enable;
+	en_dev->supported_speed_modes = msg->reps.vf_init_msg.sup_link_modes;
+	en_dev->advertising_speed_modes = msg->reps.vf_init_msg.adv_link_modes;
+	en_dev->duplex = msg->reps.vf_init_msg.duplex;
+	en_dev->vlan_dev.vlan_id = msg->reps.vf_init_msg.vlan_id;
+	en_dev->vlan_dev.qos = msg->reps.vf_init_msg.vlan_qos;
+
+	if (!is_upf) {
+		en_dev->phy_port = msg->reps.vf_init_msg.phy_port;
+		en_dev->ops->set_pf_phy_port(en_dev->parent, en_dev->phy_port);
+	}
+
+	if (en_dev->link_up) {
+		en_dev->ops->set_pf_link_up(en_dev->parent, TRUE);
+		netif_carrier_on(en_dev->netdev);
+	} else {
+		en_dev->ops->set_pf_link_up(en_dev->parent, FALSE);
+		netif_carrier_off(en_dev->netdev);
+	}
+
+	kfree(msg);
+	return ret;
+}
+
+/* Ask the PF to tear down this VF port (NP uninit on the PF side). */
+int32_t zxdh_vf_port_delete(struct zxdh_en_device *en_dev)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+			sizeof(union zxdh_msg));
+		return -ENOMEM;
+	}
+
+	/* dpp_np_uninit */
+	msg->payload.hdr.op_code = ZXDH_VF_PORT_UNINIT;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent,
MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + kfree(msg); + return ret; +} + +#ifdef VF_STATS_UPDATE +int32_t zxdh_vf_item_init_stats_update(struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + uint32_t vf_id = GET_VFID(en_dev->vport); + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_GET_NP_STATS; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.vf_id = vf_id; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.np_stats_get_msg.clear_mode = NP_GET_PKT_CNT; + msg->payload.np_stats_get_msg.is_init_get = true; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret); + } + + kfree(msg); + return ret; +} +#endif + +int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr, uint8_t filter_flag) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_MAC_ADD; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.mac_addr_set_msg.filter_flag = filter_flag; + memcpy(msg->payload.mac_addr_set_msg.mac_addr, dev_addr, + en_dev->netdev->addr_len); + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0) { + if (msg->reps.vf_mac_set_msg.mac_err_flag == + ZXDH_REPS_BEYOND_MAC) { + kfree(msg); + return ZXDH_REPS_BEYOND_MAC; + } else if 
(msg->reps.vf_mac_set_msg.mac_err_flag == + ZXDH_REPS_EXIST_MAC) { + kfree(msg); + return ZXDH_REPS_EXIST_MAC; + } + } + kfree(msg); + return ret; +} + +int32_t zxdh_vf_dpp_dump_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_MAC_DUMP; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + memcpy(msg->payload.mac_addr_set_msg.mac_addr, dev_addr, + en_dev->netdev->addr_len); + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0) { + LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret); + kfree(msg); + return ZXDH_REPS_EXIST_MAC; + } + + kfree(msg); + return ret; +} + +int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr, uint8_t filter_flag, + bool mac_flag) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_MAC_DEL; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.mac_addr_set_msg.filter_flag = filter_flag; + msg->payload.mac_addr_set_msg.mac_flag = mac_flag; + memcpy(msg->payload.mac_addr_set_msg.mac_addr, dev_addr, + en_dev->netdev->addr_len); + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0) { + LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret); + } + kfree(msg); + + return ret; +} + 
+/* Ask the PF to enable/disable RSS for this VF. */
+int32_t zxdh_vf_rss_en_set(struct zxdh_en_device *en_dev, uint32_t enable)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_RSS_EN_SET;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	msg->payload.rss_enable_msg.rss_enable = enable;
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret);
+	}
+	kfree(msg);
+
+	return ret;
+}
+
+/* Ask the PF to add an IPv6 multicast MAC filter for this VF. */
+int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev,
+				 const uint8_t *mac_addr)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t err = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_IPV6_MAC_ADD;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	memcpy(msg->payload.mac_addr_set_msg.mac_addr, mac_addr,
+	       en_dev->netdev->addr_len);
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if ((err != 0) &&
+	    (msg->reps.vf_mac_set_msg.mac_err_flag == ZXDH_REPS_BEYOND_MAC)) {
+		LOG_ERR("Add Multicast MAC Address(%pM) Failed, Beyond Max MAC Num in the whole transfer area\n",
+			mac_addr);
+	}
+	kfree(msg);
+
+	return err;
+}
+
+/* Ask the PF to delete an IPv6 multicast MAC filter for this VF. */
+int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev,
+				 const uint8_t *mac_addr)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_IPV6_MAC_DEL;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	memcpy(msg->payload.mac_addr_set_msg.mac_addr, mac_addr,
+	       en_dev->netdev->addr_len);
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret);
+	}
+	kfree(msg);
+
+	return ret;
+}
+
+/* Ask the PF to add the LACP multicast MAC filter for this VF. */
+int32_t zxdh_vf_dpp_add_lacp_mac(struct zxdh_en_device *en_dev,
+				 const uint8_t *mac_addr)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t err = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_LACP_MAC_ADD;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	memcpy(msg->payload.mac_addr_set_msg.mac_addr, mac_addr,
+	       en_dev->netdev->addr_len);
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (err != 0) {
+		LOG_ERR("Add LACP Multicast MAC Address(%pM) Failed\n",
+			mac_addr);
+	}
+	kfree(msg);
+
+	return err;
+}
+
+/* Ask the PF to delete the LACP multicast MAC filter for this VF. */
+int32_t zxdh_vf_dpp_del_lacp_mac(struct zxdh_en_device *en_dev,
+				 const uint8_t *mac_addr)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_LACP_MAC_DEL;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	memcpy(msg->payload.mac_addr_set_msg.mac_addr,
mac_addr,
+	       en_dev->netdev->addr_len);
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("DEL LACP Multicast MAC Address(%pM) Failed\n",
+			mac_addr);
+	}
+	kfree(msg);
+
+	return ret;
+}
+
+/* Ask the PF to set one egress port attribute (mode/value pair) for this VF. */
+int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode,
+				  uint32_t value, uint8_t fow)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_PORT_ATTRS_SET;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	msg->payload.port_attr_set_msg.mode = mode;
+	msg->payload.port_attr_set_msg.value = value;
+	msg->payload.port_attr_set_msg.allmulti_follow = fow;
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret);
+	}
+	kfree(msg);
+
+	return ret;
+}
+
+/* Fetch this VF's egress port attribute table from the PF. */
+int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev,
+				  ZXDH_SRIOV_VPORT_T *port_attr_entry)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_PORT_ATTRS_GET;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret);
+		kfree(msg);
+		return ret;
+	}
+
+	memcpy(port_attr_entry, &msg->reps.port_attr_get_msg.port_attr_entry,
+	       sizeof(ZXDH_SRIOV_VPORT_T));
+	kfree(msg);
+
+	return ret;
+}
+
+/* Ask the PF to set this VF's promiscuous/allmulti mode. */
+int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode,
+				 uint8_t value, uint8_t fow)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t ret = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = BAR_MSG_RETRY_CNT_MAX;
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (unlikely(NULL == msg)) {
+		LOG_ERR("failed to kzalloc\n");
+		return -ENOMEM;
+	}
+
+	msg->payload.hdr.op_code = ZXDH_PROMISC_SET;
+	msg->payload.hdr.vport = en_dev->vport;
+	msg->payload.hdr.pcie_id = en_dev->pcie_id;
+	msg->payload.promisc_set_msg.mode = mode;
+	msg->payload.promisc_set_msg.value = value;
+	msg->payload.promisc_set_msg.mc_follow = fow;
+
+	ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF,
+					msg, msg, &para);
+	if (ret != 0) {
+		LOG_ERR("en_dev->ops->msg_send_cmd failed, ret = %d\n", ret);
+	}
+
+	kfree(msg);
+
+	return ret;
+}
+
+/*
+ * PF-side helper: query a VF's software error statistics over the PF->VF
+ * mailbox. Returns GET_STAT_SUCCESS / GET_STAT_FAILED / VF_ERR /
+ * ACTION_IS_NOT_SUPPORTED.
+ */
+int32_t zxdh_get_vf_err_stats(struct zxdh_en_device *en_dev,
+			      zxdh_get_sw_stats *payload,
+			      zxdh_sw_stats_reply *reply)
+{
+	union zxdh_msg *msg = NULL;
+	int32_t err = 0;
+	struct zxdh_bar_extra_para para = { 0 };
+
+	para.is_sync = true;
+	para.retrycnt = 0;
+
+	/* Check whether the target VF has been probed. */
+	if (!en_dev->ops->get_vf_is_probe(en_dev->parent, payload->vf_idx)) {
+		LOG_ERR("vf(%u) is not probed\n", payload->vf_idx);
+		return VF_ERR;
+	}
+
+	msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !",
+			sizeof(union zxdh_msg));
+		return GET_STAT_FAILED;
+	}
+
+	/* Fill in the request message. */
+	msg->payload.hdr_vf.op_code = ZXDH_GET_SW_STATS;
+	msg->payload.hdr_vf.dst_pcie_id =
+		FIND_VF_PCIE_ID(en_dev->pcie_id, payload->vf_idx);
+	memcpy(&msg->payload.vf_sw_stats, payload, sizeof(zxdh_get_sw_stats));
+
+	err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_PF_BAR_MSG_TO_VF,
+					msg, msg, &para);
+	if (err != 0) {
+		if (err ==
ZXDH_INVALID_OP_CODE) { + LOG_ERR("vf is used by kernel driver, action is not supported!!!\n"); + kfree(msg); + return ACTION_IS_NOT_SUPPORTED; + } + LOG_ERR("failed to get VF[%d] err stats:%d\n", payload->vf_idx, + err); + kfree(msg); + return GET_STAT_FAILED; + } + memcpy(reply, &msg->reps.vf_sw_stats_rsp, sizeof(zxdh_sw_stats_reply)); + kfree(msg); + return GET_STAT_SUCCESS; +} +EXPORT_SYMBOL(zxdh_get_vf_err_stats); + +int32_t zxdh_cfg_misx_mode(struct zxdh_en_device *en_dev, uint16_t rx_msix_mode, + uint16_t tx_msix_mode) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->vqm_msg.opcode = MSIX_MODE_SET; + msg->vqm_msg.cmd = MSIX_MODE_CMD; + msg->vqm_msg.msix_mode_sel.rx_msix_mode = rx_msix_mode; + msg->vqm_msg.msix_mode_sel.tx_msix_mode = tx_msix_mode; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send cfg msix mode msg to riscv failed\n"); + } + kfree(msg); + return err; +} + +int32_t zxdh_get_misx_mode(struct zxdh_en_device *en_dev, + uint16_t *rx_msix_mode, uint16_t *tx_msix_mode) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->vqm_msg.opcode = MSIX_MODE_GET; + msg->vqm_msg.cmd = MSIX_MODE_CMD; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send cfg msix mode msg to riscv failed\n"); + kfree(msg); + return 1; + } + + *rx_msix_mode = 
msg->vqm_reps.msix_mode_sel.rx_msix_mode; + *tx_msix_mode = msg->vqm_reps.msix_mode_sel.tx_msix_mode; + kfree(msg); + return 0; +} + +int32_t zxdh_cfg_coalesce_usecs(struct zxdh_en_device *en_dev, + uint32_t rx_coalesce_usecs, + uint32_t tx_coalesce_usecs) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->vqm_msg.opcode = MSIX_MODE_SET; + msg->vqm_msg.cmd = COALESCE_USECS_CMD; + msg->vqm_msg.wr_used_t.rx_used_ring_t = rx_coalesce_usecs; + msg->vqm_msg.wr_used_t.tx_used_ring_t = tx_coalesce_usecs; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send cfg msix mode msg to riscv failed\n"); + } + kfree(msg); + return err; +} + +int32_t zxdh_get_coalesce_usecs(struct zxdh_en_device *en_dev, + uint32_t *rx_coalesce_usecs, + uint32_t *tx_coalesce_usecs) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->vqm_msg.opcode = MSIX_MODE_GET; + msg->vqm_msg.cmd = COALESCE_USECS_CMD; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send get_coalesce_usecs msg to riscv failed\n"); + kfree(msg); + return 1; + } + + *rx_coalesce_usecs = msg->vqm_reps.wr_used_t.rx_used_ring_t; + *tx_coalesce_usecs = msg->vqm_reps.wr_used_t.tx_used_ring_t; + kfree(msg); + return 0; +} + +int32_t zxdh_en_vport_create(struct zxdh_en_device *en_dev) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = 
en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (!en_dev->ops->if_init(en_dev->parent)) { + return 0; + } + + return dpp_vport_create(&pf_info); +} + +int32_t zxdh_en_vport_delete(struct zxdh_en_device *en_dev) +{ + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (!en_dev->ops->if_init(en_dev->parent)) { + return 0; + } + + return dpp_vport_delete(&pf_info); +} + +int32_t zxdh_pf_vport_create(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + uint32_t lag_id = 0; /* 默认为0 */ + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ret = zxdh_en_vport_create(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_en_vport_create failed: %d\n", ret); + return ret; + } + + ret = dpp_vport_bond_pf(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_bond_pf failed: %d\n", ret); + goto err_vport; + } + + if (!zxdh_en_is_panel_port(en_dev)) { + if ((en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE1)) + lag_id = 1; + + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_LAG_ID, lag_id); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set lag_id 0 failed: %d\n", + ret); + goto err_vport; + } + + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_LAG_EN_OFF, 1); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set bond_en 1 failed: %d\n", + ret); + goto err_vport; + } + } else { + ret = dpp_uplink_phy_bond_vport(&pf_info, en_dev->phy_port); + if (ret != 0) { + LOG_ERR("dpp_uplink_phy_bond_vport failed: %d\n", ret); + goto err_vport; + } + } + + return ret; + +err_vport: + zxdh_en_vport_delete(en_dev); + return ret; +} + +int32_t zxdh_rxfh_set(struct zxdh_en_device *en_dev, uint32_t *queue_map) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, 
GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (queue_map == NULL) { + kfree(msg); + return -1; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = dpp_rxfh_set(&pf_info, queue_map, ZXDH_INDIR_RQT_SIZE); + if (err != 0) { + LOG_ERR("dpp_rxfh_set failed: %d\n", err); + } + } else { + msg->payload.hdr.op_code = ZXDH_RXFH_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + memcpy(msg->payload.rxfh_set_msg.queue_map, queue_map, + ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t)); + err = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err); + } + } + + kfree(msg); + return err; +} + +void zxdh_rxfh_del(struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (en_dev->quick_remove) + return; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + if (en_dev->ops->is_bond(en_dev->parent)) { + kfree(msg); + return; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + dpp_rxfh_del(&pf_info); + } else { + msg->payload.hdr.op_code = ZXDH_RXFH_DEL; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + err = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("zxdh_send_command_to_pf_np failed: %d\n", err); + } + } + kfree(msg); +} + +int32_t zxdh_ethtool_init(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 
}; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ret = dpp_vport_hash_funcs_set(&pf_info, en_dev->eth_config.hash_func); + if (ret != 0) { + LOG_ERR("dpp_vport_hash_funcs_set failed: %d\n", ret); + return ret; + } + + ret = dpp_vport_rx_flow_hash_set(&pf_info, + en_dev->eth_config.hash_mode); + if (ret != 0) { + LOG_ERR("zxdh_rx_flow_hash_set failed: %d\n", ret); + return ret; + } + + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_PORT_BASE_QID, + (uint16_t)en_dev->phy_index[0]); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set %d failed: %d\n", + en_dev->phy_index[0], ret); + return ret; + } + + ret = dpp_vqm_vfid_vlan_init(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_init failed: %d\n", ret); + return ret; + } + + ret = dpp_vlan_filter_init(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_vlan_filter_init failed: %d\n", ret); + return ret; + } + + ret = dpp_add_vlan_filter(&pf_info, 0); + if (ret != 0) { + LOG_ERR("dpp_add_vlan_filter 0 failed: %d\n", ret); + return ret; + } + + return ret; +} + +int32_t zxdh_pf_flush_mac(struct zxdh_en_device *en_dev) +{ + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 删除此转发域所有单播mac地址 */ + err = dpp_unicast_all_mac_delete(&pf_info); + if (err != 0) { + LOG_ERR("dpp_unicast_all_mac_delete failed\n"); + return err; + } + LOG_DEBUG("dpp_unicast_all_mac_delete succeed\n"); + + /* 删除此转发域中所有组播mac地址 */ + err = dpp_multicast_all_mac_delete(&pf_info); + if (err != 0) { + LOG_ERR("dpp_multicast_all_mac_delete failed\n"); + return err; + } + LOG_DEBUG("dpp_multicast_all_mac_delete succeed\n"); + + return err; +} + +int32_t zxdh_pf_flush_mac_online(struct zxdh_en_device *en_dev) +{ + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 删除此转发域所有单播mac地址 */ + err = dpp_unicast_all_mac_online_delete(&pf_info); + if (err != 0) { + 
LOG_ERR("dpp_unicast_all_mac_online_delete failed:%d\n", err); + return err; + } + + /* 删除此转发域中所有组播mac地址 */ + err = dpp_multicast_all_mac_online_delete(&pf_info); + if (err != 0) { + LOG_ERR("dpp_multicast_all_mac_online_delete failed:%d\n", err); + return err; + } + + return err; +} + +int32_t zxdh_pf_port_delete(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (en_dev == NULL) { + return -1; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + dpp_vport_uc_promisc_set(&pf_info, 0); + dpp_vport_mc_promisc_set(&pf_info, 0); + + /* pf删除所有配置到np的mac地址 */ + if (!en_dev->ops->is_bond(en_dev->parent)) { + ret = zxdh_pf_flush_mac_online(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_flush_mac_online failed: %d\n", ret); + return ret; + } + } + ret = dpp_fd_acl_all_delete(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_fd_acl_all_delete failed: %d\n", ret); + return ret; + } + ret = zxdh_en_vport_delete(en_dev); + if (ret != 0) { + LOG_ERR("dpp_vport_delete failed: %d\n", ret); + return ret; + } + + return ret; +} + +int32_t zxdh_aux_alloc_pannel(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + struct zxdh_pannle_port port; + + ret = en_dev->ops->request_port(en_dev->parent, &port); + if (ret != 0) { + LOG_ERR("zxdh_aux_alloc_pannel failed \n"); + goto out; + } + + en_dev->phy_port = port.phyport; + en_dev->pannel_id = port.pannel_id; + en_dev->link_check_bit = port.link_check_bit; + + LOG_DEBUG("bond pf: pannel %u, phyport %u check bit %u \n", + en_dev->pannel_id, en_dev->phy_port, en_dev->link_check_bit); + +out: + return ret; +} + +int32_t zxdh_vf_fd_en_set(struct zxdh_en_device *en_dev, uint32_t enable) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), 
GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_FD_EN_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.vf_fd_enable_msg.fd_enable = enable; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (err != 0) { + LOG_ERR("Fd_set:zxdh_send_command_to_pf_np to set fd failed: %d\n", + err); + } + + kfree(msg); + return err; +} + +int32_t zxdh_vf_add_fd(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, uint32_t *index) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + msg->payload.hdr.op_code = ZXDH_FD_ADD; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + zte_memcpy_s(&msg->payload.vf_fd_cfg_msg.fs, fs, sizeof(*fs)); + + msg->payload.vf_fd_cfg_msg.index = DEFAULT_ADD_INDEX; + if (en_dev->fs.ethtool_fs[fs->location].is_used) + msg->payload.vf_fd_cfg_msg.index = + en_dev->fs.ethtool_fs[fs->location].index; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (err != 0) { + LOG_ERR("Add_fd:zxdh_send_command_to_pf_np failed: %d\n", err); + kfree(msg); + return err; + } + /* 非替换操作,更新index */ + *index = msg->reps.fd_cfg_resp.index; + kfree(msg); + return err; +} + +int32_t zxdh_vf_get_fd(struct zxdh_en_device *en_dev, uint32_t index) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + 
LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + msg->payload.hdr.op_code = ZXDH_FD_GET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + + msg->payload.vf_fd_cfg_msg.index = index; + + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (err != 0) { + LOG_ERR("Get_fd:zxdh_send_command_to_pf_np failed: %d\n", err); + } + kfree(msg); + return err; +} + +int32_t zxdh_vf_del_fd(struct zxdh_en_device *en_dev, uint32_t index) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.hdr.op_code = ZXDH_FD_DEL; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.vf_fd_cfg_msg.index = index; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (err != 0) { + LOG_ERR("Del_fd:zxdh_send_command_to_pf_np failed: %d\n", err); + } + + kfree(msg); + return err; +} +#if 0 +int32_t zxdh_aux_query_phyport(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + struct aux_phyport_message recv = {0}; + struct aux_phyport_message *recv_data = &recv; + zxdh_aux_phyport_msg msg = {0}; + zxdh_aux_phyport_msg *payload = &msg; + struct zxdh_bar_extra_para para = {0}; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + payload->pcie_id = en_dev->pcie_id; + payload->pannel_id = en_dev->pannel_id; + payload->rsv = en_dev->pannel_id; + + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_PHYPORT_QUERY, payload, recv_data, ¶); + if (ret != 0) { + LOG_ERR("zxdh_aux_query_phyport send message failed \n"); + goto out; + } + + en_dev->phy_port = recv_data->phyport; + +out: + return 
ret; +} +#endif + +static void pf_recover_mac_get(struct zxdh_en_device *en_dev) +{ + struct netdev_hw_addr *ha = NULL; + uint32_t i = 0; + uint32_t j = 0; + + /* 给net_device结构体上锁 */ + netif_addr_lock_bh(en_dev->netdev); + + /* 获取单播链表mac */ + list_for_each_entry(ha, &en_dev->netdev->uc.list, list) { + if (i >= VF_MAX_UNICAST_MAC) { + LOG_ERR("umac_num: %d exceed the max num: %d\n", i, + VF_MAX_UNICAST_MAC); + break; + } + zte_memcpy_s(en_dev->eth_config.pf_recover_mac.umac[i].mac_addr, + ha->addr, ETH_ALEN); + i++; + } + + /* 获取组播链表mac */ + list_for_each_entry(ha, &en_dev->netdev->mc.list, list) { + if (j >= VF_MAX_MULTICAST_MAC) { + LOG_ERR("mmac_num: %d exceed the max num: %d", j, + VF_MAX_MULTICAST_MAC); + break; + } + zte_memcpy_s(en_dev->eth_config.pf_recover_mac.mmac[j].mac_addr, + ha->addr, ETH_ALEN); + j++; + } + + /* 给net_device结构体释放锁 */ + netif_addr_unlock_bh(en_dev->netdev); + en_dev->eth_config.pf_recover_mac.umac_num = i; + en_dev->eth_config.pf_recover_mac.mmac_num = j; + return; +} + +static int32_t eth_pf_mac_addr_recover(struct zxdh_en_device *en_dev) +{ + DPP_PF_INFO_T pf_info = { 0 }; + uint32_t i = 0; + int32_t ret = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 本地mac配置 */ + ret = dpp_add_mac(&pf_info, en_dev->netdev->dev_addr, 0, 0); + if (ret != 0) { + LOG_ERR("pf add mac failed in recover local mac: %d\n", ret); + return ret; + } + + /* 遍历链表,获取当前待回复mac */ + pf_recover_mac_get(en_dev); + + /* 单播链表mac配置 */ + for (i = 0; i < en_dev->eth_config.pf_recover_mac.umac_num; i++) { + ret = dpp_add_mac( + &pf_info, + en_dev->eth_config.pf_recover_mac.umac[i].mac_addr, 0, + 0); + if (ret != 0) { + LOG_ERR("pf add mac failed in recover uc list: %d\n", + ret); + return ret; + } + } + + /* 组播链表mac配置 */ + for (i = 0; i < en_dev->eth_config.pf_recover_mac.mmac_num; i++) { + ret = dpp_multi_mac_add_member( + &pf_info, + en_dev->eth_config.pf_recover_mac.mmac[i].mac_addr); + if (ret != 0) { + LOG_ERR("pf add mac failed in 
recover mc list: %d\n", + ret); + return ret; + } + } + return 0; +} + +static int32_t zxdh_recover_fd_cfg(struct zxdh_en_device *en_dev) +{ + uint32_t orig_index = 0; + uint32_t new_index = 0; + uint32_t flow_num = 0; + uint32_t location = 0; + int32_t err = 0; + ZXDH_FD_CFG_T p_fd_cfg = { 0 }; + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + LOG_INFO("recover_flow_table: total flow_num is %d", + en_dev->fs.tot_num_rules); + while (flow_num < en_dev->fs.tot_num_rules && + location < ETHTOOL_FD_MAX_NUM) { + if (en_dev->fs.ethtool_fs[location].is_used) { + orig_index = en_dev->fs.ethtool_fs[location].index; + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { + en_dev->fs.ethtool_fs[location].is_used = false; + err = zxdh_vf_add_fd( + en_dev, + &en_dev->fs.ethtool_fs[location].rfs, + &new_index); + if (err) { + LOG_ERR("zxdh_vf_recover_fd failed, location %d\n", + location); + return -1; + } + en_dev->fs.ethtool_fs[location].is_used = true; + } else { + zxdh_flow_table_add( + &en_dev->fs.ethtool_fs[location].rfs, + &p_fd_cfg, &pf_info); + err = zxdh_flow_table_pf_action_add( + en_dev, + &en_dev->fs.ethtool_fs[location].rfs, + &p_fd_cfg); + if (err) { + LOG_ERR("zxdh_cfg_fd_add_action failed, location %d", + location); + return -EINVAL; + } + err = dpp_fd_acl_index_request(&pf_info, + &new_index); + if (err != 0) { + LOG_ERR("zxdh_cfg_np_fd_acl_request failed, location %d\n", + location); + return -1; + } + err = dpp_tbl_fd_cfg_add(&pf_info, + ZXDH_SDT_FD_CFG_TABLE, + new_index, &p_fd_cfg); + if (err != 0) { + LOG_ERR("zxdh_cfg_np_fd_recover failed, location %d\n", + location); + return -1; + } + } + en_dev->fs.ethtool_fs[location].index = new_index; + flow_num++; + LOG_INFO( + "recover_flow_table: location is %u, orig_index is %u, new_index is %u", + location, orig_index, new_index); + } + location++; + } + return 0; +} + +int32_t zxdh_pf_port_init(struct zxdh_en_device *en_dev, bool boot) +{ 
+ bool vepa = false; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (en_dev == NULL) { + return -1; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + +#if 0 + en_dev->ops->dpp_np_init(en_dev->parent, en_dev->vport); +#endif + + ret = zxdh_pf_vport_create(en_dev); + ZXDH_CHECK_RET_RETURN(ret, "zxdh_pf_vport_create failed: %d\n", ret); + + if (zxdh_en_is_panel_port(en_dev)) + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_MAGIC_PACKET_ENABLE, + (en_dev->wolopts == WAKE_MAGIC)); + + zxdh_mac_stats_clear(en_dev); + + if (en_dev->ops->is_bond(en_dev->parent)) { + if (!en_dev->ops->if_init(en_dev->parent)) { + LOG_INFO("First net-device is init\n"); + return 0; + } + + /* 只将第一个网络设备的队列配置到vport属性表中 */ + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_PORT_BASE_QID, + (uint16_t)en_dev->phy_index[0]); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "set qid: %d failed: %d\n", + (uint16_t)en_dev->phy_index[0], ret); + return 0; + } + + vepa = en_dev->ops->get_vepa(en_dev->parent); + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_VEPA_EN_OFF, + (uint32_t)vepa); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_vport, + "dpp_vport_attr_set SRIOV_VPORT_VEPA_EN_OFF failed: %d\n", ret); + LOG_INFO("init vport(0x%x) to %s mode\n", en_dev->vport, + vepa ? 
"vepa" : "veb"); + + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_HASH_SEARCH_INDEX, + en_dev->hash_search_idx); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "set hash_search_index %u failed: %d\n", + en_dev->hash_search_idx, ret); + + ret = zxdh_ethtool_init(en_dev); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "zxdh_ethtool_init failed: %d\n", ret); + + if (boot) { + /* PF删除复位前配置到np的mac */ + ret = zxdh_pf_flush_mac(en_dev); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "zxdh_pf_flush_mac failed: %d\n", ret); + + ret = dpp_add_mac(&pf_info, en_dev->netdev->dev_addr, 0, 0); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "dpp_add_mac failed: %d\n", ret); + } else { + ret = eth_pf_mac_addr_recover(en_dev); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "eth_pf_mac_addr_recover failed: %d\n", + ret); + } + + if (!boot) { + ret = zxdh_vlan_trunk_recover( + &pf_info, en_dev->eth_config.vlan_trunk_bitmap); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "zxdh_vlan_trunk_recover failed: %d\n", + ret); + } + + ether_addr_copy(en_dev->last_np_mac_addr.sa_data, + en_dev->netdev->dev_addr); + if (en_dev->promisc_enabled) { + dpp_vport_uc_promisc_set(&pf_info, 1); + dpp_vport_mc_promisc_set(&pf_info, 1); + dpp_vport_promisc_en_set(&pf_info, 1); + } else if (en_dev->allmulti_enabled) { + dpp_vport_mc_promisc_set(&pf_info, 1); + } else { + dpp_vport_uc_promisc_set(&pf_info, 0); + dpp_vport_mc_promisc_set(&pf_info, 0); + } + + if (!boot) { + /* 删除复位前配置的fd表 */ + ret = dpp_fd_acl_all_delete(&pf_info); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "dpp_fd_acl_all_delete failed: %d\n", + ret); + ret = zxdh_recover_fd_cfg(en_dev); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_vport, + "dpp_fd_acl_all_recover failed: %d\n", + ret); + } + + if ((!zxdh_en_is_panel_port(en_dev)) || (!boot)) + return 0; + + /* pf先清除arn/psn类型的udp报文统计 */ + ret = dpp_stat_asn_phyport_rx_pkt_cnt_get( + &pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_CLR, + &en_dev->hw_stats.udp_stats.rx_arn_phy); + ZXDH_CHECK_RET_GOTO_ERR( + ret, 
err_vport, + "dpp_stat_asn_phyport_rx_pkt_cnt_get failed: %d\n", ret); + + ret = dpp_stat_psn_phyport_tx_pkt_cnt_get( + &pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_CLR, + &en_dev->hw_stats.udp_stats.tx_psn_phy); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_vport, + "dpp_stat_psn_phyport_tx_pkt_cnt_get failed: %d\n", ret); + + ret = dpp_stat_psn_phyport_rx_pkt_cnt_get( + &pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_CLR, + &en_dev->hw_stats.udp_stats.rx_psn_phy); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_vport, + "dpp_stat_psn_phyport_rx_pkt_cnt_get failed: %d\n", ret); + + ret = dpp_stat_psn_ack_phyport_tx_pkt_cnt_get( + &pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_CLR, + &en_dev->hw_stats.udp_stats.tx_psn_ack_phy); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_vport, + "dpp_stat_psn_ack_phyport_tx_pkt_cnt_get failed: %d\n", ret); + + ret = dpp_stat_psn_ack_phyport_rx_pkt_cnt_get( + &pf_info, en_dev->phy_port, STAT_RD_CLR_MODE_CLR, + &en_dev->hw_stats.udp_stats.rx_psn_ack_phy); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_vport, + "dpp_stat_psn_ack_phyport_rx_pkt_cnt_get failed: %d\n", ret); + + return 0; + +err_vport: + zxdh_en_vport_delete(en_dev); + return ret; +} + +int32_t zxdh_vf_dpp_port_init(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + + ret = zxdh_vf_port_create(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_port_create failed: %d\n", ret); + } + + return ret; +} + +int32_t zxdh_port_reload(struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + bool is_upf = false; + uint8_t link_up = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr.op_code = ZXDH_VF_PORT_RELOAD; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + + msg->payload.vf_reload_msg.base_qid = 
en_dev->phy_index[0]; + is_upf = !(zxdh_en_is_panel_port(en_dev)); + msg->payload.vf_reload_msg.is_upf = is_upf; + msg->payload.vf_reload_msg.hash_search_idx = en_dev->hash_search_idx; + zte_memcpy_s(msg->payload.vf_reload_msg.queue_map, + en_dev->eth_config.queue_map, + ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t)); + + msg->payload.vf_reload_msg.hash_mode = en_dev->eth_config.hash_mode; + msg->payload.vf_reload_msg.hash_func = en_dev->eth_config.hash_func; + + /* 将vlan trunk表copy到消息中*/ + zte_memcpy_s(msg->payload.vf_reload_msg.vlan_trunk_bitmap, + en_dev->eth_config.vlan_trunk_bitmap, + sizeof(en_dev->eth_config.vlan_trunk_bitmap)); + + if (en_dev->promisc_enabled) { + msg->payload.vf_reload_msg.uc_promisc = true; + msg->payload.vf_reload_msg.mc_promisc = true; + } else if (en_dev->allmulti_enabled) { + msg->payload.vf_reload_msg.mc_promisc = true; + } + + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_VF_BAR_MSG_TO_PF, + msg, msg, ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret); + kfree(msg); + return ret; + } + + if (is_upf) { + en_dev->link_up = msg->reps.vf_reload_msg.link_up; + } else { + en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up); + en_dev->link_up = link_up; + LOG_DEBUG("vf read link_up: %d from vqm\n", link_up); + } + + en_dev->speed = msg->reps.vf_reload_msg.speed; + en_dev->duplex = msg->reps.vf_reload_msg.duplex; + en_dev->vlan_dev.qos = msg->reps.vf_reload_msg.vlan_qos; + + netif_tx_wake_all_queues(en_dev->netdev); + if (en_dev->link_up) { + en_dev->ops->set_pf_link_up(en_dev->parent, TRUE); + netif_carrier_on(en_dev->netdev); + } else { + en_dev->ops->set_pf_link_up(en_dev->parent, FALSE); + netif_carrier_off(en_dev->netdev); + } + + kfree(msg); + return ret; +} + +int32_t zxdh_port_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + err = zxdh_indir_to_queue_map(en_dev, en_dev->indir_rqt); + 
ZXDH_CHECK_RET_RETURN(err, "zxdh_indir_to_queue_map failed: %d\n", err); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = zxdh_pf_port_init(en_dev, false); + ZXDH_CHECK_RET_RETURN(err, "zxdh_port_init failed: %d\n", err); + err = zxdh_en_hash_key_recover(en_dev); + ZXDH_CHECK_RET_GOTO_ERR(err, port_uninit, + "zxdh_en_hash_key_recover failed: %d\n", + err); + if (!en_dev->ops->is_bond(en_dev->parent)) { + err = zxdh_rxfh_set(en_dev, + en_dev->eth_config.queue_map); + ZXDH_CHECK_RET_GOTO_ERR(err, port_uninit, + "zxdh_rxfh_set failed: %d\n", + err); + } + } else { + err = zxdh_port_reload(en_dev); + ZXDH_CHECK_RET_RETURN(err, "zxdh_port_reload failed: %d\n", + err); + err = zxdh_recover_fd_cfg(en_dev); + ZXDH_CHECK_RET_RETURN(err, "zxdh_port_recover_fd failed: %d\n", + err); + } + + err = zxdh_en_config_mtu_to_np(netdev, netdev->mtu); + ZXDH_CHECK_RET_GOTO_ERR(err, port_uninit, + "zxdh_en_mtu_init failed: %d\n", err); + + if (!en_dev->ops->is_bond(en_dev->parent)) { + err = zxdh_en_sync_features(en_dev, netdev->features); + ZXDH_CHECK_RET_GOTO_ERR(err, port_uninit, + "zxdh_en_sync_features failed: %d\n", + err); + } + + return 0; +port_uninit: + zxdh_vport_uninit(netdev); + return err; +} + +void zxdh_vport_uninit(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + + if (en_dev->quick_remove) + return; + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = zxdh_pf_port_delete(netdev); + if (ret != 0) { + LOG_ERR("zxdh_pf_port_delete failed: %d\n", ret); + } + } else { +#ifdef VF_STATS_UPDATE + ret = zxdh_vf_item_init_stats_update(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_item_init_stats_update failed: %d\n", + ret); + } +#endif + ret = zxdh_vf_port_delete(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_port_delete failed: %d\n", 
ret); + } + } +} + +uint32_t zxdh_uplink_phy_attr_set(DPP_PF_INFO_T *pf_info, uint8_t phy_port, + uint32_t attr, uint32_t value) +{ + if (phy_port == INVALID_PHY_PORT) + return 0; + + return dpp_uplink_phy_attr_set(pf_info, phy_port, attr, value); +} diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.h b/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.h new file mode 100644 index 000000000000..2a454d4cf1b2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_cmd.h @@ -0,0 +1,276 @@ +#ifndef _EN_AUX_CMD_H_ +#define _EN_AUX_CMD_H_ + +#include +#include "../msg_common.h" + +#define ZXDH_QRES_TBL_LEN (300) +#define ZXDH_QS_PAIRS (2) + +#define INVALID_PHY_PORT 0xff +#define ZXDH_PHY_PORT_MAX 9 +#define ZXDH_MAX_HASH_INDEX 6 //TODO:should is 5 + +/* HASH_FUNC TYPE */ +#define ZXDH_FUNC_TOP 0x04 +#define ZXDH_FUNC_XOR 0x02 +#define ZXDH_FUNC_CRC32 0x01 + +/* RX_NFC */ +#define ZXDH_NET_RX_FLOW_HASH_MV 4 +#define ZXDH_NET_RX_FLOW_HASH_SDT 2 +#define ZXDH_NET_RX_FLOW_HASH_SDFNT 1 + +/* RISCV OPCODE */ +#define RISC_TYPE_READ 0 +#define RISC_FIELD_PANEL_ID 5 +#define RISC_FIELD_PHYPORT_CHANNEL 6 +#define RISC_FIELD_HASHID_CHANNEL 10 +#define RISC_SERVER_TIME 0xF0 + +#define MAX_PANEL_ID 9 + +/* SPM STATS */ +#define ZXDH_SPM_STATS_OFFSET (0x24000 + 0x1000 + 408) +#define ZXDH_SPM_BYTES_OFFSET (0x24000 + 0xb000) + +/* ZF bar address bit[16:63] needs to be shifted left by 4 bits */ +#define TO_ZF_ADDR(addr) (((addr & 0xFFFFFFFFFFFF0000) << 4) | (addr & 0xFFFF)) + +#define ZXDH_NP_GLOBAL_PSN_ENABLE_BIT (28) + +//#define MAC_CONFIG_DEBUG 1 +enum riscv_op_code { + OP_CODE_WRITE = 1, + OP_CODE_MSGQ_CHAN = 2, + OP_CODE_DATA_CHAN = 3, + OP_CODE_MAX, +}; + +#define OP_CODE_TBL_STAT (0xaa) +#define MSG_STRUCT_HD_LEN 8 + +struct queue_index_message { + uint8_t type; + uint8_t field; + uint16_t ep_bdf; + uint16_t write_bytes; + uint16_t rsv; + uint16_t write_data[0]; +} __attribute__((packed)); + +struct cmd_hdr_recv { + uint8_t check; + uint8_t rsv; + uint16_t 
data_len_bytes; +}; + +struct cmd_tbl_ack { + struct cmd_hdr_recv hdr; + uint8_t phy_port; + uint8_t rsv[3]; +} __attribute__((packed)); + +enum zxdh_msg_chan_opc { + ZXDH_VPORT_GET = 4, + ZXDH_PHYPORT_GET = 6, +}; + +struct zxdh_debug_msg { + uint8_t opcode; + uint8_t phyport; + bool lldp_enable; +} __attribute__((packed)); + +struct zxdh_debug_rcv_msg { + uint8_t reps_states; + uint8_t lldp_enable; +} __attribute__((packed)); + +enum zxdh_en_link_speed_bit_indices { + SPM_SPEED_1X_1G = 2, + SPM_SPEED_1X_10G = 5, + SPM_SPEED_1X_25G = 6, + SPM_SPEED_1X_50G = 7, + SPM_SPEED_2X_100G = 8, + SPM_SPEED_4X_40G = 9, + SPM_SPEED_4X_100G = 10, + SPM_SPEED_4X_200G = 11, +}; + +enum zxdh_en_fec_mode_bit_indices { + SPM_FEC_NONE = 0, + SPM_FEC_BASER = 1, + SPM_FEC_RS528 = 2, + SPM_FEC_RS544 = 3, +}; + +enum zxdh_en_fc_mode_bit_indices { + SPM_FC_NONE = 0, + SPM_FC_PAUSE_RX = 1, + SPM_FC_PAUSE_TX = 2, + SPM_FC_PAUSE_FULL = 3, + SPM_FC_PFC_FULL = 4, +}; + +struct zxdh_en_module_eeprom_param { + uint8_t i2c_addr; + uint8_t bank; + uint8_t page; + uint8_t offset; + uint8_t length; +}; + +#define SFF_I2C_ADDRESS_LOW (0x50) +#define SFF_I2C_ADDRESS_HIGH (0x51) + +enum zxdh_module_id { + ZXDH_MODULE_ID_SFP = 0x3, + ZXDH_MODULE_ID_QSFP = 0xC, + ZXDH_MODULE_ID_QSFP_PLUS = 0xD, + ZXDH_MODULE_ID_QSFP28 = 0x11, + ZXDH_MODULE_ID_QSFP_DD = 0x18, + ZXDH_MODULE_ID_OSFP = 0x19, + ZXDH_MODULE_ID_DSFP = 0x1B, + ZXDH_MODULE_ID_QSFP_PLUS_WITH_CMIS = 0x1E, + ZXDH_MODULE_ID_SFP_DD_WITH_CMIS = 0x1F, + ZXDH_MODULE_ID_SFP_PLUS_WITH_CMIS = 0x20, +}; + +#define SPEED_MODES_TO_SPEED(speed_modes, speed) \ + do { \ + if (((speed_modes)&BIT(SPM_SPEED_1X_1G)) == \ + BIT(SPM_SPEED_1X_1G)) { \ + (speed) = SPEED_1000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_1X_10G)) == \ + BIT(SPM_SPEED_1X_10G)) { \ + (speed) = SPEED_10000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_1X_25G)) == \ + BIT(SPM_SPEED_1X_25G)) { \ + (speed) = SPEED_25000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_4X_40G)) == \ + 
BIT(SPM_SPEED_4X_40G)) { \ + (speed) = SPEED_40000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_1X_50G)) == \ + BIT(SPM_SPEED_1X_50G)) { \ + (speed) = SPEED_50000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_2X_100G)) == \ + BIT(SPM_SPEED_2X_100G)) { \ + (speed) = SPEED_100000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_4X_100G)) == \ + BIT(SPM_SPEED_4X_100G)) { \ + (speed) = SPEED_100000; \ + } else if (((speed_modes)&BIT(SPM_SPEED_4X_200G)) == \ + BIT(SPM_SPEED_4X_200G)) { \ + (speed) = SPEED_200000; \ + } else { \ + (speed) = SPEED_UNKNOWN; \ + } \ + } while (0) + +#define GET_VFID(vport) \ + ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) ? \ + (PF_VQM_VFID_OFFSET + EPID(vport) * 8 + FUNC_NUM(vport)) : \ + (EPID(vport) * 256 + VFUNC_NUM(vport))) + +#define DH_AUX_PF_ID_OFFSET(vport) (EPID(vport) * 8 + FUNC_NUM(vport)) + +#define NP_GET_PKT_CNT 0 +#define NP_CLEAR_PKT_CNT 1 + +struct zxdh_en_device; +int32_t get_common_table_msg(struct zxdh_en_device *en_dev, uint16_t pcie_id, + uint8_t field, void *ack); +int32_t zxdh_common_tbl_init(struct net_device *netdev, + union zxdh_msg *old_msg); +int32_t zxdh_en_phyport_init(struct zxdh_en_device *en_dev); +int32_t zxdh_en_autoneg_set(struct zxdh_en_device *en_dev, uint8_t enable, + uint32_t speed_modes); +int32_t zxdh_vport_stats_get(struct zxdh_en_device *en_dev); +int32_t zxdh_en_vport_pre_stats_get(struct zxdh_en_device *en_dev); +int32_t zxdh_mac_stats_get(struct zxdh_en_device *en_dev); +int32_t zxdh_en_udp_pkt_stats_get(struct zxdh_en_device *en_dev); +int32_t zxdh_mac_stats_clear(struct zxdh_en_device *en_dev); +int32_t zxdh_hash_id_get(struct zxdh_en_device *en_dev); +int32_t zxdh_en_fec_mode_set(struct zxdh_en_device *en_dev, uint32_t fec_cfg); +int32_t zxdh_en_fec_mode_get(struct zxdh_en_device *en_dev, uint32_t *fec_cap, + uint32_t *fec_cfg, uint32_t *fec_active); +int32_t zxdh_en_fc_mode_set(struct zxdh_en_device *en_dev, uint32_t fc_mode); +int32_t zxdh_en_fc_mode_get(struct 
zxdh_en_device *en_dev, uint32_t *fc_mode); +uint32_t zxdh_en_module_eeprom_read(struct zxdh_en_device *en_dev, + struct zxdh_en_module_eeprom_param *query, + uint8_t *data); +int32_t zxdh_lldp_enable_set(struct zxdh_en_device *en_dev, bool lldp_enable); +int32_t zxdh_sshd_enable_set(struct zxdh_en_device *en_dev, bool sshd_enable); +int32_t zxdh_vf_dpp_add_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr, uint8_t filter_flag); +int32_t zxdh_vf_dpp_del_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr, uint8_t filter_flag, + bool mac_flag); +int32_t zxdh_vf_dpp_dump_mac(struct zxdh_en_device *en_dev, + const uint8_t *dev_addr); +void zxdh_vport_uninit(struct net_device *netdev); +int32_t zxdh_pf_port_init(struct zxdh_en_device *en_dev, bool boot); +int32_t zxdh_vf_dpp_port_init(struct zxdh_en_device *en_dev); +int32_t zxdh_port_init(struct net_device *netdev); +int32_t zxdh_vf_egr_port_attr_set(struct zxdh_en_device *en_dev, uint32_t mode, + uint32_t value, uint8_t fow); +int32_t zxdh_vf_egr_port_attr_get(struct zxdh_en_device *en_dev, + ZXDH_SRIOV_VPORT_T *port_attr_entry); +int32_t zxdh_vf_rss_en_set(struct zxdh_en_device *en_dev, uint32_t enable); +int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev, + uint16_t num_changed); +int32_t zxdh_pf_macpcs_num_get(struct zxdh_en_device *en_dev); +int32_t zxdh_lldp_enable_get(struct zxdh_en_device *en_dev, + uint32_t *lldp_enable); +int32_t zxdh_indir_to_queue_map(struct zxdh_en_device *en_dev, + const uint32_t *indir); +int32_t zxdh_rxfh_set(struct zxdh_en_device *en_dev, uint32_t *queue_map); +void zxdh_rxfh_del(struct zxdh_en_device *en_dev); +void zxdh_u32_array_print(uint32_t *array, uint16_t size); +int32_t zxdh_en_firmware_version_get(struct zxdh_en_device *en_dev, + uint8_t *fw_version, + uint8_t *fw_version_len); +int32_t zxdh_panel_id_init(struct zxdh_en_device *en_dev); +int32_t zxdh_vf_port_promisc_set(struct zxdh_en_device *en_dev, uint8_t mode, + uint8_t value, uint8_t 
fow); +int32_t zxdh_phyport_get(struct zxdh_en_device *en_dev); +int32_t zxdh_vf_1588_call_np_interface(struct zxdh_en_device *en_dev); +int32_t zxdh_aux_alloc_pannel(struct zxdh_en_device *en_dev); +int8_t zxdh_debug_ip_get(struct zxdh_en_device *en_dev, int8_t *ip); +int32_t zxdh_vf_dpp_add_ipv6_mac(struct zxdh_en_device *en_dev, + const uint8_t *mac_addr); +int32_t zxdh_vf_dpp_del_ipv6_mac(struct zxdh_en_device *en_dev, + const uint8_t *mac_addr); +int32_t zxdh_vf_dpp_add_lacp_mac(struct zxdh_en_device *en_dev, + const uint8_t *mac_addr); +int32_t zxdh_vf_dpp_del_lacp_mac(struct zxdh_en_device *en_dev, + const uint8_t *mac_addr); +int32_t zxdh_spm_port_enable_cfg(struct zxdh_en_device *en_dev, + uint32_t enable); +uint32_t zxdh_uplink_phy_attr_set(DPP_PF_INFO_T *pf_info, uint8_t phy_port, + uint32_t attr, uint32_t value); +bool zxdh_en_is_panel_port(struct zxdh_en_device *en_dev); +int32_t zxdh_get_vf_err_stats(struct zxdh_en_device *en_dev, + zxdh_get_sw_stats *payload, + zxdh_sw_stats_reply *reply); +int32_t zxdh_cfg_misx_mode(struct zxdh_en_device *en_dev, uint16_t rx_msix_mode, + uint16_t tx_msix_mode); +int32_t zxdh_get_misx_mode(struct zxdh_en_device *en_dev, + uint16_t *rx_msix_mode, uint16_t *tx_msix_mode); +int32_t zxdh_get_coalesce_usecs(struct zxdh_en_device *en_dev, + uint32_t *rx_coalesce_usecs, + uint32_t *tx_coalesce_usecs); +int32_t zxdh_cfg_coalesce_usecs(struct zxdh_en_device *en_dev, + uint32_t rx_coalesce_usecs, + uint32_t tx_coalesce_usecs); +int32_t zxdh_dual_tor_switch(struct zxdh_en_device *en_dev, bool state); +int32_t zxdh_dual_tor_label_get(struct zxdh_en_device *en_dev); +int32_t zxdh_en_hash_key_recover(struct zxdh_en_device *en_dev); +int32_t zxdh_vf_add_fd(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, uint32_t *index); +int32_t zxdh_vf_get_fd(struct zxdh_en_device *en_dev, uint32_t index); +int32_t zxdh_vf_del_fd(struct zxdh_en_device *en_dev, uint32_t index); +int32_t zxdh_vf_fd_en_set(struct zxdh_en_device 
*en_dev, uint32_t enable); + +#endif /* END __ZXDH_EN_COMMAND_H_ */ diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.c b/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.c new file mode 100644 index 000000000000..0d94ccf747a4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.c @@ -0,0 +1,360 @@ +#include +#include +#include +#include "en_aux_eq.h" +#include "../en_ethtool/ethtool.h" + +int32_t dh_bond_pf_link_info_get(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t link_up = 0; + uint8_t link_info = 0; + uint8_t bit_value = 0; + + if (en_dev == NULL) { + LOG_ERR("null ptr\n"); + return -1; + } + + if (en_dev->init_comp_flag != AUX_INIT_COMPLETED) { + return 0; + } + + if (!en_dev->ops->is_bond(en_dev->parent)) { + LOG_DEBUG("isn't bond_pf exit\n"); + return 0; + } + + //读取state寄存器中第en_dev->link_check_bit位的值 + en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_info); + bit_value = (link_info >> en_dev->link_check_bit) & 0x01; + LOG_DEBUG("%s read VQM[0x%x]: link_check_bit[%d]-bit_value[%d]\n", + en_dev->netdev->name, link_info, en_dev->link_check_bit, + bit_value); + link_up |= bit_value; + + if (en_dev->link_up == link_up) { + LOG_DEBUG("%s link info is no changed, current link is %d\n", + en_dev->netdev->name, en_dev->link_up); + return 0; + } + + en_dev->link_up = link_up; + queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work); + + if (link_up == 0) { + netif_carrier_off(en_dev->netdev); + en_dev->speed = SPEED_UNKNOWN; + en_dev->duplex = DUPLEX_UNKNOWN; + LOG_INFO("%s is link down\n", en_dev->netdev->name); + } else { + LOG_INFO("%s is link up\n", en_dev->netdev->name); + queue_work(en_priv->events->wq, + &en_dev->link_info_irq_process_work); + } + + return 0; +} + +static int32_t dh_eq_async_link_info_int_bond_pf(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct dh_eq_async *eq_link_info_async = + container_of(nb, struct dh_eq_async, irq_nb); 
+ struct zxdh_en_priv *en_priv = + (struct zxdh_en_priv *)eq_link_info_async->priv; + + return dh_bond_pf_link_info_get(en_priv); +} + +int32_t dh_eq_async_link_info_int_process(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t link_up = 0; + uint8_t link_info = 0; + + //判断是否为bond_pf + if (en_dev == NULL) { + LOG_ERR("null ptr\n"); + return -1; + } + + if (en_dev->init_comp_flag != AUX_INIT_COMPLETED) { + return 0; + } + + if (en_dev->ops->is_bond(en_dev->parent)) { + LOG_DEBUG("is bond_pf, exit\n"); + return 0; + } + + if ((en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE0) || + (en_dev->ops->get_dev_type(en_dev->parent) == ZXDH_DEV_NE1)) { + return 0; + } + + if (en_dev->ops->is_upf(en_dev->parent)) { //是upf设备 + en_dev->ops->get_link_info_from_vqm( + en_dev->parent, + &link_info); //高四位phoport, 低四位link_up信息 + link_up = link_info & 0x0F; + LOG_INFO("[upf dev netdev %s] read VQM[0x%x]: link_up[%d]\n", + en_dev->netdev->name, link_info, link_up); + } else { + en_dev->ops->get_link_info_from_vqm(en_dev->parent, &link_up); + } + + en_dev->link_up = link_up; + queue_work(en_priv->events->wq, &en_dev->link_info_irq_update_np_work); + if (link_up == 0) { + en_dev->ops->set_pf_link_up(en_dev->parent, FALSE); + netif_carrier_off(en_dev->netdev); + en_dev->speed = SPEED_UNKNOWN; + en_dev->duplex = DUPLEX_UNKNOWN; + LOG_INFO("%s is link down\n", en_dev->netdev->name); + } else { + en_dev->ops->set_pf_link_up(en_dev->parent, TRUE); + LOG_INFO("%s is link up\n", en_dev->netdev->name); + if (en_dev->ops->is_upf(en_dev->parent)) { + netif_carrier_on(en_dev->netdev); + en_dev->speed = SPEED_200000; + en_dev->duplex = DUPLEX_FULL; + } else { + queue_work(en_priv->events->wq, + &en_dev->link_info_irq_process_work); + } + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + queue_work(en_priv->events->wq, + &en_dev->link_info_irq_update_vf_work); + } + return 0; +} + +static int32_t 
dh_eq_async_link_info_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_link_info_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct zxdh_en_priv *en_priv = + (struct zxdh_en_priv *)eq_link_info_async->priv; + return dh_eq_async_link_info_int_process(en_priv); +} + +static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct zxdh_en_priv *en_priv = + (struct zxdh_en_priv *)eq_riscv_async->priv; + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_events *events = en_priv->events; + struct dh_event_nb *event_nb = NULL; + uint64_t virt_addr = 0; + int32_t event_type = 0; + uint16_t event_idx = 0; + uint16_t i = 0; + + virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_BAR_MSG_OFFSET; + event_idx = zxdh_get_event_id(virt_addr, MSG_CHAN_END_RISC, + MSG_CHAN_END_PF); + event_type = dh_eq_event_type_get(event_idx); + + if (events == NULL) { + LOG_ERR("riscv_irq trigger, events is null\n"); + return 0; + } + + for (i = 0; i < events->evt_num; i++) { + event_nb = &events->notifiers[i]; + + if (event_type == event_nb->nb.event_type) { + LOG_DEBUG("en_aux event_id[%d] is called\n", + event_type); + en_dev->ops->events_call_chain(en_dev->parent, + event_type, NULL); + return NOTIFY_STOP_MASK; + } + } + + return 0; +} + +static int32_t dh_eq_async_pf_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_pf_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)eq_pf_async->priv; + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_events *events = en_priv->events; + struct dh_event_nb *event_nb = NULL; + uint64_t virt_addr = 0; + int32_t event_type = 0; + uint16_t event_idx = 0; + uint16_t i = 0; + + if (en_dev->ops->get_coredev_type(en_dev->parent) 
== DH_COREDEV_PF) { + return 0; + } + + virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET; + event_idx = + zxdh_get_event_id(virt_addr, MSG_CHAN_END_PF, MSG_CHAN_END_VF); + event_type = dh_eq_event_type_get(event_idx); + //LOG_INFO("------------- event_idx: %d, event_type: %d------------\n", event_idx, event_type); + + for (i = 0; i < events->evt_num; i++) { + event_nb = &events->notifiers[i]; + + if (event_type == event_nb->nb.event_type) { + LOG_INFO("en_aux async pf irq_handler called\n"); + en_dev->ops->events_call_chain(en_dev->parent, + event_type, NULL); + return NOTIFY_STOP_MASK; + } + } + + return 0; +} + +struct dh_aux_async_eq_table { + char name[64]; + notifier_fn_t async_int; +}; + +static struct dh_aux_async_eq_table dh_aux_async_eq_tbl[] = { + { "riscv", dh_eq_async_riscv_int }, + { "pf", dh_eq_async_pf_int }, + { "link_info", dh_eq_async_link_info_int }, + { "link_info", dh_eq_async_link_info_int_bond_pf }, +}; + +static int32_t dh_aux_setup_async_eq(struct zxdh_en_priv *en_priv, + struct dh_eq_async *eq, const char *name, + notifier_fn_t call) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + spin_lock_init(&eq->lock); //unused + eq->priv = en_priv; + eq->irq_nb.notifier_call = call; + err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, true); + if (err != 0) { + LOG_ERR("failed to enable %s EQ %d\n", name, err); + } + + return err; +} + +static void cleanup_async_eq(struct zxdh_en_priv *en_priv, + struct dh_eq_async *eq, const char *name) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + err = en_dev->ops->async_eq_enable(en_dev->parent, eq, name, false); + if (err != 0) { + LOG_ERR("failed to disable %s EQ %d\n", name, err); + } +} + +static void destroy_async_eqs(struct zxdh_en_priv *en_priv) +{ + struct dh_eq_table *table = &en_priv->eq_table; + struct dh_aux_eq_table *table_priv = table->priv; + int32_t i = 0; + + for (i 
= 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i) { + cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[i], + dh_aux_async_eq_tbl[i].name); + } +} + +void dh_aux_eq_table_destroy(struct zxdh_en_priv *en_priv) +{ + destroy_async_eqs(en_priv); +} + +void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv) +{ + kvfree(en_priv->eq_table.priv); +} + +int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv) +{ + struct dh_eq_table *eq_table; + struct dh_aux_eq_table *table_priv = NULL; + int32_t err = 0; + uint32_t i = 0; + + eq_table = &en_priv->eq_table; + + table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL); + if (unlikely(table_priv == NULL)) { + LOG_ERR("dh_aux_eq_table kvzalloc failed\n"); + err = -ENOMEM; + goto err_table_priv; + } + + eq_table->priv = table_priv; + + mutex_init(&eq_table->lock); + for (i = 0; i < DH_EVENT_TYPE_MAX; i++) { + ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); + } + + eq_table->irq_table = NULL; + + return 0; + +err_table_priv: + return err; +} + +static int32_t create_async_eqs(struct zxdh_en_priv *en_priv) +{ + struct dh_eq_table *eq_table = &en_priv->eq_table; + struct dh_aux_eq_table *table_priv = eq_table->priv; + int32_t err = 0; + int32_t i = 0; + int32_t j = 0; + + for (i = 0; i < ZXDH_AUX_ASYNC_EQ_NUM; ++i) { + err = dh_aux_setup_async_eq(en_priv, + &table_priv->async_eq_tbl[i], + dh_aux_async_eq_tbl[i].name, + dh_aux_async_eq_tbl[i].async_int); + if (err != 0) { + LOG_ERR("Failed to setup aux_async_eq_tbl[%d]\n", i); + goto err_setup_async_eq; + } + } + + return err; + +err_setup_async_eq: + for (j = 0; j < i; ++j) { + cleanup_async_eq(en_priv, &table_priv->async_eq_tbl[j], + dh_aux_async_eq_tbl[j].name); + } + return err; +} + +int32_t dh_aux_eq_table_create(struct zxdh_en_priv *en_priv) +{ + int32_t err = 0; + + err = create_async_eqs(en_priv); + if (err != 0) { + LOG_ERR("Failed to create async EQs\n"); + } + + return err; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.h 
b/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.h new file mode 100644 index 000000000000..2d2796d12c31 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_eq.h @@ -0,0 +1,24 @@ +#ifndef __EN_AUX_EQ_H__ +#define __EN_AUX_EQ_H__ + +#include "../en_aux.h" +#ifdef __cplusplus +extern "C" { +#endif + +#define ZXDH_AUX_ASYNC_EQ_NUM 4 +struct dh_aux_eq_table { + struct dh_eq_async async_eq_tbl[ZXDH_AUX_ASYNC_EQ_NUM]; +}; + +int32_t dh_aux_eq_table_init(struct zxdh_en_priv *en_priv); +int32_t dh_aux_eq_table_create(struct zxdh_en_priv *en_priv); +void dh_aux_eq_table_destroy(struct zxdh_en_priv *en_priv); +void dh_aux_eq_table_cleanup(struct zxdh_en_priv *en_priv); +int32_t dh_eq_async_link_info_int_process(struct zxdh_en_priv *en_priv); +int32_t dh_bond_pf_link_info_get(struct zxdh_en_priv *en_priv); +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_events.c b/drivers/net/ethernet/dinghai/en_aux/en_aux_events.c new file mode 100644 index 000000000000..776f8529ee0c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_events.c @@ -0,0 +1,1511 @@ +#include +#include +#include +#include +#include +#include +#include +#include "en_aux_events.h" +#include "en_aux_eq.h" +#include "en_aux_cmd.h" +#include "../msg_common.h" +#include "../en_np/table/include/dpp_tbl_api.h" +#include "../zxdh_tools/zxdh_tools_netlink.h" +#include "../zxdh_tools/zxdh_tools_ioctl.h" +#include "dcbnl/en_dcbnl_api.h" +#include "zxic_common.h" +#include +#include +#include +#include +#include +#include // 对于VLAN设备 +#include // 对于bonding设备 +#include +#include +#include "../en_ethtool/ethtool.h" + +static struct mutex rdma_lock; +void zxdh_aux_unload(struct zxdh_en_priv *en_priv); +int32_t zxdh_aux_load(struct zxdh_en_priv *en_priv); +static int32_t pf2vf_notifier(struct notifier_block *, unsigned long, void *); +static int32_t riscv2aux_notifier(struct notifier_block *, unsigned long, + void *); +static 
int32_t aux_unload_notifier(struct notifier_block *, unsigned long, + void *); +static int32_t aux_load_notifier(struct notifier_block *, unsigned long, + void *); +static int32_t aux_state_notifier(struct notifier_block *, unsigned long, + void *); +void zxdh_eth_config_show(struct net_device *netdev); +int32_t zxdh_eth_config_recover(struct net_device *netdev); + +void rx_mode_set_handler(struct work_struct *work); + +static struct dh_nb aux_events[] = { + { .nb.notifier_call = pf2vf_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_VF }, + { .nb.notifier_call = aux_unload_notifier, + .event_type = DH_EVENT_TYPE_AUX_UNLOAD }, + { .nb.notifier_call = aux_load_notifier, + .event_type = DH_EVENT_TYPE_AUX_LOAD }, + { .nb.notifier_call = aux_state_notifier, + .event_type = DH_EVENT_TYPE_AUX_STATE }, + { .nb.notifier_call = riscv2aux_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX }, +}; + +static int32_t do_pf_vf_inet6_update_mac_to_np(struct zxdh_en_device *en_dev, + const struct in6_addr *ipv6_addr, + unsigned long action) +{ + int32_t ret = 0; + struct in6_addr sol_addr = { 0 }; + uint8_t mcast_mac[ETH_ALEN]; + + // 打印IPv6地址,使用%pI6c格式化IPv6地址,确保正确显示 + DH_LOG_DEBUG( + MODULE_PF, + "IPv6 address changed on interface %s, %s address: %pI6c\n", + en_dev->netdev->name, + (action == 1) ? "add" : + (action == 2) ? 
"del" : + "unknown action with", + ipv6_addr); + // Calculate the multicast MAC address from the IPv6 address + addrconf_addr_solict_mult(ipv6_addr, &sol_addr); + ipv6_eth_mc_map(&sol_addr, mcast_mac); + DH_LOG_DEBUG(MODULE_PF, "Multicast MAC Address: %pM\n", mcast_mac); + + switch (action) { + case NETDEV_UP: { + ret = zxdh_ip6mac_add_safe(en_dev, ipv6_addr->s6_addr32, + mcast_mac); + if (ret != 0) { + LOG_ERR("zxdh_ip6mac_add_safe failed"); + } + break; + } + case NETDEV_DOWN: { + ret = zxdh_ip6mac_del_safe(en_dev, ipv6_addr->s6_addr32, + mcast_mac); + if (ret != 0) { + LOG_ERR("zxdh_ip6mac_del_safe failed"); + } + break; + } + default: + break; + } + return ret; +} + +static int32_t do_pf_vf_vxlan_update_mac_to_np(struct zxdh_en_device *en_dev, + uint8_t *mcast_mac, + unsigned long action) +{ + int32_t ret = 0; + + switch (action) { + case NETDEV_UP: { + ret = zxdh_ip4mac_add(en_dev, mcast_mac, action); + if (ret != 0) { + LOG_ERR("zxdh_ip4mac_add failed\n"); + return ret; + } + break; + } + case NETDEV_DOWN: { + ret = zxdh_ip4mac_del(en_dev, mcast_mac, action); + if (ret != 0) { + LOG_ERR("zxdh_ip6mac_del failed\n"); + return ret; + } + break; + } + default: + break; + } + return ret; +} + +static int32_t do_bond_master_inet6_update_mac_to_np( + struct net_device *notifier_dev, const struct in6_addr *ipv6_addr, + struct zxdh_en_device *en_dev, unsigned long action) +{ + int32_t ret = 0; + struct list_head *iter = NULL; + struct slave *slave_dev = NULL; + struct bonding *bond = netdev_priv(notifier_dev); + + // 遍历所有slave设备 + if (!bond_has_slaves(bond)) { + DH_LOG_DEBUG(MODULE_PF, "Bond device %s don't have slave\n", + notifier_dev->name); + return 0; + } + + bond_for_each_slave(bond, slave_dev, iter) { + if (strcmp(en_dev->netdev->name, slave_dev->dev->name) != 0) { + continue; + } + DH_LOG_DEBUG(MODULE_PF, + "Bond device %s have slave device: %s\n", + notifier_dev->name, slave_dev->dev->name); + ret = do_pf_vf_inet6_update_mac_to_np(en_dev, ipv6_addr, + 
action); + if (ret != 0) { + return ret; + } + } + return 0; +} + +static int32_t inet6_addr_change_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct inet6_ifaddr *ifa = NULL; + struct net_device *notifier_dev = NULL; //触发事件的网络设备 + struct zxdh_en_device *en_dev = + container_of(nb, struct zxdh_en_device, + ipv6_notifier); //处理此回调函数的设备 + + if (data == NULL) { + LOG_ERR("data is NULL"); + return NOTIFY_OK; + } + + ifa = (struct inet6_ifaddr *)data; + notifier_dev = ifa->idev->dev; + + if (notifier_dev == NULL) { + LOG_ERR("notifier_dev is NULL"); + return NOTIFY_OK; + } + + // 检查是否为vlan设备 + if (is_vlan_dev(notifier_dev)) + notifier_dev = vlan_dev_real_dev(notifier_dev); + + // 检查是否为bond master设备 + if (netif_is_bond_master(notifier_dev)) + return do_bond_master_inet6_update_mac_to_np( + notifier_dev, &ifa->addr, en_dev, action); + + // 检查是否为自定义设备 + if (strcmp(en_dev->netdev->name, notifier_dev->name) == 0) + return do_pf_vf_inet6_update_mac_to_np(en_dev, &ifa->addr, + action); + + return NOTIFY_OK; +} + +static void multicast_ipv4_to_mac(struct in_addr ipv4_addr, uint8_t *mac_addr) +{ + uint32_t ip = ntohl(ipv4_addr.s_addr); + + mac_addr[0] = MULTI_FLAG; + mac_addr[1] = IPV4_TYPE_FLAG; + mac_addr[2] = GLOBAL_FLAG; + mac_addr[3] = (ip >> BIT16) & + BIT_23_L; /* Take bits 16-23 from the IP address*/ + mac_addr[4] = (ip >> BIT8) & + BIT_15_L; /* Take bits 8-15 from the IP address */ + mac_addr[5] = ip & BIT_7_L; /* Take bits 0-7 from the IP address */ + + return; +} + +static int32_t vxlan_netdev_change_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct vxlan_dev *vxlan = NULL; + struct net_device *notifier_dev = + netdev_notifier_info_to_dev(data); /* 通知设备 */ + struct zxdh_en_device *en_dev = container_of( + nb, struct zxdh_en_device, vxlan_notifier); /* 父设备相关 */ + struct vxlan_config *cfg = NULL; + uint32_t ipv4_addr = 0; + struct in6_addr *ipv6_addr = NULL; + uint8_t mac_addr[6] = { 0 }; + + int32_t 
ret = 0; + + if (notifier_dev == NULL) { + LOG_ERR("notifier_dev is NULL\n"); + return NOTIFY_BAD; + } + + if (en_dev == NULL) { + LOG_ERR("en_dev is NULL\n"); + return NOTIFY_BAD; + } + + /* 检查是否为vxlan设备 */ + if (!(notifier_dev->rtnl_link_ops && + strcmp(notifier_dev->rtnl_link_ops->kind, "vxlan") == 0)) { + return NOTIFY_DONE; + } + + /* 获取vxlan设备的父设备信息 */ + en_dev = container_of(nb, struct zxdh_en_device, vxlan_notifier); + if (en_dev == NULL) { + LOG_ERR("en_dev is NULL\n"); + return NOTIFY_BAD; + } + + vxlan = netdev_priv(notifier_dev); + cfg = &vxlan->cfg; + + /* 判断ip地址类型 */ + if (cfg->remote_ip.sa.sa_family == AF_INET) { + ipv4_addr = cfg->remote_ip.sin.sin_addr.s_addr; + if ((ipv4_addr & htonl(0xF0000000)) != htonl(0xE0000000)) { + return NOTIFY_DONE; + } + /* 转成对应的组播mac */ + multicast_ipv4_to_mac(cfg->remote_ip.sin.sin_addr, mac_addr); + LOG_DEBUG( + "VXLAN device %s IPv4 address %pI4 to multi mac %pM\n", + notifier_dev->name, &cfg->remote_ip.sin.sin_addr, + mac_addr); + } else if (cfg->remote_ip.sa.sa_family == AF_INET6) { + ipv6_addr = &cfg->remote_ip.sin6.sin6_addr; + if (ipv6_addr->s6_addr[0] != 0xFF) { + return NOTIFY_DONE; + } + /* 转成对应的组播mac */ + ipv6_eth_mc_map(ipv6_addr, mac_addr); + LOG_DEBUG( + "VXLAN device %s IPv6 address %pI6c to multi mac %pM\n", + notifier_dev->name, ipv6_addr, mac_addr); + } else { + LOG_INFO("Unsupported address family\n"); + } + + ret = do_pf_vf_vxlan_update_mac_to_np(en_dev, mac_addr, action); + if (ret != 0) { + LOG_ERR("do_pf_vf_vxlan_update_mac_to_np failed\n"); + return NOTIFY_BAD; + } + return NOTIFY_OK; +} + +static void vf_link_info_update_handler(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = container_of( + _work, struct zxdh_en_device, vf_link_info_update_work); + union zxdh_msg *msg = NULL; + struct zxdh_vf_item *vf_item = NULL; + int32_t err = 0; + uint16_t vf_idx = 0; + struct pci_dev *pdev = NULL; + uint16_t num_vfs = 0; + bool pf_link_up = false; + struct zxdh_bar_extra_para para = { 0 }; 
+ + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent); + pdev = en_dev->ops->get_pdev(en_dev->parent); + num_vfs = pci_num_vf(pdev); + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + msg->payload.hdr_vf.op_code = ZXDH_SET_VF_LINK_STATE; + msg->payload.link_state_msg.is_link_force_set = FALSE; + msg->payload.link_state_msg.link_up = pf_link_up; + msg->payload.link_state_msg.speed = en_dev->speed; + msg->payload.link_state_msg.autoneg_enable = + en_dev->autoneg_enable; + msg->payload.link_state_msg.supported_speed_modes = + en_dev->supported_speed_modes; + msg->payload.link_state_msg.advertising_speed_modes = + en_dev->advertising_speed_modes; + msg->payload.hdr_vf.dst_pcie_id = + FIND_VF_PCIE_ID(en_dev->pcie_id, vf_idx); + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + if (vf_item->is_probed) { + msg->payload.link_state_msg.link_forced = + vf_item->link_forced; + err = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_PF_BAR_MSG_TO_VF, + msg, msg, ¶); + if (err != 0) { + LOG_ERR("failed to update VF[%d]\n", vf_idx); + } + } + } + kfree(msg); +} + +static void link_info_irq_update_vf_handler(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = container_of( + _work, struct zxdh_en_device, link_info_irq_update_vf_work); + struct zxdh_vf_item *vf_item = NULL; + int32_t err = 0; + uint16_t vf_idx = 0; + struct pci_dev *pdev = NULL; + uint16_t num_vfs = 0; + bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent); + uint16_t func_no = 0; + uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id); + union zxdh_msg *msg = NULL; + uint8_t link_info = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + 
ZXDH_AUX_INIT_COMP_CHECK(en_dev); + if (en_dev->ops->is_upf(en_dev->parent)) { + link_info = (en_dev->phy_port & 0x0F) << 4 | + (en_dev->link_up & 0x0F); + LOG_DEBUG("upf update vf link_info: %u\n", link_info); + } else { + link_info = pf_link_up ? 1 : 0; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + msg->payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY; + msg->payload.hdr_to_agt.pcie_id = en_dev->pcie_id; + + pdev = en_dev->ops->get_pdev(en_dev->parent); + num_vfs = pci_num_vf(pdev); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + LOG_INFO("vf_idx:%d, vf_item->link_forced %d, is_probed %d\n", + vf_idx, vf_item->link_forced, vf_item->is_probed); + if (vf_item->link_forced == FALSE) { + en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, + link_info); + if (vf_item->is_probed) { + func_no = GET_FUNC_NO(pf_no, vf_idx); + msg->payload.pcie_msix_msg + .func_no[msg->payload.pcie_msix_msg + .num++] = func_no; + } + } + } + if (msg->payload.pcie_msix_msg.num > 0) { + LOG_INFO("%s update %d vf link info\n", en_dev->netdev->name, + msg->payload.pcie_msix_msg.num); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("failed to update VF link info\n"); + } + } + + kfree(msg); +} + +static void link_info_irq_process_handler(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = container_of( + _work, struct zxdh_en_device, link_info_irq_process_work); + int32_t ret = 0; + struct link_info_struct link_info_val = { 0 }; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + + if (!zxdh_en_is_panel_port(en_dev)) + return; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + 
LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_LINK_INFO_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("get speed and duplex from agent failed: %d\n", ret); + kfree(msg); + return; + } + en_dev->speed = msg->reps.mac_set_msg.speed; + en_dev->curr_speed_modes = msg->reps.mac_set_msg.speed_modes; + en_dev->duplex = msg->reps.mac_set_msg.duplex; + LOG_INFO("netdev:%s, phy_port:0x%x, speed:%d, duplex:0x%x\n", + en_dev->netdev->name, en_dev->phy_port, en_dev->speed, + en_dev->duplex); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + link_info_val.speed = en_dev->speed; + link_info_val.autoneg_enable = en_dev->autoneg_enable; + link_info_val.supported_speed_modes = + en_dev->supported_speed_modes; + link_info_val.advertising_speed_modes = + en_dev->advertising_speed_modes; + link_info_val.duplex = en_dev->duplex; + + en_dev->ops->update_pf_link_info(en_dev->parent, + &link_info_val); + } + + if (en_dev->speed != SPEED_UNKNOWN) + netif_carrier_on(en_dev->netdev); + + kfree(msg); + + return; +} + +static void link_info_irq_update_np_work_handler(struct work_struct *_work) +{ + int32_t ret = 0; + struct zxdh_en_device *en_dev = container_of( + _work, struct zxdh_en_device, link_info_irq_update_np_work); + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + if (!en_dev->ops->is_bond(en_dev->parent)) { + if (!netif_running(en_dev->netdev)) { + return; + } + if (en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_VF) { + zxdh_vf_egr_port_attr_set(en_dev, SRIOV_VPORT_IS_UP, + en_dev->link_up, 0); + } else { + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_IS_UP, + en_dev->link_up); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_IS_UP %d 
failed, ret:%d\n", + en_dev->link_up, ret); + return; + } + if (en_dev->is_hwbond || + en_dev->ops->is_special_bond(en_dev->parent)) { + dpp_uplink_phy_attr_set(&pf_info, + en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, + en_dev->link_up); + } + } + return; + } + + if (!en_dev->link_up) { + zxdh_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, 0); + } else { + if (en_dev->netdev->flags & IFF_UP) { + zxdh_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_IS_UP, 1); + } + } +} + +static void en_aux_spoof_check(struct zxdh_en_device *en_dev) +{ + uint64_t prev_ssvpc_num = 0; + uint16_t en_aux_pf_id = 0; + uint32_t ret = 0; + uint16_t num_vfs = 0; + uint64_t ssvpc_incr = 0; + struct pci_dev *pdev = NULL; + struct dh_core_dev *dh_dev = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + dh_dev = en_dev->parent; + pdev = en_dev->ops->get_pdev(dh_dev); + num_vfs = pci_num_vf(pdev); + + if (!IS_PF(en_dev->vport)) { + return; + } + if (num_vfs == 0) { + return; + } + prev_ssvpc_num = en_dev->last_tx_vport_ssvpc_packets; + en_aux_pf_id = DH_AUX_PF_ID_OFFSET(en_dev->vport); + // spoof static register not clear to 0 after read + ret = dpp_stat_spoof_packet_drop_cnt_get( + &pf_info, en_aux_pf_id, NP_GET_PKT_CNT, + &(en_dev->last_tx_vport_ssvpc_packets)); + if (ret != 0) { + LOG_ERR("Failed to get spoof check dropped packets number.\n"); + return; + } + ssvpc_incr = en_dev->last_tx_vport_ssvpc_packets - prev_ssvpc_num; + if (!ssvpc_incr) { + return; + } + LOG_DEBUG("%llu Spoofed packets detected in EP%d, PF%d\n", ssvpc_incr, + EPID(en_dev->vport), FUNC_NUM(en_dev->vport)); + return; +} + +static void en_aux_service_task(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = + container_of(_work, struct zxdh_en_device, service_task); + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + en_aux_spoof_check(en_dev); +} + +static bool en_aux_all_vfs_spoof_check_off(struct zxdh_en_device 
*en_dev) +{ + uint16_t vf_idx = 0; + int32_t num_vfs = 0; + struct pci_dev *pdev = NULL; + struct zxdh_pf_device *pf_dev = NULL; + struct dh_core_dev *dh_dev = NULL; + dh_dev = en_dev->parent; + pdev = en_dev->ops->get_pdev(dh_dev); + pf_dev = dh_core_priv(dh_dev->parent); + + num_vfs = pci_num_vf(pdev); + if (num_vfs == 0) { + return true; + } + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + if (pf_dev->vf_item[vf_idx].spoofchk == true) { + return false; + } + } + return true; +} + +static void en_aux_service_timer(struct timer_list *t) +{ + unsigned long next_event_offset = HZ * 2; + struct zxdh_en_device *en_dev = from_timer(en_dev, t, service_timer); + struct zxdh_en_priv *en_priv = + container_of(en_dev, struct zxdh_en_priv, edev); + bool all_vfs_spoof_check_off_flag = + en_aux_all_vfs_spoof_check_off(en_dev); + + /* Reset the timer */ + mod_timer(&en_dev->service_timer, next_event_offset + jiffies); + if (!all_vfs_spoof_check_off_flag) { + queue_work(en_priv->events->wq, &en_dev->service_task); + } +} + +static void en_aux_service_riscv_task(struct work_struct *_work) +{ + int32_t retval = 0; + time64_t time64; + struct rtc_time tm; + struct zxdh_en_device *en_dev = + container_of(_work, struct zxdh_en_device, service_riscv_task); + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + if (!IS_PF(en_dev->vport)) { + kfree(msg); + return; + } + + msg->payload.hdr_to_cmn.pcie_id = en_dev->pcie_id; + ; + msg->payload.hdr_to_cmn.write_bytes = 9; + msg->payload.hdr_to_cmn.type = RISC_SERVER_TIME; + msg->payload.hdr_to_cmn.field = 0; + + time64 = ktime_get_real_seconds(); + time64 += 28800; //CST比UST晚八个小时 + rtc_time64_to_tm(time64, &tm); + + msg->payload.time_cfg_msg.tmmng_type 
= 0xF0; + msg->payload.time_cfg_msg.dir = 0x2; + msg->payload.time_cfg_msg.year = tm.tm_year + 1900; + msg->payload.time_cfg_msg.month = tm.tm_mon + 1; + msg->payload.time_cfg_msg.day = tm.tm_mday; + msg->payload.time_cfg_msg.hour = tm.tm_hour; + msg->payload.time_cfg_msg.min = tm.tm_min; + msg->payload.time_cfg_msg.sec = tm.tm_sec; + + retval = en_dev->ops->msg_send_cmd( + en_dev->parent, MODULE_PF_TIMER_TO_RISC_MSG, msg, msg, ¶); + if (retval != 0) { + LOG_ERR("zxdh_send_command_to_riscv failed: %d\n", retval); + en_dev->time_sync_done = false; + } else { + LOG_DEBUG("send msg timer to riscv:%d-%d-%d %d:%d:%d\n", + msg->payload.time_cfg_msg.year, + msg->payload.time_cfg_msg.month, + msg->payload.time_cfg_msg.day, + msg->payload.time_cfg_msg.hour, + msg->payload.time_cfg_msg.min, + msg->payload.time_cfg_msg.sec); + en_dev->time_sync_done = true; + } + + kfree(msg); +} + +static void en_aux_service_riscv_timer(struct timer_list *t) +{ + unsigned long next_event_offset; + struct zxdh_en_device *en_dev = + from_timer(en_dev, t, service_riscv_timer); + struct zxdh_en_priv *en_priv = + container_of(en_dev, struct zxdh_en_priv, edev); + + if (en_dev->time_sync_done) { + next_event_offset = HZ * 259200; // 3天 + } else { + next_event_offset = HZ * 60; // 60秒 + } + + /* Reset the timer */ + mod_timer(&en_dev->service_riscv_timer, next_event_offset + jiffies); + queue_work(en_priv->events->wq, &en_dev->service_riscv_task); +} + +static void pf2vf_msg_proc_work_handler(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = + container_of(_work, struct zxdh_en_device, pf2vf_msg_proc_work); + uint64_t virt_addr = 0; + + LOG_DEBUG("is called\n"); + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_BAR_MSG_OFFSET + ZXDH_BAR_PFVF_MSG_OFFSET; + zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, en_dev); +} + +static int32_t pf2vf_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + 
struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx; + + LOG_DEBUG("is called\n"); + queue_work(en_priv->events->wq, &en_priv->edev.pf2vf_msg_proc_work); + + return NOTIFY_OK; +} + +static void riscv2aux_msg_proc_work_handler(struct work_struct *_work) +{ + struct zxdh_en_device *en_dev = container_of( + _work, struct zxdh_en_device, riscv2aux_msg_proc_work); + uint64_t virt_addr = 0; + uint16_t src = MSG_CHAN_END_RISC; + uint16_t dst = MSG_CHAN_END_PF; + + ZXDH_AUX_INIT_COMP_CHECK(en_dev); + virt_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0) + + ZXDH_BAR_MSG_OFFSET; + zxdh_bar_irq_recv(src, dst, virt_addr, en_dev); +} + +static int32_t riscv2aux_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx; + + LOG_DEBUG("is called\n"); + queue_work(en_priv->events->wq, &en_priv->edev.riscv2aux_msg_proc_work); + return NOTIFY_OK; +} + +typedef int32_t (*zxdh_rdma_event_handler)(struct net_device *netdev, + uint8_t event_type, void *data); +static zxdh_rdma_event_handler zxdh_rdma_events_handler; +void zxdh_rdma_events_register(zxdh_rdma_event_handler callback) +{ + if (zxdh_rdma_events_handler == NULL) + zxdh_rdma_events_handler = callback; +} +EXPORT_SYMBOL(zxdh_rdma_events_register); + +void zxdh_rdma_events_unregister(void) +{ + zxdh_rdma_events_handler = NULL; +} +EXPORT_SYMBOL(zxdh_rdma_events_unregister); + +int32_t zxdh_rdma_events_call(struct net_device *netdev, uint8_t event_type, + void *data) +{ + if (zxdh_rdma_events_handler) + return zxdh_rdma_events_handler(netdev, event_type, data); + + return 0; +} + +static int32_t aux_unload_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct zxdh_en_priv *en_priv = 
(struct zxdh_en_priv *)event_nb->ctx; + struct zxdh_en_device *en_dev = &en_priv->edev; + + HEAL_INFO("%s is called\n", en_dev->netdev->name); + zxdh_rdma_events_call(en_dev->netdev, ZXDH_RDMA_HEALTH_EVENT, NULL); + zxdh_aux_unload(en_priv); + return NOTIFY_OK; +} + +static int32_t aux_load_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx; + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + HEAL_INFO("%s is called\n", en_dev->netdev->name); + err = zxdh_aux_load(en_priv); + if (err != 0) { + *((int32_t *)data) = err; + return NOTIFY_OK; + } + + if (en_dev->is_rdma_aux_plug) { + mutex_lock(&rdma_lock); + en_dev->ops->unplug_adev(en_dev->parent, RDMA_AUX_DEVICE); + en_dev->ops->plug_adev(en_dev->parent, RDMA_AUX_DEVICE); + mutex_unlock(&rdma_lock); + } + + return NOTIFY_OK; +} + +static int32_t aux_state_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct zxdh_en_priv *en_priv = (struct zxdh_en_priv *)event_nb->ctx; + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->device_state == *((uint8_t *)data)) + return NOTIFY_OK; + + HEAL_INFO("%s device_state update: %d\n", en_dev->netdev->name, + *((uint8_t *)data)); + en_dev->device_state = *((uint8_t *)data); + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) { + netif_tx_stop_all_queues(en_dev->netdev); + netif_carrier_off(en_dev->netdev); + en_dev->link_up = false; + } else if (en_dev->device_state == ZXDH_DEVICE_STATE_UP) { + netif_tx_wake_all_queues(en_dev->netdev); + if (en_dev->ops->is_bond(en_dev->parent)) + dh_bond_pf_link_info_get(en_priv); + else + dh_eq_async_link_info_int_process(en_priv); + } + + return NOTIFY_OK; +} + +void plug_adev_work_handler(struct work_struct *work) +{ + struct 
zxdh_en_device *en_dev = + container_of(work, struct zxdh_en_device, plug_adev_work); + + en_dev->ops->plug_adev(en_dev->parent, RDMA_AUX_DEVICE); + en_dev->is_rdma_aux_plug = true; + en_dev->ops->is_rdma_aux_plug(en_dev->parent, en_dev->is_rdma_aux_plug, + TRUE); +} + +void unplug_adev_work_handler(struct work_struct *work) +{ + struct zxdh_en_device *en_dev = + container_of(work, struct zxdh_en_device, unplug_adev_work); + + en_dev->ops->unplug_adev(en_dev->parent, RDMA_AUX_DEVICE); + en_dev->is_rdma_aux_plug = false; + en_dev->ops->is_rdma_aux_plug(en_dev->parent, en_dev->is_rdma_aux_plug, + TRUE); +} + +typedef uint32_t (*zxdh_pf_msg_func)(zxdh_msg_info *msg, zxdh_reps_info *reps, + struct zxdh_en_device *en_dev); + +typedef struct { + zxdh_msg_op_code op_code; + uint8_t proc_name[64]; + zxdh_pf_msg_func msg_proc; +} zxdh_pf_msg_proc; + +static uint32_t zxdh_set_vf_link_state(zxdh_msg_info *msg, zxdh_reps_info *reps, + struct zxdh_en_device *en_dev) +{ + uint32_t ret = 0; + uint16_t vf_idx = msg->hdr_vf.dst_pcie_id & (0xff); + + if (!msg->link_state_msg.is_link_force_set) { + en_dev->speed = msg->link_state_msg.speed; + en_dev->autoneg_enable = msg->link_state_msg.autoneg_enable; + en_dev->supported_speed_modes = + msg->link_state_msg.supported_speed_modes; + en_dev->advertising_speed_modes = + msg->link_state_msg.advertising_speed_modes; + if (msg->link_state_msg.link_forced) { + return 0; + } + } + + en_dev->ops->set_pf_link_up(en_dev->parent, + msg->link_state_msg.link_up); + if (en_dev->ops->get_pf_link_up(en_dev->parent)) { + netif_carrier_on(en_dev->netdev); + } else { + netif_carrier_off(en_dev->netdev); + } + LOG_DEBUG( + "[VF GET MSG FROM PF]--VF[%d] link_state[%s] update success!\n", + vf_idx, + en_dev->ops->get_pf_link_up(en_dev->parent) ? 
"TRUE" : "FALSE"); + return ret; +} + +static uint32_t zxdh_set_vf_reset(zxdh_msg_info *msg, zxdh_reps_info *reps, + struct zxdh_en_device *en_dev) +{ + return 0; +} + +static uint32_t zxdh_set_vf_vlan(zxdh_msg_info *msg, zxdh_reps_info *reps, + struct zxdh_en_device *edev) +{ + uint32_t ret = 0; + /* update local var*/ + edev->vlan_dev.vlan_id = msg->vf_vlan_msg.vlan_id; + edev->vlan_dev.qos = msg->vf_vlan_msg.qos; + edev->vlan_dev.protcol = msg->vf_vlan_msg.protocl; + + return ret; +} + +static uint32_t zxdh_pf_get_vf_queue(zxdh_msg_info *msg, zxdh_reps_info *reps, + struct zxdh_en_device *edev) +{ + uint32_t ret = 0; + uint32_t vir_queue_start; + uint32_t vir_queue_num; + uint32_t queue_index; + uint32_t queue_num; + uint32_t max_queue_num = edev->curr_queue_pairs; + + PLCR_LOG_INFO("vf's edev->vport = 0x%x\n", edev->vport); + PLCR_LOG_INFO("vf's max_queue_num(pairs) = 0x%x\n", max_queue_num); + PLCR_LOG_INFO("edev->device_id = %x\n", edev->device_id); + PLCR_LOG_INFO("edev->rq[0].vq->phy_index = %x\n", + edev->rq[0].vq->phy_index); + PLCR_LOG_INFO("edev->sq[0].vq->phy_index = %x\n", + edev->sq[0].vq->phy_index); + + vir_queue_start = msg->plcr_pf_get_vf_queue_info_msg.vir_queue_start; + vir_queue_num = msg->plcr_pf_get_vf_queue_info_msg.vir_queue_num; + + PLCR_LOG_INFO("vir_queue_start = 0x%x\n", vir_queue_start); + PLCR_LOG_INFO("vir_queue_num = 0x%x\n", vir_queue_num); + + if (max_queue_num > (vir_queue_num + vir_queue_num)) { + max_queue_num = vir_queue_num + vir_queue_num; + } + + for (queue_index = vir_queue_start, queue_num = 0; + queue_index < max_queue_num; queue_index++, queue_num++) { + //get rx&tx queue info + reps->plcr_pf_get_vf_queue_info_rsp.phy_rxq[queue_num] = + edev->rq[queue_num].vq->phy_index; + reps->plcr_pf_get_vf_queue_info_rsp.phy_txq[queue_num] = + edev->sq[queue_num].vq->phy_index; + } + + reps->plcr_pf_get_vf_queue_info_rsp.phy_queue_num = queue_num; + + PLCR_LOG_INFO("queue_num = 0x%x\n", queue_num); + + return ret; +} + 
+zxdh_pf_msg_proc pf_msg_proc[] = { + { ZXDH_SET_VF_LINK_STATE, "set_vf_link_state", zxdh_set_vf_link_state }, + { ZXDH_SET_VF_RESET, "set_vf_reset", zxdh_set_vf_reset }, + { ZXDH_PF_SET_VF_VLAN, "pf_set_vf_vlan", zxdh_set_vf_vlan }, + { ZXDH_PF_GET_VF_QUEUE_INFO, "pf_get_vf_queue_info", + zxdh_pf_get_vf_queue }, +}; + +int32_t zxdh_vf_msg_recv_func(void *pay_load, uint16_t len, void *reps_buffer, + uint16_t *reps_len, void *dev) +{ + zxdh_msg_info *msg = (zxdh_msg_info *)pay_load; + zxdh_reps_info *reps = (zxdh_reps_info *)reps_buffer; + struct zxdh_en_device *en_dev = (struct zxdh_en_device *)dev; + int32_t ret = 0; + int32_t i = 0; + int32_t num = 0; + + LOG_DEBUG("is called\n"); + if (len != sizeof(union zxdh_msg)) { + LOG_ERR("invalid data_len\n"); + return -1; + } + + if (en_dev == NULL) { + LOG_ERR("dev is NULL\n"); + return -1; + } + + num = sizeof(pf_msg_proc) / sizeof(zxdh_pf_msg_proc); + + for (i = 0; i < num; i++) { + *reps_len = sizeof(union zxdh_msg); + if (pf_msg_proc[i].op_code == msg->hdr_vf.op_code) { + LOG_DEBUG("%s is called", pf_msg_proc[i].proc_name); + ret = pf_msg_proc[i].msg_proc(msg, reps, en_dev); + if (ret != 0) { + reps->flag = ZXDH_REPS_FAIL; + LOG_ERR("%s failed, ret: %d\n", + pf_msg_proc[i].proc_name, ret); + return -1; + } + reps->flag = ZXDH_REPS_SUCC; + return 0; + } + } + + LOG_ERR("invalid op_code: [%u]\n", msg->hdr_vf.op_code); + reps->flag = ZXDH_INVALID_OP_CODE; + return -2; +} + +int32_t dh_ip_mac_init(struct zxdh_en_priv *en_priv) +{ + int32_t err = 0; + struct zxdh_en_device *en_dev = &en_priv->edev; + DPP_PF_INFO_T pf_info = { 0 }; + uint8_t ip4_mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; + uint8_t ip6_mac[6] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + /* 判断目前所配置组播mac地址数量是否超过32个上限 */ + if (en_dev->curr_multicast_num >= DEV_MULTICAST_MAX_NUM) { + LOG_ERR("curr_multicast_num is beyond maximum\n"); + return -ENOSPC; + } + + if 
(en_dev->ops->get_coredev_type(en_dev->parent) == + DH_COREDEV_PF) { /* PF流程 */ + err = dpp_multi_mac_add_member(&pf_info, ip4_mac); + if (err != 0) { + LOG_ERR("dpp_multi_mac_add_member mac:%pM failed, err:%d\n", + ip4_mac, err); + return err; + } + en_dev->curr_multicast_num++; + err = dpp_multi_mac_add_member(&pf_info, ip6_mac); + if (err != 0) { + LOG_ERR("dpp_multi_mac_add_member mac:%pM failed, err:%d\n", + ip6_mac, err); + return err; + } + en_dev->curr_multicast_num++; + LOG_INFO("current multicast num: %d", + en_dev->curr_multicast_num); + } else { /* VF流程*/ + err = zxdh_vf_dpp_add_ipv6_mac(en_dev, ip4_mac); + if (err != 0) { + LOG_ERR("zxdh_vf_ip_mac_init mac:%pM failed, err:%d\n", + ip4_mac, err); + return err; + } + en_dev->curr_multicast_num++; + + err = zxdh_vf_dpp_add_ipv6_mac(en_dev, ip6_mac); + if (err != 0) { + LOG_ERR("zxdh_vf_ip_mac_init mac:%pM failed, err:%d\n", + ip6_mac, err); + return err; + } + en_dev->curr_multicast_num++; + LOG_INFO("current multicast num is %d", + en_dev->curr_multicast_num); + } + + LOG_DEBUG("config exist mac to np\n"); + return 0; +} + +int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv) +{ + int32_t ret = 0; + struct zxdh_en_device *en_dev = &en_priv->edev; + en_dev->ipv6_notifier.notifier_call = inet6_addr_change_notifier; + en_dev->ipv6_notifier.priority = 0; + ret = dh_inet6_addr_change_notifier_register(&(en_dev->ipv6_notifier)); + if (ret) { + LOG_ERR("Failed to register inet6addr_notifier, ret:%d\n", ret); + return ret; + } + LOG_INFO("netdev:%s ipv6_notifier_init success\n", + en_dev->netdev->name); + return ret; +} + +int32_t dh_aux_vxlan_netdev_notifier_init(struct zxdh_en_priv *en_priv) +{ + int32_t ret = 0; + struct zxdh_en_device *en_dev = &en_priv->edev; + en_dev->vxlan_notifier.notifier_call = vxlan_netdev_change_notifier; + en_dev->vxlan_notifier.priority = 0; + ret = dh_vxlan_netdev_change_notifier_register( + &(en_dev->vxlan_notifier)); + if (ret) { + LOG_ERR("Failed to register 
vxlan_notifier, ret:%d\n", ret); + return ret; + } + LOG_DEBUG("netdev:%s vxlan_notifier_init success\n", + en_dev->netdev->name); + return ret; +} + +static void run_cfg_shell_script(struct work_struct *work) +{ + static const char command[] = "/etc/zxdh_cfg/smart_nic_cfg_proc.sh"; + char *argv[] = { (char *)command, "c", NULL }; + static char *envp[] = { "HOME=/", "TERM=linux", + "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/bin", + NULL }; + int32_t ret = 0; + + ret = call_usermodehelper(command, argv, envp, UMH_WAIT_PROC); + if (ret < 0) { + LOG_DEBUG("Failed to execute shell script(err:%d)\n", ret); + } else { + LOG_DEBUG("Shell script executed successfully,ret:%d\n", ret); + } +} + +void zxdh_cap_pkt_uninit(struct zxdh_en_device *en_dev, bool offload_mode) +{ + uint32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (en_dev->pkt_dev_flag == 1) { + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + if (en_dev->pkt_wq) { + destroy_workqueue(en_dev->pkt_wq); + en_dev->pkt_wq = NULL; + } + + if (offload_mode) { + ret = dpp_pkt_capture_disable_all(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_pkt_capture_disable_all failed, ret:%d!!!\n", + ret); + } + + ret = dpp_pkt_capture_table_flush(&pf_info); + if (ret != 0) { + LOG_ERR("dpp_pkt_capture_table_flush failed, ret:%d!!!\n", + ret); + } + + ret = dpp_pkt_capture_speed_set(&pf_info, + ZXDH_PKT_INIT_SPEED); + if (ret != 0) { + LOG_ERR("dpp_pkt_capture_speed_set failed, ret:%d!!!\n", + ret); + } + } + + en_dev->pkt_cap_switch = 1; + en_dev->pkt_save_file_flag = 0; + en_dev->pkt_file_num = 0; + en_dev->pkt_save_file.enable_pkt_num_mode = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + en_dev->pkt_save_file.pkt_cur_num = 0; + en_dev->pkt_addr_marked = 0; + en_dev->pkt_dev_speed = ZXDH_PKT_INIT_SPEED; + en_dev->pkt_save_file.file_pos = 0; + + while (en_dev->pkt_save_file.pkt_ubuf_idx != + en_dev->pkt_save_file.pkt_rbuf_idx) { + if (en_dev->pkt_file_info && + 
en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array) { + SAFE_KFREE(en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array); + } + + en_dev->pkt_save_file.pkt_ubuf_idx++; + if (en_dev->pkt_save_file.pkt_ubuf_idx >= + (ZXDH_MQ_PAIRS_NUM * ZXDH_PF_MIN_DESC_NUM)) { + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + } + } + + if (en_dev->pkt_file_info) { + if (en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array) { + SAFE_KFREE(en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array); + } + + kfree(en_dev->pkt_file_info); + en_dev->pkt_file_info = NULL; + } + + if (en_dev->pkt_save_file.log_file != NULL) { + close_log_file(en_dev->pkt_save_file.log_file); + en_dev->pkt_save_file.log_file = NULL; + } + + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + en_dev->pkt_save_file.pkt_rbuf_idx = 0; + en_dev->pkt_dev_flag = 0; + } +} + +int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv) +{ + struct dh_events *events = NULL; + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + int32_t ret = 0; + uint32_t evt_num = ARRAY_SIZE(aux_events); + + if (!en_dev->ops->if_init(en_dev->parent)) + evt_num -= 1; //TODO + + events = kzalloc( + (sizeof(*events) + evt_num * sizeof(struct dh_event_nb)), + GFP_KERNEL); + if (unlikely(events == NULL)) { + LOG_ERR("events kzalloc failed: %p\n", events); + ret = -ENOMEM; + goto err_events_kzalloc; + } + + events->evt_num = evt_num; + events->dev = NULL; + en_priv->events = events; + events->wq = create_singlethread_workqueue("dh_aux_events"); + if (!events->wq) { + LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n", + events->wq); + ret = -ENOMEM; + goto err_create_wq; + } + + INIT_WORK(&en_dev->vf_link_info_update_work, + vf_link_info_update_handler); + INIT_WORK(&en_dev->link_info_irq_update_vf_work, + link_info_irq_update_vf_handler); + INIT_WORK(&en_dev->link_info_irq_process_work, + link_info_irq_process_handler); + 
INIT_WORK(&en_dev->link_info_irq_update_np_work, + link_info_irq_update_np_work_handler); + INIT_WORK(&en_dev->rx_mode_set_work, rx_mode_set_handler); + INIT_WORK(&en_dev->pf2vf_msg_proc_work, pf2vf_msg_proc_work_handler); + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + INIT_WORK(&en_dev->service_task, en_aux_service_task); + INIT_WORK(&en_dev->service_riscv_task, + en_aux_service_riscv_task); + } + + INIT_WORK(&en_dev->riscv2aux_msg_proc_work, + riscv2aux_msg_proc_work_handler); + INIT_WORK(&en_dev->plug_adev_work, plug_adev_work_handler); + INIT_WORK(&en_dev->unplug_adev_work, unplug_adev_work_handler); + + INIT_WORK(&en_dev->smart_nic_copy_work, run_cfg_shell_script); + queue_work(events->wq, &en_dev->smart_nic_copy_work); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + timer_setup(&en_dev->service_timer, en_aux_service_timer, 0); + ret = mod_timer(&en_dev->service_timer, jiffies); + if (ret) { + LOG_ERR("timer add failed\n"); + goto err_mod_timer; + } + + timer_setup(&en_dev->service_riscv_timer, + en_aux_service_riscv_timer, 0); + ret = mod_timer(&en_dev->service_riscv_timer, jiffies); + if (ret) { + LOG_ERR("timer add failed\n"); + goto err_riscv_timer; + } + } + + for (i = 0; i < evt_num; i++) { + events->notifiers[i].nb = aux_events[i]; + events->notifiers[i].ctx = en_priv; + en_dev->ops->aux_nh_attach(en_dev->parent, + &events->notifiers[i].nb, true); + } + + return ret; + +err_riscv_timer: + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + del_timer_sync(&en_dev->service_riscv_timer); + } +err_mod_timer: + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + del_timer_sync(&en_dev->service_timer); + } + destroy_workqueue(events->wq); +err_create_wq: + kfree(events); +err_events_kzalloc: + return ret; +} + +void dh_aux_events_uninit(struct zxdh_en_priv *en_priv) +{ + struct dh_events *events = en_priv->events; + struct zxdh_en_device *en_dev = &en_priv->edev; + 
int32_t i = 0; + + for (i = events->evt_num - 1; i >= 0; i--) { + // dh_eq_notifier_unregister(&en_priv->eq_table, &events->notifiers[i].nb); + en_dev->ops->aux_nh_attach(en_dev->parent, + &events->notifiers[i].nb, false); + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + del_timer_sync(&en_dev->service_timer); + del_timer_sync(&en_dev->service_riscv_timer); + zxdh_cap_pkt_uninit(en_dev, true); + } + + destroy_workqueue(en_priv->events->wq); + kfree(en_priv->events); + + return; +} + +static int32_t mgr_test_cnt(void *data, uint16_t len, void *reps, + uint16_t *reps_len, void *dev) +{ + uint8_t *pay_load = (uint8_t *)data; + uint8_t *reps_buffer = (uint8_t *)reps; + uint16_t idx = 0; + uint16_t sum = 0; + + if (reps_buffer == NULL) { + return 0; + } + + for (idx = 0; idx < len; idx++) { + sum += pay_load[idx]; + } + + reps_buffer[0] = (uint8_t)sum; + reps_buffer[1] = (uint8_t)(sum >> 8); + *reps_len = 2; + return 0; +} + +static int32_t msgq_test_func(void *data, uint16_t len, void *reps, + uint16_t *reps_len, void *dev) +{ + if (reps == NULL) { + return 0; + } + + *reps_len = len; + return 0; +} + +typedef uint32_t (*zxdh_vqmb_msg_func)(vqmb_to_host_msg *msg, + zxdh_reps_info *reps, + struct zxdh_en_device *en_dev); + +typedef struct { + uint8_t proc_name[64]; + zxdh_vqmb_msg_func msg_proc; +} zxdh_vqmb_msg_proc; + +enum { + MSG_BIT_VQMB_CTRL_NP = 1, + VQMB_MSG_TYPE_MAX = 63, +}; + +static uint32_t vqmb_port_ctrl_func(vqmb_to_host_msg *msg, zxdh_reps_info *reps, + struct zxdh_en_device *en_dev) +{ + uint32_t err = 0; + bool port_enable = msg->vqmb_port_ctrl_msg.port_enable; + + if (port_enable) { + en_dev->vqmb_port_ctl = !port_enable; + if (netif_running(en_dev->netdev)) + err = zxdh_port_enable(en_dev, port_enable); + } else { + if (netif_running(en_dev->netdev)) + err = zxdh_port_enable(en_dev, port_enable); + en_dev->vqmb_port_ctl = !port_enable; + } + LOG_INFO("port_enable: %d, vfid: %d\n", port_enable, + msg->vqmb_hdr.vfid); + 
return err; +} + +zxdh_vqmb_msg_proc vqmb_msg_proc[] = { + { "invalid", NULL }, + { "vqmb_port_ctrl_func", vqmb_port_ctrl_func }, +}; + +int32_t zxdh_vqmb_msg_recv_func(void *pay_load, uint16_t len, void *reps_buffer, + uint16_t *reps_len, void *dev) +{ + vqmb_to_host_msg *msg = (vqmb_to_host_msg *)pay_load; + zxdh_reps_info *reps = (zxdh_reps_info *)reps_buffer; + struct zxdh_en_device *en_dev = (struct zxdh_en_device *)dev; + int32_t ret = 0; + uint32_t num = 0; + uint32_t i = 0; + + if (en_dev == NULL) { + LOG_ERR("dev is NULL\n"); + return -1; + } + + *reps_len = sizeof(reps->flag); + num = ARRAY_SIZE(vqmb_msg_proc); + for (i = MSG_BIT_VQMB_CTRL_NP; i < VQMB_MSG_TYPE_MAX; i++) { + if (i >= num) + break; + if (((msg->vqmb_hdr.bits & (1 << i)) == 0)) + continue; + LOG_DEBUG("%s is called", vqmb_msg_proc[i].proc_name); + if (!vqmb_msg_proc[i].msg_proc) + continue; + ret = vqmb_msg_proc[i].msg_proc(msg, reps, en_dev); + if (ret != 0) { + reps->flag = ZXDH_REPS_FAIL; + LOG_ERR("%s failed, ret: %d\n", + vqmb_msg_proc[i].proc_name, ret); + return -1; + } + } + + reps->flag = ZXDH_REPS_SUCC; + LOG_DEBUG("reps->flag: 0x%x, reps_len: %d\n", reps->flag, *reps_len); + return 0; +} + +int32_t dh_aux_msg_recv_func_register(void) +{ + int32_t ret = 0; + + mutex_init(&rdma_lock); + ret = zxdh_bar_chan_msg_recv_register(MODULE_PF_BAR_MSG_TO_VF, + zxdh_vf_msg_recv_func); + if (0 != ret) { + LOG_ERR("event_id[%d] register failed: %d\n", + MODULE_PF_BAR_MSG_TO_VF, ret); + return ret; + } + + ret = zxdh_bar_chan_msg_recv_register(MODULE_DHTOOL, + zxdh_tools_sendto_user_netlink); + if (0 != ret) { + LOG_ERR("event_id[%d] register failed: %d\n", MODULE_DHTOOL, + ret); + goto unregister_pf_to_vf; + } + + ret = zxdh_bar_chan_msg_recv_register(MODULE_DEMO, mgr_test_cnt); + if (0 != ret) { + LOG_ERR("event_id[%d] register failed: %d\n", MODULE_DEMO, ret); + goto unregister_dhtool; + } + + ret = zxdh_bar_chan_msg_recv_register(MODULE_MSGQ, msgq_test_func); + if (0 != ret) { + 
LOG_ERR("event_id[%d] register failed: %d\n", MODULE_MSGQ, ret); + goto unregister_demo; + } + + ret = zxdh_bar_chan_msg_recv_register(MODULE_VQMB, + zxdh_vqmb_msg_recv_func); + if (0 != ret) { + LOG_ERR("event_id[%d] register failed: %d\n", MODULE_VQMB, ret); + goto unregister_msgq; + } + + return ret; +unregister_msgq: + zxdh_bar_chan_msg_recv_unregister(MODULE_MSGQ); +unregister_demo: + zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO); +unregister_dhtool: + zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL); +unregister_pf_to_vf: + zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF); + return ret; +} + +void dh_aux_msg_recv_func_unregister(void) +{ + mutex_destroy(&rdma_lock); + zxdh_bar_chan_msg_recv_unregister(MODULE_VQMB); + zxdh_bar_chan_msg_recv_unregister(MODULE_MSGQ); + zxdh_bar_chan_msg_recv_unregister(MODULE_DEMO); + zxdh_bar_chan_msg_recv_unregister(MODULE_DHTOOL); + zxdh_bar_chan_msg_recv_unregister(MODULE_PF_BAR_MSG_TO_VF); + return; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_events.h b/drivers/net/ethernet/dinghai/en_aux/en_aux_events.h new file mode 100644 index 000000000000..9cef3b190c48 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_events.h @@ -0,0 +1,50 @@ +#ifndef __EN_AUX_EVENTS_H__ +#define __EN_AUX_EVENTS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include "en_aux.h" +#include "../en_np/table/include/dpp_tbl_comm.h" + +#define MULTI_FLAG (0x01) +#define IPV4_TYPE_FLAG (0x00) +#define GLOBAL_FLAG (0x5E) +#define BIT16 (16) +#define BIT8 (8) +#define BIT_23_L (0x7F) +#define BIT_15_L (0xFF) +#define BIT_7_L (0xFF) + +enum { + ZXDH_RDMA_HEALTH_EVENT = 1, + ZXDH_RDMA_SRIOV_EVENT = 2, +}; + +struct zxdh_rdma_sriov_event_info { + struct pci_dev *pdev; + uint64_t bar0_virt_addr; + uint16_t vport_id; + uint16_t num_vfs; +}; + +int32_t dh_aux_events_init(struct zxdh_en_priv *en_priv); +void dh_aux_events_uninit(struct zxdh_en_priv *en_priv); +int32_t 
dh_aux_msg_recv_func_register(void); +void dh_aux_msg_recv_func_unregister(void); +int32_t dh_aux_ipv6_notifier_init(struct zxdh_en_priv *en_priv); +int32_t dh_aux_vxlan_netdev_notifier_init(struct zxdh_en_priv *en_priv); +int32_t dh_ip_mac_init(struct zxdh_en_priv *en_priv); +int32_t zxdh_rdma_events_call(struct net_device *netdev, uint8_t event_type, + void *data); +void zxdh_cap_pkt_uninit(struct zxdh_en_device *en_dev, bool offload_mode); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c new file mode 100644 index 000000000000..841786f537ac --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c @@ -0,0 +1,1904 @@ +#include +#include "en_aux_ioctl.h" +#include "en_aux_cmd.h" +#include "../zxdh_tools/zxdh_tools_ioctl.h" +#include "queue.h" +#include "priv_queue.h" +#include "../en_np/table/include/dpp_tbl_api.h" +#include "../en_pf/msg_func.h" +#include "../en_pf/en_pf_eq.h" +#ifdef CONFIG_DINGHAI_TSN +#include "../en_tsn/zxdh_tsn_ioctl.h" +#endif + +#ifdef PTP_DRIVER_INTERFACE_EN +extern int32_t tod_device_set_bar_virtual_addr(uint64_t virtaddr, + uint16_t pcieid); +#endif +int32_t print_data(uint8_t *data, uint32_t len) +{ + int32_t i = 0; + uint32_t loopcnt = 0; + uint32_t last_line_len = 0; + uint32_t line_len = PKT_PRINT_LINE_LEN; + uint8_t last_line_data[PKT_PRINT_LINE_LEN] = { 0 }; + + if (len == 0) { + return 0; + } + loopcnt = len / line_len; + last_line_len = len % line_len; + + LOG_DEBUG("***************packet data[len: %d]***************\n", len); + for (i = 0; i < loopcnt; i++) { + LOG_INFO( + "%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", + *(data + (line_len * i) + 0), + *(data + (line_len * i) + 1), + *(data + (line_len * i) + 2), + *(data + (line_len * i) + 3), + *(data + (line_len * i) + 4), + *(data + (line_len * i) + 5), + *(data + (line_len * i) + 6), + *(data + (line_len * i) + 7), 
+ *(data + (line_len * i) + 8), + *(data + (line_len * i) + 9), + *(data + (line_len * i) + 10), + *(data + (line_len * i) + 11), + *(data + (line_len * i) + 12), + *(data + (line_len * i) + 13), + *(data + (line_len * i) + 14), + *(data + (line_len * i) + 15)); + } + if (last_line_len != 0) { + memcpy(last_line_data, (data + (line_len * i)), last_line_len); + LOG_INFO( + "%.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", + last_line_data[0], last_line_data[1], last_line_data[2], + last_line_data[3], last_line_data[4], last_line_data[5], + last_line_data[6], last_line_data[7], last_line_data[8], + last_line_data[9], last_line_data[10], + last_line_data[11], last_line_data[12], + last_line_data[13], last_line_data[14], + last_line_data[15]); + } + LOG_INFO("****************end packet data**************\n"); + + return 0; +} + +int32_t zxdh_read_reg_cmd(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_reg *reg = NULL; + uint32_t size = sizeof(struct zxdh_en_reg); + uint64_t base_addr = 0; + uint32_t num = 0; + int32_t err = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + reg = kzalloc(size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size)) { + LOG_ERR("copy_from_user failed\n"); + err = -EFAULT; + goto err_ret; + } + + if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM)) { + LOG_ERR("transmit failed, reg->num=%u\n", reg->num); + err = -EFAULT; + goto err_ret; + } + + base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0); + + for (num = 0; num < reg->num; num++) { + reg->data[num] = readl( + (const volatile void 
*)(base_addr + + (reg->offset & 0xfffffffc) + + num * 4)); + } + + if (copy_to_user(ifr->ifr_ifru.ifru_data, reg, size)) { + LOG_ERR("copy_to_user failed\n"); + err = -EFAULT; + } + +err_ret: + kfree(reg); + return err; +} + +int32_t zxdh_write_reg_cmd(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_reg *reg = NULL; + uint32_t size = sizeof(struct zxdh_en_reg); + uint64_t base_addr = 0; + uint32_t num = 0; + int32_t err = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + reg = kzalloc(size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size)) { + LOG_ERR("copy_from_user failed\n"); + err = -EFAULT; + goto err_ret; + } + + if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM)) { + LOG_ERR("transmit failed, reg->num=%u\n", reg->num); + err = -EFAULT; + goto err_ret; + } + + base_addr = en_dev->ops->get_bar_virt_addr(en_dev->parent, 0); + + for (num = 0; num < reg->num; num++) { + writel(reg->data[num], + (volatile void *)(base_addr + + (reg->offset & 0xfffffffc) + num * 4)); + } + +err_ret: + kfree(reg); + return err; +} + +int32_t print_vring_info(struct virtqueue *vq, struct zxdh_en_reg *reg) +{ + struct vring_virtqueue *vvq = to_vvq(vq); + + if ((reg->num + reg->data[0]) > vvq->packed.vring.num) { + LOG_ERR("the sum of desc_index %u and desc_num %u over desc depth %u, should be [0-%u]\n", + reg->num, reg->data[0], vvq->packed.vring.num, + vvq->packed.vring.num - 1); + return -EINVAL; + } + + zxdh_print_vring_info(vq, reg->num, reg->num + reg->data[0]); + + return 0; +} + +int32_t zxdh_get_vring_info(struct net_device *netdev, struct ifreq *ifr) +{ + struct 
zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_reg *reg = NULL; + uint32_t size = sizeof(struct zxdh_en_reg); + struct virtqueue *vq = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + reg = kzalloc(size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size)) { + LOG_ERR("copy_from_user failed\n"); + ret = -EFAULT; + goto err_ret; + } + + if (reg->offset >= en_dev->max_queue_pairs) { + LOG_ERR("the queue index %u over the curr_queue_pairs %u, should be [0-%u]\n", + reg->offset, en_dev->curr_queue_pairs, + en_dev->curr_queue_pairs - 1); + ret = -EINVAL; + goto err_ret; + } + + vq = en_dev->sq[reg->offset].vq; + LOG_INFO( + "******************************tx vring info****************************\n"); + ret = print_vring_info(vq, reg); + if (ret != 0) { + LOG_ERR("print tx vring info failed!\n"); + ret = -EINVAL; + goto err_ret; + } + + vq = en_dev->rq[reg->offset].vq; + LOG_INFO( + "******************************rx vring info****************************\n"); + ret = print_vring_info(vq, reg); + if (ret != 0) { + LOG_ERR("print rx vring info failed!\n"); + ret = -EINVAL; + } + +err_ret: + kfree(reg); + return ret; +} + +int32_t zxdh_en_set_clock_no(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + 
en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!\n", reg->num); + goto err_ret; + } + + en_dev->clock_no = reg->data[0]; + LOG_INFO("en_dev %s clock_no = %d\n", en_dev->netdev->name, + en_dev->clock_no); + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!\n"); + goto err_ret; + } + + return 0; + +err_ret: + return -1; +} + +void copy_u32_to_u8(uint8_t *data_pkt, uint32_t *data, uint32_t pktlen) +{ + uint32_t i = 0; + + for (i = 0; i < pktlen; i++) { + *data_pkt++ = data[i]; + } +} + +int32_t zxdh_tx_file_pkts(struct zxdh_en_priv *en_priv, struct zxdh_en_reg *reg) +{ + int32_t total_sg = 0; + uint8_t *data_pkt = NULL; + struct scatterlist *sg = NULL; + struct zxdh_en_device *en_dev = &en_priv->edev; + struct send_queue *sq = en_dev->sq; + struct page *page = NULL; + struct data_packet pkt = { 0 }; + uint16_t i = 0; + uint32_t len = 0; + void *ptr = NULL; + uint32_t last_buff_len = 0; + uint32_t pktLen = reg->num; + uint32_t buffLen = 4096; + + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + LOG_ERR("virtqueue_get_buf() != NULL, ptr=0x%llx, len=0x%x\n", + (uint64_t)ptr, len); + }; + + sg = sq->sg; + pkt.buf_size = 16 * PAGE_SIZE; + page = alloc_pages(GFP_KERNEL, 4); + if (unlikely(page == NULL)) { + LOG_ERR("page is null\n"); + goto err; + } + + pkt.buf = page_address(page); + if (unlikely(pkt.buf == NULL)) { + LOG_ERR("pkt.buf is null\n"); + goto err1; + } + memset(pkt.buf, 0, pkt.buf_size); + + data_pkt = (uint8_t *)pkt.buf; + copy_u32_to_u8(data_pkt, reg->data, pktLen); + print_data(data_pkt, + (pktLen > PKT_PRINT_LEN_MAX) ? PKT_PRINT_LEN_MAX : pktLen); + + total_sg = pktLen / buffLen; + last_buff_len = pktLen % buffLen; + if (last_buff_len != 0) { + total_sg += 1; + } + + sg_init_table(sg, total_sg); + for (i = 0; i < total_sg; i++) { + if (i == (total_sg - 1)) { + sg_set_buf(&sg[i], data_pkt + (i * buffLen), + ((last_buff_len != 0) ? 
last_buff_len : + buffLen)); + } else { + sg_set_buf(&sg[i], data_pkt + (i * buffLen), buffLen); + } + } + + if (unlikely(virtqueue_add_outbuf(sq->vq, sg, total_sg, data_pkt, + GFP_ATOMIC) != 0)) { + LOG_ERR("virtqueue_add_outbuf failure!\n"); + goto err1; + } + + if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + + en_dev->netdev->stats.tx_packets++; + en_dev->netdev->stats.tx_bytes += pktLen; + LOG_INFO("en_dev->netdev->stats.tx_packets=%ld, tx pktLen=%d\n", + en_dev->netdev->stats.tx_packets, pktLen); + + return 0; + +err1: + free_pages((uint64_t)pkt.buf, 4); +err: + return -1; +} + +int32_t zxdh_send_file_pkt(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_reg *reg = NULL; + uint32_t size = sizeof(struct zxdh_en_reg); + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + + reg = kzalloc(size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (copy_from_user(reg, ifr->ifr_ifru.ifru_data, size)) { + LOG_ERR("copy_from_user failed\n"); + ret = -EFAULT; + goto err_ret; + } + + if ((reg->num == 0) || (reg->num > MAX_ACCESS_NUM)) { + LOG_ERR("transmit failed, reg->num=%d\n", reg->num); + ret = -EFAULT; + goto err_ret; + } + + ret = zxdh_tx_file_pkts(en_priv, reg); + if (unlikely(ret != 0)) { + LOG_ERR("transmit failed[ret = %d]!", ret); + ret = -1; + goto err_ret; + } + + reg->num = 0; + if (copy_to_user(ifr->ifr_ifru.ifru_data, reg, size)) { + LOG_ERR("copy_to_user failed\n"); + ret = -EFAULT; + } + +err_ret: + kfree(reg); + return ret; +} + +#ifdef PTP_DRIVER_INTERFACE_EN +/* ptp发送加密报文时,需要调用使能函数进行使能 */ +extern int32_t 
enable_write_ts_to_fifo(struct zxdh_en_device *en_dev, + uint32_t enable, uint32_t mac_number); +extern int32_t set_interrupt_capture_timer(struct zxdh_en_device *en_dev, + uint32_t index); +extern int32_t zxdh_set_pps_selection(struct zxdh_en_device *en_dev, + uint32_t pps_type, uint32_t selection); +extern int32_t zxdh_set_pd_detection(struct zxdh_en_device *en_dev, + uint32_t pd_index, uint32_t pd_input1, + uint32_t pd_input2); +extern int32_t zxdh_get_pd_value(struct zxdh_en_device *en_dev, + uint32_t pd_index, uint32_t *pd_result); +extern int32_t zxdh_set_pps_interrupt_support(struct zxdh_en_device *en_dev, + uint32_t support); +extern int32_t zxdh_get_pps_interrupt_support(struct zxdh_en_device *en_dev, + uint32_t *support); +extern int32_t +zxdh_set_local_pps_interrupt_enable(struct zxdh_en_device *en_dev, + uint32_t enable); +extern int32_t zxdh_set_ext_pps_interrupt_enable(struct zxdh_en_device *en_dev, + uint32_t pps_src, + uint32_t enable); +extern int32_t zxdh_set_pd_sel_shift(struct zxdh_en_device *en_dev, + uint32_t pd_index, uint32_t sel, + uint32_t shift); +extern int32_t zxdh_get_ptp_clock_index(struct zxdh_en_device *en_dev, + uint32_t *ptp_clock_idx); +#endif /* PTP_DRIVER_INTERFACE_EN */ +int32_t zxdh_en_enable_ptp_encrypted_msg(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + int32_t mac_num = 0; //0-2 + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + uint32_t enable = 0; + int32_t ret = 0; + + LOG_INFO("enter in zxdh_en_enable_ptp_encrypted_msg\n"); + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + mac_num = zxdh_pf_macpcs_num_get(en_dev); + if (mac_num < 0) { + LOG_ERR("get mac num %d err, its value should 
is 0-2!\n", + mac_num); + goto err_ret; + } + + if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) { + LOG_ERR("copy_from_user failed!\n"); + goto err_ret; + } + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!\n", reg->num); + goto err_ret; + } + + enable = reg->data[0]; + if ((enable != 0) && (enable != 1)) { + LOG_ERR("Transmit failed[enable = %u]!\n", enable); + goto err_ret; + } + + LOG_INFO("enable = %u\n", enable); + +#ifdef PTP_DRIVER_INTERFACE_EN + /* 使能ptp加密报文发送接口 */ + ret = enable_write_ts_to_fifo(en_dev, enable, mac_num); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "enable ptp encrypted msg failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_intr_capture_timer(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + u_int32_t index; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + index = reg->data[0]; + LOG_INFO("index = %d\n", index); + if (index > 4) { + LOG_ERR("capture timer out of range!"); + goto err_ret; + } +#ifdef PTP_DRIVER_INTERFACE_EN + ret = set_interrupt_capture_timer(en_dev, index); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "set interrupt capture timer failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user 
failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_pps_selection(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t pps_type; + uint32_t selection; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 2) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + pps_type = reg->data[0]; + selection = reg->data[1]; + LOG_INFO("pps_type = %u, selection = %u\n", pps_type, selection); +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_set_pps_selection(en_dev, pps_type, selection); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pps selection failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_phase_detection(struct net_device *netdev, + struct ifreq *ifr, struct zxdh_en_reg *reg) +{ + uint32_t pd_index; + uint32_t pd_input1; + uint32_t pd_input2; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 3) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + pd_index = 
reg->data[0]; + pd_input1 = reg->data[1]; + pd_input2 = reg->data[2]; + LOG_INFO("pd_index = %u, pd_input1 = %u, pd_input2 = %u\n", pd_index, + pd_input1, pd_input2); +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_set_pd_detection(en_dev, pd_index, pd_input1, pd_input2); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pd detection failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_get_pd_value(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t pd_index; + uint32_t pd_result; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + pd_index = reg->data[0]; + LOG_INFO("pd_index = %u\n", pd_index); +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_get_pd_value(en_dev, pd_index, &pd_result); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get pd value failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 1; + reg->data[0] = pd_result; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_l2_ptp_port(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + DPP_PF_INFO_T 
pf_info = { 0 }; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + LOG_INFO("reg->num: %d", reg->num); + LOG_INFO("reg->offset: %d", reg->offset); + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + en_dev->vf_1588_call_np_num = PTP_PORT_VFID_SET; + LOG_INFO("en_dev->vport: 0x%x, IS_PF(en_dev->vport): %d", en_dev->vport, + IS_PF(en_dev->vport)); + if (IS_PF(en_dev->vport)) { + ret = dpp_ptp_port_vfid_set(&pf_info, VQM_VFID(en_dev->vport)); + if (ret != 0) { + LOG_ERR("dpp_ptp_port_vfid_set failed!!!\n"); + goto err_ret; + } + } else { + ret = zxdh_vf_1588_call_np_interface(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n"); + goto err_ret; + } + } + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + LOG_INFO("dpp_ptp_port_vfid_set success"); + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_ptp_tc_enable(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + 
} + + en_dev->ptp_tc_enable_opt = reg->data[0]; + LOG_INFO("en_dev->ptp_tc_enable_opt = %u\n", en_dev->ptp_tc_enable_opt); + + en_dev->vf_1588_call_np_num = PTP_TC_ENABLE_SET; + + if (IS_PF(en_dev->vport)) { + ret = dpp_ptp_tc_enable_set(&pf_info, + en_dev->ptp_tc_enable_opt); + if (ret != 0) { + LOG_ERR("dpp_ptp_tc_enable_set failed!!!\n"); + goto err_ret; + } + } else { + ret = zxdh_vf_1588_call_np_interface(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_1588_call_np_interface failed!!!\n"); + goto err_ret; + } + } + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_synce_recovery_port(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + return -1; + } + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_MAC_RECOVERY_CLK_SET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + msg->payload.synce_clk_recovery_port.clk_speed = reg->data[0]; + LOG_INFO("phyport = %u, clk_speed = %u\n", + msg->payload.hdr_to_agt.phyport, + 
msg->payload.synce_clk_recovery_port.clk_speed); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_set_synce_recovery_port failed, err: %d\n", + err); + goto free_msg; + } + + reg->num = 0; + err = copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size); + +free_msg: + kfree(msg); + return err; +} + +int32_t zxdh_en_get_synce_clk_stats(struct net_device *netdev, + struct ifreq *ifr, struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + return -1; + } + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_MAC_SYNCE_CLK_STATS_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + LOG_INFO("phyport = %u\n", msg->payload.hdr_to_agt.phyport); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_get_synce_clk_stats failed, err: %d\n", err); + goto free_msg; + } + + reg->num = 1; + reg->data[0] = msg->reps.synce_clk_recovery_port.clk_stats; + err = copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size); + LOG_INFO("num = %u, clk_stats: 0x%x\n", reg->num, reg->data[0]); + +free_msg: 
+ kfree(msg); + return err; +} + +int32_t zxdh_en_set_spm_port_tstamp_enable(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 2) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + return -1; + } + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_SET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + msg->payload.mac_tstamp_msg.tx_enable = reg->data[0]; + msg->payload.mac_tstamp_msg.rx_enable = reg->data[1]; + LOG_INFO("phyport = %u, tx_enable: %u, rx_enable: %u\n", + msg->payload.hdr_to_agt.phyport, + msg->payload.mac_tstamp_msg.tx_enable, + msg->payload.mac_tstamp_msg.rx_enable); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_set_spm_port_tstamp_enable failed, err: %d\n", + err); + goto free_msg; + } + + reg->num = 0; + ret = copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size); + +free_msg: + kfree(msg); + return ret; +} + +int32_t zxdh_en_get_spm_port_tstamp_enable(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = 
sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_ENABLE_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + LOG_INFO("phyport = %u\n", msg->payload.hdr_to_agt.phyport); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_get_spm_port_tstamp_enable failed, err: %d\n", + err); + goto free_msg; + } + + reg->num = 2; + reg->data[0] = msg->reps.mac_tstamp_msg.tx_enable; + reg->data[1] = msg->reps.mac_tstamp_msg.rx_enable; + LOG_INFO("tx_enable: %u, rx_enable: %u\n", + msg->reps.mac_tstamp_msg.tx_enable, + msg->reps.mac_tstamp_msg.rx_enable); + + ret = copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size); + +free_msg: + kfree(msg); + return ret; +} + +int32_t zxdh_en_set_spm_port_tstamp_mode(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 
BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + if (reg->num != 2) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_SET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + msg->payload.mac_tstamp_msg.tx_mode = reg->data[0]; + msg->payload.mac_tstamp_msg.rx_mode = reg->data[1]; + LOG_INFO("phyport = %u, tx_mode: %u, rx_mode: %u\n", + msg->payload.hdr_to_agt.phyport, + msg->payload.mac_tstamp_msg.tx_mode, + msg->payload.mac_tstamp_msg.rx_mode); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_set_spm_port_tstamp_mode failed, err: %d\n", + err); + kfree(msg); + return err; + } + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + goto err_ret; + } + kfree(msg); + return ret; + +err_ret: + kfree(msg); + return -1; +} + +int32_t zxdh_en_get_spm_port_tstamp_mode(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = 
netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_TSTAMP_MODE_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + LOG_INFO("phyport = %u\n", msg->payload.hdr_to_agt.phyport); + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_get_spm_port_tstamp_mode failed, err: %d\n", + err); + kfree(msg); + return err; + } + + reg->num = 2; + reg->data[0] = msg->reps.mac_tstamp_msg.tx_mode; + reg->data[1] = msg->reps.mac_tstamp_msg.rx_mode; + LOG_INFO("tx_mode: %u, rx_mode: %u\n", msg->reps.mac_tstamp_msg.tx_mode, + msg->reps.mac_tstamp_msg.rx_mode); + + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + goto err_ret; + } + + kfree(msg); + return 0; + +err_ret: + kfree(msg); + return -1; +} + +/* 配置时延测量功能是否打开, 维测功能 */ +int32_t zxdh_en_set_delay_statistics_enable(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + en_dev->delay_statistics_enable = reg->data[0]; + LOG_INFO("en_dev->delay_statistics_enable = %u\n", + en_dev->delay_statistics_enable); + + reg->num = 0; + 
if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_get_delay_statistics_value(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_GET; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + LOG_INFO("phyport = %u\n", msg->payload.hdr_to_agt.phyport); + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("zxdh_en_get_delay_statistics_value failed, ret: %d\n", + ret); + kfree(msg); + return ret; + } + + reg->num = 4; + reg->data[0] = (uint32_t)(msg->reps.delay_statistics_val.min_delay & + 0xffffffff); + reg->data[1] = + (uint32_t)((msg->reps.delay_statistics_val.min_delay >> 32) & + 0xffffffff); + reg->data[2] = (uint32_t)(msg->reps.delay_statistics_val.max_delay & + 0xffffffff); + reg->data[3] = + (uint32_t)((msg->reps.delay_statistics_val.max_delay >> 32) & + 0xffffffff); + LOG_INFO("delay val: min_delay: %llu, max_delay: %llu\n", + msg->reps.delay_statistics_val.min_delay, + msg->reps.delay_statistics_val.max_delay); + 
if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + goto err_ret; + } + kfree(msg); + return 0; + +err_ret: + kfree(msg); + return -1; +} + +int32_t zxdh_en_clear_delay_statistics_value(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.hdr_to_agt.op_code = AGENT_MAC_PORT_DELAY_VALUE_CLR; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; // 0~9 + LOG_INFO("phyport = %u\n", msg->payload.hdr_to_agt.phyport); + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("zxdh_en_clear_delay_statistics_value failed, ret: %d\n", + ret); + kfree(msg); + return ret; + } + + reg->num = 0; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + kfree(msg); + goto err_ret; + } + + kfree(msg); + return 0; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_local_pps_interrupt_enable(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ +#ifdef PTP_DRIVER_INTERFACE_EN + uint32_t enable; + uint32_t support; + uint32_t reg_size = sizeof(struct zxdh_en_reg); +#endif + struct zxdh_en_priv *en_priv = 
NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 1) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_get_pps_interrupt_support(en_dev, &support); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "get pps interrupt support failed!!\n"); + enable = reg->data[0]; + LOG_INFO("enable = %u\n", enable); + // not support + if (support != 1) { + reg->num = 1; + reg->data[0] = 1; // notify user not support pps interrupt + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, + reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + goto err_ret; + } + + ret = zxdh_set_local_pps_interrupt_enable(en_dev, enable); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "set local pps interrupt failed!!\n"); + + reg->num = 1; + reg->data[0] = 0; // notify user support pps interrupt + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } +#endif /* PTP_DRIVER_INTERFACE_EN */ + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_ext_pps_interrupt_enable(struct net_device *netdev, + struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ +#ifdef PTP_DRIVER_INTERFACE_EN + uint32_t pps_type; + uint32_t enable; + uint32_t support; + uint32_t reg_size = sizeof(struct zxdh_en_reg); +#endif + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev 
= &en_priv->edev; + + if (reg->num != 2) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_get_pps_interrupt_support(en_dev, &support); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "get pps interrupt support failed!!\n"); + pps_type = reg->data[0]; + enable = reg->data[1]; + LOG_INFO("pps_type = %u, enable = %u\n", pps_type, enable); + // not support + if (support != 1) { + reg->num = 1; + reg->data[0] = 1; // notify user not support pps interrupt + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, + reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + goto err_ret; + } + + ret = zxdh_set_ext_pps_interrupt_enable(en_dev, pps_type, enable); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, + "set ext pps interrupt enable failed!!\n"); + + reg->num = 1; + reg->data[0] = 0; // notify user support pps interrupt + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } +#endif /* PTP_DRIVER_INTERFACE_EN */ + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_set_pd_sel_shift(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t pd_index; + uint32_t pd_sel; + uint32_t shift; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + + if (reg->num != 3) { + LOG_ERR("Transmit failed[len = %d]!", reg->num); + goto err_ret; + } + + pd_index = reg->data[0]; + pd_sel = reg->data[1]; + shift = reg->data[2]; + LOG_INFO("pd_index = %u, pd_sel = %u, shift = %u\n", pd_index, pd_sel, + shift); + +#ifdef PTP_DRIVER_INTERFACE_EN + ret 
= zxdh_set_pd_sel_shift(en_dev, pd_index, pd_sel, shift); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "set pd sel shift failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + reg->num = 1; + reg->data[0] = 0; // notify user support pps interrupt + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +int32_t zxdh_en_get_ptp_clock_index(struct net_device *netdev, + struct ifreq *ifr, struct zxdh_en_reg *reg) +{ + uint32_t ptp_clock_index = 0; /* zero-init: copied to userspace below even when PTP_DRIVER_INTERFACE_EN is off (was an uninitialized-stack infoleak) */ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_get_ptp_clock_index(en_dev, &ptp_clock_index); + CHECK_UNEQUAL_ERR(ret, 0, -EFAULT, "get ptp clock index failed!!\n"); +#endif /* PTP_DRIVER_INTERFACE_EN */ + + reg->num = 1; + reg->data[0] = ptp_clock_index; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!!!\n"); + goto err_ret; + } + + return ret; + +err_ret: + return -1; +} + +struct zxdh_en_ptp_ioctl_table ioctl_ptp_table[] = { + { PTP_SET_CLOCK_NO, zxdh_en_set_clock_no }, + { PTP_ENABLE_PTP_ENCRYPTED_MSG, zxdh_en_enable_ptp_encrypted_msg }, + { PTP_SET_INTR_CAPTURE_TIMER, zxdh_en_set_intr_capture_timer }, + { PTP_SET_PP1S_SELECTION, zxdh_en_set_pps_selection }, + { PTP_SET_PHASE_DETECTION, zxdh_en_set_phase_detection }, + { PTP_GET_PD_VALUE, zxdh_en_get_pd_value }, + { PTP_SET_L2PTP_PORT, zxdh_en_set_l2_ptp_port }, + { PTP_SET_PTP_EC_ENABLE, zxdh_en_set_ptp_tc_enable }, + { PTP_SET_SYNCE_CLK_PORT, zxdh_en_set_synce_recovery_port }, + { 
PTP_GET_SYNCE_CLK_STATS, zxdh_en_get_synce_clk_stats }, + { PTP_SET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_set_spm_port_tstamp_enable }, + { PTP_GET_SPM_PORT_TSTAMP_ENABLE, zxdh_en_get_spm_port_tstamp_enable }, + { PTP_SET_SPM_PORT_TSTAMP_MODE, zxdh_en_set_spm_port_tstamp_mode }, + { PTP_GET_SPM_PORT_TSTAMP_MODE, zxdh_en_get_spm_port_tstamp_mode }, + { PTP_SET_DELAY_STATISTICS_ENABLE, + zxdh_en_set_delay_statistics_enable }, + { PTP_GET_DELAY_STATISTICS_VALUE, zxdh_en_get_delay_statistics_value }, + { PTP_CLR_DELAY_STATISTICS_VALUE, + zxdh_en_clear_delay_statistics_value }, + { PTP_SET_LOCAL_PPS_INTERRUPT_ENABLE, + zxdh_en_set_local_pps_interrupt_enable }, + { PTP_SET_EXT_PPS_INTERRUPT_ENABLE, + zxdh_en_set_ext_pps_interrupt_enable }, + { PTP_SET_PD_SEL_SHIFT, zxdh_en_set_pd_sel_shift }, + { PTP_GET_PTP_CLOCK_INDEX, zxdh_en_get_ptp_clock_index } +}; + +int32_t ptp_table_match_func(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg) +{ + uint32_t i = 0; + int32_t ret = 0; /* signed: handlers return int32_t and caller tests for -1 */ + uint32_t table_size = + sizeof(ioctl_ptp_table) / sizeof(ioctl_ptp_table[0]); /* was sizeof(struct zxdh_en_ioctl_table): wrong (non-PTP) element type */ + for (i = 0; i < table_size; i++) { + if ((reg->offset == ioctl_ptp_table[i].cmd) && + (ioctl_ptp_table[i].func != NULL)) { + ret = ioctl_ptp_table[i].func(netdev, ifr, reg); + break; + } + } + return ret; +} + +int32_t zxdh_en_ptp_func(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_reg *reg = NULL; + uint32_t reg_size = sizeof(struct zxdh_en_reg); + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + reg = kzalloc(reg_size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) { + LOG_ERR("copy_from_user failed!\n"); + goto err_ret; + } + + if (-1 == ptp_table_match_func(netdev, ifr, reg)) { + LOG_ERR("ptp_table_match_func failed!\n"); + goto err_ret; + } + + kfree(reg); + return 0; + +err_ret: + 
kfree(reg); + return -1; +} + +int32_t zxdh_en_pps_func(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct dh_core_dev *dh_dev = NULL; + struct zxdh_pf_device *pf_dev = NULL; + struct dh_eq_table *table = NULL; + struct dh_pf_eq_table *table_priv = NULL; + uint64_t virtaddr = 0x0; + struct dh_irq *expps = NULL; + struct dh_irq *lopps = NULL; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + +#ifdef PTP_DRIVER_INTERFACE_EN + int32_t ret = 0; +#endif /* PTP_DRIVER_INTERFACE_EN */ + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + dh_dev = en_dev->parent->parent; + pf_dev = dh_core_priv(dh_dev); + + table = &dh_dev->eq_table; + table_priv = table->priv; + + LOG_ERR("pf_dev->pci_ioremap_addr[0]: 0x%llx\n", + pf_dev->pci_ioremap_addr[0]); + + virtaddr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; +#ifdef PTP_DRIVER_INTERFACE_EN + tod_device_set_bar_virtual_addr(virtaddr, pf_dev->pcie_id); +#endif + + expps = table_priv->async_irq_tbl[3]; + lopps = table_priv->async_irq_tbl[4]; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + msg->payload.msg_pps.pcieid = pf_dev->pcie_id; + msg->payload.msg_pps.extern_pps_vector = expps->index; + msg->payload.msg_pps.local_pps_vector = lopps->index; + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_PPS, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("zxdh_en_pps_func failed, err: %d\n", err); + goto free_msg; + } + +#ifdef PTP_DRIVER_INTERFACE_EN + ret = zxdh_set_pps_interrupt_support( + en_dev, msg->reps.msg_pps.pps_intr_support); + if (unlikely(ret != 0)) { + LOG_ERR("set pps interrupt 
support failed!!\n"); + err = -EFAULT; + goto free_msg; + } +#endif /* PTP_DRIVER_INTERFACE_EN */ + +free_msg: + kfree(msg); + + return err; +} + +#ifdef ZXDH_MSGQ +int32_t zxdh_msgq_msg_send(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_device *en_dev = NULL; + struct msgq_dev *msgq_dev = NULL; + struct zxdh_en_reg *reg = NULL; + struct msgq_pkt_info pkt_info = { 0 }; + uint32_t size = sizeof(struct zxdh_en_reg); + struct reps_info reps = { 0 }; + uint32_t loop_cnt = 0; + uint32_t i = 0; + int32_t err = -2; + uint64_t start_us = 0; + uint64_t end_us = 0; + + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + en_dev = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n"); + msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n"); + + reg = kzalloc(size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, size))) { + LOG_ERR("copy_from_user failed!\n"); + goto err_ret; + } + + pkt_info.event_id = MODULE_DEMO; + pkt_info.timeout_us = 500000; + pkt_info.len = reg->data[0] + PRIV_HEADER_LEN; + pkt_info.no_reps = (reg->data[1] == 0) ? 
false : true; + loop_cnt = reg->data[2]; + if (loop_cnt == 0 || pkt_info.len > MSGQ_MAX_ADDR_LEN) { + goto err_ret; + } + + if (loop_cnt > 100000000) { + loop_cnt = 100000000; + } + + reps.len = 14000; + reps.addr = vmalloc(reps.len); + if (reps.addr == NULL) { + LOG_ERR("vmalloc failed!\n"); + goto err_ret; + } + + LOG_DEBUG("len: %d, no_reps: %d, loop_cnt: %d\n", pkt_info.len, + pkt_info.no_reps, loop_cnt); + + start_us = jiffies_to_usecs(jiffies); + for (i = 0; i < loop_cnt; ++i) { + pkt_info.addr = kzalloc(pkt_info.len, GFP_KERNEL); + if (pkt_info.addr == NULL) { + err = -3; + break; + }; + err = zxdh_msgq_send_cmd(msgq_dev, &pkt_info, &reps); + } + + end_us = jiffies_to_usecs(jiffies); + if (i != 0) { + LOG_DEBUG("exec_time: %lld us, single_time: %lld us\n", + end_us - start_us, (end_us - start_us) / i); + } + + reg->num = -err; + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, size))) { + LOG_ERR("copy_to_user failed!\n"); + } + + if (pkt_info.is_async && !pkt_info.no_reps) { + usleep_range(pkt_info.timeout_us, pkt_info.timeout_us + 100); + } + + vfree(reps.addr); +err_ret: + kfree(reg); + return err; +} + +int32_t zxdh_msgq_dev_config(struct net_device *netdev, struct ifreq *ifr) +{ + uint32_t reg_size = sizeof(struct zxdh_en_reg); + struct zxdh_en_device *en_dev = NULL; + struct msgq_dev *msgq_dev = NULL; + struct zxdh_en_reg *reg = NULL; + + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + en_dev = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_dev, NULL, -EADDRNOTAVAIL, "en_dev is null!\n"); + msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + CHECK_EQUAL_ERR(msgq_dev, NULL, -EADDRNOTAVAIL, "msgq_dev is null!\n"); + + reg = kzalloc(reg_size, GFP_KERNEL); + CHECK_EQUAL_ERR(reg, NULL, -EADDRNOTAVAIL, "reg is null!\n"); + + if (unlikely(copy_from_user(reg, ifr->ifr_ifru.ifru_data, reg_size))) { + LOG_ERR("copy_from_user failed!\n"); + goto err_ret; + } + + if (reg->data[1] 
== MSGQ_PRINT_STA) { + LOG_DEBUG("msgq_rx_pkts: %lld\n", + msgq_dev->rq_priv->stats.packets); + LOG_DEBUG("msgq_rx_kicks: %lld\n", + msgq_dev->rq_priv->stats.kicks); + LOG_DEBUG("msgq_rx_bytes: %lld\n", + msgq_dev->rq_priv->stats.bytes); + LOG_DEBUG("msgq_rx_drops: %lld\n", + msgq_dev->rq_priv->stats.drops); + LOG_DEBUG("msgq_rx_errs: %lld\n", + msgq_dev->rq_priv->stats.xdp_drops); + + LOG_DEBUG("msgq_tx_pkts: %lld\n", + msgq_dev->sq_priv->stats.packets); + LOG_DEBUG("msgq_tx_bytes: %lld\n", + msgq_dev->sq_priv->stats.bytes); + LOG_DEBUG("msgq_tx_kicks: %lld\n", + msgq_dev->sq_priv->stats.kicks); + LOG_DEBUG("msgq_tx_timeouts: %lld\n", + msgq_dev->sq_priv->stats.tx_timeouts); + LOG_DEBUG("msgq_tx_errs: %lld\n", + msgq_dev->sq_priv->stats.xdp_tx_drops); + + kfree(reg); + return 0; + } + + msgq_dev->loopback = (reg->data[0] != 0 ? true : false); + msgq_dev->print_flag = reg->data[1]; + LOG_INFO("msgq_dev->print_flag = %d\n", msgq_dev->print_flag); + LOG_INFO("msgq_dev->loopback = %d\n", msgq_dev->loopback); + + if (unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, reg, reg_size))) { + LOG_ERR("copy_to_user failed!\n"); + goto err_ret; + } + + kfree(reg); + return 0; + +err_ret: + kfree(reg); + return -1; +} +#endif + +struct zxdh_en_ioctl_table ioctl_table[] = { + { SIOCGMIIREG, zxdh_read_reg_cmd }, + { SIOCSMIIREG, zxdh_write_reg_cmd }, + { SIOCDEVPRIVATE_VQ_INFO, zxdh_get_vring_info }, + { SIOCDEVPRIVATE_SEND_FILE_PKT, zxdh_send_file_pkt }, +#ifdef ZXDH_MSGQ + { SIOCDEVPRIVATE_MSGQ_SNED, zxdh_msgq_msg_send }, + { SIOCDEVPRIVATE_MSGQ_CONFIG, zxdh_msgq_dev_config }, +#endif + { SIOCDEVPRIVATE_PTP_FUNC, zxdh_en_ptp_func }, + { SIOCDEVPRIVATE_PPS_FUNC, zxdh_en_pps_func }, +#ifdef CONFIG_DINGHAI_TSN + { SIOCDEVPRIVATE_TSN_FUNC, zxdh_en_tsn_func }, +#endif + { SIOCDEVPRIVATE_DH_TOOLS, zxdh_tools_ioctl_dispatcher }, +}; + +int32_t ioctl_table_match_func(struct net_device *netdev, struct ifreq *ifr, + int32_t cmd, + struct zxdh_en_ioctl_table *func_table, + uint32_t 
table_size) +{ + int32_t ret = 0; + uint32_t i = 0; + + CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n"); + for (i = 0; i < table_size; i++) { + if ((func_table[i].cmd == cmd) && + (func_table[i].func != NULL)) { + ret = func_table[i].func(netdev, ifr); + break; + } + } + + return ret; +} + +int32_t zxdh_en_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + uint32_t table_size = + sizeof(ioctl_table) / sizeof(struct zxdh_en_ioctl_table); + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + return ioctl_table_match_func(netdev, ifr, cmd, ioctl_table, + table_size); +} + +int32_t zxdh_en_private_ioctl(struct net_device *netdev, struct ifreq *ifr, + void *data, int cmd) +{ + uint32_t table_size = + sizeof(ioctl_table) / sizeof(struct zxdh_en_ioctl_table); + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + return ioctl_table_match_func(netdev, ifr, cmd, ioctl_table, + table_size); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.h b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.h new file mode 100644 index 000000000000..f439994ce8a4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.h @@ -0,0 +1,102 @@ +#ifndef _EN_AUX_IOCTL_H_ +#define _EN_AUX_IOCTL_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "../en_aux.h" + +#define SIOCDEVPRIVATE_WRITE_MAC (SIOCDEVPRIVATE + 1) +#define SIOCDEVPRIVATE_VQ_INFO (SIOCDEVPRIVATE + 2) +#define SIOCDEVPRIVATE_MSGQ_SNED (SIOCDEVPRIVATE + 3) +#define SIOCDEVPRIVATE_MSGQ_CONFIG (SIOCDEVPRIVATE + 4) +#define SIOCDEVPRIVATE_SEND_FILE_PKT (SIOCDEVPRIVATE + 6) +#define SIOCDEVPRIVATE_PTP_FUNC (SIOCDEVPRIVATE + 9) +#define SIOCDEVPRIVATE_PPS_FUNC (SIOCDEVPRIVATE + 10) +#define SIOCDEVPRIVATE_TSN_FUNC (SIOCDEVPRIVATE + 11) +#define SIOCDEVPRIVATE_DH_TOOLS 
(SIOCDEVPRIVATE + 13) + +#define PTP_SET_CLOCK_NO (0) +#define PTP_ENABLE_PTP_ENCRYPTED_MSG (1) +#define PTP_SET_INTR_CAPTURE_TIMER (2) +#define PTP_SET_PP1S_SELECTION (3) +#define PTP_SET_PHASE_DETECTION (4) +#define PTP_GET_PD_VALUE (5) +#define PTP_SET_L2PTP_PORT (6) +#define PTP_SET_PTP_EC_ENABLE (7) +#define PTP_SET_SYNCE_CLK_PORT (8) +#define PTP_GET_SYNCE_CLK_STATS (9) +#define PTP_SET_SPM_PORT_TSTAMP_ENABLE (10) +#define PTP_GET_SPM_PORT_TSTAMP_ENABLE (11) +#define PTP_SET_SPM_PORT_TSTAMP_MODE (12) +#define PTP_GET_SPM_PORT_TSTAMP_MODE (13) +#define PTP_SET_DELAY_STATISTICS_ENABLE (14) +#define PTP_GET_DELAY_STATISTICS_VALUE (15) +#define PTP_CLR_DELAY_STATISTICS_VALUE (16) +#define PTP_SET_LOCAL_PPS_INTERRUPT_ENABLE (17) +#define PTP_SET_EXT_PPS_INTERRUPT_ENABLE (18) +#define PTP_SET_PD_SEL_SHIFT (19) +#define PTP_GET_PTP_CLOCK_INDEX (20) + +#define PI_HDR_MAX_NUM 128 +#define GET_LOW32 0x00000000ffffffff +#define MIN_ALIGN_BYTE 64 +#define SEND_PKT_CNT_MAX 0xffffffff +#define PKT_PRINT_LINE_LEN 16 +#define PKT_PRINT_LEN_MAX (16 * 1024) + +#define CONFIG_RISC_PCS_LOOPB_OPCODE 13 +#define CONFIG_RISC_PCS_NORMAL_OPCODE 14 + +#define MSG_MODULE_DEBUG_RISC 20 + +#define MAX_ACCESS_NUM 500 +struct zxdh_en_reg { + uint32_t offset; + uint32_t num; + uint32_t data[MAX_ACCESS_NUM]; +}; + +struct risc_config_mac_msg { + uint8_t op_code; + uint8_t phyport; + uint8_t spm_speed; + uint8_t spm_fec; + uint8_t loop_enable; +}; + +struct risc_config_userspace { + uint8_t op_code; + uint8_t arg_num; + uint8_t filestr_size; + uint8_t file[100]; +}; + +struct data_packet { + void *buf; + uint32_t buf_size; +}; + +struct zxdh_en_ioctl_table { + int32_t cmd; + int32_t (*func)(struct net_device *netdev, struct ifreq *ifr); +}; + +struct zxdh_en_ptp_ioctl_table { + int32_t cmd; + int32_t (*func)(struct net_device *netdev, struct ifreq *ifr, + struct zxdh_en_reg *reg); +}; + +int32_t print_data(uint8_t *data, uint32_t len); +int32_t zxdh_en_ioctl(struct net_device *netdev, struct 
ifreq *ifr, int cmd); + +int32_t zxdh_en_private_ioctl(struct net_device *netdev, struct ifreq *ifr, + void *data, int cmd); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/priv_queue.c b/drivers/net/ethernet/dinghai/en_aux/priv_queue.c new file mode 100644 index 000000000000..5a8f58b93bad --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/priv_queue.c @@ -0,0 +1,788 @@ + +#include +#include +#include +#include +#include "priv_queue.h" + +static void poll_timer_callback(struct timer_list *this_timer) +{ + struct msgq_dev *msgq_dev = + from_timer(msgq_dev, this_timer, poll_timer); + struct msg_buff *this_msg_buff = NULL; + uint16_t i = 0; + uint32_t tx_timeouts = 0; + + if (msgq_dev == NULL) { + LOG_ERR("msgq_dev is NULL\n"); + return; + } + + for (i = 0; i < MSGQ_MAX_MSG_BUFF_NUM; ++i) { + if (msgq_dev->free_cnt == 0) { + msgq_dev->timer_in_use = false; + return; + } + this_msg_buff = &msgq_dev->msg_buff_ring[i]; + + if (!this_msg_buff->using || !this_msg_buff->need_free) { + continue; + } + + if (this_msg_buff->timeout_cnt == 0) { + *(this_msg_buff->data_len) = 0; + this_msg_buff->data = NULL; + msgq_dev->free_cnt--; + tx_timeouts++; + LOG_ERR("msg[%d] get callback out of time\n", i); + this_msg_buff->using = false; + continue; + } + this_msg_buff->timeout_cnt--; + } + + u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp); + msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts; + u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp); + + mod_timer(this_timer, jiffies + msecs_to_jiffies(TIMER_DELAY_US)); +} + +static uint32_t msgq_get_mergeable_buf_len(struct receive_queue *rq, + struct ewma_pkt_len *avg_pkt_len) +{ + const size_t hdr_len = PRIV_HEADER_LEN; + uint32_t len = 0; + + len = hdr_len + clamp_t(uint32_t, ewma_pkt_len_read(avg_pkt_len), + rq->min_buf_len, PAGE_SIZE - hdr_len); + + return ALIGN(len, L1_CACHE_BYTES); +} + +static int32_t msgq_add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +{ + 
struct page_frag *alloc_frag = &rq->alloc_frag; + char *buf = NULL; + void *ctx = NULL; + int32_t err = 0; + uint32_t len = 0; + uint32_t hole = 0; + + len = msgq_get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); + if (unlikely(!dh_skb_page_frag_refill(len, alloc_frag, gfp))) { + return -ENOMEM; + } + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + get_page(alloc_frag->page); + alloc_frag->offset += len; + hole = alloc_frag->size - alloc_frag->offset; + if (hole < len) { + len += hole; + alloc_frag->offset += hole; + } + + sg_init_one(rq->sg, buf, len); + ctx = (void *)(unsigned long)len; + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); + if (err < 0) { + put_page(virt_to_head_page(buf)); + } + + return err; +} + +static bool msgq_try_fill_recv(struct receive_queue *rq, gfp_t gfp) +{ + int32_t err = 0; + bool oom = 0; + unsigned long flags = 0; + + do { + err = msgq_add_recvbuf_mergeable(rq, gfp); + oom = err == -ENOMEM; + if (err) { + break; + } + } while (rq->vq->num_free); + + if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) { + flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); + rq->stats.kicks++; + u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); + } + + return !oom; +} + +uint32_t msgq_mergeable_min_buf_len(struct virtqueue *vq) +{ + const uint32_t hdr_len = PRIV_HEADER_LEN; + uint32_t rq_size = virtqueue_get_vring_size(vq); + uint32_t min_buf_len = DIV_ROUND_UP(BUFF_LEN, rq_size); + + return max(max(min_buf_len, hdr_len) - hdr_len, + (uint32_t)GOOD_PACKET_LEN); +} + +int32_t msgq_privq_init(struct msgq_dev *msgq_dev, struct net_device *netdev) +{ + struct receive_queue *rq = msgq_dev->rq_priv; + struct send_queue *sq = msgq_dev->sq_priv; + + rq->pages = NULL; + rq->min_buf_len = msgq_mergeable_min_buf_len(rq->vq); + + netif_napi_add(netdev, &rq->napi, zxdh_msgq_poll); + netif_napi_add_tx_weight(netdev, &sq->napi, NULL, NAPI_POLL_WEIGHT); + sg_init_table(rq->sg, 
ARRAY_SIZE(rq->sg)); + ewma_pkt_len_init(&rq->mrg_avg_pkt_len); + sg_init_table(sq->sg, ARRAY_SIZE(sq->sg)); + + u64_stats_init(&rq->stats.syncp); + u64_stats_init(&sq->stats.syncp); + + if (!msgq_try_fill_recv(rq, GFP_KERNEL)) { + LOG_ERR("msgq_try_fill_recv failed\n"); + netif_napi_del(&msgq_dev->rq_priv->napi); + netif_napi_del(&msgq_dev->sq_priv->napi); + return MSGQ_RET_ERR_CHANNEL_NOT_READY; + } + + msgq_dev->msgq_enable = true; + virtnet_napi_enable(rq->vq, &rq->napi); + LOG_DEBUG("success\n"); + return MSGQ_RET_OK; +} + +int32_t zxdh_msgq_init(struct zxdh_en_device *en_dev) +{ + struct msgq_dev *msgq_dev = NULL; + int32_t idx = 0; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + en_dev->msgq_dev = kzalloc(sizeof(struct msgq_dev), GFP_KERNEL); + ZXDH_CHECK_PTR_RETURN(en_dev->msgq_dev); + + idx = en_dev->max_queue_pairs - ZXDH_PQ_PAIRS_NUM; + msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + msgq_dev->sq_priv = &en_dev->sq[idx]; + msgq_dev->rq_priv = &en_dev->rq[idx]; + msgq_dev->msgq_vfid = (uint16_t)VQM_VFID(en_dev->vport); + msgq_dev->msgq_rqid = (uint16_t)msgq_dev->rq_priv->vq->phy_index; + + dpp_vport_create_by_vqm_vfid(&pf_info, RISCV_COMMON_VFID); + spin_lock_init(&msgq_dev->sn_lock); + spin_lock_init(&msgq_dev->tx_lock); + msgq_dev->mlock = kzalloc(sizeof(struct mutex), GFP_KERNEL); + ZXDH_CHECK_PTR_GOTO_ERR(msgq_dev->mlock, err_mutex); + mutex_init(msgq_dev->mlock); + timer_setup(&msgq_dev->poll_timer, poll_timer_callback, 0); + + err = msgq_privq_init(msgq_dev, en_dev->netdev); + ZXDH_CHECK_RET_GOTO_ERR(err, free_msgq, "msgq_privq_init failed: %d\n", + err); + return 0; + +free_msgq: + del_timer_sync(&msgq_dev->poll_timer); + mutex_destroy(msgq_dev->mlock); + ZXDH_FREE_PTR(msgq_dev->mlock); +err_mutex: + ZXDH_FREE_PTR(msgq_dev); + return err; +} + +void msgq_privq_uninit(struct msgq_dev *msgq_dev) +{ + msgq_dev->msgq_enable = false; + napi_disable(&msgq_dev->rq_priv->napi); + 
netif_napi_del(&msgq_dev->rq_priv->napi); + netif_napi_del(&msgq_dev->sq_priv->napi); + del_timer_sync(&msgq_dev->poll_timer); +} + +void zxdh_msgq_exit(struct zxdh_en_device *en_dev) +{ + struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + + if (msgq_dev == NULL) { + LOG_ERR("msgq_dev is null!\n"); + return; + } + + msgq_privq_uninit(msgq_dev); + mutex_destroy(msgq_dev->mlock); + ZXDH_FREE_PTR(msgq_dev->mlock); + ZXDH_FREE_PTR(msgq_dev); + LOG_INFO("zxdh_msg_chan_pkt remove success\n"); +} + +void msgq_print_data(uint8_t *buf, uint32_t len, uint8_t flag) +{ + uint32_t print_len = 0; + + if (flag == MSGQ_PRINT_HDR) { + print_len = PRIV_HEADER_LEN; + } else if (flag == MSGQ_PRINT_128B) { + print_len = len > 128 ? 128 : len; + } else if (flag == MSGQ_PRINT_ALL) { + print_len = len; + } + print_data(buf, print_len); +} + +static int32_t zxdh_msg_para_check(struct msgq_pkt_info *msg, + struct reps_info *reps) +{ + ZXDH_CHECK_PTR_RETURN(msg); + ZXDH_CHECK_PTR_RETURN(msg->addr); + + if ((msg->len == 0) || (msg->len > MSGQ_MAX_ADDR_LEN)) { + LOG_ERR("invalid data_len: %d\n", msg->len); + goto free_addr; + } + + if (msg->event_id >= MSG_MODULE_NUM) { + LOG_ERR("invalid event_id\n"); + goto free_addr; + } + + if (msg->no_reps) { + return MSGQ_RET_OK; + } + + ZXDH_CHECK_PTR_GOTO_ERR(reps, free_addr); + ZXDH_CHECK_PTR_GOTO_ERR(reps->addr, free_addr); + if (reps->len == 0) { + LOG_ERR("invalid reps_len: %d\n", reps->len); + goto free_addr; + } + + return MSGQ_RET_OK; +free_addr: + ZXDH_FREE_PTR(msg->addr); + return MSGQ_RET_ERR_INVALID_PARA; +} + +static int32_t zxdh_sequence_num_get(struct msgq_dev *msgq_dev, + uint16_t *sequence_num) +{ + uint16_t sn = 0; + uint16_t loop = 0; + + spin_lock(&msgq_dev->sn_lock); + sn = msgq_dev->sequence_num; + + for (loop = 0; loop < MSGQ_MAX_MSG_BUFF_NUM; loop++) { + if (!msgq_dev->msg_buff_ring[sn].using) { + *sequence_num = sn; + msgq_dev->msg_buff_ring[sn].using = true; + msgq_dev->msg_buff_ring[sn].valid = false; + 
msgq_dev->free_cnt++; + SEQUENCE_NUM_ADD(sn); + break; + } + SEQUENCE_NUM_ADD(sn); + } + + msgq_dev->sequence_num = sn; + spin_unlock(&msgq_dev->sn_lock); + + if (loop == MSGQ_MAX_MSG_BUFF_NUM) { + return MSGQ_RET_ERR_CHAN_BUSY; + } + + return MSGQ_RET_OK; +} + +static int32_t page_send_cmd(struct send_queue *sq, uint8_t *buf, + uint16_t buf_len, uint8_t print) +{ + uint16_t i = 0; + int32_t err = 0; + uint16_t total_sg = 0; + uint16_t last_buff_len = 0; + + if (print != 0) { + LOG_DEBUG("send pkt start\n"); + msgq_print_data(buf, buf_len, print); + } + + total_sg = buf_len / BUFF_LEN; + last_buff_len = buf_len % BUFF_LEN; + if (last_buff_len != 0) { + total_sg += 1; + } + + sg_init_table(sq->sg, total_sg); + for (i = 0; i < total_sg; ++i) { + if (i == (total_sg - 1)) { + sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN), + ((last_buff_len != 0) ? (last_buff_len) : + (BUFF_LEN))); + } else { + sg_set_buf(&sq->sg[i], buf + (i * BUFF_LEN), BUFF_LEN); + } + } + + err = virtqueue_add_outbuf(sq->vq, sq->sg, total_sg, buf, GFP_ATOMIC); + ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, + "virtqueue_add_outbuf failed: %d\n", err); + + if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + return err; + +free_addr: + return MSGQ_RET_ERR_VQ_BROKEN; +} + +static int32_t zxdh_msgq_pkt_send(struct msgq_dev *msgq_dev, + struct msgq_pkt_info *pkt_info, uint16_t sn) +{ + struct priv_queues_net_hdr *hdr = + (struct priv_queues_net_hdr *)pkt_info->addr; + void *buf = NULL; + uint32_t len = 0; + + if (spin_trylock(&msgq_dev->tx_lock)) { + while ((buf = virtqueue_get_buf(msgq_dev->sq_priv->vq, &len)) != + NULL) { + ZXDH_FREE_PTR(buf); + }; + spin_unlock(&msgq_dev->tx_lock); + } + + memset(hdr, 0, PRIV_HEADER_LEN); + hdr->tx_port = TX_PORT_NP; + hdr->pd_len = PRIV_HEADER_LEN / 2; + hdr->pi_hdr.pi_type = DEFAULT_PI_TYPE; + hdr->pi_hdr.pkt_type = CONTROL_MSG_TYPE; + 
hdr->pi_hdr.vfid_dst = htons(RISCV_COMMON_VFID); + hdr->pi_hdr.qid_dst = htons(RISCV_COMMON_QID); + hdr->pi_hdr.vfid_src = htons(msgq_dev->msgq_vfid); + hdr->pi_hdr.qid_src = htons(msgq_dev->msgq_rqid); + hdr->pi_hdr.event_id = pkt_info->event_id; + hdr->pi_hdr.sequence_num = sn; + if (sn == NO_REPS_SEQUENCE_NUM) { + hdr->pi_hdr.msg_type = NO_REPS_MSG; + } + if (msgq_dev->loopback) { + hdr->pi_hdr.event_id = MODULE_MSGQ; + hdr->pi_hdr.vfid_dst = hdr->pi_hdr.vfid_src; + hdr->pi_hdr.qid_dst = hdr->pi_hdr.qid_src; + } + + return page_send_cmd(msgq_dev->sq_priv, pkt_info->addr, pkt_info->len, + msgq_dev->print_flag); +} + +int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev, + struct msgq_pkt_info *pkt_info, + struct reps_info *reps) +{ + uint16_t sn = NO_REPS_SEQUENCE_NUM; + uint16_t sync_poll_cnt = 0; + int32_t err = 0; + int32_t i = 0; + uint32_t tx_timeouts = 0; + uint32_t tx_errs = 0; + + err = zxdh_msg_para_check(pkt_info, reps); + ZXDH_CHECK_RET_GOTO_ERR(err, tx_err, "zxdh_msg_para_check failed: %d\n", + err); + + ZXDH_CHECK_PTR_GOTO_ERR(msgq_dev, free_addr); + CHECK_CHANNEL_USABLE(msgq_dev, err, free_addr); + + if (!pkt_info->no_reps) { + err = zxdh_sequence_num_get(msgq_dev, &sn); + ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, + "zxdh_sequence_num_get failed: %d\n", + err); + } + + mutex_lock(msgq_dev->mlock); + err = zxdh_msgq_pkt_send(msgq_dev, pkt_info, sn); + mutex_unlock(msgq_dev->mlock); + ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, + "zxdh_msgq_pkt_send failed: %d\n", err); + + if (pkt_info->no_reps) { + return MSGQ_RET_OK; + } + + msgq_dev->msg_buff_ring[sn].data = &reps->addr; + msgq_dev->msg_buff_ring[sn].data_len = &reps->len; + msgq_dev->msg_buff_ring[sn].timeout_cnt = + pkt_info->timeout_us / TIMER_DELAY_US; + if (!pkt_info->is_async) { + sync_poll_cnt = pkt_info->timeout_us / 10; + for (i = 0; i < sync_poll_cnt; ++i) { + usleep_range(5, 10); + if (!msgq_dev->msg_buff_ring[sn].using && + msgq_dev->msg_buff_ring[sn].valid) { + return MSGQ_RET_OK; + } + } + 
err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME; + goto free_sn; + } else { + msgq_dev->msg_buff_ring[sn].need_free = true; + if (!msgq_dev->timer_in_use) { + mod_timer(&msgq_dev->poll_timer, + jiffies + usecs_to_jiffies(TIMER_DELAY_US)); + msgq_dev->timer_in_use = true; + } + } + return MSGQ_RET_OK; + +free_addr: + ZXDH_FREE_PTR(pkt_info->addr); +tx_err: + tx_errs++; +free_sn: + if ((sn != NO_REPS_SEQUENCE_NUM) && (sn < MSGQ_MAX_MSG_BUFF_NUM)) { + LOG_ERR("timeout, sn[%d] is free\n", sn); + msgq_dev->msg_buff_ring[sn].using = false; + tx_timeouts++; + msgq_dev->free_cnt--; + } + u64_stats_update_begin(&msgq_dev->sq_priv->stats.syncp); + msgq_dev->sq_priv->stats.xdp_tx_drops += tx_errs; + msgq_dev->sq_priv->stats.tx_timeouts += tx_timeouts; + u64_stats_update_end(&msgq_dev->sq_priv->stats.syncp); + return err; +} + +static void zxdh_swap_dst_and_src(uint16_t *dst, uint16_t *src) +{ + uint16_t temp = 0; + + temp = *dst; + *dst = *src; + *src = temp; +} + +static int32_t zxdh_pi_header_check(struct pi_header *hdr) +{ + if (hdr->pi_type != DEFAULT_PI_TYPE) { + LOG_ERR("INVALID_PI_TYPE: %d\n", hdr->pi_type); + return MSGQ_RET_ERR_CALLBACK_FAIL; + } + + if (hdr->pkt_type != CONTROL_MSG_TYPE) { + LOG_ERR("INVALID_PKT_TYPE: %d\n", hdr->pkt_type); + return MSGQ_RET_ERR_CALLBACK_FAIL; + } + + if (hdr->msg_type > NO_REPS_MSG) { + LOG_ERR("INVALID_MSG_TYPE: %d\n", hdr->msg_type); + return MSGQ_RET_ERR_CALLBACK_FAIL; + } + + if (hdr->event_id >= MSG_MODULE_NUM) { + LOG_ERR("INVALID_MSG_MODULE_ID: %d\n", hdr->event_id); + return MSGQ_RET_ERR_CALLBACK_FAIL; + } + + if (hdr->err_code != MSGQ_RET_OK) { + LOG_ERR("MSG_ERR_CODE: %d\n", hdr->err_code); + return MSGQ_RET_ERR_CALLBACK_FAIL; + } + + return MSGQ_RET_OK; +} + +static void rx_free_pages(struct msgq_dev *msgq_dev, void *buf, uint32_t len) +{ + if (msgq_dev->print_flag == MSGQ_PRINT_ALL) { + print_data((uint8_t *)buf, len); + LOG_DEBUG("buf: 0x%llx refcnt: %d\n", (uint64_t)buf, + page_ref_count(virt_to_head_page(buf))); + } + 
put_page(virt_to_head_page(buf)); +} + +static int32_t zxdh_response_msg_handle(struct msgq_dev *msgq_dev, + struct virtnet_rq_stats *stats, + uint16_t num_buf, void *buf, + uint32_t len) +{ + struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf; + uint16_t sn = hdr->pi_hdr.sequence_num; + int32_t err = MSGQ_RET_OK; + struct msg_buff *tmp_buff = NULL; + uint32_t max_len = 0; + uint32_t pkt_len = 0; + + if (sn >= MSGQ_MAX_MSG_BUFF_NUM) { + LOG_ERR("INVALID_SEQUENCE_NUM: %d\n", sn); + err = MSGQ_RET_ERR; + goto put_page; + } + + tmp_buff = &msgq_dev->msg_buff_ring[sn]; + if (!tmp_buff->using) { + LOG_ERR("buff[%d] is free\n", sn); + err = MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME; + goto put_page; + } + ZXDH_CHECK_PTR_GOTO_ERR(*tmp_buff->data, put_page); + + max_len = *(tmp_buff->data_len); + pkt_len = len - PRIV_HEADER_LEN; + if (pkt_len > max_len) { + LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", pkt_len, + max_len); + err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH; + goto put_page; + } + + memcpy(*tmp_buff->data, (uint8_t *)buf + PRIV_HEADER_LEN, pkt_len); + while (--num_buf != 0) { + rx_free_pages(msgq_dev, buf, len); + buf = virtqueue_get_buf(msgq_dev->rq_priv->vq, &len); + if (unlikely(buf == NULL)) { + LOG_ERR("msgq rx error: %dth buffers missing\n", + num_buf); + stats->drops++; + err = MSGQ_RET_ERR_RX_INVALID_NUM_BUF; + goto out; + } + + if ((len + pkt_len) > max_len) { + LOG_ERR("buf_len: %d > tmp_buff->data_len: %d\n", + len + pkt_len, max_len); + err = MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH; + goto put_page; + } + + stats->bytes += len; + memcpy((*tmp_buff->data) + pkt_len, buf, len); + pkt_len += len; + } + *(tmp_buff->data_len) = pkt_len; + tmp_buff->valid = true; + stats->xdp_drops--; + +put_page: + put_page(virt_to_head_page(buf)); +out: + if (tmp_buff != NULL) { + tmp_buff->using = false; + tmp_buff->data = NULL; + msgq_dev->free_cnt--; + } + return err; +} + +static int32_t zxdh_callback_msg_handle(struct zxdh_en_device *en_dev, + uint8_t *buf_addr, 
uint32_t buf_len) +{ + struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + int32_t err = BAR_MSG_ERR_MODULE_NOEXIST; + uint8_t *reps_addr = NULL; + uint16_t reps_len = MAX_PACKET_LEN; + uint16_t hdr_len = PRIV_HEADER_LEN; + struct priv_queues_net_hdr *hdr = NULL; + + hdr = (struct priv_queues_net_hdr *)buf_addr; + if (hdr->pi_hdr.msg_type == NO_REPS_MSG) { + return call_msg_recv_func_tbl(hdr->pi_hdr.event_id, + buf_addr + hdr_len, + buf_len - hdr_len, NULL, 0, + en_dev); + } + + reps_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC); + ZXDH_CHECK_PTR_RETURN(reps_addr); + memcpy(reps_addr, buf_addr, hdr_len); + hdr = (struct priv_queues_net_hdr *)reps_addr; + + if (hdr->pi_hdr.event_id < MSG_MODULE_NUM) { + err = call_msg_recv_func_tbl(hdr->pi_hdr.event_id, + buf_addr + hdr_len, + buf_len - hdr_len, + reps_addr + hdr_len, &reps_len, + en_dev); + hdr->pi_hdr.msg_type = ACK_MSG; + } + + if (err == BAR_MSG_ERR_MODULE_NOEXIST) { + hdr->pi_hdr.err_code = ERR_CODE_EVENT_UNREGIST; + } else if ((err != MSGQ_RET_OK) || (reps_len > MAX_PACKET_LEN)) { + LOG_ERR("get reps failed, reps_len:%d\n", reps_len); + hdr->pi_hdr.err_code = ERR_CODE_EVENT_FAIL; + } + + zxdh_swap_dst_and_src(&hdr->pi_hdr.vfid_dst, &hdr->pi_hdr.vfid_src); + zxdh_swap_dst_and_src(&hdr->pi_hdr.qid_dst, &hdr->pi_hdr.qid_src); + + return page_send_cmd(msgq_dev->sq_priv, reps_addr, reps_len + hdr_len, + msgq_dev->print_flag); +} + +static void msgq_receive_buf(struct zxdh_en_device *en_dev, + struct receive_queue *rq, void *buf, uint32_t len, + void **ctx, struct virtnet_rq_stats *stats) +{ + struct priv_queues_net_hdr *hdr = (struct priv_queues_net_hdr *)buf; + uint16_t num_buf = vqm16_to_cpu(en_dev, hdr->num_buffers); + struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + int32_t err = MSGQ_RET_OK; + uint8_t *tmp_addr = NULL; + uint32_t tmp_addr_len = len; + bool free_tmp_addr = false; + + if (msgq_dev->print_flag != 0) { + LOG_DEBUG("receive pkt start, num_buf: %d\n", num_buf); + 
msgq_print_data((uint8_t *)buf, len, msgq_dev->print_flag); + } + + stats->xdp_drops++; + err = zxdh_pi_header_check(&hdr->pi_hdr); + ZXDH_CHECK_RET_GOTO_ERR(err, free_pages, "invalid pi_header\n"); + + if (hdr->pi_hdr.msg_type == ACK_MSG) { + err = zxdh_response_msg_handle(msgq_dev, stats, num_buf, buf, + len); + goto free_addr; + } else if (num_buf == 1) { + err = zxdh_callback_msg_handle(en_dev, (uint8_t *)buf, len); + } else { + tmp_addr = kzalloc(MSGQ_MAX_ADDR_LEN, GFP_ATOMIC); + ZXDH_CHECK_PTR_GOTO_ERR(tmp_addr, free_pages); + memcpy(tmp_addr, buf, tmp_addr_len); + free_tmp_addr = true; + while (--num_buf != 0) { + rx_free_pages(msgq_dev, buf, len); + buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, ctx); + if (unlikely(buf == NULL)) { + LOG_ERR("msgq rx error: %dth buffers missing\n", + num_buf); + stats->drops++; + goto free_addr; + } + + memcpy(tmp_addr + tmp_addr_len, buf, len); + tmp_addr_len += len; + } + err = zxdh_callback_msg_handle(en_dev, tmp_addr, tmp_addr_len); + } + stats->xdp_drops--; + +free_pages: + put_page(virt_to_head_page(buf)); +free_addr: + if (free_tmp_addr) { + ZXDH_FREE_PTR(tmp_addr); + } + stats->bytes += tmp_addr_len; + return; +} + +static int32_t zxdh_msgq_receive(struct receive_queue *rq, int32_t budget) +{ + struct zxdh_en_device *en_dev = rq->vq->en_dev; + struct virtnet_rq_stats stats = {}; + uint32_t len = 0; + void *buf = NULL; + int32_t i = 0; + void *ctx = NULL; + uint64_t *item = NULL; + + while (stats.packets < budget && + (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx))) { + msgq_receive_buf(en_dev, rq, buf, len, &ctx, &stats); + stats.packets++; + } + + if (rq->vq->num_free > + min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) { + if (!msgq_try_fill_recv(rq, GFP_ATOMIC)) { + LOG_ERR("msgq_try_fill_recv failed\n"); + } + } + + u64_stats_update_begin(&rq->stats.syncp); + for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { + size_t offset = virtnet_rq_stats_desc[i].offset; + item = (uint64_t *)((uint8_t 
*)&rq->stats + offset); + *item += *(uint64_t *)((uint8_t *)&stats + offset); + } + u64_stats_update_end(&rq->stats.syncp); + + return stats.packets; +} + +/* TODO 出现not a head问题,原因待分析 +static void free_old_xmit_bufs(struct send_queue *sq) +{ + uint32_t len = 0; + uint32_t packets = 0; + uint32_t bytes = 0; + void *buf = NULL; + + while ((buf = virtqueue_get_buf(sq->vq, &len)) != NULL) + { + bytes += len; + packets++; + ZXDH_FREE_PTR(buf); + } + + if (!packets) + { + return; + } + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + u64_stats_update_end(&sq->stats.syncp); +}*/ + +static void msgq_poll_cleantx(struct receive_queue *rq) +{ + struct zxdh_en_device *en_dev = rq->vq->en_dev; + struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + struct send_queue *sq = msgq_dev->sq_priv; + + if (!sq->napi.weight) { + return; + } + + if (spin_trylock(&msgq_dev->tx_lock)) { + virtqueue_disable_cb(sq->vq); + //free_old_xmit_bufs(sq); + spin_unlock(&msgq_dev->tx_lock); + } +} + +int zxdh_msgq_poll(struct napi_struct *napi, int budget) +{ + struct receive_queue *rq = + container_of(napi, struct receive_queue, napi); + struct zxdh_en_device *en_dev = rq->vq->en_dev; + struct msgq_dev *msgq_dev = (struct msgq_dev *)en_dev->msgq_dev; + uint32_t received = 0; + + if (msgq_dev->msgq_enable) { + msgq_poll_cleantx(rq); + received = zxdh_msgq_receive(rq, budget); + } + + if (received < budget) { + virtqueue_napi_complete(napi, rq->vq, received); + } + + return received; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/priv_queue.h b/drivers/net/ethernet/dinghai/en_aux/priv_queue.h new file mode 100644 index 000000000000..4a9333807520 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/priv_queue.h @@ -0,0 +1,192 @@ +#ifndef __ZXDH_PRIV_QUEUE_H__ +#define __ZXDH_PRIV_QUEUE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include "queue.h" +#include 
"../en_aux.h" +#include "../../dinghai/en_np/table/include/dpp_tbl_api.h" + +#define MSGQ_TEST 1 + +#define MSGQ_RET_OK 0 +#define MSGQ_RET_ERR (-1) +#define MSGQ_RET_ERR_NULL_PTR (-2) +#define MSGQ_RET_ERR_INVALID_PARA (-3) +#define MSGQ_RET_ERR_CHANNEL_NOT_READY (-5) +#define MSGQ_RET_ERR_CHAN_BUSY (-6) +#define MSGQ_RET_ERR_VQ_BROKEN (-7) +#define MSGQ_RET_ERR_CALLBACK_OUT_OF_TIME (-8) +#define MSGQ_RET_ERR_CALLBACK_FAIL (-9) +#define MSGQ_RET_ERR_REPS_LEN_NOT_ENOUGH (-10) +#define MSGQ_RET_ERR_RX_INVALID_NUM_BUF (-11) + +struct reps_info { + uint32_t len; + uint8_t *addr; +}; + +struct msgq_pkt_info { + uint32_t timeout_us; + uint16_t event_id; + bool is_async; + bool no_reps; + uint8_t msg_priority; + uint8_t rsv; + uint32_t len; + uint8_t *addr; +} __attribute__((packed)); + +/* msg_chan_pkt Definitions */ +#define MAX_PACKET_LEN (MSGQ_MAX_ADDR_LEN - PRIV_HEADER_LEN) +#define MSGQ_MAX_ADDR_LEN 14000 +#define NO_REPS_SEQUENCE_NUM 0x8000 + +#define TIMER_DELAY_US 100 +#define MSGQ_MAX_MSG_BUFF_NUM 1024 +#define BUFF_LEN 4096 + +#define PRIV_HEADER_LEN sizeof(struct priv_queues_net_hdr) +#define DEFAULT_PI_TYPE 0x00 /*NP*/ +#define CONTROL_MSG_TYPE 0x1f +#define NEED_REPS_MSG 0x00 +#define ACK_MSG 0x01 +#define NO_REPS_MSG 0x02 + +#define RISCV_COMMON_VFID (1192) +#define RISCV_COMMON_QID (4092) + +enum msgq_err_code { + ERR_CODE_INVALID_EVENTID = 1, + ERR_CODE_EVENT_UNREGIST, + ERR_CODE_INVALID_ACK, + ERR_CODE_EVENT_FAIL, + ERR_CODE_INVALID_REPS_LEN, + ERR_CODE_PEER_BROKEN, +}; + +struct pi_header { + uint8_t pi_type; + uint8_t pkt_type; + uint16_t event_id; + uint16_t vfid_dst; + uint16_t qid_dst; + uint16_t vfid_src; + uint16_t qid_src; + uint16_t sequence_num; + uint8_t msg_priority; + uint8_t msg_type; + uint8_t err_code; + uint8_t rsv[3]; +}; + +struct msgq_pi_info { + uint16_t event_id; + uint16_t vfid_dst; + uint16_t qid_dst; + uint16_t vfid_src; + uint16_t qid_src; + uint16_t sequence_num; +} __attribute__((packed)); + +struct priv_queues_net_hdr { + 
uint8_t tx_port; + uint8_t pd_len; + uint8_t num_buffers; + uint8_t rsv; + struct pi_header pi_hdr; +}; + +struct msg_buff { + bool using; + bool valid; + bool need_free; + uint32_t timeout_cnt; + uint8_t **data; + uint32_t *data_len; +} __attribute__((packed)); + +#define MSGQ_PRINT_HDR 1 +#define MSGQ_PRINT_128B 2 +#define MSGQ_PRINT_ALL 3 +#define MSGQ_PRINT_STA 4 + +struct msgq_dev { + bool msgq_enable; + bool timer_in_use; + bool loopback; + uint8_t print_flag; + uint16_t sequence_num; + uint16_t free_cnt; + uint16_t msgq_vfid; + uint16_t msgq_rqid; + struct send_queue *sq_priv; + struct receive_queue *rq_priv; + struct mutex *mlock; + struct spinlock sn_lock; + struct spinlock tx_lock; + struct msg_buff msg_buff_ring[MSGQ_MAX_MSG_BUFF_NUM]; + struct timer_list poll_timer; +} __attribute__((packed)); + +#define CHECK_CHANNEL_USABLE(msgq, ret, err) \ + do { \ + if (!(msgq)->msgq_enable) { \ + LOG_ERR("msgq unable\n"); \ + ret = MSGQ_RET_ERR_CHANNEL_NOT_READY; \ + goto err; \ + } \ + } while (0) + +#define ZXDH_CHECK_PTR_RETURN(ptr) \ + do { \ + if (unlikely((ptr) == NULL)) { \ + LOG_ERR("null pointer\n"); \ + return MSGQ_RET_ERR_NULL_PTR; \ + } \ + } while (0) + +#define ZXDH_CHECK_PTR_GOTO_ERR(ptr, err) \ + do { \ + if (unlikely((ptr) == NULL)) { \ + LOG_ERR("null pointer\n"); \ + goto err; \ + } \ + } while (0) + +#define ZXDH_FREE_PTR(ptr) \ + do { \ + if ((ptr) != NULL) { \ + kfree(ptr); \ + (ptr) = NULL; \ + } \ + } while (0) + +#define SEQUENCE_NUM_ADD(id) \ + do { \ + (id)++; \ + (id) %= MSGQ_MAX_MSG_BUFF_NUM; \ + } while (0) + +int32_t zxdh_msgq_init(struct zxdh_en_device *en_dev); +void zxdh_msgq_exit(struct zxdh_en_device *en_dev); +int32_t print_data(uint8_t *data, uint32_t len); +int32_t zxdh_msgq_send_cmd(struct msgq_dev *msgq_dev, + struct msgq_pkt_info *pkt_info, + struct reps_info *reps); +int zxdh_msgq_poll(struct napi_struct *napi, int budget); +int32_t msgq_privq_init(struct msgq_dev *msgq_dev, struct net_device *netdev); +void 
msgq_privq_uninit(struct msgq_dev *msgq_dev); + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_PRIV_QUEUE_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_aux/queue.c b/drivers/net/ethernet/dinghai/en_aux/queue.c new file mode 100644 index 000000000000..51312678867d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/queue.c @@ -0,0 +1,3832 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../en_aux.h" +#include "queue.h" +#include "en_1588_pkt_proc.h" +#include "../en_ethtool/ethtool.h" +#include "zxdh_tools/zxdh_tools_ioctl.h" + +#ifdef ZXDH_MSGQ +#include "priv_queue.h" +#endif + +#define RCV_1588_MSG_BIT 27 + +/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ +#define ZXDH_XDP_HEADROOM 256 + +/* Separating two types of XDP xmit */ +#define ZXDH_XDP_TX BIT(0) +#define ZXDH_XDP_REDIR BIT(1) +#define ZXDH_XDP_FLAG BIT(0) + +static uint32_t features_table[] = { + ZXDH_NET_F_MRG_RXBUF, ZXDH_NET_F_STATUS, ZXDH_NET_F_CTRL_VQ, + ZXDH_NET_F_MQ, ZXDH_RING_F_INDIRECT_DESC, ZXDH_RING_F_EVENT_IDX, + ZXDH_F_VERSION_1, ZXDH_F_RING_PACKED +}; + +static bool is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & ZXDH_XDP_FLAG; +} + +static void *xdp_to_ptr(struct xdp_frame *ptr) +{ + return (void *)((unsigned long)ptr | ZXDH_XDP_FLAG); +} + +static struct xdp_frame *ptr_to_xdp(void *ptr) +{ + return (struct xdp_frame *)((unsigned long)ptr & ~ZXDH_XDP_FLAG); +} + +static unsigned int zxdh_en_get_headroom(struct zxdh_en_device *en_dev) +{ + return en_dev->xdp_enabled ? ZXDH_XDP_HEADROOM : 0; +} + +/* We copy the packet for XDP in the following cases: + * + * 1) Packet is scattered across multiple rx buffers. + * 2) Headroom space is insufficient. + * + * This is inefficient but it's a temporary condition that + * we hit right after XDP is enabled and until queue is refilled + * with large buffers with sufficient headroom - so it should affect + * at most queue size packets. 
+ * Afterwards, the conditions to enable + * XDP should preclude the underlying device from sending packets + * across multiple buffers (num_buf > 1), and we make sure buffers + * have enough headroom. + */ +static struct page *xdp_linearize_page(struct receive_queue *rq, u16 *num_buf, + struct page *p, int offset, int page_off, + unsigned int *len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) + return NULL; + + memcpy(page_address(page) + page_off, page_address(p) + offset, *len); + page_off += *len; + + while (--*num_buf) { + int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int buflen; + void *buf; + int off; + + buf = virtqueue_get_buf(rq->vq, &buflen); + if (unlikely(!buf)) + goto err_buf; + + p = virt_to_head_page(buf); + off = buf - page_address(p); + + /* guard against a misconfigured or uncooperative backend that + * is sending packet larger than the MTU. + */ + if ((page_off + buflen + tailroom) > PAGE_SIZE) { + put_page(p); + goto err_buf; + } + + memcpy(page_address(page) + page_off, page_address(p) + off, + buflen); + page_off += buflen; + put_page(p); + } + + /* Headroom does not contribute to packet length */ + *len = page_off - ZXDH_XDP_HEADROOM; + return page; +err_buf: + __free_pages(page, 0); + return NULL; +} + +void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index, + uint32_t desc_num) +{ + struct vring_virtqueue *vvq = to_vvq(vq); + struct vring_packed_desc *desc = NULL; + uint32_t i = 0; + uint32_t j = 0; + + LOG_INFO("phy_index : %d\n", vq->phy_index); + LOG_INFO("num free : %d\n", vq->num_free); + LOG_INFO("vring address : 0x%llx\n", (uint64_t)&vvq->packed.vring); + LOG_INFO("vring size : %d\n", vvq->packed.vring.num); + LOG_INFO("last_used_idx : %d\n", vvq->last_used_idx); + LOG_INFO("avail_wrap_counter: %d\n", vvq->packed.avail_wrap_counter); + LOG_INFO("next_avail_idx : %d\n", vvq->packed.next_avail_idx); + LOG_INFO("free head : %d\n", vvq->free_head); + LOG_INFO("driver->flags : 
0x%x\n", vvq->packed.vring.driver->flags); + LOG_INFO("driver->off_wrap : %d\n", + vvq->packed.vring.driver->off_wrap); + LOG_INFO("device->flags : 0x%x\n", vvq->packed.vring.device->flags); + LOG_INFO("device->off_wrap : %d\n", + vvq->packed.vring.device->off_wrap); + LOG_INFO( + "DESC[x]:\tDESC_ADDR\t[BUFFER_ADDR]\t\t[LEN]\t\t[ID]\t[FLAG]\n"); + + for (i = 0; i < desc_num; i++) { + j = (desc_index + i) % vvq->packed.vring.num; + desc = &vvq->packed.vring.desc[j]; + LOG_INFO("DESC[%d] 0x%llx:\t0x%016llx\t0x%08x\t%8d\t0x%x\n", j, + (uint64_t)desc, desc->addr, desc->len, desc->id, + desc->flags); + } + + return; +} + +/* enable irq handlers */ +void zxdh_vp_enable_cbs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + + for (i = 0; i < en_dev->channels_num; i++) { + en_dev->ops->switch_vqs_channel(en_dev->parent, i, 1); + } +} + +/* disable irq handlers */ +void zxdh_vp_disable_cbs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + + for (i = 0; i < en_dev->channels_num; i++) { + en_dev->ops->switch_vqs_channel(en_dev->parent, i, 0); + } +} + +#define VP_RESET_MS_TIMEOUT_CNT (10000) +void zxdh_vp_reset(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t timeout_cnt = 0; + + /* 0 status means a reset. */ + en_dev->ops->set_status(en_dev->parent, 0); + + /* After writing 0 to device_status, the driver MUST wait for a read of + * device_status to return 0 before reinitializing the device. + * This will flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. 
+ */ + LOG_INFO("get_status start\n"); + while (en_dev->ops->get_status(en_dev->parent) != 0) { + msleep(1); + timeout_cnt++; + if (timeout_cnt >= VP_RESET_MS_TIMEOUT_CNT) { + LOG_ERR("vp reset time out!\n"); + break; + } + } + LOG_INFO("get_status stop\n"); + + return; +} + +void zxdh_add_status(struct net_device *netdev, uint32_t status) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t dev_status = 0; + + might_sleep(); + + dev_status = en_dev->ops->get_status(en_dev->parent); + + en_dev->ops->set_status(en_dev->parent, (dev_status | status)); + + return; +} + +bool zxdh_has_status(struct net_device *netdev, uint32_t sbit) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t dev_status = 0; + + dev_status = en_dev->ops->get_status(en_dev->parent); + + return (dev_status & sbit); +} + +void zxdh_pf_features_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t i = 0; + uint64_t features = 0; + + en_dev->device_feature = en_dev->ops->get_features(en_dev->parent); + en_dev->device_feature |= BIT(34); + en_dev->driver_feature = 0; + + for (i = 0; i < ARRAY_SIZE(features_table); i++) { + features = features_table[i]; + en_dev->driver_feature |= (1ULL << features); + } + en_dev->guest_feature = en_dev->device_feature & 0xfffffff7dfffffff; + LOG_INFO("device_feature: 0x%llx, guest_feature: 0x%llx\n", + en_dev->device_feature, en_dev->guest_feature); + en_dev->ops->set_features(en_dev->parent, en_dev->guest_feature); + + return; +} + +bool zxdh_has_feature(struct zxdh_en_device *en_dev, uint32_t fbit) +{ + return en_dev->guest_feature & BIT_ULL(fbit); +} + +int32_t vq2txq(struct virtqueue *vq) +{ + return (vq->index - 1) / 2; +} + +int32_t txq2vq(int32_t txq) +{ + return txq * 2 + 1; +} +int32_t vq2rxq(struct virtqueue *vq) +{ + return vq->index / 
2; +} + +int32_t rxq2vq(int32_t rxq) +{ + return rxq * 2; +} + +inline void vqm_mb(bool weak_barriers) +{ + if (weak_barriers) { + virt_mb(); + } else { + mb(); + } +} + +inline void vqm_rmb(bool weak_barriers) +{ + if (weak_barriers) { + virt_rmb(); + } else { + dma_rmb(); + } +} + +inline void vqm_wmb(bool weak_barriers) +{ + if (weak_barriers) { + virt_wmb(); + } else { + dma_wmb(); + } +} + +void vring_del_virtqueue(struct virtqueue *_vq) +{ + struct zxdh_en_device *en_dev = _vq->en_dev; + struct vring_virtqueue *vq = to_vvq(_vq); + + spin_lock(&en_dev->vqs_list_lock); + list_del(&_vq->list); + spin_unlock(&en_dev->vqs_list_lock); + + if (vq->we_own_ring) { + vring_free_queue(vq->vq.en_dev, vq->packed.ring_size_in_bytes, + vq->packed.vring.desc, + vq->packed.ring_dma_addr); + + vring_free_queue(vq->vq.en_dev, vq->packed.event_size_in_bytes, + vq->packed.vring.driver, + vq->packed.driver_event_dma_addr); + + vring_free_queue(vq->vq.en_dev, vq->packed.event_size_in_bytes, + vq->packed.vring.device, + vq->packed.device_event_dma_addr); + + kfree(vq->packed.desc_state); + vq->packed.desc_state = NULL; + kfree(vq->packed.desc_extra); + vq->packed.desc_extra = NULL; + } + + kfree(vq); + vq = NULL; +} + +void del_vq(struct zxdh_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct zxdh_en_device *en_dev = vq->en_dev; + + en_dev->ops->vq_unbind_channel(en_dev->parent, vq->phy_index); + + en_dev->ops->vp_modern_unmap_vq_notify(en_dev->parent, vq->priv); + + vring_del_virtqueue(vq); +} + +void vp_del_vq(struct virtqueue *vq) +{ + struct zxdh_en_device *en_dev = vq->en_dev; + struct zxdh_pci_vq_info *info = en_dev->vqs[vq->index]; + unsigned long flags; + + spin_lock_irqsave(&en_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&en_dev->lock, flags); + + en_dev->vqs[vq->index] = NULL; + del_vq(info); + kfree(info); +} + +void vp_detach_vqs(void *para) +{ + struct net_device *netdev = para; + struct zxdh_en_priv *en_priv = 
netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct virtqueue *vq; + struct virtqueue *n; + + list_for_each_entry_safe(vq, n, &en_dev->vqs_list, list) { + vp_del_vq(vq); + } +} + +void vp_del_vqs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + vp_detach_vqs(netdev); + + kfree(en_dev->vqs); + en_dev->vqs = NULL; +} + +/** + * virtqueue_get_vring_size - return the size of the virtqueue's vring + * @_vq: the struct virtqueue containing the vring of interest. + * + * Returns the size of the vring. This is mainly used for boasting to + * userspace. Unlike other operations, this need not be serialized. + */ +uint32_t virtqueue_get_vring_size(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->packed.vring.num; +} + +dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + BUG_ON(!vq->we_own_ring); + + return vq->packed.ring_dma_addr; +} + +dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + BUG_ON(!vq->we_own_ring); + + return vq->packed.driver_event_dma_addr; +} + +dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + BUG_ON(!vq->we_own_ring); + + return vq->packed.device_event_dma_addr; +} + +bool vqm_has_dma_quirk(struct zxdh_en_device *en_dev) +{ + /* + * Note the reverse polarity of the quirk feature (compared to most + * other features), this is for compatibility with legacy systems. + */ + return !zxdh_has_feature(en_dev, ZXDH_F_ACCESS_PLATFORM); +} + +bool vring_use_dma_api(struct zxdh_en_device *en_dev) +{ + if (!vqm_has_dma_quirk(en_dev)) { + return true; + } + + /* Otherwise, we are left to guess. */ + /* + * In theory, it's possible to have a buggy QEMU-supposed + * emulated Q35 IOMMU and Xen enabled at the same time. 
On + such a configuration, zxdh has never worked and will + not work without an even larger kludge. Instead, enable + the DMA API if we're a Xen guest, which at least allows + all of the sensible Xen configurations to work correctly. + */ + if (xen_domain()) { + return true; + } + + return false; +} + +void vring_free_queue(struct zxdh_en_device *en_dev, size_t size, void *queue, + dma_addr_t dma_handle) +{ + if (vring_use_dma_api(en_dev)) { + dma_free_coherent(en_dev->dmadev, size, queue, dma_handle); + } else { + free_pages_exact(queue, PAGE_ALIGN(size)); + } +} + +void *vring_alloc_queue(struct zxdh_en_device *en_dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + if (vring_use_dma_api(en_dev)) { + return dma_alloc_coherent(en_dev->dmadev, size, dma_handle, + flag); + } else { + void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag); + + if (queue) { + phys_addr_t phys_addr = virt_to_phys(queue); + *dma_handle = (dma_addr_t)phys_addr; + + /* + * Sanity check: make sure we didn't truncate + * the address. The only arches I can find that + * have 64-bit phys_addr_t but 32-bit dma_addr_t + * are certain non-highmem MIPS and x86 + * configurations, but these configurations + * should never allocate physical pages above 32 + * bits, so this is fine. Just in case, throw a + * warning and abort if we end up with an + * unrepresentable address. 
+ */ + if (WARN_ON_ONCE(*dma_handle != phys_addr)) { + free_pages_exact(queue, PAGE_ALIGN(size)); + return NULL; + } + } + return queue; + } +} + +struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq, + uint32_t num) +{ + struct vring_desc_extra *desc_extra = NULL; + uint32_t i = 0; + + desc_extra = + kmalloc_array(num, sizeof(struct vring_desc_extra), GFP_KERNEL); + if (unlikely(desc_extra == NULL)) { + LOG_ERR("desc_extra kmalloc_array failed\n"); + return NULL; + } + + memset(desc_extra, 0, num * sizeof(struct vring_desc_extra)); + + for (i = 0; i < num - 1; i++) { + desc_extra[i].next = i + 1; + } + + return desc_extra; +} + +struct virtqueue *vring_create_virtqueue_packed( + uint32_t index, uint32_t num, uint32_t vring_align, + struct net_device *netdev, bool weak_barriers, bool may_reduce_num, + bool context, bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), const char *name) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct vring_virtqueue *vq = NULL; + struct vring_packed_desc *ring = NULL; + struct vring_packed_desc_event *driver = NULL; + struct vring_packed_desc_event *device = NULL; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + + ring_size_in_bytes = ZXDH_PF_MAX_DESC_NUM(en_dev) * + sizeof(struct vring_packed_desc) + + ZXDH_DESC_EXTRA_SIZE; + ring = vring_alloc_queue(en_dev, ring_size_in_bytes, &ring_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(ring == NULL)) { + LOG_ERR("ring vring_alloc_queue failed\n"); + goto err_ring; + } + + event_size_in_bytes = sizeof(struct vring_packed_desc_event); + + driver = vring_alloc_queue(en_dev, event_size_in_bytes, + &driver_event_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(driver == NULL)) { + LOG_ERR("driver vring_alloc_queue failed\n"); + goto 
err_driver; + } + + device = vring_alloc_queue(en_dev, event_size_in_bytes, + &device_event_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(device == NULL)) { + LOG_ERR("device vring_alloc_queue failed\n"); + goto err_device; + } + + vq = kmalloc(sizeof(*vq), GFP_KERNEL); + if (unlikely(vq == NULL)) { + LOG_ERR("vq kmalloc failed\n"); + goto err_vq; + } + + vq->vq.callback = callback; + vq->vq.en_dev = en_dev; + vq->vq.name = name; + vq->vq.num_free = num; + vq->vq.index = index; + vq->we_own_ring = true; + vq->notify = notify; + vq->weak_barriers = weak_barriers; + vq->broken = false; + vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); + vq->event_triggered = false; + vq->num_added = 0; + vq->packed_ring = true; + vq->use_dma_api = vring_use_dma_api(en_dev); +#ifdef DEBUG + vq->in_use = false; + vq->last_add_time_valid = false; +#endif + + vq->indirect = zxdh_has_feature(en_dev, ZXDH_RING_F_INDIRECT_DESC) && + !context; + vq->event = zxdh_has_feature(en_dev, ZXDH_RING_F_EVENT_IDX); + + if (zxdh_has_feature(en_dev, ZXDH_F_ORDER_PLATFORM)) { + vq->weak_barriers = false; + } + + vq->packed.ring_dma_addr = ring_dma_addr; + vq->packed.driver_event_dma_addr = driver_event_dma_addr; + vq->packed.device_event_dma_addr = device_event_dma_addr; + + vq->packed.ring_size_in_bytes = ring_size_in_bytes; + vq->packed.event_size_in_bytes = event_size_in_bytes; + + vq->packed.vring.num = num; + vq->packed.vring.desc = ring; + vq->packed.vring.driver = driver; + vq->packed.vring.device = device; + + vq->packed.next_avail_idx = 0; + vq->packed.avail_wrap_counter = 1; + vq->packed.event_flags_shadow = 0; + vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; + + vq->packed.desc_state = kmalloc_array( + ZXDH_PF_MAX_DESC_NUM(en_dev), + sizeof(struct vring_desc_state_packed), GFP_KERNEL); + if (unlikely(vq->packed.desc_state == NULL)) { + LOG_ERR("vq->packed.desc_state kmalloc_array failed\n"); + goto err_desc_state; + } + + 
memset(vq->packed.desc_state, 0, + num * sizeof(struct vring_desc_state_packed)); + + /* Put everything in free lists. */ + vq->free_head = 0; + + vq->packed.desc_extra = + vring_alloc_desc_extra(vq, ZXDH_PF_MAX_DESC_NUM(en_dev)); + if (unlikely(vq->packed.desc_extra == NULL)) { + LOG_ERR("vq->packed.desc_extra vring_alloc_desc_extra failed\n"); + goto err_desc_extra; + } + + /* No callback? Tell other side not to bother us. */ + if (!callback) { + vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; + vq->packed.vring.driver->flags = + cpu_to_le16(vq->packed.event_flags_shadow); + } + + spin_lock(&en_dev->vqs_list_lock); + list_add_tail(&vq->vq.list, &en_dev->vqs_list); + spin_unlock(&en_dev->vqs_list_lock); + + return &vq->vq; + +err_desc_extra: + kfree(vq->packed.desc_state); + vq->packed.desc_state = NULL; +err_desc_state: + kfree(vq); + vq = NULL; +err_vq: + vring_free_queue(en_dev, event_size_in_bytes, device, + device_event_dma_addr); +err_device: + vring_free_queue(en_dev, event_size_in_bytes, driver, + driver_event_dma_addr); +err_driver: + vring_free_queue(en_dev, ring_size_in_bytes, ring, ring_dma_addr); +err_ring: + return NULL; +} + +void zxdh_en_xmit_pkts(struct virtqueue *tvq); +void zxdh_vvq_reset(struct zxdh_en_device *en_dev) +{ + struct virtqueue *vq = NULL; + struct vring_virtqueue *vvq = NULL; + uint16_t num; + int32_t i; + int32_t j; + + for (i = 0; i < 2 * en_dev->max_queue_pairs; ++i) { + vq = en_dev->vqs[i]->vq; + vvq = to_vvq(vq); + + if (i % 2 == 0) + num = en_dev->eth_config.rx_queue_size; + else { + num = en_dev->eth_config.tx_queue_size; + vq->callback = zxdh_en_xmit_pkts; + } + + vq->num_free = num; + vvq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); + vvq->num_added = 0; + vvq->packed.vring.num = num; + vvq->packed.next_avail_idx = 0; + vvq->packed.avail_wrap_counter = 1; + vvq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; + vvq->free_head = 0; + + memset(vvq->packed.desc_state, 0, + num * 
sizeof(struct vring_desc_state_packed)); + memset(vvq->packed.vring.desc, 0, + num * sizeof(struct vring_packed_desc) + + ZXDH_DESC_EXTRA_SIZE); + memset(vvq->packed.desc_extra, 0, + num * sizeof(struct vring_desc_extra)); + for (j = 0; j < num - 1; ++j) { + vvq->packed.desc_extra[j].next = j + 1; + } + + en_dev->ops->set_queue_size(en_dev->parent, + en_dev->phy_index[i], num); + en_dev->ops->set_queue_enable(en_dev->parent, + en_dev->phy_index[i], true); + } +} + +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq) +{ + /* we write the queue's selector into the notification register to + * signal the other end */ + iowrite16(vq->phy_index, (void __iomem *)vq->priv); + + return true; +} + +struct virtqueue *vp_setup_vq(struct net_device *netdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, bool ctx, uint16_t channel_num) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); + struct virtqueue *vq = NULL; + struct virtqueue *n = NULL; + unsigned long flags; + uint16_t num = 0; + uint16_t alloc_channel_num = 0; + int32_t err = 0; + struct dh_vq_handler vq_handler; + + /* fill out our structure that represents an active queue */ + if (unlikely(info == NULL)) { + LOG_ERR("info kmalloc failed\n"); + return ERR_PTR(-ENOMEM); + } + + if (index % 2 == 0) + num = en_dev->eth_config.rx_queue_size; + else + num = en_dev->eth_config.tx_queue_size; + + /* create the vring */ + vq = vring_create_virtqueue_packed(index, num, SMP_CACHE_BYTES, + en_dev->netdev, true, true, ctx, + vp_notify, callback, name); + if (vq == NULL) { + LOG_ERR("create the vring failed\n"); + err = -ENOMEM; + goto out_info; + } + + /* activate the queue */ + en_dev->ops->activate_phy_vq(en_dev->parent, en_dev->phy_index[index], + virtqueue_get_vring_size(vq), + virtqueue_get_desc_addr(vq), + 
virtqueue_get_avail_addr(vq), + virtqueue_get_used_addr(vq)); + + vq->priv = (void __force *)en_dev->ops->vp_modern_map_vq_notify( + en_dev->parent, en_dev->phy_index[index], NULL); + if (!vq->priv) { + LOG_ERR("vp_modern_map_vq_notify failed\n"); + err = -ENOMEM; + goto err_map_notify; + } + + vq->phy_index = en_dev->phy_index[index]; + vq->index = index; + info->channel_num = channel_num; + + memset(&vq_handler, 0, sizeof(struct dh_vq_handler)); + vq_handler.callback = dh_eq_vqs_vring_int; + alloc_channel_num = en_dev->ops->get_channels_num(en_dev->parent); + if (channel_num < alloc_channel_num) { + err = en_dev->ops->vqs_channel_bind_handler( + en_dev->parent, channel_num, &vq_handler); + if (err < 0) { + LOG_ERR("vqs_channel_bind_handler failed: %d\n", err); + goto err_vqs_channel_bind_handler; + } + } + + if (channel_num >= alloc_channel_num) { + if (alloc_channel_num == 0) { + channel_num = 0; + } else { + channel_num = (channel_num - alloc_channel_num) % + alloc_channel_num; + } + } + err = en_dev->ops->vq_bind_channel(en_dev->parent, channel_num, + en_dev->phy_index[index], index); + if (err < 0) { + LOG_ERR("vq_bind_channel failed: %d\n", err); + goto err_vq_bind_channel; + } + + if (callback) { + spin_lock_irqsave(&en_dev->lock, flags); + err = en_dev->ops->vqs_bind_eqs(en_dev->parent, channel_num, + &info->node); + spin_unlock_irqrestore(&en_dev->lock, flags); + if (err < 0) { + LOG_ERR("vqs_bind_eqs failed: %d\n", err); + goto err_vqs_bind_eqs; + } + } else { + INIT_LIST_HEAD(&info->node); + } + + info->vq = vq; + en_dev->vqs[index] = info; + return vq; + +err_vqs_bind_eqs: + list_for_each_entry_safe(vq, n, &en_dev->vqs_list, list) { + en_dev->ops->vq_unbind_channel(en_dev->parent, vq->phy_index); + } +err_vq_bind_channel: + if (channel_num < alloc_channel_num) { + en_dev->ops->vqs_channel_unbind_handler(en_dev->parent, + channel_num); + } +err_vqs_channel_bind_handler: + en_dev->ops->vp_modern_unmap_vq_notify( + en_dev->parent, (void __iomem __force 
*)vq->priv); +err_map_notify: + vring_del_virtqueue(vq); +out_info: + kfree(info); + en_dev->vqs[index] = NULL; + return ERR_PTR(err); +} + +uint32_t get_mergeable_buf_len(struct receive_queue *rq, + struct ewma_pkt_len *avg_pkt_len, uint32_t room) +{ + struct zxdh_en_device *en_dev = rq->vq->en_dev; + size_t hdr_len = 0; + uint32_t len = 0; + + hdr_len = sizeof(struct zxdh_net_hdr_rx); + if (en_dev->dtp_drs_offload == false) { + hdr_len = + sizeof(struct zxdh_net_hdr_rx) - sizeof(struct pi_hdr); + } + + if (room) { + return PAGE_SIZE - room; + } + + len = hdr_len + DH_BUFF_LEN; + + return ALIGN(len, L1_CACHE_BYTES); +} + +/* + * The DMA ops on various arches are rather gnarly right now, and + * making all of the arch DMA ops work on the vring device itself + * is a mess. For now, we use the parent device for DMA ops. + */ +static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) +{ + return vq->vq.en_dev->dmadev; +} + +/* Map one sg entry. */ +dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, + struct scatterlist *sg, + enum dma_data_direction direction) +{ + if (!vq->use_dma_api) { + return (dma_addr_t)sg_phys(sg); + } + + /* + * We can't use dma_map_sg, because we don't use scatterlists in + * the way it expects (we don't guarantee that the scatterlist + * will exist for the lifetime of the mapping). + */ + return dma_map_page(vring_dma_dev(vq), sg_page(sg), sg->offset, + sg->length, direction); +} + +dma_addr_t vring_map_single(const struct vring_virtqueue *vq, void *cpu_addr, + size_t size, enum dma_data_direction direction) +{ + if (!vq->use_dma_api) { + return (dma_addr_t)virt_to_phys(cpu_addr); + } + + return dma_map_single(vring_dma_dev(vq), cpu_addr, size, direction); +} + +int32_t vring_mapping_error(const struct vring_virtqueue *vq, dma_addr_t addr) +{ + if (!vq->use_dma_api) { + return 0; + } + + return dma_mapping_error(vring_dma_dev(vq), addr); +} + +/* + * Packed ring specific functions - *_packed(). 
+ */ +void vring_unmap_state_packed(const struct vring_virtqueue *vq, + struct vring_desc_extra *state) +{ + uint16_t flags = 0; + + if (!vq->use_dma_api) { + return; + } + + flags = state->flags; + if (flags & VRING_DESC_F_INDIRECT) { + dma_unmap_single(vring_dma_dev(vq), state->addr, state->len, + (flags & VRING_DESC_F_WRITE) ? + DMA_FROM_DEVICE : + DMA_TO_DEVICE); + } else { + dma_unmap_page(vring_dma_dev(vq), state->addr, state->len, + (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : + DMA_TO_DEVICE); + } +} + +void vring_unmap_desc_packed(const struct vring_virtqueue *vq, + struct vring_packed_desc *desc) +{ + uint16_t flags = 0; + + if (!vq->use_dma_api) { + return; + } + + flags = le16_to_cpu(desc->flags); + + if (flags & VRING_DESC_F_INDIRECT) { + dma_unmap_single(vring_dma_dev(vq), le64_to_cpu(desc->addr), + le32_to_cpu(desc->len), + (flags & VRING_DESC_F_WRITE) ? + DMA_FROM_DEVICE : + DMA_TO_DEVICE); + } else { + dma_unmap_page(vring_dma_dev(vq), le64_to_cpu(desc->addr), + le32_to_cpu(desc->len), + (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : + DMA_TO_DEVICE); + } +} + +void *mergeable_len_to_ctx(uint32_t truesize, uint32_t headroom) +{ + return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | + truesize); +} + +inline bool virtqueue_use_indirect(struct virtqueue *_vq, unsigned int total_sg) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + /* + * If the host supports indirect descriptor tables, and we have multiple + * buffers, then go indirect. FIXME: tune this threshold + */ + return (vq->indirect && total_sg > 1 && vq->vq.num_free); +} + +struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg, + gfp_t gfp) +{ + struct vring_packed_desc *desc = NULL; + + /* + * We require lowmem mappings for the descriptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. 
+ */ + gfp &= ~__GFP_HIGHMEM; + + desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp); + + return desc; +} + +int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, + struct scatterlist *sgs[], + unsigned int total_sg, unsigned int out_sgs, + unsigned int in_sgs, void *data, gfp_t gfp) +{ + struct vring_packed_desc *desc = NULL; + struct scatterlist *sg = NULL; + uint32_t i = 0; + uint32_t n = 0; + uint32_t err_idx = 0; + uint16_t head = 0; + uint16_t id = 0; + dma_addr_t addr; + + head = vq->packed.next_avail_idx; + desc = alloc_indirect_packed(total_sg, gfp); + if (desc == NULL) { + LOG_ERR("desc alloc_indirect_packed failed\n"); + return -ENOMEM; + } + + if (unlikely(vq->vq.num_free < 1)) { + kfree(desc); + END_USE(vq); + return -ENOSPC; + } + + i = 0; + id = vq->free_head; + BUG_ON(id == vq->packed.vring.num); + + for (n = 0; n < out_sgs + in_sgs; n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + addr = vring_map_one_sg(vq, sg, + n < out_sgs ? DMA_TO_DEVICE : + DMA_FROM_DEVICE); + if (vring_mapping_error(vq, addr)) { + LOG_ERR("vring_map_one_sg error\n"); + goto unmap_release; + } + + desc[i].flags = cpu_to_le16( + n < out_sgs ? 0 : VRING_DESC_F_WRITE); + desc[i].addr = cpu_to_le64(addr); + desc[i].len = cpu_to_le32(sg->length); + i++; + } + } + + /* Now that the indirect table is filled in, map it. 
*/ + addr = vring_map_single(vq, desc, + total_sg * sizeof(struct vring_packed_desc), + DMA_TO_DEVICE); + if (vring_mapping_error(vq, addr)) { + LOG_ERR("vring_map_single error\n"); + goto unmap_release; + } + + vq->packed.vring.desc[head].addr = cpu_to_le64(addr); + vq->packed.vring.desc[head].len = + cpu_to_le32(total_sg * sizeof(struct vring_packed_desc)); + vq->packed.vring.desc[head].id = cpu_to_le16(id); + + if (vq->use_dma_api) { + vq->packed.desc_extra[id].addr = addr; + vq->packed.desc_extra[id].len = + total_sg * sizeof(struct vring_packed_desc); + vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | + vq->packed.avail_used_flags; + } + + /* + * A driver MUST NOT make the first descriptor in the list + * available before all subsequent descriptors comprising + * the list are made available. + */ + vqm_wmb(vq->weak_barriers); + vq->packed.vring.desc[head].flags = cpu_to_le16( + VRING_DESC_F_INDIRECT | vq->packed.avail_used_flags); + + /* We're using some buffers from the free list. */ + vq->vq.num_free -= 1; + + /* Update free pointer */ + n = head + 1; + if (n >= vq->packed.vring.num) { + n = 0; + vq->packed.avail_wrap_counter ^= 1; + vq->packed.avail_used_flags ^= 1 << VRING_PACKED_DESC_F_AVAIL | + 1 << VRING_PACKED_DESC_F_USED; + } + vq->packed.next_avail_idx = n; + vq->free_head = vq->packed.desc_extra[id].next; + + /* Store token and indirect buffer state. 
*/ + vq->packed.desc_state[id].num = 1; + vq->packed.desc_state[id].data = data; + vq->packed.desc_state[id].indir_desc = desc; + vq->packed.desc_state[id].last = id; + + vq->num_added += 1; + + //LOG_DEBUG("added buffer head %i to %p\n", head, vq); + END_USE(vq); + + return 0; + +unmap_release: + err_idx = i; + + for (i = 0; i < err_idx; i++) { + vring_unmap_desc_packed(vq, &desc[i]); + } + + kfree(desc); + + END_USE(vq); + return -ENOMEM; +} + +int32_t virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[], + uint32_t total_sg, uint32_t out_sgs, + uint32_t in_sgs, void *data, void *ctx, gfp_t gfp) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + struct vring_packed_desc *desc = NULL; + struct scatterlist *sg = NULL; + uint32_t i = 0; + uint32_t n = 0; + uint32_t c = 0; + uint32_t descs_used = 0; + uint32_t err_idx = 0; + __le16 head_flags = 0; + __le16 flags = 0; + uint16_t head = 0; + uint16_t id = 0; + uint16_t prev = 0; + uint16_t curr = 0; + uint16_t avail_used_flags = 0; + int32_t err = 0; + + START_USE(vq); + + BUG_ON(data == NULL); + BUG_ON(ctx && vq->indirect); + + if (unlikely(vq->broken)) { + LOG_ERR("vq->broken\n"); + END_USE(vq); + return -EIO; + } + + LAST_ADD_TIME_UPDATE(vq); + + BUG_ON(total_sg == 0); + + if (virtqueue_use_indirect(_vq, total_sg)) { + err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, + in_sgs, data, gfp); + if (err != -ENOMEM) { + END_USE(vq); + return err; + } + /* fall back on direct */ + } + + head = vq->packed.next_avail_idx; + avail_used_flags = vq->packed.avail_used_flags; + + WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); + + desc = vq->packed.vring.desc; + i = head; + descs_used = total_sg; + + if (unlikely(vq->vq.num_free < descs_used)) { + END_USE(vq); + return -ENOSPC; + } + + id = vq->free_head; + BUG_ON(id == vq->packed.vring.num); + + curr = id; + c = 0; + for (n = 0; n < out_sgs + in_sgs; n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + dma_addr_t addr = 
vring_map_one_sg( + vq, sg, + n < out_sgs ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (vring_mapping_error(vq, addr)) { + LOG_ERR("vring_map_one_sg error\n"); + goto unmap_release; + } + + flags = cpu_to_le16( + vq->packed.avail_used_flags | + (++c == total_sg ? 0 : VRING_DESC_F_NEXT) | + (n < out_sgs ? 0 : VRING_DESC_F_WRITE)); + + desc[i].addr = cpu_to_le64(addr); + desc[i].len = cpu_to_le32(sg->length); + desc[i].id = cpu_to_le16(id); + + if (i == head) { + head_flags = flags; + } else { + desc[i].flags = flags; + } + + if (unlikely(vq->use_dma_api)) { + vq->packed.desc_extra[curr].addr = addr; + vq->packed.desc_extra[curr].len = sg->length; + vq->packed.desc_extra[curr].flags = + le16_to_cpu(flags); + } + prev = curr; + curr = vq->packed.desc_extra[curr].next; + + if ((unlikely(++i >= vq->packed.vring.num))) { + i = 0; + vq->packed.avail_used_flags ^= + 1 << VRING_PACKED_DESC_F_AVAIL | + 1 << VRING_PACKED_DESC_F_USED; + } + } + } + + if (i < head) { + vq->packed.avail_wrap_counter ^= 1; + } + + /* We're using some buffers from the free list. */ + vq->vq.num_free -= descs_used; + + /* Update free pointer */ + vq->packed.next_avail_idx = i; + vq->free_head = curr; + + /* Store token. */ + vq->packed.desc_state[id].num = descs_used; + vq->packed.desc_state[id].data = data; + vq->packed.desc_state[id].indir_desc = ctx; + vq->packed.desc_state[id].last = prev; + + /* + * A driver MUST NOT make the first descriptor in the list + * available before all subsequent descriptors comprising + * the list are made available. 
+ */ + vqm_wmb(vq->weak_barriers); + vq->packed.vring.desc[head].flags = head_flags; + vq->num_added += descs_used; + + //LOG_INFO("added buffer head %i to %p\n", head, vq); + END_USE(vq); + + return 0; + +unmap_release: + err_idx = i; + i = head; + curr = vq->free_head; + + vq->packed.avail_used_flags = avail_used_flags; + + for (n = 0; n < total_sg; n++) { + if (i == err_idx) { + break; + } + + vring_unmap_state_packed(vq, &vq->packed.desc_extra[curr]); + curr = vq->packed.desc_extra[curr].next; + i++; + if (i >= vq->packed.vring.num) { + i = 0; + } + } + + END_USE(vq); + return -EIO; +} + +/** + * virtqueue_add_inbuf_ctx - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side + * @data: the token identifying the buffer. + * @ctx: extra context for the token + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
+ */ +int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, + uint32_t num, void *data, void *ctx, gfp_t gfp) +{ + return virtqueue_add_packed(vq, &sg, num, 0, 1, data, ctx, gfp); +} + +bool is_used_desc_packed(struct vring_virtqueue *vq, uint16_t idx, + bool used_wrap_counter) +{ + bool avail = false; + bool used = false; + uint16_t flags = 0; + + flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); + avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL)); + used = !!(flags & (1 << VRING_PACKED_DESC_F_USED)); + + return avail == used && used == used_wrap_counter; +} + +bool virtqueue_poll_packed(struct virtqueue *_vq, uint16_t off_wrap) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + bool wrap_counter = false; + uint16_t used_idx = 0; + + wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; + used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); + + return is_used_desc_packed(vq, used_idx, wrap_counter); +} + +/** + * virtqueue_poll - query pending used buffers + * @_vq: the struct virtqueue we're talking about. + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (unlikely(vq->broken)) { + LOG_ERR("vq->broken\n"); + return false; + } + + vqm_mb(vq->weak_barriers); + return virtqueue_poll_packed(_vq, last_used_idx); +} + +unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + START_USE(vq); + + /* + * We optimistically turn back on interrupts, then check if there was + * more to do. + */ + if (vq->event) { + vq->packed.vring.driver->off_wrap = + cpu_to_le16(vq->last_used_idx); + /* + * We need to update event offset and event wrap + * counter first before updating event flags. 
+ */ + vqm_wmb(vq->weak_barriers); + } + + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = + vq->event ? VRING_PACKED_EVENT_FLAG_DESC : + VRING_PACKED_EVENT_FLAG_ENABLE; + vq->packed.vring.driver->flags = + cpu_to_le16(vq->packed.event_flags_shadow); + } + + END_USE(vq); + return vq->last_used_idx; +} + +int32_t virtqueue_enable_cb_prepare(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (vq->event_triggered) { + vq->event_triggered = false; + } + + return virtqueue_enable_cb_prepare_packed(_vq); +} + +bool packed_used_wrap_counter(uint16_t last_used_idx) +{ + return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR)); +} + +uint16_t packed_last_used(uint16_t last_used_idx) +{ + return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); +} + +bool more_used_packed(struct vring_virtqueue *vq) +{ + uint16_t last_used = 0; + uint16_t last_used_idx = 0; + bool used_wrap_counter = false; + + last_used_idx = READ_ONCE(vq->last_used_idx); + last_used = packed_last_used(last_used_idx); + used_wrap_counter = packed_used_wrap_counter(last_used_idx); + + return is_used_desc_packed(vq, last_used, used_wrap_counter); +} + +#define MIN_WAIT_COUNT 10 +bool is_flow_stopped(struct zxdh_en_device *en_dev) +{ + struct virtqueue *vq = NULL; + struct vring_virtqueue *vvq = NULL; + int32_t consecutive_false_count = 0; + uint16_t last_used = 0; + uint16_t last_used_idx = 0; + int32_t i = 0; + int32_t j = 0; + + for (i = 0; i < 2 * en_dev->max_queue_pairs; ++i) { + vq = en_dev->vqs[i]->vq; + vvq = to_vvq(vq); + j = 0; + consecutive_false_count = 0; + + for (j = 0; j < 2000; ++j) { + if (i % 2 == 0) { + if (more_used_packed(vvq)) { + synchronize_net(); + consecutive_false_count = 0; + } else { + if (++consecutive_false_count >= + MIN_WAIT_COUNT) { + break; + } + } + } else { + vq->callback = NULL; + return true; + } + usleep_range(5, 10); + } + + if (consecutive_false_count < MIN_WAIT_COUNT) { 
+ last_used_idx = READ_ONCE(vvq->last_used_idx); + last_used = packed_last_used(last_used_idx); + zxdh_print_vring_info(vq, last_used - 10, 10); + zxdh_print_vring_info(vq, last_used, 30); + return false; + } + } + + return true; +} + +void detach_buf_packed(struct vring_virtqueue *vq, uint32_t id, void **ctx) +{ + struct vring_desc_state_packed *state = NULL; + struct vring_packed_desc *desc = NULL; + uint32_t i = 0; + uint32_t curr = 0; + + state = &vq->packed.desc_state[id]; + + /* Clear data ptr. */ + state->data = NULL; + + vq->packed.desc_extra[state->last].next = vq->free_head; + vq->free_head = id; + vq->vq.num_free += state->num; + + if (unlikely(vq->use_dma_api)) { + curr = id; + for (i = 0; i < state->num; i++) { + vring_unmap_state_packed(vq, + &vq->packed.desc_extra[curr]); + curr = vq->packed.desc_extra[curr].next; + } + } + + if (vq->indirect) { + uint32_t len; + + /* Free the indirect table, if any, now that it's unmapped. */ + desc = state->indir_desc; + if (!desc) { + return; + } + + if (vq->use_dma_api) { + len = vq->packed.desc_extra[id].len; + for (i = 0; i < len / sizeof(struct vring_packed_desc); + i++) { + vring_unmap_desc_packed(vq, &desc[i]); + } + } + kfree(desc); + state->indir_desc = NULL; + } else if (ctx) { + *ctx = state->indir_desc; + } +} + +void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len, + void **ctx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + uint16_t last_used = 0; + uint16_t id = 0; + uint16_t last_used_idx = 0; + bool used_wrap_counter = false; + void *ret = NULL; + + START_USE(vq); + + if (unlikely(vq->broken)) { + END_USE(vq); + return NULL; + } + + if (!more_used_packed(vq)) { + //LOG_ERR("no more buffers in queue\n"); + END_USE(vq); + return NULL; + } + + /* Only get used elements after they have been exposed by host. 
*/ + vqm_rmb(vq->weak_barriers); + + last_used_idx = READ_ONCE(vq->last_used_idx); + used_wrap_counter = packed_used_wrap_counter(last_used_idx); + last_used = packed_last_used(last_used_idx); + id = le16_to_cpu(vq->packed.vring.desc[last_used].id); + *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); + + if (unlikely(id >= vq->packed.vring.num)) { + zxdh_print_vring_info(_vq, 0, vq->packed.vring.num); + BAD_RING(vq, "id %u out of range\n", id); + return NULL; + } + if (unlikely(!vq->packed.desc_state[id].data)) { + zxdh_print_vring_info(_vq, last_used - 10, 10); + zxdh_print_vring_info(_vq, last_used, 30); + BAD_RING(vq, "id %u is not a head!\n", id); + return NULL; + } + + /* detach_buf_packed clears data, so grab it now. */ + ret = vq->packed.desc_state[id].data; + detach_buf_packed(vq, id, ctx); + + last_used += vq->packed.desc_state[id].num; + if (unlikely(last_used >= vq->packed.vring.num)) { + last_used -= vq->packed.vring.num; + used_wrap_counter ^= 1; + } + + last_used = (last_used | + (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); + WRITE_ONCE(vq->last_used_idx, last_used); + + /* + * If we expect an interrupt for the next entry, tell host + * by writing event index and flush out the write before + * the read in the next get_buf call. + */ + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) + vqm_store_mb(vq->weak_barriers, + &vq->packed.vring.driver->off_wrap, + cpu_to_le16(vq->last_used_idx)); + + LAST_ADD_TIME_INVALID(vq); + + END_USE(vq); + return ret; +} + +void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len) +{ + return virtqueue_get_buf_ctx_packed(_vq, len, NULL); +} + +/* + * private is used to chain pages for big packets, put the whole + * most recent used list in the beginning for reuse + */ +void give_pages(struct receive_queue *rq, struct page *page) +{ + struct page *end = NULL; + + /* Find end of list, sew whole thing into vi->rq.pages. 
*/ + for (end = page; end->private; end = (struct page *)end->private) + ; + end->private = (unsigned long)rq->pages; + rq->pages = page; +} + +void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, + bool in_napi) +{ + uint32_t len = 0; + uint32_t packets = 0; + uint32_t bytes = 0; + void *ptr = NULL; + + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } + packets++; + } + + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. + */ + if (!packets) { + return; + } + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + u64_stats_update_end(&sq->stats.syncp); +} + +void virtqueue_disable_cb_packed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; + vq->packed.vring.driver->flags = + cpu_to_le16(vq->packed.event_flags_shadow); + } +} + +/** + * virtqueue_disable_cb - disable callbacks + * @_vq: the struct virtqueue we're talking about. + * + * Note that this is not necessarily synchronous, hence unreliable and only + * useful as an optimization. + * + * Unlike other operations, this need not be serialized. + */ +void virtqueue_disable_cb(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + /* If device triggered an event already it won't trigger one again: + * no need to disable. 
+ */ + if (vq->event_triggered) { + return; + } + + virtqueue_disable_cb_packed(_vq); +} + +void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) +{ + if (napi_schedule_prep(napi)) { + virtqueue_disable_cb(vq); + __napi_schedule(napi); + } +} + +void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) +{ + napi_enable(napi); + + /* If all buffers were filled by other side before we napi_enabled, we + * won't get another interrupt, so process any outstanding packets now. + * Call local_bh_enable after to trigger softIRQ processing. + */ + local_bh_disable(); + virtqueue_napi_schedule(napi, vq); + local_bh_enable(); +} + +void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq, + struct napi_struct *napi) +{ + if (!napi->weight) { + return; + } + + virtnet_napi_enable(vq, napi); + + return; +} + +void virtnet_napi_tx_disable(struct napi_struct *napi) +{ + if (napi->weight) { + napi_disable(napi); + } +} + +static bool is_xdp_raw_buffer_queue(struct zxdh_en_device *en_dev, int q) +{ + if (q < (en_dev->curr_queue_pairs - en_dev->xdp_queue_pairs)) + return false; + else if (q < en_dev->curr_queue_pairs) + return true; + else + return false; +} + +int virtnet_poll_tx(struct napi_struct *napi, int budget) +{ + struct send_queue *sq = container_of(napi, struct send_queue, napi); + struct zxdh_en_device *en_dev = sq->vq->en_dev; + uint32_t index = vq2txq(sq->vq); + struct netdev_queue *txq = NULL; + int32_t opaque = 0; + bool done = false; + + if (unlikely(is_xdp_raw_buffer_queue(en_dev, index))) { + /* We don't need to enable cb for XDP */ + napi_complete_done(napi, 0); + return 0; + } + + txq = netdev_get_tx_queue(en_dev->netdev, index); + __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); + free_old_xmit_skbs(en_dev->netdev, sq, true); + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { + netif_tx_wake_queue(txq); + } + + opaque = virtqueue_enable_cb_prepare(sq->vq); + + done = 
napi_complete_done(napi, 0); + + if (!done) { + virtqueue_disable_cb(sq->vq); + } + + __netif_tx_unlock(txq); + + if (done) { + if (unlikely(virtqueue_poll(sq->vq, opaque))) { + if (napi_schedule_prep(napi)) { + __netif_tx_lock(txq, raw_smp_processor_id()); + virtqueue_disable_cb(sq->vq); + __netif_tx_unlock(txq); + __napi_schedule(napi); + } + } + } + + return 0; +} + +bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + uint16_t used_idx = 0; + uint16_t wrap_counter = 0; + uint16_t last_used_idx = 0; + uint16_t bufs = 0; + + START_USE(vq); + + /* + * We optimistically turn back on interrupts, then check if there was + * more to do. + */ + + if (vq->event) { + /* TODO: tune this threshold */ + bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; + last_used_idx = READ_ONCE(vq->last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); + + used_idx = packed_last_used(last_used_idx) + bufs; + if (used_idx >= vq->packed.vring.num) { + used_idx -= vq->packed.vring.num; + wrap_counter ^= 1; + } + + vq->packed.vring.driver->off_wrap = cpu_to_le16( + used_idx | + (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR)); + + /* + * We need to update event offset and event wrap + * counter first before updating event flags. + */ + vqm_wmb(vq->weak_barriers); + } + + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { + vq->packed.event_flags_shadow = + vq->event ? VRING_PACKED_EVENT_FLAG_DESC : + VRING_PACKED_EVENT_FLAG_ENABLE; + vq->packed.vring.driver->flags = + cpu_to_le16(vq->packed.event_flags_shadow); + } + + /* + * We need to update event suppression structure first + * before re-checking for more used buffers. 
+ */ + vqm_mb(vq->weak_barriers); + + last_used_idx = READ_ONCE(vq->last_used_idx); + wrap_counter = packed_used_wrap_counter(last_used_idx); + used_idx = packed_last_used(last_used_idx); + if (is_used_desc_packed(vq, used_idx, wrap_counter)) { + END_USE(vq); + return false; + } + + END_USE(vq); + return true; +} +uint16_t __vqm16_to_cpu(bool little_endian, __vqm16 val) +{ + if (little_endian) { + return le16_to_cpu((__force __le16)val); + } else { + return be16_to_cpu((__force __be16)val); + } +} + +static inline bool zxdh_legacy_is_little_endian(void) +{ +#ifdef __LITTLE_ENDIAN + return true; +#else + return false; +#endif +} + +bool zxdh_is_little_endian(struct zxdh_en_device *en_dev) +{ + return zxdh_has_feature(en_dev, ZXDH_F_VERSION_1) || + zxdh_legacy_is_little_endian(); +} + +/* Memory accessors */ +uint16_t vqm16_to_cpu(struct zxdh_en_device *en_dev, __vqm16 val) +{ + return __vqm16_to_cpu(zxdh_is_little_endian(en_dev), val); +} + +uint32_t mergeable_ctx_to_headroom(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; +} + +uint32_t mergeable_ctx_to_truesize(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); +} + +/** + * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. + * @_vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks but hints to the other side to delay + * interrupts until most of the available buffers have been processed; + * it returns "false" if there are many pending buffers in the queue, + * to detect a possible race between the driver checking for more work, + * and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). 
+ */ +bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (vq->event_triggered) { + vq->event_triggered = false; + } + + return virtqueue_enable_cb_delayed_packed(_vq); +} + +void virtnet_poll_cleantx(struct receive_queue *rq) +{ + struct zxdh_en_device *en_dev = rq->vq->en_dev; + uint32_t index = vq2rxq(rq->vq); + struct send_queue *sq = &en_dev->sq[index]; + struct netdev_queue *txq = netdev_get_tx_queue(en_dev->netdev, index); + + if (!sq->napi.weight || is_xdp_raw_buffer_queue(en_dev, index)) { + return; + } + + if (__netif_tx_trylock(txq)) { + do { + virtqueue_disable_cb(sq->vq); + free_old_xmit_skbs(en_dev->netdev, sq, true); + } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { + netif_tx_wake_queue(txq); + } + + __netif_tx_unlock(txq); + } +} + +inline struct zxdh_net_hdr_rx *skb_vnet_hdr(struct sk_buff *skb) +{ + return (struct zxdh_net_hdr_rx *)skb->cb; +} + +/* Called from bottom half context */ +struct sk_buff *page_to_skb(struct zxdh_en_device *en_dev, + struct receive_queue *rq, struct page *page, + uint32_t offset, uint32_t len, uint32_t truesize, + uint32_t metasize, uint32_t headroom) +{ + struct sk_buff *skb = NULL; + uint32_t copy = 0; + uint32_t hdr_len = 0; + struct page *page_to_free = NULL; + int32_t tailroom = 0; + int32_t shinfo_size = 0; + char *p = NULL; + char *hdr_p = NULL; + char *buf = NULL; + + p = page_address(page) + offset; + hdr_p = p; + + hdr_len = (((struct zxdh_net_hdr_rx *)hdr_p)->pd_len) * HDR_2B_UNIT; + + /* If headroom is not 0, there is an offset between the beginning of the + * data and the allocated space, otherwise the data and the allocated + * space are aligned. + * + * Buffers with headroom use PAGE_SIZE as alloc size, see + * add_recvbuf_mergeable() + get_mergeable_buf_len() + */ + truesize = headroom ? 
PAGE_SIZE : truesize; + tailroom = truesize - headroom; + buf = p - headroom; + + len -= hdr_len; + offset += hdr_len; + p += hdr_len; + tailroom -= hdr_len + len; + + shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + /* copy small packet so we can reuse these pages */ + if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) { + skb = build_skb(buf, truesize); + if (unlikely(!skb)) { + LOG_ERR("build_skb is null\n"); + return NULL; + } + + skb_reserve(skb, p - buf); + skb_put(skb, len); + + page = (struct page *)page->private; + if (page) { + give_pages(rq, page); + } + goto ok; + } + + /* copy small packet so we can reuse these pages for small data */ + skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); + if (unlikely(!skb)) { + LOG_ERR("napi_alloc_skb is null\n"); + return NULL; + } + + /* Copy all frame if it fits skb->head */ + if (len <= skb_tailroom(skb)) { + copy = len; + } else { + copy = ETH_HLEN + metasize; + } + skb_put_data(skb, p, copy); + + len -= copy; + offset += copy; + + if (len) { + skb_add_rx_frag(skb, 0, page, offset, len, truesize); + } else { + page_to_free = page; + } + +ok: + if (page_to_free) { + put_page(page_to_free); + } + + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + + return skb; +} + +/** + * virtqueue_add_outbuf - expose output buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg readable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
+ */ +int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, + uint32_t num, void *data, gfp_t gfp) +{ + return virtqueue_add_packed(vq, &sg, num, 1, 0, data, NULL, gfp); +} + +static int __zxdh_en_xdp_xmit_one(struct zxdh_en_device *en_dev, + struct send_queue *sq, struct xdp_frame *xdpf) +{ + struct zxdh_net_hdr_tx *hdr; + int err; + + if (unlikely(xdpf->headroom < en_dev->hdr_len)) + return -EOVERFLOW; + + /* Make room for virtqueue hdr (also change xdpf->headroom?) */ + xdpf->data -= en_dev->hdr_len; + /* Zero header and leave csum up to XDP layers */ + hdr = xdpf->data; + memset(hdr, 0, en_dev->hdr_len); + xdpf->len += en_dev->hdr_len; + + hdr->pd_len = en_dev->hdr_len / HDR_2B_UNIT; + + sg_init_one(sq->sg, xdpf->data, xdpf->len); + + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + GFP_ATOMIC); + if (unlikely(err)) + return -ENOSPC; /* Caller handle free/refcnt */ + + return 0; +} + +/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on + * the current cpu, so it does not need to be locked. + * + * Here we use macro instead of inline functions because we have to deal with + * three issues at the same time: 1. the choice of sq. 2. judge and execute the + * lock/unlock of txq 3. make sparse happy. It is difficult for two inline + * functions to perfectly solve these three problems at the same time. 
+ */ +#define zxdh_en_xdp_get_sq(en_dev) \ + ({ \ + int cpu = smp_processor_id(); \ + struct netdev_queue *txq; \ + typeof(en_dev) v = (en_dev); \ + unsigned int qp; \ + \ + if (v->curr_queue_pairs > nr_cpu_ids) { \ + qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ + qp += cpu; \ + txq = netdev_get_tx_queue(v->netdev, qp); \ + __netif_tx_acquire(txq); \ + } else { \ + qp = cpu % v->curr_queue_pairs; \ + txq = netdev_get_tx_queue(v->netdev, qp); \ + __netif_tx_lock(txq, cpu); \ + } \ + v->sq + qp; \ + }) + +#define zxdh_en_xdp_put_sq(en_dev, q) \ + { \ + struct netdev_queue *txq; \ + typeof(en_dev) v = (en_dev); \ + \ + txq = netdev_get_tx_queue(v->netdev, (q)-v->sq); \ + if (v->curr_queue_pairs > nr_cpu_ids) \ + __netif_tx_release(txq); \ + else \ + __netif_tx_unlock(txq); \ + } + +int zxdh_en_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct zxdh_en_priv *en_priv = netdev_priv(dev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct receive_queue *rq = en_dev->rq; + struct bpf_prog *xdp_prog; + struct send_queue *sq; + unsigned int len; + int packets = 0; + int bytes = 0; + int nxmit = 0; + int kicks = 0; + void *ptr; + int ret; + int i; + + /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this + * indicate XDP resources have been successfully allocated. + */ + xdp_prog = rcu_access_pointer(rq->xdp_prog); + if (!xdp_prog) + return -ENXIO; + + sq = zxdh_en_xdp_get_sq(en_dev); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; + goto out; + } + + /* Free up any pending old buffers before queueing new ones. 
*/ + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(is_xdp_frame(ptr))) { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } else { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, false); + } + packets++; + } + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (__zxdh_en_xdp_xmit_one(en_dev, sq, xdpf)) + break; + nxmit++; + } + ret = nxmit; + + if (flags & XDP_XMIT_FLUSH) { + if (virtqueue_kick_prepare_packed(sq->vq) && + virtqueue_notify(sq->vq)) + kicks = 1; + } +out: + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + sq->stats.xdp_tx += n; + sq->stats.xdp_tx_drops += n - nxmit; + sq->stats.kicks += kicks; + u64_stats_update_end(&sq->stats.syncp); + + zxdh_en_xdp_put_sq(en_dev, sq); + return ret; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 7, 0)) +static __always_inline void +zxdh_xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) +{ + xdp->frame_sz = frame_sz; + xdp->rxq = rxq; +} + +static __always_inline void zxdh_xdp_prepare_buff(struct xdp_buff *xdp, + unsigned char *hard_start, + int headroom, int data_len, + const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + + xdp->data_hard_start = hard_start; + xdp->data = data; + xdp->data_end = data + data_len; + xdp->data_meta = meta_valid ? 
data : data + 1; +} +#endif + +struct sk_buff *receive_mergeable(struct net_device *netdev, + struct zxdh_en_device *en_dev, + struct receive_queue *rq, void *buf, + void *ctx, uint32_t len, uint32_t *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct zxdh_net_hdr_rx *hdr = buf; + uint16_t num_buf = vqm16_to_cpu(en_dev, hdr->num_buffers); + uint16_t hdr_len = hdr->pd_len * HDR_2B_UNIT; + struct page *page = virt_to_head_page(buf); + int32_t offset = buf - page_address(page); + struct sk_buff *head_skb = NULL; + struct sk_buff *curr_skb = NULL; + struct bpf_prog *xdp_prog; + uint32_t truesize = mergeable_ctx_to_truesize(ctx); + uint32_t headroom = mergeable_ctx_to_headroom(ctx); + uint32_t metasize = 0; +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 7, 0)) + uint32_t frame_sz = 0; +#endif + int32_t err; + + stats->bytes += (len - hdr_len); + netdev->stats.rx_bytes += (len - hdr_len); + + if (unlikely(len > truesize)) { + LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", + netdev->name, len, (unsigned long)ctx); + netdev->stats.rx_length_errors++; + netdev->stats.rx_errors++; + goto err_skb; + } + + if (likely(!en_dev->xdp_enabled)) { + xdp_prog = NULL; + goto skip_xdp; + } + + rcu_read_lock(); + xdp_prog = rcu_dereference(rq->xdp_prog); + if (xdp_prog) { + struct xdp_frame *xdpf; + struct page *xdp_page; + struct xdp_buff xdp; + void *data; + uint32_t act; + + /* Transient failure which in theory could occur if + * in-flight packets from before XDP was enabled reach + * the receive path after XDP is loaded. + */ + // if (unlikely(hdr->hdr.gso_type)) //ZY TODO + // goto err_xdp; + + /* Buffers with headroom use PAGE_SIZE as alloc size, + * see add_recvbuf_mergeable() + get_mergeable_buf_len() + */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 7, 0)) + frame_sz = headroom ? PAGE_SIZE : truesize; +#endif + + /* This happens when rx buffer size is underestimated + * or headroom is not enough because of the buffer + * was refilled before XDP is set. 
This should only + * happen for the first several packets, so we don't + * care much about its performance. + */ + if (unlikely(num_buf > 1 || + headroom < zxdh_en_get_headroom(en_dev))) { + /* linearize data for XDP */ + xdp_page = xdp_linearize_page(rq, &num_buf, page, + offset, ZXDH_XDP_HEADROOM, + &len); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 7, 0)) + frame_sz = PAGE_SIZE; +#endif + + if (!xdp_page) + goto err_xdp; + offset = ZXDH_XDP_HEADROOM; + } else { + xdp_page = page; + } + + /* Allow consuming headroom but reserve enough space to push + * the descriptor on if we get an XDP_TX return code. + */ + data = page_address(xdp_page) + offset; + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 7, 0)) + zxdh_xdp_init_buff(&xdp, frame_sz - hdr_len, &rq->xdp_rxq); + zxdh_xdp_prepare_buff(&xdp, data - ZXDH_XDP_HEADROOM + hdr_len, + ZXDH_XDP_HEADROOM, len - hdr_len, true); +#else + xdp.data_hard_start = data - ZXDH_XDP_HEADROOM + hdr_len; + xdp.data = data + hdr_len; + xdp.data_end = xdp.data + (len - hdr_len); + xdp.data_meta = xdp.data; + xdp.rxq = &rq->xdp_rxq; +#endif + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + stats->xdp_packets++; + + switch (act) { + case XDP_PASS: + metasize = xdp.data - xdp.data_meta; + + /* recalculate offset to account for any header + * adjustments and minus the metasize to copy the + * metadata in page_to_skb(). Note other cases do not + * build an skb and avoid using offset + */ + offset = xdp.data - page_address(xdp_page) - hdr_len - + metasize; + + /* recalculate len if xdp.data, xdp.data_end or + * xdp.data_meta were adjusted + */ + len = xdp.data_end - xdp.data + hdr_len + metasize; + /* We can only create skb based on xdp_page. 
*/ + if (unlikely(xdp_page != page)) { + rcu_read_unlock(); + put_page(page); + head_skb = page_to_skb(en_dev, rq, xdp_page, + offset, len, PAGE_SIZE, + metasize, + ZXDH_XDP_HEADROOM); + return head_skb; + } + break; + case XDP_TX: + stats->xdp_tx++; + xdpf = xdp_convert_buff_to_frame(&xdp); + if (unlikely(!xdpf)) + goto err_xdp; + err = zxdh_en_xdp_xmit(netdev, 1, &xdpf, 0); + if (unlikely(!err)) { + xdp_return_frame_rx_napi(xdpf); + } else if (unlikely(err < 0)) { + trace_xdp_exception(en_dev->netdev, xdp_prog, + act); + if (unlikely(xdp_page != page)) + put_page(xdp_page); + goto err_xdp; + } + *xdp_xmit |= ZXDH_XDP_TX; + if (unlikely(xdp_page != page)) + put_page(page); + rcu_read_unlock(); + goto xdp_xmit; + case XDP_REDIRECT: + stats->xdp_redirects++; + err = xdp_do_redirect(netdev, &xdp, xdp_prog); + if (err) { + if (unlikely(xdp_page != page)) + put_page(xdp_page); + goto err_xdp; + } + *xdp_xmit |= ZXDH_XDP_REDIR; + if (unlikely(xdp_page != page)) + put_page(page); + rcu_read_unlock(); + goto xdp_xmit; + default: + bpf_warn_invalid_xdp_action(en_dev->netdev, xdp_prog, + act); + if (unlikely(xdp_page != page)) + __free_pages(xdp_page, 0); + goto err_xdp; + case XDP_ABORTED: + case XDP_DROP: + if (unlikely(xdp_page != page)) + __free_pages(xdp_page, 0); + goto err_xdp; + } + } + rcu_read_unlock(); + +skip_xdp: + head_skb = page_to_skb(en_dev, rq, page, offset, len, truesize, + metasize, headroom); + curr_skb = head_skb; + + if (unlikely(!curr_skb)) { + LOG_ERR("page_to_skb is null\n"); + goto err_skb; + } + while (--num_buf) { + int32_t num_skb_frags; + + buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx); + if (unlikely(!buf)) { + LOG_ERR("%s: rx error: %d buffers out of %d missing\n", + netdev->name, num_buf, + vqm16_to_cpu(en_dev, hdr->num_buffers)); + netdev->stats.rx_length_errors++; + netdev->stats.rx_errors++; + goto err_buf; + } + + stats->bytes += len; + page = virt_to_head_page(buf); + + truesize = mergeable_ctx_to_truesize(ctx); + if 
(unlikely(len > truesize)) { + LOG_ERR("%s: rx error: len %u exceeds truesize %lu\n", + netdev->name, len, (unsigned long)ctx); + netdev->stats.rx_length_errors++; + netdev->stats.rx_errors++; + goto err_skb; + } + + num_skb_frags = skb_shinfo(curr_skb)->nr_frags; + if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { + struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); + + if (unlikely(!nskb)) { + LOG_ERR("alloc_skb is null\n"); + goto err_skb; + } + if (curr_skb == head_skb) { + skb_shinfo(curr_skb)->frag_list = nskb; + } else { + curr_skb->next = nskb; + } + curr_skb = nskb; + head_skb->truesize += nskb->truesize; + num_skb_frags = 0; + } + + if (curr_skb != head_skb) { + head_skb->data_len += len; + head_skb->len += len; + head_skb->truesize += truesize; + } + offset = buf - page_address(page); + + if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { + put_page(page); + skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, len, + truesize); + } else { + skb_add_rx_frag(curr_skb, num_skb_frags, page, offset, + len, truesize); + } + } + + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); + return head_skb; + +err_xdp: + rcu_read_unlock(); + stats->xdp_drops++; +err_skb: + put_page(page); + while (num_buf-- > 1) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { + LOG_ERR("%s: rx error: %d buffers missing\n", + netdev->name, num_buf); + netdev->stats.rx_length_errors++; + netdev->stats.rx_errors++; + break; + } + stats->bytes += len; + page = virt_to_head_page(buf); + put_page(page); + } +err_buf: + stats->drops++; + dev_kfree_skb(head_skb); +xdp_xmit: + return NULL; +} + +void pipd_receive_handle(struct net_device *netdev, struct sk_buff *skb, + struct zxdh_net_hdr_rx *hdr_rcv, + struct virtnet_rq_stats *stats) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint16_t cvid = 0; + uint16_t svid = 0; + uint16_t vid = 0; + bool vlan_striped = false; + + /* rx packet contain the strip 
label & open rxvlan offloading*/ + vlan_striped = hdr_rcv->pipd_hdr.pd_hdr.flags & RX_VLAN_STRIPED_MASK; + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && vlan_striped) { + cvid = htons(hdr_rcv->pipd_hdr.pd_hdr.striped_ctci) & + RX_TPID_VLAN_ID_MASK; + svid = htons(hdr_rcv->pipd_hdr.pd_hdr.striped_stci) & + RX_TPID_VLAN_ID_MASK; + vid = (hdr_rcv->pipd_hdr.pd_hdr.flags & RX_IS_QINQ_PKT_MASK) ? + svid : + cvid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + stats->rx_removed_vlan_packets++; + } + + if (netdev->features & NETIF_F_RXCSUM) { + if (!(hdr_rcv->pipd_hdr.pi_hdr.error_code[1]) && + !(hdr_rcv->pipd_hdr.pi_hdr.error_code[0]) && + !(hdr_rcv->pipd_hdr.pd_hdr.flags & + OUTER_IP_CHKSUM_ERROR_CODE)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + stats->rx_csum_offload_good++; + } else { + skb->ip_summed = CHECKSUM_NONE; + en_dev->hw_stats.netdev_stats.rx_csum_offload_error++; + } + } +} + +void pd_receive_handle(struct net_device *netdev, struct sk_buff *skb, + struct zxdh_net_hdr_rx *hdr_rcv, + struct virtnet_rq_stats *stats) +{ + uint16_t cvid = 0; + uint16_t svid = 0; + uint16_t vid = 0; + bool vlan_striped = false; + + /* rx packet contain the strip label & open rxvlan offloading*/ + vlan_striped = hdr_rcv->pd_hdr.flags & RX_VLAN_STRIPED_MASK; + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && vlan_striped) { + cvid = htons(hdr_rcv->pd_hdr.striped_ctci) & + RX_TPID_VLAN_ID_MASK; + svid = htons(hdr_rcv->pd_hdr.striped_stci) & + RX_TPID_VLAN_ID_MASK; + vid = (hdr_rcv->pd_hdr.flags & RX_IS_QINQ_PKT_MASK) ? 
svid : + cvid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + stats->rx_removed_vlan_packets++; + } +} + +void receive_buf(struct zxdh_en_device *en_dev, struct receive_queue *rq, + void *buf, uint32_t len, void **ctx, uint32_t *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct net_device *netdev = en_dev->netdev; + struct sk_buff *skb = NULL; + struct zxdh_net_hdr_rx *hdr_rcv = (struct zxdh_net_hdr_rx *)buf; + int32_t ret = 0; + struct zxdh_net_1588_hdr_rcv *hdr_rcv_1588 = NULL; + struct zxdh_net_1588_nopi_hdr_rcv *hdr_rcv_nopi_1588 = NULL; + uint8_t pd_len = 0; + uint8_t pkt_flag = 0; + uint8_t packet_to_file = 0; + // struct iphdr *iph = NULL; + // struct udphdr *udph = NULL; + // __sum16 skb_sum = 0; + DEBUG_1588("receive buf:"); + DEBUG_1588_DATA((uint8_t *)buf, len); + + pd_len = hdr_rcv->pd_len * HDR_2B_UNIT; + DEBUG_1588("pd_len:%hhu", pd_len); + + if ((hdr_rcv->pd_hdr.flags & 0xff) == ZXDH_PKT_FLAG) { + pkt_flag = 1; + } + + if (unlikely(len < (hdr_rcv->pd_len * HDR_2B_UNIT) + ETH_HLEN)) { + LOG_ERR("%s: short packet %i\n", netdev->name, len); + netdev->stats.rx_length_errors++; + netdev->stats.rx_errors++; + pkt_packet_process(en_dev, buf, len, pkt_flag); + goto ret_out; + } + + /* 因为后面还要用buf。先get一下将引用计数加一,防止buf所在page被释放。 */ + get_page(virt_to_head_page(buf)); + + skb = receive_mergeable(netdev, en_dev, rq, buf, ctx, len, xdp_xmit, + stats); + + if (unlikely(!skb)) { + LOG_ERR("skb receive_mergeable null\n"); + pkt_packet_process(en_dev, buf, len, pkt_flag); + goto ret_out; + } + + if (hdr_rcv->pd_len > ZXDH_HAS_PI_FLAG) { + pipd_receive_handle(netdev, skb, hdr_rcv, stats); + } else if (hdr_rcv->pd_len != + ZXDH_TYPE_FLAG_LEN) { /* True:非LACP报文, False:LACP报文*/ + pd_receive_handle(netdev, skb, hdr_rcv, stats); + } + + if (en_dev->enable_1588 == true) { + if (pd_len == sizeof(struct zxdh_net_1588_hdr_rcv) || + pd_len == sizeof(struct zxdh_net_1588_nopi_hdr_rcv)) { + if (en_dev->dtp_drs_offload == true) { + hdr_rcv_1588 = + (struct 
zxdh_net_1588_hdr_rcv *)hdr_rcv; + ret = pkt_1588_proc_rcv( + skb, &(hdr_rcv_1588->pd_1588), + en_dev->clock_no, en_dev); + + DEBUG_1588("vport 0x%x rx 1588 hdr :", + en_dev->vport); + DEBUG_1588_DATA( + (uint8_t *)hdr_rcv_1588, + sizeof(struct zxdh_net_1588_hdr_rcv)); + } else { + hdr_rcv_nopi_1588 = + (struct zxdh_net_1588_nopi_hdr_rcv *) + hdr_rcv; + ret = pkt_1588_proc_rcv( + skb, &(hdr_rcv_nopi_1588->pd_1588), + en_dev->clock_no, en_dev); + + DEBUG_1588("vport 0x%x rx 1588 hdr nopi :", + en_dev->vport); + DEBUG_1588_DATA( + (uint8_t *)hdr_rcv_nopi_1588, + sizeof(struct zxdh_net_1588_nopi_hdr_rcv)); + } + + if ((ret != PTP_SUCCESS) && (ret != IS_NOT_PTP_MSG)) { + DEBUG_1588( + "dev %s vport 0x%x pkt_1588_proc_rcv !!!\n", + en_dev->netdev->name, en_dev->vport); + } + + if (skb->ip_summed == CHECKSUM_NONE) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + // // 计算 IP 校验和 + // iph = ip_hdr(skb); + // iph->check = 0; + // iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + + // // 计算 UDP 伪首部校验和 + // udph = udp_hdr(skb); + // udph->check = 0; + // udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - ip_hdrlen(skb), IPPROTO_UDP, 0); + + // //计算skb校验和 + // skb_sum = skb_checksum(skb, 0, skb->len, 0); + // skb->csum = skb_sum; + } + + DEBUG_1588("rx skb->data:"); + DEBUG_1588_DATA((uint8_t *)skb->data, skb->len); + } + } + + skb_record_rx_queue(skb, vq2rxq(rq->vq)); + packet_to_file = pkt_skb_packet_process(en_dev, skb, pkt_flag); + skb->protocol = eth_type_trans(skb, netdev); + + //LOG_INFO("receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); + if (packet_to_file == 0) { + napi_gro_receive(&rq->napi, skb); + } + +ret_out: + /* buf使用完,引用计数减1。 */ + put_page(virt_to_head_page(buf)); + return; +} + +/** + * virtqueue_notify - second half of split virtqueue_kick call. + * @_vq: the struct virtqueue + * + * This does not need to be serialized. 
+ * + * Returns false if host notify failed or queue is broken, otherwise true. + */ +bool virtqueue_notify(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (unlikely(vq->broken)) { + LOG_ERR("vq->broken\n"); + return false; + } + + /* Prod other side to tell it about changes. */ + if (!vq->notify(_vq)) { + LOG_ERR("vq->notify(_vq) failed\n"); + vq->broken = true; + return false; + } + + return true; +} + +bool dh_skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, + gfp_t gfp) +{ + if (pfrag->page) { + if (page_ref_count(pfrag->page) == 1) { + pfrag->offset = 0; + return true; + } + if (pfrag->offset + sz <= pfrag->size) + return true; + put_page(pfrag->page); + } + pfrag->offset = 0; + if (1) { + /* Avoid direct reclaim but allow kswapd to wake */ + pfrag->page = + alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP | + __GFP_NOWARN | __GFP_NORETRY, + DH_SKB_FRAG_PAGE_ORDER); + if (likely(pfrag->page)) { + pfrag->size = PAGE_SIZE << DH_SKB_FRAG_PAGE_ORDER; + return true; + } + } + pfrag->page = alloc_page(gfp); + if (likely(pfrag->page)) { + pfrag->size = PAGE_SIZE; + return true; + } + return false; +} + +int32_t add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +{ + struct page_frag *alloc_frag = &rq->alloc_frag; + uint32_t headroom = 0; + uint32_t tailroom = 0; + uint32_t room = SKB_DATA_ALIGN(headroom + tailroom); + char *buf = NULL; + void *ctx = NULL; + int32_t err = 0; + uint32_t len = 0; + uint32_t hole = 0; + + /* Extra tailroom is needed to satisfy XDP's assumption. This + * means rx frags coalescing won't work, but consider we've + * disabled GSO for XDP, it won't be a big issue. 
+ */ + len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); + if (unlikely(!dh_skb_page_frag_refill(len + room, alloc_frag, gfp))) { + LOG_ERR("dh_skb_page_frag_refill failed\n"); + return -ENOMEM; + } + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + buf += headroom; /* advance address leaving hole at front of pkt */ + get_page(alloc_frag->page); + alloc_frag->offset += len + room; + hole = alloc_frag->size - alloc_frag->offset; + if (hole < len + room) { + /* To avoid internal fragmentation, if there is very likely not + * enough space for another buffer, add the remaining space to + * the current buffer. + */ + len += hole; + alloc_frag->offset += hole; + } + + sg_init_one(rq->sg, buf, len); + ctx = mergeable_len_to_ctx(len, headroom); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); + if (err < 0) { + put_page(virt_to_head_page(buf)); + } + + return err; +} + +/* Assuming a given event_idx value from the other side, if + * we have just incremented index from old to new_idx, + * should we trigger an event? */ +int32_t vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) +{ + /* Note: Xen has similar logic for notification hold-off + * in include/xen/interface/io/ring.h with req_event and req_prod + * corresponding to event_idx + 1 and new_idx respectively. + * Note also that req_event and req_prod in Xen start at 1, + * event indexes in custom queue start at 0. */ + return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); +} + +bool virtqueue_kick_prepare_packed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + uint16_t new = 0; + uint16_t old = 0; + uint16_t off_wrap = 0; + uint16_t flags = 0; + uint16_t wrap_counter = 0; + uint16_t event_idx = 0; + bool needs_kick = false; + union { + struct { + __le16 off_wrap; + __le16 flags; + }; + uint32_t u32; + } snapshot; + + START_USE(vq); + + /* + * We need to expose the new flags value before checking notification + * suppressions. 
+ */ + vqm_mb(vq->weak_barriers); + + old = vq->packed.next_avail_idx - vq->num_added; + new = vq->packed.next_avail_idx; + vq->num_added = 0; + + snapshot.u32 = *(uint32_t *)vq->packed.vring.device; + flags = le16_to_cpu(snapshot.flags); + + LAST_ADD_TIME_CHECK(vq); + LAST_ADD_TIME_INVALID(vq); + + if (flags != VRING_PACKED_EVENT_FLAG_DESC) { + needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE); + goto out; + } + + off_wrap = le16_to_cpu(snapshot.off_wrap); + + wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR; + event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR); + if (wrap_counter != vq->packed.avail_wrap_counter) { + event_idx -= vq->packed.vring.num; + } + + needs_kick = vring_need_event(event_idx, new, old); +out: + END_USE(vq); + return needs_kick; +} + +/* + * Returns false if we couldn't fill entirely (OOM). + * + * Normally run in the receive path, but can also be run from ndo_open + * before we're receiving packets, or from refill_work which is + * careful to disable receiving (using napi_disable). 
+ */ +bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) +{ + int32_t err = 0; + bool oom = 0; + unsigned long flags = 0; + + do { + err = add_recvbuf_mergeable(rq, gfp); + oom = err == -ENOMEM; + if (err) { + break; + } + } while (rq->vq->num_free); + + if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) { + flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); + rq->stats.kicks++; + u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); + } + + return !oom; +} + +int32_t virtnet_receive(struct receive_queue *rq, int32_t budget, + uint32_t *xdp_xmit) +{ + struct zxdh_en_device *en_dev = rq->vq->en_dev; + struct virtnet_rq_stats stats = {}; + uint32_t len = 0; + void *buf = NULL; + int32_t i = 0; + void *ctx = NULL; + + while (stats.packets < budget && + (buf = virtqueue_get_buf_ctx_packed(rq->vq, &len, &ctx))) { + receive_buf(en_dev, rq, buf, len, ctx, xdp_xmit, &stats); + stats.packets++; + } + + if (rq->vq->num_free > + min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) { + if (!try_fill_recv(rq, GFP_ATOMIC)) { + schedule_delayed_work(&en_dev->refill, 0); + } + } + + u64_stats_update_begin(&rq->stats.syncp); + for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { + size_t offset = virtnet_rq_stats_desc[i].offset; + uint64_t *item; + + item = (uint64_t *)((uint8_t *)&rq->stats + offset); + *item += *(uint64_t *)((uint8_t *)&stats + offset); + } + u64_stats_update_end(&rq->stats.syncp); + + return stats.packets; +} + +void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, + int32_t processed) +{ + int32_t opaque = 0; + + opaque = virtqueue_enable_cb_prepare(vq); + if (napi_complete_done(napi, processed)) { + if (unlikely(virtqueue_poll(vq, opaque))) { + virtqueue_napi_schedule(napi, vq); + } + } else { + virtqueue_disable_cb(vq); + } +} + +int virtnet_poll(struct napi_struct *napi, int budget) +{ + struct receive_queue *rq = + container_of(napi, struct receive_queue, napi); + struct zxdh_en_device *en_dev = 
rq->vq->en_dev; + struct send_queue *sq; + uint32_t received = 0; + uint32_t xdp_xmit = 0; + + virtnet_poll_cleantx(rq); + + received = virtnet_receive(rq, budget, &xdp_xmit); + + /* Out of packets? */ + if (received < budget) { + virtqueue_napi_complete(napi, rq->vq, received); + } + + if (xdp_xmit & ZXDH_XDP_REDIR) { +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 5, 0)) + xdp_do_flush(); +#else + xdp_do_flush_map(); +#endif + } + + if (xdp_xmit & ZXDH_XDP_TX) { + sq = zxdh_en_xdp_get_sq(en_dev); + if (virtqueue_kick_prepare_packed(sq->vq) && + virtqueue_notify(sq->vq)) { + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.kicks++; + u64_stats_update_end(&sq->stats.syncp); + } + zxdh_en_xdp_put_sq(en_dev, sq); + } + + return received; +} + +int32_t virtnet_alloc_queues(struct zxdh_en_device *en_dev) +{ + int32_t i = 0; + + en_dev->sq = kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->sq), + GFP_KERNEL); + if (unlikely(en_dev->sq == NULL)) { + LOG_ERR("en_dev->sq kcalloc failed\n"); + goto err_sq; + } + + en_dev->rq = kcalloc(en_dev->max_queue_pairs, sizeof(*en_dev->rq), + GFP_KERNEL); + if (unlikely(en_dev->rq == NULL)) { + LOG_ERR("en_dev->rq kcalloc failed\n"); + goto err_rq; + } + + INIT_DELAYED_WORK(&en_dev->refill, refill_work); + + for (i = 0; i < en_dev->curr_queue_pairs; i++) { + en_dev->rq[i].pages = NULL; + + netif_napi_add(en_dev->netdev, &en_dev->rq[i].napi, + virtnet_poll); + netif_napi_add_tx_weight(en_dev->netdev, &en_dev->sq[i].napi, + virtnet_poll_tx, NAPI_POLL_WEIGHT); + sg_init_table(en_dev->rq[i].sg, ARRAY_SIZE(en_dev->rq[i].sg)); + ewma_pkt_len_init(&en_dev->rq[i].mrg_avg_pkt_len); + sg_init_table(en_dev->sq[i].sg, ARRAY_SIZE(en_dev->sq[i].sg)); + + u64_stats_init(&en_dev->rq[i].stats.syncp); + u64_stats_init(&en_dev->sq[i].stats.syncp); + } + + return 0; + +err_rq: + kfree(en_dev->sq); + en_dev->sq = NULL; +err_sq: + return -ENOMEM; +} + +/** + * virtqueue_set_affinity - setting affinity for a virtqueue + * @vq: the virtqueue + * @cpu_mask: 
the cpu no. + * + * Pay attention the function are best-effort: the affinity hint may not be set + * due to config support, irq type and sharing. + * + */ +int32_t virtqueue_set_affinity(struct virtqueue *vq, + const struct cpumask *cpu_mask) +{ + if (!vq->callback) { + LOG_ERR("vq->callback is null\n"); + return -EINVAL; + } + + return 0; +} + +void refill_work(struct work_struct *work) +{ + int32_t i = 0; + bool still_empty = false; + struct zxdh_en_device *en_dev = + container_of(work, struct zxdh_en_device, refill.work); + + for (i = 0; i < en_dev->curr_queue_pairs; i++) { + struct receive_queue *rq = &en_dev->rq[i]; + + napi_disable(&rq->napi); + still_empty = !try_fill_recv(rq, GFP_KERNEL); + virtnet_napi_enable(rq->vq, &rq->napi); + + /* In theory, this can happen: if we don't get any buffers in + * we will *never* try to fill again. + */ + if (still_empty) { + schedule_delayed_work(&en_dev->refill, HZ / 2); + } + } +} + +int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_vq *eq_vq = container_of(nb, struct dh_eq_vq, irq_nb); + struct dh_eq_vqs *eq_vqs = container_of(eq_vq, struct dh_eq_vqs, vq_s); + struct list_head *item = NULL; + struct zxdh_pci_vq_info *info = NULL; + struct vring_virtqueue *vq = NULL; + struct zxdh_en_device *en_dev = NULL; + unsigned long flags; + + en_dev = (struct zxdh_en_device *)data; + spin_lock_irqsave(&en_dev->lock, flags); + + list_for_each(item, &eq_vqs->vqs) { + info = list_entry(item, struct zxdh_pci_vq_info, node); + + vq = to_vvq(info->vq); + if (!more_used_packed(vq)) { + continue; + } + + if (unlikely(vq->broken)) { + LOG_ERR("vq:%d is broken\n", info->vq->phy_index); + continue; + } + + /* Just a hint for performance: so it's ok that this can be racy! 
*/ + if (vq->event) { + vq->event_triggered = true; + } + + if (vq->vq.callback) { + vq->vq.callback(&vq->vq); + } + } + + spin_unlock_irqrestore(&en_dev->lock, flags); + + return 0; +} + +int32_t vp_get_phy_vqs(struct net_device *netdev, uint16_t vq_cnt, + uint32_t *phy_index, const char *type) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint16_t fw_patch = en_dev->ops->get_fw_patch(en_dev->parent); + int32_t err = 0; + union zxdh_msg *old_msg = NULL; + uint32_t i = 0; +#ifdef ZXDH_MSGQ + bool need_msgq = false; +#endif + unsigned long flags = 0; + + if (vq_cnt > ZXDH_MAX_QUEUES_NUM) { + LOG_ERR("Too many vqs: vq_cnt=%d out of rang:%d", vq_cnt, + ZXDH_MAX_QUEUES_NUM); + return -1; + } + + if (fw_patch < DH_NEW_QUEEU_ALLOC_PATCH) { + old_msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (old_msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -1; + } + + /* if bond device, read queue already used */ + if (en_dev->ops->is_bond(en_dev->parent)) { + LOG_DEBUG("Start get_common_table_msg!!!"); + err = get_common_table_msg(en_dev, en_dev->pcie_id, + OP_CODE_DATA_CHAN, old_msg); + if (err != 0) { + LOG_ERR("Failed to get bond device queue information: %d\n", + err); + kfree(old_msg); + return -1; + } + + LOG_DEBUG("old_msg->reps.cmn_vq_msg.queue_nums=%u", + old_msg->reps.cmn_vq_msg.queue_nums); + for (i = 0; i < old_msg->reps.cmn_vq_msg.queue_nums; + i++) { + LOG_DEBUG( + "old_msg->reps.cmn_vq_msg.phy_qidx[%u]: %u", + i, + old_msg->reps.cmn_vq_msg.phy_qidx[i]); + } + } + } + + if (fw_patch >= DH_NEW_QUEEU_ALLOC_PATCH) { + local_irq_save(flags); // 关闭中断 + preempt_disable(); // 关闭抢占 + } + + /* get phy vq lock */ + err = en_dev->ops->get_vq_lock(en_dev->parent); + if (err < 0) { + goto err_get_lock; + } + + /* find valid vqs */ + err = en_dev->ops->find_valid_vqs(en_dev->parent, vq_cnt, phy_index); + if (err < 0) { + goto err_find_valid_vqs; + } + + /* write 
common list */ + if (fw_patch < DH_NEW_QUEEU_ALLOC_PATCH) { + err = zxdh_common_tbl_init(netdev, old_msg); + } else { +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + need_msgq = true; +#endif + err = en_dev->ops->write_queue_tlb(en_dev->parent, vq_cnt, + phy_index, need_msgq); + } + if (err != 0) { + goto err_find_valid_vqs; + } + + /* write vq list */ + en_dev->ops->write_vqs_bit(en_dev->parent, vq_cnt, phy_index); + +err_find_valid_vqs: + /* release phy vq lock */ + en_dev->ops->release_vq_lock(en_dev->parent); +err_get_lock: + if (fw_patch < DH_NEW_QUEEU_ALLOC_PATCH) { + kfree(old_msg); + } + + if (fw_patch >= DH_NEW_QUEEU_ALLOC_PATCH) { + preempt_enable(); + local_irq_restore(flags); + } + + printk(KERN_CONT "[zxdh_pf][%s][%d] %s phy_index: ", __func__, __LINE__, + type); + for (i = 0; i < vq_cnt; i++) { + printk(KERN_CONT "%u ", phy_index[i]); + } + printk(KERN_CONT "\n"); + + return err; +} + +int32_t vp_find_vqs_msix(struct net_device *netdev, unsigned nvqs, + struct virtqueue *vqs[], vq_callback_t *callbacks[], + const char *const names[], const bool *ctx) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + uint16_t qidx = 0; + + en_dev->vqs = kcalloc(nvqs, sizeof(*en_dev->vqs), GFP_KERNEL); + if (unlikely(en_dev->vqs == NULL)) { + LOG_ERR("en_dev->vqs kcalloc failed\n"); + return -ENOMEM; + } + + err = vp_get_phy_vqs(netdev, nvqs, en_dev->phy_index, "std"); + if (err < 0) { + LOG_ERR("get vq phy lock failed!"); + goto err_find_vq; + } + + for (qidx = 0; qidx < nvqs; ++qidx) { + vqs[qidx] = vp_setup_vq(netdev, qidx, callbacks[qidx], + names[qidx], ctx ? 
ctx[qidx] : false, + qidx); + if (IS_ERR_OR_NULL(vqs[qidx])) { + err = PTR_ERR(vqs[qidx]); + LOG_ERR("vp_setup_vq failed: %d\n", err); + goto err_setup_vq; + } + + en_dev->ops->set_queue_enable(en_dev->parent, + en_dev->phy_index[qidx], true); + } + return 0; + +err_setup_vq: + zxdh_vp_reset(netdev); +err_find_vq: + vp_del_vqs(netdev); + return err; +} + +void zxdh_en_recv_pkts(struct virtqueue *rvq) +{ + struct zxdh_en_device *en_dev = rvq->en_dev; + struct receive_queue *rq = &en_dev->rq[vq2rxq(rvq)]; + + virtqueue_napi_schedule(&rq->napi, rvq); +} + +void zxdh_en_xmit_pkts(struct virtqueue *tvq) +{ + struct zxdh_en_device *en_dev = tvq->en_dev; + struct napi_struct *napi = &en_dev->sq[vq2txq(tvq)].napi; + + /* Suppress further interrupts. */ + virtqueue_disable_cb(tvq); + + if (napi->weight) { + virtqueue_napi_schedule(napi, tvq); + } else { + /* We were probably waiting for more output buffers. */ + netif_wake_subqueue(en_dev->netdev, vq2txq(tvq)); + en_dev->hw_stats.q_stats[vq2txq(tvq)].q_tx_wake++; + } +} + +void zxdh_free_hdr_buf(struct zxdh_en_device *en_dev) +{ + int32_t i = 0; + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + if (en_dev->sq[i].hdr_buf != NULL) { + kfree(en_dev->sq[i].hdr_buf); + } + } +} + +int32_t zxdh_alloc_hdr_buf(struct zxdh_en_device *en_dev) +{ + int32_t i = 0; + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + en_dev->sq[i].hdr_idx = 0; + en_dev->sq[i].hdr_buf = + kzalloc(ZXDH_PF_MAX_DESC_NUM(en_dev) * HDR_BUFFER_LEN, + GFP_KERNEL); + if (en_dev->sq[i].hdr_buf == NULL) { + LOG_ERR("en_dev->sq[%d].hdr_buf kzalloc failed\n", i); + zxdh_free_hdr_buf(en_dev); + return -1; + } + } + + return 0; +} + +int32_t virtnet_find_vqs(struct zxdh_en_device *en_dev) +{ + vq_callback_t **callbacks = NULL; + struct virtqueue **vqs = NULL; + int32_t ret = -ENOMEM; + int32_t i = 0; + int32_t total_vqs = 0; + const char **names = NULL; + bool *ctx = NULL; + + total_vqs = en_dev->max_queue_pairs * 2; + + /* Allocate space for find_vqs parameters 
*/ + vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); + if (unlikely(vqs == NULL)) { + LOG_ERR("vqs kcalloc failed\n"); + goto err_vq; + } + + callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL); + if (unlikely(callbacks == NULL)) { + LOG_ERR("callbacks kmalloc_array failed\n"); + goto err_callback; + } + + names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL); + if (unlikely(names == NULL)) { + LOG_ERR("names kmalloc_array failed\n"); + goto err_names; + } + + ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL); + if (unlikely(ctx == NULL)) { + LOG_ERR("ctx kmalloc failed\n"); + goto err_ctx; + } + + /* Allocate/initialize parameters for services send/receive virtqueues */ + for (i = 0; i < en_dev->max_queue_pairs; i++) { + callbacks[rxq2vq(i)] = zxdh_en_recv_pkts; + callbacks[txq2vq(i)] = zxdh_en_xmit_pkts; + sprintf(en_dev->rq[i].name, "input.%d", i); + sprintf(en_dev->sq[i].name, "output.%d", i); + names[rxq2vq(i)] = en_dev->rq[i].name; + names[txq2vq(i)] = en_dev->sq[i].name; + if (ctx) { + ctx[rxq2vq(i)] = true; + } + } + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + callbacks[txq2vq(en_dev->max_queue_pairs - 1)] = NULL; + } +#endif + + ret = vp_find_vqs_msix(en_dev->netdev, total_vqs, vqs, callbacks, names, + ctx); + if (ret) { + LOG_ERR("vp_find_vqs_msix failed: %d\n", ret); + goto err_find; + } + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + en_dev->rq[i].vq = vqs[rxq2vq(i)]; + en_dev->sq[i].vq = vqs[txq2vq(i)]; + } + +err_find: + kfree(ctx); + ctx = NULL; +err_ctx: + kfree(names); + names = NULL; +err_names: + kfree(callbacks); + callbacks = NULL; +err_callback: + kfree(vqs); + vqs = NULL; +err_vq: + return ret; +} + +void virtnet_free_queues(struct zxdh_en_device *en_dev) +{ + int32_t i = 0; + uint16_t qpairs = 0; + + qpairs = en_dev->max_queue_pairs; +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + --qpairs; + } +#endif + + for (i = 0; i < qpairs; i++) { + netif_napi_del(&en_dev->rq[i].napi); + 
netif_napi_del(&en_dev->sq[i].napi); + } + + /* We called __netif_napi_del(), + * we need to respect an RCU grace period before freeing zxdev->rq + */ + synchronize_net(); + + /* 确保msgq已经释放 */ + kfree(en_dev->rq); + kfree(en_dev->sq); +} + +void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + uint32_t i = 0; + void *buf = NULL; + + START_USE(vq); + + for (i = 0; i < vq->packed.vring.num; i++) { + if (!vq->packed.desc_state[i].data) { + continue; + } + + /* detach_buf clears data, so grab it now. */ + buf = vq->packed.desc_state[i].data; + detach_buf_packed(vq, i, NULL); + END_USE(vq); + return buf; + } + + /* That should have freed everything. */ + BUG_ON(vq->vq.num_free != vq->packed.vring.num); + + END_USE(vq); + return NULL; +} + +void zxdh_free_unused_bufs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct virtqueue *vq = NULL; + void *buf = NULL; + int32_t i = 0; + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + vq = en_dev->sq[i].vq; + while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL) { +#ifdef ZXDH_MSGQ + if (i == (en_dev->max_queue_pairs - 1)) { + if (NEED_MSGQ(en_dev)) { + ZXDH_FREE_PTR(buf); + continue; + } + } +#endif + if (!is_xdp_frame(buf)) + dev_kfree_skb(buf); + else + xdp_return_frame(ptr_to_xdp(buf)); + } + } + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + vq = en_dev->rq[i].vq; + while ((buf = virtqueue_detach_unused_buf_packed(vq)) != NULL) { + put_page(virt_to_head_page(buf)); + } //每8k睡眠一次 + } +} + +struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) +{ + struct page *p = rq->pages; + + if (p) { + rq->pages = (struct page *)p->private; + /* clear private here, it is used to chain pages */ + p->private = 0; + } else { + p = alloc_page(gfp_mask); + } + return p; +} + +void _free_receive_bufs(struct zxdh_en_device *en_dev) +{ + struct bpf_prog *old_prog = NULL; 
+ int32_t i = 0; + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + while (en_dev->rq[i].pages) { + __free_pages(get_a_page(&en_dev->rq[i], GFP_KERNEL), 0); + + old_prog = rtnl_dereference(en_dev->rq[i].xdp_prog); + RCU_INIT_POINTER(en_dev->rq[i].xdp_prog, NULL); + if (old_prog) + bpf_prog_put(old_prog); + } + } +} + +void zxdh_free_receive_bufs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + rtnl_lock(); + _free_receive_bufs(en_dev); + rtnl_unlock(); +} + +void zxdh_free_receive_page_frags(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t i = 0; + + for (i = 0; i < en_dev->max_queue_pairs; i++) { + if (en_dev->rq[i].alloc_frag.page) { + put_page(en_dev->rq[i].alloc_frag.page); + } + } +} + +void zxdh_virtnet_del_vqs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + vp_del_vqs(netdev); + en_dev->ops->vqs_unbind_eqs(en_dev->parent, + (en_dev->max_queue_pairs * 2 - 1)); + en_dev->ops->vqs_channel_unbind_handler( + en_dev->parent, (en_dev->max_queue_pairs * 2 - 1)); + virtnet_free_queues(en_dev); +} + +void zxdh_sec_release_vqs(struct net_device *netdev, + struct zxdh_sec_info *sec_info, uint8_t qidx) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + en_dev->ops->vp_modern_unmap_vq_notify( + en_dev->parent, + (void __iomem __force *)sec_info[qidx].notify_phy_addr); + dma_free_coherent(en_dev->dmadev, sec_info[qidx].event_size_in_bytes, + sec_info[qidx].device, + sec_info[qidx].device_event_dma_addr); + dma_free_coherent(en_dev->dmadev, sec_info[qidx].event_size_in_bytes, + sec_info[qidx].driver, + sec_info[qidx].driver_event_dma_addr); + dma_free_coherent(en_dev->dmadev, sec_info[qidx].ring_size_in_bytes, + sec_info[qidx].desc, 
sec_info[qidx].ring_dma_addr); + + return; +} + +int8_t zxdh_sec_create_vqs(struct net_device *netdev, + struct zxdh_sec_info *sec_info, uint8_t qidx) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct vring_packed_desc *ring = NULL; + struct vring_packed_desc_event *driver = NULL; + struct vring_packed_desc_event *device = NULL; + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + void *notify_addr = NULL; + + ring_size_in_bytes = + ZXDH_SEC_MIN_DESC_NUM * sizeof(struct vring_packed_desc); + ring = dma_alloc_coherent(en_dev->dmadev, ring_size_in_bytes, + &ring_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(ring == NULL)) { + LOG_ERR("ring dma_alloc_coherent failed\n"); + goto err_ring; + } + + event_size_in_bytes = sizeof(struct vring_packed_desc_event); + driver = dma_alloc_coherent(en_dev->dmadev, event_size_in_bytes, + &driver_event_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(driver == NULL)) { + LOG_ERR("driver dma_alloc_coherent failed\n"); + goto err_driver; + } + device = dma_alloc_coherent(en_dev->dmadev, event_size_in_bytes, + &device_event_dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (unlikely(device == NULL)) { + LOG_ERR("device dma_alloc_coherent failed\n"); + goto err_device; + } + + en_dev->ops->activate_phy_vq(en_dev->parent, + en_dev->sec_phy_index[qidx], + ZXDH_SEC_MIN_DESC_NUM, ring_dma_addr, + driver_event_dma_addr, + device_event_dma_addr); + notify_addr = (void __force *)en_dev->ops->vp_modern_map_vq_notify( + en_dev->parent, en_dev->sec_phy_index[qidx], + &en_dev->notify_phy_addr); + if (unlikely(notify_addr == NULL)) { + LOG_ERR("vp_modern_map_vq_notify failed\n"); + goto err_map_notify; + } + en_dev->ops->vq_unbind_channel(en_dev->parent, + en_dev->sec_phy_index[qidx]); + 
en_dev->ops->set_queue_enable(en_dev->parent, + en_dev->sec_phy_index[qidx], true); + + sec_info[qidx].ring_dma_addr = ring_dma_addr; + sec_info[qidx].driver_event_dma_addr = driver_event_dma_addr; + sec_info[qidx].device_event_dma_addr = device_event_dma_addr; + + sec_info[qidx].desc = ring; + sec_info[qidx].driver = driver; + sec_info[qidx].device = device; + + sec_info[qidx].ring_size_in_bytes = ring_size_in_bytes; + sec_info[qidx].event_size_in_bytes = event_size_in_bytes; + + sec_info[qidx].desc_num = ZXDH_SEC_MIN_DESC_NUM; + sec_info[qidx].queue_pairs = ZXDH_SEC_QUEUES_NUM(en_dev) / 2; + sec_info[qidx].phy_index = en_dev->sec_phy_index[qidx]; + sec_info[qidx].notify_phy_addr = en_dev->notify_phy_addr; + + sec_info[qidx].bar0_phy_addr = + en_dev->ops->get_bar_phy_addr(en_dev->parent, 0); + sec_info[qidx].bar0_vir_addr = + en_dev->ops->get_bar_virt_addr(en_dev->parent, 0); + sec_info[qidx].bar0_size = en_dev->ops->get_bar_size(en_dev->parent, 0); + sec_info[qidx].pcie_id = en_dev->ops->get_pcie_id(en_dev->parent); + sec_info[qidx].pdev = en_dev->ops->get_pdev(en_dev->parent); + + return 0; + +err_map_notify: + dma_free_coherent(en_dev->dmadev, event_size_in_bytes, device, + device_event_dma_addr); +err_device: + dma_free_coherent(en_dev->dmadev, event_size_in_bytes, driver, + driver_event_dma_addr); +err_driver: + dma_free_coherent(en_dev->dmadev, ring_size_in_bytes, ring, + ring_dma_addr); +err_ring: + return -1; +} + +void zxdh_sec_vqs_uninit(struct net_device *netdev, uint8_t qidx) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t i = 0; + + for (i = 0; i < qidx; i++) { + zxdh_sec_release_vqs(netdev, en_dev->sec_info, i); + } + + return; +} + +int32_t zxdh_sec_vqs_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint8_t qidx = 0; + int32_t err = 0; + + en_dev->sec_info = kzalloc(sizeof(struct 
zxdh_sec_info) * + ZXDH_SEC_QUEUES_NUM(en_dev), + GFP_KERNEL); + if (unlikely(en_dev->sec_info == NULL)) { + LOG_ERR("sec_info kzalloc failed\n"); + return -1; + } + + err = vp_get_phy_vqs(netdev, ZXDH_SEC_QUEUES_NUM(en_dev), + en_dev->sec_phy_index, "sec"); + if (err < 0) { + LOG_ERR("get vq phy lock failed!"); + goto err_find_vq; + } + + for (qidx = 0; qidx < ZXDH_SEC_QUEUES_NUM(en_dev); qidx++) { + err = zxdh_sec_create_vqs(netdev, en_dev->sec_info, qidx); + if (err != 0) { + LOG_ERR("zxdh_sec_create_vqs failed: %d\n", err); + goto err_create_vqs; + } + } + + en_dev->ops->set_sec_info(en_dev->parent, en_dev->sec_info); + + return 0; + +err_create_vqs: + zxdh_sec_vqs_uninit(netdev, qidx); +err_find_vq: + kfree(en_dev->sec_info); + return -1; +} + +void zxdh_vqs_uninit(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->device_state != ZXDH_DEVICE_STATE_INTERNAL_ERROR && + !en_dev->quick_remove) { + zxdh_vp_reset(netdev); + } + + cancel_delayed_work_sync(&en_dev->refill); + zxdh_free_unused_bufs(netdev); + zxdh_free_receive_bufs(netdev); + zxdh_free_receive_page_frags(netdev); + zxdh_free_hdr_buf(en_dev); + zxdh_virtnet_del_vqs(netdev); +} + +void zxdh_set_default_xps_cpumasks(struct zxdh_en_device *en_dev) +{ + uint16_t queue_pairs = en_dev->curr_queue_pairs; + cpumask_var_t xps_mask; + int i; + int numa = dev_to_node(en_dev->dmadev); + + if (queue_pairs == 0) { + LOG_INFO("en_dev->curr_queue_pairs is 0\n"); + return; + } + + if (!zalloc_cpumask_var(&xps_mask, GFP_KERNEL)) { + LOG_ERR("zalloc_cpumask_var failed for xps_mask\n"); + return; + } + + for (i = 0; i < queue_pairs; i++) { + cpumask_set_cpu(cpumask_local_spread(i, numa), xps_mask); + + netif_set_xps_queue(en_dev->netdev, xps_mask, i); + cpumask_clear(xps_mask); + } + + free_cpumask_var(xps_mask); +} + +int32_t zxdh_vqs_init(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + 
struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + + zxdh_netdev_features_over_dtp(netdev); + en_dev->hdr_len = sizeof(struct zxdh_net_hdr_tx); + if (en_dev->dtp_drs_offload == false) { + en_dev->hdr_len = + sizeof(struct zxdh_net_hdr_tx) - sizeof(struct pi_hdr); + } + + en_dev->any_header_sg = zxdh_has_feature(en_dev, ZXDH_F_ANY_LAYOUT); + en_dev->mergeable_rx_bufs = + zxdh_has_feature(en_dev, ZXDH_NET_F_MRG_RXBUF); + en_dev->netdev->needed_headroom = sizeof(struct zxdh_net_hdr_rx); + en_dev->max_queue_pairs = en_dev->max_vq_pairs; + en_dev->curr_queue_pairs = en_dev->max_queue_pairs; + memset(en_dev->phy_index, 0xFF, sizeof(en_dev->phy_index)); + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) { + en_dev->max_queue_pairs += ZXDH_PQ_PAIRS_NUM; + LOG_INFO("Add msgq, update max_queue_pairs: %d\n", + en_dev->max_queue_pairs); + } +#endif + + INIT_LIST_HEAD(&en_dev->vqs_list); + spin_lock_init(&en_dev->vqs_list_lock); + + INIT_LIST_HEAD(&en_dev->virtqueues); + spin_lock_init(&en_dev->lock); + + /* Allocate services send & receive queues */ + ret = virtnet_alloc_queues(en_dev); + if (ret) { + LOG_ERR("virtnet_alloc_queues failed: %d\n", ret); + return ret; + } + + ret = zxdh_alloc_hdr_buf(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_alloc_hdr_buf failed\n"); + goto err_alloc_hdr_buf; + } + + ret = virtnet_find_vqs(en_dev); + if (ret) { + LOG_ERR("virtnet_find_vqs failed: %d\n", ret); + goto err_find_vqs; + } + + zxdh_set_default_xps_cpumasks(en_dev); + + rtnl_lock(); + netif_set_real_num_tx_queues(en_dev->netdev, en_dev->curr_queue_pairs); + rtnl_unlock(); + rtnl_lock(); + netif_set_real_num_rx_queues(en_dev->netdev, en_dev->curr_queue_pairs); + rtnl_unlock(); + + return 0; + +err_find_vqs: + zxdh_free_hdr_buf(en_dev); +err_alloc_hdr_buf: + virtnet_free_queues(en_dev); + return ret; +} diff --git a/drivers/net/ethernet/dinghai/en_aux/queue.h b/drivers/net/ethernet/dinghai/en_aux/queue.h new file mode 100644 index 000000000000..09f833d3ea01 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_aux/queue.h @@ -0,0 +1,916 @@ +#ifndef __ZXDH_QUEUE_H__ +#define __ZXDH_QUEUE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/*======================================================== + * 是否打开依赖PTP驱动的接口调用代码: + * 在提交代码时候这里注释掉,在实际调试时需要打开。 + *=========================================================*/ +// #define PTP_DRIVER_INTERFACE_EN + +/**======================================================== + * 是否打开依赖os时间戳补丁接口的代码: + * 在提交代码时候这里注释掉,在实际调试时需要打开。 + *=========================================================*/ +/* #define CGEL_TSTAMP_2_PATCH_EN TODO 补丁不可用*/ + +#define ZXDH_CONFIG_SPECIAL_SQ_EN + +/* 判断两个值是否相等,相等表示出错,打印信息后返回指定值 */ +#define CHECK_EQUAL_ERR(a, b, c, fmt, arg...) \ + do { \ + if (unlikely(a == b)) { \ + LOG_ERR(fmt, ##arg); \ + return c; \ + } \ + } while (0) + +/* 判断两个值是否不等,不等表示出错,打印信息后返回指定值 */ +#define CHECK_UNEQUAL_ERR(a, b, c, fmt, arg...) \ + do { \ + if (unlikely(a != b)) { \ + LOG_ERR(fmt, ##arg); \ + return c; \ + } \ + } while (0) + +#define ZXDH_MQ_PAIRS_NUM 8 +#define ZXDH_PQ_PAIRS_NUM 1 +#define ZXDH_MAX_PAIRS_NUM 128 +#define ZXDH_BOND_ETH_MQ_PAIRS_NUM 1 +#define ZXDH_MAX_QUEUES_NUM 4096 +#define ZXDH_SEC_QUEUES_NUM(en_dev) \ + (((256 - en_dev->max_queue_pairs * 2) >= 128) ? \ + 128 : \ + (256 - en_dev->max_queue_pairs * 2)) +#define ZXDH_PF_MAX_BAR_VAL 0x5 +#define ZXDH_PF_BAR0 0 +#define ZXDH_PF_MAX_DESC_NUM(en_dev) \ + ((en_dev->board_type == DH_INICD) ? 1024 : (16 * 1024)) +#define ZXDH_PF_DEFAULT_DESC_NUM(en_dev) \ + ((en_dev->board_type == DH_INICD) ? 
1024 : (8 * 1024)) +#define ZXDH_PF_MIN_DESC_NUM (64) +#define ZXDH_SEC_MIN_DESC_NUM 1024 +#define ZXDH_INDIR_RQT_SIZE 256 +#define ZXDH_NET_HASH_KEY_SIZE 40 +#define ZXDH_HAS_PI_FLAG \ + 19 //38B, 2B unit, consider the receiving scenario of max 1588 packet +#define ZXDH_TYPE_FLAG_LEN 2 +#define ZXDH_DESC_EXTRA_SIZE 512 + +#define VQM_HOST_BAR_OFFSET 0x0 +#define ZXDH_VQ_TLB_OFFSET 0x1bf8 +#define PHY_VQ_REG_OFFSET 0x5000 +#define LOCK_VQ_REG_OFFSET 0x90 +#define ZXDH_PHY_REG_BITS 32 +#define ZXDH_PF_LOCK_ENABLE_MASK 0x1 +#define ZXDH_PF_RELEASE_LOCK_VAL 0 +#define ZXDH_PF_GET_PHY_INDEX_DONE 1 +#define ZXDH_PF_GET_PHY_INDEX_BIT 1 +#define ZXDH_PF_WAIT_COUNT 6000 +#define ZXDH_PF_DELAY_US 100 +#define ZXDH_PF_RQ_TYPE 0 +#define ZXDH_PF_TQ_TYPE 1 +#define ZXDH_PF_POWER_INDEX2 2 + +#define MSG_PAYLOAD_FIX_FIELD 8 +#define MSG_CHAN_PF_MODULE_ID 0 +#define MSG_PAYLOAD_TYPE_WRITE 1 +#define MSG_PAYLOAD_FIELD_MSG_CHL 2 +#define MSG_PAYLOAD_FIELD_DATA_CHL 3 +#define MSG_PAYLOAD_MSG_CHL_SLEN 4 +#define MSG_RECV_BUF_LEN 6 + +#define ZXDH_MAC_NUM 6 +#define ZXDH_MAX_MTU 13500 +#define ZXDH_DEFAULT_MTU 1500 +#define DH_SKB_FRAG_PAGE_ORDER get_order(32768) +#define DH_BUFF_LEN 2048 + +/* The feature bitmap for zxdh net */ +#define ZXDH_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define ZXDH_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define ZXDH_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */ +#define ZXDH_NET_F_MTU 3 /* Initial MTU advice */ +#define ZXDH_NET_F_MAC 5 /* Host has given MAC address. */ +#define ZXDH_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define ZXDH_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define ZXDH_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define ZXDH_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */ +#define ZXDH_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define ZXDH_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. 
*/ +#define ZXDH_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ +#define ZXDH_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ +#define ZXDH_NET_F_STATUS 16 /* net_config.status available */ +#define ZXDH_NET_F_CTRL_VQ 17 /* Control channel available */ +#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */ +#define ZXDH_F_ANY_LAYOUT 27 /* Can the device handle any descriptor layout? */ +#define ZXDH_RING_F_INDIRECT_DESC \ + 28 /* We support indirect buffer descriptors */ + +/* The Guest publishes the used index for which it expects an interrupt + * at the end of the avail ring. Host should ignore the avail->flags field. */ +/* The Host publishes the avail index for which it expects a kick + * at the end of the used ring. Guest should ignore the used->flags field. */ +#define ZXDH_RING_F_EVENT_IDX 29 + +#define ZXDH_F_VERSION_1 32 /* v1.0 compliant */ + +/* + * If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + * If set - use platform DMA tools to access the memory. + * + * Note the reverse polarity (compared to most other features), + * this is for compatibility with legacy systems. + */ +#define ZXDH_F_ACCESS_PLATFORM 33 + +/* This feature indicates support for the packed virtqueue layout. */ +#define ZXDH_F_RING_PACKED 34 + +/* + * This feature indicates that memory accesses by the driver and the + * device are ordered in a way described by the platform. + */ +#define ZXDH_F_ORDER_PLATFORM 36 + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 + +/* + * Mark a descriptor as available or used in packed ring. + * Notice: they are defined as shifts instead of shifted values. 
+ */ +#define VRING_PACKED_DESC_F_AVAIL 7 +#define VRING_PACKED_DESC_F_USED 15 + +/* The Host uses this in used->flags to advise the Guest: don't kick me when + * you add a buffer. It's unreliable, so it's simply an optimization. Guest + * will still kick if it's out of buffers. */ +#define VRING_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't interrupt me + * when you consume a buffer. It's unreliable, so it's simply an + * optimization. */ +#define VRING_AVAIL_F_NO_INTERRUPT 1 + +/* Enable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1 +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if ZXDH_RING_F_EVENT_IDX has been negotiated. + */ +#define VRING_PACKED_EVENT_FLAG_DESC 0x2 + +/* + * Wrap counter bit shift in event suppression structure + * of packed ring. + */ +#define VRING_PACKED_EVENT_F_WRAP_CTR 15 + +/* Alignment requirements for vring elements */ +#define VRING_AVAIL_ALIGN_SIZE 2 +#define VRING_USED_ALIGN_SIZE 4 +#define VRING_DESC_ALIGN_SIZE 16 + +#define MRG_CTX_HEADER_SHIFT 22 + +/* FIXME: MTU in config. 
*/ +#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define GOOD_COPY_LEN 128 + +#define TX_PORT_NP 0x00 +#define TX_PORT_DRS 0x01 +#define TX_PORT_DTP 0x02 +#define HDR_2B_UNIT 2 +#define ENABLE_PI_FLAG_32B 0x1 +#define DISABLE_PI_FIELD_PARSE 0x80 +#define IPV4_TYPE 0x0 +#define IPV6_TYPE 0x1 +#define NOT_IP_TYPE 0x2 +#define PKT_SRC_NP 0x0 +#define PKT_SRC_CPU 0x1 +#define PCODE_IP 0x1 +#define PCODE_TCP 0x2 +#define PCODE_UDP 0x3 +#define PCODE_NO_IP 0x9 +#define INVALID_ETH_PORT_ID 0xff +#define ETH_MTU_4B_UNIT 4 +#define IP_FRG_CSUM_FLAG 0x8000 +#define NOT_IP_FRG_CSUM_FLAG 0x6000 +#define TCP_FRG_CSUM_FLAG 0x24 +#define NOT_TCP_FRG_CSUM_FLAG 0x30 +#define HDR_2B_UNIT 2 + +#define HDR_BUFFER_LEN 100 +#define IP_BASE_HLEN 20 +#define IPV6_BASE_HLEN 40 +#define TCP_BASE_HLEN 20 + +#define OUTER_IP_CHECKSUM_OFFSET (12) +#define INNER_IP_CHECKSUM_OFFSET (15) +#define INNER_L4_CHECKSUM_OFFSET (2) +//#define PI_HDR_L3_CHKSUM_ERROR_CODE (0xff) +//#define PI_HDR_L4_CHKSUM_ERROR_CODE (0xff) +#define OUTER_IP_CHKSUM_ERROR_CODE (0x20) +#define NP_VXLAN_UDP_CHCKSUM_ENABLE (6) +#define NP_IS_VXLAN_FLAG (5) + +#define RX_VLAN_STRIPED_MASK (1 << 4) +#define RX_QINQ_STRIPED_MASK (1 << 14) +#define RX_IS_QINQ_PKT_MASK (1 << 12) +#define RX_TPID_VLAN_ID_MASK (0xfff) + +/* PD header offload flags */ +#define PANELID_EN (1 << 15) +#define LB_EN (1 << 11) + +/* PD header sk_prio */ +#define ZXDH_DCBNL_SET_SK_PRIO(sk_prio) ((0x7 & sk_prio) << 8) + +/* + * __vqm{16,32,64} have the following meaning: + * - __u{16,32,64} for zxdh devices in legacy mode, accessed in native endian + * - __le{16,32,64} for standard-compliant zxdh devices + */ +typedef __u16 __bitwise __vqm16; +typedef __u32 __bitwise __vqm32; +typedef __u64 __bitwise __vqm64; + +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. 
*/ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +struct vring_packed_desc_event { + /* Descriptor Ring Change Event Offset/Wrap Counter. */ + __le16 off_wrap; + /* Descriptor Ring Change Event Flags. */ + __le16 flags; +}; + +struct vring_packed_desc { + /* Buffer Address. */ + __le64 addr; + /* Buffer Length. */ + __le32 len; + /* Buffer ID. */ + __le16 id; + /* The flags depending on descriptor type. */ + __le16 flags; +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */ + uint16_t num; /* Descriptor list length. */ + uint16_t last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra { + dma_addr_t addr; /* Buffer DMA addr. */ + uint32_t len; /* Buffer length. */ + uint16_t flags; /* Descriptor flags. */ + uint16_t next; /* The next desc state in a list. */ +}; + +union pkt_type_t { + uint8_t pkt_type; + struct { + uint8_t pkt_code : 5; + uint8_t pkt_src : 1; + uint8_t ip_type : 2; + } type_ctx; +} __attribute__((packed)); + +struct pi_hdr { + uint8_t bttl_pi_len; + union pkt_type_t pt; + uint16_t vlan_id; + uint32_t ipv6_exp_flags; + uint16_t hdr_l3_offset; + uint16_t hdr_l4_offset; + uint8_t eth_port_id; + uint8_t pkt_action_flag2; + uint16_t pkt_action_flag1; + uint8_t sa_index[8]; + uint8_t error_code[2]; + uint8_t rsv[6]; +} __attribute__((packed)); + +struct pd_net_hdr_tx { +#define TXCAP_STAG_INSERT_EN_BIT (1 << 14) +#define TXCAP_CTAG_INSERT_EN_BIT (1 << 13) +#define DELAY_STATISTICS_INSERT_EN_BIT (1 << 7) + uint16_t ol_flag; + uint8_t rsv; + uint8_t panel_id; + uint16_t stci; + uint16_t ctci; + uint8_t tag_idx; + uint8_t tag_data; + uint16_t vfid; /* bit15~11:rsv bit10~0:发送端口vfid */ +} __attribute__((packed)); + +struct pd_net_hdr_rx { +#define RX_PD_HEAD_VLAN_STRIP_BIT (1 << 28) + uint32_t flags; + uint32_t rss_hash; + uint32_t fd; + uint16_t striped_stci; + uint16_t striped_ctci; + uint16_t outer_pkt_type; + 
uint16_t inner_pkt_type; + uint16_t pkt_len; + uint8_t tag_idx; + uint8_t tag_data; + uint16_t src_port; /* bit15~11:rsv bit10~0:源端口vfid */ +} __attribute__((packed)); + +/* zxdh net header */ +struct pipd_net_hdr_tx { + struct pi_hdr pi_hdr; //32B + struct pd_net_hdr_tx pd_hdr; //12B +} __attribute__((packed)); + +struct zxdh_net_hdr_tx { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + union { + struct pd_net_hdr_tx pd_hdr; //12B + struct pipd_net_hdr_tx pipd_hdr; //44B + }; +} __attribute__((packed)); + +struct zxdh_1588_pd_tx { + uint8_t ptp_type[3]; /* 低bit0-16预留,bit17-19 pkt_type, bit23 ptp_udp */ + uint8_t ts_offset; + uint32_t cpu_tx; + uint8_t port; /* egress_port/ingress_port, L4报文此字段无用 */ + uint8_t rsv1[4]; + uint8_t sec_1588_key[3]; +}; + +struct zxdh_net_1588_hdr { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + struct pi_hdr pi_hdr; + struct pd_net_hdr_tx pd_hdr; + + // uint8_t ptp_type[3]; /* 低bit0-16预留,bit17-19 pkt_type, bit23 ptp_udp */ + // uint8_t ts_offset; + // uint32_t cpu_tx; + // uint8_t port; /* egress_port/ingress_port, L4报文此字段无用 */ + // uint8_t rsv1[4]; + // uint8_t sec_1588_key[3]; + struct zxdh_1588_pd_tx pd_1588; +} __attribute__((packed)); + +struct zxdh_net_1588_nopi_hdr { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + struct pd_net_hdr_tx pd_hdr; + + // uint8_t ptp_type[3]; /* 低bit0-16预留,bit17-19 pkt_type, bit23 ptp_udp */ + // uint8_t ts_offset; + // uint32_t cpu_tx; + // uint8_t port; /* egress_port/ingress_port, L4报文此字段无用 */ + // uint8_t rsv1[4]; + // uint8_t sec_1588_key[3]; + struct zxdh_1588_pd_tx pd_1588; +} 
__attribute__((packed)); + +struct pipd_net_hdr_rx { + struct pi_hdr pi_hdr; //32B + struct pd_net_hdr_rx pd_hdr; //26B +} __attribute__((packed)); + +struct zxdh_net_hdr_rx { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + union { + struct pd_net_hdr_rx pd_hdr; //26B + struct pipd_net_hdr_rx pipd_hdr; //58B + }; +} __attribute__((packed)); + +struct zxdh_1588_pd_rx { + uint8_t egress_port; + uint8_t ptp_type + [2]; /* 低bit0-8预留,bit9-11 pkt_type, bit 12-14预留,bit15 ptp_udp */ + uint8_t ts_offset; + uint32_t rx_ts; +}; +struct zxdh_net_1588_hdr_rcv { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + struct pi_hdr pi_hdr; + struct pd_net_hdr_rx pd_hdr; + + // uint8_t egress_port; + // uint8_t ptp_type[2]; /* 低bit0-8预留,bit9-11 pkt_type, bit 12-14预留,bit15 ptp_udp */ + // uint8_t ts_offset; + // uint32_t rx_ts; + struct zxdh_1588_pd_rx pd_1588; +} __attribute__((packed)); + +struct zxdh_net_1588_nopi_hdr_rcv { + uint8_t tx_port; //bit7:2 rsv; bit1:0 00:np, 01:DRS, 10:DTP + uint8_t pd_len; //bit7 rsv; bit6:0 L2报文前的描述符长度,以2B为单位 + uint8_t num_buffers; //表示接收方向num buffers字段 + uint8_t rsv; //保留 + + struct pd_net_hdr_rx pd_hdr; + + // uint8_t egress_port; + // uint8_t ptp_type[2]; /* 低bit0-8预留,bit9-11 pkt_type, bit 12-14预留,bit15 ptp_udp */ + // uint8_t ts_offset; + // uint32_t rx_ts; + struct zxdh_1588_pd_rx pd_1588; +} __attribute__((packed)); + +#ifdef DEBUG +/* For development, we want to crash whenever the ring is screwed. */ +#define BAD_RING(_vq, fmt, args...) \ + do { \ + LOG_ERR("%s:" fmt, (_vq)->vq.name, ##args); \ + BUG(); \ + } while (0) +/* Caller is supposed to guarantee no reentry. 
*/ +#define START_USE(_vq) \ + do { \ + if ((_vq)->in_use) \ + panic("%s:in_use = %i\n", (_vq)->vq.name, \ + (_vq)->in_use); \ + (_vq)->in_use = __LINE__; \ + } while (0) +#define END_USE(_vq) \ + do { \ + BUG_ON(!(_vq)->in_use); \ + (_vq)->in_use = 0; \ + } while (0) +#define LAST_ADD_TIME_UPDATE(_vq) \ + do { \ + ktime_t now = ktime_get(); \ + \ + /* No kick or get, with .1 second between? Warn. */ \ + if ((_vq)->last_add_time_valid) \ + WARN_ON(ktime_to_ms(ktime_sub( \ + now, (_vq)->last_add_time)) > 100); \ + (_vq)->last_add_time = now; \ + (_vq)->last_add_time_valid = true; \ + } while (0) +#define LAST_ADD_TIME_CHECK(_vq) \ + do { \ + if ((_vq)->last_add_time_valid) { \ + WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \ + (_vq)->last_add_time)) > \ + 100); \ + } \ + } while (0) +#define LAST_ADD_TIME_INVALID(_vq) ((_vq)->last_add_time_valid = false) +#else +#define BAD_RING(_vq, fmt, args...) \ + do { \ + LOG_ERR("%s:" fmt, (_vq)->vq.name, ##args); \ + (_vq)->broken = true; \ + } while (0) +#define START_USE(vq) +#define END_USE(vq) +#define LAST_ADD_TIME_UPDATE(vq) +#define LAST_ADD_TIME_CHECK(vq) +#define LAST_ADD_TIME_INVALID(vq) +#endif + +#define vqm_store_mb(weak_barriers, p, v) \ + do { \ + if (weak_barriers) { \ + virt_store_mb(*p, v); \ + } else { \ + WRITE_ONCE(*p, v); \ + mb(); \ + } \ + } while (0) + +/* This is the PCI capability header: */ +struct zxdh_pci_cap { + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. */ + __u8 bar; /* Where to find it. */ + __u8 id; /* Multiple capabilities of the same type */ + __u8 padding[2]; /* Pad to full dword. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length of the structure, in bytes. */ +}; + +struct zxdh_pci_notify_cap { + struct zxdh_pci_cap cap; + __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. 
*/ +}; + +struct virtqueue { + struct list_head list; + void (*callback)(struct virtqueue *vq); + const char *name; + struct zxdh_en_device *en_dev; + uint32_t index; + uint32_t phy_index; + uint32_t num_free; + void *priv; +}; + +/* custom queue ring descriptors: 16 bytes. These can chain together via "next". */ +struct vring_desc { + /* Address (guest-physical). */ + uint64_t addr; + /* Length. */ + uint32_t len; + /* The flags as indicated above. */ + uint16_t flags; + /* We chain unused descriptors via this, too */ + uint16_t next; +}; + +struct vring_avail { + uint16_t flags; + uint16_t idx; + uint16_t ring[]; +}; + +/* u32 is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was used (written to) */ + uint32_t len; +}; + +typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE))) +vring_used_elem_t; + +struct vring_used { + uint16_t flags; + uint16_t idx; + vring_used_elem_t ring[]; +}; + +typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE))) +vring_desc_t; +typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE))) +vring_avail_t; +typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE))) +vring_used_t; + +struct vring { + uint32_t num; + + vring_desc_t *desc; + + vring_avail_t *avail; + + vring_used_t *used; +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + uint32_t free_head; + /* Number we've added since last sync. */ + uint32_t num_added; + + /* Last used index we've seen. 
+ * for split ring, it just contains last used index + * for packed ring: + * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. + * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. + */ + uint16_t last_used_idx; + + /* Hint for event idx: already triggered no need to disable. */ + bool event_triggered; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + uint32_t num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Avail used flags. */ + uint16_t avail_used_flags; + + /* Index of the next avail descriptor. */ + uint16_t next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + uint16_t event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + uint32_t in_use; + + /* Figure out if their kicks are too delayed. 
*/ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +struct zxdh_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* channel num map 1-1 to vector*/ + unsigned channel_num; +}; + +struct virtnet_stat_desc { + char desc[ETH_GSTRING_LEN]; + size_t offset; +}; + +struct virtnet_sq_stats { + struct u64_stats_sync syncp; + uint64_t packets; + uint64_t bytes; + uint64_t xdp_tx; + uint64_t xdp_tx_drops; + uint64_t kicks; + uint64_t tx_timeouts; +}; + +struct virtnet_rq_stats { + struct u64_stats_sync syncp; + uint64_t packets; + uint64_t bytes; + uint64_t drops; + uint64_t xdp_packets; + uint64_t xdp_tx; + uint64_t xdp_redirects; + uint64_t xdp_drops; + uint64_t kicks; + uint64_t rx_csum_offload_good; + uint64_t rx_removed_vlan_packets; +}; +#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) +#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN +struct zxdh_sq_flow_map { + struct hlist_node hlist; + uint32_t dst_ip; + uint16_t dst_port; + uint16_t queue_index; +}; +#endif + +static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { + { "packets", VIRTNET_SQ_STAT(packets) }, + { "bytes", VIRTNET_SQ_STAT(bytes) }, + { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, + { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, + { "kicks", VIRTNET_SQ_STAT(kicks) }, + { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, +}; + +static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { + { "packets", VIRTNET_RQ_STAT(packets) }, + { "bytes", VIRTNET_RQ_STAT(bytes) }, + { "drops", VIRTNET_RQ_STAT(drops) }, + { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) }, + { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, + { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, + { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, + { "kicks", VIRTNET_RQ_STAT(kicks) }, + { "rx_csum_offload_good", VIRTNET_RQ_STAT(rx_csum_offload_good) }, + { 
"rx_removed_vlan_packets", VIRTNET_RQ_STAT(rx_removed_vlan_packets) }, +}; + +#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) +#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) + +/* RX packet size EWMA. The average packet size is used to determine the packet + * buffer size when refilling RX rings. As the entire RX ring may be refilled + * at once, the weight is chosen so that the EWMA will be insensitive to short- + * term, transient changes in packet size. + */ +DECLARE_EWMA(pkt_len, 0, 64) + +/* Internal representation of a send virtqueue */ +struct send_queue { + /* Virtqueue associated with this send _queue */ + struct virtqueue *vq; + + /* TX: fragments + linear part + custom queue header */ + struct scatterlist sg[MAX_SKB_FRAGS + 2]; + + /* Name of the send queue: output.$index */ + char name[40]; + + struct virtnet_sq_stats stats; + + struct napi_struct napi; + + uint8_t *hdr_buf; + uint16_t hdr_idx; + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + struct zxdh_sq_flow_map flow_map; +#endif +}; + +/* Internal representation of a receive virtqueue */ +struct receive_queue { + /* Virtqueue associated with this receive_queue */ + struct virtqueue *vq; + + struct napi_struct napi; + + struct bpf_prog __rcu *xdp_prog; + + struct virtnet_rq_stats stats; + + /* Chain pages by the private ptr. */ + struct page *pages; + + /* Average packet length for mergeable receive buffers. */ + struct ewma_pkt_len mrg_avg_pkt_len; //todo + + /* Page frag for packet buffer allocation. */ + struct page_frag alloc_frag; + + /* RX: fragments + linear part + custom queue header */ + struct scatterlist sg[MAX_SKB_FRAGS + 2]; + + /* Min single buffer size for mergeable buffers case. 
*/ + uint32_t min_buf_len; + + /* Name of this receive queue: input.$index */ + char name[40]; + + struct xdp_rxq_info xdp_rxq; +}; + +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +typedef void vq_callback_t(struct virtqueue *); + +void zxdh_set_default_xps_cpumasks(struct zxdh_en_device *en_dev); +void zxdh_print_vring_info(struct virtqueue *vq, uint32_t desc_index, + uint32_t desc_num); +void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi); +void virtnet_napi_tx_enable(struct net_device *netdev, struct virtqueue *vq, + struct napi_struct *napi); +void virtnet_napi_tx_disable(struct napi_struct *napi); +void refill_work(struct work_struct *work); +int virtnet_poll(struct napi_struct *napi, int budget); +int virtnet_poll_tx(struct napi_struct *napi, int budget); +int32_t txq2vq(int32_t txq); +int32_t rxq2vq(int32_t rxq); +uint16_t vqm16_to_cpu(struct zxdh_en_device *en_dev, __vqm16 val); +uint8_t vp_get_status(struct net_device *netdev); +void vp_set_status(struct net_device *netdev, uint8_t status); +void vp_set_reset_status(struct net_device *netdev, uint8_t status); +void zxdh_add_status(struct net_device *netdev, uint32_t status); +void zxdh_vp_enable_cbs(struct net_device *netdev); +void zxdh_vp_disable_cbs(struct net_device *netdev); +void zxdh_vp_reset(struct net_device *netdev); +void vring_free_queue(struct zxdh_en_device *en_dev, size_t size, void *queue, + dma_addr_t dma_handle); +netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *netdev); +bool try_fill_recv(struct receive_queue *rq, gfp_t gfp); +inline struct zxdh_net_hdr_rx *skb_vnet_hdr(struct sk_buff *skb); +int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, + uint32_t num, void *data, gfp_t gfp); +void virtqueue_disable_cb(struct virtqueue *_vq); +void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, + bool in_napi); +bool virtqueue_enable_cb_delayed(struct virtqueue *_vq); +bool 
virtqueue_kick_prepare_packed(struct virtqueue *_vq); +bool virtqueue_notify(struct virtqueue *_vq); +void zxdh_pf_features_init(struct net_device *netdev); +bool zxdh_has_feature(struct zxdh_en_device *en_dev, uint32_t fbit); +bool zxdh_has_status(struct net_device *netdev, uint32_t sbit); +void zxdh_free_unused_bufs(struct net_device *netdev); +void zxdh_free_receive_bufs(struct net_device *netdev); +void zxdh_free_receive_page_frags(struct net_device *netdev); +void zxdh_virtnet_del_vqs(struct net_device *netdev); +void zxdh_vqs_uninit(struct net_device *netdev); +int32_t zxdh_vqs_init(struct net_device *netdev); +int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action, + void *data); +int32_t vq2rxq(struct virtqueue *vq); +void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len); +void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len, + void **ctx); +uint32_t virtqueue_get_vring_size(struct virtqueue *_vq); +void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, + int32_t processed); +int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, + uint32_t num, void *data, void *ctx, gfp_t gfp); +bool dh_skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, + gfp_t gfp); + +int32_t zxdh_sec_vqs_init(struct net_device *netdev); +void zxdh_sec_vqs_uninit(struct net_device *netdev, uint8_t qidx); +void zxdh_vvq_reset(struct zxdh_en_device *en_dev); +bool is_flow_stopped(struct zxdh_en_device *en_dev); +int zxdh_en_xdp(struct net_device *dev, struct netdev_bpf *xdp); +int zxdh_en_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + uint32_t flags); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_aux/selq.c b/drivers/net/ethernet/dinghai/en_aux/selq.c new file mode 100644 index 000000000000..1b8a8d92e8cf --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_aux/selq.c @@ -0,0 +1,376 @@ +#include +#include +#include +#include 
+#include +#include "../en_aux.h" + +#ifdef ZXDH_CONFIG_SPECIAL_SQ_EN + +struct netdev_queue_attribute { + struct attribute attr; + ssize_t (*show)(struct netdev_queue *queue, char *buf); + ssize_t (*store)(struct netdev_queue *queue, const char *buf, + size_t count); +}; + +enum { + ATTR_DST_IP, + ATTR_DST_PORT, +}; + +static ssize_t zxdh_flow_param_show(struct netdev_queue *queue, char *buf, + int32_t type) +{ + struct net_device *netdev = queue->dev; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + unsigned int queue_index = queue - netdev->_tx; + struct send_queue *sq = &en_dev->sq[queue_index]; + int32_t count; + + LOG_INFO("enter\n"); + switch (type) { + case ATTR_DST_IP: + count = sprintf(buf, "0x%8x\n", ntohl(sq->flow_map.dst_ip)); + break; + case ATTR_DST_PORT: + count = sprintf(buf, "%d\n", ntohs(sq->flow_map.dst_port)); + break; + default: + return -EINVAL; + } + + return count; +} + +static ssize_t zxdh_flow_param_store(struct netdev_queue *queue, + const char *buf, size_t count, + int32_t type) +{ + struct net_device *netdev = queue->dev; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + unsigned int queue_index = queue - netdev->_tx; + struct send_queue *sq = &en_dev->sq[queue_index]; + int32_t rtn = 0; + uint32_t key; + + LOG_INFO("enter\n"); + switch (type) { + case ATTR_DST_IP: + rtn = kstrtou32(buf, 16, &sq->flow_map.dst_ip); + if (rtn < 0) + return rtn; + sq->flow_map.dst_ip = htonl(sq->flow_map.dst_ip); + break; + case ATTR_DST_PORT: + rtn = kstrtou16(buf, 0, &sq->flow_map.dst_port); + if (rtn < 0) + return rtn; + sq->flow_map.dst_port = htons(sq->flow_map.dst_port); + break; + default: + return -EINVAL; + } + + /* Each queue can only apear once in the hash table */ + hash_del_rcu(&sq->flow_map.hlist); + + sq->flow_map.queue_index = queue_index; + if (sq->flow_map.dst_ip != 0 || sq->flow_map.dst_port != 0) { + /* hash and add to 
hash table */ + key = sq->flow_map.dst_ip ^ sq->flow_map.dst_port; + hash_add_rcu(en_dev->flow_map_hash, &sq->flow_map.hlist, key); + } + + return count; +} + +static ssize_t zxdh_dst_port_store(struct netdev_queue *queue, const char *buf, + size_t count) +{ + return zxdh_flow_param_store(queue, buf, count, ATTR_DST_PORT); +} + +static ssize_t zxdh_dst_port_show(struct netdev_queue *queue, char *buf) +{ + return zxdh_flow_param_show(queue, buf, ATTR_DST_PORT); +} + +static ssize_t zxdh_dst_ip_store(struct netdev_queue *queue, const char *buf, + size_t count) +{ + return zxdh_flow_param_store(queue, buf, count, ATTR_DST_IP); +} + +static ssize_t zxdh_dst_ip_show(struct netdev_queue *queue, char *buf) +{ + return zxdh_flow_param_show(queue, buf, ATTR_DST_IP); +} + +static struct netdev_queue_attribute dst_port = { + .attr = { .name = "dst_port", .mode = (S_IWUSR | S_IRUGO) }, + .show = zxdh_dst_port_show, + .store = zxdh_dst_port_store, +}; + +static struct netdev_queue_attribute dst_ip = { + .attr = { .name = "dst_ip", .mode = (S_IWUSR | S_IRUGO) }, + .show = zxdh_dst_ip_show, + .store = zxdh_dst_ip_store, +}; + +static struct attribute *zxdh_txmap_attrs[] = { + &dst_port.attr, + &dst_ip.attr, + NULL, +}; + +static struct attribute_group zxdh_txmap_attr = { + .name = "flow_map", + .attrs = zxdh_txmap_attrs, +}; + +int32_t zxdh_flow_map_update_sysfs(struct net_device *netdev) +{ + int32_t rtn; + int32_t i; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct netdev_queue *txq; + + if (en_dev->old_queue_pairs > en_dev->curr_queue_pairs) { + LOG_INFO("old_queue_pairs(%d) > curr_queue_pairs(%d)\n", + en_dev->old_queue_pairs, en_dev->curr_queue_pairs); + } else { + for (i = en_dev->old_queue_pairs; i < en_dev->curr_queue_pairs; + i++) { + txq = netdev_get_tx_queue(netdev, i); + rtn = sysfs_create_group(&txq->kobj, &zxdh_txmap_attr); + if (rtn) { + LOG_ERR("Failed to create flow_map for tx-%d (err=%d)\n", + i, 
rtn); + goto rollback; + } + } + } + + return 0; + +rollback: + //配置回滚,删除已经创建的sysfs属性 + for (i--; i >= en_dev->old_queue_pairs; i--) { + txq = netdev_get_tx_queue(netdev, i); + sysfs_remove_group(&txq->kobj, &zxdh_txmap_attr); + } + return rtn; +} + +int32_t zxdh_flow_map_init_sysfs(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct netdev_queue *txq; + int32_t rtn; + int32_t qid; + + LOG_DEBUG("enter\n"); + for (qid = 0; qid < en_dev->curr_queue_pairs; qid++) { + // qid = i + params.num_channels * params.num_tc; + txq = netdev_get_tx_queue(netdev, qid); + rtn = sysfs_create_group(&txq->kobj, &zxdh_txmap_attr); + if (rtn) + goto rtn; + } + return 0; + +rtn: + for (--qid; qid >= 0; qid--) { + // qid = i + params.num_channels * params.num_tc; + txq = netdev_get_tx_queue(netdev, qid); + sysfs_remove_group(&txq->kobj, &zxdh_txmap_attr); + } + return rtn; +} + +void zxdh_flow_map_remove_sysfs(struct zxdh_en_device *en_dev) +{ + struct netdev_queue *txq; + struct kernfs_node *kfnode; + int32_t qid; + + LOG_INFO("Entering zxdh_flow_map_remove_sysfs\n"); + + for (qid = 0; qid < en_dev->curr_queue_pairs; qid++) { + // qid = i + en_dev->channels.params.num_channels * + // en_dev->channels.params.num_tc; + txq = netdev_get_tx_queue(en_dev->netdev, qid); + if (!txq) { + LOG_ERR("Failed to get TX queue for qid %d\n", qid); + continue; + } + if (!kobject_get(&txq->kobj)) { + LOG_WARN("Failed to get kobject for qid %d\n", qid); + continue; + } + + kfnode = sysfs_get_dirent(txq->kobj.sd, zxdh_txmap_attr.name); + if (kfnode) { + sysfs_remove_group(&txq->kobj, &zxdh_txmap_attr); + kernfs_put(kfnode); // 释放引用计数,防止内存泄露 + } else { + LOG_INFO("Directory entry not found for qid %d\n", qid); + } + + kobject_put(&txq->kobj); + } +} + +static int32_t zxdh_select_queue_assigned(struct zxdh_en_device *en_dev, + struct sk_buff *skb, + uint32_t *queue_index) +{ + struct zxdh_sq_flow_map *flow_map; + // 
int32_t sk_ix = sk_tx_queue_get(skb->sk); + uint32_t key_all, key_dip, key_dport; + uint16_t dport; + uint32_t dip; + __be16 protocol; + uint8_t l4_proto = 0; + + // if (sk_ix >= en_dev->channels.params.num_channels) + // return sk_ix; + if (hash_empty(en_dev->flow_map_hash)) { + goto fallback; + } + + protocol = vlan_get_protocol(skb); + l4_proto = ip_hdr(skb)->protocol; + + if (protocol == htons(ETH_P_IP)) { + dip = ip_hdr(skb)->daddr; + + if (l4_proto == IPPROTO_UDP || l4_proto == IPPROTO_TCP) + dport = udp_hdr(skb)->dest; + else + goto fallback; + } else { + goto fallback; + } + + // LOG_INFO("dst_ip = 0x%8x, dst_port = %d", ntohl(dip), htons(dport)); + key_all = dip ^ dport; + hash_for_each_possible_rcu(en_dev->flow_map_hash, flow_map, hlist, + key_all) + if (flow_map->dst_ip == dip && flow_map->dst_port == dport) { + *queue_index = flow_map->queue_index; + return 1; + } + + key_dip = dip; + hash_for_each_possible_rcu(en_dev->flow_map_hash, flow_map, hlist, + key_dip) + if (flow_map->dst_ip == dip) { + *queue_index = flow_map->queue_index; + return 1; + } + + key_dport = dport; + hash_for_each_possible_rcu(en_dev->flow_map_hash, flow_map, hlist, + key_dport) + if (flow_map->dst_port == dport) { + *queue_index = flow_map->queue_index; + return 1; + } + +fallback: + return 0; +} + +#ifdef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +uint16_t zxdh_en_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#else +uint16_t zxdh_en_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#endif +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t queue_index; + int32_t rtn = 0; + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return 0; + + rtn = zxdh_select_queue_assigned(en_dev, skb, &queue_index); + if (rtn) { + sk_tx_queue_set(skb->sk, queue_index); + // LOG_INFO("queue_index = 
%d\n", queue_index); + return queue_index; + } + +// #ifdef HAVE_QUEUE_SELECTION_HELPERS_RENAME +#ifdef HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED + queue_index = netdev_pick_tx(netdev, skb, NULL); + return queue_index; +#else + return fallback(netdev, skb, NULL); +#endif +} + +// txq_ix = netdev_pick_tx(dev, skb, NULL); +// up = 0; + +// if (!netdev_get_num_tc(dev)) +// return txq_ix; + +// #ifdef CONFIG_EN_DCB +// if (en_dev->dcbx_dp.trust_state == QPTS_TRUST_DSCP) +// up = zxdh_get_dscp_up(en_dev, skb); +// else +// #endif +// if (skb_vlan_tag_present(skb)) +// up = skb_vlan_tag_get_prio(skb); + +// /* txq_ix can be larger than num_channels since +// * dev->num_real_tx_queues = num_channels * num_tc +// */ +// num_channels = en_dev->channels.params.num_channels; +// if (txq_ix >= num_channels) +// txq_ix = en_dev->txq2sq[txq_ix]->ch_ix; + +// return en_dev->channel_tc2realtxq[txq_ix][up]; + +void zxdh_flow_map_cleanup(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + + LOG_INFO("enter\n"); + zxdh_flow_map_remove_sysfs(en_dev); + hash_init(en_dev->flow_map_hash); +} + +int32_t zxdh_flow_map_init(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t rtn; + + LOG_DEBUG("enter\n"); + rtn = zxdh_flow_map_init_sysfs(en_dev->netdev); + if (!rtn) { + WARN_ON(!hash_empty(en_dev->flow_map_hash)); + hash_init(en_dev->flow_map_hash); + } else { + zxdh_flow_map_cleanup(en_priv); + LOG_ERR("failed to init rate limit\n"); + } + + return rtn; +} + +#endif /*ZXDH_CONFIG_SPECIAL_SQ_EN*/ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_auxiliary.c b/drivers/net/ethernet/dinghai/en_auxiliary.c new file mode 100644 index 000000000000..050d464e4a0a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_auxiliary.c @@ -0,0 +1,329 @@ + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT +#include "../../../../drivers/base/base.h" +#endif + +static const struct zxdh_auxiliary_device_id * +zxdh_auxiliary_match_id(const struct zxdh_auxiliary_device_id *id, + const struct zxdh_auxiliary_device *auxdev) +{ + for (; id->name[0]; id++) { + const char *p = strrchr(dev_name(&auxdev->dev), '.'); + int32_t match_size; + + if (!p) { + continue; + } + match_size = p - dev_name(&auxdev->dev); + + /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */ + if (strlen(id->name) == match_size && + !strncmp(dev_name(&auxdev->dev), id->name, match_size)) { + return id; + } + } + + return NULL; +} + +static int32_t zxdh_auxiliary_match(struct device *dev, + struct device_driver *drv) +{ + struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev); + struct zxdh_auxiliary_driver *auxdrv = zxdh_to_auxiliary_drv(drv); + + return !!zxdh_auxiliary_match_id(auxdrv->id_table, auxdev); +} + +static int32_t zxdh_auxiliary_uevent(const struct device *dev, + struct kobj_uevent_env *env) +{ + const char *name; + const char *p; + + name = dev_name(dev); + p = strrchr(name, '.'); + + return add_uevent_var(env, "MODALIAS=%s%.*s", + ZXDH_AUXILIARY_MODULE_PREFIX, (int32_t)(p - name), + name); +} + +static const struct dev_pm_ops zxdh_auxiliary_dev_pm_ops = { SET_RUNTIME_PM_OPS( + pm_generic_runtime_suspend, pm_generic_runtime_resume, + NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) }; + +static int32_t zxdh_auxiliary_bus_probe(struct device *dev) +{ + struct zxdh_auxiliary_driver *auxdrv = + zxdh_to_auxiliary_drv(dev->driver); + struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev); + int32_t ret = 0; + + ret = dev_pm_domain_attach(dev, true); + + /* In case of old kernels 4.17 and below do nothing in case of + * failure of ENODEV */ + if (ret == -ENODEV) { + ret = 0; + } + + if (ret != 0) { + LOG_WARN("Failed to attach to PM Domain : 
%d\n", ret); + return ret; + } + + ret = auxdrv->probe(auxdev, + zxdh_auxiliary_match_id(auxdrv->id_table, auxdev)); + if (ret != 0) + dev_pm_domain_detach(dev, true); + + return ret; +} + +static void zxdh_auxiliary_bus_remove(struct device *dev) +{ + struct zxdh_auxiliary_driver *auxdrv = + zxdh_to_auxiliary_drv(dev->driver); + struct zxdh_auxiliary_device *auxdev = zxdh_to_auxiliary_dev(dev); + + if (auxdrv->remove) { + auxdrv->remove(auxdev); + } + + dev_pm_domain_detach(dev, true); +} + +static void zxdh_auxiliary_bus_shutdown(struct device *dev) +{ + struct zxdh_auxiliary_driver *auxdrv = NULL; + struct zxdh_auxiliary_device *auxdev = NULL; + + if (dev->driver) { + auxdrv = zxdh_to_auxiliary_drv(dev->driver); + auxdev = zxdh_to_auxiliary_dev(dev); + } + + if (auxdrv && auxdrv->shutdown) { + auxdrv->shutdown(auxdev); + } +} + +static struct bus_type zxdh_auxiliary_bus_type = { + .name = "dinghai10e_auxiliary", + .probe = zxdh_auxiliary_bus_probe, + .remove = zxdh_auxiliary_bus_remove, + .shutdown = zxdh_auxiliary_bus_shutdown, + .match = zxdh_auxiliary_match, + .uevent = zxdh_auxiliary_uevent, + .pm = &zxdh_auxiliary_dev_pm_ops, +}; + +/** + * zxdh_auxiliary_device_init - check zxdh_auxiliary_device and initialize + * @auxdev: auxiliary device struct + * + * This is the second step in the three-step process to register an + * zxdh_auxiliary_device. + * + * When this function returns an error code, then the device_initialize will + * *not* have been performed, and the caller will be responsible to free any + * memory allocated for the zxdh_auxiliary_device in the error path directly. + * + * It returns 0 on success. On success, the device_initialize has been + * performed. After this point any error unwinding will need to include a call + * to zxdh_auxiliary_device_uninit(). In this post-initialize error scenario, a call + * to the device's .release callback will be triggered, and all memory clean-up + * is expected to be handled there. 
+ */ +int32_t zxdh_auxiliary_device_init(struct zxdh_auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + + if (!dev->parent) { + LOG_ERR("zxdh_auxiliary_device has a NULL dev->parent\n"); + return -EINVAL; + } + + if (!auxdev->name) { + LOG_ERR("zxdh_auxiliary_device has a NULL name\n"); + return -EINVAL; + } + + dev->bus = &zxdh_auxiliary_bus_type; + device_initialize(&auxdev->dev); + + return 0; +} +EXPORT_SYMBOL_GPL(zxdh_auxiliary_device_init); + +/** + * zxdh_aux_dev_add - add an auxiliary bus device + * @auxdev: auxiliary bus device to add to the bus + * @modname: name of the parent device's driver module + * + * This is the third step in the three-step process to register an + * zxdh_auxiliary_device. + * + * This function must be called after a successful call to + * zxdh_auxiliary_device_init(), which will perform the device_initialize. This + * means that if this returns an error code, then a call to + * zxdh_auxiliary_device_uninit() must be performed so that the .release callback + * will be triggered to free the memory associated with the zxdh_auxiliary_device. + * + * The expectation is that users will call the "zxdh_auxiliary_device_add" macro so + * that the caller's KBUILD_MODNAME is automatically inserted for the modname + * parameter. Only if a user requires a custom name would this version be + * called directly. 
+ */ +int32_t zxdh_aux_dev_add(struct zxdh_auxiliary_device *auxdev, + const char *modname) +{ + struct device *dev = &auxdev->dev; + int32_t ret = 0; + + if (!modname) { + LOG_ERR("zxdh auxiliary device modname is NULL\n"); + return -EINVAL; + } + + ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id); + if (ret != 0) { + LOG_ERR("zxdh auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret != 0) { + LOG_ERR("adding zxdh auxiliary device failed!: %d\n", ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(zxdh_aux_dev_add); + +/** + * zxdh_auxiliary_find_device - auxiliary device iterator for locating a particular device. + * @start: Device to begin with + * @data: Data to pass to match function + * @match: Callback function to check device + * + * This function returns a reference to a device that is 'found' + * for later use, as determined by the @match callback. + * + * The reference returned should be released with put_device(). + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. 
+ */ +struct zxdh_auxiliary_device *zxdh_auxiliary_find_device( + struct device *start, const void *data, + int32_t (*match)(struct device *dev, const void *data)) +{ + struct device *dev = NULL; + + dev = bus_find_device(&zxdh_auxiliary_bus_type, start, data, match); + if (dev == NULL) { + return NULL; + } + + return zxdh_to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(zxdh_auxiliary_find_device); + +/** + * zxdh_aux_drv_register - register a driver for auxiliary bus devices + * @auxdrv: zxdh_auxiliary_driver structure + * @owner: owning module/driver + * @modname: KBUILD_MODNAME for parent driver + * + * The expectation is that users will call the "zxdh_auxiliary_driver_register" + * macro so that the caller's KBUILD_MODNAME is automatically inserted for the + * modname parameter. Only if a user requires a custom name would this version + * be called directly. + */ +int32_t zxdh_aux_drv_register(struct zxdh_auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + int32_t ret = 0; + + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) { + return -EINVAL; + } + + if (auxdrv->name) { + auxdrv->driver.name = + kasprintf(GFP_KERNEL, "%s.%s", modname, auxdrv->name); + } else { + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + } + if (!auxdrv->driver.name) { + return -ENOMEM; + } + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &zxdh_auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + ret = driver_register(&auxdrv->driver); + if (ret) { + kfree(auxdrv->driver.name); + } + + return ret; +} +EXPORT_SYMBOL_GPL(zxdh_aux_drv_register); + +/** + * zxdh_auxiliary_driver_unregister - unregister a driver + * @auxdrv: zxdh_auxiliary_driver structure + */ +void zxdh_auxiliary_driver_unregister(struct zxdh_auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(zxdh_auxiliary_driver_unregister); + +#ifdef CONFIG_COMPAT_AUXILIARY_EXTERNAL_INIT +void __init 
zxdh_auxiliary_bus_init(void) +{ + WARN_ON(bus_register(&zxdh_auxiliary_bus_type)); +} +#else +static int32_t __init zxdh_auxiliary_bus_init(void) +{ + return bus_register(&zxdh_auxiliary_bus_type); +} + +static void __exit zxdh_auxiliary_bus_exit(void) +{ + bus_unregister(&zxdh_auxiliary_bus_type); +} +module_init(zxdh_auxiliary_bus_init); +module_exit(zxdh_auxiliary_bus_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Auxiliary Bus"); +MODULE_INFO(supported, "external"); +MODULE_AUTHOR("David Ertman "); +MODULE_AUTHOR("Kiran Patil "); +#endif diff --git a/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c b/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c new file mode 100644 index 000000000000..996209c704f8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ethtool/ethtool.c @@ -0,0 +1,3991 @@ +#include +#include +#include +#include +#include +#include +#include "../slib.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../en_aux/queue.h" +#include "../en_aux.h" +#include "../en_aux/en_aux_cmd.h" +#include "../en_np/table/include/dpp_tbl_api.h" +#include "ethtool.h" +#include "linux/dinghai/dh_cmd.h" +#include "../msg_common.h" +#include "../bonding/rdma_ops.h" +#include "../bonding/zxdh_lag.h" +#include "../en_aux/dcbnl/en_dcbnl_api.h" +#include "../en_aux/priv_queue.h" +#include "../en_aux/queue.h" +#include "../en_pf/msg_func.h" + +MODULE_LICENSE("Dual BSD/GPL"); + +#define DRV_NAME "dinghai10e" +#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NBITS 32 +#define MAX_DRV_NAME_LEN 32 +#define MAX_DRV_VERSION_LEN 32 +#define PCI_BUS(PCI_BDF) ((PCI_BDF >> 8) & 0xff) + +#define ZXDH_EN_LINK_MODE_ADD(ks, name, sup) \ + do { \ + if (sup) { \ + ethtool_link_ksettings_add_link_mode((ks), supported, \ + name); \ + } else { \ + ethtool_link_ksettings_add_link_mode( \ + (ks), advertising, name); \ + } \ + } while (0) + +#define ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, bit, sup) \ + 
((sup) ? (((en_dev)->supported_speed_modes) & BIT(bit)) == BIT(bit) : \ + (((en_dev)->advertising_speed_modes) & BIT(bit)) == BIT(bit)) + +#define GET_FEC_LINK_FLAG (0) +#define GET_FEC_CFG_FLAG (1) +#define GET_FEC_CAP_FLAG (2) + +bool enable_1588_debug; + +extern int32_t zxdh_get_ptp_clock_index(struct zxdh_en_device *en_dev, + uint32_t *ptp_clock_idx); +static const uint32_t fec_2_ethtool_fecparam[] = { + [SPM_FEC_NONE] = ETHTOOL_FEC_OFF, + [SPM_FEC_BASER] = ETHTOOL_FEC_BASER, + [SPM_FEC_RS528] = ETHTOOL_FEC_RS, + [SPM_FEC_RS544] = ETHTOOL_FEC_RS, +}; + +static uint32_t zxdh_en_fec_to_ethtool_fecparam(uint32_t fec_mode, + uint32_t flag) +{ + int32_t i; + uint32_t fecparam_cap = 0; + + if (!fec_mode) { + if (flag == GET_FEC_LINK_FLAG) + return ETHTOOL_FEC_NONE; + else if (flag == GET_FEC_CFG_FLAG) + return ETHTOOL_FEC_AUTO; + } + + for (i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++) { + if (fec_mode & BIT(i)) { + fecparam_cap |= fec_2_ethtool_fecparam[i]; + } + } + + if (flag == GET_FEC_CAP_FLAG) + fecparam_cap |= ETHTOOL_FEC_AUTO; + + return fecparam_cap; +} + +static void zxdh_en_fec_to_link_ksettings(uint32_t fec_mode, + struct ethtool_link_ksettings *ks, + bool sup) +{ + if (fec_mode & BIT(SPM_FEC_NONE)) + ZXDH_EN_LINK_MODE_ADD(ks, FEC_NONE, sup); + if (fec_mode & BIT(SPM_FEC_BASER)) + ZXDH_EN_LINK_MODE_ADD(ks, FEC_BASER, sup); + if (fec_mode & BIT(SPM_FEC_RS528) || fec_mode & BIT(SPM_FEC_RS544)) + ZXDH_EN_LINK_MODE_ADD(ks, FEC_RS, sup); +} + +static void zxdh_en_fec_link_ksettings_get(struct zxdh_en_device *en_dev, + struct ethtool_link_ksettings *ks) +{ + int32_t ret; + uint32_t fec_cap; + uint32_t fec_active; + + ret = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, &fec_active); + if (ret) { + LOG_ERR("zxdh_en_fec_mode_get failed!\n"); + return; + } + //LOG_INFO("fec_cap=0x%x, fec_active=0x%x\n", fec_cap, fec_active); + + zxdh_en_fec_to_link_ksettings(fec_cap, ks, true); + zxdh_en_fec_to_link_ksettings(fec_active, ks, false); + + return; +} + +static 
void zxdh_en_pause_link_ksettings_get(struct zxdh_en_device *en_dev, + struct ethtool_link_ksettings *ks) +{ + int32_t err; + uint32_t fc_mode; + + err = zxdh_en_fc_mode_get(en_dev, &fc_mode); + if (err != 0) { + LOG_ERR("zxdh_en_fc_mode_get failed!\n"); + return; + } + + ZXDH_EN_LINK_MODE_ADD(ks, Pause, true); + + if (fc_mode == BIT(SPM_FC_PAUSE_FULL)) + ZXDH_EN_LINK_MODE_ADD(ks, Pause, false); + else if (fc_mode == BIT(SPM_FC_PAUSE_RX) || + fc_mode == BIT(SPM_FC_PAUSE_TX)) + ZXDH_EN_LINK_MODE_ADD(ks, Asym_Pause, false); + + return; +} + +static void zxdh_en_phytype_to_ethtool(struct zxdh_en_device *en_dev, + struct ethtool_link_ksettings *ks, + bool sup) +{ + //0x20000020020 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_1G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 1000baseT_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 1000baseKX_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 1000baseX_Full, sup); + } + + //0x5C0000081000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_10G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseT_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseKR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseCR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseSR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseLR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 10000baseER_Full, sup); + } + + //0x380000000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_25G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 25000baseCR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 25000baseKR_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 25000baseSR_Full, sup); + } + + //0x10C00000000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_1X_50G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 50000baseCR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 50000baseKR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 50000baseSR2_Full, sup); + } + + //0x7800000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_40G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 40000baseKR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 
40000baseCR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 40000baseSR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 40000baseLR4_Full, sup); + } + + //0xF000000000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_100G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR4_ER4_Full, sup); + } + +#ifndef NEED_XARRAY + //0x1E00000000000000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_2X_100G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseKR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseSR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseLR2_ER2_FR2_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 100000baseCR2_Full, sup); + } + + //0x5C000000000000000 + if (ZXDH_EN_SPEED_MODE_TO_ETHTOOL(en_dev, SPM_SPEED_4X_200G, sup)) { + ZXDH_EN_LINK_MODE_ADD(ks, 200000baseKR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 200000baseSR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 200000baseCR4_Full, sup); + ZXDH_EN_LINK_MODE_ADD(ks, 200000baseLR4_ER4_FR4_Full, sup); + } +#endif + return; +} + +static void zxdh_en_ethtool_to_phytype(struct ethtool_link_ksettings *ks, + uint32_t *speed_modes) +{ + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full)) { + *speed_modes |= BIT(SPM_SPEED_1X_1G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full)) { + *speed_modes |= BIT(SPM_SPEED_1X_10G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full)) { + *speed_modes |= BIT(SPM_SPEED_1X_25G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseCR2_Full)) { + *speed_modes |= BIT(SPM_SPEED_1X_50G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full)) { + *speed_modes |= BIT(SPM_SPEED_4X_40G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, 
advertising, + 100000baseKR4_Full)) { + *speed_modes |= BIT(SPM_SPEED_4X_100G); + } + +#ifndef NEED_XARRAY + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR2_Full)) { + *speed_modes |= BIT(SPM_SPEED_2X_100G); + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 200000baseKR4_Full)) { + *speed_modes |= BIT(SPM_SPEED_4X_200G); + } +#endif + + return; +} + +static int32_t zxdh_en_speed_to_speed_modes(uint32_t speed, + uint32_t *speed_modes, + uint32_t sup_modes) +{ + switch (speed) { + case SPEED_1000: { + *speed_modes |= BIT(SPM_SPEED_1X_1G); + break; + } + case SPEED_10000: { + *speed_modes |= BIT(SPM_SPEED_1X_10G); + break; + } + case SPEED_25000: { + *speed_modes |= BIT(SPM_SPEED_1X_25G); + break; + } + case SPEED_40000: { + *speed_modes |= BIT(SPM_SPEED_4X_40G); + break; + } + case SPEED_50000: { + *speed_modes |= BIT(SPM_SPEED_1X_50G); + break; + } + case SPEED_100000: { + *speed_modes |= BIT(SPM_SPEED_2X_100G); + *speed_modes |= BIT(SPM_SPEED_4X_100G); + break; + } + case SPEED_200000: { + *speed_modes |= BIT(SPM_SPEED_4X_200G); + break; + } + default: { + return -EINVAL; + } + } + + *speed_modes &= sup_modes; + if (*speed_modes == 0) { + return -EINVAL; + } + + return 0; +} + +static int32_t zxdh_en_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + ks->base.port = PORT_FIBRE; + ks->base.autoneg = en_dev->autoneg_enable; + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + + if (en_dev->autoneg_enable == AUTONEG_ENABLE) { + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + } + + ks->base.speed = en_dev->speed; + if ((!netif_running(netdev)) || (!netif_carrier_ok(netdev))) { + 
ks->base.speed = SPEED_UNKNOWN; + } + ks->base.duplex = ks->base.speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : + DUPLEX_FULL; + + zxdh_en_phytype_to_ethtool(en_dev, ks, true); + zxdh_en_phytype_to_ethtool(en_dev, ks, false); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF && + zxdh_en_is_panel_port(en_dev)) { + zxdh_en_fec_link_ksettings_get(en_dev, ks); + zxdh_en_pause_link_ksettings_get(en_dev, ks); + } + + return 0; +} + +static int32_t +zxdh_en_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct ethtool_link_ksettings safe_ks; + uint32_t advertising_link_modes = 0; + uint32_t off_speed_modes = 0; + uint32_t on_speed_modes = 0; + int32_t err = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + if (ks->base.duplex == DUPLEX_HALF) { + return -ENAVAIL; + } + + memset(&safe_ks, 0, sizeof(safe_ks)); + ethtool_link_ksettings_zero_link_mode(&safe_ks, supported); + ethtool_link_ksettings_zero_link_mode(&safe_ks, advertising); + + if (ks->base.autoneg == AUTONEG_DISABLE) { + err = zxdh_en_speed_to_speed_modes( + ks->base.speed, &off_speed_modes, + en_dev->supported_speed_modes); + LOG_DEBUG("set speed: %d, off_speed_modes: 0x%x\n", + ks->base.speed, off_speed_modes); + if (err != 0) { + LOG_ERR("zxdh_en_speed_to_speed_mode failed: %d\n", + err); + return -EOPNOTSUPP; + } + + advertising_link_modes = off_speed_modes; + } else { + zxdh_en_phytype_to_ethtool(en_dev, &safe_ks, true); + if (!bitmap_intersects(ks->link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + LOG_ERR("link_mode not supported\n"); + return -EOPNOTSUPP; + } + + bitmap_and(safe_ks.link_modes.advertising, + ks->link_modes.advertising, + 
safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + zxdh_en_ethtool_to_phytype(&safe_ks, &on_speed_modes); + LOG_DEBUG("on_speed_modes: 0x%x\n", on_speed_modes); + advertising_link_modes = on_speed_modes; + } + + if ((advertising_link_modes == en_dev->advertising_speed_modes) && + (ks->base.autoneg == en_dev->autoneg_enable)) { + LOG_DEBUG("nothing changed\n"); + return 0; + } + + safe_ks.base.speed = en_dev->speed; + en_dev->speed = SPEED_UNKNOWN; + LOG_INFO("autoneg %d, link_modes: 0x%x\n", ks->base.autoneg, + advertising_link_modes); + err = zxdh_en_autoneg_set(en_dev, ks->base.autoneg, + advertising_link_modes); + if (err != 0) { + en_dev->speed = safe_ks.base.speed; + LOG_ERR("zxdh_en_autoneg_set failed: %d\n", err); + return err; + } else { + en_dev->autoneg_enable = ks->base.autoneg; + en_dev->advertising_speed_modes = advertising_link_modes; + en_dev->link_up = false; + netif_carrier_off(netdev); + en_dev->ops->set_pf_link_up(en_dev->parent, + FALSE); //TODO:是否需要更新pf信息? + queue_work(en_priv->events->wq, + &en_priv->edev.vf_link_info_update_work); + queue_work(en_priv->events->wq, + &en_priv->edev.link_info_irq_update_np_work); + } + + return err; +} + +static uint32_t zxdh_en_get_link(struct net_device *netdev) +{ + return netif_carrier_ok(netdev) ? 
1 : 0; +} + +static int zxdh_en_get_eeprom_len(struct net_device *netdev) +{ + return 0; +} + +static int zxdh_en_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return 0; +} + +static int zxdh_en_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return 0; +} + +static void zxdh_en_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *ack) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + param->rx_max_pending = ZXDH_PF_MAX_DESC_NUM(en_dev); + param->tx_max_pending = ZXDH_PF_MAX_DESC_NUM(en_dev); + param->rx_pending = en_dev->eth_config.rx_queue_size; + param->tx_pending = en_dev->eth_config.tx_queue_size; + + return; +} + +static int32_t zxdh_phy_vq_reset(struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + int32_t i = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->vqm_msg.opcode = MSIX_MODE_SET; + msg->vqm_msg.cmd = OVS_VQM_CTRL_RESET_QIDS; + msg->vqm_msg.qid_reset_msg.version = ZXDH_VNET_ZTE; + msg->vqm_msg.qid_reset_msg.qnum = en_dev->max_queue_pairs * 2; + for (i = 0; i < en_dev->max_queue_pairs * 2; ++i) { + msg->vqm_msg.qid_reset_msg.qid[i] = en_dev->phy_index[i]; + } + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_CFG_VQM, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("send cfg msix mode msg to riscv failed\n"); + } + kfree(msg); + return err; +} + +static int zxdh_en_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *ack) +{ + struct zxdh_en_priv 
*en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t carrier_ok; + int32_t err = 0; + int32_t i = 0; + bool is_up = netif_running(netdev); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if (!en_dev->ops->is_fw_feature_support(en_dev->parent, + FW_FEATURE_QUEUE_RESET)) { + LOG_ERR("fw feature not supported\n"); + return -EINVAL; + } + + if (en_dev->ops->is_bond(en_dev->parent)) + return -EINVAL; + + if (param->rx_jumbo_pending) { + LOG_ERR("rx_jumbo_pending not supported\n"); + return -EINVAL; + } + if (param->rx_mini_pending) { + LOG_ERR("rx_mini_pending not supported\n"); + return -EINVAL; + } + + if ((param->rx_pending < ZXDH_PF_MIN_DESC_NUM) || + (param->rx_pending > ZXDH_PF_MAX_DESC_NUM(en_dev))) { + LOG_ERR("rx_pending (%d) out of range\n", param->rx_pending); + return -EINVAL; + } + + if ((param->tx_pending < ZXDH_PF_MIN_DESC_NUM) || + (param->tx_pending > ZXDH_PF_MAX_DESC_NUM(en_dev))) { + LOG_ERR("tx_pending (%d) out of range\n", param->tx_pending); + return -EINVAL; + } + + if (param->rx_pending == en_dev->eth_config.rx_queue_size && + param->tx_pending == en_dev->eth_config.tx_queue_size) { + LOG_DEBUG("no need to set ring param\n"); + return 0; + } + + //1、关端口 + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); + if (is_up) { + zxdh_port_enable(en_dev, false); + } + + //2、确保接收方向停流 + if (carrier_ok) { + msleep(80); //等待vqm清空缓存报文 + if (!is_flow_stopped(en_dev)) { + LOG_ERR("rx flow stopped failed\n"); + err = -EINVAL; + goto out; + } + } + +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + msgq_privq_uninit((struct msgq_dev *)en_dev->msgq_dev); +#endif + if (is_up) { + mutex_lock(&en_priv->lock); + cancel_delayed_work_sync(&en_dev->refill); + for (i = 0; i < en_dev->max_vq_pairs; i++) { + napi_disable(&en_dev->rq[i].napi); + virtnet_napi_tx_disable(&en_dev->sq[i].napi); + } + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + } + + //3、改配队列深度 + en_dev->eth_config.rx_queue_size = + 
roundup_pow_of_two(param->rx_pending); + en_dev->eth_config.tx_queue_size = + roundup_pow_of_two(param->tx_pending); + LOG_DEBUG("rx_size: %d, tx_size: %d\n", + en_dev->eth_config.rx_queue_size, + en_dev->eth_config.tx_queue_size); + mutex_lock(&en_dev->parent->lock); + zxdh_free_unused_bufs(netdev); + usleep_range(70, 100); + zxdh_vvq_reset(en_dev); + mutex_unlock(&en_dev->parent->lock); + + //4、队列reset + err = zxdh_phy_vq_reset(en_dev); + if (err != 0) { + LOG_ERR("zxdh_phy_vq_reset failed\n"); + err = -EINVAL; + } + + //5、回填描述符 + if (is_up) { + for (i = 0; i < en_dev->max_vq_pairs; i++) { + if (i < en_dev->curr_queue_pairs) { + if (!try_fill_recv(&en_dev->rq[i], GFP_KERNEL)) + schedule_delayed_work(&en_dev->refill, + 0); + } + virtnet_napi_enable(en_dev->rq[i].vq, + &en_dev->rq[i].napi); + virtnet_napi_tx_enable(netdev, en_dev->sq[i].vq, + &en_dev->sq[i].napi); + } + mutex_unlock(&en_priv->lock); + } + +out: + //6、开端口 +#ifdef ZXDH_MSGQ + if (NEED_MSGQ(en_dev)) + msgq_privq_init((struct msgq_dev *)en_dev->msgq_dev, netdev); +#endif + if (is_up) { + zxdh_port_enable(en_dev, true); + netif_tx_wake_all_queues(netdev); + } + if (carrier_ok) + netif_carrier_on(netdev); + + return err; +} + +static void zxdh_en_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + int32_t err; + uint32_t fc_mode; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + return; + } + + err = zxdh_en_fc_mode_get(en_dev, &fc_mode); + if (err != 0) { + LOG_ERR("zxdh_en_fc_mode_get failed!\n"); + return; + } + + pause->autoneg = 0; + + switch (fc_mode) { + case BIT(SPM_FC_PAUSE_FULL): { + pause->rx_pause = 1; + pause->tx_pause = 1; + break; + } + case BIT(SPM_FC_PAUSE_RX): { + pause->rx_pause = 1; + pause->tx_pause = 0; + break; + } + case BIT(SPM_FC_PAUSE_TX): { + pause->rx_pause = 0; + pause->tx_pause = 1; + 
break; + } + default: { + pause->rx_pause = 0; + pause->tx_pause = 0; + break; + } + } + + return; +} + +static int32_t zxdh_en_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + int32_t err; + uint32_t fc_mode_cur; + uint32_t fc_mode_cfg; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + !(zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + if (pause->autoneg) { + LOG_ERR("not support pause autoneg!\n"); + return -EOPNOTSUPP; + } + + err = zxdh_en_fc_mode_get(en_dev, &fc_mode_cur); + if (err != 0) { + LOG_ERR("zxdh_en_fc_mode_get failed!\n"); + return err; + } + + if ((pause->rx_pause || pause->tx_pause) && + (fc_mode_cur == BIT(SPM_FC_PFC_FULL))) { + LOG_ERR("warning, ethtool cfg pause on, this will lead to pfc off!\n"); + } + + if (pause->rx_pause && pause->tx_pause) { + fc_mode_cfg = BIT(SPM_FC_PAUSE_FULL); + } else if (pause->rx_pause) { + fc_mode_cfg = BIT(SPM_FC_PAUSE_RX); + } else if (pause->tx_pause) { + fc_mode_cfg = BIT(SPM_FC_PAUSE_TX); + } else { + if (fc_mode_cur == BIT(SPM_FC_PFC_FULL)) + fc_mode_cfg = BIT(SPM_FC_PFC_FULL); + else + fc_mode_cfg = BIT(SPM_FC_NONE); + } + + if (fc_mode_cfg != fc_mode_cur) { + err = zxdh_en_fc_mode_set(en_dev, fc_mode_cfg); + if (err != 0) { + LOG_ERR("zxdh_en_fc_mode_set failed!\n"); + return err; + } + } + + return 0; +} + +static int32_t zxdh_en_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int32_t err; + uint32_t fec_cfg; + uint32_t fec_active; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + err = zxdh_en_fec_mode_get(en_dev, NULL, &fec_cfg, &fec_active); + if (err != 0) { + LOG_ERR("zxdh_en_fec_mode_get failed!\n"); + return err; 
+ } + + fecparam->fec = + zxdh_en_fec_to_ethtool_fecparam(fec_cfg, GET_FEC_CFG_FLAG); + fecparam->active_fec = + zxdh_en_fec_to_ethtool_fecparam(fec_active, GET_FEC_LINK_FLAG); + + //LOG_INFO("fec_cfg=0x%x, fecparam->fec=0x%x, fec_active=0x%x, fecparam->active_fec=0x%x\n", + // fec_cfg, fecparam->fec, fec_active, fecparam->active_fec); + + return 0; +} + +static int32_t zxdh_en_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int32_t i; + int32_t err; + uint32_t fec_cap; + uint32_t fec_cfg = 0; + uint32_t fecparam_cap; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + err = zxdh_en_fec_mode_get(en_dev, &fec_cap, NULL, NULL); + if (err != 0) { + LOG_ERR("zxdh_en_fec_mode_get failed!\n"); + return err; + } + fecparam_cap = + zxdh_en_fec_to_ethtool_fecparam(fec_cap, GET_FEC_CAP_FLAG); + + if ((fecparam->fec | fecparam_cap) != fecparam_cap) { + LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec); + return -EOPNOTSUPP; + } + + for (i = 0; i < ARRAY_SIZE(fec_2_ethtool_fecparam); i++) { + if (fecparam->fec == fec_2_ethtool_fecparam[i]) { + fec_cfg |= BIT(i); + } + } + + if (!fec_cfg && (fecparam->fec != ETHTOOL_FEC_AUTO)) { + LOG_ERR("fecparam->fec 0x%x unsupport !\n", fecparam->fec); + return -EOPNOTSUPP; + } + + //LOG_INFO("fecparam_cap=0x%x, fec_cap=0x%x, fecparam->fec=0x%x, fec_cfg=0x%x\n", + // fecparam_cap, fec_cap, fecparam->fec, fec_cfg); + + err = zxdh_en_fec_mode_set(en_dev, fec_cfg); + if (err != 0) { + LOG_ERR("zxdh_en_fec_mode_set failed!\n"); + return err; + } + + return 0; +} + +static int32_t zxdh_en_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + uint32_t read_bytes; + uint8_t data[2] = { 0 }; + struct zxdh_en_module_eeprom_param query = { 0 }; + struct zxdh_en_device *en_dev = 
netdev_priv(netdev); + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + query.i2c_addr = SFF_I2C_ADDRESS_LOW; + query.page = 0; + query.offset = 0; + query.length = 2; + read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, data); + if (read_bytes != query.length) { + return -EIO; + } + + switch (data[0]) { + case ZXDH_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case ZXDH_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case ZXDH_MODULE_ID_QSFP_PLUS: + case ZXDH_MODULE_ID_QSFP28: + if (data[1] < 3) { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } + break; + /*ZXDH_MODULE_ID_QSFP_DD ZXDH_MODULE_ID_OSFP在长度上不太对,其他类型的光模块开源代码没有*/ + case ZXDH_MODULE_ID_QSFP_DD: + case ZXDH_MODULE_ID_OSFP: + case ZXDH_MODULE_ID_DSFP: + case ZXDH_MODULE_ID_QSFP_PLUS_WITH_CMIS: + case ZXDH_MODULE_ID_SFP_DD_WITH_CMIS: + case ZXDH_MODULE_ID_SFP_PLUS_WITH_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + LOG_ERR("can not recognize module identifier 0x%x!\n", data[0]); + return -EINVAL; + } + + return 0; +} + +static int32_t zxdh_en_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct zxdh_en_module_eeprom_param query = { 0 }; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + uint32_t offset = ee->offset; + uint32_t length = ee->len; + uint8_t identifier; + uint32_t offset_boundary = 0; + uint32_t total_read_bytes = 0; + uint32_t read_bytes = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) 
|| + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + //LOG_INFO("offset %u, len %u\n", ee->offset, ee->len); + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + query.i2c_addr = SFF_I2C_ADDRESS_LOW; + query.bank = 0; + query.page = 0; + query.offset = 0; + query.length = 1; + read_bytes = zxdh_en_module_eeprom_read(en_dev, &query, &identifier); + if (read_bytes != query.length) { + return -EIO; + } + + while (total_read_bytes < ee->len) { + if (identifier == ZXDH_MODULE_ID_SFP) { + if (offset < 256) { + query.i2c_addr = SFF_I2C_ADDRESS_LOW; + query.page = 0; + query.offset = offset; + } else { + query.i2c_addr = SFF_I2C_ADDRESS_HIGH; + query.page = 0; + query.offset = offset - 256; + } + offset_boundary = (query.offset < 128) ? 128 : 256; + query.length = + ((query.offset + length) > offset_boundary) ? + (offset_boundary - query.offset) : + length; + } else if (identifier == ZXDH_MODULE_ID_QSFP || + identifier == ZXDH_MODULE_ID_QSFP_PLUS || + identifier == ZXDH_MODULE_ID_QSFP28 || + identifier == ZXDH_MODULE_ID_QSFP_DD || + identifier == ZXDH_MODULE_ID_OSFP || + identifier == ZXDH_MODULE_ID_DSFP || + identifier == ZXDH_MODULE_ID_QSFP_PLUS_WITH_CMIS || + identifier == ZXDH_MODULE_ID_SFP_DD_WITH_CMIS || + identifier == ZXDH_MODULE_ID_SFP_PLUS_WITH_CMIS) { + query.i2c_addr = SFF_I2C_ADDRESS_LOW; + if (offset < 256) { + query.page = 0; + query.offset = offset; + } else { + query.page = (offset - 256) / 128 + 1; + query.offset = offset - 128 * query.page; + } + offset_boundary = (query.offset < 128) ? 128 : 256; + query.length = + ((query.offset + length) > offset_boundary) ? 
+ (offset_boundary - query.offset) : + length; + } else { + LOG_ERR("can not recognize module identifier 0x%x!\n", + identifier); + return -EINVAL; + } + + read_bytes = zxdh_en_module_eeprom_read( + en_dev, &query, data + total_read_bytes); + if (read_bytes != query.length) { + return -EIO; + } + + total_read_bytes += read_bytes; + offset += read_bytes; + length -= read_bytes; + } + + return 0; +} + +static int32_t +zxdh_en_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct zxdh_en_module_eeprom_param query = { 0 }; + struct zxdh_en_device *en_dev = netdev_priv(netdev); + uint32_t read_bytes = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + if ((en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) { + return -EOPNOTSUPP; + } + + //LOG_INFO("offset %u, length %u, page %u, bank %u, i2c_address %u\n", + // page_data->offset, page_data->length, page_data->page, page_data->bank, page_data->i2c_address); + + if (!page_data->length) + return -EINVAL; + + zte_memset_s(page_data->data, 0, page_data->length); + + query.i2c_addr = page_data->i2c_address; + query.bank = page_data->bank; + query.page = page_data->page; + query.offset = page_data->offset; + query.length = page_data->length; + read_bytes = + zxdh_en_module_eeprom_read(en_dev, &query, page_data->data); + if (read_bytes != query.length) { + return -EIO; + } + + return read_bytes; +} + +static int32_t zxdh_test_health_info(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + struct zxdh_core_health *health = &pf_dev->health; + + return health->fatal ? 
1 : 0; +} + +static int32_t zxdh_test_link_speed(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (!test_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state)) { + return 1; + } + + if (en_dev->speed == SPEED_UNKNOWN) { + LOG_ERR("get link speed error\n"); + return 1; + } + return 0; +} + +static int32_t zxdh_test_link_state(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (!test_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state)) { + return 1; + } + + if ((!en_dev->link_up) && (!netif_carrier_ok(en_dev->netdev))) { + LOG_ERR("curr state is link down\n"); + return 1; + } + + return 0; +} + +#ifdef CONFIG_INET +static int32_t zxdh_test_loopback_validate(struct sk_buff *skb, + struct net_device *ndev, + struct packet_type *pt, + struct net_device *orig_ndev) +{ + struct zxdh_lbt_priv *lbtp = pt->af_packet_priv; + struct zxdh_ehdr *zxdhh = NULL; + struct ethhdr *ethh = NULL; + struct udphdr *udph = NULL; + struct iphdr *iph = NULL; + + ethh = (struct ethhdr *)skb_mac_header(skb); + if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr)) { + goto out; + } + + iph = ip_hdr(skb); + if (iph->protocol != IPPROTO_UDP) { + goto out; + } + + /* Don't assume skb_transport_header() was set */ + udph = (struct udphdr *)((uint8_t *)iph + 4 * iph->ihl); + if (udph->dest != htons(9)) { + goto out; + } + + zxdhh = (struct zxdh_ehdr *)((int8_t *)udph + sizeof(*udph)); + if (zxdhh->magic != cpu_to_be64(ZXDH_TEST_MAGIC)) { + goto out; /* so close ! 
*/ + } + lbtp->loopback_ok = true; + complete(&lbtp->comp); +out: + kfree_skb(skb); + return 0; +} + +int32_t zxdh_test_loopback_setup(struct zxdh_en_priv *en_priv, + struct zxdh_lbt_priv *lbtp) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + + en_dev->local_lb_enable = true; /* 使能环回标识 */ + lbtp->loopback_ok = false; + init_completion(&lbtp->comp); /* 初始化完成量 */ + + lbtp->pt.type = htons(ETH_P_IP); + lbtp->pt.func = zxdh_test_loopback_validate; + lbtp->pt.dev = en_dev->netdev; + lbtp->pt.af_packet_priv = lbtp; + dev_add_pack(&lbtp->pt); /* 注册回调函数 */ + return 0; +} + +static void zxdh_test_loopback_cleanup(struct zxdh_en_priv *en_priv, + struct zxdh_lbt_priv *lbtp) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + + en_dev->local_lb_enable = false; /* 去使能环回标识 */ + dev_remove_pack(&lbtp->pt); +} + +/* 构造udp报文 */ +static struct sk_buff *zxdh_test_get_udp_skb(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct sk_buff *skb = NULL; + struct zxdh_ehdr *zxdhh = NULL; + struct ethhdr *ethh = NULL; /* 报文的L2头*/ + struct udphdr *udph = NULL; /* 报文的L4头*/ + struct iphdr *iph = NULL; /* 报文的L3头*/ + int32_t iplen = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + skb = netdev_alloc_skb(en_dev->netdev, ZXDH_TEST_PKT_SIZE); + if (skb == NULL) { + LOG_ERR("Failed to alloc loopback skb\n"); + return NULL; + } + + /* Reserve for ethernet and IP header */ + ethh = skb_push(skb, ETH_HLEN); /* 插入L2头*/ + skb_reset_mac_header(skb); + + skb_set_network_header(skb, skb->len); + iph = skb_put(skb, sizeof(struct iphdr)); /* 插入ip头 */ + + skb_set_transport_header(skb, skb->len); + udph = skb_put(skb, sizeof(struct udphdr)); /* 插入udp头 */ + + /* Fill ETH header */ + ether_addr_copy(ethh->h_dest, en_dev->netdev->dev_addr); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_IP); /* ipv4 */ + + /* Fill UDP header */ + udph->source = htons(9); + udph->dest = + htons(9); /* 
Discard服务:测试网络连接,到达此端口的包会被drop*/ + udph->len = htons(sizeof(struct zxdh_ehdr) + sizeof(struct udphdr)); + udph->check = 0; + + /* Fill IP header */ + iph->ihl = 5; + iph->ttl = 32; + iph->version = 4; + iph->protocol = IPPROTO_UDP; + iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + + sizeof(struct zxdh_ehdr); /* ip数据包的总长度 */ + iph->tot_len = htons(iplen); + iph->frag_off = 0; + iph->saddr = 0; + iph->daddr = 0; + iph->tos = 0; + iph->id = 0; + ip_send_check(iph); + + /* Fill test header and data */ + zxdhh = skb_put(skb, sizeof(*zxdhh)); + zxdhh->magic = cpu_to_be64(ZXDH_TEST_MAGIC); + + skb->csum = 0; + skb->ip_summed = CHECKSUM_PARTIAL; + udp4_hwcsum(skb, iph->saddr, iph->daddr); /* udp校验*/ + + skb->protocol = htons(ETH_P_IP); + skb->pkt_type = PACKET_HOST; + skb->dev = en_dev->netdev; + + return skb; +} + +static int32_t zxdh_test_loopback(struct zxdh_en_priv *en_priv) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_lbt_priv *lbtp = NULL; + struct sk_buff *skb = NULL; + int32_t err = 0; + + if (!test_bit(ZXDH_DEVICE_STATE_OPENED, &en_dev->state)) { + LOG_ERR("Can't perform loopback test while device is down\n"); + return -ENODEV; + } + + lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL); + if (lbtp == NULL) { + return -ENOMEM; + } + lbtp->loopback_ok = false; + + err = zxdh_test_loopback_setup(en_priv, lbtp); + if (err != 0) { + goto out; + } + + skb = zxdh_test_get_udp_skb(en_priv); + if (skb == NULL) { + err = -ENOMEM; + goto cleanup; + } + + skb_set_queue_mapping(skb, 0); + err = dev_queue_xmit(skb); + if (err) { + LOG_ERR("Failed to xmit loopback packet err(%d)\n", err); + goto cleanup; + } + + wait_for_completion_timeout(&lbtp->comp, ZXDH_LB_VERIFY_TIMEOUT); + err = !lbtp->loopback_ok; + +cleanup: + zxdh_test_loopback_cleanup(en_priv, lbtp); +out: + kfree(lbtp); + return err; +} +#endif /* CONFIG_INET */ + +static int32_t (*zxdh_st_func[ZXDH_ST_NUM])(struct zxdh_en_priv *) = { + zxdh_test_link_state, + zxdh_test_link_speed, + 
zxdh_test_health_info, +#ifdef CONFIG_INET + zxdh_test_loopback, +#endif +}; + +int32_t zxdh_en_self_test_num(void) +{ + return ARRAY_SIZE(zxdh_self_tests); +} + +static void zxdh_en_diag_test(struct net_device *netdev, + struct ethtool_test *etest, u64 *buf) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + int32_t i = 0; + + if (en_priv->edev.device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + + memset(buf, 0, sizeof(uint64_t) * ZXDH_ST_NUM); + + mutex_lock(&en_priv->lock); + + LOG_INFO("Self test begin...\n"); + + for (i = 0; i < ZXDH_ST_NUM; i++) { + LOG_INFO("[%d] %s start..\n", i, zxdh_self_tests[i]); + buf[i] = zxdh_st_func[i](en_priv); + LOG_INFO("[%d] %s end: result(%lld)\n", i, zxdh_self_tests[i], + buf[i]); + } + + mutex_unlock(&en_priv->lock); + + for (i = 0; i < ZXDH_ST_NUM; i++) { + if (buf[i]) { + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + } + + LOG_INFO("Self test out: status flags(0x%x)\n", etest->flags); +} + +static int32_t zxdh_hardware_bond_enable_proc(struct net_device *netdev, + bool enable) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct event_node *node, *tmp; + + if ((en_dev->ops->is_bond(en_dev->parent)) || + (en_dev->ops->is_special_bond(en_dev->parent)) || + (!zxdh_en_is_panel_port(en_dev)) || + (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF)) + return 0; + + if (netif_is_lag_port( + netdev)) { //当前正处于 linux bond 场景,不允许配置rdma硬bond功能 + LOG_INFO( + "refuse to configure hardware_bond when netdev %s is bond slave\n", + netdev->name); + return -EINVAL; + } + + spin_lock(&(en_dev->hardware_bond->ctx.lock)); + list_for_each_entry_safe( + node, tmp, &(en_dev->hardware_bond->ctx.event_list), list) { + list_del(&node->list); + LOG_INFO( + "%s node %d addr %p del from list, event %ld linking %d link_up %d tx_enabled %d\n", + netdev->name, node->idx, (void *)node, node->event, + node->linking, node->link_up, node->tx_enabled); + 
kfree(node); + } + spin_unlock(&(en_dev->hardware_bond->ctx.lock)); + + en_dev->is_hwbond = enable; + en_dev->ops->is_hwbond(en_dev->parent, en_dev->is_hwbond, TRUE); + en_dev->ops->optim_hardware_bond_time(en_dev->parent, + en_dev->is_hwbond); + if (!en_dev->is_hwbond && en_dev->is_primary_port) { + zxdh_set_rdma_hwbond_speed(netdev, en_dev->speed); + } + + return en_dev->ops->update_hb_file_val( + en_dev->parent, en_dev->spec_sbdf, "solid", en_dev->is_hwbond); +} + +static int32_t zxdh_hardware_bond_primary_enable_proc(struct net_device *netdev, + bool enable) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + if (en_dev->ops->is_bond(en_dev->parent) || + (en_dev->ops->is_special_bond(en_dev->parent)) || + (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) || + (!zxdh_en_is_panel_port(en_dev))) + return 0; + + if (netif_is_lag_port( + netdev)) { //当前正处于 linux bond 场景,不允许进行主次口配置。 + LOG_INFO( + "refuse to configure hardware_bond_primary when netdev %s is bond slave\n", + netdev->name); + return -EINVAL; + } + + en_dev->is_primary_port = enable; + en_dev->ops->is_primary_port(en_dev->parent, en_dev->is_primary_port, + TRUE); + en_dev->hardware_bond->primary = en_dev->is_primary_port; + // ZXDH_PFLAG_HARDWARE_BOND_PRIMARY + if (en_dev->ops->is_rdma_enable(en_dev->parent)) { + if (en_dev->is_primary_port && !en_dev->is_rdma_aux_plug) { + LOG_INFO("plug rdma auxiliary device\n"); + queue_work(en_priv->events->wq, + &en_dev->plug_adev_work); + } else if (!en_dev->is_primary_port && + en_dev->is_rdma_aux_plug) { + LOG_INFO("unplug rdma auxiliary device\n"); + queue_work(en_priv->events->wq, + &en_dev->unplug_adev_work); + } + } + + // 检查 primary 文件是否存在,并向其内部写值 + return en_dev->ops->update_hb_file_val(en_dev->parent, + en_dev->spec_sbdf, "primary", + en_dev->is_primary_port); +} + +static int32_t zxdh_lldp_enable_proc(struct net_device *netdev, bool enable) +{ + int32_t ret = 0; + struct 
zxdh_en_priv *en_priv = netdev_priv(netdev); + + ret = zxdh_lldp_enable_set(&en_priv->edev, enable); + if (0 != ret) { + LOG_ERR("%s lldp failed!\n", enable ? "enable" : "disable"); + return ret; + } + + return ret; +} + +static int32_t zxdh_dual_tor_switch_proc(struct net_device *netdev, bool enable) +{ + int32_t ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + + ret = zxdh_dual_tor_switch(&en_priv->edev, enable); + if (0 != ret) { + LOG_ERR("%s zxdh_dual_tor_switch failed!\n", + enable ? "enable" : "disable"); + return ret; + } + + return ret; +} + +static int32_t zxdh_sshd_enable_proc(struct net_device *netdev, bool enable) +{ + int32_t ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + + ret = zxdh_sshd_enable_set(&en_priv->edev, enable); + if (ret != 0) { + LOG_ERR("%s riscv sshd failed!\n", + enable ? "enable" : "disable"); + return ret; + } + + ZXDH_SET_PFLAG(en_priv->edev.pflags, ZXDH_PFLAG_IP, enable); + + return ret; +} + +static int32_t zxdh_1588_debug_enable_proc(struct net_device *netdev, + bool enable) +{ + enable_1588_debug = enable; + return 0; +} + +static int32_t zxdh_1588_enable_proc(struct net_device *netdev, bool enable) +{ + union zxdh_msg *msg = NULL; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + struct zxdh_bar_extra_para para = { 0 }; + DPP_PF_INFO_T dpp_pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + msg->payload.vf_1588_enable.proc_cmd = ZXDH_VF_1588_ENABLE_SET; + msg->payload.hdr.op_code = ZXDH_VF_1588_ENABLE; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + 
msg->payload.vf_1588_enable.enable_1588_vf = (uint32_t)enable; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", ret); + kfree(msg); + return ret; + } + + kfree(msg); + en_dev->enable_1588 = enable; + return ret; + } + + ret = dpp_vport_attr_set(&dpp_pf_info, SRIOV_VPORT_1588_EN, + (uint32_t)enable); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_1588_EN failed, ret:%d\n", + ret); + return ret; + } + + en_dev->enable_1588 = enable; + return ret; +} + +static int32_t zxdh_link_down_on_close_proc(struct net_device *netdev, + bool enable) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + int32_t ret = 0; + + en_priv->edev.link_down_on_close = enable; + + return ret; +} + +static int32_t zxdh_ets_info_update(struct zxdh_en_priv *en_priv, uint32_t mode, + uint32_t *cur_mode, uint32_t *tc_td_th) +{ + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + uint32_t ets_tc_td_th[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 }; + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ret = zxdh_dcbnl_get_flow_td_th(en_priv, ets_tc_td_th); + if (ret) { + LOG_ERR("get td_th fail\n"); + return ret; + } + + *cur_mode = en_dev->ets_info.cur_ets; // 传出当前开关状态 + if (!en_dev->ets_info.switch_flag) { // 第一次从关到开 + memcpy(en_dev->ets_info.tc_td_th, ets_tc_td_th, + sizeof(uint32_t) * ZXDH_DCBNL_MAX_TRAFFIC_CLASS); + } else if (en_dev->ets_info + .cur_ets) { // 只有当前开关状态是开的状态下才更新tc_td_th + memcpy(en_dev->ets_info.tc_td_th, ets_tc_td_th, + sizeof(uint32_t) * ZXDH_DCBNL_MAX_TRAFFIC_CLASS); + LOG_INFO( + "Updated PF ets_info: slot=%d, vport=0x%x, ets_mode=%u\n", + pf_info.slot, pf_info.vport, mode); + LOG_INFO( + "td_th -> tc_td_th[0]:%d tc_td_th[1]:%d tc_td_th[2]:%d tc_td_th[3]:%d tc_td_th[4]:%d tc_td_th[5]:%d tc_td_th[6]:%d tc_td_th[7]:%d\n", + tc_td_th[0], tc_td_th[1], tc_td_th[2], tc_td_th[3], + tc_td_th[4], 
tc_td_th[5], tc_td_th[6], tc_td_th[7]); + } + en_dev->ets_info.cur_ets = mode; + en_dev->ets_info.switch_flag = 1; + + memcpy(tc_td_th, en_dev->ets_info.tc_td_th, + sizeof(uint32_t) * + ZXDH_DCBNL_MAX_TRAFFIC_CLASS); // 关->开,传出存储的td配置 + + return ret; +} + +/* Started by AICoder, pid:t3176w5cf0n5e57140e90a54e0c0e732a8f01a25 */ +static int32_t zxdh_ets_switch_proc(struct net_device *netdev, bool enable) +{ + int32_t ret = 0; + uint32_t mode = enable == true ? 1 : 0; + uint32_t old_mode = 0; + uint32_t tc_td_th[ZXDH_DCBNL_MAX_TRAFFIC_CLASS] = { 0 }; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + + ret = zxdh_dcbnl_enable_debug(en_priv); + if (ret) { + LOG_ERR("clean flow fail %s\n", netdev->name); + return ret; + } + + ret = zxdh_ets_info_update(en_priv, mode, &old_mode, tc_td_th); + if (ret) { + LOG_ERR("ets_info_update fail %s\n", netdev->name); + return ret; + } + + if ((old_mode ^ mode) && mode) { // ets开关状态发生改变,ets 关->开 + ret = zxdh_dcbnl_set_flow_td_th(en_priv, tc_td_th); + LOG_INFO( + "set_flow_td_th -> tc_td_th[0]:%d tc_td_th[1]:%d tc_td_th[2]:%d tc_td_th[3]:%d tc_td_th[4]:%d tc_td_th[5]:%d tc_td_th[6]:%d tc_td_th[7]:%d\n", + tc_td_th[0], tc_td_th[1], tc_td_th[2], tc_td_th[3], + tc_td_th[4], tc_td_th[5], tc_td_th[6], tc_td_th[7]); + if (ret) { + LOG_ERR("set td_th fail %s\n", netdev->name); + return ret; + } + + ret = zxdh_dcbnl_set_tm_gate(en_priv, mode); + if (ret) { + LOG_ERR("ets switch fail %s %u\n", netdev->name, mode); + return ret; + } + } + + if ((old_mode ^ mode) && !mode) { // ets开关状态发生改变,ets 开->关 + ret = zxdh_dcbnl_set_tm_gate(en_priv, mode); + if (ret) { + LOG_ERR("ets switch fail %s %u\n", netdev->name, mode); + return ret; + } + + ret = zxdh_dcbnl_clear_flow_td_th(en_priv); + if (ret) { + LOG_ERR("clear td_th fail %s\n", netdev->name); + return ret; + } + } + + ret = zxdh_dcbnl_disable_debug(en_priv); + if (ret) { + LOG_ERR("disable debug %s\n", netdev->name); + return ret; + } + + LOG_INFO("ets switch success %s %u\n", netdev->name, 
mode); + return ret; +} +/* Ended by AICoder, pid:t3176w5cf0n5e57140e90a54e0c0e732a8f01a25 */ +static int32_t zxdh_pcie_rp_cpl_timeout(struct net_device *netdev, bool mask) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + return en_dev->ops->set_cpl_timeout_mask(en_dev->parent, mask); +} + +static int32_t zxdh_pcie_rp_hp_irq_ctl(struct net_device *netdev, bool status) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + return en_dev->ops->set_hp_irq_ctrl_status(en_dev->parent, status); +} + +typedef int32_t (*zxdh_pflag_handler)(struct net_device *netdev, bool enable); + +struct flag_desc { + uint8_t name[ETH_GSTRING_LEN]; + uint32_t bitno; + zxdh_pflag_handler handler; +}; + +#define ZXDH_PRIV_DESC(_name, _bitno, _handler) \ + { \ + .name = _name, .bitno = _bitno, .handler = _handler, \ + } + +static const struct flag_desc zxdh_gstrings_priv_flags[] = { + ZXDH_PRIV_DESC("enable_lldp", ZXDH_PFLAG_ENABLE_LLDP, + zxdh_lldp_enable_proc), + ZXDH_PRIV_DESC("enable_sshd", ZXDH_PFLAG_ENABLE_SSHD, + zxdh_sshd_enable_proc), + ZXDH_PRIV_DESC("debug_ip", ZXDH_PFLAG_IP, NULL), + ZXDH_PRIV_DESC("1588_debug", ZXDH_PFLAG_1588_DEBUG, + zxdh_1588_debug_enable_proc), + ZXDH_PRIV_DESC("hardware-bond", ZXDH_PFLAG_HARDWARE_BOND, + zxdh_hardware_bond_enable_proc), + ZXDH_PRIV_DESC("hardware-bond-primary", + ZXDH_PFLAG_HARDWARE_BOND_PRIMARY, + zxdh_hardware_bond_primary_enable_proc), + ZXDH_PRIV_DESC("link-down-on-close", ZXDH_PFLAG_LINK_DOWN_ON_CLOSE, + zxdh_link_down_on_close_proc), + ZXDH_PRIV_DESC("ets-switch", ZXDH_PFLAG_ETS_SWITCH, + zxdh_ets_switch_proc), + ZXDH_PRIV_DESC("pcie_aer_cpl_timeout", ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT, + zxdh_pcie_rp_cpl_timeout), + ZXDH_PRIV_DESC("pcie_rp_hp_irq_ctl", ZXDH_PFLAG_PCIE_HP_IRQ_CTRL, + zxdh_pcie_rp_hp_irq_ctl), + ZXDH_PRIV_DESC("dual_tor", ZXDH_PFLAG_DUAL_TOR_CTRL, + zxdh_dual_tor_switch_proc), + 
ZXDH_PRIV_DESC("1588_enable", ZXDH_PFLAG_1588_ENABLE, + zxdh_1588_enable_proc), +}; + +#define ZXDH_PRIV_FALG_ARRAY_SIZE ARRAY_SIZE(zxdh_gstrings_priv_flags) + +static void zxdh_en_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + uint16_t i = 0; + int8_t ip[20] = { 0 }; + int32_t ret = 0; + + switch (stringset) { + case ETH_SS_STATS: { + snprintf(data, ETH_GSTRING_LEN, + "rx_packets"); //get stat from netdev->stats + ZXDH_ADD_STRING(data, "tx_packets"); + ZXDH_ADD_STRING(data, "rx_bytes"); + ZXDH_ADD_STRING(data, "tx_bytes"); + ZXDH_ADD_STRING(data, "tx_queue_wake"); + ZXDH_ADD_STRING(data, "tx_queue_stopped"); + ZXDH_ADD_STRING(data, "tx_queue_dropped"); + ZXDH_ADD_STRING( + data, + "rx_removed_vlan_packets"); //get stat from xmit-func + ZXDH_ADD_STRING(data, "tx_added_vlan_packets"); + ZXDH_ADD_STRING(data, "rx_csum_offload_good"); + ZXDH_ADD_STRING(data, "rx_csum_offload_error"); + + ZXDH_ADD_STRING(data, "rx_vport_packets"); //get stat from vqm + ZXDH_ADD_STRING(data, "tx_vport_packets"); + ZXDH_ADD_STRING(data, "rx_vport_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_bytes"); + ZXDH_ADD_STRING(data, "rx_vport_dropped"); + ZXDH_ADD_STRING(data, + "rx_vport_unicast_packets"); //get stat from np + ZXDH_ADD_STRING(data, "tx_vport_unicast_packets"); + ZXDH_ADD_STRING(data, "rx_vport_unicast_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_unicast_bytes"); + ZXDH_ADD_STRING(data, "rx_vport_multicast_packets"); + ZXDH_ADD_STRING(data, "tx_vport_multicast_packets"); + ZXDH_ADD_STRING(data, "rx_vport_multicast_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_multicast_bytes"); + ZXDH_ADD_STRING(data, "rx_vport_broadcast_packets"); + ZXDH_ADD_STRING(data, "tx_vport_broadcast_packets"); + ZXDH_ADD_STRING(data, "rx_vport_broadcast_bytes"); + 
ZXDH_ADD_STRING(data, "tx_vport_broadcast_bytes"); + ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_packets"); + ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_packets"); + ZXDH_ADD_STRING(data, "rx_vport_mtu_drop_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_mtu_drop_bytes"); + ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_packets"); + ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_packets"); + ZXDH_ADD_STRING(data, "rx_vport_plcr_drop_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_plcr_drop_bytes"); + ZXDH_ADD_STRING(data, "tx_vport_ssvpc_packets"); + ZXDH_ADD_STRING(data, "rx_vport_idma_drop_packets"); + ZXDH_ADD_STRING(data, "rx_lro_packets"); + ZXDH_ADD_STRING(data, "rx_udp_csum_fail_packets"); + ZXDH_ADD_STRING(data, "tx_udp_csum_fail_packets"); + ZXDH_ADD_STRING(data, "rx_tcp_csum_fail_packets"); + ZXDH_ADD_STRING(data, "tx_tcp_csum_fail_packets"); + ZXDH_ADD_STRING(data, "rx_ipv4_csum_fail_packets"); + ZXDH_ADD_STRING(data, "tx_ipv4_csum_fail_packets"); + + ZXDH_ADD_STRING(data, "rx_packets_phy"); //get stat from mac + ZXDH_ADD_STRING(data, "tx_packets_phy"); + ZXDH_ADD_STRING(data, "rx_bytes_phy"); + ZXDH_ADD_STRING(data, "tx_bytes_phy"); + ZXDH_ADD_STRING(data, "rx_error_phy"); + ZXDH_ADD_STRING(data, "tx_error_phy"); + ZXDH_ADD_STRING(data, "rx_drop_phy"); + ZXDH_ADD_STRING(data, "tx_drop_phy"); + ZXDH_ADD_STRING(data, "rx_good_bytes_phy"); + ZXDH_ADD_STRING(data, "tx_good_bytes_phy"); + ZXDH_ADD_STRING(data, "rx_unicast_phy"); + ZXDH_ADD_STRING(data, "tx_unicast_phy"); + ZXDH_ADD_STRING(data, "rx_multicast_phy"); + ZXDH_ADD_STRING(data, "tx_multicast_phy"); + ZXDH_ADD_STRING(data, "rx_broadcast_phy"); + ZXDH_ADD_STRING(data, "tx_broadcast_phy"); + ZXDH_ADD_STRING(data, "rx_under64_drop"); + ZXDH_ADD_STRING(data, "rx_undersize_phy"); + ZXDH_ADD_STRING(data, "rx_size_64_phy"); + ZXDH_ADD_STRING(data, "rx_size_65_127"); + ZXDH_ADD_STRING(data, "rx_size_128_255"); + ZXDH_ADD_STRING(data, "rx_size_256_511"); + ZXDH_ADD_STRING(data, "rx_size_512_1023"); + ZXDH_ADD_STRING(data, 
"rx_size_1024_1518"); + ZXDH_ADD_STRING(data, "rx_size_1519_mru"); + ZXDH_ADD_STRING(data, "rx_oversize_phy"); + ZXDH_ADD_STRING(data, "tx_undersize_phy"); + ZXDH_ADD_STRING(data, "tx_size_64_phy"); + ZXDH_ADD_STRING(data, "tx_size_65_127"); + ZXDH_ADD_STRING(data, "tx_size_128_255"); + ZXDH_ADD_STRING(data, "tx_size_256_511"); + ZXDH_ADD_STRING(data, "tx_size_512_1023"); + ZXDH_ADD_STRING(data, "tx_size_1024_1518"); + ZXDH_ADD_STRING(data, "tx_size_1519_mtu"); + ZXDH_ADD_STRING(data, "tx_oversize_phy"); + ZXDH_ADD_STRING(data, "rx_pause_phy"); + ZXDH_ADD_STRING(data, "tx_pause_phy"); + ZXDH_ADD_STRING(data, "rx_crc_errors"); + ZXDH_ADD_STRING(data, "tx_crc_errors"); + ZXDH_ADD_STRING(data, "rx_mac_control_phy"); + ZXDH_ADD_STRING(data, "tx_mac_control_phy"); + ZXDH_ADD_STRING(data, "rx_fragment_phy"); + ZXDH_ADD_STRING(data, "tx_fragment_phy"); + ZXDH_ADD_STRING(data, "rx_jabber_phy"); + ZXDH_ADD_STRING(data, "tx_jabber_phy"); + ZXDH_ADD_STRING(data, "rx_vlan_phy"); + ZXDH_ADD_STRING(data, "tx_vlan_phy"); + ZXDH_ADD_STRING(data, "rx_eee_phy"); + ZXDH_ADD_STRING(data, "tx_eee_phy"); + ZXDH_ADD_STRING(data, "rx_arn_phy"); + ZXDH_ADD_STRING(data, "tx_psn_phy"); + ZXDH_ADD_STRING(data, "rx_psn_phy"); + ZXDH_ADD_STRING(data, "tx_psn_ack_phy"); + ZXDH_ADD_STRING(data, "rx_psn_ack_phy"); + + for (i = 0; i < en_dev->curr_queue_pairs; i++) { + ZXDH_ADD_QUEUE_STRING(data, "rx_pkts", i); + ZXDH_ADD_QUEUE_STRING(data, "tx_pkts", i); + ZXDH_ADD_QUEUE_STRING(data, "rx_bytes", i); + ZXDH_ADD_QUEUE_STRING(data, "tx_bytes", i); + ZXDH_ADD_QUEUE_STRING(data, "tx_stopped", i); + ZXDH_ADD_QUEUE_STRING(data, "tx_wake", i); + ZXDH_ADD_QUEUE_STRING(data, "tx_dropped", i); + } + break; + } + case ETH_SS_PRIV_FLAGS: { + for (i = 0; i < ZXDH_NUM_PFLAGS; i++) { + strncpy(data + i * ETH_GSTRING_LEN, + zxdh_gstrings_priv_flags[i].name, + ETH_GSTRING_LEN); + } + + if ((pf_dev->board_type == DH_STDA) || + (pf_dev->board_type == DH_STDB) || + (pf_dev->board_type == DH_STDC) || + 
(pf_dev->board_type == DH_STD_E312S)) { + LOG_INFO("zios not supported telnet\n"); + break; + } + + /* 获取debug口的ip地址*/ + ret = zxdh_debug_ip_get(en_dev, ip); + if (ret != 0) { + LOG_ERR("ip get failed"); + break; + } + strncpy(data + ZXDH_PFLAG_IP * ETH_GSTRING_LEN, ip, + ETH_GSTRING_LEN); + break; + } + case ETH_SS_TEST: + for (i = 0; i < zxdh_en_self_test_num(); i++) { + strcpy(data + i * ETH_GSTRING_LEN, zxdh_self_tests[i]); + } + break; + default: { + LOG_ERR("invalid para\n"); + break; + } + } + + return; +} + +int32_t zxdh_pflags_update(struct net_device *netdev, uint8_t flag, bool enable) +{ + if (!zxdh_gstrings_priv_flags[flag].handler) + return 0; + return zxdh_gstrings_priv_flags[flag].handler(netdev, enable); +} + +static int32_t zxdh_handle_pflag(struct net_device *netdev, + uint32_t wanted_flags, + enum zxdh_priv_flag flag) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & BIT(flag)); + uint32_t changes = wanted_flags ^ en_priv->edev.pflags; + int32_t err = 0; + + /* 判断设置的值是否改变&改变的位是否为flag位 */ + if (!(changes & BIT(flag))) { + return 0; + } + + err = zxdh_pflags_update(netdev, flag, enable); + if (0 != err) { + LOG_ERR("%s private flag '%s' failed err %d\n", + enable ? 
"Enable" : "Disable", + zxdh_gstrings_priv_flags[flag].name, err); + return err; + } + + if (flag != ZXDH_PFLAG_IP) + ZXDH_SET_PFLAG(en_priv->edev.pflags, flag, enable); + + return 0; +} + +static int32_t zxdh_en_set_priv_flags(struct net_device *netdev, + uint32_t pflags) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + enum zxdh_priv_flag pflag = 0; + int32_t err = 0; + uint32_t changes = pflags ^ en_priv->edev.pflags; + bool hardware_bond_change = changes & BIT(ZXDH_PFLAG_HARDWARE_BOND); + bool hardware_bond_prima_change = changes & + BIT(ZXDH_PFLAG_HARDWARE_BOND_PRIMARY); + LOG_INFO("hardware_bond_change %d, hardware_bond_prima_change %d\n", + hardware_bond_change, hardware_bond_prima_change); + if (!(hardware_bond_change || hardware_bond_prima_change)) { + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + } + for (pflag = 0; pflag < ZXDH_NUM_PFLAGS; pflag++) { + err = zxdh_handle_pflag(netdev, pflags, pflag); + if (0 != err) { + break; + } + } + + return err; +} + +static void flag_enable_1588_get(struct zxdh_en_device *en_dev) +{ + int32_t ret = 0; + union zxdh_msg *msg = NULL; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + DPP_PF_INFO_T dpp_pf_info = { + .slot = en_dev->slot_id, + .vport = en_dev->vport, + }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + msg->payload.vf_1588_enable.proc_cmd = ZXDH_VF_1588_ENABLE_GET; + msg->payload.hdr.op_code = ZXDH_VF_1588_ENABLE; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_pf failed: %d\n", 
ret); + kfree(msg); + return; + } + + en_dev->enable_1588 = + msg->reps.vf_1588_enable_rsp.enable_1588_vf_rsp; + kfree(msg); + } else { + ret = dpp_vport_attr_get(&dpp_pf_info, &port_attr_entry); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_get SRIOV_VPORT_1588_EN failed, ret:%d\n", + ret); + return; + } + + en_dev->enable_1588 = port_attr_entry.flag_1588_enable; + } + + if (en_dev->enable_1588 == 0) { + en_dev->pflags &= ~BIT(ZXDH_PFLAG_1588_ENABLE); + } else { + en_dev->pflags |= BIT(ZXDH_PFLAG_1588_ENABLE); + } +} +static uint32_t zxdh_en_get_priv_flags(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t flag_lldp = 0; + uint32_t lldp_mask = 0; + uint32_t cpl_timeout_mask = 0; + uint32_t hp_irq_ctrl_status = 0; + int32_t ret = 0; + + ret = zxdh_lldp_enable_get(&en_priv->edev, &flag_lldp); + if ((ret != 0) && (flag_lldp != 0) && (flag_lldp != 1)) { + LOG_ERR("zxdh_lldp_enable_get err, ret(%d), flag_lldp(%u).\n", + ret, flag_lldp); + } + + flag_lldp = flag_lldp << ZXDH_PFLAG_ENABLE_LLDP; + + lldp_mask = 0xFFFFFFFF ^ BIT(ZXDH_PFLAG_ENABLE_LLDP); + en_priv->edev.pflags = (en_priv->edev.pflags & lldp_mask) | flag_lldp; + + if (en_dev->is_hwbond) { + en_priv->edev.pflags |= (1 << ZXDH_PFLAG_HARDWARE_BOND); + } else { + en_priv->edev.pflags &= ~(1 << ZXDH_PFLAG_HARDWARE_BOND); + } + if (en_dev->is_primary_port) { + en_priv->edev.pflags |= (1 << ZXDH_PFLAG_HARDWARE_BOND_PRIMARY); + } else { + en_priv->edev.pflags &= + ~(1 << ZXDH_PFLAG_HARDWARE_BOND_PRIMARY); + } + + cpl_timeout_mask = en_dev->ops->get_cpl_timeout_if_mask(en_dev->parent); + LOG_DEBUG("cpl_timeout_mask: %d\n", cpl_timeout_mask); + if (cpl_timeout_mask == 1) + en_dev->pflags |= BIT(ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT); + else + en_dev->pflags &= ~BIT(ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT); + + hp_irq_ctrl_status = + en_dev->ops->get_hp_irq_ctrl_status(en_dev->parent); + LOG_DEBUG("hp_irq_ctrl_status: %d\n", 
hp_irq_ctrl_status); + if (hp_irq_ctrl_status == 1) + en_dev->pflags |= BIT(ZXDH_PFLAG_PCIE_HP_IRQ_CTRL); + else + en_dev->pflags &= ~BIT(ZXDH_PFLAG_PCIE_HP_IRQ_CTRL); + + ret = zxdh_dual_tor_label_get(en_dev); + if (ret == 1) + en_dev->pflags |= BIT(ZXDH_PFLAG_DUAL_TOR_CTRL); + else if (ret == 0) + en_dev->pflags &= ~BIT(ZXDH_PFLAG_DUAL_TOR_CTRL); + + flag_enable_1588_get(en_dev); + + return en_priv->edev.pflags; +} + +static int zxdh_en_get_regs_len(struct net_device *netdev) +{ +#define ZXDH_REGS_LEN (128 * 1024) + return ZXDH_REGS_LEN * sizeof(uint32_t); +} + +static void zxdh_en_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ +} + +static void zxdh_en_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + wol->supported = en_dev->wol_support; + if (wol->supported == 0) { + return; + } + wol->wolopts = en_dev->wolopts; +} + +static int zxdh_en_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + DPP_PF_INFO_T pf_info = { 0 }; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if ((en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) || + !zxdh_en_is_panel_port(en_dev)) { + return -EOPNOTSUPP; + } + + LOG_INFO("wol mode=0x%x, en_dev->phy_port=0x%x\n", wol->wolopts, + en_dev->phy_port); + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return -EOPNOTSUPP; + } + + if (wol->wolopts & WAKE_MAGIC) { + en_dev->wolopts = WAKE_MAGIC; + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_MAGIC_PACKET_ENABLE, 1); + } else if (wol->wolopts == 0) { + en_dev->wolopts = 0; + dpp_uplink_phy_attr_set(&pf_info, en_dev->phy_port, + UPLINK_PHY_PORT_MAGIC_PACKET_ENABLE, 0); + } else { + return 
-EOPNOTSUPP; + } + + return 0; +} + +static uint32_t zxdh_en_get_msglevel(struct net_device *netdev) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + return en_dev->msglevel; +} + +static void zxdh_en_set_msglevel(struct net_device *netdev, uint32_t data) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + en_dev->msglevel = data; +} + +static int zxdh_en_nway_reset(struct net_device *netdev) +{ + return 0; +} + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int zxdh_en_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int ret = 0; + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + if (en_dev->phy_port == INVALID_PHY_PORT) { + LOG_ERR("phyport is invalid!"); + return -EOPNOTSUPP; + } + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + switch (state) { + case ETHTOOL_ID_ACTIVE: { + msg->payload.mac_set_msg.blink_enable = 1; + break; + } + case ETHTOOL_ID_INACTIVE: { + msg->payload.mac_set_msg.blink_enable = 0; + break; + } + default: { + kfree(msg); + return -EOPNOTSUPP; + } + } + msg->payload.hdr_to_agt.op_code = AGENT_MAC_LED_BLINK; + msg->payload.hdr_to_agt.phyport = en_dev->phy_port; + LOG_DEBUG("send phyport %d, blink_enable=%d\n", en_dev->phy_port, + msg->payload.mac_set_msg.blink_enable); + ret = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, msg, + ¶); + if (ret != 0) { + LOG_ERR("zxdh_send_command_to_riscv_mac failed, err: %d\n", + ret); + } + kfree(msg); + return ret; +} +#else +static int zxdh_en_phys_id(struct net_device *netdev, u32 data) +{ + return 0; +} +#endif /* 
HAVE_ETHTOOL_SET_PHYS_ID */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT +static int32_t zxdh_en_get_sset_count(struct net_device *netdev, int sset) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + + switch (sset) { + case ETH_SS_STATS: { + return ZXDH_NET_PF_STATS_NUM(en_dev); + } + case ETH_SS_PRIV_FLAGS: { + return ZXDH_NUM_PFLAGS; + } + case ETH_SS_TEST: { + return zxdh_en_self_test_num(); + } + default: { + return -EOPNOTSUPP; + } + } + + return 0; +} +#endif + +static void zxdh_en_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + uint8_t drv_name_len = 0; + uint8_t drv_version[MAX_DRV_VERSION_LEN] = { 0 }; + uint8_t drv_version_len = 0; + uint16_t vport = 0; + + ret = en_dev->ops->get_pf_drv_msg(en_dev->parent, drv_version, + &drv_version_len); + if (drv_version_len > MAX_DRV_NAME_LEN) { + LOG_ERR("drv_version_len(%hhu) greater than %u", + drv_version_len, MAX_DRV_NAME_LEN); + drv_version_len = MAX_DRV_NAME_LEN; + } + + vport = en_dev->vport; + drv_name_len = strlen(DRV_NAME); + + if (drv_name_len > MAX_DRV_NAME_LEN) { + LOG_ERR("drv_name_len(%hhu) greater than %u", drv_name_len, + MAX_DRV_NAME_LEN); + drv_name_len = MAX_DRV_NAME_LEN; + } + + memcpy(drvinfo->driver, DRV_NAME, drv_name_len); + memcpy(drvinfo->version, drv_version, drv_version_len); + + strlcpy(drvinfo->bus_info, dev_name(en_dev->parent->parent->device), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = ZXDH_NUM_PFLAGS; + drvinfo->n_stats = ZXDH_NET_PF_STATS_NUM(en_dev); + drvinfo->eedump_len = zxdh_en_get_eeprom_len(netdev); + drvinfo->regdump_len = zxdh_en_get_regs_len(netdev); + drvinfo->testinfo_len = zxdh_en_self_test_num(); + + memcpy(drvinfo->fw_version, en_dev->fw_version, en_dev->fw_version_len); +} + +int32_t zxdh_stats_update(struct zxdh_en_device *en_dev) +{ + uint16_t i = 
0; + int32_t ret = 0; + + ret = zxdh_vport_stats_get(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_vport_stats_get failed, ret: %d\n", ret); + return -1; + } + + ret = zxdh_mac_stats_get(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_mac_stats_get failed, ret: %d\n", ret); + return -1; + } + + ret = zxdh_en_udp_pkt_stats_get(en_dev); + if (ret != 0) { + LOG_ERR("zxdh_en_udp_pkt_stats_get failed, ret: %d\n", ret); + return -1; + } + + en_dev->hw_stats.netdev_stats.rx_packets = 0; + en_dev->hw_stats.netdev_stats.tx_packets = 0; + en_dev->hw_stats.netdev_stats.rx_bytes = 0; + en_dev->hw_stats.netdev_stats.tx_bytes = 0; + en_dev->hw_stats.netdev_stats.tx_queue_wake = 0; + en_dev->hw_stats.netdev_stats.tx_queue_stopped = 0; + en_dev->hw_stats.netdev_stats.tx_queue_dropped = 0; + en_dev->hw_stats.netdev_stats.rx_csum_offload_good = 0; + en_dev->hw_stats.netdev_stats.rx_removed_vlan_packets = 0; + for (i = 0; i < en_dev->curr_queue_pairs; i++) { + /* queue software statistics */ + en_dev->hw_stats.q_stats[i].q_rx_pkts = + en_dev->rq[i].stats.packets; + en_dev->hw_stats.q_stats[i].q_tx_pkts = + en_dev->sq[i].stats.packets; + en_dev->hw_stats.q_stats[i].q_rx_bytes = + en_dev->rq[i].stats.bytes; + en_dev->hw_stats.q_stats[i].q_tx_bytes = + en_dev->sq[i].stats.bytes; + + en_dev->hw_stats.netdev_stats.rx_packets += + en_dev->rq[i].stats.packets; + en_dev->hw_stats.netdev_stats.tx_packets += + en_dev->sq[i].stats.packets; + en_dev->hw_stats.netdev_stats.rx_bytes += + en_dev->rq[i].stats.bytes; + en_dev->hw_stats.netdev_stats.tx_bytes += + en_dev->sq[i].stats.bytes; + en_dev->hw_stats.netdev_stats.rx_csum_offload_good += + en_dev->rq[i].stats.rx_csum_offload_good; + en_dev->hw_stats.netdev_stats.rx_removed_vlan_packets += + en_dev->rq[i].stats.rx_removed_vlan_packets; + en_dev->hw_stats.netdev_stats.tx_queue_wake += + en_dev->hw_stats.q_stats[i].q_tx_wake; + en_dev->hw_stats.netdev_stats.tx_queue_stopped += + en_dev->hw_stats.q_stats[i].q_tx_stopped; + 
en_dev->hw_stats.netdev_stats.tx_queue_dropped += + en_dev->hw_stats.q_stats[i].q_tx_dropped; + } + + return ret; +} + +static void zxdh_en_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint32_t offset = ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + + ZXDH_VPORT_STATS_NUM + ZXDH_UDP_STATS_NUM; + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + + zxdh_stats_update(en_dev); + memcpy(data, &en_dev->hw_stats, + ZXDH_NET_PF_STATS_NUM(en_dev) * sizeof(uint64_t)); + memcpy(data + offset, en_dev->hw_stats.q_stats, + (en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM) * + sizeof(uint64_t)); + + return; +} + +static int zxdh_en_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + /* Started by AICoder, pid:3de164760c033b9146ed0b10407e2720681505ce */ + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t ret = 0; + uint32_t ptp_clock_index; + + CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n"); + + en_priv = netdev_priv(netdev); + CHECK_EQUAL_ERR(en_priv, NULL, -EADDRNOTAVAIL, + "netdev priv is null!\n"); + en_dev = &en_priv->edev; + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + /* Started by AICoder, pid:4a311a2f56c7b6e1476f0b5d90d1440b13548a62 */ + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return 0; + } + /* Ended by AICoder, pid:4a311a2f56c7b6e1476f0b5d90d1440b13548a62 */ + ret = zxdh_get_ptp_clock_index(en_dev, &ptp_clock_index); + if (ret != 0) { + return 0; + } + + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = ptp_clock_index; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + /* Ended by AICoder, 
pid:3de164760c033b9146ed0b10407e2720681505ce */ + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int zxdh_en_ethtool_begin(struct net_device *netdev) +{ + return 0; +} + +static void zxdh_en_ethtool_complete(struct net_device *netdev) +{ +} +#endif + +#ifndef HAVE_NDO_SET_FEATURES +static int zxdh_en_get_rx_csum(struct net_device *netdev) +{ + return 0; +} + +static int zxdh_en_set_rx_csum(struct net_device *netdev, u32 data) +{ + return 0; +} + +static int zxdh_en_set_tx_csum(struct net_device *netdev, u32 data) +{ + return 0; +} + +#ifdef NETIF_F_TSO +static int zxdh_en_set_tso(struct net_device *netdev, u32 data) +{ + return 0; +} +#endif /* NETIF_F_TSO */ + +#ifdef ETHTOOL_GFLAGS +static int zxdh_en_set_flags(struct net_device *netdev, u32 data) +{ + return 0; +} +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ + +static int zxdh_en_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} + +static int zxdh_en_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + return 0; +} +#ifdef ETHTOOL_GRXFHINDIR +#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE +static u32 zxdh_en_get_rxfh_indir_size(struct net_device *netdev) +{ + return ZXDH_INDIR_RQT_SIZE; +} + +static u32 zxdh_en_get_rxfh_key_size(struct net_device *netdev) +{ + return ZXDH_NET_HASH_KEY_SIZE; +} + +int zxdh_en_hash_key_get(struct zxdh_en_device *en_dev, uint8_t *key) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("malloc(%lu) failed !", sizeof(union zxdh_msg)); + return -ENOMEM; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + ret = dpp_thash_key_get(&pf_info, key, ZXDH_NET_HASH_KEY_SIZE); + } else { + msg->payload.hdr.op_code = 
ZXDH_THASH_KEY_GET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (ret == 0) + zte_memcpy_s(key, msg->reps.thash_key_set_msg.key_map, + ZXDH_NET_HASH_KEY_SIZE); + } + + if (ret != 0) + LOG_ERR("get hash key failed !\n"); + + kfree(msg); + return ret; +} + +#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) +#ifdef HAVE_RXFH_HASHFUNC +static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +#else +static int zxdh_en_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif /* HAVE_RXFH_HASHFUNC */ +#else +static int zxdh_en_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif /* HAVE_ETHTOOL_GSRSSH */ +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + uint8_t func = 0; + + LOG_DEBUG("zxdh_en_get_rxfh start\n"); + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + if (indir != NULL) { + memcpy(indir, en_dev->indir_rqt, + sizeof(uint32_t) * ZXDH_INDIR_RQT_SIZE); + } + + if (key != NULL) { + LOG_DEBUG("get key is called\n"); + ret = zxdh_en_hash_key_get(en_dev, key); + if (ret) + return -EOPNOTSUPP; + } + + if (hfunc != NULL) { + func = en_dev->eth_config.hash_func; + switch (func) { + case ZXDH_FUNC_TOP: { + *hfunc = ETH_RSS_HASH_TOP; + break; + } + case ZXDH_FUNC_XOR: { + *hfunc = ETH_RSS_HASH_XOR; + break; + } + case ZXDH_FUNC_CRC32: { + *hfunc = ETH_RSS_HASH_CRC32; + break; + } + default: { + return -EOPNOTSUPP; + } + } + } + + return 0; +} +#else +static int zxdh_en_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir) +{ + return 0; +} +#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */ +#endif /* ETHTOOL_GRXFHINDIR */ + +int32_t zxdh_indir_to_queue_map(struct zxdh_en_device *en_dev, + const uint32_t *indir) +{ + uint32_t *queue_map = NULL; + int32_t err = 0; + uint16_t i = 0; + 
uint16_t j = 0; + + queue_map = kzalloc(ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t), GFP_KERNEL); + if (queue_map == NULL) { + LOG_ERR("queue_map is NULL\n"); + return -ENOMEM; + } + for (i = 0; i < ZXDH_INDIR_RQT_SIZE; i++) { + j = indir[i]; + queue_map[i] = en_dev->phy_index[2 * j]; + } + + memcpy(en_dev->eth_config.queue_map, queue_map, + ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t)); + + kfree(queue_map); + return err; +} + +int zxdh_en_hash_func_set(struct zxdh_en_device *en_dev, uint8_t func) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("malloc(%lu) failed !", sizeof(union zxdh_msg)); + return -ENOMEM; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + ret = dpp_vport_hash_funcs_set(&pf_info, func); + } else { + msg->payload.hdr.op_code = ZXDH_HASH_FUNC_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.hfunc_set_msg.func = func; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + } + + if (ret == 0) + en_dev->eth_config.hash_func = func; + + kfree(msg); + return ret; +} + +static int zxdh_en_hash_key_set(struct zxdh_en_device *en_dev, uint8_t *key) +{ + union zxdh_msg *msg = NULL; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("malloc(%lu) failed !", sizeof(union zxdh_msg)); + return -ENOMEM; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + ret = 
dpp_thash_key_set(&pf_info, key, ZXDH_NET_HASH_KEY_SIZE); + } else { + msg->payload.hdr.op_code = ZXDH_THASH_KEY_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + zte_memcpy_s(msg->payload.thash_key_set_msg.key_map, key, + ZXDH_NET_HASH_KEY_SIZE); + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + } + + kfree(msg); + return ret; +} + +int32_t zxdh_en_hash_key_recover(struct zxdh_en_device *en_dev) +{ + uint8_t key[ZXDH_NET_HASH_KEY_SIZE] = { 0 }; + int32_t err = 0; + + err = zxdh_en_hash_key_get(en_dev, key); + ZXDH_CHECK_RET_RETURN(err, "zxdh_en_hash_key_get failed: %d\n", err); + + err = zxdh_en_hash_key_set(en_dev, key); + ZXDH_CHECK_RET_RETURN(err, "zxdh_en_hash_key_set failed: %d\n", err); + + return err; +} + +#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE +#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) +#ifdef HAVE_RXFH_HASHFUNC +static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +#else +static int zxdh_en_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +#endif /* HAVE_RXFH_HASHFUNC */ +#else +static int zxdh_en_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif /* HAVE_ETHTOOL_GSRSSH */ +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + uint8_t func = 0; + + LOG_DEBUG("zxdh_en_set_rxfh_indir start\n"); + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + switch (hfunc) { + case ETH_RSS_HASH_NO_CHANGE: { + break; + } + case ETH_RSS_HASH_TOP: { + func = ZXDH_FUNC_TOP; + break; + } + case ETH_RSS_HASH_XOR: { + func = ZXDH_FUNC_XOR; + break; + } + case ETH_RSS_HASH_CRC32: { + func = ZXDH_FUNC_CRC32; + break; + } + default: { + return -EOPNOTSUPP; + } + } + + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && + (func != en_dev->eth_config.hash_func)) { + LOG_DEBUG("func: %u\n", func); + ret = zxdh_en_hash_func_set(en_dev, 
func); + if (ret != 0) { + LOG_ERR("hunc set failed: %d", ret); + return -EOPNOTSUPP; + } + } + + if (indir != NULL) { + LOG_DEBUG("set indir is called\n"); + ret = zxdh_indir_to_queue_map(en_dev, indir); + if (ret != 0) { + LOG_ERR("indir set failed: %d", ret); + return -EOPNOTSUPP; + } + + memcpy(en_dev->indir_rqt, indir, + ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t)); + ret = zxdh_rxfh_set(en_dev, en_dev->eth_config.queue_map); + if (ret != 0) { + LOG_ERR("zxdh_rxfh_set failed: %d\n", ret); + return -EOPNOTSUPP; + } + } + + if (key != NULL) { + LOG_DEBUG("set thash key is called\n"); + ret = zxdh_en_hash_key_set(en_dev, (uint8_t *)key); + } + + return ret; +} +#else +static int zxdh_en_set_rxfh_indir(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + return 0; +} +#endif +#ifdef ETHTOOL_GCHANNELS +static void zxdh_en_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + union zxdh_msg *msg = NULL; + int32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ch->max_combined = en_dev->max_vq_pairs; + ch->combined_count = en_dev->curr_queue_pairs; + + if (en_dev->device_state == ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + + if (en_dev->ops->is_bond(en_dev->parent)) + return; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + err = dpp_rxfh_get(&pf_info, + msg->payload.rxfh_set_msg.queue_map, + ZXDH_INDIR_RQT_SIZE); + if (err != 0) { + LOG_ERR("dpp_rxfh_get failed: %d\n", err); + goto free_msg; + } + + LOG_DEBUG("*******pf_queue_map*******\n"); + 
zxdh_u32_array_print(msg->payload.rxfh_set_msg.queue_map, + ZXDH_INDIR_RQT_SIZE); + } else { + msg->payload.hdr.op_code = ZXDH_RXFH_GET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + err = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("dpp_rxfh_get failed: %d\n", err); + goto free_msg; + } + + LOG_DEBUG("*******vf_queue_map*******\n"); + zxdh_u32_array_print(msg->reps.rxfh_get_msg.queue_map, + ZXDH_INDIR_RQT_SIZE); + } +free_msg: + kfree(msg); +} +#endif /* ETHTOOL_GCHANNELS */ + +int32_t zxdh_num_channels_changed(struct zxdh_en_device *en_dev, + uint16_t num_changed) +{ + uint32_t *indir = NULL; + int32_t err = 0; + uint16_t i = 0; + + if (num_changed == 0) { + LOG_ERR("num_changed cannot be zero\n"); + return -1; + } + + indir = kzalloc(sizeof(uint32_t) * ZXDH_INDIR_RQT_SIZE, GFP_KERNEL); + if (unlikely(NULL == indir)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + if (!netif_is_rxfh_configured(en_dev->netdev)) { + LOG_DEBUG("indir_is_default\n"); + for (i = 0; i < ZXDH_INDIR_RQT_SIZE; ++i) { + indir[i] = i % num_changed; + } + + err = zxdh_indir_to_queue_map(en_dev, indir); + if (err != 0) { + LOG_ERR("zxdh_indir_to_queue_map failed: %d\n", err); + kfree(indir); + return err; + } + + zte_memcpy_s(en_dev->indir_rqt, indir, + ZXDH_INDIR_RQT_SIZE * sizeof(uint32_t)); + err = zxdh_rxfh_set(en_dev, en_dev->eth_config.queue_map); + if (err != 0) { + LOG_ERR("zxdh_rxfh_set failed: %d\n", err); + kfree(indir); + return -EOPNOTSUPP; + } + } + + en_dev->old_queue_pairs = en_dev->curr_queue_pairs; + en_dev->curr_queue_pairs = num_changed; + LOG_INFO("old_queue_pairs: %d, curr_queue_pairs: %d\n", + en_dev->old_queue_pairs, en_dev->curr_queue_pairs); + kfree(indir); + + return set_feature_rxhash(en_dev, + en_dev->curr_queue_pairs != 1 ? 
true : false); +} + +#ifdef ETHTOOL_SCHANNELS +static int zxdh_en_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + + LOG_DEBUG("zxdh_en_set_channels start\n"); + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + /* verify that the number of channels does not invalidate any current + * flow director rules + */ + //TODO + + /* We don't support separate rx/tx channels. + * We don't allow setting 'other' channels. + */ + if (ch->rx_count || ch->tx_count || ch->other_count) { + LOG_ERR("not supported\n"); + return -EINVAL; + } + + if ((ch->combined_count > en_dev->max_vq_pairs) || + (ch->combined_count == 0)) { + LOG_ERR("invalid para\n"); + return -EINVAL; + } + + if (en_dev->xdp_enabled) { + LOG_ERR("XDP is enabled, not support set channels\n"); + return -EOPNOTSUPP; + } + + if (ch->combined_count == en_dev->curr_queue_pairs) { + return 0; + } + + ret = zxdh_num_channels_changed(en_dev, ch->combined_count); + if (ret != 0) { + LOG_ERR("zxdh_num_channels_changed failed: %d\n", ret); + return -1; + } + + en_dev->eth_config.curr_combined = en_dev->curr_queue_pairs; + zxdh_set_default_xps_cpumasks(en_dev); + netif_set_real_num_tx_queues(netdev, en_dev->curr_queue_pairs); + netif_set_real_num_rx_queues(netdev, en_dev->curr_queue_pairs); + + zxdh_flow_map_update_sysfs(netdev); + + return 0; +} +#endif + +static int32_t zxdh_get_rss_hash(struct ethtool_rxnfc *cmd, + struct zxdh_en_device *en_dev) +{ + union zxdh_msg *msg = NULL; + uint32_t hash_mode = 0; + int32_t ret = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + struct zxdh_bar_extra_para para = { 0 }; + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("failed to kzalloc\n"); + return 
-ENOMEM; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_rx_flow_hash_get(&pf_info, &hash_mode); + } else { + msg->payload.hdr.op_code = ZXDH_RX_FLOW_HASH_GET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + hash_mode = msg->reps.rx_flow_hash_set_msg.hash_mode; + } + if (ret != 0) { + kfree(msg); + return ret; + } + + LOG_INFO("hash_mode: %u\n", hash_mode); + switch (hash_mode) { + case ZXDH_NET_RX_FLOW_HASH_MV: { + cmd->data = RXH_L2DA + RXH_VLAN; + break; + } + case ZXDH_NET_RX_FLOW_HASH_SDT: { + cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST; + break; + } + case ZXDH_NET_RX_FLOW_HASH_SDFNT: { + cmd->data = RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + + RXH_L4_B_0_1 + RXH_L4_B_2_3; + break; + } + default: { + LOG_ERR("invalid hash_mode\n"); + kfree(msg); + return -1; + } + } + kfree(msg); + return 0; +} + +static int32_t zxdh_ethtool_get_flow(struct zxdh_en_device *en_dev, + struct ethtool_rxnfc *info, + int32_t location) +{ + ZXDH_FD_CFG_T p_fd_cfg = { 0 }; + DPP_PF_INFO_T pf_info = { 0 }; + int32_t err = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (location < 0 || location >= ETHTOOL_FD_MAX_NUM) + return -EINVAL; + + if (!en_dev->fs.ethtool_fs[location].is_used) { + return -ENOENT; + } else { + zte_memcpy_s(&info->fs, &en_dev->fs.ethtool_fs[location].rfs, + sizeof(struct ethtool_rx_flow_spec)); + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + err = dpp_tbl_fd_cfg_get(&pf_info, ZXDH_SDT_FD_CFG_TABLE, + en_dev->fs.ethtool_fs[location].index, + &p_fd_cfg); + if (err != 0) { + LOG_ERR("pf can't find fd_cfg %d\n", location); + return -ENOENT; + } + } else { + err = zxdh_vf_get_fd(en_dev, + en_dev->fs.ethtool_fs[location].index); + if (err != 0) { + LOG_ERR("vf can't find fd_cfg %d\n", location); + return -ENOENT; + } + } 
+ return 0; +} + +/* @rule_cnt: Number of rules to be affected + * @rule_locs: Array of used rule locations + */ +static int32_t zxdh_ethtool_get_all_flows(struct zxdh_en_device *en_dev, + struct ethtool_rxnfc *info, + uint32_t *rule_locs) +{ + int32_t location = 0; + int32_t idx = 0; + int32_t err = 0; + + info->data = ETHTOOL_FD_MAX_NUM; + + LOG_INFO("zxdh_ethtool_get_all_flows rule_cnt:%d\n", info->rule_cnt); + + while ((!err || err == -ENOENT) && idx < info->rule_cnt) { + err = zxdh_ethtool_get_flow(en_dev, info, location); + if (!err) + rule_locs[idx++] = + location; /* 成功找到流表,存入rule_locs */ + location++; /* 继续找下一条 */ + } + if (info->rule_cnt > idx) { + LOG_INFO("zxdh_ethtool_get_all_flows idx:%d less than %d\n", + idx, info->rule_cnt); + } + return err; +} + +static int zxdh_en_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t err = 0; + + LOG_INFO("zxdh_en_get_rxnfc start\n"); + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = en_dev->curr_queue_pairs; + break; + case ETHTOOL_GRXFH: + err = zxdh_get_rss_hash(info, en_dev); + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = en_dev->fs.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = zxdh_ethtool_get_flow(en_dev, info, info->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + err = zxdh_ethtool_get_all_flows(en_dev, info, rule_locs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int32_t validate_ethter(struct ethtool_rx_flow_spec *fs) +{ + struct ethhdr *eth_mask = &fs->m_u.ether_spec; + int32_t ntuples = 0; + + if (!is_zero_ether_addr(eth_mask->h_dest)) + ntuples++; + if (!is_zero_ether_addr(eth_mask->h_source)) + ntuples++; + if (eth_mask->h_proto) + ntuples++; + + LOG_INFO("current ethet_addr num is %d\n", ntuples); 
+ return ntuples; +} + +static int32_t validate_tcpudp4(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; + int ntuples = 0; + + if (l4_mask->tos) /* 目前还不支支持tos */ + return -EINVAL; + if (l4_mask->ip4src) + ntuples++; + if (l4_mask->ip4dst) + ntuples++; + if (l4_mask->psrc) + ntuples++; + if (l4_mask->pdst) + ntuples++; + + /* tcp4/udp4 flow: proto and ethtype is masked */ + ntuples += 2; + + LOG_INFO("current TCP/UDP4 num is %d\n", ntuples); + return ntuples; +} + +static int32_t validate_ip4(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + int32_t ntuples = 0; + + if (l3_mask->l4_4_bytes || l3_mask->tos || + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + if (l3_mask->ip4src) + ntuples++; + if (l3_mask->ip4dst) + ntuples++; + if (l3_mask->proto) + ntuples++; + + /* ip4 flow: ethtype is masked */ + ntuples++; + LOG_INFO("current Ipv4 num is %d\n", ntuples); + return ntuples; +} + +static int32_t validate_ip6(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; + int32_t ntuples = 0; + + if (l3_mask->l4_4_bytes || l3_mask->tclass) + return -EINVAL; + if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src)) + ntuples++; + if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst)) + ntuples++; + if (l3_mask->l4_proto) + ntuples++; + + /* ip6 flow: ethtype is masked */ + ntuples++; + LOG_INFO("current IPv6 flow-type num is %d\n", ntuples); + return ntuples; +} + +static int32_t validate_tcpudp6(struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; + int32_t ntuples = 0; + + if (l4_mask->tclass) + return -EINVAL; + if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src)) + ntuples++; + if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst)) + ntuples++; + if (l4_mask->psrc) + ntuples++; + if (l4_mask->pdst) + ntuples++; + + /* tcp6/udp6 flow: proto and ethtype is 
masked */ + ntuples += 2; + LOG_INFO("current TCP/UDP6 flow-type num is %d\n", ntuples); + return ntuples; +} + +static int32_t validate_vlan(struct ethtool_rx_flow_spec *fs) +{ + int ntuples = 0; + + /* vlan_etype非0, 设置了Vlan */ + if (fs->m_ext.vlan_etype && + ntohs(fs->h_ext.vlan_etype) != ETH_TYPE_VLAN) + return -EINVAL; + /* vlan_tci非0, 设置了vlan_tci匹配,下面判断Vlan id是否在有效范围 */ + if (fs->m_ext.vlan_tci) { + if (ntohs(fs->h_ext.vlan_tci) >= VLAN_N_VID) { + return -EINVAL; + } else { + ntuples++; + } + } + LOG_INFO("current extra vlan flow num is %d\n", ntuples); + return ntuples; +} + +/* 假设 ethtool_flow_union 和 ethtool_flow_ext 已定义 */ +void print_ethtool_rx_flow_spec(const struct ethtool_rx_flow_spec *fs) +{ + LOG_DEBUG("struct ethtool_rx_flow_spec:\n"); + LOG_DEBUG(" flow_type: 0x%08x\n", fs->flow_type); + + /* 解析 flow_type 的基本类型(屏蔽 FLOW_EXT 和 FLOW_MAC_EXT) */ + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case TCP_V4_FLOW: + LOG_DEBUG("TCP_V4_FLOW\n"); + break; + case UDP_V4_FLOW: + LOG_DEBUG("UDP_V4_FLOW\n"); + break; + case TCP_V6_FLOW: + LOG_DEBUG("TCP_V6_FLOW\n"); + break; + case UDP_V6_FLOW: + LOG_DEBUG("UDP_V6_FLOW\n"); + break; + case IP_USER_FLOW: + LOG_DEBUG("IP_USER_FLOW\n"); + break; + case IPV6_USER_FLOW: + LOG_DEBUG("IPV6_USER_FLOW\n"); + break; + case ETHER_FLOW: + LOG_DEBUG("ETHER_FLOW\n"); + break; + default: + LOG_DEBUG("UNKNOWN\n"); + break; + } + if (fs->flow_type & FLOW_EXT) + LOG_DEBUG(" | FLOW_EXT\n"); + if (fs->flow_type & FLOW_MAC_EXT) + LOG_DEBUG(" | FLOW_MAC_EXT\n"); + + /* 打印 h_u 和 m_u,根据 flow_type 选择联合体成员 */ + if ((fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) == TCP_V4_FLOW || + (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) == UDP_V4_FLOW || + (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) == IP_USER_FLOW) { + LOG_DEBUG(" h_u.tcp_ip4_spec:\n"); + LOG_DEBUG(" ip4src: %pI4\n", &fs->h_u.tcp_ip4_spec.ip4src); + LOG_DEBUG(" ip4dst: %pI4\n", &fs->h_u.tcp_ip4_spec.ip4dst); + LOG_DEBUG(" psrc: %u\n", ntohs(fs->h_u.tcp_ip4_spec.psrc)); + 
LOG_DEBUG(" pdst: %u\n", ntohs(fs->h_u.tcp_ip4_spec.pdst)); + LOG_DEBUG(" tos: 0x%02x\n", fs->h_u.tcp_ip4_spec.tos); + + LOG_DEBUG(" m_u.tcp_ip4_spec:\n"); + LOG_DEBUG(" ip4src: 0x%08x\n", + ntohl(fs->m_u.tcp_ip4_spec.ip4src)); + LOG_DEBUG(" ip4dst: 0x%08x\n", + ntohl(fs->m_u.tcp_ip4_spec.ip4dst)); + LOG_DEBUG(" psrc: 0x%04x\n", + ntohs(fs->m_u.tcp_ip4_spec.psrc)); + LOG_DEBUG(" pdst: 0x%04x\n", + ntohs(fs->m_u.tcp_ip4_spec.pdst)); + LOG_DEBUG(" tos: 0x%02x\n", fs->m_u.tcp_ip4_spec.tos); + } else if ((fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) == ETHER_FLOW) { + LOG_DEBUG(" h_u.ether_spec:\n"); + LOG_DEBUG(" h_dest: %02x:%02x:%02x:%02x:%02x:%02x\n", + fs->h_u.ether_spec.h_dest[0], + fs->h_u.ether_spec.h_dest[1], + fs->h_u.ether_spec.h_dest[2], + fs->h_u.ether_spec.h_dest[3], + fs->h_u.ether_spec.h_dest[4], + fs->h_u.ether_spec.h_dest[5]); + LOG_DEBUG(" h_source: %02x:%02x:%02x:%02x:%02x:%02x\n", + fs->h_u.ether_spec.h_source[0], + fs->h_u.ether_spec.h_source[1], + fs->h_u.ether_spec.h_source[2], + fs->h_u.ether_spec.h_source[3], + fs->h_u.ether_spec.h_source[4], + fs->h_u.ether_spec.h_source[5]); + LOG_DEBUG(" h_proto: 0x%04x\n", + ntohs(fs->h_u.ether_spec.h_proto)); + + LOG_DEBUG(" m_u.ether_spec:\n"); + LOG_DEBUG(" h_dest: %02x:%02x:%02x:%02x:%02x:%02x\n", + fs->m_u.ether_spec.h_dest[0], + fs->m_u.ether_spec.h_dest[1], + fs->m_u.ether_spec.h_dest[2], + fs->m_u.ether_spec.h_dest[3], + fs->m_u.ether_spec.h_dest[4], + fs->m_u.ether_spec.h_dest[5]); + LOG_DEBUG(" h_source: %02x:%02x:%02x:%02x:%02x:%02x\n", + fs->m_u.ether_spec.h_source[0], + fs->m_u.ether_spec.h_source[1], + fs->m_u.ether_spec.h_source[2], + fs->m_u.ether_spec.h_source[3], + fs->m_u.ether_spec.h_source[4], + fs->m_u.ether_spec.h_source[5]); + LOG_DEBUG(" h_proto: 0x%04x\n", + ntohs(fs->m_u.ether_spec.h_proto)); + } else { + LOG_DEBUG(" h_u/m_u: \n"); + } + + /* 打印 h_ext 和 m_ext */ + LOG_DEBUG(" h_ext:\n"); + if (fs->flow_type & FLOW_MAC_EXT) { + LOG_DEBUG(" h_dest: %02x:%02x:%02x:%02x:%02x:%02x\n", 
+ fs->h_ext.h_dest[0], fs->h_ext.h_dest[1], + fs->h_ext.h_dest[2], fs->h_ext.h_dest[3], + fs->h_ext.h_dest[4], fs->h_ext.h_dest[5]); + } else { + LOG_DEBUG(" h_dest: \n"); + } + if (fs->flow_type & FLOW_EXT) { + LOG_DEBUG(" vlan_etype: 0x%04x\n", + ntohs(fs->h_ext.vlan_etype)); + LOG_DEBUG(" vlan_tci: 0x%04x (VLAN ID: %u, Priority: %u)\n", + ntohs(fs->h_ext.vlan_tci), + ntohs(fs->h_ext.vlan_tci) & 0x0FFF, + (ntohs(fs->h_ext.vlan_tci) >> 13) & 0x7); + LOG_DEBUG(" data: 0x%08x 0x%08x\n", ntohl(fs->h_ext.data[0]), + ntohl(fs->h_ext.data[1])); + } else { + LOG_DEBUG( + " vlan_etype, vlan_tci, data: \n"); + } + + LOG_DEBUG(" m_ext:\n"); + if (fs->flow_type & FLOW_MAC_EXT) { + LOG_DEBUG("h_dest: %02x:%02x:%02x:%02x:%02x:%02x\n", + fs->m_ext.h_dest[0], fs->m_ext.h_dest[1], + fs->m_ext.h_dest[2], fs->m_ext.h_dest[3], + fs->m_ext.h_dest[4], fs->m_ext.h_dest[5]); + } else { + LOG_DEBUG(" h_dest: \n"); + } + + if (fs->flow_type & FLOW_EXT) { + LOG_DEBUG(" vlan_etype: 0x%04x\n", + ntohs(fs->m_ext.vlan_etype)); + LOG_DEBUG(" vlan_tci: 0x%04x\n", ntohs(fs->m_ext.vlan_tci)); + LOG_DEBUG(" data: 0x%08x 0x%08x\n", ntohl(fs->m_ext.data[0]), + ntohl(fs->m_ext.data[1])); + } else { + LOG_DEBUG( + " vlan_etype, vlan_tci, data: \n"); + } + + /* 打印 ring_cookie */ + LOG_DEBUG(" ring_cookie: 0x%llx\n", + (unsigned long long)fs->ring_cookie); + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + LOG_DEBUG("DISCARD\n"); + } else { + uint8_t vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + uint32_t queue = ethtool_get_flow_spec_ring(fs->ring_cookie); + + if (vf) { + LOG_DEBUG("Action: Direct to VF %u queue %u\n", vf - 1, + queue); + } else { + LOG_DEBUG("Action: Direct to queue %u\n", queue); + } + } + + /* 打印 location */ + LOG_DEBUG(" location: %u\n", fs->location); +} + +static int32_t validate_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct zxdh_en_device *en_dev) +{ + uint64_t ring_cookie = fs->ring_cookie; + uint8_t vf_id = 0; + uint32_t queue_id = 0; + + if (ring_cookie == 
RX_CLS_FLOW_DISC) { + LOG_INFO("fs->ring_cookie is 0x%llx, action is DISCARD\n", + fs->ring_cookie); + return 0; + } + LOG_INFO("fs->ring_cookie is 0x%llx", fs->ring_cookie); + vf_id = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + queue_id = ethtool_get_flow_spec_ring(fs->ring_cookie); + if (vf_id > 0) { + vf_id--; + LOG_INFO("vf_id is %u\n", vf_id); + } + if (queue_id == QUEUE_RSS) { + LOG_INFO("queue_id is 0xffff, use rss to distribute packets\n"); + } else if (queue_id >= en_dev->curr_queue_pairs && + queue_id != QUEUE_RSS) { + LOG_ERR("queue_id is out of range %d\n", + en_dev->curr_queue_pairs - 1); + return -EINVAL; + } else { + LOG_INFO("queue_id is %u\n", queue_id); + } + return 0; +} +/* 流规则校验 */ +static int32_t validate_flow(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs) +{ + int32_t num_tuples = 0; /* 记录有效元组数量 */ + int32_t ret = 0; + + /* 调试打印,显示流类型 */ + print_ethtool_rx_flow_spec(fs); + + if (fs->location >= ETHTOOL_FD_MAX_NUM) + return -EINVAL; + + if (validate_ring_cookie(fs, en_dev)) { + return -EINVAL; + } + + /* 对流类型进行验证 */ + switch (fs->flow_type & + ~(FLOW_EXT | FLOW_MAC_EXT)) { /* 判断流基本类型,屏蔽扩展类型 */ + case ETHER_FLOW: + num_tuples += validate_ethter(fs); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + ret = validate_tcpudp4(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; + case IP_USER_FLOW: + ret = validate_ip4(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + ret = validate_tcpudp6(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; + case IPV6_USER_FLOW: + ret = validate_ip6(fs); + if (ret < 0) + return ret; + num_tuples += ret; + break; + default: + return -ENOTSUPP; + } + + /* 校验Vlan扩展字段 */ + if ((fs->flow_type & FLOW_EXT)) { + ret = validate_vlan(fs); + if (ret < 0) + return ret; + num_tuples += ret; + } + + /* 校验mac扩展字段 */ + if ((fs->flow_type & FLOW_MAC_EXT) && + (!is_zero_ether_addr(fs->m_ext.h_dest))) + num_tuples++; + + /* For 
coverity */ + if (num_tuples > 0) { + num_tuples = MAX_NUM_TUPLES; + } else { + num_tuples = 0; + } + return num_tuples; +} + +int32_t zxdh_flow_table_pf_action_add(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, + ZXDH_FD_CFG_T *p_fd_cfg) +{ + uint8_t vf_id = 0; + uint32_t queue_id = 0; + struct zxdh_vf_item *vf_item = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + uint32_t base_qid = 0; + int32_t ret = 0; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_DROP; + return 0; + } + + vf_id = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + queue_id = ethtool_get_flow_spec_ring(fs->ring_cookie); + + if (vf_id > 0) { + vf_id--; + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_id); + if (IS_ERR_OR_NULL(vf_item)) { + LOG_ERR("vif_item %d get failed", vf_id); + return -EINVAL; + } + p_fd_cfg->as_rlt.action_index2 |= ACTION_TYPE_SPEC_PORT; + p_fd_cfg->as_rlt.spec_port_vfid = vf_id; + if (queue_id == QUEUE_RSS) { + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_RSS; + return 0; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = vf_item->vport; + + ret = dpp_vport_base_qid_get(&pf_info, &base_qid); + if (ret) { + LOG_ERR("zxdh_cfg_fd_add: get vf %u base qid failed", + vf_id); + return ret; + } + LOG_INFO("zxdh_cfg_fd_add, vf %u, phy base qid is %u", vf_id, + base_qid); + } else { + if (queue_id == QUEUE_RSS) { + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_RSS; + return 0; + } + base_qid = en_dev->phy_index[0]; + } + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_QUEUE; + p_fd_cfg->as_rlt.v_qid = queue_id * 2 + base_qid; + LOG_INFO("zxdh_cfg_fd_add, phy queue id is %u", p_fd_cfg->as_rlt.v_qid); + return 0; +} + +static int32_t zxdh_cfg_np_fd(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, uint32_t *index) +{ + DPP_PF_INFO_T pf_info = { 0 }; + ZXDH_FD_CFG_T p_fd_cfg = { 0 }; + uint32_t handle = 0; + uint32_t err = 0; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + 
LOG_INFO("zxdh_cfg_np_fd start\n"); + + /* vf添加fd流表*/ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + err = zxdh_vf_add_fd(en_dev, fs, index); + if (err) { + LOG_ERR("zxdh_vf_add_fd failed\n"); + return -1; + } + return 0; + } + + /* 填写fd表 */ + zxdh_flow_table_add(fs, &p_fd_cfg, &pf_info); + err = zxdh_flow_table_pf_action_add(en_dev, fs, &p_fd_cfg); + if (err) { + LOG_ERR("zxdh_cfg_fd_add_action failed"); + return -EINVAL; + } + /* 获取index */ + if (!en_dev->fs.ethtool_fs[fs->location].is_used) { + /* 申请新的index */ + err = dpp_fd_acl_index_request(&pf_info, &handle); + if (err) { + LOG_ERR("failed to request index!\n"); + return -ENOSPC; + } + } else { + handle = en_dev->fs.ethtool_fs[fs->location] + .index; /* 使用旧的index */ + } + + *index = handle; + LOG_INFO("fd index is %d\n", *index); + + LOG_INFO("dpp_tbl_fd_cfg_add start\n"); + /* 配置到np */ + err = dpp_tbl_fd_cfg_add(&pf_info, ZXDH_SDT_FD_CFG_TABLE, handle, + &p_fd_cfg); + if (err != 0) { + LOG_ERR("failed to add fd in np!\n"); + return -1; + } + + LOG_INFO("dpp_tbl_fd_cfg_add end\n"); + return 0; +} + +static void set_flow_table(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, uint32_t index) +{ + /* 将流标规则存放在私有结构体中 */ + zte_memcpy_s(&en_dev->fs.ethtool_fs[fs->location].rfs, fs, + sizeof(struct ethtool_rx_flow_spec)); + if (!en_dev->fs.ethtool_fs[fs->location].is_used) { /* add */ + en_dev->fs.ethtool_fs[fs->location].loc = fs->location; + en_dev->fs.ethtool_fs[fs->location].index = index; + en_dev->fs.ethtool_fs[fs->location].is_used = true; + en_dev->fs.tot_num_rules++; + } + + LOG_INFO("set_flow_table: location is %u, index is %u\n", fs->location, + index); + return; +} + +static int32_t zxdh_ethtool_flow_replace(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs) +{ + int32_t num_tuples = 0; + int32_t err = 0; + uint32_t index = 0; + + LOG_INFO("zxdh_ethtool_flow_replace start!\n"); + + /* 检查流规则是否有效 */ + num_tuples = validate_flow(en_dev, fs); + if 
(num_tuples <= 0) { + LOG_ERR("flow is not valid %d\n", num_tuples); + return -EINVAL; + } + + /* 将流标规则配置到np中 */ + err = zxdh_cfg_np_fd(en_dev, fs, &index); + if (err != 0) { + LOG_ERR("zxdh_cfg_np_fd failed!\n"); + return -EINVAL; + } + + /* 将此fd流表存储在私有结构体中 */ + set_flow_table(en_dev, fs, index); + return 0; +} + +static int32_t zxdh_ethtool_flow_remove(struct zxdh_en_device *en_dev, + int32_t location) +{ + uint32_t err = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + LOG_INFO("zxdh_ethtool_flow_remove start!\n"); + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + if (location < 0 || location >= ETHTOOL_FD_MAX_NUM) + return -EINVAL; + + if (!en_dev->fs.ethtool_fs[location].is_used) { + LOG_ERR("location %d is not used!!!\n", location); + return -EINVAL; + } + + /* 清除vf的fd表 */ + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + err = zxdh_vf_del_fd(en_dev, + en_dev->fs.ethtool_fs[location].index); + if (err != 0) { + LOG_ERR("zxdh_vf_del_fd failed!\n"); + return -EINVAL; + } + goto free_flow_table; + } + + /* 清除np表项 */ + err = dpp_tbl_fd_cfg_del(&pf_info, ZXDH_SDT_FD_CFG_TABLE, + en_dev->fs.ethtool_fs[location].index); + if (err != 0) { + LOG_ERR("dpp_tbl_fd_cfg_del failed!\n"); + return -EINVAL; + } + + /* 释放index */ + err = dpp_fd_acl_index_release(&pf_info, + en_dev->fs.ethtool_fs[location].index); + if (err) { + LOG_ERR("failed to release index!!!\n"); + return -EINVAL; + } + +free_flow_table: + /* 清除私有结构体保存的信息*/ + zte_memset_s(&en_dev->fs.ethtool_fs[location], 0, + sizeof(struct zxdh_ethtool_table)); + en_dev->fs.tot_num_rules--; + return 0; +} + +int32_t zxdh_ethtool_rss_set(struct zxdh_en_device *en_dev, + struct ethtool_rxnfc *cmd) +{ + union zxdh_msg *msg = NULL; + uint32_t hash_mode = 0; + int32_t ret = 0; + + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_bar_extra_para para = { 0 }; + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + 
+ msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + + switch (cmd->data) { + /* input parameter mv */ + case (RXH_L2DA + RXH_VLAN): { + hash_mode = ZXDH_NET_RX_FLOW_HASH_MV; + break; + } + /* input parameter sdt */ + case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST): { + hash_mode = ZXDH_NET_RX_FLOW_HASH_SDT; + break; + } + /* input parameter sdfnt */ + case (RXH_L3_PROTO + RXH_IP_SRC + RXH_IP_DST + RXH_L4_B_0_1 + + RXH_L4_B_2_3): { + hash_mode = ZXDH_NET_RX_FLOW_HASH_SDFNT; + break; + } + default: { + LOG_ERR("invalid para, support mv, sdt, sdfnt\n"); + ret = -EOPNOTSUPP; + goto free_msg; + } + } + LOG_INFO("hash_mode: %u\n", hash_mode); + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_PF) { + ret = dpp_vport_rx_flow_hash_set(&pf_info, hash_mode); + } else { + msg->payload.hdr.op_code = ZXDH_RX_FLOW_HASH_SET; + msg->payload.hdr.vport = en_dev->vport; + msg->payload.hdr.pcie_id = en_dev->pcie_id; + msg->payload.rx_flow_hash_set_msg.hash_mode = hash_mode; + ret = en_dev->ops->msg_send_cmd(en_dev->parent, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + } + if (ret == 0) + en_dev->eth_config.hash_mode = hash_mode; +free_msg: + kfree(msg); + return ret; +} + +static int zxdh_en_set_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + int32_t ret = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + + LOG_INFO("zxdh_en_set_rxnfc start\n"); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = zxdh_ethtool_flow_replace(en_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = zxdh_ethtool_flow_remove(en_dev, cmd->fs.location); + break; + case ETHTOOL_SRXFH: + ret = zxdh_ethtool_rss_set(en_dev, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int32_t zxdh_en_get_coalesce(struct 
net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint16_t rx_msix_mode = 0; + uint16_t tx_msix_mode = 0; + uint32_t rx_coalesce_usecs = 0; + uint32_t tx_coalesce_usecs = 0; + int32_t err = 0; + + ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + LOG_DEBUG("zxdh_en_get_coalesce start\n"); + err = zxdh_get_misx_mode(en_dev, &rx_msix_mode, &tx_msix_mode); + if (err != 0) { + LOG_ERR("zxdh_get_misx_mode failed\n"); + return -1; + } + + LOG_DEBUG("get rx_msix_mode is %d\n", rx_msix_mode); + LOG_DEBUG("get tx_msix_mode is %d\n", tx_msix_mode); + + if (rx_msix_mode == PROTOCOL_MODE) { + coal->use_adaptive_rx_coalesce = 1; + } else if (rx_msix_mode == AGGREGATION_MODE) { + coal->use_adaptive_rx_coalesce = 0; + } else { + LOG_ERR("invalid rx_msix_mode:%d\n", rx_msix_mode); + } + + if (tx_msix_mode == PROTOCOL_MODE) { + coal->use_adaptive_tx_coalesce = 1; + } else if (tx_msix_mode == AGGREGATION_MODE) { + coal->use_adaptive_tx_coalesce = 0; + } else { + LOG_ERR("invalid tx_msix_mode:%d\n", tx_msix_mode); + } + + err = zxdh_get_coalesce_usecs(en_dev, &rx_coalesce_usecs, + &tx_coalesce_usecs); + if (err != 0) { + LOG_ERR("zxdh_get_coalesce_usecs failed\n"); + return -1; + } + LOG_DEBUG("get rx_coalesce_usecs is:%d\n", rx_coalesce_usecs); + LOG_DEBUG("get tx_coalesce_usecs is:%d\n", tx_coalesce_usecs); + + coal->rx_coalesce_usecs = rx_coalesce_usecs; + coal->tx_coalesce_usecs = tx_coalesce_usecs; + + LOG_DEBUG("zxdh_en_get_coalesce end\n"); + return 0; +} + +static int32_t zxdh_en_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kec, + struct netlink_ext_ack *ack) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint16_t rx_msix_mode = 0; + uint16_t tx_msix_mode = 0; + int32_t err = 0; + + 
ZXDH_DEVICE_STATE_CHECK_RTN(en_dev); + LOG_DEBUG("zxdh_en_set_coalesce start\n"); + if (coal->tx_coalesce_usecs > ZXDH_MAX_COAL_TIME || + coal->rx_coalesce_usecs > ZXDH_MAX_COAL_TIME) { + LOG_ERR("maximum coalesce time supported is %u usecs\n", + ZXDH_MAX_COAL_TIME); + return -ERANGE; + } + + mutex_lock(&en_priv->lock); + rx_msix_mode = (coal->use_adaptive_rx_coalesce == 1) ? PROTOCOL_MODE : + AGGREGATION_MODE; + tx_msix_mode = (coal->use_adaptive_tx_coalesce == 1) ? PROTOCOL_MODE : + AGGREGATION_MODE; + + LOG_DEBUG("cfg rx_msix_mode is %d\n", rx_msix_mode); + LOG_DEBUG("cfg tx_msix_mode is %d\n", tx_msix_mode); + LOG_DEBUG("cfg rx_coalesce_usecs is %d\n", coal->rx_coalesce_usecs); + LOG_DEBUG("cfg tx_coalesce_usecs is %d\n", coal->tx_coalesce_usecs); + + err = zxdh_cfg_misx_mode(en_dev, rx_msix_mode, tx_msix_mode); + if (err != 0) { + LOG_ERR("zxdh_cfg_misx_mode failed\n"); + goto out; + } + + err = zxdh_cfg_coalesce_usecs(en_dev, coal->rx_coalesce_usecs, + coal->tx_coalesce_usecs); + if (err != 0) { + LOG_ERR("zxdh_cfg_coalesce_usecs failed\n"); + goto out; + } + +out: + mutex_unlock(&en_priv->lock); + return err; +} + +static const struct ethtool_ops zxdh_en_ethtool_ops = { +#ifdef HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT + .supported_coalesce_params = + ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_TX_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | + ETHTOOL_COALESCE_USE_ADAPTIVE_TX, +#endif + .get_coalesce = zxdh_en_get_coalesce, + .set_coalesce = zxdh_en_set_coalesce, + .get_drvinfo = zxdh_en_get_drvinfo, + .get_link_ksettings = zxdh_en_get_link_ksettings, + .set_link_ksettings = zxdh_en_set_link_ksettings, + .get_regs_len = zxdh_en_get_regs_len, + .get_regs = zxdh_en_get_regs, + .get_wol = zxdh_en_get_wol, + .set_wol = zxdh_en_set_wol, + .get_msglevel = zxdh_en_get_msglevel, + .set_msglevel = zxdh_en_set_msglevel, + .nway_reset = zxdh_en_nway_reset, + .get_link = zxdh_en_get_link, + .get_eeprom_len = zxdh_en_get_eeprom_len, + .get_eeprom = 
zxdh_en_get_eeprom, + .set_eeprom = zxdh_en_set_eeprom, + .get_ringparam = zxdh_en_get_ringparam, + .set_ringparam = zxdh_en_set_ringparam, + .get_pauseparam = zxdh_en_get_pauseparam, + .set_pauseparam = zxdh_en_set_pauseparam, + .get_fecparam = zxdh_en_get_fecparam, + .set_fecparam = zxdh_en_set_fecparam, + .get_module_info = zxdh_en_get_module_info, + .get_module_eeprom = zxdh_en_get_module_eeprom, + .get_module_eeprom_by_page = zxdh_en_get_module_eeprom_by_page, + .self_test = zxdh_en_diag_test, + .get_strings = zxdh_en_get_strings, + .get_priv_flags = zxdh_en_get_priv_flags, + .set_priv_flags = zxdh_en_set_priv_flags, +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = zxdh_en_set_phys_id, +#else + .phys_id = zxdh_en_phys_id, +#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + .get_sset_count = zxdh_en_get_sset_count, +#endif + .get_ethtool_stats = zxdh_en_get_ethtool_stats, + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef HAVE_ETHTOOL_GET_TS_INFO + .get_ts_info = zxdh_en_get_ts_info, +#endif /* HAVE_ETHTOOL_GET_TS_INFO */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifdef CONFIG_PM_RUNTIME + .begin = zxdh_en_ethtool_begin, + .complete = zxdh_en_ethtool_complete, +#endif /* CONFIG_PM_RUNTIME */ +#ifndef HAVE_NDO_SET_FEATURES + .get_rx_csum = zxdh_en_get_rx_csum, + .set_rx_csum = zxdh_en_set_rx_csum, + .set_tx_csum = zxdh_en_set_tx_csum, +#ifdef NETIF_F_TSO + .set_tso = zxdh_en_set_tso, +#endif +#ifdef ETHTOOL_GFLAGS + .set_flags = zxdh_en_set_flags, +#endif /* ETHTOOL_GFLAGS */ +#endif /* HAVE_NDO_SET_FEATURES */ + +#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#ifdef ETHTOOL_GEEE + .get_eee = zxdh_en_get_eee, +#endif +#ifdef ETHTOOL_SEEE + .set_eee = zxdh_en_set_eee, +#endif +#ifdef ETHTOOL_GRXFHINDIR +#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size, + .get_rxfh_key_size = zxdh_en_get_rxfh_key_size, 
+#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ +#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .get_rxfh = zxdh_en_get_rxfh, +#else + .get_rxfh_indir = zxdh_en_get_rxfh_indir, +#endif /* HAVE_ETHTOOL_GSRSSH */ +#endif /* ETHTOOL_GRXFHINDIR */ +#ifdef ETHTOOL_SRXFHINDIR +#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH)) + .set_rxfh = zxdh_en_set_rxfh, +#else + .set_rxfh_indir = zxdh_en_set_rxfh_indir, +#endif /* HAVE_ETHTOOL_GSRSSH */ +#endif /* ETHTOOL_SRXFHINDIR */ +#ifdef ETHTOOL_GCHANNELS + .get_channels = zxdh_en_get_channels, +#endif /* ETHTOOL_GCHANNELS */ +#ifdef ETHTOOL_SCHANNELS + .set_channels = zxdh_en_set_channels, +#endif /* ETHTOOL_SCHANNELS */ +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ +#ifdef ETHTOOL_GRXFH + .get_rxnfc = zxdh_en_get_rxnfc, + .set_rxnfc = zxdh_en_set_rxnfc, +#endif +}; + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +static const struct ethtool_ops_ext zxdh_en_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = zxdh_en_get_ts_info, + .set_phys_id = zxdh_en_set_phys_id, + .get_eee = zxdh_en_get_eee, + .set_eee = zxdh_en_set_eee, +#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE + .get_rxfh_indir_size = zxdh_en_get_rxfh_indir_size, +#endif /* HAVE_ETHTOOL_GRSFHINDIR_SIZE */ + .get_rxfh_indir = zxdh_en_get_rxfh_indir, + .set_rxfh_indir = zxdh_en_set_rxfh_indir, + .get_channels = zxdh_en_get_channels, + .set_channels = zxdh_en_set_channels, +}; + +void zxdh_en_set_ethtool_ops_ext(struct net_device *netdev) +{ + netdev->ethtool_ops = &zxdh_en_ethtool_ops; + set_ethtool_ops_ext(netdev, &zxdh_en_ethtool_ops_ext); +} +#else +void zxdh_en_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &zxdh_en_ethtool_ops; +} +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ diff --git a/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h b/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h new file mode 100644 index 000000000000..103522913bd4 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_ethtool/ethtool.h @@ -0,0 +1,159 @@ +#ifndef __ZXDH_EN_ETHTOOL_H__ +#define __ZXDH_EN_ETHTOOL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +extern bool enable_1588_debug; +#define DEBUG_1588(fmt, arg...) \ + do { \ + if (enable_1588_debug == true) { \ + printk(KERN_INFO "[PTP INFO][%s][%d]: " fmt "\n", \ + __FUNCTION__, __LINE__, ##arg); \ + } \ + } while (0) + +extern int32_t print_data(uint8_t *data, uint32_t len); +#define DEBUG_1588_DATA(data, len) \ + do { \ + if (enable_1588_debug == true) { \ + print_data(data, len); \ + } \ + } while (0) + +enum zxdh_priv_flag { + ZXDH_PFLAG_ENABLE_LLDP, + ZXDH_PFLAG_ENABLE_SSHD, + ZXDH_PFLAG_IP = 2, + ZXDH_PFLAG_1588_DEBUG, + ZXDH_PFLAG_HARDWARE_BOND, + ZXDH_PFLAG_HARDWARE_BOND_PRIMARY, + ZXDH_PFLAG_LINK_DOWN_ON_CLOSE, + ZXDH_PFLAG_ETS_SWITCH, + ZXDH_PFLAG_PCIE_AER_CPL_TIMEOUT, + ZXDH_PFLAG_PCIE_HP_IRQ_CTRL, + ZXDH_PFLAG_DUAL_TOR_CTRL, + ZXDH_PFLAG_1588_ENABLE, + ZXDH_NUM_PFLAGS, /* Keep last */ +}; + +#ifdef CONFIG_INET +struct zxdh_ehdr { + uint64_t magic; +}; + +#define ZXDH_TEST_PKT_SIZE 100 +#define ZXDH_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) +#define ZXDH_TEST_MAGIC 0x6AEED15C001ULL + +struct zxdh_lbt_priv { + struct packet_type pt; + struct completion comp; + bool loopback_ok; +}; +#endif /* CONFIG_INET */ + +enum { + ZXDH_ST_LINK_STATE, + ZXDH_ST_LINK_SPEED, + ZXDH_ST_HEALTH_INFO, +#ifdef CONFIG_INET + ZXDH_ST_LOOPBACK, +#endif + ZXDH_ST_NUM, +}; + +static const int8_t zxdh_self_tests[ZXDH_ST_NUM][ETH_GSTRING_LEN] = { + "Link Test", + "Speed Test", + "Health Test", +#ifdef CONFIG_INET + "Loopback Test", +#endif +}; + +enum interrupt_mode { + PROTOCOL_MODE = 0, + NONE_MODE = 1, + TRIGGERED_EVERGE_MODE = 2, + AGGREGATION_MODE = 3, +}; + +#define ZXDH_MAX_COAL_TIME 32 + +#define ZXDH_SET_PFLAG(pflags, flag, enable) \ + do { \ + if (enable) { \ + pflags |= BIT(flag); \ + } else { \ + pflags &= 
~(BIT(flag)); \ + } \ + } while (0) + +#define ZXDH_ADD_STRING(data, str) \ + do { \ + data += ETH_GSTRING_LEN; \ + snprintf(data, ETH_GSTRING_LEN, str); \ + } while (0) + +#define ZXDH_ADD_QUEUE_STRING(data, str, i) \ + do { \ + data += ETH_GSTRING_LEN; \ + snprintf(data, ETH_GSTRING_LEN, "queue[%u]_%s", i, str); \ + } while (0) + +#define ZXDH_NETDEV_STATS_NUM \ + (sizeof(struct zxdh_en_netdev_stats) / sizeof(uint64_t)) +#define ZXDH_VPORT_STATS_NUM \ + (sizeof(struct zxdh_en_vport_stats) / sizeof(uint64_t)) +#define ZXDH_MAC_STATS_NUM (sizeof(struct zxdh_en_phy_stats) / sizeof(uint64_t)) +#define ZXDH_QUEUE_STATS_NUM \ + (sizeof(struct zxdh_en_queue_stats) / sizeof(uint64_t)) +#define ZXDH_UDP_STATS_NUM \ + (sizeof(struct zxdh_en_udp_phy_stats) / sizeof(uint64_t)) + +#define ZXDH_NET_PF_STATS_NUM(en_dev) \ + (ZXDH_NETDEV_STATS_NUM + ZXDH_MAC_STATS_NUM + ZXDH_VPORT_STATS_NUM + \ + ZXDH_UDP_STATS_NUM + en_dev->curr_queue_pairs * ZXDH_QUEUE_STATS_NUM) + +#define ZXDH_GET_PFLAG(pflags, flag) (!!(pflags & (BIT(flag)))) + +#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +void zxdh_en_set_ethtool_ops_ext(struct net_device *netdev); +#else +void zxdh_en_set_ethtool_ops(struct net_device *netdev); +#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + +int32_t zxdh_flow_table_pf_action_add(struct zxdh_en_device *en_dev, + struct ethtool_rx_flow_spec *fs, + ZXDH_FD_CFG_T *p_fd_cfg); + +#define MAX_NUM_TUPLES 10 +#define ETH_TYPE_VLAN 0x8100 +#define VLAN_VID_MASK 0x0fff +#define VLAN_N_VID 4096 +#define VLAN_PCP_MASK 0xe000 +#define VLAN_PCP_SHIFT 13 +#define ETHTOOL_FD_MAX_NUM 2048 +#define ETHTOOL_IP4_LEN 4 +#define ETHTOOL_IP6_LEN 16 +#define ETHTOOL_TRUE_MASK 0 +#define ACTION_TYPE_QUEUE 0x40 +#define ACTION_TYPE_SPEC_PORT 0x80 +#define ACTION_TYPE_DROP 0x10 +#define ACTION_TYPE_RSS 0x04 +#define QUEUE_RSS 0xffff + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_mpf.c b/drivers/net/ethernet/dinghai/en_mpf.c new file mode 100644 index 
000000000000..28e558769bc1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf.c @@ -0,0 +1,297 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "./en_mpf/en_mpf_events.h" +#include "./en_mpf/en_mpf_eq.h" +#include "./en_mpf/en_mpf_irq.h" +#include "en_mpf.h" +#include "en_mpf/en_mpf_cfg_sf.h" + +MODULE_LICENSE("Dual BSD/GPL"); + +uint32_t dh_debug_mask; +module_param_named(debug_mask, dh_debug_mask, uint, 0644); +MODULE_PARM_DESC( + debug_mask, + "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); + +extern struct devlink_ops dh_mpf_devlink_ops; +extern struct dh_core_devlink_ops dh_mpf_core_devlink_ops; + +int32_t dh_mpf_pci_init(struct dh_core_dev *dev) +{ + int32_t ret = 0; + struct dh_en_mpf_dev *mpf_dev = NULL; + + pci_set_drvdata(dev->pdev, dev); + + ret = pci_enable_device(dev->pdev); + if (ret != 0) { + dev_err(dev->device, "pci_enable_device failed: %d\n", ret); + return -ENOMEM; + } + + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(64)); + if (ret != 0) { + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(dev->device, + "dma_set_mask_and_coherent failed: %d\n", ret); + goto err_pci; + } + } + + ret = pci_request_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM), + "dh-mpf"); + if (ret != 0) { + dev_err(dev->device, + "pci_request_selected_regions failed: %d\n", ret); + goto err_pci; + } + + pci_enable_pcie_error_reporting(dev->pdev); + pci_set_master(dev->pdev); + ret = pci_save_state(dev->pdev); + if (ret != 0) { + dev_err(dev->device, "pci_save_state failed: %d\n", ret); + goto err_pci_save_state; + } + + mpf_dev = dh_core_priv(dev); + mpf_dev->pci_ioremap_addr = + (uint64_t)ioremap(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + LOG_INFO("pci_ioremap_addr=0x%llx, ioremap(0x%llx, 0x%llx)\n", + mpf_dev->pci_ioremap_addr, pci_resource_start(dev->pdev, 0), + 
pci_resource_len(dev->pdev, 0)); + if (mpf_dev->pci_ioremap_addr == 0) { + ret = -1; + LOG_ERR("ioremap(0x%llx, 0x%llx) failed\n", + pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + goto err_pci_save_state; + } + + return 0; + +err_pci_save_state: + pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); +err_pci: + pci_disable_device(dev->pdev); + return ret; +} + +static const struct pci_device_id dh_mpf_pci_table[] = { + { PCI_DEVICE(ZXDH_MPF_VENDOR_ID, ZXDH_MPF_DEVICE_ID), 0 }, + { + 0, + } +}; + +MODULE_DEVICE_TABLE(pci, dh_mpf_pci_table); + +void dh_mpf_pci_close(struct dh_core_dev *dev) +{ + struct dh_en_mpf_dev *mpf_dev = NULL; + + mpf_dev = dh_core_priv(dev); + iounmap((void *)mpf_dev->pci_ioremap_addr); + pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); + pci_disable_device(dev->pdev); + + return; +} + +static int32_t dh_mpf_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct dh_core_dev *dh_dev = NULL; + struct devlink *devlink = NULL; + int32_t err = 0; + + LOG_INFO("mpf driver start to probe\n"); + + devlink = zxdh_devlink_alloc(&pdev->dev, &dh_mpf_devlink_ops, + sizeof(struct dh_en_mpf_dev)); + if (devlink == NULL) { + dev_err(&pdev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + + dh_dev = devlink_priv(devlink); + dh_dev->device = &pdev->dev; + dh_dev->pdev = pdev; + dh_dev->devlink_ops = &dh_mpf_core_devlink_ops; + + err = dh_mpf_pci_init(dh_dev); + if (err != 0) { + dev_err(&pdev->dev, "dh_mpf_pci_init failed: %d\n", err); + goto err_devlink_cleanup; + } + + err = dh_mpf_irq_table_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_pci; + } + + err = dh_mpf_eq_table_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_eq_table_init; + } + + err = 
dh_mpf_irq_table_create(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_irq_table_create; + } + + err = dh_mpf_eq_table_create(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc EQs\n"); + goto err_eq_table_create; + } + + err = dh_mpf_events_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "failed to initialize events\n"); + goto err_events_init_cleanup; + } + + zxdh_devlink_register(devlink); + + LOG_INFO("mpf driver probe completed\n"); + return 0; + +err_events_init_cleanup: + dh_mpf_eq_table_destroy(dh_dev); +err_eq_table_create: + dh_mpf_irq_table_destroy(dh_dev); +err_irq_table_create: + dh_eq_table_cleanup(dh_dev); +err_eq_table_init: + dh_irq_table_cleanup(dh_dev); +err_pci: + dh_mpf_pci_close(dh_dev); +err_devlink_cleanup: + zxdh_devlink_free(devlink); + return err; +} + +static void dh_mpf_remove(struct pci_dev *pdev) +{ + struct dh_core_dev *dh_dev = pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dh_dev); + LOG_INFO("mpf driver start to remove"); + + zxdh_devlink_unregister(devlink); + dh_mpf_events_uninit(dh_dev); + dh_mpf_eq_table_destroy(dh_dev); + dh_mpf_irq_table_destroy(dh_dev); + dh_eq_table_cleanup(dh_dev); + dh_irq_table_cleanup(dh_dev); + dh_mpf_pci_close(dh_dev); + zxdh_devlink_free(devlink); + + pci_set_drvdata(pdev, NULL); + LOG_INFO("mpf driver remove completed\n"); +} + +static int32_t dh_mpf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + return 0; +} + +static int32_t dh_mpf_resume(struct pci_dev *pdev) +{ + return 0; +} + +static void dh_mpf_shutdown(struct pci_dev *pdev) +{ + dh_mpf_remove(pdev); +} + +static pci_ers_result_t dh_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + return PCI_ERS_RESULT_NONE; +} + +static pci_ers_result_t dh_mpf_pci_slot_reset(struct pci_dev *pdev) +{ + return PCI_ERS_RESULT_NONE; +} + +static void dh_mpf_pci_resume(struct pci_dev *pdev) +{ +} + +static const struct pci_error_handlers dh_mpf_err_handler = { + 
.error_detected = dh_pci_err_detected, + .slot_reset = dh_mpf_pci_slot_reset, + .resume = dh_mpf_pci_resume +}; + +static struct pci_driver dh_mpf_driver = { + .name = KBUILD_MODNAME, + .id_table = dh_mpf_pci_table, + .probe = dh_mpf_probe, + .remove = dh_mpf_remove, + .suspend = dh_mpf_suspend, + .resume = dh_mpf_resume, + .shutdown = dh_mpf_shutdown, + .err_handler = &dh_mpf_err_handler, +}; + +static int32_t __init init(void) +{ + int32_t err = 0; + + err = pci_register_driver(&dh_mpf_driver); + if (err != 0) { + LOG_ERR("pci_register_driver failed: %d\n", err); + return err; + } + +#ifdef CONFIG_ZXDH_SF + err = zxdh_mpf_sf_driver_register(); + if (err != 0) { + LOG_ERR("zxdh_en_sf_driver_register failed: %d\n", err); + goto err_sf; + } +#endif + + LOG_INFO("zxdh_mpf driver init success\n"); + + return 0; + +err_sf: + pci_unregister_driver(&dh_mpf_driver); + return err; +} + +static void __exit cleanup(void) +{ +#ifdef CONFIG_ZXDH_SF + zxdh_mpf_sf_driver_uregister(); +#endif + pci_unregister_driver(&dh_mpf_driver); + + LOG_INFO("zxdh_mpf driver remove success\n"); +} + +module_init(init); +module_exit(cleanup); diff --git a/drivers/net/ethernet/dinghai/en_mpf.h b/drivers/net/ethernet/dinghai/en_mpf.h new file mode 100644 index 000000000000..cfa2d78c6b9c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf.h @@ -0,0 +1,32 @@ +#ifndef __ZXDH_EN_MPF_H__ +#define __ZXDH_EN_MPF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define ZXDH_MPF_VENDOR_ID 0x1111 +#define ZXDH_MPF_DEVICE_ID 0x1041 + +#define ZXDH_BAR1_CHAN_OFFSET 0x2000 //0x7801000 +#define ZXDH_BAR2_CHAN_OFFSET 0x3000 //0x7802000 + +struct dh_en_mpf_dev { + uint16_t ep_bdf; + uint16_t pcie_id; + uint16_t vport; + + uint64_t pci_ioremap_addr; + + struct work_struct dh_np_sdk_from_risc; + struct work_struct dh_np_sdk_from_pf; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_EN_MPF_H__ */ \ No newline at end of file diff --git 
a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.c b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.c new file mode 100644 index 000000000000..53febd14ab57 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.c @@ -0,0 +1,60 @@ +#include +#include +#include + +#include "en_mpf_cfg_sf.h" + +static int32_t zxdh_cfg_resume(struct zxdh_auxiliary_device *adev) +{ + return 0; +} + +static int32_t zxdh_cfg_suspend(struct zxdh_auxiliary_device *adev, + pm_message_t state) +{ + return 0; +} + +static int32_t zxdh_cfg_probe(struct zxdh_auxiliary_device *adev, + const struct zxdh_auxiliary_device_id *id) +{ + struct cfg_sf_dev *__attribute__((unused)) + cfg_sf_dev = container_of(adev, struct cfg_sf_dev, adev); + + return 0; +} + +static int32_t zxdh_cfg_remove(struct zxdh_auxiliary_device *adev) +{ + return 0; +} + +static const struct zxdh_auxiliary_device_id zxdh_cfg_id_table[] = { + { + .name = ZXDH_EN_SF_NAME ".mpf_cfg", + }, + {}, +}; + +//MODULE_DEVICE_TABLE(auxiliary_zxdh_id_table, zxdh_cfg_id_table); + +static struct zxdh_auxiliary_driver zxdh_cfg_driver = { + .name = "mpf_cfg", + .probe = zxdh_cfg_probe, + .remove = zxdh_cfg_remove, + .suspend = zxdh_cfg_suspend, + .resume = zxdh_cfg_resume, + .id_table = zxdh_cfg_id_table, +}; + +int32_t zxdh_mpf_sf_driver_register(void) +{ + return zxdh_auxiliary_driver_register(&zxdh_cfg_driver); + ; +} + +void zxdh_mpf_sf_driver_uregister(void) +{ + zxdh_auxiliary_driver_unregister(&zxdh_cfg_driver); + ; +} diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.h b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.h new file mode 100644 index 000000000000..b5da6682e3ef --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_cfg_sf.h @@ -0,0 +1,27 @@ +#ifndef __EN_MPF_CFG_SF_H__ +#define __EN_MPF_CFG_SF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +struct cfg_sf_ops { +}; + +struct cfg_sf_dev { + struct zxdh_auxiliary_device adev; + struct dh_core_dev *dh_dev; + 
struct cfg_sf_ops *ops; +}; + +int32_t zxdh_mpf_sf_driver_register(void); +void zxdh_mpf_sf_driver_uregister(void); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.c b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.c new file mode 100644 index 000000000000..23d7e8855c10 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.c @@ -0,0 +1,126 @@ +#include +#include +#include "en_mpf_devlink.h" + +struct devlink_ops dh_mpf_devlink_ops = { + +}; + +enum { + DH_MPF_PARAMS_MAX, +}; + +static int32_t __attribute__((unused)) sample_check(struct dh_core_dev *dev) +{ + return 1; +} + +enum dh_mpf_devlink_param_id { + DH_MPF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, +}; + +static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_REGISTER +static const struct devlink_params { + const char *name; + int32_t (*check)(struct dh_core_dev *dev); + struct devlink_param param; +} devlink_params[] = { [DH_MPF_PARAMS_MAX] = { + .name = "sample", + .check = &sample_check, + .param = DEVLINK_PARAM_DRIVER( + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, + dh_devlink_sample_set, NULL), + } }; + +static int32_t params_register(struct devlink *devlink) +{ + int32_t i = 0; + int32_t err = 0; + struct dh_core_dev *dh_dev = devlink_priv(devlink); + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + if (devlink_params[i].check(dh_dev)) { + err = devlink_param_register(devlink, 
+ &devlink_params[i].param); + if (err) { + goto rollback; + } + } + } + + return 0; + +rollback: + if (i == 0) { + return err; + } + + for (; i > 0; i--) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return err; +} + +static int32_t params_unregister(struct devlink *devlink) +{ + int32_t i = 0; + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return 0; +} +#else +static struct devlink_param devlink_params[] = { + [DH_MPF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER( + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, dh_devlink_sample_set, NULL), +}; + +static int32_t params_register(struct devlink *devlink) +{ + struct dh_core_dev *__attribute__((unused)) + dh_dev = devlink_priv(devlink); + int32_t err = 0; + + err = devlink_params_register(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return err; +} +static int32_t params_unregister(struct devlink *devlink) +{ + devlink_params_unregister(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return 0; +} +#endif + +struct dh_core_devlink_ops dh_mpf_core_devlink_ops = { + .params_register = params_register, + .params_unregister = params_unregister +}; diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.h b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.h new file mode 100644 index 000000000000..dbfb67587736 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_devlink.h @@ -0,0 +1,14 @@ +#ifndef __EN_MPF_DEVLINK_H__ +#define __EN_MPF_DEVLINK_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.c b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.c new file mode 100644 index 000000000000..cd5645d3da31 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.c @@ 
-0,0 +1,239 @@ +#include +#include +#include +#include +#include "en_mpf_irq.h" +#include "en_mpf_eq.h" +#include "../en_mpf.h" + +struct dh_mpf_eq_table { + struct dh_irq **comp_irqs; + struct dh_irq *async_risc_irq; + struct dh_irq *async_pf_irq; + struct dh_eq_async async_risc_eq; + struct dh_eq_async async_pf_eq; +}; + +static int32_t create_async_eqs(struct dh_core_dev *dev); + +static int32_t __attribute__((unused)) create_eq_map(struct dh_eq_param *param) +{ + int32_t err = 0; + + /* inform device*/ + return err; +} + +int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table; + struct dh_mpf_eq_table *table_priv = NULL; + int32_t err = 0; + + eq_table = &dev->eq_table; + + table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL); + if (unlikely(table_priv == NULL)) { + err = -ENOMEM; + goto err_table_priv; + } + + dh_eq_table_init(dev, table_priv); + + return 0; + +err_table_priv: + kvfree(eq_table); + return err; +} + +/*todo*/ +int32_t dh_eq_get_comp_eqs(struct dh_core_dev *dev) +{ + return 0; +} + +static int32_t create_comp_eqs(struct dh_core_dev *dev) +{ + return 0; +} + +static int32_t destroy_async_eq(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + + mutex_lock(&eq_table->lock); + /*unmap inform device*/ + mutex_unlock(&eq_table->lock); + + return 0; +} + +static void cleanup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq, + const char *name) +{ + dh_eq_disable(dev, &eq->core, &eq->irq_nb); +} + +static void destroy_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_mpf_eq_table *table_priv = table->priv; + + cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv"); + cleanup_async_eq(dev, &table_priv->async_pf_eq, "pf"); + destroy_async_eq(dev); + dh_irqs_release_vectors(&table_priv->async_risc_irq, 1); + dh_irqs_release_vectors(&table_priv->async_pf_irq, 1); +} + +void destroy_comp_eqs(struct dh_core_dev *dev) +{ +} + +void 
dh_mpf_eq_table_destroy(struct dh_core_dev *dev) +{ + destroy_comp_eqs(dev); + destroy_async_eqs(dev); +} + +int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev) +{ + int32_t err = 0; + + err = create_async_eqs(dev); + if (err != 0) { + dh_err(dev, "Failed to create async EQs\n"); + goto err_async_eqs; + } + + err = create_comp_eqs(dev); + if (err != 0) { + dh_err(dev, "Failed to create completion EQs\n"); + goto err_comp_eqs; + } + + return 0; + +err_comp_eqs: + destroy_async_eqs(dev); +err_async_eqs: + return err; +} + +/*create eventq*/ +static int32_t create_async_eq(struct dh_core_dev *dev, struct dh_irq *risc, + struct dh_irq *pf) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev); + struct msix_para in = { 0 }; + int32_t err = 0; + + in.vector_risc = risc->index; + in.vector_pfvf = pf->index; + in.vector_mpf = 0xff; + in.driver_type = MSG_CHAN_END_PF; //TODO + in.pdev = dev->pdev; + in.virt_addr = mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET; + + mutex_lock(&eq_table->lock); + + err = zxdh_bar_enable_chan(&in, &mpf_dev->vport); + + mutex_unlock(&eq_table->lock); + + return err; +} + +static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + &eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF], + DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF, NULL); + + return 0; +} + +static int32_t dh_eq_async_mpf_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + 
&eq_table->nh[DH_EVENT_TYPE_NOTIFY_PF_TO_MPF], + DH_EVENT_TYPE_NOTIFY_PF_TO_MPF, NULL); + + return 0; +} + +static int32_t create_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_mpf_eq_table *table_priv = table->priv; + struct dh_eq_param param = {}; + int32_t err = 0; + + dh_dbg(dev, "start\r\n"); + table_priv->async_risc_irq = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_risc_irq)) { + dh_err(dev, "Failed to get async_risc_irq\n"); + return PTR_ERR(table_priv->async_risc_irq); + } + + table_priv->async_pf_irq = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_pf_irq)) { + err = PTR_ERR(table_priv->async_pf_irq); + dh_err(dev, "Failed to get async_pf_irq\n"); + goto err_irq_request; + } + + err = create_async_eq(dev, table_priv->async_risc_irq, + table_priv->async_pf_irq); + if (err != 0) { + dh_err(dev, "Failed to create async_eq\n"); + goto err_create_async_eq; + } + + param = (struct dh_eq_param){ + .irq = table_priv->async_risc_irq, + .nent = 10, + .event_type = + DH_EVENT_QUEUE_TYPE_RISCV /* used for inform dpu */ + }; + err = setup_async_eq(dev, &table_priv->async_risc_eq, ¶m, + dh_eq_async_riscv_int, "riscv", dev); + if (err != 0) { + dh_err(dev, "Failed to setup async_risc_eq\n"); + goto err_setup_async_eq; + } + + param.irq = table_priv->async_pf_irq, + err = setup_async_eq(dev, &table_priv->async_pf_eq, ¶m, + dh_eq_async_mpf_int, "pf", dev); + if (err != 0) { + dh_err(dev, "Failed to setup async_pf_eq\n"); + goto cleanup_async_eq; + } + + return 0; + +cleanup_async_eq: + cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv"); +err_setup_async_eq: + destroy_async_eq(dev); +err_create_async_eq: + dh_irqs_release_vectors(&table_priv->async_pf_irq, 1); +err_irq_request: + dh_irqs_release_vectors(&table_priv->async_risc_irq, 1); + return err; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.h 
b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.h new file mode 100644 index 000000000000..b700c4b2f42f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_eq.h @@ -0,0 +1,19 @@ +#ifndef __EN_MPF_EQ_H__ +#define __EN_MPF_EQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev); + +int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev); +void dh_mpf_eq_table_destroy(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.c b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.c new file mode 100644 index 000000000000..0ba6d0e88fb9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.c @@ -0,0 +1,144 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "en_mpf_events.h" +#include "../en_mpf.h" + +static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type, + void *data); +static int32_t pf_notifier(struct notifier_block *nb, unsigned long type, + void *data); + +static struct dh_nb mpf_events[] = { + { .nb.notifier_call = riscv_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF }, + { .nb.notifier_call = pf_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_MPF } +}; + +static int32_t riscv_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + + zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_risc); + + return NOTIFY_OK; +} + +static int32_t pf_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = 
dh_core_priv(dh_dev); + + zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_pf); + + return NOTIFY_OK; +} + +void np_sdk_handler_from_risc(struct work_struct *p_work) +{ + struct dh_en_mpf_dev *mpf_dev = + container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_risc); + + LOG_INFO("is called\n"); + zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_MPF, + mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET, + NULL); + return; +} + +void np_sdk_handler_from_pf(struct work_struct *p_work) +{ + struct dh_en_mpf_dev *mpf_dev = + container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_pf); + + LOG_INFO("is called\n"); + zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_MPF, + mpf_dev->pci_ioremap_addr + ZXDH_BAR2_CHAN_OFFSET, + NULL); + return; +} + +void zxdh_events_start(struct dh_core_dev *dev) +{ + struct dh_events *events = dev->events; + int32_t i; + int32_t err; + + for (i = 0; i < ARRAY_SIZE(mpf_events); i++) { + events->notifiers[i].nb = mpf_events[i]; + events->notifiers[i].ctx = dev; + err = dh_eq_notifier_register(&dev->eq_table, + &events->notifiers[i].nb); + if (err != 0) { + LOG_ERR("i: %d, err: %d.\n", i, err); + } + } +} + +int32_t dh_mpf_events_init(struct dh_core_dev *dev) +{ + struct dh_events *events = NULL; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev); + int32_t ret = 0; + + events = kzalloc((sizeof(*events) + + ARRAY_SIZE(mpf_events) * sizeof(struct dh_event_nb)), + GFP_KERNEL); + if (unlikely(events == NULL)) { + LOG_ERR("events kzalloc failed: %p\n", events); + ret = -ENOMEM; + goto err_events_kzalloc; + } + + events->evt_num = ARRAY_SIZE(mpf_events); + events->dev = dev; + dev->events = events; + events->wq = create_singlethread_workqueue("dh_mpf_events"); + if (!events->wq) { + LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n", + events->wq); + ret = -ENOMEM; + goto err_create_wq; + } + + INIT_WORK(&mpf_dev->dh_np_sdk_from_risc, np_sdk_handler_from_risc); + INIT_WORK(&mpf_dev->dh_np_sdk_from_pf, 
np_sdk_handler_from_pf); + + zxdh_events_start(dev); + + return 0; + +err_create_wq: + kfree(events); +err_events_kzalloc: + return ret; +} + +void dh_events_stop(struct dh_core_dev *dev) +{ + struct dh_events *events = dev->events; + int32_t i = 0; + + for (i = ARRAY_SIZE(mpf_events) - 1; i >= 0; i--) { + dh_eq_notifier_unregister(&dev->eq_table, + &events->notifiers[i].nb); + } + + zxdh_events_cleanup(dev); +} + +void dh_mpf_events_uninit(struct dh_core_dev *dev) +{ + return dh_events_stop(dev); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.h b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.h new file mode 100644 index 000000000000..6643df4a96ae --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_events.h @@ -0,0 +1,18 @@ +#ifndef __EN_MPF_EVENTS_H__ +#define __EN_MPF_EVENTS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +int32_t dh_mpf_events_init(struct dh_core_dev *dev); +void dh_mpf_events_uninit(struct dh_core_dev *dev); +void zxdh_events_start(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.c b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.c new file mode 100644 index 000000000000..d7847d852754 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.c @@ -0,0 +1,165 @@ +#include +#include +#include +#include "en_mpf_irq.h" + +#define ZXDH_MPF_ASYNC_IRQ_MIN_COMP 0 +#define ZXDH_MPF_ASYNC_IRQ_MAX_COMP 1 + +#define ZXDH_MPF_COMP_IRQ_MIN_COMP 0 +#define ZXDH_MPF_COMP_IRQ_MAX_COMP 1 + +#ifndef CONFIG_DINGHAI_ZF_MPF +#define ZXDH_MPF_ASYNC_IRQ_NUM 2 +#else +#define ZXDH_MPF_ASYNC_IRQ_NUM 6 +#endif + +struct dh_mpf_irq_table { + struct dh_irq_pool *mpf_comp_pool; + struct dh_irq_pool *mpf_async_pool; +}; + +struct dh_irq_range { + int32_t start; + int32_t size; +}; + +static struct dh_irq_range zxdh_get_mpf_range(struct dh_core_dev *dev) +{ + struct dh_irq_range tmp = { 
.start = 0, + .size = ZXDH_MPF_ASYNC_IRQ_NUM }; + + return tmp; +} +static struct dh_irq_range zxdh_get_comp_mpf_range(struct dh_core_dev *dev) +{ + struct dh_irq_range tmp = { .start = ZXDH_MPF_ASYNC_IRQ_NUM + 1, + .size = ZXDH_MPF_ASYNC_IRQ_NUM + 1 }; + + return tmp; +} + +static int32_t irq_pools_init(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + int32_t err = 0; + struct dh_irq_range irq_range; + struct dh_mpf_irq_table *mpf_irq_table = table->priv; + + /* init mpf_pool */ + irq_range = zxdh_get_mpf_range(dev); + + mpf_irq_table->mpf_async_pool = irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_mpf_msg", + ZXDH_MPF_ASYNC_IRQ_MIN_COMP, ZXDH_MPF_ASYNC_IRQ_MAX_COMP); + if (IS_ERR_OR_NULL(mpf_irq_table->mpf_async_pool)) { + return PTR_ERR(mpf_irq_table->mpf_async_pool); + } + + /* init sf_comp_pool */ + irq_range = zxdh_get_comp_mpf_range(dev); + + mpf_irq_table->mpf_comp_pool = irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_mpf_comp", + ZXDH_MPF_COMP_IRQ_MIN_COMP, ZXDH_MPF_COMP_IRQ_MAX_COMP); + if (IS_ERR_OR_NULL(mpf_irq_table->mpf_comp_pool)) { + err = PTR_ERR(mpf_irq_table->mpf_comp_pool); + goto err_mpf_comp; + } + + mpf_irq_table->mpf_comp_pool->irqs_per_cpu = + kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL); + if (unlikely(mpf_irq_table->mpf_comp_pool->irqs_per_cpu == NULL)) { + err = -ENOMEM; + goto err_irqs_per_cpu; + } + + return 0; + +err_irqs_per_cpu: + irq_pool_free(mpf_irq_table->mpf_comp_pool); +err_mpf_comp: + irq_pool_free(mpf_irq_table->mpf_async_pool); + return err; +} + +static void irq_pools_destroy(struct dh_irq_table *table) +{ + struct dh_mpf_irq_table *mpf_irq_table = + (struct dh_mpf_irq_table *)table->priv; + + irq_pool_free(mpf_irq_table->mpf_comp_pool); + irq_pool_free(mpf_irq_table->mpf_async_pool); +} + +/*todo*/ +static int32_t zxdh_get_total_vec(struct dh_core_dev *dev) +{ + return ZXDH_MPF_ASYNC_IRQ_NUM; +} + +int32_t dh_mpf_irq_table_create(struct dh_core_dev *dev) +{ + 
int32_t total_vec = 0; + int32_t err = 0; + + total_vec = zxdh_get_total_vec(dev); + + total_vec = pci_alloc_irq_vectors(dev->pdev, total_vec, total_vec, + PCI_IRQ_MSIX); + if (total_vec < 0) { + dh_err(dev, "pci_alloc_irq_vectors failed: %d\n", total_vec); + return total_vec; + } + + err = irq_pools_init(dev); + if (err != 0) { + pci_free_irq_vectors(dev->pdev); + } + + return err; +} + +void dh_mpf_irq_table_destroy(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + + /* There are cases where IRQs still will be in used when we reaching + * to here. Hence, making sure all the irqs are released. + */ + irq_pools_destroy(table); + pci_free_irq_vectors(dev->pdev); +} + +struct dh_irq *dh_mpf_async_irq_request(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + struct dh_mpf_irq_table *mpf_irq_table = + (struct dh_mpf_irq_table *)table->priv; + + struct dh_irq *irq = + zxdh_get_irq_of_pool(dev, mpf_irq_table->mpf_async_pool); + if (IS_ERR_OR_NULL(irq)) + dh_err(dev, "irq=0x%llx\r\n", (unsigned long long)irq); + dh_dbg(dev, "end\r\n"); + return irq; +} + +/* irq_table API */ +int32_t dh_mpf_irq_table_init(struct dh_core_dev *dev) +{ + struct dh_irq_table *irq_table; + struct dh_mpf_irq_table *mpf_irq_table = NULL; + + irq_table = &dev->irq_table; + + mpf_irq_table = kvzalloc(sizeof(*mpf_irq_table), GFP_KERNEL); + if (unlikely(mpf_irq_table == NULL)) { + return -ENOMEM; + } + + irq_table->priv = mpf_irq_table; + + return 0; +} diff --git a/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.h b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.h new file mode 100644 index 000000000000..2e9d0259bf03 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_mpf/en_mpf_irq.h @@ -0,0 +1,20 @@ +#ifndef __EN_MPF_IRQ_H__ +#define __EN_MPF_IRQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +struct dh_irq *dh_mpf_async_irq_request(struct dh_core_dev *dev); +void dh_mpf_irq_table_destroy(struct dh_core_dev *dev); 
+int32_t dh_mpf_irq_table_create(struct dh_core_dev *dev); +int32_t dh_mpf_irq_table_init(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_mpf/tc.c b/drivers/net/ethernet/dinghai/en_mpf/tc.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/net/ethernet/dinghai/en_np/.clang-format b/drivers/net/ethernet/dinghai/en_np/.clang-format new file mode 100644 index 000000000000..b2d29a76f951 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/.clang-format @@ -0,0 +1,176 @@ +--- +# 语言: None, Cpp, Java, JavaScript, ObjC, Proto, TableGen, TextProto +Language: Cpp +# BasedOnStyle: LLVM +# 访问说明符(public、private等)的偏移 +AccessModifierOffset: -4 +# 开括号(开圆括号、开尖括号、开方括号)后的对齐: Align, DontAlign, AlwaysBreak(总是在开括号后换行) +AlignAfterOpenBracket: Align +# 连续赋值时,对齐所有等号 +AlignConsecutiveAssignments: true +# 连续声明时,对齐所有声明的变量名 +AlignConsecutiveDeclarations: false +# 左对齐逃脱换行(使用反斜杠换行)的反斜杠 +AlignEscapedNewlinesLeft: true +# 水平对齐二元和三元表达式的操作数 +AlignOperands: true +# 对齐连续的尾随的注释 +AlignTrailingComments: true +# 允许函数声明的所有参数在放在下一行 +AllowAllParametersOfDeclarationOnNextLine: false +# 允许短的块放在同一行 +AllowShortBlocksOnASingleLine: false +# 允许短的case标签放在同一行 +AllowShortCaseLabelsOnASingleLine: false +# 允许短的函数放在同一行: None, InlineOnly(定义在类中), Empty(空函数), Inline(定义在类中,空函数), All +AllowShortFunctionsOnASingleLine: Empty +# 允许短的if语句保持在同一行 +AllowShortIfStatementsOnASingleLine: false +# 允许短的循环保持在同一行 +AllowShortLoopsOnASingleLine: false +# 总是在定义返回类型后换行(deprecated) +AlwaysBreakAfterDefinitionReturnType: None +# 总是在返回类型后换行: None, All, TopLevel(顶级函数,不包括在类中的函数), +# AllDefinitions(所有的定义,不包括声明), TopLevelDefinitions(所有的顶级函数的定义) +AlwaysBreakAfterReturnType: None +# 总是在多行string字面量前换行 +AlwaysBreakBeforeMultilineStrings: false +# 总是在template声明后换行 +AlwaysBreakTemplateDeclarations: true +# false表示函数实参要么都在同一行,要么都各自一行 +BinPackArguments: true +# false表示所有形参要么都在同一行,要么都各自一行 +BinPackParameters: true +# 
大括号换行,只有当BreakBeforeBraces设置为Custom时才有效 +BraceWrapping: + # class定义后面 + AfterClass: false + # 控制语句后面 + AfterControlStatement: false + # enum定义后面 + AfterEnum: false + # 函数定义后面 + AfterFunction: false + # 命名空间定义后面 + AfterNamespace: false + # ObjC定义后面 + AfterObjCDeclaration: false + # struct定义后面 + AfterStruct: false + # union定义后面 + AfterUnion: false + # catch之前 + BeforeCatch: true + # else之前 + BeforeElse: true + # 缩进大括号 + IndentBraces: false +# 在二元运算符前换行: None(在操作符后换行), NonAssignment(在非赋值的操作符前换行), All(在操作符前换行) +BreakBeforeBinaryOperators: NonAssignment +# 在大括号前换行: Attach(始终将大括号附加到周围的上下文), Linux(除函数、命名空间和类定义,与Attach类似), +# Mozilla(除枚举、函数、记录定义,与Attach类似), Stroustrup(除函数定义、catch、else,与Attach类似), +# Allman(总是在大括号前换行), GNU(总是在大括号前换行,并对于控制语句的大括号增加额外的缩进), WebKit(在函数前换行), Custom +# 注:这里认为语句块也属于函数 +BreakBeforeBraces: Allman +# 在三元运算符前换行 +BreakBeforeTernaryOperators: true +# 在构造函数的初始化列表的逗号前换行 +BreakConstructorInitializersBeforeComma: false +# 每行字符的限制,0表示没有限制 +ColumnLimit: 120 +# 描述具有特殊意义的注释的正则表达式,它不应该被分割为多行或以其它方式改变 +CommentPragmas: '^ IWYU pragma:' +# 构造函数的初始化列表要么都在同一行,要么都各自一行 +ConstructorInitializerAllOnOneLineOrOnePerLine: false +# 构造函数的初始化列表的缩进宽度 +ConstructorInitializerIndentWidth: 4 +# 延续的行的缩进宽度 +ContinuationIndentWidth: 4 +# 去除C++11的列表初始化的大括号{后和}前的空格 +Cpp11BracedListStyle: true +# 继承最常用的指针和引用的对齐方式 +DerivePointerAlignment: false +# 关闭格式化 +DisableFormat: false +# 自动检测函数的调用和定义是否被格式为每行一个参数(Experimental) +ExperimentalAutoDetectBinPacking: false +# 宏对齐 使用VSCode所带的clang-format 在linux下也可以支持这个属性:) +AlignConsecutiveMacros: true +# 需要被解读为foreach循环而不是函数调用的宏 +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +# 对#include进行排序,匹配了某正则表达式的#include拥有对应的优先级,匹配不到的则默认优先级为INT_MAX(优先级越小排序越靠前), +# 可以定义负数优先级从而保证某些#include永远在最前面 +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + - Regex: '^(<|"(gtest|isl|json)/)' + Priority: 3 + - Regex: '.*' + Priority: 1 +# 缩进case标签 +IndentCaseLabels: true +# 缩进宽度 +IndentWidth: 4 +# 函数返回类型换行时,缩进函数声明或函数定义的函数名 
+IndentWrappedFunctionNames: false +# 保留在块开始处的空行 +KeepEmptyLinesAtTheStartOfBlocks: true +# 开始一个块的宏的正则表达式 +MacroBlockBegin: '' +# 结束一个块的宏的正则表达式 +MacroBlockEnd: '' +# 连续空行的最大数量 +MaxEmptyLinesToKeep: 1 +# 命名空间的缩进: None, Inner(缩进嵌套的命名空间中的内容), All +NamespaceIndentation: Inner +# 使用ObjC块时缩进宽度 +ObjCBlockIndentWidth: 4 +# 在ObjC的@property后添加一个空格 +ObjCSpaceAfterProperty: false +# 在ObjC的protocol列表前添加一个空格 +ObjCSpaceBeforeProtocolList: true +# 在call(后对函数调用换行的penalty +PenaltyBreakBeforeFirstCallParameter: 19 +# 在一个注释中引入换行的penalty +PenaltyBreakComment: 300 +# 第一次在<<前换行的penalty +PenaltyBreakFirstLessLess: 120 +# 在一个字符串字面量中引入换行的penalty +PenaltyBreakString: 1000 +# 对于每个在行字符数限制之外的字符的penalty +PenaltyExcessCharacter: 1000000 +# 将函数的返回类型放到它自己的行的penalty +PenaltyReturnTypeOnItsOwnLine: 60 +# 指针和引用的对齐: Left, Right, Middle +PointerAlignment: Right +# 允许重新排版注释 +ReflowComments: true +# 允许排序#include +SortIncludes: false +# 在C风格类型转换后添加空格 +SpaceAfterCStyleCast: false +# 在赋值运算符之前添加空格 +SpaceBeforeAssignmentOperators: true +# 开圆括号之前添加一个空格: Never, ControlStatements, Always +SpaceBeforeParens: ControlStatements +# 在空的圆括号中添加空格 +SpaceInEmptyParentheses: false +# 在尾随的评论前添加的空格数(只适用于//) +SpacesBeforeTrailingComments: 2 +# 在尖括号的<后和>前添加空格 +SpacesInAngles: false +# 在容器(ObjC和JavaScript的数组和字典等)字面量中添加空格 +SpacesInContainerLiterals: false +# 在C风格类型转换的括号中添加空格 +SpacesInCStyleCastParentheses: false +# 在圆括号的(后和)前添加空格 +SpacesInParentheses: false +# 在方括号的[后和]前添加空格,lamda表达式和未指明大小的数组的声明不受影响 +SpacesInSquareBrackets: false +# 标准: Cpp03, Cpp11, Auto +Standard: Cpp03 +# tab宽度 +TabWidth: 4 +# 使用tab字符: Never, ForIndentation, ForContinuationAndIndentation, Always +UseTab: Never +... 
diff --git a/drivers/net/ethernet/dinghai/en_np/Makefile b/drivers/net/ethernet/dinghai/en_np/Makefile new file mode 100644 index 000000000000..08c650ab45ff --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/Makefile @@ -0,0 +1,24 @@ +subdirs := en_np/agent/ +subdirs += en_np/cmd/ +subdirs += en_np/comm/ +subdirs += en_np/driver/ +subdirs += en_np/init/ +subdirs += en_np/netlink/ +subdirs += en_np/sdk/ +subdirs += en_np/table/ +subdirs += en_np/qos/ +subdirs += en_np/fc/ +subdirs += en_np/flow/ + +#dinghai_root := $(CWD)/drivers/net/ethernet/dinghai +dinghai_root := $(src) +include $(dinghai_root)/en_np/Makefile.include + +src_files := +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(subdir)Kbuild.include) +obj_files := $(src_files:.c=.o) + +obj-$(CONFIG_DINGHAI_NP) += dinghai10e_np.o +dinghai10e_np-y := $(obj_files) +dinghai10e_np-y += log.o +# dinghai10e_np-$(CONFIG_DINGHAI_DH_CMD) += dh_cmd.o cmd/msg_chan_netlink.o cmd/msg_chan_lock.o diff --git a/drivers/net/ethernet/dinghai/en_np/Makefile.include b/drivers/net/ethernet/dinghai/en_np/Makefile.include new file mode 100644 index 000000000000..432a25360ea5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/Makefile.include @@ -0,0 +1,60 @@ +abs_include := -I$(dinghai_root)/en_np/agent/include +abs_include += -I$(dinghai_root)/en_np/cmd/include +abs_include += -I$(dinghai_root)/en_np/comm/include +abs_include += -I$(dinghai_root)/en_np/driver/include +abs_include += -I$(dinghai_root)/en_np/init/include +abs_include += -I$(dinghai_root)/en_np/netlink/include +abs_include += -I$(dinghai_root)/en_np/sdk/include/api +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/chip +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/init +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/cfg +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/dbg +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/ddos +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/dma 
+abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/nppu +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/oam +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/ppu +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/se +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/table/sdt +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/table/se +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/module/tm +abs_include += -I$(dinghai_root)/en_np/sdk/include/dev/reg +abs_include += -I$(dinghai_root)/en_np/sdk/include/diag +abs_include += -I$(dinghai_root)/en_np/table/include +abs_include += -I$(dinghai_root)/en_np/qos/include +abs_include += -I$(dinghai_root)/en_np/fc/include +abs_include += -I$(dinghai_root)/en_np/flow/api/include +abs_include += -I$(dinghai_root)/en_np/flow/common/include + +abs_include += -I$(dinghai_root)/../../../../include/linux/dinghai +# ZF_MPF delete -msse -msse2 +# ccflags-y += $(abs_include) -DMACRO_CPU64 -DDPP_FOR_PCIE -DDPP_FLOW_HW_INIT +ccflags-y += $(abs_include) -DMACRO_CPU64 -DDPP_FOR_PCIE + +# ifeq (${ARCH},arm64) +# ccflags-y += -DDPP_FOR_AARCH64 +# endif + +dpp-rm-files := *.ko +dpp-rm-files += *.mod.c +dpp-rm-files += *.symvers +dpp-rm-files += *.order +dpp-rm-files += *.o +dpp-rm-files += .*.cmd +dpp-rm-files += .tmp_versions +dpp-rm-files += *.mod +export dpp-rm-files + +clean := -f $(DPP_KO_MAKEFILE_DIR)/Makefile.clean obj + +# echo command. +# Short version is used, if $(quiet) equals `quiet_', otherwise full one. 
+echo-cmd = $(if $($(quiet)cmd_$(1)), echo ' $($(quiet)cmd_$(1))';) + +# sink stdout for 'make -s' + redirect := + quiet_redirect := +silent_redirect := exec >/dev/null; + +# printing commands +cmd = @set -e; $(echo-cmd) $($(quiet)redirect) $(cmd_$(1)) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/agent/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/agent/Kbuild.include new file mode 100644 index 000000000000..44387bfbd69e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/agent/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/agent/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_channel.h b/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_channel.h new file mode 100644 index 000000000000..58bc753d4056 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_channel.h @@ -0,0 +1,464 @@ +#ifndef DPP_AGENT_CHANNEL_H +#define DPP_AGENT_CHANNEL_H + +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_type_api.h" +#include "dpp_stat_api.h" +#include "dpp_stat_car.h" +#include "dpp_ppu.h" +#include "dpp_agent_se_res.h" + +#define BUFFER_LEN_MAX (256) +#define REG_REPS_LEN (8) +#define CHANNEL_REPS_LEN (4) +#define NP_AGENT_ID (16) +#define MSG_REP_OFFSET (4) +#define MSG_REP_VALID (0Xff) +#define MSG_REP_LEN_OFFSET (1) +#define SCHE_RSP_LEN (2) +#define SCHE_REQ_VALID (0Xffff) +#define PROFILEID_REQ_VALID (0Xffff) +#define EPID_LEVEL (4) +#define BAR_MSG_RETRY_MAX_TIME (10) + +#pragma pack(1) + +typedef enum dpp_tm_level { + Q_LEVEL = 0, + P_LEVEL, + S_LEVEL, + MAX_LEVEL +} TM_LEVEL_E; +typedef enum dpp_agent_msg_type { + DPP_REG_MSG = 0, + DPP_DTB_MSG, + DPP_TM_MSG, + DPP_PLCR_MSG, + DPP_PKTRX_IND_REG_RW_MSG, + DPP_PCIE_BAR_MSG, + DPP_RESET_MSG, + DPP_PXE_MSG, + DPP_TM_FLOW_SHAPE, + DPP_TM_TD, + DPP_TM_SE_SHAPE, + 
DPP_TM_PP_SHAPE, + DPP_PLCR_CAR_RATE, + DPP_PLCR_CAR_PKT_RATE, + DPP_PPU_THASH_RSK, + DPP_ACL_MSG, + DPP_STAT_MSG, + DPP_RES_MSG, + DPP_PSN_CFG_MSG, + DPP_MSG_MAX +} MSG_TYPE_E; + +typedef enum dpp_agent_msg_oper { + DPP_WR = 0, + DPP_RD, + DPP_WR_RD_MAX +} MSG_OPER_E; +typedef enum dpp_agent_msg_res { + RES_STD_NIC_MSG = 0, + RES_OFFLOAD_MSG, + RES_MAX_MSG +} MSG_RES_TYPE_E; + +typedef enum dpp_msg_dtb_oper { + QUEUE_REQUEST = 0, + QUEUE_RELEASE = 1, + QUEUE_SYNC_CFG = 2, +} MSG_DTB_OPER_E; + +typedef enum dpp_msg_tm_oper { + SEID_REQUEST = 0, + SEID_RELEASE = 1, + SEID_QUERY = 2 +} MSG_TM_OPER_E; + +typedef enum dpp_msg_plcr_oper { + PROFILEID_REQUEST = 0, + PROFILEID_RELEASE = 1, +} MSG_PLCR_OPER_E; +typedef enum dpp_cosq_sche_type { + FQ_SCHE = 0, + FQ2_SCHE = 1, + FQ4_SCHE = 2, + FQ8_SCHE = 3, + SP_SCHE = 4, + WFQ_SCHE = 5, + WFQ2_SCHE = 6, + WFQ4_SCHE = 7, + WFQ8_SCHE = 8, + FLOW_SCHE = 9, + SCHE_TYPE = 10 +} DPP_COSQ_SCHE_TYPE; + +typedef enum dpp_agent_msg_csflag { + DPP_CS_ADDR_FLAG = 0, + DPP_CS_REGNO_FLAG, + DPP_CS_FLAG_MAX +} MSG_CSFLAG_E; + +typedef enum dpp_ppu_thash_rsk_oper { + DPP_PPU_THASH_RSK_RD = 0, + DPP_PPU_THASH_RSK_WR, + DPP_PPU_THASH_RSK_MAX +} DPP_PPU_THASH_RSK_OPER_E; + +typedef enum dpp_pktrx_ind_reg_rw_oper { + DPP_PKTRX_IND_REG_RD = 0, + DPP_PKTRX_IND_REG_WR, + DPP_PKTRX_IND_REG_MAX +} DPP_PKTRX_IND_REG_RW_OPER_E; + +typedef enum dpp_msg_acl_index_oper { + ACL_INDEX_REQUEST = 0, /*申请一个index*/ + ACL_INDEX_RELEASE = 1, /*释放指定index*/ + ACL_INDEX_VPORT_REL = 2, /*释放vport下的所有index*/ + ACL_INDEX_ALL_REL = 3, /*释放所有的index*/ + ACL_INDEX_STAT_CLR = 4, /*释放指定vport下的index对应的统计项*/ + ACL_INDEX_MAX +} MSG_ACL_INDEX_OPER_E; +typedef enum dpp_se_res_oper { + HASH_FUNC_BULK_REQ = 0, + HASH_TBL_REQ = 1, + ERAM_TBL_REQ = 2, + ACL_TBL_REQ = 3, + LPM_TBL_REQ = 4, + DDR_TBL_REQ = 5, + STAT_CFG_REQ = 6, + RES_REQ_MAX +} MSG_SE_RES_OPER_E; + +typedef enum dpp_agent_pcie_bar { + BAR_MSG_NUM_REQ = 0, + PCIE_BAR_MAX +} MSG_PCIE_BAR_E; + +typedef enum 
dpp_psn_cfg_oper { + PSN_CFG_L2D_WR = 0, /*PSN配置到L2D*/ + PSN_CFG_L2D_RD = 1, /*从L2D中获取PSN*/ + PSN_CFG_OPR_MAX +} MSG_PSN_CFG_OPER_E; + +typedef struct dpp_agent_channel_reg_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 subtype; + ZXIC_UINT8 oper; + ZXIC_UINT32 reg_no; + ZXIC_UINT32 addr; + ZXIC_UINT32 val_len; + ZXIC_UINT32 val[32]; +} DPP_AGENT_CHANNEL_REG_MSG_T; + +typedef struct dpp_agent_channel_dtb_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; + ZXIC_UINT8 name[32]; + ZXIC_UINT32 vport; + ZXIC_UINT32 queue_id; +} DPP_AGENT_CHANNEL_DTB_MSG_T; +typedef struct dpp_agent_channel_tm_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 num; + ZXIC_UINT32 port; + ZXIC_UINT32 vport; + ZXIC_UINT32 sche_level; + ZXIC_UINT32 sche_type; + ZXIC_UINT32 se_id; +} DPP_AGENT_CHANNEL_TM_MSG_T; +typedef struct dpp_agent_channel_plcr_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; + ZXIC_UINT32 vport; + ZXIC_UINT32 car_type; + ZXIC_UINT32 profile_id; +} DPP_AGENT_CHANNEL_PLCR_MSG_T; + +typedef struct dpp_agent_tm_flow_shape_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 flow_id; + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 db_en; + ZXIC_UINT32 eir; + ZXIC_UINT32 ebs; +} DPP_AGENT_TM_FLOW_SHAPE_MSG_T; + +typedef struct dpp_agent_tm_td_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 level; + ZXIC_UINT32 id; + ZXIC_UINT32 td_th; +} DPP_AGENT_TM_TD_MSG_T; + +typedef struct dpp_agent_tm_se_shape_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 se_id; + ZXIC_UINT32 pir; + ZXIC_UINT32 pbs; + ZXIC_UINT32 db_en; + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; +} DPP_AGENT_TM_SE_SHAPE_MSG_T; + +typedef struct dpp_agent_tm_pp_shape_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 pp_port; + ZXIC_UINT32 
cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 c_en; +} DPP_AGENT_TM_PP_SHAPE_MSG_T; + +typedef struct dpp_agent_car_pkt_profile_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 car_level; + ZXIC_UINT32 profile_id; + ZXIC_UINT32 pkt_sign; + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 pri[DPP_CAR_PRI_MAX]; +} DPP_AGENT_CAR_PKT_PROFILE_MSG_T; + +typedef struct dpp_agent_car_profile_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 rsv; + ZXIC_UINT8 rsv1; + ZXIC_UINT32 car_level; + ZXIC_UINT32 profile_id; + ZXIC_UINT32 pkt_sign; + ZXIC_UINT32 cd; + ZXIC_UINT32 cf; + ZXIC_UINT32 cm; + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 eir; + ZXIC_UINT32 ebs; + ZXIC_UINT32 random_disc_e; + ZXIC_UINT32 random_disc_c; + ZXIC_UINT32 c_pri[DPP_CAR_PRI_MAX]; + ZXIC_UINT32 e_green_pri[DPP_CAR_PRI_MAX]; + ZXIC_UINT32 e_yellow_pri[DPP_CAR_PRI_MAX]; +} DPP_AGENT_CAR_PROFILE_MSG_T; + +typedef struct dpp_agent_ppu_thash_rsk_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; + ZXIC_UINT32 rsk_319_288; + ZXIC_UINT32 rsk_287_256; + ZXIC_UINT32 rsk_255_224; + ZXIC_UINT32 rsk_223_192; + ZXIC_UINT32 rsk_191_160; + ZXIC_UINT32 rsk_159_128; + ZXIC_UINT32 rsk_127_096; + ZXIC_UINT32 rsk_095_064; + ZXIC_UINT32 rsk_063_032; + ZXIC_UINT32 rsk_031_000; +} DPP_AGENT_PPU_THASH_RSK_MSG_T; + +typedef struct dpp_agent_pktrx_ind_reg_rw_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; + ZXIC_UINT32 mem_addr; + ZXIC_UINT32 mem_id; + ZXIC_UINT32 len; + ZXIC_UINT32 ind_data[8]; +} DPP_AGENT_PKTRX_IND_REG_RW_MSG_T; + +typedef struct dpp_agent_channel_acl_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; + ZXIC_UINT32 sdt_no; + ZXIC_UINT32 vport; + ZXIC_UINT32 index; + ZXIC_UINT32 counter_id; + ZXIC_UINT32 rd_mode; +} DPP_AGENT_CHANNEL_ACL_MSG_T; + +typedef struct dpp_agent_channel_stat_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 
rsv; + ZXIC_UINT32 counter_id; + ZXIC_UINT32 rd_mode; + ZXIC_UINT32 num; +} DPP_AGENT_CHANNEL_STAT_MSG_T; + +typedef struct dpp_agent_se_res_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 sub_type; + ZXIC_UINT8 oper; +} DPP_AGENT_SE_RES_MSG_T; + +typedef struct dpp_agent_channel_pcie_bar_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 rsv; +} DPP_AGENT_PCIE_BAR_MSG_T; + +typedef struct dpp_agent_channel_psn_cfg_msg { + ZXIC_UINT8 devId; + ZXIC_UINT8 type; + ZXIC_UINT8 oper; + ZXIC_UINT8 psn; +} DPP_AGENT_PSN_CFG_MSG_T; + +typedef struct dpp_agent_channel_msg { + ZXIC_UINT32 msg_len; + ZXIC_VOID *msg; +} DPP_AGENT_CHANNEL_MSG_T; + +#pragma pack() + +DPP_STATUS dpp_agent_channel_init(ZXIC_VOID); +DPP_STATUS dpp_agent_channel_exit(ZXIC_VOID); +DPP_STATUS dpp_agent_channel_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_MSG_T *pMsg, + ZXIC_UINT32 *pData, ZXIC_UINT32 rep_len); +DPP_STATUS dpp_agent_channel_reg_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_REG_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len); +DPP_STATUS dpp_agent_channel_reg_write(DPP_DEV_T *dev, ZXIC_UINT32 reg_type, + ZXIC_UINT32 reg_no, + ZXIC_UINT32 reg_width, ZXIC_UINT32 addr, + ZXIC_UINT32 *pData); +DPP_STATUS dpp_agent_channel_reg_read(DPP_DEV_T *dev, ZXIC_UINT32 reg_type, + ZXIC_UINT32 reg_no, ZXIC_UINT32 reg_width, + ZXIC_UINT32 addr, ZXIC_UINT32 *pData); +DPP_STATUS dpp_agent_channel_dtb_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_DTB_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len); +DPP_STATUS dpp_agent_channel_dtb_queue_request(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 vport_info, + ZXIC_UINT32 *p_queue_id); +DPP_STATUS dpp_agent_channel_dtb_queue_release(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 queue_id); +DPP_STATUS dpp_agent_channel_dtb_queue_sync_cfg(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 vport_info, + ZXIC_UINT32 queue_id); + +DPP_STATUS 
dpp_agent_channel_tm_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_TM_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len); +DPP_STATUS dpp_agent_channel_tm_seid_request(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 sche_level, + ZXIC_UINT32 sche_type, + ZXIC_UINT32 num, + ZXIC_UINT32 *p_se_id); +DPP_STATUS dpp_agent_channel_tm_seid_release(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 sche_level, + ZXIC_UINT32 sche_type, + ZXIC_UINT32 num, + ZXIC_UINT32 se_id); + +DPP_STATUS dpp_agent_channel_tm_base_node_get(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 *p_se_id); + +DPP_STATUS dpp_agent_channel_plcr_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_PLCR_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len); +DPP_STATUS dpp_agent_channel_plcr_profileid_request(DPP_DEV_T *dev, + ZXIC_UINT32 vport, + ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_profileid); +DPP_STATUS dpp_agent_channel_plcr_profileid_release(DPP_DEV_T *dev, + ZXIC_UINT32 vport, + ZXIC_UINT32 car_type, + ZXIC_UINT32 profileid); + +DPP_STATUS dpp_agent_channel_tm_flow_shape(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs); +DPP_STATUS dpp_agent_channel_tm_td_set(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th); +DPP_STATUS dpp_agent_channel_tm_se_shape(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs); +DPP_STATUS dpp_agent_channel_tm_port_shape(DPP_DEV_T *dev, ZXIC_UINT32 pp_port, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en); + +DPP_STATUS dpp_agent_channel_plcr_car_rate(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg); +DPP_STATUS dpp_agent_channel_ppu_thash_rsk(DPP_DEV_T *dev, + DPP_PPU_THASH_RSK_OPER_E oper, + DPP_PPU_PPU_COP_THASH_RSK_T *p_para); + +DPP_STATUS 
dpp_agent_channel_acl_index_request(DPP_DEV_T *dev, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 *p_index); +DPP_STATUS dpp_agent_channel_acl_index_release(DPP_DEV_T *dev, + ZXIC_UINT32 rel_type, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 index); +DPP_STATUS dpp_agent_channel_acl_stat_clr(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 rd_mode); +DPP_STATUS dpp_agent_channel_stat_clr(DPP_DEV_T *dev, ZXIC_UINT32 count_id, + ZXIC_UINT32 rd_mode, ZXIC_UINT32 num); +DPP_STATUS dpp_agent_channel_se_res_get(DPP_DEV_T *dev, ZXIC_UINT32 sub_type, + ZXIC_UINT32 opr, + ZXIC_UINT32 *p_rsp_buff, + ZXIC_UINT32 buff_size); +DPP_STATUS dpp_agent_channel_pcie_bar_request(DPP_DEV_T *dev, + ZXIC_UINT32 *p_bar_msg_num); +DPP_STATUS dpp_agent_channel_pktrx_ind_reg_rw(DPP_DEV_T *dev, + ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, + ZXIC_UINT32 oper, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_agent_channel_psn_cfg_l2d_write(DPP_DEV_T *dev, + ZXIC_UINT8 psn_cfg); +DPP_STATUS dpp_agent_channel_psn_cfg_l2d_read(DPP_DEV_T *dev, + ZXIC_UINT32 *p_psn_cfg); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_se_res.h b/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_se_res.h new file mode 100644 index 000000000000..7072524e3876 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/agent/include/dpp_agent_se_res.h @@ -0,0 +1,250 @@ +#ifndef DPP_AGENT_SE_RES_H +#define DPP_AGENT_SE_RES_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define HASH_FUNC_MAX_NUM (4) +#define HASH_BULK_MAX_NUM (32) +#define HASH_TABLE_MAX_NUM (38) +#define ERAM_MAX_NUM (60) +#define ETCAM_MAX_NUM (8) +#define DDR_MAX_NUM (50) +#define LPM_MAX_NUM (2) +#define STAT_ITEM_MAX_NUM (256) + +#define ETCAM_BLOCK_NUM (8) +#define SMMU0_LPM_AS_TBL_ID_NUM (8) + +#pragma pack(1) + +typedef struct sdt_tbl_eram_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 eram_mode; /** 
<@brief eRam返回位宽 */ + ZXIC_UINT32 eram_base_addr; /** <@brief eRam表项基地址,128bit为单位 */ + ZXIC_UINT32 eram_table_depth; /** <@brief 表项深度,作为越界检查使用 */ + ZXIC_UINT32 eram_clutch_en; /** <@brief 抓包使能 */ +} SDTTBL_ERAM_T; + +/** DDR3直接表SDT属性*/ +typedef struct sdt_tbl_ddr3_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 ddr3_base_addr; /** <@brief ddr 基地址 */ + ZXIC_UINT32 ddr3_share_type; /** <@brief ddr 共享类型 */ + ZXIC_UINT32 ddr3_rw_len; /** <@brief 表项返回/写入位宽 */ + ZXIC_UINT32 ddr3_sdt_num; /** <@brief SDT表号/复制信息ram的表号 */ + ZXIC_UINT32 ddr3_ecc_en; /** <@brief ecc使能 */ + ZXIC_UINT32 ddr3_clutch_en; /** <@brief 抓包使能 */ +} SDTTBL_DDR3_T; + +/** HASH表SDT属性*/ +typedef struct sdt_tbl_hash_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 hash_id; /** <@brief 访问hash的引擎 */ + ZXIC_UINT32 hash_table_width; /** <@brief hash 表项存储位宽 */ + ZXIC_UINT32 key_size; /** <@brief hash 键值长度 */ + ZXIC_UINT32 hash_table_id; /** <@brief hash 逻辑表号 */ + ZXIC_UINT32 learn_en; /** <@brief 硬件学习使能 */ + ZXIC_UINT32 keep_alive; /** <@brief 保活标志使能 */ + ZXIC_UINT32 keep_alive_baddr; /** <@brief 保活标志基地址 */ + ZXIC_UINT32 rsp_mode; /** <@brief 表项返回数据位宽 */ + ZXIC_UINT32 hash_clutch_en; /** <@brief 抓包使能 */ +} SDTTBL_HASH_T; + +/** LPM表SDT属性*/ +typedef struct sdt_tbl_lpm_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 lpm_v46_id; /** <@brief ipv4/ipv6标志 */ + ZXIC_UINT32 rsp_mode; /** <@brief 表项返回数据位宽 */ + ZXIC_UINT32 lpm_table_depth; /** <@brief 表项深度,越界检查 */ + ZXIC_UINT32 lpm_clutch_en; /** <@brief 抓包使能 */ +} SDTTBL_LPM_T; + +/** eTCAM表SDT属性*/ +typedef struct sdt_tbl_etcam_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 etcam_id; /** <@brief etcam通道 */ + ZXIC_UINT32 etcam_key_mode; /** <@brief etcam键值长度 */ + ZXIC_UINT32 etcam_table_id; /** <@brief etcam表项号 */ + ZXIC_UINT32 no_as_rsp_mode; /** <@brief handle模式返回位宽 */ + ZXIC_UINT32 as_en; /** <@brief 级联eram使能 */ + ZXIC_UINT32 as_eram_baddr; /** <@brief 级联eram基地址 */ + ZXIC_UINT32 as_rsp_mode; /** 
<@brief 级联返回位宽 */ + ZXIC_UINT32 etcam_table_depth; /** <@brief 表项深度,越界检查 */ + ZXIC_UINT32 etcam_clutch_en; /** <@brief 抓包使能 */ +} SDTTBL_ETCAM_T; + +typedef struct hash_func_res_t { + ZXIC_UINT32 func_id; + ZXIC_UINT32 zblk_num; + ZXIC_UINT32 zblk_bitmap; + ZXIC_UINT32 ddr_dis; +} HASH_FUNC_RES_T; + +typedef struct hash_bulk_res_t { + ZXIC_UINT32 func_id; + ZXIC_UINT32 bulk_id; + ZXIC_UINT32 zcell_num; + ZXIC_UINT32 zreg_num; + ZXIC_UINT32 ddr_baddr; + ZXIC_UINT32 ddr_item_num; + ZXIC_UINT32 ddr_width_mode; + ZXIC_UINT32 ddr_crc_sel; + ZXIC_UINT32 ddr_ecc_en; +} HASH_BULK_RES_T; + +typedef struct hash_table_t { + ZXIC_UINT32 sdtNo; + ZXIC_UINT32 sdt_partner; + SDTTBL_HASH_T hashSdt; + ZXIC_UINT32 tbl_flag; +} HASH_TABLE_T; + +typedef struct eram_table_t { + ZXIC_UINT32 sdtNo; + SDTTBL_ERAM_T eRamSdt; + ZXIC_UINT32 opr_mode; + ZXIC_UINT32 rd_mode; +} ERAM_TABLE_T; + +typedef struct ddr_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + SDTTBL_DDR3_T eDdrSdt; /** <@brief DDR属性*/ + ZXIC_UINT32 ddr_table_depth; /** <@brief DDR表项深度,单位与读写模式一致*/ +} DDR_TABLE_T; + +typedef struct acl_res_t { + ZXIC_UINT32 + pri_mode; /** <@brief1:显式优先级,2:隐式优先级,以条目下发顺序作为优先级,3:用户指定每个条目在tcam中的存放索引*/ + ZXIC_UINT32 entry_num; /** <@brief 可配置的条目数*/ + ZXIC_UINT32 block_num; /** <@brief 最大8个 */ + ZXIC_UINT32 block_index[ETCAM_BLOCK_NUM]; /** <@brief 0~7 */ +} ACL_RES_T; + +typedef struct acl_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + ZXIC_UINT32 + sdt_partner; /** <@brief sdt no 0~255,eram直接表维护acl index信息,不存在,则设置无效值-1(0xffffffff) */ + SDTTBL_ETCAM_T aclSdt; /** <@brief acl属性*/ + ACL_RES_T aclRes; /** <@brief acl资源*/ +} ACL_TABLE_T; + +typedef struct route_as_eram_t { + ZXIC_UINT32 baddr; /**< @brief LPM级联eRam结果基地址*/ + ZXIC_UINT32 + rsp_mode; /**< @brief LPM级联eRam结果位宽模式,取值参照ERAM128_TBL_MODE_E的定义*/ +} ROUTE_AS_ERAM_T; + +/** 前缀匹配路由级联片外DDR结果表属性 */ +typedef struct route_as_ddr_t { + ZXIC_UINT32 + baddr; /**< @brief 分配给级联ddr结果表空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + rsp_len; 
/**< @brief LPM级联DDR结果位宽模式,取值参照DPP_ROUTE_AS_RSP_LEN_E的定义*/ + ZXIC_UINT32 + ecc_en; /**< @brief 级联结果表DDR空间ECC校验使能标志: 0-不使能,1-使能*/ +} ROUTE_AS_DDR_T; + +typedef struct lpm_res_t { + ZXIC_UINT32 + pri_mode; /** <@brief1:显式优先级,2:隐式优先级,以条目下发顺序作为优先级,3:用户指定每个条目在tcam中的存放索引*/ + ZXIC_UINT32 entry_num; /** <@brief 可配置的条目数*/ + ZXIC_UINT32 block_num; /** <@brief 最大8个 */ + ZXIC_UINT32 block_index[ETCAM_BLOCK_NUM]; /** <@brief 0~7 */ +} LPM_RES_T; + +/* @param lpm_flags 配置信息 +*|0:eRam(第5bit)1:ddr| (第4bit) | (第3bit) | (第2bit) | (第1bit) | (第0bit) | +*| 级联结果表模式 | v6是否非线速模式 | v4是否非线速模式 | 是否v6片外查找 | 是否v4片外查找 | 是否使能级联结果表查找 |*/ +typedef struct route_res_t { + ZXIC_UINT32 lpm_flags; + ZXIC_UINT32 zblk_num; /**< @brief LPM ipv4和ipv6共享的zblock数目*/ + ZXIC_UINT32 zblk_bitmap; /**< @brief LPM ipv4和ipv6共享的bitmap*/ + ZXIC_UINT32 mono_ipv4_zblk_num; /**< @brief ipv4独占zblock数目*/ + ZXIC_UINT32 mono_ipv4_zblk_bitmap; /**< @brief ipv4独占zblock bitmap*/ + ZXIC_UINT32 mono_ipv6_zblk_num; /**< @brief ipv6独占zblock数目*/ + ZXIC_UINT32 mono_ipv6_zblk_bitmap; /**< @brief ipv6独占zblock bitmap*/ + ZXIC_UINT32 + ddr4_item_num; /**< @brief 分配给ipv4前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr4_baddr; /**< @brief 分配给ipv4前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr4_base_offset; /**< @brief ipv4前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr4_ecc_en; /**< @brief 固定配为1,分配给ipv4前缀查找的ddr存储空间的ECC校验使能标志*/ + ZXIC_UINT32 + ddr6_item_num; /**< @brief 分配给ipv6前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr6_baddr; /**< @brief 分配给ipv6前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr6_base_offset; /**< @brief ipv6前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr6_ecc_en; /**< @brief 固定配为1,分配给ipv4前缀查找的ddr存储空间的ECC校验使能标志*/ +} ROUTE_RES_T; + +typedef struct lpm_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + SDTTBL_LPM_T lpmSdt; /** <@brief lpm属性*/ + ROUTE_AS_ERAM_T as_eram_cfg + [SMMU0_LPM_AS_TBL_ID_NUM]; /**< @brief LPM级联eRam结果表空间属性*/ + ROUTE_AS_DDR_T as_ddr_cfg; /**< @brief LPM级联DDR结果表空间属性*/ 
+} LPM_TABLE_T; + +typedef struct se_hash_func_bulk_t { + ZXIC_UINT32 func_num; + ZXIC_UINT32 bulk_num; + HASH_FUNC_RES_T fun[HASH_FUNC_MAX_NUM]; + HASH_BULK_RES_T bulk[HASH_BULK_MAX_NUM]; +} SE_HASH_FUNC_BULK_T; + +typedef struct se_hash_tbl_t { + ZXIC_UINT32 tbl_num; + HASH_TABLE_T table[HASH_TABLE_MAX_NUM]; +} SE_HASH_TBL_T; + +typedef struct se_eram_tbl_t { + ZXIC_UINT32 tbl_num; + ERAM_TABLE_T eram[ERAM_MAX_NUM]; +} SE_ERAM_TBL_T; + +typedef struct se_acl_tbl_t { + ZXIC_UINT32 tbl_num; + ACL_TABLE_T acl[ETCAM_MAX_NUM]; +} SE_ACL_TBL_T; + +typedef struct se_ddr_tbl_t { + ZXIC_UINT32 tbl_num; + DDR_TABLE_T ddr[DDR_MAX_NUM]; +} SE_DDR_TBL_T; + +typedef struct se_lpm_tbl_t { + ZXIC_UINT32 tbl_num; /*最大个数为2*/ + LPM_TABLE_T lpm_res[LPM_MAX_NUM]; /*ipv4/ipv6资源*/ + ROUTE_RES_T glb_res; /*ipv4/ipv6公共资源*/ +} SE_LPM_TBL_T; + +typedef struct se_stat_cfg_t { + ZXIC_UINT32 eram_baddr; /*片内统计基地址,单位128bit*/ + ZXIC_UINT32 eram_depth; /*片内统计深度,单位128bit*/ + ZXIC_UINT32 ddr_baddr; /*片外统计基地址,单位2k*256bit*/ + ZXIC_UINT32 ppu_ddr_offset; /*片外DDR统计偏移,单位128bit,默认为0*/ +} SE_STAT_CFG_T; + +typedef struct zxdh_np_se_res_t { + SE_HASH_FUNC_BULK_T hash_func_bulk; + SE_HASH_TBL_T hash_tbl; + SE_ERAM_TBL_T eram_tbl; + SE_ACL_TBL_T acl_tbl; + SE_LPM_TBL_T lpm_tbl; + SE_DDR_TBL_T ddr_tbl; + SE_STAT_CFG_T stat_cfg; +} ZXDH_NP_SE_RES_T; + +typedef struct zxdh_np_res { + ZXDH_NP_SE_RES_T std_res; + ZXDH_NP_SE_RES_T offload_res; +} ZXDH_NP_RES; + +#pragma pack() + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/agent/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/agent/source/Kbuild.include new file mode 100644 index 000000000000..0de549029e4c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/agent/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/agent/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git 
a/drivers/net/ethernet/dinghai/en_np/agent/source/dpp_agent_channel.c b/drivers/net/ethernet/dinghai/en_np/agent/source/dpp_agent_channel.c new file mode 100644 index 000000000000..563893b03bec --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/agent/source/dpp_agent_channel.c @@ -0,0 +1,1361 @@ +#include "dpp_agent_channel.h" +#include "dh_cmd.h" +#include "dpp_dev.h" +#include "dpp_pktrx_api.h" + +DPP_STATUS dpp_agent_channel_init(void) +{ + // zxdh_bar_msg_chan_init(); + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_exit(void) +{ + // zxdh_bar_msg_chan_remove(); + return DPP_OK; +} + +static ZXIC_VOID dpp_agent_msg_prt(ZXIC_UINT8 type, ZXIC_UINT32 rtn) +{ + switch (rtn) { + case DPP_RC_CTRLCH_MSG_LEN_ZERO: { + ZXIC_COMM_TRACE_ERROR("type[%u]:msg len is zero!\n", type); + break; + } + case DPP_RC_CTRLCH_MSG_PRO_ERR: { + ZXIC_COMM_TRACE_ERROR("type[%u]:msg process error!\n", type); + break; + } + case DPP_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT: { + ZXIC_COMM_TRACE_ERROR("type[%u]:fw not support the msg!\n", + type); + break; + } + case DPP_RC_CTRLCH_MSG_OPER_NOT_SUPPORT: { + ZXIC_COMM_TRACE_ERROR( + "type[%u]:fw not support opr of the msg!\n", type); + break; + } + case DPP_RC_CTRLCH_MSG_DROP: { + ZXIC_COMM_TRACE_ERROR("type[%u]:fw not support,drop msg!\n", + type); + break; + } + default: + break; + } + return; +} +static DPP_STATUS dpp_agent_bar_msg_check(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_MSG_T *pMsg) +{ + ZXIC_UINT8 type = 0; + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + + type = *((ZXIC_UINT8 *)(pMsg->msg) + 1); + if (type != DPP_PCIE_BAR_MSG) { + if (type >= DEV_PCIE_BAR_MSG_NUM(dev)) { + ZXIC_COMM_TRACE_ERROR( + "type[%u] > fw_bar_msg_num[%u]!\n", type, + DEV_PCIE_BAR_MSG_NUM(dev)); + return DPP_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT; + } + } + + return DPP_OK; +} +DPP_STATUS dpp_agent_channel_reg_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_REG_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len) +{ + DPP_STATUS ret = DPP_OK; + 
DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + + agentMsg.msg = (ZXIC_VOID *)pMsg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_REG_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, pData, rep_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + ret = *pData; + if (DPP_OK != ret) { + ZXIC_COMM_TRACE_ERROR( + "%s: dpp_agent_channel_sync_send failed in buffer\n", + __FUNCTION__); + return DPP_ERR; + } + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_MSG_T *pMsg, + ZXIC_UINT32 *pData, ZXIC_UINT32 rep_len) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT8 *reply_ptr = NULL; + ZXIC_UINT8 retry_count = 0; + ZXIC_UINT16 reply_msg_len = 0; + ZXIC_UINT32 *recv_buffer = NULL; + + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + ZXIC_COMM_CHECK_POINT(pData); + + ret = dpp_agent_bar_msg_check(dev, pMsg); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_bar_msg_check"); + + recv_buffer = + (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(rep_len + CHANNEL_REPS_LEN); + ZXIC_COMM_CHECK_POINT(recv_buffer); + ZXIC_COMM_MEMSET(recv_buffer, 0, rep_len + CHANNEL_REPS_LEN); + + in.virt_addr = DEV_PCIE_MSG_ADDR(dev); + in.payload_addr = pMsg->msg; + in.payload_len = pMsg->msg_len; + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = NP_AGENT_ID; + in.src_pcieid = DEV_PCIE_ID(dev); + + result.buffer_len = rep_len + CHANNEL_REPS_LEN; + result.recv_buffer = recv_buffer; + + ZXIC_COMM_TRACE_DEBUG("%s: in.virt_addr 0x%llx.\n", __FUNCTION__, + in.virt_addr); + + do { + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret == BAR_MSG_ERR_LOCK_FAILED) { + retry_count++; + ZXIC_COMM_TRACE_INFO( + "zxdh_bar_chan_sync_msg_send return %d, retry %d times...\n", + ret, retry_count); + msleep(200); + } else { + break; + } + } while (retry_count < 
BAR_MSG_RETRY_MAX_TIME); + + if (retry_count >= BAR_MSG_RETRY_MAX_TIME) { + ZXIC_COMM_CHECK_RC_MEMORY_FREE( + ret, "zxdh_bar_chan_sync_msg_send", recv_buffer); + } + + if (ret == BAR_MSG_ERR_BAR_ABNORMAL) { + ret = ZXIC_PAR_CHK_BAR_ABNORMAL; + } + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "zxdh_bar_chan_sync_msg_send", + recv_buffer); + + reply_ptr = (ZXIC_UINT8 *)(result.recv_buffer); + if (MSG_REP_VALID == *reply_ptr) { + reply_msg_len = + *(ZXIC_UINT16 *)(reply_ptr + MSG_REP_LEN_OFFSET); + ZXIC_COMM_MEMCPY_S(pData, rep_len, reply_ptr + MSG_REP_OFFSET, + reply_msg_len); + + ZXIC_COMM_FREE(recv_buffer); + return DPP_OK; + } + + ZXIC_COMM_FREE(recv_buffer); + + ZXIC_COMM_TRACE_ERROR("%s: zxdh_bar_chan_sync_msg_send failed.\n", + __FUNCTION__); + + return DPP_ERR; +} + +DPP_STATUS dpp_agent_channel_reg_write(DPP_DEV_T *dev, ZXIC_UINT32 reg_type, + ZXIC_UINT32 reg_no, + ZXIC_UINT32 reg_width, ZXIC_UINT32 addr, + ZXIC_UINT32 *pData) +{ + DPP_STATUS ret = 0; + ZXIC_UINT32 resp_len = 0; + ZXIC_UINT8 *resp_buffer = NULL; + + DPP_AGENT_CHANNEL_REG_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pData); + + msgcfg.devId = 0; + msgcfg.type = DPP_REG_MSG; + msgcfg.subtype = reg_type; + msgcfg.oper = DPP_WR; + msgcfg.reg_no = reg_no; + msgcfg.addr = addr; + msgcfg.val_len = reg_width / 4; + memcpy(msgcfg.val, pData, reg_width); + + resp_len = reg_width + 4; + resp_buffer = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(resp_len); + ZXIC_COMM_CHECK_POINT(resp_buffer); + + memset(resp_buffer, 0, resp_len); + + ret = dpp_agent_channel_reg_sync_send( + dev, &msgcfg, (ZXIC_UINT32 *)resp_buffer, resp_len); + + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_agent_channel_reg_sync_send", + resp_buffer); + + if (DPP_OK != *((ZXIC_UINT32 *)resp_buffer)) { + ZXIC_COMM_TRACE_ERROR( + "%s: dpp_agent_channel_reg_sync_send failed in buffer\n", + __FUNCTION__); + ZXIC_COMM_FREE(resp_buffer); + return DPP_ERR; + } + + memcpy(pData, resp_buffer + 4, reg_width); + + 
ZXIC_COMM_FREE(resp_buffer); + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_reg_read(DPP_DEV_T *dev, ZXIC_UINT32 reg_type, + ZXIC_UINT32 reg_no, ZXIC_UINT32 reg_width, + ZXIC_UINT32 addr, ZXIC_UINT32 *pData) +{ + DPP_STATUS ret = 0; + ZXIC_UINT32 resp_len = 0; + ZXIC_UINT8 *resp_buffer = NULL; + + DPP_AGENT_CHANNEL_REG_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pData); + + msgcfg.devId = 0; + msgcfg.type = DPP_REG_MSG; + msgcfg.subtype = reg_type; + msgcfg.oper = DPP_RD; + msgcfg.reg_no = reg_no; + msgcfg.addr = addr; + msgcfg.val_len = reg_width / 4; + + resp_len = reg_width + 4; + resp_buffer = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(resp_len); + ZXIC_COMM_CHECK_POINT(resp_buffer); + + memset(resp_buffer, 0, resp_len); + + ret = dpp_agent_channel_reg_sync_send( + dev, &msgcfg, (ZXIC_UINT32 *)resp_buffer, resp_len); + + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_agent_channel_reg_sync_send", + resp_buffer); + + if (DPP_OK != *((ZXIC_UINT32 *)resp_buffer)) { + ZXIC_COMM_TRACE_ERROR( + "%s: dpp_agent_channel_reg_sync_send failed in buffer\n", + __FUNCTION__); + ZXIC_COMM_FREE(resp_buffer); + return DPP_ERR; + } + + memcpy(pData, resp_buffer + 4, reg_width); + + ZXIC_COMM_FREE(resp_buffer); + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_dtb_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_DTB_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len) +{ + DPP_STATUS ret = DPP_OK; + + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + ZXIC_COMM_CHECK_POINT(pData); + + agentMsg.msg = (ZXIC_VOID *)pMsg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_DTB_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, pData, rep_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_dtb_queue_request(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 vport_info, + ZXIC_UINT32 *p_queue_id) +{ + 
DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 queue_id = 0; + + DPP_AGENT_CHANNEL_DTB_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = DEV_ID(dev); + msgcfg.type = DPP_DTB_MSG; + msgcfg.oper = QUEUE_REQUEST; + ZXIC_COMM_MEMCPY(msgcfg.name, p_name, ZXIC_COMM_STRLEN(p_name)); + msgcfg.vport = vport_info; + + ZXIC_COMM_TRACE_INFO("%s: msgcfg.name = %s.\n", __FUNCTION__, + msgcfg.name); + + ret = dpp_agent_channel_dtb_sync_send(dev, &msgcfg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_dtb_sync_send"); + + msg_result = rsp_buff[0]; + queue_id = rsp_buff[1]; + + ZXIC_COMM_TRACE_INFO("%s: msg_result: %d.\n", __FUNCTION__, msg_result); + ZXIC_COMM_TRACE_INFO("%s: queue_id: %d.\n", __FUNCTION__, queue_id); + + *p_queue_id = queue_id; + + return msg_result; +} + +DPP_STATUS dpp_agent_channel_dtb_queue_release(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 queue_id) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + + DPP_AGENT_CHANNEL_DTB_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = DEV_ID(dev); + msgcfg.type = DPP_DTB_MSG; + msgcfg.oper = QUEUE_RELEASE; + msgcfg.queue_id = queue_id; + ZXIC_COMM_MEMCPY(msgcfg.name, p_name, ZXIC_COMM_STRLEN(p_name)); + + ZXIC_COMM_TRACE_INFO("%s: msgcfg.name = %s.\n", __FUNCTION__, + msgcfg.name); + + ret = dpp_agent_channel_dtb_sync_send(dev, &msgcfg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_dtb_sync_send"); + + msg_result = rsp_buff[0]; + ZXIC_COMM_TRACE_INFO("%s: msg_result: %d.\n", __FUNCTION__, msg_result); + + return msg_result; +} + +DPP_STATUS dpp_agent_channel_dtb_queue_sync_cfg(DPP_DEV_T *dev, + ZXIC_CONST ZXIC_UINT8 *p_name, + ZXIC_UINT32 vport_info, + ZXIC_UINT32 queue_id) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + ZXIC_UINT32 msg_result = 0; 
+ + DPP_AGENT_CHANNEL_DTB_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = DEV_ID(dev); + msgcfg.type = DPP_DTB_MSG; + msgcfg.oper = QUEUE_SYNC_CFG; + msgcfg.queue_id = queue_id; + ZXIC_COMM_MEMCPY(msgcfg.name, p_name, ZXIC_COMM_STRLEN(p_name)); + msgcfg.vport = vport_info; + + ZXIC_COMM_TRACE_INFO("%s: msgcfg.name = %s.\n", __FUNCTION__, + msgcfg.name); + + ret = dpp_agent_channel_dtb_sync_send(dev, &msgcfg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_dtb_sync_send"); + + msg_result = rsp_buff[0]; + + ZXIC_COMM_TRACE_INFO("%s: msg_result: %d.\n", __FUNCTION__, msg_result); + ZXIC_COMM_TRACE_INFO("%s: queue_id: %d.\n", __FUNCTION__, queue_id); + + return msg_result; +} + +DPP_STATUS dpp_agent_channel_tm_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_TM_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len) +{ + DPP_STATUS ret = DPP_OK; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + ZXIC_COMM_CHECK_POINT(pData); + + agentMsg.msg = (ZXIC_VOID *)pMsg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_TM_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, pData, rep_len); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + // ret = *(ZXIC_UINT8 *)pData; + // if (DPP_OK != ret) + // { + // ZXIC_COMM_TRACE_ERROR("%s: dpp_agent_channel_tm_sync_send failed in buffer\n", __FUNCTION__); + // return DPP_ERR; + // } + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_tm_seid_request(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 sche_level, + ZXIC_UINT32 sche_type, + ZXIC_UINT32 num, + ZXIC_UINT32 *p_se_id) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + + DPP_AGENT_CHANNEL_TM_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_se_id); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_MSG; + msgcfg.oper = SEID_REQUEST; + msgcfg.port = port; + 
msgcfg.vport = vport; + msgcfg.sche_level = sche_level; + msgcfg.sche_type = sche_type; + msgcfg.num = num; + msgcfg.se_id = SCHE_REQ_VALID; + + if (FLOW_SCHE != sche_type) { + msgcfg.num = 1; + } + + ret = dpp_agent_channel_tm_sync_send(dev, &msgcfg, resp_buffer, + sizeof(resp_buffer)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + memcpy(p_se_id, resp_buffer, sizeof(ZXIC_UINT32) * SCHE_RSP_LEN); + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_seid_release(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 sche_level, + ZXIC_UINT32 sche_type, + ZXIC_UINT32 num, ZXIC_UINT32 se_id) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + + DPP_AGENT_CHANNEL_TM_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_MSG; + msgcfg.oper = SEID_RELEASE; + msgcfg.port = port; + msgcfg.vport = vport; + msgcfg.sche_level = sche_level; + msgcfg.sche_type = sche_type; + msgcfg.num = num; + msgcfg.se_id = se_id; + + if (FLOW_SCHE != sche_type) { + msgcfg.num = 1; + } + + ret = dpp_agent_channel_tm_sync_send(dev, &msgcfg, resp_buffer, + sizeof(resp_buffer)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_base_node_get(DPP_DEV_T *dev, ZXIC_UINT32 port, + ZXIC_UINT32 vport, + ZXIC_UINT32 *p_se_id) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + + DPP_AGENT_CHANNEL_TM_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_se_id); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_MSG; + msgcfg.oper = SEID_QUERY; + msgcfg.port = port; + msgcfg.vport = vport; + msgcfg.sche_level = EPID_LEVEL; + msgcfg.sche_type = WFQ_SCHE; + msgcfg.num = 1; + msgcfg.se_id = SCHE_REQ_VALID; + + ret = dpp_agent_channel_tm_sync_send(dev, &msgcfg, resp_buffer, + sizeof(resp_buffer)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, 
"dpp_agent_channel_tm_sync_send"); + + memcpy(p_se_id, resp_buffer, sizeof(ZXIC_UINT32) * SCHE_RSP_LEN); + + return ret; +} + +DPP_STATUS dpp_agent_channel_plcr_sync_send(DPP_DEV_T *dev, + DPP_AGENT_CHANNEL_PLCR_MSG_T *pMsg, + ZXIC_UINT32 *pData, + ZXIC_UINT32 rep_len) +{ + DPP_STATUS ret = DPP_OK; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pMsg); + + agentMsg.msg = (ZXIC_VOID *)pMsg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_PLCR_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, pData, rep_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + // ret = *(ZXIC_UINT8*)pData; + // if (DPP_OK != ret) + // { + // ZXIC_COMM_TRACE_ERROR("%s: dpp_agent_channel_sync_send failed in buffer\n", __FUNCTION__); + // return DPP_ERR; + // } + + return DPP_OK; +} + +DPP_STATUS dpp_agent_channel_plcr_profileid_request(DPP_DEV_T *dev, + ZXIC_UINT32 vport, + ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_profileid) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + + DPP_AGENT_CHANNEL_PLCR_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_profileid); + + msgcfg.devId = 0; + msgcfg.type = DPP_PLCR_MSG; + msgcfg.oper = PROFILEID_REQUEST; + msgcfg.vport = vport; + msgcfg.car_type = car_type; + msgcfg.profile_id = PROFILEID_REQ_VALID; + + ret = dpp_agent_channel_plcr_sync_send(dev, &msgcfg, resp_buffer, + sizeof(resp_buffer)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_plcr_sync_send"); + + memcpy(p_profileid, resp_buffer, sizeof(ZXIC_UINT32) * SCHE_RSP_LEN); + + return ret; +} + +DPP_STATUS dpp_agent_channel_plcr_profileid_release(DPP_DEV_T *dev, + ZXIC_UINT32 vport, + ZXIC_UINT32 car_type, + ZXIC_UINT32 profileid) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + + DPP_AGENT_CHANNEL_PLCR_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_PLCR_MSG; + msgcfg.oper = 
PROFILEID_RELEASE; + msgcfg.vport = vport; + msgcfg.car_type = car_type; + msgcfg.profile_id = profileid; + + ret = dpp_agent_channel_plcr_sync_send(dev, &msgcfg, resp_buffer, + sizeof(resp_buffer)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_plcr_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_flow_shape(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + ZXIC_UINT32 resp_len = 8; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_TM_FLOW_SHAPE_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_FLOW_SHAPE; + msgcfg.flow_id = flow_id; + msgcfg.cir = cir; + msgcfg.cbs = cbs; + msgcfg.db_en = db_en; + msgcfg.eir = eir; + msgcfg.ebs = ebs; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_TM_FLOW_SHAPE_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_td_set(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + ZXIC_UINT32 resp_len = 8; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_TM_TD_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_TD; + msgcfg.level = level; + msgcfg.id = id; + msgcfg.td_th = td_th; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_TM_TD_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + 
ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_se_shape(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + ZXIC_UINT32 resp_len = 8; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_TM_SE_SHAPE_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_SE_SHAPE; + msgcfg.se_id = se_id; + msgcfg.pir = pir; + msgcfg.pbs = pbs; + msgcfg.db_en = db_en; + msgcfg.cir = cir; + msgcfg.cbs = cbs; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_TM_SE_SHAPE_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS dpp_agent_channel_tm_port_shape(DPP_DEV_T *dev, ZXIC_UINT32 pp_port, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + ZXIC_UINT32 resp_len = 8; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_TM_PP_SHAPE_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + msgcfg.devId = 0; + msgcfg.type = DPP_TM_PP_SHAPE; + msgcfg.pp_port = pp_port; + msgcfg.cir = cir; + msgcfg.cbs = cbs; + msgcfg.c_en = c_en; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_TM_PP_SHAPE_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_sync_send"); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + + return ret; +} + +DPP_STATUS 
dpp_agent_channel_plcr_car_rate(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer[2] = { 0 }; + ZXIC_UINT32 resp_len = 8; + ZXIC_UINT32 i = 0; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_CAR_PKT_PROFILE_MSG_T msgpktcfg = { 0 }; + DPP_AGENT_CAR_PROFILE_MSG_T msgcfg = { 0 }; + DPP_STAT_CAR_PROFILE_CFG_T *p_stat_car_profile_cfg = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_stat_pkt_car_profile_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + if ((STAT_CAR_A_TYPE == car_type) && (1 == pkt_sign)) { + p_stat_pkt_car_profile_cfg = + (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)p_car_profile_cfg; + msgpktcfg.devId = 0; + msgpktcfg.type = DPP_PLCR_CAR_PKT_RATE; + msgpktcfg.car_level = car_type; + msgpktcfg.cir = p_stat_pkt_car_profile_cfg->cir; + msgpktcfg.cbs = p_stat_pkt_car_profile_cfg->cbs; + msgpktcfg.profile_id = p_stat_pkt_car_profile_cfg->profile_id; + msgpktcfg.pkt_sign = p_stat_pkt_car_profile_cfg->pkt_sign; + for (i = 0; i < DPP_CAR_PRI_MAX; i++) { + msgpktcfg.pri[i] = p_stat_pkt_car_profile_cfg->pri[i]; + } + + agentMsg.msg = (ZXIC_VOID *)&msgpktcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CAR_PKT_PROFILE_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + } else { + p_stat_car_profile_cfg = + (DPP_STAT_CAR_PROFILE_CFG_T *)p_car_profile_cfg; + msgcfg.devId = 0; + msgcfg.type = DPP_PLCR_CAR_RATE; + msgcfg.car_level = car_type; + msgcfg.cir = p_stat_car_profile_cfg->cir; + msgcfg.cbs = p_stat_car_profile_cfg->cbs; + msgcfg.profile_id = p_stat_car_profile_cfg->profile_id; + msgcfg.pkt_sign = p_stat_car_profile_cfg->pkt_sign; + msgcfg.cd = p_stat_car_profile_cfg->cd; + msgcfg.cf = p_stat_car_profile_cfg->cf; + msgcfg.cm = 
p_stat_car_profile_cfg->cm; + msgcfg.cir = p_stat_car_profile_cfg->cir; + msgcfg.cbs = p_stat_car_profile_cfg->cbs; + msgcfg.eir = p_stat_car_profile_cfg->eir; + msgcfg.ebs = p_stat_car_profile_cfg->ebs; + msgcfg.random_disc_e = p_stat_car_profile_cfg->random_disc_e; + msgcfg.random_disc_c = p_stat_car_profile_cfg->random_disc_c; + for (i = 0; i < DPP_CAR_PRI_MAX; i++) { + msgcfg.c_pri[i] = p_stat_car_profile_cfg->c_pri[i]; + msgcfg.e_green_pri[i] = + p_stat_car_profile_cfg->e_green_pri[i]; + msgcfg.e_yellow_pri[i] = + p_stat_car_profile_cfg->e_yellow_pri[i]; + } + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CAR_PROFILE_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, resp_buffer, + resp_len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + //ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_agent_channel_tm_sync_send"); + + ret = *(ZXIC_UINT8 *)resp_buffer; + } + + return ret; +} + +DPP_STATUS dpp_agent_channel_ppu_thash_rsk(DPP_DEV_T *dev, + DPP_PPU_THASH_RSK_OPER_E oper, + DPP_PPU_PPU_COP_THASH_RSK_T *p_para) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer = 0; + DPP_PPU_PPU_COP_THASH_RSK_T thash = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_PPU_THASH_RSK_MSG_T msgcfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_para); + ZXIC_COMM_CHECK_INDEX(oper, DPP_PPU_THASH_RSK_RD, DPP_PPU_THASH_RSK_WR); + + switch (oper) { + case DPP_PPU_THASH_RSK_RD: + msgcfg.devId = 0; + msgcfg.type = DPP_PPU_THASH_RSK; + msgcfg.oper = oper; + msgcfg.rsv = 0; + msgcfg.rsk_031_000 = 0; + msgcfg.rsk_063_032 = 0; + msgcfg.rsk_095_064 = 0; + msgcfg.rsk_127_096 = 0; + msgcfg.rsk_159_128 = 0; + msgcfg.rsk_191_160 = 0; + msgcfg.rsk_223_192 = 0; + msgcfg.rsk_255_224 = 0; + msgcfg.rsk_287_256 = 0; + msgcfg.rsk_319_288 = 0; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_PPU_THASH_RSK_MSG_T); + + ret = dpp_agent_channel_sync_send( + dev, &agentMsg, 
(ZXIC_UINT32 *)&thash, + sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + memcpy(p_para, &thash, sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + break; + + case DPP_PPU_THASH_RSK_WR: + msgcfg.devId = 0; + msgcfg.type = DPP_PPU_THASH_RSK; + msgcfg.oper = oper; + msgcfg.rsv = 0; + msgcfg.rsk_031_000 = p_para->rsk_031_000; + msgcfg.rsk_063_032 = p_para->rsk_063_032; + msgcfg.rsk_095_064 = p_para->rsk_095_064; + msgcfg.rsk_127_096 = p_para->rsk_127_096; + msgcfg.rsk_159_128 = p_para->rsk_159_128; + msgcfg.rsk_191_160 = p_para->rsk_191_160; + msgcfg.rsk_223_192 = p_para->rsk_223_192; + msgcfg.rsk_255_224 = p_para->rsk_255_224; + msgcfg.rsk_287_256 = p_para->rsk_287_256; + msgcfg.rsk_319_288 = p_para->rsk_319_288; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_PPU_THASH_RSK_MSG_T); + + ret = dpp_agent_channel_sync_send(dev, &agentMsg, &resp_buffer, + sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + ret = resp_buffer; + break; + + default: + ZXIC_COMM_TRACE_ERROR( + "The message to ppu_thash_rsk is not defined\n"); + ret = DPP_ERR; + break; + } + + return ret; +} + +DPP_STATUS dpp_agent_channel_pktrx_ind_reg_rw(DPP_DEV_T *dev, + ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, + ZXIC_UINT32 oper, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS ret = DPP_OK; + ZXIC_UINT32 resp_buffer = 0; + ZXIC_UINT32 oper_len = 0; + ZXIC_UINT32 data[8] = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + DPP_AGENT_PKTRX_IND_REG_RW_MSG_T pktrx_ind_msg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_data); + ZXIC_COMM_CHECK_INDEX(mem_id, 0, (MEM_ID_MUX_NUM - 1)); + ZXIC_COMM_CHECK_INDEX(len, 1, 4 * 8); + ZXIC_COMM_CHECK_INDEX(mem_addr, 0, (1 << 12) - 1); + ZXIC_COMM_CHECK_INDEX(oper, DPP_PKTRX_IND_REG_RD, DPP_PKTRX_IND_REG_WR); + + ZXIC_COMM_MEMSET_S(&pktrx_ind_msg, + sizeof(DPP_AGENT_PKTRX_IND_REG_RW_MSG_T), 0, + 
sizeof(DPP_AGENT_PKTRX_IND_REG_RW_MSG_T)); + + oper_len = (len % 4 != 0) ? (len / 4 + 1) : (len / 4); + + pktrx_ind_msg.devId = 0; + pktrx_ind_msg.type = DPP_PKTRX_IND_REG_RW_MSG; + pktrx_ind_msg.oper = oper; + pktrx_ind_msg.rsv = 0; + pktrx_ind_msg.mem_addr = mem_addr; + pktrx_ind_msg.mem_id = mem_id; + pktrx_ind_msg.len = len; + + agentMsg.msg = (ZXIC_VOID *)&pktrx_ind_msg; + agentMsg.msg_len = sizeof(DPP_AGENT_PKTRX_IND_REG_RW_MSG_T); + + switch (oper) { + case DPP_PKTRX_IND_REG_RD: + ret = dpp_agent_channel_sync_send(dev, &agentMsg, data, 32); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + ZXIC_COMM_MEMCPY_S(p_data, oper_len * 4, data, oper_len * 4); + break; + + case DPP_PKTRX_IND_REG_WR: + ZXIC_COMM_MEMCPY_S(pktrx_ind_msg.ind_data, 32, p_data, + oper_len * 4); + ret = dpp_agent_channel_sync_send(dev, &agentMsg, &resp_buffer, + sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, + "dpp_agent_channel_sync_send"); + + ret = resp_buffer; + break; + + default: + ZXIC_COMM_TRACE_ERROR( + "The message to ppu_thash_rsk is not defined\n"); + ret = DPP_ERR; + break; + } + + return ret; +} + +/***********************************************************/ +/** 通过代理通道申请acl index +* @param dev 设备 +* @param sdt_no sdt号 +* @param vport 端口号 +* @param p_index 出参,申请到的acl index +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_agent_channel_acl_index_request(DPP_DEV_T *dev, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 *p_index) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 acl_index = 0; + DPP_AGENT_CHANNEL_ACL_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index); + + 
msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_ACL_MSG; + msgcfg.oper = ACL_INDEX_REQUEST; + msgcfg.vport = vport; + msgcfg.sdt_no = sdt_no; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_ACL_MSG_T); + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = rsp_buff[0]; + acl_index = rsp_buff[1]; + + ZXIC_COMM_TRACE_INFO("dev_id: %d, msg_result: %d\n", dev_id, + msg_result); + ZXIC_COMM_TRACE_INFO("dev_id: %d, acl_index: %d\n", dev_id, acl_index); + + *p_index = acl_index; + + return msg_result; +} + +/***********************************************************/ +/** 通过代理通道释放acl index +* @param dev 设备 +* @param rel_type 释放类型,详见MSG_ACL_INDEX_OPER_E +* @param sdt_no sdt号 +* @param vport 端口号 +* @param p_index 指定释放的index +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_agent_channel_acl_index_release(DPP_DEV_T *dev, + ZXIC_UINT32 rel_type, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + DPP_AGENT_CHANNEL_ACL_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_ACL_MSG; + msgcfg.oper = rel_type; + msgcfg.index = index; + msgcfg.sdt_no = sdt_no; + msgcfg.vport = vport; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_ACL_MSG_T); + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = rsp_buff[0]; + ZXIC_COMM_TRACE_INFO("msg_result: %d\n", 
msg_result); + + return msg_result; +} + +/***********************************************************/ +/** 发送消息给riscv读清stat +* @param dev 设备 +* @param count_id 统计编号,对应微码中的address +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param num 连续读清的统计个数 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_agent_channel_stat_clr(DPP_DEV_T *dev, ZXIC_UINT32 count_id, + ZXIC_UINT32 rd_mode, ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + DPP_AGENT_CHANNEL_STAT_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_STAT_MSG; + msgcfg.oper = 0; + msgcfg.counter_id = count_id; + msgcfg.rd_mode = rd_mode; + msgcfg.num = num; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_STAT_MSG_T); + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = rsp_buff[0]; + ZXIC_COMM_TRACE_INFO("msg_result: %d\n", msg_result); + + return msg_result; +} + +/***********************************************************/ +/** 读清acl表项对应的stat表项 +* @param dev 设备,支持多芯片 +* @param sdt_no sdt号 +* @param vport 端口号 +* @param counter_id 统计编号,对应微码中的address +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_agent_channel_acl_stat_clr(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + + ZXIC_UINT32 msg_result = 0; + 
ZXIC_UINT32 rsp_buff[2] = { 0 }; + DPP_AGENT_CHANNEL_ACL_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_ACL_MSG; + msgcfg.oper = ACL_INDEX_STAT_CLR; + msgcfg.sdt_no = sdt_no; + msgcfg.vport = vport; + msgcfg.counter_id = counter_id; + msgcfg.rd_mode = rd_mode; + + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_CHANNEL_ACL_MSG_T); + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = rsp_buff[0]; + ZXIC_COMM_TRACE_INFO("msg_result: %d\n", msg_result); + + return msg_result; +} +/***********************************************************/ +/** 从固件获取流表资源 +* @param dev 设备,支持多芯片 +* @param sub_type 标卡资源或者非标卡资源MSG_RES_TYPE_E +* @param opr 请求资源类型MSG_SE_RES_OPER_E +* @param p_rsp_buff 出参,rsp[0]+se_res +* @param buff_size 资源缓存大小 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_agent_channel_se_res_get(DPP_DEV_T *dev, ZXIC_UINT32 sub_type, + ZXIC_UINT32 opr, + ZXIC_UINT32 *p_rsp_buff, + ZXIC_UINT32 buff_size) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 msg_result = 0; + DPP_AGENT_SE_RES_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(opr, 0, RES_REQ_MAX - 1); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(p_rsp_buff); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_RES_MSG; + msgcfg.sub_type = sub_type; + msgcfg.oper = opr; + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_SE_RES_MSG_T); + + rc = 
dpp_agent_channel_sync_send(dev, &agentMsg, p_rsp_buff, buff_size); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = p_rsp_buff[0]; + ZXIC_COMM_TRACE_INFO("msg_result: %d\n", msg_result); + dpp_agent_msg_prt(msgcfg.type, msg_result); + + return msg_result; +} + +/***********************************************************/ +/** 通过代理通道获取bar消息个数 +* @param dev 设备 +* @param p_pcie_bar_num 出参,bar消息个数 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/16 +************************************************************/ +DPP_STATUS dpp_agent_channel_pcie_bar_request(DPP_DEV_T *dev, + ZXIC_UINT32 *p_bar_msg_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + ZXIC_UINT32 msg_result = 0; + ZXIC_UINT32 bar_msg_num = 0; + DPP_AGENT_PCIE_BAR_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_bar_msg_num); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_PCIE_BAR_MSG; + msgcfg.oper = BAR_MSG_NUM_REQ; + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_PCIE_BAR_MSG_T); + + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_dtb_sync_send"); + + msg_result = rsp_buff[0]; + bar_msg_num = rsp_buff[1]; + ZXIC_COMM_TRACE_INFO("dev_id: %d, msg_result: %d\n", dev_id, + msg_result); + ZXIC_COMM_TRACE_INFO("dev_id: %d, bar_num: %d\n", dev_id, bar_msg_num); + dpp_agent_msg_prt(msgcfg.type, msg_result); + + *p_bar_msg_num = bar_msg_num; + + return msg_result; +} + +/***********************************************************/ +/** 通过代理通道把双平面配置写入L2D +* @param dev 设备 +* @param psn_cfg 双平面配置 +* @return +* @remark 无 +* @see +* @author cbb @date 2024/11/16 
+************************************************************/ +DPP_STATUS dpp_agent_channel_psn_cfg_l2d_write(DPP_DEV_T *dev, + ZXIC_UINT8 psn_cfg) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 msg_result = 0; + DPP_AGENT_PSN_CFG_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_PSN_CFG_MSG; + msgcfg.oper = PSN_CFG_L2D_WR; + msgcfg.psn = psn_cfg; + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_PSN_CFG_MSG_T); + + rc = dpp_agent_channel_sync_send(dev, &agentMsg, &msg_result, + ZXIC_SIZEOF(msg_result)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + ZXIC_COMM_TRACE_INFO("dev_id: %d, msg_result: %d\n", dev_id, + msg_result); + dpp_agent_msg_prt(msgcfg.type, msg_result); + + return msg_result; +} + +/***********************************************************/ +/** 通过代理通道从L2D中读出双平面配置 +* @param dev 设备 +* @param psn_cfg 双平面配置 +* @return +* @remark 无 +* @see +* @author cbb @date 2024/11/16 +************************************************************/ +DPP_STATUS dpp_agent_channel_psn_cfg_l2d_read(DPP_DEV_T *dev, + ZXIC_UINT32 *p_psn_cfg) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 rsp_buff[2] = { 0 }; + ZXIC_UINT32 msg_result = 0; + DPP_AGENT_PSN_CFG_MSG_T msgcfg = { 0 }; + DPP_AGENT_CHANNEL_MSG_T agentMsg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_psn_cfg); + + msgcfg.devId = 0; //在ricv上使用,只有1个np + msgcfg.type = DPP_PSN_CFG_MSG; + msgcfg.oper = PSN_CFG_L2D_RD; + agentMsg.msg = (ZXIC_VOID *)&msgcfg; + agentMsg.msg_len = sizeof(DPP_AGENT_PSN_CFG_MSG_T); + + rc = dpp_agent_channel_sync_send(dev, &agentMsg, rsp_buff, + ZXIC_SIZEOF(rsp_buff)); + 
ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_agent_channel_sync_send"); + + msg_result = rsp_buff[0]; + ZXIC_COMM_TRACE_NOTICE("dev_id: %d, msg_result: %d\n", dev_id, + msg_result); + + dpp_agent_msg_prt(msgcfg.type, msg_result); + + *p_psn_cfg = rsp_buff[1]; + + ZXIC_COMM_TRACE_NOTICE("dev_id: %d, psn_cfg: %d\n", dev_id, + rsp_buff[1]); + + return msg_result; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/cmd/Kbuild.include new file mode 100644 index 000000000000..4de9993cc4e9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/cmd/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_init.h b/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_init.h new file mode 100644 index 000000000000..5b9fe3cfaf64 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_init.h @@ -0,0 +1,16 @@ +//generate function cmdlist from symbol file + +#ifndef DPP_CMD_INIT_H +#define DPP_CMD_INIT_H + +#include "zxic_common.h" + +#define MSG_ID_MSG_DPP_CMD_SHELL ((ZXIC_UINT32)(100)) +typedef struct { + ZXIC_UINT32 msgId; + ZXIC_UINT8 command[256]; +} T_MSG_CMD_SHELL; + +ZXIC_UINT32 dpp_cmd_init(ZXIC_VOID); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_shell.h b/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_shell.h new file mode 100644 index 000000000000..13ed94732f79 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/include/dpp_cmd_shell.h @@ -0,0 +1,395 @@ +//generate function cmdlist from symbol file + +#ifndef DPP_CMD_SHELL_H +#define DPP_CMD_SHELL_H + +#include "zxic_common.h" +#include "dpp_dtb.h" +#include "dpp_tbl_diag.h" + +typedef struct { + ZXIC_CHAR *name; + ZXIC_CHAR *doc; + ZXIC_VOID *func; +} 
DPP_COMMAND; + +ZXIC_UINT32 dpp_cmd_help(ZXIC_VOID); + +DPP_COMMAND dpp_commands[] = { + { "dpp_cmd_help", "none", dpp_cmd_help }, + { "zxic_comm_set_print_level", "debug_level", + zxic_comm_set_print_level }, + { "zxic_comm_set_print_en", "enable", zxic_comm_set_print_en }, + { "diag_dpp_sdt_tbl_prt", "sdt_no", diag_dpp_sdt_tbl_prt }, + { "diag_dpp_se_smmu0_wr64", "slot vport base_addr index data0 data1", + diag_dpp_se_smmu0_wr64 }, + { "diag_dpp_se_smmu0_rd64", "slot vport base_addr index", + diag_dpp_se_smmu0_rd64 }, + { "diag_dpp_se_smmu0_wr128", + "slot vport base_addr index data0 data1 data2 data3", + diag_dpp_se_smmu0_wr128 }, + { "diag_dpp_se_smmu0_rd128", "slot vport base_addr index", + diag_dpp_se_smmu0_rd128 }, + { "diag_dpp_vport_mac_add", + "slot vport sriov_vlan_tpid sriov_vlan_id mac0...5", + diag_dpp_vport_mac_add }, + { "diag_dpp_vport_mac_del", + "slot vport sriov_vlan_tpid sriov_vlan_id mac0...5", + diag_dpp_vport_mac_del }, + { "diag_dpp_vport_batch_mac_add", + "slot vport mac_num vlanid mac16 mac32", + diag_dpp_vport_batch_mac_add }, + { "diag_dpp_vport_batch_mac_del", + "slot vport mac_num vlanid mac16 mac32", + diag_dpp_vport_batch_mac_del }, + { "diag_dpp_vport_mac_transter", "slot vport new_vport", + diag_dpp_vport_mac_transter }, + { "diag_dpp_vport_mac_max_num", "slot vport", + diag_dpp_vport_mac_max_num }, + { "diag_dpp_vport_mac_flush_online", "slot vport", + diag_dpp_vport_mac_flush_online }, + { "diag_dpp_vport_mac_flush_offline", "slot vport", + diag_dpp_vport_mac_flush_offline }, + { "diag_dpp_vport_mac_search", + "slot vport sriov_vlan_tpid sriov_vlan_id mac0...5", + diag_dpp_vport_mac_search }, + { "diag_dpp_vport_mac_prt", "slot vport", diag_dpp_vport_mac_prt }, + { "diag_dpp_vport_mc_mac_add", "slot vport mac0...5", + diag_dpp_vport_mc_mac_add }, + { "diag_dpp_vport_mc_mac_del", "slot vport mac0...5", + diag_dpp_vport_mc_mac_del }, + { "diag_dpp_vport_batch_mc_mac_add", "slot vport mac_num mac0...5", + 
diag_dpp_vport_batch_mc_mac_add }, + { "diag_dpp_vport_batch_mc_mac_del", "slot vport mac_num mac0...5", + diag_dpp_vport_batch_mc_mac_del }, + { "diag_dpp_vport_mc_mac_transter", "slot vport new_vport", + diag_dpp_vport_mc_mac_transter }, + { "diag_dpp_vport_mc_mac_max_num", "slot vport", + diag_dpp_vport_mc_mac_max_num }, + { "diag_dpp_vport_mc_mac_flush_online", "slot vport", + diag_dpp_vport_mc_mac_flush_online }, + { "diag_dpp_vport_mc_mac_flush_offline", "slot vport", + diag_dpp_vport_mc_mac_flush_offline }, + { "diag_dpp_vport_mc_mac_prt", "slot vport", + diag_dpp_vport_mc_mac_prt }, + { "diag_dpp_vport_table_set", "slot vport attr value", + diag_dpp_vport_table_set }, + { "diag_dpp_vport_table_init", "slot vport", + diag_dpp_vport_table_init }, + { "diag_dpp_vport_table_delete", "slot vport", + diag_dpp_vport_table_delete }, + { "diag_dpp_vport_table_prt", "slot vport", diag_dpp_vport_table_prt }, + { "diag_dpp_vport_egress_meter_en_set", "slot vport enable", + diag_dpp_vport_egress_meter_en_set }, + { "diag_dpp_vport_egress_meter_en_prt", "slot vport", + diag_dpp_vport_egress_meter_en_prt }, + { "diag_dpp_vport_ingress_meter_en_set", "slot vport enable", + diag_dpp_vport_ingress_meter_en_set }, + { "diag_dpp_vport_ingress_meter_en_prt", "slot vport", + diag_dpp_vport_ingress_meter_en_prt }, + { "diag_dpp_vport_egress_meter_mode_set", "slot vport mode", + diag_dpp_vport_egress_meter_mode_set }, + { "diag_dpp_vport_egress_meter_mode_prt", "slot vport", + diag_dpp_vport_egress_meter_mode_prt }, + { "diag_dpp_vport_ingress_meter_mode_set", "slot vport mode", + diag_dpp_vport_ingress_meter_mode_set }, + { "diag_dpp_vport_ingress_meter_mode_prt", "slot vport", + diag_dpp_vport_ingress_meter_mode_prt }, + { "diag_dpp_vport_rx_flow_hash_set", "slot vport hash_mode", + diag_dpp_vport_rx_flow_hash_set }, + { "diag_dpp_vport_rx_flow_hash_prt", "slot vport", + diag_dpp_vport_rx_flow_hash_prt }, + { "diag_dpp_vport_hash_index_prt", "slot vport", + 
diag_dpp_vport_hash_index_prt }, + { "diag_dpp_vport_hash_funcs_set", "slot vport funcs", + diag_dpp_vport_hash_funcs_set }, + { "diag_dpp_vport_rss_en_set", "slot vport enable", + diag_dpp_vport_rss_en_set }, + { "diag_dpp_vport_virtio_en_set", "slot vport enable", + diag_dpp_vport_virtio_en_set }, + { "diag_dpp_vport_virtio_version_set", "slot vport version", + diag_dpp_vport_virtio_version_set }, + { "diag_dpp_vport_promisc_en_set", "slot vport enable", + diag_dpp_vport_promisc_en_set }, + { "diag_dpp_vport_business_vlan_offload_en_set", "slot vport enable", + diag_dpp_vport_business_vlan_offload_en_set }, + { "diag_dpp_vport_vlan_offload_en_set", "slot vport enable", + diag_dpp_vport_vlan_offload_en_set }, + { "diag_dpp_uplink_phy_port_table_set", + "slot vport uplink_phy_port_id attr value", + diag_dpp_uplink_phy_port_table_set }, + { "diag_dpp_uplink_phy_port_table_prt", "slot vport uplink_phy_port_id", + diag_dpp_uplink_phy_port_table_prt }, + { "diag_dpp_uplink_phy_bond_vport", "slot vport port_id", + diag_dpp_uplink_phy_bond_vport }, + { "diag_dpp_uplink_phy_hardware_bond_set", "slot vport port_id enable", + diag_dpp_uplink_phy_hardware_bond_set }, + { "diag_dpp_uplink_phy_lacp_pf_vqm_vfid_set", + "slot vport port_id vqm_vfid", + diag_dpp_uplink_phy_lacp_pf_vqm_vfid_set }, + { "diag_dpp_uplink_phy_lacp_pf_memport_qid_set", + "slot vport port_id qid", + diag_dpp_uplink_phy_lacp_pf_memport_qid_set }, + { "diag_dpp_ptp_port_vfid_set", "slot vport ptp_port_vfid", + diag_dpp_ptp_port_vfid_set }, + { "diag_dpp_ptp_tc_enable_set", "slot vport ptp_tc_enable", + diag_dpp_ptp_tc_enable_set }, + { "diag_dpp_tm_flowid_pport_table_set", "slot vport port_id flow_id", + diag_dpp_tm_flowid_pport_table_set }, + { "diag_dpp_tm_flowid_pport_table_del", "slot vport port_id", + diag_dpp_tm_flowid_pport_table_del }, + { "diag_dpp_tm_pport_trust_mode_table_set", "slot vport port_id mode", + diag_dpp_tm_pport_trust_mode_table_set }, + { "diag_dpp_tm_pport_trust_mode_table_del", 
"slot vport port_id", + diag_dpp_tm_pport_trust_mode_table_del }, + { "diag_dpp_tm_pport_mcode_switch_set", "slot vport port_id mode", + diag_dpp_tm_pport_mcode_switch_set }, + { "diag_dpp_tm_pport_mcode_switch_del", "slot vport port_id", + diag_dpp_tm_pport_mcode_switch_del }, + { "diag_dpp_vport_bc_table_set", "slot vport enable", + diag_dpp_vport_bc_table_set }, + { "diag_dpp_vport_bc_table_prt", "slot vport", + diag_dpp_vport_bc_table_prt }, + { "diag_dpp_vport_uc_promisc_table_set", "slot vport enable", + diag_dpp_vport_uc_promisc_table_set }, + { "diag_dpp_vport_uc_promisc_table_prt", "slot vport", + diag_dpp_vport_uc_promisc_table_prt }, + { "diag_dpp_vport_mc_promisc_table_set", "slot vport enable", + diag_dpp_vport_mc_promisc_table_set }, + { "diag_dpp_vport_mc_promisc_table_prt", "slot vport", + diag_dpp_vport_mc_promisc_table_prt }, + { "dpp_dtb_prt_disable", "none", dpp_dtb_prt_disable }, + { "dpp_dtb_prt_enable", "none", dpp_dtb_prt_enable }, + { "dpp_dtb_debug_fun_enable", "none", dpp_dtb_debug_fun_enable }, + { "dpp_dtb_debug_fun_disable", "none", dpp_dtb_debug_fun_disable }, + { "dpp_dtb_soft_perf_test_set", "value", dpp_dtb_soft_perf_test_set }, + { "dpp_dtb_hardware_perf_test_set", "value", + dpp_dtb_hardware_perf_test_set }, + { "dpp_dtb_down_table_overtime_set", "times_s", + dpp_dtb_down_table_overtime_set }, + { "dpp_dtb_dump_table_overtime_set", "times_s", + dpp_dtb_dump_table_overtime_set }, + { "diag_dpp_rdma_trans_item_add", "rdma", + diag_dpp_rdma_trans_item_add }, + { "diag_dpp_rdma_trans_item_del", "rdma", + diag_dpp_rdma_trans_item_del }, + { "diag_dpp_pcie_channel_prt", "none", diag_dpp_pcie_channel_prt }, + { "diag_dpp_se_hash_stat_prt", "slot fun_id", + diag_dpp_se_hash_stat_prt }, + { "diag_dpp_se_hash_stat_clr", "slot fun_id", + diag_dpp_se_hash_stat_clr }, + { "diag_dpp_hash_item_prt", "slot sdt", diag_dpp_hash_item_prt }, + { "diag_dpp_vqm_vfid_vlan_init", "slot vport", + diag_dpp_vqm_vfid_vlan_init }, + { 
"diag_dpp_vqm_vfid_vlan_delete", "slot vport", + diag_dpp_vqm_vfid_vlan_delete }, + { "diag_dpp_vqm_vfid_vlan_set", "slot vport attr value", + diag_dpp_vqm_vfid_vlan_set }, + { "diag_dpp_vqm_vfid_vlan_prt", "slot vport", + diag_dpp_vqm_vfid_vlan_prt }, + { "diag_dpp_rxfh_set", "slot vport qid0...3 qnum", diag_dpp_rxfh_set }, + { "diag_dpp_rxfh_del", "slot vport", diag_dpp_rxfh_del }, + { "diag_dpp_rxfh_prt", "slot vport", diag_dpp_rxfh_prt }, + { "diag_dpp_thash_key_set", "slot vport rsk_031_000~rsk_319_288", + diag_dpp_thash_key_set }, + { "diag_dpp_thash_key_prt", "slot vport", diag_dpp_thash_key_prt }, + { "diag_dpp_vport_register_info_prt", "none", + diag_dpp_vport_register_info_prt }, + { "diag_dpp_stat_mc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_mc_packet_rx_cnt_prt }, + { "diag_dpp_stat_bc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_bc_packet_rx_cnt_prt }, + { "diag_dpp_stat_1588_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_1588_packet_rx_cnt_prt }, + { "diag_dpp_stat_1588_packet_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_1588_packet_tx_cnt_prt }, + { "diag_dpp_stat_1588_packet_drop_cnt_prt", "slot vport index mode", + diag_dpp_stat_1588_packet_drop_cnt_prt }, + { "diag_dpp_stat_1588_enc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_1588_enc_packet_rx_cnt_prt }, + { "diag_dpp_stat_1588_enc_packet_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_1588_enc_packet_tx_cnt_prt }, + { "diag_dpp_stat_spoof_packet_drop_cnt_prt", "slot vport index mode", + diag_dpp_stat_spoof_packet_drop_cnt_prt }, + { "diag_dpp_stat_mcode_packet_cnt_prt", "slot vport index mode", + diag_dpp_stat_mcode_packet_cnt_prt }, + { "diag_dpp_stat_port_RDMA_packet_msg_tx_cnt_prt", + "slot vport index mode", + diag_dpp_stat_port_RDMA_packet_msg_tx_cnt_prt }, + { "diag_dpp_stat_port_RDMA_packet_msg_rx_cnt_prt", + "slot vport index mode", + diag_dpp_stat_port_RDMA_packet_msg_rx_cnt_prt }, + { 
"diag_dpp_stat_plcr_packet_drop_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_plcr_packet_drop_tx_cnt_prt }, + { "diag_dpp_stat_plcr_packet_drop_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_plcr_packet_drop_rx_cnt_prt }, + { "diag_dpp_stat_MTU_packet_msg_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_MTU_packet_msg_tx_cnt_prt }, + { "diag_dpp_stat_MTU_packet_msg_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_MTU_packet_msg_rx_cnt_prt }, + { "diag_dpp_stat_port_uc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_uc_packet_rx_cnt_prt }, + { "diag_dpp_stat_port_uc_packet_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_uc_packet_tx_cnt_prt }, + { "diag_dpp_stat_port_mc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_mc_packet_rx_cnt_prt }, + { "diag_dpp_stat_port_mc_packet_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_mc_packet_tx_cnt_prt }, + { "diag_dpp_stat_port_bc_packet_rx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_bc_packet_rx_cnt_prt }, + { "diag_dpp_stat_port_bc_packet_tx_cnt_prt", "slot vport index mode", + diag_dpp_stat_port_bc_packet_tx_cnt_prt }, + { "diag_dpp_stat_asn_phyport_rx_pkt_cnt_get", "slot vport index mode", + diag_dpp_stat_asn_phyport_rx_pkt_cnt_get }, + { "diag_dpp_stat_psn_phyport_tx_pkt_cnt_get", "slot vport index mode", + diag_dpp_stat_psn_phyport_tx_pkt_cnt_get }, + { "diag_dpp_stat_psn_phyport_rx_pkt_cnt_get", "slot vport index mode", + diag_dpp_stat_psn_phyport_rx_pkt_cnt_get }, + { "diag_dpp_stat_psn_ack_phyport_tx_pkt_cnt_get", + "slot vport index mode", + diag_dpp_stat_psn_ack_phyport_tx_pkt_cnt_get }, + { "diag_dpp_stat_psn_ack_phyport_rx_pkt_cnt_get", + "slot vport index mode", + diag_dpp_stat_psn_ack_phyport_rx_pkt_cnt_get }, + { "diag_dpp_lag_group_create", "slot vport lag_id", + diag_dpp_lag_group_create }, + { "diag_dpp_lag_group_delete", "slot vport lag_id", + diag_dpp_lag_group_delete }, + { "diag_dpp_lag_mode_set", "slot vport 
lag_id mode", + diag_dpp_lag_mode_set }, + { "diag_dpp_lag_group_hash_factor_set", "slot vport lag_id factor", + diag_dpp_lag_group_hash_factor_set }, + { "diag_dpp_lag_group_member_add", + "slot vport lag_id uplink_phy_port_id", + diag_dpp_lag_group_member_add }, + { "diag_dpp_lag_group_member_del", + "slot vport lag_id uplink_phy_port_id", + diag_dpp_lag_group_member_del }, + { "diag_dpp_lag_table_prt", "slot vport lag_id", + diag_dpp_lag_table_prt }, + { "diag_dpp_tm_pport_dscp_map_table_set", + "slot vport port dscp_id up_id", + diag_dpp_tm_pport_dscp_map_table_set }, + { "diag_dpp_tm_pport_dscp_map_table_del", "slot vport port dscp_id", + diag_dpp_tm_pport_dscp_map_table_del }, + { "diag_dpp_tm_pport_dscp_map_table_prt", "slot vport port dscp_id", + diag_dpp_tm_pport_dscp_map_table_prt }, + { "diag_dpp_tm_pport_up_map_table_set", "slot vport port up_id tc_id", + diag_dpp_tm_pport_up_map_table_set }, + { "diag_dpp_tm_pport_up_map_table_del", "slot vport port up_id", + diag_dpp_tm_pport_up_map_table_del }, + { "diag_dpp_tm_pport_up_map_table_prt", "slot vport port up_id", + diag_dpp_tm_pport_up_map_table_prt }, + { "diag_dpp_vport_vhca_id_add", "slot vport vhca_id", + diag_dpp_vport_vhca_id_add }, + { "diag_dpp_vport_vhca_id_del", "slot vport vhca_id", + diag_dpp_vport_vhca_id_del }, + { "diag_dpp_vport_vhca_id_table_prt", "slot vport vhca_id", + diag_dpp_vport_vhca_id_table_prt }, + { "diag_dpp_vport_reset", "slot vport", diag_dpp_vport_reset }, + { "diag_dpp_vlan_filter_init", "slot vport", + diag_dpp_vlan_filter_init }, + { "diag_dpp_add_vlan_filter", "slot vport vlan_id", + diag_dpp_add_vlan_filter }, + { "diag_dpp_del_vlan_filter", "slot vport vlan_id", + diag_dpp_del_vlan_filter }, + { "diag_dpp_vlan_filter_table_prt", "slot vport vlan_group_id", + diag_dpp_vlan_filter_table_prt }, + { "diag_dpp_fd_cfg_pre1", "smac dmac sip dip sport dport", + diag_dpp_fd_cfg_pre1 }, + { "diag_dpp_fd_cfg_pre2", "ethertype cvlan_pri vlan vxlan_vni vqm_vfid", + 
diag_dpp_fd_cfg_pre2 }, + { "diag_dpp_fd_cfg_pre3", + "action_index action_index2 count_id hash_alg rss_hash_factor", + diag_dpp_fd_cfg_pre3 }, + { "diag_dpp_fd_cfg_pre4", "uplink_fd_id v_qid", diag_dpp_fd_cfg_pre4 }, + { "diag_dpp_fd_cfg_add", "slot vport", diag_dpp_fd_cfg_add }, + { "diag_dpp_fd_cfg_del", "slot vport index", diag_dpp_fd_cfg_del }, + { "diag_dpp_fd_cfg_get", "slot vport index", diag_dpp_fd_cfg_get }, + { "diag_dpp_fd_cfg_search", "slot vport index", + diag_dpp_fd_cfg_search }, + { "diag_dpp_acl_glb_data_prt", "", diag_dpp_acl_glb_data_prt }, + { "diag_dpp_fd_acl_index_req", "slot vport", + diag_dpp_fd_acl_index_req }, + { "diag_dpp_fd_acl_index_rel", "slot vport index", + diag_dpp_fd_acl_index_rel }, + { "diag_dpp_fd_acl_all_delete", "slot vport", + diag_dpp_fd_acl_all_delete }, + { "diag_dpp_dtb_stat_ppu_cnt_clr", "slot vport rd_mode counter_id num", + diag_dpp_dtb_stat_ppu_cnt_clr }, + { "diag_dpp_fd_acl_stat_clear", "slot vport", + diag_dpp_fd_acl_stat_clear }, + { "diag_dpp_se_eram_res_prt", "slot vport", diag_dpp_se_eram_res_prt }, + { "diag_dpp_se_hash_res_prt", "slot vport", diag_dpp_se_hash_res_prt }, + { "diag_dpp_se_acl_res_prt", "slot vport", diag_dpp_se_acl_res_prt }, + { "diag_dpp_se_lpm_res_prt", "slot vport", diag_dpp_se_lpm_res_prt }, + { "diag_dpp_se_ddr_res_prt", "slot vport", diag_dpp_se_ddr_res_prt }, + { "diag_dpp_se_stat_res_prt", "slot vport", diag_dpp_se_stat_res_prt }, + { "diag_dpp_eram_data_stub", "data0 data1 data2 data3", + diag_dpp_eram_data_stub }, + { "diag_dpp_eram_entry_insert", "slot vport sdt index", + diag_dpp_eram_entry_insert }, + { "diag_dpp_eram_entry_delete", "slot vport sdt index", + diag_dpp_eram_entry_delete }, + { "diag_dpp_eram_entry_get", "slot vport sdt index", + diag_dpp_eram_entry_get }, + { "diag_dpp_stat_item_prt", "slot vport stat_item_no", + diag_dpp_stat_item_prt }, + { "diag_dpp_stat_item_prt_all", "slot vport", + diag_dpp_stat_item_prt_all }, + { "diag_dpp_stat_item_cnt_prt", "slot vport 
stat_item_no index rd_mode", + diag_dpp_stat_item_cnt_prt }, + { "diag_dpp_glb_cfg_set", + "slot vport glb_cfg_data_0 glb_cfg_data_1 glb_cfg_data_2 glb_cfg_data_3", + diag_dpp_glb_cfg_set }, + { "diag_dpp_glb_cfg_prt", "slot vport", diag_dpp_glb_cfg_prt }, + { "diag_dpp_pkt_capture_enable", "slot vport cap_point", + diag_dpp_pkt_capture_enable }, + { "diag_dpp_pkt_capture_disable", "slot vport cap_point", + diag_dpp_pkt_capture_disable }, + { "diag_dpp_pkt_capture_disable_all", "slot vport", + diag_dpp_pkt_capture_disable_all }, + { "diag_dpp_pkt_capture_enable_status_get", "slot vport", + diag_dpp_pkt_capture_enable_status_get }, + { "diag_dpp_pkt_capture_rule_index_to_tcam_index", + "rule_index rule_mode cap_point", + diag_dpp_pkt_capture_rule_index_to_tcam_index }, + { "diag_dpp_pkt_capture_tcam_index_to_rule_index", "tcam_index", + diag_dpp_pkt_capture_tcam_index_to_rule_index }, + { "diag_dpp_pkt_capture_item_l2_set", + "dmac_0 dmac_1 smac_0 smac_1 ethtype", + diag_dpp_pkt_capture_item_l2_set }, + { "diag_dpp_pkt_capture_item_l3_set", + "sip_0 sip_1 sip_2 sip_3 dip0 dip_1 dip_2 dip_3 protocol", + diag_dpp_pkt_capture_item_l3_set }, + { "diag_dpp_pkt_capture_item_l4_set", "dport sport qp", + diag_dpp_pkt_capture_item_l4_set }, + { "diag_dpp_pkt_capture_item_kw_set", + "kw_0 kw_1 kw_2 kw_3 kw_off kw_len", + diag_dpp_pkt_capture_item_kw_set }, + { "diag_dpp_pkt_capture_item_insert", + "slot vport tcam_index rule_config cap_point panel_id vqm_vfid vhca_id", + diag_dpp_pkt_capture_item_insert }, + { "diag_dpp_pkt_capture_item_delete", "slot vport tcam_index", + diag_dpp_pkt_capture_item_delete }, + { "diag_dpp_pkt_capture_table_dump", "slot vport", + diag_dpp_pkt_capture_table_dump }, + { "diag_dpp_pkt_capture_table_flush", "slot vport", + diag_dpp_pkt_capture_table_flush }, + { "diag_dpp_pkt_capture_speed_set", "slot vport speed", + diag_dpp_pkt_capture_speed_set }, + { "diag_dpp_pkt_capture_speed_get", "slot vport", + diag_dpp_pkt_capture_speed_get }, + { 
"diag_dpp_mcode_feature_get", "slot vport index", + diag_dpp_mcode_feature_get }, + { "diag_dpp_pktrx_mcode_glb_cfg_write", + "slot vport start_bit_no end_bit_no glb_cfg_data_1", + diag_dpp_pktrx_mcode_glb_cfg_write }, + { "diag_dpp_l2d_psn_cfg_set", "slot vport psn_cfg", + diag_dpp_l2d_psn_cfg_set }, + { "diag_dpp_l2d_psn_cfg_get", "slot vport", diag_dpp_l2d_psn_cfg_get }, + { "diag_dpp_dtb_dump_test", "slot vport num flag", + diag_dpp_dtb_dump_test }, +}; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/cmd/source/Kbuild.include new file mode 100644 index 000000000000..c446d492e2ac --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/cmd/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_init.c b/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_init.c new file mode 100644 index 000000000000..57cc92a60941 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_init.c @@ -0,0 +1,36 @@ +#include "zxic_common.h" +#include "dpp_netlink.h" +#include "dpp_cmd_init.h" + +extern ZXIC_CHAR *dpp_cmd_trim(ZXIC_CHAR *line); +extern ZXIC_UINT32 dpp_cmd_exec(ZXIC_CHAR *line); + +ZXIC_UINT32 dpp_cmd_msg_proc(ZXIC_VOID *msg_body, ZXIC_UINT32 msg_len, + ZXIC_VOID **resp, ZXIC_UINT32 *reps_len) +{ + ZXIC_CHAR *line = NULL; + T_MSG_CMD_SHELL *msg = (T_MSG_CMD_SHELL *)(msg_body); + + ZXIC_COMM_CHECK_POINT(msg); + + line = dpp_cmd_trim(msg->command); + ZXIC_COMM_CHECK_POINT(line); + + if (*line) { + ZXIC_COMM_PRINT( + "---------------------------------------------------\n"); + dpp_cmd_exec(line); + ZXIC_COMM_PRINT( + "---------------------------------------------------\n"); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_cmd_init(ZXIC_VOID) +{ + 
dpp_netlink_regist_msg_proc_fun(MSG_ID_MSG_DPP_CMD_SHELL, + dpp_cmd_msg_proc); + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_shell.c b/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_shell.c new file mode 100644 index 000000000000..16bb3968a8cd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/cmd/source/dpp_cmd_shell.c @@ -0,0 +1,306 @@ +#include "linux/string.h" +#include "zxic_common.h" +#include "dpp_cmd_shell.h" + +#ifndef whitespace +#define whitespace(c) (((c) == ' ') || ((c) == '\t')) +#endif + +#define DPP_CMD_ARG_NUM_MAX (15) + +ZXIC_UINT32 dpp_cmd_help(ZXIC_VOID) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 list_index = 0; + + list_index = sizeof(dpp_commands) / sizeof(dpp_commands[0]); + + for (i = 0; i < list_index; i++) { + ZXIC_COMM_PRINT("%-40s | %s\n", dpp_commands[i].name, + dpp_commands[i].doc); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_cmd_atoi(ZXIC_CHAR *str) +{ + ZXIC_UINT32 n = 0; + ZXIC_SINT32 rc = 0; + + if (str == NULL) { + return 0x0; + } + + if ((str[0] == '0') && (str[1] == 'x')) { + rc = sscanf(str, "0x%x", &n); + } else if ((str[0] == '0') && (str[1] == 'X')) { + rc = sscanf(str, "0X%x", &n); + } else { + rc = sscanf(str, "%u", &n); + } + + if (rc < 0) { + return 0; + } + + return n; +} + +DPP_COMMAND *dpp_cmd_find(ZXIC_CHAR *name) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 list_index = 0; + + list_index = sizeof(dpp_commands) / sizeof(dpp_commands[0]); + + for (i = 0; i < list_index; i++) { + if (strcmp(name, dpp_commands[i].name) == 0) { + return &dpp_commands[i]; + } + } + + return ((DPP_COMMAND *)NULL); +} + +ZXIC_UINT32 dpp_cmd_strtok(ZXIC_CHAR *str, ZXIC_CHAR **arg_v, + ZXIC_UINT32 *arg_num) +{ + ZXIC_CHAR *p_tok = NULL; + ZXIC_CONST ZXIC_CHAR *delim = " ();\t"; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(str); + ZXIC_COMM_CHECK_POINT(arg_v); + ZXIC_COMM_CHECK_POINT(arg_num); + + ZXIC_COMM_MEMSET(arg_v, 0, DPP_CMD_ARG_NUM_MAX * sizeof(ZXIC_CHAR *)); + + p_tok = strsep(&str, delim); 
+ ZXIC_COMM_CHECK_POINT(p_tok); + + arg_v[0] = p_tok; + + i = 1; + while (p_tok && (i < DPP_CMD_ARG_NUM_MAX)) { + p_tok = strsep(&str, delim); + if (p_tok == NULL) { + break; + } + arg_v[i++] = p_tok; + } + + *arg_num = i; + + return DPP_OK; +} + +ZXIC_CHAR *dpp_cmd_trim(ZXIC_CHAR *line) +{ + ZXIC_CHAR *s; + ZXIC_CHAR *t; + + ZXIC_COMM_CHECK_POINT_RETURN_NULL(line); + + for (s = line; whitespace(*s); s++) + ; + + if (*s == 0) { + return s; + } + + t = s + strlen(s) - 1; + while ((t > s) && whitespace(*t)) + t--; + *++t = '\0'; + + return s; +} + +ZXIC_UINT32 dpp_cmd_exec(ZXIC_CHAR *line) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 arg_num = 0; + ZXIC_UINT32 len = 0; + ZXIC_CHAR *word = 0; + ZXIC_CHAR *arg_v[DPP_CMD_ARG_NUM_MAX] = { 0 }; + DPP_COMMAND *command = NULL; + + ZXIC_UINT32 (*func0) + (ZXIC_VOID); + ZXIC_UINT32 (*func1) + (ZXIC_UINT32); + ZXIC_UINT32 (*func2) + (ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32 (*func3) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func4) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func5) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func6) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32); + ZXIC_UINT32(*func7) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func8) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func9) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func10) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32); + ZXIC_UINT32(*func11) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + 
ZXIC_UINT32); + ZXIC_UINT32(*func12) + (ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, ZXIC_UINT32, + ZXIC_UINT32, ZXIC_UINT32); + + ZXIC_COMM_CHECK_POINT(line); + + len = ZXIC_COMM_STRLEN(line); + if (0 == len) { + ZXIC_COMM_PRINT("%s: len is 0.\n", __FUNCTION__); + return DPP_OK; + } + + i = 0; + while (line[i % (len + 1)] && whitespace(line[i % (len + 1)])) { + i++; + } + word = line + i; + + while (line[i % (len + 1)] && !whitespace(line[i % (len + 1)])) { + i++; + } + + if (line[i % (len + 1)]) { + line[i++] = '\0'; + } + + command = dpp_cmd_find(word); + ZXIC_COMM_CHECK_POINT(command); + ZXIC_COMM_CHECK_POINT(command->func); + + while (whitespace(line[i % (len + 1)])) { + i++; + } + + word = line + i; + + rc = dpp_cmd_strtok(word, arg_v, &arg_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_cmd_strtok"); + ZXIC_COMM_CHECK_INDEX(arg_num, 0, DPP_CMD_ARG_NUM_MAX); + + switch (arg_num) { + case 0: { + func0 = command->func; + ((*(func0))()); + break; + } + case 1: { + func1 = command->func; + ((*(func1))(dpp_cmd_atoi(arg_v[0]))); + break; + } + case 2: { + func2 = command->func; + ((*(func2))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]))); + break; + } + case 3: { + func3 = command->func; + ((*(func3))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]))); + break; + } + case 4: { + func4 = command->func; + ((*(func4))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]))); + break; + } + case 5: { + func5 = command->func; + ((*(func5))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]))); + break; + } + case 6: { + func6 = command->func; + ((*(func6))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]))); + break; + } + case 7: { + func7 = command->func; + 
((*(func7))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]))); + break; + } + case 8: { + func8 = command->func; + ((*(func8))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]), dpp_cmd_atoi(arg_v[7]))); + break; + } + case 9: { + func9 = command->func; + ((*(func9))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]), dpp_cmd_atoi(arg_v[7]), + dpp_cmd_atoi(arg_v[8]))); + break; + } + case 10: { + func10 = command->func; + ((*(func10))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]), dpp_cmd_atoi(arg_v[7]), + dpp_cmd_atoi(arg_v[8]), dpp_cmd_atoi(arg_v[9]))); + break; + } + case 11: { + func11 = command->func; + ((*(func11))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]), dpp_cmd_atoi(arg_v[7]), + dpp_cmd_atoi(arg_v[8]), dpp_cmd_atoi(arg_v[9]), + dpp_cmd_atoi(arg_v[10]))); + break; + } + case 12: { + func12 = command->func; + ((*(func12))(dpp_cmd_atoi(arg_v[0]), dpp_cmd_atoi(arg_v[1]), + dpp_cmd_atoi(arg_v[2]), dpp_cmd_atoi(arg_v[3]), + dpp_cmd_atoi(arg_v[4]), dpp_cmd_atoi(arg_v[5]), + dpp_cmd_atoi(arg_v[6]), dpp_cmd_atoi(arg_v[7]), + dpp_cmd_atoi(arg_v[8]), dpp_cmd_atoi(arg_v[9]), + dpp_cmd_atoi(arg_v[10]), dpp_cmd_atoi(arg_v[11]))); + break; + } + default: { + ZXIC_COMM_PRINT("%s: err [arg_num:%d] oversize.\n", + __FUNCTION__, arg_num); + break; + } + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/Kbuild.include 
b/drivers/net/ethernet/dinghai/en_np/comm/Kbuild.include new file mode 100644 index 000000000000..939659855d7c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/comm/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_avl_tree.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_avl_tree.h new file mode 100644 index 000000000000..e600b91ac731 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_avl_tree.h @@ -0,0 +1,89 @@ +/********************************************************************* + * 版权所有 (C)2012, 深圳市中兴通讯股份有限公司。 + * + * 文件名称:zxic_avl_tree.h + * 文件标识: + * 内容摘要:// 本文件从pfe_index 移植过来 + * 其它说明:// 其它内容的说明 + * 当前版本: + * 作 者: + * 完成日期: + * + * 修 改: 蒋文明10124041 2012.04.26移植到NSE项目 + ********************************************************************/ +#ifndef __ZXIC_COMM_AVL_TREE_H__ +#define __ZXIC_COMM_AVL_TREE_H__ + +/* Since the trees are balanced, their heights will never be large. */ +#define avl_maxheight 41 /* why this? a small exercise */ +#define heightoftree(tree) ((tree) == NULL ? 
0 : (tree)->avl_height) + +struct _ZXIC_AVL_CFG; +struct _ZXIC_AVL_NODE; + +#define ZXIC_LIST_ENTRY(ptr, type, member) \ + ((type *)((ZXIC_UINT8 *)(ptr) - \ + (((unsigned long)(&((type *)64)->member)) - 64))) + +#define ZXIC_GET_AVL_KEY_ADDR(p_avl_cfg, key_index) \ + ((p_avl_cfg->p_key_base) + (p_avl_cfg->key_len * (key_index))); + +typedef ZXIC_SINT32 (*ZXIC_KEY_CMP_FUNC)(void *p_new_key, void *p_old_key, + ZXIC_UINT32 key_len); + +typedef struct _ZXIC_AVL_NODE { + void *p_key; + //void *p_owner; /*the owner of this node*/ + ZXIC_UINT32 result; /*该节点的索引*/ + ZXIC_SINT32 avl_height; + struct _ZXIC_AVL_NODE *p_avl_left; + struct _ZXIC_AVL_NODE *p_avl_right; + D_NODE avl_node_list; /*the data is owner*/ +} ZXIC_AVL_NODE; + +typedef struct _ZXIC_AVL_CFG { + ZXIC_AVL_NODE *p_root; + ZXIC_UINT32 avl_node_num; /*avl 中已使用节点的数目*/ + D_HEAD avl_node_list_head; /*avl 线索化节点的头节点*/ + ZXIC_UINT32 key_len; + ZXIC_UINT32 item_num; + ZXIC_KEY_CMP_FUNC avl_cmp_func; + ZXIC_UINT8 *p_key_base; + ZXIC_AVL_NODE *p_avl_node_base; + ZXIC_LISTSTACK_MANGER *p_avl_node_liststack; + + ZXIC_UINT32 is_dynamic; /*是否支持avl 的节点动态生成,1:支持;0:不支持*/ + ZXIC_UINT32 is_init; + +} ZXIC_AVL_CFG; + +ZXIC_RTN32 zxic_comm_avl_init( + ZXIC_AVL_CFG *p_avl_cfg, /* avl配置*/ + ZXIC_UINT32 item_num, /* 插入键值的数目,如果为0表示动态申请节点*/ + ZXIC_UINT32 key_length, /* 插入键值的长度,以字节为单位*/ + ZXIC_KEY_CMP_FUNC + avl_cmp_func); /* 键值比较函数,使用默认比较函数可以为NULL, + 如果按整型比较,需要用户提供比较函数*/ +ZXIC_RTN32 zxic_comm_avl_insert(ZXIC_AVL_CFG *p_avl_cfg, void *p_new_key, + ZXIC_UINT32 *p_index); + +ZXIC_RTN32 zxic_comm_avl_remove(ZXIC_AVL_CFG *p_avl_cfg, void *p_delete_key, + void *p_out); + +ZXIC_RTN32 zxic_comm_avl_find(ZXIC_AVL_CFG *p_avl_cfg, void *p_find_key, + void *p_out); + +ZXIC_RTN32 zxic_comm_avl_destroy(ZXIC_AVL_CFG *p_avl_cfg); + +ZXIC_UINT32 ic_comm_avl_get_node_num(ZXIC_AVL_CFG *p_avl_cfg); +ZXIC_UINT32 ic_comm_avl_is_none(ZXIC_AVL_CFG *p_avl_cfg); +ZXIC_RTN32 ic_comm_avl_get_1st_key(ZXIC_AVL_CFG *p_avl_cfg, void *p_key_out); +ZXIC_RTN32 
ic_comm_avl_get_last_key(ZXIC_AVL_CFG *p_avl_cfg, void *p_key_out); +ZXIC_RTN32 ic_comm_avl_get_1st_node(ZXIC_AVL_CFG *p_avl_cfg, + ZXIC_AVL_NODE **p_node_out); +ZXIC_RTN32 ic_comm_avl_get_last_node(ZXIC_AVL_CFG *p_avl_cfg, + ZXIC_AVL_NODE **p_node_out); + +ZXIC_RTN32 zxic_comm_avl_show_info(ZXIC_AVL_CFG *p_avl_cfg); + +#endif /*__ZXIC_AVL_TREE_H__*/ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_double_link.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_double_link.h new file mode 100644 index 000000000000..74a16f0c586d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_double_link.h @@ -0,0 +1,108 @@ +/***************************************************************************** + * 版权所有 (C)2008-2010, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_comm_double_link.h + * 文件标识: + * 内容摘要: 双链表实现 + * 其它说明: + * 当前版本: V1.00.10 + * 作 者: + * 完成日期: 2012年2月14日 + * 当前责任人-1:ChenWei188471 + * 当前责任人-2: + * 历史责任人-3: + * + * 修改记录: + * 修改日期: 版 本 号 修 改 人 修改内容 + 1 20120214 V1.00.10 ChenWei188471 创建 + 2 + *****************************************************************************/ + +#ifndef _ZXIC_COMM_DOUBLE_LINK_H +#define _ZXIC_COMM_DOUBLE_LINK_H +#define TEST_NUMBER (255) + +/************************************************************************** + * double_link api * + **************************************************************************/ +typedef struct _d_node { + void *data; + struct _d_node *prev; + struct _d_node *next; +} D_NODE; + +typedef struct _d_head { + ZXIC_UINT32 used; + ZXIC_UINT32 maxnum; + D_NODE *p_next; + D_NODE *p_prev; +} D_HEAD; + +typedef ZXIC_SINT32 (*CMP_FUNC)(D_NODE *data1, D_NODE *data2, void *); + +typedef ZXIC_RTN32 (*fun_free)(void *); + +ZXIC_RTN32 zxic_comm_double_link_insert_1st(D_NODE *newnode, D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_insert_aft(D_NODE *newnode, D_NODE *oldnode, + D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_insert_pre(D_NODE *newnode, D_NODE *oldnode, + 
D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_insert_last(D_NODE *newnode, D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_merge_list(D_HEAD *d_list, D_HEAD *s_list); + +ZXIC_RTN32 zxic_comm_double_link_insert_sort(D_NODE *newnode, D_HEAD *head, + CMP_FUNC fuc, void *); + +ZXIC_RTN32 zxic_comm_double_link_search(D_NODE *data, D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_del(D_NODE *data, D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_init(ZXIC_UINT32 elmemtnum, D_HEAD *head); +ZXIC_RTN32 zxic_comm_double_link_insert_merge(D_NODE *p_newnode, D_HEAD *p_head, + ZXIC_UINT32 is_head); + +/*fun指向的是释放dnode指向的空间,如果没有,可以传NULL*/ +ZXIC_RTN32 zxic_comm_dlink_release(D_HEAD *p_head, fun_free fun); +ZXIC_SINT32 zxic_comm_double_link_default_cmp_fuc(D_NODE *p_data1, + D_NODE *p_data2, void *); + +ZXIC_RTN32 zxic_comm_double_link_del_pos(D_HEAD *p_head, void *cmp_data, + fun_free fun); +ZXIC_RTN32 zxic_comm_double_link_insert_cmp(D_HEAD *p_head, void *cmp_data, + ZXIC_UINT32 *is_same); + +#define INIT_D_NODE(ptr, pdata) \ + do { \ + (ptr)->data = pdata; \ + (ptr)->prev = NULL; \ + (ptr)->next = NULL; \ + } while (0) + +/*add by lius +将0转 换成(TYPE*),结构以内存空间首地址0作为起始地址,则成员地址为偏移地址;*/ +#define MEM_OFF(type, member) (ZXIC_COMM_PTR_TO_VAL(&(((type *)0)->member))) + +/*根据当前双链表的指针,找到本节点的指针*/ +#define STRUCT_ENTRY_POINT(ptr, type, member) \ + ((type *)(ZXIC_COMM_PTR_TO_VAL(ptr) - MEM_OFF(type, member))) + +/* 不依据0,找结构体首地址 */ +#define MEM_OFF_NOT_NULL(type, member) \ + (ZXIC_COMM_PTR_TO_VAL(&(((type *)4)->member)) - \ + ZXIC_COMM_PTR_TO_VAL(((type *)4))) + +/* 为了消除原STRUCT_ENTRY_POINT中“直接解引用 NULL”的coverity */ +#define GET_STRUCT_ENTRY_POINT(ptr, type, member) \ + ((type *)(ZXIC_COMM_PTR_TO_VAL(ptr) - MEM_OFF_NOT_NULL(type, member))) + +#define DLINK_IS_FULL(p_dlink) ((p_dlink)->used == (p_dlink)->maxnum) + +ZXIC_RTN32 zxic_comm_double_link_sort(D_HEAD *p_head, CMP_FUNC cmp_fuc); +ZXIC_RTN32 zxic_comm_double_link_swap(D_NODE *p_pre, D_NODE *p_next); +ZXIC_RTN32 
zxic_comm_double_link_test(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_double_link_print(D_HEAD *p_head); +ZXIC_RTN32 zxic_comm_double_link_del_by_data(D_HEAD *p_head, + ZXIC_VOID *cmp_data, fun_free fun); +ZXIC_RTN32 zxic_comm_double_link_del_by_info(D_HEAD *p_head, void *cmp_data, + CMP_FUNC cmp_fuc, + ZXIC_UINT32 *p_deled_num); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_doublelink_index.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_doublelink_index.h new file mode 100644 index 000000000000..35b471b2c70e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_doublelink_index.h @@ -0,0 +1,147 @@ +/***************************************************************************** + * 版权所有 (C)2008-2010, 深圳市中兴通讯股份有限公司。 + * + * 文件名称:zxic_doublelink_index.h + * 文件标识: + * 内容摘要:zxic_doublelink_index.c 的头文件, 这里定义了.c文件需要用到的数据结构, + 并且申明了提供外部模块调用的接口函数 + * 其它说明: 其它内容的说明 + * 当前版本: + * 作 者:HuangHe(170389) Z T E 中兴 + * 完成日期:2009-02-16 + * 当前责任人-1:HuangHe Z T E 中兴 + * 当前责任人-2: + * 历史责任人-3: + * + * 修改记录: + * 修改日期: 版 本 号 修 改 人 修改内容 + 1 yyyymmdd V*.* 姓名工号 + 2 + *****************************************************************************/ +#ifndef __ZXIC_COMM_DOUBLELINK_INDEX_H__ +#define __ZXIC_COMM_DOUBLELINK_INDEX_H__ + +/************************** include head files *****************************/ + +/************************** type define *****************************/ +#define DOUBLELINK_CHECKSUM ((ZXIC_UINT32)(0xAABBBAAB)) + +/************************** const variables **************************/ +#define DOUBLELINK_INVALID_PREVIOUS (0x0) +#define DOUBLELINK_INVALID_NEXT (0x0) +#define DOUBLELINK_USED_FLAG ((ZXIC_UINT32)(0xffffffff)) +#define DOUBLELINK_LASTEST_ELEMENT ((ZXIC_UINT32)(0x0ffffffe)) + +/************************************************************************** + * double_link_index api * + **************************************************************************/ + +/** + * NAME: DLINK_NODE + * + * 
DESCRIPTION: Structure Node the information of the doublelink. + **/ +typedef struct { + ZXIC_UINT32 dw_next_node; /*后一个节点*/ + ZXIC_UINT32 dw_pre_node; /*前一个节点*/ + ZXIC_UINT32 dw_self_node; /*当前一个节点*/ + +} DLINK_NODE; + +/** + * NAME: FTMCOMM_DOUBLELINK_MANGER + * + * DESCRIPTION: Structure containing the information required by the + * implementation of the doublelink. +**/ + +typedef struct _FtmComm_DoubleLink_Manager { + /* + * p_array is a pointer to the array of elements used to track which + * indexes have been allocated. + */ + DLINK_NODE *p_array; + + /* + * numElements is the number of indexes managed by this instance of the + * index_pool. + */ + ZXIC_UINT32 capacity; + + /* + * currFreeElement stores a free element for where to alloc next free element. + * This helps prevent looping over a large sections of the array each time + * a new index is allocated. + */ + ZXIC_UINT32 free_num; + + ZXIC_UINT32 used_num; + + ZXIC_UINT32 first_used; + + ZXIC_UINT32 last_used; + + ZXIC_UINT32 first_free; + + ZXIC_UINT32 last_free; + /* + * offset is an adjustment value, allowing the caller to prevent certain + * indexes from being allocated. This value is only meaningful to the + * client, and does not affect how the indexes are managed within the + * doublelink. 
+ */ + ZXIC_UINT32 offset; + + ZXIC_UINT32 check_sum; /*用来检查传入的地址是否是双链表管理结构地址*/ + +} ZXIC_DOUBLELINK_MANGER; + +ZXIC_RTN32 zxic_comm_dlink_manage_create(ZXIC_UINT32 dw_element_num, + ZXIC_UINT32 dw_offset, + ZXIC_DOUBLELINK_MANGER **p_dlink); + +ZXIC_RTN32 zxic_comm_dlink_alloc(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *index); + +ZXIC_RTN32 zxic_comm_dlink_free(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 index); + +ZXIC_RTN32 zxic_comm_dlink_get_next(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 dw_index, + ZXIC_UINT32 *p_next_index); + +ZXIC_RTN32 zxic_comm_dlink_manage_clear(ZXIC_DOUBLELINK_MANGER *p_dlink); + +ZXIC_RTN32 zxic_comm_dlink_get_previous(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 dw_index, + ZXIC_UINT32 *p_pre_index); + +ZXIC_RTN32 zxic_comm_dlink_is_used(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 dw_index, ZXIC_UINT8 *p_is_used); + +ZXIC_RTN32 zxic_comm_dlink_first_free(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *p_index); + +ZXIC_RTN32 zxic_comm_dlink_first_used(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *p_index); + +ZXIC_RTN32 zxic_comm_dlink_manage_reset(ZXIC_DOUBLELINK_MANGER *p_dlink); + +ZXIC_RTN32 zxic_comm_dlink_get_curr_info(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *p_free_num, + ZXIC_UINT32 *p_curr_free_index); + +ZXIC_RTN32 zxic_comm_dlink_last_used(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *p_index); + +ZXIC_RTN32 zxic_comm_dlink_used_num(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 *p_num); + +ZXIC_RTN32 zxic_comm_dlink_show_node_info(ZXIC_DOUBLELINK_MANGER *p_dlink, + ZXIC_UINT32 dw_node_index); + +ZXIC_RTN32 zxic_comm_dlink_show_current_status(ZXIC_DOUBLELINK_MANGER *p_dlink); + +ZXIC_RTN32 zxic_comm_dlink_self_test(ZXIC_VOID); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_ctrl.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_ctrl.h new file mode 100644 index 000000000000..a9ab4ab836ff --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_ctrl.h @@ -0,0 +1,83 @@ +/********************************************************************* +* 版权所有 (C)2001, 深圳市中兴通讯股份有限公司。 +* +* 文件名称: +* 文件标识: +* 内容摘要: +* 其它说明: +* +* +* 当前版本: +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT : 100% +* 作 者: +* 完成日期:2010-10-29 +********************************************************************/ +#ifndef _ZXIC_COMM_INDEX_CONTROLLER_H_ +#define _ZXIC_COMM_INDEX_CONTROLLER_H_ + +//#include "zxic_common.h" +//#include "zxic_comm_rb_tree.h" +//#include "zxic_comm_double_link.h" + +#define ZXIC_INDEX_EXPAND_MAX_NUM (900) +#define INDEX_KEY_LENGTH (4) +typedef struct zxic_index_ctrl_cfg { + ZXIC_UINT32 index_cursor_current; + ZXIC_UINT32 index_cursor_last; + ZXIC_UINT32 index_cursor_max_cur; + ZXIC_UINT32 index_ctrl_is_init; + ZXIC_UINT32 *p_index_buf; + + ZXIC_RB_CFG index_ctrl_rb_tree; + ZXIC_RB_CFG rcd_ctrl_rb_tree; + + D_HEAD *p_index_ctrl_link; +} ZXIC_INDEX_CTRL_CFG; + +typedef struct _zxic_index_api_params { + ZXIC_UINT32 zxic_expand_num; /*the expand num of this item */ + ZXIC_UINT32 zxic_opera_mode; /*0:add;1:del;2:sch */ + ZXIC_UINT32 zxic_rsp_isexit; /*the rsp of whether is exit */ + ZXIC_UINT32 *p_zxic_out_index; /*the address of response */ + ZXIC_VOID *p_zxic_data; /*the data of inserting in tcam*/ + +} ZXIC_INDEX_API_PARAMS; + +typedef enum functionNo /* 接口提供的表操作类型 */ +{ INDEX_CTRL_ADD, + INDEX_CTRL_ADD_FROM_LAST, + INDEX_CTRL_DEL, + INDEX_CTRL_SEARCH, + INDEX_CTRL_UNDEFINED } FUNCTION_NO; + +ZXIC_RTN32 zxic_comm_indexctrl_get_free_index(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_UINT32 func_type, + ZXIC_UINT32 *p_free_index_num); +ZXIC_RTN32 zxic_comm_indexctrl_add_from_last(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_VOID *data, + ZXIC_UINT32 expand_num, + ZXIC_UINT32 *out_index); +ZXIC_RTN32 zxic_comm_indexctrl_sch(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_VOID *data, ZXIC_UINT32 *p_is_exit, + ZXIC_UINT32 *out_index); +ZXIC_RTN32 
+zxic_comm_indexctrl_extcommand(ZXIC_INDEX_API_PARAMS *p_zxic_api_params, + ZXIC_INDEX_CTRL_CFG *p_table_info); +ZXIC_RTN32 zxic_comm_indexctrl_add(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_VOID *data, ZXIC_UINT32 expand_num, + ZXIC_UINT32 *out_index); +ZXIC_RTN32 zxic_comm_indexctrl_init(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_UINT32 index_max_num, + ZXIC_UINT32 table_key_len); +ZXIC_RTN32 +zxic_comm_indexctrl_getindex_from_last(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_UINT32 *p_index_out); +ZXIC_RTN32 zxic_comm_indexctrl_del(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_VOID *data, ZXIC_UINT32 *out_index); +ZXIC_RTN32 zxic_comm_indexctrl_getindex(ZXIC_INDEX_CTRL_CFG *p_table_info, + ZXIC_UINT32 *p_index_out); +ZXIC_SINT32 zxic_comm_indexctrl_cmp_key(ZXIC_VOID *new_key, ZXIC_VOID *old_key, + ZXIC_UINT32 key_len); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill.h new file mode 100644 index 000000000000..3723485e7eb5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill.h @@ -0,0 +1,65 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_index_fill.h + * 文件标识: + * 内容摘要: 索引空位填充源代码头文件 + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ + +#ifndef _ZXIC_COMM_INDEX_FILL_H +#define _ZXIC_COMM_INDEX_FILL_H + +typedef ZXIC_RTN32 (*INDEXFILL_SWAP_FUNC)(ZXIC_UINT32 old_index, + ZXIC_UINT32 new_index, + ZXIC_VOID *p_cfg); + +typedef struct { + ZXIC_RB_TN rb_node; + ZXIC_UINT32 position; + ZXIC_UINT32 usednum; +} INDEX_FILL_NODE; + +typedef struct { + ZXIC_RB_CFG fill_rb; + ZXIC_UINT32 index_num; + INDEX_FILL_NODE *fill_node; + INDEXFILL_SWAP_FUNC swap_fun; + ZXIC_UINT32 
total_used; +} INDEX_FILL_CFG; + +ZXIC_RTN32 ic_comm_node_data_free(void *p_data); + +ZXIC_RTN32 zxic_comm_indexfill_init(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 index_num, + ZXIC_KEY_CMP_FUNC p_cmp_fun, + INDEXFILL_SWAP_FUNC p_swap_fun, + ZXIC_UINT32 key_len); + +ZXIC_RTN32 zxic_comm_indexfill_free(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 free_index, ZXIC_VOID *p_rb_key, + ZXIC_UINT32 *out_index); + +ZXIC_RTN32 zxic_comm_indexfill_destroy(INDEX_FILL_CFG *p_fill_cfg); + +ZXIC_RTN32 zxic_comm_indexfill_store(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 *p_size, + ZXIC_UINT8 **p_data_buff); + +ZXIC_RTN32 zxic_comm_indexfill_show_all_position(INDEX_FILL_CFG *p_fill_cfg); +ZXIC_RTN32 zxic_comm_indexfill_clear(INDEX_FILL_CFG *p_fill_cfg); + +#define ICMINF_GET_NODE_LASTPOS(p_inf_node) \ + ((p_inf_node)->position + (p_inf_node)->usednum - 1) + +#define ICMINF_GET_NODE_FSTPOS(p_inf_node) ((p_inf_node)->position) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill_type.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill_type.h new file mode 100644 index 000000000000..9a1e89044f11 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_fill_type.h @@ -0,0 +1,90 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_index_fill.h + * 文件标识: + * 内容摘要: 索引空位填充源代码头文件 + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ + +#ifndef _ZXIC_COMM_INDEX_FILL_TYPE_H +#define _ZXIC_COMM_INDEX_FILL_TYPE_H + +typedef ZXIC_RTN32 (*INDEXFILL_TYPE_SWAP_FUNC)(ZXIC_UINT32 old_index, + ZXIC_UINT32 new_index, + ZXIC_VOID *p_cfg); + +typedef struct { + ZXIC_UINT32 is_used; /* 0空闲,1已分配 */ + ZXIC_UINT32 prio; +} INDEX_FILL_TYPE_INDEX_STATUS; /* 
is_used==0时,不关心prio */ + +typedef struct { + ZXIC_RB_TN prio_rb_node; /* prio_rb_tree中的节点 */ + ZXIC_RB_CFG + idx_rb_cfg; /* 以index为key,详细记录每个prio的每个index信息 */ + ZXIC_UINT32 prio; /* prio值*/ +} INDEX_FILL_TYPE_PRIO_NODE; + +typedef struct { + ZXIC_UINT32 prio; +} SSP4_INDEX_FILL_TYPE_PRIO_RB_KEY; /* prio红黑树的key */ + +typedef struct { + ZXIC_RB_CFG prio_rb; /* 每type的prio红黑树,以prio为key */ + D_HEAD mv_list_head; /* 同type的移位链表 */ + ZXIC_VOID *p_cfg; /* type相关的其他参数定义,外部自行定义 */ +} INDEX_FILL_TYPE_MNG_CFG; + +typedef struct { + ZXIC_UINT32 index_num; /* 本池的索引数量 */ + ZXIC_UINT32 total_used; /* 本池已使用的索引数量 */ + ZXIC_UINT32 prio_max; /* 最大优先级范围 */ + ZXIC_UINT32 global_max_num; /* 全局最大数量*/ + INDEX_FILL_TYPE_INDEX_STATUS + *p_idx_status; /* 全局索引池状态位记录数组指针 */ + INDEXFILL_TYPE_SWAP_FUNC swap_fun; /* 向前或者向后挤压时的移位操作函数 */ +} INDEX_FILL_TYPE_INDEX_POOL_CFG; /* 多种type共享的索引池 */ + +ZXIC_UINT32 zxic_comm_indexfill_type_idx_status_get( + INDEX_FILL_TYPE_INDEX_STATUS *index_status, ZXIC_UINT32 index, + ZXIC_UINT32 *used_status_flag, ZXIC_UINT32 *used_status_prio); + +ZXIC_UINT32 zxic_comm_indexfill_type_idx_status_set( + INDEX_FILL_TYPE_INDEX_STATUS *index_status, ZXIC_UINT32 index, + ZXIC_UINT32 prio, ZXIC_UINT32 used_flag); + +ZXIC_RTN32 zxic_comm_indexfill_type_init( + INDEX_FILL_TYPE_INDEX_POOL_CFG *p_fill_type_index_pool_cfg, + ZXIC_UINT32 index_num, ZXIC_UINT32 prio_max, ZXIC_UINT32 global_max_num, + INDEXFILL_TYPE_SWAP_FUNC p_swap_fun); + +ZXIC_RTN32 +zxic_comm_indexfill_type_rb_init(INDEX_FILL_TYPE_MNG_CFG *p_fill_type_mng_cfg); + +/* 多张type共享的索引记录 */ +/* 每种type,一个prio红黑树管理结构,一个prio+index红黑树管理结构 */ +ZXIC_RTN32 zxic_comm_indexfill_type_alloc( + INDEX_FILL_TYPE_INDEX_POOL_CFG *p_fill_type_index_pool_cfg, + INDEX_FILL_TYPE_MNG_CFG *p_fill_type_mng_cfg, ZXIC_UINT32 prio, + ZXIC_UINT32 *out_index); +/* 多张type共享的索引记录 */ +/* 每种type,一个prio红黑树管理结构,一个prio+index红黑树管理结构 */ +ZXIC_RTN32 zxic_comm_indexfill_type_free( + INDEX_FILL_TYPE_INDEX_POOL_CFG *p_fill_type_index_pool_cfg, + 
INDEX_FILL_TYPE_MNG_CFG *p_fill_type_mng_cfg, ZXIC_UINT32 free_index, + ZXIC_UINT32 *out_index); + +ZXIC_RTN32 zxic_comm_indexfill_type_show_all_position( + INDEX_FILL_TYPE_INDEX_POOL_CFG *p_fill_type_index_pool_cfg, + INDEX_FILL_TYPE_MNG_CFG *p_fill_type_mng_cfg); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_reserve.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_reserve.h new file mode 100644 index 000000000000..39b437854adf --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_index_reserve.h @@ -0,0 +1,79 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_index_reserve.h + * 文件标识: + * 内容摘要: 索引预留算法源代码头文件 + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ +#ifndef _ZXIC_COMM_INDEX_RESERVE_H +#define _ZXIC_COMM_INDEX_RESERVE_H + +#define CMP_MODE_LOW (0) +#define CMP_MODE_HIGH (1) +typedef ZXIC_UINT32 (*SWAP_FUNC)(ZXIC_UINT32 old_index, ZXIC_UINT32 new_index); +typedef ZXIC_UINT32 (*LOCAL_SWAP_FUNC)(ZXIC_VOID *p_cfg, ZXIC_UINT32 old_index, + ZXIC_UINT32 new_index); + +typedef struct { + ZXIC_UINT32 head_curr; + ZXIC_UINT32 tail_curr; +} INDEX_CURR; + +typedef struct { + ZXIC_UINT32 old_handle; + ZXIC_UINT32 new_handle; +} INR_SWAP_NODE; + +typedef struct _index_res_cfg { + ZXIC_UINT32 total_num; + ZXIC_UINT32 space_num; + ZXIC_UINT32 *index_prop; + ZXIC_RB_CFG *index_usedrb; + ZXIC_RB_CFG *index_freerb; + ZXIC_RB_TN *index_node; + SWAP_FUNC swap_fun; + LOCAL_SWAP_FUNC local_fun; + INDEX_CURR *index_curr; + D_HEAD swap_list; + ZXIC_UINT32 total_used; + ZXIC_UINT32 is_init; + ZXIC_UINT32 indexres_id; +} INDEX_RES_CFG; +ZXIC_VOID zxic_comm_rb_tn_relation_clear(ZXIC_RB_TN *rb_tn_node); + +ZXIC_RTN32 
zxic_comm_indexres_init( + INDEX_RES_CFG *p_indexres_cfg, /*配置句柄*/ + ZXIC_UINT32 arg_total_num, /*索引总数*/ + ZXIC_UINT32 arg_space_num, /*空间总数*/ + ZXIC_UINT32 * + arg_index_prop, /*空间大小,若用户提供,则按用户提供的进行空间分配,否则平均分配所有空间*/ + SWAP_FUNC p_swap_fun, + LOCAL_SWAP_FUNC local_fun); /*重排函数,注册则调用,否则不调*/ + +ZXIC_RTN32 +zxic_comm_indexres_alloc(INDEX_RES_CFG *p_indexres_cfg, /*配置句柄*/ + ZXIC_UINT32 space_val, /*空间序列*/ + ZXIC_UINT32 *out_index); /*出参,分配的索引*/ + +ZXIC_RTN32 zxic_comm_indexres_free(INDEX_RES_CFG *p_indexres_cfg, + ZXIC_UINT32 space_val, + ZXIC_UINT32 free_index); + +ZXIC_RTN32 zxic_comm_indexres_destory(INDEX_RES_CFG *p_indexres_cfg); + +ZXIC_RTN32 zxic_comm_indexres_reset(INDEX_RES_CFG *p_indexres_cfg); + +ZXIC_RTN32 zxic_comm_indexres_showinfo( + INDEX_RES_CFG * + p_indexres_cfg); /*重置整个索引空间,恢复到最初的配置状态,注意,此时所有的索引都需要在未使用状态*/ + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_liststack.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_liststack.h new file mode 100644 index 000000000000..c9cbd687c882 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_liststack.h @@ -0,0 +1,88 @@ +/***************************************************************************** + * 版权所有 (C)2001-2005, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: Rtm_ListStack.h + * 文件标识: + * 内容摘要: 链表栈管理头文件 + * 其它说明: 释放的元素放到队头 + * 当前版本: ZXR10 V2.6 + * 作 者: 郑纪伟 + * 完成日期: 2006-9-27 10:34 + * 当前责任人-1: + * 当前责任人-2: + * + * 修改记录1: + * 修改日期:2006-9-27 10:34 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:郑纪伟 + * 修改内容:创建 + * + *修改记录2: + * 修改文件名称:ftmcomm_liststack.h + * 修改日期:2008-10-16 15:14 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:HuangHe 170389 + * 修改内容:移植到T8000项目使用 + *修改记录3: + * 修改文件名称:zxic_liststack.h + * 修改日期:2012-03-15 15:14 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:JiangWenming 12010401 + * 修改内容:移植到NSE项目使用 + * + *****************************************************************************/ +#ifndef __ZXIC_COMM_LIST_STACK_H__ +#define __ZXIC_COMM_LIST_STACK_H__ + 
+/************************************************************************** + * 宏定义 * + **************************************************************************/ + +#define LISTSTACK_MAX_ELEMENT ((ZXIC_UINT32)(0x0ffffffe)) +#define LISTSTACK_INVALID_INDEX (0) +#define ALLOC_NUMBER (0x3) +/************************************************************************** + * liststack api * + **************************************************************************/ + +typedef struct _s_freelink { + ZXIC_UINT32 index; + ZXIC_UINT32 next; +} ZXIC_COMM_FREELINK; + +typedef struct _s_List_Stack_Manager { + ZXIC_COMM_FREELINK *p_array; + + ZXIC_UINT32 capacity; + + ZXIC_UINT32 p_head; + + ZXIC_UINT32 free_num; + ZXIC_UINT32 used_num; + +} ZXIC_LISTSTACK_MANGER; + +/* +**zxic_comm_liststack_creat: +*/ + +ZXIC_RTN32 zxic_comm_liststack_creat(ZXIC_UINT32 element_num, + ZXIC_LISTSTACK_MANGER **p_list); + +/* +**NOTE:index allocated from 0: +*/ +ZXIC_RTN32 zxic_comm_liststack_alloc(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 *index); +ZXIC_RTN32 zxic_comm_liststack_free(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 index); +ZXIC_RTN32 zxic_comm_liststack_destroy(ZXIC_LISTSTACK_MANGER *p_list); +ZXIC_RTN32 zxic_comm_liststack_alloc_spec_index(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 index); + +ZXIC_RTN32 zxic_comm_liststack_show_used(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 line_number); +ZXIC_RTN32 zxic_comm_liststack_show_free(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 line_number); + +#endif /* end "_FTMCOMM_LIST_STACK_H" */ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_rb_tree.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_rb_tree.h new file mode 100644 index 000000000000..2e2d3c1a677e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_rb_tree.h @@ -0,0 +1,117 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: 
zxic_comm_rb_tree.h + * 文件标识: + * 内容摘要: + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ + +#ifndef _ZXIC_COMM_RB_TREE_H +#define _ZXIC_COMM_RB_TREE_H + +#include "zxic_comm_double_link.h" +#include "zxic_comm_liststack.h" + +#define ZXIC_RBT_RED (0x1) +#define ZXIC_RBT_BLACK (0x2) +#define ZXIC_RBT_MAX_DEPTH (96) + +typedef ZXIC_SINT32 (*ZXIC_RB_CMPFUN)(ZXIC_VOID *p_new, ZXIC_VOID *p_old, + ZXIC_UINT32 keysize); + +typedef struct _rb_tn { + ZXIC_VOID *p_key; + ZXIC_UINT32 + color_lsv; /*last 2 bits indicate color, bit2-31 if dynamic=0 indicate list val*/ + struct _rb_tn *p_left; + struct _rb_tn *p_right; + struct _rb_tn *p_parent; + D_NODE tn_ln; +} ZXIC_RB_TN; + +typedef struct _rb_cfg { + ZXIC_UINT32 key_size; + ZXIC_UINT32 + is_dynamic; /* 1 - customer manage memory;0 - alloc all memory*/ + ZXIC_RB_TN *p_root; /* rb tree root node */ + D_HEAD tn_list; + ZXIC_RB_CMPFUN p_cmpfun; + ZXIC_LISTSTACK_MANGER *p_lsm; /* list stack manage*/ + ZXIC_UINT8 *p_keybase; + ZXIC_RB_TN *p_tnbase; + ZXIC_UINT32 is_init; +} ZXIC_RB_CFG; + +#define GET_TN_COLOR(p_tn) \ + ((p_tn == NULL) ? 
ZXIC_RBT_BLACK : (p_tn)->color_lsv & 0x3) + +#define SET_TN_COLOR(p_tn, color) \ + do { \ + (p_tn)->color_lsv &= 0xfffffffc; \ + (p_tn)->color_lsv |= (color & 0x3); \ + } while (0) + +#define GET_TN_LSV(p_tn) ((p_tn)->color_lsv >> 2) + +#define SET_TN_LSV(p_tn, list_val) \ + do { \ + (p_tn)->color_lsv &= 0x3; \ + (p_tn)->color_lsv |= ((list_val) << 2); \ + } while (0) + +/*init the rb node ,be careful init_color is red*/ +#define INIT_RBT_TN(p_tn, p_newkey) \ + do { \ + (p_tn)->p_key = p_newkey; \ + (p_tn)->color_lsv = 0; \ + (p_tn)->p_left = NULL; \ + (p_tn)->p_right = NULL; \ + (p_tn)->p_parent = NULL; \ + INIT_D_NODE(&((p_tn)->tn_ln), (p_tn)); \ + } while (0) + +ZXIC_RTN32 zxic_comm_rb_init(ZXIC_RB_CFG *p_rb_cfg, ZXIC_UINT32 total_num, + ZXIC_UINT32 key_size, ZXIC_RB_CMPFUN cmpfun); + +ZXIC_RTN32 zxic_comm_rb_insert(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val); + +ZXIC_RTN32 zxic_comm_rb_delete(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val); + +ZXIC_RTN32 zxic_comm_rb_search(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val); + +ZXIC_RTN32 zxic_comm_rb_destroy(ZXIC_RB_CFG *p_rb_cfg); + +ZXIC_RB_TN *zxic_comm_rb_get_1st_tn(ZXIC_RB_CFG *p_rb_cfg); + +ZXIC_RB_TN *zxic_comm_rb_get_last_tn(ZXIC_RB_CFG *p_rb_cfg); + +ZXIC_RTN32 zxic_comm_rb_get_1st_key(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_VOID *p_key_out); + +ZXIC_RTN32 zxic_comm_rb_get_last_key(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_VOID *p_key_out); + +ZXIC_RTN32 zxic_comm_rb_insert_spec_index(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_VOID *p_key, ZXIC_UINT32 in_idx); + +#define ZXIC_RBT_RC_BASE (0x1000) + +#define ZXIC_RBT_RC_UPDATE (ZXIC_RBT_RC_BASE | 0x1) +#define ZXIC_RBT_RC_SRHFAIL (ZXIC_RBT_RC_BASE | 0x2) +#define ZXIC_RBT_RC_FULL (ZXIC_RBT_RC_BASE | 0x3) +#define ZXIC_RBT_ISEMPTY_ERR (ZXIC_RBT_RC_BASE | 0x4) +#define ZXIC_RBT_PARA_INVALID (ZXIC_RBT_RC_BASE | 0x5) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_socket.h 
b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_socket.h new file mode 100644 index 000000000000..7d0ff8ba46b5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_socket.h @@ -0,0 +1,145 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_comm_socket.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/08 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _ZXIC_COMM_SOCKET_H_ +#define _ZXIC_COMM_SOCKET_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ZXIC_OS_WIN +#include +typedef SOCKET ZXIC_SOCKET; + +#else +#include +#include +#include +#include +typedef ZXIC_SINT32 ZXIC_SOCKET; +#endif + +typedef struct sockaddr SOCKADDR; +typedef struct sockaddr_in SOCKADDR_IN; + +#define ZXIC_SOCK_VALID (0) +#define ZXIC_SOCK_INVALID (-1) +#define ZXIC_SOCK_NUM_MAX (16) + +#define ZXIC_SOCK_INADDR_ANY (0x00000000) +#define ZXIC_SOCK_INADDR_LOOPBACK (0x7f000001) +#define ZXIC_SOCK_INADDR_BROADCAST (0xffffffff) +#define ZXIC_SOCK_INADDR_NONE (0xffffffff) + +/* socket domain */ +#define ZXIC_SOCK_AF_INET AF_INET /* internetwork: UDP, TCP, etc. 
*/ +#define ZXIC_SOCK_AF_INET6 AF_INET6 /* Internetwork Version 6 */ + +/* socket type */ +#define ZXIC_SOCK_STREAM SOCK_STREAM /* stream socket */ +#define ZXIC_SOCK_DGRAM SOCK_DGRAM /* datagram socket */ +#define ZXIC_SOCK_RAW SOCK_RAW /* raw-protocol interface */ +#define ZXIC_SOCK_RDM SOCK_RDM /* reliably-delivered message */ +#define ZXIC_SOCK_SEQPACKET SOCK_SEQPACKET /* sequenced packet stream */ + +/* socket protocol */ +#define ZXIC_SOCK_IPPROTO_IP IPPROTO_IP /* dummy for IP */ +#define ZXIC_SOCK_IPPROTO_TCP IPPROTO_TCP /* tcp */ +#define ZXIC_SOCK_IPPROTO_UDP IPPROTO_UDP /* user datagram protocol */ + +/* socket level */ +#define ZXIC_SOCK_SOL_SOCKET SOL_SOCKET /* options for socket level */ + +/* socket OptName */ +#define ZXIC_SOCK_SO_DEBUG SO_DEBUG /* turn on debugging info recording */ +#define ZXIC_SOCK_SO_ACCEPTCONN SO_ACCEPTCONN /* socket has had listen() */ +#define ZXIC_SOCK_SO_REUSEADDR SO_REUSEADDR /* allow local address reuse */ +#define ZXIC_SOCK_SO_KEEPALIVE SO_KEEPALIVE /* keep connections alive */ +#define ZXIC_SOCK_SO_DONTROUTE SO_DONTROUTE /* just use interface addresses */ +#define ZXIC_SOCK_SO_BROADCAST \ + SO_BROADCAST /* permit sending of broadcast msgs */ +#define ZXIC_SOCK_SO_USELOOPBACK \ + SO_USELOOPBACK /* bypass hardware when possible */ +#define ZXIC_SOCK_SO_LINGER SO_LINGER /* linger on close if data present */ +#define ZXIC_SOCK_SO_OOBINLINE \ + SO_OOBINLINE /* leave received OOB data in line */ +#define ZXIC_SOCK_SO_SNDBUF SO_SNDBUF /* send buffer size */ +#define ZXIC_SOCK_SO_RCVBUF SO_RCVBUF /* receive buffer size */ +#define ZXIC_SOCK_SO_SNDLOWAT SO_SNDLOWAT /* send low-water mark */ +#define ZXIC_SOCK_SO_RCVLOWAT SO_RCVLOWAT /* receive low-water mark */ +#define ZXIC_SOCK_SO_SNDTIMEO SO_SNDTIMEO /* send timeout */ +#define ZXIC_SOCK_SO_RCVTIMEO SO_RCVTIMEO /* receive timeout */ +#define ZXIC_SOCK_SO_ERROR SO_ERROR /* get error status and clear */ +#define ZXIC_SOCK_SO_TYPE SO_TYPE /* get socket type */ + +#define 
ZXIC_TCP_OP_NODELAY TCP_NODELAY + +typedef struct zxic_comm_sock_addr_t { + ZXIC_UINT32 family; + ZXIC_UINT32 port; + ZXIC_UINT32 addr; +} ZXIC_SOCK_ADDR_T; + +typedef struct zxic_comm_sock_mgr_t { + ZXIC_UINT32 is_init; + ZXIC_UINT32 count; + ZXIC_SOCKET socks[ZXIC_SOCK_NUM_MAX]; + ZXIC_UINT8 sock_vld[ZXIC_SOCK_NUM_MAX]; + ZXIC_MUTEX_T mutex; +} ZXIC_SOCK_MGR_T; + +/* API */ +ZXIC_RTN32 zxic_comm_sock_init(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_sock_service_start(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_sock_service_close(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_sock_create(ZXIC_SOCKET *p_socket, ZXIC_SINT32 domain, + ZXIC_SINT32 type, ZXIC_SINT32 protocol); + +ZXIC_RTN32 zxic_comm_sock_set_opt(ZXIC_SOCKET sock, ZXIC_SINT32 level, + ZXIC_SINT32 opt_name, ZXIC_VOID *p_opt_val, + ZXIC_UINT32 opt_len); + +ZXIC_RTN32 zxic_comm_sock_get_opt(ZXIC_SOCKET sock, ZXIC_SINT32 level, + ZXIC_SINT32 opt_name, ZXIC_VOID *p_opt_val, + ZXIC_UINT32 *p_opt_len); + +ZXIC_RTN32 zxic_comm_sock_bind_listen(ZXIC_SOCKET sock, + ZXIC_SOCK_ADDR_T *p_sock_addr); + +ZXIC_RTN32 zxic_comm_sock_accpet(ZXIC_SOCKET listen_sock, + ZXIC_SOCKET *p_cnnt_sock, + ZXIC_SOCK_ADDR_T *p_sock_addr); + +ZXIC_RTN32 zxic_comm_sock_connect(ZXIC_SOCKET sock, + ZXIC_SOCK_ADDR_T *p_sock_addr); + +ZXIC_SINT32 zxic_comm_sock_send(ZXIC_SOCKET sock, ZXIC_CHAR *p_buf, + ZXIC_SINT32 len, ZXIC_SINT32 flag); + +ZXIC_SINT32 zxic_comm_sock_recv(ZXIC_SOCKET sock, ZXIC_CHAR *p_buf, + ZXIC_SINT32 len, ZXIC_SINT32 flag); + +ZXIC_RTN32 zxic_comm_sock_close(ZXIC_SOCKET sock); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_thread.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_thread.h new file mode 100644 index 000000000000..7b987c91b2ce --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_comm_thread.h @@ -0,0 +1,89 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_comm_thread.h +* 
文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/08 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _ZXIC_COMM_THREAD_H_ +#define _ZXIC_COMM_THREAD_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef ZXIC_OS_LINUX +#include +#include +#endif + +#define THREAD_NAME_MAX (64) +#define ZXIC_THREAD_TIME_INFINITE (0xFFFFFFFF) /* Infinite timeout */ + +typedef ZXIC_VOID *(*ZXIC_THREAD_FUNC)(ZXIC_VOID *); + +/* Thread ID */ +typedef struct zxic_comm_thread_id_t { +#ifdef ZXIC_OS_WIN + HANDLE id; +#else + //pthread_t id; + int id; +#endif +} ZXIC_THREAD_ID_T; + +/* Thread CreateFlag */ +#define ZXIC_THREAD_FLAG_DETACH (1 << 0) +#define ZXIC_THREAD_FLAG_EXPLICIT_SCHED (1 << 1) + +typedef struct zxic_comm_thread_info_t { + ZXIC_CHAR name[THREAD_NAME_MAX]; /* 线程名 */ + ZXIC_UINT32 priority; /* 优先级 */ + ZXIC_UINT32 stack_size; /* 初始栈大小,以字节为单位 */ + ZXIC_UINT32 create_flag; /* 线程标志 */ + ZXIC_THREAD_ID_T id; /* 线程ID */ + ZXIC_THREAD_FUNC thread_func; /* 线程函数 */ + ZXIC_VOID *p_arg; /* 线程入参 */ + ZXIC_UINT32 is_valid; /* 是否有效 */ +} ZXIC_THREAD_INFO_T; + +/* API */ +ZXIC_RTN32 zxic_comm_thread_info_init(ZXIC_VOID); +ZXIC_RTN32 +zxic_comm_thread_info_add(ZXIC_THREAD_ID_T *p_thread_id, + ZXIC_CONST ZXIC_CHAR *p_name, ZXIC_UINT32 priority, + ZXIC_UINT32 stack_size, ZXIC_UINT32 create_flag, + ZXIC_THREAD_FUNC p_thread_func, ZXIC_VOID *p_arg, + ZXIC_UINT32 *p_info_index); +ZXIC_RTN32 zxic_comm_thread_info_del(ZXIC_THREAD_ID_T *p_thread_id); +ZXIC_RTN32 zxic_comm_thread_info_print(ZXIC_VOID); + +ZXIC_RTN32 zxic_comm_thread_create(ZXIC_CONST ZXIC_CHAR *p_name, + ZXIC_UINT32 priority, ZXIC_UINT32 stack_size, + ZXIC_UINT32 create_flag, + ZXIC_THREAD_FUNC thread_func, + ZXIC_VOID *p_arg, + ZXIC_THREAD_ID_T *p_thread_id); + +ZXIC_RTN32 zxic_comm_thread_exit(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_thread_wait(ZXIC_THREAD_ID_T *p_thread_id, + 
ZXIC_DWORD wait_time); +ZXIC_RTN32 zxic_comm_thread_close_handle(ZXIC_THREAD_ID_T *p_thread_id); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_common.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_common.h new file mode 100644 index 000000000000..a6513863201a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_common.h @@ -0,0 +1,3548 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_common.h +* 文件标识 : +* 内容摘要 : 大部分的项目,只需要感知这一个头文件即可, + 其包括了变量定义/打印/日志/参数校验/互斥锁/bit流拼接等常用定义和功能 +* 其它说明 : 编译宏:ZXIC_OS_WIN/ZXIC_RELEASE/MACRO_CPU64 +* 当前版本 : +* 作 者 : +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef __ZXIC_COMMON_H__ +#define __ZXIC_COMMON_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +编译宏说明 +ZXIC_OS_WIN :若不导入该宏,则默认为 ZXIC_OS_LINUX +ZXIC_RELEASE:若不导入该宏,则默认为 ZXIC_DEBUG +MACRO_CPU64 :若不导入该宏,则默认为32位操作系统 +*/ + +#include "zxic_private_top.h" +#include "zxic_private.h" +#include +#include +#include +#include + +#ifdef ZXIC_OS_WIN /* 编译宏导入 */ +#include +#include +#include +#include +#pragma warning(disable : 4996) +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#if ZXIC_REAL("数据类型定义") +/* 无符号整数 */ +#define ZXIC_UINT8 unsigned char +#define ZXIC_UINT16 unsigned short + +#ifndef ZXIC_UINT32 +#define ZXIC_UINT32 unsigned int +#endif + +#ifndef ZXIC_UINT64 +#define ZXIC_UINT64 unsigned long long +#endif +#define ZXIC_DWORD \ + unsigned long /* 慎用,注意:WIN系统:32bits LINUX系统:64bits */ +#define ZXIC_SIZE_T size_t /* 32位系统:uint32 64位系统:uint64 */ + +/* 有符号整数 */ +#define ZXIC_CHAR \ + char /* char默认为unsigned还是signed取决于编译器,定义字符可用 */ +#define ZXIC_SINT8 signed char +#define 
ZXIC_SINT16 signed short + +#ifndef ZXIC_SINT32 +#define ZXIC_SINT32 signed int +#endif + +#ifndef ZXIC_SINT64 +#define ZXIC_SINT64 signed long long +#endif + +#define ZXIC_SDWORD long + +/* 浮点数 */ +//#define ZXIC_FLOAT float +//#define ZXIC_DOUBLE double +//#define ZXIC_LDOUBLE long double +#define ZXIC_FLOAT int +#define ZXIC_DOUBLE long +#define ZXIC_LDOUBLE long long + +/* 文件类型 */ +typedef struct file ZXIC_FILE; + +/* */ +#define ZXIC_VOL volatile +#define ZXIC_VOID void +#define ZXIC_CONST const +#define ZXIC_RTN32 ZXIC_UINT32 + +#ifdef MACRO_CPU64 /* 64位系统编译宏导入,慎重修改 */ +#define ZXIC_ADDR_T ZXIC_UINT64 +#define ZXIC_SIZEOF(x) \ + (sizeof(x) & 0xFFFFFFFFU) /* 在64位环境下,sizeof的长度是64b */ +#define ZXIC_SIZEOF_T(x) ((ZXIC_UINT32)(sizeof(x) & 0xFFFFFFFF)) + +#else +#define ZXIC_ADDR_T ZXIC_UINT32 +#define ZXIC_SIZEOF(x) (sizeof(x)) +#define ZXIC_SIZEOF_T(x) (sizeof(x)) +#endif + +/* 特殊变量 */ +#define ZXIC_NULL (0) +#define ZXIC_OK (0U) /* 后面加U,防止32位系统64位系统常量的默认长度不一致 */ +#define ZXIC_ERR (1U) +#define ZXIC_TRUE (1U) +#define ZXIC_FALSE (0U) +#define ZXIC_UINT8_MAX (0xFFU) +#define ZXIC_UINT32_MAX (0xFFFFFFFFU) +#define ZXIC_ULONG_MAX (0xFFFFFFFFFFFFFFFFUL) +#define ZXIC_SINT_MAX (0x7FFFFFFF) +#define ZXIC_SINT_MIN (-ZXIC_SINT_MAX - 1) + +#define ZXIC_UINT64_MASK (0xFFFFFFFFFFFFFFFFULL) +#define ZXIC_UINT32_MASK (0xFFFFFFFFU) +#define ZXIC_UINT16_MASK (0xFFFFU) +#define ZXIC_UINT8_MASK (0xFFU) +#endif /* END 数据类型定义 */ + +#if ZXIC_REAL("宏函数定义") +#define ZXIC_COMM_MEMCMP ic_comm_memcmp +#define ZXIC_COMM_MEMSET memset +#define ZXIC_COMM_MEMMOVE memmove +#define ZXIC_COMM_MEMSET_S ic_comm_memset_s +#define ZXIC_COMM_MEMCPY ic_comm_memcpy +#define ZXIC_COMM_MEMCPY_S ic_comm_memcpy_s +#define ZXIC_COMM_STRLEN strlen +#define ZXIC_COMM_STRNLEN strnlen +#define ZXIC_COMM_STRNLEN_S ic_comm_strnlen_s +#define ZXIC_COMM_STRCPY strcpy +#define ZXIC_COMM_STRCPY_S ic_comm_strcpy_s +#define ZXIC_COMM_STRNCPY strncpy +#define ZXIC_COMM_STRNCPY_S ic_comm_strncpy_s +#define 
ZXIC_COMM_STRCMP strcmp +#define ZXIC_COMM_STRNCMP ic_comm_strncmp +#define ZXIC_COMM_STRTOK strtok +#define ZXIC_COMM_STRTOK_S ic_comm_strtok_s +#define ZXIC_COMM_STRCAT_S ic_comm_strcat_s +#define ZXIC_COMM_STRNCAT_S ic_comm_strncat_s + +//#define ZXIC_COMM_FOPEN fopen +#define ZXIC_COMM_FOPEN filp_open +//#define ZXIC_COMM_FCLOSE fclose +#define ZXIC_COMM_FCLOSE(a) filp_close(a, NULL) +#define ZXIC_COMM_FGETS fgets +#define ZXIC_COMM_FPUTS fputs +#define ZXIC_COMM_FREAD fread + +// #define ZXIC_COMM_FPRINTF fprintf +// #define ZXIC_COMM_FPRINTF(a,b,c) printk(b,c) +#define ZXIC_COMM_SSCANF ic_comm_sscanf +#define ZXIC_COMM_FSCANF ((void)fscanf) +#define ZXIC_COMM_SNPRINTF_S ic_comm_snprintf_s +#define ZXIC_COMM_VSNPRINTF_S ic_comm_vsnprintf_s + +#define ZXIC_COMM_TIME time +#define ZXIC_COMM_ATOI atoi + +#ifdef ZXIC_OS_WIN +#define ZXIC_COMM_ACCESS _access +#define ZXIC_COMM_SNPRINTF _snprintf +#define ZXIC_COMM_VSNPRINTF _vsnprintf +#define ZXIC_COMM_GETPID _getpid +#else +#define ZXIC_COMM_ACCESS kern_path +/*#define ZXIC_COMM_SNPRINTF snprintf*/ +#define ZXIC_COMM_SNPRINTF(a, b, c...) 
__snprintf_chk(a, b, 0, b, c) +#define ZXIC_COMM_VSNPRINTF vsnprintf +#define ZXIC_COMM_GETPID getpid +#endif + +#ifdef MACRO_CPU64 /* 64位系统编译宏导入,慎重修改 */ +#define ZXIC_COMM_PTR_TO_VAL(p) ((ZXIC_UINT64)(p)) +#define ZXIC_COMM_VAL_TO_PTR(v) ((ZXIC_VOID *)((ZXIC_UINT64)(v))) +#define ZXIC_SSIZE_T ZXIC_SINT64 +#else +#define ZXIC_COMM_PTR_TO_VAL(p) ((ZXIC_UINT32)(p)) +#define ZXIC_COMM_VAL_TO_PTR(v) ((ZXIC_VOID *)(long)((ZXIC_UINT32)(v))) +#define ZXIC_SSIZE_T ZXIC_SINT32 + +#endif + +#ifdef ZXIC_OS_WIN +#define ZXIC_COMM_STRCASECMP stricmp +#else +#define ZXIC_COMM_STRCASECMP strcasecmp +#endif + +#define ZXIC_COMM_FFLUSH ((ZXIC_VOID)fflush) +#define ZXIC_COMM_SPRINTF ((ZXIC_VOID)sprintf) + +#ifdef ZXIC_RELEASE /* 编译宏控制,默认编译为DEBUG版本 */ +#define ZXIC_COMM_ASSERT(x) +#else +#ifdef ZXIC_FOR_FUZZER +#define ZXIC_COMM_ASSERT(x) +#else +#define ZXIC_COMM_ASSERT(x) +#endif +#endif + +#endif /* END 宏函数定义 */ + +#define ZXIC_COMM_MEMORY_MAX_B_SIZE (200 * 1024 * 1024) /* 200M */ +#define ZXIC_COMM_STRNLEN_MAX (0xffffffff) + +#if ZXIC_REAL("打印-print") +#if ZXIC_REAL("开关") +ZXIC_VOID zxic_comm_set_print_en(ZXIC_UINT32 enable); +ZXIC_RTN32 zxic_comm_get_print_en(ZXIC_VOID); +ZXIC_VOID zxic_comm_set_print_level(ZXIC_TRACE_LEVEL debug_level); +ZXIC_RTN32 zxic_comm_get_print_level(ZXIC_VOID); +#endif +#if ZXIC_REAL("功能") +ZXIC_VOID ZXIC_COMM_PRINT(ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_ERROR(ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_NOTICE(ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_INFO(ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_DEBUG(ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_ALL(ZXIC_CONST ZXIC_CHAR *format, ...); + +ZXIC_VOID ZXIC_COMM_TRACE_DEV_ERROR(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_DEV_NOTICE(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_DEV_INFO(ZXIC_UINT32 dev_id, + ZXIC_CONST 
ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_DEV_DEBUG(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...); +ZXIC_VOID ZXIC_COMM_TRACE_DEV_ALL(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...); + +ZXIC_VOID ZXIC_COMM_DBGCNT64_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT64 value); +ZXIC_VOID ZXIC_COMM_DBGCNT32_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT32 value); +ZXIC_VOID ZXIC_COMM_DBGCNT32_PAR_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT32 parm, ZXIC_UINT32 value); + +#endif +#endif + +#if ZXIC_REAL("互斥锁") +ZXIC_RTN32 zxic_comm_mutex_create(ZXIC_MUTEX_T *p_mutex); +ZXIC_RTN32 zxic_comm_mutex_lock(ZXIC_MUTEX_T *p_mutex); +ZXIC_RTN32 zxic_comm_mutex_unlock(ZXIC_MUTEX_T *p_mutex); +ZXIC_RTN32 zxic_comm_mutex_destroy(ZXIC_MUTEX_T *p_mutex); +#endif + +#if ZXIC_REAL("信号量") +ZXIC_RTN32 zxic_comm_sem_create(ZXIC_SEM_T *p_sem, ZXIC_SINT32 share, + ZXIC_SINT32 IniCount, ZXIC_SINT32 MaxCount); +ZXIC_RTN32 zxic_comm_sem_release(ZXIC_SEM_T *p_sem); +ZXIC_RTN32 zxic_comm_sem_wait(ZXIC_SEM_T *p_sem); +#endif + +#if ZXIC_REAL("参数检查") + +#if ZXIC_REAL("NO DEV_ID & ASSERT") + +#define ZXIC_COMM_CHECK_RC(rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE(rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP 
%s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_FREE(point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_VFREE(point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_VFREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_POINT_EMEMORY_FREE2PTR(point, ptr0, ptr1) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr0); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_POINT_RETURN_NULL(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX(val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_EQUAL(val, equal) \ + do { \ + if (val == equal) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [equal=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, equal, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_EQUAL_RETURN_OK(val, equal) \ + do { \ + if (val == equal) { \ + return ZXIC_OK; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_NOT_EQUAL(val, equal) \ + do { \ + if (val != equal) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [equal=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, equal, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_NOT_EQUAL_UNLOCK(val, equal, mutex) \ + do { \ + if (val != equal) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [equal=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, equal, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER(val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER(val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_UNLOCK(val, min, mutex) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_MEMORY_FREE(val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_MEMORY_FREE(val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_MEMORY_FREE(val, min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_MEMORY_FREE(val0, val1, ptr) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_64(val0, val1) \ + do { \ + if ((ZXIC_ULONG_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_UNLOCK(val0, val1, mutex) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_64(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_UNLOCK(val0, val1, mutex) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_64(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_ULONG_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_UNLOCK(val0, val1, mutex) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_RETURN_NULL(val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_UNLOCK(rc, becall, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_CLOSE_FP(rc, becall, fp) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_CLOSE_FP(point, fp) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_RETURN(val0, val1) \ + do { \ + if ((0xFFFFFFFF - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_RETURN(val0, val1) \ + do { \ + if (val0 < val1) { \ + ZXIC_COMM_TRACE_ERROR( \ + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_CLOSE_FP_NO_ASSERT( \ + dev_id, val0, val1, fp) \ + do { \ + if ((0xFFFFFFFF - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ICM %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_CLOSE_FP_NO_ASSERT( \ + dev_id, val0, val1, fp) \ + do { \ + if ((val0 > 0) && ((0xFFFFFFFF / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ICM %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#endif + +#if ZXIC_REAL("NO DEV_ID & NO ASSERT") +#define ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_NO_ASSERT_UNLOCK(rc, becall, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT(rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT(rc, becall, 
ptr, \ + mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE_UNLOCK_NO_ASSERT(rc, becall, ptr, \ + mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_NO_ASSERT(rc, becall, ptr1, ptr2) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_NO_ASSERT(rc, becall, ptr1, ptr2) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr1); \ + ZXIC_COMM_VFREE(ptr2); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_UNLOCK_NO_ASSERT(rc, becall, ptr1, \ + ptr2, mutex) \ + do { \ + if 
(ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT(rc, becall, ptr1, \ + ptr2, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr1); \ + ZXIC_COMM_VFREE(ptr2); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_FREE3PTR_NO_ASSERT(rc, becall, ptr1, ptr2, \ + ptr3) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_MEMORY_VFREE3PTR_NO_ASSERT(rc, becall, ptr1, ptr2, \ + ptr3) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr1); \ + ZXIC_COMM_VFREE(ptr2); \ + ZXIC_COMM_VFREE(ptr3); \ + return rc; \ + } \ + } while (0) + 
+#define ZXIC_COMM_CHECK_RC_MEMORY_FREE3PTR_UNLOCK_NO_ASSERT(rc, becall, ptr1, \ + ptr2, ptr3, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_NO_ASSERT(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_CLOSE_FP_NO_ASSERT(point, fp) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_FREE_NO_ASSERT(point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_VFREE(ptr); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_FREE2PTR_NO_ASSERT(point, ptr1, ptr2) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT(point, ptr1, ptr2) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_VFREE(ptr1); \ + ZXIC_COMM_VFREE(ptr2); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_MEMORY_FREE3PTR_NO_ASSERT(point, ptr1, ptr2, \ + ptr3) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_NO_ASSERT(val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_NO_ASSERT_UNLOCK(val, min, max, mutex) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_UPPER_NO_ASSERT(val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_UPPER_NO_ASSERT_UNLOCK(val, max, mutex) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_MEMORY_FREE_NO_ASSERT(val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_LOWER_NO_ASSERT(val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_LOWER_NO_ASSERT_UNLOCK(val, min, mutex) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_LOWER_MEMORY_FREE_NO_ASSERT(val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_NO_ASSERT(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_BOTH_NO_ASSERT_UNLOCK(val, min, max, mutex) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_MEMORY_FREE_NO_ASSERT(val, min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_64_NO_ASSERT(val0, val1) \ + do { \ + if ((ZXIC_ULONG_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT_UNLOCK(val0, val1, mutex) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT_UNLOCK(val0, val1, mutex) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT_UNLOCK(val0, val1, mutex) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_RETURN_VOID_NO_ASSERT(val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_RETURN_NULL_NO_ASSERT(val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_CLOSE_FP_NO_ASSERT(val, min, max, fp) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(val, min, max, ptr) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MEMORY_FREE2PTR_NO_ASSERT(val, min, max, ptr1, \ + ptr2) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MEMORY_FREE3PTR_NO_ASSERT(val, min, max, ptr1, \ + ptr2, ptr3) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#endif + +#if ZXIC_REAL("DEV_ID & ASSERT") + +#define ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_RC_NULL(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_UNLOCK(dev_id, rc, becall, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_CLOSE_FP(dev_id, rc, becall, fp) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, 
__LINE__, __FUNCTION__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE(dev_id, rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_POINT_EMEMORY_FREE2PTR(dev_id, point, ptr0, ptr1) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr0); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_POINT_RETURN_NULL(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, point, mutex) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_CLOSE_FP(dev_id, point, fp) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX(dev_id, val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER(dev_id, val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER(dev_id, val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH(dev_id, val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_MEMORY_FREE(dev_id, val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_MEMORY_FREE(dev_id, val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_MEMORY_FREE(dev_id, val, min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_RETURN_NULL(dev_id, val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_ID(dev_id) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - \ + 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW(dev_id, val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64(dev_id, val0, val1) \ + do { \ + if ((ZXIC_ULONG_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW(dev_id, val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW(dev_id, val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_64(dev_id, val0, val1) \ + do { \ + if (((val0) > 0) && \ + ((0xFFFFFFFFFFFFFFFF / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#endif + +#if ZXIC_REAL("DEV_ID & NO ASSERT") +#define ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(dev_id, rc, becall, mutex) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_CLOSE_FP_NO_ASSERT(dev_id, rc, becall, fp) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, 
__LINE__, __FUNCTION__); \ + } \ + return rc; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT(dev_id, rc, becall, ptr) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr); \ + return rc; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE2PTR_NO_ASSERT(dev_id, rc, becall, \ + ptr1, ptr2) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return rc; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE3PTR_NO_ASSERT(dev_id, rc, becall, \ + ptr1, ptr2, ptr3) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_MEMORY_VFREE3PTR_NO_ASSERT(dev_id, rc, becall, \ + ptr1, ptr2, ptr3) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_VFREE(ptr1); \ + ZXIC_COMM_VFREE(ptr2); \ + ZXIC_COMM_VFREE(ptr3); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_NO_ASSERT(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_RETURN_NULL_NO_ASSERT(dev_id, point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_CLOSE_FP_NO_ASSERT(dev_id, point, fp) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + if (ZXIC_COMM_FCLOSE(fp)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d !-- %s close file Fail!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + } \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE_NO_ASSERT(dev_id, point, ptr) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE2PTR_NO_ASSERT(dev_id, point, \ + ptr1, ptr2) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE3PTR_NO_ASSERT(dev_id, point, \ + ptr1, ptr2, ptr3) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT_UNLOCK(dev_id, val, min, max, \ + mutex) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_NO_ASSERT(dev_id, val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_NO_ASSERT_UNLOCK(dev_id, val, max, \ + mutex) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_MEMORY_FREE_NO_ASSERT(dev_id, val, \ + max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_NO_ASSERT(dev_id, val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_NO_ASSERT_UNLOCK(dev_id, val, min, \ + mutex) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_MEMORY_FREE_NO_ASSERT(dev_id, val, \ + min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_NO_ASSERT(dev_id, val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_NO_ASSERT_UNLOCK(dev_id, val, min, max, \ + mutex) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_MEMORY_FREE_NO_ASSERT(dev_id, val, min, \ + max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_RETURN_NULL_NO_ASSERT(dev_id, val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MEMORY_FREE_NO_ASSERT(dev_id, val, min, max, \ + ptr) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_MEMORY_FREE2PTR_NO_ASSERT(dev_id, val, min, \ + max, ptr1, ptr2) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_MEMORY_FREE3PTR_NO_ASSERT( \ + dev_id, val, min, max, ptr1, ptr2, ptr3) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr1); \ + ZXIC_COMM_FREE(ptr2); \ + ZXIC_COMM_FREE(ptr3); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_ID_NO_ASSERT(dev_id) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - \ + 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_ID_RETURN_NULL_NO_ASSERT(dev_id) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(dev_id, 0, \ + zxic_comm_channel_max_get() - \ + 1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, dev_id, 0, \ + zxic_comm_channel_max_get() - 1, \ + __FUNCTION__); \ + return ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT_UNLOCK(dev_id, val0, \ + val1, mutex) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT_UNLOCK(dev_id, val0, \ + val1, mutex) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64_NO_ASSERT(dev_id, val0, \ + val1) \ + do { \ + if ((ZXIC_ULONG_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT_UNLOCK(dev_id, val0, \ + val1, mutex) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "File: [%s],Function:[%s],Line:%u mutex unlock failed!-->Return ERROR\n", \ + __FILE__, __FUNCTION__, __LINE__); \ + } \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_64_NO_ASSERT(dev_id, val0, \ + val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_ULONG_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#endif +#if ZXIC_REAL("return no code") +#define ZXIC_COMM_CHECK_RC_RETURN_NONE(rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +/* 不带返回值,用于VOID类型函数 */ +#define ZXIC_COMM_CHECK_POINT_RETURN_NONE(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_RETURN_NONE(val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_RETURN_NONE(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_RETURN_NONE(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_RETURN_NONE(val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_RETURN_NONE(val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_RETURN_NONE(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_MEMORY_FREE_RETURN_NONE(val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_MEMORY_FREE_RETURN_NONE(val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_MEMORY_FREE_RETURN_NONE(val, min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_RETURN_NONE(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_RETURN_NONE(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_RETURN_NONE(dev_id, val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_RETURN_NONE(dev_id, val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_RETURN_NONE(dev_id, val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_MEMORY_FREE_RETURN_NONE(dev_id, val, \ + max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_MEMORY_FREE_RETURN_NONE(dev_id, val, \ + min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_MEMORY_FREE_RETURN_NONE(dev_id, val, \ + min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_RETURN_NONE(dev_id, val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64_RETURN_NONE(dev_id, val0, \ + val1) \ + do { \ + if ((ZXIC_ULONG_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%016llx] INVALID] [val1=0x%016llx] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_RETURN_NONE(dev_id, val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_RETURN_NONE(dev_id, val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return; \ + } \ + } while (0) +#endif + +#if ZXIC_REAL("return no code & no assert") +#define ZXIC_COMM_CHECK_RC_RETURN_NONE_NO_ASSERT(rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + return; \ + } \ + } while (0) + +/* 不带返回值,用于VOID类型函数 */ +#define ZXIC_COMM_CHECK_POINT_RETURN_NONE_NO_ASSERT(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_RETURN_NONE_NO_ASSERT(val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_RETURN_NONE_NO_ASSERT(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_RETURN_NONE_NO_ASSERT(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_RETURN_NONE_NO_ASSERT(val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_RETURN_NONE_NO_ASSERT(val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_RETURN_NONE_NO_ASSERT(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_MEMORY_FREE_RETURN_NONE_NO_ASSERT( \ + val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_MEMORY_FREE_RETURN_NONE_NO_ASSERT( \ + val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_MEMORY_FREE_RETURN_NONE_NO_ASSERT(val, min, \ + max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_RETURN_NONE_NO_ASSERT(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_RETURN_NONE_NO_ASSERT(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_RETURN_NONE_NO_ASSERT(dev_id, val, \ + max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_RETURN_NONE_NO_ASSERT(dev_id, val, \ + min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_RETURN_NONE_NO_ASSERT(dev_id, val, min, \ + max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_MEMORY_FREE_RETURN_NONE_NO_ASSERT( \ + dev_id, val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_MEMORY_FREE_RETURN_NONE_NO_ASSERT( \ + dev_id, val, min, ptr) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_MEMORY_FREE_RETURN_NONE_NO_ASSERT( \ + dev_id, val, min, max, ptr) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + ZXIC_COMM_FREE(ptr); \ + return; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_RETURN_NONE_NO_ASSERT( \ + dev_id, val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_RETURN_NONE_NO_ASSERT( \ + dev_id, val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_RETURN_NONE_NO_ASSERT( \ + dev_id, val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + return; \ + } \ + } while (0) +#endif + +#if ZXIC_REAL("no return") +#define ZXIC_COMM_CHECK_RC_NONE(rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) + +/* 不带返回值,用于VOID类型函数 */ +#define ZXIC_COMM_CHECK_POINT_NONE(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) + +/* 不带返回值,用于VOID类型函数 */ +#define ZXIC_COMM_CHECK_INDEX_NONE(val, min, max) \ + do { \ + if (ZXIC_OK != zxic_comm_index_check(val, min, max)) \ + ; \ + { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_UPPER_NONE(val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NONE(val0, val1) \ + do { \ + if ((ZXIC_UINT32_MAX - (val0)) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NONE(val0, val1) \ + do { \ + if ((val0) < (val1)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NONE(val0, val1) \ + do { \ + if (((val0) > 0) && ((ZXIC_UINT32_MAX / (val0)) < (val1))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val0, val1, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_LOWER_NONE(val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_BOTH_NONE(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_RC_NONE(dev_id, rc, becall) \ + do { \ + if (ZXIC_OK != rc) { \ + if (ZXIC_OK != zxic_comm_errcode_check(rc)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, \ + becall); \ + } \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_POINT_NONE(dev_id, point) \ + do { \ + if (NULL == (point)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, val, min, max) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_dev_index_check(dev_id, val, min, max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_UPPER_NONE(dev_id, val, max) \ + do { \ + if ((val) > (max)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [max=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, max, __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_LOWER_NONE(dev_id, val, min) \ + do { \ + if ((val) < (min)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x] ! FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_INDEX_BOTH_NONE(dev_id, val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", \ + __FILE__, __LINE__, val, min, max, \ + __FUNCTION__); \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_DEV_RC_INT(dev_id, check_rc, rc, becall) \ + do { \ + if (check_rc < 0) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n ICM %s:%d [ErrorCode:0x%x] [rc:0x%x] !-- %s Call %s Fail!\n", \ + __FILE__, __LINE__, check_rc, rc, \ + __FUNCTION__, becall); \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#endif +#if ZXIC_REAL("no print") +#define ZXIC_COMM_CHECK_RC_NO_PRINT(rc, error_code) \ + do { \ + if (ZXIC_OK != rc) { \ + ZXIC_COMM_ASSERT(0); \ + return error_code; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_RC_UNLOCK_NO_PRINT(rc, p_mutex, error_code) \ + do { \ + if (ZXIC_OK != rc) { \ + (ZXIC_VOID) zxic_comm_mutex_unlock(p_mutex); \ + ZXIC_COMM_ASSERT(0); \ + return error_code; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_POINT_NO_PRINT(point) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_POINT_NO_PRINT_UNLOCK(point, p_mutex) \ + do { \ + if (ZXIC_NULL == (point)) { \ + (ZXIC_VOID) zxic_comm_mutex_unlock(p_mutex); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_POINT_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_RC_POINT_NO_PRINT(point, rc) \ + do { \ + if (ZXIC_NULL == (point)) { \ + ZXIC_COMM_ASSERT(0); \ + return rc; \ + } \ + } while (0) + +#define ZXIC_COMM_CHECK_INDEX_NO_PRINT_UNLOCK(val, min, max, p_mutex) \ + do { \ + if (ZXIC_PAR_CHK_INVALID_INDEX == \ + zxic_comm_index_check(val, min, max)) { \ + (ZXIC_VOID) zxic_comm_mutex_unlock(p_mutex); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } else if (ZXIC_PAR_CHK_INVALID_RANGE == \ + zxic_comm_index_check(val, min, max)) { \ + (ZXIC_VOID) zxic_comm_mutex_unlock(p_mutex); \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_RANGE; \ + } \ + } while (0) +#define ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_PRINT_UNLOCK(val0, val1, 
mutex) \ + do { \ + if ((val0) < (val1)) { \ + if (0 != zxic_comm_mutex_unlock(mutex)) { \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_PARA; \ + } \ + ZXIC_COMM_ASSERT(0); \ + return ZXIC_PAR_CHK_INVALID_INDEX; \ + } \ + } while (0) + +#endif +#endif /* ZXIC_REAL("参数检查") */ + +//#ifdef ZXIC_FOR_LLT +#if ZXIC_REAL("UT_TEST") +#define ZXIC_CHECK_DEV_UT_RC(dev_id, rc, val, becall) \ + do { \ + if (val != rc) { \ + ZXIC_COMM_PRINT( \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, becall); \ + zxic_comm_ut_detail_trace_dev_error( \ + dev_id, \ + "\n ZXICP %s:%d [ErrorCode:0x%x], %s Call %s Fail!\n", \ + __FILE__, __LINE__, rc, __FUNCTION__, becall); \ + return ZXIC_E_LLT_CHECK; \ + } \ + } while (0) +#endif + +#if ZXIC_REAL("字节序") +/* DWORD变量字节序转换 */ +#define ZXIC_COMM_CONVERT32(dw_data) \ + ((((dw_data)&0xff) << 24) | (((dw_data)&0xff00) << 8) | \ + (((dw_data)&0xff0000) >> 8) | (((dw_data)&0xff000000) >> 24)) +/* WORD变量字节序转换 */ +#define ZXIC_COMM_CONVERT16(w_data) \ + ((((w_data)&0xff) << 8) | (((w_data)&0xff00) >> 8)) + +/* WORD变量字节序转换 */ +#define ZXIC_COMM_CONVERT32_16b(w_data) \ + ((((w_data)&0xffff) << 16) | (((w_data)&0xffff0000) >> 16)) + +ZXIC_VOID zxic_comm_swap_en_set(ZXIC_UINT32 enable); +ZXIC_RTN32 zxic_comm_swap_en_get(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_is_big_endian(ZXIC_VOID); +ZXIC_RTN32 zxic_comm_endian_prt(ZXIC_VOID); +ZXIC_VOID zxic_comm_swap(ZXIC_UINT8 *p_uc_data, ZXIC_UINT32 dw_byte_len); +ZXIC_VOID zxic_comm_swap_16b(ZXIC_UINT8 *p_uc_data, ZXIC_UINT32 dw_byte_len); +ZXIC_UINT64 ZXIC_COMM_COUNTER64_BUILD(ZXIC_UINT32 hi, ZXIC_UINT32 lo); +#endif + +#if ZXIC_REAL("内存") +ZXIC_UINT32 zxic_comm_get_malloc_num(ZXIC_VOID); +ZXIC_UINT32 zxic_comm_get_malloc_size(ZXIC_VOID); +ZXIC_VOID zxic_clr_malloc_num(ZXIC_VOID); + +#define ZXIC_COMM_FREE(p_data) \ + do { \ + if (p_data != NULL) { \ + ic_comm_free_record(); \ + kfree(p_data); \ + p_data = ZXIC_NULL; \ + } \ + } while (0) + +#define 
ZXIC_COMM_VFREE(p_data) \ + do { \ + if (p_data != NULL) { \ + ic_comm_vfree_record(); \ + vfree(p_data); \ + p_data = ZXIC_NULL; \ + } \ + } while (0) + +#define ZXIC_COMM_MALLOC(size) ic_comm_malloc_memory(size) + +#define ZXIC_COMM_VMALLOC(size) ic_comm_vmalloc_memory(size) + +#define ZXIC_COMM_ALLOC_MEMORY(ptr, size) \ + do { \ + (ptr) = ZXIC_COMM_MALLOC(size); \ + ZXIC_COMM_CHECK_POINT(ptr); \ + ZXIC_COMM_MEMSET((ptr), 0, size); \ + } while (0) +#define ZXIC_COMM_ALLOC_MEMORY_DEV(dev_id, ptr, size) \ + do { \ + (ptr) = ZXIC_COMM_MALLOC(size); \ + ZXIC_COMM_CHECK_DEV_POINT(dev_id, ptr); \ + ZXIC_COMM_MEMSET((ptr), 0, size); \ + } while (0) + +#endif + +#if ZXIC_REAL("延时") +// ZXIC_VOID zxic_comm_sleep(ZXIC_UINT32 milliseconds); +ZXIC_VOID zxic_comm_msleep(ZXIC_UINT32 millisecond); +ZXIC_VOID zxic_comm_udelay(ZXIC_UINT32 microseconds); +ZXIC_VOID zxic_comm_delay(ZXIC_UINT32 milliseconds); +ZXIC_DOUBLE zxic_comm_get_ticks_s(ZXIC_VOID); +ZXIC_DOUBLE zxic_comm_get_ticks_ms(ZXIC_VOID); +ZXIC_DOUBLE zxic_get_ticks_uses(ZXIC_VOID); +#endif + +#if ZXIC_REAL("bit操作") + +/* ZXIC_UINT32 写某几bit的值 */ +#define ZXIC_COMM_MASK_BIT(intType, _bitNum_) ((intType)(0x1U << (_bitNum_))) + +#define ZXIC_COMM_GET_BIT_MASK(_intType_, _bitQnt_) \ + ((_intType_)(((_bitQnt_) < 32) ? 
\ + ((_intType_)ZXIC_COMM_MASK_BIT( \ + _intType_, ((_bitQnt_)&0x1F)) - \ + 1) : \ + ((_intType_)(0xffffffff)))) + +#define ZXIC_COMM_UINT32_WRITE_BITS(_uiDst_, _uiSrc_, _uiStartPos_, _uiLen_) \ + do { \ + (_uiDst_) = ((_uiDst_) & \ + ~(ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, (_uiLen_)) \ + << (_uiStartPos_))) | \ + (((_uiSrc_)&ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, \ + (_uiLen_))) \ + << (_uiStartPos_)); \ + } while (0) + +#define ZXIC_COMM_UINT32_WRITE_BITS_ZERO(_uiDst_, _uiStartPos_, _uiLen_) \ + do { \ + (_uiDst_) = ((_uiDst_) & \ + ~(ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, (_uiLen_)) \ + << (_uiStartPos_))); \ + } while (0) + +#define ZXIC_COMM_UINT32_GET_BITS(_uiDst_, _uiSrc_, _uiStartPos_, _uiLen_) \ + do { \ + (_uiDst_) = \ + (((_uiSrc_) >> (_uiStartPos_)) & \ + (ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, (_uiLen_)))); \ + } while (0) + +#define ZXIC_COMM_UINT32_GET_RETURN_BITS(_uiSrc_, _uiStartPos_, _uiLen_) \ + (((_uiSrc_) >> (_uiStartPos_)) & \ + (ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, (_uiLen_)))) + +/* ZXIC_UINT64 写某几bit的值 */ +#define ZXIC_COMM_MASK_BIT_64(intType, _bitNum_) \ + ((intType)(0x1ULL << (_bitNum_))) + +#define ZXIC_COMM_GET_64_BIT_MASK(_intType_, _bitQnt_) \ + ((_intType_)(((_bitQnt_) < 64) ? 
\ + ((_intType_)ZXIC_COMM_MASK_BIT_64( \ + _intType_, ((_bitQnt_)&0x3F)) - \ + 1) : \ + ((_intType_)(0xFFFFFFFFFFFFFFFFULL)))) + +#define ZXIC_COMM_UINT64_WRITE_BITS(_uiDst_, _uiSrc_, _uiStartPos_, _uiLen_) \ + do { \ + (_uiDst_) = \ + ((_uiDst_) & \ + ~(ZXIC_COMM_GET_64_BIT_MASK(ZXIC_UINT64, (_uiLen_)) \ + << (_uiStartPos_))) | \ + (((_uiSrc_)&ZXIC_COMM_GET_64_BIT_MASK(ZXIC_UINT64, \ + (_uiLen_))) \ + << (_uiStartPos_)); \ + } while (0) + +/* Base type for declarations */ +#define ZXIC_COMM_BITDCL ZXIC_UINT32 +#define ZXIC_COMM_BITWID (32) + +/* (internal) Number of ZXICP_BITDCLs needed to contain _max bits */ +#define NPE_BITDCLSIZE(_max) \ + (((_max) + ZXIC_COMM_BITWID - 1) / ZXIC_COMM_BITWID) + +/* Size for giving to malloc and memset to handle _max bits */ +#define ZXIC_COMM_BITALLOCSIZE(_max) \ + (NPE_BITDCLSIZE(_max) * sizeof(ZXIC_COMM_BITDCL)) + +/* (internal) Generic operation macro on bit array _a, with bit _b */ +#define NPE_BITOP(_a, _b, _op) \ + (((_a)[(_b) / ZXIC_COMM_BITWID]) _op(1U << ((_b) % ZXIC_COMM_BITWID))) + +/* Specific operations */ +#define ZXIC_COMM_BITGET(_a, _b) NPE_BITOP(_a, _b, &) +#define ZXIC_COMM_BITSET(_a, _b) NPE_BITOP(_a, _b, |=) +#define ZXIC_COMM_BITCLR(_a, _b) NPE_BITOP(_a, _b, &= ~) + +ZXIC_RTN32 zxic_comm_bit_count(ZXIC_UINT32 data); +ZXIC_RTN32 zxic_comm_read_bits(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 *p_data, ZXIC_UINT32 start_bit, + ZXIC_UINT32 end_bit); +ZXIC_RTN32 zxic_comm_write_bits(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 data, ZXIC_UINT32 start_bit, + ZXIC_UINT32 end_bit); +ZXIC_RTN32 zxic_comm_write_bits_ex(ZXIC_UINT8 *p_base, + ZXIC_UINT32 base_size_bit, ZXIC_UINT32 data, + ZXIC_UINT32 msb_start_pos, ZXIC_UINT32 len); +ZXIC_RTN32 zxic_comm_read_bits_ex(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 msb_start_pos, ZXIC_UINT32 len); +ZXIC_RTN32 zxic_comm_write_bits_op(ZXIC_UINT8 *p_src_dat, + ZXIC_UINT32 src_size_bit, + ZXIC_UINT32 
input_data, + ZXIC_UINT32 start_bit, ZXIC_UINT32 end_bit); +ZXIC_RTN32 zxic_comm_read_bits_op(ZXIC_UINT8 *p_src_dat, + ZXIC_UINT32 src_size_bit, + ZXIC_UINT32 *p_out_data, + ZXIC_UINT32 start_bit, ZXIC_UINT32 end_bit); +#endif + +#if ZXIC_REAL("算数运算") +ZXIC_UINT64 zxic_comm_get_gcd(ZXIC_UINT64 a, ZXIC_UINT64 b); +ZXIC_RTN32 zxic_comm_multi_big_integer(ZXIC_CONST ZXIC_CHAR *num1, + ZXIC_CONST ZXIC_CHAR *num2, + ZXIC_CHAR *str_num); +ZXIC_RTN32 zxic_comm_div_big_integer(ZXIC_CONST ZXIC_CHAR *num1, + ZXIC_CONST ZXIC_CHAR *num2, + ZXIC_UINT32 *quo_val); +ZXIC_SINT32 zxic_comm_sub_stract(ZXIC_SINT32 *p1, ZXIC_SINT32 *p2, + ZXIC_SINT32 len1, ZXIC_SINT32 len2); +ZXIC_SINT32 zxic_comm_cmpm_calc(ZXIC_UINT64 cm_cal, ZXIC_UINT64 pm_cal, + ZXIC_UINT32 *cm, ZXIC_UINT32 *pm); +ZXIC_VOID zxic_comm_pm_cm_cal(ZXIC_UINT64 cm_y, ZXIC_UINT64 pm_y, + ZXIC_UINT32 *cm, ZXIC_UINT32 *pm); +#endif + +#if ZXIC_REAL("字符串") +ZXIC_RTN32 zxic_comm_strcasecmp(ZXIC_CHAR *str1, ZXIC_CHAR *str2); +ZXIC_UINT8 zxic_comm_char_to_hex(ZXIC_UINT8 c); +ZXIC_DWORD zxic_comm_ipaddr_to_dword(ZXIC_CONST ZXIC_CHAR *p_addr); +ZXIC_RTN32 zxic_comm_char_to_number(ZXIC_CHAR a, ZXIC_CHAR b, + ZXIC_UINT8 *number); +ZXIC_CHAR *zxic_comm_strlower(ZXIC_CHAR *str); +ZXIC_RTN32 ic_comm_check_str_size(ZXIC_CHAR *str); +ZXIC_SIZE_T ic_comm_getAbsValue(ZXIC_UINT8 *dest, ZXIC_CONST ZXIC_UINT8 *src); +ZXIC_VOID ic_comm_memset_s(void *dest, ZXIC_SIZE_T dmax, ZXIC_UINT8 c, + ZXIC_SIZE_T n); +ZXIC_SINT32 ic_comm_memcmp(void *str1, const void *str2, ZXIC_SIZE_T n); +ZXIC_SINT32 ic_comm_strncmp(const ZXIC_CHAR *str1, const ZXIC_CHAR *str2, + ZXIC_SIZE_T n); + +#endif + +#if ZXIC_REAL("dma内存分配") +#define ZXIC_DMA_PHY_ADDR dma_addr_t +ZXIC_RTN32 zxic_comm_dma_mem_malloc(ZXIC_ADDR_T *vir_addr, + ZXIC_ADDR_T *phy_addr, + ZXIC_UINT32 dma_size); +ZXIC_RTN32 zxic_comm_dma_mem_free(ZXIC_ADDR_T vir_addr, ZXIC_ADDR_T phy_addr, + ZXIC_UINT32 dma_size); +#endif + +#if ZXIC_REAL("OTHER") +#define MIN_VAL(x, y) ((x) <= (y) ? 
(x) : (y)) +#define MAX_VAL(x, y) ((x) <= (y) ? (y) : (x)) +#define ZXIC_COMM_DM_TO_X(d, m) ((d) & ~(m)) +#define ZXIC_COMM_DM_TO_Y(d, m) (~(d) & ~(m)) +#define ZXIC_COMM_XY_TO_MASK(x, y) (~(x) & ~(y)) +#define ZXIC_COMM_XY_TO_DATA(x, y) (x) /* valid only when mask is 0 */ +#define ZXIC_RD_CNT_MAX (50) + +/* 新增常用数据掩码 */ +#define ZXIC_COMM_WORD64_MASK (0xFFFFFFFFFFFFFFFFULL) +#define ZXIC_COMM_WORD32_MASK (0xFFFFFFFFU) +#define ZXIC_COMM_WORD16_MASK (0xFFFFU) +#define ZXIC_COMM_BYTE_MASK (0xFFU) + +ZXIC_RTN32 ZXIC_COMM_GET_MASK_VALUE(ZXIC_UINT32 total, ZXIC_UINT32 masklen); +ZXIC_UINT32 zxic_comm_random(ZXIC_VOID); +ZXIC_VOID zxic_comm_channel_max_set(ZXIC_UINT32 dev_max); + +ZXIC_RTN32 zxic_comm_channel_max_get(ZXIC_VOID); + +ZXIC_VOID zxic_comm_dbgcnt64_select_print(const ZXIC_CHAR *name, + ZXIC_UINT64 value, + ZXIC_UINT32 prt_mode); + +ZXIC_VOID zxic_comm_dbgcnt64_select_par_print(const ZXIC_CHAR *name, + ZXIC_UINT32 parm, + ZXIC_UINT64 value, + ZXIC_UINT32 prt_mode); + +ZXIC_VOID zxic_comm_dbgcnt32_select_print(const ZXIC_CHAR *name, + ZXIC_UINT32 value, + ZXIC_UINT32 prt_mode); + +ZXIC_VOID zxic_comm_dbgcnt32_select_par_print(const ZXIC_CHAR *name, + ZXIC_UINT32 parm, + ZXIC_UINT32 value, + ZXIC_UINT32 prt_mode); + +#define ZXIC_COMM_CHECK_ADD_CNT(x, y) \ + (((0xffffffff - (x)) < (y)) ? ((y) - (0xffffffff - (x)) - 1) : \ + ((x) + (y))) +#define ZXIC_COMM_CHECK_ADD_CNT_WORD64(x, y) \ + (((0xffffffffffffffff - (x)) < (y)) ? 
\ + ((y) - (0xffffffffffffffff - (x)) - 1) : \ + ((x) + (y))) +#endif + +#if ZXIC_REAL("UT_TEST") + +ZXIC_VOID zxic_comm_ut_detail_info_trace(const ZXIC_CHAR *format, ...); + +ZXIC_VOID zxic_comm_ut_result_info_trace(const ZXIC_CHAR *format, ...); + +ZXIC_VOID zxic_comm_ut_detail_trace_error(const ZXIC_CHAR *format, ...); + +ZXIC_VOID zxic_comm_ut_detail_trace_dev_error(ZXIC_UINT32 dev_id, + const ZXIC_CHAR *format, ...); + +#endif + +#if ZXIC_REAL("头文件") +#include "zxic_comm_double_link.h" +#include "zxic_comm_doublelink_index.h" +#include "zxic_comm_liststack.h" +#include "zxic_comm_avl_tree.h" +#include "zxic_comm_rb_tree.h" +#include "zxic_comm_index_ctrl.h" +#include "zxic_comm_index_reserve.h" +#include "zxic_comm_index_fill.h" +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* end __ZXIC_COMMON_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private.h new file mode 100644 index 000000000000..ff5803a21c79 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private.h @@ -0,0 +1,114 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : +* 文件标识 : +* 内容摘要 : 集合几个文件用到的定义,不对外部开放 +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef __ZXIC_PRIVATE_H__ +#define __ZXIC_PRIVATE_H__ + +#if ZXIC_REAL("日志相关") +#include +#include +#include + +#define ZXIC_TRACE_LOG_FILE_GZ_MAX_CNT (50) /* 最大压缩后日志文件数 */ + +typedef struct zxic_log_file_info { + char fname[50]; + struct file *p_log_fp; + unsigned int f_size; /* 日志文件大小上限,以字节为单位 */ +} ZXIC_LOG_FILE_INFO; + +#endif /* ZXIC_REAL("日志相关") */ + +#if ZXIC_REAL("打印相关") +typedef enum { + ZXIC_TRACE_PRINT = 0, + ZXIC_TRACE_ERROR_PRINT = 1, + ZXIC_TRACE_NOTICE_PRINT, + ZXIC_TRACE_INFO_PRINT, + 
ZXIC_TRACE_DEBUG_PRINT, + ZXIC_TRACE_ALL_PRINT, + ZXIC_TRACE_INVALID_PRINT +} ZXIC_TRACE_LEVEL; + +typedef enum zxic_log_file_type_e { + ZXIC_LOG_SDK = 0, /*SDK日常配置记录*/ + ZXIC_LOG_INIT = 1, /*初始化日志记录*/ + ZXIC_LOG_LIF = 2, /*LIF日志记录*/ + ZXIC_LOG_SERDES = 3, /*SERDES日志记录*/ + ZXIC_LOG_SE_ERAM = 4, /*SE ERAM日志记录*/ + ZXIC_LOG_SE_HBM = 5, /*SE HBM日志记录*/ + ZXIC_LOG_SE_OTHER = 6, /*SE other (se 部分模块)日志记录*/ + ZXIC_LOG_REG = 7, /*寄存器和打桩信息日志记录,特殊使用*/ + ZXIC_LOG_DEBUG = 8, /*打印,诊断计数打印等日志记录*/ + ZXIC_LOG_DUMP = 9, /*捞数据日志记录,特殊使用*/ + ZXIC_LOG_SE_LPM_SAMPLE_V4 = 10, /*SE LPM_V4样本打印记录*/ + ZXIC_LOG_SE_LPM_SAMPLE_V6 = 11, /*SE LPM_V4样本打印记录*/ + ZXIC_LOG_UT_DETAIL = 12, /* ut check 失败信息 */ + ZXIC_LOG_UT_RESULT = 13, /* utcheck 统计信息 */ + ZXIC_LOG_SE_HASH = 14, /*SE HASH日志记录*/ + ZXIC_LOG_SE_ACL = 15, /*SE ACL日志记录*/ + ZXIC_LOG_SLT = 16, /*SLT 日志记录*/ + ZXIC_LOG_SDS_COMM = 17, /*SERDES COMM 库日志记录 */ + ZXIC_LOG_THREAD = 31, /*多线程log打印,注意多线程占用了31b~26b*/ + ZXIC_LOG_MAX, +} ZXIC_LOG_FILE_TYPE_E; + +#define ZXIC_THREAD_ID_NUM_MAX (2048) + +#define ZXIC_MALLOC_MAX_B_SIZE (0xC800000U) /* 200M */ + +#endif /* ZXIC_REAL("打印相关") */ + +#if ZXIC_REAL("互斥锁相关") +typedef struct zxic_mutex_t { +#ifdef ZXIC_OS_WIN + HANDLE mutex; +#else + struct mutex mutex; + +#endif +} ZXIC_MUTEX_T; +#endif + +#if ZXIC_REAL("信号量相关") +typedef struct zxic_sem_t { +#ifdef ZXIC_OS_WIN + HANDLE sem; +#else + struct semaphore sem; +#endif +} ZXIC_SEM_T; +#endif + +void *ic_comm_sdk_print_regist(void); +unsigned int ic_comm_callback_print_get(void *pExcCall); +unsigned int ic_comm_callback_err_log_get(void *pExcCall); +void ic_comm_set_os_callback(ZXIC_OS_CALLBACK *p_os_cb); +void ic_comm_malloc_record(unsigned int size); +void ic_comm_free_record(void); +/***********************************************************/ +/** 从path_name所指的目录中查找符合参数的执行文件,找到后便执行该文件, + 然后将第二个参数argv传给该欲执行的文件,请参照execv用法,异常时候返回-1 +* @return +* @remark 无 +* @see +* @author pj @date 2020/03/30 +************************************************************/ 
+int zxic_system(const char *path_name, char *const argv[]); + +#endif /* end __ZXIC_COMMON_TOP_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private_top.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private_top.h new file mode 100644 index 000000000000..87a2a6e4d76c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_private_top.h @@ -0,0 +1,198 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_comm_top.h +* 文件标识 : +* 内容摘要 :提供少许 common.h 开头就要用到的宏 +* 其它说明 : 项目线不需要感知 +* 当前版本 : +* 作 者 : +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef __ZXIC_PRIVATE_TOP_H__ +#define __ZXIC_PRIVATE_TOP_H__ + +#include +#include +#include +#include +#include +#include + +#define ZXIC_REAL(x) (1U) /* 用于#if 对同一类的代码进行 分段 */ +#define ZXIC_NOREAL(x) (0U) /* 用于#if 对同一类的代码进行 分段 */ + +#ifndef ZXIC_OS_WIN /* 操作系统类型导入,编译宏控制 */ +#define ZXIC_OS_LINUX /* 默认为LINUX操作系统 */ +#endif + +#if ZXIC_REAL("内存") +typedef void *(*ZXIC_OS_MEM_MALLOC)(unsigned int mem_size); +typedef void (*ZXIC_OS_MEM_FREE)(void *p_free); + +typedef struct _zxic_os_callback { + ZXIC_OS_MEM_MALLOC p_mem_malloc; + ZXIC_OS_MEM_FREE p_mem_free; +} ZXIC_OS_CALLBACK; +#endif + +#if ZXIC_REAL("大小端") +/*大小端序专用*/ +typedef union zxic_endian_u { + unsigned int a; + unsigned char b; +} ZXIC_ENDIAN_U; +#endif + +#if ZXIC_REAL("异常返回值宏定义") +#define ZXIC_RC_BASE (0x1000U) + +/* bit_stream */ +#define ZXIC_BIT_STREAM_BASE (ZXIC_RC_BASE | 0x100) +#define ZXIC_BIT_STREAM_INDEX_ERR (ZXIC_BIT_STREAM_BASE | 0x001) +#define ZXIC_BIT_STREAM_DATA_TOO_BIG (ZXIC_BIT_STREAM_BASE | 0x002) + +/* parameter check */ +#define ZXIC_PARAMETER_CHK_BASE (ZXIC_RC_BASE | 0x200) +#define ZXIC_PAR_CHK_POINT_NULL (ZXIC_PARAMETER_CHK_BASE | 0x001) +#define ZXIC_PAR_CHK_ARGIN_ZERO 
(ZXIC_PARAMETER_CHK_BASE | 0x002) +#define ZXIC_PAR_CHK_ARGIN_OVERFLOW (ZXIC_PARAMETER_CHK_BASE | 0x003) +#define ZXIC_PAR_CHK_ARGIN_ERROR (ZXIC_PARAMETER_CHK_BASE | 0x004) +#define ZXIC_PAR_CHK_INVALID_INDEX (ZXIC_PARAMETER_CHK_BASE | 0x005) +#define ZXIC_PAR_CHK_INVALID_RANGE (ZXIC_PARAMETER_CHK_BASE | 0x006) +#define ZXIC_PAR_CHK_INVALID_DEV_ID (ZXIC_PARAMETER_CHK_BASE | 0x007) +#define ZXIC_PAR_CHK_INVALID_PARA (ZXIC_PARAMETER_CHK_BASE | 0x008) +#define ZXIC_PAR_CHK_BAR_ABNORMAL (ZXIC_PARAMETER_CHK_BASE | 0x009) +#define ZXIC_PAR_CHK_DEV_STATUS_OFF (ZXIC_PARAMETER_CHK_BASE | 0x00A) + +/* mutex lock */ +#define ZXIC_MUTEX_LOCK_BASE (ZXIC_RC_BASE | 0x300) +#define ZXIC_MUTEX_LOCK_INIT_FAIL (ZXIC_MUTEX_LOCK_BASE | 0x001) +#define ZXIC_MUTEX_LOCK_LOCK_FAIL (ZXIC_MUTEX_LOCK_BASE | 0x002) +#define ZXIC_MUTEX_LOCK_ULOCK_FAIL (ZXIC_MUTEX_LOCK_BASE | 0X003) +#define ZXIC_MUTEX_LOCK_DESTROY_FAIL (ZXIC_MUTEX_LOCK_BASE | 0X004) + +/* thread */ +#define ZXIC_THREAD_BASE (ZXIC_RC_BASE | 0x400) +#define ZXIC_THREAD_INFO_ADD_FAIL (ZXIC_THREAD_BASE | 0x001) +#define ZXIC_THREAD_CREATE_FAIL (ZXIC_THREAD_BASE | 0x002) + +/* socket */ +#define ZXIC_SOCKET_BASE (ZXIC_RC_BASE | 0x400) +#define ZXIC_SOKET_SERVICE_START_FAIL (ZXIC_THREAD_BASE | 0x001) +#define ZXIC_SOKET_SERVICE_CLOSE_FAIL (ZXIC_THREAD_BASE | 0x002) +#define ZXIC_SOKET_SET_PARA_FAIL (ZXIC_THREAD_BASE | 0x003) +#define ZXIC_SOKET_CREATE_FAIL (ZXIC_THREAD_BASE | 0x004) +#define ZXIC_SOKET_BIND_LISTEN_FAIL (ZXIC_THREAD_BASE | 0x005) +#define ZXIC_SOKET_CONNECT_FAIL (ZXIC_THREAD_BASE | 0x006) +#define ZXIC_SOKET_CLOSE_FAIL (ZXIC_THREAD_BASE | 0x007) +#define ZXIC_SOKET_GET_PARA_FAIL (ZXIC_THREAD_BASE | 0x008) + +/* double link */ +#define ZXIC_DOUBLE_LINK_BASE (ZXIC_RC_BASE | 0x500) +#define ZXIC_DOUBLE_LINK_ELEMENT_NUM_ERR (ZXIC_DOUBLE_LINK_BASE | 0x001) +#define ZXIC_DOUBLE_LINK_MALLOC_FAIL (ZXIC_DOUBLE_LINK_BASE | 0x002) +#define ZXIC_DOUBLE_LINK_POINT_NULL (ZXIC_DOUBLE_LINK_BASE | 0x003) +#define 
ZXIC_DOUBLE_LINK_CHK_SUM_ERR (ZXIC_DOUBLE_LINK_BASE | 0x004) +#define ZXIC_DOUBLE_LINK_NO_EXIST_FREENODE (ZXIC_DOUBLE_LINK_BASE | 0x005) +#define ZXIC_DOUBLE_LINK_FREE_INDX_INVALID (ZXIC_DOUBLE_LINK_BASE | 0x006) +#define ZXIC_DOUBLE_LINK_NO_EXIST_PRENODE (ZXIC_DOUBLE_LINK_BASE | 0x007) +#define ZXIC_DOUBLE_LINK_INPUT_INDX_INVALID (ZXIC_DOUBLE_LINK_BASE | 0x008) +#define ZXIC_DOUBLE_LINK_INIT_ELEMENT_NUM_ERR (ZXIC_DOUBLE_LINK_BASE | 0x009) + +/* index ctrl */ +#define ZXIC_INDEX_CTRL_BASE (ZXIC_RC_BASE | 0x600) +#define ZXIC_INDEX_CTRL_TREE_FULL (ZXIC_INDEX_CTRL_BASE | 0x001) +#define ZXIC_INDEX_CTRL_SAME_RECORD (ZXIC_INDEX_CTRL_BASE | 0x002) +#define ZXIC_INDEX_CTRL_OPER_MODE_ERR (ZXIC_INDEX_CTRL_BASE | 0x003) + +/* index reverse */ +#define ZXIC_INDEX_RSV_BASE (ZXIC_RC_BASE | 0x700) +#define ZXIC_INDEX_RESERVE_GET_INDX_FAIL (ZXIC_INDEX_RSV_BASE | 0x001) +#define ZXIC_INDEX_RESERVE_BORROW_HIG_FAIL (ZXIC_INDEX_RSV_BASE | 0x002) +#define ZXIC_INDEX_RESERVE_BORROW_LOW_FAIL (ZXIC_INDEX_RSV_BASE | 0x003) +#define ZXIC_INDEX_RESERVE_ALLOCI_INDX_FAIL (ZXIC_INDEX_RSV_BASE | 0x004) +#define ZXIC_INDEX_RESERVE_FREE_INDX_FAIL (ZXIC_INDEX_RSV_BASE | 0x005) +#define ZXIC_INDEX_RESERVE_RESET_INDX_FAIL (ZXIC_INDEX_RSV_BASE | 0x006) + +/* list stack */ +#define ZXIC_LIST_STACK_BASE (ZXIC_RC_BASE | 0x800) +#define ZXIC_LIST_STACK_ELEMENT_NUM_ERR (ZXIC_LIST_STACK_BASE | 0x001) +#define ZXIC_LIST_STACK_POINT_NULL (ZXIC_LIST_STACK_BASE | 0x002) +#define ZXIC_LIST_STACK_ALLOC_MEMORY_FAIL (ZXIC_LIST_STACK_BASE | 0x003) +#define ZXIC_LIST_STACK_ISEMPTY_ERR (ZXIC_LIST_STACK_BASE | 0x004) +#define ZXIC_LIST_STACK_FREE_INDEX_INVALID (ZXIC_LIST_STACK_BASE | 0x005) +#define ZXIC_LIST_STACK_ALLOC_INDEX_INVALID (ZXIC_LIST_STACK_BASE | 0x006) +#define ZXIC_LIST_STACK_ALLOC_INDEX_USED (ZXIC_LIST_STACK_BASE | 0x007) + +/* avl tree */ +#define ZXIC_AVL_TREE_BASE (ZXIC_RC_BASE | 0x900) +#define ZXIC_AVL_TREE_INVALID_INDEX (ZXIC_AVL_TREE_BASE | 0x001) +#define ZXIC_AVL_TREE_SORT_INIT_INPUT_PARA_ERR 
(ZXIC_AVL_TREE_BASE | 0x002) +#define ZXIC_AVL_TREE_SORT_ISEMPTY_ERR (ZXIC_AVL_TREE_BASE | 0x003) +#define ZXIC_AVL_TREE_SORT_INSERT_SAME_KEY (ZXIC_AVL_TREE_BASE | 0x004) +#define ZXIC_AVL_TREE_SORT_IS_FULL (ZXIC_AVL_TREE_BASE | 0x005) +#define ZXIC_AVL_TREE_KEY_SIZE_ERR (ZXIC_AVL_TREE_BASE | 0x006) + +/* index fill */ +#define ZXIC_INDEX_FILL_BASE (ZXIC_RC_BASE | 0xA00) +#define ZXIC_INDEX_FILL_FULL (ZXIC_INDEX_FILL_BASE | 0x001) +#define ZXIC_INDEX_DEL_FAIL (ZXIC_INDEX_FILL_BASE | 0x002) + +#define ZXIC_COMM_LOG_BASE (ZXIC_RC_BASE | 0xB00) +#define ZXIC_COMM_LOG_MUTEX_CREATE_FAIL (ZXIC_COMM_LOG_BASE | 0x001) +#define ZXIC_COMM_LOG_MUTEX_LOCK_FAIL (ZXIC_COMM_LOG_BASE | 0x002) +#define ZXIC_COMM_LOG_FILE_RENAME_FAIL (ZXIC_COMM_LOG_BASE | 0x003) +#define ZXIC_COMM_LOG_FILE_DELETE_FAIL (ZXIC_COMM_LOG_BASE | 0x004) + +/* index fill type */ +#define ZXIC_INDEX_FILL_TYPE_BASE (ZXIC_RC_BASE | 0xC00) +#define ZXIC_INDEX_FILL_TYPE_FULL (ZXIC_INDEX_FILL_TYPE_BASE | 0x001) +#define ZXIC_INDEX_DEL_TYPE_FAIL (ZXIC_INDEX_FILL_TYPE_BASE | 0x002) + +#define ZXIC_COMM_C_INVALID_PARAM (ZXIC_COMM_LOG_BASE | 0x005) + +#define ZXIC_E_LLT_CHECK (25) +#define ZXIC_E_LLT_ASSERT (26) + +#endif /* END 返回值宏定义 */ + +#if ZXIC_REAL("") +signed int ic_comm_snprintf_s(char *buffer, size_t sizeofbuf, size_t count, + const char *format, ...); +signed int ic_comm_vsnprintf_s(char *buffer, size_t sizeofbuf, size_t count, + const char *format, va_list ap); +signed int ic_comm_sscanf(const char *src, const char *format, ...); +char *ic_comm_strcpy_s(char *pcDst, size_t dwMaxSize, const char *pcSrc); +char *ic_comm_strncpy_s(char *pcDst, size_t dwMaxSize, const char *pcSrc, + size_t dwCount); +unsigned int ic_comm_memcpy(void *dest, const void *src, size_t n); +unsigned int ic_comm_memcpy_s(void *dest, size_t dest_len, const void *src, + size_t n); +char *ic_comm_strcat_s(char *pcDst, size_t dwMaxSize, const char *pcSrc); +char *ic_comm_strncat_s(char *pcDst, size_t dwMaxSize, const char *pcSrc, + 
size_t dwCount); +size_t ic_comm_strnlen_s(const char *str, size_t MaxCount); +char *ic_comm_strtok_s(char *string_org, const char *demial, char **context); +ZXIC_OS_CALLBACK *zxic_comm_get_os_callback(void); +void *ic_comm_malloc_memory(unsigned int size); +void ic_comm_free_record(void); +void *ic_comm_vmalloc_memory(unsigned int size); +void ic_comm_vfree_record(void); +unsigned int zxic_comm_index_check(unsigned int val, unsigned int min, + unsigned int max); +unsigned int zxic_comm_dev_index_check(unsigned int dev_id, unsigned int val, + unsigned int min, unsigned int max); +unsigned int zxic_comm_index_check(unsigned int val, unsigned int min, + unsigned int max); +unsigned int zxic_comm_errcode_check(unsigned int error_code); +#endif + +#endif /* end __ZXIC_COMMON_TOP_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_slt.h b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_slt.h new file mode 100644 index 000000000000..ba22cd44d987 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/include/zxic_slt.h @@ -0,0 +1,781 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_slt.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : PJ +* 完成日期 : 2022/03/26 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef __ZXIC_SLT_H__ +#define __ZXIC_SLT_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define ZXIC_SLT_CASE_PASS (0U) +#define ZXIC_SLT_CASE_FAILURE (1U) + +#define ZXIC_SLT_ERR_INFO_LEN (255U) +#define ZXIC_SLT_CASE_MAX_NUM (255U) +#define ZXIC_SLT_DEV_ID_MAX_NUM (4U) +#define ZXIC_SLT_CHIP_ID_LEN (3U) + +/* 封装类型,不同产品含义不同,暂时定义最大支持5个*/ +#define ZXIC_SLT_MAX_ENCAP (5U) + +/* BIN_CODE 编码*/ +#define ZXIC_SLT_BIN_CODE_PASS ZXIC_SLT_CASE_PASS +#define ZXIC_SLT_BIN_CODE_SERDES_FAIL (1U) /* sredes类检测*/ +#define ZXIC_SLT_BIN_CODE_FLOW_FAIL (2U) 
/* pvt类检测 FLOW通流类*/ +#define ZXIC_SLT_BIN_CODE_RAM_FAIL (3U) /* RAM类检测*/ +#define ZXIC_SLT_BIN_CODE_HBM_DDR_FAIL (4U) /* hbm类检测*/ +#define ZXIC_SLT_BIN_CODE_OTHER_FAIL (5U) /* PLL EFUSE PCIE RISCV */ +#define ZXIC_SLT_BIN_CODE_MIX_FAIL (6U) /* 上述多种类型的混合*/ + +/* 测试用例全集case_no定义*/ +typedef enum zxic_slt_case_no_e { + ZXIC_SLT_CASE_PLL_LOCK_STATUS_CHECK = (0x08U), + /* efuse锁定相关测试用例 */ + ZXIC_SLT_CASE_EFUSE_CHECK = (0x09U), + + /* serdes用例编号范围0x10~0x7f*/ + /* lifx(LIF0 LIF1 LIF2)接口serdes prbs相关测试用例 0x10~0x2f*/ + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_1G = (0x10U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_3G = (0x11U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_5G = (0x12U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_6G = (0x13U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_9G = (0x14U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_10G = (0x15U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_12G = (0x16U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_15G = (0x17U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_20G = (0x18U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_25G = (0x19U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_26G = (0x1aU), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_27G = (0x1bU), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_28G = (0x1cU), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_30G = (0x1dU), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_50G = (0x1fU), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_51G = (0x20U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_53G = (0x21U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_55G = (0x22U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_56G = (0x23U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_57G = (0x24U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_98G = (0x25U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_106G = (0x26U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_111G = (0x27U), + ZXIC_SLT_CASE_SERDES_LIFX_PRBS_112G = (0x28U), + + /* lifc接口serdes相关测试用例 0x30~0x37*/ + ZXIC_SLT_CASE_SERDES_LIFC_PRBS_1G = (0x30U), + ZXIC_SLT_CASE_SERDES_LIFC_PRBS_10G = (0x31U), + ZXIC_SLT_CASE_SERDES_LIFC_PRBS_25G = (0x32U), + ZXIC_SLT_CASE_SERDES_LIFC_PRBS_53G = (0x33U), + + /* lifc eth 接口发包测试相关测试用例 0x38~0x3f*/ + ZXIC_SLT_CASE_PORT_LIFC_SD_1G = (0x38U), + 
ZXIC_SLT_CASE_PORT_LIFC_SD_10G = (0x39U), + ZXIC_SLT_CASE_PORT_LIFC_SD_25G = (0x3aU), + ZXIC_SLT_CASE_PORT_LIFC_SD_53G = (0x3bU), + + /* 外部查找的interlaken-la接口*/ + ZXIC_SLT_CASE_SERDES_INTERLAKEN_LA = (0x3fU), + + /* lif0 eth 接口发包测试相关测试用例 0x40~0x5f*/ + ZXIC_SLT_CASE_PORT_LIFO_SD_1G = (0x40U), + ZXIC_SLT_CASE_PORT_LIFO_SD_5G = (0x41U), + ZXIC_SLT_CASE_PORT_LIFO_SD_10G = (0x42U), + ZXIC_SLT_CASE_PORT_LIFO_SD_12G = (0x43U), + ZXIC_SLT_CASE_PORT_LIFO_SD_25G = (0x44U), + ZXIC_SLT_CASE_PORT_LIFO_SD_26G = (0x45U), + ZXIC_SLT_CASE_PORT_LIFO_SD_51G = (0x46U), + ZXIC_SLT_CASE_PORT_LIFO_SD_53G = (0x47U), + ZXIC_SLT_CASE_PORT_LIFO_SD_106G = (0x48U), + ZXIC_SLT_CASE_PORT_LIFO_SD_112G = (0x49U), + ZXIC_SLT_CASE_PORT_LIFO_SD_20G = (0x4AU), + + /* lif0 intlk接口发包测试相关测试用例 0x60~0x6f*/ + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_3G = (0x60U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_6G = (0x61U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_10G = (0x62U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_12G = (0x63U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_25G = (0x64U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_28G = (0x65U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_30G = (0x66U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_50G = (0x67U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_51G = (0x68U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_53G = (0x69U), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_56G = (0x6aU), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_106G = (0x6bU), + ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_112G = (0x6cU), + + /* 交换侧lif1 FEC测试相关测试用例 0x70~0x7f*/ + ZXIC_SLT_CASE_SERDES_LIF1_FEC_25G = (0x70U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_26G = (0x71U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_28G = (0x72U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_50G = (0x73U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_53G = (0x74U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_56G = (0x75U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_98G = (0x76U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_106G = (0x77U), + ZXIC_SLT_CASE_SERDES_LIF1_FEC_112G = (0x78U), + + /*片内ram用例编号范围0x80~0x87*/ + /* CPU可读写寄存器测试用例 */ + ZXIC_SLT_CASE_REG_CPU_W_R = (0x80U), + /* CPU可读写ram测试用例 */ + 
ZXIC_SLT_CASE_RAM_CPU_W_R = (0x81U), + /* RAM表项读写测试(微码)*/ + ZXIC_SLT_CASE_RAM_MC_W_R = (0x82U), + /* RAM表项读写测试 memtest86(微码)*/ + ZXIC_SLT_CASE_RAM_MC_MEMTEST86 = (0x83U), + + /* HBM/DDR用例编号范围 0x90~0x9f*/ + /*mbist测试*/ + ZXIC_SLT_CASE_HBM_DDR_CTRL_BIST = (0x90U), + /* HBM/DDR表项CPU读写测试*/ + ZXIC_SLT_CASE_HBM_DDR_CPU_W_R = (0x91U), + /* HBM/DDR表项读写测试(微码)*/ + ZXIC_SLT_CASE_HBM_DDR_MC_W_R = (0x92U), + /* HBM/DDR表项读写测试 memtest86(微码)*/ + ZXIC_SLT_CASE_HBM_DDR_MC_MEMTEST86 = (0x93U), + /* HBM/DDR TM覆盖测试*/ + ZXIC_SLT_CASE_HBM_DDR_TM_TEST = (0x94U), + + /* 发流测试和全业务PVT测试用例编号范围 0xa0~0xaf*/ + /* 数据路径测试用例 */ + ZXIC_SLT_CASE_PKT_FLOW_NOM = (0xA0U), + /* OAM发包测试用例 */ + ZXIC_SLT_CASE_OAM_SEND_PKT = (0xA1U), + /* se查表全集测试 */ + ZXIC_SLT_CASE_PKT_FLOW_SE_FULL = (0xA2U), + /* tm全队列测试用例 */ + ZXIC_SLT_CASE_PKT_FLOW_TM_FULL = (0xA3U), + /* LIFC口测试用例 */ + ZXIC_SLT_CASE_PKT_FLOW_LIFC = (0xA4U), + + /* 芯片PVT 功耗、温度和电压测试用例 */ + ZXIC_SLT_CASE_PVT_EXCEED_POWER_TEST = (0xA8U), + ZXIC_SLT_CASE_PVT_EXCEED_TEM_VOLT_TEST = (0xA9U), + ZXIC_SLT_CASE_PVT_EXCEED_HBM_TEMPER_TEST = (0xAaU), + + /* 其他测试用例编号范围0xb0~0xff*/ + /* 调压测试*/ + ZXIC_SLT_CASE_VOLT_TEST = (0xB0U), + /* 时钟频偏测试 */ + ZXIC_SLT_CASE_CLK_TEST = (0xB1U), + + /* PCIE压力测试 */ + ZXIC_SLT_CASE_PCIE_W_R_TEST = (0xB2U), + /* 下述两个测试用例必须放在最后执行 */ + /* PCIE建链测试 */ + ZXIC_SLT_CASE_PCIE_TEST = (0xB3U), + /* RISCV测试用例 */ + ZXIC_SLT_CASE_RISCV_W_R_TEST = (0xB4U), + + ZXIC_SLT_CASE_MAX, +} ZXIC_SLT_CASE_NO_E; + +#if ZXIC_REAL("ERR_CODE") +#define ZXIC_SLT_RC_BASE (0x100U) + +#define ZXIC_SLT_TEST_CASE_FNC_POINT_NULL (ZXIC_SLT_RC_BASE | 0x1) +#define ZXIC_SLT_CASE_PLL_LOCK_STATUS_CHECK_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PLL_LOCK_STATUS_CHECK) +#define ZXIC_SLT_CASE_EFUSE_CHECK_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_EFUSE_CHECK) + +/* lifx*/ +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_1G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_1G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_3G_FAIL \ + (ZXIC_SLT_RC_BASE | 
ZXIC_SLT_CASE_SERDES_LIFX_PRBS_3G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_5G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_5G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_6G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_6G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_9G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_9G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_10G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_10G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_12G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_12G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_15G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_15G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_20G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_20G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_25G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_26G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_26G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_27G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_27G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_28G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_28G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_30G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_30G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_50G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_50G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_51G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_51G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_53G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_55G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_55G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_56G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_56G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_57G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_57G) +#define 
ZXIC_SLT_CASE_SERDES_LIFX_PRBS_98G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_98G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_106G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_106G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_111G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_111G) +#define ZXIC_SLT_CASE_SERDES_LIFX_PRBS_112G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFX_PRBS_112G) + +/* lifc*/ +#define ZXIC_SLT_CASE_SERDES_LIFC_PRBS_1G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFC_PRBS_1G) +#define ZXIC_SLT_CASE_SERDES_LIFC_PRBS_10G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFC_PRBS_10G) +#define ZXIC_SLT_CASE_SERDES_LIFC_PRBS_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFC_PRBS_25G) +#define ZXIC_SLT_CASE_SERDES_LIFC_PRBS_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIFC_PRBS_53G) + +/* lifc */ +#define ZXIC_SLT_CASE_PORT_LIFC_SD_1G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFC_SD_1G) +#define ZXIC_SLT_CASE_PORT_LIFC_SD_10G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFC_SD_10G) +#define ZXIC_SLT_CASE_PORT_LIFC_SD_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFC_SD_25G) +#define ZXIC_SLT_CASE_PORT_LIFC_SD_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFC_SD_53G) + +/* lif0 ETH*/ +#define ZXIC_SLT_CASE_PORT_LIFO_SD_1G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_1G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_5G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_5G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_10G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_10G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_12G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_12G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_25G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_26G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_26G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_51G_FAIL \ + (ZXIC_SLT_RC_BASE | 
ZXIC_SLT_CASE_PORT_LIFO_SD_51G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_53G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_106G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_106G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_112G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_112G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_20G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_20G) + +/* lif0 intlk*/ +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_3G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_3G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_6G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_6G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_10G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_10G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_12G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_12G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_25G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_28G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_28G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_30G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_30G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_50G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_50G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_51G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_51G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_53G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_56G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_56G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_106G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_106G) +#define ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_112G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PORT_LIFO_SD_ILK_112G) + +/* lif1*/ +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_25G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_25G) 
+#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_26G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_26G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_28G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_28G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_50G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_50G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_53G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_53G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_56G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_56G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_98G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_98G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_106G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_106G) +#define ZXIC_SLT_CASE_SERDES_LIF1_FEC_112G_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_LIF1_FEC_112G) + +/* 外部查找的interlaken-la接口*/ +#define ZXIC_SLT_CASE_SERDES_INTELAKEN_LA_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_SERDES_INTERLAKEN_LA) + +/*片内ram读写测试*/ +#define ZXIC_SLT_CASE_RAM_CPU_W_R_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_RAM_CPU_W_R) +#define ZXIC_SLT_CASE_REG_CPU_W_R_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_REG_CPU_W_R) +#define ZXIC_SLT_CASE_RAM_MC_W_R_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_RAM_MC_W_R) +#define ZXIC_SLT_CASE_RAM_MC_MEMTEST86_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_RAM_MC_MEMTEST86) + +/* HBM/DDR */ +#define ZXIC_SLT_CASE_HBM_DDR_CTRL_BIST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_HBM_DDR_CTRL_BIST) +#define ZXIC_SLT_CASE_HBM_DDR_MC_W_R_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_HBM_DDR_MC_W_R) +#define ZXIC_SLT_CASE_HBM_DDR_MC_MEMTEST86_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_HBM_DDR_MC_MEMTEST86) +#define ZXIC_SLT_CASE_HBM_DDR_CPU_W_R_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_HBM_DDR_CPU_W_R) +#define ZXIC_SLT_CASE_HBM_DDR_TM_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_HBM_DDR_TM_TEST) + +/* 发流测试和全业务PVT测试*/ +#define ZXIC_SLT_CASE_PKT_FLOW_NOM_FAIL \ + (ZXIC_SLT_RC_BASE |
ZXIC_SLT_CASE_PKT_FLOW_NOM) +#define ZXIC_SLT_CASE_OAM_SEND_PKT_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_OAM_SEND_PKT) +#define ZXIC_SLT_CASE_PKT_FLOW_SE_FULL_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PKT_FLOW_SE_FULL) +#define ZXIC_SLT_CASE_PKT_FLOW_TM_FULL_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PKT_FLOW_TM_FULL) +#define ZXIC_SLT_CASE_PKT_FLOW_LIFC_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PKT_FLOW_LIFC) + +#define ZXIC_SLT_CASE_PVT_EXCEED_POWER_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PVT_EXCEED_POWER_TEST) +#define ZXIC_SLT_CASE_PVT_EXCEED_TEM_VOLT_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PVT_EXCEED_TEM_VOLT_TEST) +#define ZXIC_SLT_CASE_PVT_EXCEED_HBM_TEMPER_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PVT_EXCEED_HBM_TEMPER_TEST) + +#define ZXIC_SLT_CASE_PCIE_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PCIE_TEST) +#define ZXIC_SLT_CASE_PCIE_W_R_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_PCIE_W_R_TEST) +#define ZXIC_SLT_CASE_RISCV_W_R_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_RISCV_W_R_TEST) +#define ZXIC_SLT_CASE_VOLT_TEST_FAIL \ + (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_VOLT_TEST) +#define ZXIC_SLT_CASE_CLK_TEST_FAIL (ZXIC_SLT_RC_BASE | ZXIC_SLT_CASE_CLK_TEST) + +#endif + +typedef struct zxic_slt_err_info_mng_t { + ZXIC_UINT32 err_code; /*参见ERR_CODE定义*/ + const ZXIC_CHAR *err_info; +} ZXIC_SLT_ERR_INFO_MNG_T; + +typedef struct zxic_slt_init_ctrl_t { + ZXIC_ADDR_T pcie_vir_baddr; /**< @brief PCIe映射虚拟基地址*/ + ZXIC_ADDR_T riscv_vir_baddr; /**< @brief RISCV映射虚拟基地址 */ + ZXIC_ADDR_T dma_vir_baddr; /**< @brief DMA映射虚拟地址*/ + ZXIC_ADDR_T dma_phy_baddr; /**< @brief DMA内存物理地址*/ +} ZXIC_SLT_INIT_CTRL_T; + +typedef struct zxic_slt_case_para_t + +{ + ZXIC_UINT32 chip_id[ZXIC_SLT_CHIP_ID_LEN]; /* 返回对应芯片ID */ + ZXIC_UINT32 case_num; /* 返回执行SLT 用例数 */ + ZXIC_UINT32 case_no[ZXIC_SLT_CASE_MAX_NUM]; /* 返回执行SLT 用例编号列表 */ + ZXIC_UINT32 case_result + [ZXIC_SLT_CASE_MAX_NUM]; /* 返回执行SLT 用例对应结果,成功返回值或错误码 */ + ZXIC_UINT32 bin_code; /* 返回 SLT 执行分BIN码 */ 
+ ZXIC_DOUBLE temp; /* 返回 SLT 用例测试前初始温度 */ + ZXIC_DOUBLE volt; /* 返回 SLT 用例测试前核电压 */ + +} ZXIC_SLT_CASE_PARA_T; + +/*case 函数标准定义*/ +typedef ZXIC_RTN32 (*ZXIC_SLT_INST_CASE_FN)(ZXIC_UINT32 device_id); + +typedef struct zxic_slt_case_mng_t { + const ZXIC_CHAR *case_name; /*用例名称*/ + ZXIC_UINT32 case_no; /*参见ZXIC_SLT_CASE_NO_E定义*/ + ZXIC_SLT_INST_CASE_FN inst_case_fn; /*单个case函数指针*/ +} ZXIC_SLT_CASE_MNG_T; + +/* 功耗类型定义*/ +typedef enum zxic_slt_power_type_t { + ZXIC_SLT_POWER_VCC_SSP4_CORE = (0x0U), + ZXIC_SLT_POWER_VCC0V75_SSP4_SERDES_DVDD = (0x1U), + ZXIC_SLT_POWER_VCC0V75_SSP4_SERDES_AVDDL = (0x2U), + ZXIC_SLT_POWER_VCC1V2_SSP4_SERDES_AVDDH = (0x3U), + ZXIC_SLT_POWER_VCC1V2_SSP4_HBM_VDDQ = (0x4U), + + ZXIC_SLT_POWER_VCC0V75_SSP4_AVDD = (0x5U), + ZXIC_SLT_POWER_VCC1V2_SSP4_AVDD = (0x6U), + ZXIC_SLT_POWER_VCC1V2_SSP4_GPIO_DVDD = (0x7U), + ZXIC_SLT_POWER_VCC2V5_SSP4_VPP = (0x8U), + + ZXIC_SLT_POWER_TYPE_MAX, + +} ZXIC_SLT_POWER_TYPE_T; + +typedef struct zxic_slt_power_t { + ZXIC_DOUBLE volt; /* 电压值 */ + ZXIC_DOUBLE cur; /* 电流值 */ + ZXIC_UINT32 type; /* 功耗编码,参见ZXIC_SLT_POWER_TYPE_T定义 */ +} ZXIC_SLT_POWER_T; + +typedef struct zxic_slt_power_para_t { + ZXIC_SLT_POWER_T *p_para; + ZXIC_UINT32 num; /* 有效功耗个数 */ +} ZXIC_SLT_POWER_PARA_T; + +/* 电压类型定义*/ +typedef enum zxic_slt_volt_type_t { + ZXIC_SLT_VOLT_VCC_SSP4_CORE = (0x0U), + ZXIC_SLT_VOLT_VCC0V75_SSP4_SERDES_DVDD = (0x1U), + ZXIC_SLT_VOLT_VCC0V75_SSP4_SERDES_AVDDL = (0x2U), + ZXIC_SLT_VOLT_VCC1V2_SSP4_SERDES_AVDDH = (0x3U), + ZXIC_SLT_VOLT_VCC1V2_SSP4_HBM_VDDQ = (0x4U), + + ZXIC_SLT_VOLT_TYPE_MAX, + +} ZXIC_SLT_VOLT_TYPE_T; + +typedef struct zxic_slt_volt_t { + ZXIC_UINT32 percent; /* 调压上下浮动值,百分比*100,例如上调1%,传入数值1*/ + ZXIC_UINT32 flag; /* 正偏:0 负偏:1 */ + ZXIC_UINT32 vol_type; /* 电压类型,参见ZXIC_SLT_VOLT_TYPE_T定义 */ +} ZXIC_SLT_VOLT_T; + +typedef struct zxic_slt_volt_para_t { + ZXIC_SLT_VOLT_T *p_para; + ZXIC_UINT32 num; /* 有效调压个数 */ +} ZXIC_SLT_VOLT_PARA_T; + +/* 时钟类型定义*/ +typedef enum zxic_slt_clk_type_t { + 
ZXIC_SLT_CLK_AU5327_A = (0x0U), /*SSP4 SOCKET 单板AU5327#1 HOST_100M*/ + ZXIC_SLT_CLK_AU5327_B = + (0x1U), /*SSP4 SOCKET 单板AU5327#1 PLL_SYS_CLK/SERDES 156.25M*/ + ZXIC_SLT_CLK_AU5327_C = + (0x2U), /*SSP4 SOCKET 单板AU5327#1 PLL_LOCAL_CLK 156.25M*/ + ZXIC_SLT_CLK_AU5327_D = + (0x3U), /*SSP4 SOCKET 单板AU5327#1 PLL_TS_CLK 125M*/ + + ZXIC_SLT_CLK_TYPE_MAX, + +} ZXIC_SLT_CLK_TYPE_T; + +typedef struct zxic_slt_clk_t { + ZXIC_UINT32 + value; /* 时钟拉偏值,单位HZ(PPM),1PPM = 1HZ(1MHZ=1000KHZ=1000000HZ)*/ + ZXIC_UINT32 flag; /* 正偏:0 负偏:1 */ + ZXIC_UINT32 clk_type; /* 时钟类型,参见ZXIC_SLT_CLK_TYPE_T定义 */ +} ZXIC_SLT_CLK_T; + +typedef struct zxic_slt_clk_para_t { + ZXIC_SLT_CLK_T *p_para; + ZXIC_UINT32 num; /* 有效拉偏时钟个数 */ +} ZXIC_SLT_CLK_PARA_T; + +#if ZXIC_REAL("define_for_bsp") +/***********************************************************/ +/** 功耗获取函数 +* @param ZXIC_SLT_CASE_GETPOWER_FN +* @param device_id +* @param p_para +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/28 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_CASE_GETPOWER_FN)(ZXIC_UINT32 device_id, + ZXIC_SLT_POWER_PARA_T *p_para); + +/***********************************************************/ +/** riscv测试函数 +* @param ZXIC_SLT_CASE_RISCV_FN +* @param device_id +* @param reg_addr 寄存器地址 +* @param wr_data 写入寄存器的值 +* @param rd_data 期望从寄存器中读出的值 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/28 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_CASE_RISCV_FN)(ZXIC_UINT32 device_id, + ZXIC_UINT32 reg_addr, + ZXIC_UINT32 wr_data, + ZXIC_UINT32 rd_data); + +/***********************************************************/ +/** PCIE测试函数 +* @param ZXIC_SLT_CASE_PCIE_FN +* @param device_id +* @param times pcie建链测试次数 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/28 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_CASE_PCIE_FN)(ZXIC_UINT32 device_id, + ZXIC_UINT32 
times); + +/***********************************************************/ +/** 调电压函数 +* @param ZXIC_SLT_CASE_ADJUST_VOLT_FN +* @param device_id +* @param p_para +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/28 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_CASE_ADJUST_VOLT_FN)(ZXIC_UINT32 device_id, ZXIC_SLT_VOLT_PARA_T *p_para); + +/***********************************************************/ +/** 调时钟函数 +* @param ZXIC_SLT_CASE_ADJUST_CLK_FN +* @param device_id +* @param p_para +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/28 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_CASE_ADJUST_CLK_FN)(ZXIC_UINT32 device_id, + ZXIC_SLT_CLK_PARA_T *p_para); + +/*BSP回调函数定义*/ +typedef struct zxic_slt_bsp_register_fn_t { + ZXIC_SLT_CASE_GETPOWER_FN zxic_slt_power_fn; + ZXIC_SLT_CASE_RISCV_FN zxic_slt_riscv_fn; + ZXIC_SLT_CASE_PCIE_FN zxic_slt_pcie_fn; + ZXIC_SLT_CASE_ADJUST_VOLT_FN zxic_slt_volt_fn; + ZXIC_SLT_CASE_ADJUST_CLK_FN zxic_slt_clk_fn; + +} ZXIC_SLT_BSP_REGISTER_FN_T; + +#endif + +#if ZXIC_REAL("define_for_sdk") +/***********************************************************/ +/** 初始化函数 +* @param ZXIC_SLT_INIT_FN +* @param device_id +* @param chip_type 用于区分不同封装类型 +* @param p_init_ctrl 参见ZXIC_SLT_INIT_CTRL_T定义 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/04/22 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_INIT_FN)(ZXIC_UINT32 device_id, + ZXIC_UINT32 chip_type, + ZXIC_SLT_INIT_CTRL_T *p_init_ctrl); + +/* Chip_ID 组成结构说明: +** Chip_ID = efuse里面的wafer批次号+wafer片号+x坐标+Y坐标组合而成 +** wafer批次号: 48bit lot_id5~0 +** wafer片号: 5bit wafer_no +** x坐标: 8bit x_addr +** Y坐标: 8bit y_addr +** chip_id2 = lot_id5[7:3] +** chip_id1 = lot_id5[2:0] + lot_id4 + lot_id3 + lot_id2 + lot_id1[7:3] +** chip_id0 = lot_id1[2:0] + lot_id0 + wafer_no[4:0] + x_addr + y_addr*/ + 
+/***********************************************************/ +/** 执行测试用例之前的预处理函数,用来获取chip_id,温度,电压信息 +* @param ZXIC_SLT_PRE_FN +* @param device_id +* @param chip_type 用于区分不同封装类型 +* @param p_chip_id 返回芯片的chip_id信息(参见上述格式说明) +* @param p_temp 返回 SLT 用例测试前初始温度 +* @param p_volt 返回 SLT 用例测试前核电压 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/03/29 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_PRE_FN)(ZXIC_UINT32 device_id, + ZXIC_UINT32 chip_type, + ZXIC_UINT32 *p_chip_id, + ZXIC_DOUBLE *p_temp, ZXIC_DOUBLE *p_volt); + +/***********************************************************/ +/** 用例执行函数 +* @param ZXIC_SLT_ALL_CASE_FN +* @param device_id +* @param chip_type 用于区分不同封装类型 +* @param p_case_num 返回执行SLT 用例数 +* @param p_case_no 返回执行SLT 用例编号列表 +* @param p_case_result 返回执行SLT 用例对应结果,成功返回值或错误码 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/03/29 +************************************************************/ +typedef ZXIC_RTN32 (*ZXIC_SLT_ALL_CASE_FN)(ZXIC_UINT32 device_id, + ZXIC_UINT32 chip_type, + ZXIC_UINT32 *p_case_num, + ZXIC_UINT32 *p_case_no, + ZXIC_UINT32 *p_case_result); + +/*SDK回调函数定义*/ +typedef struct zxic_slt_register_fn_t { + ZXIC_SLT_INIT_FN zxic_slt_init_fn; /* 初始化函数 */ + ZXIC_SLT_PRE_FN zxic_slt_pre_fn; /* 预处理函数 */ + ZXIC_SLT_ALL_CASE_FN zxic_slt_all_case_fn; /* 用例执行函数*/ + +} ZXIC_SLT_REGISTER_FN_T; + +#endif + +#if ZXIC_REAL("fn for sdk") +/***********************************************************/ +/** 回调功能注册函数,不区分芯片ID,仅注册一次即可,提供给SDK设置 +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_register_fn_set(ZXIC_SLT_REGISTER_FN_T *pExcCall); + +/***********************************************************/ +/** 获取POWER函数 +* @param pExcCall +* +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 
zxic_slt_callback_getpower(ZXIC_VOID *pExcCall); + +/***********************************************************/ +/** 获取MODI函数 +* @param pExcCall +* +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_callback_riscv(ZXIC_VOID *pExcCall); + +/***********************************************************/ +/** 获PCIE函数 +* @param pExcCall +* +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_callback_pcie(ZXIC_VOID *pExcCall); + +/***********************************************************/ +/** 获取VOLT函数 +* @param pExcCall +* +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_callback_adjust_volt(ZXIC_VOID *pExcCall); + +/***********************************************************/ +/** 获取CLK函数 +* @param pExcCall +* +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_callback_adjust_clk(ZXIC_VOID *pExcCall); + +#endif + +#if ZXIC_REAL("fn for bsp") + +/***********************************************************/ +/** 回调功能注册函数,不区分芯片ID,仅注册一次即可,提供给SDK设置 +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_RTN32 zxic_slt_bsp_register_fn_set(ZXIC_SLT_BSP_REGISTER_FN_T *pExcCall); + +/***********************************************************/ +/** 设置是否删除log标记,提供给BSP调用 +* @return +* @remark 无 +* @see +* @author PJ @date 2020/05/03 +************************************************************/ +ZXIC_VOID zxic_slt_log_rm_flag_set(ZXIC_UINT32 log_rm_flag); + +/***********************************************************/ +/** 执行SDK SLT 所有用例测试,提供给BSP调用 +* @param dev_id +* @param chip_type 芯片封装类型 +* @param p_init_ctrl 
详细参见ZXIC_SLT_INIT_CTRL_T说明 +* @param p_case_para 详细参见ZXIC_SLT_CASE_PARA_T说明 +* +* @return +* @remark 无 +* @see +* @author PJ @date 2022/03/29 +************************************************************/ +ZXIC_RTN32 zxic_slt_all_case_test(ZXIC_UINT32 dev_id, ZXIC_UINT32 chip_type, + ZXIC_SLT_INIT_CTRL_T *p_init_ctrl, + ZXIC_SLT_CASE_PARA_T *p_case_para); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* end __ZXIC_SLT_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/comm/source/Kbuild.include new file mode 100644 index 000000000000..aa1a28ec74a1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/comm/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_double_link.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_double_link.c new file mode 100644 index 000000000000..3c5f98279051 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_double_link.c @@ -0,0 +1,544 @@ +/***************************************************************************** + * 版权所有 (C)2008-2010, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_comm_double_link.c + * 文件标识: + * 内容摘要: 双链表实现 + * 其它说明: + * 当前版本: V1.00.10 + * 作 者: + * 完成日期: 2012年2月14日 + * 当前责任人-1:ChenWei188471 + * 当前责任人-2: + * 历史责任人-3: + * + * 修改记录: + * 修改日期: 版 本 号 修 改 人 修改内容 + 1 20120214 V1.00.10 ChenWei188471 创建 + 2 + *****************************************************************************/ + +#include "zxic_common.h" +#include "zxic_comm_double_link.h" + +/*当前节点都插在头节点之后,新插入的节点永远在第一个*/ +ZXIC_RTN32 zxic_comm_double_link_insert_1st(D_NODE *p_newnode, D_HEAD *p_head) +{ + ZXIC_COMM_CHECK_POINT(p_newnode); + ZXIC_COMM_CHECK_POINT(p_head); + + ZXIC_COMM_CHECK_INDEX((p_head->used + 1), 1, p_head->maxnum); + + 
ZXIC_COMM_ASSERT(!(!p_head->p_next && p_head->p_prev)); + ZXIC_COMM_ASSERT(!(p_head->p_next && !p_head->p_prev)); + + p_newnode->next = p_head->p_next; + p_newnode->prev = NULL; /*新节点前驱为NULL*/ + + if (p_head->p_next) { + p_head->p_next->prev = p_newnode; + } else { + p_head->p_prev = p_newnode; + } + + p_head->p_next = p_newnode; + p_head->used++; + + return ZXIC_OK; +} + +/*目的是配合sort函数实现相同节点无需重复写入*/ +ZXIC_RTN32 zxic_comm_double_link_insert_cmp(D_HEAD *p_head, void *cmp_data, + ZXIC_UINT32 *is_same) +{ + D_NODE *p_dn = 0; + + *is_same = 0; + + p_dn = p_head->p_next; + + while (p_dn) { + if (*(ZXIC_UINT32 *)cmp_data == *(ZXIC_UINT32 *)p_dn->data) { + *is_same = 1; + + break; + } + + p_dn = p_dn->next; + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_double_link_insert_merge(D_NODE *p_newnode, D_HEAD *p_head, + ZXIC_UINT32 is_head) +{ + D_NODE *p_dn = 0; + ZXIC_UINT32 is_same = 0; + + p_dn = p_head->p_next; + + while (p_dn) { + if (p_dn->data == p_newnode->data) { + is_same = 1; + break; + } + + p_dn = p_dn->next; + } + + if (!is_same) { + if (is_head) { + return zxic_comm_double_link_insert_1st(p_newnode, + p_head); + } else { + return zxic_comm_double_link_insert_last(p_newnode, + p_head); + } + } + + return ZXIC_OK; +} + +/* 在OLD节点之后插入 */ +ZXIC_RTN32 zxic_comm_double_link_insert_aft(D_NODE *p_newnode, + D_NODE *p_oldnode, D_HEAD *p_head) +{ + ZXIC_COMM_CHECK_POINT(p_newnode); + ZXIC_COMM_CHECK_POINT(p_oldnode); + ZXIC_COMM_CHECK_POINT(p_head); + + ZXIC_COMM_CHECK_INDEX((p_head->used + 1), 1, p_head->maxnum); + + ZXIC_COMM_ASSERT(!(!p_head->p_next && p_head->p_prev)); + ZXIC_COMM_ASSERT(!(p_head->p_next && !p_head->p_prev)); + + p_newnode->next = p_oldnode->next; + p_newnode->prev = p_oldnode; + + if (p_oldnode->next) { + p_oldnode->next->prev = p_newnode; + } else { + p_head->p_prev = p_newnode; + } + + p_oldnode->next = p_newnode; + p_head->used++; + + return ZXIC_OK; +} + +/* 在OLD节点前插入 */ +ZXIC_RTN32 zxic_comm_double_link_insert_pre(D_NODE *p_newnode, + D_NODE 
*p_oldnode, D_HEAD *p_head) +{ + ZXIC_COMM_CHECK_POINT(p_newnode); + ZXIC_COMM_CHECK_POINT(p_oldnode); + ZXIC_COMM_CHECK_POINT(p_head); + + ZXIC_COMM_CHECK_INDEX((p_head->used + 1), 1, p_head->maxnum); + + ZXIC_COMM_ASSERT(!(!p_head->p_next && p_head->p_prev)); + ZXIC_COMM_ASSERT(!(p_head->p_next && !p_head->p_prev)); + + p_newnode->next = p_oldnode; + p_newnode->prev = p_oldnode->prev; + + if (p_oldnode->prev) { + p_oldnode->prev->next = p_newnode; + } else { + p_head->p_next = p_newnode; + } + + p_oldnode->prev = p_newnode; + p_head->used++; + + return ZXIC_OK; +} +ZXIC_RTN32 zxic_comm_double_link_insert_last(D_NODE *p_newnode, D_HEAD *p_head) +{ + D_NODE *p_dnode = NULL; + + ZXIC_COMM_CHECK_POINT(p_newnode); + ZXIC_COMM_CHECK_POINT(p_head); + + ZXIC_COMM_CHECK_INDEX((p_head->used + 1), 1, p_head->maxnum); + + ZXIC_COMM_ASSERT(!(!p_head->p_next && p_head->p_prev)); + ZXIC_COMM_ASSERT(!(p_head->p_next && !p_head->p_prev)); + + p_dnode = p_head->p_prev; + + if (!p_dnode) { + p_head->p_next = p_newnode; + p_head->p_prev = p_newnode; + p_newnode->next = NULL; + p_newnode->prev = NULL; + } else { + p_newnode->prev = p_dnode; + p_newnode->next = NULL; + p_head->p_prev = p_newnode; + p_dnode->next = p_newnode; + } + + p_head->used++; + + return ZXIC_OK; +} +ZXIC_RTN32 zxic_comm_double_link_del(D_NODE *delnode, D_HEAD *p_head) +{ + D_NODE *next = NULL; + D_NODE *pre = NULL; + + ZXIC_COMM_CHECK_POINT(delnode); + ZXIC_COMM_CHECK_POINT(p_head); + + ZXIC_COMM_CHECK_INDEX(p_head->used, 1, p_head->maxnum); + + next = delnode->next; + pre = delnode->prev; + + if (next) { + next->prev = delnode->prev; + } else { + p_head->p_prev = delnode->prev; + } + + if (pre) { + pre->next = delnode->next; + } else { + p_head->p_next = delnode->next; + } + + p_head->used--; + delnode->next = NULL; + delnode->prev = NULL; + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_double_link_init(ZXIC_UINT32 elmemtnum, D_HEAD *p_head) +{ + ZXIC_UINT32 err_code = 0; + + ZXIC_COMM_CHECK_POINT(p_head); + + if 
(elmemtnum == 0) { + err_code = ZXIC_DOUBLE_LINK_INIT_ELEMENT_NUM_ERR; + ZXIC_COMM_TRACE_ERROR( + "\nError:[0x%x] zxic_doule_link_init Element Num Err !", + err_code); + return err_code; + } + + p_head->maxnum = elmemtnum; + p_head->used = 0; + p_head->p_next = NULL; + p_head->p_prev = NULL; + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_dlink_release(D_HEAD *p_head, fun_free fun) +{ + ZXIC_UINT32 rc = 0; + D_NODE *p_node = NULL; + + ZXIC_COMM_CHECK_POINT(p_head); + + while (p_head->used) { + p_node = p_head->p_next; + + if (NULL != fun) { + rc = fun(p_node->data); + ZXIC_COMM_CHECK_RC(rc, "fun"); + } + + rc = zxic_comm_double_link_del(p_node, p_head); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_del"); + + ZXIC_COMM_FREE(p_node); + } + + return ZXIC_OK; +} +/*connetc the s_list to d_list */ + +ZXIC_RTN32 zxic_comm_double_link_merge_list(D_HEAD *d_list, D_HEAD *s_list) +{ + if (d_list->p_prev) { + d_list->p_prev->next = s_list->p_next; + } else { + ZXIC_COMM_ASSERT(!d_list->p_next); + d_list->p_next = s_list->p_next; + } + + if (s_list->p_next) { + ZXIC_COMM_ASSERT(s_list->p_prev); + s_list->p_next->prev = d_list->p_prev; + d_list->p_prev = s_list->p_prev; + } + + d_list->used += s_list->used; + + return ZXIC_OK; +} +/********************************************************************* + * 函数名称:zxic_comm_double_link_insert_sort + * 功能描述: + * 函数功能简介 + * 插入排序, + * 输入参数: + * 输出参数: + * 返 回 值:ZXIC_RTN32 + * 全局变量: + * 注 释: +============================================================ + * 修改记录: + * 修改日期 版本号 修改人 修改内容 +============================================================ + * 2012-03-19 jiangwenming + + ******************************************************************/ +ZXIC_RTN32 zxic_comm_double_link_insert_sort(D_NODE *p_newnode, D_HEAD *p_head, + CMP_FUNC cmp_fuc, void *cmp_data) +{ + D_NODE *pre_node = NULL; + + ZXIC_COMM_CHECK_POINT(p_head); + ZXIC_COMM_CHECK_POINT(p_newnode); + + if (NULL == cmp_fuc) { + cmp_fuc = zxic_comm_double_link_default_cmp_fuc; + 
} + + ZXIC_COMM_CHECK_INDEX((p_head->used + 1), 1, p_head->maxnum); + + /*此时表中的数据,已经排序了,再插入时,只需从表头开始比较,然后插入适当位置*/ + if (0 == p_head->used) { + return zxic_comm_double_link_insert_1st(p_newnode, p_head); + } else { + pre_node = p_head->p_next; + + while (NULL != pre_node) { + /*新节点的键值小于等于当前的键值*/ + if (cmp_fuc(p_newnode, pre_node, cmp_data) <= 0) { + return zxic_comm_double_link_insert_pre( + p_newnode, pre_node, p_head); + } else { + pre_node = pre_node->next; + } + } + + /*循环结束后,说明插入的节点大于链表中的所有节点,需要在尾部插入*/ + return zxic_comm_double_link_insert_last(p_newnode, p_head); + } +} + +/********************************************************************* + * 函数名称:zxic_comm_double_link_del_pos + * 功能描述: + * 函数功能简介 + * 根据node中指定的信息删除节点 + * 输入参数: + * 输出参数: + * 返 回 值:ZXIC_RTN32 + * 全局变量: + * 注 释: 根据指定的信息,可能会删除多个 +============================================================ + * 修改记录: + * 修改日期 版本号 修改人 修改内容 +============================================================ + * 2020-11-11 徐晨曦 + + ******************************************************************/ +ZXIC_RTN32 zxic_comm_double_link_del_by_info(D_HEAD *p_head, void *cmp_data, + CMP_FUNC cmp_fuc, + ZXIC_UINT32 *p_deled_num) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 is_same = 0; + + D_NODE *p; + D_NODE *p_cur_node = ZXIC_NULL; + + if (NULL == cmp_fuc) { + cmp_fuc = zxic_comm_double_link_default_cmp_fuc; + } + + p = p_head->p_next; + + while (p) { + /*新节点的键值等于当前的键值*/ + if (cmp_fuc(cmp_data, p, cmp_data) == 0) { + rc = zxic_comm_double_link_del(p, p_head); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_del"); + + p_cur_node = p; + p = p->next; + + ZXIC_COMM_FREE(p_cur_node); + is_same++; + continue; + } + + p = p->next; + } + + if (0 == is_same) { + return ZXIC_ERR; + } + + *p_deled_num = is_same; + + ZXIC_COMM_TRACE_DEBUG(" DOUBLE LINK DEL NUM %d \n", is_same); + + return ZXIC_OK; +} + +/********************************************************************* + * 函数名称:zxic_comm_double_link_del_pos + * 功能描述: + * 函数功能简介 + * 
删除指定位置的数据 + * 输入参数: + * 输出参数: + * 返 回 值:ZXIC_RTN32 + * 全局变量: + * 注 释: +============================================================ + * 修改记录: + * 修改日期 版本号 修改人 修改内容 +============================================================ + * 2012-03-19 jiangwenming + + ******************************************************************/ +ZXIC_RTN32 zxic_comm_double_link_del_pos(D_HEAD *p_head, void *cmp_data, + fun_free fun) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 is_same = 0; + + D_NODE *p; + + p = p_head->p_next; + + while (p) { + if (*(ZXIC_UINT32 *)cmp_data == *(ZXIC_UINT32 *)p->data) { + is_same = 1; + + break; + } + + p = p->next; + } + + if (is_same) { + rc = zxic_comm_double_link_del(p, p_head); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_del"); + + if (NULL != fun) { + rc = fun(p->data); + ZXIC_COMM_CHECK_RC(rc, "fun"); + } + + ZXIC_COMM_FREE(p); + } + + return ZXIC_OK; +} + +/********************************************************************* + * 函数名称:zxic_comm_double_link_swap + * 功能描述: + * 函数功能简介 + * 交换链表中的两个节点 + * 输入参数: + * 输出参数: + * 返 回 值:ZXIC_RTN32 + * 全局变量: + * 注 释: +============================================================ + * 修改记录: + * 修改日期 版本号 修改人 修改内容 +============================================================ + * 2012-03-19 jiangwenming + + ******************************************************************/ +ZXIC_RTN32 zxic_comm_double_link_print(D_HEAD *p_head) +{ + D_NODE *p_pre = NULL; + D_NODE *p_next = NULL; + + ZXIC_COMM_CHECK_POINT(p_head); + /*正向打印*/ + p_next = p_head->p_next; + ZXIC_COMM_PRINT("*************sequ order***********\n"); + + while (p_next) { + ZXIC_COMM_PRINT("==>%d", *(ZXIC_UINT32 *)(p_next->data)); + p_next = p_next->next; + } + + /*反向打印*/ + ZXIC_COMM_PRINT("\n\n*************reverve order***********\n"); + p_pre = p_head->p_prev; + + while (p_pre != NULL) { + ZXIC_COMM_PRINT("==>%d", *(ZXIC_UINT32 *)(p_pre->data)); + p_pre = p_pre->prev; + } + + return ZXIC_OK; +} + +ZXIC_SINT32 zxic_comm_double_link_default_cmp_fuc(D_NODE 
*p_data1, + D_NODE *p_data2, void *p_data) +{ + ZXIC_UINT32 data1 = *(ZXIC_UINT32 *)p_data1->data; + ZXIC_UINT32 data2 = *(ZXIC_UINT32 *)p_data2->data; + + if (data1 > data2) { + return 1; + } else if (data1 == data2) { + return 0; + } else { + return -1; + } +} + +ZXIC_RTN32 zxic_comm_double_link_del_by_data(D_HEAD *p_head, + ZXIC_VOID *cmp_data, fun_free fun) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 is_same = 0; + + D_NODE *p; + + p = p_head->p_next; + + while (p) { + if (cmp_data == p->data) { + is_same = 1; + + break; + } + p = p->next; + } + + if (is_same) { + rc = zxic_comm_double_link_del(p, p_head); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_del"); + if (NULL != fun) { + rc = fun(p->data); + ZXIC_COMM_CHECK_RC(rc, "fun"); + } + + ZXIC_COMM_FREE(p); + } else { + ZXIC_COMM_TRACE_ERROR( + "\nError:data not exist. FUNCTION : %s!\n", + __FUNCTION__); + } + + return ZXIC_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_index_fill.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_index_fill.c new file mode 100644 index 000000000000..b941c7c2dc23 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_index_fill.c @@ -0,0 +1,442 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_index_fill.c + * 文件标识: + * 内容摘要: 索引空位填充源代码头文件 + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ +#include "zxic_common.h" +#include "zxic_comm_index_fill.h" +#include "zxic_comm_double_link.h" + +/***********************************************************/ +/** 释放数据节点 +* @param p_data +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 ic_comm_node_data_free(void 
*p_data) +{ + ZXIC_COMM_CHECK_POINT(p_data); + ZXIC_COMM_FREE(p_data); + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_indexfill_init(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 index_num, + ZXIC_KEY_CMP_FUNC p_cmp_fun, + INDEXFILL_SWAP_FUNC p_swap_fun, + ZXIC_UINT32 key_len) +{ + ZXIC_UINT32 rtn = 0; + + p_fill_cfg->index_num = index_num; + p_fill_cfg->total_used = 0; + p_fill_cfg->swap_fun = p_swap_fun; + + rtn = zxic_comm_rb_init(&p_fill_cfg->fill_rb, 0, key_len, p_cmp_fun); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_init"); + + return ZXIC_OK; +} +/* 前向挤压 */ +ZXIC_RTN32 zxic_comm_indexfill_handle_position_right(INDEX_FILL_CFG *p_fill_cfg, + INDEX_FILL_NODE *p_start, + INDEX_FILL_NODE *p_end) +{ + ZXIC_UINT32 rtn = 0; + + INDEX_FILL_NODE *p_cur_node = p_start; + INDEX_FILL_NODE *p_nxt_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.next, ZXIC_RB_TN, tn_ln); + INDEX_FILL_NODE *p_pre_node = NULL; + + while (p_cur_node != p_end) { + if (ICMINF_GET_NODE_LASTPOS(p_cur_node) + 1 < + ICMINF_GET_NODE_FSTPOS(p_nxt_node)) { + break; + } else { + p_cur_node = p_nxt_node; + + if (p_cur_node->rb_node.tn_ln.next) { + p_nxt_node = + (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.next, + ZXIC_RB_TN, tn_ln); + } + } + } + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(p_fill_cfg->index_num, 1); + if (p_cur_node == p_end && + p_fill_cfg->index_num - 1 == ICMINF_GET_NODE_LASTPOS(p_end)) { + return ZXIC_INDEX_FILL_FULL; + } + + p_pre_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.prev, ZXIC_RB_TN, tn_ln); + + while (p_cur_node != p_start) { + if (p_fill_cfg->swap_fun) { + rtn = p_fill_cfg->swap_fun( + ICMINF_GET_NODE_FSTPOS(p_cur_node), + ICMINF_GET_NODE_LASTPOS(p_cur_node) + 1, + p_fill_cfg); + } + + ICMINF_GET_NODE_FSTPOS(p_cur_node) = + ICMINF_GET_NODE_FSTPOS(p_cur_node) + 1; + + p_cur_node = p_pre_node; + + if (p_cur_node->rb_node.tn_ln.prev) { + p_pre_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + 
p_cur_node->rb_node.tn_ln.prev, ZXIC_RB_TN, + tn_ln); + } + } + + if (p_fill_cfg->swap_fun) { + rtn = p_fill_cfg->swap_fun(ICMINF_GET_NODE_FSTPOS(p_start), + ICMINF_GET_NODE_LASTPOS(p_start) + 1, + p_fill_cfg); + } + + ICMINF_GET_NODE_FSTPOS(p_start) = ICMINF_GET_NODE_FSTPOS(p_start) + 1; + + return rtn; +} +/* 后向挤压 */ +ZXIC_RTN32 zxic_comm_indexfill_handle_position_left(INDEX_FILL_CFG *p_fill_cfg, + INDEX_FILL_NODE *p_start, + INDEX_FILL_NODE *p_end) +{ + ZXIC_UINT32 rtn = 0; + + INDEX_FILL_NODE *p_cur_node = p_start; + INDEX_FILL_NODE *p_nxt_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.prev, ZXIC_RB_TN, tn_ln); + INDEX_FILL_NODE *p_pre_node = NULL; + + while (p_cur_node != p_end) { + ZXIC_COMM_CHECK_POINT(p_nxt_node); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(p_nxt_node->position, + p_nxt_node->usednum); + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW( + (p_nxt_node->position + p_nxt_node->usednum), 1); + + if (ICMINF_GET_NODE_FSTPOS(p_cur_node) - 1 > + ICMINF_GET_NODE_LASTPOS(p_nxt_node)) { + break; + } else { + p_cur_node = p_nxt_node; + + if (p_cur_node->rb_node.tn_ln.prev) { + p_nxt_node = + (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.prev, + ZXIC_RB_TN, tn_ln); + } + } + } + + if (p_cur_node == p_end && 0 == ICMINF_GET_NODE_FSTPOS(p_end)) { + return ZXIC_INDEX_FILL_FULL; + } + + p_pre_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.next, ZXIC_RB_TN, tn_ln); + + while (p_cur_node != p_start) { + if (p_fill_cfg->swap_fun) { /*将最后一个移动到本节点第一个*/ + rtn = p_fill_cfg->swap_fun( + ICMINF_GET_NODE_LASTPOS(p_cur_node), + ICMINF_GET_NODE_FSTPOS(p_cur_node) - 1, + p_fill_cfg); + } + + ICMINF_GET_NODE_FSTPOS(p_cur_node) = + ICMINF_GET_NODE_FSTPOS(p_cur_node) - 1; + + p_cur_node = p_pre_node; + + if (p_cur_node->rb_node.tn_ln.next) { + p_pre_node = (INDEX_FILL_NODE *)STRUCT_ENTRY_POINT( + p_cur_node->rb_node.tn_ln.next, ZXIC_RB_TN, + tn_ln); + } + } + + if (p_fill_cfg->swap_fun) { + rtn = 
p_fill_cfg->swap_fun(ICMINF_GET_NODE_LASTPOS(p_start), + ICMINF_GET_NODE_FSTPOS(p_start) - 1, + p_fill_cfg); + } + + ICMINF_GET_NODE_FSTPOS(p_start) = ICMINF_GET_NODE_FSTPOS(p_start) - 1; + + return rtn; +} + +ZXIC_RTN32 zxic_comm_indexfill_free(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 free_index, ZXIC_VOID *p_rb_key, + ZXIC_UINT32 *out_index) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_RB_TN *p_rb_out = NULL; + INDEX_FILL_NODE *p_inf_node = NULL; + + rtn = zxic_comm_rb_search(&p_fill_cfg->fill_rb, p_rb_key, &p_rb_out); + + if ((!p_rb_out) || (rtn != ZXIC_OK)) { + ZXIC_COMM_TRACE_ERROR("\n srh fail ,the key is not exist"); + return ZXIC_INDEX_DEL_FAIL; + } + + p_inf_node = (INDEX_FILL_NODE *)p_rb_out; + + ZXIC_COMM_ASSERT(p_inf_node->usednum); + + ZXIC_COMM_CHECK_INDEX(free_index, ICMINF_GET_NODE_FSTPOS(p_inf_node), + ICMINF_GET_NODE_LASTPOS(p_inf_node)); + + *out_index = free_index; + + if (free_index == ICMINF_GET_NODE_FSTPOS(p_inf_node)) { + p_inf_node->position++; + } else if (free_index != ICMINF_GET_NODE_LASTPOS(p_inf_node)) { + *out_index = ICMINF_GET_NODE_FSTPOS(p_inf_node); + + if (p_fill_cfg->swap_fun) { + p_fill_cfg->swap_fun(ICMINF_GET_NODE_FSTPOS(p_inf_node), + free_index, p_fill_cfg); + } + + p_inf_node->position++; + } else { + ZXIC_COMM_TRACE_DEBUG( + "\n Free the last position,do nothing \n"); + } + + p_inf_node->usednum--; + + if (p_inf_node->usednum == 0) { + rtn = zxic_comm_rb_delete(&p_fill_cfg->fill_rb, p_rb_key, + &p_rb_out); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_delete"); + + if (!p_rb_out) { + ZXIC_COMM_TRACE_ERROR( + "\n srh fail ,the key is not exist"); + return ZXIC_INDEX_DEL_FAIL; + } + + ZXIC_COMM_FREE(p_inf_node->rb_node.p_key); + + ZXIC_COMM_FREE(p_inf_node); + } else { + /*ICMINF_GET_NODE_FSTPOS(p_inf_node) = ICMINF_GET_NODE_FSTPOS(p_inf_node)-1;*/ + } + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(p_fill_cfg->total_used, 1); + p_fill_cfg->total_used--; + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_indexfill_destroy(INDEX_FILL_CFG *p_fill_cfg) +{ 
+ ZXIC_UINT32 rtn = 0; + + rtn = zxic_comm_dlink_release(&p_fill_cfg->fill_rb.tn_list, + ic_comm_node_data_free); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_dlink_release"); + + ZXIC_COMM_MEMSET(p_fill_cfg, 0, ZXIC_SIZEOF(INDEX_FILL_CFG)); + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_indexfill_show_all_position(INDEX_FILL_CFG *p_fill_cfg) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + D_NODE *p_node = NULL; + INDEX_FILL_NODE *p_inf_node = NULL; + + p_node = p_fill_cfg->fill_rb.tn_list.p_next; + + ZXIC_COMM_PRINT( + "\n *************************Used Position*************************\n"); + + while (p_node) { + ZXIC_COMM_PRINT("\n ==== Num [%d ] ==== :", i); + + p_inf_node = (INDEX_FILL_NODE *)(STRUCT_ENTRY_POINT( + p_node, ZXIC_RB_TN, tn_ln)); + + for (j = 0; j < p_inf_node->usednum; j++) { + ZXIC_COMM_PRINT(" %d ", + ICMINF_GET_NODE_FSTPOS(p_inf_node) + j); + + if (j != 0 && j % 8 == 0) { + ZXIC_COMM_PRINT("\n "); + } + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(i, 1); + i++; + + p_node = p_node->next; + } + + ZXIC_COMM_PRINT("\n*******End*******\n"); + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_indexfill_store(INDEX_FILL_CFG *p_fill_cfg, + ZXIC_UINT32 *p_size, + ZXIC_UINT8 **p_data_buff) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 used_node_num = 0; + ZXIC_UINT32 rb_node_size = 0; + ZXIC_UINT32 max_index_num = 0; + ZXIC_UINT32 buff_offset = 0; + ZXIC_UINT32 tmp_val = 0; + D_NODE *p_node = NULL; + INDEX_FILL_NODE *p_inf_node = NULL; + ZXIC_UINT8 *p_item_buff = NULL; + ZXIC_UINT32 item_buff_offset = 0; + /* + | used_node_num | rb_node_size | max_index_num |//head + | node_start_index | node_used_num | rb_key ... ... 
|//item + */ + + ZXIC_COMM_CHECK_POINT(p_fill_cfg); + ZXIC_COMM_CHECK_POINT(p_size); + //item size + rb_node_size = p_fill_cfg->fill_rb.key_size + + 8; /*sizeof(ZXIC_UINT32) + sizeof(ZXIC_UINT32);*/ + max_index_num = p_fill_cfg->index_num; + + p_node = p_fill_cfg->fill_rb.tn_list.p_next; + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(used_node_num, 1); + while (p_node) { + //p_inf_node = (INDEX_FILL_NODE *)(STRUCT_ENTRY_POINT(p_node,ZXIC_RB_TN,tn_ln)); + used_node_num++; + p_node = p_node->next; + } + tmp_val = ZXIC_SIZEOF(ZXIC_UINT32) * 3; + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW(rb_node_size, used_node_num); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(tmp_val, + rb_node_size * used_node_num); + *p_size = tmp_val + rb_node_size * used_node_num; + + *p_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(*p_size); + ZXIC_COMM_CHECK_POINT(*p_data_buff); + ZXIC_COMM_MEMSET(*p_data_buff, 0, *p_size); + + buff_offset = 0; + + ZXIC_COMM_MEMCPY_S(*p_data_buff + buff_offset, ZXIC_SIZEOF(ZXIC_UINT32), + &used_node_num, ZXIC_SIZEOF(ZXIC_UINT32)); + buff_offset += ZXIC_SIZEOF(ZXIC_UINT32); + ZXIC_COMM_MEMCPY_S(*p_data_buff + buff_offset, ZXIC_SIZEOF(ZXIC_UINT32), + &rb_node_size, ZXIC_SIZEOF(ZXIC_UINT32)); + buff_offset += ZXIC_SIZEOF(ZXIC_UINT32); + ZXIC_COMM_MEMCPY_S(*p_data_buff + buff_offset, ZXIC_SIZEOF(ZXIC_UINT32), + &max_index_num, ZXIC_SIZEOF(ZXIC_UINT32)); + buff_offset += ZXIC_SIZEOF(ZXIC_UINT32); + + p_item_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(rb_node_size); + ZXIC_COMM_CHECK_POINT(p_item_buff); + ZXIC_COMM_MEMSET(p_item_buff, 0, rb_node_size); + + for (i = 0; i < used_node_num; i++) { + if (0 == i) { + p_node = p_fill_cfg->fill_rb.tn_list.p_next; + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(p_node, p_item_buff); + } else { + p_node = p_node->next; + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(p_node, p_item_buff); + } + item_buff_offset = 0; + + p_inf_node = (INDEX_FILL_NODE *)(STRUCT_ENTRY_POINT( + p_node, ZXIC_RB_TN, tn_ln)); + + ZXIC_COMM_MEMCPY_S(p_item_buff + item_buff_offset, + 
ZXIC_SIZEOF(ZXIC_UINT32), + &(p_inf_node->position), + ZXIC_SIZEOF(ZXIC_UINT32)); + item_buff_offset += ZXIC_SIZEOF(ZXIC_UINT32); + ZXIC_COMM_MEMCPY_S(p_item_buff + item_buff_offset, + ZXIC_SIZEOF(ZXIC_UINT32), + &(p_inf_node->usednum), + ZXIC_SIZEOF(ZXIC_UINT32)); + item_buff_offset += ZXIC_SIZEOF(ZXIC_UINT32); + ZXIC_COMM_MEMCPY_S(p_item_buff + item_buff_offset, + p_fill_cfg->fill_rb.key_size, + p_inf_node->rb_node.p_key, + p_fill_cfg->fill_rb.key_size); + + ZXIC_COMM_MEMCPY_S(*p_data_buff + buff_offset, rb_node_size, + p_item_buff, rb_node_size); + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_MEMORY_FREE( + buff_offset, rb_node_size, p_item_buff); + buff_offset += rb_node_size; + } + + ZXIC_COMM_ASSERT(buff_offset == *p_size); + + ZXIC_COMM_FREE(p_item_buff); + + return rtn; +} + +ZXIC_RTN32 zxic_comm_indexfill_clear(INDEX_FILL_CFG *p_fill_cfg) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_UINT32 key_len = p_fill_cfg->fill_rb.key_size; + ZXIC_KEY_CMP_FUNC p_cmp_fun = p_fill_cfg->fill_rb.p_cmpfun; + INDEX_FILL_NODE *fill_node; + D_NODE *p_curnode = NULL; + void *cur_data; + p_curnode = p_fill_cfg->fill_rb.tn_list.p_next; + + while (p_curnode) { + cur_data = p_curnode->data; + p_curnode = p_curnode->next; + fill_node = cur_data; + ZXIC_COMM_FREE(fill_node->rb_node.p_key); + ZXIC_COMM_FREE(fill_node); + } + + rtn = zxic_comm_rb_destroy(&p_fill_cfg->fill_rb); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_destroy"); + + rtn = zxic_comm_rb_init(&p_fill_cfg->fill_rb, 0, key_len, p_cmp_fun); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_init"); + + p_fill_cfg->total_used = 0; + + return ZXIC_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_liststack.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_liststack.c new file mode 100644 index 000000000000..8f30714f016f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_liststack.c @@ -0,0 +1,394 @@ +/***************************************************************************** + * 版权所有 
(C)2001-2006, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: Rtm_ListStack.c + * 文件标识: + * 内容摘要: 链表栈管理文件 + * + * 其它说明: 链表栈的管理方式,后进先出,释放的元素挂到队头 + * + * 当前版本: ZXR10 V2.6 + * 作 者: 郑纪伟 + * 完成日期: 2006-9-27 10:34 + * 当前责任人-1: + * 当前责任人-2: + * + * 修改记录1: + * 修改日期:2006-9-27 10:34 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:郑纪伟 + * 修改内容:创建 + * + * 修改记录2: + * 文件名称:ftmcomm_liststack.c + * 修改日期:2008-10-16 15:08 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:HuangHe 170389 + * 修改内容:移植到T8000项目使用 + * + *修改记录3: + * 修改文件名称:zxic_liststack.c + * 修改日期:2012-03-15 15:14 + * 版 本 号:ZXR10 V2.6 + * 修 改 人:JiangWenming 12010401 + * 修改内容:移植到NSE项目使用 + * + *****************************************************************************/ +#include "zxic_common.h" + +ZXIC_RTN32 zxic_comm_liststack_creat(ZXIC_UINT32 element_num, + ZXIC_LISTSTACK_MANGER **p_list) +{ + ZXIC_LISTSTACK_MANGER *p_local_list = NULL; + ZXIC_UINT32 dw_list_size = 0; + ZXIC_UINT32 dw_manage_size = 0; + ZXIC_UINT32 dw_actual_element_num = 0; + ZXIC_UINT32 i = 0; + + /*611002174859 zj068187 检查双重指针*/ + if (NULL == p_list) { + ZXIC_COMM_PRINT("\n p_list is NULL!\n"); + return ZXIC_LIST_STACK_POINT_NULL; + } + if (element_num <= 0) { + *p_list = NULL; + ZXIC_COMM_PRINT("\n FtmComm_ListStackCreat_dwElementNum <=0"); + return ZXIC_LIST_STACK_ELEMENT_NUM_ERR; + } + + if (element_num > LISTSTACK_MAX_ELEMENT - 1) { + dw_actual_element_num = LISTSTACK_MAX_ELEMENT; + } else { + dw_actual_element_num = + element_num + 1; /*10124041 index from 0*/ + } + + dw_list_size = + (dw_actual_element_num * ZXIC_SIZEOF(ZXIC_COMM_FREELINK)) & + 0xffffffff; + dw_manage_size = (ZXIC_SIZEOF(ZXIC_LISTSTACK_MANGER) + dw_list_size) & + 0xffffffff; + + p_local_list = + (ZXIC_LISTSTACK_MANGER *)ZXIC_COMM_MALLOC(dw_manage_size); + + if (p_local_list == NULL) { + *p_list = NULL; + ZXIC_COMM_PRINT("\n zxic_comm_liststack_creat Fail \n"); + return ZXIC_LIST_STACK_ALLOC_MEMORY_FAIL; + } + + ZXIC_COMM_MEMSET(p_local_list, 0, dw_manage_size); + + p_local_list->p_array = + (ZXIC_COMM_FREELINK *)((ZXIC_UINT8 
*)p_local_list + + ZXIC_SIZEOF(ZXIC_LISTSTACK_MANGER)); + + p_local_list->capacity = dw_actual_element_num; + p_local_list->free_num = + dw_actual_element_num - 1; /* for index = 0 is reserved */ + p_local_list->used_num = 0; + + for (i = 1; i < (dw_actual_element_num - 1); i++) { + p_local_list->p_array[i].index = i; + p_local_list->p_array[i].next = i + 1; + } + + p_local_list->p_array[0].index = 0; + p_local_list->p_array[0].next = 0; + + p_local_list->p_array[dw_actual_element_num - 1].index = + dw_actual_element_num - 1; + p_local_list->p_array[dw_actual_element_num - 1].next = 0xffffffff; + + p_local_list->p_head = p_local_list->p_array[1].index; + + *p_list = p_local_list; + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_alloc(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 *p_index) +{ + ZXIC_UINT32 dw_alloc_index = 0; + ZXIC_UINT32 dw_next_free = 0; + + if (p_list == NULL) { + *p_index = LISTSTACK_INVALID_INDEX; + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_alloc! ERROR LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_POINT_NULL; + } + + if ((p_list->p_head) == LISTSTACK_INVALID_INDEX) { + *p_index = LISTSTACK_INVALID_INDEX; + + /*ZXIC_COMM_PRINT("\n zxic_comm_liststack_alloc is full! LINE:%d\n ",__LINE__);*/ + return ZXIC_LIST_STACK_ISEMPTY_ERR; + } + + dw_alloc_index = p_list->p_head; + + dw_next_free = p_list->p_array[dw_alloc_index].next; + p_list->p_array[dw_alloc_index].next = LISTSTACK_INVALID_INDEX; + + if (dw_next_free != 0xffffffff) { /* ZXIC_UINT32 - 1 为0xffffffff*/ + p_list->p_head = p_list->p_array[dw_next_free].index; + } else { + p_list->p_head = LISTSTACK_INVALID_INDEX; + } + + *p_index = dw_alloc_index - 1; /*减1是为了使索引从0开始分配*/ + + p_list->free_num--; + p_list->used_num++; + + /*分配一个条目后,判断队列是否变为空,若变为空,则头指向无效索引*/ + if ((p_list->free_num == 0) || + (p_list->used_num == (p_list->capacity - 1))) { + /*ZXIC_COMM_PRINT("\n zxic_comm_liststack_alloc! 
ERROR LINE:%d\n ",__LINE__);*/ + p_list->p_head = LISTSTACK_INVALID_INDEX; + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_free(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 index) +{ + ZXIC_UINT32 dw_free_index = 0; + ZXIC_UINT32 dw_prev_free = 0; + ZXIC_UINT32 dw_index = 0; + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(index, 1); + dw_index = index + 1; /*加1是为了使索引从0开始分配*/ + + if (p_list == NULL) { + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_free is null! LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_POINT_NULL; + } + + if (dw_index >= p_list->capacity) { + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_free is null! LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_FREE_INDEX_INVALID; + } + + if (p_list->p_array[dw_index].next != LISTSTACK_INVALID_INDEX) { + return ZXIC_OK; + } + + dw_free_index = dw_index; + dw_prev_free = p_list->p_head; + + if (dw_prev_free != 0) { + p_list->p_array[dw_free_index].next = + p_list->p_array[dw_prev_free].index; + } else { + p_list->p_array[dw_free_index].next = 0xffffffff; + } + + /* 释放的元素挂到队头*/ + p_list->p_head = p_list->p_array[dw_free_index].index; + + p_list->free_num++; + p_list->used_num--; + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_alloc_spec_index(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 index) +{ + ZXIC_UINT32 dw_free_index = 0; + ZXIC_UINT32 dw_index = 0; + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(index, 1); + dw_index = index + 1; //加1是为了使索引从0开始分配 + if (p_list == NULL) { + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_alloc_spec_index: address is full! ERROR LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_POINT_NULL; + } + + if ((p_list->p_head) == LISTSTACK_INVALID_INDEX) { + //ZXIC_COMM_PRINT("\n zxic_comm_liststack_alloc is full! LINE:%d\n ",__LINE__); + return ZXIC_LIST_STACK_ISEMPTY_ERR; + } + + if (dw_index >= p_list->capacity) { + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_alloc_spec_index: input invalid index! 
LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_ALLOC_INDEX_INVALID; + } + + if (p_list->p_array[dw_index].next == LISTSTACK_INVALID_INDEX) { + ZXIC_COMM_PRINT( + "\n zxic_comm_liststack_alloc_spec_index: index is used, not alloc again! LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_ALLOC_INDEX_USED; + } else { + if (p_list->p_head == dw_index) { + if (p_list->p_array[dw_index].next != 0xffffffff) { + p_list->p_head = p_list->p_array[dw_index].next; + } else { + p_list->p_head = LISTSTACK_INVALID_INDEX; + } + } else { + dw_free_index = p_list->p_head; + while (p_list->p_array[dw_free_index].next != + 0xffffffff) { + if (p_list->p_array[dw_free_index].next == + dw_index) { + p_list->p_array[dw_free_index].next = + p_list->p_array[dw_index].next; + break; + } + dw_free_index = + p_list->p_array[dw_free_index].next; + } + } + + p_list->p_array[dw_index].next = LISTSTACK_INVALID_INDEX; + p_list->free_num--; + p_list->used_num++; + } + + /*分配一个条目后,判断队列是否变为空,若变为空,则头指向无效索引*/ + if ((p_list->free_num == 0) || + (p_list->used_num == (p_list->capacity - 1))) { + //ZXIC_COMM_PRINT("\n zxic_comm_liststack_alloc! ERROR LINE:%d\n ",__LINE__); + p_list->p_head = LISTSTACK_INVALID_INDEX; + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_destroy(ZXIC_LISTSTACK_MANGER *p_list) +{ + if (p_list == NULL) { + ZXIC_COMM_PRINT("\n zxic_comm_liststack_destroy! 
LINE:%d\n ", + __LINE__); + return ZXIC_LIST_STACK_POINT_NULL; + } + ZXIC_COMM_FREE(p_list); + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_showlist_info(ZXIC_LISTSTACK_MANGER *p_list) +{ + ZXIC_COMM_PRINT("\n\t List: 0x%p", (void *)p_list); + ZXIC_COMM_PRINT("\n\t Array: 0x%p", (void *)p_list->p_array); + ZXIC_COMM_PRINT("\n\t capacity: 0x%x", p_list->capacity); + ZXIC_COMM_PRINT("\n\t p_head: 0x%x", p_list->p_head); + ZXIC_COMM_PRINT("\n\t free_num: 0x%x", p_list->free_num); + ZXIC_COMM_PRINT("\n\t used_num: 0x%x\n", p_list->used_num); + + return 0; +} + +ZXIC_RTN32 zxic_comm_liststack_show_used(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 line_number) +{ + ZXIC_RTN32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 used_number = 0; + ZXIC_UINT32 dw_index = 0; + ZXIC_UINT32 dw_last_free_idx = 0; + + if (p_list == NULL) { + ZXIC_COMM_PRINT("\n Please Input Param!"); + return 0; + } + + if (line_number == 0) { + line_number = 32; + } + + rc = zxic_comm_liststack_showlist_info(p_list); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_liststack_showlist_info"); + ZXIC_COMM_PRINT("\n"); + + /*611002175032 zj068187 begin*/ + if (LISTSTACK_INVALID_INDEX == p_list->p_head) { + ZXIC_COMM_PRINT("\n The index are all used!\n"); + } + + dw_index = p_list->p_head; + ZXIC_COMM_CHECK_INDEX_UPPER(dw_index, (p_list->capacity - 1)); + while (LISTSTACK_INVALID_INDEX != p_list->p_array[dw_index].next) { + dw_index = p_list->p_array[dw_index].next; + ZXIC_COMM_CHECK_INDEX_UPPER(dw_index, (p_list->capacity - 1)); + } + + dw_last_free_idx = p_list->p_array[dw_index].index; + /*611002175032 zj068187 end*/ + + for (i = 1; i < p_list->capacity; i++) { + /*611002175032 zj068187 modify*/ + if ((LISTSTACK_INVALID_INDEX == p_list->p_array[i].next) && + (dw_last_free_idx != p_list->p_array[i].index)) { + ZXIC_COMM_PRINT(" 0x%x", i); + used_number++; + + if ((used_number % line_number) == 0) { + ZXIC_COMM_PRINT("\n"); + } + } + } + + ZXIC_COMM_PRINT("\n used_number: 0x%x", used_number); + + return 
ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_liststack_show_free(ZXIC_LISTSTACK_MANGER *p_list, + ZXIC_UINT32 line_number) +{ + ZXIC_RTN32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 free_number = 0; + + if (p_list == NULL) { + ZXIC_COMM_PRINT("\n Please Input Param!"); + return 0; + } + + if (line_number == 0) { + line_number = 32; + } + + rc = zxic_comm_liststack_showlist_info(p_list); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_liststack_showlist_info"); + ZXIC_COMM_PRINT("\n"); + + index = p_list->p_head; + + for (i = p_list->capacity - 1; i != 0; i--) { + if (index != LISTSTACK_INVALID_INDEX) { + ZXIC_COMM_PRINT(" 0x%x", index); + free_number++; + + index = p_list->p_array[index].next; + + if ((free_number % line_number) == 0) { + ZXIC_COMM_PRINT("\n"); + } + } else { + break; + } + } + + ZXIC_COMM_PRINT("\n free_number: 0x%x", free_number); + + return ZXIC_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_mutex_lock.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_mutex_lock.c new file mode 100644 index 000000000000..e3eede05d224 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_mutex_lock.c @@ -0,0 +1,180 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_sal.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/07 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "zxic_private.h" + +#ifdef ZXIC_OS_WIN +#define ZXIC_MUTEX_WAITTIME_MAX (INFINITE) /* 互斥锁最大等待时间 */ +#else +#define ZXIC_MUTEX_WAITTIME_MAX (5000) /* 互斥锁最大等待时间:5000ms */ +#endif + +/***********************************************************/ +/** 初始化互斥量 +* @param p_mutex 互斥量 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/07 
+************************************************************/ +ZXIC_RTN32 zxic_comm_mutex_create(ZXIC_MUTEX_T *p_mutex) +{ + // ZXIC_SINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(p_mutex); + +#ifdef ZXIC_OS_WIN + p_mutex->mutex = CreateMutex(ZXIC_NULL, ZXIC_FALSE, ZXIC_NULL); + if (p_mutex->mutex == 0) { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: Create mutex failed.", + ZXIC_MUTEX_LOCK_INIT_FAIL); + return ZXIC_MUTEX_LOCK_INIT_FAIL; + } +#else + /*rc = pthread_mutex_init(&p_mutex->mutex, NULL); + if(rc != 0) + { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: Create mutex failed", ZXIC_MUTEX_LOCK_INIT_FAIL); + return ZXIC_MUTEX_LOCK_INIT_FAIL; + }*/ + mutex_init(&p_mutex->mutex); + +#endif + + return ZXIC_OK; +} + +/***********************************************************/ +/** 互斥量加锁 +* @param p_mutex +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_mutex_lock(ZXIC_MUTEX_T *p_mutex) +{ + ZXIC_SINT32 rc = 0; +#ifndef ZXIC_FOR_FUZZER + ZXIC_COMM_CHECK_POINT(p_mutex); + +#ifdef ZXIC_OS_WIN + switch (WaitForSingleObject(p_mutex->mutex, ZXIC_MUTEX_WAITTIME_MAX)) { + case (WAIT_OBJECT_0): { + /* wait mutex success. 
*/ + break; + } + default: { + ZXIC_COMM_TRACE_ERROR( + "\nErrCode[ 0x%x ]: WaitForSingleObject failed.", + ZXIC_MUTEX_LOCK_LOCK_FAIL); + ZXIC_COMM_ASSERT(0); + return ZXIC_MUTEX_LOCK_LOCK_FAIL; + } + } +#else + /*rc = pthread_mutex_lock(&p_mutex->mutex); + if(rc != 0) + { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: Get mutex lock fail.", ZXIC_MUTEX_LOCK_LOCK_FAIL); + //ZXIC_COMM_ASSERT(0); + //return ZXIC_MUTEX_LOCK_LOCK_FAIL; + return rc; + }*/ + mutex_lock(&p_mutex->mutex); +#endif +#endif + + return rc; +} + +/***********************************************************/ +/** 互斥量解锁 +* @param p_mutex +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_mutex_unlock(ZXIC_MUTEX_T *p_mutex) +{ + ZXIC_SINT32 rc = 0; +#ifndef ZXIC_FOR_FUZZER + + ZXIC_COMM_CHECK_POINT(p_mutex); + +#ifdef ZXIC_OS_WIN + if (!ReleaseMutex(p_mutex->mutex)) { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: ReleaseMutex failed.", + ZXIC_MUTEX_LOCK_ULOCK_FAIL); + return ZXIC_MUTEX_LOCK_ULOCK_FAIL; + } +#else + /*rc = pthread_mutex_unlock(&p_mutex->mutex); + if(rc != 0) + { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: Release mutex lock fail.", ZXIC_MUTEX_LOCK_ULOCK_FAIL); + return ZXIC_MUTEX_LOCK_ULOCK_FAIL; + }*/ + mutex_unlock(&p_mutex->mutex); +#endif +#endif + + return rc; +} + +/***********************************************************/ +/** 销毁互斥量 +* @param p_mutex +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_mutex_destroy(ZXIC_MUTEX_T *p_mutex) +{ + // ZXIC_SINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(p_mutex); + +#ifdef ZXIC_OS_WIN + if (p_mutex->mutex == 0) { + ZXIC_COMM_TRACE_ERROR( + "\nErrCode[ 0x%x ]: Destroy mutex failed.", + ZXIC_MUTEX_LOCK_DESTROY_FAIL); + return ZXIC_MUTEX_LOCK_DESTROY_FAIL; + } + CloseHandle(p_mutex->mutex); +#else + /*rc = pthread_mutex_destroy(&p_mutex->mutex); 
+ if(rc != 0) + { + ZXIC_COMM_TRACE_ERROR("\nErrCode[ 0x%x ]: Destroy mutex fail", ZXIC_MUTEX_LOCK_DESTROY_FAIL); + return ZXIC_MUTEX_LOCK_DESTROY_FAIL; + }*/ + mutex_destroy(&p_mutex->mutex); +#endif + + return ZXIC_OK; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_print.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_print.c new file mode 100644 index 000000000000..dc0e4b50d3b1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_print.c @@ -0,0 +1,554 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_comm_print.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : xuchenxi_10235594 +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#include "zxic_common.h" +#include "zxic_private.h" +#include "log.h" + +#if ZXIC_REAL("全局变量定义") +ZXIC_UINT32 g_zxic_print_level = ZXIC_TRACE_ERROR_PRINT; /*打印级别,默认为INFO*/ +ZXIC_UINT32 g_zxic_print_en = 1; /*界面打印控制开关*/ +#endif + +#define ZXIC_COMM_TRACE_BUFFER_SIZE (512) + +#if ZXIC_REAL("开关") +/***********************************************************/ +/** 设置打印开关,决定ZXIC_COMM_PRINT等调试打印函数是否输出到屏幕 +* @param enable 0-不打印到屏幕,1-打印到屏幕 +* +* @return +* @remark 无 +* @see +* @author zhaisyu @date 2018/10/25 +************************************************************/ +ZXIC_VOID zxic_comm_set_print_en(ZXIC_UINT32 enable) +{ + g_zxic_print_en = enable; +} + +ZXIC_RTN32 zxic_comm_get_print_en(ZXIC_VOID) +{ + return g_zxic_print_en; +} + +/***********************************************************/ +/** 设置Debug打印级别 +* @param debug_level 0打印级别最低,4打印级别最高,即打印的东西最多 +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID zxic_comm_set_print_level(ZXIC_UINT32 
debug_level) +{ + g_zxic_print_level = debug_level; +} + +/***********************************************************/ +/** 获取Debug打印级别 +* @param ZXIC_VOID +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_get_print_level(ZXIC_VOID) +{ + return g_zxic_print_level; +} + +#endif + +#if ZXIC_REAL("打印函数") +ZXIC_VOID ZXIC_COMM_PRINT(ZXIC_CONST ZXIC_CHAR *format, ...) +{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, ap); + if (g_zxic_print_en) { + DH_LOG_INFO(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); +} + +/***********************************************************/ +/** Error信息打印函数 +* @param format +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_ERROR(ZXIC_CONST ZXIC_CHAR *format, ...) +{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, ap); + if (zxic_comm_get_print_en()) { + DH_LOG_ERR(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); +} + +/***********************************************************/ +/** Notice信息打印函数 +* @param format +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_NOTICE(ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_NOTICE_PRINT) { + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + if (zxic_comm_get_print_en()) { + DH_LOG_INFO(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** Info信息打印函数 +* @param format +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_INFO(ZXIC_CONST ZXIC_CHAR *format, ...) +{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_INFO_PRINT) { + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + if (zxic_comm_get_print_en()) { + DH_LOG_INFO(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** Debug信息打印函数 +* @param format +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEBUG(ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_DEBUG_PRINT) { + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + if (zxic_comm_get_print_en()) { + DH_LOG_DEBUG(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 所有调试信息打印函数 +* @param format +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_ALL(ZXIC_CONST ZXIC_CHAR *format, ...) +{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE]; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_ALL_PRINT) { + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + if (zxic_comm_get_print_en()) { + DH_LOG_DEBUG(MODULE_NP, "%s", szBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 支持多芯片的Error信息打印函数 +* 打印级别为1及以上时执行打印 +* @param dev_id +* @param format +* +* @return +* @remark 无 +* @see +* @author xcx @date 2020/07/20 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEV_ERROR(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE - 32] = { 0 }; + ZXIC_CHAR devBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE] = { 0 }; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_ERROR_PRINT) { + ZXIC_COMM_SNPRINTF_S(devBuffer, ZXIC_SIZEOF(devBuffer), + ZXIC_SIZEOF(devBuffer), + "Dev_id[%u]_ERROR: ", dev_id); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + ZXIC_COMM_STRNCAT_S( + devBuffer, ZXIC_SIZEOF(devBuffer), szBuffer, + ZXIC_COMM_STRNLEN_S(szBuffer, + ZXIC_SIZEOF(szBuffer))); + + if (zxic_comm_get_print_en()) { + DH_LOG_ERR(MODULE_NP, "%s", devBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 支持多芯片的Info信息打印函数 +* 打印级别为2及以上时执行打印 +* @param dev_id +* @param format +* +* @return +* @remark 无 +* @see +* @author wcl @date 2018/09/08 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEV_NOTICE(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE - 32] = { 0 }; + ZXIC_CHAR devBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE] = { 0 }; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_NOTICE_PRINT) { + ZXIC_COMM_SNPRINTF_S(devBuffer, ZXIC_SIZEOF(devBuffer), + ZXIC_SIZEOF(devBuffer), + "Dev_id[%u]_NOTICE: ", dev_id); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + ZXIC_COMM_STRNCAT_S( + devBuffer, ZXIC_SIZEOF(devBuffer), szBuffer, + ZXIC_COMM_STRNLEN_S(szBuffer, + ZXIC_SIZEOF(szBuffer))); + + if (zxic_comm_get_print_en()) { + DH_LOG_INFO(MODULE_NP, "%s", devBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 支持多芯片的Info信息打印函数 +* 打印级别为2及以上时执行打印 +* @param dev_id +* @param format +* +* @return +* @remark 无 +* @see +* @author wcl @date 2018/09/08 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEV_INFO(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE - 32] = { 0 }; + ZXIC_CHAR devBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE] = { 0 }; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_INFO_PRINT) { + ZXIC_COMM_SNPRINTF_S(devBuffer, ZXIC_SIZEOF(devBuffer), + ZXIC_SIZEOF(devBuffer), + "Dev_id[%u]_INFO: ", dev_id); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + ZXIC_COMM_STRNCAT_S( + devBuffer, ZXIC_SIZEOF(devBuffer), szBuffer, + ZXIC_COMM_STRNLEN_S(szBuffer, + ZXIC_SIZEOF(szBuffer))); + + if (zxic_comm_get_print_en()) { + DH_LOG_INFO(MODULE_NP, "%s", devBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 支持多芯片的Debug信息打印函数 +* @param dev_id +* @param format +* +* @return +* @remark 无 +* @see +* @author wcl @date 2018/09/08 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEV_DEBUG(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE - 32] = { 0 }; + ZXIC_CHAR devBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE] = { 0 }; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_DEBUG_PRINT) { + ZXIC_COMM_SNPRINTF_S(devBuffer, ZXIC_SIZEOF(devBuffer), + ZXIC_SIZEOF(devBuffer), + "Dev_id[%u]_DEBUG: ", dev_id); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + ZXIC_COMM_STRNCAT_S( + devBuffer, ZXIC_SIZEOF(devBuffer), szBuffer, + ZXIC_COMM_STRNLEN_S(szBuffer, + ZXIC_SIZEOF(szBuffer))); + + if (zxic_comm_get_print_en()) { + DH_LOG_DEBUG(MODULE_NP, "%s", devBuffer); + } + } + va_end(ap); + } +} + +/***********************************************************/ +/** 支持多芯片的所有调试信息打印函数 +* @param dev_id +* @param format +* +* @return +* @remark 无 +* @see +* @author wcl @date 2018/09/08 +************************************************************/ +ZXIC_VOID ZXIC_COMM_TRACE_DEV_ALL(ZXIC_UINT32 dev_id, + ZXIC_CONST ZXIC_CHAR *format, ...) 
+{ + va_list ap; + ZXIC_CHAR szBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE - 32] = { 0 }; + ZXIC_CHAR devBuffer[ZXIC_COMM_TRACE_BUFFER_SIZE] = { 0 }; + + ZXIC_COMM_ASSERT(format); + + if (zxic_comm_get_print_level() == 0 || + zxic_comm_get_print_level() >= ZXIC_TRACE_INVALID_PRINT) { + return; + } + + if (zxic_comm_get_print_level() >= ZXIC_TRACE_ALL_PRINT) { + ZXIC_COMM_SNPRINTF_S(devBuffer, ZXIC_SIZEOF(devBuffer), + ZXIC_SIZEOF(devBuffer), + "Dev_id[%u]_ALL:", dev_id); + + va_start(ap, format); + { + ZXIC_COMM_VSNPRINTF_S(szBuffer, ZXIC_SIZEOF(szBuffer), + ZXIC_SIZEOF(szBuffer), format, + ap); + ZXIC_COMM_STRNCAT_S( + devBuffer, ZXIC_SIZEOF(devBuffer), szBuffer, + ZXIC_COMM_STRNLEN_S(szBuffer, + ZXIC_SIZEOF(szBuffer))); + + if (zxic_comm_get_print_en()) { + DH_LOG_DEBUG(MODULE_NP, "%s", devBuffer); + } + } + va_end(ap); + } +} +#endif + +#if ZXIC_REAL("") + +ZXIC_VOID ZXIC_COMM_DBGCNT64_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT64 value) +{ + ZXIC_CHAR temp_buff[50] = { 0 }; + + if (-1 == ZXIC_COMM_SNPRINTF_S(temp_buff, 50, 50, "0x%016llx", value)) { + return; + } + + ZXIC_COMM_PRINT("%-50s : %18s\n", name, temp_buff); +} + +ZXIC_VOID ZXIC_COMM_DBGCNT32_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT32 value) +{ + ZXIC_CHAR temp_buff[50] = { 0 }; + + if (-1 == ZXIC_COMM_SNPRINTF_S(temp_buff, 50, 50, "0x%08x", value)) { + return; + } + + ZXIC_COMM_PRINT("%-50s : %18s\n", name, temp_buff); +} + +/** 双参数打印 */ +ZXIC_VOID ZXIC_COMM_DBGCNT32_PAR_PRINT(ZXIC_CONST ZXIC_CHAR *name, + ZXIC_UINT32 parm, ZXIC_UINT32 value) +{ + ZXIC_CHAR temp_buff[50] = { 0 }; + ZXIC_CHAR vlaue_buff[18] = { 0 }; + + if (-1 == ZXIC_COMM_SNPRINTF_S(temp_buff, 50, 50, name, parm)) { + return; + } + + if (-1 == ZXIC_COMM_SNPRINTF_S(vlaue_buff, 18, 18, "0x%08x", value)) { + return; + } + + ZXIC_COMM_PRINT("%-50s : %18s\n", temp_buff, vlaue_buff); +} + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_rb_tree.c 
b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_rb_tree.c new file mode 100644 index 000000000000..6ed7bdf9bea9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_comm_rb_tree.c @@ -0,0 +1,1009 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: zxic_comm_rb_tree.c + * 文件标识: + * 内容摘要: + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ +#include "zxic_common.h" +#include "zxic_comm_rb_tree.h" +#include "zxic_comm_double_link.h" + +ZXIC_SINT32 zxic_comm_rb_def_cmp(ZXIC_VOID *p_new, ZXIC_VOID *p_old, + ZXIC_UINT32 key_size) +{ + return ZXIC_COMM_MEMCMP(p_new, p_old, key_size); +} + +ZXIC_RTN32 zxic_comm_rb_init(ZXIC_RB_CFG *p_rb_cfg, ZXIC_UINT32 total_num, + ZXIC_UINT32 key_size, ZXIC_RB_CMPFUN cmpfun) +{ + ZXIC_UINT32 rtn = ZXIC_OK; + ZXIC_UINT32 malloc_size = 0; + ZXIC_UINT32 memset_size = 0; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + if (p_rb_cfg->is_init) { + ZXIC_COMM_TRACE_ERROR("\n zxic_comm_rb_init already init!"); + return ZXIC_OK; + } + + p_rb_cfg->key_size = key_size; + p_rb_cfg->p_root = NULL; + + if (cmpfun) { + p_rb_cfg->p_cmpfun = cmpfun; + } else { + p_rb_cfg->p_cmpfun = zxic_comm_rb_def_cmp; + } + + if (total_num) { + p_rb_cfg->is_dynamic = 0; + + rtn = zxic_comm_double_link_init(total_num, &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_double_link_init"); + + rtn = zxic_comm_liststack_creat(total_num, &p_rb_cfg->p_lsm); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_liststack_creat"); + + p_rb_cfg->p_keybase = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + total_num * p_rb_cfg->key_size); + ZXIC_COMM_CHECK_POINT(p_rb_cfg->p_keybase); + memset_size = total_num * p_rb_cfg->key_size; + ZXIC_COMM_MEMSET(p_rb_cfg->p_keybase, 0, memset_size); + + malloc_size = (ZXIC_SIZEOF(ZXIC_RB_TN) 
* total_num) & + ZXIC_UINT32_MASK; + + p_rb_cfg->p_tnbase = + (ZXIC_RB_TN *)ZXIC_COMM_MALLOC(malloc_size); + ZXIC_COMM_CHECK_POINT(p_rb_cfg->p_tnbase); + ZXIC_COMM_MEMSET(p_rb_cfg->p_tnbase, 0, + total_num * ZXIC_SIZEOF(ZXIC_RB_TN)); + } else { /*totalnum = 0 indicate that customer manage the memory*/ + p_rb_cfg->is_dynamic = 1; + + rtn = zxic_comm_double_link_init(0xFFFFFFFF, + &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_double_link_init"); + } + p_rb_cfg->is_init = 1; + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_rb_destroy(ZXIC_RB_CFG *p_rb_cfg) +{ + ZXIC_UINT32 rtn = 0; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + /*静态才使用*/ + if (0 == p_rb_cfg->is_dynamic) { + zxic_comm_liststack_destroy(p_rb_cfg->p_lsm); + } + if (NULL != p_rb_cfg->p_keybase) { + ZXIC_COMM_FREE(p_rb_cfg->p_keybase); + p_rb_cfg->p_keybase = NULL; + } + + if (NULL != p_rb_cfg->p_tnbase) { + ZXIC_COMM_FREE(p_rb_cfg->p_tnbase); + p_rb_cfg->p_tnbase = NULL; + } + + ZXIC_COMM_MEMSET(p_rb_cfg, 0, ZXIC_SIZEOF(ZXIC_RB_CFG)); + + return rtn; +} + +ZXIC_VOID zxic_comm_rb_swich_color(ZXIC_RB_TN *p_tn1, ZXIC_RB_TN *p_tn2) +{ + ZXIC_UINT32 color1, color2; + + ZXIC_COMM_CHECK_POINT_NONE(p_tn1); + ZXIC_COMM_CHECK_POINT_NONE(p_tn2); + + color1 = GET_TN_COLOR(p_tn1); + color2 = GET_TN_COLOR(p_tn2); + + SET_TN_COLOR(p_tn1, color2); + SET_TN_COLOR(p_tn2, color1); + + return; +} + +ZXIC_RB_TN *zxic_comm_rb_get_brotn(ZXIC_RB_TN *p_cur_tn) +{ + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_cur_tn); + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_cur_tn->p_parent); + + return (p_cur_tn->p_parent->p_left == p_cur_tn) ? 
+ p_cur_tn->p_parent->p_right : + p_cur_tn->p_parent->p_left; +} + +ZXIC_RTN32 zxic_comm_rb_handle_ins(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_RB_TN ***stack_tn, + ZXIC_UINT32 stack_top) +{ + ZXIC_RB_TN **pp_cur_tn = NULL; + ZXIC_RB_TN *p_cur_tn = NULL; + ZXIC_RB_TN **pp_tmp_tn = NULL; + ZXIC_RB_TN *p_tmp_tn = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(stack_tn); + + while (stack_top > 0) { + pp_cur_tn = stack_tn[stack_top]; + p_cur_tn = *pp_cur_tn; + + if (!p_cur_tn->p_parent) { /*root must be black*/ + SET_TN_COLOR(p_cur_tn, ZXIC_RBT_BLACK); + break; + } else if (GET_TN_COLOR(p_cur_tn->p_parent) == ZXIC_RBT_RED) { + ZXIC_RB_TN *p_unc_tn = + zxic_comm_rb_get_brotn(p_cur_tn->p_parent); + + ZXIC_COMM_ASSERT(p_cur_tn->p_parent == + *stack_tn[stack_top - 1]); + + if (GET_TN_COLOR(p_unc_tn) == + ZXIC_RBT_RED) { /*unc is red,so we change the black of parent and unc*/ + ZXIC_COMM_ASSERT(p_unc_tn); + SET_TN_COLOR(p_cur_tn->p_parent, + ZXIC_RBT_BLACK); + SET_TN_COLOR(p_unc_tn, ZXIC_RBT_BLACK); + + ZXIC_COMM_ASSERT(p_cur_tn->p_parent->p_parent == + *stack_tn[stack_top - 2]); + + SET_TN_COLOR(p_cur_tn->p_parent->p_parent, + ZXIC_RBT_RED); + stack_top -= 2; + } else { /*we need shift ,p_cur_tn->parent->parent*/ + ZXIC_RB_TN *p_bro_tn = NULL; + + pp_tmp_tn = stack_tn[stack_top - 2]; + p_tmp_tn = *pp_tmp_tn; + + if (p_cur_tn->p_parent == p_tmp_tn->p_left && + p_cur_tn == p_cur_tn->p_parent->p_left) { + *pp_tmp_tn = p_cur_tn->p_parent; + + p_bro_tn = zxic_comm_rb_get_brotn( + p_cur_tn); + p_cur_tn->p_parent->p_parent = + p_tmp_tn->p_parent; + + p_tmp_tn->p_left = p_bro_tn; + p_tmp_tn->p_parent = p_cur_tn->p_parent; + p_cur_tn->p_parent->p_right = p_tmp_tn; + + if (p_bro_tn) { + p_bro_tn->p_parent = p_tmp_tn; + } + + zxic_comm_rb_swich_color(*pp_tmp_tn, + p_tmp_tn); + } else if (p_cur_tn->p_parent == + p_tmp_tn->p_left && + p_cur_tn == + p_cur_tn->p_parent->p_right) { + *pp_tmp_tn = p_cur_tn; + + p_cur_tn->p_parent->p_right = + p_cur_tn->p_left; + + if 
(p_cur_tn->p_left) { + p_cur_tn->p_left->p_parent = + p_cur_tn->p_parent; + } + + p_cur_tn->p_parent->p_parent = p_cur_tn; + p_tmp_tn->p_left = p_cur_tn->p_right; + + if (p_cur_tn->p_right) { + p_cur_tn->p_right->p_parent = + p_tmp_tn; + } + + p_cur_tn->p_left = p_cur_tn->p_parent; + p_cur_tn->p_right = p_tmp_tn; + + p_cur_tn->p_parent = p_tmp_tn->p_parent; + p_tmp_tn->p_parent = p_cur_tn; + + zxic_comm_rb_swich_color(*pp_tmp_tn, + p_tmp_tn); + } else if (p_cur_tn->p_parent == + p_tmp_tn->p_right && + p_cur_tn == + p_cur_tn->p_parent->p_right) { + *pp_tmp_tn = p_cur_tn->p_parent; + p_bro_tn = zxic_comm_rb_get_brotn( + p_cur_tn); + + p_cur_tn->p_parent->p_parent = + p_tmp_tn->p_parent; + + p_tmp_tn->p_right = + p_cur_tn->p_parent->p_left; + p_tmp_tn->p_parent = p_cur_tn->p_parent; + p_cur_tn->p_parent->p_left = p_tmp_tn; + + if (p_bro_tn) { + p_bro_tn->p_parent = p_tmp_tn; + } + + zxic_comm_rb_swich_color(*pp_tmp_tn, + p_tmp_tn); + } else { + *pp_tmp_tn = p_cur_tn; + p_cur_tn->p_parent->p_left = + p_cur_tn->p_right; + + if (p_cur_tn->p_right) { + p_cur_tn->p_right->p_parent = + p_cur_tn->p_parent; + } + + p_cur_tn->p_parent->p_parent = p_cur_tn; + p_tmp_tn->p_right = p_cur_tn->p_left; + + if (p_cur_tn->p_left) { + p_cur_tn->p_left->p_parent = + p_tmp_tn; + } + + p_cur_tn->p_right = p_cur_tn->p_parent; + p_cur_tn->p_left = p_tmp_tn; + + p_cur_tn->p_parent = p_tmp_tn->p_parent; + p_tmp_tn->p_parent = p_cur_tn; + + zxic_comm_rb_swich_color(*pp_tmp_tn, + p_tmp_tn); + } + + /*change color*/ + + /* SET_TN_COLOR(p_cur_tn->p_parent,ZXIC_RBT_BLACK); + SET_TN_COLOR(p_tmp_tn,ZXIC_RBT_RED); */ + break; + } + } else { /*parent is black ,nothing to do ,end*/ + break; + } + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_rb_insert(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_UINT32 stack_top = 1; + ZXIC_SINT32 cmprtn = 0; + ZXIC_UINT32 lsm_out = 0; + + ZXIC_RB_TN **stack_tn[ZXIC_RBT_MAX_DEPTH] = { 0 }; + //ZXIC_RB_TN **pp_tmp_tn 
= NULL; + ZXIC_RB_TN *p_cur_tn = NULL; + ZXIC_RB_TN *p_pre_tn = NULL; + ZXIC_RB_TN **pp_cur_tn = NULL; + ZXIC_VOID *p_cur_key = NULL; + ZXIC_RB_TN *p_ins_tn = p_key; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(p_key); + + p_cur_key = p_rb_cfg->is_dynamic ? ((ZXIC_RB_TN *)p_key)->p_key : p_key; + + pp_cur_tn = &p_rb_cfg->p_root; + + for (;;) { + p_cur_tn = *pp_cur_tn; + + if (!p_cur_tn) { /*find the insert position*/ + if (p_rb_cfg->is_dynamic == 0) { + rtn = zxic_comm_liststack_alloc(p_rb_cfg->p_lsm, + &lsm_out); + + if (rtn == ZXIC_LIST_STACK_ISEMPTY_ERR) { + return ZXIC_RBT_RC_FULL; + } + + ZXIC_COMM_CHECK_RC(rtn, + "zxic_comm_liststack_alloc"); + + p_ins_tn = p_rb_cfg->p_tnbase + lsm_out; + + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW( + p_rb_cfg->key_size, lsm_out); + INIT_RBT_TN(p_ins_tn, + p_rb_cfg->key_size * lsm_out + + p_rb_cfg->p_keybase); + + ZXIC_COMM_MEMCPY_S(p_ins_tn->p_key, + p_rb_cfg->key_size, p_key, + p_rb_cfg->key_size); + + SET_TN_LSV(p_ins_tn, lsm_out); + + if (out_val) { + *((ZXIC_UINT32 *)out_val) = lsm_out; + } + } else { + INIT_D_NODE(&p_ins_tn->tn_ln, p_ins_tn); + } + + /*all insert tn color set to red*/ + SET_TN_COLOR(p_ins_tn, ZXIC_RBT_RED); + + /*insert list*/ + if (cmprtn < 0) { + rtn = zxic_comm_double_link_insert_pre( + &p_ins_tn->tn_ln, &p_pre_tn->tn_ln, + &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_pre"); + } else if (cmprtn > 0) { + rtn = zxic_comm_double_link_insert_aft( + &p_ins_tn->tn_ln, &p_pre_tn->tn_ln, + &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_aft"); + } else { + /*first insert*/ + ZXIC_COMM_ASSERT(!p_pre_tn); + + rtn = zxic_comm_double_link_insert_1st( + &p_ins_tn->tn_ln, &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_1st"); + } + + /*get out loop */ + break; + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(stack_top, 1); + stack_tn[stack_top++] = pp_cur_tn; + p_pre_tn = p_cur_tn; + cmprtn = 
p_rb_cfg->p_cmpfun(p_cur_key, p_cur_tn->p_key, + p_rb_cfg->key_size); + + if (cmprtn > 0) { + pp_cur_tn = &p_cur_tn->p_right; + } else if (cmprtn < 0) { + pp_cur_tn = &p_cur_tn->p_left; + } else { + ZXIC_COMM_TRACE_ALL("info ,rb_key is same \n"); + + if (p_rb_cfg->is_dynamic) { + if (out_val) { + *((ZXIC_RB_TN **)out_val) = p_cur_tn; + } + } else { + if (out_val) { + *((ZXIC_UINT32 *)out_val) = + GET_TN_LSV(p_cur_tn); + } + } + + return ZXIC_RBT_RC_UPDATE; + } + } + + /*handle parenet ptr*/ + //pp_tmp_tn = stack_tn[stack_top - 1]; + + /*p_ins_tn->p_parent = stack_top != 1 ? *pp_tmp_tn : NULL;*/ + p_ins_tn->p_parent = (stack_top != 1) ? *stack_tn[stack_top - 1] : NULL; + + stack_tn[stack_top] = pp_cur_tn; + + *pp_cur_tn = p_ins_tn; + + rtn = zxic_comm_rb_handle_ins(p_rb_cfg, stack_tn, stack_top); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_handle_ins"); + + /* 新插入时也返回 */ + if (p_rb_cfg->is_dynamic) { + if (out_val) { + *((ZXIC_RB_TN **)out_val) = p_ins_tn; + } + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_rb_handle_del(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_RB_TN ***stack_tn, + ZXIC_UINT32 stack_top) +{ + ZXIC_RB_TN **pp_cur_tn = NULL; + ZXIC_RB_TN *p_cur_tn = NULL; + ZXIC_RB_TN *p_tmp_tn = NULL; + ZXIC_RB_TN *p_unc_tn = NULL; + ZXIC_RB_TN *p_par_tn = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(stack_tn); + + while (stack_top > 1) { + pp_cur_tn = stack_tn[stack_top]; + p_cur_tn = *pp_cur_tn; + + p_par_tn = *stack_tn[stack_top - 1]; + + if (p_cur_tn && p_cur_tn->p_parent) { + p_unc_tn = zxic_comm_rb_get_brotn(p_cur_tn); + } else if (p_cur_tn && !p_cur_tn->p_parent) { + ZXIC_COMM_ASSERT(p_par_tn == p_cur_tn->p_parent); + + SET_TN_COLOR(p_cur_tn, ZXIC_RBT_BLACK); + + break; + } else { + ZXIC_COMM_ASSERT(!p_cur_tn); + + if (p_par_tn) { + p_unc_tn = p_par_tn->p_left ? 
p_par_tn->p_left : + p_par_tn->p_right; + } else { + break; + } + } + + if (p_unc_tn) { + ZXIC_COMM_ASSERT(p_unc_tn->p_parent == p_par_tn); + } + + if (GET_TN_COLOR(p_unc_tn) == ZXIC_RBT_RED) { /*shift */ + ZXIC_COMM_CHECK_INDEX_BOTH(stack_top, 1, + (ZXIC_RBT_MAX_DEPTH - 2)); + if (p_unc_tn == p_par_tn->p_left) { /*shift right */ + *stack_tn[stack_top - 1] = p_unc_tn; + p_unc_tn->p_parent = p_par_tn->p_parent; + p_par_tn->p_left = p_unc_tn->p_right; + + if (p_unc_tn->p_right) { + p_unc_tn->p_right->p_parent = p_par_tn; + } + + p_par_tn->p_parent = p_unc_tn; + p_unc_tn->p_right = p_par_tn; + + stack_tn[stack_top++] = &p_unc_tn->p_right; + ZXIC_COMM_CHECK_INDEX_UPPER( + stack_top, (ZXIC_RBT_MAX_DEPTH - 1)); + stack_tn[stack_top] = &p_par_tn->p_right; + } else { /*shift left*/ + ZXIC_COMM_ASSERT(p_unc_tn == p_par_tn->p_right); + *stack_tn[stack_top - 1] = p_unc_tn; + p_unc_tn->p_parent = p_par_tn->p_parent; + p_par_tn->p_right = p_unc_tn->p_left; + + if (p_unc_tn->p_left) { + p_unc_tn->p_left->p_parent = p_par_tn; + } + + p_par_tn->p_parent = p_unc_tn; + p_unc_tn->p_left = p_par_tn; + + stack_tn[stack_top++] = &p_unc_tn->p_left; + ZXIC_COMM_CHECK_INDEX_UPPER( + stack_top, (ZXIC_RBT_MAX_DEPTH - 1)); + stack_tn[stack_top] = &p_par_tn->p_left; + } + + zxic_comm_rb_swich_color(p_unc_tn, p_par_tn); + } else if (!p_unc_tn) { + /*this branch will never run ,consider too much*/ + ZXIC_COMM_ASSERT(0); + ZXIC_COMM_ASSERT(GET_TN_COLOR(p_par_tn) == + ZXIC_RBT_RED); + + SET_TN_COLOR(p_par_tn, ZXIC_RBT_BLACK); + + break; + } else { + if (GET_TN_COLOR(p_unc_tn->p_left) == ZXIC_RBT_BLACK && + GET_TN_COLOR(p_unc_tn->p_right) == ZXIC_RBT_BLACK) { + if (GET_TN_COLOR(p_unc_tn->p_parent) == + ZXIC_RBT_BLACK) { + SET_TN_COLOR(p_unc_tn, ZXIC_RBT_RED); + stack_top--; + } else { + ZXIC_COMM_ASSERT( + GET_TN_COLOR( + p_unc_tn->p_parent) == + ZXIC_RBT_RED); + + zxic_comm_rb_swich_color( + p_unc_tn->p_parent, p_unc_tn); + + break; + } + } else if (p_unc_tn == p_par_tn->p_right) { + if 
(GET_TN_COLOR(p_unc_tn->p_right) == + ZXIC_RBT_RED) { /*shift left*/ + *stack_tn[stack_top - 1] = p_unc_tn; + p_unc_tn->p_parent = p_par_tn->p_parent; + p_par_tn->p_right = p_unc_tn->p_left; + + if (p_unc_tn->p_left) { + p_unc_tn->p_left->p_parent = + p_par_tn; + } + + p_par_tn->p_parent = p_unc_tn; + p_unc_tn->p_left = p_par_tn; + + zxic_comm_rb_swich_color(p_unc_tn, + p_par_tn); + + SET_TN_COLOR(p_unc_tn->p_right, + ZXIC_RBT_BLACK); + + break; + } else { + ZXIC_COMM_ASSERT( + GET_TN_COLOR( + p_unc_tn->p_left) == + ZXIC_RBT_RED); + + p_tmp_tn = p_unc_tn->p_left; + + p_par_tn->p_right = p_tmp_tn; + p_tmp_tn->p_parent = p_par_tn; + p_unc_tn->p_left = p_tmp_tn->p_right; + + if (p_tmp_tn->p_right) { + p_tmp_tn->p_right->p_parent = + p_unc_tn; + } + + p_tmp_tn->p_right = p_unc_tn; + p_unc_tn->p_parent = p_tmp_tn; + + zxic_comm_rb_swich_color(p_tmp_tn, + p_unc_tn); + } + } else { + ZXIC_COMM_ASSERT(p_unc_tn == p_par_tn->p_left); + + if (GET_TN_COLOR(p_unc_tn->p_left) == + ZXIC_RBT_RED) { /*shift right*/ + *stack_tn[stack_top - 1] = p_unc_tn; + p_unc_tn->p_parent = p_par_tn->p_parent; + p_par_tn->p_left = p_unc_tn->p_right; + + if (p_unc_tn->p_right) { + p_unc_tn->p_right->p_parent = + p_par_tn; + } + + p_par_tn->p_parent = p_unc_tn; + p_unc_tn->p_right = p_par_tn; + + zxic_comm_rb_swich_color(p_unc_tn, + p_par_tn); + + SET_TN_COLOR(p_unc_tn->p_left, + ZXIC_RBT_BLACK); + break; + } else { + ZXIC_COMM_ASSERT( + GET_TN_COLOR( + p_unc_tn->p_right) == + ZXIC_RBT_RED); + + p_tmp_tn = p_unc_tn->p_right; + + p_par_tn->p_left = p_tmp_tn; + p_tmp_tn->p_parent = p_par_tn; + p_unc_tn->p_right = p_tmp_tn->p_left; + + if (p_tmp_tn->p_left) { + p_tmp_tn->p_left->p_parent = + p_unc_tn; + } + + p_tmp_tn->p_left = p_unc_tn; + p_unc_tn->p_parent = p_tmp_tn; + + zxic_comm_rb_swich_color(p_tmp_tn, + p_unc_tn); + } + } + } + } + + return ZXIC_OK; +} + +ZXIC_RTN32 zxic_comm_rb_delete(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_UINT32 
stack_top = 1; + ZXIC_SINT32 cmprtn = 0; + ZXIC_UINT32 rsv_stack = 0; + ZXIC_UINT32 del_is_red = 0; + + ZXIC_RB_TN **stack_tn[ZXIC_RBT_MAX_DEPTH] = { 0 }; + ZXIC_RB_TN *p_cur_tn = NULL; + ZXIC_RB_TN **pp_cur_tn = NULL; + ZXIC_VOID *p_cur_key = NULL; + ZXIC_RB_TN *p_rsv_tn = NULL; + ZXIC_RB_TN *p_del_tn = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(out_val); + + p_cur_key = p_key; + + pp_cur_tn = &p_rb_cfg->p_root; + + for (;;) { + p_cur_tn = *pp_cur_tn; + + if (!p_cur_tn) { + /*ZXIC_COMM_TRACE_ERROR("\n error ,the key is not exist !");*/ + return ZXIC_RBT_RC_SRHFAIL; + } + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(stack_top, 1); + stack_tn[stack_top++] = pp_cur_tn; + + cmprtn = p_rb_cfg->p_cmpfun(p_cur_key, p_cur_tn->p_key, + p_rb_cfg->key_size); + + if (cmprtn > 0) { + pp_cur_tn = &p_cur_tn->p_right; + } else if (cmprtn < 0) { + pp_cur_tn = &p_cur_tn->p_left; + } else { + ZXIC_COMM_TRACE_ALL(" find the key!\n"); + + break; + } + } + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(stack_top, 1); + rsv_stack = stack_top - 1; /*save stack pos*/ + p_rsv_tn = p_cur_tn; + + pp_cur_tn = &p_cur_tn->p_right; + p_cur_tn = *pp_cur_tn; + + if (p_cur_tn) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(stack_top, 1); + stack_tn[stack_top++] = pp_cur_tn; + + pp_cur_tn = &p_cur_tn->p_left; + p_cur_tn = *pp_cur_tn; + + while (p_cur_tn) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(stack_top, 1); + stack_tn[stack_top++] = pp_cur_tn; + pp_cur_tn = &p_cur_tn->p_left; + p_cur_tn = *pp_cur_tn; + } + + /*get the del tn*/ + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(stack_top, 1); + p_del_tn = *stack_tn[stack_top - 1]; + + /*set tn is left child to cur place*/ + *stack_tn[stack_top - 1] = p_del_tn->p_right; + + if (p_del_tn->p_right) { + p_del_tn->p_right->p_parent = p_del_tn->p_parent; + } + + /*rsv the del tn info for delete*/ + if (GET_TN_COLOR(p_del_tn) == ZXIC_RBT_RED) { + del_is_red = 1; + } + + /*replace the delete val*/ + ZXIC_COMM_CHECK_INDEX_UPPER(rsv_stack, + (ZXIC_RBT_MAX_DEPTH - 2)); + 
*stack_tn[rsv_stack] = p_del_tn; + + stack_tn[rsv_stack + 1] = &p_del_tn->p_right; + + SET_TN_COLOR(p_del_tn, GET_TN_COLOR(p_rsv_tn)); + p_del_tn->p_parent = p_rsv_tn->p_parent; + + p_del_tn->p_left = p_rsv_tn->p_left; + + if (p_rsv_tn->p_left) { + p_rsv_tn->p_left->p_parent = p_del_tn; + } + + p_del_tn->p_right = p_rsv_tn->p_right; + + if (p_rsv_tn->p_right) { + p_rsv_tn->p_right->p_parent = p_del_tn; + } + } else { + if (GET_TN_COLOR(p_rsv_tn) == ZXIC_RBT_RED) { + del_is_red = 1; + } + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(stack_top, 1); + *stack_tn[stack_top - 1] = p_rsv_tn->p_left; + + if (p_rsv_tn->p_left) { + p_rsv_tn->p_left->p_parent = p_rsv_tn->p_parent; + } + } + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(stack_top, 1); + stack_top--; + ZXIC_COMM_CHECK_INDEX_UPPER(stack_top, (ZXIC_RBT_MAX_DEPTH - 1)); + if (GET_TN_COLOR(*stack_tn[stack_top]) == ZXIC_RBT_RED) { + SET_TN_COLOR(*stack_tn[stack_top], ZXIC_RBT_BLACK); + } else if (!del_is_red) { /*del node is red ,do nothing*/ + rtn = zxic_comm_rb_handle_del(p_rb_cfg, stack_tn, stack_top); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_handle_del"); + } + + /*clear the node from the list */ + rtn = zxic_comm_double_link_del(&p_rsv_tn->tn_ln, &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_double_link_del"); + + if (p_rb_cfg->is_dynamic) { + *(ZXIC_RB_TN **)out_val = p_rsv_tn; + } else { + rtn = zxic_comm_liststack_free(p_rb_cfg->p_lsm, + GET_TN_LSV(p_rsv_tn)); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_liststack_free"); + + *(ZXIC_UINT32 *)out_val = GET_TN_LSV(p_rsv_tn); + + ZXIC_COMM_MEMSET(p_rsv_tn->p_key, 0, p_rb_cfg->key_size); + ZXIC_COMM_MEMSET(p_rsv_tn, 0, ZXIC_SIZEOF(ZXIC_RB_TN)); + } + + return ZXIC_OK; +} + +/* 注意此处传入的p_key和insert时候传入的p_key不是同一个东西 */ +ZXIC_RTN32 zxic_comm_rb_search(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key, + ZXIC_VOID *out_val) +{ + ZXIC_SINT32 cmprtn = 0; + ZXIC_RB_TN *p_cur_tn = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(p_key); + ZXIC_COMM_CHECK_POINT(out_val); + + 
p_cur_tn = p_rb_cfg->p_root; + + while (p_cur_tn) { + cmprtn = p_rb_cfg->p_cmpfun(p_key, p_cur_tn->p_key, + p_rb_cfg->key_size); + + if (cmprtn > 0) { + p_cur_tn = p_cur_tn->p_right; + } else if (cmprtn < 0) { + p_cur_tn = p_cur_tn->p_left; + } else { + break; + } + } + + if (!p_cur_tn) { + ZXIC_COMM_TRACE_ALL("rb srh fail \n"); + return ZXIC_RBT_RC_SRHFAIL; + } + + if (p_rb_cfg->is_dynamic) { + *(ZXIC_RB_TN **)out_val = p_cur_tn; + } else { + *(ZXIC_UINT32 *)out_val = GET_TN_LSV(p_cur_tn); + } + + return ZXIC_OK; +} + +ZXIC_SINT32 zxic_comm_rb_is_none(ZXIC_RB_CFG *p_rb_cfg) +{ + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + + if (0 == p_rb_cfg->tn_list.used) { + return 1; + } else { + return 0; + } +} + +ZXIC_RB_TN *zxic_comm_rb_get_1st_tn(ZXIC_RB_CFG *p_rb_cfg) +{ + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_rb_cfg); + + return (p_rb_cfg->p_root) ? p_rb_cfg->tn_list.p_next->data : NULL; +} + +ZXIC_RTN32 zxic_comm_rb_get_1st_key(ZXIC_RB_CFG *p_rb_cfg, ZXIC_VOID *p_key_out) +{ + D_NODE *rb_list_node = NULL; + ZXIC_RB_TN *p_rb_node = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(p_key_out); + + if (zxic_comm_rb_is_none(p_rb_cfg)) { + return ZXIC_RBT_ISEMPTY_ERR; + } + + rb_list_node = p_rb_cfg->tn_list.p_next; + ZXIC_COMM_CHECK_POINT(rb_list_node); + + p_rb_node = (ZXIC_RB_TN *)rb_list_node->data; + ZXIC_COMM_CHECK_POINT(p_rb_node); + + ZXIC_COMM_MEMCPY_S(p_key_out, p_rb_cfg->key_size, p_rb_node->p_key, + p_rb_cfg->key_size); + + return ZXIC_OK; +} + +ZXIC_RB_TN *zxic_comm_rb_get_last_tn(ZXIC_RB_CFG *p_rb_cfg) +{ + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_rb_cfg); + + return (p_rb_cfg->p_root) ? 
p_rb_cfg->tn_list.p_prev->data : NULL; +} + +ZXIC_RTN32 zxic_comm_rb_get_last_key(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_VOID *p_key_out) +{ + D_NODE *p_rb_list_node = NULL; + ZXIC_RB_TN *p_rb_node = NULL; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + + if (zxic_comm_rb_is_none(p_rb_cfg)) { + return ZXIC_RBT_ISEMPTY_ERR; + } + + p_rb_list_node = p_rb_cfg->tn_list.p_prev; + + p_rb_node = (ZXIC_RB_TN *)p_rb_list_node->data; + ZXIC_COMM_MEMCPY_S(p_key_out, p_rb_cfg->key_size, p_rb_node->p_key, + p_rb_cfg->key_size); + + return ZXIC_OK; +} + +/* 不支持动态内存分配模式, 此函数仅用于CPU软复位 */ +ZXIC_RTN32 zxic_comm_rb_insert_spec_index(ZXIC_RB_CFG *p_rb_cfg, + ZXIC_VOID *p_key, ZXIC_UINT32 in_idx) +{ + ZXIC_UINT32 rtn = 0; + ZXIC_UINT32 stack_top = 1; + ZXIC_SINT32 cmprtn = 0; + + ZXIC_RB_TN **stack_tn[ZXIC_RBT_MAX_DEPTH] = { 0 }; + //ZXIC_RB_TN **pp_tmp_tn = NULL; + ZXIC_RB_TN *p_cur_tn = NULL; + ZXIC_RB_TN *p_pre_tn = NULL; + ZXIC_RB_TN **pp_cur_tn = NULL; + ZXIC_VOID *p_cur_key = NULL; + ZXIC_RB_TN *p_ins_tn = p_key; + + ZXIC_COMM_CHECK_POINT(p_rb_cfg); + ZXIC_COMM_CHECK_POINT(p_key); + + if (p_rb_cfg->is_dynamic) { + ZXIC_COMM_PRINT( + "zxic_comm_rb_insert_spec_index: dynamic mode is not support ! 
Error"); + return ZXIC_RBT_PARA_INVALID; + } + + p_cur_key = p_key; + + pp_cur_tn = &p_rb_cfg->p_root; + + for (;;) { + p_cur_tn = *pp_cur_tn; + + if (!p_cur_tn) { /*find the insert position*/ + rtn = zxic_comm_liststack_alloc_spec_index( + p_rb_cfg->p_lsm, in_idx); + ZXIC_COMM_CHECK_RC( + rtn, "zxic_comm_liststack_alloc_spec_index"); + + p_ins_tn = p_rb_cfg->p_tnbase + in_idx; + + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW(p_rb_cfg->key_size, + in_idx); + INIT_RBT_TN(p_ins_tn, p_rb_cfg->key_size * in_idx + + p_rb_cfg->p_keybase); + + ZXIC_COMM_MEMCPY_S(p_ins_tn->p_key, p_rb_cfg->key_size, + p_key, p_rb_cfg->key_size); + + SET_TN_LSV(p_ins_tn, in_idx); + + /*all insert tn color set to red*/ + SET_TN_COLOR(p_ins_tn, ZXIC_RBT_RED); + + /*insert list*/ + if (cmprtn < 0) { + rtn = zxic_comm_double_link_insert_pre( + &p_ins_tn->tn_ln, &p_pre_tn->tn_ln, + &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_pre"); + } else if (cmprtn > 0) { + rtn = zxic_comm_double_link_insert_aft( + &p_ins_tn->tn_ln, &p_pre_tn->tn_ln, + &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_aft"); + } else { + /*first insert*/ + ZXIC_COMM_ASSERT(!p_pre_tn); + + rtn = zxic_comm_double_link_insert_1st( + &p_ins_tn->tn_ln, &p_rb_cfg->tn_list); + ZXIC_COMM_CHECK_RC( + rtn, + "zxic_comm_double_link_insert_1st"); + } + + /*get out loop */ + break; + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(stack_top, 1); + stack_tn[stack_top++] = pp_cur_tn; + p_pre_tn = p_cur_tn; + cmprtn = p_rb_cfg->p_cmpfun(p_cur_key, p_cur_tn->p_key, + p_rb_cfg->key_size); + + if (cmprtn > 0) { + pp_cur_tn = &p_cur_tn->p_right; + } else if (cmprtn < 0) { + pp_cur_tn = &p_cur_tn->p_left; + } else { + ZXIC_COMM_TRACE_ALL("info ,rb_key is same \n"); + + return ZXIC_RBT_RC_UPDATE; + } + } + + /*handle parenet ptr*/ + p_ins_tn->p_parent = (stack_top != 1) ? 
*stack_tn[stack_top - 1] : NULL; + + stack_tn[stack_top] = pp_cur_tn; + + *pp_cur_tn = p_ins_tn; + + rtn = zxic_comm_rb_handle_ins(p_rb_cfg, stack_tn, stack_top); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_rb_handle_ins"); + + return ZXIC_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_common.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_common.c new file mode 100644 index 000000000000..91691a7cd11b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_common.c @@ -0,0 +1,1052 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_common.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : xuchenxi_10235594 +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 0% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#include "zxic_common.h" + +#if ZXIC_REAL("全局变量") +ZXIC_UINT32 g_zxic_malloc_num; +ZXIC_UINT32 g_zxic_malloc_size; /* 单位字节 */ +ZXIC_UINT32 g_zxic_vmalloc_num; +ZXIC_UINT32 g_zxic_vmalloc_size; /* 单位字节 */ +ZXIC_UINT32 g_zxic_byte_swap_en = 1; +ZXIC_UINT32 g_zxic_comm_channel_max = 4; +#endif + +#if ZXIC_REAL("内存") +/***********************************************************/ +/** 释放内存 +* @param p_data +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID ic_comm_free_record(void) +{ + if (g_zxic_malloc_num > 0) { + g_zxic_malloc_num--; + } else { + ZXIC_COMM_TRACE_ERROR("Note:g_zxicp_malloc_num is zero now\n"); + } +} + +/***********************************************************/ +/** 释放内存 +* @param p_data +* +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/30 +************************************************************/ +ZXIC_VOID ic_comm_vfree_record(void) +{ + if (g_zxic_vmalloc_num > 0) { + g_zxic_vmalloc_num--; + } else { + 
ZXIC_COMM_TRACE_ERROR("Note:g_zxicp_vmalloc_num is zero now\n"); + } +} + +/***********************************************************/ +/** 分配内存 +* @param size +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID *ic_comm_malloc_memory(ZXIC_UINT32 size) +{ + /* 独立安全测评 限定申请内存的大小 */ + if (size > ZXIC_MALLOC_MAX_B_SIZE) { + ZXIC_COMM_TRACE_ERROR( + "malloc size err, size more than 200M \n"); + return ZXIC_NULL; + } + if (g_zxic_malloc_num < ZXIC_UINT32_MAX) { + g_zxic_malloc_num++; + } else { + ZXIC_COMM_TRACE_ERROR( + "Note:g_zxicp_malloc_num is maxvalue now, reset 0\n"); + g_zxic_malloc_num = 0; + } + + if (g_zxic_malloc_size < (ZXIC_UINT32_MAX - size)) { + g_zxic_malloc_size += size; + } else { + ZXIC_COMM_TRACE_INFO( + "Note:g_zxic_malloc_size[0x%x] and size[0x%x] sum is over maxvalue now, reset 0\n", + g_zxic_malloc_size, size); + g_zxic_malloc_size = 0; + } + + return kmalloc(size, GFP_KERNEL); +} + +/***********************************************************/ +/** 分配内存 +* @param size +* +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/30 +************************************************************/ +ZXIC_VOID *ic_comm_vmalloc_memory(ZXIC_UINT32 size) +{ + /* 独立安全测评 限定申请内存的大小 */ + if (size > ZXIC_MALLOC_MAX_B_SIZE) { + ZXIC_COMM_TRACE_ERROR( + "malloc size err, size more than 200M \n"); + return ZXIC_NULL; + } + if (g_zxic_vmalloc_num < ZXIC_UINT32_MAX) { + g_zxic_vmalloc_num++; + } else { + ZXIC_COMM_TRACE_ERROR( + "Note:g_zxicp_vmalloc_num is maxvalue now, reset 0\n"); + g_zxic_vmalloc_num = 0; + } + + if (g_zxic_vmalloc_size < (ZXIC_UINT32_MAX - size)) { + g_zxic_vmalloc_size += size; + } else { + ZXIC_COMM_TRACE_INFO( + "Note:g_zxic_vmalloc_size[0x%x] and size[0x%x] sum is over maxvalue now, reset 0\n", + g_zxic_vmalloc_size, size); + g_zxic_vmalloc_size = 0; + } + + return vmalloc(size); +} +#endif /* 内存 */ + +#if ZXIC_REAL("延时") 
+/***********************************************************/ +/** 毫秒级延时 +* @param milliseconds +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +// ZXIC_VOID zxic_comm_sleep(ZXIC_UINT32 milliseconds) +// { +// /* 打桩测试不需要延时 modify by zhangjintao 2022.01.21 */ +// #ifndef ZXIC_FOR_LLT +// #ifdef ZXIC_OS_WIN +// Sleep(milliseconds); +// #else +// ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NONE((ZXIC_UINT32)milliseconds, 1000); + +// msleep(milliseconds); +// #endif +// #endif +// } + +/***********************************************************/ +/** 微秒级延时,互坼锁中使用 +* @param milliseconds +* +* @return +* @remark +* @see +* @author fyl @date 2020/04/09 +************************************************************/ +ZXIC_VOID zxic_comm_udelay(ZXIC_UINT32 microseconds) +{ +#ifndef ZXIC_FOR_LLT + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NONE(microseconds, 1); + + udelay(microseconds); +#endif +} + +/***********************************************************/ +/** 毫秒级延时,互坼锁中使用 +* @param milliseconds +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID zxic_comm_delay(ZXIC_UINT32 milliseconds) +{ +#ifndef ZXIC_FOR_LLT + // ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_RETURN(milliseconds, 1); + // while (--milliseconds != 0) + // { + // for (i = 0; i < 600; i++); + // } + mdelay(milliseconds); +#endif +} + +/***********************************************************/ +/** LINUX :毫秒级延时 + WINDOWS:毫秒级延时 +* @param millisecond +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID zxic_comm_msleep(ZXIC_UINT32 millisecond) +{ +#ifndef ZXIC_FOR_LLT +#ifdef ZXIC_OS_WIN + Sleep(millisecond); +#else + msleep(millisecond); +#endif +#endif +} + 
+/***********************************************************/ +/**获取时间函数,毫秒 +* @param total +* @param masklen +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_DOUBLE zxic_comm_get_ticks_ms(void) +{ +#ifdef ZXIC_OS_WIN + return (ZXIC_FLOAT)GetTickCount(); +#else + struct timespec64 tv = { 0 }; + get_timespec64(&tv, ZXIC_NULL); + return (ZXIC_DOUBLE)1000 * tv.tv_sec + (ZXIC_DOUBLE)tv.tv_nsec / 1000; +#endif +} +#endif + +#if ZXIC_REAL("字节序") +/***********************************************************/ +/** 判断CPU的大小端字节序 +* @param ZXIC_VOID +* +* @return 0-小端;1-大端 +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_is_big_endian(ZXIC_VOID) +{ + ZXIC_ENDIAN_U c_data; + + c_data.a = 1; + + if (c_data.b == 1) { + return 0; + } else { + return 1; + } +} + +/***********************************************************/ +/** 字节序转换,以4字节为单位进行转序 +* @param p_uc_data +* @param dw_byte_len +* +* @return +* @remark 无 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_VOID zxic_comm_swap(ZXIC_UINT8 *p_uc_data, ZXIC_UINT32 dw_byte_len) +{ + ZXIC_UINT32 dw_byte_num = 0; + ZXIC_UINT8 uc_byte_mode = 0; + ZXIC_UINT32 uc_is_big_flag = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT16 *p_w_tmp = ZXIC_NULL; + ZXIC_UINT32 *p_dw_tmp = ZXIC_NULL; + + if (g_zxic_byte_swap_en) { + p_dw_tmp = (ZXIC_UINT32 *)(p_uc_data); + + uc_is_big_flag = zxic_comm_is_big_endian(); + + if (uc_is_big_flag) { + return; + } else { + dw_byte_num = dw_byte_len >> 2; + uc_byte_mode = dw_byte_len % 4 & 0xff; + + for (i = 0; i < dw_byte_num; i++) { + (*p_dw_tmp) = ZXIC_COMM_CONVERT32(*p_dw_tmp); + p_dw_tmp++; + } + + if (uc_byte_mode > 1) { + p_w_tmp = (ZXIC_UINT16 *)(p_dw_tmp); + (*p_w_tmp) = ZXIC_COMM_CONVERT16(*p_w_tmp); + } + } + } + + return; +} + 
+/***********************************************************/ +/** WORD32拼装成WORD64 +* @param hi 高32bit +* @param lo 低32bit +* +* @return WORD64 +* @remark 无 +* @see +* @author pj @date 2019/10/22 +************************************************************/ +ZXIC_UINT64 ZXIC_COMM_COUNTER64_BUILD(ZXIC_UINT32 hi, ZXIC_UINT32 lo) +{ + ZXIC_UINT64 value = hi; + + value = value << 32; + value = value | lo; + + return value; +} +#endif + +#if ZXIC_REAL("bit操作") +/***********************************************************/ +/** 将数据写入缓存区的指定bit位置,一次只能写入32bit的数据 + p_base的低字节存放数据的低比特,高字节存放数据的高比特。 +* @param p_base 数据缓存区指针 +* @param base_size_bit 缓存总的bit位宽 +* @param data 数据, 比特顺序左低右高,小端比特序 +* @param start_bit 起始bit位置(必须小于结束bit位置) +* @param end_bit 结束bit位置 +* +* @return +* @remark exp: + ZXIC_UINT8 data0[4] = {0x22, 0x44, 0x66, 0x88}; + zxic_comm_write_bits(data0, 32, 0xAABBCC, 0,23); + ZXIC_COMM_PRINT("0x%02X %02X %02X %02X \n", data0[0], data0[1], data0[2], data0[3]); + 输出:0xAA BB CC 88 + +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_write_bits(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 data, ZXIC_UINT32 start_bit, + ZXIC_UINT32 end_bit) +{ + ZXIC_UINT32 len = 0; + ZXIC_UINT32 start_byte_index = 0; + ZXIC_UINT32 end_byte_index = 0; + ZXIC_UINT8 mask_value = 0; + ZXIC_UINT32 byte_num = 0; + ZXIC_UINT32 buffer_size = 0; + + if (0 != (base_size_bit % 8)) { + ZXIC_COMM_TRACE_ERROR("\n buffer must be:%d", __LINE__); + //assert(0); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + if (start_bit > end_bit) { + ZXIC_COMM_TRACE_ERROR( + "\nend_bit cannot be less than start_bit:%d", __LINE__); + //assert(0); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + if (base_size_bit < end_bit) { + ZXIC_COMM_TRACE_ERROR( + "\nend_bit exceeds the base_size!line:%d,base_size_bit:%d end_bit:%d", + __LINE__, base_size_bit, end_bit); + //assert(0); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + 
ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(end_bit, start_bit); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(end_bit - start_bit, 1); + + len = end_bit - start_bit + 1; + buffer_size = base_size_bit / 8; + + /*寄存器位宽需要时2的次方,如不是,需要累加到最近的2的次方*/ + /*用于解决KW检查中zxic_comm_write_bits_ex出现的内存泄漏错误*/ + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(buffer_size, 1); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(buffer_size, 1); + + while (0 != (buffer_size & (buffer_size - 1))) { + buffer_size += 1; + } + + if (buffer_size != base_size_bit / 8) { + ZXIC_COMM_TRACE_ALL( + "\n buffer size[0x%x] is not 2^n: add up to [0x%x]", + base_size_bit / 8, buffer_size); + } + + /*一次只能写32bit*/ + ZXIC_COMM_CHECK_INDEX(len, 1, 32); + + if (data > (ZXIC_UINT32)(0xffffffff >> (32 - len))) { + ZXIC_COMM_PRINT( + "\nValue is too big to write in the bit field!:%d,data:%x,len:%x", + __LINE__, data, + (ZXIC_UINT32)(0xffffffff >> (32 - len))); + return ZXIC_BIT_STREAM_DATA_TOO_BIG; + } + + end_byte_index = (end_bit >> 3); + start_byte_index = (start_bit >> 3); + + if (start_byte_index == end_byte_index) { + mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff); + mask_value |= (((1 << (7 - (end_bit & 7))) - 1) & 0xff); + p_base[end_byte_index] &= mask_value; + p_base[end_byte_index] |= + (((data << (7 - (end_bit & 7)))) & 0xff); + return ZXIC_OK; + } + + if (7 != (end_bit & 7)) { + mask_value = ((0x7f >> (end_bit & 7)) & 0xff); + p_base[end_byte_index] &= mask_value; + p_base[end_byte_index] |= + ((data << (7 - (end_bit & 7))) & 0xff); + end_byte_index--; + data >>= 1 + (end_bit & 7); + } + + for (byte_num = end_byte_index; byte_num > start_byte_index; + byte_num--) { + /* critical */ + p_base[byte_num & (buffer_size - 1)] = data & 0xff; + data >>= 8; + } + + mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff); + p_base[byte_num] &= mask_value; + p_base[byte_num] |= data; + + return ZXIC_OK; +} + +/***********************************************************/ +/** 从缓存区中读取指定bit位置的数据,一次最多只能读32bit +* @param p_base 数据缓存区指针 +* @param 
base_size_bit 缓存区总的bit位宽 +* @param p_data 返回数据的指针 +* @param start_bit 起始bit位置(必须小于结束bit位置) +* @param end_bit 结束bit位置 +* +* @return +* @remark exp: + ZXIC_UINT8 data0[4] = {0x22, 0x44, 0x66, 0x88}; + ZXIC_UINT32 test_a = 0; + zxic_comm_read_bits(data0, 32, &test_a, 0, 23); + 输出: test_a = 0x224466 +* @see +* @author ChenWei10088471 @date 2014/02/07 +************************************************************/ +ZXIC_RTN32 zxic_comm_read_bits(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 *p_data, ZXIC_UINT32 start_bit, + ZXIC_UINT32 end_bit) +{ + ZXIC_UINT32 len = 0; + ZXIC_UINT32 start_byte_index = 0; + ZXIC_UINT32 end_byte_index = 0; + ZXIC_UINT32 byte_num = 0; + ZXIC_UINT32 buffer_size = 0; + + if (0 != (base_size_bit % 8)) { + ZXIC_COMM_TRACE_ERROR("\n buffer must be:%d", __LINE__); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + if (start_bit > end_bit) { + ZXIC_COMM_TRACE_ERROR( + "\nend_bit cannot be less than start_bit:%d", __LINE__); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + if (base_size_bit < end_bit) { + ZXIC_COMM_TRACE_ERROR( + "\nend_bit exceeds the base_size:%d,end_bit:%d", + __LINE__, end_bit); + return ZXIC_BIT_STREAM_INDEX_ERR; + } + + len = end_bit - start_bit + 1; + buffer_size = base_size_bit / 8; + + /*寄存器位宽需要时2的次方,如不是,需要累加到最近的2的次方*/ + /*用于解决KW检查中zxic_comm_read_bits_ex出现的内存泄漏错误*/ + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(buffer_size, 1); + + while (0 != (buffer_size & (buffer_size - 1))) { + buffer_size += 1; + } + + if (buffer_size != base_size_bit / 8) { + ZXIC_COMM_TRACE_ALL( + "\n buffer size[0x%x] is not 2^n: add up to [0x%x]", + base_size_bit / 8, buffer_size); + } + + /*先将返回的数据清零*/ + *p_data = 0; + + /*一次最多只能读32bit*/ + ZXIC_COMM_CHECK_INDEX(len, 1, 32); + + end_byte_index = (end_bit >> 3); + start_byte_index = (start_bit >> 3); + + if (start_byte_index == end_byte_index) { + *p_data = (ZXIC_UINT32)(((p_base[start_byte_index] >> + (7U - (end_bit & 7))) & + (0xff >> (8U - len))) & + 0xff); + return ZXIC_OK; + } + + if (start_bit & 7) 
{ + *p_data = + (p_base[start_byte_index] & (0xff >> (start_bit & 7))) & + ZXIC_UINT8_MASK; + start_byte_index++; + } + + for (byte_num = start_byte_index; byte_num < end_byte_index; + byte_num++) { + *p_data <<= 8; + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(*p_data, p_base[byte_num]); + *p_data += p_base[byte_num]; + } + + *p_data <<= 1 + (end_bit & 7); + /* critical */ + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW( + *p_data, (((p_base[byte_num & (buffer_size - 1)] & + (0xff << (7 - (end_bit & 7)))) >> + (7 - (end_bit & 7))) & + 0xff)); + *p_data += ((p_base[byte_num & (buffer_size - 1)] & + (0xff << (7 - (end_bit & 7)))) >> + (7 - (end_bit & 7))) & + 0xff; + + return ZXIC_OK; +} + +/***********************************************************/ +/** 比特流拼装,比特流的形式为: p_base的低字节存放 + 数据的高比特,高字节存放数据的低比特。 +* @param p_base +* @param base_size_bit +* @param data 数据, 比特顺序左高右低,大端比特序 +* @param msb_start_pos 数据最高比特的位置 +* @param len 数据长度 +* +* @return +* @remark exp: + ZXIC_UINT8 data0[4] = {0x22, 0x44, 0x66, 0x88}; + zxic_comm_write_bits_ex(data0, 32, 0x123456, 23,24); + ZXIC_COMM_PRINT("0x%02X %02X %02X %02X \n", data0[0], data0[1], data0[2], data0[3]); + 输出:0x22 12 34 56 + +* @see +* @author 王春雷 @date 2014/03/08 +************************************************************/ +ZXIC_RTN32 zxic_comm_write_bits_ex(ZXIC_UINT8 *p_base, + ZXIC_UINT32 base_size_bit, ZXIC_UINT32 data, + ZXIC_UINT32 msb_start_pos, ZXIC_UINT32 len) +{ + ZXIC_RTN32 rtn = ZXIC_OK; + ZXIC_COMM_CHECK_POINT(p_base); + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(base_size_bit, 1); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(base_size_bit - 1, msb_start_pos); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(base_size_bit - 1 - msb_start_pos, + len); + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW( + base_size_bit - 1 - msb_start_pos + len, 1); + + rtn = zxic_comm_write_bits( + p_base, base_size_bit, data, + (base_size_bit - 1 - msb_start_pos), + (base_size_bit - 1 - msb_start_pos + len - 1)); + + return rtn; +} + 
+/***********************************************************/ +/** 从比特流中读取数据,比特流的形式为: p_base的低字节存放 + 数据的高比特,高字节存放数据的低比特。 +* @param p_base +* @param base_size_bit +* @param p_data +* @param msb_start_pos +* @param len +* +* @return +* @remark exp: + ZXIC_UINT8 data0[4] = {0x22, 0x44, 0x66, 0x88}; + zxic_comm_read_bits_ex(data0, 32, &test_a, 23, 24); + ZXIC_COMM_PRINT("0test_a = 0x%x \n", test_a); + 输出: 0test_a = 0x446688 + +* @see +* @author 王春雷 @date 2014/03/08 +************************************************************/ +ZXIC_RTN32 zxic_comm_read_bits_ex(ZXIC_UINT8 *p_base, ZXIC_UINT32 base_size_bit, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 msb_start_pos, ZXIC_UINT32 len) +{ + ZXIC_RTN32 rtn = ZXIC_OK; + ZXIC_COMM_CHECK_POINT(p_base); + ZXIC_COMM_CHECK_POINT(p_data); + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(base_size_bit, 1); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(base_size_bit - 1, msb_start_pos); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW(base_size_bit - 1 - msb_start_pos, + len); + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW( + base_size_bit - 1 - msb_start_pos + len, 1); + + rtn = zxic_comm_read_bits( + p_base, base_size_bit, p_data, + (base_size_bit - 1 - msb_start_pos), + (base_size_bit - 1 - msb_start_pos + len - 1)); + return rtn; +} +#endif /* */ + +#if ZXIC_REAL("字符串") +/***********************************************************/ +/** +* @param buffer 目标字符串 +* @param sizeofbuf sizeofbuffer +* @param count 要拷贝字节数 +* @param format +* +* @return 待拷贝的实际字符串长度 +* @remark snprintf +* @see +* @author sj @date 2020/12/09 +************************************************************/ +ZXIC_SINT32 ic_comm_snprintf_s(ZXIC_CHAR *buffer, ZXIC_SIZE_T sizeofbuf, + ZXIC_SIZE_T count, const ZXIC_CHAR *format, ...) +{ + va_list ap; + ZXIC_SINT32 ret = -1; + + if ((ZXIC_NULL == buffer) || (ZXIC_NULL == format)) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ret; + } + if (!count) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:count err], FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ret; + } + va_start(ap, format); + ret = ZXIC_COMM_VSNPRINTF_S(buffer, sizeofbuf, count, format, ap); + va_end(ap); + + if (ret == -1) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:snprintf_s err], FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + } + return ret; +} +/***********************************************************/ +/** +* @param buffer 目标字符串 +* @param sizeofbuf sizeofbuffer +* @param count 要拷贝字节数 +* @param format +* +* @return 待拷贝的实际字符串长度 +* @remark vsnprintf +* @see +* @author sj @date 2020/12/09 +************************************************************/ +ZXIC_SINT32 ic_comm_vsnprintf_s(ZXIC_CHAR *buffer, ZXIC_SIZE_T sizeofbuf, + ZXIC_SIZE_T count, const ZXIC_CHAR *format, + va_list ap) +{ + ZXIC_SINT32 ret = -1; + + if ((ZXIC_NULL == buffer) || (ZXIC_NULL == format)) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ret; + } + if (!count) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:count err], FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ret; + } + if (count < sizeofbuf) { + sizeofbuf = count; + } +#ifdef ZXIC_OS_WIN + ret = _vsnprintf(buffer, sizeofbuf, format, ap); +#else + ret = vsnprintf(buffer, sizeofbuf, format, ap); +#endif + if (ret == -1) { + ZXIC_COMM_TRACE_ERROR( + "\n ZXIC %s:%d[Error:vsnprintf err], FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + } + return ret; +} + +/***********************************************************/ +/** +* @param pcDst    目的地址 +* @param dwMaxSize 目的长度 +* @param pcSrc    源地址 +* @param dwCount  要复制的最大字符数 +* +* @return +* @remark 将源地址的字符拷贝到目的字符数组; +* @see +* @author +************************************************************/ +ZXIC_CHAR *ic_comm_strncpy_s(ZXIC_CHAR *pcDst, size_t dwMaxSize, + ZXIC_CONST ZXIC_CHAR *pcSrc, size_t dwCount) +{ + size_t dwIndex = 1, dwCopyNum = dwCount; + ZXIC_CHAR *pcResult = pcDst; + + ZXIC_COMM_CHECK_RC_POINT_NO_PRINT(pcDst, pcResult); + ZXIC_COMM_CHECK_RC_POINT_NO_PRINT(pcSrc, pcResult); + + if ((dwMaxSize <= 1) || (dwMaxSize > ZXIC_COMM_MEMORY_MAX_B_SIZE) || + (dwCount == 0)) { + return pcResult; + } + + /* 计算拷贝的字符长度,不含结束符 */ + if (dwCount >= dwMaxSize) { + dwCopyNum = dwMaxSize - 1; + } + + if (ic_comm_getAbsValue((unsigned ZXIC_CHAR *)pcDst, + (ZXIC_CONST ZXIC_UINT8 *)pcSrc) < dwCopyNum) { + return pcResult; + } + + while ('\0' != (*pcDst++ = *pcSrc++)) { + /* 判断拷贝字符数是否等于dwCopyNum,等于就退出 */ + /* 由于判断放在循环体内,进行判断前已经拷贝了一次,所以i初试值为1 */ + if (dwIndex++ >= dwCopyNum) { + *pcDst = '\0'; + + return pcResult; + } + } + + /* 本处的处理是为了保持和库函数中的解释一致,对于源串长度小于dwCopyNum,剩余部分全部填0 */ + while (dwIndex++ <= dwCopyNum) { + *pcDst++ = '\0'; + } + + return pcResult; +} + +ZXIC_SIZE_T ic_comm_getAbsValue(ZXIC_UINT8 *dest, ZXIC_CONST ZXIC_UINT8 *src) +{ + return dest > src ? 
(dest - src) : (src - dest); +} +/***********************************************************/ +/** +* @param dest   目的地址 +* @param src  源地址 +* @param n   要复制的长度 +* +* @return +* @remark 从源地址拷贝若干字节的长度到目的内存处 +* @see +* @author +************************************************************/ +ZXIC_RTN32 ic_comm_memcpy(ZXIC_VOID *dest, ZXIC_CONST ZXIC_VOID *src, size_t n) +{ + ZXIC_COMM_CHECK_POINT(dest); + ZXIC_COMM_CHECK_POINT(src); + + /* memcpy 的大小限制在200M */ + if (n > 200 * 1024 * 1024) { + return ZXIC_PAR_CHK_INVALID_PARA; + } + + if (ic_comm_getAbsValue((ZXIC_UINT8 *)dest, + (ZXIC_CONST ZXIC_UINT8 *)src) < n) { + return ZXIC_ERR; + } +#ifdef ZXIC_OS_WIN + memcpy(dest, src, n); +#else + //__memcpy_chk(dest, src, n, n); + memcpy(dest, src, n); +#endif + return ZXIC_OK; +} +/***********************************************************/ +/** +* @param dest   目的地址 +* @param dest_len 目的长度 +* @param src  源地址 +* @param n   要复制的长度 +* +* @return +* @remark 从源地址拷贝若干字节的长度到目的内存处,增加源目的长度之间的检查 +* @see +* @author +************************************************************/ +ZXIC_RTN32 ic_comm_memcpy_s(ZXIC_VOID *dest, size_t dest_len, const void *src, + size_t n) +{ + ZXIC_COMM_CHECK_POINT(dest); + ZXIC_COMM_CHECK_POINT(src); + + /* memcpy 的大小限制在200M */ + if (n > 200 * 1024 * 1024) { + return ZXIC_PAR_CHK_INVALID_PARA; + } + + if (ic_comm_getAbsValue((ZXIC_UINT8 *)dest, + (ZXIC_CONST ZXIC_UINT8 *)src) < n) { + return ZXIC_PAR_CHK_ARGIN_ERROR; + } +#ifdef ZXIC_OS_WIN + if (dest_len < n) { + return ZXIC_ERR; + } + memcpy(dest, src, n); +#else + //__memcpy_chk(dest, src, n, dest_len); + memcpy(dest, src, n); +#endif + return ZXIC_OK; +} + +/***********************************************************/ +/** +* @param pcDst    目的地址 +* @param dwMaxSize 目的长度 +* @param pcSrc    源地址 +* @param dwCount   待连接的字符数 +* +* @return +* @remark 字符串连接函数,将pcSrc的dwCount字符复制到pcDst的字符串后,覆盖"\0"; +* @see +* @author +************************************************************/ +ZXIC_CHAR 
*ic_comm_strncat_s(ZXIC_CHAR *pcDst, size_t dwMaxSize, + ZXIC_CONST ZXIC_CHAR *pcSrc, size_t dwCount) +{ + ZXIC_SIZE_T dwIndex = 1, dwCopyNum = 0; + ZXIC_CHAR *pcResult = pcDst; + + ZXIC_COMM_CHECK_RC_POINT_NO_PRINT(pcDst, pcResult); + ZXIC_COMM_CHECK_RC_POINT_NO_PRINT(pcSrc, pcResult); + + if ((dwMaxSize == 0) || (dwMaxSize > ZXIC_COMM_MEMORY_MAX_B_SIZE) || + (dwCount == 0)) { + return pcResult; + } + + /* 计算目的串的长度 */ + while ((*pcDst++ != '\0') && (++dwCopyNum < dwMaxSize)) { + } + + if (dwCopyNum >= dwMaxSize) { + return pcResult; + } + + dwCopyNum = dwMaxSize - dwCopyNum; /* 计算剩余的缓冲区长度 */ + + /* 计算拷贝的字符长度,不含结束符 */ + if (dwCount >= dwCopyNum) { + dwCopyNum = dwCopyNum - 1; + } else { + dwCopyNum = dwCount; + } + + if (dwCopyNum == 0) { /* 宋志强 修正pcDst 空间刚好满导致内存越界的问题 */ + return pcResult; + } + + pcDst--; + + if (ic_comm_getAbsValue((unsigned ZXIC_CHAR *)pcDst, + (const unsigned ZXIC_CHAR *)pcSrc) < + dwCopyNum) { + return pcResult; + } + + while ('\0' != (*pcDst++ = *pcSrc++)) { + /* 判断拷贝字符数是否等于dwCopyNum,等于就退出 */ + /* 由于判断放在循环体内,进行判断前已经拷贝了一次,所以i初试值为1 */ + if (dwIndex++ >= dwCopyNum) { + *pcDst = '\0'; + + return pcResult; + } + } + + /* 本处的处理是为了保持和库函数中的解释一致,对于源串长度小于dwCopyNum,剩余部分全部填0 */ + while (dwIndex++ <= dwCopyNum) { + *pcDst++ = '\0'; + } + + return pcResult; +} + +/***********************************************************/ +/** +* @param str   字符串首地址 +* @param MaxCount 可返回的最大长度,若计算的长度大于该长度,则返回MaxCount; +* +* @return +* @remark 计算字符串的长度 +* @see +* @author +************************************************************/ +ZXIC_SIZE_T ic_comm_strnlen_s(const ZXIC_CHAR *str, ZXIC_SIZE_T MaxCount) +{ + return (str == 0) ? 
0 : ZXIC_COMM_STRNLEN(str, MaxCount); +} + +ZXIC_VOID ic_comm_memset_s(void *dest, ZXIC_SIZE_T dmax, ZXIC_UINT8 c, + ZXIC_SIZE_T n) +{ + if ((ZXIC_NULL == dest) || (dmax > ZXIC_COMM_MEMORY_MAX_B_SIZE) || + (0 == n) || (n > dmax)) { + ZXIC_COMM_TRACE_ERROR( + "zxic_memset_s para err:ptr is null or size err.\n"); + return; + } + memset(dest, c, n); +} + +ZXIC_SINT32 ic_comm_memcmp(void *str1, const void *str2, ZXIC_SIZE_T n) +{ + if ((ZXIC_NULL == str1) || (ZXIC_NULL == str2) || + (n > ZXIC_COMM_MEMORY_MAX_B_SIZE) || (0 == n)) { + ZXIC_COMM_TRACE_ERROR( + "zxic_memcmp para err:ptr is null or size more than 200M.\n"); + return 0x7fffffff; + } + return memcmp(str1, str2, n); +} +#endif + +ZXIC_RTN32 zxic_comm_random(void) +{ +#ifdef ZXIC_OS_WIN + /* return RtlGenRandom(); Modify by wcl, 20200424, Windows版本编译失败,先改回rand */ + return rand(); +#else + ZXIC_UINT8 buff[4] = { 0 }; + ZXIC_UINT32 ticks = 0; + ZXIC_UINT32 random_d = 0; + struct timespec64 tv; + ZXIC_UINT32 result_len = 0; + struct file *fd = NULL; + loff_t pos = 0; + + //fd = open("/dev/urandom", O_RDONLY); + fd = filp_open("/dev/urandom", O_RDONLY, 0); + + if (NULL == fd) { + get_timespec64(&tv, ZXIC_NULL); + ticks = ((tv.tv_sec & ZXIC_UINT32_MAX) + + (tv.tv_nsec & ZXIC_UINT32_MAX)) & + ZXIC_UINT32_MAX; + random_d = ticks; + } else { + result_len = (kernel_read(fd, buff, ZXIC_SIZEOF(buff), &pos) & 0xFFFFFFFF); + if (result_len != ZXIC_SIZEOF(buff)) { + get_timespec64(&tv, ZXIC_NULL); + ticks = ((tv.tv_sec & ZXIC_UINT32_MAX) + + (tv.tv_nsec & ZXIC_UINT32_MAX)) & + ZXIC_UINT32_MAX; + random_d = ticks; + filp_close(fd, NULL); + } else { + random_d = (((buff[0] << 24) & 0xff000000) | + ((buff[1] << 16) & 0x00ff0000) | + ((buff[2] << 8) & 0x0000ff00) | + (buff[3] & 0x000000ff)); + filp_close(fd, NULL); + } + } + + return random_d; +#endif +} + +/***********************************************************/ +/** 设置最大通道数 +* @param dev_max 设置的最大设备数 +* +* @return +* @remark 无 +* @see +* @author lihk @date 2021/03/31 
+************************************************************/ +ZXIC_VOID zxic_comm_channel_max_set(ZXIC_UINT32 dev_max) +{ + g_zxic_comm_channel_max = dev_max; +} + +/***********************************************************/ +/** 获取最大通道数 +* @param +* +* @return +* @remark 无 +* @see +* @author lihk @date 2021/03/31 +************************************************************/ +ZXIC_RTN32 zxic_comm_channel_max_get(ZXIC_VOID) +{ + return g_zxic_comm_channel_max; +} diff --git a/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_private_top.c b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_private_top.c new file mode 100644 index 000000000000..ae4238aef285 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/comm/source/zxic_private_top.c @@ -0,0 +1,146 @@ +/************************************************************** +* 版权所有 (C)2013-2020, 深圳市中兴通讯股份有限公司 +* 文件名称 : zxic_common.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : xuchenxi_10235594 +* 完成日期 : 2020/07/20 +* DEPARTMENT: 有线开发四部-系统软件团队 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#include "zxic_private_top.h" +#include "zxic_common.h" +#include +//#include +//#include +#include +#include +#include +//#include +//#include +//#include +#include + +#ifdef ZXIC_OS_WIN +#include +#include +#include +#include +#pragma warning(disable : 4996) +#else +//#include +#include +#include +#include +#include +#include +#include +#include +#endif + +#if ZXIC_REAL("参数检查函数定义") +/***********************************************************/ +/** +* @param val +* @param min +* @param max +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/07/13 +************************************************************/ +ZXIC_RTN32 zxic_comm_index_check(ZXIC_UINT32 val, ZXIC_UINT32 min, + ZXIC_UINT32 max) +{ + if (min <= max) { + if (0 == min) { + if ((val) > (max)) { + return ZXIC_PAR_CHK_INVALID_INDEX; + } + } else { + if 
((val) < (min) || (val) > (max)) { + return ZXIC_PAR_CHK_INVALID_INDEX; + } + } + } else { + return ZXIC_PAR_CHK_INVALID_RANGE; + } + + return ZXIC_OK; +} + +/***********************************************************/ +/** +* @param dev_id +* @param val +* @param min +* @param max +* +* @return +* @remark 无 +* @see +* @author PJ @date 2019/07/13 +************************************************************/ +ZXIC_RTN32 zxic_comm_dev_index_check(ZXIC_UINT32 dev_id, ZXIC_UINT32 val, + ZXIC_UINT32 min, ZXIC_UINT32 max) +{ + if (min <= max) { + if (0 == min) { + if ((val) > (max)) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ZXIC %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", + __FILE__, __LINE__, val, min, max, + __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + } else { + if ((val) < (min) || (val) > (max)) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ZXIC %s:%d[Error:VALUE[0x%x] INVALID] [min=0x%x,max=0x%x] ! FUNCTION :%s !\n", + __FILE__, __LINE__, val, min, max, + __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + } + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ZXIC %s:%d[Error:RANGE INVALID] [val=0x%x,min=0x%x,max=0x%x] ! 
FUNCTION :%s !\n", + __FILE__, __LINE__, val, min, max, __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_RANGE; + } + + return ZXIC_OK; +} + +/***********************************************************/ +/** 校验码 +* @param +* +* @return +* @remark 无 +* @see +* @author cq @date 2025/05/23 +************************************************************/ +ZXIC_RTN32 zxic_comm_errcode_check(ZXIC_UINT32 error_code) +{ + if ((error_code == ZXIC_PAR_CHK_BAR_ABNORMAL) || + (error_code == ZXIC_PAR_CHK_DEV_STATUS_OFF)) { + return ZXIC_OK; + } + + return ZXIC_PAR_CHK_INVALID_INDEX; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/driver/Kbuild.include new file mode 100644 index 000000000000..f63887084c69 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/driver/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_acl.h b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_acl.h new file mode 100644 index 000000000000..7693428a46ff --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_acl.h @@ -0,0 +1,119 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_drv_acl.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_DRV_ACL_H +#define DPP_DRV_ACL_H + +#include "zxic_common.h" +#include "dpp_apt_se_api.h" +#include "dpp_apt_se.h" + +typedef struct zxdh_ipsec_enc_key { + ZXIC_UINT8 dip[16]; + ZXIC_UINT8 sip[16]; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 rsv1; +} ZXDH_IPSEC_ENC_KEY; + +typedef 
struct zxdh_ipsec_enc_mask { + ZXIC_UINT8 dip[16]; + ZXIC_UINT8 sip[16]; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 rsv1; +} ZXDH_IPSEC_ENC_MASK; + +typedef struct zxdh_ipsec_enc_entry { + ZXIC_UINT32 sa_id; + ZXIC_UINT32 rsv; + ZXIC_UINT32 hit_flag; +} ZXDH_IPSEC_ENC_ENTRY; + +typedef struct zxdh_ipsec_enc_t { + ZXIC_UINT32 index; + ZXDH_IPSEC_ENC_KEY key; + ZXDH_IPSEC_ENC_MASK mask; + ZXDH_IPSEC_ENC_ENTRY entry; +} ZXDH_IPSEC_ENC_T; + +typedef struct zxdh_pkt_cap_key { + ZXIC_UINT32 rsv; + ZXIC_UINT32 qp; + ZXIC_UINT16 vhca_id; + ZXIC_UINT16 vqm_vfid; + ZXIC_UINT16 ethtype; + ZXIC_UINT16 sport; + ZXIC_UINT16 dport; + ZXIC_UINT16 key_word_off; + ZXIC_UINT8 protocol; + ZXIC_UINT8 key_word_len; + ZXIC_UINT8 capture_pkt_flag; + ZXIC_UINT8 panel_id; + ZXIC_UINT8 sip[16]; + ZXIC_UINT8 dip[16]; + ZXIC_UINT8 dmac[6]; + ZXIC_UINT8 smac[6]; + ZXIC_UINT8 key_word[15]; +} ZXDH_PKT_CAP_KEY; + +typedef struct zxdh_pkt_cap_mask { + ZXIC_UINT32 rsv_mask; + ZXIC_UINT32 qp_mask; + ZXIC_UINT16 vhca_id_mask; + ZXIC_UINT16 vqm_vfid_mask; + ZXIC_UINT16 ethtype_mask; + ZXIC_UINT16 sport_mask; + ZXIC_UINT16 dport_mask; + ZXIC_UINT16 key_word_off_mask; + ZXIC_UINT8 protocol_mask; + ZXIC_UINT8 key_word_len_mask; + ZXIC_UINT8 capture_pkt_flag_mask; + ZXIC_UINT8 panel_id_mask; + ZXIC_UINT8 sip_mask[16]; + ZXIC_UINT8 dip_mask[16]; + ZXIC_UINT8 dmac_mask[6]; + ZXIC_UINT8 smac_mask[6]; + ZXIC_UINT8 key_word_mask[15]; +} ZXDH_PKT_CAP_MASK; + +typedef struct zxdh_pkt_cap_entry { + ZXIC_UINT32 vqm_vfid; + ZXIC_UINT32 index; + ZXIC_UINT32 value_flag; + ZXIC_UINT32 hit_flag; +} ZXDH_PKT_CAP_ENTRY; + +typedef struct zxdh_pkt_cap_t { + ZXIC_UINT32 index; + ZXDH_PKT_CAP_KEY key; + ZXDH_PKT_CAP_MASK mask; + ZXDH_PKT_CAP_ENTRY entry; +} ZXDH_PKT_CAP_T; + +ZXIC_UINT32 dpp_apt_set_ipsec_enc_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry); +ZXIC_UINT32 dpp_apt_get_ipsec_enc_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry); +ZXIC_UINT32 dpp_apt_set_pkt_cap_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T 
*aclEntry); +ZXIC_UINT32 dpp_apt_get_pkt_cap_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry); + +SE_APT_ACL_CONVERT_T *se_acl_callback_get(ZXIC_UINT32 sdt_no); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_eram.h b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_eram.h new file mode 100644 index 000000000000..907e5a5b3bd2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_eram.h @@ -0,0 +1,323 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_drv_eram.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_DRV_ERAM_H +#define DPP_DRV_ERAM_H + +#include "zxic_common.h" +#include "dpp_apt_se_api.h" +#include "dpp_apt_se.h" + +typedef struct zxdh_vxlan_t { + ZXIC_UINT64 port : 16; + ZXIC_UINT64 rsv : 47; + ZXIC_UINT64 hit_flag : 1; +} ZXDH_VXLAN_T; + +typedef struct zxdh_sriov_vport_t { + // byte[15:16] + ZXIC_UINT32 rsv6 /* : 16; */; + + // byte[13:14] + ZXIC_UINT32 vhca /* : 10; */; + ZXIC_UINT32 rsv5 /* : 5; */; + + // byte[12] + ZXIC_UINT32 rss_hash_factor /* : 8; */; + + // byte[11] + ZXIC_UINT32 hash_alg /* : 4; */; + ZXIC_UINT32 uplink_phy_port_id /* : 4; */; + + // byte[9:10] + ZXIC_UINT32 lag_id /* : 3; */; + ZXIC_UINT32 fd_vxlan_offload_en /* : 1; */; + ZXIC_UINT32 pf_vqm_vfid /* : 11; */; + ZXIC_UINT32 rsv3 /* : 1; */; + + // byte[7:8] + ZXIC_UINT32 mtu /* : 16; */; + + // byte[5:6] + ZXIC_UINT32 port_base_qid /* : 12; */; + ZXIC_UINT32 hash_search_index /* : 3; */; + ZXIC_UINT32 rsv2 /* : 1; */; + + // byte[4] + ZXIC_UINT32 np_egress_meter_enable /* : 1; */; + ZXIC_UINT32 np_ingress_meter_enable /* : 1; */; + ZXIC_UINT32 np_egress_meter_mode /* : 1; */; + ZXIC_UINT32 np_ingress_meter_mode /* : 1; */; + 
ZXIC_UINT32 np_egress_tm_enable /* : 1; */; + ZXIC_UINT32 np_ingress_tm_enable /* : 1; */; + ZXIC_UINT32 rsv1 /* : 1; */; + ZXIC_UINT32 spoof_check_enable /* : 1; */; + + // byte[3] + ZXIC_UINT32 inline_sec_offload /* : 1; */; + ZXIC_UINT32 fd_enable /* : 1; */; + ZXIC_UINT32 lag_enable /* : 1; */; + ZXIC_UINT32 vepa_enable /* : 1; */; + ZXIC_UINT32 is_vf /* : 1; */; + ZXIC_UINT32 virtio_version /* : 2; */; + ZXIC_UINT32 virtio_enable /* : 1; */; + + // byte[2] + ZXIC_UINT32 accelerator_offload_flag /* : 1; */; + ZXIC_UINT32 lro_offload /* : 1; */; + ZXIC_UINT32 ip_recombine_offload /* : 1; */; + ZXIC_UINT32 tcp_udp_checksum_offload /* : 1; */; + ZXIC_UINT32 ip_checksum_offload /* : 1; */; + ZXIC_UINT32 outer_ip_checksum_offload /* : 1; */; + ZXIC_UINT32 is_up /* : 1; */; + ZXIC_UINT32 business_enable /* : 1; */; + + // byte[1] + ZXIC_UINT32 hw_bond_enable /* : 1; */; + ZXIC_UINT32 rdma_offload_enable /* : 1; */; + ZXIC_UINT32 promisc_enable /* : 1; */; + ZXIC_UINT32 sriov_vlan_offload_enable /* : 1; */; + ZXIC_UINT32 sriov_business_vlan_offload_enable /* : 1; */; + ZXIC_UINT32 rss_enable /* : 1; */; + ZXIC_UINT32 mtu_offload_enable /* : 1; */; + ZXIC_UINT32 hit_flag /*: 1; */; + + // byte[13:14] + ZXIC_UINT32 flag_1588_enable /*: 1; */; +} ZXDH_SRIOV_VPORT_T; + +// 注意新增字段会打乱原有顺序 +typedef struct zxdh_uplink_phy_port_t { + ZXIC_UINT32 rsv6 /* : 5; */; + ZXIC_UINT32 pf_vqm_vfid /* : 11; */; + ZXIC_UINT32 rsv5 /* : 5; */; + ZXIC_UINT32 lacp_pf_memport_qid /* : 12; */; + ZXIC_UINT32 rsv4 /* : 4; */; + ZXIC_UINT32 lacp_pf_vqm_vfid /* : 11; */; + ZXIC_UINT32 rsv3 /* : 1; */; + ZXIC_UINT32 is_up /* : 1; */; + ZXIC_UINT32 bond_link_up /* : 1; */; + ZXIC_UINT32 hw_bond_enable /* : 1; */; + ZXIC_UINT32 mtu /* : 16; */; + ZXIC_UINT32 mtu_offload_enable /* : 1; */; + ZXIC_UINT32 rsv2 /* : 3; */; + ZXIC_UINT32 tm_base_queue /* : 12; */; + ZXIC_UINT32 ptp_port_vfid /* : 11; */; + ZXIC_UINT32 rsv1 /* : 15 */; + ZXIC_UINT32 magic_packet_enable /* : 1; */; + ZXIC_UINT32 
tm_shape_enable /* : 1; */; + ZXIC_UINT32 ptp_tc_enable /* : 2; */; + ZXIC_UINT32 trust_mode /* : 1; */; + ZXIC_UINT32 hit_flag /* : 1; */; + ZXIC_UINT32 primary_pf_vqm_vfid /* : 11; */; + ZXIC_UINT32 sriov_hdbond_enable /* : 1; */; +} ZXDH_UPLINK_PHY_PORT_T; + +typedef struct zxdh_dscp_to_up_t { + ZXIC_UINT32 rsv2 /* : 32; */; + ZXIC_UINT32 up /* : 3; */; + ZXIC_UINT32 rsv1 /* : 28; */; + ZXIC_UINT32 hit_flag /* : 1; */; +} ZXDH_DSCP_TO_UP_T; + +typedef struct zxdh_up_to_tc_t { + ZXIC_UINT32 rsv2 /* : 32; */; + ZXIC_UINT32 tc /* : 3; */; + ZXIC_UINT32 rsv1 /* : 28; */; + ZXIC_UINT32 hit_flag /* : 1; */; +} ZXDH_UP_TO_TC_T; + +typedef struct zxdh_rss_to_vqid_t { + ZXIC_UINT32 vqm_qid[8]; + ZXIC_UINT32 hit_flag; +} ZXDH_RSS_TO_VQID_T; + +typedef struct zxdh_vlan_filter_t { + ZXIC_UINT8 vport_bitmap[15]; + ZXIC_UINT8 rsv : 7; + ZXIC_UINT8 hit_flag : 1; + +} ZXDH_VLAN_FILTER_T; + +typedef struct zxdh_lag_t { + ZXIC_UINT32 member_bitmap; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 hash_factor; + ZXIC_UINT32 bond_mode; + ZXIC_UINT32 member_num; + ZXIC_UINT32 rsv1; + ZXIC_UINT32 hit_flag; +} ZXDH_LAG_T; + +typedef struct zxdh_bc_t { + ZXIC_UINT64 bc_bitmap; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 rsv1; + ZXIC_UINT32 hit_flag; +} ZXDH_BC_T; + +typedef struct zxdh_promisc_t { + ZXIC_UINT64 bitmap; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 rsv1; + ZXIC_UINT32 pf_enable; + ZXIC_UINT32 hit_flag; +} ZXDH_PROMISC_T; + +typedef struct zxdh_vhca_t { + ZXIC_UINT32 rsv2; + ZXIC_UINT32 vqm_vfid; + ZXIC_UINT32 rsv1; + ZXIC_UINT32 valid; +} ZXDH_VHCA_T; + +typedef struct zxdh_network_attr_t { + ZXIC_UINT32 rsv; + ZXIC_UINT32 upf; + ZXIC_UINT32 sdn_dyn_sriov_cni; + ZXIC_UINT32 three_plane_aggr; + ZXIC_UINT32 single_pipe; + ZXIC_UINT32 hit_flag; +} ZXDH_NETWORK_ATTR_T; + +typedef struct ovs_attr_para_t { + ZXIC_UINT32 rsv1; + ZXIC_UINT32 uplink_vqm_vfid; + ZXIC_UINT32 rsv0; + ZXIC_UINT32 is_passthrough; +} OVS_ATTR_PARA_T; + +typedef struct upf_attr_para_t { + ZXIC_UINT32 offload_eio_vfw; + ZXIC_UINT32 
offload_raw_vfw; + ZXIC_UINT32 offload_eion_lb; + ZXIC_UINT32 offload_raw_lb; + ZXIC_UINT32 offload_eio; + ZXIC_UINT32 offload_raw; + ZXIC_UINT32 normal; +} UPF_ATTR_PARA_T; +typedef struct zxdh_vport_traffic_attr_t { + union { + OVS_ATTR_PARA_T ovs_attr; + UPF_ATTR_PARA_T upf_attr; + } vport_traffic_attr; + ZXIC_UINT32 hit_flag; +} ZXDH_VPORT_TRAFFIC_ATTR_T; + +typedef struct zxdh_vqm_vfid_vlan_t { + ZXIC_UINT32 sriov_vlan_tci; + ZXIC_UINT32 sriov_vlan_tpid; + ZXIC_UINT32 sriov_business_vlan_tpid; + ZXIC_UINT32 rsv; + ZXIC_UINT32 sriov_business_vlan_strip_offload; + ZXIC_UINT32 sriov_business_qinq_vlan_strip_offload; + ZXIC_UINT32 sriov_business_vlan_filter; + ZXIC_UINT32 hit_flag; +} ZXDH_VQM_VFID_VLAN_T; + +typedef struct zxdh_fd_index_mng_t { + ZXIC_UINT32 vport; + ZXIC_UINT32 rsv; + ZXIC_UINT32 hit_flag; +} ZXDH_FD_INDEX_MNG_T; + +typedef struct zxdh_pkt_cap_kw_mode_t { + ZXIC_UINT64 rule2_key_word_off : 13; + ZXIC_UINT64 rsv4 : 3; + ZXIC_UINT64 rule2_key_word_len : 4; + ZXIC_UINT64 rsv3 : 12; + ZXIC_UINT64 rule1_key_word_off : 13; + ZXIC_UINT64 rsv2 : 3; + ZXIC_UINT64 rule1_key_word_len : 4; + ZXIC_UINT64 rsv1 : 11; + ZXIC_UINT64 hit_flag : 1; +} ZXDH_PKT_CAP_KW_MODE_T; + +typedef struct zxdh_stat_attr_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 mode; + ZXIC_UINT32 addr_offset; + ZXIC_UINT32 depth; +} ZXDH_STAT_ATTR_T; + +/*************eram call back ****************/ +ZXIC_UINT32 dpp_apt_set_vxlan_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vxlan_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_vport_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vport_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_uplink_phy_port_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_uplink_phy_port_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_dscp_to_up_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 
dpp_apt_get_dscp_to_up_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_up_to_tc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_up_to_tc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_rss_to_vqid_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_rss_to_vqid_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_vlan_filter_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vlan_filter_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_lag_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_lag_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_bc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_bc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_promisc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_promisc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_vhca_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vhca_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_network_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_network_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_vport_traffic_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vport_traffic_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_vqm_vfid_vlan_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_vqm_vfid_vlan_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_fd_index_mng(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_fd_index_mng(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_cap_keyword_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); +ZXIC_UINT32 
dpp_apt_get_cap_keyword_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]); + +ZXIC_UINT32 dpp_apt_set_stat_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); +ZXIC_UINT32 dpp_apt_get_stat_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]); + +SE_APT_ERAM_CONVERT_T *se_eram_callback_get(ZXIC_UINT32 sdt_no); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_hash.h b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_hash.h new file mode 100644 index 000000000000..69452bed3ad9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_hash.h @@ -0,0 +1,110 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_drv_hash.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef DPP_DRV_HASH_H +#define DPP_DRV_HASH_H + +#include "zxic_common.h" +#include "dpp_apt_se_api.h" + +/* hash-function for hash tbl */ + +/* L2 forward */ +typedef struct zxdh_l2_fwd_key { + ZXIC_UINT16 sriov_vlan_id /* : 16; */; + ZXIC_UINT16 sriov_vlan_tpid /* : 16; */; + ZXIC_UINT8 dmac_addr[6] /* : 48; */; +} ZXDH_L2_FWD_KEY; + +typedef struct zxdh_l2_fwd_entry { + ZXIC_UINT32 vqm_vfid /* : 11;*/; + ZXIC_UINT32 rsv /* : 20; */; + ZXIC_UINT32 hit_flag /* : 1; */; +} ZXDH_L2_FWD_ENTRY; + +typedef struct zxdh_l2_fwd_t { + ZXDH_L2_FWD_KEY key; + ZXDH_L2_FWD_ENTRY entry; +} ZXDH_L2_ENTRY_T; + +/* multicast */ +typedef struct zxdh_mc_key { + ZXIC_UINT8 mc_mac[6]; + ZXIC_UINT32 group_id; + ZXIC_UINT32 rsv; +} ZXDH_MC_KEY; + +typedef struct zxdh_mc_entry { + ZXIC_UINT64 mc_bitmap; + ZXIC_UINT32 rsv2; + ZXIC_UINT32 rsv1; + ZXIC_UINT32 mc_pf_enable; + ZXIC_UINT32 hit_flag; +} ZXDH_MC_ENTRY; + +typedef struct zxdh_mc_t { + ZXDH_MC_KEY key; + ZXDH_MC_ENTRY entry; +} ZXDH_MC_T; + +typedef struct 
zxdh_rdma_trans_key { + ZXIC_UINT8 mac_addr[6]; /**< @brief key */ + ZXIC_UINT16 rsv /* : 16; */; +} ZXDH_RDMA_TRANS_KEY; + +typedef struct zxdh_rdma_trans_entry { + ZXIC_UINT32 rdma_vhca_id /* : 10;*/; + ZXIC_UINT32 rsv /* : 21; */; + ZXIC_UINT32 hit_flag /* : 1; */; +} ZXDH_RDMA_TRANS_ENTRY; + +typedef struct zxdh_rdma_trans_t { + ZXDH_RDMA_TRANS_KEY key; + ZXDH_RDMA_TRANS_ENTRY entry; +} ZXDH_RDMA_TRANS_T; + +ZXIC_UINT32 dpp_apt_set_l2entry_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry); +ZXIC_UINT32 dpp_apt_get_l2entry_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry); + +ZXIC_UINT32 dpp_apt_set_mc_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry); +ZXIC_UINT32 dpp_apt_get_mc_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry); + +ZXIC_UINT32 dpp_apt_set_rdma_trans_data(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry); +ZXIC_UINT32 dpp_apt_get_rdma_trans_data(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry); + +typedef struct dpp_hash_init_t { + ZXIC_UINT32 func_num; + DPP_APT_HASH_FUNC_RES_T *func; + ZXIC_UINT32 bulk_num; + DPP_APT_HASH_BULK_RES_T *bulk; + ZXIC_UINT32 ser_num; + DPP_APT_HASH_TABLE_T *ser; +} DPP_HASH_INIT_T; + +DPP_STATUS dpp_apt_dtb_hash_table_unicast_mac_dump( + DPP_DEV_T *dev, ZXIC_UINT32 queue_id, ZXIC_UINT32 sdt_no, + ZXDH_L2_ENTRY_T *pHashDataArr, ZXIC_UINT32 *p_entry_num); +DPP_STATUS dpp_apt_dtb_hash_table_multicast_mac_dump(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXDH_MC_T *pHashDataArr, + ZXIC_UINT32 *p_entry_num); +SE_APT_HASH_CONVERT_T *se_hash_callback_get(ZXIC_UINT32 sdt_no); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_init.h b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_init.h new file mode 100644 index 000000000000..cdd15d47d9da --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_init.h @@ -0,0 +1,32 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_drv_init.h +* 文件标识 : +* 内容摘要 : +* 
其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_DRV_INIT_H +#define DPP_DRV_INIT_H + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_dev.h" + +DPP_STATUS dpp_flow_init(DPP_DEV_T *dev); +DPP_STATUS dpp_flow_uninit(DPP_DEV_T *dev); +ZXIC_VOID dpp_flow_init_status_init(ZXIC_VOID); +DPP_STATUS dpp_flow_data_all_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id); +DPP_STATUS dpp_bar_msg_num_init(DPP_DEV_T *dev); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_sdt.h b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_sdt.h new file mode 100644 index 000000000000..2c8411a5cd55 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/include/dpp_drv_sdt.h @@ -0,0 +1,73 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_drv_sdt.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef DPP_DRV_SDT_H +#define DPP_DRV_SDT_H + +#include "zxic_common.h" + +/*************SDT配置***************************/ +/* eram直接表 */ +#define ZXDH_SDT_VXLAN_ATTR_TABLE ((ZXIC_UINT32)(0)) +#define ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE ((ZXIC_UINT32)(1)) +#define ZXDH_SDT_UPLINK_PHY_PORT_ATTR_TABLE ((ZXIC_UINT32)(2)) +#define ZXDH_SDT_RSS_TO_VQID_TABLE ((ZXIC_UINT32)(3)) +#define ZXDH_SDT_VLAN_FILTER_TABLE ((ZXIC_UINT32)(4)) +#define ZXDH_SDT_LAG_TABLE ((ZXIC_UINT32)(5)) +#define ZXDH_SDT_BC_TABLE ((ZXIC_UINT32)(6)) +#define ZXDH_SDT_DSCP_TO_UP_TABLE ((ZXIC_UINT32)(7)) +#define ZXDH_SDT_UP_TO_TC_TABLE ((ZXIC_UINT32)(8)) +#define ZXDH_SDT_UC_PROMISC_TABLE ((ZXIC_UINT32)(10)) +#define 
ZXDH_SDT_MC_PROMISC_TABLE ((ZXIC_UINT32)(11)) +#define ZXDH_SDT_FLOW_ID_TABLE ((ZXIC_UINT32)(12)) +#define ZXDH_SDT_MAINTAIN_TABLE ((ZXIC_UINT32)(13)) +#define ZXDH_SDT_NETWORK_ATTR_TABLE ((ZXIC_UINT32)(14)) +#define ZXDH_SDT_VPORT_TRAFFIC_ATTR_TABLE ((ZXIC_UINT32)(15)) +#define ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE ((ZXIC_UINT32)(16)) +#define ZXDH_SDT_CAP_KEYWORD_ATTR_TABLE ((ZXIC_UINT32)(17)) +#define ZXDH_SDT_MAINTAIN_DIAG_TABLE ((ZXIC_UINT32)(18)) +#define ZXDH_SDT_PSN_ARN_ATTR_TABLE ((ZXIC_UINT32)(19)) +#define ZXDH_SDT_STAT_ATTR_TABLE ((ZXIC_UINT32)(20)) +#define ZXDH_SDT_TUNNEL_ENCAP0_TABLE ((ZXIC_UINT32)(28)) +#define ZXDH_SDT_TUNNEL_ENCAP1_TABLE ((ZXIC_UINT32)(29)) +#define ZXDH_SDT_ACL_INDEX_MNG_TABLE ((ZXIC_UINT32)(30)) +#define ZXDH_SDT_VHCA_TABLE ((ZXIC_UINT32)(50)) + +/* hash表 */ +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 ((ZXIC_UINT32)(64)) +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT1 ((ZXIC_UINT32)(65)) +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT2 ((ZXIC_UINT32)(66)) +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT3 ((ZXIC_UINT32)(67)) +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT4 ((ZXIC_UINT32)(68)) +#define ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT5 ((ZXIC_UINT32)(69)) + +#define ZXDH_SDT_MC_TABLE_PHYPORT0 ((ZXIC_UINT32)(76)) +#define ZXDH_SDT_MC_TABLE_PHYPORT1 ((ZXIC_UINT32)(77)) +#define ZXDH_SDT_MC_TABLE_PHYPORT2 ((ZXIC_UINT32)(78)) +#define ZXDH_SDT_MC_TABLE_PHYPORT3 ((ZXIC_UINT32)(79)) +#define ZXDH_SDT_MC_TABLE_PHYPORT4 ((ZXIC_UINT32)(80)) +#define ZXDH_SDT_MC_TABLE_PHYPORT5 ((ZXIC_UINT32)(81)) +#define ZXDH_SDT_LOGIC_GROUP_TABLE ((ZXIC_UINT32)(87)) +#define ZXDH_SDT_RDMA_ENTRY_TABLE ((ZXIC_UINT32)(90)) + +/* etcam表 */ +#define ZXDH_SDT_FD_CFG_TABLE ((ZXIC_UINT32)(130)) +#define ZXDH_SDT_IPSEC_ENC_TABLE ((ZXIC_UINT32)(131)) +#define ZXDH_SDT_CAPTURE_PKT_TABLE ((ZXIC_UINT32)(132)) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/driver/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/driver/source/Kbuild.include new file mode 100644 index 
000000000000..da11be2ad94b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/driver/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_acl.c b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_acl.c new file mode 100644 index 000000000000..d6594525d95f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_acl.c @@ -0,0 +1,477 @@ +#include "dpp_apt_se_api.h" +#include "dpp_se_api.h" +#include "dpp_se_diag.h" +#include "dpp_drv_sdt.h" +#include "dpp_drv_acl.h" + +static SE_APT_ACL_CONVERT_T g_se_acl_callback[] = { + { + ZXDH_SDT_IPSEC_ENC_TABLE, + dpp_apt_set_ipsec_enc_data, /** <@brief 结构体转换为码流 */ + dpp_apt_get_ipsec_enc_data, /** <@brief 码流转换为结构体 */ + }, + { + // res表 + ZXDH_SDT_CAPTURE_PKT_TABLE, + dpp_apt_set_pkt_cap_data, /** <@brief 结构体转换为码流 */ + dpp_apt_get_pkt_cap_data, /** <@brief 码流转换为结构体 */ + }, +}; + +SE_APT_ACL_CONVERT_T *se_acl_callback_get(ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 num = 0; + num = sizeof(g_se_acl_callback) / sizeof(SE_APT_ACL_CONVERT_T); + for (index = 0; index < num; index++) { + if (g_se_acl_callback[index].sdt_no == sdt_no) { + return &g_se_acl_callback[index]; + } + } + return NULL; +} + +ZXIC_UINT32 dpp_apt_set_ipsec_enc_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry) +{ + ZXIC_UINT32 key_data = 0; + ZXIC_UINT32 key_mask = 0; + ZXIC_UINT32 rst = 0; + + ZXDH_IPSEC_ENC_T *ipsec_enc_table = pData; + + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(ipsec_enc_table); + + aclEntry->pri = ipsec_enc_table->index; + + if (aclEntry->key_data) { + ZXIC_COMM_UINT32_WRITE_BITS(key_data, ipsec_enc_table->key.rsv1, + 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data, &key_data, + sizeof(ZXIC_UINT32)); + + 
ZXIC_COMM_UINT32_WRITE_BITS(key_data, ipsec_enc_table->key.rsv2, + 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 4, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_data + 8, + ipsec_enc_table->key.sip, 16); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 24, + ipsec_enc_table->key.dip, 16); + } + + if (aclEntry->key_mask) { + ZXIC_COMM_UINT32_WRITE_BITS(key_mask, + ipsec_enc_table->mask.rsv1, 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(key_mask, + ipsec_enc_table->mask.rsv2, 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 4, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 8, + ipsec_enc_table->mask.sip, 16); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 24, + ipsec_enc_table->mask.dip, 16); + } + + if (aclEntry->p_as_rslt) { + ZXIC_COMM_UINT32_WRITE_BITS( + rst, ipsec_enc_table->entry.hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, ipsec_enc_table->entry.rsv, 0, + 31); + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->p_as_rslt, &rst, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(rst, ipsec_enc_table->entry.sa_id, + 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->p_as_rslt + 4, &rst, + sizeof(ZXIC_UINT32)); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_ipsec_enc_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry) +{ + ZXDH_IPSEC_ENC_T *ipsec_enc_table = pData; + + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(ipsec_enc_table); + + ipsec_enc_table->index = aclEntry->pri; + + if (aclEntry->key_data) { + zxic_comm_swap(aclEntry->key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(ipsec_enc_table->key.rsv1, + *(ZXIC_UINT32 *)(aclEntry->key_data), 
+ 0, 32); + + zxic_comm_swap(aclEntry->key_data + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + ipsec_enc_table->key.rsv2, + *(ZXIC_UINT32 *)(aclEntry->key_data + 4), 0, 32); + + ZXIC_COMM_MEMCPY(ipsec_enc_table->key.sip, + aclEntry->key_data + 8, 16); + ZXIC_COMM_MEMCPY(ipsec_enc_table->key.dip, + aclEntry->key_data + 24, 16); + } + + if (aclEntry->key_mask) { + zxic_comm_swap(aclEntry->key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(ipsec_enc_table->mask.rsv1, + *(ZXIC_UINT32 *)(aclEntry->key_mask), + 0, 32); + + zxic_comm_swap(aclEntry->key_mask + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + ipsec_enc_table->mask.rsv2, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 4), 0, 32); + + ZXIC_COMM_MEMCPY(ipsec_enc_table->mask.sip, + aclEntry->key_mask + 8, 16); + ZXIC_COMM_MEMCPY(ipsec_enc_table->mask.dip, + aclEntry->key_mask + 24, 16); + } + + if (aclEntry->p_as_rslt) { + zxic_comm_swap(aclEntry->p_as_rslt, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(ipsec_enc_table->entry.hit_flag, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt), + 31, 1); + ZXIC_COMM_UINT32_GET_BITS(ipsec_enc_table->entry.rsv, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt), + 0, 31); + + zxic_comm_swap(aclEntry->p_as_rslt + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + ipsec_enc_table->entry.sa_id, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt + 4), 0, 32); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_pkt_cap_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry) +{ + ZXIC_UINT32 key_data = 0; + ZXIC_UINT32 key_mask = 0; + ZXIC_UINT32 rst = 0; + + ZXDH_PKT_CAP_T *pkt_cap_table = pData; + + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(pkt_cap_table); + + aclEntry->pri = pkt_cap_table->index; + + if (aclEntry->key_data) { + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.rsv, + 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + key_data, pkt_cap_table->key.capture_pkt_flag, 28, 3); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, + pkt_cap_table->key.panel_id, 24, 4); + 
ZXIC_COMM_UINT32_WRITE_BITS(key_data, + pkt_cap_table->key.protocol, 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, + pkt_cap_table->key.vqm_vfid, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(key_data, + pkt_cap_table->key.vhca_id, 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, + pkt_cap_table->key.ethtype, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&(key_data), sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 4, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_data + 8, + pkt_cap_table->key.dmac, 6); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 14, + pkt_cap_table->key.smac, 6); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 20, + pkt_cap_table->key.sip, 16); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 36, + pkt_cap_table->key.dip, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.sport, + 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.dport, + 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 52, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.qp, 8, + 24); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.rsv, 0, + 8); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 56, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.rsv, + 20, 12); + ZXIC_COMM_UINT32_WRITE_BITS( + key_data, pkt_cap_table->key.key_word_len, 16, 4); + ZXIC_COMM_UINT32_WRITE_BITS(key_data, pkt_cap_table->key.rsv, + 13, 3); + ZXIC_COMM_UINT32_WRITE_BITS( + key_data, pkt_cap_table->key.key_word_off, 0, 13); + zxic_comm_swap((ZXIC_UINT8 *)&key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_data + 60, &key_data, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_data + 64, + 
pkt_cap_table->key.key_word, 15); + + key_data = pkt_cap_table->key.rsv; + ZXIC_COMM_MEMCPY(aclEntry->key_data + 79, &key_data, 1); + } + + if (aclEntry->key_mask) { + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.rsv_mask, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.capture_pkt_flag_mask, 28, + 3); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.panel_id_mask, 24, 4); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.protocol_mask, 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.vqm_vfid_mask, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.vhca_id_mask, 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.ethtype_mask, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&(key_mask), sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 4, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 8, + pkt_cap_table->mask.dmac_mask, 6); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 14, + pkt_cap_table->mask.smac_mask, 6); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 20, + pkt_cap_table->mask.sip_mask, 16); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 36, + pkt_cap_table->mask.dip_mask, 16); + + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.sport_mask, 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.dport_mask, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&(key_mask), sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 52, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(key_mask, + pkt_cap_table->mask.qp_mask, 8, 24); + ZXIC_COMM_UINT32_WRITE_BITS(key_mask, + pkt_cap_table->mask.rsv_mask, 0, 8); + zxic_comm_swap((ZXIC_UINT8 *)&key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 56, &key_mask, + sizeof(ZXIC_UINT32)); + 
+ ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.rsv_mask, 20, 12); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.key_word_len_mask, 16, 4); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.rsv_mask, 13, 3); + ZXIC_COMM_UINT32_WRITE_BITS( + key_mask, pkt_cap_table->mask.key_word_off_mask, 0, 13); + zxic_comm_swap((ZXIC_UINT8 *)&key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 60, &key_mask, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 64, + pkt_cap_table->mask.key_word_mask, 15); + + key_mask = pkt_cap_table->mask.rsv_mask; + ZXIC_COMM_MEMCPY(aclEntry->key_mask + 79, &key_mask, 1); + } + + if (aclEntry->p_as_rslt) { + ZXIC_COMM_UINT32_WRITE_BITS(rst, pkt_cap_table->entry.hit_flag, + 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + rst, pkt_cap_table->entry.value_flag, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, 0, 24, 6); + ZXIC_COMM_UINT32_WRITE_BITS(rst, pkt_cap_table->entry.index, 16, + 8); + ZXIC_COMM_UINT32_WRITE_BITS(rst, pkt_cap_table->entry.vqm_vfid, + 0, 16); + ZXIC_COMM_MEMCPY(aclEntry->p_as_rslt, &rst, + sizeof(ZXIC_UINT32)); + + rst = 0; + ZXIC_COMM_MEMCPY(aclEntry->p_as_rslt + 4, &rst, + sizeof(ZXIC_UINT32)); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_pkt_cap_data(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry) +{ + ZXDH_PKT_CAP_T *pkt_cap_table = pData; + + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(pkt_cap_table); + + pkt_cap_table->index = aclEntry->pri; + + if (aclEntry->key_data) { + zxic_comm_swap(aclEntry->key_data, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->key.capture_pkt_flag, + *(ZXIC_UINT32 *)(aclEntry->key_data), + 28, 3); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->key.panel_id, + *(ZXIC_UINT32 *)(aclEntry->key_data), + 24, 4); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->key.protocol, + *(ZXIC_UINT32 *)(aclEntry->key_data), + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->key.vqm_vfid, + *(ZXIC_UINT32 
*)(aclEntry->key_data), + 0, 16); + + zxic_comm_swap(aclEntry->key_data + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.vhca_id, + *(ZXIC_UINT32 *)(aclEntry->key_data + 4), 16, 16); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.ethtype, + *(ZXIC_UINT32 *)(aclEntry->key_data + 4), 0, 16); + + ZXIC_COMM_MEMCPY(pkt_cap_table->key.dmac, + aclEntry->key_data + 8, 6); + ZXIC_COMM_MEMCPY(pkt_cap_table->key.smac, + aclEntry->key_data + 14, 6); + ZXIC_COMM_MEMCPY(pkt_cap_table->key.sip, + aclEntry->key_data + 20, 16); + ZXIC_COMM_MEMCPY(pkt_cap_table->key.dip, + aclEntry->key_data + 36, 16); + + zxic_comm_swap(aclEntry->key_data + 52, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.sport, + *(ZXIC_UINT32 *)(aclEntry->key_data + 52), 16, 16); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.dport, + *(ZXIC_UINT32 *)(aclEntry->key_data + 52), 0, 16); + + zxic_comm_swap(aclEntry->key_data + 56, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.qp, + *(ZXIC_UINT32 *)(aclEntry->key_data + 56), 8, 24); + + zxic_comm_swap(aclEntry->key_data + 60, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.key_word_len, + *(ZXIC_UINT32 *)(aclEntry->key_data + 60), 16, 4); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->key.key_word_off, + *(ZXIC_UINT32 *)(aclEntry->key_data + 60), 0, 13); + + ZXIC_COMM_MEMCPY(pkt_cap_table->key.key_word, + aclEntry->key_data + 64, 15); + } + + if (aclEntry->key_mask) { + zxic_comm_swap(aclEntry->key_mask, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.capture_pkt_flag_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask), 28, 3); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->mask.panel_id_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask), + 24, 4); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->mask.protocol_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask), + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->mask.vqm_vfid_mask, + *(ZXIC_UINT32 
*)(aclEntry->key_mask), + 0, 16); + + zxic_comm_swap(aclEntry->key_mask + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.vhca_id_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 4), 16, 16); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.ethtype_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 4), 0, 16); + + ZXIC_COMM_MEMCPY(pkt_cap_table->mask.dmac_mask, + aclEntry->key_mask + 8, 6); + ZXIC_COMM_MEMCPY(pkt_cap_table->mask.smac_mask, + aclEntry->key_mask + 14, 6); + ZXIC_COMM_MEMCPY(pkt_cap_table->mask.sip_mask, + aclEntry->key_mask + 20, 16); + ZXIC_COMM_MEMCPY(pkt_cap_table->mask.dip_mask, + aclEntry->key_mask + 36, 16); + + zxic_comm_swap(aclEntry->key_mask + 52, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.sport_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 52), 16, 16); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.dport_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 52), 0, 16); + + zxic_comm_swap(aclEntry->key_mask + 56, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.qp_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 56), 8, 24); + + zxic_comm_swap(aclEntry->key_mask + 60, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.key_word_len_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 60), 16, 4); + ZXIC_COMM_UINT32_GET_BITS( + pkt_cap_table->mask.key_word_off_mask, + *(ZXIC_UINT32 *)(aclEntry->key_mask + 60), 0, 13); + + ZXIC_COMM_MEMCPY(pkt_cap_table->mask.key_word_mask, + aclEntry->key_mask + 64, 15); + } + + if (aclEntry->p_as_rslt) { + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->entry.hit_flag, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt), + 31, 1); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->entry.value_flag, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt), + 30, 1); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->entry.index, + *(ZXIC_UINT32 *)(aclEntry->p_as_rslt), + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(pkt_cap_table->entry.vqm_vfid, + *(ZXIC_UINT32 
*)(aclEntry->p_as_rslt), + 0, 16); + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_eram.c b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_eram.c new file mode 100644 index 000000000000..3e010e493f16 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_eram.c @@ -0,0 +1,825 @@ +#include "dpp_apt_se_api.h" +#include "dpp_se_api.h" +#include "dpp_drv_sdt.h" +#include "dpp_drv_eram.h" +#include "dpp_apt_se.h" + +static SE_APT_ERAM_CONVERT_T g_se_eram_callback[] = { + { ZXDH_SDT_VXLAN_ATTR_TABLE, dpp_apt_set_vxlan_data, + dpp_apt_get_vxlan_data }, + { ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE, dpp_apt_set_vport_data, + dpp_apt_get_vport_data }, + { ZXDH_SDT_UPLINK_PHY_PORT_ATTR_TABLE, dpp_apt_set_uplink_phy_port_data, + dpp_apt_get_uplink_phy_port_data }, + { ZXDH_SDT_RSS_TO_VQID_TABLE, dpp_apt_set_rss_to_vqid_data, + dpp_apt_get_rss_to_vqid_data }, + { ZXDH_SDT_VLAN_FILTER_TABLE, dpp_apt_set_vlan_filter_data, + dpp_apt_get_vlan_filter_data }, + { ZXDH_SDT_LAG_TABLE, dpp_apt_set_lag_data, dpp_apt_get_lag_data }, + { ZXDH_SDT_BC_TABLE, dpp_apt_set_bc_data, dpp_apt_get_bc_data }, + { ZXDH_SDT_DSCP_TO_UP_TABLE, dpp_apt_set_dscp_to_up_data, + dpp_apt_get_dscp_to_up_data }, + { ZXDH_SDT_UP_TO_TC_TABLE, dpp_apt_set_up_to_tc_data, + dpp_apt_get_up_to_tc_data }, + { ZXDH_SDT_ACL_INDEX_MNG_TABLE, dpp_apt_set_fd_index_mng, + dpp_apt_get_fd_index_mng }, + { ZXDH_SDT_VHCA_TABLE, dpp_apt_set_vhca_data, dpp_apt_get_vhca_data }, + { ZXDH_SDT_UC_PROMISC_TABLE, dpp_apt_set_promisc_data, + dpp_apt_get_promisc_data }, + { ZXDH_SDT_MC_PROMISC_TABLE, dpp_apt_set_promisc_data, + dpp_apt_get_promisc_data }, + { ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE, dpp_apt_set_vqm_vfid_vlan_data, + dpp_apt_get_vqm_vfid_vlan_data }, + { ZXDH_SDT_CAP_KEYWORD_ATTR_TABLE, dpp_apt_set_cap_keyword_attr_data, + dpp_apt_get_cap_keyword_attr_data }, + { ZXDH_SDT_STAT_ATTR_TABLE, dpp_apt_set_stat_attr_data, + dpp_apt_get_stat_attr_data } +}; + 
+SE_APT_ERAM_CONVERT_T *se_eram_callback_get(ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 num = 0; + num = sizeof(g_se_eram_callback) / sizeof(SE_APT_ERAM_CONVERT_T); + for (index = 0; index < num; index++) { + if (g_se_eram_callback[index].sdt_no == sdt_no) { + return &g_se_eram_callback[index]; + } + } + return NULL; +} + +ZXIC_UINT32 dpp_apt_set_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4], + ZXIC_UINT32 size) +{ + if (pData == NULL) { + return DPP_ERR; + } + memcpy(buff, pData, size); + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4], + ZXIC_UINT32 size) +{ + if (pData == NULL) { + return DPP_ERR; + } + memcpy(pData, buff, size); + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_vxlan_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + return dpp_apt_set_data(pData, buff, sizeof(ZXDH_VXLAN_T)); +} + +ZXIC_UINT32 dpp_apt_get_vxlan_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + return dpp_apt_get_data(pData, buff, sizeof(ZXDH_VXLAN_T)); +} + +ZXIC_UINT32 dpp_apt_set_vport_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_SRIOV_VPORT_T *port_attr = (ZXDH_SRIOV_VPORT_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->mtu_offload_enable, 30, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->rss_enable, 29, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], port_attr->sriov_business_vlan_offload_enable, 28, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], port_attr->sriov_vlan_offload_enable, 27, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->promisc_enable, 26, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->rdma_offload_enable, 25, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->hw_bond_enable, 24, 1); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->business_enable, 23, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->is_up, 22, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], 
port_attr->outer_ip_checksum_offload, 21, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->ip_checksum_offload, 20, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], + port_attr->tcp_udp_checksum_offload, 19, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->ip_recombine_offload, + 18, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->lro_offload, 17, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], + port_attr->accelerator_offload_flag, 16, 1); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->virtio_enable, 15, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->virtio_version, 13, 2); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->is_vf, 12, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->vepa_enable, 11, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->lag_enable, 10, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->fd_enable, 9, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->inline_sec_offload, 8, + 1); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->spoof_check_enable, 7, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->rsv1, 6, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_ingress_tm_enable, 5, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_egress_tm_enable, 4, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_ingress_meter_mode, + 3, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_egress_meter_mode, 2, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_ingress_meter_enable, + 1, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], port_attr->np_egress_meter_enable, + 0, 1); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], port_attr->rsv2, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], port_attr->hash_search_index, 28, + 3); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], port_attr->port_base_qid, 16, 12); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], port_attr->mtu, 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->rsv3, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], 
port_attr->pf_vqm_vfid, 20, 11); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->fd_vxlan_offload_en, 19, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->lag_id, 16, 3); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->uplink_phy_port_id, 12, + 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->hash_alg, 8, 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], port_attr->rss_hash_factor, 0, 8); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], port_attr->flag_1588_enable, 31, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], port_attr->rsv5, 26, 5); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], port_attr->vhca, 16, 10); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], port_attr->rsv6, 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_vport_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_SRIOV_VPORT_T *port_attr = (ZXDH_SRIOV_VPORT_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(port_attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->mtu_offload_enable, buff[0], 30, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rss_enable, buff[0], 29, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->sriov_business_vlan_offload_enable, + buff[0], 28, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->sriov_vlan_offload_enable, buff[0], + 27, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->promisc_enable, buff[0], 26, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rdma_offload_enable, buff[0], 25, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->hw_bond_enable, buff[0], 24, 1); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->business_enable, buff[0], 23, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->is_up, buff[0], 22, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->outer_ip_checksum_offload, buff[0], + 21, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->ip_checksum_offload, buff[0], 20, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->tcp_udp_checksum_offload, buff[0], + 19, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->ip_recombine_offload, buff[0], 18, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->lro_offload, buff[0], 17, 1); + 
ZXIC_COMM_UINT32_GET_BITS(port_attr->accelerator_offload_flag, buff[0], + 16, 1); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->virtio_enable, buff[0], 15, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->virtio_version, buff[0], 13, 2); + ZXIC_COMM_UINT32_GET_BITS(port_attr->is_vf, buff[0], 12, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->vepa_enable, buff[0], 11, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->lag_enable, buff[0], 10, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->fd_enable, buff[0], 9, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->inline_sec_offload, buff[0], 8, 1); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->spoof_check_enable, buff[0], 7, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rsv1, buff[0], 6, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_ingress_tm_enable, buff[0], 5, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_egress_tm_enable, buff[0], 4, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_ingress_meter_mode, buff[0], 3, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_egress_meter_mode, buff[0], 2, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_ingress_meter_enable, buff[0], + 1, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->np_egress_meter_enable, buff[0], 0, + 1); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->rsv2, buff[1], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->hash_search_index, buff[1], 28, 3); + ZXIC_COMM_UINT32_GET_BITS(port_attr->port_base_qid, buff[1], 16, 12); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->mtu, buff[1], 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->rsv3, buff[2], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->pf_vqm_vfid, buff[2], 20, 11); + ZXIC_COMM_UINT32_GET_BITS(port_attr->fd_vxlan_offload_en, buff[2], 19, + 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->lag_id, buff[2], 16, 3); + + ZXIC_COMM_UINT32_GET_BITS(port_attr->uplink_phy_port_id, buff[2], 12, + 4); + ZXIC_COMM_UINT32_GET_BITS(port_attr->hash_alg, buff[2], 8, 4); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rss_hash_factor, buff[2], 0, 8); + + 
ZXIC_COMM_UINT32_GET_BITS(port_attr->flag_1588_enable, buff[3], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rsv5, buff[3], 26, 6); + ZXIC_COMM_UINT32_GET_BITS(port_attr->vhca, buff[3], 16, 10); + ZXIC_COMM_UINT32_GET_BITS(port_attr->rsv6, buff[3], 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_uplink_phy_port_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_UPLINK_PHY_PORT_T *attr = (ZXDH_UPLINK_PHY_PORT_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->trust_mode, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->ptp_tc_enable, 28, 2); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->tm_shape_enable, 27, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->magic_packet_enable, 26, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->rsv1, 11, 15); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->ptp_port_vfid, 0, 11); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->tm_base_queue, 20, 12); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->rsv2, 17, 3); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->mtu_offload_enable, 16, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->mtu, 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->hw_bond_enable, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->bond_link_up, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->is_up, 29, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->sriov_hdbond_enable, 28, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->rsv3, 27, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->lacp_pf_vqm_vfid, 16, 11); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->rsv4, 12, 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->lacp_pf_memport_qid, 0, 12); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->rsv5, 27, 5); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->pf_vqm_vfid, 16, 11); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->rsv6, 11, 5); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->primary_pf_vqm_vfid, 0, 11); + + return DPP_OK; +} + 
+ZXIC_UINT32 dpp_apt_get_uplink_phy_port_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_UPLINK_PHY_PORT_T *attr = (ZXDH_UPLINK_PHY_PORT_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->trust_mode, buff[0], 30, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->ptp_tc_enable, buff[0], 28, 2); + ZXIC_COMM_UINT32_GET_BITS(attr->tm_shape_enable, buff[0], 27, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->magic_packet_enable, buff[0], 26, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv1, buff[0], 11, 15); + ZXIC_COMM_UINT32_GET_BITS(attr->ptp_port_vfid, buff[0], 0, 11); + ZXIC_COMM_UINT32_GET_BITS(attr->tm_base_queue, buff[1], 20, 12); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv2, buff[1], 17, 3); + ZXIC_COMM_UINT32_GET_BITS(attr->mtu_offload_enable, buff[1], 16, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->mtu, buff[1], 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(attr->hw_bond_enable, buff[2], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->bond_link_up, buff[2], 30, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->is_up, buff[2], 29, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->sriov_hdbond_enable, buff[2], 28, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv3, buff[2], 27, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->lacp_pf_vqm_vfid, buff[2], 16, 11); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv4, buff[2], 12, 4); + ZXIC_COMM_UINT32_GET_BITS(attr->lacp_pf_memport_qid, buff[2], 0, 12); + + ZXIC_COMM_UINT32_GET_BITS(attr->rsv5, buff[3], 27, 5); + ZXIC_COMM_UINT32_GET_BITS(attr->pf_vqm_vfid, buff[3], 16, 11); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv6, buff[3], 11, 5); + ZXIC_COMM_UINT32_GET_BITS(attr->primary_pf_vqm_vfid, buff[3], 0, 11); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_dscp_to_up_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_DSCP_TO_UP_T *attr = (ZXDH_DSCP_TO_UP_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->rsv1, 3, 28); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->up, 0, 3); + + 
ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->rsv2, 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_dscp_to_up_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_DSCP_TO_UP_T *attr = (ZXDH_DSCP_TO_UP_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv1, buff[0], 3, 28); + ZXIC_COMM_UINT32_GET_BITS(attr->up, buff[0], 0, 3); + + ZXIC_COMM_UINT32_GET_BITS(attr->rsv2, buff[1], 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_up_to_tc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_UP_TO_TC_T *attr = (ZXDH_UP_TO_TC_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->rsv1, 3, 28); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->tc, 0, 3); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->rsv2, 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_up_to_tc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_UP_TO_TC_T *attr = (ZXDH_UP_TO_TC_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->rsv1, buff[0], 3, 28); + ZXIC_COMM_UINT32_GET_BITS(attr->tc, buff[0], 0, 3); + + ZXIC_COMM_UINT32_GET_BITS(attr->rsv2, buff[1], 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_rss_to_vqid_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_RSS_TO_VQID_T *attr = (ZXDH_RSS_TO_VQID_T *)pData; + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->vqm_qid[0], 16, 15); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], attr->vqm_qid[1], 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->vqm_qid[2], 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], attr->vqm_qid[3], 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->vqm_qid[4], 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], attr->vqm_qid[5], 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->vqm_qid[6], 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], attr->vqm_qid[7], 0, 16); 
+ + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_rss_to_vqid_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_RSS_TO_VQID_T *attr = (ZXDH_RSS_TO_VQID_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[0], buff[0], 16, 15); + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[1], buff[0], 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[2], buff[1], 16, 16); + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[3], buff[1], 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[4], buff[2], 16, 16); + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[5], buff[2], 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[6], buff[3], 16, 16); + ZXIC_COMM_UINT32_GET_BITS(attr->vqm_qid[7], buff[3], 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_vlan_filter_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_VLAN_FILTER_T *vlan_filter_table = (ZXDH_VLAN_FILTER_T *)pData; + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_filter_table->hit_flag, 31, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_filter_table->rsv, 24, 7); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_filter_table->vport_bitmap[0], + 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_filter_table->vport_bitmap[1], + 8, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_filter_table->vport_bitmap[2], + 0, 8); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_filter_table->vport_bitmap[3], + 24, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_filter_table->vport_bitmap[4], + 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_filter_table->vport_bitmap[5], + 8, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_filter_table->vport_bitmap[6], + 0, 8); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], vlan_filter_table->vport_bitmap[7], + 24, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], vlan_filter_table->vport_bitmap[8], + 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], vlan_filter_table->vport_bitmap[9], + 8, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], + vlan_filter_table->vport_bitmap[10], 
0, 8); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], + vlan_filter_table->vport_bitmap[11], 24, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], + vlan_filter_table->vport_bitmap[12], 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], + vlan_filter_table->vport_bitmap[13], 8, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], + vlan_filter_table->vport_bitmap[14], 0, 8); + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_vlan_filter_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_VLAN_FILTER_T *vlan_filter_table = (ZXDH_VLAN_FILTER_T *)pData; + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->rsv, buff[0], 24, 7); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[0], buff[0], + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[1], buff[0], + 8, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[2], buff[0], + 0, 8); + + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[3], buff[1], + 24, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[4], buff[1], + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[5], buff[1], + 8, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[6], buff[1], + 0, 8); + + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[7], buff[2], + 24, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[8], buff[2], + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[9], buff[2], + 8, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[10], buff[2], + 0, 8); + + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[11], buff[3], + 24, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[12], buff[3], + 16, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[13], buff[3], + 8, 8); + ZXIC_COMM_UINT32_GET_BITS(vlan_filter_table->vport_bitmap[14], buff[3], + 0, 8); + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_lag_data(ZXIC_VOID *pData, 
ZXIC_UINT32 buff[4]) +{ + ZXDH_LAG_T *lag_entry = (ZXDH_LAG_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->rsv1, 27, 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->member_num, 24, 3); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->bond_mode, 16, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->hash_factor, 8, 8); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], lag_entry->rsv2, 0, 8); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], lag_entry->rsv2, 16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], lag_entry->member_bitmap, 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_lag_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_LAG_T *lag_entry = (ZXDH_LAG_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(lag_entry->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->rsv1, buff[0], 27, 4); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->member_num, buff[0], 24, 3); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->bond_mode, buff[0], 16, 8); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->hash_factor, buff[0], 8, 8); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->rsv2, buff[0], 0, 8); + + ZXIC_COMM_UINT32_GET_BITS(lag_entry->rsv2, buff[1], 16, 16); + ZXIC_COMM_UINT32_GET_BITS(lag_entry->member_bitmap, buff[1], 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_bc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXIC_UINT32 bc_bitmap = 0; + ZXDH_BC_T *bc_entry = (ZXDH_BC_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], bc_entry->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], bc_entry->rsv1, 0, 31); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], bc_entry->rsv2, 0, 32); + + bc_bitmap = bc_entry->bc_bitmap >> 32; + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], bc_bitmap, 0, 32); + + bc_bitmap = bc_entry->bc_bitmap; + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], bc_bitmap, 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_bc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXIC_UINT32 bc_bitmap = 0; + ZXDH_BC_T 
*bc_entry = (ZXDH_BC_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(bc_entry->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(bc_entry->rsv1, buff[0], 0, 31); + + ZXIC_COMM_UINT32_GET_BITS(bc_entry->rsv2, buff[1], 0, 32); + + ZXIC_COMM_UINT32_GET_BITS(bc_bitmap, buff[2], 0, 32); + bc_entry->bc_bitmap = (((ZXIC_UINT64)bc_bitmap) << 32); + + ZXIC_COMM_UINT32_GET_BITS(bc_bitmap, buff[3], 0, 32); + bc_entry->bc_bitmap |= bc_bitmap; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_promisc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXIC_UINT32 bitmap = 0; + ZXDH_PROMISC_T *promisc_entry = (ZXDH_PROMISC_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], promisc_entry->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], promisc_entry->pf_enable, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], promisc_entry->rsv1, 0, 30); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], promisc_entry->rsv2, 0, 32); + + bitmap = promisc_entry->bitmap >> 32; + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], bitmap, 0, 32); + + bitmap = promisc_entry->bitmap; + ZXIC_COMM_UINT32_WRITE_BITS(buff[3], bitmap, 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_promisc_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXIC_UINT32 bitmap = 0; + ZXDH_PROMISC_T *promisc_entry = (ZXDH_PROMISC_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(promisc_entry->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(promisc_entry->pf_enable, buff[0], 30, 1); + ZXIC_COMM_UINT32_GET_BITS(promisc_entry->rsv1, buff[0], 0, 30); + + ZXIC_COMM_UINT32_GET_BITS(promisc_entry->rsv2, buff[1], 0, 32); + + ZXIC_COMM_UINT32_GET_BITS(bitmap, buff[2], 0, 32); + promisc_entry->bitmap = (((ZXIC_UINT64)bitmap) << 32); + + ZXIC_COMM_UINT32_GET_BITS(bitmap, buff[3], 0, 32); + promisc_entry->bitmap |= bitmap; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_vhca_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_VHCA_T *vhca_entry = (ZXDH_VHCA_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vhca_entry->valid, 31, 1); + 
ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vhca_entry->rsv1, 11, 20); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vhca_entry->vqm_vfid, 0, 11); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vhca_entry->rsv2, 0, 32); + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_vhca_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_VHCA_T *vhca_entry = (ZXDH_VHCA_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(vhca_entry->valid, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(vhca_entry->rsv1, buff[0], 11, 20); + ZXIC_COMM_UINT32_GET_BITS(vhca_entry->vqm_vfid, buff[0], 0, 11); + + ZXIC_COMM_UINT32_GET_BITS(vhca_entry->rsv2, buff[1], 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_network_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_NETWORK_ATTR_T *network_attr = (ZXDH_NETWORK_ATTR_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], network_attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], network_attr->single_pipe, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], network_attr->three_plane_aggr, 29, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], network_attr->sdn_dyn_sriov_cni, + 28, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], network_attr->upf, 27, 1); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_network_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_NETWORK_ATTR_T *network_attr = (ZXDH_NETWORK_ATTR_T *)pData; + + ZXIC_COMM_MEMSET_S(network_attr, sizeof(ZXDH_NETWORK_ATTR_T), 0x0, + sizeof(ZXDH_NETWORK_ATTR_T)); + ZXIC_COMM_UINT32_GET_BITS(network_attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(network_attr->single_pipe, buff[0], 30, 1); + ZXIC_COMM_UINT32_GET_BITS(network_attr->three_plane_aggr, buff[0], 29, + 1); + ZXIC_COMM_UINT32_GET_BITS(network_attr->sdn_dyn_sriov_cni, buff[0], 28, + 1); + ZXIC_COMM_UINT32_GET_BITS(network_attr->upf, buff[0], 27, 1); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_vport_traffic_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_VPORT_TRAFFIC_ATTR_T *vport_attr = + (ZXDH_VPORT_TRAFFIC_ATTR_T 
*)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vport_attr->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], vport_attr->vport_traffic_attr.ovs_attr.is_passthrough, + 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], + vport_attr->vport_traffic_attr.ovs_attr.uplink_vqm_vfid, 15, + 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_vport_traffic_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_VPORT_TRAFFIC_ATTR_T *vport_attr = + (ZXDH_VPORT_TRAFFIC_ATTR_T *)pData; + + ZXIC_COMM_MEMSET_S(vport_attr, sizeof(ZXDH_VPORT_TRAFFIC_ATTR_T), 0x0, + sizeof(ZXDH_VPORT_TRAFFIC_ATTR_T)); + ZXIC_COMM_UINT32_GET_BITS(vport_attr->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS( + vport_attr->vport_traffic_attr.ovs_attr.is_passthrough, buff[0], + 30, 1); + ZXIC_COMM_UINT32_GET_BITS( + vport_attr->vport_traffic_attr.ovs_attr.uplink_vqm_vfid, + buff[0], 15, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_vqm_vfid_vlan_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_VQM_VFID_VLAN_T *vlan_entry = (ZXDH_VQM_VFID_VLAN_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_entry->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], vlan_entry->sriov_business_vlan_filter, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], vlan_entry->sriov_business_qinq_vlan_strip_offload, 29, + 1); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], vlan_entry->sriov_business_vlan_strip_offload, 28, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], vlan_entry->rsv, 16, 12); + ZXIC_COMM_UINT32_WRITE_BITS( + buff[0], vlan_entry->sriov_business_vlan_tpid, 0, 16); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_entry->sriov_vlan_tpid, 16, + 16); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], vlan_entry->sriov_vlan_tci, 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_vqm_vfid_vlan_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_VQM_VFID_VLAN_T *vlan_entry = (ZXDH_VQM_VFID_VLAN_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->hit_flag, buff[0], 31, 1); + 
ZXIC_COMM_UINT32_GET_BITS(vlan_entry->sriov_business_vlan_filter, + buff[0], 30, 1); + ZXIC_COMM_UINT32_GET_BITS( + vlan_entry->sriov_business_qinq_vlan_strip_offload, buff[0], 29, + 1); + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->sriov_business_vlan_strip_offload, + buff[0], 28, 1); + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->rsv, buff[0], 16, 12); + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->sriov_business_vlan_tpid, buff[0], + 0, 16); + + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->sriov_vlan_tpid, buff[1], 16, 16); + ZXIC_COMM_UINT32_GET_BITS(vlan_entry->sriov_vlan_tci, buff[1], 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_fd_index_mng(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_FD_INDEX_MNG_T *fd_index_mng_entry = (ZXDH_FD_INDEX_MNG_T *)pData; + + ZXIC_COMM_CHECK_POINT(fd_index_mng_entry); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], fd_index_mng_entry->hit_flag, 31, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], fd_index_mng_entry->rsv, 16, 15); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], fd_index_mng_entry->vport, 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_fd_index_mng(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_FD_INDEX_MNG_T *fd_index_mng_entry = (ZXDH_FD_INDEX_MNG_T *)pData; + + ZXIC_COMM_CHECK_POINT(fd_index_mng_entry); + ZXIC_COMM_UINT32_GET_BITS(fd_index_mng_entry->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(fd_index_mng_entry->rsv, buff[0], 16, 15); + ZXIC_COMM_UINT32_GET_BITS(fd_index_mng_entry->vport, buff[0], 0, 16); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_cap_keyword_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_PKT_CAP_KW_MODE_T *kw_mode = (ZXDH_PKT_CAP_KW_MODE_T *)pData; + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], kw_mode->hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], kw_mode->rule1_key_word_len, 16, + 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], kw_mode->rule1_key_word_off, 0, + 13); + + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], kw_mode->rule2_key_word_len, 16, + 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], 
kw_mode->rule2_key_word_off, 0, + 13); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_cap_keyword_attr_data(ZXIC_VOID *pData, + ZXIC_UINT32 buff[4]) +{ + ZXDH_PKT_CAP_KW_MODE_T *kw_mode = (ZXDH_PKT_CAP_KW_MODE_T *)pData; + + ZXIC_COMM_UINT32_GET_BITS(kw_mode->hit_flag, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(kw_mode->rule1_key_word_len, buff[0], 16, 4); + ZXIC_COMM_UINT32_GET_BITS(kw_mode->rule1_key_word_off, buff[0], 0, 13); + + ZXIC_COMM_UINT32_GET_BITS(kw_mode->rule2_key_word_len, buff[1], 16, 4); + ZXIC_COMM_UINT32_GET_BITS(kw_mode->rule2_key_word_off, buff[1], 0, 13); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_stat_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_STAT_ATTR_T *stat_attr = (ZXDH_STAT_ATTR_T *)pData; + + ZXIC_COMM_CHECK_POINT(stat_attr); + ZXIC_COMM_MEMSET_S(buff, sizeof(ZXIC_UINT32) * 4, 0x0, + sizeof(ZXIC_UINT32) * 4); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], stat_attr->valid, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[0], stat_attr->mode, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(buff[1], stat_attr->addr_offset, 0, 32); + ZXIC_COMM_UINT32_WRITE_BITS(buff[2], stat_attr->depth, 0, 32); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_stat_attr_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4]) +{ + ZXDH_STAT_ATTR_T *stat_attr = (ZXDH_STAT_ATTR_T *)pData; + + ZXIC_COMM_CHECK_POINT(stat_attr); + ZXIC_COMM_MEMSET_S(stat_attr, sizeof(ZXDH_STAT_ATTR_T), 0x0, + sizeof(ZXDH_STAT_ATTR_T)); + ZXIC_COMM_UINT32_GET_BITS(stat_attr->valid, buff[0], 31, 1); + ZXIC_COMM_UINT32_GET_BITS(stat_attr->mode, buff[0], 30, 1); + ZXIC_COMM_UINT32_GET_BITS(stat_attr->addr_offset, buff[1], 0, 32); + ZXIC_COMM_UINT32_GET_BITS(stat_attr->depth, buff[2], 0, 32); + + return DPP_OK; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_hash.c b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_hash.c new file mode 100644 index 000000000000..569946b3eab4 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_hash.c @@ -0,0 +1,430 @@ +#include "dpp_apt_se_api.h" +#include "dpp_se_api.h" +#include "dpp_apt_se.h" +#include "dpp_sdt.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_drv_sdt.h" +#include "dpp_drv_hash.h" +#include "dpp_kernel_init.h" + +static SE_APT_HASH_CONVERT_T g_se_hash_callback[] = { + { ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0, dpp_apt_set_l2entry_data, + dpp_apt_get_l2entry_data }, + { ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT1, dpp_apt_set_l2entry_data, + dpp_apt_get_l2entry_data }, + { ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT2, dpp_apt_set_l2entry_data, + dpp_apt_get_l2entry_data }, + { ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT3, dpp_apt_set_l2entry_data, + dpp_apt_get_l2entry_data }, + { ZXDH_SDT_MC_TABLE_PHYPORT0, dpp_apt_set_mc_data, + dpp_apt_get_mc_data }, + { ZXDH_SDT_MC_TABLE_PHYPORT1, dpp_apt_set_mc_data, + dpp_apt_get_mc_data }, + { ZXDH_SDT_MC_TABLE_PHYPORT2, dpp_apt_set_mc_data, + dpp_apt_get_mc_data }, + { ZXDH_SDT_MC_TABLE_PHYPORT3, dpp_apt_set_mc_data, + dpp_apt_get_mc_data }, + { ZXDH_SDT_RDMA_ENTRY_TABLE, dpp_apt_set_rdma_trans_data, + dpp_apt_get_rdma_trans_data } +}; + +SE_APT_HASH_CONVERT_T *se_hash_callback_get(ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 num = 0; + + num = sizeof(g_se_hash_callback) / sizeof(SE_APT_HASH_CONVERT_T); + for (index = 0; index < num; index++) { + if (g_se_hash_callback[index].sdt_no == sdt_no) { + return &g_se_hash_callback[index]; + } + } + + return NULL; +} + +ZXIC_UINT32 dpp_apt_set_l2entry_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry) +{ + ZXIC_UINT32 key = 0; + ZXIC_UINT32 rst = 0; + ZXDH_L2_ENTRY_T *pL2Entry = NULL; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_key); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + pL2Entry = (ZXDH_L2_ENTRY_T *)pData; + + ZXIC_COMM_MEMCPY(pEntry->p_key + 1, pL2Entry->key.dmac_addr, 6); + + ZXIC_COMM_UINT32_WRITE_BITS(key, pL2Entry->key.sriov_vlan_tpid, 
16, 16); + ZXIC_COMM_UINT32_WRITE_BITS(key, pL2Entry->key.sriov_vlan_id, 0, 16); + zxic_comm_swap((ZXIC_UINT8 *)&key, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_key + 7, &key, sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(rst, pL2Entry->entry.hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, pL2Entry->entry.rsv, 11, 20); + ZXIC_COMM_UINT32_WRITE_BITS(rst, pL2Entry->entry.vqm_vfid, 0, 11); + + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst, &rst, sizeof(ZXIC_UINT32)); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_l2entry_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry) +{ + ZXDH_L2_ENTRY_T *pL2Entry = NULL; + + ZXIC_UINT32 key = 0; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + pL2Entry = (ZXDH_L2_ENTRY_T *)pData; + + key = *(ZXIC_UINT32 *)(pEntry->p_key + 7); + zxic_comm_swap((ZXIC_UINT8 *)&key, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(pL2Entry->key.sriov_vlan_tpid, key, 16, 16); + ZXIC_COMM_UINT32_GET_BITS(pL2Entry->key.sriov_vlan_id, key, 0, 16); + + ZXIC_COMM_MEMCPY(pL2Entry->key.dmac_addr, pEntry->p_key + 1, 6); + + zxic_comm_swap(pEntry->p_rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(pL2Entry->entry.hit_flag, + *(ZXIC_UINT32 *)pEntry->p_rst, 31, 1); + ZXIC_COMM_UINT32_GET_BITS(pL2Entry->entry.rsv, + *(ZXIC_UINT32 *)pEntry->p_rst, 11, 20); + ZXIC_COMM_UINT32_GET_BITS(pL2Entry->entry.vqm_vfid, + *(ZXIC_UINT32 *)pEntry->p_rst, 0, 11); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_mc_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry) +{ + ZXIC_UINT32 key = 0; + ZXIC_UINT32 rst = 0; + + ZXDH_MC_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_key); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + mc_table = (ZXDH_MC_T *)pData; + + ZXIC_COMM_UINT32_WRITE_BITS(key, mc_table->key.rsv, 18, 14); + ZXIC_COMM_UINT32_WRITE_BITS(key, mc_table->key.group_id, 16, 2); 
+ + zxic_comm_swap((ZXIC_UINT8 *)&key, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_key + 1, &key, sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMCPY(pEntry->p_key + 3, mc_table->key.mc_mac, 6); + + ZXIC_COMM_UINT32_WRITE_BITS(rst, mc_table->entry.hit_flag, 31, 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, mc_table->entry.mc_pf_enable, 30, 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, mc_table->entry.rsv1, 0, 30); + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst, &rst, sizeof(ZXIC_UINT32)); + + ZXIC_COMM_UINT32_WRITE_BITS(rst, mc_table->entry.rsv2, 0, 32); + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst + 4, &rst, sizeof(ZXIC_UINT32)); + + rst = mc_table->entry.mc_bitmap >> 32; + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst + 8, &rst, sizeof(ZXIC_UINT32)); + + rst = mc_table->entry.mc_bitmap; + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst + 12, &rst, sizeof(ZXIC_UINT32)); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_mc_data(ZXIC_VOID *pData, DPP_HASH_ENTRY *pEntry) +{ + ZXIC_UINT32 key = 0; + ZXIC_UINT32 rst = 0; + + ZXDH_MC_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_key); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + mc_table = (ZXDH_MC_T *)pData; + + key = *(ZXIC_UINT32 *)(pEntry->p_key + 1); + zxic_comm_swap((ZXIC_UINT8 *)&key, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(mc_table->key.rsv, key, 18, 14); + ZXIC_COMM_UINT32_GET_BITS(mc_table->key.group_id, key, 16, 2); + + ZXIC_COMM_MEMCPY(mc_table->key.mc_mac, pEntry->p_key + 3, 6); + + zxic_comm_swap(pEntry->p_rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(mc_table->entry.hit_flag, + *(ZXIC_UINT32 *)pEntry->p_rst, 31, 1); + ZXIC_COMM_UINT32_GET_BITS(mc_table->entry.mc_pf_enable, + *(ZXIC_UINT32 *)pEntry->p_rst, 30, 1); + 
ZXIC_COMM_UINT32_GET_BITS(mc_table->entry.rsv1, + *(ZXIC_UINT32 *)pEntry->p_rst, 0, 30); + + zxic_comm_swap(pEntry->p_rst + 4, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(mc_table->entry.rsv2, + *(ZXIC_UINT32 *)(pEntry->p_rst + 4), 0, 32); + + zxic_comm_swap(pEntry->p_rst + 8, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(rst, *(ZXIC_UINT32 *)(pEntry->p_rst + 8), 0, + 32); + mc_table->entry.mc_bitmap = (((ZXIC_UINT64)rst) << 32); + + zxic_comm_swap(pEntry->p_rst + 12, sizeof(ZXIC_UINT32)); + ZXIC_COMM_UINT32_GET_BITS(rst, *(ZXIC_UINT32 *)(pEntry->p_rst + 12), 0, + 32); + mc_table->entry.mc_bitmap |= rst; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_set_rdma_trans_data(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry) +{ + ZXIC_UINT32 key = 0; + ZXIC_UINT32 rst = 0; + + ZXDH_RDMA_TRANS_T *rdma_trans_table = NULL; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_key); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + rdma_trans_table = (ZXDH_RDMA_TRANS_T *)pData; + + ZXIC_COMM_MEMCPY(pEntry->p_key + 1, &key, 2); + + ZXIC_COMM_MEMCPY(pEntry->p_key + 3, rdma_trans_table->key.mac_addr, 6); + + ZXIC_COMM_UINT32_WRITE_BITS(rst, rdma_trans_table->entry.hit_flag, 31, + 1); + ZXIC_COMM_UINT32_WRITE_BITS(rst, rdma_trans_table->entry.rsv, 10, 21); + ZXIC_COMM_UINT32_WRITE_BITS(rst, rdma_trans_table->entry.rdma_vhca_id, + 0, 10); + + zxic_comm_swap((ZXIC_UINT8 *)&rst, sizeof(ZXIC_UINT32)); + ZXIC_COMM_MEMCPY(pEntry->p_rst, &rst, sizeof(ZXIC_UINT32)); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_apt_get_rdma_trans_data(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry) +{ + ZXDH_RDMA_TRANS_T *rdma_trans_table = NULL; + + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(pEntry); + ZXIC_COMM_CHECK_POINT(pEntry->p_rst); + + rdma_trans_table = (ZXDH_RDMA_TRANS_T *)pData; + + ZXIC_COMM_MEMCPY(rdma_trans_table->key.mac_addr, pEntry->p_key + 3, 6); + + zxic_comm_swap(pEntry->p_rst, sizeof(ZXIC_UINT32)); + 
ZXIC_COMM_UINT32_GET_BITS(rdma_trans_table->entry.hit_flag, + *(ZXIC_UINT32 *)pEntry->p_rst, 31, 1); + ZXIC_COMM_UINT32_GET_BITS(rdma_trans_table->entry.rsv, + *(ZXIC_UINT32 *)pEntry->p_rst, 10, 21); + ZXIC_COMM_UINT32_GET_BITS(rdma_trans_table->entry.rdma_vhca_id, + *(ZXIC_UINT32 *)pEntry->p_rst, 0, 10); + + return DPP_OK; +} + +DPP_STATUS dpp_apt_dtb_hash_table_unicast_mac_dump( + DPP_DEV_T *dev, ZXIC_UINT32 queue_id, ZXIC_UINT32 sdt_no, + ZXDH_L2_ENTRY_T *pHashDataArr, ZXIC_UINT32 *p_entry_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 max_item_num = DTB_DUMP_UNICAST_MAC_DUMP_NUM; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 entryNum = 0; + ZXIC_UINT8 *pDumpData = NULL; + ZXIC_UINT8 *pKey = NULL; + ZXIC_UINT8 *pRst = NULL; + + DPP_HASH_ENTRY *p_dump_hash_entry = NULL; + DPP_HASH_ENTRY *p_temp_entry = NULL; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + SE_APT_CALLBACK_T *pAptCallback = NULL; + ZXDH_L2_ENTRY_T *p_l2_mac_entry = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pHashDataArr); + ZXIC_COMM_CHECK_POINT(p_entry_num); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + rc = dpp_hash_max_item_num_get(dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_hash_max_item_num_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + //分配空间 + pDumpData = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(DPP_HASH_ENTRY)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pDumpData); + pKey = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pKey, pDumpData); + pRst = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_RST_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT(pRst, pKey, pDumpData); + + ZXIC_COMM_MEMSET_S(pDumpData, max_item_num * sizeof(DPP_HASH_ENTRY), + 0x0, max_item_num * sizeof(DPP_HASH_ENTRY)); + 
ZXIC_COMM_MEMSET_S(pKey, max_item_num * HASH_KEY_MAX, 0x0, + max_item_num * HASH_KEY_MAX); + ZXIC_COMM_MEMSET_S(pRst, max_item_num * HASH_RST_MAX, 0x0, + max_item_num * HASH_RST_MAX); + + p_dump_hash_entry = (DPP_HASH_ENTRY *)pDumpData; + for (index = 0; index < max_item_num; index++) { + p_temp_entry = p_dump_hash_entry + index; + p_temp_entry->p_key = pKey + index * HASH_KEY_MAX; + p_temp_entry->p_rst = pRst + index * HASH_RST_MAX; + } + + rc = dpp_dtb_hash_dump(dev, queue_id, sdt_no, pDumpData, &entryNum); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_VFREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "dpp_apt_dtb_hash_dump", pRst, pKey, + pDumpData); + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_table_only_zcam_dump unicast entry_num: %d\n", + entryNum); + + for (index = 0; index < entryNum; index++) { + p_temp_entry = p_dump_hash_entry + index; + p_l2_mac_entry = pHashDataArr + index; + // //打印数据 + dpp_dtb_data_print( + p_temp_entry->p_key, + DPP_GET_ACTU_KEY_BY_SIZE(sdt_hash_info.key_size) + 1); + dpp_dtb_data_print(p_temp_entry->p_rst, + 4 * (0x1 << sdt_hash_info.rsp_mode)); + + rc = pAptCallback->se_func_info.hashFunc.hash_get_func( + p_l2_mac_entry, p_temp_entry); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_VFREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "hash_set_func", pRst, pKey, + pDumpData); + } + + *p_entry_num = entryNum; + + ZXIC_COMM_VFREE(pKey); + ZXIC_COMM_VFREE(pRst); + ZXIC_COMM_VFREE(pDumpData); + + return DPP_OK; +} + +DPP_STATUS dpp_apt_dtb_hash_table_multicast_mac_dump(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXDH_MC_T *pHashDataArr, + ZXIC_UINT32 *p_entry_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 max_item_num = DTB_DUMP_MULTICAST_MAC_DUMP_NUM; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 entryNum = 0; + ZXIC_UINT8 *pDumpData = NULL; + ZXIC_UINT8 *pKey = NULL; + ZXIC_UINT8 *pRst = NULL; + + DPP_HASH_ENTRY *p_dump_hash_entry = NULL; + DPP_HASH_ENTRY *p_temp_entry = NULL; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + SE_APT_CALLBACK_T *pAptCallback = NULL; + 
ZXDH_MC_T *p_multicast_mac_data = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pHashDataArr); + ZXIC_COMM_CHECK_POINT(p_entry_num); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + rc = dpp_hash_max_item_num_get(dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_hash_max_item_num_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + //分配空间 + pDumpData = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(DPP_HASH_ENTRY)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pDumpData); + pKey = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pKey, pDumpData); + pRst = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_RST_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT(pRst, pKey, pDumpData); + + ZXIC_COMM_MEMSET_S(pDumpData, max_item_num * sizeof(DPP_HASH_ENTRY), + 0x0, max_item_num * sizeof(DPP_HASH_ENTRY)); + ZXIC_COMM_MEMSET_S(pKey, max_item_num * HASH_KEY_MAX, 0x0, + max_item_num * HASH_KEY_MAX); + ZXIC_COMM_MEMSET_S(pRst, max_item_num * HASH_RST_MAX, 0x0, + max_item_num * HASH_RST_MAX); + + p_dump_hash_entry = (DPP_HASH_ENTRY *)pDumpData; + for (index = 0; index < max_item_num; index++) { + p_temp_entry = p_dump_hash_entry + index; + p_temp_entry->p_key = pKey + index * HASH_KEY_MAX; + p_temp_entry->p_rst = pRst + index * HASH_RST_MAX; + } + + rc = dpp_dtb_hash_dump(dev, queue_id, sdt_no, pDumpData, &entryNum); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_VFREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "dpp_apt_dtb_hash_dump", pRst, pKey, + pDumpData); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_table_only_zcam_dump multicast entry_num: %d\n", + entryNum); + + for (index = 0; index < entryNum; index++) { + p_temp_entry = p_dump_hash_entry + index; + p_multicast_mac_data = pHashDataArr + index; + // //打印数据 + 
dpp_dtb_data_print( + p_temp_entry->p_key, + DPP_GET_ACTU_KEY_BY_SIZE(sdt_hash_info.key_size) + 1); + dpp_dtb_data_print(p_temp_entry->p_rst, + 4 * (0x1 << sdt_hash_info.rsp_mode)); + + rc = pAptCallback->se_func_info.hashFunc.hash_get_func( + p_multicast_mac_data, p_temp_entry); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_VFREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "hash_get_func", pRst, pKey, + pDumpData); + } + + *p_entry_num = entryNum; + + ZXIC_COMM_VFREE(pKey); + ZXIC_COMM_VFREE(pRst); + ZXIC_COMM_VFREE(pDumpData); + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_init.c b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_init.c new file mode 100644 index 000000000000..e8a9ed052d6c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/driver/source/dpp_drv_init.c @@ -0,0 +1,276 @@ +#include "dpp_apt_se_api.h" +#include "dpp_stat_api.h" +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_hash.h" +#include "dpp_apt_se.h" +#include "dpp_tbl_pkt_cap.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +extern DPP_DEV_MGR_T *dpp_dev_mgr_get(ZXIC_VOID); + +#define DPP_FLOW_INIT_START ((ZXIC_UINT32)(0)) +#define DPP_FLOW_INIT_SUCCESS ((ZXIC_UINT32)(1)) + +#define DPP_FLOW_INIT_STATUS_CHECK(dev) \ + do { \ + if ((DEV_PCIE_SLOT(dev) < DPP_PCIE_SLOT_MAX) && \ + (DPP_FLOW_INIT_SUCCESS != \ + dpp_flow_init_status[DEV_PCIE_SLOT(dev)])) { \ + return DPP_OK; \ + } \ + } while (0) + +#define DPP_FLOW_INIT_SUCCESS_STATUS_CHECK(dev) \ + do { \ + if ((DEV_PCIE_SLOT(dev) < DPP_PCIE_SLOT_MAX) && \ + (DPP_FLOW_INIT_SUCCESS == \ + dpp_flow_init_status[DEV_PCIE_SLOT(dev)])) { \ + return DPP_OK; \ + } \ + } while (0) + +ZXIC_UINT32 dpp_flow_init_status[DPP_PCIE_SLOT_MAX] = { DPP_FLOW_INIT_START }; + +ZXIC_VOID dpp_flow_init_status_init(ZXIC_VOID) +{ + ZXIC_COMM_MEMSET_S(dpp_flow_init_status, sizeof(dpp_flow_init_status), + DPP_FLOW_INIT_START, sizeof(dpp_flow_init_status)); 
+} + +DPP_STATUS dpp_flow_init_status_set(DPP_DEV_T *dev, ZXIC_UINT32 status) +{ + ZXIC_UINT32 slot = 0; + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(status, DPP_FLOW_INIT_START, + DPP_FLOW_INIT_SUCCESS); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, (DPP_PCIE_SLOT_MAX - 1)); + + dpp_flow_init_status[slot] = status; + + return DPP_OK; +} + +static DPP_STATUS dpp_drv_se_func_set(DPP_APT_SE_RES_T *p_se_res) +{ + ZXIC_UINT32 index = 0; + SE_APT_ERAM_CONVERT_T *pAptEramCov = NULL; + SE_APT_ACL_CONVERT_T *pAptAclCov = NULL; + SE_APT_HASH_CONVERT_T *pAptHashCov = NULL; + DPP_APT_ERAM_TABLE_T *pTempEramTbl = NULL; + DPP_APT_ACL_TABLE_T *pTempAclTbl = NULL; + DPP_APT_HASH_TABLE_T *pTempHashTbl = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_res); + + for (index = 0; index < (p_se_res->eram_num); index++) { + pTempEramTbl = &(p_se_res->eram_tbl[index]); + pAptEramCov = se_eram_callback_get(pTempEramTbl->sdtNo); + if (pAptEramCov) { + pTempEramTbl->eram_set_func = + pAptEramCov->eram_set_func; + pTempEramTbl->eram_get_func = + pAptEramCov->eram_get_func; + } + } + for (index = 0; index < (p_se_res->acl_num); index++) { + pTempAclTbl = &(p_se_res->acl_tbl[index]); + pAptAclCov = se_acl_callback_get(pTempAclTbl->sdtNo); + if (pAptAclCov) { + pTempAclTbl->acl_set_func = pAptAclCov->acl_set_func; + pTempAclTbl->acl_get_func = pAptAclCov->acl_get_func; + } + } + for (index = 0; index < (p_se_res->hash_tbl_num); index++) { + pTempHashTbl = &(p_se_res->hash_tbl[index]); + pAptHashCov = se_hash_callback_get(pTempHashTbl->sdtNo); + if (pAptHashCov) { + pTempHashTbl->hash_set_func = + pAptHashCov->hash_set_func; + pTempHashTbl->hash_get_func = + pAptHashCov->hash_get_func; + } + } + return DPP_OK; +} + +DPP_STATUS dpp_bar_msg_num_init(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 bar_msg_num = 0xFFFFFFFF; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + 
dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + DPP_FLOW_INIT_SUCCESS_STATUS_CHECK(dev); + + slot = dev->pcie_channel.slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + rc = dpp_pcie_bar_msg_num_get(dev, &bar_msg_num); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_pcie_bar_msg_num_get"); + + p_dev_info->bar_msg_num[slot] = bar_msg_num; + dev->pcie_channel.bar_msg_num = bar_msg_num; + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x bar_msg_num: %u.\n", + __FUNCTION__, slot, dev->pcie_channel.vport, + bar_msg_num); + return DPP_OK; +} + +DPP_STATUS dpp_flow_init(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { 0 }; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_PCIE_SLOT(dev), 0, DPP_PCIE_SLOT_MAX - 1); + DPP_FLOW_INIT_SUCCESS_STATUS_CHECK(dev); + + pf_info.slot = dev->pcie_channel.slot; + pf_info.vport = dev->pcie_channel.vport; + + ZXIC_COMM_TRACE_NOTICE("[%s] slot:%d start.\n", __FUNCTION__, + DEV_PCIE_SLOT(dev)); + + rc = dpp_se_res_mem_alloc(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_res_mem_alloc"); + + rc = dpp_agent_se_res_get(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_agent_se_res_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + + rc = dpp_drv_se_func_set(p_se_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_drv_se_func_set"); + + // hash init + rc = dpp_apt_hash_global_res_init(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_global_res_init"); + + rc = dpp_apt_hash_func_res_init(dev, p_se_res->hash_func_num, + 
p_se_res->hash_func); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_func_res_init"); + + rc = dpp_apt_hash_bulk_res_init(dev, p_se_res->hash_bulk_num, + p_se_res->hash_bulk); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_bulk_res_init"); + + // tbl-res must be initialized after fun-res and buld-res + rc = dpp_apt_hash_tbl_res_init(dev, p_se_res->hash_tbl_num, + p_se_res->hash_tbl); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_tbl_res_init"); + + // eram init + rc = dpp_apt_eram_res_init(dev, p_se_res->eram_num, p_se_res->eram_tbl); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_eram_res_init"); + + // init acl + rc = dpp_apt_acl_res_init(dev, p_se_res->acl_num, p_se_res->acl_tbl); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_acl_res_init"); + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_stat_ppu_eram_baddr_set(dev, p_se_res->stat_cfg.eram_baddr); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_baddr_set"); + + rc = dpp_stat_ppu_eram_depth_set( + dev, p_se_res->stat_cfg.eram_depth); //表项深度以128bit为单位 + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_eram_depth_set"); +#endif + + rc = dpp_pkt_capture_init(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_init"); + + rc = dpp_stat_tbl_get(dev, p_se_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_tbl_get"); + + rc = dpp_flow_init_status_set(dev, DPP_FLOW_INIT_SUCCESS); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_init_status_set"); + + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); + + return DPP_OK; +} + +DPP_STATUS dpp_flow_uninit(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 last_flag = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = dev->pcie_channel.slot; + pf_info.vport = dev->pcie_channel.vport; + + rc = dpp_dev_last_check(dev, &last_flag); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_last_check"); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, (DPP_PCIE_SLOT_MAX - 1)); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot[%d] last_flag[%d] start.\n", + __FUNCTION__, slot, last_flag); + + if (last_flag) { + rc = dpp_hash_soft_uninstall(dev); + 
ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_hash_soft_uninstall"); + + rc = dpp_apt_hash_global_res_uninit(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_global_res_uninit"); + + rc = dpp_apt_acl_soft_res_uninit(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_acl_global_res_uninit"); + + rc = dpp_se_res_mem_free(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_res_mem_free"); + + dpp_flow_init_status[slot] = DPP_FLOW_INIT_START; + } else { + rc = dpp_unicast_all_mac_soft_delete(&pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_unicast_all_mac_soft_delete"); + + rc = dpp_multicast_all_mac_soft_delete(&pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, + "dpp_multicast_all_mac_soft_delete"); + } + + ZXIC_COMM_PRINT("[%s] slot[%d] success.\n", __FUNCTION__, slot); + + return DPP_OK; +} + +DPP_STATUS dpp_flow_data_all_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_APT_SE_RES_T *p_se_res = NULL; + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + rc = dpp_apt_hash_func_flush_hardware_all( + dev, p_se_res->hash_func_num, p_se_res->hash_func, queue_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_apt_hash_func_flush_hardware"); + + return rc; +} diff --git a/drivers/net/ethernet/dinghai/en_np/fc/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/fc/Kbuild.include new file mode 100644 index 000000000000..d26452cbb5c4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/fc/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/fc/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/fc/include/dpp_drv_fc.h b/drivers/net/ethernet/dinghai/en_np/fc/include/dpp_drv_fc.h new file mode 100644 index 000000000000..72b07f9990ef --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/fc/include/dpp_drv_fc.h @@ -0,0 +1,85 @@ +#ifndef _DPP_DRV_FC_H_ +#define _DPP_DRV_FC_H_ + +#include "zxic_common.h" 
+#include "dpp_pbu.h" +#include "dpp_pbu_api.h" +#include "dpp_drv_qos.h" + +/***********************************************************/ +/**对外接口 配置基于vport的端口指针阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para); + +/***********************************************************/ +/** 读取端口的阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para); + +/***********************************************************/ +/**对外接口 配置基于vport的端口指定端口按cos优先级起pfc流控的优先级流控指针阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_cos_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para); + +/***********************************************************/ +/** 读取指定端口中各cos的优先级流控指针阈值,仅对lif0的48个通道有效 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_cos_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para); + +/***********************************************************/ +/**对外接口 配置PFC防抖延时时间 +* @param pf_info PF信息 +* @param delayTime 延时时间(单位ns) +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 
+************************************************************/ +DPP_STATUS dpp_pfc_delay_time_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT64 delayTime); + +/***********************************************************/ +/**对外接口 获取PFC防抖延时时间 +* @param pf_info PF信息 +* @param delayTime 延时时间(单位ns) +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_pfc_delay_time_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT64 *delayTime); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/fc/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/fc/source/Kbuild.include new file mode 100644 index 000000000000..4cda9e269208 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/fc/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/fc/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/fc/source/dpp_drv_fc.c b/drivers/net/ethernet/dinghai/en_np/fc/source/dpp_drv_fc.c new file mode 100644 index 000000000000..eb1741cee030 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/fc/source/dpp_drv_fc.c @@ -0,0 +1,187 @@ +#include "dpp_drv_fc.h" + +/***********************************************************/ +/**对外接口 配置基于vport的端口指针阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_POINT(p_para); + + ret = 
dpp_pbu_port_th_set(&dev, port_id, p_para); + ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_port_th_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_port_th_set); + +/***********************************************************/ +/** 读取端口的阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_POINT(p_para); + + ret = dpp_pbu_port_th_get(&dev, port_id, p_para); + ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_port_th_get"); + + return ret; +} +EXPORT_SYMBOL(dpp_port_th_get); + +/***********************************************************/ +/**对外接口 配置基于vport的端口指定端口按cos优先级起pfc流控的优先级流控指针阈值 +* @param vport_id--vport号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_cos_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_POINT(p_para); + + ret = dpp_pbu_port_cos_th_set(&dev, port_id, p_para); + ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_port_cos_th_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_port_cos_th_set); + 
/**
 * Read back the per-CoS PFC flow-control pointer thresholds of a port.
 * Per the original docs this is only valid for the 48 channels of lif0.
 * @param pf_info PF information
 * @param port_id port number
 * @param p_para  out: per-CoS thresholds
 * @return DPP_OK on success, error status otherwise
 */
DPP_STATUS dpp_port_cos_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id,
			       DPP_PBU_PORT_COS_TH_PARA_T *p_para)
{
	DPP_STATUS ret = DPP_OK;
	DPP_DEV_T dev = { 0 };

	ZXIC_COMM_CHECK_POINT(pf_info);

	ret = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get");

	ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX);
	ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1);
	ZXIC_COMM_CHECK_POINT(p_para);

	ret = dpp_pbu_port_cos_th_get(&dev, port_id, p_para);
	ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_port_cos_th_get");

	return ret;
}
EXPORT_SYMBOL(dpp_port_cos_th_get);

/**
 * External API: configure the PFC anti-jitter (de-bounce) delay time.
 * @param pf_info   PF information
 * @param delayTime delay time, in nanoseconds
 * @return DPP_OK on success, error status otherwise
 */
DPP_STATUS dpp_pfc_delay_time_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT64 delayTime)
{
	DPP_STATUS ret = DPP_OK;
	DPP_DEV_T dev = { 0 };

	ZXIC_COMM_CHECK_POINT(pf_info);

	ret = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get");

	ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX);

	ret = dpp_pbu_pfc_delay_time_set(&dev, delayTime);
	ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_pfc_delay_time_set");

	return ret;
}
EXPORT_SYMBOL(dpp_pfc_delay_time_set);

/**
 * External API: read back the PFC anti-jitter (de-bounce) delay time.
 * @param pf_info   PF information
 * @param delayTime out: delay time, in nanoseconds
 * @return DPP_OK on success, error status otherwise
 */
DPP_STATUS dpp_pfc_delay_time_get(DPP_PF_INFO_T *pf_info,
				  ZXIC_UINT64 *delayTime)
{
	DPP_STATUS ret = DPP_OK;
	DPP_DEV_T dev = { 0 };

	ZXIC_COMM_CHECK_POINT(pf_info);
	ZXIC_COMM_CHECK_POINT(delayTime);

	ret = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get");

	ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX);

	ret = dpp_pbu_pfc_delay_time_get(&dev, delayTime);
	ZXIC_COMM_CHECK_RC(ret, "dpp_pbu_pfc_delay_time_get");

	return ret;
}
EXPORT_SYMBOL(dpp_pfc_delay_time_get);

# ==== en_np/flow/Kbuild.include ====
cur_dir := en_np/flow/
subdirs := common/source/ api/source/
src_files +=
include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include)

==== en_np/flow/README.txt (reproduced verbatim) ====
一、脚本说明
1、flow目录下有两个xml文件,zxdh_flow_attr_api和zxdh_flow_attr_demo,zxdh_flow_attr_api.xml编写请参照demo;
2、demo只作为示例使用,不参与编译;api作为编译脚本,用于生成api/source和api/include中的文件,对外提供api接口;
二、编译脚本使用说明
1、按照正确流表资源填写zxdh_flow_attr_api.xml,注意比特位要求连续且不能重叠;
2、在en_np/flow目录下执行python3 tool.py api,分别生成en_np/flow/api下的文件;
三、补充说明

注意:请确保python的解释器版本大于等于3.80

# ==== en_np/flow/api/Kbuild.include ====
cur_dir := en_np/flow/api/
subdirs := source/
src_files +=
include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include)

/* ==== en_np/flow/api/include/dpp_tbl_fd_cfg.h ==== */
#ifndef DPP_TBL_FD_CFG_H
#define DPP_TBL_FD_CFG_H

#include "zxic_common.h"

/* Flow-director rule key. Field widths mirror g_fd_cfg_fields in
 * dpp_flow_struct.c; keep the two in sync. */
typedef struct zxdh_fd_cfg_key {
	ZXIC_UINT8 dmac[6];	/* destination MAC */
	ZXIC_UINT8 smac[6];	/* source MAC */
	ZXIC_UINT32 ethtype;	/* Ethernet type (only low 16 bits used) */
	ZXIC_UINT16 cvlan_pri;	/* customer VLAN priority */
	ZXIC_UINT16 cvlanid;	/* customer VLAN id */
	ZXIC_UINT8 sip[16];	/* source IP (IPv4 or IPv6) */
	ZXIC_UINT8 dip[16];	/* destination IP (IPv4 or IPv6) */
	ZXIC_UINT8 rsv1;
	ZXIC_UINT8 tos;
	ZXIC_UINT8 proto;	/* IP protocol */
	ZXIC_UINT8 fragment;
	ZXIC_UINT16 sport;	/* L4 source port */
	ZXIC_UINT16 dport;	/* L4 destination port */
	ZXIC_UINT32 rsv2;
	ZXIC_UINT32 vxlan_vni;	/* VXLAN network id (24 bits) */
	ZXIC_UINT16 vqm_vfid;
	ZXIC_UINT16 rsv3;
} ZXDH_FD_CFG_KEY;

/* The mask has the same layout as the key. */
typedef ZXDH_FD_CFG_KEY ZXDH_FD_CFG_MASK;

/* Associated action/result of a flow-director rule. */
typedef struct zxdh_fd_cfg_as_rlt {
	ZXIC_UINT8 hit_flag;
	ZXIC_UINT8 action_index;
	ZXIC_UINT16 action_index2;
	ZXIC_UINT32 v_qid;	/* virtual queue id */
	ZXIC_UINT32 uplink_fd_id;
	ZXIC_UINT32 spec_port_vfid;
	ZXIC_UINT32 count_id;
	ZXIC_UINT16 hash_alg;
	ZXIC_UINT16 rss_hash_factor;
	ZXIC_UINT16 rsv3;
	ZXIC_UINT16 encap0_index;
} ZXDH_FD_CFG_AS_RLT;

/* One complete flow-director entry: key + mask + result. */
typedef struct zxdh_fd_cfg_t {
	ZXDH_FD_CFG_KEY key;
	ZXDH_FD_CFG_MASK mask;
	ZXDH_FD_CFG_AS_RLT as_rlt;
} ZXDH_FD_CFG_T;

ZXIC_UINT32 dpp_tbl_fd_cfg_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg);
ZXIC_UINT32 dpp_tbl_fd_cfg_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle);
ZXIC_UINT32 dpp_tbl_fd_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg);
ZXIC_UINT32 dpp_tbl_fd_cfg_search(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
				  ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg);

#endif
# ==== en_np/flow/api/source/Kbuild.include ====
cur_dir := en_np/flow/api/source/
src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c)))

/* ==== en_np/flow/api/source/dpp_fd_cfg_api.c ==== */
#include "dpp_flow_comm.h"
#include "dpp_tbl_fd_cfg.h"
#include "dpp_dev.h"

/**
 * Insert a flow-director ACL entry via the DTB queue.
 * Takes the per-table PCIe lock around the hardware update.
 * @param pf_info  PF information
 * @param sdt_no   SDT (table) number
 * @param handle   entry handle/index within the table
 * @param p_fd_cfg entry to program (key + mask + result)
 * @return DPP_OK on success, error status otherwise
 */
ZXIC_UINT32 dpp_tbl_fd_cfg_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg)
{
	DPP_DEV_T dev = { 0 };

	ZXIC_UINT32 queue = 0;
	ZXIC_UINT32 rc = DPP_OK;

	ZXIC_COMM_CHECK_POINT(pf_info);
	ZXIC_COMM_CHECK_POINT(p_fd_cfg);

	rc = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");

	rc = dpp_dtb_queue_id_get(&dev, &queue);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");

	/* serialize access to this table across vports */
	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));

	/* _UNLOCK variant releases the table lock on failure */
	rc = dpp_apt_dtb_acl_entry_insert_ex(&dev, queue, sdt_no, handle,
					     p_fd_cfg);
	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_insert_ex",
				  DEV_PCIE_LOCK(&dev));

	rc = dpp_vport_table_unlock(pf_info, sdt_no);
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");

	return DPP_OK;
}
EXPORT_SYMBOL(dpp_tbl_fd_cfg_add);

/**
 * Delete a flow-director ACL entry by handle.
 * Locking mirrors dpp_tbl_fd_cfg_add().
 */
ZXIC_UINT32 dpp_tbl_fd_cfg_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle)
{
	DPP_DEV_T dev = { 0 };

	ZXIC_UINT32 queue = 0;
	ZXIC_UINT32 rc = DPP_OK;

	ZXIC_COMM_CHECK_POINT(pf_info);

	rc = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");

	rc = dpp_dtb_queue_id_get(&dev, &queue);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");

	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));

	rc = dpp_apt_dtb_acl_entry_del_ex(&dev, queue, sdt_no, handle);
	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_del_ex",
				  DEV_PCIE_LOCK(&dev));

	rc = dpp_vport_table_unlock(pf_info, sdt_no);
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");

	return DPP_OK;
}
EXPORT_SYMBOL(dpp_tbl_fd_cfg_del);

/**
 * Read back a flow-director ACL entry by handle.
 * Locking mirrors dpp_tbl_fd_cfg_add().
 */
ZXIC_UINT32 dpp_tbl_fd_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
			       ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg)
{
	DPP_DEV_T dev = { 0 };

	ZXIC_UINT32 queue = 0;
	ZXIC_UINT32 rc = DPP_OK;

	ZXIC_COMM_CHECK_POINT(pf_info);
	ZXIC_COMM_CHECK_POINT(p_fd_cfg);

	rc = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");

	rc = dpp_dtb_queue_id_get(&dev, &queue);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");

	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));

	rc = dpp_apt_dtb_acl_entry_get_ex(&dev, queue, sdt_no, handle,
					  p_fd_cfg);
	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_get_ex",
				  DEV_PCIE_LOCK(&dev));

	rc = dpp_vport_table_unlock(pf_info, sdt_no);
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");

	return DPP_OK;
}
EXPORT_SYMBOL(dpp_tbl_fd_cfg_get);

/**
 * Search for a flow-director ACL entry.
 * Locking mirrors dpp_tbl_fd_cfg_add().
 */
ZXIC_UINT32 dpp_tbl_fd_cfg_search(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
				  ZXIC_UINT32 handle, ZXDH_FD_CFG_T *p_fd_cfg)
{
	DPP_DEV_T dev = { 0 };

	ZXIC_UINT32 queue = 0;
	ZXIC_UINT32 rc = DPP_OK;

	ZXIC_COMM_CHECK_POINT(pf_info);
	ZXIC_COMM_CHECK_POINT(p_fd_cfg);

	rc = dpp_dev_get(pf_info, &dev);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");

	rc = dpp_dtb_queue_id_get(&dev, &queue);
	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");

	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));

	rc = dpp_apt_dtb_acl_entry_search_ex(&dev, queue, sdt_no, handle,
					     p_fd_cfg);
	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_search_ex",
				  DEV_PCIE_LOCK(&dev));

	rc = dpp_vport_table_unlock(pf_info, sdt_no);
	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");

	return DPP_OK;
}
EXPORT_SYMBOL(dpp_tbl_fd_cfg_search);

/* ==== en_np/flow/api/source/dpp_flow_struct.c (generated by tool.py) ==== */
#include "dpp_flow_comm.h"
#include "dpp_tbl_fd_cfg.h"

/* Bit layout of the fd_cfg table.
 * Columns: name, flags, array_num, element_size(bytes), msb_pos, len(bits).
 * Generated table — do not hand-edit; regenerate from zxdh_flow_attr_api.xml. */
ZXDH_FLOW_ATTR_FIELD_T g_fd_cfg_fields[] = {
	{ "dmac", DPP_ATTR_FLAG_KEY, 6, 1, 639, 48 },
	{ "smac", DPP_ATTR_FLAG_KEY, 6, 1, 591, 48 },
	{ "ethtype", DPP_ATTR_FLAG_KEY, 1, 4, 543, 16 },
	{ "cvlan_pri", DPP_ATTR_FLAG_KEY, 1, 2, 527, 4 },
	{ "cvlanid", DPP_ATTR_FLAG_KEY, 1, 2, 523, 12 },
	{ "sip", DPP_ATTR_FLAG_KEY, 16, 1, 511, 128 },
	{ "dip", DPP_ATTR_FLAG_KEY, 16, 1, 383, 128 },
	{ "rsv1", DPP_ATTR_FLAG_KEY, 1, 1, 255, 8 },
	{ "tos", DPP_ATTR_FLAG_KEY, 1, 1, 247, 8 },
	{ "proto", DPP_ATTR_FLAG_KEY, 1, 1, 239, 8 },
	{ "fragment", DPP_ATTR_FLAG_KEY, 1, 1, 231, 8 },
	{ "sport", DPP_ATTR_FLAG_KEY, 1, 2, 223, 16 },
	{ "dport", DPP_ATTR_FLAG_KEY, 1, 2, 207, 16 },
	{ "rsv2", DPP_ATTR_FLAG_KEY, 1, 4, 191, 8 },
	{ "vxlan_vni", DPP_ATTR_FLAG_KEY, 1, 4, 183, 24 },
	{ "vqm_vfid", DPP_ATTR_FLAG_KEY, 1, 2, 159, 16 },
	{ "rsv3", DPP_ATTR_FLAG_KEY, 1, 2, 143, 16 },
	{ "dmac", DPP_ATTR_FLAG_MASK, 6, 1, 639, 48 },
	{ "smac", DPP_ATTR_FLAG_MASK, 6, 1, 591, 48 },
	{ "ethtype", DPP_ATTR_FLAG_MASK, 1, 4, 543, 16 },
	{ "cvlan_pri", DPP_ATTR_FLAG_MASK, 1, 2, 527, 4 },
	{ "cvlanid", DPP_ATTR_FLAG_MASK, 1, 2, 523, 12 },
	{ "sip", DPP_ATTR_FLAG_MASK, 16, 1, 511, 128 },
	{ "dip", DPP_ATTR_FLAG_MASK, 16, 1, 383, 128 },
	{ "rsv1", DPP_ATTR_FLAG_MASK, 1, 1, 255, 8 },
	{ "tos", DPP_ATTR_FLAG_MASK, 1, 1, 247, 8 },
	{ "proto", DPP_ATTR_FLAG_MASK, 1, 1, 239, 8 },
	{ "fragment", DPP_ATTR_FLAG_MASK, 1, 1, 231, 8 },
	{ "sport", DPP_ATTR_FLAG_MASK, 1, 2, 223, 16 },
	{ "dport", DPP_ATTR_FLAG_MASK, 1, 2, 207, 16 },
	{ "rsv2", DPP_ATTR_FLAG_MASK, 1, 4, 191, 8 },
	{ "vxlan_vni", DPP_ATTR_FLAG_MASK, 1, 4, 183, 24 },
	{ "vqm_vfid", DPP_ATTR_FLAG_MASK, 1, 2, 159, 16 },
	{ "rsv3", DPP_ATTR_FLAG_MASK, 1, 2, 143, 16 },
	{ "hit_flag", DPP_ATTR_FLAG_RST, 1, 1, 127, 1 },
	{ "action_index", DPP_ATTR_FLAG_RST, 1, 1, 126, 7 },
	{ "action_index2", DPP_ATTR_FLAG_RST, 1, 2, 119, 8 },
	{ "v_qid", DPP_ATTR_FLAG_RST, 1, 4, 111, 16 },
	{ "uplink_fd_id", DPP_ATTR_FLAG_RST, 1, 4, 95, 32 },
	{ "spec_port_vfid", DPP_ATTR_FLAG_RST, 1, 4, 63, 12 },
	{ "count_id", DPP_ATTR_FLAG_RST, 1, 4, 51, 20 },
	{ "hash_alg", DPP_ATTR_FLAG_RST, 1, 2, 31, 8 },
	{ "rss_hash_factor", DPP_ATTR_FLAG_RST, 1, 2, 23, 8 },
	{ "rsv3", DPP_ATTR_FLAG_RST, 1, 2, 15, 4 },
	{ "encap0_index", DPP_ATTR_FLAG_RST, 1, 2, 11, 12 },
};

/* One ZXDH_FLOW_ATTR_T per generated table: name, sdt_no, type,
 * width, key_width, rst_width (bits), field_num (45 above), fields. */
ZXDH_FLOW_ATTR_T g_flow_attr_list[] = {
	{ "fd_cfg", 130, DPP_FLOW_SDT_ACL, 640, 640, 128, 45, g_fd_cfg_fields },
};

/* Number of entries in g_flow_attr_list. */
ZXIC_UINT32 dpp_flow_attr_list_size_get(void)
{
	return sizeof(g_flow_attr_list) / sizeof(ZXDH_FLOW_ATTR_T);
}

# ==== en_np/flow/common/Kbuild.include ====
cur_dir := en_np/flow/common/
subdirs := source/
src_files +=
include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include)
/* ==== en_np/flow/common/include/dpp_flow_comm.h ==== */
#ifndef DPP_FLOW_COMM_H
#define DPP_FLOW_COMM_H

#include "zxic_common.h"
#include "dpp_type_api.h"
#include "dpp_dev.h"
#include "dpp_dtb.h"
#include "dpp_tbl_comm.h"

/* Field role within a flow attribute layout. */
#define DPP_ATTR_FLAG_KEY (0 << 0) /* key */
#define DPP_ATTR_FLAG_MASK (1 << 0) /* mask */
#define DPP_ATTR_FLAG_RST (1 << 1) /* result */

/* Kind of hardware table an SDT refers to. */
typedef enum dpp_flow_sdt_type_e {
	DPP_FLOW_SDT_INVALID = 0, /**< @brief invalid type */
	DPP_FLOW_SDT_ERAM = 1, /**< @brief eRAM direct table */
	DPP_FLOW_SDT_DDR = 2, /**< @brief DDR direct table */
	DPP_FLOW_SDT_HASH = 3, /**< @brief hash table */
	DPP_FLOW_SDT_LPM = 4, /**< @brief LPM table */
	DPP_FLOW_SDT_ACL = 5, /**< @brief on-chip TCAM table */
	DPP_FLOW_SDT_MAX = 6,
} DPP_FLOW_SDT_TYPE_E;

/* Describes one field of a flow attribute structure. */
typedef struct zxdh_flow_attr_field_t {
	ZXIC_CHAR *p_field_name; /* field name */
	ZXIC_UINT32 flags; /* role: key / mask / rst */
	ZXIC_UINT16 array_num; /* number of array elements */
	ZXIC_UINT32 element_size; /* size of one element, in bytes */
	ZXIC_UINT16 msb_pos; /* most-significant bit position in the layout */
	ZXIC_UINT16 len; /* field length, in bits */
} ZXDH_FLOW_ATTR_FIELD_T;

/* Describes one flow table (the generated attribute layout). */
typedef struct zxdh_flow_attr_t {
	ZXIC_CHAR *attr_name; /* struct name */
	ZXIC_UINT32 sdt_no; /* SDT number */
	ZXIC_UINT32
	table_type; /* table type: ERAM/HASH/ACL/DDR/LPM (DPP_SDT_TABLE_TYPE_E) */
	ZXIC_UINT32 width; /* hash entry or direct-table entry width, bits */
	ZXIC_UINT32 key_width; /* hash key or ACL key/mask width, bits */
	ZXIC_UINT32 rst_width; /* hash result or ACL result width, bits */
	ZXIC_UINT32 field_num; /* number of fields */
	ZXDH_FLOW_ATTR_FIELD_T *p_fields; /* all fields of the struct */
} ZXDH_FLOW_ATTR_T;

DPP_STATUS dpp_apt_dtb_eram_get_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				   ZXIC_UINT32 sdt_no, ZXIC_UINT32 index,
				   void *pData);
DPP_STATUS dpp_apt_dtb_eram_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				      ZXIC_UINT32 sdt_no, ZXIC_UINT32 index,
				      void *pData);
DPP_STATUS dpp_apt_dtb_eram_clear_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				     ZXIC_UINT32 sdt_no, ZXIC_UINT32 index);

DPP_STATUS dpp_apt_dtb_hash_search_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				      ZXIC_UINT32 sdt_no, void *pData);
DPP_STATUS dpp_apt_dtb_hash_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				      ZXIC_UINT32 sdt_no, void *pData);
DPP_STATUS dpp_apt_dtb_hash_delete_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				      ZXIC_UINT32 sdt_no, void *pData);

DPP_STATUS dpp_apt_dtb_acl_entry_search_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
					   ZXIC_UINT32 sdt_no,
					   ZXIC_UINT32 handle, void *pData);
DPP_STATUS dpp_apt_dtb_acl_entry_get_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
					ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle,
					void *pData);
DPP_STATUS dpp_apt_dtb_acl_entry_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
					   ZXIC_UINT32 sdt_no,
					   ZXIC_UINT32 handle, void *pData);
DPP_STATUS dpp_apt_dtb_acl_entry_del_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
					ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle);

#endif

# ==== en_np/flow/common/source/Kbuild.include ====
cur_dir := en_np/flow/common/source/
src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c)))

/* ==== en_np/flow/common/source/dpp_flow_comm.c ==== */
#include "zxic_common.h"
#include "dpp_flow_comm.h"
#include "dpp_dtb_table_api.h"
#include "dpp_se_api.h"
#include "dpp_etcam.h"
#include "dpp_dtb_cfg.h"
#include "dpp_dtb_table.h"
#include "dpp_se.h"

+extern ZXDH_FLOW_ATTR_T g_flow_attr_list[]; +extern ZXIC_UINT32 dpp_flow_attr_list_size_get(void); + +ZXDH_FLOW_ATTR_T *zxdh_flow_attr_get(ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 index = 0; + + for (index = 0; index < dpp_flow_attr_list_size_get(); index++) { + if (sdt_no == g_flow_attr_list[index].sdt_no) { + return &g_flow_attr_list[index]; + } + } + + return NULL; +} + +static DPP_STATUS dpp_field_to_bitstream(ZXDH_FLOW_ATTR_FIELD_T *p_field, + ZXIC_UINT32 width, ZXIC_UINT8 *pData, + ZXIC_UINT8 *p_buff, + ZXIC_UINT32 *p_offset) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 element_width = 0; /*unit:bit*/ + ZXIC_UINT32 msb_temp = 0; /*最高bit*/ + ZXIC_UINT32 array_index = 0; + ZXIC_UINT32 temp_data = 0; + + ZXIC_COMM_CHECK_POINT(p_field); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(p_buff); + ZXIC_COMM_CHECK_POINT(p_offset); + + offset = *p_offset; + + //默认所有属性都是数组形式 + for (array_index = 0; array_index < (p_field->array_num); + array_index++) { + //每个数组成员在比特流中占据的位宽 + element_width = (p_field->len) / (p_field->array_num); + + //从结构体中取出要写入的数据信息,需要考虑字节序的问题 + ZXIC_COMM_MEMCPY_S(&temp_data, p_field->element_size, + pData + offset, p_field->element_size); + temp_data = temp_data & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, element_width); + + //计算数据写入比特流的最高位置 + msb_temp = + ((p_field->msb_pos) > (array_index * element_width)) ? 
+ ((p_field->msb_pos) - + (array_index * element_width)) : + 0; + + //按照计算好的位置填充数据 + rc = zxic_comm_write_bits_ex(p_buff, width, temp_data, msb_temp, + element_width); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_write_bits_ex"); + + //移动到下一个数据位置 + offset += p_field->element_size; + } + + *p_offset = offset; + + return DPP_OK; +} + +static DPP_STATUS dpp_bitstream_to_field(ZXDH_FLOW_ATTR_FIELD_T *p_field, + ZXIC_UINT32 width, ZXIC_UINT8 *pData, + ZXIC_UINT8 *p_buff, + ZXIC_UINT32 *p_offset) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 element_width = 0; /*unit:bit*/ + ZXIC_UINT32 msb_temp = 0; + ZXIC_UINT32 array_index = 0; + ZXIC_UINT32 temp_data = 0; + + ZXIC_COMM_CHECK_POINT(p_field); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(p_buff); + ZXIC_COMM_CHECK_POINT(p_offset); + + offset = *p_offset; + + //默认所有属性都是数组形式 + for (array_index = 0; array_index < (p_field->array_num); + array_index++) { + //每个数组成员在比特流中占据的位宽 + element_width = (p_field->len) / (p_field->array_num); + + //计算数据在比特流的最高位置 + msb_temp = + ((p_field->msb_pos) > (array_index * element_width)) ? 
+ ((p_field->msb_pos) - + (array_index * element_width)) : + 0; + + //按照计算好的位置填充数据 + rc = zxic_comm_read_bits_ex(p_buff, width, &temp_data, msb_temp, + element_width); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_write_bits_ex"); + + //把从比特流中读取的数据存储在结构体中,需要考虑字节序的问题 + temp_data = temp_data & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, element_width); + ZXIC_COMM_MEMCPY_S(pData + offset, p_field->element_size, + &temp_data, p_field->element_size); + + //移动到下一个数据位置 + offset += p_field->element_size; + } + + *p_offset = offset; + + return DPP_OK; +} + +DPP_STATUS dpp_flow_eram_attr_to_bitstream(ZXDH_FLOW_ATTR_T *p_flow_attr, + ZXIC_VOID *pData, + DPP_DTB_ERAM_ENTRY_INFO_T *eramEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(eramEntry); + ZXIC_COMM_CHECK_POINT(eramEntry->p_data); + + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + rc = dpp_field_to_bitstream(p_field, p_flow_attr->width, + (ZXIC_UINT8 *)pData, + (ZXIC_UINT8 *)(eramEntry->p_data), + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_field_to_bitstream"); + } + + zxic_comm_swap((ZXIC_UINT8 *)(eramEntry->p_data), + p_flow_attr->width / 8); + + return DPP_OK; +} + +static DPP_STATUS +dpp_flow_hash_attr_to_bitstream(ZXDH_FLOW_ATTR_T *p_flow_attr, ZXIC_VOID *pData, + DPP_DTB_HASH_ENTRY_INFO_T *hashEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 width = 0; + ZXIC_UINT32 flags = 0; /*key mask or rst*/ + ZXIC_UINT8 *p_buff = NULL; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(hashEntry); + ZXIC_COMM_CHECK_POINT(hashEntry->p_actu_key); + + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + 
flags = p_field->flags; + if (flags == DPP_ATTR_FLAG_KEY) { + width = p_flow_attr->key_width; + p_buff = hashEntry->p_actu_key; + } else if (flags == DPP_ATTR_FLAG_RST) { + width = p_flow_attr->rst_width; + p_buff = hashEntry->p_rst; + } + + if (p_buff != NULL) { + rc = dpp_field_to_bitstream(p_field, width, + (ZXIC_UINT8 *)pData, p_buff, + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_field_to_bitstream"); + } + } + + return DPP_OK; +} + +ZXIC_VOID dpp_acl_dtb_entry_print(DPP_DTB_ACL_ENTRY_INFO_T *aclEntry) +{ + ZXIC_UINT32 i = 0; + ZXIC_COMM_TRACE_INFO("key_data:"); + + for (i = 0; i < DPP_ETCAM_WIDTH_MAX / 8; i++) { + ZXIC_COMM_TRACE_INFO("%02x", aclEntry->key_data[i]); + } + + ZXIC_COMM_TRACE_INFO("\n"); + + ZXIC_COMM_TRACE_INFO("key_mask:"); + + for (i = 0; i < DPP_ETCAM_WIDTH_MAX / 8; i++) { + ZXIC_COMM_TRACE_INFO("%02x", aclEntry->key_mask[i]); + } + + ZXIC_COMM_TRACE_INFO("\n"); + + ZXIC_COMM_TRACE_INFO("rst:"); + for (i = 0; i < (DPP_SMMU0_READ_REG_MAX_NUM * 4); i++) { + ZXIC_COMM_TRACE_INFO("%02x", aclEntry->p_as_rslt[i]); + } + + ZXIC_COMM_TRACE_INFO("\n"); +} + +static DPP_STATUS +dpp_flow_acl_attr_to_bitstream(ZXDH_FLOW_ATTR_T *p_flow_attr, ZXIC_VOID *pData, + DPP_DTB_ACL_ENTRY_INFO_T *aclEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 width = 0; + ZXIC_UINT32 flags = 0; /*key mask or rst*/ + ZXIC_UINT8 *p_buff = NULL; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(aclEntry->key_data); + ZXIC_COMM_CHECK_POINT(aclEntry->key_mask); + + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + flags = p_field->flags; + if (flags == DPP_ATTR_FLAG_KEY) { + width = p_flow_attr->key_width; + p_buff = aclEntry->key_data; + } else if (flags == DPP_ATTR_FLAG_MASK) { + width = p_flow_attr->key_width; + p_buff = aclEntry->key_mask; + } 
else if (flags == DPP_ATTR_FLAG_RST) { + width = p_flow_attr->rst_width; + p_buff = aclEntry->p_as_rslt; + } + + if (p_buff != NULL) { + rc = dpp_field_to_bitstream(p_field, width, + (ZXIC_UINT8 *)pData, p_buff, + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_field_to_bitstream"); + } + } + + if (aclEntry->p_as_rslt) { + zxic_comm_swap((ZXIC_UINT8 *)(aclEntry->p_as_rslt), + p_flow_attr->rst_width / 8); + } + + return DPP_OK; +} + +static DPP_STATUS +dpp_flow_eram_bitstream_to_attr(ZXDH_FLOW_ATTR_T *p_flow_attr, ZXIC_VOID *pData, + DPP_DTB_ERAM_ENTRY_INFO_T *eramEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(eramEntry); + ZXIC_COMM_CHECK_POINT(eramEntry->p_data); + + zxic_comm_swap((ZXIC_UINT8 *)(eramEntry->p_data), + p_flow_attr->width / 8); + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + rc = dpp_bitstream_to_field(p_field, p_flow_attr->width, + (ZXIC_UINT8 *)pData, + (ZXIC_UINT8 *)(eramEntry->p_data), + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_bitstream_to_field"); + } + + return DPP_OK; +} + +static DPP_STATUS +dpp_flow_hash_bitstream_to_attr(ZXDH_FLOW_ATTR_T *p_flow_attr, ZXIC_VOID *pData, + DPP_DTB_HASH_ENTRY_INFO_T *hashEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 width = 0; + ZXIC_UINT32 flags = 0; /*key mask or rst*/ + ZXIC_UINT8 *p_buff = NULL; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(hashEntry); + ZXIC_COMM_CHECK_POINT(hashEntry->p_actu_key); + + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + flags = p_field->flags; + if (flags == DPP_ATTR_FLAG_KEY) { + width = p_flow_attr->key_width; + p_buff = 
hashEntry->p_actu_key; + } else if (flags == DPP_ATTR_FLAG_RST) { + width = p_flow_attr->rst_width; + p_buff = hashEntry->p_rst; + } + + if (p_buff != NULL) { + rc = dpp_bitstream_to_field(p_field, width, + (ZXIC_UINT8 *)pData, p_buff, + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_bitstream_to_field"); + } + } + + return DPP_OK; +} + +static DPP_STATUS +dpp_flow_acl_bitstream_to_attr(ZXDH_FLOW_ATTR_T *p_flow_attr, ZXIC_VOID *pData, + DPP_DTB_ACL_ENTRY_INFO_T *aclEntry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 width = 0; + ZXIC_UINT32 flags = 0; /*key mask or rst*/ + ZXIC_UINT8 *p_buff = NULL; + ZXDH_FLOW_ATTR_FIELD_T *p_field = NULL; + + ZXIC_COMM_CHECK_POINT(p_flow_attr); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_POINT(aclEntry); + ZXIC_COMM_CHECK_POINT(aclEntry->key_data); + ZXIC_COMM_CHECK_POINT(aclEntry->key_mask); + + if (aclEntry->p_as_rslt) { + zxic_comm_swap((ZXIC_UINT8 *)(aclEntry->p_as_rslt), + p_flow_attr->rst_width / 8); + } + + for (index = 0; index < (p_flow_attr->field_num); index++) { + //获取到属性表 + p_field = p_flow_attr->p_fields + index; + flags = p_field->flags; + if (flags == DPP_ATTR_FLAG_KEY) { + width = p_flow_attr->key_width; + p_buff = aclEntry->key_data; + } else if (flags == DPP_ATTR_FLAG_MASK) { + width = p_flow_attr->key_width; + p_buff = aclEntry->key_mask; + } else if (flags == DPP_ATTR_FLAG_RST) { + width = p_flow_attr->rst_width; + p_buff = aclEntry->p_as_rslt; + } + + if (p_buff != NULL) { + rc = dpp_bitstream_to_field(p_field, width, + (ZXIC_UINT8 *)pData, p_buff, + &offset); + ZXIC_COMM_CHECK_RC(rc, "dpp_bitstream_to_field"); + } + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_flow_attr_to_bitstream(ZXIC_UINT32 sdt_no, ZXIC_VOID *pData, + ZXIC_VOID *p_Entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_type = 0; + ZXDH_FLOW_ATTR_T *p_flow_attr = NULL; + + //获取结构体 + p_flow_attr = zxdh_flow_attr_get(sdt_no); + ZXIC_COMM_CHECK_POINT(p_flow_attr); + + flow_type = 
p_flow_attr->table_type; + switch (flow_type) { + case DPP_FLOW_SDT_ERAM: { + rc = dpp_flow_eram_attr_to_bitstream( + p_flow_attr, pData, + (DPP_DTB_ERAM_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_eram_attr_to_bitstream"); + break; + } + case DPP_FLOW_SDT_HASH: { + rc = dpp_flow_hash_attr_to_bitstream( + p_flow_attr, pData, + (DPP_DTB_HASH_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_hash_attr_to_bitstream"); + break; + } + case DPP_FLOW_SDT_ACL: { + rc = dpp_flow_acl_attr_to_bitstream( + p_flow_attr, pData, + (DPP_DTB_ACL_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_acl_attr_to_bitstream"); + + dpp_acl_dtb_entry_print((DPP_DTB_ACL_ENTRY_INFO_T *)p_Entry); + break; + } + default: { + ZXIC_COMM_TRACE_ERROR("[%s]:sdt[%u] flow_type[%u] error!\n", + __FUNCTION__, sdt_no, flow_type); + return DPP_ERR; + } + } + + return DPP_OK; +} + +// pData是流表具体信息ZXDH_NAME_T,p_Entry是码流信息DPP_DTB_ERAM/HASH/ACL_ENTRY_INFO_T +ZXIC_UINT32 dpp_flow_bitstream_to_attr(ZXIC_UINT32 sdt_no, ZXIC_VOID *pData, + ZXIC_VOID *p_Entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_type = 0; + ZXDH_FLOW_ATTR_T *p_flow_attr = NULL; + + //获取结构体 + p_flow_attr = zxdh_flow_attr_get(sdt_no); + ZXIC_COMM_CHECK_POINT(p_flow_attr); + + flow_type = p_flow_attr->table_type; + switch (flow_type) { + case DPP_FLOW_SDT_ERAM: { + rc = dpp_flow_eram_bitstream_to_attr( + p_flow_attr, pData, + (DPP_DTB_ERAM_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_eram_bitstream_to_attr"); + break; + } + case DPP_FLOW_SDT_HASH: { + rc = dpp_flow_hash_bitstream_to_attr( + p_flow_attr, pData, + (DPP_DTB_HASH_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_hash_bitstream_to_attr"); + break; + } + case DPP_FLOW_SDT_ACL: { + rc = dpp_flow_acl_bitstream_to_attr( + p_flow_attr, pData, + (DPP_DTB_ACL_ENTRY_INFO_T *)p_Entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_acl_bitstream_to_attr"); + break; + } + default: { + ZXIC_COMM_TRACE_ERROR("[%s]:sdt[%u] flow_type[%u] 
error!\n", + __FUNCTION__, sdt_no, flow_type); + return DPP_ERR; + } + } + + return DPP_OK; +} + +/* ===================================================== ERAM =====================================================*/ + +// pData是流表具体信息ZXDH_NAME_T +DPP_STATUS dpp_apt_dtb_eram_get_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dump_data[DPP_SMMU0_READ_REG_MAX_NUM] = { 0 }; + + DPP_DTB_ERAM_ENTRY_INFO_T dump_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET_S(dump_data, sizeof(dump_data), 0x00, + sizeof(dump_data)); + + dump_eram_entry.index = index; + dump_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_data_get(dev, queue_id, sdt_no, &dump_eram_entry); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_eram_data_get"); + + rc = dpp_flow_bitstream_to_attr(sdt_no, pData, &dump_eram_entry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_bitstream_to_attr"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_eram_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 dump_data[DPP_SMMU0_READ_REG_MAX_NUM] = { 0 }; + + DPP_DTB_ERAM_ENTRY_INFO_T dtb_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pData); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET_S(dump_data, sizeof(dump_data), 0x00, + sizeof(dump_data)); + + dtb_eram_entry.index = index; + dtb_eram_entry.p_data = dump_data; + + rc = dpp_flow_attr_to_bitstream(sdt_no, pData, 
&dtb_eram_entry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + //dtb配表 + dtb_eram_entry.index = index; + dtb_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, 1, &dtb_eram_entry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_eram_dma_write"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_eram_clear_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 dump_data[DPP_SMMU0_READ_REG_MAX_NUM] = { 0 }; + + DPP_DTB_ERAM_ENTRY_INFO_T dtb_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET_S(dump_data, sizeof(dump_data), 0x00, + sizeof(dump_data)); + + //dtb配表 + dtb_eram_entry.index = index; + dtb_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, 1, &dtb_eram_entry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_eram_dma_write"); + + return rc; +} + +/* ===================================================== HASH =====================================================*/ + +// pData是流表具体信息ZXDH_NAME_T +DPP_STATUS dpp_apt_dtb_hash_search_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + DPP_STATUS rc = DPP_OK; + + DPP_DTB_HASH_ENTRY_INFO_T tDtbHashEntry = { 0 }; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + ZXIC_UINT32 srch_mode = HASH_SRH_MODE_HDW; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + tDtbHashEntry.p_actu_key = key; + tDtbHashEntry.p_rst = rst; + + rc = 
dpp_flow_attr_to_bitstream(sdt_no, pData, &tDtbHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + rc = dpp_dtb_hash_data_get(dev, queue_id, sdt_no, &tDtbHashEntry, + srch_mode); + if (rc != DPP_OK) { + if (rc == DPP_HASH_RC_SRH_FAIL) { + ZXIC_COMM_PRINT("There is no such hash!\n"); + return DPP_HASH_RC_SRH_FAIL; + } + return DPP_ERR; + } + + rc = dpp_flow_bitstream_to_attr(sdt_no, pData, &tDtbHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_bitstream_to_attr"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_hash_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + DPP_STATUS rc = DPP_OK; + DPP_DTB_HASH_ENTRY_INFO_T tDtbHashEntry = { 0 }; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + tDtbHashEntry.p_actu_key = key; + tDtbHashEntry.p_rst = rst; + + rc = dpp_flow_attr_to_bitstream(sdt_no, pData, &tDtbHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + rc = dpp_dtb_hash_dma_insert(dev, queue_id, sdt_no, 1, &tDtbHashEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_insert"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_hash_delete_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + DPP_STATUS rc = DPP_OK; + DPP_DTB_HASH_ENTRY_INFO_T tDtbHashEntry = { 0 }; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + 
tDtbHashEntry.p_actu_key = key; + tDtbHashEntry.p_rst = rst; + + rc = dpp_flow_attr_to_bitstream(sdt_no, pData, &tDtbHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + rc = dpp_dtb_hash_dma_delete(dev, queue_id, sdt_no, 1, &tDtbHashEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_delete"); + + return rc; +} + +/* ===================================================== ACL =====================================================*/ + +// pData是流表具体信息ZXDH_NAME_T +// pData中的handle值是传入的(已知) +DPP_STATUS dpp_apt_dtb_acl_entry_search_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 handle, void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[DPP_SMMU0_READ_REG_MAX_NUM * 4] = { 0 }; /*128bit*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET_S(&tDtbAclEntry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0x0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(data, sizeof(data), 0x0, sizeof(data)); + ZXIC_COMM_MEMSET_S(mask, sizeof(mask), 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET_S(rst, sizeof(rst), 0x0, sizeof(rst)); + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + tDtbAclEntry.p_as_rslt = rst; + + rc = dpp_flow_attr_to_bitstream(sdt_no, pData, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + tDtbAclEntry.handle = handle; + + rc = dpp_dtb_acl_data_get(dev, queue_id, sdt_no, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_data_get"); + + dpp_acl_dtb_entry_print(&tDtbAclEntry); + + rc = dpp_flow_bitstream_to_attr(sdt_no, pData, &tDtbAclEntry); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_bitstream_to_attr"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_acl_entry_get_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle, + void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[DPP_SMMU0_READ_REG_MAX_NUM * 4] = { 0 }; /*128bit*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET_S(&tDtbAclEntry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0x0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(data, sizeof(data), 0x0, sizeof(data)); + ZXIC_COMM_MEMSET_S(mask, sizeof(mask), 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET_S(rst, sizeof(rst), 0x0, sizeof(rst)); + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + tDtbAclEntry.p_as_rslt = rst; + + tDtbAclEntry.handle = handle; + + rc = dpp_dtb_etcam_data_get(dev, queue_id, sdt_no, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_data_get"); + + dpp_acl_dtb_entry_print(&tDtbAclEntry); + + rc = dpp_flow_bitstream_to_attr(sdt_no, pData, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_bitstream_to_attr"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_acl_entry_insert_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 handle, void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[DPP_SMMU0_READ_REG_MAX_NUM * 4] = { 0 }; /*128bit*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + 
ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET_S(&tDtbAclEntry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0x0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(data, sizeof(data), 0x0, sizeof(data)); + ZXIC_COMM_MEMSET_S(mask, sizeof(mask), 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET_S(rst, sizeof(rst), 0x0, sizeof(rst)); + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + tDtbAclEntry.p_as_rslt = rst; + + rc = dpp_flow_attr_to_bitstream(sdt_no, pData, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_flow_attr_to_bitstream"); + + tDtbAclEntry.handle = handle; + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, 1, &tDtbAclEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_dma_insert"); + + return rc; +} + +DPP_STATUS dpp_apt_dtb_acl_entry_del_ex(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[DPP_SMMU0_READ_REG_MAX_NUM * 4] = { 0 }; /*128bit*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET_S(&tDtbAclEntry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0x0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(data, sizeof(data), 0xff, sizeof(data)); + ZXIC_COMM_MEMSET_S(mask, sizeof(mask), 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET_S(rst, sizeof(rst), 0xff, sizeof(rst)); + + tDtbAclEntry.handle = handle; + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + tDtbAclEntry.p_as_rslt = rst; + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, 1, &tDtbAclEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_dma_insert"); 
+ + return rc; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/flow/tool.py b/drivers/net/ethernet/dinghai/en_np/flow/tool.py new file mode 100644 index 000000000000..d1e5c11c4d5e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/flow/tool.py @@ -0,0 +1,577 @@ +import os +import sys +import xml.etree.ElementTree as ET + +def parse_flow_attr_xml(xml_file): + tree = ET.parse(xml_file) + root = tree.getroot() + + sdt_templates = {} # 保存sdt-template内容 + structures = [] # 保存最终SDT结构体内容 + + # 解析SDT模板(sdt-template) + for sdt_template in root.findall("sdt-template"): + sdt_template_id = sdt_template.attrib["id"] + sdt_type = sdt_template.attrib.get("type", "") + sdt_name = sdt_template.attrib.get("name", "") + width = int(sdt_template.attrib.get("width", 0)) + key_width = int(sdt_template.attrib.get("key", 0)) + rst_width = int(sdt_template.attrib.get("rst", 0)) + fields = [] + + # 解析sdt-template中的field定义 + for field in sdt_template.findall("field"): + fields.append({ + "name": field.attrib["name"], + "attr": field.attrib["attr"], + "array_num": int(field.attrib["array_num"]), + "element_size": int(field.attrib["element_bits"]) // 8, + "msb": int(field.attrib["msb"]), + "length": int(field.attrib["bit_num"]), + }) + + sdt_templates[sdt_template_id] = { + "type": sdt_type, + "name": sdt_name, + "width": width, + "key_width": key_width, + "rst_width": rst_width, + "fields": fields, + } + + # 解析具体SDT实例(sdt) + for sdt in root.findall("sdt"): + sdt_no = int(sdt.attrib["no"]) + use_template = sdt.attrib.get("use-template") + + if use_template: # 如果实例引用模板 + if use_template in sdt_templates: # 引用了sdt-template + template_data = sdt_templates[use_template] + sdt_type = template_data["type"] + sdt_name = template_data["name"] + width = template_data["width"] + key_width = template_data["key_width"] + rst_width = template_data["rst_width"] + fields = template_data["fields"] + else: + raise ValueError(f"Undefined sdt-template reference: 
{use_template}") + else: # 如果没有引用模板,解析自身定义 + sdt_type = sdt.attrib.get("type", "") + sdt_name = sdt.attrib.get("name", "") + width = int(sdt.attrib.get("width", 0)) + key_width = int(sdt.attrib.get("key", 0)) + rst_width = int(sdt.attrib.get("rst", 0)) + fields = [] + for field in sdt.findall("field"): + fields.append({ + "name": field.attrib["name"], + "attr": field.attrib["attr"], + "array_num": int(field.attrib["array_num"]), + "element_size": int(field.attrib["element_bits"]) // 8, + "msb": int(field.attrib["msb"]), + "length": int(field.attrib["bit_num"]), + }) + + # 如果类型是 ACL,设置 key_width 和处理 fields + if sdt_type == "ACL": + key_width = width # key_width 等于 width + additional_fields = [] + for field in fields: + if field["attr"] == "key": + mask_field = field.copy() + mask_field["attr"] = "mask" + additional_fields.append(mask_field) + fields.extend(additional_fields) + + # 调整字段顺序为 key -> mask -> rst + fields.sort(key=lambda x: {"key": 0, "mask": 1, "rst": 2}.get(x["attr"], 3)) + + # 将解析后的 SDT 添加到结构体 + structures.append({ + "no": sdt_no, + "type": sdt_type, + "name": sdt_name, + "width": width, + "key_width": key_width, + "rst_width": rst_width, + "fields": fields, + }) + + return structures + + + +def check_parse(structures): + for structure in structures: + # 获取当前结构体的字段列表 + fields = structure["fields"] + + # 将字段按 attr 分类 + fields_by_attr = {} + for field in fields: + attr = field["attr"] + if attr not in fields_by_attr: + fields_by_attr[attr] = [] + fields_by_attr[attr].append(field) + + # 按分类逐一检查字段的连续性和长度 + for attr, fields_list in fields_by_attr.items(): + # 按 msb 从大到小排序 + fields_list.sort(key=lambda f: f["msb"], reverse=True) + + # 检查连续性和 array_num * 8 = length 的关系 + for i in range(len(fields_list)): + current_field = fields_list[i] + + # 如果不是最后一个字段,检查连续性 + if i < len(fields_list) - 1: + next_field = fields_list[i + 1] + if current_field["msb"] - current_field["length"] != next_field["msb"]: + print(f"Error in Structure No: {structure['no']} for attr: 
{attr}") + print(f"Field {current_field['name']} and {next_field['name']} are not continuous.") + return False + + # 如果所有结构体和字段都检查通过 + return True + + +def print_structures(structures): + # 遍历 structures 列表 + for structure in structures: + print(f"Structure No: {structure['no']}") + print(f" Type: {structure['type']}") + print(f" Name: {structure['name']}") + print(f" Width: {structure['width']}") + print(f" Key Width: {structure['key_width']}") + print(f" Rst Width: {structure['rst_width']}") + + # 打印字段列表 + print(" Fields:") + for field in structure['fields']: + print(f" - {field}") + print("-" * 30) # 分隔符 + + +def generate_tbl_c_file(structures): + # 查看数组内容是否全部正确写入 + # print_structures(structures) + + # 用于记录已生成的结构体名称,避免重复生成 + generated_names = set() + + # 生成字段数组 + for struct in structures: + if not struct["name"]: + continue + if struct["name"] in generated_names: + continue + generated_names.add(struct["name"]) + output_file = f"source/dpp_tbl_{struct['name']}.c".lower() + + with open(output_file, "w") as f: + f.write('#include "dpp_flow_comm.h"\n') + f.write(f'#include "dpp_tbl_{struct["name"]}.h"\n') + f.write(f"ZXDH_FLOW_ATTR_FIELD_T g_{struct['name']}_fields[] = \n{{\n") + for field in struct["fields"]: + f.write( + f' {{"{field["name"]}", DPP_ATTR_FLAG_{field["attr"].upper()}, ' + f'{field["array_num"]}, {field["element_size"]}, {field["msb"]}, {field["length"]}}},\n' + ) + f.write("};\n\n") + + +def generate_tbl_list_c_file(structures, output_file): + # 检查目标文件是否存在,存在则删除 + if os.path.exists(output_file): + os.remove(output_file) + + with open(output_file, "w") as f: + f.write('#include "dpp_flow_comm.h"\n') + + generated_names0 = set() + for struct in structures: + if struct["name"] in generated_names0: + continue + generated_names0.add(struct["name"]) + f.write(f'#include "dpp_tbl_{struct["name"].lower()}.h"\n') + + # 查看数组内容是否全部正确写入 + print_structures(structures) + # 用于记录已生成的结构体名称,避免重复生成 + generated_names = set() + + # 生成字段数组 + for struct in structures: + 
if not struct["name"]: + continue + if struct["name"] in generated_names: + continue + generated_names.add(struct["name"]) + + f.write(f"ZXDH_FLOW_ATTR_FIELD_T g_{struct['name']}_fields[] = \n{{\n") + for field in struct["fields"]: + f.write( + f' {{"{field["name"]}", DPP_ATTR_FLAG_{field["attr"].upper()}, ' + f'{field["array_num"]}, {field["element_size"]}, {field["msb"]}, {field["length"]}}},\n' + ) + f.write("};\n\n") + + # 生成流属性列表 + f.write("ZXDH_FLOW_ATTR_T g_flow_attr_list[]= \n{\n") + for struct in structures: + # if not struct["no"]: + # continue + f.write(" {\n") + f.write(f' "{struct["name"]}",\n') + f.write(f" {struct['no']},\n") + f.write(f" DPP_FLOW_SDT_{struct['type'].upper()},\n") + f.write(f" {struct['width']},\n") + f.write(f" {struct['key_width']},\n") + f.write(f" {struct['rst_width']},\n") + f.write(f" {len(struct['fields'])},\n") + f.write(f" g_{struct['name']}_fields\n") + f.write(" },\n") + f.write("};\n") + + f.write("ZXIC_UINT32 dpp_flow_attr_list_size_get(void)\n") + f.write("{\n") + f.write(" return sizeof(g_flow_attr_list)/sizeof(ZXDH_FLOW_ATTR_T);\n") + f.write("}\n") + + +def generate_tbl_h_file(structures): + # 用于存储已生成的结构体名称,避免重复生成 + generated_names = set() + + for structure in structures: + struct_name = f"zxdh_{structure['name']}".lower() + output_file = f"{sys.argv[1]}/include/dpp_tbl_{structure['name']}.h".lower() + + # 检查是否已经生成该结构体 + if struct_name in generated_names: + continue + generated_names.add(struct_name) + + with open(output_file, "w") as f: + # 写入头文件保护 + f.write(f"#ifndef DPP_TBL_{structure['name'].upper()}_H\n") + f.write(f"#define DPP_TBL_{structure['name'].upper()}_H\n\n") + f.write('#include "zxic_common.h"\n\n') + + if structure["type"] == "ERAM" or structure["type"] == "DDR": + # ERAM 和 DDR 处理逻辑:直接生成结构体 + f.write(f"typedef struct {struct_name}_t\n") + f.write("{\n") + for field in structure["fields"]: + array = f"[{field['array_num']}]" if field["array_num"] > 1 else "" + type_map = { + 1: "ZXIC_UINT8", + 2: 
"ZXIC_UINT16", + 4: "ZXIC_UINT32", + 8: "ZXIC_UINT64" + } + field_type = type_map.get(field["element_size"], "ZXIC_UINT32") + f.write(f" {field_type} {field['name']}{array};\n") + f.write(f"}} ZXDH_{structure['name'].upper()}_T;\n\n") + + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_add(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, ZXDH_{structure['name'].upper()}_T *p_Data);\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_del(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 index);\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_get(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, ZXDH_{structure['name'].upper()}_T *p_Data);\n\n") + + elif structure["type"] == "HASH": + # HASH 处理逻辑:生成 key, entry 和整体结构体 + # Key 部分 + f.write(f"typedef struct {struct_name}_key\n") + f.write("{\n") + for field in structure["fields"]: + if field["attr"] == "key": + array = f"[{field['array_num']}]" if field["array_num"] > 1 else "" + type_map = { + 1: "ZXIC_UINT8", + 2: "ZXIC_UINT16", + 4: "ZXIC_UINT32", + 8: "ZXIC_UINT64" + } + field_type = type_map.get(field["element_size"], "ZXIC_UINT32") + f.write(f" {field_type} {field['name']}{array};\n") + f.write(f"}} ZXDH_{structure['name'].upper()}_KEY;\n\n") + + # Entry 部分 + f.write(f"typedef struct {struct_name}_entry\n") + f.write("{\n") + for field in structure["fields"]: + if field["attr"] == "rst": + array = f"[{field['array_num']}]" if field["array_num"] > 1 else "" + type_map = { + 1: "ZXIC_UINT8", + 2: "ZXIC_UINT16", + 4: "ZXIC_UINT32", + 8: "ZXIC_UINT64" + } + field_type = type_map.get(field["element_size"], "ZXIC_UINT32") + f.write(f" {field_type} {field['name']}{array};\n") + f.write(f"}} ZXDH_{structure['name'].upper()}_ENTRY;\n\n") + + # 整体结构体 + f.write(f"typedef struct {struct_name}_t\n") + f.write("{\n") + f.write(f" ZXDH_{structure['name'].upper()}_KEY key;\n") + f.write(f" ZXDH_{structure['name'].upper()}_ENTRY entry;\n") + f.write(f"}} 
ZXDH_{structure['name'].upper()}_T;\n\n") + + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_add(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXDH_{structure['name'].upper()}_T *p_Data);\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_del(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXDH_{structure['name'].upper()}_T *p_Data);\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_search(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXDH_{structure['name'].upper()}_T *p_Data);\n\n") + + elif structure["type"] == "ACL": + # ACL 处理逻辑:生成 key, mask, as_rlt 和整体结构体 + # Key 部分 + f.write(f"typedef struct {struct_name}_key\n") + f.write("{\n") + for field in structure["fields"]: + if field["attr"] == "key": + array = f"[{field['array_num']}]" if field["array_num"] > 1 else "" + type_map = { + 1: "ZXIC_UINT8", + 2: "ZXIC_UINT16", + 4: "ZXIC_UINT32", + 8: "ZXIC_UINT64" + } + field_type = type_map.get(field["element_size"], "ZXIC_UINT32") + f.write(f" {field_type} {field['name']}{array};\n") + f.write(f"}} ZXDH_{structure['name'].upper()}_KEY;\n\n") + + # Mask 部分 + f.write(f"typedef ZXDH_{structure['name'].upper()}_KEY ZXDH_{structure['name'].upper()}_MASK;\n\n") + # As_rlt 部分 + f.write(f"typedef struct {struct_name}_as_rlt\n") + f.write("{\n") + for field in structure["fields"]: + if field["attr"] == "rst": + array = f"[{field['array_num']}]" if field["array_num"] > 1 else "" + type_map = { + 1: "ZXIC_UINT8", + 2: "ZXIC_UINT16", + 4: "ZXIC_UINT32", + 8: "ZXIC_UINT64" + } + field_type = type_map.get(field["element_size"], "ZXIC_UINT32") + f.write(f" {field_type} {field['name']}{array};\n") + f.write(f"}} ZXDH_{structure['name'].upper()}_AS_RLT;\n\n") + + # 整体结构体 + f.write(f"typedef struct {struct_name}_t\n") + f.write("{\n") + f.write(f" ZXDH_{structure['name'].upper()}_KEY key;\n") + f.write(f" ZXDH_{structure['name'].upper()}_MASK mask;\n") + f.write(f" ZXDH_{structure['name'].upper()}_AS_RLT as_rlt;\n") + f.write(f"}} 
ZXDH_{structure['name'].upper()}_T;\n\n") + + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_add(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle, ZXDH_{structure['name'].upper()}_T *p_{structure['name']});\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_del(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle);\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_get(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle, ZXDH_{structure['name'].upper()}_T *p_{structure['name']});\n") + f.write( + f"ZXIC_UINT32 dpp_tbl_{structure['name']}_search(DPP_PF_INFO_T * pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle, ZXDH_{structure['name'].upper()}_T *p_{structure['name']});\n\n") + + # 写入尾部的头文件保护结束 + f.write("#endif\n") + + +def generate_acl_function(name, action): + if action == "add": + function_call = f"dpp_apt_dtb_acl_entry_insert_ex(&dev, queue, sdt_no, handle, p_{name})" + elif action == "del": + function_call = f"dpp_apt_dtb_acl_entry_del_ex(&dev, queue, sdt_no, handle)" + elif action == "get": + function_call = f"dpp_apt_dtb_acl_entry_get_ex(&dev, queue, sdt_no, handle, p_{name})" + elif action == "search": + function_call = f"dpp_apt_dtb_acl_entry_search_ex(&dev, queue, sdt_no, handle, p_{name})" + else: + raise ValueError("Invalid action for ACL") + + last_param = f", ZXDH_{name.upper()}_T *p_{name}" if action != "del" else "" + check_point = f"ZXIC_COMM_CHECK_POINT(p_{name});" if action != "del" else "" + + template = f""" +ZXIC_UINT32 dpp_tbl_{name}_{action}(DPP_PF_INFO_T* pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 handle{last_param}) +{{ + DPP_DEV_T dev = {{0}}; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + {check_point} + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, 
&DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = {function_call}; + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "{function_call.split('(')[0]}", DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +}} +EXPORT_SYMBOL(dpp_tbl_{name}_{action}); +""" + return template + + +def generate_eram_function(name, action): + if action == "add": + function_call = f"dpp_apt_dtb_eram_insert_ex(&dev, queue, sdt_no, index, pData)" + elif action == "del": + function_call = f"dpp_apt_dtb_eram_clear_ex(&dev, queue, sdt_no, index)" + elif action == "get": + function_call = f"dpp_apt_dtb_eram_get_ex(&dev, queue, sdt_no, index, pData)" + else: + raise ValueError("Invalid action for ERAM") + + template = f""" +ZXIC_UINT32 dpp_tbl_{name}_{action}(DPP_PF_INFO_T* pf_info, ZXIC_UINT32 sdt_no, ZXIC_UINT32 index{", ZXDH_" + name.upper() + "_T *pData" if action != "del" else ""}) +{{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + DPP_DEV_T dev = {{0}}; + + ZXIC_COMM_CHECK_POINT(pf_info); + {"ZXIC_COMM_CHECK_POINT(pData);" if action != "del" else ""} + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = {function_call}; + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "{function_call.split('(')[0]}", DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +}} +EXPORT_SYMBOL(dpp_tbl_{name}_{action}); +""" + return template + + +def generate_hash_function(name, action): + if action == "add": + function_call = f"dpp_apt_dtb_hash_insert_ex(&dev, queue, sdt_no, pData)" + elif action == 
"del": + function_call = f"dpp_apt_dtb_hash_delete_ex(&dev, queue, sdt_no, pData)" + elif action == "search": + function_call = f"dpp_apt_dtb_hash_search_ex(&dev, queue, sdt_no, pData)" + else: + raise ValueError("Invalid action for HASH") + + template = f""" +ZXIC_UINT32 dpp_tbl_{name}_{action}(DPP_PF_INFO_T* pf_info, ZXIC_UINT32 sdt_no, ZXDH_{name.upper()}_T* pData) +{{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + DPP_DEV_T dev = {{0}}; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(pData); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = {function_call}; + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "{function_call.split('(')[0]}", DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +}} +EXPORT_SYMBOL(dpp_tbl_{name}_{action}); +""" + return template + + +def generate_api_files(structures): + # 用于存储已生成的结构体名称,避免重复生成 + generated_names = set() + + for struct in structures: + type_ = struct['type'] + name = struct['name'] + + # 检查是否已经生成该结构体 + if name in generated_names: + continue + generated_names.add(name) + + # 确定输出文件名 + output_file = f"{sys.argv[1]}/source/dpp_{name}_api.c".lower() + + with open(output_file, "w") as f: + # 包含头文件 + f.write(f'#include "dpp_flow_comm.h"\n') + f.write(f'#include "dpp_tbl_{name}.h"\n') + f.write(f'#include "dpp_dev.h"\n\n') + + if type_ == "ACL": + # 添加函数 + for action in ["add", "del", "get", "search"]: + f.write(generate_acl_function(name, action)) + elif type_ == "ERAM": + # 添加函数 + for action in ["add", "del", "get"]: + f.write(generate_eram_function(name, action)) + elif type_ == "HASH": + # 添加函数 + for action in ["add", "del", "search"]: + 
f.write(generate_hash_function(name, action)) + + +def main(): + # 检查参数数量是否正确 + if len(sys.argv) != 2 or sys.argv[1] != "api": + print("用法: python3 tool.py api") + sys.exit(1) + + input_xml = f"zxdh_flow_attr_{sys.argv[1]}.xml" + c_file = f"{sys.argv[1]}/source/dpp_flow_struct.c" + + structures = parse_flow_attr_xml(input_xml) + + generate_tbl_list_c_file(structures, c_file) + generate_tbl_h_file(structures) + generate_api_files(structures) + + +if __name__ == "__main__": + main() diff --git a/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_api.xml b/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_api.xml new file mode 100644 index 000000000000..5512169aa207 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_api.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_demo.xml b/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_demo.xml new file mode 100644 index 000000000000..de0600e31f6a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/flow/zxdh_flow_attr_demo.xml @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/drivers/net/ethernet/dinghai/en_np/init/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/init/Kbuild.include new file mode 100644 index 000000000000..24dee734aaf0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/init/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/init/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/init/include/dpp_np_init.h b/drivers/net/ethernet/dinghai/en_np/init/include/dpp_np_init.h new file mode 100644 index 000000000000..b1ef6d23333c --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/init/include/dpp_np_init.h @@ -0,0 +1,13 @@ +#ifndef _DPP_NP_INIT_H_ +#define _DPP_NP_INIT_H_ + +#include +#include "zxic_common.h" +#include "dpp_dev.h" + +ZXIC_UINT32 dpp_vport_register(DPP_PF_INFO_T *pf_info, struct pci_dev *p_dev); +ZXIC_UINT32 dpp_vport_unregister(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_reset(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_dev_status_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 dev_status); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/init/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/init/source/Kbuild.include new file mode 100644 index 000000000000..d93dc4005bf3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/init/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/init/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c b/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c new file mode 100644 index 000000000000..8a1b9ebe714f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c @@ -0,0 +1,235 @@ +#include "zxic_common.h" +#include "dpp_np_init.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_init.h" +#include "dpp_kernel_init.h" +#include "dpp_netlink.h" +#include "dpp_dtb_table_api.h" +#include "dpp_drv_init.h" +#include "dpp_tbl_comm.h" +#include "dpp_apt_se.h" +#include "dpp_cmd_init.h" +#include "dpp_agent_channel.h" +#include "dpp_hash.h" +#include "dpp_sdt_mgr.h" +#include "dpp_tbl_pkt_cap.h" +#include "dpp_drv_sdt.h" +#include "dpp_tbl_api.h" +extern DPP_DEV_MGR_T *dpp_dev_mgr_get(ZXIC_VOID); + +ZXIC_UINT32 dpp_vport_register(DPP_PF_INFO_T *pf_info, struct pci_dev *p_dev) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_dev); + 
ZXIC_COMM_CHECK_INDEX_EQUAL_RETURN_OK(IS_PF(pf_info->vport), 0); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x register start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_pcie_channel_add(pf_info, p_dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_pcie_channel_add"); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_bar_msg_num_init(&dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_bar_msg_num_init"); + + rc = dpp_dtb_init(&dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_init"); + + rc = dpp_apt_dtb_res_init(&dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_res_init"); + + rc = dpp_vport_mgr_init(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mgr_init"); + + rc = dpp_flow_init(&dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_init"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x register success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_register); + +ZXIC_UINT32 dpp_vport_unregister(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX_EQUAL_RETURN_OK(IS_PF(pf_info->vport), 0); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x unregister start.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pkt_capture_uninit(pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_pkt_capture_uninit"); + + rc = dpp_dtb_queue_release_ex(&dev); + if (rc != DPP_OK) { + rc = dpp_dtb_queue_release_soft(&dev); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_dtb_queue_release_soft"); + } + + rc = dpp_flow_uninit(&dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_flow_uninit"); + + rc = dpp_dev_pcie_channel_del(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_pcie_channel_del"); + + rc = dpp_vport_mgr_release(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mgr_release"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x unregister success.\n", + 
__FUNCTION__, pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_unregister); + +ZXIC_UINT32 dpp_vport_reset(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] start.\n", __FUNCTION__); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_release_soft(&dev); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_dtb_queue_release_soft"); + + rc = dpp_rdma_trans_item_soft_delete(pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_rdma_trans_item_soft_delete"); + + rc = dpp_unicast_all_mac_soft_delete(pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_unicast_all_mac_soft_delete"); + + rc = dpp_multicast_all_mac_soft_delete(pf_info); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_multicast_all_mac_soft_delete"); + + rc = dpp_dev_pcie_channel_del(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_pcie_channel_del"); + + rc = dpp_vport_mgr_release(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mgr_release"); + + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_reset); + +ZXIC_UINT32 dpp_dev_status_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 dev_status) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 dev_id = 0; + + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pf_info); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + 
ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_dev_info->pcie_channel[slot][channel_id].device); + + p_dev_info->pcie_channel[slot][channel_id].dev_status = dev_status; + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_dev_status_set); + +static int __init dpp_np_init(void) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_TRACE_NOTICE("[%s] start.\n", __FUNCTION__); + + rc = dpp_init(0); + ZXIC_COMM_CHECK_RC(rc, "dpp_init"); + + rc = dpp_agent_channel_init(); + ZXIC_COMM_CHECK_RC(rc, "dpp_agent_channel_init"); + + rc = dpp_netlink_init(); + ZXIC_COMM_CHECK_RC(rc, "dpp_netlink_init"); + + rc = dpp_cmd_init(); + ZXIC_COMM_CHECK_RC(rc, "dpp_cmd_init"); + + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_np_online_uninstall(void) +{ + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_hash_tbl_clr(0); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_hash_tbl_clr"); + + //rc = dpp_acl_res_destroy(0); + //ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_acl_res_destroy"); + + rc = dpp_dtb_mgr_destory_all(); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_dtb_mgr_destory_all"); + + rc = dpp_sdt_mgr_destroy(0); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_sdt_mgr_destroy"); + + rc = dpp_dev_del(0); + ZXIC_COMM_CHECK_RC_NONE(rc, "dpp_dev_del"); + + return DPP_OK; +} + +static void __exit dpp_np_exit(void) +{ + ZXIC_COMM_TRACE_NOTICE("[%s] start.\n", __FUNCTION__); + + dpp_netlink_exit(); + dpp_agent_channel_exit(); + dpp_np_online_uninstall(); + + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); +} + +module_init(dpp_np_init); +module_exit(dpp_np_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dinghai/en_np/netlink/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/netlink/Kbuild.include new file mode 100644 index 000000000000..77e6f0f55304 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/netlink/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/netlink/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), 
$(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/netlink/include/dpp_netlink.h b/drivers/net/ethernet/dinghai/en_np/netlink/include/dpp_netlink.h new file mode 100644 index 000000000000..8989efec4173 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/netlink/include/dpp_netlink.h @@ -0,0 +1,30 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_netlink.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_NETLINK_H +#define DPP_NETLINK_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +ZXIC_SINT32 dpp_netlink_init(ZXIC_VOID); +ZXIC_VOID dpp_netlink_exit(ZXIC_VOID); +DPP_STATUS dpp_netlink_regist_msg_proc_fun(ZXIC_UINT32 id, ZXIC_VOID *ptr); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/netlink/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/netlink/source/Kbuild.include new file mode 100644 index 000000000000..12c00eac42ff --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/netlink/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/netlink/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/netlink/source/dpp_netlink.c b/drivers/net/ethernet/dinghai/en_np/netlink/source/dpp_netlink.c new file mode 100644 index 000000000000..60048d184b01 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/netlink/source/dpp_netlink.c @@ -0,0 +1,198 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dpp_netlink.h" + +#define DPP_NETLINK_PROTOCOL ((ZXIC_UINT32)(29)) +#define 
DPP_NETLINK_GROUP_ID ((ZXIC_UINT32)(1)) +#define DPP_NETLINK_MAX_PROC ((ZXIC_UINT32)(2048)) + +typedef DPP_STATUS (*DPP_NETLINK_PROC_PTR)(ZXIC_VOID *msg_body, + ZXIC_UINT32 msg_len, + ZXIC_VOID **resp, + ZXIC_UINT32 *reps_len); + +static struct sock *dpp_netlink_sk; +static DPP_NETLINK_PROC_PTR dpp_netlink_proc_ptr[DPP_NETLINK_MAX_PROC] = { 0 }; + +static DPP_STATUS dpp_netlink_send_ack_msg(ZXIC_VOID *data, ZXIC_UINT32 len) +{ + struct sk_buff *skb = NULL; + struct nlmsghdr *nlh = NULL; + ZXIC_SINT32 rtn = DPP_OK; + + if (data == NULL) { + ZXIC_COMM_PRINT("%s: data invalid.\n", __FUNCTION__); + return DPP_ERR; + } + + skb = alloc_skb(NLMSG_SPACE(len), GFP_KERNEL); + if (skb == NULL) { + ZXIC_COMM_PRINT("%s: alloc_skb failed.\n", __FUNCTION__); + return DPP_ERR; + } + + nlh = nlmsg_put(skb, 0, 0, 0, len, 0); + if (nlh == NULL) { + ZXIC_COMM_PRINT("%s: nlmsg_put failed.\n", __FUNCTION__); + return DPP_ERR; + } + + nlh->nlmsg_flags = NLM_F_ACK; + memcpy(NLMSG_DATA(nlh), data, len); + + rtn = nlmsg_multicast(dpp_netlink_sk, skb, 0, DPP_NETLINK_GROUP_ID, 0); + if (rtn < 0) { + ZXIC_COMM_PRINT("%s: nlmsg_multicast failed, rtn %d.\n", + __FUNCTION__, rtn); + return DPP_ERR; + } + return DPP_OK; +} + +static DPP_STATUS dpp_netlink_dispach_msg(struct nlmsghdr *nlh) +{ + ZXIC_UINT8 *data = NULL; + ZXIC_UINT32 id = 0; + ZXIC_UINT32 len = 0; + ZXIC_UINT8 *req = NULL; + ZXIC_VOID *resp = NULL; + ZXIC_UINT32 resp_len = 0; + ZXIC_UINT32 rtn = DPP_OK; + + DPP_NETLINK_PROC_PTR ptr = NULL; + + if (nlh == NULL) { + ZXIC_COMM_PRINT("%s: nlh invalid.\n", __FUNCTION__); + return DPP_ERR; + } + + id = *(ZXIC_UINT32 *)NLMSG_DATA(nlh); + len = nlh->nlmsg_len - NLMSG_HDRLEN; + req = (ZXIC_UINT8 *)NLMSG_DATA(nlh); + if (id > (DPP_NETLINK_MAX_PROC - 1)) { + ZXIC_COMM_PRINT("%s: id %u invalid.\n", __FUNCTION__, id); + return DPP_ERR; + } + + ptr = dpp_netlink_proc_ptr[id]; + if (ptr == NULL) { + ZXIC_COMM_PRINT("%s: ptr invalid.\n", __FUNCTION__); + return DPP_ERR; + } + + rtn = ptr(req, len, 
&resp, &resp_len); + if (rtn != DPP_OK) { + ZXIC_COMM_FREE(resp); + ZXIC_COMM_PRINT("%s: proc id %u failed.\n", __FUNCTION__, id); + return rtn; + } + + data = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(NLMSG_ALIGN(resp_len) + + sizeof(ZXIC_UINT32)); + if (data == NULL) { + ZXIC_COMM_FREE(resp); + ZXIC_COMM_PRINT("%s: ZXIC_COMM_MALLOC failed.\n", __FUNCTION__); + return DPP_ERR; + } + memcpy(data, &rtn, sizeof(ZXIC_UINT32)); + + if (resp != NULL) { + memcpy(data + sizeof(ZXIC_UINT32), resp, resp_len); + } + + dpp_netlink_send_ack_msg(data, + NLMSG_ALIGN(resp_len) + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_FREE(data); + ZXIC_COMM_FREE(resp); + + return DPP_OK; +} + +static ZXIC_VOID dpp_netlink_recv_msg(struct sk_buff *__skb) +{ + struct sk_buff *skb; + struct nlmsghdr *nlh; + ZXIC_UINT32 rtn = DPP_ERR; + + skb = skb_get(__skb); + if (skb == NULL) { + ZXIC_COMM_PRINT("%s: get skb failed.\n", __FUNCTION__); + return; + } + + nlh = nlmsg_hdr(skb); + if ((nlh == NULL) || !NLMSG_OK(nlh, skb->len)) { + kfree_skb(skb); + ZXIC_COMM_PRINT("%s: skb format invalid.\n", __FUNCTION__); + return; + } + + if ((nlh->nlmsg_flags & NLM_F_REQUEST) != 0) { + if (dpp_netlink_dispach_msg(nlh) != DPP_OK) { + dpp_netlink_send_ack_msg((ZXIC_UINT8 *)&rtn, + sizeof(ZXIC_UINT32)); + ZXIC_COMM_PRINT("%s: dpp_netlink_dispach_msg failed.\n", + __FUNCTION__); + } + kfree_skb(skb); + return; + } + + kfree_skb(skb); + ZXIC_COMM_PRINT("%s: nlmsg_flags 0x%04x invalid.\n", __FUNCTION__, + nlh->nlmsg_flags); + return; +} + +DPP_STATUS dpp_netlink_regist_msg_proc_fun(ZXIC_UINT32 id, ZXIC_VOID *ptr) +{ + if (ptr == NULL) { + ZXIC_COMM_PRINT("%s: ptr invalid.\n", __FUNCTION__); + return DPP_ERR; + } + if (id > (DPP_NETLINK_MAX_PROC - 1)) { + ZXIC_COMM_PRINT("%s: id %u invalid.\n", __FUNCTION__, id); + return DPP_ERR; + } + dpp_netlink_proc_ptr[id] = ptr; + return DPP_OK; +} + +ZXIC_SINT32 dpp_netlink_init(ZXIC_VOID) +{ + struct netlink_kernel_cfg cfg = { + .input = dpp_netlink_recv_msg, + }; + + 
ZXIC_COMM_TRACE_NOTICE("[%s] start.\n", __FUNCTION__); + + // dpp_netlink_sk = netlink_kernel_create(get_net_ns_by_pid(1), DPP_NETLINK_PROTOCOL, &cfg); + dpp_netlink_sk = + netlink_kernel_create(&init_net, DPP_NETLINK_PROTOCOL, &cfg); + if (!dpp_netlink_sk) { + ZXIC_COMM_PRINT("%s: create socket failed.\n", __FUNCTION__); + return DPP_ERR; + } + + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); + + return DPP_OK; +} + +ZXIC_VOID dpp_netlink_exit(ZXIC_VOID) +{ + ZXIC_COMM_TRACE_NOTICE("[%s] start.\n", __FUNCTION__); + netlink_kernel_release(dpp_netlink_sk); + ZXIC_COMM_PRINT("[%s] success.\n", __FUNCTION__); +} diff --git a/drivers/net/ethernet/dinghai/en_np/qos/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/qos/Kbuild.include new file mode 100644 index 000000000000..bcb34ee467bd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/qos/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/qos/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/qos/include/dpp_drv_qos.h b/drivers/net/ethernet/dinghai/en_np/qos/include/dpp_drv_qos.h new file mode 100644 index 000000000000..4ef1cd42a36d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/qos/include/dpp_drv_qos.h @@ -0,0 +1,520 @@ +/************************************************************** +* TM调度器资源分配如下 +* FQ +*fq_num: 1280 : 0, 1, 2 ... 0x4ff +*fq2_num: 1024 : 0x500, 0x502, 0x504 ... 0xcfe +*fq4_num: 1024 : 0xd00, 0xd04, 0xd08 ... 0x1cfc +*fq8_num: 1120 : 0x1d00, 0x1d08, 0x1d10 ... 0x3ff8 +* SP-WFQ +*sp_num: 128 : 0x4000, 0x4001, 0x4002 ... 0x407f +*wfq_num: 2048 : 0x4080, 0x4081, 0x4082 ... 0x487f +*wfq2_num: 256 : 0x4880, 0x4882, 0x4884 ... 0x4a7e +*wfq4_num: 256 : 0x4a80, 0x4a84, 0x4a88 ... 0x4e7c +*wfq8_num: 688 : 0x4e80, 0x4e88, 0x4e90 ... 
0x63f8 +* +*队列资源分配如下 +*flow id 0~0xf 预留 0队列用于MR复制队列, 0x10~0xfff 通用队列 + +* PLCR profileid资源分配如下 +* 一级CAR +* flowid:32K, profileid:512 +* 一级CAR +* flowid:4K, profileid:128 +* 一级CAR +* flowid:1K, profileid:32 +***************************************************************/ +#ifndef _DPP_DRV_QOS_H_ +#define _DPP_DRV_QOS_H_ + +#include "zxic_common.h" +#include "dpp_tm.h" +#include "dpp_tm_api.h" +#include "dpp_stat_car.h" +#include "dpp_stat_api.h" +#include "dpp_agent_channel.h" + +/****************************************************************************** + * 宏定义 * + *****************************************************************************/ +#define DPP_VPORT_NUM_MAX (0x7fff) +#define DPP_CRDT_LEVEL_MAX (7) +#define DPP_SCHE_TYPE_MAX (10) + +#define DPP_TM_PORT_WIDTH (56) +#define DPP_TM_VPORT_WIDTH (32) +#define DPP_TM_LEVEL_WIDTH (28) +#define DPP_TM_TYPE_WIDTH (24) + +#define G_SCH_ID_LEN (8) + +#define CAR_TYPE_MAX (4) + +#define G_PROFILE_ID_LEN (8) + +/* Get Real port */ +#define DPP_TM_CRDT_PP_PORT_GET(_gsch_id, _pp_port) \ + ((_pp_port) = ((_gsch_id >> DPP_TM_PORT_WIDTH) & 0xffff)) + +/* Get Real vport */ +#define DPP_TM_CRDT_VPORT_GET(_gsch_id, _vport) \ + ((_vport) = ((_gsch_id >> DPP_TM_VPORT_WIDTH) & 0xffff)) + +/* Get Real level */ +#define DPP_TM_CRDT_LEVEL_GET(_gsch_id, _sche_level) \ + ((_sche_level) = ((_gsch_id >> DPP_TM_LEVEL_WIDTH) & 0xf)) + +/* Get Real type */ +#define DPP_TM_CRDT_TYPE_GET(_gsch_id, _sche_type) \ + ((_sche_type) = ((_gsch_id >> DPP_TM_TYPE_WIDTH) & 0xf)) + +/* Get Real se_id */ +#define DPP_TM_CRDT_SE_ID_GET(_gsch_id, _se_id) \ + ((_se_id) = ((_gsch_id & 0xffff))) + +/* Get Real profile */ +#define DPP_CAR_PROFILE_ID_GET(_profile_id, _profileid) \ + ((_profileid) = ((_profile_id & 0xffff))) + +/****************************************************************************** + * 类型定义 * + *****************************************************************************/ + 
+/****************************************************************************** + * 接口定义 * + *****************************************************************************/ +/***********************************************************/ +/**对外接口 TM资源申请 +* @param vport_id--vport号 +* @param pp_port--端口0~9 +* @param numq--申请id个数 1 +* @param level--挂接层级 +* @param flags--se_id类型 +* @param gsch_id--调度单元号 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_cosq_gsch_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT32 numq, ZXIC_UINT32 level, + ZXIC_UINT32 flags, ZXIC_UINT64 *p_gsch_id); + +/***********************************************************/ +/**对外接口 TM资源释放 +* @param vport_id--vport号 +* @param pp_port--端口0~9 +* @param gsch_id--调度单元号 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_cosq_gsch_id_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT64 gsch_id); + +/***********************************************************/ +/**对外接口 读取TM根节点 +* @param vport_id--vport号 +* @param pp_port--端口0~9 +* @param gsch_id--调度单元号 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_sch_base_node_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT64 *gsch_id); + +/***********************************************************/ +/**对外接口 配置se->pp->dev挂接关系 +* @param vport_id--vport号 +* @param se_id--调度器号 +* @param pp_id-端口号 +* @param weight-权重1 +* @param sp_mapping-优先级0-7 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_crdt_se_pp_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 pp_id, ZXIC_UINT32 weight, + ZXIC_UINT32 sp_mapping); + 
+/***********************************************************/ +/**对外接口 配置se->se层次化挂接关系 +* @param vport_id--vport号 +* @param se_id--调度器号 +* @param se_linkid--上级调度器号 +* @param se_weight -权重 +* @param se_sp-优先级0-7 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_crdt_se_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp); + +/***********************************************************/ +/**对外接口 配置flow级流队列挂接关系 +* @param vport_id--vport号 +* @param flow_id--0~4095 +* @param c_linkid--c桶se_id +* @param c_weight--c桶权重 +* @param c_sp--c桶优先级 +* @param mode--0-单桶 1-双桶 +* @param e_linkid--e桶se_id +* @param e_weight--e桶权重 +* @param e_sp--e桶优先级 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_crdt_flow_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 c_linkid, ZXIC_UINT32 c_weight, + ZXIC_UINT32 c_sp, ZXIC_UINT32 mode, + ZXIC_UINT32 e_linkid, ZXIC_UINT32 e_weight, + ZXIC_UINT32 e_sp); + +/***********************************************************/ +/**对外接口 删除flow级流队列挂接关系 +* @param vport_id--vport号 +* @param id_s--起始flowid +* @param id_e--终止flowid +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_crdt_del_flow_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e); + +/***********************************************************/ +/**对外接口 删除调度器挂接关系 +* @param vport_id--vport号 +* @param id_s--起始seid +* @param id_e--终止seid +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_crdt_del_se_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e); + 
+/***********************************************************/ +/**对外接口 配置端口级整形 +* @param vport_id--vport号 +* @param pp_port--端口0~9 +* @param cir 单位Kb +* @param cbs 单位KB +* @param c_en c桶使能 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en); + +/***********************************************************/ +/**对外接口 读取端口级整形 +* @param vport_id--vport号 +* @param pp_port--端口0~9 +* @param p_para 整形信息:cir/cbs/en +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_port_shape_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + DPP_TM_SHAPE_PP_PARA_T *p_para); + +/***********************************************************/ +/**对外接口 配置调度器整形 +* @param vport_id vport号 +* @param se_id 调度器编号号 +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs pbs总桶深,单位KB,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir 调度器cir速率,单位Kb +* @param cbs 调度器cbs桶深,单位KB +* 注:cbs=0 表示关闭整形,即不限速 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_se_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, ZXIC_UINT32 db_en, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs); + +/***********************************************************/ +/**对外接口 配置flow整形 +* @param vport_id vport号 +* @param flow_id 流队列号 +* @param cir cir速率,单位Kb +* @param cbs cbs桶深,单位KB +* 注:cbs=0 表示关闭整形,即不限速 +* @param db_en 双桶整形使能,0-单桶,1-双桶 +* @param eir eir速率,单位Kb +* @param ebs ebs桶深,单位KB +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_flow_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + 
ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs); + +/***********************************************************/ +/**对外接口 配置流队列挂接到端口号 +* @param vport_id vport号 +* @param flow_id 流队列号 +* @param port 端口0~9 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_flow_map_port_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 port); + +/***********************************************************/ +/**对外接口 读取流队列挂接的端口号 +* @param vport_id vport号 +* @param flowid 流队列号 +* @param port 端口0~9 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_flow_map_port_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_port); + +/***********************************************************/ +/**对外接口 配置TD门限值 +* @param vport_id vport号 +* @param flow_id 流队列号 +* @param td_th 配置的丢弃门限值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_flow_td_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 td_th); + +/***********************************************************/ +/**对外接口 读取TD门限值 +* @param vport_id vport号 +* @param flow_id 流队列号 +* @param p_td_th 配置的丢弃门限值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_flow_td_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_td_th); + +/***********************************************************/ +/**对外接口 设置block值 +* @param vport_id vport号 +* @param size 配置block值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_blk_size_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 size); + +/***********************************************************/ +/**对外接口 
配置全局pfc使能状态 +* @param vport_id vport号 +* @param pfc_en 使能开关 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_qmu_pfc_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pfc_en); + +/***********************************************************/ +/**对外接口 读取全局pfc使能状态 +* @param vport_id vport号 +* @param p_pfc_en 使能开关 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_qmu_pfc_en_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *p_pfc_en); + +/***********************************************************/ +/**对外接口 配置物理端口pfc使能状态 +* @param vport_id vport号 +* @param port_id 端口0~9 +* @param port_en 使能开关 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_qmu_port_pfc_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en); + +/***********************************************************/ +/**对外接口 读取物理端口pfc使能状态 +* @param vport_id vport号 +* @param port_id 端口0~9 +* @param p_port_en 使能开关 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_qmu_port_pfc_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en); + +/***********************************************************/ +/**对外接口 申请profile_id资源 +* @param vport_id vport号 +* @param numq 申请id个数 1 +* @param flags car类型 +* @param profile_id 限速模版号 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flags, + ZXIC_UINT64 *profile_id); + +/***********************************************************/ +/**对外接口 释放profile_id资源 +* @param vport_id vport号 +* @param flags car类型 +* @param profile_id 限速模版号 +* @return +* @remark 无 
+* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_id_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flags, + ZXIC_UINT64 profile_id); + +/***********************************************************/ +/**对外接口 配置flow_id和profile_id的绑定关系,并配置限速模板使能 +* @param vport_id vport号 +* @param car_type car模式 +* @param flow_id 队列号 +* @param drop_flag 丢弃标志 +* @param plcr_en 限速使能 +* @param profile_id 模板编号 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, ZXIC_UINT32 profile_id); + +/***********************************************************/ +/**对外接口 查询flow_id和profile_id的绑定关系 +* @param vport_id vport号 +* @param car_type car模式 +* @param flow_id 队列号 +* @param p_drop_flag 丢弃标志 +* @param p_plcr_en 限速使能 +* @param p_profile_id 模板编号 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 *p_drop_flag, + ZXIC_UINT32 *p_plcr_en, + ZXIC_UINT32 *p_profile_id); + +/***********************************************************/ +/**对外接口 配置profile_id限速模版 +* @param vport_id vport号 +* @param car_type car模式 +* @param pkt_sign 限速模式0-字节;1-包 +* @param profile_id 模板编号 +* @param p_car_profile_cfg 限速参数 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg); + +/***********************************************************/ +/**对外接口 查询profile_id限速模版参数 +* @param vport_id vport号 +* @param 
car_type car模式 +* @param pkt_sign 限速模式0-字节;1-包 +* @param profile_id 模板编号 +* @param p_car_profile_cfg 限速参数 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg); + +/***********************************************************/ +/**对外接口 配置队列映射关系 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param map_flow_id 映射队列号 +* @param map_sp 映射sp +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_map_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp); + +/***********************************************************/ +/**对外接口 配置队列映射关系 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param map_flow_id 映射队列号 +* @param map_sp 映射sp +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_map_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/qos/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/qos/source/Kbuild.include new file mode 100644 index 000000000000..060457ba39aa --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/qos/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/qos/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/qos/source/dpp_drv_qos.c 
b/drivers/net/ethernet/dinghai/en_np/qos/source/dpp_drv_qos.c new file mode 100644 index 000000000000..80eb341e97a3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/qos/source/dpp_drv_qos.c @@ -0,0 +1,1119 @@ +/************************************************************** + * 文件名称 : dpp_drv_qos.c + * 文件标识 : + * 内容摘要 : QOS出口调度树资源维护 + * 其它说明 : + * 当前版本 : 1.0 + * 作 者 : sun + * 完成日期 : + ***************************************************************/ + +/****************************************************************************** + * 头文件 * + *****************************************************************************/ +#include "dpp_drv_qos.h" + +/***********************************************************/ +/**对外接口 TM资源申请 + * @param vport_id--vport号 + * @param pp_port--端口0~9 + * @param numq--申请id个数 1 + * @param level--挂接层级 + * @param flags--se_id类型 + * @param gsch_id--调度单元号 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_cosq_gsch_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT32 numq, ZXIC_UINT32 level, + ZXIC_UINT32 flags, ZXIC_UINT64 *p_gsch_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 num = 0; + ZXIC_UINT32 *gsch_id = ZXIC_NULL; + ZXIC_UINT32 gsch_id_h = 0; + ZXIC_UINT32 gsch_id_l = 0; + ZXIC_UINT64 temp_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + gsch_id = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(G_SCH_ID_LEN); + ZXIC_COMM_CHECK_POINT(gsch_id); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_dev_get", gsch_id); + + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(pf_info->vport, 0, + DPP_VPORT_NUM_MAX, gsch_id); + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(pp_port, 0, + DPP_TM_PP_NUM - 1, gsch_id); + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT( + level, 0, DPP_CRDT_LEVEL_MAX, gsch_id); + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(flags, 0, DPP_SCHE_TYPE_MAX, + gsch_id); + num = ((flags 
== FLOW_SCHE) ? numq : 1); + + ret = dpp_agent_channel_tm_seid_request(&dev, pp_port, pf_info->vport, + level, flags, num, gsch_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_agent_channel_tm_seid_request", + gsch_id); + + gsch_id_h = *(gsch_id + 1); + gsch_id_l = *gsch_id; + + temp_id = ((ZXIC_UINT64)gsch_id_h) << 32 | ((ZXIC_UINT64)gsch_id_l); + + if (DPP_OK != (ZXIC_UINT32)(temp_id >> 56)) { + ZXIC_COMM_FREE(gsch_id); + return DPP_ERR; + } + + *p_gsch_id = temp_id; + ZXIC_COMM_FREE(gsch_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_cosq_gsch_id_add); + +/***********************************************************/ +/**对外接口 TM资源释放 + * @param vport_id--vport号 + * @param pp_port--端口0~9 + * @param gsch_id--调度单元号 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_cosq_gsch_id_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT64 gsch_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 sche_level = 0; + ZXIC_UINT32 sche_type = 0; + ZXIC_UINT32 num = 1; + ZXIC_UINT32 se_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pp_port, 0, DPP_TM_PP_NUM - 1); + + DPP_TM_CRDT_LEVEL_GET(gsch_id, sche_level); + DPP_TM_CRDT_TYPE_GET(gsch_id, sche_type); + DPP_TM_CRDT_SE_ID_GET(gsch_id, se_id); + + ret = dpp_agent_channel_tm_seid_release(&dev, pp_port, pf_info->vport, + sche_level, sche_type, num, + se_id); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_tm_seid_release"); + + // if (DPP_OK != ret) + // { + // return DPP_ERR; + // } + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_cosq_gsch_id_delete); + +/***********************************************************/ +/**对外接口 读取TM根节点 + * @param vport_id--vport号 + * @param pp_port--端口0~9 + * @param gsch_id--调度单元号 + * @return + * @remark 
无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_sch_base_node_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT64 *p_gsch_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 *gsch_id = ZXIC_NULL; + ZXIC_UINT32 gsch_id_h = 0; + ZXIC_UINT32 gsch_id_l = 0; + ZXIC_UINT64 temp_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + gsch_id = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(G_SCH_ID_LEN); + ZXIC_COMM_CHECK_POINT(gsch_id); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_dev_get", gsch_id); + + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(pf_info->vport, 0, + DPP_VPORT_NUM_MAX, gsch_id); + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(pp_port, 0, + DPP_TM_PP_NUM - 1, gsch_id); + + ret = dpp_agent_channel_tm_base_node_get(&dev, pp_port, pf_info->vport, + gsch_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE( + ret, "dpp_agent_channel_tm_base_node_get", gsch_id); + + gsch_id_h = *(gsch_id + 1); + gsch_id_l = *gsch_id; + + temp_id = ((ZXIC_UINT64)gsch_id_h) << 32 | ((ZXIC_UINT64)gsch_id_l); + if (DPP_OK != (ZXIC_UINT32)(temp_id >> 56)) { + ZXIC_COMM_FREE(gsch_id); + return DPP_ERR; + } + + *p_gsch_id = temp_id; + ZXIC_COMM_FREE(gsch_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_sch_base_node_get); + +/***********************************************************/ +/**对外接口 配置se->pp->dev挂接关系 + * @param vport_id--vport号 + * @param se_id--调度器号 + * @param pp_id-端口号 + * @param weight-权重1 + * @param sp_mapping-优先级0-7 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_crdt_se_pp_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 pp_id, ZXIC_UINT32 weight, + ZXIC_UINT32 sp_mapping) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + 
ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(se_id, 0, DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pp_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(weight, 0, DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(sp_mapping, DPP_TM_SCH_SP_0, + DPP_TM_SCH_SP_8); + + ret = dpp_tm_crdt_se_pp_link_set(&dev, se_id, pp_id, weight, + sp_mapping); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_crdt_se_pp_link_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_crdt_se_pp_link_set); + +/***********************************************************/ +/**对外接口 配置se->se层次化挂接关系 + * @param vport_id--vport号 + * @param se_id--调度器号 + * @param se_linkid--上级调度器号 + * @param se_weight -权重 + * @param se_sp-优先级0-7 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_crdt_se_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(se_id, 0, DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(se_linkid, 0, DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(se_weight, 0, DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(se_sp, DPP_TM_SCH_SP_0, + DPP_TM_SCH_SP_8); + + ret = dpp_tm_crdt_se_link_set(&dev, se_id, se_linkid, se_weight, se_sp); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_crdt_se_link_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_crdt_se_link_set); + +/***********************************************************/ +/**对外接口 配置flow级流队列挂接关系 + * @param vport_id--vport号 + * @param flow_id--0~4095 + * @param c_linkid--c桶se_id + * @param c_weight--c桶权重 + * @param c_sp--c桶优先级 
+ * @param mode--0-单桶 1-双桶 + * @param e_linkid--e桶se_id + * @param e_weight--e桶权重 + * @param e_sp--e桶优先级 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_crdt_flow_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 c_linkid, ZXIC_UINT32 c_weight, + ZXIC_UINT32 c_sp, ZXIC_UINT32 mode, + ZXIC_UINT32 e_linkid, ZXIC_UINT32 e_weight, + ZXIC_UINT32 e_sp) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_tm_crdt_flow_link_set(&dev, flow_id, c_linkid, c_weight, c_sp, + mode, e_linkid, e_weight, e_sp); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_crdt_flow_link_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_crdt_flow_link_set); + +/***********************************************************/ +/**对外接口 删除flow级流队列挂接关系 + * @param vport_id--vport号 + * @param id_s--起始flowid + * @param id_e--终止flowid + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_crdt_del_flow_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(id_s, 0, DPP_ETM_CRDT_NUM); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(id_e, 0, DPP_ETM_CRDT_NUM); + + ret = dpp_tm_crdt_del_flow_link_set(&dev, id_s, id_e); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_crdt_del_flow_link_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_crdt_del_flow_link_set); + +/***********************************************************/ +/**对外接口 删除调度器挂接关系 + * @param 
vport_id--vport号 + * @param id_s--起始seid + * @param id_e--终止seid + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_crdt_del_se_link_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(id_s, 0, DPP_ETM_FQSPWFQ_NUM); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(id_e, 0, DPP_ETM_FQSPWFQ_NUM); + + ret = dpp_tm_crdt_del_se_link_set(&dev, id_s, id_e); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_crdt_del_se_link_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_crdt_del_se_link_set); + +/***********************************************************/ +/**对外接口 配置端口级整形 + * @param vport_id--vport号 + * @param pp_port--端口0~9 + * @param cir 单位Kb + * @param cbs 单位KB + * @param c_en c桶使能 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_port_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pp_port, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(c_en, 0, 1); + + // ret = dpp_tm_shape_pp_para_wr(&dev, pp_port, cir, cbs, c_en); + // ZXIC_COMM_CHECK_RC(ret, "dpp_tm_shape_pp_para_wr"); + + ret = dpp_agent_channel_tm_port_shape(&dev, pp_port, cir, cbs, c_en); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_tm_port_shape"); + + return ret; +} +EXPORT_SYMBOL(dpp_port_shape_set); + 
+/***********************************************************/ +/**对外接口 读取端口级整形 + * @param vport_id--vport号 + * @param pp_port--端口0~9 + * @param p_para 整形信息:cir/cbs/en + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_port_shape_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pp_port, + DPP_TM_SHAPE_PP_PARA_T *p_para) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + DPP_TM_SHAPE_PP_PARA_T pp_shap_para = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pp_port, 0, DPP_TM_PP_NUM - 1); + + ret = dpp_tm_shape_pp_para_get(&dev, pp_port, &pp_shap_para); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_shape_pp_para_get"); + + p_para->c_en = pp_shap_para.c_en; + p_para->cir = pp_shap_para.cir; + p_para->cbs = pp_shap_para.cbs; + + return ret; +} +EXPORT_SYMBOL(dpp_port_shape_get); + +/***********************************************************/ +/**对外接口 配置调度器整形 + * @param vport_id vport号 + * @param se_id 调度器编号号 + * @param pir pir总速率,单位Kb,范围同cir + * @param pbs pbs总桶深,单位KB,范围同cbs + * @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 + * @param cir 调度器cir速率,单位Kb + * @param cbs 调度器cbs桶深,单位KB + * 注:cbs=0 表示关闭整形,即不限速 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_se_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, ZXIC_UINT32 db_en, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + // ret = dpp_tm_shape_se_para_set(&dev, se_id, pir, pbs, db_en, cir, cbs); + 
// ZXIC_COMM_CHECK_RC(ret, "dpp_tm_shape_se_para_set"); + + ret = dpp_agent_channel_tm_se_shape(&dev, se_id, pir, pbs, db_en, cir, + cbs); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_tm_se_shape"); + + return ret; +} +EXPORT_SYMBOL(dpp_se_shape_set); + +/***********************************************************/ +/**对外接口 配置flow整形 + * @param vport_id vport号 + * @param flow_id 流队列号 + * @param cir cir速率,单位Kb + * @param cbs cbs桶深,单位KB + * 注:cbs=0 表示关闭整形,即不限速 + * @param db_en 双桶整形使能,0-单桶,1-双桶 + * @param eir eir速率,单位Kb + * @param ebs ebs桶深,单位KB + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_flow_shape_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + // ret = dpp_tm_shape_flow_para_set(&dev, flow_id, cir, cbs, db_en, eir, ebs); + // ZXIC_COMM_CHECK_RC(ret, "dpp_tm_shape_flow_para_set"); + + ret = dpp_agent_channel_tm_flow_shape(&dev, flow_id, cir, cbs, db_en, + eir, ebs); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_tm_flow_shape"); + + return ret; +} +EXPORT_SYMBOL(dpp_flow_shape_set); + +/***********************************************************/ +/**对外接口 配置流队列挂接到端口号 + * @param vport_id vport号 + * @param flow_id 流队列号 + * @param port 端口0~9 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_flow_map_port_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 port) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, 
"dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_tm_cgavd_q_map_pp_set(&dev, flow_id, port); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_cgavd_q_map_pp_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_flow_map_port_set); + +/***********************************************************/ +/**对外接口 读取流队列挂接的端口号 + * @param vport_id vport号 + * @param flowid 流队列号 + * @param port 端口0~9 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_flow_map_port_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_port) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 pp_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_tm_cgavd_q_map_pp_get(&dev, flow_id, &pp_id); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_cgavd_q_map_pp_get"); + + *p_port = pp_id; + + return ret; +} +EXPORT_SYMBOL(dpp_flow_map_port_get); + +/***********************************************************/ +/**对外接口 配置TD门限值 + * @param vport_id vport号 + * @param flow_id 流队列号 + * @param td_th 配置的丢弃门限值 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_flow_td_th_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 td_th) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + // ret = dpp_tm_cgavd_td_th_set(&dev, QUEUE_LEVEL, flow_id, td_th); + // ZXIC_COMM_CHECK_RC(ret, "dpp_tm_cgavd_td_th_set"); + + ret = dpp_agent_channel_tm_td_set(&dev, QUEUE_LEVEL, flow_id, td_th); + ZXIC_COMM_CHECK_RC(ret, 
"dpp_agent_channel_tm_td_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_flow_td_th_set); + +/***********************************************************/ +/**对外接口 读取TD门限值 + * @param vport_id vport号 + * @param flow_id 流队列号 + * @param p_td_th 配置的丢弃门限值 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_flow_td_th_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_td_th) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 td_th = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_tm_cgavd_td_th_get(&dev, QUEUE_LEVEL, flow_id, &td_th); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_cgavd_td_th_get"); + + *p_td_th = td_th; + + return ret; +} +EXPORT_SYMBOL(dpp_flow_td_th_get); + +/***********************************************************/ +/**对外接口 设置block值 +* @param vport_id vport号 +* @param size 配置block值 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_blk_size_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 size) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ret = dpp_tm_cfgmt_blk_size_set(&dev, size); + ZXIC_COMM_CHECK_RC_NO_ASSERT(ret, "dpp_tm_cfgmt_blk_size_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_blk_size_set); + +/***********************************************************/ +/**对外接口 配置全局pfc使能状态 + * @param vport_id vport号 + * @param pfc_en 使能开关 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS 
dpp_qmu_pfc_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 pfc_en) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pfc_en, 0, 1); + + ret = dpp_tm_qmu_pfc_en_set(&dev, pfc_en); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_qmu_pfc_en_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_qmu_pfc_en_set); + +/***********************************************************/ +/**对外接口 读取全局pfc使能状态 + * @param vport_id vport号 + * @param p_pfc_en 使能开关 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_qmu_pfc_en_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *p_pfc_en) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 pfc_en = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pfc_en, 0, 1); + + ret = dpp_tm_qmu_pfc_en_get(&dev, &pfc_en); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_qmu_pfc_en_set"); + + *p_pfc_en = pfc_en; + + return ret; +} +EXPORT_SYMBOL(dpp_qmu_pfc_en_get); + +/***********************************************************/ +/**对外接口 配置物理端口pfc使能状态 + * @param vport_id vport号 + * @param port_id 端口0~9 + * @param port_en 使能开关 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_qmu_port_pfc_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, 
DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_en, 0, 1); + + ret = dpp_tm_qmu_port_pfc_make_set(&dev, port_id, port_en); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_qmu_port_pfc_make_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_qmu_port_pfc_set); + +/***********************************************************/ +/**对外接口 读取物理端口pfc使能状态 + * @param vport_id vport号 + * @param port_id 端口0~9 + * @param p_port_en 使能开关 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_qmu_port_pfc_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 port_en = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(port_id, 0, DPP_TM_PP_NUM - 1); + + ret = dpp_tm_qmu_port_pfc_make_get(&dev, port_id, &port_en); + ZXIC_COMM_CHECK_RC(ret, "dpp_tm_qmu_port_pfc_make_get"); + + *p_port_en = port_en; + + return ret; +} +EXPORT_SYMBOL(dpp_qmu_port_pfc_get); + +/***********************************************************/ +/**对外接口 申请profile_id资源 +* @param vport_id vport号 +* @param flags car类型 +* @param p_profile_id 限速模版号 +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flags, + ZXIC_UINT64 *p_profile_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 *profile_id = ZXIC_NULL; + ZXIC_UINT32 profile_id_h = 0; + ZXIC_UINT32 profile_id_l = 0; + ZXIC_UINT64 temp_profile_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + profile_id = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(G_PROFILE_ID_LEN); + ZXIC_COMM_CHECK_POINT(profile_id); + 
+ ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(ret, "dpp_dev_get", profile_id); + + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT( + pf_info->vport, 0, DPP_VPORT_NUM_MAX, profile_id); + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(flags, 0, CAR_TYPE_MAX, + profile_id); + + ret = dpp_agent_channel_plcr_profileid_request(&dev, pf_info->vport, + flags, profile_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE( + ret, "dpp_agent_channel_plcr_profileid_request", profile_id); + + profile_id_h = *(profile_id + 1); + profile_id_l = *profile_id; + + temp_profile_id = ((ZXIC_UINT64)profile_id_l) << 32 | + ((ZXIC_UINT64)profile_id_h); + + if (DPP_OK != (ZXIC_UINT32)(temp_profile_id >> 56)) { + ZXIC_COMM_FREE(profile_id); + return DPP_ERR; + } + + *p_profile_id = temp_profile_id; + ZXIC_COMM_FREE(profile_id); + + return ret; +} +EXPORT_SYMBOL(dpp_car_profile_id_add); + +/***********************************************************/ +/**对外接口 释放profile_id资源 +* @param vport_id vport号 +* @param flags car类型 +* @param profile_id 限速模版号 + * @return + * @remark 无 + * @see + * @author sun @date 2023/11/17 + ************************************************************/ +DPP_STATUS dpp_car_profile_id_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 flags, + ZXIC_UINT64 profile_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 profileid = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + DPP_CAR_PROFILE_ID_GET(profile_id, profileid); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(flags, 0, CAR_TYPE_MAX); + + ret = dpp_agent_channel_plcr_profileid_release(&dev, pf_info->vport, + flags, profileid); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_plcr_profileid_release"); + + // if (DPP_OK != ret) + // { + // return DPP_ERR; + // } + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_car_profile_id_delete); + 
+/***********************************************************/ +/**对外接口 配置flow_id和profile_id的绑定关系,并配置限速模板使能 +* @param vport_id vport号 +* @param car_type car模式 +* @param flow_id 队列号 +* @param drop_flag 丢弃标志 +* @param plcr_en 限速使能 +* @param profile_id 模板编号 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, ZXIC_UINT32 profile_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_stat_car_queue_cfg_set(&dev, car_type, flow_id, drop_flag, + plcr_en, profile_id); + ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_queue_cfg_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_queue_cfg_set); + +/***********************************************************/ +/**对外接口 查询flow_id和profile_id的绑定关系 +* @param vport_id vport号 +* @param car_type car模式 +* @param flow_id 队列号 +* @param p_drop_flag 丢弃标志 +* @param p_plcr_en 限速使能 +* @param p_profile_id 模板编号 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 *p_drop_flag, + ZXIC_UINT32 *p_plcr_en, + ZXIC_UINT32 *p_profile_id) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_CHECK_DEV_POINT(0, p_drop_flag); + ZXIC_COMM_CHECK_DEV_POINT(0, p_plcr_en); + ZXIC_COMM_CHECK_DEV_POINT(0, p_profile_id); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = 
dpp_stat_car_queue_cfg_get(&dev, car_type, flow_id, p_drop_flag, + p_plcr_en, p_profile_id); + ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_queue_cfg_get"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_queue_cfg_get); + +/***********************************************************/ +/**对外接口 配置profile_id限速模版 +* @param dev_id +* @param car_type +* @param pkt_sign +* @param profile_id +* @param p_car_profile_cfg +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_POINT(p_car_profile_cfg); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + // ret = dpp_stat_car_profile_cfg_set(&dev, car_type, pkt_sign, profile_id, p_car_profile_cfg); + // ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_queue_cfg_set"); + + ret = dpp_agent_channel_plcr_car_rate(&dev, car_type, pkt_sign, + profile_id, p_car_profile_cfg); + ZXIC_COMM_CHECK_RC(ret, "dpp_agent_channel_plcr_car_rate"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_profile_cfg_set); + +/***********************************************************/ +/**对外接口 查询profile_id限速模版参数 +* @param vport_id vport号 +* @param car_type car模式 +* @param pkt_sign 限速模式0-字节;1-包 +* @param profile_id 模板编号 +* @param p_car_profile_cfg 限速参数 +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_profile_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = 
dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_POINT(p_car_profile_cfg); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_stat_car_profile_cfg_get(&dev, car_type, pkt_sign, profile_id, + p_car_profile_cfg); + ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_profile_cfg_get"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_profile_cfg_get); + +/***********************************************************/ +/**对外接口 配置队列映射关系 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param map_flow_id 映射队列号 +* @param map_sp 映射sp +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_map_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_stat_car_queue_map_set(&dev, car_type, flow_id, map_flow_id, + map_sp); + ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_queue_map_set"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_queue_map_set); + +/***********************************************************/ +/**对外接口 配置队列映射关系 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param map_flow_id 映射队列号 +* @param map_sp 映射sp +* +* @return +* @remark 无 +* @see +* @author sun @date 2023/11/17 +************************************************************/ +DPP_STATUS dpp_car_queue_map_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp) +{ + DPP_STATUS ret = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ret = dpp_dev_get(pf_info, &dev); + 
ZXIC_COMM_CHECK_RC(ret, "dpp_dev_get"); + + ZXIC_COMM_CHECK_POINT(p_map_flow_id); + ZXIC_COMM_CHECK_POINT(p_map_sp); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(pf_info->vport, 0, DPP_VPORT_NUM_MAX); + + ret = dpp_stat_car_queue_map_get(&dev, car_type, flow_id, p_map_flow_id, + p_map_sp); + ZXIC_COMM_CHECK_RC(ret, "dpp_stat_car_queue_map_get"); + + return ret; +} +EXPORT_SYMBOL(dpp_car_queue_map_get); diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/Kbuild.include new file mode 100644 index 000000000000..bf3528531db0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/sdk/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_apt_se_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_apt_se_api.h new file mode 100644 index 000000000000..da25ca12e18f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_apt_se_api.h @@ -0,0 +1,620 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_api.h +* 文件标识: +* 内容摘要 : SE适配业务接口数据结构和函数声明 +* 其它说明: +* 当前版本: +* 作 者 : chenqin00181032 +* 完成日期 : 2022/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_APT_SE_API_H_ +#define _DPP_APT_SE_API_H_ + +#include "zxic_common.h" + +#if ZXIC_REAL("header file") +#include "dpp_dev.h" +#include "dpp_se_api.h" +#include "dpp_etcam.h" +#include "dpp_se.h" +#include "dpp_agent_se_res.h" +#endif + +#if ZXIC_REAL("data struct define") +typedef ZXIC_UINT32 (*DPP_APT_ACL_ENTRY_SET_FUNC)(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T *aclEntry); +typedef ZXIC_UINT32 (*DPP_APT_ACL_ENTRY_GET_FUNC)(ZXIC_VOID *pData, + DPP_ACL_ENTRY_EX_T 
*aclEntry); + +typedef ZXIC_UINT32 (*DPP_APT_ERAM_SET_FUNC)(ZXIC_VOID *pData, + ZXIC_UINT32 buf[4]); +typedef ZXIC_UINT32 (*DPP_APT_ERAM_GET_FUNC)(ZXIC_VOID *pData, + ZXIC_UINT32 buf[4]); + +typedef ZXIC_UINT32 (*DPP_APT_HASH_ENTRY_SET_FUNC)(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry); +typedef ZXIC_UINT32 (*DPP_APT_HASH_ENTRY_GET_FUNC)(ZXIC_VOID *pData, + DPP_HASH_ENTRY *pEntry); + +typedef ZXIC_UINT32 (*DPP_APT_LPM_ENTRY_SET_FUNC)(ZXIC_VOID *pData, + ZXIC_VOID *pEntry); +typedef ZXIC_UINT32 (*DPP_APT_LPM_ENTRY_GET_FUNC)(ZXIC_VOID *pData, + ZXIC_VOID *pEntry); + +typedef ZXIC_UINT32 (*DPP_APT_DDR_SET_FUNC)( + ZXIC_VOID *pData, ZXIC_UINT32 buf[DPP_DIR_TBL_BUF_MAX_NUM]); +typedef ZXIC_UINT32 (*DPP_APT_DDR_GET_FUNC)( + ZXIC_VOID *pData, ZXIC_UINT32 buf[DPP_DIR_TBL_BUF_MAX_NUM]); + +typedef enum dpp_se_res_type_e { + SE_STD_NIC_RES_TYPE = 0, /**< @brief 标卡资源*/ + SE_NON_STD_NIC_RES_TYPE = 1, /**< @brief 非标卡资源(业务卸载)*/ + SE_RES_TYPE_BUTT +} DPP_SE_RES_TYPE_E; + +typedef struct dpp_apt_eram_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + DPP_SDTTBL_ERAM_T eRamSdt; /** <@brief eRam属性*/ + ZXIC_UINT32 + opr_mode; /**cpu读写位宽模式DPP_ERAM128_OPR_MODE_E 0:128b 1:64b 2:1b 3:32b <@*/ + ZXIC_UINT32 + rd_mode; /*读清模式DPP_ERAM128_RD_CLR_MODE_E,0:正常读 1:读清模式*/ + DPP_APT_ERAM_SET_FUNC eram_set_func; /** <@brief 结构体转换为码流 */ + DPP_APT_ERAM_GET_FUNC eram_get_func; /** <@brief 码流转换为结构体 */ +} DPP_APT_ERAM_TABLE_T; + +typedef struct dpp_apt_ddr_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + DPP_SDTTBL_DDR3_T eDdrSdt; /** <@brief DDR属性*/ + ZXIC_UINT32 ddr_table_depth; /** <@brief DDR表项深度,单位与读写模式一致*/ + DPP_APT_DDR_SET_FUNC ddr_set_func; /** <@brief 结构体转换为码流 */ + DPP_APT_DDR_GET_FUNC ddr_get_func; /** <@brief 码流转换为结构体 */ +} DPP_APT_DDR_TABLE_T; + +typedef struct dpp_apt_acl_res_t { + ZXIC_UINT32 + pri_mode; /** <@brief1:显式优先级,2:隐式优先级,以条目下发顺序作为优先级,3:用户指定每个条目在tcam中的存放索引*/ + ZXIC_UINT32 entry_num; /** <@brief 可配置的条目数*/ + ZXIC_UINT32 block_num; /** <@brief 最大8个 */ + 
ZXIC_UINT32 block_index[DPP_ETCAM_BLOCK_NUM]; /** <@brief 0~7 */ +} DPP_APT_ACL_RES_T; + +typedef struct dpp_apt_acl_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + ZXIC_UINT32 + sdt_partner; /** <@brief sdt no 0~255,eram直接表维护acl index信息,不存在,则设置无效值-1(0xffffffff) */ + DPP_SDTTBL_ETCAM_T aclSdt; /** <@brief acl属性*/ + DPP_APT_ACL_RES_T aclRes; /** <@brief acl资源*/ + DPP_APT_ACL_ENTRY_SET_FUNC acl_set_func; /** <@brief 结构体转换为码流 */ + DPP_APT_ACL_ENTRY_GET_FUNC acl_get_func; /** <@brief 码流转换为结构体 */ +} DPP_APT_ACL_TABLE_T; + +typedef struct dpp_apt_hash_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + ZXIC_UINT32 + sdt_partner; /** <@brief 二级hash sdt号0~255,如果没有二级hash,则设置为无效值-1(0xffffffff) */ + DPP_SDTTBL_HASH_T hashSdt; /** <@brief hash sdt属性*/ + ZXIC_UINT32 + tbl_flag; /**< @brief 业务表初始化标记(bit0:老化保活置位使能,bit1:硬件学习使能,bit2:微码写表使能)*/ + DPP_APT_HASH_ENTRY_SET_FUNC + hash_set_func; /** <@brief 结构体转换为码流,转换时预留一个字节,从第1字节开始填充 */ + DPP_APT_HASH_ENTRY_GET_FUNC hash_get_func; /** <@brief 码流转换为结构体 */ +} DPP_APT_HASH_TABLE_T; + +typedef struct dpp_apt_hash_func_res_t { + ZXIC_UINT32 func_id; /**< @brief hash引擎id 0~3*/ + ZXIC_UINT32 zblk_num; /**< @brief 0~32*/ + ZXIC_UINT32 zblk_bitmap; /**< @brief 置1的bit位表示分配的block编号 */ + ZXIC_UINT32 ddr_dis; /** <@brief 0:混合模式,1:纯片内模式*/ +} DPP_APT_HASH_FUNC_RES_T; + +typedef struct dpp_apt_hash_bulk_res_t { + ZXIC_UINT32 func_id; /**< @brief 0~3*/ + ZXIC_UINT32 bulk_id; /**< @brief 0~7*/ + ZXIC_UINT32 zcell_num; /**< @brief 0~128*/ + ZXIC_UINT32 zreg_num; /**< @brief 0~128*/ + ZXIC_UINT32 + ddr_baddr; /**< @brief 分配给hash的DDR空间的硬件基地址,单位2k*256bit*/ + ZXIC_UINT32 + ddr_item_num; /**< @brief 复用字段,分配给hash的DDR空间单元数目,以256bit为一个单元(根据分配的基地址和单元数目确定分配的DDR空间大小)*/ + /**< @brief 复用字段,纯片内场景,支持的hash条目最大数目*/ + DPP_HASH_DDR_WIDTH_MODE + ddr_width_mode; /**< @brief 分配给hash的DDR空间物理存储位宽模式,0:无效值,1:256bit 2:512bit*/ + ZXIC_UINT32 + ddr_crc_sel; /**< @brief 选择一个DDR CRC多项式,取值范围0~3,0~3分别对应一个CRC多项式*/ + ZXIC_UINT32 ddr_ecc_en; /**< @brief DDR ECC使能: 
0-不使能,1-使能*/ +} DPP_APT_HASH_BULK_RES_T; + +typedef struct dpp_apt_route_res_t { + ZXIC_UINT32 lpm_flags; + ZXIC_UINT32 zblk_num; /**< @brief LPM ipv4和ipv6共享的zblock数目*/ + ZXIC_UINT32 zblk_bitmap; /**< @brief LPM ipv4和ipv6共享的bitmap*/ + ZXIC_UINT32 mono_ipv4_zblk_num; /**< @brief ipv4独占zblock数目*/ + ZXIC_UINT32 mono_ipv4_zblk_bitmap; /**< @brief ipv4独占zblock bitmap*/ + ZXIC_UINT32 mono_ipv6_zblk_num; /**< @brief ipv6独占zblock数目*/ + ZXIC_UINT32 mono_ipv6_zblk_bitmap; /**< @brief ipv6独占zblock bitmap*/ + ZXIC_UINT32 + ddr4_item_num; /**< @brief 分配给ipv4前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr4_baddr; /**< @brief 分配给ipv4前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr4_base_offset; /**< @brief ipv4前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr4_ecc_en; /**< @brief 固定配为1,分配给ipv4前缀查找的ddr存储空间的ECC校验使能标志*/ + ZXIC_UINT32 + ddr6_item_num; /**< @brief 分配给ipv6前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr6_baddr; /**< @brief 分配给ipv6前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr6_base_offset; /**< @brief ipv6前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr6_ecc_en; /**< @brief 固定配为1,分配给ipv4前缀查找的ddr存储空间的ECC校验使能标志*/ +} DPP_APT_ROUTE_RES_T; + +typedef struct dpp_apt_lpm_table_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + DPP_SDTTBL_LPM_T lpmSdt; /** <@brief lpm属性*/ + DPP_ROUTE_AS_ERAM_T as_eram_cfg + [DPP_SMMU0_LPM_AS_TBL_ID_NUM]; /**< @brief LPM级联eRam结果表空间属性*/ + DPP_ROUTE_AS_DDR_T as_ddr_cfg; /**< @brief LPM级联DDR结果表空间属性*/ + DPP_APT_LPM_ENTRY_SET_FUNC lpm_set_func; /** <@brief 结构体转换为码流 */ + DPP_APT_LPM_ENTRY_GET_FUNC lpm_get_func; /** <@brief 码流转换为结构体 */ +} DPP_APT_LPM_TABLE_T; + +typedef struct dpp_apt_eram_res_init_t { + ZXIC_UINT32 tbl_num; + DPP_APT_ERAM_TABLE_T *eram_res; +} DPP_APT_ERAM_RES_INIT_T; + +typedef struct dpp_ddr_res_init_t { + ZXIC_UINT32 tbl_num; + DPP_APT_DDR_TABLE_T *ddr_res; +} DPP_APT_DDR_RES_INIT_T; + +typedef struct dpp_apt_hash_res_init_t { + ZXIC_UINT32 func_num; + ZXIC_UINT32 bulk_num; + ZXIC_UINT32 tbl_num; + 
DPP_APT_HASH_FUNC_RES_T *func_res; + DPP_APT_HASH_BULK_RES_T *bulk_res; + DPP_APT_HASH_TABLE_T *tbl_res; +} DPP_APT_HASH_RES_INIT_T; + +typedef struct dpp_apt_lpm_res_init_t { + ZXIC_UINT32 tbl_num; /*最大个数为2*/ + DPP_APT_LPM_TABLE_T *lpm_res; /*ipv4/ipv6资源*/ + DPP_APT_ROUTE_RES_T *glb_res; /*ipv4/ipv6公共资源*/ +} DPP_APT_LPM_RES_INIT_T; + +typedef struct dpp_apt_acl_res_init_t { + ZXIC_UINT32 tbl_num; + DPP_APT_ACL_TABLE_T *acl_res; +} DPP_APT_ACL_RES_INIT_T; + +typedef struct dpp_apt_stat_res_init_t { + ZXIC_UINT32 eram_baddr; /*片内统计基地址,单位128bit*/ + ZXIC_UINT32 eram_depth; /*片内统计深度,单位128bit*/ + ZXIC_UINT32 ddr_baddr; /*片外统计基地址,单位2k*256bit*/ + ZXIC_UINT32 ppu_ddr_offset; /*片外DDR统计偏移,单位128bit,默认为0*/ +} DPP_APT_STAT_RES_INIT_T; + +typedef struct dpp_stat_item_t { + //ZXIC_UINT32 index; /*统计项索引*/ + ZXIC_UINT32 mode; /*统计项模式:0:64bit,1:128bit*/ + ZXIC_UINT32 addr_offset; /*统计项地址偏移,单位与模式一致*/ + ZXIC_UINT32 depth; /*统计项深度,单位与模式一致*/ +} DPP_APT_STAT_ITEM_T; +#if 0 +typedef struct dpp_stat_tbl_init_t { + ZXIC_UINT32 stat_item_num; /*统计项个数,最多256个*/ + DPP_APT_STAT_ITEM_T stat_item[STAT_ITEM_MAX_NUM]; /*统计表项信息*/ +} DPP_APT_STAT_TBL_INIT_T; +#endif +typedef struct dpp_apt_se_res_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 hash_func_num; + ZXIC_UINT32 hash_bulk_num; + ZXIC_UINT32 hash_tbl_num; + ZXIC_UINT32 eram_num; + ZXIC_UINT32 acl_num; + ZXIC_UINT32 lpm_num; + ZXIC_UINT32 ddr_num; + ZXIC_UINT32 stat_item_num; + DPP_APT_HASH_FUNC_RES_T hash_func[HASH_FUNC_MAX_NUM]; + DPP_APT_HASH_BULK_RES_T hash_bulk[HASH_BULK_MAX_NUM]; + DPP_APT_HASH_TABLE_T hash_tbl[HASH_TABLE_MAX_NUM]; + DPP_APT_ERAM_TABLE_T eram_tbl[ERAM_MAX_NUM]; + DPP_APT_ACL_TABLE_T acl_tbl[ETCAM_MAX_NUM]; + DPP_APT_ROUTE_RES_T lpm_global_res; + DPP_APT_LPM_TABLE_T lpm_tbl[LPM_MAX_NUM]; + DPP_APT_DDR_TABLE_T ddr_tbl[DDR_MAX_NUM]; + DPP_APT_STAT_RES_INIT_T stat_cfg; + DPP_APT_STAT_ITEM_T stat_item[STAT_ITEM_MAX_NUM]; +} DPP_APT_SE_RES_T; + +#define DTB_DUMP_UNICAST_MAC_DUMP_NUM (32 * 257) +#define DTB_DUMP_MULTICAST_MAC_DUMP_NUM 
(32 * 257) +#endif + +#if ZXIC_REAL("SE APT FUNCTION") + +/***********************************************************/ +/** eram表资源初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的eram表个数 +* @param pEramTbl eram资源信息,包括SDT配置信息,直接表读取位宽和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_eram_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_ERAM_TABLE_T *pEramTbl); + +/***********************************************************/ +/** DDR表资源初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的DDR表个数 +* @param pDdrTbl ddr资源信息,包括SDT配置信息,直接表读取位宽和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/07/26 +************************************************************/ +DPP_STATUS dpp_apt_ddr_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_DDR_TABLE_T *pDdrTbl); + +/***********************************************************/ +/** acl资源初始化 +* @param dev_id 设备号 +* @param tbl_num etcam对应的sdt表个数 +* @param pAclTblRes acl表资源信息,包括SDT配置信息,acl资源(条目数,存放方式和占用的block)和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_acl_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_ACL_TABLE_T *pAclTblRes); + +/***********************************************************/ +/** acl软件资源释放 +* @param dev 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/30 +************************************************************/ +DPP_STATUS dpp_apt_acl_soft_res_uninit(DPP_DEV_T *dev); + +/***********************************************************/ +/** hash表全局资源初始化 +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_global_res_init(DPP_DEV_T *dev); + 
+/***********************************************************/ +/** hash表全局资源去初始化 +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2024/08/01 +************************************************************/ +DPP_STATUS dpp_apt_hash_global_res_uninit(DPP_DEV_T *dev); + +/***********************************************************/ +/** hash引擎初始化 +* @param dev_id 设备号 +* @param func_num 需初始化的hash引擎个数 1~4 +* @param pHashFuncRes 每个hash引擎分配的zblock个数和编号,以及分配模式(混合模式或者纯片内模式) +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_func_res_init(DPP_DEV_T *dev, ZXIC_UINT32 func_num, + DPP_APT_HASH_FUNC_RES_T *pHashFuncRes); + +/***********************************************************/ +/** hash引擎初始化(删除硬件数据) +* @param dev_id 设备号 +* @param func_num 需初始化的hash引擎个数 1~4 +* @param pHashFuncRes 每个hash引擎分配的zblock个数和编号,以及分配模式(混合模式或者纯片内模式) +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS +dpp_apt_hash_func_flush_hardware_all(DPP_DEV_T *dev, ZXIC_UINT32 func_num, + DPP_APT_HASH_FUNC_RES_T *pHashFuncRes, + ZXIC_UINT32 queue_id); + +/***********************************************************/ +/** hash引擎bulk空间初始化 +* @param dev_id 设备号 +* @param bulk_num 需初始化的bulk表个数 1~32 +* @param pBulkRes zcell和zreg资源占用信息,如果是混合模式,需进行DDR资源分配 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_bulk_res_init(DPP_DEV_T *dev, ZXIC_UINT32 bulk_num, + DPP_APT_HASH_BULK_RES_T *pBulkRes); + +/***********************************************************/ +/** hash业务表属性初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的业务表表个数 1~128 +* @param pHashTbl sdt配置信息,初始化标记和业务结构体码流转换函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 
+************************************************************/ +DPP_STATUS dpp_apt_hash_tbl_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_HASH_TABLE_T *pHashTbl); + +/***********************************************************/ +/** dtb eram表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @param pData 插入表项内容,由业务确定 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData); + +/***********************************************************/ +/** eram表项数据获取,从软件缓存中获取 +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @param pData 出参,返回业务表项内容 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData); + +/***********************************************************/ +/** eram表项删除,软件维护删除 +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index); + +/***********************************************************/ +/** dtb hash表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 插入hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_hash_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + 
+/***********************************************************/ +/** dtb hash表项删除 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 删除hash表项信息,由业务传入 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_hash_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + +/***********************************************************/ +/** dtb hash表项批量插入/更新 +* @param dev_id 设备号 +* @param queue_id 队列id +* @param sdt_no sdt号 0~255 +* @param entry_num 插入条目数 +* @param entry_size 插入条目结构体大小 +* @param pData 插入hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author cq @date 2024/10/23 +************************************************************/ +DPP_STATUS dpp_apt_dtb_multi_hash_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + ZXIC_UINT32 entry_size, + ZXIC_VOID *pData); + +/***********************************************************/ +/** dtb hash表项批量删除 +* @param dev_id 设备号 +* @param queue_id 队列id +* @param sdt_no sdt号 0~255 +* @param entry_num 删除条目数 +* @param entry_size 删除条目结构体大小 +* @param pData 删除hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author cq @date 2024/10/23 +************************************************************/ +DPP_STATUS dpp_apt_dtb_multi_hash_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + ZXIC_UINT32 entry_size, + ZXIC_VOID *pData); + +/***********************************************************/ +/** acl表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 业务插入表项内容,具体结构体由业务确定(结构体的第一个字段必须为index),SDK不感知 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + 
+/***********************************************************/ +/** acl表项删除 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 删除业务表项内容,仅需填入index信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/25 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_del(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + +/***********************************************************/ +/** acl表项查找(handle+data+mask有效) +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no sdt号 0~255 +* @param pData 查找表项 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/21 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_search(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + +/***********************************************************/ +/** 根据handle获取到acl表项信息 +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no sdt号 0~255 +* @param pData 查找表项 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/21 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData); + +/***********************************************************/ +/** 释放sdt资源以及适配资源 +* @param dev_id 设备号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/11/09 +************************************************************/ +DPP_STATUS dpp_apt_sdt_res_deinit(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no); + +/***********************************************************/ +/** 消息通道获取指定类型的所有流表资源 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_agent_se_res_get(DPP_DEV_T *dev); + +/***********************************************************/ +/** 初始化流表资源 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* 
@see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_init(DPP_DEV_T *dev); + +/***********************************************************/ +/** 消息通道获取指定类型流表资源&流表资源初始化 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_get_and_init(DPP_DEV_T *dev); + +/***********************************************************/ +/** 获取sdt对应的hash最大条目数 +* @param dev NP设备 +* @param sdt_no +* @param max_num 出参,获取的条目数上限 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/12/03 +************************************************************/ +DPP_STATUS dpp_hash_max_item_num_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 *max_num); + +/***********************************************************/ +/** 获取统计项的信息 +* @param dev NP设备 +* @param p_se_res 流表资源 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2025/02/15 +************************************************************/ +DPP_STATUS dpp_stat_tbl_get(DPP_DEV_T *dev, DPP_APT_SE_RES_T *p_se_res); + +/***********************************************************/ +/** 查看当前sdt号是否存在 +* @param p_se_res 流表资源 +* @param sdt_type 表类型 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/02/17 +************************************************************/ +DPP_STATUS dpp_apt_sdt_is_exist(DPP_APT_SE_RES_T *p_se_res, + DPP_SDT_TABLE_TYPE_E sdt_type, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 *p_is_exist); + +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_dtb_table_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_dtb_table_api.h new file mode 100644 index 000000000000..afa92db9d557 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_dtb_table_api.h @@ -0,0 +1,339 @@ +#ifndef _DPP_DTB_TABLE_API_H_ +#define 
_DPP_DTB_TABLE_API_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" +#include "zxic_common.h" +#include "dpp_stat_api.h" + +#define DPP_DTB_DUMP_ZCAM_TYPE ((ZXIC_UINT32)(0)) +#define DPP_DTB_DUMP_DDR_TYPE ((ZXIC_UINT32)(1)) + +typedef struct dpp_dtb_user_entry_t { + ZXIC_UINT32 sdt_no; /*流表sdt号*/ + ZXIC_VOID *p_entry_data; /*该流表的数据,数据结构见各表的结构体定义*/ +} DPP_DTB_USER_ENTRY_T; + +typedef struct dpp_dtb_eram_entry_info_t { + ZXIC_UINT32 + index; /*条目index,根据wrt_mode模式为单位的的index,支持1/64/128bit */ + ZXIC_UINT32 *p_data; /*写入的表项信息 */ +} DPP_DTB_ERAM_ENTRY_INFO_T; + +typedef struct dpp_dtb_ddr_entry_info_t { + ZXIC_UINT32 + index; /*条目index,根据wrt_mode模式定义的index,支持128/256/384/512bit*/ + ZXIC_UINT32 *p_data; /*写入的表项信息*/ +} DPP_DTB_DDR_ENTRY_INFO_T; + +typedef struct dpp_dtb_hash_entry_info_t { + ZXIC_UINT8 * + p_actu_key; /*实际的键值,对于一种表来说键值长度是固定的,在初始化时,会通过dpp_hash_tbl_id_info_init进行配置*/ + ZXIC_UINT8 *p_rst; /*hash表结果 result的长度由当前业务结果位宽决定*/ +} DPP_DTB_HASH_ENTRY_INFO_T; + +typedef struct dpp_dtb_acl_entry_info_t { + ZXIC_UINT32 handle; /*条目索引*/ + ZXIC_UINT8 * + key_data; /*键值data部分 按mode的长度,支持640bit/320bit/160bit/80bit*/ + ZXIC_UINT8 *key_mask; /*键值mask部分 长度与data相同*/ + ZXIC_UINT8 + *p_as_rslt; /*关联结果,仅使能关联查找情况有效 支持1/64/128bit*/ +} DPP_DTB_ACL_ENTRY_INFO_T; + +typedef struct dpp_dtb_dump_index_t { + ZXIC_UINT32 index; /*index*/ + ZXIC_UINT32 index_type; /*index类型 */ +} DPP_DTB_DUMP_INDEX_T; + +typedef struct dtb_queue_dma_addr_info { + ZXIC_UINT32 slot_id; /*np所在的槽位号*/ + ZXIC_UINT32 queue_id; /*队列号*/ + ZXIC_UINT32 dma_size; /*该队列申请的dma大小*/ + ZXIC_UINT64 dma_phy_addr; /* dma 物理地址*/ + ZXIC_UINT64 dma_vir_addr; /* dma 内核虚拟地址*/ +} DTB_QUEUE_DMA_ADDR_INFO; + +/*dump地址信息获取*/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT64 *phy_addr, + ZXIC_UINT64 *vir_addr, ZXIC_UINT32 *size); + +/***********************************************************/ +/** DTB通道申请 +* @param devId NP设备号 +* @param pName 
申请DTB通道的唯一设备名(最大32字符) +* @param pQueueId 申请到的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_requst(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT16 vPort, ZXIC_UINT32 *pQueueId); + +/***********************************************************/ +/** DTB通道队列申请(申请到的队列在软件维护和硬件维护上都处于初态) +* @param devId NP设备号 +* @param pName 申请DTB通道的唯一设备名(最大32字符) +* @param pQueueId 申请到的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/05 +************************************************************/ +DPP_STATUS dpp_dtb_queue_requst_ex(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT32 *p_queue_id); + +/***********************************************************/ +/** DTB通道释放 +* @param devId NP设备号 +* @param pName 要释放DTB通道的唯一设备名(最大32字符) +* @param pQueueId 要释放的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT32 queueId); + +/***********************************************************/ +/** DTB通道释放(增加锁保护) +* @param devId NP设备号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release_ex(DPP_DEV_T *dev); + +/***********************************************************/ +/** 通知固件通道信息,保证固件与驱动配置一致 +* @param devId NP设备号 +* @param pName 要释放DTB通道的唯一设备名(最大32字符) +* @param vPort 端口号 +* @param pQueueId 同步队列 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_sync_cfg(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT16 vPort, ZXIC_UINT32 queueId); + +/***********************************************************/ +/** DTB队列软件资源释放 +* @param dev NP设备 +* @return +* @remark 无 
+* @see +* @author cq @date 2025/06/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release_soft(DPP_DEV_T *dev); + +/***********************************************************/ +/** DTB通道用户信息配置 +* @param devId NP设备号 +* @param queueId DTB通道编号 +* @param vPort vport信息 +* @param vector 中断号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_user_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT16 vPort, ZXIC_UINT32 vector); + +/***********************************************************/ +/** DTB通道下表空间地址设置,空间大小[32*(16+16*1024)B] +* @param devId NP设备号 +* @param queueId DTB通道编号 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_down_table_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queueId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr); + +/***********************************************************/ +/** DTB通道dump空间地址设置,空间大小[32*(16+16*1024)B] +* @param devId NP设备号 +* @param pName 要释放DTB通道的设备名 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_dump_table_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queueId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr); + +/***********************************************************/ +/** 大批量dump一个流表使用的地址空间配置 +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @param size (最大64MB) +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT32 sdtNo, ZXIC_UINT64 phyAddr, + ZXIC_UINT64 
virAddr, ZXIC_UINT32 size); + +/***********************************************************/ +/** 清除大批量dump一个流表使用的地址空间配置 +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_clear(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT32 sdtNo); + +/***********************************************************/ +/** 释放当前sdt下的所有流表(硬件方式) +* (适用于进程启动后,仅配置流表资源,软件未配置流表,但需要删除硬件上已配置流表的场景) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/04 +************************************************************/ +DPP_STATUS dpp_dtb_hash_offline_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no); + +/***********************************************************/ +/** 释放当前sdt下的所有流表(硬件方式) +* (适用于进程正常退出前删除表项,软件上有存储表项) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/04 +************************************************************/ +DPP_STATUS dpp_dtb_hash_online_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no); + +/***********************************************************/ +/** acl index资源申请 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param p_index 申请到的索引值,acl下表时使用 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_request(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, ZXIC_UINT32 *p_index); + +/***********************************************************/ +/** acl index资源释放 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param index 需要释放的索引值 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ 
+DPP_STATUS dpp_dtb_acl_index_release(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, ZXIC_UINT32 index); + +/***********************************************************/ +/** 离线删除与vport关联的acl表项和索引值 +* @param dev NP设备 +* @param queue_id dtb通道队列号(0~127) +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param counter_id 统计编号,对应微码中的address +* @param rd_mode 统计读取方式 0:64bit 1:128bit +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_offline_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 vport, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 rd_mode); + +/***********************************************************/ +/** 统计计数读清 +* @param dev NP设备 +* @param queue_id 队列号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_count_id 统计起始编号,对应微码中的address +* @param num 统计项个数 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_stat_ppu_cnt_clr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_count_id, + ZXIC_UINT32 num); + +/***********************************************************/ +/** 清除指定vport的对应的stat统计(dtb方式) +* @param dev NP设备 +* @param queue_id 队列号 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_counter_id 统计起始编号,对应微码中的address +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_stat_clr_by_vport(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 vport, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_counter_id); + +/***********************************************************/ +/** 消息通道获取pcie bar消息数目 +* @param dev NP设备 +* @param p_bar_msg_num 出参,获取pcie bar数目 +* @return +* @remark 无 +* @see +* 
@author cq @date 2024/11/16 +************************************************************/ +ZXIC_UINT32 dpp_pcie_bar_msg_num_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_bar_msg_num); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pbu_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pbu_api.h new file mode 100644 index 000000000000..b712552ea1fb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pbu_api.h @@ -0,0 +1,130 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pbu_api.h +* 文件标识 : pbu模块对外数据类型定义和接口函数声明 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2015/02/04 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PBU_API_H_ +#define _DPP_PBU_API_H_ + +#include "dpp_module.h" + +#if ZXIC_REAL("data struct define") +typedef struct dpp_pbu_mc_cos_para_t { + ZXIC_UINT32 mc_cos_th[8]; /**< @brief cos0~7的mc指针阈值*/ + ZXIC_UINT32 mc_cos_mode[8]; /**< @brief cos0~7的mc指针溢出处理模式*/ +} DPP_PBU_MC_COS_PARA_T; + +typedef struct dpp_pbu_port_th_para_t { + ZXIC_UINT32 lif_th; /**< @brief lif阈值*/ + ZXIC_UINT32 lif_prv; /**< @brief lif私有阈值*/ + ZXIC_UINT32 idma_prv; /**< @brief idma私有阈值*/ + ZXIC_UINT32 idma_th_cos0; /**< @brief idma cos0指针阈值*/ + ZXIC_UINT32 idma_th_cos1; /**< @brief idma cos1指针阈值*/ + ZXIC_UINT32 idma_th_cos2; /**< @brief idma cos2指针阈值*/ + ZXIC_UINT32 idma_th_cos3; /**< @brief idma cos3指针阈值*/ + ZXIC_UINT32 idma_th_cos4; /**< @brief idma cos4指针阈值*/ + ZXIC_UINT32 idma_th_cos5; /**< @brief idma cos5指针阈值*/ + ZXIC_UINT32 idma_th_cos6; /**< @brief idma cos6指针阈值*/ + ZXIC_UINT32 idma_th_cos7; /**< @brief idma cos7指针阈值*/ +} DPP_PBU_PORT_TH_PARA_T; + +typedef struct dpp_pbu_port_cos_th_para_t { + ZXIC_UINT32 cos_th + [8]; /**< @brief 
各cos对应的阈值,要求高优先级的阈值不小于低优先级的阈值 */ +} DPP_PBU_PORT_COS_TH_PARA_T; + +typedef struct dpp_pbu_global_th_t { + ZXIC_UINT32 idma_public_th; /**< @brief idma总的共有指针*/ + ZXIC_UINT32 lif_public_th; /**< @brief lif总的共有指针*/ + ZXIC_UINT32 idma_total_th; /**< @brief idma总的指针阈值,最大支持16384*/ + ZXIC_UINT32 lif_total_th; /**< @brief lif总的指针阈值,最大支持16384*/ + ZXIC_UINT32 mc_total_th; /**< @brief 组播总的指针阈值*/ +} DPP_PBU_GLOBAL_TH_T; + +#endif //struct + +#if ZXIC_REAL("function declaration") +#if 0 +/***********************************************************/ +/** 全局公共使用指针阈值总体配置 +* @param dev_id 芯片ID 0~3 +* @param p_global_th 参见DPP_PBU_GLOBAL_TH_T详细定义 +* +* @return +* @remark 无 +* @see +* @author pj @date 2019/12/04 +************************************************************/ +DPP_STATUS dpp_pbu_global_th_set(ZXIC_UINT32 dev_id, + DPP_PBU_GLOBAL_TH_T *p_global_th); + +#endif +/***********************************************************/ +/** 配置基于端口的指针阈值 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_th_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para); + +/***********************************************************/ +/** 配置指定端口按cos优先级起pfc流控的优先级流控指针阈值,仅对lif0的48个通道有效 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_cos_th_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para); + +#if 0 +/***********************************************************/ +/** pbu初始化函数 +* @param dev_id 设备编号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ 
+DPP_STATUS dpp_pbu_init(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 配置基于cos的mc复制指针阈值和mc指针溢出处理模式 +* @param dev_id 设备编号 +* @param p_para cos0~7的mc指针阈值和mc指针溢出处理模式 0-wait 1-disc +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/14 +************************************************************/ +DPP_STATUS dpp_pbu_mc_cos_para_set(ZXIC_UINT32 dev_id, + DPP_PBU_MC_COS_PARA_T *p_para); +#endif +#endif //function +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pktrx_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pktrx_api.h new file mode 100644 index 000000000000..5fe4fe852053 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_pktrx_api.h @@ -0,0 +1,86 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pbu_api.h +* 文件标识 : pbu模块对外数据类型定义和接口函数声明 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2015/02/04 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PKTRX_API_H_ +#define _DPP_PKTRX_API_H_ + +#include "zxic_common.h" +#include "dpp_module.h" +#include "dpp_reg.h" + +#define PKTRX_IND_CMD_WRT_FLAG (0) /** 写命令 */ +#define PKTRX_IND_CMD_RD_FLAG (1) /** 读命令 */ + +#define DPP_MCODE_FEATURE_LIST_NUM (6U) + +typedef enum dpp_pktrx_table_mem_id_e { + PHYPORT_TAB_0_MEM_ID = 0, /** 物理端口属性表0 */ + PHYPORT_TAB_1_MEM_ID = 1, /** 物理端口属性表1 */ + PHYPORT_TAB_2_MEM_ID = 2, /** 物理端口属性表2 */ + TCAM_MEM_ID_0 = 3, /** FLOWTCAM表0 */ + TCAM_MEM_ID_1 = 4, /** FLOWTCAM表1 */ + TCAM_RESULT_MEM_ID_0 = 5, /** FLOWTCAM结果表0 */ + TCAM_RESULT_MEM_ID_1 = 6, /** FLOWTCAM结果表1 */ + PKT_CAPTURE_MEM_ID = 7, /** 抓包 */ + MEM_ID_MUX_NUM = 8, /** PKTRX模块使用的内部表的个数*/ +} DPP_PKTRX_TBL_MEM_ID_E; + +typedef struct dpp_pktrx_phyport_udf_table_t { + ZXIC_UINT32 
port_based_user_data[4]; /**< @brief 用户自定义表数据 */ +} DPP_PKTRX_PHYPORT_UDF_TABLE_T; + +/***********************************************************/ +/**全局配置寄存器设置 +* @param dev_id +* @param p_mcode_glb_cfg +* +* @return +* @remark 无 +* @see +* @author czd @date 2016/04/27 +************************************************************/ +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_0(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_0); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_1(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_1); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_2(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_2); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_3(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_3); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_0(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_0); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_1(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_1); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_2(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_2); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_3(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_3); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write_0(DPP_DEV_T *dev, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_0); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write_1(DPP_DEV_T *dev, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1); +DPP_STATUS +dpp_pktrx_udf_table_get(DPP_DEV_T *dev, ZXIC_UINT32 index, + DPP_PKTRX_PHYPORT_UDF_TABLE_T *p_phyport_user_info); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_ppu_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_ppu_api.h new file mode 100644 index 000000000000..4cca265d0093 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_ppu_api.h @@ -0,0 +1,602 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_ppu_api.h +* 文件标识 : +* 内容摘要 : PPU模块对外数据结构和函数声明 +* 其它说明 : 
+* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2015/02/13 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PPU_API_H_ +#define _DPP_PPU_API_H_ + +#if ZXIC_REAL("header file") +#include "dpp_module.h" + +#endif + +#if ZXIC_REAL("data struct define") + +/** mcode version time */ +typedef struct dpp_version_t { + /*对应微码伪指令 version = expr_a.expr_b.expr_c*/ + ZXIC_UINT32 version_a; /**< @brief expr_a*/ + ZXIC_UINT32 version_b; /**< @brief expr_b*/ + ZXIC_UINT32 version_c; /**< @brief xpr_c*/ + + /*对应微码编译版本的时间 年/月/日/小时*/ + ZXIC_UINT32 year; + ZXIC_UINT32 month; + ZXIC_UINT32 day; + ZXIC_UINT32 hour; +} DPP_VERSION_T; + +/** SDT表数据*/ +typedef struct dpp_sdt_tbl_data_t { + ZXIC_UINT32 data_high32; /**< @brief SDT表数据高32bit*/ + ZXIC_UINT32 data_low32; /**< @brief SDT表数据低32bit*/ +} DPP_SDT_TBL_DATA_T; + +#endif + +#if ZXIC_REAL("interrupt data struct define") +/** PPU 指令RAM ECC错误中断原因*/ +typedef struct dpp_ppu_ram_parity_err_t { + ZXIC_UINT32 + instrmem2_bank3_parity_err; /**< @brief 指令RAM instrmem2 BANK3 parity 错误*/ + ZXIC_UINT32 + instrmem2_bank2_parity_err; /**< @brief 指令RAM instrmem2 BANK2 parity 错误*/ + ZXIC_UINT32 + instrmem2_bank1_parity_err; /**< @brief 指令RAM instrmem2 BANK1 parity 错误*/ + ZXIC_UINT32 + instrmem2_bank0_parity_err; /**< @brief 指令RAM instrmem2 BANK0 parity 错误*/ + ZXIC_UINT32 + instrmem1_bank3_parity_err; /**< @brief 指令RAM instrmem1 BANK3 parity 错误*/ + ZXIC_UINT32 + instrmem1_bank2_parity_err; /**< @brief 指令RAM instrmem1 BANK2 parity 错误*/ + ZXIC_UINT32 + instrmem1_bank1_parity_err; /**< @brief 指令RAM instrmem1 BANK1 parity 错误*/ + ZXIC_UINT32 + instrmem1_bank0_parity_err; /**< @brief 指令RAM instrmem1 BANK0 parity 错误*/ + ZXIC_UINT32 + instrmem0_bank3_parity_err; /**< @brief 指令RAM instrmem0 BANK3 parity 错误*/ + ZXIC_UINT32 + instrmem0_bank2_parity_err; /**< @brief 指令RAM instrmem0 BANK2 parity 错误*/ + ZXIC_UINT32 + 
instrmem0_bank1_parity_err; /**< @brief 指令RAM instrmem0 BANK1 parity 错误*/ + ZXIC_UINT32 + instrmem0_bank0_parity_err; /**< @brief 指令RAM instrmem0 BANK0 parity 错误*/ +} DPP_PPU_RAM_PARITY_ERR_T; + +/** PPU ME指令RAM ECC错误中断原因*/ +typedef struct dpp_ppu_me_interrupt_t { + ZXIC_UINT32 me7_interrupt_mask; /**< @brief me7core的中断掩码*/ + ZXIC_UINT32 me6_interrupt_mask; /**< @brief me6core的中断掩码*/ + ZXIC_UINT32 me5_interrupt_mask; /**< @brief me5core的中断掩码*/ + ZXIC_UINT32 me4_interrupt_mask; /**< @brief me4core的中断掩码*/ + ZXIC_UINT32 me3_interrupt_mask; /**< @brief me3core的中断掩码*/ + ZXIC_UINT32 me2_interrupt_mask; /**< @brief me2core的中断掩码*/ + ZXIC_UINT32 me1_interrupt_mask; /**< @brief me1core的中断掩码*/ + ZXIC_UINT32 me0_interrupt_mask; /**< @brief me0core的中断掩码*/ + +} DPP_PPU_ME_INTERRUPT_T; + +/** cluster mex的错误中断原因*/ +typedef struct dpp_ppu_cluster_600m_mex_fifo_int_t { + ZXIC_UINT32 + ppu_se_ikey_afifo_underflow; /**< @brief se片内表key fifo读空中断*/ + ZXIC_UINT32 + ppu_se_ekey_afifo_underflow; /**< @brief se片外表key fifo读空中断*/ + ZXIC_UINT32 + ppu_sta_key_afifo_underflow; /**< @brief 统计计数key fifo读空中断*/ + ZXIC_UINT32 + ppu_cluster_mf_in_overflow; /**< @brief metafram接收 fifo满写中断*/ + ZXIC_UINT32 + ppu_ese_rsp_afifo_overflow; /**< @brief se片外表 rsp fifo满写中断*/ + ZXIC_UINT32 + ppu_ise_rsp_afifo_overflow; /**< @brief se片内表 rsp fifo满写中断*/ + ZXIC_UINT32 + ppu_sta_rsp_afifo_overflow; /**< @brief 统计计数 rsp fifo满写中断*/ +} DPP_PPU_CLUSTER_600M_MEX_FIFO_INT_T; + +/** cluster mex的错误中断原因*/ +typedef struct dpp_ppu_cluster_1200m_mex_fifo_int_t { + ZXIC_UINT32 + ppu_se_key_afifo_32x54_wrapper_overflow_flag; /**< @brief ppu_se_key_afifo_32x54_wrapper满写中断掩码*/ + ZXIC_UINT32 + ppu_se_key_afifo_32x665_wrapper_overflow_flag; /**< @brief ppu_se_key_afifo_32x665_wrapper满写中断掩码*/ + ZXIC_UINT32 + ppu_sta_key_afifo_32x110_wrapper_overflow_flag; /**< @brief ppu_sta_key_afifo_32x110_wrapper满写中断掩码*/ + ZXIC_UINT32 + ppu_cluster_mf_in_afifo_16x2048_wrapper_underflow_flag; /**< @brief 
ppu_cluster_mf_in_afifo_32x2048_wrapper读空中断掩码*/ + ZXIC_UINT32 + ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_flag; /**< @brief ppu_pbu_mcode_pf_rsp_fifo满写中断掩码*/ + ZXIC_UINT32 + ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_flag; /**< @brief ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper读空中断掩码*/ + ZXIC_UINT32 + ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_flag; /**< @brief ppu_coprocess_rsp_fifo_32x77_wrapper满写中断掩码*/ + ZXIC_UINT32 + ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_flag; /**< @brief ppu_coprocess_rsp_fifo_32x77_wrapper读空中断掩码*/ + ZXIC_UINT32 + ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_flag; /**< @brief ppu_coprocess_rsp_fwft_fifo_128x78_wrapper满写中断掩码*/ + ZXIC_UINT32 + ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_flag; /**< @brief ppu_coprocess_rsp_fwft_fifo_128x78_wrapper读空中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_flag; /**< @brief ppu_ese_rsp_afifo_64x271_wrapper_u0满写中断掩码*/ + + ZXIC_UINT32 + ese_rsp_ram_free_ptr_u0_overflow_flag; /**< @brief ese_rsp_ram_free_ptr_u0满写中断掩码*/ + ZXIC_UINT32 + ese_rsp_ram_free_ptr_u0_underflow_flag; /**< @brief ese_rsp_ram_free_ptr_u0读空中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0满写中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0读空中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1满写中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1读空中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2满写中断掩码*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2读空中断掩码*/ + ZXIC_UINT32 + 
ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3满写中断标记*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3读空中断标记*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4满写中断标记*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4读空中断标记*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5满写中断标记*/ + ZXIC_UINT32 + ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag; /**< @brief ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_flag; /**< @brief ppu_ise_rsp_afifo_64x143_wrapper_u0读空中断标记*/ + ZXIC_UINT32 + ise_rsp_ram_free_ptr_u0_overflow_flag; /**< @brief ise_rsp_ram_free_ptr_u0满写中断标记 */ + ZXIC_UINT32 + ise_rsp_ram_free_ptr_u0_underflow_flag; /**< @brief ise_rsp_ram_free_ptr_u0读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2读空中断标记*/ + ZXIC_UINT32 + 
ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4读空中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5满写中断标记*/ + ZXIC_UINT32 + ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag; /**< @brief ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5读空中断标记*/ + ZXIC_UINT32 + ppu_sta_rsp_afifo_64x79_wrapper_underflow_flag; /**< @brief ppu_sta_rsp_afifo_64x79_wrapper读空中断标记*/ + ZXIC_UINT32 + ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_flag; /**< @brief ppu_sta_rsp_fwft_fifo_128x79_wrapper满写中断标记*/ + ZXIC_UINT32 + ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_flag; /**< @brief ppu_sta_rsp_fwft_fifo_128x79_wrapper读空中断标记*/ +} DPP_PPU_CLUSTER_1200M_MEX_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_ram_err_t { + ZXIC_UINT32 + dup_freeptr_fwft_fifo_128x7_wrapper_u0_parity_err_flag; /**< @brief dup_freeptr_fwft_fifo_128x7_wrapper_u0_parity_err中断标记*/ + ZXIC_UINT32 + free_global_num_fwft_fifo_8192x13_wrapper_u0_parity_err_flag; /**< @brief free_global_num_fwft_fifo_8192x13_wrapper_u0_parity_err中断标记*/ + ZXIC_UINT32 + ppu_mccnt_fifo_32x15_wrapper_u0_parity_err_flag; /**< @brief ppu_mccnt_fifo_32x15_wrapper_u0_parity_err中断标记*/ + ZXIC_UINT32 + ppu_reorder_link_table_ram_1r1w_8192x13_wrapper_u1_parity_err_flag; /**< @brief ppu_reorder_link_table_ram_1r1w_8192x13_wrapper_u1_parity_err中断标记*/ + ZXIC_UINT32 + ppu_reorder_link_table_ram_1r1w_8192x13_wrapper_u0_parity_err_flag; /**< @brief 
ppu_reorder_link_table_ram_1r1w_8192x13_wrapper_u0_parity_err中断标记*/ +} DPP_PPU_PPU_RAM_ERR_T; + +typedef struct dpp_ppu_cls_ram_err_600m_t { + ZXIC_UINT32 + ppu_sdt_table_ram_2rw_256x64_wrapper_parity_errb_flag; /**< @brief ppu_sdt_table_ram_2rw_256x64_wrapper_parity_errb中断标记*/ + ZXIC_UINT32 + ppu_sdt_table_ram_2rw_256x64_wrapper_parity_erra_flag; /**< @brief ppu_sdt_table_ram_2rw_256x64_wrapper_parity_errba中断标记*/ +} DPP_PPU_CLS_RAM_ERR_600M_T; + +/** cluster mex的错误中断原因*/ +typedef struct dpp_ppu_cluster_me_fifo_int_t { + ZXIC_UINT32 + me_except_refetch_pc_overflow_flag; /**< @brief me_except_refetch_pc满写中断标记*/ + ZXIC_UINT32 + me_except_refetch_pc_underflow_flag; /**< @brief me_except_refetch_pc读空中断标记*/ + + ZXIC_UINT32 + me_free_pkt_q_overflow_flag; /**< @brief me_free_pkt_q满写中断状态*/ + ZXIC_UINT32 + me_free_pkt_q_underflow_flag; /**< @brief me_free_pkt_q读空中断状态*/ + ZXIC_UINT32 + me_free_thread_q_overflow_flag; /**< @brief me_free_thread_q满写中断状态*/ + ZXIC_UINT32 + me_free_thread_q_underflow_flag; /**< @brief me_free_thread_q读空中断状态*/ + ZXIC_UINT32 me_pkt_in_overflow_flag; /**< @brief me_pkt_in_满写中断状态*/ + ZXIC_UINT32 + me_pkt_in_underflow_flag; /**< @brief me_pkt_in_读空中断状态*/ + ZXIC_UINT32 me_rdy_q_overflow_flag; /**< @brief me_rdy_q满写中断状态*/ + ZXIC_UINT32 me_rdy_q_underflow_flag; /**< @brief me_rdy_q读空中断状态*/ + ZXIC_UINT32 + me_pkt_out_q_overflow_flag; /**< @brief me_pkt_out_q满写中断状态*/ + ZXIC_UINT32 + me_pkt_out_q_underflow_flag; /**< @brief me_pkt_out_q读空中断状态*/ + ZXIC_UINT32 + me_continue_q_overflow_flag; /**< @brief me_continue_q满写中断状态*/ + ZXIC_UINT32 + me_continue_q_underflow_flag; /**< @brief me_continue_q读空中断状态*/ + ZXIC_UINT32 me_esrh_q_overflow_flag; /**< @brief me_esrh_q满写中断状态*/ + ZXIC_UINT32 me_esrh_q_underflow_flag; /**< @brief me_esrh_q读空中断状态*/ + ZXIC_UINT32 me_isrh_q_overflow_flag; /**< @brief me_isrh_q满写中断状态*/ + ZXIC_UINT32 me_isrh_q_underflow_flag; /**< @brief me_isrh_q读空中断状态*/ + ZXIC_UINT32 + me_cache_miss_q_overflow_flag; /**< @brief me_cache_miss_q满写中断状态*/ + 
ZXIC_UINT32 + me_cache_miss_q_underflow_flag; /**< @brief me_cache_miss_q读空中断状态*/ + ZXIC_UINT32 + me_base_q_u0_overflow_flag; /**< @brief me_base_q_u0满写中断状态*/ + ZXIC_UINT32 + me_base_q_u0_underflow_flag; /**< @brief me_base_q_u0读空中断状态*/ + ZXIC_UINT32 + me_base_q_u1_overflow_flag; /**< @brief me_base_q_u1满写中断状态*/ + ZXIC_UINT32 + me_base_q_u1_underflow_flag; /**< @brief me_base_q_u1读空中断状态*/ + ZXIC_UINT32 + me_base_q_u2_overflow_flag; /**< @brief me_base_q_u2满写中断状态*/ + ZXIC_UINT32 + me_base_q_u2_underflow_flag; /**< @brief me_base_q_u2读空中断状态*/ + ZXIC_UINT32 + me_base_q_u3_overflow_flag; /**< @brief me_base_q_u3满写中断状态*/ + ZXIC_UINT32 + me_base_q_u3_underflow_flag; /**< @brief me_base_q_u3读空中断状态*/ + ZXIC_UINT32 + me_reg_pc_q_overflow_flag; /**< @brief me_reg_pc_q满写中断状态*/ + ZXIC_UINT32 + me_reg_pc_q_underflow_flag; /**< @brief me_reg_pc_q读空中断状态*/ + ZXIC_UINT32 + me_branch_q_overflow_flag; /**< @brief me_branch_q满写中断状态*/ + ZXIC_UINT32 + me_branch_q_underflow_flag; /**< @brief me_branch_q读空中断状态*/ + ZXIC_UINT32 + me_pkt_base_q_overflow_flag; /**< @brief me_pkt_base_q满写中断状态*/ + ZXIC_UINT32 + me_pkt_base_q_underflow_flag; /**< @brief me_pkt_base_q读空中断状态*/ +} DPP_PPU_CLUSTER_ME_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_isu_ppu_demux_fifo_int_t { + ZXIC_UINT32 + isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_flag; /**< @brief isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_flag; /**< @brief isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + isu_in_fifo_64x81_wrapper_u0_overflow_flag; /**< @brief isu_in_fifo_64x81_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + isu_in_fifo_64x81_wrapper_u0_underflow_flag; /**< @brief isu_in_fifo_64x81_wrapper_u0_underflow中断标记*/ +} DPP_PPU_PPU_ISU_PPU_DEMUX_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_ppu_multicast_fifo_int_t { + ZXIC_UINT32 + pf_req_fwft_fifo_16x36_wrapper_u0_overflow_flag; /**< @brief pf_req_fwft_fifo_16x36_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 
+ pf_req_fwft_fifo_16x36_wrapper_u0_underflow_flag; /**< @brief pf_req_fwft_fifo_16x36_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_flag; /**< @brief pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_flag; /**< @brief pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + dup_para_fwft_fifo_16x35_wrapper_u0_overflow_flag; /**< @brief dup_para_fwft_fifo_16x35_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + dup_para_fwft_fifo_16x35_wrapper_u0_underflow_flag; /**< @brief dup_para_fwft_fifo_16x35_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_flag; /**< @brief se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_flag; /**< @brief se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + sa_para_fwft_fifo_64x17_wrapper_u0_overflow_flag; /**< @brief sa_para_fwft_fifo_64x17_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + sa_para_fwft_fifo_64x17_wrapper_u0_underflow_flag; /**< @brief sa_para_fwft_fifo_64x17_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + group_id_fifo_64x16_wrapper_u0_overflow_flag; /**< @brief group_id_fifo_64x16_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + group_id_fifo_64x16_wrapper_u0_underflow_flag; /**< @brief group_id_fifo_64x16_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_flag; /**< @brief isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_flag; /**< @brief isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; /**< @brief dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; /**< @brief dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + 
car_flag_fifo_32x1_wrapper_overflow_flag; /**< @brief car_flag_fifo_32x1_wrapper_overflow中断标记*/ + ZXIC_UINT32 + car_flag_fifo_32x1_wrapper_underflow_flag; /**< @brief car_flag_fifo_32x1_wrapper_underflow中断标记*/ +} DPP_PPU_PPU_PPU_MULTICAST_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_ppu_in_schedule_fifo_int_t { + ZXIC_UINT32 + free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_flag; /**< @brief free_global_num_fwft_fifo_8192x13_满写中断标记*/ + ZXIC_UINT32 + free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_flag; /**< @brief free_global_num_fwft_fifo_8192x13_空读中断标记*/ + ZXIC_UINT32 + mc_mf_fifo_16x2048_wrapper_u0_overflow_flag; /**< @brief mc_mf_fifo_16x2048_满写中断标记*/ + ZXIC_UINT32 + mc_mf_fifo_16x2048_wrapper_u0_underflow_flag; /**< @brief mc_mf_fifo_16x2048_空读中断标记*/ + ZXIC_UINT32 + uc_mf_fifo_96x2048_wrapper_u0_overflow_flag; /**< @brief uc_mf_fifo_96x2048_满写中断标记*/ + ZXIC_UINT32 + uc_mf_fifo_96x2048_wrapper_u0_underflow_flag; /**< @brief uc_mf_fifo_96x2048_空读中断标记*/ +} DPP_PPU_PPU_PPU_IN_SCHEDULE_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_ppu_mf_out_fifo_int_t { + ZXIC_UINT32 + ppu_cluster5_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster5发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster5_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster5发送描述符fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster4_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster4发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster4_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster4发送描述符fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster3_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster3发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster3_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster3发送描述符fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster2_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster2发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster2_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster2发送描述符fifo空读中断标记*/ + ZXIC_UINT32 + 
ppu_cluster1_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster1发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster1_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster1发送描述符fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster0_mf_out_afifo_16x2048_wrapper_overflow_flag; /**< @brief cluster0发送描述符fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster0_mf_out_afifo_16x2048_wrapper_underflow_flag; /**< @brief cluster0发送描述符fifo空读中断标记*/ +} DPP_PPU_PPU_PPU_MF_OUT_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_int_t { + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster5指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster4指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster3指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster2指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster1指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; /**< @brief cluster0指针预取fifo满写中断标记*/ + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster5指针预取fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster4指针预取fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster3指针预取fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster2指针预取fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster1指针预取fifo空读中断标记*/ + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; /**< @brief cluster0指针预取fifo空读中断标记*/ +} 
DPP_PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_int_t { + ZXIC_UINT32 + ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_flag; /**< @brief ppu微码申请指针返回fifo空读中断标记*/ + ZXIC_UINT32 + ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_flag; /**< @brief ppu微码申请指针返回fifo满写中断标记*/ +} DPP_PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_ppu_mccnt_fifo_int_t { + ZXIC_UINT32 + ppu_mccnt_fifo_32x15_wrapper_u0_overflow_flag; /**< @brief ppu_mccnt_fifo_32x15_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + ppu_mccnt_fifo_32x15_wrapper_u0_underflow_flag; /**< @brief ppu_mccnt_fifo_32x15_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_flag; /**< @brief ppu_wb_data_fifo_32x2048_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_flag; /**< @brief ppu_wb_data_fifo_32x2048_wrapper_u0_underflow中断标记*/ + ZXIC_UINT32 + mccnt_rsp_fifo_32x1_wrapper_u0_overflow_flag; /**< @brief mccnt_rsp_fifo_32x1_wrapper_u0_overflow中断标记*/ + ZXIC_UINT32 + mccnt_rsp_fifo_32x1_wrapper_u0_underflow_flag; /**< @brief mccnt_rsp_fifo_32x1_wrapper_u0_underflow中断标记*/ +} DPP_PPU_PPU_PPU_MCCNT_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_int_t { + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_flag; /**< @brief 协处理random_mod参数fifo错误满写中断标记*/ + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_flag; /**< @brief 协处理random_mod参数fifo错误空读中断标记*/ + + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_48x16_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_48x16_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_16x48_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + 
ZXIC_UINT32 + ppu_cop_delay_fifo_16x48_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_16x32_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_16x32_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_16x16_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_delay_fifo_16x16_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + 
mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + + ZXIC_UINT32 + mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief 
fifo错误中断标记*/ + ZXIC_UINT32 + mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; /**< @brief fifo错误中断标记*/ + ZXIC_UINT32 + mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; /**< @brief fifo错误中断标记*/ +} DPP_PPU_PPU_COPROCESSOR_FIFO_INT_T; + +typedef struct dpp_ppu_ppu_cos_meter_cfg_t { + ZXIC_UINT32 cbs; /**< @brief 0~7队列CBS配置*/ + ZXIC_UINT32 pbs; /**< @brief 0~7队列PBS配置*/ + ZXIC_UINT32 + green_action; /**< @brief 0~7队列绿色报文动作配置,关闭CAR使能的时候,需要同时将该动作置为1*/ + ZXIC_UINT32 yellow_action; /**< @brief 0~7队列黄色报文动作配置*/ + ZXIC_UINT32 red_action; /**< @brief 
0~7队列红色报文动作配置*/ + ZXIC_UINT32 + cir; /**< @brief 0~7队列CIR配置,单位Mpps,0x24a对应600Mpps,按照线性变化*/ + ZXIC_UINT32 + pir; /**< @brief 0~7队列PIR配置,单位Mpps,0x24a对应600Mpps,按照线性变化*/ + ZXIC_UINT32 car_en; /**< @brief 0~7队列CAR使能配置*/ +} DPP_PPU_PPU_COS_METER_CFG_T; + +#endif + +/***********************************************************/ +/** 配置SDT表 +* @param dev_id 设备号,范围0~3 +* @param cluster_id me cluster编号,范围0~7 +* @param index 地址,即sdt表号,范围0~255 +* @param p_sdt_data sdt表数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/18 +************************************************************/ +DPP_STATUS dpp_ppu_sdt_tbl_write(DPP_DEV_T *dev, ZXIC_UINT32 cluster_id, + ZXIC_UINT32 index, + DPP_SDT_TBL_DATA_T *p_sdt_data); + +DPP_STATUS dpp_ppu_set_debug_mode(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *dbg_status); +DPP_STATUS dpp_ppu_close_debug_mode(DPP_PF_INFO_T *pf_info); +#endif /* dpp_ppu_api.h */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_reg_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_reg_api.h new file mode 100644 index 000000000000..36e42160f1b5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_reg_api.h @@ -0,0 +1,191 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_reg.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2014/02/12 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_REG_API_H_ +#define _DPP_REG_API_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" +#include "dpp_reg_struct.h" + +/** 所有上层模块不需要直接使用该全局变量*/ +extern DPP_REG_T g_dpp_reg_info[]; + +/** public*/ +#define DPP_REG(no) (g_dpp_reg_info[no]) +#define DPP_REG_NAME(no) ((DPP_REG(no)).reg_name) +#define DPP_REG_NO(no) ((DPP_REG(no)).reg_no) +#define DPP_REG_MODULE_NO(no) ((DPP_REG(no)).module_no) 
+#define DPP_REG_FLAGS(no) ((DPP_REG(no)).flags) +#define DPP_REG_TYPE(no) ((DPP_REG(no)).array_type) +#define DPP_REG_ADDR(no) ((DPP_REG(no)).addr) +#define DPP_REG_WIDTH(no) ((DPP_REG(no)).width) +#define DPP_REG_M_SIZE(no) ((DPP_REG(no)).m_size) +#define DPP_REG_N_SIZE(no) ((DPP_REG(no)).n_size) +#define DPP_REG_M_STEP(no) ((DPP_REG(no)).m_step) +#define DPP_REG_N_STEP(no) ((DPP_REG(no)).n_step) +#define DPP_REG_FIELD_NUM(no) ((DPP_REG(no)).field_num) +#define DPP_REG_FIELD_NAME(no, field_no) \ + (((DPP_REG(no)).p_fields + field_no)->p_name) + +typedef enum dpp_bar_4k_e { + BAR_4K_DTB = 0, /**< @brief 0*/ + BAR_4K_ETCAM, /**< @brief 1*/ + BAR_4K_CLS0, /**< @brief 2*/ + BAR_4K_CLS1, /**< @brief 3*/ + BAR_4K_CLS2, /**< @brief 4*/ + BAR_4K_CLS3, /**< @brief 5*/ + BAR_4K_CLS4, /**< @brief 6*/ + BAR_4K_CLS5, /**< @brief 7*/ + BAR_4K_SE, /**< @brief 8*/ + BAR_4K_SMMU1, /**< @brief 9*/ + BAR_4K_MAX +} DPP_BAR_4K_E; + +typedef struct { + ZXIC_UINT32 reg_module; /*DPP_MODULE_E*/ + ZXIC_UINT32 index_4k; /*BAR NP空间映射4K相对索引*/ + ZXIC_UINT32 addr_offset; /*寄存器偏移,用于计算BAR映射空间位置*/ +} DPP_REG_OFFSET_ADDR; + +/***********************************************************/ +/** 获取寄存器属性 +* @param reg_no 寄存器编号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/17 +************************************************************/ +DPP_REG_T *dpp_reg_info_get(ZXIC_UINT32 reg_no); + +/***********************************************************/ +/** 通用寄存器写函数 +* @param dev_id 设备号,支持多芯片 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/12 +************************************************************/ +DPP_STATUS dpp_reg_write(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_VOID *p_data); + +/***********************************************************/ +/** 通用寄存器读函数 +* @param 
dev_id 设备号,支持多芯片 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/12 +************************************************************/ +DPP_STATUS dpp_reg_read(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_VOID *p_data); + +/***********************************************************/ +/** 根据寄存器编号获得寄存器芯片内绝对地址 +* @param reg_no +* @param m_offset +* @param n_offset +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/19 +************************************************************/ +ZXIC_UINT32 dpp_reg_get_reg_addr(ZXIC_UINT32 reg_no, ZXIC_UINT32 m_offset, + ZXIC_UINT32 n_offset); + +/***********************************************************/ +/** 通过寄存器编号配置寄存器,仅适用于32bit位宽 + 的常规寄存器 +* @param dev_id 设备号 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param data 数据,32bit +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/01/21 +************************************************************/ +DPP_STATUS dpp_reg_write32(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 data); + +/***********************************************************/ +/** 通过寄存器编号读取寄存器的值,仅适用于32bit位宽的常规寄存器 +* @param dev_id 设备号 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 出参,返回读取寄存器数值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 石金锋 @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_reg_read32(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_UINT32 *p_data); +/***********************************************************/ +/** 判断是否为4K寄存器 +* @param reg_module +* +* @return +* @remark 无 +* @see +* @author cq @date 2023/11/29 
+************************************************************/ +BOOLEAN dpp_4k_reg(ZXIC_UINT32 reg_module); +/***********************************************************/ +/** 获取NP对应模块的映射地址偏移(riscv或者非4K寄存器不做转换,host根据映射情况做转换) +* @param dev_id +* @param reg_module +* @param flags 标志位,DPP_REG_FLAG_INDIRECT DPP_REG_FLAG_DIRECT +* @param addr +* +* @return 映射地址 +* @remark 无 +* @see +* @author cq @date 2023/11/29 +************************************************************/ +ZXIC_UINT32 dpp_reg_addr_convert(ZXIC_UINT32 dev_id, ZXIC_UINT32 reg_module, + ZXIC_UINT32 flags, ZXIC_UINT32 addr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_se_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_se_api.h new file mode 100644 index 000000000000..ccc357a1a228 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_se_api.h @@ -0,0 +1,1466 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_se_api.h +* 文件标识 : se模块对外数据类型定义和接口函数声明 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2015/01/30 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_SE_API_H_ +#define _DPP_SE_API_H_ + +#if ZXIC_REAL("header file") +#include "dpp_type_api.h" /* xcx_0619 */ +#endif + +#if ZXIC_REAL("data struct define") + +/** DPP+ 所有eram block 大小都变为2M ~深度是16K */ +#define SE_SMMU0_ERAM_BLOCK_NUM (32) +#define SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK (0x4000) +#define SE_SMMU0_ERAM_ADDR_NUM_TOTAL \ + (SE_SMMU0_ERAM_BLOCK_NUM * SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK) + +#define DPP_DDR4_PER_BANK_BADDR_SETP (0x800) /* 8G X16 */ +#define DPP_DDR4_BANK_NUM (64) +#define DPP_DDR4_PER_BANK_BADDR_CONV(ddr4_phy_type) \ + (DPP_DDR4_PER_BANK_BADDR_SETP * (4U >> ddr4_phy_type)) + +/*普通上送上送类型*/ +typedef enum dpp_dma_ecc_en_e { + 
DMA_ECC_DISABLE = 0, + DMA_ECC_ENABLE = 1, +} DPP_DMA_ECC_EN_E; + +/** eRam表读请模式 */ +typedef enum dpp_eram128_rd_clr_mode_e { + RD_MODE_HOLD = 0, /**< @brief 正常读,读完数据不清空 */ + RD_MODE_CLEAR = 1, /**< @brief 读清模式,返回eram中的值 */ +} DPP_ERAM128_RD_CLR_MODE_E; + +/** eRam表cpu读写位宽模式*/ +typedef enum dpp_eram128_opr_mode_e { + ERAM128_OPR_128b = + 0, /**< @brief 128bit模式读写eRam, 支持128bit读清模式*/ + ERAM128_OPR_64b = + 1, /**< @brief 64bit模式读写eRam, 支持64bit 读清模式*/ + ERAM128_OPR_1b = + 2, /**< @brief 1bit模式读写eRam, 不支持1bit 读清模式*/ + ERAM128_OPR_32b = + 3 /**< @brief 32bit模式读eRam, 不能用于写,支持32bit 读清模式*/ +} DPP_ERAM128_OPR_MODE_E; + +/** eRam直接表位宽模式,也可用于eRam做结果表时的位宽 */ +typedef enum dpp_eram128_tbl_mode_e { + ERAM128_TBL_1b = 0, /**< @brief 1bit eRam片内直接表 */ + ERAM128_TBL_32b = 1, /**< @brief 32bit eRam片内直接表 */ + ERAM128_TBL_64b = 2, /**< @brief 64bit eRam片内直接表 */ + ERAM128_TBL_128b = 3, /**< @brief 128bit eRam片内直接表 */ + ERAM128_TBL_2b = 4, /**< @brief 2bit eRam片内直接表 */ + ERAM128_TBL_4b = 5, /**< @brief 4bit eRam片内直接表 */ + ERAM128_TBL_8b = 6, /**< @brief 8bit eRam片内直接表 */ + ERAM128_TBL_16b = 7 /**< @brief 16bit eRam片内直接表 */ +} DPP_ERAM128_TBL_MODE_E; + +/** eRam查表通道*/ +typedef enum smmu0_empty_type_e { + SMMU0_EMPTY_CLS0 = 0, /**< @brief PPU cluster0通道*/ + SMMU0_EMPTY_CLS1 = 1, /**< @brief PPU cluster1通道*/ + SMMU0_EMPTY_CLS2 = 2, /**< @brief PPU cluster2通道*/ + SMMU0_EMPTY_CLS3 = 3, /**< @brief PPU cluster3通道*/ + SMMU0_EMPTY_CLS4 = 4, /**< @brief PPU cluster4通道*/ + SMMU0_EMPTY_CLS5 = 5, /**< @brief PPU cluster5通道*/ + SMMU0_EMPTY_CLS6 = 6, /**< @brief PPU cluster6通道*/ + SMMU0_EMPTY_CLS7 = 7, /**< @brief PPU cluster7通道*/ + SMMU0_EMPTY_MCAST = 8, /**< @brief 组播复制表通道*/ + SMMU0_EMPTY_ODMA = 9, /**< @brief ODMA保序计数通道*/ +} SMMU0_EMPTY_TYPE_E; + +/** 组播报文类型*/ +typedef enum smmu0_mcast_tbl_type_e { + MCAST_TDM = 0, /**< @brief TDM组播报文*/ + MCAST_DATA = 1, /**< @brief 数据组播报文*/ + + MCAST_INVALID, /**< @brief 无效值 */ +} SMMU0_MCAST_TBL_TYPE_E; + +/** 调试计数读清模式 */ +typedef enum se_dbg_cnt_read_mode_e { + 
SE_DBG_CNT_READ_UNCLR = 0, /**< @brief 非读清模式 */ + SE_DBG_CNT_READ_CLR, /**< @brief 读清模式 */ +} SE_DBG_CNT_READ_MODE_E; + +/** 调试计数溢出模式 */ +typedef enum se_dbg_cnt_overflow_mode_e { + SE_DBG_CNT_OVERFLOW_UNREVERSE = 0, /**< @brief 溢出保持 */ + SE_DBG_CNT_OVERFLOW_REVERSE, /**< @brief 溢出翻转 */ +} SE_DBG_CNT_OVERFLOW_MODE_E; + +/** DDR颗粒类型*/ +typedef enum se_ddr_phy_cfg_type_e { + DPP_SE_DDR_PHY_32G_16 = 0, /**< @brief DDR颗粒每组32G容量,x16颗粒*/ + DPP_SE_DDR_PHY_16G_16, /**< @brief DDR颗粒每组16G容量,x16颗粒*/ + DPP_SE_DDR_PHY_8G_16, /**< @brief DDR颗粒每组8G容量,x16颗粒*/ + DPP_SE_DDR_PHY_4G_16, /**< @brief DDR颗粒每组4G容量,x16颗粒*/ + DPP_SE_DDR_PHY_32G_8, /**< @brief DDR颗粒每组32G容量,x8颗粒*/ + DPP_SE_DDR_PHY_16G_8, /**< @brief DDR颗粒每组16G容量,x8颗粒*/ + DPP_SE_DDR_PHY_8G_8, /**< @brief DDR颗粒每组8G容量,x8颗粒*/ + DPP_SE_DDR_PHY_4G_8, /**< @brief DDR颗粒每组4G容量,x8颗粒*/ + DPP_SE_DDR_PHY_MAX, +} SE_DDR_PHY_CFG_TYPE_E; + +/** DDR表写数据位宽模式*/ +typedef enum smmu1_ddr_wrt_mode_e { + SMMU1_DDR_WRT_128b = 0, /**< @brief 128bit模式写DDR*/ + SMMU1_DDR_WRT_256b = 1, /**< @brief 256bit模式写DDR*/ + SMMU1_DDR_WRT_384b = 2, /**< @brief 384bit模式写DDR*/ + SMMU1_DDR_WRT_512b = 3, /**< @brief 512bit模式写DDR*/ +} SMMU1_DDR_WRT_MODE_E; + +/** DDR表的共享模式 */ +typedef enum smmu1_ddr_share_mode_e { + SMMU1_DDR_SHARE_NO_SHARE = 0, /**< @brief SE 占用整个DDR*/ + SMMU1_DDR_SHARE_1_2 = 1, /**< @brief SE 占用1/2 DDR*/ + SMMU1_DDR_SHARE_1_4 = 2, /**< @brief SE 占用1/4 DDR*/ + SMMU1_DDR_SHARE_1_8 = 3, /**< @brief SE 占用1/8 DDR*/ + SMMU1_DDR_SHARE_MAX +} SMMU1_DDR_SHARE_MODE_E; + +/** 占用smmu1的表类型 */ +typedef enum smmu1_ddr_tbl_type_e { + SMMU1_DDR_TBL_TYPE_DIR_TBL = 0, /**< @brief 片外直接表 */ + SMMU1_DDR_TBL_TYPE_HASH_TBL = 1, /**< @brief HASH占用表 */ + SMMU1_DDR_TBL_TYPE_LPM_TBL = 2, /**< @brief LPM 占用表 */ + SMMU1_DDR_TBL_TYPE_OAM_TBL = 3, /**< @brief OAM 占用表 */ + SMMU1_DDR_TBL_TYPE_FTM_TBL = 4, /**< @brief FTM 占用表 */ + SMMU1_DDR_TBL_TYPE_ETM_TBL = 5, /**< @brief ETM 占用表 */ + SMMU1_DDR_TBL_TYPE_MAX +} SMMU1_DDR_TBL_TYPE_E; + +typedef enum cmmu_rd_mode_e { + CMMU_RD_MODE_29_35 
= 0, + CMMU_RD_MODE_32 = 1, + CMMU_RD_MODE_64 = 2, + CMMU_RD_MODE_128 = 3, + CMMU_RD_MODE_MAX, +} CMMU_RD_OPR_MODE_E; + +/** CMMU读请模式 */ +typedef enum cmmu_rd_clr_mode_e { + CMMU_RD_CLR_MODE_UNCLR = 0, /**< @brief 正常读,读完数据不清空 */ + CMMU_RD_CLR_MODE_CLR = 1, /**< @brief 读清模式*/ + CMMU_RD_CLR_MODE_MAX, +} CMMU_RD_CLR_MODE_E; + +/** DDR表读数据位宽模式*/ +typedef enum smmu1_ddr_srh_mode_e { + SMMU1_DDR_SRH_128b = 0, /**< @brief 访问DDR,以128bit模式返回数据*/ + SMMU1_DDR_SRH_256b, /**< @brief 访问DDR,以256bit模式返回数据*/ + SMMU1_DDR_SRH_512b, /**< @brief 访问DDR,以512bit模式返回数据*/ + SMMU1_DDR_SRH_MAX +} SMMU1_DDR_SRH_MODE_E; + +/** DDR地址映射参数*/ +typedef struct se_ddr_addr_map_info_t { + ZXIC_UINT32 + is_inited; /**< @brief 用于检测此结构体是否已经经过初始化 0xAA-已经初始化 其他值-未初始化 */ + ZXIC_UINT32 + ddr_phy_type; /**< @brief DDR颗粒类型 0-32G,x16颗粒,1-16G,x16颗粒,2-8G,x16颗粒 */ + ZXIC_UINT32 se_use_bank_num; /**< @brief SE使用的bank数目 */ + ZXIC_UINT32 bank_used_sate + [64]; /**< @brief 表示64个bank的使用状态 0-不存在 1-SE使用 2-PUB使用 3-FTM使用 4-ETM使用 */ +} SE_DDR_ADDR_MAP_INFO_T; + +/** se从ppu接收的查表请求排空状态*/ +typedef struct dpp_se_ept_flag_t { + ZXIC_UINT32 + ppu5_ept_flag; /**< @brief se从ppu通道5接收的查表请求排空状态*/ + ZXIC_UINT32 + ppu4_ept_flag; /**< @brief se从ppu通道4接收的查表请求排空状态*/ + ZXIC_UINT32 + ppu3_ept_flag; /**< @brief se从ppu通道3接收的查表请求排空状态*/ + ZXIC_UINT32 + ppu2_ept_flag; /**< @brief se从ppu通道2接收的查表请求排空状态*/ + ZXIC_UINT32 + ppu1_ept_flag; /**< @brief se从ppu通道1接收的查表请求排空状态*/ + ZXIC_UINT32 + ppu0_ept_flag; /**< @brief se从ppu通道0接收的查表请求排空状态*/ +} DPP_SE_EPT_FLAG_T; + +/** smmu0(eRam)各通道查表请求排空状态*/ +typedef struct dpp_smmu0_ept_flag_t { + ZXIC_UINT32 ept_flag8; /**< @brief odam请求排空状态*/ + ZXIC_UINT32 ept_flag7; /**< @brief odam tdm请求排空状态*/ + ZXIC_UINT32 ept_flag6; /**< @brief sa组播份数表请求排空状态*/ + ZXIC_UINT32 ept_flag5; /**< @brief ppu通道5查表请求排空状态*/ + ZXIC_UINT32 ept_flag4; /**< @brief ppu通道4查表请求排空状态*/ + ZXIC_UINT32 ept_flag3; /**< @brief ppu通道3查表请求排空状态*/ + ZXIC_UINT32 ept_flag2; /**< @brief ppu通道2查表请求排空状态*/ + ZXIC_UINT32 ept_flag1; /**< @brief ppu通道1查表请求排空状态*/ + 
ZXIC_UINT32 ept_flag0; /**< @brief ppu通道0查表请求排空状态*/ +} DPP_SMMU0_EPT_FLAG_T; + +/** SMMU0保序FIFO写满读空中断原因*/ +typedef struct dpp_se_smmu0_int0_t { + ZXIC_UINT32 dma_ordfifo; /**< @brief DMA保序fifo 写满读空标志*/ + ZXIC_UINT32 odma_ordfifo; /**< @brief ODMA保序fifo 写满读空标志*/ + ZXIC_UINT32 mcast_ordfifo; /**< @brief MCAST保序fifo 写满读空标志*/ +} DPP_SE_SMMU0_INT0_T; + +/* 数据组播报文类型 */ +typedef struct data_mcast_t_ { + ZXIC_UINT8 valid; /**< @brief valid 表示是否可用 */ + ZXIC_UINT8 rsv; /**< @brief rsv 保留位,字节对齐使用,勿需关心 */ + ZXIC_UINT16 mc_cnt; /**< @brief mc_cnt 16bit */ +} DATA_MCAST_T; + +/* TDM 组播报文类型 */ +typedef struct tdm_mcast_t_ { + ZXIC_UINT8 valid; /**< @brief valid 表示是否可用 */ + ZXIC_UINT8 rsv; /**< @brief rsv 保留位,字节对齐使用,勿需关心 */ + ZXIC_UINT8 bitmap[6]; /**< @brief bitmap[0]是最高位,bitmap[2]是最低位 */ +} TDM_MCAST_T; + +#endif + +#if ZXIC_REAL("eTcam data struct define") + +#endif + +#if ZXIC_REAL("interrupt data struct define") +/** SMMU0 中断状态 */ +typedef struct dpp_se_smmu0_int_t { + ZXIC_UINT32 smmu0_int0; + ZXIC_UINT32 smmu0_int1; + ZXIC_UINT32 smmu0_int2; + ZXIC_UINT32 smmu0_int3; + ZXIC_UINT32 smmu0_int4; + ZXIC_UINT32 smmu0_int5; + ZXIC_UINT32 smmu0_int6; + ZXIC_UINT32 smmu0_int7; + ZXIC_UINT32 smmu0_int8; + ZXIC_UINT32 smmu0_int9; + ZXIC_UINT32 smmu0_int10; + ZXIC_UINT32 smmu0_int11; + ZXIC_UINT32 smmu0_int12; + ZXIC_UINT32 smmu0_int13; + ZXIC_UINT32 smmu0_int14; + ZXIC_UINT32 smmu0_int15; + ZXIC_UINT32 smmu0_int16; + ZXIC_UINT32 smmu0_int17; + ZXIC_UINT32 smmu0_int18; + ZXIC_UINT32 smmu0_int19; + ZXIC_UINT32 smmu0_int20; + ZXIC_UINT32 smmu0_int21; + ZXIC_UINT32 smmu0_int22; + ZXIC_UINT32 smmu0_int23; + ZXIC_UINT32 smmu0_int24; + ZXIC_UINT32 smmu0_int25; + ZXIC_UINT32 smmu0_int26; + ZXIC_UINT32 smmu0_int27; + ZXIC_UINT32 smmu0_int28; + ZXIC_UINT32 smmu0_int29; + ZXIC_UINT32 smmu0_int30; + ZXIC_UINT32 smmu0_int31; + ZXIC_UINT32 smmu0_int32; + ZXIC_UINT32 smmu0_int33; + ZXIC_UINT32 smmu0_int34; + ZXIC_UINT32 smmu0_int35; + ZXIC_UINT32 smmu0_int36; + ZXIC_UINT32 smmu0_int37; + 
ZXIC_UINT32 smmu0_int38; + +} DPP_SE_SMMU0_INT_T; + +/** SMMU0 模块总的中断状态 */ +typedef struct dpp_smmu0_brief_int_t { + ZXIC_UINT32 smmu0_int14_unmask_flag; + ZXIC_UINT32 smmu0_int13_unmask_flag; + ZXIC_UINT32 smmu0_int12_unmask_flag; + ZXIC_UINT32 smmu0_int11_unmask_flag; + ZXIC_UINT32 smmu0_int10_unmask_flag; + ZXIC_UINT32 smmu0_int9_unmask_flag; + ZXIC_UINT32 smmu0_int8_unmask_flag; + ZXIC_UINT32 smmu0_int7_unmask_flag; + ZXIC_UINT32 smmu0_int6_unmask_flag; + ZXIC_UINT32 smmu0_int5_unmask_flag; + ZXIC_UINT32 smmu0_int4_unmask_flag; + ZXIC_UINT32 smmu0_int3_unmask_flag; + ZXIC_UINT32 smmu0_int2_unmask_flag; + ZXIC_UINT32 smmu0_int1_unmask_flag; + ZXIC_UINT32 smmu0_int0_unmask_flag; +} DPP_SMMU0_BRIEF_INT_T; + +/** SE 模块总的中断状态 */ +typedef struct dpp_se_int_status_t { + ZXIC_UINT32 + as_int_unmask_flag; /**< @brief 级联查找模块(AS)总中断状态 */ + ZXIC_UINT32 + kschd_int_unmask_flag; /**< @brief 键值调度模块(KSCHD)总中断状态 */ + ZXIC_UINT32 + rschd_int_unmask_flag; /**< @brief 返回调度模块(RSCHD)总中断状态 */ + ZXIC_UINT32 smmu1_int_unmask_flag; /**< @brief SMMU1 中断总中断状态 */ + ZXIC_UINT32 cmmu_int_unmask_flag; /**< @brief CMMU 总中断状态 */ + ZXIC_UINT32 + parser_int_unmask_flag; /**< @brief 解析模块(parse)总中断状态 */ +} DPP_SE_INT_STATUS_T; + +/** se解析模块中断状态 */ +typedef struct dpp_se_parser_int_t { + ZXIC_UINT32 parser_int_en; + ZXIC_UINT32 parser_int_mask; + ZXIC_UINT32 parser_int_status; +} DPP_SE_PARSER_INT_T; + +/** key调度模块(KSCHD)中断状态 */ +typedef struct dpp_se_kschd_int_t { + ZXIC_UINT32 kschd_int_0; + ZXIC_UINT32 kschd_int_1; + ZXIC_UINT32 kschd_int_2; + ZXIC_UINT32 kschd_int_3; + ZXIC_UINT32 kschd_int_4; +} DPP_SE_KSCHD_INT_T; + +/** rsp调度模块(RSCHD)中断状态 */ +typedef struct dpp_se_rschd_int_t { + ZXIC_UINT32 port0_int; + ZXIC_UINT32 port1_int; +} DPP_SE_RSCHD_INT_T; + +/** 关联查找(AS)模块的中断原因*/ +typedef struct dpp_se_as_int_t { + ZXIC_UINT32 as_int_0; + ZXIC_UINT32 as_int_1; + ZXIC_UINT32 as_int_2; +} DPP_SE_AS_INT_T; + +/** cmmu 模块中断*/ +typedef struct dpp_se_cmmu_int_t { + ZXIC_UINT32 cmmu_int12; + ZXIC_UINT32 
cmmu_int11; + ZXIC_UINT32 cmmu_int10; + ZXIC_UINT32 cmmu_int9; + ZXIC_UINT32 cmmu_int8; + ZXIC_UINT32 cmmu_int7; + ZXIC_UINT32 cmmu_int6; + ZXIC_UINT32 cmmu_int5; + ZXIC_UINT32 cmmu_int4; + ZXIC_UINT32 cmmu_int3; + ZXIC_UINT32 cmmu_int2; + ZXIC_UINT32 cmmu_int1; + ZXIC_UINT32 cmmu_int0; +} DPP_SE_CMMU_INT_T; + +/** ALG 模块中断*/ +typedef struct dpp_se_alg_int_t { + ZXIC_UINT32 + wr_rsp_fifo_ovfl_int; /**< @brief 写返回缓存ptr fifo 溢出中断上报 */ + ZXIC_UINT32 + init_rd_cft_int; /**< @brief 初始化过程中出现读命令,冲突中断 */ + ZXIC_UINT32 + schd_lpm_fifo_parity_err_int; /**< @brief 调度lpm缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_hash3_fifo_parity_err_int; /**< @brief 调度hash3缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_hash2_fifo_parity_err_int; /**< @brief 调度hash2缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_hash1_fifo_parity_err_int; /**< @brief 调度hash1缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_hash0_fifo_parity_err_int; /**< @brief 调度hash0缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_learn_fifo_parity_err_int; /**< @brief 调度学习缓存FIFO奇偶校验错误上报 */ + ZXIC_UINT32 + schd_lpm_fifo_ovfl_int; /**< @brief 调度lpm键值缓存FIFO溢出中断上报 */ + ZXIC_UINT32 + schd_hash3_fifo_ovfl_int; /**< @brief 调度hash3键值缓存FIFO溢出中断上报 */ + ZXIC_UINT32 + schd_hash2_fifo_unfl_int; /**< @brief 调度hash2键值缓存FIFO溢出中断上报 */ + ZXIC_UINT32 + schd_hash1_fifo_ovfl_int; /**< @brief 调度hash1键值缓存FIFO溢出中断上报 */ + ZXIC_UINT32 + schd_hash0_fifo_ovfl_int; /**< @brief 调度hash0键值缓存FIFO溢出中断上报 */ + ZXIC_UINT32 + schd_learn_fifo_ovfl_int; /**< @brief 调度学习命令缓存FIFO溢出中断上报 */ + + ZXIC_UINT32 zblk31_parity_int; /**< @brief zblock31 parity 中断 */ + ZXIC_UINT32 zblk30_parity_int; /**< @brief zblock30 parity 中断 */ + ZXIC_UINT32 zblk29_parity_int; /**< @brief zblock29 parity 中断 */ + ZXIC_UINT32 zblk28_parity_int; /**< @brief zblock28 parity 中断 */ + ZXIC_UINT32 zblk27_parity_int; /**< @brief zblock27 parity 中断 */ + ZXIC_UINT32 zblk26_parity_int; /**< @brief zblock26 parity 中断 */ + ZXIC_UINT32 zblk25_parity_int; /**< @brief zblock25 parity 中断 */ + ZXIC_UINT32 zblk24_parity_int; /**< @brief zblock24 
parity 中断 */ + ZXIC_UINT32 zblk23_parity_int; /**< @brief zblock23 parity 中断 */ + ZXIC_UINT32 zblk22_parity_int; /**< @brief zblock22 parity 中断 */ + ZXIC_UINT32 zblk21_parity_int; /**< @brief zblock21 parity 中断 */ + ZXIC_UINT32 zblk20_parity_int; /**< @brief zblock20 parity 中断 */ + ZXIC_UINT32 zblk19_parity_int; /**< @brief zblock19 parity 中断 */ + ZXIC_UINT32 zblk18_parity_int; /**< @brief zblock18 parity 中断 */ + ZXIC_UINT32 zblk17_parity_int; /**< @brief zblock17 parity 中断 */ + ZXIC_UINT32 zblk16_parity_int; /**< @brief zblock16 parity 中断 */ + ZXIC_UINT32 zblk15_parity_int; /**< @brief zblock15 parity 中断 */ + ZXIC_UINT32 zblk14_parity_int; /**< @brief zblock14 parity 中断 */ + ZXIC_UINT32 zblk13_parity_int; /**< @brief zblock13 parity 中断 */ + ZXIC_UINT32 zblk12_parity_int; /**< @brief zblock12 parity 中断 */ + ZXIC_UINT32 zblk11_parity_int; /**< @brief zblock11 parity 中断 */ + ZXIC_UINT32 zblk10_parity_int; /**< @brief zblock10 parity 中断 */ + ZXIC_UINT32 zblk9_parity_int; /**< @brief zblock9 parity 中断 */ + ZXIC_UINT32 zblk8_parity_int; /**< @brief zblock8 parity 中断 */ + ZXIC_UINT32 zblk7_parity_int; /**< @brief zblock7 parity 中断 */ + ZXIC_UINT32 zblk6_parity_int; /**< @brief zblock6 parity 中断 */ + ZXIC_UINT32 zblk5_parity_int; /**< @brief zblock5 parity 中断 */ + ZXIC_UINT32 zblk4_parity_int; /**< @brief zblock4 parity 中断 */ + ZXIC_UINT32 zblk3_parity_int; /**< @brief zblock3 parity 中断 */ + ZXIC_UINT32 zblk2_parity_int; /**< @brief zblock2 parity 中断 */ + ZXIC_UINT32 zblk1_parity_int; /**< @brief zblock1 parity 中断 */ + ZXIC_UINT32 zblk0_parity_int; /**< @brief zblock0 parity 中断 */ + + ZXIC_UINT32 zcam_hash_p0_err_int; /**< @brief hash0 ZCAM查找出错中断 */ + ZXIC_UINT32 + hash0_agree_int_fifo_ovf_int; /**< @brief hash0 汇聚fifo 片内通道溢出中断 */ + ZXIC_UINT32 + hash0_agree_ext_fifo_ovf_int; /**< @brief hash0 汇聚fifo 片外通道parity中断 */ + ZXIC_UINT32 + hash0_agree_ext_fifo_parity_err_int; /**< @brief hash0 汇聚fifo 片外通道溢出中断*/ + ZXIC_UINT32 + hash0_agree_int_fifo_parity_err_int; /**< @brief 
hash0 汇聚fifo 片内通道parity中断 */ + ZXIC_UINT32 + hash0_key_fifo_ovfl_int; /**< @brief hash0 片外键值缓存FIFO 溢出中断 */ + ZXIC_UINT32 + hash0_sreq_fifo_ovfl_int; /**< @brief hash0 片外读地址缓存FIFO溢出中断 */ + ZXIC_UINT32 + hash0_key_fifo_parity_err_int; /**< @brief hash0 片外键值缓存FIFO parity err中断 */ + + ZXIC_UINT32 zcam_hash_p1_err_int; /**< @brief hash1 ZCAM查找出错中断 */ + ZXIC_UINT32 + hash1_agree_int_fifo_ovf_int; /**< @brief hash1 汇聚fifo 片内通道溢出中断 */ + ZXIC_UINT32 + hash1_agree_ext_fifo_ovf_int; /**< @brief hash1 汇聚fifo 片外通道parity中断 */ + ZXIC_UINT32 + hash1_agree_ext_fifo_parity_err_int; /**< @brief hash1 汇聚fifo 片外通道溢出中断*/ + ZXIC_UINT32 + hash1_agree_int_fifo_parity_err_int; /**< @brief hash1 汇聚fifo 片内通道parity中断 */ + ZXIC_UINT32 + hash1_key_fifo_ovfl_int; /**< @brief hash1 片外键值缓存FIFO 溢出中断 */ + ZXIC_UINT32 + hash1_sreq_fifo_ovfl_int; /**< @brief hash1 片外读地址缓存FIFO溢出中断 */ + ZXIC_UINT32 + hash1_key_fifo_parity_err_int; /**< @brief hash1 片外键值缓存FIFO parity err中断 */ + + ZXIC_UINT32 zcam_hash_p2_err_int; /**< @brief hash2 ZCAM查找出错中断 */ + ZXIC_UINT32 + hash2_agree_int_fifo_ovf_int; /**< @brief hash2 汇聚fifo 片内通道溢出中断 */ + ZXIC_UINT32 + hash2_agree_ext_fifo_ovf_int; /**< @brief hash2 汇聚fifo 片外通道parity中断 */ + ZXIC_UINT32 + hash2_agree_ext_fifo_parity_err_int; /**< @brief hash2 汇聚fifo 片外通道溢出中断*/ + ZXIC_UINT32 + hash2_agree_int_fifo_parity_err_int; /**< @brief hash2 汇聚fifo 片内通道parity中断 */ + ZXIC_UINT32 + hash2_key_fifo_ovfl_int; /**< @brief hash2 片外键值缓存FIFO 溢出中断 */ + ZXIC_UINT32 + hash2_sreq_fifo_ovfl_int; /**< @brief hash2 片外读地址缓存FIFO溢出中断 */ + ZXIC_UINT32 + hash2_key_fifo_parity_err_int; /**< @brief hash2 片外键值缓存FIFO parity err中断 */ + + ZXIC_UINT32 zcam_hash_p3_err_int; /**< @brief hash3 ZCAM查找出错中断 */ + ZXIC_UINT32 + hash3_agree_int_fifo_ovf_int; /**< @brief hash3 汇聚fifo 片内通道溢出中断 */ + ZXIC_UINT32 + hash3_agree_ext_fifo_ovf_int; /**< @brief hash3 汇聚fifo 片外通道parity中断 */ + ZXIC_UINT32 + hash3_agree_ext_fifo_parity_err_int; /**< @brief hash3 汇聚fifo 片外通道溢出中断*/ + ZXIC_UINT32 + 
hash3_agree_int_fifo_parity_err_int; /**< @brief hash3 汇聚fifo 片内通道parity中断 */ + ZXIC_UINT32 + hash3_key_fifo_ovfl_int; /**< @brief hash3 片外键值缓存FIFO 溢出中断 */ + ZXIC_UINT32 + hash3_sreq_fifo_ovfl_int; /**< @brief hash3 片外读地址缓存FIFO溢出中断 */ + ZXIC_UINT32 + hash3_key_fifo_parity_err_int; /**< @brief hash3 片外键值缓存FIFO parity err中断 */ + + ZXIC_UINT32 zcam_lpm_err_int; /**< @brief lpm ZCAM查找出错中断 */ + ZXIC_UINT32 + lpm_as_int_rsp_fifo_ovfl_int; /**< @brief lpm as 片内查找结果缓存FIFO溢出中断 */ + ZXIC_UINT32 + lpm_as_req_fifo_ovfl_int; /**< @brief lpm as 查找ddr 地址 FIFO溢出中断 */ + ZXIC_UINT32 + lpm_ext_ddr_rsp_fifo_parity_int; /**< @brief lpm ext ddr rsp FIFO parity中断 */ + ZXIC_UINT32 + lpm_ext_v6_key_parity_int; /**< @brief lpm ext v6 key FIFO parity中断 */ + ZXIC_UINT32 + lpm_ext_v4_key_parity_int; /**< @brief lpm ext v4 key FIFO parity中断 */ + ZXIC_UINT32 + lpm_ext_addr_fifo_ovfl_int; /**< @brief lpm ext ddr3 地址溢出中断 */ + ZXIC_UINT32 + lpm_ext_v4_fifo_ovfl_int; /**< @brief lpm ext v4 key溢出中断 */ + ZXIC_UINT32 + lpm_ext_v6_fifo_ovfl_int; /**< @brief lpm ext v6 key溢出中断 */ + ZXIC_UINT32 + lpm_ext_ddr_rsp_ovf_int; /**< @brief lpm ext ddr返回缓存fifo溢出中断 */ +} DPP_SE_ALG_INT_T; + +/** ALG 模块对外总中断*/ +typedef struct dpp_se_alg_brief_int_t { + ZXIC_UINT32 schd_int_unmask_flag; /**<@brief alg模块调度状态 */ + ZXIC_UINT32 + zblk_parity_int_unmask_flag; /**<@brief alg模块zblock奇偶校验中断 */ + ZXIC_UINT32 hash0_int_unmask_flag; /**<@brief alg模块hash0中断状态 */ + ZXIC_UINT32 hash1_int_unmask_flag; /**<@brief alg模块hash1中断状态 */ + ZXIC_UINT32 hash2_int_unmask_flag; /**<@brief alg模块hash2中断状态 */ + ZXIC_UINT32 hash3_int_unmask_flag; /**<@brief alg模块hash3中断状态 */ + ZXIC_UINT32 lpm_int_unmask_flag; /**<@brief alg模块lpm中断状态 */ +} DPP_SE_ALG_BRIEF_INT_T; + +/** SMMU1 中断 */ +typedef struct dpp_se_smmu1_int_t { + ZXIC_UINT32 smmu1_int0; + ZXIC_UINT32 smmu1_int1; + ZXIC_UINT32 smmu1_int2; + ZXIC_UINT32 smmu1_int3; + ZXIC_UINT32 smmu1_int4; + ZXIC_UINT32 smmu1_int5; + ZXIC_UINT32 smmu1_int6; + ZXIC_UINT32 smmu1_int7; + ZXIC_UINT32 smmu1_int8; 
+ ZXIC_UINT32 smmu1_int9; + ZXIC_UINT32 smmu1_int10; + ZXIC_UINT32 smmu1_int11; + ZXIC_UINT32 smmu1_int12; + ZXIC_UINT32 smmu1_int13; + ZXIC_UINT32 smmu1_int14; + ZXIC_UINT32 smmu1_int15; + ZXIC_UINT32 smmu1_int16; + ZXIC_UINT32 smmu1_int17; +} DPP_SE_SMMU1_INT_T; + +typedef struct dpp_etcam_intr_t { + ZXIC_UINT32 etcam_int_33; + ZXIC_UINT32 etcam_int_32; + ZXIC_UINT32 etcam_int_31; + ZXIC_UINT32 etcam_int_30; + ZXIC_UINT32 etcam_int_29; + ZXIC_UINT32 etcam_int_28; + ZXIC_UINT32 etcam_int_27; + ZXIC_UINT32 etcam_int_26; + ZXIC_UINT32 etcam_int_25; + ZXIC_UINT32 etcam_int_24; + ZXIC_UINT32 etcam_int_23; + ZXIC_UINT32 etcam_int_22; + ZXIC_UINT32 etcam_int_21; + ZXIC_UINT32 etcam_int_20; + ZXIC_UINT32 etcam_int_19; + ZXIC_UINT32 etcam_int_18; + ZXIC_UINT32 etcam_int_17; + ZXIC_UINT32 etcam_int_16; + ZXIC_UINT32 etcam_int_15; + ZXIC_UINT32 etcam_int_14; + ZXIC_UINT32 etcam_int_13; + ZXIC_UINT32 etcam_int_12; + ZXIC_UINT32 etcam_int_11; + ZXIC_UINT32 etcam_int_10; + ZXIC_UINT32 etcam_int_9; + ZXIC_UINT32 etcam_int_8; + ZXIC_UINT32 etcam_int_7; + ZXIC_UINT32 etcam_int_6; + ZXIC_UINT32 etcam_int_5; + ZXIC_UINT32 etcam_int_4; + ZXIC_UINT32 etcam_int_3; + ZXIC_UINT32 etcam_int_2; + ZXIC_UINT32 etcam_int_1; + ZXIC_UINT32 etcam_int_0; +} DPP_ETCAM_INTR_T; + +typedef struct dpp_se_stat_int_t { + ZXIC_UINT32 stat_int0; + ZXIC_UINT32 stat_int1; + ZXIC_UINT32 stat_int2; + ZXIC_UINT32 stat_int3; + ZXIC_UINT32 stat_int4; + ZXIC_UINT32 stat_int5; +} DPP_SE_STAT_INT_T; + +#endif + +#if ZXIC_REAL("macro function define") + +#endif + +#if ZXIC_REAL("function declaration") +/***********************************************************/ +/** 写eRam +* @param dev_id 设备号 +* @param base_addr 基地址,以128bit为单位 +* @param index 条目索引 +* @param wrt_mode 数据位宽模式, 取值参考ERAM128_OPR_MODE_E的定义 +* @param p_data 数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/16 +************************************************************/ +DPP_STATUS dpp_se_smmu0_ind_write(DPP_DEV_T *dev, ZXIC_UINT32 
base_addr, + ZXIC_UINT32 index, ZXIC_UINT32 wrt_mode, + ZXIC_UINT32 *p_data); +/***********************************************************/ +/** 读eRam +* @param dev_id 设备号 +* @param base_addr 基地址,以128bit为单位 +* @param index 条目索引,支持128、64、32和1bit的索引值 +* @param rd_mode 读eRam模式, 取值参照ERAM128_OPR_MODE_E定义,读清模式下不支持1bit模式 +* @param rd_clr_mode eRam读清模式, 取值参照ERAM128_RD_CLR_MODE_E定义 +* @param p_data 返回数据缓存的指针 +* +* @return +* @remark 无 +* @see +* @author wcl @date 2015/01/30 +************************************************************/ +DPP_STATUS dpp_se_smmu0_ind_read(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 index, ZXIC_UINT32 rd_mode, + ZXIC_UINT32 rd_clr_mode, ZXIC_UINT32 *p_data); + +#endif + +#if ZXIC_REAL("Hash & LPM data struct and function") + +/** lpm&hash算法模块硬件读写函数指针类型 */ +typedef DPP_STATUS (*WRITE32_FUN)(ZXIC_UINT32 dev_id, ZXIC_UINT32 addr, + ZXIC_UINT32 write_data); +typedef DPP_STATUS (*READ32_FUN)(ZXIC_UINT32 dev_id, ZXIC_UINT32 addr, + ZXIC_UINT32 *read_data); + +/** 前缀匹配路由关联结果表写硬件函数指针类型*/ +typedef DPP_STATUS (*LPM_AS_RSLT_WRT_FUNCTION)(ZXIC_UINT32 dev_id, + ZXIC_UINT32 as_type, + ZXIC_UINT32 tbl_id, + ZXIC_UINT32 index, + ZXIC_UINT8 *p_data); + +#define SE_ZGRP_NUM (4) +#define SE_ZBLK_NUM (32) +#define SE_ZCELL_NUM (4) +#define SE_ZCELL_TOTAL_NUM (SE_ZBLK_NUM * SE_ZCELL_NUM) +#define SE_ZREG_NUM (4) +#define SE_RAM_DEPTH (512) +#define MAX_FUN_NUM (8) +#define SE_ALG_BANK_NUM (29) + +#define LPM_THREAD_HW_WRITE_EN (0) /* ??????????????? */ +#if LPM_THREAD_HW_WRITE_EN +#define ROUTE_DEV_CHANNEL_MAX (4) /* ?????? NPE_DEV_CHANNEL_MAX ??????? 
*/ +#define MAX_ITEM_INFO_BAK_NUM (0x100) +#endif +/* +* zcell/zreg 基本存储单元的配置信息,512bit为单位, +* zcell包含512个基本存储单元,zreg包含4个基本存储单元, +* 每个单元可以存储1个512bit条目、2个256bit条目或4个128bit条目, +* 本结构体不感知内部存储条目的位宽,hash中键值中的key_type可以 +* 可以获取表项的位宽信息 +*/ +typedef struct se_item_cfg { + D_HEAD item_list; /** 基本存储单元双链表头节点,可以存放放在本单元内的具体表项条目*/ + ZXIC_UINT32 item_index; /** 基本存储单元所处的索引值 */ + ZXIC_UINT32 hw_addr; /** 基本存储单元物理的地址 */ + ZXIC_UINT32 bulk_id; + ZXIC_UINT32 + item_type; /** 基本存储单元的存储类型,包括片外或片内,参见SE_ITEM_TYPE */ + ZXIC_UINT8 + wrt_mask; /** 基本存储单元写入的掩码,4bit,每bit表示待写入的128bit位置 */ + ZXIC_UINT8 valid; /** 基本存储单元是否被占用,0-未被占用,1-被占用 */ + ZXIC_UINT8 pad[2]; +} SE_ITEM_CFG; + +/** zcell属性标志*/ +#define DPP_ZCELL_FLAG_IS_MONO \ + (1) /**< @brief ZCELL是否被独占: 0-未被独占,1-已经被独占*/ + +/** zcell属性标志*/ +#define DPP_ZREG_FLAG_IS_MONO \ + (1) /**< @brief ZREG是否被独占: 0-未被独占,1-已经被独占*/ + +/* zcell 的配置信息*/ +typedef struct se_zcell_cfg { + ZXIC_UINT8 + flag; /* zcell属性标志,各比特的含义见上面DPP_ZCELL_FLAG_MODE等的定义 */ + ZXIC_UINT32 bulk_id; /* zcell所属空间ID */ + ZXIC_UINT32 zcell_idx; /* zcell 的索引值, 高6bit为zblock的索引 */ + ZXIC_UINT16 mask_len; /* 掩码长度 */ + ZXIC_UINT8 is_used; /* 本zcell是否被占用*/ + ZXIC_UINT8 is_share; /* 是否共享block中的zcell*/ + ZXIC_UINT32 item_used; + SE_ITEM_CFG item_info[SE_RAM_DEPTH]; /* 本zcell的基本存储单元配置*/ + + D_NODE zcell_dn; /* 双链表节点,初始化其data指针指向SE_ZCELL_CFG实例自身,然后插入双链表中*/ + ZXIC_AVL_NODE zcell_avl; /* 本hash程序未使用, 平衡二叉树节点*/ +} SE_ZCELL_CFG; +/* zreg 的配置信息*/ +typedef struct se_zreg_cfg { + ZXIC_UINT8 + flag; /* z属性标志,各比特的含义见上面DPP_ZCELL_FLAG_MODE等的定义 */ + ZXIC_UINT8 pad[3]; + ZXIC_UINT32 bulk_id; /* zcell所属空间ID */ + SE_ITEM_CFG item_info; /* zreg 存储的表条目配置*/ +} SE_ZREG_CFG; +/* zblock 的配置信息*/ +typedef struct se_zblk_cfg { + ZXIC_UINT32 zblk_idx; /* 统一编址,范围0~31 */ + ZXIC_UINT16 is_used; /* 本zblock 是否被占用 */ + ZXIC_UINT16 zcell_bm; /* 本hash程序未使用 */ + ZXIC_UINT16 hash_arg; /* hash crc算法的因子 */ + ZXIC_UINT16 pad; + + SE_ZCELL_CFG zcell_info[SE_ZCELL_NUM]; /* 本block的4个zcell配置 */ + SE_ZREG_CFG zreg_info[SE_ZREG_NUM]; /* 
本block的4个zreg配置*/ + + D_NODE zblk_dn; /* 指向本实例的双链表节点 */ + +} SE_ZBLK_CFG; +/* alg表类型配置,包括4路hash引擎和v4 和 v6 lpm表项*/ +typedef struct func_id_info { + ZXIC_VOID *fun_ptr; + ZXIC_UINT8 fun_type; /* 业务类型 参见 SE_FUN_TYPE */ + ZXIC_UINT8 fun_id; + ZXIC_UINT8 is_used; + ZXIC_UINT8 pad; +} FUNC_ID_INFO; + +typedef struct ddr_mem { + ZXIC_UINT32 total_num; + ZXIC_UINT32 base_addr; + ZXIC_UINT32 base_addr_offset; + ZXIC_UINT32 ecc_en; + ZXIC_UINT32 bank_num; + ZXIC_UINT32 bank_info[SE_ALG_BANK_NUM]; + ZXIC_UINT32 share_type; /* DDR bank共享模式 */ + ZXIC_UINT32 item_used; + ZXIC_LISTSTACK_MANGER *p_ddr_mng; +} DDR_MEM; + +typedef struct share_ram { + ZXIC_UINT32 zblk_array[SE_ZBLK_NUM]; + D_HEAD zblk_list; + D_HEAD zcell_free_list; + ZXIC_UINT32 def_route_num; + + ZXIC_RB_CFG def_rb; + struct def_route_info *p_dr_info; + + DDR_MEM ddr4_info; + DDR_MEM ddr6_info; +} SHARE_RAM; + +/** 算法模块管理数据结构,用户仅需创建实例变量,然后将指针传给初始化函数dpp_se_init()和dpp_se_client_init()即可,不需要单独赋值成员变量*/ +typedef struct dpp_se_cfg { + SE_ZBLK_CFG zblk_info[SE_ZBLK_NUM]; + + FUNC_ID_INFO fun_info[MAX_FUN_NUM]; + + SHARE_RAM route_shareram; + ZXIC_UINT32 reg_base; + + WRITE32_FUN p_write32_fun; + READ32_FUN p_read32_fun; + + ZXIC_UINT32 lpm_flags; + + ZXIC_VOID *p_client; + + DPP_DEV_T dev; + ZXIC_UINT32 dev_id; + + LPM_AS_RSLT_WRT_FUNCTION p_as_rslt_wrt_fun; /* dpp_se_lpm_as_rslt_write */ + +#if LPM_THREAD_HW_WRITE_EN + // ZXIC_UINT32 mutex_location; /* dpp_route_cfg ?? cache mutex???????????route mode */ + ZXIC_MUTEX_T + cache_index_mutex[MAX_ITEM_INFO_BAK_NUM]; /* cache每个节点锁 */ + ZXIC_UINT32 + thread_hw_write_is_create; /* ??????????????? ???????? 
add by lining for thread_hw_write */ + ZXIC_LISTSTACK_MANGER *p_thread_liststack_mng; +#endif + +} DPP_SE_CFG; + +/** hash物理存储位宽类型*/ +typedef enum dpp_hash_ddr_width_mode { + DDR_WIDTH_INVALID = 0, + DDR_WIDTH_256b, /**< @brief 256bit位宽模式*/ + DDR_WIDTH_512b, /**< @brief 512bit位宽模式*/ +} DPP_HASH_DDR_WIDTH_MODE; + +/** hash条目类型*/ +typedef enum dpp_hash_key_type { + HASH_KEY_INVALID = 0, /**< @brief 无效类型*/ + HASH_KEY_128b, /**< @brief 128bit位宽类型*/ + HASH_KEY_256b, /**< @brief 256bit位宽类型*/ + HASH_KEY_512b, /**< @brief 512bit位宽类型*/ +} DPP_HASH_KEY_TYPE; + +/** hash ddr resource cfg info*/ +typedef struct dpp_hash_ddr_resc_cfg_t { + ZXIC_UINT32 + ddr_width_mode; /**< @brief 分配给hash的DDR空间物理存储位宽模式,取值参考DPP_HASH_DDR_WIDTH_MODE的定义*/ + ZXIC_UINT32 + ddr_crc_sel; /**< @brief 选择一个DDR CRC多项式,取值范围0~3,0~3分别对应一个CRC多项式*/ + ZXIC_UINT32 + ddr_item_num; /**< @brief 分配给hash的DDR空间单元数目,以256bit为一个单元*/ + ZXIC_UINT32 + ddr_baddr; /**< @brief 分配给hash的DDR空间的硬件基地址,以2k*256bit为单位*/ + ZXIC_UINT32 ddr_ecc_en; /**< @brief DDR ECC使能: 0-不使能,1-使能*/ +} DPP_HASH_DDR_RESC_CFG_T; + +/** hash search mode */ +typedef enum dpp_hash_srh_mode { + HASH_SRH_MODE_SOFT = 1, /**< @brief 查软件 */ + HASH_SRH_MODE_HDW = 2, /**< @brief 查硬件 */ +} DPP_HASH_SRH_MODE; + +/** hash tbl_flag */ +#define HASH_TBL_FLAG_AGE \ + (1 << 0) /**< @brief 老化保活置位使能: 0-不使能;1-使能*/ +#define HASH_TBL_FLAG_LEARN \ + (1 << 1) /**< @brief 硬件学习使能: 0-不使能;1-使能*/ +#define HASH_TBL_FLAG_MC_WRT \ + (1 << 2) /**< @brief 微码写表使能: 0-不使能,1-使能 */ + +/** hash条目*/ +typedef struct dpp_hash_entry { + ZXIC_UINT8 *p_key; /**< @brief 键值,格式详见各操作函数的说明*/ + ZXIC_UINT8 *p_rst; /**< @brief 结果*/ +} DPP_HASH_ENTRY; + +/** 前缀路由业务初始化标志*/ +#define LPM_FLAG_RT_HANDLE_START \ + (0) /**< @brief 是否使能级联结果表查找: 0-不使能,1-使能*/ +#define LPM_FLAG_RT_HANDLE_WIDTH (1) +#define LPM4_FLAG_DDR_EN_START \ + (1) /**< @brief 是否使能ipv4片外查找模式: 0-不使能,1-使能 */ +#define LPM4_FLAG_DDR_EN_WIDTH (1) +#define LPM6_FLAG_DDR_EN_START \ + (2) /**< @brief 是否使能ipv6片外查找模式: 0-不使能,1-使能 */ +#define 
LPM6_FLAG_DDR_EN_WIDTH (1) +#define LPM4_FLAG_DDR_SEL_START \ + (3) /**< @brief 是否为ipv4非线速模式: 0-线速,1-非线速 */ +#define LPM4_FLAG_DDR_SEL_WIDTH (1) +#define LPM6_FLAG_DDR_SEL_START \ + (4) /**< @brief 是否为ipv6非线速模式: 0-线速,1-非线速 */ +#define LPM6_FLAG_DDR_SEL_WIDTH (1) +#define LPM_FLAG_AS_MODE_START \ + (5) /**< @brief 级联结果表模式: 1-级联DDR,0-级联eRam */ +#define LPM_FLAG_AS_MODE_WIDTH (1) + +/** 前缀匹配路由级联DDR结果表返回位宽模式*/ +typedef enum dpp_route_as_rsp_len_e { + DPP_ROUTE_AS_128b = 0, /**< @brief 返回128bit模式*/ + DPP_ROUTE_AS_256b = 1, /**< @brief 返回256bit模式*/ + DPP_ROUTE_AS_384b = 2, /**< @brief 返回384bit模式*/ + DPP_ROUTE_AS_512b = 3 /**< @brief 返回512bit模式*/ +} DPP_ROUTE_AS_RSP_LEN_E; + +/** 前缀匹配路由业务ID*/ +typedef enum dpp_route_id_e { + DPP_ROUTE_V4_ID = 4, /**< @brief route ipv4 ID*/ + DPP_ROUTE_V6_ID = 5, /**< @brief route ipv6 ID*/ +} DPP_ROUTE_ID_E; + +/** 前缀匹配路由业务模式*/ +typedef enum dpp_route_mode_e { + DPP_ROUTE_MODE_IPV4 = 1UL, /**< @brief ipv4路由模式*/ + DPP_ROUTE_MODE_IPV6 /**< @brief ipv6路由模式*/ +} DPP_ROUTE_MODE_E; + +/** 前缀匹配路由DDR空间占用模式*/ +typedef enum dpp_route_ddr_use_mode_e { + DPP_ROUTE_DDR_USE_MINOR = 1, /**< @brief DDR使用量较少*/ + DPP_ROUTE_DDR_USE_MIDDLE = 2, /**< @brief DDR使用量中等*/ + DPP_ROUTE_DDR_USE_MAJOR = 3, /**< @brief DDR使用量较多*/ +} DPP_ROUTE_DDR_USE_MODE_E; + +/** 前缀匹配路由调试查找模式*/ +typedef enum dpp_route_srh_mode_e { + DPP_ROUTE_SRH_MODE_LP = 1, /**< @brief 前缀匹配查找模式*/ + DPP_ROUTE_SRH_MODE_EQUAL = 2, /**< @brief 精确匹配查找模式*/ +} DPP_ROUTE_SRH_MODE_E; + +/** 前缀匹配路由硬件资源 */ +typedef struct dpp_route_resource_t { + ZXIC_UINT32 zblk_num; /**< @brief LPM ipv4和ipv6共享的zblock数目*/ + ZXIC_UINT32 *zblk_idx; /**< @brief LPM ipv4和ipv6共享的zblock编号数组*/ + ZXIC_UINT32 + ddr4_item_num; /**< @brief 分配给ipv4前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr4_baddr; /**< @brief 分配给ipv4前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr4_base_offset; /**< @brief ipv4前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr4_ecc_en; /**< @brief 固定配为1,分配给ipv4前缀查找的ddr存储空间的ECC校验使能标志*/ + ZXIC_UINT32 + 
ddr4_bank_num; /**< @brief ipv4前缀查找的ddr存储空间bank复制份数 */ + ZXIC_UINT32 * + ddr4_bank_info; /**< @brief ipv4前缀查找的ddr存储空间bank号,数组传入,支持离散分配*/ + ZXIC_UINT32 + ddr4_share_type; /**< @brief ipv4前缀查找的ddr存储空间bank共享模式,参见SMMU1_DDR_SHARE_MODE_E*/ + ZXIC_UINT32 + ddr6_item_num; /**< @brief 分配给ipv6前缀查找的ddr存储条目数,以256bit为单位*/ + ZXIC_UINT32 + ddr6_baddr; /**< @brief 分配给ipv6前缀查找的ddr存储空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + ddr6_base_offset; /**< @brief ipv6前缀查找相对于片外ddr存储空间基地址的偏移量,以256bit为单位*/ + ZXIC_UINT32 + ddr6_ecc_en; /**< @brief 固定配为1,分配给ipv6前缀查找的ddr存储空间的ECC校验使能标志*/ + ZXIC_UINT32 + ddr6_bank_num; /**< @brief ipv6前缀查找的ddr存储空间bank复制份数 */ + ZXIC_UINT32 * + ddr6_bank_info; /**< @brief ipv6前缀查找的ddr存储空间bank号,数组传入,支持离散分配*/ + ZXIC_UINT32 + ddr6_share_type; /**< @brief ipv6前缀查找的ddr存储空间bank共享模式,参见SMMU1_DDR_SHARE_MODE_E*/ +} DPP_ROUTE_RESOURCE_T; + +/** 前缀匹配路由级联片内eRam结果表属性 */ +typedef struct dpp_route_as_eram_t { + ZXIC_UINT32 baddr; /**< @brief LPM级联eRam结果基地址*/ + ZXIC_UINT32 + rsp_mode; /**< @brief LPM级联eRam结果位宽模式,取值参照ERAM128_TBL_MODE_E的定义*/ +} DPP_ROUTE_AS_ERAM_T; + +/** 前缀匹配路由级联片外DDR结果表属性 */ +typedef struct dpp_route_as_ddr_t { + ZXIC_UINT32 + baddr; /**< @brief 分配给级联ddr结果表空间的基地址,以4K*128bit为单位*/ + ZXIC_UINT32 + rsp_len; /**< @brief LPM级联DDR结果位宽模式,取值参照DPP_ROUTE_AS_RSP_LEN_E的定义*/ + ZXIC_UINT32 + ecc_en; /**< @brief 级联结果表DDR空间ECC校验使能标志: 0-不使能,1-使能*/ +} DPP_ROUTE_AS_DDR_T; + +typedef union dpp_route_as_rslttbl_u { + DPP_ROUTE_AS_ERAM_T as_eram_cfg; /**< @brief LPM级联eRam结果表空间属性*/ + DPP_ROUTE_AS_DDR_T as_ddr_cfg; /**< @brief LPM级联DDR结果表空间属性*/ +} DPP_ROUTE_AS_RSLTTBL_U; + +/** 前缀匹配IPv4键值*/ +typedef struct dpp_route_ipv4_key_t { + ZXIC_UINT32 vpnid; /**< @brief vpnid,16bit*/ + ZXIC_UINT32 + mask_len; /**< @brief IP地址的掩码长度,最小为0(表示默认路由),最大为32*/ + ZXIC_UINT32 ipv4_addr; /**< @brief IP地址,32bit*/ +} DPP_ROUTE_IPV4_KEY_T; + +/** 前缀匹配IPv4单个路由条目*/ +typedef struct dpp_route_entry_ipv4_t { + DPP_ROUTE_IPV4_KEY_T route_key; /**< @brief 键值*/ + ZXIC_UINT32 + route_handle; /**< @brief 键值匹配后,继续查转发结果表的索引*/ + 
ZXIC_UINT8 * + p_as_rslt; /**< @brief 转发结果,仅在使能lpm关联结果查找的情况下有效 */ +} DPP_ROUTE_ENTRY_IPV4_T; + +/** 前缀匹配IPv6键值*/ +typedef struct dpp_route_ipv6_key_t { + ZXIC_UINT32 vpnid; /**< @brief vpnid,16bit*/ + ZXIC_UINT32 + mask_len; /**< @brief IP地址的掩码长度,最小为0(表示默认路由),最大为128*/ + ZXIC_UINT32 ipaddr[4]; /**< @brief IP地址,128bit*/ +} DPP_ROUTE_IPV6_KEY_T; + +/** 前缀匹配IPv6单个路由条目*/ +typedef struct dpp_route_entry_ipv6_t { + DPP_ROUTE_IPV6_KEY_T route_key; /**< @brief 键值*/ + ZXIC_UINT32 + route_handle; /**< @brief 键值匹配后,继续查转发结果表的索引*/ + ZXIC_UINT8 * + p_as_rslt; /**< @brief 转发结果,仅在使能lpm关联结果查找的情况下有效 */ +} DPP_ROUTE_ENTRY_IPV6_T; + +/* 在HASH进行初始化的时候,需要存储的参数 */ +typedef struct dpp_hash_soft_reset_stor_dat { + /* 参照dpp_hash_init()的参数 */ + ZXIC_UINT32 + ddr_dis_flag[4]; /**< @brief 4个HASH引擎是否DISABLE DDR标志 */ + ZXIC_UINT32 zblk_num[4]; /**< @brief 4个HASH引擎使用的zblk数量 */ + ZXIC_UINT32 * + zblk_idx_start[4]; /**< @brief 4个HASH引擎使用的起始zblk index */ + + /* 参照dpp_hash_bulk_init()的参数 */ + ZXIC_UINT32 + ddr_item_num[4][8]; /**< @brief 每个bulk空间存放的ddr item数 */ + ZXIC_UINT32 ddr_base_addr[4]; /**< @brief 每个hash引擎基地址 */ + ZXIC_UINT32 ddr_bank_cp[4]; /**< @brief 每个hash引擎bank copy数量 */ + ZXIC_UINT32 ddr_ecc_en[4]; /**< @brief ddr是否开启ecc使能 */ + + /* ZXIC_UINT32 ddr_width_mode[4]; */ /**< @brief ddr存储位宽 */ + /* ZXIC_UINT32 ddr_crc_sel[4]; */ /**< @brief ddr多项式选择 */ + /* ZXIC_UINT32 ddr_share_type[4]; */ /**< @brief ddr 共享类型 */ + + ZXIC_UINT32 hash_id_valid; /** HASH引擎是否已经初始化标志 */ +} DPP_HASH_SOFT_RESET_STOR_DAT; + +/***********************************************************/ +/** 初始化算法管理数据结构,不包含用户自定义的数据指针 +* @param p_se_cfg 算法管理数据结构指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_se_init(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg); + +/***********************************************************/ +/** 初始化算法管理数据结构用户自定义的数据指针,当前仅用于传入设备号的值 +* @param p_se_cfg 算法管理数据结构指针 +* @param p_client 
用户自定义的数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_se_client_init(DPP_SE_CFG *p_se_cfg, ZXIC_VOID *p_client); + +/***********************************************************/ +/** 单个hash引擎初始化 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param zblk_num 分配给此hash引擎的zblock数目 +* @param zblk_idx 分配给此hash引擎的zblock编号 +* @param ddr_dis DDR关闭位,0-不关闭片外DDR, 1-关闭片外DDR +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 zblk_num, ZXIC_UINT32 *zblk_idx, + ZXIC_UINT32 ddr_dis); + +/***********************************************************/ +/** 初始化单个hash引擎内的某个业务表,此接口支持为该业务表分配独占的zcell。 +* 必须先初始化hash引擎,再初始化业务表。 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param bulk_id 每个Hash引擎资源划分的空间ID号 +* @param p_ddr_resc_cfg 分配给hash引擎此资源空间的ddr资源属性 +* @param zcell_num 分配给hash引擎此资源空间的zcell数量 +* @param zreg_num 分配给hash引擎此资源空间的zreg数量 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_bulk_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, + DPP_HASH_DDR_RESC_CFG_T *p_ddr_resc_cfg, + ZXIC_UINT32 zcell_num, ZXIC_UINT32 zreg_num); + +/***********************************************************/ +/** 初始化单个hash引擎内的某个业务表,此接口支持为该业务表分配独占的zcell。 +* 必须先初始化hash引擎,如果是片内+片外模式还必须先初始化dpp_hash_ddr_bulk_init, +* 再初始化业务表。 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param tbl_id 业务表 +* @param tbl_flag 初始化标记, bitmap的形式使用,如:HASH_TBL_FLAG_AGE等 +* @param key_type hash条目类型,取值参照DPP_HASH_KEY_TYPE的定义 +* @param actu_key_size 业务键值有效长度: 8bit*N,N=1~48 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 
2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_tbl_id_info_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 tbl_id, ZXIC_UINT32 tbl_flag, + ZXIC_UINT32 key_type, + ZXIC_UINT32 actu_key_size); + +#endif + +#if ZXIC_REAL("eTcam data struct and function") +typedef DPP_STATUS (*ACL_AS_RSLT_WRT_FUNCTION)(ZXIC_UINT32 dev_id, + ZXIC_UINT32 base_addr, + ZXIC_UINT32 index, + ZXIC_UINT32 as_mode, + ZXIC_UINT8 *p_data); +#define DPP_ACL_TBL_ID_NUM (8U) +#define DPP_ACL_ETCAM_ID_NUM (1U) +#define DPP_ACL_BLOCK_NUM (8U) + +/** eTcam条目位宽模式*/ +typedef enum dpp_etcam_entry_mode_e { + DPP_ETCAM_KEY_640b = 0, + DPP_ETCAM_KEY_320b = 1, + DPP_ETCAM_KEY_160b = 2, + DPP_ETCAM_KEY_80b = 3, + DPP_ETCAM_KEY_INVALID, +} DPP_ETCAM_ENTRY_MODE_E; + +/** eTcam操作类型*/ +typedef enum dpp_etcam_opr_type_e { + DPP_ETCAM_OPR_DM = 0, /**< @brief data & mask类型*/ + DPP_ETCAM_OPR_XY = 1, /**< @brief X & Y类型*/ +} DPP_ETCAM_OPR_TYPE_E; + +/** eTcam条目格式*/ +typedef struct dpp_etcam_entry_t { + ZXIC_UINT32 + mode; /**< @brief 条目位宽模式: 2'b00-640bit,2'b01-320bit,2'b10-160bit,2'b11-80bit*/ + ZXIC_UINT8 *p_data; /**< @brief 键值*/ + ZXIC_UINT8 *p_mask; /**< @brief 掩码*/ +} DPP_ETCAM_ENTRY_T; + +/** ACL键值位宽模式*/ +typedef enum dpp_acl_key_mode_e { + DPP_ACL_KEY_640b = 0, /**< @brief 640bit键值位宽*/ + DPP_ACL_KEY_320b, /**< @brief 320bit键值位宽*/ + DPP_ACL_KEY_160b, /**< @brief 160bit键值位宽*/ + DPP_ACL_KEY_80b, /**< @brief 80bit键值位宽*/ + DPP_ACL_KEY_INVALID, +} DPP_ACL_KEY_MODE_E; + +/** ACL关联查找结果表位宽模式*/ +typedef enum dpp_acl_as_mode_e { + DPP_ACL_AS_MODE_16b = 0, + DPP_ACL_AS_MODE_32b = 1, + DPP_ACL_AS_MODE_64b = 2, /**< @brief 64bit结果位宽*/ + DPP_ACL_AS_MODE_128b = 3, /**< @brief 128bit结果位宽*/ + DPP_ACL_AS_MODE_INVALID, +} DPP_ACL_AS_MODE_E; + +/** ACL调试查找模式*/ +typedef enum dpp_acl_srh_mode_e { + DPP_ACL_SRH_SOFT = 0, /**< @brief 查软件*/ + DPP_ACL_SRH_HARDWARE = 1, /**< @brief 查硬件*/ +} DPP_ACL_SRH_MODE_E; + +/** ACL条目*/ +typedef struct dpp_acl_entry_t { + ZXIC_UINT32 handle; 
/**< @brief 条目索引*/ + ZXIC_UINT8 *key_data; /**< @brief 键值data部分*/ + ZXIC_UINT8 *key_mask; /**< @brief 键值mask部分: 0为关心,1为不关心*/ + ZXIC_UINT8 *p_as_rslt; /**< @brief 关联结果,仅使能关联查找情况有效*/ +} DPP_ACL_ENTRY_T; + +typedef struct dpp_acl_block_info_t { + ZXIC_UINT32 is_used; + ZXIC_UINT32 tbl_id; + ZXIC_UINT32 idx_base; +} DPP_ACL_BLOCK_INFO_T; + +typedef struct dpp_acl_etcamid_cfg_t { + ZXIC_UINT32 is_valid; + ZXIC_UINT32 as_enable; /* eTcam自动关联eRam结果表使能: 0-不使能,1-使能 */ + ZXIC_UINT32 as_idx_offset; /* 关联结果表基地址偏移,以128bit为单位 */ + ZXIC_UINT32 + as_eRam_base; /* eTcam自动关联的eRam block的基地址,以128bit为单位 */ + D_HEAD tbl_list; +} DPP_ACL_ETCAMID_CFG_T; + +typedef struct dpp_acl_key_info_t { + ZXIC_UINT32 handle; + ZXIC_UINT32 pri; + ZXIC_UINT8 key[0]; /* data+mask */ +} DPP_ACL_KEY_INFO_T; + +/** ACL初始化使能标志*/ +#define DPP_ACL_FLAG_ETCAM0_EN \ + (1 << 0) /**< @brief 开启eTcam端口0: 0-不开启,1-开启.*/ +#define DPP_ACL_FLAG_ETCAM0_AS \ + (1 << 2) /**< @brief 开启eTcam端口0的关联结果查找: 0-不开启,1-开启.*/ +//#define DPP_ACL_FLAG_ETCAM1_AS (1<<3) /**< @brief 开启eTcam端口1的关联结果查找: 0-不开启,1-开启.*/ + +typedef DPP_STATUS (*ACL_TBL_AS_DDR_WR_FUN)( + ZXIC_UINT32 dev_id, ZXIC_UINT32 tbl_type, ZXIC_UINT32 tbl_id, + ZXIC_UINT32 dir_tbl_share_type, ZXIC_UINT32 dir_tbl_base_addr, + ZXIC_UINT32 ecc_en, ZXIC_UINT32 index, ZXIC_UINT32 as_mode, + ZXIC_UINT8 *p_data); +typedef DPP_STATUS (*ACL_TBL_AS_DDR_RD_FUN)(ZXIC_UINT32 dev_id, + ZXIC_UINT32 base_addr, + ZXIC_UINT32 index, + ZXIC_UINT32 as_mode, + ZXIC_UINT8 *p_data); + +/** */ +typedef struct dpp_acl_tbl_cfg_t { + ZXIC_UINT32 tbl_type; + ZXIC_UINT32 table_id; + ZXIC_UINT8 is_as_ddr; + ZXIC_UINT8 ddr_bankcp_info; + ZXIC_UINT32 dir_tbl_share_type; + ZXIC_UINT8 ddr_ecc_en; + ZXIC_UINT32 pri_mode; + ZXIC_UINT32 key_mode; + ZXIC_UINT32 entry_num; + ZXIC_UINT32 block_num; + ZXIC_UINT32 *block_array; + ZXIC_UINT32 is_used; + ZXIC_UINT32 as_mode; + ZXIC_UINT32 as_idx_base; + ZXIC_UINT32 as_enable; /* eTcam自动关联eRam结果表使能: 0-不使能,1-使能 */ + ZXIC_UINT32 + as_eRam_base; /* eTcam自动关联的eRam 
block的基地址,以128bit为单位 */ + ZXIC_UINT32 ddr_baddr; + ZXIC_UINT32 + idx_offset; /* 相对于ddr_baddr的基地址的索引偏移,以as_mode对应的数据位宽为单位 */ + ACL_TBL_AS_DDR_WR_FUN p_as_ddr_wr_fun; + ACL_TBL_AS_DDR_RD_FUN p_as_ddr_rd_fun; + D_NODE entry_dn; + INDEX_FILL_CFG index_mng; + ZXIC_RB_CFG acl_rb; + DPP_ACL_KEY_INFO_T **acl_key_buff; + ZXIC_UINT8 *as_rslt_buff; +} DPP_ACL_TBL_CFG_T; + +/** ACL公共管理数据结构*/ +typedef struct dpp_acl_cfg_t { + ZXIC_VOID *p_client; + DPP_DEV_T *dev; + ZXIC_UINT32 dev_id; + ZXIC_UINT32 flags; + ACL_AS_RSLT_WRT_FUNCTION p_as_rslt_write_fun; + ACL_AS_RSLT_WRT_FUNCTION p_as_rslt_read_fun; + DPP_ACL_BLOCK_INFO_T acl_blocks[DPP_ACL_BLOCK_NUM]; + DPP_ACL_ETCAMID_CFG_T acl_etcamids; + DPP_ACL_TBL_CFG_T acl_tbls[DPP_ACL_TBL_ID_NUM]; +} DPP_ACL_CFG_T; + +/**< @brief ACL公共管理数据结构*/ +typedef struct dpp_acl_cfg_ex_t { + ZXIC_VOID *p_client; + DPP_DEV_T *dev; + ZXIC_UINT32 dev_id; + ZXIC_UINT32 flags; + ACL_AS_RSLT_WRT_FUNCTION p_as_rslt_write_fun; + ACL_AS_RSLT_WRT_FUNCTION p_as_rslt_read_fun; + DPP_ACL_BLOCK_INFO_T acl_blocks[DPP_ACL_BLOCK_NUM]; + DPP_ACL_ETCAMID_CFG_T acl_etcamids; + DPP_ACL_TBL_CFG_T acl_tbls[DPP_ACL_TBL_ID_NUM]; +} DPP_ACL_CFG_EX_T; + +/** acl优先级模式*/ +typedef enum dpp_acl_pri_mode_e { + DPP_ACL_PRI_EXPLICIT = 1, /**< @brief 显示优先级*/ + DPP_ACL_PRI_IMPLICIT, /**< @brief 隐式优先级,以条目下发顺序作为优先级*/ + DPP_ACL_PRI_SPECIFY, /**< @brief 用户指定每个条目的在tcam中的存放索引*/ + DPP_ACL_PRI_INVALID, +} DPP_ACL_PRI_MODE_E; + +/** */ +typedef struct dpp_acl_entry_ex_t { + ZXIC_UINT32 + idx_val; /**< @brief 一次插入单个acl条目时有效,返回单个索引*/ + D_HEAD idx_list; /**< @brief 一次插入多个acl条目时有效,返回多个索引*/ + ZXIC_UINT32 + pri; /* PRI_EXPLICIT: pri is priority, PRI_IMPLICIT: pri is invalid, PRI_SPECIFY: pri is handle */ + ZXIC_UINT8 *key_data; + ZXIC_UINT8 *key_mask; + ZXIC_UINT8 *p_as_rslt; +} DPP_ACL_ENTRY_EX_T; + +/***********************************************************/ +/** eTcam模块初始化 +* @param dev_id 设备号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/07/24 
+************************************************************/ +DPP_STATUS dpp_etcam_init(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** +* @param p_acl_cfg ACL公共管理数据结构指针 +* @param p_client 用户自定义的数据指针 +* @param flags +* @param p_as_wrt_fun 关联结果写硬件表回调函数指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2017/12/15 +************************************************************/ +DPP_STATUS dpp_acl_cfg_init_ex(DPP_DEV_T *dev, DPP_ACL_CFG_EX_T *p_acl_cfg, + ZXIC_VOID *p_client, ZXIC_UINT32 flags, + ACL_AS_RSLT_WRT_FUNCTION p_as_wrt_fun); + +/***********************************************************/ +/** acl业务表初始化,注意分配给一个table的多个block_idx + 必须按从小到大的顺序给定。支持多个优先级模式,暂不对外开放。 +* @param p_acl_cfg ACL公共管理数据结构指针 +* @param table_id 业务表号 +* @param as_enable 是否使能关联结果查找,0-不使能,1-使能 +* @param entry_num 最大条目数 +* @param pri_mode ACL优先级模式 +* @param key_mode ACL键值位宽模式, 取值参照DPP_ACL_KEY_MODE_E的定义 +* @param as_mode ACL关联查找结果表位宽模式 +* @param as_baddr 基地址 +* @param block_num 分配给当前业务表号的block数目 +* @param p_block_idx 分配给当前业务表号的block编号数组 +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/12/23 +************************************************************/ +DPP_STATUS dpp_acl_tbl_init_ex(DPP_ACL_CFG_EX_T *p_acl_cfg, + ZXIC_UINT32 table_id, ZXIC_UINT32 as_enable, + ZXIC_UINT32 entry_num, + DPP_ACL_PRI_MODE_E pri_mode, + ZXIC_UINT32 key_mode, DPP_ACL_AS_MODE_E as_mode, + ZXIC_UINT32 as_baddr, ZXIC_UINT32 block_num, + ZXIC_UINT32 *p_block_idx); +DPP_STATUS dpp_acl_res_destroy(ZXIC_UINT32 dev_id); + +#endif + +#if ZXIC_REAL("SDT data struct and function") +/** SDT属性中的表类型 */ +typedef enum dpp_sdt_table_type_e { + DPP_SDT_TBLT_INVALID = 0, /**< @brief 无效类型*/ + DPP_SDT_TBLT_eRAM = 1, /**< @brief eRAM直接表类型*/ + DPP_SDT_TBLT_DDR3 = 2, /**< @brief DDR3直接表类型*/ + DPP_SDT_TBLT_HASH = 3, /**< @brief Hash表类型*/ + DPP_SDT_TBLT_LPM = 4, /**< @brief LPM表类型*/ + DPP_SDT_TBLT_eTCAM = 5, /**< @brief 片内Tcam表类型*/ + DPP_SDT_TBLT_PORTTBL = 6, 
/**< @brief 物理端口属性表*/ + DPP_SDT_TBLT_MAX = 7, +} DPP_SDT_TABLE_TYPE_E; + +/** 返回位宽模式*/ +typedef enum dpp_sdt_rsp_mode_e { + DPP_SDT_RSP_32b = 0, /** 返回32bit位宽表结果*/ + DPP_SDT_RSP_64b = 1, /** 返回64bit位宽表结果*/ + DPP_SDT_RSP_128b = 2, /** 返回128bit位宽表结果*/ + DPP_SDT_RSP_256b = 3, /** 返回256bit位宽表结果*/ +} DPP_SDT_RSP_MODE_E; + +/** eRam直接表SDT属性*/ +typedef struct dpp_sdt_tbl_eram_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 eram_mode; /** <@brief eRam返回位宽 */ + ZXIC_UINT32 eram_base_addr; /** <@brief eRam表项基地址,128bit为单位 */ + ZXIC_UINT32 eram_table_depth; /** <@brief 表项深度,作为越界检查使用 */ + ZXIC_UINT32 eram_clutch_en; /** <@brief 抓包使能 */ +} DPP_SDTTBL_ERAM_T; + +/** DDR3直接表SDT属性*/ +typedef struct dpp_sdt_tbl_ddr3_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 ddr3_base_addr; /** <@brief ddr 基地址 */ + ZXIC_UINT32 ddr3_share_type; /** <@brief ddr 共享类型 */ + ZXIC_UINT32 ddr3_rw_len; /** <@brief 表项返回/写入位宽 */ + ZXIC_UINT32 ddr3_sdt_num; /** <@brief SDT表号/复制信息ram的表号 */ + ZXIC_UINT32 ddr3_ecc_en; /** <@brief ecc使能 */ + ZXIC_UINT32 ddr3_clutch_en; /** <@brief 抓包使能 */ +} DPP_SDTTBL_DDR3_T; + +/** HASH表SDT属性*/ +typedef struct dpp_sdt_tbl_hash_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 hash_id; /** <@brief 访问hash的引擎 */ + ZXIC_UINT32 hash_table_width; /** <@brief hash 表项存储位宽 */ + ZXIC_UINT32 key_size; /** <@brief hash 键值长度 */ + ZXIC_UINT32 hash_table_id; /** <@brief hash 逻辑表号 */ + ZXIC_UINT32 learn_en; /** <@brief 硬件学习使能 */ + ZXIC_UINT32 keep_alive; /** <@brief 保活标志使能 */ + ZXIC_UINT32 keep_alive_baddr; /** <@brief 保活标志基地址 */ + ZXIC_UINT32 rsp_mode; /** <@brief 表项返回数据位宽 */ + ZXIC_UINT32 hash_clutch_en; /** <@brief 抓包使能 */ +} DPP_SDTTBL_HASH_T; + +/** LPM表SDT属性*/ +typedef struct dpp_sdt_tbl_lpm_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 lpm_v46_id; /** <@brief ipv4/ipv6标志 */ + ZXIC_UINT32 rsp_mode; /** <@brief 表项返回数据位宽 */ + ZXIC_UINT32 lpm_table_depth; /** <@brief 表项深度,越界检查 */ + ZXIC_UINT32 lpm_clutch_en; /** 
<@brief 抓包使能 */ +} DPP_SDTTBL_LPM_T; + +/** eTCAM表SDT属性*/ +typedef struct dpp_sdt_tbl_etcam_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 etcam_id; /** <@brief etcam通道 */ + ZXIC_UINT32 etcam_key_mode; /** <@brief etcam键值长度 */ + ZXIC_UINT32 etcam_table_id; /** <@brief etcam表项号 */ + ZXIC_UINT32 no_as_rsp_mode; /** <@brief handle模式返回位宽 */ + ZXIC_UINT32 as_en; /** <@brief 级联eram使能 */ + ZXIC_UINT32 as_eram_baddr; /** <@brief 级联eram基地址 */ + ZXIC_UINT32 as_rsp_mode; /** <@brief 级联返回位宽 */ + ZXIC_UINT32 etcam_table_depth; /** <@brief 表项深度,越界检查 */ + ZXIC_UINT32 etcam_clutch_en; /** <@brief 抓包使能 */ +} DPP_SDTTBL_ETCAM_T; + +/** 物理端口属性表SDT属性*/ +typedef struct dpp_sdt_tbl_porttbl_t { + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + ZXIC_UINT32 porttbl_clutch_en; /** <@brief 抓包使能 */ +} DPP_SDTTBL_PORTTBL_T; + +/***********************************************************/ +/** 初始化SDT表配置管理 +* @param dev_num 设备数目 +* @param dev_id_array 设备dev_id数组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_init(ZXIC_UINT32 dev_num, ZXIC_UINT32 *dev_id_array); + +/***********************************************************/ +/** 写SDT属性表条目到硬件表,同时向8个cluster写入 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param table_type SDT属性中的表类型,取值参考DPP_SDT_TABLE_TYPE_E的定义(仅添加操作时有效) +* @param p_sdt_info 写入的SDT属性(仅添加操作时有效)。由table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR3_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、\n +* DPP_SDTTBL_ETCAM_T、DPP_SDTTBL_XTCAM_T、DPP_SDTTBL_PORTTBL_T。 +* @param opr_type 操作类型: 0-添加条目,1-删除条目. 
+* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/07/11 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_write(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 table_type, ZXIC_VOID *p_sdt_info, + ZXIC_UINT32 opr_type); + +/***********************************************************/ +/** 单个hash引擎查找调试函数 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param p_entry hash条目,包括key和result,查找时的key的位宽为392bit,格式为:wr_flag(1bit) + key_type(2bit) + tbl_id(5bit) + reserve(M bit)+ actu_key(32*N bit) \n +* result用于在硬件学习使能的情况下,返回空闲位置的地址。 +* @param p_space_vld 学习使能时,是否有空闲空间 +* @param srh_mode 查找模式,取值参考DPP_HASH_SRH_MODE的定义 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_search(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + DPP_HASH_ENTRY *p_entry, ZXIC_UINT32 *p_space_vld, + ZXIC_UINT32 srh_mode); + +#endif + +#endif /*dpp_se_api.h*/ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_stat_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_stat_api.h new file mode 100644 index 000000000000..9203ef38e1c6 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_stat_api.h @@ -0,0 +1,513 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_stat_api.h +* 文件标识 : stat计数模块对外数据类型定义和接口函数声明 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : xjw +* 完成日期 : 2015/02/09 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_STAT_API_H_ +#define _DPP_STAT_API_H_ + +#if ZXIC_REAL("header file") + +#include "dpp_dev.h" + +#endif + +#if ZXIC_REAL("data struct define") +/** STAT模块TM模式*/ +typedef enum stat_tm_mode_e { + STAT_TM_MODE_ETM = 0, /**< @brief 上行TM */ + 
STAT_TM_MODE_FTM = 1, /**< @brief 下行TM */ + STAT_TM_MODE_MAX +} STAT_TM_MODE_E; + +/** TM统计端口选择 */ +typedef enum stat_tm_port_mode_e { + STAT_TM_PORT_MODE_0_1 = 0, /**< @brief 端口0~1 */ + STAT_TM_PORT_MODE_2_3 = 1, /**< @brief 端口2~3 */ + STAT_TM_PORT_MODE_MAX, +} STAT_TM_PORT_MODE_E; + +/**TM统计计数模式选择 */ +typedef enum stat_tm_cnt_mode_e { + STAT_TM_CNT_MODE_MIX = 0, /**< @brief 混合模式 */ + STAT_TM_CNT_MODE_INNER = 1, /**< @brief 内部模式 */ + STAT_TM_CNT_MODE_MAX +} STAT_TM_CNT_MODE_E; + +/* TM计数器类型,共支持20种 */ +typedef enum tm_stat_type_e { + + TM_STAT_ENQUE_PKT = 0, /**< @brief 入队包计数*/ + TM_STAT_ENQUE_VALID_PKT = 1, /**< @brief 入队VALID包计数*/ + TM_STAT_ENQUE_DROP_PKT = 2, /**< @brief 入队丢弃包计数*/ + TM_STAT_ENQUE_TD_PKT = 3, /**< @brief 入队TD包计数*/ + TM_STAT_ENQUE_WRED_PKT = 4, /**< @brief 入队WRED包计数*/ + TM_STAT_ENQUE_DP0_PKT = 5, /**< @brief 入队DP0包计数*/ + TM_STAT_ENQUE_DP1_PKT = 6, /**< @brief 入队DP1包计数*/ + TM_STAT_ENQUE_DP2_PKT = 7, /**< @brief 入队DP2包计数*/ + TM_STAT_ENQUE_DP3_PKT = 8, /**< @brief 入队DP3包计数*/ + TM_STAT_ENQUE_DP4_PKT = 9, /**< @brief 入队DP4包计数*/ + TM_STAT_ENQUE_DP5_PKT = 10, /**< @brief 入队DP5包计数*/ + TM_STAT_ENQUE_DP6_PKT = 11, /**< @brief 入队DP6包计数*/ + TM_STAT_ENQUE_DP7_PKT = 12, /**< @brief 入队DP7包计数*/ + TM_STAT_ENQUE_BLOCK_PKT = 13, /**< @brief 入队BLOCK包计数*/ + TM_STAT_ENQUE_DISABLE_PKT = 14, /**< @brief 入队DISABLE包计数*/ + TM_STAT_DEQUE_PKT = 15, /**< @brief 出队包计数*/ + TM_STAT_DEQUE_VALID_PKT = 16, /**< @brief 出队VALID包计数*/ + TM_STAT_DEQUE_DISCARD_PKT = 17, /**< @brief 出队DISCARD包计数*/ + TM_STAT_DEQUE_CLEAR_PKT = 18, /**< @brief 出队CLEAR包计数*/ + TM_STAT_DEQUE_AGE_PKT = 19, /**< @brief 出队AGE包计数*/ + TM_STAT_TYPE_UNEN = 20, /**< @brief 不使能的时候,需配置成这个*/ + TM_STAT_TYPE_MAX +} TM_STAT_TYPE_E; + +/** car的监管类型 */ +typedef enum stat_car_type_e { + STAT_CAR_A_TYPE = 0, /**< @brief A级CAR*/ + STAT_CAR_B_TYPE, /**< @brief B级CAR*/ + STAT_CAR_C_TYPE, /**< @brief C级CAR*/ + STAT_CAR_MAX_TYPE +} STAT_CAR_TYPE_E; + +/** car监管队列配置*/ +typedef struct stat_car_queue_cfg_t { + ZXIC_UINT32 queue_id; /**< 
@brief 队列号*/ + ZXIC_UINT32 plcr_en; /**< @brief CAR使能*/ + ZXIC_UINT32 drop_flag; /**< @brief 丢弃标记*/ + ZXIC_VOID * + profile_cfg; /**< @brief carA的包模式时,结构体类型是DPP_STAT_CAR_PKT_PROFILE_CFG_T,其余模式组合时,结构体类型DPP_STAT_CAR_PROFILE_CFG_T*/ +} STAT_CAR_QUEUE_CFG_T; + +/** stat模块计数模式设置*/ +typedef struct stat_count_cfg_t { + ZXIC_UINT32 + rd_mode; /**< @brief 0:计数器在CPU读的下一拍自动清理,1:不自动清零*/ + ZXIC_UINT32 + overflow_mode; /**< @brief 0:计数器达到最大值后,一直保持最大值,1:计数器累积到最高1bit为1,最高1bit始终为1,而其余为继续计数*/ +} STAT_COUNT_CFG_T; + +/** stat 的smmu1属性 */ +typedef struct dpp_stat_smmu1_cfg_t { + ZXIC_UINT32 baddr; /**< @brief 基地址*/ +} DPP_STAT_SMMU1_CFG_T; + +/** stat模块公共配置 */ +typedef struct dpp_stat_comm_cfg_t { + DPP_STAT_SMMU1_CFG_T stat_smmu1_cfg + [DPP_DEV_CHANNEL_MAX]; /**< @brief stat 的smmu1属性*/ + ZXIC_UINT32 is_init[DPP_DEV_CHANNEL_MAX]; /**< @brief 初始化选择*/ +} DPP_STAT_COMM_CFG_T; + +/** stat模块tm统计配置 */ +typedef struct dpp_stat_tm_cfg_t { + ZXIC_UINT32 tm_en; /**< @brief TM统计使能 */ + ZXIC_UINT32 mov_en; /**< @brief 搬移使能*/ + ZXIC_UINT32 eram_en; /**< @brief 片内计数使能*/ + ZXIC_UINT32 ftm_pkt_en; /**< @brief ftm包计数使能*/ + ZXIC_UINT32 etm_pkt_en; /**< @brief etm包计数使能*/ + ZXIC_UINT32 ftm_port_type + [4]; /**< @brief ftm包计数端口类型选择 参考 TM_STAT_TYPE_E */ + ZXIC_UINT32 etm_port_type + [4]; /**< @brief etm包计数端口类型选择 参考 TM_STAT_TYPE_E */ + ZXIC_UINT32 etm_start_queue_id; /**< @brief etm起始队列号*/ + ZXIC_UINT32 etm_queue_depth_mode; /**< @brief etm队列深度*/ + DPP_STAT_SMMU1_CFG_T ftm_smmu1_cfg; /**< @brief ftm统计的smmu1属性*/ + DPP_STAT_SMMU1_CFG_T etm_smmu1_cfg; /**< @brief etm统计的smmu1属性*/ + ZXIC_UINT32 is_init[DPP_DEV_CHANNEL_MAX]; /**< @brief 初始化选择*/ +} DPP_STAT_TM_CFG_T; + +/** TM 统计计数信息 */ +typedef struct dpp_stat_tm_cnt_t { + ZXIC_UINT32 tm_cnt_en; /**< @brief tm计数使能*/ + ZXIC_UINT32 tm_mode; /**< @brief TM统计模式:0-ftm, 1-etm*/ + ZXIC_UINT32 tm_flow_id; /**< @brief TM统计流号*/ + ZXIC_UINT32 tm_stat_type; /**< @brief TM统计端口类型*/ + ZXIC_UINT32 is_tm_byte_en; /**< @brief TM统计字节计数使能*/ + ZXIC_UINT32 is_eram_en; /**< @brief 
TM统计片内计数使能*/ + ZXIC_UINT64 tm_cnt; /**< @brief TM计数结果*/ +} DPP_STAT_TM_CNT_T; + +/** STAT 中断状态 */ +typedef struct dpp_stat_brief_int_t { + ZXIC_UINT32 etcam_int; /**< @brief etcam模块中断 */ + ZXIC_UINT32 stat_sch_int; /**< @brief stat sch剩余部分的中断状态 */ +} DPP_STAT_BRIEF_INT_T; + +/** STAT fifo中断状态选择 */ +typedef struct dpp_stat_sch_intr_t { + ZXIC_UINT32 hardware_rsv; /**< @brief 系统保留,用户无需关心*/ + ZXIC_UINT32 oam0_ord_fifo_int; /**< @brief oam0保序模块中断*/ + ZXIC_UINT32 oam2_ord_fifo_int; /**< @brief oam2保序模块中断*/ + ZXIC_UINT32 oam3_ord_fifo_int; /**< @brief oam3保序模块中断*/ + ZXIC_UINT32 ddr_sch_fifo_int; /**< @brief ddr调度中断*/ + ZXIC_UINT32 plcr_sch_fifo_int; /**< @brief plcr调度中断*/ + ZXIC_UINT32 stat_schd_fifo_int; /**< @brief stat模块key调度中断*/ + ZXIC_UINT32 stat_rschd_fifo_int; /**< @brief stat模块rsp调度中断*/ +} DPP_STAT_SCH_INTR_T; + +/*stat 计数类型*/ +typedef enum stat_cnt_mode_e { + STAT_64_MODE = 0, /**< @brief 64bit位宽模式*/ + STAT_128_MODE = 1, /**< @brief 128bit位宽模式*/ + STAT_MAX_MODE, +} STAT_CNT_MODE_E; + +/**DPP STAT读清模式选择 */ +typedef enum stat_rd_clr_mode_e { + STAT_RD_CLR_MODE_UNCLR = 0, /**< @brief 不读请*/ + STAT_RD_CLR_MODE_CLR = 1, /**< @brief 读清*/ + STAT_RD_CLR_MODE_MAX, +} STAT_RD_CLR_MODE_E; + +/* car 优先级 */ +typedef enum dpp_car_priority_e { + DPP_CAR_PRI0 = 0, /**< @brief CAR优先级0配置*/ + DPP_CAR_PRI1 = 1, /**< @brief CAR优先级1配置*/ + DPP_CAR_PRI2 = 2, /**< @brief CAR优先级2配置*/ + DPP_CAR_PRI3 = 3, /**< @brief CAR优先级3配置*/ + DPP_CAR_PRI4 = 4, /**< @brief CAR优先级4配置*/ + DPP_CAR_PRI5 = 5, /**< @brief CAR优先级5配置*/ + DPP_CAR_PRI6 = 6, /**< @brief CAR优先级6配置*/ + DPP_CAR_PRI7 = 7, /**< @brief CAR优先级7配置*/ + DPP_CAR_PRI_MAX +} DPP_CAR_PRIORITY_E; + +/** car 监管模板参数设置的参数 */ +typedef struct dpp_stat_car_profile_cfg_t { + ZXIC_UINT32 profile_id; /**< @brief car模板号*/ + ZXIC_UINT32 pkt_sign; /**< @brief 包限速选择标志*/ + ZXIC_UINT32 + cd; /**< @brief CD算法标志/令牌桶算法标志 0:srtcm 1:trtcm 2:MEF10.1*/ + ZXIC_UINT32 cf; /**< @brief CF溢出耦合标志,0:不溢出,1:溢出*/ + ZXIC_UINT32 cm; /**< @brief CM色盲/色敏标志,0:色盲模式,1:色敏模式 */ + 
ZXIC_UINT32 + cir; /**< @brief C令牌桶添加速率(0~X, X Gbps/64kbps),最小值为64Kbps,步长为64Kbps*/ + ZXIC_UINT32 + cbs; /**< @brief C桶桶深(XM),配置范围为0~XMByte-1,步长为1Byte*/ + ZXIC_UINT32 + eir; /**< @brief E令牌桶添加速率(0~X, XGbps/64kbps),最小值为64Kbps,步长为64Kbps*/ + ZXIC_UINT32 + ebs; /**< @brief E桶桶深(XM),配置范围为0~XMByte-1,步长为1Byte*/ + ZXIC_UINT32 random_disc_e; /**< @brief 仅carB、carC支持 */ + ZXIC_UINT32 random_disc_c; /**< @brief 仅carB、carC支持 */ + ZXIC_UINT32 c_pri[DPP_CAR_PRI_MAX]; /**< @brief 仅pri 1~7是有效值*/ + ZXIC_UINT32 e_green_pri[DPP_CAR_PRI_MAX]; /**< @brief 仅pri 1~7是有效值*/ + ZXIC_UINT32 e_yellow_pri[DPP_CAR_PRI_MAX]; +} DPP_STAT_CAR_PROFILE_CFG_T; + +/* car 独占smmu0的模式 */ +typedef enum dpp_car_smmu0_mono_mode_e { + CAR_SMMU0_MONO_MODE_NONE = 0, /**< @brief CAR不独占smmu0*/ + CAR_SMMU0_MONO_MODE_1 = 1, /**< @brief CAR独占1片smmu0*/ + CAR_SMMU0_MONO_MODE_2 = 2, /**< @brief CAR独占2片smmu0*/ + CAR_SMMU0_MONO_MODE_MAX +} DPP_CAR_SMMU0_MONO_MODE_E; + +/* TM统计的读清模式 */ +typedef enum stat_tm_clr_mode_e { + STAT_TM_CLR_MODE_UNCLR = 0, /**< @brief TM统计不读请*/ + STAT_TM_CLR_MODE_CLR = 1, /**< @brief TM统计读请*/ + STAT_TM_CLR_MODE_MAX, +} STAT_TM_CLR_MODE_E; + +/** car A 队列设置的参数 */ +typedef struct dpp_stat_car_a_queue_cfg_t { + ZXIC_UINT32 flow_id; + ZXIC_UINT32 drop_flag; + ZXIC_UINT32 plcr_en; + ZXIC_UINT32 profile_id; + ZXIC_UINT64 tq; + ZXIC_UINT32 ted; + ZXIC_UINT32 tcd; + ZXIC_UINT32 tei; + ZXIC_UINT32 tci; +} DPP_STAT_CAR_A_QUEUE_CFG_T; + +#endif + +#if ZXIC_REAL("macro function define") + +#endif + +#if ZXIC_REAL("function declaration") +/***********************************************************/ +/** stat公共配置初始化 +* @param dev_id +* @param p_dpp_stat_comm_cfg +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/07/14 +************************************************************/ +DPP_STATUS dpp_stat_comm_init(ZXIC_UINT32 dev_id, + DPP_STAT_COMM_CFG_T *p_dpp_stat_comm_cfg); + +/***********************************************************/ +/** 设置ppu统计 ERAM基地址 +* @param dev_id 设备号 
+* @param ppu_eram_baddr ppu统计eRam基地址,128bit为单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_baddr_set(DPP_DEV_T *dev, + ZXIC_UINT32 ppu_eram_baddr); + +/***********************************************************/ +/** 设置ppu统计片内深度 +* @param dev_id 设备号 +* @param ppu_eram_depth ppu统计片内深度 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_depth_set(DPP_DEV_T *dev, + ZXIC_UINT32 ppu_eram_depth); + +/***********************************************************/ +/** 设置ppu统计 DDR基地址 +* @param dev_id 设备号 +* @param ppu_ddr_baddr ppu统计DDR基地址 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_ddr_baddr_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ppu_ddr_baddr); + +/***********************************************************/ +/** TM配置初始化 +* @param dev_id +* @param p_stat_tm_cfg +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/08/03 +************************************************************/ +DPP_STATUS dpp_stat_tm_init(ZXIC_UINT32 dev_id, + DPP_STAT_TM_CFG_T *p_stat_tm_cfg); + +/***********************************************************/ +/** 配置Etm 统计类型 +* @param dev_id 设备号 +* @param etm_port0_type 统计类型0 +* @param etm_port1_type 统计类型1 +* @param etm_port2_type 统计类型2 +* @param etm_port3_type 统计类型3 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_etm_port_type_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 etm_port0_type, + ZXIC_UINT32 etm_port1_type, + ZXIC_UINT32 etm_port2_type, + ZXIC_UINT32 etm_port3_type); + 
+/***********************************************************/ +/** 配置Ftm 统计类型 +* @param dev_id 设备号 +* @param ftm_port0_type 统计类型0 +* @param ftm_port1_type 统计类型1 +* @param ftm_port2_type 统计类型2 +* @param ftm_port3_type 统计类型3 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ftm_port_type_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ftm_port0_type, + ZXIC_UINT32 ftm_port1_type, + ZXIC_UINT32 ftm_port2_type, + ZXIC_UINT32 ftm_port3_type); + +/***********************************************************/ +/** car硬件初始化 +* @param dev_id 设备号 +* @param car_type car编号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param car_mono_mode car独占mono模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/27 +************************************************************/ +DPP_STATUS dpp_stat_car_hardware_init(ZXIC_UINT32 dev_id, ZXIC_UINT32 car_type, + ZXIC_UINT32 car_mono_mode); + +/***********************************************************/ +/** 配置car的层级模式 +* @param dev_id +* @param mode 2 - 三级car, 第一级支持16K +* 1 - 两级car, 第一级扩展为17K +* 0 - 一级car, 第一级扩展为21K +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/28 +************************************************************/ +DPP_STATUS dpp_stat_car_en_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode); + +/***********************************************************/ +/** car A 字节限速监管模板设定 +* @param dev_id 设备 car号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_cara_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_cara_profile_cfg); + +/***********************************************************/ +/** 获取car A的流设置 +* @param dev_id 设备 car编号 +* @param 
flow_id 流号 +* @param p_cara_queue_cfg car A流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_cara_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_A_QUEUE_CFG_T *p_cara_queue_cfg); + +/***********************************************************/ +/** car A的流设置 +* @param dev_id 设备 car编号 +* @param flow_id 流号 +* @param drop_flag 丢弃标志 +* @param plcr_en 监管使能 +* @param profile_id 监管模板号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id); + +/***********************************************************/ +/** stat模块常用函数列表 +* @param dev_id +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 刘硕10181552 @date 2016/02/03 +************************************************************/ +DPP_STATUS dpp_stat_help(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** stat 模块上电初始化 +* @param dev_id +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2017/07/20 +************************************************************/ +DPP_STATUS dpp_stat_module_init(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** ppu stat读统计结果配置 +* @param dev_id 设备号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param index 索引,具体位宽参见rd_mode +* @param p_data 出参,读取的数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author xhj @date 2018/02/01 +************************************************************/ +DPP_STATUS dpp_stat_ppu_cnt_set(ZXIC_UINT32 dev_id, STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 index, ZXIC_UINT32 *p_data); + 
+/***********************************************************/ +/** ppu计数值获取 +* @param dev_id 设备号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param index 索引,具体位宽参见rd_mode +* @param clr_mode 读清模式,参见STAT_RD_CLR_MODE_E,0-不读清,1-读清 +* @param p_data 出参,读取的数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/07/11 +************************************************************/ +DPP_STATUS dpp_stat_ppu_cnt_get(DPP_DEV_T *dev, STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 index, ZXIC_UINT32 clr_mode, + ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** TM计数值获取 +* @param dev_id 设备号 +* @param tm_mode tm模式,0-ftm,1-etm 参见STAT_TM_MODE_E +* @param only_pkt_num_en 全包模式使能 +* @param port_mode 端口位置模式,参见STAT_TM_PORT_MODE_E +* @param cnt_mode 计数模式,0-片内计数,1-混合计数 +* @param clr_mode 读清模式,0-非读清,1-读清 +* @param index 索引值 +* @param p_data 数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/07/19 +************************************************************/ +DPP_STATUS dpp_stat_tm_cnt_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 tm_mode, + ZXIC_UINT32 only_pkt_num_en, + ZXIC_UINT32 port_mode, ZXIC_UINT32 cnt_mode, + ZXIC_UINT32 clr_mode, ZXIC_UINT32 index, + ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** CMMU配置信息、ppu stat 统计计数配置打印 +* @param dev_id +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 刘硕10181552 @date 2016/01/20 +************************************************************/ +DPP_STATUS diag_dpp_stat_ppu_cfg_prt(ZXIC_UINT32 dev_id); + +#endif +#endif /*dpp_stat_api.h*/ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_tm_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_tm_api.h new file mode 100644 index 000000000000..0fcb8533f200 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_tm_api.h @@ -0,0 +1,1134 @@ 
+/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tm_api.h +* 文件标识 : tm模块对外数据类型定义和接口函数声明 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2015/02/04 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_TM_API_H_ +#define _DPP_TM_API_H_ + +#if ZXIC_REAL("header file") +#include "dpp_module.h" +#endif + +#if ZXIC_REAL("macro") +#define DPP_TM_SA_NUM (128) +#define DPP_TM_PP_NUM (64) /**< @brief TM内部逻辑端口数 */ +#define DPP_ETM_Q_NUM (9216) /**< @ETM qmu支持9K物理队列 */ +#define DPP_FTM_Q_NUM (2048) /**< @FTM qmu支持2k队列 */ +#define DPP_ETM_CRDT_NUM (0x47FF) /*etm 支持的crdt流队列逻辑编号cir+eir=18K */ +#define DPP_FTM_CRDT_NUM (0xFFF) /*ftm 支持的crdt流队列逻辑编号cir+eir=4K */ +#define DPP_ETM_MID_SE_NUM (6144) /**< @ETM中间级共享0x17FF调度单元 */ +#define DPP_FTM_MID_SE_NUM (512) /**< @FTM 中间级共享0x1FF调度单元 */ +#define DPP_ETM_MID_WFQFQ_NUM (8 * 6144) /**< @brief 中间级共享8 * 6144调度器 */ +#define DPP_FTM_MID_WFQFQ_NUM (8 * 512) /**< @brief 中间级共享8 * 512调度器 */ + +#define DPP_ETM_WFQSP_OFFSET (0x4000) /*etm sp-wfq调度器相对于fq调度器编号偏移*/ +#define DPP_ETM_FQ_NUM (16 * 1024) /*etm fq调度器个数*/ +#define DPP_ETM_WFQSP_NUM (9 * 1024) /*etm sp8和wfq调度器个数*/ +#define DPP_ETM_FQSPWFQ_NUM (25 * 1024) /*etm fq-sp-wfq调度器总个数*/ +#define DPP_FTM_WFQSP_NUM (1920 + 64) /*ftm sp8和wfq调度器个数+64个端口*/ +#define DPP_ETM_SCH_DEL_NUM (0xABFF) /*etm-crdt要删除的流和调度器编号*/ +#define DPP_FTM_SCH_DEL_NUM (0x177F) /*ftm-crdt要删除的流和调度器编号*/ + +#define DPP_TM_INVALID_PORT \ + (0xFFFF) /* 定义crdt无效端口号,用于判定crdt挂接状态 */ +#define DPP_FTM_DELETED_LINK_ID (0x7FF) /**定义ftm已被删除的link_id**/ +#define DPP_ETM_DELETED_LINK_ID (0x7FFF) /**定义etm已被删除的link_id**/ + +/* ftm/etm调度器往端口级挂接link_id偏移 */ +#define DPP_FTM_PORT_LINKID_BASE (0x780) +#define DPP_ETM_PORT_LINKID_BASE (0x7F80) +/**SHAP模块se_id编号基址(非从0开始)**/ +#define DPP_FTM_SHAP_SEID_BASE (0x1000) +#define DPP_ETM_SHAP_SEID_BASE 
(0x4800) + +/**< @brief TD */ +#define DPP_TM_Q_TD_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_PP_TD_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_SYS_TD_TH_MAX (8192) /**< @brief Kbyte */ + +#define DPP_TM_Q_AVG_Q_LEN_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_PP_AVG_Q_LEN_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_SYS_AVG_Q_LEN_MAX (8192) /**< @brief Kbyte */ + +/**< @brief WRED */ +#define DPP_TM_Q_WRED_NUM (32) /**< @brief 队列级WRED组数 */ +#define DPP_TM_Q_WRED_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_Q_WRED_MAX_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_Q_WRED_MIN_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_Q_WRED_MAX_CFG_PARA (0xffffffff) + +#define DPP_TM_PP_WRED_NUM (8) /**< @brief 端口级WRED组数 */ +#define DPP_TM_PP_WRED_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_PP_WRED_MAX_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_PP_WRED_MIN_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_PP_WRED_MAX_CFG_PARA (0xffffffff) + +/**< @brief GRED */ +#define DPP_TM_SYS_GRED_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_SYS_GRED_MAX_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_SYS_GRED_MIN_TH_MAX (8192) /**< @brief Kbyte */ +#define DPP_TM_SYS_GRED_MID_TH_MAX (8192) /**< @brief Kbyte */ + +#define DPP_TM_DP_NUM (8) /**< @brief DP曲线数 */ +#define DPP_TM_RED_P_MIN (1) +#define DPP_TM_RED_P_MAX (100) +#define DPP_TM_CGAVD_WEIGHT_MAX (15) +#define DPP_TM_CGAVD_MOVE_PROFILE_NUM (16) + +#define DPP_TM_SCH_WEIGHT_INVALID (0) /**< @brief WFQ权重,FQ时weight是无效的 */ +#define DPP_TM_SCH_WEIGHT_MIN (1) +#define DPP_TM_SCH_WEIGHT_MAX (511) +#define DPP_TM_SCH_SP_NUM (8) + +#define DPP_ETM_MID_SHAPE_PROFILE_NUM (512) /**< ETM中间级整形策略数 */ +#define DPP_FTM_MID_SHAPE_PROFILE_NUM (64) /**< FTM中间级整形策略数 */ +#define DPP_ETM_FLOW_SHAPE_PROFILE_NUM (512) /**< ETM 流级整形策略数 */ +#define DPP_FTM_FLOW_SHAPE_PROFILE_NUM (128) /**< FTM 流级整形策略数 */ + +#define DPP_TM_SHAPE_CIR_MIN (0) /**< @brief kbps */ +#define DPP_TM_SHAPE_CIR_MAX \ + (800 * 
1000 * 1000) /**< @brief kbps:用户可配最大整形 */ +#define DPP_TM_SHAPE_CBS_MIN (0) /**< @brief kbyte */ +#define DPP_TM_SHAPE_CBS_MAX \ + (128 * 1024) /**< @brief kbyte:用户可配最大桶深 128M */ + +/**< CBS写入寄存器最大值为0x7FF */ +#define DPP_TM_SHAPE_CBS_REG_MIN (0) /**< @brief kbyte */ +#define DPP_TM_SHAPE_CBS_REG_MAX (0x7FF) /**< @brief kbyte */ + +#define DPP_TM_SYS_HZ (1000 * 1000 * 1000) /* 系统主频1000MHz */ + +#define DPP_TM_TC_NUM (8) + +#endif + +#if ZXIC_REAL("data struct define") +/**< @brief 内置TM工作模式 */ +typedef enum dpp_tm_work_mode_e { + DPP_TM_WORK_MODE_TM = 0, + DPP_TM_WORK_MODE_SA, + DPP_TM_WORK_MODE_INVALID +} DPP_TM_WORK_MODE_E; + +/**< @brief QMU工作模式 */ +typedef enum dpp_tm_qmu_work_mode_e { + DPP_TM_QMU_WORK_MODE_2M = 0, /**< @brief 2M节点工作模式 */ + DPP_TM_QMU_WORK_MODE_4M, /**< @brief 4M节点工作模式 */ + DPP_TM_QMU_WORK_MODE_INVALID +} DPP_TM_QMU_WORK_MODE_E; + +/**< @brief QMU DDR随机模式 */ +typedef enum dpp_tm_qmu_ddr_random_mode_e { + DPP_TM_QMU_DDR_NOT_RANDOM = 0, /**< @brief DDR不随机模式 */ + DPP_TM_QMU_DDR_RANDOM, /**< @brief DDR随机模式 */ + DPP_TM_QMU_DDR_RANDOM_MODE_INVALID +} DPP_TM_QMU_DDR_RANDOM_MODE_E; + +/**< @brief 计数模式寄存器 */ +typedef struct dpp_tm_cnt_mode_t { + ZXIC_UINT32 fc_count_mode; /**< @brief 计数流控模式 */ + ZXIC_UINT32 count_rd_mode; /**< @brief 计数读模式 */ + ZXIC_UINT32 count_overflow_mode; /**< @brief 计数溢出模式 */ +} DPP_TM_CNT_MODE_T; + +/**< @brief 中断信息 */ +typedef struct dpp_tm_int_t { + ZXIC_UINT32 shap_int; + ZXIC_UINT32 crdt_int; + ZXIC_UINT32 mmu_int; + ZXIC_UINT32 qmu_int; + ZXIC_UINT32 cgavd_int; + ZXIC_UINT32 olif_int; + ZXIC_UINT32 cfgmt_int; +} DPP_TM_INT_T; + +/**< @brief 拥塞避免层次 */ +typedef enum dpp_tm_cgavd_level_e { + QUEUE_LEVEL = 0, /**< @brief 流队列级 */ + PP_LEVEL, /**< @brief 端口级 */ + SYS_LEVEL, /**< @brief 系统级 */ + SA_LEVEL, /**< @brief SA队列不可达拥塞避免 */ + INVALID_LEVEL +} DPP_TM_CGAVD_LEVEL_E; + +/**< @brief dp选取值 */ +typedef enum dpp_tm_cgavd_dp_sel_e { + DP_SEL_DP = 0, /**< @brief 选tm头中dp字段作为cgavd的dp */ + DP_SEL_TC, /**< @brief 选tm头中tc字段作为cgavd的dp */ + 
DP_SEL_PKT_LEN, /**< @brief 选tm头中pkt[2:0]字段作为cgavd的dp */ + INVALID_DP +} DPP_TM_CGAVD_DP_SEL_E; + +/**< @brief 拥塞避免模式 */ +typedef enum dpp_tm_cgavd_method_e { + TD_METHOD = 0, /**< @brief TD模式 */ + WRED_GRED_METHOD, /**< @brief 流级和端口级为WRED模式,系统级为GRED模式 */ + INVALID_METHOD +} DPP_TM_CGAVD_METHOD_E; + +/**< @brief WRED DP曲线配置参数 */ +typedef struct dpp_tm_wred_dp_line_para_t { + ZXIC_UINT32 max_th; /**< @brief 平均队列深度上限阈值 */ + ZXIC_UINT32 min_th; /**< @brief 平均队列深度下限阈值 */ + ZXIC_UINT32 max_p; /**< @brief 最大丢弃概率 */ + ZXIC_UINT32 weight; /**< @brief 平均队列深度计算权重 */ + ZXIC_UINT32 q_len_th; /**< @brief 队列深度阈值 */ +} DPP_TM_WRED_DP_LINE_PARA_T; + +/**< @brief GRED DP曲线配置参数 */ +typedef struct dpp_tm_gred_dp_line_para_t { + ZXIC_UINT32 max_th; /**< @brief 第2段平均队列深度上限阈值 */ + ZXIC_UINT32 mid_th; /**< @brief 第1段平均队列深度上限阈值 */ + ZXIC_UINT32 min_th; /**< @brief 第1段平均队列深度下限阈值 */ + ZXIC_UINT32 max_p; /**< @brief 最大丢弃概率 */ + ZXIC_UINT32 weight; /**< @brief 平均队列深度计算权重 */ + ZXIC_UINT32 q_len_th; /**< @brief 队列深度阈值 */ +} DPP_TM_GRED_DP_LINE_PARA_T; + +/**< @brief CRDT调度层次 */ +typedef enum dpp_tm_sch_level_e { + DPP_TM_SCH_LEVEL_Q = 1, + DPP_TM_SCH_LEVEL_VC = 2, + DPP_TM_SCH_LEVEL_VCG = 3, + DPP_TM_SCH_LEVEL_VP = 4, + DPP_TM_SCH_LEVEL_PP = 5, + DPP_TM_SCH_LEVEL_INVALID +} DPP_TM_SCH_LEVEL_E; + +/**< @brief SP_ID */ +typedef enum dpp_tm_sch_sp_e { + DPP_TM_SCH_SP_0 = 0, + DPP_TM_SCH_SP_1 = 1, + DPP_TM_SCH_SP_2 = 2, + DPP_TM_SCH_SP_3 = 3, + DPP_TM_SCH_SP_4 = 4, + DPP_TM_SCH_SP_5 = 5, + DPP_TM_SCH_SP_6 = 6, + DPP_TM_SCH_SP_7 = 7, + DPP_TM_SCH_SP_8 = 8, + DPP_TM_SCH_SP_INVALID +} DPP_TM_SCH_SP_E; + +/**< @brief 调度单元挂接参数 */ +typedef struct dpp_tm_sch_para_t { + ZXIC_UINT32 + level_id; /**< @brief 当前为中间级时,vc:0 vcg:1 vp:2 para_get已加2供调用*/ + DPP_TM_SCH_LEVEL_E se_last_level; /**< @brief 挂接到的上级层次 */ + ZXIC_UINT32 se_id; /**< @brief 挂接到的上级调度单元ID */ + DPP_TM_SCH_SP_E c_sp_id; /**< @brief C桶挂接到的SPID,SP0~SP7 */ + DPP_TM_SCH_SP_E e_sp_id; /**< @brief E桶挂接到的SPID,SP0~SP7 */ + ZXIC_UINT32 + sp_relay; /**< 
@brief 队列优先级传递标志,FLOW级时表示FLOW_WORK_MODE */ + ZXIC_UINT32 + c_sp_weight; /**< @brief C桶挂接到的调度器是WFQ的话,WFQ的权重 */ + ZXIC_UINT32 + e_sp_weight; /**< @brief E桶挂接到的调度器是WFQ的话,WFQ的权重 */ +} DPP_TM_SCH_PARA_T; + +/**< @brief 调度器参数 */ +typedef struct dpp_tm_wfqfq_t { + ZXIC_UINT32 wfqfq_id + [8]; /**< @brief 本级调度单元下挂接的8个调度器ID,FQ/WFQ,由ID号区分 */ +} DPP_TM_WFQFQ_T; + +/**< @brief 流队列挂接参数 */ +typedef struct dpp_tm_sch_flow_para_t { + ZXIC_UINT32 c_linkid; /**< @brief c桶要挂接到的上级调度器id */ + ZXIC_UINT32 c_weight; /**< @brief c桶挂接到上级调度器的权重[1~511] */ + ZXIC_UINT32 + c_sp; /**< @brief c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 */ + ZXIC_UINT32 + mode; /**< @brief 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 */ + ZXIC_UINT32 e_linkid; /**< @brief e桶要挂接到的上级调度器id */ + ZXIC_UINT32 e_weight; /**< @brief e桶挂接到上级调度器的权重[1~511] */ + ZXIC_UINT32 + e_sp; /**< @brief e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 */ +} DPP_TM_SCH_FLOW_PARA_T; + +/**< @brief 调度单元挂接参数:非优先级传递 */ +typedef struct dpp_tm_sch_se_para_t { + ZXIC_UINT32 se_linkid; /**< @brief 要挂接到的上级调度器id */ + ZXIC_UINT32 + cp_token_en; /**< @brief 调度器cp双桶使能开关,仅fq8/wfq8支持 */ + ZXIC_UINT32 se_weight; /**< @brief 挂接到上级调度器的权重[1~511] */ + ZXIC_UINT32 + se_sp; /**< @brief 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 */ +} DPP_TM_SCH_SE_PARA_T; + +/**< @brief 调度单元挂接参数:优先级传递开启 */ +typedef struct dpp_tm_sch_se_para_insw_t { + ZXIC_UINT32 se_linkid; /**< @brief 要挂接到的上级调度器id */ + ZXIC_UINT32 + cp_token_en; /**< @brief 调度器cp双桶使能开关,仅fq8/wfq8支持 */ + ZXIC_UINT32 + se_sp; /**< @brief 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 */ + ZXIC_UINT32 se_weight + [8]; /**< @brief WFQ8中各调度器权重值[1~511],若是WFQ2/4 只取前面对应值,后面无效 */ +} DPP_TM_SCH_SE_PARA_INSW_T; + +typedef enum dpp_tm_sch_port_linkid_t { + DPP_TM_PP_LINKID_PORT0 = 0x7F80, + DPP_TM_PP_LINKID_PORT1 = 0x7F81, + DPP_TM_PP_LINKID_PORT2 = 0x7F82, + DPP_TM_PP_LINKID_PORT3 = 0x7F83, + DPP_TM_PP_LINKID_PORT4 = 0x7F84, + DPP_TM_PP_LINKID_PORT5 = 0x7F85, + DPP_TM_PP_LINKID_PORT6 = 0x7F86, + DPP_TM_PP_LINKID_PORT7 = 0x7F87, + DPP_TM_PP_LINKID_PORT8 = 0x7F88, + 
DPP_TM_PP_LINKID_PORT9 = 0x7F89, + DPP_TM_PP_LINKID_PORT10 = 0x7F8A, + DPP_TM_PP_LINKID_PORT11 = 0x7F8B, + DPP_TM_PP_LINKID_PORT12 = 0x7F8C, + DPP_TM_PP_LINKID_PORT13 = 0x7F8D, + DPP_TM_PP_LINKID_PORT14 = 0x7F8E, + DPP_TM_PP_LINKID_PORT15 = 0x7F8F, + DPP_TM_PP_LINKID_PORT16 = 0x7F90, + DPP_TM_PP_LINKID_PORT17 = 0x7F91, + DPP_TM_PP_LINKID_PORT18 = 0x7F92, + DPP_TM_PP_LINKID_PORT19 = 0x7F93, + DPP_TM_PP_LINKID_PORT20 = 0x7F94, + DPP_TM_PP_LINKID_PORT21 = 0x7F95, + DPP_TM_PP_LINKID_PORT22 = 0x7F96, + DPP_TM_PP_LINKID_PORT23 = 0x7F97, + DPP_TM_PP_LINKID_PORT24 = 0x7F98, + DPP_TM_PP_LINKID_PORT25 = 0x7F99, + DPP_TM_PP_LINKID_PORT26 = 0x7F9A, + DPP_TM_PP_LINKID_PORT27 = 0x7F9B, + DPP_TM_PP_LINKID_PORT28 = 0x7F9C, + DPP_TM_PP_LINKID_PORT29 = 0x7F9D, + DPP_TM_PP_LINKID_PORT30 = 0x7F9E, + DPP_TM_PP_LINKID_PORT31 = 0x7F9F, + DPP_TM_PP_LINKID_PORT32 = 0x7FA0, + DPP_TM_PP_LINKID_PORT33 = 0x7FA1, + DPP_TM_PP_LINKID_PORT34 = 0x7FA2, + DPP_TM_PP_LINKID_PORT35 = 0x7FA3, + DPP_TM_PP_LINKID_PORT36 = 0x7FA4, + DPP_TM_PP_LINKID_PORT37 = 0x7FA5, + DPP_TM_PP_LINKID_PORT38 = 0x7FA6, + DPP_TM_PP_LINKID_PORT39 = 0x7FA7, + DPP_TM_PP_LINKID_PORT40 = 0x7FA8, + DPP_TM_PP_LINKID_PORT41 = 0x7FA9, + DPP_TM_PP_LINKID_PORT42 = 0x7FAA, + DPP_TM_PP_LINKID_PORT43 = 0x7FAB, + DPP_TM_PP_LINKID_PORT44 = 0x7FAC, + DPP_TM_PP_LINKID_PORT45 = 0x7FAD, + DPP_TM_PP_LINKID_PORT46 = 0x7FAE, + DPP_TM_PP_LINKID_PORT47 = 0x7FAF, + DPP_TM_PP_LINKID_PORT48 = 0x7FB0, + DPP_TM_PP_LINKID_PORT49 = 0x7FB1, + DPP_TM_PP_LINKID_PORT50 = 0x7FB2, + DPP_TM_PP_LINKID_PORT51 = 0x7FB3, + DPP_TM_PP_LINKID_PORT52 = 0x7FB4, + DPP_TM_PP_LINKID_PORT53 = 0x7FB5, + DPP_TM_PP_LINKID_PORT54 = 0x7FB6, + DPP_TM_PP_LINKID_PORT55 = 0x7FB7, + DPP_TM_PP_LINKID_PORT56 = 0x7FB8, + DPP_TM_PP_LINKID_PORT57 = 0x7FB9, + DPP_TM_PP_LINKID_PORT58 = 0x7FBA, + DPP_TM_PP_LINKID_PORT59 = 0x7FBB, + DPP_TM_PP_LINKID_PORT60 = 0x7FBC, + DPP_TM_PP_LINKID_PORT61 = 0x7FBD, + DPP_TM_PP_LINKID_PORT62 = 0x7FBE, + DPP_TM_PP_LINKID_PORT63 = 0x7FBF, + 
DPP_TM_PP_LINKID_INVALID +} DPP_TM_SCH_PORT_LINKID_T; +typedef enum dpp_tm_shape_flag_e { + DPP_TM_SHAPE_FLAG_CIR = 0, + DPP_TM_SHAPE_FLAG_EIR = 1, + DPP_TM_SHAPE_FLAG_INVALID +} DPP_TM_SHAPE_FLAG_E; + +/**< @brief 整形令牌桶模式 */ +typedef enum dpp_tm_shape_mode_e { + DPP_TM_SINGLE_MIX_BUCKET = 0, /**< @brief 单桶/组合流 */ + DPP_TM_DUAL_BUCKET = 1, /**< @brief 双桶 */ + DPP_TM_DUAL_PIPE = 2, /**< @brief 被双桶 */ + DPP_TM_SHAPE_MODE_INVALID +} DPP_TM_SHAPE_MODE_E; + +/**< @brief 整形profile参数 */ +typedef struct dpp_tm_shape_profile_t { + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 eir; + ZXIC_UINT32 ebs; +} DPP_TM_SHAPE_PROFILE_T; + +/**< @brief 端口级整形参数 */ +typedef struct dpp_tm_shape_pp_para_t { + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 c_en; +} DPP_TM_SHAPE_PP_PARA_T; + +/**< @brief 调度单元整形参数 */ +typedef struct dpp_tm_shape_para_t { + ZXIC_UINT32 class_id; /**< @brief 当前中间级所属层次,vc:1 vcg:2 vp:3 */ + ZXIC_UINT32 profile_id; + ZXIC_UINT32 c_en; + ZXIC_UINT32 e_en; + DPP_TM_SHAPE_MODE_E mode; +} DPP_TM_SHAPE_PARA_T; + +/**< @brief 整形参数 */ +typedef struct dpp_tm_shape_t { + ZXIC_UINT32 mid_level; /**< @brief 当前中间级所属层次,vc:1 vcg:2 vp:3 */ + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 eir; + ZXIC_UINT32 ebs; + ZXIC_UINT32 c_en; + ZXIC_UINT32 e_en; + ZXIC_UINT32 mode; +} DPP_TM_SHAPE_T; + +typedef struct dpp_tm_shape_para { + ZXIC_UINT32 shape_cir; + ZXIC_UINT32 shape_cbs; + ZXIC_UINT32 shape_num; +} DPP_TM_SHAPE_PARA_TABLE; + +/**< @brief TM初始化参数 */ +typedef struct dpp_tm_init_para_t { + DPP_TM_WORK_MODE_E tm_sa_mode; /**< @brief TM或SA模式 */ + DPP_TM_QMU_WORK_MODE_E qmu_mode; /**< @brief QMU 2M或4M节点模式 */ + ZXIC_UINT32 + case_num; /**< @brief 四组QMU初始化场景编号为1-4;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8.*/ + DPP_TM_QMU_DDR_RANDOM_MODE_E ddr_random_mode; /**< @brief ddr随机模式 */ + ZXIC_UINT32 block_size; /**< @brief block模式,128/256/512/1024 */ + ZXIC_UINT32 local_sa_id; /**< @brief SA模式时,本地sa_id*/ +} DPP_TM_INIT_PARA_T; + +/* TM ASIC初始化信息配置 */ +typedef struct dpp_tm_asic_init_info_t 
{ + ZXIC_UINT32 + blk_size; /**< @brief qmu配置的block大小 256B/512B[default]/1024B */ + ZXIC_UINT32 + case_num; /**< @brief 四组QMU初始化场景编号为1-4;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8. */ + ZXIC_UINT32 imem_omem; /**< @brief 0:片内外混合; 1:纯片内;2:纯片外 */ + ZXIC_UINT32 mode; /**< @brief TM工作模式 0:TM模式; 1:SA模式 */ +} DPP_TM_ASIC_INIT_INFO_T; + +#endif + +#if ZXIC_REAL("function declaration") + +/***********************************************************/ +/** 读取block长度模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_size block长度模式,256/512/1024 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_blk_size_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_size); + +/***********************************************************/ +/** 配置内置TM的工作模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 配置的值,0-TM模式,1-SA模式 +*ETM仅工作在TM模式,FTM可以工作TM或SA模式 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_sa_work_mode_set(DPP_DEV_T *dev, + DPP_TM_WORK_MODE_E mode); + +/***********************************************************/ +/** 配置各级搬移功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +#ifdef ETM_REAL +DPP_STATUS dpp_tm_cgavd_move_en_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 en); + +/***********************************************************/ +/** 配置各级搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param value 端口级和系统级时,为搬移门限值,单位为NPPU存包的单位,256B; + 流级时为搬移profile_id,0~15 +* +* @return 
DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_move_th_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 id, + ZXIC_UINT32 value); + +/***********************************************************/ +/** 配置flow级的搬移策略 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param move_profile flow级的搬移门限分组索引,0~15 +* @param th flow级的搬移门限,单位为KB; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_flow_move_profile_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 move_profile, + ZXIC_UINT32 th); +#endif + +/***********************************************************/ +/** 配置端口共享的搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param th 端口共享的搬移门限,单位为NPPU存包的单位,256B; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_port_share_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 th); + +/***********************************************************/ +/** 配置各级拥塞避免功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_en_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 en); + +/***********************************************************/ +/** 配置拥塞避免算法 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param method 配置的拥塞避免算法,0:TD,1:WRED/GRED +* 配置TD算法时,先配TD阈值,再配置TD算法 +* 配置WRED算法时,先配置流级或端口级的平均队列深度,再配置WRED算法 +* 配置GRED算法时,先配置系统级的平均队列深度,在配置成GRED算法 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 
+* @see +* @author taq @date 2015/04/14 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_method_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + DPP_TM_CGAVD_METHOD_E method); + +/***********************************************************/ +/** 配置TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param td_th 配置的丢弃门限值,用户配置门限值单位为Kbyte,需要转化为Block单位写入寄存器 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th); + +/***********************************************************/ +/** 配置指定端口或队列绑定的WRED GROUP ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param wred_id 配置的WRED GROUP ID +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_id_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 id, + ZXIC_UINT32 wred_id); + +/***********************************************************/ +/** 配置TM模式下流队列挂接的端口号;SA模式下流队列映射的目的芯片ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param pp_id 配置的端口号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_map_pp_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 pp_id); + +/***********************************************************/ +/** 配置TM模式tc到flow的映射 +* @param dev_id 设备编号 +* @param tc_id itmd tc优先级(0~7) +* @param flow_id 映射的flowid号 (0~4095) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 
2023/07/04 +************************************************************/ +DPP_STATUS dpp_tm_tc_map_flow_set(DPP_DEV_T *dev, ZXIC_UINT32 tc_id, + ZXIC_UINT32 flow_id); + +/***********************************************************/ +/** 配置指定端口或队列是否支持动态门限机制 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param en 配置的值,0-不支持动态门限机制,1-支持动态门限机制 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dyn_th_en_set(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 配置强制片内或片外 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 1:使能 +* @param mode 1 :omem 强制片外 0:imem 强制片内 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_imem_omem_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en, + ZXIC_UINT32 mode); + +/***********************************************************/ +/** 读取QMU队列授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_credit_value 授权价值,默认值是400Byte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_credit_value_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_credit_value); + +/****************************************************************************** +*包老化配置 +* @param: dev_id: 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* aging_en: 包老化使能:1表示包老化功能使能;0表示包老化功能关闭。 +* aging_interval: 普通老化两次的间隔配置 +* aging_step_interval: 普通老化的老化时间的步进配置值 +* aging_start_qnum: 老化起始队列 +* aging_end_qnum: 老化结束队列 +* aging_req_aful_th: 普通老化FIFO的将满阈值 +* aging_pkt_num: 一次老化的包个数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/05/10 
+************************************************************/
+DPP_STATUS dpp_tm_qmu_pkt_aging_set(DPP_DEV_T *dev, ZXIC_UINT32 aging_en,
+                                    ZXIC_UINT32 aging_interval,
+                                    ZXIC_UINT32 aging_step_interval,
+                                    ZXIC_UINT32 aging_start_qnum,
+                                    ZXIC_UINT32 aging_end_qnum,
+                                    ZXIC_UINT32 aging_pkt_num,
+                                    ZXIC_UINT32 aging_req_aful_th);
+
+/******************************************************************************
+*配置老化一个包的时间,一次老化一个包,老化队列范围为可配
+* @param: dev_id: 设备索引编号
+* @param tm_type 0: etm; 1: ftm;
+* aging_en: 包老化使能:1表示包老化功能使能;0表示包老化功能关闭。
+* aging_time: 老化一个包的时间,单位ms
+ aging_que_start:老化起始队列
+ aging_que_end:老化终止队列
+老化时间=2*aging_interval*step_interval*q_num
+aging_interval = (aging_time * 600000) / (2 * 1 * DPP_TM_Q_NUM);
+* @return DPP_OK-成功,DPP_ERR-失败
+* @remark 无
+* @see
+* @author xuhb @date 2020/12/08
+************************************************************/
+DPP_STATUS dpp_tm_qmu_pkt_age_time_set(DPP_DEV_T *dev, ZXIC_UINT32 aging_en,
+                                       ZXIC_UINT32 aging_time,
+                                       ZXIC_UINT32 aging_que_start,
+                                       ZXIC_UINT32 aging_que_end);
+
+/***********************************************************/
+/** 分配etm调度器资源:fq/fq2/fq4/fq8 个数,(共16K= 16384)
+* @param dev_id 设备编号
+* @param fq_num FQ调度器个数,须是8的倍数
+* @param fq2_num FQ2调度器个数,须是4的倍数
+* @param fq4_num FQ4调度器个数,须是2的倍数
+* @param fq8_num FQ8调度器个数
+* 调度器总数不能超过:16K= 16384
+* @return DPP_OK-成功,DPP_ERR-失败
+* @remark 无
+* @see
+* @author whuashan @date 2019/02/26
+************************************************************/
+DPP_STATUS dpp_etm_crdt_fq_set(DPP_DEV_T *dev, ZXIC_UINT32 fq_num,
+                               ZXIC_UINT32 fq2_num, ZXIC_UINT32 fq4_num,
+                               ZXIC_UINT32 fq8_num);
+
+/***********************************************************/
+/** 分配TM调度器资源:sp/wfq/wfq2/wfq4/wfq8 个数,(etm共9K=9216,ftm共1920个)
+* @param dev_id 设备编号
+* @param tm_type 0-ETM,1-FTM
+* @param sp_num SP调度器个数,须是8的倍数
+* @param wfq_num WFQ调度器个数,须是8的倍数
+* @param wfq2_num WFQ2调度器个数,须是4的倍数
+* @param wfq4_num WFQ4调度器个数,须是2的倍数
+* @param wfq8_num WFQ8调度器个数
+* 调度器总数不能超过:ETM= 9216; FTM=
1920 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/26 +************************************************************/ +DPP_STATUS dpp_tm_crdt_wfqsp_set(DPP_DEV_T *dev, ZXIC_UINT32 sp_num, + ZXIC_UINT32 wfq_num, ZXIC_UINT32 wfq2_num, + ZXIC_UINT32 wfq4_num, ZXIC_UINT32 wfq8_num); + +/***********************************************************/ +/** 配置flow级流队列的挂接关系(flow到上级调度器的挂接) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* @param c_linkid c桶要挂接到的上级调度器id +* @param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 c_linkid, ZXIC_UINT32 c_weight, + ZXIC_UINT32 c_sp, ZXIC_UINT32 mode, + ZXIC_UINT32 e_linkid, ZXIC_UINT32 e_weight, + ZXIC_UINT32 e_sp); + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:非优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight 挂接到上级调度器的权重[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 
该参数不传递直接配0 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp); + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight WFQ2/4/8中各调度器权重值[1~511],取相等的值 +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-7],共8级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_insw_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, + ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp); + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递,单个调度器挂接 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight WFQ8中对应调度器权重值[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-7],共8级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 
该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_insw_single_set(DPP_DEV_T *dev, + ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, + ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp); + +/***********************************************************/ +/** 删除流挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id_s 要删除的流号或调度器起始id +* @param id_e 要删除的流号或调度器终止id +* ETM范围:0--0x47FF; FTM范围:0-0xFFF +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_flow_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e); + +/***********************************************************/ +/** 删除调度器挂接关系(调度器编号从0开始) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id_s 要删除的起始调度器id +* @param se_id_e 要删除的终止调度器id +* ETM范围:0--0x63FF; FTM范围:0-0x77F +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_se_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e); + +/***********************************************************/ +/** 配置se->pp->dev挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 往端口挂接的调度器id +* @param pp_id [0-63] +* @param weight [1-511] +* @param sp_mapping 0~7 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/3/4 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_pp_link_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pp_id, ZXIC_UINT32 weight, + ZXIC_UINT32 sp_mapping); + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id queue id +* @param en 
1:过滤E桶队列CRS状态为SLOW的入链请求;0:E桶队列CRS SLOW正常入链; +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_set(DPP_DEV_T *dev, ZXIC_UINT32 que_id, + ZXIC_UINT32 en); + +/***********************************************************/ +/** 清除整形表格里面的值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_clr_shape_para(DPP_DEV_T *dev); + +/***********************************************************/ +/** 配置流队列双桶整形使能及模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param db_en 双桶整形使能 +* @param mode 0:c+e模式,1:c+p模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_db_en_set(DPP_DEV_T *dev, ZXIC_UINT32 db_en, + ZXIC_UINT32 mode); + +/***********************************************************/ +/** 配置流级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param db_en 双桶整形使能,0-单桶,1-双桶 +* @param eir eir速率,单位Kb,范围同cir +* @param ebs ebs桶深,单位Kb,范围同cbs +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_para_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs); + +/***********************************************************/ +/** tm配置调度器整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号号 ETM:0x4800-0xABFF,FTM:0x1000-0x177F +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs 
pbs总桶深,单位Kb,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir [0-3]调度器cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs [0-3]调度器cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_se_para_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs); + +/***********************************************************/ +/** 写入端口级整形信息 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号0-63 +* @param cir +* @param cbs +* @param c_en c桶使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/03 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_wr(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en); + +/***********************************************************/ +/** 配置端口级整形参数 更改整形转换公式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* @param p_para 整形信息:CIR/CBS/EN +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + const DPP_TM_SHAPE_PP_PARA_T *p_para); + +/***********************************************************/ +/** 配置轮转扫描使能和扫描速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_en 轮转扫描使能。0:关闭,1:开启 +* @param scan_rate 轮转扫描速率,配置扫描周期不得少于256个周期 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_rate_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 scan_en, + ZXIC_UINT32 scan_rate); + +/***********************************************************/ +/** 配置CMD_SW分端口(qmu出端口)整形速率和使能 +* 
@param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_cir 整形值,单位Mbps,范围[0-160000] +* @param shape_cbs 桶深, 单位B,范围[0-0x1EE00] +* @param shape_en 整形使能 +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_egress_shape_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_id, + ZXIC_UINT32 shape_cir, + ZXIC_UINT32 shape_cbs, + ZXIC_UINT32 shape_en); + +/***********************************************************/ +/** 配置WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,包含以下五个参数 + max_th 平均队列深度上限阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_para_set( + ZXIC_UINT32 dev_id, DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, DPP_TM_WRED_DP_LINE_PARA_T *p_para); + +/***********************************************************/ +/** 配置各级WRED丢弃曲线对应的参数 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param max_th 平均队列深度上限阈值 +* @param min_th 平均队列深度下限阈值 +* @param max_p 最大丢弃概率 +* @param weight 平均队列深度计算权重 +* @param q_len_th 队列深度阈值 +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS +dpp_tm_wred_dp_line_para_flag_wr(ZXIC_UINT32 dev_id, ZXIC_UINT32 level, + ZXIC_UINT32 wred_id, ZXIC_UINT32 dp, + ZXIC_UINT32 max_th, ZXIC_UINT32 min_th, + ZXIC_UINT32 max_p, ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th, ZXIC_UINT32 flag); + 
+/***********************************************************/ +/** 配置CPU设置的报文长度是否参与计算丢弃概率的使能 +* @param tm_type 0-ETM,1-FTM +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_pke_len_calc_sign_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 flag); + +/***********************************************************/ +/** TMMU TM纯片内模式配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param imem_en 1纯片内 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 说明:高有效,表示使能打开,TMMU不会再发起对MMU的读写操作,用户需要保证Cache PD全部命中。 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_imem_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 imem_en); + +/***********************************************************/ +/** TMMU 强制DDR RDY配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_force_rdy 1、如果bit【0】配置为1,则QMU看到的DDR0 RDY一直为1。 +* 2、bit【0】代表DDR0,bit【7】代表DDR7。 +* 3、纯片内模式需要配置为8'hff,排除DDR干扰。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_ddr_force_rdy_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ddr_force_rdy); + +/***********************************************************/ +/** 写一片连续的TM寄存器 +* @param module_id 区分TM子模块 +* @param first_addr 起始寄存器的地址 +* @param reg_num 总共读取的寄存器数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_wr_more_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 first_addr, ZXIC_UINT32 first_data, + ZXIC_UINT32 data_step, ZXIC_UINT32 reg_num); + +/***********************************************************/ +/** 打印tm诊断常用函数信息 +* +* @return +* @remark 无 +* @see +* @author 张明月 @date 2015/10/21 
+************************************************************/ +DPP_STATUS dpp_tm_help(ZXIC_UINT32 dev_id); + +/***********TM CPU软复位接口 End*************/ + +#endif /***function declaration***/ + +#endif /****_DPP_TM_H_****/ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_type_api.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_type_api.h new file mode 100644 index 000000000000..8d581963e5a2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/api/dpp_type_api.h @@ -0,0 +1,424 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_error.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_TYPE_API_H_ +#define _DPP_TYPE_API_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +//#define DPP_FOR_IDE /* IDE仿真器Driver编译控制宏,通过VC工具导入,不允许手动更改此行 */ +//#define DPP_FOR_LLT /* DPP 打桩控制宏,通过Makefile编译脚本导入,不允许手动更改此行 "make ssp4_test_64 DPPLLT=1"*/ + +#ifndef DPP_FOR_LLT +/* write hardware enable */ +#define DPP_HW_OPR_EN (1) +#else +/* write hardware disable */ +#define DPP_HW_OPR_EN (1) +#endif + +#ifndef DPP_FOR_LLT +#ifndef DPP_FOR_RISCV +#ifndef DPP_FOR_PCIE +/* dev VPCI enalbe */ +#define DPP_DEV_VPCI_EN (1) +#else +#define DPP_DEV_VPCI_EN (0) +#endif +#else +#define DPP_DEV_VPCI_EN (0) +#endif +#else +#define DPP_DEV_VPCI_EN (0) +#endif + +/* CPU软复位调试时开启 */ +#define DPP_CPU_SOFT_RESET (0) + +#if defined(ZXIC_OS_WIN) +#define DPP_OS_WIN32 +#elif defined(ZXIC_OS_LINUX) +#define DPP_OS_LINUX +#endif + +#ifdef ZXIC_OS_WIN +#define SDT_FILE_PATH "..\\tools\\midware\\table_info.txt" +#else +#define SDT_FILE_PATH "table_info.txt" +#endif + +#ifndef BOOL +#define BOOL int +#endif + +#ifndef BOOLEAN +#define BOOLEAN unsigned char +#endif + +#ifndef DPP_STATUS +#define DPP_STATUS 
ZXIC_UINT32 +#endif + +#ifndef DPP_OK +#define DPP_OK (0) +#endif + +#ifndef DPP_ERR +#define DPP_ERR (1) +#endif + +#define DPP_RD_CNT_MAX (100) /* 最大读完成状态次数 */ + +#define DPP_PRT_BIT_LEN_1 (1) +#define DPP_PRT_BIT_LEN_2 (2) +#define DPP_PRT_BIT_LEN_8 (8) +#define DPP_PRT_BIT_LEN_16 (16) +#define DPP_PRT_BIT_LEN_32 (32) + +/* agent module error code */ +#define DPP_RC_AGENT_BASE (0x100) +#define DPP_RC_AGENT_INVALID_PARAMS (DPP_RC_AGENT_BASE | 0x0) +#define DPP_RC_AGENT_INVALID_RANGES (DPP_RC_AGENT_BASE | 0x1) +#define DPP_RC_AGENT_CALL_FUN_FAIL (DPP_RC_AGENT_BASE | 0x2) +#define DPP_RC_AGENT_GET_POINTER_FAIL (DPP_RC_AGENT_BASE | 0x3) +#define DPP_RC_AGENT_SEND_MSG_FAIL (DPP_RC_AGENT_BASE | 0x4) +#define DPP_RC_AGENT_MSG_TYPE_INVALID (DPP_RC_AGENT_BASE | 0x5) + +/* common module error code */ +#define DPP_RC_COMMON_BASE (0x200) +#define DPP_RC_COMMON_INVALID_PARAMS (DPP_RC_COMMON_BASE | 0x0) +#define DPP_RC_COMMON_INVALID_RANGES (DPP_RC_COMMON_BASE | 0x1) +#define DPP_RC_COMMON_CALL_FUN_FAIL (DPP_RC_COMMON_BASE | 0x2) +#define DPP_RC_COMMON_GET_POINTER_FAIL (DPP_RC_COMMON_BASE | 0x3) +#define DPP_RC_COMMON_SEND_MSG_FAIL (DPP_RC_COMMON_BASE | 0x4) +#define DPP_RC_COMMON_MSG_TYPE_INVALID (DPP_RC_COMMON_BASE | 0x5) +#define DPP_RC_COMMON_MEMCPY_S_INVALID_PTR (DPP_RC_COMMON_BASE | 0x6) +#define DPP_RC_COMMON_MEMCPY_S_MEM_OVERLAP (DPP_RC_COMMON_BASE | 0x7) +#define DPP_RC_COMMON_CLOSE_FAIL (DPP_RC_COMMON_BASE | 0x8) + +/* config module error code */ +#define DPP_RC_CONFIG_BASE (0x300) +#define DPP_RC_CONFIG_PARA_INVALID (DPP_RC_CONFIG_BASE | 0x0) +#define DPP_RC_CONFIG_MSG_TYPE_INVALID (DPP_RC_CONFIG_BASE | 0x1) +#define DPP_RC_CONFIG_MEM_NO_INVALID (DPP_RC_CONFIG_BASE | 0x2) + +/* debug module error code */ +#define DPP_RC_DBG_BASE (0x400) +#define DPP_RC_DBG_INVALID_PARAMS (DPP_RC_DBG_BASE | 0x0) +#define DPP_RC_DBG_INVALID_RANGES (DPP_RC_DBG_BASE | 0x1) +#define DPP_RC_DBG_CALL_FUN_FAIL (DPP_RC_DBG_BASE | 0x2) +#define DPP_RC_DBG_GET_POINTER_FAIL (DPP_RC_DBG_BASE | 
0x3) +#define DPP_RC_DBG_SEND_MSG_FAIL (DPP_RC_DBG_BASE | 0x4) +#define DPP_RC_DBG_MEM_NOT_ALLOC (DPP_RC_DBG_BASE | 0x5) + +/* download module error code */ +#define DPP_RC_LOAD_BASE (0x500) +#define DPP_RC_LOAD_INVALID_PARAMS (DPP_RC_LOAD_BASE | 0x0) +#define DPP_RC_LOAD_INVALID_RANGES (DPP_RC_LOAD_BASE | 0x1) +#define DPP_RC_LOAD_CALL_FUN_FAIL (DPP_RC_LOAD_BASE | 0x2) +#define DPP_RC_LOAD_GET_POINTER_FAIL (DPP_RC_LOAD_BASE | 0x3) +#define DPP_RC_LOAD_MSG_SEND_FAIL (DPP_RC_LOAD_BASE | 0x4) +#define DPP_RC_LOAD_MSG_TYPE_INVALID (DPP_RC_LOAD_BASE | 0x5) +#define DPP_RC_LOAD_DEV_NOT_EXIST (DPP_RC_LOAD_BASE | 0x6) + +/* device module error code */ +#define DPP_RC_DEV_BASE (0x600) +#define DPP_RC_DEV_PARA_INVALID (DPP_RC_DEV_BASE | 0x0) +#define DPP_RC_DEV_RANGE_INVALID (DPP_RC_DEV_BASE | 0x1) +#define DPP_RC_DEV_CALL_FUNC_FAIL (DPP_RC_DEV_BASE | 0x2) +#define DPP_RC_DEV_TYPE_INVALID (DPP_RC_DEV_BASE | 0x3) +#define DPP_RC_DEV_CONNECT_FAIL (DPP_RC_DEV_BASE | 0x4) +#define DPP_RC_DEV_MSG_INVALID (DPP_RC_DEV_BASE | 0x5) +#define DPP_RC_DEV_NOT_EXIST (DPP_RC_DEV_BASE | 0x6) +#define DPP_RC_DEV_MGR_NOT_INIT (DPP_RC_DEV_BASE | 0x7) +#define DPP_RC_DEV_CFG_NOT_INIT (DPP_RC_DEV_BASE | 0x8) +#define DPP_RC_DEV_STAUS_NOT_ON (DPP_RC_DEV_BASE | 0x9) +#define DPP_RC_DEV_DMA_MEM_ALLOC_FAIL (DPP_RC_DEV_BASE | 0xa) +#define DPP_RC_DEV_DMA_MEM_GET_FAIL (DPP_RC_DEV_BASE | 0xb) + +/* env(xOS) module error code */ +#define DPP_RC_ENV_BASE (0x700) +#define DPP_RC_ENV_PARA_INVALID (DPP_RC_ENV_BASE | 0x0) +#define DPP_RC_ENV_RANGE_INVALID (DPP_RC_ENV_BASE | 0x1) +#define DPP_RC_ENV_CALL_FUNC_FAIL (DPP_RC_ENV_BASE | 0x2) +#define DPP_RC_ENV_SOCKET_FAIL (DPP_RC_ENV_BASE | 0x3) +#define DPP_RC_ENV_SOCKET_FULL (DPP_RC_ENV_BASE | 0x4) +#define DPP_RC_ENV_THREAD_FAIL (DPP_RC_ENV_BASE | 0x5) +#define DPP_RC_ENV_NOT_INIT (DPP_RC_ENV_BASE | 0x6) +#define DPP_RC_ENV_TABLE_FULL (DPP_RC_ENV_BASE | 0x7) +#define DPP_RC_ENV_MUTEX_FAIL (DPP_RC_ENV_BASE | 0x8) +#define DPP_RC_ENV_SOCKET_NOT_EXIST 
(DPP_RC_ENV_BASE | 0x9) + +/* table module error code */ +#define DPP_RC_TABLE_BASE (0x800) +#define DPP_RC_TABLE_PARA_INVALID (DPP_RC_TABLE_BASE | 0x0) +#define DPP_RC_TABLE_RANGE_INVALID (DPP_RC_TABLE_BASE | 0x1) +#define DPP_RC_TABLE_CALL_FUNC_FAIL (DPP_RC_TABLE_BASE | 0x2) +#define DPP_RC_TABLE_SDT_MSG_INVALID (DPP_RC_TABLE_BASE | 0x3) +#define DPP_RC_TABLE_SDT_MGR_INVALID (DPP_RC_TABLE_BASE | 0x4) +#define DPP_RC_TABLE_IF_VALUE_FAIL (DPP_RC_TABLE_BASE | 0x5) +#define DPP_RC_TABLE_SDT_NOT_EXIST (DPP_RC_TABLE_BASE | 0x6) + +/* stat module error code */ +#define DPP_RC_STAT_BASE (0x900) +#define DPP_RC_STAT_INIT_ERR (DPP_RC_STAT_BASE | 0x1) +#define DPP_RC_STAT_TM_INIT_ERR (DPP_RC_STAT_BASE | 0x2) +#define DPP_RC_STAT_TM_DIS_EN (DPP_RC_STAT_BASE | 0x3) +#define DPP_RC_STAT_TM_TYPE_ERR (DPP_RC_STAT_BASE | 0xe) /* TM 计数类型错误*/ +#define DPP_RC_STAT_TM_DEPTH_ERR (DPP_RC_STAT_BASE | 0xf) /* TM 计数深度错误*/ + +/* appl module error code */ +#define DPP_RC_APPL_BASE (0xa00) +#define DPP_RC_APPL_PARA_INVALID (DPP_RC_APPL_BASE | 0x0) +#define DPP_RC_APPL_RANGE_INVALID (DPP_RC_APPL_BASE | 0x1) +#define DPP_RC_APPL_CALL_FUNC_FAIL (DPP_RC_APPL_BASE | 0x2) + +/* reg module error code */ +#define DPP_RC_MODULE_BASE (0xb00) +#define DPP_RC_MODULE_PARA_INVALID (DPP_RC_MODULE_BASE | 0x0) +#define DPP_RC_MODULE_RANGE_INVALID (DPP_RC_MODULE_BASE | 0x1) +#define DPP_RC_MODULE_CALL_FUNC_FAIL (DPP_RC_MODULE_BASE | 0x2) +#define DPP_RC_MODULE_ENUM_TYPE_FAIL (DPP_RC_MODULE_BASE | 0x3) +#define DPP_RC_MODULE_BCDR_WR_FAIL (DPP_RC_MODULE_BASE | 0x4) + +/* reg module error code */ +#define DPP_RC_REG_BASE (0xc00) +#define DPP_RC_REG_PARA_INVALID (DPP_RC_REG_BASE | 0x0) +#define DPP_RC_REG_RANGE_INVALID (DPP_RC_REG_BASE | 0x1) +#define DPP_RC_REG_CALL_FUNC_FAIL (DPP_RC_REG_BASE | 0x2) +#define DPP_RC_REG_FIELD_OVERFLOW (DPP_RC_REG_BASE | 0x3) + +/* DTB module error code */ +#define DPP_RC_DTB_BASE (0xd00) +#define DPP_RC_DTB_MGR_EXIST (DPP_RC_DTB_BASE | 0x0) +#define DPP_RC_DTB_MGR_NOT_EXIST 
(DPP_RC_DTB_BASE | 0x1) +#define DPP_RC_DTB_QUEUE_RES_EMPTY (DPP_RC_DTB_BASE | 0x2) +#define DPP_RC_DTB_QUEUE_BUFF_SIZE_ERR (DPP_RC_DTB_BASE | 0x3) +#define DPP_RC_DTB_QUEUE_ITEM_HW_EMPTY (DPP_RC_DTB_BASE | 0x4) +#define DPP_RC_DTB_QUEUE_ITEM_SW_EMPTY (DPP_RC_DTB_BASE | 0x5) +#define DPP_RC_DTB_TAB_UP_BUFF_EMPTY (DPP_RC_DTB_BASE | 0x6) +#define DPP_RC_DTB_TAB_DOWN_BUFF_EMPTY (DPP_RC_DTB_BASE | 0x7) +#define DPP_RC_DTB_TAB_UP_TRANS_ERR (DPP_RC_DTB_BASE | 0x8) +#define DPP_RC_DTB_TAB_DOWN_TRANS_ERR (DPP_RC_DTB_BASE | 0x9) +#define DPP_RC_DTB_QUEUE_IS_WORKING (DPP_RC_DTB_BASE | 0xa) +#define DPP_RC_DTB_QUEUE_IS_NOT_INIT (DPP_RC_DTB_BASE | 0xb) +#define DPP_RC_DTB_MEMORY_ALLOC_ERR (DPP_RC_DTB_BASE | 0xc) +#define DPP_RC_DTB_PARA_INVALID (DPP_RC_DTB_BASE | 0xd) +#define DPP_RC_DMA_RANGE_INVALID (DPP_RC_DTB_BASE | 0xe) +#define DPP_RC_DMA_RCV_DATA_EMPTY (DPP_RC_DTB_BASE | 0xf) +#define DPP_RC_DTB_LPM_INSERT_FAIL (DPP_RC_DTB_BASE | 0x10) +#define DPP_RC_DTB_LPM_DELETE_FAIL (DPP_RC_DTB_BASE | 0x11) +#define DPP_RC_DTB_DOWN_LEN_INVALID (DPP_RC_DTB_BASE | 0x12) +#define DPP_RC_DTB_DOWN_HASH_CONFLICT (DPP_RC_DTB_BASE | 0x13) //3347 +#define DPP_RC_DTB_QUEUE_NOT_ALLOC (DPP_RC_DTB_BASE | 0x14) +#define DPP_RC_DTB_QUEUE_NAME_ERROR (DPP_RC_DTB_BASE | 0x15) +#define DPP_RC_DTB_DUMP_SIZE_SMALL (DPP_RC_DTB_BASE | 0x16) +#define DPP_RC_DTB_SEARCH_VPORT_QUEUE_ZERO (DPP_RC_DTB_BASE | 0x17) +#define DPP_RC_DTB_QUEUE_NOT_ENABLE (DPP_RC_DTB_BASE | 0x18) +#define DPP_RC_DTB_OPEN_DEBUG_MODE (DPP_RC_DTB_BASE | 0x19) +#define DPP_RC_DTB_OVER_TIME (DPP_RC_DTB_BASE | 0x1a) //3354 +#define DPP_RC_DTB_TABLE_SWITCH_NOT_ENABLE (DPP_RC_DTB_BASE | 0x1b) +#define DPP_RC_DTB_BAR_ABNORMAL (DPP_RC_DTB_BASE | 0x1c) + +/* lif module error code */ +#define DPP_RC_LIF_BASE (0xe00) +#define DPP_RC_LIF_PARA_INVALID (DPP_RC_LIF_BASE | 0x0) +#define DPP_RC_LIF_RANGE_INVALID (DPP_RC_LIF_BASE | 0x1) +#define DPP_RC_LIF_CALL_FUNC_FAIL (DPP_RC_LIF_BASE | 0x2) +#define DPP_RC_LIF_OPER_MODE_ERR (DPP_RC_LIF_BASE | 0x3) 
+#define DPP_RC_SERDES_ARRAY_OVERFLOW (DPP_RC_LIF_BASE | 0x4) +#define DPP_RC_RCV_PLL_LOCK_TIMEOUT (DPP_RC_LIF_BASE | 0x5) +#define DPP_RC_LIF_TIMEOUT (DPP_RC_LIF_BASE | 0x7) +#define DPP_RC_LIF_NO_PORT (DPP_RC_LIF_BASE | 0x8) +#define DPP_RC_BCDR_PARA_ERR (DPP_RC_LIF_BASE | 0x9) +#define DPP_RC_LIF_NOT_SUPPORT (DPP_RC_LIF_BASE | 0xa) + +#define DPP_RC_CTRLCH_BASE (0xf00) +#define DPP_RC_CTRLCH_MSG_LEN_ZERO (DPP_RC_CTRLCH_BASE | 0x0) +#define DPP_RC_CTRLCH_MSG_PRO_ERR (DPP_RC_CTRLCH_BASE | 0x1) +#define DPP_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT \ + (DPP_RC_CTRLCH_BASE | 0x2) /*消息类型不支持*/ +#define DPP_RC_CTRLCH_MSG_OPER_NOT_SUPPORT \ + (DPP_RC_CTRLCH_BASE | 0x3) /*消息操作不支持*/ +#define DPP_RC_CTRLCH_MSG_DROP (DPP_RC_CTRLCH_BASE | 0x4) /*消息操作不支持*/ + +/* ddr module error code */ +#define DPP_RC_DDR_BASE (0x1000) +#define DPP_RC_DDR_TIME_OUT (DPP_RC_DDR_BASE | 0x0) +#define DPP_RC_DDR_TRAIN_FAIL (DPP_RC_DDR_BASE | 0x1) +#define DPP_RC_DDR_TYPE_ERR (DPP_RC_DDR_BASE | 0x2) +#define DPP_RC_DDR_LOGCHK_ERR (DPP_RC_DDR_BASE | 0x3) +#define DPP_RC_DDR_PARA_ERR (DPP_RC_DDR_BASE | 0x4) +#define DPP_RC_DDR_DAMAGED (DPP_RC_DDR_BASE | 0x5) +#define DPP_RC_DDR_DISABLE (DPP_RC_DDR_BASE | 0x6) +#define DPP_RC_DDR_ARRAY_OVERFLOW (DPP_RC_DDR_BASE | 0x7) +#define DPP_RC_DDR_INVAL_TRAIN_MODE (DPP_RC_DDR_BASE | 0x8) +#define DPP_RC_DDR_INVAL_VREF_MODE (DPP_RC_DDR_BASE | 0x9) +#define DPP_RC_DDR_INTR_ERR (DPP_RC_DDR_BASE | 0xa) +#define DPP_RC_DDR_BIST_FAIL (DPP_RC_DDR_BASE | 0xb) +#define DPP_RC_DDR_SOFTCHK_FAIL (DPP_RC_DDR_BASE | 0xb) +#define DPP_RC_DDR_PLL_LOCK_TIMEOUT (DPP_RC_DDR_BASE | 0x0) + +/* tlb module error code */ +#define DPP_RC_TLB_BASE (0x2000) +#define DPP_RC_TLB_MGR_EXIST (DPP_RC_TLB_BASE | 0x0) +#define DPP_RC_TLB_MGR_NOT_EXIST (DPP_RC_TLB_BASE | 0x1) + +/* se alg error code */ +#define DPP_SE_RC_BASE (0x50000) +#define DPP_SE_RC_ZBLK_FULL (DPP_SE_RC_CFG_BASE | 0x1) +#define DPP_SE_RC_FUN_INVALID (DPP_SE_RC_CFG_BASE | 0x2) +#define DPP_SE_RC_PARA_INVALID (DPP_SE_RC_CFG_BASE | 0x3) 
+ +#define DPP_SE_RC_CFG_BASE (DPP_SE_RC_BASE | 0x1000) +#define DPP_SE_RC_ACL_BASE (DPP_SE_RC_BASE | 0x2000) +#define DPP_SE_RC_LPM_BASE (DPP_SE_RC_BASE | 0x3000) +#define DPP_LPM_TBL_INVALID (DPP_SE_RC_LPM_BASE | 0x01) +#define DPP_LPM_MODE_INVALID (DPP_SE_RC_LPM_BASE | 0x02) +#define DPP_LPM_FUNID_INVALID (DPP_SE_RC_LPM_BASE | 0x03) +#define DPP_LPM_RAM_FULL (DPP_SE_RC_LPM_BASE | 0x04) +#define DPP_LPM_UPDATE (DPP_SE_RC_LPM_BASE | 0x05) +#define DPP_LPM_SRHFAIL (DPP_SE_RC_LPM_BASE | 0x06) +#define DPP_LPM_KEY_INVALID (DPP_SE_RC_LPM_BASE | 0x07) +#define DPP_LPM_DEF_REG_FULL \ + (DPP_SE_RC_LPM_BASE | 0x08) /*16组默认路由寄存器插满*/ +#define DPP_LPM_DEF_REG_NO_HIT \ + (DPP_SE_RC_LPM_BASE | 0x09) /*16组默认路由寄存器未匹配删除条目*/ +#define DPP_LPM_DEF_REG_UPDATE (DPP_SE_RC_LPM_BASE | 0x0A) +#define DPP_LPM_DIV_GO_ON (DPP_SE_RC_LPM_BASE | 0x0B) +#define DPP_LPM_DDR_FULL (DPP_SE_RC_LPM_BASE | 0x0C) +#define DPP_LPM_DDR_PARA_INVALID (DPP_SE_RC_LPM_BASE | 0x0D) + +#define DPP_SE_RC_HASH_BASE (DPP_SE_RC_BASE | 0x4000) +#define DPP_HASH_RC_INVALID_FUNCINFO (DPP_SE_RC_HASH_BASE | 0x1) +#define DPP_HASH_RC_INVALID_ZBLCK (DPP_SE_RC_HASH_BASE | 0x2) +#define DPP_HASH_RC_INVALID_ZCELL (DPP_SE_RC_HASH_BASE | 0x3) +#define DPP_HASH_RC_INVALID_KEY (DPP_SE_RC_HASH_BASE | 0x4) +#define DPP_HASH_RC_INVALID_TBL_ID_INFO (DPP_SE_RC_HASH_BASE | 0x5) +#define DPP_HASH_RC_RB_TREE_FULL (DPP_SE_RC_HASH_BASE | 0x6) +#define DPP_HASH_RC_INVALID_KEY_TYPE (DPP_SE_RC_HASH_BASE | 0x7) +#define DPP_HASH_RC_ADD_UPDATE (DPP_SE_RC_HASH_BASE | 0x8) +#define DPP_HASH_RC_DEL_SRHFAIL (DPP_SE_RC_HASH_BASE | 0x9) +#define DPP_HASH_RC_ITEM_FULL (DPP_SE_RC_HASH_BASE | 0xa) +#define DPP_HASH_RC_INVALID_DDR_WIDTH_MODE (DPP_SE_RC_HASH_BASE | 0xb) +#define DPP_HASH_RC_INVALID_PARA (DPP_SE_RC_HASH_BASE | 0xc) +#define DPP_HASH_RC_TBL_FULL (DPP_SE_RC_HASH_BASE | 0xd) +#define DPP_HASH_RC_SRH_FAIL (DPP_SE_RC_HASH_BASE | 0xe) +#define DPP_HASH_RC_MATCH_ITEM_FAIL (DPP_SE_RC_HASH_BASE | 0xf) +#define DPP_HASH_RC_DDR_WIDTH_MODE_ERR 
(DPP_SE_RC_HASH_BASE | 0x10) +#define DPP_HASH_RC_INVALID_ITEM_TYPE (DPP_SE_RC_HASH_BASE | 0x11) +#define DPP_HASH_RC_REPEAT_INIT (DPP_SE_RC_HASH_BASE | 0x12) + +#define DPP_SE_RC_ETCAM_BASE (DPP_SE_RC_BASE | 0x5000) +#define DPP_ETCAM_RC_TBL_INVALID (DPP_SE_RC_ETCAM_BASE | 0x1) +#define DPP_ETCAM_RC_TBL_OVERFLOW (DPP_SE_RC_ETCAM_BASE | 0x2) + +#define DPP_ACL_RC_BASE (0x60000) +#define DPP_ACL_RC_INVALID_TBLID (DPP_ACL_RC_BASE | 0x0) +#define DPP_ACL_RC_INVALID_BLOCKNUM (DPP_ACL_RC_BASE | 0x1) +#define DPP_ACL_RC_INVALID_BLOCKID (DPP_ACL_RC_BASE | 0x2) +#define DPP_ACL_RC_TBL_NOT_INIT (DPP_ACL_RC_BASE | 0x3) +#define DPP_ACL_RC_ETCAMID_NOT_INIT (DPP_ACL_RC_BASE | 0x4) +#define DPP_ACL_RC_AS_ERAM_NOT_ENOUGH (DPP_ACL_RC_BASE | 0x5) +#define DPP_ACL_RC_RB_TREE_FULL (DPP_ACL_RC_BASE | 0x6) +#define DPP_ACL_RC_TABLE_FULL (DPP_ACL_RC_BASE | 0x7) +#define DPP_ACL_RC_INVALID_PARA (DPP_ACL_RC_BASE | 0x8) +#define DPP_ACL_RC_DEL_SRHFAIL (DPP_ACL_RC_BASE | 0x9) +#define DPP_ACL_RC_TABLE_UPDATE (DPP_ACL_RC_BASE | 0xa) +#define DPP_ACL_RC_SRH_FAIL (DPP_ACL_RC_BASE | 0xb) +#define DPP_ACL_RC_INDEX_MGR_NOT_EXIST (DPP_ACL_RC_BASE | 0xc) +#define DPP_ACL_RC_INDEX_RES_FULL (DPP_ACL_RC_BASE | 0xd) + +#define DPP_SE_RC_CAR_BASE (0x1100) +#define DPP_RC_CAR_TIME_OUT (DPP_SE_RC_CAR_BASE | 0x1) +#define DPP_RC_CAR_QUEUE_OUTRANGE (DPP_SE_RC_CAR_BASE | 0x2) +#define DPP_RC_CAR_RB_TREE_FULL (DPP_SE_RC_CAR_BASE | 0x3) +#define DPP_RC_CAR_RB_TREE_UPDATE (DPP_SE_RC_CAR_BASE | 0x4) +#define DPP_RC_CAR_RB_TREE_DEL_FAIL (DPP_SE_RC_CAR_BASE | 0x5) +#define DPP_RC_CAR_RB_TREE_SRH_FAIL (DPP_SE_RC_CAR_BASE | 0x6) +#define DPP_RC_CAR_RB_TREE_GET_FAIL (DPP_SE_RC_CAR_BASE | 0x7) +#define DPP_RC_CAR_INIT_FAIL (DPP_SE_RC_CAR_BASE | 0x8) +#define DPP_RC_CAR_UN_INIT (DPP_SE_RC_CAR_BASE | 0x9) +#define DPP_RC_CAR_PROFILE_UNSET (DPP_SE_RC_CAR_BASE | 0xa) +#define DPP_RC_CAR_LISTSTACK_MNG_FULL (DPP_SE_RC_CAR_BASE | 0xb) + +/*system critical item check error code*/ +#define DPP_SYSTEM_CHECK_RC_BASE (0x70000) 
+#define DPP_SYSTEM_CHECK_CRM_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x1000) +#define DPP_SYSTEM_CHECK_LIF0_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x2000) +#define DPP_SYSTEM_CHECK_PKTRX_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x3000) +#define DPP_SYSTEM_CHECK_PBU_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x4000) +#define DPP_SYSTEM_CHECK_PPU_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x5000) +#define DPP_SYSTEM_CHECK_ODMA_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x6000) +#define DPP_SYSTEM_CHECK_CFG_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x7000) +#define DPP_SYSTEM_CHECK_TM_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x8000) +#define DPP_SYSTEM_CHECK_TM_READY_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x100) +#define DPP_SYSTEM_CHECK_TM_SAIP_RC_BASE (DPP_SYSTEM_CHECK_RC_BASE | 0x200) +#define DPP_SYSTEM_CHECK_TM_ILIF_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x300) +#define DPP_SYSTEM_CHECK_TM_OLIF_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x400) +#define DPP_SYSTEM_CHECK_TM_TMMU_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x500) +#define DPP_SYSTEM_CHECK_TM_SHAPE_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x600) +#define DPP_SYSTEM_CHECK_TM_CRDT_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x700) +#define DPP_SYSTEM_CHECK_TM_CGAVD_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x800) +#define DPP_SYSTEM_CHECK_TM_QMU_RC_BASE (DPP_SYSTEM_CHECK_TM_RC_BASE | 0x900) + +/*system critical item check error code*/ +#define DPP_SERDES_CHECK_RC_BASE (0x80000) +#define DPP_SERDES_CHECK_PLL_BASE (DPP_SERDES_CHECK_RC_BASE | 0x1000) +#define DPP_SERDES_CHECK_PLL_A_BASE (DPP_SERDES_CHECK_PLL_BASE | 0x100) +#define DPP_SERDES_CHECK_PLL_B_BASE (DPP_SERDES_CHECK_PLL_BASE | 0x200) +#define DPP_SERDES_CHECK_PHYREADY_BASE (DPP_SERDES_CHECK_RC_BASE | 0x2000) +#define DPP_SERDES_CHECK_TX_PHYREADY_BASE \ + (DPP_SERDES_CHECK_PHYREADY_BASE | 0x100) +#define DPP_SERDES_CHECK_RX_PHYREADY_BASE \ + (DPP_SERDES_CHECK_PHYREADY_BASE | 0x200) +#define DPP_SERDES_CHECK_BERTOK_BASE (DPP_SERDES_CHECK_RC_BASE | 0x3000) + +/*TM RAM检测项返回错误码定义*/ +#define DPP_TM_CHECK_RC_BASE 
(0x100000) +#define DPP_TM_CHECK_RAM_RC_BASE (DPP_TM_CHECK_RC_BASE | 0x100000) +#define DPP_RC_TM_CRDT_INS_BUSY (DPP_TM_CHECK_RC_BASE | 0x100) + +/*SAIP RAM检测项返回错误码定义*/ +#define DPP_SA_CHECK_RC_BASE (0x110000) +#define DPP_SA_CHECK_RAM_RC_BASE (DPP_SA_CHECK_RC_BASE | 0x110000) + +/*soft reset check error code*/ +#define DPP_SOFT_RESET_CHECK_BASE (0x1300) +#define DPP_SOFT_RESET_CHECK_WR_FAIL (DPP_SOFT_RESET_CHECK_BASE | 0x0) +#define DPP_SOFT_RESET_CHECK_RD_FAIL (DPP_SOFT_RESET_CHECK_BASE | 0x1) +#define DPP_SOFT_RESET_CHECK_WR_SIZE_ERR (DPP_SOFT_RESET_CHECK_BASE | 0x2) +#define DPP_SOFT_RESET_CHECK_RD_SIZE_ERR (DPP_SOFT_RESET_CHECK_BASE | 0x3) +#define DPP_SOFT_RESET_CHECK_BACKUP_ERR (DPP_SOFT_RESET_CHECK_BASE | 0x4) + +//add by yinxh at 2018-1-15 +//soft interrupt check error code +#define DPP_SOFT_INT_CHECK_BASE (0x1400) +#define DPP_SOFT_INT_CHECK_INVALID (DPP_SOFT_INT_CHECK_BASE | 0x0) +#define DPP_SOFT_INT_CHECK_NOT_INIT (DPP_SOFT_INT_CHECK_BASE | 0x1) +#define DPP_SOFT_INT_CHECK_SRH_FAIL (DPP_SOFT_INT_CHECK_BASE | 0x2) +#define DPP_SOFT_INT_CHECK_RB_TREE_FULL (DPP_SOFT_INT_CHECK_BASE | 0x3) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_dev.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_dev.h new file mode 100644 index 000000000000..609f36fc65e2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_dev.h @@ -0,0 +1,262 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dev.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: 代码规范性修改 +* 修改日期: 2014/02/10 +* 版 本 号: +* 修 改 人: 丁金凤 +* 修改内容: +***************************************************************/ + +#ifndef _DPP_DEV_H_ +#define _DPP_DEV_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "zxic_common.h" +#include 
"dpp_type_api.h" +#define DEV_HASH_FUNC_ID_NUM (4) + +#define DPP_KEYSIG_DEBUG (1) +#define DPP_DEV_CHANNEL_MAX (2) /* 最多支持6个芯片 */ +#define DPP_DEV_PPU_CLS_MAX (6) /* 芯片支持4级cluster */ +#define DPP_DEV_PPU_INSTR_REG_NUM (3) /* 芯片支持4级cluster 共2个指令空间*/ + +#define DPP_DEV_ME_MAX (8) /* 每级cluster支持8个me */ +#define DPP_DEV_SDT_ID_MAX (256U) +#define DPP_DTB_QUEUE_MAX (128) + +#define DPP_CHIP_DPP (0x279221) + +#define X86_ADDR_2_ARRCH64(X86_ADDR) \ + (((X86_ADDR & (~0xFFFF)) << 4) | (X86_ADDR & 0xFFFF)) + +#define DPP_PCIE_SLOT_MAX (64) +#define DPP_PCIE_CHANNEL_MAX (64) +#define DPP_PCIE_CHANNEL_ID(VPORT) \ + (((((VPORT)&0x7000) >> 9) | (((VPORT)&0x0700) >> 8)) & 0x3F) + +#define DEV_ID(DEV) (((DPP_DEV_T *)(DEV))->device_id) +#define DEV_PCIE_SLOT(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.slot) +#define DEV_PCIE_VPORT(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.vport) +#define DEV_PCIE_DEV(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.device) +#define DEV_PCIE_ADDR(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.base_addr) +#define DEV_PCIE_OFFSET_ADDR(DEV) \ + (((DPP_DEV_T *)(DEV))->pcie_channel.offset_addr) +#define DEV_PCIE_ID(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.pcie_id) +#define DEV_PCIE_LOCK(DEV) (((DPP_DEV_T *)(DEV))->pcie_channel.device_lock) +#define DEV_PCIE_BAR_MSG_NUM(DEV) \ + (((DPP_DEV_T *)(DEV))->pcie_channel.bar_msg_num) + +#define DEV_PCIE_MSG_OFFSET_ADDR (0x2000) +#define DEV_PCIE_MSG_ADDR(DEV) (DEV_PCIE_ADDR(DEV) + DEV_PCIE_MSG_OFFSET_ADDR) +#define DEV_PCIE_REG_ADDR(DEV) \ + (DEV_PCIE_ADDR(DEV) + DEV_PCIE_OFFSET_ADDR(DEV) - SYS_NP_BASE_ADDR1) + +typedef struct dpp_pf_info_t { + ZXIC_UINT16 slot; + ZXIC_UINT16 vport; +} DPP_PF_INFO_T; + +typedef struct dpp_pcie_channel_t { + ZXIC_UINT16 is_used; /* 0空闲,1已使用 */ + ZXIC_UINT16 slot; + ZXIC_UINT16 vport; + ZXIC_UINT16 pcie_id; + ZXIC_ADDR_T base_addr; + ZXIC_ADDR_T offset_addr; + struct pci_dev *device; + ZXIC_MUTEX_T *device_lock; + ZXIC_UINT32 bar_msg_num; + ZXIC_UINT32 hash_index; + ZXIC_UINT32 dev_status; + 
ZXIC_UINT32 dump_dma_size; + ZXIC_ADDR_T dump_dma_phy_addr; + ZXIC_ADDR_T dump_dma_vir_addr; +} DPP_PCIE_CHANNEL_T; + +typedef struct dpp_dev_t { + ZXIC_UINT32 device_id; + DPP_PCIE_CHANNEL_T pcie_channel; +} DPP_DEV_T; + +/** 底层设备硬件读写接口指针*/ +typedef DPP_STATUS (*DPP_DEV_WRITE_FUNC)(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +typedef DPP_STATUS (*DPP_DEV_READ_FUNC)(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +typedef DPP_STATUS (*DPP_ACCESS_SWITCH_FUNC)(ZXIC_UINT32 dev_id, + ZXIC_UINT32 access_type); + +/** 设备访问类型*/ +typedef enum dpp_dev_access_type_e { + DPP_DEV_ACCESS_TYPE_PCIE = 0, /**< @brief PCIe访问*/ + DPP_DEV_ACCESS_TYPE_RISCV = 1, /**< @brief RISCV访问*/ +} DPP_DEV_ACCESS_TYPE_E; + +/** 设备类型*/ +typedef enum dpp_dev_type_e { + DPP_DEV_TYPE_SIM = 0, /**< @brief 仿真器设备*/ + DPP_DEV_TYPE_VCS = 1, /**< @brief VCS设备*/ + DPP_DEV_TYPE_CHIP = 2, /**< @brief asci芯片设备*/ + DPP_DEV_TYPE_FPGA = 3, /**< @brief fpga设备*/ + DPP_DEV_TYPE_PCIE_ACC = 4, + DPP_DEV_TYPE_INVALID, +} DPP_DEV_TYPE_E; + +/** 设备版本*/ +typedef enum dpp_chip_version_e { + DPP_CHIP_VERSION_DPP = 0U, /**< @brief DPP */ + DPP_CHIP_VERSION_DPP_P = 1U, /**< @brief DPP+ */ + DPP_CHIP_VERSION_INVALID, +} DPP_CHIP_VERSION_E; + +/** 互斥锁类型*/ +typedef enum dpp_dev_mutex_type_e { + DPP_DEV_MUTEX_T_REG = 0, /**< @brief 寄存器操作互斥锁 */ + DPP_DEV_MUTEX_T_OAM = 1, /**< @brief OAM模块操作互斥锁 */ + DPP_DEV_MUTEX_T_ETM = 2, /**< @brief ETM模块操作互斥锁 */ + DPP_DEV_MUTEX_T_DDR = 4, /**< @brief DDR模块操作互斥锁 */ + DPP_DEV_MUTEX_T_IND = 5, /**< @brief RAM间接读写操作互斥锁 */ + DPP_DEV_MUTEX_T_ETCAM = 6, /**< @brief ETCAM间接读写操作互斥锁*/ + DPP_DEV_MUTEX_T_MMU = 7, /**< @brief MMU间接读写操作互斥锁 */ + DPP_DEV_MUTEX_T_CAR0 = 8, /**< @brief CAR0模块操作互斥锁 */ + DPP_DEV_MUTEX_T_ALG = 9, /**< @brief ALG间接读写操作互斥锁 */ + DPP_DEV_MUTEX_T_NPPU = 10, /**< @brief nppu间接读写操作互斥锁 */ + DPP_DEV_MUTEX_T_SMMU0 = 11, /**< @brief smmu0 模块操作互斥锁 */ + DPP_DEV_MUTEX_T_SMMU1 = 12, /**< @brief smmu1 模块操作互斥锁 */ + DPP_DEV_MUTEX_T_ETM_2ND 
= + 13, /**< @brief ETM模块二层间接表操作互斥锁 */ + DPP_DEV_MUTEX_T_LPM = 14, /**< @brief LPM模块操作互斥锁 */ + DPP_DEV_MUTEX_T_CRM_TEMP = 15, /**< @brief 温度获取操作互斥锁 */ + DPP_DEV_MUTEX_T_SIM = 16, /**< @brief 仿真器socket通信操作互斥锁 */ + DPP_DEV_MUTEX_T_DTB = 17, /**< @brief DTB队列操作互斥锁 */ + DPP_DEV_MUTEX_T_DTB_RB = 18, + DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_0 = 19, + DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_1 = 20, + DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_2 = 21, + DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_3 = 22, + DPP_DEV_MUTEX_T_SELF_RECOVER = 23, /**< @brief 自愈恢复互斥锁 */ + DPP_DEV_MUTEX_T_MAX +} DPP_DEV_MUTEX_TYPE_E; + +typedef enum module_init_e { + MODULE_INIT_NPPU = 0, /**< @brief 01:pktrx 0x2:pbu 0x4:odma*/ + MODULE_INIT_PPU, + MODULE_INIT_SE, /**< @brief corresponding bits indicacte SE submodule, 0:smmu0 1:smmu1 2:alg 3:as 4:etcam 5:stat 6:reg_fifo 31:SE_init_done*/ + MODULE_INIT_ETM, /**< @brief bit31:tm bit0:olif bit1:cgavd bit2:tmmu bit3:shap bit4:crdt bit5:qmu */ + MODULE_INIT_DLB, + MODULE_INIT_TRPG, + MODULE_INIT_TSN, + MODULE_INIT_MAX +} MODULE_INIT_E; + +typedef struct dpp_dev_cfg_t { + ZXIC_UINT32 device_id; /* 设备号 */ + DPP_DEV_TYPE_E + dev_type; /* 设备类型: 0-SIM, 1-VCS, 2-CHIP(ASIC), 3-FPGA. */ + ZXIC_UINT32 chip_ver; /* 设备类型: 0-DPP, 1-DPP+ */ + ZXIC_UINT32 access_type; /* 访问类型: 0-PCIe, 1-RISCV. 
*/ + ZXIC_ADDR_T pcie_addr; /* PCIe映射地址 */ + ZXIC_ADDR_T riscv_addr; /* RISCV映射地址 */ + ZXIC_ADDR_T dma_vir_addr; /* DMA空间映射地址 */ + ZXIC_ADDR_T dma_phy_addr; /* 芯片地址相对偏移 */ + ZXIC_UINT32 init_flags[MODULE_INIT_MAX]; + DPP_DEV_WRITE_FUNC p_pcie_write_fun; /** PCIe硬件写回调函数 */ + DPP_DEV_READ_FUNC p_pcie_read_fun; /** PCIe硬件读回调函数 */ + DPP_DEV_WRITE_FUNC p_riscv_write_fun; /** RISCV硬件写回调函数 */ + DPP_DEV_READ_FUNC p_riscv_read_fun; /** RISCV硬件读回调函数 */ + ZXIC_MUTEX_T reg_opr_mutex; /** 寄存器操作互斥量 */ + ZXIC_MUTEX_T oam_mutex; /** OAM硬件操作互斥锁 */ + ZXIC_MUTEX_T etm_mutex; /** ETM硬件操作互斥锁 */ + ZXIC_MUTEX_T ddr_mutex; /** DDR硬件操作互斥锁 */ + ZXIC_MUTEX_T ind_mutex; /** RAM间接操作互斥锁 */ + ZXIC_MUTEX_T etcam_mutex; /** ETCAM硬件操作互斥锁 */ + ZXIC_MUTEX_T car0_mutex; /** CAR0硬件操作互斥锁 */ + ZXIC_MUTEX_T alg_mutex; /** alg硬件操作互斥锁*/ + ZXIC_MUTEX_T nppu_mutex; /** nppu硬件操作互斥锁*/ + ZXIC_MUTEX_T smmu0_mutex; /** smmu0 硬件操作互斥锁*/ + ZXIC_MUTEX_T smmu1_mutex; /** smmu1 硬件操作互斥锁*/ + ZXIC_MUTEX_T etm_2nd_mutex; /** ETM 二层间接表 硬件操作互斥锁*/ + ZXIC_MUTEX_T lpm_mutex; /** lpm配置 操作互斥锁*/ + ZXIC_MUTEX_T crm_temp_mutex; /** 温度获取 操作互斥锁*/ + ZXIC_MUTEX_T sim_mutex; /** 仿真器socket通信 操作互斥锁*/ + ZXIC_MUTEX_T dtb_mutex; /** DTB操作互斥锁*/ + ZXIC_MUTEX_T pktrx_mf_glb_cfg_mutex_0; /** PKTRX全局配置区互斥锁*/ + ZXIC_MUTEX_T pktrx_mf_glb_cfg_mutex_1; /** PKTRX全局配置区互斥锁*/ + ZXIC_MUTEX_T pktrx_mf_glb_cfg_mutex_2; /** PKTRX全局配置区互斥锁*/ + ZXIC_MUTEX_T pktrx_mf_glb_cfg_mutex_3; /** PKTRX全局配置区互斥锁*/ + ZXIC_MUTEX_T self_recover_mutex; /** 自愈恢复互斥锁*/ + ZXIC_MUTEX_T hash_mutex[DPP_PCIE_SLOT_MAX] + [DEV_HASH_FUNC_ID_NUM]; /** hash插入 操作互斥锁*/ + ZXIC_MUTEX_T dtb_rb_mutex[DPP_DTB_QUEUE_MAX]; /** hash插入 操作互斥锁*/ + ZXIC_MUTEX_T + dtb_queue_mutex[DPP_DTB_QUEUE_MAX]; /* DTB模块队列操作互斥锁 */ + DPP_PCIE_CHANNEL_T pcie_channel[DPP_PCIE_SLOT_MAX][DPP_PCIE_CHANNEL_MAX]; + ZXIC_VOID *p_std_nic_res[DPP_PCIE_SLOT_MAX]; /* 标卡流表资源 */ + ZXIC_UINT32 bar_msg_num[DPP_PCIE_SLOT_MAX]; /*bar通道消息个数*/ +} DPP_DEV_CFG_T; + +typedef struct dpp_dev_mngr_t { + ZXIC_UINT32 device_num; /* 设备数目 */ + 
ZXIC_UINT32 is_init; + DPP_DEV_CFG_T *p_dev_array[DPP_DEV_CHANNEL_MAX]; +} DPP_DEV_MGR_T; + +DPP_STATUS dpp_dev_init(ZXIC_VOID); +DPP_STATUS dpp_dev_add(ZXIC_UINT32 dev_id, DPP_DEV_TYPE_E dev_type, + DPP_DEV_ACCESS_TYPE_E access_type, ZXIC_ADDR_T pcie_addr, + ZXIC_ADDR_T riscv_addr, ZXIC_ADDR_T dma_vir_addr, + ZXIC_ADDR_T dma_phy_addr, + DPP_DEV_WRITE_FUNC p_pcie_write_fun, + DPP_DEV_READ_FUNC p_pcie_read_fun, + DPP_DEV_WRITE_FUNC p_riscv_write_fun, + DPP_DEV_READ_FUNC p_riscv_read_fun); +DPP_STATUS dpp_dev_del(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_dev_get(DPP_PF_INFO_T *pf_info, DPP_DEV_T *dev); +DPP_STATUS dpp_dev_pcie_channel_add(DPP_PF_INFO_T *pf_info, + struct pci_dev *p_dev); +DPP_STATUS dpp_dev_pcie_channel_del(DPP_PF_INFO_T *pf_info); +ZXIC_VOID *dpp_dev_get_se_res_ptr(DPP_DEV_T *dev); +ZXIC_VOID dpp_dev_set_se_res_ptr(DPP_DEV_T *dev, ZXIC_VOID *se_ptr); +DPP_STATUS dpp_dev_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + ZXIC_MUTEX_T **p_mutex_out); +DPP_STATUS dpp_dev_dtb_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + ZXIC_UINT32 index, + ZXIC_MUTEX_T **p_mutex_out); +DPP_STATUS dpp_dev_pcie_default_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_dev_pcie_default_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_dev_write_channel(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_dev_read_channel(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_dev_hash_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 fun_id, + ZXIC_MUTEX_T **p_mutex_out); +DPP_STATUS dpp_dev_hash_opr_mutex_create(DPP_DEV_T *dev); +DPP_STATUS dpp_dev_hash_opr_mutex_destroy(DPP_DEV_T *dev); +DPP_STATUS dpp_dev_last_check(DPP_DEV_T *dev, ZXIC_UINT32 *last_flag); +DPP_STATUS dpp_soft_hash_index_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_index); +DPP_STATUS dpp_soft_hash_index_get(DPP_DEV_T *dev, ZXIC_UINT32 *hash_index); 
+DPP_STATUS dpp_dev_dump_dma_mem_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_dma_size, + ZXIC_UINT64 *p_dma_phy_addr, + ZXIC_UINT64 *p_dma_vir_addr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_init.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_init.h new file mode 100644 index 000000000000..9d20068a67b2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/chip/dpp_init.h @@ -0,0 +1,79 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_init.h +* 文件标识 : +* 内容摘要 : 芯片初始化头文件 +* 其它说明 : +* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2015/03/17 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_INIT_H_ +#define _DPP_INIT_H_ + +/** 系统初始化标志*/ +#define DPP_INIT_FLAG_ACCESS_TYPE \ + (1 << 0) /**< @brief 访问模式: 0-PCRISCV1-MDIO*/ +#define DPP_INIT_FLAG_SERDES_DOWN_TP \ + (1 << 1) /**< @brief serdes加载方式: 0-组播,1-单播*/ +#define DPP_INIT_FLAG_DDR_BACKDOOR \ + (1 << 2) /**< @brief DDR3校准: 0-校准,1-不校准*/ +#define DPP_INIT_FLAG_SA_MODE \ + (1 << 3) /**< @brief SA工作模式: 0-内置sa,1-非SA模式*/ +#define DPP_INIT_FLAG_SA_MESH \ + (1 << 4) /**< @brief SA mesh模式: 0-非mesh,1-mesh*/ +#define DPP_INIT_FLAG_SA_SERDES_MODE \ + (1 << 5) /**< @brief SA serdes编码方式: 0-64B/66B; 1-8B/10B */ +#define DPP_INIT_FLAG_INT_DEST_MODE \ + (1 << 6) /**< @brief 中断上报方式: 0-PCIe; 1-LocalBus(管脚)*/ +#define DPP_INIT_FLAG_LIF0_MODE (1 << 7) /**< @brief 0: PON模式 1: QSGMII模式*/ +#define DPP_INIT_FLAG_DMA_ENABLE \ + (1 << 8) /**< @brief DMA使能: 0-使能,1-不使能*/ +#define DPP_INIT_FLAG_TM_IMEM_FLAG \ + (1 << 9) /**< @brief TM片内外模式: 0-片外,1-片内*/ + +/** dpp sdk系统初始化控制结构*/ +typedef struct dpp_sys_init_ctrl_t { + DPP_DEV_TYPE_E + device_type; /**< @brief 设备类型,取值参考DPP_DEV_TYPE_E的定义*/ + ZXIC_UINT32 flags; /**< @brief 初始化标志,按bitmap使用,*/ + ZXIC_UINT32 sa_id; /**< 
@brief 内置sa模式下,sa的ID,范围0~127*/ + ZXIC_UINT32 + case_num; /**< @brief TM四组QMU初始化场景编号为1-8;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8.*/ + ZXIC_UINT32 + lif0_port_type; /**< @brief LIF0是PON口模式下,lif0的PON端口类型,详见DPP_LIF0_PON_TYPE_E */ + ZXIC_UINT32 + lif1_port_type; /**< @brief 非内置SA模式下,lif1的端口类型,详见DPP_LIF1_PORT_MODE_E */ + ZXIC_ADDR_T pcie_vir_baddr; /**< @brief PCIe映射虚拟基地址*/ + ZXIC_ADDR_T riscv_vir_baddr; /**< @brief RISCV映射虚拟基地址 */ + ZXIC_ADDR_T dma_vir_baddr; /**< @brief DMA映射虚拟地址*/ + ZXIC_ADDR_T dma_phy_baddr; /**< @brief DMA内存物理地址*/ + DPP_DEV_WRITE_FUNC pcie_write_fun; /**< @brief PCIe硬件写回调函数 */ + DPP_DEV_READ_FUNC pcie_read_fun; /**< @brief PCIe硬件读回调函数*/ + DPP_DEV_WRITE_FUNC riscv_write_fun; /**< @brief RISCV硬件写回调函数 */ + DPP_DEV_READ_FUNC riscv_read_fun; /**< @brief RISCV硬件读回调函数*/ + DPP_ACCESS_SWITCH_FUNC + access_switch_fun; /**< @brief PCIe/RISCV切换回调函数*/ +} DPP_SYS_INIT_CTRL_T; + +/***********************************************************/ +/** 芯片上电初始,完整版本 +* @param dev_id 设备号 +* @param p_init_ctrl 系统初始化控制数据结构,由用户完成实例化和成员赋值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_init(ZXIC_UINT32 dev_id); + +#endif /* dpp_init.h */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/init/dpp_kernel_init.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/init/dpp_kernel_init.h new file mode 100644 index 000000000000..41cb2b4244ac --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/init/dpp_kernel_init.h @@ -0,0 +1,19 @@ +#ifndef _DPP_KERNEL_INIT_H_ +#define _DPP_KERNEL_INIT_H_ + +#include "zxic_common.h" +#include "dpp_dtb_table_api.h" + +ZXIC_SINT32 dpp_dtb_queue_dma_mem_alloc(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 size); +ZXIC_SINT32 dpp_dtb_queue_dma_mem_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DTB_QUEUE_DMA_ADDR_INFO *dmaAddrInfo); +ZXIC_SINT32 dpp_dtb_queue_dma_mem_release(DPP_DEV_T *dev, ZXIC_UINT32 
queue_id); +ZXIC_SINT32 dtb_sdt_dump_dma_alloc(DPP_DEV_T *dev, ZXIC_UINT32 dma_size, + ZXIC_UINT64 *p_dma_phy_addr, + ZXIC_UINT64 *p_dma_vir_addr); + +ZXIC_SINT32 dtb_sdt_dump_dma_release(DPP_DEV_T *dev, ZXIC_UINT32 dma_size, + ZXIC_UINT64 dma_phy_addr, + ZXIC_UINT64 dma_vir_addr); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb.h new file mode 100644 index 000000000000..30838dd863af --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb.h @@ -0,0 +1,732 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dtb.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : zab +* 完成日期 : 2022/08/26 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_DTB_H_ +#define _DPP_DTB_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dtb_cfg.h" + +#define DPP_DTB_QUEUE_ITEM_NUM_MAX (32) + +#define DPP_DTB_ITEM_ACK_SIZE (16) +#define DPP_DTB_ITEM_BUFF_SIZE (16 * 1024) +#define DPP_DTB_ITEM_SIZE (16 + 16 * 1024) +#define DPP_DTB_TAB_UP_SIZE ((16 + 16 * 1024) * 32) +#define DPP_DTB_TAB_DOWN_SIZE ((16 + 16 * 1024) * 32) + +#define DPP_DTB_TAB_UP_ACK_VLD_MASK (0x555555) +#define DPP_DTB_TAB_DOWN_ACK_VLD_MASK (0x5a5a5a) +#define DPP_DTB_TAB_ACK_IS_USING_MASK (0x11111100) +#define DPP_DTB_TAB_ACK_UNUSED_MASK (0x0) +#define DPP_DTB_TAB_ACK_SUCCESS_MASK (0xff) +#define DPP_DTB_TAB_ACK_FAILED_MASK (0x1) +#define DPP_DTB_TAB_ACK_CHECK_VALUE (0x12345678) + +#define DPP_DTB_TAB_ACK_VLD_SHIFT (104) +#define DPP_DTB_TAB_ACK_STATUS_SHIFT (96) + +#define DPP_DTB_TAB_UP_PHY_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.start_phy_addr + \ 
+ INDEX * p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.item_size) +#define DPP_DTB_TAB_UP_USER_PHY_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.user_addr[INDEX] \ + .phy_addr) +#define DPP_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(SLOT_ID, DEV_ID, QUEUE_ID, \ + INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.user_addr[INDEX] \ + .user_flag) +#define DPP_DTB_TAB_UP_USER_ADDR_FLAG_SET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX, \ + VAL) \ + do { \ + p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.user_addr[INDEX] \ + .user_flag = VAL; \ + } while (0) +#define DPP_DTB_TAB_DOWN_PHY_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_down.start_phy_addr + \ + INDEX * p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_down.item_size) +#define DPP_DTB_TAB_UP_VIR_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.start_vir_addr + \ + INDEX * p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.item_size) +#define DPP_DTB_TAB_UP_USER_VIR_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.user_addr[INDEX] \ + .vir_addr) +#define DPP_DTB_TAB_DOWN_VIR_ADDR_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_down.start_vir_addr + \ + INDEX * p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_down.item_size) +#define DPP_DTB_TAB_UP_WR_INDEX_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].tab_up.wr_index) +#define DPP_DTB_TAB_UP_RD_INDEX_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].tab_up.rd_index) +#define DPP_DTB_TAB_DOWN_WR_INDEX_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ 
+ (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].tab_down.wr_index) +#define DPP_DTB_TAB_DOWN_RD_INDEX_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].tab_down.rd_index) +#define DPP_DTB_TAB_UP_DATA_LEN_GET(SLOT_ID, DEV_ID, QUEUE_ID, INDEX) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID] \ + ->queue_info[QUEUE_ID] \ + .tab_up.data_len[INDEX]) +#define DPP_DTB_QUEUE_INIT_FLAG_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].init_flag) +#define DPP_DTB_QUEUE_VPORT_GET(SLOT_ID, DEV_ID, QUEUE_ID) \ + (p_dpp_dtb_mgr[SLOT_ID][DEV_ID]->queue_info[QUEUE_ID].vport) + +#define DPP_BDRING_ITEM_SIZE (16) + +#define DPP_UP_MAC_BD_ITEM_NUM (0XFF) + +#define DPP_BD_ITEM_NUM_MAX (0XFF) +#define DPP_UP_BD_BUFF_SIZE_MAX (4 * 1024) + +#define DPP_UP_MAC_BD_ITEM_SIZE \ + ((DPP_UP_MAC_BD_ITEM_NUM + 1) * DPP_BDRING_ITEM_SIZE) +#define DPP_UP_MAC_BUFF_SIZE (16 * 1024) +#define DPP_UP_MAC_BD_BUFF_SIZE (DPP_UP_MAC_BD_ITEM_NUM * DPP_UP_MAC_BUFF_SIZE) +#define DPP_UP_MAC_BD_TOTAL_SIZE \ + (DPP_UP_MAC_BD_ITEM_SIZE + DPP_UP_MAC_BD_BUFF_SIZE) +#define DPP_DMA_BUFF_SIZE (4 * 1024 * 1024) + +#define DPP_DMA_BD_VLD_MSK (0x80000000) +#define DPP_DMA_BD_DATA_LEN_MSK (0x7FF) + +#define DPP_EP_ID_MAX (16) + +#define DPP_DTB_LEN_MIN (1) +#define DPP_DTB_DOWN_LEN (0x3FF) + +#define DPP_DMA_HASH_KEY_OFFSET (6) +#define DPP_DMA_HASH_ITEM_MAX (64) +#define DPP_DMA_HASH_KEY_RST (DPP_DMA_HASH_ITEM_MAX - DPP_DMA_HASH_KEY_OFFSET) + +#define DPP_DMA_SENDTYPE_START_BIT (23) +#define DPP_DMA_SENDTYPE_BIT_NUM (2) +#define DPP_DMA_VALID_START_BIT (23) +#define DPP_DMA_VALID_BIT_NUM (1) +#define DPP_DMA_HASHID_START_BIT (21) +#define DPP_DMA_HASHID_BIT_NUM (2) +#define DPP_DMA_TBLID_START_BIT (16) +#define DPP_DMA_TBLID_BIT_NUM (2) + +typedef struct dpp_dtb_queue_cfg_t { + ZXIC_ADDR_T up_start_phy_addr; + ZXIC_ADDR_T up_start_vir_addr; + ZXIC_ADDR_T down_start_phy_addr; + ZXIC_ADDR_T down_start_vir_addr; + + ZXIC_UINT32 up_item_size; + 
ZXIC_UINT32 down_item_size; +} DPP_DTB_QUEUE_CFG_T; + +typedef struct dpp_dtb_tab_up_user_addr_t { + ZXIC_UINT32 user_flag; + + ZXIC_ADDR_T phy_addr; + ZXIC_ADDR_T vir_addr; +} DPP_DTB_TAB_UP_USER_ADDR_T; + +typedef struct dpp_dtb_tab_up_info_t { + ZXIC_ADDR_T start_phy_addr; + ZXIC_ADDR_T start_vir_addr; + ZXIC_UINT32 item_size; + + ZXIC_UINT32 wr_index; + ZXIC_UINT32 rd_index; + + ZXIC_UINT32 data_len[DPP_DTB_QUEUE_ITEM_NUM_MAX]; + DPP_DTB_TAB_UP_USER_ADDR_T user_addr[DPP_DTB_QUEUE_ITEM_NUM_MAX]; +} DPP_DTB_TAB_UP_INFO_T; + +typedef struct dpp_dtb_tab_down_info_t { + ZXIC_ADDR_T start_phy_addr; + ZXIC_ADDR_T start_vir_addr; + ZXIC_UINT32 item_size; + + ZXIC_UINT32 wr_index; + ZXIC_UINT32 rd_index; +} DPP_DTB_TAB_DOWN_INFO_T; + +typedef struct dpp_dtb_queue_info_t { + ZXIC_UINT32 init_flag; + ZXIC_UINT32 slot_id; + ZXIC_UINT32 vport; + ZXIC_UINT32 vector; + + DPP_DTB_TAB_UP_INFO_T tab_up; + DPP_DTB_TAB_DOWN_INFO_T tab_down; +} DPP_DTB_QUEUE_INFO_T; + +typedef struct dpp_dtb_mgr_t { + DPP_DTB_QUEUE_INFO_T queue_info[DPP_DTB_QUEUE_NUM_MAX]; +} DPP_DTB_MGR_T; + +typedef enum dpp_dtb_dir_type_e { + DPP_DTB_DIR_DOWN_TYPE = 0, + DPP_DTB_DIR_UP_TYPE = 1, + DPP_DTB_DIR_TYPE_MAX, +} DPP_DTB_DIR_TYPE_E; + +typedef enum dpp_dtb_tab_up_user_addr_type_e { + DPP_DTB_TAB_UP_NOUSER_ADDR_TYPE = 0, + DPP_DTB_TAB_UP_USER_ADDR_TYPE = 1, + DPP_DTB_TAB_UP_USER_ADDR_TYPE_MAX, +} DPP_DTB_TAB_UP_USER_ADDR_TYPE_E; + +typedef enum dpp_dma_send_type_e { + DMA_LEARN_HASH = 0, + DMA_DEL_HASH = 1, + DMA_UPDATE_HASH = 2, + DMA_ADD_HASH = 3, + DMA_SEND_TYPE_MAX +} DPP_DMA_SEND_TYPE_E; + +/* 单个通道BD表管理结构体 */ +typedef struct dpp_dma_bd_t { + ZXIC_ADDR_T bd_phy_addr; /* BD表物理地址 */ + ZXIC_ADDR_T bd_vir_addr; /* BD表进程空间虚拟地址 */ + ZXIC_ADDR_T buff_phy_addr; /* BD表指向的BUFF物理地址 */ + ZXIC_ADDR_T buff_vir_addr; /* BD表指向的BUFF进程空间虚拟地址 */ + ZXIC_UINT32 bd_index; /* BD表当前使用index */ +} DPP_DMA_BD_T; + +/* 单个设备BD表管理结构体 */ +typedef struct dpp_dma_mgr_t { + ZXIC_UINT32 init; + ZXIC_UINT32 endian_flag; /* 
DMA数据通道大小端,0-小端,1-大端 */ + DPP_DMA_BD_T up_mac; +} DPP_DMA_MGR_T; + +/**获取dtb准备完成标记*/ +ZXIC_UINT32 dtb_table_function_switch_get(ZXIC_VOID); + +/**使能dtb准备完成标记*/ +ZXIC_UINT32 dtb_table_function_switch_enable(ZXIC_VOID); + +/**去使能dtb准备完成标记*/ +ZXIC_UINT32 dtb_table_function_switch_disable(ZXIC_VOID); + +/**使能dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_enable(ZXIC_VOID); + +/**去使能dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_disable(ZXIC_VOID); + +/**获取dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_get(ZXIC_VOID); + +/**使能dtb打印函数*/ +ZXIC_UINT32 dpp_dtb_prt_enable(ZXIC_VOID); + +/**去使能dtb打印函数*/ +ZXIC_UINT32 dpp_dtb_prt_disable(ZXIC_VOID); + +/**获取dtb打印函数*/ +ZXIC_UINT32 dpp_dtb_prt_get(ZXIC_VOID); + +ZXIC_UINT32 dpp_dtb_soft_perf_test_set(ZXIC_UINT32 value); + +ZXIC_UINT32 dpp_dtb_soft_perf_test_get(ZXIC_VOID); + +ZXIC_UINT32 dpp_dtb_hardware_perf_test_set(ZXIC_UINT32 value); + +ZXIC_UINT32 dpp_dtb_hardware_perf_test_get(ZXIC_VOID); + +ZXIC_UINT32 dpp_dtb_down_table_overtime_set(ZXIC_UINT32 times_s); +ZXIC_UINT32 dpp_dtb_down_table_overtime_get(ZXIC_VOID); + +ZXIC_UINT32 dpp_dtb_dump_table_overtime_set(ZXIC_UINT32 times_s); +ZXIC_UINT32 dpp_dtb_dump_table_overtime_get(ZXIC_VOID); + +#if ZXIC_REAL("MGR") +/***********************************************************/ +/** 创建DTB的管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_create(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 注销DTB的管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_destory_all(ZXIC_VOID); +ZXIC_UINT32 dpp_dtb_mgr_destory(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 重置DTB管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* 
@remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_reset(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 获取DMA管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +DPP_DTB_MGR_T *dpp_dtb_mgr_get(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id); +#endif + +#if ZXIC_REAL("ACK_RW") +/***********************************************************/ +/** 读取BD表条目信息 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 一个item里面的4个32位,pos对应的是第几个ZXICP_WORD32, +* 取值为0,1,2,3 +* @param p_data 读取的数据,大端格式 +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_ack_rd(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** 向BD表条目指定位置写入值 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 一个item里面的4个32位,pos对应的是第几个ZXICP_WORD32, +* 取值为0,1,2,3 +* @param data 读取的数据,大端格式 +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_ack_wr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 data); +#endif + +#if ZXIC_REAL("BUFF_RW") +/***********************************************************/ +/** 读取dtb条目指向BUFF的数据 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 相对BUFF起始地址的偏移,单位32bit; +* @param p_data 读取的数据,大端格式 +* @param 
len 读取数据长度,单位32bit; +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_buff_rd(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** 向BD表条目指向的BUFF指定位置写入值 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 相对BUFF起始地址的偏移,单位32bit; +* @param p_data 读取的数据,大端格式 +* @param len 写入数据长度,单位32bit; +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_buff_wr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +#endif + +#if ZXIC_REAL("API") +/***********************************************************/ +/** 配置下发配置数据信息 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param int_flag 中断标志,0-无,1-有 +* @param data_len 数据长度,单位32bit; +* @param p_data 待下发数据 +* @param p_item_index 返回使用的条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_down_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 int_flag, + ZXIC_UINT32 data_len, ZXIC_UINT32 *p_data, + ZXIC_UINT32 *p_item_index); + +/***********************************************************/ +/** 一个元素down成功状态检查 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param element_id 条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_down_success_status_check(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id); + 
+/***********************************************************/ +/** dump队列空闲条目获取 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param p_item_index 返回使用的条目编号 +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_free_item_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 *p_item_index); + +/***********************************************************/ +/** 获取dump指定条目物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param p_phy_haddr 物理地址高32bit +* @param p_phy_laddr 物理地址低32bit +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_addr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 *p_phy_haddr, + ZXIC_UINT32 *p_phy_laddr); + +/***********************************************************/ +/** 获取指定dump条目一定地址偏移的物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param p_phy_haddr 物理地址高32bit +* @param p_phy_laddr 物理地址低32bit +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_offset_addr_get(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 addr_offset, + ZXIC_UINT32 *p_phy_haddr, + ZXIC_UINT32 *p_phy_laddr); + +/***********************************************************/ +/** 设置dump指定条目空间地址,用于用户自定义空间传输 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param phy_haddr 物理地址高 +* @param vir_laddr 虚拟地址低 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_user_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 
item_index, + ZXIC_ADDR_T phy_addr, + ZXIC_ADDR_T vir_addr); + +/***********************************************************/ +/** 清除用户dump指定条目空间地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_user_addr_clr(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index); + +/***********************************************************/ +/** dump配置描述符信息设置 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param item_index 返回使用的条目编号 +* @param int_flag 中断标志,0-无,1-有 +* @param data_len 数据长度,单位32bit; +* @param desc_len 描述符长度,单位32bit; +* @param p_desc_data 待下发描述符 +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 int_flag, ZXIC_UINT32 data_len, + ZXIC_UINT32 desc_len, + ZXIC_UINT32 *p_desc_data); + +/***********************************************************/ +/** 一个元素dump成功状态检查 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param element_id 条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_success_status_check(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id); + +/***********************************************************/ +/** 获取dump数据 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param item_index 数据对应的的条目编号 +* @param data_len 数据长度,单位32bit; +* @param p_data dump数据 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 
data_len, ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** dtb初始化 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_init(DPP_DEV_T *dev); + +/***********************************************************/ +/** dtb队列down初始化 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号 +* @param p_queue_cfg 队列配置参数,具体见DPP_DTB_QUEUE_CFG_T结构体类型 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_down_init(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_CFG_T *p_queue_cfg); + +/***********************************************************/ +/** dtb队列dump初始化 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号 +* @param p_queue_cfg 队列配置参数,具体见DPP_DTB_QUEUE_CFG_T结构体类型 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_dump_init(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_CFG_T *p_queue_cfg); + +/***********************************************************/ +/** dtb队列down 空间地址配置 +* @param channelId dtb通道号 +* @param phyAddr down物理地址 +* @param virAddr down虚拟地址 +* @param size 空间大小 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_down_channel_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 channelId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr, + ZXIC_UINT32 size); + +/***********************************************************/ +/** dtb队列dump 空间地址配置 +* @param channelId dtb通道号 +* @param phyAddr dump物理地址 +* @param virAddr dump虚拟地址 +* @param size 空间大小 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 
dpp_dtb_dump_channel_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 channelId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr, + ZXIC_UINT32 size); + +/***********************************************************/ +/** 释放队列资源 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 分配到的队列号; +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_free(DPP_DEV_T *dev, ZXIC_UINT32 queue_id); + +/***********************************************************/ +/** 根据vport查找相应的队列号 +* @param dev_id 设备号,支持多芯片 +* @param vport vport信息 +* @param p_queue_arr 找到到队列数组 +* @param p_num 找到的队列个数 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/13 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_search_by_vport(DPP_DEV_T *dev, + ZXIC_UINT32 *p_queue_arr, + ZXIC_UINT32 *p_num); + +/***********************************************************/ +/** 根据vport查找相应的队列号 +* @param dev_id 设备号,支持多芯片 +* @param vport vport信息 +* @param p_queue_arr 找到到队列数组 +* @param p_num 找到的队列个数 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/13 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_get(DPP_DEV_T *dev, ZXIC_UINT32 *queue); + +/***********************************************************/ +/** 获取当前队列有效标识 +* @param dev 设备 +* @param queue 队列id +* @param valid_flag 出参 0:当前队列未被使用 1:当前队列已被vport使用 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_valid_flag_get(DPP_DEV_T *dev, ZXIC_UINT32 queue, + ZXIC_UINT32 *valid_flag); + +/***********************************************************/ +/** 获取当前队列初始化标识 +* @param dev 设备 +* @param queue 队列id +* @param init_flag 出参 0:未初始化 1:已初始化 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 
dpp_dtb_queue_init_flag_get(DPP_DEV_T *dev, ZXIC_UINT32 queue, + ZXIC_UINT32 *init_flag); + +#endif + +#if ZXIC_REAL("DMA") + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb_cfg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb_cfg.h new file mode 100644 index 000000000000..c82e22a90d17 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/dma/dpp_dtb_cfg.h @@ -0,0 +1,414 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dtb_cfg.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : zab +* 完成日期 : 2022/08/23 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_DTB_CFG_H_ +#define _DPP_DTB_CFG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" + +#define DPP_DEV_SLOT_MAX (DPP_PCIE_SLOT_MAX) +#define DPP_DTB_QUEUE_NUM_MAX (128) +#define DPP_DTB_TRAF_CTRL_RAM_SIZE (256) +#define DPP_DTB_TRAF_CTRL_RAM_5_SIZE (64) +#define DPP_DTB_DUMP_PD_RAM_SIZE (2048) +#define DPP_DTB_RD_CTRL_RAM_SIZE (4096) +#define DPP_DTB_RD_TABLE_RAM_SIZE (8192) +#define DPP_DTB_CMD_MAN_RAM_SIZE (16384) + +/*vport格式 +15 |14 13 12 | 11 |10 9 8|7 6 5 4 3 2 1 0| +rsv| ep_id |func_active|func_num| vfunc_num | +*/ +#define VPORT_EPID_BT_START (12) /*EPID起始位*/ +#define VPORT_EPID_BT_LEN (3) /*EPID长度*/ +#define VPORT_FUNC_ACTIVE_BT_START (11) /*FUNC_ACTIVE起始位*/ +#define VPORT_FUNC_ACTIVE_BT_LEN (1) /*FUNC_ACTIVE长度*/ +#define VPORT_FUNC_NUM_BT_START (8) /*FUNC_NUM起始位*/ +#define VPORT_FUNC_NUM_BT_LEN (3) /*FUNC_NUM长度*/ +#define VPORT_VFUNC_NUM_BT_START (0) /*FUNC_NUM起始位*/ +#define VPORT_VFUNC_NUM_BT_LEN (8) /*FUNC_NUM长度*/ + +typedef struct dpp_dtb_queue_item_info_t { + ZXIC_UINT32 cmd_vld; + ZXIC_UINT32 cmd_type; + ZXIC_UINT32 int_en; + ZXIC_UINT32 data_len; + 
ZXIC_UINT32 data_laddr; + ZXIC_UINT32 data_hddr; +} DPP_DTB_QUEUE_ITEM_INFO_T; + +typedef struct dpp_dtb_queue_vm_info_t { + ZXIC_UINT32 dbi_en; + ZXIC_UINT32 queue_en; + ZXIC_UINT32 epid; + ZXIC_UINT32 vfunc_num; + ZXIC_UINT32 vector; + ZXIC_UINT32 func_num; + ZXIC_UINT32 vfunc_active; +} DPP_DTB_QUEUE_VM_INFO_T; + +#if ZXIC_REAL("DTB_CFG") +/***********************************************************/ +/** DTB队列元素信息配置 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_item_info 队列元素配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_item_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_ITEM_INFO_T *p_item_info); + +/***********************************************************/ +/** 获取DTB队列中剩余未使用的条目数量 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_item_num 剩余未使用条目数量 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_unused_item_num_get(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 *p_item_num); + +/***********************************************************/ +/** 配置队列VM相关信息 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_vm_info VM配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_vm_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_VM_INFO_T *p_vm_info); + +/***********************************************************/ +/** 获取队列VM配置信息 +* @param dev_id оƬid +* @param queue_id 队列ID,范围0-127 +* @param p_vm_info VM配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_vm_info_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_VM_INFO_T *p_vm_info); + 
+/***********************************************************/ +/** 配置队列使能 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param enable 1:队列使能,0:队列去使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/27 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_enable_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 enable); + +/***********************************************************/ +/** 获取队列使能状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param enable 1:队列使能,0:队列去使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/27 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_enable_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 *enable); + +/***********************************************************/ +/** 配置 dtb 完成中断事件状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param state 中断事件状态,1-发生中断,0-无中断发生 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/12 +************************************************************/ +ZXIC_UINT32 dpp_dtb_finish_interrupt_event_state_set(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 state); + +/***********************************************************/ +/** 清除 dtb 完成中断事件状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/12 +************************************************************/ +ZXIC_UINT32 dpp_dtb_finish_interrupt_event_state_clr(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id); + +ZXIC_UINT32 dpp_dtb_debug_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_debug_mode); + +ZXIC_UINT32 dpp_dtb_mode_is_debug(DPP_DEV_T *dev); + +/***********************************************************/ +/** 读取axi 最近一次读表相关信息 +* @param dev_id 芯片id +* @param p_last_rd_table_addr_h axim最近一次读表高地址 +* @param p_last_rd_table_addr_l axim最近一次读表低地址 +* @param p_last_rd_table_len axim最近一次读表长度 +* @param p_last_rd_table_user 
axim最近一次读表USER信号 +* @param p_last_rd_table_onload_cnt axim最近一次读表在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_rd_table_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_rd_table_addr_h, + ZXIC_UINT32 *p_last_rd_table_addr_l, ZXIC_UINT32 *p_last_rd_table_len, + ZXIC_UINT32 *p_last_rd_table_user, + ZXIC_UINT32 *p_last_rd_table_onload_cnt); + +/***********************************************************/ +/** 读表通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_rd_table_resp_err_cnt 读表通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_rd_table_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_rd_table_resp_err_cnt); + +/***********************************************************/ +/** 读取axi 最近一次读PD相关信息 +* @param dev_id 芯片id +* @param p_last_rd_pd_addr_h axim最近一次读PD高地址 +* @param p_last_rd_pd_addr_l axim最近一次读PD低地址 +* @param p_last_rd_pd_len axim最近一次读PD长度 +* @param p_last_rd_pd_user axim最近一次读PD USER信号 +* @param p_last_rd_pd_onload_cnt axim最近一次读PD在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_rd_pd_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_rd_pd_addr_h, + ZXIC_UINT32 *p_last_rd_pd_addr_l, ZXIC_UINT32 *p_last_rd_pd_len, + ZXIC_UINT32 *p_last_rd_pd_user, ZXIC_UINT32 *p_last_rd_pd_onload_cnt); + +/***********************************************************/ +/** 读PD通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_rd_pd_resp_err_cnt 读PD通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_rd_pd_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_rd_pd_resp_err_cnt); + 
+/***********************************************************/ +/** 读取axim 最近一次写控制相关信息 +* @param dev_id 芯片id +* @param p_last_wr_ctrl_addr_h axim最近一次写控制高地址 +* @param p_last_wr_ctrl_addr_l axim最近一次写控制低地址 +* @param p_last_wr_ctrl_len axim最近一次写控制长度 +* @param p_last_wr_ctrl_user axim最近一次写控制 USER信号 +* @param p_last_wr_ctrl_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_ctrl_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_ctrl_addr_h, + ZXIC_UINT32 *p_last_wr_ctrl_addr_l, ZXIC_UINT32 *p_last_wr_ctrl_len, + ZXIC_UINT32 *p_last_wr_ctrl_user, + ZXIC_UINT32 *p_last_wr_ctrl_onload_cnt); + +/***********************************************************/ +/** 获取写控制通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_ctrl_resp_err_cnt 写控制通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_wr_ctrl_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_ctrl_resp_err_cnt); + +/***********************************************************/ +/** 读取axim 最近一次写DDR相关信息 +* @param dev_id 芯片id +* @param p_last_wr_ddr_addr_h axim最近一次写控制高地址 +* @param p_last_wr_ddr_addr_l axim最近一次写控制低地址 +* @param p_last_wr_ddr_len axim最近一次写控制长度 +* @param p_last_wr_ddr_user axim最近一次写控制 USER信号 +* @param p_last_wr_ddr_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_ddr_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_ddr_addr_h, + ZXIC_UINT32 *p_last_wr_ddr_addr_l, ZXIC_UINT32 *p_last_wr_ddr_len, + ZXIC_UINT32 *p_last_wr_ddr_user, ZXIC_UINT32 *p_last_wr_ddr_onload_cnt); + +/***********************************************************/ +/** 获取写DDR通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_ddr_resp_err_cnt 写DDR通道返回错误次数 
+* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_wr_ddr_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_ddr_resp_err_cnt); + +/***********************************************************/ +/** 读取axim 最近一次写完成相关信息 +* @param dev_id 芯片id +* @param p_last_wr_fin_addr_h axim最近一次写控制高地址 +* @param p_last_wr_fin_addr_l axim最近一次写控制低地址 +* @param p_last_wr_fin_len axim最近一次写控制长度 +* @param p_last_wr_fin_user axim最近一次写控制 USER信号 +* @param p_last_wr_fin_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_fin_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_fin_addr_h, + ZXIC_UINT32 *p_last_wr_fin_addr_l, ZXIC_UINT32 *p_last_wr_fin_len, + ZXIC_UINT32 *p_last_wr_fin_user, ZXIC_UINT32 *p_last_wr_fin_onload_cnt); + +/***********************************************************/ +/** 获取写完成通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_fin_resp_err_cnt 写完成通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_wr_fin_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_fin_resp_err_cnt); + +/***********************************************************/ +/** 读取 DTB 各通道状态机 +* @param dev_id 芯片id +* @param p_wr_ctrl_state_info 写控制状态机 +* @param p_rd_table_state_info 读表状态机 +* @param p_rd_pd_state_info 读描述符数据状态机 +* @param p_wr_ddr_state_info 写数据到ddr的状态机 +* @param p_wr_fin_state_info 写结束标志的状态机 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_state_info_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_wr_ctrl_state_info, + ZXIC_UINT32 *p_rd_table_state_info, + ZXIC_UINT32 *p_rd_pd_state_info, + ZXIC_UINT32 *p_wr_ddr_state_info, + ZXIC_UINT32 
*p_dump_cmd_state_info); + +/***********************************************************/ +/** 各通道错误统计打印 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_channels_axi_resp_err_cnt_prt(DPP_DEV_T *dev); + +/***********************************************************/ +/** AXIM最近一次操作信息记录打印 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_axi_last_operate_info_prt(DPP_DEV_T *dev); + +/***********************************************************/ +/** DTB 各通道状态机信息获取 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_channels_state_info_prt(DPP_DEV_T *dev); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pbu.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pbu.h new file mode 100644 index 000000000000..7decaad5baf7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pbu.h @@ -0,0 +1,444 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pbu.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2014/04/14 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_PBU_H_ +#define _DPP_PBU_H_ + +#include "dpp_nppu_reg.h" +#include "dpp_pbu_api.h" + +#if ZXIC_REAL("macro") +/****************************************************************************** + * START: 宏定义 * + *****************************************************************************/ 
+#define DPP_PBU_IND_CMD_WRT_FLAG (0) +#define DPP_PBU_IND_CMD_RD_FLAG (1) + +#define PBU_FILE_PATH ("sa500t_pbu_output.txt") +#define PBU_FILE_PATH_RAM ("sa500t_pbu_output_ram.txt") + +#define DPP_PBU_PORT_NUM (119) /** 物理端口号个数(0-128) */ +#define DPP_PBU_PORT_TH_MAX (16380) /** IDMA指针阈值最大值 */ + +#define DPP_PBU_LIF0_PORT_NUM \ + (48) /** lif0 48个通道对应的物理端口号个数(0-47) */ +#define DPP_PBU_LIF1_PORT_NUM \ + (56) /** lif1 8个通道对应的物理端口号个数(48~55) */ +#define DPP_PBU_TM_LOOP_PORT_NUM (113) /** TM还回通道113 */ + +#define DPP_PBU_COS_NUM (8) /** COS个数*/ +#define DPP_PBU_ALL_FTM_LINK_TH_NUM (6) /** COS个数*/ +#define DPP_PBU_IDMA_MAX_TH (16380) /** IDMA指针阈值最大值 */ +#define DPP_PBU_LIF_MAX_TH (16384) /** LIF指针阈值最大值 */ +#define DPP_PBU_MC_MAX_TH (16380) /** 组播指针阈值最大值 */ +#define DPP_PBU_PORT_COS_MAX_TH (16380) /** 单个端口指针阈值最大值 */ +#define DPP_PBU_MC_MAX_DIFF_TH \ + (255) /** 组播指针申请逻辑复制和微码复制直接的阈值差最大值 */ +#define DPP_PBU_MC_MIN_DIFF_TH \ + (5) /** 组播指针申请逻辑复制和微码复制直接的阈值差最小值 */ +#define DPP_PBU_CAP_DATA_MODE_MIN (0) /** PBU抓包数据模式最小值 */ +#define DPP_PBU_CAP_DATA_MODE_MAX (12) /** PBU抓包数据模式最大值 */ + +#define DPP_PBU_CAP_FILTER_ADDR (0x185) +#define DPP_PBU_CAP_PKT_NUM (64) /** 抓包的最大分片个数 */ + +/****************************************************************************** + * END: 宏定义 * + *****************************************************************************/ +#endif + +#if ZXIC_REAL("struct") + +typedef enum npe_pbu_ptr_rotate_e { + DPP_PBU_TOTAL_PTR_ROTATE = 0X1, /**< @brief 全局指针翻转*/ + DPP_PBU_MC_PTR_ROTATE = 0X2, /**< @brief 组播指针翻转*/ + DPP_PBU_PORT_PTR_ROTATE = 0X4, /**< @brief 端口指针翻转*/ + +} DPP_PBU_PTR_ROTATE_E; + +/****************************************************************************** + * START: 类型定义 * + *****************************************************************************/ +typedef struct dpp_pbu_cnt_para_t { + ZXIC_UINT32 total_cnt; /* 总指针计数 */ + ZXIC_UINT32 idma_pub_cnt; /* idma共享指针计数 */ + ZXIC_UINT32 lif_pub_cnt; /* lif共享指针计数 */ + ZXIC_UINT32 mc_total_cnt; /* 
mc总指针计数 */ +} DPP_PBU_CNT_PARA_T; +/* +typedef struct dpp_pbu_module_fc_rdy_t +{ + ZXIC_UINT32 pbu_oam_send_fc_rdy; + ZXIC_UINT32 pbu_lif_ctrl_rdy; + ZXIC_UINT32 pbu_odma_fc_rdy; + ZXIC_UINT32 pbu_tm_fc_rdy; + ZXIC_UINT32 pbu_idma_cos_rdy; +}DPP_PBU_MODULE_FC_RDY_T; +*/ + +typedef struct dpp_mf_info_t { + ZXIC_CHAR *name; + ZXIC_UINT32 start_bit; + ZXIC_UINT32 end_bit; +} DPP_MF_INFO_T; + +typedef enum dpp_pbu_ind_mem_id_e { + DPP_PBU_IDMATH_RAM = 0, /**< @brief 端口阈值RAM,可读可写 */ + DPP_PBU_MACTH_RAM = 1, /**< @brief 端口COS阈值RAM,可读可写 */ + DPP_PBU_CFG_IND_MEM_ID_INVALID = + 2, /**< @brief PBU CFG模块使用的内部表的个数 */ +} DPP_PBU_CFG_IND_MEM_ID_E; + +typedef enum dpp_pbu_stat_ind_mem_id_e { + DPP_PBU_PORT_CNT = 1, /**< @brief 端口的指针计数,只读 */ + DPP_PBU_STAT = 2, /**< @brief PBU调试计数,只读 */ + DPP_PBU_IFB_CFG = 3, /**< @brief IFB中192字节报文,只读 */ + DPP_PBU_CAPTURE_CFG = 4, /**< @brief 报文抓包 可读可写 */ + DPP_PBU_PORT_PUB_CNT = 5, + DPP_PBU_IND_MEM_ID_INVALID = 6, /**< @brief PBU模块使用的内部表的个数*/ +} DPP_PBU_STAT_IND_MEM_ID_E; + +typedef enum dpp_idma_stat_ind_mem_id_e { + DPP_IDMA_STAT_RAM = 0, + DPP_IDMA_DEBUG_RAM = 1, + DPP_IDMA_IND_MEM_ID_INVALID = + 2, /**< @brief IDMA模块使用的内部表的个数*/ +} DPP_IDMA_STAT_IND_MEM_ID_E; + +typedef enum dpp_pbu_other_cnt_id_e { + DPP_PBU_IDMA_PTR_REQ_CNT = + 0, /**< @brief idma指针申请计数,含无效申请 */ + DPP_PBU_IDMA_RFD_WR_CNT = 1, /**< @brief idma写RFD计数 */ + DPP_PBU_IDMA_IFB_WR1_CNT = 2, /**< @brief idma写ifb高64字节计数 */ + DPP_PBU_IDMA_IFB_WR2_CNT = 3, /**< @brief idma写ifb低128字节计数 */ + DPP_PBU_PPU_IFB_RD_CNT = 4, /**< @brief ppu读ifb申请计数 */ + DPP_PBU_IFB_PPU_RDRSP_CNT = 5, /**< @brief ifb返回给ppu包头计数 */ + DPP_PBU_ODMA_RECY_PTR_CNT = 6, /**< @brief odma指针回收个数 */ + DPP_PBU_PPU_PF_REQ0_CNT = 7, /**< @brief ppu微码复制申请指针计数 */ + DPP_PBU_PBU_PF_RSP0_CNT = 8, /**< @brief pbu返回微码复制申请指针计数 */ + DPP_PBU_PPU_PF_REQ1_CNT = 9, /**< @brief ppu逻辑复制申请指针计数 */ + DPP_PBU_PBU_PF_RSP1_CNT = 10, /**< @brief pbu返回逻辑复制申请指针计数 */ + DPP_PBU_PPU_USE_PTR_CNT = 11, /**< @brief idma有效申请指针计数 */ + DPP_PBU_PPU_WRBK_CNT 
= 12, /**< @brief ppu回写计数 */ + DPP_PBU_PPU_REORDER_RSP_CNT = 13, /**< @brief pbu返回ppu回写参数计数 */ + DPP_PBU_SE_PBU_KEY_VLD_CNT = 14, /**< @brief 深度解析请求计数 */ + DPP_PBU_PBU_SE_RSP_VLD_CNT = 15, /**< @brief 深度解析返回计数 */ + DPP_PBU_ODMA_IFB_RD1_CNT = 16, /**< @brief odma读ifb接口1读请求计数 */ + DPP_PBU_ODMA_IFB_RD2_CNT = 17, /**< @brief odma读ifb接口2读请求计数 */ + DPP_PBU_IDMA_O_ISU_PKT_CNT = 18, /**< @brief idma输出报文总计数 */ + DPP_PBU_IDMA_O_ISU_EPKT_CNT = + 19, /**< @brief idma输出带error标记的报文总计数 */ + DPP_PBU_IDMA_DISPKT_CNT = 20, /**< @brief idma丢弃报文总数 */ + DPP_PBU_OTHER_CNT_ID_INVALID, /**< @brief PBU模块other计数的个数*/ +} DPP_PBU_OTHER_CNT_ID_E; + +typedef struct dpp_pbu_port_ptr_cnt_t { + ZXIC_UINT32 peak_port_cnt; /**< @brief 端口指针峰值 */ + ZXIC_UINT32 current_port_cnt; /**< @brief 端口指针占用 */ +} DPP_PBU_PORT_PTR_CNT_T; + +typedef struct dpp_pbu_ifb_data_t { + ZXIC_UINT32 pbu_ifb_data[64]; /**< @brief ifb数据 */ +} DPP_PBU_IFB_DATA_T; + +typedef struct dpp_pbu_all_ftm_link_th_t { + ZXIC_UINT32 total_congest_th[7]; /**< @brief 拥塞阈值*/ +} DPP_PBU_ALL_FTM_LINK_TH_T; + +typedef struct { + ZXIC_UINT32 pbu_lif_group0_pfc_rdy[12]; +} DPP_PBU_LIF_GROUP_PFC_RDY; + +/****************************************************************************** + * END: 类型定义 * + *****************************************************************************/ +#endif + +#if 1 +/****************************************************************************** + * START: 函数声明 * + *****************************************************************************/ +#if 0 +DPP_STATUS dpp_pbu_idma_public_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_idma_public_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); +DPP_STATUS dpp_pbu_lif_public_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_lif_public_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); +DPP_STATUS dpp_pbu_idma_total_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_idma_total_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); 
+DPP_STATUS dpp_pbu_lif_total_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_lif_total_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); +DPP_STATUS dpp_pbu_mc_total_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_mc_total_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); + +DPP_STATUS dpp_pbu_mc_cos_para_get(ZXIC_UINT32 dev_id, + DPP_PBU_MC_COS_PARA_T *p_para); +DPP_STATUS dpp_pbu_sa_ip_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en); +DPP_STATUS dpp_pbu_sa_ip_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_en); +DPP_STATUS dpp_pbu_cnt_ovfl_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mode); +DPP_STATUS dpp_pbu_cnt_ovfl_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_mode); +DPP_STATUS dpp_pbu_cnt_rdclr_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mode); +DPP_STATUS dpp_pbu_cnt_rdclr_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_mode); +DPP_STATUS dpp_pbu_mc_diff_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th); +DPP_STATUS dpp_pbu_mc_diff_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); +DPP_STATUS dpp_pbu_peak_port_cnt_clr_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_no, ZXIC_UINT32 peak_port_cnt_clr); +DPP_STATUS dpp_pbu_peak_port_cnt_clr_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_no, ZXIC_UINT32 *p_peak_port_cnt_clr); +DPP_STATUS dpp_pbu_all_ftm_crdt_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_no, DPP_NPPU_PBU_CFG_ALL_FTM_CRDT_TH_T *p_all_ftm_crdt_th); +DPP_STATUS dpp_pbu_all_ftm_crdt_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_no, DPP_NPPU_PBU_CFG_ALL_FTM_CRDT_TH_T *p_all_ftm_crdt_th); +DPP_STATUS dpp_pbu_all_ftm_link_th_set(ZXIC_UINT32 dev_id, + DPP_PBU_ALL_FTM_LINK_TH_T *p_para); +DPP_STATUS dpp_pbu_all_ftm_link_th_get(ZXIC_UINT32 dev_id, + DPP_PBU_ALL_FTM_LINK_TH_T *p_para); +DPP_STATUS dpp_pbu_ftm_total_congest_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 total_congest_th); +DPP_STATUS dpp_pbu_ftm_total_congest_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *total_congest_th); +DPP_STATUS dpp_pbu_crdt_mode_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 crdt_mode); 
+DPP_STATUS dpp_pbu_crdt_mode_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_crdt_mode); +DPP_STATUS dpp_pbu_ind_reg_mode_status_check(ZXIC_UINT32 dev_id, ZXIC_UINT32 sub_module_ind_status_reg, ZXIC_UINT32 sleep_time); +DPP_STATUS dpp_pbu_ind_cmd_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, + ZXIC_UINT32 wrt_rd_flag); +DPP_STATUS dpp_pbu_ind_data_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 data_reg_base); +DPP_STATUS dpp_pbu_ind_data_reg_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 data_reg_base); +DPP_STATUS dpp_pbu_cfg_ind_wrt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_PBU_CFG_IND_MEM_ID_E mem_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_pbu_cfg_ind_rd(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_PBU_CFG_IND_MEM_ID_E mem_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +#endif +DPP_STATUS dpp_pbu_port_th_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para); +DPP_STATUS dpp_pbu_port_cos_th_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para); +DPP_STATUS dpp_pbu_pfc_delay_time_set(DPP_DEV_T *dev, ZXIC_UINT64 delayTime); +DPP_STATUS dpp_pbu_pfc_delay_time_get(DPP_DEV_T *dev, ZXIC_UINT64 *delayTime); +#if 0 +DPP_STATUS dpp_pbu_cnt_clr_all(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_int_flag_prt(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_stat_cnt_para_get(ZXIC_UINT32 dev_id, + DPP_PBU_CNT_PARA_T *p_para); +DPP_STATUS dpp_pbu_stat_thram_init_done_check(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_rdy); +DPP_STATUS dpp_pbu_stat_fptr_init_done_check(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_rdy); +DPP_STATUS dpp_pbu_fc_rdy_get(ZXIC_UINT32 dev_id, + DPP_NPPU_PBU_STAT_PBU_FC_RDY_T *p_pbu_module_fc); +DPP_STATUS dpp_pbu_lif_fc_rdy_get(ZXIC_UINT32 dev_id, + DPP_NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY0_T *p_pbu_group0_rdy0); +DPP_STATUS dpp_pbu_lif_pfc_rdy_get(ZXIC_UINT32 dev_id, DPP_PBU_LIF_GROUP_PFC_RDY *p_lif_group0_pfc_rdy); 
+DPP_STATUS dpp_pbu_pktrx_mr_pfc_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_pbu_pktrx_mr_pfc); +DPP_STATUS dpp_pbu_stat_reg_mode_status_check(ZXIC_UINT32 dev_id, ZXIC_UINT32 sub_module_ind_status_reg, ZXIC_UINT32 sleep_time); +DPP_STATUS dpp_pbu_stat_ind_cmd_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, + ZXIC_UINT32 wrt_rd_flag); +DPP_STATUS dpp_pbu_stat_ind_data_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_pbu_stat_ind_data_reg_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_pbu_stat_ind_wrt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_PBU_STAT_IND_MEM_ID_E mem_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_pbu_stat_ind_rd(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_PBU_STAT_IND_MEM_ID_E mem_id, + ZXIC_UINT32 len, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_pbu_stat_port_ptr_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_id, + DPP_PBU_PORT_PTR_CNT_T *p_port_ptr_cnt); +DPP_STATUS dpp_pbu_stat_ifb_req_vld_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_ifb_rsp_vld_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_odma_recy_ptr_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_mcode_pf_req_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_mcode_pf_rsp_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_logic_pf_req_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_logic_pf_rsp_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_ppu_use_ptr_pulse_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_ppu_wb_vld_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_ppu_reorder_para_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_se_dpi_key_vld_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); 
+DPP_STATUS dpp_pbu_stat_se_dpi_rsp_vld_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_odma_ifb_rd1_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_odma_ifb_rd2_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_mcode_pf_no_rsp_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_logic_pf_no_rsp_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_stat_ifb_data_get(ZXIC_UINT32 dev_id, + DPP_PBU_IFB_DATA_T *p_data); +DPP_STATUS dpp_pbu_stat_port_public_ptr_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_no, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_idma_cfg_cnt_ovfl_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mode); +DPP_STATUS dpp_idma_cfg_cnt_ovfl_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_mode); +DPP_STATUS dpp_idma_cfg_cnt_rdclr_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mode); +DPP_STATUS dpp_idma_cfg_cnt_rdclr_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_mode); +DPP_STATUS dpp_idma_stat_ind_reg_mode_status_check(ZXIC_UINT32 dev_id, ZXIC_UINT32 sub_module_ind_status_reg, ZXIC_UINT32 sleep_time); +DPP_STATUS dpp_idma_stat_ind_cmd_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, + ZXIC_UINT32 wrt_rd_flag); +DPP_STATUS dpp_idma_stat_ind_data_reg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_idma_stat_ind_data_reg_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_idma_stat_ind_wrt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_IDMA_STAT_IND_MEM_ID_E mem_id, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_idma_stat_ind_rd(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mem_addr, + DPP_IDMA_STAT_IND_MEM_ID_E mem_id, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_idma_stat_to_isu_total_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_idma_stat_to_isu_err_total_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_idma_stat_disc_total_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_cnt); 
+DPP_STATUS dpp_idma_stat_port_to_isu_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_no, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_idma_stat_port_to_isu_err_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_no, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_idma_stat_port_disc_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_no, + ZXIC_UINT32 *p_cnt); +DPP_STATUS dpp_pbu_cap_data_mode_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 mod_data); +DPP_STATUS dpp_pbu_pkt_capture_start(ZXIC_UINT32 dev_id, ZXIC_UINT32 pkt_num); +DPP_STATUS dpp_pbu_pkt_capture_stop(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_pkt_capture_mode_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 mode); +DPP_STATUS dpp_pbu_pkt_capture_cnt(ZXIC_UINT32 dev_id, ZXIC_UINT32 *pkt_num); +DPP_STATUS dpp_pbu_pkt_capture_dbg_print(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_pkt_capture_cnt_print(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_pkt_capture_print_one(ZXIC_UINT32 dev_id, ZXIC_UINT32 pkt_no); +DPP_STATUS dpp_pbu_pkt_capture_print(ZXIC_UINT32 dev_id, + ZXIC_UINT32 pkt_start_no, + ZXIC_UINT32 pkt_end_no); +DPP_STATUS dpp_pbu_pkt_capture_print_all(ZXIC_UINT32 dev_id); +DPP_STATUS dpp_pbu_pkt_capture_filter_set_bit(ZXIC_UINT32 dev_id, + ZXIC_UINT32 bit_no, + ZXIC_UINT32 filter_mode + ); +DPP_STATUS dpp_pbu_pkt_capture_filter_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 bit_start_no, + ZXIC_UINT32 bit_end_no, + ZXIC_UINT32 filter_mode); +DPP_STATUS dpp_pbu_pkt_capture_filter_mask_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 start, ZXIC_UINT32 end, ZXIC_UINT32 mask, ZXIC_UINT32 data); +DPP_STATUS dpp_pbu_pkt_capture_filter_pkt_mask_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 start, ZXIC_UINT32 end, ZXIC_UINT32 mask, ZXIC_UINT32 data); +DPP_STATUS dpp_pbu_pkt_capture_filter_sport_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 port); +DPP_STATUS dpp_pbu_pkt_capture_filter_dport_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 port); +DPP_STATUS dpp_pbu_pkt_capture_filter_clr_bit(ZXIC_UINT32 dev_id, + ZXIC_UINT32 bit_no); +DPP_STATUS dpp_pbu_pkt_capture_filter_clr(ZXIC_UINT32 dev_id, + ZXIC_UINT32 
bit_start_no, + ZXIC_UINT32 bit_end_no); +DPP_STATUS dpp_pbu_pkt_capture_filter_clr_all(ZXIC_UINT32 dev_id); +/***********************************************************/ +/** 抓包报文分片打印by mf bit +* @param dev_id 芯片ID +* @param pkt_no 要打印的分片号 +* +* @return +* @remark 无 +* @see +* @author czd @date 2015/04/29 +************************************************************/ +DPP_STATUS dpp_pbu_pkt_capture_print_one_mf_by_bit(ZXIC_UINT32 dev_id, ZXIC_UINT32 pkt_no); + +DPP_STATUS dpp_pbu_glb_mgr_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_flag, + ZXIC_UINT32 *p_size, + ZXIC_UINT8 **pp_data_buff); +DPP_STATUS dpp_pbu_glb_mgr_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 size, + ZXIC_UINT8 *p_data_buff); +DPP_STATUS dpp_pbu_glb_size_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_size); + +ZXIC_UINT32 sim_soc_mf_field_print(ZXIC_UINT8 *pkt_buff); + + +DPP_STATUS dpp_pbu_pkt_capture_print_mf_by_bit(ZXIC_UINT32 dev_id, + ZXIC_UINT32 pkt_start_no, + ZXIC_UINT32 pkt_end_no); +DPP_STATUS dpp_pbu_pkt_capture_print_one_simple_mf_by_bit(ZXIC_UINT32 dev_id, ZXIC_UINT32 pkt_no); + +DPP_STATUS dpp_pbu_pkt_capture_print_all_mf_by_bit(ZXIC_UINT32 dev_id); + +#endif + +/****************************************************************************** + * END: 函数声明 * + *****************************************************************************/ +#endif + +#endif /* _DPP_PBU_H_ */ +/* 必须有个空行,否则可能编不过 */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pktrx_cfg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pktrx_cfg.h new file mode 100644 index 000000000000..d373cb044327 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/nppu/dpp_pktrx_cfg.h @@ -0,0 +1,35 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pktrx_cfg.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : zzh +* 完成日期 : 2015/02/06 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 
修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PKTRX_CFG_H_ +#define _DPP_PKTRX_CFG_H_ + +#include "dpp_pktrx_api.h" +#include "dpp_reg.h" + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_0(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_0); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_1(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_1); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_2(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_2); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_3(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_3); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/ppu/dpp_ppu.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/ppu/dpp_ppu.h new file mode 100644 index 000000000000..663caefd5fb6 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/ppu/dpp_ppu.h @@ -0,0 +1,109 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_ppu.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/03/18 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PPU_H_ +#define _DPP_PPU_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_module.h" +#include "dpp_ppu_api.h" +#include "dpp_ppu_reg.h" +#include "zxic_comm_thread.h" + +#define PPU_CLS_ME_NUM (8) +#define PPU_INSTR_MEM_NUM \ + (3) /*ppu中的微码指令空间数量 2个cluster 共用一个指令空间*/ +#define PPU_INSTR_REG_NUM (4) /*ppu中指令空间中的数据寄存器数目*/ +#define PPU_INSTR_NUM_MAX (32 * 1024) /* xjw mod at 18.6.2 from 16k to 32k */ +#define PPU_SDT_IDX_MIN (0) +#define PPU_SDT_IDX_MAX (255) +#define PPU_DUP_IDX_MIN (0) +#define PPU_DUP_IDX_MAX (63) +#define PPU_INSTR_COL_MAX (4) + +/** ME指令调试中断*/ +#define PPU_ME0_INT_BT_START (0) +#define PPU_ME0_INT_BT_LEN (1) +#define 
PPU_ME1_INT_BT_START (1) +#define PPU_ME1_INT_BT_LEN (1) +#define PPU_ME2_INT_BT_START (2) +#define PPU_ME2_INT_BT_LEN (1) +#define PPU_ME3_INT_BT_START (3) +#define PPU_ME3_INT_BT_LEN (1) +#define PPU_ME4_INT_BT_START (4) +#define PPU_ME4_INT_BT_LEN (1) +#define PPU_ME5_INT_BT_START (5) +#define PPU_ME5_INT_BT_LEN (1) +#define PPU_ME6_INT_BT_START (6) +#define PPU_ME6_INT_BT_LEN (1) +#define PPU_ME7_INT_BT_START (7) +#define PPU_ME7_INT_BT_LEN (1) + +#define DPP_FPGA_MAX_FLOWTCAM_NUM (32) +#define DPP_PPU_CLS_0_BIT_MAP (1 << 0) /*bit0 = 1 代表cluster0 启动*/ +#define DPP_PPU_CLS_1_BIT_MAP (1 << 1) /*bit1 = 1 代表cluster1 启动*/ +#define DPP_PPU_CLS_2_BIT_MAP (1 << 2) /*bit2 = 1 代表cluster2 启动*/ +#define DPP_PPU_CLS_3_BIT_MAP (1 << 3) /*bit3 = 1 代表cluster3 启动*/ +#define DPP_PPU_CLS_4_BIT_MAP (1 << 4) /*bit4 = 1 代表cluster4 启动*/ +#define DPP_PPU_CLS_5_BIT_MAP (1 << 5) /*bit5 = 1 代表cluster5 启动*/ + +#define DPP_PPU_CLS_ALL_START (0x3F) /*打开所有cluster*/ + +/*该结构在ppu初始化的时候生成 全局不可修改*/ +typedef struct dpp_ppu_cls_bitmap_t { + ZXIC_UINT32 cls_use + [DPP_PPU_CLUSTER_NUM]; /*记录配置生效的 cluster 由bitmap解析获得*/ + ZXIC_UINT32 instr_mem + [PPU_INSTR_MEM_NUM]; /*记录配置生效的 指令空间索引号, 每两个cluster 共享一个指令空间*/ +} DPP_PPU_CLS_BITMAP_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_t { + ZXIC_UINT32 rsk_319_288; + ZXIC_UINT32 rsk_287_256; + ZXIC_UINT32 rsk_255_224; + ZXIC_UINT32 rsk_223_192; + ZXIC_UINT32 rsk_191_160; + ZXIC_UINT32 rsk_159_128; + ZXIC_UINT32 rsk_127_096; + ZXIC_UINT32 rsk_095_064; + ZXIC_UINT32 rsk_063_032; + ZXIC_UINT32 rsk_031_000; + +} DPP_PPU_PPU_COP_THASH_RSK_T; + +ZXIC_UINT32 dpp_ppu_cls_use_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 cluster_id, + ZXIC_UINT32 flag); +ZXIC_UINT32 dpp_ppu_cls_use_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 cluster_id); +ZXIC_UINT32 dpp_ppu_instr_mem_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 mem_id, + ZXIC_UINT32 flag); +ZXIC_UINT32 dpp_ppu_parse_cls_bitmap(ZXIC_UINT32 dev_id, ZXIC_UINT32 bitmap); + +DPP_STATUS dpp_ppu_ppu_cop_thash_rsk_set(DPP_DEV_T *dev, + 
DPP_PPU_PPU_COP_THASH_RSK_T *p_para); +DPP_STATUS +dpp_ppu_ppu_cop_thash_rsk_get(DPP_DEV_T *dev, + DPP_PPU_PPU_COP_THASH_RSK_T *p_ppu_cop_thash_rsk); +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_apt_se.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_apt_se.h new file mode 100644 index 000000000000..54cd45608d48 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_apt_se.h @@ -0,0 +1,144 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se.h +* 文件标识 : +* 内容摘要 : SE适配业务接口数据结构和函数声明 +* 其它说明 : +* 当前版本 : +* 作 者 : chenqin00181032 +* 完成日期 : 2022/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_APT_SE_H_ +#define _DPP_APT_SE_H_ + +#include "dpp_apt_se_api.h" + +#define SDT_OPER_ADD ((ZXIC_UINT32)(0)) +#define SDT_OPER_DEL ((ZXIC_UINT32)(1)) + +#define SDT_DDR_RW_128BIT ((ZXIC_UINT32)(0)) +#define SDT_DDR_RW_256BIT ((ZXIC_UINT32)(1)) +#define SDT_DDR_RW_512BIT ((ZXIC_UINT32)(2)) + +#define DDR_128BIT_BYTE ((ZXIC_UINT32)(16)) + +#define ERAM_ENTRY_SOFT_MAX ((ZXIC_UINT32)(16)) /*eram最大位宽128bit*/ +#define HASH_ENTRY_SOFT_MAX ((ZXIC_UINT32)(64)) /*hash最大位宽512bit*/ +#define ACL_ENTRY_SOFT_MAX \ + ((ZXIC_UINT32)(160)) /*acl最大位宽640bit key+640bit mask*/ + +typedef struct se_apt_eram_func_t { + ZXIC_UINT32 + opr_mode; /**cpu读写位宽模式DPP_ERAM128_OPR_MODE_E 0:128b 1:64b 2:1b 3:32b <@*/ + ZXIC_UINT32 + rd_mode; /*读清模式DPP_ERAM128_RD_CLR_MODE_E,0:正常读 1:读清模式*/ + DPP_APT_ERAM_SET_FUNC eram_set_func; + DPP_APT_ERAM_GET_FUNC eram_get_func; +} SE_APT_ERAM_FUNC_T; + +typedef struct se_apt_ddr_func_t { + ZXIC_UINT32 ddr_tbl_depth; /*ddr表项深度,单位与ddr读写模式一致*/ + DPP_APT_DDR_SET_FUNC ddr_set_func; + DPP_APT_DDR_GET_FUNC ddr_get_func; +} SE_APT_DDR_FUNC_T; + +typedef struct se_apt_acl_func_t { + 
ZXIC_UINT32 sdt_partner; + DPP_APT_ACL_ENTRY_SET_FUNC acl_set_func; + DPP_APT_ACL_ENTRY_GET_FUNC acl_get_func; +} SE_APT_ACL_FUNC_T; + +typedef struct se_apt_hash_func_t { + DPP_APT_HASH_ENTRY_SET_FUNC hash_set_func; + DPP_APT_HASH_ENTRY_GET_FUNC hash_get_func; +} SE_APT_HASH_FUNC_T; + +typedef struct se_apt_lpm_func_t { + DPP_APT_LPM_ENTRY_SET_FUNC lpm_set_func; + DPP_APT_LPM_ENTRY_GET_FUNC lpm_get_func; +} SE_APT_LPM_FUNC_T; +typedef struct se_apt_callback_t { + ZXIC_UINT32 sdtNo; /** <@brief sdt no 0~255 */ + ZXIC_UINT32 table_type; /** <@brief 查找表项类型 */ + + union { + SE_APT_ERAM_FUNC_T eramFunc; + SE_APT_DDR_FUNC_T ddrFunc; + SE_APT_ACL_FUNC_T aclFunc; + SE_APT_HASH_FUNC_T hashFunc; + SE_APT_LPM_FUNC_T lpmFunc; + } se_func_info; +} SE_APT_CALLBACK_T; + +typedef struct se_apt_eram_convert_t { + ZXIC_UINT32 sdt_no; + DPP_APT_ERAM_SET_FUNC eram_set_func; + DPP_APT_ERAM_GET_FUNC eram_get_func; +} SE_APT_ERAM_CONVERT_T; + +typedef struct se_apt_ddr_convert_t { + ZXIC_UINT32 sdt_no; + DPP_APT_DDR_SET_FUNC ddr_set_func; + DPP_APT_DDR_GET_FUNC ddr_get_func; +} SE_APT_DDR_CONVERT_T; + +typedef struct se_apt_hash_convert_t { + ZXIC_UINT32 sdt_no; + DPP_APT_HASH_ENTRY_SET_FUNC hash_set_func; + DPP_APT_HASH_ENTRY_GET_FUNC hash_get_func; +} SE_APT_HASH_CONVERT_T; + +typedef struct se_apt_acl_convert_t { + ZXIC_UINT32 sdt_no; + DPP_APT_ACL_ENTRY_SET_FUNC acl_set_func; + DPP_APT_ACL_ENTRY_GET_FUNC acl_get_func; +} SE_APT_ACL_CONVERT_T; + +typedef struct se_apt_lpm_convert_t { + ZXIC_UINT32 sdt_no; + DPP_APT_LPM_ENTRY_SET_FUNC lpm_set_func; + DPP_APT_LPM_ENTRY_GET_FUNC lpm_get_func; +} SE_APT_LPM_CONVERT_T; + +typedef struct se_apt_eram_soft_t { + ZXIC_UINT32 index; + ZXIC_UINT32 buff[ERAM_ENTRY_SOFT_MAX / 4]; +} SE_APT_ERAM_SOFT_T; + +typedef struct se_apt_eram_hash_t { + ZXIC_UINT32 index; + ZXIC_UINT8 aucData[HASH_ENTRY_SOFT_MAX]; +} SE_APT_HASH_SOFT_T; + +typedef struct se_apt_eram_acl_t { + ZXIC_UINT32 index; + ZXIC_UINT8 aucData[ACL_ENTRY_SOFT_MAX]; +} SE_APT_ACL_SOFT_T; 
+ +ZXIC_SINT32 dpp_apt_table_key_cmp(void *p_new_key, void *p_old_key, + ZXIC_UINT32 key_len); +SE_APT_CALLBACK_T *dpp_apt_get_func(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no); +DPP_STATUS dpp_apt_set_callback(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 table_type, ZXIC_VOID *pData); +DPP_STATUS dpp_apt_sw_list_insert(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len); +DPP_STATUS dpp_apt_sw_list_search(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len); +DPP_STATUS dpp_apt_sw_list_delete(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len); +DPP_STATUS dpp_apt_get_zblock_index(ZXIC_UINT32 zblock_bitmap, + ZXIC_UINT32 *zblk_idx); +DPP_STATUS dpp_apt_dtb_res_init(DPP_DEV_T *dev); +DPP_STATUS dpp_apt_se_callback_init(DPP_DEV_T *dev); +ZXIC_UINT32 dpp_apt_get_sdt_partner(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no); +DPP_STATUS dpp_se_res_mem_alloc(DPP_DEV_T *dev); +DPP_STATUS dpp_se_res_mem_free(DPP_DEV_T *dev); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_etcam.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_etcam.h new file mode 100644 index 000000000000..225f05af03f0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_etcam.h @@ -0,0 +1,160 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_etcam.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/04/03 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_ETCAM_H_ +#define _DPP_ETCAM_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define DPP_ETCAM_BLOCK_NUM \ + (8) /* eTcam中Block的数目 16:dpp+的etcam block 数目 */ +#define DPP_ETCAM_TBLID_NUM (8) /* eTcam支持的业务表号 */ +#define DPP_ETCAM_RAM_NUM (8) /* Block内部RAM的个数 */ +#define DPP_ETCAM_RAM_WIDTH (80U) /* Block内部单个RAM的宽度,比特为单位 */ + +#ifdef 
DPP_FPGA_TEST_BAORD_SA500FT +#define DPP_ETCAM_RAM_DEPTH (16U) /* Block内部单个RAM的深度 */ +#else +#define DPP_ETCAM_RAM_DEPTH (512U) /* Block内部单个RAM的深度 */ +#endif + +#define DPP_ETCAM_WR_MASK_MAX \ + (((ZXIC_UINT32)1 << DPP_ETCAM_RAM_NUM) - 1) /*255*/ +#define DPP_ETCAM_WIDTH_MIN \ + (DPP_ETCAM_RAM_WIDTH) /* eTcam最小数据位宽,比特为单位 */ +#define DPP_ETCAM_WIDTH_MAX \ + (DPP_ETCAM_RAM_NUM * \ + DPP_ETCAM_RAM_WIDTH) /* eTcam最大数据位宽,比特为单位 */ + +#define DPP_ETCAM_DEFAULT_MIN (0) +#define DPP_ETCAM_ONE_BIT_MAX (1) + +#define DPP_ETCAM_PORT_NUM (1) + +typedef enum dpp_etcam_data_type_e { + DPP_ETCAM_DTYPE_MASK = 0, + DPP_ETCAM_DTYPE_DATA = 1, +} DPP_ETCAM_DATA_TYPE_E; + +/** etcam 条目vld信息 */ +typedef struct dpp_etcam_entry_vld_t { + ZXIC_UINT8 + vld; /** <@brief 标示每个条目的使用状态,每bit为指示当前条目是否被占用 */ + ZXIC_UINT8 rsv[3]; /** <@brief 数据对齐,勿需关心 */ +} DPP_ETCAM_ENTRY_VLD_T; + +/* error code */ +#define DPP_STAT_ETCAM_RC_BASE (0x6000) +#define DPP_ETCAM_RC_INVALID_PARA (DPP_STAT_ETCAM_RC_BASE | 0x0) + +/* macro function */ +#define DPP_ETCAM_ENTRY_SIZE_GET(entry_mode) \ + (((ZXIC_UINT32)DPP_ETCAM_RAM_WIDTH << (3 - entry_mode)) / 8) + +/* api */ +DPP_STATUS dpp_etcam_dm_to_xy(DPP_ETCAM_ENTRY_T *p_dm, DPP_ETCAM_ENTRY_T *p_xy, + ZXIC_UINT32 len); + +DPP_STATUS dpp_etcam_xy_to_dm(DPP_ETCAM_ENTRY_T *p_dm, DPP_ETCAM_ENTRY_T *p_xy, + ZXIC_UINT32 len); + +DPP_STATUS dpp_etcam_block_tbl_id_set(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 tbl_id); + +DPP_STATUS dpp_etcam_block_tbl_id_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_tbl_id); + +DPP_STATUS dpp_etcam_block_baddr_set(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 base_addr); + +DPP_STATUS dpp_etcam_block_baddr_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_base_addr); + +DPP_STATUS dpp_etcam_cpu_afull_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_cpu_afull); + +DPP_STATUS dpp_etcam_ind_cmd_set(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, + ZXIC_UINT32 data_or_mask, ZXIC_UINT32 
wr_mask, + ZXIC_UINT32 opr_type, + ZXIC_UINT32 tacm_reg_flag, + ZXIC_UINT32 row_mask_flag, ZXIC_UINT32 vben, + ZXIC_UINT32 vbit); + +/***********************************************************/ +/** 添加eTcam表条目 +* @param dev_id 设备号 +* @param addr 每个block中的ram地址,位宽为8*80bit +* @param block_idx block编号,范围0~15 +* @param wr_mask 写表掩码,共8bit,每bit控制ram中对应位置的80bit数据是否有效 +* @param p_entry 条目数据,data和mask +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_entry_add(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, ZXIC_UINT32 wr_mask, + ZXIC_UINT32 opr_type, + DPP_ETCAM_ENTRY_T *p_entry); + +/***********************************************************/ +/** 删除eTcam表项条目 +* @param dev_id 设备号 +* @param addr 每个block中的ram地址,位宽为8*80bit +* @param block_idx block的编号,范围0~15 +* @param wr_mask 写表掩码,共8bit,每bit控制ram中对应位置的80bit数据是否有效 +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_entry_del(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, ZXIC_UINT32 wr_mask); + +ZXIC_UINT32 dpp_etcam_entry_cmp(DPP_ETCAM_ENTRY_T *p_entry_dm, + DPP_ETCAM_ENTRY_T *p_entry_xy); + +ZXIC_UINT32 dpp_etcam_ind_data_reg_opr_mask_get(ZXIC_UINT32 mask); + +#if ZXIC_REAL("调试计数") + +typedef struct dpp_etcam_port_cnt { + ZXIC_UINT32 as_etcam_req_cnt; + ZXIC_UINT32 etcam_as_index_cnt; + ZXIC_UINT32 etcam_not_hit_cnt; +} DPP_ETCAM_PORT_CNT_T; + +typedef struct dpp_etcam_dbg_cnt { + DPP_ETCAM_PORT_CNT_T dpp_etcam_port_cnt[DPP_ETCAM_PORT_NUM]; + ZXIC_UINT32 table_id_not_match_cnt; + ZXIC_UINT32 table_id_clash01_cnt; +} DPP_ETCAM_DBG_CNT_T; + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_se.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_se.h new file mode 100644 index 000000000000..7050a202c7db --- 
/dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_se.h @@ -0,0 +1,777 @@ +/************************************************************** +* 版权所�? (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_se.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* �? �? : 王春�? +* 完成日期 : 2014/03/11 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* �? �? �?: +* �? �? �?: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_SE_H_ +#define _DPP_SE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_se_api.h" + +#define DPP_HASH_ID_MIN (0) +#define DPP_HASH_ID_MAX (3) +#define DPP_HASH_ID_NUM (4) + +#define HASH_BULK_ID_MIN (0) +#define HASH_BULK_ID_MAX (7) +#define HASH_BULK_NUM (8) /* 每个Hash引擎存储资源划分的块�? */ + +#define CRC_POLY_SEL_MIN (0) +#define CRC_POLY_SEL_MAX (3) + +#define DPP_LPM_ID_MIN (4) +#define DPP_LPM_ID_MAX (5) +#define DPP_LPM_ID_NUM (2) + +#define DPP_ETCAM_ID_MIN (0) +#define DPP_ETCAM_ID_MAX (0) +#define DPP_ETCAM_ID_NUM (1) + +#define DPP_AGE_TBL_ID_MIN (0) +#define DPP_AGE_TBL_ID_MAX (15) + +#define DPP_SMMU1_DDR_GRP_NUM (1) + +//#define DPP_SMMU1_DIR_TBL_BANK_MAX (15) +#define DPP_SMMU1_DIR_TBL_INDEX_MAX (255) +//#define DPP_SMMU1_UN_DIR_TBL_BANK_MAX (29) + +#define DPP_SMMU1_HASH_TBL_INDEX_BASE (1) +#define DPP_SMMU1_HASH_TBL_INDEX_MAX (31) +#define DPP_SMMU1_LPM_TBL_INDEX_BASE (33) +#define DPP_SMMU1_LPM_TBL_INDEX_MAX (3) +#define DPP_SMMU1_OAM_TBL_INDEX_BASE (37) +#define DPP_SMMU1_FTM_TBL_INDEX_BASE (38) +#define DPP_SMMU1_ETM_TBL_INDEX_BASE (39) +#define DPP_SMMU1_DIR_TBL_INDEX_BASE (40) + +#define DPP_SMMU1_SINGLE_BNAK_MAX_ADDR ((1 << 25) - 1) +#define DPP_SMMU1_SINGLE_BANK_MAX_BADDR (DPP_SMMU1_SINGLE_BNAK_MAX_ADDR >> 12) +#define DPP_SMMU1_TOTAL_BANK_NUM (8) +#define DPP_SMMU1_TOTAL_MAX_ADDR (0xffffffff) +#define DPP_SMMU1_TOTAL_MAX_BADDR (DPP_SMMU1_TOTAL_MAX_ADDR >> 12) +#define DPP_SMMU1_BADDR_MASK (0x7ffff800) +#define DPP_SMMU1_DDR_GROUP_NUM (1) +#define 
DPP_SMMU1_BANK_COPY_MAX_NUM (16) +#define DPP_SMMU1_READ_REG_MAX_NUM (16) +#define DPP_DIR_TBL_BUF_MAX_NUM (DPP_SMMU1_READ_REG_MAX_NUM) + +/*hash ext crc cfg*/ +#define HASH_ECC_EN_BT_START (2) +#define HASH_ECC_EN_BT_WIDTH (1) +#define HASH_BANK_COPY_BT_START (3) +#define HASH_BANK_COPY_BT_WIDTH (3) +#define HASH_BASE_ADDR_BT_START (6) +#define HASH_BASE_ADDR_BT_WIDTH (15) +/*hash learn tbl cfg*/ +#define LEARN_HASH_TBL_BT_START (0) +#define LEARN_HASH_TBL_BT_WIDTH (19) + +#define DPP_SMMU0_MCAST_TBL_MAX_GROUP (0xffff) + +#define DPP_SMMU0_CAR0_MONO_POS (0) +#define DPP_SMMU0_CAR0_MONO_LEN (1) +#define DPP_SMMU0_CAR0_EN_POS (1) +#define DPP_SMMU0_CAR0_EN_LEN (1) +#define DPP_SMMU0_CAR1_MONO_POS (2) +#define DPP_SMMU0_CAR1_MONO_LEN (1) +#define DPP_SMMU0_CAR1_EN_POS (3) +#define DPP_SMMU0_CAR1_EN_LEN (1) + +#define DPP_SMMU0_LPM_AS_TBL_ID_MAX (7) +#define DPP_SMMU0_LPM_AS_TBL_ID_NUM (8) + +#define DPP_SMMU0_MCAST_DATA_VLD_POS (16) +#define DPP_SMMU0_MCAST_DATA_VLD_LEN (1) +#define DPP_SMMU0_MCAST_CNT_POS (0) +#define DPP_SMMU0_MCAST_CNT_LEN (16) + +#define DPP_SMMU0_INDIER_RDWR_OFFSET_NUM (4) +#define DPP_SMMU0_READ_REG_MAX_NUM (4) + +/** SMMU0 调度 fifo ecc 使能标志 */ +#define DPP_SMMU0_PPU_FIFO_POS (12) +#define DPP_SMMU0_PPU_FIFO_LEN (8) +#define DPP_SMMU0_STAT_FIFO_POS (11) +#define DPP_SMMU0_STAT_FIFO_LEN (1) +#define DPP_SMMU0_DMA_FIFO_POS (10) +#define DPP_SMMU0_DMA_FIFO_LEN (1) +#define DPP_SMMU0_ODMA_FIFO_POS (6) +#define DPP_SMMU0_ODMA_FIFO_LEN (4) +#define DPP_SMMU0_MCAST_FIFO_POS (5) +#define DPP_SMMU0_MCAST_FIFO_LEN (1) +#define DPP_SMMU0_ETCAM_FIFO_POS (1) +#define DPP_SMMU0_ETCAM_FIFO_LEN (4) +#define DPP_SMMU0_LPM_FIFO_POS (0) +#define DPP_SMMU0_LPM_FIFO_LEN (1) + +#define DPP_SMMU0_CTRL_ECC_CFG_POS (0) +#define DPP_SMMU0_CTRL_ECC_CFG_LEN (3) + +#define DPP_SMMU0_RSCHD_RAM_POS (0) +#define DPP_SMMU0_RSCHD_RAM_LEN (1) + +#define DPP_SMMU0_ERAM_ECC_CFG_POS (0) +#define DPP_SMMU0_ERAM_ECC_CFG_LEN (24) + +#define DPP_SMMU0_WR_ARB_ECC_CFG_POS (0) +#define 
DPP_SMMU0_WR_ARB_ECC_CFG_LEN (1) + +/* smmu0 int0 reg bit define */ +#define SMMU0_INT0_DMA_ORDFIFO_START (0) +#define SMMU0_INT0_DMA_ORDFIFO_LEN (1) +#define SMMU0_INT0_ODMA_ORDFIFO_START (1) +#define SMMU0_INT0_ODMA_ORDFIFO_LEN (1) +#define SMMU0_INT0_MCAST_ORDFIFO_START (2) +#define SMMU0_INT0_MCAST_ORDFIFO_LEN (1) + +#define DPP_ERAM128_BADDR_MASK \ + (0x3FFFF80) /* modified for dpp+ 25bit 2018-09-27*/ + +#define DPP_SE_SMMU1_MAX_BADDR_NO_SHARE ((1 << 20) - 1) +#define DPP_SE_SMMU1_MAX_BADDR_SHARE ((1 << 13) - 1) +#define DPP_SE_SMMU1_MAX_ADDR ((1 << 30) - 1) + +#define DPP_SE_SMMU1_BANK_NUM_POS (16) +#define DPP_SE_SMMU1_BANK_NUM_LEN (5) + +#define DPP_SE_SMMU1_SHARE_TYPE_POS (21) +#define DPP_SE_SMMU1_SHARE_TYPE_LEN (2) + +#define DPP_SE_SMMU1_RR_STATE_POS (0) +#define DPP_SE_SMMU1_RR_STATE_LEN (15) + +#define DPP_SE_CFG_PPU_INFO_POS (0) +#define DPP_SE_CFG_PPU_INFO_LEN (12) + +#define DPP_SE_CFG_DPI_FLAG_POS (12) +#define DPP_SE_CFG_DPI_FLAG_LEN (1) + +#define DPP_SE_CFG_WR_FLAG_POS (13) +#define DPP_SE_CFG_WR_FLAG_LEN (1) + +/** 中断相关 */ +#define DPP_SE_ALG_SCHD_INT_NUM (14) +#define DPP_SE_ALG_ZBLK_ECC_INT_NUM (32) +#define DPP_SE_ALG_HASH0_INT_NUM (8) +#define DPP_SE_ALG_HASH1_INT_NUM (8) +#define DPP_SE_ALG_HASH2_INT_NUM (8) +#define DPP_SE_ALG_HASH3_INT_NUM (8) +#define DPP_SE_ALG_LPM_INT_NUM (10) + +#define DPP_SMMU0_CLS_NUM (6) +#define DPP_SMMU0_STAT_NUM (10) +#define DPP_SMMU0_AS_ETCAM_NUM (DPP_ETCAM_ID_NUM) +#define DPP_SMMU0_PLCR_NUM (1) +#define DPP_SMMU0_ERAM_BLOCK_NUM (32) + +#define DPP_SMMU1_SCH_CNT (4) /*sch 调度次数 完成6->4调度*/ +//#define DPP_SMMU1_GRP_CNT (DPP_SMMU1_DDR_GRP_NUM) +#define DPP_SMMU1_GRP_CNT (8) /*与reg n_size保持一�?*/ +#define DPP_SMMU1_DIR_CHANNEL_CNT (4) + +#define DPP_PARSE_MEX_CHANNEL_NUM (6) +#define DPP_PARSE_KSCHD_CHANNEL_NUM (6) +#define DPP_RSCHD_PPU_CHANNEL_NUM (6) + +typedef enum smmu1_stat_type_e { + STAT_TYPE_PPU = 0, + STAT_TYPE_OAM = 1, + STAT_TYPE_MAX, +} SMMU1_STAT_TYPE_E; + +typedef enum alg_lpm_type_e { + 
ALG_LPM_V4 = 1, + ALG_LPM_V6 = 2, + ALG_LPM_V4_AS = 3, + ALG_LPM_V6_AS = 4, + ALG_LPM_MAX +} ALG_LPM_TYPE_E; + +typedef enum se_ddr_bank_info_e { + SE_DDR_BKINFO_LPM4 = 0, + SE_DDR_BKINFO_LPM6 = 1, + SE_DDR_BKINFO_LPM4_AS = 2, + SE_DDR_BKINFO_LPM6_AS = 3, +} SE_DDR_BANK_INFO_E; + +typedef enum alg_zblk_serv_type_e { + ALG_ZBLK_SERV_LPM = 0, + ALG_ZBLK_SERV_HASH, +} ALG_ZBLK_SERV_TYPE_E; + +typedef enum cmmu_ddr3_bank_enable_e { + CMMU_DDR3_BANK_DISABLE = 0, + CMMU_DDR3_BANK_ENABLE, +} CMMU_DDR3_BANK_ENABLE_E; + +/** TM统计读片外位�? */ +typedef enum stat_tm_rd_ddr_mode_e { + STAT_TM_RD_DDR_MODE_128 = 0, + STAT_TM_RD_DDR_MODE_256 = 1, + STAT_TM_RD_DDR_MODE_512 = 2, + STAT_TM_RD_DDR_MODE_MAX, +} STAT_TM_RD_DDR_MODE_E; + +typedef enum stat_tm_rd_clr_mode_e { + STAT_TM_RD_CLR_MODE_UNCLR = 0, /**< @brief 正常读,读完数据不清�? */ + STAT_TM_RD_CLR_MODE_CLR = 1, /**< @brief 读清模式*/ + STAT_TM_RD_CLR_MODE_MAX, +} STAT_TM_RD_CLR_MODE_E; + +typedef enum se_ddr_map_flag_e { + VIR_TO_PHY_FLAG = + 0, /**< @brief 虚拟地址/bank到物理地址/bank的映�? */ + PHY_TO_VIR_FLAG = + 1, /**< @brief 物理地址/bank到虚拟地址/bank的映�? 
*/ +} SE_DDR_MAP_FLAG_E; + +/** module se*/ +typedef enum module_init_se_e { + MODULE_INIT_SE_SMMU0 = 0, + MODULE_INIT_SE_SMMU1, + MODULE_INIT_SE_ALG, + MODULE_INIT_SE_AS, + MODULE_INIT_SE_ETCAM, + MODULE_INIT_SE_STAT, + MODULE_INIT_SE_FIFO, + MODULE_INIT_SE_MAX +} MODULE_INIT_SE_E; + +typedef struct smmu1_kschd_hash_ddr_cfg_t { + ZXIC_UINT32 baddr; + ZXIC_UINT32 crcen; + ZXIC_UINT32 mode; +} SMMU1_KSCHD_HASH_DDR_CFG_T; + +typedef struct smmu1_kschd_lpm_ddr_cfg_t { + ZXIC_UINT32 baddr; + ZXIC_UINT32 bankcopy; + ZXIC_UINT32 crcen; + ZXIC_UINT32 flag; /* 0-256, 1-384 */ + ZXIC_UINT32 as_baddr; + ZXIC_UINT32 as_bankcopy; + ZXIC_UINT32 as_crcen; + ZXIC_UINT32 as_mode; +} SMMU1_KSCHD_LPM_DDR_CFG_T; + +typedef struct dpp_lpm_as_eram_info_t { + ZXIC_UINT32 as_baddr; + ZXIC_UINT32 as_mode; +} DPP_LPM_AS_ERAM_INFO_T; + +typedef struct dpp_lpm_res_info_t { + DPP_LPM_AS_ERAM_INFO_T as_eram_info[DPP_SMMU0_LPM_AS_TBL_ID_NUM]; + ZXIC_UINT32 v4_ddr_baddr; + ZXIC_UINT32 v4_as_ddr_baddr; + ZXIC_UINT32 v4_as_rsp_len; + ZXIC_UINT32 v6_ddr_baddr; + ZXIC_UINT32 v6_as_ddr_baddr; + ZXIC_UINT32 v6_as_rsp_len; +} DPP_LPM_RES_INFO_T; + +/*--------------------------------------------------------调试打印计数--------------------------------------------------------*/ + +/*smmu0 调试打印计数 开�?*/ +typedef struct dpp_smmu0_dbg_cnt_t { + ZXIC_UINT32 smmu0_rcv_as_age_req_cnt; + ZXIC_UINT32 smmu0_rcv_parse_req_cnt; + ZXIC_UINT32 smmu0_cpu_ind_rd_rsp_cnt; + ZXIC_UINT32 smmu0_cpu_ind_rd_req_cnt; + ZXIC_UINT32 smmu0_cpu_ind_wr_req_cnt; + + ZXIC_UINT32 smmu0_to_plcr_rsp_cnt[DPP_SMMU0_PLCR_NUM]; + ZXIC_UINT32 smmu0_rcv_plcr_req_cnt[DPP_SMMU0_PLCR_NUM]; + + ZXIC_UINT32 smmu0_to_lpm_as_rsp_cnt; + ZXIC_UINT32 smmu0_rcv_lpm_as_req_cnt; + + ZXIC_UINT32 + smmu0_to_as_etacm_rsp_cnt[DPP_SMMU0_AS_ETCAM_NUM]; /* 与fc是反�? 
*/ + ZXIC_UINT32 smmu0_rcv_as_etacm_req_cnt[DPP_SMMU0_AS_ETCAM_NUM]; + + ZXIC_UINT32 smmu0_to_ppu_mc_rsp_cnt; + ZXIC_UINT32 smmu0_rcv_ppu_mc_req_cnt; + ZXIC_UINT32 smmu0_to_odma_tdm_mc_rsp_cnt; + ZXIC_UINT32 smmu0_rcv_odma_tdm_mc_req_cnt; + ZXIC_UINT32 smmu0_to_odma_rsp_cnt; + ZXIC_UINT32 smmu0_rcv_odma_req_cnt; + ZXIC_UINT32 smmu0_to_dma_rsp_cnt; + ZXIC_UINT32 smmu0_rcv_dma_req_cnt; + + ZXIC_UINT32 smmu0_to_stat_rsp_cnt[DPP_SMMU0_STAT_NUM]; + ZXIC_UINT32 smmu0_rcv_stat_req_cnt[DPP_SMMU0_STAT_NUM]; + ZXIC_UINT32 smmu0_to_ppu_rsp_cnt[DPP_SMMU0_CLS_NUM]; + ZXIC_UINT32 smmu0_rcv_ppu_req_cnt[DPP_SMMU0_CLS_NUM]; + + ZXIC_UINT32 smmu0_rcv_ftm_stat_req0_cnt; + ZXIC_UINT32 smmu0_rcv_ftm_stat_req1_cnt; + ZXIC_UINT32 smmu0_rcv_etm_stat_req0_cnt; + ZXIC_UINT32 smmu0_rcv_etm_stat_req1_cnt; + + ZXIC_UINT32 smmu0_block_rd_cnt[DPP_SMMU0_ERAM_BLOCK_NUM]; + ZXIC_UINT32 smmu0_block_wr_cnt[DPP_SMMU0_ERAM_BLOCK_NUM]; + +} DPP_SMMU0_DBG_CNT_T; + +typedef struct dpp_smmu0_dbg_fc_cnt_t { + ZXIC_UINT32 smmu0_to_as_age_req_fc_cnt; + ZXIC_UINT32 smmu0_to_parse_req_fc_cnt; + ZXIC_UINT32 smmu0_rcv_wr_arb_cpu_fc_cnt; + ZXIC_UINT32 smmu0_to_as_lpm_req_fc_cnt; + ZXIC_UINT32 smmu0_rcv_as_lpm_rsp_fc_cnt; + ZXIC_UINT32 smmu0_to_as_etacm_req_fc_cnt[DPP_SMMU0_AS_ETCAM_NUM]; + ZXIC_UINT32 smmu0_rcv_as_etacm_rsp_fc_cnt[DPP_SMMU0_AS_ETCAM_NUM]; + ZXIC_UINT32 smmu0_to_ppu_mc_req_fc_cnt; + ZXIC_UINT32 smmu0_rcv_ppu_mc_rsp_fc_cnt; + ZXIC_UINT32 smmu0_rcv_odma_tdm_mc_rsp_fc_cnt; + ZXIC_UINT32 smmu0_to_odma_tdm_mc_req_fc_cnt; + ZXIC_UINT32 smmu0_to_odma_req_fc_cnt; + ZXIC_UINT32 smmu0_to_dma_req_fc_cnt; + ZXIC_UINT32 smmu0_to_stat_req_fc_cnt[DPP_SMMU0_STAT_NUM]; + ZXIC_UINT32 smmu0_rcv_stat_rsp_fc_cnt[DPP_SMMU0_STAT_NUM]; + ZXIC_UINT32 smmu0_to_ppu_req_fc_cnt[DPP_SMMU0_CLS_NUM]; + ZXIC_UINT32 smmu0_rcv_ppu_rsp_fc_cnt[DPP_SMMU0_CLS_NUM]; +} DPP_SMMU0_DBG_FC_CNT_T; +/*smmu0 调试打印计数 结束*/ + +/*smmu1 计数�?*/ +typedef struct dpp_smmu1_dbg_cnt_t { + ZXIC_UINT32 ctrl_to_cash_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 
cash_to_ctrl_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 rschd_to_cache_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 cash_to_cache_rsp_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 cash_to_ctrl_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 ctrl_to_cash_rsp_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 kschd_to_cache_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 cache_to_kschd_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 dma_to_smmu1_rd_req_cnt; + ZXIC_UINT32 oam_to_kschd_req_cnt; + ZXIC_UINT32 oam_rr_state_rsp_cnt; + ZXIC_UINT32 oam_clash_info_cnt; + ZXIC_UINT32 oam_to_rr_req_cnt; + ZXIC_UINT32 lpm_as_to_kschd_req_cnt; + ZXIC_UINT32 lpm_as_rr_state_rsp_cnt; + ZXIC_UINT32 lpm_as_clash_info_cnt; + ZXIC_UINT32 lpm_as_to_rr_req_cnt; + ZXIC_UINT32 lpm_to_kschd_req_cnt; + ZXIC_UINT32 lpm_rr_state_rsp_cnt; + ZXIC_UINT32 lpm_clash_info_cnt; + ZXIC_UINT32 lpm_to_rr_req_cnt; + ZXIC_UINT32 hash_to_kschd_req_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 hash_rr_state_rsp_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 hash_clash_info_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 hash_to_rr_req_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 dir_to_kschd_req_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 dir_clash_info_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 dir_tbl_wr_req_cnt; + ZXIC_UINT32 warbi_to_dir_tbl_warbi_fc_cnt; + ZXIC_UINT32 dir_to_bank_rr_req_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 kschd_to_dir_fc_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 dir_rr_state_rsp_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 wr_done_to_warbi_fc_cnt; + ZXIC_UINT32 wr_done_ptr_req_cnt; + ZXIC_UINT32 ctrl_to_warbi_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 warbi_to_ctrl_wr_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 warbi_to_cash_wr_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 warbi_to_cpu_wr_fc_cnt; + ZXIC_UINT32 cpu_wr_req_cnt; + ZXIC_UINT32 ctrl_to_cpu_rd_rsp_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 cpu_to_ctrl_rd_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 cpu_rd_dir_tbl_rsp_cnt; + ZXIC_UINT32 cpu_to_dir_tbl_rd_wr_req_cnt; + ZXIC_UINT32 
smmu1_to_mmu_rsp_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 mmu_to_smmu1_rd_rsp_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 mmu_to_smmu1_rd_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 smmu1_to_mmu_rd_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 mmu_to_smmu1_wr_fc_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 smmu1_to_mmu_wr_req_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 se_to_smmu1_wr_rsp_fc_cnt; + ZXIC_UINT32 smmu1_to_se_wr_rsp_cnt; + ZXIC_UINT32 ddr_wr_rsp_cnt[DPP_SMMU1_GRP_CNT]; + ZXIC_UINT32 smmu1_to_as_fc_cnt; + ZXIC_UINT32 as_to_smmu1_wr_req_cnt; + ZXIC_UINT32 smmu1_to_se_parser_fc_cnt; + ZXIC_UINT32 se_parser_to_smmu1_req_cnt; /* 微码写ddr直接�? */ + ZXIC_UINT32 smmu1_to_etm_wr_fc_cnt; + ZXIC_UINT32 etm_wr_req_cnt; + ZXIC_UINT32 smmu1_to_ftm_wr_fc_cnt; + ZXIC_UINT32 ftm_wr_req_cnt; + ZXIC_UINT32 smmu1_to_state_wr_fc_cnt; + ZXIC_UINT32 state_wr_req_cnt; + ZXIC_UINT32 se_to_dma_rsp_cnt; + ZXIC_UINT32 se_to_dma_fc_cnt; + ZXIC_UINT32 oam_to_smmu1_fc_cnt; + ZXIC_UINT32 smmu1_to_oam_rsp_cnt; + ZXIC_UINT32 smmu1_to_oam_fc_cnt; + ZXIC_UINT32 oam_to_smmu1_req_cnt; + ZXIC_UINT32 smmu1_to_etm_rsp_cnt; + ZXIC_UINT32 smmu1_to_ftm_rsp_cnt; + ZXIC_UINT32 smmu1_to_etm_fc_cnt; + ZXIC_UINT32 etm_to_smmu1_req_cnt; + ZXIC_UINT32 smmu1_to_ftm_fc_cnt; + ZXIC_UINT32 ftm_to_smmu1_req_cnt; + ZXIC_UINT32 smmu1_to_stat_rsp_cnt; + ZXIC_UINT32 smmu1_to_stat_fc_cnt; + ZXIC_UINT32 stat_to_smmu1_req_cnt; /* cmmu */ + ZXIC_UINT32 lpm_as_to_smmu1_fc_cnt; + ZXIC_UINT32 lpm_to_smmu1_fc_cnt; + ZXIC_UINT32 smmu1_to_lpm_as_rsp_cnt; + ZXIC_UINT32 smmu1_to_lpm_rsp_cnt; + ZXIC_UINT32 smmu1_to_lpm_as_fc_cnt; + ZXIC_UINT32 smmu1_to_lpm_fc_cnt; + ZXIC_UINT32 lpm_as_to_smmu1_req_cnt; + ZXIC_UINT32 lpm_to_smmu1_req_cnt; + ZXIC_UINT32 hash_to_smmu1_fc_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 smmu1_to_hash_rsp_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 smmu1_to_hash_fc_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 hash_to_smmu1_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 se_to_smmu1_dir_rsp_fc_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 
smmu1_to_se_dir_rsp_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 smmu1_to_se_dir_fc_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 se_to_smmu1_dir_cnt[DPP_SMMU1_DIR_CHANNEL_CNT]; + ZXIC_UINT32 cache_to_rschd_rsp_cnt[DPP_SMMU1_GRP_CNT]; +} DPP_SMMU1_DBG_CNT_T; + +/*parser 计数�?*/ +typedef struct se_parser_dbg_cnt_t { + ZXIC_UINT32 mex_req_cnt[DPP_PARSE_MEX_CHANNEL_NUM]; + ZXIC_UINT32 kschd_req_cnt[DPP_PARSE_KSCHD_CHANNEL_NUM]; + ZXIC_UINT32 kschd_parser_fc_cnt[DPP_PARSE_KSCHD_CHANNEL_NUM]; + ZXIC_UINT32 se_ppu_mex_fc_cnt[DPP_PARSE_MEX_CHANNEL_NUM]; + ZXIC_UINT32 smmu0_marc_fc_cnt; + ZXIC_UINT32 smmu0_marc_key_cnt; + ZXIC_UINT32 smmu1_key_cnt; + ZXIC_UINT32 smmu1_parser_fc_cnt; + ZXIC_UINT32 marc_tab_type_err_mex_cnt[DPP_PARSE_MEX_CHANNEL_NUM]; + ZXIC_UINT32 eram_fulladdr_drop_cnt; +} SE_PARSER_DBG_CNT_T; + +/*kschd 计数�?*/ +typedef struct se_kschd_dbg_cnt_t { + ZXIC_UINT32 parser_kschd_key_cnt[DPP_PARSE_KSCHD_CHANNEL_NUM]; + ZXIC_UINT32 kschd_smmu1_key_cnt[DPP_SMMU1_SCH_CNT]; + ZXIC_UINT32 kschd_to_as_hash0_key_cnt; + ZXIC_UINT32 kschd_to_as_hash1_key_cnt; + ZXIC_UINT32 kschd_to_as_hash2_key_cnt; + ZXIC_UINT32 kschd_to_as_hash3_key_cnt; + ZXIC_UINT32 kschd_to_as_lpm_key_cnt; + ZXIC_UINT32 kschd_to_as_etacm0_key_cnt; + ZXIC_UINT32 kschd_to_as_etacm1_key_cnt; + ZXIC_UINT32 kschd_to_as_pbu_key_cnt; + ZXIC_UINT32 kschd_to_parser_fc_cnt[DPP_PARSE_KSCHD_CHANNEL_NUM]; + ZXIC_UINT32 smmu1_kschd_fc_cnt[DPP_SMMU1_SCH_CNT]; + ZXIC_UINT32 kschd_rcv_as_hash0_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_hash1_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_hash2_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_hash3_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_lpm_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_etacm0_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_etacm1_fc_cnt; + ZXIC_UINT32 kschd_rcv_as_pbu_fc_cnt; +} SE_KSCHD_DBG_CNT_T; + +/*rschd 计数�?*/ +typedef struct se_rschd_dbg_cnt_t { + ZXIC_UINT32 se_ppu_mex_rsp_cnt[DPP_RSCHD_PPU_CHANNEL_NUM]; + ZXIC_UINT32 rschd_rcv_as_hash0_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_hash1_rsp_cnt; + ZXIC_UINT32 
rschd_rcv_as_hash2_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_hash3_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_lpm_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_etacm0_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_etacm1_rsp_cnt; + ZXIC_UINT32 rschd_rcv_as_pbu_rsp_cnt; + ZXIC_UINT32 smmu1_rschd_rsp_cnt[DPP_SMMU1_SCH_CNT]; + ZXIC_UINT32 ppu_se_mex_fc_cnt[DPP_RSCHD_PPU_CHANNEL_NUM]; + ZXIC_UINT32 rschd_to_as_hash0_fc_cnt; + ZXIC_UINT32 rschd_to_as_hash1_fc_cnt; + ZXIC_UINT32 rschd_to_as_hash2_fc_cnt; + ZXIC_UINT32 rschd_to_as_hash3_fc_cnt; + ZXIC_UINT32 rschd_to_as_lpm_fc_cnt; + ZXIC_UINT32 rschd_to_as_etacm0_fc_cnt; + ZXIC_UINT32 rschd_to_as_etacm1_fc_cnt; + ZXIC_UINT32 rschd_to_as_pbu_fc_cnt; + ZXIC_UINT32 rschd_smmu1_rdy_cnt[DPP_SMMU1_SCH_CNT]; + ZXIC_UINT32 rschd_rcv_smmu0_wr_done_cnt; + ZXIC_UINT32 rschd_to_smmu0_wr_done_fc_cnt; + ZXIC_UINT32 rschd_rcv_smmu1_wr_done_cnt; + ZXIC_UINT32 rschd_to_smmu1_wr_done_fc_cnt; + ZXIC_UINT32 rschd_rcv_alg_wr_done_cnt; + ZXIC_UINT32 rschd_to_alg_wr_done_fc_cnt; +} SE_RSCHD_DBG_CNT_T; + +/*cmmu 计数�?*/ +typedef struct se_cmmu_dbg_cnt_t { + ZXIC_UINT32 stat_cmmu_req_cnt; + ZXIC_UINT32 cmmu_stat_fc_cnt; + ZXIC_UINT32 smmu1_cmmu_wr_fc_cnt; + ZXIC_UINT32 smmu1_cmmu_rd_fc_cnt; +} SE_CMMU_DBG_CNT_T; +/*cmmu req计数�? 
结束*/ + +/*se_as模块 计数�?*/ +typedef struct se_as_dbg_cnt_t { + ZXIC_UINT32 hash_wr_req_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 smmu0_etcam_fc_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 etcam_smmu0_req_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 smmu0_etcam_rsp_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 as_hla_hash_key_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 as_hla_lpm_key_cnt; + ZXIC_UINT32 alg_as_hash_rsp_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 alg_as_hash_smf_rsp_cnt[DPP_HASH_ID_NUM]; /* 命中计数 */ + ZXIC_UINT32 alg_as_lpm_rsp_cnt; + ZXIC_UINT32 alg_as_lpm_smf_rsp_cnt; /* 命中计数 */ + ZXIC_UINT32 as_pbu_key_cnt; + ZXIC_UINT32 pbu_se_dpi_rsp_dat_cnt; + ZXIC_UINT32 as_etcam_ctrl_req_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 etcam_ctrl_as_index_cnt[DPP_ETCAM_ID_NUM]; /* 有效返回计数 */ + ZXIC_UINT32 etcam_ctrl_as_hit_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 as_smmu0_req_cnt; + ZXIC_UINT32 learn_hla_wr_cnt; /* 硬件学习 */ + ZXIC_UINT32 as_smmu1_req_cnt; + ZXIC_UINT32 se_cfg_mac_dat_cnt; /* 到DMA计数 */ + ZXIC_UINT32 alg_as_hash_fc_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 alg_as_lpm_fc_cnt; + ZXIC_UINT32 as_alg_hash_fc_cnt[DPP_HASH_ID_NUM]; + ZXIC_UINT32 as_alg_lpm_fc_cnt; + ZXIC_UINT32 as_pbu_fc_cnt; + ZXIC_UINT32 pbu_se_dpi_key_fc_cnt; + ZXIC_UINT32 as_etcam_ctrl_fc_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 etcam_ctrl_as_fc_cnt[DPP_ETCAM_ID_NUM]; + ZXIC_UINT32 smmu0_as_mac_age_fc_cnt; + ZXIC_UINT32 alg_learn_fc_cnt; + ZXIC_UINT32 smmu1_as_fc_cnt; + ZXIC_UINT32 cfg_se_mac_fc_cnt; +} SE_AS_DBG_CNT_T; + +/* hash 是否响应dma流控使能*/ +typedef enum se_as_hash_dma_fc_en_e { + HASH_EN_DMA_FC = 0, + HASH_UN_EN_DMA_FC = 1, +} SE_AS_HASH_DMA_FC_EN_E; +/*se_as模块 计数�? 
结束*/ + +/** alg模块调试计数*/ +typedef struct se_alg_dbg_cnt_t { + ZXIC_UINT32 hash_key_cnt[4]; + ZXIC_UINT32 hash_rsp_cnt[4]; + ZXIC_UINT32 hash_hit_cnt[4]; + ZXIC_UINT32 hash_space_vld_cnt[4]; + ZXIC_UINT32 hash_ddr3_req_vld_cnt[4]; + ZXIC_UINT32 hash_ddr3_rsp_vld_cnt[4]; + + ZXIC_UINT32 lpm_key_cnt; + ZXIC_UINT32 lpm_rsp_cnt; + ZXIC_UINT32 lpm_hit_cnt; + ZXIC_UINT32 + lpm_key_ddr3_req_vld_cnt; /**< @brief LPM_SMMU1_P4口用于键�?*/ + ZXIC_UINT32 + lpm_key_ddr3_rsp_vld_cnt; /**< @brief LPM_SMMU1_P4口用于键�?*/ + ZXIC_UINT32 + lpm_as_ddr3_req_vld_cnt; /**< @brief LPM_SMMU1_P5口用于关联结�?*/ + ZXIC_UINT32 + lpm_as_ddr3_rsp_vld_cnt; /**< @brief LPM_SMMU1_P5口用于关联结�?*/ +} SE_ALG_DBG_CNT_T; + +typedef struct se_alg_dbg_excp_cnt_t { + ZXIC_UINT32 schd_learn_fifo_int_cnt; + ZXIC_UINT32 schd_hash_fifo_int_cnt[4]; + ZXIC_UINT32 schd_lpm_fifo_int_cnt; + ZXIC_UINT32 schd_learn_fifo_parity_err_cnt; + ZXIC_UINT32 schd_hash_fifo_parity_err_cnt[4]; + ZXIC_UINT32 schd_lpm_fifo_parity_err_cnt; + ZXIC_UINT32 + rd_init_cft_cnt; /**< @brief 初始化过程中出现的CPU读命令冲突计�?*/ + ZXIC_UINT32 zblk_ecc_err_cnt[32]; /**< @brief 32个zblock的ecc错误计数*/ + ZXIC_UINT32 zcam_hash_parity_err_cnt + [4]; /**< @brief 4个hash业务口的parity错误计数*/ + ZXIC_UINT32 zcam_lpm_err_cnt; /**< @brief LPM业务口错误计�?*/ + + ZXIC_UINT32 hash_sreq_fifo_parity_err_cnt[4]; + ZXIC_UINT32 hash_sreq_fifo_int_cnt[4]; + ZXIC_UINT32 hash_key_fifo_int_cnt[4]; + ZXIC_UINT32 hash_int_rsp_fifo_parity_err_cnt[4]; + ZXIC_UINT32 hash_ext_rsp_fifo_parity_err_cnt[4]; + ZXIC_UINT32 hash_ext_rsp_fifo_int_cnt[4]; + ZXIC_UINT32 hash_int_rsp_fifo_int_cnt[4]; + + ZXIC_UINT32 lpm_ext_rsp_fifo_int_cnt; + ZXIC_UINT32 lpm_ext_v6_fifo_int_cnt; + ZXIC_UINT32 lpm_ext_v4_fifo_int_cnt; + ZXIC_UINT32 lpm_ext_addr_fifo_int_cnt; + ZXIC_UINT32 lpm_ext_v4_fifo_parity_err_cnt; + ZXIC_UINT32 lpm_ext_v6_fifo_parity_err_cnt; + ZXIC_UINT32 lpm_ext_rsp_fifo_parity_err_cnt; + ZXIC_UINT32 lpm_as_req_fifo_int_cnt; + ZXIC_UINT32 lpm_as_int_rsp_fifo_int_cnt; +} SE_ALG_DBG_EXCP_CNT_T; + +#define 
LPM_HW_DAT_BUFF_SIZE_MAX (16 * 1024) +typedef enum { + LPM_DAT_WR_TYPE_DMA = 1UL, + LPM_DAT_WR_TYPE_REG = 2UL, +} LPM_DAT_WR_TYPE; + +typedef enum { + LPM_DAT_ZECLL = 1UL, + LPM_DAT_ZREG = 2UL, + LPM_DAT_DDR = 3UL, + LPM_DAT_DDR_RST = 4UL, + LPM_DAT_ERAM_RST = 5UL, + LPM_DAT_TYPE_MAX, +} ROUTE_DAT_TYPE; + +typedef struct _lpm_hw_dat_ddr { + ZXIC_UINT32 dat_type; /* ROUTE_DAT_TYPE :LPM_DAT_DDR/LPM_DAT_DDR_RST */ + ZXIC_UINT32 v4v6_flag; /* ALG_LPM_TYPE_E */ + ZXIC_UINT32 lpm_wr_vld; /* 0-WR 1-RD */ + ZXIC_UINT32 tbl_id; /* 对应复制通道 */ + ZXIC_UINT32 base_addr; /* 19b */ + ZXIC_UINT32 index; /* by rw_len */ + ZXIC_UINT32 ecc_en; + ZXIC_UINT32 rw_len; /* SMMU1_DDR_WRT_MODE_E */ + ZXIC_UINT8 data[512 / 8]; /* 左对�? */ +} ROUTE_HW_DAT_DDR; + +typedef struct _lpm_hw_dat_zcam { + ZXIC_UINT32 dat_type; /* ROUTE_DAT_TYPE :LPM_DAT_ZREG/LPM_DAT_ZECLL */ + ZXIC_UINT32 ram_reg_flag; /* 0-reg 1-cell */ + ZXIC_UINT32 rw_addr; /* by 512b */ + ZXIC_UINT8 data[512 / 8]; /* 左对�? */ +} ROUTE_HW_DAT_ZCAM; + +typedef struct _lpm_hw_dat_eram { + ZXIC_UINT32 dat_type; /* ROUTE_DAT_TYPE :LPM_DAT_ERAM_RST */ + ZXIC_UINT32 base_addr; /* by 128b */ + ZXIC_UINT32 index; /* by rw_len*/ + ZXIC_UINT32 rw_len; /* DPP_ERAM128_TBL_MODE_E */ + ZXIC_UINT8 data[128 / 8]; /* 左对�? */ +} ROUTE_HW_DAT_ERAM; + +typedef struct ppu_stat_cfg_t { + ZXIC_UINT32 eram_baddr; /*片内统计基地址,单位128bit*/ + ZXIC_UINT32 eram_depth; /*片内深度,单位128bit*/ + ZXIC_UINT32 + ddr_base_addr; /*片外统计基地址(PPU和OAM的片外计数共用此基地址),单位2k*256bit*/ + ZXIC_UINT32 ppu_addr_offset; /*PPU统计偏移地址,单位128bit*/ +} PPU_STAT_CFG_T; + +/***********************************************************/ +/** dpp hash的smmu1属性设�? +* @param dev_id 设备�? +* @param hash_id hash引擎�? +* @param tbl_id hash表号 +* @param ecc_en ecc使能 +* @param baddr ddr基地址 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark �? 
+* @see +* @author ls @date 2016/04/12 +************************************************************/ +DPP_STATUS dpp_se_smmu1_hash_tbl_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 tbl_id, ZXIC_UINT32 ecc_en, + ZXIC_UINT32 baddr); + +/** 获取hash算法访问DDR空间的属性,从软件获取(待优化) +* @param dev_id 设备号 +* @param hash_id hash引擎号 +* @param bulk_id Hash引擎存储资源划分块数的ID号 +* @param p_ecc_en 使能ECC校验 +* @param p_base_addr DDR空间基地址 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author tf @date 2016/06/15 +************************************************************/ +DPP_STATUS dpp_se_smmu1_hash_tbl_soft_cfg_get(DPP_DEV_T *dev, + ZXIC_UINT32 hash_id, + ZXIC_UINT32 bulk_id, + ZXIC_UINT32 *p_ecc_en, + ZXIC_UINT32 *p_base_addr); + +DPP_STATUS dpp_se_zblk_serv_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, + ZXIC_UINT32 serv_sel, ZXIC_UINT32 hash_id, + ZXIC_UINT32 enable); + +DPP_STATUS dpp_se_zcell_mono_cfg_set( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 zcell0_tbl_id, + ZXIC_UINT32 zcell0_mono_flag, ZXIC_UINT32 zcell1_tbl_id, + ZXIC_UINT32 zcell1_mono_flag, ZXIC_UINT32 zcell2_tbl_id, + ZXIC_UINT32 zcell2_mono_flag, ZXIC_UINT32 zcell3_tbl_id, + ZXIC_UINT32 zcell3_mono_flag); + +DPP_STATUS dpp_se_zcell_mono_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 *zcell0_tbl_id, + ZXIC_UINT32 *zcell0_mono_flag, ZXIC_UINT32 *zcell1_tbl_id, + ZXIC_UINT32 *zcell1_mono_flag, ZXIC_UINT32 *zcell2_tbl_id, + ZXIC_UINT32 *zcell2_mono_flag, ZXIC_UINT32 *zcell3_tbl_id, + ZXIC_UINT32 *zcell3_mono_flag); + +DPP_STATUS +dpp_se_zreg_mono_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, + ZXIC_UINT32 zreg0_tbl_id, ZXIC_UINT32 zreg0_mono_flag, + ZXIC_UINT32 zreg1_tbl_id, ZXIC_UINT32 zreg1_mono_flag, + ZXIC_UINT32 zreg2_tbl_id, ZXIC_UINT32 zreg2_mono_flag, + ZXIC_UINT32 zreg3_tbl_id, ZXIC_UINT32 zreg3_mono_flag); + +DPP_STATUS dpp_se_zreg_mono_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 *zreg0_tbl_id, + ZXIC_UINT32 *zreg0_mono_flag, ZXIC_UINT32 *zreg1_tbl_id, + 
ZXIC_UINT32 *zreg1_mono_flag, ZXIC_UINT32 *zreg2_tbl_id, + ZXIC_UINT32 *zreg2_mono_flag, ZXIC_UINT32 *zreg3_tbl_id, + ZXIC_UINT32 *zreg3_mono_flag); +DPP_STATUS dpp_se_hash_zcam_mono_flags_set(DPP_DEV_T *dev, + ZXIC_UINT32 hash0_mono_flag, + ZXIC_UINT32 hash1_mono_flag, + ZXIC_UINT32 hash2_mono_flag, + ZXIC_UINT32 hash3_mono_flag); + +DPP_STATUS dpp_se_hash_zcam_mono_flags_get(DPP_DEV_T *dev, + ZXIC_UINT32 *hash0_mono_flag, + ZXIC_UINT32 *hash1_mono_flag, + ZXIC_UINT32 *hash2_mono_flag, + ZXIC_UINT32 *hash3_mono_flag); + +DPP_STATUS dpp_se_hash_ext_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 ext_mode, ZXIC_UINT32 flag); +DPP_STATUS dpp_se_hash_ext_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 *p_content_type, + ZXIC_UINT32 *p_flag); +DPP_STATUS dpp_se_hash_tbl_depth_set( + DPP_DEV_T *dev, ZXIC_UINT32 hash_id, ZXIC_UINT32 hash_tbl0_depth, + ZXIC_UINT32 hash_tbl1_depth, ZXIC_UINT32 hash_tbl2_depth, + ZXIC_UINT32 hash_tbl3_depth, ZXIC_UINT32 hash_tbl4_depth, + ZXIC_UINT32 hash_tbl5_depth, ZXIC_UINT32 hash_tbl6_depth, + ZXIC_UINT32 hash_tbl7_depth); +DPP_STATUS dpp_se_hash_tbl_depth_get( + DPP_DEV_T *dev, ZXIC_UINT32 hash_id, ZXIC_UINT32 *hash_tbl0_depth, + ZXIC_UINT32 *hash_tbl1_depth, ZXIC_UINT32 *hash_tbl2_depth, + ZXIC_UINT32 *hash_tbl3_depth, ZXIC_UINT32 *hash_tbl4_depth, + ZXIC_UINT32 *hash_tbl5_depth, ZXIC_UINT32 *hash_tbl6_depth, + ZXIC_UINT32 *hash_tbl7_depth); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_car.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_car.h new file mode 100644 index 000000000000..0d75895e8353 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_car.h @@ -0,0 +1,999 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_stat_car.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : ls +* 完成日期 : 2016/04/05 +* 
DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_STAT_CAR_H_ +#define _DPP_STAT_CAR_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" +#include "dpp_stat_api.h" + +#define DPP_CAR_ID_MAX (1) + +#define DPP_CAR_DEBUG_SWITCH (0) + +#define DPP_CAR_A_FLOW_ID_MAX (0x7fff) /*car A支持动态配置暂取32K, 后续待修改*/ +#define DPP_CAR_B_FLOW_ID_MAX (0xfff) +#define DPP_CAR_C_FLOW_ID_MAX (0x3ff) +#define DPP_CAR_A_FLOW_ID_NUM (0x8000) /*car A支持动态配置暂取32K, 后续待修改*/ +#define DPP_CAR_B_FLOW_ID_NUM (0x1000) +#define DPP_CAR_C_FLOW_ID_NUM (0x400) + +#define DPP_CAR_PROFILE_ID_TOTAL ((0x200 + 0x80 + 0x20) * DPP_CAR_ID_MAX) + +#define DPP_CAR_PKT_PROFILE_ID_MAX (0x200) /* 512 */ +#define DPP_CAR_A_PROFILE_ID_MAX (0x1ff) +#define DPP_CAR_B_PROFILE_ID_MAX (0x7f) +#define DPP_CAR_C_PROFILE_ID_MAX (0x1f) + +#define DPP_CAR_B_PROFILE_ID_RANDOM_MAX (0x1f) +#define DPP_CAR_C_PROFILE_ID_RANDOM_MAX (0x7) +#define DPP_CAR_RANDOM_OFFSET_VAL (7) + +#define DPP_CAR_MAX_CBS_VALUE ((1 << 27) - 1) +#define DPP_CAR_MAX_EBS_VALUE ((1 << 27) - 1) +#define DPP_CAR_MAX_CIR_VALUE ((1 << 23) - 1) +#define DPP_CAR_MAX_EIR_VALUE ((1 << 23) - 1) +#define DPP_CAR_MAX_PKT_CIR_VALUE ((1 << 29) - 1) +#define DPP_CAR_MAX_PKT_CBS_VALUE ((1 << 13) - 1) +#define DPP_CAR_MAX_PRI_VALUE ((1 << 5) - 1) + +#define DPP_CAR_QUEUQ_CFG_TQ_LEN (64) +#define DPP_CAR_QUEUQ_CFG_TQ_HIGH_13BIT_POS (44) +#define DPP_CAR_QUEUQ_CFG_TQ_HIGH_13BIT_LEN (13) +#define DPP_CAR_QUEUQ_CFG_TQ_LOW_32BIT_POS (31) +#define DPP_CAR_QUEUQ_CFG_TQ_LOW_32BIT_LEN (32) + +#define DPP_CAR_PROFILE_CFG_ZXIC_UINT8 (28) +#define DPP_CAR_PROFILE_CFG_ZXIC_UINT32 (DPP_CAR_PROFILE_CFG_ZXIC_UINT8 >> 2) +#define DPP_CAR_PROFILE_CFG_WIDTH (DPP_CAR_PROFILE_CFG_ZXIC_UINT8 << 3) + +#define DPP_CAR_QUEUE_CFG_ZXIC_UINT8 (4) +#define DPP_CAR_QUEUE_CFG_WIDTH (DPP_CAR_QUEUE_CFG_ZXIC_UINT8 << 3) + +#define 
DPP_CAR_PROFILE_CFG_CAR_TYPE_POS (1) +#define DPP_CAR_PROFILE_CFG_CAR_TYPE_LEN (2) +#define DPP_CAR_PROFILE_CFG_PKT_SIGN_POS (2) +#define DPP_CAR_PROFILE_CFG_PKT_SIGN_LEN (1) +#define DPP_CAR_PROFILE_CFG_CD_POS (4) +#define DPP_CAR_PROFILE_CFG_CD_LEN (2) +#define DPP_CAR_PROFILE_CFG_CF_POS (5) +#define DPP_CAR_PROFILE_CFG_CF_LEN (1) +#define DPP_CAR_PROFILE_CFG_CM_POS (6) +#define DPP_CAR_PROFILE_CFG_CM_LEN (1) +#define DPP_CAR_PROFILE_CFG_EIR_POS (28) +#define DPP_CAR_PROFILE_CFG_EIR_LEN (22) +#define DPP_CAR_PROFILE_CFG_CIR_POS (50) +#define DPP_CAR_PROFILE_CFG_CIR_LEN (22) +#define DPP_CAR_PROFILE_CFG_EBS_POS (77) +#define DPP_CAR_PROFILE_CFG_EBS_LEN (27) +#define DPP_CAR_PROFILE_CFG_CBS_POS (104) +#define DPP_CAR_PROFILE_CFG_CBS_LEN (27) +#define DPP_CAR_PROFILE_CFG_C_PRI1_POS (139) +#define DPP_CAR_PROFILE_CFG_C_PRI1_LEN (5) +#define DPP_CAR_PROFILE_CFG_E_G_PRI1_POS (174) +#define DPP_CAR_PROFILE_CFG_E_G_PRI1_LEN (5) +#define DPP_CAR_PROFILE_CFG_E_Y_PRI0_POS (214) +#define DPP_CAR_PROFILE_CFG_E_Y_PRI0_LEN (5) + +#define DPP_CAR_PKT_PROFILE_CFG_CAR_TYPE_POS (1) +#define DPP_CAR_PKT_PROFILE_CFG_CAR_TYPE_LEN (2) +#define DPP_CAR_PKT_PROFILE_CFG_PKT_SIGN_POS (2) +#define DPP_CAR_PKT_PROFILE_CFG_PKT_SIGN_LEN (1) +#define DPP_CAR_PKT_PROFILE_CFG_CIR_POS (32) +#define DPP_CAR_PKT_PROFILE_CFG_CIR_LEN (30) +#define DPP_CAR_PKT_PROFILE_CFG_CBS_POS (46) +#define DPP_CAR_PKT_PROFILE_CFG_CBS_LEN (14) +#define DPP_CAR_PKT_PROFILE_CFG_C_PRI0_POS (86) +#define DPP_CAR_PKT_PROFILE_CFG_C_PRI0_LEN (5) + +#define DPP_CAR_QUEUE_CFG_CAR_TYPE_POS (1) +#define DPP_CAR_QUEUE_CFG_CAR_TYPE_LEN (2) +#define DPP_CAR_QUEUE_CFG_PKT_SIGN_POS (2) +#define DPP_CAR_QUEUE_CFG_PKT_SIGN_LEN (1) +#define DPP_CAR_QUEUE_CFG_QUEUE_ID_POS (16) +#define DPP_CAR_QUEUE_CFG_QUEUE_ID_LEN (14) + +/* car 算法模式 */ +typedef enum dpp_car_cd_mode_e { + CAR_CD_MODE_SRTCM = 0, + CAR_CD_MODE_TRTCM, + CAR_CD_MODE_MEF10_1, + CAR_CD_MODE_INVALID +} DPP_CAR_CD_MODE_E; + +/* car 读清模式*/ +typedef enum dpp_car_rd_mode_e { + 
CAR_READ_NOT_CLEAR = 0, + CAR_READ_AND_CLEAR = 1, +} DPP_CAR_RD_MODE_E; + +/* car 翻转模式 */ +typedef enum dpp_car_overflow_mode_e { + CAR_KEEP_COUNT = 0, + CAR_RE_COUNT = 1, +} DPP_CAR_OVERFLOW_MODE_E; + +/** QVOS翻转模式 */ +typedef enum dpp_car_qvos_mode_e { + CAR_QVOS_MODE_OVERFLOW_0 = 0, + CAR_QVOS_MODE_OVERFLOW_1 = 1, + CAR_QVOS_MODE_OVERFLOW_2 = 2, + CAR_QVOS_MODE_OVERFLOW_MAX +} DPP_CAR_QVOS_MODE_E; + +typedef enum dpp_car_en_mode_e { + DPP_CAR_EN_MODE_BOTH_EN = 0, + DPP_CAR_EN_MODE_A_EN = 1, + DPP_CAR_EN_MODE_A_B_EN = 2, + DPP_CAR_EN_MODE_INVALID +} DPP_CAR_EN_MODE_E; + +typedef enum dpp_car_cfg_operate_mode_e { + CAR_OPERATE_MODE_ADD = 0, + CAR_OPERATE_MODE_DEL = 1, + CAR_OPERATE_MODE_SRH = 2, + CAR_OPERATE_MODE_GET = 3, + CAR_OPERATE_MODE_MAX, +} DPP_CAR_CFG_OPERATE_MODE_E; + +typedef struct dpp_stat_car_dbg_cnt_t { + ZXIC_UINT32 pkt_input_total_cnt; + ZXIC_UINT32 pkt_input_green_cnt; + ZXIC_UINT32 pkt_input_yellow_cnt; + ZXIC_UINT32 pkt_input_red_cnt; + ZXIC_UINT32 pkt_output_total_cnt; + ZXIC_UINT32 pkt_output_green_cnt; + ZXIC_UINT32 pkt_output_yellow_cnt; + ZXIC_UINT32 pkt_output_red_cnt; + ZXIC_UINT32 pkt_fc_dbg_cnt; + ZXIC_UINT32 pkt_size_cnt; +} DPP_STAT_CAR_DBG_CNT_T; + +/** car 包监管模板参数设置的参数 */ +typedef struct dpp_stat_car_pkt_profile_cfg_t { + ZXIC_UINT32 profile_id; + ZXIC_UINT32 pkt_sign; + ZXIC_UINT32 cir; + ZXIC_UINT32 cbs; + ZXIC_UINT32 pri[DPP_CAR_PRI_MAX]; /**< @brief pri 0~7是有效值*/ +} DPP_STAT_CAR_PKT_PROFILE_CFG_T; + +/** car A包队列设置的参数 */ +typedef struct dpp_stat_car_a_pkt_queue_cfg_t { + ZXIC_UINT32 flow_id; + ZXIC_UINT32 drop_flag; + ZXIC_UINT32 plcr_en; + ZXIC_UINT32 profile_id; + ZXIC_UINT64 tq; + ZXIC_UINT64 dc; + ZXIC_UINT32 tc; +} DPP_STAT_CAR_A_PKT_QUEUE_CFG_T; + +/** car B 队列设置的参数 */ +typedef struct dpp_stat_car_b_queue_cfg_t { + ZXIC_UINT32 flow_id; + ZXIC_UINT32 drop_flag; + ZXIC_UINT32 plcr_en; + ZXIC_UINT32 profile_id; + ZXIC_UINT64 tq; + ZXIC_UINT32 tce_flag; + ZXIC_UINT32 tce; + ZXIC_UINT32 tc; + ZXIC_UINT32 te; +} 
DPP_STAT_CAR_B_QUEUE_CFG_T; + +/** car C 队列设置的参数 */ +typedef struct dpp_stat_car_c_queue_cfg_t { + ZXIC_UINT32 flow_id; + ZXIC_UINT32 drop_flag; + ZXIC_UINT32 plcr_en; + ZXIC_UINT32 profile_id; + ZXIC_UINT64 tq; + ZXIC_UINT32 tce_flag; + ZXIC_UINT32 tce; + ZXIC_UINT32 tc; + ZXIC_UINT32 te; +} DPP_STAT_CAR_C_QUEUE_CFG_T; + +/* profile配置键值 */ +typedef struct dpp_car_profile_rb_key_t { + /**carprofile: + * car_type[2]+pkt_sign[1]+cd[2]+cf[1]+cm[1]+eir[22]+cir[22]+ebs[27]+cbs[27]+cpri[35]+e_g_pri[35]+e_y_pri[40]+0[9]*/ + /**carpktprofile: + * car_type[2]+pkt_sign[1]+cir[30]+cbs[14]+c_pri[32]+0[115]*/ + ZXIC_UINT32 profile_cfg[DPP_CAR_PROFILE_CFG_ZXIC_UINT32]; + ZXIC_UINT32 + is_static; /* 是否静态,静态的profile必须由用户手动删除,待实现 */ + ZXIC_UINT32 use_count; +} DPP_CAR_PROFILE_RB_KEY_T; + +/* profile id与profile 配置节点关系键值 */ +typedef struct dpp_car_profile_id_rb_key_t { + ZXIC_UINT32 profile_id; + void *p_car_node; + ZXIC_UINT32 is_used; +} DPP_CAR_PROFILE_ID_RB_KEY_T; + +/* 队列和profile id绑定关系键值 */ +typedef struct dpp_car_queue_rb_key_t { /** carqueue + * car_type[2]+pkt_sign[1]+queue_id[14]+15[0]*/ + ZXIC_UINT8 profile_cfg[DPP_CAR_QUEUE_CFG_ZXIC_UINT8]; + ZXIC_UINT32 is_used; + ZXIC_UINT32 profile_id; +} DPP_CAR_QUEUE_RB_KEY_T; + +/* 红黑树管理 */ +typedef struct dpp_car_rb_mng_t { + ZXIC_UINT32 init_en; + ZXIC_UINT32 total_num; + ZXIC_UINT32 key_size; /** 单个红黑树节点空间的大小 */ + ZXIC_RB_CMPFUN p_cmpfun; + ZXIC_RB_CMPFUN p_id_cmpfun; + ZXIC_RB_CFG *p_plcr_rb; + ZXIC_RB_CFG *p_plcr_id_rb; /** 由id反查car的配置 的红黑树结构 */ + ZXIC_LISTSTACK_MANGER *p_liststack_mng; /** 索引分配 */ +} DPP_CAR_RB_MNG_T; + +/* profile管理 */ +typedef struct dpp_car_profile_mng_t { + ZXIC_UINT32 is_init; + DPP_CAR_RB_MNG_T *p_car_a_rb_profile_mng; + DPP_CAR_RB_MNG_T *p_car_b_rb_profile_mng; + DPP_CAR_RB_MNG_T *p_car_c_rb_profile_mng; +} DPP_CAR_PROFILE_MNG_T; + +/* 队列管理 */ +typedef struct dpp_car_queue_mng_t { + ZXIC_UINT32 is_init; + DPP_CAR_RB_MNG_T *p_car_rb_queue_mng; /** 不同的car*/ +} DPP_CAR_QUEUE_MNG_T; + +/* car配置信息 */ 
+typedef struct dpp_car_cfg_t { + ZXIC_UINT32 is_init[DPP_DEV_CHANNEL_MAX]; + ZXIC_UINT32 car0_mono_mode[DPP_DEV_CHANNEL_MAX]; + DPP_CAR_QUEUE_MNG_T *p_car_queue_mng[DPP_DEV_CHANNEL_MAX]; + DPP_CAR_PROFILE_MNG_T *p_car_profile_mng[DPP_DEV_CHANNEL_MAX]; +} DPP_CAR_CFG_T; + +/* car软复位需要存储的参数 */ +typedef struct dpp_car_soft_reset_item_t { + ZXIC_UINT32 flow_id; + ZXIC_UINT32 profile_id; +} DPP_CAR_SOFT_RESET_ITEM_T; + +typedef struct dpp_car_soft_reset_data_t { + ZXIC_UINT8 car_pkt_sign[DPP_CAR_PKT_PROFILE_ID_MAX]; /* */ + + ZXIC_UINT32 is_init; /* car是否初始化标志位 */ + ZXIC_UINT32 car0_pkt_num; /* */ + + ZXIC_UINT32 cara_flow_num; /* car0已配置过的流ID数目 */ + ZXIC_UINT32 carb_flow_num; /* car0已配置过的流ID数目 */ + ZXIC_UINT32 carc_flow_num; /* car0已配置过的流ID数目 */ + + DPP_CAR_SOFT_RESET_ITEM_T cara_item[DPP_CAR_A_FLOW_ID_NUM]; + DPP_CAR_SOFT_RESET_ITEM_T carb_item[DPP_CAR_B_FLOW_ID_NUM]; + DPP_CAR_SOFT_RESET_ITEM_T carc_item[DPP_CAR_C_FLOW_ID_NUM]; + +} DPP_CAR_SOFT_RESET_DATA_T; + +typedef struct dpp_car_random_ram_t { + ZXIC_UINT32 p1; /* 第一档丢弃概率(百分比),取值0-100,推荐为1(百分比) */ + ZXIC_UINT32 p2; /* 第二档丢弃概率(百分比),取值0-100,推荐为10(百分比) */ + ZXIC_UINT32 p3; /* 第三档丢弃概率(百分比),取值0-100,推荐为50(百分比) */ + ZXIC_UINT32 tc; /* 桶深,即CBS或EBS */ + ZXIC_UINT32 t1; /* 桶深的低水线,取值0-CBS/EBS,推荐是70%的桶深 */ + ZXIC_UINT32 t2; /* 桶深的中水线,取值0-CBS/EBS,推荐是85%的桶深 */ + ZXIC_UINT32 t3; /* 桶深的高水线,取值0-CBS/EBS,推荐是95%的桶深 */ +} DPP_CAR_RANDOM_RAM_T; + +typedef struct dpp_car_soft_reset_queue_t { + ZXIC_UINT32 car_type; + ZXIC_UINT32 flow_id; + ZXIC_UINT32 drop_flag; + ZXIC_UINT32 plcr_en; + ZXIC_UINT32 profile_id; + +} DPP_CAR_SOFT_RESET_QUEUE_T; + +/***********************************************************/ +/** car A包限速监管模板设定 +* @param dev_id 设备号 car号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_pkt_profile_cfg_set( + DPP_DEV_T *dev, 
ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_cara_profile_cfg); + +/***********************************************************/ +/** 获取car A 包长监管流配置 +* @param dev_id 设备号 car号 +* @param flow_id 流号 +* @param p_cara_queue_cfg car A流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_pkt_queue_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_A_PKT_QUEUE_CFG_T *p_cara_queue_cfg); + +/***********************************************************/ +/** 配置car B 概率丢弃配置参数 +* @param dev_id +* @param profile_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carb_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** 获取car B 概率丢弃配置参数 +* @param dev_id +* @param profile_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carb_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** 配置car C 概率丢弃配置参数 +* @param dev_id +* @param profile_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carc_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** 获取car B 概率丢弃配置参数 +* @param dev_id +* @param profile_id +* @param 
p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carc_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** 获取car的包长偏移 +* @param dev_id +* @param p_pkt_size_off +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_pkt_size_offset_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_pkt_size_off); + +/***********************************************************/ +/** 配置cara的最大包长 +* @param dev_id +* @param max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size); + +/***********************************************************/ +/** 获取cara的最大包长 +* @param dev_id +* @param p_max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size); + +/***********************************************************/ +/** 配置carb的最大包长 +* @param dev_id +* @param max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size); + +/***********************************************************/ +/** 获取carb的最大包长 +* @param dev_id +* @param p_max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 
+************************************************************/ +DPP_STATUS dpp_stat_carb_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size); + +/***********************************************************/ +/** 配置carc的最大包长 +* @param dev_id +* @param max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size); + +/***********************************************************/ +/** 获取carc的最大包长 +* @param dev_id +* @param p_max_pkt_size +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size); + +/***********************************************************/ +/** 获取最大包长 +* @param dev_id +* @param car_type +* @param p_max_pkt_len +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_max_pkt_size_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_max_pkt_len); + +/***********************************************************/ +/** 配置car的包长偏移 +* @param dev_id +* @param pkt_size_off +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_pkt_size_offset_set(DPP_DEV_T *dev, + ZXIC_UINT32 pkt_size_off); + +/***********************************************************/ +/** 配置最大包长 +* @param dev_id +* @param car_type +* @param max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_max_pkt_size_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, 
+ ZXIC_UINT32 max_pkt_size); + +/***********************************************************/ +/** car 模块流配置 +* @param dev_id +* @param car_type +* @param flow_id +* @param drop_flag +* @param plcr_en +* @param profile_id +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/06 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id); + +/***********************************************************/ +/** car profile硬件写入 +* @param dev_id +* @param car_type +* @param pkt_sign +* @param profile_id +* @param p_car_profile_cfg +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/06 +************************************************************/ +DPP_STATUS dpp_stat_car_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg); + +/***********************************************************/ +/** car 队列映射关系配置 +* @param dev_id 设备号 0:上行CAR限速 1:下行CAR限速 +* @param car_type 0:A级car,1:B级car,2:C级car +* @param flow_id 流号 +* @param map_flow_id 映射后的流号 +* @param map_sp 映射后的优先级 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_map_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp); + +/***********************************************************/ +/** +* @param dev_id +* @param car_type +* @param pkt_sign +* @param flow_id +* @param p_data +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, 
ZXIC_UINT32 flow_id, + ZXIC_VOID *p_data); +/***********************************************************/ +/** car 模块流配置获取 +* @param dev_id +* @param car_type +* @param flow_id +* @param p_drop_flag +* @param p_plcr_en +* @param p_profile_id +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/08/19 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_drop_flag, + ZXIC_UINT32 *p_plcr_en, + ZXIC_UINT32 *p_profile_id); + +DPP_STATUS dpp_stat_car_profile_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg); + +/***********************************************************/ +/** +* @param dev_id 设备ID car ID +* @param profile_id 模板ID +* @param p_random_ram_e E桶概率丢弃配置参数 +* @param p_random_ram_c C桶概率丢弃配置参数 +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_car_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** +* @param dev_id 设备ID car ID +* @param profile_id 模板ID +* @param p_random_ram_e E桶概率丢弃配置参数 +* @param p_random_ram_c C桶概率丢弃配置参数 +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_car_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c); + +/***********************************************************/ +/** 获取 car 流号的绑定关系 +* @param dev_id +* @param car_type +* @param flow_id +* @param p_map_flow_id +* @param p_map_sp +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* 
@see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_map_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp); + +/***********************************************************/ +/** car 模块调试计数 获取 +* @param dev_id +* @param car_type +* @param p_car_dbg_cnt +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 car_type, + DPP_STAT_CAR_DBG_CNT_T *p_car_dbg_cnt); +/***********************************************************/ +/** +* @param dev_id +* @param car_type +* @param overflow_mode +* @param rd_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + ZXIC_UINT32 overflow_mode, + ZXIC_UINT32 rd_mode); +/***********************************************************/ +/** +* @param dev_id +* @param car_type +* @param p_overflow_mode +* @param p_rd_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_overflow_mode, + ZXIC_UINT32 *p_rd_mode); + +/***********************************************************/ +/** car 模块初始化 +* @param dev_id +* @param p_car_cfg +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/05 +************************************************************/ +DPP_STATUS dpp_stat_car_init(ZXIC_UINT32 dev_id, DPP_CAR_CFG_T *p_car_cfg); + +/***********************************************************/ +/** STAT CAR复位获取全局变量大小函数 +* @param 
dev_id +* @param p_size +* +* @return +* @remark 无 +* @see +* @author yxh @date 2018/06/26 +************************************************************/ +DPP_STATUS dpp_stat_car_glb_size_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_size); + +/***********************************************************/ +/** STAT CAR复位设置全局变量函数 +* @param dev_id 设备号 +* @param size 大小,字节数 +* @param p_data_buff 全局变量数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2018/06/26 +************************************************************/ +DPP_STATUS dpp_stat_car_glb_mgr_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 size, + ZXIC_UINT8 *p_data_buff); + +/***********************************************************/ +/** STAT CAR复位获取全局变量函数 +* @param dev_id 设备号 +* @param p_flag 释放使能,1-需要手动free,0-不需要手动free +* @param p_size 数据大小 +* @param pp_data_buff 全局变量数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2018/06/26 +************************************************************/ +DPP_STATUS dpp_stat_car_glb_mgr_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_flag, + ZXIC_UINT32 *p_size, + ZXIC_UINT8 **pp_data_buff); + +DPP_STATUS dpp_stat_queue_rb_root_prt(ZXIC_RB_CFG *p_rb_cfg); +DPP_STATUS dpp_stat_car_profile_id_rb_root_prt(ZXIC_UINT32 dev_id, + ZXIC_RB_CFG *p_rb_cfg); + +/***********************************************************/ +/** 配置car A的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode); + +/***********************************************************/ +/** 获取car A的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 
+************************************************************/ +DPP_STATUS dpp_stat_cara_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_qvos_mode); + +/***********************************************************/ +/** car A指定队列模式配置,仅用于调试 +* @param dev_id 设备号 car编号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_appoint_mode_set(DPP_DEV_T *dev, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue); + +/***********************************************************/ +/** 获取car A指定队列模式的配置 +* @param dev_id 设备号 car编号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* @param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_appoint_mode_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue); + +/***********************************************************/ +/** 配置car B的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode); + +/***********************************************************/ +/** 获取car B的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 
2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_qvos_mode); + +/***********************************************************/ +/** car B指定队列模式配置,仅用于调试 +* @param dev_id 设备号 car编号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_appoint_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue); +/***********************************************************/ +/** 获取car B指定队列模式的配置 +* @param dev_id 设备号 car编号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* @param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_appoint_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue); + +/***********************************************************/ +/** 配置car C的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode); + +/***********************************************************/ +/** 获取car C的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* 
@author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_qvos_mode); + +/***********************************************************/ +/** car C指定队列模式配置,仅用于调试 +* @param dev_id 设备号 car编号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_appoint_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue); + +/***********************************************************/ +/** 获取car C指定队列模式的配置 +* @param dev_id 设备号 car编号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* @param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_appoint_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue); + +/***********************************************************/ +/** +* @param dev_id +* @param p_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/28 +************************************************************/ +DPP_STATUS dpp_stat_car_en_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_cfg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_cfg.h new file mode 100644 index 000000000000..a478cd7c889f --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/se/dpp_stat_cfg.h @@ -0,0 +1,187 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_stat_cfg.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : ls +* 完成日期 : 2016/03/29 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_STAT_CFG_H_ +#define _DPP_STAT_CFG_H_ + +#ifdef __cplusplus +extern "C" { +#endif +#include "dpp_stat_api.h" + +#if ZXIC_REAL("Variable definition") + +#define DPP_STAT_TM_PORT_MAX (4) +#define DPP_STAT_ETM_ADDR_MAX (9 * 1024) +#define DPP_STAT_FTM_ADDR_MAX (2048) +#define DPP_STAT_IND_WR_MODE (0) +#define DPP_STAT_IND_RD_MODE (1) +#define DPP_STAT_TM_MOV_PERIOD_MAX (0xff) + +#define DPP_STAT_WIDTH_3_MAX_VALUE ((1 << 3) - 1) +#define DPP_STAT_WIDTH_4_MAX_VALUE ((1 << 4) - 1) +#define DPP_STAT_PPU_ERAM_DEPTH_MAX (0x7ffff) +#define DPP_STAT_PPU_ERAM_BADDR_MAX (0x7ffff) +#define DPP_STAT_PPU_DDR_BADDR_MAX (0x4ffffff) + +#define DPP_STAT_OAM_ERAM_BADDR_MAX (0x7ffff) +#define DPP_STAT_OAM_DDR_BADDR_MAX (0x4ffffff) + +#define DPP_STAT_PLCR_ERAM_BADDR_MAX (0x7ffff) +#define DPP_STAT_PLCR_ID (0) + +#define DPP_STAT_TM_FLAG_FTM_PKT_EN (0) +#define DPP_STAT_TM_FLAG_ETM_PKT_EN (1) +#define DPP_STAT_TM_FLAG_ERAM_EN (2) + +#define DPP_STAT_PPU_STAT_CHANNEL_NUM (16) +#define DPP_STAT_PPU_MEX_NUM (6) +#define CMMU_DDR_DIR_CPY_NUM (15) + +/** stat TM类型 */ +typedef enum dpp_stat_tm_type_e { + DPP_STAT_TM_TYPE_ETM = 0, /*<@brief FTM模式 */ + DPP_STAT_TM_TYPE_FTM = 1, /*<@brief ETM模式 */ + DPP_STAT_TM_TYPE_MAX +} DPP_STAT_TM_TYPE_E; + +typedef enum dpp_stat_tm_store_mode_e { + DPP_STAT_TM_STORE_MODE_ERAM = 0, /* 片内存储模式 */ + DPP_STAT_TM_STORE_MODE_MIX = 1, /* 混合存储模式 */ + DPP_STAT_TM_STORE_MODE_MAX, +} DPP_STAT_TM_STORE_MODE_E; + +typedef enum dpp_stat_etm_depth_mode_e { + DPP_STAT_ETM_DEPTH_ERAM_1K = 0, + 
DPP_STAT_ETM_DEPTH_ERAM_4K = 1, + DPP_STAT_ETM_DEPTH_ERAM_5K = 2, + DPP_STAT_ETM_DEPTH_ERAM_8K = 3, + DPP_STAT_ETM_DEPTH_ERAM_9K = 4, + DPP_STAT_ETM_DEPTH_MIX_2K = 5, + DPP_STAT_ETM_DEPTH_MIX_8K = 6, + DPP_STAT_ETM_DEPTH_MIX_9K = 7, + DPP_STAT_ETM_DEPTH_MAX, +} DPP_STAT_ETM_DEPTH_MODE_E; + +typedef enum stat_store_mode_e { + STAT_STORE_MODE_IN_ERAM = 0, + STAT_STORE_MODE_IN_DDR = 1, + STAT_STORE_MODE_MAX, +} STAT_STORE_MODE_E; + +typedef enum stat_oam_type_e { + STAT_OAM_TYPE_ERAM = 0, + STAT_OAM_TYPE_LM_ERAM = 1, + STAT_OAM_TYPE_DDR = 2, + STAT_OAM_TYPE_MAX, +} STAT_OAM_TYPE_E; + +typedef struct dpp_stat_dbg_cnt_t { + ZXIC_UINT32 stat_to_smmu0_rsp_fc_cnt[DPP_STAT_PPU_STAT_CHANNEL_NUM]; + ZXIC_UINT32 stat_rcv_smmu0_req_fc_cnt[DPP_STAT_PPU_STAT_CHANNEL_NUM]; + ZXIC_UINT32 stat_to_ppu_req_fc_cnt[DPP_STAT_PPU_MEX_NUM]; + ZXIC_UINT32 stat_rcv_ppu_rsp_fc_cnt[DPP_STAT_PPU_MEX_NUM]; + ZXIC_UINT32 stat_rcv_se_etm_wr_fc_cnt; + ZXIC_UINT32 stat_rcv_se_etm_rd_fc_cnt; + ZXIC_UINT32 stat_rcv_se_ftm_wr_fc_cnt; + ZXIC_UINT32 stat_rcv_se_ftm_rd_fc_cnt; + ZXIC_UINT32 stat_to_etm_deq_fc_cnt; + ZXIC_UINT32 stat_to_etm_enq_fc_cnt; + ZXIC_UINT32 stat_to_ftm_deq_fc_cnt; + ZXIC_UINT32 stat_to_ftm_enq_fc_cnt; + ZXIC_UINT32 stat_to_oam_lm_fc_cnt; + ZXIC_UINT32 stat_rcv_oam_lm_fc_cnt; + ZXIC_UINT32 stat_to_oam_fc_cnt; + ZXIC_UINT32 stat_rcv_cmmu_fc_cnt; + ZXIC_UINT32 stat_to_cmmu_req_cnt; + ZXIC_UINT32 stat_rcv_smmu0_rsp_cnt[DPP_STAT_PPU_STAT_CHANNEL_NUM]; + ZXIC_UINT32 stat_to_smmu0_req_cnt[DPP_STAT_PPU_STAT_CHANNEL_NUM]; + ZXIC_UINT32 stat_plcr_rcv_smmu0_rsp1_cnt; + ZXIC_UINT32 stat_plcr_rcv_smmu0_rsp0_cnt; + ZXIC_UINT32 stat_plcr_to_smmu0_req1_cnt; + ZXIC_UINT32 stat_plcr_to_smmu0_req0_cnt; + ZXIC_UINT32 stat_to_ppu_mex_rsp_cnt[DPP_STAT_PPU_MEX_NUM]; + ZXIC_UINT32 stat_oam_lm_rsp_cnt; + ZXIC_UINT32 stat_rcv_oam_lm_req_cnt; + ZXIC_UINT32 stat_rcv_oam_req_cnt; + ZXIC_UINT32 stat_rcv_ppu_mex_key_cnt[DPP_STAT_PPU_MEX_NUM]; + ZXIC_UINT32 stat_rcv_se_etm_rsp_cnt; + ZXIC_UINT32 
stat_rcv_etm_se_wr_req_cnt; + ZXIC_UINT32 stat_rcv_etm_se_rd_req_cnt; + ZXIC_UINT32 stat_rcv_se_ftm_rsp_cnt; + ZXIC_UINT32 stat_to_ftm_se_wr_req_cnt; + ZXIC_UINT32 stat_to_ftm_se_rd_req_cnt; + ZXIC_UINT32 stat_rcv_ftm_smmu0_req_cnt0; + ZXIC_UINT32 stat_rcv_ftm_smmu0_req_cnt1; + ZXIC_UINT32 stat_rcv_etm_smmu0_req_cnt0; + ZXIC_UINT32 stat_rcv_etm_smmu0_req_cnt1; + ZXIC_UINT32 ppu_no_exist_opcd_ex_cnt[DPP_STAT_PPU_MEX_NUM]; + ZXIC_UINT32 stat_rcv_tm_eram_cpu_rsp_cnt; + ZXIC_UINT32 cpu_rd_eram_req_cnt; + ZXIC_UINT32 cpu_wr_eram_req_cnt; + ZXIC_UINT32 tm_stat_ddr_cpu_rsp_cnt; + ZXIC_UINT32 cpu_rd_ddr_req_cnt; + ZXIC_UINT32 cpu_wr_ddr_req_cnt; +} DPP_STAT_DBG_CNT_T; + +#endif + +/***********************************************************/ +/** 获取ppu统计片内深度 +* @param dev_id 设备号 +* @param p_ppu_eram_depth ppu统计片内深度 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_depth_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_eram_depth); + +/***********************************************************/ +/** 获取ppu统计 ERAM基地址 +* @param dev_id 设备号 +* @param p_ppu_eram_baddr ppu统计片内基地址 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_baddr_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_eram_baddr); + +/***********************************************************/ +/** 获取ppu统计 ddr基地址 +* @param dev_id 设备号 +* @param p_ppu_ddr_baddr ppu统计片外基地址 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_ddr_baddr_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_ddr_baddr); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt.h 
b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt.h new file mode 100644 index 000000000000..3801b2d25cf8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt.h @@ -0,0 +1,167 @@ +/********************************************************************* +* 版权所有 (C)2013, 深圳市中兴通讯股份有限公司。 +* +* 文件名称: +* 文件标识: +* 内容摘要: +* 其它说明: +* +* +* 当前版本: +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT : 100% +* 作 者: 王春雷10111187 +* 完成日期: 2013-9-2 +********************************************************************/ + +#ifndef _DPP_SDT_H_ +#define _DPP_SDT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_ppu.h" + +/** SDT属性表高32bit中各比特字段的位宽定义*/ +#define DPP_SDT_H_TBL_TYPE_BT_POS (29) +#define DPP_SDT_H_TBL_TYPE_BT_LEN (3) + +#define DPP_SDT_H_ERAM_MODE_BT_POS (26) +#define DPP_SDT_H_ERAM_MODE_BT_LEN (3) +#define DPP_SDT_H_ERAM_BASE_ADDR_BT_POS (7) +#define DPP_SDT_H_ERAM_BASE_ADDR_BT_LEN (19) +#define DPP_SDT_L_ERAM_TABLE_DEPTH_BT_POS (1) +#define DPP_SDT_L_ERAM_TABLE_DEPTH_BT_LEN (22) + +#define DPP_SDT_H_DDR3_BASE_ADDR_BT_POS (9) +#define DPP_SDT_H_DDR3_BASE_ADDR_BT_LEN (20) +#define DPP_SDT_H_DDR3_SHARE_TYPE_BT_POS (7) +#define DPP_SDT_H_DDR3_SHARE_TYPE_BT_LEN (2) +#define DPP_SDT_H_DDR3_RW_LEN_BT_POS (5) +#define DPP_SDT_H_DDR3_RW_LEN_BT_LEN (2) +#define DPP_SDT_H_DDR3_SDT_NUM_BT_POS (0) +#define DPP_SDT_H_DDR3_SDT_NUM_BT_LEN (5) +#define DPP_SDT_L_DDR3_SDT_NUM_BT_POS (29) +#define DPP_SDT_L_DDR3_SDT_NUM_BT_LEN (3) +#define DPP_SDT_L_DDR3_ECC_EN_BT_POS (28) +#define DPP_SDT_L_DDR3_ECC_EN_BT_LEN (1) + +#define DPP_SDT_H_HASH_ID_BT_POS (27) +#define DPP_SDT_H_HASH_ID_BT_LEN (2) +#define DPP_SDT_H_HASH_TABLE_WIDTH_BT_POS (25) +#define DPP_SDT_H_HASH_TABLE_WIDTH_BT_LEN (2) +#define DPP_SDT_H_HASH_KEY_SIZE_BT_POS (19) +#define DPP_SDT_H_HASH_KEY_SIZE_BT_LEN (6) +#define DPP_SDT_H_HASH_TABLE_ID_BT_POS (14) +#define DPP_SDT_H_HASH_TABLE_ID_BT_LEN (5) +#define DPP_SDT_H_LEARN_EN_BT_POS (13) +#define 
DPP_SDT_H_LEARN_EN_BT_LEN (1) +#define DPP_SDT_H_KEEP_ALIVE_BT_POS (12) +#define DPP_SDT_H_KEEP_ALIVE_BT_LEN (1) +#define DPP_SDT_H_KEEP_ALIVE_BADDR_BT_POS (0) +#define DPP_SDT_H_KEEP_ALIVE_BADDR_BT_LEN (12) +#define DPP_SDT_L_KEEP_ALIVE_BADDR_BT_POS (25) +#define DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN (7) +#define DPP_SDT_L_RSP_MODE_BT_POS (23) +#define DPP_SDT_L_RSP_MODE_BT_LEN (2) + +#define DPP_SDT_H_LPM_V46ID_BT_POS (28) +#define DPP_SDT_H_LPM_V46ID_BT_LEN (1) +#define DPP_SDT_H_LPM_RSP_MODE_BT_POS (0) +#define DPP_SDT_H_LPM_RSP_MODE_BT_LEN (2) +#define DPP_SDT_L_LPM_TABLE_DEPTH_BT_POS (1) +#define DPP_SDT_L_LPM_TABLE_DEPTH_BT_LEN (30) + +#define DPP_SDT_H_ETCAM_ID_BT_POS (27) +#define DPP_SDT_H_ETCAM_ID_BT_LEN (1) +#define DPP_SDT_H_ETCAM_KEY_MODE_BT_POS (25) +#define DPP_SDT_H_ETCAM_KEY_MODE_BT_LEN (2) +#define DPP_SDT_H_ETCAM_TABLE_ID_BT_POS (21) +#define DPP_SDT_H_ETCAM_TABLE_ID_BT_LEN (4) +#define DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_POS (19) +#define DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_LEN (2) +#define DPP_SDT_H_ETCAM_AS_EN_BT_POS (18) +#define DPP_SDT_H_ETCAM_AS_EN_BT_LEN (1) +#define DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_POS (0) +#define DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_LEN (18) +#define DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_POS (31) +#define DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN (1) +#define DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_POS (28) +#define DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_LEN (3) +#define DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_POS (1) +#define DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_LEN (20) + +#define DPP_SDT_L_CLUTCH_EN_BT_POS (0) +#define DPP_SDT_L_CLUTCH_EN_BT_LEN (1) + +#define DPP_SDT_TBL_TYPE_NUM (8) + +/***********************************************************/ +/** 解析从硬件读取的64bit SDT属性 +* @param sdt_hig32 硬件表中存储的SDT属性高32bit +* @param sdt_low32 硬件表中存储的SDT属性低32bit +* @param p_sdt_info 解析之后的SDT属性,根据SDT属性中的table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR3_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、\n +* DPP_SDTTBL_ETCAM_T、DPP_SDTTBL_PORTTBL_T。 +* +* @return 
DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_data_parser(DPP_DEV_T *dev, ZXIC_UINT32 sdt_hig32, + ZXIC_UINT32 sdt_low32, + ZXIC_VOID *p_sdt_info); + +/***********************************************************/ +/** 从软件缓存中获取table data信息 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号,0-255 +* @param p_sdt_data sdt表信息 +* +* @return +* @remark 无 +* @see +* @author lim @date 2020/04/16 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_data_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + DPP_SDT_TBL_DATA_T *p_sdt_data); + +/***********************************************************/ +/** 将sdt信息保存在软件缓存中 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param table_type SDT属性中的表类型,取值参考DPP_SDT_TABLE_TYPE_E的定义 +* @param p_sdt_info 写入的SDT表信息。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lim @date 2020/04/16 +************************************************************/ +DPP_STATUS dpp_soft_sdt_tbl_set(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 table_type, + DPP_SDT_TBL_DATA_T *p_sdt_info); + +/***********************************************************/ +/** 从软件缓存中获取sdt信息 +* @param device_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param p_sdt_info 写入的SDT属性。由table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、 +* DPP_SDTTBL_ETCAM_T。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lim @date 2020/04/16 +************************************************************/ +DPP_STATUS dpp_soft_sdt_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_VOID *p_sdt_info); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_def.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_def.h new file mode 100644 index 
000000000000..ee1c098a7d2f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_def.h @@ -0,0 +1,129 @@ +/********************************************************************* +* 版权所有 (C)2001, 深圳市中兴通讯股份有限公司。 +* +* 文件名称: +* 文件标识: +* 内容摘要: +* 其它说明: +* +* +* 当前版本: +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT : 100% +* 作 者: 石金锋 +* 完成日期:2012-09-21 +********************************************************************/ +#ifndef _DPP_SDT_DEF_H_ +#define _DPP_SDT_DEF_H_ + +typedef enum dpp_tbl_type_e { + TblType_Invalid = 0, + TblType_Eram128 = 1, + TblType_DDR3 = 2, + TblType_HASH = 3, + TblType_LPM = 4, + TblType_eTcam = 5, + TblType_PORTTBL = 6, + TblType_MAX = 7 +} DPP_TBL_TYPE_E; + +typedef enum dpp_eram128_mode_e { + Eram128Mode_1BITS = 0, + Eram128Mode_32BITS = 1, + Eram128Mode_64BITS = 2, + Eram128Mode_128BITS = 3, + Eram128Mode_2BITS = 4, /** Add By LiuShuo @ 2016年2月24日15:56:39*/ + Eram128Mode_4BITS = 5, + Eram128Mode_8BITS = 6, + Eram128Mode_16BITS = 7, + Eram128Mode_MAX = 8 +} DPP_ERAM128_MODE_E; + +typedef enum dpp_dde3_mode_e { + DDR3Mode_128BITS = 0, + DDR3Mode_256BITS = 1, + DDR3Mode_512BITS = 2, + DDR3Mode_MAX = 3 +} DPP_DDR3_MODE_E; +/** DDR3 共享模式 */ +typedef enum dpp_ddr3_share_mode_e { + DDR_SHARE_MODE_NONE = 0, /** <@brief 不共享模式 */ + DDR_SHARE_MODE_1_2 = 1, /** <@brief 1/2共享模式 */ + DDR_SHARE_MODE_1_4 = 2, /** <@brief 1/4共享模式 */ + DDR_SHARE_MODE_1_8 = 3, /** <@brief 1/8共享模式 */ + DDR_SHARE_MODE_MAX = 4, /** <@brief 不可用 */ +} DPP_DDR3_SHARE_MODE_E; + +typedef enum dpp_ddr3_copy_type_e { + DDR3_COPY_TYPE_INN_0 = 0, + DDR3_COPY_TYPE_INN_2 = 1, + DDR3_COPY_TYPE_INN_4 = 2, + DDR3_COPY_TYPE_INN_8 = 3, + DDR3_COPY_TYPE_OUT_0 = 4, + DDR3_COPY_TYPE_OUT_1 = 5, + DDR3_COPY_TYPE_OUT_2 = 7, +} DPP_DDR3_COPY_TYPE_E; + +/** 仿真器表项操作特殊定制,仅用于操作仿真器设备*/ +#define HASH_SIM_ADD_ADDR (0xFFFFFFF4) +#define HASH_SIM_DEL_ADDR (0xFFFFFFF8) +#define LPM_SIM_ADD_ADDR (0xFFFFFFE4) +#define LPM_SIM_DEL_ADDR (0xFFFFFFE8) +#define 
ETCAM_SIM_ADD_ADDR (0xFFFFFFD4) +#define ETCAM_SIM_DEL_ADDR (0xFFFFFFD8) + +typedef struct dpp_sdt_smmu0_t { + ZXIC_UINT32 *p_data; /*输入,输出数据地址 */ + ZXIC_UINT32 wr_rd_flag; /*0: Write 1: Read */ + ZXIC_UINT32 tbl_index; /*表项索引 */ + ZXIC_UINT32 tbl_base_addr; /*表项基地址 */ + ZXIC_UINT32 mode; /*参见DPP_ERAM128_MODE_E */ + ZXIC_UINT32 tbl_depth; /*表项的深度,越界检测 */ +} DPP_SDT_SMMU0_T; + +typedef struct dpp_sdt_smmu1_t { + ZXIC_UINT32 *p_data; /*输入,输出数据地址 */ + ZXIC_UINT32 crc_chk_en; /*Crc效验使能 */ + ZXIC_UINT32 wr_rd_flag; /*0: Write 1: Read */ + ZXIC_UINT32 ddr_share_type; /*参见DPP_DDR3_SHARE_MODE_E */ + ZXIC_UINT32 tbl_index; /*表项索引 */ + ZXIC_UINT32 tbl_base_addr; /*DDR3片内基地址 */ + ZXIC_UINT32 ddr_mode; /*参见DPP_DDR3_MODE_E */ + ZXIC_UINT32 sdt_no; /*SDT表号 */ +} DPP_SDT_SMMU1_T; + +typedef struct dpp_sdt_hash_t { + ZXIC_UINT32 id; /* hash引擎号 */ + ZXIC_UINT32 tbl_id; /* hash业务号 */ + ZXIC_UINT32 *p_data; + ZXIC_UINT32 addr; + ZXIC_UINT32 length; /* 数据长度,以4字节为单位 */ + ZXIC_UINT32 key_size; /* 键值长度,以字节为单位 */ + ZXIC_UINT32 key_type; /* 表项存储位宽 */ + ZXIC_UINT32 rsp_mode; + ZXIC_UINT32 wr_rd_flag; /* 0: Write 1: Read */ +} DPP_SDT_HASH_T; + +typedef struct dpp_sdt_lpm_t { + ZXIC_UINT32 *p_data; + ZXIC_UINT32 addr; + ZXIC_UINT32 length; /* 数据长度,以4字节为单位 */ + ZXIC_UINT32 wr_rd_flag; /* 0: Write 1: Read */ + ZXIC_UINT32 v46_flag; /* 1:ipv4, 0:ipv6 */ +} DPP_SDT_LPM_T; + +typedef struct dpp_sdt_etcam_t { + ZXIC_UINT32 id; /* etcam号 */ + ZXIC_UINT32 tbl_id; /* 业务号 */ + ZXIC_UINT32 *p_data; + ZXIC_UINT32 length; /* 数据长度,以4字节为单位 */ + ZXIC_UINT32 addr; + ZXIC_UINT32 wr_rd_flag; /* 0: Write 1: Read */ + ZXIC_UINT32 key_mode; + ZXIC_UINT32 rsp_mode; + ZXIC_UINT32 as_en; + ZXIC_UINT32 as_eram_baddr; + ZXIC_UINT32 as_rsp_mode; +} DPP_SDT_ETCAM_T; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_mgr.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_mgr.h new file mode 100644 index 000000000000..833192959174 --- /dev/null 
+++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/sdt/dpp_sdt_mgr.h @@ -0,0 +1,142 @@ +/********************************************************************* +* 版权所有 (C)2013, 深圳市中兴通讯股份有限公司。 +* +* 文件名称: +* 文件标识: +* 内容摘要: +* 其它说明: +* +* +* 当前版本: +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT : 100% +* 作 者: 王春雷10111187 +* 完成日期: 2013-9-2 +********************************************************************/ + +#ifndef _DPP_SDT_MGR_H_ +#define _DPP_SDT_MGR_H_ + +#include "dpp_sdt_def.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define DPP_SDT_CFG_LEN (2) + +#define DPP_SDT_VALID (1) +#define DPP_SDT_INVALID (0) + +#define DPP_SDT_WRITE (0) +#define DPP_SDT_READ (1) + +#define DPP_SDT_MAX_BUFF (18) + +#define DPP_TBL_DATA_MAX (50) + +typedef enum dpp_opr_type_e { + DPP_TABLE_UPDATE = 0, + DPP_TABLE_DELETE = 1, + DPP_TABLE_SEARCH = 2, +} DPP_OPR_TYEP_E; + +/** eRam SDT变量获取中间结构 */ +typedef struct dpp_eram128_params_t { + ZXIC_UINT32 tbl_base_addr; /** 表项基地址,128bit为单位 */ + DPP_ERAM128_MODE_E eram128_mode; /** 表项位宽模式 */ + ZXIC_UINT32 tbl_depth; /** 表项深度值。越界检测 */ + ZXIC_UINT32 count; /** 表示写入数据的长度,以32bit为单位*/ +} DPP_ERAM128_PARAMS_T; + +/** DDR3 SDT变量获取中间结构 */ +typedef struct dpp_ddr3_params_t { + ZXIC_UINT32 base_addr; /** 表项基地址,以4K*128bit 为单位 */ + ZXIC_UINT32 crc_check; /** ECC校验使能位 */ + DPP_DDR3_MODE_E ddr3_mode; /** DDR位宽模式 */ + ZXIC_UINT32 tbl_share_mode; /** bank 共享模式 */ + ZXIC_UINT32 wr_rd_count; /** 表示写入数据的长度,以32bit为单位 */ +} DPP_DDR3_PARAMS_T; + +typedef enum dpp_hash_key_width_e { + HashKey_Invalid = 0, + HashKey_128b, + HashKey_256b, + HashKey_512b, + HashKey_MAX +} DPP_HASH_KEY_WIDTH_E; + +/** HASH SDT变量获取中间结构 */ +typedef struct dpp_hash_params_t { + ZXIC_UINT8 hash_id; /** hash引擎号 */ + ZXIC_UINT8 key_tbl_width; /** hash表项存储位宽, */ + ZXIC_UINT8 key_size; /** hash键值长度,以8bit为单位,1~48 */ + ZXIC_UINT8 table_id; /** hash逻辑表号 */ + ZXIC_UINT8 rsp_mode; /** 返回数据宽度 0:32 1:64 2:128 3:256*/ +} DPP_HASH_PARAMS_T; + +typedef struct dpp_lpm_params_t 
{ + ZXIC_UINT8 v46_flag; /* 1:Ipv4, 0:Ipv6 */ + ZXIC_UINT8 count; /* 以4字节为单位 */ + ZXIC_UINT8 pad[2]; /* 字节对齐 */ +} DPP_LPM_PARAMS_T; + +typedef struct dpp_etcam_params_t { + ZXIC_UINT8 id; + ZXIC_UINT8 table_id; + ZXIC_UINT8 key_mode; + ZXIC_UINT8 rsp_mode; + ZXIC_UINT8 as_en; + ZXIC_UINT32 as_baddr; + ZXIC_UINT8 as_rsp_mode; +} DPP_ETCAM_PARAMS_T; + +typedef ZXIC_UINT32 (*dpp_sdt_mgr_smmu0_mux_fun_ptr)( + ZXIC_UINT32 dev_id, DPP_SDT_SMMU0_T *p_sdt_smmu0); +typedef ZXIC_UINT32 (*dpp_sdt_mgr_smmu1_mux_fun_ptr)( + ZXIC_UINT32 dev_id, DPP_SDT_SMMU1_T *p_sdt_smmu1); +typedef ZXIC_UINT32 (*dpp_sdt_mgr_hash_mux_fun_ptr)(ZXIC_UINT32 dev_id, + DPP_SDT_HASH_T *p_sdt_hash); +typedef ZXIC_UINT32 (*dpp_sdt_mgr_lpm_mux_fun_ptr)(ZXIC_UINT32 dev_id, + DPP_SDT_LPM_T *p_sdt_lpm); +typedef ZXIC_UINT32 (*dpp_sdt_mgr_etcam_mux_fun_ptr)( + ZXIC_UINT32 dev_id, DPP_SDT_ETCAM_T *p_sdt_etcam); + +typedef struct dpp_sdt_item_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 table_cfg[DPP_SDT_CFG_LEN]; +} DPP_SDT_ITEM_T; + +typedef struct dpp_sdt_soft_table_t { + ZXIC_UINT32 device_id; + DPP_SDT_ITEM_T sdt_array[DPP_PCIE_SLOT_MAX][DPP_DEV_SDT_ID_MAX]; +} DPP_SDT_SOFT_TABLE_T; + +typedef struct dpp_sdt_mgr_t { + ZXIC_UINT32 channel_num; + ZXIC_UINT32 is_init; + DPP_SDT_SOFT_TABLE_T *sdt_tbl_array[DPP_DEV_CHANNEL_MAX]; + dpp_sdt_mgr_smmu0_mux_fun_ptr p_sdt_mgr_smmu0_mux; + dpp_sdt_mgr_smmu1_mux_fun_ptr p_sdt_mgr_smmu1_mux; + dpp_sdt_mgr_hash_mux_fun_ptr p_sdt_mgr_hash_mux; + dpp_sdt_mgr_lpm_mux_fun_ptr p_sdt_mgr_lpm_mux; + dpp_sdt_mgr_etcam_mux_fun_ptr p_sdt_mgr_etcam_mux; +} DPP_SDT_MGR_T; + +ZXIC_UINT32 dpp_sdt_mgr_init(ZXIC_VOID); +ZXIC_UINT32 dpp_sdt_mgr_create(ZXIC_UINT32 dev_id); +ZXIC_UINT32 dpp_sdt_mgr_destroy(ZXIC_UINT32 dev_id); + +DPP_STATUS dpp_sdt_mgr_sdt_item_add(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 sdt_hig32, + ZXIC_UINT32 sdt_low32); +DPP_STATUS dpp_sdt_mgr_sdt_item_srh(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 *p_sdt_hig32, + ZXIC_UINT32 *p_sdt_low32); +DPP_STATUS 
dpp_sdt_mgr_sdt_item_del(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_acl.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_acl.h new file mode 100644 index 000000000000..83ecdb05d0a2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_acl.h @@ -0,0 +1,112 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_acl.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/12/17 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_ACL_H_ +#define _DPP_ACL_H_ + +#include "dpp_se_api.h" + +#define DPP_ACL_TBL_ID_MIN (0) +#define DPP_ACL_TBL_ID_MAX (7) +#define DPP_ACL_ETCAM_ID_MIN (0) +#define DPP_ACL_ETCAM_ID_MAX (0) + +#define DPP_ACL_ENTRY_MAX_GET(key_mode, block_num) \ + ((block_num)*DPP_ETCAM_RAM_DEPTH * (1U << (key_mode))) + +#define DPP_ACL_AS_RSLT_SIZE_GET(mode) \ + (((mode) == DPP_ACL_AS_MODE_128b) ? \ + (128 / 8) : \ + (((mode) == DPP_ACL_AS_MODE_64b) ? \ + (64 / 8) : \ + (((mode) == DPP_ACL_AS_MODE_32b) ? \ + (32 / 8) : \ + (((mode) == DPP_ACL_AS_MODE_16b) ? 
\ + (16 / 8) : \ + (0))))) + +/** 仅内部调试接口使用*/ +#define DPP_ACL_AS_RSLT_SIZE_GET_EX(mode) (2U << (mode)) + +/** ACL关联查找结果表位宽模式,仅调试用*/ +typedef enum dpp_acl_as_mode_ex_e { + DPP_ACL_AS_MODE_EX_64b = 1, /**< @brief 64bit结果位宽*/ + DPP_ACL_AS_MODE_EX_128b = 2, /**< @brief 128bit结果位宽*/ + DPP_ACL_AS_MODE_EX_256b = + 3, /**< @brief 256bit结果位宽,仅当关联结果表为DDR时有效*/ + DPP_ACL_AS_MODE_EX_INVALID, +} DPP_ACL_AS_MODE_EX_E; + +/***********************************************************/ +/** 初始化ACL公共管理数据结构 +* @param p_acl_cfg ACL公共管理数据结构指针 +* @param p_client 用户自定义数据指针,目前仅为传入dev_id的值 +* @param flags ACL初始化使能标志,详见DPP_ACL_FLAG_ETCAM0_EN等的定义 +* @param p_as_wrt_fun 关联结果G从布s表回调函数指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_acl_cfg_init(DPP_ACL_CFG_T *p_acl_cfg, ZXIC_VOID *p_client, + ZXIC_UINT32 flags, + ACL_AS_RSLT_WRT_FUNCTION p_as_wrt_fun); + +/***********************************************************/ +/** 获取ACL公共管理数据结构 +* @param p_acl_cfg ACL公共管理配置结构指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +************************************************************/ +DPP_STATUS dpp_acl_cfg_get(DPP_DEV_T *dev, DPP_ACL_CFG_EX_T **p_acl_cfg); + +/***********************************************************/ +/** 设置ACL全局配置 +* @param p_acl_cfg ACL公共管理数据结构指针 +* +* @return +************************************************************/ +ZXIC_VOID dpp_acl_cfg_set(DPP_DEV_T *dev, DPP_ACL_CFG_EX_T *p_acl_cfg); + +/***********************************************************/ +/** 初始化ACL业务表属G? 
+* @param p_acl_cfg ACL公共管理数据结构指针 +* @param table_id 业务表号,取值范围0~15 +* @param as_enable 是否使能关联结果查找,0-不使能,1-使能 +* @param entry_num 最大条目数 +* @param key_mode 键值位宽模式,取值参照DPP_ACL_KEY_MODE_E的定义 +* @param as_mode 关联结果位宽模式,取值参照DPP_ACL_AS_MODE_E的定义 +* @param block_num 分配给当前业务表号的block数目 +* @param p_block_idx 分配给当前业务表号的block编号数组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_acl_tbl_init(DPP_ACL_CFG_T *p_acl_cfg, ZXIC_UINT32 table_id, + ZXIC_UINT32 as_enable, ZXIC_UINT32 entry_num, + DPP_ACL_KEY_MODE_E key_mode, + DPP_ACL_AS_MODE_E as_mode, ZXIC_UINT32 block_num, + ZXIC_UINT32 *p_block_idx); + +DPP_STATUS dpp_acl_hdw_addr_get(DPP_ACL_TBL_CFG_T *p_tbl_cfg, + ZXIC_UINT32 handle, ZXIC_UINT32 *p_block_idx, + ZXIC_UINT32 *p_addr, ZXIC_UINT32 *p_wr_mask); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_dtb_table.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_dtb_table.h new file mode 100644 index 000000000000..0e9dcab27d4c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_dtb_table.h @@ -0,0 +1,843 @@ +#ifndef _DPP_DTB_TABLE_H_ +#define _DPP_DTB_TABLE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" +#include "dpp_hash.h" +#include "dpp_etcam.h" +#include "dpp_dtb_table_api.h" + +#define DISABLE (0) +#define ENABLE (1) + +#define DTB_DOWN_TABLE_CMD (0) +#define DTB_DUMP_TABLE_CMD (1) + +#define DTB_QUEUE_MAX (128) +#define DTB_QUEUE_ELEMENT_MAX (32) +#define DTB_DATA_SIZE_BIT (16 * 1024 * 8) +#define DPP_DTB_TABLE_DATA_BUFF_SIZE (1024 * 16) +#define DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE (1024 * 4) +#define DTB_TABLE_CMD_SIZE_BIT (128) +#define DTB_TABLE_CMD_SIZE_BYTE (16) +#define DTB_ERAM_DATA_SIZE_1b (128) +#define DTB_ERAM_DATA_SIZE_64b (128) +#define DTB_ERAM_DATA_SIZE_128b (256) +#define DTB_ERAM_ENTRY_CNT_MAX_1b (DTB_DATA_SIZE_BIT 
/ DTB_ERAM_DATA_SIZE_1b) +#define DTB_ERAM_ENTRY_CNT_MAX_64b (DTB_DATA_SIZE_BIT / DTB_ERAM_DATA_SIZE_64b) +#define DTB_ERAM_ENTRY_CNT_MAX_128b \ + (DTB_DATA_SIZE_BIT / DTB_ERAM_DATA_SIZE_128b) +#define DTB_ZCAM_LEN_SIZE (5) /*单位16字节*/ +#define DTB_ETCAM_LEN_SIZE (6) /*单位16字节*/ +#define DTB_MC_HASH_LEN_SIZE (5) /*单位16字节*/ +#define DTB_ZCAM_DATA_SIZE ((ZXIC_UINT32)(64)) +#define DTB_DMUP_DATA_MAX ((ZXIC_UINT32)(4 * 1024 * 1024)) +#define DTB_DUMP_DDR_ITEMS_MAX (0x10000) + +#define DTB_SDT_DUMP_SIZE (0x400000) //4MB + +#define DTB_TABLE_VALID (1) +#define DTB_LEN_POS_SETP (16) /*DTB len 单位16字节 */ + +#define LPM_IPV4 (1) +#define LPM_IPV6 (0) +#define LPM_ENABLE (1) +#define LPM_DISABLE (0) + +#define DTB_TABLE_MODE_ERAM (0) +#define DTB_TABLE_MODE_DDR (1) +#define DTB_TABLE_MODE_ZCAM (2) +#define DTB_TABLE_MODE_ETCAM (3) +#define DTB_TABLE_MODE_MC_HASH (4) + +#define DTB_DUMP_MODE_ERAM (0) +#define DTB_DUMP_MODE_DDR (1) +#define DTB_DUMP_MODE_ZCAM (2) +#define DTB_DUMP_MODE_ETCAM (3) + +#define DTB_ITEM_ADD_OR_UPDATE (0) +#define DTB_ITEM_DELETE (1) + +/* DTB 表信息管理 */ +/*表顺序与g_dpp_dtb_table_info中顺序一致*/ +typedef enum dpp_dtb_table_info_e { + DTB_TABLE_DDR = 0, + DTB_TABLE_ERAM_1 = 1, + DTB_TABLE_ERAM_64 = 2, + DTB_TABLE_ERAM_128 = 3, + DTB_TABLE_ZCAM = 4, + DTB_TABLE_ETCAM = 5, + DTB_TABLE_MC_HASH = 6, + DTB_TABLE_ENUM_MAX +} DPP_DTB_TABLE_INFO_E; + +typedef enum dpp_dtb_dump_info_e { + DTB_DUMP_ERAM = 0, + DTB_DUMP_DDR = 1, + DTB_DUMP_ZCAM = 2, + DTB_DUMP_ETCAM = 3, + DTB_DUMP_ENUM_MAX +} DPP_DTB_DUMP_INFO_E; + +typedef enum dpp_dtb_dump_zcam_width_e { + DTB_DUMP_ZCAM_128b = 0, + DTB_DUMP_ZCAM_256b = 1, + DTB_DUMP_ZCAM_512b = 2, + DTB_DUMP_ZCAM_RSV = 3, +} DPP_DTB_DUMP_ZCAM_WIDTH_E; + +typedef enum dpp_dtb_dump_etcam_width_e { + DTB_DUMP_ETCAM_80b = 0, + DTB_DUMP_ETCAM_160b = 1, + DTB_DUMP_ETCAM_320b = 2, + DTB_DUMP_ETCAM_640b = 3, + DTB_DUMP_ETCAM_MAX +} DPP_DTB_DUMP_ETCAM_WIDTH_E; + +/* DTB下表格式字段定义 */ +typedef struct dpp_dtb_ddr_table_form_t { + ZXIC_UINT32 valid; 
/* 有效标识 1有效*/ + ZXIC_UINT32 type_mode; /* DDR:0x1 */ + ZXIC_UINT32 rw_len; /*数据长度 00:128 01:256 10:384 11:512*/ + ZXIC_UINT32 v46_flag; /*1:IPV4 0:IPV6*/ + ZXIC_UINT32 lpm_wr_vld; /*lpm表写有效标识*/ + ZXIC_UINT32 baddr; /*表基地址*/ + ZXIC_UINT32 ecc_en; /* ECC 使能*/ + ZXIC_UINT32 rw_addr; /*以数据宽度为单位的index*/ +} DPP_DTB_DDR_TABLE_FORM_T; + +typedef struct dpp_dtb_eram_table_form_t { + ZXIC_UINT32 valid; /* 有效标识 1有效*/ + ZXIC_UINT32 type_mode; /* ERAM:0x0 */ + ZXIC_UINT32 data_mode; /*数据长度 00:128 01:64 10:1*/ + ZXIC_UINT32 cpu_wr; /*CPU写使能*/ + ZXIC_UINT32 cpu_rd; /*CPU读使能*/ + ZXIC_UINT32 cpu_rd_mode; /*CPU读模式 0:读 1:读清*/ + ZXIC_UINT32 addr; /*访问eram 1bit为单位*/ + ZXIC_UINT32 data_h; /*数据高32bit*/ + ZXIC_UINT32 data_l; /*数据低32bit*/ +} DPP_DTB_ERAM_TABLE_FORM_T; + +typedef struct dpp_dtb_zcam_table_form_t { + ZXIC_UINT32 valid; /* 有效标识 1有效*/ + ZXIC_UINT32 type_mode; /* zcam:0x2 */ + ZXIC_UINT32 ram_reg_flag; /* ram reg 标识 */ + ZXIC_UINT32 zgroup_id; /* zgroup id */ + ZXIC_UINT32 zblock_id; /* zblock id */ + ZXIC_UINT32 zcell_id; /* zcell id */ + ZXIC_UINT32 mask; /* 掩码 */ + ZXIC_UINT32 sram_addr; /* ram地址 */ + +} DPP_DTB_ZCAM_TABLE_FORM_T; + +typedef struct dpp_dtb_etcam_table_form_t { + ZXIC_UINT32 valid; /* 有效标识 1有效*/ + ZXIC_UINT32 type_mode; /* etcam:0x3 */ + ZXIC_UINT32 block_sel; /*block索引 0 - 7*/ + ZXIC_UINT32 init_en; /*初始化使能 高有效*/ + ZXIC_UINT32 + row_or_col_msk; /* 1 write row mask reg 0:write col mask reg*/ + ZXIC_UINT32 vben; /* enable the valid bit addressed by addr*/ + ZXIC_UINT32 reg_tcam_flag; /* 1:配置内部row_col_mask寄存器 0:读写tcam*/ + ZXIC_UINT32 + uload; /*使能标识删除对应addr的表项条目,(80bit为单位,含义与wr_mode一一对应)*/ + ZXIC_UINT32 rd_wr; /*读写标志 0写 1读*/ + ZXIC_UINT32 + wr_mode; /*写入掩码,最高8bit,对应bit为1代表对应的80bit的数据*/ + ZXIC_UINT32 data_or_mask; /*数据或掩码标志 1:写x(data),0:写y(mask)*/ + ZXIC_UINT32 addr; /*etcam地址(0-511)*/ + ZXIC_UINT32 vbit; /*valid bit input*/ + +} DPP_DTB_ETCAM_TABLE_FORM_T; + +typedef struct dpp_dtb_mc_hash_table_form_t { + ZXIC_UINT32 valid; /* 有效标识 1有效 */ + ZXIC_UINT32 
type_mode; /* 微码写hash 0x4 */ + ZXIC_UINT32 std_h; /* sdt信息高32bit */ + ZXIC_UINT32 std_l; /* sdt信息低32bit */ +} DPP_DTB_MC_HASH_TABLE_FORM_T; + +/* DTB DUMP表格式 */ +typedef struct dpp_dtb_eram_dump_form_t { + ZXIC_UINT32 valid; /* 有效标识 1有效 */ + ZXIC_UINT32 up_type; /* 00:eram */ + ZXIC_UINT32 base_addr; /* 128bit为单位 */ + ZXIC_UINT32 tb_depth; /* 表项深度,每条条目位宽128bit */ + ZXIC_UINT32 tb_dst_addr_h; /* 数据目的地址高32bit */ + ZXIC_UINT32 tb_dst_addr_l; /* 数据目的地址低32bit */ + +} DPP_DTB_ERAM_DUMP_FORM_T; + +typedef struct dpp_dtb_ddr_dump_form_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 up_type; /* 01:ddr */ + ZXIC_UINT32 base_addr; /* 128bit为单位 */ + ZXIC_UINT32 tb_depth; /* 表项深度,每条条目位宽512bit */ + ZXIC_UINT32 tb_dst_addr_h; + ZXIC_UINT32 tb_dst_addr_l; + +} DPP_DTB_DDR_DUMP_FORM_T; + +typedef struct dpp_dtb_zcam_dump_form_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 up_type; /* 10:zcam */ + ZXIC_UINT32 zgroup_id; /* */ + ZXIC_UINT32 zblock_id; /* */ + ZXIC_UINT32 ram_reg_flag; + ZXIC_UINT32 z_reg_cell_id; + ZXIC_UINT32 sram_addr; + ZXIC_UINT32 tb_depth; /* 表项深度 */ + ZXIC_UINT32 tb_width; /* 表项宽度 */ + ZXIC_UINT32 tb_dst_addr_h; + ZXIC_UINT32 tb_dst_addr_l; + +} DPP_DTB_ZCAM_DUMP_FORM_T; +typedef struct dpp_dtb_etcam_dump_form_t { + ZXIC_UINT32 valid; + ZXIC_UINT32 up_type; /* 11:etcam */ + ZXIC_UINT32 block_sel; /* block num */ + ZXIC_UINT32 addr; /* 640bit位单位 */ + ZXIC_UINT32 + rd_mode; /* 读模式,共8bit,每bit控制ram中对应位置的80bit数据是否有效*/ + ZXIC_UINT32 data_or_mask; /* data:1 mask:0*/ + ZXIC_UINT32 tb_depth; /* dump出数据深度,以640bit为单位 */ + ZXIC_UINT32 + tb_width; /* dump出数据宽度 00:80bit 01:160bit 10:320bit 11:640bit */ + ZXIC_UINT32 tb_dst_addr_h; /* dma地址高32bit */ + ZXIC_UINT32 tb_dst_addr_l; /* dma地址低32bit */ +} DPP_DTB_ETCAM_DUMP_FORM_T; + +typedef struct etcam_dump_info_t { + ZXIC_UINT32 block_sel; /* block index 0-7 */ + ZXIC_UINT32 addr; /* 单个block的RAM地址,范围0~511 640bit为单位 */ + ZXIC_UINT32 + rd_mode; /* 读模式,共8bit,每bit控制ram中对应位置的80bit数据是否有效 */ + ZXIC_UINT32 + data_or_mask; /* data:1 mask:0 
参照DPP_ETCAM_DATA_TYPE_E定义*/ + ZXIC_UINT32 tb_depth; /* dump出表深度,以640bit为单位 */ + ZXIC_UINT32 + tb_width; /* dump出数据宽度 00:80bit 01:160bit 10:320bit 11:640bit */ +} ETCAM_DUMP_INFO_T; + +/* 表信息结构 */ +typedef struct dpp_dtb_field_t { + ZXIC_CHAR *p_name; /* 字段名 */ + ZXIC_UINT16 lsb_pos; /* 最低比特位置,以寄存器列表为准*/ + ZXIC_UINT16 len; /* 字段长度,以比特为单位 */ +} DPP_DTB_FIELD_T; + +typedef struct dpp_dtb_table_t { + ZXIC_CHAR *table_type; /* 表类型名称*/ + ZXIC_UINT32 table_no; /* 表编号 */ + ZXIC_UINT32 field_num; /* 包含的字段个数 */ + DPP_DTB_FIELD_T *p_fields; /* 表格式所有字段 */ +} DPP_DTB_TABLE_T; + +typedef struct dpp_dtb_entry_t { + ZXIC_UINT8 *cmd; /* 命令 128bit 即 16B*/ + ZXIC_UINT8 *data; /* 数据 */ + ZXIC_UINT32 data_in_cmd_flag; /*eram 1/64 bit模式时使用,1表示data在cmd中*/ + ZXIC_UINT32 data_size; /* 数据长度,以字节为单位 */ +} DPP_DTB_ENTRY_T; + +typedef struct dpp_dtb_cmd_t { + ZXIC_UINT32 queue_id; /*队列id*/ + ZXIC_UINT32 dtb_phy_addr_hi32; /*dtb描述符物理地址高32bit*/ + ZXIC_UINT32 dtb_phy_addr_lo32; /*dtb描述符物理地址低32bit*/ + ZXIC_UINT32 cmd_type; /*0为流表下发命令,1为流表dump命令*/ + ZXIC_UINT32 int_enable; /* 中断使能 */ + ZXIC_UINT32 dtb_len; /* 指示配表内容或dump描述符长度,以16字节为单位 */ +} DPP_DTB_CMD_T; + +typedef struct dpp_dtb_mc_hash_key_t { + ZXIC_UINT32 hash_key[16]; +} DPP_DTB_MC_HASH_KEY_T; + +typedef struct dpp_dtb_mixed_table_t { + ZXIC_UINT32 down_cmd_len; /**down描述符长度,字节为单位*/ + ZXIC_UINT32 dump_cmd_len; /**dump描述符长度,字节为单位*/ + ZXIC_UINT32 down_buff_offset; /**down buff 偏移,字节为单位*/ + ZXIC_UINT32 dump_buff_offset; /**dump buff 偏移,字节为单位*/ + ZXIC_UINT8 *p_down_cmd_buff; /**指向下表描述符空间,空间为16KB*/ + ZXIC_UINT8 *p_dump_cmd_buff; /**指向dump描述符空间,空间为4KB*/ +} DPP_DTB_MIXED_TABLE_T; + +typedef struct dpp_dtb_mc_hash_entry_info_t { + ZXIC_UINT32 delete_en; /* delete 浣胯兘 */ + ZXIC_UINT32 dma_en; /* dma 浣胯兘 */ + ZXIC_UINT32 *p_data; /*hash 鏉$洰鎸囬拡 闀垮害512bit*/ +} DPP_DTB_MC_HASH_ENTRY_INFO_T; + +/** dtb 中断配置 +* @param int_enable 中断使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 
+************************************************************/ +DPP_STATUS dpp_dtb_interrupt_status_set(ZXIC_UINT32 int_enable); + +/** dtb 中断获取 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_interrupt_status_get(ZXIC_VOID); + +/** dtb cmd 大小端设置 +* @param int_enable 中断使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_cmd_endian_status_set(ZXIC_UINT32 endian); + +/** dtb cmd 大小端获取 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_cmd_endian_status_get(ZXIC_VOID); + +/** dtb写smmu0中的数据,数据长度不限 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param smmu0_base_addr smmu0基地址,以128bit为单位 +* @param smmu0_wr_mode smmu0写模式,参考DPP_ERAM128_OPR_MODE_E,仅支持128bit、64bit、1bit模式 +* @param entry_num 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2024/01/04 +************************************************************/ +DPP_STATUS dpp_dtb_smmu0_data_write(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 smmu0_base_addr, + ZXIC_UINT32 smmu0_wr_mode, + ZXIC_UINT32 entry_num, + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr, + ZXIC_UINT32 *element_id); + +/** dtb flush smmu0中的数据,大数据量 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param smmu0_base_addr smmu0基地址,以128bit为单位 +* @param smmu0_wr_mode smmu0写模式,参考DPP_ERAM128_OPR_MODE_E,仅支持128bit、64bit、1bit模式 +* @param start_index flush开始的条目 +* @param entry_num 下发的条目数 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2024/01/04 +************************************************************/ +DPP_STATUS dpp_dtb_smmu0_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 smmu0_base_addr, + ZXIC_UINT32 smmu0_wr_mode, + ZXIC_UINT32 
start_index, ZXIC_UINT32 entry_num, + ZXIC_UINT32 *element_id); + +/** dtb写eRam表 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_dma_write(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr, + ZXIC_UINT32 *element_id); + +/** dtb写HASH表,在插入条目时如果冲突,则对冲突条目进行记录 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 是否是有一个写不成功就返回,还是继续进行下一个条目并记录错误的条目 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_hash_dma_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id); + +/** dtb删除HASH表 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 删除的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author @date 2023/03/14 +************************************************************/ +DPP_STATUS dpp_dtb_hash_dma_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id); + +/** dtb删除HASH表 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 删除的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author @date 2023/03/14 +************************************************************/ +DPP_STATUS +dpp_dtb_hash_dma_delete_cycle(DPP_DEV_T 
*dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id); + +/** dtb写ACL表 (SPECIFY模式,条目中指定handle,支持级联64bit/128bit) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no ACL表sdt表号 +* @param entry_num 下发的条目数 +* @param p_acl_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 是否是有一个写不成功就返回,还是继续进行下一个条目并记录错误的条目 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_acl_dma_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr, + ZXIC_UINT32 *element_id); + +/** dtb dump eram直接表表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_eram_entry eram数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_eram_entry); + +/** dtb dump eram直接表表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_eram_entry eram数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_stat_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 base_addr, + ZXIC_UINT32 rd_mode, ZXIC_UINT32 index, + ZXIC_UINT32 *p_data); + +/***********************************************************/ +/** 配置数据获取模式 +* @param srh_mode 0:软件获取 1:硬件获取 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_VOID dpp_dtb_srh_mode_set(ZXIC_UINT32 srh_mode); + +/** 获取查找方式 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 
+************************************************************/ +ZXIC_UINT32 dpp_dtb_srh_mode_get(ZXIC_VOID); + +/** 根据键值查找hash表 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param sdt_no 0~255 +* @param p_dtb_hash_entry 出参,返回描述符信息 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ + +ZXIC_UINT32 dpp_dtb_hash_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_HASH_ENTRY_INFO_T *p_dtb_hash_entry, + ZXIC_UINT32 srh_mode); + +ZXIC_UINT32 dpp_dtb_hash_zcam_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT32 srh_mode, ZXIC_UINT8 *p_srh_succ); + +DPP_STATUS dpp_dtb_hash_zcam_get_hardware(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT8 *p_srh_succ); + +DPP_STATUS dpp_dtb_hash_get_software(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT8 *p_srh_succ); + +/** dtb 通过key和mask获取ACL表级联结果 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_acl_entry etcam 数据结构,数据已分配相应内存,需要输入key和mask +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_acl_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_acl_entry); + +/** dtb etcam 数据get接口,通过handle值获取etcam数据 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_acl_entry etcam 数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_acl_entry); + +/***********************************************************/ +/** 
flush当前hash引擎占用的ZCAM空间 +* @param p_se_cfg 全局数据结构 +* @param queue_id 队列id +* @param fun_id hash引擎0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_zcam_space_clr(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 queue_id, ZXIC_UINT32 fun_id); + +/***********************************************************/ +/** flush指定eram空间 +* @param dev_id 设备id +* @param queue_id 队列id +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_eram_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no); + +/***********************************************************/ +/** flush指定hash空间(DDR/ZCAM) +* @param dev_id 设备id +* @param queue_id 队列id +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no); + +/***********************************************************/ +/** 清除hash表资源(硬件和软件,硬件通过dtb方式清除) +* @param dev_id 设备id +* @param queue_id 队列id +* @param hash_id hash引擎 0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/09/26 +************************************************************/ +DPP_STATUS dpp_dtb_hash_all_entry_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 hash_id); + +/***********************************************************/ +/** DTB etcam 整个流表清空Flush +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no); + +/** dtb dump eram直接表表项内容 支持64bit/128bit +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param 
sdt_no eram表sdt表号 +* @param start_index 要dump的起始index,单位是sdt_no该表的mode +* @param p_dump_data_arr 本次dump出的数据,数据格式与下表格式相同 +* @param entry_num 本次dump实际的条目数 +* @param next_start_index 下次dump是开始的index +* @param finish_flag 整个表dump完成标志,1表示完成,0表示未完成 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_table_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_DUMP_INDEX_T start_index, + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr, + ZXIC_UINT32 *entry_num, + DPP_DTB_DUMP_INDEX_T *next_start_index, + ZXIC_UINT32 *finish_flag); + +/***********************************************************/ +/** dump eram表内容 +* @param dev 设备 +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2025/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_eram_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num); + +/***********************************************************/ +/** 只dump hash表的zcam内容 +* @param dev_id 设备id +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_DTB_HASH_ENTRY_INFO_T +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/22 +************************************************************/ +DPP_STATUS dpp_dtb_hash_table_only_zcam_dump(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *entryNum); + +/** dtb dump etcam直接表表项内容 级联eram支持64bit/128bit +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no acl表sdt表号 +* @param start_index 要dump的起始index,单位是sdt_no该表的mode +* @param p_dump_data_arr 本次dump出的数据,数据格式与下表格式相同 +* @param entry_num 本次dump实际的条目数 +* @param 
next_start_index 下次dump是开始的index +* @param finish_flag 整个表dump完成标志,1表示完成,0表示未完成 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_acl_table_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_DUMP_INDEX_T start_index, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_data_arr, + ZXIC_UINT32 *entry_num, + DPP_DTB_DUMP_INDEX_T *next_start_index, + ZXIC_UINT32 *finish_flag); +/***********************************************************/ +/** dump eram表内容 +* @param dev 设备 +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2025/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_acl_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num); + +ZXIC_VOID dpp_data_buff_print(ZXIC_UINT8 *buff, ZXIC_UINT32 size); +ZXIC_VOID dpp_acl_data_print(ZXIC_UINT8 *p_data, ZXIC_UINT8 *p_mask, + ZXIC_UINT32 etcam_mode); +ZXIC_VOID dpp_dtb_data_print(ZXIC_UINT8 *p_data, ZXIC_UINT32 len); + +ZXIC_UINT32 dpp_ddr_index_calc(ZXIC_UINT32 index, ZXIC_UINT32 width_mode, + ZXIC_UINT32 key_type, ZXIC_UINT32 byte_offset); + +DPP_STATUS +dpp_dtb_hash_dma_delete_hardware(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id); + +ZXIC_UINT32 dpp_dtb_hash_zcam_delete_hardware(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + DPP_DTB_ENTRY_T *p_entry, + ZXIC_UINT8 *p_srh_succ); + +DPP_STATUS dpp_dtb_se_zcam_dma_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 addr, ZXIC_UINT32 tb_width, + ZXIC_UINT32 depth, ZXIC_UINT32 *p_data, + ZXIC_UINT32 *element_id); + +DPP_STATUS 
dpp_dtb_hash_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num); + +ZXIC_UINT32 dpp_dtb_hash_data_parse(ZXIC_UINT32 item_type, + ZXIC_UINT32 key_by_size, + DPP_HASH_ENTRY *p_entry, + ZXIC_UINT8 *p_item_data, + ZXIC_UINT8 *p_data_offset); + +/***********************************************************/ +/** 释放vport下的所有index +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param index 需要释放的索引值 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_release_by_vport(DPP_DEV_T *dev, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport); + +/***********************************************************/ +/** 获取当前vport下分配的所有index +* @param dev 设备 +* @param queue_id 队列号 +* @param eram_sdt_no 维护index的eram直接表号 +* @param vport 端口号 +* @param index_num 出参,当前vport分配的index个数 +* @param p_index_array 出参,当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_parse(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 eram_sdt_no, ZXIC_UINT32 vport, + ZXIC_UINT32 *index_num, + ZXIC_UINT32 *p_index_array); + +/***********************************************************/ +/** 清除指定index的所有eram表项 +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no 维护index的eram直接表号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_eram_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array); + +/***********************************************************/ +/** dtb方式清除指定index的所有统计项 +* @param dev 设备 +* @param queue_id 队列号 +* @param counter_id 统计编号,对应微码中的address +* @param 
rd_mode 统计读取方式 0:64bit 1:128bit +* @param sdt_no 维护index的eram直接表号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_eram_stat_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 counter_id, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array); + +/***********************************************************/ +/** 清除指定index的所有acl表项 +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no acl表项的sdt号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_acl_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array); + +/***********************************************************/ +/** 指定vport的统计计数读清 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_counter_id 统计起始编号,对应微码中的address +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_stat_cnt_clr(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_counter_id); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash.h new file mode 100644 index 000000000000..130f0afd2c52 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash.h @@ -0,0 +1,428 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 
文件名称 : dpp_hash.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2014/02/14 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_HASH_H_ +#define _DPP_HASH_H_ + +#include "dpp_se_cfg.h" + +#define HASH_FUNC_ID_MIN (0) +#define HASH_FUNC_ID_NUM (4) + +#define HASH_DDR_CRC_NUM (4) + +#define HASH_KEY_MAX (49) /* 最大键值长度,以字节为单位 */ +#define HASH_RST_MAX (32) /* 最大结果长度,以字节为单位 */ +#define HASH_ENTRY_POS_STEP (16) + +#define HASH_TBL_ID_NUM (32) /* 每个Hash引擎中的最大业务表数目 */ +#define HASH_BULK_NUM (8) /* 每个Hash引擎DDR资源划分的块数 */ + +#define HASH_ACTU_KEY_MIN (1) /* 业务实际键值长度 */ +#define HASH_ACTU_KEY_MAX (48) +#define HASH_ACTU_KEY_STEP (1) /* actual key的长度单位: 1字节 */ +#define HASH_KEY_CTR_SIZE (1) /* key中控制信息的长度, 1字节 */ +#define ITEM_ENTRY_NUM_2 (2) +#define ITEM_ENTRY_NUM_4 (4) + +#define HASH_DDR_ITEM_MIN (1 << 14) +#define HASH_DDR_ITEM_MAX (1 << 26) + +#define HASH_ZBLK_ID_MAX (31) + +/* hash ext cfg reg */ +#define HASH_EXT_MODE_BT_START (1) +#define HASH_EXT_MODE_BT_WIDTH (8) +#define HASH_EXT_FLAG_BT_START (0) +#define HASH_EXT_FLAG_BT_WIDTH (1) + +/* hash tbl30 depth reg */ +#define HASH_TBL0_DEPTH_BT_START (0) +#define HASH_TBL0_DEPTH_BT_WIDTH (8) +#define HASH_TBL1_DEPTH_BT_START (8) +#define HASH_TBL1_DEPTH_BT_WIDTH (8) +#define HASH_TBL2_DEPTH_BT_START (16) +#define HASH_TBL2_DEPTH_BT_WIDTH (8) +#define HASH_TBL3_DEPTH_BT_START (24) +#define HASH_TBL3_DEPTH_BT_WIDTH (8) + +/* hash tbl74 depth reg*/ +#define HASH_TBL4_DEPTH_BT_START (0) +#define HASH_TBL4_DEPTH_BT_WIDTH (8) +#define HASH_TBL5_DEPTH_BT_START (8) +#define HASH_TBL5_DEPTH_BT_WIDTH (8) +#define HASH_TBL6_DEPTH_BT_START (16) +#define HASH_TBL6_DEPTH_BT_WIDTH (8) +#define HASH_TBL7_DEPTH_BT_START (24) +#define HASH_TBL7_DEPTH_BT_WIDTH (8) + +/* hash ext crc cfg*/ +#define TBL0_EXT_CRC_CFG_BT_START (0) +#define TBL0_EXT_CRC_CFG_BT_WIDTH (2) +#define 
TBL1_EXT_CRC_CFG_BT_START (2) +#define TBL1_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL2_EXT_CRC_CFG_BT_START (4) +#define TBL2_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL3_EXT_CRC_CFG_BT_START (6) +#define TBL3_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL4_EXT_CRC_CFG_BT_START (8) +#define TBL4_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL5_EXT_CRC_CFG_BT_START (10) +#define TBL5_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL6_EXT_CRC_CFG_BT_START (12) +#define TBL6_EXT_CRC_CFG_BT_WIDTH (2) +#define TBL7_EXT_CRC_CFG_BT_START (14) +#define TBL7_EXT_CRC_CFG_BT_WIDTH (2) + +/* hash mono flags*/ +#define HASH0_MONO_FLAG_BT_START (0) +#define HASH0_MONO_FLAG_BT_WIDTH (8) +#define HASH1_MONO_FLAG_BT_START (8) +#define HASH1_MONO_FLAG_BT_WIDTH (8) +#define HASH2_MONO_FLAG_BT_START (16) +#define HASH2_MONO_FLAG_BT_WIDTH (8) +#define HASH3_MONO_FLAG_BT_START (24) +#define HASH3_MONO_FLAG_BT_WIDTH (8) + +/* hash zcell mono*/ +#define ZCELL0_BULK_ID_BT_START (2) +#define ZCELL0_BULK_ID_BT_WIDTH (3) +#define ZCELL0_MONO_FLAG_BT_START (3) +#define ZCELL0_MONO_FLAG_BT_WIDTH (1) +#define ZCELL1_BULK_ID_BT_START (10) +#define ZCELL1_BULK_ID_BT_WIDTH (3) +#define ZCELL1_MONO_FLAG_BT_START (11) +#define ZCELL1_MONO_FLAG_BT_WIDTH (1) +#define ZCELL2_BULK_ID_BT_START (18) +#define ZCELL2_BULK_ID_BT_WIDTH (3) +#define ZCELL2_MONO_FLAG_BT_START (19) +#define ZCELL2_MONO_FLAG_BT_WIDTH (1) +#define ZCELL3_BULK_ID_BT_START (26) +#define ZCELL3_BULK_ID_BT_WIDTH (3) +#define ZCELL3_MONO_FLAG_BT_START (27) +#define ZCELL3_MONO_FLAG_BT_WIDTH (1) + +/* hash zreg mono */ +#define ZREG0_BULK_ID_BT_START (2) +#define ZREG0_BULK_ID_BT_WIDTH (3) +#define ZREG0_MONO_FLAG_BT_START (3) +#define ZREG0_MONO_FLAG_BT_WIDTH (1) +#define ZREG1_BULK_ID_BT_START (10) +#define ZREG1_BULK_ID_BT_WIDTH (3) +#define ZREG1_MONO_FLAG_BT_START (11) +#define ZREG1_MONO_FLAG_BT_WIDTH (1) +#define ZREG2_BULK_ID_BT_START (18) +#define ZREG2_BULK_ID_BT_WIDTH (3) +#define ZREG2_MONO_FLAG_BT_START (19) +#define ZREG2_MONO_FLAG_BT_WIDTH (1) +#define 
ZREG3_BULK_ID_BT_START (26) +#define ZREG3_BULK_ID_BT_WIDTH (3) +#define ZREG3_MONO_FLAG_BT_START (27) +#define ZREG3_MONO_FLAG_BT_WIDTH (1) + +#define OPR_CLR (0) +#define OPR_WR (1) + +#define OBTAIN_CONFLICT_KEY (0) + +/* HASH soft reset*/ +#define HASH_ARG_NUM_PER_BULK (8) /* 每个bulk 需要记录的参数数目 */ +#define HASH_ARG_NUM_PER_TBL (4) /* 每个表 需要记录的参数数目 */ +#define HASH_INIT_NUM (8) /* HASH引擎初始化参数数目 */ +#define HASH_BULK_INIT_NUM \ + (1 + HASH_BULK_NUM * HASH_ARG_NUM_PER_BULK + \ + 3) /* 1-bulk_valid +3是为了凑够4的倍数 */ +#define HASH_TBL_INIT_NUM \ + (1 + HASH_TBL_ID_NUM * HASH_ARG_NUM_PER_TBL + \ + 3) /* 1-tbl_valid +3是为了凑够4的整数倍 */ + +typedef struct dpp_hash_table_stat { + ZXIC_FLOAT ddr; + ZXIC_FLOAT zcell; + ZXIC_FLOAT zreg; + ZXIC_FLOAT sum; +} DPP_HASH_TABLE_STAT; + +typedef struct dpp_hash_zreg_mono_stat { + ZXIC_UINT32 zblk_id; + ZXIC_UINT32 zreg_id; +} DPP_HASH_ZREG_MONO_STAT; + +typedef struct dpp_hash_bulk_zcam_stat { + ZXIC_UINT32 zcell_mono_idx[SE_ZBLK_NUM * SE_ZCELL_NUM]; + DPP_HASH_ZREG_MONO_STAT zreg_mono_id[SE_ZBLK_NUM][SE_ZREG_NUM]; +} DPP_HASH_BULK_ZCAM_STAT; + +typedef struct dpp_hash_stat { + ZXIC_UINT32 insert_ok; + ZXIC_UINT32 insert_fail; + ZXIC_UINT32 insert_same; + ZXIC_UINT32 insert_ddr; + ZXIC_UINT32 insert_zcell; + ZXIC_UINT32 insert_zreg; + + ZXIC_UINT32 delete_ok; + ZXIC_UINT32 delete_fail; + + ZXIC_UINT32 search_ok; + ZXIC_UINT32 search_fail; + + ZXIC_UINT32 zblock_num; + ZXIC_UINT32 zblock_array[SE_ZBLK_NUM]; + + DPP_HASH_TABLE_STAT insert_table[HASH_TBL_ID_NUM]; + DPP_HASH_BULK_ZCAM_STAT *p_bulk_zcam_mono[HASH_BULK_NUM]; +} DPP_HASH_STAT; + +typedef enum dpp_hash_itme_pos { + HASH_ITEM_POS_0 = 0, + HASH_ITEM_POS_1 = 1, + HASH_ITEM_POS_2 = 2, + HASH_ITEM_POS_3 = 3, + HASH_ITEM_POS_MAX = 4, +} DPP_HASH_ITME_POS; + +typedef enum dpp_hash_item_inst_mode { + HASH_ITEM_INSERT_LAST = 0, + HASH_ITEM_INSERT_1ST, + HASH_ITEM_INSERT_NULL +} DPP_HASH_ITEM_INST_MODE; + +/* hash 表项信息 */ +typedef struct dpp_hash_tbl_info { + ZXIC_UINT32 fun_id; + 
ZXIC_UINT32 actu_key_size; /**< @brief 实际键值长度,以1字节为单位 */ + ZXIC_UINT32 + key_type; /**< @brief 表项长度类型: 1-128bit, 2-256bit, 3-512bit */ + ZXIC_UINT8 is_init; /**< @brief 是否初始化*/ + ZXIC_UINT8 mono_zcell; /**< @brief 是否有独占的zcell*/ + ZXIC_UINT8 zcell_num; /**< @brief 独占的zcell的数目*/ + ZXIC_UINT8 mono_zreg; /**< @brief 是否有独占的zcell*/ + ZXIC_UINT8 zreg_num; /**< @brief 独占的zcell的数目*/ + ZXIC_UINT8 is_age; /* 硬件老化标志,业务表支持硬件老化 */ + ZXIC_UINT8 is_lrn; /* 硬件学习标志,业务表支持硬件学习 */ + ZXIC_UINT8 is_mc_wrt; /* 微码写表标志,业务表支持微码写表 */ + //ZXIC_UINT8 pad[3]; +} DPP_HASH_TBL_ID_INFO; + +typedef struct dpp_hash_rbkey_info { + ZXIC_UINT8 key[HASH_KEY_MAX]; + ZXIC_UINT8 rst[HASH_RST_MAX]; + D_NODE entry_dn; + SE_ITEM_CFG *p_item_info; + /* ZXIC_UINT32 rb_idx;*/ + ZXIC_UINT32 entry_size; /* 条目宽度,以字节为单位 */ + ZXIC_UINT32 entry_pos; /* 条目在item中的起始位置,以128bit为偏移单位 */ +} DPP_HASH_RBKEY_INFO; + +/* DDR*/ +typedef struct hash_ddr_cfg { + ZXIC_UINT32 bulk_use; /**< @brief 该hash引擎bulk空间是否已经使用*/ + ZXIC_UINT32 ddr_baddr; /**< @brief 分配给hash的DDR空间的硬件基地址*/ + ZXIC_UINT32 ddr_ecc_en; /**< @brief DDR ECC使能: 0-不使能,1-使能*/ + ZXIC_UINT32 + item_num; /**< @brief 硬件分配的ddr存储单元数目,以位宽为单位*/ + ZXIC_UINT32 bulk_id; /**< @brief DDR空间编号*/ + ZXIC_UINT32 hash_ddr_arg; /**< @brief hash ddr CRC 计算式*/ + ZXIC_UINT32 width_mode; /**< @brief ddr3 位宽*/ + ZXIC_UINT32 hw_baddr; /**< @brief ddr3 存储单元起始偏移,以256bit为单位*/ + ZXIC_UINT32 zcell_num; /*cpu 软复位 存储记录该参数*/ + ZXIC_UINT32 zreg_num; /*cpu 软复位 存储记录该参数*/ + + SE_ITEM_CFG **p_item_array; /**< @brief 指向数组指针的指针*/ +} HASH_DDR_CFG; + +#define HASH_ADDR_EXT_FLAG_BT_OFF (31) +#define HASH_ADDR_WRT_MASK_BT_OFF (27) +#define HASH_ADDR_BT_OFF (1) +#define HASH_ADDR_DDR_BT_LEN (26) +#define HASH_ADDR_ZCAM_BT_LEN (17) +typedef struct dpp_hash_wrt_lrn_rsp { + ZXIC_UINT8 space_vld; /* 仅硬件学习时此标志有效 */ + ZXIC_UINT8 ext_flag; /* 软件模拟硬件学习标志位*/ + ZXIC_UINT8 wrt_mask; + ZXIC_UINT8 width_flag; + ZXIC_UINT32 lrn_addr; +} DPP_HASH_WRT_LRN_RSP; + +typedef struct dpp_hash_cfg { + ZXIC_UINT32 fun_id; + ZXIC_UINT8 
ddr_valid; /* 是否使用DDR表项 */ + ZXIC_UINT8 pad[3]; + HASH_FUNCTION32 p_hash32_fun; + HASH_FUNCTION p_hash16_fun; + + HASH_DDR_CFG *p_bulk_ddr_info[HASH_BULK_NUM]; /* 每个DDR空间的配置*/ + ZXIC_UINT8 bulk_ram_mono[HASH_BULK_NUM]; /* 每个ZCAM空间独占标志*/ + SHARE_RAM hash_shareram; /* 共享的ZCAM资源 */ + DPP_SE_CFG *p_se_info; + + ZXIC_RB_CFG hash_rb; + ZXIC_RB_CFG ddr_cfg_rb; + DPP_HASH_STAT hash_stat; +} DPP_HASH_CFG; + +typedef struct hash_entry_cfg { + ZXIC_UINT32 fun_id; + ZXIC_UINT8 bulk_id; + ZXIC_UINT8 table_id; + ZXIC_UINT8 key_type; + ZXIC_UINT8 rsp_mode; + ZXIC_UINT32 actu_key_size; + ZXIC_UINT32 key_by_size; + ZXIC_UINT32 rst_by_size; + DPP_SE_CFG *p_se_cfg; + DPP_HASH_CFG *p_hash_cfg; + DPP_HASH_RBKEY_INFO *p_rbkey_new; + ZXIC_RB_TN *p_rb_tn_new; +} HASH_ENTRY_CFG; + +#define DPP_GET_HASH_KEY_CTRL(valid, type, tbl_id) \ + (((valid & 0x1) << 7) | ((type & 0x3) << 5) | (tbl_id & 0x1f)) +#define DPP_GET_HASH_TBL_ID(p_key) ((p_key)[0] & 0x1F) +#define DPP_GET_HASH_KEY_TYPE(p_key) (((p_key)[0] >> 5) & 0x3) +#define DPP_GET_HASH_KEY_VALID(p_key) (((p_key)[0] >> 7) & 0x1) + +/** 根据hash的类型获取其字节数,返回值包括16B,32B和64B,或者0*/ +#define DPP_GET_HASH_ENTRY_SIZE(key_type) \ + ((key_type == HASH_KEY_128b) ? \ + 16U : \ + ((key_type == HASH_KEY_256b) ? \ + 32U : \ + ((key_type == HASH_KEY_512b) ? 64U : 0))) + +#define DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) \ + (actu_key_size * HASH_ACTU_KEY_STEP) + +#define DPP_GET_KEY_SIZE(actu_key_size) \ + (DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) + HASH_KEY_CTR_SIZE) +#define DPP_GET_RST_SIZE(key_type, actu_key_size) \ + ((DPP_GET_HASH_ENTRY_SIZE(key_type) != 0) ? \ + (DPP_GET_HASH_ENTRY_SIZE(key_type) - \ + DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) - \ + HASH_KEY_CTR_SIZE) : \ + 0xFF) /* modify coverity kfr 2022.05.31 */ + +#define DPP_GET_HASH_RB_KEY(p_hash_rb, idx) \ + ((p_hash_rb)->p_keybase + ((p_hash_rb)->key_size * (idx))) + +#define DPP_GET_DDR_WR_MODE(key_type) \ + ((key_type == HASH_KEY_512b) ? 
key_type : (key_type - 1)) + +/** 根据条目位宽和起始的位置获取写入掩码 */ +#define DPP_GET_HASH_ENTRY_MASK(entry_size, entry_pos) \ + ((((1U << (entry_size / 16U)) - 1U) \ + << (4U - entry_size / 16U - entry_pos)) & \ + 0xF) + +DPP_STATUS dpp_hash_zblkcfg_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + SE_ZBLK_CFG *p_zblk_cfg); + +DPP_STATUS dpp_hash_bulk_mono_flags_write(DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 hash_id, + ZXIC_UINT32 bulk_id); + +DPP_STATUS dpp_hash_zcell_mono_write(DPP_SE_CFG *p_se_cfg, + SE_ZCELL_CFG *p_zcell_cfg); + +DPP_STATUS dpp_hash_zreg_mono_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 tbl_id, + ZXIC_UINT32 zblk_idx, ZXIC_UINT32 zreg_id); + +DPP_STATUS dpp_hash_ext_cfg_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, HASH_DDR_CFG *p_ddr_cfg); + +DPP_STATUS dpp_hash_ext_cfg_clr(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id); + +DPP_STATUS dpp_hash_tbl_depth_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, + HASH_DDR_CFG *p_ddr_cfg); + +DPP_STATUS dpp_hash_tbl_depth_clr(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id); + +DPP_STATUS dpp_hash_tbl_crc_poly_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, + ZXIC_UINT32 crc_sel); + +ZXIC_SINT32 dpp_hash_rb_key_cmp(ZXIC_VOID *p_new, ZXIC_VOID *p_old, + ZXIC_UINT32 key_size); + +DPP_STATUS dpp_hash_insrt_to_item(DPP_HASH_CFG *p_hash_cfg, + DPP_HASH_RBKEY_INFO *p_rbkey, + SE_ITEM_CFG *p_item, ZXIC_UINT32 item_idx, + ZXIC_UINT32 item_type, + ZXIC_UINT32 insrt_key_type); + +DPP_STATUS dpp_hash_red_black_node_alloc(DPP_DEV_T *dev, + ZXIC_RB_TN **p_rb_tn_new, + DPP_HASH_RBKEY_INFO **p_rbkey_new); + +DPP_STATUS dpp_hash_rb_insert(DPP_DEV_T *dev, HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_entry); + +DPP_STATUS dpp_hash_set_crc_key(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_entry, + ZXIC_UINT8 *p_temp_key); + +DPP_STATUS dpp_hash_insert_ddr(DPP_DEV_T *dev, HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, ZXIC_UINT8 
*p_end_flag); + +DPP_STATUS dpp_hash_insert_zcell(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, + ZXIC_UINT8 *p_end_flag); + +DPP_STATUS dpp_hash_insert_zreg(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, ZXIC_UINT8 *p_end_flag); + +/***********************************************************/ +/** 清除hash引擎的所有hash表项(清除软件配置) +* @param p_se_cfg +* @param hash_id +* @param bulk_id +* +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/18 +************************************************************/ +DPP_STATUS dpp_hash_soft_all_entry_delete(DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 hash_id); + +/***********************************************************/ +/** 释放当前sdt下的所有hash流表表项(仅删除软件表项,不操作硬件) +* @param dev_id 设备号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/05 +************************************************************/ +DPP_STATUS dpp_hash_soft_delete_by_sdt(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no); + +DPP_STATUS dpp_hash_get_hash_info_from_sdt(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + HASH_ENTRY_CFG *p_hash_entry_cfg); + +DPP_STATUS dpp_hash_soft_uninstall(DPP_DEV_T *dev); + +DPP_STATUS dpp_one_hash_soft_uninstall(DPP_DEV_T *dev, ZXIC_UINT32 hash_id); + +DPP_STATUS dpp_hash_tbl_clr(ZXIC_UINT32 dev_id); +#endif /* dpp_hash.h */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash_crc.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash_crc.h new file mode 100644 index 000000000000..e47229a6958a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_hash_crc.h @@ -0,0 +1,17 @@ +#ifndef _DPP_HASH_CRC_H_ +#define _DPP_HASH_CRC_H_ + +#define MAX_CRC_WIDTH (20) + +ZXIC_UINT32 dpp_crc32_calc(ZXIC_UINT8 *pInputKey, ZXIC_UINT32 dwByteNum, + ZXIC_UINT32 dwCrcPoly); + +ZXIC_UINT16 dpp_crc16_calc(ZXIC_UINT8 *pInputKey, ZXIC_UINT32 dwByteNum, + ZXIC_UINT16 
dwCrcPoly); + +ZXIC_UINT16 dpp_crc16_get_idx(ZXIC_UINT16 crc_val); + +ZXIC_UINT16 dpp_crc16_table_lookup(ZXIC_UINT8 *pInputKey, ZXIC_UINT32 dwByteNum, + ZXIC_UINT16 dwCrcPoly); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_se_cfg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_se_cfg.h new file mode 100644 index 000000000000..aaff57b5152b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/table/se/dpp_se_cfg.h @@ -0,0 +1,280 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: dpp_se_cfg.h + * 文件标识: SE配置部分头文件 + * 内容摘要: + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ + +#ifndef _DPP_SE_CFG_H_ +#define _DPP_SE_CFG_H_ + +#include "dpp_se_api.h" + +#define DPP_WRITE_FILE_EN (0) + +#define LPM_OPTIMIZE_EZXIC_VOLUTION_TIME_SET_EN (0) + +#define SE_ITEM_WIDTH_MAX (64) /* 单条item最大宽度, 以字节为单位 */ +#define SE_ENTRY_WIDTH_MAX (64) /* 单条entry最大宽度, 以字节为单位 */ + +#define SE_RAM_WIDTH (512) + +#define IPV4_DDR_WIDTH (256) +#define IPV6_DDR_WIDTH (512) +#define IPV6_DDR_WIDTH_LR (384) /*线速宽度为384*/ + +#define ROUTE_DEFAULT_REG_NUM (8) +#define ZBLK_LAST_INDREG_ADDR (0x15) +#define ZBLK_ECC_STATU_REG_ADDR (0x11) +#define ZBLK_HASH_LIST_REG0_ADDR (0xd) +#define ZBLK_HASH_LIST_REG3_ADDR (0x10) + +#define ZCELL_ADDR_BT_START (0) +#define ZCELL_ADDR_BT_WIDTH (9) +#define ZCELL_IDX_BT_START (9) +#define ZCELL_IDX_BT_WIDTH (2) +#define ZBLK_IDX_BT_START (11) +#define ZBLK_IDX_BT_WIDTH (3) +#define ZGRP_IDX_BT_START (14) +#define ZGRP_IDX_BT_WIDTH (2) +#define REG_SRAM_FLAG_BT_START (16) +#define REG_SRAM_FLAG_BT_WIDTH (1) +#define ZBLK_WRT_MASK_BT_START (17) +#define ZBLK_WRT_MASK_BT_WIDTH (4) + +#define ZBLK_NUM_PER_ZGRP (8) + 
+/*片外部分*/ +#define SE_DDR_WIDTH (128) + +struct def_route_info; + +/*HASH 函数部分*/ +typedef ZXIC_UINT16 (*HASH_FUNCTION)(ZXIC_UINT8 *pkey, ZXIC_UINT32 width, + ZXIC_UINT16 arg); +typedef ZXIC_UINT32 (*HASH_FUNCTION32)(ZXIC_UINT8 *pkey, ZXIC_UINT32 width, + ZXIC_UINT32 arg); + +typedef ZXIC_UINT32 (*WR_PROCESS)(ZXIC_UINT8 *p_buff, ZXIC_UINT32 size); + +typedef enum file_type { + FILE_TYPE_REG = 0, + FILE_TYPE_RAM, + FILE_TYPE_ZBLK_CFG, + FILE_TYPE_ZCELL_CFG, + FILE_TYPE_DEF_ROUTE, + FILE_TYPE_DDR256, + FILE_TYPE_DDR512, + FILE_TYPE_V6CMP_CFG, + FILE_TYPE_V4CMP_CFG, + FILE_TYPE_DDR128, +} FILE_TYPE; + +typedef enum se_item_type { + ITEM_INVALID = 0, + ITEM_RAM, + ITEM_DDR_256, + ITEM_DDR_512, + ITEM_REG, +} SE_ITEM_TYPE; + +typedef enum se_fun_type { + FUN_HASH = 1, + FUN_LPM, + FUN_ACL, + FUN_MAX +} SE_FUN_TYPE; + +typedef struct file_info { + ZXIC_FILE *fp; + ZXIC_UINT32 f_status; /*1 open, 0 close*/ +} FILE_INFO; + +typedef struct file_mng { + ZXIC_RB_CFG rb_fn; + FILE_INFO *p_fi; +} SE_FILE_MNG; + +#define ZBLK_CFG_BASE (0x8000) +#define SERVICE_REG_ADDR (0) +#define MASK_REG_ADDR (1) +#define DEFAULT_REG_ADDR (5) +#define V6CMP_REG_ADDR (0x12) +#define V4CMP_REG_ADDR (0x13) + +#define GET_ZBLK_IDX(zcell_idx) (((zcell_idx)&0x7F) >> 2) + +#define GET_ZCELL_IDX(zcell_idx) ((zcell_idx)&0x3) + +#define DPP_SE_GET_ZBLK_CFG(p_se, zblk_idx) \ + (&(((DPP_SE_CFG *)(p_se))->zblk_info[zblk_idx])) + +#define DPP_SE_GET_ZCELL_CFG(p_se, zcell_idx) \ + (&(((DPP_SE_CFG *)(p_se)) \ + ->zblk_info[GET_ZBLK_IDX(zcell_idx)] \ + .zcell_info[GET_ZCELL_IDX(zcell_idx)])) + +#define DPP_GET_FUN_INFO(p_se, fun_id) \ + (&(((DPP_SE_CFG *)(p_se))->fun_info[fun_id])) + +#define ZBLK_CHECK_FULL(p_zblk_cfg) \ + (((((SE_ZBLK_CFG *)(p_zblk_cfg))->zcell_bm & 0xF) == 0xF) ? 
1 : 0) + +#define GET_ZCELL_CRC_VAL(zcell_id, crc16_val) \ + (((crc16_val) >> (zcell_id)) & (SE_RAM_DEPTH - 1)) + +#define ZBLK_ADDR_CONV(zblk_idx) \ + (((zblk_idx) / ZBLK_NUM_PER_ZGRP) * (1 << ZBLK_IDX_BT_WIDTH) + \ + (zblk_idx) % ZBLK_NUM_PER_ZGRP) + +#define ZCELL_ADDR_CONV(zcell_idx) \ + ((ZBLK_ADDR_CONV(((zcell_idx) >> ZCELL_IDX_BT_WIDTH) & \ + ((1 << (ZBLK_IDX_BT_WIDTH + ZGRP_IDX_BT_WIDTH)) - 1)) \ + << ZCELL_IDX_BT_WIDTH) | \ + ((zcell_idx) & ((1 << ZCELL_IDX_BT_WIDTH) - 1))) + +#define ZCELL_BASE_ADDR_CALC(zcell_idx) \ + ((0xF << ZBLK_WRT_MASK_BT_START) | \ + (((ZCELL_ADDR_CONV(zcell_idx)) & \ + ((1 << (ZCELL_IDX_BT_WIDTH + ZBLK_IDX_BT_WIDTH + \ + ZGRP_IDX_BT_WIDTH)) - \ + 1)) \ + << ZCELL_ADDR_BT_WIDTH)) + +#define ZBLK_ITEM_ADDR_CALC(zcell_idx, item_idx) \ + ((ZCELL_BASE_ADDR_CALC(zcell_idx)) | ((item_idx) & (SE_RAM_DEPTH - 1))) + +#define ZBLK_REG_ADDR_CALC(zblk_idx, offset) \ + ((0xF << ZBLK_WRT_MASK_BT_START) | (0x1 << REG_SRAM_FLAG_BT_START) | \ + ((ZBLK_ADDR_CONV(zblk_idx) & 0x1F) << ZBLK_IDX_BT_START) | \ + ((offset)&0x1FF)) + +#define ZBLK_HASH_LIST_REG_ADDR_CALC(zblk_idx, reg_idx) \ + (ZBLK_REG_ADDR_CALC((zblk_idx), (0xD + (reg_idx)))) + +#define ROUTEID_CONVT_ROUTEMODE(rout_id) \ + ((rout_id & 0x01) ? 
DPP_ROUTE_MODE_IPV6 : DPP_ROUTE_MODE_IPV4) + +DPP_STATUS dpp_se_cfg_set(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg); + +DPP_STATUS dpp_se_cfg_get(DPP_DEV_T *dev, DPP_SE_CFG **p_se_cfg); + +DPP_STATUS dpp_se_init(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg); + +DPP_STATUS dpp_se_fun_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT8 id, + ZXIC_UINT32 fun_type); + +DPP_STATUS dpp_se_fun_deinit(DPP_SE_CFG *p_se_cfg, ZXIC_UINT8 id, + ZXIC_UINT32 fun_type); + +#define DPP_SE_CHECK_FUN(p_func_id, id, type) \ + do { \ + if (!(p_func_id)->is_used) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],is_used Fun_id is invalid,(p_func_id)->is_used is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->is_used); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_id != (id)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],fun_id != (id) Fun_id is invalid;p_func_id->fun_id is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->fun_id); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if (!(p_func_id)->fun_ptr) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],fun_ptr Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_type != (type)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],type Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + return DPP_SE_RC_FUN_INVALID; \ + } \ + } while (0) + +#define DPP_SE_CHECK_FUN_MEMORY_FREE(p_func_id, id, type, ptr) \ + do { \ + if (!(p_func_id)->is_used) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],is_used Fun_id is invalid,(p_func_id)->is_used is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->is_used); \ + ZXIC_COMM_FREE(ptr); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_id != (id)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],fun_id != (id) Fun_id is invalid;p_func_id->fun_id is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->fun_id); \ + ZXIC_COMM_FREE(ptr); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if (!(p_func_id)->fun_ptr) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n 
Error[0x%x],fun_ptr Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + ZXIC_COMM_FREE(ptr); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_type != (type)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],type Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + ZXIC_COMM_FREE(ptr); \ + return DPP_SE_RC_FUN_INVALID; \ + } \ + } while (0) + +#define DPP_SE_CHECK_FUN_MUTEX_UNLOCK(p_func_id, id, type, mutex) \ + do { \ + if (!(p_func_id)->is_used) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],is_used Fun_id is invalid,(p_func_id)->is_used is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->is_used); \ + zxic_comm_mutex_unlock(mutex); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_id != (id)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],fun_id != (id) Fun_id is invalid;p_func_id->fun_id is [%d]", \ + DPP_SE_RC_FUN_INVALID, (p_func_id)->fun_id); \ + zxic_comm_mutex_unlock(mutex); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if (!(p_func_id)->fun_ptr) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],fun_ptr Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + zxic_comm_mutex_unlock(mutex); \ + return DPP_SE_RC_FUN_INVALID; \ + } else if ((p_func_id)->fun_type != (type)) { \ + ZXIC_COMM_TRACE_ERROR( \ + "\n Error[0x%x],type Fun_id is invalid", \ + DPP_SE_RC_FUN_INVALID); \ + zxic_comm_mutex_unlock(mutex); \ + return DPP_SE_RC_FUN_INVALID; \ + } \ + } while (0) + +#define DPP_SE_HW_POS(x) (SE_RAM_WIDTH - 1 - (x)) + +#define DPP_SE_ZBLK_OUT_DDR_V6_START (0) +#define DPP_SE_ZBLK_OUT_DDR_V6_END (0) + +#define DPP_SE_ZBLK_OUT_DDR_V4_START (0) +#define DPP_SE_ZBLK_OUT_DDR_V4_END (0) + +#define DPP_SE_ZBLK_SERVICE_TYPE_START (3) +#define DPP_SE_ZBLK_SERVICE_TYPE_END (3) + +#define DPP_SE_ZBLK_HASH_CHAN_START (2) +#define DPP_SE_ZBLK_HASH_CHAN_END (1) + +#define DPP_SE_ZBLK_HW_POS_EN_START (0) +#define DPP_SE_ZBLK_HW_POS_EN_END (0) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/tm/dpp_tm.h 
b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/tm/dpp_tm.h new file mode 100644 index 000000000000..d83b88acf2fd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/module/tm/dpp_tm.h @@ -0,0 +1,4793 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tm.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2014/02/25 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_ETM_H_ +#define _DPP_ETM_H_ + +#include "dpp_tm_api.h" +#include "dpp_etm_reg.h" +#if ZXIC_REAL("ETM_MACRO") +/****************************************************************************** + * START: 宏定义 * + *****************************************************************************/ +#define ETM_WRITE_CHECK (1) +#define DPP_TM_CGAVD_KILO_UL (1024) /* Kbyte和byte换算 */ +#define DPP_TM_CGAVD_TD_MAX (16 * 512) +/* CIR颗粒度: bps */ +#define DPP_TM_SHAPE_CIR_STEP \ + ((ZXIC_DOUBLE)400 * 1000 * 1000 * 1000 / 0x3FFFFFE) +//#define DPP_TM_SHAPE_CIR_STEP (160069565217 / 0x3FFFFFE) +/* EIR颗粒度: bps */ +//define DPP_TM_SHAPE_EIR_STEP ((ZXIC_FLOAT) 160 * 1000 * 1000 * 1000 / 0x3FFFFFE) +#define DPP_TM_SHAPE_EIR_STEP (160 * 1000 * 1000 * 1000 / 0x3FFFFFE) +#define DPP_TM_SHAPE_DEFAULT_CBS \ + (20) /*寄存器写入CBS的最小值,小于该值时,整形不准*/ +#define DPP_TM_KILO_UL (1024) +#define DPP_TM_KILO_ULL (1000) +#define DPP_TM_QMU_PORT_SHAP_MAG (1.03) +#define DPP_ETM_SA_EGRS_MAX_PORTID (66) +/*cfgmt_byte_mode:0:block mode 1:byte mode*/ +#define DPP_TM_CGAVD_BLOCK_MODE (0) +#define DPP_TM_CGAVD_ZXIC_UINT8_MODE (1) + +/*shap整形模板最大值:每2K流或调度器一一映射一块令牌桶资源,每块资源拥有0-127个模板*/ +/*ETM 0xABFF/2K = 21 */ +#define DPP_ETM_SHAP_TABEL_ID_MAX (22) +/*FTM 0x17FF/2K = 2 */ +#define DPP_FTM_SHAP_TABEL_ID_MAX (3) +#define DPP_TM_SHAP_MAP_ID_MAX (128) + 
+/****************************************************************************** + * END: 宏定义 * + *****************************************************************************/ +#endif /* ETM_MACRO */ + +#if ZXIC_REAL("ETM_STRUCT") +/****************************************************************************** + * START: 类型定义 * + *****************************************************************************/ +/* block长度模式 */ +typedef enum dpp_tm_blk_size_e { + DPP_ETM_BLK_SIZE_128_B = 0, + DPP_ETM_BLK_SIZE_256_B, + DPP_ETM_BLK_SIZE_512_B, + DPP_ETM_BLK_SIZE_1024_B, + DPP_ETM_BLK_SIZE_INVALID +} DPP_ETM_BLK_SIZE_E; + +/* 动态门限放大因子参数 */ +typedef struct dpp_tm_amplify_gene_para_t { + ZXIC_UINT32 amplify_gene[16]; +} DPP_ETM_AMPLIFY_GENE_PARA_T; + +/* 等价包长阈值 */ +typedef struct dpp_tm_equal_pkt_len_th_para_t { + ZXIC_UINT32 equal_pkt_len_th[7]; +} DPP_ETM_EQUAL_PKT_LEN_TH_PARA_T; + +/* 等价包长*/ +typedef struct dpp_tm_equal_pkt_len_para_t { + ZXIC_UINT32 equal_pkt_len[8]; +} DPP_ETM_EQUAL_PKT_LEN_PARA_T; + +/*ETM_STRUCT_STAT */ +typedef enum dpp_tm_cgavd_stat_qnum_e { + DPP_ETM_CGAVD_STAT_QNUM1 = 0, + DPP_ETM_CGAVD_STAT_QNUM2 = 1, + DPP_ETM_CGAVD_STAT_QNUM_INVALID +} DPP_ETM_CGAVD_STAT_QNUM_E; + +typedef enum dpp_tm_cgavd_stat_mode_e { + DPP_ETM_CGAVD_STAT_ALL_QUEUE = 0, + DPP_ETM_CGAVD_STAT_ONE_QUEUE = 1, + DPP_ETM_CGAVD_STAT_MODE_INVALID +} DPP_ETM_CGAVD_STAT_MODE_E; + +typedef struct dpp_tm_cgavd_stat_para_t { + DPP_ETM_CGAVD_STAT_MODE_E mode; + ZXIC_UINT32 q_id; +} DPP_ETM_CGAVD_STAT_PARA_T; + +typedef struct dpp_tm_cgavd_stat_info_t { + DPP_ETM_CGAVD_STAT_MODE_E mode; + ZXIC_UINT32 q_id; + ZXIC_UINT32 lif_in_pkt_num; + ZXIC_UINT32 enqueue_pkt_num; + ZXIC_UINT32 dequeue_pkt_num; + ZXIC_UINT32 td_drop_pkt_num; + ZXIC_UINT32 wred_drop_pkt_num; + ZXIC_UINT32 wred_dpi_pkt_num[8]; + ZXIC_UINT32 gred_drop_pkt_num; + ZXIC_UINT32 gred_dpi_pkt_num[8]; +} DPP_ETM_CGAVD_STAT_INFO_T; + +typedef struct dpp_tm_qmu_stat_info_t { + ZXIC_UINT32 fc_cnt_mode; + ZXIC_UINT32 
mmu_qmu_wr_fc_cnt; + ZXIC_UINT32 mmu_qmu_rd_fc_cnt; + ZXIC_UINT32 qmu_cgavd_fc_cnt; + ZXIC_UINT32 cgavd_qmu_pkt_cnt; + ZXIC_UINT32 cgavd_qmu_pktlen_all; + ZXIC_UINT32 cgavd_qmu_drop_tap; + ZXIC_UINT32 last_drop_qnum; + ZXIC_UINT32 crdt_qmu_credit_cnt; + ZXIC_UINT32 qmu_to_qsch_report_cnt; + ZXIC_UINT32 qmu_to_cgavd_report_cnt; + ZXIC_UINT32 qmu_crdt_crs_normal_cnt; + ZXIC_UINT32 qmu_crdt_crs_off_cnt; + ZXIC_UINT32 qsch_qlist_shedule_cnt; + ZXIC_UINT32 qsch_qlist_sch_ept_cnt; + ZXIC_UINT32 qmu_to_mmu_blk_wr_cnt; + ZXIC_UINT32 qmu_to_csw_blk_rd_cnt; + ZXIC_UINT32 qmu_to_mmu_sop_wr_cnt; + ZXIC_UINT32 qmu_to_mmu_eop_wr_cnt; + ZXIC_UINT32 qmu_to_mmu_drop_wr_cnt; + ZXIC_UINT32 qmu_to_csw_sop_rd_cnt; + ZXIC_UINT32 qmu_to_csw_eop_rd_cnt; + ZXIC_UINT32 qmu_to_csw_drop_rd_cnt; + ZXIC_UINT32 mmu_to_qmu_wr_release_cnt; + ZXIC_UINT32 mmu_to_qmu_rd_release_cnt; +} DPP_ETM_QMU_STAT_INFO_T; + +typedef struct dpp_tm_qmu_spec_q_stat_info_t { + ZXIC_UINT32 observe_portfc_spec; + ZXIC_UINT32 spec_lif_portfc_count; + ZXIC_UINT32 observe_qnum_set; + ZXIC_UINT32 spec_q_pkt_received; + ZXIC_UINT32 spec_q_pkt_dropped; + ZXIC_UINT32 spec_q_pkt_scheduled; + ZXIC_UINT32 spec_q_wr_cmd_sent; + ZXIC_UINT32 spec_q_rd_cmd_sent; + ZXIC_UINT32 spec_q_pkt_enq; + ZXIC_UINT32 spec_q_pkt_deq; + ZXIC_UINT32 spec_q_crdt_uncon_received; + ZXIC_UINT32 spec_q_crdt_cong_received; + ZXIC_UINT32 spec_q_crs_normal_cnt; + ZXIC_UINT32 spec_q_crs_off_cnt; +} DPP_ETM_QMU_SPEC_Q_STAT_INFO_T; + +typedef struct dpp_tm_qmu_spec_bat_stat_info_t { + ZXIC_UINT32 observe_batch_set; + ZXIC_UINT32 spec_bat_pkt_received; + ZXIC_UINT32 spec_bat_pkt_dropped; + ZXIC_UINT32 spec_bat_blk_scheduled; + ZXIC_UINT32 spec_bat_wr_cmd_sent; + ZXIC_UINT32 spec_bat_rd_cmd_sent; + ZXIC_UINT32 spec_bat_pkt_enq; + ZXIC_UINT32 spec_bat_pkt_deq; + ZXIC_UINT32 spec_bat_crdt_uncon_received; + ZXIC_UINT32 spec_bat_crdt_cong_received; + ZXIC_UINT32 spec_bat_crs_normal_cnt; + ZXIC_UINT32 spec_bat_crs_off_cnt; +} DPP_ETM_QMU_SPEC_BAT_STAT_INFO_T; + 
+typedef struct qmu_port_shape_para { + ZXIC_UINT32 shape_value_amplified; + ZXIC_UINT32 token_add_num; + ZXIC_UINT32 token_gap; +} QMU_PORT_SHAPE_PARA; + +/*etm 实际使用的ddr_attach、bank_num、depth(mmu中看到的)*/ +typedef struct dpp_etm_qmu_init_para { + ZXIC_UINT32 etm_mmu_ddr_attach; + ZXIC_UINT32 etm_mmu_bank_num; + ZXIC_UINT32 etm_mmu_depth; +} DPP_ETM_QMU_INIT_PARA; + +/*ftm 实际使用的ddr_attach、bank_num、depth(mmu中看到的)*/ +typedef struct dpp_ftm_qmu_init_para { + ZXIC_UINT32 ftm_mmu_ddr_attach; + ZXIC_UINT32 ftm_mmu_bank_num; + ZXIC_UINT32 ftm_mmu_depth; +} DPP_FTM_QMU_INIT_PARA; + +typedef struct dpp_tm_crdt_spwfq_start_num_t { + ZXIC_UINT32 start_num_fq; + ZXIC_UINT32 start_num_fq2; + ZXIC_UINT32 start_num_fq4; + ZXIC_UINT32 start_num_fq8; + ZXIC_UINT32 start_num_sp; + ZXIC_UINT32 start_num_wfq; + ZXIC_UINT32 start_num_wfq2; + ZXIC_UINT32 start_num_wfq4; + ZXIC_UINT32 start_num_wfq8; +} DPP_TM_CRDT_SPWFQ_START_NUM_T; + +/****************************************************************************** + * END: 类型定义 * + *****************************************************************************/ +#endif + +#if ZXIC_REAL("ETM_FUNCTION") +/****************************************************************************** + * START: 函数声明 * + *****************************************************************************/ +/***********************************************************/ +/** 打印指定的全局数组值以及清空全局数组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param para_x 数组index_x +* @param para_y 数组index_y +* @param clear_flag 清空shape全局数组 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author xuhb @date 2019/06/10 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_array_prt(ZXIC_UINT32 dev_id, ZXIC_UINT32 para_x, + ZXIC_UINT32 para_y, + ZXIC_UINT32 clear_flag); + +DPP_STATUS dpp_tm_qmu_qlist_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_num, + ZXIC_UINT32 bank_num_para, ZXIC_UINT32 bank_vld, + ZXIC_UINT32 gene_para); + +DPP_STATUS 
dpp_tm_cgavd_td_th_together_wr(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th, + ZXIC_UINT32 num); + +DPP_STATUS dpp_tm_cgavd_td_th_together_get(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 num); + +DPP_STATUS dpp_tm_cgavd_dyn_th_en_set_more(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 en, + ZXIC_UINT32 num); + +/***********************************************************/ +/** 配置基于优先级的QMU接收NPPU数据的fifo阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param sp 优先级0~7 +* @param th 指定优先级的fifo阈值0~511,单位为fifo条目,fifo深度512 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_move_drop_sp_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 sp, + ZXIC_UINT32 th); + +DPP_STATUS dpp_tm_wred_dp_line_para_wr(ZXIC_UINT32 dev_id, ZXIC_UINT32 level, + ZXIC_UINT32 wred_id, ZXIC_UINT32 dp, + ZXIC_UINT32 max_th, ZXIC_UINT32 min_th, + ZXIC_UINT32 max_p, ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th); + +DPP_STATUS dpp_tm_gred_dp_line_para_wr(ZXIC_UINT32 dev_id, ZXIC_UINT32 dp, + ZXIC_UINT32 max_th, ZXIC_UINT32 mid_th, + ZXIC_UINT32 min_th, ZXIC_UINT32 max_p, + ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th); + +DPP_STATUS dpp_tm_crdt_idle_check(DPP_DEV_T *dev); + +/***********************************************************/ +/** 获取各调度器的起始编号(etm共25K=25600,ftm共1920个) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_spwfq_start_num 调度器起始编号结构体 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/28 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_wfqsp_get(DPP_DEV_T *dev, + DPP_TM_CRDT_SPWFQ_START_NUM_T *p_spwfq_start_num); + +/***********************************************************/ +/** 获取调度器挂接配置参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param p_se_para_tbl 调度器参数 +* @return 
DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_para_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + DPP_ETM_CRDT_SE_PARA_TBL_T *p_se_para_tbl); + +/***********************************************************/ +/** 配置QMU工作模式,2M节点或4M节点 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 0-第一种工作模式2M节点,1-第二种工作模式4M节点 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_qmu_work_mode_set(DPP_DEV_T *dev, + DPP_TM_QMU_WORK_MODE_E mode); + +/***********************************************************/ +/** 读取QMU工作模式,2M节点或4M节点 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 0-第一种工作模式2M节点,1-第二种工作模式4M节点 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_qmu_work_mode_get(DPP_DEV_T *dev, + DPP_TM_QMU_WORK_MODE_E *p_mode); + +/***********************************************************/ +/** 配置本地sa_id,SA模式下需要配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param sa_id 配置的sa_id值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 ftm模式下使用 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_local_sa_id_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 sa_id); + +/***********************************************************/ +/** 读取本地sa_id,SA模式下有效 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_sa_id 读取的sa_id值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_local_sa_id_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_sa_id); + +/***********************************************************/ 
+/** CPU设置的各级队列深度配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param q_len_use_cpu_set_en 0:选取RAM中读出的队列深度; +* @param 1:选取q_len_cpu_set值 +* @param q_len_cpu_set CPU设置的各级队列深度,单位为block。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_q_len_use_cpu_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 q_len_use_cpu_set_en, + ZXIC_UINT32 q_len_cpu_set); + +/***********************************************************/ +/** CPU设置的各级平均队列深度配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param q_avg_len_use_cpu_set_en 0:选取RAM中读出的队列深度; +* @param 1:选取q_avg_len_cpu_set值 +* @param q_avg_len_cpu_set CPU设置的各级平均队列深度。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_q_avg_len_use_cpu_set(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 q_avg_len_use_cpu_set_en, + ZXIC_UINT32 q_avg_len_cpu_set); + +/***********************************************************/ +/** 配置QMU查询队列Qos开关 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id: 队列号 +* @param qos_sign: qos开关 0:关闭 1:开启 +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qos_sign_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 qos_sign); + +/***********************************************************/ +/** 配置授权分发使能或者关闭 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-关闭授权分发,1-使能授权分发 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_credit_en_set(ZXIC_UINT32 dev_id, 
ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取授权分发使能或者关闭 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读出的值,0-关闭授权分发,1-使能授权分发 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_credit_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置授权产生间隔 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_space_choose 授权发送间隔 0:固定16个周期 1:查表 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_crdt_space_choose_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 crdt_space_choose); + +/***********************************************************/ +/** 获得授权产生间隔 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_space_choose 授权发送间隔 0:固定16个周期 1:查表 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_crdt_space_choose_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_crdt_space_choose); + +/***********************************************************/ +/** 配置端口拥塞令牌桶使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号:0~63 +* @param port_en 端口拥塞令牌桶使能,1表示不使用拥塞令牌桶的授权,0表示可以使用拥塞令牌桶授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_crdt_port_congest_en_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en); + +/***********************************************************/ +/** 获得端口拥塞令牌桶使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号:0~120 +* @param p_port_en 端口拥塞令牌桶使能,1表示不使用拥塞令牌桶的授权,0表示可以使用拥塞令牌桶授权 +* +* @return +* @remark 无 +* @see +* 
@author djf @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_crdt_port_congest_en_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en); + +/***********************************************************/ +/** 流级中间级补充扫描机制使能配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param renew_scan_flow:0为不开启补充扫描 others:开启补充扫描,扫描间隔 +* @param renew_scan_mid:0为不开启补充扫描 others:开启补充扫描,扫描间隔 +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_crdt_renew_scan_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 renew_scan_flow, + ZXIC_UINT32 renew_scan_mid); + +/***********************************************************/ +/** 获取流级中间级补充扫描机制使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param renew_scan_flow:0为不开启补充扫描 others:开启补充扫描,扫描间隔 +* @param renew_scan_mid:0为不开启补充扫描 others:开启补充扫描,扫描间隔 +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_crdt_renew_scan_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *renew_scan_flow, + ZXIC_UINT32 *renew_scan_mid); + +/***********************************************************/ +/**cpu配置flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 flow_id, + ZXIC_UINT32 en, ZXIC_UINT32 crs_value); + +/***********************************************************/ +/**获取flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 
强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_flow_id, ZXIC_UINT32 *p_en, + ZXIC_UINT32 *p_crs_value); + +/***********************************************************/ +/** 控制授权速率的门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index 0~6 +* @param rci_grade_th_0_data +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_rci_grade_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 rci_grade_th_0_data); + +DPP_STATUS dpp_tm_crdt_rci_grade_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 *p_rci_grade_th_0_data); + +/***********************************************************/ +/** 控制授权间隔的门限,建议大于等于0XF,不可取0; +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index 0~7 +* @param asm_interval_0_data 控制授权间隔的门限,建议大于等于0XF,不可取0; +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_asm_interval_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 asm_interval_0_data); + +DPP_STATUS dpp_tm_crdt_asm_interval_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 *p_asm_interval_0_data); + +/***********************************************************/ +/** rci的级别 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_rci_grade_data +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_rci_grade_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_rci_grade_data); + +DPP_STATUS dpp_tm_crdt_rci_value_r_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_crdt_rci_value_r_data); + +DPP_STATUS 
dpp_tm_crdt_interval_now_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_crdt_interval_now_data); + +/***********************************************************/ +/** 配置crdt interval使能, +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_interval_en_cfg_data 授权分发间隔使能,1打开,0关闭 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/03/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_interval_en_cfg_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 crdt_interval_en_cfg_data); + +/***********************************************************/ +/**配置拥塞状态(有效链路数+UCN等级)到授权产生间隔的映射表 +* @param dev_id 设备编号 +* @param valid_serdes_num 有效的链路数(0~32) +* @param ucn_level UCN等级(0~7) +* @param cr_clk 授权产生间隔(0~0x3fffff) +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/03/28 +************************************************************/ +DPP_STATUS dpp_tm_crdt_cfgmt_interval_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 valid_serdes_num, + ZXIC_UINT32 ucn_level, + ZXIC_UINT32 cr_clk); + +/***********************************************************/ +/** 读取crdt interval使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_interval_en_cfg_data 授权分发间隔使能,1打开,0关闭 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/03/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_interval_en_cfg_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_crdt_interval_en_cfg_data); + +/***********************************************************/ +/** 屏蔽ucn/asm_rdy的时能信号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ucn_rdy_shield_en 是否屏蔽ucn_rdy信号,1屏蔽,0不屏蔽 +* @param asm_rdy_shield_en 是否屏蔽asm_rdy信号,1屏蔽,0不屏蔽 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ucn_asm_rdy_shield_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ucn_rdy_shield_en, + ZXIC_UINT32 asm_rdy_shield_en); + 
+DPP_STATUS +dpp_tm_crdt_ucn_asm_rdy_shield_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_ucn_rdy_shield_en, + ZXIC_UINT32 *p_asm_rdy_shield_en); + +/***********************************************************/ +/** 配置QMU队列授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是400Byte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_credit_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 credit_value); + +/***********************************************************/ +/** QMU DDR随机模式时,DDR随机组配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_num ddr组数,1-6组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_rand_grp_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_num); + +/***********************************************************/ +/** 配置QMU DDR BANK随机模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_random 模式:0-轮询模式;1-随机模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_random_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ddr_random); + +/***********************************************************/ +/** QMU配置完成寄存器,在QMU链表和DDR随机模式寄存器写入后,将此寄存器写1,完成QMU配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_cfg_done_set(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 配置CRS的e桶产生的crbal门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index crs组数:0~15 +* @param crs_th CRS产生的crbal门限值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* 
@see +* @author xuhb @date 2021/02/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_eir_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 crs_th); + +/***********************************************************/ +/** 配置CRS产生的crbal门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index crs组数:0~15 +* @param crs_th CRS产生的crbal门限值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, + ZXIC_UINT32 crs_th); + +/***********************************************************/ +/** 配置CRS产生的空队列确保门限值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param que_type 队列类型编号(0~15) +* @param empty_que_ack_th 空队列确保授权门限 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_th2_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_type, + ZXIC_UINT32 empty_que_ack_th); + +/***********************************************************/ +/** 配置QMU端口间交织模式 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param pkt_blk_mode 交织模式: 1-按包交织; 0-按block交织SA模式只能配置为1 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_blk_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 pkt_blk_mode); + +/***********************************************************/ +/** 获取QMU端口间交织模式 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param p_pkt_blk_mode 交织模式: 0-按包交织; 1-按block交织SA模式只能配置为1 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_blk_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_pkt_blk_mode); + 
+/***********************************************************/ +/** 配置读命令老化使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param aged_en 读命令老化使能:0:不使能;1:使能 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_en_set(DPP_DEV_T *dev, ZXIC_UINT32 aged_en); + +/***********************************************************/ +/** 配置读命令老化速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_time 读命令老化速率(扫描间隔时间) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_scan_time_set(DPP_DEV_T *dev, + ZXIC_UINT32 scan_time); + +/***********************************************************/ +/** 获得读命令老化速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_scan_time 读命令老化速率(扫描间隔时间) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_scan_time_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_scan_time); + +/***********************************************************/ +/** 配置QMU队列到目的SAId的映射ETM 模式没有, +FTM 模式才有1024个。 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id 队列号 +* @param dest_said 目的said +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/04/08 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_dest_id_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, + ZXIC_UINT32 dest_said); + +/***********************************************************/ +/** 获取QMU队列到目的SAId的映射ETM 模式没有, +FTM 模式才有1024个。 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id 队列号 +* @param p_dest_said 目的said +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/04/08 
+************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_dest_id_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, + ZXIC_UINT32 *p_dest_said); + +/***********************************************************/ +/** 配置出队暂存使用的进程总数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param used_inall 出队暂存使用的进程总数=19-N,默认3表示使用16个进程 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_use_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 used_inall); + +/***********************************************************/ +/** 获得出队暂存使用的进程总数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_used_inall 出队暂存使用的进程总数=19-N,默认3表示使用16个进程 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_use_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_used_inall); + +/***********************************************************/ +/** 配置出队暂存自回加进程总数阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param round_th 出队暂存自回加进程总数阈值=19-N,默认4表示使用15个进程就自回加 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_round_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 round_th); + +/***********************************************************/ +/** 获得出队暂存自回加进程总数阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_round_th 出队暂存自回加进程总数阈值=19-N,默认4表示使用15个进程就自回加 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ + +DPP_STATUS dpp_tm_qmu_pid_round_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_round_th); + +/***********************************************************/ +/** 配置CRS发送强制使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param 
en 配置的值,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 配置CRS发送强制的队列 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_q_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 q_id); + +/***********************************************************/ +/** 配置CRS发送强置的状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crs_state CRS发送强置的状态 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_state_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 crs_state); + +/***********************************************************/ +/** 配置特定队列发送特定CRS +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* qcfg_qsch_crs_force_crs:CRS状态(0:off;1:normal。) +* qcfg_qsch_crs_force_en:CRS发送强置使能(0:不使能;1:使能。) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qnum_crs_force(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 qcfg_qsch_crs_force_crs, + ZXIC_UINT32 qcfg_qsch_crs_force_en); + +/***********************************************************/ +/** 配置CRS状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* @param state +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_state_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 state); + 
+/***********************************************************/ +/** 获得CRS状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* @param p_state +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_state_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_state); + +/***********************************************************/ +/** 配置自动授权队列范围 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 自授权起始队列号 +* @param last_que 自授权终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_que_set(DPP_DEV_T *dev, ZXIC_UINT32 first_que, + ZXIC_UINT32 last_que); + +/***********************************************************/ +/** 获得自动授权队列范围 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_first_que 自授权起始队列号 +* @param p_last_que 自授权终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_que_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_first_que, + ZXIC_UINT32 *p_last_que); + +/***********************************************************/ +/** 配置自动授权开启使能及扫描速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param auto_crdt_en 自动授权开启使能,默认关闭。0:关闭;1:开启 +* @param auto_crdt_rate 自授权速率配置 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_rate_set(DPP_DEV_T *dev, + ZXIC_UINT32 auto_crdt_en, + ZXIC_UINT32 auto_crdt_rate); + +/***********************************************************/ +/** 获得自动授权开启使能及扫描速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_auto_crdt_en 自动授权开启使能,默认关闭。0:关闭;1:开启 +* @param 
p_auto_crdt_rate 自授权速率配置 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_rate_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_auto_crdt_en, + ZXIC_UINT32 *p_auto_crdt_rate); + +/***********************************************************/ +/** 配置授权丢弃使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param all_drop_en 所有授权丢弃使能:1:允许丢弃所有授权;0:仅允许丢弃拥塞授权 +* @param drop_en 授权丢弃使能:1:允许丢弃授权;0:禁止丢弃授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_drop_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 all_drop_en, + ZXIC_UINT32 drop_en); + +/***********************************************************/ +/** 获得授权丢弃使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_all_drop_en 所有授权丢弃使能:1:允许丢弃所有授权;0:仅允许丢弃拥塞授权 +* @param p_drop_en 授权丢弃使能:1:允许丢弃授权;0:禁止丢弃授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_drop_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_all_drop_en, + ZXIC_UINT32 *p_drop_en); + +/***********************************************************/ +/**设置自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_congest_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 qmu_congest_th); + +/***********************************************************/ +/**获取自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_congest_th_get(DPP_DEV_T *dev, + 
ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_qmu_congest_th); + +/***********************************************************/ +/**设置CMD_SCH分优先级反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_sp_fc_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 q_pri, + ZXIC_UINT32 qmu_sp_fc_th); + +/***********************************************************/ +/**获取自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_sp_fc_th_get(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 q_pri, + ZXIC_UINT32 *p_qmu_sp_fc_th); + +/***********************************************************/ +/** 配置QMU需要检测流控的端口号 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_portfc_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id); + +/***********************************************************/ +/** 配置pfc使能 +* @param dev_id 设备编号 +* @param pfc_en 配置的值,0-不使能pfc,1-使能pfc +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pfc_en_set(DPP_DEV_T *dev, ZXIC_UINT32 pfc_en); + +/***********************************************************/ +/** 读取pfc使能 +* @param dev_id 设备编号 +* @param pfc_en 配置的值,0-不使能pfc,1-使能pfc +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pfc_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *pfc_en); + 
+/***********************************************************/ +/** 配置端口pfc掩码 +* @param dev_id 设备编号 +* @param port_id 端口号:0~63 +* @param port_en 端口掩码配置,1pfc模式下该端口接收olif的优先级反压, +* 0pfc模式下该端口不接受olif的优先级反压,并将反压信号全部置1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_pfc_make_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en); + +/***********************************************************/ +/** 获得端口pfc掩码 +* @param dev_id 设备编号 +* @param port_id 端口号:0~63 +* @param port_en 端口掩码配置,1pfc模式下该端口接收olif的优先级反压, +* 0pfc模式下该端口不接受olif的优先级反压,并将反压信号全部置1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_pfc_make_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en); + +/***********************************************************/ +/** QMU初始化配置场景一:4组*2bank,MMU开启rotatjon,MMU实际分配4~7组,每组2~3bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_1(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景一:4组*2bank,MMU开启rotatjon,MMU实际分配0~3组,每组2~3bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_1(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景一ddr4模式,开启MMU/IP rotatjon,tm看到4组*2bank,对应DDR分配4~7组,每组2~3bank************* +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 
+************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_1(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_2(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配0~3组,每组2~3,6~7bank* +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_2(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_2(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* depth=64,代表16k +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_pd16k_2(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,8组*8bank +* depth=64,代表16k +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/04/12 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_chuk32(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth); + 
+/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配0~3组,每组2~3,6~7bank* +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_pd16k_2(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth); + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_pd16k_2(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth); + +/***********************************************************/ +/** QMU初始化配置场景:ddr3模式,8组ddr*8bank +* @param dev_id 设备编号 +* @param depth bank depth +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/04/12 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_chuk32(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth); + +/***********************************************************/ +/** QMU初始化配置场景三:8组*2bank,MMU开启rotatjon,MMU实际分配0~7组,每组6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_3(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景三:8组*2bank,MMU开启rotatjon,MMU实际分配0~3组,每组2~3bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_3(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景三ETM: 
8组*2bank,MMU开启rotatjon,MMU实际分配0~7组,每组6~7bank +* FTM: 8组*2bank,MMU开启rotatjon,MMU实际分配0~3组,每组2~3bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_3(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_4(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_4(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_4(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_5(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank +* 
@param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_5(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_5(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*2bank,MMU实际分配ftm:4,6,7,9组(01bank); etm:4,6,7,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/05/13 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_6(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*2bank,MMU实际分配ftm:4,6,7,9组(01bank); etm:4,6,7,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/05/13 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_6(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*2bank,MMU实际分配ftm:4,6,7,9组(01bank); etm:4,6,7,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/05/13 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_6(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配1,2(ftm); 5,6(etm)组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @param tm_type 
0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_7(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配5~6组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_7(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配1~2组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_7(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配1,2(ftm); 5,6(etm)组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_8(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配5~6组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_8(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景6:4组*4bank,MMU实际分配1~2组,每组2~3 6~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_8(ZXIC_UINT32 dev_id); + 
+/***********************************************************/ +/** QMU初始化配置场景9:4组*2bank,MMU实际分配ftm:6,7,8,9组(01bank); etm:6,7,8,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/09/28 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_9(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景9:4组*2bank,MMU实际分配ftm:6,7,8,9组(01bank); etm:6,7,8,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/09/28 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_9(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景9:4组*2bank,MMU实际分配ftm:6,7,8,9组(01bank); etm:6,7,8,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/09/28 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_9(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_10(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** QMU初始化配置场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_10(ZXIC_UINT32 dev_id); + 
+/***********************************************************/ +/** QMU初始化配置场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_10(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/**场景11:TM共享2组ddr:FTM为ddr_no1、ddr_no2组ddr的01bank,ETM为ddr_no1、ddr_no2组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_11(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no1, + ZXIC_UINT32 ddr_no2); + +/***********************************************************/ +/**场景11:TM共享2组ddr:FTM为ddr_no1、ddr_no2组ddr的01bank,ETM为ddr_no1、ddr_no2组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_11(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no1, + ZXIC_UINT32 ddr_no2); + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为ddr_no1,ddr_no2组ddr的01bank,ETM为ddr_no1,ddr_no2组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_11(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no1, + ZXIC_UINT32 ddr_no2); + +/***********************************************************/ +/** QMU/MMU初始化配置场景12:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(4567bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb 
@date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_12(ZXIC_UINT32 dev_id, ZXIC_UINT32 ftm_ddr_no); + +/***********************************************************/ +/** QMU/MMU初始化配置场景12:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(4567bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_12(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no); + +/***********************************************************/ +/** QMU初始化配置场景13:TM独享4组ddr:FTM为指定4组ddr的0~7bank, +* ETM为指定4组ddr 0~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_13(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no); + +/***********************************************************/ +/** QMU初始化配置场景13:TM独享4组ddr:FTM为指定4组ddr的0~7bank, +* ETM为指定4组ddr 0~7bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_13(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no); + +/***********************************************************/ +/** QMU/MMU初始化配置场景13:TM独享4组ddr:FTM为指定4组ddr的0~7bank +* ETM为指定4组ddr的0-7bank +* MMU开启rotatjon +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_13(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no); + +/** QMU/MMU初始化配置场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author
xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_14(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, + ZXIC_UINT32 ftm_ddr_no); + +/** QMU/MMU初始化配置场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_14(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no); + +/** QMU/MMU初始化配置场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_14(ZXIC_UINT32 dev_id, ZXIC_UINT32 ftm_ddr_no); + +/** QMU/MMU初始化配置场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_15(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, + ZXIC_UINT32 ftm_ddr_no); + +/** QMU/MMU初始化配置场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_15(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no); + +/** QMU/MMU初始化配置场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ 
+DPP_STATUS dpp_ftm_qmu_init_set_15(ZXIC_UINT32 dev_id, ZXIC_UINT32 ftm_ddr_no); + +/***********************************************************/ +/** QMU初始化配置场景16:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_16(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, + ZXIC_UINT32 ftm_ddr_no); + +/***********************************************************/ +/** QMU初始化配置场景16:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_16(ZXIC_UINT32 dev_id, ZXIC_UINT32 ftm_ddr_no); + +/***********************************************************/ +/** QMU初始化配置场景16:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_ftm_qmu_init_set_16(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no); + +/***********************************************************/ +/** 64k队列的简单挂接函数配置 +*flow0~65535 -> ses0~4095 -> pp0 +:具体为flow0~15挂接到ses0的sp0,flow16~31挂接到ses1的sp0,以此类推 +* ses到PP的挂接都是挂接于pp0的sp0 +*@param dev_id 设备编号 +* port_id端口号 +* @ +* @return +* @remark 无 +* @see +* @author cy @date 2015/06/30 +************************************************************/ +DPP_STATUS dpp_tm_crdt_sch_64k_test(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_id); + +/***********************************************************/ +/** 把整数分解成(指定位长)最高有效数和(2的)指数位数的形式,data=p_remdata*2^(p_exp) +* @param data 需要转换前的数 +* @param rembitsum 余数的位数 +* @param p_remdata 余数大小 +* @param p_exp 指数大小 +* +* 
@return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/27 +************************************************************/ +DPP_STATUS dpp_tm_rem_and_exp_translate(ZXIC_UINT32 data, ZXIC_UINT32 rembitsum, + ZXIC_UINT32 *p_remdata, + ZXIC_UINT32 *p_exp); + +/***********************************************************/ +/** 配置统计计数寄存器的统计方式 +* @param dev_id 设备编号 +* @param qnum 支持2组统计寄存器,0-qnum1,1-qnum2 +* @param mode 统计方式,0-统计全部队列,1-统计指定q_id +* @param q_id mode=1时生效,统计该队列的信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_stat_q_set(ZXIC_UINT32 dev_id, + DPP_ETM_CGAVD_STAT_QNUM_E qnum, + DPP_ETM_CGAVD_STAT_MODE_E mode, + ZXIC_UINT32 q_id); + +/***********************************************************/ +/** 读取统计计数寄存器的计数信息 +* @param dev_id 设备编号 +* @param qnum 支持2组统计寄存器,0-qnum1,1-qnum2 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_stat_q_get(ZXIC_UINT32 dev_id, + DPP_ETM_CGAVD_STAT_QNUM_E qnum, + DPP_ETM_CGAVD_STAT_INFO_T *p_para); + +/***********************************************************/ +/** 配置等价包长 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len 等价包长 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_para_set( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_PARA_T *p_equal_pkt_len); + +/***********************************************************/ +/** 读取等价包长 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len 等价包长 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS 
dpp_tm_cgavd_equal_pkt_len_para_get( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_PARA_T *p_equal_pkt_len); + +/***********************************************************/ +/** 动态门限放大因子参数配置 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_amplify_gene_para 动态门限放大因子参数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_amplify_gene_para_set( + DPP_DEV_T *dev, DPP_ETM_AMPLIFY_GENE_PARA_T *p_amplify_gene_para); + +/***********************************************************/ +/** 动态门限放大因子参数获取 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_amplify_gene_para 动态门限放大因子参数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_amplify_gene_para_get( + DPP_DEV_T *dev, DPP_ETM_AMPLIFY_GENE_PARA_T *p_amplify_gene_para); + +/***********************************************************/ +/** 配置等价包长阈值 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len_th 等价包长阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_th_para_set( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_TH_PARA_T *p_equal_pkt_len_th); + +/***********************************************************/ +/** 配置QMU DDR BANK随机模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_random 模式:0-轮询模式;1-随机模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_random_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_ddr_random); + +/***********************************************************/ +/** 读取等价包长阈值 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len_th 等价包长阈值 
+* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_th_para_get( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_TH_PARA_T *p_equal_pkt_len_th); + +/***********************************************************/ +/** 读取全部队列的计数信息 +* @param dev_id 设备编号* +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 使用第2组统计寄存器进行统计 +* @see +* @author yjd @date 2015/07/04 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_stat_q_all_get_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 读取某一队列统计计数寄存器的计数信息 +* @param dev_id 设备编号 +* @param q_id 统计该队列的信息 +* 使用第1组统计寄存器进行统计 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/04 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_stat_q_single_get_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 配置默认队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取默认队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置默认队列起始末尾 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param def_start_que 起始默认队列block/byte单位 +* @param 
def_finish_que 结束默认队列block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 def_start_queue, + ZXIC_UINT32 def_finish_queue); + +/***********************************************************/ +/** 读取默认队列起始末尾值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_def_start_que 默认队列起始值block/byte单位 +* @param p_def_finish_que 默认队列结束值block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_def_start_queue, + ZXIC_UINT32 *p_def_finish_queue); + +/***********************************************************/ +/** 配置协议队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取协议队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置协议队列起始末尾 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param protocol_start_que 起始协议队列block/byte单位 +* @param protocol_finish_que 末尾协议队列block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 protocol_start_que, + ZXIC_UINT32 protocol_finish_que); + +/***********************************************************/ +/** 读取协议队列起始末尾值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_protocol_start_que 协议认队列起始值block/byte单位 +* @param p_protocol_finish_que 协议认队列末尾值block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_protocol_start_que, + ZXIC_UINT32 *p_protocol_finish_que); + +/***********************************************************/ +/** 配置QMU需要统计的队列组 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param batch_id 队列组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_batch_set(DPP_DEV_T *dev, ZXIC_UINT32 batch_id); + +/***********************************************************/ +/** 配置QMU需要统计的队列号 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_qnum_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id); + +/***********************************************************/ +/** 配置授权盈余初始化值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crbal_initial_value 授权盈余初始化值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_initial_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 crbal_initial_value); + +/***********************************************************/ +/** 配置等价包长使能 
+* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_en_set(DPP_DEV_T *dev, ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取等价包长使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 读取指定队列获得授权个数(只打印授权非零的队列号) +* @param dev_id 设备编号 +* @param ackflow_start 授权起始流号 +* @param ackflow_end 授权终止流号 +* @param sleep_time_ms 等待时间 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/01/19 +************************************************************/ +DPP_STATUS dpp_etm_crdt_traffic_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ackflow_start, + ZXIC_UINT32 ackflow_end, + ZXIC_UINT32 sleep_time_ms); + +/***********************************************************/ +/** 读取指定队列获得授权个数(只打印授权非零的队列号) +* @param dev_id 设备编号 +* @param ackflow_start 授权起始流号 +* @param ackflow_end 授权终止流号 +* @param sleep_time_ms 等待时间 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/01/19 +************************************************************/ +DPP_STATUS dpp_ftm_crdt_traffic_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ackflow_start, + ZXIC_UINT32 ackflow_end, + ZXIC_UINT32 sleep_time_ms); + +/***********************************************************/ +/** 读取指定队列获得授权个数(只打印授权非零的队列号) +* @param dev_id 设备编号 +* @param tm_type 0-ETM, 1FTM +* @param ackflow_start 授权流起始 +* @param ackflow_end 授权流终止 +* @param sleep_time_ms 等待时间 +* +* @return +* @remark 无 +* @see +* @author wush 
@date 2017/01/24 +************************************************************/ +DPP_STATUS diag_dpp_tm_crdt_traffic_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ackflow_start, + ZXIC_UINT32 ackflow_end, + ZXIC_UINT32 sleep_time_ms); + +/***********************************************************/ +/** +* @param dev_id +* @param tm_type +* @param i_or_e_sel +* @param port_or_dest_id_sel +* @param start_id +* @param start_port_dest_id +* @param num +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xjw @date 2018/02/01 +************************************************************/ +DPP_STATUS dpp_tm_olif_stat_set_mul(ZXIC_UINT32 dev_id, ZXIC_UINT32 i_or_e_sel, + ZXIC_UINT32 port_or_dest_id_sel, + ZXIC_UINT32 start_id, + ZXIC_UINT32 start_port_dest_id, + ZXIC_UINT32 num); + +/****************************************************************************** + * END: 函数声明 * + *****************************************************************************/ +#endif + +#if ZXIC_REAL("ETM_STAT") + +/***********************************************************/ +/** 打印dpp tm所有模块中断状态 +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备编号 +* @param +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS diag_dpp_tm_int(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 获取tm.c中qmu_init_set中配置的case_num +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备编号 +* @param +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/04/15 +************************************************************/ +DPP_STATUS dpp_tm_case_no_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *case_no); + +/***********************************************************/ +/** 配置dpp tm所有模块中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param int_mask_flag 0:不屏蔽 1:屏蔽 +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 
+* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_int_mask_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 int_mask_flag); + +/***********************************************************/ +/** 读取基于优先级的QMU接收NPPU数据的fifo阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param sp 优先级0~7 +* @param p_th 指定优先级的fifo阈值0~511,单位为fifo条目,fifo深度512 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_move_drop_sp_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 sp, + ZXIC_UINT32 *p_th); + +/***********************************************************/ +/** 打印cgavd模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 0:block mode 1:byte mode +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_mode_get_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 打印cgavd TD阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_byte_block_th_get_diag(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id); + +/***********************************************************/ +/** QMU MMU 配置清除 +* @param dev_id +* @param tm_type +* +* @return +* @remark 无 +* @see +* @author XXX @date 2020/04/13 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mmu_cfg_clr(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 读取QMU所有队列的统计信息 +* @param dev_id 设备编号 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* 
@remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_stat_get(ZXIC_UINT32 dev_id, + DPP_ETM_QMU_STAT_INFO_T *p_para); + +/***********************************************************/ +/** 读取QMU指定队列的计数信息 +* @param dev_id 设备编号 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_stat_get(ZXIC_UINT32 dev_id, + DPP_ETM_QMU_SPEC_Q_STAT_INFO_T *p_para); + +/***********************************************************/ +/** 读取QMU指定队列组的计数信息 +* @param dev_id 设备编号 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS +dpp_tm_qmu_spec_bat_stat_get(ZXIC_UINT32 dev_id, + DPP_ETM_QMU_SPEC_BAT_STAT_INFO_T *p_para); + +/***********************************************************/ +/** 配置QMU流控计数模式 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param mode 流控模式,0-电平流控;1-边沿流控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_fc_cnt_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode); + +/***********************************************************/ +/** 读取QMU流控计数模式 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 流控模式,0-电平流控;1-边沿流控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_fc_cnt_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode); + +/***********************************************************/ +/** 打印block长度 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/20 
+************************************************************/ +DPP_STATUS dpp_tm_cfgmt_blk_size_get_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 打印队列空标志查询 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_ept_flag_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum); + +/***********************************************************/ +/** 获得队列空标志查询 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 队列空标志查询 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_ept_flag_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value); + +/***********************************************************/ +/** 获得队列深度计数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 队列深度计数 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_r_bcnt_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value); + +/***********************************************************/ +/** 打印队列深度计数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 队列深度计数 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_r_bcnt_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum); + +/***********************************************************/ +/** CMDSCH中分端口分优先级的BLOCK计数 +* @param dev_id 设备编号 +* @param pri +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 
+************************************************************/ +DPP_STATUS dpp_tm_csch_r_block_cnt_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 pri, + ZXIC_UINT32 *p_value); + +/***********************************************************/ +/** 打印CMDSCH中分端口分优先级的BLOCK计数 +* @param dev_id 设备编号 +* @param port +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_csch_r_block_cnt_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 port); + +/***********************************************************/ +/** 打印队列入链状态 +* @param dev_id 设备编号 +* @param flow_id +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_state_get_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 flow_id); + +/***********************************************************/ +/** 打印调度器入链状态 +* @param dev_id 设备编号 +* @param flow_id +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_state_get_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 se_id); + +/***********************************************************/ +/** 打印olif的fifo是否空状态 +* @param dev_id 设备编号 +* @param +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_olif_fifo_empty_state_get_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 授权个数统计寄存器清零 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_clr_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ 
+/**cpu配置flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 flow_id, + ZXIC_UINT32 en, ZXIC_UINT32 crs_value); + +/***********************************************************/ +/**获取flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_flow_id, ZXIC_UINT32 *p_en, + ZXIC_UINT32 *p_crs_value); + +/***********************************************************/ +/** 获得拥塞状态(有效链路数+UCN等级)到授权产生间隔的映射表 +* @param dev_id 设备编号 +* @param valid_serdes_num 有效的链路数(0~32 +* @param ucn_level UCN等级(0~7) +* @param p_cr_clk UCN等级(0~7) +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/03/28 +************************************************************/ +DPP_STATUS dpp_tm_crdt_cfgmt_interval_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 valid_serdes_num, + ZXIC_UINT32 ucn_level, + ZXIC_UINT32 *p_cr_clk); + +/***********************************************************/ +/**每隔10s获取crs状态的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author zmy @date 2015/08/07 +************************************************************/ +DPP_STATUS dpp_tm_crs_statics(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id); + 
+/***********************************************************/ +/** 统计QMU发送和CRDT模块指定授权流接收的CRS计数(10s内) +* @param dev_id 设备编号 +* @param que_id QMU队列号 +* @param ackflow_id 授权流号 +* @param valid_flag 0:队列发送和授权流接收都统计; 1:只关注队列发送,2:只关注授权流接收。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/05/12 +************************************************************/ +DPP_STATUS dpp_tm_crs_cnt_prt(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, + ZXIC_UINT32 ackflow_id, ZXIC_UINT32 valid_flag); + +/***********************************************************/ +/** 带停流的统计QMU发送和CRDT模块指定授权流接收的CRS计数 +* @param dev_id 设备编号 +* @param que_id QMU队列号 +* @param ackflow_id 授权流号 +* @param valid_flag 0:默认队列发送和授权流接收都统计,此时队列授权都在本板; +* 1:只关注队列发送,2:只关注授权流接收,需要与源端队列停流配合使用, +* 先停流,运行该函数;或者直接不停流得到的是某段时间的计数。 +* @param sleep_time 统计多长时间内的crs计数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/05/12 +************************************************************/ +DPP_STATUS dpp_tm_crs_cnt_prt_1(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, + ZXIC_UINT32 ackflow_id, ZXIC_UINT32 valid_flag, + ZXIC_UINT32 sleep_time); + +/***********************************************************/ +/** 读取qlist入队及出队状态监控 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/08/26 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qlist_state_query(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 打印被指定统计的第0~15个端口消耗的令牌个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM + +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 统计时间为2s,其中c桶统计1s,e桶统计1s +* @see +* @author whuashan @date 2019/03/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_dec_cnt_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 打印被指定统计的第0~15个端口接收的令牌个数 +* @param dev_id 设备编号 +* 
@param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 统计时间为2s,其中c桶统计1s,e桶统计1s +* @see +* @author whuashan @date 2019/03/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_dist_cnt_diag(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 配置olif统计组信息 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id olif统计组号 +* @param all_or_by_port 0-统计所有,1-统计某一端口或某一dest_id +* @param i_or_e_sel 10-统计片外,01-统计片内,其他值-统计所有 +* @param port_or_dest_id_sel 0-统计port,1-统计dest_id +* @param port_dest_id port号或dest_id号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/21 +************************************************************/ +DPP_STATUS dpp_tm_olif_stat_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 id, + ZXIC_UINT32 all_or_by_port, + ZXIC_UINT32 i_or_e_sel, + ZXIC_UINT32 port_or_dest_id_sel, + ZXIC_UINT32 port_dest_id); + +/***********************************************************/ +/** 查看CRDT接收到和发送的某端口拥塞授权总数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param pp_id 0~63 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/12/08 +************************************************************/ +DPP_STATUS diag_dpp_tm_crdt_port_congest_credit_cnt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 pp_id); + +#endif /*ETM_STAT */ + +#if ZXIC_REAL("TM_REG") +/***********************************************************/ +/** 写TM寄存器 +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* @param data 写入的数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_wr_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 addr, ZXIC_UINT32 data); + +/***********************************************************/ +/** 读TM寄存器 +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* 
@see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_rd_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 addr); + +/***********************************************************/ +/** 读一片连续的TM寄存器 +* @param module_id 区分TM子模块 +* @param first_addr 起始寄存器的地址 +* @param reg_num 总共读取的寄存器数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2017/07/26 +************************************************************/ +DPP_STATUS dpp_tm_rd_more_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 first_addr, ZXIC_UINT32 reg_num); + +/***********************************************************/ +/** 写tm模块二层间接寄存器(仅crdt/shap模块使用) +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* @param data 写入的数据 +* +* @return +* @remark 无 +* @see +* @author whuashan @date 2019/02/25 +************************************************************/ +DPP_STATUS dpp_tm_ind_wr_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 addr, ZXIC_UINT64 data); + +/***********************************************************/ +/** 读tm模块二层间接寄存器(仅crdt/shap模块使用) +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* +* @return +* @remark 无 +* @see +* @author whuashan @date 2019/02/25 +************************************************************/ +DPP_STATUS dpp_tm_ind_rd_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, + ZXIC_UINT32 addr); +#endif /*TM_REG */ + +#if ZXIC_REAL("TM_CFGMT") +/***********************************************************/ +/** 校验子系统初始化就绪,所有子系统均初始化就绪,p_rdy值为1 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_rdy 初始化就绪标记,1-就绪,0-未就绪 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_subsystem_rdy_check(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** cpu读写通道验证,其读出值等于写入值。读出值不等于写入值时,返回err +* @param 
dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cpu_check(DPP_DEV_T *dev); + +/***********************************************************/ +/** 读取内置TM的工作模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 读取的值,0-TM模式,1-SA模式 +*ETM仅工作在TM模式,FTM可以工作TM或SA模式 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_sa_work_mode_get(DPP_DEV_T *dev, + DPP_TM_WORK_MODE_E *p_mode); + +/***********************************************************/ +/** 配置ddr3挂接组数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_num ddr组数,1-6组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_ddr_attach_set(DPP_DEV_T *dev, ZXIC_UINT32 ddr_num); + +/***********************************************************/ +/** 读取ddr3挂接组数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_num ddr组数,1-6组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_ddr_attach_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_ddr_num); + +DPP_STATUS dpp_qmu_init_info(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 配置包存储的CRC功能是否使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-禁止CRC功能,1-允许CRC功能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_crc_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 
配置qmu端口转换使能,SA模式下需要配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的使能值,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_port_transfer_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取qmu端口转换使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的使能值,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_port_transfer_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 读取包存储的CRC功能是否使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-禁止CRC功能,1-允许CRC功能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_crc_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置block长度模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param size block长度模式:256/512/1024 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_blk_size_set(DPP_DEV_T *dev, ZXIC_UINT32 size); + +/***********************************************************/ +/** 配置计数模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 计数模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cnt_mode_set(ZXIC_UINT32 dev_id, + DPP_TM_CNT_MODE_T *p_mode); + 
+/***********************************************************/ +/** 读取计数模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 计数模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cnt_mode_get(ZXIC_UINT32 dev_id, + DPP_TM_CNT_MODE_T *p_mode); + +/***********************************************************/ +/** 配置中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断屏蔽 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_mask_set(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para); + +/***********************************************************/ +/** 读取中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断屏蔽 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_mask_get(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para); + +/***********************************************************/ +/** 读取中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断状态 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_state_get(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para); + +/***********************************************************/ +/** 配置tm时钟门控是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm时钟门控,1-使能tm时钟门控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_clkgate_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取tm时钟门控是否使能 +* 
@param dev_id 设备编号 +* @param en 配置的值,0-禁止tm时钟门控,1-使能tm时钟门控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 + +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_clkgate_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置tm软复位是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm软复位,1-使能tm软复位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_softrst_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取tm软复位是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm软复位,1-使能tm软复位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 + +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_softrst_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en); + +#endif /*TM_CFGMT */ + +#if ZXIC_REAL("TM_CGAVD") +/***********************************************************/ +/** 读取各级搬移功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param p_en 读出的使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +#ifdef ETM_REAL +DPP_STATUS dpp_tm_cgavd_move_en_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置各级搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param value 端口级和系统级时,为搬移门限值,单位为NPPU存包的单位,256B; +* 流级时为搬移profile_id,0~15 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_move_th_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 id, + ZXIC_UINT32 value); + +/***********************************************************/ +/** 读取各级搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_value 端口级和系统级时,为搬移门限值,单位为NPPU存包的单位,256B; +* 流级时为搬移profile_id,0~15 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_move_th_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 id, + ZXIC_UINT32 *p_value); + +/***********************************************************/ +/** 读取flow级的搬移策略 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param move_profile flow级的搬移门限分组索引,0~15 +* @param p_th flow级的搬移门限,单位为KB; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_flow_move_profile_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 move_profile, + ZXIC_UINT32 *p_th); +#endif +/***********************************************************/ +/** 读取端口共享的搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_th 端口共享的搬移门限,单位为NPPU存包的单位,256B; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_port_share_th_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_th); + +/***********************************************************/ +/** 读取各级拥塞避免功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要读取的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param p_en 读出的使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_en_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** dp选取来源 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param dp_sel dp选取来源,0-dp,1-tc,2-pkt_len[2:0] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2017/03/14 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dp_sel_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + DPP_TM_CGAVD_DP_SEL_E dp_sel); + +/***********************************************************/ +/** 读取拥塞避免算法 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_method 配置的拥塞避免算法,0:TD,1:WRED/GRED +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_method_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + DPP_TM_CGAVD_METHOD_E *p_method); + +/***********************************************************/ +/** 流队列级队列深度的获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id 队列号 +* p_len 队列深度以KB为单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_flow_que_len_get(DPP_DEV_T *dev, ZXIC_UINT32 que_id, + ZXIC_UINT32 *p_len); + +/***********************************************************/ +/** 端口级队列深度的获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param pp_id 队列号 +* pp_len 队列深度以KB为单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_port_que_len_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 
pp_id, + ZXIC_UINT32 *pp_len); + +/***********************************************************/ +/** 系统级队列深度的获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param sys_len 系统级深度以block为单位 +* sys_protocol_len 系统级包含协议队列深度以KB为单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_sys_que_len_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *sys_len, + ZXIC_UINT32 *sys_protocol_len); + +/***********************************************************/ +/** 读取TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_td_th 配置的丢弃门限值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 *p_td_th); + +/***********************************************************/ +/** 读取指定端口或队列绑定的WRED GROUP ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param p_wred_id 配置的WRED GROUP ID +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_id_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 id, + ZXIC_UINT32 *p_wred_id); + +/***********************************************************/ +/** 读取WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,包含以下五个参数 +* max_th 平均队列深度上限阈值 +* min_th 平均队列深度下限阈值 +* max_p 最大丢弃概率 +* weight 平均队列深度计算权重 +* q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 
2015/04/20 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_para_get( + ZXIC_UINT32 dev_id, DPP_TM_CGAVD_LEVEL_E level, ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, DPP_TM_WRED_DP_LINE_PARA_T *p_para); + +/***********************************************************/ +/** 配置系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,包含以下六个参数 + max_th 平均队列深度上限阈值 + mid_th 平均队列深度中间阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS +dpp_tm_cgavd_gred_dp_line_para_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para); + +/***********************************************************/ +/** 配置系统级阶梯TD 丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param td_th TD 门限 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/02 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_ladtd_dp_line_para_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + ZXIC_UINT32 td_th); + +/***********************************************************/ +/** 读取系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,包含以下六个参数 +* max_th 平均队列深度上限阈值 +* mid_th 平均队列深度中间阈值 +* min_th 平均队列深度下限阈值 +* max_p 最大丢弃概率 +* weight 平均队列深度计算权重 +* q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS +dpp_tm_cgavd_gred_dp_line_para_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para); + +/***********************************************************/ +/** 读取指定端口或队列是否支持动态门限机制 +* @param 
dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param p_en 读取的值,0-不支持动态门限机制,1-支持动态门限机制 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dyn_th_en_get(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置通用门限使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_en_set(DPP_DEV_T *dev, ZXIC_UINT32 en); + +/***********************************************************/ +/** 读取通用门限使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** 配置通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param th 通用门限值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_set(DPP_DEV_T *dev, ZXIC_UINT32 th); + +/***********************************************************/ +/** 读取通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_th 通用门限值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_th); + 
+/***********************************************************/ +/** 配置流队列所属优先级 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param pri 配置的优先级,0~7 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_pri_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 pri); + +/***********************************************************/ +/** 读取TM模式下流队列挂接的端口号;SA模式下流队列映射的目的芯片ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param p_pp_id 读取的端口号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_map_pp_get(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 *p_pp_id); + +/***********************************************************/ +/** 读取配置TM模式tc到flow的映射 +* @param dev_id 设备编号 +* @param tc_id itmd tc优先级(0~7) +* @param flow_id 读取映射的flowid号 (0~4095) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/07/04 +************************************************************/ +DPP_STATUS dpp_tm_tc_map_flow_get(DPP_DEV_T *dev, ZXIC_UINT32 tc_id, + ZXIC_UINT32 *flow_id); + +/***********************************************************/ +/** 获取强制片内或片外 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 1:使能 +* @param mode 1 :omem 强制片外 0:imem 强制片内 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_imem_omem_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en, + ZXIC_UINT32 *p_mode); + +/***********************************************************/ +/** 连续配置各级搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param start_id 为起始 队列号或端口号,系统级时,id参数无效 +* @param value 
端口级和系统级时,为搬移门限值,单位为NPPU存包的单位,256B; +* 流级时为搬移profile_id,0~15 +* @param num 为队列或端口个数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/11/19 +************************************************************/ +#ifdef ETM_REAL +DPP_STATUS dpp_tm_cgavd_move_th_together_wr(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 start_id, + ZXIC_UINT32 value, ZXIC_UINT32 num); +#endif +/***********************************************************/ +/** 系统级缓存使用上下限阈值配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param th_h: 系统级缓存使用上限阈值 +* @param th_l: 系统级缓存使用下限阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_sys_window_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 th_h, + ZXIC_UINT32 th_l); + +/***********************************************************/ +/** 配置cgavd强制反压 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_fc: 0:不强制反压 1:强制反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_fc_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 cgavd_fc); + +/***********************************************************/ +/** 获取cgavd强制反压状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_fc: 0:不强制反压 1:强制反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_fc_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *cgavd_fc); + +/***********************************************************/ +/** 配置cgavd强制不反压 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_no_fc: 0:不强制 1:强制不反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_no_fc_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 cgavd_no_fc); + +/***********************************************************/ +/** 获取cgavd强制不反压状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_no_fc: 0:不强制 1:强制不反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_no_fc_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *cgavd_no_fc); + +/***********************************************************/ +/** 配置cgavd平均队列深度归零 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en: 0:关闭 1:使能 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/05 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_avg_qlen_return_zero_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en); +#endif /*TM_CGAVD */ + +/** crdt ram初始化 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/22 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ram_init(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 获取调度器类型 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param item_num 调度器中包含的子调度器个数 +* @param sch_type_num 调度器类型编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/26 +************************************************************/ +DPP_STATUS dpp_tm_crdt_sch_type_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 *item_num, + ZXIC_UINT32 *sch_type_num); + +/***********************************************************/ +/** 配置flow级流队列的挂接关系(flow到上级调度器的挂接) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* @param c_linkid c桶要挂接到的上级调度器id +* 
@param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_wr(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_TM_SCH_FLOW_PARA_T *p_flow_para); + +/***********************************************************/ +/** 批量配置flow级流队列的挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id_s 起始流队列号 +* @param flow_id_e 终止流队列号 +* @param c_linkid c桶要挂接到的上级调度器id +* @param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_flow_link_more_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id_s, + ZXIC_UINT32 flow_id_e, ZXIC_UINT32 c_linkid, + ZXIC_UINT32 c_weight, ZXIC_UINT32 c_sp, + ZXIC_UINT32 mode, ZXIC_UINT32 e_linkid, + ZXIC_UINT32 e_weight, ZXIC_UINT32 e_sp); + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:非优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight 挂接到上级调度器的权重[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开。该参数不传递直接配0 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_wr(DPP_DEV_T *dev, ZXIC_UINT32 
se_id, + DPP_TM_SCH_SE_PARA_T *p_sch_se_para); + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-3],最多4级,优先级按调度单元分配, +* 每个调度单元内部调度器优先级相同! +* @param se_weight0-7 WFQ8中各调度器权重值[1~511],若是WFQ2/4 只取前面对应值,后面无效 +* @param se_insw 优先级传递使能:0-关 1-开. 该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_se_link_insw_wr(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + DPP_TM_SCH_SE_PARA_INSW_T *p_sch_se_para_insw); + +/***********************************************************/ +/** 获取流队列入链状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* @param link_state 0-未入链 1-在调度器链表中 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_state_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *link_state); + +/***********************************************************/ +/** 获取调度器入链状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param link_state 0-未入链 1-在调度器链表中 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_state_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 *link_state); + +/***********************************************************/ +/** 判断crdt流删除命令是否空闲 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_cmd_idle(DPP_DEV_T *dev); + 
+/***********************************************************/ +/** 删除流/调度器挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 要删除的流号或调度器id +* ETM范围:0--0xABFF; FTM范围:0-0x177F +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id); + +/***********************************************************/ +/** 获取pp->dev挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param pp_id 0~63 +* @param p_weight 0~127 +* @param p_sp_mapping 0~7 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/20 +************************************************************/ +DPP_STATUS dpp_tm_crdt_pp_para_get(DPP_DEV_T *dev, ZXIC_UINT32 pp_id, + ZXIC_UINT32 *p_weight, + ZXIC_UINT32 *p_sp_mapping); + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id_s 起始队列号 +* @param que_id_e 终止队列号 +* @param en 1:过滤E桶队列CRS状态为SLOW的入链请求;0:E桶队列CRS SLOW正常入链; +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_more_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 que_id_s, + ZXIC_UINT32 que_id_e, + ZXIC_UINT32 en); + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id queue id +* @param p_en +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 que_id, + ZXIC_UINT32 *p_en); + +/***********************************************************/ +/** +* @param dev_id +* @param tm_type +* @param que_id +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 
+************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_get_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 que_id); + +/***********************************************************/ +/** 打印指定的全局数组值以及清空全局数组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param para_x 数组index_x +* @param para_y 数组index_y +* @param clear_flag 清空shape全局数组 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author xuhb @date 2019/06/10 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_array_prt(ZXIC_UINT32 dev_id, ZXIC_UINT32 para_x, + ZXIC_UINT32 para_y, + ZXIC_UINT32 clear_flag); + +/***********************************************************/ +/** 配置shap模块中 crd_grain授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是0x5feByte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/13 +************************************************************/ +DPP_STATUS dpp_tm_shap_crd_grain_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 credit_value); + +/***********************************************************/ +/** shap ram初始化 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/22 +************************************************************/ +DPP_STATUS dpp_tm_shap_ram_init(ZXIC_UINT32 dev_id); + +/***********************************************************/ +/** 获取流队列双桶整形使能及模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param db_en 双桶整形使能 +* @param mode 0:c+e模式,1:c+p模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_db_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *db_en, + ZXIC_UINT32 *mode); + +/***********************************************************/ +/** 配置桶深最小单位配置:共8档:0-7 +* @param dev_id 设备编号 +* @param 
tm_type 0-ETM,1-FTM +* @param token_grain 3’d0:最小单位为128K +* 3’d1:最小单位为64k +* 3’d2:最小单位为32k +* 3’d3:最小单位为16k +* 3’d4:最小单位为8k +* 3’d5:最小单位为4k +* 3’d6:最小单位为2k +* 3’d7:最小单位为1k +* 默认为0,即128K +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_grain_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 token_grain); + +/***********************************************************/ +/** 获取桶深最小单位配置:共8档:0-7 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param token_grain 3’d0:最小单位为128K +* 3’d1:最小单位为64k +* 3’d2:最小单位为32k +* 3’d3:最小单位为16k +* 3’d4:最小单位为8k +* 3’d5:最小单位为4k +* 3’d6:最小单位为2k +* 3’d7:最小单位为1k +* 默认为0,即128K +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_grain_get(DPP_DEV_T *dev, + ZXIC_UINT32 *token_grain); + +/***********************************************************/ +/** 配置流或调度器映射到整形参数表的某个ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 流或调度器编号ETM:0-ABFF,FTM:0-177F +* @param profile_id 整形参数表:[0-127] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_map_table_set(DPP_DEV_T *dev, ZXIC_UINT32 id, + ZXIC_UINT32 profile_id); + +/***********************************************************/ +/** 获取流或调度器映射到整形参数表的配置ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 流或调度器编号ETM:0-ABFF,FTM:0-177F +* @param profile_id 整形参数表:[0-127] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_map_table_get(DPP_DEV_T *dev, ZXIC_UINT32 id, + ZXIC_UINT32 *profile_id); + +/***********************************************************/ +/** 
获取流级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param mode_e 整形模式,0-获取c桶参数,1-获取对应e桶参数 +* @param p_para_id 整形模板索引:ETM=[0-AFF],FTM=[0-17F] +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_para_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 mode, ZXIC_UINT32 *p_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_flow_para_tbl); + +/***********************************************************/ +/** etm配置流级整形参数 +* @param dev_id 设备编号 +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* @param db_en 双桶整形使能,0-单桶,1-双桶 +* @param eir eir速率,单位Kb,范围同cir +* @param ebs ebs桶深,单位Kb,范围同cbs +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_etm_shape_flow_para_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs); + +/***********************************************************/ +/** 获取调度单元整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器单元号 ETM:0-63FF,FTM:0-77F +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* @param mode 整形模式,0-获取p桶参数,1-获取对应c桶参数(仅FQ8/WFQ8支持) +* @param p_para_id 整形模板索引:ETM=[0-AFF],FTM=[0-17F] +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_se_para_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 mode, ZXIC_UINT32 *p_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_se_para_tbl); + 
+/***********************************************************/ +/** ftm配置调度器整形参数 +* @param dev_id 设备编号 +* @param se_id 调度器编号号 ETM:0x4800-0xABFF,FTM:0x1000-0x177F +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs pbs总桶深,单位Kb,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir [0-3]调度器cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs [0-3]调度器cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_ftm_shape_se_para_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs); + +/***********************************************************/ +/** etm配置调度器整形参数 +* @param dev_id 设备编号 +* @param se_id 调度器编号号 ETM:0x4800-0xABFF,FTM:0x1000-0x177F +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs pbs总桶深,单位Kb,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir [0-3]调度器cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs [0-3]调度器cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_etm_shape_se_para_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs); + +/***********************************************************/ +/** 写入流/调度器整形参数配置表 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param map_id 整形参数表中模板索引id ETM:0-AFF,FTM:0-17F +* @param cir 整形速率(c/e桶统一) +* @param cbs 桶深 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_set(DPP_DEV_T *dev, ZXIC_UINT32 total_para_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs); + +/***********************************************************/ +/** 
读取流/调度器整形参数配置表 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param total_para_id 整形参数表中模板索引id ETM:0-AFF,FTM:0-17F +* @param cir 整形速率(c/e桶统一) +* @param cbs 桶深 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_get(DPP_DEV_T *dev, ZXIC_UINT32 total_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_shap_para_tbl); + +/***********************************************************/ +/** 配置第0~15个被统计得到令牌个数的端口号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 被统计得到令牌个数的端口号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/3/15 STM模式下使用 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_pp_cfg(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_id); + +/***********************************************************/ +/** 打印各级及指定被统计的第0~15个授权流得到的授权个数 stm模式下使用 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ackcnt_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 delay_ms); + +/***********************************************************/ +/** 读取端口级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* @param p_para 整形信息:CIR/CBS/EN +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_TM_SHAPE_PP_PARA_T *p_para); + +/***********************************************************/ +/** 配置SA模式下各个版本的授权价值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sa_ver_id 版本号(0~7) +* @param sa_credit_value 授权价值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 
+************************************************************/ +DPP_STATUS dpp_tm_qmu_sa_credit_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sa_ver_id, + ZXIC_UINT32 sa_credit_value); + +/***********************************************************/ +/** 获取SA模式下各个版本的授权价值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sa_ver_id 版本号(0~7) +* @param p_sa_credit_value 授权价值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_sa_credit_value_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sa_ver_id, + ZXIC_UINT32 *p_sa_credit_value); + +/***********************************************************/ +/** 配置CRS发送的速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sent_cyc CRS发送的间隔(单位:时钟周期) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_sent_rate_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sent_cyc); + +/***********************************************************/ +/** 获取CRS发送的速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param p_sent_cyc CRS发送的间隔(单位:时钟周期) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_sent_rate_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_sent_cyc); + +/***********************************************************/ +/** 配置CRS过滤使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能过滤,1-使能过滤 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_filter_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en); + +/***********************************************************/ +/** 配置多播授权令牌添加个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* 
@param token_add_num 令牌添加时,每次增加的令牌数目,取值范围为1~255,默认为1;禁止配置为0,配置为0时,将不会产生授权。 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_token_gen_num_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 token_add_num); + +/***********************************************************/ +/** 配置多播授权整形桶参数和使能参数 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param q3_lb_control_en 3号队列整形功能开启使能。0:关闭;1:开启。 +* @param q012_lb_control_en 0~2号队列整形功能开启使能。0:关闭;1:开启。 +* @param q3_lb_max_cnt 3号队列整形桶桶深。 +* @param q012_lb_max_cnt 0~2号队列整形桶桶深。 +* @param q3_lb_add_rate 3号队列令牌添加速率,时钟周期为单位。不可配置为0,配置为0整形使能时,不能产生队列3授权调度信号。 +* @param q012_lb_add_rate 0~2号队列令牌添加速率,以时钟周期单位。不可配置为0,配置为0并整形使能时,不能产生队列0、1、2授权调度信号。 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_ack_lb_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 q3_lb_control_en, + ZXIC_UINT32 q012_lb_control_en, + ZXIC_UINT32 q3_lb_max_cnt, + ZXIC_UINT32 q012_lb_max_cnt, + ZXIC_UINT32 q3_lb_add_rate, + ZXIC_UINT32 q012_lb_add_rate); + +/***********************************************************/ +/** 配置0,1号队列挂接1或2号MCN漏桶信息 +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备索引编号 +* @param mcn_lb_sel 0:0,1号队列挂接1号MCN漏桶 1:0,1号队列挂接2号MCN漏桶 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mcn_lb_sel_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 mcn_lb_sel); + +/***********************************************************/ +/** 配置多播队列0~2的授权输出SP、DWRR +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sp_or_dwrr SP、DWRR模式选择。0:SP;1:DWRR。 +* @param dwrr_w0 0号队列DWRR权重(0~127) +* @param dwrr_w1 1号队列DWRR权重(0~127) +* @param dwrr_w2 2号队列DWRR权重(0~127) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 
2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_sp_dwrr_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sp_or_dwrr, + ZXIC_UINT32 dwrr_w0, ZXIC_UINT32 dwrr_w1, + ZXIC_UINT32 dwrr_w2); + +/***********************************************************/ +/** 配置分目的SA整形打开或关闭 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param shap_en 分目的SA整形使能开关 0:表示关闭 1:表示打开 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shap_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 shap_en); + +/***********************************************************/ +/** 获得轮转扫描使能和扫描速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_en 轮转扫描使能。0:关闭,1:开启 +* @param scan_rate 轮转扫描速率,配置扫描周期不得少于256个周期 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_rate_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_scan_en, + ZXIC_UINT32 *p_scan_rate); + +/***********************************************************/ +/** 配置轮转扫描队列范围 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 起始队列号 +* @param last_que 终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan @date 2019/09/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_que_range_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 first_que, + ZXIC_UINT32 last_que); + +/***********************************************************/ +/** 获取轮转扫描队列范围 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 起始队列号 +* @param last_que 终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan @date 2019/09/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_que_range_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *first_que, + ZXIC_UINT32 
*last_que); + +/***********************************************************/ +/** 获取QMU清空状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_clr_done_flag 队列是否清空完成 +* +* @return +* @remark 无 +* @see +* @author szq @date 2015/05/21 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qlist_qcfg_clr_done_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_clr_done_flag); + +/***********************************************************/ +/** 配置qsch调度分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_en 整形使能 +* @param token_add_num [23:12]:添加令牌数目 +* @param token_gap [11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param token_depth 桶深,单位B,范围[0-0x1EE00] +*公式:(600*8*token_num)/(gap+1) = X Mbps +* 主频= 600 MHz +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author xuhb 2020-5-15 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qsch_port_shape_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 token_add_num, + ZXIC_UINT32 token_gap, + ZXIC_UINT32 token_depth, + ZXIC_UINT32 shape_en); + +/***********************************************************/ +/** 配置CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_en 整形使能 +* @param token_add_num [23:12]:添加令牌数目 +* @param token_gap [11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param token_depth 桶深,单位B,范围[0-0x1EE00] +*公式:(600*8*token_num)/(gap+1) = X Mbps +* 主频= 600 MHz +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_shape_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 token_add_num, + ZXIC_UINT32 token_gap, + ZXIC_UINT32 token_depth, + ZXIC_UINT32 shape_en); + +/***********************************************************/ +/** 获得CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_shape_en 整形使能 +* @param p_token_add_num [23:12]:添加令牌数目 +* @param p_token_gap 
[11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param p_token_depth 桶深,单位B +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_shape_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_token_add_num, + ZXIC_UINT32 *p_token_gap, + ZXIC_UINT32 *p_token_depth, + ZXIC_UINT32 *p_shape_en); + +/***********************************************************/ +/** 获取CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_vlue 整形值,单位Mbps +* @param shape_en 整形使能 +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author zmy @20151217 +************************************************************/ +DPP_STATUS dpp_tm_qmu_egress_shape_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 port_id, + ZXIC_UINT32 *shape_value, + ZXIC_UINT32 *shape_en); + +/***********************************************************/ +/** 配置需要检测的特定队列号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 需要检测统计的特定的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_qnum_set(DPP_DEV_T *dev, ZXIC_UINT32 qnum); + +/***********************************************************/ +/** 获得特定的队列号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_qnum 需要检测统计的特定的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_qnum_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_qnum); + +/***********************************************************/ +/** 配置需要检测的队列组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param group_num 需要检测统计的特定的队列组。这里按取q的低3bit +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_group_set(DPP_DEV_T *dev, 
ZXIC_UINT32 group_num); + +/***********************************************************/ +/** 获得需要检测的队列组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_group_num 需要检测统计的特定的队列组。这里按取q的低3bit +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_group_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_group_num); + +/***********************************************************/ +/** 配置队列授权盈余 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param value 授权盈余 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_value_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 value); + +/***********************************************************/ +/** 获得队列授权盈余 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 授权盈余 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_value_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value); + +/***********************************************************/ +/** 配置分目的SA整形桶深上、下限参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param max_value 分目的SA整形桶深上限,必须配置为正值 +* @param min_value 分目的SA整形桶深下限,必须配置为负值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shape_para_set(ZXIC_UINT32 dev_id, + ZXIC_SINT32 max_value, + ZXIC_SINT32 min_value); + +/***********************************************************/ +/** 获得分目的SA整形桶深上、下限参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_max_value 分目的SA整形桶深上限,必须配置为正值 +* @param p_min_value 分目的SA整形桶深下限,必须配置为负值 +* +* @return 0表示成功 
非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shape_para_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_max_value, + ZXIC_UINT32 *p_min_value); + +/***********************************************************/ +/** 获取特定队列发送的crs normal的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_que_crs_normal_cnt 特定队列发送的crs normal计数 +* @note 须先设置统计的特定队列 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_crs_normal_cnt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_que_crs_normal_cnt); + +/***********************************************************/ +/** 获取特定队列发送的crs off的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_que_crs_off_cnt 特定队列发送的crs off计数 +* @note 须先设置统计的特定队列 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_crs_off_cnt(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_que_crs_off_cnt); + +/***********************************************************/ +/** QMU初始化配置场景 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param case_no 四组QMU初始化场景编号为1-4;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8. 
+* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 case_no); + +/***********************************************************/ +/** TMMU TM纯片内模式配置获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_imem_en 1纯片内 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_imem_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_imem_en); + +/***********************************************************/ +/** TMMU 强制DDR RDY配置获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_force_rdy 1、如果bit【0】配置为1,则QMU看到的DDR0 RDY一直为1。 +* 2、bit【0】代表DDR0,bit【7】代表DDR7。 +* 3、纯片内模式需要配置为8'hff,排除DDR干扰。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_ddr_force_rdy_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_ddr_force_rdy); + +DPP_STATUS dpp_tm_mr_init(ZXIC_UINT32 dev_id); +/***********************************************************/ +/** 配置TM模式下初始化代码 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_tm_init_info 配置TM模式下初始化信息包括以下 +* blk_size 配置qmu block大小 +* case_num 四组QMU初始化场景编号为1-4;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8. +* imem_omem; 0:片内外混合; 1:纯片内;2:纯片外 +* mode 0:TM 1:SA +* +* @return +* @remark 无 +* @see +* @author szq @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_tm_asic_init(ZXIC_UINT32 dev_id, + DPP_TM_ASIC_INIT_INFO_T *p_tm_asic_init_info); + +/***********************************************************/ +/** 配置TM模式下初始化代码 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_tm_init_info 配置TM模式下初始化信息包括以下 +* blk_size 配置qmu block大小 +* case_num 四组QMU初始化场景编号为1-4;ddr*bank:1:4x2;2:4x4;3:8x2;4:4x8. 
+* imem_omem; 0:片内外混合; 1:纯片内;2:纯片外 +* mode 0:TM 1:SA +* +* @return +* @remark 无 +* @see +* @author szq @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_tm_asic_init_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 blk_size, + ZXIC_UINT32 case_num, ZXIC_UINT32 imem_omem, + ZXIC_UINT32 mode); + +/**************************************************************************** +* 函数名称: dpp_tm_avg_que_len_get +* 功能描述: 各级平均队列深度获取 +* 输入参数: dev_id: 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* cgavd_level: 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* que_id: 本级别层次内的队列编号。 +* 输出参数: p_avg_len: 平均队列深度,单位为BLOCK。 +* 返 回 值: DPP_OK-成功,DPP_ERR-失败 +* 其它说明: +* author cy @date 2015/06/29 +*****************************************************************************/ +DPP_STATUS dpp_tm_avg_que_len_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E cgavd_level, + ZXIC_UINT32 que_id, ZXIC_UINT32 *p_avg_len); + +/***********************************************************/ +/** 获取配置CPU设置的报文长度是否参与计算丢弃概率的使能 +* @param tm_type 0-ETM,1-FTM +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_pke_len_calc_sign_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_flag); + +/***********************************************************/ +/** 配置配置cgavd模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 0:block mode 1:byte mode +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode); + +/***********************************************************/ +/** 配置配置cgavd模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 0:block mode 1:byte mode +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode); + +/***********************************************************/ +/** 配置TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param byte_block_th 配置的丢弃门限值,ZXIC_UINT8/BLOCK单位写入寄存器 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_byte_block_th_set(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + ZXIC_UINT32 byte_block_th); + +/***********************************************************/ +/** 读取TD拥塞避免模式下的丢弃门限值字节模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_byte_block_th 配置的丢弃门限值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_byte_block_th_get(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + ZXIC_UINT32 *p_byte_block_th); + +/***********************************************************/ +/** 配置通用门限block模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param byte_block_uni_th 通用门限值block/byte单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS +dpp_tm_cgavd_uniform_byte_block_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 byte_block_uni_th); + +/***********************************************************/ +/** 读取通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_byte_block_uni_th 通用门限值block/byte单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 
+************************************************************/ +DPP_STATUS +dpp_tm_cgavd_uniform_byte_block_th_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_byte_block_uni_th); + +/***********TM CPU软复位接口 Begin*************/ +/***********************************************************/ +/** 设置TM的全局变量,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param size data_buff的长度 +* @param p_data_buff 需要恢复的内容 +* +* @return +* @remark 无 +* @see +* @author XXX @date 2018/06/25 +************************************************************/ +DPP_STATUS dpp_tm_glb_mgr_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 size, + ZXIC_UINT8 *p_data_buff); + +/***********************************************************/ +/** 获取TM的全局变量,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param p_flag 上层释放data_buff的标志,1:需要上层free,0:不需要上层free +* @param p_size data_buff的长度 +* @param pp_data_buff 二级指针(指向函数内部malloc空间的地址) +* +* @return +* @remark 无 +* @see +* @author XXX @date 2018/06/25 +************************************************************/ +DPP_STATUS dpp_tm_glb_mgr_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_flag, + ZXIC_UINT32 *p_size, ZXIC_UINT8 **pp_data_buff); + +/***********************************************************/ +/** 获取TM的全局变量,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param p_size data_buff的长度 +* +* @return +* @remark 无 +* @see +* @author XXX @date 2018/06/25 +************************************************************/ +DPP_STATUS dpp_tm_glb_size_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_size); + +/***********************************************************/ +/** 配置tm授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是0x5feByte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/13 +************************************************************/ +DPP_STATUS dpp_tm_credit_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 credit_value); + 
+/***********************************************************/ +/** 获取全局数组中用户实际配置的整形值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param mode_e 整形模式,0-获取c桶参数,1-获取对应e桶参数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author xuhb @date 2019/06/10 +************************************************************/ +DPP_STATUS +dpp_tm_shape_flow_para_array_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 mode, + DPP_TM_SHAPE_PARA_TABLE *p_flow_para_tbl); + +#endif +/* 必须有个空行,否则可能编不过 */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_axi_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_axi_reg.h new file mode 100644 index 000000000000..e3d25e5c98e6 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_axi_reg.h @@ -0,0 +1,42 @@ + +#ifndef _DPP_AXI_REG_H_ +#define _DPP_AXI_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_axi_axi_conv_cfg_epid_v_func_num_t { + ZXIC_UINT32 user_en; + ZXIC_UINT32 cfg_epid; + ZXIC_UINT32 cfg_vfunc_num; + ZXIC_UINT32 cfg_func_num; + ZXIC_UINT32 cfg_vfunc_active; +} DPP_AXI_AXI_CONV_CFG_EPID_V_FUNC_NUM_T; + +typedef struct dpp_axi_axi_conv_info_axim_rw_hsk_cnt_t { + ZXIC_UINT32 axim_rd_handshake_cnt; + ZXIC_UINT32 axim_wr_handshake_cnt; +} DPP_AXI_AXI_CONV_INFO_AXIM_RW_HSK_CNT_T; + +typedef struct dpp_axi_axi_conv_info_axim_last_wr_id_t { + ZXIC_UINT32 axim_rd_id; + ZXIC_UINT32 axim_wr_id; +} DPP_AXI_AXI_CONV_INFO_AXIM_LAST_WR_ID_T; + +typedef struct dpp_axi_axi_conv_info_axim_last_wr_addr_h_t { + ZXIC_UINT32 aximlastwraddrhigh; +} DPP_AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_H_T; + +typedef struct dpp_axi_axi_conv_info_axim_last_wr_addr_l_t { + ZXIC_UINT32 aximlastrdaddrlow; +} DPP_AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_L_T; + +typedef struct dpp_axi_axi_conv_cfg_debug_info_clr_en_t { + 
ZXIC_UINT32 cfg_global_clr_en; +} DPP_AXI_AXI_CONV_CFG_DEBUG_INFO_CLR_EN_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_cfg_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_cfg_reg.h new file mode 100644 index 000000000000..3589db9123ea --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_cfg_reg.h @@ -0,0 +1,1000 @@ + +#ifndef _DPP_CFG_REG_H_ +#define _DPP_CFG_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_cfg_pcie_int_repeat_t { + ZXIC_UINT32 int_repeat; +} DPP_CFG_PCIE_INT_REPEAT_T; + +typedef struct dpp_cfg_dma_dma_up_size_t { + ZXIC_UINT32 dma_up_size; +} DPP_CFG_DMA_DMA_UP_SIZE_T; + +typedef struct dpp_cfg_csr_soc_wr_time_out_thresh_t { + ZXIC_UINT32 soc_wr_time_out_thresh; +} DPP_CFG_CSR_SOC_WR_TIME_OUT_THRESH_T; + +typedef struct dpp_cfg_pcie_pcie_ddr_switch_t { + ZXIC_UINT32 pcie_ddr_switch; +} DPP_CFG_PCIE_PCIE_DDR_SWITCH_T; + +typedef struct dpp_cfg_pcie_user0_int_en_t { + ZXIC_UINT32 user_int_en; +} DPP_CFG_PCIE_USER0_INT_EN_T; + +typedef struct dpp_cfg_pcie_user0_int_mask_t { + ZXIC_UINT32 user_int_mask; +} DPP_CFG_PCIE_USER0_INT_MASK_T; + +typedef struct dpp_cfg_pcie_user0_int_status_t { + ZXIC_UINT32 user_int_status; +} DPP_CFG_PCIE_USER0_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_user1_int_en_t { + ZXIC_UINT32 user_int_en; +} DPP_CFG_PCIE_USER1_INT_EN_T; + +typedef struct dpp_cfg_pcie_user1_int_mask_t { + ZXIC_UINT32 user_int_mask; +} DPP_CFG_PCIE_USER1_INT_MASK_T; + +typedef struct dpp_cfg_pcie_user1_int_status_t { + ZXIC_UINT32 user_int_status; +} DPP_CFG_PCIE_USER1_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_user2_int_en_t { + ZXIC_UINT32 user_int_en; +} DPP_CFG_PCIE_USER2_INT_EN_T; + +typedef struct dpp_cfg_pcie_user2_int_mask_t { + ZXIC_UINT32 user_int_mask; +} DPP_CFG_PCIE_USER2_INT_MASK_T; + +typedef struct dpp_cfg_pcie_user2_int_status_t { + ZXIC_UINT32 user_int_status; +} 
DPP_CFG_PCIE_USER2_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_ecc_1b_int_en_t { + ZXIC_UINT32 ecc_1b_int_en; +} DPP_CFG_PCIE_ECC_1B_INT_EN_T; + +typedef struct dpp_cfg_pcie_ecc_1b_int_mask_t { + ZXIC_UINT32 ecc_1b_int_mask; +} DPP_CFG_PCIE_ECC_1B_INT_MASK_T; + +typedef struct dpp_cfg_pcie_ecc_1b_int_status_t { + ZXIC_UINT32 ecc_1b_int_status; +} DPP_CFG_PCIE_ECC_1B_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_ecc_2b_int_en_t { + ZXIC_UINT32 ecc_2b_int_en; +} DPP_CFG_PCIE_ECC_2B_INT_EN_T; + +typedef struct dpp_cfg_pcie_ecc_2b_int_mask_t { + ZXIC_UINT32 ecc_2b_int_mask; +} DPP_CFG_PCIE_ECC_2B_INT_MASK_T; + +typedef struct dpp_cfg_pcie_ecc_2b_int_status_t { + ZXIC_UINT32 ecc_2b_int_status; +} DPP_CFG_PCIE_ECC_2B_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_cfg_int_status_t { + ZXIC_UINT32 cfg_int_status; +} DPP_CFG_PCIE_CFG_INT_STATUS_T; + +typedef struct dpp_cfg_pcie_i_core_to_cntl_t { + ZXIC_UINT32 i_core_to_cntl; +} DPP_CFG_PCIE_I_CORE_TO_CNTL_T; + +typedef struct dpp_cfg_pcie_test_in_low_t { + ZXIC_UINT32 test_in_low; +} DPP_CFG_PCIE_TEST_IN_LOW_T; + +typedef struct dpp_cfg_pcie_test_in_high_t { + ZXIC_UINT32 test_in_high; +} DPP_CFG_PCIE_TEST_IN_HIGH_T; + +typedef struct dpp_cfg_pcie_local_interrupt_out_t { + ZXIC_UINT32 local_interrupt_out; +} DPP_CFG_PCIE_LOCAL_INTERRUPT_OUT_T; + +typedef struct dpp_cfg_pcie_pl_ltssm_t { + ZXIC_UINT32 pl_ltssm; +} DPP_CFG_PCIE_PL_LTSSM_T; + +typedef struct dpp_cfg_pcie_test_out0_t { + ZXIC_UINT32 test_out0; +} DPP_CFG_PCIE_TEST_OUT0_T; + +typedef struct dpp_cfg_pcie_test_out1_t { + ZXIC_UINT32 test_out1; +} DPP_CFG_PCIE_TEST_OUT1_T; + +typedef struct dpp_cfg_pcie_test_out2_t { + ZXIC_UINT32 test_out2; +} DPP_CFG_PCIE_TEST_OUT2_T; + +typedef struct dpp_cfg_pcie_test_out3_t { + ZXIC_UINT32 test_out3; +} DPP_CFG_PCIE_TEST_OUT3_T; + +typedef struct dpp_cfg_pcie_test_out4_t { + ZXIC_UINT32 test_out4; +} DPP_CFG_PCIE_TEST_OUT4_T; + +typedef struct dpp_cfg_pcie_test_out5_t { + ZXIC_UINT32 test_out5; +} DPP_CFG_PCIE_TEST_OUT5_T; + 
+typedef struct dpp_cfg_pcie_test_out6_t { + ZXIC_UINT32 test_out6; +} DPP_CFG_PCIE_TEST_OUT6_T; + +typedef struct dpp_cfg_pcie_test_out7_t { + ZXIC_UINT32 test_out7; +} DPP_CFG_PCIE_TEST_OUT7_T; + +typedef struct dpp_cfg_pcie_sync_o_core_status_t { + ZXIC_UINT32 sync_o_core_status; +} DPP_CFG_PCIE_SYNC_O_CORE_STATUS_T; + +typedef struct dpp_cfg_pcie_sync_o_alert_dbe_t { + ZXIC_UINT32 sync_o_alert_dbe; +} DPP_CFG_PCIE_SYNC_O_ALERT_DBE_T; + +typedef struct dpp_cfg_pcie_sync_o_alert_sbe_t { + ZXIC_UINT32 sync_o_alert_sbe; +} DPP_CFG_PCIE_SYNC_O_ALERT_SBE_T; + +typedef struct dpp_cfg_pcie_sync_o_link_loopback_en_t { + ZXIC_UINT32 sync_o_link_loopback_en; +} DPP_CFG_PCIE_SYNC_O_LINK_LOOPBACK_EN_T; + +typedef struct dpp_cfg_pcie_sync_o_local_fs_lf_valid_t { + ZXIC_UINT32 sync_o_local_fs_lf_valid; +} DPP_CFG_PCIE_SYNC_O_LOCAL_FS_LF_VALID_T; + +typedef struct dpp_cfg_pcie_sync_o_rx_idle_detect_t { + ZXIC_UINT32 sync_o_rx_idle_detect; +} DPP_CFG_PCIE_SYNC_O_RX_IDLE_DETECT_T; + +typedef struct dpp_cfg_pcie_sync_o_rx_rdy_t { + ZXIC_UINT32 sync_o_rx_rdy; +} DPP_CFG_PCIE_SYNC_O_RX_RDY_T; + +typedef struct dpp_cfg_pcie_sync_o_tx_rdy_t { + ZXIC_UINT32 sync_o_tx_rdy; +} DPP_CFG_PCIE_SYNC_O_TX_RDY_T; + +typedef struct dpp_cfg_pcie_pcie_link_up_cnt_t { + ZXIC_UINT32 pcie_link_up_cnt; +} DPP_CFG_PCIE_PCIE_LINK_UP_CNT_T; + +typedef struct dpp_cfg_pcie_test_out_pcie0_t { + ZXIC_UINT32 test_out_pcie0; +} DPP_CFG_PCIE_TEST_OUT_PCIE0_T; + +typedef struct dpp_cfg_pcie_test_out_pcie1_t { + ZXIC_UINT32 test_out_pcie1; +} DPP_CFG_PCIE_TEST_OUT_PCIE1_T; + +typedef struct dpp_cfg_pcie_test_out_pcie2_t { + ZXIC_UINT32 test_out_pcie2; +} DPP_CFG_PCIE_TEST_OUT_PCIE2_T; + +typedef struct dpp_cfg_pcie_test_out_pcie3_t { + ZXIC_UINT32 test_out_pcie3; +} DPP_CFG_PCIE_TEST_OUT_PCIE3_T; + +typedef struct dpp_cfg_pcie_test_out_pcie4_t { + ZXIC_UINT32 test_out_pcie4; +} DPP_CFG_PCIE_TEST_OUT_PCIE4_T; + +typedef struct dpp_cfg_pcie_test_out_pcie5_t { + ZXIC_UINT32 test_out_pcie5; +} 
DPP_CFG_PCIE_TEST_OUT_PCIE5_T; + +typedef struct dpp_cfg_pcie_test_out_pcie6_t { + ZXIC_UINT32 test_out_pcie6; +} DPP_CFG_PCIE_TEST_OUT_PCIE6_T; + +typedef struct dpp_cfg_pcie_test_out_pcie7_t { + ZXIC_UINT32 test_out_pcie7; +} DPP_CFG_PCIE_TEST_OUT_PCIE7_T; + +typedef struct dpp_cfg_pcie_test_out_pcie8_t { + ZXIC_UINT32 test_out_pcie8; +} DPP_CFG_PCIE_TEST_OUT_PCIE8_T; + +typedef struct dpp_cfg_pcie_test_out_pcie9_t { + ZXIC_UINT32 test_out_pcie9; +} DPP_CFG_PCIE_TEST_OUT_PCIE9_T; + +typedef struct dpp_cfg_pcie_test_out_pcie10_t { + ZXIC_UINT32 test_out_pcie10; +} DPP_CFG_PCIE_TEST_OUT_PCIE10_T; + +typedef struct dpp_cfg_pcie_test_out_pcie11_t { + ZXIC_UINT32 test_out_pcie11; +} DPP_CFG_PCIE_TEST_OUT_PCIE11_T; + +typedef struct dpp_cfg_pcie_test_out_pcie12_t { + ZXIC_UINT32 test_out_pcie12; +} DPP_CFG_PCIE_TEST_OUT_PCIE12_T; + +typedef struct dpp_cfg_pcie_test_out_pcie13_t { + ZXIC_UINT32 test_out_pcie13; +} DPP_CFG_PCIE_TEST_OUT_PCIE13_T; + +typedef struct dpp_cfg_pcie_test_out_pcie14_t { + ZXIC_UINT32 test_out_pcie14; +} DPP_CFG_PCIE_TEST_OUT_PCIE14_T; + +typedef struct dpp_cfg_pcie_test_out_pcie15_t { + ZXIC_UINT32 test_out_pcie15; +} DPP_CFG_PCIE_TEST_OUT_PCIE15_T; + +typedef struct dpp_cfg_pcie_int_repeat_en_t { + ZXIC_UINT32 int_repeat_en; +} DPP_CFG_PCIE_INT_REPEAT_EN_T; + +typedef struct dpp_cfg_pcie_dbg_awid_axi_mst_t { + ZXIC_UINT32 dbg_awid_axi_mst; +} DPP_CFG_PCIE_DBG_AWID_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awaddr_axi_mst0_t { + ZXIC_UINT32 dbg_awaddr_axi_mst0; +} DPP_CFG_PCIE_DBG_AWADDR_AXI_MST0_T; + +typedef struct dpp_cfg_pcie_dbg_awaddr_axi_mst1_t { + ZXIC_UINT32 dbg_awaddr_axi_mst1; +} DPP_CFG_PCIE_DBG_AWADDR_AXI_MST1_T; + +typedef struct dpp_cfg_pcie_dbg_awlen_axi_mst_t { + ZXIC_UINT32 dbg_awlen_axi_mst; +} DPP_CFG_PCIE_DBG_AWLEN_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awsize_axi_mst_t { + ZXIC_UINT32 dbg_awsize_axi_mst; +} DPP_CFG_PCIE_DBG_AWSIZE_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awburst_axi_mst_t { + ZXIC_UINT32 
dbg_awburst_axi_mst; +} DPP_CFG_PCIE_DBG_AWBURST_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awlock_axi_mst_t { + ZXIC_UINT32 dbg_awlock_axi_mst; +} DPP_CFG_PCIE_DBG_AWLOCK_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awcache_axi_mst_t { + ZXIC_UINT32 dbg_awcache_axi_mst; +} DPP_CFG_PCIE_DBG_AWCACHE_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_awprot_axi_mst_t { + ZXIC_UINT32 dbg_awprot_axi_mst; +} DPP_CFG_PCIE_DBG_AWPROT_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_wid_axi_mst_t { + ZXIC_UINT32 dbg_wid_axi_mst; +} DPP_CFG_PCIE_DBG_WID_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_wdata_axi_mst0_t { + ZXIC_UINT32 dbg_wdata_axi_mst0; +} DPP_CFG_PCIE_DBG_WDATA_AXI_MST0_T; + +typedef struct dpp_cfg_pcie_dbg_wdata_axi_mst1_t { + ZXIC_UINT32 dbg_wdata_axi_mst1; +} DPP_CFG_PCIE_DBG_WDATA_AXI_MST1_T; + +typedef struct dpp_cfg_pcie_dbg_wdata_axi_mst2_t { + ZXIC_UINT32 dbg_wdata_axi_mst2; +} DPP_CFG_PCIE_DBG_WDATA_AXI_MST2_T; + +typedef struct dpp_cfg_pcie_dbg_wdata_axi_mst3_t { + ZXIC_UINT32 dbg_wdata_axi_mst3; +} DPP_CFG_PCIE_DBG_WDATA_AXI_MST3_T; + +typedef struct dpp_cfg_pcie_dbg_wstrb_axi_mst_t { + ZXIC_UINT32 dbg_wstrb_axi_mst; +} DPP_CFG_PCIE_DBG_WSTRB_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_wlast_axi_mst_t { + ZXIC_UINT32 dbg_wlast_axi_mst; +} DPP_CFG_PCIE_DBG_WLAST_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arid_axi_mst_t { + ZXIC_UINT32 dbg_arid_axi_mst; +} DPP_CFG_PCIE_DBG_ARID_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_araddr_axi_mst0_t { + ZXIC_UINT32 dbg_araddr_axi_mst0; +} DPP_CFG_PCIE_DBG_ARADDR_AXI_MST0_T; + +typedef struct dpp_cfg_pcie_dbg_araddr_axi_mst1_t { + ZXIC_UINT32 dbg_araddr_axi_mst1; +} DPP_CFG_PCIE_DBG_ARADDR_AXI_MST1_T; + +typedef struct dpp_cfg_pcie_dbg_arlen_axi_mst_t { + ZXIC_UINT32 dbg_arlen_axi_mst; +} DPP_CFG_PCIE_DBG_ARLEN_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arsize_axi_mst_t { + ZXIC_UINT32 dbg_arsize_axi_mst; +} DPP_CFG_PCIE_DBG_ARSIZE_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arburst_axi_mst_t { + 
ZXIC_UINT32 dbg_arburst_axi_mst; +} DPP_CFG_PCIE_DBG_ARBURST_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arlock_axi_mst_t { + ZXIC_UINT32 dbg_arlock_axi_mst; +} DPP_CFG_PCIE_DBG_ARLOCK_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arcache_axi_mst_t { + ZXIC_UINT32 dbg_arcache_axi_mst; +} DPP_CFG_PCIE_DBG_ARCACHE_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_arprot_axi_mst_t { + ZXIC_UINT32 dbg_arprot_axi_mst; +} DPP_CFG_PCIE_DBG_ARPROT_AXI_MST_T; + +typedef struct dpp_cfg_pcie_dbg_rdata_axi_mst0_t { + ZXIC_UINT32 dbg_rdata_axi_mst0; +} DPP_CFG_PCIE_DBG_RDATA_AXI_MST0_T; + +typedef struct dpp_cfg_pcie_dbg_rdata_axi_mst1_t { + ZXIC_UINT32 dbg_rdata_axi_mst1; +} DPP_CFG_PCIE_DBG_RDATA_AXI_MST1_T; + +typedef struct dpp_cfg_pcie_dbg_rdata_axi_mst2_t { + ZXIC_UINT32 dbg_rdata_axi_mst2; +} DPP_CFG_PCIE_DBG_RDATA_AXI_MST2_T; + +typedef struct dpp_cfg_pcie_dbg_rdata_axi_mst3_t { + ZXIC_UINT32 dbg_rdata_axi_mst3; +} DPP_CFG_PCIE_DBG_RDATA_AXI_MST3_T; + +typedef struct dpp_cfg_pcie_axi_mst_state_t { + ZXIC_UINT32 axi_mst_state; +} DPP_CFG_PCIE_AXI_MST_STATE_T; + +typedef struct dpp_cfg_pcie_axi_cfg_state_t { + ZXIC_UINT32 axi_cfg_state; +} DPP_CFG_PCIE_AXI_CFG_STATE_T; + +typedef struct dpp_cfg_pcie_axi_slv_rd_state_t { + ZXIC_UINT32 axi_slv_rd_state; +} DPP_CFG_PCIE_AXI_SLV_RD_STATE_T; + +typedef struct dpp_cfg_pcie_axi_slv_wr_state_t { + ZXIC_UINT32 axi_slv_wr_state; +} DPP_CFG_PCIE_AXI_SLV_WR_STATE_T; + +typedef struct dpp_cfg_pcie_axim_delay_en_t { + ZXIC_UINT32 axim_delay_en; +} DPP_CFG_PCIE_AXIM_DELAY_EN_T; + +typedef struct dpp_cfg_pcie_axim_delay_t { + ZXIC_UINT32 axim_delay; +} DPP_CFG_PCIE_AXIM_DELAY_T; + +typedef struct dpp_cfg_pcie_axim_speed_wr_t { + ZXIC_UINT32 axim_speed_wr; +} DPP_CFG_PCIE_AXIM_SPEED_WR_T; + +typedef struct dpp_cfg_pcie_axim_speed_rd_t { + ZXIC_UINT32 axim_speed_rd; +} DPP_CFG_PCIE_AXIM_SPEED_RD_T; + +typedef struct dpp_cfg_pcie_dbg_awaddr_axi_slv0_t { + ZXIC_UINT32 dbg_awaddr_axi_slv0; +} DPP_CFG_PCIE_DBG_AWADDR_AXI_SLV0_T; + +typedef 
struct dpp_cfg_pcie_dbg_awaddr_axi_slv1_t { + ZXIC_UINT32 dbg_awaddr_axi_slv1; +} DPP_CFG_PCIE_DBG_AWADDR_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg0_wdata_axi_slv0_t { + ZXIC_UINT32 dbg0_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG0_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg0_wdata_axi_slv1_t { + ZXIC_UINT32 dbg0_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG0_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg0_wdata_axi_slv2_t { + ZXIC_UINT32 dbg0_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG0_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg0_wdata_axi_slv3_t { + ZXIC_UINT32 dbg0_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG0_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg1_wdata_axi_slv0_t { + ZXIC_UINT32 dbg1_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG1_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg1_wdata_axi_slv1_t { + ZXIC_UINT32 dbg1_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG1_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg1_wdata_axi_slv2_t { + ZXIC_UINT32 dbg1_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG1_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg1_wdata_axi_slv3_t { + ZXIC_UINT32 dbg1_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG1_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg2_wdata_axi_slv0_t { + ZXIC_UINT32 dbg2_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG2_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg2_wdata_axi_slv1_t { + ZXIC_UINT32 dbg2_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG2_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg2_wdata_axi_slv2_t { + ZXIC_UINT32 dbg2_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG2_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg2_wdata_axi_slv3_t { + ZXIC_UINT32 dbg2_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG2_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg3_wdata_axi_slv0_t { + ZXIC_UINT32 dbg3_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG3_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg3_wdata_axi_slv1_t { + ZXIC_UINT32 dbg3_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG3_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg3_wdata_axi_slv2_t { + ZXIC_UINT32 
dbg3_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG3_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg3_wdata_axi_slv3_t { + ZXIC_UINT32 dbg3_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG3_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg4_wdata_axi_slv0_t { + ZXIC_UINT32 dbg4_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG4_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg4_wdata_axi_slv1_t { + ZXIC_UINT32 dbg4_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG4_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg4_wdata_axi_slv2_t { + ZXIC_UINT32 dbg4_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG4_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg4_wdata_axi_slv3_t { + ZXIC_UINT32 dbg4_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG4_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg5_wdata_axi_slv0_t { + ZXIC_UINT32 dbg5_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG5_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg5_wdata_axi_slv1_t { + ZXIC_UINT32 dbg5_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG5_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg5_wdata_axi_slv2_t { + ZXIC_UINT32 dbg5_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG5_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg5_wdata_axi_slv3_t { + ZXIC_UINT32 dbg5_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG5_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg6_wdata_axi_slv0_t { + ZXIC_UINT32 dbg6_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG6_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg6_wdata_axi_slv1_t { + ZXIC_UINT32 dbg6_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG6_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg6_wdata_axi_slv2_t { + ZXIC_UINT32 dbg6_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG6_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg6_wdata_axi_slv3_t { + ZXIC_UINT32 dbg6_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG6_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg7_wdata_axi_slv0_t { + ZXIC_UINT32 dbg7_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG7_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg7_wdata_axi_slv1_t { + ZXIC_UINT32 dbg7_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG7_WDATA_AXI_SLV1_T; + 
+typedef struct dpp_cfg_pcie_dbg7_wdata_axi_slv2_t { + ZXIC_UINT32 dbg7_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG7_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg7_wdata_axi_slv3_t { + ZXIC_UINT32 dbg7_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG7_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg8_wdata_axi_slv0_t { + ZXIC_UINT32 dbg8_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG8_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg8_wdata_axi_slv1_t { + ZXIC_UINT32 dbg8_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG8_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg8_wdata_axi_slv2_t { + ZXIC_UINT32 dbg8_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG8_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg8_wdata_axi_slv3_t { + ZXIC_UINT32 dbg8_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG8_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg9_wdata_axi_slv0_t { + ZXIC_UINT32 dbg9_wdata_axi_slv0; +} DPP_CFG_PCIE_DBG9_WDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg9_wdata_axi_slv1_t { + ZXIC_UINT32 dbg9_wdata_axi_slv1; +} DPP_CFG_PCIE_DBG9_WDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg9_wdata_axi_slv2_t { + ZXIC_UINT32 dbg9_wdata_axi_slv2; +} DPP_CFG_PCIE_DBG9_WDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg9_wdata_axi_slv3_t { + ZXIC_UINT32 dbg9_wdata_axi_slv3; +} DPP_CFG_PCIE_DBG9_WDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg_awlen_axi_slv_t { + ZXIC_UINT32 dbg_awlen_axi_slv; +} DPP_CFG_PCIE_DBG_AWLEN_AXI_SLV_T; + +typedef struct dpp_cfg_pcie_dbg_wlast_axi_slv_t { + ZXIC_UINT32 dbg_wlast_axi_slv; +} DPP_CFG_PCIE_DBG_WLAST_AXI_SLV_T; + +typedef struct dpp_cfg_pcie_dbg_araddr_axi_slv0_t { + ZXIC_UINT32 dbg_araddr_axi_slv0; +} DPP_CFG_PCIE_DBG_ARADDR_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg_araddr_axi_slv1_t { + ZXIC_UINT32 dbg_araddr_axi_slv1; +} DPP_CFG_PCIE_DBG_ARADDR_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg0_rdata_axi_slv0_t { + ZXIC_UINT32 dbg0_rdata_axi_slv0; +} DPP_CFG_PCIE_DBG0_RDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg0_rdata_axi_slv1_t { + ZXIC_UINT32 
dbg0_rdata_axi_slv1; +} DPP_CFG_PCIE_DBG0_RDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg0_rdata_axi_slv2_t { + ZXIC_UINT32 dbg0_rdata_axi_slv2; +} DPP_CFG_PCIE_DBG0_RDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg0_rdata_axi_slv3_t { + ZXIC_UINT32 dbg0_rdata_axi_slv3; +} DPP_CFG_PCIE_DBG0_RDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg1_rdata_axi_slv0_t { + ZXIC_UINT32 dbg1_rdata_axi_slv0; +} DPP_CFG_PCIE_DBG1_RDATA_AXI_SLV0_T; + +typedef struct dpp_cfg_pcie_dbg1_rdata_axi_slv1_t { + ZXIC_UINT32 dbg1_rdata_axi_slv1; +} DPP_CFG_PCIE_DBG1_RDATA_AXI_SLV1_T; + +typedef struct dpp_cfg_pcie_dbg1_rdata_axi_slv2_t { + ZXIC_UINT32 dbg1_rdata_axi_slv2; +} DPP_CFG_PCIE_DBG1_RDATA_AXI_SLV2_T; + +typedef struct dpp_cfg_pcie_dbg1_rdata_axi_slv3_t { + ZXIC_UINT32 dbg1_rdata_axi_slv3; +} DPP_CFG_PCIE_DBG1_RDATA_AXI_SLV3_T; + +typedef struct dpp_cfg_pcie_dbg_rlast_axi_slv_t { + ZXIC_UINT32 dbg_rlast_axi_slv; +} DPP_CFG_PCIE_DBG_RLAST_AXI_SLV_T; + +typedef struct dpp_cfg_dma_dma_enable_t { + ZXIC_UINT32 dma_enable; +} DPP_CFG_DMA_DMA_ENABLE_T; + +typedef struct dpp_cfg_dma_up_req_t { + ZXIC_UINT32 up_req; +} DPP_CFG_DMA_UP_REQ_T; + +typedef struct dpp_cfg_dma_dma_up_current_state_t { + ZXIC_UINT32 dma_up_current_state; +} DPP_CFG_DMA_DMA_UP_CURRENT_STATE_T; + +typedef struct dpp_cfg_dma_dma_up_req_ack_t { + ZXIC_UINT32 dma_up_req_ack; +} DPP_CFG_DMA_DMA_UP_REQ_ACK_T; + +typedef struct dpp_cfg_dma_dma_done_latch_t { + ZXIC_UINT32 done_latch; +} DPP_CFG_DMA_DMA_DONE_LATCH_T; + +typedef struct dpp_cfg_dma_dma_up_cpu_addr_low32_t { + ZXIC_UINT32 dma_up_cpu_addr_low; +} DPP_CFG_DMA_DMA_UP_CPU_ADDR_LOW32_T; + +typedef struct dpp_cfg_dma_dma_up_cpu_addr_high32_t { + ZXIC_UINT32 dma_up_cpu_addr_high; +} DPP_CFG_DMA_DMA_UP_CPU_ADDR_HIGH32_T; + +typedef struct dpp_cfg_dma_dma_up_se_addr_t { + ZXIC_UINT32 dma_up_se_addr; +} DPP_CFG_DMA_DMA_UP_SE_ADDR_T; + +typedef struct dpp_cfg_dma_dma_done_int_t { + ZXIC_UINT32 dma_done_int; +} DPP_CFG_DMA_DMA_DONE_INT_T; + +typedef struct 
dpp_cfg_dma_sp_cfg_t { + ZXIC_UINT32 sp_cfg; +} DPP_CFG_DMA_SP_CFG_T; + +typedef struct dpp_cfg_dma_dma_ing_t { + ZXIC_UINT32 dma_ing; +} DPP_CFG_DMA_DMA_ING_T; + +typedef struct dpp_cfg_dma_rd_timeout_thresh_t { + ZXIC_UINT32 rd_timeout_thresh; +} DPP_CFG_DMA_RD_TIMEOUT_THRESH_T; + +typedef struct dpp_cfg_dma_dma_tab_sta_up_fifo_gap_t { + ZXIC_UINT32 dma_tab_sta_up_fifo_gap; +} DPP_CFG_DMA_DMA_TAB_STA_UP_FIFO_GAP_T; + +typedef struct dpp_cfg_dma_cfg_mac_tim_t { + ZXIC_UINT32 cfg_mac_tim; +} DPP_CFG_DMA_CFG_MAC_TIM_T; + +typedef struct dpp_cfg_dma_cfg_mac_num_t { + ZXIC_UINT32 cfg_mac_num; +} DPP_CFG_DMA_CFG_MAC_NUM_T; + +typedef struct dpp_cfg_dma_init_bd_addr_t { + ZXIC_UINT32 init_bd_addr; +} DPP_CFG_DMA_INIT_BD_ADDR_T; + +typedef struct dpp_cfg_dma_mac_up_bd_addr1_low32_t { + ZXIC_UINT32 mac_up_bd_addr1_low32; +} DPP_CFG_DMA_MAC_UP_BD_ADDR1_LOW32_T; + +typedef struct dpp_cfg_dma_mac_up_bd_addr1_high32_t { + ZXIC_UINT32 mac_up_bd_addr1_high32; +} DPP_CFG_DMA_MAC_UP_BD_ADDR1_HIGH32_T; + +typedef struct dpp_cfg_dma_mac_up_bd_addr2_low32_t { + ZXIC_UINT32 mac_up_bd_addr2_low32; +} DPP_CFG_DMA_MAC_UP_BD_ADDR2_LOW32_T; + +typedef struct dpp_cfg_dma_mac_up_bd_addr2_high32_t { + ZXIC_UINT32 mac_up_bd_addr2_high32; +} DPP_CFG_DMA_MAC_UP_BD_ADDR2_HIGH32_T; + +typedef struct dpp_cfg_dma_cfg_mac_max_num_t { + ZXIC_UINT32 cfg_mac_max_num; +} DPP_CFG_DMA_CFG_MAC_MAX_NUM_T; + +typedef struct dpp_cfg_dma_dma_wbuf_ff_empty_t { + ZXIC_UINT32 dma_wbuf_ff_empty; +} DPP_CFG_DMA_DMA_WBUF_FF_EMPTY_T; + +typedef struct dpp_cfg_dma_dma_wbuf_state_t { + ZXIC_UINT32 dma_wbuf_state; +} DPP_CFG_DMA_DMA_WBUF_STATE_T; + +typedef struct dpp_cfg_dma_dma_mac_bd_addr_low32_t { + ZXIC_UINT32 dma_mac_bd_addr_low32; +} DPP_CFG_DMA_DMA_MAC_BD_ADDR_LOW32_T; + +typedef struct dpp_cfg_dma_dma_mac_bd_addr_high32_t { + ZXIC_UINT32 dma_mac_bd_addr_high32; +} DPP_CFG_DMA_DMA_MAC_BD_ADDR_HIGH32_T; + +typedef struct dpp_cfg_dma_mac_up_enable_t { + ZXIC_UINT32 mac_up_enable; +} DPP_CFG_DMA_MAC_UP_ENABLE_T; + 
+typedef struct dpp_cfg_dma_mac_endian_t { + ZXIC_UINT32 mac_endian; +} DPP_CFG_DMA_MAC_ENDIAN_T; + +typedef struct dpp_cfg_dma_up_endian_t { + ZXIC_UINT32 up_endian; +} DPP_CFG_DMA_UP_ENDIAN_T; + +typedef struct dpp_cfg_dma_dma_up_rd_cnt_latch_t { + ZXIC_UINT32 dma_up_rd_cnt_latch; +} DPP_CFG_DMA_DMA_UP_RD_CNT_LATCH_T; + +typedef struct dpp_cfg_dma_dma_up_rcv_cnt_latch_t { + ZXIC_UINT32 dma_up_rcv_cnt_latch; +} DPP_CFG_DMA_DMA_UP_RCV_CNT_LATCH_T; + +typedef struct dpp_cfg_dma_dma_up_cnt_latch_t { + ZXIC_UINT32 dma_up_cnt_latch; +} DPP_CFG_DMA_DMA_UP_CNT_LATCH_T; + +typedef struct dpp_cfg_dma_cpu_rd_bd_pulse_t { + ZXIC_UINT32 cpu_rd_bd_pulse; +} DPP_CFG_DMA_CPU_RD_BD_PULSE_T; + +typedef struct dpp_cfg_dma_cpu_bd_threshold_t { + ZXIC_UINT32 cpu_bd_threshold; +} DPP_CFG_DMA_CPU_BD_THRESHOLD_T; + +typedef struct dpp_cfg_dma_cpu_bd_used_cnt_t { + ZXIC_UINT32 cpu_bd_used_cnt; +} DPP_CFG_DMA_CPU_BD_USED_CNT_T; + +typedef struct dpp_cfg_dma_dma_up_rcv_status_t { + ZXIC_UINT32 dma_up_rcv_status; +} DPP_CFG_DMA_DMA_UP_RCV_STATUS_T; + +typedef struct dpp_cfg_dma_slv_rid_err_en_t { + ZXIC_UINT32 slv_rid_err_en; +} DPP_CFG_DMA_SLV_RID_ERR_EN_T; + +typedef struct dpp_cfg_dma_slv_rresp_err_en_t { + ZXIC_UINT32 slv_rresp_err_en; +} DPP_CFG_DMA_SLV_RRESP_ERR_EN_T; + +typedef struct dpp_cfg_dma_se_rdbk_ff_full_t { + ZXIC_UINT32 se_rdbk_ff_full; +} DPP_CFG_DMA_SE_RDBK_FF_FULL_T; + +typedef struct dpp_cfg_dma_dma_up_data_count_t { + ZXIC_UINT32 dma_up_data_count; +} DPP_CFG_DMA_DMA_UP_DATA_COUNT_T; + +typedef struct dpp_cfg_dma_dma_mwr_fifo_afull_gap_t { + ZXIC_UINT32 dma_mwr_fifo_afull_gap; +} DPP_CFG_DMA_DMA_MWR_FIFO_AFULL_GAP_T; + +typedef struct dpp_cfg_dma_dma_info_fifo_afull_gap_t { + ZXIC_UINT32 dma_info_fifo_afull_gap; +} DPP_CFG_DMA_DMA_INFO_FIFO_AFULL_GAP_T; + +typedef struct dpp_cfg_dma_dma_rd_timeout_set_t { + ZXIC_UINT32 dma_rd_timeout_set; +} DPP_CFG_DMA_DMA_RD_TIMEOUT_SET_T; + +typedef struct dpp_cfg_dma_dma_bd_dat_err_en_t { + ZXIC_UINT32 dma_bd_dat_err_en; +} 
DPP_CFG_DMA_DMA_BD_DAT_ERR_EN_T; + +typedef struct dpp_cfg_dma_dma_repeat_cnt_t { + ZXIC_UINT32 dma_repeat_cnt; +} DPP_CFG_DMA_DMA_REPEAT_CNT_T; + +typedef struct dpp_cfg_dma_dma_rd_timeout_en_t { + ZXIC_UINT32 dma_rd_timeout_en; +} DPP_CFG_DMA_DMA_RD_TIMEOUT_EN_T; + +typedef struct dpp_cfg_dma_dma_repeat_read_t { + ZXIC_UINT32 dma_repeat_read; +} DPP_CFG_DMA_DMA_REPEAT_READ_T; + +typedef struct dpp_cfg_dma_dma_repeat_read_en_t { + ZXIC_UINT32 dma_repeat_read_en; +} DPP_CFG_DMA_DMA_REPEAT_READ_EN_T; + +typedef struct dpp_cfg_dma_bd_ctl_state_t { + ZXIC_UINT32 bd_ctl_state; +} DPP_CFG_DMA_BD_CTL_STATE_T; + +typedef struct dpp_cfg_dma_dma_done_int_cnt_wr_t { + ZXIC_UINT32 dma_done_int_cnt_wr; +} DPP_CFG_DMA_DMA_DONE_INT_CNT_WR_T; + +typedef struct dpp_cfg_dma_dma_done_int_cnt_mac_t { + ZXIC_UINT32 dma_done_int_cnt_mac; +} DPP_CFG_DMA_DMA_DONE_INT_CNT_MAC_T; + +typedef struct dpp_cfg_dma_current_mac_num_t { + ZXIC_UINT32 current_mac_num; +} DPP_CFG_DMA_CURRENT_MAC_NUM_T; + +typedef struct dpp_cfg_dma_cfg_mac_afifo_afull_t { + ZXIC_UINT32 cfg_mac_afifo_afull; +} DPP_CFG_DMA_CFG_MAC_AFIFO_AFULL_T; + +typedef struct dpp_cfg_dma_dma_mac_ff_full_t { + ZXIC_UINT32 dma_mac_ff_full; +} DPP_CFG_DMA_DMA_MAC_FF_FULL_T; + +typedef struct dpp_cfg_dma_user_axi_mst_t { + ZXIC_UINT32 user_en; + ZXIC_UINT32 cfg_epid; + ZXIC_UINT32 cfg_vfunc_num; + ZXIC_UINT32 cfg_func_num; + ZXIC_UINT32 cfg_vfunc_active; +} DPP_CFG_DMA_USER_AXI_MST_T; + +typedef struct dpp_cfg_csr_sbus_state_t { + ZXIC_UINT32 sbus_state; +} DPP_CFG_CSR_SBUS_STATE_T; + +typedef struct dpp_cfg_csr_mst_debug_en_t { + ZXIC_UINT32 mst_debug_en; +} DPP_CFG_CSR_MST_DEBUG_EN_T; + +typedef struct dpp_cfg_csr_sbus_command_sel_t { + ZXIC_UINT32 sbus_command_sel; +} DPP_CFG_CSR_SBUS_COMMAND_SEL_T; + +typedef struct dpp_cfg_csr_soc_rd_time_out_thresh_t { + ZXIC_UINT32 soc_rd_time_out_thresh; +} DPP_CFG_CSR_SOC_RD_TIME_OUT_THRESH_T; + +typedef struct dpp_cfg_csr_big_little_byte_order_t { + ZXIC_UINT32 big_little_byte_order; +} 
DPP_CFG_CSR_BIG_LITTLE_BYTE_ORDER_T; + +typedef struct dpp_cfg_csr_ecc_bypass_read_t { + ZXIC_UINT32 ecc_bypass_read; +} DPP_CFG_CSR_ECC_BYPASS_READ_T; + +typedef struct dpp_cfg_csr_ahb_async_wr_fifo_afull_gap_t { + ZXIC_UINT32 ahb_async_wr_fifo_afull_gap; +} DPP_CFG_CSR_AHB_ASYNC_WR_FIFO_AFULL_GAP_T; + +typedef struct dpp_cfg_csr_ahb_async_rd_fifo_afull_gap_t { + ZXIC_UINT32 ahb_async_rd_fifo_afull_gap; +} DPP_CFG_CSR_AHB_ASYNC_RD_FIFO_AFULL_GAP_T; + +typedef struct dpp_cfg_csr_ahb_async_cpl_fifo_afull_gap_t { + ZXIC_UINT32 ahb_async_cpl_fifo_afull_gap; +} DPP_CFG_CSR_AHB_ASYNC_CPL_FIFO_AFULL_GAP_T; + +typedef struct dpp_cfg_csr_mst_debug_data0_high26_t { + ZXIC_UINT32 mst_debug_data0_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA0_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data0_low32_t { + ZXIC_UINT32 mst_debug_data0_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA0_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data1_high26_t { + ZXIC_UINT32 mst_debug_data1_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA1_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data1_low32_t { + ZXIC_UINT32 mst_debug_data1_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA1_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data2_high26_t { + ZXIC_UINT32 mst_debug_data2_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA2_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data2_low32_t { + ZXIC_UINT32 mst_debug_data2_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA2_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data3_high26_t { + ZXIC_UINT32 mst_debug_data3_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA3_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data3_low32_t { + ZXIC_UINT32 mst_debug_data3_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA3_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data4_high26_t { + ZXIC_UINT32 mst_debug_data4_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA4_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data4_low32_t { + ZXIC_UINT32 mst_debug_data4_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA4_LOW32_T; + +typedef struct 
dpp_cfg_csr_mst_debug_data5_high26_t { + ZXIC_UINT32 mst_debug_data5_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA5_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data5_low32_t { + ZXIC_UINT32 mst_debug_data5_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA5_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data6_high26_t { + ZXIC_UINT32 mst_debug_data6_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA6_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data6_low32_t { + ZXIC_UINT32 mst_debug_data6_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA6_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data7_high26_t { + ZXIC_UINT32 mst_debug_data7_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA7_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data7_low32_t { + ZXIC_UINT32 mst_debug_data7_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA7_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data8_high26_t { + ZXIC_UINT32 mst_debug_data8_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA8_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data8_low32_t { + ZXIC_UINT32 mst_debug_data8_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA8_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data9_high26_t { + ZXIC_UINT32 mst_debug_data9_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA9_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data9_low32_t { + ZXIC_UINT32 mst_debug_data9_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA9_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data10_high26_t { + ZXIC_UINT32 mst_debug_data10_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA10_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data10_low32_t { + ZXIC_UINT32 mst_debug_data10_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA10_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data11_high26_t { + ZXIC_UINT32 mst_debug_data11_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA11_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data11_low32_t { + ZXIC_UINT32 mst_debug_data11_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA11_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data12_high26_t { + ZXIC_UINT32 mst_debug_data12_high26; +} 
DPP_CFG_CSR_MST_DEBUG_DATA12_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data12_low32_t { + ZXIC_UINT32 mst_debug_data12_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA12_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data13_high26_t { + ZXIC_UINT32 mst_debug_data13_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA13_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data13_low32_t { + ZXIC_UINT32 mst_debug_data13_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA13_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data14_high26_t { + ZXIC_UINT32 mst_debug_data14_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA14_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data14_low32_t { + ZXIC_UINT32 mst_debug_data14_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA14_LOW32_T; + +typedef struct dpp_cfg_csr_mst_debug_data15_high26_t { + ZXIC_UINT32 mst_debug_data15_high26; +} DPP_CFG_CSR_MST_DEBUG_DATA15_HIGH26_T; + +typedef struct dpp_cfg_csr_mst_debug_data15_low32_t { + ZXIC_UINT32 mst_debug_data15_low32; +} DPP_CFG_CSR_MST_DEBUG_DATA15_LOW32_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb4k_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb4k_reg.h new file mode 100644 index 000000000000..6f5a624b1ef9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb4k_reg.h @@ -0,0 +1,40 @@ + +#ifndef _DPP_DTB4K_REG_H_ +#define _DPP_DTB4K_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_dtb4k_dtb_enq_cfg_queue_dtb_addr_h_0_127_t { + ZXIC_UINT32 cfg_queue_dtb_addr_h; +} DPP_DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_H_0_127_T; + +typedef struct dpp_dtb4k_dtb_enq_cfg_queue_dtb_addr_l_0_127_t { + ZXIC_UINT32 cfg_queue_dtb_addr_l; +} DPP_DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_L_0_127_T; + +typedef struct dpp_dtb4k_dtb_enq_cfg_queue_dtb_len_0_127_t { + ZXIC_UINT32 cfg_dtb_cmd_type; + ZXIC_UINT32 cfg_dtb_cmd_int_en; + ZXIC_UINT32 cfg_queue_dtb_len; +} DPP_DTB4K_DTB_ENQ_CFG_QUEUE_DTB_LEN_0_127_T; + +typedef struct 
dpp_dtb4k_dtb_enq_info_queue_buf_space_left_0_127_t { + ZXIC_UINT32 info_queue_buf_space_left; +} DPP_DTB4K_DTB_ENQ_INFO_QUEUE_BUF_SPACE_LEFT_0_127_T; + +typedef struct dpp_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_t { + ZXIC_UINT32 dbi_en; + ZXIC_UINT32 queue_en; + ZXIC_UINT32 cfg_epid; + ZXIC_UINT32 cfg_vfunc_num; + ZXIC_UINT32 cfg_vector; + ZXIC_UINT32 cfg_func_num; + ZXIC_UINT32 cfg_vfunc_active; +} DPP_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb_reg.h new file mode 100644 index 000000000000..561adc9d5381 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_dtb_reg.h @@ -0,0 +1,484 @@ + +#ifndef _DPP_DTB_REG_H_ +#define _DPP_DTB_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_dtb_dtb_cfg_cfg_eram_wr_interval_cnt_t { + ZXIC_UINT32 cfg_eram_wr_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_ERAM_WR_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_zcam_wr_interval_cnt_t { + ZXIC_UINT32 cfg_zcam_wr_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_ZCAM_WR_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_tcam_wr_interval_cnt_t { + ZXIC_UINT32 cfg_tcam_wr_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_TCAM_WR_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_ddr_wr_interval_cnt_t { + ZXIC_UINT32 cfg_ddr_wr_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_DDR_WR_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_hash_wr_interval_cnt_t { + ZXIC_UINT32 cfg_hash_wr_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_HASH_WR_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_eram_rd_interval_cnt_t { + ZXIC_UINT32 cfg_eram_rd_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_ERAM_RD_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_zcam_rd_interval_cnt_t { + ZXIC_UINT32 cfg_zcam_rd_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_ZCAM_RD_INTERVAL_CNT_T; + +typedef struct 
dpp_dtb_dtb_cfg_cfg_tcam_rd_interval_cnt_t { + ZXIC_UINT32 cfg_tcam_rd_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_TCAM_RD_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_ddr_rd_interval_cnt_t { + ZXIC_UINT32 cfg_ddr_rd_interval_cnt; +} DPP_DTB_DTB_CFG_CFG_DDR_RD_INTERVAL_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_queue_lock_state_0_3_t { + ZXIC_UINT32 cfg_dtb_queue_lock_state; +} DPP_DTB_DTB_CFG_CFG_DTB_QUEUE_LOCK_STATE_0_3_T; + +typedef struct dpp_dtb_dtb_axim0_w_convert_0_mode_t { + ZXIC_UINT32 w_convert_mode; +} DPP_DTB_DTB_AXIM0_W_CONVERT_0_MODE_T; + +typedef struct dpp_dtb_dtb_axim0_r_convert_0_mode_t { + ZXIC_UINT32 r_convert_mode; +} DPP_DTB_DTB_AXIM0_R_CONVERT_0_MODE_T; + +typedef struct dpp_dtb_dtb_axim0_aximr_os_t { + ZXIC_UINT32 aximr_os; +} DPP_DTB_DTB_AXIM0_AXIMR_OS_T; + +typedef struct dpp_dtb_dtb_axim1_w_convert_1_mode_t { + ZXIC_UINT32 w_convert_mode; +} DPP_DTB_DTB_AXIM1_W_CONVERT_1_MODE_T; + +typedef struct dpp_dtb_dtb_axim1_r_convert_1_mode_t { + ZXIC_UINT32 r_convert_mode; +} DPP_DTB_DTB_AXIM1_R_CONVERT_1_MODE_T; + +typedef struct dpp_dtb_dtb_axis_axis_convert_mode_t { + ZXIC_UINT32 w_r_convert_mode; +} DPP_DTB_DTB_AXIS_AXIS_CONVERT_MODE_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_event0_t { + ZXIC_UINT32 cfg_finish_int_event0; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_EVENT0_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_event1_t { + ZXIC_UINT32 cfg_finish_int_event1; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_EVENT1_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_event2_t { + ZXIC_UINT32 cfg_finish_int_event2; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_EVENT2_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_event3_t { + ZXIC_UINT32 cfg_finish_int_event3; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_EVENT3_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_maks0_t { + ZXIC_UINT32 cfg_finish_int_mask0; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_MAKS0_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_maks1_t { + ZXIC_UINT32 cfg_finish_int_mask1; +} 
DPP_DTB_DTB_CFG_CFG_FINISH_INT_MAKS1_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_maks2_t { + ZXIC_UINT32 cfg_finish_int_mask2; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_MAKS2_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_maks3_t { + ZXIC_UINT32 cfg_finish_int_mask3; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_MAKS3_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_test0_t { + ZXIC_UINT32 cfg_finish_int_test0; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_TEST0_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_test1_t { + ZXIC_UINT32 cfg_finish_int_test1; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_TEST1_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_test2_t { + ZXIC_UINT32 cfg_finish_int_test2; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_TEST2_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_finish_int_test3_t { + ZXIC_UINT32 cfg_finish_int_test3; +} DPP_DTB_DTB_CFG_CFG_FINISH_INT_TEST3_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_int_to_riscv_sel_t { + ZXIC_UINT32 cfg_dtb_int_to_riscv_sel0; +} DPP_DTB_DTB_CFG_CFG_DTB_INT_TO_RISCV_SEL_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_ep_int_msix_enable_t { + ZXIC_UINT32 cfg_dtb_ep_int_msix_enable; +} DPP_DTB_DTB_CFG_CFG_DTB_EP_INT_MSIX_ENABLE_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_h_0_15_t { + ZXIC_UINT32 cfg_dtb_ep_doorbell_addr_h_0_15; +} DPP_DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_H_0_15_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_l_0_15_t { + ZXIC_UINT32 cfg_dtb_ep_doorbell_addr_l_0_15; +} DPP_DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_L_0_15_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_debug_mode_en_t { + ZXIC_UINT32 cfg_dtb_debug_mode_en; +} DPP_DTB_DTB_CFG_CFG_DTB_DEBUG_MODE_EN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_table_addr_high_t { + ZXIC_UINT32 info_axi_last_rd_table_addr_high; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_table_addr_low_t { + ZXIC_UINT32 info_axi_last_rd_table_addr_low; +} 
DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_table_len_t { + ZXIC_UINT32 info_axi_last_rd_table_len; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_LEN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_table_user_t { + ZXIC_UINT32 info_rd_table_user_en; + ZXIC_UINT32 info_rd_table_epid; + ZXIC_UINT32 info_rd_table_vfunc_num; + ZXIC_UINT32 info_rd_table_func_num; + ZXIC_UINT32 info_rd_table_vfunc_active; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_table_onload_cnt_t { + ZXIC_UINT32 info_axi_last_rd_table_onload_cnt; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ONLOAD_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_axi_rd_table_resp_err_t { + ZXIC_UINT32 cnt_axi_rd_table_resp_err; +} DPP_DTB_DTB_CFG_CNT_AXI_RD_TABLE_RESP_ERR_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_pd_addr_high_t { + ZXIC_UINT32 info_axi_last_rd_pd_addr_high; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_pd_addr_low_t { + ZXIC_UINT32 info_axi_last_rd_pd_addr_low; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_pd_len_t { + ZXIC_UINT32 info_axi_last_rd_pd_len; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_LEN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_pd_user_t { + ZXIC_UINT32 info_rd_pd_user_en; + ZXIC_UINT32 info_rd_pd_epid; + ZXIC_UINT32 info_rd_pd_vfunc_num; + ZXIC_UINT32 info_rd_pd_func_num; + ZXIC_UINT32 info_rd_pd_vfunc_active; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_rd_pd_onload_cnt_t { + ZXIC_UINT32 info_axi_last_rd_pd_onload_cnt; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ONLOAD_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_axi_rd_pd_resp_err_t { + ZXIC_UINT32 cnt_axi_rd_pd_resp_err; +} DPP_DTB_DTB_CFG_CNT_AXI_RD_PD_RESP_ERR_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_high_t { + ZXIC_UINT32 
info_axi_last_wr_ctrl_addr_high; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_low_t { + ZXIC_UINT32 info_axi_last_wr_ctrl_addr_low; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ctrl_len_t { + ZXIC_UINT32 info_axi_last_wr_ctrl_len; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_LEN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ctrl_user_t { + ZXIC_UINT32 info_wr_ctrl_user_en; + ZXIC_UINT32 info_wr_ctrl_epid; + ZXIC_UINT32 info_wr_ctrl_vfunc_num; + ZXIC_UINT32 info_wr_ctrl_func_num; + ZXIC_UINT32 info_wr_ctrl_vfunc_active; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ctrl_onload_cnt_t { + ZXIC_UINT32 info_axi_last_wr_ctrl_onload_cnt; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ONLOAD_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_axi_wr_ctrl_resp_err_t { + ZXIC_UINT32 cnt_axi_wr_ctrl_resp_err; +} DPP_DTB_DTB_CFG_CNT_AXI_WR_CTRL_RESP_ERR_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_high_t { + ZXIC_UINT32 info_axi_last_wr_ddr_addr_high; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_low_t { + ZXIC_UINT32 info_axi_last_wr_ddr_addr_low; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ddr_len_t { + ZXIC_UINT32 info_axi_last_wr_ddr_len; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_LEN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ddr_user_t { + ZXIC_UINT32 info_wr_ddr_user_en; + ZXIC_UINT32 info_wr_ddr_epid; + ZXIC_UINT32 info_wr_ddr_vfunc_num; + ZXIC_UINT32 info_wr_ddr_func_num; + ZXIC_UINT32 info_wr_ddr_vfunc_active; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_ddr_onload_cnt_t { + ZXIC_UINT32 info_axi_last_wr_ddr_onload_cnt; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ONLOAD_CNT_T; + +typedef struct 
dpp_dtb_dtb_cfg_cnt_axi_wr_ddr_resp_err_t { + ZXIC_UINT32 cnt_axi_wr_ddr_resp_err; +} DPP_DTB_DTB_CFG_CNT_AXI_WR_DDR_RESP_ERR_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_fin_addr_high_t { + ZXIC_UINT32 info_axi_last_wr_fin_addr_high; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_fin_addr_low_t { + ZXIC_UINT32 info_axi_last_wr_fin_addr_low; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_fin_len_t { + ZXIC_UINT32 info_axi_last_wr_fin_len; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_LEN_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_fin_user_t { + ZXIC_UINT32 info_wr_fin_user_en; + ZXIC_UINT32 info_wr_fin_epid; + ZXIC_UINT32 info_wr_fin_vfunc_num; + ZXIC_UINT32 info_wr_fin_func_num; + ZXIC_UINT32 info_wr_fin_vfunc_active; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T; + +typedef struct dpp_dtb_dtb_cfg_info_axi_last_wr_fin_onload_cnt_t { + ZXIC_UINT32 info_axi_last_wr_fin_onload_cnt; +} DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ONLOAD_CNT_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_axi_wr_fin_resp_err_t { + ZXIC_UINT32 cnt_axi_wr_fin_resp_err; +} DPP_DTB_DTB_CFG_CNT_AXI_WR_FIN_RESP_ERR_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_high_t { + ZXIC_UINT32 cnt_dtb_wr_smmu0_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_low_t { + ZXIC_UINT32 cnt_dtb_wr_smmu0_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_high_t { + ZXIC_UINT32 cnt_dtb_wr_smmu1_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_low_t { + ZXIC_UINT32 cnt_dtb_wr_smmu1_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_high_t { + ZXIC_UINT32 cnt_dtb_wr_zcam_table_high; +} 
DPP_DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_low_t { + ZXIC_UINT32 cnt_dtb_wr_zcam_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_high_t { + ZXIC_UINT32 cnt_dtb_wr_etcam_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_low_t { + ZXIC_UINT32 cnt_dtb_wr_etcam_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_hash_table_high_t { + ZXIC_UINT32 cnt_dtb_wr_hash_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_wr_hash_table_low_t { + ZXIC_UINT32 cnt_dtb_wr_hash_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_high_t { + ZXIC_UINT32 cnt_dtb_rd_smmu0_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_low_t { + ZXIC_UINT32 cnt_dtb_rd_smmu0_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_high_t { + ZXIC_UINT32 cnt_dtb_rd_smmu1_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_low_t { + ZXIC_UINT32 cnt_dtb_rd_smmu1_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_high_t { + ZXIC_UINT32 cnt_dtb_rd_zcam_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_low_t { + ZXIC_UINT32 cnt_dtb_rd_zcam_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_high_t { + ZXIC_UINT32 cnt_dtb_rd_etcam_table_high; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_HIGH_T; + +typedef struct dpp_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_low_t { + ZXIC_UINT32 
cnt_dtb_rd_etcam_table_low; +} DPP_DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_LOW_T; + +typedef struct dpp_dtb_dtb_cfg_info_wr_ctrl_state_t { + ZXIC_UINT32 info_wr_ctrl_state; +} DPP_DTB_DTB_CFG_INFO_WR_CTRL_STATE_T; + +typedef struct dpp_dtb_dtb_cfg_info_rd_table_state_t { + ZXIC_UINT32 info_rd_table_state; +} DPP_DTB_DTB_CFG_INFO_RD_TABLE_STATE_T; + +typedef struct dpp_dtb_dtb_cfg_info_rd_pd_state_t { + ZXIC_UINT32 info_rd_pd_state; +} DPP_DTB_DTB_CFG_INFO_RD_PD_STATE_T; + +typedef struct dpp_dtb_dtb_cfg_info_dump_cmd_state_t { + ZXIC_UINT32 info_dump_cmd_state; +} DPP_DTB_DTB_CFG_INFO_DUMP_CMD_STATE_T; + +typedef struct dpp_dtb_dtb_cfg_info_wr_ddr_state_t { + ZXIC_UINT32 info_wr_ddr_state; +} DPP_DTB_DTB_CFG_INFO_WR_DDR_STATE_T; + +typedef struct dpp_dtb_dtb_cfg_cfg_dtb_debug_info_clr_t { + ZXIC_UINT32 cfg_dtb_debug_info_clr; +} DPP_DTB_DTB_CFG_CFG_DTB_DEBUG_INFO_CLR_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_stat_dump_thrd_0_15_t { + ZXIC_UINT32 cfg_ddos_stat_dump_thrd; +} DPP_DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_0_15_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_stat_dump_thrd_comp_en_t { + ZXIC_UINT32 cfg_ddos_stat_dump_thrd_comp_en; +} DPP_DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_COMP_EN_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_dump_stat_num_t { + ZXIC_UINT32 cfg_ddos_dump_stat_num; +} DPP_DTB_DDOS_CFG_DDOS_DUMP_STAT_NUM_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_even_hash_table_baddr_t { + ZXIC_UINT32 cfg_ddos_even_hash_table_baddr; +} DPP_DTB_DDOS_CFG_DDOS_EVEN_HASH_TABLE_BADDR_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_odd_hash_table_baddr_t { + ZXIC_UINT32 cfg_ddos_odd_hash_table_baddr; +} DPP_DTB_DDOS_CFG_DDOS_ODD_HASH_TABLE_BADDR_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_stat_index_offset_t { + ZXIC_UINT32 cfg_ddos_stat_index_offset; +} DPP_DTB_DDOS_CFG_DDOS_STAT_INDEX_OFFSET_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_ns_flag_cnt_t { + ZXIC_UINT32 cfg_ddos_ns_flag_cnt; +} DPP_DTB_DDOS_CFG_DDOS_NS_FLAG_CNT_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_even_stat_table_baddr_t 
{ + ZXIC_UINT32 cfg_ddos_even_stat_table_baddr; +} DPP_DTB_DDOS_CFG_DDOS_EVEN_STAT_TABLE_BADDR_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_odd_stat_table_baddr_t { + ZXIC_UINT32 cfg_ddos_odd_stat_table_baddr; +} DPP_DTB_DDOS_CFG_DDOS_ODD_STAT_TABLE_BADDR_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_even_stat_dump_daddr_h_t { + ZXIC_UINT32 cfg_ddos_even_stat_dump_daddr_h; +} DPP_DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_H_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_even_stat_dump_daddr_l_t { + ZXIC_UINT32 cfg_ddos_even_stat_dump_daddr_l; +} DPP_DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_L_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_h_t { + ZXIC_UINT32 cfg_ddos_odd_stat_dump_daddr_h; +} DPP_DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_H_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_l_t { + ZXIC_UINT32 cfg_ddos_odd_stat_dump_daddr_l; +} DPP_DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_L_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_work_mode_enable_t { + ZXIC_UINT32 cfg_ddos_mode_work_enable; +} DPP_DTB_DDOS_CFG_DDOS_WORK_MODE_ENABLE_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_stat_table_len_t { + ZXIC_UINT32 cfg_ddos_stat_table_len; +} DPP_DTB_DDOS_CFG_DDOS_STAT_TABLE_LEN_T; + +typedef struct dpp_dtb_ddos_cfg_ddos_hash_table_len_t { + ZXIC_UINT32 cfg_ddos_hash_table_len; +} DPP_DTB_DDOS_CFG_DDOS_HASH_TABLE_LEN_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram0_0_255_t { + ZXIC_UINT32 traf_ctrl_ram0_0_255; +} DPP_DTB_DTB_RAM_TRAF_CTRL_RAM0_0_255_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram1_0_255_t { + ZXIC_UINT32 traf_ctrl_ram1_0_255; +} DPP_DTB_DTB_RAM_TRAF_CTRL_RAM1_0_255_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram2_0_255_t { + ZXIC_UINT32 traf_ctrl_ram2_0_255; +} DPP_DTB_DTB_RAM_TRAF_CTRL_RAM2_0_255_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram3_0_255_t { + ZXIC_UINT32 traf_ctrl_ram3_0_255; +} DPP_DTB_DTB_RAM_TRAF_CTRL_RAM3_0_255_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram4_0_255_t { + ZXIC_UINT32 traf_ctrl_ram4_0_255; +} 
DPP_DTB_DTB_RAM_TRAF_CTRL_RAM4_0_255_T; + +typedef struct dpp_dtb_dtb_ram_traf_ctrl_ram5_0_63_t { + ZXIC_UINT32 traf_ctrl_ram5_0_63; +} DPP_DTB_DTB_RAM_TRAF_CTRL_RAM5_0_63_T; + +typedef struct dpp_dtb_dtb_ram_dump_pd_ram_0_2047_t { + ZXIC_UINT32 dump_pd_ram_0_2047; +} DPP_DTB_DTB_RAM_DUMP_PD_RAM_0_2047_T; + +typedef struct dpp_dtb_dtb_ram_rd_ctrl_ram_0_4095_t { + ZXIC_UINT32 rd_ctrl_ram_0_4095; +} DPP_DTB_DTB_RAM_RD_CTRL_RAM_0_4095_T; + +typedef struct dpp_dtb_dtb_ram_rd_table_ram_0_8191_t { + ZXIC_UINT32 rd_table_ram_0_8191; +} DPP_DTB_DTB_RAM_RD_TABLE_RAM_0_8191_T; + +typedef struct dpp_dtb_dtb_ram_dtb_cmd_man_ram_0_16383_t { + ZXIC_UINT32 dtb_cmd_man_ram_0_16383; +} DPP_DTB_DTB_RAM_DTB_CMD_MAN_RAM_0_16383_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_etm_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_etm_reg.h new file mode 100644 index 000000000000..480d67074f61 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_etm_reg.h @@ -0,0 +1,4056 @@ + +#ifndef _DPP_ETM_REG_H_ +#define _DPP_ETM_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_etm_cfgmt_cpu_check_reg_t { + ZXIC_UINT32 cpu_check_reg; +} DPP_ETM_CFGMT_CPU_CHECK_REG_T; + +typedef struct dpp_etm_cfgmt_cfgmt_blksize_t { + ZXIC_UINT32 cfgmt_blksize; +} DPP_ETM_CFGMT_CFGMT_BLKSIZE_T; + +typedef struct dpp_etm_cfgmt_reg_int_state_reg_t { + ZXIC_UINT32 shap_int; + ZXIC_UINT32 crdt_int; + ZXIC_UINT32 mmu_int; + ZXIC_UINT32 qmu_int; + ZXIC_UINT32 cgavd_int; + ZXIC_UINT32 olif_int; + ZXIC_UINT32 cfgmt_int_buf; +} DPP_ETM_CFGMT_REG_INT_STATE_REG_T; + +typedef struct dpp_etm_cfgmt_reg_int_mask_reg_t { + ZXIC_UINT32 shap_int_mask; + ZXIC_UINT32 crdt_int_mask; + ZXIC_UINT32 tmmu_int_mask; + ZXIC_UINT32 qmu_int_mask; + ZXIC_UINT32 cgavd_int_mask; + ZXIC_UINT32 olif_int_mask; + ZXIC_UINT32 cfgmt_int_buf_mask; +} DPP_ETM_CFGMT_REG_INT_MASK_REG_T; + +typedef struct 
dpp_etm_cfgmt_timeout_limit_t { + ZXIC_UINT32 timeout_limit; +} DPP_ETM_CFGMT_TIMEOUT_LIMIT_T; + +typedef struct dpp_etm_cfgmt_subsystem_rdy_reg_t { + ZXIC_UINT32 olif_rdy; + ZXIC_UINT32 qmu_rdy; + ZXIC_UINT32 cgavd_rdy; + ZXIC_UINT32 tmmu_rdy; + ZXIC_UINT32 shap_rdy; + ZXIC_UINT32 crdt_rdy; +} DPP_ETM_CFGMT_SUBSYSTEM_RDY_REG_T; + +typedef struct dpp_etm_cfgmt_subsystem_en_reg_t { + ZXIC_UINT32 subsystem_en_buf_31_28; + ZXIC_UINT32 subsystem_en_buf_25_0; +} DPP_ETM_CFGMT_SUBSYSTEM_EN_REG_T; + +typedef struct dpp_etm_cfgmt_cfgmt_int_reg_t { + ZXIC_UINT32 cfgmt_int_buf; +} DPP_ETM_CFGMT_CFGMT_INT_REG_T; + +typedef struct dpp_etm_cfgmt_qmu_work_mode_t { + ZXIC_UINT32 qmu_work_mode; +} DPP_ETM_CFGMT_QMU_WORK_MODE_T; + +typedef struct dpp_etm_cfgmt_cfgmt_ddr_attach_t { + ZXIC_UINT32 cfgmt_ddr_attach; +} DPP_ETM_CFGMT_CFGMT_DDR_ATTACH_T; + +typedef struct dpp_etm_cfgmt_cnt_mode_reg_t { + ZXIC_UINT32 cfgmt_fc_count_mode; + ZXIC_UINT32 cfgmt_count_rd_mode; + ZXIC_UINT32 cfgmt_count_overflow_mode; +} DPP_ETM_CFGMT_CNT_MODE_REG_T; + +typedef struct dpp_etm_cfgmt_clkgate_en_t { + ZXIC_UINT32 clkgate_en; +} DPP_ETM_CFGMT_CLKGATE_EN_T; + +typedef struct dpp_etm_cfgmt_softrst_en_t { + ZXIC_UINT32 softrst_en; +} DPP_ETM_CFGMT_SOFTRST_EN_T; + +typedef struct dpp_etm_olif_imem_prog_full_t { + ZXIC_UINT32 imem_prog_full_assert; + ZXIC_UINT32 imem_prog_full_negate; +} DPP_ETM_OLIF_IMEM_PROG_FULL_T; + +typedef struct dpp_etm_olif_qmu_para_prog_full_t { + ZXIC_UINT32 qmu_para_prog_full_assert; + ZXIC_UINT32 qmu_para_prog_full_negate; +} DPP_ETM_OLIF_QMU_PARA_PROG_FULL_T; + +typedef struct dpp_etm_olif_olif_int_mask_t { + ZXIC_UINT32 emem_dat_sop_err_mask; + ZXIC_UINT32 emem_dat_eop_err_mask; + ZXIC_UINT32 imem_dat_sop_err_mask; + ZXIC_UINT32 imem_dat_eop_err_mask; + ZXIC_UINT32 crcram_parity_err_mask; + ZXIC_UINT32 emem_fifo_ecc_mask; + ZXIC_UINT32 imem_fifo_ecc_mask; + ZXIC_UINT32 emem_fifo_ovf_mask; + ZXIC_UINT32 emem_fifo_udf_mask; + ZXIC_UINT32 imem_fifo_ovf_mask; + ZXIC_UINT32 
imem_fifo_udf_mask; + ZXIC_UINT32 para_fifo_ecc_mask; + ZXIC_UINT32 para_fifo_ovf_mask; + ZXIC_UINT32 para_fifo_udf_mask; + ZXIC_UINT32 itmh_ecc_single_err_mask; + ZXIC_UINT32 itmh_ecc_double_err_mask; + ZXIC_UINT32 order_fifo_parity_err_mask; + ZXIC_UINT32 order_fifo_ovf_mask; + ZXIC_UINT32 order_fifo_udf_mask; +} DPP_ETM_OLIF_OLIF_INT_MASK_T; + +typedef struct dpp_etm_olif_itmhram_parity_err_2_int_t { + ZXIC_UINT32 emem_dat_sop_err; + ZXIC_UINT32 emem_dat_eop_err; + ZXIC_UINT32 imem_dat_sop_err; + ZXIC_UINT32 imem_dat_eop_err; + ZXIC_UINT32 crcram_parity_err_1_int; + ZXIC_UINT32 emem_fifo_ecc_single_err_int; + ZXIC_UINT32 emem_fifo_ecc_double_err_int; + ZXIC_UINT32 imem_fifo_ecc_single_err_int; + ZXIC_UINT32 imem_fifo_ecc_double_err_int; + ZXIC_UINT32 emem_fifo_ovf_int; + ZXIC_UINT32 emem_fifo_udf_int; + ZXIC_UINT32 imem_fifo_ovf_int; + ZXIC_UINT32 imem_fifo_udf_int; + ZXIC_UINT32 para_fifo_ecc_single_err_int; + ZXIC_UINT32 para_fifo_ecc_double_err_int; + ZXIC_UINT32 para_fifo_ovf_int; + ZXIC_UINT32 para_fifo_udf_int; + ZXIC_UINT32 itmh_ecc_single_err_int; + ZXIC_UINT32 itmh_ecc_double_err_int; + ZXIC_UINT32 order_fifo_parity_err_int; + ZXIC_UINT32 order_fifo_ovf_int; + ZXIC_UINT32 order_fifo_udf_int; +} DPP_ETM_OLIF_ITMHRAM_PARITY_ERR_2_INT_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_mask_h_t { + ZXIC_UINT32 lif0_port_rdy_mask_h; +} DPP_ETM_OLIF_LIF0_PORT_RDY_MASK_H_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_mask_l_t { + ZXIC_UINT32 lif0_port_rdy_mask_l; +} DPP_ETM_OLIF_LIF0_PORT_RDY_MASK_L_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_cfg_h_t { + ZXIC_UINT32 lif0_port_rdy_cfg_h; +} DPP_ETM_OLIF_LIF0_PORT_RDY_CFG_H_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_cfg_l_t { + ZXIC_UINT32 lif0_port_rdy_cfg_l; +} DPP_ETM_OLIF_LIF0_PORT_RDY_CFG_L_T; + +typedef struct dpp_etm_olif_lif0_link_rdy_mask_cfg_t { + ZXIC_UINT32 lif0_link_rdy_mask; + ZXIC_UINT32 lif0_link_rdy_cfg; +} DPP_ETM_OLIF_LIF0_LINK_RDY_MASK_CFG_T; + +typedef struct 
dpp_etm_olif_tm_lif_stat_cfg_t { + ZXIC_UINT32 all_or_by_port; + ZXIC_UINT32 i_or_e_sel; + ZXIC_UINT32 port_or_dest_id_sel; + ZXIC_UINT32 port_dest_id; +} DPP_ETM_OLIF_TM_LIF_STAT_CFG_T; + +typedef struct dpp_etm_olif_tm_lif_sop_stat_t { + ZXIC_UINT32 tm_lif_sop_stat; +} DPP_ETM_OLIF_TM_LIF_SOP_STAT_T; + +typedef struct dpp_etm_olif_tm_lif_eop_stat_t { + ZXIC_UINT32 tm_lif_eop_stat; +} DPP_ETM_OLIF_TM_LIF_EOP_STAT_T; + +typedef struct dpp_etm_olif_tm_lif_vld_stat_t { + ZXIC_UINT32 tm_lif_vld_stat; +} DPP_ETM_OLIF_TM_LIF_VLD_STAT_T; + +typedef struct dpp_etm_cgavd_prog_full_assert_cfg_t { + ZXIC_UINT32 prog_full_assert_cfg; + ZXIC_UINT32 prog_full_negate_cfg; +} DPP_ETM_CGAVD_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_etm_cgavd_cgavd_int_t { + ZXIC_UINT32 cgavd_int; +} DPP_ETM_CGAVD_CGAVD_INT_T; + +typedef struct dpp_etm_cgavd_cgavd_ram_err_t { + ZXIC_UINT32 flow_qnum_intb; + ZXIC_UINT32 flow_qnum_inta; + ZXIC_UINT32 pp_qlen_inta; + ZXIC_UINT32 pp_qlen_intb; + ZXIC_UINT32 pp_tdth_int; + ZXIC_UINT32 flow_tdth_inta; + ZXIC_UINT32 flow_tdth_intb; + ZXIC_UINT32 flow_qlen_inta; + ZXIC_UINT32 flow_qlen_intb; + ZXIC_UINT32 qmu_cgavd_fifo_uv_int; + ZXIC_UINT32 qmu_cgavd_fifo_ov_int; + ZXIC_UINT32 pds_deal_fifo_ov_int; + ZXIC_UINT32 pds_deal_fifo_uv_int; +} DPP_ETM_CGAVD_CGAVD_RAM_ERR_T; + +typedef struct dpp_etm_cgavd_cgavd_int_mask_t { + ZXIC_UINT32 cgavd_int_mask; +} DPP_ETM_CGAVD_CGAVD_INT_MASK_T; + +typedef struct dpp_etm_cgavd_cgavd_ram_err_int_mask_t { + ZXIC_UINT32 flow_qnum_inta_mask; + ZXIC_UINT32 flow_qnum_intb_mask; + ZXIC_UINT32 pp_qlen_inta_mask; + ZXIC_UINT32 pp_qlen_intb_mask; + ZXIC_UINT32 pp_tdth_int_mask; + ZXIC_UINT32 flow_tdth_inta_mask; + ZXIC_UINT32 flow_tdth_intb_mask; + ZXIC_UINT32 flow_qlen_inta_mask; + ZXIC_UINT32 flow_qlen_intb_mask; + ZXIC_UINT32 qmu_cgavd_fifo_uv_int_mask; + ZXIC_UINT32 qmu_cgavd_fifo_ov_int_mask; + ZXIC_UINT32 pds_deal_fifo_ov_int_mask; + ZXIC_UINT32 pds_deal_fifo_uv_int_mask; +} DPP_ETM_CGAVD_CGAVD_RAM_ERR_INT_MASK_T; + 
+typedef struct dpp_etm_cgavd_cfgmt_byte_mode_t { + ZXIC_UINT32 cfgmt_byte_mode; +} DPP_ETM_CGAVD_CFGMT_BYTE_MODE_T; + +typedef struct dpp_etm_cgavd_avg_qlen_return_zero_en_t { + ZXIC_UINT32 avg_qlen_return_zero_en; +} DPP_ETM_CGAVD_AVG_QLEN_RETURN_ZERO_EN_T; + +typedef struct dpp_etm_cgavd_flow_wred_q_len_th_t { + ZXIC_UINT32 flow_wred_q_len_th; +} DPP_ETM_CGAVD_FLOW_WRED_Q_LEN_TH_T; + +typedef struct dpp_etm_cgavd_flow_wq_t { + ZXIC_UINT32 wq_flow; +} DPP_ETM_CGAVD_FLOW_WQ_T; + +typedef struct dpp_etm_cgavd_flow_wred_max_th_t { + ZXIC_UINT32 flow_wred_max_th; +} DPP_ETM_CGAVD_FLOW_WRED_MAX_TH_T; + +typedef struct dpp_etm_cgavd_flow_wred_min_th_t { + ZXIC_UINT32 flow_wred_min_th; +} DPP_ETM_CGAVD_FLOW_WRED_MIN_TH_T; + +typedef struct dpp_etm_cgavd_flow_wred_cfg_para_t { + ZXIC_UINT32 flow_wred_cfg_para; +} DPP_ETM_CGAVD_FLOW_WRED_CFG_PARA_T; + +typedef struct dpp_etm_cgavd_pp_avg_q_len_t { + ZXIC_UINT32 pp_avg_q_len; +} DPP_ETM_CGAVD_PP_AVG_Q_LEN_T; + +typedef struct dpp_etm_cgavd_pp_td_th_t { + ZXIC_UINT32 pp_td_th; +} DPP_ETM_CGAVD_PP_TD_TH_T; + +typedef struct dpp_etm_cgavd_pp_ca_mtd_t { + ZXIC_UINT32 pp_ca_mtd; +} DPP_ETM_CGAVD_PP_CA_MTD_T; + +typedef struct dpp_etm_cgavd_pp_wred_grp_th_en_t { + ZXIC_UINT32 pp_wred_grp; + ZXIC_UINT32 pp_wred_grp_th_en; +} DPP_ETM_CGAVD_PP_WRED_GRP_TH_EN_T; + +typedef struct dpp_etm_cgavd_pp_wred_q_len_th_t { + ZXIC_UINT32 pp_wred_q_len_th; +} DPP_ETM_CGAVD_PP_WRED_Q_LEN_TH_T; + +typedef struct dpp_etm_cgavd_pp_wq_t { + ZXIC_UINT32 wq_pp; +} DPP_ETM_CGAVD_PP_WQ_T; + +typedef struct dpp_etm_cgavd_pp_wred_max_th_t { + ZXIC_UINT32 pp_wred_max_th; +} DPP_ETM_CGAVD_PP_WRED_MAX_TH_T; + +typedef struct dpp_etm_cgavd_pp_wred_min_th_t { + ZXIC_UINT32 pp_wred_min_th; +} DPP_ETM_CGAVD_PP_WRED_MIN_TH_T; + +typedef struct dpp_etm_cgavd_pp_cfg_para_t { + ZXIC_UINT32 pp_cfg_para; +} DPP_ETM_CGAVD_PP_CFG_PARA_T; + +typedef struct dpp_etm_cgavd_sys_avg_q_len_t { + ZXIC_UINT32 sys_avg_q_len; +} DPP_ETM_CGAVD_SYS_AVG_Q_LEN_T; + +typedef struct 
dpp_etm_cgavd_sys_td_th_t { + ZXIC_UINT32 sys_td_th; +} DPP_ETM_CGAVD_SYS_TD_TH_T; + +typedef struct dpp_etm_cgavd_sys_cgavd_metd_t { + ZXIC_UINT32 sys_cgavd_metd; +} DPP_ETM_CGAVD_SYS_CGAVD_METD_T; + +typedef struct dpp_etm_cgavd_sys_cfg_q_grp_para_t { + ZXIC_UINT32 gred_q_len_th_sys; +} DPP_ETM_CGAVD_SYS_CFG_Q_GRP_PARA_T; + +typedef struct dpp_etm_cgavd_sys_wq_t { + ZXIC_UINT32 wq_sys; +} DPP_ETM_CGAVD_SYS_WQ_T; + +typedef struct dpp_etm_cgavd_gred_max_th_t { + ZXIC_UINT32 gred_max_th; +} DPP_ETM_CGAVD_GRED_MAX_TH_T; + +typedef struct dpp_etm_cgavd_gred_mid_th_t { + ZXIC_UINT32 gred_mid_th; +} DPP_ETM_CGAVD_GRED_MID_TH_T; + +typedef struct dpp_etm_cgavd_gred_min_th_t { + ZXIC_UINT32 gred_min_th; +} DPP_ETM_CGAVD_GRED_MIN_TH_T; + +typedef struct dpp_etm_cgavd_gred_cfg_para0_t { + ZXIC_UINT32 gred_cfg_para0; +} DPP_ETM_CGAVD_GRED_CFG_PARA0_T; + +typedef struct dpp_etm_cgavd_gred_cfg_para1_t { + ZXIC_UINT32 gred_cfg_para1; +} DPP_ETM_CGAVD_GRED_CFG_PARA1_T; + +typedef struct dpp_etm_cgavd_gred_cfg_para2_t { + ZXIC_UINT32 gred_cfg_para2; +} DPP_ETM_CGAVD_GRED_CFG_PARA2_T; + +typedef struct dpp_etm_cgavd_sys_window_th_h_t { + ZXIC_UINT32 sys_window_th_h; +} DPP_ETM_CGAVD_SYS_WINDOW_TH_H_T; + +typedef struct dpp_etm_cgavd_sys_window_th_l_t { + ZXIC_UINT32 sys_window_th_l; +} DPP_ETM_CGAVD_SYS_WINDOW_TH_L_T; + +typedef struct dpp_etm_cgavd_amplify_gene0_t { + ZXIC_UINT32 amplify_gene0; +} DPP_ETM_CGAVD_AMPLIFY_GENE0_T; + +typedef struct dpp_etm_cgavd_amplify_gene1_t { + ZXIC_UINT32 amplify_gene1; +} DPP_ETM_CGAVD_AMPLIFY_GENE1_T; + +typedef struct dpp_etm_cgavd_amplify_gene2_t { + ZXIC_UINT32 amplify_gene2; +} DPP_ETM_CGAVD_AMPLIFY_GENE2_T; + +typedef struct dpp_etm_cgavd_amplify_gene3_t { + ZXIC_UINT32 amplify_gene3; +} DPP_ETM_CGAVD_AMPLIFY_GENE3_T; + +typedef struct dpp_etm_cgavd_amplify_gene4_t { + ZXIC_UINT32 amplify_gene4; +} DPP_ETM_CGAVD_AMPLIFY_GENE4_T; + +typedef struct dpp_etm_cgavd_amplify_gene5_t { + ZXIC_UINT32 amplify_gene5; +} 
DPP_ETM_CGAVD_AMPLIFY_GENE5_T; + +typedef struct dpp_etm_cgavd_amplify_gene6_t { + ZXIC_UINT32 amplify_gene6; +} DPP_ETM_CGAVD_AMPLIFY_GENE6_T; + +typedef struct dpp_etm_cgavd_amplify_gene7_t { + ZXIC_UINT32 amplify_gene7; +} DPP_ETM_CGAVD_AMPLIFY_GENE7_T; + +typedef struct dpp_etm_cgavd_amplify_gene8_t { + ZXIC_UINT32 amplify_gene8; +} DPP_ETM_CGAVD_AMPLIFY_GENE8_T; + +typedef struct dpp_etm_cgavd_amplify_gene9_t { + ZXIC_UINT32 amplify_gene9; +} DPP_ETM_CGAVD_AMPLIFY_GENE9_T; + +typedef struct dpp_etm_cgavd_amplify_gene10_t { + ZXIC_UINT32 amplify_gene10; +} DPP_ETM_CGAVD_AMPLIFY_GENE10_T; + +typedef struct dpp_etm_cgavd_amplify_gene11_t { + ZXIC_UINT32 amplify_gene11; +} DPP_ETM_CGAVD_AMPLIFY_GENE11_T; + +typedef struct dpp_etm_cgavd_amplify_gene12_t { + ZXIC_UINT32 amplify_gene12; +} DPP_ETM_CGAVD_AMPLIFY_GENE12_T; + +typedef struct dpp_etm_cgavd_amplify_gene13_t { + ZXIC_UINT32 amplify_gene13; +} DPP_ETM_CGAVD_AMPLIFY_GENE13_T; + +typedef struct dpp_etm_cgavd_amplify_gene14_t { + ZXIC_UINT32 amplify_gene14; +} DPP_ETM_CGAVD_AMPLIFY_GENE14_T; + +typedef struct dpp_etm_cgavd_amplify_gene15_t { + ZXIC_UINT32 amplify_gene15; +} DPP_ETM_CGAVD_AMPLIFY_GENE15_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_en_t { + ZXIC_UINT32 equal_pkt_len_en; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_EN_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th0_t { + ZXIC_UINT32 equal_pkt_len_th0; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH0_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th1_t { + ZXIC_UINT32 equal_pkt_len_th1; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH1_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th2_t { + ZXIC_UINT32 equal_pkt_len_th2; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH2_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th3_t { + ZXIC_UINT32 equal_pkt_len_th3; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH3_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th4_t { + ZXIC_UINT32 equal_pkt_len_th4; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH4_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th5_t { + ZXIC_UINT32 
equal_pkt_len_th5; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH5_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len_th6_t { + ZXIC_UINT32 equal_pkt_len_th6; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH6_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len0_t { + ZXIC_UINT32 equal_pkt_len0; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN0_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len1_t { + ZXIC_UINT32 equal_pkt_len1; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN1_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len2_t { + ZXIC_UINT32 equal_pkt_len2; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN2_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len3_t { + ZXIC_UINT32 equal_pkt_len3; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN3_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len4_t { + ZXIC_UINT32 equal_pkt_len4; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN4_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len5_t { + ZXIC_UINT32 equal_pkt_len5; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN5_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len6_t { + ZXIC_UINT32 equal_pkt_len6; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN6_T; + +typedef struct dpp_etm_cgavd_equal_pkt_len7_t { + ZXIC_UINT32 equal_pkt_len7; +} DPP_ETM_CGAVD_EQUAL_PKT_LEN7_T; + +typedef struct dpp_etm_cgavd_flow_cpu_set_avg_len_t { + ZXIC_UINT32 flow_cpu_set_avg_len; +} DPP_ETM_CGAVD_FLOW_CPU_SET_AVG_LEN_T; + +typedef struct dpp_etm_cgavd_flow_cpu_set_q_len_t { + ZXIC_UINT32 flow_cpu_set_q_len; +} DPP_ETM_CGAVD_FLOW_CPU_SET_Q_LEN_T; + +typedef struct dpp_etm_cgavd_pp_cpu_set_avg_q_len_t { + ZXIC_UINT32 pp_cpu_set_avg_q_len; +} DPP_ETM_CGAVD_PP_CPU_SET_AVG_Q_LEN_T; + +typedef struct dpp_etm_cgavd_pp_cpu_set_q_len_t { + ZXIC_UINT32 pp_cpu_set_q_len; +} DPP_ETM_CGAVD_PP_CPU_SET_Q_LEN_T; + +typedef struct dpp_etm_cgavd_sys_cpu_set_avg_len_t { + ZXIC_UINT32 sys_cpu_set_avg_len; +} DPP_ETM_CGAVD_SYS_CPU_SET_AVG_LEN_T; + +typedef struct dpp_etm_cgavd_sys_cpu_set_q_len_t { + ZXIC_UINT32 sys_cpu_set_q_len; +} DPP_ETM_CGAVD_SYS_CPU_SET_Q_LEN_T; + +typedef struct dpp_etm_cgavd_pke_len_calc_sign_t { + ZXIC_UINT32 pke_len_calc_sign; +} 
DPP_ETM_CGAVD_PKE_LEN_CALC_SIGN_T; + +typedef struct dpp_etm_cgavd_rd_cpu_or_ram_t { + ZXIC_UINT32 cpu_sel_sys_q_len_en; + ZXIC_UINT32 cpu_sel_sys_avg_q_len_en; + ZXIC_UINT32 cpu_sel_pp_q_len_en; + ZXIC_UINT32 cpu_sel_pp_avg_q_len_en; + ZXIC_UINT32 cpu_sel_flow_q_len_en; + ZXIC_UINT32 cpu_sel_flow_avg_q_len_en; +} DPP_ETM_CGAVD_RD_CPU_OR_RAM_T; + +typedef struct dpp_etm_cgavd_q_len_update_disable_t { + ZXIC_UINT32 q_len_sys_update_en; + ZXIC_UINT32 q_len_pp_update_en; + ZXIC_UINT32 q_len_flow_update_en; +} DPP_ETM_CGAVD_Q_LEN_UPDATE_DISABLE_T; + +typedef struct dpp_etm_cgavd_cgavd_dp_sel_t { + ZXIC_UINT32 flow_dp_sel_high; + ZXIC_UINT32 flow_dp_sel_mid; + ZXIC_UINT32 flow_dp_sel_low; + ZXIC_UINT32 pp_dp_sel_high; + ZXIC_UINT32 pp_dp_sel_mid; + ZXIC_UINT32 pp_dp_sel_low; + ZXIC_UINT32 sys_dp_sel_high; + ZXIC_UINT32 sys_dp_sel_mid; + ZXIC_UINT32 sys_dp_sel_low; +} DPP_ETM_CGAVD_CGAVD_DP_SEL_T; + +typedef struct dpp_etm_cgavd_cgavd_sub_en_t { + ZXIC_UINT32 cgavd_sa_sub_en; + ZXIC_UINT32 cgavd_sys_sub_en; + ZXIC_UINT32 cgavd_pp_sub_en; + ZXIC_UINT32 cgavd_flow_sub_en; +} DPP_ETM_CGAVD_CGAVD_SUB_EN_T; + +typedef struct dpp_etm_cgavd_default_start_queue_t { + ZXIC_UINT32 default_start_queue; +} DPP_ETM_CGAVD_DEFAULT_START_QUEUE_T; + +typedef struct dpp_etm_cgavd_default_finish_queue_t { + ZXIC_UINT32 default_finish_queue; +} DPP_ETM_CGAVD_DEFAULT_FINISH_QUEUE_T; + +typedef struct dpp_etm_cgavd_protocol_start_queue_t { + ZXIC_UINT32 protocol_start_queue; +} DPP_ETM_CGAVD_PROTOCOL_START_QUEUE_T; + +typedef struct dpp_etm_cgavd_protocol_finish_queue_t { + ZXIC_UINT32 protocol_finish_queue; +} DPP_ETM_CGAVD_PROTOCOL_FINISH_QUEUE_T; + +typedef struct dpp_etm_cgavd_uniform_td_th_t { + ZXIC_UINT32 uniform_td_th; +} DPP_ETM_CGAVD_UNIFORM_TD_TH_T; + +typedef struct dpp_etm_cgavd_uniform_td_th_en_t { + ZXIC_UINT32 uniform_td_th_en; +} DPP_ETM_CGAVD_UNIFORM_TD_TH_EN_T; + +typedef struct dpp_etm_cgavd_cgavd_cfg_fc_t { + ZXIC_UINT32 cgavd_cfg_fc; +} DPP_ETM_CGAVD_CGAVD_CFG_FC_T; + 
+typedef struct dpp_etm_cgavd_cgavd_cfg_no_fc_t { + ZXIC_UINT32 cgavd_cfg_no_fc; +} DPP_ETM_CGAVD_CGAVD_CFG_NO_FC_T; + +typedef struct dpp_etm_cgavd_cgavd_force_imem_omem_t { + ZXIC_UINT32 imem_omem_force_en; + ZXIC_UINT32 choose_imem_omem; +} DPP_ETM_CGAVD_CGAVD_FORCE_IMEM_OMEM_T; + +typedef struct dpp_etm_cgavd_cgavd_sys_q_len_l_t { + ZXIC_UINT32 cgavd_sys_q_len_l; +} DPP_ETM_CGAVD_CGAVD_SYS_Q_LEN_L_T; + +typedef struct dpp_etm_cgavd_default_queue_en_t { + ZXIC_UINT32 default_queue_en; +} DPP_ETM_CGAVD_DEFAULT_QUEUE_EN_T; + +typedef struct dpp_etm_cgavd_protocol_queue_en_t { + ZXIC_UINT32 protocol_queue_en; +} DPP_ETM_CGAVD_PROTOCOL_QUEUE_EN_T; + +typedef struct dpp_etm_cgavd_cfg_tc_flowid_dat_t { + ZXIC_UINT32 cfg_tc_flowid_dat; +} DPP_ETM_CGAVD_CFG_TC_FLOWID_DAT_T; + +typedef struct dpp_etm_cgavd_flow_td_th_t { + ZXIC_UINT32 flow_td_th; +} DPP_ETM_CGAVD_FLOW_TD_TH_T; + +typedef struct dpp_etm_cgavd_flow_ca_mtd_t { + ZXIC_UINT32 flow_ca_mtd; +} DPP_ETM_CGAVD_FLOW_CA_MTD_T; + +typedef struct dpp_etm_cgavd_flow_dynamic_th_en_t { + ZXIC_UINT32 flow_dynamic_th_en; +} DPP_ETM_CGAVD_FLOW_DYNAMIC_TH_EN_T; + +typedef struct dpp_etm_cgavd_pp_num_t { + ZXIC_UINT32 pp_num; +} DPP_ETM_CGAVD_PP_NUM_T; + +typedef struct dpp_etm_cgavd_flow_q_len_t { + ZXIC_UINT32 flow_q_len; +} DPP_ETM_CGAVD_FLOW_Q_LEN_T; + +typedef struct dpp_etm_cgavd_flow_wred_grp_t { + ZXIC_UINT32 flow_wred_grp; +} DPP_ETM_CGAVD_FLOW_WRED_GRP_T; + +typedef struct dpp_etm_cgavd_flow_avg_q_len_t { + ZXIC_UINT32 flow_avg_q_len; +} DPP_ETM_CGAVD_FLOW_AVG_Q_LEN_T; + +typedef struct dpp_etm_cgavd_qos_sign_t { + ZXIC_UINT32 qos_sign_flow_cfg_din; +} DPP_ETM_CGAVD_QOS_SIGN_T; + +typedef struct dpp_etm_cgavd_q_pri_t { + ZXIC_UINT32 qpri_flow_cfg_din; +} DPP_ETM_CGAVD_Q_PRI_T; + +typedef struct dpp_etm_cgavd_odma_tm_itmd_rd_low_t { + ZXIC_UINT32 odma_tm_itmd_low; +} DPP_ETM_CGAVD_ODMA_TM_ITMD_RD_LOW_T; + +typedef struct dpp_etm_cgavd_odma_tm_itmd_rd_mid_t { + ZXIC_UINT32 odma_tm_itmd_mid; +} 
DPP_ETM_CGAVD_ODMA_TM_ITMD_RD_MID_T; + +typedef struct dpp_etm_cgavd_odma_tm_itmd_rd_high_t { + ZXIC_UINT32 odma_tm_itmd_high; +} DPP_ETM_CGAVD_ODMA_TM_ITMD_RD_HIGH_T; + +typedef struct dpp_etm_cgavd_cgavd_stat_pkt_len_t { + ZXIC_UINT32 expect_deq_pkt_len; + ZXIC_UINT32 expect_enq_pkt_len; +} DPP_ETM_CGAVD_CGAVD_STAT_PKT_LEN_T; + +typedef struct dpp_etm_cgavd_cgavd_stat_qnum_t { + ZXIC_UINT32 cgavd_unexcept_qnum; + ZXIC_UINT32 cgavd_except_qnum; +} DPP_ETM_CGAVD_CGAVD_STAT_QNUM_T; + +typedef struct dpp_etm_cgavd_cgavd_stat_dp_t { + ZXIC_UINT32 cgavd_stat_dp; +} DPP_ETM_CGAVD_CGAVD_STAT_DP_T; + +typedef struct dpp_etm_cgavd_flow_num0_t { + ZXIC_UINT32 flow_num0; +} DPP_ETM_CGAVD_FLOW_NUM0_T; + +typedef struct dpp_etm_cgavd_flow_num1_t { + ZXIC_UINT32 flow_num1; +} DPP_ETM_CGAVD_FLOW_NUM1_T; + +typedef struct dpp_etm_cgavd_flow_num2_t { + ZXIC_UINT32 flow_num2; +} DPP_ETM_CGAVD_FLOW_NUM2_T; + +typedef struct dpp_etm_cgavd_flow_num3_t { + ZXIC_UINT32 flow_num3; +} DPP_ETM_CGAVD_FLOW_NUM3_T; + +typedef struct dpp_etm_cgavd_flow_num4_t { + ZXIC_UINT32 flow_num4; +} DPP_ETM_CGAVD_FLOW_NUM4_T; + +typedef struct dpp_etm_cgavd_flow0_imem_cnt_t { + ZXIC_UINT32 flow0_imem_cnt; +} DPP_ETM_CGAVD_FLOW0_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow1_imem_cnt_t { + ZXIC_UINT32 flow1_imem_cnt; +} DPP_ETM_CGAVD_FLOW1_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow2_imem_cnt_t { + ZXIC_UINT32 flow2_imem_cnt; +} DPP_ETM_CGAVD_FLOW2_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow3_imem_cnt_t { + ZXIC_UINT32 flow3_imem_cnt; +} DPP_ETM_CGAVD_FLOW3_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow4_imem_cnt_t { + ZXIC_UINT32 flow4_imem_cnt; +} DPP_ETM_CGAVD_FLOW4_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow0_drop_cnt_t { + ZXIC_UINT32 flow0_drop_cnt; +} DPP_ETM_CGAVD_FLOW0_DROP_CNT_T; + +typedef struct dpp_etm_cgavd_flow1_drop_cnt_t { + ZXIC_UINT32 flow1_drop_cnt; +} DPP_ETM_CGAVD_FLOW1_DROP_CNT_T; + +typedef struct dpp_etm_cgavd_flow2_drop_cnt_t { + ZXIC_UINT32 flow2_drop_cnt; +} 
DPP_ETM_CGAVD_FLOW2_DROP_CNT_T; + +typedef struct dpp_etm_cgavd_flow3_drop_cnt_t { + ZXIC_UINT32 flow3_drop_cnt; +} DPP_ETM_CGAVD_FLOW3_DROP_CNT_T; + +typedef struct dpp_etm_cgavd_flow4_drop_cnt_t { + ZXIC_UINT32 flow4_drop_cnt; +} DPP_ETM_CGAVD_FLOW4_DROP_CNT_T; + +typedef struct dpp_etm_cgavd_fc_count_mode_t { + ZXIC_UINT32 fc_count_mode; +} DPP_ETM_CGAVD_FC_COUNT_MODE_T; + +typedef struct dpp_etm_cgavd_qmu_cgavd_fc_num_t { + ZXIC_UINT32 qmu_cgavd_fc_state; + ZXIC_UINT32 qmu_cgavd_fc_num; +} DPP_ETM_CGAVD_QMU_CGAVD_FC_NUM_T; + +typedef struct dpp_etm_cgavd_cgavd_odma_fc_num_t { + ZXIC_UINT32 cgavd_lif_fc_state; + ZXIC_UINT32 cgavd_lif_fc_num; +} DPP_ETM_CGAVD_CGAVD_ODMA_FC_NUM_T; + +typedef struct dpp_etm_cgavd_cfg_offset_t { + ZXIC_UINT32 cfg_offset; +} DPP_ETM_CGAVD_CFG_OFFSET_T; + +typedef struct dpp_etm_tmmu_tmmu_init_done_t { + ZXIC_UINT32 tmmu_init_done; +} DPP_ETM_TMMU_TMMU_INIT_DONE_T; + +typedef struct dpp_etm_tmmu_tmmu_int_mask_1_t { + ZXIC_UINT32 imem_enq_rd_fifo_full_mask; + ZXIC_UINT32 imem_enq_rd_fifo_overflow_mask; + ZXIC_UINT32 imem_enq_rd_fifo_underflow_mask; + ZXIC_UINT32 imem_enq_drop_fifo_full_mask; + ZXIC_UINT32 imem_enq_drop_fifo_overflow_mask; + ZXIC_UINT32 imem_enq_drop_fifo_underflow_mask; + ZXIC_UINT32 imem_deq_rd_fifo_full_mask; + ZXIC_UINT32 imem_deq_rd_fifo_overflow_mask; + ZXIC_UINT32 imem_deq_rd_fifo_underflow_mask; + ZXIC_UINT32 imem_deq_drop_fifo_full_mask; + ZXIC_UINT32 imem_deq_drop_fifo_overflow_mask; + ZXIC_UINT32 imem_deq_drop_fifo_underflow_mask; + ZXIC_UINT32 dma_data_fifo_full_mask; + ZXIC_UINT32 dma_data_fifo_overflow_mask; + ZXIC_UINT32 dma_data_fifo_underflow_mask; + ZXIC_UINT32 wr_cmd_fifo_full_mask; + ZXIC_UINT32 wr_cmd_fifo_overflow_mask; + ZXIC_UINT32 wr_cmd_fifo_underflow_mask; + ZXIC_UINT32 cached_pd_fifo_full_mask; + ZXIC_UINT32 cached_pd_fifo_overflow_mask; + ZXIC_UINT32 cached_pd_fifo_underflow_mask; + ZXIC_UINT32 emem_pd_fifo_full_mask; + ZXIC_UINT32 emem_pd_fifo_overflow_mask; + ZXIC_UINT32 
emem_pd_fifo_underflow_mask; + ZXIC_UINT32 pd_order_fifo_full_mask; + ZXIC_UINT32 pd_order_fifo_overflow_mask; + ZXIC_UINT32 pd_order_fifo_underflow_mask; +} DPP_ETM_TMMU_TMMU_INT_MASK_1_T; + +typedef struct dpp_etm_tmmu_tmmu_int_mask_2_t { + ZXIC_UINT32 dma_data_fifo_parity_err_mask; + ZXIC_UINT32 imem_enq_rd_fifo_ecc_single_err_mask; + ZXIC_UINT32 imem_enq_rd_fifo_ecc_double_err_mask; + ZXIC_UINT32 imem_enq_drop_fifo_ecc_single_err_mask; + ZXIC_UINT32 imem_enq_drop_fifo_ecc_double_err_mask; + ZXIC_UINT32 imem_deq_rd_fifo_ecc_single_err_mask; + ZXIC_UINT32 imem_deq_rd_fifo_ecc_double_err_mask; + ZXIC_UINT32 imem_deq_drop_fifo_ecc_single_err_mask; + ZXIC_UINT32 imem_deq_drop_fifo_ecc_double_err_mask; + ZXIC_UINT32 wr_cmd_fifo_ecc_single_err_mask; + ZXIC_UINT32 wr_cmd_fifo_ecc_double_err_mask; + ZXIC_UINT32 pd_cache_ram_ecc_single_err_mask; + ZXIC_UINT32 pd_cache_ram_ecc_double_err_mask; + ZXIC_UINT32 cached_pd_fifo_ecc_single_err_mask; + ZXIC_UINT32 cached_pd_fifo_ecc_double_err_mask; + ZXIC_UINT32 emem_pd_fifo_ecc_single_err_mask; + ZXIC_UINT32 emem_pd_fifo_ecc_double_err_mask; +} DPP_ETM_TMMU_TMMU_INT_MASK_2_T; + +typedef struct dpp_etm_tmmu_cfgmt_tm_pure_imem_en_t { + ZXIC_UINT32 cfgmt_tm_pure_imem_en; +} DPP_ETM_TMMU_CFGMT_TM_PURE_IMEM_EN_T; + +typedef struct dpp_etm_tmmu_cfgmt_force_ddr_rdy_cfg_t { + ZXIC_UINT32 cfgmt_force_ddr_rdy_cfg; +} DPP_ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFG_T; + +typedef struct dpp_etm_tmmu_pd_order_fifo_aful_th_t { + ZXIC_UINT32 pd_order_fifo_aful_th; +} DPP_ETM_TMMU_PD_ORDER_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_cached_pd_fifo_aful_th_t { + ZXIC_UINT32 cached_pd_fifo_aful_th; +} DPP_ETM_TMMU_CACHED_PD_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_wr_cmd_fifo_aful_th_t { + ZXIC_UINT32 wr_cmd_fifo_aful_th; +} DPP_ETM_TMMU_WR_CMD_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_imem_enq_rd_fifo_aful_th_t { + ZXIC_UINT32 imem_enq_rd_fifo_aful_th; +} DPP_ETM_TMMU_IMEM_ENQ_RD_FIFO_AFUL_TH_T; + +typedef struct 
dpp_etm_tmmu_imem_enq_drop_fifo_aful_th_t { + ZXIC_UINT32 imem_enq_drop_fifo_aful_th; +} DPP_ETM_TMMU_IMEM_ENQ_DROP_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_imem_deq_drop_fifo_aful_th_t { + ZXIC_UINT32 imem_deq_drop_fifo_aful_th; +} DPP_ETM_TMMU_IMEM_DEQ_DROP_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_imem_deq_rd_fifo_aful_th_t { + ZXIC_UINT32 imem_deq_rd_fifo_aful_th; +} DPP_ETM_TMMU_IMEM_DEQ_RD_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_tmmu_states_1_t { + ZXIC_UINT32 imem_enq_rd_fifo_full; + ZXIC_UINT32 imem_enq_rd_fifo_overflow; + ZXIC_UINT32 imem_enq_rd_fifo_underflow; + ZXIC_UINT32 imem_enq_drop_fifo_full; + ZXIC_UINT32 imem_enq_drop_fifo_overflow; + ZXIC_UINT32 imem_enq_drop_fifo_underflow; + ZXIC_UINT32 imem_deq_rd_fifo_full; + ZXIC_UINT32 imem_deq_rd_fifo_overflow; + ZXIC_UINT32 imem_deq_rd_fifo_underflow; + ZXIC_UINT32 imem_deq_drop_fifo_full; + ZXIC_UINT32 imem_deq_drop_fifo_overflow; + ZXIC_UINT32 imem_deq_drop_fifo_underflow; + ZXIC_UINT32 dma_data_fifo_full; + ZXIC_UINT32 dma_data_fifo_overflow; + ZXIC_UINT32 dma_data_fifo_underflow; + ZXIC_UINT32 wr_cmd_fifo_full; + ZXIC_UINT32 wr_cmd_fifo_overflow; + ZXIC_UINT32 wr_cmd_fifo_underflow; + ZXIC_UINT32 cached_pd_fifo_full; + ZXIC_UINT32 cached_pd_fifo_overflow; + ZXIC_UINT32 cached_pd_fifo_underflow; + ZXIC_UINT32 emem_pd_fifo_full; + ZXIC_UINT32 emem_pd_fifo_overflow; + ZXIC_UINT32 emem_pd_fifo_underflow; + ZXIC_UINT32 pd_order_fifo_full; + ZXIC_UINT32 pd_order_fifo_overflow; + ZXIC_UINT32 pd_order_fifo_underflow; +} DPP_ETM_TMMU_TMMU_STATES_1_T; + +typedef struct dpp_etm_tmmu_tmmu_states_2_t { + ZXIC_UINT32 dma_data_fifo_parity_err; + ZXIC_UINT32 imem_enq_rd_fifo_ecc_single_err; + ZXIC_UINT32 imem_enq_rd_fifo_ecc_double_err; + ZXIC_UINT32 imem_enq_drop_fifo_ecc_single_err; + ZXIC_UINT32 imem_enq_drop_fifo_ecc_double_err; + ZXIC_UINT32 imem_deq_rd_fifo_ecc_single_err; + ZXIC_UINT32 imem_deq_rd_fifo_ecc_double_err; + ZXIC_UINT32 imem_deq_drop_fifo_ecc_single_err; + ZXIC_UINT32 
imem_deq_drop_fifo_ecc_double_err; + ZXIC_UINT32 wr_cmd_fifo_ecc_single_err; + ZXIC_UINT32 wr_cmd_fifo_ecc_double_err; + ZXIC_UINT32 pd_cache_ram_ecc_single_err; + ZXIC_UINT32 pd_cache_ram_ecc_double_err; + ZXIC_UINT32 cached_pd_fifo_ecc_single_err; + ZXIC_UINT32 cached_pd_fifo_ecc_double_err; + ZXIC_UINT32 emem_pd_fifo_ecc_single_err; + ZXIC_UINT32 emem_pd_fifo_ecc_double_err; +} DPP_ETM_TMMU_TMMU_STATES_2_T; + +typedef struct dpp_etm_shap_shap_ind_cmd_t { + ZXIC_UINT32 rd; + ZXIC_UINT32 mem_id; + ZXIC_UINT32 addr; +} DPP_ETM_SHAP_SHAP_IND_CMD_T; + +typedef struct dpp_etm_shap_shap_ind_sta_t { + ZXIC_UINT32 indirectaccessdone; +} DPP_ETM_SHAP_SHAP_IND_STA_T; + +typedef struct dpp_etm_shap_shap_ind_data0_t { + ZXIC_UINT32 indirectdata0; +} DPP_ETM_SHAP_SHAP_IND_DATA0_T; + +typedef struct dpp_etm_shap_shap_ind_data1_t { + ZXIC_UINT32 indirectdata1; +} DPP_ETM_SHAP_SHAP_IND_DATA1_T; + +typedef struct dpp_etm_shap_full_threshold_t { + ZXIC_UINT32 full_threshold; +} DPP_ETM_SHAP_FULL_THRESHOLD_T; + +typedef struct dpp_etm_shap_empty_threshold_t { + ZXIC_UINT32 empty_threshold; +} DPP_ETM_SHAP_EMPTY_THRESHOLD_T; + +typedef struct dpp_etm_shap_shap_sta_init_cfg_t { + ZXIC_UINT32 sta_ram_init_done; + ZXIC_UINT32 sta_ram_init_en; +} DPP_ETM_SHAP_SHAP_STA_INIT_CFG_T; + +typedef struct dpp_etm_shap_shap_cfg_init_cfg_t { + ZXIC_UINT32 cfg_ram_init_done; + ZXIC_UINT32 cfg_ram_init_en; +} DPP_ETM_SHAP_SHAP_CFG_INIT_CFG_T; + +typedef struct dpp_etm_shap_token_mode_switch_t { + ZXIC_UINT32 token_mode_switch; +} DPP_ETM_SHAP_TOKEN_MODE_SWITCH_T; + +typedef struct dpp_etm_shap_token_grain_t { + ZXIC_UINT32 token_grain; +} DPP_ETM_SHAP_TOKEN_GRAIN_T; + +typedef struct dpp_etm_shap_crd_grain_t { + ZXIC_UINT32 crd_grain; +} DPP_ETM_SHAP_CRD_GRAIN_T; + +typedef struct dpp_etm_shap_shap_stat_ctrl_t { + ZXIC_UINT32 shap_stat_ctrl; +} DPP_ETM_SHAP_SHAP_STAT_CTRL_T; + +typedef struct dpp_etm_shap_token_stat_id_t { + ZXIC_UINT32 token_stat_id; +} DPP_ETM_SHAP_TOKEN_STAT_ID_T; + +typedef 
struct dpp_etm_shap_token_stat_t { + ZXIC_UINT32 token_stat; +} DPP_ETM_SHAP_TOKEN_STAT_T; + +typedef struct dpp_etm_shap_shap_stat_clk_cnt_t { + ZXIC_UINT32 shap_stat_clk_cnt; +} DPP_ETM_SHAP_SHAP_STAT_CLK_CNT_T; + +typedef struct dpp_etm_shap_shap_bucket_map_tbl_t { + ZXIC_UINT32 shap_map; +} DPP_ETM_SHAP_SHAP_BUCKET_MAP_TBL_T; + +typedef struct dpp_etm_shap_bkt_para_tbl_t { + ZXIC_UINT32 bucket_depth; + ZXIC_UINT32 bucket_rate; +} DPP_ETM_SHAP_BKT_PARA_TBL_T; + +typedef struct dpp_etm_crdt_credit_en_t { + ZXIC_UINT32 credit_en; +} DPP_ETM_CRDT_CREDIT_EN_T; + +typedef struct dpp_etm_crdt_crt_inter1_t { + ZXIC_UINT32 crd_inter1; +} DPP_ETM_CRDT_CRT_INTER1_T; + +typedef struct dpp_etm_crdt_db_token_t { + ZXIC_UINT32 db_token; +} DPP_ETM_CRDT_DB_TOKEN_T; + +typedef struct dpp_etm_crdt_crs_flt_cfg_t { + ZXIC_UINT32 crs_flt_cfg; +} DPP_ETM_CRDT_CRS_FLT_CFG_T; + +typedef struct dpp_etm_crdt_th_sp_t { + ZXIC_UINT32 th_sp; +} DPP_ETM_CRDT_TH_SP_T; + +typedef struct dpp_etm_crdt_th_wfq_fq_t { + ZXIC_UINT32 th_fq; + ZXIC_UINT32 th_wfq; +} DPP_ETM_CRDT_TH_WFQ_FQ_T; + +typedef struct dpp_etm_crdt_th_wfq2_fq2_t { + ZXIC_UINT32 th_fq2; + ZXIC_UINT32 th_wfq2; +} DPP_ETM_CRDT_TH_WFQ2_FQ2_T; + +typedef struct dpp_etm_crdt_th_wfq4_fq4_t { + ZXIC_UINT32 th_fq4; + ZXIC_UINT32 th_wfq4; +} DPP_ETM_CRDT_TH_WFQ4_FQ4_T; + +typedef struct dpp_etm_crdt_cfg_state_t { + ZXIC_UINT32 cfg_state; +} DPP_ETM_CRDT_CFG_STATE_T; + +typedef struct dpp_etm_crdt_crdt_ind_cmd_t { + ZXIC_UINT32 rd; + ZXIC_UINT32 mem_id; + ZXIC_UINT32 addr; +} DPP_ETM_CRDT_CRDT_IND_CMD_T; + +typedef struct dpp_etm_crdt_crdt_ind_sta_t { + ZXIC_UINT32 indirectaccessdone; +} DPP_ETM_CRDT_CRDT_IND_STA_T; + +typedef struct dpp_etm_crdt_crdt_ind_data0_t { + ZXIC_UINT32 indirectdata0; +} DPP_ETM_CRDT_CRDT_IND_DATA0_T; + +typedef struct dpp_etm_crdt_crdt_ind_data1_t { + ZXIC_UINT32 indirectdata1; +} DPP_ETM_CRDT_CRDT_IND_DATA1_T; + +typedef struct dpp_etm_crdt_crdt_state_t { + ZXIC_UINT32 crdt_int; + ZXIC_UINT32 crdt_rdy; +} 
DPP_ETM_CRDT_CRDT_STATE_T; + +typedef struct dpp_etm_crdt_stat_que_id_0_t { + ZXIC_UINT32 stat_que_id_0; +} DPP_ETM_CRDT_STAT_QUE_ID_0_T; + +typedef struct dpp_etm_crdt_stat_que_id_1_t { + ZXIC_UINT32 stat_que_id_1; +} DPP_ETM_CRDT_STAT_QUE_ID_1_T; + +typedef struct dpp_etm_crdt_stat_que_id_2_t { + ZXIC_UINT32 stat_que_id_2; +} DPP_ETM_CRDT_STAT_QUE_ID_2_T; + +typedef struct dpp_etm_crdt_stat_que_id_3_t { + ZXIC_UINT32 stat_que_id_3; +} DPP_ETM_CRDT_STAT_QUE_ID_3_T; + +typedef struct dpp_etm_crdt_stat_que_id_4_t { + ZXIC_UINT32 stat_que_id_4; +} DPP_ETM_CRDT_STAT_QUE_ID_4_T; + +typedef struct dpp_etm_crdt_stat_que_id_5_t { + ZXIC_UINT32 stat_que_id_5; +} DPP_ETM_CRDT_STAT_QUE_ID_5_T; + +typedef struct dpp_etm_crdt_stat_que_id_6_t { + ZXIC_UINT32 stat_que_id_6; +} DPP_ETM_CRDT_STAT_QUE_ID_6_T; + +typedef struct dpp_etm_crdt_stat_que_id_7_t { + ZXIC_UINT32 stat_que_id_7; +} DPP_ETM_CRDT_STAT_QUE_ID_7_T; + +typedef struct dpp_etm_crdt_stat_que_id_8_t { + ZXIC_UINT32 stat_que_id_8; +} DPP_ETM_CRDT_STAT_QUE_ID_8_T; + +typedef struct dpp_etm_crdt_stat_que_id_9_t { + ZXIC_UINT32 stat_que_id_9; +} DPP_ETM_CRDT_STAT_QUE_ID_9_T; + +typedef struct dpp_etm_crdt_stat_que_id_10_t { + ZXIC_UINT32 stat_que_id_10; +} DPP_ETM_CRDT_STAT_QUE_ID_10_T; + +typedef struct dpp_etm_crdt_stat_que_id_11_t { + ZXIC_UINT32 stat_que_id_11; +} DPP_ETM_CRDT_STAT_QUE_ID_11_T; + +typedef struct dpp_etm_crdt_stat_que_id_12_t { + ZXIC_UINT32 stat_que_id_12; +} DPP_ETM_CRDT_STAT_QUE_ID_12_T; + +typedef struct dpp_etm_crdt_stat_que_id_13_t { + ZXIC_UINT32 stat_que_id_13; +} DPP_ETM_CRDT_STAT_QUE_ID_13_T; + +typedef struct dpp_etm_crdt_stat_que_id_14_t { + ZXIC_UINT32 stat_que_id_14; +} DPP_ETM_CRDT_STAT_QUE_ID_14_T; + +typedef struct dpp_etm_crdt_stat_que_id_15_t { + ZXIC_UINT32 stat_que_id_15; +} DPP_ETM_CRDT_STAT_QUE_ID_15_T; + +typedef struct dpp_etm_crdt_stat_que_credit_t { + ZXIC_UINT32 stat_que_credit_cnt; +} DPP_ETM_CRDT_STAT_QUE_CREDIT_T; + +typedef struct dpp_etm_crdt_crdt_cfg_ram_init_t { + 
ZXIC_UINT32 cfg_ram_init_done; + ZXIC_UINT32 cfg_ram_init_en; +} DPP_ETM_CRDT_CRDT_CFG_RAM_INIT_T; + +typedef struct dpp_etm_crdt_crdt_sta_ram_init_t { + ZXIC_UINT32 sta_ram_init_done; + ZXIC_UINT32 sta_ram_init_en; +} DPP_ETM_CRDT_CRDT_STA_RAM_INIT_T; + +typedef struct dpp_etm_crdt_crs_que_id_t { + ZXIC_UINT32 crs_que_id; +} DPP_ETM_CRDT_CRS_QUE_ID_T; + +typedef struct dpp_etm_crdt_qmu_crs_end_state_t { + ZXIC_UINT32 qmu_crs_end_state; +} DPP_ETM_CRDT_QMU_CRS_END_STATE_T; + +typedef struct dpp_etm_crdt_shap_rdy_t { + ZXIC_UINT32 shap_rdy; +} DPP_ETM_CRDT_SHAP_RDY_T; + +typedef struct dpp_etm_crdt_shap_int_reg_t { + ZXIC_UINT32 pp_c_token_min_int; +} DPP_ETM_CRDT_SHAP_INT_REG_T; + +typedef struct dpp_etm_crdt_shap_int_mask_reg_t { + ZXIC_UINT32 pp_c_token_min_int_mask; +} DPP_ETM_CRDT_SHAP_INT_MASK_REG_T; + +typedef struct dpp_etm_crdt_token_state_almost_empty_th_t { + ZXIC_UINT32 token_state_almost_empty_th; +} DPP_ETM_CRDT_TOKEN_STATE_ALMOST_EMPTY_TH_T; + +typedef struct dpp_etm_crdt_token_state_empty_th_t { + ZXIC_UINT32 token_state_empty_th; +} DPP_ETM_CRDT_TOKEN_STATE_EMPTY_TH_T; + +typedef struct dpp_etm_crdt_full_th_t { + ZXIC_UINT32 token_state_full_th; +} DPP_ETM_CRDT_FULL_TH_T; + +typedef struct dpp_etm_crdt_pp_c_level_shap_en_t { + ZXIC_UINT32 pp_c_level_shap_en; +} DPP_ETM_CRDT_PP_C_LEVEL_SHAP_EN_T; + +typedef struct dpp_etm_crdt_enq_token_th_t { + ZXIC_UINT32 enq_token_th; +} DPP_ETM_CRDT_ENQ_TOKEN_TH_T; + +typedef struct dpp_etm_crdt_pp_tokenq_level1_qstate_weight_cir_t { + ZXIC_UINT32 pp_pp_q_state_cir; + ZXIC_UINT32 pp_pp_q_weight_wfq_l1_cir; +} DPP_ETM_CRDT_PP_TOKENQ_LEVEL1_QSTATE_WEIGHT_CIR_T; + +typedef struct dpp_etm_crdt_pp_idle_weight_level1_cir_t { + ZXIC_UINT32 pp_idle_q_weight_wfq_l1_cir; +} DPP_ETM_CRDT_PP_IDLE_WEIGHT_LEVEL1_CIR_T; + +typedef struct dpp_etm_crdt_rci_grade_th_0_cfg_t { + ZXIC_UINT32 rci_grade_th_0_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_0_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_1_cfg_t { + ZXIC_UINT32 
rci_grade_th_1_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_1_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_2_cfg_t { + ZXIC_UINT32 rci_grade_th_2_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_2_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_3_cfg_t { + ZXIC_UINT32 rci_grade_th_3_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_3_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_4_cfg_t { + ZXIC_UINT32 rci_grade_th_4_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_4_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_5_cfg_t { + ZXIC_UINT32 rci_grade_th_5_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_5_CFG_T; + +typedef struct dpp_etm_crdt_rci_grade_th_6_cfg_t { + ZXIC_UINT32 rci_grade_th_6_cfg; +} DPP_ETM_CRDT_RCI_GRADE_TH_6_CFG_T; + +typedef struct dpp_etm_crdt_flow_del_cmd_t { + ZXIC_UINT32 flow_del_busy; + ZXIC_UINT32 flow_alt_cmd; + ZXIC_UINT32 flow_alt_ind; +} DPP_ETM_CRDT_FLOW_DEL_CMD_T; + +typedef struct dpp_etm_crdt_cnt_clr_t { + ZXIC_UINT32 cnt_clr; +} DPP_ETM_CRDT_CNT_CLR_T; + +typedef struct dpp_etm_crdt_crdt_int_bus_t { + ZXIC_UINT32 ldstr_fifo15_ovf_int; + ZXIC_UINT32 ldstr_fifo14_ovf_int; + ZXIC_UINT32 ldstr_fifo13_ovf_int; + ZXIC_UINT32 ldstr_fifo12_ovf_int; + ZXIC_UINT32 ldstr_fifo11_ovf_int; + ZXIC_UINT32 ldstr_fifo10_ovf_int; + ZXIC_UINT32 ldstr_fifo9_ovf_int; + ZXIC_UINT32 ldstr_fifo8_ovf_int; + ZXIC_UINT32 ldstr_fifo7_ovf_int; + ZXIC_UINT32 ldstr_fifo6_ovf_int; + ZXIC_UINT32 ldstr_fifo5_ovf_int; + ZXIC_UINT32 ldstr_fifo4_ovf_int; + ZXIC_UINT32 ldstr_fifo3_ovf_int; + ZXIC_UINT32 ldstr_fifo2_ovf_int; + ZXIC_UINT32 ldstr_fifo1_ovf_int; + ZXIC_UINT32 ldstr_fifo0_ovf_int; + ZXIC_UINT32 cfg_del_err_int; + ZXIC_UINT32 flwin_secrs_fifo_ovf_int; + ZXIC_UINT32 flwin_voqcrs_fifo_ovf_int; +} DPP_ETM_CRDT_CRDT_INT_BUS_T; + +typedef struct dpp_etm_crdt_crdt_int_mask_t { + ZXIC_UINT32 crdt_int_mask; +} DPP_ETM_CRDT_CRDT_INT_MASK_T; + +typedef struct dpp_etm_crdt_cfg_weight_together_t { + ZXIC_UINT32 cfg_weight_together; +} DPP_ETM_CRDT_CFG_WEIGHT_TOGETHER_T; + +typedef struct dpp_etm_crdt_weight_t { + ZXIC_UINT32 
c_weight; + ZXIC_UINT32 e_weight; +} DPP_ETM_CRDT_WEIGHT_T; + +typedef struct dpp_etm_crdt_dev_sp_state_t { + ZXIC_UINT32 dev_sp_state; +} DPP_ETM_CRDT_DEV_SP_STATE_T; + +typedef struct dpp_etm_crdt_dev_crs_t { + ZXIC_UINT32 dev_crs; +} DPP_ETM_CRDT_DEV_CRS_T; + +typedef struct dpp_etm_crdt_congest_token_disable_31_0_t { + ZXIC_UINT32 congest_token_disable_31_0; +} DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0_T; + +typedef struct dpp_etm_crdt_congest_token_disable_63_32_t { + ZXIC_UINT32 congest_token_disable_63_32; +} DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32_T; + +typedef struct dpp_etm_crdt_crdt_interval_en_cfg_t { + ZXIC_UINT32 crdt_interval_en; +} DPP_ETM_CRDT_CRDT_INTERVAL_EN_CFG_T; + +typedef struct dpp_etm_crdt_q_token_staue_cfg_t { + ZXIC_UINT32 test_token_q_id; +} DPP_ETM_CRDT_Q_TOKEN_STAUE_CFG_T; + +typedef struct dpp_etm_crdt_q_token_dist_cnt_t { + ZXIC_UINT32 q_token_dist_counter; +} DPP_ETM_CRDT_Q_TOKEN_DIST_CNT_T; + +typedef struct dpp_etm_crdt_q_token_dec_cnt_t { + ZXIC_UINT32 q_token_dec_counter; +} DPP_ETM_CRDT_Q_TOKEN_DEC_CNT_T; + +typedef struct dpp_etm_crdt_pp_weight_ram_t { + ZXIC_UINT32 pp_c_weight; +} DPP_ETM_CRDT_PP_WEIGHT_RAM_T; + +typedef struct dpp_etm_crdt_pp_cbs_shape_en_ram_t { + ZXIC_UINT32 pp_cbs; + ZXIC_UINT32 pp_c_shap_en; +} DPP_ETM_CRDT_PP_CBS_SHAPE_EN_RAM_T; + +typedef struct dpp_etm_crdt_pp_next_pc_q_state_ram_t { + ZXIC_UINT32 pp_next_pc; + ZXIC_UINT32 pp_token_num; + ZXIC_UINT32 pp_q_state; +} DPP_ETM_CRDT_PP_NEXT_PC_Q_STATE_RAM_T; + +typedef struct dpp_etm_crdt_dev_interval_t { + ZXIC_UINT32 dev_interval; +} DPP_ETM_CRDT_DEV_INTERVAL_T; + +typedef struct dpp_etm_crdt_dev_wfq_cnt_t { + ZXIC_UINT32 dev_wfq_cnt; +} DPP_ETM_CRDT_DEV_WFQ_CNT_T; + +typedef struct dpp_etm_crdt_dev_wfq_state_t { + ZXIC_UINT32 dev_wfq_state; +} DPP_ETM_CRDT_DEV_WFQ_STATE_T; + +typedef struct dpp_etm_crdt_dev_active_head_ptr_t { + ZXIC_UINT32 dev_active_head_ptr; +} DPP_ETM_CRDT_DEV_ACTIVE_HEAD_PTR_T; + +typedef struct dpp_etm_crdt_dev_active_tail_ptr_t { 
+ ZXIC_UINT32 dev_active_tail_ptr; +} DPP_ETM_CRDT_DEV_ACTIVE_TAIL_PTR_T; + +typedef struct dpp_etm_crdt_dev_unactive_head_ptr_t { + ZXIC_UINT32 dev_unactive_head_ptr; +} DPP_ETM_CRDT_DEV_UNACTIVE_HEAD_PTR_T; + +typedef struct dpp_etm_crdt_dev_unactive_tail_ptr_t { + ZXIC_UINT32 dev_unactive_tail_ptr; +} DPP_ETM_CRDT_DEV_UNACTIVE_TAIL_PTR_T; + +typedef struct dpp_etm_crdt_pp_weight_t { + ZXIC_UINT32 pp_weight; +} DPP_ETM_CRDT_PP_WEIGHT_T; + +typedef struct dpp_etm_crdt_pp_que_state_t { + ZXIC_UINT32 pp_enque_flag; + ZXIC_UINT32 pp_cir; + ZXIC_UINT32 pp_congest_cir; + ZXIC_UINT32 pp_crs; + ZXIC_UINT32 dev_sp; +} DPP_ETM_CRDT_PP_QUE_STATE_T; + +typedef struct dpp_etm_crdt_pp_next_ptr_t { + ZXIC_UINT32 pp_next_ptr; +} DPP_ETM_CRDT_PP_NEXT_PTR_T; + +typedef struct dpp_etm_crdt_pp_cfg_t { + ZXIC_UINT32 pp_cfg; +} DPP_ETM_CRDT_PP_CFG_T; + +typedef struct dpp_etm_crdt_pp_up_ptr_t { + ZXIC_UINT32 pp_up_ptr; +} DPP_ETM_CRDT_PP_UP_PTR_T; + +typedef struct dpp_etm_crdt_credit_drop_num_t { + ZXIC_UINT32 credit_drop_num; +} DPP_ETM_CRDT_CREDIT_DROP_NUM_T; + +typedef struct dpp_etm_crdt_se_id_lv0_t { + ZXIC_UINT32 se_id_out_lv0; +} DPP_ETM_CRDT_SE_ID_LV0_T; + +typedef struct dpp_etm_crdt_se_id_lv1_t { + ZXIC_UINT32 se_id_out_lv1; +} DPP_ETM_CRDT_SE_ID_LV1_T; + +typedef struct dpp_etm_crdt_se_id_lv2_t { + ZXIC_UINT32 se_id_out_lv2; +} DPP_ETM_CRDT_SE_ID_LV2_T; + +typedef struct dpp_etm_crdt_se_id_lv3_t { + ZXIC_UINT32 se_id_out_lv3; +} DPP_ETM_CRDT_SE_ID_LV3_T; + +typedef struct dpp_etm_crdt_se_id_lv4_t { + ZXIC_UINT32 se_id_out_lv4; +} DPP_ETM_CRDT_SE_ID_LV4_T; + +typedef struct dpp_etm_crdt_que_id_t { + ZXIC_UINT32 que_id_out; +} DPP_ETM_CRDT_QUE_ID_T; + +typedef struct dpp_etm_crdt_se_info_lv0_t { + ZXIC_UINT32 se_shape_lv0; + ZXIC_UINT32 se_ins_out_lv0; + ZXIC_UINT32 se_state_out_lv0; + ZXIC_UINT32 se_new_state_out_lv0; +} DPP_ETM_CRDT_SE_INFO_LV0_T; + +typedef struct dpp_etm_crdt_se_info_lv1_t { + ZXIC_UINT32 se_shape_lv1; + ZXIC_UINT32 se_ins_out_lv1; + ZXIC_UINT32 
se_state_out_lv1; + ZXIC_UINT32 se_new_state_out_lv1; +} DPP_ETM_CRDT_SE_INFO_LV1_T; + +typedef struct dpp_etm_crdt_se_info_lv2_t { + ZXIC_UINT32 se_shape_lv2; + ZXIC_UINT32 se_ins_out_lv2; + ZXIC_UINT32 se_state_out_lv2; + ZXIC_UINT32 se_new_state_out_lv2; +} DPP_ETM_CRDT_SE_INFO_LV2_T; + +typedef struct dpp_etm_crdt_se_info_lv3_t { + ZXIC_UINT32 se_shape_lv3; + ZXIC_UINT32 se_ins_out_lv3; + ZXIC_UINT32 se_state_out_lv3; + ZXIC_UINT32 se_new_state_out_lv3; +} DPP_ETM_CRDT_SE_INFO_LV3_T; + +typedef struct dpp_etm_crdt_se_info_lv4_t { + ZXIC_UINT32 se_shape_lv4; + ZXIC_UINT32 se_ins_out_lv4; + ZXIC_UINT32 se_state_out_lv4; + ZXIC_UINT32 se_new_state_out_lv4; +} DPP_ETM_CRDT_SE_INFO_LV4_T; + +typedef struct dpp_etm_crdt_que_state_t { + ZXIC_UINT32 que_state_out; +} DPP_ETM_CRDT_QUE_STATE_T; + +typedef struct dpp_etm_crdt_eir_off_in_advance_t { + ZXIC_UINT32 eir_crs_filter; +} DPP_ETM_CRDT_EIR_OFF_IN_ADVANCE_T; + +typedef struct dpp_etm_crdt_double_level_shap_prevent_t { + ZXIC_UINT32 double_level_shap_prevent; +} DPP_ETM_CRDT_DOUBLE_LEVEL_SHAP_PREVENT_T; + +typedef struct dpp_etm_crdt_add_store_cycle_t { + ZXIC_UINT32 add_store_cycle; +} DPP_ETM_CRDT_ADD_STORE_CYCLE_T; + +typedef struct dpp_etm_crdt_tflag2_wr_flag_sum_t { + ZXIC_UINT32 tflag2_wr_flag_sum; +} DPP_ETM_CRDT_TFLAG2_WR_FLAG_SUM_T; + +typedef struct dpp_etm_crdt_flowque_para_tbl_t { + ZXIC_UINT32 flowque_link; + ZXIC_UINT32 flowque_w; + ZXIC_UINT32 flowque_pri; +} DPP_ETM_CRDT_FLOWQUE_PARA_TBL_T; + +typedef struct dpp_etm_crdt_se_para_tbl_t { + ZXIC_UINT32 se_insw; + ZXIC_UINT32 se_link; + ZXIC_UINT32 cp_token_en; + ZXIC_UINT32 se_w; + ZXIC_UINT32 se_pri; +} DPP_ETM_CRDT_SE_PARA_TBL_T; + +typedef struct dpp_etm_crdt_flowque_ins_tbl_t { + ZXIC_UINT32 flowque_ins; +} DPP_ETM_CRDT_FLOWQUE_INS_TBL_T; + +typedef struct dpp_etm_crdt_se_ins_tbl_t { + ZXIC_UINT32 se_ins_flag; + ZXIC_UINT32 se_ins_priority; +} DPP_ETM_CRDT_SE_INS_TBL_T; + +typedef struct dpp_etm_crdt_eir_crs_filter_tbl_t { + ZXIC_UINT32 
eir_crs_filter; +} DPP_ETM_CRDT_EIR_CRS_FILTER_TBL_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_cfg_done_t { + ZXIC_UINT32 qcfg_qlist_cfg_done; +} DPP_ETM_QMU_QCFG_QLIST_CFG_DONE_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_credit_value_t { + ZXIC_UINT32 qcfg_qsch_credit_value; +} DPP_ETM_QMU_QCFG_QSCH_CREDIT_VALUE_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crbal_init_value_t { + ZXIC_UINT32 qcfg_qsch_crbal_init_value; +} DPP_ETM_QMU_QCFG_QSCH_CRBAL_INIT_VALUE_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crbal_init_mask_t { + ZXIC_UINT32 qcfg_qsch_crbal_init_mask; +} DPP_ETM_QMU_QCFG_QSCH_CRBAL_INIT_MASK_T; + +typedef struct dpp_etm_qmu_cmdsch_rd_cmd_aful_th_t { + ZXIC_UINT32 cmdsch_rd_cmd_aful_th; +} DPP_ETM_QMU_CMDSCH_RD_CMD_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfg_port_fc_interval_t { + ZXIC_UINT32 cfg_port_fc_interval; +} DPP_ETM_QMU_CFG_PORT_FC_INTERVAL_T; + +typedef struct dpp_etm_qmu_qcfg_csch_aged_cfg_t { + ZXIC_UINT32 qcfg_csch_aged_cfg; +} DPP_ETM_QMU_QCFG_CSCH_AGED_CFG_T; + +typedef struct dpp_etm_qmu_qcfg_csch_aged_scan_time_t { + ZXIC_UINT32 qcfg_csch_aged_scan_time; +} DPP_ETM_QMU_QCFG_CSCH_AGED_SCAN_TIME_T; + +typedef struct dpp_etm_qmu_qcfg_qmu_qlist_state_query_t { + ZXIC_UINT32 pkt_age_req_fifo_afull; + ZXIC_UINT32 rd_release_fwft_afull; + ZXIC_UINT32 drop_imem_fwft_afull; + ZXIC_UINT32 pkt_age_req_fifo_empty; + ZXIC_UINT32 rd_release_fwft_empty; + ZXIC_UINT32 drop_imem_fwft_empty; + ZXIC_UINT32 mmu_qmu_sop_rd_rdy; + ZXIC_UINT32 big_fifo_empty; + ZXIC_UINT32 qmu_mmu_rd_release_rdy; + ZXIC_UINT32 xsw_qmu_crs_rdy; + ZXIC_UINT32 mmu_qmu_rdy; + ZXIC_UINT32 mmu_ql_wr_rdy; + ZXIC_UINT32 mmu_ql_rd_rdy; + ZXIC_UINT32 csw_ql_rdy; + ZXIC_UINT32 ql_init_done; + ZXIC_UINT32 free_addr_ready; + ZXIC_UINT32 bank_group_afull; + ZXIC_UINT32 pds_fwft_empty; + ZXIC_UINT32 enq_rpt_fwft_afull; +} DPP_ETM_QMU_QCFG_QMU_QLIST_STATE_QUERY_T; + +typedef struct dpp_etm_qmu_cfgmt_qsch_crbal_drop_en_t { + ZXIC_UINT32 cfgmt_qsch_all_crbal_drop_en; + ZXIC_UINT32 
cfgmt_qsch_crbal_drop_en; +} DPP_ETM_QMU_CFGMT_QSCH_CRBAL_DROP_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_wlist_qnum_fifo_aful_th_t { + ZXIC_UINT32 cfgmt_wlist_qnum_fifo_aful_th; +} DPP_ETM_QMU_CFGMT_WLIST_QNUM_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_qmu_qcfg_csw_pkt_blk_mode_t { + ZXIC_UINT32 qcfg_csw_pkt_blk_mode; +} DPP_ETM_QMU_QCFG_CSW_PKT_BLK_MODE_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_ram_init_cancel_t { + ZXIC_UINT32 qcfg_qlist_ram_init_cancel; +} DPP_ETM_QMU_QCFG_QLIST_RAM_INIT_CANCEL_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crbal_transfer_mode_t { + ZXIC_UINT32 qcfg_qsch_crbal_transfer_mode; + ZXIC_UINT32 qcfg_qsch_crbal_transfer_value; +} DPP_ETM_QMU_QCFG_QSCH_CRBAL_TRANSFER_MODE_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_qclr_interval_t { + ZXIC_UINT32 qcfg_qlist_qclr_interval; +} DPP_ETM_QMU_QCFG_QLIST_QCLR_INTERVAL_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_qclr_rate_t { + ZXIC_UINT32 qcfg_qsch_qclr_rate; +} DPP_ETM_QMU_QCFG_QSCH_QCLR_RATE_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_ddr_random_t { + ZXIC_UINT32 qcfg_qlist_ddr_random; +} DPP_ETM_QMU_QCFG_QLIST_DDR_RANDOM_T; + +typedef struct dpp_etm_qmu_cfgmt_qlist_pds_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_qlist_pds_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_QLIST_PDS_FIFO_AFULL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_sop_cmd_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_sop_cmd_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_SOP_CMD_FIFO_AFULL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_non_sop_cmd_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_non_sop_cmd_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_NON_SOP_CMD_FIFO_AFULL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_mmu_data_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_mmu_data_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_MMU_DATA_FIFO_AFULL_TH_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_bank_ept_th_t { + ZXIC_UINT32 qcfg_qlist_bank_ept_th; +} DPP_ETM_QMU_QCFG_QLIST_BANK_EPT_TH_T; + +typedef struct dpp_etm_qmu_random_bypass_en_t { + ZXIC_UINT32 random_bypass_en; +} DPP_ETM_QMU_RANDOM_BYPASS_EN_T; + 
+typedef struct dpp_etm_qmu_cfgmt_crs_spd_bypass_t { + ZXIC_UINT32 cfgmt_crs_spd_bypass; +} DPP_ETM_QMU_CFGMT_CRS_SPD_BYPASS_T; + +typedef struct dpp_etm_qmu_cfgmt_crs_interval_t { + ZXIC_UINT32 cfgmt_crs_interval; +} DPP_ETM_QMU_CFGMT_CRS_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfg_qsch_auto_credit_control_en_t { + ZXIC_UINT32 cfg_qsch_auto_credit_control_en; +} DPP_ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_EN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_autocrfrstque_t { + ZXIC_UINT32 cfg_qsch_autocrfrstque; +} DPP_ETM_QMU_CFG_QSCH_AUTOCRFRSTQUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_autocrlastque_t { + ZXIC_UINT32 cfg_qsch_autocrlastque; +} DPP_ETM_QMU_CFG_QSCH_AUTOCRLASTQUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_autocreditrate_t { + ZXIC_UINT32 cfg_qsch_autocreditrate; +} DPP_ETM_QMU_CFG_QSCH_AUTOCREDITRATE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_scanfrstque_t { + ZXIC_UINT32 cfg_qsch_scanfrstque; +} DPP_ETM_QMU_CFG_QSCH_SCANFRSTQUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_scanlastque_t { + ZXIC_UINT32 cfg_qsch_scanlastque; +} DPP_ETM_QMU_CFG_QSCH_SCANLASTQUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_scanrate_t { + ZXIC_UINT32 cfg_qsch_scanrate; +} DPP_ETM_QMU_CFG_QSCH_SCANRATE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_scan_en_t { + ZXIC_UINT32 cfg_qsch_scan_en; +} DPP_ETM_QMU_CFG_QSCH_SCAN_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_qsch_rd_credit_fifo_rate_t { + ZXIC_UINT32 cfgmt_qsch_rd_credit_fifo_rate; +} DPP_ETM_QMU_CFGMT_QSCH_RD_CREDIT_FIFO_RATE_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_bdep_t { + ZXIC_UINT32 qcfg_qlist_bdep; +} DPP_ETM_QMU_QCFG_QLIST_BDEP_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_bhead_t { + ZXIC_UINT32 bank_vld; + ZXIC_UINT32 qcfg_qlist_bhead; +} DPP_ETM_QMU_QCFG_QLIST_BHEAD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_btail_t { + ZXIC_UINT32 qcfg_qlist_btail; +} DPP_ETM_QMU_QCFG_QLIST_BTAIL_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_shap_param_t { + ZXIC_UINT32 qcfg_qsch_shap_en; + ZXIC_UINT32 qcfg_qsch_shap_param1; + ZXIC_UINT32 
qcfg_qsch_shap_param2; +} DPP_ETM_QMU_QCFG_QSCH_SHAP_PARAM_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_shap_token_t { + ZXIC_UINT32 qcfg_qsch_shap_token; +} DPP_ETM_QMU_QCFG_QSCH_SHAP_TOKEN_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_shap_offset_t { + ZXIC_UINT32 qcfg_qsch_shap_offset; +} DPP_ETM_QMU_QCFG_QSCH_SHAP_OFFSET_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_eir_th_t { + ZXIC_UINT32 qcfg_qsch_crs_eir_th; +} DPP_ETM_QMU_QCFG_QSCH_CRS_EIR_TH_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_th1_t { + ZXIC_UINT32 qcfg_qsch_crs_th1; +} DPP_ETM_QMU_QCFG_QSCH_CRS_TH1_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_th2_t { + ZXIC_UINT32 qcfg_qsch_crs_th2; +} DPP_ETM_QMU_QCFG_QSCH_CRS_TH2_T; + +typedef struct dpp_etm_qmu_qcfg_csch_congest_th_t { + ZXIC_UINT32 qcfg_csch_congest_th; +} DPP_ETM_QMU_QCFG_CSCH_CONGEST_TH_T; + +typedef struct dpp_etm_qmu_qcfg_csch_sp_fc_th_t { + ZXIC_UINT32 qcfg_csch_sp_fc_th; +} DPP_ETM_QMU_QCFG_CSCH_SP_FC_TH_T; + +typedef struct dpp_etm_qmu_qcfg_csw_shap_parameter_t { + ZXIC_UINT32 qcfg_csw_shap_en; + ZXIC_UINT32 qcfg_csw_shap_parameter; +} DPP_ETM_QMU_QCFG_CSW_SHAP_PARAMETER_T; + +typedef struct dpp_etm_qmu_cfgmt_rd_release_aful_th_t { + ZXIC_UINT32 cfgmt_rd_release_aful_th; +} DPP_ETM_QMU_CFGMT_RD_RELEASE_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_drop_imem_release_fifo_aful_th_t { + ZXIC_UINT32 cfgmt_drop_imem_release_fifo_aful_th; +} DPP_ETM_QMU_CFGMT_DROP_IMEM_RELEASE_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_nnh_rd_buf_aful_th_t { + ZXIC_UINT32 cfgmt_nnh_rd_buf_aful_th; +} DPP_ETM_QMU_CFGMT_NNH_RD_BUF_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfg_pid_use_inall_t { + ZXIC_UINT32 cfgmt_nod_rd_buf_0_aful_th; +} DPP_ETM_QMU_CFG_PID_USE_INALL_T; + +typedef struct dpp_etm_qmu_cfg_pid_round_th_t { + ZXIC_UINT32 cfgmt_nod_rd_buf_1_aful_th; +} DPP_ETM_QMU_CFG_PID_ROUND_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_credit_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_credit_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_CREDIT_FIFO_AFULL_TH_T; + 
+typedef struct dpp_etm_qmu_cfgmt_scan_fifo_afull_th_t { + ZXIC_UINT32 cfgmt_scan_fifo_afull_th; +} DPP_ETM_QMU_CFGMT_SCAN_FIFO_AFULL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_small_fifo_aful_th_t { + ZXIC_UINT32 cfgmt_small_fifo_aful_th; +} DPP_ETM_QMU_CFGMT_SMALL_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_free_addr_fifo_aful_th_t { + ZXIC_UINT32 cfgmt_free_addr_fifo_aful_th; +} DPP_ETM_QMU_CFGMT_FREE_ADDR_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_enq_rpt_fifo_aful_th_t { + ZXIC_UINT32 cfgmt_enq_rpt_fifo_aful_th; +} DPP_ETM_QMU_CFGMT_ENQ_RPT_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_qmu_qcfg_csw_shap_token_depth_t { + ZXIC_UINT32 qcfg_csw_shap_token_depth; +} DPP_ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTH_T; + +typedef struct dpp_etm_qmu_qcfg_csw_shap_offset_value_t { + ZXIC_UINT32 qcfg_csw_shap_offset_value; +} DPP_ETM_QMU_QCFG_CSW_SHAP_OFFSET_VALUE_T; + +typedef struct dpp_etm_qmu_qcfg_csw_fc_offset_value_t { + ZXIC_UINT32 qcfg_csw_fc_offset_value; +} DPP_ETM_QMU_QCFG_CSW_FC_OFFSET_VALUE_T; + +typedef struct dpp_etm_qmu_qmu_init_done_state_t { + ZXIC_UINT32 csch_qcfg_init_done; + ZXIC_UINT32 qsch_qcfg_init_done; + ZXIC_UINT32 qlist_qcfg_init_done; + ZXIC_UINT32 qcsr_ram_init_done; +} DPP_ETM_QMU_QMU_INIT_DONE_STATE_T; + +typedef struct dpp_etm_qmu_csw_qcfg_port_shap_rdy_0_t { + ZXIC_UINT32 csw_qcfg_port_shap_rdy_0; +} DPP_ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_0_T; + +typedef struct dpp_etm_qmu_csw_qcfg_port_shap_rdy_1_t { + ZXIC_UINT32 csw_qcfg_port_shap_rdy_1; +} DPP_ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_1_T; + +typedef struct dpp_etm_qmu_qlist_cfgmt_ram_init_done_t { + ZXIC_UINT32 qlist_qcfg_qds_ram_init_done; + ZXIC_UINT32 qlist_qcfg_chk_ram_init_done; + ZXIC_UINT32 qlist_qcfg_ept_ram_init_done; + ZXIC_UINT32 qlist_qcfg_cti_ram_init_done; + ZXIC_UINT32 qlist_qcfg_cto_ram_init_done; + ZXIC_UINT32 qlist_qcfg_bcnt_ram_init_done; + ZXIC_UINT32 qlist_qcfg_biu_ram_init_done; + ZXIC_UINT32 qlist_qcfg_baram_init_done; +} DPP_ETM_QMU_QLIST_CFGMT_RAM_INIT_DONE_T; + 
+typedef struct dpp_etm_qmu_qlist_cfgmt_ram_ecc_err_t { + ZXIC_UINT32 qds_ram_parity_err; + ZXIC_UINT32 qcsr_qnum_fifo_parity_err; + ZXIC_UINT32 sa_id_ram_parity_err; + ZXIC_UINT32 enq_rpt_fifo_parity_err; + ZXIC_UINT32 bcnts_parity_err; + ZXIC_UINT32 baram_parity_err_a; + ZXIC_UINT32 baram_parity_err_b; + ZXIC_UINT32 bcntm_ram_parity_err; + ZXIC_UINT32 biu_ram_single_ecc_err; + ZXIC_UINT32 chk_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_cmd_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_list_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_hp_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_tp_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_enq_active_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_deq_active_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_empty_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_eop_ram_single_ecc_err; + ZXIC_UINT32 cmd_sch_blkcnt_ram_single_ecc_err; + ZXIC_UINT32 biu_ram_double_ecc_err; + ZXIC_UINT32 chk_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_cmd_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_list_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_hp_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_tp_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_enq_active_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_deq_active_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_empty_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_eop_ram_double_ecc_err; + ZXIC_UINT32 cmd_sch_blkcnt_ram_double_ecc_err; +} DPP_ETM_QMU_QLIST_CFGMT_RAM_ECC_ERR_T; + +typedef struct dpp_etm_qmu_qlist_cfgmt_ram_slot_err_t { + ZXIC_UINT32 qds_ram_enq_rd_slot_err; + ZXIC_UINT32 qds_ram_deq_rd_slot_err; + ZXIC_UINT32 qds_ram_enq_wr_slot_err; + ZXIC_UINT32 qds_ram_deq_wr_slot_err; + ZXIC_UINT32 chk_ram_enq_rd_slot_err; + ZXIC_UINT32 chk_ram_deq_rd_slot_err; + ZXIC_UINT32 chk_ram_enq_wr_slot_err; + ZXIC_UINT32 chk_ram_deq_wr_slot_err; + ZXIC_UINT32 ept_ram_enq_rd_slot_err; + ZXIC_UINT32 ept_ram_deq_rd_slot_err; + ZXIC_UINT32 ept_ram_enq_wr_slot_err; + ZXIC_UINT32 ept_ram_deq_wr_slot_err; + ZXIC_UINT32 cti_ram_enq_rd_slot_err; + ZXIC_UINT32 
cti_ram_deq_rd_slot_err; + ZXIC_UINT32 cti_ram_enq_wr_slot_err; + ZXIC_UINT32 cti_ram_deq_wr_slot_err; + ZXIC_UINT32 cto_ram_enq_rd_slot_err; + ZXIC_UINT32 cto_ram_deq_rd_slot_err; + ZXIC_UINT32 cto_ram_enq_wr_slot_err; + ZXIC_UINT32 cto_ram_deq_wr_slot_err; +} DPP_ETM_QMU_QLIST_CFGMT_RAM_SLOT_ERR_T; + +typedef struct dpp_etm_qmu_qsch_cfgmt_ram_ecc_t { + ZXIC_UINT32 crbal_rama_parity_error; + ZXIC_UINT32 crbal_ramb_parity_error; + ZXIC_UINT32 crs_ram_parity_error; + ZXIC_UINT32 wlist_flag_ram_single_ecc_err; + ZXIC_UINT32 wlist_next_single_ecc_err; + ZXIC_UINT32 wlist_wactive_ram_single_ecc_err; + ZXIC_UINT32 wlist_ractive_ram_single_ecc_err; + ZXIC_UINT32 wlist_tp1_ram_single_ecc_err; + ZXIC_UINT32 wlist_tp2_ram_single_ecc_err; + ZXIC_UINT32 wlist_empty1_ram_single_ecc_err_a; + ZXIC_UINT32 wlist_empty1_ram_single_ecc_err_b; + ZXIC_UINT32 wlist_empty2_ram_single_ecc_err_a; + ZXIC_UINT32 wlist_empty2_ram_single_ecc_err_b; + ZXIC_UINT32 wlist_hp_ram_single_ecc_err_a; + ZXIC_UINT32 wlist_hp_ram_single_ecc_err_b; + ZXIC_UINT32 wlist_flag_ram_double_ecc_err; + ZXIC_UINT32 wlist_next_double_ecc_err; + ZXIC_UINT32 wlist_wactive_ram_double_ecc_err; + ZXIC_UINT32 wlist_ractive_ram_double_ecc_err; + ZXIC_UINT32 wlist_tp1_ram_double_ecc_err; + ZXIC_UINT32 wlist_tp2_ram_double_ecc_err; + ZXIC_UINT32 wlist_empty1_ram_double_ecc_err_a; + ZXIC_UINT32 wlist_empty1_ram_double_ecc_err_b; + ZXIC_UINT32 wlist_empty2_ram_double_ecc_err_a; + ZXIC_UINT32 wlist_empty2_ram_double_ecc_err_b; + ZXIC_UINT32 wlist_hp_ram_double_ecc_err_a; + ZXIC_UINT32 wlist_hp_ram_double_ecc_err_b; +} DPP_ETM_QMU_QSCH_CFGMT_RAM_ECC_T; + +typedef struct dpp_etm_qmu_qlist_cfgmt_fifo_state_t { + ZXIC_UINT32 pkt_age_req_fifo_overflow; + ZXIC_UINT32 pkt_age_req_fifo_underflow; + ZXIC_UINT32 qcsr_big_fifo_ovfl; + ZXIC_UINT32 qcsr_small_fifo_overflow; + ZXIC_UINT32 enq_rpt_fifo_overflow; + ZXIC_UINT32 enq_rpt_fifo_underflow; + ZXIC_UINT32 pds_fwft_overflow; + ZXIC_UINT32 pds_fwft_underflow; + ZXIC_UINT32 
free_addr_fifo_overflow; + ZXIC_UINT32 free_addr_fifo_underflow; + ZXIC_UINT32 rd_release_fwft_overflow; + ZXIC_UINT32 rd_release_fwft_underflow; + ZXIC_UINT32 pid_free_list_overflow; + ZXIC_UINT32 pid_free_list_underflow; + ZXIC_UINT32 pid_prp_list_overflow; + ZXIC_UINT32 pid_prp_list_underflow; + ZXIC_UINT32 pid_rdy_list_overflow; + ZXIC_UINT32 pid_rdy_list_underflow; + ZXIC_UINT32 drop_imem_release_fwft_overflow; + ZXIC_UINT32 drop_imem_release_fwft_underflow; + ZXIC_UINT32 nnh_rd_buf_fifo_overflow; + ZXIC_UINT32 nnh_rd_buf_fifo_underflow; + ZXIC_UINT32 nod_rd_buf_0_fifo_overflow; + ZXIC_UINT32 nod_rd_buf_0_fifo_underflow; + ZXIC_UINT32 nod_rd_buf_1_fifo_overflow; + ZXIC_UINT32 nod_rd_buf_1_fifo_underflow; +} DPP_ETM_QMU_QLIST_CFGMT_FIFO_STATE_T; + +typedef struct dpp_etm_qmu_qlist_qcfg_clr_done_t { + ZXIC_UINT32 qlist_qcfg_clr_done; +} DPP_ETM_QMU_QLIST_QCFG_CLR_DONE_T; + +typedef struct dpp_etm_qmu_qmu_int_mask1_t { + ZXIC_UINT32 qmu_int_mask1; +} DPP_ETM_QMU_QMU_INT_MASK1_T; + +typedef struct dpp_etm_qmu_qmu_int_mask2_t { + ZXIC_UINT32 qmu_int_mask2; +} DPP_ETM_QMU_QMU_INT_MASK2_T; + +typedef struct dpp_etm_qmu_qmu_int_mask3_t { + ZXIC_UINT32 qmu_int_mask3; +} DPP_ETM_QMU_QMU_INT_MASK3_T; + +typedef struct dpp_etm_qmu_qmu_int_mask4_t { + ZXIC_UINT32 qmu_int_mask4; +} DPP_ETM_QMU_QMU_INT_MASK4_T; + +typedef struct dpp_etm_qmu_qmu_int_mask5_t { + ZXIC_UINT32 qmu_int_mask5; +} DPP_ETM_QMU_QMU_INT_MASK5_T; + +typedef struct dpp_etm_qmu_qmu_int_mask6_t { + ZXIC_UINT32 qmu_int_mask6; +} DPP_ETM_QMU_QMU_INT_MASK6_T; + +typedef struct dpp_etm_qmu_cmd_sch_cfgmt_fifo_state_t { + ZXIC_UINT32 nsop_fifo_parity_err; + ZXIC_UINT32 cmdsch_rd_cmd_fifo_parity_err; + ZXIC_UINT32 sop_fifo_afull; + ZXIC_UINT32 sop_fifo_empty; + ZXIC_UINT32 sop_fifo_overflow; + ZXIC_UINT32 sop_fifo_underflow; + ZXIC_UINT32 mmu_data_fifo_afull; + ZXIC_UINT32 mmu_data_fifo_empty; + ZXIC_UINT32 mmudat_fifo_overflow; + ZXIC_UINT32 mmudat_fifo_underflow; + ZXIC_UINT32 non_sop_fifo_afull; + ZXIC_UINT32 
non_sop_fifo_empty; + ZXIC_UINT32 nsop_fifo_overflow; + ZXIC_UINT32 nsop_fifo_underflow; + ZXIC_UINT32 cmdsch_rd_cmd_fifo_afull; + ZXIC_UINT32 cmdsch_rd_cmd_fifo_empty; + ZXIC_UINT32 cmdsch_rd_cmd_fifo_overflow; + ZXIC_UINT32 cmdsch_rd_cmd_fifo_underflow; + ZXIC_UINT32 wlist_qnum_fifo_overflow; + ZXIC_UINT32 wlist_qnum_fifo_underflow; + ZXIC_UINT32 qsch_scan_fifo_overflow; + ZXIC_UINT32 qsch_scan_fifo_underflow; + ZXIC_UINT32 qsch_credit_fifo_overflow; + ZXIC_UINT32 qsch_credit_fifo_underflow; + ZXIC_UINT32 qsch_credit_fifo2_overflow; + ZXIC_UINT32 qsch_credit_fifo2_underflow; +} DPP_ETM_QMU_CMD_SCH_CFGMT_FIFO_STATE_T; + +typedef struct dpp_etm_qmu_qlist_r_bcnt_t { + ZXIC_UINT32 qlist_r_bcnt; +} DPP_ETM_QMU_QLIST_R_BCNT_T; + +typedef struct dpp_etm_qmu_qsch_rw_crbal_t { + ZXIC_UINT32 qsch_rw_crbal; +} DPP_ETM_QMU_QSCH_RW_CRBAL_T; + +typedef struct dpp_etm_qmu_qsch_rw_crs_t { + ZXIC_UINT32 qsch_rw_crs; +} DPP_ETM_QMU_QSCH_RW_CRS_T; + +typedef struct dpp_etm_qmu_qsch_r_wlist_empty_t { + ZXIC_UINT32 qsch_r_wlist_empty; +} DPP_ETM_QMU_QSCH_R_WLIST_EMPTY_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_baram_rd_t { + ZXIC_UINT32 qcfg_qlist_baram_rd; +} DPP_ETM_QMU_QCFG_QLIST_BARAM_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crbal_fb_rw_t { + ZXIC_UINT32 qcfg_qlist_crbal_fb_rw; +} DPP_ETM_QMU_QCFG_QSCH_CRBAL_FB_RW_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp0_bank_t { + ZXIC_UINT32 qcfg_qlist_grp0_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp1_bank_t { + ZXIC_UINT32 qcfg_qlist_grp1_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP1_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp2_bank_t { + ZXIC_UINT32 qcfg_qlist_grp2_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP2_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp3_bank_t { + ZXIC_UINT32 qcfg_qlist_grp3_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP3_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp4_bank_t { + ZXIC_UINT32 qcfg_qlist_grp4_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP4_BANK_T; + 
+typedef struct dpp_etm_qmu_qcfg_qlist_grp5_bank_t { + ZXIC_UINT32 qcfg_qlist_grp5_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP5_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp6_bank_t { + ZXIC_UINT32 qcfg_qlist_grp6_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP6_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp7_bank_t { + ZXIC_UINT32 qcfg_qlist_grp7_bank_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP7_BANK_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_grp_t { + ZXIC_UINT32 qcfg_qlist_grp_wr; +} DPP_ETM_QMU_QCFG_QLIST_GRP_T; + +typedef struct dpp_etm_qmu_cfgmt_active_to_bank_cfg_t { + ZXIC_UINT32 cfgmt_active_to_bank_cfg; +} DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T; + +typedef struct dpp_etm_qmu_cfgmt_ddr_in_mmu_cfg_t { + ZXIC_UINT32 cfgmt_ddr_in_mmu_cfg; +} DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T; + +typedef struct dpp_etm_qmu_cfgmt_ddr_in_qmu_cfg_t { + ZXIC_UINT32 cfgmt_ddr_in_qmu_cfg; +} DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T; + +typedef struct dpp_etm_qmu_cfgmt_bank_to_mmu_cfg_t { + ZXIC_UINT32 cfgmt_bank_in_mmu_cfg; +} DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T; + +typedef struct dpp_etm_qmu_cfgmt_bank_to_qmu_cfg_t { + ZXIC_UINT32 cfgmt_bank_in_qmu_cfg; +} DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T; + +typedef struct dpp_etm_qmu_cfgmt_grp_ram_n_clr_thd_t { + ZXIC_UINT32 cfgmt_grp_ram_n_clr_thd; +} DPP_ETM_QMU_CFGMT_GRP_RAM_N_CLR_THD_T; + +typedef struct dpp_etm_qmu_cfgmt_age_pkt_num_t { + ZXIC_UINT32 cfgmt_age_pkt_num; +} DPP_ETM_QMU_CFGMT_AGE_PKT_NUM_T; + +typedef struct dpp_etm_qmu_cfgmt_age_multi_interval_t { + ZXIC_UINT32 cfgmt_age_multi_interval; +} DPP_ETM_QMU_CFGMT_AGE_MULTI_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pkt_age_en_t { + ZXIC_UINT32 cfgmt_qmu_pkt_age_en; +} DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pkt_age_interval_t { + ZXIC_UINT32 cfgmt_qmu_pkt_age_interval; +} DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pkt_age_start_end_t { + ZXIC_UINT32 cfgmt_qmu_pkt_age_end; + ZXIC_UINT32 cfgmt_qmu_pkt_age_start; 
+} DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_START_END_T; + +typedef struct dpp_etm_qmu_cfgmt_pkt_age_req_aful_th_t { + ZXIC_UINT32 cfgmt_pkt_age_req_aful_th; +} DPP_ETM_QMU_CFGMT_PKT_AGE_REQ_AFUL_TH_T; + +typedef struct dpp_etm_qmu_cfgmt_pkt_age_step_interval_t { + ZXIC_UINT32 cfgmt_pkt_age_step_interval; +} DPP_ETM_QMU_CFGMT_PKT_AGE_STEP_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_age_mode_t { + ZXIC_UINT32 cfgmt_qmu_imem_age_en; + ZXIC_UINT32 cfgmt_qmu_imem_age_qlen_en; + ZXIC_UINT32 cfgmt_qmu_imem_age_time_en; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_AGE_MODE_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_qlen_age_interval_t { + ZXIC_UINT32 cfgmt_qmu_imem_qlen_age_interval; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_time_age_interval_t { + ZXIC_UINT32 cfgmt_qmu_imem_time_age_interval; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_TIME_AGE_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_qlen_age_thd_t { + ZXIC_UINT32 cfgmt_qmu_imem_qlen_age_thd; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_THD_T; + +typedef struct dpp_etm_qmu_cfgmt_imem_age_step_interval_t { + ZXIC_UINT32 cfgmt_imem_age_step_interval; +} DPP_ETM_QMU_CFGMT_IMEM_AGE_STEP_INTERVAL_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_ecc_bypass_read_t { + ZXIC_UINT32 cfgmt_qmu_ecc_bypass_read; +} DPP_ETM_QMU_CFGMT_QMU_ECC_BYPASS_READ_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_resp_stat_fc_en_t { + ZXIC_UINT32 cfgmt_qmu_resp_stat_fc_en; +} DPP_ETM_QMU_CFGMT_QMU_RESP_STAT_FC_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_bank_xoff_pds_mode_t { + ZXIC_UINT32 cfgmt_qmu_bank_xoff_pds_mode; +} DPP_ETM_QMU_CFGMT_QMU_BANK_XOFF_PDS_MODE_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_stat_offset_t { + ZXIC_UINT32 cfgmt_qmu_stat_offset; +} DPP_ETM_QMU_CFGMT_QMU_STAT_OFFSET_T; + +typedef struct dpp_etm_qmu_fc_cnt_mode_t { + ZXIC_UINT32 fc_cnt_mode; +} DPP_ETM_QMU_FC_CNT_MODE_T; + +typedef struct dpp_etm_qmu_mmu_qmu_wr_fc_cnt_t { + ZXIC_UINT32 mmu_qmu_wr_fc_cnt; +} 
DPP_ETM_QMU_MMU_QMU_WR_FC_CNT_T; + +typedef struct dpp_etm_qmu_mmu_qmu_rd_fc_cnt_t { + ZXIC_UINT32 mmu_qmu_rd_fc_cnt; +} DPP_ETM_QMU_MMU_QMU_RD_FC_CNT_T; + +typedef struct dpp_etm_qmu_qmu_cgavd_fc_cnt_t { + ZXIC_UINT32 qmu_cgavd_fc_cnt; +} DPP_ETM_QMU_QMU_CGAVD_FC_CNT_T; + +typedef struct dpp_etm_qmu_cgavd_qmu_pkt_cnt_t { + ZXIC_UINT32 cgavd_qmu_pkt_cnt; +} DPP_ETM_QMU_CGAVD_QMU_PKT_CNT_T; + +typedef struct dpp_etm_qmu_cgavd_qmu_pktlen_all_t { + ZXIC_UINT32 cgavd_qmu_pktlen_all; +} DPP_ETM_QMU_CGAVD_QMU_PKTLEN_ALL_T; + +typedef struct dpp_etm_qmu_observe_portfc_spec_t { + ZXIC_UINT32 observe_portfc_spec; +} DPP_ETM_QMU_OBSERVE_PORTFC_SPEC_T; + +typedef struct dpp_etm_qmu_spec_lif_portfc_count_t { + ZXIC_UINT32 spec_lif_portfc_count; +} DPP_ETM_QMU_SPEC_LIF_PORTFC_COUNT_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pfc_en_t { + ZXIC_UINT32 cfgmt_qmu_pfc_en; +} DPP_ETM_QMU_CFGMT_QMU_PFC_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pfc_mask_1_t { + ZXIC_UINT32 cfgmt_qmu_pfc_mask_1; +} DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_1_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_pfc_mask_2_t { + ZXIC_UINT32 cfgmt_qmu_pfc_mask_2; +} DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_2_T; + +typedef struct dpp_etm_cfgmt_chip_version_reg_t { + ZXIC_UINT32 chip_version_reg; + ZXIC_UINT32 chip_sub_reg; + ZXIC_UINT32 chip_type_reg; +} DPP_ETM_CFGMT_CHIP_VERSION_REG_T; + +typedef struct dpp_etm_cfgmt_chip_date_reg_t { + ZXIC_UINT32 chip_date_reg; +} DPP_ETM_CFGMT_CHIP_DATE_REG_T; + +typedef struct dpp_etm_cfgmt_cfgmt_crc_en_t { + ZXIC_UINT32 cfgmt_crc_en; +} DPP_ETM_CFGMT_CFGMT_CRC_EN_T; + +typedef struct dpp_etm_cfgmt_cfg_port_transfer_en_t { + ZXIC_UINT32 cfg_port_transfer_en; +} DPP_ETM_CFGMT_CFG_PORT_TRANSFER_EN_T; + +typedef struct dpp_etm_cfgmt_tm_sa_work_mode_t { + ZXIC_UINT32 tm_sa_work_mode; +} DPP_ETM_CFGMT_TM_SA_WORK_MODE_T; + +typedef struct dpp_etm_cfgmt_local_sa_id_t { + ZXIC_UINT32 local_sa_id; +} DPP_ETM_CFGMT_LOCAL_SA_ID_T; + +typedef struct dpp_etm_olif_olif_rdy_t { + ZXIC_UINT32 cfgmt_block_mode; 
+ ZXIC_UINT32 cfgmt_count_overflow_mode; + ZXIC_UINT32 cfgmt_count_rd_mode; + ZXIC_UINT32 olif_rdy; +} DPP_ETM_OLIF_OLIF_RDY_T; + +typedef struct dpp_etm_olif_emem_prog_full_t { + ZXIC_UINT32 emem_prog_full_assert; + ZXIC_UINT32 emem_prog_full_negate; +} DPP_ETM_OLIF_EMEM_PROG_FULL_T; + +typedef struct dpp_etm_olif_port_order_fifo_full_t { + ZXIC_UINT32 port_order_fifo_full_assert; + ZXIC_UINT32 port_order_fifo_full_negate; +} DPP_ETM_OLIF_PORT_ORDER_FIFO_FULL_T; + +typedef struct dpp_etm_olif_olif_release_last_t { + ZXIC_UINT32 olif_release_last_addr; + ZXIC_UINT32 olif_release_last_bank; +} DPP_ETM_OLIF_OLIF_RELEASE_LAST_T; + +typedef struct dpp_etm_olif_olif_fifo_empty_state_t { + ZXIC_UINT32 qmu_para_fifo_empty; + ZXIC_UINT32 emem_empty; + ZXIC_UINT32 imem_empty; +} DPP_ETM_OLIF_OLIF_FIFO_EMPTY_STATE_T; + +typedef struct dpp_etm_olif_qmu_olif_release_fc_cnt_t { + ZXIC_UINT32 qmu_olif_release_fc_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RELEASE_FC_CNT_T; + +typedef struct dpp_etm_olif_olif_qmu_link_fc_cnt_t { + ZXIC_UINT32 olif_qmu_link_fc_cnt; +} DPP_ETM_OLIF_OLIF_QMU_LINK_FC_CNT_T; + +typedef struct dpp_etm_olif_lif0_link_fc_cnt_t { + ZXIC_UINT32 lif0_link_fc_cnt; +} DPP_ETM_OLIF_LIF0_LINK_FC_CNT_T; + +typedef struct dpp_etm_olif_olif_tmmu_fc_cnt_t { + ZXIC_UINT32 olif_tmmu_fc_cnt; +} DPP_ETM_OLIF_OLIF_TMMU_FC_CNT_T; + +typedef struct dpp_etm_olif_olif_mmu_fc_cnt_t { + ZXIC_UINT32 olif_mmu_fc_cnt; +} DPP_ETM_OLIF_OLIF_MMU_FC_CNT_T; + +typedef struct dpp_etm_olif_olif_qmu_port_rdy_h_t { + ZXIC_UINT32 olif_qmu_port_rdy_h; +} DPP_ETM_OLIF_OLIF_QMU_PORT_RDY_H_T; + +typedef struct dpp_etm_olif_olif_qmu_port_rdy_l_t { + ZXIC_UINT32 olif_qmu_port_rdy_l; +} DPP_ETM_OLIF_OLIF_QMU_PORT_RDY_L_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_h_t { + ZXIC_UINT32 lif0_port_rdy_h; +} DPP_ETM_OLIF_LIF0_PORT_RDY_H_T; + +typedef struct dpp_etm_olif_lif0_port_rdy_l_t { + ZXIC_UINT32 lif0_port_rdy_l; +} DPP_ETM_OLIF_LIF0_PORT_RDY_L_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_sop_cnt_t { + 
ZXIC_UINT32 qmu_olif_rd_sop_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RD_SOP_CNT_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_eop_cnt_t { + ZXIC_UINT32 qmu_olif_rd_eop_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RD_EOP_CNT_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_vld_cnt_t { + ZXIC_UINT32 qmu_olif_rd_vld_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RD_VLD_CNT_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_blk_cnt_t { + ZXIC_UINT32 qmu_olif_rd_blk_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RD_BLK_CNT_T; + +typedef struct dpp_etm_olif_mmu_tm_data_sop_cnt_t { + ZXIC_UINT32 mmu_tm_data_sop_cnt; +} DPP_ETM_OLIF_MMU_TM_DATA_SOP_CNT_T; + +typedef struct dpp_etm_olif_mmu_tm_data_eop_cnt_t { + ZXIC_UINT32 mmu_tm_data_eop_cnt; +} DPP_ETM_OLIF_MMU_TM_DATA_EOP_CNT_T; + +typedef struct dpp_etm_olif_mmu_tm_data_vld_cnt_t { + ZXIC_UINT32 mmu_tm_data_vld_cnt; +} DPP_ETM_OLIF_MMU_TM_DATA_VLD_CNT_T; + +typedef struct dpp_etm_olif_odma_tm_data_sop_cnt_t { + ZXIC_UINT32 odma_tm_data_sop_cnt; +} DPP_ETM_OLIF_ODMA_TM_DATA_SOP_CNT_T; + +typedef struct dpp_etm_olif_odma_tm_data_eop_cnt_t { + ZXIC_UINT32 odma_tm_data_eop_cnt; +} DPP_ETM_OLIF_ODMA_TM_DATA_EOP_CNT_T; + +typedef struct dpp_etm_olif_odma_tm_deq_vld_cnt_t { + ZXIC_UINT32 odma_tm_deq_vld_cnt; +} DPP_ETM_OLIF_ODMA_TM_DEQ_VLD_CNT_T; + +typedef struct dpp_etm_olif_olif_qmu_release_vld_cnt_t { + ZXIC_UINT32 olif_qmu_release_vld_cnt; +} DPP_ETM_OLIF_OLIF_QMU_RELEASE_VLD_CNT_T; + +typedef struct dpp_etm_olif_emem_dat_vld_cnt_t { + ZXIC_UINT32 emem_dat_vld_cnt; +} DPP_ETM_OLIF_EMEM_DAT_VLD_CNT_T; + +typedef struct dpp_etm_olif_imem_dat_vld_cnt_t { + ZXIC_UINT32 imem_dat_vld_cnt; +} DPP_ETM_OLIF_IMEM_DAT_VLD_CNT_T; + +typedef struct dpp_etm_olif_emem_dat_rd_cnt_t { + ZXIC_UINT32 emem_dat_rd_cnt; +} DPP_ETM_OLIF_EMEM_DAT_RD_CNT_T; + +typedef struct dpp_etm_olif_imem_dat_rd_cnt_t { + ZXIC_UINT32 imem_dat_rd_cnt; +} DPP_ETM_OLIF_IMEM_DAT_RD_CNT_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_sop_emem_cnt_t { + ZXIC_UINT32 qmu_olif_rd_sop_emem_cnt; +} 
DPP_ETM_OLIF_QMU_OLIF_RD_SOP_EMEM_CNT_T; + +typedef struct dpp_etm_olif_qmu_olif_rd_vld_emem_cnt_t { + ZXIC_UINT32 qmu_olif_rd_vld_emem_cnt; +} DPP_ETM_OLIF_QMU_OLIF_RD_VLD_EMEM_CNT_T; + +typedef struct dpp_etm_olif_cpu_last_wr_addr_t { + ZXIC_UINT32 cpu_last_wr_addr; +} DPP_ETM_OLIF_CPU_LAST_WR_ADDR_T; + +typedef struct dpp_etm_olif_cpu_last_wr_data_t { + ZXIC_UINT32 cpu_last_wr_data; +} DPP_ETM_OLIF_CPU_LAST_WR_DATA_T; + +typedef struct dpp_etm_olif_cpu_last_rd_addr_t { + ZXIC_UINT32 cpu_last_rd_addr; +} DPP_ETM_OLIF_CPU_LAST_RD_ADDR_T; + +typedef struct dpp_etm_olif_qmu_olif_last_port_t { + ZXIC_UINT32 qmu_olif_last_port; +} DPP_ETM_OLIF_QMU_OLIF_LAST_PORT_T; + +typedef struct dpp_etm_olif_qmu_olif_last_addr_t { + ZXIC_UINT32 qmu_olif_last_addr; +} DPP_ETM_OLIF_QMU_OLIF_LAST_ADDR_T; + +typedef struct dpp_etm_olif_qmu_olif_last_bank_t { + ZXIC_UINT32 qmu_olif_last_bank; +} DPP_ETM_OLIF_QMU_OLIF_LAST_BANK_T; + +typedef struct dpp_etm_olif_tm_lif_byte_stat_t { + ZXIC_UINT32 tm_lif_byte_stat; +} DPP_ETM_OLIF_TM_LIF_BYTE_STAT_T; + +typedef struct dpp_etm_olif_tm_lif_err_stat_t { + ZXIC_UINT32 tm_lif_err_stat; +} DPP_ETM_OLIF_TM_LIF_ERR_STAT_T; + +typedef struct dpp_etm_cgavd_port_share_cnt_t { + ZXIC_UINT32 port_share_cnt; +} DPP_ETM_CGAVD_PORT_SHARE_CNT_T; + +typedef struct dpp_etm_cgavd_total_imem_cnt_t { + ZXIC_UINT32 total_imem_cnt; +} DPP_ETM_CGAVD_TOTAL_IMEM_CNT_T; + +typedef struct dpp_etm_cgavd_pp_q_len_t { + ZXIC_UINT32 pp_q_len; +} DPP_ETM_CGAVD_PP_Q_LEN_T; + +typedef struct dpp_etm_cgavd_sys_q_len_t { + ZXIC_UINT32 sys_q_len; +} DPP_ETM_CGAVD_SYS_Q_LEN_T; + +typedef struct dpp_etm_cgavd_cgavd_cfg_error_warning_t { + ZXIC_UINT32 error_correction_11; + ZXIC_UINT32 error_correction_10; + ZXIC_UINT32 error_correction_9; + ZXIC_UINT32 error_correction_8; + ZXIC_UINT32 error_correction_7; + ZXIC_UINT32 error_correction_6; + ZXIC_UINT32 error_correction5; + ZXIC_UINT32 error_correction_4; + ZXIC_UINT32 error_correction_3; + ZXIC_UINT32 error_correction_2; + 
ZXIC_UINT32 error_correction_1; + ZXIC_UINT32 error_correction_0; +} DPP_ETM_CGAVD_CGAVD_CFG_ERROR_WARNING_T; + +typedef struct dpp_etm_cgavd_mult_qlen_th_en_t { + ZXIC_UINT32 mult_qlen_th; +} DPP_ETM_CGAVD_MULT_QLEN_TH_EN_T; + +typedef struct dpp_etm_cgavd_mult_qlen_th_t { + ZXIC_UINT32 mult_qlen_th; +} DPP_ETM_CGAVD_MULT_QLEN_TH_T; + +typedef struct dpp_etm_cgavd_cgavd_cfg_move_t { + ZXIC_UINT32 cfgmt_sys_move_en; + ZXIC_UINT32 cfgmt_port_move_en; + ZXIC_UINT32 cfgmt_flow_move_en; +} DPP_ETM_CGAVD_CGAVD_CFG_MOVE_T; + +typedef struct dpp_etm_cgavd_cfgmt_total_th_t { + ZXIC_UINT32 cfgmt_total_th; +} DPP_ETM_CGAVD_CFGMT_TOTAL_TH_T; + +typedef struct dpp_etm_cgavd_cfgmt_port_share_th_t { + ZXIC_UINT32 cfgmt_port_share_th; +} DPP_ETM_CGAVD_CFGMT_PORT_SHARE_TH_T; + +typedef struct dpp_etm_cgavd_sa_unreach_state_t { + ZXIC_UINT32 sa_unreach_state; +} DPP_ETM_CGAVD_SA_UNREACH_STATE_T; + +typedef struct dpp_etm_cgavd_mv_port_th_t { + ZXIC_UINT32 port_th; +} DPP_ETM_CGAVD_MV_PORT_TH_T; + +typedef struct dpp_etm_cgavd_mv_drop_sp_th_t { + ZXIC_UINT32 mvdrop_sp_th; +} DPP_ETM_CGAVD_MV_DROP_SP_TH_T; + +typedef struct dpp_etm_cgavd_cgavd_state_warning_t { + ZXIC_UINT32 deq_q_num_warning; + ZXIC_UINT32 deq_pkt_len_warning; + ZXIC_UINT32 enq_pkt_dp_warning; + ZXIC_UINT32 unenq_q_num_warning; + ZXIC_UINT32 enq_q_num_warning; + ZXIC_UINT32 enq_pkt_len_warning; +} DPP_ETM_CGAVD_CGAVD_STATE_WARNING_T; + +typedef struct dpp_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_t { + ZXIC_UINT32 tmmu_cgavd_dma_fifo_cnt; +} DPP_ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNT_T; + +typedef struct dpp_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_max_t { + ZXIC_UINT32 tmmu_cgavd_dma_fifo_cnt_max; +} DPP_ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNT_MAX_T; + +typedef struct dpp_etm_cgavd_imem_total_cnt_t { + ZXIC_UINT32 imem_total_cnt; +} DPP_ETM_CGAVD_IMEM_TOTAL_CNT_T; + +typedef struct dpp_etm_cgavd_imem_total_cnt_max_t { + ZXIC_UINT32 imem_total_cnt_max; +} DPP_ETM_CGAVD_IMEM_TOTAL_CNT_MAX_T; + +typedef struct dpp_etm_cgavd_flow0_omem_cnt_t { 
+ ZXIC_UINT32 flow0_omem_cnt; +} DPP_ETM_CGAVD_FLOW0_OMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow1_omem_cnt_t { + ZXIC_UINT32 flow1_omem_cnt; +} DPP_ETM_CGAVD_FLOW1_OMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow2_omem_cnt_t { + ZXIC_UINT32 flow2_omem_cnt; +} DPP_ETM_CGAVD_FLOW2_OMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow3_omem_cnt_t { + ZXIC_UINT32 flow3_omem_cnt; +} DPP_ETM_CGAVD_FLOW3_OMEM_CNT_T; + +typedef struct dpp_etm_cgavd_flow4_omem_cnt_t { + ZXIC_UINT32 flow4_omem_cnt; +} DPP_ETM_CGAVD_FLOW4_OMEM_CNT_T; + +typedef struct dpp_etm_cgavd_appoint_flow_num_message_1_t { + ZXIC_UINT32 appoint_flow_num_en_1; + ZXIC_UINT32 appoint_flow_num_1; +} DPP_ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_1_T; + +typedef struct dpp_etm_cgavd_appoint_flow_num_message_2_t { + ZXIC_UINT32 appoint_flow_num_en_2; + ZXIC_UINT32 appoint_flow_num_2; +} DPP_ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_2_T; + +typedef struct dpp_etm_cgavd_odma_cgavd_pkt_num_1_t { + ZXIC_UINT32 odma_cgavd_pkt_num_1; +} DPP_ETM_CGAVD_ODMA_CGAVD_PKT_NUM_1_T; + +typedef struct dpp_etm_cgavd_odma_cgavd_byte_num_1_t { + ZXIC_UINT32 odma_cgavd_byte_num_1; +} DPP_ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_enqueue_pkt_num_1_t { + ZXIC_UINT32 cgavd_enqueue_pkt_num_1; +} DPP_ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_dequeue_pkt_num_1_t { + ZXIC_UINT32 cgavd_dequeue_pkt_num_1; +} DPP_ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_imem_num_1_t { + ZXIC_UINT32 cgavd_qmu_pkt_imem_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_omem_num_1_t { + ZXIC_UINT32 cgavd_qmu_pkt_omem_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_imem_num_1_t { + ZXIC_UINT32 cgavd_qmu_byte_imem_1; +} DPP_ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_omem_num_1_t { + ZXIC_UINT32 cgavd_qmu_byte_omem_1; +} 
DPP_ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_pkt_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_byte_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_forbid_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_forbid_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_td_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_flow_td_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_wred_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_flow_wred_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_flow_wred_dp_drop_num1; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_td_num_1_t { + ZXIC_UINT32 cgavd_qmu_pp_td_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_wred_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_pp_wred_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_pp_wred_dp_drop_num1; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_td_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_sys_td_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_gred_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_sys_gred_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num1_t { + ZXIC_UINT32 cgavd_qmu_sys_gred_dp_drop_num1; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sa_drop_num_1_t { + 
ZXIC_UINT32 cgavd_qmu_sa_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_move_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_move_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_tm_mult_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_tm_mult_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_tm_error_drop_num_1_t { + ZXIC_UINT32 cgavd_qmu_tm_error_drop_num_1; +} DPP_ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_1_T; + +typedef struct dpp_etm_cgavd_odma_cgavd_pkt_num_2_t { + ZXIC_UINT32 odma_cgavd_pkt_num_2; +} DPP_ETM_CGAVD_ODMA_CGAVD_PKT_NUM_2_T; + +typedef struct dpp_etm_cgavd_odma_cgavd_byte_num_2_t { + ZXIC_UINT32 odma_cgavd_byte_num_2; +} DPP_ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_enqueue_pkt_num_2_t { + ZXIC_UINT32 cgavd_enqueue_pkt_num_2; +} DPP_ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_dequeue_pkt_num_2_t { + ZXIC_UINT32 cgavd_dequeue_pkt_num_2; +} DPP_ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_imem_num_2_t { + ZXIC_UINT32 cgavd_qmu_pkt_imem_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_omem_num_2_t { + ZXIC_UINT32 cgavd_qmu_pkt_omem_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_imem_num_2_t { + ZXIC_UINT32 cgavd_qmu_byte_imem_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_omem_num_2_t { + ZXIC_UINT32 cgavd_qmu_byte_omem_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pkt_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_pkt_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_byte_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_byte_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_2_T; + 
+typedef struct dpp_etm_cgavd_cgavd_qmu_forbid_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_forbid_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_td_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_flow_td_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_wred_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_flow_wred_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_flow_wred_dp_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_td_num_2_t { + ZXIC_UINT32 cgavd_qmu_pp_td_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_wred_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_pp_wred_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_pp_wred_dp_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_td_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_sys_td_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_gred_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_sys_gred_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_sys_gred_dp_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_sa_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_sa_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_move_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_move_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_tm_mult_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_tm_mult_drop_num_2; +} 
DPP_ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_cgavd_qmu_tm_error_drop_num_2_t { + ZXIC_UINT32 cgavd_qmu_tm_error_drop_num_2; +} DPP_ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_2_T; + +typedef struct dpp_etm_cgavd_move_flow_th_profile_t { + ZXIC_UINT32 move_drop_profile; +} DPP_ETM_CGAVD_MOVE_FLOW_TH_PROFILE_T; + +typedef struct dpp_etm_cgavd_move_flow_th_t { + ZXIC_UINT32 move_drop_flow_th; +} DPP_ETM_CGAVD_MOVE_FLOW_TH_T; + +typedef struct dpp_etm_tmmu_emem_pd_fifo_aful_th_t { + ZXIC_UINT32 emem_pd_fifo_aful_th; +} DPP_ETM_TMMU_EMEM_PD_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_dma_data_fifo_aful_th_t { + ZXIC_UINT32 dma_data_fifo_aful_th; +} DPP_ETM_TMMU_DMA_DATA_FIFO_AFUL_TH_T; + +typedef struct dpp_etm_tmmu_tmmu_states_0_t { + ZXIC_UINT32 tm_odma_pkt_rdy; + ZXIC_UINT32 dma_data_fifo_empty; + ZXIC_UINT32 imem_enq_rd_fifo_empty; + ZXIC_UINT32 imem_enq_drop_fifo_empty; + ZXIC_UINT32 imem_deq_rd_fifo_empty; + ZXIC_UINT32 imem_deq_drop_fifo_empty; + ZXIC_UINT32 wr_cmd_fifo_empty; + ZXIC_UINT32 cached_pd_fifo_empty; + ZXIC_UINT32 emem_pd_fifo_empty; + ZXIC_UINT32 pd_order_fifo_empty; + ZXIC_UINT32 odma_tm_data_rdy; + ZXIC_UINT32 odma_tm_discard_rdy; + ZXIC_UINT32 olif_tmmu_rdy; + ZXIC_UINT32 mmu_tm_cmd_wr_rdy; + ZXIC_UINT32 mmu_tm_data_wr_rdy; + ZXIC_UINT32 mmu_tm_rd_rdy; + ZXIC_UINT32 mmu_tm_sop_rd_rdy; + ZXIC_UINT32 qmu_tmmu_sop_data_rdy; + ZXIC_UINT32 tmmu_cmdsw_imem_release_rdy; + ZXIC_UINT32 imem_age_release_rdy; + ZXIC_UINT32 tmmu_qmu_wr_rdy; + ZXIC_UINT32 tmmu_qmu_rdy_7; + ZXIC_UINT32 tmmu_qmu_rdy_6; + ZXIC_UINT32 tmmu_qmu_rdy_5; + ZXIC_UINT32 tmmu_qmu_rdy_4; + ZXIC_UINT32 tmmu_qmu_rdy_3; + ZXIC_UINT32 tmmu_qmu_rdy_2; + ZXIC_UINT32 tmmu_qmu_rdy_1; + ZXIC_UINT32 tmmu_qmu_rdy_0; + ZXIC_UINT32 tmmu_qmu_rd_rdy; + ZXIC_UINT32 tmmu_qmu_sop_rd_rdy; +} DPP_ETM_TMMU_TMMU_STATES_0_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_wr_sop_cnt_t { + ZXIC_UINT32 qmu_tmmu_wr_sop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_WR_SOP_CNT_T; + +typedef struct 
dpp_etm_tmmu_qmu_tmmu_wr_eop_cnt_t { + ZXIC_UINT32 qmu_tmmu_wr_eop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_WR_EOP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_wr_drop_cnt_t { + ZXIC_UINT32 qmu_tmmu_wr_drop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_WR_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_wr_emem_cnt_t { + ZXIC_UINT32 qmu_tmmu_wr_emem_cnt; +} DPP_ETM_TMMU_QMU_TMMU_WR_EMEM_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_wr_imem_cnt_t { + ZXIC_UINT32 qmu_tmmu_wr_imem_cnt; +} DPP_ETM_TMMU_QMU_TMMU_WR_IMEM_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_mmu_wr_sop_cnt_t { + ZXIC_UINT32 tmmu_mmu_wr_sop_cnt; +} DPP_ETM_TMMU_TMMU_MMU_WR_SOP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_mmu_wr_eop_cnt_t { + ZXIC_UINT32 tmmu_mmu_wr_eop_cnt; +} DPP_ETM_TMMU_TMMU_MMU_WR_EOP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_rd_sop_cnt_t { + ZXIC_UINT32 qmu_tmmu_rd_sop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_RD_SOP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_rd_eop_cnt_t { + ZXIC_UINT32 qmu_tmmu_rd_eop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_RD_EOP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_rd_drop_cnt_t { + ZXIC_UINT32 qmu_tmmu_rd_drop_cnt; +} DPP_ETM_TMMU_QMU_TMMU_RD_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_rd_emem_cnt_t { + ZXIC_UINT32 qmu_tmmu_rd_emem_cnt; +} DPP_ETM_TMMU_QMU_TMMU_RD_EMEM_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_tmmu_rd_imem_cnt_t { + ZXIC_UINT32 qmu_tmmu_rd_imem_cnt; +} DPP_ETM_TMMU_QMU_TMMU_RD_IMEM_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_mmu_rd_sop_cnt_t { + ZXIC_UINT32 tmmu_mmu_rd_sop_cnt; +} DPP_ETM_TMMU_TMMU_MMU_RD_SOP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_mmu_rd_eop_cnt_t { + ZXIC_UINT32 tmmu_mmu_rd_eop_cnt; +} DPP_ETM_TMMU_TMMU_MMU_RD_EOP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_in_sop_cnt_t { + ZXIC_UINT32 tmmu_odma_in_sop_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_IN_SOP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_in_eop_cnt_t { + ZXIC_UINT32 tmmu_odma_in_eop_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_IN_EOP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_vld_cnt_t { + 
ZXIC_UINT32 tmmu_odma_vld_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_VLD_CNT_T; + +typedef struct dpp_etm_tmmu_qmu_pd_in_cnt_t { + ZXIC_UINT32 qmu_pd_in_cnt; +} DPP_ETM_TMMU_QMU_PD_IN_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_pd_hit_cnt_t { + ZXIC_UINT32 tmmu_pd_hit_cnt; +} DPP_ETM_TMMU_TMMU_PD_HIT_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_pd_out_cnt_t { + ZXIC_UINT32 tmmu_pd_out_cnt; +} DPP_ETM_TMMU_TMMU_PD_OUT_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_wr_cmd_fifo_wr_cnt_t { + ZXIC_UINT32 tmmu_wr_cmd_fifo_wr_cnt; +} DPP_ETM_TMMU_TMMU_WR_CMD_FIFO_WR_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_imem_age_cnt_t { + ZXIC_UINT32 tmmu_imem_age_cnt; +} DPP_ETM_TMMU_TMMU_IMEM_AGE_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_cmdsch_rd_cnt_t { + ZXIC_UINT32 tmmu_cmdsch_rd_cnt; +} DPP_ETM_TMMU_TMMU_CMDSCH_RD_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_cmdsch_drop_cnt_t { + ZXIC_UINT32 tmmu_cmdsch_drop_cnt; +} DPP_ETM_TMMU_TMMU_CMDSCH_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_cmdsw_drop_cnt_t { + ZXIC_UINT32 tmmu_cmdsw_drop_cnt; +} DPP_ETM_TMMU_TMMU_CMDSW_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_enq_rd_cnt_t { + ZXIC_UINT32 tmmu_odma_enq_rd_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_ENQ_RD_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_enq_drop_cnt_t { + ZXIC_UINT32 tmmu_odma_enq_drop_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_ENQ_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_imem_age_cnt_t { + ZXIC_UINT32 tmmu_odma_imem_age_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_IMEM_AGE_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_deq_rd_cnt_t { + ZXIC_UINT32 tmmu_odma_deq_rd_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_DEQ_RD_CNT_T; + +typedef struct dpp_etm_tmmu_tmmu_odma_deq_drop_cnt_t { + ZXIC_UINT32 tmmu_odma_deq_drop_cnt; +} DPP_ETM_TMMU_TMMU_ODMA_DEQ_DROP_CNT_T; + +typedef struct dpp_etm_tmmu_olif_tmmu_xoff_cnt_t { + ZXIC_UINT32 olif_tmmu_xoff_cnt; +} DPP_ETM_TMMU_OLIF_TMMU_XOFF_CNT_T; + +typedef struct dpp_etm_tmmu_odma_tm_data_xoff_cnt_t { + ZXIC_UINT32 odma_tm_data_xoff_cnt; +} 
DPP_ETM_TMMU_ODMA_TM_DATA_XOFF_CNT_T; + +typedef struct dpp_etm_tmmu_tm_odma_pkt_xoff_cnt_t { + ZXIC_UINT32 tm_odma_pkt_xoff_cnt; +} DPP_ETM_TMMU_TM_ODMA_PKT_XOFF_CNT_T; + +typedef struct dpp_etm_tmmu_tm_state_3_t { + ZXIC_UINT32 tmmu_qmu_rdy_9; + ZXIC_UINT32 tmmu_qmu_rdy_8; +} DPP_ETM_TMMU_TM_STATE_3_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_cmd_t { + ZXIC_UINT32 cfgmt_pd_cache_addr; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_CMD_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_rd_done_t { + ZXIC_UINT32 cfgmt_pd_cache_rd_done; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_RD_DONE_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_rd_data_0_t { + ZXIC_UINT32 cfgmt_pd_cache_rd_data_0; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_0_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_rd_data_1_t { + ZXIC_UINT32 cfgmt_pd_cache_rd_data_1; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_1_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_rd_data_2_t { + ZXIC_UINT32 cfgmt_pd_cache_rd_data_2; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_2_T; + +typedef struct dpp_etm_tmmu_cfgmt_pd_cache_rd_data_3_t { + ZXIC_UINT32 cfgmt_pd_cache_rd_data_3; +} DPP_ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_3_T; + +typedef struct dpp_etm_tmmu_cfgmt_tmmu_to_odma_para_t { + ZXIC_UINT32 cfgmt_tmmu_to_odma_para; +} DPP_ETM_TMMU_CFGMT_TMMU_TO_ODMA_PARA_T; + +typedef struct dpp_etm_tmmu_cfgmt_dma_data_fifo_cnt_t { + ZXIC_UINT32 cfgmt_dma_data_fifo_cnt; +} DPP_ETM_TMMU_CFGMT_DMA_DATA_FIFO_CNT_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit0_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit0_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT0_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit1_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit1_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT1_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit2_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit2_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT2_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit3_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit3_offset; +} 
DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT3_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit4_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit4_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT4_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_tag_bit5_offset_t { + ZXIC_UINT32 cfgmt_cache_tag_bit5_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_TAG_BIT5_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit0_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit0_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT0_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit1_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit1_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT1_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit2_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit2_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT2_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit3_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit3_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT3_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit4_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit4_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT4_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit5_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit5_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT5_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit6_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit6_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT6_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit7_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit7_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT7_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit8_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit8_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT8_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit9_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit9_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT9_OFFSET_T; + +typedef struct 
dpp_etm_tmmu_cfgmt_cache_index_bit10_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit10_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT10_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit11_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit11_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT11_OFFSET_T; + +typedef struct dpp_etm_tmmu_cfgmt_cache_index_bit12_offset_t { + ZXIC_UINT32 cfgmt_cache_index_bit12_offset; +} DPP_ETM_TMMU_CFGMT_CACHE_INDEX_BIT12_OFFSET_T; + +typedef struct dpp_etm_shap_bktfull_fifo_full_flagregister_t { + ZXIC_UINT32 bktfull_fifo_full_flag_core; +} DPP_ETM_SHAP_BKTFULL_FIFO_FULL_FLAGREGISTER_T; + +typedef struct dpp_etm_shap_fifo_full_regregister_t { + ZXIC_UINT32 fifo_full_reg; +} DPP_ETM_SHAP_FIFO_FULL_REGREGISTER_T; + +typedef struct dpp_etm_shap_fifo_empty_regregister_t { + ZXIC_UINT32 fifo_empty_reg; +} DPP_ETM_SHAP_FIFO_EMPTY_REGREGISTER_T; + +typedef struct dpp_etm_shap_fifo_almost_full_regregister_t { + ZXIC_UINT32 fifo_almost_full_reg; +} DPP_ETM_SHAP_FIFO_ALMOST_FULL_REGREGISTER_T; + +typedef struct dpp_etm_shap_fifo_almost_empty_regregister_t { + ZXIC_UINT32 fifo_almost_empty_reg; +} DPP_ETM_SHAP_FIFO_ALMOST_EMPTY_REGREGISTER_T; + +typedef struct dpp_etm_crdt_credit_space_select_t { + ZXIC_UINT32 credit_space_select; +} DPP_ETM_CRDT_CREDIT_SPACE_SELECT_T; + +typedef struct dpp_etm_crdt_stat_space_max_t { + ZXIC_UINT32 stat_space_max; +} DPP_ETM_CRDT_STAT_SPACE_MAX_T; + +typedef struct dpp_etm_crdt_stat_space_min_t { + ZXIC_UINT32 stat_space_min; +} DPP_ETM_CRDT_STAT_SPACE_MIN_T; + +typedef struct dpp_etm_crdt_stat_space_credit_t { + ZXIC_UINT32 stat_space_credit; +} DPP_ETM_CRDT_STAT_SPACE_CREDIT_T; + +typedef struct dpp_etm_crdt_stat_que_step8_credit_t { + ZXIC_UINT32 stat_que_step8_credit; +} DPP_ETM_CRDT_STAT_QUE_STEP8_CREDIT_T; + +typedef struct dpp_etm_crdt_special_que_t { + ZXIC_UINT32 special_que_id; +} DPP_ETM_CRDT_SPECIAL_QUE_T; + +typedef struct dpp_etm_crdt_special_que_credit_t { + ZXIC_UINT32 special_que_credit; +} 
DPP_ETM_CRDT_SPECIAL_QUE_CREDIT_T; + +typedef struct dpp_etm_crdt_lif_congest_credit_cnt_t { + ZXIC_UINT32 lif_congest_credit_cnt; +} DPP_ETM_CRDT_LIF_CONGEST_CREDIT_CNT_T; + +typedef struct dpp_etm_crdt_lif_port_congest_credit_cnt_t { + ZXIC_UINT32 lif_port_congest_credit_cnt; +} DPP_ETM_CRDT_LIF_PORT_CONGEST_CREDIT_CNT_T; + +typedef struct dpp_etm_crdt_crdt_congest_credit_cnt_t { + ZXIC_UINT32 crdt_congest_credit_cnt; +} DPP_ETM_CRDT_CRDT_CONGEST_CREDIT_CNT_T; + +typedef struct dpp_etm_crdt_crdt_port_congest_credit_cnt_t { + ZXIC_UINT32 crdt_port_congest_credit_cnt; +} DPP_ETM_CRDT_CRDT_PORT_CONGEST_CREDIT_CNT_T; + +typedef struct dpp_etm_crdt_congest_port_id_t { + ZXIC_UINT32 congest_port_id; +} DPP_ETM_CRDT_CONGEST_PORT_ID_T; + +typedef struct dpp_etm_crdt_dev_link_control_t { + ZXIC_UINT32 dev_link_control; +} DPP_ETM_CRDT_DEV_LINK_CONTROL_T; + +typedef struct dpp_etm_crdt_crdt_sa_port_rdy_t { + ZXIC_UINT32 crdt_sa_port_rdy; +} DPP_ETM_CRDT_CRDT_SA_PORT_RDY_T; + +typedef struct dpp_etm_crdt_crdt_congest_mode_select_t { + ZXIC_UINT32 crdt_congest_mode_selectr; +} DPP_ETM_CRDT_CRDT_CONGEST_MODE_SELECT_T; + +typedef struct dpp_etm_crdt_fifo_out_all_crs_normal_cnt_t { + ZXIC_UINT32 fifo_out_all_crs_normal_cnt; +} DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNT_T; + +typedef struct dpp_etm_crdt_fifo_out_all_crs_off_cnt_t { + ZXIC_UINT32 fifo_out_all_crs_off_cnt; +} DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNT_T; + +typedef struct dpp_etm_crdt_fifo_out_que_crs_normal_cnt_t { + ZXIC_UINT32 fifo_out_que_crs_normal_cnt; +} DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNT_T; + +typedef struct dpp_etm_crdt_fifo_out_que_crs_off_cnt_t { + ZXIC_UINT32 fifo_out_que_crs_off_cnt; +} DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNT_T; + +typedef struct dpp_etm_crdt_mode_add_60g_t { + ZXIC_UINT32 mode_add_60g; +} DPP_ETM_CRDT_MODE_ADD_60G_T; + +typedef struct dpp_etm_crdt_pp_token_add_t { + ZXIC_UINT32 pp_token_add_cir; +} DPP_ETM_CRDT_PP_TOKEN_ADD_T; + +typedef struct dpp_etm_crdt_pp_cir_token_total_dist_cnt_t 
{ + ZXIC_UINT32 pp_cir_token_total_dist_counter; +} DPP_ETM_CRDT_PP_CIR_TOKEN_TOTAL_DIST_CNT_T; + +typedef struct dpp_etm_crdt_pp_cir_token_total_dec_cnt_t { + ZXIC_UINT32 pp_cir_token_total_dec_counter; +} DPP_ETM_CRDT_PP_CIR_TOKEN_TOTAL_DEC_CNT_T; + +typedef struct dpp_etm_crdt_dev_credit_cnt_t { + ZXIC_UINT32 dev_credit_cnt; +} DPP_ETM_CRDT_DEV_CREDIT_CNT_T; + +typedef struct dpp_etm_crdt_no_credit_cnt1_t { + ZXIC_UINT32 no_credit_cnt1; +} DPP_ETM_CRDT_NO_CREDIT_CNT1_T; + +typedef struct dpp_etm_crdt_no_credit_cnt2_t { + ZXIC_UINT32 no_credit_cnt2; +} DPP_ETM_CRDT_NO_CREDIT_CNT2_T; + +typedef struct dpp_etm_crdt_asm_interval_0_cfg_t { + ZXIC_UINT32 asm_interval_0_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_0_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_1_cfg_t { + ZXIC_UINT32 asm_interval_1_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_1_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_2_cfg_t { + ZXIC_UINT32 asm_interval_2_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_2_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_3_cfg_t { + ZXIC_UINT32 asm_interval_3_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_3_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_4_cfg_t { + ZXIC_UINT32 asm_interval_4_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_4_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_5cfg_t { + ZXIC_UINT32 asm_interval_5_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_5CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_6_cfg_t { + ZXIC_UINT32 asm_interval_6_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_6_CFG_T; + +typedef struct dpp_etm_crdt_asm_interval_7_cfg_t { + ZXIC_UINT32 asm_interval_7_cfg; +} DPP_ETM_CRDT_ASM_INTERVAL_7_CFG_T; + +typedef struct dpp_etm_crdt_crdt_total_congest_mode_cfg_t { + ZXIC_UINT32 crdt_total_congest_mode_cfg; +} DPP_ETM_CRDT_CRDT_TOTAL_CONGEST_MODE_CFG_T; + +typedef struct dpp_etm_crdt_rci_fifo_ini_deep_cfg_t { + ZXIC_UINT32 rci_fifo_ini_deep_cfg; +} DPP_ETM_CRDT_RCI_FIFO_INI_DEEP_CFG_T; + +typedef struct dpp_etm_crdt_crdt_ecc_t { + ZXIC_UINT32 seinfo_wfq_single_ecc_err; + ZXIC_UINT32 
seinfo_wfq_double_ecc_err; + ZXIC_UINT32 seinfo_fq_single_ecc_err; + ZXIC_UINT32 seinfo_fq_double_ecc_err; + ZXIC_UINT32 ecc_bypass; +} DPP_ETM_CRDT_CRDT_ECC_T; + +typedef struct dpp_etm_crdt_ucn_asm_rdy_shield_en_t { + ZXIC_UINT32 ucn_rdy_shield_en; + ZXIC_UINT32 asm_rdy_shield_en; +} DPP_ETM_CRDT_UCN_ASM_RDY_SHIELD_EN_T; + +typedef struct dpp_etm_crdt_ucn_asm_rdy_t { + ZXIC_UINT32 ucn_rdy; + ZXIC_UINT32 asm_rdy; +} DPP_ETM_CRDT_UCN_ASM_RDY_T; + +typedef struct dpp_etm_crdt_rci_grade_t { + ZXIC_UINT32 rci_grade; +} DPP_ETM_CRDT_RCI_GRADE_T; + +typedef struct dpp_etm_crdt_crdt_rci_value_r_t { + ZXIC_UINT32 crdt_rci_value_r; +} DPP_ETM_CRDT_CRDT_RCI_VALUE_R_T; + +typedef struct dpp_etm_crdt_crdt_interval_now_t { + ZXIC_UINT32 crdt_interval_now; +} DPP_ETM_CRDT_CRDT_INTERVAL_NOW_T; + +typedef struct dpp_etm_crdt_crs_sheild_flow_id_cfg_t { + ZXIC_UINT32 crs_sheild_flow_id_cfg; +} DPP_ETM_CRDT_CRS_SHEILD_FLOW_ID_CFG_T; + +typedef struct dpp_etm_crdt_crs_sheild_en_cfg_t { + ZXIC_UINT32 crs_sheild_en_cfg; +} DPP_ETM_CRDT_CRS_SHEILD_EN_CFG_T; + +typedef struct dpp_etm_crdt_crs_sheild_value_cfg_t { + ZXIC_UINT32 crs_sheild_value_cfg; +} DPP_ETM_CRDT_CRS_SHEILD_VALUE_CFG_T; + +typedef struct dpp_etm_crdt_test_token_calc_ctrl_t { + ZXIC_UINT32 test_token_calc_state; + ZXIC_UINT32 test_token_calc_trigger; +} DPP_ETM_CRDT_TEST_TOKEN_CALC_CTRL_T; + +typedef struct dpp_etm_crdt_test_token_sample_cycle_num_t { + ZXIC_UINT32 sample_cycle_num; +} DPP_ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUM_T; + +typedef struct dpp_etm_crdt_q_state_0_7_t { + ZXIC_UINT32 q_token_state_7; + ZXIC_UINT32 q_token_state_6; + ZXIC_UINT32 q_token_state_5; + ZXIC_UINT32 q_token_state_4; + ZXIC_UINT32 q_token_state_3; + ZXIC_UINT32 q_token_state_2; + ZXIC_UINT32 q_token_state_1; + ZXIC_UINT32 q_token_state_0; +} DPP_ETM_CRDT_Q_STATE_0_7_T; + +typedef struct dpp_etm_crdt_q_state_8_15_t { + ZXIC_UINT32 q_token_state_15; + ZXIC_UINT32 q_token_state_14; + ZXIC_UINT32 q_token_state_13; + ZXIC_UINT32 q_token_state_12; 
+ ZXIC_UINT32 q_token_state_11; + ZXIC_UINT32 q_token_state_10; + ZXIC_UINT32 q_token_state_9; + ZXIC_UINT32 q_token_state_8; +} DPP_ETM_CRDT_Q_STATE_8_15_T; + +typedef struct dpp_etm_qmu_csw_csch_rd_cmd_cnt_t { + ZXIC_UINT32 csw_csch_rd_cmd_cnt; +} DPP_ETM_QMU_CSW_CSCH_RD_CMD_CNT_T; + +typedef struct dpp_etm_qmu_csw_csch_rd_sop_cnt_t { + ZXIC_UINT32 csw_csch_rd_sop_cnt; +} DPP_ETM_QMU_CSW_CSCH_RD_SOP_CNT_T; + +typedef struct dpp_etm_qmu_csw_csch_rd_eop_cnt_t { + ZXIC_UINT32 csw_csch_rd_eop_cnt; +} DPP_ETM_QMU_CSW_CSCH_RD_EOP_CNT_T; + +typedef struct dpp_etm_qmu_csw_csch_rd_drop_cnt_t { + ZXIC_UINT32 csw_csch_rd_drop_cnt; +} DPP_ETM_QMU_CSW_CSCH_RD_DROP_CNT_T; + +typedef struct dpp_etm_qmu_csch_mmu_rd_cmd_cnt_t { + ZXIC_UINT32 csch_mmu_rd_cmd_cnt; +} DPP_ETM_QMU_CSCH_MMU_RD_CMD_CNT_T; + +typedef struct dpp_etm_qmu_csch_mmu_rd_sop_cnt_t { + ZXIC_UINT32 csch_mmu_rd_sop_cnt; +} DPP_ETM_QMU_CSCH_MMU_RD_SOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_mmu_rd_eop_cnt_t { + ZXIC_UINT32 csch_mmu_rd_eop_cnt; +} DPP_ETM_QMU_CSCH_MMU_RD_EOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_mmu_rd_drop_cnt_t { + ZXIC_UINT32 csch_mmu_rd_drop_cnt; +} DPP_ETM_QMU_CSCH_MMU_RD_DROP_CNT_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_filter_t { + ZXIC_UINT32 qcfg_qsch_crs_filter; +} DPP_ETM_QMU_QCFG_QSCH_CRS_FILTER_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_force_en_t { + ZXIC_UINT32 qcfg_qsch_crs_force_en; +} DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_EN_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_force_qnum_t { + ZXIC_UINT32 qcfg_qsch_crs_force_qnum; +} DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_QNUM_T; + +typedef struct dpp_etm_qmu_qcfg_qsch_crs_force_crs_t { + ZXIC_UINT32 qcfg_qsch_crs_force_crs; +} DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_CRS_T; + +typedef struct dpp_etm_qmu_cfgmt_oshp_sgmii_shap_mode_t { + ZXIC_UINT32 cfgmt_oshp_sgmii_shap_mode; +} DPP_ETM_QMU_CFGMT_OSHP_SGMII_SHAP_MODE_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_sashap_en_t { + ZXIC_UINT32 cfgmt_qmu_sashap_en; +} 
DPP_ETM_QMU_CFGMT_QMU_SASHAP_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_sashap_token_max_t { + ZXIC_UINT32 cfgmt_sashap_token_max; +} DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MAX_T; + +typedef struct dpp_etm_qmu_cfgmt_sashap_token_min_t { + ZXIC_UINT32 cfgmt_sashap_token_min; +} DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MIN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q3lbaddrate_t { + ZXIC_UINT32 cfg_qsch_q3lbaddrate; +} DPP_ETM_QMU_CFG_QSCH_Q3LBADDRATE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q012lbaddrate_t { + ZXIC_UINT32 cfg_qsch_q012lbaddrate; +} DPP_ETM_QMU_CFG_QSCH_Q012LBADDRATE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q3creditlbmaxcnt_t { + ZXIC_UINT32 cfg_qsch_q3creditlbmaxcnt; +} DPP_ETM_QMU_CFG_QSCH_Q3CREDITLBMAXCNT_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q012creditlbmaxcnt_t { + ZXIC_UINT32 cfg_qsch_q012creditlbmaxcnt; +} DPP_ETM_QMU_CFG_QSCH_Q012CREDITLBMAXCNT_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mul_token_gen_num_t { + ZXIC_UINT32 cfg_qsch_mul_token_gen_num; +} DPP_ETM_QMU_CFG_QSCH_MUL_TOKEN_GEN_NUM_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q3_credit_lb_control_en_t { + ZXIC_UINT32 cfg_qsch_q3_credit_lb_control_en; +} DPP_ETM_QMU_CFG_QSCH_Q3_CREDIT_LB_CONTROL_EN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q012_credit_lb_control_en_t { + ZXIC_UINT32 cfg_qsch_q012_credit_lb_control_en; +} DPP_ETM_QMU_CFG_QSCH_Q012_CREDIT_LB_CONTROL_EN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sp_dwrr_en_t { + ZXIC_UINT32 cfg_qsch_sp_dwrr_en; +} DPP_ETM_QMU_CFG_QSCH_SP_DWRR_EN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q01_attach_en_t { + ZXIC_UINT32 cfg_qsch_q01_attach_en; +} DPP_ETM_QMU_CFG_QSCH_Q01_ATTACH_EN_T; + +typedef struct dpp_etm_qmu_cfg_qsch_w0_t { + ZXIC_UINT32 cfg_qsch_w0; +} DPP_ETM_QMU_CFG_QSCH_W0_T; + +typedef struct dpp_etm_qmu_cfg_qsch_w1_t { + ZXIC_UINT32 cfg_qsch_w1; +} DPP_ETM_QMU_CFG_QSCH_W1_T; + +typedef struct dpp_etm_qmu_cfg_qsch_w2_t { + ZXIC_UINT32 cfg_qsch_w2; +} DPP_ETM_QMU_CFG_QSCH_W2_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktmaxcnt1_t { + ZXIC_UINT32 
cfg_qsch_lkybktmaxcnt1; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTMAXCNT1_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktmaxcnt2_t { + ZXIC_UINT32 cfg_qsch_lkybktmaxcnt2; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTMAXCNT2_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktdcrrate1_t { + ZXIC_UINT32 cfg_qsch_lkybktdcrrate1; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTDCRRATE1_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktdcrrate2_t { + ZXIC_UINT32 cfg_qsch_lkybktdcrrate2; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTDCRRATE2_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktdcrrate3_t { + ZXIC_UINT32 cfg_qsch_lkybktdcrrate3; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTDCRRATE3_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybktmaxcnt3_t { + ZXIC_UINT32 cfg_qsch_lkybktmaxcnt3; +} DPP_ETM_QMU_CFG_QSCH_LKYBKTMAXCNT3_T; + +typedef struct dpp_etm_qmu_cfg_qsch_qmu_mul_auto_sa_version_t { + ZXIC_UINT32 cfg_qsch_qmu_mul_auto_sa_version; +} DPP_ETM_QMU_CFG_QSCH_QMU_MUL_AUTO_SA_VERSION_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_0_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_0; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_1_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_1; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_1_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_2_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_2; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_2_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_3_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_3; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_3_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_4_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_4; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_4_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_5_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_5; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_5_T; + +typedef struct dpp_etm_qmu_cfg_qsch_sa_credit_value_6_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_6; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_6_T; + +typedef struct 
dpp_etm_qmu_cfg_qsch_sa_credit_value_7_t { + ZXIC_UINT32 cfg_qsch_sa_credit_value_7; +} DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_7_T; + +typedef struct dpp_etm_qmu_cfg_qsch_remote_credit_fifo_almost_full_th_t { + ZXIC_UINT32 cfg_qsch_remote_credit_fifo_almost_full_th; +} DPP_ETM_QMU_CFG_QSCH_REMOTE_CREDIT_FIFO_ALMOST_FULL_TH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_auto_credit_fifo_almost_full_th_t { + ZXIC_UINT32 cfg_qsch_auto_credit_fifo_almost_full_th; +} DPP_ETM_QMU_CFG_QSCH_AUTO_CREDIT_FIFO_ALMOST_FULL_TH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q3_credit_fifo_almost_full_th_t { + ZXIC_UINT32 cfg_qsch_q3_credit_fifo_almost_full_th; +} DPP_ETM_QMU_CFG_QSCH_Q3_CREDIT_FIFO_ALMOST_FULL_TH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_q012_credit_fifo_almost_full_th_t { + ZXIC_UINT32 cfg_qsch_q012_credit_fifo_almost_full_th; +} DPP_ETM_QMU_CFG_QSCH_Q012_CREDIT_FIFO_ALMOST_FULL_TH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mul_fc_res_en_t { + ZXIC_UINT32 cfg_qsch_mul_fc_res_en; +} DPP_ETM_QMU_CFG_QSCH_MUL_FC_RES_EN_T; + +typedef struct dpp_etm_qmu_cfgmt_mul_ovf_udf_flg_query_t { + ZXIC_UINT32 qsch_cfg_remote_credit_fifo_full; + ZXIC_UINT32 qsch_cfg_remote_credit_fifo_empty; + ZXIC_UINT32 qsch_cfg_remote_credit_fifo_overflow; + ZXIC_UINT32 qsch_cfg_remote_credit_fifo_underflow; + ZXIC_UINT32 qsch_cfg_auto_credit_fifo_full; + ZXIC_UINT32 qsch_cfg_auto_credit_fifo_empty; + ZXIC_UINT32 qsch_cfg_auto_credit_fifo_overflow; + ZXIC_UINT32 qsch_cfg_auto_credit_fifo_underflow; + ZXIC_UINT32 qsch_cfg_q3_credit_fifo_full; + ZXIC_UINT32 qsch_cfg_q3_credit_fifo_empty; + ZXIC_UINT32 qsch_cfg_q3_credit_fifo_overflow; + ZXIC_UINT32 qsch_cfg_q3_credit_fifo_underflow; + ZXIC_UINT32 qsch_cfg_q012_credit_fifo_full; + ZXIC_UINT32 qsch_cfg_q012_credit_fifo_empty; + ZXIC_UINT32 qsch_cfg_q012_credit_fifo_overflow; + ZXIC_UINT32 qsch_cfg_q012_credit_fifo_underflow; + ZXIC_UINT32 qsch_cfg_lkybktoverflow1; + ZXIC_UINT32 qsch_cfg_lkybktoverflow2; + ZXIC_UINT32 qsch_cfg_lkybktoverflow3; +} 
DPP_ETM_QMU_CFGMT_MUL_OVF_UDF_FLG_QUERY_T; + +typedef struct dpp_etm_qmu_cfgmt_mul_cng_flg_query_t { + ZXIC_UINT32 qsch_cfg_q3cngflag; + ZXIC_UINT32 qsch_cfg_q012cngflag; + ZXIC_UINT32 qsch_cfg_cngflag1; + ZXIC_UINT32 qsch_cfg_cngflag2; + ZXIC_UINT32 qsch_cfg_cngflag3; +} DPP_ETM_QMU_CFGMT_MUL_CNG_FLG_QUERY_T; + +typedef struct dpp_etm_qmu_qsch_cfg_lkybktval1_t { + ZXIC_UINT32 qsch_cfg_lkybktval1; +} DPP_ETM_QMU_QSCH_CFG_LKYBKTVAL1_T; + +typedef struct dpp_etm_qmu_qsch_cfg_lkybktval2_t { + ZXIC_UINT32 qsch_cfg_lkybktval2; +} DPP_ETM_QMU_QSCH_CFG_LKYBKTVAL2_T; + +typedef struct dpp_etm_qmu_qsch_cfg_lkybktval3_t { + ZXIC_UINT32 qsch_cfg_lkybktval3; +} DPP_ETM_QMU_QSCH_CFG_LKYBKTVAL3_T; + +typedef struct dpp_etm_qmu_qsch_cfg_q3lbval_t { + ZXIC_UINT32 qsch_cfg_q3lbval; +} DPP_ETM_QMU_QSCH_CFG_Q3LBVAL_T; + +typedef struct dpp_etm_qmu_qsch_cfg_q012lbval_t { + ZXIC_UINT32 qsch_cfg_q012lbval; +} DPP_ETM_QMU_QSCH_CFG_Q012LBVAL_T; + +typedef struct dpp_etm_qmu_qlist_cfgmt_ram_ecc_err2_t { + ZXIC_UINT32 qlist_imem_pd_ram_single_ecc_err; + ZXIC_UINT32 qlist_imem_pd_ram_double_ecc_err; + ZXIC_UINT32 qlist_imem_up_ptr_ram_single_ecc_err; + ZXIC_UINT32 qlist_imem_up_ptr_ram_double_ecc_err; + ZXIC_UINT32 qlist_imem_down_ptr_ram_single_ecc_err; + ZXIC_UINT32 qlist_imem_down_ptr_ram_double_ecc_err; + ZXIC_UINT32 cmdsw_sop_fifo_single_ecc_err; + ZXIC_UINT32 cmdsw_sop_fifo_double_ecc_err; + ZXIC_UINT32 cmdsw_nsop_fifo_single_ecc_err; + ZXIC_UINT32 cmdsw_nsop_fifo_double_ecc_err; + ZXIC_UINT32 cmdsw_mmudat_fifo_single_ecc_err; + ZXIC_UINT32 cmdsw_mmudat_fifo_double_ecc_err; + ZXIC_UINT32 qlist_rd_release_fwft_single_ecc_err; + ZXIC_UINT32 qlist_rd_release_fwft_double_ecc_err; + ZXIC_UINT32 qlist_drop_imem_fwft_single_ecc_err; + ZXIC_UINT32 qlist_drop_imem_fwft_double_ecc_err; +} DPP_ETM_QMU_QLIST_CFGMT_RAM_ECC_ERR2_T; + +typedef struct dpp_etm_qmu_csch_aged_cmd_cnt_t { + ZXIC_UINT32 csch_aged_cmd_cnt; +} DPP_ETM_QMU_CSCH_AGED_CMD_CNT_T; + +typedef struct 
dpp_etm_qmu_csch_qcfg_csch_congest_cnt_t { + ZXIC_UINT32 csch_qcfg_csch_congest_cnt; +} DPP_ETM_QMU_CSCH_QCFG_CSCH_CONGEST_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_qlist_csch_sop_cnt_t { + ZXIC_UINT32 csch_qcfg_qlist_csch_sop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_QLIST_CSCH_SOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_qlist_csch_eop_cnt_t { + ZXIC_UINT32 csch_qcfg_qlist_csch_eop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_QLIST_CSCH_EOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_csch_csw_sop_cnt_t { + ZXIC_UINT32 csch_qcfg_csch_csw_sop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_CSCH_CSW_SOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_csch_csw_eop_cnt_t { + ZXIC_UINT32 csch_qcfg_csch_csw_eop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_CSCH_CSW_EOP_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_qlist_csch_drop_cnt_t { + ZXIC_UINT32 csch_qcfg_qlist_csch_drop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_QLIST_CSCH_DROP_CNT_T; + +typedef struct dpp_etm_qmu_csch_qcfg_csch_csw_drop_cnt_t { + ZXIC_UINT32 csch_qcfg_csch_csw_drop_cnt; +} DPP_ETM_QMU_CSCH_QCFG_CSCH_CSW_DROP_CNT_T; + +typedef struct dpp_etm_qmu_csw_mmu_sop_cmd_cnt_t { + ZXIC_UINT32 csw_mmu_sop_cmd_cnt; +} DPP_ETM_QMU_CSW_MMU_SOP_CMD_CNT_T; + +typedef struct dpp_etm_qmu_mmu_csw_sop_data_cnt_t { + ZXIC_UINT32 mmu_csw_sop_data_cnt; +} DPP_ETM_QMU_MMU_CSW_SOP_DATA_CNT_T; + +typedef struct dpp_etm_qmu_csw_qsch_feedb_cnt_t { + ZXIC_UINT32 csw_qsch_feedb_cnt; +} DPP_ETM_QMU_CSW_QSCH_FEEDB_CNT_T; + +typedef struct dpp_etm_qmu_qmu_crdt_port_fc_cnt_t { + ZXIC_UINT32 qmu_crdt_port_fc_cnt; +} DPP_ETM_QMU_QMU_CRDT_PORT_FC_CNT_T; + +typedef struct dpp_etm_qmu_csch_r_block_cnt_t { + ZXIC_UINT32 csch_r_block_cnt; +} DPP_ETM_QMU_CSCH_R_BLOCK_CNT_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_qds_head_rd_t { + ZXIC_UINT32 qcfg_qlist_qds_head_rd; +} DPP_ETM_QMU_QCFG_QLIST_QDS_HEAD_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_qds_tail_rd_t { + ZXIC_UINT32 qcfg_qlist_qds_tail_rd; +} DPP_ETM_QMU_QCFG_QLIST_QDS_TAIL_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_ept_rd_t { + 
ZXIC_UINT32 qcfg_qlist_ept_rd; +} DPP_ETM_QMU_QCFG_QLIST_EPT_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_age_flag_rd_t { + ZXIC_UINT32 qcfg_qlist_age_flag_rd; +} DPP_ETM_QMU_QCFG_QLIST_AGE_FLAG_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_cti_rd_t { + ZXIC_UINT32 qcfg_qlist_cti_rd; +} DPP_ETM_QMU_QCFG_QLIST_CTI_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_cto_rd_t { + ZXIC_UINT32 qcfg_qlist_cto_rd; +} DPP_ETM_QMU_QCFG_QLIST_CTO_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_chk_rd_t { + ZXIC_UINT32 qcfg_qlist_chk_rd; +} DPP_ETM_QMU_QCFG_QLIST_CHK_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_nod_rd_t { + ZXIC_UINT32 qcfg_qlist_nod_rd; +} DPP_ETM_QMU_QCFG_QLIST_NOD_RD_T; + +typedef struct dpp_etm_qmu_qcfg_qlist_biu_rd_t { + ZXIC_UINT32 qcfg_qlist_biu_rd; +} DPP_ETM_QMU_QCFG_QLIST_BIU_RD_T; + +typedef struct dpp_etm_qmu_qsch_r_wlist_flag_t { + ZXIC_UINT32 qsch_r_wlist_flag; +} DPP_ETM_QMU_QSCH_R_WLIST_FLAG_T; + +typedef struct dpp_etm_qmu_qcfg_crs_flg_rd_t { + ZXIC_UINT32 qcfg_crs_flg_rd; +} DPP_ETM_QMU_QCFG_CRS_FLG_RD_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_age_qds_t { + ZXIC_UINT32 cfgmt_qmu_imem_tp; + ZXIC_UINT32 cfgmt_qmu_imem_hp; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_AGE_QDS_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_age_qlen_t { + ZXIC_UINT32 cfgmt_qmu_imem_no_empty; + ZXIC_UINT32 cfgmt_qmu_imem_qlen; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_AGE_QLEN_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_pd_ram_low_t { + ZXIC_UINT32 cfgmt_qmu_imem_pd_ram_low; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_LOW_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_pd_ram_high_t { + ZXIC_UINT32 cfgmt_qmu_imem_pd_ram_high; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_HIGH_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_up_ptr_t { + ZXIC_UINT32 cfgmt_qmu_imem_up_ptr; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_UP_PTR_T; + +typedef struct dpp_etm_qmu_cfgmt_qmu_imem_down_ptr_t { + ZXIC_UINT32 cfgmt_qmu_imem_down_ptr; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_DOWN_PTR_T; + +typedef struct 
dpp_etm_qmu_cfgmt_qmu_imem_age_flag_t { + ZXIC_UINT32 cfgmt_qmu_imem_age_flag; +} DPP_ETM_QMU_CFGMT_QMU_IMEM_AGE_FLAG_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybkt2cngth_t { + ZXIC_UINT32 cfg_qsch_lkybkt2cngth; +} DPP_ETM_QMU_CFG_QSCH_LKYBKT2CNGTH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybkt1cngth_t { + ZXIC_UINT32 cfg_qsch_lkybkt1cngth; +} DPP_ETM_QMU_CFG_QSCH_LKYBKT1CNGTH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_lkybkt3cngth_t { + ZXIC_UINT32 cfg_qsch_lkybkt3cngth; +} DPP_ETM_QMU_CFG_QSCH_LKYBKT3CNGTH_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn1_credit_value_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_credit_value; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN1_CREDIT_VALUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn2_credit_value_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_credit_value; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN2_CREDIT_VALUE_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn3_credit_value_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_credit_value; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN3_CREDIT_VALUE_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_ansr_seed_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_ansr_seed; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_ANSR_SEED_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_ansr_seed_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_ansr_seed; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_ANSR_SEED_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_rand_ansr_seed_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_ansr_seed; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_ANSR_SEED_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_ansr_th_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_ansr_th; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_ANSR_TH_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_ansr_th_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_ansr_th; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_ANSR_TH_T; + +typedef struct 
dpp_etm_qmu_rm_mul_mcn3_rand_ansr_th_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_ansr_th; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_ANSR_TH_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_hold_base_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_hold_base; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_HOLD_BASE_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_hold_base_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_hold_base; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_HOLD_BASE_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_rand_hold_base_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_mchsm_en; + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_hold_base; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_HOLD_BASE_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_sel_mask_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_sel_mask; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_SEL_MASK_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_sel_mask_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_sel_mask; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_SEL_MASK_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_rand_sel_mask_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_rand_sel_mask; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_SEL_MASK_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg0_t { + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed7; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed6; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed5; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed4; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed3; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed2; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed1; + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed0; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG0_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg1_t { + ZXIC_UINT32 rm_mul_mcn1_rand_sel_seed8; +} DPP_ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg0_t { + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed7; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed6; + ZXIC_UINT32 
rm_mul_mcn2_rand_sel_seed5; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed4; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed3; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed2; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed1; + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed0; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG0_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg1_t { + ZXIC_UINT32 rm_mul_mcn2_rand_sel_seed8; +} DPP_ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg0_t { + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed7; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed6; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed5; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed4; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed3; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed2; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed1; + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed0; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG0_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg1_t { + ZXIC_UINT32 rm_mul_mcn3_rand_sel_seed8; +} DPP_ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th1_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th1; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th2_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th2; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH2_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th3_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th3; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH3_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th4_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th4; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH4_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th5_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th5; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH5_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn1_step_wait_th6_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th6; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH6_T; + +typedef struct 
dpp_etm_qmu_rm_mul_mcn1_step_wait_th7_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_step_wait_th7; +} DPP_ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH7_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th1_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th1; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th2_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th2; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH2_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th3_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th3; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH3_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th4_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th4; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH4_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th5_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th5; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH5_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th6_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th6; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH6_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn2_step_wait_th7_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_step_wait_th7; +} DPP_ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH7_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th1_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th1; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH1_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th2_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th2; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH2_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th3_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th3; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH3_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th4_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th4; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH4_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th5_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th5; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH5_T; + +typedef 
struct dpp_etm_qmu_rm_mul_mcn3_step_wait_th6_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th6; +} DPP_ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH6_T; + +typedef struct dpp_etm_qmu_rm_mul_mcn3step_wait_th7_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn3_step_wait_th7; +} DPP_ETM_QMU_RM_MUL_MCN3STEP_WAIT_TH7_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate0_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate0; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE0_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate1_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate1; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE1_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate2_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate2; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE2_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate3_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate3; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE3_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate4_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate4; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE4_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate5_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate5; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE5_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate6_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate6; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE6_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate7_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate7; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE7_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate8_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate8; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE8_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate9_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate9; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE9_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate10_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate10; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE10_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate11_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate11; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE11_T; + +typedef struct 
dpp_etm_qmu_cfg_qsch_mulcrdcntrate12_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate12; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE12_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate13_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate13; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE13_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate14_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate14; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE14_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate15_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate15; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE15_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate16_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate16; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE16_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate17_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate17; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE17_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate18_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate18; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE18_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate19_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate19; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE19_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate20_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate20; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE20_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate21_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate21; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE21_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate22_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate22; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE22_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate23_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate23; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE23_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate24_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate24; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE24_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate25_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate25; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE25_T; + +typedef struct 
dpp_etm_qmu_cfg_qsch_mulcrdcntrate26_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate26; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE26_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate27_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate27; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE27_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate28_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate28; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE28_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate29_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate29; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE29_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate30_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate30; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE30_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate31_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate31; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE31_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate32_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate32; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE32_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate33_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate33; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE33_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate34_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate34; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE34_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate35_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate35; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE35_T; + +typedef struct dpp_etm_qmu_cfg_qsch_mulcrdcntrate36_t { + ZXIC_UINT32 cfg_qsch_mulcrdcntrate36; +} DPP_ETM_QMU_CFG_QSCH_MULCRDCNTRATE36_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn1_rand_hold_shift_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn1_rand_hold_shift; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN1_RAND_HOLD_SHIFT_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn2_rand_hold_shift_t { + ZXIC_UINT32 cfg_qsch_rm_mul_mcn2_rand_hold_shift; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN2_RAND_HOLD_SHIFT_T; + +typedef struct dpp_etm_qmu_cfg_qsch_rm_mul_mcn3_rand_hold_shift_t { + ZXIC_UINT32 
cfg_qsch_rm_mul_mcn3_rand_hold_shift; +} DPP_ETM_QMU_CFG_QSCH_RM_MUL_MCN3_RAND_HOLD_SHIFT_T; + +typedef struct dpp_etm_qmu_last_drop_qnum_get_t { + ZXIC_UINT32 cgavd_qmu_drop_tap; + ZXIC_UINT32 last_drop_qnum; +} DPP_ETM_QMU_LAST_DROP_QNUM_GET_T; + +typedef struct dpp_etm_qmu_crdt_qmu_credit_cnt_t { + ZXIC_UINT32 crdt_qmu_credit_cnt; +} DPP_ETM_QMU_CRDT_QMU_CREDIT_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_qsch_report_cnt_t { + ZXIC_UINT32 qmu_to_qsch_report_cnt; +} DPP_ETM_QMU_QMU_TO_QSCH_REPORT_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_cgavd_report_cnt_t { + ZXIC_UINT32 qmu_to_cgavd_report_cnt; +} DPP_ETM_QMU_QMU_TO_CGAVD_REPORT_CNT_T; + +typedef struct dpp_etm_qmu_qmu_crdt_crs_normal_cnt_t { + ZXIC_UINT32 qmu_crdt_crs_normal_cnt; +} DPP_ETM_QMU_QMU_CRDT_CRS_NORMAL_CNT_T; + +typedef struct dpp_etm_qmu_qmu_crdt_crs_off_cnt_t { + ZXIC_UINT32 qmu_crdt_crs_off_cnt; +} DPP_ETM_QMU_QMU_CRDT_CRS_OFF_CNT_T; + +typedef struct dpp_etm_qmu_qsch_qlist_shedule_cnt_t { + ZXIC_UINT32 qsch_qlist_shedule_cnt; +} DPP_ETM_QMU_QSCH_QLIST_SHEDULE_CNT_T; + +typedef struct dpp_etm_qmu_qsch_qlist_sch_ept_cnt_t { + ZXIC_UINT32 qsch_qlist_sch_ept_cnt; +} DPP_ETM_QMU_QSCH_QLIST_SCH_EPT_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_mmu_blk_wr_cnt_t { + ZXIC_UINT32 qmu_to_mmu_blk_wr_cnt; +} DPP_ETM_QMU_QMU_TO_MMU_BLK_WR_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_csw_blk_rd_cnt_t { + ZXIC_UINT32 qmu_to_csw_blk_rd_cnt; +} DPP_ETM_QMU_QMU_TO_CSW_BLK_RD_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_mmu_sop_wr_cnt_t { + ZXIC_UINT32 qmu_to_mmu_sop_wr_cnt; +} DPP_ETM_QMU_QMU_TO_MMU_SOP_WR_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_mmu_eop_wr_cnt_t { + ZXIC_UINT32 qmu_to_mmu_eop_wr_cnt; +} DPP_ETM_QMU_QMU_TO_MMU_EOP_WR_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_mmu_drop_wr_cnt_t { + ZXIC_UINT32 qmu_to_mmu_drop_wr_cnt; +} DPP_ETM_QMU_QMU_TO_MMU_DROP_WR_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_csw_sop_rd_cnt_t { + ZXIC_UINT32 qmu_to_csw_sop_rd_cnt; +} DPP_ETM_QMU_QMU_TO_CSW_SOP_RD_CNT_T; + 
+typedef struct dpp_etm_qmu_qmu_to_csw_eop_rd_cnt_t { + ZXIC_UINT32 qmu_to_csw_eop_rd_cnt; +} DPP_ETM_QMU_QMU_TO_CSW_EOP_RD_CNT_T; + +typedef struct dpp_etm_qmu_qmu_to_csw_drop_rd_cnt_t { + ZXIC_UINT32 qmu_to_csw_drop_rd_cnt; +} DPP_ETM_QMU_QMU_TO_CSW_DROP_RD_CNT_T; + +typedef struct dpp_etm_qmu_mmu_to_qmu_wr_release_cnt_t { + ZXIC_UINT32 mmu_to_qmu_wr_release_cnt; +} DPP_ETM_QMU_MMU_TO_QMU_WR_RELEASE_CNT_T; + +typedef struct dpp_etm_qmu_mmu_to_qmu_rd_release_cnt_t { + ZXIC_UINT32 mmu_to_qmu_rd_release_cnt; +} DPP_ETM_QMU_MMU_TO_QMU_RD_RELEASE_CNT_T; + +typedef struct dpp_etm_qmu_observe_qnum_set_t { + ZXIC_UINT32 observe_qnum_set; +} DPP_ETM_QMU_OBSERVE_QNUM_SET_T; + +typedef struct dpp_etm_qmu_spec_q_pkt_received_t { + ZXIC_UINT32 spec_q_pkt_received; +} DPP_ETM_QMU_SPEC_Q_PKT_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_q_pkt_dropped_t { + ZXIC_UINT32 spec_q_pkt_dropped; +} DPP_ETM_QMU_SPEC_Q_PKT_DROPPED_T; + +typedef struct dpp_etm_qmu_spec_q_pkt_scheduled_t { + ZXIC_UINT32 spec_q_pkt_scheduled; +} DPP_ETM_QMU_SPEC_Q_PKT_SCHEDULED_T; + +typedef struct dpp_etm_qmu_spec_q_wr_cmd_sent_t { + ZXIC_UINT32 spec_q_wr_cmd_sent; +} DPP_ETM_QMU_SPEC_Q_WR_CMD_SENT_T; + +typedef struct dpp_etm_qmu_spec_q_rd_cmd_sent_t { + ZXIC_UINT32 spec_q_rd_cmd_sent; +} DPP_ETM_QMU_SPEC_Q_RD_CMD_SENT_T; + +typedef struct dpp_etm_qmu_spec_q_pkt_enq_t { + ZXIC_UINT32 spec_q_pkt_enq; +} DPP_ETM_QMU_SPEC_Q_PKT_ENQ_T; + +typedef struct dpp_etm_qmu_spec_q_pkt_deq_t { + ZXIC_UINT32 spec_q_pkt_deq; +} DPP_ETM_QMU_SPEC_Q_PKT_DEQ_T; + +typedef struct dpp_etm_qmu_spec_q_crdt_uncon_received_t { + ZXIC_UINT32 spec_q_crdt_uncon_received; +} DPP_ETM_QMU_SPEC_Q_CRDT_UNCON_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_q_crdt_cong_received_t { + ZXIC_UINT32 spec_q_crdt_cong_received; +} DPP_ETM_QMU_SPEC_Q_CRDT_CONG_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_q_crs_normal_cnt_t { + ZXIC_UINT32 spec_q_crs_normal_cnt; +} DPP_ETM_QMU_SPEC_Q_CRS_NORMAL_CNT_T; + +typedef struct 
dpp_etm_qmu_spec_q_crs_off_cnt_t { + ZXIC_UINT32 spec_q_crs_off_cnt; +} DPP_ETM_QMU_SPEC_Q_CRS_OFF_CNT_T; + +typedef struct dpp_etm_qmu_observe_batch_set_t { + ZXIC_UINT32 observe_batch_set; +} DPP_ETM_QMU_OBSERVE_BATCH_SET_T; + +typedef struct dpp_etm_qmu_spec_bat_pkt_received_t { + ZXIC_UINT32 spec_bat_pkt_received; +} DPP_ETM_QMU_SPEC_BAT_PKT_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_bat_pkt_dropped_t { + ZXIC_UINT32 spec_bat_pkt_dropped; +} DPP_ETM_QMU_SPEC_BAT_PKT_DROPPED_T; + +typedef struct dpp_etm_qmu_spec_bat_blk_scheduled_t { + ZXIC_UINT32 spec_bat_blk_scheduled; +} DPP_ETM_QMU_SPEC_BAT_BLK_SCHEDULED_T; + +typedef struct dpp_etm_qmu_spec_bat_wr_cmd_sent_t { + ZXIC_UINT32 spec_bat_wr_cmd_sent; +} DPP_ETM_QMU_SPEC_BAT_WR_CMD_SENT_T; + +typedef struct dpp_etm_qmu_spec_bat_rd_cmd_sent_t { + ZXIC_UINT32 spec_bat_rd_cmd_sent; +} DPP_ETM_QMU_SPEC_BAT_RD_CMD_SENT_T; + +typedef struct dpp_etm_qmu_spec_bat_pkt_enq_t { + ZXIC_UINT32 spec_bat_pkt_enq; +} DPP_ETM_QMU_SPEC_BAT_PKT_ENQ_T; + +typedef struct dpp_etm_qmu_spec_bat_pkt_deq_t { + ZXIC_UINT32 spec_bat_pkt_deq; +} DPP_ETM_QMU_SPEC_BAT_PKT_DEQ_T; + +typedef struct dpp_etm_qmu_spec_bat_crdt_uncon_received_t { + ZXIC_UINT32 spec_bat_crdt_uncon_received; +} DPP_ETM_QMU_SPEC_BAT_CRDT_UNCON_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_bat_crdt_cong_received_t { + ZXIC_UINT32 spec_bat_crdt_cong_received; +} DPP_ETM_QMU_SPEC_BAT_CRDT_CONG_RECEIVED_T; + +typedef struct dpp_etm_qmu_spec_bat_crs_normal_cnt_t { + ZXIC_UINT32 spec_bat_crs_normal_cnt; +} DPP_ETM_QMU_SPEC_BAT_CRS_NORMAL_CNT_T; + +typedef struct dpp_etm_qmu_spec_bat_crs_off_cnt_t { + ZXIC_UINT32 spec_bat_crs_off_cnt; +} DPP_ETM_QMU_SPEC_BAT_CRS_OFF_CNT_T; + +typedef struct dpp_etm_qmu_bcntm_ovfl_qnum_get_t { + ZXIC_UINT32 bcntm_ovfl_qnum_get; +} DPP_ETM_QMU_BCNTM_OVFL_QNUM_GET_T; + +typedef struct dpp_etm_qmu_crbal_a_ovf_qnum_get_t { + ZXIC_UINT32 crbal_a_ovf_qnum_get; +} DPP_ETM_QMU_CRBAL_A_OVF_QNUM_GET_T; + +typedef struct 
dpp_etm_qmu_crbal_b_ovf_qnum_get_t { + ZXIC_UINT32 crbal_b_ovf_qnum_get; +} DPP_ETM_QMU_CRBAL_B_OVF_QNUM_GET_T; + +typedef struct dpp_etm_qmu_crbal_drop_qnum_get_t { + ZXIC_UINT32 crbal_drop_qnum_get; +} DPP_ETM_QMU_CRBAL_DROP_QNUM_GET_T; + +typedef struct dpp_etm_qmu_deq_flg_report_cnt_t { + ZXIC_UINT32 deq_flg_report_cnt; +} DPP_ETM_QMU_DEQ_FLG_REPORT_CNT_T; + +typedef struct dpp_etm_qmu_spec_q_crs_get_t { + ZXIC_UINT32 spec_q_crs_get; +} DPP_ETM_QMU_SPEC_Q_CRS_GET_T; + +typedef struct dpp_etm_qmu_spec_q_crs_in_get_t { + ZXIC_UINT32 spec_q_crs_in_get; +} DPP_ETM_QMU_SPEC_Q_CRS_IN_GET_T; + +typedef struct dpp_etm_qmu_spec_q_crs_flg_csol_get_t { + ZXIC_UINT32 spec_q_crs_flg_csol_get; +} DPP_ETM_QMU_SPEC_Q_CRS_FLG_CSOL_GET_T; + +typedef struct dpp_etm_qmu_ept_sch_qnum_get_t { + ZXIC_UINT32 ept_sch_qnum_get; +} DPP_ETM_QMU_EPT_SCH_QNUM_GET_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_mem_info.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_mem_info.h new file mode 100644 index 000000000000..03d471087ea6 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_mem_info.h @@ -0,0 +1,60 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_mem.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/03/20 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_MEM_H_ +#define _DPP_MEM_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum dpp_mem_no_e { + PKTRX_PHYPORT_UDF_ATTRIm = 0, + PKTRX_PHYPORT_HDW_ATTRIm = 1, + PKTRX_PHYPORT_FLOW_PCm = 2, + PKTRX_ICU_TCAMm = 3, + PKTRX_FLOWNUM_TCAMm = 4, +} DPP_MEM_NO_E; + +typedef struct dpp_mem_field_t { + ZXIC_SINT8 *p_name; /* 字段名 */ + ZXIC_UINT32 flags; /* 标志位 */ + ZXIC_UINT16 msb_pos; /* 
最高比特位置,以寄存器列表为准*/ + ZXIC_UINT16 len; /* 字段长度,以比特为单位 */ +} DPP_MEM_FIELD_T; + +#define DPP_MEM_FLAG_TCAM (1 << 0) /* Tcam类型的表 */ +typedef struct dpp_mem_info_t { + ZXIC_UINT32 mem_no; + ZXIC_UINT32 module_no; + ZXIC_UINT32 flags; + ZXIC_UINT32 mem_id; /* 模块内部的mem标识 */ + ZXIC_UINT32 index_min; /* 最小索引值 */ + ZXIC_UINT32 index_max; /* 最大索引值 */ + ZXIC_UINT32 width; /* 表项宽度,以字节为单位 */ + + DPP_MEM_FIELD_T *p_fileds; /* 表项所有字段 */ + +} DPP_MEM_INFO_T; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_module.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_module.h new file mode 100644 index 000000000000..202bbc8facb9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_module.h @@ -0,0 +1,212 @@ +#ifndef _DPP_MODULE_H_ +#define _DPP_MODULE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_type_api.h" + +/** for PPU*/ +#ifdef DPP_TEST_BAORD_SSP2T +#define DPP_PPU_CLUSTER_NUM (1) +#else +#define DPP_PPU_CLUSTER_NUM (6) +#endif + +#define DPP_PHYPORT_NUM (119) /** 物理端口号个数(0-118) */ + +#define DPP_PPU_ME_EXCEPTION_MAX (8) +#define DPP_PPU_CLUSTER_MEPERCLS_NUM (8) +#define DPP_PPU_CLUSTER_SPACE_SIZE (0x00008000) +#define DPP_MAX_CHCHK_NUM (50) + +#define DPP_TRPG_PORT_NUM (32) +#define DPP_TRPG_PORT_SPACE_SIZE (0x10000) + +#define DPP_TRPG_RAM_NUM (16) +#define DPP_TRPG_RAM_SPACE_SIZE (0x10000) + +#define DPP_TSN_PORT_NUM (4) +#define DPP_TSN_PORT_SPACE_SIZE (0x4000) + +typedef enum dpp_module_e { + CFG = 1, /**< @brief 1*/ + NPPU, /**< @brief 2*/ + PPU, /**< @brief 3*/ + ETM, /**< @brief 4*/ + STAT, /**< @brief 5*/ + CAR, /**< @brief 6*/ + SE, /**< @brief 7*/ + SMMU0 = SE, /**< @brief 7*/ + SMMU1 = SE, /**< @brief 7*/ + DTB, /**< @brief 8*/ + TRPG, /**< @brief 9*/ + TSN, /**< @brief 10*/ + AXI, /**< @brief 11*/ + PTPTM, /**< @brief 12*/ + DTB4K, /**< @brief 13*/ + STAT4K, /**< @brief 14*/ + PPU4K, /**< @brief 15*/ 
+ SE4K, /**< @brief 16*/ + SMMU14K, /**< @brief 17*/ + MODULE_MAX +} DPP_MODULE_E; + +typedef enum module_tm_e { + MODULE_TM_CFGMT = 0, + MODULE_TM_OLIF = 1, + MODULE_TM_CGAVD = 2, + MODULE_TM_TMMU = 3, + MODULE_TM_SHAP = 4, + MODULE_TM_CRDT = 5, + MODULE_TM_QMU = 6, + MODULE_TM_MAX +} MODULE_TM_E; + +/*NP 在PICE中的偏移地址*/ +#define SYS_VF_NP_BASE_OFFSET 0X0000000000 +/*NP 在DPU中的基地址*/ + +#if DPP_DEV_VPCI_EN +#define SYS_NP_BASE_ADDR 0x6300000000 +#define SYS_NP_BASE_ADDR0 0x14000000 +#define SYS_NP_BASE_ADDR1 0x16000000 +#else +#define SYS_NP_BASE_ADDR 0x6218000000 +#define SYS_NP_BASE_ADDR0 0x00000000 +#define SYS_NP_BASE_ADDR1 0x02000000 +#endif + +/** sub system base address*/ +typedef enum sys_base_addr_e { + SYS_NPPU_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00000000), + SYS_PPU_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00080000), + SYS_ETM_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00180000), + SYS_STAT_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00200000), + SYS_SE_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00280000), + SYS_SE_SMMU0_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00300000), + SYS_SE_SMMU1_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00310000), + // SYS_TRPG_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00320000), + SYS_CFG_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00330000), + SYS_PTP0_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00340000), + SYS_PTP1_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00344000), + SYS_TSN_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00350000), + SYS_TRPG_BASE_ADDR = (SYS_NP_BASE_ADDR0 + 0x00400000), + + SYS_DTB_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x00000000), + SYS_AXIM0_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x01400000), + SYS_AXIM1_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x01408000), + SYS_AXI_CONV_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x01410000), + SYS_AXIS_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x01418000), + SYS_TLB_BASE_ADDR = (SYS_NP_BASE_ADDR1 + 0x01420000), + + SYS_MAX_BASE_ADDR = 0x20000000, +} SYS_BASE_ADDR_E; + +/** module base address*/ +typedef enum module_base_addr_e { + /* CFG */ + MODULE_CFG_PCIE_BASE_ADDR = 0x00000000, + 
MODULE_CFG_DMA_BASE_ADDR = 0x00001000, + MODULE_CFG_CSR_BASE_ADDR = 0x00003000, + + /* NPPU */ + MODULE_NPPU_MR_CFG_BASE_ADDR = 0x00000000, + MODULE_NPPU_PKTRX_CFG_BASE_ADDR = 0x00000800, + MODULE_NPPU_PKTRX_STAT_BASE_ADDR = 0x00001000, + MODULE_NPPU_IDMA_CFG_BASE_ADDR = 0x00001800, + MODULE_NPPU_IDMA_STAT_BASE_ADDR = 0x00002000, + MODULE_NPPU_PBU_CFG_BASE_ADDR = 0x00002800, + MODULE_NPPU_PBU_STAT_BASE_ADDR = 0x00003000, + MODULE_NPPU_ISU_CFG_BASE_ADDR = 0x00003800, + MODULE_NPPU_ISU_STAT_BASE_ADDR = 0x00004000, + MODULE_NPPU_ODMA_CFG_BASE_ADDR = 0x00004800, + MODULE_NPPU_ODMA_STAT_BASE_ADDR = 0x00005000, + MODULE_NPPU_OAM_CFG_BASE_ADDR = 0x00005800, + MODULE_NPPU_OAM_STAT_BASE_ADDR = 0x00006000, + MODULE_NPPU_OAM_INT_IDX0_BASE_ADDR = 0x00006800, + MODULE_NPPU_OAM_INT_IDX1_BASE_ADDR = 0x00007000, + + /* PPU */ + MODULE_PPU_CSR_BASE_ADDR = 0x00000000, + MODULE_PPU_DBG_BASE_ADDR = 0x00000800, + MODULE_CLUSTER0_BASE_ADDR = 0x00008000, + MODULE_CLUSTER1_BASE_ADDR = 0x00010000, + MODULE_CLUSTER2_BASE_ADDR = 0x00018000, + MODULE_CLUSTER3_BASE_ADDR = 0x00020000, + + /* TM */ + MODULE_TM_CFGMT_BASE_ADDR = 0x00000000, + MODULE_TM_OLIF_BASE_ADDR = 0x00020000, + MODULE_TM_CGAVD_BASE_ADDR = 0x00030000, + MODULE_TM_TMMU_BASE_ADDR = 0x00040000, + MODULE_TM_SHAP_BASE_ADDR = 0x00050000, + MODULE_TM_CRDT_BASE_ADDR = 0x00060000, + MODULE_TM_QMU_BASE_ADDR = 0x00070000, + + /* STAT */ + MODULE_STAT_CAR0_BASE_ADDR = 0x00000000, + MODULE_STAT_ETCAM_BASE_ADDR = 0x00002000, + MODULE_STAT_GLBL_BASE_ADDR = 0x00003000, + + /* SE */ + MODULE_SE_ALG_BASE_ADDR = 0x00000000, + MODULE_SE_KSCHD_BASE_ADDR = 0x00004000, + MODULE_SE_RSCHD_BASE_ADDR = 0x00008000, + MODULE_SE_PARSER_BASE_ADDR = 0x0000c000, + MODULE_SE_AS_BASE_ADDR = 0x00010000, + MODULE_SE_CFG_BASE_ADDR = 0x00014000, + + /* SMMU0 */ + MODULE_SE_SMMU0_BASE_ADDR = 0x00000000, + + /* SMMU1 */ + MODULE_SE_SMMU1_BASE_ADDR = 0x00000000, + MODULE_SE_CMMU_BASE_ADDR = 0x00004000, + + /* DTB */ + MODULE_DTB_ENQ_BASE_ADDR = 0x00000000, + 
MODULE_DTB_CFG_BASE_ADDR = 0x01000000, + MODULE_DTB_DDOS_BASE_ADDR = 0x01010000, + MODULE_DTB_RAM_BASE_ADDR = 0x01100000, + + /* TRPG */ + MODULE_TRPG_RX_BASE_ADDR = 0x00000000, + MODULE_TRPG_TX_BASE_ADDR = 0x00400000, + MODULE_TRPG_TX_GLB_BASE_ADDR = 0x00600000, + MODULE_TRPG_TX_ETM_PORT_BASE_ADDR = 0x00610000, + MODULE_TRPG_RX_RAM_BASE_ADDR = 0x00200000, + MODULE_TRPG_TX_RAM_BASE_ADDR = 0x00620000, + MODULE_TRPG_TX_ETM_RAM_BASE_ADDR = 0x00710000, + MODULE_TRPG_TX_TODTIME_RAM_BASE_ADDR = 0x00720000, + + /* TSN */ + MODULE_TSN_PORT0_BASE_ADDR = 0x00000000, + MODULE_TSN_PORT1_BASE_ADDR = 0x00004000, + MODULE_TSN_PORT2_BASE_ADDR = 0x00008000, + MODULE_TSN_PORT3_BASE_ADDR = 0x0000C000, + +} MODULE_BASE_ADDR_E; + +DPP_STATUS dpp_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); + +DPP_STATUS dpp_se_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_se_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); + +DPP_STATUS dpp_se_alg_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data); +DPP_STATUS dpp_se_alg_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data); + +DPP_STATUS dpp_ppu_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); +DPP_STATUS dpp_ppu_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_nppu_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_nppu_reg.h new file mode 100644 index 000000000000..2714bd7cec33 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_nppu_reg.h @@ -0,0 +1,3281 @@ + +#ifndef _DPP_NPPU_REG_H_ +#define _DPP_NPPU_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_nppu_mr_cfg_cfg_shap_param_t { + ZXIC_UINT32 shap_en; + ZXIC_UINT32 shap_rate; +} DPP_NPPU_MR_CFG_CFG_SHAP_PARAM_T; + +typedef struct 
dpp_nppu_mr_cfg_cfg_shap_token_t { + ZXIC_UINT32 cfg_shap_plen_offset; + ZXIC_UINT32 cfg_shap_token; +} DPP_NPPU_MR_CFG_CFG_SHAP_TOKEN_T; + +typedef struct dpp_nppu_mr_cfg_idle_ptr_fifo_aful_th_t { + ZXIC_UINT32 idle_ptr3_fifo_aful_th; + ZXIC_UINT32 idle_ptr2_fifo_aful_th; + ZXIC_UINT32 idle_ptr1_fifo_aful_th; + ZXIC_UINT32 idle_ptr0_fifo_aful_th; +} DPP_NPPU_MR_CFG_IDLE_PTR_FIFO_AFUL_TH_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos_port_cfg_t { + ZXIC_UINT32 cos3_port_cfg; + ZXIC_UINT32 cos2_port_cfg; + ZXIC_UINT32 cos1_port_cfg; + ZXIC_UINT32 cos0_port_cfg; +} DPP_NPPU_MR_CFG_MR_COS_PORT_CFG_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_PKTRX_CFG_IND_STATUS_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_PKTRX_CFG_IND_CMD_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data0_t { + ZXIC_UINT32 ind_dat0; +} DPP_NPPU_PKTRX_CFG_IND_DATA0_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data1_t { + ZXIC_UINT32 ind_dat1; +} DPP_NPPU_PKTRX_CFG_IND_DATA1_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data2_t { + ZXIC_UINT32 ind_dat2; +} DPP_NPPU_PKTRX_CFG_IND_DATA2_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data3_t { + ZXIC_UINT32 ind_dat3; +} DPP_NPPU_PKTRX_CFG_IND_DATA3_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data4_t { + ZXIC_UINT32 ind_dat4; +} DPP_NPPU_PKTRX_CFG_IND_DATA4_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data5_t { + ZXIC_UINT32 ind_dat5; +} DPP_NPPU_PKTRX_CFG_IND_DATA5_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data6_t { + ZXIC_UINT32 ind_dat6; +} DPP_NPPU_PKTRX_CFG_IND_DATA6_T; + +typedef struct dpp_nppu_pktrx_cfg_ind_data7_t { + ZXIC_UINT32 ind_dat7; +} DPP_NPPU_PKTRX_CFG_IND_DATA7_T; + +typedef struct dpp_nppu_pktrx_cfg_tcam_0_cmd_t { + ZXIC_UINT32 cfg_vben; + ZXIC_UINT32 cfg_vbi; + ZXIC_UINT32 cfg_t_strwc; + ZXIC_UINT32 tcam0_sm; + ZXIC_UINT32 tcam0_smen; + ZXIC_UINT32 tcam0_rm; + ZXIC_UINT32 tcam0_rmen; + 
ZXIC_UINT32 tcam0_enable; + ZXIC_UINT32 tcam0_flush; + ZXIC_UINT32 tcam0_unload; + ZXIC_UINT32 tcam0_unload_addr; +} DPP_NPPU_PKTRX_CFG_TCAM_0_CMD_T; + +typedef struct dpp_nppu_pktrx_cfg_tcam_1_cmd_t { + ZXIC_UINT32 tcam1_sm; + ZXIC_UINT32 tcam1_smen; + ZXIC_UINT32 tcam1_rm; + ZXIC_UINT32 tcam1_rmen; + ZXIC_UINT32 tcam1_enable; + ZXIC_UINT32 tcam1_flush; + ZXIC_UINT32 tcam1_unload; + ZXIC_UINT32 tcam1_unload_addr; +} DPP_NPPU_PKTRX_CFG_TCAM_1_CMD_T; + +typedef struct dpp_nppu_pktrx_cfg_port_en_0_t { + ZXIC_UINT32 cfg_isch_port_en_0; +} DPP_NPPU_PKTRX_CFG_PORT_EN_0_T; + +typedef struct dpp_nppu_pktrx_cfg_port_en_1_t { + ZXIC_UINT32 cfg_isch_port_en_1; +} DPP_NPPU_PKTRX_CFG_PORT_EN_1_T; + +typedef struct dpp_nppu_pktrx_cfg_port_en_2_t { + ZXIC_UINT32 cfg_isch_port_en_2; +} DPP_NPPU_PKTRX_CFG_PORT_EN_2_T; + +typedef struct dpp_nppu_pktrx_cfg_port_en_3_t { + ZXIC_UINT32 cfg_port_change_en_0; + ZXIC_UINT32 cfg_port_change_en_1; + ZXIC_UINT32 cfg_isch_port_en_3; +} DPP_NPPU_PKTRX_CFG_PORT_EN_3_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_port_l2_offset_mode_0_t { + ZXIC_UINT32 cfg_port_l2_offset_mode_0; +} DPP_NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_0_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_port_l2_offset_mode_1_t { + ZXIC_UINT32 cfg_port_l2_offset_mode_1; +} DPP_NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_1_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_port_l2_offset_mode_2_t { + ZXIC_UINT32 cfg_port_l2_offset_mode_2; +} DPP_NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_2_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_port_l2_offset_mode_3_t { + ZXIC_UINT32 cfg_port_l2_offset_mode_3; +} DPP_NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_3_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_0_t { + ZXIC_UINT32 cfg_isch_fc_mode_0; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_0_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_1_t { + ZXIC_UINT32 cfg_isch_fc_mode_1; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_1_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_2_t { + ZXIC_UINT32 cfg_isch_fc_mode_2; +} 
DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_2_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_3_t { + ZXIC_UINT32 cfg_isch_fc_mode_3; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_3_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_4_t { + ZXIC_UINT32 cfg_isch_fc_mode_4; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_4_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_5_t { + ZXIC_UINT32 cfg_isch_fc_mode_5; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_5_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_6_t { + ZXIC_UINT32 cfg_isch_fc_mode_6; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_6_T; + +typedef struct dpp_nppu_pktrx_cfg_port_fc_mode_7_t { + ZXIC_UINT32 cfg_pfu_aging_en; + ZXIC_UINT32 cfg_isch_aging_en; + ZXIC_UINT32 cfg_isch_fc_mode_7; +} DPP_NPPU_PKTRX_CFG_PORT_FC_MODE_7_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_isch_aging_th_t { + ZXIC_UINT32 cfg_pfu_delay_cycle; + ZXIC_UINT32 cfg_isch_aging_th; +} DPP_NPPU_PKTRX_CFG_CFG_ISCH_AGING_TH_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_fifo_th_0_t { + ZXIC_UINT32 cfg_sch_fifo3_fc_th; + ZXIC_UINT32 cfg_sch_fifo2_fc_th; + ZXIC_UINT32 cfg_sch_fifo1_fc_th; + ZXIC_UINT32 cfg_sch_fifo0_fc_th; +} DPP_NPPU_PKTRX_CFG_ISCH_FIFO_TH_0_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_cfg_1_t { + ZXIC_UINT32 cfg_parser_max_len_en; + ZXIC_UINT32 cfg_parser_max_len; + ZXIC_UINT32 cfg_parser_min_len_en; + ZXIC_UINT32 cfg_parser_min_len; + ZXIC_UINT32 sp_sch_sel; +} DPP_NPPU_PKTRX_CFG_ISCH_CFG_1_T; + +typedef struct dpp_nppu_pktrx_cfg_tcam_0_vld_t { + ZXIC_UINT32 cfg_tcam0_vld; +} DPP_NPPU_PKTRX_CFG_TCAM_0_VLD_T; + +typedef struct dpp_nppu_pktrx_cfg_tcam_1_vld_t { + ZXIC_UINT32 cfg_tcam1_vld; +} DPP_NPPU_PKTRX_CFG_TCAM_1_VLD_T; + +typedef struct dpp_nppu_pktrx_cfg_cpu_port_en_mask_t { + ZXIC_UINT32 cpu_port_en_mask; +} DPP_NPPU_PKTRX_CFG_CPU_PORT_EN_MASK_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_glbal_cfg_0_t { + ZXIC_UINT32 pktrx_glbal_cfg_0; +} DPP_NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_glbal_cfg_1_t { + ZXIC_UINT32 
pktrx_glbal_cfg_1; +} DPP_NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_glbal_cfg_2_t { + ZXIC_UINT32 pktrx_glbal_cfg_2; +} DPP_NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_2_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_glbal_cfg_3_t { + ZXIC_UINT32 pktrx_glbal_cfg_3; +} DPP_NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_3_T; + +typedef struct dpp_nppu_pktrx_cfg_nppu_start_t { + ZXIC_UINT32 nppu_start; +} DPP_NPPU_PKTRX_CFG_NPPU_START_T; + +typedef struct dpp_nppu_pktrx_stat_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_PKTRX_STAT_IND_STATUS_T; + +typedef struct dpp_nppu_pktrx_stat_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_PKTRX_STAT_IND_CMD_T; + +typedef struct dpp_nppu_pktrx_stat_ind_data0_t { + ZXIC_UINT32 ind_dat0; +} DPP_NPPU_PKTRX_STAT_IND_DATA0_T; + +typedef struct dpp_nppu_idma_cfg_debug_cnt_ovfl_mode_t { + ZXIC_UINT32 debug_cnt_ovfl_mode; +} DPP_NPPU_IDMA_CFG_DEBUG_CNT_OVFL_MODE_T; + +typedef struct dpp_nppu_idma_stat_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_IDMA_STAT_IND_STATUS_T; + +typedef struct dpp_nppu_idma_stat_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_IDMA_STAT_IND_CMD_T; + +typedef struct dpp_nppu_idma_stat_ind_data0_t { + ZXIC_UINT32 ind_data0; +} DPP_NPPU_IDMA_STAT_IND_DATA0_T; + +typedef struct dpp_nppu_pbu_cfg_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_PBU_CFG_IND_STATUS_T; + +typedef struct dpp_nppu_pbu_cfg_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_PBU_CFG_IND_CMD_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data0_t { + ZXIC_UINT32 ind_data0; +} DPP_NPPU_PBU_CFG_IND_DATA0_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data1_t { + ZXIC_UINT32 ind_data1; +} DPP_NPPU_PBU_CFG_IND_DATA1_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data2_t { + ZXIC_UINT32 ind_data2; +} DPP_NPPU_PBU_CFG_IND_DATA2_T; + +typedef struct 
dpp_nppu_pbu_cfg_ind_data3_t { + ZXIC_UINT32 ind_data3; +} DPP_NPPU_PBU_CFG_IND_DATA3_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data4_t { + ZXIC_UINT32 ind_data4; +} DPP_NPPU_PBU_CFG_IND_DATA4_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data5_t { + ZXIC_UINT32 ind_data5; +} DPP_NPPU_PBU_CFG_IND_DATA5_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data6_t { + ZXIC_UINT32 ind_data6; +} DPP_NPPU_PBU_CFG_IND_DATA6_T; + +typedef struct dpp_nppu_pbu_cfg_ind_data7_t { + ZXIC_UINT32 ind_data7; +} DPP_NPPU_PBU_CFG_IND_DATA7_T; + +typedef struct dpp_nppu_pbu_cfg_idma_public_th_t { + ZXIC_UINT32 idma_public_th; +} DPP_NPPU_PBU_CFG_IDMA_PUBLIC_TH_T; + +typedef struct dpp_nppu_pbu_cfg_lif_public_th_t { + ZXIC_UINT32 lif_public_th; +} DPP_NPPU_PBU_CFG_LIF_PUBLIC_TH_T; + +typedef struct dpp_nppu_pbu_cfg_idma_total_th_t { + ZXIC_UINT32 idma_total_th; +} DPP_NPPU_PBU_CFG_IDMA_TOTAL_TH_T; + +typedef struct dpp_nppu_pbu_cfg_lif_total_th_t { + ZXIC_UINT32 lif_total_th; +} DPP_NPPU_PBU_CFG_LIF_TOTAL_TH_T; + +typedef struct dpp_nppu_pbu_cfg_mc_total_th_t { + ZXIC_UINT32 mc_total_th; +} DPP_NPPU_PBU_CFG_MC_TOTAL_TH_T; + +typedef struct dpp_nppu_pbu_cfg_mc_cos10_th_t { + ZXIC_UINT32 mc_cos1_mode; + ZXIC_UINT32 mc_cos0_mode; + ZXIC_UINT32 mc_cos1_th; + ZXIC_UINT32 mc_cos0_th; +} DPP_NPPU_PBU_CFG_MC_COS10_TH_T; + +typedef struct dpp_nppu_pbu_cfg_mc_cos32_th_t { + ZXIC_UINT32 mc_cos3_mode; + ZXIC_UINT32 mc_cos2_mode; + ZXIC_UINT32 mc_cos3_th; + ZXIC_UINT32 mc_cos2_th; +} DPP_NPPU_PBU_CFG_MC_COS32_TH_T; + +typedef struct dpp_nppu_pbu_cfg_mc_cos54_th_t { + ZXIC_UINT32 mc_cos5_mode; + ZXIC_UINT32 mc_cos4_mode; + ZXIC_UINT32 mc_cos5_th; + ZXIC_UINT32 mc_cos4_th; +} DPP_NPPU_PBU_CFG_MC_COS54_TH_T; + +typedef struct dpp_nppu_pbu_cfg_mc_cos76_th_t { + ZXIC_UINT32 mc_cos7_mode; + ZXIC_UINT32 mc_cos6_mode; + ZXIC_UINT32 mc_cos7_th; + ZXIC_UINT32 mc_cos6_th; +} DPP_NPPU_PBU_CFG_MC_COS76_TH_T; + +typedef struct dpp_nppu_pbu_cfg_debug_cnt_ovfl_mode_t { + ZXIC_UINT32 debug_cnt_ovfl_mode; +} 
DPP_NPPU_PBU_CFG_DEBUG_CNT_OVFL_MODE_T; + +typedef struct dpp_nppu_pbu_cfg_se_key_aful_negate_cfg_t { + ZXIC_UINT32 se_key_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_SE_KEY_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_sa_flag_t { + ZXIC_UINT32 sa_flag; +} DPP_NPPU_PBU_CFG_SA_FLAG_T; + +typedef struct dpp_nppu_pbu_stat_ind_data_t { + ZXIC_UINT32 ind_data; +} DPP_NPPU_PBU_STAT_IND_DATA_T; + +typedef struct dpp_nppu_pbu_stat_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_PBU_STAT_IND_STATUS_T; + +typedef struct dpp_nppu_pbu_stat_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_PBU_STAT_IND_CMD_T; + +typedef struct dpp_nppu_pbu_stat_total_cnt_t { + ZXIC_UINT32 total_cnt; +} DPP_NPPU_PBU_STAT_TOTAL_CNT_T; + +typedef struct dpp_nppu_pbu_stat_idma_pub_cnt_t { + ZXIC_UINT32 idma_pub_cnt; +} DPP_NPPU_PBU_STAT_IDMA_PUB_CNT_T; + +typedef struct dpp_nppu_pbu_stat_lif_pub_cnt_t { + ZXIC_UINT32 lif_pub_cnt; +} DPP_NPPU_PBU_STAT_LIF_PUB_CNT_T; + +typedef struct dpp_nppu_pbu_stat_mc_total_cnt_t { + ZXIC_UINT32 mc_total_cnt; +} DPP_NPPU_PBU_STAT_MC_TOTAL_CNT_T; + +typedef struct dpp_nppu_pbu_stat_pbu_thram_init_done_t { + ZXIC_UINT32 pbu_thram_init_done; +} DPP_NPPU_PBU_STAT_PBU_THRAM_INIT_DONE_T; + +typedef struct dpp_nppu_pbu_stat_ifb_fptr_init_done_t { + ZXIC_UINT32 ifb_fptr_init_done; +} DPP_NPPU_PBU_STAT_IFB_FPTR_INIT_DONE_T; + +typedef struct dpp_nppu_isu_cfg_weight_normal_uc_t { + ZXIC_UINT32 weight_normal_uc; +} DPP_NPPU_ISU_CFG_WEIGHT_NORMAL_UC_T; + +typedef struct dpp_nppu_isu_cfg_fabric_or_saip_t { + ZXIC_UINT32 fabric_or_saip; +} DPP_NPPU_ISU_CFG_FABRIC_OR_SAIP_T; + +typedef struct dpp_nppu_isu_stat_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_ISU_STAT_IND_STATUS_T; + +typedef struct dpp_nppu_isu_stat_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_ISU_STAT_IND_CMD_T; + +typedef struct dpp_nppu_isu_stat_ind_dat0_t { + ZXIC_UINT32 
ind_dat0; +} DPP_NPPU_ISU_STAT_IND_DAT0_T; + +typedef struct dpp_nppu_odma_cfg_ind_access_done_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_ODMA_CFG_IND_ACCESS_DONE_T; + +typedef struct dpp_nppu_odma_cfg_ind_command_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_ODMA_CFG_IND_COMMAND_T; + +typedef struct dpp_nppu_odma_cfg_ind_dat0_t { + ZXIC_UINT32 ind_dat0; +} DPP_NPPU_ODMA_CFG_IND_DAT0_T; + +typedef struct dpp_nppu_odma_cfg_ind_dat1_t { + ZXIC_UINT32 ind_dat1; +} DPP_NPPU_ODMA_CFG_IND_DAT1_T; + +typedef struct dpp_nppu_odma_cfg_fabric_or_saip_t { + ZXIC_UINT32 fabric_or_saip; +} DPP_NPPU_ODMA_CFG_FABRIC_OR_SAIP_T; + +typedef struct dpp_nppu_odma_cfg_max_pkt_len_t { + ZXIC_UINT32 max_pkt_len; +} DPP_NPPU_ODMA_CFG_MAX_PKT_LEN_T; + +typedef struct dpp_nppu_odma_cfg_age_en_t { + ZXIC_UINT32 age_en; +} DPP_NPPU_ODMA_CFG_AGE_EN_T; + +typedef struct dpp_nppu_odma_cfg_age_mode_t { + ZXIC_UINT32 age_mode; +} DPP_NPPU_ODMA_CFG_AGE_MODE_T; + +typedef struct dpp_nppu_odma_cfg_age_value_time_t { + ZXIC_UINT32 age_value_time; +} DPP_NPPU_ODMA_CFG_AGE_VALUE_TIME_T; + +typedef struct dpp_nppu_odma_cfg_age_value_room_t { + ZXIC_UINT32 age_value_room; +} DPP_NPPU_ODMA_CFG_AGE_VALUE_ROOM_T; + +typedef struct dpp_nppu_odma_cfg_age_out_cnt_t { + ZXIC_UINT32 age_out_cnt; +} DPP_NPPU_ODMA_CFG_AGE_OUT_CNT_T; + +typedef struct dpp_nppu_odma_cfg_token_value_a_t { + ZXIC_UINT32 token_value_a; +} DPP_NPPU_ODMA_CFG_TOKEN_VALUE_A_T; + +typedef struct dpp_nppu_odma_cfg_token_value_b_t { + ZXIC_UINT32 token_value_b; +} DPP_NPPU_ODMA_CFG_TOKEN_VALUE_B_T; + +typedef struct dpp_nppu_odma_cfg_cfg_shap_en_p0_t { + ZXIC_UINT32 cfg_shap_en_p0; +} DPP_NPPU_ODMA_CFG_CFG_SHAP_EN_P0_T; + +typedef struct dpp_nppu_odma_cfg_cfg_shap_en_p1_t { + ZXIC_UINT32 cfg_shap_en_p1; +} DPP_NPPU_ODMA_CFG_CFG_SHAP_EN_P1_T; + +typedef struct dpp_nppu_odma_cfg_cfg_shap_en_tm_t { + ZXIC_UINT32 cfg_shap_en_tm; +} DPP_NPPU_ODMA_CFG_CFG_SHAP_EN_TM_T; + +typedef struct 
dpp_nppu_odma_stat_ind_status_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_ODMA_STAT_IND_STATUS_T; + +typedef struct dpp_nppu_odma_stat_ind_cmd_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_ODMA_STAT_IND_CMD_T; + +typedef struct dpp_nppu_odma_stat_ind_data0_t { + ZXIC_UINT32 ind_dat0; +} DPP_NPPU_ODMA_STAT_IND_DATA0_T; + +typedef struct dpp_nppu_odma_stat_debug_cnt_cfg_t { + ZXIC_UINT32 debug_cnt_ovf_mode; + ZXIC_UINT32 debug_cnt_rdclr_mode; + ZXIC_UINT32 user_cnt_value; +} DPP_NPPU_ODMA_STAT_DEBUG_CNT_CFG_T; + +typedef struct dpp_nppu_oam_cfg_bfd_firstchk_th_t { + ZXIC_UINT32 bfd_firstchk_th; +} DPP_NPPU_OAM_CFG_BFD_FIRSTCHK_TH_T; + +typedef struct dpp_nppu_pbu_cfg_memid_0_pbu_fc_idmath_ram_t { + ZXIC_UINT32 lif_th_15; + ZXIC_UINT32 lif_prv_15; + ZXIC_UINT32 idma_prv_15; + ZXIC_UINT32 idma_th_cos0_15; + ZXIC_UINT32 idma_th_cos1_15; + ZXIC_UINT32 idma_th_cos2_15; + ZXIC_UINT32 idma_th_cos3_15; + ZXIC_UINT32 idma_th_cos4_15; + ZXIC_UINT32 idma_th_cos5_15; + ZXIC_UINT32 idma_th_cos6_15; + ZXIC_UINT32 idma_th_cos7_15; +} DPP_NPPU_PBU_CFG_MEMID_0_PBU_FC_IDMATH_RAM_T; + +typedef struct dpp_nppu_pbu_cfg_memid_1_pbu_fc_macth_ram_t { + ZXIC_UINT32 cos7_th; + ZXIC_UINT32 cos6_th; + ZXIC_UINT32 cos5_th; + ZXIC_UINT32 cos4_th; + ZXIC_UINT32 cos3_th; + ZXIC_UINT32 cos2_th; + ZXIC_UINT32 cos1_th; + ZXIC_UINT32 cos0_th; +} DPP_NPPU_PBU_CFG_MEMID_1_PBU_FC_MACTH_RAM_T; + +typedef struct dpp_nppu_pbu_stat_memid_1_all_kind_port_cnt_t { + ZXIC_UINT32 peak_port_cnt; + ZXIC_UINT32 current_port_cnt; +} DPP_NPPU_PBU_STAT_MEMID_1_ALL_KIND_PORT_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_ppu_pbu_ifb_req_vld_cnt_t { + ZXIC_UINT32 ppu_pbu_ifb_req_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PPU_PBU_IFB_REQ_VLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_ifb_rsp_vld_cnt_t { + ZXIC_UINT32 pbu_ppu_ifb_rsp_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_IFB_RSP_VLD_CNT_T; + +typedef struct 
dpp_nppu_pbu_stat_memid_2_odma_pbu_recy_ptr_vld_cnt_t { + ZXIC_UINT32 odma_pbu_recy_ptr_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_ODMA_PBU_RECY_PTR_VLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_ppu_pbu_mcode_pf_req_cnt_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_req_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PPU_PBU_MCODE_PF_REQ_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_rsp_cnt_t { + ZXIC_UINT32 pbu_ppu_mcode_pf_rsp_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_RSP_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_ppu_pbu_logic_pf_req_cnt_t { + ZXIC_UINT32 ppu_pbu_logic_pf_req_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PPU_PBU_LOGIC_PF_REQ_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_rsp_cnt_t { + ZXIC_UINT32 pbu_ppu_logic_pf_rsp_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_RSP_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_ppu_use_ptr_pulse_cnt_t { + ZXIC_UINT32 ppu_use_ptr_pulse_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PPU_USE_PTR_PULSE_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_ppu_pbu_wb_vld_cnt_t { + ZXIC_UINT32 ppu_pbu_wb_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PPU_PBU_WB_VLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_reorder_para_vld_cnt_t { + ZXIC_UINT32 pbu_ppu_reorder_para_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_REORDER_PARA_VLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_se_pbu_dpi_key_vld_cnt_t { + ZXIC_UINT32 se_pbu_dpi_key_vld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_SE_PBU_DPI_KEY_VLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_se_dpi_rsp_datvld_cnt_t { + ZXIC_UINT32 pbu_se_dpi_rsp_datvld_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_SE_DPI_RSP_DATVLD_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd1_cnt_t { + ZXIC_UINT32 odma_pbu_ifb_rd1_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD1_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd2_cnt_t { + ZXIC_UINT32 odma_pbu_ifb_rd2_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD2_CNT_T; + +typedef 
struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_no_rsp_cnt_t { + ZXIC_UINT32 pbu_ppu_mcode_pf_no_rsp_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_NO_RSP_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_no_rsp_cnt_t { + ZXIC_UINT32 pbu_ppu_logic_pf_no_rsp_cnt; +} DPP_NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_NO_RSP_CNT_T; + +typedef struct dpp_nppu_pbu_stat_memid_3_cpu_rd_ifb_data_t { + ZXIC_UINT32 cpu_rd_ifb_data; +} DPP_NPPU_PBU_STAT_MEMID_3_CPU_RD_IFB_DATA_T; + +typedef struct dpp_nppu_pbu_stat_memid_4_mux_sel_rgt_t { + ZXIC_UINT32 current_port_cnt; +} DPP_NPPU_PBU_STAT_MEMID_4_MUX_SEL_RGT_T; + +typedef struct dpp_nppu_pbu_stat_memid_5_port_pub_cnt_t { + ZXIC_UINT32 port_pub_cnt; +} DPP_NPPU_PBU_STAT_MEMID_5_PORT_PUB_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_1_idma_o_isu_pkt_pulse_total_cnt_t { + ZXIC_UINT32 idma_o_isu_pkt_pulse_total_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_PKT_PULSE_TOTAL_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_1_idma_o_isu_epkt_pulse_total_cnt_t { + ZXIC_UINT32 idma_o_isu_epkt_pulse_total_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_EPKT_PULSE_TOTAL_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_1_idma_dispkt_pulse_total_cnt_t { + ZXIC_UINT32 idma_dispkt_pulse_total_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_1_IDMA_DISPKT_PULSE_TOTAL_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_0_idma_o_isu_pkt_pulse_cnt_t { + ZXIC_UINT32 idma_o_isu_pkt_pulse_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_PKT_PULSE_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_0_idma_o_isu_epkt_pulse_cnt_t { + ZXIC_UINT32 idma_o_isu_epkt_pulse_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_EPKT_PULSE_CNT_T; + +typedef struct dpp_nppu_idma_stat_memid_0_idma_dispkt_pulse_cnt_t { + ZXIC_UINT32 idma_dispkt_pulse_cnt; +} DPP_NPPU_IDMA_STAT_MEMID_0_IDMA_DISPKT_PULSE_CNT_T; + +typedef struct dpp_nppu_mr_cfg_ind_access_states_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_MR_CFG_IND_ACCESS_STATES_T; + +typedef struct 
dpp_nppu_mr_cfg_ind_access_cmd0_t { + ZXIC_UINT32 wr_mode; + ZXIC_UINT32 rd_or_wr; + ZXIC_UINT32 ind_access_addr0; +} DPP_NPPU_MR_CFG_IND_ACCESS_CMD0_T; + +typedef struct dpp_nppu_mr_cfg_ind_access_data0_t { + ZXIC_UINT32 ind_access_data0; +} DPP_NPPU_MR_CFG_IND_ACCESS_DATA0_T; + +typedef struct dpp_nppu_mr_cfg_ind_access_data1_t { + ZXIC_UINT32 ind_access_data1; +} DPP_NPPU_MR_CFG_IND_ACCESS_DATA1_T; + +typedef struct dpp_nppu_mr_cfg_ind_access_cmd1_t { + ZXIC_UINT32 ind_access_addr1; +} DPP_NPPU_MR_CFG_IND_ACCESS_CMD1_T; + +typedef struct dpp_nppu_mr_cfg_mr_init_done_t { + ZXIC_UINT32 mr_init_done; +} DPP_NPPU_MR_CFG_MR_INIT_DONE_T; + +typedef struct dpp_nppu_mr_cfg_cnt_mode_reg_t { + ZXIC_UINT32 cfgmt_count_rd_mode; + ZXIC_UINT32 cfgmt_count_overflow_mode; +} DPP_NPPU_MR_CFG_CNT_MODE_REG_T; + +typedef struct dpp_nppu_mr_cfg_cfg_ecc_bypass_read_t { + ZXIC_UINT32 cfg_ecc_bypass_read; +} DPP_NPPU_MR_CFG_CFG_ECC_BYPASS_READ_T; + +typedef struct dpp_nppu_mr_cfg_cfg_rep_mod_t { + ZXIC_UINT32 cfg_rep_mod; +} DPP_NPPU_MR_CFG_CFG_REP_MOD_T; + +typedef struct dpp_nppu_mr_cfg_block_ptr_fifo_aful_th_t { + ZXIC_UINT32 block_ptr3_fifo_aful_th; + ZXIC_UINT32 block_ptr2_fifo_aful_th; + ZXIC_UINT32 block_ptr1_fifo_aful_th; + ZXIC_UINT32 block_ptr0_fifo_aful_th; +} DPP_NPPU_MR_CFG_BLOCK_PTR_FIFO_AFUL_TH_T; + +typedef struct dpp_nppu_mr_cfg_pre_rcv_ptr_fifo_aful_th_t { + ZXIC_UINT32 pre_rcv_ptr3_fifo_aful_th; + ZXIC_UINT32 pre_rcv_ptr2_fifo_aful_th; + ZXIC_UINT32 pre_rcv_ptr1_fifo_aful_th; + ZXIC_UINT32 pre_rcv_ptr0_fifo_aful_th; +} DPP_NPPU_MR_CFG_PRE_RCV_PTR_FIFO_AFUL_TH_T; + +typedef struct dpp_nppu_mr_cfg_mgid_fifo_aful_th_t { + ZXIC_UINT32 mgid3_fifo_aful_th; + ZXIC_UINT32 mgid2_fifo_aful_th; + ZXIC_UINT32 mgid1_fifo_aful_th; + ZXIC_UINT32 mgid0_fifo_aful_th; +} DPP_NPPU_MR_CFG_MGID_FIFO_AFUL_TH_T; + +typedef struct dpp_nppu_mr_cfg_rep_cmd_fifo_aful_th_t { + ZXIC_UINT32 rep_cmd3_fifo_aful_th; + ZXIC_UINT32 rep_cmd2_fifo_aful_th; + ZXIC_UINT32 rep_cmd1_fifo_aful_th; + 
ZXIC_UINT32 rep_cmd0_fifo_aful_th; +} DPP_NPPU_MR_CFG_REP_CMD_FIFO_AFUL_TH_T; + +typedef struct dpp_nppu_mr_cfg_mr_int_mask_1_t { + ZXIC_UINT32 free_ptr0_fifo_full_mask; + ZXIC_UINT32 free_ptr1_fifo_full_mask; + ZXIC_UINT32 free_ptr2_fifo_full_mask; + ZXIC_UINT32 free_ptr3_fifo_full_mask; + ZXIC_UINT32 block_ptr0_fifo_full_mask; + ZXIC_UINT32 block_ptr1_fifo_full_mask; + ZXIC_UINT32 block_ptr2_fifo_full_mask; + ZXIC_UINT32 block_ptr3_fifo_full_mask; + ZXIC_UINT32 mgid0_fifo_full_mask; + ZXIC_UINT32 mgid1_fifo_full_mask; + ZXIC_UINT32 mgid2_fifo_full_mask; + ZXIC_UINT32 mgid3_fifo_full_mask; + ZXIC_UINT32 pre_rcv_ptr0_fifo_full_mask; + ZXIC_UINT32 pre_rcv_ptr1_fifo_full_mask; + ZXIC_UINT32 pre_rcv_ptr2_fifo_full_mask; + ZXIC_UINT32 pre_rcv_ptr3_fifo_full_mask; + ZXIC_UINT32 rep_cmd0_fifo_full_mask; + ZXIC_UINT32 rep_cmd1_fifo_full_mask; + ZXIC_UINT32 rep_cmd2_fifo_full_mask; + ZXIC_UINT32 rep_cmd3_fifo_full_mask; +} DPP_NPPU_MR_CFG_MR_INT_MASK_1_T; + +typedef struct dpp_nppu_mr_cfg_mr_int_mask_2_t { + ZXIC_UINT32 free_ptr0_fifo_udf_mask; + ZXIC_UINT32 free_ptr1_fifo_udf_mask; + ZXIC_UINT32 free_ptr2_fifo_udf_mask; + ZXIC_UINT32 free_ptr3_fifo_udf_mask; + ZXIC_UINT32 block_ptr0_fifo_udf_mask; + ZXIC_UINT32 block_ptr1_fifo_udf_mask; + ZXIC_UINT32 block_ptr2_fifo_udf_mask; + ZXIC_UINT32 block_ptr3_fifo_udf_mask; + ZXIC_UINT32 mgid0_fifo_udf_mask; + ZXIC_UINT32 mgid1_fifo_udf_mask; + ZXIC_UINT32 mgid2_fifo_udf_mask; + ZXIC_UINT32 mgid3_fifo_udf_mask; + ZXIC_UINT32 pre_rcv_ptr0_fifo_udf_mask; + ZXIC_UINT32 pre_rcv_ptr1_fifo_udf_mask; + ZXIC_UINT32 pre_rcv_ptr2_fifo_udf_mask; + ZXIC_UINT32 pre_rcv_ptr3_fifo_udf_mask; + ZXIC_UINT32 rep_cmd0_fifo_udf_mask; + ZXIC_UINT32 rep_cmd1_fifo_udf_mask; + ZXIC_UINT32 rep_cmd2_fifo_udf_mask; + ZXIC_UINT32 rep_cmd3_fifo_udf_mask; +} DPP_NPPU_MR_CFG_MR_INT_MASK_2_T; + +typedef struct dpp_nppu_mr_cfg_mr_int_mask_3_t { + ZXIC_UINT32 free_ptr0_fifo_ovf_mask; + ZXIC_UINT32 free_ptr1_fifo_ovf_mask; + ZXIC_UINT32 free_ptr2_fifo_ovf_mask; + 
ZXIC_UINT32 free_ptr3_fifo_ovf_mask; + ZXIC_UINT32 block_ptr0_fifo_ovf_mask; + ZXIC_UINT32 block_ptr1_fifo_ovf_mask; + ZXIC_UINT32 block_ptr2_fifo_ovf_mask; + ZXIC_UINT32 block_ptr3_fifo_ovf_mask; + ZXIC_UINT32 mgid0_fifo_ovf_mask; + ZXIC_UINT32 mgid1_fifo_ovf_mask; + ZXIC_UINT32 mgid2_fifo_ovf_mask; + ZXIC_UINT32 mgid3_fifo_ovf_mask; + ZXIC_UINT32 pre_rcv_ptr0_fifo_ovf_mask; + ZXIC_UINT32 pre_rcv_ptr1_fifo_ovf_mask; + ZXIC_UINT32 pre_rcv_ptr2_fifo_ovf_mask; + ZXIC_UINT32 pre_rcv_ptr3_fifo_ovf_mask; + ZXIC_UINT32 rep_cmd0_fifo_ovf_mask; + ZXIC_UINT32 rep_cmd1_fifo_ovf_mask; + ZXIC_UINT32 rep_cmd2_fifo_ovf_mask; + ZXIC_UINT32 rep_cmd3_fifo_ovf_mask; +} DPP_NPPU_MR_CFG_MR_INT_MASK_3_T; + +typedef struct dpp_nppu_mr_cfg_mr_int_mask_4_t { + ZXIC_UINT32 data_buf0_ram_parity_err_mask; + ZXIC_UINT32 data_buf1_ram_parity_err_mask; + ZXIC_UINT32 data_buf2_ram_parity_err_mask; + ZXIC_UINT32 data_buf3_ram_parity_err_mask; + ZXIC_UINT32 mlt_ecc_single_err_mask; + ZXIC_UINT32 free_ptr0_fifo_ecc_single_err_mask; + ZXIC_UINT32 free_ptr1_fifo_ecc_single_err_mask; + ZXIC_UINT32 free_ptr2_fifo_ecc_single_err_mask; + ZXIC_UINT32 free_ptr3_fifo_ecc_single_err_mask; + ZXIC_UINT32 block_ptr0_fifo_ecc_single_err_mask; + ZXIC_UINT32 block_ptr1_fifo_ecc_single_err_mask; + ZXIC_UINT32 block_ptr2_fifo_ecc_single_err_mask; + ZXIC_UINT32 block_ptr3_fifo_ecc_single_err_mask; + ZXIC_UINT32 mgid0_fifo_ecc_single_err_mask; + ZXIC_UINT32 mgid1_fifo_ecc_single_err_mask; + ZXIC_UINT32 mgid2_fifo_ecc_single_err_mask; + ZXIC_UINT32 mgid3_fifo_ecc_single_err_mask; + ZXIC_UINT32 pre_rcv_ptr0_fifo_ecc_single_err_mask; + ZXIC_UINT32 pre_rcv_ptr1_fifo_ecc_single_err_mask; + ZXIC_UINT32 pre_rcv_ptr2_fifo_ecc_single_err_mask; + ZXIC_UINT32 pre_rcv_ptr3_fifo_ecc_single_err_mask; + ZXIC_UINT32 rep_cmd0_fifo_ecc_single_err_mask; + ZXIC_UINT32 rep_cmd1_fifo_ecc_single_err_mask; + ZXIC_UINT32 rep_cmd2_fifo_ecc_single_err_mask; + ZXIC_UINT32 rep_cmd3_fifo_ecc_single_err_mask; +} DPP_NPPU_MR_CFG_MR_INT_MASK_4_T; + 
+typedef struct dpp_nppu_mr_cfg_mr_states_1_t { + ZXIC_UINT32 free_ptr0_fifo_full; + ZXIC_UINT32 free_ptr1_fifo_full; + ZXIC_UINT32 free_ptr2_fifo_full; + ZXIC_UINT32 free_ptr3_fifo_full; + ZXIC_UINT32 block_ptr0_fifo_full; + ZXIC_UINT32 block_ptr1_fifo_full; + ZXIC_UINT32 block_ptr2_fifo_full; + ZXIC_UINT32 block_ptr3_fifo_full; + ZXIC_UINT32 mgid0_fifo_full; + ZXIC_UINT32 mgid1_fifo_full; + ZXIC_UINT32 mgid2_fifo_full; + ZXIC_UINT32 mgid3_fifo_full; + ZXIC_UINT32 pre_rcv_ptr0_fifo_full; + ZXIC_UINT32 pre_rcv_ptr1_fifo_full; + ZXIC_UINT32 pre_rcv_ptr2_fifo_full; + ZXIC_UINT32 pre_rcv_ptr3_fifo_full; + ZXIC_UINT32 rep_cmd0_fifo_full; + ZXIC_UINT32 rep_cmd1_fifo_full; + ZXIC_UINT32 rep_cmd2_fifo_full; + ZXIC_UINT32 rep_cmd3_fifo_full; +} DPP_NPPU_MR_CFG_MR_STATES_1_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_2_t { + ZXIC_UINT32 free_ptr0_fifo_udf; + ZXIC_UINT32 free_ptr1_fifo_udf; + ZXIC_UINT32 free_ptr2_fifo_udf; + ZXIC_UINT32 free_ptr3_fifo_udf; + ZXIC_UINT32 block_ptr0_fifo_udf; + ZXIC_UINT32 block_ptr1_fifo_udf; + ZXIC_UINT32 block_ptr2_fifo_udf; + ZXIC_UINT32 block_ptr3_fifo_udf; + ZXIC_UINT32 mgid0_fifo_udf; + ZXIC_UINT32 mgid1_fifo_udf; + ZXIC_UINT32 mgid2_fifo_udf; + ZXIC_UINT32 mgid3_fifo_udf; + ZXIC_UINT32 pre_rcv_ptr0_fifo_udf; + ZXIC_UINT32 pre_rcv_ptr1_fifo_udf; + ZXIC_UINT32 pre_rcv_ptr2_fifo_udf; + ZXIC_UINT32 pre_rcv_ptr3_fifo_udf; + ZXIC_UINT32 rep_cmd0_fifo_udf; + ZXIC_UINT32 rep_cmd1_fifo_udf; + ZXIC_UINT32 rep_cmd2_fifo_udf; + ZXIC_UINT32 rep_cmd3_fifo_udf; +} DPP_NPPU_MR_CFG_MR_STATES_2_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_3_t { + ZXIC_UINT32 free_ptr0_fifo_ovf; + ZXIC_UINT32 free_ptr1_fifo_ovf; + ZXIC_UINT32 free_ptr2_fifo_ovf; + ZXIC_UINT32 free_ptr3_fifo_ovf; + ZXIC_UINT32 block_ptr0_fifo_ovf; + ZXIC_UINT32 block_ptr1_fifo_ovf; + ZXIC_UINT32 block_ptr2_fifo_ovf; + ZXIC_UINT32 block_ptr3_fifo_ovf; + ZXIC_UINT32 mgid0_fifo_ovf; + ZXIC_UINT32 mgid1_fifo_ovf; + ZXIC_UINT32 mgid2_fifo_ovf; + ZXIC_UINT32 mgid3_fifo_ovf; + ZXIC_UINT32 
pre_rcv_ptr0_fifo_ovf; + ZXIC_UINT32 pre_rcv_ptr1_fifo_ovf; + ZXIC_UINT32 pre_rcv_ptr2_fifo_ovf; + ZXIC_UINT32 pre_rcv_ptr3_fifo_ovf; + ZXIC_UINT32 rep_cmd0_fifo_ovf; + ZXIC_UINT32 rep_cmd1_fifo_ovf; + ZXIC_UINT32 rep_cmd2_fifo_ovf; + ZXIC_UINT32 rep_cmd3_fifo_ovf; +} DPP_NPPU_MR_CFG_MR_STATES_3_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_4_t { + ZXIC_UINT32 data_buf0_ram_parity_err; + ZXIC_UINT32 data_buf1_ram_parity_err; + ZXIC_UINT32 data_buf2_ram_parity_err; + ZXIC_UINT32 data_buf3_ram_parity_err; + ZXIC_UINT32 mlt_ecc_single_err; + ZXIC_UINT32 free_ptr0_fifo_ecc_single_err; + ZXIC_UINT32 free_ptr1_fifo_ecc_single_err; + ZXIC_UINT32 free_ptr2_fifo_ecc_single_err; + ZXIC_UINT32 free_ptr3_fifo_ecc_single_err; + ZXIC_UINT32 block_ptr0_fifo_ecc_single_err; + ZXIC_UINT32 block_ptr1_fifo_ecc_single_err; + ZXIC_UINT32 block_ptr2_fifo_ecc_single_err; + ZXIC_UINT32 block_ptr3_fifo_ecc_single_err; + ZXIC_UINT32 mgid0_fifo_ecc_single_err; + ZXIC_UINT32 mgid1_fifo_ecc_single_err; + ZXIC_UINT32 mgid2_fifo_ecc_single_err; + ZXIC_UINT32 mgid3_fifo_ecc_single_err; + ZXIC_UINT32 pre_rcv_ptr0_fifo_ecc_single_err; + ZXIC_UINT32 pre_rcv_ptr1_fifo_ecc_single_err; + ZXIC_UINT32 pre_rcv_ptr2_fifo_ecc_single_err; + ZXIC_UINT32 pre_rcv_ptr3_fifo_ecc_single_err; + ZXIC_UINT32 rep_cmd0_fifo_ecc_single_err; + ZXIC_UINT32 rep_cmd1_fifo_ecc_single_err; + ZXIC_UINT32 rep_cmd2_fifo_ecc_single_err; + ZXIC_UINT32 rep_cmd3_fifo_ecc_single_err; +} DPP_NPPU_MR_CFG_MR_STATES_4_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_5_t { + ZXIC_UINT32 mlt_ecc_double_err; + ZXIC_UINT32 free_ptr0_fifo_ecc_double_err; + ZXIC_UINT32 free_ptr1_fifo_ecc_double_err; + ZXIC_UINT32 free_ptr2_fifo_ecc_double_err; + ZXIC_UINT32 free_ptr3_fifo_ecc_double_err; + ZXIC_UINT32 block_ptr0_fifo_ecc_double_err; + ZXIC_UINT32 block_ptr1_fifo_ecc_double_err; + ZXIC_UINT32 block_ptr2_fifo_ecc_double_err; + ZXIC_UINT32 block_ptr3_fifo_ecc_double_err; + ZXIC_UINT32 mgid0_fifo_ecc_double_err; + ZXIC_UINT32 
mgid1_fifo_ecc_double_err; + ZXIC_UINT32 mgid2_fifo_ecc_double_err; + ZXIC_UINT32 mgid3_fifo_ecc_double_err; + ZXIC_UINT32 pre_rcv_ptr0_fifo_ecc_double_err; + ZXIC_UINT32 pre_rcv_ptr1_fifo_ecc_double_err; + ZXIC_UINT32 pre_rcv_ptr2_fifo_ecc_double_err; + ZXIC_UINT32 pre_rcv_ptr3_fifo_ecc_double_err; + ZXIC_UINT32 rep_cmd0_fifo_ecc_double_err; + ZXIC_UINT32 rep_cmd1_fifo_ecc_double_err; + ZXIC_UINT32 rep_cmd2_fifo_ecc_double_err; + ZXIC_UINT32 rep_cmd3_fifo_ecc_double_err; +} DPP_NPPU_MR_CFG_MR_STATES_5_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_6_t { + ZXIC_UINT32 free_ptr0_fifo_empty; + ZXIC_UINT32 free_ptr1_fifo_empty; + ZXIC_UINT32 free_ptr2_fifo_empty; + ZXIC_UINT32 free_ptr3_fifo_empty; + ZXIC_UINT32 block_ptr0_fifo_empty; + ZXIC_UINT32 block_ptr1_fifo_empty; + ZXIC_UINT32 block_ptr2_fifo_empty; + ZXIC_UINT32 block_ptr3_fifo_empty; + ZXIC_UINT32 mgid0_fifo_empty; + ZXIC_UINT32 mgid1_fifo_empty; + ZXIC_UINT32 mgid2_fifo_empty; + ZXIC_UINT32 mgid3_fifo_empty; + ZXIC_UINT32 pre_rcv_ptr0_fifo_empty; + ZXIC_UINT32 pre_rcv_ptr1_fifo_empty; + ZXIC_UINT32 pre_rcv_ptr2_fifo_empty; + ZXIC_UINT32 pre_rcv_ptr3_fifo_empty; + ZXIC_UINT32 rep_cmd0_fifo_empty; + ZXIC_UINT32 rep_cmd1_fifo_empty; + ZXIC_UINT32 rep_cmd2_fifo_empty; + ZXIC_UINT32 rep_cmd3_fifo_empty; +} DPP_NPPU_MR_CFG_MR_STATES_6_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_7_t { + ZXIC_UINT32 cos0_is_rep_busy; + ZXIC_UINT32 cos1_is_rep_busy; + ZXIC_UINT32 cos2_is_rep_busy; + ZXIC_UINT32 cos3_is_rep_busy; + ZXIC_UINT32 block_ptr0_fifo_non_sop_ren_rdy; + ZXIC_UINT32 block_ptr1_fifo_non_sop_ren_rdy; + ZXIC_UINT32 block_ptr2_fifo_non_sop_ren_rdy; + ZXIC_UINT32 block_ptr3_fifo_non_sop_ren_rdy; + ZXIC_UINT32 pre_rcv_ptr0_fifo_non_sop_ren_rdy; + ZXIC_UINT32 pre_rcv_ptr1_fifo_non_sop_ren_rdy; + ZXIC_UINT32 pre_rcv_ptr2_fifo_non_sop_ren_rdy; + ZXIC_UINT32 pre_rcv_ptr3_fifo_non_sop_ren_rdy; + ZXIC_UINT32 port_shap_rdy; + ZXIC_UINT32 mr_lif_group0_rdy_3; + ZXIC_UINT32 mr_lif_group0_rdy_2; + ZXIC_UINT32 
mr_lif_group0_rdy_1; + ZXIC_UINT32 mr_lif_group0_rdy_0; + ZXIC_UINT32 pktrx_pfc_rdy_3; + ZXIC_UINT32 pktrx_pfc_rdy_2; + ZXIC_UINT32 pktrx_pfc_rdy_1; + ZXIC_UINT32 pktrx_pfc_rdy_0; + ZXIC_UINT32 pktrx_link_rdy; +} DPP_NPPU_MR_CFG_MR_STATES_7_T; + +typedef struct dpp_nppu_mr_cfg_mr_states_8_t { + ZXIC_UINT32 mr_head; +} DPP_NPPU_MR_CFG_MR_STATES_8_T; + +typedef struct dpp_nppu_mr_cfg_mr_sop_in_cnt_t { + ZXIC_UINT32 mr_sop_in_cnt; +} DPP_NPPU_MR_CFG_MR_SOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_eop_in_cnt_t { + ZXIC_UINT32 mr_eop_in_cnt; +} DPP_NPPU_MR_CFG_MR_EOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_sop_out_cnt_t { + ZXIC_UINT32 mr_sop_out_cnt; +} DPP_NPPU_MR_CFG_MR_SOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_eop_out_cnt_t { + ZXIC_UINT32 mr_eop_out_cnt; +} DPP_NPPU_MR_CFG_MR_EOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_in_cnt_t { + ZXIC_UINT32 mr_cos0_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_in_cnt_t { + ZXIC_UINT32 mr_cos1_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_in_cnt_t { + ZXIC_UINT32 mr_cos2_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_in_cnt_t { + ZXIC_UINT32 mr_cos3_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_out_cnt_t { + ZXIC_UINT32 mr_cos0_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_out_cnt_t { + ZXIC_UINT32 mr_cos1_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_out_cnt_t { + ZXIC_UINT32 mr_cos2_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_out_cnt_t { + ZXIC_UINT32 mr_cos3_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_err_in_cnt_t { + ZXIC_UINT32 mr_err_in_cnt; +} DPP_NPPU_MR_CFG_MR_ERR_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_sop_in_cnt_t { + ZXIC_UINT32 
mr_cos0_sop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_SOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_eop_in_cnt_t { + ZXIC_UINT32 mr_cos0_eop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_EOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_sop_in_cnt_t { + ZXIC_UINT32 mr_cos1_sop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_SOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_eop_in_cnt_t { + ZXIC_UINT32 mr_cos1_eop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_EOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_sop_in_cnt_t { + ZXIC_UINT32 mr_cos2_sop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_SOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_eop_in_cnt_t { + ZXIC_UINT32 mr_cos2_eop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_EOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_sop_in_cnt_t { + ZXIC_UINT32 mr_cos3_sop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_SOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_eop_in_cnt_t { + ZXIC_UINT32 mr_cos3_eop_in_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_EOP_IN_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_in_err_cnt_t { + ZXIC_UINT32 mr_cos0_in_err_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_IN_ERR_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_in_err_cnt_t { + ZXIC_UINT32 mr_cos1_in_err_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_IN_ERR_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_in_err_cnt_t { + ZXIC_UINT32 mr_cos2_in_err_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_IN_ERR_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_in_err_cnt_t { + ZXIC_UINT32 mr_cos3_in_err_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_IN_ERR_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_sop_out_cnt_t { + ZXIC_UINT32 mr_cos0_sop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_SOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos0_eop_out_cnt_t { + ZXIC_UINT32 mr_cos0_eop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS0_EOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_sop_out_cnt_t { + ZXIC_UINT32 mr_cos1_sop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_SOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos1_eop_out_cnt_t { + ZXIC_UINT32 
mr_cos1_eop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS1_EOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_sop_out_cnt_t { + ZXIC_UINT32 mr_cos2_sop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_SOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos2_eop_out_cnt_t { + ZXIC_UINT32 mr_cos2_eop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS2_EOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_sop_out_cnt_t { + ZXIC_UINT32 mr_cos3_sop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_SOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_cos3_eop_out_cnt_t { + ZXIC_UINT32 mr_cos3_eop_out_cnt; +} DPP_NPPU_MR_CFG_MR_COS3_EOP_OUT_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_mlt_unvld_cnt_t { + ZXIC_UINT32 mr_mlt_unvld_cnt; +} DPP_NPPU_MR_CFG_MR_MLT_UNVLD_CNT_T; + +typedef struct dpp_nppu_mr_cfg_mr_sop_eop_match_cfg_t { + ZXIC_UINT32 mr_sop_eop_macth_en; + ZXIC_UINT32 mr_sop_eop_macth_dicard_th; +} DPP_NPPU_MR_CFG_MR_SOP_EOP_MATCH_CFG_T; + +typedef struct dpp_nppu_mr_cfg_mr_mlt_unvld_mgid_t { + ZXIC_UINT32 mr_mlt_unvld_mgid; +} DPP_NPPU_MR_CFG_MR_MLT_UNVLD_MGID_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_fifo_th_1_t { + ZXIC_UINT32 cfg_sch_fifo7_fc_th; + ZXIC_UINT32 cfg_sch_fifo6_fc_th; + ZXIC_UINT32 cfg_sch_fifo5_fc_th; + ZXIC_UINT32 cfg_sch_fifo4_fc_th; +} DPP_NPPU_PKTRX_CFG_ISCH_FIFO_TH_1_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_fifo_th_2_t { + ZXIC_UINT32 cfg_sch_fifo3_drop_th; + ZXIC_UINT32 cfg_sch_fifo1_drop_th; + ZXIC_UINT32 cfg_sch_fifo0_drop_th; + ZXIC_UINT32 cfg_sch_fifo8_fc_th; +} DPP_NPPU_PKTRX_CFG_ISCH_FIFO_TH_2_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_fifo_th_3_t { + ZXIC_UINT32 cfg_sch_fifo6_drop_th; + ZXIC_UINT32 cfg_sch_fifo5_drop_th; + ZXIC_UINT32 cfg_sch_fifo4_drop_th; + ZXIC_UINT32 cfg_sch_fifo2_drop_th; +} DPP_NPPU_PKTRX_CFG_ISCH_FIFO_TH_3_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_fifo_th_4_t { + ZXIC_UINT32 cfg_sch_fifo9_fc_th; + ZXIC_UINT32 cfg_sch_fifo9_drop_th; + ZXIC_UINT32 cfg_sch_fifo8_drop_th; + ZXIC_UINT32 cfg_sch_fifo7_drop_th; +} 
DPP_NPPU_PKTRX_CFG_ISCH_FIFO_TH_4_T; + +typedef struct dpp_nppu_pktrx_cfg_isch_cfg_0_t { + ZXIC_UINT32 cfg_sch_wrr1_weight1; +} DPP_NPPU_PKTRX_CFG_ISCH_CFG_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_ex_tpid_0_t { + ZXIC_UINT32 cfg_type0; + ZXIC_UINT32 cfg_type1; +} DPP_NPPU_PKTRX_CFG_HDU_EX_TPID_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_ex_tpid_1_t { + ZXIC_UINT32 cfg_type2; + ZXIC_UINT32 cfg_type3; +} DPP_NPPU_PKTRX_CFG_HDU_EX_TPID_1_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_int_tpid_0_t { + ZXIC_UINT32 cfg_inner_type0; + ZXIC_UINT32 cfg_inner_type1; +} DPP_NPPU_PKTRX_CFG_HDU_INT_TPID_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_int_tpid_1_t { + ZXIC_UINT32 cfg_inner_type2; + ZXIC_UINT32 cfg_inner_type3; +} DPP_NPPU_PKTRX_CFG_HDU_INT_TPID_1_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_hdlc_0_t { + ZXIC_UINT32 hdlc_cfg0_type; + ZXIC_UINT32 hdlc_cfg1_type; +} DPP_NPPU_PKTRX_CFG_HDU_HDLC_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_hdlc_1_t { + ZXIC_UINT32 hdlc_cfg2_type; + ZXIC_UINT32 hdlc_cfg3_type; +} DPP_NPPU_PKTRX_CFG_HDU_HDLC_1_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l3type_0_t { + ZXIC_UINT32 cfg_l3_type0; + ZXIC_UINT32 cfg_l3_type1; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l3type_1_t { + ZXIC_UINT32 cfg_l3_type2; + ZXIC_UINT32 cfg_l3_type3; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_1_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l3type_2_t { + ZXIC_UINT32 cfg_l3_type4; + ZXIC_UINT32 cfg_l3_type5; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_2_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l3type_3_t { + ZXIC_UINT32 cfg_l3_type6; + ZXIC_UINT32 cfg_l3_type7; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_3_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l4type_0_t { + ZXIC_UINT32 cfg_l4_type0; + ZXIC_UINT32 cfg_l4_type1; + ZXIC_UINT32 cfg_l4_type2; + ZXIC_UINT32 cfg_l4_type3; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_0_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l4type_1_t { + ZXIC_UINT32 cfg_l4_type4; + ZXIC_UINT32 
cfg_l4_type5; + ZXIC_UINT32 cfg_l4_type6; + ZXIC_UINT32 cfg_l4_type7; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_1_T; + +typedef struct dpp_nppu_pktrx_cfg_hdu_udf_l4type_2_t { + ZXIC_UINT32 cfg_l4_type8; + ZXIC_UINT32 cfg_l4_type9; + ZXIC_UINT32 cfg_l4_type10; +} DPP_NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_2_T; + +typedef struct dpp_nppu_pktrx_cfg_slot_no_cfg_t { + ZXIC_UINT32 cfg_parser_slot_no; +} DPP_NPPU_PKTRX_CFG_SLOT_NO_CFG_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_int_en_0_t { + ZXIC_UINT32 pktrx_int_en_31; + ZXIC_UINT32 pktrx_int_en_30; + ZXIC_UINT32 pktrx_int_en_29; + ZXIC_UINT32 pktrx_int_en_28; + ZXIC_UINT32 pktrx_int_en_27; + ZXIC_UINT32 pktrx_int_en_26; + ZXIC_UINT32 pktrx_int_en_25; + ZXIC_UINT32 pktrx_int_en_24; + ZXIC_UINT32 pktrx_int_en_23; + ZXIC_UINT32 pktrx_int_en_22; + ZXIC_UINT32 pktrx_int_en_21; + ZXIC_UINT32 pktrx_int_en_20; + ZXIC_UINT32 pktrx_int_en_19; + ZXIC_UINT32 pktrx_int_en_18; + ZXIC_UINT32 pktrx_int_en_17; + ZXIC_UINT32 pktrx_int_en_16; + ZXIC_UINT32 pktrx_int_en_15; + ZXIC_UINT32 pktrx_int_en_14; + ZXIC_UINT32 pktrx_int_en_13; + ZXIC_UINT32 pktrx_int_en_12; + ZXIC_UINT32 pktrx_int_en_11; + ZXIC_UINT32 pktrx_int_en_10; + ZXIC_UINT32 pktrx_int_en_9; + ZXIC_UINT32 pktrx_int_en_8; + ZXIC_UINT32 pktrx_int_en_7; + ZXIC_UINT32 pktrx_int_en_6; + ZXIC_UINT32 pktrx_int_en_5; + ZXIC_UINT32 pktrx_int_en_4; + ZXIC_UINT32 pktrx_int_en_3; + ZXIC_UINT32 pktrx_int_en_2; + ZXIC_UINT32 pktrx_int_en_1; + ZXIC_UINT32 pktrx_int_en_0; +} DPP_NPPU_PKTRX_CFG_PKTRX_INT_EN_0_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_int_en_1_t { + ZXIC_UINT32 pktrx_int_en_35; + ZXIC_UINT32 pktrx_int_en_34; + ZXIC_UINT32 pktrx_int_en_33; + ZXIC_UINT32 pktrx_int_en_32; +} DPP_NPPU_PKTRX_CFG_PKTRX_INT_EN_1_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_int_mask_0_t { + ZXIC_UINT32 pktrx_int_mask_31; + ZXIC_UINT32 pktrx_int_mask_30; + ZXIC_UINT32 pktrx_int_mask_29; + ZXIC_UINT32 pktrx_int_mask_28; + ZXIC_UINT32 pktrx_int_mask_27; + ZXIC_UINT32 pktrx_int_mask_26; + ZXIC_UINT32 
pktrx_int_mask_25; + ZXIC_UINT32 pktrx_int_mask_24; + ZXIC_UINT32 pktrx_int_mask_23; + ZXIC_UINT32 pktrx_int_mask_22; + ZXIC_UINT32 pktrx_int_mask_21; + ZXIC_UINT32 pktrx_int_mask_20; + ZXIC_UINT32 pktrx_int_mask_19; + ZXIC_UINT32 pktrx_int_mask_18; + ZXIC_UINT32 pktrx_int_mask_17; + ZXIC_UINT32 pktrx_int_mask_16; + ZXIC_UINT32 pktrx_int_mask_15; + ZXIC_UINT32 pktrx_int_mask_14; + ZXIC_UINT32 pktrx_int_mask_13; + ZXIC_UINT32 pktrx_int_mask_12; + ZXIC_UINT32 pktrx_int_mask_11; + ZXIC_UINT32 pktrx_int_mask_10; + ZXIC_UINT32 pktrx_int_mask_9; + ZXIC_UINT32 pktrx_int_mask_8; + ZXIC_UINT32 pktrx_int_mask_7; + ZXIC_UINT32 pktrx_int_mask_6; + ZXIC_UINT32 pktrx_int_mask_5; + ZXIC_UINT32 pktrx_int_mask_4; + ZXIC_UINT32 pktrx_int_mask_3; + ZXIC_UINT32 pktrx_int_mask_2; + ZXIC_UINT32 pktrx_int_mask_1; + ZXIC_UINT32 pktrx_int_mask_0; +} DPP_NPPU_PKTRX_CFG_PKTRX_INT_MASK_0_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_int_mask_1_t { + ZXIC_UINT32 pktrx_int_mask_35; + ZXIC_UINT32 pktrx_int_mask_34; + ZXIC_UINT32 pktrx_int_mask_33; + ZXIC_UINT32 pktrx_int_mask_32; +} DPP_NPPU_PKTRX_CFG_PKTRX_INT_MASK_1_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_int_status_t { + ZXIC_UINT32 int_status; +} DPP_NPPU_PKTRX_CFG_PKTRX_INT_STATUS_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_port_rdy0_t { + ZXIC_UINT32 pktrx_trpgrx_r1_rdy; + ZXIC_UINT32 pktrx_trpgrx_r2_rdy; +} DPP_NPPU_PKTRX_CFG_PKTRX_PORT_RDY0_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy0_t { + ZXIC_UINT32 pktrx_trpgrx_r1_pfc_rdy_0; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY0_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy1_t { + ZXIC_UINT32 pktrx_trpgrx_r1_pfc_rdy_1; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY1_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy2_t { + ZXIC_UINT32 pktrx_trpgrx_r1_pfc_rdy_2; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY2_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy3_t { + ZXIC_UINT32 pktrx_trpgrx_r2_pfc_rdy_3; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY3_T; + 
+typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy4_t { + ZXIC_UINT32 pktrx_trpgrx_r2_pfc_rdy_4; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY4_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy5_t { + ZXIC_UINT32 pktrx_trpgrx_r2_pfc_rdy_5; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY5_T; + +typedef struct dpp_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy6_t { + ZXIC_UINT32 pktrx_trpgrx_r2_pfc_rdy_6; +} DPP_NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY6_T; + +typedef struct dpp_nppu_pktrx_cfg_cfg_port_l2_offset_mode_t { + ZXIC_UINT32 cfg_port_l2_offset_mode; +} DPP_NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_T; + +typedef struct dpp_nppu_idma_cfg_int_ram_en_t { + ZXIC_UINT32 phy_sts_parity_err; + ZXIC_UINT32 ptr_buf_parity_err; +} DPP_NPPU_IDMA_CFG_INT_RAM_EN_T; + +typedef struct dpp_nppu_idma_cfg_int_ram_mask_t { + ZXIC_UINT32 phy_sts_parity_err; + ZXIC_UINT32 ptr_buf_parity_err; +} DPP_NPPU_IDMA_CFG_INT_RAM_MASK_T; + +typedef struct dpp_nppu_idma_cfg_int_ram_status_t { + ZXIC_UINT32 phy_sts_parity_err; + ZXIC_UINT32 ptr_buf_parity_err; +} DPP_NPPU_IDMA_CFG_INT_RAM_STATUS_T; + +typedef struct dpp_nppu_idma_cfg_subsys_int_mask_flag_t { + ZXIC_UINT32 subsys_int_mask_flag; +} DPP_NPPU_IDMA_CFG_SUBSYS_INT_MASK_FLAG_T; + +typedef struct dpp_nppu_idma_cfg_subsys_int_unmask_flag_t { + ZXIC_UINT32 subsys_int_unmask_flag; +} DPP_NPPU_IDMA_CFG_SUBSYS_INT_UNMASK_FLAG_T; + +typedef struct dpp_nppu_idma_cfg_debug_cnt_rdclr_mode_t { + ZXIC_UINT32 debug_cnt_rdclr_mode; +} DPP_NPPU_IDMA_CFG_DEBUG_CNT_RDCLR_MODE_T; + +typedef struct dpp_nppu_pbu_cfg_int_ram_en0_t { + ZXIC_UINT32 int_ram_en_31; + ZXIC_UINT32 int_ram_en_30; + ZXIC_UINT32 int_ram_en_29; + ZXIC_UINT32 int_ram_en_28; + ZXIC_UINT32 int_ram_en_27; + ZXIC_UINT32 int_ram_en_26; + ZXIC_UINT32 int_ram_en_25; + ZXIC_UINT32 int_ram_en_24; + ZXIC_UINT32 int_ram_en_23; + ZXIC_UINT32 int_ram_en_22; + ZXIC_UINT32 int_ram_en_21; + ZXIC_UINT32 int_ram_en_20; + ZXIC_UINT32 int_ram_en_19; + ZXIC_UINT32 int_ram_en_18; + ZXIC_UINT32 int_ram_en_17; + ZXIC_UINT32 
int_ram_en_16; + ZXIC_UINT32 int_ram_en_15; + ZXIC_UINT32 int_ram_en_14; + ZXIC_UINT32 int_ram_en_13; + ZXIC_UINT32 int_ram_en_12; + ZXIC_UINT32 int_ram_en_11; + ZXIC_UINT32 int_ram_en_10; + ZXIC_UINT32 int_ram_en_9; + ZXIC_UINT32 int_ram_en_8; + ZXIC_UINT32 int_ram_en_7; + ZXIC_UINT32 int_ram_en_6; + ZXIC_UINT32 int_ram_en_5; + ZXIC_UINT32 int_ram_en_4; + ZXIC_UINT32 int_ram_en_3; + ZXIC_UINT32 int_ram_en_2; + ZXIC_UINT32 int_ram_en_1; + ZXIC_UINT32 int_ram_en_0; +} DPP_NPPU_PBU_CFG_INT_RAM_EN0_T; + +typedef struct dpp_nppu_pbu_cfg_int_ram_mask0_t { + ZXIC_UINT32 int_ram_mask_31; + ZXIC_UINT32 int_ram_mask_30; + ZXIC_UINT32 int_ram_mask_29; + ZXIC_UINT32 int_ram_mask_28; + ZXIC_UINT32 int_ram_mask_27; + ZXIC_UINT32 int_ram_mask_26; + ZXIC_UINT32 int_ram_mask_25; + ZXIC_UINT32 int_ram_mask_24; + ZXIC_UINT32 int_ram_mask_23; + ZXIC_UINT32 int_ram_mask_22; + ZXIC_UINT32 int_ram_mask_21; + ZXIC_UINT32 int_ram_mask_20; + ZXIC_UINT32 int_ram_mask_19; + ZXIC_UINT32 int_ram_mask_18; + ZXIC_UINT32 int_ram_mask_17; + ZXIC_UINT32 int_ram_mask_16; + ZXIC_UINT32 int_ram_mask_15; + ZXIC_UINT32 int_ram_mask_14; + ZXIC_UINT32 int_ram_mask_13; + ZXIC_UINT32 int_ram_mask_12; + ZXIC_UINT32 int_ram_mask_11; + ZXIC_UINT32 int_ram_mask_10; + ZXIC_UINT32 int_ram_mask_9; + ZXIC_UINT32 int_ram_mask_8; + ZXIC_UINT32 int_ram_mask_7; + ZXIC_UINT32 int_ram_mask_6; + ZXIC_UINT32 int_ram_mask_5; + ZXIC_UINT32 int_ram_mask_4; + ZXIC_UINT32 int_ram_mask_3; + ZXIC_UINT32 int_ram_mask_2; + ZXIC_UINT32 int_ram_mask_1; + ZXIC_UINT32 int_ram_mask_0; +} DPP_NPPU_PBU_CFG_INT_RAM_MASK0_T; + +typedef struct dpp_nppu_pbu_cfg_int_ram_status0_t { + ZXIC_UINT32 int_ram_status_31; + ZXIC_UINT32 int_ram_status_30; + ZXIC_UINT32 int_ram_status_29; + ZXIC_UINT32 int_ram_status_28; + ZXIC_UINT32 int_ram_status_27; + ZXIC_UINT32 int_ram_status_26; + ZXIC_UINT32 int_ram_status_25; + ZXIC_UINT32 int_ram_status_24; + ZXIC_UINT32 int_ram_status_23; + ZXIC_UINT32 int_ram_status_22; + ZXIC_UINT32 int_ram_status_21; + 
ZXIC_UINT32 int_ram_status_20; + ZXIC_UINT32 int_ram_status_19; + ZXIC_UINT32 int_ram_status_18; + ZXIC_UINT32 int_ram_status_17; + ZXIC_UINT32 int_ram_status_16; + ZXIC_UINT32 int_ram_status_15; + ZXIC_UINT32 int_ram_status_14; + ZXIC_UINT32 int_ram_status_13; + ZXIC_UINT32 int_ram_status_12; + ZXIC_UINT32 int_ram_status_11; + ZXIC_UINT32 int_ram_status_10; + ZXIC_UINT32 int_ram_status_9; + ZXIC_UINT32 int_ram_status_8; + ZXIC_UINT32 int_ram_status_7; + ZXIC_UINT32 int_ram_status_6; + ZXIC_UINT32 int_ram_status_5; + ZXIC_UINT32 int_ram_status_4; + ZXIC_UINT32 int_ram_status_3; + ZXIC_UINT32 int_ram_status_2; + ZXIC_UINT32 int_ram_status_1; + ZXIC_UINT32 int_ram_status_0; +} DPP_NPPU_PBU_CFG_INT_RAM_STATUS0_T; + +typedef struct dpp_nppu_pbu_cfg_int_fifo_en0_t { + ZXIC_UINT32 int_fifo_en_31; + ZXIC_UINT32 int_fifo_en_30; + ZXIC_UINT32 int_fifo_en_29; + ZXIC_UINT32 int_fifo_en_28; + ZXIC_UINT32 int_fifo_en_27; + ZXIC_UINT32 int_fifo_en_26; + ZXIC_UINT32 int_fifo_en_25; + ZXIC_UINT32 int_fifo_en_24; + ZXIC_UINT32 int_fifo_en_23; + ZXIC_UINT32 int_fifo_en_22; + ZXIC_UINT32 int_fifo_en_21; + ZXIC_UINT32 int_fifo_en_20; + ZXIC_UINT32 int_fifo_en_19; + ZXIC_UINT32 int_fifo_en_18; + ZXIC_UINT32 int_fifo_en_17; + ZXIC_UINT32 int_fifo_en_16; + ZXIC_UINT32 int_fifo_en_15; + ZXIC_UINT32 int_fifo_en_14; + ZXIC_UINT32 int_fifo_en_13; + ZXIC_UINT32 int_fifo_en_12; + ZXIC_UINT32 int_fifo_en_11; + ZXIC_UINT32 int_fifo_en_10; + ZXIC_UINT32 int_fifo_en_9; + ZXIC_UINT32 int_fifo_en_8; + ZXIC_UINT32 int_fifo_en_7; + ZXIC_UINT32 int_fifo_en_6; + ZXIC_UINT32 int_fifo_en_5; + ZXIC_UINT32 int_fifo_en_4; + ZXIC_UINT32 int_fifo_en_3; + ZXIC_UINT32 int_fifo_en_2; + ZXIC_UINT32 int_fifo_en_1; + ZXIC_UINT32 int_fifo_en_0; +} DPP_NPPU_PBU_CFG_INT_FIFO_EN0_T; + +typedef struct dpp_nppu_pbu_cfg_int_fifo_en1_t { + ZXIC_UINT32 int_fifo_en_35; + ZXIC_UINT32 int_fifo_en_34; + ZXIC_UINT32 int_fifo_en_33; + ZXIC_UINT32 int_fifo_en_32; +} DPP_NPPU_PBU_CFG_INT_FIFO_EN1_T; + +typedef struct 
dpp_nppu_pbu_cfg_int_fifo_mask0_t { + ZXIC_UINT32 int_fifo_mask_31; + ZXIC_UINT32 int_fifo_mask_30; + ZXIC_UINT32 int_fifo_mask_29; + ZXIC_UINT32 int_fifo_mask_28; + ZXIC_UINT32 int_fifo_mask_27; + ZXIC_UINT32 int_fifo_mask_26; + ZXIC_UINT32 int_fifo_mask_25; + ZXIC_UINT32 int_fifo_mask_24; + ZXIC_UINT32 int_fifo_mask_23; + ZXIC_UINT32 int_fifo_mask_22; + ZXIC_UINT32 int_fifo_mask_21; + ZXIC_UINT32 int_fifo_mask_20; + ZXIC_UINT32 int_fifo_mask_19; + ZXIC_UINT32 int_fifo_mask_18; + ZXIC_UINT32 int_fifo_mask_17; + ZXIC_UINT32 int_fifo_mask_16; + ZXIC_UINT32 int_fifo_mask_15; + ZXIC_UINT32 int_fifo_mask_14; + ZXIC_UINT32 int_fifo_mask_13; + ZXIC_UINT32 int_fifo_mask_12; + ZXIC_UINT32 int_fifo_mask_11; + ZXIC_UINT32 int_fifo_mask_10; + ZXIC_UINT32 int_fifo_mask_9; + ZXIC_UINT32 int_fifo_mask_8; + ZXIC_UINT32 int_fifo_mask_7; + ZXIC_UINT32 int_fifo_mask_6; + ZXIC_UINT32 int_fifo_mask_5; + ZXIC_UINT32 int_fifo_mask_4; + ZXIC_UINT32 int_fifo_mask_3; + ZXIC_UINT32 int_fifo_mask_2; + ZXIC_UINT32 int_fifo_mask_1; + ZXIC_UINT32 int_fifo_mask_0; +} DPP_NPPU_PBU_CFG_INT_FIFO_MASK0_T; + +typedef struct dpp_nppu_pbu_cfg_int_fifo_mask1_t { + ZXIC_UINT32 int_fifo_mask_35; + ZXIC_UINT32 int_fifo_mask_34; + ZXIC_UINT32 int_fifo_mask_33; + ZXIC_UINT32 int_fifo_mask_32; +} DPP_NPPU_PBU_CFG_INT_FIFO_MASK1_T; + +typedef struct dpp_nppu_pbu_cfg_int_fifo_status0_t { + ZXIC_UINT32 int_fifo_status_31; + ZXIC_UINT32 int_fifo_status_30; + ZXIC_UINT32 int_fifo_status_29; + ZXIC_UINT32 int_fifo_status_28; + ZXIC_UINT32 int_fifo_status_27; + ZXIC_UINT32 int_fifo_status_26; + ZXIC_UINT32 int_fifo_status_25; + ZXIC_UINT32 int_fifo_status_24; + ZXIC_UINT32 int_fifo_status_23; + ZXIC_UINT32 int_fifo_status_22; + ZXIC_UINT32 int_fifo_status_21; + ZXIC_UINT32 int_fifo_status_20; + ZXIC_UINT32 int_fifo_status_19; + ZXIC_UINT32 int_fifo_status_18; + ZXIC_UINT32 int_fifo_status_17; + ZXIC_UINT32 int_fifo_status_16; + ZXIC_UINT32 int_fifo_status_15; + ZXIC_UINT32 int_fifo_status_14; + ZXIC_UINT32 
int_fifo_status_13; + ZXIC_UINT32 int_fifo_status_12; + ZXIC_UINT32 int_fifo_status_11; + ZXIC_UINT32 int_fifo_status_10; + ZXIC_UINT32 int_fifo_status_9; + ZXIC_UINT32 int_fifo_status_8; + ZXIC_UINT32 int_fifo_status_7; + ZXIC_UINT32 int_fifo_status_6; + ZXIC_UINT32 int_fifo_status_5; + ZXIC_UINT32 int_fifo_status_4; + ZXIC_UINT32 int_fifo_status_3; + ZXIC_UINT32 int_fifo_status_2; + ZXIC_UINT32 int_fifo_status_1; + ZXIC_UINT32 int_fifo_status_0; +} DPP_NPPU_PBU_CFG_INT_FIFO_STATUS0_T; + +typedef struct dpp_nppu_pbu_cfg_int_fifo_status1_t { + ZXIC_UINT32 int_fifo_status_35; + ZXIC_UINT32 int_fifo_status_34; + ZXIC_UINT32 int_fifo_status_33; + ZXIC_UINT32 int_fifo_status_32; +} DPP_NPPU_PBU_CFG_INT_FIFO_STATUS1_T; + +typedef struct dpp_nppu_pbu_cfg_subsys_int_mask_flag_t { + ZXIC_UINT32 subsys_int_mask_flag; +} DPP_NPPU_PBU_CFG_SUBSYS_INT_MASK_FLAG_T; + +typedef struct dpp_nppu_pbu_cfg_subsys_int_unmask_flag_t { + ZXIC_UINT32 subsys_int_unmask_flag; +} DPP_NPPU_PBU_CFG_SUBSYS_INT_UNMASK_FLAG_T; + +typedef struct dpp_nppu_pbu_cfg_sa_ip_en_t { + ZXIC_UINT32 sa_ip_en; +} DPP_NPPU_PBU_CFG_SA_IP_EN_T; + +typedef struct dpp_nppu_pbu_cfg_debug_cnt_rdclr_mode_t { + ZXIC_UINT32 debug_cnt_rdclr_mode; +} DPP_NPPU_PBU_CFG_DEBUG_CNT_RDCLR_MODE_T; + +typedef struct dpp_nppu_pbu_cfg_fptr_fifo_aful_assert_cfg_t { + ZXIC_UINT32 fptr_fifo_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_FPTR_FIFO_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_fptr_fifo_aful_negate_cfg_t { + ZXIC_UINT32 fptr_fifo_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_FPTR_FIFO_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_pf_fifo_aful_assert_cfg_t { + ZXIC_UINT32 pf_fifo_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_PF_FIFO_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_pf_fifo_aful_negate_cfg_t { + ZXIC_UINT32 pf_fifo_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_PF_FIFO_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_pf_fifo_aept_assert_cfg_t { + ZXIC_UINT32 pf_fifo_aept_assert_cfg; +} 
DPP_NPPU_PBU_CFG_PF_FIFO_AEPT_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_pf_fifo_aept_negate_cfg_t { + ZXIC_UINT32 pf_fifo_aept_negate_cfg; +} DPP_NPPU_PBU_CFG_PF_FIFO_AEPT_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_wb_aful_assert_cfg_t { + ZXIC_UINT32 wb_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_WB_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_wb_aful_negate_cfg_t { + ZXIC_UINT32 wb_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_WB_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_se_key_aful_assert_cfg_t { + ZXIC_UINT32 se_key_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_SE_KEY_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_se_aful_assert_cfg_t { + ZXIC_UINT32 ifbrd_se_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_SE_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_se_aful_negate_cfg_t { + ZXIC_UINT32 ifbrd_se_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_SE_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_odma_aful_assert_cfg_t { + ZXIC_UINT32 ifbrd_odma_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_ODMA_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_odma_aful_negate_cfg_t { + ZXIC_UINT32 ifbrd_odma_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_ODMA_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_ppu_aful_assert_cfg_t { + ZXIC_UINT32 ifbrd_ppu_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_PPU_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_ifbrd_ppu_aful_negate_cfg_t { + ZXIC_UINT32 ifbrd_ppu_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_IFBRD_PPU_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_mc_logic_aful_assert_cfg_t { + ZXIC_UINT32 mc_logic_aful_assert_cfg; +} DPP_NPPU_PBU_CFG_MC_LOGIC_AFUL_ASSERT_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_mc_logic_aful_negate_cfg_t { + ZXIC_UINT32 mc_logic_aful_negate_cfg; +} DPP_NPPU_PBU_CFG_MC_LOGIC_AFUL_NEGATE_CFG_T; + +typedef struct dpp_nppu_pbu_cfg_mc_logic_diff_t { + ZXIC_UINT32 mc_logic_diff; +} DPP_NPPU_PBU_CFG_MC_LOGIC_DIFF_T; + +typedef struct 
dpp_nppu_pbu_cfg_cfg_peak_port_cnt_clr_t { + ZXIC_UINT32 cfg_peak_port_cnt_clr; +} DPP_NPPU_PBU_CFG_CFG_PEAK_PORT_CNT_CLR_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_crdt_th_t { + ZXIC_UINT32 ftm_crdt_port_cng_th; + ZXIC_UINT32 ftm_crdt_port_th; +} DPP_NPPU_PBU_CFG_ALL_FTM_CRDT_TH_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_link_th_01_t { + ZXIC_UINT32 total_congest_th1; + ZXIC_UINT32 total_congest_th0; +} DPP_NPPU_PBU_CFG_ALL_FTM_LINK_TH_01_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_link_th_23_t { + ZXIC_UINT32 total_congest_th3; + ZXIC_UINT32 total_congest_th2; +} DPP_NPPU_PBU_CFG_ALL_FTM_LINK_TH_23_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_link_th_45_t { + ZXIC_UINT32 total_congest_th5; + ZXIC_UINT32 total_congest_th4; +} DPP_NPPU_PBU_CFG_ALL_FTM_LINK_TH_45_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_link_th_6_t { + ZXIC_UINT32 total_congest_th6; +} DPP_NPPU_PBU_CFG_ALL_FTM_LINK_TH_6_T; + +typedef struct dpp_nppu_pbu_cfg_all_ftm_total_congest_th_t { + ZXIC_UINT32 all_ftm_total_congest_th; +} DPP_NPPU_PBU_CFG_ALL_FTM_TOTAL_CONGEST_TH_T; + +typedef struct dpp_nppu_pbu_cfg_cfg_crdt_mode_t { + ZXIC_UINT32 cfg_crdt_mode; +} DPP_NPPU_PBU_CFG_CFG_CRDT_MODE_T; + +typedef struct dpp_nppu_pbu_cfg_cfg_pfc_rdy_high_time_t { + ZXIC_UINT32 cfg_pfc_rdy_high_time; +} DPP_NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIME_T; + +typedef struct dpp_nppu_pbu_cfg_cfg_pfc_rdy_low_time_t { + ZXIC_UINT32 cfg_pfc_rdy_low_time; +} DPP_NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIME_T; + +typedef struct dpp_nppu_pbu_stat_pbu_fc_rdy_t { + ZXIC_UINT32 pbu_oam_send_fc_rdy; + ZXIC_UINT32 pbu_odma_fc_rdy; + ZXIC_UINT32 pbu_tm_fc_rdy; + ZXIC_UINT32 pbu_idma_cos_rdy; +} DPP_NPPU_PBU_STAT_PBU_FC_RDY_T; + +typedef struct dpp_nppu_pbu_stat_pbu_lif_group0_rdy0_t { + ZXIC_UINT32 pbu_ipg1_rdy; + ZXIC_UINT32 pbu_ipg0_rdy; + ZXIC_UINT32 pbu_trpgrx_xge_rdy; + ZXIC_UINT32 pbu_trpgrx_cge1_rdy; + ZXIC_UINT32 pbu_trpgrx_cge0_rdy; +} DPP_NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY0_T; + +typedef struct 
dpp_nppu_pbu_stat_pbu_lif_group0_rdy1_t { + ZXIC_UINT32 pbu_lif_group0_rdy1; +} DPP_NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY1_T; + +typedef struct dpp_nppu_pbu_stat_pbu_lif_group1_rdy_t { + ZXIC_UINT32 pbu_lif_group1_rdy1; +} DPP_NPPU_PBU_STAT_PBU_LIF_GROUP1_RDY_T; + +typedef struct dpp_nppu_pbu_stat_pbu_lif_group0_pfc_rdy_t { + ZXIC_UINT32 pbu_lif_group0_pfc_rdy; +} DPP_NPPU_PBU_STAT_PBU_LIF_GROUP0_PFC_RDY_T; + +typedef struct dpp_nppu_pbu_stat_pbu_lif_group1_pfc_rdy_t { + ZXIC_UINT32 pbu_lif_group1_pfc_rdy; +} DPP_NPPU_PBU_STAT_PBU_LIF_GROUP1_PFC_RDY_T; + +typedef struct dpp_nppu_pbu_stat_pbu_sa_port_rdy_0_31_t { + ZXIC_UINT32 pbu_sa_port_rdy_0_31; +} DPP_NPPU_PBU_STAT_PBU_SA_PORT_RDY_0_31_T; + +typedef struct dpp_nppu_pbu_stat_pbu_sa_port_rdy_32_50_t { + ZXIC_UINT32 pbu_sa_port_rdy_32_50; +} DPP_NPPU_PBU_STAT_PBU_SA_PORT_RDY_32_50_T; + +typedef struct dpp_nppu_pbu_stat_pbu_pktrx_mr_pfc_rdy_t { + ZXIC_UINT32 pbu_pktrx_mr_pfc_rdy; +} DPP_NPPU_PBU_STAT_PBU_PKTRX_MR_PFC_RDY_T; + +typedef struct dpp_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_0_31_t { + ZXIC_UINT32 pbu_ftm_crdt_port_rdy_0_31; +} DPP_NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_0_31_T; + +typedef struct dpp_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_32_47_t { + ZXIC_UINT32 pbu_ftm_crdt_port_rdy_32_47; +} DPP_NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_32_47_T; + +typedef struct dpp_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_0_31_t { + ZXIC_UINT32 pbu_ftm_crdt_port_cng_rdy_0_31; +} DPP_NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_0_31_T; + +typedef struct dpp_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_32_47_t { + ZXIC_UINT32 pbu_ftm_crdt_port_cng_rdy_32_47; +} DPP_NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_32_47_T; + +typedef struct dpp_nppu_pbu_stat_pbu_ftm_crdt_sys_info_t { + ZXIC_UINT32 pbu_ftm_crdt_sys_info; +} DPP_NPPU_PBU_STAT_PBU_FTM_CRDT_SYS_INFO_T; + +typedef struct dpp_nppu_isu_cfg_weight_normal_mc_t { + ZXIC_UINT32 weight_normal_mc; +} DPP_NPPU_ISU_CFG_WEIGHT_NORMAL_MC_T; + +typedef struct dpp_nppu_isu_cfg_weight_sa_mc_t { + ZXIC_UINT32 
weight_sa_mc; +} DPP_NPPU_ISU_CFG_WEIGHT_SA_MC_T; + +typedef struct dpp_nppu_isu_cfg_weight_etm_t { + ZXIC_UINT32 weight_etm; +} DPP_NPPU_ISU_CFG_WEIGHT_ETM_T; + +typedef struct dpp_nppu_isu_cfg_weight_lp_mc_t { + ZXIC_UINT32 weight_lp_mc; +} DPP_NPPU_ISU_CFG_WEIGHT_LP_MC_T; + +typedef struct dpp_nppu_isu_cfg_weight_oam_t { + ZXIC_UINT32 weight_oam; +} DPP_NPPU_ISU_CFG_WEIGHT_OAM_T; + +typedef struct dpp_nppu_isu_cfg_weight_lif_ctrl1_t { + ZXIC_UINT32 weight_lif_ctrl1; +} DPP_NPPU_ISU_CFG_WEIGHT_LIF_CTRL1_T; + +typedef struct dpp_nppu_isu_cfg_weight_lif_ctrl2_t { + ZXIC_UINT32 weight_lif_ctrl2; +} DPP_NPPU_ISU_CFG_WEIGHT_LIF_CTRL2_T; + +typedef struct dpp_nppu_isu_cfg_ecc_bypass_read_t { + ZXIC_UINT32 eccbypass; +} DPP_NPPU_ISU_CFG_ECC_BYPASS_READ_T; + +typedef struct dpp_nppu_isu_cfg_isu_int_mask_t { + ZXIC_UINT32 isu_int_mask; +} DPP_NPPU_ISU_CFG_ISU_INT_MASK_T; + +typedef struct dpp_nppu_isu_cfg_cfg_crdt_cycle_t { + ZXIC_UINT32 cfg_cycle; +} DPP_NPPU_ISU_CFG_CFG_CRDT_CYCLE_T; + +typedef struct dpp_nppu_isu_cfg_cfg_crdt_value_t { + ZXIC_UINT32 cfg_value; +} DPP_NPPU_ISU_CFG_CFG_CRDT_VALUE_T; + +typedef struct dpp_nppu_isu_cfg_isu_int_en_t { + ZXIC_UINT32 isu_int_en; +} DPP_NPPU_ISU_CFG_ISU_INT_EN_T; + +typedef struct dpp_nppu_isu_cfg_isu_ppu_fifo_fc_t { + ZXIC_UINT32 isu_ppu_fifo_fc; +} DPP_NPPU_ISU_CFG_ISU_PPU_FIFO_FC_T; + +typedef struct dpp_nppu_isu_cfg_isu_int_status_t { + ZXIC_UINT32 isu_int_status_26; + ZXIC_UINT32 isu_int_status_25; + ZXIC_UINT32 isu_int_status_24; + ZXIC_UINT32 isu_int_status_23; + ZXIC_UINT32 isu_int_status_22; + ZXIC_UINT32 isu_int_status_21; + ZXIC_UINT32 isu_int_status_20; + ZXIC_UINT32 isu_int_status_19; + ZXIC_UINT32 isu_int_status_18; + ZXIC_UINT32 isu_int_status_17; + ZXIC_UINT32 isu_int_status_16; + ZXIC_UINT32 isu_int_status_15; + ZXIC_UINT32 isu_int_status_14; + ZXIC_UINT32 isu_int_status_13; + ZXIC_UINT32 isu_int_status_12; + ZXIC_UINT32 isu_int_status_11; + ZXIC_UINT32 isu_int_status_10; + ZXIC_UINT32 isu_int_status_9; + 
ZXIC_UINT32 isu_int_status_8; + ZXIC_UINT32 isu_int_status_7; + ZXIC_UINT32 isu_int_status_6; + ZXIC_UINT32 isu_int_status_5; + ZXIC_UINT32 isu_int_status_4; + ZXIC_UINT32 isu_int_status_3; + ZXIC_UINT32 isu_int_status_2; + ZXIC_UINT32 isu_int_status_1; + ZXIC_UINT32 isu_int_status_0; +} DPP_NPPU_ISU_CFG_ISU_INT_STATUS_T; + +typedef struct dpp_nppu_isu_cfg_fd_prog_full_assert_cfg_t { + ZXIC_UINT32 fd_prog_full_assert_cfg; +} DPP_NPPU_ISU_CFG_FD_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_nppu_isu_cfg_fd_prog_full_negate_cfg_t { + ZXIC_UINT32 fd_prog_full_negate_cfg; +} DPP_NPPU_ISU_CFG_FD_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_nppu_isu_cfg_lp_prog_full_assert_cfg_t { + ZXIC_UINT32 lp_prog_ept_assert_cfg; +} DPP_NPPU_ISU_CFG_LP_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_nppu_isu_cfg_lp_prog_full_negate_cfg_t { + ZXIC_UINT32 lp_prog_ept_negate_cfg; +} DPP_NPPU_ISU_CFG_LP_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat0_t { + ZXIC_UINT32 debug_cnt_dat0; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT0_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat1_t { + ZXIC_UINT32 debug_cnt_dat1; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT1_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat2_t { + ZXIC_UINT32 debug_cnt_dat2; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT2_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat3_t { + ZXIC_UINT32 debug_cnt_dat3; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT3_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat4_t { + ZXIC_UINT32 debug_cnt_dat4; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT4_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat5_t { + ZXIC_UINT32 debug_cnt_dat5; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT5_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat6_t { + ZXIC_UINT32 debug_cnt_dat6; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT6_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat7_t { + ZXIC_UINT32 debug_cnt_dat7; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT7_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat8_t { + ZXIC_UINT32 debug_cnt_dat8; +} 
DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT8_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat9_t { + ZXIC_UINT32 debug_cnt_dat9; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT9_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat10_t { + ZXIC_UINT32 debug_cnt_dat10; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT10_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat11_t { + ZXIC_UINT32 debug_cnt_dat11; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT11_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat12_t { + ZXIC_UINT32 debug_cnt_dat12; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT12_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat13_t { + ZXIC_UINT32 debug_cnt_dat13; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT13_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat14_t { + ZXIC_UINT32 debug_cnt_dat14; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT14_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat15_t { + ZXIC_UINT32 debug_cnt_dat15; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT15_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat16_t { + ZXIC_UINT32 debug_cnt_dat16; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT16_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat17_t { + ZXIC_UINT32 debug_cnt_dat17; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT17_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat18_t { + ZXIC_UINT32 debug_cnt_dat18; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT18_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_dat19_t { + ZXIC_UINT32 debug_cnt_dat18; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_DAT19_T; + +typedef struct dpp_nppu_isu_stat_debug_cnt_cfg_t { + ZXIC_UINT32 debug_cnt_ovf_mode; + ZXIC_UINT32 debug_cnt_rdclr_mode; + ZXIC_UINT32 user_cnt_value; +} DPP_NPPU_ISU_STAT_DEBUG_CNT_CFG_T; + +typedef struct dpp_nppu_odma_cfg_exsa_tdm_offset_t { + ZXIC_UINT32 exsa_tdm_offset; +} DPP_NPPU_ODMA_CFG_EXSA_TDM_OFFSET_T; + +typedef struct dpp_nppu_odma_cfg_ecc_bypass_readt_t { + ZXIC_UINT32 ecc_bypass_read; +} DPP_NPPU_ODMA_CFG_ECC_BYPASS_READT_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_en_0_t { + ZXIC_UINT32 odma_int_en_31; + ZXIC_UINT32 odma_int_en_30; + 
ZXIC_UINT32 odma_int_en_29; + ZXIC_UINT32 odma_int_en_28; + ZXIC_UINT32 odma_int_en_27; + ZXIC_UINT32 odma_int_en_26; + ZXIC_UINT32 odma_int_en_25; + ZXIC_UINT32 odma_int_en_24; + ZXIC_UINT32 odma_int_en_22; + ZXIC_UINT32 odma_int_en_21; + ZXIC_UINT32 odma_int_en_18; +} DPP_NPPU_ODMA_CFG_ODMA_INT_EN_0_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_en_1_t { + ZXIC_UINT32 odma_int_en_63; + ZXIC_UINT32 odma_int_en_62; + ZXIC_UINT32 odma_int_en_61; + ZXIC_UINT32 odma_int_en_59; + ZXIC_UINT32 odma_int_en_58; + ZXIC_UINT32 odma_int_en_57; + ZXIC_UINT32 odma_int_en_56; + ZXIC_UINT32 odma_int_en_55; + ZXIC_UINT32 odma_int_en_54; + ZXIC_UINT32 odma_int_en_53; + ZXIC_UINT32 odma_int_en_52; + ZXIC_UINT32 odma_int_en_51; + ZXIC_UINT32 odma_int_en_49; + ZXIC_UINT32 odma_int_en_47; + ZXIC_UINT32 odma_int_en_45; + ZXIC_UINT32 odma_int_en_39; + ZXIC_UINT32 odma_int_en_38; + ZXIC_UINT32 odma_int_en_37; + ZXIC_UINT32 odma_int_en_36; + ZXIC_UINT32 odma_int_en_35; + ZXIC_UINT32 odma_int_en_34; + ZXIC_UINT32 odma_int_en_33; + ZXIC_UINT32 odma_int_en_32; +} DPP_NPPU_ODMA_CFG_ODMA_INT_EN_1_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_en_2_t { + ZXIC_UINT32 odma_int_en_91; + ZXIC_UINT32 odma_int_en_88; + ZXIC_UINT32 odma_int_en_85; + ZXIC_UINT32 odma_int_en_82; + ZXIC_UINT32 odma_int_en_79; + ZXIC_UINT32 odma_int_en_75; + ZXIC_UINT32 odma_int_en_74; + ZXIC_UINT32 odma_int_en_71; + ZXIC_UINT32 odma_int_en_65; + ZXIC_UINT32 odma_int_en_64; +} DPP_NPPU_ODMA_CFG_ODMA_INT_EN_2_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_en_3_t { + ZXIC_UINT32 odma_int_en_115; + ZXIC_UINT32 odma_int_en_114; + ZXIC_UINT32 odma_int_en_112; + ZXIC_UINT32 odma_int_en_110; + ZXIC_UINT32 odma_int_en_109; + ZXIC_UINT32 odma_int_en_108; + ZXIC_UINT32 odma_int_en_107; + ZXIC_UINT32 odma_int_en_106; + ZXIC_UINT32 odma_int_en_102; + ZXIC_UINT32 odma_int_en_101; + ZXIC_UINT32 odma_int_en_100; + ZXIC_UINT32 odma_int_en_98; + ZXIC_UINT32 odma_int_en_96; +} DPP_NPPU_ODMA_CFG_ODMA_INT_EN_3_T; + +typedef struct 
dpp_nppu_odma_cfg_odma_int_mask_0_t { + ZXIC_UINT32 odma_int_mask_31; + ZXIC_UINT32 odma_int_mask_30; + ZXIC_UINT32 odma_int_mask_29; + ZXIC_UINT32 odma_int_mask_28; + ZXIC_UINT32 odma_int_mask_27; + ZXIC_UINT32 odma_int_mask_26; + ZXIC_UINT32 odma_int_mask_25; + ZXIC_UINT32 odma_int_mask_24; + ZXIC_UINT32 odma_int_mask_22; + ZXIC_UINT32 odma_int_mask_21; + ZXIC_UINT32 odma_int_mask_18; +} DPP_NPPU_ODMA_CFG_ODMA_INT_MASK_0_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_mask_1_t { + ZXIC_UINT32 odma_int_mask_63; + ZXIC_UINT32 odma_int_mask_62; + ZXIC_UINT32 odma_int_mask_61; + ZXIC_UINT32 odma_int_mask_59; + ZXIC_UINT32 odma_int_mask_58; + ZXIC_UINT32 odma_int_mask_57; + ZXIC_UINT32 odma_int_mask_56; + ZXIC_UINT32 odma_int_mask_55; + ZXIC_UINT32 odma_int_mask_54; + ZXIC_UINT32 odma_int_mask_53; + ZXIC_UINT32 odma_int_mask_52; + ZXIC_UINT32 odma_int_mask_51; + ZXIC_UINT32 odma_int_mask_50; + ZXIC_UINT32 odma_int_mask_49; + ZXIC_UINT32 odma_int_mask_47; + ZXIC_UINT32 odma_int_mask_45; + ZXIC_UINT32 odma_int_mask_39; + ZXIC_UINT32 odma_int_mask_38; + ZXIC_UINT32 odma_int_mask_37; + ZXIC_UINT32 odma_int_mask_36; + ZXIC_UINT32 odma_int_mask_35; + ZXIC_UINT32 odma_int_mask_34; + ZXIC_UINT32 odma_int_mask_33; + ZXIC_UINT32 odma_int_mask_32; +} DPP_NPPU_ODMA_CFG_ODMA_INT_MASK_1_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_mask_2_t { + ZXIC_UINT32 odma_int_mask_91; + ZXIC_UINT32 odma_int_mask_88; + ZXIC_UINT32 odma_int_mask_85; + ZXIC_UINT32 odma_int_mask_82; + ZXIC_UINT32 odma_int_mask_79; + ZXIC_UINT32 odma_int_mask_75; + ZXIC_UINT32 odma_int_mask_74; + ZXIC_UINT32 odma_int_mask_71; + ZXIC_UINT32 odma_int_mask_65; + ZXIC_UINT32 odma_int_mask_64; +} DPP_NPPU_ODMA_CFG_ODMA_INT_MASK_2_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_mask_3_t { + ZXIC_UINT32 odma_int_mask_115; + ZXIC_UINT32 odma_int_mask_114; + ZXIC_UINT32 odma_int_mask_112; + ZXIC_UINT32 odma_int_mask_110; + ZXIC_UINT32 odma_int_mask_109; + ZXIC_UINT32 odma_int_mask_108; + ZXIC_UINT32 odma_int_mask_107; + 
ZXIC_UINT32 odma_int_mask_106; + ZXIC_UINT32 odma_int_mask_102; + ZXIC_UINT32 odma_int_mask_101; + ZXIC_UINT32 odma_int_mask_100; + ZXIC_UINT32 odma_int_mask_98; + ZXIC_UINT32 odma_int_mask_96; +} DPP_NPPU_ODMA_CFG_ODMA_INT_MASK_3_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_status_0_t { + ZXIC_UINT32 odma_int_status_31; + ZXIC_UINT32 odma_int_status_30; + ZXIC_UINT32 odma_int_status_29; + ZXIC_UINT32 odma_int_status_28; + ZXIC_UINT32 odma_int_status_27; + ZXIC_UINT32 odma_int_status_26; + ZXIC_UINT32 odma_int_status_25; + ZXIC_UINT32 odma_int_status_24; + ZXIC_UINT32 odma_int_status_22; + ZXIC_UINT32 odma_int_status_21; + ZXIC_UINT32 odma_int_status_18; +} DPP_NPPU_ODMA_CFG_ODMA_INT_STATUS_0_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_status_1_t { + ZXIC_UINT32 odma_int_status_63; + ZXIC_UINT32 odma_int_status_62; + ZXIC_UINT32 odma_int_status_61; + ZXIC_UINT32 odma_int_status_59; + ZXIC_UINT32 odma_int_status_58; + ZXIC_UINT32 odma_int_status_57; + ZXIC_UINT32 odma_int_status_56; + ZXIC_UINT32 odma_int_status_55; + ZXIC_UINT32 odma_int_status_54; + ZXIC_UINT32 odma_int_status_53; + ZXIC_UINT32 odma_int_status_52; + ZXIC_UINT32 odma_int_status_51; + ZXIC_UINT32 odma_int_status_49; + ZXIC_UINT32 odma_int_status_47; + ZXIC_UINT32 odma_int_status_45; + ZXIC_UINT32 odma_int_status_39; + ZXIC_UINT32 odma_int_status_38; + ZXIC_UINT32 odma_int_status_37; + ZXIC_UINT32 odma_int_status_36; + ZXIC_UINT32 odma_int_status_35; + ZXIC_UINT32 odma_int_status_34; + ZXIC_UINT32 odma_int_status_33; + ZXIC_UINT32 odma_int_status_32; +} DPP_NPPU_ODMA_CFG_ODMA_INT_STATUS_1_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_status_2_t { + ZXIC_UINT32 odma_int_status_91; + ZXIC_UINT32 odma_int_status_88; + ZXIC_UINT32 odma_int_status_85; + ZXIC_UINT32 odma_int_status_82; + ZXIC_UINT32 odma_int_status_79; + ZXIC_UINT32 odma_int_status_75; + ZXIC_UINT32 odma_int_status_74; + ZXIC_UINT32 odma_int_status_71; + ZXIC_UINT32 odma_int_status_65; + ZXIC_UINT32 odma_int_status_64; +} 
DPP_NPPU_ODMA_CFG_ODMA_INT_STATUS_2_T; + +typedef struct dpp_nppu_odma_cfg_odma_int_status_3_t { + ZXIC_UINT32 odma_int_status_117; + ZXIC_UINT32 odma_int_status_116; + ZXIC_UINT32 odma_int_status_115; + ZXIC_UINT32 odma_int_status_114; + ZXIC_UINT32 odma_int_status_112; + ZXIC_UINT32 odma_int_status_110; + ZXIC_UINT32 odma_int_status_109; + ZXIC_UINT32 odma_int_status_108; + ZXIC_UINT32 odma_int_status_107; + ZXIC_UINT32 odma_int_status_106; + ZXIC_UINT32 odma_int_status_102; + ZXIC_UINT32 odma_int_status_101; + ZXIC_UINT32 odma_int_status_100; + ZXIC_UINT32 odma_int_status_98; + ZXIC_UINT32 odma_int_status_96; +} DPP_NPPU_ODMA_CFG_ODMA_INT_STATUS_3_T; + +typedef struct dpp_nppu_odma_cfg_sp_tdm_err_nor_cfg_t { + ZXIC_UINT32 sp_tdm_err_nor_cfg; +} DPP_NPPU_ODMA_CFG_SP_TDM_ERR_NOR_CFG_T; + +typedef struct dpp_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_a_t { + ZXIC_UINT32 etm_dis_ptr_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_n_t { + ZXIC_UINT32 etm_dis_ptr_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_a_t { + ZXIC_UINT32 ftm_dis_ptr_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_n_t { + ZXIC_UINT32 ftm_dis_ptr_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_a_t { + ZXIC_UINT32 tm_dis_fifo_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_n_t { + ZXIC_UINT32 tm_dis_fifo_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_err_prog_full_cfg_a_t { + ZXIC_UINT32 err_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_err_prog_full_cfg_n_t { + ZXIC_UINT32 
err_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tdmuc_prog_full_cfg_a_t { + ZXIC_UINT32 tdmuc_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_tdmuc_prog_full_cfg_n_t { + ZXIC_UINT32 tdmuc_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_a_t { + ZXIC_UINT32 tdmmc_groupid_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_n_t { + ZXIC_UINT32 tdmmc_groupid_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_a_t { + ZXIC_UINT32 tdmmc_no_bitmap_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_n_t { + ZXIC_UINT32 tdmmc_no_bitmap_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_prog_full_cfg_a_t { + ZXIC_UINT32 tdmmc_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_tdmmc_prog_full_cfg_n_t { + ZXIC_UINT32 tdmmc_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_desc_prog_full_cfg_a_t { + ZXIC_UINT32 desc_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_desc_prog_full_cfg_n_t { + ZXIC_UINT32 desc_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_dly_prog_full_cfg_a_t { + ZXIC_UINT32 dly_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_dly_prog_full_cfg_n_t { + ZXIC_UINT32 dly_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_rsp_prog_full_cfg_a_t { + ZXIC_UINT32 rsp_prog_full_cfg_a; +} 
DPP_NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_rsp_prog_full_cfg_n_t { + ZXIC_UINT32 rsp_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_nor_prog_full_cfg_a_t { + ZXIC_UINT32 nor_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_nor_prog_full_cfg_n_t { + ZXIC_UINT32 nor_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_etm_nor_prog_full_cfg_a_t { + ZXIC_UINT32 etm_nor_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_etm_nor_prog_full_cfg_n_t { + ZXIC_UINT32 etm_nor_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_ftm_nor_prog_full_cfg_a_t { + ZXIC_UINT32 ftm_nor_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_ftm_nor_prog_full_cfg_n_t { + ZXIC_UINT32 ftm_nor_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_etm_prog_full_cfg_a_t { + ZXIC_UINT32 etm_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_etm_prog_full_cfg_n_t { + ZXIC_UINT32 etm_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_ftm_prog_full_cfg_a_t { + ZXIC_UINT32 ftm_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_ftm_prog_full_cfg_n_t { + ZXIC_UINT32 ftm_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_a_t { + ZXIC_UINT32 etm_nrdcnt_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_n_t { + ZXIC_UINT32 etm_nrdcnt_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_N_T; + +typedef struct 
dpp_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_a_t { + ZXIC_UINT32 ftm_nrdcnt_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_n_t { + ZXIC_UINT32 ftm_nrdcnt_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_pp_prog_full_cfg_a_t { + ZXIC_UINT32 pp_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_PP_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_pp_prog_full_cfg_n_t { + ZXIC_UINT32 pp_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_PP_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_tm_weight_t { + ZXIC_UINT32 tm_weight; +} DPP_NPPU_ODMA_CFG_TM_WEIGHT_T; + +typedef struct dpp_nppu_odma_cfg_pp_weight_t { + ZXIC_UINT32 pp_weight; +} DPP_NPPU_ODMA_CFG_PP_WEIGHT_T; + +typedef struct dpp_nppu_odma_cfg_ifbcmd_prog_full_cfg_a_t { + ZXIC_UINT32 ifbcmd_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_ifbcmd_prog_full_cfg_n_t { + ZXIC_UINT32 ifbcmd_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_mccnt_prog_full_cfg_a_t { + ZXIC_UINT32 mccnt_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_mccnt_prog_full_cfg_n_t { + ZXIC_UINT32 mccnt_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_int_or_pon_t { + ZXIC_UINT32 int_or_pon; +} DPP_NPPU_ODMA_CFG_INT_OR_PON_T; + +typedef struct dpp_nppu_odma_cfg_quemng_cnt_in_err_cnt_t { + ZXIC_UINT32 quemng_cnt_in_err_cnt; +} DPP_NPPU_ODMA_CFG_QUEMNG_CNT_IN_ERR_CNT_T; + +typedef struct dpp_nppu_odma_cfg_lif0_port_eop_cnt_t { + ZXIC_UINT32 lif0_port_eop_cnt; +} DPP_NPPU_ODMA_CFG_LIF0_PORT_EOP_CNT_T; + +typedef struct dpp_nppu_odma_cfg_lif1_port_eop_cnt_t { + ZXIC_UINT32 lif1_port_eop_cnt; +} DPP_NPPU_ODMA_CFG_LIF1_PORT_EOP_CNT_T; + +typedef struct dpp_nppu_odma_cfg_lifc_port0_eop_cnt_t { + ZXIC_UINT32 lifc_port0_eop_cnt; +} 
DPP_NPPU_ODMA_CFG_LIFC_PORT0_EOP_CNT_T; + +typedef struct dpp_nppu_odma_cfg_lifc_port1_eop_cnt_t { + ZXIC_UINT32 lifc_port1_eop_cnt; +} DPP_NPPU_ODMA_CFG_LIFC_PORT1_EOP_CNT_T; + +typedef struct dpp_nppu_odma_cfg_fptr_fifo_prog_ept_cfg_n_t { + ZXIC_UINT32 fptr_fifo_prog_ept_cfg_n; +} DPP_NPPU_ODMA_CFG_FPTR_FIFO_PROG_EPT_CFG_N_T; + +typedef struct dpp_nppu_odma_cfg_isu_fifo_prog_full_cfg_a_t { + ZXIC_UINT32 isu_fifo_prog_full_cfg_a; +} DPP_NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_A_T; + +typedef struct dpp_nppu_odma_cfg_isu_fifo_prog_full_cfg_n_t { + ZXIC_UINT32 isu_fifo_prog_full_cfg_n; +} DPP_NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_N_T; + +typedef struct dpp_nppu_oam_cfg_ind_access_done_t { + ZXIC_UINT32 ind_access_done; +} DPP_NPPU_OAM_CFG_IND_ACCESS_DONE_T; + +typedef struct dpp_nppu_oam_cfg_ind_access_command_t { + ZXIC_UINT32 ind_rd_or_wr; + ZXIC_UINT32 ind_mem_mask; + ZXIC_UINT32 ind_mem_id; + ZXIC_UINT32 ind_mem_addr; +} DPP_NPPU_OAM_CFG_IND_ACCESS_COMMAND_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat0_t { + ZXIC_UINT32 ind_dat0; +} DPP_NPPU_OAM_CFG_IND_DAT0_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat1_t { + ZXIC_UINT32 ind_dat1; +} DPP_NPPU_OAM_CFG_IND_DAT1_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat2_t { + ZXIC_UINT32 ind_dat2; +} DPP_NPPU_OAM_CFG_IND_DAT2_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat3_t { + ZXIC_UINT32 ind_dat3; +} DPP_NPPU_OAM_CFG_IND_DAT3_T; + +typedef struct dpp_nppu_oam_cfg_oam_tx_main_en_t { + ZXIC_UINT32 oam_tx_main_en; +} DPP_NPPU_OAM_CFG_OAM_TX_MAIN_EN_T; + +typedef struct dpp_nppu_oam_cfg_tx_total_num_t { + ZXIC_UINT32 tx_total_num; +} DPP_NPPU_OAM_CFG_TX_TOTAL_NUM_T; + +typedef struct dpp_nppu_oam_cfg_oam_chk_main_en_t { + ZXIC_UINT32 oam_chk_main_en; +} DPP_NPPU_OAM_CFG_OAM_CHK_MAIN_EN_T; + +typedef struct dpp_nppu_oam_cfg_chk_total_num0_t { + ZXIC_UINT32 chk_total_num0; +} DPP_NPPU_OAM_CFG_CHK_TOTAL_NUM0_T; + +typedef struct dpp_nppu_oam_cfg_ma_chk_main_en_t { + ZXIC_UINT32 oam_chk_main_en; +} DPP_NPPU_OAM_CFG_MA_CHK_MAIN_EN_T; + 
+typedef struct dpp_nppu_oam_cfg_chk_total_num1_t { + ZXIC_UINT32 chk_total_num0; +} DPP_NPPU_OAM_CFG_CHK_TOTAL_NUM1_T; + +typedef struct dpp_nppu_oam_cfg_tx_stat_en_t { + ZXIC_UINT32 tx_stat_en; +} DPP_NPPU_OAM_CFG_TX_STAT_EN_T; + +typedef struct dpp_nppu_oam_cfg_rec_stat_en_t { + ZXIC_UINT32 rec_stat_en; +} DPP_NPPU_OAM_CFG_REC_STAT_EN_T; + +typedef struct dpp_nppu_oam_cfg_stat_oam_rdy_mask_t { + ZXIC_UINT32 stat_oam_rdy_mask; +} DPP_NPPU_OAM_CFG_STAT_OAM_RDY_MASK_T; + +typedef struct dpp_nppu_oam_cfg_session_grading0_t { + ZXIC_UINT32 session_grading0; +} DPP_NPPU_OAM_CFG_SESSION_GRADING0_T; + +typedef struct dpp_nppu_oam_cfg_session_grading1_t { + ZXIC_UINT32 session_grading1; +} DPP_NPPU_OAM_CFG_SESSION_GRADING1_T; + +typedef struct dpp_nppu_oam_cfg_session_grading2_t { + ZXIC_UINT32 session_grading2; +} DPP_NPPU_OAM_CFG_SESSION_GRADING2_T; + +typedef struct dpp_nppu_oam_cfg_session_grading3_t { + ZXIC_UINT32 session_grading3; +} DPP_NPPU_OAM_CFG_SESSION_GRADING3_T; + +typedef struct dpp_nppu_oam_cfg_bfd_chk_haddr_t { + ZXIC_UINT32 bfd_chk_haddr; +} DPP_NPPU_OAM_CFG_BFD_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_ethccm_chk_haddr_t { + ZXIC_UINT32 ethccm_chk_haddr; +} DPP_NPPU_OAM_CFG_ETHCCM_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_tpbfd_chk_haddr_t { + ZXIC_UINT32 tpbfd_chk_haddr; +} DPP_NPPU_OAM_CFG_TPBFD_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_tpoam_ccm_chk_haddr_t { + ZXIC_UINT32 tpoam_ccm_chk_haddr; +} DPP_NPPU_OAM_CFG_TPOAM_CCM_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_bfd_tx_haddr_t { + ZXIC_UINT32 bfd_tx_haddr; +} DPP_NPPU_OAM_CFG_BFD_TX_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_ethccm_tx_haddr_t { + ZXIC_UINT32 ethccm_tx_haddr; +} DPP_NPPU_OAM_CFG_ETHCCM_TX_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_tpbfd_tx_haddr_t { + ZXIC_UINT32 tpbfd_tx_haddr; +} DPP_NPPU_OAM_CFG_TPBFD_TX_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_tpoam_ccm_tx_haddr_t { + ZXIC_UINT32 tpoam_ccm_tx_haddr; +} DPP_NPPU_OAM_CFG_TPOAM_CCM_TX_HADDR_T; + 
+typedef struct dpp_nppu_oam_cfg_ethccm_ma_chk_haddr_t { + ZXIC_UINT32 ethccm_ma_chk_haddr; +} DPP_NPPU_OAM_CFG_ETHCCM_MA_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_tpccm_ma_chk_haddr_t { + ZXIC_UINT32 tpccm_ma_chk_haddr; +} DPP_NPPU_OAM_CFG_TPCCM_MA_CHK_HADDR_T; + +typedef struct dpp_nppu_oam_cfg_groupnum_ram_clr_t { + ZXIC_UINT32 groupnum_ram_clr; +} DPP_NPPU_OAM_CFG_GROUPNUM_RAM_CLR_T; + +typedef struct dpp_nppu_oam_cfg_index_ram0_clr_t { + ZXIC_UINT32 index_ram0_clr; +} DPP_NPPU_OAM_CFG_INDEX_RAM0_CLR_T; + +typedef struct dpp_nppu_oam_cfg_index_ram1_clr_t { + ZXIC_UINT32 index_ram1_clr; +} DPP_NPPU_OAM_CFG_INDEX_RAM1_CLR_T; + +typedef struct dpp_nppu_oam_cfg_rmep_ram_clr_t { + ZXIC_UINT32 rmep_ram_clr; +} DPP_NPPU_OAM_CFG_RMEP_RAM_CLR_T; + +typedef struct dpp_nppu_oam_cfg_ma_ram_clr_t { + ZXIC_UINT32 ma_ram_clr; +} DPP_NPPU_OAM_CFG_MA_RAM_CLR_T; + +typedef struct dpp_nppu_oam_cfg_ram_init_done_t { + ZXIC_UINT32 ram_init_done; +} DPP_NPPU_OAM_CFG_RAM_INIT_DONE_T; + +typedef struct dpp_nppu_oam_cfg_rec_bfd_debug_en_t { + ZXIC_UINT32 rec_bfd_debug_en; +} DPP_NPPU_OAM_CFG_REC_BFD_DEBUG_EN_T; + +typedef struct dpp_nppu_oam_cfg_oam_session_int_t { + ZXIC_UINT32 tpma_int; + ZXIC_UINT32 ethma_int; + ZXIC_UINT32 bfd_int; + ZXIC_UINT32 ethoam_int; + ZXIC_UINT32 tpbfd_int; + ZXIC_UINT32 tpoam_int; +} DPP_NPPU_OAM_CFG_OAM_SESSION_INT_T; + +typedef struct dpp_nppu_oam_cfg_pon_int_t { + ZXIC_UINT32 fifo_int; + ZXIC_UINT32 pon_protect_int; +} DPP_NPPU_OAM_CFG_PON_INT_T; + +typedef struct dpp_nppu_oam_cfg_oam_int_clr_t { + ZXIC_UINT32 oam_int_clr; +} DPP_NPPU_OAM_CFG_OAM_INT_CLR_T; + +typedef struct dpp_nppu_oam_cfg_type_int_clr0_t { + ZXIC_UINT32 tpma_int_clr; + ZXIC_UINT32 ethma_int_clr; + ZXIC_UINT32 bfd_int_clr; + ZXIC_UINT32 ethoam_int_clr; + ZXIC_UINT32 tpbfd_int_clr; + ZXIC_UINT32 tpoam_int_clr; +} DPP_NPPU_OAM_CFG_TYPE_INT_CLR0_T; + +typedef struct dpp_nppu_oam_cfg_type_int_clr1_t { + ZXIC_UINT32 fifo_int_clr; + ZXIC_UINT32 pon_protect_int_clr; +} 
DPP_NPPU_OAM_CFG_TYPE_INT_CLR1_T; + +typedef struct dpp_nppu_oam_cfg_interrupt_mask_t { + ZXIC_UINT32 fifo_interrupt_mask; + ZXIC_UINT32 pon_protect_interruptmask; + ZXIC_UINT32 tpma_interrupt_mask; + ZXIC_UINT32 ethma_interrupt_mask; + ZXIC_UINT32 bfd_interrupt_mask; + ZXIC_UINT32 ethoam_interrupt_mask; + ZXIC_UINT32 tpbfd_interrupt_mask; + ZXIC_UINT32 tpoam_interrupt_mask; +} DPP_NPPU_OAM_CFG_INTERRUPT_MASK_T; + +typedef struct dpp_nppu_oam_cfg_int0_index_t { + ZXIC_UINT32 int0_index0; +} DPP_NPPU_OAM_CFG_INT0_INDEX_T; + +typedef struct dpp_nppu_oam_cfg_int1_index_t { + ZXIC_UINT32 int1_index0; +} DPP_NPPU_OAM_CFG_INT1_INDEX_T; + +typedef struct dpp_nppu_oam_cfg_int0_index_region_t { + ZXIC_UINT32 int0_index_region; +} DPP_NPPU_OAM_CFG_INT0_INDEX_REGION_T; + +typedef struct dpp_nppu_oam_cfg_int1_index_region_t { + ZXIC_UINT32 int1_index_region; +} DPP_NPPU_OAM_CFG_INT1_INDEX_REGION_T; + +typedef struct dpp_nppu_oam_cfg_bdiinfo_fwft_fifo_th_t { + ZXIC_UINT32 bdiinfo_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_BDIINFO_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_recsec_fwft_fifo_th_t { + ZXIC_UINT32 recsec_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_RECSEC_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_timing_chk_info0_fwft_fifo_th_t { + ZXIC_UINT32 timing_chk_info0_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_TIMING_CHK_INFO0_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_recma_fwft_fifo_th_t { + ZXIC_UINT32 recma_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_RECMA_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_timing_chk_info1_fwft_fifo_th_t { + ZXIC_UINT32 timing_chk_info1_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_TIMING_CHK_INFO1_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_oam_txinst_fifo_th_t { + ZXIC_UINT32 oam_txinst_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_TXINST_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_oam_rdinfo_fwft_fifo_th_t { + ZXIC_UINT32 oam_rdinfo_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_RDINFO_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_lm_cnt_fwft_fifo_th_t { + 
ZXIC_UINT32 lm_cnt_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_LM_CNT_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_oam_pkt_fifo_th_t { + ZXIC_UINT32 oam_pkt_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_PKT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_reclm_stat_fifo_th_t { + ZXIC_UINT32 reclm_stat_fifo_th; +} DPP_NPPU_OAM_CFG_RECLM_STAT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_txlm_stat_fifo_th_t { + ZXIC_UINT32 txlm_stat_fifo_th; +} DPP_NPPU_OAM_CFG_TXLM_STAT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_oam_chk_fwft_fifo_th_t { + ZXIC_UINT32 oam_chk_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_CHK_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_txoam_stat_fifo_th_t { + ZXIC_UINT32 txoam_stat_fifo_th; +} DPP_NPPU_OAM_CFG_TXOAM_STAT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_recoam_stat_fifo_th_t { + ZXIC_UINT32 recoam_stat_fifo_th; +} DPP_NPPU_OAM_CFG_RECOAM_STAT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_txpkt_data_fwft_fifo_th_t { + ZXIC_UINT32 txpkt_data_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_TXPKT_DATA_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_tstpkt_fwft_fifo_th_t { + ZXIC_UINT32 tstpkt_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_TSTPKT_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_tst_txinst_fwft_fifo_th_t { + ZXIC_UINT32 tst_txinst_fwft_fifo_th; +} DPP_NPPU_OAM_CFG_TST_TXINST_FWFT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_tstrx_main_en_t { + ZXIC_UINT32 tstrx_main_en; +} DPP_NPPU_OAM_CFG_TSTRX_MAIN_EN_T; + +typedef struct dpp_nppu_oam_cfg_tsttx_cfg_para_tbl2_t { + ZXIC_UINT32 ddr_self_test_tx_en; + ZXIC_UINT32 tm_self_test_tx_en; + ZXIC_UINT32 fast_aging_tx_en; + ZXIC_UINT32 timing_aging_tx_en; + ZXIC_UINT32 backgroud_flow_tx_en; + ZXIC_UINT32 tsttx_tx_en; + ZXIC_UINT32 tx_freq; + ZXIC_UINT32 tx_offset; +} DPP_NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL2_T; + +typedef struct dpp_nppu_oam_cfg_tsttx_cfg_para_tbl1_t { + ZXIC_UINT32 tx_count; +} DPP_NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL1_T; + +typedef struct dpp_nppu_oam_cfg_tsttx_cfg_para_tbl0_t { + ZXIC_UINT32 fast_tx_mode_en; + 
ZXIC_UINT32 tsttx_tx_head_len; + ZXIC_UINT32 tsttx_tx_interval; +} DPP_NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL0_T; + +typedef struct dpp_nppu_oam_cfg_tstrx_cfg_para_t { + ZXIC_UINT32 tstrx_session_num; + ZXIC_UINT32 tstrx_session_en; +} DPP_NPPU_OAM_CFG_TSTRX_CFG_PARA_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_en_0_t { + ZXIC_UINT32 fifo_status_int_en_31; + ZXIC_UINT32 fifo_status_int_en_30; + ZXIC_UINT32 fifo_status_int_en_29; + ZXIC_UINT32 fifo_status_int_en_28; + ZXIC_UINT32 fifo_status_int_en_27; + ZXIC_UINT32 fifo_status_int_en_26; + ZXIC_UINT32 fifo_status_int_en_25; + ZXIC_UINT32 fifo_status_int_en_24; + ZXIC_UINT32 fifo_status_int_en_23; + ZXIC_UINT32 fifo_status_int_en_22; + ZXIC_UINT32 fifo_status_int_en_21; + ZXIC_UINT32 fifo_status_int_en_20; + ZXIC_UINT32 fifo_status_int_en_19; + ZXIC_UINT32 fifo_status_int_en_18; + ZXIC_UINT32 fifo_status_int_en_17; + ZXIC_UINT32 fifo_status_int_en_16; + ZXIC_UINT32 fifo_status_int_en_15; + ZXIC_UINT32 fifo_status_int_en_14; + ZXIC_UINT32 fifo_status_int_en_13; + ZXIC_UINT32 fifo_status_int_en_12; + ZXIC_UINT32 fifo_status_int_en_11; + ZXIC_UINT32 fifo_status_int_en_10; + ZXIC_UINT32 fifo_status_int_en_9; + ZXIC_UINT32 fifo_status_int_en_8; + ZXIC_UINT32 fifo_status_int_en_7; + ZXIC_UINT32 fifo_status_int_en_6; + ZXIC_UINT32 fifo_status_int_en_5; + ZXIC_UINT32 fifo_status_int_en_4; + ZXIC_UINT32 fifo_status_int_en_3; + ZXIC_UINT32 fifo_status_int_en_2; + ZXIC_UINT32 fifo_status_int_en_1; + ZXIC_UINT32 fifo_status_int_en_0; +} DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_EN_0_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_en_1_t { + ZXIC_UINT32 fifo_status_int_en_41; + ZXIC_UINT32 fifo_status_int_en_40; + ZXIC_UINT32 fifo_status_int_en_39; + ZXIC_UINT32 fifo_status_int_en_38; + ZXIC_UINT32 fifo_status_int_en_37; + ZXIC_UINT32 fifo_status_int_en_36; + ZXIC_UINT32 fifo_status_int_en_35; + ZXIC_UINT32 fifo_status_int_en_34; + ZXIC_UINT32 fifo_status_int_en_33; + ZXIC_UINT32 fifo_status_int_en_32; +} 
DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_EN_1_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_mask_0_t { + ZXIC_UINT32 fifo_status_int_mask_31; + ZXIC_UINT32 fifo_status_int_mask_30; + ZXIC_UINT32 fifo_status_int_mask_29; + ZXIC_UINT32 fifo_status_int_mask_28; + ZXIC_UINT32 fifo_status_int_mask_27; + ZXIC_UINT32 fifo_status_int_mask_26; + ZXIC_UINT32 fifo_status_int_mask_25; + ZXIC_UINT32 fifo_status_int_mask_24; + ZXIC_UINT32 fifo_status_int_mask_23; + ZXIC_UINT32 fifo_status_int_mask_22; + ZXIC_UINT32 fifo_status_int_mask_21; + ZXIC_UINT32 fifo_status_int_mask_20; + ZXIC_UINT32 fifo_status_int_mask_19; + ZXIC_UINT32 fifo_status_int_mask_18; + ZXIC_UINT32 fifo_status_int_mask_17; + ZXIC_UINT32 fifo_status_int_mask_16; + ZXIC_UINT32 fifo_status_int_mask_15; + ZXIC_UINT32 fifo_status_int_mask_14; + ZXIC_UINT32 fifo_status_int_mask_13; + ZXIC_UINT32 fifo_status_int_mask_12; + ZXIC_UINT32 fifo_status_int_mask_11; + ZXIC_UINT32 fifo_status_int_mask_10; + ZXIC_UINT32 fifo_status_int_mask_9; + ZXIC_UINT32 fifo_status_int_mask_8; + ZXIC_UINT32 fifo_status_int_mask_7; + ZXIC_UINT32 fifo_status_int_mask_6; + ZXIC_UINT32 fifo_status_int_mask_5; + ZXIC_UINT32 fifo_status_int_mask_4; + ZXIC_UINT32 fifo_status_int_mask_3; + ZXIC_UINT32 fifo_status_int_mask_2; + ZXIC_UINT32 fifo_status_int_mask_1; + ZXIC_UINT32 fifo_status_int_mask_0; +} DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_0_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_mask_1_t { + ZXIC_UINT32 fifo_status_int_mask_41; + ZXIC_UINT32 fifo_status_int_mask_40; + ZXIC_UINT32 fifo_status_int_mask_39; + ZXIC_UINT32 fifo_status_int_mask_38; + ZXIC_UINT32 fifo_status_int_mask_37; + ZXIC_UINT32 fifo_status_int_mask_36; + ZXIC_UINT32 fifo_status_int_mask_35; + ZXIC_UINT32 fifo_status_int_mask_34; + ZXIC_UINT32 fifo_status_int_mask_33; + ZXIC_UINT32 fifo_status_int_mask_32; +} DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_1_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_status_t { + ZXIC_UINT32 fifo_status_int_status; +} 
DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS_T; + +typedef struct dpp_nppu_oam_cfg_main_frequency_t { + ZXIC_UINT32 main_frequency; +} DPP_NPPU_OAM_CFG_MAIN_FREQUENCY_T; + +typedef struct dpp_nppu_oam_cfg_oam_cfg_type_t { + ZXIC_UINT32 oam_cfg_type; +} DPP_NPPU_OAM_CFG_OAM_CFG_TYPE_T; + +typedef struct dpp_nppu_oam_cfg_fst_swch_eth_head0_t { + ZXIC_UINT32 fst_swch_eth_head; +} DPP_NPPU_OAM_CFG_FST_SWCH_ETH_HEAD0_T; + +typedef struct dpp_nppu_oam_cfg_fst_swch_eth_head1_t { + ZXIC_UINT32 fst_swch_eth_head1; +} DPP_NPPU_OAM_CFG_FST_SWCH_ETH_HEAD1_T; + +typedef struct dpp_nppu_oam_cfg_fst_swch_eth_head2_t { + ZXIC_UINT32 fst_swch_eth_head2; +} DPP_NPPU_OAM_CFG_FST_SWCH_ETH_HEAD2_T; + +typedef struct dpp_nppu_oam_cfg_fst_swch_eth_head3_t { + ZXIC_UINT32 fst_swch_eth_head3; +} DPP_NPPU_OAM_CFG_FST_SWCH_ETH_HEAD3_T; + +typedef struct dpp_nppu_oam_cfg_oam_fs_txinst_fifo_th_t { + ZXIC_UINT32 oam_fs_txinst_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_FS_TXINST_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_oam_ma_fs_txinst_fifo_th_t { + ZXIC_UINT32 oam_ma_fs_txinst_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_MA_FS_TXINST_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_pon_int_ram_clr_t { + ZXIC_UINT32 pon_int_ram_clr; +} DPP_NPPU_OAM_CFG_PON_INT_RAM_CLR_T; + +typedef struct dpp_nppu_oam_cfg_pon_p_int_index_t { + ZXIC_UINT32 pon_p_int_index; +} DPP_NPPU_OAM_CFG_PON_P_INT_INDEX_T; + +typedef struct dpp_nppu_oam_cfg_pon_protect_pkt_fifo_th_t { + ZXIC_UINT32 pon_protect_pkt_fifo_th; +} DPP_NPPU_OAM_CFG_PON_PROTECT_PKT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_pon_laser_off_en_t { + ZXIC_UINT32 pon_laser_off_en; +} DPP_NPPU_OAM_CFG_PON_LASER_OFF_EN_T; + +typedef struct dpp_nppu_oam_cfg_pon_prtct_pkt_tx_en_t { + ZXIC_UINT32 pon_prtct_pkt_tx_en; +} DPP_NPPU_OAM_CFG_PON_PRTCT_PKT_TX_EN_T; + +typedef struct dpp_nppu_oam_cfg_cfg_pon_master_t { + ZXIC_UINT32 cfg_pon_master; +} DPP_NPPU_OAM_CFG_CFG_PON_MASTER_T; + +typedef struct dpp_nppu_oam_cfg_level_mode_t { + ZXIC_UINT32 level_mode; +} 
DPP_NPPU_OAM_CFG_LEVEL_MODE_T; + +typedef struct dpp_nppu_oam_cfg_interrupt_en_t { + ZXIC_UINT32 interrupt_en; +} DPP_NPPU_OAM_CFG_INTERRUPT_EN_T; + +typedef struct dpp_nppu_oam_cfg_pon_laser_on_en_t { + ZXIC_UINT32 pon_laser_on_en; +} DPP_NPPU_OAM_CFG_PON_LASER_ON_EN_T; + +typedef struct dpp_nppu_oam_cfg_ti_pon_sd_t { + ZXIC_UINT32 ti_pon_sd; +} DPP_NPPU_OAM_CFG_TI_PON_SD_T; + +typedef struct dpp_nppu_oam_cfg_ti_pon_los_t { + ZXIC_UINT32 ti_pon_los; +} DPP_NPPU_OAM_CFG_TI_PON_LOS_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat4_t { + ZXIC_UINT32 ind_dat4; +} DPP_NPPU_OAM_CFG_IND_DAT4_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat5_t { + ZXIC_UINT32 ind_dat5; +} DPP_NPPU_OAM_CFG_IND_DAT5_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat6_t { + ZXIC_UINT32 ind_dat6; +} DPP_NPPU_OAM_CFG_IND_DAT6_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat7_t { + ZXIC_UINT32 ind_dat7; +} DPP_NPPU_OAM_CFG_IND_DAT7_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat8_t { + ZXIC_UINT32 ind_dat8; +} DPP_NPPU_OAM_CFG_IND_DAT8_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat9_t { + ZXIC_UINT32 ind_dat9; +} DPP_NPPU_OAM_CFG_IND_DAT9_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat10_t { + ZXIC_UINT32 ind_dat10; +} DPP_NPPU_OAM_CFG_IND_DAT10_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat11_t { + ZXIC_UINT32 ind_dat11; +} DPP_NPPU_OAM_CFG_IND_DAT11_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat12_t { + ZXIC_UINT32 ind_dat12; +} DPP_NPPU_OAM_CFG_IND_DAT12_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat13_t { + ZXIC_UINT32 ind_dat13; +} DPP_NPPU_OAM_CFG_IND_DAT13_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat14_t { + ZXIC_UINT32 ind_dat14; +} DPP_NPPU_OAM_CFG_IND_DAT14_T; + +typedef struct dpp_nppu_oam_cfg_ind_dat15_t { + ZXIC_UINT32 ind_dat15; +} DPP_NPPU_OAM_CFG_IND_DAT15_T; + +typedef struct dpp_nppu_oam_cfg_oam_2544_pkt_fifo_th_t { + ZXIC_UINT32 oam_2544_pkt_fifo_th; +} DPP_NPPU_OAM_CFG_OAM_2544_PKT_FIFO_TH_T; + +typedef struct dpp_nppu_oam_cfg_txinfo_ram_clr_t { + ZXIC_UINT32 txinfo_ram_clr; +} 
DPP_NPPU_OAM_CFG_TXINFO_RAM_CLR_T; + +typedef struct dpp_nppu_oam_cfg_txinfo_ram_init_done_t { + ZXIC_UINT32 txinfo_ram_init_done; +} DPP_NPPU_OAM_CFG_TXINFO_RAM_INIT_DONE_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_status40_t { + ZXIC_UINT32 fifo_status_int_status40; +} DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS40_T; + +typedef struct dpp_nppu_oam_cfg_fifo_status_int_status41_t { + ZXIC_UINT32 fifo_status_int_status41; +} DPP_NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS41_T; + +typedef struct dpp_nppu_oam_cfg_oam_2544_fun_en_t { + ZXIC_UINT32 oam_2544_fun_en; +} DPP_NPPU_OAM_CFG_OAM_2544_FUN_EN_T; + +typedef struct dpp_nppu_oam_cfg_oam_2544_stat_clr_t { + ZXIC_UINT32 oam_2544_stat_clr; +} DPP_NPPU_OAM_CFG_OAM_2544_STAT_CLR_T; + +typedef struct dpp_nppu_oam_cfg_txdis_default_t { + ZXIC_UINT32 txdis_default; +} DPP_NPPU_OAM_CFG_TXDIS_DEFAULT_T; + +typedef struct dpp_nppu_oam_cfg_txdis_default_en_t { + ZXIC_UINT32 txdis_default_en; +} DPP_NPPU_OAM_CFG_TXDIS_DEFAULT_EN_T; + +typedef struct dpp_nppu_oam_cfg_tpbfd_firstchk_th_t { + ZXIC_UINT32 tpbfd_firstchk_th; +} DPP_NPPU_OAM_CFG_TPBFD_FIRSTCHK_TH_T; + +typedef struct dpp_nppu_oam_cfg_ethccm_firstchk_th_t { + ZXIC_UINT32 ethccm_firstchk_th; +} DPP_NPPU_OAM_CFG_ETHCCM_FIRSTCHK_TH_T; + +typedef struct dpp_nppu_oam_cfg_tpccm_firstchk_th_t { + ZXIC_UINT32 tpccm_firstchk_th; +} DPP_NPPU_OAM_CFG_TPCCM_FIRSTCHK_TH_T; + +typedef struct dpp_nppu_oam_stat_txstat_req_cnt_t { + ZXIC_UINT32 txstat_req_cnt; +} DPP_NPPU_OAM_STAT_TXSTAT_REQ_CNT_T; + +typedef struct dpp_nppu_oam_stat_chkstat_req_cnt_t { + ZXIC_UINT32 chkstat_req_cnt; +} DPP_NPPU_OAM_STAT_CHKSTAT_REQ_CNT_T; + +typedef struct dpp_nppu_oam_stat_stat_oam_fc_cnt_t { + ZXIC_UINT32 stat1_oam_fc_cnt; +} DPP_NPPU_OAM_STAT_STAT_OAM_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_bfdseq_req_cnt_t { + ZXIC_UINT32 bfdseq_req_cnt; +} DPP_NPPU_OAM_STAT_BFDSEQ_REQ_CNT_T; + +typedef struct dpp_nppu_oam_stat_lmcnt_req_cnt_t { + ZXIC_UINT32 lmcnt_req_cnt; +} 
DPP_NPPU_OAM_STAT_LMCNT_REQ_CNT_T; + +typedef struct dpp_nppu_oam_stat_stat_oam_lm_rsp_cnt_t { + ZXIC_UINT32 stat2_rsp_cnt; +} DPP_NPPU_OAM_STAT_STAT_OAM_LM_RSP_CNT_T; + +typedef struct dpp_nppu_oam_stat_stat_oam_lm_fc_cnt_t { + ZXIC_UINT32 stat2_oam_fc_cnt; +} DPP_NPPU_OAM_STAT_STAT_OAM_LM_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_se_req_cnt_t { + ZXIC_UINT32 se_req_cnt; +} DPP_NPPU_OAM_STAT_SE_REQ_CNT_T; + +typedef struct dpp_nppu_oam_stat_se_rsp_cnt_t { + ZXIC_UINT32 se_rsp_cnt; +} DPP_NPPU_OAM_STAT_SE_RSP_CNT_T; + +typedef struct dpp_nppu_oam_stat_se_oam_fc_cnt_t { + ZXIC_UINT32 se_oam_fc_cnt; +} DPP_NPPU_OAM_STAT_SE_OAM_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_se_fc_cnt_t { + ZXIC_UINT32 oam_se_fc_cnt; +} DPP_NPPU_OAM_STAT_OAM_SE_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_pktrx_sop_cnt_t { + ZXIC_UINT32 oam_pktrx_sop_cnt; +} DPP_NPPU_OAM_STAT_OAM_PKTRX_SOP_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_pktrx_eop_cnt_t { + ZXIC_UINT32 oam_pktrx_eop_cnt; +} DPP_NPPU_OAM_STAT_OAM_PKTRX_EOP_CNT_T; + +typedef struct dpp_nppu_oam_stat_pktrx_oam_fc_cnt_t { + ZXIC_UINT32 pktrx_oam_fc_cnt; +} DPP_NPPU_OAM_STAT_PKTRX_OAM_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_pktrx_oam_tst_fc_cnt_t { + ZXIC_UINT32 pktrx_oam_tst_fc_cnt; +} DPP_NPPU_OAM_STAT_PKTRX_OAM_TST_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_odma_oam_sop_cnt_t { + ZXIC_UINT32 odma_oam_sop_cnt; +} DPP_NPPU_OAM_STAT_ODMA_OAM_SOP_CNT_T; + +typedef struct dpp_nppu_oam_stat_odma_oam_eop_cnt_t { + ZXIC_UINT32 odma_oam_eop_cnt; +} DPP_NPPU_OAM_STAT_ODMA_OAM_EOP_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_odma_fc_cnt_t { + ZXIC_UINT32 oam_odma_fc_cnt; +} DPP_NPPU_OAM_STAT_OAM_ODMA_FC_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_ma_pkt_illegal_cnt_t { + ZXIC_UINT32 rec_ma_pkt_illegal_cnt; +} DPP_NPPU_OAM_STAT_REC_MA_PKT_ILLEGAL_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_rmep_pkt_illegal_cnt_t { + ZXIC_UINT32 rec_rmep_pkt_illegal_cnt; +} DPP_NPPU_OAM_STAT_REC_RMEP_PKT_ILLEGAL_CNT_T; + 
+typedef struct dpp_nppu_oam_stat_rec_eth_ais_pkt_cnt_t { + ZXIC_UINT32 rec_eth_ais_pkt_cnt; +} DPP_NPPU_OAM_STAT_REC_ETH_AIS_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_ais_pkt_cnt_t { + ZXIC_UINT32 rec_tp_ais_pkt_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_AIS_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_csf_pkt_cnt_t { + ZXIC_UINT32 rec_tp_csf_pkt_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_CSF_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_eth_level_defect_cnt_t { + ZXIC_UINT32 rec_eth_level_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_ETH_LEVEL_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_eth_megid_defect_cnt_t { + ZXIC_UINT32 rec_eth_megid_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_ETH_MEGID_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_eth_mepid_defect_cnt_t { + ZXIC_UINT32 rec_eth_mepid_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_ETH_MEPID_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_eth_interval_defect_cnt_t { + ZXIC_UINT32 rec_eth_interval_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_ETH_INTERVAL_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_sess_unenable_cnt_t { + ZXIC_UINT32 rec_sess_unenable_cnt; +} DPP_NPPU_OAM_STAT_REC_SESS_UNENABLE_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_2544_rd_pkt_cnt_t { + ZXIC_UINT32 oam_2544_rd_pkt_cnt; +} DPP_NPPU_OAM_STAT_OAM_2544_RD_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_debug_cnt_clr_t { + ZXIC_UINT32 debug_cnt_clr; +} DPP_NPPU_OAM_STAT_DEBUG_CNT_CLR_T; + +typedef struct dpp_nppu_oam_stat_oam_pktrx_catch_data_t { + ZXIC_UINT32 oam_pktrx_catch_data; +} DPP_NPPU_OAM_STAT_OAM_PKTRX_CATCH_DATA_T; + +typedef struct dpp_nppu_oam_stat_odma_oam_catch_data_t { + ZXIC_UINT32 odma_oam_catch_data; +} DPP_NPPU_OAM_STAT_ODMA_OAM_CATCH_DATA_T; + +typedef struct dpp_nppu_oam_stat_tst_session_tx_cnt_t { + ZXIC_UINT32 tst_session_tx_cnt; +} DPP_NPPU_OAM_STAT_TST_SESSION_TX_CNT_T; + +typedef struct dpp_nppu_oam_stat_tst_session_rx_cnt_t { + ZXIC_UINT32 tst_session_rx_cnt; +} DPP_NPPU_OAM_STAT_TST_SESSION_RX_CNT_T; + 
+typedef struct dpp_nppu_oam_stat_tstrx_lost_cnt_t { + ZXIC_UINT32 tstrx_lost_cnt; +} DPP_NPPU_OAM_STAT_TSTRX_LOST_CNT_T; + +typedef struct dpp_nppu_oam_stat_bfdseq_wr_cnt_t { + ZXIC_UINT32 bfdseq_wr_cnt; +} DPP_NPPU_OAM_STAT_BFDSEQ_WR_CNT_T; + +typedef struct dpp_nppu_oam_stat_bfdtime_wr_cnt_t { + ZXIC_UINT32 bfdtime_wr_cnt; +} DPP_NPPU_OAM_STAT_BFDTIME_WR_CNT_T; + +typedef struct dpp_nppu_oam_stat_lmcnt_wr_cnt_t { + ZXIC_UINT32 lmcnt_wr_cnt; +} DPP_NPPU_OAM_STAT_LMCNT_WR_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_fs_pkt_cnt_t { + ZXIC_UINT32 oam_fs_pkt_cnt; +} DPP_NPPU_OAM_STAT_OAM_FS_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_oam_ma_fs_pkt_cnt_t { + ZXIC_UINT32 lmcnt_wr_cnt; +} DPP_NPPU_OAM_STAT_OAM_MA_FS_PKT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_level_defect_cnt_t { + ZXIC_UINT32 rec_tp_level_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_LEVEL_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_megid_defect_cnt_t { + ZXIC_UINT32 rec_tp_megid_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_MEGID_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_mepid_defect_cnt_t { + ZXIC_UINT32 rec_tp_mepid_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_MEPID_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rec_tp_interval_defect_cnt_t { + ZXIC_UINT32 rec_tp_interval_defect_cnt; +} DPP_NPPU_OAM_STAT_REC_TP_INTERVAL_DEFECT_CNT_T; + +typedef struct dpp_nppu_oam_stat_rd_reg_clear_mode_t { + ZXIC_UINT32 rd_clear_mode_cfg; +} DPP_NPPU_OAM_STAT_RD_REG_CLEAR_MODE_T; + +typedef struct dpp_nppu_oam_stat_rd_data_reg_clear_mode_t { + ZXIC_UINT32 rd_data_reg_clear_mode_cfg; +} DPP_NPPU_OAM_STAT_RD_DATA_REG_CLEAR_MODE_T; + +typedef struct dpp_nppu_oam_cfg_indir_oam_int_status_ram_0_t { + ZXIC_UINT32 bfd_diag_value_bit4; + ZXIC_UINT32 bfd_diag_value_bit3; + ZXIC_UINT32 bfd_diag_value_bit2; + ZXIC_UINT32 bfd_diag_value_bit1; + ZXIC_UINT32 bfd_diag_value_bit0; + ZXIC_UINT32 dloc_int; + ZXIC_UINT32 drdi_int; +} DPP_NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM_0_T; + +typedef struct 
dpp_nppu_oam_cfg_indir_oam_int_status_ram1_t { + ZXIC_UINT32 sticky_error_level_defect; + ZXIC_UINT32 sticky_error_megid_defect; + ZXIC_UINT32 sticky_error_mepid_defect; + ZXIC_UINT32 sticky_error_inter_defect; + ZXIC_UINT32 sticky_ais_defect; + ZXIC_UINT32 sticky_csf_defect; + ZXIC_UINT32 current_error_level_defect; + ZXIC_UINT32 current_error_megid_defect; + ZXIC_UINT32 current_error_mepid_defect; + ZXIC_UINT32 current_error_inter_defect; + ZXIC_UINT32 current_ais_defect; + ZXIC_UINT32 current_csf_defect; +} DPP_NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM1_T; + +typedef struct dpp_nppu_oam_cfg_indir_tst_pkt_tx_para_ram_t { + ZXIC_UINT32 ddr_self_test_tx_en; + ZXIC_UINT32 tm_self_test_tx_en; + ZXIC_UINT32 fast_aging_tx_en; + ZXIC_UINT32 timing_aging_tx_en; + ZXIC_UINT32 backgroud_flow_tx_en; + ZXIC_UINT32 tsttx_session_en; + ZXIC_UINT32 tx_freq; + ZXIC_UINT32 tx_offset; + ZXIC_UINT32 tx_count; + ZXIC_UINT32 fast_tx_mode_en; + ZXIC_UINT32 tsttx_pkthead_len; + ZXIC_UINT32 tsttx_interval; +} DPP_NPPU_OAM_CFG_INDIR_TST_PKT_TX_PARA_RAM_T; + +typedef struct dpp_nppu_oam_cfg_indir_groupnumram_t { + ZXIC_UINT32 mep_down_num; +} DPP_NPPU_OAM_CFG_INDIR_GROUPNUMRAM_T; + +typedef struct dpp_nppu_oam_cfg_indir_oam_tx_tbl_ram_t { + ZXIC_UINT32 oam_tx_en; + ZXIC_UINT32 oam_tx_type; + ZXIC_UINT32 oam_fetch_len; + ZXIC_UINT32 bfd_seq_tx_en; + ZXIC_UINT32 tx_para; + ZXIC_UINT32 oam_tx_interval; + ZXIC_UINT32 hd_ena_flag; + ZXIC_UINT32 last_tx_time; +} DPP_NPPU_OAM_CFG_INDIR_OAM_TX_TBL_RAM_T; + +typedef struct dpp_nppu_oam_cfg_indir_oam_chk_tbl_ram_t { + ZXIC_UINT32 fast_switch_en; + ZXIC_UINT32 oam_chk_en; + ZXIC_UINT32 oam_chk_type; + ZXIC_UINT32 ccm_predel_flag; + ZXIC_UINT32 lm_chk_en; + ZXIC_UINT32 ccm_group_id; + ZXIC_UINT32 oam_chk_internal; + ZXIC_UINT32 fist_chk_flag; + ZXIC_UINT32 last_chk_time; +} DPP_NPPU_OAM_CFG_INDIR_OAM_CHK_TBL_RAM_T; + +typedef struct dpp_nppu_oam_cfg_indir_oam_ma_chk_tbl_ram_t { + ZXIC_UINT32 ma_fast_switch_en; + ZXIC_UINT32 ma_chk_en; + ZXIC_UINT32 
ma_type; + ZXIC_UINT32 error_level_defect_en; + ZXIC_UINT32 error_megid_defect_en; + ZXIC_UINT32 error_mepid_defect_en; + ZXIC_UINT32 error_inter_defect_en; + ZXIC_UINT32 ais_defect_en; + ZXIC_UINT32 csf_defect_en; + ZXIC_UINT32 error_level_defect_ccm; + ZXIC_UINT32 error_megid_defect_ccm; + ZXIC_UINT32 error_mepid_defect_ccm; + ZXIC_UINT32 error_inter_defect_ccm; + ZXIC_UINT32 ais_defect_ccm; + ZXIC_UINT32 csf_defect_ccm; + ZXIC_UINT32 ma_predel_en; + ZXIC_UINT32 error_level_defect_ts; + ZXIC_UINT32 error_megid_defect_ts; + ZXIC_UINT32 error_mepid_defect_ts; + ZXIC_UINT32 error_inter_defect_ts; + ZXIC_UINT32 ais_defect_ts; + ZXIC_UINT32 csf_defect_ts; +} DPP_NPPU_OAM_CFG_INDIR_OAM_MA_CHK_TBL_RAM_T; + +typedef struct dpp_nppu_oam_cfg_indir_oam_2544_tx_ram_t { + ZXIC_UINT32 tx_en_2544; + ZXIC_UINT32 tx_cfg_times_2544; + ZXIC_UINT32 current_times; + ZXIC_UINT32 slice_num; + ZXIC_UINT32 pkt_mty; +} DPP_NPPU_OAM_CFG_INDIR_OAM_2544_TX_RAM_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_pci.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_pci.h new file mode 100644 index 000000000000..231216891d8a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_pci.h @@ -0,0 +1,39 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pci.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 石金锋 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: 代码规范性修改 +* 修改日期: 2014/02/10 +* 版 本 号: +* 修 改 人: 丁金凤 +* 修改内容: +***************************************************************/ + +#ifndef _DPP_PCI_H_ +#define _DPP_PCI_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_dev.h" +#include "dpp_module.h" + +ZXIC_UINT32 dpp_pci_write32(DPP_DEV_T *dev, ZXIC_ADDR_T abs_addr, + ZXIC_UINT32 *p_data); +ZXIC_UINT32 dpp_pci_read32(DPP_DEV_T *dev, ZXIC_ADDR_T abs_addr, + ZXIC_UINT32 
*p_data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu4k_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu4k_reg.h new file mode 100644 index 000000000000..81e5b270e44c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu4k_reg.h @@ -0,0 +1,37 @@ + +#ifndef _DPP_PPU4K_REG_H_ +#define _DPP_PPU4K_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_ppu4k_cluster_wr_high_data_r_mex_t { + ZXIC_UINT32 wr_high_data_r_mex; +} DPP_PPU4K_CLUSTER_WR_HIGH_DATA_R_MEX_T; + +typedef struct dpp_ppu4k_cluster_wr_low_data_r_mex_t { + ZXIC_UINT32 wr_low_data_r_mex; +} DPP_PPU4K_CLUSTER_WR_LOW_DATA_R_MEX_T; + +typedef struct dpp_ppu4k_cluster_addr_r_mex_t { + ZXIC_UINT32 operate_type; + ZXIC_UINT32 addr_r_mex; +} DPP_PPU4K_CLUSTER_ADDR_R_MEX_T; + +typedef struct dpp_ppu4k_cluster_sdt_tbl_ind_access_done_t { + ZXIC_UINT32 rd_addr_r_mex; +} DPP_PPU4K_CLUSTER_SDT_TBL_IND_ACCESS_DONE_T; + +typedef struct dpp_ppu4k_cluster_rd_high_data_r_mex_t { + ZXIC_UINT32 rd_high_data_r_mex; +} DPP_PPU4K_CLUSTER_RD_HIGH_DATA_R_MEX_T; + +typedef struct dpp_ppu4k_cluster_rd_low_data_r_mex_t { + ZXIC_UINT32 rd_low_data_r_mex; +} DPP_PPU4K_CLUSTER_RD_LOW_DATA_R_MEX_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu_reg.h new file mode 100644 index 000000000000..9ec6d0f1264a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ppu_reg.h @@ -0,0 +1,3653 @@ + +#ifndef _DPP_PPU_REG_H_ +#define _DPP_PPU_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_ppu_ppu_test_r_t { + ZXIC_UINT32 test_r; +} DPP_PPU_PPU_TEST_R_T; + +typedef struct dpp_ppu_ppu_ppu_debug_en_r_t { + ZXIC_UINT32 debug_en_r; +} DPP_PPU_PPU_PPU_DEBUG_EN_R_T; + +typedef struct dpp_ppu_ppu_csr_dup_table_wr_data_t { + 
ZXIC_UINT32 item_vld; + ZXIC_UINT32 flownum_vld; + ZXIC_UINT32 start_pc; + ZXIC_UINT32 flownum; +} DPP_PPU_PPU_CSR_DUP_TABLE_WR_DATA_T; + +typedef struct dpp_ppu_ppu_csr_dup_table_rd_data_t { + ZXIC_UINT32 item_vld; + ZXIC_UINT32 flownum_vld; + ZXIC_UINT32 start_pc; + ZXIC_UINT32 flownum; +} DPP_PPU_PPU_CSR_DUP_TABLE_RD_DATA_T; + +typedef struct dpp_ppu_ppu_csr_dup_table_addr_t { + ZXIC_UINT32 csr_dup_table_operation; + ZXIC_UINT32 csr_dup_table_addr; +} DPP_PPU_PPU_CSR_DUP_TABLE_ADDR_T; + +typedef struct dpp_ppu_ppu_ppu_debug_vld_t { + ZXIC_UINT32 ppu_debug_vld; +} DPP_PPU_PPU_PPU_DEBUG_VLD_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_319_288_t { + ZXIC_UINT32 rsk_319_288; +} DPP_PPU_PPU_COP_THASH_RSK_319_288_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_287_256_t { + ZXIC_UINT32 rsk_287_256; +} DPP_PPU_PPU_COP_THASH_RSK_287_256_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_255_224_t { + ZXIC_UINT32 rsk_255_224; +} DPP_PPU_PPU_COP_THASH_RSK_255_224_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_223_192_t { + ZXIC_UINT32 rsk_223_192; +} DPP_PPU_PPU_COP_THASH_RSK_223_192_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_191_160_t { + ZXIC_UINT32 rsk_191_160; +} DPP_PPU_PPU_COP_THASH_RSK_191_160_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_159_128_t { + ZXIC_UINT32 rsk_159_128; +} DPP_PPU_PPU_COP_THASH_RSK_159_128_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_127_096_t { + ZXIC_UINT32 rsk_127_096; +} DPP_PPU_PPU_COP_THASH_RSK_127_096_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_095_064_t { + ZXIC_UINT32 rsk_095_064; +} DPP_PPU_PPU_COP_THASH_RSK_095_064_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_063_032_t { + ZXIC_UINT32 rsk_063_032; +} DPP_PPU_PPU_COP_THASH_RSK_063_032_T; + +typedef struct dpp_ppu_ppu_cop_thash_rsk_031_000_t { + ZXIC_UINT32 rsk_031_000; +} DPP_PPU_PPU_COP_THASH_RSK_031_000_T; + +typedef struct dpp_ppu_ppu_cfg_ipv4_ipid_start_value_t { + ZXIC_UINT32 cfg_ipv4_ipid_start_value; +} DPP_PPU_PPU_CFG_IPV4_IPID_START_VALUE_T; + +typedef struct 
dpp_ppu_ppu_cfg_ipv4_ipid_end_value_t { + ZXIC_UINT32 cfg_ipv4_ipid_end_value; +} DPP_PPU_PPU_CFG_IPV4_IPID_END_VALUE_T; + +typedef struct dpp_ppu_ppu_cluster_mf_in_en_t { + ZXIC_UINT32 cluster_mf_in_en; +} DPP_PPU_PPU_CLUSTER_MF_IN_EN_T; + +typedef struct dpp_ppu_ppu_ppu_empty_t { + ZXIC_UINT32 ppu_empty; +} DPP_PPU_PPU_PPU_EMPTY_T; + +typedef struct dpp_ppu_ppu_instrmem_w_addr_t { + ZXIC_UINT32 instrmem_w_addr; +} DPP_PPU_PPU_INSTRMEM_W_ADDR_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_191_160_t { + ZXIC_UINT32 instrmem_w_data_191_160; +} DPP_PPU_PPU_INSTRMEM_W_DATA_191_160_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_159_128_t { + ZXIC_UINT32 instrmem_w_data_159_128; +} DPP_PPU_PPU_INSTRMEM_W_DATA_159_128_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_127_96_t { + ZXIC_UINT32 instrmem_w_data_127_96; +} DPP_PPU_PPU_INSTRMEM_W_DATA_127_96_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_95_64_t { + ZXIC_UINT32 instrmem_w_data_95_64; +} DPP_PPU_PPU_INSTRMEM_W_DATA_95_64_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_63_32_t { + ZXIC_UINT32 instrmem_w_data_63_32; +} DPP_PPU_PPU_INSTRMEM_W_DATA_63_32_T; + +typedef struct dpp_ppu_ppu_instrmem_w_data_31_0_t { + ZXIC_UINT32 instrmem_w_data_31_0; +} DPP_PPU_PPU_INSTRMEM_W_DATA_31_0_T; + +typedef struct dpp_ppu_ppu_isu_fwft_mf_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 isu_fwft_mf_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_fwft_mf_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 isu_fwft_mf_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_int_1200m_mask_t { + ZXIC_UINT32 me7_interrupt_mask; + ZXIC_UINT32 me6_interrupt_mask; + ZXIC_UINT32 me5_interrupt_mask; + ZXIC_UINT32 me4_interrupt_mask; + ZXIC_UINT32 me3_interrupt_mask; + ZXIC_UINT32 me2_interrupt_mask; + ZXIC_UINT32 me1_interrupt_mask; + ZXIC_UINT32 me0_interrupt_mask; +} DPP_PPU_CLUSTER_INT_1200M_MASK_T; + +typedef 
struct dpp_ppu_ppu_interrupt_en_r_t { + ZXIC_UINT32 interrupt_en_r; +} DPP_PPU_PPU_INTERRUPT_EN_R_T; + +typedef struct dpp_ppu_ppu_mec_host_interrupt_t { + ZXIC_UINT32 mec_host_interrupt; +} DPP_PPU_PPU_MEC_HOST_INTERRUPT_T; + +typedef struct dpp_ppu_ppu_dbg_rtl_date_t { + ZXIC_UINT32 dbg_rtl_date; +} DPP_PPU_PPU_DBG_RTL_DATE_T; + +typedef struct dpp_ppu_ppu_dup_start_num_cfg_t { + ZXIC_UINT32 dup_start_num_cfg; +} DPP_PPU_PPU_DUP_START_NUM_CFG_T; + +typedef struct dpp_ppu_ppu_debug_data_write_complete_t { + ZXIC_UINT32 debug_data_write_complete; +} DPP_PPU_PPU_DEBUG_DATA_WRITE_COMPLETE_T; + +typedef struct dpp_ppu_ppu_uc_mc_wrr_cfg_t { + ZXIC_UINT32 uc_mc_wrr_cfg; +} DPP_PPU_PPU_UC_MC_WRR_CFG_T; + +typedef struct dpp_ppu_ppu_debug_pkt_send_en_t { + ZXIC_UINT32 debug_pkt_send_en; +} DPP_PPU_PPU_DEBUG_PKT_SEND_EN_T; + +typedef struct dpp_ppu_ppu_dup_tbl_ind_access_done_t { + ZXIC_UINT32 dup_tbl_ind_access_done; +} DPP_PPU_PPU_DUP_TBL_IND_ACCESS_DONE_T; + +typedef struct dpp_ppu_ppu_isu_ppu_demux_fifo_interrupt_mask_t { + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_mask; + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_mask; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_overflow_mask; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_underflow_mask; +} DPP_PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_ppu_multicast_fifo_interrupt_mask_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_mask; + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_mask; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_overflow_mask; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_underflow_mask; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_mask; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_mask; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_overflow_mask; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_underflow_mask; + ZXIC_UINT32 
se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_mask; + ZXIC_UINT32 se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_mask; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_overflow_mask; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_underflow_mask; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_overflow_mask; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_underflow_mask; + ZXIC_UINT32 isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_mask; + ZXIC_UINT32 isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_mask; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_mask; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_mask; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_overflow_mask; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_underflow_mask; +} DPP_PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_ppu_in_schedule_fifo_interrupt_mask_t { + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_mask; + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_mask; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_overflow_mask; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_underflow_mask; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_overflow_mask; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_underflow_mask; +} DPP_PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_ppu_mf_out_fifo_interrupt_mask_t { + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 
ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_mask; +} DPP_PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_mask_t { + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask; +} DPP_PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_mask_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_mask; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_mask; +} DPP_PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_ppu_mccnt_fifo_interrupt_mask_t { + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_overflow_mask; + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_underflow_mask; + ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_mask; + 
ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_mask; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_overflow_mask; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_underflow_mask; +} DPP_PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_mask_l_t { + ZXIC_UINT32 mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + 
mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_L_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_mask_m_t { + ZXIC_UINT32 ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_mask; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 
mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask; + ZXIC_UINT32 mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask; + ZXIC_UINT32 + mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_M_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_mask_h_t { + ZXIC_UINT32 coprocessor_fwft_fifo_16x80_wrapper_overflow_mask; + ZXIC_UINT32 coprocessor_fwft_fifo_16x80_wrapper_underflow_mask; + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_mask; + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_mask; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_H_T; + +typedef struct dpp_ppu_ppu_ppu_ram_check_err_mask_t { + ZXIC_UINT32 parity_err_mask; +} DPP_PPU_PPU_PPU_RAM_CHECK_ERR_MASK_T; + +typedef struct dpp_ppu_ppu_instrmem_fifo_interrupt_mask_t { + ZXIC_UINT32 instrmem2_wr_fifo_ovf_mask; + ZXIC_UINT32 instrmem2_wr_fifo_udf_mask; + ZXIC_UINT32 instrmem2_rd_fifo_ovf_mask; + ZXIC_UINT32 instrmem2_rd_fifo_udf_mask; + ZXIC_UINT32 instrmem1_wr_fifo_ovf_mask; + ZXIC_UINT32 instrmem1_wr_fifo_udf_mask; + ZXIC_UINT32 instrmem1_rd_fifo_ovf_mask; + ZXIC_UINT32 
instrmem1_rd_fifo_udf_mask; + ZXIC_UINT32 instrmem0_wr_fifo_ovf_mask; + ZXIC_UINT32 instrmem0_wr_fifo_udf_mask; + ZXIC_UINT32 instrmem0_rd_fifo_ovf_mask; + ZXIC_UINT32 instrmem0_rd_fifo_udf_mask; +} DPP_PPU_PPU_INSTRMEM_FIFO_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_ppu_isu_ppu_demux_fifo_interrupt_sta_t { + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_sta; + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_sta; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_overflow_sta; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_underflow_sta; +} DPP_PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_ppu_multicast_fifo_interrupt_sta_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_sta; + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_sta; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_overflow_sta; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_underflow_sta; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_sta; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_sta; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_overflow_sta; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_underflow_sta; + ZXIC_UINT32 se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_sta; + ZXIC_UINT32 se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_sta; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_overflow_sta; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_underflow_sta; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_overflow_sta; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_underflow_sta; + ZXIC_UINT32 isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_sta; + ZXIC_UINT32 isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_sta; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_sta; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_sta; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_overflow_sta; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_underflow_sta; +} 
DPP_PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_ppu_in_schedule_fifo_interrupt_sta_t { + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_sta; + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_sta; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_overflow_sta; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_underflow_sta; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_overflow_sta; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_underflow_sta; +} DPP_PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_ppu_mf_out_fifo_interrupt_sta_t { + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_sta; +} DPP_PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_sta_t { + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + 
ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta; +} DPP_PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_sta_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_sta; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_sta; +} DPP_PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_ppu_mccnt_fifo_interrupt_sta_t { + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_overflow_sta; + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_underflow_sta; + ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_sta; + ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_sta; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_overflow_sta; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_underflow_sta; +} DPP_PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_sta_l_t { + ZXIC_UINT32 mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 
mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_L_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_sta_m_t { + ZXIC_UINT32 ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_sta; + ZXIC_UINT32 
ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_sta; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_sta; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta; + ZXIC_UINT32 
mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta; + ZXIC_UINT32 mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_M_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_sta_h_t { + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_sta; + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_sta; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_fifo_interrupt_sta_t { + ZXIC_UINT32 instrmem1_wr_fifo_ovf_sta; + ZXIC_UINT32 instrmem1_wr_fifo_udf_sta; + ZXIC_UINT32 instrmem1_rd_fifo_ovf_sta; + ZXIC_UINT32 instrmem1_rd_fifo_udf_sta; + ZXIC_UINT32 instrmem0_wr_fifo_ovf_sta; + ZXIC_UINT32 instrmem0_wr_fifo_udf_sta; + ZXIC_UINT32 instrmem0_rd_fifo_ovf_sta; + ZXIC_UINT32 instrmem0_rd_fifo_udf_sta; +} DPP_PPU_PPU_INSTRMEM_FIFO_INTERRUPT_STA_T; + +typedef struct dpp_ppu_ppu_ppu_ram_check_ecc_err_flag_1_t { + ZXIC_UINT32 ecc_single_err_sa_para_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_sa_para_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_dup_para_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_dup_para_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_pf_rsp_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_pf_rsp_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_pf_req_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_pf_req_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_link_ram0_int_flag; + ZXIC_UINT32 ecc_double_err_ppu_reorder_link_ram0_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_link_ram1_int_flag; + ZXIC_UINT32 ecc_double_err_ppu_reorder_link_ram1_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_link_flag_array_ram0_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_link_flag_array_ram1_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_ifb_ram_int_flag; + ZXIC_UINT32 ecc_double_err_ppu_reorder_ifb_ram_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_flag_array_ram0_int_flag; + ZXIC_UINT32 
ecc_single_err_ppu_reorder_flag_array_ram1_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_flag_ram0_int_flag; + ZXIC_UINT32 ecc_single_err_ppu_reorder_flag_ram1_int_flag; + ZXIC_UINT32 ecc_single_err_uc_mf_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_uc_mf_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_mc_mf_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_mc_mf_fifo_int_flag; + ZXIC_UINT32 ecc_single_err_free_global_num_fifo_int_flag; + ZXIC_UINT32 ecc_double_err_free_global_num_fifo_int_flag; +} DPP_PPU_PPU_PPU_RAM_CHECK_ECC_ERR_FLAG_1_T; + +typedef struct dpp_ppu_ppu_isu_ppu_demux_fifo_interrupt_flag_t { + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_flag; + ZXIC_UINT32 isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_flag; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_overflow_flag; + ZXIC_UINT32 isu_in_fifo_64x81_wrapper_u0_underflow_flag; +} DPP_PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_ppu_multicast_fifo_interrupt_flag_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_flag; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_overflow_flag; + ZXIC_UINT32 pf_req_fwft_fifo_16x36_wrapper_u0_underflow_flag; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_flag; + ZXIC_UINT32 pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_flag; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_overflow_flag; + ZXIC_UINT32 dup_para_fwft_fifo_16x35_wrapper_u0_underflow_flag; + ZXIC_UINT32 se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_flag; + ZXIC_UINT32 se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_flag; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_overflow_flag; + ZXIC_UINT32 sa_para_fwft_fifo_64x17_wrapper_u0_underflow_flag; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_overflow_flag; + ZXIC_UINT32 group_id_fifo_64x16_wrapper_u0_underflow_flag; + ZXIC_UINT32 isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_flag; + ZXIC_UINT32 
isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_flag; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; + ZXIC_UINT32 dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_overflow_flag; + ZXIC_UINT32 car_flag_fifo_32x1_wrapper_underflow_flag; +} DPP_PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_ppu_in_schedule_fifo_interrupt_flag_t { + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_flag; + ZXIC_UINT32 free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_flag; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_overflow_flag; + ZXIC_UINT32 mc_mf_fifo_16x2048_wrapper_u0_underflow_flag; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_overflow_flag; + ZXIC_UINT32 uc_mf_fifo_96x2048_wrapper_u0_underflow_flag; +} DPP_PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_ppu_mf_out_fifo_interrupt_flag_t { + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_flag; +} DPP_PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_flag_t { + ZXIC_UINT32 + 
ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; + ZXIC_UINT32 + ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; + ZXIC_UINT32 + ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; + ZXIC_UINT32 + ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; + ZXIC_UINT32 + ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; + ZXIC_UINT32 + ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag; +} DPP_PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_flag_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_flag; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_flag; +} DPP_PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_ppu_mccnt_fifo_interrupt_flag_t { + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_mccnt_fifo_32x15_wrapper_u0_underflow_flag; + ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_flag; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_overflow_flag; + ZXIC_UINT32 mccnt_rsp_fifo_32x1_wrapper_u0_underflow_flag; +} DPP_PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_flag_l_t { + ZXIC_UINT32 mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 
mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 
mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_L_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_flag_m_t { + ZXIC_UINT32 ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_48x16_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x48_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x32_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_delay_fifo_16x16_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_flag; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flag; + ZXIC_UINT32 mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flag; + ZXIC_UINT32 
mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flag; + ZXIC_UINT32 mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flag; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flag; + ZXIC_UINT32 mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flag; + ZXIC_UINT32 mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag; + ZXIC_UINT32 mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_M_T; + +typedef struct dpp_ppu_ppu_coprocessor_fifo_interrupt_flag_h_t { + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_flag; + ZXIC_UINT32 + ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_flag; +} DPP_PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_H_T; + +typedef struct dpp_ppu_ppu_instrmem_fifo_interrupt_flag_t { + ZXIC_UINT32 instrmem2_wr_fifo_ovf_flag; + ZXIC_UINT32 instrmem2_wr_fifo_udf_flag; + ZXIC_UINT32 instrmem2_rd_fifo_ovf_flag; + ZXIC_UINT32 instrmem2_rd_fifo_udf_flag; + ZXIC_UINT32 instrmem1_wr_fifo_ovf_flag; + ZXIC_UINT32 instrmem1_wr_fifo_udf_flag; + ZXIC_UINT32 instrmem1_rd_fifo_ovf_flag; + ZXIC_UINT32 instrmem1_rd_fifo_udf_flag; + ZXIC_UINT32 instrmem0_wr_fifo_ovf_flag; + ZXIC_UINT32 instrmem0_wr_fifo_udf_flag; + ZXIC_UINT32 instrmem0_rd_fifo_ovf_flag; + ZXIC_UINT32 instrmem0_rd_fifo_udf_flag; +} DPP_PPU_PPU_INSTRMEM_FIFO_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_ppu_instrmem_ram_int_out_t { + ZXIC_UINT32 instrmem2_bank3_ram_parity_err_int_out; + ZXIC_UINT32 instrmem2_bank2_ram_parity_err_int_out; + ZXIC_UINT32 instrmem2_bank1_ram_parity_err_int_out; + ZXIC_UINT32 instrmem2_bank0_ram_parity_err_int_out; + ZXIC_UINT32 instrmem1_bank3_ram_parity_err_int_out; + ZXIC_UINT32 instrmem1_bank2_ram_parity_err_int_out; + ZXIC_UINT32 instrmem1_bank1_ram_parity_err_int_out; + ZXIC_UINT32 instrmem1_bank0_ram_parity_err_int_out; + ZXIC_UINT32 instrmem0_bank3_ram_parity_err_int_out; + ZXIC_UINT32 instrmem0_bank2_ram_parity_err_int_out; + ZXIC_UINT32 
instrmem0_bank1_ram_parity_err_int_out; + ZXIC_UINT32 instrmem0_bank0_ram_parity_err_int_out; +} DPP_PPU_PPU_INSTRMEM_RAM_INT_OUT_T; + +typedef struct dpp_ppu_ppu_instrmem_ram_int_mask_t { + ZXIC_UINT32 instrmem2_bank3_ram_parity_err_mask; + ZXIC_UINT32 instrmem2_bank2_ram_parity_err_mask; + ZXIC_UINT32 instrmem2_bank1_ram_parity_err_mask; + ZXIC_UINT32 instrmem2_bank0_ram_parity_err_mask; + ZXIC_UINT32 instrmem1_bank3_ram_parity_err_mask; + ZXIC_UINT32 instrmem1_bank2_ram_parity_err_mask; + ZXIC_UINT32 instrmem1_bank1_ram_parity_err_mask; + ZXIC_UINT32 instrmem1_bank0_ram_parity_err_mask; + ZXIC_UINT32 instrmem0_bank3_ram_parity_err_mask; + ZXIC_UINT32 instrmem0_bank2_ram_parity_err_mask; + ZXIC_UINT32 instrmem0_bank1_ram_parity_err_mask; + ZXIC_UINT32 instrmem0_bank0_ram_parity_err_mask; +} DPP_PPU_PPU_INSTRMEM_RAM_INT_MASK_T; + +typedef struct dpp_ppu_ppu_instrmem_ram_int_stat_t { + ZXIC_UINT32 instrmem2_bank3_ram_parity_errstat; + ZXIC_UINT32 instrmem2_bank2_ram_parity_errstat; + ZXIC_UINT32 instrmem2_bank1_ram_parity_errstat; + ZXIC_UINT32 instrmem2_bank0_ram_parity_errstat; + ZXIC_UINT32 instrmem1_bank3_ram_parity_errstat; + ZXIC_UINT32 instrmem1_bank2_ram_parity_errstat; + ZXIC_UINT32 instrmem1_bank1_ram_parity_errstat; + ZXIC_UINT32 instrmem1_bank0_ram_parity_errstat; + ZXIC_UINT32 instrmem0_bank3_ram_parity_errstat; + ZXIC_UINT32 instrmem0_bank2_ram_parity_errstat; + ZXIC_UINT32 instrmem0_bank1_ram_parity_errstat; + ZXIC_UINT32 instrmem0_bank0_ram_parity_errstat; +} DPP_PPU_PPU_INSTRMEM_RAM_INT_STAT_T; + +typedef struct dpp_ppu_ppu_instrmem_ram_int_flag_t { + ZXIC_UINT32 instrmem2_bank3_ram_parity_err_flag; + ZXIC_UINT32 instrmem2_bank2_ram_parity_err_flag; + ZXIC_UINT32 instrmem2_bank1_ram_parity_err_flag; + ZXIC_UINT32 instrmem2_bank0_ram_parity_err_flag; + ZXIC_UINT32 instrmem1_bank3_ram_parity_err_flag; + ZXIC_UINT32 instrmem1_bank2_ram_parity_err_flag; + ZXIC_UINT32 instrmem1_bank1_ram_parity_err_flag; + ZXIC_UINT32 
instrmem1_bank0_ram_parity_err_flag; + ZXIC_UINT32 instrmem0_bank3_ram_parity_err_flag; + ZXIC_UINT32 instrmem0_bank2_ram_parity_err_flag; + ZXIC_UINT32 instrmem0_bank1_ram_parity_err_flag; + ZXIC_UINT32 instrmem0_bank0_ram_parity_err_flag; +} DPP_PPU_PPU_INSTRMEM_RAM_INT_FLAG_T; + +typedef struct dpp_ppu_ppu_ppu_count_cfg_t { + ZXIC_UINT32 ppu_count_overflow_mode; + ZXIC_UINT32 ppu_count_rd_mode; +} DPP_PPU_PPU_PPU_COUNT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_statics_cfg_t { + ZXIC_UINT32 csr_statics_mc_type; + ZXIC_UINT32 csr_statics_bufnum; + ZXIC_UINT32 csr_statics_portnum1; + ZXIC_UINT32 csr_statics_portnum0; +} DPP_PPU_PPU_PPU_STATICS_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_statics_wb_cfg_t { + ZXIC_UINT32 csr_statics_wb_halt_send_type; + ZXIC_UINT32 csr_statics_wb_mf_type; + ZXIC_UINT32 csr_statics_wb_halt_continue_end; + ZXIC_UINT32 csr_statics_wb_dup_flag; + ZXIC_UINT32 csr_statics_wb_last_flag; + ZXIC_UINT32 csr_statics_wb_dis_flag; +} DPP_PPU_PPU_PPU_STATICS_WB_CFG_T; + +typedef struct dpp_ppu_ppu_wr_table_self_rsp_en_cfg_t { + ZXIC_UINT32 wr_table_self_rsp_en_cfg; +} DPP_PPU_PPU_WR_TABLE_SELF_RSP_EN_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_random_arbiter_8to1_cfg_t { + ZXIC_UINT32 ppu_random_arbiter_8to1_cfg; +} DPP_PPU_PPU_PPU_RANDOM_ARBITER_8TO1_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_reorder_bypass_flow_num_cfg_t { + ZXIC_UINT32 ppu_reorder_bypass_flow_num_cfg; +} DPP_PPU_PPU_PPU_REORDER_BYPASS_FLOW_NUM_CFG_T; + +typedef struct dpp_ppu_ppu_cos_meter_cfg_h_t { + ZXIC_UINT32 cbs; + ZXIC_UINT32 pbs; + ZXIC_UINT32 green_action; + ZXIC_UINT32 yellow_action; + ZXIC_UINT32 red_action; +} DPP_PPU_PPU_COS_METER_CFG_H_T; + +typedef struct dpp_ppu_ppu_cos_meter_cfg_l_t { + ZXIC_UINT32 cir; + ZXIC_UINT32 pir; + ZXIC_UINT32 car_en; +} DPP_PPU_PPU_COS_METER_CFG_L_T; + +typedef struct dpp_ppu_ppu_instrmem_rdy_t { + ZXIC_UINT32 instrmem_rdy; +} DPP_PPU_PPU_INSTRMEM_RDY_T; + +typedef struct dpp_ppu_ppu_instrmem_addr_t { + ZXIC_UINT32 instrmem_operate; + 
ZXIC_UINT32 instrmem_addr; +} DPP_PPU_PPU_INSTRMEM_ADDR_T; + +typedef struct dpp_ppu_ppu_instrmem_ind_access_done_t { + ZXIC_UINT32 instrmem_ind_access_done; +} DPP_PPU_PPU_INSTRMEM_IND_ACCESS_DONE_T; + +typedef struct dpp_ppu_ppu_instrmem_instr0_data_l_t { + ZXIC_UINT32 instrmem_instr0_data_l; +} DPP_PPU_PPU_INSTRMEM_INSTR0_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_instr0_data_h_t { + ZXIC_UINT32 instrmem_instr0_data_h; +} DPP_PPU_PPU_INSTRMEM_INSTR0_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_instr1_data_l_t { + ZXIC_UINT32 instrmem_instr1_data_l; +} DPP_PPU_PPU_INSTRMEM_INSTR1_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_instr1_data_h_t { + ZXIC_UINT32 instrmem_instr1_data_h; +} DPP_PPU_PPU_INSTRMEM_INSTR1_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_instr2_data_l_t { + ZXIC_UINT32 instrmem_instr2_data_l; +} DPP_PPU_PPU_INSTRMEM_INSTR2_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_instr2_data_h_t { + ZXIC_UINT32 instrmem_instr2_data_h; +} DPP_PPU_PPU_INSTRMEM_INSTR2_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_instr3_data_l_t { + ZXIC_UINT32 instrmem_instr3_data_l; +} DPP_PPU_PPU_INSTRMEM_INSTR3_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_instr3_data_h_t { + ZXIC_UINT32 instrmem_instr3_data_h; +} DPP_PPU_PPU_INSTRMEM_INSTR3_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr0_data_l_t { + ZXIC_UINT32 instrmem_read_instr0_data_l; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR0_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr0_data_h_t { + ZXIC_UINT32 instrmem_read_instr0_data_h; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR0_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr1_data_l_t { + ZXIC_UINT32 instrmem_read_instr1_data_l; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR1_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr1_data_h_t { + ZXIC_UINT32 instrmem_read_instr1_data_h; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR1_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr2_data_l_t { + ZXIC_UINT32 instrmem_read_instr2_data_l; 
+} DPP_PPU_PPU_INSTRMEM_READ_INSTR2_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr2_data_h_t { + ZXIC_UINT32 instrmem_read_instr2_data_h; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR2_DATA_H_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr3_data_l_t { + ZXIC_UINT32 instrmem_read_instr3_data_l; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR3_DATA_L_T; + +typedef struct dpp_ppu_ppu_instrmem_read_instr3_data_h_t { + ZXIC_UINT32 instrmem_read_instr3_data_h; +} DPP_PPU_PPU_INSTRMEM_READ_INSTR3_DATA_H_T; + +typedef struct dpp_ppu_ppu_se_ppu_mc_srh_fc_cnt_h_t { + ZXIC_UINT32 se_ppu_mc_srh_fc_cnt_h; +} DPP_PPU_PPU_SE_PPU_MC_SRH_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_se_ppu_mc_srh_fc_cnt_l_t { + ZXIC_UINT32 se_ppu_mc_srh_fc_cnt_l; +} DPP_PPU_PPU_SE_PPU_MC_SRH_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_se_mc_srh_fc_cnt_h_t { + ZXIC_UINT32 ppu_se_mc_srh_fc_cnt_h; +} DPP_PPU_PPU_PPU_SE_MC_SRH_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_se_mc_srh_fc_cnt_l_t { + ZXIC_UINT32 ppu_se_mc_srh_fc_cnt_l; +} DPP_PPU_PPU_PPU_SE_MC_SRH_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_se_mc_srh_vld_cnt_h_t { + ZXIC_UINT32 ppu_se_mc_srh_vld_cnt_h; +} DPP_PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_se_mc_srh_vld_cnt_l_t { + ZXIC_UINT32 ppu_se_mc_srh_vld_cnt_l; +} DPP_PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_se_ppu_mc_srh_vld_cnt_h_t { + ZXIC_UINT32 se_ppu_mc_srh_vld_cnt_h; +} DPP_PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_se_ppu_mc_srh_vld_cnt_l_t { + ZXIC_UINT32 se_ppu_mc_srh_vld_cnt_l; +} DPP_PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_h_t { + ZXIC_UINT32 pbu_ppu_logic_pf_fc_cnt_h; +} DPP_PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_l_t { + ZXIC_UINT32 pbu_ppu_logic_pf_fc_cnt_l; +} DPP_PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_h_t { + ZXIC_UINT32 ppu_pbu_logic_rsp_fc_cnt_h; +} 
DPP_PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_l_t { + ZXIC_UINT32 ppu_pbu_logic_rsp_fc_cnt_l; +} DPP_PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_h_t { + ZXIC_UINT32 ppu_pbu_logic_pf_req_vld_cnt_h; +} DPP_PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_l_t { + ZXIC_UINT32 ppu_pbu_logic_pf_req_vld_cnt_l; +} DPP_PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_h_t { + ZXIC_UINT32 pbu_ppu_logic_pf_rsp_vld_cnt_h; +} DPP_PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_l_t { + ZXIC_UINT32 pbu_ppu_logic_pf_rsp_vld_cnt_l; +} DPP_PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_h_t { + ZXIC_UINT32 pbu_ppu_ifb_rd_fc_cnt_h; +} DPP_PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_l_t { + ZXIC_UINT32 pbu_ppu_ifb_rd_fc_cnt_l; +} DPP_PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_wb_fc_cnt_h_t { + ZXIC_UINT32 pbu_ppu_wb_fc_cnt_h; +} DPP_PPU_PPU_PBU_PPU_WB_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_wb_fc_cnt_l_t { + ZXIC_UINT32 pbu_ppu_wb_fc_cnt_l; +} DPP_PPU_PPU_PBU_PPU_WB_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_h_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_req_vld_cnt_h; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_l_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_req_vld_cnt_l; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_h_t { + ZXIC_UINT32 pbu_ppu_mcode_pf_rsp_vld_cnt_h; +} DPP_PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_l_t { + ZXIC_UINT32 pbu_ppu_mcode_pf_rsp_vld_cnt_l; +} 
DPP_PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_odma_ppu_para_fc_cnt_h_t { + ZXIC_UINT32 odma_ppu_para_fc_cnt_h; +} DPP_PPU_PPU_ODMA_PPU_PARA_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_odma_ppu_para_fc_cnt_l_t { + ZXIC_UINT32 odma_ppu_para_fc_cnt_l; +} DPP_PPU_PPU_ODMA_PPU_PARA_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_h_t { + ZXIC_UINT32 odma_ppu_mccnt_wr_fc_cnt_h; +} DPP_PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_l_t { + ZXIC_UINT32 odma_ppu_mccnt_wr_fc_cnt_l; +} DPP_PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_h_t { + ZXIC_UINT32 ppu_odma_mccnt_wr_vld_cnt_h; +} DPP_PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_l_t { + ZXIC_UINT32 ppu_odma_mccnt_wr_vld_cnt_l; +} DPP_PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_h_t { + ZXIC_UINT32 odma_ppu_mccnt_rsp_vld_cnt_h; +} DPP_PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_l_t { + ZXIC_UINT32 odma_ppu_mccnt_rsp_vld_cnt_l; +} DPP_PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_uc_fc_cnt_h_t { + ZXIC_UINT32 ppu_pktrx_uc_fc_cnt_h; +} DPP_PPU_PPU_PPU_PKTRX_UC_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_uc_fc_cnt_l_t { + ZXIC_UINT32 ppu_pktrx_uc_fc_cnt_l; +} DPP_PPU_PPU_PPU_PKTRX_UC_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_fc_cnt_h_t { + ZXIC_UINT32 ppu_pktrx_mc_fc_cnt_h; +} DPP_PPU_PPU_PPU_PKTRX_MC_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_fc_cnt_l_t { + ZXIC_UINT32 ppu_pktrx_mc_fc_cnt_l; +} DPP_PPU_PPU_PPU_PKTRX_MC_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_pktrx_ppu_desc_vld_cnt_h_t { + ZXIC_UINT32 pktrx_ppu_desc_vld_cnt_h; +} DPP_PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_pktrx_ppu_desc_vld_cnt_l_t { + ZXIC_UINT32 pktrx_ppu_desc_vld_cnt_l; +} 
DPP_PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_h_t { + ZXIC_UINT32 ppu_pbu_ifb_req_vld_cnt_h; +} DPP_PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_l_t { + ZXIC_UINT32 ppu_pbu_ifb_req_vld_cnt_l; +} DPP_PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_h_t { + ZXIC_UINT32 pbu_ppu_ifb_rsp_vld_cnt_h; +} DPP_PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_l_t { + ZXIC_UINT32 pbu_ppu_ifb_rsp_vld_cnt_l; +} DPP_PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_wb_vld_cnt_h_t { + ZXIC_UINT32 ppu_pbu_wb_vld_cnt_h; +} DPP_PPU_PPU_PPU_PBU_WB_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_wb_vld_cnt_l_t { + ZXIC_UINT32 ppu_pbu_wb_vld_cnt_l; +} DPP_PPU_PPU_PPU_PBU_WB_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_h_t { + ZXIC_UINT32 pbu_ppu_reorder_para_vld_cnt_h; +} DPP_PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_l_t { + ZXIC_UINT32 pbu_ppu_reorder_para_vld_cnt_l; +} DPP_PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_odma_para_vld_cnt_h_t { + ZXIC_UINT32 ppu_odma_para_vld_cnt_h; +} DPP_PPU_PPU_PPU_ODMA_PARA_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_odma_para_vld_cnt_l_t { + ZXIC_UINT32 ppu_odma_para_vld_cnt_l; +} DPP_PPU_PPU_PPU_ODMA_PARA_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_mc_vld_cnt_h_t { + ZXIC_UINT32 statics_isu_ppu_mc_vld_cnt_h; +} DPP_PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_mc_vld_cnt_l_t { + ZXIC_UINT32 statics_isu_ppu_mc_vld_cnt_l; +} DPP_PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_h_t { + ZXIC_UINT32 statics_isu_ppu_mc_loop_vld_cnt_h; +} DPP_PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_H_T; + +typedef struct 
dpp_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_l_t { + ZXIC_UINT32 statics_isu_ppu_mc_loop_vld_cnt_l; +} DPP_PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_uc_vld_cnt_h_t { + ZXIC_UINT32 statics_isu_ppu_uc_vld_cnt_h; +} DPP_PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_uc_vld_cnt_l_t { + ZXIC_UINT32 statics_isu_ppu_uc_vld_cnt_l; +} DPP_PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_h_t { + ZXIC_UINT32 statics_isu_ppu_uc_bufnumis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_l_t { + ZXIC_UINT32 statics_isu_ppu_uc_bufnumis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_mc_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_mc_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_mc_bufnumis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_mc_bufnumis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_mc_srcportis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_mc_srcportis0_vld_cnt_l; +} 
DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_mc_srcportis1_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_mc_srcportis1_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_uc_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_uc_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_uc_bufnumis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_uc_bufnumis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_uc_srcportis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_l_t { + ZXIC_UINT32 statics_demux_schedule_uc_srcportis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_h_t { + ZXIC_UINT32 statics_demux_schedule_uc_srcportis1_vld_cnt_h; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_l_t { + ZXIC_UINT32 
statics_demux_schedule_uc_srcportis1_vld_cnt_l; +} DPP_PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_bufnumis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_bufnumis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_srcportis0_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_srcportis0_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_srcportis1_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_srcportis1_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_halt_send_type_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_halt_send_type_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_mf_type_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_H_T; + +typedef struct 
dpp_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_mf_type_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_halt_continue_end_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_halt_continue_end_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_dup_flag_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_dup_flag_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_last_flag_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_last_flag_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_h_t { + ZXIC_UINT32 statics_ppu_wb_dis_flag_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_l_t { + ZXIC_UINT32 statics_ppu_wb_dis_flag_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_L_T; + +typedef 
struct dpp_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_h_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_mf_type_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_l_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_mf_type_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l_t { + ZXIC_UINT32 statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l; +} DPP_PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_car_green_pkt_vld_cnt_h_t { + ZXIC_UINT32 car_green_pkt_vld_cnt_h; +} DPP_PPU_PPU_CAR_GREEN_PKT_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_car_green_pkt_vld_cnt_l_t { + ZXIC_UINT32 car_green_pkt_vld_cnt_l; +} DPP_PPU_PPU_CAR_GREEN_PKT_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_car_yellow_pkt_vld_cnt_h_t { + ZXIC_UINT32 car_yellow_pkt_vld_cnt_h; +} DPP_PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_car_yellow_pkt_vld_cnt_l_t { + ZXIC_UINT32 car_yellow_pkt_vld_cnt_l; +} DPP_PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_car_red_pkt_vld_cnt_h_t { + ZXIC_UINT32 car_red_pkt_vld_cnt_h; +} DPP_PPU_PPU_CAR_RED_PKT_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_car_red_pkt_vld_cnt_l_t { + ZXIC_UINT32 car_red_pkt_vld_cnt_l; +} DPP_PPU_PPU_CAR_RED_PKT_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_car_drop_pkt_vld_cnt_h_t { + ZXIC_UINT32 car_drop_pkt_vld_cnt_h; +} DPP_PPU_PPU_CAR_DROP_PKT_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_car_drop_pkt_vld_cnt_l_t { + ZXIC_UINT32 car_drop_pkt_vld_cnt_l; +} DPP_PPU_PPU_CAR_DROP_PKT_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_h_t { + ZXIC_UINT32 
ppu_pktrx_mc_ptr_vld_cnt_h; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_H_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_l_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_vld_cnt_l; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_L_T; + +typedef struct dpp_ppu_ppu_isu_ppu_loopback_fc_cnt_h_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_vld_cnt_h; +} DPP_PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_H_T; + +typedef struct dpp_ppu_ppu_isu_ppu_loopback_fc_cnt_l_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_vld_cnt_l; +} DPP_PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_L_T; + +typedef struct dpp_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct 
dpp_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_mccnt_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 mccnt_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_MCCNT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_mccnt_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 mccnt_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_MCCNT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_mccnt_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 mccnt_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_MCCNT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_mccnt_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 mccnt_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_MCCNT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_uc_mf_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 uc_mf_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_UC_MF_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_uc_mf_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 uc_mf_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_UC_MF_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_uc_mf_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 uc_mf_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_UC_MF_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_uc_mf_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 uc_mf_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_UC_MF_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_mc_mf_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 mc_mf_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_MC_MF_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_mc_mf_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 mc_mf_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_MC_MF_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_mc_mf_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 mc_mf_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_MC_MF_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef 
struct dpp_ppu_ppu_mc_mf_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 mc_mf_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_MC_MF_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mf_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 isu_mf_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_ISU_MF_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mf_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 isu_mf_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_ISU_MF_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mf_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 isu_mf_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mf_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 isu_mf_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_isu_fwft_mf_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 isu_fwft_mf_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_fwft_mf_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 isu_fwft_mf_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mc_para_mf_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 isu_mc_para_mf_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mc_para_mf_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 isu_mc_para_mf_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 isu_mc_para_mf_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 isu_mc_para_mf_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct 
dpp_ppu_ppu_group_id_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 group_id_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_GROUP_ID_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_group_id_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 group_id_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_GROUP_ID_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_group_id_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 group_id_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_group_id_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 group_id_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_sa_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 sa_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_SA_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_sa_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 sa_para_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_SA_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_sa_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 sa_para_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_sa_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 sa_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_se_mc_rsp_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 se_mc_rsp_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_se_mc_rsp_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 se_mc_rsp_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_se_mc_rsp_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 se_mc_rsp_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_se_mc_rsp_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 
se_mc_rsp_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_dup_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 dup_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_DUP_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_dup_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 dup_para_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_DUP_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_dup_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 dup_para_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_dup_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 dup_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_pf_rsp_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 pf_rsp_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PF_RSP_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_pf_rsp_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 pf_rsp_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PF_RSP_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_pf_rsp_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 pf_rsp_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_pf_rsp_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 pf_rsp_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_dup_freeptr_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 dup_freeptr_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_dup_freeptr_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 dup_freeptr_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_dup_freeptr_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 dup_freeptr_fifo_prog_empty_assert_cfg; +} 
DPP_PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_dup_freeptr_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 dup_freeptr_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_pf_req_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 pf_req_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PF_REQ_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_pf_req_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 pf_req_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PF_REQ_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_pf_req_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 pf_req_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_pf_req_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 pf_req_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_car_flag_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 car_flag_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_car_flag_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 car_flag_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_car_flag_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 car_flag_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_car_flag_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 car_flag_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_out_afifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_out_afifo_prog_full_negate_cfg; +} 
DPP_PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_out_afifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_out_afifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_key_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_key_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_key_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_key_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_key_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_key_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_key_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_key_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_para_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 
ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_result_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_result_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_checksum_result_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_checksum_result_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_checksum_result_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_checksum_result_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 
ppu_cop_crc_first_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_first_para_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_bypass_delay_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_bypass_delay_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_bypass_delay_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_bypass_delay_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_second_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_second_para_fifo_prog_full_negate_cfg; +} 
DPP_PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_fifo_prog_empty_assert_cfg; +} 
DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_free_global_num_fwft_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 free_global_num_fwft_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_free_global_num_fwft_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 free_global_num_fwft_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_free_global_num_fwft_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 free_global_num_fwft_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_free_global_num_fwft_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 free_global_num_fwft_fifo_prog_empty_negate_cfg; +} 
DPP_PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg; +} DPP_PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_ppu_pkt_data0_t { + ZXIC_UINT32 pkt_data0; +} DPP_PPU_PPU_PKT_DATA0_T; + +typedef struct dpp_ppu_ppu_pkt_data1_t { + ZXIC_UINT32 pkt_data1; +} DPP_PPU_PPU_PKT_DATA1_T; + +typedef struct dpp_ppu_ppu_pkt_data2_t { + ZXIC_UINT32 pkt_data2; +} DPP_PPU_PPU_PKT_DATA2_T; + +typedef struct dpp_ppu_ppu_pkt_data3_t { + ZXIC_UINT32 pkt_data3; +} DPP_PPU_PPU_PKT_DATA3_T; + +typedef struct dpp_ppu_ppu_pkt_data4_t { + ZXIC_UINT32 pkt_data4; +} DPP_PPU_PPU_PKT_DATA4_T; + +typedef struct dpp_ppu_ppu_pkt_data5_t { + ZXIC_UINT32 pkt_data5; +} DPP_PPU_PPU_PKT_DATA5_T; + +typedef struct dpp_ppu_ppu_pkt_data6_t { + ZXIC_UINT32 pkt_data6; +} DPP_PPU_PPU_PKT_DATA6_T; + +typedef struct dpp_ppu_ppu_pkt_data7_t { + ZXIC_UINT32 pkt_data7; +} DPP_PPU_PPU_PKT_DATA7_T; + +typedef struct dpp_ppu_ppu_pkt_data8_t { + ZXIC_UINT32 pkt_data8; +} DPP_PPU_PPU_PKT_DATA8_T; + +typedef struct dpp_ppu_ppu_pkt_data9_t { + ZXIC_UINT32 pkt_data9; +} DPP_PPU_PPU_PKT_DATA9_T; + +typedef struct dpp_ppu_ppu_pkt_data10_t { + ZXIC_UINT32 pkt_data10; +} DPP_PPU_PPU_PKT_DATA10_T; + +typedef struct dpp_ppu_ppu_pkt_data11_t { + ZXIC_UINT32 pkt_data11; +} 
DPP_PPU_PPU_PKT_DATA11_T; + +typedef struct dpp_ppu_ppu_pkt_data12_t { + ZXIC_UINT32 pkt_data12; +} DPP_PPU_PPU_PKT_DATA12_T; + +typedef struct dpp_ppu_ppu_pkt_data13_t { + ZXIC_UINT32 pkt_data13; +} DPP_PPU_PPU_PKT_DATA13_T; + +typedef struct dpp_ppu_ppu_pkt_data14_t { + ZXIC_UINT32 pkt_data14; +} DPP_PPU_PPU_PKT_DATA14_T; + +typedef struct dpp_ppu_ppu_pkt_data15_t { + ZXIC_UINT32 pkt_data15; +} DPP_PPU_PPU_PKT_DATA15_T; + +typedef struct dpp_ppu_ppu_pkt_data16_t { + ZXIC_UINT32 pkt_data16; +} DPP_PPU_PPU_PKT_DATA16_T; + +typedef struct dpp_ppu_ppu_pkt_data17_t { + ZXIC_UINT32 pkt_data17; +} DPP_PPU_PPU_PKT_DATA17_T; + +typedef struct dpp_ppu_ppu_pkt_data18_t { + ZXIC_UINT32 pkt_data18; +} DPP_PPU_PPU_PKT_DATA18_T; + +typedef struct dpp_ppu_ppu_pkt_data19_t { + ZXIC_UINT32 pkt_data19; +} DPP_PPU_PPU_PKT_DATA19_T; + +typedef struct dpp_ppu_ppu_pkt_data20_t { + ZXIC_UINT32 pkt_data20; +} DPP_PPU_PPU_PKT_DATA20_T; + +typedef struct dpp_ppu_ppu_pkt_data21_t { + ZXIC_UINT32 pkt_data21; +} DPP_PPU_PPU_PKT_DATA21_T; + +typedef struct dpp_ppu_ppu_pkt_data22_t { + ZXIC_UINT32 pkt_data22; +} DPP_PPU_PPU_PKT_DATA22_T; + +typedef struct dpp_ppu_ppu_pkt_data23_t { + ZXIC_UINT32 pkt_data23; +} DPP_PPU_PPU_PKT_DATA23_T; + +typedef struct dpp_ppu_ppu_pkt_data24_t { + ZXIC_UINT32 pkt_data24; +} DPP_PPU_PPU_PKT_DATA24_T; + +typedef struct dpp_ppu_ppu_pkt_data25_t { + ZXIC_UINT32 pkt_data25; +} DPP_PPU_PPU_PKT_DATA25_T; + +typedef struct dpp_ppu_ppu_pkt_data26_t { + ZXIC_UINT32 pkt_data26; +} DPP_PPU_PPU_PKT_DATA26_T; + +typedef struct dpp_ppu_ppu_pkt_data27_t { + ZXIC_UINT32 pkt_data27; +} DPP_PPU_PPU_PKT_DATA27_T; + +typedef struct dpp_ppu_ppu_pkt_data28_t { + ZXIC_UINT32 pkt_data28; +} DPP_PPU_PPU_PKT_DATA28_T; + +typedef struct dpp_ppu_ppu_pkt_data29_t { + ZXIC_UINT32 pkt_data29; +} DPP_PPU_PPU_PKT_DATA29_T; + +typedef struct dpp_ppu_ppu_pkt_data30_t { + ZXIC_UINT32 pkt_data30; +} DPP_PPU_PPU_PKT_DATA30_T; + +typedef struct dpp_ppu_ppu_pkt_data31_t { + ZXIC_UINT32 pkt_data31; +} 
DPP_PPU_PPU_PKT_DATA31_T; + +typedef struct dpp_ppu_ppu_pkt_data32_t { + ZXIC_UINT32 pkt_data32; +} DPP_PPU_PPU_PKT_DATA32_T; + +typedef struct dpp_ppu_ppu_pkt_data33_t { + ZXIC_UINT32 pkt_data33; +} DPP_PPU_PPU_PKT_DATA33_T; + +typedef struct dpp_ppu_ppu_pkt_data34_t { + ZXIC_UINT32 pkt_data34; +} DPP_PPU_PPU_PKT_DATA34_T; + +typedef struct dpp_ppu_ppu_pkt_data35_t { + ZXIC_UINT32 pkt_data35; +} DPP_PPU_PPU_PKT_DATA35_T; + +typedef struct dpp_ppu_ppu_pkt_data36_t { + ZXIC_UINT32 pkt_data36; +} DPP_PPU_PPU_PKT_DATA36_T; + +typedef struct dpp_ppu_ppu_pkt_data37_t { + ZXIC_UINT32 pkt_data37; +} DPP_PPU_PPU_PKT_DATA37_T; + +typedef struct dpp_ppu_ppu_pkt_data38_t { + ZXIC_UINT32 pkt_data38; +} DPP_PPU_PPU_PKT_DATA38_T; + +typedef struct dpp_ppu_ppu_pkt_data39_t { + ZXIC_UINT32 pkt_data39; +} DPP_PPU_PPU_PKT_DATA39_T; + +typedef struct dpp_ppu_ppu_pkt_data40_t { + ZXIC_UINT32 pkt_data40; +} DPP_PPU_PPU_PKT_DATA40_T; + +typedef struct dpp_ppu_ppu_pkt_data41_t { + ZXIC_UINT32 pkt_data41; +} DPP_PPU_PPU_PKT_DATA41_T; + +typedef struct dpp_ppu_ppu_pkt_data42_t { + ZXIC_UINT32 pkt_data42; +} DPP_PPU_PPU_PKT_DATA42_T; + +typedef struct dpp_ppu_ppu_pkt_data43_t { + ZXIC_UINT32 pkt_data43; +} DPP_PPU_PPU_PKT_DATA43_T; + +typedef struct dpp_ppu_ppu_pkt_data44_t { + ZXIC_UINT32 pkt_data44; +} DPP_PPU_PPU_PKT_DATA44_T; + +typedef struct dpp_ppu_ppu_pkt_data45_t { + ZXIC_UINT32 pkt_data45; +} DPP_PPU_PPU_PKT_DATA45_T; + +typedef struct dpp_ppu_ppu_pkt_data46_t { + ZXIC_UINT32 pkt_data46; +} DPP_PPU_PPU_PKT_DATA46_T; + +typedef struct dpp_ppu_ppu_pkt_data47_t { + ZXIC_UINT32 pkt_data47; +} DPP_PPU_PPU_PKT_DATA47_T; + +typedef struct dpp_ppu_ppu_pkt_data48_t { + ZXIC_UINT32 pkt_data48; +} DPP_PPU_PPU_PKT_DATA48_T; + +typedef struct dpp_ppu_ppu_pkt_data49_t { + ZXIC_UINT32 pkt_data49; +} DPP_PPU_PPU_PKT_DATA49_T; + +typedef struct dpp_ppu_ppu_pkt_data50_t { + ZXIC_UINT32 pkt_data50; +} DPP_PPU_PPU_PKT_DATA50_T; + +typedef struct dpp_ppu_ppu_pkt_data51_t { + ZXIC_UINT32 pkt_data51; +} 
DPP_PPU_PPU_PKT_DATA51_T; + +typedef struct dpp_ppu_ppu_pkt_data52_t { + ZXIC_UINT32 pkt_data52; +} DPP_PPU_PPU_PKT_DATA52_T; + +typedef struct dpp_ppu_ppu_pkt_data53_t { + ZXIC_UINT32 pkt_data53; +} DPP_PPU_PPU_PKT_DATA53_T; + +typedef struct dpp_ppu_ppu_pkt_data54_t { + ZXIC_UINT32 pkt_data54; +} DPP_PPU_PPU_PKT_DATA54_T; + +typedef struct dpp_ppu_ppu_pkt_data55_t { + ZXIC_UINT32 pkt_data55; +} DPP_PPU_PPU_PKT_DATA55_T; + +typedef struct dpp_ppu_ppu_pkt_data56_t { + ZXIC_UINT32 pkt_data56; +} DPP_PPU_PPU_PKT_DATA56_T; + +typedef struct dpp_ppu_ppu_pkt_data57_t { + ZXIC_UINT32 pkt_data57; +} DPP_PPU_PPU_PKT_DATA57_T; + +typedef struct dpp_ppu_ppu_pkt_data58_t { + ZXIC_UINT32 pkt_data58; +} DPP_PPU_PPU_PKT_DATA58_T; + +typedef struct dpp_ppu_ppu_pkt_data59_t { + ZXIC_UINT32 pkt_data59; +} DPP_PPU_PPU_PKT_DATA59_T; + +typedef struct dpp_ppu_ppu_pkt_data60_t { + ZXIC_UINT32 pkt_data60; +} DPP_PPU_PPU_PKT_DATA60_T; + +typedef struct dpp_ppu_ppu_pkt_data61_t { + ZXIC_UINT32 pkt_data61; +} DPP_PPU_PPU_PKT_DATA61_T; + +typedef struct dpp_ppu_ppu_pkt_data62_t { + ZXIC_UINT32 pkt_data62; +} DPP_PPU_PPU_PKT_DATA62_T; + +typedef struct dpp_ppu_ppu_pkt_data63_t { + ZXIC_UINT32 pkt_data63; +} DPP_PPU_PPU_PKT_DATA63_T; + +typedef struct dpp_ppu_ppu_pkt_data64_t { + ZXIC_UINT32 pkt_data64; +} DPP_PPU_PPU_PKT_DATA64_T; + +typedef struct dpp_ppu_ppu_pkt_data65_t { + ZXIC_UINT32 pkt_data65; +} DPP_PPU_PPU_PKT_DATA65_T; + +typedef struct dpp_ppu_ppu_pkt_data66_t { + ZXIC_UINT32 pkt_data66; +} DPP_PPU_PPU_PKT_DATA66_T; + +typedef struct dpp_ppu_ppu_pkt_data67_t { + ZXIC_UINT32 pkt_data67; +} DPP_PPU_PPU_PKT_DATA67_T; + +typedef struct dpp_ppu_ppu_pkt_data68_t { + ZXIC_UINT32 pkt_data68; +} DPP_PPU_PPU_PKT_DATA68_T; + +typedef struct dpp_ppu_ppu_pkt_data69_t { + ZXIC_UINT32 pkt_data69; +} DPP_PPU_PPU_PKT_DATA69_T; + +typedef struct dpp_ppu_ppu_pkt_data70_t { + ZXIC_UINT32 pkt_data70; +} DPP_PPU_PPU_PKT_DATA70_T; + +typedef struct dpp_ppu_ppu_pkt_data71_t { + ZXIC_UINT32 pkt_data71; +} 
DPP_PPU_PPU_PKT_DATA71_T; + +typedef struct dpp_ppu_ppu_pkt_data72_t { + ZXIC_UINT32 pkt_data72; +} DPP_PPU_PPU_PKT_DATA72_T; + +typedef struct dpp_ppu_ppu_pkt_data73_t { + ZXIC_UINT32 pkt_data73; +} DPP_PPU_PPU_PKT_DATA73_T; + +typedef struct dpp_ppu_ppu_pkt_data74_t { + ZXIC_UINT32 pkt_data74; +} DPP_PPU_PPU_PKT_DATA74_T; + +typedef struct dpp_ppu_ppu_pkt_data75_t { + ZXIC_UINT32 pkt_data75; +} DPP_PPU_PPU_PKT_DATA75_T; + +typedef struct dpp_ppu_ppu_pkt_data76_t { + ZXIC_UINT32 pkt_data76; +} DPP_PPU_PPU_PKT_DATA76_T; + +typedef struct dpp_ppu_ppu_pkt_data77_t { + ZXIC_UINT32 pkt_data77; +} DPP_PPU_PPU_PKT_DATA77_T; + +typedef struct dpp_ppu_ppu_pkt_data78_t { + ZXIC_UINT32 pkt_data78; +} DPP_PPU_PPU_PKT_DATA78_T; + +typedef struct dpp_ppu_ppu_pkt_data79_t { + ZXIC_UINT32 pkt_data79; +} DPP_PPU_PPU_PKT_DATA79_T; + +typedef struct dpp_ppu_ppu_pkt_data80_t { + ZXIC_UINT32 pkt_data80; +} DPP_PPU_PPU_PKT_DATA80_T; + +typedef struct dpp_ppu_ppu_pkt_data81_t { + ZXIC_UINT32 pkt_data81; +} DPP_PPU_PPU_PKT_DATA81_T; + +typedef struct dpp_ppu_ppu_pkt_data82_t { + ZXIC_UINT32 pkt_data82; +} DPP_PPU_PPU_PKT_DATA82_T; + +typedef struct dpp_ppu_ppu_pkt_data83_t { + ZXIC_UINT32 pkt_data83; +} DPP_PPU_PPU_PKT_DATA83_T; + +typedef struct dpp_ppu_ppu_pkt_data84_t { + ZXIC_UINT32 pkt_data84; +} DPP_PPU_PPU_PKT_DATA84_T; + +typedef struct dpp_ppu_ppu_pkt_data85_t { + ZXIC_UINT32 pkt_data85; +} DPP_PPU_PPU_PKT_DATA85_T; + +typedef struct dpp_ppu_ppu_pkt_data86_t { + ZXIC_UINT32 pkt_data86; +} DPP_PPU_PPU_PKT_DATA86_T; + +typedef struct dpp_ppu_ppu_pkt_data87_t { + ZXIC_UINT32 pkt_data87; +} DPP_PPU_PPU_PKT_DATA87_T; + +typedef struct dpp_ppu_ppu_pkt_data88_t { + ZXIC_UINT32 pkt_data88; +} DPP_PPU_PPU_PKT_DATA88_T; + +typedef struct dpp_ppu_ppu_pkt_data89_t { + ZXIC_UINT32 pkt_data89; +} DPP_PPU_PPU_PKT_DATA89_T; + +typedef struct dpp_ppu_ppu_pkt_data90_t { + ZXIC_UINT32 pkt_data90; +} DPP_PPU_PPU_PKT_DATA90_T; + +typedef struct dpp_ppu_ppu_pkt_data91_t { + ZXIC_UINT32 pkt_data91; +} 
DPP_PPU_PPU_PKT_DATA91_T; + +typedef struct dpp_ppu_ppu_pkt_data92_t { + ZXIC_UINT32 pkt_data92; +} DPP_PPU_PPU_PKT_DATA92_T; + +typedef struct dpp_ppu_ppu_pkt_data93_t { + ZXIC_UINT32 pkt_data93; +} DPP_PPU_PPU_PKT_DATA93_T; + +typedef struct dpp_ppu_ppu_pkt_data94_t { + ZXIC_UINT32 pkt_data94; +} DPP_PPU_PPU_PKT_DATA94_T; + +typedef struct dpp_ppu_ppu_pkt_data95_t { + ZXIC_UINT32 pkt_data95; +} DPP_PPU_PPU_PKT_DATA95_T; + +typedef struct dpp_ppu_ppu_pkt_data96_t { + ZXIC_UINT32 pkt_data96; +} DPP_PPU_PPU_PKT_DATA96_T; + +typedef struct dpp_ppu_ppu_pkt_data97_t { + ZXIC_UINT32 pkt_data97; +} DPP_PPU_PPU_PKT_DATA97_T; + +typedef struct dpp_ppu_ppu_pkt_data98_t { + ZXIC_UINT32 pkt_data98; +} DPP_PPU_PPU_PKT_DATA98_T; + +typedef struct dpp_ppu_ppu_pkt_data99_t { + ZXIC_UINT32 pkt_data99; +} DPP_PPU_PPU_PKT_DATA99_T; + +typedef struct dpp_ppu_ppu_pkt_data100_t { + ZXIC_UINT32 pkt_data100; +} DPP_PPU_PPU_PKT_DATA100_T; + +typedef struct dpp_ppu_ppu_pkt_data101_t { + ZXIC_UINT32 pkt_data101; +} DPP_PPU_PPU_PKT_DATA101_T; + +typedef struct dpp_ppu_ppu_pkt_data102_t { + ZXIC_UINT32 pkt_data102; +} DPP_PPU_PPU_PKT_DATA102_T; + +typedef struct dpp_ppu_ppu_pkt_data103_t { + ZXIC_UINT32 pkt_data103; +} DPP_PPU_PPU_PKT_DATA103_T; + +typedef struct dpp_ppu_ppu_pkt_data104_t { + ZXIC_UINT32 pkt_data104; +} DPP_PPU_PPU_PKT_DATA104_T; + +typedef struct dpp_ppu_ppu_pkt_data105_t { + ZXIC_UINT32 pkt_data105; +} DPP_PPU_PPU_PKT_DATA105_T; + +typedef struct dpp_ppu_ppu_pkt_data106_t { + ZXIC_UINT32 pkt_data106; +} DPP_PPU_PPU_PKT_DATA106_T; + +typedef struct dpp_ppu_ppu_pkt_data107_t { + ZXIC_UINT32 pkt_data107; +} DPP_PPU_PPU_PKT_DATA107_T; + +typedef struct dpp_ppu_ppu_pkt_data108_t { + ZXIC_UINT32 pkt_data108; +} DPP_PPU_PPU_PKT_DATA108_T; + +typedef struct dpp_ppu_ppu_pkt_data109_t { + ZXIC_UINT32 pkt_data109; +} DPP_PPU_PPU_PKT_DATA109_T; + +typedef struct dpp_ppu_ppu_pkt_data110_t { + ZXIC_UINT32 pkt_data110; +} DPP_PPU_PPU_PKT_DATA110_T; + +typedef struct 
dpp_ppu_ppu_pkt_data111_t { + ZXIC_UINT32 pkt_data111; +} DPP_PPU_PPU_PKT_DATA111_T; + +typedef struct dpp_ppu_ppu_pkt_data112_t { + ZXIC_UINT32 pkt_data112; +} DPP_PPU_PPU_PKT_DATA112_T; + +typedef struct dpp_ppu_ppu_pkt_data113_t { + ZXIC_UINT32 pkt_data113; +} DPP_PPU_PPU_PKT_DATA113_T; + +typedef struct dpp_ppu_ppu_pkt_data114_t { + ZXIC_UINT32 pkt_data114; +} DPP_PPU_PPU_PKT_DATA114_T; + +typedef struct dpp_ppu_ppu_pkt_data115_t { + ZXIC_UINT32 pkt_data115; +} DPP_PPU_PPU_PKT_DATA115_T; + +typedef struct dpp_ppu_ppu_pkt_data116_t { + ZXIC_UINT32 pkt_data116; +} DPP_PPU_PPU_PKT_DATA116_T; + +typedef struct dpp_ppu_ppu_pkt_data117_t { + ZXIC_UINT32 pkt_data117; +} DPP_PPU_PPU_PKT_DATA117_T; + +typedef struct dpp_ppu_ppu_pkt_data118_t { + ZXIC_UINT32 pkt_data118; +} DPP_PPU_PPU_PKT_DATA118_T; + +typedef struct dpp_ppu_ppu_pkt_data119_t { + ZXIC_UINT32 pkt_data119; +} DPP_PPU_PPU_PKT_DATA119_T; + +typedef struct dpp_ppu_ppu_pkt_data120_t { + ZXIC_UINT32 pkt_data120; +} DPP_PPU_PPU_PKT_DATA120_T; + +typedef struct dpp_ppu_ppu_pkt_data121_t { + ZXIC_UINT32 pkt_data121; +} DPP_PPU_PPU_PKT_DATA121_T; + +typedef struct dpp_ppu_ppu_pkt_data122_t { + ZXIC_UINT32 pkt_data122; +} DPP_PPU_PPU_PKT_DATA122_T; + +typedef struct dpp_ppu_ppu_pkt_data123_t { + ZXIC_UINT32 pkt_data123; +} DPP_PPU_PPU_PKT_DATA123_T; + +typedef struct dpp_ppu_ppu_pkt_data124_t { + ZXIC_UINT32 pkt_data124; +} DPP_PPU_PPU_PKT_DATA124_T; + +typedef struct dpp_ppu_ppu_pkt_data125_t { + ZXIC_UINT32 pkt_data125; +} DPP_PPU_PPU_PKT_DATA125_T; + +typedef struct dpp_ppu_ppu_pkt_data126_t { + ZXIC_UINT32 pkt_data126; +} DPP_PPU_PPU_PKT_DATA126_T; + +typedef struct dpp_ppu_ppu_pkt_data127_t { + ZXIC_UINT32 pkt_data127; +} DPP_PPU_PPU_PKT_DATA127_T; + +typedef struct dpp_ppu_ppu_spr0_t { + ZXIC_UINT32 spr0; +} DPP_PPU_PPU_SPR0_T; + +typedef struct dpp_ppu_ppu_spr1_t { + ZXIC_UINT32 spr1; +} DPP_PPU_PPU_SPR1_T; + +typedef struct dpp_ppu_ppu_spr2_t { + ZXIC_UINT32 spr2; +} DPP_PPU_PPU_SPR2_T; + +typedef struct 
dpp_ppu_ppu_spr3_t { + ZXIC_UINT32 spr3; +} DPP_PPU_PPU_SPR3_T; + +typedef struct dpp_ppu_ppu_spr4_t { + ZXIC_UINT32 spr4; +} DPP_PPU_PPU_SPR4_T; + +typedef struct dpp_ppu_ppu_spr5_t { + ZXIC_UINT32 spr5; +} DPP_PPU_PPU_SPR5_T; + +typedef struct dpp_ppu_ppu_spr6_t { + ZXIC_UINT32 spr6; +} DPP_PPU_PPU_SPR6_T; + +typedef struct dpp_ppu_ppu_spr7_t { + ZXIC_UINT32 spr7; +} DPP_PPU_PPU_SPR7_T; + +typedef struct dpp_ppu_ppu_spr8_t { + ZXIC_UINT32 spr8; +} DPP_PPU_PPU_SPR8_T; + +typedef struct dpp_ppu_ppu_spr9_t { + ZXIC_UINT32 spr9; +} DPP_PPU_PPU_SPR9_T; + +typedef struct dpp_ppu_ppu_spr10_t { + ZXIC_UINT32 spr10; +} DPP_PPU_PPU_SPR10_T; + +typedef struct dpp_ppu_ppu_spr11_t { + ZXIC_UINT32 spr11; +} DPP_PPU_PPU_SPR11_T; + +typedef struct dpp_ppu_ppu_spr12_t { + ZXIC_UINT32 spr12; +} DPP_PPU_PPU_SPR12_T; + +typedef struct dpp_ppu_ppu_spr13_t { + ZXIC_UINT32 spr13; +} DPP_PPU_PPU_SPR13_T; + +typedef struct dpp_ppu_ppu_spr14_t { + ZXIC_UINT32 spr14; +} DPP_PPU_PPU_SPR14_T; + +typedef struct dpp_ppu_ppu_spr15_t { + ZXIC_UINT32 spr15; +} DPP_PPU_PPU_SPR15_T; + +typedef struct dpp_ppu_ppu_spr16_t { + ZXIC_UINT32 spr16; +} DPP_PPU_PPU_SPR16_T; + +typedef struct dpp_ppu_ppu_spr17_t { + ZXIC_UINT32 spr17; +} DPP_PPU_PPU_SPR17_T; + +typedef struct dpp_ppu_ppu_spr18_t { + ZXIC_UINT32 spr18; +} DPP_PPU_PPU_SPR18_T; + +typedef struct dpp_ppu_ppu_spr19_t { + ZXIC_UINT32 spr19; +} DPP_PPU_PPU_SPR19_T; + +typedef struct dpp_ppu_ppu_spr20_t { + ZXIC_UINT32 spr20; +} DPP_PPU_PPU_SPR20_T; + +typedef struct dpp_ppu_ppu_spr21_t { + ZXIC_UINT32 spr21; +} DPP_PPU_PPU_SPR21_T; + +typedef struct dpp_ppu_ppu_spr22_t { + ZXIC_UINT32 spr22; +} DPP_PPU_PPU_SPR22_T; + +typedef struct dpp_ppu_ppu_spr23_t { + ZXIC_UINT32 spr23; +} DPP_PPU_PPU_SPR23_T; + +typedef struct dpp_ppu_ppu_spr24_t { + ZXIC_UINT32 spr24; +} DPP_PPU_PPU_SPR24_T; + +typedef struct dpp_ppu_ppu_spr25_t { + ZXIC_UINT32 spr25; +} DPP_PPU_PPU_SPR25_T; + +typedef struct dpp_ppu_ppu_spr26_t { + ZXIC_UINT32 spr26; +} 
DPP_PPU_PPU_SPR26_T; + +typedef struct dpp_ppu_ppu_spr27_t { + ZXIC_UINT32 spr27; +} DPP_PPU_PPU_SPR27_T; + +typedef struct dpp_ppu_ppu_spr28_t { + ZXIC_UINT32 spr28; +} DPP_PPU_PPU_SPR28_T; + +typedef struct dpp_ppu_ppu_spr29_t { + ZXIC_UINT32 spr29; +} DPP_PPU_PPU_SPR29_T; + +typedef struct dpp_ppu_ppu_spr30_t { + ZXIC_UINT32 spr30; +} DPP_PPU_PPU_SPR30_T; + +typedef struct dpp_ppu_ppu_spr31_t { + ZXIC_UINT32 spr31; +} DPP_PPU_PPU_SPR31_T; + +typedef struct dpp_ppu_ppu_rsp0_t { + ZXIC_UINT32 rsp0; +} DPP_PPU_PPU_RSP0_T; + +typedef struct dpp_ppu_ppu_rsp1_t { + ZXIC_UINT32 rsp1; +} DPP_PPU_PPU_RSP1_T; + +typedef struct dpp_ppu_ppu_rsp2_t { + ZXIC_UINT32 rsp2; +} DPP_PPU_PPU_RSP2_T; + +typedef struct dpp_ppu_ppu_rsp3_t { + ZXIC_UINT32 rsp3; +} DPP_PPU_PPU_RSP3_T; + +typedef struct dpp_ppu_ppu_rsp4_t { + ZXIC_UINT32 rsp4; +} DPP_PPU_PPU_RSP4_T; + +typedef struct dpp_ppu_ppu_rsp5_t { + ZXIC_UINT32 rsp5; +} DPP_PPU_PPU_RSP5_T; + +typedef struct dpp_ppu_ppu_rsp6_t { + ZXIC_UINT32 rsp6; +} DPP_PPU_PPU_RSP6_T; + +typedef struct dpp_ppu_ppu_rsp7_t { + ZXIC_UINT32 rsp7; +} DPP_PPU_PPU_RSP7_T; + +typedef struct dpp_ppu_ppu_rsp8_t { + ZXIC_UINT32 rsp8; +} DPP_PPU_PPU_RSP8_T; + +typedef struct dpp_ppu_ppu_rsp9_t { + ZXIC_UINT32 rsp9; +} DPP_PPU_PPU_RSP9_T; + +typedef struct dpp_ppu_ppu_rsp10_t { + ZXIC_UINT32 rsp10; +} DPP_PPU_PPU_RSP10_T; + +typedef struct dpp_ppu_ppu_rsp11_t { + ZXIC_UINT32 rsp11; +} DPP_PPU_PPU_RSP11_T; + +typedef struct dpp_ppu_ppu_rsp12_t { + ZXIC_UINT32 rsp12; +} DPP_PPU_PPU_RSP12_T; + +typedef struct dpp_ppu_ppu_rsp13_t { + ZXIC_UINT32 rsp13; +} DPP_PPU_PPU_RSP13_T; + +typedef struct dpp_ppu_ppu_rsp14_t { + ZXIC_UINT32 rsp14; +} DPP_PPU_PPU_RSP14_T; + +typedef struct dpp_ppu_ppu_rsp15_t { + ZXIC_UINT32 rsp15; +} DPP_PPU_PPU_RSP15_T; + +typedef struct dpp_ppu_ppu_rsp16_t { + ZXIC_UINT32 rsp16; +} DPP_PPU_PPU_RSP16_T; + +typedef struct dpp_ppu_ppu_rsp17_t { + ZXIC_UINT32 rsp17; +} DPP_PPU_PPU_RSP17_T; + +typedef struct dpp_ppu_ppu_rsp18_t { + ZXIC_UINT32 
rsp18; +} DPP_PPU_PPU_RSP18_T; + +typedef struct dpp_ppu_ppu_rsp19_t { + ZXIC_UINT32 rsp19; +} DPP_PPU_PPU_RSP19_T; + +typedef struct dpp_ppu_ppu_rsp20_t { + ZXIC_UINT32 rsp20; +} DPP_PPU_PPU_RSP20_T; + +typedef struct dpp_ppu_ppu_rsp21_t { + ZXIC_UINT32 rsp21; +} DPP_PPU_PPU_RSP21_T; + +typedef struct dpp_ppu_ppu_rsp22_t { + ZXIC_UINT32 rsp22; +} DPP_PPU_PPU_RSP22_T; + +typedef struct dpp_ppu_ppu_rsp23_t { + ZXIC_UINT32 rsp23; +} DPP_PPU_PPU_RSP23_T; + +typedef struct dpp_ppu_ppu_rsp24_t { + ZXIC_UINT32 rsp24; +} DPP_PPU_PPU_RSP24_T; + +typedef struct dpp_ppu_ppu_rsp25_t { + ZXIC_UINT32 rsp25; +} DPP_PPU_PPU_RSP25_T; + +typedef struct dpp_ppu_ppu_rsp26_t { + ZXIC_UINT32 rsp26; +} DPP_PPU_PPU_RSP26_T; + +typedef struct dpp_ppu_ppu_rsp27_t { + ZXIC_UINT32 rsp27; +} DPP_PPU_PPU_RSP27_T; + +typedef struct dpp_ppu_ppu_rsp28_t { + ZXIC_UINT32 rsp28; +} DPP_PPU_PPU_RSP28_T; + +typedef struct dpp_ppu_ppu_rsp29_t { + ZXIC_UINT32 rsp29; +} DPP_PPU_PPU_RSP29_T; + +typedef struct dpp_ppu_ppu_rsp30_t { + ZXIC_UINT32 rsp30; +} DPP_PPU_PPU_RSP30_T; + +typedef struct dpp_ppu_ppu_rsp31_t { + ZXIC_UINT32 rsp31; +} DPP_PPU_PPU_RSP31_T; + +typedef struct dpp_ppu_ppu_key0_t { + ZXIC_UINT32 key0; +} DPP_PPU_PPU_KEY0_T; + +typedef struct dpp_ppu_ppu_key1_t { + ZXIC_UINT32 key1; +} DPP_PPU_PPU_KEY1_T; + +typedef struct dpp_ppu_ppu_key2_t { + ZXIC_UINT32 key2; +} DPP_PPU_PPU_KEY2_T; + +typedef struct dpp_ppu_ppu_key3_t { + ZXIC_UINT32 key3; +} DPP_PPU_PPU_KEY3_T; + +typedef struct dpp_ppu_ppu_key4_t { + ZXIC_UINT32 key4; +} DPP_PPU_PPU_KEY4_T; + +typedef struct dpp_ppu_ppu_key5_t { + ZXIC_UINT32 key5; +} DPP_PPU_PPU_KEY5_T; + +typedef struct dpp_ppu_ppu_key6_t { + ZXIC_UINT32 key6; +} DPP_PPU_PPU_KEY6_T; + +typedef struct dpp_ppu_ppu_key7_t { + ZXIC_UINT32 key7; +} DPP_PPU_PPU_KEY7_T; + +typedef struct dpp_ppu_ppu_key8_t { + ZXIC_UINT32 key8; +} DPP_PPU_PPU_KEY8_T; + +typedef struct dpp_ppu_ppu_key9_t { + ZXIC_UINT32 key9; +} DPP_PPU_PPU_KEY9_T; + +typedef struct dpp_ppu_ppu_key10_t { + 
ZXIC_UINT32 key10; +} DPP_PPU_PPU_KEY10_T; + +typedef struct dpp_ppu_ppu_key11_t { + ZXIC_UINT32 key11; +} DPP_PPU_PPU_KEY11_T; + +typedef struct dpp_ppu_ppu_key12_t { + ZXIC_UINT32 key12; +} DPP_PPU_PPU_KEY12_T; + +typedef struct dpp_ppu_ppu_key13_t { + ZXIC_UINT32 key13; +} DPP_PPU_PPU_KEY13_T; + +typedef struct dpp_ppu_ppu_key14_t { + ZXIC_UINT32 key14; +} DPP_PPU_PPU_KEY14_T; + +typedef struct dpp_ppu_ppu_key15_t { + ZXIC_UINT32 key15; +} DPP_PPU_PPU_KEY15_T; + +typedef struct dpp_ppu_ppu_key16_t { + ZXIC_UINT32 key16; +} DPP_PPU_PPU_KEY16_T; + +typedef struct dpp_ppu_ppu_key17_t { + ZXIC_UINT32 key17; +} DPP_PPU_PPU_KEY17_T; + +typedef struct dpp_ppu_ppu_key18_t { + ZXIC_UINT32 key18; +} DPP_PPU_PPU_KEY18_T; + +typedef struct dpp_ppu_ppu_key19_t { + ZXIC_UINT32 key19; +} DPP_PPU_PPU_KEY19_T; + +typedef struct dpp_ppu_ppu_flag_t { + ZXIC_UINT32 me_num; + ZXIC_UINT32 thread_num; + ZXIC_UINT32 flag; +} DPP_PPU_PPU_FLAG_T; + +typedef struct dpp_ppu_cluster_int_1200m_flag_t { + ZXIC_UINT32 me7_interrupt_flag; + ZXIC_UINT32 me6_interrupt_flag; + ZXIC_UINT32 me5_interrupt_flag; + ZXIC_UINT32 me4_interrupt_flag; + ZXIC_UINT32 me3_interrupt_flag; + ZXIC_UINT32 me2_interrupt_flag; + ZXIC_UINT32 me1_interrupt_flag; + ZXIC_UINT32 me0_interrupt_flag; +} DPP_PPU_CLUSTER_INT_1200M_FLAG_T; + +typedef struct dpp_ppu_cluster_bp_instr_l_t { + ZXIC_UINT32 bp_instr_l; +} DPP_PPU_CLUSTER_BP_INSTR_L_T; + +typedef struct dpp_ppu_cluster_bp_instr_h_t { + ZXIC_UINT32 bp_instr_h; +} DPP_PPU_CLUSTER_BP_INSTR_H_T; + +typedef struct dpp_ppu_cluster_bp_addr_t { + ZXIC_UINT32 bp_addr; +} DPP_PPU_CLUSTER_BP_ADDR_T; + +typedef struct dpp_ppu_cluster_drr_t { + ZXIC_UINT32 drr; +} DPP_PPU_CLUSTER_DRR_T; + +typedef struct dpp_ppu_cluster_dsr_t { + ZXIC_UINT32 dsr; +} DPP_PPU_CLUSTER_DSR_T; + +typedef struct dpp_ppu_cluster_dbg_rtl_date_t { + ZXIC_UINT32 dbg_rtl_date; +} DPP_PPU_CLUSTER_DBG_RTL_DATE_T; + +typedef struct dpp_ppu_cluster_me_continue_t { + ZXIC_UINT32 me_continue; +} 
DPP_PPU_CLUSTER_ME_CONTINUE_T; + +typedef struct dpp_ppu_cluster_me_step_t { + ZXIC_UINT32 me_step; +} DPP_PPU_CLUSTER_ME_STEP_T; + +typedef struct dpp_ppu_cluster_me_refresh_t { + ZXIC_UINT32 me_refresh; +} DPP_PPU_CLUSTER_ME_REFRESH_T; + +typedef struct dpp_ppu_cluster_drr_clr_t { + ZXIC_UINT32 drr_clr; +} DPP_PPU_CLUSTER_DRR_CLR_T; + +typedef struct dpp_ppu_cluster_me_busy_thresold_t { + ZXIC_UINT32 me_busy_thresold; +} DPP_PPU_CLUSTER_ME_BUSY_THRESOLD_T; + +typedef struct dpp_ppu_cluster_int_1200m_sta_t { + ZXIC_UINT32 me7_interrupt_sta; + ZXIC_UINT32 me6_interrupt_sta; + ZXIC_UINT32 me5_interrupt_sta; + ZXIC_UINT32 me4_interrupt_sta; + ZXIC_UINT32 me3_interrupt_sta; + ZXIC_UINT32 me2_interrupt_sta; + ZXIC_UINT32 me1_interrupt_sta; + ZXIC_UINT32 me0_interrupt_sta; +} DPP_PPU_CLUSTER_INT_1200M_STA_T; + +typedef struct dpp_ppu_cluster_int_1200m_me_fifo_mask_l_t { + ZXIC_UINT32 me_free_pkt_q_overflow_mask; + ZXIC_UINT32 me_free_pkt_q_underflow_mask; + ZXIC_UINT32 me_free_thread_q_overflow_mask; + ZXIC_UINT32 me_free_thread_q_underflow_mask; + ZXIC_UINT32 me_pkt_in_overflow_mask; + ZXIC_UINT32 me_pkt_in_underflow_mask; + ZXIC_UINT32 me_rdy_q_overflow_mask; + ZXIC_UINT32 me_rdy_q_underflow_mask; + ZXIC_UINT32 me_pkt_out_q_overflow_mask; + ZXIC_UINT32 me_pkt_out_q_underflow_mask; + ZXIC_UINT32 me_continue_q_overflow_mask; + ZXIC_UINT32 me_continue_q_underflow_mask; + ZXIC_UINT32 me_esrh_q_overflow_mask; + ZXIC_UINT32 me_esrh_q_underflow_mask; + ZXIC_UINT32 me_isrh_q_overflow_mask; + ZXIC_UINT32 me_isrh_q_underflow_mask; + ZXIC_UINT32 me_cache_miss_q_overflow_mask; + ZXIC_UINT32 me_cache_miss_q_underflow_mask; + ZXIC_UINT32 me_base_q_u0_overflow_mask; + ZXIC_UINT32 me_base_q_u0_underflow_mask; + ZXIC_UINT32 me_base_q_u1_overflow_mask; + ZXIC_UINT32 me_base_q_u1_underflow_mask; + ZXIC_UINT32 me_base_q_u2_overflow_mask; + ZXIC_UINT32 me_base_q_u2_underflow_mask; + ZXIC_UINT32 me_base_q_u3_overflow_mask; + ZXIC_UINT32 me_base_q_u3_underflow_mask; + ZXIC_UINT32 
me_reg_pc_q_overflow_mask; + ZXIC_UINT32 me_reg_pc_q_underflow_mask; + ZXIC_UINT32 me_branch_q_overflow_mask; + ZXIC_UINT32 me_branch_q_underflow_mask; + ZXIC_UINT32 me_pkt_base_q_overflow_mask; + ZXIC_UINT32 me_pkt_base_q_underflow_mask; +} DPP_PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_L_T; + +typedef struct dpp_ppu_cluster_int_1200m_me_fifo_mask_h_t { + ZXIC_UINT32 me_except_refetch_pc_overflow_mask; + ZXIC_UINT32 me_except_refetch_pc_underflow_mask; +} DPP_PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_H_T; + +typedef struct dpp_ppu_cluster_me_fifo_interrupt_flag_l_t { + ZXIC_UINT32 me_free_pkt_q_overflow_flag; + ZXIC_UINT32 me_free_pkt_q_underflow_flag; + ZXIC_UINT32 me_free_thread_q_overflow_flag; + ZXIC_UINT32 me_free_thread_q_underflow_flag; + ZXIC_UINT32 me_pkt_in_overflow_flag; + ZXIC_UINT32 me_pkt_in_underflow_flag; + ZXIC_UINT32 me_rdy_q_overflow_flag; + ZXIC_UINT32 me_rdy_q_underflow_flag; + ZXIC_UINT32 me_pkt_out_q_overflow_flag; + ZXIC_UINT32 me_pkt_out_q_underflow_flag; + ZXIC_UINT32 me_continue_q_overflow_flag; + ZXIC_UINT32 me_continue_q_underflow_flag; + ZXIC_UINT32 me_esrh_q_overflow_flag; + ZXIC_UINT32 me_esrh_q_underflow_flag; + ZXIC_UINT32 me_isrh_q_overflow_flag; + ZXIC_UINT32 me_isrh_q_underflow_flag; + ZXIC_UINT32 me_cache_miss_q_overflow_flag; + ZXIC_UINT32 me_cache_miss_q_underflow_flag; + ZXIC_UINT32 me_base_q_u0_overflow_flag; + ZXIC_UINT32 me_base_q_u0_underflow_flag; + ZXIC_UINT32 me_base_q_u1_overflow_flag; + ZXIC_UINT32 me_base_q_u1_underflow_flag; + ZXIC_UINT32 me_base_q_u2_overflow_flag; + ZXIC_UINT32 me_base_q_u2_underflow_flag; + ZXIC_UINT32 me_base_q_u3_overflow_flag; + ZXIC_UINT32 me_base_q_u3_underflow_flag; + ZXIC_UINT32 me_reg_pc_q_overflow_flag; + ZXIC_UINT32 me_reg_pc_q_underflow_flag; + ZXIC_UINT32 me_branch_q_overflow_flag; + ZXIC_UINT32 me_branch_q_underflow_flag; + ZXIC_UINT32 me_pkt_base_q_overflow_flag; + ZXIC_UINT32 me_pkt_base_q_underflow_flag; +} DPP_PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_L_T; + +typedef struct 
dpp_ppu_cluster_me_fifo_interrupt_flag_h_t { + ZXIC_UINT32 me_except_refetch_pc_overflow_flag; + ZXIC_UINT32 me_except_refetch_pc_underflow_flag; +} DPP_PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_H_T; + +typedef struct dpp_ppu_cluster_me_fifo_interrupt_sta_l_t { + ZXIC_UINT32 me_free_pkt_q_overflow_sta; + ZXIC_UINT32 me_free_pkt_q_underflow_sta; + ZXIC_UINT32 me_free_thread_q_overflow_sta; + ZXIC_UINT32 me_free_thread_q_underflow_sta; + ZXIC_UINT32 me_pkt_in_overflow_sta; + ZXIC_UINT32 me_pkt_in_underflow_sta; + ZXIC_UINT32 me_rdy_q_overflow_sta; + ZXIC_UINT32 me_rdy_q_underflow_sta; + ZXIC_UINT32 me_pkt_out_q_overflow_sta; + ZXIC_UINT32 me_pkt_out_q_underflow_sta; + ZXIC_UINT32 me_continue_q_overflow_sta; + ZXIC_UINT32 me_continue_q_underflow_sta; + ZXIC_UINT32 me_esrh_q_overflow_sta; + ZXIC_UINT32 me_esrh_q_underflow_sta; + ZXIC_UINT32 me_isrh_q_overflow_sta; + ZXIC_UINT32 me_isrh_q_underflow_sta; + ZXIC_UINT32 me_cache_miss_q_overflow_sta; + ZXIC_UINT32 me_cache_miss_q_underflow_sta; + ZXIC_UINT32 me_base_q_u0_overflow_sta; + ZXIC_UINT32 me_base_q_u0_underflow_sta; + ZXIC_UINT32 me_base_q_u1_overflow_sta; + ZXIC_UINT32 me_base_q_u1_underflow_sta; + ZXIC_UINT32 me_base_q_u2_overflow_sta; + ZXIC_UINT32 me_base_q_u2_underflow_sta; + ZXIC_UINT32 me_base_q_u3_overflow_sta; + ZXIC_UINT32 me_base_q_u3_underflow_sta; + ZXIC_UINT32 me_reg_pc_q_overflow_sta; + ZXIC_UINT32 me_reg_pc_q_underflow_sta; + ZXIC_UINT32 me_branch_q_overflow_sta; + ZXIC_UINT32 me_branch_q_underflow_sta; + ZXIC_UINT32 me_pkt_base_q_overflow_sta; + ZXIC_UINT32 me_pkt_base_q_underflow_sta; +} DPP_PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_L_T; + +typedef struct dpp_ppu_cluster_me_fifo_interrupt_sta_h_t { + ZXIC_UINT32 me_except_refetch_pc_overflow_sta; + ZXIC_UINT32 me_except_refetch_pc_underflow_sta; +} DPP_PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_H_T; + +typedef struct dpp_ppu_cluster_int_1200m_cluster_mex_fifo_mask_l_t { + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_mask; + ZXIC_UINT32 
ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_mask; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_overflow_mask; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_mask; + ZXIC_UINT32 
ppu_sta_rsp_afifo_64x79_wrapper_underflow_mask; + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_mask; + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_mask; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_L_T; + +typedef struct dpp_ppu_cluster_int_1200m_cluster_mex_fifo_mask_h_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_overflow_mask; + ZXIC_UINT32 ppu_se_key_afifo_32x665_wrapper_overflow_mask; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_overflow_mask; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_mask; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_mask; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_mask; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_mask; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_mask; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_mask; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_mask; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_overflow_mask; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_mask; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_H_T; + +typedef struct dpp_ppu_cluster_int_1200m_cluster_mex_fifo_flag_l_t { + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag; + ZXIC_UINT32 
ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_flag; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_overflow_flag; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_flag; + ZXIC_UINT32 ppu_sta_rsp_afifo_64x79_wrapper_underflow_flag; + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_flag; + ZXIC_UINT32 
ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_flag; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_L_T; + +typedef struct dpp_ppu_cluster_int_1200m_cluster_mex_fifo_flag_h_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_overflow_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x665_wrapper_overflow_flag; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_overflow_flag; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_flag; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_flag; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_flag; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_flag; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_flag; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_flag; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_flag; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_overflow_flag; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_H_T; + +typedef struct dpp_ppu_cluster_int_1200m_cluster_mex_fifo_stat_l_t { + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_stat; + ZXIC_UINT32 
ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_stat; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_overflow_stat; + ZXIC_UINT32 ise_rsp_ram_free_ptr_u0_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_stat; + ZXIC_UINT32 ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_stat; + ZXIC_UINT32 ppu_sta_rsp_afifo_64x79_wrapper_underflow_stat; + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_stat; + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_stat; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_L_T; + +typedef struct 
dpp_ppu_cluster_int_1200m_cluster_mex_fifo_stat_h_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_overflow_stat; + ZXIC_UINT32 ppu_se_key_afifo_32x665_wrapper_overflow_stat; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_overflow_stat; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_stat; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_stat; + ZXIC_UINT32 ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_stat; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_stat; + ZXIC_UINT32 ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_stat; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_stat; + ZXIC_UINT32 ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_stat; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_overflow_stat; + ZXIC_UINT32 ese_rsp_ram_free_ptr_u0_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_stat; + ZXIC_UINT32 ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_stat; +} DPP_PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_H_T; + +typedef struct dpp_ppu_cluster_ppu_statics_wb_exception_cfg_t { + ZXIC_UINT32 csr_statics_wb_exception_code5; + ZXIC_UINT32 csr_statics_wb_exception_code4; + ZXIC_UINT32 csr_statics_wb_exception_code3; + ZXIC_UINT32 csr_statics_wb_exception_code2; + ZXIC_UINT32 csr_statics_wb_exception_code1; + ZXIC_UINT32 csr_statics_wb_exception_code0; +} DPP_PPU_CLUSTER_PPU_STATICS_WB_EXCEPTION_CFG_T; + +typedef struct dpp_ppu_cluster_thread_switch_en_t { + ZXIC_UINT32 thread_switch_en; +} DPP_PPU_CLUSTER_THREAD_SWITCH_EN_T; + +typedef struct dpp_ppu_cluster_is_me_not_idle_t { + ZXIC_UINT32 me7_is_not_idle; + 
ZXIC_UINT32 me6_is_not_idle; + ZXIC_UINT32 me5_is_not_idle; + ZXIC_UINT32 me4_is_not_idle; + ZXIC_UINT32 me3_is_not_idle; + ZXIC_UINT32 me2_is_not_idle; + ZXIC_UINT32 me1_is_not_idle; + ZXIC_UINT32 me0_is_not_idle; +} DPP_PPU_CLUSTER_IS_ME_NOT_IDLE_T; + +typedef struct dpp_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_in_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_in_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ese_rsp_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ese_rsp_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ese_rsp_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ese_rsp_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ise_rsp_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ise_rsp_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ise_rsp_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ise_rsp_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg; +} 
DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_sta_rsp_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 sta_rsp_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_sta_rsp_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 sta_rsp_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 
ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_cop_rsp_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 cop_rsp_fifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_cop_rsp_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 cop_rsp_fifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_cop_rsp_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 cop_rsp_fifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_cop_rsp_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 cop_rsp_fifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_mcode_pf_rsp_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 mcode_pf_rsp_fifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_mcode_pf_rsp_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 mcode_pf_rsp_fifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 mcode_pf_rsp_fifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 mcode_pf_rsp_fifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg; +} 
DPP_PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ise_key_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_ise_key_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ise_key_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_ise_key_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ese_key_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_ese_key_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ese_key_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_ese_key_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_key_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_sta_key_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_key_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_sta_key_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_int_600m_cluster_mex_fifo_mask_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_underflow_mask; + ZXIC_UINT32 
ppu_se_key_afifo_32x665_wrapper_underflow_mask; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_underflow_mask; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_mask; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_mask; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_mask; + ZXIC_UINT32 ppu_sta_rsp_afifo_64x79_wrapper_overflow_mask; +} DPP_PPU_CLUSTER_INT_600M_CLUSTER_MEX_FIFO_MASK_T; + +typedef struct dpp_ppu_cluster_cluster_mex_fifo_600m_interrupt_flag_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_underflow_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x665_wrapper_underflow_flag; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_underflow_flag; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_flag; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_flag; + ZXIC_UINT32 ppu_sta_rsp_afifo_64x79_wrapper_overflow_flag; +} DPP_PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_cluster_cluster_mex_fifo_600m_interrupt_sta_t { + ZXIC_UINT32 ppu_se_key_afifo_32x54_wrapper_underflow_sta; + ZXIC_UINT32 ppu_se_key_afifo_32x665_wrapper_underflow_sta; + ZXIC_UINT32 ppu_sta_key_afifo_32x110_wrapper_underflow_sta; + ZXIC_UINT32 ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_sta; + ZXIC_UINT32 ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_sta; + ZXIC_UINT32 ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_sta; + ZXIC_UINT32 ppu_sta_rsp_afifo_64x79_wrapper_overflow_sta; +} DPP_PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_STA_T; + +typedef struct dpp_ppu_cluster_mex_cnt_cfg_t { + ZXIC_UINT32 csr_count_overflow_mode; + ZXIC_UINT32 csr_count_rd_mode; +} DPP_PPU_CLUSTER_MEX_CNT_CFG_T; + +typedef struct dpp_ppu_cluster_int_600m_cluster_mex_ram_ecc_error_interrupt_mask_t { + ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_double_err_mask; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_double_err_mask; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_double_err_mask; 
+ ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_single_err_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_single_err_mask; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_single_err_mask; +} DPP_PPU_CLUSTER_INT_600M_CLUSTER_MEX_RAM_ECC_ERROR_INTERRUPT_MASK_T; + +typedef struct dpp_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_flag_t { + ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_double_err_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_double_err_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_double_err_flag; + ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_single_err_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_single_err_flag; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_single_err_flag; +} DPP_PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_FLAG_T; + +typedef struct dpp_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_sta_t { + ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_double_err_stat; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_double_err_stat; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_double_err_stat; + ZXIC_UINT32 ppu_sta_key_ram_1r1w_32x110_ecc_single_err_stat; + ZXIC_UINT32 ppu_se_key_afifo_32x665_ecc_single_err_stat; + ZXIC_UINT32 ppu_se_key_afifo_32x54_ecc_single_err_stat; +} DPP_PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_STA_T; + +typedef struct dpp_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_in_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ppu_cluster_mf_in_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ese_rsp_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ese_rsp_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ese_rsp_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ese_rsp_afifo_prog_full_negate_cfg; +} 
DPP_PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ise_rsp_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 ise_rsp_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ise_rsp_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 ise_rsp_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_sta_rsp_afifo_prog_full_assert_cfg_t { + ZXIC_UINT32 sta_rsp_afifo_prog_full_assert_cfg; +} DPP_PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_sta_rsp_afifo_prog_full_negate_cfg_t { + ZXIC_UINT32 sta_rsp_afifo_prog_full_negate_cfg; +} DPP_PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ise_key_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_ise_key_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ise_key_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_ise_key_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ese_key_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_ese_key_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_ese_key_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_ese_key_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_key_afifo_prog_empty_assert_cfg_t { + ZXIC_UINT32 ppu_sta_key_afifo_prog_empty_assert_cfg; +} DPP_PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_ASSERT_CFG_T; + +typedef struct dpp_ppu_cluster_ppu_sta_key_afifo_prog_empty_negate_cfg_t { + ZXIC_UINT32 ppu_sta_key_afifo_prog_empty_negate_cfg; +} DPP_PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_NEGATE_CFG_T; + +typedef struct 
dpp_ppu_cluster_ppu_cluster_mf_vld_cnt_h_t { + ZXIC_UINT32 ppu_cluster_mf_vld_cnt_h; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_ppu_cluster_mf_vld_cnt_l_t { + ZXIC_UINT32 ppu_cluster_mf_vld_cnt_l; +} DPP_PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_cluster_ise_key_out_vld_cnt_t { + ZXIC_UINT32 cluster_ise_key_out_vld_cnt; +} DPP_PPU_CLUSTER_CLUSTER_ISE_KEY_OUT_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_ise_cluster_rsp_in_vld_cnt_t { + ZXIC_UINT32 ise_cluster_rsp_in_vld_cnt; +} DPP_PPU_CLUSTER_ISE_CLUSTER_RSP_IN_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_ese_key_out_vld_cnt_t { + ZXIC_UINT32 cluster_ese_key_out_vld_cnt; +} DPP_PPU_CLUSTER_CLUSTER_ESE_KEY_OUT_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_ese_cluster_rsp_in_vld_cnt_t { + ZXIC_UINT32 ese_cluster_rsp_in_vld_cnt; +} DPP_PPU_CLUSTER_ESE_CLUSTER_RSP_IN_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_stat_cmd_vld_cnt_t { + ZXIC_UINT32 cluster_stat_cmd_vld_cnt; +} DPP_PPU_CLUSTER_CLUSTER_STAT_CMD_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_stat_cluster_rsp_vld_cnt_t { + ZXIC_UINT32 stat_cluster_rsp_vld_cnt; +} DPP_PPU_CLUSTER_STAT_CLUSTER_RSP_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_mex_debug_key_vld_cnt_t { + ZXIC_UINT32 mex_debug_key_vld_cnt; +} DPP_PPU_CLUSTER_MEX_DEBUG_KEY_VLD_CNT_T; + +typedef struct dpp_ppu_cluster_ise_cluster_key_fc_cnt_t { + ZXIC_UINT32 ise_cluster_key_fc_cnt; +} DPP_PPU_CLUSTER_ISE_CLUSTER_KEY_FC_CNT_T; + +typedef struct dpp_ppu_cluster_ese_cluster_key_fc_cnt_t { + ZXIC_UINT32 ese_cluster_key_fc_cnt; +} DPP_PPU_CLUSTER_ESE_CLUSTER_KEY_FC_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_ise_rsp_fc_cnt_t { + ZXIC_UINT32 cluster_ise_rsp_fc_cnt; +} DPP_PPU_CLUSTER_CLUSTER_ISE_RSP_FC_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_ese_rsp_fc_cnt_t { + ZXIC_UINT32 cluster_ese_rsp_fc_cnt; +} DPP_PPU_CLUSTER_CLUSTER_ESE_RSP_FC_CNT_T; + +typedef struct dpp_ppu_cluster_stat_cluster_cmd_fc_cnt_t { + 
ZXIC_UINT32 stat_cluster_cmd_fc_cnt; +} DPP_PPU_CLUSTER_STAT_CLUSTER_CMD_FC_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_stat_rsp_fc_cnt_t { + ZXIC_UINT32 cluster_stat_rsp_fc_cnt; +} DPP_PPU_CLUSTER_CLUSTER_STAT_RSP_FC_CNT_T; + +typedef struct dpp_ppu_cluster_cluster_ppu_mf_vld_cnt_l_t { + ZXIC_UINT32 cluster_ppu_mf_vld_cnt_l; +} DPP_PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_cluster_ppu_mf_vld_cnt_h_t { + ZXIC_UINT32 cluster_ppu_mf_vld_cnt_h; +} DPP_PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_cluster_cop_key_vld_cnt_l_t { + ZXIC_UINT32 cluster_cop_key_vld_cnt_l; +} DPP_PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_cluster_cop_key_vld_cnt_h_t { + ZXIC_UINT32 cluster_cop_key_vld_cnt_h; +} DPP_PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_cop_cluster_rsp_vld_cnt_l_t { + ZXIC_UINT32 cop_cluster_rsp_vld_cnt_l; +} DPP_PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_cop_cluster_rsp_vld_cnt_h_t { + ZXIC_UINT32 cop_cluster_rsp_vld_cnt_h; +} DPP_PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_sop_cnt_l_t { + ZXIC_UINT32 mex_me_pkt_in_sop_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_sop_cnt_h_t { + ZXIC_UINT32 mex_me_pkt_in_sop_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_eop_cnt_l_t { + ZXIC_UINT32 mex_me_pkt_in_eop_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_eop_cnt_h_t { + ZXIC_UINT32 mex_me_pkt_in_eop_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_vld_cnt_l_t { + ZXIC_UINT32 mex_me_pkt_in_vld_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_pkt_in_vld_cnt_h_t { + ZXIC_UINT32 mex_me_pkt_in_vld_cnt_h; +} 
DPP_PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_sop_cnt_l_t { + ZXIC_UINT32 me_mex_pkt_out_sop_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_sop_cnt_h_t { + ZXIC_UINT32 me_mex_pkt_out_sop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_eop_cnt_l_t { + ZXIC_UINT32 me_mex_pkt_out_eop_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_eop_cnt_h_t { + ZXIC_UINT32 me_mex_pkt_out_eop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_vld_cnt_l_t { + ZXIC_UINT32 me_mex_pkt_out_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_pkt_out_vld_cnt_h_t { + ZXIC_UINT32 me_mex_pkt_out_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_sop_cnt_l_t { + ZXIC_UINT32 me_mex_i_key_out_sop_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_sop_cnt_h_t { + ZXIC_UINT32 me_mex_i_key_out_sop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_eop_cnt_l_t { + ZXIC_UINT32 me_mex_i_key_out_eop_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_eop_cnt_h_t { + ZXIC_UINT32 me_mex_i_key_out_eop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_vld_cnt_l_t { + ZXIC_UINT32 me_mex_i_key_out_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_i_key_out_vld_cnt_h_t { + ZXIC_UINT32 me_mex_i_key_out_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_sop_cnt_l_t { + ZXIC_UINT32 me_mex_e_key_out_sop_cnt_l; +} 
DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_sop_cnt_h_t { + ZXIC_UINT32 me_mex_e_key_out_sop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_eop_cnt_l_t { + ZXIC_UINT32 me_mex_e_key_out_eop_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_eop_cnt_h_t { + ZXIC_UINT32 me_mex_e_key_out_eop_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_vld_cnt_l_t { + ZXIC_UINT32 me_mex_e_key_out_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_e_key_out_vld_cnt_h_t { + ZXIC_UINT32 me_mex_e_key_out_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_ise_key_vld_cnt_l_t { + ZXIC_UINT32 me_mex_demux_ise_key_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_ise_key_vld_cnt_h_t { + ZXIC_UINT32 me_mex_demux_ise_key_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_ese_key_vld_cnt_l_t { + ZXIC_UINT32 me_mex_demux_ese_key_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_ese_key_vld_cnt_h_t { + ZXIC_UINT32 me_mex_demux_ese_key_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_sta_key_vld_cnt_l_t { + ZXIC_UINT32 me_mex_demux_sta_key_vld_cnt_l; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_sta_key_vld_cnt_h_t { + ZXIC_UINT32 me_mex_demux_sta_key_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_cop_key_vld_cnt_l_t { + ZXIC_UINT32 me_mex_demux_cop_key_vld_cnt_l; +} 
DPP_PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_mex_demux_cop_key_vld_cnt_h_t { + ZXIC_UINT32 me_mex_demux_cop_key_vld_cnt_h; +} DPP_PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_l_t { + ZXIC_UINT32 mex_me_demux_ise_rsp_vld_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_h_t { + ZXIC_UINT32 mex_me_demux_ise_rsp_vld_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_l_t { + ZXIC_UINT32 mex_me_demux_ese_rsp_vld_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_h_t { + ZXIC_UINT32 mex_me_demux_ese_rsp_vld_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_l_t { + ZXIC_UINT32 mex_me_demux_sta_rsp_vld_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_h_t { + ZXIC_UINT32 mex_me_demux_sta_rsp_vld_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_l_t { + ZXIC_UINT32 mex_me_demux_cop_rsp_vld_cnt_l; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_L_T; + +typedef struct dpp_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_h_t { + ZXIC_UINT32 mex_me_demux_cop_rsp_vld_cnt_h; +} DPP_PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code0_cnt_l_t { + ZXIC_UINT32 me_exception_code0_cnt_l; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code0_cnt_h_t { + ZXIC_UINT32 me_exception_code0_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code1_cnt_l_t { + ZXIC_UINT32 me_exception_code1_cnt_l; +} 
DPP_PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code1_cnt_h_t { + ZXIC_UINT32 me_exception_code1_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code2_cnt_l_t { + ZXIC_UINT32 me_exception_code2_cnt_l; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code2_cnt_h_t { + ZXIC_UINT32 me_exception_code2_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code3_cnt_l_t { + ZXIC_UINT32 me_exception_code3_cnt_l; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code3_cnt_h_t { + ZXIC_UINT32 me_exception_code3_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code4_cnt_l_t { + ZXIC_UINT32 me_exception_code4_cnt_l; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code4_cnt_h_t { + ZXIC_UINT32 me_exception_code4_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_H_T; + +typedef struct dpp_ppu_cluster_me_exception_code5_cnt_l_t { + ZXIC_UINT32 me_exception_code5_cnt_l; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_L_T; + +typedef struct dpp_ppu_cluster_me_exception_code5_cnt_h_t { + ZXIC_UINT32 me_exception_code5_cnt_h; +} DPP_PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_H_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ptptm_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ptptm_reg.h new file mode 100644 index 000000000000..0c6e037243f7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_ptptm_reg.h @@ -0,0 +1,729 @@ + +#ifndef _DPP_PTPTM_REG_H_ +#define _DPP_PTPTM_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_ptptm_ptp_top_pp1s_interrupt_t { + ZXIC_UINT32 int_state; + ZXIC_UINT32 int_test; + ZXIC_UINT32 int_clr; + ZXIC_UINT32 
int_en; +} DPP_PTPTM_PTP_TOP_PP1S_INTERRUPT_T; + +typedef struct dpp_ptptm_ptp_top_pp1s_external_select_t { + ZXIC_UINT32 pp1s_external_select; +} DPP_PTPTM_PTP_TOP_PP1S_EXTERNAL_SELECT_T; + +typedef struct dpp_ptptm_ptp_top_pp1s_out_select_t { + ZXIC_UINT32 pp1s_out_sel; +} DPP_PTPTM_PTP_TOP_PP1S_OUT_SELECT_T; + +typedef struct dpp_ptptm_ptp_top_test_pp1s_select_t { + ZXIC_UINT32 test_pp1s_sel; +} DPP_PTPTM_PTP_TOP_TEST_PP1S_SELECT_T; + +typedef struct dpp_ptptm_ptp_top_local_pp1s_en_t { + ZXIC_UINT32 local_pp1s_en; +} DPP_PTPTM_PTP_TOP_LOCAL_PP1S_EN_T; + +typedef struct dpp_ptptm_ptp_top_local_pp1s_adjust_t { + ZXIC_UINT32 local_pp1s_adjust_sel; + ZXIC_UINT32 local_pp1s_adjust_en; +} DPP_PTPTM_PTP_TOP_LOCAL_PP1S_ADJUST_T; + +typedef struct dpp_ptptm_ptp_top_local_pp1s_adjust_value_t { + ZXIC_UINT32 local_pp1s_adjust_value; +} DPP_PTPTM_PTP_TOP_LOCAL_PP1S_ADJUST_VALUE_T; + +typedef struct dpp_ptptm_ptp_top_pp1s_to_np_select_t { + ZXIC_UINT32 pp1s_to_np_sel; +} DPP_PTPTM_PTP_TOP_PP1S_TO_NP_SELECT_T; + +typedef struct dpp_ptptm_ptp_top_pd_u1_sel_t { + ZXIC_UINT32 pd_u1_sel1; + ZXIC_UINT32 pd_u1_sel0; +} DPP_PTPTM_PTP_TOP_PD_U1_SEL_T; + +typedef struct dpp_ptptm_ptp_top_pd_u1_pd0_shift_t { + ZXIC_UINT32 pd_u1_pd0_shift; +} DPP_PTPTM_PTP_TOP_PD_U1_PD0_SHIFT_T; + +typedef struct dpp_ptptm_ptp_top_pd_u1_pd1_shift_t { + ZXIC_UINT32 pd_u1_pd1_shift; +} DPP_PTPTM_PTP_TOP_PD_U1_PD1_SHIFT_T; + +typedef struct dpp_ptptm_ptp_top_pd_u1_result_t { + ZXIC_UINT32 pd_u1_result_sign; + ZXIC_UINT32 pd_u1_overflow; + ZXIC_UINT32 pd_u1_result; +} DPP_PTPTM_PTP_TOP_PD_U1_RESULT_T; + +typedef struct dpp_ptptm_ptp_top_pd_u2_sel_t { + ZXIC_UINT32 pd_u2_sel1; + ZXIC_UINT32 pd_u2_sel0; +} DPP_PTPTM_PTP_TOP_PD_U2_SEL_T; + +typedef struct dpp_ptptm_ptp_top_pd_u2_pd0_shift_t { + ZXIC_UINT32 pd_u2_pd0_shift; +} DPP_PTPTM_PTP_TOP_PD_U2_PD0_SHIFT_T; + +typedef struct dpp_ptptm_ptp_top_pd_u2_pd1_shift_t { + ZXIC_UINT32 pd_u2_pd1_shift; +} DPP_PTPTM_PTP_TOP_PD_U2_PD1_SHIFT_T; + +typedef struct 
dpp_ptptm_ptp_top_pd_u2_result_t { + ZXIC_UINT32 pd_u2_result_sign; + ZXIC_UINT32 pd_u2_overflow; + ZXIC_UINT32 pd_u2_result; +} DPP_PTPTM_PTP_TOP_PD_U2_RESULT_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_nanosecond_delay0_t { + ZXIC_UINT32 tsn_group_nanosecond_delay0; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY0_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_fracnanosecond_delay0_t { + ZXIC_UINT32 tsn_group_fracnanosecond_delay0; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY0_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_nanosecond_delay1_t { + ZXIC_UINT32 tsn_group_nanosecond_delay1; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY1_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_fracnanosecond_delay1_t { + ZXIC_UINT32 tsn_group_fracnanosecond_delay1; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY1_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_nanosecond_delay2_t { + ZXIC_UINT32 tsn_group_nanosecond_delay2; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY2_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_fracnanosecond_delay2_t { + ZXIC_UINT32 tsn_group_fracnanosecond_delay2; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY2_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_nanosecond_delay3_t { + ZXIC_UINT32 tsn_group_nanosecond_delay3; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY3_T; + +typedef struct dpp_ptptm_ptp_top_tsn_group_fracnanosecond_delay3_t { + ZXIC_UINT32 tsn_group_fracnanosecond_delay3; +} DPP_PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY3_T; + +typedef struct dpp_ptptm_ptp_top_tsn_ptp1588_rdma_nanosecond_delay_t { + ZXIC_UINT32 ptp1588_rdma_nanosecond_delay; +} DPP_PTPTM_PTP_TOP_TSN_PTP1588_RDMA_NANOSECOND_DELAY_T; + +typedef struct dpp_ptptm_ptp_top_ptp1588_rdma_fracnanosecond_delay_t { + ZXIC_UINT32 ptp1588_rdma_fracnanosecond_delay; +} DPP_PTPTM_PTP_TOP_PTP1588_RDMA_FRACNANOSECOND_DELAY_T; + +typedef struct dpp_ptptm_ptp_top_ptp1588_np_nanosecond_delay_t { + ZXIC_UINT32 ptp1588_np_nanosecond_delay; +} 
DPP_PTPTM_PTP_TOP_PTP1588_NP_NANOSECOND_DELAY_T; + +typedef struct dpp_ptptm_ptp_top_ptp1588_np_fracnanosecond_delay_t { + ZXIC_UINT32 ptp1588_np_fracnanosecond_delay; +} DPP_PTPTM_PTP_TOP_PTP1588_NP_FRACNANOSECOND_DELAY_T; + +typedef struct dpp_ptptm_ptp_top_time_sync_period_t { + ZXIC_UINT32 time_sync_period; +} DPP_PTPTM_PTP_TOP_TIME_SYNC_PERIOD_T; + +typedef struct dpp_ptptm_ptptm_module_id_t { + ZXIC_UINT32 module_id; +} DPP_PTPTM_PTPTM_MODULE_ID_T; + +typedef struct dpp_ptptm_ptptm_module_version_t { + ZXIC_UINT32 module_major_version; + ZXIC_UINT32 module_minor_version; +} DPP_PTPTM_PTPTM_MODULE_VERSION_T; + +typedef struct dpp_ptptm_ptptm_module_date_t { + ZXIC_UINT32 year; + ZXIC_UINT32 month; + ZXIC_UINT32 date; +} DPP_PTPTM_PTPTM_MODULE_DATE_T; + +typedef struct dpp_ptptm_ptptm_interrupt_status_t { + ZXIC_UINT32 pps_in_status; + ZXIC_UINT32 fifo_almost_full_status; + ZXIC_UINT32 fifo_no_empty_status; + ZXIC_UINT32 trigger_output_status; + ZXIC_UINT32 trigger_input_status; +} DPP_PTPTM_PTPTM_INTERRUPT_STATUS_T; + +typedef struct dpp_ptptm_ptptm_interrupt_event_t { + ZXIC_UINT32 pps_in_event; + ZXIC_UINT32 fifo_almost_full_event; + ZXIC_UINT32 fifo_no_empty_event; + ZXIC_UINT32 trigger_output_event; + ZXIC_UINT32 trigger_input_event; +} DPP_PTPTM_PTPTM_INTERRUPT_EVENT_T; + +typedef struct dpp_ptptm_ptptm_interrupt_mask_t { + ZXIC_UINT32 pps_in_event_mask; + ZXIC_UINT32 fifo_almost_full_event_mask; + ZXIC_UINT32 fifo_no_empty_event_mask; + ZXIC_UINT32 trigger_output_event_mask; + ZXIC_UINT32 trigger_input_eventt_mask; +} DPP_PTPTM_PTPTM_INTERRUPT_MASK_T; + +typedef struct dpp_ptptm_ptptm_interrupt_test_t { + ZXIC_UINT32 trigger_pps_in_event_test; + ZXIC_UINT32 trigger_fifo_almost_full_event_test; + ZXIC_UINT32 trigger_fifo_no_empty_event_test; + ZXIC_UINT32 trigger_output_event_test; + ZXIC_UINT32 trigger_input_event_test; +} DPP_PTPTM_PTPTM_INTERRUPT_TEST_T; + +typedef struct dpp_ptptm_ptptm_hw_clock_cycle_integer_t { + ZXIC_UINT32 
integeral_nanosecond_of_hw_clock_cycle; +} DPP_PTPTM_PTPTM_HW_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_hw_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_hw_clock_cycle; +} DPP_PTPTM_PTPTM_HW_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_ptp_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_ptp_clock_cycle; +} DPP_PTPTM_PTPTM_PTP_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_ptp_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_ptp_clock_cycle; +} DPP_PTPTM_PTPTM_PTP_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_ptp_configuration_t { + ZXIC_UINT32 trig_oe; + ZXIC_UINT32 hw_time_update_en; + ZXIC_UINT32 ptp1588_tod_time_update_en; + ZXIC_UINT32 timer_enable; + ZXIC_UINT32 pps_output_enable; + ZXIC_UINT32 pp1_output_enable; + ZXIC_UINT32 pp2_output_enable; + ZXIC_UINT32 enable_writing_timestamps_to_the_fifo; + ZXIC_UINT32 l2s_time_output_select; + ZXIC_UINT32 reserved_9; + ZXIC_UINT32 pps_input_select; + ZXIC_UINT32 pp_output_select; + ZXIC_UINT32 reserved_6; + ZXIC_UINT32 timer_run_mode; + ZXIC_UINT32 update_command_select; + ZXIC_UINT32 trigger_out_enable; + ZXIC_UINT32 trigger_in_enable; + ZXIC_UINT32 timer_capture_slave_mode; +} DPP_PTPTM_PTPTM_PTP_CONFIGURATION_T; + +typedef struct dpp_ptptm_ptptm_timer_control_t { + ZXIC_UINT32 ptpmoutputsynchroningstate; + ZXIC_UINT32 ptp1588_fifo_read_command; + ZXIC_UINT32 adjust_the_timer; +} DPP_PTPTM_PTPTM_TIMER_CONTROL_T; + +typedef struct dpp_ptptm_ptptm_pps_income_delay_t { + ZXIC_UINT32 pps_income_delay_nanosecond; + ZXIC_UINT32 pps_income_delay_frac_nanosecond; +} DPP_PTPTM_PTPTM_PPS_INCOME_DELAY_T; + +typedef struct dpp_ptptm_ptptm_clock_cycle_update_t { + ZXIC_UINT32 tsn3_clock_cycle_update_enable; + ZXIC_UINT32 tsn2_clock_cycle_update_enable; + ZXIC_UINT32 tsn1_clock_cycle_update_enable; + ZXIC_UINT32 tsn0_clock_cycle_update_enable; + ZXIC_UINT32 ptp1588_clock_cycle_update_enable; +} DPP_PTPTM_PTPTM_CLOCK_CYCLE_UPDATE_T; + 
+typedef struct dpp_ptptm_ptptm_cycle_time_of_output_period_pulse_1_t { + ZXIC_UINT32 clock_number_of_output_period_pulse_1; +} DPP_PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_1_T; + +typedef struct dpp_ptptm_ptptm_cycle_time_of_output_period_pulse_2_t { + ZXIC_UINT32 clock_number_of_output_period_pulse_2; +} DPP_PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_2_T; + +typedef struct dpp_ptptm_ptptm_timer_latch_en_t { + ZXIC_UINT32 latch_the_timer_en; +} DPP_PTPTM_PTPTM_TIMER_LATCH_EN_T; + +typedef struct dpp_ptptm_ptptm_timer_latch_sel_t { + ZXIC_UINT32 timer_latch_sel; +} DPP_PTPTM_PTPTM_TIMER_LATCH_SEL_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_tod_nanosecond_t { + ZXIC_UINT32 trigger_in_tod_nanosecond; +} DPP_PTPTM_PTPTM_TRIGGER_IN_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_lower_tod_second_t { + ZXIC_UINT32 trigger_in_lower_tod_second; +} DPP_PTPTM_PTPTM_TRIGGER_IN_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_high_tod_second_t { + ZXIC_UINT32 trigger_in_high_tod_second; +} DPP_PTPTM_PTPTM_TRIGGER_IN_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_fracnanosecond_t { + ZXIC_UINT32 trigger_in_fracnanosecond; +} DPP_PTPTM_PTPTM_TRIGGER_IN_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_hardware_time_low_t { + ZXIC_UINT32 trigger_in_hardware_time_low; +} DPP_PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_LOW_T; + +typedef struct dpp_ptptm_ptptm_trigger_in_hardware_time_high_t { + ZXIC_UINT32 trigger_in_hardware_time_high; +} DPP_PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_HIGH_T; + +typedef struct dpp_ptptm_ptptm_trigger_out_tod_nanosecond_t { + ZXIC_UINT32 trigger_out_tod_nanosecond; +} DPP_PTPTM_PTPTM_TRIGGER_OUT_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_out_lower_tod_second_t { + ZXIC_UINT32 trigger_out_lower_tod_second; +} DPP_PTPTM_PTPTM_TRIGGER_OUT_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_out_high_tod_second_t { + ZXIC_UINT32 trigger_out_high_tod_second; +} 
DPP_PTPTM_PTPTM_TRIGGER_OUT_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_trigger_out_hardware_time_low_t { + ZXIC_UINT32 trigger_out_hardware_time_low; +} DPP_PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_LOW_T; + +typedef struct dpp_ptptm_ptptm_trigger_out_hardware_time_high_t { + ZXIC_UINT32 trigger_out_hardware_time_high; +} DPP_PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_HIGH_T; + +typedef struct dpp_ptptm_ptptm_adjust_tod_nanosecond_t { + ZXIC_UINT32 adjust_tod_nanosecond; +} DPP_PTPTM_PTPTM_ADJUST_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_adjust_lower_tod_second_t { + ZXIC_UINT32 adjust_lower_tod_second; +} DPP_PTPTM_PTPTM_ADJUST_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_adjust_high_tod_second_t { + ZXIC_UINT32 adjust_high_tod_second; +} DPP_PTPTM_PTPTM_ADJUST_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_adjust_fracnanosecond_t { + ZXIC_UINT32 adjust_fracnanosecond; +} DPP_PTPTM_PTPTM_ADJUST_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_adjust_hardware_time_low_t { + ZXIC_UINT32 adjust_hardware_time_low; +} DPP_PTPTM_PTPTM_ADJUST_HARDWARE_TIME_LOW_T; + +typedef struct dpp_ptptm_ptptm_adjust_hardware_time_high_t { + ZXIC_UINT32 adjust_hardware_time_high; +} DPP_PTPTM_PTPTM_ADJUST_HARDWARE_TIME_HIGH_T; + +typedef struct dpp_ptptm_ptptm_latch_tod_nanosecond_t { + ZXIC_UINT32 latch_tod_nanosecond; +} DPP_PTPTM_PTPTM_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_latch_lower_tod_second_t { + ZXIC_UINT32 latch_lower_tod_second; +} DPP_PTPTM_PTPTM_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_latch_high_tod_second_t { + ZXIC_UINT32 latch_high_tod_second; +} DPP_PTPTM_PTPTM_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_latch_fracnanosecond_t { + ZXIC_UINT32 latch_fracnanosecond; +} DPP_PTPTM_PTPTM_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_latch_hardware_time_low_t { + ZXIC_UINT32 latch_hardware_time_low; +} DPP_PTPTM_PTPTM_LATCH_HARDWARE_TIME_LOW_T; + +typedef struct 
dpp_ptptm_ptptm_latch_hardware_time_high_t { + ZXIC_UINT32 latch_hardware_time_high; +} DPP_PTPTM_PTPTM_LATCH_HARDWARE_TIME_HIGH_T; + +typedef struct dpp_ptptm_ptptm_real_tod_nanosecond_t { + ZXIC_UINT32 real_tod_nanosecond; +} DPP_PTPTM_PTPTM_REAL_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_real_lower_tod_second_t { + ZXIC_UINT32 real_lower_tod_second; +} DPP_PTPTM_PTPTM_REAL_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_real_high_tod_second_t { + ZXIC_UINT32 real_high_tod_second; +} DPP_PTPTM_PTPTM_REAL_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_real_hardware_time_low_t { + ZXIC_UINT32 real_hardware_time_low; +} DPP_PTPTM_PTPTM_REAL_HARDWARE_TIME_LOW_T; + +typedef struct dpp_ptptm_ptptm_real_hardware_time_high_t { + ZXIC_UINT32 real_hardware_time_high; +} DPP_PTPTM_PTPTM_REAL_HARDWARE_TIME_HIGH_T; + +typedef struct dpp_ptptm_ptptm_ptp1588_event_message_port_t { + ZXIC_UINT32 ptp1588_event_message_port; +} DPP_PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_PORT_T; + +typedef struct dpp_ptptm_ptptm_ptp1588_event_message_timestamp_low_t { + ZXIC_UINT32 ptp1588_event_message_timestamp_low; +} DPP_PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_LOW_T; + +typedef struct dpp_ptptm_ptptm_ptp1588_event_message_timestamp_high_t { + ZXIC_UINT32 ptp1588_event_message_timestamp_high; +} DPP_PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_HIGH_T; + +typedef struct dpp_ptptm_ptptm_ptp1588_event_message_fifo_status_t { + ZXIC_UINT32 fifo_full; + ZXIC_UINT32 fifo_empty; + ZXIC_UINT32 timestamps_count; +} DPP_PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_FIFO_STATUS_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tod_nanosecond_t { + ZXIC_UINT32 latch_1588tod_nanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_lower_tod_second_t { + ZXIC_UINT32 latch_lower_1588tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_high_tod_second_t { + ZXIC_UINT32 latch_high_1588tod_second; +} 
DPP_PTPTM_PTPTM_PP1S_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_fracnanosecond_t { + ZXIC_UINT32 latch_1588fracnanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn_time_configuration_t { + ZXIC_UINT32 tsn_pps_enable; + ZXIC_UINT32 tsn_timer_enable; + ZXIC_UINT32 tsn_timer_run_mode; + ZXIC_UINT32 timer_capture_slave_mode; +} DPP_PTPTM_PTPTM_TSN_TIME_CONFIGURATION_T; + +typedef struct dpp_ptptm_ptptm_tsn_timer_control_t { + ZXIC_UINT32 adjust_the_tsn3_timer; + ZXIC_UINT32 adjust_the_tsn2_timer; + ZXIC_UINT32 adjust_the_tsn1_timer; + ZXIC_UINT32 adjust_the_tsn0_timer; +} DPP_PTPTM_PTPTM_TSN_TIMER_CONTROL_T; + +typedef struct dpp_ptptm_ptptm_tsn0_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_tsn0_clock_cycle; +} DPP_PTPTM_PTPTM_TSN0_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_tsn0_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_tsn0_clock_cycle; +} DPP_PTPTM_PTPTM_TSN0_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_tsn1_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_tsn1_clock_cycle; +} DPP_PTPTM_PTPTM_TSN1_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_tsn1_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_tsn1_clock_cycle; +} DPP_PTPTM_PTPTM_TSN1_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_tsn2_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_tsn2_clock_cycle; +} DPP_PTPTM_PTPTM_TSN2_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_tsn2_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_tsn2_clock_cycle; +} DPP_PTPTM_PTPTM_TSN2_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_tsn3_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_tsn3_clock_cycle; +} DPP_PTPTM_PTPTM_TSN3_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_tsn3_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_tsn3_clock_cycle; +} 
DPP_PTPTM_PTPTM_TSN3_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_tsn0_adjust_tod_nanosecond_t { + ZXIC_UINT32 tsn0_adjust_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN0_ADJUST_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_adjust_lower_tod_second_t { + ZXIC_UINT32 tsn0_adjust_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN0_ADJUST_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_adjust_high_tod_second_t { + ZXIC_UINT32 tsn0_adjust_high_tod_second; +} DPP_PTPTM_PTPTM_TSN0_ADJUST_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_adjust_fracnanosecond_t { + ZXIC_UINT32 tsn0_adjust_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN0_ADJUST_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_adjust_tod_nanosecond_t { + ZXIC_UINT32 tsn1_adjust_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN1_ADJUST_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_adjust_lower_tod_second_t { + ZXIC_UINT32 tsn1_adjust_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN1_ADJUST_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_adjust_high_tod_second_t { + ZXIC_UINT32 tsn1_adjust_high_tod_second; +} DPP_PTPTM_PTPTM_TSN1_ADJUST_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_adjust_fracnanosecond_t { + ZXIC_UINT32 tsn1_adjust_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN1_ADJUST_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_adjust_tod_nanosecond_t { + ZXIC_UINT32 tsn2_adjust_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN2_ADJUST_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_adjust_lower_tod_second_t { + ZXIC_UINT32 tsn2_adjust_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN2_ADJUST_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_adjust_high_tod_second_t { + ZXIC_UINT32 tsn2_adjust_high_tod_second; +} DPP_PTPTM_PTPTM_TSN2_ADJUST_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_adjust_fracnanosecond_t { + ZXIC_UINT32 tsn2_adjust_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN2_ADJUST_FRACNANOSECOND_T; + +typedef struct 
dpp_ptptm_ptptm_tsn3_adjust_tod_nanosecond_t { + ZXIC_UINT32 tsn3_adjust_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN3_ADJUST_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_adjust_lower_tod_second_t { + ZXIC_UINT32 tsn3_adjust_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN3_ADJUST_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_adjust_high_tod_second_t { + ZXIC_UINT32 tsn3_adjust_high_tod_second; +} DPP_PTPTM_PTPTM_TSN3_ADJUST_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_adjust_fracnanosecond_t { + ZXIC_UINT32 tsn3_adjust_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN3_ADJUST_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_latch_tod_nanosecond_t { + ZXIC_UINT32 tsn0_latch_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN0_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_latch_lower_tod_second_t { + ZXIC_UINT32 tsn0_latch_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN0_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_latch_high_tod_second_t { + ZXIC_UINT32 tsn0_latch_high_tod_second; +} DPP_PTPTM_PTPTM_TSN0_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_latch_fracnanosecond_t { + ZXIC_UINT32 tsn0_latch_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN0_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_latch_tod_nanosecond_t { + ZXIC_UINT32 tsn1_latch_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN1_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_latch_lower_tod_second_t { + ZXIC_UINT32 tsn1_latch_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN1_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_latch_high_tod_second_t { + ZXIC_UINT32 tsn1_latch_high_tod_second; +} DPP_PTPTM_PTPTM_TSN1_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_latch_fracnanosecond_t { + ZXIC_UINT32 tsn1_latch_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN1_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_latch_tod_nanosecond_t { + ZXIC_UINT32 tsn2_latch_tod_nanosecond; +} 
DPP_PTPTM_PTPTM_TSN2_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_latch_lower_tod_second_t { + ZXIC_UINT32 tsn2_latch_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN2_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_latch_high_tod_second_t { + ZXIC_UINT32 tsn2_latch_high_tod_second; +} DPP_PTPTM_PTPTM_TSN2_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_latch_fracnanosecond_t { + ZXIC_UINT32 tsn2_latch_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN2_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_latch_tod_nanosecond_t { + ZXIC_UINT32 tsn3_latch_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN3_LATCH_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_latch_lower_tod_second_t { + ZXIC_UINT32 tsn3_latch_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN3_LATCH_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_latch_high_tod_second_t { + ZXIC_UINT32 tsn3_latch_high_tod_second; +} DPP_PTPTM_PTPTM_TSN3_LATCH_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_latch_fracnanosecond_t { + ZXIC_UINT32 tsn3_latch_fracnanosecond; +} DPP_PTPTM_PTPTM_TSN3_LATCH_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn0_tod_nanosecond_t { + ZXIC_UINT32 latch_tsn0_tod_nanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN0_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn0_lower_tod_second_t { + ZXIC_UINT32 latch_lower_tsn0_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN0_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn0_high_tod_second_t { + ZXIC_UINT32 latch_high_tsn0_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN0_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn0_fracnanosecond_t { + ZXIC_UINT32 latch_tsn0_fracnanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN0_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn1_tod_nanosecond_t { + ZXIC_UINT32 latch_tsn1_tod_nanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN1_TOD_NANOSECOND_T; + +typedef struct 
dpp_ptptm_ptptm_pp1s_latch_tsn1_lower_tod_second_t { + ZXIC_UINT32 latch_lower_tsn1_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN1_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn1_high_tod_second_t { + ZXIC_UINT32 latch_high_tsn1_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN1_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn1_fracnanosecond_t { + ZXIC_UINT32 latch_tsn1_fracnanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN1_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn2_tod_nanosecond_t { + ZXIC_UINT32 latch_tsn2_tod_nanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN2_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn2_lower_tod_second_t { + ZXIC_UINT32 latch_lower_tsn2_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN2_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn2_high_tod_second_t { + ZXIC_UINT32 latch_high_tsn2_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN2_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn2_fracnanosecond_t { + ZXIC_UINT32 latch_tsn2_fracnanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN2_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn3_tod_nanosecond_t { + ZXIC_UINT32 latch_tsn3_tod_nanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN3_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn3_lower_tod_second_t { + ZXIC_UINT32 latch_lower_tsn3_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN3_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn3_high_tod_second_t { + ZXIC_UINT32 latch_high_tsn3_tod_second; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN3_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_pp1s_latch_tsn3_fracnanosecond_t { + ZXIC_UINT32 latch_tsn3_fracnanosecond; +} DPP_PTPTM_PTPTM_PP1S_LATCH_TSN3_FRACNANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_real_tod_nanosecond_t { + ZXIC_UINT32 tsn0_real_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN0_REAL_TOD_NANOSECOND_T; + +typedef struct 
dpp_ptptm_ptptm_tsn0_real_lower_tod_second_t { + ZXIC_UINT32 tsn0_real_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN0_REAL_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn0_real_high_tod_second_t { + ZXIC_UINT32 tsn0_real_high_tod_second; +} DPP_PTPTM_PTPTM_TSN0_REAL_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_real_tod_nanosecond_t { + ZXIC_UINT32 tsn1_real_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN1_REAL_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_real_lower_tod_second_t { + ZXIC_UINT32 tsn1_real_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN1_REAL_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn1_real_high_tod_second_t { + ZXIC_UINT32 tsn1_real_high_tod_second; +} DPP_PTPTM_PTPTM_TSN1_REAL_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_real_tod_nanosecond_t { + ZXIC_UINT32 tsn2_real_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN2_REAL_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_real_lower_tod_second_t { + ZXIC_UINT32 tsn2_real_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN2_REAL_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn2_real_high_tod_second_t { + ZXIC_UINT32 tsn2_real_high_tod_second; +} DPP_PTPTM_PTPTM_TSN2_REAL_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_real_tod_nanosecond_t { + ZXIC_UINT32 tsn3_real_tod_nanosecond; +} DPP_PTPTM_PTPTM_TSN3_REAL_TOD_NANOSECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_real_lower_tod_second_t { + ZXIC_UINT32 tsn3_real_lower_tod_second; +} DPP_PTPTM_PTPTM_TSN3_REAL_LOWER_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_tsn3_real_high_tod_second_t { + ZXIC_UINT32 tsn3_real_high_tod_second; +} DPP_PTPTM_PTPTM_TSN3_REAL_HIGH_TOD_SECOND_T; + +typedef struct dpp_ptptm_ptptm_real_ptp_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_real_ptp_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_real_ptp_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_real_ptp_clock_cycle; +} 
DPP_PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_real_tsn0_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_real_tsn0_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_real_tsn0_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_real_tsn0_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_real_tsn1_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_real_tsn1_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_real_tsn1_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_real_tsn1_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_real_tsn2_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_real_tsn2_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_real_tsn2_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_real_tsn2_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_FRACTION_T; + +typedef struct dpp_ptptm_ptptm_real_tsn3_clock_cycle_integer_t { + ZXIC_UINT32 integeral_nanosecond_of_real_tsn3_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_INTEGER_T; + +typedef struct dpp_ptptm_ptptm_real_tsn3_clock_cycle_fraction_t { + ZXIC_UINT32 fractional_nanosecond_of_real_tsn3_clock_cycle; +} DPP_PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_FRACTION_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg.h new file mode 100644 index 000000000000..8286ec87199c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg.h @@ -0,0 +1,53 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_reg.h +* 文件标识 : +* 内容摘要 : +* 其它说明 
: +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/03/18 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef _DPP_REG_H_ +#define _DPP_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "dpp_reg_struct.h" +#include "dpp_reg_info.h" +#include "dpp_reg_api.h" +#include "dpp_module.h" + +#include "dpp_cfg_reg.h" +#include "dpp_etm_reg.h" +#include "dpp_nppu_reg.h" +#include "dpp_ppu_reg.h" +#include "dpp_smmu0_reg.h" +#include "dpp_smmu1_reg.h" +#include "dpp_se_reg.h" +#include "dpp_stat_reg.h" +#include "dpp_axi_reg.h" +#include "dpp_dtb_reg.h" +#include "dpp_trpg_reg.h" +#include "dpp_tsn_reg.h" +#include "dpp_ptptm_reg.h" + +#include "zxic_comm_socket.h" +#include "zxic_comm_thread.h" + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_info.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_info.h new file mode 100644 index 000000000000..082b9a7eeb06 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_info.h @@ -0,0 +1,4456 @@ + +#ifndef _DPP_REG_INFO_H_ +#define _DPP_REG_INFO_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum dpp_reg_info_e { + ETM_CFGMT_CPU_CHECK_REGr = 0, + ETM_CFGMT_CFGMT_BLKSIZEr = 1, + ETM_CFGMT_REG_INT_STATE_REGr = 2, + ETM_CFGMT_REG_INT_MASK_REGr = 3, + ETM_CFGMT_TIMEOUT_LIMITr = 4, + ETM_CFGMT_SUBSYSTEM_RDY_REGr = 5, + ETM_CFGMT_SUBSYSTEM_EN_REGr = 6, + ETM_CFGMT_CFGMT_INT_REGr = 7, + ETM_CFGMT_QMU_WORK_MODEr = 8, + ETM_CFGMT_CFGMT_DDR_ATTACHr = 9, + ETM_CFGMT_CNT_MODE_REGr = 10, + ETM_CFGMT_CLKGATE_ENr = 11, + ETM_CFGMT_SOFTRST_ENr = 12, + ETM_OLIF_IMEM_PROG_FULLr = 13, + ETM_OLIF_QMU_PARA_PROG_FULLr = 14, + ETM_OLIF_OLIF_INT_MASKr = 15, + ETM_OLIF_ITMHRAM_PARITY_ERR_2_INTr = 16, + ETM_OLIF_LIF0_PORT_RDY_MASK_Hr = 17, + ETM_OLIF_LIF0_PORT_RDY_MASK_Lr = 18, + 
ETM_OLIF_LIF0_PORT_RDY_CFG_Hr = 19, + ETM_OLIF_LIF0_PORT_RDY_CFG_Lr = 20, + ETM_OLIF_LIF0_LINK_RDY_MASK_CFGr = 21, + ETM_OLIF_TM_LIF_STAT_CFGr = 22, + ETM_OLIF_TM_LIF_SOP_STATr = 23, + ETM_OLIF_TM_LIF_EOP_STATr = 24, + ETM_OLIF_TM_LIF_VLD_STATr = 25, + ETM_CGAVD_PROG_FULL_ASSERT_CFGr = 26, + ETM_CGAVD_CGAVD_INTr = 27, + ETM_CGAVD_CGAVD_RAM_ERRr = 28, + ETM_CGAVD_CGAVD_INT_MASKr = 29, + ETM_CGAVD_CGAVD_RAM_ERR_INT_MASKr = 30, + ETM_CGAVD_CFGMT_BYTE_MODEr = 31, + ETM_CGAVD_AVG_QLEN_RETURN_ZERO_ENr = 32, + ETM_CGAVD_FLOW_WRED_Q_LEN_THr = 33, + ETM_CGAVD_FLOW_WQr = 34, + ETM_CGAVD_FLOW_WRED_MAX_THr = 35, + ETM_CGAVD_FLOW_WRED_MIN_THr = 36, + ETM_CGAVD_FLOW_WRED_CFG_PARAr = 37, + ETM_CGAVD_PP_AVG_Q_LENr = 38, + ETM_CGAVD_PP_TD_THr = 39, + ETM_CGAVD_PP_CA_MTDr = 40, + ETM_CGAVD_PP_WRED_GRP_TH_ENr = 41, + ETM_CGAVD_PP_WRED_Q_LEN_THr = 42, + ETM_CGAVD_PP_WQr = 43, + ETM_CGAVD_PP_WRED_MAX_THr = 44, + ETM_CGAVD_PP_WRED_MIN_THr = 45, + ETM_CGAVD_PP_CFG_PARAr = 46, + ETM_CGAVD_SYS_AVG_Q_LENr = 47, + ETM_CGAVD_SYS_TD_THr = 48, + ETM_CGAVD_SYS_CGAVD_METDr = 49, + ETM_CGAVD_SYS_CFG_Q_GRP_PARAr = 50, + ETM_CGAVD_SYS_WQr = 51, + ETM_CGAVD_GRED_MAX_THr = 52, + ETM_CGAVD_GRED_MID_THr = 53, + ETM_CGAVD_GRED_MIN_THr = 54, + ETM_CGAVD_GRED_CFG_PARA0r = 55, + ETM_CGAVD_GRED_CFG_PARA1r = 56, + ETM_CGAVD_GRED_CFG_PARA2r = 57, + ETM_CGAVD_SYS_WINDOW_TH_Hr = 58, + ETM_CGAVD_SYS_WINDOW_TH_Lr = 59, + ETM_CGAVD_AMPLIFY_GENE0r = 60, + ETM_CGAVD_AMPLIFY_GENE1r = 61, + ETM_CGAVD_AMPLIFY_GENE2r = 62, + ETM_CGAVD_AMPLIFY_GENE3r = 63, + ETM_CGAVD_AMPLIFY_GENE4r = 64, + ETM_CGAVD_AMPLIFY_GENE5r = 65, + ETM_CGAVD_AMPLIFY_GENE6r = 66, + ETM_CGAVD_AMPLIFY_GENE7r = 67, + ETM_CGAVD_AMPLIFY_GENE8r = 68, + ETM_CGAVD_AMPLIFY_GENE9r = 69, + ETM_CGAVD_AMPLIFY_GENE10r = 70, + ETM_CGAVD_AMPLIFY_GENE11r = 71, + ETM_CGAVD_AMPLIFY_GENE12r = 72, + ETM_CGAVD_AMPLIFY_GENE13r = 73, + ETM_CGAVD_AMPLIFY_GENE14r = 74, + ETM_CGAVD_AMPLIFY_GENE15r = 75, + ETM_CGAVD_EQUAL_PKT_LEN_ENr = 76, + ETM_CGAVD_EQUAL_PKT_LEN_TH0r = 77, 
+ ETM_CGAVD_EQUAL_PKT_LEN_TH1r = 78, + ETM_CGAVD_EQUAL_PKT_LEN_TH2r = 79, + ETM_CGAVD_EQUAL_PKT_LEN_TH3r = 80, + ETM_CGAVD_EQUAL_PKT_LEN_TH4r = 81, + ETM_CGAVD_EQUAL_PKT_LEN_TH5r = 82, + ETM_CGAVD_EQUAL_PKT_LEN_TH6r = 83, + ETM_CGAVD_EQUAL_PKT_LEN0r = 84, + ETM_CGAVD_EQUAL_PKT_LEN1r = 85, + ETM_CGAVD_EQUAL_PKT_LEN2r = 86, + ETM_CGAVD_EQUAL_PKT_LEN3r = 87, + ETM_CGAVD_EQUAL_PKT_LEN4r = 88, + ETM_CGAVD_EQUAL_PKT_LEN5r = 89, + ETM_CGAVD_EQUAL_PKT_LEN6r = 90, + ETM_CGAVD_EQUAL_PKT_LEN7r = 91, + ETM_CGAVD_FLOW_CPU_SET_AVG_LENr = 92, + ETM_CGAVD_FLOW_CPU_SET_Q_LENr = 93, + ETM_CGAVD_PP_CPU_SET_AVG_Q_LENr = 94, + ETM_CGAVD_PP_CPU_SET_Q_LENr = 95, + ETM_CGAVD_SYS_CPU_SET_AVG_LENr = 96, + ETM_CGAVD_SYS_CPU_SET_Q_LENr = 97, + ETM_CGAVD_PKE_LEN_CALC_SIGNr = 98, + ETM_CGAVD_RD_CPU_OR_RAMr = 99, + ETM_CGAVD_Q_LEN_UPDATE_DISABLEr = 100, + ETM_CGAVD_CGAVD_DP_SELr = 101, + ETM_CGAVD_CGAVD_SUB_ENr = 102, + ETM_CGAVD_DEFAULT_START_QUEUEr = 103, + ETM_CGAVD_DEFAULT_FINISH_QUEUEr = 104, + ETM_CGAVD_PROTOCOL_START_QUEUEr = 105, + ETM_CGAVD_PROTOCOL_FINISH_QUEUEr = 106, + ETM_CGAVD_UNIFORM_TD_THr = 107, + ETM_CGAVD_UNIFORM_TD_TH_ENr = 108, + ETM_CGAVD_CGAVD_CFG_FCr = 109, + ETM_CGAVD_CGAVD_CFG_NO_FCr = 110, + ETM_CGAVD_CGAVD_FORCE_IMEM_OMEMr = 111, + ETM_CGAVD_CGAVD_SYS_Q_LEN_Lr = 112, + ETM_CGAVD_DEFAULT_QUEUE_ENr = 113, + ETM_CGAVD_PROTOCOL_QUEUE_ENr = 114, + ETM_CGAVD_CFG_TC_FLOWID_DATr = 115, + ETM_CGAVD_FLOW_TD_THr = 116, + ETM_CGAVD_FLOW_CA_MTDr = 117, + ETM_CGAVD_FLOW_DYNAMIC_TH_ENr = 118, + ETM_CGAVD_PP_NUMr = 119, + ETM_CGAVD_FLOW_Q_LENr = 120, + ETM_CGAVD_FLOW_WRED_GRPr = 121, + ETM_CGAVD_FLOW_AVG_Q_LENr = 122, + ETM_CGAVD_QOS_SIGNr = 123, + ETM_CGAVD_Q_PRIr = 124, + ETM_CGAVD_ODMA_TM_ITMD_RD_LOWr = 125, + ETM_CGAVD_ODMA_TM_ITMD_RD_MIDr = 126, + ETM_CGAVD_ODMA_TM_ITMD_RD_HIGHr = 127, + ETM_CGAVD_CGAVD_STAT_PKT_LENr = 128, + ETM_CGAVD_CGAVD_STAT_QNUMr = 129, + ETM_CGAVD_CGAVD_STAT_DPr = 130, + ETM_CGAVD_FLOW_NUM0r = 131, + ETM_CGAVD_FLOW_NUM1r = 132, + ETM_CGAVD_FLOW_NUM2r = 
133, + ETM_CGAVD_FLOW_NUM3r = 134, + ETM_CGAVD_FLOW_NUM4r = 135, + ETM_CGAVD_FLOW0_IMEM_CNTr = 136, + ETM_CGAVD_FLOW1_IMEM_CNTr = 137, + ETM_CGAVD_FLOW2_IMEM_CNTr = 138, + ETM_CGAVD_FLOW3_IMEM_CNTr = 139, + ETM_CGAVD_FLOW4_IMEM_CNTr = 140, + ETM_CGAVD_FLOW0_DROP_CNTr = 141, + ETM_CGAVD_FLOW1_DROP_CNTr = 142, + ETM_CGAVD_FLOW2_DROP_CNTr = 143, + ETM_CGAVD_FLOW3_DROP_CNTr = 144, + ETM_CGAVD_FLOW4_DROP_CNTr = 145, + ETM_CGAVD_FC_COUNT_MODEr = 146, + ETM_CGAVD_QMU_CGAVD_FC_NUMr = 147, + ETM_CGAVD_CGAVD_ODMA_FC_NUMr = 148, + ETM_CGAVD_CFG_OFFSETr = 149, + ETM_TMMU_TMMU_INIT_DONEr = 150, + ETM_TMMU_TMMU_INT_MASK_1r = 151, + ETM_TMMU_TMMU_INT_MASK_2r = 152, + ETM_TMMU_CFGMT_TM_PURE_IMEM_ENr = 153, + ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFGr = 154, + ETM_TMMU_PD_ORDER_FIFO_AFUL_THr = 155, + ETM_TMMU_CACHED_PD_FIFO_AFUL_THr = 156, + ETM_TMMU_WR_CMD_FIFO_AFUL_THr = 157, + ETM_TMMU_IMEM_ENQ_RD_FIFO_AFUL_THr = 158, + ETM_TMMU_IMEM_ENQ_DROP_FIFO_AFUL_THr = 159, + ETM_TMMU_IMEM_DEQ_DROP_FIFO_AFUL_THr = 160, + ETM_TMMU_IMEM_DEQ_RD_FIFO_AFUL_THr = 161, + ETM_TMMU_TMMU_STATES_1r = 162, + ETM_TMMU_TMMU_STATES_2r = 163, + ETM_SHAP_SHAP_IND_CMDr = 164, + ETM_SHAP_SHAP_IND_STAr = 165, + ETM_SHAP_SHAP_IND_DATA0r = 166, + ETM_SHAP_SHAP_IND_DATA1r = 167, + ETM_SHAP_FULL_THRESHOLDr = 168, + ETM_SHAP_EMPTY_THRESHOLDr = 169, + ETM_SHAP_SHAP_STA_INIT_CFGr = 170, + ETM_SHAP_SHAP_CFG_INIT_CFGr = 171, + ETM_SHAP_TOKEN_MODE_SWITCHr = 172, + ETM_SHAP_TOKEN_GRAINr = 173, + ETM_SHAP_CRD_GRAINr = 174, + ETM_SHAP_SHAP_STAT_CTRLr = 175, + ETM_SHAP_TOKEN_STAT_IDr = 176, + ETM_SHAP_TOKEN_STATr = 177, + ETM_SHAP_SHAP_STAT_CLK_CNTr = 178, + ETM_SHAP_SHAP_BUCKET_MAP_TBLr = 179, + ETM_SHAP_BKT_PARA_TBLr = 180, + ETM_CRDT_CREDIT_ENr = 181, + ETM_CRDT_CRT_INTER1r = 182, + ETM_CRDT_DB_TOKENr = 183, + ETM_CRDT_CRS_FLT_CFGr = 184, + ETM_CRDT_TH_SPr = 185, + ETM_CRDT_TH_WFQ_FQr = 186, + ETM_CRDT_TH_WFQ2_FQ2r = 187, + ETM_CRDT_TH_WFQ4_FQ4r = 188, + ETM_CRDT_CFG_STATEr = 189, + ETM_CRDT_CRDT_IND_CMDr = 190, + 
ETM_CRDT_CRDT_IND_STAr = 191, + ETM_CRDT_CRDT_IND_DATA0r = 192, + ETM_CRDT_CRDT_IND_DATA1r = 193, + ETM_CRDT_CRDT_STATEr = 194, + ETM_CRDT_STAT_QUE_ID_0r = 195, + ETM_CRDT_STAT_QUE_ID_1r = 196, + ETM_CRDT_STAT_QUE_ID_2r = 197, + ETM_CRDT_STAT_QUE_ID_3r = 198, + ETM_CRDT_STAT_QUE_ID_4r = 199, + ETM_CRDT_STAT_QUE_ID_5r = 200, + ETM_CRDT_STAT_QUE_ID_6r = 201, + ETM_CRDT_STAT_QUE_ID_7r = 202, + ETM_CRDT_STAT_QUE_ID_8r = 203, + ETM_CRDT_STAT_QUE_ID_9r = 204, + ETM_CRDT_STAT_QUE_ID_10r = 205, + ETM_CRDT_STAT_QUE_ID_11r = 206, + ETM_CRDT_STAT_QUE_ID_12r = 207, + ETM_CRDT_STAT_QUE_ID_13r = 208, + ETM_CRDT_STAT_QUE_ID_14r = 209, + ETM_CRDT_STAT_QUE_ID_15r = 210, + ETM_CRDT_STAT_QUE_CREDITr = 211, + ETM_CRDT_CRDT_CFG_RAM_INITr = 212, + ETM_CRDT_CRDT_STA_RAM_INITr = 213, + ETM_CRDT_CRS_QUE_IDr = 214, + ETM_CRDT_QMU_CRS_END_STATEr = 215, + ETM_CRDT_SHAP_RDYr = 216, + ETM_CRDT_SHAP_INT_REGr = 217, + ETM_CRDT_SHAP_INT_MASK_REGr = 218, + ETM_CRDT_TOKEN_STATE_ALMOST_EMPTY_THr = 219, + ETM_CRDT_TOKEN_STATE_EMPTY_THr = 220, + ETM_CRDT_FULL_THr = 221, + ETM_CRDT_PP_C_LEVEL_SHAP_ENr = 222, + ETM_CRDT_ENQ_TOKEN_THr = 223, + ETM_CRDT_PP_TOKENQ_LEVEL1_QSTATE_WEIGHT_CIRr = 224, + ETM_CRDT_PP_IDLE_WEIGHT_LEVEL1_CIRr = 225, + ETM_CRDT_RCI_GRADE_TH_0_CFGr = 226, + ETM_CRDT_RCI_GRADE_TH_1_CFGr = 227, + ETM_CRDT_RCI_GRADE_TH_2_CFGr = 228, + ETM_CRDT_RCI_GRADE_TH_3_CFGr = 229, + ETM_CRDT_RCI_GRADE_TH_4_CFGr = 230, + ETM_CRDT_RCI_GRADE_TH_5_CFGr = 231, + ETM_CRDT_RCI_GRADE_TH_6_CFGr = 232, + ETM_CRDT_FLOW_DEL_CMDr = 233, + ETM_CRDT_CNT_CLRr = 234, + ETM_CRDT_CRDT_INT_BUSr = 235, + ETM_CRDT_CRDT_INT_MASKr = 236, + ETM_CRDT_CFG_WEIGHT_TOGETHERr = 237, + ETM_CRDT_WEIGHTr = 238, + ETM_CRDT_DEV_SP_STATEr = 239, + ETM_CRDT_DEV_CRSr = 240, + ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0r = 241, + ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32r = 242, + ETM_CRDT_CRDT_INTERVAL_EN_CFGr = 243, + ETM_CRDT_Q_TOKEN_STAUE_CFGr = 244, + ETM_CRDT_Q_TOKEN_DIST_CNTr = 245, + ETM_CRDT_Q_TOKEN_DEC_CNTr = 246, + ETM_CRDT_PP_WEIGHT_RAMr 
= 247, + ETM_CRDT_PP_CBS_SHAPE_EN_RAMr = 248, + ETM_CRDT_PP_NEXT_PC_Q_STATE_RAMr = 249, + ETM_CRDT_DEV_INTERVALr = 250, + ETM_CRDT_DEV_WFQ_CNTr = 251, + ETM_CRDT_DEV_WFQ_STATEr = 252, + ETM_CRDT_DEV_ACTIVE_HEAD_PTRr = 253, + ETM_CRDT_DEV_ACTIVE_TAIL_PTRr = 254, + ETM_CRDT_DEV_UNACTIVE_HEAD_PTRr = 255, + ETM_CRDT_DEV_UNACTIVE_TAIL_PTRr = 256, + ETM_CRDT_PP_WEIGHTr = 257, + ETM_CRDT_PP_QUE_STATEr = 258, + ETM_CRDT_PP_NEXT_PTRr = 259, + ETM_CRDT_PP_CFGr = 260, + ETM_CRDT_PP_UP_PTRr = 261, + ETM_CRDT_CREDIT_DROP_NUMr = 262, + ETM_CRDT_SE_ID_LV0r = 263, + ETM_CRDT_SE_ID_LV1r = 264, + ETM_CRDT_SE_ID_LV2r = 265, + ETM_CRDT_SE_ID_LV3r = 266, + ETM_CRDT_SE_ID_LV4r = 267, + ETM_CRDT_QUE_IDr = 268, + ETM_CRDT_SE_INFO_LV0r = 269, + ETM_CRDT_SE_INFO_LV1r = 270, + ETM_CRDT_SE_INFO_LV2r = 271, + ETM_CRDT_SE_INFO_LV3r = 272, + ETM_CRDT_SE_INFO_LV4r = 273, + ETM_CRDT_QUE_STATEr = 274, + ETM_CRDT_EIR_OFF_IN_ADVANCEr = 275, + ETM_CRDT_DOUBLE_LEVEL_SHAP_PREVENTr = 276, + ETM_CRDT_ADD_STORE_CYCLEr = 277, + ETM_CRDT_TFLAG2_WR_FLAG_SUMr = 278, + ETM_CRDT_FLOWQUE_PARA_TBLr = 279, + ETM_CRDT_SE_PARA_TBLr = 280, + ETM_CRDT_FLOWQUE_INS_TBLr = 281, + ETM_CRDT_SE_INS_TBLr = 282, + ETM_CRDT_EIR_CRS_FILTER_TBLr = 283, + ETM_QMU_QCFG_QLIST_CFG_DONEr = 284, + ETM_QMU_QCFG_QSCH_CREDIT_VALUEr = 285, + ETM_QMU_QCFG_QSCH_CRBAL_INIT_VALUEr = 286, + ETM_QMU_QCFG_QSCH_CRBAL_INIT_MASKr = 287, + ETM_QMU_CMDSCH_RD_CMD_AFUL_THr = 288, + ETM_QMU_CFG_PORT_FC_INTERVALr = 289, + ETM_QMU_QCFG_CSCH_AGED_CFGr = 290, + ETM_QMU_QCFG_CSCH_AGED_SCAN_TIMEr = 291, + ETM_QMU_QCFG_QMU_QLIST_STATE_QUERYr = 292, + ETM_QMU_CFGMT_QSCH_CRBAL_DROP_ENr = 293, + ETM_QMU_CFGMT_WLIST_QNUM_FIFO_AFUL_THr = 294, + ETM_QMU_QCFG_CSW_PKT_BLK_MODEr = 295, + ETM_QMU_QCFG_QLIST_RAM_INIT_CANCELr = 296, + ETM_QMU_QCFG_QSCH_CRBAL_TRANSFER_MODEr = 297, + ETM_QMU_QCFG_QLIST_QCLR_INTERVALr = 298, + ETM_QMU_QCFG_QSCH_QCLR_RATEr = 299, + ETM_QMU_QCFG_QLIST_DDR_RANDOMr = 300, + ETM_QMU_CFGMT_QLIST_PDS_FIFO_AFULL_THr = 301, + 
ETM_QMU_CFGMT_SOP_CMD_FIFO_AFULL_THr = 302, + ETM_QMU_CFGMT_NON_SOP_CMD_FIFO_AFULL_THr = 303, + ETM_QMU_CFGMT_MMU_DATA_FIFO_AFULL_THr = 304, + ETM_QMU_QCFG_QLIST_BANK_EPT_THr = 305, + ETM_QMU_RANDOM_BYPASS_ENr = 306, + ETM_QMU_CFGMT_CRS_SPD_BYPASSr = 307, + ETM_QMU_CFGMT_CRS_INTERVALr = 308, + ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_ENr = 309, + ETM_QMU_CFG_QSCH_AUTOCRFRSTQUEr = 310, + ETM_QMU_CFG_QSCH_AUTOCRLASTQUEr = 311, + ETM_QMU_CFG_QSCH_AUTOCREDITRATEr = 312, + ETM_QMU_CFG_QSCH_SCANFRSTQUEr = 313, + ETM_QMU_CFG_QSCH_SCANLASTQUEr = 314, + ETM_QMU_CFG_QSCH_SCANRATEr = 315, + ETM_QMU_CFG_QSCH_SCAN_ENr = 316, + ETM_QMU_CFGMT_QSCH_RD_CREDIT_FIFO_RATEr = 317, + ETM_QMU_QCFG_QLIST_BDEPr = 318, + ETM_QMU_QCFG_QLIST_BHEADr = 319, + ETM_QMU_QCFG_QLIST_BTAILr = 320, + ETM_QMU_QCFG_QSCH_SHAP_PARAMr = 321, + ETM_QMU_QCFG_QSCH_SHAP_TOKENr = 322, + ETM_QMU_QCFG_QSCH_SHAP_OFFSETr = 323, + ETM_QMU_QCFG_QSCH_CRS_EIR_THr = 324, + ETM_QMU_QCFG_QSCH_CRS_TH1r = 325, + ETM_QMU_QCFG_QSCH_CRS_TH2r = 326, + ETM_QMU_QCFG_CSCH_CONGEST_THr = 327, + ETM_QMU_QCFG_CSCH_SP_FC_THr = 328, + ETM_QMU_QCFG_CSW_SHAP_PARAMETERr = 329, + ETM_QMU_CFGMT_RD_RELEASE_AFUL_THr = 330, + ETM_QMU_CFGMT_DROP_IMEM_RELEASE_FIFO_AFUL_THr = 331, + ETM_QMU_CFGMT_NNH_RD_BUF_AFUL_THr = 332, + ETM_QMU_CFG_PID_USE_INALLr = 333, + ETM_QMU_CFG_PID_ROUND_THr = 334, + ETM_QMU_CFGMT_CREDIT_FIFO_AFULL_THr = 335, + ETM_QMU_CFGMT_SCAN_FIFO_AFULL_THr = 336, + ETM_QMU_CFGMT_SMALL_FIFO_AFUL_THr = 337, + ETM_QMU_CFGMT_FREE_ADDR_FIFO_AFUL_THr = 338, + ETM_QMU_CFGMT_ENQ_RPT_FIFO_AFUL_THr = 339, + ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTHr = 340, + ETM_QMU_QCFG_CSW_SHAP_OFFSET_VALUEr = 341, + ETM_QMU_QCFG_CSW_FC_OFFSET_VALUEr = 342, + ETM_QMU_QMU_INIT_DONE_STATEr = 343, + ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_0r = 344, + ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_1r = 345, + ETM_QMU_QLIST_CFGMT_RAM_INIT_DONEr = 346, + ETM_QMU_QLIST_CFGMT_RAM_ECC_ERRr = 347, + ETM_QMU_QLIST_CFGMT_RAM_SLOT_ERRr = 348, + ETM_QMU_QSCH_CFGMT_RAM_ECCr = 349, + 
ETM_QMU_QLIST_CFGMT_FIFO_STATEr = 350, + ETM_QMU_QLIST_QCFG_CLR_DONEr = 351, + ETM_QMU_QMU_INT_MASK1r = 352, + ETM_QMU_QMU_INT_MASK2r = 353, + ETM_QMU_QMU_INT_MASK3r = 354, + ETM_QMU_QMU_INT_MASK4r = 355, + ETM_QMU_QMU_INT_MASK5r = 356, + ETM_QMU_QMU_INT_MASK6r = 357, + ETM_QMU_CMD_SCH_CFGMT_FIFO_STATEr = 358, + ETM_QMU_QLIST_R_BCNTr = 359, + ETM_QMU_QSCH_RW_CRBALr = 360, + ETM_QMU_QSCH_RW_CRSr = 361, + ETM_QMU_QSCH_R_WLIST_EMPTYr = 362, + ETM_QMU_QCFG_QLIST_BARAM_RDr = 363, + ETM_QMU_QCFG_QSCH_CRBAL_FB_RWr = 364, + ETM_QMU_QCFG_QLIST_GRP0_BANKr = 365, + ETM_QMU_QCFG_QLIST_GRP1_BANKr = 366, + ETM_QMU_QCFG_QLIST_GRP2_BANKr = 367, + ETM_QMU_QCFG_QLIST_GRP3_BANKr = 368, + ETM_QMU_QCFG_QLIST_GRP4_BANKr = 369, + ETM_QMU_QCFG_QLIST_GRP5_BANKr = 370, + ETM_QMU_QCFG_QLIST_GRP6_BANKr = 371, + ETM_QMU_QCFG_QLIST_GRP7_BANKr = 372, + ETM_QMU_QCFG_QLIST_GRPr = 373, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr = 374, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr = 375, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr = 376, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr = 377, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr = 378, + ETM_QMU_CFGMT_GRP_RAM_N_CLR_THDr = 379, + ETM_QMU_CFGMT_AGE_PKT_NUMr = 380, + ETM_QMU_CFGMT_AGE_MULTI_INTERVALr = 381, + ETM_QMU_CFGMT_QMU_PKT_AGE_ENr = 382, + ETM_QMU_CFGMT_QMU_PKT_AGE_INTERVALr = 383, + ETM_QMU_CFGMT_QMU_PKT_AGE_START_ENDr = 384, + ETM_QMU_CFGMT_PKT_AGE_REQ_AFUL_THr = 385, + ETM_QMU_CFGMT_PKT_AGE_STEP_INTERVALr = 386, + ETM_QMU_CFGMT_QMU_IMEM_AGE_MODEr = 387, + ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_INTERVALr = 388, + ETM_QMU_CFGMT_QMU_IMEM_TIME_AGE_INTERVALr = 389, + ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_THDr = 390, + ETM_QMU_CFGMT_IMEM_AGE_STEP_INTERVALr = 391, + ETM_QMU_CFGMT_QMU_ECC_BYPASS_READr = 392, + ETM_QMU_CFGMT_QMU_RESP_STAT_FC_ENr = 393, + ETM_QMU_CFGMT_QMU_BANK_XOFF_PDS_MODEr = 394, + ETM_QMU_CFGMT_QMU_STAT_OFFSETr = 395, + ETM_QMU_FC_CNT_MODEr = 396, + ETM_QMU_MMU_QMU_WR_FC_CNTr = 397, + ETM_QMU_MMU_QMU_RD_FC_CNTr = 398, + ETM_QMU_QMU_CGAVD_FC_CNTr = 399, + ETM_QMU_CGAVD_QMU_PKT_CNTr = 400, + 
ETM_QMU_CGAVD_QMU_PKTLEN_ALLr = 401, + ETM_QMU_OBSERVE_PORTFC_SPECr = 402, + ETM_QMU_SPEC_LIF_PORTFC_COUNTr = 403, + ETM_QMU_CFGMT_QMU_PFC_ENr = 404, + ETM_QMU_CFGMT_QMU_PFC_MASK_1r = 405, + ETM_QMU_CFGMT_QMU_PFC_MASK_2r = 406, + CFG_PCIE_INT_REPEATr = 407, + CFG_DMA_DMA_UP_SIZEr = 408, + CFG_CSR_SOC_WR_TIME_OUT_THRESHr = 409, + NPPU_MR_CFG_CFG_SHAP_PARAMr = 410, + NPPU_MR_CFG_CFG_SHAP_TOKENr = 411, + NPPU_MR_CFG_IDLE_PTR_FIFO_AFUL_THr = 412, + NPPU_MR_CFG_MR_COS_PORT_CFGr = 413, + NPPU_PKTRX_CFG_IND_STATUSr = 414, + NPPU_PKTRX_CFG_IND_CMDr = 415, + NPPU_PKTRX_CFG_IND_DATA0r = 416, + NPPU_PKTRX_CFG_IND_DATA1r = 417, + NPPU_PKTRX_CFG_IND_DATA2r = 418, + NPPU_PKTRX_CFG_IND_DATA3r = 419, + NPPU_PKTRX_CFG_IND_DATA4r = 420, + NPPU_PKTRX_CFG_IND_DATA5r = 421, + NPPU_PKTRX_CFG_IND_DATA6r = 422, + NPPU_PKTRX_CFG_IND_DATA7r = 423, + NPPU_PKTRX_CFG_TCAM_0_CMDr = 424, + NPPU_PKTRX_CFG_TCAM_1_CMDr = 425, + NPPU_PKTRX_CFG_PORT_EN_0r = 426, + NPPU_PKTRX_CFG_PORT_EN_1r = 427, + NPPU_PKTRX_CFG_PORT_EN_2r = 428, + NPPU_PKTRX_CFG_PORT_EN_3r = 429, + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_0r = 430, + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_1r = 431, + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_2r = 432, + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_3r = 433, + NPPU_PKTRX_CFG_PORT_FC_MODE_0r = 434, + NPPU_PKTRX_CFG_PORT_FC_MODE_1r = 435, + NPPU_PKTRX_CFG_PORT_FC_MODE_2r = 436, + NPPU_PKTRX_CFG_PORT_FC_MODE_3r = 437, + NPPU_PKTRX_CFG_PORT_FC_MODE_4r = 438, + NPPU_PKTRX_CFG_PORT_FC_MODE_5r = 439, + NPPU_PKTRX_CFG_PORT_FC_MODE_6r = 440, + NPPU_PKTRX_CFG_PORT_FC_MODE_7r = 441, + NPPU_PKTRX_CFG_CFG_ISCH_AGING_THr = 442, + NPPU_PKTRX_CFG_ISCH_FIFO_TH_0r = 443, + NPPU_PKTRX_CFG_ISCH_CFG_1r = 444, + NPPU_PKTRX_CFG_TCAM_0_VLDr = 445, + NPPU_PKTRX_CFG_TCAM_1_VLDr = 446, + NPPU_PKTRX_CFG_CPU_PORT_EN_MASKr = 447, + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r = 448, + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r = 449, + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_2r = 450, + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_3r = 451, + 
NPPU_PKTRX_CFG_NPPU_STARTr = 452, + NPPU_PKTRX_STAT_IND_STATUSr = 453, + NPPU_PKTRX_STAT_IND_CMDr = 454, + NPPU_PKTRX_STAT_IND_DATA0r = 455, + NPPU_IDMA_CFG_DEBUG_CNT_OVFL_MODEr = 456, + NPPU_IDMA_STAT_IND_STATUSr = 457, + NPPU_IDMA_STAT_IND_CMDr = 458, + NPPU_IDMA_STAT_IND_DATA0r = 459, + NPPU_PBU_CFG_IND_STATUSr = 460, + NPPU_PBU_CFG_IND_CMDr = 461, + NPPU_PBU_CFG_IND_DATA0r = 462, + NPPU_PBU_CFG_IND_DATA1r = 463, + NPPU_PBU_CFG_IND_DATA2r = 464, + NPPU_PBU_CFG_IND_DATA3r = 465, + NPPU_PBU_CFG_IND_DATA4r = 466, + NPPU_PBU_CFG_IND_DATA5r = 467, + NPPU_PBU_CFG_IND_DATA6r = 468, + NPPU_PBU_CFG_IND_DATA7r = 469, + NPPU_PBU_CFG_IDMA_PUBLIC_THr = 470, + NPPU_PBU_CFG_LIF_PUBLIC_THr = 471, + NPPU_PBU_CFG_IDMA_TOTAL_THr = 472, + NPPU_PBU_CFG_LIF_TOTAL_THr = 473, + NPPU_PBU_CFG_MC_TOTAL_THr = 474, + NPPU_PBU_CFG_MC_COS10_THr = 475, + NPPU_PBU_CFG_MC_COS32_THr = 476, + NPPU_PBU_CFG_MC_COS54_THr = 477, + NPPU_PBU_CFG_MC_COS76_THr = 478, + NPPU_PBU_CFG_DEBUG_CNT_OVFL_MODEr = 479, + NPPU_PBU_CFG_SE_KEY_AFUL_NEGATE_CFGr = 480, + NPPU_PBU_CFG_SA_FLAGr = 481, + NPPU_PBU_STAT_IND_DATAr = 482, + NPPU_PBU_STAT_IND_STATUSr = 483, + NPPU_PBU_STAT_IND_CMDr = 484, + NPPU_PBU_STAT_TOTAL_CNTr = 485, + NPPU_PBU_STAT_IDMA_PUB_CNTr = 486, + NPPU_PBU_STAT_LIF_PUB_CNTr = 487, + NPPU_PBU_STAT_MC_TOTAL_CNTr = 488, + NPPU_PBU_STAT_PBU_THRAM_INIT_DONEr = 489, + NPPU_PBU_STAT_IFB_FPTR_INIT_DONEr = 490, + NPPU_ISU_CFG_WEIGHT_NORMAL_UCr = 491, + NPPU_ISU_CFG_FABRIC_OR_SAIPr = 492, + NPPU_ISU_STAT_IND_STATUSr = 493, + NPPU_ISU_STAT_IND_CMDr = 494, + NPPU_ISU_STAT_IND_DAT0r = 495, + NPPU_ODMA_CFG_IND_ACCESS_DONEr = 496, + NPPU_ODMA_CFG_IND_COMMANDr = 497, + NPPU_ODMA_CFG_IND_DAT0r = 498, + NPPU_ODMA_CFG_IND_DAT1r = 499, + NPPU_ODMA_CFG_FABRIC_OR_SAIPr = 500, + NPPU_ODMA_CFG_MAX_PKT_LENr = 501, + NPPU_ODMA_CFG_AGE_ENr = 502, + NPPU_ODMA_CFG_AGE_MODEr = 503, + NPPU_ODMA_CFG_AGE_VALUE_TIMEr = 504, + NPPU_ODMA_CFG_AGE_VALUE_ROOMr = 505, + NPPU_ODMA_CFG_AGE_OUT_CNTr = 506, + NPPU_ODMA_CFG_TOKEN_VALUE_Ar = 
507, + NPPU_ODMA_CFG_TOKEN_VALUE_Br = 508, + NPPU_ODMA_CFG_CFG_SHAP_EN_P0r = 509, + NPPU_ODMA_CFG_CFG_SHAP_EN_P1r = 510, + NPPU_ODMA_CFG_CFG_SHAP_EN_TMr = 511, + NPPU_ODMA_STAT_IND_STATUSr = 512, + NPPU_ODMA_STAT_IND_CMDr = 513, + NPPU_ODMA_STAT_IND_DATA0r = 514, + NPPU_ODMA_STAT_DEBUG_CNT_CFGr = 515, + NPPU_OAM_CFG_BFD_FIRSTCHK_THr = 516, + NPPU_PBU_CFG_MEMID_0_PBU_FC_IDMATH_RAMr = 517, + NPPU_PBU_CFG_MEMID_1_PBU_FC_MACTH_RAMr = 518, + NPPU_PBU_STAT_MEMID_1_ALL_KIND_PORT_CNTr = 519, + NPPU_PBU_STAT_MEMID_2_PPU_PBU_IFB_REQ_VLD_CNTr = 520, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_IFB_RSP_VLD_CNTr = 521, + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_RECY_PTR_VLD_CNTr = 522, + NPPU_PBU_STAT_MEMID_2_PPU_PBU_MCODE_PF_REQ_CNTr = 523, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_RSP_CNTr = 524, + NPPU_PBU_STAT_MEMID_2_PPU_PBU_LOGIC_PF_REQ_CNTr = 525, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_RSP_CNTr = 526, + NPPU_PBU_STAT_MEMID_2_PPU_USE_PTR_PULSE_CNTr = 527, + NPPU_PBU_STAT_MEMID_2_PPU_PBU_WB_VLD_CNTr = 528, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_REORDER_PARA_VLD_CNTr = 529, + NPPU_PBU_STAT_MEMID_2_SE_PBU_DPI_KEY_VLD_CNTr = 530, + NPPU_PBU_STAT_MEMID_2_PBU_SE_DPI_RSP_DATVLD_CNTr = 531, + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD1_CNTr = 532, + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD2_CNTr = 533, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_NO_RSP_CNTr = 534, + NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_NO_RSP_CNTr = 535, + NPPU_PBU_STAT_MEMID_3_CPU_RD_IFB_DATAr = 536, + NPPU_PBU_STAT_MEMID_4_MUX_SEL_RGTr = 537, + NPPU_PBU_STAT_MEMID_5_PORT_PUB_CNTr = 538, + NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_PKT_PULSE_TOTAL_CNTr = 539, + NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_EPKT_PULSE_TOTAL_CNTr = 540, + NPPU_IDMA_STAT_MEMID_1_IDMA_DISPKT_PULSE_TOTAL_CNTr = 541, + NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_PKT_PULSE_CNTr = 542, + NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_EPKT_PULSE_CNTr = 543, + NPPU_IDMA_STAT_MEMID_0_IDMA_DISPKT_PULSE_CNTr = 544, + PPU_PPU_TEST_Rr = 545, + PPU_PPU_PPU_DEBUG_EN_Rr = 546, + PPU_PPU_CSR_DUP_TABLE_WR_DATAr = 547, + 
PPU_PPU_CSR_DUP_TABLE_RD_DATAr = 548, + PPU_PPU_CSR_DUP_TABLE_ADDRr = 549, + PPU_PPU_PPU_DEBUG_VLDr = 550, + PPU_PPU_COP_THASH_RSK_319_288r = 551, + PPU_PPU_COP_THASH_RSK_287_256r = 552, + PPU_PPU_COP_THASH_RSK_255_224r = 553, + PPU_PPU_COP_THASH_RSK_223_192r = 554, + PPU_PPU_COP_THASH_RSK_191_160r = 555, + PPU_PPU_COP_THASH_RSK_159_128r = 556, + PPU_PPU_COP_THASH_RSK_127_096r = 557, + PPU_PPU_COP_THASH_RSK_095_064r = 558, + PPU_PPU_COP_THASH_RSK_063_032r = 559, + PPU_PPU_COP_THASH_RSK_031_000r = 560, + PPU_PPU_CFG_IPV4_IPID_START_VALUEr = 561, + PPU_PPU_CFG_IPV4_IPID_END_VALUEr = 562, + PPU_PPU_CLUSTER_MF_IN_ENr = 563, + PPU_PPU_PPU_EMPTYr = 564, + PPU_PPU_INSTRMEM_W_ADDRr = 565, + PPU_PPU_INSTRMEM_W_DATA_191_160r = 566, + PPU_PPU_INSTRMEM_W_DATA_159_128r = 567, + PPU_PPU_INSTRMEM_W_DATA_127_96r = 568, + PPU_PPU_INSTRMEM_W_DATA_95_64r = 569, + PPU_PPU_INSTRMEM_W_DATA_63_32r = 570, + PPU_PPU_INSTRMEM_W_DATA_31_0r = 571, + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_ASSERT_CFGr = 572, + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_NEGATE_CFGr = 573, + PPU_CLUSTER_INT_1200M_MASKr = 574, + PPU4K_CLUSTER_WR_HIGH_DATA_R_MEXr = 575, + PPU4K_CLUSTER_WR_LOW_DATA_R_MEXr = 576, + PPU4K_CLUSTER_ADDR_R_MEXr = 577, + PPU4K_CLUSTER_SDT_TBL_IND_ACCESS_DONEr = 578, + PPU4K_CLUSTER_RD_HIGH_DATA_R_MEXr = 579, + PPU4K_CLUSTER_RD_LOW_DATA_R_MEXr = 580, + SE_ALG_INIT_OKr = 581, + SE_ALG_CPU_RD_RDYr = 582, + SE_ALG_CPU_RD_DATA_TMP0r = 583, + SE_ALG_CPU_RD_DATA_TMP1r = 584, + SE_ALG_CPU_RD_DATA_TMP2r = 585, + SE_ALG_CPU_RD_DATA_TMP3r = 586, + SE_ALG_CPU_RD_DATA_TMP4r = 587, + SE_ALG_CPU_RD_DATA_TMP5r = 588, + SE_ALG_CPU_RD_DATA_TMP6r = 589, + SE_ALG_CPU_RD_DATA_TMP7r = 590, + SE_ALG_CPU_RD_DATA_TMP8r = 591, + SE_ALG_CPU_RD_DATA_TMP9r = 592, + SE_ALG_CPU_RD_DATA_TMP10r = 593, + SE_ALG_CPU_RD_DATA_TMP11r = 594, + SE_ALG_CPU_RD_DATA_TMP12r = 595, + SE_ALG_CPU_RD_DATA_TMP13r = 596, + SE_ALG_CPU_RD_DATA_TMP14r = 597, + SE_ALG_CPU_RD_DATA_TMP15r = 598, + SE_ALG_LPM_V4_CONFIG_RGTr = 599, + 
SE_ALG_LPM_V6_CONFIG_RGTr = 600, + SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_ASTr = 601, + SE_AS_HASH_AGE_PAT_CFGr = 602, + SE_AS_LEARN_RDY_CFGr = 603, + SE_KSCHD_KSCHD_AS_PFUL_CFGr = 604, + SE_KSCHD_KSCHD_DIR_PFUL_CFGr = 605, + SE_KSCHD_KSCHD_AS_EPT_CFGr = 606, + SE_KSCHD_CPU_ARBI_PFUL_CFGr = 607, + SE_KSCHD_KSCHD_PBU_PFUL_CFGr = 608, + SE_RSCHD_RSCHD_DIR_PFUL_CFGr = 609, + SE_RSCHD_RSCHD_DIR_EPT_CFGr = 610, + SE4K_SE_ALG_CPU_CMD_RGTr = 611, + SE4K_SE_ALG_CPU_WR_DATA_TMP0r = 612, + SE4K_SE_ALG_CPU_WR_DATA_TMP1r = 613, + SE4K_SE_ALG_CPU_WR_DATA_TMP2r = 614, + SE4K_SE_ALG_CPU_WR_DATA_TMP3r = 615, + SE4K_SE_ALG_CPU_WR_DATA_TMP4r = 616, + SE4K_SE_ALG_CPU_WR_DATA_TMP5r = 617, + SE4K_SE_ALG_CPU_WR_DATA_TMP6r = 618, + SE4K_SE_ALG_CPU_WR_DATA_TMP7r = 619, + SE4K_SE_ALG_CPU_WR_DATA_TMP8r = 620, + SE4K_SE_ALG_CPU_WR_DATA_TMP9r = 621, + SE4K_SE_ALG_CPU_WR_DATA_TMP10r = 622, + SE4K_SE_ALG_CPU_WR_DATA_TMP11r = 623, + SE4K_SE_ALG_CPU_WR_DATA_TMP12r = 624, + SE4K_SE_ALG_CPU_WR_DATA_TMP13r = 625, + SE4K_SE_ALG_CPU_WR_DATA_TMP14r = 626, + SE4K_SE_ALG_CPU_WR_DATA_TMP15r = 627, + SE4K_SE_ALG_CPU_RD_RDYr = 628, + SE4K_SE_ALG_CPU_RD_DATA_TMP0r = 629, + SE4K_SE_ALG_CPU_RD_DATA_TMP1r = 630, + SE4K_SE_ALG_CPU_RD_DATA_TMP2r = 631, + SE4K_SE_ALG_CPU_RD_DATA_TMP3r = 632, + SE4K_SE_ALG_CPU_RD_DATA_TMP4r = 633, + SE4K_SE_ALG_CPU_RD_DATA_TMP5r = 634, + SE4K_SE_ALG_CPU_RD_DATA_TMP6r = 635, + SE4K_SE_ALG_CPU_RD_DATA_TMP7r = 636, + SE4K_SE_ALG_CPU_RD_DATA_TMP8r = 637, + SE4K_SE_ALG_CPU_RD_DATA_TMP9r = 638, + SE4K_SE_ALG_CPU_RD_DATA_TMP10r = 639, + SE4K_SE_ALG_CPU_RD_DATA_TMP11r = 640, + SE4K_SE_ALG_CPU_RD_DATA_TMP12r = 641, + SE4K_SE_ALG_CPU_RD_DATA_TMP13r = 642, + SE4K_SE_ALG_CPU_RD_DATA_TMP14r = 643, + SE4K_SE_ALG_CPU_RD_DATA_TMP15r = 644, + SE4K_SE_ALG_HASH0_EXT_CFG_RGTr = 645, + SE4K_SE_ALG_HASH1_EXT_CFG_RGTr = 646, + SE4K_SE_ALG_HASH2_EXT_CFG_RGTr = 647, + SE4K_SE_ALG_HASH3_EXT_CFG_RGTr = 648, + SE4K_SE_ALG_HASH0_TBL30_DEPTHr = 649, + SE4K_SE_ALG_HASH0_TBL74_DEPTHr = 650, + 
SE4K_SE_ALG_HASH1_TBL30_DEPTHr = 651, + SE4K_SE_ALG_HASH1_TBL74_DEPTHr = 652, + SE4K_SE_ALG_HASH2_TBL30_DEPTHr = 653, + SE4K_SE_ALG_HASH2_TBL74_DEPTHr = 654, + SE4K_SE_ALG_HASH3_TBL30_DEPTHr = 655, + SE4K_SE_ALG_HASH3_TBL74_DEPTHr = 656, + SE4K_SE_ALG_WR_RSP_CFGr = 657, + SE4K_SE_ALG_HASH_MONO_FLAGr = 658, + SE4K_SE_ALG_HASH10_EXT_CRC_CFGr = 659, + SE4K_SE_ALG_HASH32_EXT_CRC_CFGr = 660, + SE4K_SE_ALG_ZBLOCK_SERVICE_CONFIGUREr = 661, + SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONOr = 662, + SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONOr = 663, + SMMU0_SMMU0_INIT_DONEr = 664, + SMMU0_SMMU0_CPU_IND_WDAT0r = 665, + SMMU0_SMMU0_CPU_IND_WDAT1r = 666, + SMMU0_SMMU0_CPU_IND_WDAT2r = 667, + SMMU0_SMMU0_CPU_IND_WDAT3r = 668, + SMMU0_SMMU0_CPU_IND_CMDr = 669, + SMMU0_SMMU0_CPU_IND_RD_DONEr = 670, + SMMU0_SMMU0_CPU_IND_RDAT0r = 671, + SMMU0_SMMU0_CPU_IND_RDAT1r = 672, + SMMU0_SMMU0_CPU_IND_RDAT2r = 673, + SMMU0_SMMU0_CPU_IND_RDAT3r = 674, + SMMU0_SMMU0_CFG_PLCR_MONOr = 675, + SMMU0_SMMU0_WR_ARB_CPU_RDYr = 676, + SMMU0_SMMU0_TM_STAT_EN_CFGr = 677, + SE_SMMU1_DDR_WDAT0r = 678, + SE_SMMU1_DIR_ARBI_SER_RPFULr = 679, + SE_SMMU1_CFG_WR_ARBI_PFUL2r = 680, + SE_SMMU1_ETM_TBL_CFGr = 681, + SE_SMMU1_CFG_CASH_ADDR_PFULr = 682, + SE_SMMU1_CTRL_RFIFO_CFGr = 683, + SE_SMMU1_CACHE_REQ_FIFO_CFGr = 684, + STAT_STAT_CFG_CPU_IND_ERAM_WDAT0r = 685, + STAT_STAT_CFG_ETM_PORT_SEL_CFGr = 686, + STAT_STAT_CFG_TM_STAT_CFGr = 687, + STAT_STAT_CFG_PPU_ERAM_DEPTHr = 688, + STAT_STAT_CFG_PPU_ERAM_BASE_ADDRr = 689, + STAT_STAT_CFG_PPU_DDR_BASE_ADDRr = 690, + STAT_STAT_CFG_PLCR0_BASE_ADDRr = 691, + STAT_STAT_CFG_ETM_STAT_START_ADDR_CFGr = 692, + STAT_STAT_CFG_ETM_STAT_DEPTH_CFGr = 693, + STAT_STAT_CFG_CYCLE_MOV_EN_CFGr = 694, + STAT_ETCAM_CPU_IND_WDAT0r = 695, + STAT_ETCAM_CPU_IND_CTRL_TMP0r = 696, + STAT_ETCAM_CPU_IND_CTRL_TMP1r = 697, + STAT_ETCAM_CPU_IND_RD_DONEr = 698, + STAT_ETCAM_CPU_RDAT0r = 699, + STAT_ETCAM_CPU_RDAT1r = 700, + STAT_ETCAM_CPU_RDAT2r = 701, + STAT_ETCAM_CPU_RDAT3r = 702, + STAT_ETCAM_CPU_RDAT4r = 703, + 
STAT_ETCAM_CPU_RDAT5r = 704, + STAT_ETCAM_CPU_RDAT6r = 705, + STAT_ETCAM_CPU_RDAT7r = 706, + STAT_ETCAM_CPU_RDAT8r = 707, + STAT_ETCAM_CPU_RDAT9r = 708, + STAT_ETCAM_CPU_RDAT10r = 709, + STAT_ETCAM_CPU_RDAT11r = 710, + STAT_ETCAM_CPU_RDAT12r = 711, + STAT_ETCAM_CPU_RDAT13r = 712, + STAT_ETCAM_CPU_RDAT14r = 713, + STAT_ETCAM_CPU_RDAT15r = 714, + STAT_ETCAM_CPU_RDAT16r = 715, + STAT_ETCAM_CPU_RDAT17r = 716, + STAT_ETCAM_CPU_RDAT18r = 717, + STAT_ETCAM_CPU_RDAT19r = 718, + STAT_ETCAM_QVBOr = 719, + STAT_ETCAM_CNT_OVERFLOW_MODEr = 720, + STAT_CAR0_CARA_QUEUE_RAM0_159_0r = 721, + STAT_CAR0_CARA_PROFILE_RAM1_255_0r = 722, + STAT_CAR0_CARA_QOVS_RAM_RAM2r = 723, + STAT_CAR0_LOOK_UP_TABLE1r = 724, + STAT_CAR0_CARA_PKT_DES_I_CNTr = 725, + STAT_CAR0_CARA_GREEN_PKT_I_CNTr = 726, + STAT_CAR0_CARA_YELLOW_PKT_I_CNTr = 727, + STAT_CAR0_CARA_RED_PKT_I_CNTr = 728, + STAT_CAR0_CARA_PKT_DES_O_CNTr = 729, + STAT_CAR0_CARA_GREEN_PKT_O_CNTr = 730, + STAT_CAR0_CARA_YELLOW_PKT_O_CNTr = 731, + STAT_CAR0_CARA_RED_PKT_O_CNTr = 732, + STAT_CAR0_CARA_PKT_DES_FC_FOR_CFG_CNTr = 733, + STAT_CAR0_CARA_APPOINT_QNUM_OR_SPr = 734, + STAT_CAR0_CARA_CFGMT_COUNT_MODEr = 735, + STAT_CAR0_CARA_PKT_SIZE_CNTr = 736, + STAT_CAR0_CARA_PLCR_INIT_DONTr = 737, + STAT_CAR0_CARB_QUEUE_RAM0_159_0r = 738, + STAT_CAR0_CARB_PROFILE_RAM1_255_0r = 739, + STAT_CAR0_CARB_QOVS_RAM_RAM2r = 740, + STAT_CAR0_LOOK_UP_TABLE2r = 741, + STAT_CAR0_CARB_PKT_DES_I_CNTr = 742, + STAT_CAR0_CARB_GREEN_PKT_I_CNTr = 743, + STAT_CAR0_CARB_YELLOW_PKT_I_CNTr = 744, + STAT_CAR0_CARB_RED_PKT_I_CNTr = 745, + STAT_CAR0_CARB_PKT_DES_O_CNTr = 746, + STAT_CAR0_CARB_GREEN_PKT_O_CNTr = 747, + STAT_CAR0_CARB_YELLOW_PKT_O_CNTr = 748, + STAT_CAR0_CARB_RED_PKT_O_CNTr = 749, + STAT_CAR0_CARB_PKT_DES_FC_FOR_CFG_CNTr = 750, + STAT_CAR0_CARB_APPOINT_QNUM_OR_SPr = 751, + STAT_CAR0_CARB_CFGMT_COUNT_MODEr = 752, + STAT_CAR0_CARB_PKT_SIZE_CNTr = 753, + STAT_CAR0_CARB_PLCR_INIT_DONTr = 754, + STAT_CAR0_CARC_QUEUE_RAM0_159_0r = 755, + 
STAT_CAR0_CARC_PROFILE_RAM1_255_0r = 756, + STAT_CAR0_CARC_QOVS_RAM_RAM2r = 757, + STAT_CAR0_CARC_PKT_DES_I_CNTr = 758, + STAT_CAR0_CARC_GREEN_PKT_I_CNTr = 759, + STAT_CAR0_CARC_YELLOW_PKT_I_CNTr = 760, + STAT_CAR0_CARC_RED_PKT_I_CNTr = 761, + STAT_CAR0_CARC_PKT_DES_O_CNTr = 762, + STAT_CAR0_CARC_GREEN_PKT_O_CNTr = 763, + STAT_CAR0_CARC_YELLOW_PKT_O_CNTr = 764, + STAT_CAR0_CARC_RED_PKT_O_CNTr = 765, + STAT_CAR0_CARC_PKT_DES_FC_FOR_CFG_CNTr = 766, + STAT_CAR0_CARC_APPOINT_QNUM_OR_SPr = 767, + STAT_CAR0_CARC_CFGMT_COUNT_MODEr = 768, + STAT_CAR0_CARC_PKT_SIZE_CNTr = 769, + STAT_CAR0_CARC_PLCR_INIT_DONTr = 770, + STAT_CAR0_CARB_RANDOM_RAMr = 771, + STAT_CAR0_CARC_RANDOM_RAMr = 772, + STAT_CAR0_CARA_BEGIN_FLOW_IDr = 773, + STAT_CAR0_CARB_BEGIN_FLOW_IDr = 774, + STAT_CAR0_CARC_BEGIN_FLOW_IDr = 775, + STAT_CAR0_PROG_FULL_ASSERT_CFG_Wr = 776, + STAT_CAR0_PROG_FULL_NEGATE_CFG_Wr = 777, + STAT_CAR0_TIMEOUT_LIMITr = 778, + STAT_CAR0_PKT_DES_FIFO_OVERFLOWr = 779, + STAT_CAR0_PKT_DES_FIFO_UNDERFLOWr = 780, + STAT_CAR0_PKT_DES_FIFO_PROG_FULLr = 781, + STAT_CAR0_PKT_DES_FIFO_PROG_EMPTYr = 782, + STAT_CAR0_PKT_DES_FIFO_FULLr = 783, + STAT_CAR0_PKT_DES_FIFO_EMPTYr = 784, + STAT_CAR0_PKT_SIZE_OFFSETr = 785, + STAT_CAR0_CAR_PLCR_INIT_DONTr = 786, + STAT_CAR0_MAX_PKT_SIZE_Ar = 787, + STAT_CAR0_MAX_PKT_SIZE_Br = 788, + STAT_CAR0_MAX_PKT_SIZE_Cr = 789, + STAT_CAR0_CAR_HIERARCHY_MODEr = 790, + STAT_CAR0_PROG_EMPTY_ASSERT_CFG_Wr = 791, + STAT_CAR0_PROG_EMPTY_NEGATE_CFG_Wr = 792, + STAT_CAR0_PKT_DES_FIFO_OVF_INTr = 793, + STAT_CAR0_PKT_DES_FIFO_DATA_COUNTr = 794, + STAT_CAR0_PKT_DES_FIFO_UDF_INTr = 795, + STAT_CAR0_CARA_QUEUE_RAM0_159_0_PKTr = 796, + STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKTr = 797, + STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFGr = 798, + STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFGr = 799, + STAT4K_ETCAM_BLOCK4_7_BASE_ADDR_CFGr = 800, + DTB_DTB_CFG_CFG_ERAM_WR_INTERVAL_CNTr = 801, + DTB_DTB_CFG_CFG_ZCAM_WR_INTERVAL_CNTr = 802, + DTB_DTB_CFG_CFG_TCAM_WR_INTERVAL_CNTr = 803, + 
DTB_DTB_CFG_CFG_DDR_WR_INTERVAL_CNTr = 804, + DTB_DTB_CFG_CFG_HASH_WR_INTERVAL_CNTr = 805, + DTB_DTB_CFG_CFG_ERAM_RD_INTERVAL_CNTr = 806, + DTB_DTB_CFG_CFG_ZCAM_RD_INTERVAL_CNTr = 807, + DTB_DTB_CFG_CFG_TCAM_RD_INTERVAL_CNTr = 808, + DTB_DTB_CFG_CFG_DDR_RD_INTERVAL_CNTr = 809, + DTB_DTB_CFG_CFG_DTB_QUEUE_LOCK_STATE_0_3r = 810, + DTB_DTB_AXIM0_W_CONVERT_0_MODEr = 811, + DTB_DTB_AXIM0_R_CONVERT_0_MODEr = 812, + DTB_DTB_AXIM0_AXIMR_OSr = 813, + DTB_DTB_AXIM1_W_CONVERT_1_MODEr = 814, + DTB_DTB_AXIM1_R_CONVERT_1_MODEr = 815, + DTB_DTB_AXIS_AXIS_CONVERT_MODEr = 816, + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_H_0_127r = 817, + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_L_0_127r = 818, + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_LEN_0_127r = 819, + DTB4K_DTB_ENQ_INFO_QUEUE_BUF_SPACE_LEFT_0_127r = 820, + DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127r = 821, + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_ENr = 822, + TRPG_TRPG_RX_PORT_CPU_TRPG_PORT_ENr = 823, + TRPG_TRPG_RX_PORT_CPU_TRPG_LOOK_ENr = 824, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_ALMOST_FULLr = 825, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_TEST_ENr = 826, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_INMOD_PFC_RDY_ENr = 827, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_Hr = 828, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_Lr = 829, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_Hr = 830, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_Lr = 831, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_CNT_CLRr = 832, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_CLK_FREQr = 833, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_ENr = 834, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_ADD_NUMr = 835, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_MAX_NUMr = 836, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PORT_STATE_INFOr = 837, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEPr = 838, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEP_CLRr = 839, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LENr = 840, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LEN_CLRr = 841, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LENr = 842, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LEN_CLRr = 843, + 
TRPG_TRPG_RX_RAM_TRPG_RX_DATA_RAMr = 844, + TRPG_TRPG_RX_RAM_TRPG_RX_INFO_RAMr = 845, + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_ENr = 846, + TRPG_TRPG_TX_PORT_CPU_TRPG_PORT_ENr = 847, + TRPG_TRPG_TX_PORT_CPU_TRPG_LOOK_ENr = 848, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_ALMOST_FULLr = 849, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_TEST_ENr = 850, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_Hr = 851, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_Lr = 852, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_Hr = 853, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_Lr = 854, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_CNT_CLRr = 855, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_CLK_FREQr = 856, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_ENr = 857, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_ADD_NUMr = 858, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_MAX_NUMr = 859, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PORT_STATE_INFOr = 860, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEPr = 861, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEP_CLRr = 862, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LENr = 863, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LEN_CLRr = 864, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LENr = 865, + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LEN_CLRr = 866, + TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_ALMOST_FULLr = 867, + TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_TEST_ENr = 868, + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_MASKr = 869, + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_CLRr = 870, + TRPG_TRPG_TX_GLB_CPU_TODTIME_RAM_TEST_ENr = 871, + TRPG_TRPG_TX_RAM_TRPG_TX_DATA_RAMr = 872, + TRPG_TRPG_TX_RAM_TRPG_TX_INFO_RAMr = 873, + TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_DATA_RAMr = 874, + TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_INFO_RAMr = 875, + ETM_CFGMT_CHIP_VERSION_REGr = 876, + ETM_CFGMT_CHIP_DATE_REGr = 877, + ETM_CFGMT_CFGMT_CRC_ENr = 878, + ETM_CFGMT_CFG_PORT_TRANSFER_ENr = 879, + ETM_CFGMT_TM_SA_WORK_MODEr = 880, + ETM_CFGMT_LOCAL_SA_IDr = 881, + ETM_OLIF_OLIF_RDYr = 882, + ETM_OLIF_EMEM_PROG_FULLr = 883, + ETM_OLIF_PORT_ORDER_FIFO_FULLr = 884, + 
ETM_OLIF_OLIF_RELEASE_LASTr = 885, + ETM_OLIF_OLIF_FIFO_EMPTY_STATEr = 886, + ETM_OLIF_QMU_OLIF_RELEASE_FC_CNTr = 887, + ETM_OLIF_OLIF_QMU_LINK_FC_CNTr = 888, + ETM_OLIF_LIF0_LINK_FC_CNTr = 889, + ETM_OLIF_OLIF_TMMU_FC_CNTr = 890, + ETM_OLIF_OLIF_MMU_FC_CNTr = 891, + ETM_OLIF_OLIF_QMU_PORT_RDY_Hr = 892, + ETM_OLIF_OLIF_QMU_PORT_RDY_Lr = 893, + ETM_OLIF_LIF0_PORT_RDY_Hr = 894, + ETM_OLIF_LIF0_PORT_RDY_Lr = 895, + ETM_OLIF_QMU_OLIF_RD_SOP_CNTr = 896, + ETM_OLIF_QMU_OLIF_RD_EOP_CNTr = 897, + ETM_OLIF_QMU_OLIF_RD_VLD_CNTr = 898, + ETM_OLIF_QMU_OLIF_RD_BLK_CNTr = 899, + ETM_OLIF_MMU_TM_DATA_SOP_CNTr = 900, + ETM_OLIF_MMU_TM_DATA_EOP_CNTr = 901, + ETM_OLIF_MMU_TM_DATA_VLD_CNTr = 902, + ETM_OLIF_ODMA_TM_DATA_SOP_CNTr = 903, + ETM_OLIF_ODMA_TM_DATA_EOP_CNTr = 904, + ETM_OLIF_ODMA_TM_DEQ_VLD_CNTr = 905, + ETM_OLIF_OLIF_QMU_RELEASE_VLD_CNTr = 906, + ETM_OLIF_EMEM_DAT_VLD_CNTr = 907, + ETM_OLIF_IMEM_DAT_VLD_CNTr = 908, + ETM_OLIF_EMEM_DAT_RD_CNTr = 909, + ETM_OLIF_IMEM_DAT_RD_CNTr = 910, + ETM_OLIF_QMU_OLIF_RD_SOP_EMEM_CNTr = 911, + ETM_OLIF_QMU_OLIF_RD_VLD_EMEM_CNTr = 912, + ETM_OLIF_CPU_LAST_WR_ADDRr = 913, + ETM_OLIF_CPU_LAST_WR_DATAr = 914, + ETM_OLIF_CPU_LAST_RD_ADDRr = 915, + ETM_OLIF_QMU_OLIF_LAST_PORTr = 916, + ETM_OLIF_QMU_OLIF_LAST_ADDRr = 917, + ETM_OLIF_QMU_OLIF_LAST_BANKr = 918, + ETM_OLIF_TM_LIF_BYTE_STATr = 919, + ETM_OLIF_TM_LIF_ERR_STATr = 920, + ETM_CGAVD_PORT_SHARE_CNTr = 921, + ETM_CGAVD_TOTAL_IMEM_CNTr = 922, + ETM_CGAVD_PP_Q_LENr = 923, + ETM_CGAVD_SYS_Q_LENr = 924, + ETM_CGAVD_CGAVD_CFG_ERROR_WARNINGr = 925, + ETM_CGAVD_MULT_QLEN_TH_ENr = 926, + ETM_CGAVD_MULT_QLEN_THr = 927, + ETM_CGAVD_CGAVD_CFG_MOVEr = 928, + ETM_CGAVD_CFGMT_TOTAL_THr = 929, + ETM_CGAVD_CFGMT_PORT_SHARE_THr = 930, + ETM_CGAVD_SA_UNREACH_STATEr = 931, + ETM_CGAVD_MV_PORT_THr = 932, + ETM_CGAVD_MV_DROP_SP_THr = 933, + ETM_CGAVD_CGAVD_STATE_WARNINGr = 934, + ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNTr = 935, + ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNT_MAXr = 936, + ETM_CGAVD_IMEM_TOTAL_CNTr = 937, + 
ETM_CGAVD_IMEM_TOTAL_CNT_MAXr = 938, + ETM_CGAVD_FLOW0_OMEM_CNTr = 939, + ETM_CGAVD_FLOW1_OMEM_CNTr = 940, + ETM_CGAVD_FLOW2_OMEM_CNTr = 941, + ETM_CGAVD_FLOW3_OMEM_CNTr = 942, + ETM_CGAVD_FLOW4_OMEM_CNTr = 943, + ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_1r = 944, + ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_2r = 945, + ETM_CGAVD_ODMA_CGAVD_PKT_NUM_1r = 946, + ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_1r = 947, + ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_1r = 948, + ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_1r = 949, + ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_1r = 950, + ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_1r = 951, + ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_1r = 952, + ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_1r = 953, + ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_1r = 954, + ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_1r = 955, + ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_1r = 956, + ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_1r = 957, + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_1r = 958, + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_1r = 959, + ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_1r = 960, + ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_1r = 961, + ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_1r = 962, + ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_1r = 963, + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_1r = 964, + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM1r = 965, + ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_1r = 966, + ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_1r = 967, + ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_1r = 968, + ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_1r = 969, + ETM_CGAVD_ODMA_CGAVD_PKT_NUM_2r = 970, + ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_2r = 971, + ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_2r = 972, + ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_2r = 973, + ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_2r = 974, + ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_2r = 975, + ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_2r = 976, + ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_2r = 977, + ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_2r = 978, + ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_2r = 979, + ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_2r = 980, + ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_2r = 981, + 
ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_2r = 982, + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_2r = 983, + ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_2r = 984, + ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_2r = 985, + ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_2r = 986, + ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_2r = 987, + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_2r = 988, + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM_2r = 989, + ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_2r = 990, + ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_2r = 991, + ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_2r = 992, + ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_2r = 993, + ETM_CGAVD_MOVE_FLOW_TH_PROFILEr = 994, + ETM_CGAVD_MOVE_FLOW_THr = 995, + ETM_TMMU_EMEM_PD_FIFO_AFUL_THr = 996, + ETM_TMMU_DMA_DATA_FIFO_AFUL_THr = 997, + ETM_TMMU_TMMU_STATES_0r = 998, + ETM_TMMU_QMU_TMMU_WR_SOP_CNTr = 999, + ETM_TMMU_QMU_TMMU_WR_EOP_CNTr = 1000, + ETM_TMMU_QMU_TMMU_WR_DROP_CNTr = 1001, + ETM_TMMU_QMU_TMMU_WR_EMEM_CNTr = 1002, + ETM_TMMU_QMU_TMMU_WR_IMEM_CNTr = 1003, + ETM_TMMU_TMMU_MMU_WR_SOP_CNTr = 1004, + ETM_TMMU_TMMU_MMU_WR_EOP_CNTr = 1005, + ETM_TMMU_QMU_TMMU_RD_SOP_CNTr = 1006, + ETM_TMMU_QMU_TMMU_RD_EOP_CNTr = 1007, + ETM_TMMU_QMU_TMMU_RD_DROP_CNTr = 1008, + ETM_TMMU_QMU_TMMU_RD_EMEM_CNTr = 1009, + ETM_TMMU_QMU_TMMU_RD_IMEM_CNTr = 1010, + ETM_TMMU_TMMU_MMU_RD_SOP_CNTr = 1011, + ETM_TMMU_TMMU_MMU_RD_EOP_CNTr = 1012, + ETM_TMMU_TMMU_ODMA_IN_SOP_CNTr = 1013, + ETM_TMMU_TMMU_ODMA_IN_EOP_CNTr = 1014, + ETM_TMMU_TMMU_ODMA_VLD_CNTr = 1015, + ETM_TMMU_QMU_PD_IN_CNTr = 1016, + ETM_TMMU_TMMU_PD_HIT_CNTr = 1017, + ETM_TMMU_TMMU_PD_OUT_CNTr = 1018, + ETM_TMMU_TMMU_WR_CMD_FIFO_WR_CNTr = 1019, + ETM_TMMU_TMMU_IMEM_AGE_CNTr = 1020, + ETM_TMMU_TMMU_CMDSCH_RD_CNTr = 1021, + ETM_TMMU_TMMU_CMDSCH_DROP_CNTr = 1022, + ETM_TMMU_TMMU_CMDSW_DROP_CNTr = 1023, + ETM_TMMU_TMMU_ODMA_ENQ_RD_CNTr = 1024, + ETM_TMMU_TMMU_ODMA_ENQ_DROP_CNTr = 1025, + ETM_TMMU_TMMU_ODMA_IMEM_AGE_CNTr = 1026, + ETM_TMMU_TMMU_ODMA_DEQ_RD_CNTr = 1027, + ETM_TMMU_TMMU_ODMA_DEQ_DROP_CNTr = 1028, + 
ETM_TMMU_OLIF_TMMU_XOFF_CNTr = 1029, + ETM_TMMU_ODMA_TM_DATA_XOFF_CNTr = 1030, + ETM_TMMU_TM_ODMA_PKT_XOFF_CNTr = 1031, + ETM_TMMU_TM_STATE_3r = 1032, + ETM_TMMU_CFGMT_PD_CACHE_CMDr = 1033, + ETM_TMMU_CFGMT_PD_CACHE_RD_DONEr = 1034, + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_0r = 1035, + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_1r = 1036, + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_2r = 1037, + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_3r = 1038, + ETM_TMMU_CFGMT_TMMU_TO_ODMA_PARAr = 1039, + ETM_TMMU_CFGMT_DMA_DATA_FIFO_CNTr = 1040, + ETM_TMMU_CFGMT_CACHE_TAG_BIT0_OFFSETr = 1041, + ETM_TMMU_CFGMT_CACHE_TAG_BIT1_OFFSETr = 1042, + ETM_TMMU_CFGMT_CACHE_TAG_BIT2_OFFSETr = 1043, + ETM_TMMU_CFGMT_CACHE_TAG_BIT3_OFFSETr = 1044, + ETM_TMMU_CFGMT_CACHE_TAG_BIT4_OFFSETr = 1045, + ETM_TMMU_CFGMT_CACHE_TAG_BIT5_OFFSETr = 1046, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT0_OFFSETr = 1047, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT1_OFFSETr = 1048, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT2_OFFSETr = 1049, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT3_OFFSETr = 1050, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT4_OFFSETr = 1051, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT5_OFFSETr = 1052, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT6_OFFSETr = 1053, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT7_OFFSETr = 1054, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT8_OFFSETr = 1055, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT9_OFFSETr = 1056, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT10_OFFSETr = 1057, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT11_OFFSETr = 1058, + ETM_TMMU_CFGMT_CACHE_INDEX_BIT12_OFFSETr = 1059, + ETM_SHAP_BKTFULL_FIFO_FULL_FLAGREGISTERr = 1060, + ETM_SHAP_FIFO_FULL_REGREGISTERr = 1061, + ETM_SHAP_FIFO_EMPTY_REGREGISTERr = 1062, + ETM_SHAP_FIFO_ALMOST_FULL_REGREGISTERr = 1063, + ETM_SHAP_FIFO_ALMOST_EMPTY_REGREGISTERr = 1064, + ETM_CRDT_CREDIT_SPACE_SELECTr = 1065, + ETM_CRDT_STAT_SPACE_MAXr = 1066, + ETM_CRDT_STAT_SPACE_MINr = 1067, + ETM_CRDT_STAT_SPACE_CREDITr = 1068, + ETM_CRDT_STAT_QUE_STEP8_CREDITr = 1069, + ETM_CRDT_SPECIAL_QUEr = 1070, + ETM_CRDT_SPECIAL_QUE_CREDITr = 1071, + ETM_CRDT_LIF_CONGEST_CREDIT_CNTr = 1072, + 
ETM_CRDT_LIF_PORT_CONGEST_CREDIT_CNTr = 1073, + ETM_CRDT_CRDT_CONGEST_CREDIT_CNTr = 1074, + ETM_CRDT_CRDT_PORT_CONGEST_CREDIT_CNTr = 1075, + ETM_CRDT_CONGEST_PORT_IDr = 1076, + ETM_CRDT_DEV_LINK_CONTROLr = 1077, + ETM_CRDT_CRDT_SA_PORT_RDYr = 1078, + ETM_CRDT_CRDT_CONGEST_MODE_SELECTr = 1079, + ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNTr = 1080, + ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNTr = 1081, + ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNTr = 1082, + ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNTr = 1083, + ETM_CRDT_MODE_ADD_60Gr = 1084, + ETM_CRDT_PP_TOKEN_ADDr = 1085, + ETM_CRDT_PP_CIR_TOKEN_TOTAL_DIST_CNTr = 1086, + ETM_CRDT_PP_CIR_TOKEN_TOTAL_DEC_CNTr = 1087, + ETM_CRDT_DEV_CREDIT_CNTr = 1088, + ETM_CRDT_NO_CREDIT_CNT1r = 1089, + ETM_CRDT_NO_CREDIT_CNT2r = 1090, + ETM_CRDT_ASM_INTERVAL_0_CFGr = 1091, + ETM_CRDT_ASM_INTERVAL_1_CFGr = 1092, + ETM_CRDT_ASM_INTERVAL_2_CFGr = 1093, + ETM_CRDT_ASM_INTERVAL_3_CFGr = 1094, + ETM_CRDT_ASM_INTERVAL_4_CFGr = 1095, + ETM_CRDT_ASM_INTERVAL_5CFGr = 1096, + ETM_CRDT_ASM_INTERVAL_6_CFGr = 1097, + ETM_CRDT_ASM_INTERVAL_7_CFGr = 1098, + ETM_CRDT_CRDT_TOTAL_CONGEST_MODE_CFGr = 1099, + ETM_CRDT_RCI_FIFO_INI_DEEP_CFGr = 1100, + ETM_CRDT_CRDT_ECCr = 1101, + ETM_CRDT_UCN_ASM_RDY_SHIELD_ENr = 1102, + ETM_CRDT_UCN_ASM_RDYr = 1103, + ETM_CRDT_RCI_GRADEr = 1104, + ETM_CRDT_CRDT_RCI_VALUE_Rr = 1105, + ETM_CRDT_CRDT_INTERVAL_NOWr = 1106, + ETM_CRDT_CRS_SHEILD_FLOW_ID_CFGr = 1107, + ETM_CRDT_CRS_SHEILD_EN_CFGr = 1108, + ETM_CRDT_CRS_SHEILD_VALUE_CFGr = 1109, + ETM_CRDT_TEST_TOKEN_CALC_CTRLr = 1110, + ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUMr = 1111, + ETM_CRDT_Q_STATE_0_7r = 1112, + ETM_CRDT_Q_STATE_8_15r = 1113, + ETM_QMU_CSW_CSCH_RD_CMD_CNTr = 1114, + ETM_QMU_CSW_CSCH_RD_SOP_CNTr = 1115, + ETM_QMU_CSW_CSCH_RD_EOP_CNTr = 1116, + ETM_QMU_CSW_CSCH_RD_DROP_CNTr = 1117, + ETM_QMU_CSCH_MMU_RD_CMD_CNTr = 1118, + ETM_QMU_CSCH_MMU_RD_SOP_CNTr = 1119, + ETM_QMU_CSCH_MMU_RD_EOP_CNTr = 1120, + ETM_QMU_CSCH_MMU_RD_DROP_CNTr = 1121, + ETM_QMU_QCFG_QSCH_CRS_FILTERr = 1122, + 
ETM_QMU_QCFG_QSCH_CRS_FORCE_ENr = 1123, + ETM_QMU_QCFG_QSCH_CRS_FORCE_QNUMr = 1124, + ETM_QMU_QCFG_QSCH_CRS_FORCE_CRSr = 1125, + ETM_QMU_CFGMT_OSHP_SGMII_SHAP_MODEr = 1126, + ETM_QMU_CFGMT_QMU_SASHAP_ENr = 1127, + ETM_QMU_CFGMT_SASHAP_TOKEN_MAXr = 1128, + ETM_QMU_CFGMT_SASHAP_TOKEN_MINr = 1129, + ETM_QMU_CFG_QSCH_Q3LBADDRATEr = 1130, + ETM_QMU_CFG_QSCH_Q012LBADDRATEr = 1131, + ETM_QMU_CFG_QSCH_Q3CREDITLBMAXCNTr = 1132, + ETM_QMU_CFG_QSCH_Q012CREDITLBMAXCNTr = 1133, + ETM_QMU_CFG_QSCH_MUL_TOKEN_GEN_NUMr = 1134, + ETM_QMU_CFG_QSCH_Q3_CREDIT_LB_CONTROL_ENr = 1135, + ETM_QMU_CFG_QSCH_Q012_CREDIT_LB_CONTROL_ENr = 1136, + ETM_QMU_CFG_QSCH_SP_DWRR_ENr = 1137, + ETM_QMU_CFG_QSCH_Q01_ATTACH_ENr = 1138, + ETM_QMU_CFG_QSCH_W0r = 1139, + ETM_QMU_CFG_QSCH_W1r = 1140, + ETM_QMU_CFG_QSCH_W2r = 1141, + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT1r = 1142, + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT2r = 1143, + ETM_QMU_CFG_QSCH_LKYBKTDCRRATE1r = 1144, + ETM_QMU_CFG_QSCH_LKYBKTDCRRATE2r = 1145, + ETM_QMU_CFG_QSCH_LKYBKTDCRRATE3r = 1146, + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT3r = 1147, + ETM_QMU_CFG_QSCH_QMU_MUL_AUTO_SA_VERSIONr = 1148, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0r = 1149, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_1r = 1150, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_2r = 1151, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_3r = 1152, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_4r = 1153, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_5r = 1154, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_6r = 1155, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_7r = 1156, + ETM_QMU_CFG_QSCH_REMOTE_CREDIT_FIFO_ALMOST_FULL_THr = 1157, + ETM_QMU_CFG_QSCH_AUTO_CREDIT_FIFO_ALMOST_FULL_THr = 1158, + ETM_QMU_CFG_QSCH_Q3_CREDIT_FIFO_ALMOST_FULL_THr = 1159, + ETM_QMU_CFG_QSCH_Q012_CREDIT_FIFO_ALMOST_FULL_THr = 1160, + ETM_QMU_CFG_QSCH_MUL_FC_RES_ENr = 1161, + ETM_QMU_CFGMT_MUL_OVF_UDF_FLG_QUERYr = 1162, + ETM_QMU_CFGMT_MUL_CNG_FLG_QUERYr = 1163, + ETM_QMU_QSCH_CFG_LKYBKTVAL1r = 1164, + ETM_QMU_QSCH_CFG_LKYBKTVAL2r = 1165, + ETM_QMU_QSCH_CFG_LKYBKTVAL3r = 1166, + ETM_QMU_QSCH_CFG_Q3LBVALr = 1167, + 
ETM_QMU_QSCH_CFG_Q012LBVALr = 1168, + ETM_QMU_QLIST_CFGMT_RAM_ECC_ERR2r = 1169, + ETM_QMU_CSCH_AGED_CMD_CNTr = 1170, + ETM_QMU_CSCH_QCFG_CSCH_CONGEST_CNTr = 1171, + ETM_QMU_CSCH_QCFG_QLIST_CSCH_SOP_CNTr = 1172, + ETM_QMU_CSCH_QCFG_QLIST_CSCH_EOP_CNTr = 1173, + ETM_QMU_CSCH_QCFG_CSCH_CSW_SOP_CNTr = 1174, + ETM_QMU_CSCH_QCFG_CSCH_CSW_EOP_CNTr = 1175, + ETM_QMU_CSCH_QCFG_QLIST_CSCH_DROP_CNTr = 1176, + ETM_QMU_CSCH_QCFG_CSCH_CSW_DROP_CNTr = 1177, + ETM_QMU_CSW_MMU_SOP_CMD_CNTr = 1178, + ETM_QMU_MMU_CSW_SOP_DATA_CNTr = 1179, + ETM_QMU_CSW_QSCH_FEEDB_CNTr = 1180, + ETM_QMU_QMU_CRDT_PORT_FC_CNTr = 1181, + ETM_QMU_CSCH_R_BLOCK_CNTr = 1182, + ETM_QMU_QCFG_QLIST_QDS_HEAD_RDr = 1183, + ETM_QMU_QCFG_QLIST_QDS_TAIL_RDr = 1184, + ETM_QMU_QCFG_QLIST_EPT_RDr = 1185, + ETM_QMU_QCFG_QLIST_AGE_FLAG_RDr = 1186, + ETM_QMU_QCFG_QLIST_CTI_RDr = 1187, + ETM_QMU_QCFG_QLIST_CTO_RDr = 1188, + ETM_QMU_QCFG_QLIST_CHK_RDr = 1189, + ETM_QMU_QCFG_QLIST_NOD_RDr = 1190, + ETM_QMU_QCFG_QLIST_BIU_RDr = 1191, + ETM_QMU_QSCH_R_WLIST_FLAGr = 1192, + ETM_QMU_QCFG_CRS_FLG_RDr = 1193, + ETM_QMU_CFGMT_QMU_IMEM_AGE_QDSr = 1194, + ETM_QMU_CFGMT_QMU_IMEM_AGE_QLENr = 1195, + ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_LOWr = 1196, + ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_HIGHr = 1197, + ETM_QMU_CFGMT_QMU_IMEM_UP_PTRr = 1198, + ETM_QMU_CFGMT_QMU_IMEM_DOWN_PTRr = 1199, + ETM_QMU_CFGMT_QMU_IMEM_AGE_FLAGr = 1200, + ETM_QMU_CFG_QSCH_LKYBKT2CNGTHr = 1201, + ETM_QMU_CFG_QSCH_LKYBKT1CNGTHr = 1202, + ETM_QMU_CFG_QSCH_LKYBKT3CNGTHr = 1203, + ETM_QMU_CFG_QSCH_RM_MUL_MCN1_CREDIT_VALUEr = 1204, + ETM_QMU_CFG_QSCH_RM_MUL_MCN2_CREDIT_VALUEr = 1205, + ETM_QMU_CFG_QSCH_RM_MUL_MCN3_CREDIT_VALUEr = 1206, + ETM_QMU_RM_MUL_MCN1_RAND_ANSR_SEEDr = 1207, + ETM_QMU_RM_MUL_MCN2_RAND_ANSR_SEEDr = 1208, + ETM_QMU_RM_MUL_MCN3_RAND_ANSR_SEEDr = 1209, + ETM_QMU_RM_MUL_MCN1_RAND_ANSR_THr = 1210, + ETM_QMU_RM_MUL_MCN2_RAND_ANSR_THr = 1211, + ETM_QMU_RM_MUL_MCN3_RAND_ANSR_THr = 1212, + ETM_QMU_RM_MUL_MCN1_RAND_HOLD_BASEr = 1213, + ETM_QMU_RM_MUL_MCN2_RAND_HOLD_BASEr 
= 1214, + ETM_QMU_RM_MUL_MCN3_RAND_HOLD_BASEr = 1215, + ETM_QMU_RM_MUL_MCN1_RAND_SEL_MASKr = 1216, + ETM_QMU_RM_MUL_MCN2_RAND_SEL_MASKr = 1217, + ETM_QMU_RM_MUL_MCN3_RAND_SEL_MASKr = 1218, + ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG0r = 1219, + ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG1r = 1220, + ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG0r = 1221, + ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG1r = 1222, + ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG0r = 1223, + ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG1r = 1224, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH1r = 1225, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH2r = 1226, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH3r = 1227, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH4r = 1228, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH5r = 1229, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH6r = 1230, + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH7r = 1231, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH1r = 1232, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH2r = 1233, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH3r = 1234, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH4r = 1235, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH5r = 1236, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH6r = 1237, + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH7r = 1238, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH1r = 1239, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH2r = 1240, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH3r = 1241, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH4r = 1242, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH5r = 1243, + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH6r = 1244, + ETM_QMU_RM_MUL_MCN3STEP_WAIT_TH7r = 1245, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE0r = 1246, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE1r = 1247, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE2r = 1248, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE3r = 1249, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE4r = 1250, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE5r = 1251, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE6r = 1252, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE7r = 1253, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE8r = 1254, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE9r = 1255, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE10r = 1256, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE11r = 1257, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE12r = 1258, + 
ETM_QMU_CFG_QSCH_MULCRDCNTRATE13r = 1259, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE14r = 1260, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE15r = 1261, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE16r = 1262, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE17r = 1263, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE18r = 1264, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE19r = 1265, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE20r = 1266, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE21r = 1267, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE22r = 1268, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE23r = 1269, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE24r = 1270, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE25r = 1271, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE26r = 1272, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE27r = 1273, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE28r = 1274, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE29r = 1275, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE30r = 1276, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE31r = 1277, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE32r = 1278, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE33r = 1279, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE34r = 1280, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE35r = 1281, + ETM_QMU_CFG_QSCH_MULCRDCNTRATE36r = 1282, + ETM_QMU_CFG_QSCH_RM_MUL_MCN1_RAND_HOLD_SHIFTr = 1283, + ETM_QMU_CFG_QSCH_RM_MUL_MCN2_RAND_HOLD_SHIFTr = 1284, + ETM_QMU_CFG_QSCH_RM_MUL_MCN3_RAND_HOLD_SHIFTr = 1285, + ETM_QMU_LAST_DROP_QNUM_GETr = 1286, + ETM_QMU_CRDT_QMU_CREDIT_CNTr = 1287, + ETM_QMU_QMU_TO_QSCH_REPORT_CNTr = 1288, + ETM_QMU_QMU_TO_CGAVD_REPORT_CNTr = 1289, + ETM_QMU_QMU_CRDT_CRS_NORMAL_CNTr = 1290, + ETM_QMU_QMU_CRDT_CRS_OFF_CNTr = 1291, + ETM_QMU_QSCH_QLIST_SHEDULE_CNTr = 1292, + ETM_QMU_QSCH_QLIST_SCH_EPT_CNTr = 1293, + ETM_QMU_QMU_TO_MMU_BLK_WR_CNTr = 1294, + ETM_QMU_QMU_TO_CSW_BLK_RD_CNTr = 1295, + ETM_QMU_QMU_TO_MMU_SOP_WR_CNTr = 1296, + ETM_QMU_QMU_TO_MMU_EOP_WR_CNTr = 1297, + ETM_QMU_QMU_TO_MMU_DROP_WR_CNTr = 1298, + ETM_QMU_QMU_TO_CSW_SOP_RD_CNTr = 1299, + ETM_QMU_QMU_TO_CSW_EOP_RD_CNTr = 1300, + ETM_QMU_QMU_TO_CSW_DROP_RD_CNTr = 1301, + ETM_QMU_MMU_TO_QMU_WR_RELEASE_CNTr = 1302, + ETM_QMU_MMU_TO_QMU_RD_RELEASE_CNTr = 1303, + ETM_QMU_OBSERVE_QNUM_SETr 
= 1304, + ETM_QMU_SPEC_Q_PKT_RECEIVEDr = 1305, + ETM_QMU_SPEC_Q_PKT_DROPPEDr = 1306, + ETM_QMU_SPEC_Q_PKT_SCHEDULEDr = 1307, + ETM_QMU_SPEC_Q_WR_CMD_SENTr = 1308, + ETM_QMU_SPEC_Q_RD_CMD_SENTr = 1309, + ETM_QMU_SPEC_Q_PKT_ENQr = 1310, + ETM_QMU_SPEC_Q_PKT_DEQr = 1311, + ETM_QMU_SPEC_Q_CRDT_UNCON_RECEIVEDr = 1312, + ETM_QMU_SPEC_Q_CRDT_CONG_RECEIVEDr = 1313, + ETM_QMU_SPEC_Q_CRS_NORMAL_CNTr = 1314, + ETM_QMU_SPEC_Q_CRS_OFF_CNTr = 1315, + ETM_QMU_OBSERVE_BATCH_SETr = 1316, + ETM_QMU_SPEC_BAT_PKT_RECEIVEDr = 1317, + ETM_QMU_SPEC_BAT_PKT_DROPPEDr = 1318, + ETM_QMU_SPEC_BAT_BLK_SCHEDULEDr = 1319, + ETM_QMU_SPEC_BAT_WR_CMD_SENTr = 1320, + ETM_QMU_SPEC_BAT_RD_CMD_SENTr = 1321, + ETM_QMU_SPEC_BAT_PKT_ENQr = 1322, + ETM_QMU_SPEC_BAT_PKT_DEQr = 1323, + ETM_QMU_SPEC_BAT_CRDT_UNCON_RECEIVEDr = 1324, + ETM_QMU_SPEC_BAT_CRDT_CONG_RECEIVEDr = 1325, + ETM_QMU_SPEC_BAT_CRS_NORMAL_CNTr = 1326, + ETM_QMU_SPEC_BAT_CRS_OFF_CNTr = 1327, + ETM_QMU_BCNTM_OVFL_QNUM_GETr = 1328, + ETM_QMU_CRBAL_A_OVF_QNUM_GETr = 1329, + ETM_QMU_CRBAL_B_OVF_QNUM_GETr = 1330, + ETM_QMU_CRBAL_DROP_QNUM_GETr = 1331, + ETM_QMU_DEQ_FLG_REPORT_CNTr = 1332, + ETM_QMU_SPEC_Q_CRS_GETr = 1333, + ETM_QMU_SPEC_Q_CRS_IN_GETr = 1334, + ETM_QMU_SPEC_Q_CRS_FLG_CSOL_GETr = 1335, + ETM_QMU_EPT_SCH_QNUM_GETr = 1336, + CFG_PCIE_PCIE_DDR_SWITCHr = 1337, + CFG_PCIE_USER0_INT_ENr = 1338, + CFG_PCIE_USER0_INT_MASKr = 1339, + CFG_PCIE_USER0_INT_STATUSr = 1340, + CFG_PCIE_USER1_INT_ENr = 1341, + CFG_PCIE_USER1_INT_MASKr = 1342, + CFG_PCIE_USER1_INT_STATUSr = 1343, + CFG_PCIE_USER2_INT_ENr = 1344, + CFG_PCIE_USER2_INT_MASKr = 1345, + CFG_PCIE_USER2_INT_STATUSr = 1346, + CFG_PCIE_ECC_1B_INT_ENr = 1347, + CFG_PCIE_ECC_1B_INT_MASKr = 1348, + CFG_PCIE_ECC_1B_INT_STATUSr = 1349, + CFG_PCIE_ECC_2B_INT_ENr = 1350, + CFG_PCIE_ECC_2B_INT_MASKr = 1351, + CFG_PCIE_ECC_2B_INT_STATUSr = 1352, + CFG_PCIE_CFG_INT_STATUSr = 1353, + CFG_PCIE_I_CORE_TO_CNTLr = 1354, + CFG_PCIE_TEST_IN_LOWr = 1355, + CFG_PCIE_TEST_IN_HIGHr = 1356, + 
CFG_PCIE_LOCAL_INTERRUPT_OUTr = 1357, + CFG_PCIE_PL_LTSSMr = 1358, + CFG_PCIE_TEST_OUT0r = 1359, + CFG_PCIE_TEST_OUT1r = 1360, + CFG_PCIE_TEST_OUT2r = 1361, + CFG_PCIE_TEST_OUT3r = 1362, + CFG_PCIE_TEST_OUT4r = 1363, + CFG_PCIE_TEST_OUT5r = 1364, + CFG_PCIE_TEST_OUT6r = 1365, + CFG_PCIE_TEST_OUT7r = 1366, + CFG_PCIE_SYNC_O_CORE_STATUSr = 1367, + CFG_PCIE_SYNC_O_ALERT_DBEr = 1368, + CFG_PCIE_SYNC_O_ALERT_SBEr = 1369, + CFG_PCIE_SYNC_O_LINK_LOOPBACK_ENr = 1370, + CFG_PCIE_SYNC_O_LOCAL_FS_LF_VALIDr = 1371, + CFG_PCIE_SYNC_O_RX_IDLE_DETECTr = 1372, + CFG_PCIE_SYNC_O_RX_RDYr = 1373, + CFG_PCIE_SYNC_O_TX_RDYr = 1374, + CFG_PCIE_PCIE_LINK_UP_CNTr = 1375, + CFG_PCIE_TEST_OUT_PCIE0r = 1376, + CFG_PCIE_TEST_OUT_PCIE1r = 1377, + CFG_PCIE_TEST_OUT_PCIE2r = 1378, + CFG_PCIE_TEST_OUT_PCIE3r = 1379, + CFG_PCIE_TEST_OUT_PCIE4r = 1380, + CFG_PCIE_TEST_OUT_PCIE5r = 1381, + CFG_PCIE_TEST_OUT_PCIE6r = 1382, + CFG_PCIE_TEST_OUT_PCIE7r = 1383, + CFG_PCIE_TEST_OUT_PCIE8r = 1384, + CFG_PCIE_TEST_OUT_PCIE9r = 1385, + CFG_PCIE_TEST_OUT_PCIE10r = 1386, + CFG_PCIE_TEST_OUT_PCIE11r = 1387, + CFG_PCIE_TEST_OUT_PCIE12r = 1388, + CFG_PCIE_TEST_OUT_PCIE13r = 1389, + CFG_PCIE_TEST_OUT_PCIE14r = 1390, + CFG_PCIE_TEST_OUT_PCIE15r = 1391, + CFG_PCIE_INT_REPEAT_ENr = 1392, + CFG_PCIE_DBG_AWID_AXI_MSTr = 1393, + CFG_PCIE_DBG_AWADDR_AXI_MST0r = 1394, + CFG_PCIE_DBG_AWADDR_AXI_MST1r = 1395, + CFG_PCIE_DBG_AWLEN_AXI_MSTr = 1396, + CFG_PCIE_DBG_AWSIZE_AXI_MSTr = 1397, + CFG_PCIE_DBG_AWBURST_AXI_MSTr = 1398, + CFG_PCIE_DBG_AWLOCK_AXI_MSTr = 1399, + CFG_PCIE_DBG_AWCACHE_AXI_MSTr = 1400, + CFG_PCIE_DBG_AWPROT_AXI_MSTr = 1401, + CFG_PCIE_DBG_WID_AXI_MSTr = 1402, + CFG_PCIE_DBG_WDATA_AXI_MST0r = 1403, + CFG_PCIE_DBG_WDATA_AXI_MST1r = 1404, + CFG_PCIE_DBG_WDATA_AXI_MST2r = 1405, + CFG_PCIE_DBG_WDATA_AXI_MST3r = 1406, + CFG_PCIE_DBG_WSTRB_AXI_MSTr = 1407, + CFG_PCIE_DBG_WLAST_AXI_MSTr = 1408, + CFG_PCIE_DBG_ARID_AXI_MSTr = 1409, + CFG_PCIE_DBG_ARADDR_AXI_MST0r = 1410, + CFG_PCIE_DBG_ARADDR_AXI_MST1r = 1411, + 
CFG_PCIE_DBG_ARLEN_AXI_MSTr = 1412, + CFG_PCIE_DBG_ARSIZE_AXI_MSTr = 1413, + CFG_PCIE_DBG_ARBURST_AXI_MSTr = 1414, + CFG_PCIE_DBG_ARLOCK_AXI_MSTr = 1415, + CFG_PCIE_DBG_ARCACHE_AXI_MSTr = 1416, + CFG_PCIE_DBG_ARPROT_AXI_MSTr = 1417, + CFG_PCIE_DBG_RDATA_AXI_MST0r = 1418, + CFG_PCIE_DBG_RDATA_AXI_MST1r = 1419, + CFG_PCIE_DBG_RDATA_AXI_MST2r = 1420, + CFG_PCIE_DBG_RDATA_AXI_MST3r = 1421, + CFG_PCIE_AXI_MST_STATEr = 1422, + CFG_PCIE_AXI_CFG_STATEr = 1423, + CFG_PCIE_AXI_SLV_RD_STATEr = 1424, + CFG_PCIE_AXI_SLV_WR_STATEr = 1425, + CFG_PCIE_AXIM_DELAY_ENr = 1426, + CFG_PCIE_AXIM_DELAYr = 1427, + CFG_PCIE_AXIM_SPEED_WRr = 1428, + CFG_PCIE_AXIM_SPEED_RDr = 1429, + CFG_PCIE_DBG_AWADDR_AXI_SLV0r = 1430, + CFG_PCIE_DBG_AWADDR_AXI_SLV1r = 1431, + CFG_PCIE_DBG0_WDATA_AXI_SLV0r = 1432, + CFG_PCIE_DBG0_WDATA_AXI_SLV1r = 1433, + CFG_PCIE_DBG0_WDATA_AXI_SLV2r = 1434, + CFG_PCIE_DBG0_WDATA_AXI_SLV3r = 1435, + CFG_PCIE_DBG1_WDATA_AXI_SLV0r = 1436, + CFG_PCIE_DBG1_WDATA_AXI_SLV1r = 1437, + CFG_PCIE_DBG1_WDATA_AXI_SLV2r = 1438, + CFG_PCIE_DBG1_WDATA_AXI_SLV3r = 1439, + CFG_PCIE_DBG2_WDATA_AXI_SLV0r = 1440, + CFG_PCIE_DBG2_WDATA_AXI_SLV1r = 1441, + CFG_PCIE_DBG2_WDATA_AXI_SLV2r = 1442, + CFG_PCIE_DBG2_WDATA_AXI_SLV3r = 1443, + CFG_PCIE_DBG3_WDATA_AXI_SLV0r = 1444, + CFG_PCIE_DBG3_WDATA_AXI_SLV1r = 1445, + CFG_PCIE_DBG3_WDATA_AXI_SLV2r = 1446, + CFG_PCIE_DBG3_WDATA_AXI_SLV3r = 1447, + CFG_PCIE_DBG4_WDATA_AXI_SLV0r = 1448, + CFG_PCIE_DBG4_WDATA_AXI_SLV1r = 1449, + CFG_PCIE_DBG4_WDATA_AXI_SLV2r = 1450, + CFG_PCIE_DBG4_WDATA_AXI_SLV3r = 1451, + CFG_PCIE_DBG5_WDATA_AXI_SLV0r = 1452, + CFG_PCIE_DBG5_WDATA_AXI_SLV1r = 1453, + CFG_PCIE_DBG5_WDATA_AXI_SLV2r = 1454, + CFG_PCIE_DBG5_WDATA_AXI_SLV3r = 1455, + CFG_PCIE_DBG6_WDATA_AXI_SLV0r = 1456, + CFG_PCIE_DBG6_WDATA_AXI_SLV1r = 1457, + CFG_PCIE_DBG6_WDATA_AXI_SLV2r = 1458, + CFG_PCIE_DBG6_WDATA_AXI_SLV3r = 1459, + CFG_PCIE_DBG7_WDATA_AXI_SLV0r = 1460, + CFG_PCIE_DBG7_WDATA_AXI_SLV1r = 1461, + CFG_PCIE_DBG7_WDATA_AXI_SLV2r = 1462, + 
CFG_PCIE_DBG7_WDATA_AXI_SLV3r = 1463, + CFG_PCIE_DBG8_WDATA_AXI_SLV0r = 1464, + CFG_PCIE_DBG8_WDATA_AXI_SLV1r = 1465, + CFG_PCIE_DBG8_WDATA_AXI_SLV2r = 1466, + CFG_PCIE_DBG8_WDATA_AXI_SLV3r = 1467, + CFG_PCIE_DBG9_WDATA_AXI_SLV0r = 1468, + CFG_PCIE_DBG9_WDATA_AXI_SLV1r = 1469, + CFG_PCIE_DBG9_WDATA_AXI_SLV2r = 1470, + CFG_PCIE_DBG9_WDATA_AXI_SLV3r = 1471, + CFG_PCIE_DBG_AWLEN_AXI_SLVr = 1472, + CFG_PCIE_DBG_WLAST_AXI_SLVr = 1473, + CFG_PCIE_DBG_ARADDR_AXI_SLV0r = 1474, + CFG_PCIE_DBG_ARADDR_AXI_SLV1r = 1475, + CFG_PCIE_DBG0_RDATA_AXI_SLV0r = 1476, + CFG_PCIE_DBG0_RDATA_AXI_SLV1r = 1477, + CFG_PCIE_DBG0_RDATA_AXI_SLV2r = 1478, + CFG_PCIE_DBG0_RDATA_AXI_SLV3r = 1479, + CFG_PCIE_DBG1_RDATA_AXI_SLV0r = 1480, + CFG_PCIE_DBG1_RDATA_AXI_SLV1r = 1481, + CFG_PCIE_DBG1_RDATA_AXI_SLV2r = 1482, + CFG_PCIE_DBG1_RDATA_AXI_SLV3r = 1483, + CFG_PCIE_DBG_RLAST_AXI_SLVr = 1484, + CFG_DMA_DMA_ENABLEr = 1485, + CFG_DMA_UP_REQr = 1486, + CFG_DMA_DMA_UP_CURRENT_STATEr = 1487, + CFG_DMA_DMA_UP_REQ_ACKr = 1488, + CFG_DMA_DMA_DONE_LATCHr = 1489, + CFG_DMA_DMA_UP_CPU_ADDR_LOW32r = 1490, + CFG_DMA_DMA_UP_CPU_ADDR_HIGH32r = 1491, + CFG_DMA_DMA_UP_SE_ADDRr = 1492, + CFG_DMA_DMA_DONE_INTr = 1493, + CFG_DMA_SP_CFGr = 1494, + CFG_DMA_DMA_INGr = 1495, + CFG_DMA_RD_TIMEOUT_THRESHr = 1496, + CFG_DMA_DMA_TAB_STA_UP_FIFO_GAPr = 1497, + CFG_DMA_CFG_MAC_TIMr = 1498, + CFG_DMA_CFG_MAC_NUMr = 1499, + CFG_DMA_INIT_BD_ADDRr = 1500, + CFG_DMA_MAC_UP_BD_ADDR1_LOW32r = 1501, + CFG_DMA_MAC_UP_BD_ADDR1_HIGH32r = 1502, + CFG_DMA_MAC_UP_BD_ADDR2_LOW32r = 1503, + CFG_DMA_MAC_UP_BD_ADDR2_HIGH32r = 1504, + CFG_DMA_CFG_MAC_MAX_NUMr = 1505, + CFG_DMA_DMA_WBUF_FF_EMPTYr = 1506, + CFG_DMA_DMA_WBUF_STATEr = 1507, + CFG_DMA_DMA_MAC_BD_ADDR_LOW32r = 1508, + CFG_DMA_DMA_MAC_BD_ADDR_HIGH32r = 1509, + CFG_DMA_MAC_UP_ENABLEr = 1510, + CFG_DMA_MAC_ENDIANr = 1511, + CFG_DMA_UP_ENDIANr = 1512, + CFG_DMA_DMA_UP_RD_CNT_LATCHr = 1513, + CFG_DMA_DMA_UP_RCV_CNT_LATCHr = 1514, + CFG_DMA_DMA_UP_CNT_LATCHr = 1515, + 
CFG_DMA_CPU_RD_BD_PULSEr = 1516, + CFG_DMA_CPU_BD_THRESHOLDr = 1517, + CFG_DMA_CPU_BD_USED_CNTr = 1518, + CFG_DMA_DMA_UP_RCV_STATUSr = 1519, + CFG_DMA_SLV_RID_ERR_ENr = 1520, + CFG_DMA_SLV_RRESP_ERR_ENr = 1521, + CFG_DMA_SE_RDBK_FF_FULLr = 1522, + CFG_DMA_DMA_UP_DATA_COUNTr = 1523, + CFG_DMA_DMA_MWR_FIFO_AFULL_GAPr = 1524, + CFG_DMA_DMA_INFO_FIFO_AFULL_GAPr = 1525, + CFG_DMA_DMA_RD_TIMEOUT_SETr = 1526, + CFG_DMA_DMA_BD_DAT_ERR_ENr = 1527, + CFG_DMA_DMA_REPEAT_CNTr = 1528, + CFG_DMA_DMA_RD_TIMEOUT_ENr = 1529, + CFG_DMA_DMA_REPEAT_READr = 1530, + CFG_DMA_DMA_REPEAT_READ_ENr = 1531, + CFG_DMA_BD_CTL_STATEr = 1532, + CFG_DMA_DMA_DONE_INT_CNT_WRr = 1533, + CFG_DMA_DMA_DONE_INT_CNT_MACr = 1534, + CFG_DMA_CURRENT_MAC_NUMr = 1535, + CFG_DMA_CFG_MAC_AFIFO_AFULLr = 1536, + CFG_DMA_DMA_MAC_FF_FULLr = 1537, + CFG_DMA_USER_AXI_MSTr = 1538, + CFG_CSR_SBUS_STATEr = 1539, + CFG_CSR_MST_DEBUG_ENr = 1540, + CFG_CSR_SBUS_COMMAND_SELr = 1541, + CFG_CSR_SOC_RD_TIME_OUT_THRESHr = 1542, + CFG_CSR_BIG_LITTLE_BYTE_ORDERr = 1543, + CFG_CSR_ECC_BYPASS_READr = 1544, + CFG_CSR_AHB_ASYNC_WR_FIFO_AFULL_GAPr = 1545, + CFG_CSR_AHB_ASYNC_RD_FIFO_AFULL_GAPr = 1546, + CFG_CSR_AHB_ASYNC_CPL_FIFO_AFULL_GAPr = 1547, + CFG_CSR_MST_DEBUG_DATA0_HIGH26r = 1548, + CFG_CSR_MST_DEBUG_DATA0_LOW32r = 1549, + CFG_CSR_MST_DEBUG_DATA1_HIGH26r = 1550, + CFG_CSR_MST_DEBUG_DATA1_LOW32r = 1551, + CFG_CSR_MST_DEBUG_DATA2_HIGH26r = 1552, + CFG_CSR_MST_DEBUG_DATA2_LOW32r = 1553, + CFG_CSR_MST_DEBUG_DATA3_HIGH26r = 1554, + CFG_CSR_MST_DEBUG_DATA3_LOW32r = 1555, + CFG_CSR_MST_DEBUG_DATA4_HIGH26r = 1556, + CFG_CSR_MST_DEBUG_DATA4_LOW32r = 1557, + CFG_CSR_MST_DEBUG_DATA5_HIGH26r = 1558, + CFG_CSR_MST_DEBUG_DATA5_LOW32r = 1559, + CFG_CSR_MST_DEBUG_DATA6_HIGH26r = 1560, + CFG_CSR_MST_DEBUG_DATA6_LOW32r = 1561, + CFG_CSR_MST_DEBUG_DATA7_HIGH26r = 1562, + CFG_CSR_MST_DEBUG_DATA7_LOW32r = 1563, + CFG_CSR_MST_DEBUG_DATA8_HIGH26r = 1564, + CFG_CSR_MST_DEBUG_DATA8_LOW32r = 1565, + CFG_CSR_MST_DEBUG_DATA9_HIGH26r = 1566, + 
CFG_CSR_MST_DEBUG_DATA9_LOW32r = 1567, + CFG_CSR_MST_DEBUG_DATA10_HIGH26r = 1568, + CFG_CSR_MST_DEBUG_DATA10_LOW32r = 1569, + CFG_CSR_MST_DEBUG_DATA11_HIGH26r = 1570, + CFG_CSR_MST_DEBUG_DATA11_LOW32r = 1571, + CFG_CSR_MST_DEBUG_DATA12_HIGH26r = 1572, + CFG_CSR_MST_DEBUG_DATA12_LOW32r = 1573, + CFG_CSR_MST_DEBUG_DATA13_HIGH26r = 1574, + CFG_CSR_MST_DEBUG_DATA13_LOW32r = 1575, + CFG_CSR_MST_DEBUG_DATA14_HIGH26r = 1576, + CFG_CSR_MST_DEBUG_DATA14_LOW32r = 1577, + CFG_CSR_MST_DEBUG_DATA15_HIGH26r = 1578, + CFG_CSR_MST_DEBUG_DATA15_LOW32r = 1579, + NPPU_MR_CFG_IND_ACCESS_STATESr = 1580, + NPPU_MR_CFG_IND_ACCESS_CMD0r = 1581, + NPPU_MR_CFG_IND_ACCESS_DATA0r = 1582, + NPPU_MR_CFG_IND_ACCESS_DATA1r = 1583, + NPPU_MR_CFG_IND_ACCESS_CMD1r = 1584, + NPPU_MR_CFG_MR_INIT_DONEr = 1585, + NPPU_MR_CFG_CNT_MODE_REGr = 1586, + NPPU_MR_CFG_CFG_ECC_BYPASS_READr = 1587, + NPPU_MR_CFG_CFG_REP_MODr = 1588, + NPPU_MR_CFG_BLOCK_PTR_FIFO_AFUL_THr = 1589, + NPPU_MR_CFG_PRE_RCV_PTR_FIFO_AFUL_THr = 1590, + NPPU_MR_CFG_MGID_FIFO_AFUL_THr = 1591, + NPPU_MR_CFG_REP_CMD_FIFO_AFUL_THr = 1592, + NPPU_MR_CFG_MR_INT_MASK_1r = 1593, + NPPU_MR_CFG_MR_INT_MASK_2r = 1594, + NPPU_MR_CFG_MR_INT_MASK_3r = 1595, + NPPU_MR_CFG_MR_INT_MASK_4r = 1596, + NPPU_MR_CFG_MR_STATES_1r = 1597, + NPPU_MR_CFG_MR_STATES_2r = 1598, + NPPU_MR_CFG_MR_STATES_3r = 1599, + NPPU_MR_CFG_MR_STATES_4r = 1600, + NPPU_MR_CFG_MR_STATES_5r = 1601, + NPPU_MR_CFG_MR_STATES_6r = 1602, + NPPU_MR_CFG_MR_STATES_7r = 1603, + NPPU_MR_CFG_MR_STATES_8r = 1604, + NPPU_MR_CFG_MR_SOP_IN_CNTr = 1605, + NPPU_MR_CFG_MR_EOP_IN_CNTr = 1606, + NPPU_MR_CFG_MR_SOP_OUT_CNTr = 1607, + NPPU_MR_CFG_MR_EOP_OUT_CNTr = 1608, + NPPU_MR_CFG_MR_COS0_IN_CNTr = 1609, + NPPU_MR_CFG_MR_COS1_IN_CNTr = 1610, + NPPU_MR_CFG_MR_COS2_IN_CNTr = 1611, + NPPU_MR_CFG_MR_COS3_IN_CNTr = 1612, + NPPU_MR_CFG_MR_COS0_OUT_CNTr = 1613, + NPPU_MR_CFG_MR_COS1_OUT_CNTr = 1614, + NPPU_MR_CFG_MR_COS2_OUT_CNTr = 1615, + NPPU_MR_CFG_MR_COS3_OUT_CNTr = 1616, + NPPU_MR_CFG_MR_ERR_IN_CNTr = 1617, 
+ NPPU_MR_CFG_MR_COS0_SOP_IN_CNTr = 1618, + NPPU_MR_CFG_MR_COS0_EOP_IN_CNTr = 1619, + NPPU_MR_CFG_MR_COS1_SOP_IN_CNTr = 1620, + NPPU_MR_CFG_MR_COS1_EOP_IN_CNTr = 1621, + NPPU_MR_CFG_MR_COS2_SOP_IN_CNTr = 1622, + NPPU_MR_CFG_MR_COS2_EOP_IN_CNTr = 1623, + NPPU_MR_CFG_MR_COS3_SOP_IN_CNTr = 1624, + NPPU_MR_CFG_MR_COS3_EOP_IN_CNTr = 1625, + NPPU_MR_CFG_MR_COS0_IN_ERR_CNTr = 1626, + NPPU_MR_CFG_MR_COS1_IN_ERR_CNTr = 1627, + NPPU_MR_CFG_MR_COS2_IN_ERR_CNTr = 1628, + NPPU_MR_CFG_MR_COS3_IN_ERR_CNTr = 1629, + NPPU_MR_CFG_MR_COS0_SOP_OUT_CNTr = 1630, + NPPU_MR_CFG_MR_COS0_EOP_OUT_CNTr = 1631, + NPPU_MR_CFG_MR_COS1_SOP_OUT_CNTr = 1632, + NPPU_MR_CFG_MR_COS1_EOP_OUT_CNTr = 1633, + NPPU_MR_CFG_MR_COS2_SOP_OUT_CNTr = 1634, + NPPU_MR_CFG_MR_COS2_EOP_OUT_CNTr = 1635, + NPPU_MR_CFG_MR_COS3_SOP_OUT_CNTr = 1636, + NPPU_MR_CFG_MR_COS3_EOP_OUT_CNTr = 1637, + NPPU_MR_CFG_MR_MLT_UNVLD_CNTr = 1638, + NPPU_MR_CFG_MR_SOP_EOP_MATCH_CFGr = 1639, + NPPU_MR_CFG_MR_MLT_UNVLD_MGIDr = 1640, + NPPU_PKTRX_CFG_ISCH_FIFO_TH_1r = 1641, + NPPU_PKTRX_CFG_ISCH_FIFO_TH_2r = 1642, + NPPU_PKTRX_CFG_ISCH_FIFO_TH_3r = 1643, + NPPU_PKTRX_CFG_ISCH_FIFO_TH_4r = 1644, + NPPU_PKTRX_CFG_ISCH_CFG_0r = 1645, + NPPU_PKTRX_CFG_HDU_EX_TPID_0r = 1646, + NPPU_PKTRX_CFG_HDU_EX_TPID_1r = 1647, + NPPU_PKTRX_CFG_HDU_INT_TPID_0r = 1648, + NPPU_PKTRX_CFG_HDU_INT_TPID_1r = 1649, + NPPU_PKTRX_CFG_HDU_HDLC_0r = 1650, + NPPU_PKTRX_CFG_HDU_HDLC_1r = 1651, + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_0r = 1652, + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_1r = 1653, + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_2r = 1654, + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_3r = 1655, + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_0r = 1656, + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_1r = 1657, + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_2r = 1658, + NPPU_PKTRX_CFG_SLOT_NO_CFGr = 1659, + NPPU_PKTRX_CFG_PKTRX_INT_EN_0r = 1660, + NPPU_PKTRX_CFG_PKTRX_INT_EN_1r = 1661, + NPPU_PKTRX_CFG_PKTRX_INT_MASK_0r = 1662, + NPPU_PKTRX_CFG_PKTRX_INT_MASK_1r = 1663, + NPPU_PKTRX_CFG_PKTRX_INT_STATUSr = 1664, + NPPU_PKTRX_CFG_PKTRX_PORT_RDY0r = 
1665, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY0r = 1666, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY1r = 1667, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY2r = 1668, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY3r = 1669, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY4r = 1670, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY5r = 1671, + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY6r = 1672, + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODEr = 1673, + NPPU_IDMA_CFG_INT_RAM_ENr = 1674, + NPPU_IDMA_CFG_INT_RAM_MASKr = 1675, + NPPU_IDMA_CFG_INT_RAM_STATUSr = 1676, + NPPU_IDMA_CFG_SUBSYS_INT_MASK_FLAGr = 1677, + NPPU_IDMA_CFG_SUBSYS_INT_UNMASK_FLAGr = 1678, + NPPU_IDMA_CFG_DEBUG_CNT_RDCLR_MODEr = 1679, + NPPU_PBU_CFG_INT_RAM_EN0r = 1680, + NPPU_PBU_CFG_INT_RAM_MASK0r = 1681, + NPPU_PBU_CFG_INT_RAM_STATUS0r = 1682, + NPPU_PBU_CFG_INT_FIFO_EN0r = 1683, + NPPU_PBU_CFG_INT_FIFO_EN1r = 1684, + NPPU_PBU_CFG_INT_FIFO_MASK0r = 1685, + NPPU_PBU_CFG_INT_FIFO_MASK1r = 1686, + NPPU_PBU_CFG_INT_FIFO_STATUS0r = 1687, + NPPU_PBU_CFG_INT_FIFO_STATUS1r = 1688, + NPPU_PBU_CFG_SUBSYS_INT_MASK_FLAGr = 1689, + NPPU_PBU_CFG_SUBSYS_INT_UNMASK_FLAGr = 1690, + NPPU_PBU_CFG_SA_IP_ENr = 1691, + NPPU_PBU_CFG_DEBUG_CNT_RDCLR_MODEr = 1692, + NPPU_PBU_CFG_FPTR_FIFO_AFUL_ASSERT_CFGr = 1693, + NPPU_PBU_CFG_FPTR_FIFO_AFUL_NEGATE_CFGr = 1694, + NPPU_PBU_CFG_PF_FIFO_AFUL_ASSERT_CFGr = 1695, + NPPU_PBU_CFG_PF_FIFO_AFUL_NEGATE_CFGr = 1696, + NPPU_PBU_CFG_PF_FIFO_AEPT_ASSERT_CFGr = 1697, + NPPU_PBU_CFG_PF_FIFO_AEPT_NEGATE_CFGr = 1698, + NPPU_PBU_CFG_WB_AFUL_ASSERT_CFGr = 1699, + NPPU_PBU_CFG_WB_AFUL_NEGATE_CFGr = 1700, + NPPU_PBU_CFG_SE_KEY_AFUL_ASSERT_CFGr = 1701, + NPPU_PBU_CFG_IFBRD_SE_AFUL_ASSERT_CFGr = 1702, + NPPU_PBU_CFG_IFBRD_SE_AFUL_NEGATE_CFGr = 1703, + NPPU_PBU_CFG_IFBRD_ODMA_AFUL_ASSERT_CFGr = 1704, + NPPU_PBU_CFG_IFBRD_ODMA_AFUL_NEGATE_CFGr = 1705, + NPPU_PBU_CFG_IFBRD_PPU_AFUL_ASSERT_CFGr = 1706, + NPPU_PBU_CFG_IFBRD_PPU_AFUL_NEGATE_CFGr = 1707, + NPPU_PBU_CFG_MC_LOGIC_AFUL_ASSERT_CFGr = 1708, + NPPU_PBU_CFG_MC_LOGIC_AFUL_NEGATE_CFGr = 1709, + 
NPPU_PBU_CFG_MC_LOGIC_DIFFr = 1710, + NPPU_PBU_CFG_CFG_PEAK_PORT_CNT_CLRr = 1711, + NPPU_PBU_CFG_ALL_FTM_CRDT_THr = 1712, + NPPU_PBU_CFG_ALL_FTM_LINK_TH_01r = 1713, + NPPU_PBU_CFG_ALL_FTM_LINK_TH_23r = 1714, + NPPU_PBU_CFG_ALL_FTM_LINK_TH_45r = 1715, + NPPU_PBU_CFG_ALL_FTM_LINK_TH_6r = 1716, + NPPU_PBU_CFG_ALL_FTM_TOTAL_CONGEST_THr = 1717, + NPPU_PBU_CFG_CFG_CRDT_MODEr = 1718, + NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIMEr = 1719, + NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIMEr = 1720, + NPPU_PBU_STAT_PBU_FC_RDYr = 1721, + NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY0r = 1722, + NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY1r = 1723, + NPPU_PBU_STAT_PBU_LIF_GROUP1_RDYr = 1724, + NPPU_PBU_STAT_PBU_LIF_GROUP0_PFC_RDYr = 1725, + NPPU_PBU_STAT_PBU_LIF_GROUP1_PFC_RDYr = 1726, + NPPU_PBU_STAT_PBU_SA_PORT_RDY_0_31r = 1727, + NPPU_PBU_STAT_PBU_SA_PORT_RDY_32_50r = 1728, + NPPU_PBU_STAT_PBU_PKTRX_MR_PFC_RDYr = 1729, + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_0_31r = 1730, + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_32_47r = 1731, + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_0_31r = 1732, + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_32_47r = 1733, + NPPU_PBU_STAT_PBU_FTM_CRDT_SYS_INFOr = 1734, + NPPU_ISU_CFG_WEIGHT_NORMAL_MCr = 1735, + NPPU_ISU_CFG_WEIGHT_SA_MCr = 1736, + NPPU_ISU_CFG_WEIGHT_ETMr = 1737, + NPPU_ISU_CFG_WEIGHT_LP_MCr = 1738, + NPPU_ISU_CFG_WEIGHT_OAMr = 1739, + NPPU_ISU_CFG_WEIGHT_LIF_CTRL1r = 1740, + NPPU_ISU_CFG_WEIGHT_LIF_CTRL2r = 1741, + NPPU_ISU_CFG_ECC_BYPASS_READr = 1742, + NPPU_ISU_CFG_ISU_INT_MASKr = 1743, + NPPU_ISU_CFG_CFG_CRDT_CYCLEr = 1744, + NPPU_ISU_CFG_CFG_CRDT_VALUEr = 1745, + NPPU_ISU_CFG_ISU_INT_ENr = 1746, + NPPU_ISU_CFG_ISU_PPU_FIFO_FCr = 1747, + NPPU_ISU_CFG_ISU_INT_STATUSr = 1748, + NPPU_ISU_CFG_FD_PROG_FULL_ASSERT_CFGr = 1749, + NPPU_ISU_CFG_FD_PROG_FULL_NEGATE_CFGr = 1750, + NPPU_ISU_CFG_LP_PROG_FULL_ASSERT_CFGr = 1751, + NPPU_ISU_CFG_LP_PROG_FULL_NEGATE_CFGr = 1752, + NPPU_ISU_STAT_DEBUG_CNT_DAT0r = 1753, + NPPU_ISU_STAT_DEBUG_CNT_DAT1r = 1754, + NPPU_ISU_STAT_DEBUG_CNT_DAT2r = 1755, + 
NPPU_ISU_STAT_DEBUG_CNT_DAT3r = 1756, + NPPU_ISU_STAT_DEBUG_CNT_DAT4r = 1757, + NPPU_ISU_STAT_DEBUG_CNT_DAT5r = 1758, + NPPU_ISU_STAT_DEBUG_CNT_DAT6r = 1759, + NPPU_ISU_STAT_DEBUG_CNT_DAT7r = 1760, + NPPU_ISU_STAT_DEBUG_CNT_DAT8r = 1761, + NPPU_ISU_STAT_DEBUG_CNT_DAT9r = 1762, + NPPU_ISU_STAT_DEBUG_CNT_DAT10r = 1763, + NPPU_ISU_STAT_DEBUG_CNT_DAT11r = 1764, + NPPU_ISU_STAT_DEBUG_CNT_DAT12r = 1765, + NPPU_ISU_STAT_DEBUG_CNT_DAT13r = 1766, + NPPU_ISU_STAT_DEBUG_CNT_DAT14r = 1767, + NPPU_ISU_STAT_DEBUG_CNT_DAT15r = 1768, + NPPU_ISU_STAT_DEBUG_CNT_DAT16r = 1769, + NPPU_ISU_STAT_DEBUG_CNT_DAT17r = 1770, + NPPU_ISU_STAT_DEBUG_CNT_DAT18r = 1771, + NPPU_ISU_STAT_DEBUG_CNT_DAT19r = 1772, + NPPU_ISU_STAT_DEBUG_CNT_CFGr = 1773, + NPPU_ODMA_CFG_EXSA_TDM_OFFSETr = 1774, + NPPU_ODMA_CFG_ECC_BYPASS_READTr = 1775, + NPPU_ODMA_CFG_ODMA_INT_EN_0r = 1776, + NPPU_ODMA_CFG_ODMA_INT_EN_1r = 1777, + NPPU_ODMA_CFG_ODMA_INT_EN_2r = 1778, + NPPU_ODMA_CFG_ODMA_INT_EN_3r = 1779, + NPPU_ODMA_CFG_ODMA_INT_MASK_0r = 1780, + NPPU_ODMA_CFG_ODMA_INT_MASK_1r = 1781, + NPPU_ODMA_CFG_ODMA_INT_MASK_2r = 1782, + NPPU_ODMA_CFG_ODMA_INT_MASK_3r = 1783, + NPPU_ODMA_CFG_ODMA_INT_STATUS_0r = 1784, + NPPU_ODMA_CFG_ODMA_INT_STATUS_1r = 1785, + NPPU_ODMA_CFG_ODMA_INT_STATUS_2r = 1786, + NPPU_ODMA_CFG_ODMA_INT_STATUS_3r = 1787, + NPPU_ODMA_CFG_SP_TDM_ERR_NOR_CFGr = 1788, + NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_Ar = 1789, + NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_Nr = 1790, + NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_Ar = 1791, + NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_Nr = 1792, + NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_Ar = 1793, + NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_Nr = 1794, + NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_Ar = 1795, + NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_Nr = 1796, + NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_Ar = 1797, + NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_Nr = 1798, + NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_Ar = 1799, + NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_Nr = 1800, + 
NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_Ar = 1801, + NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_Nr = 1802, + NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_Ar = 1803, + NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_Nr = 1804, + NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_Ar = 1805, + NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_Nr = 1806, + NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_Ar = 1807, + NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_Nr = 1808, + NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_Ar = 1809, + NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_Nr = 1810, + NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_Ar = 1811, + NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_Nr = 1812, + NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_Ar = 1813, + NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_Nr = 1814, + NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_Ar = 1815, + NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_Nr = 1816, + NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_Ar = 1817, + NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_Nr = 1818, + NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_Ar = 1819, + NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_Nr = 1820, + NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_Ar = 1821, + NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_Nr = 1822, + NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_Ar = 1823, + NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_Nr = 1824, + NPPU_ODMA_CFG_PP_PROG_FULL_CFG_Ar = 1825, + NPPU_ODMA_CFG_PP_PROG_FULL_CFG_Nr = 1826, + NPPU_ODMA_CFG_TM_WEIGHTr = 1827, + NPPU_ODMA_CFG_PP_WEIGHTr = 1828, + NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_Ar = 1829, + NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_Nr = 1830, + NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_Ar = 1831, + NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_Nr = 1832, + NPPU_ODMA_CFG_INT_OR_PONr = 1833, + NPPU_ODMA_CFG_QUEMNG_CNT_IN_ERR_CNTr = 1834, + NPPU_ODMA_CFG_LIF0_PORT_EOP_CNTr = 1835, + NPPU_ODMA_CFG_LIF1_PORT_EOP_CNTr = 1836, + NPPU_ODMA_CFG_LIFC_PORT0_EOP_CNTr = 1837, + NPPU_ODMA_CFG_LIFC_PORT1_EOP_CNTr = 1838, + NPPU_ODMA_CFG_FPTR_FIFO_PROG_EPT_CFG_Nr = 1839, + NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_Ar = 1840, + NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_Nr = 1841, + NPPU_OAM_CFG_IND_ACCESS_DONEr = 1842, + NPPU_OAM_CFG_IND_ACCESS_COMMANDr = 1843, + 
NPPU_OAM_CFG_IND_DAT0r = 1844, + NPPU_OAM_CFG_IND_DAT1r = 1845, + NPPU_OAM_CFG_IND_DAT2r = 1846, + NPPU_OAM_CFG_IND_DAT3r = 1847, + NPPU_OAM_CFG_OAM_TX_MAIN_ENr = 1848, + NPPU_OAM_CFG_TX_TOTAL_NUMr = 1849, + NPPU_OAM_CFG_OAM_CHK_MAIN_ENr = 1850, + NPPU_OAM_CFG_CHK_TOTAL_NUM0r = 1851, + NPPU_OAM_CFG_MA_CHK_MAIN_ENr = 1852, + NPPU_OAM_CFG_CHK_TOTAL_NUM1r = 1853, + NPPU_OAM_CFG_TX_STAT_ENr = 1854, + NPPU_OAM_CFG_REC_STAT_ENr = 1855, + NPPU_OAM_CFG_STAT_OAM_RDY_MASKr = 1856, + NPPU_OAM_CFG_SESSION_GRADING0r = 1857, + NPPU_OAM_CFG_SESSION_GRADING1r = 1858, + NPPU_OAM_CFG_SESSION_GRADING2r = 1859, + NPPU_OAM_CFG_SESSION_GRADING3r = 1860, + NPPU_OAM_CFG_BFD_CHK_HADDRr = 1861, + NPPU_OAM_CFG_ETHCCM_CHK_HADDRr = 1862, + NPPU_OAM_CFG_TPBFD_CHK_HADDRr = 1863, + NPPU_OAM_CFG_TPOAM_CCM_CHK_HADDRr = 1864, + NPPU_OAM_CFG_BFD_TX_HADDRr = 1865, + NPPU_OAM_CFG_ETHCCM_TX_HADDRr = 1866, + NPPU_OAM_CFG_TPBFD_TX_HADDRr = 1867, + NPPU_OAM_CFG_TPOAM_CCM_TX_HADDRr = 1868, + NPPU_OAM_CFG_ETHCCM_MA_CHK_HADDRr = 1869, + NPPU_OAM_CFG_TPCCM_MA_CHK_HADDRr = 1870, + NPPU_OAM_CFG_GROUPNUM_RAM_CLRr = 1871, + NPPU_OAM_CFG_INDEX_RAM0_CLRr = 1872, + NPPU_OAM_CFG_INDEX_RAM1_CLRr = 1873, + NPPU_OAM_CFG_RMEP_RAM_CLRr = 1874, + NPPU_OAM_CFG_MA_RAM_CLRr = 1875, + NPPU_OAM_CFG_RAM_INIT_DONEr = 1876, + NPPU_OAM_CFG_REC_BFD_DEBUG_ENr = 1877, + NPPU_OAM_CFG_OAM_SESSION_INTr = 1878, + NPPU_OAM_CFG_PON_INTr = 1879, + NPPU_OAM_CFG_OAM_INT_CLRr = 1880, + NPPU_OAM_CFG_TYPE_INT_CLR0r = 1881, + NPPU_OAM_CFG_TYPE_INT_CLR1r = 1882, + NPPU_OAM_CFG_INTERRUPT_MASKr = 1883, + NPPU_OAM_CFG_INT0_INDEXr = 1884, + NPPU_OAM_CFG_INT1_INDEXr = 1885, + NPPU_OAM_CFG_INT0_INDEX_REGIONr = 1886, + NPPU_OAM_CFG_INT1_INDEX_REGIONr = 1887, + NPPU_OAM_CFG_BDIINFO_FWFT_FIFO_THr = 1888, + NPPU_OAM_CFG_RECSEC_FWFT_FIFO_THr = 1889, + NPPU_OAM_CFG_TIMING_CHK_INFO0_FWFT_FIFO_THr = 1890, + NPPU_OAM_CFG_RECMA_FWFT_FIFO_THr = 1891, + NPPU_OAM_CFG_TIMING_CHK_INFO1_FWFT_FIFO_THr = 1892, + NPPU_OAM_CFG_OAM_TXINST_FIFO_THr = 1893, + 
NPPU_OAM_CFG_OAM_RDINFO_FWFT_FIFO_THr = 1894, + NPPU_OAM_CFG_LM_CNT_FWFT_FIFO_THr = 1895, + NPPU_OAM_CFG_OAM_PKT_FIFO_THr = 1896, + NPPU_OAM_CFG_RECLM_STAT_FIFO_THr = 1897, + NPPU_OAM_CFG_TXLM_STAT_FIFO_THr = 1898, + NPPU_OAM_CFG_OAM_CHK_FWFT_FIFO_THr = 1899, + NPPU_OAM_CFG_TXOAM_STAT_FIFO_THr = 1900, + NPPU_OAM_CFG_RECOAM_STAT_FIFO_THr = 1901, + NPPU_OAM_CFG_TXPKT_DATA_FWFT_FIFO_THr = 1902, + NPPU_OAM_CFG_TSTPKT_FWFT_FIFO_THr = 1903, + NPPU_OAM_CFG_TST_TXINST_FWFT_FIFO_THr = 1904, + NPPU_OAM_CFG_TSTRX_MAIN_ENr = 1905, + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL2r = 1906, + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL1r = 1907, + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL0r = 1908, + NPPU_OAM_CFG_TSTRX_CFG_PARAr = 1909, + NPPU_OAM_CFG_FIFO_STATUS_INT_EN_0r = 1910, + NPPU_OAM_CFG_FIFO_STATUS_INT_EN_1r = 1911, + NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_0r = 1912, + NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_1r = 1913, + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUSr = 1914, + NPPU_OAM_CFG_MAIN_FREQUENCYr = 1915, + NPPU_OAM_CFG_OAM_CFG_TYPEr = 1916, + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD0r = 1917, + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD1r = 1918, + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD2r = 1919, + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD3r = 1920, + NPPU_OAM_CFG_OAM_FS_TXINST_FIFO_THr = 1921, + NPPU_OAM_CFG_OAM_MA_FS_TXINST_FIFO_THr = 1922, + NPPU_OAM_CFG_PON_INT_RAM_CLRr = 1923, + NPPU_OAM_CFG_PON_P_INT_INDEXr = 1924, + NPPU_OAM_CFG_PON_PROTECT_PKT_FIFO_THr = 1925, + NPPU_OAM_CFG_PON_LASER_OFF_ENr = 1926, + NPPU_OAM_CFG_PON_PRTCT_PKT_TX_ENr = 1927, + NPPU_OAM_CFG_CFG_PON_MASTERr = 1928, + NPPU_OAM_CFG_LEVEL_MODEr = 1929, + NPPU_OAM_CFG_INTERRUPT_ENr = 1930, + NPPU_OAM_CFG_PON_LASER_ON_ENr = 1931, + NPPU_OAM_CFG_TI_PON_SDr = 1932, + NPPU_OAM_CFG_TI_PON_LOSr = 1933, + NPPU_OAM_CFG_IND_DAT4r = 1934, + NPPU_OAM_CFG_IND_DAT5r = 1935, + NPPU_OAM_CFG_IND_DAT6r = 1936, + NPPU_OAM_CFG_IND_DAT7r = 1937, + NPPU_OAM_CFG_IND_DAT8r = 1938, + NPPU_OAM_CFG_IND_DAT9r = 1939, + NPPU_OAM_CFG_IND_DAT10r = 1940, + NPPU_OAM_CFG_IND_DAT11r = 1941, + NPPU_OAM_CFG_IND_DAT12r = 
1942, + NPPU_OAM_CFG_IND_DAT13r = 1943, + NPPU_OAM_CFG_IND_DAT14r = 1944, + NPPU_OAM_CFG_IND_DAT15r = 1945, + NPPU_OAM_CFG_OAM_2544_PKT_FIFO_THr = 1946, + NPPU_OAM_CFG_TXINFO_RAM_CLRr = 1947, + NPPU_OAM_CFG_TXINFO_RAM_INIT_DONEr = 1948, + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS40r = 1949, + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS41r = 1950, + NPPU_OAM_CFG_OAM_2544_FUN_ENr = 1951, + NPPU_OAM_CFG_OAM_2544_STAT_CLRr = 1952, + NPPU_OAM_CFG_TXDIS_DEFAULTr = 1953, + NPPU_OAM_CFG_TXDIS_DEFAULT_ENr = 1954, + NPPU_OAM_CFG_TPBFD_FIRSTCHK_THr = 1955, + NPPU_OAM_CFG_ETHCCM_FIRSTCHK_THr = 1956, + NPPU_OAM_CFG_TPCCM_FIRSTCHK_THr = 1957, + NPPU_OAM_STAT_TXSTAT_REQ_CNTr = 1958, + NPPU_OAM_STAT_CHKSTAT_REQ_CNTr = 1959, + NPPU_OAM_STAT_STAT_OAM_FC_CNTr = 1960, + NPPU_OAM_STAT_BFDSEQ_REQ_CNTr = 1961, + NPPU_OAM_STAT_LMCNT_REQ_CNTr = 1962, + NPPU_OAM_STAT_STAT_OAM_LM_RSP_CNTr = 1963, + NPPU_OAM_STAT_STAT_OAM_LM_FC_CNTr = 1964, + NPPU_OAM_STAT_SE_REQ_CNTr = 1965, + NPPU_OAM_STAT_SE_RSP_CNTr = 1966, + NPPU_OAM_STAT_SE_OAM_FC_CNTr = 1967, + NPPU_OAM_STAT_OAM_SE_FC_CNTr = 1968, + NPPU_OAM_STAT_OAM_PKTRX_SOP_CNTr = 1969, + NPPU_OAM_STAT_OAM_PKTRX_EOP_CNTr = 1970, + NPPU_OAM_STAT_PKTRX_OAM_FC_CNTr = 1971, + NPPU_OAM_STAT_PKTRX_OAM_TST_FC_CNTr = 1972, + NPPU_OAM_STAT_ODMA_OAM_SOP_CNTr = 1973, + NPPU_OAM_STAT_ODMA_OAM_EOP_CNTr = 1974, + NPPU_OAM_STAT_OAM_ODMA_FC_CNTr = 1975, + NPPU_OAM_STAT_REC_MA_PKT_ILLEGAL_CNTr = 1976, + NPPU_OAM_STAT_REC_RMEP_PKT_ILLEGAL_CNTr = 1977, + NPPU_OAM_STAT_REC_ETH_AIS_PKT_CNTr = 1978, + NPPU_OAM_STAT_REC_TP_AIS_PKT_CNTr = 1979, + NPPU_OAM_STAT_REC_TP_CSF_PKT_CNTr = 1980, + NPPU_OAM_STAT_REC_ETH_LEVEL_DEFECT_CNTr = 1981, + NPPU_OAM_STAT_REC_ETH_MEGID_DEFECT_CNTr = 1982, + NPPU_OAM_STAT_REC_ETH_MEPID_DEFECT_CNTr = 1983, + NPPU_OAM_STAT_REC_ETH_INTERVAL_DEFECT_CNTr = 1984, + NPPU_OAM_STAT_REC_SESS_UNENABLE_CNTr = 1985, + NPPU_OAM_STAT_OAM_2544_RD_PKT_CNTr = 1986, + NPPU_OAM_STAT_DEBUG_CNT_CLRr = 1987, + NPPU_OAM_STAT_OAM_PKTRX_CATCH_DATAr = 1988, + 
NPPU_OAM_STAT_ODMA_OAM_CATCH_DATAr = 1989, + NPPU_OAM_STAT_TST_SESSION_TX_CNTr = 1990, + NPPU_OAM_STAT_TST_SESSION_RX_CNTr = 1991, + NPPU_OAM_STAT_TSTRX_LOST_CNTr = 1992, + NPPU_OAM_STAT_BFDSEQ_WR_CNTr = 1993, + NPPU_OAM_STAT_BFDTIME_WR_CNTr = 1994, + NPPU_OAM_STAT_LMCNT_WR_CNTr = 1995, + NPPU_OAM_STAT_OAM_FS_PKT_CNTr = 1996, + NPPU_OAM_STAT_OAM_MA_FS_PKT_CNTr = 1997, + NPPU_OAM_STAT_REC_TP_LEVEL_DEFECT_CNTr = 1998, + NPPU_OAM_STAT_REC_TP_MEGID_DEFECT_CNTr = 1999, + NPPU_OAM_STAT_REC_TP_MEPID_DEFECT_CNTr = 2000, + NPPU_OAM_STAT_REC_TP_INTERVAL_DEFECT_CNTr = 2001, + NPPU_OAM_STAT_RD_REG_CLEAR_MODEr = 2002, + NPPU_OAM_STAT_RD_DATA_REG_CLEAR_MODEr = 2003, + NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM_0r = 2004, + NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM1r = 2005, + NPPU_OAM_CFG_INDIR_TST_PKT_TX_PARA_RAMr = 2006, + NPPU_OAM_CFG_INDIR_GROUPNUMRAMr = 2007, + NPPU_OAM_CFG_INDIR_OAM_TX_TBL_RAMr = 2008, + NPPU_OAM_CFG_INDIR_OAM_CHK_TBL_RAMr = 2009, + NPPU_OAM_CFG_INDIR_OAM_MA_CHK_TBL_RAMr = 2010, + NPPU_OAM_CFG_INDIR_OAM_2544_TX_RAMr = 2011, + PPU_PPU_INTERRUPT_EN_Rr = 2012, + PPU_PPU_MEC_HOST_INTERRUPTr = 2013, + PPU_PPU_DBG_RTL_DATEr = 2014, + PPU_PPU_DUP_START_NUM_CFGr = 2015, + PPU_PPU_DEBUG_DATA_WRITE_COMPLETEr = 2016, + PPU_PPU_UC_MC_WRR_CFGr = 2017, + PPU_PPU_DEBUG_PKT_SEND_ENr = 2018, + PPU_PPU_DUP_TBL_IND_ACCESS_DONEr = 2019, + PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_MASKr = 2020, + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_MASKr = 2021, + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_MASKr = 2022, + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_MASKr = 2023, + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_MASKr = 2024, + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_MASKr = 2025, + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_MASKr = 2026, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Lr = 2027, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Mr = 2028, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Hr = 2029, + PPU_PPU_PPU_RAM_CHECK_ERR_MASKr = 2030, + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_MASKr = 2031, + 
PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_STAr = 2032, + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_STAr = 2033, + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_STAr = 2034, + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_STAr = 2035, + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_STAr = 2036, + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_STAr = 2037, + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_STAr = 2038, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Lr = 2039, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Mr = 2040, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Hr = 2041, + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_STAr = 2042, + PPU_PPU_PPU_RAM_CHECK_ECC_ERR_FLAG_1r = 2043, + PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_FLAGr = 2044, + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_FLAGr = 2045, + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_FLAGr = 2046, + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_FLAGr = 2047, + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_FLAGr = 2048, + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_FLAGr = 2049, + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_FLAGr = 2050, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Lr = 2051, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Mr = 2052, + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Hr = 2053, + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_FLAGr = 2054, + PPU_PPU_INSTRMEM_RAM_INT_OUTr = 2055, + PPU_PPU_INSTRMEM_RAM_INT_MASKr = 2056, + PPU_PPU_INSTRMEM_RAM_INT_STATr = 2057, + PPU_PPU_INSTRMEM_RAM_INT_FLAGr = 2058, + PPU_PPU_PPU_COUNT_CFGr = 2059, + PPU_PPU_PPU_STATICS_CFGr = 2060, + PPU_PPU_PPU_STATICS_WB_CFGr = 2061, + PPU_PPU_WR_TABLE_SELF_RSP_EN_CFGr = 2062, + PPU_PPU_PPU_RANDOM_ARBITER_8TO1_CFGr = 2063, + PPU_PPU_PPU_REORDER_BYPASS_FLOW_NUM_CFGr = 2064, + PPU_PPU_COS_METER_CFG_Hr = 2065, + PPU_PPU_COS_METER_CFG_Lr = 2066, + PPU_PPU_INSTRMEM_RDYr = 2067, + PPU_PPU_INSTRMEM_ADDRr = 2068, + PPU_PPU_INSTRMEM_IND_ACCESS_DONEr = 2069, + PPU_PPU_INSTRMEM_INSTR0_DATA_Lr = 2070, + PPU_PPU_INSTRMEM_INSTR0_DATA_Hr = 2071, + PPU_PPU_INSTRMEM_INSTR1_DATA_Lr = 2072, + PPU_PPU_INSTRMEM_INSTR1_DATA_Hr = 2073, + 
PPU_PPU_INSTRMEM_INSTR2_DATA_Lr = 2074, + PPU_PPU_INSTRMEM_INSTR2_DATA_Hr = 2075, + PPU_PPU_INSTRMEM_INSTR3_DATA_Lr = 2076, + PPU_PPU_INSTRMEM_INSTR3_DATA_Hr = 2077, + PPU_PPU_INSTRMEM_READ_INSTR0_DATA_Lr = 2078, + PPU_PPU_INSTRMEM_READ_INSTR0_DATA_Hr = 2079, + PPU_PPU_INSTRMEM_READ_INSTR1_DATA_Lr = 2080, + PPU_PPU_INSTRMEM_READ_INSTR1_DATA_Hr = 2081, + PPU_PPU_INSTRMEM_READ_INSTR2_DATA_Lr = 2082, + PPU_PPU_INSTRMEM_READ_INSTR2_DATA_Hr = 2083, + PPU_PPU_INSTRMEM_READ_INSTR3_DATA_Lr = 2084, + PPU_PPU_INSTRMEM_READ_INSTR3_DATA_Hr = 2085, + PPU_PPU_SE_PPU_MC_SRH_FC_CNT_Hr = 2086, + PPU_PPU_SE_PPU_MC_SRH_FC_CNT_Lr = 2087, + PPU_PPU_PPU_SE_MC_SRH_FC_CNT_Hr = 2088, + PPU_PPU_PPU_SE_MC_SRH_FC_CNT_Lr = 2089, + PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_Hr = 2090, + PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_Lr = 2091, + PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_Hr = 2092, + PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_Lr = 2093, + PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_Hr = 2094, + PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_Lr = 2095, + PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_Hr = 2096, + PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_Lr = 2097, + PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_Hr = 2098, + PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_Lr = 2099, + PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_Hr = 2100, + PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_Lr = 2101, + PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_Hr = 2102, + PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_Lr = 2103, + PPU_PPU_PBU_PPU_WB_FC_CNT_Hr = 2104, + PPU_PPU_PBU_PPU_WB_FC_CNT_Lr = 2105, + PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_Hr = 2106, + PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_Lr = 2107, + PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_Hr = 2108, + PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_Lr = 2109, + PPU_PPU_ODMA_PPU_PARA_FC_CNT_Hr = 2110, + PPU_PPU_ODMA_PPU_PARA_FC_CNT_Lr = 2111, + PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_Hr = 2112, + PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_Lr = 2113, + PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_Hr = 2114, + PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_Lr = 2115, + PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_Hr = 2116, + PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_Lr = 2117, + 
PPU_PPU_PPU_PKTRX_UC_FC_CNT_Hr = 2118, + PPU_PPU_PPU_PKTRX_UC_FC_CNT_Lr = 2119, + PPU_PPU_PPU_PKTRX_MC_FC_CNT_Hr = 2120, + PPU_PPU_PPU_PKTRX_MC_FC_CNT_Lr = 2121, + PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_Hr = 2122, + PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_Lr = 2123, + PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_Hr = 2124, + PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_Lr = 2125, + PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_Hr = 2126, + PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_Lr = 2127, + PPU_PPU_PPU_PBU_WB_VLD_CNT_Hr = 2128, + PPU_PPU_PPU_PBU_WB_VLD_CNT_Lr = 2129, + PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_Hr = 2130, + PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_Lr = 2131, + PPU_PPU_PPU_ODMA_PARA_VLD_CNT_Hr = 2132, + PPU_PPU_PPU_ODMA_PARA_VLD_CNT_Lr = 2133, + PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_Hr = 2134, + PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_Lr = 2135, + PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_Hr = 2136, + PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_Lr = 2137, + PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_Hr = 2138, + PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_Lr = 2139, + PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_Hr = 2140, + PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_Lr = 2141, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_Hr = 2142, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_Lr = 2143, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_Hr = 2144, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_Lr = 2145, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_Hr = 2146, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_Lr = 2147, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_Hr = 2148, + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_Lr = 2149, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_Hr = 2150, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_Lr = 2151, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_Hr = 2152, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_Lr = 2153, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_Hr = 2154, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_Lr = 2155, + 
PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_Hr = 2156, + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_Lr = 2157, + PPU_PPU_STATICS_PPU_WB_VLD_CNT_Hr = 2158, + PPU_PPU_STATICS_PPU_WB_VLD_CNT_Lr = 2159, + PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_Hr = 2160, + PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_Lr = 2161, + PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_Hr = 2162, + PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_Lr = 2163, + PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_Hr = 2164, + PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_Lr = 2165, + PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_Hr = 2166, + PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_Lr = 2167, + PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_Hr = 2168, + PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_Lr = 2169, + PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_Hr = 2170, + PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_Lr = 2171, + PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_Hr = 2172, + PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_Lr = 2173, + PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_Hr = 2174, + PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_Lr = 2175, + PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_Hr = 2176, + PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_Lr = 2177, + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_Hr = 2178, + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_Lr = 2179, + PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_Hr = 2180, + PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_Lr = 2181, + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_Hr = 2182, + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_Lr = 2183, + PPU_PPU_CAR_GREEN_PKT_VLD_CNT_Hr = 2184, + PPU_PPU_CAR_GREEN_PKT_VLD_CNT_Lr = 2185, + PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_Hr = 2186, + PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_Lr = 2187, + PPU_PPU_CAR_RED_PKT_VLD_CNT_Hr = 2188, + PPU_PPU_CAR_RED_PKT_VLD_CNT_Lr = 2189, + PPU_PPU_CAR_DROP_PKT_VLD_CNT_Hr = 2190, + PPU_PPU_CAR_DROP_PKT_VLD_CNT_Lr = 2191, + PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_Hr = 2192, + 
PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_Lr = 2193, + PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_Hr = 2194, + PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_Lr = 2195, + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_ASSERT_CFGr = 2196, + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_NEGATE_CFGr = 2197, + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_ASSERT_CFGr = 2198, + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_NEGATE_CFGr = 2199, + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_ASSERT_CFGr = 2200, + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_NEGATE_CFGr = 2201, + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_ASSERT_CFGr = 2202, + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_NEGATE_CFGr = 2203, + PPU_PPU_MCCNT_FIFO_PROG_FULL_ASSERT_CFGr = 2204, + PPU_PPU_MCCNT_FIFO_PROG_FULL_NEGATE_CFGr = 2205, + PPU_PPU_MCCNT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2206, + PPU_PPU_MCCNT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2207, + PPU_PPU_UC_MF_FIFO_PROG_FULL_ASSERT_CFGr = 2208, + PPU_PPU_UC_MF_FIFO_PROG_FULL_NEGATE_CFGr = 2209, + PPU_PPU_UC_MF_FIFO_PROG_EMPTY_ASSERT_CFGr = 2210, + PPU_PPU_UC_MF_FIFO_PROG_EMPTY_NEGATE_CFGr = 2211, + PPU_PPU_MC_MF_FIFO_PROG_FULL_ASSERT_CFGr = 2212, + PPU_PPU_MC_MF_FIFO_PROG_FULL_NEGATE_CFGr = 2213, + PPU_PPU_MC_MF_FIFO_PROG_EMPTY_ASSERT_CFGr = 2214, + PPU_PPU_MC_MF_FIFO_PROG_EMPTY_NEGATE_CFGr = 2215, + PPU_PPU_ISU_MF_FIFO_PROG_FULL_ASSERT_CFGr = 2216, + PPU_PPU_ISU_MF_FIFO_PROG_FULL_NEGATE_CFGr = 2217, + PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_ASSERT_CFGr = 2218, + PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_NEGATE_CFGr = 2219, + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_ASSERT_CFGr = 2220, + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_NEGATE_CFGr = 2221, + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_ASSERT_CFGr = 2222, + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_NEGATE_CFGr = 2223, + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_ASSERT_CFGr = 2224, + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_NEGATE_CFGr = 2225, + PPU_PPU_GROUP_ID_FIFO_PROG_FULL_ASSERT_CFGr = 2226, + PPU_PPU_GROUP_ID_FIFO_PROG_FULL_NEGATE_CFGr = 2227, + PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_ASSERT_CFGr 
= 2228, + PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_NEGATE_CFGr = 2229, + PPU_PPU_SA_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2230, + PPU_PPU_SA_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2231, + PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2232, + PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2233, + PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_ASSERT_CFGr = 2234, + PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_NEGATE_CFGr = 2235, + PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr = 2236, + PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr = 2237, + PPU_PPU_DUP_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2238, + PPU_PPU_DUP_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2239, + PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2240, + PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2241, + PPU_PPU_PF_RSP_FIFO_PROG_FULL_ASSERT_CFGr = 2242, + PPU_PPU_PF_RSP_FIFO_PROG_FULL_NEGATE_CFGr = 2243, + PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr = 2244, + PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr = 2245, + PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_ASSERT_CFGr = 2246, + PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_NEGATE_CFGr = 2247, + PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_ASSERT_CFGr = 2248, + PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_NEGATE_CFGr = 2249, + PPU_PPU_PF_REQ_FIFO_PROG_FULL_ASSERT_CFGr = 2250, + PPU_PPU_PF_REQ_FIFO_PROG_FULL_NEGATE_CFGr = 2251, + PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_ASSERT_CFGr = 2252, + PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_NEGATE_CFGr = 2253, + PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_ASSERT_CFGr = 2254, + PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_NEGATE_CFGr = 2255, + PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_ASSERT_CFGr = 2256, + PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_NEGATE_CFGr = 2257, + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_ASSERT_CFGr = 2258, + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_NEGATE_CFGr = 2259, + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2260, + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2261, + PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_ASSERT_CFGr = 2262, + PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_NEGATE_CFGr = 2263, + 
PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_ASSERT_CFGr = 2264, + PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_NEGATE_CFGr = 2265, + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2266, + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2267, + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2268, + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2269, + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_ASSERT_CFGr = 2270, + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_NEGATE_CFGr = 2271, + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2272, + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2273, + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_ASSERT_CFGr = 2274, + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_NEGATE_CFGr = 2275, + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2276, + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2277, + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2278, + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2279, + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2280, + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2281, + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_ASSERT_CFGr = 2282, + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_NEGATE_CFGr = 2283, + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_ASSERT_CFGr = 2284, + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_NEGATE_CFGr = 2285, + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2286, + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2287, + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2288, + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2289, + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_ASSERT_CFGr = 2290, + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_NEGATE_CFGr = 2291, + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2292, + 
PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2293, + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_ASSERT_CFGr = 2294, + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_NEGATE_CFGr = 2295, + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr = 2296, + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr = 2297, + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_ASSERT_CFGr = 2298, + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_NEGATE_CFGr = 2299, + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2300, + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2301, + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_ASSERT_CFGr = 2302, + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_NEGATE_CFGr = 2303, + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2304, + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2305, + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_ASSERT_CFGr = 2306, + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_NEGATE_CFGr = 2307, + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_ASSERT_CFGr = 2308, + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_NEGATE_CFGr = 2309, + PPU_PPU_PKT_DATA0r = 2310, + PPU_PPU_PKT_DATA1r = 2311, + PPU_PPU_PKT_DATA2r = 2312, + PPU_PPU_PKT_DATA3r = 2313, + PPU_PPU_PKT_DATA4r = 2314, + PPU_PPU_PKT_DATA5r = 2315, + PPU_PPU_PKT_DATA6r = 2316, + PPU_PPU_PKT_DATA7r = 2317, + PPU_PPU_PKT_DATA8r = 2318, + PPU_PPU_PKT_DATA9r = 2319, + PPU_PPU_PKT_DATA10r = 2320, + PPU_PPU_PKT_DATA11r = 2321, + PPU_PPU_PKT_DATA12r = 2322, + PPU_PPU_PKT_DATA13r = 2323, + PPU_PPU_PKT_DATA14r = 2324, + PPU_PPU_PKT_DATA15r = 2325, + PPU_PPU_PKT_DATA16r = 2326, + PPU_PPU_PKT_DATA17r = 2327, + PPU_PPU_PKT_DATA18r = 2328, + PPU_PPU_PKT_DATA19r = 2329, + PPU_PPU_PKT_DATA20r = 2330, + PPU_PPU_PKT_DATA21r = 2331, + PPU_PPU_PKT_DATA22r = 2332, + PPU_PPU_PKT_DATA23r = 2333, + PPU_PPU_PKT_DATA24r = 2334, + PPU_PPU_PKT_DATA25r = 2335, + PPU_PPU_PKT_DATA26r = 2336, + PPU_PPU_PKT_DATA27r = 2337, + 
PPU_PPU_PKT_DATA28r = 2338, + PPU_PPU_PKT_DATA29r = 2339, + PPU_PPU_PKT_DATA30r = 2340, + PPU_PPU_PKT_DATA31r = 2341, + PPU_PPU_PKT_DATA32r = 2342, + PPU_PPU_PKT_DATA33r = 2343, + PPU_PPU_PKT_DATA34r = 2344, + PPU_PPU_PKT_DATA35r = 2345, + PPU_PPU_PKT_DATA36r = 2346, + PPU_PPU_PKT_DATA37r = 2347, + PPU_PPU_PKT_DATA38r = 2348, + PPU_PPU_PKT_DATA39r = 2349, + PPU_PPU_PKT_DATA40r = 2350, + PPU_PPU_PKT_DATA41r = 2351, + PPU_PPU_PKT_DATA42r = 2352, + PPU_PPU_PKT_DATA43r = 2353, + PPU_PPU_PKT_DATA44r = 2354, + PPU_PPU_PKT_DATA45r = 2355, + PPU_PPU_PKT_DATA46r = 2356, + PPU_PPU_PKT_DATA47r = 2357, + PPU_PPU_PKT_DATA48r = 2358, + PPU_PPU_PKT_DATA49r = 2359, + PPU_PPU_PKT_DATA50r = 2360, + PPU_PPU_PKT_DATA51r = 2361, + PPU_PPU_PKT_DATA52r = 2362, + PPU_PPU_PKT_DATA53r = 2363, + PPU_PPU_PKT_DATA54r = 2364, + PPU_PPU_PKT_DATA55r = 2365, + PPU_PPU_PKT_DATA56r = 2366, + PPU_PPU_PKT_DATA57r = 2367, + PPU_PPU_PKT_DATA58r = 2368, + PPU_PPU_PKT_DATA59r = 2369, + PPU_PPU_PKT_DATA60r = 2370, + PPU_PPU_PKT_DATA61r = 2371, + PPU_PPU_PKT_DATA62r = 2372, + PPU_PPU_PKT_DATA63r = 2373, + PPU_PPU_PKT_DATA64r = 2374, + PPU_PPU_PKT_DATA65r = 2375, + PPU_PPU_PKT_DATA66r = 2376, + PPU_PPU_PKT_DATA67r = 2377, + PPU_PPU_PKT_DATA68r = 2378, + PPU_PPU_PKT_DATA69r = 2379, + PPU_PPU_PKT_DATA70r = 2380, + PPU_PPU_PKT_DATA71r = 2381, + PPU_PPU_PKT_DATA72r = 2382, + PPU_PPU_PKT_DATA73r = 2383, + PPU_PPU_PKT_DATA74r = 2384, + PPU_PPU_PKT_DATA75r = 2385, + PPU_PPU_PKT_DATA76r = 2386, + PPU_PPU_PKT_DATA77r = 2387, + PPU_PPU_PKT_DATA78r = 2388, + PPU_PPU_PKT_DATA79r = 2389, + PPU_PPU_PKT_DATA80r = 2390, + PPU_PPU_PKT_DATA81r = 2391, + PPU_PPU_PKT_DATA82r = 2392, + PPU_PPU_PKT_DATA83r = 2393, + PPU_PPU_PKT_DATA84r = 2394, + PPU_PPU_PKT_DATA85r = 2395, + PPU_PPU_PKT_DATA86r = 2396, + PPU_PPU_PKT_DATA87r = 2397, + PPU_PPU_PKT_DATA88r = 2398, + PPU_PPU_PKT_DATA89r = 2399, + PPU_PPU_PKT_DATA90r = 2400, + PPU_PPU_PKT_DATA91r = 2401, + PPU_PPU_PKT_DATA92r = 2402, + PPU_PPU_PKT_DATA93r = 2403, + PPU_PPU_PKT_DATA94r 
= 2404, + PPU_PPU_PKT_DATA95r = 2405, + PPU_PPU_PKT_DATA96r = 2406, + PPU_PPU_PKT_DATA97r = 2407, + PPU_PPU_PKT_DATA98r = 2408, + PPU_PPU_PKT_DATA99r = 2409, + PPU_PPU_PKT_DATA100r = 2410, + PPU_PPU_PKT_DATA101r = 2411, + PPU_PPU_PKT_DATA102r = 2412, + PPU_PPU_PKT_DATA103r = 2413, + PPU_PPU_PKT_DATA104r = 2414, + PPU_PPU_PKT_DATA105r = 2415, + PPU_PPU_PKT_DATA106r = 2416, + PPU_PPU_PKT_DATA107r = 2417, + PPU_PPU_PKT_DATA108r = 2418, + PPU_PPU_PKT_DATA109r = 2419, + PPU_PPU_PKT_DATA110r = 2420, + PPU_PPU_PKT_DATA111r = 2421, + PPU_PPU_PKT_DATA112r = 2422, + PPU_PPU_PKT_DATA113r = 2423, + PPU_PPU_PKT_DATA114r = 2424, + PPU_PPU_PKT_DATA115r = 2425, + PPU_PPU_PKT_DATA116r = 2426, + PPU_PPU_PKT_DATA117r = 2427, + PPU_PPU_PKT_DATA118r = 2428, + PPU_PPU_PKT_DATA119r = 2429, + PPU_PPU_PKT_DATA120r = 2430, + PPU_PPU_PKT_DATA121r = 2431, + PPU_PPU_PKT_DATA122r = 2432, + PPU_PPU_PKT_DATA123r = 2433, + PPU_PPU_PKT_DATA124r = 2434, + PPU_PPU_PKT_DATA125r = 2435, + PPU_PPU_PKT_DATA126r = 2436, + PPU_PPU_PKT_DATA127r = 2437, + PPU_PPU_SPR0r = 2438, + PPU_PPU_SPR1r = 2439, + PPU_PPU_SPR2r = 2440, + PPU_PPU_SPR3r = 2441, + PPU_PPU_SPR4r = 2442, + PPU_PPU_SPR5r = 2443, + PPU_PPU_SPR6r = 2444, + PPU_PPU_SPR7r = 2445, + PPU_PPU_SPR8r = 2446, + PPU_PPU_SPR9r = 2447, + PPU_PPU_SPR10r = 2448, + PPU_PPU_SPR11r = 2449, + PPU_PPU_SPR12r = 2450, + PPU_PPU_SPR13r = 2451, + PPU_PPU_SPR14r = 2452, + PPU_PPU_SPR15r = 2453, + PPU_PPU_SPR16r = 2454, + PPU_PPU_SPR17r = 2455, + PPU_PPU_SPR18r = 2456, + PPU_PPU_SPR19r = 2457, + PPU_PPU_SPR20r = 2458, + PPU_PPU_SPR21r = 2459, + PPU_PPU_SPR22r = 2460, + PPU_PPU_SPR23r = 2461, + PPU_PPU_SPR24r = 2462, + PPU_PPU_SPR25r = 2463, + PPU_PPU_SPR26r = 2464, + PPU_PPU_SPR27r = 2465, + PPU_PPU_SPR28r = 2466, + PPU_PPU_SPR29r = 2467, + PPU_PPU_SPR30r = 2468, + PPU_PPU_SPR31r = 2469, + PPU_PPU_RSP0r = 2470, + PPU_PPU_RSP1r = 2471, + PPU_PPU_RSP2r = 2472, + PPU_PPU_RSP3r = 2473, + PPU_PPU_RSP4r = 2474, + PPU_PPU_RSP5r = 2475, + PPU_PPU_RSP6r = 2476, + PPU_PPU_RSP7r 
= 2477, + PPU_PPU_RSP8r = 2478, + PPU_PPU_RSP9r = 2479, + PPU_PPU_RSP10r = 2480, + PPU_PPU_RSP11r = 2481, + PPU_PPU_RSP12r = 2482, + PPU_PPU_RSP13r = 2483, + PPU_PPU_RSP14r = 2484, + PPU_PPU_RSP15r = 2485, + PPU_PPU_RSP16r = 2486, + PPU_PPU_RSP17r = 2487, + PPU_PPU_RSP18r = 2488, + PPU_PPU_RSP19r = 2489, + PPU_PPU_RSP20r = 2490, + PPU_PPU_RSP21r = 2491, + PPU_PPU_RSP22r = 2492, + PPU_PPU_RSP23r = 2493, + PPU_PPU_RSP24r = 2494, + PPU_PPU_RSP25r = 2495, + PPU_PPU_RSP26r = 2496, + PPU_PPU_RSP27r = 2497, + PPU_PPU_RSP28r = 2498, + PPU_PPU_RSP29r = 2499, + PPU_PPU_RSP30r = 2500, + PPU_PPU_RSP31r = 2501, + PPU_PPU_KEY0r = 2502, + PPU_PPU_KEY1r = 2503, + PPU_PPU_KEY2r = 2504, + PPU_PPU_KEY3r = 2505, + PPU_PPU_KEY4r = 2506, + PPU_PPU_KEY5r = 2507, + PPU_PPU_KEY6r = 2508, + PPU_PPU_KEY7r = 2509, + PPU_PPU_KEY8r = 2510, + PPU_PPU_KEY9r = 2511, + PPU_PPU_KEY10r = 2512, + PPU_PPU_KEY11r = 2513, + PPU_PPU_KEY12r = 2514, + PPU_PPU_KEY13r = 2515, + PPU_PPU_KEY14r = 2516, + PPU_PPU_KEY15r = 2517, + PPU_PPU_KEY16r = 2518, + PPU_PPU_KEY17r = 2519, + PPU_PPU_KEY18r = 2520, + PPU_PPU_KEY19r = 2521, + PPU_PPU_FLAGr = 2522, + PPU_CLUSTER_INT_1200M_FLAGr = 2523, + PPU_CLUSTER_BP_INSTR_Lr = 2524, + PPU_CLUSTER_BP_INSTR_Hr = 2525, + PPU_CLUSTER_BP_ADDRr = 2526, + PPU_CLUSTER_DRRr = 2527, + PPU_CLUSTER_DSRr = 2528, + PPU_CLUSTER_DBG_RTL_DATEr = 2529, + PPU_CLUSTER_ME_CONTINUEr = 2530, + PPU_CLUSTER_ME_STEPr = 2531, + PPU_CLUSTER_ME_REFRESHr = 2532, + PPU_CLUSTER_DRR_CLRr = 2533, + PPU_CLUSTER_ME_BUSY_THRESOLDr = 2534, + PPU_CLUSTER_INT_1200M_STAr = 2535, + PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_Lr = 2536, + PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_Hr = 2537, + PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_Lr = 2538, + PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_Hr = 2539, + PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_Lr = 2540, + PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_Hr = 2541, + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_Lr = 2542, + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_Hr = 2543, + 
PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_Lr = 2544, + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_Hr = 2545, + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_Lr = 2546, + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_Hr = 2547, + PPU_CLUSTER_PPU_STATICS_WB_EXCEPTION_CFGr = 2548, + PPU_CLUSTER_THREAD_SWITCH_ENr = 2549, + PPU_CLUSTER_IS_ME_NOT_IDLEr = 2550, + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2551, + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2552, + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2553, + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2554, + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2555, + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2556, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_ASSERT_CFGr = 2557, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_NEGATE_CFGr = 2558, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_ASSERT_CFGr = 2559, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_NEGATE_CFGr = 2560, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_ASSERT_CFGr = 2561, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_NEGATE_CFGr = 2562, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_ASSERT_CFGr = 2563, + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_NEGATE_CFGr = 2564, + PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2565, + PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2566, + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFGr = 2567, + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFGr = 2568, + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2569, + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2570, + PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_ASSERT_CFGr = 2571, + PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_NEGATE_CFGr = 2572, + PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr = 2573, + PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr = 2574, + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_ASSERT_CFGr = 2575, + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_NEGATE_CFGr 
= 2576, + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr = 2577, + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr = 2578, + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFGr = 2579, + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFGr = 2580, + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr = 2581, + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr = 2582, + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_ASSERT_CFGr = 2583, + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_NEGATE_CFGr = 2584, + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_ASSERT_CFGr = 2585, + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_NEGATE_CFGr = 2586, + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_ASSERT_CFGr = 2587, + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_NEGATE_CFGr = 2588, + PPU_CLUSTER_INT_600M_CLUSTER_MEX_FIFO_MASKr = 2589, + PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_FLAGr = 2590, + PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_STAr = 2591, + PPU_CLUSTER_MEX_CNT_CFGr = 2592, + PPU_CLUSTER_INT_600M_CLUSTER_MEX_RAM_ECC_ERROR_INTERRUPT_MASKr = 2593, + PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_FLAGr = 2594, + PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_STAr = 2595, + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_ASSERT_CFGr = 2596, + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_NEGATE_CFGr = 2597, + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_ASSERT_CFGr = 2598, + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_NEGATE_CFGr = 2599, + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_ASSERT_CFGr = 2600, + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_NEGATE_CFGr = 2601, + PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_ASSERT_CFGr = 2602, + PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_NEGATE_CFGr = 2603, + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2604, + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2605, + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2606, + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2607, + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr = 2608, + 
PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr = 2609, + PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_Hr = 2610, + PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_Lr = 2611, + PPU_CLUSTER_CLUSTER_ISE_KEY_OUT_VLD_CNTr = 2612, + PPU_CLUSTER_ISE_CLUSTER_RSP_IN_VLD_CNTr = 2613, + PPU_CLUSTER_CLUSTER_ESE_KEY_OUT_VLD_CNTr = 2614, + PPU_CLUSTER_ESE_CLUSTER_RSP_IN_VLD_CNTr = 2615, + PPU_CLUSTER_CLUSTER_STAT_CMD_VLD_CNTr = 2616, + PPU_CLUSTER_STAT_CLUSTER_RSP_VLD_CNTr = 2617, + PPU_CLUSTER_MEX_DEBUG_KEY_VLD_CNTr = 2618, + PPU_CLUSTER_ISE_CLUSTER_KEY_FC_CNTr = 2619, + PPU_CLUSTER_ESE_CLUSTER_KEY_FC_CNTr = 2620, + PPU_CLUSTER_CLUSTER_ISE_RSP_FC_CNTr = 2621, + PPU_CLUSTER_CLUSTER_ESE_RSP_FC_CNTr = 2622, + PPU_CLUSTER_STAT_CLUSTER_CMD_FC_CNTr = 2623, + PPU_CLUSTER_CLUSTER_STAT_RSP_FC_CNTr = 2624, + PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_Lr = 2625, + PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_Hr = 2626, + PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_Lr = 2627, + PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_Hr = 2628, + PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_Lr = 2629, + PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_Hr = 2630, + PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_Lr = 2631, + PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_Hr = 2632, + PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_Lr = 2633, + PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_Hr = 2634, + PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_Lr = 2635, + PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_Hr = 2636, + PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_Lr = 2637, + PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_Hr = 2638, + PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_Lr = 2639, + PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_Hr = 2640, + PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_Lr = 2641, + PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_Hr = 2642, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_Lr = 2643, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_Hr = 2644, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_Lr = 2645, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_Hr = 2646, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_Lr = 2647, + PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_Hr = 2648, + PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_Lr = 2649, + 
PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_Hr = 2650, + PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_Lr = 2651, + PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_Hr = 2652, + PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_Lr = 2653, + PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_Hr = 2654, + PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_Lr = 2655, + PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_Hr = 2656, + PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_Lr = 2657, + PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_Hr = 2658, + PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_Lr = 2659, + PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_Hr = 2660, + PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_Lr = 2661, + PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_Hr = 2662, + PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_Lr = 2663, + PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_Hr = 2664, + PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_Lr = 2665, + PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_Hr = 2666, + PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_Lr = 2667, + PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_Hr = 2668, + PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_Lr = 2669, + PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_Hr = 2670, + PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_Lr = 2671, + PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_Hr = 2672, + PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_Lr = 2673, + PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_Hr = 2674, + PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_Lr = 2675, + PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_Hr = 2676, + PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_Lr = 2677, + PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_Hr = 2678, + PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_Lr = 2679, + PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_Hr = 2680, + PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_Lr = 2681, + PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_Hr = 2682, + SE_CFG_PPU_SOFT_RSTr = 2683, + SE_CFG_EPT_FLAGr = 2684, + SE_CFG_DDR_KEY_LK0_3r = 2685, + SE_CFG_DDR_KEY_LK0_2r = 2686, + SE_CFG_DDR_KEY_LK0_1r = 2687, + SE_CFG_DDR_KEY_LK0_0r = 2688, + SE_CFG_DDR_KEY_LK1_3r = 2689, + SE_CFG_DDR_KEY_LK1_2r = 2690, + SE_CFG_DDR_KEY_LK1_1r = 2691, + SE_CFG_DDR_KEY_LK1_0r = 
2692, + SE_CFG_HASH_KEY_LK0_18r = 2693, + SE_CFG_HASH_KEY_LK0_17r = 2694, + SE_CFG_HASH_KEY_LK0_16r = 2695, + SE_CFG_HASH_KEY_LK0_15r = 2696, + SE_CFG_HASH_KEY_LK0_14r = 2697, + SE_CFG_HASH_KEY_LK0_13r = 2698, + SE_CFG_HASH_KEY_LK0_12r = 2699, + SE_CFG_HASH_KEY_LK0_11r = 2700, + SE_CFG_HASH_KEY_LK0_10r = 2701, + SE_CFG_HASH_KEY_LK0_9r = 2702, + SE_CFG_HASH_KEY_LK0_8r = 2703, + SE_CFG_HASH_KEY_LK0_7r = 2704, + SE_CFG_HASH_KEY_LK0_6r = 2705, + SE_CFG_HASH_KEY_LK0_5r = 2706, + SE_CFG_HASH_KEY_LK0_4r = 2707, + SE_CFG_HASH_KEY_LK0_3r = 2708, + SE_CFG_HASH_KEY_LK0_2r = 2709, + SE_CFG_HASH_KEY_LK0_1r = 2710, + SE_CFG_HASH_KEY_LK0_0r = 2711, + SE_CFG_HASH_KEY_LK1_18r = 2712, + SE_CFG_HASH_KEY_LK1_17r = 2713, + SE_CFG_HASH_KEY_LK1_16r = 2714, + SE_CFG_HASH_KEY_LK1_15r = 2715, + SE_CFG_HASH_KEY_LK1_14r = 2716, + SE_CFG_HASH_KEY_LK1_13r = 2717, + SE_CFG_HASH_KEY_LK1_12r = 2718, + SE_CFG_HASH_KEY_LK1_11r = 2719, + SE_CFG_HASH_KEY_LK1_10r = 2720, + SE_CFG_HASH_KEY_LK1_9r = 2721, + SE_CFG_HASH_KEY_LK1_8r = 2722, + SE_CFG_HASH_KEY_LK1_7r = 2723, + SE_CFG_HASH_KEY_LK1_6r = 2724, + SE_CFG_HASH_KEY_LK1_5r = 2725, + SE_CFG_HASH_KEY_LK1_4r = 2726, + SE_CFG_HASH_KEY_LK1_3r = 2727, + SE_CFG_HASH_KEY_LK1_2r = 2728, + SE_CFG_HASH_KEY_LK1_1r = 2729, + SE_CFG_HASH_KEY_LK1_0r = 2730, + SE_CFG_HASH_KEY_LK2_18r = 2731, + SE_CFG_HASH_KEY_LK2_17r = 2732, + SE_CFG_HASH_KEY_LK2_16r = 2733, + SE_CFG_HASH_KEY_LK2_15r = 2734, + SE_CFG_HASH_KEY_LK2_14r = 2735, + SE_CFG_HASH_KEY_LK2_13r = 2736, + SE_CFG_HASH_KEY_LK2_12r = 2737, + SE_CFG_HASH_KEY_LK2_11r = 2738, + SE_CFG_HASH_KEY_LK2_10r = 2739, + SE_CFG_HASH_KEY_LK2_9r = 2740, + SE_CFG_HASH_KEY_LK2_8r = 2741, + SE_CFG_HASH_KEY_LK2_7r = 2742, + SE_CFG_HASH_KEY_LK2_6r = 2743, + SE_CFG_HASH_KEY_LK2_5r = 2744, + SE_CFG_HASH_KEY_LK2_4r = 2745, + SE_CFG_HASH_KEY_LK2_3r = 2746, + SE_CFG_HASH_KEY_LK2_2r = 2747, + SE_CFG_HASH_KEY_LK2_1r = 2748, + SE_CFG_HASH_KEY_LK2_0r = 2749, + SE_CFG_HASH_KEY_LK3_18r = 2750, + SE_CFG_HASH_KEY_LK3_17r = 2751, + 
SE_CFG_HASH_KEY_LK3_16r = 2752, + SE_CFG_HASH_KEY_LK3_15r = 2753, + SE_CFG_HASH_KEY_LK3_14r = 2754, + SE_CFG_HASH_KEY_LK3_13r = 2755, + SE_CFG_HASH_KEY_LK3_12r = 2756, + SE_CFG_HASH_KEY_LK3_11r = 2757, + SE_CFG_HASH_KEY_LK3_10r = 2758, + SE_CFG_HASH_KEY_LK3_9r = 2759, + SE_CFG_HASH_KEY_LK3_8r = 2760, + SE_CFG_HASH_KEY_LK3_7r = 2761, + SE_CFG_HASH_KEY_LK3_6r = 2762, + SE_CFG_HASH_KEY_LK3_5r = 2763, + SE_CFG_HASH_KEY_LK3_4r = 2764, + SE_CFG_HASH_KEY_LK3_3r = 2765, + SE_CFG_HASH_KEY_LK3_2r = 2766, + SE_CFG_HASH_KEY_LK3_1r = 2767, + SE_CFG_HASH_KEY_LK3_0r = 2768, + SE_CFG_LPM_KEY_LK0_6r = 2769, + SE_CFG_LPM_KEY_LK0_5r = 2770, + SE_CFG_LPM_KEY_LK0_4r = 2771, + SE_CFG_LPM_KEY_LK0_3r = 2772, + SE_CFG_LPM_KEY_LK0_2r = 2773, + SE_CFG_LPM_KEY_LK0_1r = 2774, + SE_CFG_LPM_KEY_LK0_0r = 2775, + SE_CFG_LPM_KEY_LK1_6r = 2776, + SE_CFG_LPM_KEY_LK1_5r = 2777, + SE_CFG_LPM_KEY_LK1_4r = 2778, + SE_CFG_LPM_KEY_LK1_3r = 2779, + SE_CFG_LPM_KEY_LK1_2r = 2780, + SE_CFG_LPM_KEY_LK1_1r = 2781, + SE_CFG_LPM_KEY_LK1_0r = 2782, + SE_CFG_LPM_KEY_LK2_6r = 2783, + SE_CFG_LPM_KEY_LK2_5r = 2784, + SE_CFG_LPM_KEY_LK2_4r = 2785, + SE_CFG_LPM_KEY_LK2_3r = 2786, + SE_CFG_LPM_KEY_LK2_2r = 2787, + SE_CFG_LPM_KEY_LK2_1r = 2788, + SE_CFG_LPM_KEY_LK2_0r = 2789, + SE_CFG_LPM_KEY_LK3_6r = 2790, + SE_CFG_LPM_KEY_LK3_5r = 2791, + SE_CFG_LPM_KEY_LK3_4r = 2792, + SE_CFG_LPM_KEY_LK3_3r = 2793, + SE_CFG_LPM_KEY_LK3_2r = 2794, + SE_CFG_LPM_KEY_LK3_1r = 2795, + SE_CFG_LPM_KEY_LK3_0r = 2796, + SE_CFG_ETCAM_KEY_LK0_22r = 2797, + SE_CFG_ETCAM_KEY_LK0_21r = 2798, + SE_CFG_ETCAM_KEY_LK0_20r = 2799, + SE_CFG_ETCAM_KEY_LK0_19r = 2800, + SE_CFG_ETCAM_KEY_LK0_18r = 2801, + SE_CFG_ETCAM_KEY_LK0_17r = 2802, + SE_CFG_ETCAM_KEY_LK0_16r = 2803, + SE_CFG_ETCAM_KEY_LK0_15r = 2804, + SE_CFG_ETCAM_KEY_LK0_14r = 2805, + SE_CFG_ETCAM_KEY_LK0_13r = 2806, + SE_CFG_ETCAM_KEY_LK0_12r = 2807, + SE_CFG_ETCAM_KEY_LK0_11r = 2808, + SE_CFG_ETCAM_KEY_LK0_10r = 2809, + SE_CFG_ETCAM_KEY_LK0_9r = 2810, + SE_CFG_ETCAM_KEY_LK0_8r = 2811, + 
SE_CFG_ETCAM_KEY_LK0_7r = 2812, + SE_CFG_ETCAM_KEY_LK0_6r = 2813, + SE_CFG_ETCAM_KEY_LK0_5r = 2814, + SE_CFG_ETCAM_KEY_LK0_4r = 2815, + SE_CFG_ETCAM_KEY_LK0_3r = 2816, + SE_CFG_ETCAM_KEY_LK0_2r = 2817, + SE_CFG_ETCAM_KEY_LK0_1r = 2818, + SE_CFG_ETCAM_KEY_LK0_0r = 2819, + SE_CFG_ETCAM_KEY_LK1_22r = 2820, + SE_CFG_ETCAM_KEY_LK1_21r = 2821, + SE_CFG_ETCAM_KEY_LK1_20r = 2822, + SE_CFG_ETCAM_KEY_LK1_19r = 2823, + SE_CFG_ETCAM_KEY_LK1_18r = 2824, + SE_CFG_ETCAM_KEY_LK1_17r = 2825, + SE_CFG_ETCAM_KEY_LK1_16r = 2826, + SE_CFG_ETCAM_KEY_LK1_15r = 2827, + SE_CFG_ETCAM_KEY_LK1_14r = 2828, + SE_CFG_ETCAM_KEY_LK1_13r = 2829, + SE_CFG_ETCAM_KEY_LK1_12r = 2830, + SE_CFG_ETCAM_KEY_LK1_11r = 2831, + SE_CFG_ETCAM_KEY_LK1_10r = 2832, + SE_CFG_ETCAM_KEY_LK1_9r = 2833, + SE_CFG_ETCAM_KEY_LK1_8r = 2834, + SE_CFG_ETCAM_KEY_LK1_7r = 2835, + SE_CFG_ETCAM_KEY_LK1_6r = 2836, + SE_CFG_ETCAM_KEY_LK1_5r = 2837, + SE_CFG_ETCAM_KEY_LK1_4r = 2838, + SE_CFG_ETCAM_KEY_LK1_3r = 2839, + SE_CFG_ETCAM_KEY_LK1_2r = 2840, + SE_CFG_ETCAM_KEY_LK1_1r = 2841, + SE_CFG_ETCAM_KEY_LK1_0r = 2842, + SE_CFG_ETCAM_KEY_LK2_22r = 2843, + SE_CFG_ETCAM_KEY_LK2_21r = 2844, + SE_CFG_ETCAM_KEY_LK2_20r = 2845, + SE_CFG_ETCAM_KEY_LK2_19r = 2846, + SE_CFG_ETCAM_KEY_LK2_18r = 2847, + SE_CFG_ETCAM_KEY_LK2_17r = 2848, + SE_CFG_ETCAM_KEY_LK2_16r = 2849, + SE_CFG_ETCAM_KEY_LK2_15r = 2850, + SE_CFG_ETCAM_KEY_LK2_14r = 2851, + SE_CFG_ETCAM_KEY_LK2_13r = 2852, + SE_CFG_ETCAM_KEY_LK2_12r = 2853, + SE_CFG_ETCAM_KEY_LK2_11r = 2854, + SE_CFG_ETCAM_KEY_LK2_10r = 2855, + SE_CFG_ETCAM_KEY_LK2_9r = 2856, + SE_CFG_ETCAM_KEY_LK2_8r = 2857, + SE_CFG_ETCAM_KEY_LK2_7r = 2858, + SE_CFG_ETCAM_KEY_LK2_6r = 2859, + SE_CFG_ETCAM_KEY_LK2_5r = 2860, + SE_CFG_ETCAM_KEY_LK2_4r = 2861, + SE_CFG_ETCAM_KEY_LK2_3r = 2862, + SE_CFG_ETCAM_KEY_LK2_2r = 2863, + SE_CFG_ETCAM_KEY_LK2_1r = 2864, + SE_CFG_ETCAM_KEY_LK2_0r = 2865, + SE_CFG_ETCAM_KEY_LK3_22r = 2866, + SE_CFG_ETCAM_KEY_LK3_21r = 2867, + SE_CFG_ETCAM_KEY_LK3_20r = 2868, + SE_CFG_ETCAM_KEY_LK3_19r = 2869, 
+ SE_CFG_ETCAM_KEY_LK3_18r = 2870, + SE_CFG_ETCAM_KEY_LK3_17r = 2871, + SE_CFG_ETCAM_KEY_LK3_16r = 2872, + SE_CFG_ETCAM_KEY_LK3_15r = 2873, + SE_CFG_ETCAM_KEY_LK3_14r = 2874, + SE_CFG_ETCAM_KEY_LK3_13r = 2875, + SE_CFG_ETCAM_KEY_LK3_12r = 2876, + SE_CFG_ETCAM_KEY_LK3_11r = 2877, + SE_CFG_ETCAM_KEY_LK3_10r = 2878, + SE_CFG_ETCAM_KEY_LK3_9r = 2879, + SE_CFG_ETCAM_KEY_LK3_8r = 2880, + SE_CFG_ETCAM_KEY_LK3_7r = 2881, + SE_CFG_ETCAM_KEY_LK3_6r = 2882, + SE_CFG_ETCAM_KEY_LK3_5r = 2883, + SE_CFG_ETCAM_KEY_LK3_4r = 2884, + SE_CFG_ETCAM_KEY_LK3_3r = 2885, + SE_CFG_ETCAM_KEY_LK3_2r = 2886, + SE_CFG_ETCAM_KEY_LK3_1r = 2887, + SE_CFG_ETCAM_KEY_LK3_0r = 2888, + SE_CFG_PBU_KEY_LK0_3r = 2889, + SE_CFG_PBU_KEY_LK0_2r = 2890, + SE_CFG_PBU_KEY_LK0_1r = 2891, + SE_CFG_PBU_KEY_LK0_0r = 2892, + SE_CFG_PBU_KEY_LK1_3r = 2893, + SE_CFG_PBU_KEY_LK1_2r = 2894, + SE_CFG_PBU_KEY_LK1_1r = 2895, + SE_CFG_PBU_KEY_LK1_0r = 2896, + SE_CFG_PBU_KEY_LK2_3r = 2897, + SE_CFG_PBU_KEY_LK2_2r = 2898, + SE_CFG_PBU_KEY_LK2_1r = 2899, + SE_CFG_PBU_KEY_LK2_0r = 2900, + SE_CFG_PBU_KEY_LK3_3r = 2901, + SE_CFG_PBU_KEY_LK3_2r = 2902, + SE_CFG_PBU_KEY_LK3_1r = 2903, + SE_CFG_PBU_KEY_LK3_0r = 2904, + SE_ALG_SCHD_LEARN_FIFO_PFULL_ASTr = 2905, + SE_ALG_SCHD_LEARN_FIFO_PFULL_NEGr = 2906, + SE_ALG_SCHD_HASH0_FIFO_PFULL_ASTr = 2907, + SE_ALG_SCHD_HASH0_FIFO_PFULL_NEGr = 2908, + SE_ALG_SCHD_HASH1_FIFO_PFULL_ASTr = 2909, + SE_ALG_SCHD_HASH1_FIFO_PFULL_NEGr = 2910, + SE_ALG_SCHD_HASH2_FIFO_PFULL_ASTr = 2911, + SE_ALG_SCHD_HASH2_FIFO_PFULL_NEGr = 2912, + SE_ALG_SCHD_HASH3_FIFO_PFULL_ASTr = 2913, + SE_ALG_SCHD_HASH3_FIFO_PFULL_NEGr = 2914, + SE_ALG_SCHD_LPM_FIFO_PFULL_ASTr = 2915, + SE_ALG_SCHD_LPM_FIFO_PFULL_NEGr = 2916, + SE_ALG_HASH0_KEY_FIFO_PFULL_ASTr = 2917, + SE_ALG_HASH0_KEY_FIFO_PFULL_NEGr = 2918, + SE_ALG_HASH0_SREQ_FIFO_PFULL_ASTr = 2919, + SE_ALG_HASH0_SREQ_FIFO_PFULL_NEGr = 2920, + SE_ALG_HASH0_INT_RSP_FIFO_PFULL_ASTr = 2921, + SE_ALG_HASH0_INT_RSP_FIFO_PFULL_NEGr = 2922, + SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_ASTr = 
2923, + SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_NEGr = 2924, + SE_ALG_HASH1_KEY_FIFO_PFULL_ASTr = 2925, + SE_ALG_HASH1_KEY_FIFO_PFULL_NEGr = 2926, + SE_ALG_HASH1_SREQ_FIFO_PFULL_ASTr = 2927, + SE_ALG_HASH1_SREQ_FIFO_PFULL_NEGr = 2928, + SE_ALG_HASH1_INT_RSP_FIFO_PFULL_ASTr = 2929, + SE_ALG_HASH1_INT_RSP_FIFO_PFULL_NEGr = 2930, + SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_ASTr = 2931, + SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_NEGr = 2932, + SE_ALG_HASH2_KEY_FIFO_PFULL_ASTr = 2933, + SE_ALG_HASH2_KEY_FIFO_PFULL_NEGr = 2934, + SE_ALG_HASH2_SREQ_FIFO_PFULL_ASTr = 2935, + SE_ALG_HASH2_SREQ_FIFO_PFULL_NEGr = 2936, + SE_ALG_HASH2_INT_RSP_FIFO_PFULL_ASTr = 2937, + SE_ALG_HASH2_INT_RSP_FIFO_PFULL_NEGr = 2938, + SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_ASTr = 2939, + SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_NEGr = 2940, + SE_ALG_HASH3_KEY_FIFO_PFULL_ASTr = 2941, + SE_ALG_HASH3_KEY_FIFO_PFULL_NEGr = 2942, + SE_ALG_HASH3_SREQ_FIFO_PFULL_ASTr = 2943, + SE_ALG_HASH3_SREQ_FIFO_PFULL_NEGr = 2944, + SE_ALG_HASH3_INT_RSP_FIFO_PFULL_ASTr = 2945, + SE_ALG_HASH3_INT_RSP_FIFO_PFULL_NEGr = 2946, + SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_ASTr = 2947, + SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_NEGr = 2948, + SE_ALG_LPM_AS_INFOr = 2949, + SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_NEGr = 2950, + SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_ASTr = 2951, + SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_NEGr = 2952, + SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_ASTr = 2953, + SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_NEGr = 2954, + SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_ASTr = 2955, + SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_NEGr = 2956, + SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_ASTr = 2957, + SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_NEGr = 2958, + SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_ASTr = 2959, + SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_NEGr = 2960, + SE_ALG_LPM_V4_DDR3_BASE_ADDRr = 2961, + SE_ALG_LPM_V6_DDR3_BASE_ADDRr = 2962, + SE_ALG_DEBUG_CNT_MODEr = 2963, + SE_ALG_HASH_P0_KEY_VLD_CNTr = 2964, + SE_ALG_HASH_P1_KEY_VLD_CNTr = 2965, + SE_ALG_HASH_P2_KEY_VLD_CNTr = 2966, + SE_ALG_HASH_P3_KEY_VLD_CNTr = 2967, + SE_ALG_LPM_P0_KEY_VLD_CNTr = 2968, + 
SE_ALG_HASH_P0_RSP_VLD_CNTr = 2969, + SE_ALG_HASH_P1_RSP_VLD_CNTr = 2970, + SE_ALG_HASH_P2_RSP_VLD_CNTr = 2971, + SE_ALG_HASH_P3_RSP_VLD_CNTr = 2972, + SE_ALG_LPM_P0_RSP_VLD_CNTr = 2973, + SE_ALG_HASH_P0_SMF_CNTr = 2974, + SE_ALG_HASH_P1_SMF_CNTr = 2975, + SE_ALG_HASH_P2_SMF_CNTr = 2976, + SE_ALG_HASH_P3_SMF_CNTr = 2977, + SE_ALG_LPM_P0_SMF_CNTr = 2978, + SE_ALG_HASH_P0_SPACEVLD_CNTr = 2979, + SE_ALG_HASH_P1_SPACEVLD_CNTr = 2980, + SE_ALG_HASH_P2_SPACEVLD_CNTr = 2981, + SE_ALG_HASH_P3_SPACEVLD_CNTr = 2982, + SE_ALG_SMMU1_P0_REQ_VLD_CNTr = 2983, + SE_ALG_SMMU1_P1_REQ_VLD_CNTr = 2984, + SE_ALG_SMMU1_P2_REQ_VLD_CNTr = 2985, + SE_ALG_SMMU1_P3_REQ_VLD_CNTr = 2986, + SE_ALG_SMMU1_P4_REQ_VLD_CNTr = 2987, + SE_ALG_SMMU1_P5_REQ_VLD_CNTr = 2988, + SE_ALG_SMMU1_P0_RSP_VLD_CNTr = 2989, + SE_ALG_SMMU1_P1_RSP_VLD_CNTr = 2990, + SE_ALG_SMMU1_P2_RSP_VLD_CNTr = 2991, + SE_ALG_SMMU1_P3_RSP_VLD_CNTr = 2992, + SE_ALG_SMMU1_P4_RSP_VLD_CNTr = 2993, + SE_ALG_SMMU1_P5_RSP_VLD_CNTr = 2994, + SE_ALG_SCHD_LEARN_FIFO_INT_CNTr = 2995, + SE_ALG_SCHD_HASH0_FIFO_INT_CNTr = 2996, + SE_ALG_SCHD_HASH1_FIFO_INT_CNTr = 2997, + SE_ALG_SCHD_HASH2_FIFO_INT_CNTr = 2998, + SE_ALG_SCHD_HASH3_FIFO_INT_CNTr = 2999, + SE_ALG_SCHD_LPM_FIFO_INT_CNTr = 3000, + SE_ALG_SCHD_LEARN_FIFO_PARITY_ERR_CNTr = 3001, + SE_ALG_SCHD_HASH0_FIFO_PARITY_ERR_CNTr = 3002, + SE_ALG_SCHD_HASH1_FIFO_PARITY_ERR_CNTr = 3003, + SE_ALG_SCHD_HASH2_FIFO_PARITY_ERR_CNTr = 3004, + SE_ALG_SCHD_HASH3_FIFO_PARITY_ERR_CNTr = 3005, + SE_ALG_SCHD_LPM_FIFO_PARITY_ERR_CNTr = 3006, + SE_ALG_RD_INIT_CFT_CNTr = 3007, + SE_ALG_ZGP0_ZBLK0_ECC_ERR_CNTr = 3008, + SE_ALG_ZGP0_ZBLK1_ECC_ERR_CNTr = 3009, + SE_ALG_ZGP0_ZBLK2_ECC_ERR_CNTr = 3010, + SE_ALG_ZGP0_ZBLK3_ECC_ERR_CNTr = 3011, + SE_ALG_ZGP0_ZBLK4_ECC_ERR_CNTr = 3012, + SE_ALG_ZGP0_ZBLK5_ECC_ERR_CNTr = 3013, + SE_ALG_ZGP0_ZBLK6_ECC_ERR_CNTr = 3014, + SE_ALG_ZGP0_ZBLK7_ECC_ERR_CNTr = 3015, + SE_ALG_ZGP1_ZBLK0_ECC_ERR_CNTr = 3016, + SE_ALG_ZGP1_ZBLK1_ECC_ERR_CNTr = 3017, + SE_ALG_ZGP1_ZBLK2_ECC_ERR_CNTr 
= 3018, + SE_ALG_ZGP1_ZBLK3_ECC_ERR_CNTr = 3019, + SE_ALG_ZGP1_ZBLK4_ECC_ERR_CNTr = 3020, + SE_ALG_ZGP1_ZBLK5_ECC_ERR_CNTr = 3021, + SE_ALG_ZGP1_ZBLK6_ECC_ERR_CNTr = 3022, + SE_ALG_ZGP1_ZBLK7_ECC_ERR_CNTr = 3023, + SE_ALG_ZGP2_ZBLK0_ECC_ERR_CNTr = 3024, + SE_ALG_ZGP2_ZBLK1_ECC_ERR_CNTr = 3025, + SE_ALG_ZGP2_ZBLK2_ECC_ERR_CNTr = 3026, + SE_ALG_ZGP2_ZBLK3_ECC_ERR_CNTr = 3027, + SE_ALG_ZGP2_ZBLK4_ECC_ERR_CNTr = 3028, + SE_ALG_ZGP2_ZBLK5_ECC_ERR_CNTr = 3029, + SE_ALG_ZGP2_ZBLK6_ECC_ERR_CNTr = 3030, + SE_ALG_ZGP2_ZBLK7_ECC_ERR_CNTr = 3031, + SE_ALG_ZGP3_ZBLK0_ECC_ERR_CNTr = 3032, + SE_ALG_ZGP3_ZBLK1_ECC_ERR_CNTr = 3033, + SE_ALG_ZGP3_ZBLK2_ECC_ERR_CNTr = 3034, + SE_ALG_ZGP3_ZBLK3_ECC_ERR_CNTr = 3035, + SE_ALG_ZGP3_ZBLK4_ECC_ERR_CNTr = 3036, + SE_ALG_ZGP3_ZBLK5_ECC_ERR_CNTr = 3037, + SE_ALG_ZGP3_ZBLK6_ECC_ERR_CNTr = 3038, + SE_ALG_ZGP3_ZBLK7_ECC_ERR_CNTr = 3039, + SE_ALG_ZCAM_HASH_P0_ERR_CNTr = 3040, + SE_ALG_ZCAM_HASH_P1_ERR_CNTr = 3041, + SE_ALG_ZCAM_HASH_P2_ERR_CNTr = 3042, + SE_ALG_ZCAM_HASH_P3_ERR_CNTr = 3043, + SE_ALG_ZCAM_LPM_ERR_CNTr = 3044, + SE_ALG_HASH0_SREQ_FIFO_PARITY_ERR_CNTr = 3045, + SE_ALG_HASH0_SREQ_FIFO_INT_CNTr = 3046, + SE_ALG_HASH0_KEY_FIFO_INT_CNTr = 3047, + SE_ALG_HASH0_INT_RSP_FIFO_PARITY_ERR_CNTr = 3048, + SE_ALG_HASH0_EXT_RSP_FIFO_PARITY_ERR_CNTr = 3049, + SE_ALG_HASH0_EXT_RSP_FIFO_INT_CNTr = 3050, + SE_ALG_HASH0_INT_RSP_FIFO_INT_CNTr = 3051, + SE_ALG_HASH1_SREQ_FIFO_PARITY_ERR_CNTr = 3052, + SE_ALG_HASH1_SREQ_FIFO_INT_CNTr = 3053, + SE_ALG_HASH1_KEY_FIFO_INT_CNTr = 3054, + SE_ALG_HASH1_INT_RSP_FIFO_PARITY_ERR_CNTr = 3055, + SE_ALG_HASH1_EXT_RSP_FIFO_PARITY_ERR_CNTr = 3056, + SE_ALG_HASH1_EXT_RSP_FIFO_INT_CNTr = 3057, + SE_ALG_HASH1_INT_RSP_FIFO_INT_CNTr = 3058, + SE_ALG_HASH2_SREQ_FIFO_PARITY_ERR_CNTr = 3059, + SE_ALG_HASH2_SREQ_FIFO_INT_CNTr = 3060, + SE_ALG_HASH2_KEY_FIFO_INT_CNTr = 3061, + SE_ALG_HASH2_INT_RSP_FIFO_PARITY_ERR_CNTr = 3062, + SE_ALG_HASH2_EXT_RSP_FIFO_PARITY_ERR_CNTr = 3063, + SE_ALG_HASH2_EXT_RSP_FIFO_INT_CNTr = 3064, + 
SE_ALG_HASH2_INT_RSP_FIFO_INT_CNTr = 3065, + SE_ALG_HASH3_SREQ_FIFO_PARITY_ERR_CNTr = 3066, + SE_ALG_HASH3_SREQ_FIFO_INT_CNTr = 3067, + SE_ALG_HASH3_KEY_FIFO_INT_CNTr = 3068, + SE_ALG_HASH3_INT_RSP_FIFO_PARITY_ERR_CNTr = 3069, + SE_ALG_HASH3_EXT_RSP_FIFO_PARITY_ERR_CNTr = 3070, + SE_ALG_HASH3_EXT_RSP_FIFO_INT_CNTr = 3071, + SE_ALG_HASH3_INT_RSP_FIFO_INT_CNTr = 3072, + SE_ALG_LPM_EXT_RSP_FIFO_INT_CNTr = 3073, + SE_ALG_LPM_EXT_V6_FIFO_INT_CNTr = 3074, + SE_ALG_LPM_EXT_V4_FIFO_INT_CNTr = 3075, + SE_ALG_LPM_EXT_ADDR_FIFO_INT_CNTr = 3076, + SE_ALG_LPM_EXT_V4_FIFO_PARITY_ERR_CNTr = 3077, + SE_ALG_LPM_EXT_V6_FIFO_PARITY_ERR_CNTr = 3078, + SE_ALG_LPM_EXT_RSP_FIFO_PARITY_ERR_CNTr = 3079, + SE_ALG_LPM_AS_REQ_FIFO_INT_CNTr = 3080, + SE_ALG_LPM_AS_INT_RSP_FIFO_INT_CNTr = 3081, + SE_ALG_SE_ALG_INT_STATUSr = 3082, + SE_ALG_SCHD_INT_ENr = 3083, + SE_ALG_SCHD_INT_MASKr = 3084, + SE_ALG_SCHD_INT_STATUSr = 3085, + SE_ALG_ZBLK_ECC_INT_ENr = 3086, + SE_ALG_ZBLK_ECC_INT_MASKr = 3087, + SE_ALG_ZBLK_ECC_INT_STATUSr = 3088, + SE_ALG_HASH0_INT_ENr = 3089, + SE_ALG_HASH0_INT_MASKr = 3090, + SE_ALG_HASH0_INT_STATUSr = 3091, + SE_ALG_HASH1_INT_ENr = 3092, + SE_ALG_HASH1_INT_MASKr = 3093, + SE_ALG_HASH1_INT_STATUSr = 3094, + SE_ALG_HASH2_INT_ENr = 3095, + SE_ALG_HASH2_INT_MASKr = 3096, + SE_ALG_HASH2_INT_STATUSr = 3097, + SE_ALG_HASH3_INT_ENr = 3098, + SE_ALG_HASH3_INT_MASKr = 3099, + SE_ALG_HASH3_INT_STATUSr = 3100, + SE_ALG_LPM_INT_ENr = 3101, + SE_ALG_LPM_INT_MASKr = 3102, + SE_ALG_LPM_INT_STATUSr = 3103, + SE_ALG_ZBLOCK_LPM_MASK0r = 3104, + SE_ALG_ZBLOCK_LPM_MASK1r = 3105, + SE_ALG_ZBLOCK_LPM_MASK2r = 3106, + SE_ALG_ZBLOCK_LPM_MASK3r = 3107, + SE_ALG_ZBLOCK_DEFAULT_ROUTE0r = 3108, + SE_ALG_ZBLOCK_DEFAULT_ROUTE1r = 3109, + SE_ALG_ZBLOCK_DEFAULT_ROUTE2r = 3110, + SE_ALG_ZBLOCK_DEFAULT_ROUTE3r = 3111, + SE_ALG_ZBLOCK_DEFAULT_ROUTE4r = 3112, + SE_ALG_ZBLOCK_DEFAULT_ROUTE5r = 3113, + SE_ALG_ZBLOCK_DEFAULT_ROUTE6r = 3114, + SE_ALG_ZBLOCK_DEFAULT_ROUTE7r = 3115, + 
SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM0r = 3116, + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM1r = 3117, + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM2r = 3118, + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM3r = 3119, + SE_ALG_ZBLOCK_ECC_ERR_STATUSr = 3120, + SE_ALG_ZBLOCK_LPM_V6_SRAM_CMPr = 3121, + SE_ALG_ZBLOCK_LPM_V4_SRAM_CMPr = 3122, + SE_PARSER_KSCHD_PFUL_CFGr = 3123, + SE_PARSER_DEBUG_CNT_MODEr = 3124, + SE_PARSER_PARSER_INT_ENr = 3125, + SE_PARSER_PARSER_INT_MASKr = 3126, + SE_PARSER_PARSER_INT_STATUSr = 3127, + SE_PARSER_PARSER_INT_UNMASK_FLAGr = 3128, + SE_PARSER_ECC_BYPASS_READr = 3129, + SE_PARSER_MEX0_5_REQ_CNTr = 3130, + SE_PARSER_KSCHD_REQ0_5_CNTr = 3131, + SE_PARSER_KSCHD_PARSER_FC0_5_CNTr = 3132, + SE_PARSER_SE_PPU_MEX0_5_FC_CNTr = 3133, + SE_PARSER_SMMU0_MARC_FC_CNTr = 3134, + SE_PARSER_SMMU0_MARC_KEY_CNTr = 3135, + SE_PARSER_CMMU_KEY_CNTr = 3136, + SE_PARSER_CMMU_PARSER_FC_CNTr = 3137, + SE_PARSER_MARC_TAB_TYPE_ERR_MEX0_5_CNTr = 3138, + SE_PARSER_ERAM_FULLADDR_DROP_CNTr = 3139, + SE_AS_HASH0_PFUL_CFGr = 3140, + SE_AS_HASH1_PFUL_CFGr = 3141, + SE_AS_HASH2_PFUL_CFGr = 3142, + SE_AS_HASH3_PFUL_CFGr = 3143, + SE_AS_PBU_PFUL_CFGr = 3144, + SE_AS_LPM_PFUL_CFGr = 3145, + SE_AS_ETCAM_PFUL_CFGr = 3146, + SE_AS_AS_LEARN0_FIFO_CFGr = 3147, + SE_AS_AS_LEARN1_FIFO_CFGr = 3148, + SE_AS_AS_DMA_FIFO_CFGr = 3149, + SE_AS_AGE_PFUL_CFGr = 3150, + SE_AS_ETCAM_RSP_CFGr = 3151, + SE_AS_PBU_ECC_BYPASS_READr = 3152, + SE_AS_ETCAM0_ECC_BYPASS_READr = 3153, + SE_AS_ETCAM1_ECC_BYPASS_READr = 3154, + SE_AS_LPM_ECC_BYPASS_READr = 3155, + SE_AS_HASH_ECC_BYPASS_READr = 3156, + SE_AS_HASH_LEARN_ECC_BYPASS_READr = 3157, + SE_AS_DEBUG_CNT_MODEr = 3158, + SE_AS_AS_INT_0_ENr = 3159, + SE_AS_AS_INT_0_MASKr = 3160, + SE_AS_AS_INT_1_ENr = 3161, + SE_AS_AS_INT_1_MASKr = 3162, + SE_AS_AS_INT_2_ENr = 3163, + SE_AS_AS_INT_2_MASKr = 3164, + SE_AS_AS_INT_0_STATUSr = 3165, + SE_AS_AS_INT_1_STATUSr = 3166, + SE_AS_AS_INT_2_STATUSr = 3167, + SE_AS_SE_AS_INT_STATUSr = 3168, + SE_AS_HASH0_3_WR_REQ_CNTr = 3169, + 
SE_AS_SMMU0_ETCAM0_1_FC_CNTr = 3170, + SE_AS_ETCAM0_1_SMMU0_REQ_CNTr = 3171, + SE_AS_SMMU0_ETCAM0_1_RSP_CNTr = 3172, + SE_AS_AS_HLA_HASH_P0_3_KEY_CNTr = 3173, + SE_AS_AS_HLA_LPM_P0_KEY_CNTr = 3174, + SE_AS_ALG_AS_HASH_P0_3_RSP_CNTr = 3175, + SE_AS_ALG_AS_HASH_P0_3_SMF_RSP_CNTr = 3176, + SE_AS_ALG_AS_LPM_P0_RSP_CNTr = 3177, + SE_AS_ALG_AS_LPM_P0_3_SMF_RSP_CNTr = 3178, + SE_AS_AS_PBU_KEY_CNTr = 3179, + SE_AS_PBU_SE_DPI_RSP_DAT_CNTr = 3180, + SE_AS_AS_ETCAM_CTRL_REQ0_CNTr = 3181, + SE_AS_ETCAM_CTRL_AS_INDEX0_1_CNTr = 3182, + SE_AS_ETCAM_CTRL_AS_HIT0_1_CNTr = 3183, + SE_AS_AS_SMMU0_REQ_CNTr = 3184, + SE_AS_LEARN_HLA_WR_CNTr = 3185, + SE_AS_AS_SMMU1_REQ_CNTr = 3186, + SE_AS_SE_CFG_MAC_DAT_CNTr = 3187, + SE_AS_ALG_AS_HASH_P0_3_FC_CNTr = 3188, + SE_AS_ALG_AS_LPM_P0_FC_CNTr = 3189, + SE_AS_AS_ALG_HASH_P0_3_FC_CNTr = 3190, + SE_AS_AS_ALG_LPM_P0_FC_CNTr = 3191, + SE_AS_AS_PBU_FC_CNTr = 3192, + SE_AS_PBU_SE_DPI_KEY_FC_CNTr = 3193, + SE_AS_AS_ETCAM_CTRL_FC0_1_CNTr = 3194, + SE_AS_ETCAM_CTRL_AS_FC0_1_CNTr = 3195, + SE_AS_SMMU0_AS_MAC_AGE_FC_CNTr = 3196, + SE_AS_ALG_LEARN_FC_CNTr = 3197, + SE_AS_SMMU1_AS_FC_CNTr = 3198, + SE_AS_CFG_SE_MAC_FC_CNTr = 3199, + SE_KSCHD_KSCHD_CPU_RDYr = 3200, + SE_KSCHD_PPU0_ECC_BYPASS_READr = 3201, + SE_KSCHD_PBU_ECC_BYPASS_READr = 3202, + SE_KSCHD_SMMU1_ECC_BYPASS_READr = 3203, + SE_KSCHD_ASS_ECC_BYPASS_READr = 3204, + SE_KSCHD_SDT_Hr = 3205, + SE_KSCHD_SDT_Lr = 3206, + SE_KSCHD_HASH_KEY15r = 3207, + SE_KSCHD_HASH_KEY14r = 3208, + SE_KSCHD_HASH_KEY13r = 3209, + SE_KSCHD_HASH_KEY12r = 3210, + SE_KSCHD_HASH_KEY11r = 3211, + SE_KSCHD_HASH_KEY10r = 3212, + SE_KSCHD_HASH_KEY9r = 3213, + SE_KSCHD_HASH_KEY8r = 3214, + SE_KSCHD_HASH_KEY7r = 3215, + SE_KSCHD_HASH_KEY6r = 3216, + SE_KSCHD_HASH_KEY5r = 3217, + SE_KSCHD_HASH_KEY4r = 3218, + SE_KSCHD_HASH_KEY3r = 3219, + SE_KSCHD_HASH_KEY2r = 3220, + SE_KSCHD_HASH_KEY1r = 3221, + SE_KSCHD_HASH_KEY0r = 3222, + SE_KSCHD_SCHD_INT_0_ENr = 3223, + SE_KSCHD_SCHD_INT_0_MASKr = 3224, + SE_KSCHD_SCHD_INT_1_ENr = 3225, + 
SE_KSCHD_SCHD_INT_1_MASKr = 3226, + SE_KSCHD_SCHD_INT_2_ENr = 3227, + SE_KSCHD_SCHD_INT_2_MASKr = 3228, + SE_KSCHD_SCHD_INT_3_ENr = 3229, + SE_KSCHD_SCHD_INT_3_MASKr = 3230, + SE_KSCHD_SCHD_INT_4_ENr = 3231, + SE_KSCHD_SCHD_INT_4_MASKr = 3232, + SE_KSCHD_SCHD_INT_0_STATUSr = 3233, + SE_KSCHD_SCHD_INT_1_STATUSr = 3234, + SE_KSCHD_SCHD_INT_2_STATUSr = 3235, + SE_KSCHD_SCHD_INT_3_STATUSr = 3236, + SE_KSCHD_SCHD_INT_4_STATUSr = 3237, + SE_KSCHD_SE_KSCHD_INT_STATUSr = 3238, + SE_KSCHD_DEBUG_CNT_MODEr = 3239, + SE_KSCHD_SE_PARSER_KSCHD_KEY0_3_CNTr = 3240, + SE_KSCHD_SE_SMMU1_KEY0_3_CNTr = 3241, + SE_KSCHD_KSCHD_AS_KEY0_CNTr = 3242, + SE_KSCHD_KSCHD_AS_KEY1_CNTr = 3243, + SE_KSCHD_KSCHD_AS_KEY2_CNTr = 3244, + SE_KSCHD_KSCHD_AS_KEY3_CNTr = 3245, + SE_KSCHD_KSCHD_AS_KEY4_CNTr = 3246, + SE_KSCHD_KSCHD_AS_KEY5_CNTr = 3247, + SE_KSCHD_KSCHD_AS_KEY6_CNTr = 3248, + SE_KSCHD_KSCHD_AS_KEY9_CNTr = 3249, + SE_KSCHD_KSCHD_SE_PARSER_FC0_3_CNTr = 3250, + SE_KSCHD_SMMU1_SE_FC0_3_CNTr = 3251, + SE_KSCHD_AS_KSCHD_FC_CNT0r = 3252, + SE_KSCHD_AS_KSCHD_FC_CNT1r = 3253, + SE_KSCHD_AS_KSCHD_FC_CNT2r = 3254, + SE_KSCHD_AS_KSCHD_FC_CNT3r = 3255, + SE_KSCHD_AS_KSCHD_FC_CNT4r = 3256, + SE_KSCHD_AS_KSCHD_FC_CNT5r = 3257, + SE_KSCHD_AS_KSCHD_FC_CNT6r = 3258, + SE_KSCHD_AS_KSCHD_FC_CNT9r = 3259, + SE_RSCHD_RSCHD_HASH_PFUL_CFGr = 3260, + SE_RSCHD_RSCHD_HASH_EPT_CFGr = 3261, + SE_RSCHD_RSCHD_PBU_PFUL_CFGr = 3262, + SE_RSCHD_RSCHD_PBU_EPT_CFGr = 3263, + SE_RSCHD_RSCHD_LPM_PFUL_CFGr = 3264, + SE_RSCHD_RSCHD_LPM_EPT_CFGr = 3265, + SE_RSCHD_RSCHD_ETCAM_PFUL_CFGr = 3266, + SE_RSCHD_RSCHD_ETCAM_EPT_CFGr = 3267, + SE_RSCHD_SMMU0_WB_PFUL_CFGr = 3268, + SE_RSCHD_SMMU0_WB_EPT_CFGr = 3269, + SE_RSCHD_SMMU1_WB_PFUL_CFGr = 3270, + SE_RSCHD_SMMU1_WB_EPT_CFGr = 3271, + SE_RSCHD_ALG_WB_PFUL_CFGr = 3272, + SE_RSCHD_ALG_WB_EPT_CFGr = 3273, + SE_RSCHD_WR_RSP_VLD_ENr = 3274, + SE_RSCHD_NPPU_WB_PFUL_CFGr = 3275, + SE_RSCHD_NPPU_WB_EPT_CFGr = 3276, + SE_RSCHD_PORT0_INT_ENr = 3277, + SE_RSCHD_PORT0_INT_MASKr = 3278, + 
SE_RSCHD_PORT1_INT_ENr = 3279, + SE_RSCHD_PORT1_INT_MASKr = 3280, + SE_RSCHD_PORT0_INT_STATUSr = 3281, + SE_RSCHD_PORT1_INT_STATUSr = 3282, + SE_RSCHD_SE_RSCHD_INT_STATUSr = 3283, + SE_RSCHD_DEBUG_CNT_MODEr = 3284, + SE_RSCHD_SE_PPU_MEX0_5_RSP1_CNTr = 3285, + SE_RSCHD_AS_RSCHD_RSP0_CNTr = 3286, + SE_RSCHD_AS_RSCHD_RSP1_CNTr = 3287, + SE_RSCHD_AS_RSCHD_RSP2_CNTr = 3288, + SE_RSCHD_AS_RSCHD_RSP3_CNTr = 3289, + SE_RSCHD_AS_RSCHD_RSP4_CNTr = 3290, + SE_RSCHD_AS_RSCHD_RSP5_CNTr = 3291, + SE_RSCHD_AS_RSCHD_RSP6_CNTr = 3292, + SE_RSCHD_AS_RSCHD_RSP9_CNTr = 3293, + SE_RSCHD_SMMU1_SE_RSP0_3_CNTr = 3294, + SE_RSCHD_PPU_SE_MEX0_3_FC_CNTr = 3295, + SE_RSCHD_RSCHD_AS_FC_CNT0r = 3296, + SE_RSCHD_RSCHD_AS_FC_CNT1r = 3297, + SE_RSCHD_RSCHD_AS_FC_CNT2r = 3298, + SE_RSCHD_RSCHD_AS_FC_CNT3r = 3299, + SE_RSCHD_RSCHD_AS_FC_CNT4r = 3300, + SE_RSCHD_RSCHD_AS_FC_CNT5r = 3301, + SE_RSCHD_RSCHD_AS_FC_CNT6r = 3302, + SE_RSCHD_RSCHD_AS_FC_CNT9r = 3303, + SE_RSCHD_SE_SMMU1_FC0_3_CNTr = 3304, + SE_RSCHD_SMMU0_SE_WR_DONE_CNTr = 3305, + SE_RSCHD_SE_SMMU0_WR_DONE_FC_CNTr = 3306, + SE_RSCHD_SMMU1_SE_WR_RSP_CNTr = 3307, + SE_RSCHD_SE_SMMU1_WR_RSP_FC_CNTr = 3308, + SE_RSCHD_ALG_SE_WR_RSP_CNTr = 3309, + SE_RSCHD_SE_ALG_WR_RSP_FC_CNTr = 3310, + SMMU0_SMMU0_KSCHD_PFUL_CFG0r = 3311, + SMMU0_SMMU0_KSCHD_PFUL_CFG1r = 3312, + SMMU0_SMMU0_CTRL_PFUL1_CFGr = 3313, + SMMU0_SMMU0_CTRL_PFUL2_CFGr = 3314, + SMMU0_SMMU0_CTRL_PFUL3_CFGr = 3315, + SMMU0_SMMU0_RSCHD_PFUL_CFGr = 3316, + SMMU0_SMMU0_RSCHD_EPT_CFGr = 3317, + SMMU0_SMMU0_ALUCMD_PFUL_CFGr = 3318, + SMMU0_SMMU0_ALUWR_PFUL_CFGr = 3319, + SMMU0_SMMU0_WR_ARB_PFUL_CFG0r = 3320, + SMMU0_SMMU0_WR_ARB_PFUL_CFG1r = 3321, + SMMU0_SMMU0_ORD_PFUL_CFGr = 3322, + SMMU0_SMMU0_CFG_DMA_BADDRr = 3323, + SMMU0_SMMU0_CFG_ODMA0_BADDRr = 3324, + SMMU0_SMMU0_CFG_ODMA1_BADDRr = 3325, + SMMU0_SMMU0_CFG_ODMA2_BADDRr = 3326, + SMMU0_SMMU0_CFG_ODMA_TDM_BADDRr = 3327, + SMMU0_SMMU0_CFG_MCAST_BADDRr = 3328, + SMMU0_SMMU0_CFG_LPM0r = 3329, + SMMU0_SMMU0_CFG_LPM1r = 3330, + 
SMMU0_SMMU0_CFG_LPM2r = 3331, + SMMU0_SMMU0_CFG_LPM3r = 3332, + SMMU0_SMMU0_CFG_LPM4r = 3333, + SMMU0_SMMU0_CFG_LPM5r = 3334, + SMMU0_SMMU0_CFG_LPM6r = 3335, + SMMU0_SMMU0_CFG_LPM7r = 3336, + SMMU0_SMMU0_DEBUG_CNT_MODEr = 3337, + SMMU0_SMMU0_STAT_OVERFLOW_MODEr = 3338, + SMMU0_SMMU0_INIT_EN_CFG_TMPr = 3339, + SMMU0_SMMU0_SMMU0_INT_UNMASK_FLAGr = 3340, + SMMU0_SMMU0_SMMU0_INT0_ENr = 3341, + SMMU0_SMMU0_SMMU0_INT0_MASKr = 3342, + SMMU0_SMMU0_SMMU0_INT0_STATUSr = 3343, + SMMU0_SMMU0_SMMU0_INT1_ENr = 3344, + SMMU0_SMMU0_SMMU0_INT1_MASKr = 3345, + SMMU0_SMMU0_SMMU0_INT1_STATUSr = 3346, + SMMU0_SMMU0_SMMU0_INT2_ENr = 3347, + SMMU0_SMMU0_SMMU0_INT2_MASKr = 3348, + SMMU0_SMMU0_SMMU0_INT2_STATUSr = 3349, + SMMU0_SMMU0_SMMU0_INT3_ENr = 3350, + SMMU0_SMMU0_SMMU0_INT3_MASKr = 3351, + SMMU0_SMMU0_SMMU0_INT3_STATUSr = 3352, + SMMU0_SMMU0_SMMU0_INT4_ENr = 3353, + SMMU0_SMMU0_SMMU0_INT4_MASKr = 3354, + SMMU0_SMMU0_SMMU0_INT4_STATUSr = 3355, + SMMU0_SMMU0_SMMU0_INT5_ENr = 3356, + SMMU0_SMMU0_SMMU0_INT5_MASKr = 3357, + SMMU0_SMMU0_SMMU0_INT5_STATUSr = 3358, + SMMU0_SMMU0_SMMU0_INT6_ENr = 3359, + SMMU0_SMMU0_SMMU0_INT6_MASKr = 3360, + SMMU0_SMMU0_SMMU0_INT6_STATUSr = 3361, + SMMU0_SMMU0_SMMU0_INT7_ENr = 3362, + SMMU0_SMMU0_SMMU0_INT7_MASKr = 3363, + SMMU0_SMMU0_SMMU0_INT7_STATUSr = 3364, + SMMU0_SMMU0_SMMU0_INT8_ENr = 3365, + SMMU0_SMMU0_SMMU0_INT8_MASKr = 3366, + SMMU0_SMMU0_SMMU0_INT8_STATUSr = 3367, + SMMU0_SMMU0_SMMU0_INT9_ENr = 3368, + SMMU0_SMMU0_SMMU0_INT9_MASKr = 3369, + SMMU0_SMMU0_SMMU0_INT9_STATUSr = 3370, + SMMU0_SMMU0_SMMU0_INT10_ENr = 3371, + SMMU0_SMMU0_SMMU0_INT10_MASKr = 3372, + SMMU0_SMMU0_SMMU0_INT10_STATUSr = 3373, + SMMU0_SMMU0_SMMU0_INT11_ENr = 3374, + SMMU0_SMMU0_SMMU0_INT11_MASKr = 3375, + SMMU0_SMMU0_SMMU0_INT11_STATUSr = 3376, + SMMU0_SMMU0_SMMU0_INT12_ENr = 3377, + SMMU0_SMMU0_SMMU0_INT12_MASKr = 3378, + SMMU0_SMMU0_SMMU0_INT12_STATUSr = 3379, + SMMU0_SMMU0_SMMU0_INT13_ENr = 3380, + SMMU0_SMMU0_SMMU0_INT13_MASKr = 3381, + SMMU0_SMMU0_SMMU0_INT13_STATUSr = 
3382, + SMMU0_SMMU0_SMMU0_INT14_ENr = 3383, + SMMU0_SMMU0_SMMU0_INT14_MASKr = 3384, + SMMU0_SMMU0_SMMU0_INT14_STATUSr = 3385, + SMMU0_SMMU0_SMMU0_ECC_UNMASK_FLAGr = 3386, + SMMU0_SMMU0_SMMU0_INT15_ENr = 3387, + SMMU0_SMMU0_SMMU0_INT15_MASKr = 3388, + SMMU0_SMMU0_SMMU0_INT15_STATUSr = 3389, + SMMU0_SMMU0_SMMU0_INT16_ENr = 3390, + SMMU0_SMMU0_SMMU0_INT16_MASKr = 3391, + SMMU0_SMMU0_SMMU0_INT16_STATUSr = 3392, + SMMU0_SMMU0_SMMU0_INT17_ENr = 3393, + SMMU0_SMMU0_SMMU0_INT17_MASKr = 3394, + SMMU0_SMMU0_SMMU0_INT17_STATUSr = 3395, + SMMU0_SMMU0_SMMU0_INT18_ENr = 3396, + SMMU0_SMMU0_SMMU0_INT18_MASKr = 3397, + SMMU0_SMMU0_SMMU0_INT18_STATUSr = 3398, + SMMU0_SMMU0_SMMU0_INT19_ENr = 3399, + SMMU0_SMMU0_SMMU0_INT19_MASKr = 3400, + SMMU0_SMMU0_SMMU0_INT19_STATUSr = 3401, + SMMU0_SMMU0_SMMU0_INT20_ENr = 3402, + SMMU0_SMMU0_SMMU0_INT20_MASKr = 3403, + SMMU0_SMMU0_SMMU0_INT20_STATUSr = 3404, + SMMU0_SMMU0_SMMU0_INT21_ENr = 3405, + SMMU0_SMMU0_SMMU0_INT21_MASKr = 3406, + SMMU0_SMMU0_SMMU0_INT21_STATUSr = 3407, + SMMU0_SMMU0_SMMU0_INT22_ENr = 3408, + SMMU0_SMMU0_SMMU0_INT22_MASKr = 3409, + SMMU0_SMMU0_SMMU0_INT22_STATUSr = 3410, + SMMU0_SMMU0_SMMU0_INT23_ENr = 3411, + SMMU0_SMMU0_SMMU0_INT23_MASKr = 3412, + SMMU0_SMMU0_SMMU0_INT23_STATUSr = 3413, + SMMU0_SMMU0_SMMU0_INT24_ENr = 3414, + SMMU0_SMMU0_SMMU0_INT24_MASKr = 3415, + SMMU0_SMMU0_SMMU0_INT24_STATUSr = 3416, + SMMU0_SMMU0_SMMU0_INT25_ENr = 3417, + SMMU0_SMMU0_SMMU0_INT25_MASKr = 3418, + SMMU0_SMMU0_SMMU0_INT25_STATUSr = 3419, + SMMU0_SMMU0_SMMU0_INT26_ENr = 3420, + SMMU0_SMMU0_SMMU0_INT26_MASKr = 3421, + SMMU0_SMMU0_SMMU0_INT26_STATUSr = 3422, + SMMU0_SMMU0_SMMU0_INT27_ENr = 3423, + SMMU0_SMMU0_SMMU0_INT27_MASKr = 3424, + SMMU0_SMMU0_SMMU0_INT27_STATUSr = 3425, + SMMU0_SMMU0_SMMU0_INT28_ENr = 3426, + SMMU0_SMMU0_SMMU0_INT28_MASKr = 3427, + SMMU0_SMMU0_SMMU0_INT28_STATUSr = 3428, + SMMU0_SMMU0_SMMU0_INT29_ENr = 3429, + SMMU0_SMMU0_SMMU0_INT29_MASKr = 3430, + SMMU0_SMMU0_SMMU0_INT29_STATUSr = 3431, + 
SMMU0_SMMU0_SMMU0_INT30_ENr = 3432, + SMMU0_SMMU0_SMMU0_INT30_MASKr = 3433, + SMMU0_SMMU0_SMMU0_INT30_STATUSr = 3434, + SMMU0_SMMU0_SMMU0_INT31_ENr = 3435, + SMMU0_SMMU0_SMMU0_INT31_MASKr = 3436, + SMMU0_SMMU0_SMMU0_INT31_STATUSr = 3437, + SMMU0_SMMU0_SMMU0_INT32_ENr = 3438, + SMMU0_SMMU0_SMMU0_INT32_MASKr = 3439, + SMMU0_SMMU0_SMMU0_INT32_STATUSr = 3440, + SMMU0_SMMU0_SMMU0_INT33_ENr = 3441, + SMMU0_SMMU0_SMMU0_INT33_MASKr = 3442, + SMMU0_SMMU0_SMMU0_INT33_STATUSr = 3443, + SMMU0_SMMU0_SMMU0_INT34_ENr = 3444, + SMMU0_SMMU0_SMMU0_INT34_MASKr = 3445, + SMMU0_SMMU0_SMMU0_INT34_STATUSr = 3446, + SMMU0_SMMU0_SMMU0_INT35_ENr = 3447, + SMMU0_SMMU0_SMMU0_INT35_MASKr = 3448, + SMMU0_SMMU0_SMMU0_INT35_STATUSr = 3449, + SMMU0_SMMU0_SMMU0_INT36_ENr = 3450, + SMMU0_SMMU0_SMMU0_INT36_MASKr = 3451, + SMMU0_SMMU0_SMMU0_INT36_STATUSr = 3452, + SMMU0_SMMU0_SMMU0_INT37_ENr = 3453, + SMMU0_SMMU0_SMMU0_INT37_MASKr = 3454, + SMMU0_SMMU0_SMMU0_INT37_STATUSr = 3455, + SMMU0_SMMU0_SMMU0_INT38_ENr = 3456, + SMMU0_SMMU0_SMMU0_INT38_MASKr = 3457, + SMMU0_SMMU0_SMMU0_INT38_STATUSr = 3458, + SMMU0_SMMU0_SMMU0_INT39_ENr = 3459, + SMMU0_SMMU0_SMMU0_INT39_MASKr = 3460, + SMMU0_SMMU0_SMMU0_INT39_STATUSr = 3461, + SMMU0_SMMU0_SMMU0_INT40_ENr = 3462, + SMMU0_SMMU0_SMMU0_INT40_MASKr = 3463, + SMMU0_SMMU0_SMMU0_INT40_STATUSr = 3464, + SMMU0_SMMU0_SMMU0_INT41_ENr = 3465, + SMMU0_SMMU0_SMMU0_INT41_MASKr = 3466, + SMMU0_SMMU0_SMMU0_INT41_STATUSr = 3467, + SMMU0_SMMU0_SMMU0_INT42_ENr = 3468, + SMMU0_SMMU0_SMMU0_INT42_MASKr = 3469, + SMMU0_SMMU0_SMMU0_INT42_STATUSr = 3470, + SMMU0_SMMU0_SMMU0_INT43_ENr = 3471, + SMMU0_SMMU0_SMMU0_INT43_MASKr = 3472, + SMMU0_SMMU0_SMMU0_INT43_STATUSr = 3473, + SMMU0_SMMU0_SMMU0_INT44_ENr = 3474, + SMMU0_SMMU0_SMMU0_INT44_MASKr = 3475, + SMMU0_SMMU0_SMMU0_INT44_STATUSr = 3476, + SMMU0_SMMU0_SMMU0_INT45_ENr = 3477, + SMMU0_SMMU0_SMMU0_INT45_MASKr = 3478, + SMMU0_SMMU0_SMMU0_INT45_STATUSr = 3479, + SMMU0_SMMU0_SMMU0_INT46_ENr = 3480, + SMMU0_SMMU0_SMMU0_INT46_MASKr = 3481, + 
SMMU0_SMMU0_SMMU0_INT46_STATUSr = 3482, + SMMU0_SMMU0_SMMU0_INT47_ENr = 3483, + SMMU0_SMMU0_SMMU0_INT47_MASKr = 3484, + SMMU0_SMMU0_SMMU0_INT47_STATUSr = 3485, + SMMU0_SMMU0_SMMU0_INT48_ENr = 3486, + SMMU0_SMMU0_SMMU0_INT48_MASKr = 3487, + SMMU0_SMMU0_SMMU0_INT48_STATUSr = 3488, + SMMU0_SMMU0_SMMU0_INT49_ENr = 3489, + SMMU0_SMMU0_SMMU0_INT49_MASKr = 3490, + SMMU0_SMMU0_SMMU0_INT49_STATUSr = 3491, + SMMU0_SMMU0_SMMU0_INT50_ENr = 3492, + SMMU0_SMMU0_SMMU0_INT50_MASKr = 3493, + SMMU0_SMMU0_SMMU0_INT50_STATUSr = 3494, + SMMU0_SMMU0_SMMU0_INT51_ENr = 3495, + SMMU0_SMMU0_SMMU0_INT51_MASKr = 3496, + SMMU0_SMMU0_SMMU0_INT51_STATUSr = 3497, + SMMU0_SMMU0_SMMU0_INT52_ENr = 3498, + SMMU0_SMMU0_SMMU0_INT52_MASKr = 3499, + SMMU0_SMMU0_SMMU0_INT52_STATUSr = 3500, + SMMU0_SMMU0_SMMU0_INT53_ENr = 3501, + SMMU0_SMMU0_SMMU0_INT53_MASKr = 3502, + SMMU0_SMMU0_SMMU0_INT53_STATUSr = 3503, + SMMU0_SMMU0_CTRL0_ARBITER_ECC_BYPASSr = 3504, + SMMU0_SMMU0_CTRL2_ARBITER_ECC_BYPASSr = 3505, + SMMU0_SMMU0_CTRL4_ARBITER_ECC_BYPASSr = 3506, + SMMU0_SMMU0_CTRL6_ARBITER_ECC_BYPASSr = 3507, + SMMU0_SMMU0_CTRL8_ARBITER_ECC_BYPASSr = 3508, + SMMU0_SMMU0_CTRL10_ARBITER_ECC_BYPASSr = 3509, + SMMU0_SMMU0_CTRL12_ARBITER_ECC_BYPASSr = 3510, + SMMU0_SMMU0_CTRL14_ARBITER_ECC_BYPASSr = 3511, + SMMU0_SMMU0_CTRL16_ARBITER_ECC_BYPASSr = 3512, + SMMU0_SMMU0_CTRL18_ARBITER_ECC_BYPASSr = 3513, + SMMU0_SMMU0_CTRL20_ARBITER_ECC_BYPASSr = 3514, + SMMU0_SMMU0_CTRL22_ARBITER_ECC_BYPASSr = 3515, + SMMU0_SMMU0_CTRL24_ARBITER_ECC_BYPASSr = 3516, + SMMU0_SMMU0_CTRL26_ARBITER_ECC_BYPASSr = 3517, + SMMU0_SMMU0_CTRL28_ARBITER_ECC_BYPASSr = 3518, + SMMU0_SMMU0_CTRL30_ARBITER_ECC_BYPASSr = 3519, + SMMU0_SMMU0_CTRL_REQ_ECC_BYPASSr = 3520, + SMMU0_SMMU0_CTRL_INFO_ECC_BYPASSr = 3521, + SMMU0_SMMU0_SMMU0_RSCHD_ECC_BYPASSr = 3522, + SMMU0_SMMU0_SMMU0_WR_ECC_BYPASSr = 3523, + SMMU0_SMMU0_CTRL0_ARBITER_ECC_ERRr = 3524, + SMMU0_SMMU0_CTRL1_ARBITER_ECC_ERRr = 3525, + SMMU0_SMMU0_CTRL2_ARBITER_ECC_ERRr = 3526, + 
SMMU0_SMMU0_CTRL3_ARBITER_ECC_ERRr = 3527, + SMMU0_SMMU0_CTRL4_ARBITER_ECC_ERRr = 3528, + SMMU0_SMMU0_CTRL5_ARBITER_ECC_ERRr = 3529, + SMMU0_SMMU0_CTRL6_ARBITER_ECC_ERRr = 3530, + SMMU0_SMMU0_CTRL7_ARBITER_ECC_ERRr = 3531, + SMMU0_SMMU0_CTRL8_ARBITER_ECC_ERRr = 3532, + SMMU0_SMMU0_CTRL9_ARBITER_ECC_ERRr = 3533, + SMMU0_SMMU0_CTRL10_ARBITER_ECC_ERRr = 3534, + SMMU0_SMMU0_CTRL11_ARBITER_ECC_ERRr = 3535, + SMMU0_SMMU0_CTRL12_ARBITER_ECC_ERRr = 3536, + SMMU0_SMMU0_CTRL13_ARBITER_ECC_ERRr = 3537, + SMMU0_SMMU0_CTRL14_ARBITER_ECC_ERRr = 3538, + SMMU0_SMMU0_CTRL15_ARBITER_ECC_ERRr = 3539, + SMMU0_SMMU0_CTRL16_ARBITER_ECC_ERRr = 3540, + SMMU0_SMMU0_CTRL17_ARBITER_ECC_ERRr = 3541, + SMMU0_SMMU0_CTRL18_ARBITER_ECC_ERRr = 3542, + SMMU0_SMMU0_CTRL19_ARBITER_ECC_ERRr = 3543, + SMMU0_SMMU0_CTRL20_ARBITER_ECC_ERRr = 3544, + SMMU0_SMMU0_CTRL21_ARBITER_ECC_ERRr = 3545, + SMMU0_SMMU0_CTRL22_ARBITER_ECC_ERRr = 3546, + SMMU0_SMMU0_CTRL23_ARBITER_ECC_ERRr = 3547, + SMMU0_SMMU0_CTRL24_ARBITER_ECC_ERRr = 3548, + SMMU0_SMMU0_CTRL25_ARBITER_ECC_ERRr = 3549, + SMMU0_SMMU0_CTRL26_ARBITER_ECC_ERRr = 3550, + SMMU0_SMMU0_CTRL27_ARBITER_ECC_ERRr = 3551, + SMMU0_SMMU0_CTRL28_ARBITER_ECC_ERRr = 3552, + SMMU0_SMMU0_CTRL29_ARBITER_ECC_ERRr = 3553, + SMMU0_SMMU0_CTRL30_ARBITER_ECC_ERRr = 3554, + SMMU0_SMMU0_CTRL31_ARBITER_ECC_ERRr = 3555, + SMMU0_SMMU0_CTRL_REQ_ECC_SINGLE_ERRr = 3556, + SMMU0_SMMU0_CTRL_REQ_ECC_DOUBLE_ERRr = 3557, + SMMU0_SMMU0_CTRL_INFO_ECC_SINGLE_ERRr = 3558, + SMMU0_SMMU0_CTRL_INFO_ECC_DOUBLE_ERRr = 3559, + SMMU0_SMMU0_SMMU0_WR_ECC_ERRr = 3560, + SMMU0_SMMU0_SMMU0_RSCHD_ECC_SINGLE_ERRr = 3561, + SMMU0_SMMU0_SMMU0_RSCHD_ECC_DOUBLE_ERRr = 3562, + SMMU0_SMMU0_ORD_FIFO_EMPTYr = 3563, + SMMU0_SMMU0_WR_ARB_FIFO_EMPTYr = 3564, + SMMU0_SMMU0_CTRL_FIFO_EMPTY0r = 3565, + SMMU0_SMMU0_CTRL_FIFO_EMPTY1r = 3566, + SMMU0_SMMU0_CTRL_FIFO_EMPTY2r = 3567, + SMMU0_SMMU0_CTRL_FIFO_EMPTY3r = 3568, + SMMU0_SMMU0_CTRL_FIFO_EMPTY4r = 3569, + SMMU0_SMMU0_CTRL_FIFO_EMPTY5r = 3570, + 
SMMU0_SMMU0_KSCHD_FIFO_EMPTY0r = 3571, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY1r = 3572, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY2r = 3573, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY3r = 3574, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY4r = 3575, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY5r = 3576, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY6r = 3577, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY7r = 3578, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY8r = 3579, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY9r = 3580, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY10r = 3581, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY11r = 3582, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY12r = 3583, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY13r = 3584, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY14r = 3585, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY15r = 3586, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY16r = 3587, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY17r = 3588, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY18r = 3589, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY19r = 3590, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY20r = 3591, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY21r = 3592, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY22r = 3593, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY23r = 3594, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY24r = 3595, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY25r = 3596, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY26r = 3597, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY27r = 3598, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY28r = 3599, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY29r = 3600, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY30r = 3601, + SMMU0_SMMU0_KSCHD_FIFO_EMPTY31r = 3602, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY0r = 3603, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY1r = 3604, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY2r = 3605, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY3r = 3606, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY4r = 3607, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY5r = 3608, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY6r = 3609, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY7r = 3610, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY8r = 3611, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY9r = 3612, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY10r = 3613, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY11r = 3614, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY12r = 3615, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY13r = 3616, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY14r = 3617, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY15r = 3618, + 
SMMU0_SMMU0_RSCHD_FIFO_EMPTY16r = 3619, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY17r = 3620, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY18r = 3621, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY19r = 3622, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY20r = 3623, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY21r = 3624, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY22r = 3625, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY23r = 3626, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY24r = 3627, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY25r = 3628, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY26r = 3629, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY27r = 3630, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY28r = 3631, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY29r = 3632, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY30r = 3633, + SMMU0_SMMU0_RSCHD_FIFO_EMPTY31r = 3634, + SMMU0_SMMU0_EPT_FLAGr = 3635, + SMMU0_SMMU0_PPU_SOFT_RSTr = 3636, + SMMU0_SMMU0_SMMU0_AS_MAC_AGE_FC_CNTr = 3637, + SMMU0_SMMU0_SMMU0_MARC_SE_PARSER_FC_CNTr = 3638, + SMMU0_SMMU0_WR_ARB_CPU_FC_CNTr = 3639, + SMMU0_SMMU0_SMMU0_LPM_AS_FC_CNTr = 3640, + SMMU0_SMMU0_LPM_AS_SMMU0_FC_CNTr = 3641, + SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_FC_CNTr = 3642, + SMMU0_SMMU0_AS_ETCAM1_0_SMMU0_FC_CNTr = 3643, + SMMU0_SMMU0_SMMU0_PPU_MCAST_FC_CNTr = 3644, + SMMU0_SMMU0_PPU_SMMU0_MCAST_FC_CNTr = 3645, + SMMU0_SMMU0_ODMA_SMMU0_TDM_FC_RSP_FC_CNTr = 3646, + SMMU0_SMMU0_SMMU0_ODMA_TDM_FC_KEY_FC_CNTr = 3647, + SMMU0_SMMU0_SMMU0_ODMA_FC_CNTr = 3648, + SMMU0_SMMU0_SMMU0_CFG_TAB_RD_FC_CNTr = 3649, + SMMU0_SMMU0_SMMU0_STAT_FC15_0_CNTr = 3650, + SMMU0_SMMU0_STAT_SMMU0_FC15_0_CNTr = 3651, + SMMU0_SMMU0_SMMU0_PPU_MEX5_0_FC_CNTr = 3652, + SMMU0_SMMU0_PPU_SMMU0_MEX5_0_FC_CNTr = 3653, + SMMU0_SMMU0_AS_SMMU0_MAC_AGE_REQ_CNTr = 3654, + SMMU0_SMMU0_SE_PARSER_SMMU0_MARC_KEY_CNTr = 3655, + SMMU0_SMMU0_CPU_IND_RDAT_CNTr = 3656, + SMMU0_SMMU0_CPU_IND_RD_REQ_CNTr = 3657, + SMMU0_SMMU0_CPU_IND_WR_REQ_CNTr = 3658, + SMMU0_SMMU0_SMMU0_PLCR_RSP0_CNTr = 3659, + SMMU0_SMMU0_PLCR_SMMU0_REQ0_CNTr = 3660, + SMMU0_SMMU0_SMMU0_LPM_AS_RSP_CNTr = 3661, + SMMU0_SMMU0_LPM_AS_SMMU0_REQ_CNTr = 3662, + SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_RSP_CNTr = 3663, + 
SMMU0_SMMU0_ETCAM1_0_AS_SMMU0_REQ_CNTr = 3664, + SMMU0_SMMU0_SMMU0_PPU_MCAST_RSP_CNTr = 3665, + SMMU0_SMMU0_PPU_SMMU0_MCAST_KEY_CNTr = 3666, + SMMU0_SMMU0_SMMU0_ODMA_TDM_MC_RSP_CNTr = 3667, + SMMU0_SMMU0_ODMA_SMMU0_TDM_MC_KEY_CNTr = 3668, + SMMU0_SMMU0_SMMU0_ODMA_RSP_CNTr = 3669, + SMMU0_SMMU0_ODMA_SMMU0_CMD_CNTr = 3670, + SMMU0_SMMU0_SMMU0_CFG_TAB_RDAT_CNTr = 3671, + SMMU0_SMMU0_CFG_SMMU0_TAB_RD_CNTr = 3672, + SMMU0_SMMU0_SMMU0_STAT_RSP15_0_CNTr = 3673, + SMMU0_SMMU0_STAT_SMMU0_REQ15_0_CNTr = 3674, + SMMU0_SMMU0_SMMU0_PPU_MEX5_0_RSP_CNTr = 3675, + SMMU0_SMMU0_PPU_SMMU0_MEX5_0_KEY_CNTr = 3676, + SMMU0_SMMU0_FTM_STAT_SMMU0_REQ0_CNTr = 3677, + SMMU0_SMMU0_FTM_STAT_SMMU0_REQ1_CNTr = 3678, + SMMU0_SMMU0_ETM_STAT_SMMU0_REQ0_CNTr = 3679, + SMMU0_SMMU0_ETM_STAT_SMMU0_REQ1_CNTr = 3680, + SMMU0_SMMU0_REQ_ERAM0_31_RD_CNTr = 3681, + SMMU0_SMMU0_REQ_ERAM0_31_WR_CNTr = 3682, + SE_SMMU1_DDR_WDAT1r = 3683, + SE_SMMU1_DDR_WDAT2r = 3684, + SE_SMMU1_DDR_WDAT3r = 3685, + SE_SMMU1_DDR_WDAT4r = 3686, + SE_SMMU1_DDR_WDAT5r = 3687, + SE_SMMU1_DDR_WDAT6r = 3688, + SE_SMMU1_DDR_WDAT7r = 3689, + SE_SMMU1_DDR_WDAT8r = 3690, + SE_SMMU1_DDR_WDAT9r = 3691, + SE_SMMU1_DDR_WDAT10r = 3692, + SE_SMMU1_DDR_WDAT11r = 3693, + SE_SMMU1_DDR_WDAT12r = 3694, + SE_SMMU1_DDR_WDAT13r = 3695, + SE_SMMU1_DDR_WDAT14r = 3696, + SE_SMMU1_DDR_WDAT15r = 3697, + SE_SMMU1_CNT_STAT_CACHE_ENr = 3698, + SE_SMMU1_CNT_STAT_CACHE_CLRr = 3699, + SE_SMMU1_CNT_STAT_CACHE_REQ_63_32r = 3700, + SE_SMMU1_CNT_STAT_CACHE_REQ_31_0r = 3701, + SE_SMMU1_CNT_STAT_CACHE_HIT_63_32r = 3702, + SE_SMMU1_CNT_STAT_CACHE_HIT_31_0r = 3703, + SE_SMMU1_DDR_CMD0r = 3704, + SE_SMMU1_INFO_ADDRr = 3705, + SE_SMMU1_DDR_CMD1r = 3706, + SE_SMMU1_CLR_START_ADDRr = 3707, + SE_SMMU1_CLR_END_ADDRr = 3708, + SE_SMMU1_CLR_TBL_ENr = 3709, + SE_SMMU1_DEBUG_CNT_MODEr = 3710, + SE_SMMU1_INIT_DONEr = 3711, + SE_SMMU1_CPU_RSP_RD_DONEr = 3712, + SE_SMMU1_KSCH_OAM_SP_ENr = 3713, + SE_SMMU1_CFG_CACHE_ENr = 3714, + SE_SMMU1_CACHE_AGE_ENr = 3715, + SE_SMMU1_CPU_RDAT0r = 
3716, + SE_SMMU1_CPU_RDAT1r = 3717, + SE_SMMU1_CPU_RDAT2r = 3718, + SE_SMMU1_CPU_RDAT3r = 3719, + SE_SMMU1_CPU_RDAT4r = 3720, + SE_SMMU1_CPU_RDAT5r = 3721, + SE_SMMU1_CPU_RDAT6r = 3722, + SE_SMMU1_CPU_RDAT7r = 3723, + SE_SMMU1_CPU_RDAT8r = 3724, + SE_SMMU1_CPU_RDAT9r = 3725, + SE_SMMU1_CPU_RDAT10r = 3726, + SE_SMMU1_CPU_RDAT11r = 3727, + SE_SMMU1_CPU_RDAT12r = 3728, + SE_SMMU1_CPU_RDAT13r = 3729, + SE_SMMU1_CPU_RDAT14r = 3730, + SE_SMMU1_CPU_RDAT15r = 3731, + SE_SMMU1_CTRL_CPU_RD_RDYr = 3732, + SE_SMMU1_CPU_WARBI_RDY_CFGr = 3733, + SE_SMMU1_DIR_ARBI_CPU_RPFULr = 3734, + SE_SMMU1_DIR_ARBI_WPFULr = 3735, + SE_SMMU1_CFG_WR_ARBI_PFUL0r = 3736, + SE_SMMU1_CFG_WR_ARBI_PFUL1r = 3737, + SE_SMMU1_SMMU1_WDONE_PFUL_CFGr = 3738, + SE_SMMU1_STAT_RATE_CFG_CNTr = 3739, + SE_SMMU1_FTM_RATE_CFG_CNTr = 3740, + SE_SMMU1_ETM_RATE_CFG_CNTr = 3741, + SE_SMMU1_DIR_RATE_CFG_CNTr = 3742, + SE_SMMU1_HASH_RATE_CFG_CNTr = 3743, + SE_SMMU1_FTM_TBL_CFGr = 3744, + SE_SMMU1_LPM_V4_AS_TBL_CFGr = 3745, + SE_SMMU1_LPM_V4_TBL_CFGr = 3746, + SE_SMMU1_LPM_V6_TBL_CFGr = 3747, + SE_SMMU1_LPM_V6_AS_TBL_CFGr = 3748, + SE_SMMU1_DMA_TBL_CFGr = 3749, + SE_SMMU1_STAT_MODE_CFGr = 3750, + SE_SMMU1_CTRL_RPAR_CPU_PFULr = 3751, + SE_SMMU1_CFG_KSCH_DIR_PFULr = 3752, + SE_SMMU1_CFG_KSCH_HASH_PFULr = 3753, + SE_SMMU1_CFG_KSCH_LPM_PFULr = 3754, + SE_SMMU1_CFG_KSCH_LPM_AS_PFULr = 3755, + SE_SMMU1_CFG_KSCH_STAT_PFULr = 3756, + SE_SMMU1_CFG_KSCH_TM_PFULr = 3757, + SE_SMMU1_CFG_KSCH_OAM_PFULr = 3758, + SE_SMMU1_CFG_KSCH_DMA_PFULr = 3759, + SE_SMMU1_CTRL_WFIFO_CFGr = 3760, + SE_SMMU1_RSCH_HASH_PTR_CFGr = 3761, + SE_SMMU1_RSCH_LPM_PTR_CFGr = 3762, + SE_SMMU1_RSCH_LPM_AS_PTR_CFGr = 3763, + SE_SMMU1_RSCH_STAT_PTR_CFGr = 3764, + SE_SMMU1_RSCH_OAM_PTR_CFGr = 3765, + SE_SMMU1_RSCHD_FIFO_PEPT_CFGr = 3766, + SE_SMMU1_DIR_FIFO_PFUL_CFGr = 3767, + SE_SMMU1_HASH_FIFO_PFUL_CFGr = 3768, + SE_SMMU1_LPM_FIFO_PFUL_CFGr = 3769, + SE_SMMU1_LPM_AS_FIFO_PFUL_CFGr = 3770, + SE_SMMU1_STAT_FIFO_PFUL_CFGr = 3771, + SE_SMMU1_FTM_FIFO_PFUL_CFGr = 
3772, + SE_SMMU1_ETM_FIFO_PFUL_CFGr = 3773, + SE_SMMU1_OAM_FIFO_PFUL_CFGr = 3774, + SE_SMMU1_DMA_FIFO_PFUL_CFGr = 3775, + SE_SMMU1_CACHE_RSP_RR_FIFO_CFGr = 3776, + SE_SMMU1_DDR_RSP_RR_FIFO_CFGr = 3777, + SE_SMMU1_CPU_CAHCE_FIFO_CFGr = 3778, + SE_SMMU1_CACHE_RSP_FIFO_CFGr = 3779, + SE_SMMU1_TEST_STATEr = 3780, + SE_SMMU1_CACHE_FIFO_EPTr = 3781, + SE_SMMU1_RR_FIFO_EPTr = 3782, + SE_SMMU1_WR_FIFO_EPTr = 3783, + SE_SMMU1_WDONE_FIFO_EPTr = 3784, + SE_SMMU1_KSCHD_FIFO_EPT0r = 3785, + SE_SMMU1_CASH_FIFO_EPTr = 3786, + SE_SMMU1_CTRL_FIFO_EPTr = 3787, + SE_SMMU1_SMMU1_RSCHD_EPT3r = 3788, + SE_SMMU1_SMMU1_RSCHD_EPT2r = 3789, + SE_SMMU1_SMMU1_RSCHD_EPT1r = 3790, + SE_SMMU1_SMMU1_RSCHD_EPT0r = 3791, + SE_SMMU1_CASH0_ECC_ERR_ADDRr = 3792, + SE_SMMU1_ARBI_CPU_WR_RDYr = 3793, + SE_SMMU1_SMMU1_INT_0_ENr = 3794, + SE_SMMU1_SMMU1_INT_0_MASKr = 3795, + SE_SMMU1_SMMU1_INT_1_ENr = 3796, + SE_SMMU1_SMMU1_INT_1_MASKr = 3797, + SE_SMMU1_SMMU1_INT_2_ENr = 3798, + SE_SMMU1_SMMU1_INT_2_MASKr = 3799, + SE_SMMU1_SMMU1_INT_3_ENr = 3800, + SE_SMMU1_SMMU1_INT_3_MASKr = 3801, + SE_SMMU1_SMMU1_INT_0_STATUSr = 3802, + SE_SMMU1_SMMU1_INT_1_STATUSr = 3803, + SE_SMMU1_SMMU1_INT_2_STATUSr = 3804, + SE_SMMU1_SMMU1_INT_3_STATUSr = 3805, + SE_SMMU1_SMMU1_INT_STATUSr = 3806, + SE_SMMU1_CTRL_TO_CASH7_0_FC_CNTr = 3807, + SE_SMMU1_CASH7_0_TO_CTRL_REQ_CNTr = 3808, + SE_SMMU1_RSCHD_TO_CACHE7_FC_CNTr = 3809, + SE_SMMU1_CASH7_TO_CACHE_RSP_CNTr = 3810, + SE_SMMU1_CASH7_TO_CTRL_FC_CNTr = 3811, + SE_SMMU1_CTRL_TO_CASH7_0_RSP_CNTr = 3812, + SE_SMMU1_KSCHD_TO_CACHE7_0_REQ_CNTr = 3813, + SE_SMMU1_CACHE7_0_TO_KSCHD_FC_CNTr = 3814, + SE_SMMU1_DMA_TO_SMMU1_RD_REQ_CNTr = 3815, + SE_SMMU1_OAM_TO_KSCHD_REQ_CNTr = 3816, + SE_SMMU1_OAM_RR_STATE_RSP_CNTr = 3817, + SE_SMMU1_OAM_CLASH_INFO_CNTr = 3818, + SE_SMMU1_OAM_TO_RR_REQ_CNTr = 3819, + SE_SMMU1_LPM_AS_TO_KSCHD_REQ_CNTr = 3820, + SE_SMMU1_LPM_AS_RR_STATE_RSP_CNTr = 3821, + SE_SMMU1_LPM_AS_CLASH_INFO_CNTr = 3822, + SE_SMMU1_LPM_AS_TO_RR_REQ_CNTr = 3823, + 
SE_SMMU1_LPM_TO_KSCHD_REQ_CNTr = 3824, + SE_SMMU1_LPM_RR_STATE_RSP_CNTr = 3825, + SE_SMMU1_LPM_CLASH_INFO_CNTr = 3826, + SE_SMMU1_LPM_TO_RR_REQ_CNTr = 3827, + SE_SMMU1_HASH3_0_TO_KSCHD_REQ_CNTr = 3828, + SE_SMMU1_HASH3_0_RR_STATE_RSP_CNTr = 3829, + SE_SMMU1_HASH3_0_CLASH_INFO_CNTr = 3830, + SE_SMMU1_HASH3_0_TO_RR_REQ_CNTr = 3831, + SE_SMMU1_DIR3_0_TO_KSCHD_REQ_CNTr = 3832, + SE_SMMU1_DIR3_0_CLASH_INFO_CNTr = 3833, + SE_SMMU1_DIR_TBL_WR_REQ_CNTr = 3834, + SE_SMMU1_WARBI_TO_DIR_TBL_WARBI_FC_CNTr = 3835, + SE_SMMU1_DIR3_0_TO_BANK_RR_REQ_CNTr = 3836, + SE_SMMU1_KSCHD_TO_DIR3_0_FC_CNTr = 3837, + SE_SMMU1_DIR3_0_RR_STATE_RSP_CNTr = 3838, + SE_SMMU1_WR_DONE_TO_WARBI_FC_CNTr = 3839, + SE_SMMU1_WR_DONE_PTR_REQ_CNTr = 3840, + SE_SMMU1_CTRL7_0_TO_WARBI_FC_CNTr = 3841, + SE_SMMU1_WARBI_TO_CTRL7_0_WR_REQ_CNTr = 3842, + SE_SMMU1_WARBI_TO_CASH7_0_WR_REQ_CNTr = 3843, + SE_SMMU1_WARBI_TO_CPU_WR_FC_CNTr = 3844, + SE_SMMU1_CPU_WR_REQ_CNTr = 3845, + SE_SMMU1_CTRL7_0_TO_CPU_RD_RSP_CNTr = 3846, + SE_SMMU1_CPU_TO_CTRL7_0_RD_REQ_CNTr = 3847, + SE_SMMU1_CPU_RD_DIR_TBL_RSP_CNTr = 3848, + SE_SMMU1_CPU_TO_DIR_TBL_RD_WR_REQ_CNTr = 3849, + SE_SMMU1_SMMU1_TO_MMU_7_0_RSP_FC_CNTr = 3850, + SE_SMMU1_MMU_7_0_TO_SMMU1_RD_RSP_CNTr = 3851, + SE_SMMU1_MMU_7_0_TO_SMMU1_RD_FC_CNTr = 3852, + SE_SMMU1_SMMU1_TO_MMU_7_RD_REQ_CNTr = 3853, + SE_SMMU1_MMU_7_TO_SMMU1_WR_FC_CNTr = 3854, + SE_SMMU1_SMMU1_TO_MMU_7_0_WR_REQ_CNTr = 3855, + SE_SMMU1_SE_TO_SMMU1_WR_RSP_FC_CNTr = 3856, + SE_SMMU1_SMMU1_TO_SE_WR_RSP_CNTr = 3857, + SE_SMMU1_DDR7_0_WR_RSP_CNTr = 3858, + SE_SMMU1_SMMU1_TO_AS_FC_CNTr = 3859, + SE_SMMU1_AS_TO_SMMU1_WR_REQ_CNTr = 3860, + SE_SMMU1_SMMU1_TO_SE_PARSER_FC_CNTr = 3861, + SE_SMMU1_SE_PARSER_TO_SMMU1_REQ_CNTr = 3862, + SE_SMMU1_SMMU1_TO_ETM_WR_FC_CNTr = 3863, + SE_SMMU1_ETM_WR_REQ_CNTr = 3864, + SE_SMMU1_SMMU1_TO_FTM_WR_FC_CNTr = 3865, + SE_SMMU1_FTM_WR_REQ_CNTr = 3866, + SE_SMMU1_SMMU1_TO_STATE_WR_FC_CNTr = 3867, + SE_SMMU1_STATE_WR_REQ_CNTr = 3868, + SE_SMMU1_SE_TO_DMA_RSP_CNTr = 3869, + 
SE_SMMU1_SE_TO_DMA_FC_CNTr = 3870, + SE_SMMU1_OAM_TO_SMMU1_FC_CNTr = 3871, + SE_SMMU1_SMMU1_TO_OAM_RSP_CNTr = 3872, + SE_SMMU1_SMMU1_TO_OAM_FC_CNTr = 3873, + SE_SMMU1_OAM_TO_SMMU1_REQ_CNTr = 3874, + SE_SMMU1_SMMU1_TO_ETM_RSP_CNTr = 3875, + SE_SMMU1_SMMU1_TO_FTM_RSP_CNTr = 3876, + SE_SMMU1_SMMU1_TO_ETM_FC_CNTr = 3877, + SE_SMMU1_ETM_TO_SMMU1_REQ_CNTr = 3878, + SE_SMMU1_SMMU1_TO_FTM_FC_CNTr = 3879, + SE_SMMU1_FTM_TO_SMMU1_REQ_CNTr = 3880, + SE_SMMU1_SMMU1_TO_STAT_RSP_CNTr = 3881, + SE_SMMU1_SMMU1_TO_STAT_FC_CNTr = 3882, + SE_SMMU1_STAT_TO_SMMU1_REQ_CNTr = 3883, + SE_SMMU1_LPM_AS_TO_SMMU1_FC_CNTr = 3884, + SE_SMMU1_LPM_TO_SMMU1_FC_CNTr = 3885, + SE_SMMU1_SMMU1_TO_LPM_AS_RSP_CNTr = 3886, + SE_SMMU1_SMMU1_TO_LPM_RSP_CNTr = 3887, + SE_SMMU1_SMMU1_TO_LPM_AS_FC_CNTr = 3888, + SE_SMMU1_SMMU1_TO_LPM_FC_CNTr = 3889, + SE_SMMU1_LPM_AS_TO_SMMU1_REQ_CNTr = 3890, + SE_SMMU1_LPM_TO_SMMU1_REQ_CNTr = 3891, + SE_SMMU1_HASH3_0_TO_SMMU1_FC_CNTr = 3892, + SE_SMMU1_SMMU1_TO_HASH3_0_RSP_CNTr = 3893, + SE_SMMU1_SMMU1_TO_HASH3_0_FC_CNTr = 3894, + SE_SMMU1_HASH3_0_TO_SMMU1_CNTr = 3895, + SE_SMMU1_SE_TO_SMMU1_DIR3_0_RSP_FC_CNTr = 3896, + SE_SMMU1_SMMU1_TO_SE_DIR3_0_RSP_CNTr = 3897, + SE_SMMU1_SMMU1_TO_SE_DIR3_0_FC_CNTr = 3898, + SE_SMMU1_SE_TO_SMMU1_DIR3_0_CNTr = 3899, + SE_SMMU1_CACHE7_0_TO_RSCHD_RSP_CNTr = 3900, + SE_CMMU_DDR_RW_ADDRr = 3901, + SE_CMMU_DDR_RW_MODEr = 3902, + SE_CMMU_CP_CMDr = 3903, + SE_CMMU_CPU_IND_RD_DONEr = 3904, + SE_CMMU_CPU_IND_RDAT0r = 3905, + SE_CMMU_CPU_IND_RDAT1r = 3906, + SE_CMMU_CPU_IND_RDAT2r = 3907, + SE_CMMU_CPU_IND_RDAT3r = 3908, + SE_CMMU_CPU_DDR_FIFO_ALMFULr = 3909, + SE_CMMU_DEBUG_CNT_MODEr = 3910, + SE_CMMU_CMMU_PFUL_CFGr = 3911, + SE_CMMU_CMMU_STAT_PFUL_CFGr = 3912, + SE_CMMU_STAT_OVERFLOW_MODEr = 3913, + SE_CMMU_CMMU_CP_FIFO_PFULr = 3914, + SE_CMMU_DDR_WR_DAT0r = 3915, + SE_CMMU_DDR_WR_DAT1r = 3916, + SE_CMMU_CMMU_INT_UNMASK_FLAGr = 3917, + SE_CMMU_CMMU_INT_ENr = 3918, + SE_CMMU_CMMU_INT_MASKr = 3919, + SE_CMMU_CMMU_INT_STATUSr = 3920, + 
SE_CMMU_STAT_CMMU_REQ_CNTr = 3921, + SE_CMMU_CMMU_FC0_CNTr = 3922, + SE_CMMU_CMMU_FC1_CNTr = 3923, + SE_CMMU_CMMU_FC2_CNTr = 3924, + SMMU14K_SE_SMMU1_HASH0_TBL0_CFGr = 3925, + SMMU14K_SE_SMMU1_HASH0_TBL1_CFGr = 3926, + SMMU14K_SE_SMMU1_HASH0_TBL2_CFGr = 3927, + SMMU14K_SE_SMMU1_HASH0_TBL3_CFGr = 3928, + SMMU14K_SE_SMMU1_HASH0_TBL4_CFGr = 3929, + SMMU14K_SE_SMMU1_HASH0_TBL5_CFGr = 3930, + SMMU14K_SE_SMMU1_HASH0_TBL6_CFGr = 3931, + SMMU14K_SE_SMMU1_HASH0_TBL7_CFGr = 3932, + SMMU14K_SE_SMMU1_HASH1_TBL0_CFGr = 3933, + SMMU14K_SE_SMMU1_HASH1_TBL1_CFGr = 3934, + SMMU14K_SE_SMMU1_HASH1_TBL2_CFGr = 3935, + SMMU14K_SE_SMMU1_HASH1_TBL3_CFGr = 3936, + SMMU14K_SE_SMMU1_HASH1_TBL4_CFGr = 3937, + SMMU14K_SE_SMMU1_HASH1_TBL5_CFGr = 3938, + SMMU14K_SE_SMMU1_HASH1_TBL6_CFGr = 3939, + SMMU14K_SE_SMMU1_HASH1_TBL7_CFGr = 3940, + SMMU14K_SE_SMMU1_HASH2_TBL0_CFGr = 3941, + SMMU14K_SE_SMMU1_HASH2_TBL1_CFGr = 3942, + SMMU14K_SE_SMMU1_HASH2_TBL2_CFGr = 3943, + SMMU14K_SE_SMMU1_HASH2_TBL3_CFGr = 3944, + SMMU14K_SE_SMMU1_HASH2_TBL4_CFGr = 3945, + SMMU14K_SE_SMMU1_HASH2_TBL5_CFGr = 3946, + SMMU14K_SE_SMMU1_HASH2_TBL6_CFGr = 3947, + SMMU14K_SE_SMMU1_HASH2_TBL7_CFGr = 3948, + SMMU14K_SE_SMMU1_HASH3_TBL0_CFGr = 3949, + SMMU14K_SE_SMMU1_HASH3_TBL1_CFGr = 3950, + SMMU14K_SE_SMMU1_HASH3_TBL2_CFGr = 3951, + SMMU14K_SE_SMMU1_HASH3_TBL3_CFGr = 3952, + SMMU14K_SE_SMMU1_HASH3_TBL4_CFGr = 3953, + SMMU14K_SE_SMMU1_HASH3_TBL5_CFGr = 3954, + SMMU14K_SE_SMMU1_HASH3_TBL6_CFGr = 3955, + SMMU14K_SE_SMMU1_HASH3_TBL7_CFGr = 3956, + STAT_STAT_CFG_CPU_IND_ERAM_WDAT1r = 3957, + STAT_STAT_CFG_CPU_IND_ERAM_WDAT2r = 3958, + STAT_STAT_CFG_CPU_IND_ERAM_WDAT3r = 3959, + STAT_STAT_CFG_CPU_IND_ERAM_REQ_INFOr = 3960, + STAT_STAT_CFG_CPU_IND_ERAM_RD_DONEr = 3961, + STAT_STAT_CFG_CPU_IND_ERAM_RDAT0r = 3962, + STAT_STAT_CFG_CPU_IND_ERAM_RDAT1r = 3963, + STAT_STAT_CFG_CPU_IND_ERAM_RDAT2r = 3964, + STAT_STAT_CFG_CPU_IND_ERAM_RDAT3r = 3965, + STAT_STAT_CFG_TM_ALU_ERAM_CPU_RDYr = 3966, + STAT_STAT_CFG_OAM_STAT_CFGr = 3967, + 
STAT_STAT_CFG_FTM_PORT_SEL_CFGr = 3968, + STAT_STAT_CFG_OAM_ERAM_BASE_ADDRr = 3969, + STAT_STAT_CFG_OAM_LM_ERAM_BASE_ADDRr = 3970, + STAT_STAT_CFG_OAM_DDR_BASE_ADDRr = 3971, + STAT_STAT_CFG_PLCR0_SCHD_PFUL_CFGr = 3972, + STAT_STAT_CFG_OAM_LM_ORD_PFUL_CFGr = 3973, + STAT_STAT_CFG_DDR_SCHD_PFUL_CFGr = 3974, + STAT_STAT_CFG_ERAM_SCHD_PFUL_CFGr = 3975, + STAT_STAT_CFG_ERAM_SCHD_PEPT_CFGr = 3976, + STAT_STAT_CFG_ERAM_SCHD_OAM_PFUL_CFGr = 3977, + STAT_STAT_CFG_ERAM_SCHD_OAM_PEPT_CFGr = 3978, + STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PFUL_CFGr = 3979, + STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PEPT_CFGr = 3980, + STAT_STAT_CFG_RSCHD_PFUL_CFGr = 3981, + STAT_STAT_CFG_RSCHD_PEPT_CFGr = 3982, + STAT_STAT_CFG_RSCHD_PLCR_PFUL_CFGr = 3983, + STAT_STAT_CFG_RSCHD_PLCR_PEPT_CFGr = 3984, + STAT_STAT_CFG_RSCHD_PLCR_INFO_PFUL_CFGr = 3985, + STAT_STAT_CFG_ALU_ARB_CPU_PFUL_CFGr = 3986, + STAT_STAT_CFG_ALU_ARB_USER_PFUL_CFGr = 3987, + STAT_STAT_CFG_ALU_ARB_STAT_PFUL_CFGr = 3988, + STAT_STAT_CFG_CYCMOV_DAT_PFUL_CFGr = 3989, + STAT_STAT_CFG_DDR_OPR_PFUL_CFGr = 3990, + STAT_STAT_CFG_CYCLE_MOV_PFUL_CFGr = 3991, + STAT_STAT_CFG_CNTOVF_PFUL_CFGr = 3992, + STAT_STAT_CFG_ERAM_SCHD_PLCR_PFUL_CFGr = 3993, + STAT_STAT_CFG_ERAM_SCHD_PLCR_PEPT_CFGr = 3994, + STAT_STAT_CFG_DEBUG_CNT_MODEr = 3995, + STAT_STAT_CFG_TM_MOV_PERIOD_CFGr = 3996, + STAT_STAT_CFG_ALU_DDR_CPU_REQ_PFUL_CFGr = 3997, + STAT_STAT_CFG_CYCMOV_ADDR_PFUL_CFGr = 3998, + STAT_STAT_CFG_ORD_DDR_PLCR_FIFO_EMPTYr = 3999, + STAT_STAT_CFG_TM_STAT_FIFO_EMPTYr = 4000, + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_0_1r = 4001, + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_2_3r = 4002, + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_4_5r = 4003, + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_6_7r = 4004, + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_FREE_8r = 4005, + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_0_3r = 4006, + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_4_7r = 4007, + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_8_11r = 4008, + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_12_15r = 4009, + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_PLCR_16_17r = 4010, + 
STAT_STAT_CFG_STAT_INT_UNMASK_FLAGr = 4011, + STAT_STAT_CFG_STAT_INT0_ENr = 4012, + STAT_STAT_CFG_STAT_INT0_MASKr = 4013, + STAT_STAT_CFG_STAT_INT0_STATUSr = 4014, + STAT_STAT_CFG_STAT_INT1_ENr = 4015, + STAT_STAT_CFG_STAT_INT1_MASKr = 4016, + STAT_STAT_CFG_STAT_INT1_STATUSr = 4017, + STAT_STAT_CFG_STAT_INT2_ENr = 4018, + STAT_STAT_CFG_STAT_INT2_MASKr = 4019, + STAT_STAT_CFG_STAT_INT2_STATUSr = 4020, + STAT_STAT_CFG_STAT_INT3_ENr = 4021, + STAT_STAT_CFG_STAT_INT3_MASKr = 4022, + STAT_STAT_CFG_STAT_INT3_STATUSr = 4023, + STAT_STAT_CFG_STAT_INT4_ENr = 4024, + STAT_STAT_CFG_STAT_INT4_MASKr = 4025, + STAT_STAT_CFG_STAT_INT4_STATUSr = 4026, + STAT_STAT_CFG_STAT_INT5_ENr = 4027, + STAT_STAT_CFG_STAT_INT5_MASKr = 4028, + STAT_STAT_CFG_STAT_INT5_STATUSr = 4029, + STAT_STAT_CFG_RSCHD_ECC_BYPASSr = 4030, + STAT_STAT_CFG_RSCHD_ECC_SINGLE_ERRr = 4031, + STAT_STAT_CFG_RSCHD_ECC_DOUBLE_ERRr = 4032, + STAT_STAT_CFG_CPU_IND_DDR_WDAT0r = 4033, + STAT_STAT_CFG_CPU_IND_DDR_WDAT1r = 4034, + STAT_STAT_CFG_CPU_IND_DDR_WDAT2r = 4035, + STAT_STAT_CFG_CPU_IND_DDR_WDAT3r = 4036, + STAT_STAT_CFG_CPU_IND_DDR_WDAT4r = 4037, + STAT_STAT_CFG_CPU_IND_DDR_WDAT5r = 4038, + STAT_STAT_CFG_CPU_IND_DDR_WDAT6r = 4039, + STAT_STAT_CFG_CPU_IND_DDR_WDAT7r = 4040, + STAT_STAT_CFG_CPU_IND_DDR_WDAT8r = 4041, + STAT_STAT_CFG_CPU_IND_DDR_WDAT9r = 4042, + STAT_STAT_CFG_CPU_IND_DDR_WDAT10r = 4043, + STAT_STAT_CFG_CPU_IND_DDR_WDAT11r = 4044, + STAT_STAT_CFG_CPU_IND_DDR_WDAT12r = 4045, + STAT_STAT_CFG_CPU_IND_DDR_WDAT13r = 4046, + STAT_STAT_CFG_CPU_IND_DDR_WDAT14r = 4047, + STAT_STAT_CFG_CPU_IND_DDR_WDAT15r = 4048, + STAT_STAT_CFG_CPU_IND_DDR_REQ_INFOr = 4049, + STAT_STAT_CFG_CPU_IND_DDR_RD_DONEr = 4050, + STAT_STAT_CFG_CPU_IND_DDR_RDAT0r = 4051, + STAT_STAT_CFG_CPU_IND_DDR_RDAT1r = 4052, + STAT_STAT_CFG_CPU_IND_DDR_RDAT2r = 4053, + STAT_STAT_CFG_CPU_IND_DDR_RDAT3r = 4054, + STAT_STAT_CFG_CPU_IND_DDR_RDAT4r = 4055, + STAT_STAT_CFG_CPU_IND_DDR_RDAT5r = 4056, + STAT_STAT_CFG_CPU_IND_DDR_RDAT6r = 4057, + 
STAT_STAT_CFG_CPU_IND_DDR_RDAT7r = 4058, + STAT_STAT_CFG_CPU_IND_DDR_RDAT8r = 4059, + STAT_STAT_CFG_CPU_IND_DDR_RDAT9r = 4060, + STAT_STAT_CFG_CPU_IND_DDR_RDAT10r = 4061, + STAT_STAT_CFG_CPU_IND_DDR_RDAT11r = 4062, + STAT_STAT_CFG_CPU_IND_DDR_RDAT12r = 4063, + STAT_STAT_CFG_CPU_IND_DDR_RDAT13r = 4064, + STAT_STAT_CFG_CPU_IND_DDR_RDAT14r = 4065, + STAT_STAT_CFG_CPU_IND_DDR_RDAT15r = 4066, + STAT_STAT_CFG_TM_ALU_DDR_CPU_RDYr = 4067, + STAT_STAT_CFG_EPT_FLAGr = 4068, + STAT_STAT_CFG_PPU_SOFT_RSTr = 4069, + STAT_STAT_CFG_STAT_SMMU0_FC15_0_CNTr = 4070, + STAT_STAT_CFG_SMMU0_STAT_FC15_0_CNTr = 4071, + STAT_STAT_CFG_SMMU0_STAT_RSP15_0_CNTr = 4072, + STAT_STAT_CFG_STAT_SMMU0_REQ15_0_CNTr = 4073, + STAT_STAT_CFG_PPU_STAT_MEC5_0_RSP_FC_CNTr = 4074, + STAT_STAT_CFG_STAT_PPU_MEC5_0_KEY_FC_CNTr = 4075, + STAT_STAT_CFG_STAT_PPU_MEC5_0_RSP_CNTr = 4076, + STAT_STAT_CFG_PPU_STAT_MEC5_0_KEY_CNTr = 4077, + STAT_STAT_CFG_PPU5_0_NO_EXIST_OPCD_EX_CNTr = 4078, + STAT_STAT_CFG_SE_ETM_STAT_WR_FC_CNTr = 4079, + STAT_STAT_CFG_SE_ETM_STAT_RD_FC_CNTr = 4080, + STAT_STAT_CFG_STAT_ETM_DEQ_FC_CNTr = 4081, + STAT_STAT_CFG_STAT_ETM_ENQ_FC_CNTr = 4082, + STAT_STAT_CFG_STAT_OAM_LM_FC_CNTr = 4083, + STAT_STAT_CFG_OAM_STAT_LM_FC_CNTr = 4084, + STAT_STAT_CFG_STAT_OAM_FC_CNTr = 4085, + STAT_STAT_CFG_CMMU_STAT_FC_CNTr = 4086, + STAT_STAT_CFG_STAT_CMMU_REQ_CNTr = 4087, + STAT_STAT_CFG_SMMU0_PLCR_RSP0_CNTr = 4088, + STAT_STAT_CFG_PLCR_SMMU0_REQ0_CNTr = 4089, + STAT_STAT_CFG_STAT_OAM_LM_RSP_CNTr = 4090, + STAT_STAT_CFG_OAM_STAT_LM_REQ_CNTr = 4091, + STAT_STAT_CFG_OAM_STAT_REQ_CNTr = 4092, + STAT_STAT_CFG_SE_ETM_STAT_RSP_CNTr = 4093, + STAT_STAT_CFG_ETM_STAT_SE_WR_REQ_CNTr = 4094, + STAT_STAT_CFG_ETM_STAT_SE_RD_REQ_CNTr = 4095, + STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT0r = 4096, + STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT1r = 4097, + STAT_STAT_CFG_TM_STAT_ERAM_CPU_RSP_CNTr = 4098, + STAT_STAT_CFG_CPU_RD_ERAM_REQ_CNTr = 4099, + STAT_STAT_CFG_CPU_WR_ERAM_REQ_CNTr = 4100, + STAT_STAT_CFG_TM_STAT_DDR_CPU_RSP_CNTr = 4101, 
+ STAT_STAT_CFG_CPU_RD_DDR_REQ_CNTr = 4102, + STAT_STAT_CFG_CPU_WR_DDR_REQ_CNTr = 4103, + STAT_ETCAM_CPU_IND_WDAT1r = 4104, + STAT_ETCAM_CPU_IND_WDAT2r = 4105, + STAT_ETCAM_CPU_IND_WDAT3r = 4106, + STAT_ETCAM_CPU_IND_WDAT4r = 4107, + STAT_ETCAM_CPU_IND_WDAT5r = 4108, + STAT_ETCAM_CPU_IND_WDAT6r = 4109, + STAT_ETCAM_CPU_IND_WDAT7r = 4110, + STAT_ETCAM_CPU_IND_WDAT8r = 4111, + STAT_ETCAM_CPU_IND_WDAT9r = 4112, + STAT_ETCAM_CPU_IND_WDAT10r = 4113, + STAT_ETCAM_CPU_IND_WDAT11r = 4114, + STAT_ETCAM_CPU_IND_WDAT12r = 4115, + STAT_ETCAM_CPU_IND_WDAT13r = 4116, + STAT_ETCAM_CPU_IND_WDAT14r = 4117, + STAT_ETCAM_CPU_IND_WDAT15r = 4118, + STAT_ETCAM_CPU_IND_WDAT16r = 4119, + STAT_ETCAM_CPU_IND_WDAT17r = 4120, + STAT_ETCAM_CPU_IND_WDAT18r = 4121, + STAT_ETCAM_CPU_IND_WDAT19r = 4122, + STAT_ETCAM_T_STRWC_CFGr = 4123, + STAT_ETCAM_ETCAM_INT_UNMASK_FLAGr = 4124, + STAT_ETCAM_ETCAM_INT_EN0r = 4125, + STAT_ETCAM_ETCAM_INT_MASK0r = 4126, + STAT_ETCAM_ETCAM_INT_STATUSr = 4127, + STAT_ETCAM_INT_TB_INI_OKr = 4128, + STAT_ETCAM_ETCAM_CLK_ENr = 4129, + STAT_ETCAM_AS_ETCAM_REQ0_CNTr = 4130, + STAT_ETCAM_AS_ETCAM_REQ1_CNTr = 4131, + STAT_ETCAM_ETCAM_AS_INDEX0_CNTr = 4132, + STAT_ETCAM_ETCAM_AS_INDEX1_CNTr = 4133, + STAT_ETCAM_ETCAM_NOT_HIT0_CNTr = 4134, + STAT_ETCAM_ETCAM_NOT_HIT1_CNTr = 4135, + STAT_ETCAM_TABLE_ID_NOT_MATCH_CNTr = 4136, + STAT_ETCAM_TABLE_ID_CLASH01_CNTr = 4137, + STAT_ETCAM_ETCAM_CPU_FLr = 4138, + STAT_ETCAM_ETCAM_ARB_EMPTYr = 4139, + DTB_DTB_CFG_CFG_FINISH_INT_EVENT0r = 4140, + DTB_DTB_CFG_CFG_FINISH_INT_EVENT1r = 4141, + DTB_DTB_CFG_CFG_FINISH_INT_EVENT2r = 4142, + DTB_DTB_CFG_CFG_FINISH_INT_EVENT3r = 4143, + DTB_DTB_CFG_CFG_FINISH_INT_MAKS0r = 4144, + DTB_DTB_CFG_CFG_FINISH_INT_MAKS1r = 4145, + DTB_DTB_CFG_CFG_FINISH_INT_MAKS2r = 4146, + DTB_DTB_CFG_CFG_FINISH_INT_MAKS3r = 4147, + DTB_DTB_CFG_CFG_FINISH_INT_TEST0r = 4148, + DTB_DTB_CFG_CFG_FINISH_INT_TEST1r = 4149, + DTB_DTB_CFG_CFG_FINISH_INT_TEST2r = 4150, + DTB_DTB_CFG_CFG_FINISH_INT_TEST3r = 4151, + 
DTB_DTB_CFG_CFG_DTB_INT_TO_RISCV_SELr = 4152, + DTB_DTB_CFG_CFG_DTB_EP_INT_MSIX_ENABLEr = 4153, + DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_H_0_15r = 4154, + DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_L_0_15r = 4155, + DTB_DTB_CFG_CFG_DTB_DEBUG_MODE_ENr = 4156, + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_HIGHr = 4157, + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_LOWr = 4158, + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_LENr = 4159, + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USERr = 4160, + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ONLOAD_CNTr = 4161, + DTB_DTB_CFG_CNT_AXI_RD_TABLE_RESP_ERRr = 4162, + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_HIGHr = 4163, + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_LOWr = 4164, + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_LENr = 4165, + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USERr = 4166, + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ONLOAD_CNTr = 4167, + DTB_DTB_CFG_CNT_AXI_RD_PD_RESP_ERRr = 4168, + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_HIGHr = 4169, + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_LOWr = 4170, + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_LENr = 4171, + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USERr = 4172, + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ONLOAD_CNTr = 4173, + DTB_DTB_CFG_CNT_AXI_WR_CTRL_RESP_ERRr = 4174, + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_HIGHr = 4175, + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_LOWr = 4176, + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_LENr = 4177, + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USERr = 4178, + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ONLOAD_CNTr = 4179, + DTB_DTB_CFG_CNT_AXI_WR_DDR_RESP_ERRr = 4180, + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_HIGHr = 4181, + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_LOWr = 4182, + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_LENr = 4183, + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USERr = 4184, + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ONLOAD_CNTr = 4185, + DTB_DTB_CFG_CNT_AXI_WR_FIN_RESP_ERRr = 4186, + DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_HIGHr = 4187, + DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_LOWr = 4188, + DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_HIGHr = 4189, + DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_LOWr = 4190, + 
DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_HIGHr = 4191, + DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_LOWr = 4192, + DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_HIGHr = 4193, + DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_LOWr = 4194, + DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_HIGHr = 4195, + DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_LOWr = 4196, + DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_HIGHr = 4197, + DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_LOWr = 4198, + DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_HIGHr = 4199, + DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_LOWr = 4200, + DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_HIGHr = 4201, + DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_LOWr = 4202, + DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_HIGHr = 4203, + DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_LOWr = 4204, + DTB_DTB_CFG_INFO_WR_CTRL_STATEr = 4205, + DTB_DTB_CFG_INFO_RD_TABLE_STATEr = 4206, + DTB_DTB_CFG_INFO_RD_PD_STATEr = 4207, + DTB_DTB_CFG_INFO_DUMP_CMD_STATEr = 4208, + DTB_DTB_CFG_INFO_WR_DDR_STATEr = 4209, + DTB_DTB_CFG_CFG_DTB_DEBUG_INFO_CLRr = 4210, + DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_0_15r = 4211, + DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_COMP_ENr = 4212, + DTB_DDOS_CFG_DDOS_DUMP_STAT_NUMr = 4213, + DTB_DDOS_CFG_DDOS_EVEN_HASH_TABLE_BADDRr = 4214, + DTB_DDOS_CFG_DDOS_ODD_HASH_TABLE_BADDRr = 4215, + DTB_DDOS_CFG_DDOS_STAT_INDEX_OFFSETr = 4216, + DTB_DDOS_CFG_DDOS_NS_FLAG_CNTr = 4217, + DTB_DDOS_CFG_DDOS_EVEN_STAT_TABLE_BADDRr = 4218, + DTB_DDOS_CFG_DDOS_ODD_STAT_TABLE_BADDRr = 4219, + DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_Hr = 4220, + DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_Lr = 4221, + DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_Hr = 4222, + DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_Lr = 4223, + DTB_DDOS_CFG_DDOS_WORK_MODE_ENABLEr = 4224, + DTB_DDOS_CFG_DDOS_STAT_TABLE_LENr = 4225, + DTB_DDOS_CFG_DDOS_HASH_TABLE_LENr = 4226, + DTB_DTB_RAM_TRAF_CTRL_RAM0_0_255r = 4227, + DTB_DTB_RAM_TRAF_CTRL_RAM1_0_255r = 4228, + DTB_DTB_RAM_TRAF_CTRL_RAM2_0_255r = 4229, + DTB_DTB_RAM_TRAF_CTRL_RAM3_0_255r = 4230, + DTB_DTB_RAM_TRAF_CTRL_RAM4_0_255r = 4231, + DTB_DTB_RAM_TRAF_CTRL_RAM5_0_63r = 4232, + 
DTB_DTB_RAM_DUMP_PD_RAM_0_2047r = 4233, + DTB_DTB_RAM_RD_CTRL_RAM_0_4095r = 4234, + DTB_DTB_RAM_RD_TABLE_RAM_0_8191r = 4235, + DTB_DTB_RAM_DTB_CMD_MAN_RAM_0_16383r = 4236, + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_STr = 4237, + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_INDr = 4238, + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_SLAVE_INDr = 4239, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_UP_WATER_LEVELr = 4240, + TRPG_TRPG_RX_PORT_CPU_TRPGRX_LOW_WATER_LEVELr = 4241, + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_STr = 4242, + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_INDr = 4243, + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_SLAVE_INDr = 4244, + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_EVENTr = 4245, + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_TESTr = 4246, + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_ADDRr = 4247, + TRPG_TRPG_TX_TODTIME_RAM_TRPG_TX_TODTIME_RAMr = 4248, + TSN_TSN_PORT_CFG_TSN_TEST_REGr = 4249, + TSN_TSN_PORT_CFG_TSN_PORT_QBV_ENABLEr = 4250, + TSN_TSN_PORT_CFG_TSN_PHY_PORT_SELr = 4251, + TSN_TSN_PORT_CFG_TSN_PORT_TIME_SELr = 4252, + TSN_TSN_PORT_CFG_TSN_CLK_FREQr = 4253, + TSN_TSN_PORT_CFG_TSN_READ_RAM_Nr = 4254, + TSN_TSN_PORT_CFG_TSN_EXE_TIMEr = 4255, + TSN_TSN_PORT_CFG_TSN_PORT_ITR_SHIFTr = 4256, + TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_Hr = 4257, + TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_Lr = 4258, + TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_Hr = 4259, + TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_Lr = 4260, + TSN_TSN_PORT_CFG_TSN_PORT_GUARD_BAND_TIMEr = 4261, + TSN_TSN_PORT_CFG_TSN_PORT_DEFAULT_GATE_ENr = 4262, + TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_GATE_ENr = 4263, + TSN_TSN_PORT_CFG_TSN_PORT_INIT_FINISHr = 4264, + TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_ENr = 4265, + TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM0r = 4266, + TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM1r = 4267, + TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE0r = 4268, + TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE1r = 4269, + AXI_AXI_CONV_CFG_EPID_V_FUNC_NUMr = 4270, + AXI_AXI_CONV_INFO_AXIM_RW_HSK_CNTr = 4271, + AXI_AXI_CONV_INFO_AXIM_LAST_WR_IDr = 4272, + AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_Hr = 4273, + 
AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_Lr = 4274, + AXI_AXI_CONV_CFG_DEBUG_INFO_CLR_ENr = 4275, + PTPTM_PTP_TOP_PP1S_INTERRUPTr = 4276, + PTPTM_PTP_TOP_PP1S_EXTERNAL_SELECTr = 4277, + PTPTM_PTP_TOP_PP1S_OUT_SELECTr = 4278, + PTPTM_PTP_TOP_TEST_PP1S_SELECTr = 4279, + PTPTM_PTP_TOP_LOCAL_PP1S_ENr = 4280, + PTPTM_PTP_TOP_LOCAL_PP1S_ADJUSTr = 4281, + PTPTM_PTP_TOP_LOCAL_PP1S_ADJUST_VALUEr = 4282, + PTPTM_PTP_TOP_PP1S_TO_NP_SELECTr = 4283, + PTPTM_PTP_TOP_PD_U1_SELr = 4284, + PTPTM_PTP_TOP_PD_U1_PD0_SHIFTr = 4285, + PTPTM_PTP_TOP_PD_U1_PD1_SHIFTr = 4286, + PTPTM_PTP_TOP_PD_U1_RESULTr = 4287, + PTPTM_PTP_TOP_PD_U2_SELr = 4288, + PTPTM_PTP_TOP_PD_U2_PD0_SHIFTr = 4289, + PTPTM_PTP_TOP_PD_U2_PD1_SHIFTr = 4290, + PTPTM_PTP_TOP_PD_U2_RESULTr = 4291, + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY0r = 4292, + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY0r = 4293, + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY1r = 4294, + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY1r = 4295, + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY2r = 4296, + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY2r = 4297, + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY3r = 4298, + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY3r = 4299, + PTPTM_PTP_TOP_TSN_PTP1588_RDMA_NANOSECOND_DELAYr = 4300, + PTPTM_PTP_TOP_PTP1588_RDMA_FRACNANOSECOND_DELAYr = 4301, + PTPTM_PTP_TOP_PTP1588_NP_NANOSECOND_DELAYr = 4302, + PTPTM_PTP_TOP_PTP1588_NP_FRACNANOSECOND_DELAYr = 4303, + PTPTM_PTP_TOP_TIME_SYNC_PERIODr = 4304, + PTPTM_PTPTM_MODULE_IDr = 4305, + PTPTM_PTPTM_MODULE_VERSIONr = 4306, + PTPTM_PTPTM_MODULE_DATEr = 4307, + PTPTM_PTPTM_INTERRUPT_STATUSr = 4308, + PTPTM_PTPTM_INTERRUPT_EVENTr = 4309, + PTPTM_PTPTM_INTERRUPT_MASKr = 4310, + PTPTM_PTPTM_INTERRUPT_TESTr = 4311, + PTPTM_PTPTM_HW_CLOCK_CYCLE_INTEGERr = 4312, + PTPTM_PTPTM_HW_CLOCK_CYCLE_FRACTIONr = 4313, + PTPTM_PTPTM_PTP_CLOCK_CYCLE_INTEGERr = 4314, + PTPTM_PTPTM_PTP_CLOCK_CYCLE_FRACTIONr = 4315, + PTPTM_PTPTM_PTP_CONFIGURATIONr = 4316, + PTPTM_PTPTM_TIMER_CONTROLr = 4317, + 
PTPTM_PTPTM_PPS_INCOME_DELAYr = 4318, + PTPTM_PTPTM_CLOCK_CYCLE_UPDATEr = 4319, + PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_1r = 4320, + PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_2r = 4321, + PTPTM_PTPTM_TIMER_LATCH_ENr = 4322, + PTPTM_PTPTM_TIMER_LATCH_SELr = 4323, + PTPTM_PTPTM_TRIGGER_IN_TOD_NANOSECONDr = 4324, + PTPTM_PTPTM_TRIGGER_IN_LOWER_TOD_SECONDr = 4325, + PTPTM_PTPTM_TRIGGER_IN_HIGH_TOD_SECONDr = 4326, + PTPTM_PTPTM_TRIGGER_IN_FRACNANOSECONDr = 4327, + PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_LOWr = 4328, + PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_HIGHr = 4329, + PTPTM_PTPTM_TRIGGER_OUT_TOD_NANOSECONDr = 4330, + PTPTM_PTPTM_TRIGGER_OUT_LOWER_TOD_SECONDr = 4331, + PTPTM_PTPTM_TRIGGER_OUT_HIGH_TOD_SECONDr = 4332, + PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_LOWr = 4333, + PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_HIGHr = 4334, + PTPTM_PTPTM_ADJUST_TOD_NANOSECONDr = 4335, + PTPTM_PTPTM_ADJUST_LOWER_TOD_SECONDr = 4336, + PTPTM_PTPTM_ADJUST_HIGH_TOD_SECONDr = 4337, + PTPTM_PTPTM_ADJUST_FRACNANOSECONDr = 4338, + PTPTM_PTPTM_ADJUST_HARDWARE_TIME_LOWr = 4339, + PTPTM_PTPTM_ADJUST_HARDWARE_TIME_HIGHr = 4340, + PTPTM_PTPTM_LATCH_TOD_NANOSECONDr = 4341, + PTPTM_PTPTM_LATCH_LOWER_TOD_SECONDr = 4342, + PTPTM_PTPTM_LATCH_HIGH_TOD_SECONDr = 4343, + PTPTM_PTPTM_LATCH_FRACNANOSECONDr = 4344, + PTPTM_PTPTM_LATCH_HARDWARE_TIME_LOWr = 4345, + PTPTM_PTPTM_LATCH_HARDWARE_TIME_HIGHr = 4346, + PTPTM_PTPTM_REAL_TOD_NANOSECONDr = 4347, + PTPTM_PTPTM_REAL_LOWER_TOD_SECONDr = 4348, + PTPTM_PTPTM_REAL_HIGH_TOD_SECONDr = 4349, + PTPTM_PTPTM_REAL_HARDWARE_TIME_LOWr = 4350, + PTPTM_PTPTM_REAL_HARDWARE_TIME_HIGHr = 4351, + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_PORTr = 4352, + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_LOWr = 4353, + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_HIGHr = 4354, + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_FIFO_STATUSr = 4355, + PTPTM_PTPTM_PP1S_LATCH_TOD_NANOSECONDr = 4356, + PTPTM_PTPTM_PP1S_LATCH_LOWER_TOD_SECONDr = 4357, + PTPTM_PTPTM_PP1S_LATCH_HIGH_TOD_SECONDr = 4358, + 
PTPTM_PTPTM_PP1S_LATCH_FRACNANOSECONDr = 4359, + PTPTM_PTPTM_TSN_TIME_CONFIGURATIONr = 4360, + PTPTM_PTPTM_TSN_TIMER_CONTROLr = 4361, + PTPTM_PTPTM_TSN0_CLOCK_CYCLE_INTEGERr = 4362, + PTPTM_PTPTM_TSN0_CLOCK_CYCLE_FRACTIONr = 4363, + PTPTM_PTPTM_TSN1_CLOCK_CYCLE_INTEGERr = 4364, + PTPTM_PTPTM_TSN1_CLOCK_CYCLE_FRACTIONr = 4365, + PTPTM_PTPTM_TSN2_CLOCK_CYCLE_INTEGERr = 4366, + PTPTM_PTPTM_TSN2_CLOCK_CYCLE_FRACTIONr = 4367, + PTPTM_PTPTM_TSN3_CLOCK_CYCLE_INTEGERr = 4368, + PTPTM_PTPTM_TSN3_CLOCK_CYCLE_FRACTIONr = 4369, + PTPTM_PTPTM_TSN0_ADJUST_TOD_NANOSECONDr = 4370, + PTPTM_PTPTM_TSN0_ADJUST_LOWER_TOD_SECONDr = 4371, + PTPTM_PTPTM_TSN0_ADJUST_HIGH_TOD_SECONDr = 4372, + PTPTM_PTPTM_TSN0_ADJUST_FRACNANOSECONDr = 4373, + PTPTM_PTPTM_TSN1_ADJUST_TOD_NANOSECONDr = 4374, + PTPTM_PTPTM_TSN1_ADJUST_LOWER_TOD_SECONDr = 4375, + PTPTM_PTPTM_TSN1_ADJUST_HIGH_TOD_SECONDr = 4376, + PTPTM_PTPTM_TSN1_ADJUST_FRACNANOSECONDr = 4377, + PTPTM_PTPTM_TSN2_ADJUST_TOD_NANOSECONDr = 4378, + PTPTM_PTPTM_TSN2_ADJUST_LOWER_TOD_SECONDr = 4379, + PTPTM_PTPTM_TSN2_ADJUST_HIGH_TOD_SECONDr = 4380, + PTPTM_PTPTM_TSN2_ADJUST_FRACNANOSECONDr = 4381, + PTPTM_PTPTM_TSN3_ADJUST_TOD_NANOSECONDr = 4382, + PTPTM_PTPTM_TSN3_ADJUST_LOWER_TOD_SECONDr = 4383, + PTPTM_PTPTM_TSN3_ADJUST_HIGH_TOD_SECONDr = 4384, + PTPTM_PTPTM_TSN3_ADJUST_FRACNANOSECONDr = 4385, + PTPTM_PTPTM_TSN0_LATCH_TOD_NANOSECONDr = 4386, + PTPTM_PTPTM_TSN0_LATCH_LOWER_TOD_SECONDr = 4387, + PTPTM_PTPTM_TSN0_LATCH_HIGH_TOD_SECONDr = 4388, + PTPTM_PTPTM_TSN0_LATCH_FRACNANOSECONDr = 4389, + PTPTM_PTPTM_TSN1_LATCH_TOD_NANOSECONDr = 4390, + PTPTM_PTPTM_TSN1_LATCH_LOWER_TOD_SECONDr = 4391, + PTPTM_PTPTM_TSN1_LATCH_HIGH_TOD_SECONDr = 4392, + PTPTM_PTPTM_TSN1_LATCH_FRACNANOSECONDr = 4393, + PTPTM_PTPTM_TSN2_LATCH_TOD_NANOSECONDr = 4394, + PTPTM_PTPTM_TSN2_LATCH_LOWER_TOD_SECONDr = 4395, + PTPTM_PTPTM_TSN2_LATCH_HIGH_TOD_SECONDr = 4396, + PTPTM_PTPTM_TSN2_LATCH_FRACNANOSECONDr = 4397, + PTPTM_PTPTM_TSN3_LATCH_TOD_NANOSECONDr = 4398, + 
PTPTM_PTPTM_TSN3_LATCH_LOWER_TOD_SECONDr = 4399, + PTPTM_PTPTM_TSN3_LATCH_HIGH_TOD_SECONDr = 4400, + PTPTM_PTPTM_TSN3_LATCH_FRACNANOSECONDr = 4401, + PTPTM_PTPTM_PP1S_LATCH_TSN0_TOD_NANOSECONDr = 4402, + PTPTM_PTPTM_PP1S_LATCH_TSN0_LOWER_TOD_SECONDr = 4403, + PTPTM_PTPTM_PP1S_LATCH_TSN0_HIGH_TOD_SECONDr = 4404, + PTPTM_PTPTM_PP1S_LATCH_TSN0_FRACNANOSECONDr = 4405, + PTPTM_PTPTM_PP1S_LATCH_TSN1_TOD_NANOSECONDr = 4406, + PTPTM_PTPTM_PP1S_LATCH_TSN1_LOWER_TOD_SECONDr = 4407, + PTPTM_PTPTM_PP1S_LATCH_TSN1_HIGH_TOD_SECONDr = 4408, + PTPTM_PTPTM_PP1S_LATCH_TSN1_FRACNANOSECONDr = 4409, + PTPTM_PTPTM_PP1S_LATCH_TSN2_TOD_NANOSECONDr = 4410, + PTPTM_PTPTM_PP1S_LATCH_TSN2_LOWER_TOD_SECONDr = 4411, + PTPTM_PTPTM_PP1S_LATCH_TSN2_HIGH_TOD_SECONDr = 4412, + PTPTM_PTPTM_PP1S_LATCH_TSN2_FRACNANOSECONDr = 4413, + PTPTM_PTPTM_PP1S_LATCH_TSN3_TOD_NANOSECONDr = 4414, + PTPTM_PTPTM_PP1S_LATCH_TSN3_LOWER_TOD_SECONDr = 4415, + PTPTM_PTPTM_PP1S_LATCH_TSN3_HIGH_TOD_SECONDr = 4416, + PTPTM_PTPTM_PP1S_LATCH_TSN3_FRACNANOSECONDr = 4417, + PTPTM_PTPTM_TSN0_REAL_TOD_NANOSECONDr = 4418, + PTPTM_PTPTM_TSN0_REAL_LOWER_TOD_SECONDr = 4419, + PTPTM_PTPTM_TSN0_REAL_HIGH_TOD_SECONDr = 4420, + PTPTM_PTPTM_TSN1_REAL_TOD_NANOSECONDr = 4421, + PTPTM_PTPTM_TSN1_REAL_LOWER_TOD_SECONDr = 4422, + PTPTM_PTPTM_TSN1_REAL_HIGH_TOD_SECONDr = 4423, + PTPTM_PTPTM_TSN2_REAL_TOD_NANOSECONDr = 4424, + PTPTM_PTPTM_TSN2_REAL_LOWER_TOD_SECONDr = 4425, + PTPTM_PTPTM_TSN2_REAL_HIGH_TOD_SECONDr = 4426, + PTPTM_PTPTM_TSN3_REAL_TOD_NANOSECONDr = 4427, + PTPTM_PTPTM_TSN3_REAL_LOWER_TOD_SECONDr = 4428, + PTPTM_PTPTM_TSN3_REAL_HIGH_TOD_SECONDr = 4429, + PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_INTEGERr = 4430, + PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_FRACTIONr = 4431, + PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_INTEGERr = 4432, + PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_FRACTIONr = 4433, + PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_INTEGERr = 4434, + PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_FRACTIONr = 4435, + PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_INTEGERr = 4436, + 
PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_FRACTIONr = 4437, + PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_INTEGERr = 4438, + PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_FRACTIONr = 4439, + REG_ENUM_MAX_VALUE +} DPP_REG_INFO_E; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_struct.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_struct.h new file mode 100644 index 000000000000..0af1fb273a4f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_reg_struct.h @@ -0,0 +1,81 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_reg_struct.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 石金锋 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#ifndef _DPP_REG_STRUCT_H_ +#define _DPP_REG_STRUCT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_type_api.h" + +typedef ZXIC_UINT32 (*DPP_REG_WRITE)(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data); +typedef ZXIC_UINT32 (*DPP_REG_READ)(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data); + +#define DPP_FIELD_FLAG_RO (1 << 0) /* 只读标志 */ +#define DPP_FIELD_FLAG_RW (1 << 1) /* 读写标志 */ +#define DPP_FIELD_FLAG_RC (1 << 2) /* 读清标志 */ +#define DPP_FIELD_FLAG_WO (1 << 3) /* 只写标志 */ +#define DPP_FIELD_FLAG_WC (1 << 4) /* 写清标志 */ + +typedef struct dpp_field_t { + ZXIC_CHAR *p_name; /* 字段名 */ + ZXIC_UINT32 flags; /* 标志位 */ + ZXIC_UINT16 msb_pos; /* 最高比特位置,以寄存器列表为准*/ + ZXIC_UINT16 len; /* 字段长度,以比特为单位 */ + ZXIC_UINT32 default_value; /* 缺省值 */ + ZXIC_UINT32 default_step; /* 缺省值步长*/ +} DPP_FIELD_T; + +#define DPP_REG_FLAG_DIRECT (0 << 0) /* 直接读写寄存器 */ +#define DPP_REG_FLAG_INDIRECT (1 << 0) /* 间接读写寄存器 */ +#define DPP_REG_FLAG_WO (1 << 1) /* 只写寄存器 */ + +#define DPP_REG_NUL_ARRAY (0 
<< 0) /*零元寄存器序列 */ +#define DPP_REG_UNI_ARRAY (1 << 0) /*一元寄存器序列 */ +#define DPP_REG_BIN_ARRAY (1 << 1) /*二元寄存器序列 */ + +typedef struct dpp_reg_t { + ZXIC_CHAR *reg_name; /* 寄存器名称*/ + ZXIC_UINT32 reg_no; /* 寄存器的编号 */ + ZXIC_UINT32 module_no; /* 寄存器归属的模块的编号 */ + ZXIC_UINT32 flags; /* 标志位 */ + ZXIC_UINT32 array_type; /* 寄存器偏移类型*/ + ZXIC_UINT32 addr; /* 寄存器的芯片地址*/ + ZXIC_UINT32 width; /* 寄存器位宽,以字节为单位 */ + ZXIC_UINT32 m_size; /* 寄存器序列参数1的个数 */ + ZXIC_UINT32 n_size; /* 寄存器序列参数2的个数*/ + ZXIC_UINT32 m_step; /* 寄存器序列参数1的偏移步长 */ + ZXIC_UINT32 n_step; /* 寄存器序列参数2的偏移步长 */ + ZXIC_UINT32 field_num; /* 包含的字段个数 */ + DPP_FIELD_T *p_fields; /* 寄存器所有字段 */ + + DPP_REG_WRITE p_write_fun; /* 寄存器写函数 */ + DPP_REG_READ p_read_fun; /* 寄存器读函数 */ +} DPP_REG_T; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se4k_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se4k_reg.h new file mode 100644 index 000000000000..f3dc4efc6e03 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se4k_reg.h @@ -0,0 +1,279 @@ + +#ifndef _DPP_SE4K_REG_H_ +#define _DPP_SE4K_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_se4k_se_alg_cpu_cmd_rgt_t { + ZXIC_UINT32 rd_flag; + ZXIC_UINT32 mask; + ZXIC_UINT32 reg_sram_flag; + ZXIC_UINT32 zgroup_id; + ZXIC_UINT32 zblock_id; + ZXIC_UINT32 zcell_id; + ZXIC_UINT32 addr; +} DPP_SE4K_SE_ALG_CPU_CMD_RGT_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp0_t { + ZXIC_UINT32 cpu_wr_data_tmp0; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP0_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp1_t { + ZXIC_UINT32 cpu_wr_data_tmp1; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP1_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp2_t { + ZXIC_UINT32 cpu_wr_data_tmp2; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP2_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp3_t { + ZXIC_UINT32 cpu_wr_data_tmp3; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP3_T; + +typedef struct 
dpp_se4k_se_alg_cpu_wr_data_tmp4_t { + ZXIC_UINT32 cpu_wr_data_tmp4; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP4_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp5_t { + ZXIC_UINT32 cpu_wr_data_tmp5; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP5_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp6_t { + ZXIC_UINT32 cpu_wr_data_tmp6; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP6_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp7_t { + ZXIC_UINT32 cpu_wr_data_tmp7; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP7_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp8_t { + ZXIC_UINT32 cpu_wr_data_tmp8; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP8_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp9_t { + ZXIC_UINT32 cpu_wr_data_tmp9; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP9_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp10_t { + ZXIC_UINT32 cpu_wr_data_tmp10; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP10_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp11_t { + ZXIC_UINT32 cpu_wr_data_tmp11; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP11_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp12_t { + ZXIC_UINT32 cpu_wr_data_tmp12; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP12_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp13_t { + ZXIC_UINT32 cpu_wr_data_tmp13; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP13_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp14_t { + ZXIC_UINT32 cpu_wr_data_tmp14; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP14_T; + +typedef struct dpp_se4k_se_alg_cpu_wr_data_tmp15_t { + ZXIC_UINT32 cpu_wr_data_tmp15; +} DPP_SE4K_SE_ALG_CPU_WR_DATA_TMP15_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_rdy_t { + ZXIC_UINT32 cpu_rd_rdy; +} DPP_SE4K_SE_ALG_CPU_RD_RDY_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp0_t { + ZXIC_UINT32 cpu_rd_data_tmp0; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP0_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp1_t { + ZXIC_UINT32 cpu_rd_data_tmp1; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP1_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp2_t { + ZXIC_UINT32 cpu_rd_data_tmp2; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP2_T; + 
+typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp3_t { + ZXIC_UINT32 cpu_rd_data_tmp3; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP3_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp4_t { + ZXIC_UINT32 cpu_rd_data_tmp4; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP4_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp5_t { + ZXIC_UINT32 cpu_rd_data_tmp5; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP5_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp6_t { + ZXIC_UINT32 cpu_rd_data_tmp6; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP6_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp7_t { + ZXIC_UINT32 cpu_rd_data_tmp7; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP7_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp8_t { + ZXIC_UINT32 cpu_rd_data_tmp8; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP8_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp9_t { + ZXIC_UINT32 cpu_rd_data_tmp9; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP9_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp10_t { + ZXIC_UINT32 cpu_rd_data_tmp10; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP10_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp11_t { + ZXIC_UINT32 cpu_rd_data_tmp11; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP11_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp12_t { + ZXIC_UINT32 cpu_rd_data_tmp12; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP12_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp13_t { + ZXIC_UINT32 cpu_rd_data_tmp13; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP13_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp14_t { + ZXIC_UINT32 cpu_rd_data_tmp14; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP14_T; + +typedef struct dpp_se4k_se_alg_cpu_rd_data_tmp15_t { + ZXIC_UINT32 cpu_rd_data_tmp15; +} DPP_SE4K_SE_ALG_CPU_RD_DATA_TMP15_T; + +typedef struct dpp_se4k_se_alg_hash0_ext_cfg_rgt_t { + ZXIC_UINT32 hash0_ext_mode; + ZXIC_UINT32 hash0_ext_flag; +} DPP_SE4K_SE_ALG_HASH0_EXT_CFG_RGT_T; + +typedef struct dpp_se4k_se_alg_hash1_ext_cfg_rgt_t { + ZXIC_UINT32 hash1_ext_mode; + ZXIC_UINT32 hash1_ext_flag; +} DPP_SE4K_SE_ALG_HASH1_EXT_CFG_RGT_T; + +typedef struct 
dpp_se4k_se_alg_hash2_ext_cfg_rgt_t { + ZXIC_UINT32 hash2_ext_mode; + ZXIC_UINT32 hash2_ext_flag; +} DPP_SE4K_SE_ALG_HASH2_EXT_CFG_RGT_T; + +typedef struct dpp_se4k_se_alg_hash3_ext_cfg_rgt_t { + ZXIC_UINT32 hash3_ext_mode; + ZXIC_UINT32 hash3_ext_flag; +} DPP_SE4K_SE_ALG_HASH3_EXT_CFG_RGT_T; + +typedef struct dpp_se4k_se_alg_hash0_tbl30_depth_t { + ZXIC_UINT32 hash0_tbl3_depth; + ZXIC_UINT32 hash0_tbl2_depth; + ZXIC_UINT32 hash0_tbl1_depth; + ZXIC_UINT32 hash0_tbl0_depth; +} DPP_SE4K_SE_ALG_HASH0_TBL30_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash0_tbl74_depth_t { + ZXIC_UINT32 hash0_tbl7_depth; + ZXIC_UINT32 hash0_tbl6_depth; + ZXIC_UINT32 hash0_tbl5_depth; + ZXIC_UINT32 hash0_tbl4_depth; +} DPP_SE4K_SE_ALG_HASH0_TBL74_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash1_tbl30_depth_t { + ZXIC_UINT32 hash1_tbl3_depth; + ZXIC_UINT32 hash1_tbl2_depth; + ZXIC_UINT32 hash1_tbl1_depth; + ZXIC_UINT32 hash1_tbl0_depth; +} DPP_SE4K_SE_ALG_HASH1_TBL30_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash1_tbl74_depth_t { + ZXIC_UINT32 hash1_tbl7_depth; + ZXIC_UINT32 hash1_tbl6_depth; + ZXIC_UINT32 hash1_tbl5_depth; + ZXIC_UINT32 hash1_tbl4_depth; +} DPP_SE4K_SE_ALG_HASH1_TBL74_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash2_tbl30_depth_t { + ZXIC_UINT32 hash2_tbl3_depth; + ZXIC_UINT32 hash2_tbl2_depth; + ZXIC_UINT32 hash2_tbl1_depth; + ZXIC_UINT32 hash2_tbl0_depth; +} DPP_SE4K_SE_ALG_HASH2_TBL30_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash2_tbl74_depth_t { + ZXIC_UINT32 hash2_tbl7_depth; + ZXIC_UINT32 hash2_tbl6_depth; + ZXIC_UINT32 hash2_tbl5_depth; + ZXIC_UINT32 hash2_tbl4_depth; +} DPP_SE4K_SE_ALG_HASH2_TBL74_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash3_tbl30_depth_t { + ZXIC_UINT32 hash3_tbl3_depth; + ZXIC_UINT32 hash3_tbl2_depth; + ZXIC_UINT32 hash3_tbl1_depth; + ZXIC_UINT32 hash3_tbl0_depth; +} DPP_SE4K_SE_ALG_HASH3_TBL30_DEPTH_T; + +typedef struct dpp_se4k_se_alg_hash3_tbl74_depth_t { + ZXIC_UINT32 hash3_tbl7_depth; + ZXIC_UINT32 hash3_tbl6_depth; + ZXIC_UINT32 
hash3_tbl5_depth; + ZXIC_UINT32 hash3_tbl4_depth; +} DPP_SE4K_SE_ALG_HASH3_TBL74_DEPTH_T; + +typedef struct dpp_se4k_se_alg_wr_rsp_cfg_t { + ZXIC_UINT32 wr_rsp_fifo_cfg; +} DPP_SE4K_SE_ALG_WR_RSP_CFG_T; + +typedef struct dpp_se4k_se_alg_hash_mono_flag_t { + ZXIC_UINT32 hash3_mono_flag; + ZXIC_UINT32 hash2_mono_flag; + ZXIC_UINT32 hash1_mono_flag; + ZXIC_UINT32 hash0_mono_flag; +} DPP_SE4K_SE_ALG_HASH_MONO_FLAG_T; + +typedef struct dpp_se4k_se_alg_hash10_ext_crc_cfg_t { + ZXIC_UINT32 hash1_crc_cfg; + ZXIC_UINT32 hash0_crc_cfg; +} DPP_SE4K_SE_ALG_HASH10_EXT_CRC_CFG_T; + +typedef struct dpp_se4k_se_alg_hash32_ext_crc_cfg_t { + ZXIC_UINT32 hash3_crc_cfg; + ZXIC_UINT32 hash2_crc_cfg; +} DPP_SE4K_SE_ALG_HASH32_EXT_CRC_CFG_T; + +typedef struct dpp_se4k_se_alg_zblock_service_configure_t { + ZXIC_UINT32 service_sel; + ZXIC_UINT32 hash_channel_sel; + ZXIC_UINT32 st_en; +} DPP_SE4K_SE_ALG_ZBLOCK_SERVICE_CONFIGURE_T; + +typedef struct dpp_se4k_se_alg_zblock_hash_zcell_mono_t { + ZXIC_UINT32 ha_zcell3_mono_flag; + ZXIC_UINT32 ha_zcell3_tbl_id; + ZXIC_UINT32 ha_zcell2_mono_flag; + ZXIC_UINT32 ha_zcell2_tbl_id; + ZXIC_UINT32 ha_zcell1_mono_flag; + ZXIC_UINT32 ha_zcell1_tbl_id; + ZXIC_UINT32 ha_zcell0_mono_flag; + ZXIC_UINT32 ha_zcell0_tbl_id; +} DPP_SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONO_T; + +typedef struct dpp_se4k_se_alg_zlock_hash_zreg_mono_t { + ZXIC_UINT32 ha_zreg3_mono_flag; + ZXIC_UINT32 ha_zreg3_tbl_id; + ZXIC_UINT32 ha_zreg2_mono_flag; + ZXIC_UINT32 ha_zreg2_tbl_id; + ZXIC_UINT32 ha_zreg1_mono_flag; + ZXIC_UINT32 ha_zreg1_tbl_id; + ZXIC_UINT32 ha_zreg0_mono_flag; + ZXIC_UINT32 ha_zreg0_tbl_id; +} DPP_SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONO_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se_reg.h new file mode 100644 index 000000000000..064f1ba96066 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_se_reg.h @@ -0,0 
+1,2778 @@ + +#ifndef _DPP_SE_REG_H_ +#define _DPP_SE_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_se_alg_init_ok_t { + ZXIC_UINT32 init_ok; +} DPP_SE_ALG_INIT_OK_T; + +typedef struct dpp_se_alg_cpu_rd_rdy_t { + ZXIC_UINT32 cpu_rd_rdy; +} DPP_SE_ALG_CPU_RD_RDY_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp0_t { + ZXIC_UINT32 cpu_rd_data_tmp0; +} DPP_SE_ALG_CPU_RD_DATA_TMP0_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp1_t { + ZXIC_UINT32 cpu_rd_data_tmp1; +} DPP_SE_ALG_CPU_RD_DATA_TMP1_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp2_t { + ZXIC_UINT32 cpu_rd_data_tmp2; +} DPP_SE_ALG_CPU_RD_DATA_TMP2_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp3_t { + ZXIC_UINT32 cpu_rd_data_tmp3; +} DPP_SE_ALG_CPU_RD_DATA_TMP3_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp4_t { + ZXIC_UINT32 cpu_rd_data_tmp4; +} DPP_SE_ALG_CPU_RD_DATA_TMP4_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp5_t { + ZXIC_UINT32 cpu_rd_data_tmp5; +} DPP_SE_ALG_CPU_RD_DATA_TMP5_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp6_t { + ZXIC_UINT32 cpu_rd_data_tmp6; +} DPP_SE_ALG_CPU_RD_DATA_TMP6_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp7_t { + ZXIC_UINT32 cpu_rd_data_tmp7; +} DPP_SE_ALG_CPU_RD_DATA_TMP7_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp8_t { + ZXIC_UINT32 cpu_rd_data_tmp8; +} DPP_SE_ALG_CPU_RD_DATA_TMP8_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp9_t { + ZXIC_UINT32 cpu_rd_data_tmp9; +} DPP_SE_ALG_CPU_RD_DATA_TMP9_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp10_t { + ZXIC_UINT32 cpu_rd_data_tmp10; +} DPP_SE_ALG_CPU_RD_DATA_TMP10_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp11_t { + ZXIC_UINT32 cpu_rd_data_tmp11; +} DPP_SE_ALG_CPU_RD_DATA_TMP11_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp12_t { + ZXIC_UINT32 cpu_rd_data_tmp12; +} DPP_SE_ALG_CPU_RD_DATA_TMP12_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp13_t { + ZXIC_UINT32 cpu_rd_data_tmp13; +} DPP_SE_ALG_CPU_RD_DATA_TMP13_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp14_t { + ZXIC_UINT32 
cpu_rd_data_tmp14; +} DPP_SE_ALG_CPU_RD_DATA_TMP14_T; + +typedef struct dpp_se_alg_cpu_rd_data_tmp15_t { + ZXIC_UINT32 cpu_rd_data_tmp15; +} DPP_SE_ALG_CPU_RD_DATA_TMP15_T; + +typedef struct dpp_se_alg_lpm_v4_config_rgt_t { + ZXIC_UINT32 lpm_v4_shift_sel; + ZXIC_UINT32 lpm_v4_sram_cmp_flag; + ZXIC_UINT32 lpm_v4_ddr3_addr_sel; +} DPP_SE_ALG_LPM_V4_CONFIG_RGT_T; + +typedef struct dpp_se_alg_lpm_v6_config_rgt_t { + ZXIC_UINT32 lpm_v6_shift_sel; + ZXIC_UINT32 lpm_v6_sram_cmp_flag; + ZXIC_UINT32 lpm_v6_ddr3_addr_sel; +} DPP_SE_ALG_LPM_V6_CONFIG_RGT_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u0_pfull_ast_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u0_pfull_ast; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_AST_T; + +typedef struct dpp_se_as_hash_age_pat_cfg_t { + ZXIC_UINT32 hash_age_pat_cfg; +} DPP_SE_AS_HASH_AGE_PAT_CFG_T; + +typedef struct dpp_se_as_learn_rdy_cfg_t { + ZXIC_UINT32 learn_rdy_cfg; +} DPP_SE_AS_LEARN_RDY_CFG_T; + +typedef struct dpp_se_kschd_kschd_as_pful_cfg_t { + ZXIC_UINT32 kschd_as_pful_cfg; +} DPP_SE_KSCHD_KSCHD_AS_PFUL_CFG_T; + +typedef struct dpp_se_kschd_kschd_dir_pful_cfg_t { + ZXIC_UINT32 kschd_dir_pful_cfg; +} DPP_SE_KSCHD_KSCHD_DIR_PFUL_CFG_T; + +typedef struct dpp_se_kschd_kschd_as_ept_cfg_t { + ZXIC_UINT32 kschd_as_ept_cfg; +} DPP_SE_KSCHD_KSCHD_AS_EPT_CFG_T; + +typedef struct dpp_se_kschd_cpu_arbi_pful_cfg_t { + ZXIC_UINT32 cpu_arbi_pful_cfg; +} DPP_SE_KSCHD_CPU_ARBI_PFUL_CFG_T; + +typedef struct dpp_se_kschd_kschd_pbu_pful_cfg_t { + ZXIC_UINT32 kschd_pbu_pful_cfg; +} DPP_SE_KSCHD_KSCHD_PBU_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_dir_pful_cfg_t { + ZXIC_UINT32 rschd_dir_pful_cfg; +} DPP_SE_RSCHD_RSCHD_DIR_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_dir_ept_cfg_t { + ZXIC_UINT32 rschd_dir_ept_cfg; +} DPP_SE_RSCHD_RSCHD_DIR_EPT_CFG_T; + +typedef struct dpp_se_cfg_ppu_soft_rst_t { + ZXIC_UINT32 ppu_soft_rst; +} DPP_SE_CFG_PPU_SOFT_RST_T; + +typedef struct dpp_se_cfg_ept_flag_t { + ZXIC_UINT32 ept_flag; +} DPP_SE_CFG_EPT_FLAG_T; + 
+typedef struct dpp_se_cfg_ddr_key_lk0_3_t { + ZXIC_UINT32 ddr_key_lk0_3; +} DPP_SE_CFG_DDR_KEY_LK0_3_T; + +typedef struct dpp_se_cfg_ddr_key_lk0_2_t { + ZXIC_UINT32 ddr_key_lk0_2; +} DPP_SE_CFG_DDR_KEY_LK0_2_T; + +typedef struct dpp_se_cfg_ddr_key_lk0_1_t { + ZXIC_UINT32 ddr_key_lk0_1; +} DPP_SE_CFG_DDR_KEY_LK0_1_T; + +typedef struct dpp_se_cfg_ddr_key_lk0_0_t { + ZXIC_UINT32 ddr_key_lk0_0; +} DPP_SE_CFG_DDR_KEY_LK0_0_T; + +typedef struct dpp_se_cfg_ddr_key_lk1_3_t { + ZXIC_UINT32 ddr_key_lk1_3; +} DPP_SE_CFG_DDR_KEY_LK1_3_T; + +typedef struct dpp_se_cfg_ddr_key_lk1_2_t { + ZXIC_UINT32 ddr_key_lk1_2; +} DPP_SE_CFG_DDR_KEY_LK1_2_T; + +typedef struct dpp_se_cfg_ddr_key_lk1_1_t { + ZXIC_UINT32 ddr_key_lk1_1; +} DPP_SE_CFG_DDR_KEY_LK1_1_T; + +typedef struct dpp_se_cfg_ddr_key_lk1_0_t { + ZXIC_UINT32 ddr_key_lk1_0; +} DPP_SE_CFG_DDR_KEY_LK1_0_T; + +typedef struct dpp_se_cfg_hash_key_lk0_18_t { + ZXIC_UINT32 hash_key_lk0_18; +} DPP_SE_CFG_HASH_KEY_LK0_18_T; + +typedef struct dpp_se_cfg_hash_key_lk0_17_t { + ZXIC_UINT32 hash_key_lk0_17; +} DPP_SE_CFG_HASH_KEY_LK0_17_T; + +typedef struct dpp_se_cfg_hash_key_lk0_16_t { + ZXIC_UINT32 hash_key_lk0_16; +} DPP_SE_CFG_HASH_KEY_LK0_16_T; + +typedef struct dpp_se_cfg_hash_key_lk0_15_t { + ZXIC_UINT32 hash_key_lk0_15; +} DPP_SE_CFG_HASH_KEY_LK0_15_T; + +typedef struct dpp_se_cfg_hash_key_lk0_14_t { + ZXIC_UINT32 hash_key_lk0_14; +} DPP_SE_CFG_HASH_KEY_LK0_14_T; + +typedef struct dpp_se_cfg_hash_key_lk0_13_t { + ZXIC_UINT32 hash_key_lk0_13; +} DPP_SE_CFG_HASH_KEY_LK0_13_T; + +typedef struct dpp_se_cfg_hash_key_lk0_12_t { + ZXIC_UINT32 hash_key_lk0_12; +} DPP_SE_CFG_HASH_KEY_LK0_12_T; + +typedef struct dpp_se_cfg_hash_key_lk0_11_t { + ZXIC_UINT32 hash_key_lk0_11; +} DPP_SE_CFG_HASH_KEY_LK0_11_T; + +typedef struct dpp_se_cfg_hash_key_lk0_10_t { + ZXIC_UINT32 hash_key_lk0_10; +} DPP_SE_CFG_HASH_KEY_LK0_10_T; + +typedef struct dpp_se_cfg_hash_key_lk0_9_t { + ZXIC_UINT32 hash_key_lk0_9; +} DPP_SE_CFG_HASH_KEY_LK0_9_T; + +typedef struct 
dpp_se_cfg_hash_key_lk0_8_t { + ZXIC_UINT32 hash_key_lk0_8; +} DPP_SE_CFG_HASH_KEY_LK0_8_T; + +typedef struct dpp_se_cfg_hash_key_lk0_7_t { + ZXIC_UINT32 hash_key_lk0_7; +} DPP_SE_CFG_HASH_KEY_LK0_7_T; + +typedef struct dpp_se_cfg_hash_key_lk0_6_t { + ZXIC_UINT32 hash_key_lk0_6; +} DPP_SE_CFG_HASH_KEY_LK0_6_T; + +typedef struct dpp_se_cfg_hash_key_lk0_5_t { + ZXIC_UINT32 hash_key_lk0_5; +} DPP_SE_CFG_HASH_KEY_LK0_5_T; + +typedef struct dpp_se_cfg_hash_key_lk0_4_t { + ZXIC_UINT32 hash_key_lk0_4; +} DPP_SE_CFG_HASH_KEY_LK0_4_T; + +typedef struct dpp_se_cfg_hash_key_lk0_3_t { + ZXIC_UINT32 hash_key_lk0_3; +} DPP_SE_CFG_HASH_KEY_LK0_3_T; + +typedef struct dpp_se_cfg_hash_key_lk0_2_t { + ZXIC_UINT32 hash_key_lk0_2; +} DPP_SE_CFG_HASH_KEY_LK0_2_T; + +typedef struct dpp_se_cfg_hash_key_lk0_1_t { + ZXIC_UINT32 hash_key_lk0_1; +} DPP_SE_CFG_HASH_KEY_LK0_1_T; + +typedef struct dpp_se_cfg_hash_key_lk0_0_t { + ZXIC_UINT32 hash_key_lk0_0; +} DPP_SE_CFG_HASH_KEY_LK0_0_T; + +typedef struct dpp_se_cfg_hash_key_lk1_18_t { + ZXIC_UINT32 hash_key_lk1_18; +} DPP_SE_CFG_HASH_KEY_LK1_18_T; + +typedef struct dpp_se_cfg_hash_key_lk1_17_t { + ZXIC_UINT32 hash_key_lk1_17; +} DPP_SE_CFG_HASH_KEY_LK1_17_T; + +typedef struct dpp_se_cfg_hash_key_lk1_16_t { + ZXIC_UINT32 hash_key_lk1_16; +} DPP_SE_CFG_HASH_KEY_LK1_16_T; + +typedef struct dpp_se_cfg_hash_key_lk1_15_t { + ZXIC_UINT32 hash_key_lk1_15; +} DPP_SE_CFG_HASH_KEY_LK1_15_T; + +typedef struct dpp_se_cfg_hash_key_lk1_14_t { + ZXIC_UINT32 hash_key_lk1_14; +} DPP_SE_CFG_HASH_KEY_LK1_14_T; + +typedef struct dpp_se_cfg_hash_key_lk1_13_t { + ZXIC_UINT32 hash_key_lk1_13; +} DPP_SE_CFG_HASH_KEY_LK1_13_T; + +typedef struct dpp_se_cfg_hash_key_lk1_12_t { + ZXIC_UINT32 hash_key_lk1_12; +} DPP_SE_CFG_HASH_KEY_LK1_12_T; + +typedef struct dpp_se_cfg_hash_key_lk1_11_t { + ZXIC_UINT32 hash_key_lk1_11; +} DPP_SE_CFG_HASH_KEY_LK1_11_T; + +typedef struct dpp_se_cfg_hash_key_lk1_10_t { + ZXIC_UINT32 hash_key_lk1_10; +} DPP_SE_CFG_HASH_KEY_LK1_10_T; + +typedef 
struct dpp_se_cfg_hash_key_lk1_9_t { + ZXIC_UINT32 hash_key_lk1_9; +} DPP_SE_CFG_HASH_KEY_LK1_9_T; + +typedef struct dpp_se_cfg_hash_key_lk1_8_t { + ZXIC_UINT32 hash_key_lk1_8; +} DPP_SE_CFG_HASH_KEY_LK1_8_T; + +typedef struct dpp_se_cfg_hash_key_lk1_7_t { + ZXIC_UINT32 hash_key_lk1_7; +} DPP_SE_CFG_HASH_KEY_LK1_7_T; + +typedef struct dpp_se_cfg_hash_key_lk1_6_t { + ZXIC_UINT32 hash_key_lk1_6; +} DPP_SE_CFG_HASH_KEY_LK1_6_T; + +typedef struct dpp_se_cfg_hash_key_lk1_5_t { + ZXIC_UINT32 hash_key_lk1_5; +} DPP_SE_CFG_HASH_KEY_LK1_5_T; + +typedef struct dpp_se_cfg_hash_key_lk1_4_t { + ZXIC_UINT32 hash_key_lk1_4; +} DPP_SE_CFG_HASH_KEY_LK1_4_T; + +typedef struct dpp_se_cfg_hash_key_lk1_3_t { + ZXIC_UINT32 hash_key_lk1_3; +} DPP_SE_CFG_HASH_KEY_LK1_3_T; + +typedef struct dpp_se_cfg_hash_key_lk1_2_t { + ZXIC_UINT32 hash_key_lk1_2; +} DPP_SE_CFG_HASH_KEY_LK1_2_T; + +typedef struct dpp_se_cfg_hash_key_lk1_1_t { + ZXIC_UINT32 hash_key_lk1_1; +} DPP_SE_CFG_HASH_KEY_LK1_1_T; + +typedef struct dpp_se_cfg_hash_key_lk1_0_t { + ZXIC_UINT32 hash_key_lk1_0; +} DPP_SE_CFG_HASH_KEY_LK1_0_T; + +typedef struct dpp_se_cfg_hash_key_lk2_18_t { + ZXIC_UINT32 hash_key_lk2_18; +} DPP_SE_CFG_HASH_KEY_LK2_18_T; + +typedef struct dpp_se_cfg_hash_key_lk2_17_t { + ZXIC_UINT32 hash_key_lk2_17; +} DPP_SE_CFG_HASH_KEY_LK2_17_T; + +typedef struct dpp_se_cfg_hash_key_lk2_16_t { + ZXIC_UINT32 hash_key_lk2_16; +} DPP_SE_CFG_HASH_KEY_LK2_16_T; + +typedef struct dpp_se_cfg_hash_key_lk2_15_t { + ZXIC_UINT32 hash_key_lk2_15; +} DPP_SE_CFG_HASH_KEY_LK2_15_T; + +typedef struct dpp_se_cfg_hash_key_lk2_14_t { + ZXIC_UINT32 hash_key_lk2_14; +} DPP_SE_CFG_HASH_KEY_LK2_14_T; + +typedef struct dpp_se_cfg_hash_key_lk2_13_t { + ZXIC_UINT32 hash_key_lk2_13; +} DPP_SE_CFG_HASH_KEY_LK2_13_T; + +typedef struct dpp_se_cfg_hash_key_lk2_12_t { + ZXIC_UINT32 hash_key_lk2_12; +} DPP_SE_CFG_HASH_KEY_LK2_12_T; + +typedef struct dpp_se_cfg_hash_key_lk2_11_t { + ZXIC_UINT32 hash_key_lk2_11; +} DPP_SE_CFG_HASH_KEY_LK2_11_T; + 
+typedef struct dpp_se_cfg_hash_key_lk2_10_t { + ZXIC_UINT32 hash_key_lk2_10; +} DPP_SE_CFG_HASH_KEY_LK2_10_T; + +typedef struct dpp_se_cfg_hash_key_lk2_9_t { + ZXIC_UINT32 hash_key_lk2_9; +} DPP_SE_CFG_HASH_KEY_LK2_9_T; + +typedef struct dpp_se_cfg_hash_key_lk2_8_t { + ZXIC_UINT32 hash_key_lk2_8; +} DPP_SE_CFG_HASH_KEY_LK2_8_T; + +typedef struct dpp_se_cfg_hash_key_lk2_7_t { + ZXIC_UINT32 hash_key_lk2_7; +} DPP_SE_CFG_HASH_KEY_LK2_7_T; + +typedef struct dpp_se_cfg_hash_key_lk2_6_t { + ZXIC_UINT32 hash_key_lk2_6; +} DPP_SE_CFG_HASH_KEY_LK2_6_T; + +typedef struct dpp_se_cfg_hash_key_lk2_5_t { + ZXIC_UINT32 hash_key_lk2_5; +} DPP_SE_CFG_HASH_KEY_LK2_5_T; + +typedef struct dpp_se_cfg_hash_key_lk2_4_t { + ZXIC_UINT32 hash_key_lk2_4; +} DPP_SE_CFG_HASH_KEY_LK2_4_T; + +typedef struct dpp_se_cfg_hash_key_lk2_3_t { + ZXIC_UINT32 hash_key_lk2_3; +} DPP_SE_CFG_HASH_KEY_LK2_3_T; + +typedef struct dpp_se_cfg_hash_key_lk2_2_t { + ZXIC_UINT32 hash_key_lk2_2; +} DPP_SE_CFG_HASH_KEY_LK2_2_T; + +typedef struct dpp_se_cfg_hash_key_lk2_1_t { + ZXIC_UINT32 hash_key_lk2_1; +} DPP_SE_CFG_HASH_KEY_LK2_1_T; + +typedef struct dpp_se_cfg_hash_key_lk2_0_t { + ZXIC_UINT32 hash_key_lk2_0; +} DPP_SE_CFG_HASH_KEY_LK2_0_T; + +typedef struct dpp_se_cfg_hash_key_lk3_18_t { + ZXIC_UINT32 hash_key_lk3_18; +} DPP_SE_CFG_HASH_KEY_LK3_18_T; + +typedef struct dpp_se_cfg_hash_key_lk3_17_t { + ZXIC_UINT32 hash_key_lk3_17; +} DPP_SE_CFG_HASH_KEY_LK3_17_T; + +typedef struct dpp_se_cfg_hash_key_lk3_16_t { + ZXIC_UINT32 hash_key_lk3_16; +} DPP_SE_CFG_HASH_KEY_LK3_16_T; + +typedef struct dpp_se_cfg_hash_key_lk3_15_t { + ZXIC_UINT32 hash_key_lk3_15; +} DPP_SE_CFG_HASH_KEY_LK3_15_T; + +typedef struct dpp_se_cfg_hash_key_lk3_14_t { + ZXIC_UINT32 hash_key_lk3_14; +} DPP_SE_CFG_HASH_KEY_LK3_14_T; + +typedef struct dpp_se_cfg_hash_key_lk3_13_t { + ZXIC_UINT32 hash_key_lk3_13; +} DPP_SE_CFG_HASH_KEY_LK3_13_T; + +typedef struct dpp_se_cfg_hash_key_lk3_12_t { + ZXIC_UINT32 hash_key_lk3_12; +} 
DPP_SE_CFG_HASH_KEY_LK3_12_T; + +typedef struct dpp_se_cfg_hash_key_lk3_11_t { + ZXIC_UINT32 hash_key_lk3_11; +} DPP_SE_CFG_HASH_KEY_LK3_11_T; + +typedef struct dpp_se_cfg_hash_key_lk3_10_t { + ZXIC_UINT32 hash_key_lk3_10; +} DPP_SE_CFG_HASH_KEY_LK3_10_T; + +typedef struct dpp_se_cfg_hash_key_lk3_9_t { + ZXIC_UINT32 hash_key_lk3_9; +} DPP_SE_CFG_HASH_KEY_LK3_9_T; + +typedef struct dpp_se_cfg_hash_key_lk3_8_t { + ZXIC_UINT32 hash_key_lk3_8; +} DPP_SE_CFG_HASH_KEY_LK3_8_T; + +typedef struct dpp_se_cfg_hash_key_lk3_7_t { + ZXIC_UINT32 hash_key_lk3_7; +} DPP_SE_CFG_HASH_KEY_LK3_7_T; + +typedef struct dpp_se_cfg_hash_key_lk3_6_t { + ZXIC_UINT32 hash_key_lk3_6; +} DPP_SE_CFG_HASH_KEY_LK3_6_T; + +typedef struct dpp_se_cfg_hash_key_lk3_5_t { + ZXIC_UINT32 hash_key_lk3_5; +} DPP_SE_CFG_HASH_KEY_LK3_5_T; + +typedef struct dpp_se_cfg_hash_key_lk3_4_t { + ZXIC_UINT32 hash_key_lk3_4; +} DPP_SE_CFG_HASH_KEY_LK3_4_T; + +typedef struct dpp_se_cfg_hash_key_lk3_3_t { + ZXIC_UINT32 hash_key_lk3_3; +} DPP_SE_CFG_HASH_KEY_LK3_3_T; + +typedef struct dpp_se_cfg_hash_key_lk3_2_t { + ZXIC_UINT32 hash_key_lk3_2; +} DPP_SE_CFG_HASH_KEY_LK3_2_T; + +typedef struct dpp_se_cfg_hash_key_lk3_1_t { + ZXIC_UINT32 hash_key_lk3_1; +} DPP_SE_CFG_HASH_KEY_LK3_1_T; + +typedef struct dpp_se_cfg_hash_key_lk3_0_t { + ZXIC_UINT32 hash_key_lk3_0; +} DPP_SE_CFG_HASH_KEY_LK3_0_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_6_t { + ZXIC_UINT32 lpm_key_lk0_6; +} DPP_SE_CFG_LPM_KEY_LK0_6_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_5_t { + ZXIC_UINT32 lpm_key_lk0_5; +} DPP_SE_CFG_LPM_KEY_LK0_5_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_4_t { + ZXIC_UINT32 lpm_key_lk0_4; +} DPP_SE_CFG_LPM_KEY_LK0_4_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_3_t { + ZXIC_UINT32 lpm_key_lk0_3; +} DPP_SE_CFG_LPM_KEY_LK0_3_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_2_t { + ZXIC_UINT32 lpm_key_lk0_2; +} DPP_SE_CFG_LPM_KEY_LK0_2_T; + +typedef struct dpp_se_cfg_lpm_key_lk0_1_t { + ZXIC_UINT32 lpm_key_lk0_1; +} DPP_SE_CFG_LPM_KEY_LK0_1_T; + 
+typedef struct dpp_se_cfg_lpm_key_lk0_0_t { + ZXIC_UINT32 lpm_key_lk0_0; +} DPP_SE_CFG_LPM_KEY_LK0_0_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_6_t { + ZXIC_UINT32 lpm_key_lk1_6; +} DPP_SE_CFG_LPM_KEY_LK1_6_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_5_t { + ZXIC_UINT32 lpm_key_lk1_5; +} DPP_SE_CFG_LPM_KEY_LK1_5_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_4_t { + ZXIC_UINT32 lpm_key_lk1_4; +} DPP_SE_CFG_LPM_KEY_LK1_4_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_3_t { + ZXIC_UINT32 lpm_key_lk1_3; +} DPP_SE_CFG_LPM_KEY_LK1_3_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_2_t { + ZXIC_UINT32 lpm_key_lk1_2; +} DPP_SE_CFG_LPM_KEY_LK1_2_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_1_t { + ZXIC_UINT32 lpm_key_lk1_1; +} DPP_SE_CFG_LPM_KEY_LK1_1_T; + +typedef struct dpp_se_cfg_lpm_key_lk1_0_t { + ZXIC_UINT32 lpm_key_lk1_0; +} DPP_SE_CFG_LPM_KEY_LK1_0_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_6_t { + ZXIC_UINT32 lpm_key_lk2_6; +} DPP_SE_CFG_LPM_KEY_LK2_6_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_5_t { + ZXIC_UINT32 lpm_key_lk2_5; +} DPP_SE_CFG_LPM_KEY_LK2_5_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_4_t { + ZXIC_UINT32 lpm_key_lk2_4; +} DPP_SE_CFG_LPM_KEY_LK2_4_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_3_t { + ZXIC_UINT32 lpm_key_lk2_3; +} DPP_SE_CFG_LPM_KEY_LK2_3_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_2_t { + ZXIC_UINT32 lpm_key_lk2_2; +} DPP_SE_CFG_LPM_KEY_LK2_2_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_1_t { + ZXIC_UINT32 lpm_key_lk2_1; +} DPP_SE_CFG_LPM_KEY_LK2_1_T; + +typedef struct dpp_se_cfg_lpm_key_lk2_0_t { + ZXIC_UINT32 lpm_key_lk2_0; +} DPP_SE_CFG_LPM_KEY_LK2_0_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_6_t { + ZXIC_UINT32 lpm_key_lk3_6; +} DPP_SE_CFG_LPM_KEY_LK3_6_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_5_t { + ZXIC_UINT32 lpm_key_lk3_5; +} DPP_SE_CFG_LPM_KEY_LK3_5_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_4_t { + ZXIC_UINT32 lpm_key_lk3_4; +} DPP_SE_CFG_LPM_KEY_LK3_4_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_3_t { + ZXIC_UINT32 lpm_key_lk3_3; 
+} DPP_SE_CFG_LPM_KEY_LK3_3_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_2_t { + ZXIC_UINT32 lpm_key_lk3_2; +} DPP_SE_CFG_LPM_KEY_LK3_2_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_1_t { + ZXIC_UINT32 lpm_key_lk3_1; +} DPP_SE_CFG_LPM_KEY_LK3_1_T; + +typedef struct dpp_se_cfg_lpm_key_lk3_0_t { + ZXIC_UINT32 lpm_key_lk3_0; +} DPP_SE_CFG_LPM_KEY_LK3_0_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_22_t { + ZXIC_UINT32 etcam_key_lk0_22; +} DPP_SE_CFG_ETCAM_KEY_LK0_22_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_21_t { + ZXIC_UINT32 etcam_key_lk0_21; +} DPP_SE_CFG_ETCAM_KEY_LK0_21_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_20_t { + ZXIC_UINT32 etcam_key_lk0_20; +} DPP_SE_CFG_ETCAM_KEY_LK0_20_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_19_t { + ZXIC_UINT32 etcam_key_lk0_19; +} DPP_SE_CFG_ETCAM_KEY_LK0_19_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_18_t { + ZXIC_UINT32 etcam_key_lk0_18; +} DPP_SE_CFG_ETCAM_KEY_LK0_18_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_17_t { + ZXIC_UINT32 etcam_key_lk0_17; +} DPP_SE_CFG_ETCAM_KEY_LK0_17_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_16_t { + ZXIC_UINT32 etcam_key_lk0_16; +} DPP_SE_CFG_ETCAM_KEY_LK0_16_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_15_t { + ZXIC_UINT32 etcam_key_lk0_15; +} DPP_SE_CFG_ETCAM_KEY_LK0_15_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_14_t { + ZXIC_UINT32 etcam_key_lk0_14; +} DPP_SE_CFG_ETCAM_KEY_LK0_14_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_13_t { + ZXIC_UINT32 etcam_key_lk0_13; +} DPP_SE_CFG_ETCAM_KEY_LK0_13_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_12_t { + ZXIC_UINT32 etcam_key_lk0_12; +} DPP_SE_CFG_ETCAM_KEY_LK0_12_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_11_t { + ZXIC_UINT32 etcam_key_lk0_11; +} DPP_SE_CFG_ETCAM_KEY_LK0_11_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_10_t { + ZXIC_UINT32 etcam_key_lk0_10; +} DPP_SE_CFG_ETCAM_KEY_LK0_10_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_9_t { + ZXIC_UINT32 etcam_key_lk0_9; +} DPP_SE_CFG_ETCAM_KEY_LK0_9_T; + +typedef struct 
dpp_se_cfg_etcam_key_lk0_8_t { + ZXIC_UINT32 etcam_key_lk0_8; +} DPP_SE_CFG_ETCAM_KEY_LK0_8_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_7_t { + ZXIC_UINT32 etcam_key_lk0_7; +} DPP_SE_CFG_ETCAM_KEY_LK0_7_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_6_t { + ZXIC_UINT32 etcam_key_lk0_6; +} DPP_SE_CFG_ETCAM_KEY_LK0_6_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_5_t { + ZXIC_UINT32 etcam_key_lk0_5; +} DPP_SE_CFG_ETCAM_KEY_LK0_5_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_4_t { + ZXIC_UINT32 etcam_key_lk0_4; +} DPP_SE_CFG_ETCAM_KEY_LK0_4_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_3_t { + ZXIC_UINT32 etcam_key_lk0_3; +} DPP_SE_CFG_ETCAM_KEY_LK0_3_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_2_t { + ZXIC_UINT32 etcam_key_lk0_2; +} DPP_SE_CFG_ETCAM_KEY_LK0_2_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_1_t { + ZXIC_UINT32 etcam_key_lk0_1; +} DPP_SE_CFG_ETCAM_KEY_LK0_1_T; + +typedef struct dpp_se_cfg_etcam_key_lk0_0_t { + ZXIC_UINT32 etcam_key_lk0_0; +} DPP_SE_CFG_ETCAM_KEY_LK0_0_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_22_t { + ZXIC_UINT32 etcam_key_lk1_22; +} DPP_SE_CFG_ETCAM_KEY_LK1_22_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_21_t { + ZXIC_UINT32 etcam_key_lk1_21; +} DPP_SE_CFG_ETCAM_KEY_LK1_21_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_20_t { + ZXIC_UINT32 etcam_key_lk1_20; +} DPP_SE_CFG_ETCAM_KEY_LK1_20_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_19_t { + ZXIC_UINT32 etcam_key_lk1_19; +} DPP_SE_CFG_ETCAM_KEY_LK1_19_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_18_t { + ZXIC_UINT32 etcam_key_lk1_18; +} DPP_SE_CFG_ETCAM_KEY_LK1_18_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_17_t { + ZXIC_UINT32 etcam_key_lk1_17; +} DPP_SE_CFG_ETCAM_KEY_LK1_17_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_16_t { + ZXIC_UINT32 etcam_key_lk1_16; +} DPP_SE_CFG_ETCAM_KEY_LK1_16_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_15_t { + ZXIC_UINT32 etcam_key_lk1_15; +} DPP_SE_CFG_ETCAM_KEY_LK1_15_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_14_t { + ZXIC_UINT32 
etcam_key_lk1_14; +} DPP_SE_CFG_ETCAM_KEY_LK1_14_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_13_t { + ZXIC_UINT32 etcam_key_lk1_13; +} DPP_SE_CFG_ETCAM_KEY_LK1_13_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_12_t { + ZXIC_UINT32 etcam_key_lk1_12; +} DPP_SE_CFG_ETCAM_KEY_LK1_12_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_11_t { + ZXIC_UINT32 etcam_key_lk1_11; +} DPP_SE_CFG_ETCAM_KEY_LK1_11_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_10_t { + ZXIC_UINT32 etcam_key_lk1_10; +} DPP_SE_CFG_ETCAM_KEY_LK1_10_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_9_t { + ZXIC_UINT32 etcam_key_lk1_9; +} DPP_SE_CFG_ETCAM_KEY_LK1_9_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_8_t { + ZXIC_UINT32 etcam_key_lk1_8; +} DPP_SE_CFG_ETCAM_KEY_LK1_8_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_7_t { + ZXIC_UINT32 etcam_key_lk1_7; +} DPP_SE_CFG_ETCAM_KEY_LK1_7_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_6_t { + ZXIC_UINT32 etcam_key_lk1_6; +} DPP_SE_CFG_ETCAM_KEY_LK1_6_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_5_t { + ZXIC_UINT32 etcam_key_lk1_5; +} DPP_SE_CFG_ETCAM_KEY_LK1_5_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_4_t { + ZXIC_UINT32 etcam_key_lk1_4; +} DPP_SE_CFG_ETCAM_KEY_LK1_4_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_3_t { + ZXIC_UINT32 etcam_key_lk1_3; +} DPP_SE_CFG_ETCAM_KEY_LK1_3_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_2_t { + ZXIC_UINT32 etcam_key_lk1_2; +} DPP_SE_CFG_ETCAM_KEY_LK1_2_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_1_t { + ZXIC_UINT32 etcam_key_lk1_1; +} DPP_SE_CFG_ETCAM_KEY_LK1_1_T; + +typedef struct dpp_se_cfg_etcam_key_lk1_0_t { + ZXIC_UINT32 etcam_key_lk1_0; +} DPP_SE_CFG_ETCAM_KEY_LK1_0_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_22_t { + ZXIC_UINT32 etcam_key_lk2_22; +} DPP_SE_CFG_ETCAM_KEY_LK2_22_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_21_t { + ZXIC_UINT32 etcam_key_lk2_21; +} DPP_SE_CFG_ETCAM_KEY_LK2_21_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_20_t { + ZXIC_UINT32 etcam_key_lk2_20; +} DPP_SE_CFG_ETCAM_KEY_LK2_20_T; + 
+typedef struct dpp_se_cfg_etcam_key_lk2_19_t { + ZXIC_UINT32 etcam_key_lk2_19; +} DPP_SE_CFG_ETCAM_KEY_LK2_19_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_18_t { + ZXIC_UINT32 etcam_key_lk2_18; +} DPP_SE_CFG_ETCAM_KEY_LK2_18_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_17_t { + ZXIC_UINT32 etcam_key_lk2_17; +} DPP_SE_CFG_ETCAM_KEY_LK2_17_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_16_t { + ZXIC_UINT32 etcam_key_lk2_16; +} DPP_SE_CFG_ETCAM_KEY_LK2_16_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_15_t { + ZXIC_UINT32 etcam_key_lk2_15; +} DPP_SE_CFG_ETCAM_KEY_LK2_15_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_14_t { + ZXIC_UINT32 etcam_key_lk2_14; +} DPP_SE_CFG_ETCAM_KEY_LK2_14_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_13_t { + ZXIC_UINT32 etcam_key_lk2_13; +} DPP_SE_CFG_ETCAM_KEY_LK2_13_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_12_t { + ZXIC_UINT32 etcam_key_lk2_12; +} DPP_SE_CFG_ETCAM_KEY_LK2_12_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_11_t { + ZXIC_UINT32 etcam_key_lk2_11; +} DPP_SE_CFG_ETCAM_KEY_LK2_11_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_10_t { + ZXIC_UINT32 etcam_key_lk2_10; +} DPP_SE_CFG_ETCAM_KEY_LK2_10_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_9_t { + ZXIC_UINT32 etcam_key_lk2_9; +} DPP_SE_CFG_ETCAM_KEY_LK2_9_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_8_t { + ZXIC_UINT32 etcam_key_lk2_8; +} DPP_SE_CFG_ETCAM_KEY_LK2_8_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_7_t { + ZXIC_UINT32 etcam_key_lk2_7; +} DPP_SE_CFG_ETCAM_KEY_LK2_7_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_6_t { + ZXIC_UINT32 etcam_key_lk2_6; +} DPP_SE_CFG_ETCAM_KEY_LK2_6_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_5_t { + ZXIC_UINT32 etcam_key_lk2_5; +} DPP_SE_CFG_ETCAM_KEY_LK2_5_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_4_t { + ZXIC_UINT32 etcam_key_lk2_4; +} DPP_SE_CFG_ETCAM_KEY_LK2_4_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_3_t { + ZXIC_UINT32 etcam_key_lk2_3; +} DPP_SE_CFG_ETCAM_KEY_LK2_3_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_2_t { + 
ZXIC_UINT32 etcam_key_lk2_2; +} DPP_SE_CFG_ETCAM_KEY_LK2_2_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_1_t { + ZXIC_UINT32 etcam_key_lk2_1; +} DPP_SE_CFG_ETCAM_KEY_LK2_1_T; + +typedef struct dpp_se_cfg_etcam_key_lk2_0_t { + ZXIC_UINT32 etcam_key_lk2_0; +} DPP_SE_CFG_ETCAM_KEY_LK2_0_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_22_t { + ZXIC_UINT32 etcam_key_lk3_22; +} DPP_SE_CFG_ETCAM_KEY_LK3_22_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_21_t { + ZXIC_UINT32 etcam_key_lk3_21; +} DPP_SE_CFG_ETCAM_KEY_LK3_21_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_20_t { + ZXIC_UINT32 etcam_key_lk3_20; +} DPP_SE_CFG_ETCAM_KEY_LK3_20_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_19_t { + ZXIC_UINT32 etcam_key_lk3_19; +} DPP_SE_CFG_ETCAM_KEY_LK3_19_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_18_t { + ZXIC_UINT32 etcam_key_lk3_18; +} DPP_SE_CFG_ETCAM_KEY_LK3_18_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_17_t { + ZXIC_UINT32 etcam_key_lk3_17; +} DPP_SE_CFG_ETCAM_KEY_LK3_17_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_16_t { + ZXIC_UINT32 etcam_key_lk3_16; +} DPP_SE_CFG_ETCAM_KEY_LK3_16_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_15_t { + ZXIC_UINT32 etcam_key_lk3_15; +} DPP_SE_CFG_ETCAM_KEY_LK3_15_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_14_t { + ZXIC_UINT32 etcam_key_lk3_14; +} DPP_SE_CFG_ETCAM_KEY_LK3_14_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_13_t { + ZXIC_UINT32 etcam_key_lk3_13; +} DPP_SE_CFG_ETCAM_KEY_LK3_13_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_12_t { + ZXIC_UINT32 etcam_key_lk3_12; +} DPP_SE_CFG_ETCAM_KEY_LK3_12_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_11_t { + ZXIC_UINT32 etcam_key_lk3_11; +} DPP_SE_CFG_ETCAM_KEY_LK3_11_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_10_t { + ZXIC_UINT32 etcam_key_lk3_10; +} DPP_SE_CFG_ETCAM_KEY_LK3_10_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_9_t { + ZXIC_UINT32 etcam_key_lk3_9; +} DPP_SE_CFG_ETCAM_KEY_LK3_9_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_8_t { + ZXIC_UINT32 etcam_key_lk3_8; +} 
DPP_SE_CFG_ETCAM_KEY_LK3_8_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_7_t { + ZXIC_UINT32 etcam_key_lk3_7; +} DPP_SE_CFG_ETCAM_KEY_LK3_7_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_6_t { + ZXIC_UINT32 etcam_key_lk3_6; +} DPP_SE_CFG_ETCAM_KEY_LK3_6_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_5_t { + ZXIC_UINT32 etcam_key_lk3_5; +} DPP_SE_CFG_ETCAM_KEY_LK3_5_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_4_t { + ZXIC_UINT32 etcam_key_lk3_4; +} DPP_SE_CFG_ETCAM_KEY_LK3_4_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_3_t { + ZXIC_UINT32 etcam_key_lk3_3; +} DPP_SE_CFG_ETCAM_KEY_LK3_3_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_2_t { + ZXIC_UINT32 etcam_key_lk3_2; +} DPP_SE_CFG_ETCAM_KEY_LK3_2_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_1_t { + ZXIC_UINT32 etcam_key_lk3_1; +} DPP_SE_CFG_ETCAM_KEY_LK3_1_T; + +typedef struct dpp_se_cfg_etcam_key_lk3_0_t { + ZXIC_UINT32 etcam_key_lk3_0; +} DPP_SE_CFG_ETCAM_KEY_LK3_0_T; + +typedef struct dpp_se_cfg_pbu_key_lk0_3_t { + ZXIC_UINT32 pbu_key_lk0_3; +} DPP_SE_CFG_PBU_KEY_LK0_3_T; + +typedef struct dpp_se_cfg_pbu_key_lk0_2_t { + ZXIC_UINT32 pbu_key_lk0_2; +} DPP_SE_CFG_PBU_KEY_LK0_2_T; + +typedef struct dpp_se_cfg_pbu_key_lk0_1_t { + ZXIC_UINT32 pbu_key_lk0_1; +} DPP_SE_CFG_PBU_KEY_LK0_1_T; + +typedef struct dpp_se_cfg_pbu_key_lk0_0_t { + ZXIC_UINT32 pbu_key_lk0_0; +} DPP_SE_CFG_PBU_KEY_LK0_0_T; + +typedef struct dpp_se_cfg_pbu_key_lk1_3_t { + ZXIC_UINT32 pbu_key_lk1_3; +} DPP_SE_CFG_PBU_KEY_LK1_3_T; + +typedef struct dpp_se_cfg_pbu_key_lk1_2_t { + ZXIC_UINT32 pbu_key_lk1_2; +} DPP_SE_CFG_PBU_KEY_LK1_2_T; + +typedef struct dpp_se_cfg_pbu_key_lk1_1_t { + ZXIC_UINT32 pbu_key_lk1_1; +} DPP_SE_CFG_PBU_KEY_LK1_1_T; + +typedef struct dpp_se_cfg_pbu_key_lk1_0_t { + ZXIC_UINT32 pbu_key_lk1_0; +} DPP_SE_CFG_PBU_KEY_LK1_0_T; + +typedef struct dpp_se_cfg_pbu_key_lk2_3_t { + ZXIC_UINT32 pbu_key_lk2_3; +} DPP_SE_CFG_PBU_KEY_LK2_3_T; + +typedef struct dpp_se_cfg_pbu_key_lk2_2_t { + ZXIC_UINT32 pbu_key_lk2_2; +} 
DPP_SE_CFG_PBU_KEY_LK2_2_T; + +typedef struct dpp_se_cfg_pbu_key_lk2_1_t { + ZXIC_UINT32 pbu_key_lk2_1; +} DPP_SE_CFG_PBU_KEY_LK2_1_T; + +typedef struct dpp_se_cfg_pbu_key_lk2_0_t { + ZXIC_UINT32 pbu_key_lk2_0; +} DPP_SE_CFG_PBU_KEY_LK2_0_T; + +typedef struct dpp_se_cfg_pbu_key_lk3_3_t { + ZXIC_UINT32 pbu_key_lk3_3; +} DPP_SE_CFG_PBU_KEY_LK3_3_T; + +typedef struct dpp_se_cfg_pbu_key_lk3_2_t { + ZXIC_UINT32 pbu_key_lk3_2; +} DPP_SE_CFG_PBU_KEY_LK3_2_T; + +typedef struct dpp_se_cfg_pbu_key_lk3_1_t { + ZXIC_UINT32 pbu_key_lk3_1; +} DPP_SE_CFG_PBU_KEY_LK3_1_T; + +typedef struct dpp_se_cfg_pbu_key_lk3_0_t { + ZXIC_UINT32 pbu_key_lk3_0; +} DPP_SE_CFG_PBU_KEY_LK3_0_T; + +typedef struct dpp_se_alg_schd_learn_fifo_pfull_ast_t { + ZXIC_UINT32 schd_learn_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_LEARN_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_schd_learn_fifo_pfull_neg_t { + ZXIC_UINT32 schd_learn_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_LEARN_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_schd_hash0_fifo_pfull_ast_t { + ZXIC_UINT32 schd_hash0_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_HASH0_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_schd_hash0_fifo_pfull_neg_t { + ZXIC_UINT32 schd_hash0_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_HASH0_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_schd_hash1_fifo_pfull_ast_t { + ZXIC_UINT32 schd_hash1_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_HASH1_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_schd_hash1_fifo_pfull_neg_t { + ZXIC_UINT32 schd_hash1_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_HASH1_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_schd_hash2_fifo_pfull_ast_t { + ZXIC_UINT32 schd_hash2_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_HASH2_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_schd_hash2_fifo_pfull_neg_t { + ZXIC_UINT32 schd_hash2_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_HASH2_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_schd_hash3_fifo_pfull_ast_t { + ZXIC_UINT32 schd_hash3_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_HASH3_FIFO_PFULL_AST_T; + +typedef struct 
dpp_se_alg_schd_hash3_fifo_pfull_neg_t { + ZXIC_UINT32 schd_hash3_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_HASH3_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_schd_lpm_fifo_pfull_ast_t { + ZXIC_UINT32 schd_lpm_fifo_pfull_ast; +} DPP_SE_ALG_SCHD_LPM_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_schd_lpm_fifo_pfull_neg_t { + ZXIC_UINT32 schd_lpm_fifo_pfull_neg; +} DPP_SE_ALG_SCHD_LPM_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash0_key_fifo_pfull_ast_t { + ZXIC_UINT32 hash0_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH0_KEY_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash0_key_fifo_pfull_neg_t { + ZXIC_UINT32 hash0_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH0_KEY_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash0_sreq_fifo_pfull_ast_t { + ZXIC_UINT32 hash0_sreq_fifo_pfull_ast; +} DPP_SE_ALG_HASH0_SREQ_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash0_sreq_fifo_pfull_neg_t { + ZXIC_UINT32 hash0_sreq_fifo_pfull_neg; +} DPP_SE_ALG_HASH0_SREQ_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash0_int_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash0_int_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH0_INT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash0_int_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash0_int_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH0_INT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash0_ext_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash0_ext_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash0_ext_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash0_ext_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash1_key_fifo_pfull_ast_t { + ZXIC_UINT32 hash1_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH1_KEY_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash1_key_fifo_pfull_neg_t { + ZXIC_UINT32 hash1_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH1_KEY_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash1_sreq_fifo_pfull_ast_t { + ZXIC_UINT32 hash1_sreq_fifo_pfull_ast; +} DPP_SE_ALG_HASH1_SREQ_FIFO_PFULL_AST_T; + 
+typedef struct dpp_se_alg_hash1_sreq_fifo_pfull_neg_t { + ZXIC_UINT32 hash1_sreq_fifo_pfull_neg; +} DPP_SE_ALG_HASH1_SREQ_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash1_int_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash1_int_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH1_INT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash1_int_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash1_int_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH1_INT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash1_ext_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash1_ext_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash1_ext_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash1_ext_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash2_key_fifo_pfull_ast_t { + ZXIC_UINT32 hash2_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH2_KEY_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash2_key_fifo_pfull_neg_t { + ZXIC_UINT32 hash2_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH2_KEY_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash2_sreq_fifo_pfull_ast_t { + ZXIC_UINT32 hash2_sreq_fifo_pfull_ast; +} DPP_SE_ALG_HASH2_SREQ_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash2_sreq_fifo_pfull_neg_t { + ZXIC_UINT32 hash2_sreq_fifo_pfull_neg; +} DPP_SE_ALG_HASH2_SREQ_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash2_int_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash2_int_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH2_INT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash2_int_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash2_int_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH2_INT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash2_ext_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash2_ext_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash2_ext_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash2_ext_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash3_key_fifo_pfull_ast_t { + ZXIC_UINT32 
hash3_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH3_KEY_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash3_key_fifo_pfull_neg_t { + ZXIC_UINT32 hash3_key_fifo_pfull_ast; +} DPP_SE_ALG_HASH3_KEY_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash3_sreq_fifo_pfull_ast_t { + ZXIC_UINT32 hash3_sreq_fifo_pfull_ast; +} DPP_SE_ALG_HASH3_SREQ_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash3_sreq_fifo_pfull_neg_t { + ZXIC_UINT32 hash3_sreq_fifo_pfull_neg; +} DPP_SE_ALG_HASH3_SREQ_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash3_int_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash3_int_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH3_INT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash3_int_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash3_int_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH3_INT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_hash3_ext_rsp_fifo_pfull_ast_t { + ZXIC_UINT32 hash3_ext_rsp_fifo_pfull_ast; +} DPP_SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_AST_T; + +typedef struct dpp_se_alg_hash3_ext_rsp_fifo_pfull_neg_t { + ZXIC_UINT32 hash3_ext_rsp_fifo_pfull_neg; +} DPP_SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_as_info_t { + ZXIC_UINT32 lpm_as_type; + ZXIC_UINT32 lpm_as_en; +} DPP_SE_ALG_LPM_AS_INFO_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u0_pfull_neg_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u0_pfull_neg; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u2_pfull_ast_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u2_pfull_ast; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_AST_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u2_pfull_neg_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u2_pfull_neg; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u3_pfull_ast_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u3_pfull_ast; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_AST_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u3_pfull_neg_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u3_pfull_neg; +} 
DPP_SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u4_pfull_ast_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u4_pfull_ast; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_AST_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_u4_pfull_neg_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_u4_pfull_neg; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_as_rsp_fifo_u0_pfull_ast_t { + ZXIC_UINT32 lpm_as_rsp_fifo_u0_pfull_ast; +} DPP_SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_AST_T; + +typedef struct dpp_se_alg_lpm_as_rsp_fifo_u0_pfull_neg_t { + ZXIC_UINT32 lpm_as_rsp_fifo_u0_pfull_neg; +} DPP_SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_as_rsp_fifo_u1_pfull_ast_t { + ZXIC_UINT32 lpm_as_rsp_fifo_u1_pfull_ast; +} DPP_SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_AST_T; + +typedef struct dpp_se_alg_lpm_as_rsp_fifo_u1_pfull_neg_t { + ZXIC_UINT32 lpm_as_rsp_fifo_u1_pfull_neg; +} DPP_SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_NEG_T; + +typedef struct dpp_se_alg_lpm_v4_ddr3_base_addr_t { + ZXIC_UINT32 lpm_v4_ddr3_base_addr; +} DPP_SE_ALG_LPM_V4_DDR3_BASE_ADDR_T; + +typedef struct dpp_se_alg_lpm_v6_ddr3_base_addr_t { + ZXIC_UINT32 lpm_v6_ddr3_base_addr; +} DPP_SE_ALG_LPM_V6_DDR3_BASE_ADDR_T; + +typedef struct dpp_se_alg_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_ALG_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_alg_hash_p0_key_vld_cnt_t { + ZXIC_UINT32 hash_p0_key_vld_cnt; +} DPP_SE_ALG_HASH_P0_KEY_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p1_key_vld_cnt_t { + ZXIC_UINT32 hash_p1_key_vld_cnt; +} DPP_SE_ALG_HASH_P1_KEY_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p2_key_vld_cnt_t { + ZXIC_UINT32 hash_p2_key_vld_cnt; +} DPP_SE_ALG_HASH_P2_KEY_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p3_key_vld_cnt_t { + ZXIC_UINT32 hash_p3_key_vld_cnt; +} DPP_SE_ALG_HASH_P3_KEY_VLD_CNT_T; + +typedef struct dpp_se_alg_lpm_p0_key_vld_cnt_t { + ZXIC_UINT32 lpm_p0_key_vld_cnt; +} DPP_SE_ALG_LPM_P0_KEY_VLD_CNT_T; + +typedef 
struct dpp_se_alg_hash_p0_rsp_vld_cnt_t { + ZXIC_UINT32 hash_p0_rsp_vld_cnt; +} DPP_SE_ALG_HASH_P0_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p1_rsp_vld_cnt_t { + ZXIC_UINT32 hash_p1_rsp_vld_cnt; +} DPP_SE_ALG_HASH_P1_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p2_rsp_vld_cnt_t { + ZXIC_UINT32 hash_p2_rsp_vld_cnt; +} DPP_SE_ALG_HASH_P2_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p3_rsp_vld_cnt_t { + ZXIC_UINT32 hash_p3_rsp_vld_cnt; +} DPP_SE_ALG_HASH_P3_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_lpm_p0_rsp_vld_cnt_t { + ZXIC_UINT32 lpm_p0_rsp_vld_cnt; +} DPP_SE_ALG_LPM_P0_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_hash_p0_smf_cnt_t { + ZXIC_UINT32 hash_p0_smf_cnt; +} DPP_SE_ALG_HASH_P0_SMF_CNT_T; + +typedef struct dpp_se_alg_hash_p1_smf_cnt_t { + ZXIC_UINT32 hash_p1_smf_cnt; +} DPP_SE_ALG_HASH_P1_SMF_CNT_T; + +typedef struct dpp_se_alg_hash_p2_smf_cnt_t { + ZXIC_UINT32 hash_p2_smf_cnt; +} DPP_SE_ALG_HASH_P2_SMF_CNT_T; + +typedef struct dpp_se_alg_hash_p3_smf_cnt_t { + ZXIC_UINT32 hash_p3_smf_cnt; +} DPP_SE_ALG_HASH_P3_SMF_CNT_T; + +typedef struct dpp_se_alg_lpm_p0_smf_cnt_t { + ZXIC_UINT32 lpm_p0_smf_cnt; +} DPP_SE_ALG_LPM_P0_SMF_CNT_T; + +typedef struct dpp_se_alg_hash_p0_spacevld_cnt_t { + ZXIC_UINT32 hash_p0_spacevld_cnt; +} DPP_SE_ALG_HASH_P0_SPACEVLD_CNT_T; + +typedef struct dpp_se_alg_hash_p1_spacevld_cnt_t { + ZXIC_UINT32 hash_p1_spacevld_cnt; +} DPP_SE_ALG_HASH_P1_SPACEVLD_CNT_T; + +typedef struct dpp_se_alg_hash_p2_spacevld_cnt_t { + ZXIC_UINT32 hash_p2_spacevld_cnt; +} DPP_SE_ALG_HASH_P2_SPACEVLD_CNT_T; + +typedef struct dpp_se_alg_hash_p3_spacevld_cnt_t { + ZXIC_UINT32 hash_p3_spacevld_cnt; +} DPP_SE_ALG_HASH_P3_SPACEVLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p0_req_vld_cnt_t { + ZXIC_UINT32 smmu1_p0_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P0_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p1_req_vld_cnt_t { + ZXIC_UINT32 smmu1_p1_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P1_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p2_req_vld_cnt_t { + 
ZXIC_UINT32 smmu1_p2_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P2_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p3_req_vld_cnt_t { + ZXIC_UINT32 smmu1_p3_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P3_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p4_req_vld_cnt_t { + ZXIC_UINT32 smmu1_p4_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P4_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p5_req_vld_cnt_t { + ZXIC_UINT32 smmu1_p5_req_vld_cnt; +} DPP_SE_ALG_SMMU1_P5_REQ_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p0_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p0_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P0_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p1_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p1_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P1_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p2_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p2_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P2_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p3_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p3_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P3_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p4_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p4_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P4_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_smmu1_p5_rsp_vld_cnt_t { + ZXIC_UINT32 smmu1_p5_rsp_vld_cnt; +} DPP_SE_ALG_SMMU1_P5_RSP_VLD_CNT_T; + +typedef struct dpp_se_alg_schd_learn_fifo_int_cnt_t { + ZXIC_UINT32 schd_learn_fifo_int_cnt; +} DPP_SE_ALG_SCHD_LEARN_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_hash0_fifo_int_cnt_t { + ZXIC_UINT32 schd_hash0_fifo_int_cnt; +} DPP_SE_ALG_SCHD_HASH0_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_hash1_fifo_int_cnt_t { + ZXIC_UINT32 schd_hash1_fifo_int_cnt; +} DPP_SE_ALG_SCHD_HASH1_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_hash2_fifo_int_cnt_t { + ZXIC_UINT32 schd_hash2_fifo_int_cnt; +} DPP_SE_ALG_SCHD_HASH2_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_hash3_fifo_int_cnt_t { + ZXIC_UINT32 schd_hash3_fifo_int_cnt; +} DPP_SE_ALG_SCHD_HASH3_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_lpm_fifo_int_cnt_t { + ZXIC_UINT32 
schd_lpm_fifo_int_cnt; +} DPP_SE_ALG_SCHD_LPM_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_schd_learn_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_learn_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_LEARN_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_schd_hash0_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_hash0_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_HASH0_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_schd_hash1_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_hash1_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_HASH1_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_schd_hash2_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_hash2_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_HASH2_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_schd_hash3_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_hash3_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_HASH3_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_schd_lpm_fifo_parity_err_cnt_t { + ZXIC_UINT32 schd_lpm_fifo_parity_err_cnt; +} DPP_SE_ALG_SCHD_LPM_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_rd_init_cft_cnt_t { + ZXIC_UINT32 rd_init_cft_cnt; +} DPP_SE_ALG_RD_INIT_CFT_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk0_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk0_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK0_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk1_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk1_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK1_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk2_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk2_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK2_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk3_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk3_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK3_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk4_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk4_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK4_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk5_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk5_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK5_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk6_ecc_err_cnt_t { + ZXIC_UINT32 
zgp0_zblk6_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK6_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp0_zblk7_ecc_err_cnt_t { + ZXIC_UINT32 zgp0_zblk7_ecc_err_cnt; +} DPP_SE_ALG_ZGP0_ZBLK7_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk0_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk0_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK0_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk1_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk1_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK1_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk2_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk2_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK2_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk3_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk3_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK3_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk4_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk4_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK4_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk5_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk5_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK5_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk6_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk6_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK6_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp1_zblk7_ecc_err_cnt_t { + ZXIC_UINT32 zgp1_zblk7_ecc_err_cnt; +} DPP_SE_ALG_ZGP1_ZBLK7_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk0_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk0_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK0_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk1_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk1_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK1_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk2_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk2_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK2_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk3_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk3_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK3_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk4_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk4_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK4_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk5_ecc_err_cnt_t { + 
ZXIC_UINT32 zgp2_zblk5_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK5_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk6_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk6_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK6_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp2_zblk7_ecc_err_cnt_t { + ZXIC_UINT32 zgp2_zblk7_ecc_err_cnt; +} DPP_SE_ALG_ZGP2_ZBLK7_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk0_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk0_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK0_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk1_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk1_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK1_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk2_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk2_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK2_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk3_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk3_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK3_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk4_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk4_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK4_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk5_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk5_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK5_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk6_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk6_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK6_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zgp3_zblk7_ecc_err_cnt_t { + ZXIC_UINT32 zgp3_zblk7_ecc_err_cnt; +} DPP_SE_ALG_ZGP3_ZBLK7_ECC_ERR_CNT_T; + +typedef struct dpp_se_alg_zcam_hash_p0_err_cnt_t { + ZXIC_UINT32 zcam_hash_p0_err_cnt; +} DPP_SE_ALG_ZCAM_HASH_P0_ERR_CNT_T; + +typedef struct dpp_se_alg_zcam_hash_p1_err_cnt_t { + ZXIC_UINT32 zcam_hash_p1_err_cnt; +} DPP_SE_ALG_ZCAM_HASH_P1_ERR_CNT_T; + +typedef struct dpp_se_alg_zcam_hash_p2_err_cnt_t { + ZXIC_UINT32 zcam_hash_p2_err_cnt; +} DPP_SE_ALG_ZCAM_HASH_P2_ERR_CNT_T; + +typedef struct dpp_se_alg_zcam_hash_p3_err_cnt_t { + ZXIC_UINT32 zcam_hash_p3_err_cnt; +} DPP_SE_ALG_ZCAM_HASH_P3_ERR_CNT_T; + +typedef struct dpp_se_alg_zcam_lpm_err_cnt_t { + ZXIC_UINT32 
zcam_lpm_err_cnt; +} DPP_SE_ALG_ZCAM_LPM_ERR_CNT_T; + +typedef struct dpp_se_alg_hash0_sreq_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash0_sreq_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH0_SREQ_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash0_sreq_fifo_int_cnt_t { + ZXIC_UINT32 hash0_sreq_fifo_int_cnt; +} DPP_SE_ALG_HASH0_SREQ_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash0_key_fifo_int_cnt_t { + ZXIC_UINT32 hash0_key_fifo_int_cnt; +} DPP_SE_ALG_HASH0_KEY_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash0_int_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash0_int_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH0_INT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash0_ext_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash0_ext_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH0_EXT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash0_ext_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash0_ext_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH0_EXT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash0_int_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash0_int_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH0_INT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash1_sreq_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash1_sreq_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH1_SREQ_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash1_sreq_fifo_int_cnt_t { + ZXIC_UINT32 hash1_sreq_fifo_int_cnt; +} DPP_SE_ALG_HASH1_SREQ_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash1_key_fifo_int_cnt_t { + ZXIC_UINT32 hash1_key_fifo_int_cnt; +} DPP_SE_ALG_HASH1_KEY_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash1_int_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash1_int_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH1_INT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash1_ext_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash1_ext_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH1_EXT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash1_ext_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash1_ext_rsp_fifo_int_cnt; +} 
DPP_SE_ALG_HASH1_EXT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash1_int_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash1_int_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH1_INT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash2_sreq_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash2_sreq_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH2_SREQ_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash2_sreq_fifo_int_cnt_t { + ZXIC_UINT32 hash2_sreq_fifo_int_cnt; +} DPP_SE_ALG_HASH2_SREQ_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash2_key_fifo_int_cnt_t { + ZXIC_UINT32 hash2_key_fifo_int_cnt; +} DPP_SE_ALG_HASH2_KEY_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash2_int_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash2_int_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH2_INT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash2_ext_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash2_ext_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH2_EXT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash2_ext_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash2_ext_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH2_EXT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash2_int_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash2_int_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH2_INT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash3_sreq_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash3_sreq_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH3_SREQ_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash3_sreq_fifo_int_cnt_t { + ZXIC_UINT32 hash3_sreq_fifo_int_cnt; +} DPP_SE_ALG_HASH3_SREQ_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash3_key_fifo_int_cnt_t { + ZXIC_UINT32 hash3_key_fifo_int_cnt; +} DPP_SE_ALG_HASH3_KEY_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash3_int_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash3_int_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_HASH3_INT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash3_ext_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 hash3_ext_rsp_fifo_parity_err_cnt; +} 
DPP_SE_ALG_HASH3_EXT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_hash3_ext_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash3_ext_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH3_EXT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_hash3_int_rsp_fifo_int_cnt_t { + ZXIC_UINT32 hash3_int_rsp_fifo_int_cnt; +} DPP_SE_ALG_HASH3_INT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_int_cnt_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_int_cnt; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_v6_fifo_int_cnt_t { + ZXIC_UINT32 lpm_ext_v6_fifo_int_cnt; +} DPP_SE_ALG_LPM_EXT_V6_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_v4_fifo_int_cnt_t { + ZXIC_UINT32 lpm_ext_v4_fifo_int_cnt; +} DPP_SE_ALG_LPM_EXT_V4_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_addr_fifo_int_cnt_t { + ZXIC_UINT32 lpm_ext_addr_fifo_int_cnt; +} DPP_SE_ALG_LPM_EXT_ADDR_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_v4_fifo_parity_err_cnt_t { + ZXIC_UINT32 lpm_ext_v4_fifo_parity_err_cnt; +} DPP_SE_ALG_LPM_EXT_V4_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_v6_fifo_parity_err_cnt_t { + ZXIC_UINT32 lpm_ext_v6_fifo_parity_err_cnt; +} DPP_SE_ALG_LPM_EXT_V6_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_lpm_ext_rsp_fifo_parity_err_cnt_t { + ZXIC_UINT32 lpm_ext_rsp_fifo_parity_err_cnt; +} DPP_SE_ALG_LPM_EXT_RSP_FIFO_PARITY_ERR_CNT_T; + +typedef struct dpp_se_alg_lpm_as_req_fifo_int_cnt_t { + ZXIC_UINT32 lpm_as_req_fifo_int_cnt; +} DPP_SE_ALG_LPM_AS_REQ_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_lpm_as_int_rsp_fifo_int_cnt_t { + ZXIC_UINT32 lpm_as_int_rsp_fifo_int_cnt; +} DPP_SE_ALG_LPM_AS_INT_RSP_FIFO_INT_CNT_T; + +typedef struct dpp_se_alg_se_alg_int_status_t { + ZXIC_UINT32 schd_int_unmask_flag; + ZXIC_UINT32 zblk_ecc_int_unmask_flag; + ZXIC_UINT32 hash0_int_unmask_flag; + ZXIC_UINT32 hash1_int_unmask_flag; + ZXIC_UINT32 hash2_int_unmask_flag; + ZXIC_UINT32 hash3_int_unmask_flag; + ZXIC_UINT32 lpm_int_unmask_flag; +} 
DPP_SE_ALG_SE_ALG_INT_STATUS_T; + +typedef struct dpp_se_alg_schd_int_en_t { + ZXIC_UINT32 wr_rsp_fifo_ovfl; + ZXIC_UINT32 init_rd_cft_en; + ZXIC_UINT32 schd_lpm_fifo_parity_errl; + ZXIC_UINT32 schd_hash3_fifo_parity_err; + ZXIC_UINT32 schd_hash2_fifo_parity_err; + ZXIC_UINT32 schd_hash1_fifo_parity_err; + ZXIC_UINT32 schd_hash0_fifo_parity_err; + ZXIC_UINT32 schd_learn_fifo_parity_err; + ZXIC_UINT32 schd_lpm_fifo_ovfl; + ZXIC_UINT32 schd_hash3_fifo_ovfl; + ZXIC_UINT32 schd_hash2_fifo_unfl; + ZXIC_UINT32 schd_hash1_fifo_ovfl; + ZXIC_UINT32 schd_hash0_fifo_ovfl; + ZXIC_UINT32 schd_learn_fifo_ovfl; +} DPP_SE_ALG_SCHD_INT_EN_T; + +typedef struct dpp_se_alg_schd_int_mask_t { + ZXIC_UINT32 schd_int_mask; +} DPP_SE_ALG_SCHD_INT_MASK_T; + +typedef struct dpp_se_alg_schd_int_status_t { + ZXIC_UINT32 schd_int_status; +} DPP_SE_ALG_SCHD_INT_STATUS_T; + +typedef struct dpp_se_alg_zblk_ecc_int_en_t { + ZXIC_UINT32 zblk_ecc_int_en; +} DPP_SE_ALG_ZBLK_ECC_INT_EN_T; + +typedef struct dpp_se_alg_zblk_ecc_int_mask_t { + ZXIC_UINT32 zblk_ecc_int_mask; +} DPP_SE_ALG_ZBLK_ECC_INT_MASK_T; + +typedef struct dpp_se_alg_zblk_ecc_int_status_t { + ZXIC_UINT32 zblk_ecc_int_status; +} DPP_SE_ALG_ZBLK_ECC_INT_STATUS_T; + +typedef struct dpp_se_alg_hash0_int_en_t { + ZXIC_UINT32 zcam_hash_p0_err_en; + ZXIC_UINT32 hash0_agree_int_fifo_ovf_en; + ZXIC_UINT32 hash0_agree_ext_fifo_ovf_en; + ZXIC_UINT32 hash0_agree_ext_fifo_parity_err_en; + ZXIC_UINT32 hash0_agree_int_fifo_parity_err_en; + ZXIC_UINT32 hash0_key_fifo_ovfl_en; + ZXIC_UINT32 hash0_sreq_fifo_ovfl_en; + ZXIC_UINT32 hash0_key_fifo_parity_err_en; +} DPP_SE_ALG_HASH0_INT_EN_T; + +typedef struct dpp_se_alg_hash0_int_mask_t { + ZXIC_UINT32 hash0_int_mask; +} DPP_SE_ALG_HASH0_INT_MASK_T; + +typedef struct dpp_se_alg_hash0_int_status_t { + ZXIC_UINT32 hash0_int_status; +} DPP_SE_ALG_HASH0_INT_STATUS_T; + +typedef struct dpp_se_alg_hash1_int_en_t { + ZXIC_UINT32 zcam_hash_p1_err_en; + ZXIC_UINT32 hash1_agree_int_fifo_ovf_en; + ZXIC_UINT32 
hash1_agree_ext_fifo_ovf_en; + ZXIC_UINT32 hash1_agree_ext_fifo_parity_err_en; + ZXIC_UINT32 hash1_agree_int_fifo_parity_err_en; + ZXIC_UINT32 hash1_key_fifo_ovfl_en; + ZXIC_UINT32 hash1_sreq_fifo_ovfl_en; + ZXIC_UINT32 hash1_key_fifo_parity_err_en; +} DPP_SE_ALG_HASH1_INT_EN_T; + +typedef struct dpp_se_alg_hash1_int_mask_t { + ZXIC_UINT32 hash1_int_mask; +} DPP_SE_ALG_HASH1_INT_MASK_T; + +typedef struct dpp_se_alg_hash1_int_status_t { + ZXIC_UINT32 hash1_int_status; +} DPP_SE_ALG_HASH1_INT_STATUS_T; + +typedef struct dpp_se_alg_hash2_int_en_t { + ZXIC_UINT32 zcam_hash_p2_err_en; + ZXIC_UINT32 hash2_agree_int_fifo_ovf_en; + ZXIC_UINT32 hash2_agree_ext_fifo_ovf_en; + ZXIC_UINT32 hash2_agree_ext_fifo_parity_err_en; + ZXIC_UINT32 hash2_agree_int_fifo_parity_err_en; + ZXIC_UINT32 hash2_key_fifo_ovfl_en; + ZXIC_UINT32 hash2_sreq_fifo_ovfl_en; + ZXIC_UINT32 hash2_key_fifo_parity_err_en; +} DPP_SE_ALG_HASH2_INT_EN_T; + +typedef struct dpp_se_alg_hash2_int_mask_t { + ZXIC_UINT32 hash2_int_mask; +} DPP_SE_ALG_HASH2_INT_MASK_T; + +typedef struct dpp_se_alg_hash2_int_status_t { + ZXIC_UINT32 hash2_int_status; +} DPP_SE_ALG_HASH2_INT_STATUS_T; + +typedef struct dpp_se_alg_hash3_int_en_t { + ZXIC_UINT32 zcam_hash_p3_err_en; + ZXIC_UINT32 hash3_agree_int_fifo_ovf_en; + ZXIC_UINT32 hash3_agree_ext_fifo_ovf_en; + ZXIC_UINT32 hash3_agree_ext_fifo_parity_err_en; + ZXIC_UINT32 hash3_agree_int_fifo_parity_err_en; + ZXIC_UINT32 hash3_key_fifo_ovfl_en; + ZXIC_UINT32 hash3_sreq_fifo_ovfl_en; + ZXIC_UINT32 hash3_key_fifo_parity_err_en; +} DPP_SE_ALG_HASH3_INT_EN_T; + +typedef struct dpp_se_alg_hash3_int_mask_t { + ZXIC_UINT32 hash3_int_mask; +} DPP_SE_ALG_HASH3_INT_MASK_T; + +typedef struct dpp_se_alg_hash3_int_status_t { + ZXIC_UINT32 hash3_int_status; +} DPP_SE_ALG_HASH3_INT_STATUS_T; + +typedef struct dpp_se_alg_lpm_int_en_t { + ZXIC_UINT32 zcam_lpm_err_en; + ZXIC_UINT32 lpm_as_int_rsp_fifo_ovfl_en; + ZXIC_UINT32 lpm_as_req_fifo_ovfl_en; + ZXIC_UINT32 lpm_ext_ddr_rsp_fifo_parity_en; + 
ZXIC_UINT32 lpm_ext_v6_key_parity_en; + ZXIC_UINT32 lpm_ext_v4_key_parity_en; + ZXIC_UINT32 lpm_ext_addr_fifo_ovfl_en; + ZXIC_UINT32 lpm_ext_v4_fifo_ovfl_en; + ZXIC_UINT32 lpm_ext_v6_fifo_ovfl_en; + ZXIC_UINT32 lpm_ext_ddr_rsp_ovf_en; +} DPP_SE_ALG_LPM_INT_EN_T; + +typedef struct dpp_se_alg_lpm_int_mask_t { + ZXIC_UINT32 lpm_int_mask; +} DPP_SE_ALG_LPM_INT_MASK_T; + +typedef struct dpp_se_alg_lpm_int_status_t { + ZXIC_UINT32 lpm_int_status; +} DPP_SE_ALG_LPM_INT_STATUS_T; + +typedef struct dpp_se_alg_zblock_lpm_mask0_t { + ZXIC_UINT32 vpn_id_mask; + ZXIC_UINT32 prefix0_mask; + ZXIC_UINT32 prefix1_mask; + ZXIC_UINT32 prefix2_mask; + ZXIC_UINT32 prefix3_mask; +} DPP_SE_ALG_ZBLOCK_LPM_MASK0_T; + +typedef struct dpp_se_alg_zblock_lpm_mask1_t { + ZXIC_UINT32 vpn_id_mask; + ZXIC_UINT32 prefix0_mask; + ZXIC_UINT32 prefix1_mask; + ZXIC_UINT32 prefix2_mask; + ZXIC_UINT32 prefix3_mask; +} DPP_SE_ALG_ZBLOCK_LPM_MASK1_T; + +typedef struct dpp_se_alg_zblock_lpm_mask2_t { + ZXIC_UINT32 vpn_id_mask; + ZXIC_UINT32 prefix0_mask; + ZXIC_UINT32 prefix1_mask; + ZXIC_UINT32 prefix2_mask; + ZXIC_UINT32 prefix3_mask; +} DPP_SE_ALG_ZBLOCK_LPM_MASK2_T; + +typedef struct dpp_se_alg_zblock_lpm_mask3_t { + ZXIC_UINT32 vpn_id_mask; + ZXIC_UINT32 prefix0_mask; + ZXIC_UINT32 prefix1_mask; + ZXIC_UINT32 prefix2_mask; + ZXIC_UINT32 prefix3_mask; +} DPP_SE_ALG_ZBLOCK_LPM_MASK3_T; + +typedef struct dpp_se_alg_zblock_default_route0_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE0_T; + +typedef struct dpp_se_alg_zblock_default_route1_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE1_T; + +typedef struct dpp_se_alg_zblock_default_route2_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE2_T; + +typedef struct dpp_se_alg_zblock_default_route3_t { + 
ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE3_T; + +typedef struct dpp_se_alg_zblock_default_route4_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE4_T; + +typedef struct dpp_se_alg_zblock_default_route5_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE5_T; + +typedef struct dpp_se_alg_zblock_default_route6_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE6_T; + +typedef struct dpp_se_alg_zblock_default_route7_t { + ZXIC_UINT32 vpn_id; + ZXIC_UINT32 vpn_dresult; + ZXIC_UINT32 vpn_flag; + ZXIC_UINT32 vpn_vld; +} DPP_SE_ALG_ZBLOCK_DEFAULT_ROUTE7_T; + +typedef struct dpp_se_alg_zblock_hash_listtable_item0_t { + ZXIC_UINT32 hash_item; +} DPP_SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM0_T; + +typedef struct dpp_se_alg_zblock_hash_listtable_item1_t { + ZXIC_UINT32 hash_item; +} DPP_SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM1_T; + +typedef struct dpp_se_alg_zblock_hash_listtable_item2_t { + ZXIC_UINT32 hash_item; +} DPP_SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM2_T; + +typedef struct dpp_se_alg_zblock_hash_listtable_item3_t { + ZXIC_UINT32 hash_item; +} DPP_SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM3_T; + +typedef struct dpp_se_alg_zblock_ecc_err_status_t { + ZXIC_UINT32 sram3_ecc_err; + ZXIC_UINT32 sram2_ecc_err; + ZXIC_UINT32 sram1_ecc_err; + ZXIC_UINT32 sram0_ecc_err; +} DPP_SE_ALG_ZBLOCK_ECC_ERR_STATUS_T; + +typedef struct dpp_se_alg_zblock_lpm_v6_sram_cmp_t { + ZXIC_UINT32 sram_cmp_flag; +} DPP_SE_ALG_ZBLOCK_LPM_V6_SRAM_CMP_T; + +typedef struct dpp_se_alg_zblock_lpm_v4_sram_cmp_t { + ZXIC_UINT32 sram_cmp_flag; +} DPP_SE_ALG_ZBLOCK_LPM_V4_SRAM_CMP_T; + +typedef struct dpp_se_parser_kschd_pful_cfg_t { + ZXIC_UINT32 kschd_pful_assert; + ZXIC_UINT32 kschd_pful_negate; +} 
DPP_SE_PARSER_KSCHD_PFUL_CFG_T; + +typedef struct dpp_se_parser_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_PARSER_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_parser_parser_int_en_t { + ZXIC_UINT32 parser_int_en; +} DPP_SE_PARSER_PARSER_INT_EN_T; + +typedef struct dpp_se_parser_parser_int_mask_t { + ZXIC_UINT32 parser_int_mask; +} DPP_SE_PARSER_PARSER_INT_MASK_T; + +typedef struct dpp_se_parser_parser_int_status_t { + ZXIC_UINT32 parser_int_status; +} DPP_SE_PARSER_PARSER_INT_STATUS_T; + +typedef struct dpp_se_parser_parser_int_unmask_flag_t { + ZXIC_UINT32 parser_int_unmask_flag; +} DPP_SE_PARSER_PARSER_INT_UNMASK_FLAG_T; + +typedef struct dpp_se_parser_ecc_bypass_read_t { + ZXIC_UINT32 ecc_bypass_read; +} DPP_SE_PARSER_ECC_BYPASS_READ_T; + +typedef struct dpp_se_parser_mex0_5_req_cnt_t { + ZXIC_UINT32 mex0_5_req_cnt; +} DPP_SE_PARSER_MEX0_5_REQ_CNT_T; + +typedef struct dpp_se_parser_kschd_req0_5_cnt_t { + ZXIC_UINT32 kschd_req0_5_cnt; +} DPP_SE_PARSER_KSCHD_REQ0_5_CNT_T; + +typedef struct dpp_se_parser_kschd_parser_fc0_5_cnt_t { + ZXIC_UINT32 kschd_parser_fc0_5_cnt; +} DPP_SE_PARSER_KSCHD_PARSER_FC0_5_CNT_T; + +typedef struct dpp_se_parser_se_ppu_mex0_5_fc_cnt_t { + ZXIC_UINT32 se_ppu_mex0_5_fc_cnt; +} DPP_SE_PARSER_SE_PPU_MEX0_5_FC_CNT_T; + +typedef struct dpp_se_parser_smmu0_marc_fc_cnt_t { + ZXIC_UINT32 smmu0_marc_fc_cnt; +} DPP_SE_PARSER_SMMU0_MARC_FC_CNT_T; + +typedef struct dpp_se_parser_smmu0_marc_key_cnt_t { + ZXIC_UINT32 smmu0_marc_key_cnt; +} DPP_SE_PARSER_SMMU0_MARC_KEY_CNT_T; + +typedef struct dpp_se_parser_cmmu_key_cnt_t { + ZXIC_UINT32 cmmu_key_cnt; +} DPP_SE_PARSER_CMMU_KEY_CNT_T; + +typedef struct dpp_se_parser_cmmu_parser_fc_cnt_t { + ZXIC_UINT32 cmmu_parser_fc_cnt; +} DPP_SE_PARSER_CMMU_PARSER_FC_CNT_T; + +typedef struct dpp_se_parser_marc_tab_type_err_mex0_5_cnt_t { + ZXIC_UINT32 marc_tab_type_err_mex0_5_cnt; +} DPP_SE_PARSER_MARC_TAB_TYPE_ERR_MEX0_5_CNT_T; + +typedef struct 
dpp_se_parser_eram_fulladdr_drop_cnt_t { + ZXIC_UINT32 eram_fulladdr_drop_cnt; +} DPP_SE_PARSER_ERAM_FULLADDR_DROP_CNT_T; + +typedef struct dpp_se_as_hash0_pful_cfg_t { + ZXIC_UINT32 hash0_pful_cfg; +} DPP_SE_AS_HASH0_PFUL_CFG_T; + +typedef struct dpp_se_as_hash1_pful_cfg_t { + ZXIC_UINT32 hash1_pful_cfg; +} DPP_SE_AS_HASH1_PFUL_CFG_T; + +typedef struct dpp_se_as_hash2_pful_cfg_t { + ZXIC_UINT32 hash2_pful_cfg; +} DPP_SE_AS_HASH2_PFUL_CFG_T; + +typedef struct dpp_se_as_hash3_pful_cfg_t { + ZXIC_UINT32 hash3_pful_cfg; +} DPP_SE_AS_HASH3_PFUL_CFG_T; + +typedef struct dpp_se_as_pbu_pful_cfg_t { + ZXIC_UINT32 pbu_pful_cfg; +} DPP_SE_AS_PBU_PFUL_CFG_T; + +typedef struct dpp_se_as_lpm_pful_cfg_t { + ZXIC_UINT32 lpm_pful_cfg; +} DPP_SE_AS_LPM_PFUL_CFG_T; + +typedef struct dpp_se_as_etcam_pful_cfg_t { + ZXIC_UINT32 etcam_pful_cfg; +} DPP_SE_AS_ETCAM_PFUL_CFG_T; + +typedef struct dpp_se_as_as_learn0_fifo_cfg_t { + ZXIC_UINT32 as_learn1_pful_negate; + ZXIC_UINT32 as_learn1_pful_asert; + ZXIC_UINT32 as_learn0_pful_negate; + ZXIC_UINT32 as_learn0_pful_asert; +} DPP_SE_AS_AS_LEARN0_FIFO_CFG_T; + +typedef struct dpp_se_as_as_learn1_fifo_cfg_t { + ZXIC_UINT32 as_learn3_pful_negate; + ZXIC_UINT32 as_learn3_pful_asert; + ZXIC_UINT32 as_learn2_pful_negate; + ZXIC_UINT32 as_learn2_pful_asert; +} DPP_SE_AS_AS_LEARN1_FIFO_CFG_T; + +typedef struct dpp_se_as_as_dma_fifo_cfg_t { + ZXIC_UINT32 as_dma_fifo_cfg; +} DPP_SE_AS_AS_DMA_FIFO_CFG_T; + +typedef struct dpp_se_as_age_pful_cfg_t { + ZXIC_UINT32 age_pful_cfg; +} DPP_SE_AS_AGE_PFUL_CFG_T; + +typedef struct dpp_se_as_etcam_rsp_cfg_t { + ZXIC_UINT32 eram_rsp_pful_negate; + ZXIC_UINT32 eram_rsp_pful_assert; + ZXIC_UINT32 etcam_rsp_pful_negate; + ZXIC_UINT32 etcam_rsp_pful_assert; +} DPP_SE_AS_ETCAM_RSP_CFG_T; + +typedef struct dpp_se_as_pbu_ecc_bypass_read_t { + ZXIC_UINT32 pbu_ecc_bypass_read; +} DPP_SE_AS_PBU_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_etcam0_ecc_bypass_read_t { + ZXIC_UINT32 etcam0_ecc_bypass_read; +} 
DPP_SE_AS_ETCAM0_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_etcam1_ecc_bypass_read_t { + ZXIC_UINT32 etcam1_ecc_bypass_read; +} DPP_SE_AS_ETCAM1_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_lpm_ecc_bypass_read_t { + ZXIC_UINT32 lpm_ecc_bypass_read; +} DPP_SE_AS_LPM_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_hash_ecc_bypass_read_t { + ZXIC_UINT32 hash3_ecc_bypass_read; + ZXIC_UINT32 hash2_ecc_bypass_read; + ZXIC_UINT32 hash1_ecc_bypass_read; + ZXIC_UINT32 hash0_ecc_bypass_read; +} DPP_SE_AS_HASH_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_hash_learn_ecc_bypass_read_t { + ZXIC_UINT32 hash_learn_ecc_bypass_read; +} DPP_SE_AS_HASH_LEARN_ECC_BYPASS_READ_T; + +typedef struct dpp_se_as_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_AS_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_as_as_int_0_en_t { + ZXIC_UINT32 as_int_0_en; +} DPP_SE_AS_AS_INT_0_EN_T; + +typedef struct dpp_se_as_as_int_0_mask_t { + ZXIC_UINT32 as_int_0_mask; +} DPP_SE_AS_AS_INT_0_MASK_T; + +typedef struct dpp_se_as_as_int_1_en_t { + ZXIC_UINT32 as_int_1_en; +} DPP_SE_AS_AS_INT_1_EN_T; + +typedef struct dpp_se_as_as_int_1_mask_t { + ZXIC_UINT32 as_int_1_mask; +} DPP_SE_AS_AS_INT_1_MASK_T; + +typedef struct dpp_se_as_as_int_2_en_t { + ZXIC_UINT32 as_int_2_en; +} DPP_SE_AS_AS_INT_2_EN_T; + +typedef struct dpp_se_as_as_int_2_mask_t { + ZXIC_UINT32 as_int_2_mask; +} DPP_SE_AS_AS_INT_2_MASK_T; + +typedef struct dpp_se_as_as_int_0_status_t { + ZXIC_UINT32 port0_int_status; +} DPP_SE_AS_AS_INT_0_STATUS_T; + +typedef struct dpp_se_as_as_int_1_status_t { + ZXIC_UINT32 port1_int_status; +} DPP_SE_AS_AS_INT_1_STATUS_T; + +typedef struct dpp_se_as_as_int_2_status_t { + ZXIC_UINT32 port2_int_status; +} DPP_SE_AS_AS_INT_2_STATUS_T; + +typedef struct dpp_se_as_se_as_int_status_t { + ZXIC_UINT32 as_int_2_unmask_flag; + ZXIC_UINT32 as_int_1_unmask_flag; + ZXIC_UINT32 as_int_0_unmask_flag; +} DPP_SE_AS_SE_AS_INT_STATUS_T; + +typedef struct dpp_se_as_hash0_3_wr_req_cnt_t { 
+ ZXIC_UINT32 hash0_3_wr_req_cnt; +} DPP_SE_AS_HASH0_3_WR_REQ_CNT_T; + +typedef struct dpp_se_as_smmu0_etcam0_1_fc_cnt_t { + ZXIC_UINT32 smmu0_etcam0_1_fc_cnt; +} DPP_SE_AS_SMMU0_ETCAM0_1_FC_CNT_T; + +typedef struct dpp_se_as_etcam0_1_smmu0_req_cnt_t { + ZXIC_UINT32 etcam0_1_smmu0_req_cnt; +} DPP_SE_AS_ETCAM0_1_SMMU0_REQ_CNT_T; + +typedef struct dpp_se_as_smmu0_etcam0_1_rsp_cnt_t { + ZXIC_UINT32 smmu0_etcam0_1_rsp_cnt; +} DPP_SE_AS_SMMU0_ETCAM0_1_RSP_CNT_T; + +typedef struct dpp_se_as_as_hla_hash_p0_3_key_cnt_t { + ZXIC_UINT32 as_hla_hash_p0_3_key_cnt; +} DPP_SE_AS_AS_HLA_HASH_P0_3_KEY_CNT_T; + +typedef struct dpp_se_as_as_hla_lpm_p0_key_cnt_t { + ZXIC_UINT32 as_hla_lpm_p0_key_cnt; +} DPP_SE_AS_AS_HLA_LPM_P0_KEY_CNT_T; + +typedef struct dpp_se_as_alg_as_hash_p0_3_rsp_cnt_t { + ZXIC_UINT32 alg_as_hash_p0_3_rsp_cnt; +} DPP_SE_AS_ALG_AS_HASH_P0_3_RSP_CNT_T; + +typedef struct dpp_se_as_alg_as_hash_p0_3_smf_rsp_cnt_t { + ZXIC_UINT32 alg_as_hash_p0_3_smf_rsp_cnt; +} DPP_SE_AS_ALG_AS_HASH_P0_3_SMF_RSP_CNT_T; + +typedef struct dpp_se_as_alg_as_lpm_p0_rsp_cnt_t { + ZXIC_UINT32 alg_as_lpm_p0_rsp_cnt; +} DPP_SE_AS_ALG_AS_LPM_P0_RSP_CNT_T; + +typedef struct dpp_se_as_alg_as_lpm_p0_3_smf_rsp_cnt_t { + ZXIC_UINT32 alg_as_lpm_p0_3_smf_rsp_cnt; +} DPP_SE_AS_ALG_AS_LPM_P0_3_SMF_RSP_CNT_T; + +typedef struct dpp_se_as_as_pbu_key_cnt_t { + ZXIC_UINT32 as_pbu_key_cnt; +} DPP_SE_AS_AS_PBU_KEY_CNT_T; + +typedef struct dpp_se_as_pbu_se_dpi_rsp_dat_cnt_t { + ZXIC_UINT32 pbu_se_dpi_rsp_dat_cnt; +} DPP_SE_AS_PBU_SE_DPI_RSP_DAT_CNT_T; + +typedef struct dpp_se_as_as_etcam_ctrl_req0_cnt_t { + ZXIC_UINT32 as_etcam_ctrl_req0_cnt; +} DPP_SE_AS_AS_ETCAM_CTRL_REQ0_CNT_T; + +typedef struct dpp_se_as_etcam_ctrl_as_index0_1_cnt_t { + ZXIC_UINT32 etcam_ctrl_as_index0_1_cnt; +} DPP_SE_AS_ETCAM_CTRL_AS_INDEX0_1_CNT_T; + +typedef struct dpp_se_as_etcam_ctrl_as_hit0_1_cnt_t { + ZXIC_UINT32 etcam_ctrl_as_hit0_1_cnt; +} DPP_SE_AS_ETCAM_CTRL_AS_HIT0_1_CNT_T; + +typedef struct dpp_se_as_as_smmu0_req_cnt_t { + 
ZXIC_UINT32 as_smmu0_req_cnt; +} DPP_SE_AS_AS_SMMU0_REQ_CNT_T; + +typedef struct dpp_se_as_learn_hla_wr_cnt_t { + ZXIC_UINT32 learn_hla_wr_cnt; +} DPP_SE_AS_LEARN_HLA_WR_CNT_T; + +typedef struct dpp_se_as_as_smmu1_req_cnt_t { + ZXIC_UINT32 as_smmu1_req_cnt; +} DPP_SE_AS_AS_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_as_se_cfg_mac_dat_cnt_t { + ZXIC_UINT32 se_cfg_mac_dat_cnt; +} DPP_SE_AS_SE_CFG_MAC_DAT_CNT_T; + +typedef struct dpp_se_as_alg_as_hash_p0_3_fc_cnt_t { + ZXIC_UINT32 alg_as_hash_p0_3_fc_cnt; +} DPP_SE_AS_ALG_AS_HASH_P0_3_FC_CNT_T; + +typedef struct dpp_se_as_alg_as_lpm_p0_fc_cnt_t { + ZXIC_UINT32 alg_as_lpm_p0_fc_cnt; +} DPP_SE_AS_ALG_AS_LPM_P0_FC_CNT_T; + +typedef struct dpp_se_as_as_alg_hash_p0_3_fc_cnt_t { + ZXIC_UINT32 as_alg_hash_p0_3_fc_cnt; +} DPP_SE_AS_AS_ALG_HASH_P0_3_FC_CNT_T; + +typedef struct dpp_se_as_as_alg_lpm_p0_fc_cnt_t { + ZXIC_UINT32 as_alg_lpm_p0_fc_cnt; +} DPP_SE_AS_AS_ALG_LPM_P0_FC_CNT_T; + +typedef struct dpp_se_as_as_pbu_fc_cnt_t { + ZXIC_UINT32 as_pbu_fc_cnt; +} DPP_SE_AS_AS_PBU_FC_CNT_T; + +typedef struct dpp_se_as_pbu_se_dpi_key_fc_cnt_t { + ZXIC_UINT32 pbu_se_dpi_key_fc_cnt; +} DPP_SE_AS_PBU_SE_DPI_KEY_FC_CNT_T; + +typedef struct dpp_se_as_as_etcam_ctrl_fc0_1_cnt_t { + ZXIC_UINT32 as_etcam_ctrl_fc0_1_cnt; +} DPP_SE_AS_AS_ETCAM_CTRL_FC0_1_CNT_T; + +typedef struct dpp_se_as_etcam_ctrl_as_fc0_1_cnt_t { + ZXIC_UINT32 etcam_ctrl_as_fc0_1_cnt; +} DPP_SE_AS_ETCAM_CTRL_AS_FC0_1_CNT_T; + +typedef struct dpp_se_as_smmu0_as_mac_age_fc_cnt_t { + ZXIC_UINT32 smmu0_as_mac_age_fc_cnt; +} DPP_SE_AS_SMMU0_AS_MAC_AGE_FC_CNT_T; + +typedef struct dpp_se_as_alg_learn_fc_cnt_t { + ZXIC_UINT32 alg_learn_fc_cnt; +} DPP_SE_AS_ALG_LEARN_FC_CNT_T; + +typedef struct dpp_se_as_smmu1_as_fc_cnt_t { + ZXIC_UINT32 smmu1_as_fc_cnt; +} DPP_SE_AS_SMMU1_AS_FC_CNT_T; + +typedef struct dpp_se_as_cfg_se_mac_fc_cnt_t { + ZXIC_UINT32 cfg_se_mac_fc_cnt; +} DPP_SE_AS_CFG_SE_MAC_FC_CNT_T; + +typedef struct dpp_se_kschd_kschd_cpu_rdy_t { + ZXIC_UINT32 kschd_cpu_rdy; +} 
DPP_SE_KSCHD_KSCHD_CPU_RDY_T; + +typedef struct dpp_se_kschd_ppu0_ecc_bypass_read_t { + ZXIC_UINT32 ppu0_ecc_bypass_read; +} DPP_SE_KSCHD_PPU0_ECC_BYPASS_READ_T; + +typedef struct dpp_se_kschd_pbu_ecc_bypass_read_t { + ZXIC_UINT32 pbu_ecc_bypass_read; +} DPP_SE_KSCHD_PBU_ECC_BYPASS_READ_T; + +typedef struct dpp_se_kschd_smmu1_ecc_bypass_read_t { + ZXIC_UINT32 u3_smmu1_ecc_bypass_read; + ZXIC_UINT32 u2_smmu1_ecc_bypass_read; + ZXIC_UINT32 u1_smmu1_ecc_bypass_read; + ZXIC_UINT32 u0_smmu1_ecc_bypass_read; +} DPP_SE_KSCHD_SMMU1_ECC_BYPASS_READ_T; + +typedef struct dpp_se_kschd_ass_ecc_bypass_read_t { + ZXIC_UINT32 ass_ecc_bypass_read; +} DPP_SE_KSCHD_ASS_ECC_BYPASS_READ_T; + +typedef struct dpp_se_kschd_sdt_h_t { + ZXIC_UINT32 sdt_h; +} DPP_SE_KSCHD_SDT_H_T; + +typedef struct dpp_se_kschd_sdt_l_t { + ZXIC_UINT32 sdt_l; +} DPP_SE_KSCHD_SDT_L_T; + +typedef struct dpp_se_kschd_hash_key15_t { + ZXIC_UINT32 dma_en; + ZXIC_UINT32 delete_en; + ZXIC_UINT32 hash_key15; +} DPP_SE_KSCHD_HASH_KEY15_T; + +typedef struct dpp_se_kschd_hash_key14_t { + ZXIC_UINT32 hash_key14; +} DPP_SE_KSCHD_HASH_KEY14_T; + +typedef struct dpp_se_kschd_hash_key13_t { + ZXIC_UINT32 hash_key13; +} DPP_SE_KSCHD_HASH_KEY13_T; + +typedef struct dpp_se_kschd_hash_key12_t { + ZXIC_UINT32 hash_key12; +} DPP_SE_KSCHD_HASH_KEY12_T; + +typedef struct dpp_se_kschd_hash_key11_t { + ZXIC_UINT32 hash_key11; +} DPP_SE_KSCHD_HASH_KEY11_T; + +typedef struct dpp_se_kschd_hash_key10_t { + ZXIC_UINT32 hash_key10; +} DPP_SE_KSCHD_HASH_KEY10_T; + +typedef struct dpp_se_kschd_hash_key9_t { + ZXIC_UINT32 hash_key9; +} DPP_SE_KSCHD_HASH_KEY9_T; + +typedef struct dpp_se_kschd_hash_key8_t { + ZXIC_UINT32 hash_key8; +} DPP_SE_KSCHD_HASH_KEY8_T; + +typedef struct dpp_se_kschd_hash_key7_t { + ZXIC_UINT32 hash_key7; +} DPP_SE_KSCHD_HASH_KEY7_T; + +typedef struct dpp_se_kschd_hash_key6_t { + ZXIC_UINT32 hash_key6; +} DPP_SE_KSCHD_HASH_KEY6_T; + +typedef struct dpp_se_kschd_hash_key5_t { + ZXIC_UINT32 hash_key5; +} 
DPP_SE_KSCHD_HASH_KEY5_T; + +typedef struct dpp_se_kschd_hash_key4_t { + ZXIC_UINT32 hash_key4; +} DPP_SE_KSCHD_HASH_KEY4_T; + +typedef struct dpp_se_kschd_hash_key3_t { + ZXIC_UINT32 hash_key3; +} DPP_SE_KSCHD_HASH_KEY3_T; + +typedef struct dpp_se_kschd_hash_key2_t { + ZXIC_UINT32 hash_key2; +} DPP_SE_KSCHD_HASH_KEY2_T; + +typedef struct dpp_se_kschd_hash_key1_t { + ZXIC_UINT32 hash_key1; +} DPP_SE_KSCHD_HASH_KEY1_T; + +typedef struct dpp_se_kschd_hash_key0_t { + ZXIC_UINT32 hash_key0; +} DPP_SE_KSCHD_HASH_KEY0_T; + +typedef struct dpp_se_kschd_schd_int_0_en_t { + ZXIC_UINT32 port0_int_en; +} DPP_SE_KSCHD_SCHD_INT_0_EN_T; + +typedef struct dpp_se_kschd_schd_int_0_mask_t { + ZXIC_UINT32 port0_int_mask; +} DPP_SE_KSCHD_SCHD_INT_0_MASK_T; + +typedef struct dpp_se_kschd_schd_int_1_en_t { + ZXIC_UINT32 port1_int_en; +} DPP_SE_KSCHD_SCHD_INT_1_EN_T; + +typedef struct dpp_se_kschd_schd_int_1_mask_t { + ZXIC_UINT32 port1_int_mask; +} DPP_SE_KSCHD_SCHD_INT_1_MASK_T; + +typedef struct dpp_se_kschd_schd_int_2_en_t { + ZXIC_UINT32 port2_int_en; +} DPP_SE_KSCHD_SCHD_INT_2_EN_T; + +typedef struct dpp_se_kschd_schd_int_2_mask_t { + ZXIC_UINT32 port2_int_mask; +} DPP_SE_KSCHD_SCHD_INT_2_MASK_T; + +typedef struct dpp_se_kschd_schd_int_3_en_t { + ZXIC_UINT32 port3_int_en; +} DPP_SE_KSCHD_SCHD_INT_3_EN_T; + +typedef struct dpp_se_kschd_schd_int_3_mask_t { + ZXIC_UINT32 port3_int_mask; +} DPP_SE_KSCHD_SCHD_INT_3_MASK_T; + +typedef struct dpp_se_kschd_schd_int_4_en_t { + ZXIC_UINT32 port4_int_en; +} DPP_SE_KSCHD_SCHD_INT_4_EN_T; + +typedef struct dpp_se_kschd_schd_int_4_mask_t { + ZXIC_UINT32 port4_int_mask; +} DPP_SE_KSCHD_SCHD_INT_4_MASK_T; + +typedef struct dpp_se_kschd_schd_int_0_status_t { + ZXIC_UINT32 port0_int_status; +} DPP_SE_KSCHD_SCHD_INT_0_STATUS_T; + +typedef struct dpp_se_kschd_schd_int_1_status_t { + ZXIC_UINT32 port1_int_status; +} DPP_SE_KSCHD_SCHD_INT_1_STATUS_T; + +typedef struct dpp_se_kschd_schd_int_2_status_t { + ZXIC_UINT32 port2_int_status; +} 
DPP_SE_KSCHD_SCHD_INT_2_STATUS_T; + +typedef struct dpp_se_kschd_schd_int_3_status_t { + ZXIC_UINT32 port3_int_status; +} DPP_SE_KSCHD_SCHD_INT_3_STATUS_T; + +typedef struct dpp_se_kschd_schd_int_4_status_t { + ZXIC_UINT32 port4_int_status; +} DPP_SE_KSCHD_SCHD_INT_4_STATUS_T; + +typedef struct dpp_se_kschd_se_kschd_int_status_t { + ZXIC_UINT32 schd_int4_unmask_flag; + ZXIC_UINT32 schd_int3_unmask_flag; + ZXIC_UINT32 schd_int2_unmask_flag; + ZXIC_UINT32 schd_int1_unmask_flag; + ZXIC_UINT32 schd_int0_unmask_flag; +} DPP_SE_KSCHD_SE_KSCHD_INT_STATUS_T; + +typedef struct dpp_se_kschd_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_KSCHD_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_kschd_se_parser_kschd_key0_3_cnt_t { + ZXIC_UINT32 se_parser_kschd_key0_3_cnt; +} DPP_SE_KSCHD_SE_PARSER_KSCHD_KEY0_3_CNT_T; + +typedef struct dpp_se_kschd_se_smmu1_key0_3_cnt_t { + ZXIC_UINT32 se_smmu1_key0_3_cnt; +} DPP_SE_KSCHD_SE_SMMU1_KEY0_3_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key0_cnt_t { + ZXIC_UINT32 kschd_as_key0_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY0_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key1_cnt_t { + ZXIC_UINT32 kschd_as_key1_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY1_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key2_cnt_t { + ZXIC_UINT32 kschd_as_key2_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY2_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key3_cnt_t { + ZXIC_UINT32 kschd_as_key3_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY3_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key4_cnt_t { + ZXIC_UINT32 kschd_as_key4_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY4_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key5_cnt_t { + ZXIC_UINT32 kschd_as_key5_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY5_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key6_cnt_t { + ZXIC_UINT32 kschd_as_key6_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY6_CNT_T; + +typedef struct dpp_se_kschd_kschd_as_key9_cnt_t { + ZXIC_UINT32 kschd_as_key9_cnt; +} DPP_SE_KSCHD_KSCHD_AS_KEY9_CNT_T; + +typedef struct 
dpp_se_kschd_kschd_se_parser_fc0_3_cnt_t { + ZXIC_UINT32 kschd_se_parser_fc0_3_cnt; +} DPP_SE_KSCHD_KSCHD_SE_PARSER_FC0_3_CNT_T; + +typedef struct dpp_se_kschd_smmu1_se_fc0_3_cnt_t { + ZXIC_UINT32 smmu1_se_fc0_3_cnt; +} DPP_SE_KSCHD_SMMU1_SE_FC0_3_CNT_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt0_t { + ZXIC_UINT32 as_kschd_fc_cnt0; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT0_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt1_t { + ZXIC_UINT32 as_kschd_fc_cnt1; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT1_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt2_t { + ZXIC_UINT32 as_kschd_fc_cnt2; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT2_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt3_t { + ZXIC_UINT32 as_kschd_fc_cnt3; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT3_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt4_t { + ZXIC_UINT32 as_kschd_fc_cnt4; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT4_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt5_t { + ZXIC_UINT32 as_kschd_fc_cnt5; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT5_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt6_t { + ZXIC_UINT32 as_kschd_fc_cnt6; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT6_T; + +typedef struct dpp_se_kschd_as_kschd_fc_cnt9_t { + ZXIC_UINT32 as_kschd_fc_cnt9; +} DPP_SE_KSCHD_AS_KSCHD_FC_CNT9_T; + +typedef struct dpp_se_rschd_rschd_hash_pful_cfg_t { + ZXIC_UINT32 rschd_hash_pful_cfg; +} DPP_SE_RSCHD_RSCHD_HASH_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_hash_ept_cfg_t { + ZXIC_UINT32 rschd_hash_ept_cfg; +} DPP_SE_RSCHD_RSCHD_HASH_EPT_CFG_T; + +typedef struct dpp_se_rschd_rschd_pbu_pful_cfg_t { + ZXIC_UINT32 rschd_pbu_pful_cfg; +} DPP_SE_RSCHD_RSCHD_PBU_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_pbu_ept_cfg_t { + ZXIC_UINT32 rschd_pbu_ept_cfg; +} DPP_SE_RSCHD_RSCHD_PBU_EPT_CFG_T; + +typedef struct dpp_se_rschd_rschd_lpm_pful_cfg_t { + ZXIC_UINT32 rschd_lpm_pful_cfg; +} DPP_SE_RSCHD_RSCHD_LPM_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_lpm_ept_cfg_t { + ZXIC_UINT32 rschd_lpm_ept_cfg; +} DPP_SE_RSCHD_RSCHD_LPM_EPT_CFG_T; + +typedef struct 
dpp_se_rschd_rschd_etcam_pful_cfg_t { + ZXIC_UINT32 rschd_etcam_pful_cfg; +} DPP_SE_RSCHD_RSCHD_ETCAM_PFUL_CFG_T; + +typedef struct dpp_se_rschd_rschd_etcam_ept_cfg_t { + ZXIC_UINT32 rschd_etcam_ept_cfg; +} DPP_SE_RSCHD_RSCHD_ETCAM_EPT_CFG_T; + +typedef struct dpp_se_rschd_smmu0_wb_pful_cfg_t { + ZXIC_UINT32 smmu0_wb_pful_cfg; +} DPP_SE_RSCHD_SMMU0_WB_PFUL_CFG_T; + +typedef struct dpp_se_rschd_smmu0_wb_ept_cfg_t { + ZXIC_UINT32 smmu0_wb_ept_cfg; +} DPP_SE_RSCHD_SMMU0_WB_EPT_CFG_T; + +typedef struct dpp_se_rschd_smmu1_wb_pful_cfg_t { + ZXIC_UINT32 smmu1_wb_pful_cfg; +} DPP_SE_RSCHD_SMMU1_WB_PFUL_CFG_T; + +typedef struct dpp_se_rschd_smmu1_wb_ept_cfg_t { + ZXIC_UINT32 smmu1_wb_ept_cfg; +} DPP_SE_RSCHD_SMMU1_WB_EPT_CFG_T; + +typedef struct dpp_se_rschd_alg_wb_pful_cfg_t { + ZXIC_UINT32 alg_wb_pful_cfg; +} DPP_SE_RSCHD_ALG_WB_PFUL_CFG_T; + +typedef struct dpp_se_rschd_alg_wb_ept_cfg_t { + ZXIC_UINT32 alg_wb_ept_cfg; +} DPP_SE_RSCHD_ALG_WB_EPT_CFG_T; + +typedef struct dpp_se_rschd_wr_rsp_vld_en_t { + ZXIC_UINT32 wr_rsp_vld_en; +} DPP_SE_RSCHD_WR_RSP_VLD_EN_T; + +typedef struct dpp_se_rschd_nppu_wb_pful_cfg_t { + ZXIC_UINT32 nppu_wb_pful_cfg; +} DPP_SE_RSCHD_NPPU_WB_PFUL_CFG_T; + +typedef struct dpp_se_rschd_nppu_wb_ept_cfg_t { + ZXIC_UINT32 nppu_wb_ept_cfg; +} DPP_SE_RSCHD_NPPU_WB_EPT_CFG_T; + +typedef struct dpp_se_rschd_port0_int_en_t { + ZXIC_UINT32 port0_int_en; +} DPP_SE_RSCHD_PORT0_INT_EN_T; + +typedef struct dpp_se_rschd_port0_int_mask_t { + ZXIC_UINT32 port0_int_mask; +} DPP_SE_RSCHD_PORT0_INT_MASK_T; + +typedef struct dpp_se_rschd_port1_int_en_t { + ZXIC_UINT32 port1_int_en; +} DPP_SE_RSCHD_PORT1_INT_EN_T; + +typedef struct dpp_se_rschd_port1_int_mask_t { + ZXIC_UINT32 port1_int_mask; +} DPP_SE_RSCHD_PORT1_INT_MASK_T; + +typedef struct dpp_se_rschd_port0_int_status_t { + ZXIC_UINT32 port0_int_status; +} DPP_SE_RSCHD_PORT0_INT_STATUS_T; + +typedef struct dpp_se_rschd_port1_int_status_t { + ZXIC_UINT32 port1_int_status; +} DPP_SE_RSCHD_PORT1_INT_STATUS_T; + 
+typedef struct dpp_se_rschd_se_rschd_int_status_t { + ZXIC_UINT32 port1_int_unmask_flag; + ZXIC_UINT32 port0_int_unmask_flag; +} DPP_SE_RSCHD_SE_RSCHD_INT_STATUS_T; + +typedef struct dpp_se_rschd_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_RSCHD_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_rschd_se_ppu_mex0_5_rsp1_cnt_t { + ZXIC_UINT32 se_ppu_mex0_5_rsp1_cnt; +} DPP_SE_RSCHD_SE_PPU_MEX0_5_RSP1_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp0_cnt_t { + ZXIC_UINT32 as_rschd_rsp0_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP0_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp1_cnt_t { + ZXIC_UINT32 as_rschd_rsp1_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP1_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp2_cnt_t { + ZXIC_UINT32 as_rschd_rsp2_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP2_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp3_cnt_t { + ZXIC_UINT32 as_rschd_rsp3_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP3_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp4_cnt_t { + ZXIC_UINT32 as_rschd_rsp4_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP4_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp5_cnt_t { + ZXIC_UINT32 as_rschd_rsp5_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP5_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp6_cnt_t { + ZXIC_UINT32 as_rschd_rsp6_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP6_CNT_T; + +typedef struct dpp_se_rschd_as_rschd_rsp9_cnt_t { + ZXIC_UINT32 as_rschd_rsp9_cnt; +} DPP_SE_RSCHD_AS_RSCHD_RSP9_CNT_T; + +typedef struct dpp_se_rschd_smmu1_se_rsp0_3_cnt_t { + ZXIC_UINT32 smmu1_se_rsp0_3_cnt; +} DPP_SE_RSCHD_SMMU1_SE_RSP0_3_CNT_T; + +typedef struct dpp_se_rschd_ppu_se_mex0_3_fc_cnt_t { + ZXIC_UINT32 ppu_se_mex0_3_fc_cnt; +} DPP_SE_RSCHD_PPU_SE_MEX0_3_FC_CNT_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt0_t { + ZXIC_UINT32 rschd_as_fc_cnt0; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT0_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt1_t { + ZXIC_UINT32 rschd_as_fc_cnt1; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT1_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt2_t { + ZXIC_UINT32 
rschd_as_fc_cnt2; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT2_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt3_t { + ZXIC_UINT32 rschd_as_fc_cnt3; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT3_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt4_t { + ZXIC_UINT32 rschd_as_fc_cnt4; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT4_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt5_t { + ZXIC_UINT32 rschd_as_fc_cnt5; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT5_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt6_t { + ZXIC_UINT32 rschd_as_fc_cnt6; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT6_T; + +typedef struct dpp_se_rschd_rschd_as_fc_cnt9_t { + ZXIC_UINT32 rschd_as_fc_cnt9; +} DPP_SE_RSCHD_RSCHD_AS_FC_CNT9_T; + +typedef struct dpp_se_rschd_se_smmu1_fc0_3_cnt_t { + ZXIC_UINT32 se_smmu1_fc0_3_cnt; +} DPP_SE_RSCHD_SE_SMMU1_FC0_3_CNT_T; + +typedef struct dpp_se_rschd_smmu0_se_wr_done_cnt_t { + ZXIC_UINT32 smmu0_se_wr_done_cnt; +} DPP_SE_RSCHD_SMMU0_SE_WR_DONE_CNT_T; + +typedef struct dpp_se_rschd_se_smmu0_wr_done_fc_cnt_t { + ZXIC_UINT32 se_smmu0_wr_done_fc_cnt; +} DPP_SE_RSCHD_SE_SMMU0_WR_DONE_FC_CNT_T; + +typedef struct dpp_se_rschd_smmu1_se_wr_rsp_cnt_t { + ZXIC_UINT32 smmu1_se_wr_rsp_cnt; +} DPP_SE_RSCHD_SMMU1_SE_WR_RSP_CNT_T; + +typedef struct dpp_se_rschd_se_smmu1_wr_rsp_fc_cnt_t { + ZXIC_UINT32 se_smmu1_wr_rsp_fc_cnt; +} DPP_SE_RSCHD_SE_SMMU1_WR_RSP_FC_CNT_T; + +typedef struct dpp_se_rschd_alg_se_wr_rsp_cnt_t { + ZXIC_UINT32 alg_se_wr_rsp_cnt; +} DPP_SE_RSCHD_ALG_SE_WR_RSP_CNT_T; + +typedef struct dpp_se_rschd_se_alg_wr_rsp_fc_cnt_t { + ZXIC_UINT32 se_alg_wr_rsp_fc_cnt; +} DPP_SE_RSCHD_SE_ALG_WR_RSP_FC_CNT_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu0_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu0_reg.h new file mode 100644 index 000000000000..6a2e79ba15a2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu0_reg.h @@ -0,0 +1,6035 @@ + +#ifndef _DPP_SMMU0_REG_H_ +#define _DPP_SMMU0_REG_H_ + 
+#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_smmu0_smmu0_init_done_t { + ZXIC_UINT32 init_done; +} DPP_SMMU0_SMMU0_INIT_DONE_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_wdat0_t { + ZXIC_UINT32 cpu_ind_wdat0; +} DPP_SMMU0_SMMU0_CPU_IND_WDAT0_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_wdat1_t { + ZXIC_UINT32 cpu_ind_wdat1; +} DPP_SMMU0_SMMU0_CPU_IND_WDAT1_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_wdat2_t { + ZXIC_UINT32 cpu_ind_wdat2; +} DPP_SMMU0_SMMU0_CPU_IND_WDAT2_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_wdat3_t { + ZXIC_UINT32 cpu_ind_wdat3; +} DPP_SMMU0_SMMU0_CPU_IND_WDAT3_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_cmd_t { + ZXIC_UINT32 cpu_ind_rw; + ZXIC_UINT32 cpu_ind_rd_mode; + ZXIC_UINT32 cpu_req_mode; + ZXIC_UINT32 cpu_ind_addr; +} DPP_SMMU0_SMMU0_CPU_IND_CMD_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rd_done_t { + ZXIC_UINT32 cpu_ind_rd_done; +} DPP_SMMU0_SMMU0_CPU_IND_RD_DONE_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rdat0_t { + ZXIC_UINT32 cpu_ind_rdat0; +} DPP_SMMU0_SMMU0_CPU_IND_RDAT0_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rdat1_t { + ZXIC_UINT32 cpu_ind_rdat1; +} DPP_SMMU0_SMMU0_CPU_IND_RDAT1_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rdat2_t { + ZXIC_UINT32 cpu_ind_rdat2; +} DPP_SMMU0_SMMU0_CPU_IND_RDAT2_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rdat3_t { + ZXIC_UINT32 cpu_ind_rdat3; +} DPP_SMMU0_SMMU0_CPU_IND_RDAT3_T; + +typedef struct dpp_smmu0_smmu0_cfg_plcr_mono_t { + ZXIC_UINT32 cfg_plcr_mono; +} DPP_SMMU0_SMMU0_CFG_PLCR_MONO_T; + +typedef struct dpp_smmu0_smmu0_wr_arb_cpu_rdy_t { + ZXIC_UINT32 wr_arb_cpu_rdy; +} DPP_SMMU0_SMMU0_WR_ARB_CPU_RDY_T; + +typedef struct dpp_smmu0_smmu0_tm_stat_en_cfg_t { + ZXIC_UINT32 tm_stat_en_cfg; +} DPP_SMMU0_SMMU0_TM_STAT_EN_CFG_T; + +typedef struct dpp_smmu0_smmu0_kschd_pful_cfg0_t { + ZXIC_UINT32 kschd_pful_assert0_1; + ZXIC_UINT32 kschd_pful_negate0_1; + ZXIC_UINT32 kschd_pful_assert0_0; + ZXIC_UINT32 kschd_pful_negate0_0; +} DPP_SMMU0_SMMU0_KSCHD_PFUL_CFG0_T; + 
+typedef struct dpp_smmu0_smmu0_kschd_pful_cfg1_t { + ZXIC_UINT32 kschd_pful_assert1_1; + ZXIC_UINT32 kschd_pful_negate1_1; + ZXIC_UINT32 kschd_pful_assert1_0; + ZXIC_UINT32 kschd_pful_negate1_0; +} DPP_SMMU0_SMMU0_KSCHD_PFUL_CFG1_T; + +typedef struct dpp_smmu0_smmu0_ctrl_pful1_cfg_t { + ZXIC_UINT32 ctrl_pful1_assert; + ZXIC_UINT32 ctrl_pful1_negate; +} DPP_SMMU0_SMMU0_CTRL_PFUL1_CFG_T; + +typedef struct dpp_smmu0_smmu0_ctrl_pful2_cfg_t { + ZXIC_UINT32 ctrl_pful2_assert; + ZXIC_UINT32 ctrl_pful2_negate; +} DPP_SMMU0_SMMU0_CTRL_PFUL2_CFG_T; + +typedef struct dpp_smmu0_smmu0_ctrl_pful3_cfg_t { + ZXIC_UINT32 ctrl_pful3_assert; + ZXIC_UINT32 ctrl_pful3_negate; +} DPP_SMMU0_SMMU0_CTRL_PFUL3_CFG_T; + +typedef struct dpp_smmu0_smmu0_rschd_pful_cfg_t { + ZXIC_UINT32 rschd_pful_assert; + ZXIC_UINT32 rschd_pful_negate; +} DPP_SMMU0_SMMU0_RSCHD_PFUL_CFG_T; + +typedef struct dpp_smmu0_smmu0_rschd_ept_cfg_t { + ZXIC_UINT32 rschd_ept_assert; + ZXIC_UINT32 rschd_ept_negate; +} DPP_SMMU0_SMMU0_RSCHD_EPT_CFG_T; + +typedef struct dpp_smmu0_smmu0_alucmd_pful_cfg_t { + ZXIC_UINT32 alucmd_pful_assert; + ZXIC_UINT32 alucmd_pful_negate; +} DPP_SMMU0_SMMU0_ALUCMD_PFUL_CFG_T; + +typedef struct dpp_smmu0_smmu0_aluwr_pful_cfg_t { + ZXIC_UINT32 aluwr_pful_assert; + ZXIC_UINT32 aluwr_pful_negate; +} DPP_SMMU0_SMMU0_ALUWR_PFUL_CFG_T; + +typedef struct dpp_smmu0_smmu0_wr_arb_pful_cfg0_t { + ZXIC_UINT32 wr_arb_pful0_assert; + ZXIC_UINT32 wr_arb_pful0_negate; +} DPP_SMMU0_SMMU0_WR_ARB_PFUL_CFG0_T; + +typedef struct dpp_smmu0_smmu0_wr_arb_pful_cfg1_t { + ZXIC_UINT32 wr_arb_pful1_assert; + ZXIC_UINT32 wr_arb_pful1_negate; +} DPP_SMMU0_SMMU0_WR_ARB_PFUL_CFG1_T; + +typedef struct dpp_smmu0_smmu0_ord_pful_cfg_t { + ZXIC_UINT32 ord_pful_assert; + ZXIC_UINT32 ord_pful_negate; +} DPP_SMMU0_SMMU0_ORD_PFUL_CFG_T; + +typedef struct dpp_smmu0_smmu0_cfg_dma_baddr_t { + ZXIC_UINT32 cfg_dma_baddr; +} DPP_SMMU0_SMMU0_CFG_DMA_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_odma0_baddr_t { + ZXIC_UINT32 
cfg_odma0_baddr; +} DPP_SMMU0_SMMU0_CFG_ODMA0_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_odma1_baddr_t { + ZXIC_UINT32 cfg_odma1_baddr; +} DPP_SMMU0_SMMU0_CFG_ODMA1_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_odma2_baddr_t { + ZXIC_UINT32 cfg_odma2_baddr; +} DPP_SMMU0_SMMU0_CFG_ODMA2_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_odma_tdm_baddr_t { + ZXIC_UINT32 cfg_odma_tdm_baddr; +} DPP_SMMU0_SMMU0_CFG_ODMA_TDM_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_mcast_baddr_t { + ZXIC_UINT32 cfg_mcast_baddr; +} DPP_SMMU0_SMMU0_CFG_MCAST_BADDR_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm0_t { + ZXIC_UINT32 lpm0_rsp_mode; + ZXIC_UINT32 lpm0_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM0_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm1_t { + ZXIC_UINT32 lpm1_rsp_mode; + ZXIC_UINT32 lpm1_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM1_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm2_t { + ZXIC_UINT32 lpm2_rsp_mode; + ZXIC_UINT32 lpm2_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM2_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm3_t { + ZXIC_UINT32 lpm3_rsp_mode; + ZXIC_UINT32 lpm3_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM3_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm4_t { + ZXIC_UINT32 lpm4_rsp_mode; + ZXIC_UINT32 lpm4_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM4_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm5_t { + ZXIC_UINT32 lpm5_rsp_mode; + ZXIC_UINT32 lpm5_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM5_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm6_t { + ZXIC_UINT32 lpm6_rsp_mode; + ZXIC_UINT32 lpm6_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM6_T; + +typedef struct dpp_smmu0_smmu0_cfg_lpm7_t { + ZXIC_UINT32 lpm7_rsp_mode; + ZXIC_UINT32 lpm7_baddr; +} DPP_SMMU0_SMMU0_CFG_LPM7_T; + +typedef struct dpp_smmu0_smmu0_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SMMU0_SMMU0_DEBUG_CNT_MODE_T; + +typedef struct dpp_smmu0_smmu0_stat_overflow_mode_t { + ZXIC_UINT32 stat_overflow_mode; +} DPP_SMMU0_SMMU0_STAT_OVERFLOW_MODE_T; + +typedef struct dpp_smmu0_smmu0_init_en_cfg_tmp_t { + ZXIC_UINT32 init_en_cfg_tmp31; + 
ZXIC_UINT32 init_en_cfg_tmp30; + ZXIC_UINT32 init_en_cfg_tmp29; + ZXIC_UINT32 init_en_cfg_tmp28; + ZXIC_UINT32 init_en_cfg_tmp27; + ZXIC_UINT32 init_en_cfg_tmp26; + ZXIC_UINT32 init_en_cfg_tmp25; + ZXIC_UINT32 init_en_cfg_tmp24; + ZXIC_UINT32 init_en_cfg_tmp23; + ZXIC_UINT32 init_en_cfg_tmp22; + ZXIC_UINT32 init_en_cfg_tmp21; + ZXIC_UINT32 init_en_cfg_tmp20; + ZXIC_UINT32 init_en_cfg_tmp19; + ZXIC_UINT32 init_en_cfg_tmp18; + ZXIC_UINT32 init_en_cfg_tmp17; + ZXIC_UINT32 init_en_cfg_tmp16; + ZXIC_UINT32 init_en_cfg_tmp15; + ZXIC_UINT32 init_en_cfg_tmp14; + ZXIC_UINT32 init_en_cfg_tmp13; + ZXIC_UINT32 init_en_cfg_tmp12; + ZXIC_UINT32 init_en_cfg_tmp11; + ZXIC_UINT32 init_en_cfg_tmp10; + ZXIC_UINT32 init_en_cfg_tmp9; + ZXIC_UINT32 init_en_cfg_tmp8; + ZXIC_UINT32 init_en_cfg_tmp7; + ZXIC_UINT32 init_en_cfg_tmp6; + ZXIC_UINT32 init_en_cfg_tmp5; + ZXIC_UINT32 init_en_cfg_tmp4; + ZXIC_UINT32 init_en_cfg_tmp3; + ZXIC_UINT32 init_en_cfg_tmp2; + ZXIC_UINT32 init_en_cfg_tmp1; + ZXIC_UINT32 init_en_cfg_tmp0; +} DPP_SMMU0_SMMU0_INIT_EN_CFG_TMP_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int_unmask_flag_t { + ZXIC_UINT32 smmu0_int0_31_unmask_flag; +} DPP_SMMU0_SMMU0_SMMU0_INT_UNMASK_FLAG_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int0_en_t { + ZXIC_UINT32 smmu0_int0_en31; + ZXIC_UINT32 smmu0_int0_en30; + ZXIC_UINT32 smmu0_int0_en29; + ZXIC_UINT32 smmu0_int0_en28; + ZXIC_UINT32 smmu0_int0_en27; + ZXIC_UINT32 smmu0_int0_en26; + ZXIC_UINT32 smmu0_int0_en25; + ZXIC_UINT32 smmu0_int0_en24; + ZXIC_UINT32 smmu0_int0_en23; + ZXIC_UINT32 smmu0_int0_en22; + ZXIC_UINT32 smmu0_int0_en21; + ZXIC_UINT32 smmu0_int0_en20; + ZXIC_UINT32 smmu0_int0_en19; + ZXIC_UINT32 smmu0_int0_en18; + ZXIC_UINT32 smmu0_int0_en17; + ZXIC_UINT32 smmu0_int0_en16; + ZXIC_UINT32 smmu0_int0_en15; + ZXIC_UINT32 smmu0_int0_en14; + ZXIC_UINT32 smmu0_int0_en13; + ZXIC_UINT32 smmu0_int0_en12; + ZXIC_UINT32 smmu0_int0_en11; + ZXIC_UINT32 smmu0_int0_en10; + ZXIC_UINT32 smmu0_int0_en9; + ZXIC_UINT32 smmu0_int0_en8; + 
ZXIC_UINT32 smmu0_int0_en7; + ZXIC_UINT32 smmu0_int0_en6; + ZXIC_UINT32 smmu0_int0_en5; + ZXIC_UINT32 smmu0_int0_en4; + ZXIC_UINT32 smmu0_int0_en3; + ZXIC_UINT32 smmu0_int0_en2; + ZXIC_UINT32 smmu0_int0_en1; + ZXIC_UINT32 smmu0_int0_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT0_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int0_mask_t { + ZXIC_UINT32 smmu0_int0_mask31; + ZXIC_UINT32 smmu0_int0_mask30; + ZXIC_UINT32 smmu0_int0_mask29; + ZXIC_UINT32 smmu0_int0_mask28; + ZXIC_UINT32 smmu0_int0_mask27; + ZXIC_UINT32 smmu0_int0_mask26; + ZXIC_UINT32 smmu0_int0_mask25; + ZXIC_UINT32 smmu0_int0_mask24; + ZXIC_UINT32 smmu0_int0_mask23; + ZXIC_UINT32 smmu0_int0_mask22; + ZXIC_UINT32 smmu0_int0_mask21; + ZXIC_UINT32 smmu0_int0_mask20; + ZXIC_UINT32 smmu0_int0_mask19; + ZXIC_UINT32 smmu0_int0_mask18; + ZXIC_UINT32 smmu0_int0_mask17; + ZXIC_UINT32 smmu0_int0_mask16; + ZXIC_UINT32 smmu0_int0_mask15; + ZXIC_UINT32 smmu0_int0_mask14; + ZXIC_UINT32 smmu0_int0_mask13; + ZXIC_UINT32 smmu0_int0_mask12; + ZXIC_UINT32 smmu0_int0_mask11; + ZXIC_UINT32 smmu0_int0_mask10; + ZXIC_UINT32 smmu0_int0_mask9; + ZXIC_UINT32 smmu0_int0_mask8; + ZXIC_UINT32 smmu0_int0_mask7; + ZXIC_UINT32 smmu0_int0_mask6; + ZXIC_UINT32 smmu0_int0_mask5; + ZXIC_UINT32 smmu0_int0_mask4; + ZXIC_UINT32 smmu0_int0_mask3; + ZXIC_UINT32 smmu0_int0_mask2; + ZXIC_UINT32 smmu0_int0_mask1; + ZXIC_UINT32 smmu0_int0_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT0_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int0_status_t { + ZXIC_UINT32 smmu0_int0_status31; + ZXIC_UINT32 smmu0_int0_status30; + ZXIC_UINT32 smmu0_int0_status29; + ZXIC_UINT32 smmu0_int0_status28; + ZXIC_UINT32 smmu0_int0_status27; + ZXIC_UINT32 smmu0_int0_status26; + ZXIC_UINT32 smmu0_int0_status25; + ZXIC_UINT32 smmu0_int0_status24; + ZXIC_UINT32 smmu0_int0_status23; + ZXIC_UINT32 smmu0_int0_status22; + ZXIC_UINT32 smmu0_int0_status21; + ZXIC_UINT32 smmu0_int0_status20; + ZXIC_UINT32 smmu0_int0_status19; + ZXIC_UINT32 smmu0_int0_status18; + ZXIC_UINT32 smmu0_int0_status17; + 
ZXIC_UINT32 smmu0_int0_status16; + ZXIC_UINT32 smmu0_int0_status15; + ZXIC_UINT32 smmu0_int0_status14; + ZXIC_UINT32 smmu0_int0_status13; + ZXIC_UINT32 smmu0_int0_status12; + ZXIC_UINT32 smmu0_int0_status11; + ZXIC_UINT32 smmu0_int0_status10; + ZXIC_UINT32 smmu0_int0_status9; + ZXIC_UINT32 smmu0_int0_status8; + ZXIC_UINT32 smmu0_int0_status7; + ZXIC_UINT32 smmu0_int0_status6; + ZXIC_UINT32 smmu0_int0_status5; + ZXIC_UINT32 smmu0_int0_status4; + ZXIC_UINT32 smmu0_int0_status3; + ZXIC_UINT32 smmu0_int0_status2; + ZXIC_UINT32 smmu0_int0_status1; + ZXIC_UINT32 smmu0_int0_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT0_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int1_en_t { + ZXIC_UINT32 smmu0_int1_en31; + ZXIC_UINT32 smmu0_int1_en30; + ZXIC_UINT32 smmu0_int1_en29; + ZXIC_UINT32 smmu0_int1_en28; + ZXIC_UINT32 smmu0_int1_en27; + ZXIC_UINT32 smmu0_int1_en26; + ZXIC_UINT32 smmu0_int1_en25; + ZXIC_UINT32 smmu0_int1_en24; + ZXIC_UINT32 smmu0_int1_en23; + ZXIC_UINT32 smmu0_int1_en22; + ZXIC_UINT32 smmu0_int1_en21; + ZXIC_UINT32 smmu0_int1_en20; + ZXIC_UINT32 smmu0_int1_en19; + ZXIC_UINT32 smmu0_int1_en18; + ZXIC_UINT32 smmu0_int1_en17; + ZXIC_UINT32 smmu0_int1_en16; + ZXIC_UINT32 smmu0_int1_en15; + ZXIC_UINT32 smmu0_int1_en14; + ZXIC_UINT32 smmu0_int1_en13; + ZXIC_UINT32 smmu0_int1_en12; + ZXIC_UINT32 smmu0_int1_en11; + ZXIC_UINT32 smmu0_int1_en10; + ZXIC_UINT32 smmu0_int1_en9; + ZXIC_UINT32 smmu0_int1_en8; + ZXIC_UINT32 smmu0_int1_en7; + ZXIC_UINT32 smmu0_int1_en6; + ZXIC_UINT32 smmu0_int1_en5; + ZXIC_UINT32 smmu0_int1_en4; + ZXIC_UINT32 smmu0_int1_en3; + ZXIC_UINT32 smmu0_int1_en2; + ZXIC_UINT32 smmu0_int1_en1; + ZXIC_UINT32 smmu0_int1_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT1_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int1_mask_t { + ZXIC_UINT32 smmu0_int1_mask31; + ZXIC_UINT32 smmu0_int1_mask30; + ZXIC_UINT32 smmu0_int1_mask29; + ZXIC_UINT32 smmu0_int1_mask28; + ZXIC_UINT32 smmu0_int1_mask27; + ZXIC_UINT32 smmu0_int1_mask26; + ZXIC_UINT32 smmu0_int1_mask25; + ZXIC_UINT32 
smmu0_int1_mask24; + ZXIC_UINT32 smmu0_int1_mask23; + ZXIC_UINT32 smmu0_int1_mask22; + ZXIC_UINT32 smmu0_int1_mask21; + ZXIC_UINT32 smmu0_int1_mask20; + ZXIC_UINT32 smmu0_int1_mask19; + ZXIC_UINT32 smmu0_int1_mask18; + ZXIC_UINT32 smmu0_int1_mask17; + ZXIC_UINT32 smmu0_int1_mask16; + ZXIC_UINT32 smmu0_int1_mask15; + ZXIC_UINT32 smmu0_int1_mask14; + ZXIC_UINT32 smmu0_int1_mask13; + ZXIC_UINT32 smmu0_int1_mask12; + ZXIC_UINT32 smmu0_int1_mask11; + ZXIC_UINT32 smmu0_int1_mask10; + ZXIC_UINT32 smmu0_int1_mask9; + ZXIC_UINT32 smmu0_int1_mask8; + ZXIC_UINT32 smmu0_int1_mask7; + ZXIC_UINT32 smmu0_int1_mask6; + ZXIC_UINT32 smmu0_int1_mask5; + ZXIC_UINT32 smmu0_int1_mask4; + ZXIC_UINT32 smmu0_int1_mask3; + ZXIC_UINT32 smmu0_int1_mask2; + ZXIC_UINT32 smmu0_int1_mask1; + ZXIC_UINT32 smmu0_int1_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT1_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int1_status_t { + ZXIC_UINT32 smmu0_int1_status31; + ZXIC_UINT32 smmu0_int1_status30; + ZXIC_UINT32 smmu0_int1_status29; + ZXIC_UINT32 smmu0_int1_status28; + ZXIC_UINT32 smmu0_int1_status27; + ZXIC_UINT32 smmu0_int1_status26; + ZXIC_UINT32 smmu0_int1_status25; + ZXIC_UINT32 smmu0_int1_status24; + ZXIC_UINT32 smmu0_int1_status23; + ZXIC_UINT32 smmu0_int1_status22; + ZXIC_UINT32 smmu0_int1_status21; + ZXIC_UINT32 smmu0_int1_status20; + ZXIC_UINT32 smmu0_int1_status19; + ZXIC_UINT32 smmu0_int1_status18; + ZXIC_UINT32 smmu0_int1_status17; + ZXIC_UINT32 smmu0_int1_status16; + ZXIC_UINT32 smmu0_int1_status15; + ZXIC_UINT32 smmu0_int1_status14; + ZXIC_UINT32 smmu0_int1_status13; + ZXIC_UINT32 smmu0_int1_status12; + ZXIC_UINT32 smmu0_int1_status11; + ZXIC_UINT32 smmu0_int1_status10; + ZXIC_UINT32 smmu0_int1_status9; + ZXIC_UINT32 smmu0_int1_status8; + ZXIC_UINT32 smmu0_int1_status7; + ZXIC_UINT32 smmu0_int1_status6; + ZXIC_UINT32 smmu0_int1_status5; + ZXIC_UINT32 smmu0_int1_status4; + ZXIC_UINT32 smmu0_int1_status3; + ZXIC_UINT32 smmu0_int1_status2; + ZXIC_UINT32 smmu0_int1_status1; + ZXIC_UINT32 
smmu0_int1_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT1_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int2_en_t { + ZXIC_UINT32 smmu0_int2_en31; + ZXIC_UINT32 smmu0_int2_en30; + ZXIC_UINT32 smmu0_int2_en29; + ZXIC_UINT32 smmu0_int2_en28; + ZXIC_UINT32 smmu0_int2_en27; + ZXIC_UINT32 smmu0_int2_en26; + ZXIC_UINT32 smmu0_int2_en25; + ZXIC_UINT32 smmu0_int2_en24; + ZXIC_UINT32 smmu0_int2_en23; + ZXIC_UINT32 smmu0_int2_en22; + ZXIC_UINT32 smmu0_int2_en21; + ZXIC_UINT32 smmu0_int2_en20; + ZXIC_UINT32 smmu0_int2_en19; + ZXIC_UINT32 smmu0_int2_en18; + ZXIC_UINT32 smmu0_int2_en17; + ZXIC_UINT32 smmu0_int2_en16; + ZXIC_UINT32 smmu0_int2_en15; + ZXIC_UINT32 smmu0_int2_en14; + ZXIC_UINT32 smmu0_int2_en13; + ZXIC_UINT32 smmu0_int2_en12; + ZXIC_UINT32 smmu0_int2_en11; + ZXIC_UINT32 smmu0_int2_en10; + ZXIC_UINT32 smmu0_int2_en9; + ZXIC_UINT32 smmu0_int2_en8; + ZXIC_UINT32 smmu0_int2_en7; + ZXIC_UINT32 smmu0_int2_en6; + ZXIC_UINT32 smmu0_int2_en5; + ZXIC_UINT32 smmu0_int2_en4; + ZXIC_UINT32 smmu0_int2_en3; + ZXIC_UINT32 smmu0_int2_en2; + ZXIC_UINT32 smmu0_int2_en1; + ZXIC_UINT32 smmu0_int2_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT2_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int2_mask_t { + ZXIC_UINT32 smmu0_int2_mask31; + ZXIC_UINT32 smmu0_int2_mask30; + ZXIC_UINT32 smmu0_int2_mask29; + ZXIC_UINT32 smmu0_int2_mask28; + ZXIC_UINT32 smmu0_int2_mask27; + ZXIC_UINT32 smmu0_int2_mask26; + ZXIC_UINT32 smmu0_int2_mask25; + ZXIC_UINT32 smmu0_int2_mask24; + ZXIC_UINT32 smmu0_int2_mask23; + ZXIC_UINT32 smmu0_int2_mask22; + ZXIC_UINT32 smmu0_int2_mask21; + ZXIC_UINT32 smmu0_int2_mask20; + ZXIC_UINT32 smmu0_int2_mask19; + ZXIC_UINT32 smmu0_int2_mask18; + ZXIC_UINT32 smmu0_int2_mask17; + ZXIC_UINT32 smmu0_int2_mask16; + ZXIC_UINT32 smmu0_int2_mask15; + ZXIC_UINT32 smmu0_int2_mask14; + ZXIC_UINT32 smmu0_int2_mask13; + ZXIC_UINT32 smmu0_int2_mask12; + ZXIC_UINT32 smmu0_int2_mask11; + ZXIC_UINT32 smmu0_int2_mask10; + ZXIC_UINT32 smmu0_int2_mask9; + ZXIC_UINT32 smmu0_int2_mask8; + ZXIC_UINT32 
smmu0_int2_mask7; + ZXIC_UINT32 smmu0_int2_mask6; + ZXIC_UINT32 smmu0_int2_mask5; + ZXIC_UINT32 smmu0_int2_mask4; + ZXIC_UINT32 smmu0_int2_mask3; + ZXIC_UINT32 smmu0_int2_mask2; + ZXIC_UINT32 smmu0_int2_mask1; + ZXIC_UINT32 smmu0_int2_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT2_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int2_status_t { + ZXIC_UINT32 smmu0_int2_status31; + ZXIC_UINT32 smmu0_int2_status30; + ZXIC_UINT32 smmu0_int2_status29; + ZXIC_UINT32 smmu0_int2_status28; + ZXIC_UINT32 smmu0_int2_status27; + ZXIC_UINT32 smmu0_int2_status26; + ZXIC_UINT32 smmu0_int2_status25; + ZXIC_UINT32 smmu0_int2_status24; + ZXIC_UINT32 smmu0_int2_status23; + ZXIC_UINT32 smmu0_int2_status22; + ZXIC_UINT32 smmu0_int2_status21; + ZXIC_UINT32 smmu0_int2_status20; + ZXIC_UINT32 smmu0_int2_status19; + ZXIC_UINT32 smmu0_int2_status18; + ZXIC_UINT32 smmu0_int2_status17; + ZXIC_UINT32 smmu0_int2_status16; + ZXIC_UINT32 smmu0_int2_status15; + ZXIC_UINT32 smmu0_int2_status14; + ZXIC_UINT32 smmu0_int2_status13; + ZXIC_UINT32 smmu0_int2_status12; + ZXIC_UINT32 smmu0_int2_status11; + ZXIC_UINT32 smmu0_int2_status10; + ZXIC_UINT32 smmu0_int2_status9; + ZXIC_UINT32 smmu0_int2_status8; + ZXIC_UINT32 smmu0_int2_status7; + ZXIC_UINT32 smmu0_int2_status6; + ZXIC_UINT32 smmu0_int2_status5; + ZXIC_UINT32 smmu0_int2_status4; + ZXIC_UINT32 smmu0_int2_status3; + ZXIC_UINT32 smmu0_int2_status2; + ZXIC_UINT32 smmu0_int2_status1; + ZXIC_UINT32 smmu0_int2_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT2_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int3_en_t { + ZXIC_UINT32 smmu0_int3_en31; + ZXIC_UINT32 smmu0_int3_en30; + ZXIC_UINT32 smmu0_int3_en29; + ZXIC_UINT32 smmu0_int3_en28; + ZXIC_UINT32 smmu0_int3_en27; + ZXIC_UINT32 smmu0_int3_en26; + ZXIC_UINT32 smmu0_int3_en25; + ZXIC_UINT32 smmu0_int3_en24; + ZXIC_UINT32 smmu0_int3_en23; + ZXIC_UINT32 smmu0_int3_en22; + ZXIC_UINT32 smmu0_int3_en21; + ZXIC_UINT32 smmu0_int3_en20; + ZXIC_UINT32 smmu0_int3_en19; + ZXIC_UINT32 smmu0_int3_en18; + ZXIC_UINT32 
smmu0_int3_en17; + ZXIC_UINT32 smmu0_int3_en16; + ZXIC_UINT32 smmu0_int3_en15; + ZXIC_UINT32 smmu0_int3_en14; + ZXIC_UINT32 smmu0_int3_en13; + ZXIC_UINT32 smmu0_int3_en12; + ZXIC_UINT32 smmu0_int3_en11; + ZXIC_UINT32 smmu0_int3_en10; + ZXIC_UINT32 smmu0_int3_en9; + ZXIC_UINT32 smmu0_int3_en8; + ZXIC_UINT32 smmu0_int3_en7; + ZXIC_UINT32 smmu0_int3_en6; + ZXIC_UINT32 smmu0_int3_en5; + ZXIC_UINT32 smmu0_int3_en4; + ZXIC_UINT32 smmu0_int3_en3; + ZXIC_UINT32 smmu0_int3_en2; + ZXIC_UINT32 smmu0_int3_en1; + ZXIC_UINT32 smmu0_int3_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT3_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int3_mask_t { + ZXIC_UINT32 smmu0_int3_mask31; + ZXIC_UINT32 smmu0_int3_mask30; + ZXIC_UINT32 smmu0_int3_mask29; + ZXIC_UINT32 smmu0_int3_mask28; + ZXIC_UINT32 smmu0_int3_mask27; + ZXIC_UINT32 smmu0_int3_mask26; + ZXIC_UINT32 smmu0_int3_mask25; + ZXIC_UINT32 smmu0_int3_mask24; + ZXIC_UINT32 smmu0_int3_mask23; + ZXIC_UINT32 smmu0_int3_mask22; + ZXIC_UINT32 smmu0_int3_mask21; + ZXIC_UINT32 smmu0_int3_mask20; + ZXIC_UINT32 smmu0_int3_mask19; + ZXIC_UINT32 smmu0_int3_mask18; + ZXIC_UINT32 smmu0_int3_mask17; + ZXIC_UINT32 smmu0_int3_mask16; + ZXIC_UINT32 smmu0_int3_mask15; + ZXIC_UINT32 smmu0_int3_mask14; + ZXIC_UINT32 smmu0_int3_mask13; + ZXIC_UINT32 smmu0_int3_mask12; + ZXIC_UINT32 smmu0_int3_mask11; + ZXIC_UINT32 smmu0_int3_mask10; + ZXIC_UINT32 smmu0_int3_mask9; + ZXIC_UINT32 smmu0_int3_mask8; + ZXIC_UINT32 smmu0_int3_mask7; + ZXIC_UINT32 smmu0_int3_mask6; + ZXIC_UINT32 smmu0_int3_mask5; + ZXIC_UINT32 smmu0_int3_mask4; + ZXIC_UINT32 smmu0_int3_mask3; + ZXIC_UINT32 smmu0_int3_mask2; + ZXIC_UINT32 smmu0_int3_mask1; + ZXIC_UINT32 smmu0_int3_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT3_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int3_status_t { + ZXIC_UINT32 smmu0_int3_status31; + ZXIC_UINT32 smmu0_int3_status30; + ZXIC_UINT32 smmu0_int3_status29; + ZXIC_UINT32 smmu0_int3_status28; + ZXIC_UINT32 smmu0_int3_status27; + ZXIC_UINT32 smmu0_int3_status26; + ZXIC_UINT32 
smmu0_int3_status25; + ZXIC_UINT32 smmu0_int3_status24; + ZXIC_UINT32 smmu0_int3_status23; + ZXIC_UINT32 smmu0_int3_status22; + ZXIC_UINT32 smmu0_int3_status21; + ZXIC_UINT32 smmu0_int3_status20; + ZXIC_UINT32 smmu0_int3_status19; + ZXIC_UINT32 smmu0_int3_status18; + ZXIC_UINT32 smmu0_int3_status17; + ZXIC_UINT32 smmu0_int3_status16; + ZXIC_UINT32 smmu0_int3_status15; + ZXIC_UINT32 smmu0_int3_status14; + ZXIC_UINT32 smmu0_int3_status13; + ZXIC_UINT32 smmu0_int3_status12; + ZXIC_UINT32 smmu0_int3_status11; + ZXIC_UINT32 smmu0_int3_status10; + ZXIC_UINT32 smmu0_int3_status9; + ZXIC_UINT32 smmu0_int3_status8; + ZXIC_UINT32 smmu0_int3_status7; + ZXIC_UINT32 smmu0_int3_status6; + ZXIC_UINT32 smmu0_int3_status5; + ZXIC_UINT32 smmu0_int3_status4; + ZXIC_UINT32 smmu0_int3_status3; + ZXIC_UINT32 smmu0_int3_status2; + ZXIC_UINT32 smmu0_int3_status1; + ZXIC_UINT32 smmu0_int3_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT3_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int4_en_t { + ZXIC_UINT32 smmu0_int4_en31; + ZXIC_UINT32 smmu0_int4_en30; + ZXIC_UINT32 smmu0_int4_en29; + ZXIC_UINT32 smmu0_int4_en28; + ZXIC_UINT32 smmu0_int4_en27; + ZXIC_UINT32 smmu0_int4_en26; + ZXIC_UINT32 smmu0_int4_en25; + ZXIC_UINT32 smmu0_int4_en24; + ZXIC_UINT32 smmu0_int4_en23; + ZXIC_UINT32 smmu0_int4_en22; + ZXIC_UINT32 smmu0_int4_en21; + ZXIC_UINT32 smmu0_int4_en20; + ZXIC_UINT32 smmu0_int4_en19; + ZXIC_UINT32 smmu0_int4_en18; + ZXIC_UINT32 smmu0_int4_en17; + ZXIC_UINT32 smmu0_int4_en16; + ZXIC_UINT32 smmu0_int4_en15; + ZXIC_UINT32 smmu0_int4_en14; + ZXIC_UINT32 smmu0_int4_en13; + ZXIC_UINT32 smmu0_int4_en12; + ZXIC_UINT32 smmu0_int4_en11; + ZXIC_UINT32 smmu0_int4_en10; + ZXIC_UINT32 smmu0_int4_en9; + ZXIC_UINT32 smmu0_int4_en8; + ZXIC_UINT32 smmu0_int4_en7; + ZXIC_UINT32 smmu0_int4_en6; + ZXIC_UINT32 smmu0_int4_en5; + ZXIC_UINT32 smmu0_int4_en4; + ZXIC_UINT32 smmu0_int4_en3; + ZXIC_UINT32 smmu0_int4_en2; + ZXIC_UINT32 smmu0_int4_en1; + ZXIC_UINT32 smmu0_int4_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT4_EN_T; + 
+typedef struct dpp_smmu0_smmu0_smmu0_int4_mask_t { + ZXIC_UINT32 smmu0_int4_mask31; + ZXIC_UINT32 smmu0_int4_mask30; + ZXIC_UINT32 smmu0_int4_mask29; + ZXIC_UINT32 smmu0_int4_mask28; + ZXIC_UINT32 smmu0_int4_mask27; + ZXIC_UINT32 smmu0_int4_mask26; + ZXIC_UINT32 smmu0_int4_mask25; + ZXIC_UINT32 smmu0_int4_mask24; + ZXIC_UINT32 smmu0_int4_mask23; + ZXIC_UINT32 smmu0_int4_mask22; + ZXIC_UINT32 smmu0_int4_mask21; + ZXIC_UINT32 smmu0_int4_mask20; + ZXIC_UINT32 smmu0_int4_mask19; + ZXIC_UINT32 smmu0_int4_mask18; + ZXIC_UINT32 smmu0_int4_mask17; + ZXIC_UINT32 smmu0_int4_mask16; + ZXIC_UINT32 smmu0_int4_mask15; + ZXIC_UINT32 smmu0_int4_mask14; + ZXIC_UINT32 smmu0_int4_mask13; + ZXIC_UINT32 smmu0_int4_mask12; + ZXIC_UINT32 smmu0_int4_mask11; + ZXIC_UINT32 smmu0_int4_mask10; + ZXIC_UINT32 smmu0_int4_mask9; + ZXIC_UINT32 smmu0_int4_mask8; + ZXIC_UINT32 smmu0_int4_mask7; + ZXIC_UINT32 smmu0_int4_mask6; + ZXIC_UINT32 smmu0_int4_mask5; + ZXIC_UINT32 smmu0_int4_mask4; + ZXIC_UINT32 smmu0_int4_mask3; + ZXIC_UINT32 smmu0_int4_mask2; + ZXIC_UINT32 smmu0_int4_mask1; + ZXIC_UINT32 smmu0_int4_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT4_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int4_status_t { + ZXIC_UINT32 smmu0_int4_status31; + ZXIC_UINT32 smmu0_int4_status30; + ZXIC_UINT32 smmu0_int4_status29; + ZXIC_UINT32 smmu0_int4_status28; + ZXIC_UINT32 smmu0_int4_status27; + ZXIC_UINT32 smmu0_int4_status26; + ZXIC_UINT32 smmu0_int4_status25; + ZXIC_UINT32 smmu0_int4_status24; + ZXIC_UINT32 smmu0_int4_status23; + ZXIC_UINT32 smmu0_int4_status22; + ZXIC_UINT32 smmu0_int4_status21; + ZXIC_UINT32 smmu0_int4_status20; + ZXIC_UINT32 smmu0_int4_status19; + ZXIC_UINT32 smmu0_int4_status18; + ZXIC_UINT32 smmu0_int4_status17; + ZXIC_UINT32 smmu0_int4_status16; + ZXIC_UINT32 smmu0_int4_status15; + ZXIC_UINT32 smmu0_int4_status14; + ZXIC_UINT32 smmu0_int4_status13; + ZXIC_UINT32 smmu0_int4_status12; + ZXIC_UINT32 smmu0_int4_status11; + ZXIC_UINT32 smmu0_int4_status10; + ZXIC_UINT32 smmu0_int4_status9; + 
ZXIC_UINT32 smmu0_int4_status8; + ZXIC_UINT32 smmu0_int4_status7; + ZXIC_UINT32 smmu0_int4_status6; + ZXIC_UINT32 smmu0_int4_status5; + ZXIC_UINT32 smmu0_int4_status4; + ZXIC_UINT32 smmu0_int4_status3; + ZXIC_UINT32 smmu0_int4_status2; + ZXIC_UINT32 smmu0_int4_status1; + ZXIC_UINT32 smmu0_int4_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT4_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int5_en_t { + ZXIC_UINT32 smmu0_int5_en31; + ZXIC_UINT32 smmu0_int5_en30; + ZXIC_UINT32 smmu0_int5_en29; + ZXIC_UINT32 smmu0_int5_en28; + ZXIC_UINT32 smmu0_int5_en27; + ZXIC_UINT32 smmu0_int5_en26; + ZXIC_UINT32 smmu0_int5_en25; + ZXIC_UINT32 smmu0_int5_en24; + ZXIC_UINT32 smmu0_int5_en23; + ZXIC_UINT32 smmu0_int5_en22; + ZXIC_UINT32 smmu0_int5_en21; + ZXIC_UINT32 smmu0_int5_en20; + ZXIC_UINT32 smmu0_int5_en19; + ZXIC_UINT32 smmu0_int5_en18; + ZXIC_UINT32 smmu0_int5_en17; + ZXIC_UINT32 smmu0_int5_en16; + ZXIC_UINT32 smmu0_int5_en15; + ZXIC_UINT32 smmu0_int5_en14; + ZXIC_UINT32 smmu0_int5_en13; + ZXIC_UINT32 smmu0_int5_en12; + ZXIC_UINT32 smmu0_int5_en11; + ZXIC_UINT32 smmu0_int5_en10; + ZXIC_UINT32 smmu0_int5_en9; + ZXIC_UINT32 smmu0_int5_en8; + ZXIC_UINT32 smmu0_int5_en7; + ZXIC_UINT32 smmu0_int5_en6; + ZXIC_UINT32 smmu0_int5_en5; + ZXIC_UINT32 smmu0_int5_en4; + ZXIC_UINT32 smmu0_int5_en3; + ZXIC_UINT32 smmu0_int5_en2; + ZXIC_UINT32 smmu0_int5_en1; + ZXIC_UINT32 smmu0_int5_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT5_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int5_mask_t { + ZXIC_UINT32 smmu0_int5_mask31; + ZXIC_UINT32 smmu0_int5_mask30; + ZXIC_UINT32 smmu0_int5_mask29; + ZXIC_UINT32 smmu0_int5_mask28; + ZXIC_UINT32 smmu0_int5_mask27; + ZXIC_UINT32 smmu0_int5_mask26; + ZXIC_UINT32 smmu0_int5_mask25; + ZXIC_UINT32 smmu0_int5_mask24; + ZXIC_UINT32 smmu0_int5_mask23; + ZXIC_UINT32 smmu0_int5_mask22; + ZXIC_UINT32 smmu0_int5_mask21; + ZXIC_UINT32 smmu0_int5_mask20; + ZXIC_UINT32 smmu0_int5_mask19; + ZXIC_UINT32 smmu0_int5_mask18; + ZXIC_UINT32 smmu0_int5_mask17; + ZXIC_UINT32 smmu0_int5_mask16; + 
ZXIC_UINT32 smmu0_int5_mask15; + ZXIC_UINT32 smmu0_int5_mask14; + ZXIC_UINT32 smmu0_int5_mask13; + ZXIC_UINT32 smmu0_int5_mask12; + ZXIC_UINT32 smmu0_int5_mask11; + ZXIC_UINT32 smmu0_int5_mask10; + ZXIC_UINT32 smmu0_int5_mask9; + ZXIC_UINT32 smmu0_int5_mask8; + ZXIC_UINT32 smmu0_int5_mask7; + ZXIC_UINT32 smmu0_int5_mask6; + ZXIC_UINT32 smmu0_int5_mask5; + ZXIC_UINT32 smmu0_int5_mask4; + ZXIC_UINT32 smmu0_int5_mask3; + ZXIC_UINT32 smmu0_int5_mask2; + ZXIC_UINT32 smmu0_int5_mask1; + ZXIC_UINT32 smmu0_int5_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT5_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int5_status_t { + ZXIC_UINT32 smmu0_int5_status31; + ZXIC_UINT32 smmu0_int5_status30; + ZXIC_UINT32 smmu0_int5_status29; + ZXIC_UINT32 smmu0_int5_status28; + ZXIC_UINT32 smmu0_int5_status27; + ZXIC_UINT32 smmu0_int5_status26; + ZXIC_UINT32 smmu0_int5_status25; + ZXIC_UINT32 smmu0_int5_status24; + ZXIC_UINT32 smmu0_int5_status23; + ZXIC_UINT32 smmu0_int5_status22; + ZXIC_UINT32 smmu0_int5_status21; + ZXIC_UINT32 smmu0_int5_status20; + ZXIC_UINT32 smmu0_int5_status19; + ZXIC_UINT32 smmu0_int5_status18; + ZXIC_UINT32 smmu0_int5_status17; + ZXIC_UINT32 smmu0_int5_status16; + ZXIC_UINT32 smmu0_int5_status15; + ZXIC_UINT32 smmu0_int5_status14; + ZXIC_UINT32 smmu0_int5_status13; + ZXIC_UINT32 smmu0_int5_status12; + ZXIC_UINT32 smmu0_int5_status11; + ZXIC_UINT32 smmu0_int5_status10; + ZXIC_UINT32 smmu0_int5_status9; + ZXIC_UINT32 smmu0_int5_status8; + ZXIC_UINT32 smmu0_int5_status7; + ZXIC_UINT32 smmu0_int5_status6; + ZXIC_UINT32 smmu0_int5_status5; + ZXIC_UINT32 smmu0_int5_status4; + ZXIC_UINT32 smmu0_int5_status3; + ZXIC_UINT32 smmu0_int5_status2; + ZXIC_UINT32 smmu0_int5_status1; + ZXIC_UINT32 smmu0_int5_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT5_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int6_en_t { + ZXIC_UINT32 smmu0_int6_en31; + ZXIC_UINT32 smmu0_int6_en30; + ZXIC_UINT32 smmu0_int6_en29; + ZXIC_UINT32 smmu0_int6_en28; + ZXIC_UINT32 smmu0_int6_en27; + ZXIC_UINT32 smmu0_int6_en26; + 
ZXIC_UINT32 smmu0_int6_en25; + ZXIC_UINT32 smmu0_int6_en24; + ZXIC_UINT32 smmu0_int6_en23; + ZXIC_UINT32 smmu0_int6_en22; + ZXIC_UINT32 smmu0_int6_en21; + ZXIC_UINT32 smmu0_int6_en20; + ZXIC_UINT32 smmu0_int6_en19; + ZXIC_UINT32 smmu0_int6_en18; + ZXIC_UINT32 smmu0_int6_en17; + ZXIC_UINT32 smmu0_int6_en16; + ZXIC_UINT32 smmu0_int6_en15; + ZXIC_UINT32 smmu0_int6_en14; + ZXIC_UINT32 smmu0_int6_en13; + ZXIC_UINT32 smmu0_int6_en12; + ZXIC_UINT32 smmu0_int6_en11; + ZXIC_UINT32 smmu0_int6_en10; + ZXIC_UINT32 smmu0_int6_en9; + ZXIC_UINT32 smmu0_int6_en8; + ZXIC_UINT32 smmu0_int6_en7; + ZXIC_UINT32 smmu0_int6_en6; + ZXIC_UINT32 smmu0_int6_en5; + ZXIC_UINT32 smmu0_int6_en4; + ZXIC_UINT32 smmu0_int6_en3; + ZXIC_UINT32 smmu0_int6_en2; + ZXIC_UINT32 smmu0_int6_en1; + ZXIC_UINT32 smmu0_int6_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT6_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int6_mask_t { + ZXIC_UINT32 smmu0_int6_mask31; + ZXIC_UINT32 smmu0_int6_mask30; + ZXIC_UINT32 smmu0_int6_mask29; + ZXIC_UINT32 smmu0_int6_mask28; + ZXIC_UINT32 smmu0_int6_mask27; + ZXIC_UINT32 smmu0_int6_mask26; + ZXIC_UINT32 smmu0_int6_mask25; + ZXIC_UINT32 smmu0_int6_mask24; + ZXIC_UINT32 smmu0_int6_mask23; + ZXIC_UINT32 smmu0_int6_mask22; + ZXIC_UINT32 smmu0_int6_mask21; + ZXIC_UINT32 smmu0_int6_mask20; + ZXIC_UINT32 smmu0_int6_mask19; + ZXIC_UINT32 smmu0_int6_mask18; + ZXIC_UINT32 smmu0_int6_mask17; + ZXIC_UINT32 smmu0_int6_mask16; + ZXIC_UINT32 smmu0_int6_mask15; + ZXIC_UINT32 smmu0_int6_mask14; + ZXIC_UINT32 smmu0_int6_mask13; + ZXIC_UINT32 smmu0_int6_mask12; + ZXIC_UINT32 smmu0_int6_mask11; + ZXIC_UINT32 smmu0_int6_mask10; + ZXIC_UINT32 smmu0_int6_mask9; + ZXIC_UINT32 smmu0_int6_mask8; + ZXIC_UINT32 smmu0_int6_mask7; + ZXIC_UINT32 smmu0_int6_mask6; + ZXIC_UINT32 smmu0_int6_mask5; + ZXIC_UINT32 smmu0_int6_mask4; + ZXIC_UINT32 smmu0_int6_mask3; + ZXIC_UINT32 smmu0_int6_mask2; + ZXIC_UINT32 smmu0_int6_mask1; + ZXIC_UINT32 smmu0_int6_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT6_MASK_T; + +typedef struct 
dpp_smmu0_smmu0_smmu0_int6_status_t { + ZXIC_UINT32 smmu0_int6_status31; + ZXIC_UINT32 smmu0_int6_status30; + ZXIC_UINT32 smmu0_int6_status29; + ZXIC_UINT32 smmu0_int6_status28; + ZXIC_UINT32 smmu0_int6_status27; + ZXIC_UINT32 smmu0_int6_status26; + ZXIC_UINT32 smmu0_int6_status25; + ZXIC_UINT32 smmu0_int6_status24; + ZXIC_UINT32 smmu0_int6_status23; + ZXIC_UINT32 smmu0_int6_status22; + ZXIC_UINT32 smmu0_int6_status21; + ZXIC_UINT32 smmu0_int6_status20; + ZXIC_UINT32 smmu0_int6_status19; + ZXIC_UINT32 smmu0_int6_status18; + ZXIC_UINT32 smmu0_int6_status17; + ZXIC_UINT32 smmu0_int6_status16; + ZXIC_UINT32 smmu0_int6_status15; + ZXIC_UINT32 smmu0_int6_status14; + ZXIC_UINT32 smmu0_int6_status13; + ZXIC_UINT32 smmu0_int6_status12; + ZXIC_UINT32 smmu0_int6_status11; + ZXIC_UINT32 smmu0_int6_status10; + ZXIC_UINT32 smmu0_int6_status9; + ZXIC_UINT32 smmu0_int6_status8; + ZXIC_UINT32 smmu0_int6_status7; + ZXIC_UINT32 smmu0_int6_status6; + ZXIC_UINT32 smmu0_int6_status5; + ZXIC_UINT32 smmu0_int6_status4; + ZXIC_UINT32 smmu0_int6_status3; + ZXIC_UINT32 smmu0_int6_status2; + ZXIC_UINT32 smmu0_int6_status1; + ZXIC_UINT32 smmu0_int6_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT6_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int7_en_t { + ZXIC_UINT32 smmu0_int7_en31; + ZXIC_UINT32 smmu0_int7_en30; + ZXIC_UINT32 smmu0_int7_en29; + ZXIC_UINT32 smmu0_int7_en28; + ZXIC_UINT32 smmu0_int7_en27; + ZXIC_UINT32 smmu0_int7_en26; + ZXIC_UINT32 smmu0_int7_en25; + ZXIC_UINT32 smmu0_int7_en24; + ZXIC_UINT32 smmu0_int7_en23; + ZXIC_UINT32 smmu0_int7_en22; + ZXIC_UINT32 smmu0_int7_en21; + ZXIC_UINT32 smmu0_int7_en20; + ZXIC_UINT32 smmu0_int7_en19; + ZXIC_UINT32 smmu0_int7_en18; + ZXIC_UINT32 smmu0_int7_en17; + ZXIC_UINT32 smmu0_int7_en16; + ZXIC_UINT32 smmu0_int7_en15; + ZXIC_UINT32 smmu0_int7_en14; + ZXIC_UINT32 smmu0_int7_en13; + ZXIC_UINT32 smmu0_int7_en12; + ZXIC_UINT32 smmu0_int7_en11; + ZXIC_UINT32 smmu0_int7_en10; + ZXIC_UINT32 smmu0_int7_en9; + ZXIC_UINT32 smmu0_int7_en8; + ZXIC_UINT32 
smmu0_int7_en7; + ZXIC_UINT32 smmu0_int7_en6; + ZXIC_UINT32 smmu0_int7_en5; + ZXIC_UINT32 smmu0_int7_en4; + ZXIC_UINT32 smmu0_int7_en3; + ZXIC_UINT32 smmu0_int7_en2; + ZXIC_UINT32 smmu0_int7_en1; + ZXIC_UINT32 smmu0_int7_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT7_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int7_mask_t { + ZXIC_UINT32 smmu0_int7_mask31; + ZXIC_UINT32 smmu0_int7_mask30; + ZXIC_UINT32 smmu0_int7_mask29; + ZXIC_UINT32 smmu0_int7_mask28; + ZXIC_UINT32 smmu0_int7_mask27; + ZXIC_UINT32 smmu0_int7_mask26; + ZXIC_UINT32 smmu0_int7_mask25; + ZXIC_UINT32 smmu0_int7_mask24; + ZXIC_UINT32 smmu0_int7_mask23; + ZXIC_UINT32 smmu0_int7_mask22; + ZXIC_UINT32 smmu0_int7_mask21; + ZXIC_UINT32 smmu0_int7_mask20; + ZXIC_UINT32 smmu0_int7_mask19; + ZXIC_UINT32 smmu0_int7_mask18; + ZXIC_UINT32 smmu0_int7_mask17; + ZXIC_UINT32 smmu0_int7_mask16; + ZXIC_UINT32 smmu0_int7_mask15; + ZXIC_UINT32 smmu0_int7_mask14; + ZXIC_UINT32 smmu0_int7_mask13; + ZXIC_UINT32 smmu0_int7_mask12; + ZXIC_UINT32 smmu0_int7_mask11; + ZXIC_UINT32 smmu0_int7_mask10; + ZXIC_UINT32 smmu0_int7_mask9; + ZXIC_UINT32 smmu0_int7_mask8; + ZXIC_UINT32 smmu0_int7_mask7; + ZXIC_UINT32 smmu0_int7_mask6; + ZXIC_UINT32 smmu0_int7_mask5; + ZXIC_UINT32 smmu0_int7_mask4; + ZXIC_UINT32 smmu0_int7_mask3; + ZXIC_UINT32 smmu0_int7_mask2; + ZXIC_UINT32 smmu0_int7_mask1; + ZXIC_UINT32 smmu0_int7_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT7_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int7_status_t { + ZXIC_UINT32 smmu0_int7_status31; + ZXIC_UINT32 smmu0_int7_status30; + ZXIC_UINT32 smmu0_int7_status29; + ZXIC_UINT32 smmu0_int7_status28; + ZXIC_UINT32 smmu0_int7_status27; + ZXIC_UINT32 smmu0_int7_status26; + ZXIC_UINT32 smmu0_int7_status25; + ZXIC_UINT32 smmu0_int7_status24; + ZXIC_UINT32 smmu0_int7_status23; + ZXIC_UINT32 smmu0_int7_status22; + ZXIC_UINT32 smmu0_int7_status21; + ZXIC_UINT32 smmu0_int7_status20; + ZXIC_UINT32 smmu0_int7_status19; + ZXIC_UINT32 smmu0_int7_status18; + ZXIC_UINT32 smmu0_int7_status17; + ZXIC_UINT32 
smmu0_int7_status16; + ZXIC_UINT32 smmu0_int7_status15; + ZXIC_UINT32 smmu0_int7_status14; + ZXIC_UINT32 smmu0_int7_status13; + ZXIC_UINT32 smmu0_int7_status12; + ZXIC_UINT32 smmu0_int7_status11; + ZXIC_UINT32 smmu0_int7_status10; + ZXIC_UINT32 smmu0_int7_status9; + ZXIC_UINT32 smmu0_int7_status8; + ZXIC_UINT32 smmu0_int7_status7; + ZXIC_UINT32 smmu0_int7_status6; + ZXIC_UINT32 smmu0_int7_status5; + ZXIC_UINT32 smmu0_int7_status4; + ZXIC_UINT32 smmu0_int7_status3; + ZXIC_UINT32 smmu0_int7_status2; + ZXIC_UINT32 smmu0_int7_status1; + ZXIC_UINT32 smmu0_int7_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT7_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int8_en_t { + ZXIC_UINT32 smmu0_int8_en31; + ZXIC_UINT32 smmu0_int8_en30; + ZXIC_UINT32 smmu0_int8_en29; + ZXIC_UINT32 smmu0_int8_en28; + ZXIC_UINT32 smmu0_int8_en27; + ZXIC_UINT32 smmu0_int8_en26; + ZXIC_UINT32 smmu0_int8_en25; + ZXIC_UINT32 smmu0_int8_en24; + ZXIC_UINT32 smmu0_int8_en23; + ZXIC_UINT32 smmu0_int8_en22; + ZXIC_UINT32 smmu0_int8_en21; + ZXIC_UINT32 smmu0_int8_en20; + ZXIC_UINT32 smmu0_int8_en19; + ZXIC_UINT32 smmu0_int8_en18; + ZXIC_UINT32 smmu0_int8_en17; + ZXIC_UINT32 smmu0_int8_en16; + ZXIC_UINT32 smmu0_int8_en15; + ZXIC_UINT32 smmu0_int8_en14; + ZXIC_UINT32 smmu0_int8_en13; + ZXIC_UINT32 smmu0_int8_en12; + ZXIC_UINT32 smmu0_int8_en11; + ZXIC_UINT32 smmu0_int8_en10; + ZXIC_UINT32 smmu0_int8_en9; + ZXIC_UINT32 smmu0_int8_en8; + ZXIC_UINT32 smmu0_int8_en7; + ZXIC_UINT32 smmu0_int8_en6; + ZXIC_UINT32 smmu0_int8_en5; + ZXIC_UINT32 smmu0_int8_en4; + ZXIC_UINT32 smmu0_int8_en3; + ZXIC_UINT32 smmu0_int8_en2; + ZXIC_UINT32 smmu0_int8_en1; + ZXIC_UINT32 smmu0_int8_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT8_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int8_mask_t { + ZXIC_UINT32 smmu0_int8_mask31; + ZXIC_UINT32 smmu0_int8_mask30; + ZXIC_UINT32 smmu0_int8_mask29; + ZXIC_UINT32 smmu0_int8_mask28; + ZXIC_UINT32 smmu0_int8_mask27; + ZXIC_UINT32 smmu0_int8_mask26; + ZXIC_UINT32 smmu0_int8_mask25; + ZXIC_UINT32 smmu0_int8_mask24; 
+ ZXIC_UINT32 smmu0_int8_mask23; + ZXIC_UINT32 smmu0_int8_mask22; + ZXIC_UINT32 smmu0_int8_mask21; + ZXIC_UINT32 smmu0_int8_mask20; + ZXIC_UINT32 smmu0_int8_mask19; + ZXIC_UINT32 smmu0_int8_mask18; + ZXIC_UINT32 smmu0_int8_mask17; + ZXIC_UINT32 smmu0_int8_mask16; + ZXIC_UINT32 smmu0_int8_mask15; + ZXIC_UINT32 smmu0_int8_mask14; + ZXIC_UINT32 smmu0_int8_mask13; + ZXIC_UINT32 smmu0_int8_mask12; + ZXIC_UINT32 smmu0_int8_mask11; + ZXIC_UINT32 smmu0_int8_mask10; + ZXIC_UINT32 smmu0_int8_mask9; + ZXIC_UINT32 smmu0_int8_mask8; + ZXIC_UINT32 smmu0_int8_mask7; + ZXIC_UINT32 smmu0_int8_mask6; + ZXIC_UINT32 smmu0_int8_mask5; + ZXIC_UINT32 smmu0_int8_mask4; + ZXIC_UINT32 smmu0_int8_mask3; + ZXIC_UINT32 smmu0_int8_mask2; + ZXIC_UINT32 smmu0_int8_mask1; + ZXIC_UINT32 smmu0_int8_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT8_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int8_status_t { + ZXIC_UINT32 smmu0_int8_status31; + ZXIC_UINT32 smmu0_int8_status30; + ZXIC_UINT32 smmu0_int8_status29; + ZXIC_UINT32 smmu0_int8_status28; + ZXIC_UINT32 smmu0_int8_status27; + ZXIC_UINT32 smmu0_int8_status26; + ZXIC_UINT32 smmu0_int8_status25; + ZXIC_UINT32 smmu0_int8_status24; + ZXIC_UINT32 smmu0_int8_status23; + ZXIC_UINT32 smmu0_int8_status22; + ZXIC_UINT32 smmu0_int8_status21; + ZXIC_UINT32 smmu0_int8_status20; + ZXIC_UINT32 smmu0_int8_status19; + ZXIC_UINT32 smmu0_int8_status18; + ZXIC_UINT32 smmu0_int8_status17; + ZXIC_UINT32 smmu0_int8_status16; + ZXIC_UINT32 smmu0_int8_status15; + ZXIC_UINT32 smmu0_int8_status14; + ZXIC_UINT32 smmu0_int8_status13; + ZXIC_UINT32 smmu0_int8_status12; + ZXIC_UINT32 smmu0_int8_status11; + ZXIC_UINT32 smmu0_int8_status10; + ZXIC_UINT32 smmu0_int8_status9; + ZXIC_UINT32 smmu0_int8_status8; + ZXIC_UINT32 smmu0_int8_status7; + ZXIC_UINT32 smmu0_int8_status6; + ZXIC_UINT32 smmu0_int8_status5; + ZXIC_UINT32 smmu0_int8_status4; + ZXIC_UINT32 smmu0_int8_status3; + ZXIC_UINT32 smmu0_int8_status2; + ZXIC_UINT32 smmu0_int8_status1; + ZXIC_UINT32 smmu0_int8_status0; +} 
DPP_SMMU0_SMMU0_SMMU0_INT8_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int9_en_t { + ZXIC_UINT32 smmu0_int8_en31; + ZXIC_UINT32 smmu0_int8_en30; + ZXIC_UINT32 smmu0_int8_en29; + ZXIC_UINT32 smmu0_int8_en28; + ZXIC_UINT32 smmu0_int8_en27; + ZXIC_UINT32 smmu0_int8_en26; + ZXIC_UINT32 smmu0_int8_en25; + ZXIC_UINT32 smmu0_int8_en24; + ZXIC_UINT32 smmu0_int8_en23; + ZXIC_UINT32 smmu0_int8_en22; + ZXIC_UINT32 smmu0_int8_en21; + ZXIC_UINT32 smmu0_int8_en20; + ZXIC_UINT32 smmu0_int9_en19; + ZXIC_UINT32 smmu0_int9_en18; + ZXIC_UINT32 smmu0_int9_en17; + ZXIC_UINT32 smmu0_int9_en16; + ZXIC_UINT32 smmu0_int9_en15; + ZXIC_UINT32 smmu0_int9_en14; + ZXIC_UINT32 smmu0_int9_en13; + ZXIC_UINT32 smmu0_int9_en12; + ZXIC_UINT32 smmu0_int9_en11; + ZXIC_UINT32 smmu0_int9_en10; + ZXIC_UINT32 smmu0_int9_en9; + ZXIC_UINT32 smmu0_int9_en8; + ZXIC_UINT32 smmu0_int9_en7; + ZXIC_UINT32 smmu0_int9_en6; + ZXIC_UINT32 smmu0_int9_en5; + ZXIC_UINT32 smmu0_int9_en4; + ZXIC_UINT32 smmu0_int9_en3; + ZXIC_UINT32 smmu0_int9_en2; + ZXIC_UINT32 smmu0_int9_en1; + ZXIC_UINT32 smmu0_int9_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT9_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int9_mask_t { + ZXIC_UINT32 smmu0_int9_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT9_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int9_status_t { + ZXIC_UINT32 smmu0_int9_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT9_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int10_en_t { + ZXIC_UINT32 smmu0_int10_en31; + ZXIC_UINT32 smmu0_int10_en30; + ZXIC_UINT32 smmu0_int10_en29; + ZXIC_UINT32 smmu0_int10_en28; + ZXIC_UINT32 smmu0_int10_en27; + ZXIC_UINT32 smmu0_int10_en26; + ZXIC_UINT32 smmu0_int10_en25; + ZXIC_UINT32 smmu0_int10_en24; + ZXIC_UINT32 smmu0_int10_en23; + ZXIC_UINT32 smmu0_int10_en22; + ZXIC_UINT32 smmu0_int10_en21; + ZXIC_UINT32 smmu0_int10_en20; + ZXIC_UINT32 smmu0_int10_en19; + ZXIC_UINT32 smmu0_int10_en18; + ZXIC_UINT32 smmu0_int10_en17; + ZXIC_UINT32 smmu0_int10_en16; + ZXIC_UINT32 smmu0_int10_en15; + ZXIC_UINT32 smmu0_int10_en14; + 
ZXIC_UINT32 smmu0_int10_en13; + ZXIC_UINT32 smmu0_int10_en12; + ZXIC_UINT32 smmu0_int10_en11; + ZXIC_UINT32 smmu0_int10_en10; + ZXIC_UINT32 smmu0_int10_en9; + ZXIC_UINT32 smmu0_int10_en8; + ZXIC_UINT32 smmu0_int10_en7; + ZXIC_UINT32 smmu0_int10_en6; + ZXIC_UINT32 smmu0_int10_en5; + ZXIC_UINT32 smmu0_int10_en4; + ZXIC_UINT32 smmu0_int10_en3; + ZXIC_UINT32 smmu0_int10_en2; + ZXIC_UINT32 smmu0_int10_en1; + ZXIC_UINT32 smmu0_int10_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT10_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int10_mask_t { + ZXIC_UINT32 smmu0_int10_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT10_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int10_status_t { + ZXIC_UINT32 smmu0_int10_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT10_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int11_en_t { + ZXIC_UINT32 smmu0_int11_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT11_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int11_mask_t { + ZXIC_UINT32 smmu0_int11_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT11_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int11_status_t { + ZXIC_UINT32 smmu0_int11_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT11_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int12_en_t { + ZXIC_UINT32 smmu0_int12_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT12_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int12_mask_t { + ZXIC_UINT32 smmu0_int12_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT12_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int12_status_t { + ZXIC_UINT32 smmu0_int12_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT12_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int13_en_t { + ZXIC_UINT32 smmu0_int13_en19; + ZXIC_UINT32 smmu0_int13_en18; + ZXIC_UINT32 smmu0_int13_en17; + ZXIC_UINT32 smmu0_int13_en16; + ZXIC_UINT32 smmu0_int13_en15; + ZXIC_UINT32 smmu0_int13_en14; + ZXIC_UINT32 smmu0_int13_en13; + ZXIC_UINT32 smmu0_int13_en12; + ZXIC_UINT32 smmu0_int13_en11; + ZXIC_UINT32 smmu0_int13_en10; + ZXIC_UINT32 smmu0_int13_en9; + ZXIC_UINT32 smmu0_int13_en8; + ZXIC_UINT32 smmu0_int13_en7; + ZXIC_UINT32 
smmu0_int13_en6; + ZXIC_UINT32 smmu0_int13_en5; + ZXIC_UINT32 smmu0_int13_en4; + ZXIC_UINT32 smmu0_int13_en3; + ZXIC_UINT32 smmu0_int13_en2; + ZXIC_UINT32 smmu0_int13_en1; + ZXIC_UINT32 smmu0_int13_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT13_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int13_mask_t { + ZXIC_UINT32 smmu0_int13_mask19; + ZXIC_UINT32 smmu0_int13_mask18; + ZXIC_UINT32 smmu0_int13_mask17; + ZXIC_UINT32 smmu0_int13_mask16; + ZXIC_UINT32 smmu0_int13_mask15; + ZXIC_UINT32 smmu0_int13_mask14; + ZXIC_UINT32 smmu0_int13_mask13; + ZXIC_UINT32 smmu0_int13_mask12; + ZXIC_UINT32 smmu0_int13_mask11; + ZXIC_UINT32 smmu0_int13_mask10; + ZXIC_UINT32 smmu0_int13_mask9; + ZXIC_UINT32 smmu0_int13_mask8; + ZXIC_UINT32 smmu0_int13_mask7; + ZXIC_UINT32 smmu0_int13_mask6; + ZXIC_UINT32 smmu0_int13_mask5; + ZXIC_UINT32 smmu0_int13_mask4; + ZXIC_UINT32 smmu0_int13_mask3; + ZXIC_UINT32 smmu0_int13_mask2; + ZXIC_UINT32 smmu0_int13_mask1; + ZXIC_UINT32 smmu0_int13_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT13_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int13_status_t { + ZXIC_UINT32 smmu0_int13_status19; + ZXIC_UINT32 smmu0_int13_status18; + ZXIC_UINT32 smmu0_int13_status17; + ZXIC_UINT32 smmu0_int13_status16; + ZXIC_UINT32 smmu0_int13_status15; + ZXIC_UINT32 smmu0_int13_status14; + ZXIC_UINT32 smmu0_int13_status13; + ZXIC_UINT32 smmu0_int13_status12; + ZXIC_UINT32 smmu0_int13_status11; + ZXIC_UINT32 smmu0_int13_status10; + ZXIC_UINT32 smmu0_int13_status9; + ZXIC_UINT32 smmu0_int13_status8; + ZXIC_UINT32 smmu0_int13_status7; + ZXIC_UINT32 smmu0_int13_status6; + ZXIC_UINT32 smmu0_int13_status5; + ZXIC_UINT32 smmu0_int13_status4; + ZXIC_UINT32 smmu0_int13_status3; + ZXIC_UINT32 smmu0_int13_status2; + ZXIC_UINT32 smmu0_int13_status1; + ZXIC_UINT32 smmu0_int13_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT13_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int14_en_t { + ZXIC_UINT32 smmu0_int14_en16; + ZXIC_UINT32 smmu0_int14_en15; + ZXIC_UINT32 smmu0_int14_en14; + ZXIC_UINT32 smmu0_int14_en13; + 
ZXIC_UINT32 smmu0_int14_en12; + ZXIC_UINT32 smmu0_int14_en11; + ZXIC_UINT32 smmu0_int14_en10; + ZXIC_UINT32 smmu0_int14_en9; + ZXIC_UINT32 smmu0_int14_en8; + ZXIC_UINT32 smmu0_int14_en7; + ZXIC_UINT32 smmu0_int14_en6; + ZXIC_UINT32 smmu0_int14_en5; + ZXIC_UINT32 smmu0_int14_en4; + ZXIC_UINT32 smmu0_int14_en3; + ZXIC_UINT32 smmu0_int14_en2; + ZXIC_UINT32 smmu0_int14_en1; + ZXIC_UINT32 smmu0_int14_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT14_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int14_mask_t { + ZXIC_UINT32 smmu0_int14_mask16; + ZXIC_UINT32 smmu0_int14_mask15; + ZXIC_UINT32 smmu0_int14_mask14; + ZXIC_UINT32 smmu0_int14_mask13; + ZXIC_UINT32 smmu0_int14_mask12; + ZXIC_UINT32 smmu0_int14_mask11; + ZXIC_UINT32 smmu0_int14_mask10; + ZXIC_UINT32 smmu0_int14_mask9; + ZXIC_UINT32 smmu0_int14_mask8; + ZXIC_UINT32 smmu0_int14_mask7; + ZXIC_UINT32 smmu0_int14_mask6; + ZXIC_UINT32 smmu0_int14_mask5; + ZXIC_UINT32 smmu0_int14_mask4; + ZXIC_UINT32 smmu0_int14_mask3; + ZXIC_UINT32 smmu0_int14_mask2; + ZXIC_UINT32 smmu0_int14_mask1; + ZXIC_UINT32 smmu0_int14_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT14_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int14_status_t { + ZXIC_UINT32 smmu0_int14_status16; + ZXIC_UINT32 smmu0_int14_status15; + ZXIC_UINT32 smmu0_int14_status14; + ZXIC_UINT32 smmu0_int14_status13; + ZXIC_UINT32 smmu0_int14_status12; + ZXIC_UINT32 smmu0_int14_status11; + ZXIC_UINT32 smmu0_int14_status10; + ZXIC_UINT32 smmu0_int14_status9; + ZXIC_UINT32 smmu0_int14_status8; + ZXIC_UINT32 smmu0_int14_status7; + ZXIC_UINT32 smmu0_int14_status6; + ZXIC_UINT32 smmu0_int14_status5; + ZXIC_UINT32 smmu0_int14_status4; + ZXIC_UINT32 smmu0_int14_status3; + ZXIC_UINT32 smmu0_int14_status2; + ZXIC_UINT32 smmu0_int14_status1; + ZXIC_UINT32 smmu0_int14_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT14_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_ecc_unmask_flag_t { + ZXIC_UINT32 smmu0_int53_unmask_flag; + ZXIC_UINT32 smmu0_int52_unmask_flag; + ZXIC_UINT32 smmu0_int51_unmask_flag; + ZXIC_UINT32 
smmu0_int50_unmask_flag; + ZXIC_UINT32 smmu0_int49_unmask_flag; + ZXIC_UINT32 smmu0_int48_unmask_flag; + ZXIC_UINT32 smmu0_int47_unmask_flag; + ZXIC_UINT32 smmu0_int46_unmask_flag; + ZXIC_UINT32 smmu0_int45_unmask_flag; + ZXIC_UINT32 smmu0_int44_unmask_flag; + ZXIC_UINT32 smmu0_int43_unmask_flag; + ZXIC_UINT32 smmu0_int42_unmask_flag; + ZXIC_UINT32 smmu0_int41_unmask_flag; + ZXIC_UINT32 smmu0_int40_unmask_flag; + ZXIC_UINT32 smmu0_int39_unmask_flag; + ZXIC_UINT32 smmu0_int38_unmask_flag; + ZXIC_UINT32 smmu0_int37_unmask_flag; + ZXIC_UINT32 smmu0_int36_unmask_flag; + ZXIC_UINT32 smmu0_int35_unmask_flag; + ZXIC_UINT32 smmu0_int34_unmask_flag; + ZXIC_UINT32 smmu0_int33_unmask_flag; + ZXIC_UINT32 smmu0_int32_unmask_flag; +} DPP_SMMU0_SMMU0_SMMU0_ECC_UNMASK_FLAG_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int15_en_t { + ZXIC_UINT32 smmu0_int15_en31; + ZXIC_UINT32 smmu0_int15_en30; + ZXIC_UINT32 smmu0_int15_en29; + ZXIC_UINT32 smmu0_int15_en28; + ZXIC_UINT32 smmu0_int15_en27; + ZXIC_UINT32 smmu0_int15_en26; + ZXIC_UINT32 smmu0_int15_en25; + ZXIC_UINT32 smmu0_int15_en24; + ZXIC_UINT32 smmu0_int15_en23; + ZXIC_UINT32 smmu0_int15_en22; + ZXIC_UINT32 smmu0_int15_en21; + ZXIC_UINT32 smmu0_int15_en20; + ZXIC_UINT32 smmu0_int15_en19; + ZXIC_UINT32 smmu0_int15_en18; + ZXIC_UINT32 smmu0_int15_en17; + ZXIC_UINT32 smmu0_int15_en16; + ZXIC_UINT32 smmu0_int15_en15; + ZXIC_UINT32 smmu0_int15_en14; + ZXIC_UINT32 smmu0_int15_en13; + ZXIC_UINT32 smmu0_int15_en12; + ZXIC_UINT32 smmu0_int15_en11; + ZXIC_UINT32 smmu0_int15_en10; + ZXIC_UINT32 smmu0_int15_en9; + ZXIC_UINT32 smmu0_int15_en8; + ZXIC_UINT32 smmu0_int15_en7; + ZXIC_UINT32 smmu0_int15_en6; + ZXIC_UINT32 smmu0_int15_en5; + ZXIC_UINT32 smmu0_int15_en4; + ZXIC_UINT32 smmu0_int15_en3; + ZXIC_UINT32 smmu0_int15_en2; + ZXIC_UINT32 smmu0_int15_en1; + ZXIC_UINT32 smmu0_int15_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT15_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int15_mask_t { + ZXIC_UINT32 smmu0_int15_mask31; + ZXIC_UINT32 smmu0_int15_mask30; + 
ZXIC_UINT32 smmu0_int15_mask29; + ZXIC_UINT32 smmu0_int15_mask28; + ZXIC_UINT32 smmu0_int15_mask27; + ZXIC_UINT32 smmu0_int15_mask26; + ZXIC_UINT32 smmu0_int15_mask25; + ZXIC_UINT32 smmu0_int15_mask24; + ZXIC_UINT32 smmu0_int15_mask23; + ZXIC_UINT32 smmu0_int15_mask22; + ZXIC_UINT32 smmu0_int15_mask21; + ZXIC_UINT32 smmu0_int15_mask20; + ZXIC_UINT32 smmu0_int15_mask19; + ZXIC_UINT32 smmu0_int15_mask18; + ZXIC_UINT32 smmu0_int15_mask17; + ZXIC_UINT32 smmu0_int15_mask16; + ZXIC_UINT32 smmu0_int15_mask15; + ZXIC_UINT32 smmu0_int15_mask14; + ZXIC_UINT32 smmu0_int15_mask13; + ZXIC_UINT32 smmu0_int15_mask12; + ZXIC_UINT32 smmu0_int15_mask11; + ZXIC_UINT32 smmu0_int15_mask10; + ZXIC_UINT32 smmu0_int15_mask9; + ZXIC_UINT32 smmu0_int15_mask8; + ZXIC_UINT32 smmu0_int15_mask7; + ZXIC_UINT32 smmu0_int15_mask6; + ZXIC_UINT32 smmu0_int15_mask5; + ZXIC_UINT32 smmu0_int15_mask4; + ZXIC_UINT32 smmu0_int15_mask3; + ZXIC_UINT32 smmu0_int15_mask2; + ZXIC_UINT32 smmu0_int15_mask1; + ZXIC_UINT32 smmu0_int15_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT15_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int15_status_t { + ZXIC_UINT32 smmu0_int15_status31; + ZXIC_UINT32 smmu0_int15_status30; + ZXIC_UINT32 smmu0_int15_status29; + ZXIC_UINT32 smmu0_int15_status28; + ZXIC_UINT32 smmu0_int15_status27; + ZXIC_UINT32 smmu0_int15_status26; + ZXIC_UINT32 smmu0_int15_status25; + ZXIC_UINT32 smmu0_int15_status24; + ZXIC_UINT32 smmu0_int15_status23; + ZXIC_UINT32 smmu0_int15_status22; + ZXIC_UINT32 smmu0_int15_status21; + ZXIC_UINT32 smmu0_int15_status20; + ZXIC_UINT32 smmu0_int15_status19; + ZXIC_UINT32 smmu0_int15_status18; + ZXIC_UINT32 smmu0_int15_status17; + ZXIC_UINT32 smmu0_int15_status16; + ZXIC_UINT32 smmu0_int15_status15; + ZXIC_UINT32 smmu0_int15_status14; + ZXIC_UINT32 smmu0_int15_status13; + ZXIC_UINT32 smmu0_int15_status12; + ZXIC_UINT32 smmu0_int15_status11; + ZXIC_UINT32 smmu0_int15_status10; + ZXIC_UINT32 smmu0_int15_status9; + ZXIC_UINT32 smmu0_int15_status8; + ZXIC_UINT32 
smmu0_int15_status7; + ZXIC_UINT32 smmu0_int15_status6; + ZXIC_UINT32 smmu0_int15_status5; + ZXIC_UINT32 smmu0_int15_status4; + ZXIC_UINT32 smmu0_int15_status3; + ZXIC_UINT32 smmu0_int15_status2; + ZXIC_UINT32 smmu0_int15_status1; + ZXIC_UINT32 smmu0_int15_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT15_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int16_en_t { + ZXIC_UINT32 smmu0_int16_en31; + ZXIC_UINT32 smmu0_int16_en30; + ZXIC_UINT32 smmu0_int16_en29; + ZXIC_UINT32 smmu0_int16_en28; + ZXIC_UINT32 smmu0_int16_en27; + ZXIC_UINT32 smmu0_int16_en26; + ZXIC_UINT32 smmu0_int16_en25; + ZXIC_UINT32 smmu0_int16_en24; + ZXIC_UINT32 smmu0_int16_en23; + ZXIC_UINT32 smmu0_int16_en22; + ZXIC_UINT32 smmu0_int16_en21; + ZXIC_UINT32 smmu0_int16_en20; + ZXIC_UINT32 smmu0_int16_en19; + ZXIC_UINT32 smmu0_int16_en18; + ZXIC_UINT32 smmu0_int16_en17; + ZXIC_UINT32 smmu0_int16_en16; + ZXIC_UINT32 smmu0_int16_en15; + ZXIC_UINT32 smmu0_int16_en14; + ZXIC_UINT32 smmu0_int16_en13; + ZXIC_UINT32 smmu0_int16_en12; + ZXIC_UINT32 smmu0_int16_en11; + ZXIC_UINT32 smmu0_int16_en10; + ZXIC_UINT32 smmu0_int16_en9; + ZXIC_UINT32 smmu0_int16_en8; + ZXIC_UINT32 smmu0_int16_en7; + ZXIC_UINT32 smmu0_int16_en6; + ZXIC_UINT32 smmu0_int16_en5; + ZXIC_UINT32 smmu0_int16_en4; + ZXIC_UINT32 smmu0_int16_en3; + ZXIC_UINT32 smmu0_int16_en2; + ZXIC_UINT32 smmu0_int16_en1; + ZXIC_UINT32 smmu0_int16_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT16_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int16_mask_t { + ZXIC_UINT32 smmu0_int16_mask31; + ZXIC_UINT32 smmu0_int16_mask30; + ZXIC_UINT32 smmu0_int16_mask29; + ZXIC_UINT32 smmu0_int16_mask28; + ZXIC_UINT32 smmu0_int16_mask27; + ZXIC_UINT32 smmu0_int16_mask26; + ZXIC_UINT32 smmu0_int16_mask25; + ZXIC_UINT32 smmu0_int16_mask24; + ZXIC_UINT32 smmu0_int16_mask23; + ZXIC_UINT32 smmu0_int16_mask22; + ZXIC_UINT32 smmu0_int16_mask21; + ZXIC_UINT32 smmu0_int16_mask20; + ZXIC_UINT32 smmu0_int16_mask19; + ZXIC_UINT32 smmu0_int16_mask18; + ZXIC_UINT32 smmu0_int16_mask17; + ZXIC_UINT32 
smmu0_int16_mask16; + ZXIC_UINT32 smmu0_int16_mask15; + ZXIC_UINT32 smmu0_int16_mask14; + ZXIC_UINT32 smmu0_int16_mask13; + ZXIC_UINT32 smmu0_int16_mask12; + ZXIC_UINT32 smmu0_int16_mask11; + ZXIC_UINT32 smmu0_int16_mask10; + ZXIC_UINT32 smmu0_int16_mask9; + ZXIC_UINT32 smmu0_int16_mask8; + ZXIC_UINT32 smmu0_int16_mask7; + ZXIC_UINT32 smmu0_int16_mask6; + ZXIC_UINT32 smmu0_int16_mask5; + ZXIC_UINT32 smmu0_int16_mask4; + ZXIC_UINT32 smmu0_int16_mask3; + ZXIC_UINT32 smmu0_int16_mask2; + ZXIC_UINT32 smmu0_int16_mask1; + ZXIC_UINT32 smmu0_int16_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT16_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int16_status_t { + ZXIC_UINT32 smmu0_int16_status31; + ZXIC_UINT32 smmu0_int16_status30; + ZXIC_UINT32 smmu0_int16_status29; + ZXIC_UINT32 smmu0_int16_status28; + ZXIC_UINT32 smmu0_int16_status27; + ZXIC_UINT32 smmu0_int16_status26; + ZXIC_UINT32 smmu0_int16_status25; + ZXIC_UINT32 smmu0_int16_status24; + ZXIC_UINT32 smmu0_int16_status23; + ZXIC_UINT32 smmu0_int16_status22; + ZXIC_UINT32 smmu0_int16_status21; + ZXIC_UINT32 smmu0_int16_status20; + ZXIC_UINT32 smmu0_int16_status19; + ZXIC_UINT32 smmu0_int16_status18; + ZXIC_UINT32 smmu0_int16_status17; + ZXIC_UINT32 smmu0_int16_status16; + ZXIC_UINT32 smmu0_int16_status15; + ZXIC_UINT32 smmu0_int16_status14; + ZXIC_UINT32 smmu0_int16_status13; + ZXIC_UINT32 smmu0_int16_status12; + ZXIC_UINT32 smmu0_int16_status11; + ZXIC_UINT32 smmu0_int16_status10; + ZXIC_UINT32 smmu0_int16_status9; + ZXIC_UINT32 smmu0_int16_status8; + ZXIC_UINT32 smmu0_int16_status7; + ZXIC_UINT32 smmu0_int16_status6; + ZXIC_UINT32 smmu0_int16_status5; + ZXIC_UINT32 smmu0_int16_status4; + ZXIC_UINT32 smmu0_int16_status3; + ZXIC_UINT32 smmu0_int16_status2; + ZXIC_UINT32 smmu0_int16_status1; + ZXIC_UINT32 smmu0_int16_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT16_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int17_en_t { + ZXIC_UINT32 smmu0_int17_en31; + ZXIC_UINT32 smmu0_int17_en30; + ZXIC_UINT32 smmu0_int17_en29; + ZXIC_UINT32 
smmu0_int17_en28; + ZXIC_UINT32 smmu0_int17_en27; + ZXIC_UINT32 smmu0_int17_en26; + ZXIC_UINT32 smmu0_int17_en25; + ZXIC_UINT32 smmu0_int17_en24; + ZXIC_UINT32 smmu0_int17_en23; + ZXIC_UINT32 smmu0_int17_en22; + ZXIC_UINT32 smmu0_int17_en21; + ZXIC_UINT32 smmu0_int17_en20; + ZXIC_UINT32 smmu0_int17_en19; + ZXIC_UINT32 smmu0_int17_en18; + ZXIC_UINT32 smmu0_int17_en17; + ZXIC_UINT32 smmu0_int17_en16; + ZXIC_UINT32 smmu0_int17_en15; + ZXIC_UINT32 smmu0_int17_en14; + ZXIC_UINT32 smmu0_int17_en13; + ZXIC_UINT32 smmu0_int17_en12; + ZXIC_UINT32 smmu0_int17_en11; + ZXIC_UINT32 smmu0_int17_en10; + ZXIC_UINT32 smmu0_int17_en9; + ZXIC_UINT32 smmu0_int17_en8; + ZXIC_UINT32 smmu0_int17_en7; + ZXIC_UINT32 smmu0_int17_en6; + ZXIC_UINT32 smmu0_int17_en5; + ZXIC_UINT32 smmu0_int17_en4; + ZXIC_UINT32 smmu0_int17_en3; + ZXIC_UINT32 smmu0_int17_en2; + ZXIC_UINT32 smmu0_int17_en1; + ZXIC_UINT32 smmu0_int17_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT17_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int17_mask_t { + ZXIC_UINT32 smmu0_int17_mask31; + ZXIC_UINT32 smmu0_int17_mask30; + ZXIC_UINT32 smmu0_int17_mask29; + ZXIC_UINT32 smmu0_int17_mask28; + ZXIC_UINT32 smmu0_int17_mask27; + ZXIC_UINT32 smmu0_int17_mask26; + ZXIC_UINT32 smmu0_int17_mask25; + ZXIC_UINT32 smmu0_int17_mask24; + ZXIC_UINT32 smmu0_int17_mask23; + ZXIC_UINT32 smmu0_int17_mask22; + ZXIC_UINT32 smmu0_int17_mask21; + ZXIC_UINT32 smmu0_int17_mask20; + ZXIC_UINT32 smmu0_int17_mask19; + ZXIC_UINT32 smmu0_int17_mask18; + ZXIC_UINT32 smmu0_int17_mask17; + ZXIC_UINT32 smmu0_int17_mask16; + ZXIC_UINT32 smmu0_int17_mask15; + ZXIC_UINT32 smmu0_int17_mask14; + ZXIC_UINT32 smmu0_int17_mask13; + ZXIC_UINT32 smmu0_int17_mask12; + ZXIC_UINT32 smmu0_int17_mask11; + ZXIC_UINT32 smmu0_int17_mask10; + ZXIC_UINT32 smmu0_int17_mask9; + ZXIC_UINT32 smmu0_int17_mask8; + ZXIC_UINT32 smmu0_int17_mask7; + ZXIC_UINT32 smmu0_int17_mask6; + ZXIC_UINT32 smmu0_int17_mask5; + ZXIC_UINT32 smmu0_int17_mask4; + ZXIC_UINT32 smmu0_int17_mask3; + ZXIC_UINT32 
smmu0_int17_mask2; + ZXIC_UINT32 smmu0_int17_mask1; + ZXIC_UINT32 smmu0_int17_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT17_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int17_status_t { + ZXIC_UINT32 smmu0_int17_status31; + ZXIC_UINT32 smmu0_int17_status30; + ZXIC_UINT32 smmu0_int17_status29; + ZXIC_UINT32 smmu0_int17_status28; + ZXIC_UINT32 smmu0_int17_status27; + ZXIC_UINT32 smmu0_int17_status26; + ZXIC_UINT32 smmu0_int17_status25; + ZXIC_UINT32 smmu0_int17_status24; + ZXIC_UINT32 smmu0_int17_status23; + ZXIC_UINT32 smmu0_int17_status22; + ZXIC_UINT32 smmu0_int17_status21; + ZXIC_UINT32 smmu0_int17_status20; + ZXIC_UINT32 smmu0_int17_status19; + ZXIC_UINT32 smmu0_int17_status18; + ZXIC_UINT32 smmu0_int17_status17; + ZXIC_UINT32 smmu0_int17_status16; + ZXIC_UINT32 smmu0_int17_status15; + ZXIC_UINT32 smmu0_int17_status14; + ZXIC_UINT32 smmu0_int17_status13; + ZXIC_UINT32 smmu0_int17_status12; + ZXIC_UINT32 smmu0_int17_status11; + ZXIC_UINT32 smmu0_int17_status10; + ZXIC_UINT32 smmu0_int17_status9; + ZXIC_UINT32 smmu0_int17_status8; + ZXIC_UINT32 smmu0_int17_status7; + ZXIC_UINT32 smmu0_int17_status6; + ZXIC_UINT32 smmu0_int17_status5; + ZXIC_UINT32 smmu0_int17_status4; + ZXIC_UINT32 smmu0_int17_status3; + ZXIC_UINT32 smmu0_int17_status2; + ZXIC_UINT32 smmu0_int17_status1; + ZXIC_UINT32 smmu0_int17_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT17_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int18_en_t { + ZXIC_UINT32 smmu0_int18_en31; + ZXIC_UINT32 smmu0_int18_en30; + ZXIC_UINT32 smmu0_int18_en29; + ZXIC_UINT32 smmu0_int18_en28; + ZXIC_UINT32 smmu0_int18_en27; + ZXIC_UINT32 smmu0_int18_en26; + ZXIC_UINT32 smmu0_int18_en25; + ZXIC_UINT32 smmu0_int18_en24; + ZXIC_UINT32 smmu0_int18_en23; + ZXIC_UINT32 smmu0_int18_en22; + ZXIC_UINT32 smmu0_int18_en21; + ZXIC_UINT32 smmu0_int18_en20; + ZXIC_UINT32 smmu0_int18_en19; + ZXIC_UINT32 smmu0_int18_en18; + ZXIC_UINT32 smmu0_int18_en17; + ZXIC_UINT32 smmu0_int18_en16; + ZXIC_UINT32 smmu0_int18_en15; + ZXIC_UINT32 smmu0_int18_en14; + 
ZXIC_UINT32 smmu0_int18_en13; + ZXIC_UINT32 smmu0_int18_en12; + ZXIC_UINT32 smmu0_int18_en11; + ZXIC_UINT32 smmu0_int18_en10; + ZXIC_UINT32 smmu0_int18_en9; + ZXIC_UINT32 smmu0_int18_en8; + ZXIC_UINT32 smmu0_int18_en7; + ZXIC_UINT32 smmu0_int18_en6; + ZXIC_UINT32 smmu0_int18_en5; + ZXIC_UINT32 smmu0_int18_en4; + ZXIC_UINT32 smmu0_int18_en3; + ZXIC_UINT32 smmu0_int18_en2; + ZXIC_UINT32 smmu0_int18_en1; + ZXIC_UINT32 smmu0_int18_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT18_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int18_mask_t { + ZXIC_UINT32 smmu0_int18_mask31; + ZXIC_UINT32 smmu0_int18_mask30; + ZXIC_UINT32 smmu0_int18_mask29; + ZXIC_UINT32 smmu0_int18_mask28; + ZXIC_UINT32 smmu0_int18_mask27; + ZXIC_UINT32 smmu0_int18_mask26; + ZXIC_UINT32 smmu0_int18_mask25; + ZXIC_UINT32 smmu0_int18_mask24; + ZXIC_UINT32 smmu0_int18_mask23; + ZXIC_UINT32 smmu0_int18_mask22; + ZXIC_UINT32 smmu0_int18_mask21; + ZXIC_UINT32 smmu0_int18_mask20; + ZXIC_UINT32 smmu0_int18_mask19; + ZXIC_UINT32 smmu0_int18_mask18; + ZXIC_UINT32 smmu0_int18_mask17; + ZXIC_UINT32 smmu0_int18_mask16; + ZXIC_UINT32 smmu0_int18_mask15; + ZXIC_UINT32 smmu0_int18_mask14; + ZXIC_UINT32 smmu0_int18_mask13; + ZXIC_UINT32 smmu0_int18_mask12; + ZXIC_UINT32 smmu0_int18_mask11; + ZXIC_UINT32 smmu0_int18_mask10; + ZXIC_UINT32 smmu0_int18_mask9; + ZXIC_UINT32 smmu0_int18_mask8; + ZXIC_UINT32 smmu0_int18_mask7; + ZXIC_UINT32 smmu0_int18_mask6; + ZXIC_UINT32 smmu0_int18_mask5; + ZXIC_UINT32 smmu0_int18_mask4; + ZXIC_UINT32 smmu0_int18_mask3; + ZXIC_UINT32 smmu0_int18_mask2; + ZXIC_UINT32 smmu0_int18_mask1; + ZXIC_UINT32 smmu0_int18_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT18_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int18_status_t { + ZXIC_UINT32 smmu0_int18_status31; + ZXIC_UINT32 smmu0_int18_status30; + ZXIC_UINT32 smmu0_int18_status29; + ZXIC_UINT32 smmu0_int18_status28; + ZXIC_UINT32 smmu0_int18_status27; + ZXIC_UINT32 smmu0_int18_status26; + ZXIC_UINT32 smmu0_int18_status25; + ZXIC_UINT32 smmu0_int18_status24; + 
ZXIC_UINT32 smmu0_int18_status23; + ZXIC_UINT32 smmu0_int18_status22; + ZXIC_UINT32 smmu0_int18_status21; + ZXIC_UINT32 smmu0_int18_status20; + ZXIC_UINT32 smmu0_int18_status19; + ZXIC_UINT32 smmu0_int18_status18; + ZXIC_UINT32 smmu0_int18_status17; + ZXIC_UINT32 smmu0_int18_status16; + ZXIC_UINT32 smmu0_int18_status15; + ZXIC_UINT32 smmu0_int18_status14; + ZXIC_UINT32 smmu0_int18_status13; + ZXIC_UINT32 smmu0_int18_status12; + ZXIC_UINT32 smmu0_int18_status11; + ZXIC_UINT32 smmu0_int18_status10; + ZXIC_UINT32 smmu0_int18_status9; + ZXIC_UINT32 smmu0_int18_status8; + ZXIC_UINT32 smmu0_int18_status7; + ZXIC_UINT32 smmu0_int18_status6; + ZXIC_UINT32 smmu0_int18_status5; + ZXIC_UINT32 smmu0_int18_status4; + ZXIC_UINT32 smmu0_int18_status3; + ZXIC_UINT32 smmu0_int18_status2; + ZXIC_UINT32 smmu0_int18_status1; + ZXIC_UINT32 smmu0_int18_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT18_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int19_en_t { + ZXIC_UINT32 smmu0_int19_en31; + ZXIC_UINT32 smmu0_int19_en30; + ZXIC_UINT32 smmu0_int19_en29; + ZXIC_UINT32 smmu0_int19_en28; + ZXIC_UINT32 smmu0_int19_en27; + ZXIC_UINT32 smmu0_int19_en26; + ZXIC_UINT32 smmu0_int19_en25; + ZXIC_UINT32 smmu0_int19_en24; + ZXIC_UINT32 smmu0_int19_en23; + ZXIC_UINT32 smmu0_int19_en22; + ZXIC_UINT32 smmu0_int19_en21; + ZXIC_UINT32 smmu0_int19_en20; + ZXIC_UINT32 smmu0_int19_en19; + ZXIC_UINT32 smmu0_int19_en18; + ZXIC_UINT32 smmu0_int19_en17; + ZXIC_UINT32 smmu0_int19_en16; + ZXIC_UINT32 smmu0_int19_en15; + ZXIC_UINT32 smmu0_int19_en14; + ZXIC_UINT32 smmu0_int19_en13; + ZXIC_UINT32 smmu0_int19_en12; + ZXIC_UINT32 smmu0_int19_en11; + ZXIC_UINT32 smmu0_int19_en10; + ZXIC_UINT32 smmu0_int19_en9; + ZXIC_UINT32 smmu0_int19_en8; + ZXIC_UINT32 smmu0_int19_en7; + ZXIC_UINT32 smmu0_int19_en6; + ZXIC_UINT32 smmu0_int19_en5; + ZXIC_UINT32 smmu0_int19_en4; + ZXIC_UINT32 smmu0_int19_en3; + ZXIC_UINT32 smmu0_int19_en2; + ZXIC_UINT32 smmu0_int19_en1; + ZXIC_UINT32 smmu0_int19_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT19_EN_T; + 
+typedef struct dpp_smmu0_smmu0_smmu0_int19_mask_t { + ZXIC_UINT32 smmu0_int19_mask31; + ZXIC_UINT32 smmu0_int19_mask30; + ZXIC_UINT32 smmu0_int19_mask29; + ZXIC_UINT32 smmu0_int19_mask28; + ZXIC_UINT32 smmu0_int19_mask27; + ZXIC_UINT32 smmu0_int19_mask26; + ZXIC_UINT32 smmu0_int19_mask25; + ZXIC_UINT32 smmu0_int19_mask24; + ZXIC_UINT32 smmu0_int19_mask23; + ZXIC_UINT32 smmu0_int19_mask22; + ZXIC_UINT32 smmu0_int19_mask21; + ZXIC_UINT32 smmu0_int19_mask20; + ZXIC_UINT32 smmu0_int19_mask19; + ZXIC_UINT32 smmu0_int19_mask18; + ZXIC_UINT32 smmu0_int19_mask17; + ZXIC_UINT32 smmu0_int19_mask16; + ZXIC_UINT32 smmu0_int19_mask15; + ZXIC_UINT32 smmu0_int19_mask14; + ZXIC_UINT32 smmu0_int19_mask13; + ZXIC_UINT32 smmu0_int19_mask12; + ZXIC_UINT32 smmu0_int19_mask11; + ZXIC_UINT32 smmu0_int19_mask10; + ZXIC_UINT32 smmu0_int19_mask9; + ZXIC_UINT32 smmu0_int19_mask8; + ZXIC_UINT32 smmu0_int19_mask7; + ZXIC_UINT32 smmu0_int19_mask6; + ZXIC_UINT32 smmu0_int19_mask5; + ZXIC_UINT32 smmu0_int19_mask4; + ZXIC_UINT32 smmu0_int19_mask3; + ZXIC_UINT32 smmu0_int19_mask2; + ZXIC_UINT32 smmu0_int19_mask1; + ZXIC_UINT32 smmu0_int19_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT19_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int19_status_t { + ZXIC_UINT32 smmu0_int19_status31; + ZXIC_UINT32 smmu0_int19_status30; + ZXIC_UINT32 smmu0_int19_status29; + ZXIC_UINT32 smmu0_int19_status28; + ZXIC_UINT32 smmu0_int19_status27; + ZXIC_UINT32 smmu0_int19_status26; + ZXIC_UINT32 smmu0_int19_status25; + ZXIC_UINT32 smmu0_int19_status24; + ZXIC_UINT32 smmu0_int19_status23; + ZXIC_UINT32 smmu0_int19_status22; + ZXIC_UINT32 smmu0_int19_status21; + ZXIC_UINT32 smmu0_int19_status20; + ZXIC_UINT32 smmu0_int19_status19; + ZXIC_UINT32 smmu0_int19_status18; + ZXIC_UINT32 smmu0_int19_status17; + ZXIC_UINT32 smmu0_int19_status16; + ZXIC_UINT32 smmu0_int19_status15; + ZXIC_UINT32 smmu0_int19_status14; + ZXIC_UINT32 smmu0_int19_status13; + ZXIC_UINT32 smmu0_int19_status12; + ZXIC_UINT32 smmu0_int19_status11; + ZXIC_UINT32 
smmu0_int19_status10; + ZXIC_UINT32 smmu0_int19_status9; + ZXIC_UINT32 smmu0_int19_status8; + ZXIC_UINT32 smmu0_int19_status7; + ZXIC_UINT32 smmu0_int19_status6; + ZXIC_UINT32 smmu0_int19_status5; + ZXIC_UINT32 smmu0_int19_status4; + ZXIC_UINT32 smmu0_int19_status3; + ZXIC_UINT32 smmu0_int19_status2; + ZXIC_UINT32 smmu0_int19_status1; + ZXIC_UINT32 smmu0_int19_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT19_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int20_en_t { + ZXIC_UINT32 smmu0_int20_en31; + ZXIC_UINT32 smmu0_int20_en30; + ZXIC_UINT32 smmu0_int20_en29; + ZXIC_UINT32 smmu0_int20_en28; + ZXIC_UINT32 smmu0_int20_en27; + ZXIC_UINT32 smmu0_int20_en26; + ZXIC_UINT32 smmu0_int20_en25; + ZXIC_UINT32 smmu0_int20_en24; + ZXIC_UINT32 smmu0_int20_en23; + ZXIC_UINT32 smmu0_int20_en22; + ZXIC_UINT32 smmu0_int20_en21; + ZXIC_UINT32 smmu0_int20_en20; + ZXIC_UINT32 smmu0_int20_en19; + ZXIC_UINT32 smmu0_int20_en18; + ZXIC_UINT32 smmu0_int20_en17; + ZXIC_UINT32 smmu0_int20_en16; + ZXIC_UINT32 smmu0_int20_en15; + ZXIC_UINT32 smmu0_int20_en14; + ZXIC_UINT32 smmu0_int20_en13; + ZXIC_UINT32 smmu0_int20_en12; + ZXIC_UINT32 smmu0_int20_en11; + ZXIC_UINT32 smmu0_int20_en10; + ZXIC_UINT32 smmu0_int20_en9; + ZXIC_UINT32 smmu0_int20_en8; + ZXIC_UINT32 smmu0_int20_en7; + ZXIC_UINT32 smmu0_int20_en6; + ZXIC_UINT32 smmu0_int20_en5; + ZXIC_UINT32 smmu0_int20_en4; + ZXIC_UINT32 smmu0_int20_en3; + ZXIC_UINT32 smmu0_int20_en2; + ZXIC_UINT32 smmu0_int20_en1; + ZXIC_UINT32 smmu0_int20_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT20_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int20_mask_t { + ZXIC_UINT32 smmu0_int20_mask31; + ZXIC_UINT32 smmu0_int20_mask30; + ZXIC_UINT32 smmu0_int20_mask29; + ZXIC_UINT32 smmu0_int20_mask28; + ZXIC_UINT32 smmu0_int20_mask27; + ZXIC_UINT32 smmu0_int20_mask26; + ZXIC_UINT32 smmu0_int20_mask25; + ZXIC_UINT32 smmu0_int20_mask24; + ZXIC_UINT32 smmu0_int20_mask23; + ZXIC_UINT32 smmu0_int20_mask22; + ZXIC_UINT32 smmu0_int20_mask21; + ZXIC_UINT32 smmu0_int20_mask20; + ZXIC_UINT32 
smmu0_int20_mask19; + ZXIC_UINT32 smmu0_int20_mask18; + ZXIC_UINT32 smmu0_int20_mask17; + ZXIC_UINT32 smmu0_int20_mask16; + ZXIC_UINT32 smmu0_int20_mask15; + ZXIC_UINT32 smmu0_int20_mask14; + ZXIC_UINT32 smmu0_int20_mask13; + ZXIC_UINT32 smmu0_int20_mask12; + ZXIC_UINT32 smmu0_int20_mask11; + ZXIC_UINT32 smmu0_int20_mask10; + ZXIC_UINT32 smmu0_int20_mask9; + ZXIC_UINT32 smmu0_int20_mask8; + ZXIC_UINT32 smmu0_int20_mask7; + ZXIC_UINT32 smmu0_int20_mask6; + ZXIC_UINT32 smmu0_int20_mask5; + ZXIC_UINT32 smmu0_int20_mask4; + ZXIC_UINT32 smmu0_int20_mask3; + ZXIC_UINT32 smmu0_int20_mask2; + ZXIC_UINT32 smmu0_int20_mask1; + ZXIC_UINT32 smmu0_int20_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT20_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int20_status_t { + ZXIC_UINT32 smmu0_int20_status31; + ZXIC_UINT32 smmu0_int20_status30; + ZXIC_UINT32 smmu0_int20_status29; + ZXIC_UINT32 smmu0_int20_status28; + ZXIC_UINT32 smmu0_int20_status27; + ZXIC_UINT32 smmu0_int20_status26; + ZXIC_UINT32 smmu0_int20_status25; + ZXIC_UINT32 smmu0_int20_status24; + ZXIC_UINT32 smmu0_int20_status23; + ZXIC_UINT32 smmu0_int20_status22; + ZXIC_UINT32 smmu0_int20_status21; + ZXIC_UINT32 smmu0_int20_status20; + ZXIC_UINT32 smmu0_int20_status19; + ZXIC_UINT32 smmu0_int20_status18; + ZXIC_UINT32 smmu0_int20_status17; + ZXIC_UINT32 smmu0_int20_status16; + ZXIC_UINT32 smmu0_int20_status15; + ZXIC_UINT32 smmu0_int20_status14; + ZXIC_UINT32 smmu0_int20_status13; + ZXIC_UINT32 smmu0_int20_status12; + ZXIC_UINT32 smmu0_int20_status11; + ZXIC_UINT32 smmu0_int20_status10; + ZXIC_UINT32 smmu0_int20_status9; + ZXIC_UINT32 smmu0_int20_status8; + ZXIC_UINT32 smmu0_int20_status7; + ZXIC_UINT32 smmu0_int20_status6; + ZXIC_UINT32 smmu0_int20_status5; + ZXIC_UINT32 smmu0_int20_status4; + ZXIC_UINT32 smmu0_int20_status3; + ZXIC_UINT32 smmu0_int20_status2; + ZXIC_UINT32 smmu0_int20_status1; + ZXIC_UINT32 smmu0_int20_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT20_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int21_en_t { + 
ZXIC_UINT32 smmu0_int21_en31; + ZXIC_UINT32 smmu0_int21_en30; + ZXIC_UINT32 smmu0_int21_en29; + ZXIC_UINT32 smmu0_int21_en28; + ZXIC_UINT32 smmu0_int21_en27; + ZXIC_UINT32 smmu0_int21_en26; + ZXIC_UINT32 smmu0_int21_en25; + ZXIC_UINT32 smmu0_int21_en24; + ZXIC_UINT32 smmu0_int21_en23; + ZXIC_UINT32 smmu0_int21_en22; + ZXIC_UINT32 smmu0_int21_en21; + ZXIC_UINT32 smmu0_int21_en20; + ZXIC_UINT32 smmu0_int21_en19; + ZXIC_UINT32 smmu0_int21_en18; + ZXIC_UINT32 smmu0_int21_en17; + ZXIC_UINT32 smmu0_int21_en16; + ZXIC_UINT32 smmu0_int21_en15; + ZXIC_UINT32 smmu0_int21_en14; + ZXIC_UINT32 smmu0_int21_en13; + ZXIC_UINT32 smmu0_int21_en12; + ZXIC_UINT32 smmu0_int21_en11; + ZXIC_UINT32 smmu0_int21_en10; + ZXIC_UINT32 smmu0_int21_en9; + ZXIC_UINT32 smmu0_int21_en8; + ZXIC_UINT32 smmu0_int21_en7; + ZXIC_UINT32 smmu0_int21_en6; + ZXIC_UINT32 smmu0_int21_en5; + ZXIC_UINT32 smmu0_int21_en4; + ZXIC_UINT32 smmu0_int21_en3; + ZXIC_UINT32 smmu0_int21_en2; + ZXIC_UINT32 smmu0_int21_en1; + ZXIC_UINT32 smmu0_int21_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT21_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int21_mask_t { + ZXIC_UINT32 smmu0_int21_mask31; + ZXIC_UINT32 smmu0_int21_mask30; + ZXIC_UINT32 smmu0_int21_mask29; + ZXIC_UINT32 smmu0_int21_mask28; + ZXIC_UINT32 smmu0_int21_mask27; + ZXIC_UINT32 smmu0_int21_mask26; + ZXIC_UINT32 smmu0_int21_mask25; + ZXIC_UINT32 smmu0_int21_mask24; + ZXIC_UINT32 smmu0_int21_mask23; + ZXIC_UINT32 smmu0_int21_mask22; + ZXIC_UINT32 smmu0_int21_mask21; + ZXIC_UINT32 smmu0_int21_mask20; + ZXIC_UINT32 smmu0_int21_mask19; + ZXIC_UINT32 smmu0_int21_mask18; + ZXIC_UINT32 smmu0_int21_mask17; + ZXIC_UINT32 smmu0_int21_mask16; + ZXIC_UINT32 smmu0_int21_mask15; + ZXIC_UINT32 smmu0_int21_mask14; + ZXIC_UINT32 smmu0_int21_mask13; + ZXIC_UINT32 smmu0_int21_mask12; + ZXIC_UINT32 smmu0_int21_mask11; + ZXIC_UINT32 smmu0_int21_mask10; + ZXIC_UINT32 smmu0_int21_mask9; + ZXIC_UINT32 smmu0_int21_mask8; + ZXIC_UINT32 smmu0_int21_mask7; + ZXIC_UINT32 smmu0_int21_mask6; + ZXIC_UINT32 
smmu0_int21_mask5; + ZXIC_UINT32 smmu0_int21_mask4; + ZXIC_UINT32 smmu0_int21_mask3; + ZXIC_UINT32 smmu0_int21_mask2; + ZXIC_UINT32 smmu0_int21_mask1; + ZXIC_UINT32 smmu0_int21_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT21_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int21_status_t { + ZXIC_UINT32 smmu0_int21_status31; + ZXIC_UINT32 smmu0_int21_status30; + ZXIC_UINT32 smmu0_int21_status29; + ZXIC_UINT32 smmu0_int21_status28; + ZXIC_UINT32 smmu0_int21_status27; + ZXIC_UINT32 smmu0_int21_status26; + ZXIC_UINT32 smmu0_int21_status25; + ZXIC_UINT32 smmu0_int21_status24; + ZXIC_UINT32 smmu0_int21_status23; + ZXIC_UINT32 smmu0_int21_status22; + ZXIC_UINT32 smmu0_int21_status21; + ZXIC_UINT32 smmu0_int21_status20; + ZXIC_UINT32 smmu0_int21_status19; + ZXIC_UINT32 smmu0_int21_status18; + ZXIC_UINT32 smmu0_int21_status17; + ZXIC_UINT32 smmu0_int21_status16; + ZXIC_UINT32 smmu0_int21_status15; + ZXIC_UINT32 smmu0_int21_status14; + ZXIC_UINT32 smmu0_int21_status13; + ZXIC_UINT32 smmu0_int21_status12; + ZXIC_UINT32 smmu0_int21_status11; + ZXIC_UINT32 smmu0_int21_status10; + ZXIC_UINT32 smmu0_int21_status9; + ZXIC_UINT32 smmu0_int21_status8; + ZXIC_UINT32 smmu0_int21_status7; + ZXIC_UINT32 smmu0_int21_status6; + ZXIC_UINT32 smmu0_int21_status5; + ZXIC_UINT32 smmu0_int21_status4; + ZXIC_UINT32 smmu0_int21_status3; + ZXIC_UINT32 smmu0_int21_status2; + ZXIC_UINT32 smmu0_int21_status1; + ZXIC_UINT32 smmu0_int21_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT21_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int22_en_t { + ZXIC_UINT32 smmu0_int22_en31; + ZXIC_UINT32 smmu0_int22_en30; + ZXIC_UINT32 smmu0_int22_en29; + ZXIC_UINT32 smmu0_int22_en28; + ZXIC_UINT32 smmu0_int22_en27; + ZXIC_UINT32 smmu0_int22_en26; + ZXIC_UINT32 smmu0_int22_en25; + ZXIC_UINT32 smmu0_int22_en24; + ZXIC_UINT32 smmu0_int22_en23; + ZXIC_UINT32 smmu0_int22_en22; + ZXIC_UINT32 smmu0_int22_en21; + ZXIC_UINT32 smmu0_int22_en20; + ZXIC_UINT32 smmu0_int22_en19; + ZXIC_UINT32 smmu0_int22_en18; + ZXIC_UINT32 smmu0_int22_en17; + 
ZXIC_UINT32 smmu0_int22_en16; + ZXIC_UINT32 smmu0_int22_en15; + ZXIC_UINT32 smmu0_int22_en14; + ZXIC_UINT32 smmu0_int22_en13; + ZXIC_UINT32 smmu0_int22_en12; + ZXIC_UINT32 smmu0_int22_en11; + ZXIC_UINT32 smmu0_int22_en10; + ZXIC_UINT32 smmu0_int22_en9; + ZXIC_UINT32 smmu0_int22_en8; + ZXIC_UINT32 smmu0_int22_en7; + ZXIC_UINT32 smmu0_int22_en6; + ZXIC_UINT32 smmu0_int22_en5; + ZXIC_UINT32 smmu0_int22_en4; + ZXIC_UINT32 smmu0_int22_en3; + ZXIC_UINT32 smmu0_int22_en2; + ZXIC_UINT32 smmu0_int22_en1; + ZXIC_UINT32 smmu0_int22_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT22_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int22_mask_t { + ZXIC_UINT32 smmu0_int22_mask31; + ZXIC_UINT32 smmu0_int22_mask30; + ZXIC_UINT32 smmu0_int22_mask29; + ZXIC_UINT32 smmu0_int22_mask28; + ZXIC_UINT32 smmu0_int22_mask27; + ZXIC_UINT32 smmu0_int22_mask26; + ZXIC_UINT32 smmu0_int22_mask25; + ZXIC_UINT32 smmu0_int22_mask24; + ZXIC_UINT32 smmu0_int22_mask23; + ZXIC_UINT32 smmu0_int22_mask22; + ZXIC_UINT32 smmu0_int22_mask21; + ZXIC_UINT32 smmu0_int22_mask20; + ZXIC_UINT32 smmu0_int22_mask19; + ZXIC_UINT32 smmu0_int22_mask18; + ZXIC_UINT32 smmu0_int22_mask17; + ZXIC_UINT32 smmu0_int22_mask16; + ZXIC_UINT32 smmu0_int22_mask15; + ZXIC_UINT32 smmu0_int22_mask14; + ZXIC_UINT32 smmu0_int22_mask13; + ZXIC_UINT32 smmu0_int22_mask12; + ZXIC_UINT32 smmu0_int22_mask11; + ZXIC_UINT32 smmu0_int22_mask10; + ZXIC_UINT32 smmu0_int22_mask9; + ZXIC_UINT32 smmu0_int22_mask8; + ZXIC_UINT32 smmu0_int22_mask7; + ZXIC_UINT32 smmu0_int22_mask6; + ZXIC_UINT32 smmu0_int22_mask5; + ZXIC_UINT32 smmu0_int22_mask4; + ZXIC_UINT32 smmu0_int22_mask3; + ZXIC_UINT32 smmu0_int22_mask2; + ZXIC_UINT32 smmu0_int22_mask1; + ZXIC_UINT32 smmu0_int22_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT22_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int22_status_t { + ZXIC_UINT32 smmu0_int22_status31; + ZXIC_UINT32 smmu0_int22_status30; + ZXIC_UINT32 smmu0_int22_status29; + ZXIC_UINT32 smmu0_int22_status28; + ZXIC_UINT32 smmu0_int22_status27; + ZXIC_UINT32 
smmu0_int22_status26; + ZXIC_UINT32 smmu0_int22_status25; + ZXIC_UINT32 smmu0_int22_status24; + ZXIC_UINT32 smmu0_int22_status23; + ZXIC_UINT32 smmu0_int22_status22; + ZXIC_UINT32 smmu0_int22_status21; + ZXIC_UINT32 smmu0_int22_status20; + ZXIC_UINT32 smmu0_int22_status19; + ZXIC_UINT32 smmu0_int22_status18; + ZXIC_UINT32 smmu0_int22_status17; + ZXIC_UINT32 smmu0_int22_status16; + ZXIC_UINT32 smmu0_int22_status15; + ZXIC_UINT32 smmu0_int22_status14; + ZXIC_UINT32 smmu0_int22_status13; + ZXIC_UINT32 smmu0_int22_status12; + ZXIC_UINT32 smmu0_int22_status11; + ZXIC_UINT32 smmu0_int22_status10; + ZXIC_UINT32 smmu0_int22_status9; + ZXIC_UINT32 smmu0_int22_status8; + ZXIC_UINT32 smmu0_int22_status7; + ZXIC_UINT32 smmu0_int22_status6; + ZXIC_UINT32 smmu0_int22_status5; + ZXIC_UINT32 smmu0_int22_status4; + ZXIC_UINT32 smmu0_int22_status3; + ZXIC_UINT32 smmu0_int22_status2; + ZXIC_UINT32 smmu0_int22_status1; + ZXIC_UINT32 smmu0_int22_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT22_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int23_en_t { + ZXIC_UINT32 smmu0_int23_en31; + ZXIC_UINT32 smmu0_int23_en30; + ZXIC_UINT32 smmu0_int23_en29; + ZXIC_UINT32 smmu0_int23_en28; + ZXIC_UINT32 smmu0_int23_en27; + ZXIC_UINT32 smmu0_int23_en26; + ZXIC_UINT32 smmu0_int23_en25; + ZXIC_UINT32 smmu0_int23_en24; + ZXIC_UINT32 smmu0_int23_en23; + ZXIC_UINT32 smmu0_int23_en22; + ZXIC_UINT32 smmu0_int23_en21; + ZXIC_UINT32 smmu0_int23_en20; + ZXIC_UINT32 smmu0_int23_en19; + ZXIC_UINT32 smmu0_int23_en18; + ZXIC_UINT32 smmu0_int23_en17; + ZXIC_UINT32 smmu0_int23_en16; + ZXIC_UINT32 smmu0_int23_en15; + ZXIC_UINT32 smmu0_int23_en14; + ZXIC_UINT32 smmu0_int23_en13; + ZXIC_UINT32 smmu0_int23_en12; + ZXIC_UINT32 smmu0_int23_en11; + ZXIC_UINT32 smmu0_int23_en10; + ZXIC_UINT32 smmu0_int23_en9; + ZXIC_UINT32 smmu0_int23_en8; + ZXIC_UINT32 smmu0_int23_en7; + ZXIC_UINT32 smmu0_int23_en6; + ZXIC_UINT32 smmu0_int23_en5; + ZXIC_UINT32 smmu0_int23_en4; + ZXIC_UINT32 smmu0_int23_en3; + ZXIC_UINT32 smmu0_int23_en2; + 
ZXIC_UINT32 smmu0_int23_en1; + ZXIC_UINT32 smmu0_int23_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT23_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int23_mask_t { + ZXIC_UINT32 smmu0_int23_mask31; + ZXIC_UINT32 smmu0_int23_mask30; + ZXIC_UINT32 smmu0_int23_mask29; + ZXIC_UINT32 smmu0_int23_mask28; + ZXIC_UINT32 smmu0_int23_mask27; + ZXIC_UINT32 smmu0_int23_mask26; + ZXIC_UINT32 smmu0_int23_mask25; + ZXIC_UINT32 smmu0_int23_mask24; + ZXIC_UINT32 smmu0_int23_mask23; + ZXIC_UINT32 smmu0_int23_mask22; + ZXIC_UINT32 smmu0_int23_mask21; + ZXIC_UINT32 smmu0_int23_mask20; + ZXIC_UINT32 smmu0_int23_mask19; + ZXIC_UINT32 smmu0_int23_mask18; + ZXIC_UINT32 smmu0_int23_mask17; + ZXIC_UINT32 smmu0_int23_mask16; + ZXIC_UINT32 smmu0_int23_mask15; + ZXIC_UINT32 smmu0_int23_mask14; + ZXIC_UINT32 smmu0_int23_mask13; + ZXIC_UINT32 smmu0_int23_mask12; + ZXIC_UINT32 smmu0_int23_mask11; + ZXIC_UINT32 smmu0_int23_mask10; + ZXIC_UINT32 smmu0_int23_mask9; + ZXIC_UINT32 smmu0_int23_mask8; + ZXIC_UINT32 smmu0_int23_mask7; + ZXIC_UINT32 smmu0_int23_mask6; + ZXIC_UINT32 smmu0_int23_mask5; + ZXIC_UINT32 smmu0_int23_mask4; + ZXIC_UINT32 smmu0_int23_mask3; + ZXIC_UINT32 smmu0_int23_mask2; + ZXIC_UINT32 smmu0_int23_mask1; + ZXIC_UINT32 smmu0_int23_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT23_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int23_status_t { + ZXIC_UINT32 smmu0_int23_status31; + ZXIC_UINT32 smmu0_int23_status30; + ZXIC_UINT32 smmu0_int23_status29; + ZXIC_UINT32 smmu0_int23_status28; + ZXIC_UINT32 smmu0_int23_status27; + ZXIC_UINT32 smmu0_int23_status26; + ZXIC_UINT32 smmu0_int23_status25; + ZXIC_UINT32 smmu0_int23_status24; + ZXIC_UINT32 smmu0_int23_status23; + ZXIC_UINT32 smmu0_int23_status22; + ZXIC_UINT32 smmu0_int23_status21; + ZXIC_UINT32 smmu0_int23_status20; + ZXIC_UINT32 smmu0_int23_status19; + ZXIC_UINT32 smmu0_int23_status18; + ZXIC_UINT32 smmu0_int23_status17; + ZXIC_UINT32 smmu0_int23_status16; + ZXIC_UINT32 smmu0_int23_status15; + ZXIC_UINT32 smmu0_int23_status14; + ZXIC_UINT32 
smmu0_int23_status13; + ZXIC_UINT32 smmu0_int23_status12; + ZXIC_UINT32 smmu0_int23_status11; + ZXIC_UINT32 smmu0_int23_status10; + ZXIC_UINT32 smmu0_int23_status9; + ZXIC_UINT32 smmu0_int23_status8; + ZXIC_UINT32 smmu0_int23_status7; + ZXIC_UINT32 smmu0_int23_status6; + ZXIC_UINT32 smmu0_int23_status5; + ZXIC_UINT32 smmu0_int23_status4; + ZXIC_UINT32 smmu0_int23_status3; + ZXIC_UINT32 smmu0_int23_status2; + ZXIC_UINT32 smmu0_int23_status1; + ZXIC_UINT32 smmu0_int23_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT23_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int24_en_t { + ZXIC_UINT32 smmu0_int24_en31; + ZXIC_UINT32 smmu0_int24_en30; + ZXIC_UINT32 smmu0_int24_en29; + ZXIC_UINT32 smmu0_int24_en28; + ZXIC_UINT32 smmu0_int24_en27; + ZXIC_UINT32 smmu0_int24_en26; + ZXIC_UINT32 smmu0_int24_en25; + ZXIC_UINT32 smmu0_int24_en24; + ZXIC_UINT32 smmu0_int24_en23; + ZXIC_UINT32 smmu0_int24_en22; + ZXIC_UINT32 smmu0_int24_en21; + ZXIC_UINT32 smmu0_int24_en20; + ZXIC_UINT32 smmu0_int24_en19; + ZXIC_UINT32 smmu0_int24_en18; + ZXIC_UINT32 smmu0_int24_en17; + ZXIC_UINT32 smmu0_int24_en16; + ZXIC_UINT32 smmu0_int24_en15; + ZXIC_UINT32 smmu0_int24_en14; + ZXIC_UINT32 smmu0_int24_en13; + ZXIC_UINT32 smmu0_int24_en12; + ZXIC_UINT32 smmu0_int24_en11; + ZXIC_UINT32 smmu0_int24_en10; + ZXIC_UINT32 smmu0_int24_en9; + ZXIC_UINT32 smmu0_int24_en8; + ZXIC_UINT32 smmu0_int24_en7; + ZXIC_UINT32 smmu0_int24_en6; + ZXIC_UINT32 smmu0_int24_en5; + ZXIC_UINT32 smmu0_int24_en4; + ZXIC_UINT32 smmu0_int24_en3; + ZXIC_UINT32 smmu0_int24_en2; + ZXIC_UINT32 smmu0_int24_en1; + ZXIC_UINT32 smmu0_int24_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT24_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int24_mask_t { + ZXIC_UINT32 smmu0_int24_mask31; + ZXIC_UINT32 smmu0_int24_mask30; + ZXIC_UINT32 smmu0_int24_mask29; + ZXIC_UINT32 smmu0_int24_mask28; + ZXIC_UINT32 smmu0_int24_mask27; + ZXIC_UINT32 smmu0_int24_mask26; + ZXIC_UINT32 smmu0_int24_mask25; + ZXIC_UINT32 smmu0_int24_mask24; + ZXIC_UINT32 smmu0_int24_mask23; + ZXIC_UINT32 
smmu0_int24_mask22; + ZXIC_UINT32 smmu0_int24_mask21; + ZXIC_UINT32 smmu0_int24_mask20; + ZXIC_UINT32 smmu0_int24_mask19; + ZXIC_UINT32 smmu0_int24_mask18; + ZXIC_UINT32 smmu0_int24_mask17; + ZXIC_UINT32 smmu0_int24_mask16; + ZXIC_UINT32 smmu0_int24_mask15; + ZXIC_UINT32 smmu0_int24_mask14; + ZXIC_UINT32 smmu0_int24_mask13; + ZXIC_UINT32 smmu0_int24_mask12; + ZXIC_UINT32 smmu0_int24_mask11; + ZXIC_UINT32 smmu0_int24_mask10; + ZXIC_UINT32 smmu0_int24_mask9; + ZXIC_UINT32 smmu0_int24_mask8; + ZXIC_UINT32 smmu0_int24_mask7; + ZXIC_UINT32 smmu0_int24_mask6; + ZXIC_UINT32 smmu0_int24_mask5; + ZXIC_UINT32 smmu0_int24_mask4; + ZXIC_UINT32 smmu0_int24_mask3; + ZXIC_UINT32 smmu0_int24_mask2; + ZXIC_UINT32 smmu0_int24_mask1; + ZXIC_UINT32 smmu0_int24_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT24_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int24_status_t { + ZXIC_UINT32 smmu0_int24_status31; + ZXIC_UINT32 smmu0_int24_status30; + ZXIC_UINT32 smmu0_int24_status29; + ZXIC_UINT32 smmu0_int24_status28; + ZXIC_UINT32 smmu0_int24_status27; + ZXIC_UINT32 smmu0_int24_status26; + ZXIC_UINT32 smmu0_int24_status25; + ZXIC_UINT32 smmu0_int24_status24; + ZXIC_UINT32 smmu0_int24_status23; + ZXIC_UINT32 smmu0_int24_status22; + ZXIC_UINT32 smmu0_int24_status21; + ZXIC_UINT32 smmu0_int24_status20; + ZXIC_UINT32 smmu0_int24_status19; + ZXIC_UINT32 smmu0_int24_status18; + ZXIC_UINT32 smmu0_int24_status17; + ZXIC_UINT32 smmu0_int24_status16; + ZXIC_UINT32 smmu0_int24_status15; + ZXIC_UINT32 smmu0_int24_status14; + ZXIC_UINT32 smmu0_int24_status13; + ZXIC_UINT32 smmu0_int24_status12; + ZXIC_UINT32 smmu0_int24_status11; + ZXIC_UINT32 smmu0_int24_status10; + ZXIC_UINT32 smmu0_int24_status9; + ZXIC_UINT32 smmu0_int24_status8; + ZXIC_UINT32 smmu0_int24_status7; + ZXIC_UINT32 smmu0_int24_status6; + ZXIC_UINT32 smmu0_int24_status5; + ZXIC_UINT32 smmu0_int24_status4; + ZXIC_UINT32 smmu0_int24_status3; + ZXIC_UINT32 smmu0_int24_status2; + ZXIC_UINT32 smmu0_int24_status1; + ZXIC_UINT32 smmu0_int24_status0; +} 
DPP_SMMU0_SMMU0_SMMU0_INT24_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int25_en_t { + ZXIC_UINT32 smmu0_int25_en31; + ZXIC_UINT32 smmu0_int25_en30; + ZXIC_UINT32 smmu0_int25_en29; + ZXIC_UINT32 smmu0_int25_en28; + ZXIC_UINT32 smmu0_int25_en27; + ZXIC_UINT32 smmu0_int25_en26; + ZXIC_UINT32 smmu0_int25_en25; + ZXIC_UINT32 smmu0_int25_en24; + ZXIC_UINT32 smmu0_int25_en23; + ZXIC_UINT32 smmu0_int25_en22; + ZXIC_UINT32 smmu0_int25_en21; + ZXIC_UINT32 smmu0_int25_en20; + ZXIC_UINT32 smmu0_int25_en19; + ZXIC_UINT32 smmu0_int25_en18; + ZXIC_UINT32 smmu0_int25_en17; + ZXIC_UINT32 smmu0_int25_en16; + ZXIC_UINT32 smmu0_int25_en15; + ZXIC_UINT32 smmu0_int25_en14; + ZXIC_UINT32 smmu0_int25_en13; + ZXIC_UINT32 smmu0_int25_en12; + ZXIC_UINT32 smmu0_int25_en11; + ZXIC_UINT32 smmu0_int25_en10; + ZXIC_UINT32 smmu0_int25_en9; + ZXIC_UINT32 smmu0_int25_en8; + ZXIC_UINT32 smmu0_int25_en7; + ZXIC_UINT32 smmu0_int25_en6; + ZXIC_UINT32 smmu0_int25_en5; + ZXIC_UINT32 smmu0_int25_en4; + ZXIC_UINT32 smmu0_int25_en3; + ZXIC_UINT32 smmu0_int25_en2; + ZXIC_UINT32 smmu0_int25_en1; + ZXIC_UINT32 smmu0_int25_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT25_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int25_mask_t { + ZXIC_UINT32 smmu0_int25_mask31; + ZXIC_UINT32 smmu0_int25_mask30; + ZXIC_UINT32 smmu0_int25_mask29; + ZXIC_UINT32 smmu0_int25_mask28; + ZXIC_UINT32 smmu0_int25_mask27; + ZXIC_UINT32 smmu0_int25_mask26; + ZXIC_UINT32 smmu0_int25_mask25; + ZXIC_UINT32 smmu0_int25_mask24; + ZXIC_UINT32 smmu0_int25_mask23; + ZXIC_UINT32 smmu0_int25_mask22; + ZXIC_UINT32 smmu0_int25_mask21; + ZXIC_UINT32 smmu0_int25_mask20; + ZXIC_UINT32 smmu0_int25_mask19; + ZXIC_UINT32 smmu0_int25_mask18; + ZXIC_UINT32 smmu0_int25_mask17; + ZXIC_UINT32 smmu0_int25_mask16; + ZXIC_UINT32 smmu0_int25_mask15; + ZXIC_UINT32 smmu0_int25_mask14; + ZXIC_UINT32 smmu0_int25_mask13; + ZXIC_UINT32 smmu0_int25_mask12; + ZXIC_UINT32 smmu0_int25_mask11; + ZXIC_UINT32 smmu0_int25_mask10; + ZXIC_UINT32 smmu0_int25_mask9; + ZXIC_UINT32 
smmu0_int25_mask8; + ZXIC_UINT32 smmu0_int25_mask7; + ZXIC_UINT32 smmu0_int25_mask6; + ZXIC_UINT32 smmu0_int25_mask5; + ZXIC_UINT32 smmu0_int25_mask4; + ZXIC_UINT32 smmu0_int25_mask3; + ZXIC_UINT32 smmu0_int25_mask2; + ZXIC_UINT32 smmu0_int25_mask1; + ZXIC_UINT32 smmu0_int25_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT25_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int25_status_t { + ZXIC_UINT32 smmu0_int25_status31; + ZXIC_UINT32 smmu0_int25_status30; + ZXIC_UINT32 smmu0_int25_status29; + ZXIC_UINT32 smmu0_int25_status28; + ZXIC_UINT32 smmu0_int25_status27; + ZXIC_UINT32 smmu0_int25_status26; + ZXIC_UINT32 smmu0_int25_status25; + ZXIC_UINT32 smmu0_int25_status24; + ZXIC_UINT32 smmu0_int25_status23; + ZXIC_UINT32 smmu0_int25_status22; + ZXIC_UINT32 smmu0_int25_status21; + ZXIC_UINT32 smmu0_int25_status20; + ZXIC_UINT32 smmu0_int25_status19; + ZXIC_UINT32 smmu0_int25_status18; + ZXIC_UINT32 smmu0_int25_status17; + ZXIC_UINT32 smmu0_int25_status16; + ZXIC_UINT32 smmu0_int25_status15; + ZXIC_UINT32 smmu0_int25_status14; + ZXIC_UINT32 smmu0_int25_status13; + ZXIC_UINT32 smmu0_int25_status12; + ZXIC_UINT32 smmu0_int25_status11; + ZXIC_UINT32 smmu0_int25_status10; + ZXIC_UINT32 smmu0_int25_status9; + ZXIC_UINT32 smmu0_int25_status8; + ZXIC_UINT32 smmu0_int25_status7; + ZXIC_UINT32 smmu0_int25_status6; + ZXIC_UINT32 smmu0_int25_status5; + ZXIC_UINT32 smmu0_int25_status4; + ZXIC_UINT32 smmu0_int25_status3; + ZXIC_UINT32 smmu0_int25_status2; + ZXIC_UINT32 smmu0_int25_status1; + ZXIC_UINT32 smmu0_int25_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT25_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int26_en_t { + ZXIC_UINT32 smmu0_int26_en31; + ZXIC_UINT32 smmu0_int26_en30; + ZXIC_UINT32 smmu0_int26_en29; + ZXIC_UINT32 smmu0_int26_en28; + ZXIC_UINT32 smmu0_int26_en27; + ZXIC_UINT32 smmu0_int26_en26; + ZXIC_UINT32 smmu0_int26_en25; + ZXIC_UINT32 smmu0_int26_en24; + ZXIC_UINT32 smmu0_int26_en23; + ZXIC_UINT32 smmu0_int26_en22; + ZXIC_UINT32 smmu0_int26_en21; + ZXIC_UINT32 smmu0_int26_en20; 
+ ZXIC_UINT32 smmu0_int26_en19; + ZXIC_UINT32 smmu0_int26_en18; + ZXIC_UINT32 smmu0_int26_en17; + ZXIC_UINT32 smmu0_int26_en16; + ZXIC_UINT32 smmu0_int26_en15; + ZXIC_UINT32 smmu0_int26_en14; + ZXIC_UINT32 smmu0_int26_en13; + ZXIC_UINT32 smmu0_int26_en12; + ZXIC_UINT32 smmu0_int26_en11; + ZXIC_UINT32 smmu0_int26_en10; + ZXIC_UINT32 smmu0_int26_en9; + ZXIC_UINT32 smmu0_int26_en8; + ZXIC_UINT32 smmu0_int26_en7; + ZXIC_UINT32 smmu0_int26_en6; + ZXIC_UINT32 smmu0_int26_en5; + ZXIC_UINT32 smmu0_int26_en4; + ZXIC_UINT32 smmu0_int26_en3; + ZXIC_UINT32 smmu0_int26_en2; + ZXIC_UINT32 smmu0_int26_en1; + ZXIC_UINT32 smmu0_int26_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT26_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int26_mask_t { + ZXIC_UINT32 smmu0_int26_mask31; + ZXIC_UINT32 smmu0_int26_mask30; + ZXIC_UINT32 smmu0_int26_mask29; + ZXIC_UINT32 smmu0_int26_mask28; + ZXIC_UINT32 smmu0_int26_mask27; + ZXIC_UINT32 smmu0_int26_mask26; + ZXIC_UINT32 smmu0_int26_mask25; + ZXIC_UINT32 smmu0_int26_mask24; + ZXIC_UINT32 smmu0_int26_mask23; + ZXIC_UINT32 smmu0_int26_mask22; + ZXIC_UINT32 smmu0_int26_mask21; + ZXIC_UINT32 smmu0_int26_mask20; + ZXIC_UINT32 smmu0_int26_mask19; + ZXIC_UINT32 smmu0_int26_mask18; + ZXIC_UINT32 smmu0_int26_mask17; + ZXIC_UINT32 smmu0_int26_mask16; + ZXIC_UINT32 smmu0_int26_mask15; + ZXIC_UINT32 smmu0_int26_mask14; + ZXIC_UINT32 smmu0_int26_mask13; + ZXIC_UINT32 smmu0_int26_mask12; + ZXIC_UINT32 smmu0_int26_mask11; + ZXIC_UINT32 smmu0_int26_mask10; + ZXIC_UINT32 smmu0_int26_mask9; + ZXIC_UINT32 smmu0_int26_mask8; + ZXIC_UINT32 smmu0_int26_mask7; + ZXIC_UINT32 smmu0_int26_mask6; + ZXIC_UINT32 smmu0_int26_mask5; + ZXIC_UINT32 smmu0_int26_mask4; + ZXIC_UINT32 smmu0_int26_mask3; + ZXIC_UINT32 smmu0_int26_mask2; + ZXIC_UINT32 smmu0_int26_mask1; + ZXIC_UINT32 smmu0_int26_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT26_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int26_status_t { + ZXIC_UINT32 smmu0_int26_status31; + ZXIC_UINT32 smmu0_int26_status30; + ZXIC_UINT32 
smmu0_int26_status29; + ZXIC_UINT32 smmu0_int26_status28; + ZXIC_UINT32 smmu0_int26_status27; + ZXIC_UINT32 smmu0_int26_status26; + ZXIC_UINT32 smmu0_int26_status25; + ZXIC_UINT32 smmu0_int26_status24; + ZXIC_UINT32 smmu0_int26_status23; + ZXIC_UINT32 smmu0_int26_status22; + ZXIC_UINT32 smmu0_int26_status21; + ZXIC_UINT32 smmu0_int26_status20; + ZXIC_UINT32 smmu0_int26_status19; + ZXIC_UINT32 smmu0_int26_status18; + ZXIC_UINT32 smmu0_int26_status17; + ZXIC_UINT32 smmu0_int26_status16; + ZXIC_UINT32 smmu0_int26_status15; + ZXIC_UINT32 smmu0_int26_status14; + ZXIC_UINT32 smmu0_int26_status13; + ZXIC_UINT32 smmu0_int26_status12; + ZXIC_UINT32 smmu0_int26_status11; + ZXIC_UINT32 smmu0_int26_status10; + ZXIC_UINT32 smmu0_int26_status9; + ZXIC_UINT32 smmu0_int26_status8; + ZXIC_UINT32 smmu0_int26_status7; + ZXIC_UINT32 smmu0_int26_status6; + ZXIC_UINT32 smmu0_int26_status5; + ZXIC_UINT32 smmu0_int26_status4; + ZXIC_UINT32 smmu0_int26_status3; + ZXIC_UINT32 smmu0_int26_status2; + ZXIC_UINT32 smmu0_int26_status1; + ZXIC_UINT32 smmu0_int26_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT26_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int27_en_t { + ZXIC_UINT32 smmu0_int27_en31; + ZXIC_UINT32 smmu0_int27_en30; + ZXIC_UINT32 smmu0_int27_en29; + ZXIC_UINT32 smmu0_int27_en28; + ZXIC_UINT32 smmu0_int27_en27; + ZXIC_UINT32 smmu0_int27_en26; + ZXIC_UINT32 smmu0_int27_en25; + ZXIC_UINT32 smmu0_int27_en24; + ZXIC_UINT32 smmu0_int27_en23; + ZXIC_UINT32 smmu0_int27_en22; + ZXIC_UINT32 smmu0_int27_en21; + ZXIC_UINT32 smmu0_int27_en20; + ZXIC_UINT32 smmu0_int27_en19; + ZXIC_UINT32 smmu0_int27_en18; + ZXIC_UINT32 smmu0_int27_en17; + ZXIC_UINT32 smmu0_int27_en16; + ZXIC_UINT32 smmu0_int27_en15; + ZXIC_UINT32 smmu0_int27_en14; + ZXIC_UINT32 smmu0_int27_en13; + ZXIC_UINT32 smmu0_int27_en12; + ZXIC_UINT32 smmu0_int27_en11; + ZXIC_UINT32 smmu0_int27_en10; + ZXIC_UINT32 smmu0_int27_en9; + ZXIC_UINT32 smmu0_int27_en8; + ZXIC_UINT32 smmu0_int27_en7; + ZXIC_UINT32 smmu0_int27_en6; + ZXIC_UINT32 
smmu0_int27_en5; + ZXIC_UINT32 smmu0_int27_en4; + ZXIC_UINT32 smmu0_int27_en3; + ZXIC_UINT32 smmu0_int27_en2; + ZXIC_UINT32 smmu0_int27_en1; + ZXIC_UINT32 smmu0_int27_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT27_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int27_mask_t { + ZXIC_UINT32 smmu0_int27_mask31; + ZXIC_UINT32 smmu0_int27_mask30; + ZXIC_UINT32 smmu0_int27_mask29; + ZXIC_UINT32 smmu0_int27_mask28; + ZXIC_UINT32 smmu0_int27_mask27; + ZXIC_UINT32 smmu0_int27_mask26; + ZXIC_UINT32 smmu0_int27_mask25; + ZXIC_UINT32 smmu0_int27_mask24; + ZXIC_UINT32 smmu0_int27_mask23; + ZXIC_UINT32 smmu0_int27_mask22; + ZXIC_UINT32 smmu0_int27_mask21; + ZXIC_UINT32 smmu0_int27_mask20; + ZXIC_UINT32 smmu0_int27_mask19; + ZXIC_UINT32 smmu0_int27_mask18; + ZXIC_UINT32 smmu0_int27_mask17; + ZXIC_UINT32 smmu0_int27_mask16; + ZXIC_UINT32 smmu0_int27_mask15; + ZXIC_UINT32 smmu0_int27_mask14; + ZXIC_UINT32 smmu0_int27_mask13; + ZXIC_UINT32 smmu0_int27_mask12; + ZXIC_UINT32 smmu0_int27_mask11; + ZXIC_UINT32 smmu0_int27_mask10; + ZXIC_UINT32 smmu0_int27_mask9; + ZXIC_UINT32 smmu0_int27_mask8; + ZXIC_UINT32 smmu0_int27_mask7; + ZXIC_UINT32 smmu0_int27_mask6; + ZXIC_UINT32 smmu0_int27_mask5; + ZXIC_UINT32 smmu0_int27_mask4; + ZXIC_UINT32 smmu0_int27_mask3; + ZXIC_UINT32 smmu0_int27_mask2; + ZXIC_UINT32 smmu0_int27_mask1; + ZXIC_UINT32 smmu0_int27_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT27_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int27_status_t { + ZXIC_UINT32 smmu0_int27_status31; + ZXIC_UINT32 smmu0_int27_status30; + ZXIC_UINT32 smmu0_int27_status29; + ZXIC_UINT32 smmu0_int27_status28; + ZXIC_UINT32 smmu0_int27_status27; + ZXIC_UINT32 smmu0_int27_status26; + ZXIC_UINT32 smmu0_int27_status25; + ZXIC_UINT32 smmu0_int27_status24; + ZXIC_UINT32 smmu0_int27_status23; + ZXIC_UINT32 smmu0_int27_status22; + ZXIC_UINT32 smmu0_int27_status21; + ZXIC_UINT32 smmu0_int27_status20; + ZXIC_UINT32 smmu0_int27_status19; + ZXIC_UINT32 smmu0_int27_status18; + ZXIC_UINT32 smmu0_int27_status17; + ZXIC_UINT32 
smmu0_int27_status16; + ZXIC_UINT32 smmu0_int27_status15; + ZXIC_UINT32 smmu0_int27_status14; + ZXIC_UINT32 smmu0_int27_status13; + ZXIC_UINT32 smmu0_int27_status12; + ZXIC_UINT32 smmu0_int27_status11; + ZXIC_UINT32 smmu0_int27_status10; + ZXIC_UINT32 smmu0_int27_status9; + ZXIC_UINT32 smmu0_int27_status8; + ZXIC_UINT32 smmu0_int27_status7; + ZXIC_UINT32 smmu0_int27_status6; + ZXIC_UINT32 smmu0_int27_status5; + ZXIC_UINT32 smmu0_int27_status4; + ZXIC_UINT32 smmu0_int27_status3; + ZXIC_UINT32 smmu0_int27_status2; + ZXIC_UINT32 smmu0_int27_status1; + ZXIC_UINT32 smmu0_int27_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT27_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int28_en_t { + ZXIC_UINT32 smmu0_int28_en31; + ZXIC_UINT32 smmu0_int28_en30; + ZXIC_UINT32 smmu0_int28_en29; + ZXIC_UINT32 smmu0_int28_en28; + ZXIC_UINT32 smmu0_int28_en27; + ZXIC_UINT32 smmu0_int28_en26; + ZXIC_UINT32 smmu0_int28_en25; + ZXIC_UINT32 smmu0_int28_en24; + ZXIC_UINT32 smmu0_int28_en23; + ZXIC_UINT32 smmu0_int28_en22; + ZXIC_UINT32 smmu0_int28_en21; + ZXIC_UINT32 smmu0_int28_en20; + ZXIC_UINT32 smmu0_int28_en19; + ZXIC_UINT32 smmu0_int28_en18; + ZXIC_UINT32 smmu0_int28_en17; + ZXIC_UINT32 smmu0_int28_en16; + ZXIC_UINT32 smmu0_int28_en15; + ZXIC_UINT32 smmu0_int28_en14; + ZXIC_UINT32 smmu0_int28_en13; + ZXIC_UINT32 smmu0_int28_en12; + ZXIC_UINT32 smmu0_int28_en11; + ZXIC_UINT32 smmu0_int28_en10; + ZXIC_UINT32 smmu0_int28_en9; + ZXIC_UINT32 smmu0_int28_en8; + ZXIC_UINT32 smmu0_int28_en7; + ZXIC_UINT32 smmu0_int28_en6; + ZXIC_UINT32 smmu0_int28_en5; + ZXIC_UINT32 smmu0_int28_en4; + ZXIC_UINT32 smmu0_int28_en3; + ZXIC_UINT32 smmu0_int28_en2; + ZXIC_UINT32 smmu0_int28_en1; + ZXIC_UINT32 smmu0_int28_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT28_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int28_mask_t { + ZXIC_UINT32 smmu0_int28_mask31; + ZXIC_UINT32 smmu0_int28_mask30; + ZXIC_UINT32 smmu0_int28_mask29; + ZXIC_UINT32 smmu0_int28_mask28; + ZXIC_UINT32 smmu0_int28_mask27; + ZXIC_UINT32 smmu0_int28_mask26; + 
ZXIC_UINT32 smmu0_int28_mask25; + ZXIC_UINT32 smmu0_int28_mask24; + ZXIC_UINT32 smmu0_int28_mask23; + ZXIC_UINT32 smmu0_int28_mask22; + ZXIC_UINT32 smmu0_int28_mask21; + ZXIC_UINT32 smmu0_int28_mask20; + ZXIC_UINT32 smmu0_int28_mask19; + ZXIC_UINT32 smmu0_int28_mask18; + ZXIC_UINT32 smmu0_int28_mask17; + ZXIC_UINT32 smmu0_int28_mask16; + ZXIC_UINT32 smmu0_int28_mask15; + ZXIC_UINT32 smmu0_int28_mask14; + ZXIC_UINT32 smmu0_int28_mask13; + ZXIC_UINT32 smmu0_int28_mask12; + ZXIC_UINT32 smmu0_int28_mask11; + ZXIC_UINT32 smmu0_int28_mask10; + ZXIC_UINT32 smmu0_int28_mask9; + ZXIC_UINT32 smmu0_int28_mask8; + ZXIC_UINT32 smmu0_int28_mask7; + ZXIC_UINT32 smmu0_int28_mask6; + ZXIC_UINT32 smmu0_int28_mask5; + ZXIC_UINT32 smmu0_int28_mask4; + ZXIC_UINT32 smmu0_int28_mask3; + ZXIC_UINT32 smmu0_int28_mask2; + ZXIC_UINT32 smmu0_int28_mask1; + ZXIC_UINT32 smmu0_int28_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT28_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int28_status_t { + ZXIC_UINT32 smmu0_int28_status31; + ZXIC_UINT32 smmu0_int28_status30; + ZXIC_UINT32 smmu0_int28_status29; + ZXIC_UINT32 smmu0_int28_status28; + ZXIC_UINT32 smmu0_int28_status27; + ZXIC_UINT32 smmu0_int28_status26; + ZXIC_UINT32 smmu0_int28_status25; + ZXIC_UINT32 smmu0_int28_status24; + ZXIC_UINT32 smmu0_int28_status23; + ZXIC_UINT32 smmu0_int28_status22; + ZXIC_UINT32 smmu0_int28_status21; + ZXIC_UINT32 smmu0_int28_status20; + ZXIC_UINT32 smmu0_int28_status19; + ZXIC_UINT32 smmu0_int28_status18; + ZXIC_UINT32 smmu0_int28_status17; + ZXIC_UINT32 smmu0_int28_status16; + ZXIC_UINT32 smmu0_int28_status15; + ZXIC_UINT32 smmu0_int28_status14; + ZXIC_UINT32 smmu0_int28_status13; + ZXIC_UINT32 smmu0_int28_status12; + ZXIC_UINT32 smmu0_int28_status11; + ZXIC_UINT32 smmu0_int28_status10; + ZXIC_UINT32 smmu0_int28_status9; + ZXIC_UINT32 smmu0_int28_status8; + ZXIC_UINT32 smmu0_int28_status7; + ZXIC_UINT32 smmu0_int28_status6; + ZXIC_UINT32 smmu0_int28_status5; + ZXIC_UINT32 smmu0_int28_status4; + ZXIC_UINT32 
smmu0_int28_status3; + ZXIC_UINT32 smmu0_int28_status2; + ZXIC_UINT32 smmu0_int28_status1; + ZXIC_UINT32 smmu0_int28_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT28_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int29_en_t { + ZXIC_UINT32 smmu0_int29_en31; + ZXIC_UINT32 smmu0_int29_en30; + ZXIC_UINT32 smmu0_int29_en29; + ZXIC_UINT32 smmu0_int29_en28; + ZXIC_UINT32 smmu0_int29_en27; + ZXIC_UINT32 smmu0_int29_en26; + ZXIC_UINT32 smmu0_int29_en25; + ZXIC_UINT32 smmu0_int29_en24; + ZXIC_UINT32 smmu0_int29_en23; + ZXIC_UINT32 smmu0_int29_en22; + ZXIC_UINT32 smmu0_int29_en21; + ZXIC_UINT32 smmu0_int29_en20; + ZXIC_UINT32 smmu0_int29_en19; + ZXIC_UINT32 smmu0_int29_en18; + ZXIC_UINT32 smmu0_int29_en17; + ZXIC_UINT32 smmu0_int29_en16; + ZXIC_UINT32 smmu0_int29_en15; + ZXIC_UINT32 smmu0_int29_en14; + ZXIC_UINT32 smmu0_int29_en13; + ZXIC_UINT32 smmu0_int29_en12; + ZXIC_UINT32 smmu0_int29_en11; + ZXIC_UINT32 smmu0_int29_en10; + ZXIC_UINT32 smmu0_int29_en9; + ZXIC_UINT32 smmu0_int29_en8; + ZXIC_UINT32 smmu0_int29_en7; + ZXIC_UINT32 smmu0_int29_en6; + ZXIC_UINT32 smmu0_int29_en5; + ZXIC_UINT32 smmu0_int29_en4; + ZXIC_UINT32 smmu0_int29_en3; + ZXIC_UINT32 smmu0_int29_en2; + ZXIC_UINT32 smmu0_int29_en1; + ZXIC_UINT32 smmu0_int29_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT29_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int29_mask_t { + ZXIC_UINT32 smmu0_int29_mask31; + ZXIC_UINT32 smmu0_int29_mask30; + ZXIC_UINT32 smmu0_int29_mask29; + ZXIC_UINT32 smmu0_int29_mask28; + ZXIC_UINT32 smmu0_int29_mask27; + ZXIC_UINT32 smmu0_int29_mask26; + ZXIC_UINT32 smmu0_int29_mask25; + ZXIC_UINT32 smmu0_int29_mask24; + ZXIC_UINT32 smmu0_int29_mask23; + ZXIC_UINT32 smmu0_int29_mask22; + ZXIC_UINT32 smmu0_int29_mask21; + ZXIC_UINT32 smmu0_int29_mask20; + ZXIC_UINT32 smmu0_int29_mask19; + ZXIC_UINT32 smmu0_int29_mask18; + ZXIC_UINT32 smmu0_int29_mask17; + ZXIC_UINT32 smmu0_int29_mask16; + ZXIC_UINT32 smmu0_int29_mask15; + ZXIC_UINT32 smmu0_int29_mask14; + ZXIC_UINT32 smmu0_int29_mask13; + ZXIC_UINT32 
smmu0_int29_mask12; + ZXIC_UINT32 smmu0_int29_mask11; + ZXIC_UINT32 smmu0_int29_mask10; + ZXIC_UINT32 smmu0_int29_mask9; + ZXIC_UINT32 smmu0_int29_mask8; + ZXIC_UINT32 smmu0_int29_mask7; + ZXIC_UINT32 smmu0_int29_mask6; + ZXIC_UINT32 smmu0_int29_mask5; + ZXIC_UINT32 smmu0_int29_mask4; + ZXIC_UINT32 smmu0_int29_mask3; + ZXIC_UINT32 smmu0_int29_mask2; + ZXIC_UINT32 smmu0_int29_mask1; + ZXIC_UINT32 smmu0_int29_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT29_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int29_status_t { + ZXIC_UINT32 smmu0_int29_status31; + ZXIC_UINT32 smmu0_int29_status30; + ZXIC_UINT32 smmu0_int29_status29; + ZXIC_UINT32 smmu0_int29_status28; + ZXIC_UINT32 smmu0_int29_status27; + ZXIC_UINT32 smmu0_int29_status26; + ZXIC_UINT32 smmu0_int29_status25; + ZXIC_UINT32 smmu0_int29_status24; + ZXIC_UINT32 smmu0_int29_status23; + ZXIC_UINT32 smmu0_int29_status22; + ZXIC_UINT32 smmu0_int29_status21; + ZXIC_UINT32 smmu0_int29_status20; + ZXIC_UINT32 smmu0_int29_status19; + ZXIC_UINT32 smmu0_int29_status18; + ZXIC_UINT32 smmu0_int29_status17; + ZXIC_UINT32 smmu0_int29_status16; + ZXIC_UINT32 smmu0_int29_status15; + ZXIC_UINT32 smmu0_int29_status14; + ZXIC_UINT32 smmu0_int29_status13; + ZXIC_UINT32 smmu0_int29_status12; + ZXIC_UINT32 smmu0_int29_status11; + ZXIC_UINT32 smmu0_int29_status10; + ZXIC_UINT32 smmu0_int29_status9; + ZXIC_UINT32 smmu0_int29_status8; + ZXIC_UINT32 smmu0_int29_status7; + ZXIC_UINT32 smmu0_int29_status6; + ZXIC_UINT32 smmu0_int29_status5; + ZXIC_UINT32 smmu0_int29_status4; + ZXIC_UINT32 smmu0_int29_status3; + ZXIC_UINT32 smmu0_int29_status2; + ZXIC_UINT32 smmu0_int29_status1; + ZXIC_UINT32 smmu0_int29_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT29_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int30_en_t { + ZXIC_UINT32 smmu0_int30_en31; + ZXIC_UINT32 smmu0_int30_en30; + ZXIC_UINT32 smmu0_int30_en29; + ZXIC_UINT32 smmu0_int30_en28; + ZXIC_UINT32 smmu0_int30_en27; + ZXIC_UINT32 smmu0_int30_en26; + ZXIC_UINT32 smmu0_int30_en25; + ZXIC_UINT32 
smmu0_int30_en24; + ZXIC_UINT32 smmu0_int30_en23; + ZXIC_UINT32 smmu0_int30_en22; + ZXIC_UINT32 smmu0_int30_en21; + ZXIC_UINT32 smmu0_int30_en20; + ZXIC_UINT32 smmu0_int30_en19; + ZXIC_UINT32 smmu0_int30_en18; + ZXIC_UINT32 smmu0_int30_en17; + ZXIC_UINT32 smmu0_int30_en16; + ZXIC_UINT32 smmu0_int30_en15; + ZXIC_UINT32 smmu0_int30_en14; + ZXIC_UINT32 smmu0_int30_en13; + ZXIC_UINT32 smmu0_int30_en12; + ZXIC_UINT32 smmu0_int30_en11; + ZXIC_UINT32 smmu0_int30_en10; + ZXIC_UINT32 smmu0_int30_en9; + ZXIC_UINT32 smmu0_int30_en8; + ZXIC_UINT32 smmu0_int30_en7; + ZXIC_UINT32 smmu0_int30_en6; + ZXIC_UINT32 smmu0_int30_en5; + ZXIC_UINT32 smmu0_int30_en4; + ZXIC_UINT32 smmu0_int30_en3; + ZXIC_UINT32 smmu0_int30_en2; + ZXIC_UINT32 smmu0_int30_en1; + ZXIC_UINT32 smmu0_int30_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT30_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int30_mask_t { + ZXIC_UINT32 smmu0_int30_mask31; + ZXIC_UINT32 smmu0_int30_mask30; + ZXIC_UINT32 smmu0_int30_mask29; + ZXIC_UINT32 smmu0_int30_mask28; + ZXIC_UINT32 smmu0_int30_mask27; + ZXIC_UINT32 smmu0_int30_mask26; + ZXIC_UINT32 smmu0_int30_mask25; + ZXIC_UINT32 smmu0_int30_mask24; + ZXIC_UINT32 smmu0_int30_mask23; + ZXIC_UINT32 smmu0_int30_mask22; + ZXIC_UINT32 smmu0_int30_mask21; + ZXIC_UINT32 smmu0_int30_mask20; + ZXIC_UINT32 smmu0_int30_mask19; + ZXIC_UINT32 smmu0_int30_mask18; + ZXIC_UINT32 smmu0_int30_mask17; + ZXIC_UINT32 smmu0_int30_mask16; + ZXIC_UINT32 smmu0_int30_mask15; + ZXIC_UINT32 smmu0_int30_mask14; + ZXIC_UINT32 smmu0_int30_mask13; + ZXIC_UINT32 smmu0_int30_mask12; + ZXIC_UINT32 smmu0_int30_mask11; + ZXIC_UINT32 smmu0_int30_mask10; + ZXIC_UINT32 smmu0_int30_mask9; + ZXIC_UINT32 smmu0_int30_mask8; + ZXIC_UINT32 smmu0_int30_mask7; + ZXIC_UINT32 smmu0_int30_mask6; + ZXIC_UINT32 smmu0_int30_mask5; + ZXIC_UINT32 smmu0_int30_mask4; + ZXIC_UINT32 smmu0_int30_mask3; + ZXIC_UINT32 smmu0_int30_mask2; + ZXIC_UINT32 smmu0_int30_mask1; + ZXIC_UINT32 smmu0_int30_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT30_MASK_T; + +typedef 
struct dpp_smmu0_smmu0_smmu0_int30_status_t { + ZXIC_UINT32 smmu0_int30_status31; + ZXIC_UINT32 smmu0_int30_status30; + ZXIC_UINT32 smmu0_int30_status29; + ZXIC_UINT32 smmu0_int30_status28; + ZXIC_UINT32 smmu0_int30_status27; + ZXIC_UINT32 smmu0_int30_status26; + ZXIC_UINT32 smmu0_int30_status25; + ZXIC_UINT32 smmu0_int30_status24; + ZXIC_UINT32 smmu0_int30_status23; + ZXIC_UINT32 smmu0_int30_status22; + ZXIC_UINT32 smmu0_int30_status21; + ZXIC_UINT32 smmu0_int30_status20; + ZXIC_UINT32 smmu0_int30_status19; + ZXIC_UINT32 smmu0_int30_status18; + ZXIC_UINT32 smmu0_int30_status17; + ZXIC_UINT32 smmu0_int30_status16; + ZXIC_UINT32 smmu0_int30_status15; + ZXIC_UINT32 smmu0_int30_status14; + ZXIC_UINT32 smmu0_int30_status13; + ZXIC_UINT32 smmu0_int30_status12; + ZXIC_UINT32 smmu0_int30_status11; + ZXIC_UINT32 smmu0_int30_status10; + ZXIC_UINT32 smmu0_int30_status9; + ZXIC_UINT32 smmu0_int30_status8; + ZXIC_UINT32 smmu0_int30_status7; + ZXIC_UINT32 smmu0_int30_status6; + ZXIC_UINT32 smmu0_int30_status5; + ZXIC_UINT32 smmu0_int30_status4; + ZXIC_UINT32 smmu0_int30_status3; + ZXIC_UINT32 smmu0_int30_status2; + ZXIC_UINT32 smmu0_int30_status1; + ZXIC_UINT32 smmu0_int30_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT30_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int31_en_t { + ZXIC_UINT32 smmu0_int31_en31; + ZXIC_UINT32 smmu0_int31_en30; + ZXIC_UINT32 smmu0_int31_en29; + ZXIC_UINT32 smmu0_int31_en28; + ZXIC_UINT32 smmu0_int31_en27; + ZXIC_UINT32 smmu0_int31_en26; + ZXIC_UINT32 smmu0_int31_en25; + ZXIC_UINT32 smmu0_int31_en24; + ZXIC_UINT32 smmu0_int31_en23; + ZXIC_UINT32 smmu0_int31_en22; + ZXIC_UINT32 smmu0_int31_en21; + ZXIC_UINT32 smmu0_int31_en20; + ZXIC_UINT32 smmu0_int31_en19; + ZXIC_UINT32 smmu0_int31_en18; + ZXIC_UINT32 smmu0_int31_en17; + ZXIC_UINT32 smmu0_int31_en16; + ZXIC_UINT32 smmu0_int31_en15; + ZXIC_UINT32 smmu0_int31_en14; + ZXIC_UINT32 smmu0_int31_en13; + ZXIC_UINT32 smmu0_int31_en12; + ZXIC_UINT32 smmu0_int31_en11; + ZXIC_UINT32 smmu0_int31_en10; + ZXIC_UINT32 
smmu0_int31_en9; + ZXIC_UINT32 smmu0_int31_en8; + ZXIC_UINT32 smmu0_int31_en7; + ZXIC_UINT32 smmu0_int31_en6; + ZXIC_UINT32 smmu0_int31_en5; + ZXIC_UINT32 smmu0_int31_en4; + ZXIC_UINT32 smmu0_int31_en3; + ZXIC_UINT32 smmu0_int31_en2; + ZXIC_UINT32 smmu0_int31_en1; + ZXIC_UINT32 smmu0_int31_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT31_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int31_mask_t { + ZXIC_UINT32 smmu0_int31_mask31; + ZXIC_UINT32 smmu0_int31_mask30; + ZXIC_UINT32 smmu0_int31_mask29; + ZXIC_UINT32 smmu0_int31_mask28; + ZXIC_UINT32 smmu0_int31_mask27; + ZXIC_UINT32 smmu0_int31_mask26; + ZXIC_UINT32 smmu0_int31_mask25; + ZXIC_UINT32 smmu0_int31_mask24; + ZXIC_UINT32 smmu0_int31_mask23; + ZXIC_UINT32 smmu0_int31_mask22; + ZXIC_UINT32 smmu0_int31_mask21; + ZXIC_UINT32 smmu0_int31_mask20; + ZXIC_UINT32 smmu0_int31_mask19; + ZXIC_UINT32 smmu0_int31_mask18; + ZXIC_UINT32 smmu0_int31_mask17; + ZXIC_UINT32 smmu0_int31_mask16; + ZXIC_UINT32 smmu0_int31_mask15; + ZXIC_UINT32 smmu0_int31_mask14; + ZXIC_UINT32 smmu0_int31_mask13; + ZXIC_UINT32 smmu0_int31_mask12; + ZXIC_UINT32 smmu0_int31_mask11; + ZXIC_UINT32 smmu0_int31_mask10; + ZXIC_UINT32 smmu0_int31_mask9; + ZXIC_UINT32 smmu0_int31_mask8; + ZXIC_UINT32 smmu0_int31_mask7; + ZXIC_UINT32 smmu0_int31_mask6; + ZXIC_UINT32 smmu0_int31_mask5; + ZXIC_UINT32 smmu0_int31_mask4; + ZXIC_UINT32 smmu0_int31_mask3; + ZXIC_UINT32 smmu0_int31_mask2; + ZXIC_UINT32 smmu0_int31_mask1; + ZXIC_UINT32 smmu0_int31_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT31_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int31_status_t { + ZXIC_UINT32 smmu0_int31_status31; + ZXIC_UINT32 smmu0_int31_status30; + ZXIC_UINT32 smmu0_int31_status29; + ZXIC_UINT32 smmu0_int31_status28; + ZXIC_UINT32 smmu0_int31_status27; + ZXIC_UINT32 smmu0_int31_status26; + ZXIC_UINT32 smmu0_int31_status25; + ZXIC_UINT32 smmu0_int31_status24; + ZXIC_UINT32 smmu0_int31_status23; + ZXIC_UINT32 smmu0_int31_status22; + ZXIC_UINT32 smmu0_int31_status21; + ZXIC_UINT32 smmu0_int31_status20; + 
ZXIC_UINT32 smmu0_int31_status19; + ZXIC_UINT32 smmu0_int31_status18; + ZXIC_UINT32 smmu0_int31_status17; + ZXIC_UINT32 smmu0_int31_status16; + ZXIC_UINT32 smmu0_int31_status15; + ZXIC_UINT32 smmu0_int31_status14; + ZXIC_UINT32 smmu0_int31_status13; + ZXIC_UINT32 smmu0_int31_status12; + ZXIC_UINT32 smmu0_int31_status11; + ZXIC_UINT32 smmu0_int31_status10; + ZXIC_UINT32 smmu0_int31_status9; + ZXIC_UINT32 smmu0_int31_status8; + ZXIC_UINT32 smmu0_int31_status7; + ZXIC_UINT32 smmu0_int31_status6; + ZXIC_UINT32 smmu0_int31_status5; + ZXIC_UINT32 smmu0_int31_status4; + ZXIC_UINT32 smmu0_int31_status3; + ZXIC_UINT32 smmu0_int31_status2; + ZXIC_UINT32 smmu0_int31_status1; + ZXIC_UINT32 smmu0_int31_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT31_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int32_en_t { + ZXIC_UINT32 smmu0_int32_en31; + ZXIC_UINT32 smmu0_int32_en30; + ZXIC_UINT32 smmu0_int32_en29; + ZXIC_UINT32 smmu0_int32_en28; + ZXIC_UINT32 smmu0_int32_en27; + ZXIC_UINT32 smmu0_int32_en26; + ZXIC_UINT32 smmu0_int32_en25; + ZXIC_UINT32 smmu0_int32_en24; + ZXIC_UINT32 smmu0_int32_en23; + ZXIC_UINT32 smmu0_int32_en22; + ZXIC_UINT32 smmu0_int32_en21; + ZXIC_UINT32 smmu0_int32_en20; + ZXIC_UINT32 smmu0_int32_en19; + ZXIC_UINT32 smmu0_int32_en18; + ZXIC_UINT32 smmu0_int32_en17; + ZXIC_UINT32 smmu0_int32_en16; + ZXIC_UINT32 smmu0_int32_en15; + ZXIC_UINT32 smmu0_int32_en14; + ZXIC_UINT32 smmu0_int32_en13; + ZXIC_UINT32 smmu0_int32_en12; + ZXIC_UINT32 smmu0_int32_en11; + ZXIC_UINT32 smmu0_int32_en10; + ZXIC_UINT32 smmu0_int32_en9; + ZXIC_UINT32 smmu0_int32_en8; + ZXIC_UINT32 smmu0_int32_en7; + ZXIC_UINT32 smmu0_int32_en6; + ZXIC_UINT32 smmu0_int32_en5; + ZXIC_UINT32 smmu0_int32_en4; + ZXIC_UINT32 smmu0_int32_en3; + ZXIC_UINT32 smmu0_int32_en2; + ZXIC_UINT32 smmu0_int32_en1; + ZXIC_UINT32 smmu0_int32_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT32_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int32_mask_t { + ZXIC_UINT32 smmu0_int32_mask31; + ZXIC_UINT32 smmu0_int32_mask30; + ZXIC_UINT32 
smmu0_int32_mask29; + ZXIC_UINT32 smmu0_int32_mask28; + ZXIC_UINT32 smmu0_int32_mask27; + ZXIC_UINT32 smmu0_int32_mask26; + ZXIC_UINT32 smmu0_int32_mask25; + ZXIC_UINT32 smmu0_int32_mask24; + ZXIC_UINT32 smmu0_int32_mask23; + ZXIC_UINT32 smmu0_int32_mask22; + ZXIC_UINT32 smmu0_int32_mask21; + ZXIC_UINT32 smmu0_int32_mask20; + ZXIC_UINT32 smmu0_int32_mask19; + ZXIC_UINT32 smmu0_int32_mask18; + ZXIC_UINT32 smmu0_int32_mask17; + ZXIC_UINT32 smmu0_int32_mask16; + ZXIC_UINT32 smmu0_int32_mask15; + ZXIC_UINT32 smmu0_int32_mask14; + ZXIC_UINT32 smmu0_int32_mask13; + ZXIC_UINT32 smmu0_int32_mask12; + ZXIC_UINT32 smmu0_int32_mask11; + ZXIC_UINT32 smmu0_int32_mask10; + ZXIC_UINT32 smmu0_int32_mask9; + ZXIC_UINT32 smmu0_int32_mask8; + ZXIC_UINT32 smmu0_int32_mask7; + ZXIC_UINT32 smmu0_int32_mask6; + ZXIC_UINT32 smmu0_int32_mask5; + ZXIC_UINT32 smmu0_int32_mask4; + ZXIC_UINT32 smmu0_int32_mask3; + ZXIC_UINT32 smmu0_int32_mask2; + ZXIC_UINT32 smmu0_int32_mask1; + ZXIC_UINT32 smmu0_int32_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT32_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int32_status_t { + ZXIC_UINT32 smmu0_int32_status31; + ZXIC_UINT32 smmu0_int32_status30; + ZXIC_UINT32 smmu0_int32_status29; + ZXIC_UINT32 smmu0_int32_status28; + ZXIC_UINT32 smmu0_int32_status27; + ZXIC_UINT32 smmu0_int32_status26; + ZXIC_UINT32 smmu0_int32_status25; + ZXIC_UINT32 smmu0_int32_status24; + ZXIC_UINT32 smmu0_int32_status23; + ZXIC_UINT32 smmu0_int32_status22; + ZXIC_UINT32 smmu0_int32_status21; + ZXIC_UINT32 smmu0_int32_status20; + ZXIC_UINT32 smmu0_int32_status19; + ZXIC_UINT32 smmu0_int32_status18; + ZXIC_UINT32 smmu0_int32_status17; + ZXIC_UINT32 smmu0_int32_status16; + ZXIC_UINT32 smmu0_int32_status15; + ZXIC_UINT32 smmu0_int32_status14; + ZXIC_UINT32 smmu0_int32_status13; + ZXIC_UINT32 smmu0_int32_status12; + ZXIC_UINT32 smmu0_int32_status11; + ZXIC_UINT32 smmu0_int32_status10; + ZXIC_UINT32 smmu0_int32_status9; + ZXIC_UINT32 smmu0_int32_status8; + ZXIC_UINT32 smmu0_int32_status7; + 
ZXIC_UINT32 smmu0_int32_status6; + ZXIC_UINT32 smmu0_int32_status5; + ZXIC_UINT32 smmu0_int32_status4; + ZXIC_UINT32 smmu0_int32_status3; + ZXIC_UINT32 smmu0_int32_status2; + ZXIC_UINT32 smmu0_int32_status1; + ZXIC_UINT32 smmu0_int32_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT32_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int33_en_t { + ZXIC_UINT32 smmu0_int33_en31; + ZXIC_UINT32 smmu0_int33_en30; + ZXIC_UINT32 smmu0_int33_en29; + ZXIC_UINT32 smmu0_int33_en28; + ZXIC_UINT32 smmu0_int33_en27; + ZXIC_UINT32 smmu0_int33_en26; + ZXIC_UINT32 smmu0_int33_en25; + ZXIC_UINT32 smmu0_int33_en24; + ZXIC_UINT32 smmu0_int33_en23; + ZXIC_UINT32 smmu0_int33_en22; + ZXIC_UINT32 smmu0_int33_en21; + ZXIC_UINT32 smmu0_int33_en20; + ZXIC_UINT32 smmu0_int33_en19; + ZXIC_UINT32 smmu0_int33_en18; + ZXIC_UINT32 smmu0_int33_en17; + ZXIC_UINT32 smmu0_int33_en16; + ZXIC_UINT32 smmu0_int33_en15; + ZXIC_UINT32 smmu0_int33_en14; + ZXIC_UINT32 smmu0_int33_en13; + ZXIC_UINT32 smmu0_int33_en12; + ZXIC_UINT32 smmu0_int33_en11; + ZXIC_UINT32 smmu0_int33_en10; + ZXIC_UINT32 smmu0_int33_en9; + ZXIC_UINT32 smmu0_int33_en8; + ZXIC_UINT32 smmu0_int33_en7; + ZXIC_UINT32 smmu0_int33_en6; + ZXIC_UINT32 smmu0_int33_en5; + ZXIC_UINT32 smmu0_int33_en4; + ZXIC_UINT32 smmu0_int33_en3; + ZXIC_UINT32 smmu0_int33_en2; + ZXIC_UINT32 smmu0_int33_en1; + ZXIC_UINT32 smmu0_int33_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT33_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int33_mask_t { + ZXIC_UINT32 smmu0_int33_mask31; + ZXIC_UINT32 smmu0_int33_mask30; + ZXIC_UINT32 smmu0_int33_mask29; + ZXIC_UINT32 smmu0_int33_mask28; + ZXIC_UINT32 smmu0_int33_mask27; + ZXIC_UINT32 smmu0_int33_mask26; + ZXIC_UINT32 smmu0_int33_mask25; + ZXIC_UINT32 smmu0_int33_mask24; + ZXIC_UINT32 smmu0_int33_mask23; + ZXIC_UINT32 smmu0_int33_mask22; + ZXIC_UINT32 smmu0_int33_mask21; + ZXIC_UINT32 smmu0_int33_mask20; + ZXIC_UINT32 smmu0_int33_mask19; + ZXIC_UINT32 smmu0_int33_mask18; + ZXIC_UINT32 smmu0_int33_mask17; + ZXIC_UINT32 smmu0_int33_mask16; + 
ZXIC_UINT32 smmu0_int33_mask15; + ZXIC_UINT32 smmu0_int33_mask14; + ZXIC_UINT32 smmu0_int33_mask13; + ZXIC_UINT32 smmu0_int33_mask12; + ZXIC_UINT32 smmu0_int33_mask11; + ZXIC_UINT32 smmu0_int33_mask10; + ZXIC_UINT32 smmu0_int33_mask9; + ZXIC_UINT32 smmu0_int33_mask8; + ZXIC_UINT32 smmu0_int33_mask7; + ZXIC_UINT32 smmu0_int33_mask6; + ZXIC_UINT32 smmu0_int33_mask5; + ZXIC_UINT32 smmu0_int33_mask4; + ZXIC_UINT32 smmu0_int33_mask3; + ZXIC_UINT32 smmu0_int33_mask2; + ZXIC_UINT32 smmu0_int33_mask1; + ZXIC_UINT32 smmu0_int33_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT33_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int33_status_t { + ZXIC_UINT32 smmu0_int33_status31; + ZXIC_UINT32 smmu0_int33_status30; + ZXIC_UINT32 smmu0_int33_status29; + ZXIC_UINT32 smmu0_int33_status28; + ZXIC_UINT32 smmu0_int33_status27; + ZXIC_UINT32 smmu0_int33_status26; + ZXIC_UINT32 smmu0_int33_status25; + ZXIC_UINT32 smmu0_int33_status24; + ZXIC_UINT32 smmu0_int33_status23; + ZXIC_UINT32 smmu0_int33_status22; + ZXIC_UINT32 smmu0_int33_status21; + ZXIC_UINT32 smmu0_int33_status20; + ZXIC_UINT32 smmu0_int33_status19; + ZXIC_UINT32 smmu0_int33_status18; + ZXIC_UINT32 smmu0_int33_status17; + ZXIC_UINT32 smmu0_int33_status16; + ZXIC_UINT32 smmu0_int33_status15; + ZXIC_UINT32 smmu0_int33_status14; + ZXIC_UINT32 smmu0_int33_status13; + ZXIC_UINT32 smmu0_int33_status12; + ZXIC_UINT32 smmu0_int33_status11; + ZXIC_UINT32 smmu0_int33_status10; + ZXIC_UINT32 smmu0_int33_status9; + ZXIC_UINT32 smmu0_int33_status8; + ZXIC_UINT32 smmu0_int33_status7; + ZXIC_UINT32 smmu0_int33_status6; + ZXIC_UINT32 smmu0_int33_status5; + ZXIC_UINT32 smmu0_int33_status4; + ZXIC_UINT32 smmu0_int33_status3; + ZXIC_UINT32 smmu0_int33_status2; + ZXIC_UINT32 smmu0_int33_status1; + ZXIC_UINT32 smmu0_int33_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT33_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int34_en_t { + ZXIC_UINT32 smmu0_int34_en31; + ZXIC_UINT32 smmu0_int34_en30; + ZXIC_UINT32 smmu0_int34_en29; + ZXIC_UINT32 smmu0_int34_en28; + 
ZXIC_UINT32 smmu0_int34_en27; + ZXIC_UINT32 smmu0_int34_en26; + ZXIC_UINT32 smmu0_int34_en25; + ZXIC_UINT32 smmu0_int34_en24; + ZXIC_UINT32 smmu0_int34_en23; + ZXIC_UINT32 smmu0_int34_en22; + ZXIC_UINT32 smmu0_int34_en21; + ZXIC_UINT32 smmu0_int34_en20; + ZXIC_UINT32 smmu0_int34_en19; + ZXIC_UINT32 smmu0_int34_en18; + ZXIC_UINT32 smmu0_int34_en17; + ZXIC_UINT32 smmu0_int34_en16; + ZXIC_UINT32 smmu0_int34_en15; + ZXIC_UINT32 smmu0_int34_en14; + ZXIC_UINT32 smmu0_int34_en13; + ZXIC_UINT32 smmu0_int34_en12; + ZXIC_UINT32 smmu0_int34_en11; + ZXIC_UINT32 smmu0_int34_en10; + ZXIC_UINT32 smmu0_int34_en9; + ZXIC_UINT32 smmu0_int34_en8; + ZXIC_UINT32 smmu0_int34_en7; + ZXIC_UINT32 smmu0_int34_en6; + ZXIC_UINT32 smmu0_int34_en5; + ZXIC_UINT32 smmu0_int34_en4; + ZXIC_UINT32 smmu0_int34_en3; + ZXIC_UINT32 smmu0_int34_en2; + ZXIC_UINT32 smmu0_int34_en1; + ZXIC_UINT32 smmu0_int34_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT34_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int34_mask_t { + ZXIC_UINT32 smmu0_int34_mask31; + ZXIC_UINT32 smmu0_int34_mask30; + ZXIC_UINT32 smmu0_int34_mask29; + ZXIC_UINT32 smmu0_int34_mask28; + ZXIC_UINT32 smmu0_int34_mask27; + ZXIC_UINT32 smmu0_int34_mask26; + ZXIC_UINT32 smmu0_int34_mask25; + ZXIC_UINT32 smmu0_int34_mask24; + ZXIC_UINT32 smmu0_int34_mask23; + ZXIC_UINT32 smmu0_int34_mask22; + ZXIC_UINT32 smmu0_int34_mask21; + ZXIC_UINT32 smmu0_int34_mask20; + ZXIC_UINT32 smmu0_int34_mask19; + ZXIC_UINT32 smmu0_int34_mask18; + ZXIC_UINT32 smmu0_int34_mask17; + ZXIC_UINT32 smmu0_int34_mask16; + ZXIC_UINT32 smmu0_int34_mask15; + ZXIC_UINT32 smmu0_int34_mask14; + ZXIC_UINT32 smmu0_int34_mask13; + ZXIC_UINT32 smmu0_int34_mask12; + ZXIC_UINT32 smmu0_int34_mask11; + ZXIC_UINT32 smmu0_int34_mask10; + ZXIC_UINT32 smmu0_int34_mask9; + ZXIC_UINT32 smmu0_int34_mask8; + ZXIC_UINT32 smmu0_int34_mask7; + ZXIC_UINT32 smmu0_int34_mask6; + ZXIC_UINT32 smmu0_int34_mask5; + ZXIC_UINT32 smmu0_int34_mask4; + ZXIC_UINT32 smmu0_int34_mask3; + ZXIC_UINT32 smmu0_int34_mask2; + 
ZXIC_UINT32 smmu0_int34_mask1; + ZXIC_UINT32 smmu0_int34_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT34_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int34_status_t { + ZXIC_UINT32 smmu0_int34_status31; + ZXIC_UINT32 smmu0_int34_status30; + ZXIC_UINT32 smmu0_int34_status29; + ZXIC_UINT32 smmu0_int34_status28; + ZXIC_UINT32 smmu0_int34_status27; + ZXIC_UINT32 smmu0_int34_status26; + ZXIC_UINT32 smmu0_int34_status25; + ZXIC_UINT32 smmu0_int34_status24; + ZXIC_UINT32 smmu0_int34_status23; + ZXIC_UINT32 smmu0_int34_status22; + ZXIC_UINT32 smmu0_int34_status21; + ZXIC_UINT32 smmu0_int34_status20; + ZXIC_UINT32 smmu0_int34_status19; + ZXIC_UINT32 smmu0_int34_status18; + ZXIC_UINT32 smmu0_int34_status17; + ZXIC_UINT32 smmu0_int34_status16; + ZXIC_UINT32 smmu0_int34_status15; + ZXIC_UINT32 smmu0_int34_status14; + ZXIC_UINT32 smmu0_int34_status13; + ZXIC_UINT32 smmu0_int34_status12; + ZXIC_UINT32 smmu0_int34_status11; + ZXIC_UINT32 smmu0_int34_status10; + ZXIC_UINT32 smmu0_int34_status9; + ZXIC_UINT32 smmu0_int34_status8; + ZXIC_UINT32 smmu0_int34_status7; + ZXIC_UINT32 smmu0_int34_status6; + ZXIC_UINT32 smmu0_int34_status5; + ZXIC_UINT32 smmu0_int34_status4; + ZXIC_UINT32 smmu0_int34_status3; + ZXIC_UINT32 smmu0_int34_status2; + ZXIC_UINT32 smmu0_int34_status1; + ZXIC_UINT32 smmu0_int34_status0; +} DPP_SMMU0_SMMU0_SMMU0_INT34_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int35_en_t { + ZXIC_UINT32 smmu0_int35_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT35_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int35_mask_t { + ZXIC_UINT32 smmu0_int35_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT35_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int35_status_t { + ZXIC_UINT32 smmu0_int35_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT35_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int36_en_t { + ZXIC_UINT32 smmu0_int36_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT36_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int36_mask_t { + ZXIC_UINT32 smmu0_int36_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT36_MASK_T; + +typedef 
struct dpp_smmu0_smmu0_smmu0_int36_status_t { + ZXIC_UINT32 smmu0_int36_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT36_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int37_en_t { + ZXIC_UINT32 smmu0_int37_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT37_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int37_mask_t { + ZXIC_UINT32 smmu0_int37_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT37_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int37_status_t { + ZXIC_UINT32 smmu0_int37_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT37_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int38_en_t { + ZXIC_UINT32 smmu0_int38_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT38_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int38_mask_t { + ZXIC_UINT32 smmu0_int38_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT38_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int38_status_t { + ZXIC_UINT32 smmu0_int38_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT38_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int39_en_t { + ZXIC_UINT32 smmu0_int39_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT39_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int39_mask_t { + ZXIC_UINT32 smmu0_int39_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT39_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int39_status_t { + ZXIC_UINT32 smmu0_int39_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT39_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int40_en_t { + ZXIC_UINT32 smmu0_int40_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT40_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int40_mask_t { + ZXIC_UINT32 smmu0_int40_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT40_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int40_status_t { + ZXIC_UINT32 smmu0_int40_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT40_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int41_en_t { + ZXIC_UINT32 smmu0_int41_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT41_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int41_mask_t { + ZXIC_UINT32 smmu0_int41_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT41_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int41_status_t { + 
ZXIC_UINT32 smmu0_int41_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT41_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int42_en_t { + ZXIC_UINT32 smmu0_int42_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT42_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int42_mask_t { + ZXIC_UINT32 smmu0_int42_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT42_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int42_status_t { + ZXIC_UINT32 smmu0_int42_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT42_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int43_en_t { + ZXIC_UINT32 smmu0_int43_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT43_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int43_mask_t { + ZXIC_UINT32 smmu0_int43_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT43_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int43_status_t { + ZXIC_UINT32 smmu0_int43_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT43_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int44_en_t { + ZXIC_UINT32 smmu0_int44_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT44_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int44_mask_t { + ZXIC_UINT32 smmu0_int44_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT44_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int44_status_t { + ZXIC_UINT32 smmu0_int44_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT44_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int45_en_t { + ZXIC_UINT32 smmu0_int45_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT45_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int45_mask_t { + ZXIC_UINT32 smmu0_int45_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT45_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int45_status_t { + ZXIC_UINT32 smmu0_int45_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT45_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int46_en_t { + ZXIC_UINT32 smmu0_int46_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT46_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int46_mask_t { + ZXIC_UINT32 smmu0_int46_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT46_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int46_status_t { + ZXIC_UINT32 smmu0_int46_status0_31; +} 
DPP_SMMU0_SMMU0_SMMU0_INT46_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int47_en_t { + ZXIC_UINT32 smmu0_int47_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT47_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int47_mask_t { + ZXIC_UINT32 smmu0_int47_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT47_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int47_status_t { + ZXIC_UINT32 smmu0_int47_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT47_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int48_en_t { + ZXIC_UINT32 smmu0_int48_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT48_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int48_mask_t { + ZXIC_UINT32 smmu0_int48_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT48_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int48_status_t { + ZXIC_UINT32 smmu0_int48_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT48_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int49_en_t { + ZXIC_UINT32 smmu0_int49_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT49_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int49_mask_t { + ZXIC_UINT32 smmu0_int49_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT49_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int49_status_t { + ZXIC_UINT32 smmu0_int49_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT49_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int50_en_t { + ZXIC_UINT32 smmu0_int50_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT50_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int50_mask_t { + ZXIC_UINT32 smmu0_int50_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT50_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int50_status_t { + ZXIC_UINT32 smmu0_int50_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT50_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int51_en_t { + ZXIC_UINT32 smmu0_int51_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT51_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int51_mask_t { + ZXIC_UINT32 smmu0_int51_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT51_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int51_status_t { + ZXIC_UINT32 smmu0_int51_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT51_STATUS_T; + +typedef 
struct dpp_smmu0_smmu0_smmu0_int52_en_t { + ZXIC_UINT32 smmu0_int52_en0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT52_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int52_mask_t { + ZXIC_UINT32 smmu0_int52_mask0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT52_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int52_status_t { + ZXIC_UINT32 smmu0_int52_status0_31; +} DPP_SMMU0_SMMU0_SMMU0_INT52_STATUS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int53_en_t { + ZXIC_UINT32 smmu0_int53_en3; + ZXIC_UINT32 smmu0_int53_en2; + ZXIC_UINT32 smmu0_int53_en1; + ZXIC_UINT32 smmu0_int53_en0; +} DPP_SMMU0_SMMU0_SMMU0_INT53_EN_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int53_mask_t { + ZXIC_UINT32 smmu0_int53_mask3; + ZXIC_UINT32 smmu0_int53_mask2; + ZXIC_UINT32 smmu0_int53_mask1; + ZXIC_UINT32 smmu0_int53_mask0; +} DPP_SMMU0_SMMU0_SMMU0_INT53_MASK_T; + +typedef struct dpp_smmu0_smmu0_smmu0_int53_status_t { + ZXIC_UINT32 smmu0_int53_status15; + ZXIC_UINT32 smmu0_int53_status14; + ZXIC_UINT32 smmu0_int53_status13; + ZXIC_UINT32 smmu0_int53_status12; +} DPP_SMMU0_SMMU0_SMMU0_INT53_STATUS_T; + +typedef struct dpp_smmu0_smmu0_ctrl0_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl1_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_12; + ZXIC_UINT32 
ctrl0_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl0_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL0_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl2_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl3_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl2_arbiter_ecc_bypass_0; +} 
DPP_SMMU0_SMMU0_CTRL2_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl4_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl5_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl4_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL4_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl6_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_6; 
+ ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl7_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl6_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL6_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl8_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl9_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_10; + ZXIC_UINT32 
ctrl8_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl8_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL8_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl10_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl11_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl10_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL10_ARBITER_ECC_BYPASS_T; + +typedef struct 
dpp_smmu0_smmu0_ctrl12_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl13_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl12_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL12_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl14_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_6; + ZXIC_UINT32 
ctrl15_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl15_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl14_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL14_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl16_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl17_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_11; + ZXIC_UINT32 
ctrl16_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl16_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL16_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl18_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl19_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl18_arbiter_ecc_bypass_0; +} 
DPP_SMMU0_SMMU0_CTRL18_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl20_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl21_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl20_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL20_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl22_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_8; + ZXIC_UINT32 
ctrl23_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl23_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl22_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL22_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl24_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl25_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_13; + ZXIC_UINT32 
ctrl24_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl24_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL24_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl26_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl27_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_2; + ZXIC_UINT32 
ctrl26_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl26_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL26_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl28_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl29_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl28_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL28_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl30_arbiter_ecc_bypass_t { + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_9; + 
ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl31_arbiter_ecc_bypass_0; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_15; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_14; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_13; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_12; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_11; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_10; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_9; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_8; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_7; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_6; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_5; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_4; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_3; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_2; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_1; + ZXIC_UINT32 ctrl30_arbiter_ecc_bypass_0; +} DPP_SMMU0_SMMU0_CTRL30_ARBITER_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl_req_ecc_bypass_t { + ZXIC_UINT32 ctrl_req_ecc_bypass_0_31; +} DPP_SMMU0_SMMU0_CTRL_REQ_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl_info_ecc_bypass_t { + ZXIC_UINT32 ctrl_info_ecc_bypass_0_31; +} DPP_SMMU0_SMMU0_CTRL_INFO_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_rschd_ecc_bypass_t { + ZXIC_UINT32 smmu0_rschd_ecc_bypass_0_31; +} DPP_SMMU0_SMMU0_SMMU0_RSCHD_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_smmu0_wr_ecc_bypass_t { + ZXIC_UINT32 smmu0_wr_ecc_bypass1; + ZXIC_UINT32 smmu0_wr_ecc_bypass0; +} DPP_SMMU0_SMMU0_SMMU0_WR_ECC_BYPASS_T; + +typedef struct dpp_smmu0_smmu0_ctrl0_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl0_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_27; + 
ZXIC_UINT32 ctrl0_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl0_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL0_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl1_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl1_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_12; + ZXIC_UINT32 
ctrl1_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl1_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL1_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl2_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl2_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl2_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL2_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl3_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl3_arbiter_ecc_err_31; + ZXIC_UINT32 
ctrl3_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl3_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL3_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl4_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl4_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_15; 
+ ZXIC_UINT32 ctrl4_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl4_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL4_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl5_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl5_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl5_arbiter_ecc_err_0; +} 
DPP_SMMU0_SMMU0_CTRL5_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl6_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl6_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl6_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL6_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl7_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl7_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_19; + 
ZXIC_UINT32 ctrl7_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl7_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL7_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl8_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl8_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_4; + ZXIC_UINT32 
ctrl8_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl8_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL8_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl9_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl9_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl9_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL9_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl10_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl10_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_23; + 
ZXIC_UINT32 ctrl10_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl10_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL10_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl11_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl11_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_9; + ZXIC_UINT32 
ctrl11_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl11_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL11_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl12_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl12_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl12_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL12_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl13_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl13_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_29; + ZXIC_UINT32 
ctrl13_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl13_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL13_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl14_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl14_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_15; + ZXIC_UINT32 
ctrl14_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl14_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL14_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl15_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl15_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl15_arbiter_ecc_err_0; 
+} DPP_SMMU0_SMMU0_CTRL15_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl16_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl16_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl16_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL16_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl17_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl17_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_21; + ZXIC_UINT32 
ctrl17_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl17_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL17_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl18_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl18_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_7; + ZXIC_UINT32 
ctrl18_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl18_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL18_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl19_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl19_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl19_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL19_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl20_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl20_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_27; + ZXIC_UINT32 
ctrl20_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl20_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL20_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl21_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl21_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_13; + ZXIC_UINT32 
ctrl21_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl21_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL21_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl22_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl22_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl22_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL22_ARBITER_ECC_ERR_T; + +typedef struct 
dpp_smmu0_smmu0_ctrl23_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl23_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl23_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL23_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl24_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl24_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_19; + ZXIC_UINT32 
ctrl24_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl24_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL24_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl25_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl25_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_5; + ZXIC_UINT32 
ctrl25_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl25_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL25_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl26_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl26_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl26_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL26_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl27_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl27_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_25; + 
ZXIC_UINT32 ctrl27_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl27_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL27_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl28_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl28_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_11; + ZXIC_UINT32 
ctrl28_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl28_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL28_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl29_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl29_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl29_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL29_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl30_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl30_arbiter_ecc_err_31; + ZXIC_UINT32 
ctrl30_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_17; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl30_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL30_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl31_arbiter_ecc_err_t { + ZXIC_UINT32 ctrl31_arbiter_ecc_err_31; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_30; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_29; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_28; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_27; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_26; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_25; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_24; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_23; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_22; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_21; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_20; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_19; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_18; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_17; + ZXIC_UINT32 
ctrl31_arbiter_ecc_err_16; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_15; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_14; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_13; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_12; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_11; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_10; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_9; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_8; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_7; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_6; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_5; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_4; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_3; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_2; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_1; + ZXIC_UINT32 ctrl31_arbiter_ecc_err_0; +} DPP_SMMU0_SMMU0_CTRL31_ARBITER_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl_req_ecc_single_err_t { + ZXIC_UINT32 ctrl_req_ecc_single_err_0_31; +} DPP_SMMU0_SMMU0_CTRL_REQ_ECC_SINGLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl_req_ecc_double_err_t { + ZXIC_UINT32 ctrl_req_ecc_double_err_0_31; +} DPP_SMMU0_SMMU0_CTRL_REQ_ECC_DOUBLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl_info_ecc_single_err_t { + ZXIC_UINT32 ctrl_info_ecc_single_err_0_31; +} DPP_SMMU0_SMMU0_CTRL_INFO_ECC_SINGLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_ctrl_info_ecc_double_err_t { + ZXIC_UINT32 ctrl_info_ecc_double_err_0_31; +} DPP_SMMU0_SMMU0_CTRL_INFO_ECC_DOUBLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_smmu0_wr_ecc_err_t { + ZXIC_UINT32 smmu0_wr_ecc_err_3; + ZXIC_UINT32 smmu0_wr_ecc_err_2; + ZXIC_UINT32 smmu0_wr_ecc_err_1; + ZXIC_UINT32 smmu0_wr_ecc_err_0; +} DPP_SMMU0_SMMU0_SMMU0_WR_ECC_ERR_T; + +typedef struct dpp_smmu0_smmu0_smmu0_rschd_ecc_single_err_t { + ZXIC_UINT32 smmu0_rschd_ecc_single_err_0_31; +} DPP_SMMU0_SMMU0_SMMU0_RSCHD_ECC_SINGLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_smmu0_rschd_ecc_double_err_t { + ZXIC_UINT32 smmu0_rschd_ecc_double_err_0_31; +} DPP_SMMU0_SMMU0_SMMU0_RSCHD_ECC_DOUBLE_ERR_T; + +typedef struct dpp_smmu0_smmu0_ord_fifo_empty_t { + ZXIC_UINT32 ord_fifo_empty; +} 
DPP_SMMU0_SMMU0_ORD_FIFO_EMPTY_T; + +typedef struct dpp_smmu0_smmu0_wr_arb_fifo_empty_t { + ZXIC_UINT32 wr_arb_fifo_empty; +} DPP_SMMU0_SMMU0_WR_ARB_FIFO_EMPTY_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty0_t { + ZXIC_UINT32 ctrl_fifo_empty0_5; + ZXIC_UINT32 ctrl_fifo_empty0_4; + ZXIC_UINT32 ctrl_fifo_empty0_3; + ZXIC_UINT32 ctrl_fifo_empty0_2; + ZXIC_UINT32 ctrl_fifo_empty0_1; + ZXIC_UINT32 ctrl_fifo_empty0_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY0_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty1_t { + ZXIC_UINT32 ctrl_fifo_empty1_5; + ZXIC_UINT32 ctrl_fifo_empty1_4; + ZXIC_UINT32 ctrl_fifo_empty1_3; + ZXIC_UINT32 ctrl_fifo_empty1_2; + ZXIC_UINT32 ctrl_fifo_empty1_1; + ZXIC_UINT32 ctrl_fifo_empty1_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY1_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty2_t { + ZXIC_UINT32 ctrl_fifo_empty2_5; + ZXIC_UINT32 ctrl_fifo_empty2_4; + ZXIC_UINT32 ctrl_fifo_empty2_3; + ZXIC_UINT32 ctrl_fifo_empty2_2; + ZXIC_UINT32 ctrl_fifo_empty2_1; + ZXIC_UINT32 ctrl_fifo_empty2_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY2_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty3_t { + ZXIC_UINT32 ctrl_fifo_empty3_5; + ZXIC_UINT32 ctrl_fifo_empty3_4; + ZXIC_UINT32 ctrl_fifo_empty3_3; + ZXIC_UINT32 ctrl_fifo_empty3_2; + ZXIC_UINT32 ctrl_fifo_empty3_1; + ZXIC_UINT32 ctrl_fifo_empty3_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY3_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty4_t { + ZXIC_UINT32 ctrl_fifo_empty4_5; + ZXIC_UINT32 ctrl_fifo_empty4_4; + ZXIC_UINT32 ctrl_fifo_empty4_3; + ZXIC_UINT32 ctrl_fifo_empty4_2; + ZXIC_UINT32 ctrl_fifo_empty4_1; + ZXIC_UINT32 ctrl_fifo_empty4_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY4_T; + +typedef struct dpp_smmu0_smmu0_ctrl_fifo_empty5_t { + ZXIC_UINT32 ctrl_fifo_empty5_1; + ZXIC_UINT32 ctrl_fifo_empty5_0; +} DPP_SMMU0_SMMU0_CTRL_FIFO_EMPTY5_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty0_t { + ZXIC_UINT32 kschd_fifo_empty0; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY0_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty1_t { 
+ ZXIC_UINT32 kschd_fifo_empty1; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY1_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty2_t { + ZXIC_UINT32 kschd_fifo_empty2; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY2_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty3_t { + ZXIC_UINT32 kschd_fifo_empty3; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY3_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty4_t { + ZXIC_UINT32 kschd_fifo_empty4; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY4_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty5_t { + ZXIC_UINT32 kschd_fifo_empty5; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY5_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty6_t { + ZXIC_UINT32 kschd_fifo_empty6; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY6_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty7_t { + ZXIC_UINT32 kschd_fifo_empty7; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY7_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty8_t { + ZXIC_UINT32 kschd_fifo_empty8; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY8_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty9_t { + ZXIC_UINT32 kschd_fifo_empty9; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY9_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty10_t { + ZXIC_UINT32 kschd_fifo_empty10; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY10_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty11_t { + ZXIC_UINT32 kschd_fifo_empty11; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY11_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty12_t { + ZXIC_UINT32 kschd_fifo_empty12; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY12_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty13_t { + ZXIC_UINT32 kschd_fifo_empty13; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY13_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty14_t { + ZXIC_UINT32 kschd_fifo_empty14; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY14_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty15_t { + ZXIC_UINT32 kschd_fifo_empty15; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY15_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty16_t { + ZXIC_UINT32 kschd_fifo_empty16; +} 
DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY16_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty17_t { + ZXIC_UINT32 kschd_fifo_empty17; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY17_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty18_t { + ZXIC_UINT32 kschd_fifo_empty18; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY18_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty19_t { + ZXIC_UINT32 kschd_fifo_empty19; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY19_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty20_t { + ZXIC_UINT32 kschd_fifo_empty20; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY20_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty21_t { + ZXIC_UINT32 kschd_fifo_empty21; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY21_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty22_t { + ZXIC_UINT32 kschd_fifo_empty22; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY22_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty23_t { + ZXIC_UINT32 kschd_fifo_empty23; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY23_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty24_t { + ZXIC_UINT32 kschd_fifo_empty24; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY24_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty25_t { + ZXIC_UINT32 kschd_fifo_empty25; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY25_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty26_t { + ZXIC_UINT32 kschd_fifo_empty26; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY26_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty27_t { + ZXIC_UINT32 kschd_fifo_empty27; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY27_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty28_t { + ZXIC_UINT32 kschd_fifo_empty28; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY28_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty29_t { + ZXIC_UINT32 kschd_fifo_empty29; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY29_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty30_t { + ZXIC_UINT32 kschd_fifo_empty30; +} DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY30_T; + +typedef struct dpp_smmu0_smmu0_kschd_fifo_empty31_t { + ZXIC_UINT32 kschd_fifo_empty31; +} 
DPP_SMMU0_SMMU0_KSCHD_FIFO_EMPTY31_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty0_t { + ZXIC_UINT32 rschd_fifo_empty0; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY0_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty1_t { + ZXIC_UINT32 rschd_fifo_empty1; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY1_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty2_t { + ZXIC_UINT32 rschd_fifo_empty2; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY2_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty3_t { + ZXIC_UINT32 rschd_fifo_empty3; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY3_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty4_t { + ZXIC_UINT32 rschd_fifo_empty4; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY4_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty5_t { + ZXIC_UINT32 rschd_fifo_empty5; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY5_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty6_t { + ZXIC_UINT32 rschd_fifo_empty6; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY6_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty7_t { + ZXIC_UINT32 rschd_fifo_empty7; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY7_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty8_t { + ZXIC_UINT32 rschd_fifo_empty8; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY8_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty9_t { + ZXIC_UINT32 rschd_fifo_empty9; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY9_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty10_t { + ZXIC_UINT32 rschd_fifo_empty10; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY10_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty11_t { + ZXIC_UINT32 rschd_fifo_empty11; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY11_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty12_t { + ZXIC_UINT32 rschd_fifo_empty12; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY12_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty13_t { + ZXIC_UINT32 rschd_fifo_empty13; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY13_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty14_t { + ZXIC_UINT32 rschd_fifo_empty14; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY14_T; + +typedef 
struct dpp_smmu0_smmu0_rschd_fifo_empty15_t { + ZXIC_UINT32 rschd_fifo_empty15; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY15_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty16_t { + ZXIC_UINT32 rschd_fifo_empty16; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY16_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty17_t { + ZXIC_UINT32 rschd_fifo_empty17; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY17_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty18_t { + ZXIC_UINT32 rschd_fifo_empty18; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY18_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty19_t { + ZXIC_UINT32 rschd_fifo_empty19; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY19_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty20_t { + ZXIC_UINT32 rschd_fifo_empty20; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY20_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty21_t { + ZXIC_UINT32 rschd_fifo_empty21; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY21_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty22_t { + ZXIC_UINT32 rschd_fifo_empty22; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY22_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty23_t { + ZXIC_UINT32 rschd_fifo_empty23; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY23_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty24_t { + ZXIC_UINT32 rschd_fifo_empty24; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY24_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty25_t { + ZXIC_UINT32 rschd_fifo_empty25; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY25_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty26_t { + ZXIC_UINT32 rschd_fifo_empty26; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY26_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty27_t { + ZXIC_UINT32 rschd_fifo_empty27; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY27_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty28_t { + ZXIC_UINT32 rschd_fifo_empty28; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY28_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty29_t { + ZXIC_UINT32 rschd_fifo_empty29; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY29_T; + +typedef struct 
dpp_smmu0_smmu0_rschd_fifo_empty30_t { + ZXIC_UINT32 rschd_fifo_empty30; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY30_T; + +typedef struct dpp_smmu0_smmu0_rschd_fifo_empty31_t { + ZXIC_UINT32 rschd_fifo_empty31; +} DPP_SMMU0_SMMU0_RSCHD_FIFO_EMPTY31_T; + +typedef struct dpp_smmu0_smmu0_ept_flag_t { + ZXIC_UINT32 ept_flag8; + ZXIC_UINT32 ept_flag7; + ZXIC_UINT32 ept_flag6; + ZXIC_UINT32 ept_flag5; + ZXIC_UINT32 ept_flag4; + ZXIC_UINT32 ept_flag3; + ZXIC_UINT32 ept_flag2; + ZXIC_UINT32 ept_flag1; + ZXIC_UINT32 ept_flag0; +} DPP_SMMU0_SMMU0_EPT_FLAG_T; + +typedef struct dpp_smmu0_smmu0_ppu_soft_rst_t { + ZXIC_UINT32 ppu_soft_rst; +} DPP_SMMU0_SMMU0_PPU_SOFT_RST_T; + +typedef struct dpp_smmu0_smmu0_smmu0_as_mac_age_fc_cnt_t { + ZXIC_UINT32 smmu0_as_mac_age_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_AS_MAC_AGE_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_marc_se_parser_fc_cnt_t { + ZXIC_UINT32 smmu0_marc_se_parser_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_MARC_SE_PARSER_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_wr_arb_cpu_fc_cnt_t { + ZXIC_UINT32 wr_arb_cpu_fc_cnt; +} DPP_SMMU0_SMMU0_WR_ARB_CPU_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_lpm_as_fc_cnt_t { + ZXIC_UINT32 smmu0_lpm_as_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_LPM_AS_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_lpm_as_smmu0_fc_cnt_t { + ZXIC_UINT32 lpm_as_smmu0_fc_cnt; +} DPP_SMMU0_SMMU0_LPM_AS_SMMU0_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_etcam1_0_as_fc_cnt_t { + ZXIC_UINT32 smmu0_etcam1_0_as_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_as_etcam1_0_smmu0_fc_cnt_t { + ZXIC_UINT32 as_etcam1_0_smmu0_fc_cnt; +} DPP_SMMU0_SMMU0_AS_ETCAM1_0_SMMU0_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_ppu_mcast_fc_cnt_t { + ZXIC_UINT32 smmu0_ppu_mcast_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_PPU_MCAST_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_ppu_smmu0_mcast_fc_cnt_t { + ZXIC_UINT32 ppu_smmu0_mcast_fc_cnt; +} DPP_SMMU0_SMMU0_PPU_SMMU0_MCAST_FC_CNT_T; + +typedef struct 
dpp_smmu0_smmu0_odma_smmu0_tdm_fc_rsp_fc_cnt_t { + ZXIC_UINT32 odma_smmu0_tdm_fc_rsp_fc_cnt; +} DPP_SMMU0_SMMU0_ODMA_SMMU0_TDM_FC_RSP_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_odma_tdm_fc_key_fc_cnt_t { + ZXIC_UINT32 smmu0_odma_tdm_fc_key_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ODMA_TDM_FC_KEY_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_odma_fc_cnt_t { + ZXIC_UINT32 smmu0_odma_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ODMA_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_cfg_tab_rd_fc_cnt_t { + ZXIC_UINT32 smmu0_cfg_tab_rd_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_CFG_TAB_RD_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_stat_fc15_0_cnt_t { + ZXIC_UINT32 smmu0_stat_fc15_0_cnt; +} DPP_SMMU0_SMMU0_SMMU0_STAT_FC15_0_CNT_T; + +typedef struct dpp_smmu0_smmu0_stat_smmu0_fc15_0_cnt_t { + ZXIC_UINT32 stat_smmu0_fc15_0_cnt; +} DPP_SMMU0_SMMU0_STAT_SMMU0_FC15_0_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_ppu_mex5_0_fc_cnt_t { + ZXIC_UINT32 smmu0_ppu_mex5_0_fc_cnt; +} DPP_SMMU0_SMMU0_SMMU0_PPU_MEX5_0_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_ppu_smmu0_mex5_0_fc_cnt_t { + ZXIC_UINT32 ppu_smmu0_mex5_0_fc_cnt; +} DPP_SMMU0_SMMU0_PPU_SMMU0_MEX5_0_FC_CNT_T; + +typedef struct dpp_smmu0_smmu0_as_smmu0_mac_age_req_cnt_t { + ZXIC_UINT32 as_smmu0_mac_age_req_cnt; +} DPP_SMMU0_SMMU0_AS_SMMU0_MAC_AGE_REQ_CNT_T; + +typedef struct dpp_smmu0_smmu0_se_parser_smmu0_marc_key_cnt_t { + ZXIC_UINT32 se_parser_smmu0_marc_key_cnt; +} DPP_SMMU0_SMMU0_SE_PARSER_SMMU0_MARC_KEY_CNT_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rdat_cnt_t { + ZXIC_UINT32 cpu_ind_rdat_cnt; +} DPP_SMMU0_SMMU0_CPU_IND_RDAT_CNT_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_rd_req_cnt_t { + ZXIC_UINT32 cpu_ind_rd_req_cnt; +} DPP_SMMU0_SMMU0_CPU_IND_RD_REQ_CNT_T; + +typedef struct dpp_smmu0_smmu0_cpu_ind_wr_req_cnt_t { + ZXIC_UINT32 cpu_ind_wr_req_cnt; +} DPP_SMMU0_SMMU0_CPU_IND_WR_REQ_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_plcr_rsp0_cnt_t { + ZXIC_UINT32 smmu0_plcr_rsp0_cnt; +} DPP_SMMU0_SMMU0_SMMU0_PLCR_RSP0_CNT_T; + 
+typedef struct dpp_smmu0_smmu0_plcr_smmu0_req0_cnt_t { + ZXIC_UINT32 plcr_smmu0_req0_cnt; +} DPP_SMMU0_SMMU0_PLCR_SMMU0_REQ0_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_lpm_as_rsp_cnt_t { + ZXIC_UINT32 smmu0_lpm_as_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_LPM_AS_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_lpm_as_smmu0_req_cnt_t { + ZXIC_UINT32 lpm_as_smmu0_req_cnt; +} DPP_SMMU0_SMMU0_LPM_AS_SMMU0_REQ_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_etcam1_0_as_rsp_cnt_t { + ZXIC_UINT32 smmu0_etcam1_0_as_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_etcam1_0_as_smmu0_req_cnt_t { + ZXIC_UINT32 etcam1_0_as_smmu0_req_cnt; +} DPP_SMMU0_SMMU0_ETCAM1_0_AS_SMMU0_REQ_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_ppu_mcast_rsp_cnt_t { + ZXIC_UINT32 smmu0_ppu_mcast_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_PPU_MCAST_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_ppu_smmu0_mcast_key_cnt_t { + ZXIC_UINT32 ppu_smmu0_mcast_key_cnt; +} DPP_SMMU0_SMMU0_PPU_SMMU0_MCAST_KEY_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_odma_tdm_mc_rsp_cnt_t { + ZXIC_UINT32 smmu0_odma_tdm_mc_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ODMA_TDM_MC_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_odma_smmu0_tdm_mc_key_cnt_t { + ZXIC_UINT32 odma_smmu0_tdm_mc_key_cnt; +} DPP_SMMU0_SMMU0_ODMA_SMMU0_TDM_MC_KEY_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_odma_rsp_cnt_t { + ZXIC_UINT32 smmu0_odma_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_ODMA_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_odma_smmu0_cmd_cnt_t { + ZXIC_UINT32 odma_smmu0_cmd_cnt; +} DPP_SMMU0_SMMU0_ODMA_SMMU0_CMD_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_cfg_tab_rdat_cnt_t { + ZXIC_UINT32 smmu0_cfg_tab_rdat_cnt; +} DPP_SMMU0_SMMU0_SMMU0_CFG_TAB_RDAT_CNT_T; + +typedef struct dpp_smmu0_smmu0_cfg_smmu0_tab_rd_cnt_t { + ZXIC_UINT32 cfg_smmu0_tab_rd_cnt; +} DPP_SMMU0_SMMU0_CFG_SMMU0_TAB_RD_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_stat_rsp15_0_cnt_t { + ZXIC_UINT32 smmu0_stat_rsp15_0_cnt; +} 
DPP_SMMU0_SMMU0_SMMU0_STAT_RSP15_0_CNT_T; + +typedef struct dpp_smmu0_smmu0_stat_smmu0_req15_0_cnt_t { + ZXIC_UINT32 stat_smmu0_req15_0_cnt; +} DPP_SMMU0_SMMU0_STAT_SMMU0_REQ15_0_CNT_T; + +typedef struct dpp_smmu0_smmu0_smmu0_ppu_mex5_0_rsp_cnt_t { + ZXIC_UINT32 smmu0_ppu_mex5_0_rsp_cnt; +} DPP_SMMU0_SMMU0_SMMU0_PPU_MEX5_0_RSP_CNT_T; + +typedef struct dpp_smmu0_smmu0_ppu_smmu0_mex5_0_key_cnt_t { + ZXIC_UINT32 ppu_smmu0_mex5_0_key_cnt; +} DPP_SMMU0_SMMU0_PPU_SMMU0_MEX5_0_KEY_CNT_T; + +typedef struct dpp_smmu0_smmu0_ftm_stat_smmu0_req0_cnt_t { + ZXIC_UINT32 ftm_stat_smmu0_req0_cnt; +} DPP_SMMU0_SMMU0_FTM_STAT_SMMU0_REQ0_CNT_T; + +typedef struct dpp_smmu0_smmu0_ftm_stat_smmu0_req1_cnt_t { + ZXIC_UINT32 ftm_stat_smmu0_req1_cnt; +} DPP_SMMU0_SMMU0_FTM_STAT_SMMU0_REQ1_CNT_T; + +typedef struct dpp_smmu0_smmu0_etm_stat_smmu0_req0_cnt_t { + ZXIC_UINT32 etm_stat_smmu0_req0_cnt; +} DPP_SMMU0_SMMU0_ETM_STAT_SMMU0_REQ0_CNT_T; + +typedef struct dpp_smmu0_smmu0_etm_stat_smmu0_req1_cnt_t { + ZXIC_UINT32 etm_stat_smmu0_req1_cnt; +} DPP_SMMU0_SMMU0_ETM_STAT_SMMU0_REQ1_CNT_T; + +typedef struct dpp_smmu0_smmu0_req_eram0_31_rd_cnt_t { + ZXIC_UINT32 req_eram0_31_rd_cnt; +} DPP_SMMU0_SMMU0_REQ_ERAM0_31_RD_CNT_T; + +typedef struct dpp_smmu0_smmu0_req_eram0_31_wr_cnt_t { + ZXIC_UINT32 req_eram0_31_wr_cnt; +} DPP_SMMU0_SMMU0_REQ_ERAM0_31_WR_CNT_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu14k_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu14k_reg.h new file mode 100644 index 000000000000..d4f524b3f1df --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu14k_reg.h @@ -0,0 +1,204 @@ + +#ifndef _DPP_SMMU14K_REG_H_ +#define _DPP_SMMU14K_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl0_cfg_t { + ZXIC_UINT32 hash0_tbl0_len; + ZXIC_UINT32 hash0_tbl0_ecc_en; + ZXIC_UINT32 hash0_tbl0_baddr; +} 
DPP_SMMU14K_SE_SMMU1_HASH0_TBL0_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl1_cfg_t { + ZXIC_UINT32 hash0_tbl1_len; + ZXIC_UINT32 hash0_tbl1_ecc_en; + ZXIC_UINT32 hash0_tbl1_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL1_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl2_cfg_t { + ZXIC_UINT32 hash0_tbl2_len; + ZXIC_UINT32 hash0_tbl2_ecc_en; + ZXIC_UINT32 hash0_tbl2_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL2_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl3_cfg_t { + ZXIC_UINT32 hash0_tbl3_len; + ZXIC_UINT32 hash0_tbl3_ecc_en; + ZXIC_UINT32 hash0_tbl3_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL3_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl4_cfg_t { + ZXIC_UINT32 hash0_tbl4_len; + ZXIC_UINT32 hash0_tbl4_ecc_en; + ZXIC_UINT32 hash0_tbl4_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL4_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl5_cfg_t { + ZXIC_UINT32 hash0_tbl5_len; + ZXIC_UINT32 hash0_tbl5_ecc_en; + ZXIC_UINT32 hash0_tbl5_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL5_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl6_cfg_t { + ZXIC_UINT32 hash0_tbl6_len; + ZXIC_UINT32 hash0_tbl6_ecc_en; + ZXIC_UINT32 hash0_tbl6_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL6_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash0_tbl7_cfg_t { + ZXIC_UINT32 hash0_tbl7_len; + ZXIC_UINT32 hash0_tbl7_ecc_en; + ZXIC_UINT32 hash0_tbl7_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH0_TBL7_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl0_cfg_t { + ZXIC_UINT32 hash1_tbl0_len; + ZXIC_UINT32 hash1_tbl0_ecc_en; + ZXIC_UINT32 hash1_tbl0_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL0_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl1_cfg_t { + ZXIC_UINT32 hash1_tbl1_len; + ZXIC_UINT32 hash1_tbl1_ecc_en; + ZXIC_UINT32 hash1_tbl1_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL1_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl2_cfg_t { + ZXIC_UINT32 hash1_tbl2_len; + ZXIC_UINT32 hash1_tbl2_ecc_en; + ZXIC_UINT32 hash1_tbl2_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL2_CFG_T; + +typedef 
struct dpp_smmu14k_se_smmu1_hash1_tbl3_cfg_t { + ZXIC_UINT32 hash1_tbl3_len; + ZXIC_UINT32 hash1_tbl3_ecc_en; + ZXIC_UINT32 hash1_tbl3_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL3_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl4_cfg_t { + ZXIC_UINT32 hash1_tbl4_len; + ZXIC_UINT32 hash1_tbl4_ecc_en; + ZXIC_UINT32 hash1_tbl4_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL4_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl5_cfg_t { + ZXIC_UINT32 hash1_tbl5_len; + ZXIC_UINT32 hash1_tbl5_ecc_en; + ZXIC_UINT32 hash1_tbl5_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL5_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl6_cfg_t { + ZXIC_UINT32 hash1_tbl6_len; + ZXIC_UINT32 hash1_tbl6_ecc_en; + ZXIC_UINT32 hash1_tbl6_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL6_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash1_tbl7_cfg_t { + ZXIC_UINT32 hash1_tbl7_len; + ZXIC_UINT32 hash1_tbl7_ecc_en; + ZXIC_UINT32 hash1_tbl7_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH1_TBL7_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl0_cfg_t { + ZXIC_UINT32 hash2_tbl0_len; + ZXIC_UINT32 hash2_tbl0_ecc_en; + ZXIC_UINT32 hash2_tbl0_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL0_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl1_cfg_t { + ZXIC_UINT32 hash2_tbl1_len; + ZXIC_UINT32 hash2_tbl1_ecc_en; + ZXIC_UINT32 hash2_tbl1_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL1_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl2_cfg_t { + ZXIC_UINT32 hash2_tbl2_len; + ZXIC_UINT32 hash2_tbl2_ecc_en; + ZXIC_UINT32 hash2_tbl2_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL2_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl3_cfg_t { + ZXIC_UINT32 hash2_tbl3_len; + ZXIC_UINT32 hash2_tbl3_ecc_en; + ZXIC_UINT32 hash2_tbl3_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL3_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl4_cfg_t { + ZXIC_UINT32 hash2_tbl4_len; + ZXIC_UINT32 hash2_tbl4_ecc_en; + ZXIC_UINT32 hash2_tbl4_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL4_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl5_cfg_t { + 
ZXIC_UINT32 hash2_tbl5_len; + ZXIC_UINT32 hash2_tbl5_ecc_en; + ZXIC_UINT32 hash2_tbl5_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL5_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl6_cfg_t { + ZXIC_UINT32 hash2_tbl6_len; + ZXIC_UINT32 hash2_tbl6_ecc_en; + ZXIC_UINT32 hash2_tbl6_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL6_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash2_tbl7_cfg_t { + ZXIC_UINT32 hash2_tbl7_len; + ZXIC_UINT32 hash2_tbl7_ecc_en; + ZXIC_UINT32 hash2_tbl7_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH2_TBL7_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl0_cfg_t { + ZXIC_UINT32 hash3_tbl0_len; + ZXIC_UINT32 hash3_tbl0_ecc_en; + ZXIC_UINT32 hash3_tbl0_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL0_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl1_cfg_t { + ZXIC_UINT32 hash3_tbl1_len; + ZXIC_UINT32 hash3_tbl1_ecc_en; + ZXIC_UINT32 hash3_tbl1_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL1_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl2_cfg_t { + ZXIC_UINT32 hash3_tbl2_len; + ZXIC_UINT32 hash3_tbl2_ecc_en; + ZXIC_UINT32 hash3_tbl2_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL2_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl3_cfg_t { + ZXIC_UINT32 hash3_tbl3_len; + ZXIC_UINT32 hash3_tbl3_ecc_en; + ZXIC_UINT32 hash3_tbl3_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL3_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl4_cfg_t { + ZXIC_UINT32 hash3_tbl4_len; + ZXIC_UINT32 hash3_tbl4_ecc_en; + ZXIC_UINT32 hash3_tbl4_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL4_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl5_cfg_t { + ZXIC_UINT32 hash3_tbl5_len; + ZXIC_UINT32 hash3_tbl5_ecc_en; + ZXIC_UINT32 hash3_tbl5_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL5_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl6_cfg_t { + ZXIC_UINT32 hash3_tbl6_len; + ZXIC_UINT32 hash3_tbl6_ecc_en; + ZXIC_UINT32 hash3_tbl6_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL6_CFG_T; + +typedef struct dpp_smmu14k_se_smmu1_hash3_tbl7_cfg_t { + ZXIC_UINT32 hash3_tbl7_len; + ZXIC_UINT32 
hash3_tbl7_ecc_en; + ZXIC_UINT32 hash3_tbl7_baddr; +} DPP_SMMU14K_SE_SMMU1_HASH3_TBL7_CFG_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu1_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu1_reg.h new file mode 100644 index 000000000000..2bae73bf195e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_smmu1_reg.h @@ -0,0 +1,1076 @@ + +#ifndef _DPP_SMMU1_REG_H_ +#define _DPP_SMMU1_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_se_smmu1_ddr_wdat0_t { + ZXIC_UINT32 ddr_wdat0; +} DPP_SE_SMMU1_DDR_WDAT0_T; + +typedef struct dpp_se_smmu1_dir_arbi_ser_rpful_t { + ZXIC_UINT32 dir_arbi_ser_rpful; +} DPP_SE_SMMU1_DIR_ARBI_SER_RPFUL_T; + +typedef struct dpp_se_smmu1_cfg_wr_arbi_pful2_t { + ZXIC_UINT32 hash_wr_pful; + ZXIC_UINT32 dir_wr_pful; +} DPP_SE_SMMU1_CFG_WR_ARBI_PFUL2_T; + +typedef struct dpp_se_smmu1_etm_tbl_cfg_t { + ZXIC_UINT32 etm_baddr; +} DPP_SE_SMMU1_ETM_TBL_CFG_T; + +typedef struct dpp_se_smmu1_cfg_cash_addr_pful_t { + ZXIC_UINT32 cfg_cash_addr_pful; +} DPP_SE_SMMU1_CFG_CASH_ADDR_PFUL_T; + +typedef struct dpp_se_smmu1_ctrl_rfifo_cfg_t { + ZXIC_UINT32 brst_fwft_fifo_prog_empty_assert; + ZXIC_UINT32 brst_fwft_fifo_prog_empty_negate; + ZXIC_UINT32 brst_fwft_fifo_prog_full_assert; + ZXIC_UINT32 brst_fwft_fifo_prog_full_negate; +} DPP_SE_SMMU1_CTRL_RFIFO_CFG_T; + +typedef struct dpp_se_smmu1_cache_req_fifo_cfg_t { + ZXIC_UINT32 srch_fifo_pfull_assert; + ZXIC_UINT32 srch_fifo_pfull_negate; +} DPP_SE_SMMU1_CACHE_REQ_FIFO_CFG_T; + +typedef struct dpp_se_smmu1_ddr_wdat1_t { + ZXIC_UINT32 ddr_wdat1; +} DPP_SE_SMMU1_DDR_WDAT1_T; + +typedef struct dpp_se_smmu1_ddr_wdat2_t { + ZXIC_UINT32 ddr_wdat2; +} DPP_SE_SMMU1_DDR_WDAT2_T; + +typedef struct dpp_se_smmu1_ddr_wdat3_t { + ZXIC_UINT32 ddr_wdat3; +} DPP_SE_SMMU1_DDR_WDAT3_T; + +typedef struct dpp_se_smmu1_ddr_wdat4_t { + ZXIC_UINT32 ddr_wdat4; +} DPP_SE_SMMU1_DDR_WDAT4_T; + 
+typedef struct dpp_se_smmu1_ddr_wdat5_t { + ZXIC_UINT32 ddr_wdat5; +} DPP_SE_SMMU1_DDR_WDAT5_T; + +typedef struct dpp_se_smmu1_ddr_wdat6_t { + ZXIC_UINT32 ddr_wdat6; +} DPP_SE_SMMU1_DDR_WDAT6_T; + +typedef struct dpp_se_smmu1_ddr_wdat7_t { + ZXIC_UINT32 ddr_wdat7; +} DPP_SE_SMMU1_DDR_WDAT7_T; + +typedef struct dpp_se_smmu1_ddr_wdat8_t { + ZXIC_UINT32 ddr_wdat8; +} DPP_SE_SMMU1_DDR_WDAT8_T; + +typedef struct dpp_se_smmu1_ddr_wdat9_t { + ZXIC_UINT32 ddr_wdat9; +} DPP_SE_SMMU1_DDR_WDAT9_T; + +typedef struct dpp_se_smmu1_ddr_wdat10_t { + ZXIC_UINT32 ddr_wdat10; +} DPP_SE_SMMU1_DDR_WDAT10_T; + +typedef struct dpp_se_smmu1_ddr_wdat11_t { + ZXIC_UINT32 ddr_wdat11; +} DPP_SE_SMMU1_DDR_WDAT11_T; + +typedef struct dpp_se_smmu1_ddr_wdat12_t { + ZXIC_UINT32 ddr_wdat12; +} DPP_SE_SMMU1_DDR_WDAT12_T; + +typedef struct dpp_se_smmu1_ddr_wdat13_t { + ZXIC_UINT32 ddr_wdat13; +} DPP_SE_SMMU1_DDR_WDAT13_T; + +typedef struct dpp_se_smmu1_ddr_wdat14_t { + ZXIC_UINT32 ddr_wdat14; +} DPP_SE_SMMU1_DDR_WDAT14_T; + +typedef struct dpp_se_smmu1_ddr_wdat15_t { + ZXIC_UINT32 ddr_wdat15; +} DPP_SE_SMMU1_DDR_WDAT15_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_en_t { + ZXIC_UINT32 cnt_stat_cache_en; +} DPP_SE_SMMU1_CNT_STAT_CACHE_EN_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_clr_t { + ZXIC_UINT32 cnt_stat_cache_clr; +} DPP_SE_SMMU1_CNT_STAT_CACHE_CLR_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_req_63_32_t { + ZXIC_UINT32 cnt_stat_cache_req_63_32; +} DPP_SE_SMMU1_CNT_STAT_CACHE_REQ_63_32_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_req_31_0_t { + ZXIC_UINT32 cnt_stat_cache_req_31_0; +} DPP_SE_SMMU1_CNT_STAT_CACHE_REQ_31_0_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_hit_63_32_t { + ZXIC_UINT32 cnt_stat_cache_hit_63_32; +} DPP_SE_SMMU1_CNT_STAT_CACHE_HIT_63_32_T; + +typedef struct dpp_se_smmu1_cnt_stat_cache_hit_31_0_t { + ZXIC_UINT32 cnt_stat_cache_hit_31_0; +} DPP_SE_SMMU1_CNT_STAT_CACHE_HIT_31_0_T; + +typedef struct dpp_se_smmu1_ddr_cmd0_t { + ZXIC_UINT32 ecc_en; + 
ZXIC_UINT32 rw_len; + ZXIC_UINT32 baddr; +} DPP_SE_SMMU1_DDR_CMD0_T; + +typedef struct dpp_se_smmu1_info_addr_t { + ZXIC_UINT32 info_addr; +} DPP_SE_SMMU1_INFO_ADDR_T; + +typedef struct dpp_se_smmu1_ddr_cmd1_t { + ZXIC_UINT32 rw_flag; + ZXIC_UINT32 rw_addr; +} DPP_SE_SMMU1_DDR_CMD1_T; + +typedef struct dpp_se_smmu1_clr_start_addr_t { + ZXIC_UINT32 clr_start_addr; +} DPP_SE_SMMU1_CLR_START_ADDR_T; + +typedef struct dpp_se_smmu1_clr_end_addr_t { + ZXIC_UINT32 clr_end_addr; +} DPP_SE_SMMU1_CLR_END_ADDR_T; + +typedef struct dpp_se_smmu1_clr_tbl_en_t { + ZXIC_UINT32 cfg_init_en; + ZXIC_UINT32 clr_tbl_en; +} DPP_SE_SMMU1_CLR_TBL_EN_T; + +typedef struct dpp_se_smmu1_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_SMMU1_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_smmu1_init_done_t { + ZXIC_UINT32 cache_init_done; + ZXIC_UINT32 clr_done; + ZXIC_UINT32 init_ok; +} DPP_SE_SMMU1_INIT_DONE_T; + +typedef struct dpp_se_smmu1_cpu_rsp_rd_done_t { + ZXIC_UINT32 cpu_rsp_rd_done; +} DPP_SE_SMMU1_CPU_RSP_RD_DONE_T; + +typedef struct dpp_se_smmu1_ksch_oam_sp_en_t { + ZXIC_UINT32 ksch_oam_sp_en; +} DPP_SE_SMMU1_KSCH_OAM_SP_EN_T; + +typedef struct dpp_se_smmu1_cfg_cache_en_t { + ZXIC_UINT32 cfg_cache_en; +} DPP_SE_SMMU1_CFG_CACHE_EN_T; + +typedef struct dpp_se_smmu1_cache_age_en_t { + ZXIC_UINT32 cache_age_en; +} DPP_SE_SMMU1_CACHE_AGE_EN_T; + +typedef struct dpp_se_smmu1_cpu_rdat0_t { + ZXIC_UINT32 cpu_rdat0; +} DPP_SE_SMMU1_CPU_RDAT0_T; + +typedef struct dpp_se_smmu1_cpu_rdat1_t { + ZXIC_UINT32 cpu_rdat1; +} DPP_SE_SMMU1_CPU_RDAT1_T; + +typedef struct dpp_se_smmu1_cpu_rdat2_t { + ZXIC_UINT32 cpu_rdat2; +} DPP_SE_SMMU1_CPU_RDAT2_T; + +typedef struct dpp_se_smmu1_cpu_rdat3_t { + ZXIC_UINT32 cpu_rdat3; +} DPP_SE_SMMU1_CPU_RDAT3_T; + +typedef struct dpp_se_smmu1_cpu_rdat4_t { + ZXIC_UINT32 cpu_rdat4; +} DPP_SE_SMMU1_CPU_RDAT4_T; + +typedef struct dpp_se_smmu1_cpu_rdat5_t { + ZXIC_UINT32 cpu_rdat5; +} DPP_SE_SMMU1_CPU_RDAT5_T; + +typedef struct 
dpp_se_smmu1_cpu_rdat6_t { + ZXIC_UINT32 cpu_rdat6; +} DPP_SE_SMMU1_CPU_RDAT6_T; + +typedef struct dpp_se_smmu1_cpu_rdat7_t { + ZXIC_UINT32 cpu_rdat7; +} DPP_SE_SMMU1_CPU_RDAT7_T; + +typedef struct dpp_se_smmu1_cpu_rdat8_t { + ZXIC_UINT32 cpu_rdat8; +} DPP_SE_SMMU1_CPU_RDAT8_T; + +typedef struct dpp_se_smmu1_cpu_rdat9_t { + ZXIC_UINT32 cpu_rdat9; +} DPP_SE_SMMU1_CPU_RDAT9_T; + +typedef struct dpp_se_smmu1_cpu_rdat10_t { + ZXIC_UINT32 cpu_rdat10; +} DPP_SE_SMMU1_CPU_RDAT10_T; + +typedef struct dpp_se_smmu1_cpu_rdat11_t { + ZXIC_UINT32 cpu_rdat11; +} DPP_SE_SMMU1_CPU_RDAT11_T; + +typedef struct dpp_se_smmu1_cpu_rdat12_t { + ZXIC_UINT32 cpu_rdat12; +} DPP_SE_SMMU1_CPU_RDAT12_T; + +typedef struct dpp_se_smmu1_cpu_rdat13_t { + ZXIC_UINT32 cpu_rdat13; +} DPP_SE_SMMU1_CPU_RDAT13_T; + +typedef struct dpp_se_smmu1_cpu_rdat14_t { + ZXIC_UINT32 cpu_rdat14; +} DPP_SE_SMMU1_CPU_RDAT14_T; + +typedef struct dpp_se_smmu1_cpu_rdat15_t { + ZXIC_UINT32 cpu_rdat15; +} DPP_SE_SMMU1_CPU_RDAT15_T; + +typedef struct dpp_se_smmu1_ctrl_cpu_rd_rdy_t { + ZXIC_UINT32 ctrl_cpu_rd_rdy; +} DPP_SE_SMMU1_CTRL_CPU_RD_RDY_T; + +typedef struct dpp_se_smmu1_cpu_warbi_rdy_cfg_t { + ZXIC_UINT32 cpu_warbi_rdy_cfg; +} DPP_SE_SMMU1_CPU_WARBI_RDY_CFG_T; + +typedef struct dpp_se_smmu1_dir_arbi_cpu_rpful_t { + ZXIC_UINT32 smmu1_cfg_rpful; + ZXIC_UINT32 smmu1_cfg_wpful; +} DPP_SE_SMMU1_DIR_ARBI_CPU_RPFUL_T; + +typedef struct dpp_se_smmu1_dir_arbi_wpful_t { + ZXIC_UINT32 smmu1_ser_wdir_pful; + ZXIC_UINT32 smmu1_cfg_wdir_pful; +} DPP_SE_SMMU1_DIR_ARBI_WPFUL_T; + +typedef struct dpp_se_smmu1_cfg_wr_arbi_pful0_t { + ZXIC_UINT32 arbi_out_pful; + ZXIC_UINT32 cpu_wr_pful; +} DPP_SE_SMMU1_CFG_WR_ARBI_PFUL0_T; + +typedef struct dpp_se_smmu1_cfg_wr_arbi_pful1_t { + ZXIC_UINT32 tm_wr_pful; + ZXIC_UINT32 stat_wr_pful; +} DPP_SE_SMMU1_CFG_WR_ARBI_PFUL1_T; + +typedef struct dpp_se_smmu1_smmu1_wdone_pful_cfg_t { + ZXIC_UINT32 smmu1_wdone_pful_cfg; +} DPP_SE_SMMU1_SMMU1_WDONE_PFUL_CFG_T; + +typedef struct 
dpp_se_smmu1_stat_rate_cfg_cnt_t { + ZXIC_UINT32 stat_rate_cfg_cnt; +} DPP_SE_SMMU1_STAT_RATE_CFG_CNT_T; + +typedef struct dpp_se_smmu1_ftm_rate_cfg_cnt_t { + ZXIC_UINT32 ftm_rate_cfg_cnt; +} DPP_SE_SMMU1_FTM_RATE_CFG_CNT_T; + +typedef struct dpp_se_smmu1_etm_rate_cfg_cnt_t { + ZXIC_UINT32 etm_rate_cfg_cnt; +} DPP_SE_SMMU1_ETM_RATE_CFG_CNT_T; + +typedef struct dpp_se_smmu1_dir_rate_cfg_cnt_t { + ZXIC_UINT32 dir_rate_cfg_cnt; +} DPP_SE_SMMU1_DIR_RATE_CFG_CNT_T; + +typedef struct dpp_se_smmu1_hash_rate_cfg_cnt_t { + ZXIC_UINT32 hash_rate_cfg_cnt; +} DPP_SE_SMMU1_HASH_RATE_CFG_CNT_T; + +typedef struct dpp_se_smmu1_ftm_tbl_cfg_t { + ZXIC_UINT32 ftm_baddr; +} DPP_SE_SMMU1_FTM_TBL_CFG_T; + +typedef struct dpp_se_smmu1_lpm_v4_as_tbl_cfg_t { + ZXIC_UINT32 lpm_v4_as_rsp_len; + ZXIC_UINT32 lpm_v4_as_ecc_en; + ZXIC_UINT32 lpm_v4_as_baddr; +} DPP_SE_SMMU1_LPM_V4_AS_TBL_CFG_T; + +typedef struct dpp_se_smmu1_lpm_v4_tbl_cfg_t { + ZXIC_UINT32 lpm_v4_len; + ZXIC_UINT32 lpm_v4_ecc_en; + ZXIC_UINT32 lpm_v4_baddr; +} DPP_SE_SMMU1_LPM_V4_TBL_CFG_T; + +typedef struct dpp_se_smmu1_lpm_v6_tbl_cfg_t { + ZXIC_UINT32 lpm_v6_len; + ZXIC_UINT32 lpm_v6_ecc_en; + ZXIC_UINT32 lpm_v6_baddr; +} DPP_SE_SMMU1_LPM_V6_TBL_CFG_T; + +typedef struct dpp_se_smmu1_lpm_v6_as_tbl_cfg_t { + ZXIC_UINT32 lpm_v6_as_rsp_len; + ZXIC_UINT32 lpm_v6_as_ecc_en; + ZXIC_UINT32 lpm_v6_as_baddr; +} DPP_SE_SMMU1_LPM_V6_AS_TBL_CFG_T; + +typedef struct dpp_se_smmu1_dma_tbl_cfg_t { + ZXIC_UINT32 dma_baddr; +} DPP_SE_SMMU1_DMA_TBL_CFG_T; + +typedef struct dpp_se_smmu1_stat_mode_cfg_t { + ZXIC_UINT32 stat_mode; +} DPP_SE_SMMU1_STAT_MODE_CFG_T; + +typedef struct dpp_se_smmu1_ctrl_rpar_cpu_pful_t { + ZXIC_UINT32 ctrl_rpar_cpu_pful; +} DPP_SE_SMMU1_CTRL_RPAR_CPU_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_dir_pful_t { + ZXIC_UINT32 cfg_ksch_dir_pful; +} DPP_SE_SMMU1_CFG_KSCH_DIR_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_hash_pful_t { + ZXIC_UINT32 cfg_ksch_hash_pful; +} DPP_SE_SMMU1_CFG_KSCH_HASH_PFUL_T; + +typedef struct 
dpp_se_smmu1_cfg_ksch_lpm_pful_t { + ZXIC_UINT32 cfg_ksch_lpm_pful; +} DPP_SE_SMMU1_CFG_KSCH_LPM_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_lpm_as_pful_t { + ZXIC_UINT32 cfg_ksch_lpm_as_pful; +} DPP_SE_SMMU1_CFG_KSCH_LPM_AS_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_stat_pful_t { + ZXIC_UINT32 cfg_ksch_stat_pful; +} DPP_SE_SMMU1_CFG_KSCH_STAT_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_tm_pful_t { + ZXIC_UINT32 cfg_ksch_tm_pful; +} DPP_SE_SMMU1_CFG_KSCH_TM_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_oam_pful_t { + ZXIC_UINT32 cfg_ksch_oam_pful; +} DPP_SE_SMMU1_CFG_KSCH_OAM_PFUL_T; + +typedef struct dpp_se_smmu1_cfg_ksch_dma_pful_t { + ZXIC_UINT32 cfg_ksch_dma_pful; +} DPP_SE_SMMU1_CFG_KSCH_DMA_PFUL_T; + +typedef struct dpp_se_smmu1_ctrl_wfifo_cfg_t { + ZXIC_UINT32 ctrl_wfifo_cfg; +} DPP_SE_SMMU1_CTRL_WFIFO_CFG_T; + +typedef struct dpp_se_smmu1_rsch_hash_ptr_cfg_t { + ZXIC_UINT32 rsch_hash_ptr_cfg; +} DPP_SE_SMMU1_RSCH_HASH_PTR_CFG_T; + +typedef struct dpp_se_smmu1_rsch_lpm_ptr_cfg_t { + ZXIC_UINT32 rsch_lpm_ptr_cfg; +} DPP_SE_SMMU1_RSCH_LPM_PTR_CFG_T; + +typedef struct dpp_se_smmu1_rsch_lpm_as_ptr_cfg_t { + ZXIC_UINT32 rsch_lpm_as_ptr_cfg; +} DPP_SE_SMMU1_RSCH_LPM_AS_PTR_CFG_T; + +typedef struct dpp_se_smmu1_rsch_stat_ptr_cfg_t { + ZXIC_UINT32 rsch_stat_ptr_cfg; +} DPP_SE_SMMU1_RSCH_STAT_PTR_CFG_T; + +typedef struct dpp_se_smmu1_rsch_oam_ptr_cfg_t { + ZXIC_UINT32 rsch_oam_ptr_cfg; +} DPP_SE_SMMU1_RSCH_OAM_PTR_CFG_T; + +typedef struct dpp_se_smmu1_rschd_fifo_pept_cfg_t { + ZXIC_UINT32 rschd_fifo_pept_cfg; +} DPP_SE_SMMU1_RSCHD_FIFO_PEPT_CFG_T; + +typedef struct dpp_se_smmu1_dir_fifo_pful_cfg_t { + ZXIC_UINT32 dir_fifo_pful_cfg; +} DPP_SE_SMMU1_DIR_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_hash_fifo_pful_cfg_t { + ZXIC_UINT32 hash_fifo_pful_cfg; +} DPP_SE_SMMU1_HASH_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_lpm_fifo_pful_cfg_t { + ZXIC_UINT32 lpm_fifo_pful_cfg; +} DPP_SE_SMMU1_LPM_FIFO_PFUL_CFG_T; + +typedef struct 
dpp_se_smmu1_lpm_as_fifo_pful_cfg_t { + ZXIC_UINT32 lpm_as_fifo_pful_cfg; +} DPP_SE_SMMU1_LPM_AS_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_stat_fifo_pful_cfg_t { + ZXIC_UINT32 stat_fifo_pful_cfg; +} DPP_SE_SMMU1_STAT_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_ftm_fifo_pful_cfg_t { + ZXIC_UINT32 ftm_fifo_pful_cfg; +} DPP_SE_SMMU1_FTM_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_etm_fifo_pful_cfg_t { + ZXIC_UINT32 etm_fifo_pful_cfg; +} DPP_SE_SMMU1_ETM_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_oam_fifo_pful_cfg_t { + ZXIC_UINT32 oam_fifo_pful_cfg; +} DPP_SE_SMMU1_OAM_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_dma_fifo_pful_cfg_t { + ZXIC_UINT32 dma_fifo_pful_cfg; +} DPP_SE_SMMU1_DMA_FIFO_PFUL_CFG_T; + +typedef struct dpp_se_smmu1_cache_rsp_rr_fifo_cfg_t { + ZXIC_UINT32 rr_pfull_assert0; + ZXIC_UINT32 rr_pfull_negate0; +} DPP_SE_SMMU1_CACHE_RSP_RR_FIFO_CFG_T; + +typedef struct dpp_se_smmu1_ddr_rsp_rr_fifo_cfg_t { + ZXIC_UINT32 rr_pfull_assert1; + ZXIC_UINT32 rr_pfull_negate1; +} DPP_SE_SMMU1_DDR_RSP_RR_FIFO_CFG_T; + +typedef struct dpp_se_smmu1_cpu_cahce_fifo_cfg_t { + ZXIC_UINT32 smmu1_cahce_fwft_fifo_pfull_assert; + ZXIC_UINT32 smmu1_cahce_fwft_fifo_pfull_negate; +} DPP_SE_SMMU1_CPU_CAHCE_FIFO_CFG_T; + +typedef struct dpp_se_smmu1_cache_rsp_fifo_cfg_t { + ZXIC_UINT32 rschd_fifo_pfull_assert; + ZXIC_UINT32 rschd_fifo_pfull_negate; +} DPP_SE_SMMU1_CACHE_RSP_FIFO_CFG_T; + +typedef struct dpp_se_smmu1_test_state_t { + ZXIC_UINT32 test_state; +} DPP_SE_SMMU1_TEST_STATE_T; + +typedef struct dpp_se_smmu1_cache_fifo_ept_t { + ZXIC_UINT32 cache_fifo_ept; +} DPP_SE_SMMU1_CACHE_FIFO_EPT_T; + +typedef struct dpp_se_smmu1_rr_fifo_ept_t { + ZXIC_UINT32 rr_fifo_ept; +} DPP_SE_SMMU1_RR_FIFO_EPT_T; + +typedef struct dpp_se_smmu1_wr_fifo_ept_t { + ZXIC_UINT32 dir_arbi_ept; +} DPP_SE_SMMU1_WR_FIFO_EPT_T; + +typedef struct dpp_se_smmu1_wdone_fifo_ept_t { + ZXIC_UINT32 wdone_fifo_ept; +} DPP_SE_SMMU1_WDONE_FIFO_EPT_T; + +typedef struct 
dpp_se_smmu1_kschd_fifo_ept0_t { + ZXIC_UINT32 kschd_fifo_ept0; +} DPP_SE_SMMU1_KSCHD_FIFO_EPT0_T; + +typedef struct dpp_se_smmu1_cash_fifo_ept_t { + ZXIC_UINT32 cash_fifo_ept; +} DPP_SE_SMMU1_CASH_FIFO_EPT_T; + +typedef struct dpp_se_smmu1_ctrl_fifo_ept_t { + ZXIC_UINT32 ctrl_fifo_ept; +} DPP_SE_SMMU1_CTRL_FIFO_EPT_T; + +typedef struct dpp_se_smmu1_smmu1_rschd_ept3_t { + ZXIC_UINT32 rschd_fifo_ept3; +} DPP_SE_SMMU1_SMMU1_RSCHD_EPT3_T; + +typedef struct dpp_se_smmu1_smmu1_rschd_ept2_t { + ZXIC_UINT32 rschd_fifo_ept2; +} DPP_SE_SMMU1_SMMU1_RSCHD_EPT2_T; + +typedef struct dpp_se_smmu1_smmu1_rschd_ept1_t { + ZXIC_UINT32 rschd_fifo_ept1; +} DPP_SE_SMMU1_SMMU1_RSCHD_EPT1_T; + +typedef struct dpp_se_smmu1_smmu1_rschd_ept0_t { + ZXIC_UINT32 rschd_fifo_ept0; +} DPP_SE_SMMU1_SMMU1_RSCHD_EPT0_T; + +typedef struct dpp_se_smmu1_cash0_ecc_err_addr_t { + ZXIC_UINT32 cash0_ecc_err_addr; +} DPP_SE_SMMU1_CASH0_ECC_ERR_ADDR_T; + +typedef struct dpp_se_smmu1_arbi_cpu_wr_rdy_t { + ZXIC_UINT32 arbi_cpu_wr_rdy; +} DPP_SE_SMMU1_ARBI_CPU_WR_RDY_T; + +typedef struct dpp_se_smmu1_smmu1_int_0_en_t { + ZXIC_UINT32 smmu1_int_0_en; +} DPP_SE_SMMU1_SMMU1_INT_0_EN_T; + +typedef struct dpp_se_smmu1_smmu1_int_0_mask_t { + ZXIC_UINT32 smmu1_int_0_mask; +} DPP_SE_SMMU1_SMMU1_INT_0_MASK_T; + +typedef struct dpp_se_smmu1_smmu1_int_1_en_t { + ZXIC_UINT32 smmu1_int_1_en; +} DPP_SE_SMMU1_SMMU1_INT_1_EN_T; + +typedef struct dpp_se_smmu1_smmu1_int_1_mask_t { + ZXIC_UINT32 smmu1_int_1_mask; +} DPP_SE_SMMU1_SMMU1_INT_1_MASK_T; + +typedef struct dpp_se_smmu1_smmu1_int_2_en_t { + ZXIC_UINT32 smmu1_int_2_en; +} DPP_SE_SMMU1_SMMU1_INT_2_EN_T; + +typedef struct dpp_se_smmu1_smmu1_int_2_mask_t { + ZXIC_UINT32 smmu1_int_2_mask; +} DPP_SE_SMMU1_SMMU1_INT_2_MASK_T; + +typedef struct dpp_se_smmu1_smmu1_int_3_en_t { + ZXIC_UINT32 smmu1_int_3_en; +} DPP_SE_SMMU1_SMMU1_INT_3_EN_T; + +typedef struct dpp_se_smmu1_smmu1_int_3_mask_t { + ZXIC_UINT32 smmu1_int_3_mask; +} DPP_SE_SMMU1_SMMU1_INT_3_MASK_T; + +typedef struct 
dpp_se_smmu1_smmu1_int_0_status_t { + ZXIC_UINT32 smmu1_int_0_status; +} DPP_SE_SMMU1_SMMU1_INT_0_STATUS_T; + +typedef struct dpp_se_smmu1_smmu1_int_1_status_t { + ZXIC_UINT32 smmu1_int_1_status; +} DPP_SE_SMMU1_SMMU1_INT_1_STATUS_T; + +typedef struct dpp_se_smmu1_smmu1_int_2_status_t { + ZXIC_UINT32 smmu1_int_2_status; +} DPP_SE_SMMU1_SMMU1_INT_2_STATUS_T; + +typedef struct dpp_se_smmu1_smmu1_int_3_status_t { + ZXIC_UINT32 smmu1_int_3_status; +} DPP_SE_SMMU1_SMMU1_INT_3_STATUS_T; + +typedef struct dpp_se_smmu1_smmu1_int_status_t { + ZXIC_UINT32 smmu1_int_status; +} DPP_SE_SMMU1_SMMU1_INT_STATUS_T; + +typedef struct dpp_se_smmu1_ctrl_to_cash7_0_fc_cnt_t { + ZXIC_UINT32 ctrl_to_cash7_0_fc_cnt; +} DPP_SE_SMMU1_CTRL_TO_CASH7_0_FC_CNT_T; + +typedef struct dpp_se_smmu1_cash7_0_to_ctrl_req_cnt_t { + ZXIC_UINT32 cash7_0_to_ctrl_req_cnt; +} DPP_SE_SMMU1_CASH7_0_TO_CTRL_REQ_CNT_T; + +typedef struct dpp_se_smmu1_rschd_to_cache7_fc_cnt_t { + ZXIC_UINT32 rschd_to_cache7_fc_cnt; +} DPP_SE_SMMU1_RSCHD_TO_CACHE7_FC_CNT_T; + +typedef struct dpp_se_smmu1_cash7_to_cache_rsp_cnt_t { + ZXIC_UINT32 cash7_to_cache_rsp_cnt; +} DPP_SE_SMMU1_CASH7_TO_CACHE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_cash7_to_ctrl_fc_cnt_t { + ZXIC_UINT32 cash7_to_ctrl_fc_cnt; +} DPP_SE_SMMU1_CASH7_TO_CTRL_FC_CNT_T; + +typedef struct dpp_se_smmu1_ctrl_to_cash7_0_rsp_cnt_t { + ZXIC_UINT32 ctrl_to_cash7_0_rsp_cnt; +} DPP_SE_SMMU1_CTRL_TO_CASH7_0_RSP_CNT_T; + +typedef struct dpp_se_smmu1_kschd_to_cache7_0_req_cnt_t { + ZXIC_UINT32 kschd_to_cache7_0_req_cnt; +} DPP_SE_SMMU1_KSCHD_TO_CACHE7_0_REQ_CNT_T; + +typedef struct dpp_se_smmu1_cache7_0_to_kschd_fc_cnt_t { + ZXIC_UINT32 cache7_0_to_kschd_fc_cnt; +} DPP_SE_SMMU1_CACHE7_0_TO_KSCHD_FC_CNT_T; + +typedef struct dpp_se_smmu1_dma_to_smmu1_rd_req_cnt_t { + ZXIC_UINT32 dma_to_smmu1_rd_req_cnt; +} DPP_SE_SMMU1_DMA_TO_SMMU1_RD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_oam_to_kschd_req_cnt_t { + ZXIC_UINT32 oam_to_kschd_req_cnt; +} DPP_SE_SMMU1_OAM_TO_KSCHD_REQ_CNT_T; + 
+typedef struct dpp_se_smmu1_oam_rr_state_rsp_cnt_t { + ZXIC_UINT32 oam_rr_state_rsp_cnt; +} DPP_SE_SMMU1_OAM_RR_STATE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_oam_clash_info_cnt_t { + ZXIC_UINT32 oam_clash_info_cnt; +} DPP_SE_SMMU1_OAM_CLASH_INFO_CNT_T; + +typedef struct dpp_se_smmu1_oam_to_rr_req_cnt_t { + ZXIC_UINT32 oam_to_rr_req_cnt; +} DPP_SE_SMMU1_OAM_TO_RR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_to_kschd_req_cnt_t { + ZXIC_UINT32 lpm_as_to_kschd_req_cnt; +} DPP_SE_SMMU1_LPM_AS_TO_KSCHD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_rr_state_rsp_cnt_t { + ZXIC_UINT32 lpm_as_rr_state_rsp_cnt; +} DPP_SE_SMMU1_LPM_AS_RR_STATE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_clash_info_cnt_t { + ZXIC_UINT32 lpm_as_clash_info_cnt; +} DPP_SE_SMMU1_LPM_AS_CLASH_INFO_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_to_rr_req_cnt_t { + ZXIC_UINT32 lpm_as_to_rr_req_cnt; +} DPP_SE_SMMU1_LPM_AS_TO_RR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_to_kschd_req_cnt_t { + ZXIC_UINT32 lpm_to_kschd_req_cnt; +} DPP_SE_SMMU1_LPM_TO_KSCHD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_rr_state_rsp_cnt_t { + ZXIC_UINT32 lpm_rr_state_rsp_cnt; +} DPP_SE_SMMU1_LPM_RR_STATE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_lpm_clash_info_cnt_t { + ZXIC_UINT32 lpm_clash_info_cnt; +} DPP_SE_SMMU1_LPM_CLASH_INFO_CNT_T; + +typedef struct dpp_se_smmu1_lpm_to_rr_req_cnt_t { + ZXIC_UINT32 lpm_to_rr_req_cnt; +} DPP_SE_SMMU1_LPM_TO_RR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_to_kschd_req_cnt_t { + ZXIC_UINT32 hash3_0_to_kschd_req_cnt; +} DPP_SE_SMMU1_HASH3_0_TO_KSCHD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_rr_state_rsp_cnt_t { + ZXIC_UINT32 hash3_0_rr_state_rsp_cnt; +} DPP_SE_SMMU1_HASH3_0_RR_STATE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_clash_info_cnt_t { + ZXIC_UINT32 hash3_0_clash_info_cnt; +} DPP_SE_SMMU1_HASH3_0_CLASH_INFO_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_to_rr_req_cnt_t { + ZXIC_UINT32 hash3_0_to_rr_req_cnt; +} 
DPP_SE_SMMU1_HASH3_0_TO_RR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_dir3_0_to_kschd_req_cnt_t { + ZXIC_UINT32 dir3_0_to_kschd_req_cnt; +} DPP_SE_SMMU1_DIR3_0_TO_KSCHD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_dir3_0_clash_info_cnt_t { + ZXIC_UINT32 dir3_0_clash_info_cnt; +} DPP_SE_SMMU1_DIR3_0_CLASH_INFO_CNT_T; + +typedef struct dpp_se_smmu1_dir_tbl_wr_req_cnt_t { + ZXIC_UINT32 dir_tbl_wr_req_cnt; +} DPP_SE_SMMU1_DIR_TBL_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_warbi_to_dir_tbl_warbi_fc_cnt_t { + ZXIC_UINT32 warbi_to_dir_tbl_warbi_fc_cnt; +} DPP_SE_SMMU1_WARBI_TO_DIR_TBL_WARBI_FC_CNT_T; + +typedef struct dpp_se_smmu1_dir3_0_to_bank_rr_req_cnt_t { + ZXIC_UINT32 dir3_0_to_bank_rr_req_cnt; +} DPP_SE_SMMU1_DIR3_0_TO_BANK_RR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_kschd_to_dir3_0_fc_cnt_t { + ZXIC_UINT32 kschd_to_dir3_0_fc_cnt; +} DPP_SE_SMMU1_KSCHD_TO_DIR3_0_FC_CNT_T; + +typedef struct dpp_se_smmu1_dir3_0_rr_state_rsp_cnt_t { + ZXIC_UINT32 dir3_0_rr_state_rsp_cnt; +} DPP_SE_SMMU1_DIR3_0_RR_STATE_RSP_CNT_T; + +typedef struct dpp_se_smmu1_wr_done_to_warbi_fc_cnt_t { + ZXIC_UINT32 wr_done_to_warbi_fc_cnt; +} DPP_SE_SMMU1_WR_DONE_TO_WARBI_FC_CNT_T; + +typedef struct dpp_se_smmu1_wr_done_ptr_req_cnt_t { + ZXIC_UINT32 wr_done_ptr_req_cnt; +} DPP_SE_SMMU1_WR_DONE_PTR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_ctrl7_0_to_warbi_fc_cnt_t { + ZXIC_UINT32 ctrl7_0_to_warbi_fc_cnt; +} DPP_SE_SMMU1_CTRL7_0_TO_WARBI_FC_CNT_T; + +typedef struct dpp_se_smmu1_warbi_to_ctrl7_0_wr_req_cnt_t { + ZXIC_UINT32 warbi_to_ctrl7_0_wr_req_cnt; +} DPP_SE_SMMU1_WARBI_TO_CTRL7_0_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_warbi_to_cash7_0_wr_req_cnt_t { + ZXIC_UINT32 warbi_to_cash7_0_wr_req_cnt; +} DPP_SE_SMMU1_WARBI_TO_CASH7_0_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_warbi_to_cpu_wr_fc_cnt_t { + ZXIC_UINT32 warbi_to_cpu_wr_fc_cnt; +} DPP_SE_SMMU1_WARBI_TO_CPU_WR_FC_CNT_T; + +typedef struct dpp_se_smmu1_cpu_wr_req_cnt_t { + ZXIC_UINT32 cpu_wr_req_cnt; +} DPP_SE_SMMU1_CPU_WR_REQ_CNT_T; + 
+typedef struct dpp_se_smmu1_ctrl7_0_to_cpu_rd_rsp_cnt_t { + ZXIC_UINT32 ctrl7_0_to_cpu_rd_rsp_cnt; +} DPP_SE_SMMU1_CTRL7_0_TO_CPU_RD_RSP_CNT_T; + +typedef struct dpp_se_smmu1_cpu_to_ctrl7_0_rd_req_cnt_t { + ZXIC_UINT32 cpu_to_ctrl7_0_rd_req_cnt; +} DPP_SE_SMMU1_CPU_TO_CTRL7_0_RD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_cpu_rd_dir_tbl_rsp_cnt_t { + ZXIC_UINT32 cpu_rd_dir_tbl_rsp_cnt; +} DPP_SE_SMMU1_CPU_RD_DIR_TBL_RSP_CNT_T; + +typedef struct dpp_se_smmu1_cpu_to_dir_tbl_rd_wr_req_cnt_t { + ZXIC_UINT32 cpu_to_dir_tbl_rd_wr_req_cnt; +} DPP_SE_SMMU1_CPU_TO_DIR_TBL_RD_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_mmu_7_0_rsp_fc_cnt_t { + ZXIC_UINT32 smmu1_to_mmu_7_0_rsp_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_MMU_7_0_RSP_FC_CNT_T; + +typedef struct dpp_se_smmu1_mmu_7_0_to_smmu1_rd_rsp_cnt_t { + ZXIC_UINT32 mmu_7_0_to_smmu1_rd_rsp_cnt; +} DPP_SE_SMMU1_MMU_7_0_TO_SMMU1_RD_RSP_CNT_T; + +typedef struct dpp_se_smmu1_mmu_7_0_to_smmu1_rd_fc_cnt_t { + ZXIC_UINT32 mmu_7_0_to_smmu1_rd_fc_cnt; +} DPP_SE_SMMU1_MMU_7_0_TO_SMMU1_RD_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_mmu_7_rd_req_cnt_t { + ZXIC_UINT32 smmu1_to_mmu_7_rd_req_cnt; +} DPP_SE_SMMU1_SMMU1_TO_MMU_7_RD_REQ_CNT_T; + +typedef struct dpp_se_smmu1_mmu_7_to_smmu1_wr_fc_cnt_t { + ZXIC_UINT32 mmu_7_to_smmu1_wr_fc_cnt; +} DPP_SE_SMMU1_MMU_7_TO_SMMU1_WR_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_mmu_7_0_wr_req_cnt_t { + ZXIC_UINT32 smmu1_to_mmu_7_0_wr_req_cnt; +} DPP_SE_SMMU1_SMMU1_TO_MMU_7_0_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_se_to_smmu1_wr_rsp_fc_cnt_t { + ZXIC_UINT32 se_to_smmu1_wr_rsp_fc_cnt; +} DPP_SE_SMMU1_SE_TO_SMMU1_WR_RSP_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_se_wr_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_se_wr_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_SE_WR_RSP_CNT_T; + +typedef struct dpp_se_smmu1_ddr7_0_wr_rsp_cnt_t { + ZXIC_UINT32 ddr7_0_wr_rsp_cnt; +} DPP_SE_SMMU1_DDR7_0_WR_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_as_fc_cnt_t { + ZXIC_UINT32 smmu1_to_as_fc_cnt; +} 
DPP_SE_SMMU1_SMMU1_TO_AS_FC_CNT_T; + +typedef struct dpp_se_smmu1_as_to_smmu1_wr_req_cnt_t { + ZXIC_UINT32 as_to_smmu1_wr_req_cnt; +} DPP_SE_SMMU1_AS_TO_SMMU1_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_se_parser_fc_cnt_t { + ZXIC_UINT32 smmu1_to_se_parser_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_SE_PARSER_FC_CNT_T; + +typedef struct dpp_se_smmu1_se_parser_to_smmu1_req_cnt_t { + ZXIC_UINT32 se_parser_to_smmu1_req_cnt; +} DPP_SE_SMMU1_SE_PARSER_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_etm_wr_fc_cnt_t { + ZXIC_UINT32 smmu1_to_etm_wr_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_ETM_WR_FC_CNT_T; + +typedef struct dpp_se_smmu1_etm_wr_req_cnt_t { + ZXIC_UINT32 etm_wr_req_cnt; +} DPP_SE_SMMU1_ETM_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_ftm_wr_fc_cnt_t { + ZXIC_UINT32 smmu1_to_ftm_wr_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_FTM_WR_FC_CNT_T; + +typedef struct dpp_se_smmu1_ftm_wr_req_cnt_t { + ZXIC_UINT32 ftm_wr_req_cnt; +} DPP_SE_SMMU1_FTM_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_state_wr_fc_cnt_t { + ZXIC_UINT32 smmu1_to_state_wr_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_STATE_WR_FC_CNT_T; + +typedef struct dpp_se_smmu1_state_wr_req_cnt_t { + ZXIC_UINT32 state_wr_req_cnt; +} DPP_SE_SMMU1_STATE_WR_REQ_CNT_T; + +typedef struct dpp_se_smmu1_se_to_dma_rsp_cnt_t { + ZXIC_UINT32 se_to_dma_rsp_cnt; +} DPP_SE_SMMU1_SE_TO_DMA_RSP_CNT_T; + +typedef struct dpp_se_smmu1_se_to_dma_fc_cnt_t { + ZXIC_UINT32 se_to_dma_fc_cnt; +} DPP_SE_SMMU1_SE_TO_DMA_FC_CNT_T; + +typedef struct dpp_se_smmu1_oam_to_smmu1_fc_cnt_t { + ZXIC_UINT32 oam_to_smmu1_fc_cnt; +} DPP_SE_SMMU1_OAM_TO_SMMU1_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_oam_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_oam_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_OAM_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_oam_fc_cnt_t { + ZXIC_UINT32 smmu1_to_oam_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_OAM_FC_CNT_T; + +typedef struct dpp_se_smmu1_oam_to_smmu1_req_cnt_t { + ZXIC_UINT32 oam_to_smmu1_req_cnt; +} 
DPP_SE_SMMU1_OAM_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_etm_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_etm_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_ETM_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_ftm_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_ftm_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_FTM_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_etm_fc_cnt_t { + ZXIC_UINT32 smmu1_to_etm_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_ETM_FC_CNT_T; + +typedef struct dpp_se_smmu1_etm_to_smmu1_req_cnt_t { + ZXIC_UINT32 etm_to_smmu1_req_cnt; +} DPP_SE_SMMU1_ETM_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_ftm_fc_cnt_t { + ZXIC_UINT32 smmu1_to_ftm_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_FTM_FC_CNT_T; + +typedef struct dpp_se_smmu1_ftm_to_smmu1_req_cnt_t { + ZXIC_UINT32 ftm_to_smmu1_req_cnt; +} DPP_SE_SMMU1_FTM_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_stat_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_stat_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_STAT_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_stat_fc_cnt_t { + ZXIC_UINT32 smmu1_to_stat_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_STAT_FC_CNT_T; + +typedef struct dpp_se_smmu1_stat_to_smmu1_req_cnt_t { + ZXIC_UINT32 stat_to_smmu1_req_cnt; +} DPP_SE_SMMU1_STAT_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_to_smmu1_fc_cnt_t { + ZXIC_UINT32 lpm_as_to_smmu1_fc_cnt; +} DPP_SE_SMMU1_LPM_AS_TO_SMMU1_FC_CNT_T; + +typedef struct dpp_se_smmu1_lpm_to_smmu1_fc_cnt_t { + ZXIC_UINT32 lpm_to_smmu1_fc_cnt; +} DPP_SE_SMMU1_LPM_TO_SMMU1_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_lpm_as_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_lpm_as_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_LPM_AS_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_lpm_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_lpm_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_LPM_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_lpm_as_fc_cnt_t { + ZXIC_UINT32 smmu1_to_lpm_as_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_LPM_AS_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_lpm_fc_cnt_t { + ZXIC_UINT32 smmu1_to_lpm_fc_cnt; +} 
DPP_SE_SMMU1_SMMU1_TO_LPM_FC_CNT_T; + +typedef struct dpp_se_smmu1_lpm_as_to_smmu1_req_cnt_t { + ZXIC_UINT32 lpm_as_to_smmu1_req_cnt; +} DPP_SE_SMMU1_LPM_AS_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_lpm_to_smmu1_req_cnt_t { + ZXIC_UINT32 lpm_to_smmu1_req_cnt; +} DPP_SE_SMMU1_LPM_TO_SMMU1_REQ_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_to_smmu1_fc_cnt_t { + ZXIC_UINT32 hash3_0_to_smmu1_fc_cnt; +} DPP_SE_SMMU1_HASH3_0_TO_SMMU1_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_hash3_0_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_hash3_0_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_HASH3_0_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_hash3_0_fc_cnt_t { + ZXIC_UINT32 smmu1_to_hash3_0_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_HASH3_0_FC_CNT_T; + +typedef struct dpp_se_smmu1_hash3_0_to_smmu1_cnt_t { + ZXIC_UINT32 hash3_0_to_smmu1_cnt; +} DPP_SE_SMMU1_HASH3_0_TO_SMMU1_CNT_T; + +typedef struct dpp_se_smmu1_se_to_smmu1_dir3_0_rsp_fc_cnt_t { + ZXIC_UINT32 se_to_smmu1_dir3_0_rsp_fc_cnt; +} DPP_SE_SMMU1_SE_TO_SMMU1_DIR3_0_RSP_FC_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_se_dir3_0_rsp_cnt_t { + ZXIC_UINT32 smmu1_to_se_dir3_0_rsp_cnt; +} DPP_SE_SMMU1_SMMU1_TO_SE_DIR3_0_RSP_CNT_T; + +typedef struct dpp_se_smmu1_smmu1_to_se_dir3_0_fc_cnt_t { + ZXIC_UINT32 smmu1_to_se_dir3_0_fc_cnt; +} DPP_SE_SMMU1_SMMU1_TO_SE_DIR3_0_FC_CNT_T; + +typedef struct dpp_se_smmu1_se_to_smmu1_dir3_0_cnt_t { + ZXIC_UINT32 se_to_smmu1_dir3_0_cnt; +} DPP_SE_SMMU1_SE_TO_SMMU1_DIR3_0_CNT_T; + +typedef struct dpp_se_smmu1_cache7_0_to_rschd_rsp_cnt_t { + ZXIC_UINT32 cache7_0_to_rschd_rsp_cnt; +} DPP_SE_SMMU1_CACHE7_0_TO_RSCHD_RSP_CNT_T; + +typedef struct dpp_se_cmmu_ddr_rw_addr_t { + ZXIC_UINT32 ddr_wr; +} DPP_SE_CMMU_DDR_RW_ADDR_T; + +typedef struct dpp_se_cmmu_ddr_rw_mode_t { + ZXIC_UINT32 ddr_rw_flag; + ZXIC_UINT32 ddr_rw_mode; +} DPP_SE_CMMU_DDR_RW_MODE_T; + +typedef struct dpp_se_cmmu_cp_cmd_t { + ZXIC_UINT32 stat_tbl_baddr; +} DPP_SE_CMMU_CP_CMD_T; + +typedef struct dpp_se_cmmu_cpu_ind_rd_done_t { + ZXIC_UINT32 
cpu_ind_rd_done; +} DPP_SE_CMMU_CPU_IND_RD_DONE_T; + +typedef struct dpp_se_cmmu_cpu_ind_rdat0_t { + ZXIC_UINT32 cpu_ind_rdat0; +} DPP_SE_CMMU_CPU_IND_RDAT0_T; + +typedef struct dpp_se_cmmu_cpu_ind_rdat1_t { + ZXIC_UINT32 cpu_ind_rdat1; +} DPP_SE_CMMU_CPU_IND_RDAT1_T; + +typedef struct dpp_se_cmmu_cpu_ind_rdat2_t { + ZXIC_UINT32 cpu_ind_rdat2; +} DPP_SE_CMMU_CPU_IND_RDAT2_T; + +typedef struct dpp_se_cmmu_cpu_ind_rdat3_t { + ZXIC_UINT32 cpu_ind_rdat3; +} DPP_SE_CMMU_CPU_IND_RDAT3_T; + +typedef struct dpp_se_cmmu_cpu_ddr_fifo_almful_t { + ZXIC_UINT32 cpu_ddr_fifo_almful; +} DPP_SE_CMMU_CPU_DDR_FIFO_ALMFUL_T; + +typedef struct dpp_se_cmmu_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_SE_CMMU_DEBUG_CNT_MODE_T; + +typedef struct dpp_se_cmmu_cmmu_pful_cfg_t { + ZXIC_UINT32 alu_cmd_pful_negate; + ZXIC_UINT32 alu_cmd_pful_assert; +} DPP_SE_CMMU_CMMU_PFUL_CFG_T; + +typedef struct dpp_se_cmmu_cmmu_stat_pful_cfg_t { + ZXIC_UINT32 cmmu_stat_pful_negate; + ZXIC_UINT32 cmmu_stat_pful_assert; +} DPP_SE_CMMU_CMMU_STAT_PFUL_CFG_T; + +typedef struct dpp_se_cmmu_stat_overflow_mode_t { + ZXIC_UINT32 stat_overflow_mode; +} DPP_SE_CMMU_STAT_OVERFLOW_MODE_T; + +typedef struct dpp_se_cmmu_cmmu_cp_fifo_pful_t { + ZXIC_UINT32 cmmu_cp_fifo_pful; +} DPP_SE_CMMU_CMMU_CP_FIFO_PFUL_T; + +typedef struct dpp_se_cmmu_ddr_wr_dat0_t { + ZXIC_UINT32 ddr_wr_dat0; +} DPP_SE_CMMU_DDR_WR_DAT0_T; + +typedef struct dpp_se_cmmu_ddr_wr_dat1_t { + ZXIC_UINT32 ddr_wr_dat1; +} DPP_SE_CMMU_DDR_WR_DAT1_T; + +typedef struct dpp_se_cmmu_cmmu_int_unmask_flag_t { + ZXIC_UINT32 cmmu_int_unmask_flag; +} DPP_SE_CMMU_CMMU_INT_UNMASK_FLAG_T; + +typedef struct dpp_se_cmmu_cmmu_int_en_t { + ZXIC_UINT32 cmmu_int_en12; + ZXIC_UINT32 cmmu_int_en11; + ZXIC_UINT32 cmmu_int_en10; + ZXIC_UINT32 cmmu_int_en9; + ZXIC_UINT32 cmmu_int_en8; + ZXIC_UINT32 cmmu_int_en7; + ZXIC_UINT32 cmmu_int_en6; + ZXIC_UINT32 cmmu_int_en5; + ZXIC_UINT32 cmmu_int_en4; + ZXIC_UINT32 cmmu_int_en3; + ZXIC_UINT32 
cmmu_int_en2; + ZXIC_UINT32 cmmu_int_en1; + ZXIC_UINT32 cmmu_int_en0; +} DPP_SE_CMMU_CMMU_INT_EN_T; + +typedef struct dpp_se_cmmu_cmmu_int_mask_t { + ZXIC_UINT32 cmmu_int_mask12; + ZXIC_UINT32 cmmu_int_mask11; + ZXIC_UINT32 cmmu_int_mask10; + ZXIC_UINT32 cmmu_int_mask9; + ZXIC_UINT32 cmmu_int_mask8; + ZXIC_UINT32 cmmu_int_mask7; + ZXIC_UINT32 cmmu_int_mask6; + ZXIC_UINT32 cmmu_int_mask5; + ZXIC_UINT32 cmmu_int_mask4; + ZXIC_UINT32 cmmu_int_mask3; + ZXIC_UINT32 cmmu_int_mask2; + ZXIC_UINT32 cmmu_int_mask1; + ZXIC_UINT32 cmmu_int_mask0; +} DPP_SE_CMMU_CMMU_INT_MASK_T; + +typedef struct dpp_se_cmmu_cmmu_int_status_t { + ZXIC_UINT32 cmmu_int_status12; + ZXIC_UINT32 cmmu_int_status11; + ZXIC_UINT32 cmmu_int_status10; + ZXIC_UINT32 cmmu_int_status9; + ZXIC_UINT32 cmmu_int_status8; + ZXIC_UINT32 cmmu_int_status7; + ZXIC_UINT32 cmmu_int_status6; + ZXIC_UINT32 cmmu_int_status5; + ZXIC_UINT32 cmmu_int_status4; + ZXIC_UINT32 cmmu_int_status3; + ZXIC_UINT32 cmmu_int_status2; + ZXIC_UINT32 cmmu_int_status1; + ZXIC_UINT32 cmmu_int_status0; +} DPP_SE_CMMU_CMMU_INT_STATUS_T; + +typedef struct dpp_se_cmmu_stat_cmmu_req_cnt_t { + ZXIC_UINT32 stat_cmmu_req_cnt; +} DPP_SE_CMMU_STAT_CMMU_REQ_CNT_T; + +typedef struct dpp_se_cmmu_cmmu_fc0_cnt_t { + ZXIC_UINT32 cmmu_stat_rdy; +} DPP_SE_CMMU_CMMU_FC0_CNT_T; + +typedef struct dpp_se_cmmu_cmmu_fc1_cnt_t { + ZXIC_UINT32 smmu1_cmmu_wr_rdy; +} DPP_SE_CMMU_CMMU_FC1_CNT_T; + +typedef struct dpp_se_cmmu_cmmu_fc2_cnt_t { + ZXIC_UINT32 smmu1_cmmu_rd_rdy; +} DPP_SE_CMMU_CMMU_FC2_CNT_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat4k_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat4k_reg.h new file mode 100644 index 000000000000..d30d919bcb99 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat4k_reg.h @@ -0,0 +1,37 @@ + +#ifndef _DPP_STAT4K_REG_H_ +#define _DPP_STAT4K_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + 
+typedef struct dpp_stat4k_etcam_block0_7_port_id_cfg_t { + ZXIC_UINT32 block7_port_id; + ZXIC_UINT32 block6_port_id; + ZXIC_UINT32 block5_port_id; + ZXIC_UINT32 block4_port_id; + ZXIC_UINT32 block3_port_id; + ZXIC_UINT32 block2_port_id; + ZXIC_UINT32 block1_port_id; + ZXIC_UINT32 block0_port_id; +} DPP_STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFG_T; + +typedef struct dpp_stat4k_etcam_block0_3_base_addr_cfg_t { + ZXIC_UINT32 block3_base_addr_cfg; + ZXIC_UINT32 block2_base_addr_cfg; + ZXIC_UINT32 block1_base_addr_cfg; + ZXIC_UINT32 block0_base_addr_cfg; +} DPP_STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFG_T; + +typedef struct dpp_stat4k_etcam_block4_7_base_addr_cfg_t { + ZXIC_UINT32 block7_base_addr_cfg; + ZXIC_UINT32 block6_base_addr_cfg; + ZXIC_UINT32 block5_base_addr_cfg; + ZXIC_UINT32 block4_base_addr_cfg; +} DPP_STAT4K_ETCAM_BLOCK4_7_BASE_ADDR_CFG_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat_reg.h new file mode 100644 index 000000000000..f4e474bb2db8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_stat_reg.h @@ -0,0 +1,2064 @@ + +#ifndef _DPP_STAT_REG_H_ +#define _DPP_STAT_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_wdat0_t { + ZXIC_UINT32 cpu_ind_eram_wdat0; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_WDAT0_T; + +typedef struct dpp_stat_stat_cfg_etm_port_sel_cfg_t { + ZXIC_UINT32 etm_port0_sel_cfg; + ZXIC_UINT32 etm_port1_sel_cfg; + ZXIC_UINT32 etm_port2_sel_cfg; + ZXIC_UINT32 etm_port3_sel_cfg; +} DPP_STAT_STAT_CFG_ETM_PORT_SEL_CFG_T; + +typedef struct dpp_stat_stat_cfg_tm_stat_cfg_t { + ZXIC_UINT32 stat_overflow_mode; + ZXIC_UINT32 tm_stat_mode_cfg; + ZXIC_UINT32 tm_flow_control_cfg; +} DPP_STAT_STAT_CFG_TM_STAT_CFG_T; + +typedef struct dpp_stat_stat_cfg_ppu_eram_depth_t { + ZXIC_UINT32 ppu_eram_depth; +} DPP_STAT_STAT_CFG_PPU_ERAM_DEPTH_T; + +typedef struct 
dpp_stat_stat_cfg_ppu_eram_base_addr_t { + ZXIC_UINT32 ppu_eram_base_addr; +} DPP_STAT_STAT_CFG_PPU_ERAM_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_ppu_ddr_base_addr_t { + ZXIC_UINT32 ppu_ddr_base_addr; +} DPP_STAT_STAT_CFG_PPU_DDR_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_plcr0_base_addr_t { + ZXIC_UINT32 plcr0_base_addr; +} DPP_STAT_STAT_CFG_PLCR0_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_start_addr_cfg_t { + ZXIC_UINT32 etm_stat_start_addr_cfg; +} DPP_STAT_STAT_CFG_ETM_STAT_START_ADDR_CFG_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_depth_cfg_t { + ZXIC_UINT32 etm_stat_depth_cfg; +} DPP_STAT_STAT_CFG_ETM_STAT_DEPTH_CFG_T; + +typedef struct dpp_stat_stat_cfg_cycle_mov_en_cfg_t { + ZXIC_UINT32 cycle_mov_en_cfg; +} DPP_STAT_STAT_CFG_CYCLE_MOV_EN_CFG_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat0_t { + ZXIC_UINT32 wdat0; +} DPP_STAT_ETCAM_CPU_IND_WDAT0_T; + +typedef struct dpp_stat_etcam_cpu_ind_ctrl_tmp0_t { + ZXIC_UINT32 reg_tcam_flag; + ZXIC_UINT32 flush; + ZXIC_UINT32 rd_wr; + ZXIC_UINT32 wr_mode; + ZXIC_UINT32 dat_or_mask; + ZXIC_UINT32 ram_sel; + ZXIC_UINT32 addr; +} DPP_STAT_ETCAM_CPU_IND_CTRL_TMP0_T; + +typedef struct dpp_stat_etcam_cpu_ind_ctrl_tmp1_t { + ZXIC_UINT32 row_or_col_msk; + ZXIC_UINT32 vben; + ZXIC_UINT32 vbit; +} DPP_STAT_ETCAM_CPU_IND_CTRL_TMP1_T; + +typedef struct dpp_stat_etcam_cpu_ind_rd_done_t { + ZXIC_UINT32 cpu_ind_rd_done; +} DPP_STAT_ETCAM_CPU_IND_RD_DONE_T; + +typedef struct dpp_stat_etcam_cpu_rdat0_t { + ZXIC_UINT32 cpu_rdat0; +} DPP_STAT_ETCAM_CPU_RDAT0_T; + +typedef struct dpp_stat_etcam_cpu_rdat1_t { + ZXIC_UINT32 cpu_rdat1; +} DPP_STAT_ETCAM_CPU_RDAT1_T; + +typedef struct dpp_stat_etcam_cpu_rdat2_t { + ZXIC_UINT32 cpu_rdat2; +} DPP_STAT_ETCAM_CPU_RDAT2_T; + +typedef struct dpp_stat_etcam_cpu_rdat3_t { + ZXIC_UINT32 cpu_rdat3; +} DPP_STAT_ETCAM_CPU_RDAT3_T; + +typedef struct dpp_stat_etcam_cpu_rdat4_t { + ZXIC_UINT32 cpu_rdat4; +} DPP_STAT_ETCAM_CPU_RDAT4_T; + +typedef struct 
dpp_stat_etcam_cpu_rdat5_t { + ZXIC_UINT32 cpu_rdat5; +} DPP_STAT_ETCAM_CPU_RDAT5_T; + +typedef struct dpp_stat_etcam_cpu_rdat6_t { + ZXIC_UINT32 cpu_rdat6; +} DPP_STAT_ETCAM_CPU_RDAT6_T; + +typedef struct dpp_stat_etcam_cpu_rdat7_t { + ZXIC_UINT32 cpu_rdat7; +} DPP_STAT_ETCAM_CPU_RDAT7_T; + +typedef struct dpp_stat_etcam_cpu_rdat8_t { + ZXIC_UINT32 cpu_rdat8; +} DPP_STAT_ETCAM_CPU_RDAT8_T; + +typedef struct dpp_stat_etcam_cpu_rdat9_t { + ZXIC_UINT32 cpu_rdat9; +} DPP_STAT_ETCAM_CPU_RDAT9_T; + +typedef struct dpp_stat_etcam_cpu_rdat10_t { + ZXIC_UINT32 cpu_rdat10; +} DPP_STAT_ETCAM_CPU_RDAT10_T; + +typedef struct dpp_stat_etcam_cpu_rdat11_t { + ZXIC_UINT32 cpu_rdat11; +} DPP_STAT_ETCAM_CPU_RDAT11_T; + +typedef struct dpp_stat_etcam_cpu_rdat12_t { + ZXIC_UINT32 cpu_rdat12; +} DPP_STAT_ETCAM_CPU_RDAT12_T; + +typedef struct dpp_stat_etcam_cpu_rdat13_t { + ZXIC_UINT32 cpu_rdat13; +} DPP_STAT_ETCAM_CPU_RDAT13_T; + +typedef struct dpp_stat_etcam_cpu_rdat14_t { + ZXIC_UINT32 cpu_rdat14; +} DPP_STAT_ETCAM_CPU_RDAT14_T; + +typedef struct dpp_stat_etcam_cpu_rdat15_t { + ZXIC_UINT32 cpu_rdat15; +} DPP_STAT_ETCAM_CPU_RDAT15_T; + +typedef struct dpp_stat_etcam_cpu_rdat16_t { + ZXIC_UINT32 cpu_rdat16; +} DPP_STAT_ETCAM_CPU_RDAT16_T; + +typedef struct dpp_stat_etcam_cpu_rdat17_t { + ZXIC_UINT32 cpu_rdat17; +} DPP_STAT_ETCAM_CPU_RDAT17_T; + +typedef struct dpp_stat_etcam_cpu_rdat18_t { + ZXIC_UINT32 cpu_rdat18; +} DPP_STAT_ETCAM_CPU_RDAT18_T; + +typedef struct dpp_stat_etcam_cpu_rdat19_t { + ZXIC_UINT32 cpu_rdat19; +} DPP_STAT_ETCAM_CPU_RDAT19_T; + +typedef struct dpp_stat_etcam_qvbo_t { + ZXIC_UINT32 qvbo; +} DPP_STAT_ETCAM_QVBO_T; + +typedef struct dpp_stat_etcam_cnt_overflow_mode_t { + ZXIC_UINT32 cnt_rd_mode; + ZXIC_UINT32 cnt_overflow_mode; +} DPP_STAT_ETCAM_CNT_OVERFLOW_MODE_T; + +typedef struct dpp_stat_car0_cara_queue_ram0_159_0_t { + ZXIC_UINT32 cara_drop; + ZXIC_UINT32 cara_plcr_en; + ZXIC_UINT32 cara_profile_id; + ZXIC_UINT32 cara_tq_h; + ZXIC_UINT32 cara_tq_l; + 
ZXIC_UINT32 cara_ted; + ZXIC_UINT32 cara_tcd; + ZXIC_UINT32 cara_tei; + ZXIC_UINT32 cara_tci; +} DPP_STAT_CAR0_CARA_QUEUE_RAM0_159_0_T; + +typedef struct dpp_stat_car0_cara_profile_ram1_255_0_t { + ZXIC_UINT32 cara_profile_wr; + ZXIC_UINT32 cara_pkt_sign; + ZXIC_UINT32 cara_cd; + ZXIC_UINT32 cara_cf; + ZXIC_UINT32 cara_cm; + ZXIC_UINT32 cara_eir; + ZXIC_UINT32 cara_cir; + ZXIC_UINT32 cara_ebs_pbs; + ZXIC_UINT32 cara_cbs; + ZXIC_UINT32 cara_c_pri1; + ZXIC_UINT32 cara_c_pri2; + ZXIC_UINT32 cara_c_pri3; + ZXIC_UINT32 cara_c_pri4; + ZXIC_UINT32 cara_c_pri5; + ZXIC_UINT32 cara_c_pri6; + ZXIC_UINT32 cara_c_pri7; + ZXIC_UINT32 cara_e_g_pri1; + ZXIC_UINT32 cara_e_g_pri2; + ZXIC_UINT32 cara_e_g_pri3; + ZXIC_UINT32 cara_e_g_pri4; + ZXIC_UINT32 cara_e_g_pri5; + ZXIC_UINT32 cara_e_g_pri6; + ZXIC_UINT32 cara_e_g_pri7; + ZXIC_UINT32 cara_e_y_pri0; + ZXIC_UINT32 cara_e_y_pri1; + ZXIC_UINT32 cara_e_y_pri2; + ZXIC_UINT32 cara_e_y_pri3; + ZXIC_UINT32 cara_e_y_pri4; + ZXIC_UINT32 cara_e_y_pri5; + ZXIC_UINT32 cara_e_y_pri6; + ZXIC_UINT32 cara_e_y_pri7; +} DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_T; + +typedef struct dpp_stat_car0_cara_qovs_ram_ram2_t { + ZXIC_UINT32 cara_qovs; +} DPP_STAT_CAR0_CARA_QOVS_RAM_RAM2_T; + +typedef struct dpp_stat_car0_look_up_table1_t { + ZXIC_UINT32 cara_flow_id; + ZXIC_UINT32 cara_sp; +} DPP_STAT_CAR0_LOOK_UP_TABLE1_T; + +typedef struct dpp_stat_car0_cara_pkt_des_i_cnt_t { + ZXIC_UINT32 cara_pkt_des_i_cnt; +} DPP_STAT_CAR0_CARA_PKT_DES_I_CNT_T; + +typedef struct dpp_stat_car0_cara_green_pkt_i_cnt_t { + ZXIC_UINT32 cara_green_pkt_i_cnt; +} DPP_STAT_CAR0_CARA_GREEN_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_cara_yellow_pkt_i_cnt_t { + ZXIC_UINT32 cara_yellow_pkt_i_cnt; +} DPP_STAT_CAR0_CARA_YELLOW_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_cara_red_pkt_i_cnt_t { + ZXIC_UINT32 cara_red_pkt_i_cnt; +} DPP_STAT_CAR0_CARA_RED_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_cara_pkt_des_o_cnt_t { + ZXIC_UINT32 cara_pkt_des_o_cnt; +} 
DPP_STAT_CAR0_CARA_PKT_DES_O_CNT_T; + +typedef struct dpp_stat_car0_cara_green_pkt_o_cnt_t { + ZXIC_UINT32 cara_green_pkt_o_cnt; +} DPP_STAT_CAR0_CARA_GREEN_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_cara_yellow_pkt_o_cnt_t { + ZXIC_UINT32 cara_yellow_pkt_o_cnt; +} DPP_STAT_CAR0_CARA_YELLOW_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_cara_red_pkt_o_cnt_t { + ZXIC_UINT32 cara_red_pkt_o_cnt; +} DPP_STAT_CAR0_CARA_RED_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_cara_pkt_des_fc_for_cfg_cnt_t { + ZXIC_UINT32 cara_pkt_des_fc_for_cfg_cnt; +} DPP_STAT_CAR0_CARA_PKT_DES_FC_FOR_CFG_CNT_T; + +typedef struct dpp_stat_car0_cara_appoint_qnum_or_sp_t { + ZXIC_UINT32 cara_appoint_qnum_or_not; + ZXIC_UINT32 cara_appoint_sp_or_not; + ZXIC_UINT32 cara_plcr_stat_sp; + ZXIC_UINT32 cara_plcr_stat_qnum; +} DPP_STAT_CAR0_CARA_APPOINT_QNUM_OR_SP_T; + +typedef struct dpp_stat_car0_cara_cfgmt_count_mode_t { + ZXIC_UINT32 cara_cfgmt_count_overflow_mode; + ZXIC_UINT32 cara_cfgmt_count_rd_mode; +} DPP_STAT_CAR0_CARA_CFGMT_COUNT_MODE_T; + +typedef struct dpp_stat_car0_cara_pkt_size_cnt_t { + ZXIC_UINT32 cara_pkt_size_cnt; +} DPP_STAT_CAR0_CARA_PKT_SIZE_CNT_T; + +typedef struct dpp_stat_car0_cara_plcr_init_dont_t { + ZXIC_UINT32 cara_plcr_init_done; +} DPP_STAT_CAR0_CARA_PLCR_INIT_DONT_T; + +typedef struct dpp_stat_car0_carb_queue_ram0_159_0_t { + ZXIC_UINT32 carb_drop; + ZXIC_UINT32 carb_plcr_en; + ZXIC_UINT32 carb_profile_id; + ZXIC_UINT32 carb_tq_h; + ZXIC_UINT32 carb_tq_l; + ZXIC_UINT32 carb_ted; + ZXIC_UINT32 carb_tcd; + ZXIC_UINT32 carb_tei; + ZXIC_UINT32 carb_tci; +} DPP_STAT_CAR0_CARB_QUEUE_RAM0_159_0_T; + +typedef struct dpp_stat_car0_carb_profile_ram1_255_0_t { + ZXIC_UINT32 carb_profile_wr; + ZXIC_UINT32 carb_random_discard_en_e; + ZXIC_UINT32 carb_random_discard_en_c; + ZXIC_UINT32 carb_pkt_sign; + ZXIC_UINT32 carb_cd; + ZXIC_UINT32 carb_cf; + ZXIC_UINT32 carb_cm; + ZXIC_UINT32 carb_eir; + ZXIC_UINT32 carb_cir; + ZXIC_UINT32 carb_ebs_pbs; + ZXIC_UINT32 carb_cbs; + ZXIC_UINT32 
carb_c_pri1; + ZXIC_UINT32 carb_c_pri2; + ZXIC_UINT32 carb_c_pri3; + ZXIC_UINT32 carb_c_pri4; + ZXIC_UINT32 carb_c_pri5; + ZXIC_UINT32 carb_c_pri6; + ZXIC_UINT32 carb_c_pri7; + ZXIC_UINT32 carb_e_g_pri1; + ZXIC_UINT32 carb_e_g_pri2; + ZXIC_UINT32 carb_e_g_pri3; + ZXIC_UINT32 carb_e_g_pri4; + ZXIC_UINT32 carb_e_g_pri5; + ZXIC_UINT32 carb_e_g_pri6; + ZXIC_UINT32 carb_e_g_pri7; + ZXIC_UINT32 carb_e_y_pri0; + ZXIC_UINT32 carb_e_y_pri1; + ZXIC_UINT32 carb_e_y_pri2; + ZXIC_UINT32 carb_e_y_pri3; + ZXIC_UINT32 carb_e_y_pri4; + ZXIC_UINT32 carb_e_y_pri5; + ZXIC_UINT32 carb_e_y_pri6; + ZXIC_UINT32 carb_e_y_pri7; +} DPP_STAT_CAR0_CARB_PROFILE_RAM1_255_0_T; + +typedef struct dpp_stat_car0_carb_qovs_ram_ram2_t { + ZXIC_UINT32 carb_qovs; +} DPP_STAT_CAR0_CARB_QOVS_RAM_RAM2_T; + +typedef struct dpp_stat_car0_look_up_table2_t { + ZXIC_UINT32 carb_flow_id; + ZXIC_UINT32 carb_sp; +} DPP_STAT_CAR0_LOOK_UP_TABLE2_T; + +typedef struct dpp_stat_car0_carb_pkt_des_i_cnt_t { + ZXIC_UINT32 carb_pkt_des_i_cnt; +} DPP_STAT_CAR0_CARB_PKT_DES_I_CNT_T; + +typedef struct dpp_stat_car0_carb_green_pkt_i_cnt_t { + ZXIC_UINT32 carb_green_pkt_i_cnt; +} DPP_STAT_CAR0_CARB_GREEN_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carb_yellow_pkt_i_cnt_t { + ZXIC_UINT32 carb_yellow_pkt_i_cnt; +} DPP_STAT_CAR0_CARB_YELLOW_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carb_red_pkt_i_cnt_t { + ZXIC_UINT32 carb_red_pkt_i_cnt; +} DPP_STAT_CAR0_CARB_RED_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carb_pkt_des_o_cnt_t { + ZXIC_UINT32 carb_pkt_des_o_cnt; +} DPP_STAT_CAR0_CARB_PKT_DES_O_CNT_T; + +typedef struct dpp_stat_car0_carb_green_pkt_o_cnt_t { + ZXIC_UINT32 carb_green_pkt_o_cnt; +} DPP_STAT_CAR0_CARB_GREEN_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_carb_yellow_pkt_o_cnt_t { + ZXIC_UINT32 carb_yellow_pkt_o_cnt; +} DPP_STAT_CAR0_CARB_YELLOW_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_carb_red_pkt_o_cnt_t { + ZXIC_UINT32 carb_red_pkt_o_cnt; +} DPP_STAT_CAR0_CARB_RED_PKT_O_CNT_T; + +typedef struct 
dpp_stat_car0_carb_pkt_des_fc_for_cfg_cnt_t { + ZXIC_UINT32 carb_pkt_des_fc_for_cfg_cnt; +} DPP_STAT_CAR0_CARB_PKT_DES_FC_FOR_CFG_CNT_T; + +typedef struct dpp_stat_car0_carb_appoint_qnum_or_sp_t { + ZXIC_UINT32 carb_appoint_qnum_or_not; + ZXIC_UINT32 carb_appoint_sp_or_not; + ZXIC_UINT32 carb_plcr_stat_sp; + ZXIC_UINT32 carb_plcr_stat_qnum; +} DPP_STAT_CAR0_CARB_APPOINT_QNUM_OR_SP_T; + +typedef struct dpp_stat_car0_carb_cfgmt_count_mode_t { + ZXIC_UINT32 carb_cfgmt_count_overflow_mode; + ZXIC_UINT32 carb_cfgmt_count_rd_mode; +} DPP_STAT_CAR0_CARB_CFGMT_COUNT_MODE_T; + +typedef struct dpp_stat_car0_carb_pkt_size_cnt_t { + ZXIC_UINT32 carb_pkt_size_cnt; +} DPP_STAT_CAR0_CARB_PKT_SIZE_CNT_T; + +typedef struct dpp_stat_car0_carb_plcr_init_dont_t { + ZXIC_UINT32 carb_plcr_init_done; +} DPP_STAT_CAR0_CARB_PLCR_INIT_DONT_T; + +typedef struct dpp_stat_car0_carc_queue_ram0_159_0_t { + ZXIC_UINT32 carc_drop; + ZXIC_UINT32 carc_plcr_en; + ZXIC_UINT32 carc_profile_id; + ZXIC_UINT32 carc_tq_h; + ZXIC_UINT32 carc_tq_l; + ZXIC_UINT32 carc_ted; + ZXIC_UINT32 carc_tcd; + ZXIC_UINT32 carc_tei; + ZXIC_UINT32 carc_tci; +} DPP_STAT_CAR0_CARC_QUEUE_RAM0_159_0_T; + +typedef struct dpp_stat_car0_carc_profile_ram1_255_0_t { + ZXIC_UINT32 carc_profile_wr; + ZXIC_UINT32 carc_random_discard_en_e; + ZXIC_UINT32 carc_random_discard_en_c; + ZXIC_UINT32 carc_pkt_sign; + ZXIC_UINT32 carc_cd; + ZXIC_UINT32 carc_cf; + ZXIC_UINT32 carc_cm; + ZXIC_UINT32 carc_eir; + ZXIC_UINT32 carc_cir; + ZXIC_UINT32 carc_ebs_pbs; + ZXIC_UINT32 carc_cbs; + ZXIC_UINT32 carc_c_pri1; + ZXIC_UINT32 carc_c_pri2; + ZXIC_UINT32 carc_c_pri3; + ZXIC_UINT32 carc_c_pri4; + ZXIC_UINT32 carc_c_pri5; + ZXIC_UINT32 carc_c_pri6; + ZXIC_UINT32 carc_c_pri7; + ZXIC_UINT32 carc_e_g_pri1; + ZXIC_UINT32 carc_e_g_pri2; + ZXIC_UINT32 carc_e_g_pri3; + ZXIC_UINT32 carc_e_g_pri4; + ZXIC_UINT32 carc_e_g_pri5; + ZXIC_UINT32 carc_e_g_pri6; + ZXIC_UINT32 carc_e_g_pri7; + ZXIC_UINT32 carc_e_y_pri0; + ZXIC_UINT32 carc_e_y_pri1; + ZXIC_UINT32 
carc_e_y_pri2; + ZXIC_UINT32 carc_e_y_pri3; + ZXIC_UINT32 carc_e_y_pri4; + ZXIC_UINT32 carc_e_y_pri5; + ZXIC_UINT32 carc_e_y_pri6; + ZXIC_UINT32 carc_e_y_pri7; +} DPP_STAT_CAR0_CARC_PROFILE_RAM1_255_0_T; + +typedef struct dpp_stat_car0_carc_qovs_ram_ram2_t { + ZXIC_UINT32 carc_qovs; +} DPP_STAT_CAR0_CARC_QOVS_RAM_RAM2_T; + +typedef struct dpp_stat_car0_carc_pkt_des_i_cnt_t { + ZXIC_UINT32 carc_pkt_des_i_cnt; +} DPP_STAT_CAR0_CARC_PKT_DES_I_CNT_T; + +typedef struct dpp_stat_car0_carc_green_pkt_i_cnt_t { + ZXIC_UINT32 carc_green_pkt_i_cnt; +} DPP_STAT_CAR0_CARC_GREEN_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carc_yellow_pkt_i_cnt_t { + ZXIC_UINT32 carc_yellow_pkt_i_cnt; +} DPP_STAT_CAR0_CARC_YELLOW_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carc_red_pkt_i_cnt_t { + ZXIC_UINT32 carc_red_pkt_i_cnt; +} DPP_STAT_CAR0_CARC_RED_PKT_I_CNT_T; + +typedef struct dpp_stat_car0_carc_pkt_des_o_cnt_t { + ZXIC_UINT32 carc_pkt_des_o_cnt; +} DPP_STAT_CAR0_CARC_PKT_DES_O_CNT_T; + +typedef struct dpp_stat_car0_carc_green_pkt_o_cnt_t { + ZXIC_UINT32 carc_green_pkt_o_cnt; +} DPP_STAT_CAR0_CARC_GREEN_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_carc_yellow_pkt_o_cnt_t { + ZXIC_UINT32 carc_yellow_pkt_o_cnt; +} DPP_STAT_CAR0_CARC_YELLOW_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_carc_red_pkt_o_cnt_t { + ZXIC_UINT32 carc_red_pkt_o_cnt; +} DPP_STAT_CAR0_CARC_RED_PKT_O_CNT_T; + +typedef struct dpp_stat_car0_carc_pkt_des_fc_for_cfg_cnt_t { + ZXIC_UINT32 carc_pkt_des_fc_for_cfg_cnt; +} DPP_STAT_CAR0_CARC_PKT_DES_FC_FOR_CFG_CNT_T; + +typedef struct dpp_stat_car0_carc_appoint_qnum_or_sp_t { + ZXIC_UINT32 carc_appoint_qnum_or_not; + ZXIC_UINT32 carc_appoint_sp_or_not; + ZXIC_UINT32 carc_plcr_stat_sp; + ZXIC_UINT32 carc_plcr_stat_qnum; +} DPP_STAT_CAR0_CARC_APPOINT_QNUM_OR_SP_T; + +typedef struct dpp_stat_car0_carc_cfgmt_count_mode_t { + ZXIC_UINT32 carc_cfgmt_count_overflow_mode; + ZXIC_UINT32 carc_cfgmt_count_rd_mode; +} DPP_STAT_CAR0_CARC_CFGMT_COUNT_MODE_T; + +typedef struct 
dpp_stat_car0_carc_pkt_size_cnt_t { + ZXIC_UINT32 carc_pkt_size_cnt; +} DPP_STAT_CAR0_CARC_PKT_SIZE_CNT_T; + +typedef struct dpp_stat_car0_carc_plcr_init_dont_t { + ZXIC_UINT32 carc_plcr_init_done; +} DPP_STAT_CAR0_CARC_PLCR_INIT_DONT_T; + +typedef struct dpp_stat_car0_carb_random_ram_t { + ZXIC_UINT32 para8_e; + ZXIC_UINT32 para7_e; + ZXIC_UINT32 para6_e; + ZXIC_UINT32 para5_e; + ZXIC_UINT32 para4_h_e; + ZXIC_UINT32 para4_l_e; + ZXIC_UINT32 para3_e; + ZXIC_UINT32 para2_h_e; + ZXIC_UINT32 para2_l_e; + ZXIC_UINT32 para1_e; + ZXIC_UINT32 para0_h_e; + ZXIC_UINT32 para0_l_e; + ZXIC_UINT32 para8_c; + ZXIC_UINT32 para7_c; + ZXIC_UINT32 para6_c; + ZXIC_UINT32 para5_c; + ZXIC_UINT32 para4_h_c; + ZXIC_UINT32 para4_l_c; + ZXIC_UINT32 para3_c; + ZXIC_UINT32 para2_h_c; + ZXIC_UINT32 para2_l_c; + ZXIC_UINT32 para1_c; + ZXIC_UINT32 para0_h_c; + ZXIC_UINT32 para0_l_c; +} DPP_STAT_CAR0_CARB_RANDOM_RAM_T; + +typedef struct dpp_stat_car0_carc_random_ram_t { + ZXIC_UINT32 para8_e; + ZXIC_UINT32 para7_e; + ZXIC_UINT32 para6_e; + ZXIC_UINT32 para5_e; + ZXIC_UINT32 para4_h_e; + ZXIC_UINT32 para4_l_e; + ZXIC_UINT32 para3_e; + ZXIC_UINT32 para2_h_e; + ZXIC_UINT32 para2_l_e; + ZXIC_UINT32 para1_e; + ZXIC_UINT32 para0_h_e; + ZXIC_UINT32 para0_l_e; + ZXIC_UINT32 para8_c; + ZXIC_UINT32 para7_c; + ZXIC_UINT32 para6_c; + ZXIC_UINT32 para5_c; + ZXIC_UINT32 para4_h_c; + ZXIC_UINT32 para4_l_c; + ZXIC_UINT32 para3_c; + ZXIC_UINT32 para2_h_c; + ZXIC_UINT32 para2_l_c; + ZXIC_UINT32 para1_c; + ZXIC_UINT32 para0_h_c; + ZXIC_UINT32 para0_l_c; +} DPP_STAT_CAR0_CARC_RANDOM_RAM_T; + +typedef struct dpp_stat_car0_cara_begin_flow_id_t { + ZXIC_UINT32 cara_begin_flow_id; +} DPP_STAT_CAR0_CARA_BEGIN_FLOW_ID_T; + +typedef struct dpp_stat_car0_carb_begin_flow_id_t { + ZXIC_UINT32 carb_begin_flow_id; +} DPP_STAT_CAR0_CARB_BEGIN_FLOW_ID_T; + +typedef struct dpp_stat_car0_carc_begin_flow_id_t { + ZXIC_UINT32 carc_begin_flow_id; +} DPP_STAT_CAR0_CARC_BEGIN_FLOW_ID_T; + +typedef struct 
dpp_stat_car0_prog_full_assert_cfg_w_t { + ZXIC_UINT32 prog_full_assert_cfg_w; +} DPP_STAT_CAR0_PROG_FULL_ASSERT_CFG_W_T; + +typedef struct dpp_stat_car0_prog_full_negate_cfg_w_t { + ZXIC_UINT32 prog_full_negate_cfg_w; +} DPP_STAT_CAR0_PROG_FULL_NEGATE_CFG_W_T; + +typedef struct dpp_stat_car0_timeout_limit_t { + ZXIC_UINT32 timeout_limit; +} DPP_STAT_CAR0_TIMEOUT_LIMIT_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_overflow_t { + ZXIC_UINT32 pkt_des_fifo_overflow; +} DPP_STAT_CAR0_PKT_DES_FIFO_OVERFLOW_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_underflow_t { + ZXIC_UINT32 pkt_des_fifo_underflow; +} DPP_STAT_CAR0_PKT_DES_FIFO_UNDERFLOW_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_prog_full_t { + ZXIC_UINT32 pkt_des_fifo_prog_full; +} DPP_STAT_CAR0_PKT_DES_FIFO_PROG_FULL_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_prog_empty_t { + ZXIC_UINT32 pkt_des_fifo_prog_empty; +} DPP_STAT_CAR0_PKT_DES_FIFO_PROG_EMPTY_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_full_t { + ZXIC_UINT32 pkt_des_fifo_full; +} DPP_STAT_CAR0_PKT_DES_FIFO_FULL_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_empty_t { + ZXIC_UINT32 pkt_des_fifo_empty; +} DPP_STAT_CAR0_PKT_DES_FIFO_EMPTY_T; + +typedef struct dpp_stat_car0_pkt_size_offset_t { + ZXIC_UINT32 pkt_size_offset; +} DPP_STAT_CAR0_PKT_SIZE_OFFSET_T; + +typedef struct dpp_stat_car0_car_plcr_init_dont_t { + ZXIC_UINT32 plcr_init_done; +} DPP_STAT_CAR0_CAR_PLCR_INIT_DONT_T; + +typedef struct dpp_stat_car0_max_pkt_size_a_t { + ZXIC_UINT32 max_pkt_size_a; +} DPP_STAT_CAR0_MAX_PKT_SIZE_A_T; + +typedef struct dpp_stat_car0_max_pkt_size_b_t { + ZXIC_UINT32 max_pkt_size_b; +} DPP_STAT_CAR0_MAX_PKT_SIZE_B_T; + +typedef struct dpp_stat_car0_max_pkt_size_c_t { + ZXIC_UINT32 max_pkt_size_c; +} DPP_STAT_CAR0_MAX_PKT_SIZE_C_T; + +typedef struct dpp_stat_car0_car_hierarchy_mode_t { + ZXIC_UINT32 car_hierarchy_mode; +} DPP_STAT_CAR0_CAR_HIERARCHY_MODE_T; + +typedef struct dpp_stat_car0_prog_empty_assert_cfg_w_t { + ZXIC_UINT32 
prog_empty_assert_cfg_w; +} DPP_STAT_CAR0_PROG_EMPTY_ASSERT_CFG_W_T; + +typedef struct dpp_stat_car0_prog_empty_negate_cfg_w_t { + ZXIC_UINT32 prog_empty_negate_cfg_w; +} DPP_STAT_CAR0_PROG_EMPTY_NEGATE_CFG_W_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_ovf_int_t { + ZXIC_UINT32 pkt_des_fifo_ovf_int; +} DPP_STAT_CAR0_PKT_DES_FIFO_OVF_INT_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_data_count_t { + ZXIC_UINT32 pkt_des_fifo_data_count; +} DPP_STAT_CAR0_PKT_DES_FIFO_DATA_COUNT_T; + +typedef struct dpp_stat_car0_pkt_des_fifo_udf_int_t { + ZXIC_UINT32 pkt_des_fifo_udf_int; +} DPP_STAT_CAR0_PKT_DES_FIFO_UDF_INT_T; + +typedef struct dpp_stat_car0_cara_queue_ram0_159_0_pkt_t { + ZXIC_UINT32 cara_drop; + ZXIC_UINT32 cara_plcr_en; + ZXIC_UINT32 cara_profile_id; + ZXIC_UINT32 cara_tq_h; + ZXIC_UINT32 cara_tq_l; + ZXIC_UINT32 cara_dc_high; + ZXIC_UINT32 cara_dc_low; + ZXIC_UINT32 cara_tc; +} DPP_STAT_CAR0_CARA_QUEUE_RAM0_159_0_PKT_T; + +typedef struct dpp_stat_car0_cara_profile_ram1_255_0_pkt_t { + ZXIC_UINT32 cara_profile_wr; + ZXIC_UINT32 cara_pkt_sign; + ZXIC_UINT32 cara_pkt_cir; + ZXIC_UINT32 cara_pkt_cbs; + ZXIC_UINT32 cara_pri0; + ZXIC_UINT32 cara_pri1; + ZXIC_UINT32 cara_pri2; + ZXIC_UINT32 cara_pri3; + ZXIC_UINT32 cara_pri4; + ZXIC_UINT32 cara_pri5; + ZXIC_UINT32 cara_pri6; + ZXIC_UINT32 cara_pri7; +} DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKT_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_wdat1_t { + ZXIC_UINT32 cpu_ind_eram_wdat1; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_WDAT1_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_wdat2_t { + ZXIC_UINT32 cpu_ind_eram_wdat2; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_WDAT2_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_wdat3_t { + ZXIC_UINT32 cpu_ind_eram_wdat3; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_WDAT3_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_req_info_t { + ZXIC_UINT32 rw_mode; + ZXIC_UINT32 read_mode; + ZXIC_UINT32 tm_cs; + ZXIC_UINT32 queue_cs; + ZXIC_UINT32 rw_addr; +} 
DPP_STAT_STAT_CFG_CPU_IND_ERAM_REQ_INFO_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_rd_done_t { + ZXIC_UINT32 cpu_ind_eram_rd_done; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_RD_DONE_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_rdat0_t { + ZXIC_UINT32 cpu_ind_eram_rdat0; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_RDAT0_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_rdat1_t { + ZXIC_UINT32 cpu_ind_eram_rdat1; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_RDAT1_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_rdat2_t { + ZXIC_UINT32 cpu_ind_eram_rdat2; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_RDAT2_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_eram_rdat3_t { + ZXIC_UINT32 cpu_ind_eram_rdat3; +} DPP_STAT_STAT_CFG_CPU_IND_ERAM_RDAT3_T; + +typedef struct dpp_stat_stat_cfg_tm_alu_eram_cpu_rdy_t { + ZXIC_UINT32 tm_alu_eram_cpu_rdy; +} DPP_STAT_STAT_CFG_TM_ALU_ERAM_CPU_RDY_T; + +typedef struct dpp_stat_stat_cfg_oam_stat_cfg_t { + ZXIC_UINT32 oam_flow_control_cfg; + ZXIC_UINT32 oam_lm_flow_control_cfg; + ZXIC_UINT32 oam_in_eram_cfg; +} DPP_STAT_STAT_CFG_OAM_STAT_CFG_T; + +typedef struct dpp_stat_stat_cfg_ftm_port_sel_cfg_t { + ZXIC_UINT32 ftm_port0_sel_cfg; + ZXIC_UINT32 ftm_port1_sel_cfg; + ZXIC_UINT32 ftm_port2_sel_cfg; + ZXIC_UINT32 ftm_port3_sel_cfg; +} DPP_STAT_STAT_CFG_FTM_PORT_SEL_CFG_T; + +typedef struct dpp_stat_stat_cfg_oam_eram_base_addr_t { + ZXIC_UINT32 oam_eram_base_addr; +} DPP_STAT_STAT_CFG_OAM_ERAM_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_oam_lm_eram_base_addr_t { + ZXIC_UINT32 oam_lm_eram_base_addr; +} DPP_STAT_STAT_CFG_OAM_LM_ERAM_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_oam_ddr_base_addr_t { + ZXIC_UINT32 oam_ddr_base_addr; +} DPP_STAT_STAT_CFG_OAM_DDR_BASE_ADDR_T; + +typedef struct dpp_stat_stat_cfg_plcr0_schd_pful_cfg_t { + ZXIC_UINT32 plcr0_schd_pful_assert; + ZXIC_UINT32 plcr0_schd_pful_negate; +} DPP_STAT_STAT_CFG_PLCR0_SCHD_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_oam_lm_ord_pful_cfg_t { + ZXIC_UINT32 oam_lm_ord_pful_assert; + ZXIC_UINT32 
oam_lm_ord_pful_negate; +} DPP_STAT_STAT_CFG_OAM_LM_ORD_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_ddr_schd_pful_cfg_t { + ZXIC_UINT32 ddr_schd_pful_assert; + ZXIC_UINT32 ddr_schd_pful_negate; +} DPP_STAT_STAT_CFG_DDR_SCHD_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_pful_cfg_t { + ZXIC_UINT32 eram_schd_pful_assert; + ZXIC_UINT32 eram_schd_pful_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_pept_cfg_t { + ZXIC_UINT32 eram_schd_pept_assert; + ZXIC_UINT32 eram_schd_pept_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_oam_pful_cfg_t { + ZXIC_UINT32 eram_schd_oam_pful_assert; + ZXIC_UINT32 eram_schd_oam_pful_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_OAM_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_oam_pept_cfg_t { + ZXIC_UINT32 eram_schd_oam_pept_assert; + ZXIC_UINT32 eram_schd_oam_pept_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_OAM_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_oam_lm_pful_cfg_t { + ZXIC_UINT32 eram_schd_oam_lm_pful_assert; + ZXIC_UINT32 eram_schd_oam_lm_pful_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_oam_lm_pept_cfg_t { + ZXIC_UINT32 eram_schd_oam_lm_pept_assert; + ZXIC_UINT32 eram_schd_oam_lm_pept_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_rschd_pful_cfg_t { + ZXIC_UINT32 rschd_pful_assert; + ZXIC_UINT32 rschd_pful_negate; +} DPP_STAT_STAT_CFG_RSCHD_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_rschd_pept_cfg_t { + ZXIC_UINT32 rschd_pept_assert; + ZXIC_UINT32 rschd_pept_negate; +} DPP_STAT_STAT_CFG_RSCHD_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_rschd_plcr_pful_cfg_t { + ZXIC_UINT32 rschd_plcr_pful_assert; + ZXIC_UINT32 rschd_plcr_pful_negate; +} DPP_STAT_STAT_CFG_RSCHD_PLCR_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_rschd_plcr_pept_cfg_t { + ZXIC_UINT32 rschd_plcr_pept_assert; + ZXIC_UINT32 
rschd_plcr_pept_negate; +} DPP_STAT_STAT_CFG_RSCHD_PLCR_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_rschd_plcr_info_pful_cfg_t { + ZXIC_UINT32 rschd_plcr_info_pful_assert; + ZXIC_UINT32 rschd_plcr_info_pful_negate; +} DPP_STAT_STAT_CFG_RSCHD_PLCR_INFO_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_alu_arb_cpu_pful_cfg_t { + ZXIC_UINT32 alu_arb_cpu_pful_assert; + ZXIC_UINT32 alu_arb_cpu_pful_negate; +} DPP_STAT_STAT_CFG_ALU_ARB_CPU_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_alu_arb_user_pful_cfg_t { + ZXIC_UINT32 alu_arb_user_pful_assert; + ZXIC_UINT32 alu_arb_user_pful_negate; +} DPP_STAT_STAT_CFG_ALU_ARB_USER_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_alu_arb_stat_pful_cfg_t { + ZXIC_UINT32 alu_arb_stat_pful_assert; + ZXIC_UINT32 alu_arb_stat_pful_negate; +} DPP_STAT_STAT_CFG_ALU_ARB_STAT_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_cycmov_dat_pful_cfg_t { + ZXIC_UINT32 cycmov_dat_pful_assert; + ZXIC_UINT32 cycmov_dat_pful_negate; +} DPP_STAT_STAT_CFG_CYCMOV_DAT_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_ddr_opr_pful_cfg_t { + ZXIC_UINT32 ddr_opr_pful_assert; + ZXIC_UINT32 ddr_opr_pful_negate; +} DPP_STAT_STAT_CFG_DDR_OPR_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_cycle_mov_pful_cfg_t { + ZXIC_UINT32 cycle_mov_pful_assert; + ZXIC_UINT32 cycle_mov_pful_negate; +} DPP_STAT_STAT_CFG_CYCLE_MOV_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_cntovf_pful_cfg_t { + ZXIC_UINT32 cntovf_pful_assert; + ZXIC_UINT32 cntovf_pful_negate; +} DPP_STAT_STAT_CFG_CNTOVF_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_plcr_pful_cfg_t { + ZXIC_UINT32 eram_schd_plcr_pful_assert; + ZXIC_UINT32 eram_schd_plcr_pful_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_PLCR_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_plcr_pept_cfg_t { + ZXIC_UINT32 eram_schd_plcr_pept_assert; + ZXIC_UINT32 eram_schd_plcr_pept_negate; +} DPP_STAT_STAT_CFG_ERAM_SCHD_PLCR_PEPT_CFG_T; + +typedef struct dpp_stat_stat_cfg_debug_cnt_mode_t { + ZXIC_UINT32 cnt_rd_mode; + 
ZXIC_UINT32 cnt_overflow_mode; +} DPP_STAT_STAT_CFG_DEBUG_CNT_MODE_T; + +typedef struct dpp_stat_stat_cfg_tm_mov_period_cfg_t { + ZXIC_UINT32 etm_mov_period_cfg; + ZXIC_UINT32 ftm_mov_period_cfg; +} DPP_STAT_STAT_CFG_TM_MOV_PERIOD_CFG_T; + +typedef struct dpp_stat_stat_cfg_alu_ddr_cpu_req_pful_cfg_t { + ZXIC_UINT32 alu_ddr_cpu_req_pful_assert; + ZXIC_UINT32 alu_ddr_cpu_req_pful_negate; +} DPP_STAT_STAT_CFG_ALU_DDR_CPU_REQ_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_cycmov_addr_pful_cfg_t { + ZXIC_UINT32 cycmov_addr_pful_assert; + ZXIC_UINT32 cycmov_addr_pful_negate; +} DPP_STAT_STAT_CFG_CYCMOV_ADDR_PFUL_CFG_T; + +typedef struct dpp_stat_stat_cfg_ord_ddr_plcr_fifo_empty_t { + ZXIC_UINT32 ord_oam_lm_empty; + ZXIC_UINT32 ddr_schd_fifo_empty; + ZXIC_UINT32 plcr0_schd_fifo_empty; +} DPP_STAT_STAT_CFG_ORD_DDR_PLCR_FIFO_EMPTY_T; + +typedef struct dpp_stat_stat_cfg_tm_stat_fifo_empty_t { + ZXIC_UINT32 tm_stat_fifo_empty; +} DPP_STAT_STAT_CFG_TM_STAT_FIFO_EMPTY_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_fifo_empty_0_1_t { + ZXIC_UINT32 eram_schd_fifo_empty1; + ZXIC_UINT32 eram_schd_fifo_empty0; +} DPP_STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_0_1_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_fifo_empty_2_3_t { + ZXIC_UINT32 eram_schd_fifo_empty3; + ZXIC_UINT32 eram_schd_fifo_empty2; +} DPP_STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_2_3_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_fifo_empty_4_5_t { + ZXIC_UINT32 eram_schd_fifo_empty5; + ZXIC_UINT32 eram_schd_fifo_empty4; +} DPP_STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_4_5_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_fifo_empty_6_7_t { + ZXIC_UINT32 eram_schd_fifo_empty7; + ZXIC_UINT32 eram_schd_fifo_empty6; +} DPP_STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_6_7_T; + +typedef struct dpp_stat_stat_cfg_eram_schd_fifo_empty_free_8_t { + ZXIC_UINT32 eram_schd_free_fifo_empty8; + ZXIC_UINT32 eram_schd_free_fifo_empty7; + ZXIC_UINT32 eram_schd_free_fifo_empty6; + ZXIC_UINT32 eram_schd_free_fifo_empty5; + ZXIC_UINT32 
eram_schd_free_fifo_empty4; + ZXIC_UINT32 eram_schd_free_fifo_empty3; + ZXIC_UINT32 eram_schd_free_fifo_empty2; + ZXIC_UINT32 eram_schd_free_fifo_empty1; + ZXIC_UINT32 eram_schd_free_fifo_empty0; + ZXIC_UINT32 eram_schd_fifo_empty8; +} DPP_STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_FREE_8_T; + +typedef struct dpp_stat_stat_cfg_rschd_fifo_empty_0_3_t { + ZXIC_UINT32 rschd_fifo_empty3; + ZXIC_UINT32 rschd_fifo_empty2; + ZXIC_UINT32 rschd_fifo_empty1; + ZXIC_UINT32 rschd_fifo_empty0; +} DPP_STAT_STAT_CFG_RSCHD_FIFO_EMPTY_0_3_T; + +typedef struct dpp_stat_stat_cfg_rschd_fifo_empty_4_7_t { + ZXIC_UINT32 rschd_fifo_empty7; + ZXIC_UINT32 rschd_fifo_empty6; + ZXIC_UINT32 rschd_fifo_empty5; + ZXIC_UINT32 rschd_fifo_empty4; +} DPP_STAT_STAT_CFG_RSCHD_FIFO_EMPTY_4_7_T; + +typedef struct dpp_stat_stat_cfg_rschd_fifo_empty_8_11_t { + ZXIC_UINT32 rschd_fifo_empty11; + ZXIC_UINT32 rschd_fifo_empty10; + ZXIC_UINT32 rschd_fifo_empty9; + ZXIC_UINT32 rschd_fifo_empty8; +} DPP_STAT_STAT_CFG_RSCHD_FIFO_EMPTY_8_11_T; + +typedef struct dpp_stat_stat_cfg_rschd_fifo_empty_12_15_t { + ZXIC_UINT32 rschd_fifo_empty15; + ZXIC_UINT32 rschd_fifo_empty14; + ZXIC_UINT32 rschd_fifo_empty13; + ZXIC_UINT32 rschd_fifo_empty12; +} DPP_STAT_STAT_CFG_RSCHD_FIFO_EMPTY_12_15_T; + +typedef struct dpp_stat_stat_cfg_rschd_fifo_empty_plcr_16_17_t { + ZXIC_UINT32 rschd_fifo_empty_plcr; + ZXIC_UINT32 rschd_fifo_empty17; + ZXIC_UINT32 rschd_fifo_empty16; +} DPP_STAT_STAT_CFG_RSCHD_FIFO_EMPTY_PLCR_16_17_T; + +typedef struct dpp_stat_stat_cfg_stat_int_unmask_flag_t { + ZXIC_UINT32 stat_int5_unmask_flag; + ZXIC_UINT32 stat_int4_unmask_flag; + ZXIC_UINT32 stat_int3_unmask_flag; + ZXIC_UINT32 stat_int2_unmask_flag; + ZXIC_UINT32 stat_int1_unmask_flag; + ZXIC_UINT32 stat_int0_unmask_flag; +} DPP_STAT_STAT_CFG_STAT_INT_UNMASK_FLAG_T; + +typedef struct dpp_stat_stat_cfg_stat_int0_en_t { + ZXIC_UINT32 stat_int0_en31; + ZXIC_UINT32 stat_int0_en30; + ZXIC_UINT32 stat_int0_en29; + ZXIC_UINT32 stat_int0_en28; + ZXIC_UINT32 
stat_int0_en27; + ZXIC_UINT32 stat_int0_en26; + ZXIC_UINT32 stat_int0_en25; + ZXIC_UINT32 stat_int0_en24; + ZXIC_UINT32 stat_int0_en23; + ZXIC_UINT32 stat_int0_en22; + ZXIC_UINT32 stat_int0_en21; + ZXIC_UINT32 stat_int0_en20; + ZXIC_UINT32 stat_int0_en19; + ZXIC_UINT32 stat_int0_en18; + ZXIC_UINT32 stat_int0_en17; + ZXIC_UINT32 stat_int0_en16; + ZXIC_UINT32 stat_int0_en15; + ZXIC_UINT32 stat_int0_en14; + ZXIC_UINT32 stat_int0_en13; + ZXIC_UINT32 stat_int0_en12; + ZXIC_UINT32 stat_int0_en11; + ZXIC_UINT32 stat_int0_en10; + ZXIC_UINT32 stat_int0_en9; + ZXIC_UINT32 stat_int0_en8; + ZXIC_UINT32 stat_int0_en7; + ZXIC_UINT32 stat_int0_en6; + ZXIC_UINT32 stat_int0_en5; + ZXIC_UINT32 stat_int0_en4; + ZXIC_UINT32 stat_int0_en3; + ZXIC_UINT32 stat_int0_en2; + ZXIC_UINT32 stat_int0_en1; + ZXIC_UINT32 stat_int0_en0; +} DPP_STAT_STAT_CFG_STAT_INT0_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int0_mask_t { + ZXIC_UINT32 stat_int0_mask31; + ZXIC_UINT32 stat_int0_mask30; + ZXIC_UINT32 stat_int0_mask29; + ZXIC_UINT32 stat_int0_mask28; + ZXIC_UINT32 stat_int0_mask27; + ZXIC_UINT32 stat_int0_mask26; + ZXIC_UINT32 stat_int0_mask25; + ZXIC_UINT32 stat_int0_mask24; + ZXIC_UINT32 stat_int0_mask23; + ZXIC_UINT32 stat_int0_mask22; + ZXIC_UINT32 stat_int0_mask21; + ZXIC_UINT32 stat_int0_mask20; + ZXIC_UINT32 stat_int0_mask19; + ZXIC_UINT32 stat_int0_mask18; + ZXIC_UINT32 stat_int0_mask17; + ZXIC_UINT32 stat_int0_mask16; + ZXIC_UINT32 stat_int0_mask15; + ZXIC_UINT32 stat_int0_mask14; + ZXIC_UINT32 stat_int0_mask13; + ZXIC_UINT32 stat_int0_mask12; + ZXIC_UINT32 stat_int0_mask11; + ZXIC_UINT32 stat_int0_mask10; + ZXIC_UINT32 stat_int0_mask9; + ZXIC_UINT32 stat_int0_mask8; + ZXIC_UINT32 stat_int0_mask7; + ZXIC_UINT32 stat_int0_mask6; + ZXIC_UINT32 stat_int0_mask5; + ZXIC_UINT32 stat_int0_mask4; + ZXIC_UINT32 stat_int0_mask3; + ZXIC_UINT32 stat_int0_mask2; + ZXIC_UINT32 stat_int0_mask1; + ZXIC_UINT32 stat_int0_mask0; +} DPP_STAT_STAT_CFG_STAT_INT0_MASK_T; + +typedef struct 
dpp_stat_stat_cfg_stat_int0_status_t { + ZXIC_UINT32 stat_int0_status31; + ZXIC_UINT32 stat_int0_status30; + ZXIC_UINT32 stat_int0_status29; + ZXIC_UINT32 stat_int0_status28; + ZXIC_UINT32 stat_int0_status27; + ZXIC_UINT32 stat_int0_status26; + ZXIC_UINT32 stat_int0_status25; + ZXIC_UINT32 stat_int0_status24; + ZXIC_UINT32 stat_int0_status23; + ZXIC_UINT32 stat_int0_status22; + ZXIC_UINT32 stat_int0_status21; + ZXIC_UINT32 stat_int0_status20; + ZXIC_UINT32 stat_int0_status19; + ZXIC_UINT32 stat_int0_status18; + ZXIC_UINT32 stat_int0_status17; + ZXIC_UINT32 stat_int0_status16; + ZXIC_UINT32 stat_int0_status15; + ZXIC_UINT32 stat_int0_status14; + ZXIC_UINT32 stat_int0_status13; + ZXIC_UINT32 stat_int0_status12; + ZXIC_UINT32 stat_int0_status11; + ZXIC_UINT32 stat_int0_status10; + ZXIC_UINT32 stat_int0_status9; + ZXIC_UINT32 stat_int0_status8; + ZXIC_UINT32 stat_int0_status7; + ZXIC_UINT32 stat_int0_status6; + ZXIC_UINT32 stat_int0_status5; + ZXIC_UINT32 stat_int0_status4; + ZXIC_UINT32 stat_int0_status3; + ZXIC_UINT32 stat_int0_status2; + ZXIC_UINT32 stat_int0_status1; + ZXIC_UINT32 stat_int0_status0; +} DPP_STAT_STAT_CFG_STAT_INT0_STATUS_T; + +typedef struct dpp_stat_stat_cfg_stat_int1_en_t { + ZXIC_UINT32 stat_int1_en31; + ZXIC_UINT32 stat_int1_en30; + ZXIC_UINT32 stat_int1_en29; + ZXIC_UINT32 stat_int1_en28; + ZXIC_UINT32 stat_int1_en27; + ZXIC_UINT32 stat_int1_en26; + ZXIC_UINT32 stat_int1_en25; + ZXIC_UINT32 stat_int1_en24; + ZXIC_UINT32 stat_int1_en23; + ZXIC_UINT32 stat_int1_en22; + ZXIC_UINT32 stat_int1_en21; + ZXIC_UINT32 stat_int1_en20; + ZXIC_UINT32 stat_int1_en19; + ZXIC_UINT32 stat_int1_en18; + ZXIC_UINT32 stat_int1_en17; + ZXIC_UINT32 stat_int1_en16; + ZXIC_UINT32 stat_int1_en15; + ZXIC_UINT32 stat_int1_en14; + ZXIC_UINT32 stat_int1_en13; + ZXIC_UINT32 stat_int1_en12; + ZXIC_UINT32 stat_int1_en11; + ZXIC_UINT32 stat_int1_en10; + ZXIC_UINT32 stat_int1_en9; + ZXIC_UINT32 stat_int1_en8; + ZXIC_UINT32 stat_int1_en7; + ZXIC_UINT32 stat_int1_en6; + 
ZXIC_UINT32 stat_int1_en5; + ZXIC_UINT32 stat_int1_en4; + ZXIC_UINT32 stat_int1_en3; + ZXIC_UINT32 stat_int1_en2; + ZXIC_UINT32 stat_int1_en1; + ZXIC_UINT32 stat_int1_en0; +} DPP_STAT_STAT_CFG_STAT_INT1_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int1_mask_t { + ZXIC_UINT32 stat_int1_mask31; + ZXIC_UINT32 stat_int1_mask30; + ZXIC_UINT32 stat_int1_mask29; + ZXIC_UINT32 stat_int1_mask28; + ZXIC_UINT32 stat_int1_mask27; + ZXIC_UINT32 stat_int1_mask26; + ZXIC_UINT32 stat_int1_mask25; + ZXIC_UINT32 stat_int1_mask24; + ZXIC_UINT32 stat_int1_mask23; + ZXIC_UINT32 stat_int1_mask22; + ZXIC_UINT32 stat_int1_mask21; + ZXIC_UINT32 stat_int1_mask20; + ZXIC_UINT32 stat_int1_mask19; + ZXIC_UINT32 stat_int1_mask18; + ZXIC_UINT32 stat_int1_mask17; + ZXIC_UINT32 stat_int1_mask16; + ZXIC_UINT32 stat_int1_mask15; + ZXIC_UINT32 stat_int1_mask14; + ZXIC_UINT32 stat_int1_mask13; + ZXIC_UINT32 stat_int1_mask12; + ZXIC_UINT32 stat_int1_mask11; + ZXIC_UINT32 stat_int1_mask10; + ZXIC_UINT32 stat_int1_mask9; + ZXIC_UINT32 stat_int1_mask8; + ZXIC_UINT32 stat_int1_mask7; + ZXIC_UINT32 stat_int1_mask6; + ZXIC_UINT32 stat_int1_mask5; + ZXIC_UINT32 stat_int1_mask4; + ZXIC_UINT32 stat_int1_mask3; + ZXIC_UINT32 stat_int1_mask2; + ZXIC_UINT32 stat_int1_mask1; + ZXIC_UINT32 stat_int1_mask0; +} DPP_STAT_STAT_CFG_STAT_INT1_MASK_T; + +typedef struct dpp_stat_stat_cfg_stat_int1_status_t { + ZXIC_UINT32 stat_int1_status31; + ZXIC_UINT32 stat_int1_status30; + ZXIC_UINT32 stat_int1_status29; + ZXIC_UINT32 stat_int1_status28; + ZXIC_UINT32 stat_int1_status27; + ZXIC_UINT32 stat_int1_status26; + ZXIC_UINT32 stat_int1_status25; + ZXIC_UINT32 stat_int1_status24; + ZXIC_UINT32 stat_int1_status23; + ZXIC_UINT32 stat_int1_status22; + ZXIC_UINT32 stat_int1_status21; + ZXIC_UINT32 stat_int1_status20; + ZXIC_UINT32 stat_int1_status19; + ZXIC_UINT32 stat_int1_status18; + ZXIC_UINT32 stat_int1_status17; + ZXIC_UINT32 stat_int1_status16; + ZXIC_UINT32 stat_int1_status15; + ZXIC_UINT32 stat_int1_status14; + ZXIC_UINT32 
stat_int1_status13; + ZXIC_UINT32 stat_int1_status12; + ZXIC_UINT32 stat_int1_status11; + ZXIC_UINT32 stat_int1_status10; + ZXIC_UINT32 stat_int1_status9; + ZXIC_UINT32 stat_int1_status8; + ZXIC_UINT32 stat_int1_status7; + ZXIC_UINT32 stat_int1_status6; + ZXIC_UINT32 stat_int1_status5; + ZXIC_UINT32 stat_int1_status4; + ZXIC_UINT32 stat_int1_status3; + ZXIC_UINT32 stat_int1_status2; + ZXIC_UINT32 stat_int1_status1; + ZXIC_UINT32 stat_int1_status0; +} DPP_STAT_STAT_CFG_STAT_INT1_STATUS_T; + +typedef struct dpp_stat_stat_cfg_stat_int2_en_t { + ZXIC_UINT32 stat_int2_en31; + ZXIC_UINT32 stat_int2_en30; + ZXIC_UINT32 stat_int2_en29; + ZXIC_UINT32 stat_int2_en28; + ZXIC_UINT32 stat_int2_en27; + ZXIC_UINT32 stat_int2_en26; + ZXIC_UINT32 stat_int2_en25; + ZXIC_UINT32 stat_int2_en24; + ZXIC_UINT32 stat_int2_en23; + ZXIC_UINT32 stat_int2_en22; + ZXIC_UINT32 stat_int2_en21; + ZXIC_UINT32 stat_int2_en20; + ZXIC_UINT32 stat_int2_en19; + ZXIC_UINT32 stat_int2_en18; + ZXIC_UINT32 stat_int2_en17; + ZXIC_UINT32 stat_int2_en16; + ZXIC_UINT32 stat_int2_en15; + ZXIC_UINT32 stat_int2_en14; + ZXIC_UINT32 stat_int2_en13; + ZXIC_UINT32 stat_int2_en12; + ZXIC_UINT32 stat_int2_en11; + ZXIC_UINT32 stat_int2_en10; + ZXIC_UINT32 stat_int2_en9; + ZXIC_UINT32 stat_int2_en8; + ZXIC_UINT32 stat_int2_en7; + ZXIC_UINT32 stat_int2_en6; + ZXIC_UINT32 stat_int2_en5; + ZXIC_UINT32 stat_int2_en4; + ZXIC_UINT32 stat_int2_en3; + ZXIC_UINT32 stat_int2_en2; + ZXIC_UINT32 stat_int2_en1; + ZXIC_UINT32 stat_int2_en0; +} DPP_STAT_STAT_CFG_STAT_INT2_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int2_mask_t { + ZXIC_UINT32 stat_int2_mask31; + ZXIC_UINT32 stat_int2_mask30; + ZXIC_UINT32 stat_int2_mask29; + ZXIC_UINT32 stat_int2_mask28; + ZXIC_UINT32 stat_int2_mask27; + ZXIC_UINT32 stat_int2_mask26; + ZXIC_UINT32 stat_int2_mask25; + ZXIC_UINT32 stat_int2_mask24; + ZXIC_UINT32 stat_int2_mask23; + ZXIC_UINT32 stat_int2_mask22; + ZXIC_UINT32 stat_int2_mask21; + ZXIC_UINT32 stat_int2_mask20; + ZXIC_UINT32 
stat_int2_mask19; + ZXIC_UINT32 stat_int2_mask18; + ZXIC_UINT32 stat_int2_mask17; + ZXIC_UINT32 stat_int2_mask16; + ZXIC_UINT32 stat_int2_mask15; + ZXIC_UINT32 stat_int2_mask14; + ZXIC_UINT32 stat_int2_mask13; + ZXIC_UINT32 stat_int2_mask12; + ZXIC_UINT32 stat_int2_mask11; + ZXIC_UINT32 stat_int2_mask10; + ZXIC_UINT32 stat_int2_mask9; + ZXIC_UINT32 stat_int2_mask8; + ZXIC_UINT32 stat_int2_mask7; + ZXIC_UINT32 stat_int2_mask6; + ZXIC_UINT32 stat_int2_mask5; + ZXIC_UINT32 stat_int2_mask4; + ZXIC_UINT32 stat_int2_mask3; + ZXIC_UINT32 stat_int2_mask2; + ZXIC_UINT32 stat_int2_mask1; + ZXIC_UINT32 stat_int2_mask0; +} DPP_STAT_STAT_CFG_STAT_INT2_MASK_T; + +typedef struct dpp_stat_stat_cfg_stat_int2_status_t { + ZXIC_UINT32 stat_int2_status31; + ZXIC_UINT32 stat_int2_status30; + ZXIC_UINT32 stat_int2_status29; + ZXIC_UINT32 stat_int2_status28; + ZXIC_UINT32 stat_int2_status27; + ZXIC_UINT32 stat_int2_status26; + ZXIC_UINT32 stat_int2_status25; + ZXIC_UINT32 stat_int2_status24; + ZXIC_UINT32 stat_int2_status23; + ZXIC_UINT32 stat_int2_status22; + ZXIC_UINT32 stat_int2_status21; + ZXIC_UINT32 stat_int2_status20; + ZXIC_UINT32 stat_int2_status19; + ZXIC_UINT32 stat_int2_status18; + ZXIC_UINT32 stat_int2_status17; + ZXIC_UINT32 stat_int2_status16; + ZXIC_UINT32 stat_int2_status15; + ZXIC_UINT32 stat_int2_status14; + ZXIC_UINT32 stat_int2_status13; + ZXIC_UINT32 stat_int2_status12; + ZXIC_UINT32 stat_int2_status11; + ZXIC_UINT32 stat_int2_status10; + ZXIC_UINT32 stat_int2_status9; + ZXIC_UINT32 stat_int2_status8; + ZXIC_UINT32 stat_int2_status7; + ZXIC_UINT32 stat_int2_status6; + ZXIC_UINT32 stat_int2_status5; + ZXIC_UINT32 stat_int2_status4; + ZXIC_UINT32 stat_int2_status3; + ZXIC_UINT32 stat_int2_status2; + ZXIC_UINT32 stat_int2_status1; + ZXIC_UINT32 stat_int2_status0; +} DPP_STAT_STAT_CFG_STAT_INT2_STATUS_T; + +typedef struct dpp_stat_stat_cfg_stat_int3_en_t { + ZXIC_UINT32 stat_int3_en31; + ZXIC_UINT32 stat_int3_en30; + ZXIC_UINT32 stat_int3_en29; + ZXIC_UINT32 
stat_int3_en28; + ZXIC_UINT32 stat_int3_en27; + ZXIC_UINT32 stat_int3_en26; + ZXIC_UINT32 stat_int3_en25; + ZXIC_UINT32 stat_int3_en24; + ZXIC_UINT32 stat_int3_en23; + ZXIC_UINT32 stat_int3_en22; + ZXIC_UINT32 stat_int3_en21; + ZXIC_UINT32 stat_int3_en20; + ZXIC_UINT32 stat_int3_en19; + ZXIC_UINT32 stat_int3_en18; + ZXIC_UINT32 stat_int3_en17; + ZXIC_UINT32 stat_int3_en16; + ZXIC_UINT32 stat_int3_en15; + ZXIC_UINT32 stat_int3_en14; + ZXIC_UINT32 stat_int3_en13; + ZXIC_UINT32 stat_int3_en12; + ZXIC_UINT32 stat_int3_en11; + ZXIC_UINT32 stat_int3_en10; + ZXIC_UINT32 stat_int3_en9; + ZXIC_UINT32 stat_int3_en8; + ZXIC_UINT32 stat_int3_en7; + ZXIC_UINT32 stat_int3_en6; + ZXIC_UINT32 stat_int3_en5; + ZXIC_UINT32 stat_int3_en4; + ZXIC_UINT32 stat_int3_en3; + ZXIC_UINT32 stat_int3_en2; + ZXIC_UINT32 stat_int3_en1; + ZXIC_UINT32 stat_int3_en0; +} DPP_STAT_STAT_CFG_STAT_INT3_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int3_mask_t { + ZXIC_UINT32 stat_int3_mask31; + ZXIC_UINT32 stat_int3_mask30; + ZXIC_UINT32 stat_int3_mask29; + ZXIC_UINT32 stat_int3_mask28; + ZXIC_UINT32 stat_int3_mask27; + ZXIC_UINT32 stat_int3_mask26; + ZXIC_UINT32 stat_int3_mask25; + ZXIC_UINT32 stat_int3_mask24; + ZXIC_UINT32 stat_int3_mask23; + ZXIC_UINT32 stat_int3_mask22; + ZXIC_UINT32 stat_int3_mask21; + ZXIC_UINT32 stat_int3_mask20; + ZXIC_UINT32 stat_int3_mask19; + ZXIC_UINT32 stat_int3_mask18; + ZXIC_UINT32 stat_int3_mask17; + ZXIC_UINT32 stat_int3_mask16; + ZXIC_UINT32 stat_int3_mask15; + ZXIC_UINT32 stat_int3_mask14; + ZXIC_UINT32 stat_int3_mask13; + ZXIC_UINT32 stat_int3_mask12; + ZXIC_UINT32 stat_int3_mask11; + ZXIC_UINT32 stat_int3_mask10; + ZXIC_UINT32 stat_int3_mask9; + ZXIC_UINT32 stat_int3_mask8; + ZXIC_UINT32 stat_int3_mask7; + ZXIC_UINT32 stat_int3_mask6; + ZXIC_UINT32 stat_int3_mask5; + ZXIC_UINT32 stat_int3_mask4; + ZXIC_UINT32 stat_int3_mask3; + ZXIC_UINT32 stat_int3_mask2; + ZXIC_UINT32 stat_int3_mask1; + ZXIC_UINT32 stat_int3_mask0; +} DPP_STAT_STAT_CFG_STAT_INT3_MASK_T; + 
+typedef struct dpp_stat_stat_cfg_stat_int3_status_t { + ZXIC_UINT32 stat_int3_status31; + ZXIC_UINT32 stat_int3_status30; + ZXIC_UINT32 stat_int3_status29; + ZXIC_UINT32 stat_int3_status28; + ZXIC_UINT32 stat_int3_status27; + ZXIC_UINT32 stat_int3_status26; + ZXIC_UINT32 stat_int3_status25; + ZXIC_UINT32 stat_int3_status24; + ZXIC_UINT32 stat_int3_status23; + ZXIC_UINT32 stat_int3_status22; + ZXIC_UINT32 stat_int3_status21; + ZXIC_UINT32 stat_int3_status20; + ZXIC_UINT32 stat_int3_status19; + ZXIC_UINT32 stat_int3_status18; + ZXIC_UINT32 stat_int3_status17; + ZXIC_UINT32 stat_int3_status16; + ZXIC_UINT32 stat_int3_status15; + ZXIC_UINT32 stat_int3_status14; + ZXIC_UINT32 stat_int3_status13; + ZXIC_UINT32 stat_int3_status12; + ZXIC_UINT32 stat_int3_status11; + ZXIC_UINT32 stat_int3_status10; + ZXIC_UINT32 stat_int3_status9; + ZXIC_UINT32 stat_int3_status8; + ZXIC_UINT32 stat_int3_status7; + ZXIC_UINT32 stat_int3_status6; + ZXIC_UINT32 stat_int3_status5; + ZXIC_UINT32 stat_int3_status4; + ZXIC_UINT32 stat_int3_status3; + ZXIC_UINT32 stat_int3_status2; + ZXIC_UINT32 stat_int3_status1; + ZXIC_UINT32 stat_int3_status0; +} DPP_STAT_STAT_CFG_STAT_INT3_STATUS_T; + +typedef struct dpp_stat_stat_cfg_stat_int4_en_t { + ZXIC_UINT32 stat_int4_en_18; + ZXIC_UINT32 stat_int4_en_17; + ZXIC_UINT32 stat_int4_en_16; + ZXIC_UINT32 stat_int4_en_15; + ZXIC_UINT32 stat_int4_en_14; + ZXIC_UINT32 stat_int4_en_13; + ZXIC_UINT32 stat_int4_en_12; + ZXIC_UINT32 stat_int4_en_11; + ZXIC_UINT32 stat_int4_en_10; + ZXIC_UINT32 stat_int4_en_9; + ZXIC_UINT32 stat_int4_en_8; + ZXIC_UINT32 stat_int4_en_7; + ZXIC_UINT32 stat_int4_en_6; + ZXIC_UINT32 stat_int4_en_5; + ZXIC_UINT32 stat_int4_en_4; + ZXIC_UINT32 stat_int4_en_3; + ZXIC_UINT32 stat_int4_en_2; + ZXIC_UINT32 stat_int4_en_1; + ZXIC_UINT32 stat_int4_en_0; +} DPP_STAT_STAT_CFG_STAT_INT4_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int4_mask_t { + ZXIC_UINT32 stat_int4_mask_18; + ZXIC_UINT32 stat_int4_mask_17; + ZXIC_UINT32 stat_int4_mask_16; + 
ZXIC_UINT32 stat_int4_mask_15; + ZXIC_UINT32 stat_int4_mask_14; + ZXIC_UINT32 stat_int4_mask_13; + ZXIC_UINT32 stat_int4_mask_12; + ZXIC_UINT32 stat_int4_mask_11; + ZXIC_UINT32 stat_int4_mask_10; + ZXIC_UINT32 stat_int4_mask_9; + ZXIC_UINT32 stat_int4_mask_8; + ZXIC_UINT32 stat_int4_mask_7; + ZXIC_UINT32 stat_int4_mask_6; + ZXIC_UINT32 stat_int4_mask_5; + ZXIC_UINT32 stat_int4_mask_4; + ZXIC_UINT32 stat_int4_mask_3; + ZXIC_UINT32 stat_int4_mask_2; + ZXIC_UINT32 stat_int4_mask_1; + ZXIC_UINT32 stat_int4_mask_0; +} DPP_STAT_STAT_CFG_STAT_INT4_MASK_T; + +typedef struct dpp_stat_stat_cfg_stat_int4_status_t { + ZXIC_UINT32 stat_int4_mask_18; + ZXIC_UINT32 stat_int4_mask_17; + ZXIC_UINT32 stat_int4_mask_16; + ZXIC_UINT32 stat_int4_mask_15; + ZXIC_UINT32 stat_int4_mask_14; + ZXIC_UINT32 stat_int4_mask_13; + ZXIC_UINT32 stat_int4_mask_12; + ZXIC_UINT32 stat_int4_mask_11; + ZXIC_UINT32 stat_int4_mask_10; + ZXIC_UINT32 stat_int4_mask_9; + ZXIC_UINT32 stat_int4_mask_8; + ZXIC_UINT32 stat_int4_mask_7; + ZXIC_UINT32 stat_int4_mask_6; + ZXIC_UINT32 stat_int4_mask_5; + ZXIC_UINT32 stat_int4_mask_4; + ZXIC_UINT32 stat_int4_mask_3; + ZXIC_UINT32 stat_int4_mask_2; + ZXIC_UINT32 stat_int4_mask_1; + ZXIC_UINT32 stat_int4_mask_0; +} DPP_STAT_STAT_CFG_STAT_INT4_STATUS_T; + +typedef struct dpp_stat_stat_cfg_stat_int5_en_t { + ZXIC_UINT32 stat_int5_en_18; + ZXIC_UINT32 stat_int5_en_17; + ZXIC_UINT32 stat_int5_en_16; + ZXIC_UINT32 stat_int5_en_15; + ZXIC_UINT32 stat_int5_en_14; + ZXIC_UINT32 stat_int5_en_13; + ZXIC_UINT32 stat_int5_en_12; + ZXIC_UINT32 stat_int5_en_11; + ZXIC_UINT32 stat_int5_en_10; + ZXIC_UINT32 stat_int5_en_9; + ZXIC_UINT32 stat_int5_en_8; + ZXIC_UINT32 stat_int5_en_7; + ZXIC_UINT32 stat_int5_en_6; + ZXIC_UINT32 stat_int5_en_5; + ZXIC_UINT32 stat_int5_en_4; + ZXIC_UINT32 stat_int5_en_3; + ZXIC_UINT32 stat_int5_en_2; + ZXIC_UINT32 stat_int5_en_1; + ZXIC_UINT32 stat_int5_en_0; +} DPP_STAT_STAT_CFG_STAT_INT5_EN_T; + +typedef struct dpp_stat_stat_cfg_stat_int5_mask_t { + 
ZXIC_UINT32 stat_int5_mask_18; + ZXIC_UINT32 stat_int5_mask_17; + ZXIC_UINT32 stat_int5_mask_16; + ZXIC_UINT32 stat_int5_mask_15; + ZXIC_UINT32 stat_int5_mask_14; + ZXIC_UINT32 stat_int5_mask_13; + ZXIC_UINT32 stat_int5_mask_12; + ZXIC_UINT32 stat_int5_mask_11; + ZXIC_UINT32 stat_int5_mask_10; + ZXIC_UINT32 stat_int5_mask_9; + ZXIC_UINT32 stat_int5_mask_8; + ZXIC_UINT32 stat_int5_mask_7; + ZXIC_UINT32 stat_int5_mask_6; + ZXIC_UINT32 stat_int5_mask_5; + ZXIC_UINT32 stat_int5_mask_4; + ZXIC_UINT32 stat_int5_mask_3; + ZXIC_UINT32 stat_int5_mask_2; + ZXIC_UINT32 stat_int5_mask_1; + ZXIC_UINT32 stat_int5_mask_0; +} DPP_STAT_STAT_CFG_STAT_INT5_MASK_T; + +typedef struct dpp_stat_stat_cfg_stat_int5_status_t { + ZXIC_UINT32 stat_int5_mask_18; + ZXIC_UINT32 stat_int5_mask_17; + ZXIC_UINT32 stat_int5_mask_16; + ZXIC_UINT32 stat_int5_mask_15; + ZXIC_UINT32 stat_int5_mask_14; + ZXIC_UINT32 stat_int5_mask_13; + ZXIC_UINT32 stat_int5_mask_12; + ZXIC_UINT32 stat_int5_mask_11; + ZXIC_UINT32 stat_int5_mask_10; + ZXIC_UINT32 stat_int5_mask_9; + ZXIC_UINT32 stat_int5_mask_8; + ZXIC_UINT32 stat_int5_mask_7; + ZXIC_UINT32 stat_int5_mask_6; + ZXIC_UINT32 stat_int5_mask_5; + ZXIC_UINT32 stat_int5_mask_4; + ZXIC_UINT32 stat_int5_mask_3; + ZXIC_UINT32 stat_int5_mask_2; + ZXIC_UINT32 stat_int5_mask_1; + ZXIC_UINT32 stat_int5_mask_0; +} DPP_STAT_STAT_CFG_STAT_INT5_STATUS_T; + +typedef struct dpp_stat_stat_cfg_rschd_ecc_bypass_t { + ZXIC_UINT32 rschd_ecc_bypass_18; + ZXIC_UINT32 rschd_ecc_bypass_17; + ZXIC_UINT32 rschd_ecc_bypass_16; + ZXIC_UINT32 rschd_ecc_bypass_15; + ZXIC_UINT32 rschd_ecc_bypass_14; + ZXIC_UINT32 rschd_ecc_bypass_13; + ZXIC_UINT32 rschd_ecc_bypass_12; + ZXIC_UINT32 rschd_ecc_bypass_11; + ZXIC_UINT32 rschd_ecc_bypass_10; + ZXIC_UINT32 rschd_ecc_bypass_9; + ZXIC_UINT32 rschd_ecc_bypass_8; + ZXIC_UINT32 rschd_ecc_bypass_7; + ZXIC_UINT32 rschd_ecc_bypass_6; + ZXIC_UINT32 rschd_ecc_bypass_5; + ZXIC_UINT32 rschd_ecc_bypass_4; + ZXIC_UINT32 rschd_ecc_bypass_3; + ZXIC_UINT32 
rschd_ecc_bypass_2; + ZXIC_UINT32 rschd_ecc_bypass_1; + ZXIC_UINT32 rschd_ecc_bypass_0; +} DPP_STAT_STAT_CFG_RSCHD_ECC_BYPASS_T; + +typedef struct dpp_stat_stat_cfg_rschd_ecc_single_err_t { + ZXIC_UINT32 rschd_ecc_single_err_18; + ZXIC_UINT32 rschd_ecc_single_err_17; + ZXIC_UINT32 rschd_ecc_single_err_16; + ZXIC_UINT32 rschd_ecc_single_err_15; + ZXIC_UINT32 rschd_ecc_single_err_14; + ZXIC_UINT32 rschd_ecc_single_err_13; + ZXIC_UINT32 rschd_ecc_single_err_12; + ZXIC_UINT32 rschd_ecc_single_err_11; + ZXIC_UINT32 rschd_ecc_single_err_10; + ZXIC_UINT32 rschd_ecc_single_err_9; + ZXIC_UINT32 rschd_ecc_single_err_8; + ZXIC_UINT32 rschd_ecc_single_err_7; + ZXIC_UINT32 rschd_ecc_single_err_6; + ZXIC_UINT32 rschd_ecc_single_err_5; + ZXIC_UINT32 rschd_ecc_single_err_4; + ZXIC_UINT32 rschd_ecc_single_err_3; + ZXIC_UINT32 rschd_ecc_single_err_2; + ZXIC_UINT32 rschd_ecc_single_err_1; + ZXIC_UINT32 rschd_ecc_single_err_0; +} DPP_STAT_STAT_CFG_RSCHD_ECC_SINGLE_ERR_T; + +typedef struct dpp_stat_stat_cfg_rschd_ecc_double_err_t { + ZXIC_UINT32 rschd_ecc_double_err_18; + ZXIC_UINT32 rschd_ecc_double_err_17; + ZXIC_UINT32 rschd_ecc_double_err_16; + ZXIC_UINT32 rschd_ecc_double_err_15; + ZXIC_UINT32 rschd_ecc_double_err_14; + ZXIC_UINT32 rschd_ecc_double_err_13; + ZXIC_UINT32 rschd_ecc_double_err_12; + ZXIC_UINT32 rschd_ecc_double_err_11; + ZXIC_UINT32 rschd_ecc_double_err_10; + ZXIC_UINT32 rschd_ecc_double_err_9; + ZXIC_UINT32 rschd_ecc_double_err_8; + ZXIC_UINT32 rschd_ecc_double_err_7; + ZXIC_UINT32 rschd_ecc_double_err_6; + ZXIC_UINT32 rschd_ecc_double_err_5; + ZXIC_UINT32 rschd_ecc_double_err_4; + ZXIC_UINT32 rschd_ecc_double_err_3; + ZXIC_UINT32 rschd_ecc_double_err_2; + ZXIC_UINT32 rschd_ecc_double_err_1; + ZXIC_UINT32 rschd_ecc_double_err_0; +} DPP_STAT_STAT_CFG_RSCHD_ECC_DOUBLE_ERR_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat0_t { + ZXIC_UINT32 cpu_ind_ddr_wdat0; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT0_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat1_t { + 
ZXIC_UINT32 cpu_ind_ddr_wdat1; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT1_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat2_t { + ZXIC_UINT32 cpu_ind_ddr_wdat2; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT2_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat3_t { + ZXIC_UINT32 cpu_ind_ddr_wdat3; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT3_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat4_t { + ZXIC_UINT32 cpu_ind_ddr_wdat4; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT4_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat5_t { + ZXIC_UINT32 cpu_ind_ddr_wdat5; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT5_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat6_t { + ZXIC_UINT32 cpu_ind_ddr_wdat6; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT6_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat7_t { + ZXIC_UINT32 cpu_ind_ddr_wdat7; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT7_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat8_t { + ZXIC_UINT32 cpu_ind_ddr_wdat8; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT8_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat9_t { + ZXIC_UINT32 cpu_ind_ddr_wdat9; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT9_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat10_t { + ZXIC_UINT32 cpu_ind_ddr_wdat10; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT10_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat11_t { + ZXIC_UINT32 cpu_ind_ddr_wdat11; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT11_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat12_t { + ZXIC_UINT32 cpu_ind_ddr_wdat12; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT12_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat13_t { + ZXIC_UINT32 cpu_ind_ddr_wdat13; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT13_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat14_t { + ZXIC_UINT32 cpu_ind_ddr_wdat14; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT14_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_wdat15_t { + ZXIC_UINT32 cpu_ind_ddr_wdat15; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_WDAT15_T; + +typedef struct 
dpp_stat_stat_cfg_cpu_ind_ddr_req_info_t { + ZXIC_UINT32 rw_mode; + ZXIC_UINT32 read_mode; + ZXIC_UINT32 tm_cs; + ZXIC_UINT32 rw_addr; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_REQ_INFO_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rd_done_t { + ZXIC_UINT32 cpu_ind_ddr_rd_done; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RD_DONE_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat0_t { + ZXIC_UINT32 cpu_ind_ddr_rdat0; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT0_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat1_t { + ZXIC_UINT32 cpu_ind_ddr_rdat1; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT1_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat2_t { + ZXIC_UINT32 cpu_ind_ddr_rdat2; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT2_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat3_t { + ZXIC_UINT32 cpu_ind_ddr_rdat3; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT3_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat4_t { + ZXIC_UINT32 cpu_ind_ddr_rdat4; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT4_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat5_t { + ZXIC_UINT32 cpu_ind_ddr_rdat5; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT5_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat6_t { + ZXIC_UINT32 cpu_ind_ddr_rdat6; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT6_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat7_t { + ZXIC_UINT32 cpu_ind_ddr_rdat7; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT7_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat8_t { + ZXIC_UINT32 cpu_ind_ddr_rdat8; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT8_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat9_t { + ZXIC_UINT32 cpu_ind_ddr_rdat9; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT9_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat10_t { + ZXIC_UINT32 cpu_ind_ddr_rdat10; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT10_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat11_t { + ZXIC_UINT32 cpu_ind_ddr_rdat11; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT11_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat12_t { + ZXIC_UINT32 
cpu_ind_ddr_rdat12; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT12_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat13_t { + ZXIC_UINT32 cpu_ind_ddr_rdat13; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT13_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat14_t { + ZXIC_UINT32 cpu_ind_ddr_rdat14; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT14_T; + +typedef struct dpp_stat_stat_cfg_cpu_ind_ddr_rdat15_t { + ZXIC_UINT32 cpu_ind_ddr_rdat15; +} DPP_STAT_STAT_CFG_CPU_IND_DDR_RDAT15_T; + +typedef struct dpp_stat_stat_cfg_tm_alu_ddr_cpu_rdy_t { + ZXIC_UINT32 tm_alu_ddr_cpu_rdy; +} DPP_STAT_STAT_CFG_TM_ALU_DDR_CPU_RDY_T; + +typedef struct dpp_stat_stat_cfg_ept_flag_t { + ZXIC_UINT32 ept_flag; +} DPP_STAT_STAT_CFG_EPT_FLAG_T; + +typedef struct dpp_stat_stat_cfg_ppu_soft_rst_t { + ZXIC_UINT32 ppu_soft_rst; +} DPP_STAT_STAT_CFG_PPU_SOFT_RST_T; + +typedef struct dpp_stat_stat_cfg_stat_smmu0_fc15_0_cnt_t { + ZXIC_UINT32 stat_smmu0_fc15_0_cnt; +} DPP_STAT_STAT_CFG_STAT_SMMU0_FC15_0_CNT_T; + +typedef struct dpp_stat_stat_cfg_smmu0_stat_fc15_0_cnt_t { + ZXIC_UINT32 smmu0_stat_fc15_0_cnt; +} DPP_STAT_STAT_CFG_SMMU0_STAT_FC15_0_CNT_T; + +typedef struct dpp_stat_stat_cfg_smmu0_stat_rsp15_0_cnt_t { + ZXIC_UINT32 smmu0_stat_rsp15_0_cnt; +} DPP_STAT_STAT_CFG_SMMU0_STAT_RSP15_0_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_smmu0_req15_0_cnt_t { + ZXIC_UINT32 stat_smmu0_req15_0_cnt; +} DPP_STAT_STAT_CFG_STAT_SMMU0_REQ15_0_CNT_T; + +typedef struct dpp_stat_stat_cfg_ppu_stat_mec5_0_rsp_fc_cnt_t { + ZXIC_UINT32 ppu_stat_mec5_0_rsp_fc_cnt; +} DPP_STAT_STAT_CFG_PPU_STAT_MEC5_0_RSP_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_ppu_mec5_0_key_fc_cnt_t { + ZXIC_UINT32 stat_ppu_mec5_0_key_fc_cnt; +} DPP_STAT_STAT_CFG_STAT_PPU_MEC5_0_KEY_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_ppu_mec5_0_rsp_cnt_t { + ZXIC_UINT32 stat_ppu_mec5_0_rsp_cnt; +} DPP_STAT_STAT_CFG_STAT_PPU_MEC5_0_RSP_CNT_T; + +typedef struct dpp_stat_stat_cfg_ppu_stat_mec5_0_key_cnt_t { + ZXIC_UINT32 ppu_stat_mec5_0_key_cnt; +} 
DPP_STAT_STAT_CFG_PPU_STAT_MEC5_0_KEY_CNT_T; + +typedef struct dpp_stat_stat_cfg_ppu5_0_no_exist_opcd_ex_cnt_t { + ZXIC_UINT32 ppu5_0_no_exist_opcd_ex_cnt; +} DPP_STAT_STAT_CFG_PPU5_0_NO_EXIST_OPCD_EX_CNT_T; + +typedef struct dpp_stat_stat_cfg_se_etm_stat_wr_fc_cnt_t { + ZXIC_UINT32 se_etm_stat_wr_fc_cnt; +} DPP_STAT_STAT_CFG_SE_ETM_STAT_WR_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_se_etm_stat_rd_fc_cnt_t { + ZXIC_UINT32 se_etm_stat_rd_fc_cnt; +} DPP_STAT_STAT_CFG_SE_ETM_STAT_RD_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_etm_deq_fc_cnt_t { + ZXIC_UINT32 stat_etm_deq_fc_cnt; +} DPP_STAT_STAT_CFG_STAT_ETM_DEQ_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_etm_enq_fc_cnt_t { + ZXIC_UINT32 stat_etm_enq_fc_cnt; +} DPP_STAT_STAT_CFG_STAT_ETM_ENQ_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_oam_lm_fc_cnt_t { + ZXIC_UINT32 stat_oam_lm_fc_cnt; +} DPP_STAT_STAT_CFG_STAT_OAM_LM_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_oam_stat_lm_fc_cnt_t { + ZXIC_UINT32 oam_stat_lm_fc_cnt; +} DPP_STAT_STAT_CFG_OAM_STAT_LM_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_oam_fc_cnt_t { + ZXIC_UINT32 stat_oam_fc_cnt; +} DPP_STAT_STAT_CFG_STAT_OAM_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_cmmu_stat_fc_cnt_t { + ZXIC_UINT32 cmmu_stat_fc_cnt; +} DPP_STAT_STAT_CFG_CMMU_STAT_FC_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_cmmu_req_cnt_t { + ZXIC_UINT32 stat_cmmu_req_cnt; +} DPP_STAT_STAT_CFG_STAT_CMMU_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_smmu0_plcr_rsp0_cnt_t { + ZXIC_UINT32 smmu0_plcr_rsp0_cnt; +} DPP_STAT_STAT_CFG_SMMU0_PLCR_RSP0_CNT_T; + +typedef struct dpp_stat_stat_cfg_plcr_smmu0_req0_cnt_t { + ZXIC_UINT32 plcr_smmu0_req0_cnt; +} DPP_STAT_STAT_CFG_PLCR_SMMU0_REQ0_CNT_T; + +typedef struct dpp_stat_stat_cfg_stat_oam_lm_rsp_cnt_t { + ZXIC_UINT32 stat_oam_lm_rsp_cnt; +} DPP_STAT_STAT_CFG_STAT_OAM_LM_RSP_CNT_T; + +typedef struct dpp_stat_stat_cfg_oam_stat_lm_req_cnt_t { + ZXIC_UINT32 oam_stat_lm_req_cnt; +} DPP_STAT_STAT_CFG_OAM_STAT_LM_REQ_CNT_T; + 
+typedef struct dpp_stat_stat_cfg_oam_stat_req_cnt_t { + ZXIC_UINT32 oam_stat_req_cnt; +} DPP_STAT_STAT_CFG_OAM_STAT_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_se_etm_stat_rsp_cnt_t { + ZXIC_UINT32 se_etm_stat_rsp_cnt; +} DPP_STAT_STAT_CFG_SE_ETM_STAT_RSP_CNT_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_se_wr_req_cnt_t { + ZXIC_UINT32 etm_stat_se_wr_req_cnt; +} DPP_STAT_STAT_CFG_ETM_STAT_SE_WR_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_se_rd_req_cnt_t { + ZXIC_UINT32 etm_stat_se_rd_req_cnt; +} DPP_STAT_STAT_CFG_ETM_STAT_SE_RD_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_smmu0_req_cnt0_t { + ZXIC_UINT32 etm_stat_smmu0_req_cnt0; +} DPP_STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT0_T; + +typedef struct dpp_stat_stat_cfg_etm_stat_smmu0_req_cnt1_t { + ZXIC_UINT32 etm_stat_smmu0_req_cnt1; +} DPP_STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT1_T; + +typedef struct dpp_stat_stat_cfg_tm_stat_eram_cpu_rsp_cnt_t { + ZXIC_UINT32 tm_stat_eram_cpu_rsp_cnt; +} DPP_STAT_STAT_CFG_TM_STAT_ERAM_CPU_RSP_CNT_T; + +typedef struct dpp_stat_stat_cfg_cpu_rd_eram_req_cnt_t { + ZXIC_UINT32 cpu_rd_eram_req_cnt; +} DPP_STAT_STAT_CFG_CPU_RD_ERAM_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_cpu_wr_eram_req_cnt_t { + ZXIC_UINT32 cpu_wr_eram_req_cnt; +} DPP_STAT_STAT_CFG_CPU_WR_ERAM_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_tm_stat_ddr_cpu_rsp_cnt_t { + ZXIC_UINT32 tm_stat_ddr_cpu_rsp_cnt; +} DPP_STAT_STAT_CFG_TM_STAT_DDR_CPU_RSP_CNT_T; + +typedef struct dpp_stat_stat_cfg_cpu_rd_ddr_req_cnt_t { + ZXIC_UINT32 cpu_rd_ddr_req_cnt; +} DPP_STAT_STAT_CFG_CPU_RD_DDR_REQ_CNT_T; + +typedef struct dpp_stat_stat_cfg_cpu_wr_ddr_req_cnt_t { + ZXIC_UINT32 cpu_wr_ddr_req_cnt; +} DPP_STAT_STAT_CFG_CPU_WR_DDR_REQ_CNT_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat1_t { + ZXIC_UINT32 wdat1; +} DPP_STAT_ETCAM_CPU_IND_WDAT1_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat2_t { + ZXIC_UINT32 wdat2; +} DPP_STAT_ETCAM_CPU_IND_WDAT2_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat3_t { + ZXIC_UINT32 
wdat3; +} DPP_STAT_ETCAM_CPU_IND_WDAT3_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat4_t { + ZXIC_UINT32 wdat4; +} DPP_STAT_ETCAM_CPU_IND_WDAT4_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat5_t { + ZXIC_UINT32 wdat5; +} DPP_STAT_ETCAM_CPU_IND_WDAT5_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat6_t { + ZXIC_UINT32 wdat6; +} DPP_STAT_ETCAM_CPU_IND_WDAT6_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat7_t { + ZXIC_UINT32 wdat7; +} DPP_STAT_ETCAM_CPU_IND_WDAT7_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat8_t { + ZXIC_UINT32 wdat8; +} DPP_STAT_ETCAM_CPU_IND_WDAT8_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat9_t { + ZXIC_UINT32 wdat9; +} DPP_STAT_ETCAM_CPU_IND_WDAT9_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat10_t { + ZXIC_UINT32 wdat10; +} DPP_STAT_ETCAM_CPU_IND_WDAT10_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat11_t { + ZXIC_UINT32 wdat11; +} DPP_STAT_ETCAM_CPU_IND_WDAT11_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat12_t { + ZXIC_UINT32 wdat12; +} DPP_STAT_ETCAM_CPU_IND_WDAT12_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat13_t { + ZXIC_UINT32 wdat13; +} DPP_STAT_ETCAM_CPU_IND_WDAT13_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat14_t { + ZXIC_UINT32 wdat14; +} DPP_STAT_ETCAM_CPU_IND_WDAT14_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat15_t { + ZXIC_UINT32 wdat15; +} DPP_STAT_ETCAM_CPU_IND_WDAT15_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat16_t { + ZXIC_UINT32 wdat16; +} DPP_STAT_ETCAM_CPU_IND_WDAT16_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat17_t { + ZXIC_UINT32 wdat17; +} DPP_STAT_ETCAM_CPU_IND_WDAT17_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat18_t { + ZXIC_UINT32 wdat18; +} DPP_STAT_ETCAM_CPU_IND_WDAT18_T; + +typedef struct dpp_stat_etcam_cpu_ind_wdat19_t { + ZXIC_UINT32 wdat19; +} DPP_STAT_ETCAM_CPU_IND_WDAT19_T; + +typedef struct dpp_stat_etcam_t_strwc_cfg_t { + ZXIC_UINT32 t_strwc_cfg; +} DPP_STAT_ETCAM_T_STRWC_CFG_T; + +typedef struct dpp_stat_etcam_etcam_int_unmask_flag_t { + ZXIC_UINT32 etcam_int_unmask_flag; +} 
DPP_STAT_ETCAM_ETCAM_INT_UNMASK_FLAG_T; + +typedef struct dpp_stat_etcam_etcam_int_en0_t { + ZXIC_UINT32 etcam_int_en17; + ZXIC_UINT32 etcam_int_en16; + ZXIC_UINT32 etcam_int_en15; + ZXIC_UINT32 etcam_int_en14; + ZXIC_UINT32 etcam_int_en13; + ZXIC_UINT32 etcam_int_en12; + ZXIC_UINT32 etcam_int_en11; + ZXIC_UINT32 etcam_int_en10; + ZXIC_UINT32 etcam_int_en9; + ZXIC_UINT32 etcam_int_en8; + ZXIC_UINT32 etcam_int_en7; + ZXIC_UINT32 etcam_int_en6; + ZXIC_UINT32 etcam_int_en5; + ZXIC_UINT32 etcam_int_en4; + ZXIC_UINT32 etcam_int_en3; + ZXIC_UINT32 etcam_int_en2; + ZXIC_UINT32 etcam_int_en1; + ZXIC_UINT32 etcam_int_en0; +} DPP_STAT_ETCAM_ETCAM_INT_EN0_T; + +typedef struct dpp_stat_etcam_etcam_int_mask0_t { + ZXIC_UINT32 etcam_int_mask17; + ZXIC_UINT32 etcam_int_mask16; + ZXIC_UINT32 etcam_int_mask15; + ZXIC_UINT32 etcam_int_mask14; + ZXIC_UINT32 etcam_int_mask13; + ZXIC_UINT32 etcam_int_mask12; + ZXIC_UINT32 etcam_int_mask11; + ZXIC_UINT32 etcam_int_mask10; + ZXIC_UINT32 etcam_int_mask9; + ZXIC_UINT32 etcam_int_mask8; + ZXIC_UINT32 etcam_int_mask7; + ZXIC_UINT32 etcam_int_mask6; + ZXIC_UINT32 etcam_int_mask5; + ZXIC_UINT32 etcam_int_mask4; + ZXIC_UINT32 etcam_int_mask3; + ZXIC_UINT32 etcam_int_mask2; + ZXIC_UINT32 etcam_int_mask1; + ZXIC_UINT32 etcam_int_mask0; +} DPP_STAT_ETCAM_ETCAM_INT_MASK0_T; + +typedef struct dpp_stat_etcam_etcam_int_status_t { + ZXIC_UINT32 etcam_int_status17; + ZXIC_UINT32 etcam_int_status16; + ZXIC_UINT32 etcam_int_status15; + ZXIC_UINT32 etcam_int_status14; + ZXIC_UINT32 etcam_int_status13; + ZXIC_UINT32 etcam_int_status12; + ZXIC_UINT32 etcam_int_status11; + ZXIC_UINT32 etcam_int_status10; + ZXIC_UINT32 etcam_int_status9; + ZXIC_UINT32 etcam_int_status8; + ZXIC_UINT32 etcam_int_status7; + ZXIC_UINT32 etcam_int_status6; + ZXIC_UINT32 etcam_int_status5; + ZXIC_UINT32 etcam_int_status4; + ZXIC_UINT32 etcam_int_status3; + ZXIC_UINT32 etcam_int_status2; + ZXIC_UINT32 etcam_int_status1; + ZXIC_UINT32 etcam_int_status0; +} 
DPP_STAT_ETCAM_ETCAM_INT_STATUS_T; + +typedef struct dpp_stat_etcam_int_tb_ini_ok_t { + ZXIC_UINT32 int_tb_ini_ok; +} DPP_STAT_ETCAM_INT_TB_INI_OK_T; + +typedef struct dpp_stat_etcam_etcam_clk_en_t { + ZXIC_UINT32 etcam_clk_en; +} DPP_STAT_ETCAM_ETCAM_CLK_EN_T; + +typedef struct dpp_stat_etcam_as_etcam_req0_cnt_t { + ZXIC_UINT32 as_etcam_req0_cnt; +} DPP_STAT_ETCAM_AS_ETCAM_REQ0_CNT_T; + +typedef struct dpp_stat_etcam_as_etcam_req1_cnt_t { + ZXIC_UINT32 as_etcam_req1_cnt; +} DPP_STAT_ETCAM_AS_ETCAM_REQ1_CNT_T; + +typedef struct dpp_stat_etcam_etcam_as_index0_cnt_t { + ZXIC_UINT32 etcam_as_index0_cnt; +} DPP_STAT_ETCAM_ETCAM_AS_INDEX0_CNT_T; + +typedef struct dpp_stat_etcam_etcam_as_index1_cnt_t { + ZXIC_UINT32 etcam_as_index1_cnt; +} DPP_STAT_ETCAM_ETCAM_AS_INDEX1_CNT_T; + +typedef struct dpp_stat_etcam_etcam_not_hit0_cnt_t { + ZXIC_UINT32 etcam_not_hit0_cnt; +} DPP_STAT_ETCAM_ETCAM_NOT_HIT0_CNT_T; + +typedef struct dpp_stat_etcam_etcam_not_hit1_cnt_t { + ZXIC_UINT32 etcam_not_hit1_cnt; +} DPP_STAT_ETCAM_ETCAM_NOT_HIT1_CNT_T; + +typedef struct dpp_stat_etcam_table_id_not_match_cnt_t { + ZXIC_UINT32 table_id_not_match_cnt; +} DPP_STAT_ETCAM_TABLE_ID_NOT_MATCH_CNT_T; + +typedef struct dpp_stat_etcam_table_id_clash01_cnt_t { + ZXIC_UINT32 table_id_clash01_cnt; +} DPP_STAT_ETCAM_TABLE_ID_CLASH01_CNT_T; + +typedef struct dpp_stat_etcam_etcam_cpu_fl_t { + ZXIC_UINT32 etcam_cpu_fl; +} DPP_STAT_ETCAM_ETCAM_CPU_FL_T; + +typedef struct dpp_stat_etcam_etcam_arb_empty_t { + ZXIC_UINT32 etcam_arb_empty; +} DPP_STAT_ETCAM_ETCAM_ARB_EMPTY_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_trpg_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_trpg_reg.h new file mode 100644 index 000000000000..4730b8e07d97 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_trpg_reg.h @@ -0,0 +1,276 @@ + +#ifndef _DPP_TRPG_REG_H_ +#define _DPP_TRPG_REG_H_ + +#ifdef __cplusplus +extern "C" 
{ +#endif + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_ms_en_t { + ZXIC_UINT32 cpu_trpgrx_ms_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_MS_EN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_port_en_t { + ZXIC_UINT32 cpu_trpgrx_port_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_PORT_EN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_look_en_t { + ZXIC_UINT32 cpu_trpgrx_look_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_LOOK_EN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_ram_almost_full_t { + ZXIC_UINT32 cpu_trpgrx_ram_almost_full; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_ALMOST_FULL_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_ram_test_en_t { + ZXIC_UINT32 cpu_trpgrx_ram_test_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_TEST_EN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_inmod_pfc_rdy_en_t { + ZXIC_UINT32 cpu_trpgrx_inmod_pfc_rdy_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_INMOD_PFC_RDY_EN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_h_t { + ZXIC_UINT32 cpu_trpgrx_pkt_num_h; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_H_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_l_t { + ZXIC_UINT32 cpu_trpgrx_pkt_num_l; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_L_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_h_t { + ZXIC_UINT32 cpu_trpgrx_pkt_byte_num_h; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_H_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_l_t { + ZXIC_UINT32 cpu_trpgrx_pkt_byte_num_l; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_L_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_cnt_clr_t { + ZXIC_UINT32 cpu_trpgrx_pkt_cnt_clr; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_CNT_CLR_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_fc_clk_freq_t { + ZXIC_UINT32 cpu_trpgrx_fc_clk_freq; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_CLK_FREQ_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_fc_en_t { + ZXIC_UINT32 cpu_trpgrx_fc_en; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_EN_T; 
+ +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_fc_token_add_num_t { + ZXIC_UINT32 cpu_trpgrx_fc_token_add_num; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_ADD_NUM_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_fc_token_max_num_t { + ZXIC_UINT32 cpu_trpgrx_fc_token_max_num; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_MAX_NUM_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_port_state_info_t { + ZXIC_UINT32 cpu_trpgrx_port_state_info; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PORT_STATE_INFO_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_t { + ZXIC_UINT32 cpu_trpgrx_ram_past_max_dep; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEP_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_clr_t { + ZXIC_UINT32 cpu_trpgrx_ram_past_max_dep_clr; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEP_CLR_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_t { + ZXIC_UINT32 cpu_trpgrx_pkt_past_max_len; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LEN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_clr_t { + ZXIC_UINT32 cpu_trpgrx_pkt_past_max_len_clr; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LEN_CLR_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_t { + ZXIC_UINT32 cpu_trpgrx_pkt_past_min_len; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LEN_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_clr_t { + ZXIC_UINT32 cpu_trpgrx_pkt_past_min_len_clr; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LEN_CLR_T; + +typedef struct dpp_trpg_trpg_rx_ram_trpg_rx_data_ram_t { + ZXIC_UINT32 trpg_rx_data_ram; +} DPP_TRPG_TRPG_RX_RAM_TRPG_RX_DATA_RAM_T; + +typedef struct dpp_trpg_trpg_rx_ram_trpg_rx_info_ram_t { + ZXIC_UINT32 trpg_rx_info_ram; +} DPP_TRPG_TRPG_RX_RAM_TRPG_RX_INFO_RAM_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpg_ms_en_t { + ZXIC_UINT32 cpu_trpgtx_ms_en; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_MS_EN_T; + +typedef struct 
dpp_trpg_trpg_tx_port_cpu_trpg_port_en_t { + ZXIC_UINT32 cpu_trpgtx_port_en; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_PORT_EN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpg_look_en_t { + ZXIC_UINT32 cpu_trpgtx_look_en; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_LOOK_EN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_ram_almost_full_t { + ZXIC_UINT32 cpu_trpgtx_ram_almost_full; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_ALMOST_FULL_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_ram_test_en_t { + ZXIC_UINT32 cpu_trpgtx_ram_test_en; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_TEST_EN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_h_t { + ZXIC_UINT32 cpu_trpgtx_pkt_num_h; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_H_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_l_t { + ZXIC_UINT32 cpu_trpgtx_pkt_num_l; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_L_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_h_t { + ZXIC_UINT32 cpu_trpgtx_pkt_byte_num_h; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_H_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_l_t { + ZXIC_UINT32 cpu_trpgtx_pkt_byte_num_l; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_L_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_cnt_clr_t { + ZXIC_UINT32 cpu_trpgtx_pkt_cnt_clr; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_CNT_CLR_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_fc_clk_freq_t { + ZXIC_UINT32 cpu_trpgtx_fc_clk_freq; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_CLK_FREQ_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_fc_en_t { + ZXIC_UINT32 cpu_trpgtx_fc_en; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_EN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_fc_token_add_num_t { + ZXIC_UINT32 cpu_trpgtx_fc_token_add_num; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_ADD_NUM_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_fc_token_max_num_t { + ZXIC_UINT32 cpu_trpgtx_fc_token_max_num; +} 
DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_MAX_NUM_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_port_state_info_t { + ZXIC_UINT32 cpu_trpgtx_port_state_info; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PORT_STATE_INFO_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_t { + ZXIC_UINT32 cpu_trpgtx_ram_past_max_dep; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEP_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_clr_t { + ZXIC_UINT32 cpu_trpgtx_ram_past_max_dep_clr; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEP_CLR_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_t { + ZXIC_UINT32 cpu_trpgtx_pkt_past_max_len; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LEN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_clr_t { + ZXIC_UINT32 cpu_trpgtx_pkt_past_max_len_clr; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LEN_CLR_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_t { + ZXIC_UINT32 cpu_trpgtx_pkt_past_min_len; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LEN_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_clr_t { + ZXIC_UINT32 cpu_trpgtx_pkt_past_min_len_clr; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LEN_CLR_T; + +typedef struct dpp_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_almost_full_t { + ZXIC_UINT32 cpu_trpgtx_etm_ram_almost_full; +} DPP_TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_ALMOST_FULL_T; + +typedef struct dpp_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_test_en_t { + ZXIC_UINT32 cpu_trpgtx_etm_ram_test_en; +} DPP_TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_TEST_EN_T; + +typedef struct dpp_trpg_trpg_tx_glb_cpu_todtime_update_int_mask_t { + ZXIC_UINT32 cpu_todtime_update_int_mask; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_MASK_T; + +typedef struct dpp_trpg_trpg_tx_glb_cpu_todtime_update_int_clr_t { + ZXIC_UINT32 cpu_todtime_update_int_clr; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_CLR_T; + +typedef struct 
dpp_trpg_trpg_tx_glb_cpu_todtime_ram_test_en_t { + ZXIC_UINT32 cpu_todtime_ram_test_en; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_RAM_TEST_EN_T; + +typedef struct dpp_trpg_trpg_tx_ram_trpg_tx_data_ram_t { + ZXIC_UINT32 trpg_tx_data_ram; +} DPP_TRPG_TRPG_TX_RAM_TRPG_TX_DATA_RAM_T; + +typedef struct dpp_trpg_trpg_tx_ram_trpg_tx_info_ram_t { + ZXIC_UINT32 trpg_tx_info_ram; +} DPP_TRPG_TRPG_TX_RAM_TRPG_TX_INFO_RAM_T; + +typedef struct dpp_trpg_trpg_tx_etm_ram_trpg_tx_etm_data_ram_t { + ZXIC_UINT32 trpg_tx_etm_data_ram; +} DPP_TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_DATA_RAM_T; + +typedef struct dpp_trpg_trpg_tx_etm_ram_trpg_tx_etm_info_ram_t { + ZXIC_UINT32 trpg_tx_etm_info_ram; +} DPP_TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_INFO_RAM_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_ms_st_t { + ZXIC_UINT32 cpu_trpgrx_ms_st; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_MS_ST_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_ms_ind_t { + ZXIC_UINT32 cpu_trpgrx_ms_ind; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_MS_IND_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpg_ms_slave_ind_t { + ZXIC_UINT32 cpu_trpgrx_ms_slave_ind; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPG_MS_SLAVE_IND_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_up_water_level_t { + ZXIC_UINT32 cpu_trpgrx_up_water_level; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_UP_WATER_LEVEL_T; + +typedef struct dpp_trpg_trpg_rx_port_cpu_trpgrx_low_water_level_t { + ZXIC_UINT32 cpu_trpgrx_low_water_level; +} DPP_TRPG_TRPG_RX_PORT_CPU_TRPGRX_LOW_WATER_LEVEL_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpg_ms_st_t { + ZXIC_UINT32 cpu_trpgtx_ms_st; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_MS_ST_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpg_ms_ind_t { + ZXIC_UINT32 cpu_trpgtx_ms_ind; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_MS_IND_T; + +typedef struct dpp_trpg_trpg_tx_port_cpu_trpg_ms_slave_ind_t { + ZXIC_UINT32 cpu_trpgtx_ms_slave_ind; +} DPP_TRPG_TRPG_TX_PORT_CPU_TRPG_MS_SLAVE_IND_T; + +typedef struct dpp_trpg_trpg_tx_glb_cpu_todtime_update_int_event_t { + ZXIC_UINT32 
cpu_todtime_update_int_event; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_EVENT_T; + +typedef struct dpp_trpg_trpg_tx_glb_cpu_todtime_update_int_test_t { + ZXIC_UINT32 cpu_todtime_update_int_test; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_TEST_T; + +typedef struct dpp_trpg_trpg_tx_glb_cpu_todtime_update_int_addr_t { + ZXIC_UINT32 cpu_todtime_update_int_addr; +} DPP_TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_ADDR_T; + +typedef struct dpp_trpg_trpg_tx_todtime_ram_trpg_tx_todtime_ram_t { + ZXIC_UINT32 trpg_tx_todtime_ram; +} DPP_TRPG_TRPG_TX_TODTIME_RAM_TRPG_TX_TODTIME_RAM_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_tsn_reg.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_tsn_reg.h new file mode 100644 index 000000000000..e90b4c774b8d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/dev/reg/dpp_tsn_reg.h @@ -0,0 +1,101 @@ + +#ifndef _DPP_TSN_REG_H_ +#define _DPP_TSN_REG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct dpp_tsn_tsn_port_cfg_tsn_test_reg_t { + ZXIC_UINT32 cfg_tsn_test_reg; +} DPP_TSN_TSN_PORT_CFG_TSN_TEST_REG_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_qbv_enable_t { + ZXIC_UINT32 cfg_tsn_port_qbv_enable; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_QBV_ENABLE_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_phy_port_sel_t { + ZXIC_UINT32 cfg_tsn_phy_port_sel; +} DPP_TSN_TSN_PORT_CFG_TSN_PHY_PORT_SEL_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_time_sel_t { + ZXIC_UINT32 cfg_tsn_port_time_sel; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_TIME_SEL_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_clk_freq_t { + ZXIC_UINT32 en; + ZXIC_UINT32 cfg_tsn_clk_freq; +} DPP_TSN_TSN_PORT_CFG_TSN_CLK_FREQ_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_read_ram_n_t { + ZXIC_UINT32 cfg_tsn_data; + ZXIC_UINT32 cfg_tsn_read_status; + ZXIC_UINT32 cfg_tsn_read_ram_n; +} DPP_TSN_TSN_PORT_CFG_TSN_READ_RAM_N_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_exe_time_t { + 
ZXIC_UINT32 cfg_tsn_exe_time; +} DPP_TSN_TSN_PORT_CFG_TSN_EXE_TIME_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_itr_shift_t { + ZXIC_UINT32 cfg_tsn_port_itr_shift; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_ITR_SHIFT_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_base_time_h_t { + ZXIC_UINT32 cfg_tsn_port_base_time_h; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_H_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_base_time_l_t { + ZXIC_UINT32 cfg_tsn_port_base_time_l; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_L_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_cycle_time_h_t { + ZXIC_UINT32 cfg_tsn_port_cycle_time_h; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_H_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_cycle_time_l_t { + ZXIC_UINT32 cfg_tsn_port_cycle_time_l; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_L_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_guard_band_time_t { + ZXIC_UINT32 cfg_tsn_port_guard_band_time; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_GUARD_BAND_TIME_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_default_gate_en_t { + ZXIC_UINT32 cfg_tsn_port_default_gate_en; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_DEFAULT_GATE_EN_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_change_gate_en_t { + ZXIC_UINT32 cfg_tsn_port_change_gate_en; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_GATE_EN_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_init_finish_t { + ZXIC_UINT32 cfg_tsn_port_init_finish; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_INIT_FINISH_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_change_en_t { + ZXIC_UINT32 cfg_tsn_port_change_en; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_EN_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_gcl_num0_t { + ZXIC_UINT32 cfg_tsn_port_gcl_num0; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM0_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_gcl_num1_t { + ZXIC_UINT32 cfg_tsn_port_gcl_num1; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM1_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_gcl_value0_t { + ZXIC_UINT32 
cfg_tsn_port_gcl_gate_control0; + ZXIC_UINT32 cfg_tsn_port_gcl_interval_time0; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE0_T; + +typedef struct dpp_tsn_tsn_port_cfg_tsn_port_gcl_value1_t { + ZXIC_UINT32 cfg_tsn_port_gcl_gate_control1; + ZXIC_UINT32 cfg_tsn_port_gcl_interval_time1; +} DPP_TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE1_T; + +#ifdef __cplusplus +} +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/include/diag/dpp_se_diag.h b/drivers/net/ethernet/dinghai/en_np/sdk/include/diag/dpp_se_diag.h new file mode 100644 index 000000000000..bb5901737047 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/include/diag/dpp_se_diag.h @@ -0,0 +1,36 @@ +/************************************************************** +* Ȩ (C)2013-2015, ͨѶɷ޹˾ +* ļ : dpp_se_diag.h +* ļʶ : +* ժҪ : +* ˵ : +* ǰ汾 : +* : XXX +* : 2016/01/14 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* ޸ļ¼1: +* ޸: +* : +* : +* ޸: +***************************************************************/ +#ifndef _DPP_SE_DIAG_H_ +#define _DPP_SE_DIAG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum diag_dpp_etcam_no_as_rsp_mode_e { + DPP_ETCAM_NO_AS_RSP_MODE_32 = 0, + DPP_ETCAM_NO_AS_RSP_MODE_64 = 1, + DPP_ETCAM_NO_AS_RSP_MODE_128 = 2, +} DPP_DIAG_ETCAM_NO_AS_RSP_MODE_E; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/Kbuild.include new file mode 100644 index 000000000000..199d45c5e5c4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/sdk/source/ +subdirs := dev/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/Kbuild.include new file mode 100644 index 000000000000..f51ee608793e --- 
/dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/sdk/source/dev/ +subdirs := reg/ module/ chip/ init/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/Kbuild.include new file mode 100644 index 000000000000..c577b572ed59 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/chip/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_dev.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_dev.c new file mode 100644 index 000000000000..06174eddddd2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_dev.c @@ -0,0 +1,1463 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dev.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: 代码规范性修改 +* 修改日期: 2014/02/10 +* 版 本 号: +* 修 改 人: 丁金凤 +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_module.h" +#include "dpp_pci.h" +#include "dpp_dev.h" +#include "dpp_type_api.h" +#include "dh_cmd.h" +#include "dpp_dtb_table.h" + +static DPP_DEV_MGR_T g_dev_mgr = { 0 }; + +#define DPP_DEV_INFO_GET(id) (g_dev_mgr.p_dev_array[id]) + +/***********************************************************/ +/** 初始化设备管理模块 +* @param ZXIC_VOID +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/03/06 +************************************************************/ 
+DPP_STATUS dpp_dev_init(ZXIC_VOID) +{ + if (g_dev_mgr.is_init) { + ZXIC_COMM_TRACE_ERROR("Dev is already initialized.\n"); + return DPP_OK; + } + + g_dev_mgr.device_num = 0; + g_dev_mgr.is_init = 1; + + return DPP_OK; +} + +/***********************************************************/ +/** +* @return +* @remark 无 +* @see +* @author XXX @date 2014/02/14 +************************************************************/ +DPP_DEV_MGR_T *dpp_dev_mgr_get(ZXIC_VOID) +{ + if (!g_dev_mgr.is_init) { + ZXIC_COMM_TRACE_ERROR("Error: dev_mgr is not init.\n"); + ZXIC_COMM_ASSERT(0); + return NULL; + } + + return &g_dev_mgr; +} + +/***********************************************************/ +/** 添加新设备实例,并初始化 +* @param dev_id 新增设备的设备号 +* @param dev_type 设备类型,取值参照DPP_DEV_TYPE_E的定义 +* @param access_type 设备访问类型,取值参照DPP_DEV_ACCESS_TYPE_E的定义 +* @param pcie_addr PCIe映射地址 +* @param riscv_addr RISCV映射地址 +* @param dma_vir_addr DMA映射地址 +* @param dma_phy_addr DMA内存物理地址 +* @param p_pcie_write_fun PCIe硬件写回调函数 +* @param p_pcie_read_fun PCIe硬件读回调函数 +* @param p_riscv_write_fun RISCV硬件写回调函数 +* @param p_riscv_read_fun RISCV硬件读回调函数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/03/06 +************************************************************/ +DPP_STATUS dpp_dev_add(ZXIC_UINT32 dev_id, DPP_DEV_TYPE_E dev_type, + DPP_DEV_ACCESS_TYPE_E access_type, ZXIC_ADDR_T pcie_addr, + ZXIC_ADDR_T riscv_addr, ZXIC_ADDR_T dma_vir_addr, + ZXIC_ADDR_T dma_phy_addr, + DPP_DEV_WRITE_FUNC p_pcie_write_fun, + DPP_DEV_READ_FUNC p_pcie_read_fun, + DPP_DEV_WRITE_FUNC p_riscv_write_fun, + DPP_DEV_READ_FUNC p_riscv_read_fun) +{ + DPP_STATUS rtn = DPP_OK; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device 
Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + if (p_dev_mgr->p_dev_array[dev_id] != NULL) { + /* device is already exist. */ + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Device is added again!!!\n"); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + } else { + /* device is new. */ + p_dev_info = (DPP_DEV_CFG_T *)ZXIC_COMM_MALLOC( + sizeof(DPP_DEV_CFG_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + ZXIC_COMM_MEMSET_S(p_dev_info, sizeof(DPP_DEV_CFG_T), 0x0, + sizeof(DPP_DEV_CFG_T)); + p_dev_mgr->p_dev_array[dev_id] = p_dev_info; + p_dev_mgr->device_num++; + } + + p_dev_info->device_id = dev_id; + p_dev_info->dev_type = dev_type; + p_dev_info->access_type = access_type; + p_dev_info->pcie_addr = pcie_addr; + p_dev_info->riscv_addr = riscv_addr; + p_dev_info->dma_vir_addr = dma_vir_addr; + p_dev_info->dma_phy_addr = dma_phy_addr; + p_dev_info->p_riscv_write_fun = NULL; + p_dev_info->p_riscv_read_fun = NULL; + p_dev_info->p_pcie_write_fun = dpp_dev_pcie_default_write; + p_dev_info->p_pcie_read_fun = dpp_dev_pcie_default_read; + ZXIC_COMM_MEMSET_S(p_dev_info->bar_msg_num, + sizeof(p_dev_info->bar_msg_num), 0xff, + sizeof(p_dev_info->bar_msg_num)); + + if (p_riscv_write_fun) { + p_dev_info->p_riscv_write_fun = p_riscv_write_fun; + } + + if (p_riscv_read_fun) { + p_dev_info->p_riscv_read_fun = p_riscv_read_fun; + } + + if (p_pcie_write_fun) { + p_dev_info->p_pcie_write_fun = p_pcie_write_fun; + } + + if (p_pcie_read_fun) { + p_dev_info->p_pcie_read_fun = p_pcie_read_fun; + } + + ZXIC_COMM_MEMSET(p_dev_info->pcie_channel, 0x00, + sizeof(p_dev_info->pcie_channel)); + + rtn = zxic_comm_mutex_create(&p_dev_info->reg_opr_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->oam_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->etm_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, 
"zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->ddr_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->car0_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->nppu_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->smmu0_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->smmu1_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->etm_2nd_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->lpm_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->crm_temp_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->sim_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->pktrx_mf_glb_cfg_mutex_0); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->pktrx_mf_glb_cfg_mutex_1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->pktrx_mf_glb_cfg_mutex_2); + 
ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + rtn = zxic_comm_mutex_create(&p_dev_info->pktrx_mf_glb_cfg_mutex_3); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + for (i = 0; i < DPP_DTB_QUEUE_MAX; i++) { + rtn = zxic_comm_mutex_create(&p_dev_info->dtb_rb_mutex[i]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + } + + for (i = 0; i < DPP_DTB_QUEUE_MAX; i++) { + rtn = zxic_comm_mutex_create(&p_dev_info->dtb_queue_mutex[i]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + } + + rtn = zxic_comm_mutex_create(&p_dev_info->self_recover_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_create"); + + return DPP_OK; +} + +/***********************************************************/ +/** 删除设备实例 +* @param dev_id 设备号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_dev_del(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rtn = DPP_OK; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + + if (p_dev_info != NULL) { + rtn = zxic_comm_mutex_destroy(&p_dev_info->reg_opr_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->oam_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->etm_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->ddr_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = 
zxic_comm_mutex_destroy(&p_dev_info->etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->car0_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->nppu_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->smmu0_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->smmu1_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->etm_2nd_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->lpm_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->crm_temp_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy(&p_dev_info->dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy( + &p_dev_info->pktrx_mf_glb_cfg_mutex_0); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy( + &p_dev_info->pktrx_mf_glb_cfg_mutex_1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy( + &p_dev_info->pktrx_mf_glb_cfg_mutex_2); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + rtn = zxic_comm_mutex_destroy( + &p_dev_info->pktrx_mf_glb_cfg_mutex_3); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + for (i = 0; i < DPP_DTB_QUEUE_MAX; i++) { + rtn = zxic_comm_mutex_destroy( + &p_dev_info->dtb_queue_mutex[i]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, + 
"zxic_comm_mutex_create"); + } + + for (i = 0; i < DPP_DTB_QUEUE_MAX; i++) { + rtn = zxic_comm_mutex_destroy( + &p_dev_info->dtb_rb_mutex[i]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, + "zxic_comm_mutex_create"); + } + + rtn = zxic_comm_mutex_destroy(&p_dev_info->self_recover_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "zxic_comm_mutex_destroy"); + + ZXIC_COMM_FREE(p_dev_info); + p_dev_mgr->p_dev_array[dev_id] = NULL; + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT( + dev_id, p_dev_mgr->device_num, 1); + p_dev_mgr->device_num--; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获取 DPP_DEV_T +* @param dev_id +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_dev_get(DPP_PF_INFO_T *pf_info, DPP_DEV_T *dev) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pf_info); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, dev); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_dev_info->pcie_channel[slot][channel_id].device); + + dev->device_id = p_dev_info->device_id; + dev->pcie_channel.is_used = + 
p_dev_info->pcie_channel[slot][channel_id].is_used; + dev->pcie_channel.slot = + p_dev_info->pcie_channel[slot][channel_id].slot; + dev->pcie_channel.vport = + p_dev_info->pcie_channel[slot][channel_id].vport; + dev->pcie_channel.pcie_id = + p_dev_info->pcie_channel[slot][channel_id].pcie_id; + dev->pcie_channel.device = + p_dev_info->pcie_channel[slot][channel_id].device; + dev->pcie_channel.base_addr = + p_dev_info->pcie_channel[slot][channel_id].base_addr; + dev->pcie_channel.offset_addr = + p_dev_info->pcie_channel[slot][channel_id].offset_addr; + dev->pcie_channel.bar_msg_num = p_dev_info->bar_msg_num[slot]; + dev->pcie_channel.dev_status = + p_dev_info->pcie_channel[slot][channel_id].dev_status; + + return DPP_OK; +} + +/***********************************************************/ +/** 校验当前dev是否为当前slot最后一个有效的pf +* @param dev +* @param last_flag 出参 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cq @date 2024/08/01 +************************************************************/ +DPP_STATUS dpp_dev_last_check(DPP_DEV_T *dev, ZXIC_UINT32 *last_flag) +{ + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 used_num = 0; + ZXIC_UINT32 channel_id = 0; + ZXIC_UINT32 vport = 0; + ZXIC_UINT32 dev_id = 0; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(last_flag); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + vport = DEV_PCIE_VPORT(dev); + channel_id = DPP_PCIE_CHANNEL_ID(vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + 
slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + for (i = 0; i < DPP_PCIE_CHANNEL_MAX; i++) { + if (p_dev_info->pcie_channel[slot][i].is_used) { + used_num++; + } + } + + if ((used_num == 1) && + p_dev_info->pcie_channel[slot][channel_id].is_used) { + *last_flag = 1; + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备的设备号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/03/06 +************************************************************/ +DPP_STATUS dpp_dev_pcie_channel_add(DPP_PF_INFO_T *pf_info, + struct pci_dev *p_dev) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_VOID *base_addr = 0; + ZXIC_UINT8 type = 0; + ZXIC_UINT32 post = 0; + ZXIC_UINT16 pcie_id = 0; + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 dma_size = DTB_SDT_DUMP_SIZE; + dma_addr_t dma_handle; + ZXIC_VOID *cpu_addr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + +#ifdef DPP_FLOW_HW_INIT + struct bar_offset_res res; + struct bar_offset_params paras; +#endif + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pf_info); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + if (p_dev_info->pcie_channel[slot][channel_id].device != NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + 
"ErrorCode[ 0x%x]: pcie slot %u vport 0x%04x already init.\n", + DPP_RC_DEV_PARA_INVALID, slot, pf_info->vport); + return DPP_RC_DEV_PARA_INVALID; + } + + base_addr = ioremap(pci_resource_start(p_dev, 0), + pci_resource_len(p_dev, 0)); + if (IS_ERR_OR_NULL(base_addr)) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: pcie slot %u vport 0x%04x ioremap failed.\n", + DPP_RC_DEV_PARA_INVALID, slot, pf_info->vport); + return DPP_RC_DEV_PARA_INVALID; + } + + for (post = pci_find_capability(p_dev, PCI_CAP_ID_VNDR); post > 0; + post = pci_find_next_capability(p_dev, post, PCI_CAP_ID_VNDR)) { + pci_read_config_byte(p_dev, post + 3, &type); + + if (type == 5) { + pci_read_config_word(p_dev, post + 6, &pcie_id); + } + } + + if (pcie_id == 0) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: pcie slot %u vport 0x%04x get pcieid failed.\n", + DPP_RC_DEV_PARA_INVALID, slot, pf_info->vport); + return DPP_RC_DEV_PARA_INVALID; + } + +#ifdef DPP_FLOW_HW_INIT + paras.type = URI_NP; + paras.pcie_id = pcie_id; + paras.virt_addr = + ZXIC_COMM_PTR_TO_VAL(base_addr) + DEV_PCIE_MSG_OFFSET_ADDR; + if (zxdh_get_bar_offset(¶s, &res) != BAR_MSG_OK) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: pcie slot %u vport 0x%04x get bar offset failed.\n", + DPP_RC_DEV_PARA_INVALID, slot, pf_info->vport); + return DPP_RC_DEV_PARA_INVALID; + } +#endif + cpu_addr = dma_alloc_coherent(&(p_dev->dev), dma_size, &dma_handle, + GFP_KERNEL); + if (!cpu_addr) { + dev_err(&(p_dev->dev), + "dma_alloc_coherent buff allocation failed\n"); + return DPP_RC_DEV_DMA_MEM_ALLOC_FAIL; + } + p_dev_info->pcie_channel[slot][channel_id].slot = slot; + p_dev_info->pcie_channel[slot][channel_id].vport = pf_info->vport; + p_dev_info->pcie_channel[slot][channel_id].pcie_id = pcie_id; + p_dev_info->pcie_channel[slot][channel_id].device = p_dev; + p_dev_info->pcie_channel[slot][channel_id].base_addr = + ZXIC_COMM_PTR_TO_VAL(base_addr); + p_dev_info->pcie_channel[slot][channel_id].dev_status = 
1; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_size = dma_size; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_phy_addr = + (ZXIC_ADDR_T)dma_handle; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_vir_addr = + (ZXIC_ADDR_T)(ZXIC_COMM_PTR_TO_VAL(cpu_addr)); + +#ifdef DPP_FLOW_HW_INIT + p_dev_info->pcie_channel[slot][channel_id].offset_addr = res.bar_offset; +#else + p_dev_info->pcie_channel[slot][channel_id].offset_addr = 0x6000; +#endif + + p_dev_info->pcie_channel[slot][channel_id].is_used = 1; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x base_addr: 0x%llx success.\n", + __FUNCTION__, slot, pf_info->vport, + ZXIC_COMM_PTR_TO_VAL(base_addr)); + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x pcie_id: 0x%04x offset_addr: 0x%llx success.\n", + __FUNCTION__, slot, pf_info->vport, pcie_id, + p_dev_info->pcie_channel[slot][channel_id].offset_addr); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备的设备号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/03/06 +************************************************************/ +DPP_STATUS dpp_dev_pcie_channel_del(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + dma_addr_t dma_handle = 0; + ZXIC_VOID *cpu_addr = NULL; + ZXIC_UINT32 dma_size = 0; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + struct pci_dev *p_dev = NULL; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pf_info); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 
0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + p_dev = p_dev_info->pcie_channel[slot][channel_id].device; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev); + + dma_handle = (dma_addr_t)(p_dev_info->pcie_channel[slot][channel_id] + .dump_dma_phy_addr); + cpu_addr = ZXIC_COMM_VAL_TO_PTR( + p_dev_info->pcie_channel[slot][channel_id].dump_dma_vir_addr); + dma_size = p_dev_info->pcie_channel[slot][channel_id].dump_dma_size; + if (dma_handle) { + dma_free_coherent(&(p_dev->dev), dma_size, cpu_addr, + dma_handle); + } + + iounmap((ZXIC_VOID *)p_dev_info->pcie_channel[slot][channel_id] + .base_addr); + + p_dev_info->pcie_channel[slot][channel_id].device = NULL; + p_dev_info->pcie_channel[slot][channel_id].slot = 0; + p_dev_info->pcie_channel[slot][channel_id].vport = 0; + p_dev_info->pcie_channel[slot][channel_id].pcie_id = 0; + p_dev_info->pcie_channel[slot][channel_id].base_addr = 0; + p_dev_info->pcie_channel[slot][channel_id].offset_addr = 0; + p_dev_info->pcie_channel[slot][channel_id].is_used = 0; + p_dev_info->pcie_channel[slot][channel_id].bar_msg_num = 0xFFFFFFFF; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_size = 0; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_phy_addr = 0; + p_dev_info->pcie_channel[slot][channel_id].dump_dma_vir_addr = 0; + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + slot, pf_info->vport); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取存储流表资源信息的指针 +* @param dev 设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/13 +************************************************************/ +ZXIC_VOID *dpp_dev_get_se_res_ptr(DPP_DEV_T *dev) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + + 
ZXIC_COMM_CHECK_POINT_RETURN_NULL(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_RETURN_NULL(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_POINT_RETURN_NULL(p_dev_info); + + slot_id = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_RETURN_NULL(dev_id, slot_id, 0, + DPP_PCIE_SLOT_MAX - 1); + + return p_dev_info->p_std_nic_res[slot_id]; +} + +/***********************************************************/ +/** 设置存储流表资源信息的指针 +* @param dev 设备 +* @param se_ptr +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/13 +************************************************************/ +ZXIC_VOID dpp_dev_set_se_res_ptr(DPP_DEV_T *dev, ZXIC_VOID *se_ptr) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + + ZXIC_COMM_CHECK_POINT_RETURN_NONE(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER_RETURN_NONE(dev_id, + DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT_RETURN_NONE(dev_id, p_dev_mgr); + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT_RETURN_NONE(dev_id, p_dev_info); + + slot_id = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_UPPER_RETURN_NONE(dev_id, slot_id, + DPP_PCIE_SLOT_MAX - 1); + p_dev_info->p_std_nic_res[slot_id] = se_ptr; +} + +/***********************************************************/ +/** 根据类型获取互斥锁 +* @param dev_id 设备号 +* @param type 互斥锁类型,取值参照DPP_DEV_MUTEX_TYPE_E的定义 +* @param p_mutex_out 返回的互斥锁指针地址 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/10/10 +************************************************************/ +DPP_STATUS dpp_dev_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + ZXIC_MUTEX_T **p_mutex_out) +{ + //DPP_STATUS rc = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + + 
ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(type, DPP_DEV_MUTEX_T_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mutex_out); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[DEV_ID(dev)]; + + if (NULL == p_dev_info) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Get dev_info[ %d ] fail!\n", DEV_ID(dev)); + return DPP_DEV_TYPE_INVALID; + } + + switch (type) { + case DPP_DEV_MUTEX_T_REG: { + *p_mutex_out = &(p_dev_info->reg_opr_mutex); + } break; + + case DPP_DEV_MUTEX_T_OAM: { + *p_mutex_out = &(p_dev_info->oam_mutex); + } break; + + case DPP_DEV_MUTEX_T_ETM: { + *p_mutex_out = &(p_dev_info->etm_mutex); + } break; + + case DPP_DEV_MUTEX_T_DDR: { + *p_mutex_out = &(p_dev_info->ddr_mutex); + } break; + + case DPP_DEV_MUTEX_T_IND: { + *p_mutex_out = &(p_dev_info->ind_mutex); + } break; + + case DPP_DEV_MUTEX_T_ETCAM: { + *p_mutex_out = &(p_dev_info->etcam_mutex); + } break; + + case DPP_DEV_MUTEX_T_CAR0: { + *p_mutex_out = &(p_dev_info->car0_mutex); + } break; + + case DPP_DEV_MUTEX_T_ALG: { + *p_mutex_out = &(p_dev_info->alg_mutex); + } break; + + case DPP_DEV_MUTEX_T_NPPU: { + *p_mutex_out = &(p_dev_info->nppu_mutex); + } break; + + case DPP_DEV_MUTEX_T_SMMU0: { + *p_mutex_out = &(p_dev_info->smmu0_mutex); + } break; + + case DPP_DEV_MUTEX_T_SMMU1: { + *p_mutex_out = &(p_dev_info->smmu1_mutex); + } break; + + case DPP_DEV_MUTEX_T_ETM_2ND: { + *p_mutex_out = &(p_dev_info->etm_2nd_mutex); + } break; + + case DPP_DEV_MUTEX_T_LPM: { + *p_mutex_out = &(p_dev_info->lpm_mutex); + } break; + + case DPP_DEV_MUTEX_T_CRM_TEMP: { + *p_mutex_out = &(p_dev_info->crm_temp_mutex); + } break; + + case DPP_DEV_MUTEX_T_SIM: { + *p_mutex_out = &(p_dev_info->sim_mutex); + } break; + + case DPP_DEV_MUTEX_T_DTB: { + *p_mutex_out = &(p_dev_info->dtb_mutex); + } break; + + case DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_0: { + 
*p_mutex_out = &(p_dev_info->pktrx_mf_glb_cfg_mutex_0); + } break; + + case DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_1: { + *p_mutex_out = &(p_dev_info->pktrx_mf_glb_cfg_mutex_1); + } break; + + case DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_2: { + *p_mutex_out = &(p_dev_info->pktrx_mf_glb_cfg_mutex_2); + } break; + + case DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_3: { + *p_mutex_out = &(p_dev_info->pktrx_mf_glb_cfg_mutex_3); + } break; + + case DPP_DEV_MUTEX_T_SELF_RECOVER: { + *p_mutex_out = &(p_dev_info->self_recover_mutex); + } break; + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "mutex type is invalid!\n"); + return DPP_ERR; + } + } + + return DPP_OK; +} + +/***********************************************************/ +DPP_STATUS dpp_dev_hash_opr_mutex_create(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 hash_id = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + if (NULL == p_dev_info) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Get dev_info[ %d ] fail!\n", + dev_id); + return DPP_DEV_TYPE_INVALID; + } + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, (DPP_PCIE_SLOT_MAX - 1)); + for (hash_id = 0; hash_id < DEV_HASH_FUNC_ID_NUM; hash_id++) { + rc = zxic_comm_mutex_create( + &p_dev_info->hash_mutex[slot][hash_id]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_create"); + } + return DPP_OK; +} +DPP_STATUS dpp_dev_hash_opr_mutex_destroy(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 hash_id = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, 
DPP_DEV_CHANNEL_MAX - 1); + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + if (NULL == p_dev_info) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Get dev_info[ %d ] fail!\n", + dev_id); + return DPP_DEV_TYPE_INVALID; + } + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, (DPP_PCIE_SLOT_MAX - 1)); + for (hash_id = 0; hash_id < DEV_HASH_FUNC_ID_NUM; hash_id++) { + rc = zxic_comm_mutex_destroy( + &p_dev_info->hash_mutex[slot][hash_id]); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_destroy"); + } + return DPP_OK; +} +/** 根据hash引擎获取红黑树的互斥锁 +* @param dev_id 设备号 +* @param fun_id +* @param p_mutex_out 0~3 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cq @date 2024/04/02 +************************************************************/ +DPP_STATUS dpp_dev_hash_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 fun_id, + ZXIC_MUTEX_T **p_mutex_out) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(fun_id, DEV_HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_mutex_out); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + + if (NULL == p_dev_info) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Get dev_info[ %d ] fail!\n", + dev_id); + return DPP_DEV_TYPE_INVALID; + } + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, (DPP_PCIE_SLOT_MAX - 1)); + *p_mutex_out = &p_dev_info->hash_mutex[slot][fun_id]; + + return DPP_OK; +} + +/***********************************************************/ +/** 根据index获取dtb对应队列的互斥锁 +* @param dev_id 设备号 +* @param type 互斥锁类型,取值参照DPP_DEV_MUTEX_TYPE_E的定义 +* @param p_mutex_out 返回的互斥锁指针地址 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* 
@see +* @author 王春雷 @date 2015/10/10 +************************************************************/ +DPP_STATUS dpp_dev_dtb_opr_mutex_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + ZXIC_UINT32 index, + ZXIC_MUTEX_T **p_mutex_out) +{ + //DPP_STATUS rc = 0; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_DEV_CFG_T *p_dev_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(type, DPP_DEV_MUTEX_T_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mutex_out); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[DEV_ID(dev)]; + + if (NULL == p_dev_info) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Get dev_info[ %d ] fail!\n", DEV_ID(dev)); + return DPP_DEV_TYPE_INVALID; + } + + switch (type) { + case DPP_DEV_MUTEX_T_DTB: { + *p_mutex_out = &(p_dev_info->dtb_queue_mutex[index]); + } break; + + case DPP_DEV_MUTEX_T_DTB_RB: { + *p_mutex_out = &(p_dev_info->dtb_rb_mutex[index]); + } break; + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "mutex type is invalid!\n"); + return DPP_ERR; + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** PCIE 默认写接口 +* @param dev_id +* @param addr +* @param size +* @param p_data +* +* @return +* @remark 无 +* @see +* @author XXX @date 2018/05/02 +************************************************************/ +DPP_STATUS dpp_dev_pcie_default_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i; + ZXIC_ADDR_T abs_addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + abs_addr = DEV_PCIE_REG_ADDR(dev); + +#ifdef MACRO_CPU64 + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64_NO_ASSERT( + DEV_ID(dev), abs_addr, (ZXIC_ADDR_T)addr); +#else + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), abs_addr, + (ZXIC_ADDR_T)addr); +#endif + 
+#ifdef DPP_FOR_AARCH64 + addr = addr - (SYS_NP_BASE_ADDR1 - DEV_PCIE_OFFSET_ADDR(dev)); + addr = X86_ADDR_2_ARRCH64(addr); + addr = addr + (SYS_NP_BASE_ADDR1 - DEV_PCIE_OFFSET_ADDR(dev)); +#endif + + abs_addr += addr; + ZXIC_COMM_TRACE_DEBUG( + "dpp_dev_pcie_default_write: write abs_addr:0x%llx\n", + abs_addr); + + for (i = 0; i < size; i++) { +#ifdef MACRO_CPU64 + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64_NO_ASSERT( + DEV_ID(dev), abs_addr, + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i)))); +#else + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), abs_addr, + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i)))); +#endif + + rc = dpp_pci_write32( + dev, abs_addr + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i))), + p_data + i); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_pci_write32"); + ZXIC_COMM_TRACE_DEBUG( + "dpp_dev_pcie_default_write: write Addr:0x%llx ,Value 0x%x\n", + (abs_addr + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i)))), + *(p_data + i)); + } + + return DPP_OK; +} + +/***********************************************************/ +/** PCIE 默认读接口 +* @param dev_id +* @param addr +* @param size +* @param p_data +* +* @return +* @remark 无 +* @see +* @author XXX @date 2018/05/02 +************************************************************/ +DPP_STATUS dpp_dev_pcie_default_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i; + ZXIC_ADDR_T abs_addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + abs_addr = DEV_PCIE_REG_ADDR(dev); + +#ifdef MACRO_CPU64 + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_64_NO_ASSERT( + DEV_ID(dev), abs_addr, (ZXIC_ADDR_T)(addr)); +#else + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), abs_addr, + (ZXIC_ADDR_T)(addr)); +#endif + +#ifdef DPP_FOR_AARCH64 + addr = addr - (SYS_NP_BASE_ADDR1 - DEV_PCIE_OFFSET_ADDR(dev)); + addr = X86_ADDR_2_ARRCH64(addr); + addr = addr + (SYS_NP_BASE_ADDR1 - DEV_PCIE_OFFSET_ADDR(dev)); +#endif + + abs_addr 
+= addr; + + for (i = 0; i < size; i++) { + rc = dpp_pci_read32( + dev, abs_addr + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i))), + p_data + i); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_pci_read32"); + + ZXIC_COMM_TRACE_DEBUG( + "dpp_dev_pcie_default_read: Read Addr:0x%llx ,Value 0x%x\n", + (abs_addr + (ZXIC_ADDR_T)(4 * ((ZXIC_ADDR_T)(i)))), + *(p_data + i)); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获取软件存储的hash index信息 +* @param dev +* @param hash_index +* +* @return +* @remark 无 +* @see +* @author cq @date 2024/12/17 +************************************************************/ +DPP_STATUS dpp_soft_hash_index_get(DPP_DEV_T *dev, ZXIC_UINT32 *hash_index) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 dev_id = 0; + + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, hash_index); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + slot = dev->pcie_channel.slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + channel_id = DPP_PCIE_CHANNEL_ID(dev->pcie_channel.vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_dev_info->pcie_channel[slot][channel_id].device); + + *hash_index = p_dev_info->pcie_channel[slot][channel_id].hash_index; + + return DPP_OK; +} + +/***********************************************************/ +/** 软件存储hash index信息 +* @param dev +* @param hash_index +* +* @return +* @remark 无 +* 
@see +* @author cq @date 2024/12/17 +************************************************************/ +DPP_STATUS dpp_soft_hash_index_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_index) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + slot = dev->pcie_channel.slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + channel_id = DPP_PCIE_CHANNEL_ID(dev->pcie_channel.vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_dev_info->pcie_channel[slot][channel_id].device); + + p_dev_info->pcie_channel[slot][channel_id].hash_index = hash_index; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取软件存储的DMA内存信息 +* @param dev +* @param p_dma_size DMA大小 +* @param p_dma_phy_addr DMA物理地址 +* @param p_dma_vir_addr DMA虚拟地址 +* +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/11 +************************************************************/ +DPP_STATUS dpp_dev_dump_dma_mem_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_dma_size, + ZXIC_UINT64 *p_dma_phy_addr, + ZXIC_UINT64 *p_dma_vir_addr) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 dev_id = 0; + + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + 
ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dma_size); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dma_phy_addr); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dma_vir_addr); + + p_dev_mgr = dpp_dev_mgr_get(); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + if (!p_dev_mgr->is_init) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[ 0x%x]: Device Manager is not init!!!\n", + DPP_RC_DEV_MGR_NOT_INIT); + return DPP_RC_DEV_MGR_NOT_INIT; + } + + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + slot = dev->pcie_channel.slot; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + channel_id = DPP_PCIE_CHANNEL_ID(dev->pcie_channel.vport); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, channel_id, 0, + DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_dev_info->pcie_channel[slot][channel_id].device); + + *p_dma_size = p_dev_info->pcie_channel[slot][channel_id].dump_dma_size; + *p_dma_phy_addr = + p_dev_info->pcie_channel[slot][channel_id].dump_dma_phy_addr; + *p_dma_vir_addr = + p_dev_info->pcie_channel[slot][channel_id].dump_dma_vir_addr; + + if ((*p_dma_size == 0) || (*p_dma_phy_addr == 0) || + (*p_dma_vir_addr == 0)) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "ErrorCode[ 0x%x]: dump dma mem get fail!!!\n", + DPP_RC_DEV_DMA_MEM_GET_FAIL); + return DPP_RC_DEV_DMA_MEM_GET_FAIL; + } + + ZXIC_COMM_MEMSET_S(ZXIC_COMM_VAL_TO_PTR(*p_dma_vir_addr), *p_dma_size, + 0x0, *p_dma_size); + + return DPP_OK; +} + +#ifndef ES_FOR_LLT +/***********************************************************/ +/** 写设备底层接口 +* @param dev_id 设备号 +* @param addr 单个设备内部的相对地址 +* @param size 数据的长度,以32bit为单位 +* @param p_data 数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/05/15 +************************************************************/ +DPP_STATUS dpp_dev_write_channel(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data) +{ + /*ZXIC_UINT32 i = 0;*/ + DPP_STATUS rtn = 0; +#if DPP_HW_OPR_EN + DPP_DEV_CFG_T 
*p_dev_info = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + +#ifdef DPP_FOR_LLT + rtn = dpp_stump_reg_rb_debug_wr(DEV_ID(dev), addr, size, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_stump_reg_rb_debug_wr"); + return DPP_OK; +#endif + +#if DPP_HW_OPR_EN + p_dev_info = DPP_DEV_INFO_GET(DEV_ID(dev)); + + if (p_dev_info == NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Error: Channel[%d] dev is not exist!\n ", + DEV_ID(dev)); + return DPP_ERR; + } else { + if (p_dev_info->access_type == DPP_DEV_ACCESS_TYPE_PCIE) { + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), + p_dev_info->p_pcie_write_fun); + rtn = p_dev_info->p_pcie_write_fun(dev, addr, size, + p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "p_dev_info->p_pcie_write_fun"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "Dev access type[ %d ] is invalid!\n", + p_dev_info->access_type); + return DPP_ERR; + } + } + +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** 读设备底层接口 +* @param dev_id 设备号 +* @param addr 单个设备内部的相对地址 +* @param size 数据的长度,以32bit为单位 +* @param p_data 数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/05/15 +************************************************************/ +DPP_STATUS dpp_dev_read_channel(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 size, ZXIC_UINT32 *p_data) +{ + DPP_STATUS rtn = 0; +#if DPP_HW_OPR_EN + DPP_DEV_CFG_T *p_dev_info = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + +#ifdef DPP_FOR_LLT + rtn = dpp_stump_reg_rb_debug_rd(DEV_ID(dev), addr, size, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_stump_reg_rb_debug_rd"); + return DPP_OK; +#endif + +#if DPP_HW_OPR_EN + p_dev_info = DPP_DEV_INFO_GET(DEV_ID(dev)); + + if (p_dev_info == NULL) { + 
ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Error: Channel[%d] dev is not exist!\n ", + DEV_ID(dev)); + return DPP_ERR; + } else { + if (p_dev_info->access_type == DPP_DEV_ACCESS_TYPE_PCIE) { + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), + p_dev_info->p_pcie_read_fun); + rtn = p_dev_info->p_pcie_read_fun(dev, addr, size, + p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "p_dev_info->p_pcie_read_fun"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "Dev access type[ %d ] is invalid!\n", + p_dev_info->access_type); + return DPP_ERR; + } + } + +#else + + for (ZXIC_UINT32 i = 0; i < size; i++) { + p_data[i] = 0xffffffff; + } + +#endif + + return DPP_OK; +} +#endif /* ES_FOR_LLT */ diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_init.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_init.c new file mode 100644 index 000000000000..cf4e16de145a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/chip/dpp_init.c @@ -0,0 +1,65 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_init.c +* 文件标识 : +* 内容摘要 : 芯片初始化源文件 +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2015/03/17 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_module.h" +#include "dpp_dev.h" +#include "dpp_ppu.h" +#include "dpp_se.h" +#include "dpp_dtb.h" +#include "dpp_init.h" +#include "dpp_drv_init.h" + +/***********************************************************/ +/** 芯片上电初始,完整版本 +* @param dev_id 设备号 +* @param p_init_ctrl 系统初始化控制数据结构,由用户完成实例化和成员赋值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_init(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rt = 0; + ZXIC_UINT32 dev_id_array[DPP_DEV_CHANNEL_MAX] = { 0 }; + + 
ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + /* 初始化设备管理模块 */ + rt = dpp_dev_init(); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rt, "dpp_dev_init"); + + rt = dpp_dev_add(dev_id, DPP_DEV_TYPE_CHIP, DPP_DEV_ACCESS_TYPE_PCIE, 0, + 0, 0, 0, NULL, NULL, NULL, NULL); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rt, "dpp_dev_add"); + + /* 初始化table模块软件部分 */ + dev_id_array[0] = dev_id; + rt = dpp_sdt_init(1, dev_id_array); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rt, "dpp_sdt_init"); + + rt = dpp_ppu_parse_cls_bitmap(dev_id, DPP_PPU_CLS_ALL_START); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rt, "dpp_ppu_parse_cls_bitmap"); + + dpp_flow_init_status_init(); + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/Kbuild.include new file mode 100644 index 000000000000..b555fad18fe2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/init/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/dpp_kernel_init.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/dpp_kernel_init.c new file mode 100644 index 000000000000..1f858540b29e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/init/dpp_kernel_init.c @@ -0,0 +1,252 @@ +#include +#include +#include +#include +#include +#include "zxic_common.h" +#include "dpp_dtb_cfg.h" +#include "dpp_kernel_init.h" +#include "dpp_dtb_table_api.h" + +/*hash dma地址信息*/ +typedef struct hash_dma_addr_info { + ZXIC_UINT32 slot_id; /*np所在的槽位号*/ + ZXIC_UINT32 dma_size; /*hash申请的dma大小*/ + dma_addr_t dma_phy_addr; /* dma 物理地址*/ + ZXIC_VOID *dma_vir_addr; /* dma 内核虚拟地址*/ +} HASH_DMA_ADDR_INFO; + +/*DMA空间信息*/ +typedef struct dpp_dma_info { + DTB_QUEUE_DMA_ADDR_INFO dtb_queue_info[DPP_DTB_QUEUE_NUM_MAX]; /*dtb队列*/ + 
HASH_DMA_ADDR_INFO hash_dma_info; /*hash上送*/ +} DPP_KERNEL_DMA_INFO; + +static DPP_KERNEL_DMA_INFO g_dpp_dma_info[DPP_PCIE_SLOT_MAX] = { 0 }; +static ZXIC_UINT32 queue_used_flag[DPP_PCIE_SLOT_MAX][4] = { 0 }; + +static ZXIC_VOID dpp_dtb_queue_dma_flag_set(ZXIC_UINT32 slot_id, + ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 bit_shift = 0; + ZXIC_UINT32 reg_shift = 0; + + reg_shift = queue_id / 32; + bit_shift = queue_id % 32; + + queue_used_flag[slot_id][reg_shift] = + queue_used_flag[slot_id][reg_shift] | (0x1 << bit_shift); +} + +static ZXIC_VOID dpp_dtb_queue_dma_flag_clear(ZXIC_UINT32 slot_id, + ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 bit_shift = 0; + ZXIC_UINT32 reg_shift = 0; + + reg_shift = queue_id / 32; + bit_shift = queue_id % 32; + + queue_used_flag[slot_id][reg_shift] = + queue_used_flag[slot_id][reg_shift] & ~(0x1 << bit_shift); +} + +static ZXIC_UINT32 dpp_dtb_queue_dma_flag_get(ZXIC_UINT32 slot_id, + ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 bit_shift = 0; + ZXIC_UINT32 reg_shift = 0; + + ZXIC_UINT32 flag = 0; + + reg_shift = queue_id / 32; + bit_shift = queue_id % 32; + + flag = (queue_used_flag[slot_id][reg_shift] >> bit_shift) & 0x1; + + ZXIC_COMM_TRACE_NOTICE("[%s]:slot %d queue %d flag %d!\n", __FUNCTION__, + slot_id, queue_id, flag); + + return flag; +} + +//申请队列DMA空间,通过传入DMA队列号,返回申请到DMA内存的物理地址 +ZXIC_SINT32 dpp_dtb_queue_dma_mem_alloc(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 size) +{ + dma_addr_t dma_handle; + ZXIC_VOID *cpu_addr = NULL; + ZXIC_UINT32 slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + slot_id = (ZXIC_UINT32)DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + if (queue_id > (DPP_DTB_QUEUE_NUM_MAX - 1)) { + return -ENOMEM; + } + + cpu_addr = dma_alloc_coherent(&(DEV_PCIE_DEV(dev)->dev), size, + &dma_handle, GFP_KERNEL); + + if (!cpu_addr) { + dev_err(&(DEV_PCIE_DEV(dev)->dev), + "dpp_dtb_queue_dma_mem_alloc buff allocation failed\n"); + return -ENOMEM; + } + + 
g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].slot_id = slot_id; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].queue_id = queue_id; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_vir_addr = + ZXIC_COMM_PTR_TO_VAL(cpu_addr); + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_phy_addr = + dma_handle; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_size = size; + + ZXIC_COMM_TRACE_NOTICE( + "[%s]:slot %d queue %d kernel phy addr :0x%016llx !\n", + __FUNCTION__, slot_id, queue_id, dma_handle); + ZXIC_COMM_TRACE_NOTICE( + "[%s]:slot %d queue %d kernel vir addr :0x%016llx !\n", + __FUNCTION__, slot_id, queue_id, + ZXIC_COMM_PTR_TO_VAL(cpu_addr)); + + dpp_dtb_queue_dma_flag_set(slot_id, queue_id); + dpp_dtb_queue_dma_flag_get(slot_id, queue_id); + + return DPP_OK; +} + +ZXIC_SINT32 dpp_dtb_queue_dma_mem_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DTB_QUEUE_DMA_ADDR_INFO *dmaAddrInfo) +{ + ZXIC_UINT32 slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + slot_id = (ZXIC_UINT32)DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + if (queue_id > (DPP_DTB_QUEUE_NUM_MAX - 1)) { + ZXIC_COMM_PRINT("[dpp_dtb_queue_dma_mem_get]:queue id max.\n"); + return -1; + } + + if (g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].queue_id != + queue_id) { + ZXIC_COMM_PRINT( + "[dpp_dtb_queue_dma_mem_get]:slot %d queue %d error !\n", + slot_id, queue_id); + return -1; + } + dmaAddrInfo->dma_phy_addr = + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_phy_addr; + dmaAddrInfo->dma_vir_addr = + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_vir_addr; + dmaAddrInfo->dma_size = + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_size; + dmaAddrInfo->queue_id = queue_id; + dmaAddrInfo->slot_id = slot_id; + + return DPP_OK; +} + +/*dtb队列dma内存释放,通过传入dtb队列号,在维护的队列地址中找到对应的虚拟地址和物理地址,进行释放*/ +ZXIC_SINT32 dpp_dtb_queue_dma_mem_release(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + dma_addr_t dma_handle = 0; + ZXIC_VOID *cpu_addr = NULL; + 
ZXIC_UINT32 dma_size = 0; + ZXIC_UINT32 slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + slot_id = (ZXIC_UINT32)DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + if (queue_id > (DPP_DTB_QUEUE_NUM_MAX - 1)) { + ZXIC_COMM_PRINT("[dpp_dtb_dma_mem_release]:queue id max.\n"); + return -1; + } + + if (g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].queue_id != + queue_id) { + ZXIC_COMM_PRINT( + "[dpp_dtb_dma_mem_release]:slot %d queue %d error !\n", + slot_id, queue_id); + return -1; + } + + dma_handle = + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_phy_addr; + cpu_addr = ZXIC_COMM_VAL_TO_PTR( + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_vir_addr); + dma_size = g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_size; + + if (!dma_handle) { + return -EFAULT; + } + + dma_free_coherent(&(DEV_PCIE_DEV(dev)->dev), dma_size, cpu_addr, + dma_handle); + + ZXIC_COMM_PRINT( + "[dpp_dtb_dma_mem_release]:slot %d queue %d release success!\n", + slot_id, queue_id); + + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].slot_id = 0; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].queue_id = 0; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_phy_addr = 0; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_vir_addr = 0; + g_dpp_dma_info[slot_id].dtb_queue_info[queue_id].dma_size = 0; + + dpp_dtb_queue_dma_flag_clear(slot_id, queue_id); + dpp_dtb_queue_dma_flag_get(slot_id, queue_id); + + return 0; +} + +ZXIC_SINT32 dtb_sdt_dump_dma_alloc(DPP_DEV_T *dev, ZXIC_UINT32 dma_size, + ZXIC_UINT64 *p_dma_phy_addr, + ZXIC_UINT64 *p_dma_vir_addr) +{ + int rc = 0; + + dma_addr_t dma_handle; + ZXIC_VOID *cpu_addr = NULL; + + cpu_addr = dma_alloc_coherent(&(DEV_PCIE_DEV(dev)->dev), dma_size, + &dma_handle, GFP_KERNEL); + + if (!cpu_addr) { + dev_err(&(DEV_PCIE_DEV(dev)->dev), + "dtb_sdt_dump_dma_alloc buff allocation failed\n"); + return -ENOMEM; + } + + *p_dma_phy_addr = (ZXIC_UINT64)dma_handle; + *p_dma_vir_addr = 
(ZXIC_UINT64)(ZXIC_COMM_PTR_TO_VAL(cpu_addr)); + + return rc; +} + +ZXIC_SINT32 dtb_sdt_dump_dma_release(DPP_DEV_T *dev, ZXIC_UINT32 dma_size, + ZXIC_UINT64 dma_phy_addr, + ZXIC_UINT64 dma_vir_addr) +{ + dma_addr_t dma_handle = 0; + ZXIC_VOID *cpu_addr = NULL; + + dma_handle = (dma_addr_t)dma_phy_addr; + cpu_addr = ZXIC_COMM_VAL_TO_PTR(dma_vir_addr); + + if (!dma_handle) { + return -EFAULT; + } + + dma_free_coherent(&(DEV_PCIE_DEV(dev)->dev), dma_size, cpu_addr, + dma_handle); + + return 0; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/Kbuild.include new file mode 100644 index 000000000000..9b841e1374b7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/sdk/source/dev/module/ +subdirs := se/ table/ dma/ se_apt/ ppu/ tm/ nppu/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/Kbuild.include new file mode 100644 index 000000000000..632e1f9bbc6e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/dma/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb.c new file mode 100644 index 000000000000..218109ae7d2d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb.c @@ -0,0 +1,2453 @@ +/************************************************************** +* 版权所有(C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dtb.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 
者 : zab +* 完成日期 : 2022/08/26 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_type_api.h" +#include "dpp_dtb_cfg.h" +#include "dpp_dtb.h" +#include "dpp_dtb_table.h" +#include "dpp_hash.h" +#include "dpp_dtb_reg.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" +#include "dpp_se_api.h" + +#if ZXIC_REAL("DTB") + +ZXIC_CHAR *g_dpp_dtb_name[] = { + "DOWN TAB", + "UP TAB", +}; + +DPP_DTB_MGR_T *p_dpp_dtb_mgr[DPP_PCIE_SLOT_MAX][DPP_DEV_CHANNEL_MAX] = { + { NULL } +}; + +/**dtb超时时间 单位us*/ +static ZXIC_UINT32 g_dtb_down_overtime = 2 * 1000; +static ZXIC_UINT32 g_dtb_dump_overtime = 5 * 1000 * 1000; + +/*配置dtb调试函数*/ +static ZXIC_UINT32 g_dtb_debug_fun_en; +static ZXIC_UINT32 g_dtb_print_en; +static ZXIC_UINT32 g_dtb_soft_perf_test; +static ZXIC_UINT32 g_dtb_hardware_perf_test; + +/*dtb下表/dump使能标记*/ +static ZXIC_UINT32 g_dtb_func_switch_en = 1; +ZXIC_UINT32 dtb_table_function_switch_get(ZXIC_VOID) +{ + return g_dtb_func_switch_en; +} + +ZXIC_UINT32 dtb_table_function_switch_enable(ZXIC_VOID) +{ + g_dtb_func_switch_en = 1; + return 0; +} +EXPORT_SYMBOL(dtb_table_function_switch_enable); + +ZXIC_UINT32 dtb_table_function_switch_disable(ZXIC_VOID) +{ + g_dtb_func_switch_en = 0; + return 0; +} +EXPORT_SYMBOL(dtb_table_function_switch_disable); + +/**使能dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_enable(ZXIC_VOID) +{ + g_dtb_debug_fun_en = 1; + return 0; +} + +/**去使能dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_disable(ZXIC_VOID) +{ + g_dtb_debug_fun_en = 0; + return 0; +} + +/**获取dtb调试函数*/ +ZXIC_UINT32 dpp_dtb_debug_fun_get(ZXIC_VOID) +{ + return g_dtb_debug_fun_en; +} + +/**使能dtb打印函数*/ +ZXIC_UINT32 dpp_dtb_prt_enable(ZXIC_VOID) +{ + g_dtb_print_en = 1; + return 0; +} + +/**去使能dtb打印函数*/ +ZXIC_UINT32 dpp_dtb_prt_disable(ZXIC_VOID) +{ + g_dtb_print_en = 0; + return 0; +} + +/**获取dtb打印函数*/ 
+ZXIC_UINT32 dpp_dtb_prt_get(ZXIC_VOID) +{ + return g_dtb_print_en; +} + +ZXIC_UINT32 dpp_dtb_soft_perf_test_set(ZXIC_UINT32 value) +{ + g_dtb_soft_perf_test = value; + return 0; +} + +ZXIC_UINT32 dpp_dtb_soft_perf_test_get(ZXIC_VOID) +{ + return g_dtb_soft_perf_test; +} + +ZXIC_UINT32 dpp_dtb_hardware_perf_test_set(ZXIC_UINT32 value) +{ + g_dtb_hardware_perf_test = value; + return 0; +} + +ZXIC_UINT32 dpp_dtb_hardware_perf_test_get(ZXIC_VOID) +{ + return g_dtb_hardware_perf_test; +} + +ZXIC_UINT32 dpp_dtb_down_table_overtime_set(ZXIC_UINT32 times_s) +{ + g_dtb_down_overtime = times_s; + return 0; +} + +ZXIC_UINT32 dpp_dtb_down_table_overtime_get(ZXIC_VOID) +{ + return g_dtb_down_overtime; +} + +ZXIC_UINT32 dpp_dtb_dump_table_overtime_set(ZXIC_UINT32 times_s) +{ + g_dtb_dump_overtime = times_s; + return 0; +} + +ZXIC_UINT32 dpp_dtb_dump_table_overtime_get(ZXIC_VOID) +{ + return g_dtb_dump_overtime; +} + +#if ZXIC_REAL("MGR") +/***********************************************************/ +/** 创建DTB的管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_create(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id) +{ + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_PCIE_SLOT_MAX - 1); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + if (p_dpp_dtb_mgr[slot_id][dev_id] != ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + slot_id, DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_EXIST; + } + + p_dpp_dtb_mgr[slot_id][dev_id] = + (DPP_DTB_MGR_T *)ZXIC_COMM_MALLOC(sizeof(DPP_DTB_MGR_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dpp_dtb_mgr[slot_id][dev_id]); + + ZXIC_COMM_MEMSET(p_dpp_dtb_mgr[slot_id][dev_id], 0, + sizeof(DPP_DTB_MGR_T)); + + ZXIC_COMM_TRACE_NOTICE("dpp_dtb_mgr_create:slot %d dev %d done!!!", + slot_id, dev_id); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 注销DTB的管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_destory_all(ZXIC_VOID) +{ + ZXIC_UINT32 slot_id = 0; + ZXIC_UINT32 dev_id = 0; + + for (slot_id = 0; slot_id < DPP_DEV_SLOT_MAX; slot_id++) { + for (dev_id = 0; dev_id < DPP_DEV_CHANNEL_MAX; dev_id++) { + dpp_dtb_mgr_destory(slot_id, dev_id); + } + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_dtb_mgr_destory(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id) +{ + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_PCIE_SLOT_MAX - 1); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + if (p_dpp_dtb_mgr[slot_id][dev_id] != ZXIC_NULL) { + ZXIC_COMM_FREE(p_dpp_dtb_mgr[slot_id][dev_id]); + p_dpp_dtb_mgr[slot_id][dev_id] = ZXIC_NULL; + ZXIC_COMM_TRACE_NOTICE( + "dpp_dtb_mgr_destory:slot %d dev %d done!!!", slot_id, + dev_id); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 重置DTB管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mgr_reset(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id) +{ + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_PCIE_SLOT_MAX - 1); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + if (p_dpp_dtb_mgr[slot_id][dev_id] == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "slot %d ErrorCode[0x%x]: dtb manager is not exist!!!\n", + slot_id, DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + ZXIC_COMM_MEMSET(p_dpp_dtb_mgr[slot_id][dev_id], 0, + sizeof(DPP_DTB_MGR_T)); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取DMA管理结构 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 
+************************************************************/ +DPP_DTB_MGR_T *dpp_dtb_mgr_get(ZXIC_UINT32 slot_id, ZXIC_UINT32 dev_id) +{ + if ((slot_id >= DPP_PCIE_SLOT_MAX) || (dev_id >= DPP_DEV_CHANNEL_MAX)) { + return ZXIC_NULL; + } else { + return p_dpp_dtb_mgr[slot_id][dev_id]; + } +} +#endif + +#if ZXIC_REAL("QUEUE_ADDR") +/***********************************************************/ +/** 获得下表队列中某元素的物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号0-127 +* @param element_id 队列中元素号0-31 +* @param p_element_start_addr_h 元素起始高32位地址 +* @param p_element_start_addr_l 元素起始低32位地址 +* @param p_element_table_addr_h 下表内容开始高32位地址 +* @param p_element_table_addr_l 下表内容开始低32位地址 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_down_table_elemet_addr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id, + ZXIC_UINT32 *p_element_start_addr_h, + ZXIC_UINT32 *p_element_start_addr_l, + ZXIC_UINT32 *p_element_table_addr_h, + ZXIC_UINT32 *p_element_table_addr_l) +{ + ZXIC_UINT32 addr_h = 0; + ZXIC_UINT32 addr_l = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + addr_h = (DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) >> + 32) & + 0xffffffff; + addr_l = DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) & + 0xffffffff; + + *p_element_start_addr_h = addr_h; + *p_element_start_addr_l = addr_l; + + addr_h = + ((DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) + + DPP_DTB_ITEM_ACK_SIZE) >> + 32) & + 0xffffffff; + addr_l = (DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) + + DPP_DTB_ITEM_ACK_SIZE) & + 0xffffffff; + + 
*p_element_table_addr_h = addr_h; + *p_element_table_addr_l = addr_l; + + return DPP_OK; +} + +/***********************************************************/ +/** 获得队列中某元素dump的物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号0-127 +* @param element_id 队列中元素号0-31 +* @param p_element_start_addr_h 元素起始高32位地址 +* @param p_element_start_addr_l 元素起始低32位地址 +* @param p_element_dump_addr_h dump描述符开始高32位地址 +* @param p_element_dump_addr_l dump描述符开始低32位地址 +* @param p_element_table_info_addr_h 表内容开始高32位地址 +* @param p_element_table_info_addr_l 表内容开始低32位地址 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_table_elemet_addr_get( + DPP_DEV_T *dev, ZXIC_UINT32 queue_id, ZXIC_UINT32 element_id, + ZXIC_UINT32 *p_element_start_addr_h, + ZXIC_UINT32 *p_element_start_addr_l, ZXIC_UINT32 *p_element_dump_addr_h, + ZXIC_UINT32 *p_element_dump_addr_l, + ZXIC_UINT32 *p_element_table_info_addr_h, + ZXIC_UINT32 *p_element_table_info_addr_l) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 addr_h = 0; + ZXIC_UINT32 addr_l = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + addr_h = ((DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id)) >> + 32) & + 0xffffffff; + addr_l = (DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id)) & + 0xffffffff; + + *p_element_start_addr_h = addr_h; + *p_element_start_addr_l = addr_l; + + addr_h = ((DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) + + DPP_DTB_ITEM_ACK_SIZE) >> + 32) & + 0xffffffff; + addr_l = (DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, element_id) + + DPP_DTB_ITEM_ACK_SIZE) & + 0xffffffff; + + *p_element_dump_addr_h = addr_h; + 
*p_element_dump_addr_l = addr_l; + + rc = dpp_dtb_tab_up_item_addr_get(dev, queue_id, element_id, + p_element_table_info_addr_h, + p_element_table_info_addr_l); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("MEM_RW") +/***********************************************************/ +/** 内存32bits写入函数 +* @param dev_id 设备号,支持多芯片 +* @param addr 地址偏移 +* @param data 写入数据 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_wr32(DPP_DEV_T *dev, ZXIC_ADDR_T addr, ZXIC_UINT32 data) +{ + ZXIC_UINT32 value = data; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), data, 0, ZXIC_UINT32_MAX); + + if (!zxic_comm_is_big_endian()) { + value = ZXIC_COMM_CONVERT32(value); + } + + *((ZXIC_VOL ZXIC_UINT32 *)(addr)) = value; + + return DPP_OK; +} + +/***********************************************************/ +/** 内存32bits读取函数 +* @param dev_id 设备号,支持多芯片 +* @param addr 读取地址 +* @param p_data 读取数据 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_rd32(DPP_DEV_T *dev, ZXIC_ADDR_T addr, ZXIC_UINT32 *p_data) +{ + ZXIC_UINT32 value = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + // dpp_flush_cache();//片内DDR使用 + + value = *((ZXIC_VOL ZXIC_UINT32 *)(addr)); + + if (!zxic_comm_is_big_endian()) { + value = ZXIC_COMM_CONVERT32(value); + } + + *p_data = value; + + return DPP_OK; +} +#endif + +#if ZXIC_REAL("ACK_RW") +/***********************************************************/ +/** 读取BD表条目信息 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 
一个item里面的4个32位,pos对应的是第几个ZXICP_WORD32, +* 取值为0,1,2,3 +* @param p_data 读取的数据,大端格式 +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_ack_rd(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 *p_data) +{ + ZXIC_UINT32 rc = 0; + ZXIC_ADDR_T addr = 0; + ZXIC_UINT32 val = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pos, 0, 3); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (dir_flag == DPP_DTB_DIR_UP_TYPE) { + addr = DPP_DTB_TAB_UP_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + pos * 4; + } else { + addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + pos * 4; + } + + rc = dpp_dtb_rd32(dev, addr, &val); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_rd32"); + + *p_data = val; + + return DPP_OK; +} + +/***********************************************************/ +/** 向BD表条目指定位置写入值 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 一个item里面的4个32位,pos对应的是第几个ZXICP_WORD32, +* 取值为0,1,2,3 +* @param data 读取的数据,大端格式 +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_ack_wr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + 
ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 data) +{ + ZXIC_UINT32 rc = 0; + ZXIC_ADDR_T addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pos, 0, 3); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (dir_flag == DPP_DTB_DIR_UP_TYPE) { + addr = DPP_DTB_TAB_UP_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + pos * 4; + } else { + addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + pos * 4; + } + + rc = dpp_dtb_wr32(dev, addr, data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_wr32"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印队列指定条目ACK信息 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,0-down,1-up +* @param index 条目索引 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_ack_prt(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 ack_data[4] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), index, 0, + 
DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + for (i = 0; i < DPP_DTB_ITEM_ACK_SIZE / 4; i++) { + rc = dpp_dtb_item_ack_rd(dev, queue_id, dir_flag, index, i, + ack_data + i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_item_ack_rd"); + } + + ZXIC_COMM_PRINT("\n=====> [%s] BD INFO:", g_dpp_dtb_name[dir_flag]); + ZXIC_COMM_PRINT("\n[ index : %u] : 0x%08x 0x%08x 0x%08x 0x%08x \n", + index, ack_data[0], ack_data[1], ack_data[2], + ack_data[3]); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印队列指定条目BUFF的数据 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,0-down,1-up +* @param index 条目索引 +* @param len 读取数据长度 4字节为单位 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_buff_prt(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 *p_item_buff = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + p_item_buff = ZXIC_COMM_MALLOC((len * sizeof(ZXIC_UINT32)) % + ZXIC_COMM_WORD32_MASK); + if (p_item_buff == ZXIC_NULL) { + ZXIC_COMM_PRINT("Alloc dtb item buffer faild!!!\n"); + return DPP_RC_DTB_MEMORY_ALLOC_ERR; + } + + ZXIC_COMM_MEMSET(p_item_buff, 0, len * sizeof(ZXIC_UINT32)); + + rc = dpp_dtb_item_buff_rd(dev, queue_id, dir_flag, index, 0, len, + p_item_buff); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE(DEV_ID(dev), rc, + "dpp_dtb_item_buff_rd", p_item_buff); + + ZXIC_COMM_PRINT("\n=====> [%s] BUFF INFO:", g_dpp_dtb_name[dir_flag]); + for (i = 0, j 
= 0; i < len; i++, j++) { + if (j % 4 == 0) { + ZXIC_COMM_PRINT("\n0x%08x ", (*(p_item_buff + i))); + } else { + ZXIC_COMM_PRINT("0x%08x ", (*(p_item_buff + i))); + } + } + ZXIC_COMM_PRINT("\n"); + + ZXIC_COMM_FREE(p_item_buff); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("BUFF_RW") + +/***********************************************************/ +/** 读取dtb条目指向BUFF的数据 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 相对BUFF起始地址的偏移,单位32bit; +* @param p_data 读取的数据,大端格式 +* @param len 读取数据长度,单位32bit; +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_buff_rd(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data) +{ + ZXIC_ADDR_T addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pos, 0, 3); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (dir_flag == DPP_DTB_DIR_UP_TYPE) { + if (DPP_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, index) == + DPP_DTB_TAB_UP_USER_ADDR_TYPE) { + addr = DPP_DTB_TAB_UP_USER_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, index) + + pos * 4; + DPP_DTB_TAB_UP_USER_ADDR_FLAG_SET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index, + 0); //为什么设置为0? 
+ } else { + addr = DPP_DTB_TAB_UP_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), + queue_id, index) + + DPP_DTB_ITEM_ACK_SIZE + pos * 4; + } + } else { + addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + DPP_DTB_ITEM_ACK_SIZE + pos * 4; + } + + ZXIC_COMM_MEMCPY_S(p_data, len * 4, (ZXIC_UINT8 *)(addr), len * 4); + + zxic_comm_swap((ZXIC_UINT8 *)p_data, len * 4); + + return DPP_OK; +} + +/***********************************************************/ +/** 向BD表条目指向的BUFF指定位置写入值 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param dir_flag 方向,1-上送表项,0-下发表项 +* @param index 条目索引,范围0-31 +* @param pos 相对BUFF起始地址的偏移,单位32bit; +* @param p_data 读取的数据,大端格式 +* @param len 写入数据长度,单位32bit; +* +* @return +* @remark 无 +* @see +* @author zab @date 2019/11/2 +************************************************************/ +ZXIC_UINT32 dpp_dtb_item_buff_wr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 dir_flag, ZXIC_UINT32 index, + ZXIC_UINT32 pos, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data) +{ + ZXIC_ADDR_T addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), dir_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pos, 0, 3); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (dir_flag == DPP_DTB_DIR_UP_TYPE) { + addr = DPP_DTB_TAB_UP_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) + + DPP_DTB_ITEM_ACK_SIZE + pos * 4; + } else { + addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + index) 
+ + DPP_DTB_ITEM_ACK_SIZE + pos * 4; + } + + ZXIC_COMM_MEMCPY_S((ZXIC_UINT8 *)(addr), len * 4, p_data, len * 4); + + // dpp_flush_cache();//片内ddr使用 + + return DPP_OK; +} +#endif + +#if ZXIC_REAL("API") +#if ZXIC_REAL("TAB_DOWN") + +/** dtb info print*/ +ZXIC_UINT32 dpp_dtb_info_print(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + DPP_DTB_QUEUE_ITEM_INFO_T *item_info) +{ + ZXIC_ADDR_T element_start_addr = 0; + ZXIC_ADDR_T ack_start_addr = 0; + ZXIC_ADDR_T data_addr = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_PRINT( + "dpp_dtb_info_print: slot %d queue: %d, element:%d, %s table info is:\n", + DEV_PCIE_SLOT(dev), queue_id, item_index, + (item_info->cmd_type) ? "up" : "down"); + ZXIC_COMM_PRINT("cmd_vld : %d\n", item_info->cmd_vld); + ZXIC_COMM_PRINT("cmd_type : %s\n", + (item_info->cmd_type) ? "up" : "down"); + ZXIC_COMM_PRINT("int_en : %d\n", item_info->int_en); + ZXIC_COMM_PRINT("data_len : %d\n", item_info->data_len); + ZXIC_COMM_PRINT("data_hddr : 0x%08x\n", item_info->data_hddr); + ZXIC_COMM_PRINT("data_laddr : 0x%08x\n", item_info->data_laddr); + + if (item_info->cmd_type == DPP_DTB_DIR_UP_TYPE) { + if (DPP_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, + item_index) == DPP_DTB_TAB_UP_USER_ADDR_TYPE) { + ack_start_addr = DPP_DTB_TAB_UP_USER_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, + item_index); + } + ack_start_addr = DPP_DTB_TAB_UP_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index); + element_start_addr = DPP_DTB_TAB_UP_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) + + DPP_DTB_ITEM_ACK_SIZE; + } else { + ack_start_addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index); + element_start_addr = DPP_DTB_TAB_DOWN_VIR_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) + + DPP_DTB_ITEM_ACK_SIZE; + } + ZXIC_COMM_PRINT("dtb data:\n"); + + ZXIC_COMM_PRINT( + "ack info: 
0x%08x 0x%08x 0x%08x 0x%08x \n", + ZXIC_COMM_CONVERT32(*((ZXIC_UINT32 *)(ack_start_addr + 4 * 0))), + ZXIC_COMM_CONVERT32(*((ZXIC_UINT32 *)(ack_start_addr + 4 * 1))), + ZXIC_COMM_CONVERT32(*((ZXIC_UINT32 *)(ack_start_addr + 4 * 2))), + ZXIC_COMM_CONVERT32( + *((ZXIC_UINT32 *)(ack_start_addr + 4 * 3)))); + + for (i = 0; i < item_info->data_len; i++) { + data_addr = element_start_addr + 16 * i; //16字节为一行 + + ZXIC_COMM_PRINT("row_%d:", i); + ZXIC_COMM_PRINT("0x%08x 0x%08x 0x%08x 0x%08x ", + ZXIC_COMM_CONVERT32( + *((ZXIC_UINT32 *)(data_addr + 4 * 0))), + ZXIC_COMM_CONVERT32( + *((ZXIC_UINT32 *)(data_addr + 4 * 1))), + ZXIC_COMM_CONVERT32( + *((ZXIC_UINT32 *)(data_addr + 4 * 2))), + ZXIC_COMM_CONVERT32( + *((ZXIC_UINT32 *)(data_addr + 4 * 3)))); + + ZXIC_COMM_PRINT("\n"); + } + + ZXIC_COMM_PRINT("dpp dtb info print end.\n"); + return DPP_OK; +} + +/***********************************************************/ +/** 配置下发配置数据信息 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param int_flag 中断标志,0-无,1-有 +* @param data_len 数据长度,单位32bit; +* @param p_data 待下发数据 +* @param p_item_index 返回使用的条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_down_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 int_flag, + ZXIC_UINT32 data_len, ZXIC_UINT32 *p_data, + ZXIC_UINT32 *p_item_index) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 queue_en = 0; + ZXIC_UINT32 ack_vale = 0; + ZXIC_UINT32 item_index = 0; + ZXIC_UINT32 unused_item_num = 0; + DPP_DTB_QUEUE_ITEM_INFO_T item_info = { 0 }; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), int_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), data_len, 4, 0xffc); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_item_index); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_dtb_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + /* + * 流程 + * 0.检测队列是否使能 + * 1.检测当前队列是否已初始化; + * 2.获取硬件队列剩余情况,大于0正常下发,等于0返回失败; + * 3.获取软件缓存空闲情况; + * 4.将下表配置数据填入buff中; + * 5.将ack字段填入0x11111100; + * 6.将数据信息填入硬件触发寄存器中; + * + */ +#if 0 + if (dpp_dtb_mode_is_debug(dev)) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "the queue %d is open debug mode!", queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_OPEN_DEBUG_MODE; + } +#endif + + rc = dpp_dtb_queue_enable_get(dev, queue_id, &queue_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_queue_enable_get", p_mutex); + if (!queue_en) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "the slot %d queue %d is not enable!", + DEV_PCIE_SLOT(dev), queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_NOT_ENABLE; + } + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (data_len % 4 != 0) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + /* 硬件规定数据必须是16字节为单位 */ + return DPP_RC_DTB_PARA_INVALID; + } + + rc = dpp_dtb_queue_unused_item_num_get(dev, queue_id, &unused_item_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, 
"dpp_dtb_queue_unused_item_num_get", p_mutex); + + if (unused_item_num == 0) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_ITEM_HW_EMPTY; + } + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + item_index = DPP_DTB_TAB_DOWN_WR_INDEX_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), + queue_id) % + DPP_DTB_QUEUE_ITEM_NUM_MAX; + + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + item_index, 0, &ack_vale); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_item_ack_rd", p_mutex); + + DPP_DTB_TAB_DOWN_WR_INDEX_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) + ++; + + if ((ack_vale >> 8) == DPP_DTB_TAB_ACK_UNUSED_MASK) { + break; + } + } + + if (i == DPP_DTB_QUEUE_ITEM_NUM_MAX) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_ITEM_SW_EMPTY; + } + + rc = dpp_dtb_item_buff_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + item_index, 0, data_len, p_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_item_buff_wr", p_mutex); + + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + item_index, 0, DPP_DTB_TAB_ACK_IS_USING_MASK); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_dtb_item_ack_wr", p_mutex); + + item_info.cmd_vld = 1; + item_info.cmd_type = DPP_DTB_DIR_DOWN_TYPE; + item_info.int_en = int_flag; + item_info.data_len = data_len / 4; + item_info.data_hddr = + ((DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) >> + 4) >> + 32) & + 0xffffffff; + item_info.data_laddr = + (DPP_DTB_TAB_DOWN_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) >> + 4) & + 0xffffffff; + + if (item_info.data_len < DPP_DTB_LEN_MIN || + item_info.data_len > DPP_DTB_DOWN_LEN) { + ZXIC_COMM_PRINT("DTB DATA_LEN :0x%08x.\n", item_info.data_len); + rc = dpp_dtb_item_ack_wr(dev, 
queue_id, DPP_DTB_DIR_DOWN_TYPE, + item_index, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_PARA_INVALID; + } + + if (dpp_dtb_prt_get()) { + dpp_dtb_info_print(dev, queue_id, item_index, &item_info); + } + + if (dpp_dtb_soft_perf_test_get()) { + *p_item_index = item_index; + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_OK; + } + + rc = dpp_dtb_queue_item_info_set(dev, queue_id, &item_info); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_queue_item_info_set", p_mutex); + *p_item_index = item_index; + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印下表队列中指定元素的地址、ACK及下表中前768bit的数据 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param element_id 元素号,范围0-31 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_down_table_element_info_prt(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_UINT32 element_start_addr_h = 0; + ZXIC_UINT32 element_start_addr_l = 0; + ZXIC_UINT32 element_table_addr_h = 0; + ZXIC_UINT32 element_table_addr_l = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + rc = dpp_dtb_down_table_elemet_addr_get(dev, queue_id, element_id, + &element_start_addr_h, + &element_start_addr_l, + &element_table_addr_h, + &element_table_addr_l); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + 
"dpp_dtb_elemet_addr_get"); + + ZXIC_COMM_DBGCNT32_PRINT("slot_id", DEV_PCIE_SLOT(dev)); + ZXIC_COMM_DBGCNT32_PRINT("queue_id", queue_id); + ZXIC_COMM_DBGCNT32_PRINT("element_id", element_id); + ZXIC_COMM_DBGCNT32_PRINT("element_start_addr_h", element_start_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("element_start_addr_l", element_start_addr_l); + ZXIC_COMM_DBGCNT32_PRINT("element_table_addr_h", element_table_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("element_table_addr_l", element_table_addr_l); + + /*打印element ack*/ + rc = dpp_dtb_item_ack_prt(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_item_ack_prt"); + + rc = dpp_dtb_item_buff_prt(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id, 24); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_item_buff_prt"); + + return DPP_OK; +} + +/***********************************************************/ +/** 一个元素down成功状态检查 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param element_id 条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_down_success_status_check(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 rd_cnt = 0; + ZXIC_UINT32 ack_value = 0; + ZXIC_UINT32 success_flag = 0; + ZXIC_UINT32 dtb_interrupt_status = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + dtb_interrupt_status = dpp_dtb_interrupt_status_get(); + + if (dpp_dtb_soft_perf_test_get() || dpp_dtb_hardware_perf_test_get()) { + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + return rc; + } + + if (dpp_dtb_debug_fun_get()) { + return DPP_OK; + } + + while (!success_flag) { + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id, 0, &ack_value); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_rd"); + + ZXIC_COMM_TRACE_DEBUG("dpp_dtb_item_ack_rd ack_value:0x%08x\n", + ack_value); + + if (((ack_value >> 8) & 0xffffff) == + DPP_DTB_TAB_DOWN_ACK_VLD_MASK) { + success_flag = 1; + break; + } + + if (rd_cnt > dpp_dtb_down_table_overtime_get()) { + ZXIC_COMM_TRACE_ERROR( + "Error!!! dpp dtb down slot [%d] vport [0x%x] queue [%d] item [%d] ack success is overtime!\n", + DEV_PCIE_SLOT(dev), DEV_PCIE_VPORT(dev), + queue_id, element_id); + + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + ZXIC_COMM_PRINT( + " dtb down table info \n"); + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + rc = dpp_dtb_down_table_element_info_prt(dev, queue_id, + element_id); + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "dpp_dtb_down_table_element_info_prt"); + + rc = dpp_dtb_item_ack_wr(dev, queue_id, + DPP_DTB_DIR_DOWN_TYPE, + element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_item_ack_wr"); + + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + ZXIC_COMM_PRINT( + " dtb reg info \n"); + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + + rc = diag_dpp_dtb_axi_last_operate_info_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, + "diag_dpp_dtb_axi_last_operate_info_prt"); + + rc = diag_dpp_dtb_channels_state_info_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, "diag_dpp_dtb_channels_state_info_prt"); + + rc = diag_dpp_dtb_channels_axi_resp_err_cnt_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, + "diag_dpp_dtb_channels_axi_resp_err_cnt_prt"); + + return 
DPP_RC_DTB_OVER_TIME; + } + + rd_cnt++; + zxic_comm_udelay(1); + } + + if (dtb_interrupt_status) { + /*清中断*/ + rc = dpp_dtb_finish_interrupt_event_state_clr(dev, queue_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_dtb_finish_interrupt_event_state_clr"); + } + + if ((ack_value & 0xff) != DPP_DTB_TAB_ACK_SUCCESS_MASK) { + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + return ack_value & 0xff; + } + + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + element_id, 0, DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + + return rc; +} + +#endif +#if ZXIC_REAL("TAB_UP") +/***********************************************************/ +/** dump队列空闲条目获取 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param p_item_index 返回使用的条目编号 +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_free_item_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 *p_item_index) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 ack_vale = 0; + ZXIC_UINT32 item_index = 0; + ZXIC_UINT32 unused_item_num = 0; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_item_index); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_dtb_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + /* + * 流程 + * 1.获取硬件队列剩余情况,大于0正常上送,等于0返回失败; + * 2.获取软件缓存空闲情况; + * + */ + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), 
DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + rc = dpp_dtb_queue_unused_item_num_get(dev, queue_id, &unused_item_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_queue_unused_item_num_get", p_mutex); + + if (unused_item_num == 0) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_ITEM_HW_EMPTY; + } + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + item_index = DPP_DTB_TAB_UP_WR_INDEX_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), + queue_id) % + DPP_DTB_QUEUE_ITEM_NUM_MAX; + + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + item_index, 0, &ack_vale); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_item_ack_rd", p_mutex); + + DPP_DTB_TAB_UP_WR_INDEX_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) + ++; + + if ((ack_vale >> 8) == DPP_DTB_TAB_ACK_UNUSED_MASK) { + break; + } + } + + if (i == DPP_DTB_QUEUE_ITEM_NUM_MAX) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_ITEM_SW_EMPTY; + } + + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, item_index, + 0, DPP_DTB_TAB_ACK_IS_USING_MASK); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_item_buff_wr", p_mutex); + + *p_item_index = item_index; + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取dump指定条目物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param p_phy_haddr 物理地址高32bit +* @param p_phy_laddr 物理地址低32bit 
+* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_addr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 *p_phy_haddr, + ZXIC_UINT32 *p_phy_laddr) +{ + ZXIC_ADDR_T addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_phy_haddr); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_phy_laddr); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (DPP_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index) == + DPP_DTB_TAB_UP_USER_ADDR_TYPE) { + addr = DPP_DTB_TAB_UP_USER_PHY_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index); + } else { + addr = DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + item_index) + + DPP_DTB_ITEM_ACK_SIZE; + } + + /*将地址转换成16字节为单位*/ + // addr = addr >> 4; + + *p_phy_haddr = (addr >> 32) & 0xffffffff; + *p_phy_laddr = addr & 0xffffffff; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取指定dump条目一定地址偏移的物理地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param p_phy_haddr 物理地址高32bit +* @param p_phy_laddr 物理地址低32bit +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_offset_addr_get(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 addr_offset, + ZXIC_UINT32 
*p_phy_haddr, + ZXIC_UINT32 *p_phy_laddr) +{ + ZXIC_ADDR_T addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_phy_haddr); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_phy_laddr); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (DPP_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index) == + DPP_DTB_TAB_UP_USER_ADDR_TYPE) { + addr = DPP_DTB_TAB_UP_USER_PHY_ADDR_GET( + DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, item_index); + } else { + addr = DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id, + item_index) + + DPP_DTB_ITEM_ACK_SIZE; + } + + addr = addr + addr_offset; + + *p_phy_haddr = (addr >> 32) & 0xffffffff; + *p_phy_laddr = addr & 0xffffffff; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置dump指定条目空间地址,用于用户自定义空间传输 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* @param phy_haddr 物理地址高 +* @param vir_laddr 虚拟地址低 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_user_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_ADDR_T phy_addr, + ZXIC_ADDR_T vir_addr) +{ + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), 
item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].phy_addr = + phy_addr; + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].vir_addr = + vir_addr; + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].user_flag = + DPP_DTB_TAB_UP_USER_ADDR_TYPE; + + return DPP_OK; +} + +/***********************************************************/ +/** 清除用户dump指定条目空间地址 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列编号,范围:0-127 +* @param item_index 条目编号,范围0-31 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_item_user_addr_clr(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index) +{ + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + 
ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].phy_addr = + 0; + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].vir_addr = + 0; + p_dtb_mgr->queue_info[queue_id].tab_up.user_addr[item_index].user_flag = + DPP_DTB_TAB_UP_NOUSER_ADDR_TYPE; + + return DPP_OK; +} + +/***********************************************************/ +/** dump配置描述符信息设置 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param item_index 返回使用的条目编号 +* @param int_flag 中断标志,0-无,1-有 +* @param data_len 数据长度,单位32bit; +* @param desc_len 描述符长度,单位32bit; +* @param p_desc_data 待下发描述符 +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 int_flag, ZXIC_UINT32 data_len, + ZXIC_UINT32 desc_len, + ZXIC_UINT32 *p_desc_data) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 queue_en = 0; + DPP_DTB_QUEUE_ITEM_INFO_T item_info = { 0 }; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), desc_len, 0, 0x400); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), int_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_desc_data); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_dtb_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + /* + * 流程 + * 0.检测队列是否使能 + * 1.获取硬件队列剩余情况,大于0正常下发,等于0返回失败; + * 2.获取软件缓存空闲情况; + * 
3.将dump描述符写入buff中; + * 4.将ack字段填入0x11111100; + * 5.将数据信息填入硬件触发寄存器中; + * + */ +#if 0 + if (dpp_dtb_mode_is_debug(dev)) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "the queue %d is open debug mode!", queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_OPEN_DEBUG_MODE; + } +#endif + + rc = dpp_dtb_queue_enable_get(dev, queue_id, &queue_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_queue_enable_get", p_mutex); + if (!queue_en) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "the slot %d queue %d is not enable!", + DEV_PCIE_SLOT(dev), queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "the queue %d is not enable!", queue_id); + return DPP_RC_DTB_QUEUE_NOT_ENABLE; + } + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (desc_len % 4 != 0) { + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + /* 硬件规定数据必须是16字节为单位 */ + return DPP_RC_DTB_PARA_INVALID; + } + + rc = dpp_dtb_item_buff_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + item_index, 0, desc_len, p_desc_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_item_buff_wr", p_mutex); + + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, item_index, + 0, DPP_DTB_TAB_ACK_IS_USING_MASK); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_dtb_item_ack_wr", p_mutex); + + DPP_DTB_TAB_UP_DATA_LEN_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), queue_id, + item_index) = data_len; + + item_info.cmd_vld = 1; + 
item_info.cmd_type = DPP_DTB_DIR_UP_TYPE; + item_info.int_en = int_flag; + item_info.data_len = desc_len / 4; + item_info.data_hddr = + ((DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) >> + 4) >> + 32) & + 0xffffffff; + item_info.data_laddr = + (DPP_DTB_TAB_UP_PHY_ADDR_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id, item_index) >> + 4) & + 0xffffffff; + + if (dpp_dtb_prt_get()) { + dpp_dtb_info_print(dev, queue_id, item_index, &item_info); + } + + rc = dpp_dtb_queue_item_info_set(dev, queue_id, &item_info); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_dtb_queue_item_info_set", p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印队列中指定元素的dump地址、ACK及下表中前768bit的数据 +* @param dev_id 芯片的id号 +* @param queue_id 队列号,范围0-127 +* @param element_id 元素号,范围0-31 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_table_element_info_prt(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_UINT32 element_start_addr_h = 0; + ZXIC_UINT32 element_start_addr_l = 0; + ZXIC_UINT32 element_dump_addr_h = 0; + ZXIC_UINT32 element_dump_addr_l = 0; + ZXIC_UINT32 element_table_info_addr_h = 0; + ZXIC_UINT32 element_table_info_addr_l = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + rc = dpp_dtb_dump_table_elemet_addr_get( + dev, queue_id, element_id, &element_start_addr_h, + &element_start_addr_l, &element_dump_addr_h, + &element_dump_addr_l, &element_table_info_addr_h, + &element_table_info_addr_l); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_dump_table_elemet_addr_get"); + + ZXIC_COMM_DBGCNT32_PRINT("slot_id", DEV_PCIE_SLOT(dev)); + ZXIC_COMM_DBGCNT32_PRINT("queue_id", queue_id); + ZXIC_COMM_DBGCNT32_PRINT("element_id", element_id); + ZXIC_COMM_DBGCNT32_PRINT("element_start_addr_h", element_start_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("element_start_addr_l", element_start_addr_l); + ZXIC_COMM_DBGCNT32_PRINT("element_dump_addr_h", element_dump_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("element_dump_addr_l", element_dump_addr_l); + ZXIC_COMM_DBGCNT32_PRINT("element_table_info_addr_h", + element_table_info_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("element_table_info_addr_l", + element_table_info_addr_l); + + /*打印element ack*/ + rc = dpp_dtb_item_ack_prt(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_item_ack_prt"); + + rc = dpp_dtb_item_buff_prt(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + element_id, 32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_item_buff_prt"); + + return DPP_OK; +} + +/***********************************************************/ +/** 一个元素dump成功状态检查 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param element_id 条目编号 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_success_status_check(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 element_id) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 rd_cnt = 0; + ZXIC_UINT32 ack_value = 0; + ZXIC_UINT32 success_flag = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), element_id, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + + if (dpp_dtb_soft_perf_test_get() || dpp_dtb_hardware_perf_test_get()) { + return rc; + } + 
+ while (!success_flag) { + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + element_id, 0, &ack_value); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_rd"); + + if ((((ack_value >> 8) & 0xffffff) == + DPP_DTB_TAB_UP_ACK_VLD_MASK) && + ((ack_value & 0xff) == DPP_DTB_TAB_ACK_SUCCESS_MASK)) { + success_flag = 1; + break; + } + + if (rd_cnt > dpp_dtb_dump_table_overtime_get()) { + ZXIC_COMM_TRACE_ERROR( + "Error!!! dpp dtb dump slot [%d] vport [0x%x] queue [%d] item [%d] ack success is overtime!\n", + DEV_PCIE_SLOT(dev), DEV_PCIE_VPORT(dev), + queue_id, element_id); + + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + ZXIC_COMM_PRINT( + " dtb dump table info \n"); + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + rc = dpp_dtb_dump_table_element_info_prt(dev, queue_id, + element_id); + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "dpp_dtb_dump_table_element_info_prt"); + + rc = dpp_dtb_item_ack_wr(dev, queue_id, + DPP_DTB_DIR_UP_TYPE, + element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_item_ack_wr"); + + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + ZXIC_COMM_PRINT( + " dtb reg info \n"); + ZXIC_COMM_PRINT( + "-------------------------------------------------------------------\n"); + + rc = diag_dpp_dtb_axi_last_operate_info_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, + "diag_dpp_dtb_axi_last_operate_info_prt"); + + rc = diag_dpp_dtb_channels_state_info_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, "diag_dpp_dtb_channels_state_info_prt"); + + rc = diag_dpp_dtb_channels_axi_resp_err_cnt_prt(dev); + ZXIC_COMM_CHECK_DEV_RC( + 0, rc, + "diag_dpp_dtb_channels_axi_resp_err_cnt_prt"); + + return DPP_ERR; + } + + rd_cnt++; + zxic_comm_udelay(1); + } + + return rc; +} + +/***********************************************************/ +/** 获取dump数据 +* @param dev_id 
设备号,支持多芯片 +* @param queue_id 队列号,范围0-31 +* @param item_index 数据对应的的条目编号 +* @param data_len 数据长度,单位32bit; +* @param p_data dump数据 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_tab_up_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 item_index, + ZXIC_UINT32 data_len, ZXIC_UINT32 *p_data) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_index, 0, + DPP_DTB_QUEUE_ITEM_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) == 0) { + ZXIC_COMM_TRACE_ERROR("dtb slot %d queue %d is not init.\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_QUEUE_IS_NOT_INIT; + } + + if (dpp_dtb_hardware_perf_test_get()) { + return rc; + } + + rc = dpp_dtb_item_buff_rd(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + item_index, 0, data_len, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_buff_rd"); + + if (dpp_dtb_debug_fun_get()) { + return DPP_OK; + } + + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, item_index, + 0, DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** dtb队列down初始化 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号 +* @param p_queue_cfg 队列配置参数,具体见DPP_DTB_QUEUE_CFG_T结构体类型 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_down_init(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_CFG_T *p_queue_cfg) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 ack_vale = 0; + ZXIC_UINT32 
tab_down_item_size = 0; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_queue_cfg); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + /* + * 流程: + * 1.寻找空闲队列; + * 2.若找到: + * 1)配置硬件信息; + * 2)配置软件缓存信息; + * 3)初始化条目ack缓存信息,空闲约定:up-0x000000XX,down-0x000000XX; + * 忙约定:up-0x111111XX,down-0x111111XX; + * 完成约定:up-0x555555XX,down-0x5a5a5aXX; + * 3.若未找到:返回错误信息; + */ + + p_dtb_mgr->queue_info[queue_id].init_flag = 1; + p_dtb_mgr->queue_info[queue_id].slot_id = DEV_PCIE_SLOT(dev); + p_dtb_mgr->queue_info[queue_id].vport = DEV_PCIE_VPORT(dev); + + tab_down_item_size = (p_queue_cfg->down_item_size == 0) ? + DPP_DTB_ITEM_SIZE : + p_queue_cfg->down_item_size; + + p_dtb_mgr->queue_info[queue_id].tab_down.item_size = tab_down_item_size; + p_dtb_mgr->queue_info[queue_id].tab_down.start_phy_addr = + p_queue_cfg->down_start_phy_addr; + p_dtb_mgr->queue_info[queue_id].tab_down.start_vir_addr = + p_queue_cfg->down_start_vir_addr; + p_dtb_mgr->queue_info[queue_id].tab_down.wr_index = 0; + p_dtb_mgr->queue_info[queue_id].tab_down.rd_index = 0; + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + i, 0, DPP_DTB_TAB_ACK_CHECK_VALUE); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + } + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_DOWN_TYPE, + i, 0, &ack_vale); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_rd"); + if (ack_vale != DPP_DTB_TAB_ACK_CHECK_VALUE) { + ZXIC_COMM_PRINT( + "dtb slot [%d] queue [%d] down init faild, mem err!!!\n", + DEV_PCIE_SLOT(dev), 
queue_id); + return DPP_RC_DTB_MEMORY_ALLOC_ERR; + } + } + + ZXIC_COMM_TRACE_NOTICE( + "dtb slot [%d] queue [%d] down init success!!!\n", + DEV_PCIE_SLOT(dev), queue_id); + + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)(p_queue_cfg->down_start_vir_addr), 0, + tab_down_item_size * DPP_DTB_QUEUE_ITEM_NUM_MAX); + + return DPP_OK; +} + +/***********************************************************/ +/** dtb队列dump初始化 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列号 +* @param p_queue_cfg 队列配置参数,具体见DPP_DTB_QUEUE_CFG_T结构体类型 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_dump_init(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_CFG_T *p_queue_cfg) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 ack_vale = 0; + ZXIC_UINT32 tab_up_item_size = 0; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + ZXIC_UINT32 slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_queue_cfg); + + slot_id = (ZXIC_UINT32)DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(slot_id, DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + /* + * 流程: + * 1.寻找空闲队列; + * 2.若找到: + * 1)配置硬件信息; + * 2)配置软件缓存信息; + * 3)初始化条目ack缓存信息,空闲约定:up-0x000000XX,down-0x000000XX; + * 忙约定:up-0x111111XX,down-0x111111XX; + * 完成约定:up-0x555555XX,down-0x5a5a5aXX; + * 3.若未找到:返回错误信息; + */ + + p_dtb_mgr->queue_info[queue_id].init_flag = 1; + p_dtb_mgr->queue_info[queue_id].slot_id = DEV_PCIE_SLOT(dev); + p_dtb_mgr->queue_info[queue_id].vport = DEV_PCIE_VPORT(dev); + + tab_up_item_size = (p_queue_cfg->up_item_size == 0) ? 
+ DPP_DTB_ITEM_SIZE : + p_queue_cfg->up_item_size; + + p_dtb_mgr->queue_info[queue_id].tab_up.item_size = tab_up_item_size; + p_dtb_mgr->queue_info[queue_id].tab_up.start_phy_addr = + p_queue_cfg->up_start_phy_addr; + p_dtb_mgr->queue_info[queue_id].tab_up.start_vir_addr = + p_queue_cfg->up_start_vir_addr; + p_dtb_mgr->queue_info[queue_id].tab_up.wr_index = 0; + p_dtb_mgr->queue_info[queue_id].tab_up.rd_index = 0; + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + rc = dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, i, + 0, DPP_DTB_TAB_ACK_CHECK_VALUE); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_wr"); + } + + for (i = 0; i < DPP_DTB_QUEUE_ITEM_NUM_MAX; i++) { + rc = dpp_dtb_item_ack_rd(dev, queue_id, DPP_DTB_DIR_UP_TYPE, i, + 0, &ack_vale); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_item_ack_rd"); + if (ack_vale != DPP_DTB_TAB_ACK_CHECK_VALUE) { + ZXIC_COMM_PRINT( + "dtb slot [%d] queue [%d] init faild, mem err!!!\n", + DEV_PCIE_SLOT(dev), queue_id); + return DPP_RC_DTB_MEMORY_ALLOC_ERR; + } + } + + ZXIC_COMM_TRACE_NOTICE("dtb slot [%d] queue [%d] up init success!!!\n", + DEV_PCIE_SLOT(dev), queue_id); + + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)(p_queue_cfg->up_start_vir_addr), 0, + tab_up_item_size * DPP_DTB_QUEUE_ITEM_NUM_MAX); + + return DPP_OK; +} + +/***********************************************************/ +/** dtb队列down 空间地址配置 +* @param channelId dtb通道号 +* @param phyAddr down物理地址 +* @param virAddr down虚拟地址 +* @param size 空间大小 0:使用系统默认值16K+16 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_down_channel_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 channelId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr, ZXIC_UINT32 size) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_QUEUE_CFG_T down_queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + down_queue_cfg.down_start_phy_addr = phyAddr; + down_queue_cfg.down_start_vir_addr = virAddr; + 
down_queue_cfg.down_item_size = size; + + rc = dpp_dtb_queue_down_init(dev, channelId, &down_queue_cfg); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_down_init"); + + return rc; +} + +/***********************************************************/ +/** dtb队列dump 空间地址配置 +* @param channelId dtb通道号 +* @param phyAddr dump物理地址 +* @param virAddr dump虚拟地址 +* @param size 空间大小 +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_channel_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 channelId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr, ZXIC_UINT32 size) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_QUEUE_CFG_T dump_queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + dump_queue_cfg.up_start_phy_addr = phyAddr; + dump_queue_cfg.up_start_vir_addr = virAddr; + dump_queue_cfg.up_item_size = size; + + rc = dpp_dtb_queue_dump_init(dev, channelId, &dump_queue_cfg); + + return rc; +} + +/***********************************************************/ +/** 释放队列资源 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 分配到的队列号; +* +* @return +* @remark 无 +* @see +* @author zab @date 2021/02/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_free(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 item_num = 0; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + rc = dpp_dtb_queue_unused_item_num_get(dev, queue_id, &item_num); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + 
"dpp_dtb_queue_unused_item_num_get"); + + if (item_num != DPP_DTB_QUEUE_ITEM_NUM_MAX) { + return DPP_RC_DTB_QUEUE_IS_WORKING; + } + + p_dtb_mgr->queue_info[queue_id].init_flag = 0; + + ZXIC_COMM_MEMSET(&(p_dtb_mgr->queue_info[queue_id].tab_up), 0, + sizeof(DPP_DTB_TAB_UP_INFO_T)); + ZXIC_COMM_MEMSET(&(p_dtb_mgr->queue_info[queue_id].tab_down), 0, + sizeof(DPP_DTB_TAB_DOWN_INFO_T)); + + return DPP_OK; +} + +/***********************************************************/ +/** dtb初始化 +* @param dev_id 设备号,支持多芯片 +* +* @return +* @remark 无 +* @see +* @author zab @date 2022/08/30 +************************************************************/ +ZXIC_UINT32 dpp_dtb_init(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + rc = dpp_dtb_mgr_create(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_mgr_create"); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** 根据vport查找相应的队列号 +* @param dev_id 设备号,支持多芯片 +* @param vport vport信息 +* @param p_queue_arr 找到到队列数组 +* @param p_num 找到的队列个数 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/13 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_search_by_vport(DPP_DEV_T *dev, + ZXIC_UINT32 *p_queue_arr, + ZXIC_UINT32 *p_num) +{ + ZXIC_UINT32 queue_id = 0; + ZXIC_UINT32 count = 0; + + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_queue_arr); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) 
{ + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + for (queue_id = 0; queue_id < DPP_DTB_QUEUE_NUM_MAX; queue_id++) { + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), DEV_ID(dev), + queue_id) != 0) { + if (DEV_PCIE_VPORT(dev) == + DPP_DTB_QUEUE_VPORT_GET(DEV_PCIE_SLOT(dev), + DEV_ID(dev), queue_id)) { + p_queue_arr[count] = queue_id; + count++; + } + } + } + + if (count == 0) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: vport 0x%04x no queue not found!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_QUEUE_NOT_ALLOC, + DEV_PCIE_VPORT(dev)); + return DPP_RC_DTB_QUEUE_NOT_ALLOC; + } + + *p_num = count; + + return DPP_OK; +} + +/***********************************************************/ +/** 根据vport查找相应的队列号 +* @param dev_id 设备号,支持多芯片 +* @param vport vport信息 +* @param p_queue_arr 找到到队列数组 +* @param p_num 找到的队列个数 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/13 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_id_get(DPP_DEV_T *dev, ZXIC_UINT32 *queue) +{ + ZXIC_UINT32 num = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_UINT32 queue_arr[DPP_DTB_QUEUE_NUM_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + rc = dpp_dtb_queue_id_search_by_vport(dev, queue_arr, &num); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_id_search_by_vport"); + + *queue = queue_arr[0]; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取当前队列有效标识 +* @param dev 设备 +* @param queue 队列id +* @param init_flag 出参 0:当前队列未被使用 1:当前队列已被vport使用 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_valid_flag_get(DPP_DEV_T *dev, ZXIC_UINT32 queue, + ZXIC_UINT32 *valid_flag) +{ + ZXIC_UINT32 vport = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 
slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(valid_flag); + + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + + slot_id = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + vport = DEV_PCIE_VPORT(dev); + *valid_flag = 0; + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), dev_id, queue) && + (DPP_DTB_QUEUE_VPORT_GET(DEV_PCIE_SLOT(dev), dev_id, queue) == + vport)) { + *valid_flag = 1; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获取当前队列初始化标识 +* @param dev 设备 +* @param queue 队列id +* @param init_flag 出参 0:未初始化 1:已初始化 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_init_flag_get(DPP_DEV_T *dev, ZXIC_UINT32 queue, + ZXIC_UINT32 *init_flag) +{ + ZXIC_UINT32 vport = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(init_flag); + + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + + slot_id = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot_id, 0, DPP_DEV_SLOT_MAX - 1); + + vport = DEV_PCIE_VPORT(dev); + *init_flag = 0; + if (DPP_DTB_QUEUE_INIT_FLAG_GET(DEV_PCIE_SLOT(dev), dev_id, queue)) { + *init_flag = 1; + } + + return DPP_OK; +} + +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb_cfg.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb_cfg.c new file mode 100644 index 000000000000..716cc8fa7f24 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/dma/dpp_dtb_cfg.c @@ -0,0 +1,1254 @@ +/************************************************************** +* 版权所有(C)2013-2015, 
深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_dtb_cfg.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : zab +* 完成日期 : 2022/08/23 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_cfg_reg.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_reg.h" +#include "dpp_reg_info.h" +#include "dpp_dtb_cfg.h" +#include "dpp_dtb4k_reg.h" +#include "dpp_dtb_reg.h" + +#define DTB_DEBUG_VALUE (0x5A) +#define DPP_DTB_SPACE_LEFT_MASK (0x3F) + +#if ZXIC_REAL("DTB_CFG") +/***********************************************************/ +/** DTB队列元素信息配置 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_item_info 队列元素配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_item_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_ITEM_INFO_T *p_item_info) +{ + ZXIC_UINT32 rc = 0; + DPP_DTB4K_DTB_ENQ_CFG_QUEUE_DTB_LEN_0_127_T dtb_len = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_item_info); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_item_info->cmd_vld, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_item_info->cmd_type, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_item_info->int_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_item_info->data_len, + DPP_DTB_LEN_MIN, DPP_DTB_DOWN_LEN); + + rc = dpp_reg_write(dev, DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_H_0_127r, 0, + queue_id, &(p_item_info->data_hddr)); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_L_0_127r, 0, + queue_id, 
&(p_item_info->data_laddr)); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + dtb_len.cfg_dtb_cmd_type = p_item_info->cmd_type; + dtb_len.cfg_dtb_cmd_int_en = p_item_info->int_en; + dtb_len.cfg_queue_dtb_len = p_item_info->data_len; + + rc = dpp_reg_write(dev, DTB4K_DTB_ENQ_CFG_QUEUE_DTB_LEN_0_127r, 0, + queue_id, &dtb_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取DTB队列中剩余未使用的条目数量 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_item_num 剩余未使用条目数量 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_unused_item_num_get(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 *p_item_num) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_item_num); + + rc = dpp_reg_read(dev, DTB4K_DTB_ENQ_INFO_QUEUE_BUF_SPACE_LEFT_0_127r, + 0, queue_id, p_item_num); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + if ((*p_item_num & DPP_DTB_SPACE_LEFT_MASK) == + DPP_DTB_SPACE_LEFT_MASK) { + ZXIC_COMM_TRACE_ERROR( + "pcie bar abnormal, get dtb space left false.\n"); + return ZXIC_PAR_CHK_BAR_ABNORMAL; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置队列VM相关信息 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param p_vm_info VM配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_vm_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_VM_INFO_T *p_vm_info) +{ + ZXIC_UINT32 rc = 0; + DPP_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = { 0 
}; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_vm_info); + + vm_info.dbi_en = p_vm_info->dbi_en; + vm_info.queue_en = p_vm_info->queue_en; + vm_info.cfg_epid = p_vm_info->epid; + vm_info.cfg_vector = p_vm_info->vector; + vm_info.cfg_vfunc_num = p_vm_info->vfunc_num; + vm_info.cfg_func_num = p_vm_info->func_num; + vm_info.cfg_vfunc_active = p_vm_info->vfunc_active; + + rc = dpp_reg_write(dev, DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127r, 0, + queue_id, &vm_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取队列VM配置信息 +* @param dev_id оƬid +* @param queue_id 队列ID,范围0-127 +* @param p_vm_info VM配置信息 +* +* @return +* @remark 无 +* @see +* @author zab @date 2018/08/23 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_vm_info_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + DPP_DTB_QUEUE_VM_INFO_T *p_vm_info) +{ + ZXIC_UINT32 rc = 0; + DPP_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_vm_info); + + rc = dpp_reg_read(dev, DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127r, 0, + queue_id, &vm_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_vm_info->dbi_en = vm_info.dbi_en; + p_vm_info->queue_en = vm_info.queue_en; + p_vm_info->epid = vm_info.cfg_epid; + p_vm_info->vector = vm_info.cfg_vector; + p_vm_info->vfunc_num = vm_info.cfg_vfunc_num; + p_vm_info->func_num = vm_info.cfg_func_num; + p_vm_info->vfunc_active = vm_info.cfg_vfunc_active; + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置队列使能 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param enable 1:队列使能,0:队列去使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/27 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_enable_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DTB_QUEUE_VM_INFO_T vm_info = { 0 }; + + rc = dpp_dtb_queue_vm_info_get(dev, queue_id, &vm_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_vm_info_get"); + + vm_info.queue_en = enable; + rc = dpp_dtb_queue_vm_info_set(dev, queue_id, &vm_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_vm_info_set"); + + ZXIC_COMM_TRACE_INFO("dtb queue [%d] enable_set [%d] success.\n", + queue_id, enable); + + return rc; +} + +/***********************************************************/ +/** 获取队列使能状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param enable 1:队列使能,0:队列去使能 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2023/09/27 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_enable_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 *enable) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DTB_QUEUE_VM_INFO_T vm_info = { 0 }; + + rc = dpp_dtb_queue_vm_info_get(dev, queue_id, &vm_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_vm_info_get"); + + *enable = vm_info.queue_en; + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_queue_enable_get queue %d enable: %d success.\n", + queue_id, *enable); + + return rc; +} + +/***********************************************************/ +/** 配置 dtb 完成中断事件状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* @param state 中断事件状态,1-发生中断,0-无中断发生 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/12 +************************************************************/ +ZXIC_UINT32 dpp_dtb_finish_interrupt_event_state_set(DPP_DEV_T *dev, + ZXIC_UINT32 
queue_id, + ZXIC_UINT32 state) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 bit_shift = 0; + ZXIC_UINT32 reg_shift = 0; + ZXIC_UINT32 rd_value = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), state, 0, 1); + + reg_shift = queue_id / 32; + bit_shift = queue_id % 32; + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CFG_FINISH_INT_EVENT0r + reg_shift, + 0, 0, &rd_value); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + ZXIC_COMM_UINT32_WRITE_BITS(rd_value, state, bit_shift, 1); + + rc = dpp_reg_write(dev, DTB_DTB_CFG_CFG_FINISH_INT_EVENT0r + reg_shift, + 0, 0, &rd_value); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 清除 dtb 完成中断事件状态 +* @param dev_id 芯片id +* @param queue_id 队列ID,范围0-127 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/12 +************************************************************/ +ZXIC_UINT32 dpp_dtb_finish_interrupt_event_state_clr(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + rc = dpp_dtb_finish_interrupt_event_state_set(dev, queue_id, 1); + ZXIC_COMM_CHECK_RC_NO_ASSERT( + rc, "dpp_dtb_finish_interrupt_event_state_set"); + + return rc; +} + +/***********************************************************/ +/** 获取DTB debug模式配置 +* @param dev_id 芯片id +* @param p_debug_mode 调试模式 当为0x5a时,dtb的ram均可读写 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_debug_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_debug_mode) 
+{ + ZXIC_UINT32 rc = 0; + DPP_DTB_DTB_CFG_CFG_DTB_DEBUG_MODE_EN_T dtb_debug_mode = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_debug_mode); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CFG_DTB_DEBUG_MODE_ENr, 0, 0, + &dtb_debug_mode); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_debug_mode = dtb_debug_mode.cfg_dtb_debug_mode_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 判断dtb是否在debug模式 +* @param dev_id 芯片的id号 +* +* @return 1:DTB为debug模式; 0:非debug模式 +* @remark 无 +* @see +* @author cbb @date 2023/6/10 +************************************************************/ +ZXIC_UINT32 dpp_dtb_mode_is_debug(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dtb_mode = 0; + + ZXIC_COMM_CHECK_POINT(dev); + rc = dpp_dtb_debug_mode_get(dev, &dtb_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_debug_mode_get"); + + if (DTB_DEBUG_VALUE == dtb_mode) { + return 1; + } + + return 0; +} + +#if ZXIC_REAL("AXIM_READ_TABLE_DEBUG") + +/***********************************************************/ +/** 读取axi 最近一次读表相关信息 +* @param dev_id 芯片id +* @param p_last_rd_table_addr_h axim最近一次读表高地址 +* @param p_last_rd_table_addr_l axim最近一次读表低地址 +* @param p_last_rd_table_len axim最近一次读表长度 +* @param p_last_rd_table_user axim最近一次读表USER信号 +* @param p_last_rd_table_onload_cnt axim最近一次读表在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_rd_table_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_rd_table_addr_h, + ZXIC_UINT32 *p_last_rd_table_addr_l, ZXIC_UINT32 *p_last_rd_table_len, + ZXIC_UINT32 *p_last_rd_table_user, + ZXIC_UINT32 *p_last_rd_table_onload_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_HIGH_T rd_table_addr_high = { + 0 + }; + 
DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_LOW_T rd_table_addr_low = { + 0 + }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_LEN_T rd_table_len = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T rd_table_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ONLOAD_CNT_T + rd_table_onload_cnt = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_table_addr_h); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_table_addr_l); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_table_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_table_user); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_table_onload_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_HIGHr, 0, + 0, &rd_table_addr_high); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_LOWr, 0, + 0, &rd_table_addr_low); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_LENr, 0, 0, + &rd_table_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USERr, 0, 0, + &rd_table_user); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ONLOAD_CNTr, + 0, 0, &rd_table_onload_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_last_rd_table_addr_h = + rd_table_addr_high.info_axi_last_rd_table_addr_high; + *p_last_rd_table_addr_l = + rd_table_addr_low.info_axi_last_rd_table_addr_low; + *p_last_rd_table_len = rd_table_len.info_axi_last_rd_table_len; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T *) + p_last_rd_table_user)) + .info_rd_table_user_en = rd_table_user.info_rd_table_user_en; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T *) + p_last_rd_table_user)) + .info_rd_table_epid = 
rd_table_user.info_rd_table_epid; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T *) + p_last_rd_table_user)) + .info_rd_table_vfunc_num = + rd_table_user.info_rd_table_vfunc_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T *) + p_last_rd_table_user)) + .info_rd_table_func_num = rd_table_user.info_rd_table_func_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T *) + p_last_rd_table_user)) + .info_rd_table_vfunc_active = + rd_table_user.info_rd_table_vfunc_active; + *p_last_rd_table_onload_cnt = + rd_table_onload_cnt.info_axi_last_rd_table_onload_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 读表通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_rd_table_resp_err_cnt 读表通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_rd_table_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_rd_table_resp_err_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_CNT_AXI_RD_TABLE_RESP_ERR_T rd_table_resp_err_cnt = { + 0 + }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_axi_rd_table_resp_err_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CNT_AXI_RD_TABLE_RESP_ERRr, 0, 0, + &rd_table_resp_err_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_axi_rd_table_resp_err_cnt = + rd_table_resp_err_cnt.cnt_axi_rd_table_resp_err; + + return DPP_OK; +} +#endif + +#if ZXIC_REAL("AXIM_READ_PD_DEBUG") +/***********************************************************/ +/** 读取axi 最近一次读PD相关信息 +* @param dev_id 芯片id +* @param p_last_rd_pd_addr_h axim最近一次读PD高地址 +* @param p_last_rd_pd_addr_l axim最近一次读PD低地址 +* @param p_last_rd_pd_len axim最近一次读PD长度 +* @param p_last_rd_pd_user axim最近一次读PD USER信号 +* @param p_last_rd_pd_onload_cnt axim最近一次读PD在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 
+************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_rd_pd_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_rd_pd_addr_h, + ZXIC_UINT32 *p_last_rd_pd_addr_l, ZXIC_UINT32 *p_last_rd_pd_len, + ZXIC_UINT32 *p_last_rd_pd_user, ZXIC_UINT32 *p_last_rd_pd_onload_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_HIGH_T rd_pd_addr_high = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_LOW_T rd_pd_addr_low = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_LEN_T rd_pd_len = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T rd_pd_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ONLOAD_CNT_T rd_pd_onload_cnt = { + 0 + }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_pd_addr_h); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_pd_addr_l); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_pd_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_pd_user); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_rd_pd_onload_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_HIGHr, 0, 0, + &rd_pd_addr_high); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_LOWr, 0, 0, + &rd_pd_addr_low); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_LENr, 0, 0, + &rd_pd_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USERr, 0, 0, + &rd_pd_user); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ONLOAD_CNTr, 0, + 0, &rd_pd_onload_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_last_rd_pd_addr_h = rd_pd_addr_high.info_axi_last_rd_pd_addr_high; + *p_last_rd_pd_addr_l = rd_pd_addr_low.info_axi_last_rd_pd_addr_low; + 
*p_last_rd_pd_len = rd_pd_len.info_axi_last_rd_pd_len; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T *)p_last_rd_pd_user)) + .info_rd_pd_user_en = rd_pd_user.info_rd_pd_user_en; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T *)p_last_rd_pd_user)) + .info_rd_pd_epid = rd_pd_user.info_rd_pd_epid; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T *)p_last_rd_pd_user)) + .info_rd_pd_vfunc_num = rd_pd_user.info_rd_pd_vfunc_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T *)p_last_rd_pd_user)) + .info_rd_pd_func_num = rd_pd_user.info_rd_pd_func_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T *)p_last_rd_pd_user)) + .info_rd_pd_vfunc_active = rd_pd_user.info_rd_pd_vfunc_active; + *p_last_rd_pd_onload_cnt = + rd_pd_onload_cnt.info_axi_last_rd_pd_onload_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 读PD通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_rd_pd_resp_err_cnt 读PD通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_rd_pd_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_rd_pd_resp_err_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_CNT_AXI_RD_PD_RESP_ERR_T rd_pd_resp_err_cnt = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_axi_rd_pd_resp_err_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CNT_AXI_RD_PD_RESP_ERRr, 0, 0, + &rd_pd_resp_err_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_axi_rd_pd_resp_err_cnt = rd_pd_resp_err_cnt.cnt_axi_rd_pd_resp_err; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("AXI_WRITE_CTRL_DEBUG") + +/***********************************************************/ +/** 读取axim 最近一次写控制相关信息 +* @param dev_id 芯片id +* @param p_last_wr_ctrl_addr_h axim最近一次写控制高地址 +* @param p_last_wr_ctrl_addr_l axim最近一次写控制低地址 +* @param p_last_wr_ctrl_len axim最近一次写控制长度 +* 
@param p_last_wr_ctrl_user axim最近一次写控制 USER信号 +* @param p_last_wr_ctrl_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_ctrl_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_ctrl_addr_h, + ZXIC_UINT32 *p_last_wr_ctrl_addr_l, ZXIC_UINT32 *p_last_wr_ctrl_len, + ZXIC_UINT32 *p_last_wr_ctrl_user, + ZXIC_UINT32 *p_last_wr_ctrl_onload_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_HIGH_T wr_ctrl_addr_high = { + 0 + }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_LOW_T wr_ctrl_addr_low = { + 0 + }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_LEN_T wr_ctrl_len = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T wr_ctrl_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ONLOAD_CNT_T wr_ctrl_onload_cnt = { + 0 + }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ctrl_addr_h); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ctrl_addr_l); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ctrl_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ctrl_user); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ctrl_onload_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_HIGHr, 0, + 0, &wr_ctrl_addr_high); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_LOWr, 0, + 0, &wr_ctrl_addr_low); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_LENr, 0, 0, + &wr_ctrl_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USERr, 0, 0, + &wr_ctrl_user); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ONLOAD_CNTr, 
0, + 0, &wr_ctrl_onload_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_last_wr_ctrl_addr_h = + wr_ctrl_addr_high.info_axi_last_wr_ctrl_addr_high; + *p_last_wr_ctrl_addr_l = + wr_ctrl_addr_low.info_axi_last_wr_ctrl_addr_low; + *p_last_wr_ctrl_len = wr_ctrl_len.info_axi_last_wr_ctrl_len; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T *)p_last_wr_ctrl_user)) + .info_wr_ctrl_user_en = wr_ctrl_user.info_wr_ctrl_user_en; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T *)p_last_wr_ctrl_user)) + .info_wr_ctrl_epid = wr_ctrl_user.info_wr_ctrl_epid; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T *)p_last_wr_ctrl_user)) + .info_wr_ctrl_vfunc_num = wr_ctrl_user.info_wr_ctrl_vfunc_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T *)p_last_wr_ctrl_user)) + .info_wr_ctrl_func_num = wr_ctrl_user.info_wr_ctrl_func_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T *)p_last_wr_ctrl_user)) + .info_wr_ctrl_vfunc_active = + wr_ctrl_user.info_wr_ctrl_vfunc_active; + *p_last_wr_ctrl_onload_cnt = + wr_ctrl_onload_cnt.info_axi_last_wr_ctrl_onload_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取写控制通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_ctrl_resp_err_cnt 写控制通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_wr_ctrl_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_ctrl_resp_err_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_CNT_AXI_WR_CTRL_RESP_ERR_T wr_ctrl_resp_err_cnt = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_axi_wr_ctrl_resp_err_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CNT_AXI_WR_CTRL_RESP_ERRr, 0, 0, + &wr_ctrl_resp_err_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_axi_wr_ctrl_resp_err_cnt = + 
wr_ctrl_resp_err_cnt.cnt_axi_wr_ctrl_resp_err; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("AXI_WRITE_DDR_DEBUG") +/***********************************************************/ +/** 读取axim 最近一次写DDR相关信息 +* @param dev_id 芯片id +* @param p_last_wr_ddr_addr_h axim最近一次写控制高地址 +* @param p_last_wr_ddr_addr_l axim最近一次写控制低地址 +* @param p_last_wr_ddr_len axim最近一次写控制长度 +* @param p_last_wr_ddr_user axim最近一次写控制 USER信号 +* @param p_last_wr_ddr_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_ddr_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_ddr_addr_h, + ZXIC_UINT32 *p_last_wr_ddr_addr_l, ZXIC_UINT32 *p_last_wr_ddr_len, + ZXIC_UINT32 *p_last_wr_ddr_user, ZXIC_UINT32 *p_last_wr_ddr_onload_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_HIGH_T wr_ddr_addr_high = { + 0 + }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_LOW_T wr_ddr_addr_low = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_LEN_T wr_ddr_len = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T wr_ddr_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ONLOAD_CNT_T wr_ddr_onload_cnt = { + 0 + }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ddr_addr_h); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ddr_addr_l); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ddr_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ddr_user); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_ddr_onload_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_HIGHr, 0, + 0, &wr_ddr_addr_high); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_LOWr, 0, 0, + &wr_ddr_addr_low); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, 
DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_LENr, 0, 0, + &wr_ddr_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USERr, 0, 0, + &wr_ddr_user); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ONLOAD_CNTr, 0, + 0, &wr_ddr_onload_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_last_wr_ddr_addr_h = wr_ddr_addr_high.info_axi_last_wr_ddr_addr_high; + *p_last_wr_ddr_addr_l = wr_ddr_addr_low.info_axi_last_wr_ddr_addr_low; + *p_last_wr_ddr_len = wr_ddr_len.info_axi_last_wr_ddr_len; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T *)p_last_wr_ddr_user)) + .info_wr_ddr_user_en = wr_ddr_user.info_wr_ddr_user_en; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T *)p_last_wr_ddr_user)) + .info_wr_ddr_epid = wr_ddr_user.info_wr_ddr_epid; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T *)p_last_wr_ddr_user)) + .info_wr_ddr_vfunc_num = wr_ddr_user.info_wr_ddr_vfunc_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T *)p_last_wr_ddr_user)) + .info_wr_ddr_func_num = wr_ddr_user.info_wr_ddr_func_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T *)p_last_wr_ddr_user)) + .info_wr_ddr_vfunc_active = + wr_ddr_user.info_wr_ddr_vfunc_active; + *p_last_wr_ddr_onload_cnt = + wr_ddr_onload_cnt.info_axi_last_wr_ddr_onload_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取写DDR通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_ddr_resp_err_cnt 写DDR通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 +dpp_dtb_axi_wr_ddr_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_ddr_resp_err_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_CNT_AXI_WR_DDR_RESP_ERR_T wr_ddr_resp_err_cnt = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); 
+ ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_axi_wr_ddr_resp_err_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CNT_AXI_WR_DDR_RESP_ERRr, 0, 0, + &wr_ddr_resp_err_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_axi_wr_ddr_resp_err_cnt = + wr_ddr_resp_err_cnt.cnt_axi_wr_ddr_resp_err; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("AXIM_WRITE_FINISH_DEBUG") + +/***********************************************************/ +/** 读取axim 最近一次写完成相关信息 +* @param dev_id 芯片id +* @param p_last_wr_fin_addr_h axim最近一次写控制高地址 +* @param p_last_wr_fin_addr_l axim最近一次写控制低地址 +* @param p_last_wr_fin_len axim最近一次写控制长度 +* @param p_last_wr_fin_user axim最近一次写控制 USER信号 +* @param p_last_wr_fin_onload_cnt axim最近一次写控制在线计数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_axi_last_wr_fin_info_get( + DPP_DEV_T *dev, ZXIC_UINT32 *p_last_wr_fin_addr_h, + ZXIC_UINT32 *p_last_wr_fin_addr_l, ZXIC_UINT32 *p_last_wr_fin_len, + ZXIC_UINT32 *p_last_wr_fin_user, ZXIC_UINT32 *p_last_wr_fin_onload_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_HIGH_T wr_fin_addr_high = { + 0 + }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_LOW_T wr_fin_addr_low = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_LEN_T wr_fin_len = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T wr_fin_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ONLOAD_CNT_T wr_fin_onload_cnt = { + 0 + }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_fin_addr_h); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_fin_addr_l); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_fin_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_fin_user); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_wr_fin_onload_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_HIGHr, 0, + 0, 
&wr_fin_addr_high); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_LOWr, 0, 0, + &wr_fin_addr_low); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_LENr, 0, 0, + &wr_fin_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USERr, 0, 0, + &wr_fin_user); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ONLOAD_CNTr, 0, + 0, &wr_fin_onload_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_last_wr_fin_addr_h = wr_fin_addr_high.info_axi_last_wr_fin_addr_high; + *p_last_wr_fin_addr_l = wr_fin_addr_low.info_axi_last_wr_fin_addr_low; + *p_last_wr_fin_len = wr_fin_len.info_axi_last_wr_fin_len; + + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T *)p_last_wr_fin_user)) + .info_wr_fin_user_en = wr_fin_user.info_wr_fin_user_en; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T *)p_last_wr_fin_user)) + .info_wr_fin_epid = wr_fin_user.info_wr_fin_epid; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T *)p_last_wr_fin_user)) + .info_wr_fin_vfunc_num = wr_fin_user.info_wr_fin_vfunc_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T *)p_last_wr_fin_user)) + .info_wr_fin_func_num = wr_fin_user.info_wr_fin_func_num; + (*((DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T *)p_last_wr_fin_user)) + .info_wr_fin_vfunc_active = + wr_fin_user.info_wr_fin_vfunc_active; + *p_last_wr_fin_onload_cnt = + wr_fin_onload_cnt.info_axi_last_wr_fin_onload_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取写完成通道错误次数统计 +* @param dev_id 芯片id +* @param p_axi_wr_fin_resp_err_cnt 写完成通道返回错误次数 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 
+dpp_dtb_axi_wr_fin_resp_err_cnt_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_axi_wr_fin_resp_err_cnt) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_CNT_AXI_WR_FIN_RESP_ERR_T wr_fin_resp_err_cnt = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_axi_wr_fin_resp_err_cnt); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_CNT_AXI_WR_FIN_RESP_ERRr, 0, 0, + &wr_fin_resp_err_cnt); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_axi_wr_fin_resp_err_cnt = + wr_fin_resp_err_cnt.cnt_axi_wr_fin_resp_err; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("DTB_STATE") + +/***********************************************************/ +/** 读取 DTB 各通道状态机 +* @param dev_id 芯片id +* @param p_wr_ctrl_state_info 写控制状态机 +* @param p_rd_table_state_info 读表状态机 +* @param p_rd_pd_state_info 读描述符数据状态机 +* @param p_wr_ddr_state_info 写数据到ddr的状态机 +* @param p_wr_fin_state_info 写结束标志的状态机 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/09 +************************************************************/ +ZXIC_UINT32 dpp_dtb_state_info_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_wr_ctrl_state_info, + ZXIC_UINT32 *p_rd_table_state_info, + ZXIC_UINT32 *p_rd_pd_state_info, + ZXIC_UINT32 *p_wr_ddr_state_info, + ZXIC_UINT32 *p_dump_cmd_state_info) +{ + ZXIC_UINT32 rc = 0; + + DPP_DTB_DTB_CFG_INFO_WR_CTRL_STATE_T wr_ctrl_state = { 0 }; + DPP_DTB_DTB_CFG_INFO_RD_TABLE_STATE_T rd_table_state = { 0 }; + DPP_DTB_DTB_CFG_INFO_RD_PD_STATE_T rd_pd_state = { 0 }; + DPP_DTB_DTB_CFG_INFO_WR_DDR_STATE_T wr_ddr_state = { 0 }; + DPP_DTB_DTB_CFG_INFO_DUMP_CMD_STATE_T dump_cmd_state = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_wr_ctrl_state_info); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_rd_table_state_info); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_rd_pd_state_info); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), 
p_wr_ddr_state_info); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_cmd_state_info); + + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_WR_CTRL_STATEr, 0, 0, + &wr_ctrl_state); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_RD_TABLE_STATEr, 0, 0, + &rd_table_state); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_RD_PD_STATEr, 0, 0, + &rd_pd_state); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_WR_DDR_STATEr, 0, 0, + &wr_ddr_state); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + rc = dpp_reg_read(dev, DTB_DTB_CFG_INFO_DUMP_CMD_STATEr, 0, 0, + &dump_cmd_state); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_wr_ctrl_state_info = wr_ctrl_state.info_wr_ctrl_state; + *p_rd_table_state_info = rd_table_state.info_rd_table_state; + *p_rd_pd_state_info = rd_pd_state.info_rd_pd_state; + *p_wr_ddr_state_info = wr_ddr_state.info_wr_ddr_state; + *p_dump_cmd_state_info = dump_cmd_state.info_dump_cmd_state; + + return DPP_OK; +} + +#endif + +/***********************************************************/ +/** 各通道错误统计打印 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_channels_axi_resp_err_cnt_prt(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 rd_value = 0; + + ZXIC_COMM_PRINT( + "\n --------------- DTB CHANNEL ERR STAT INFO --------------- \n"); + + ZXIC_COMM_PRINT("********** dtb down table err cnt **********\n"); + + rc = dpp_dtb_axi_rd_table_resp_err_cnt_get(dev, &rd_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_dtb_axi_rd_table_resp_err_cnt_get"); + + ZXIC_COMM_DBGCNT32_PRINT("cnt_axi_rd_table_resp_err", rd_value); + + rc = dpp_dtb_axi_wr_ctrl_resp_err_cnt_get(dev, &rd_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, 
"dpp_dtb_axi_wr_ctrl_resp_err_cnt_get"); + + ZXIC_COMM_DBGCNT32_PRINT("cnt_axi_wr_ctrl_resp_err", rd_value); + + ZXIC_COMM_PRINT("********** dtb dump table err cnt **********\n"); + + rc = dpp_dtb_axi_rd_pd_resp_err_cnt_get(dev, &rd_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_rd_pd_resp_err_cnt_get"); + + ZXIC_COMM_DBGCNT32_PRINT("cnt_axi_rd_pd_resp_err", rd_value); + + rc = dpp_dtb_axi_wr_ddr_resp_err_cnt_get(dev, &rd_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_wr_ddr_resp_err_cnt_get"); + + ZXIC_COMM_DBGCNT32_PRINT("cnt_axi_wr_ddr_resp_err", rd_value); + + rc = dpp_dtb_axi_wr_fin_resp_err_cnt_get(dev, &rd_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_wr_fin_resp_err_cnt_get"); + + ZXIC_COMM_DBGCNT32_PRINT("cnt_axi_wr_fin_resp_err", rd_value); + + return DPP_OK; +} + +/***********************************************************/ +/** AXIM最近一次操作信息记录打印 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_axi_last_operate_info_prt(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 addr_high = 0; + ZXIC_UINT32 addr_low = 0; + ZXIC_UINT32 len = 0; + ZXIC_UINT32 onload_cnt = 0; + + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USER_T rd_table_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USER_T rd_pd_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USER_T wr_ctrl_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USER_T wr_ddr_user = { 0 }; + DPP_DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USER_T wr_fin_user = { 0 }; + ZXIC_COMM_PRINT( + "\n------------------ DTB DOWN TABLE LAST INFO ------------------\n"); + rc = dpp_dtb_axi_last_rd_table_info_get(dev, &addr_high, &addr_low, + &len, + (ZXIC_UINT32 *)&rd_table_user, + &onload_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_last_rd_table_info_get"); + ZXIC_COMM_PRINT("**********axim 
last read table info***********\n"); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_addr_high", addr_high); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_addr_low", addr_low); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_len", len); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_user_en", + rd_table_user.info_rd_table_user_en); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_epid", + rd_table_user.info_rd_table_epid); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_vfunc_num", + rd_table_user.info_rd_table_vfunc_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_func_num", + rd_table_user.info_rd_table_func_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_vfunc_active", + rd_table_user.info_rd_table_vfunc_active); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_table_onload_cnt", + onload_cnt); + + rc = dpp_dtb_axi_last_wr_ctrl_info_get(dev, &addr_high, &addr_low, &len, + (ZXIC_UINT32 *)&wr_ctrl_user, + &onload_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_last_wr_ctrl_info_get"); + ZXIC_COMM_PRINT("**********axim last write ctrl info***********\n"); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_addr_high", addr_high); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_addr_low", addr_low); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_len", len); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_user_en", + wr_ctrl_user.info_wr_ctrl_user_en); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_epid", + wr_ctrl_user.info_wr_ctrl_epid); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_vfunc_num", + wr_ctrl_user.info_wr_ctrl_vfunc_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_func_num", + wr_ctrl_user.info_wr_ctrl_func_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_vfunc_active", + wr_ctrl_user.info_wr_ctrl_vfunc_active); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ctrl_onload_cnt", + onload_cnt); + + ZXIC_COMM_PRINT( + "\n------------------ DTB DUMP TABLE LAST INFO 
------------------\n"); + rc = dpp_dtb_axi_last_rd_pd_info_get(dev, &addr_high, &addr_low, &len, + (ZXIC_UINT32 *)&rd_pd_user, + &onload_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_last_rd_pd_info_get"); + ZXIC_COMM_PRINT("**********axim last read pd info***********\n"); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_addr_high", addr_high); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_addr_low", addr_low); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_len", len); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_user_en", + rd_pd_user.info_rd_pd_user_en); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_epid", + rd_pd_user.info_rd_pd_epid); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_vfunc_num", + rd_pd_user.info_rd_pd_vfunc_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_func_num", + rd_pd_user.info_rd_pd_func_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_vfunc_active", + rd_pd_user.info_rd_pd_vfunc_active); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_rd_pd_onload_cnt", onload_cnt); + + rc = dpp_dtb_axi_last_wr_ddr_info_get(dev, &addr_high, &addr_low, &len, + (ZXIC_UINT32 *)&wr_ddr_user, + &onload_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_last_wr_ddr_info_get"); + ZXIC_COMM_PRINT("**********axim last write ddr info***********\n"); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_addr_high", addr_high); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_addr_low", addr_low); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_len", len); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_user_en", + wr_ddr_user.info_wr_ddr_user_en); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_epid", + wr_ddr_user.info_wr_ddr_epid); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_vfunc_num", + wr_ddr_user.info_wr_ddr_vfunc_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_func_num", + wr_ddr_user.info_wr_ddr_func_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_vfunc_active", + 
wr_ddr_user.info_wr_ddr_vfunc_active); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_ddr_onload_cnt", onload_cnt); + + rc = dpp_dtb_axi_last_wr_fin_info_get(dev, &addr_high, &addr_low, &len, + (ZXIC_UINT32 *)&wr_fin_user, + &onload_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_axi_wr_fin_resp_err_cnt_get"); + ZXIC_COMM_PRINT("**********axim last write final info***********\n"); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_addr_high", addr_high); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_addr_low", addr_low); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_len", len); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_user_en", + wr_fin_user.info_wr_fin_user_en); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_epid", + wr_fin_user.info_wr_fin_epid); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_vfunc_num", + wr_fin_user.info_wr_fin_vfunc_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_func_num", + wr_fin_user.info_wr_fin_func_num); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_vfunc_active", + wr_fin_user.info_wr_fin_vfunc_active); + ZXIC_COMM_DBGCNT32_PRINT("info_axi_last_wr_fin_onload_cnt", onload_cnt); + + return DPP_OK; +} + +/***********************************************************/ +/** DTB 各通道状态机信息获取 +* @param dev_id 芯片的id号 +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/11/11 +************************************************************/ +ZXIC_UINT32 diag_dpp_dtb_channels_state_info_prt(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_UINT32 wr_ctrl_state = 0; + ZXIC_UINT32 rd_table_state = 0; + ZXIC_UINT32 rd_pd_state = 0; + ZXIC_UINT32 wr_ddr_state = 0; + ZXIC_UINT32 dump_cmd_state = 0; + + rc = dpp_dtb_state_info_get(dev, &wr_ctrl_state, &rd_table_state, + &rd_pd_state, &wr_ddr_state, + &dump_cmd_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_stat_info_get"); + + ZXIC_COMM_PRINT( + "\n-------------- DTB CHANNEL STATE INFO ----------------- \n"); + ZXIC_COMM_PRINT( + 
"----- REG ------------------- CURRENT ------- CORRECT ----- \n"); + ZXIC_COMM_PRINT( + "info_wr_ctrl_state 0x%08x 0x00000005 \n", + wr_ctrl_state); + ZXIC_COMM_PRINT( + "info_rd_table_state 0x%08x 0x00080004 \n", + rd_table_state); + ZXIC_COMM_PRINT( + "info_rd_pd_state 0x%08x 0x00020020 \n", + rd_pd_state); + ZXIC_COMM_PRINT( + "info_wr_ddr_state 0x%08x 0x00000001 \n", + wr_ddr_state); + ZXIC_COMM_PRINT( + "info_dump_cmd_state 0x%08x 0x0000002a \n", + dump_cmd_state); + + return DPP_OK; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/Kbuild.include new file mode 100644 index 000000000000..b1f4bca31130 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/nppu/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pbu.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pbu.c new file mode 100644 index 000000000000..0ee6311ba68c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pbu.c @@ -0,0 +1,292 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pbu.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2014/04/14 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +/****************************************************************************** + * START: 头文件 * + *****************************************************************************/ +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_reg.h" +#include "dpp_pbu_api.h" +#include "dpp_pbu.h" +#include "dpp_dev.h" 
+/****************************************************************************** + * END: 头文件 * + *****************************************************************************/ + +/****************************************************************************** + * START: 常量定义 * + *****************************************************************************/ + +/****************************************************************************** + * END: 常量定义 * + *****************************************************************************/ +#define MF_MAX_BIT (4095) +#define MF_START_BIT (2047) +#define PKT_MAX_BIT (2047) +#define CAP_MAX_NUM (64) +#define PKT_BUFF_NUM (128) +#define PKT_BUF_SIZE (32) + +/***********************************************************/ +/** 配置端口的阈值 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_th_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 *p_data = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), port_id, 0, + DPP_PBU_PORT_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->lif_th, 0, + DPP_PBU_PORT_TH_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->lif_prv, 0, + DPP_PBU_PORT_TH_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_prv, 0, + DPP_PBU_PORT_TH_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos7, 0, + DPP_PBU_PORT_TH_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos6, 0, + p_para->idma_th_cos7); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos5, 0, + p_para->idma_th_cos6); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos4, 0, + p_para->idma_th_cos5); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos3, 0, 
+ p_para->idma_th_cos4); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos2, 0, + p_para->idma_th_cos3); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos1, 0, + p_para->idma_th_cos2); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_para->idma_th_cos0, 0, + p_para->idma_th_cos1); + + p_data = (ZXIC_UINT32 *)p_para; + + rc = dpp_reg_write(dev, NPPU_PBU_CFG_MEMID_0_PBU_FC_IDMATH_RAMr, 0, + port_id, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取端口的阈值 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para 端口阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_th_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_TH_PARA_T *p_para) +{ + DPP_STATUS rc = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), port_id, 0, + DPP_PBU_PORT_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); + + rc = dpp_reg_read(dev, NPPU_PBU_CFG_MEMID_0_PBU_FC_IDMATH_RAMr, 0, + port_id, (ZXIC_UINT32 *)p_para); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置指定端口按cos优先级起pfc流控的优先级流控指针阈值,仅对lif0的48个通道有效 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_cos_th_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 tmp_index = 0; + ZXIC_UINT32 *p_data = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), port_id, 0, + DPP_PBU_PORT_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), 
p_para); + + if (port_id < DPP_PBU_LIF1_PORT_NUM) { + tmp_index = port_id; + + } else if (port_id == DPP_PBU_TM_LOOP_PORT_NUM) { + tmp_index = 56; + + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_pbu_port_cos_th_set: please check input port:%d !!!!!!\n", + port_id); + return DPP_OK; + } + + for (i = 0; i < DPP_PBU_COS_NUM; i++) { + if (0 == i) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_para->cos_th[i], 0, + DPP_PBU_PORT_COS_MAX_TH); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_para->cos_th[i], + p_para->cos_th[i - 1], + DPP_PBU_PORT_COS_MAX_TH); + } + } + + p_data = (ZXIC_UINT32 *)p_para; + + rc = dpp_reg_write(dev, NPPU_PBU_CFG_MEMID_1_PBU_FC_MACTH_RAMr, 0, + tmp_index, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取指定端口中各cos的优先级流控指针阈值,仅对lif0的48个通道有效 +* @param dev_id 设备编号 +* @param port_id 端口号 +* @param p_para cos阈值,要求高优先级的阈值不小于低优先级的阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/07/09 +************************************************************/ +DPP_STATUS dpp_pbu_port_cos_th_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_PBU_PORT_COS_TH_PARA_T *p_para) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 tmp_index = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), port_id, 0, + DPP_PBU_PORT_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); + + if (port_id < DPP_PBU_LIF1_PORT_NUM) { + tmp_index = port_id; + + } else if (port_id == DPP_PBU_TM_LOOP_PORT_NUM) { + tmp_index = 56; + + } else { + return DPP_OK; + } + + rc = dpp_reg_read(dev, NPPU_PBU_CFG_MEMID_1_PBU_FC_MACTH_RAMr, 0, + tmp_index, (ZXIC_UINT32 *)p_para); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param delayTime 延时时间 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* 
@see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_pbu_pfc_delay_time_set(DPP_DEV_T *dev, ZXIC_UINT64 delayTime) +{ + DPP_STATUS rc = DPP_OK; + DPP_NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIME_T pbuDelayHighTime = { 0 }; + DPP_NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIME_T pbuDelayLowTime = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + pbuDelayHighTime.cfg_pfc_rdy_high_time = (ZXIC_UINT32)(delayTime >> 32); + pbuDelayLowTime.cfg_pfc_rdy_low_time = + (ZXIC_UINT32)(delayTime & 0x00000000FFFFFFFF); + + rc = dpp_reg_write(dev, NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIMEr, 0, 0, + &pbuDelayHighTime); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIMEr, 0, 0, + &pbuDelayLowTime); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param delayTime 延时时间 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_pbu_pfc_delay_time_get(DPP_DEV_T *dev, ZXIC_UINT64 *delayTime) +{ + DPP_STATUS rc = DPP_OK; + DPP_NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIME_T pbuDelayHighTime = { 0 }; + DPP_NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIME_T pbuDelayLowTime = { 0 }; + + ZXIC_COMM_CHECK_POINT(delayTime); + ZXIC_COMM_CHECK_POINT(dev); + + rc = dpp_reg_read(dev, NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIMEr, 0, 0, + &pbuDelayHighTime); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev, NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIMEr, 0, 0, + &pbuDelayLowTime); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *delayTime = + (((ZXIC_UINT64)pbuDelayHighTime.cfg_pfc_rdy_high_time) << 32) | + (ZXIC_UINT64)pbuDelayLowTime.cfg_pfc_rdy_low_time; + + return DPP_OK; +} \ No newline at end of file diff --git 
a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pktrx_cfg.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pktrx_cfg.c new file mode 100644 index 000000000000..3be9079488e4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/nppu/dpp_pktrx_cfg.c @@ -0,0 +1,253 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pktrx_cfg.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : zzh +* 完成日期 : 2015/02/06 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_reg.h" +#include "dpp_pktrx_api.h" +#include "dpp_dev.h" +#include "dpp_agent_channel.h" + +#if ZXIC_REAL("mcode glb cfg ") + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_0(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_0) +{ + ZXIC_UINT32 rc = 0; + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r, 0, 0, + &glb_cfg_data_0); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_1(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r, 0, 0, + &glb_cfg_data_1); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_2(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_2) +{ + ZXIC_UINT32 rc = 0; + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_2r, 0, 0, + &glb_cfg_data_2); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_set_3(DPP_DEV_T *dev, + ZXIC_UINT32 glb_cfg_data_3) +{ + ZXIC_UINT32 rc = 0; + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_3r, 0, 0, + &glb_cfg_data_3); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} +/***********************************************************/ +/** +* @param dev_id +* @param p_mcode_glb_cfg +* +* @return +* @remark 无 +* @see +* @author czd @date 2016/04/27 +************************************************************/ +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_0(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_0) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_glb_cfg_data_0); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r, 0, 0, + p_glb_cfg_data_0); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_1(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_glb_cfg_data_1); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r, 0, 0, + p_glb_cfg_data_1); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_2(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_2) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_glb_cfg_data_2); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_2r, 0, 0, + p_glb_cfg_data_2); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_get_3(DPP_DEV_T *dev, + ZXIC_UINT32 *p_glb_cfg_data_3) +{ + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_glb_cfg_data_3); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_3r, 0, 0, + p_glb_cfg_data_3); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write_0(DPP_DEV_T *dev, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_0) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 data = 0; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_INDEX(start_bit_no, 0, 31); + ZXIC_COMM_CHECK_INDEX(end_bit_no, start_bit_no, 31); + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_0, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r, 0, 0, &data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_read", p_mutex); + + ZXIC_COMM_UINT32_WRITE_BITS(data, glb_cfg_data_0, start_bit_no, + end_bit_no - start_bit_no + 1); + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r, 0, 0, &data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_write", + p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write_1(DPP_DEV_T *dev, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 data = 0; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(start_bit_no, 0, 31); + ZXIC_COMM_CHECK_INDEX(end_bit_no, start_bit_no, 31); + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_1, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r, 0, 0, &data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_read", p_mutex); + + ZXIC_COMM_UINT32_WRITE_BITS(data, glb_cfg_data_1, start_bit_no, + end_bit_no - start_bit_no + 1); + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r, 0, 0, &data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_write", + p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +DPP_STATUS dpp_pktrx_ind_rd(DPP_DEV_T *dev, ZXIC_UINT32 mem_addr, + ZXIC_UINT32 mem_id, ZXIC_UINT32 len, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rtn = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_data); + ZXIC_COMM_CHECK_INDEX(mem_id, 0, (MEM_ID_MUX_NUM - 1)); + + ZXIC_COMM_TRACE_NOTICE("dpp_pktrx_ind_rd start\n"); + + rtn = dpp_agent_channel_pktrx_ind_reg_rw( + dev, mem_addr, mem_id, DPP_PKTRX_IND_REG_RD, len, p_data); + ZXIC_COMM_CHECK_RC(rtn, "dpp_agent_channel_pktrx_ind_reg_rw"); + + ZXIC_COMM_PRINT("dpp_pktrx_ind_rd success\n"); + + return DPP_OK; +} + +DPP_STATUS +dpp_pktrx_udf_table_get(DPP_DEV_T *dev, ZXIC_UINT32 index, + DPP_PKTRX_PHYPORT_UDF_TABLE_T *p_phyport_user_info) +{ + DPP_STATUS rc = 0; + + ZXIC_COMM_CHECK_INDEX(index, 0, (DPP_PHYPORT_NUM - 1)); + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_phyport_user_info); + + /* read phyport_udf_tbl item */ + rc = dpp_pktrx_ind_rd(dev, index, PHYPORT_TAB_2_MEM_ID, 16, + &(p_phyport_user_info->port_based_user_data[0])); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_ind_rd"); + + return DPP_OK; +} + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/Kbuild.include new file mode 100644 index 000000000000..3408e9142bcb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/ppu/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/dpp_ppu.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/dpp_ppu.c new file mode 100644 index 000000000000..62c58e4ee297 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/ppu/dpp_ppu.c @@ -0,0 +1,344 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_ppu.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/03/18 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_reg.h" +#include "dpp_dev.h" +#include "dpp_ppu_api.h" +#include "dpp_ppu.h" +#include "dpp_ppu4k_reg.h" +#include "dpp_agent_channel.h" + +#define OPR_WRITE (0) +#define OPR_READ (1) + +DPP_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[DPP_DEV_CHANNEL_MAX]; + +#if ZXIC_REAL("INIT") + +ZXIC_UINT32 dpp_ppu_cls_use_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 cluster_id, + ZXIC_UINT32 flag) +{ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, cluster_id, 0, + DPP_PPU_CLUSTER_NUM - 1); + g_ppu_cls_bit_map[dev_id].cls_use[cluster_id] = flag; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_ppu_cls_use_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 cluster_id) +{ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, cluster_id, 0, + DPP_PPU_CLUSTER_NUM - 1); + + return g_ppu_cls_bit_map[dev_id].cls_use[cluster_id]; +} + +ZXIC_UINT32 dpp_ppu_instr_mem_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 mem_id, + ZXIC_UINT32 flag) +{ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, mem_id, 0, PPU_INSTR_MEM_NUM - 1); + g_ppu_cls_bit_map[dev_id].instr_mem[mem_id] = flag; + + return DPP_OK; +} + +#define DPP_PPU_CLS_USE_CHECK(dev_id, cls_id) \ + do { \ + if (!dpp_ppu_cls_use_get(dev_id, cls_id)) { \ + ZXIC_COMM_TRACE_DEV_ERROR( \ + dev_id, \ + "\n %s:%d[Error:cluster %d stop] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, cls_id, __FUNCTION__); \ + ZXIC_COMM_ASSERT(0); \ + return DPP_ERR; \ + } \ + } while (0) + +ZXIC_UINT32 dpp_ppu_parse_cls_bitmap(ZXIC_UINT32 dev_id, ZXIC_UINT32 bitmap) +{ + ZXIC_UINT32 cls_id = 0; + ZXIC_UINT32 mem_id = 0; + + ZXIC_UINT32 cls_use = 0; + ZXIC_UINT32 instr_mem = 0; + + /*cluster 使用标记必须保证至少有一个cluster是打开的使用的*/ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, bitmap, 0, DPP_PPU_CLS_ALL_START); + + for (cls_id = 0; cls_id < DPP_PPU_CLUSTER_NUM; cls_id++) { + cls_use = (bitmap >> cls_id) & 0x1; + + dpp_ppu_cls_use_set(dev_id, cls_id, cls_use); + } + + for (mem_id = 0; mem_id < PPU_INSTR_MEM_NUM; mem_id++) { + instr_mem = (bitmap >> (mem_id * 2)) & 0x3; + + dpp_ppu_instr_mem_set(dev_id, mem_id, + ((instr_mem > 0) ? 1 : 0)); + } + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("TABEL_CFG") +/***********************************************************/ +/** 配置SDT表 +* @param dev_id 设备号,范围0~3 +* @param cluster_id me cluster编号,范围0~7 +* @param index 地址,即sdt表号,范围0~255 +* @param p_sdt_data sdt表数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/18 +************************************************************/ +DPP_STATUS dpp_ppu_sdt_tbl_write(DPP_DEV_T *dev, ZXIC_UINT32 cluster_id, + ZXIC_UINT32 index, + DPP_SDT_TBL_DATA_T *p_sdt_data) +{ + DPP_STATUS rtn = DPP_OK; + DPP_PPU4K_CLUSTER_WR_HIGH_DATA_R_MEX_T high_data = { 0 }; + DPP_PPU4K_CLUSTER_WR_LOW_DATA_R_MEX_T low_data = { 0 }; + DPP_PPU4K_CLUSTER_ADDR_R_MEX_T sdt_cmd = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), index, PPU_SDT_IDX_MIN, + PPU_SDT_IDX_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_sdt_data); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), cluster_id, 0, + DPP_PPU_CLUSTER_NUM - 1); + DPP_PPU_CLS_USE_CHECK(DEV_ID(dev), cluster_id); + + /* write data reg*/ + high_data.wr_high_data_r_mex = p_sdt_data->data_high32; + low_data.wr_low_data_r_mex = p_sdt_data->data_low32; + rtn = dpp_reg_write(dev, 
PPU4K_CLUSTER_WR_HIGH_DATA_R_MEXr, cluster_id, + 0, &high_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + rtn = dpp_reg_write(dev, PPU4K_CLUSTER_WR_LOW_DATA_R_MEXr, cluster_id, + 0, &low_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + /* write cmd reg*/ + sdt_cmd.operate_type = OPR_WRITE; + sdt_cmd.addr_r_mex = index; + + rtn = dpp_reg_write(dev, PPU4K_CLUSTER_ADDR_R_MEXr, cluster_id, 0, + &sdt_cmd); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} +#endif + +#if ZXIC_REAL("PPU_STATICS") + +/***********************************************************/ +/**配置协处理器哈希计算密钥 +* @param dev_id 设备号 +* @param DPP_PPU_PPU_COP_THASH_RSK_T +* +* @return +* @remark 无 +* @see +* @author yangmy @date 2022/09/09 +************************************************************/ +DPP_STATUS dpp_ppu_ppu_cop_thash_rsk_set(DPP_DEV_T *dev, + DPP_PPU_PPU_COP_THASH_RSK_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); + + ZXIC_COMM_TRACE_NOTICE("dpp_ppu_ppu_cop_thash_rsk_set start\n"); + + rc = dpp_agent_channel_ppu_thash_rsk(dev, DPP_PPU_THASH_RSK_WR, p_para); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_agent_channel_ppu_thash_rsk"); + + ZXIC_COMM_PRINT("dpp_ppu_ppu_cop_thash_rsk_set end\n"); + + return DPP_OK; +} + +DPP_STATUS +dpp_ppu_ppu_cop_thash_rsk_get(DPP_DEV_T *dev, + DPP_PPU_PPU_COP_THASH_RSK_T *p_ppu_cop_thash_rsk) +{ + DPP_STATUS rtn = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ppu_cop_thash_rsk); + + ZXIC_COMM_TRACE_NOTICE("dpp_ppu_ppu_cop_thash_rsk_get start\n"); + + rtn = dpp_agent_channel_ppu_thash_rsk(dev, DPP_PPU_THASH_RSK_RD, + p_ppu_cop_thash_rsk); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "dpp_agent_channel_ppu_thash_rsk"); + + ZXIC_COMM_PRINT("dpp_ppu_ppu_cop_thash_rsk_get end\n"); + + return DPP_OK; +} + +#endif +DPP_STATUS dpp_ppu_debug_en_set(DPP_DEV_T *dev, ZXIC_UINT32 
enable) +{ + DPP_STATUS rtn = DPP_OK; + DPP_PPU_PPU_PPU_DEBUG_EN_R_T debug_en = { 0 }; + + debug_en.debug_en_r = enable; + + rtn = dpp_reg_write(dev, PPU_PPU_PPU_DEBUG_EN_Rr, 0, 0, &debug_en); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +DPP_STATUS dpp_pktrx_port_en_set(DPP_DEV_T *dev, ZXIC_UINT32 port_no, + ZXIC_UINT32 flag) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 port_en_index = 0; + ZXIC_UINT32 port_en_mask = 0; + DPP_NPPU_PKTRX_CFG_PORT_EN_3_T port_en_3_reg = { 0 }; + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), port_no, 0, DPP_PHYPORT_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flag, 0, 1); + + /* 由于端口使能配置cpu可能跟微码冲突,需要先写port_en_mask,再写en */ + port_en_mask = 1u << (port_no % 32); + port_en_index = flag << (port_no % 32); + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_CPU_PORT_EN_MASKr, 0, 0, + &port_en_mask); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + /* 由于有en_mask的存在,不用先读后写,直接写入逻辑只更新mask为1的,不影响原值 */ + if (port_no < 96) { + rc = dpp_reg_write(dev, + NPPU_PKTRX_CFG_PORT_EN_0r + port_no / 32, 0, + 0, &port_en_index); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + } + /* port_en3寄存器需要先读后写,不完全看mask,只有端口使能相关的才看mask */ + else { + rc = dpp_reg_read(dev, NPPU_PKTRX_CFG_PORT_EN_3r, 0, 0, + &port_en_3_reg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + port_en_3_reg.cfg_isch_port_en_3 = port_en_index; + + rc = dpp_reg_write(dev, NPPU_PKTRX_CFG_PORT_EN_3r, 0, 0, + &port_en_3_reg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + } + + return DPP_OK; +} +DPP_STATUS dpp_ppu_debug_valid_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_valid) +{ + DPP_STATUS rtn = DPP_OK; + DPP_PPU_PPU_PPU_DEBUG_VLD_T debug_vld = { 0 }; + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_valid); + + rtn = dpp_reg_read(dev, PPU_PPU_PPU_DEBUG_VLDr, 0, 0, &debug_vld); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_read"); + + *p_valid = debug_vld.ppu_debug_vld; + + return DPP_OK; +} + +DPP_STATUS 
dpp_ppu_set_debug_mode(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *dbg_status) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 pkt_empty = 0; + ZXIC_UINT32 rd_count = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x register start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + /* 使能调试模式寄存器 */ + rc = dpp_ppu_debug_en_set(&dev, 1); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "dpp_ppu_debug_en_set"); + + rc = dpp_ppu_debug_valid_get(&dev, &pkt_empty); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "dpp_ppu_debug_valid_get"); + + while (!pkt_empty) { + rc = dpp_ppu_debug_valid_get(&dev, &pkt_empty); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, + "dpp_ppu_debug_valid_get"); + + if (pkt_empty) { + break; + } + + if (rd_count > DPP_RD_CNT_MAX) { + ZXIC_COMM_PRINT("debug start is fail!!!\n"); + *dbg_status = 0; + rc = dpp_ppu_debug_en_set(&dev, + 0); /*包未排空关闭调试使能*/ + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, + "dpp_ppu_debug_en_set"); + return DPP_OK; + } + + rd_count++; + usleep_range(5, 10); + } + + *dbg_status = 1; + + return DPP_OK; +} + +EXPORT_SYMBOL(dpp_ppu_set_debug_mode); +DPP_STATUS dpp_ppu_close_debug_mode(DPP_PF_INFO_T *pf_info) +{ + DPP_STATUS rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + rc = dpp_dev_get(pf_info, &dev); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x debug mode disable\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + dpp_ppu_debug_en_set(&dev, 0); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_ppu_close_debug_mode); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/Kbuild.include new file mode 100644 index 000000000000..cad347a6f0bb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := 
en_np/sdk/source/dev/module/se/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_etcam.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_etcam.c new file mode 100644 index 000000000000..7b8f1678c08f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_etcam.c @@ -0,0 +1,784 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_etcam.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/04/03 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" + +#include "dpp_reg.h" +#include "dpp_se_api.h" +#include "dpp_etcam.h" +#include "dpp_dev.h" +#include "dpp_stat4k_reg.h" + +#define DPP_ETCAM_OPR_WR (1) +#define DPP_ETCAM_OPR_RD (2) +#define DPP_ETCAM_OPR_UNLOAD (3) +#define DPP_ETCAM_OPR_VBIT (4) + +#define TBLID_CFG_SETP (8) +#define BADDR_CFG_SETP (4) + +/** 当前etcam 条目vld信息 */ +DPP_ETCAM_ENTRY_VLD_T g_etcam_vld_info[DPP_DEV_CHANNEL_MAX][DPP_ETCAM_BLOCK_NUM] + [DPP_ETCAM_RAM_DEPTH] = { { { { 0 } } } }; +#define GET_ETCAM_VLD_INFO(dev_id, block_id, block_index) \ + (g_etcam_vld_info[dev_id][block_id] + block_index) + +#if ZXIC_REAL("IN_FUNC") +/***********************************************************/ +/** 将用户输入的D/M格式的数据转换为写硬件需要的X/Y数据 +* @param p_dm +* @param p_xy +* @param len +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_dm_to_xy(DPP_ETCAM_ENTRY_T *p_dm, DPP_ETCAM_ENTRY_T *p_xy, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(p_dm); + ZXIC_COMM_CHECK_POINT(p_xy); + ZXIC_COMM_CHECK_INDEX(len, 0, DPP_ETCAM_WIDTH_MAX / 8); + 
ZXIC_COMM_ASSERT(p_dm->p_data && p_dm->p_mask && p_xy->p_data && + p_xy->p_mask); + + for (i = 0; i < len; i++) { + p_xy->p_data[i] = + ZXIC_COMM_DM_TO_X(p_dm->p_data[i], p_dm->p_mask[i]); + p_xy->p_mask[i] = + ZXIC_COMM_DM_TO_Y(p_dm->p_data[i], p_dm->p_mask[i]); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 将从硬件读到的X/Y格式的数据转换为用户需要的D/M数据 +* @param p_xy +* @param p_dm +* @param len +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_xy_to_dm(DPP_ETCAM_ENTRY_T *p_dm, DPP_ETCAM_ENTRY_T *p_xy, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(p_dm); + ZXIC_COMM_CHECK_POINT(p_xy); + ZXIC_COMM_CHECK_INDEX(len, 0, DPP_ETCAM_WIDTH_MAX / 8); + ZXIC_COMM_ASSERT(p_dm->p_data && p_dm->p_mask && p_xy->p_data && + p_xy->p_mask); + + for (i = 0; i < len; i++) { + p_dm->p_data[i] = ZXIC_COMM_XY_TO_DATA( + p_xy->p_data[i], + p_xy->p_mask[i]); /* valid only when mask is 0 */ + p_dm->p_mask[i] = + ZXIC_COMM_XY_TO_MASK(p_xy->p_data[i], p_xy->p_mask[i]); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 根据读写模式,获取需要操作的间接寄存器掩码 +* @param mask block RAM操作位图,共8比特,每比特对应一个block RAM行的80bit +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/31 +************************************************************/ +ZXIC_UINT32 dpp_etcam_ind_data_reg_opr_mask_get(ZXIC_UINT32 mask) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 reg_mask = 0; + + ZXIC_COMM_CHECK_INDEX(mask, 0, 0xff); + + for (i = 0; i < DPP_ETCAM_RAM_NUM; i++) { + if ((mask >> i) & 0x1) { + reg_mask |= ((ZXIC_UINT32)0x7 + << ((i / 2) * 5 + (i % 2) * 2)); + } + } + + return reg_mask; +} + +/***********************************************************/ +/** 写eTcam表项数据 +* @param dev_id +* @param wr_mask +* @param p_data +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/31 
+************************************************************/ +DPP_STATUS dpp_etcam_ind_data_set(DPP_DEV_T *dev, ZXIC_UINT32 wr_mask, + ZXIC_UINT8 *p_data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 offset = 0; + ZXIC_UINT32 reg_mask = 0; + ZXIC_UINT8 *p_temp = NULL; + ZXIC_UINT8 buff[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mask, 0, + DPP_ETCAM_WR_MASK_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + p_temp = p_data; + + /* 160bit key: high 80bit in tcam_ram1, low 80bit in tcam_ram0, and so on. */ + for (i = 0; i < DPP_ETCAM_RAM_NUM; i++) { + offset = i * ((ZXIC_UINT32)DPP_ETCAM_WIDTH_MIN / 8); + + if ((wr_mask >> ((DPP_ETCAM_RAM_NUM - 1 - i) % 32)) & 0x1) { + ZXIC_COMM_MEMCPY(buff + offset, p_temp, + DPP_ETCAM_WIDTH_MIN / 8); + p_temp += DPP_ETCAM_WIDTH_MIN / 8; + } + } + + zxic_comm_swap(buff, DPP_ETCAM_WIDTH_MAX / 8); + + /* get ind data reg operate mask, 20bit */ + reg_mask = dpp_etcam_ind_data_reg_opr_mask_get(wr_mask); + + /* cpu_ind_wdat0 reg is for lowest 32bit data. 
*/ + for (i = 0; i < (DPP_ETCAM_WIDTH_MAX / 32); i++) { + if ((reg_mask >> (DPP_ETCAM_WIDTH_MAX / 32 - 1 - i)) & 0x1) { + rc = dpp_reg_write(dev, STAT_ETCAM_CPU_IND_WDAT19r - i, + 0, 0, + (buff + i * sizeof(ZXIC_UINT32))); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_reg_write"); + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** 写eTcam间接命令寄存器 +* @param dev_id +* @param addr etcam地址(0-511) +* @param block_idx block索引(0-15) +* @param data_or_mask 1:写X(data), 0:写Y(mask) +* @param wr_mask 写入掩码, 最高8bit, 对应bit为1代表对应的80bit数据 +* @param opr_type 1-write, 2-read, 3-unload +* @param tacm_reg_flag 1:配置内部row_col_mask寄存器 还是0:读写tcam +* @param row_mask_flag 1: write row_mask reg, 0: write col_mask reg +* @param vben enable the valid bit addressed by addr +* @param vbit valid bit input +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/29 +* @modify YXH @date 2018/11/29 +************************************************************/ +DPP_STATUS dpp_etcam_ind_cmd_set(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, + ZXIC_UINT32 data_or_mask, ZXIC_UINT32 wr_mask, + ZXIC_UINT32 opr_type, + ZXIC_UINT32 tacm_reg_flag, + ZXIC_UINT32 row_mask_flag, ZXIC_UINT32 vben, + ZXIC_UINT32 vbit) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_ETCAM_CPU_IND_CTRL_TMP0_T ind_cmd = { 0 }; + DPP_STAT_ETCAM_CPU_IND_CTRL_TMP1_T ind_cmd_1 = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), addr, 0, + DPP_ETCAM_RAM_DEPTH - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), data_or_mask, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mask, 0, + DPP_ETCAM_WR_MASK_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), opr_type, 1, 4); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), tacm_reg_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), row_mask_flag, 0, 1); 
+ ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), vben, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), vbit, 0, 0xff); + + /* 一定要先配置 配置寄存器1,再配置 配置寄存器0 */ + ind_cmd_1.row_or_col_msk = row_mask_flag; + ind_cmd_1.vben = vben; + ind_cmd_1.vbit = vbit; + + rc = dpp_reg_write(dev, STAT_ETCAM_CPU_IND_CTRL_TMP1r, 0, 0, + &ind_cmd_1); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + /* opr_type: 1-write, 2-read, 3-unload */ + switch (opr_type) { + case DPP_ETCAM_OPR_RD: { + ind_cmd.rd_wr = 1; + } break; + + case DPP_ETCAM_OPR_WR: { + ind_cmd.rd_wr = 0; + ind_cmd.wr_mode = wr_mask; + } break; + + case DPP_ETCAM_OPR_UNLOAD: { + /* 掩码表,决定哪些条目删除,8bit */ + ind_cmd.flush = wr_mask; + } break; + + case DPP_ETCAM_OPR_VBIT: { + /* 获取该地址的vbit valid位状态 */ + ind_cmd.rd_wr = 1; + ind_cmd.wr_mode = wr_mask; + } break; + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "Invalid opr_type!\n"); + ZXIC_COMM_ASSERT(0); + return DPP_ERR; + } + } + + ind_cmd.dat_or_mask = data_or_mask; + ind_cmd.ram_sel = block_idx; + ind_cmd.addr = addr; + + ZXIC_COMM_TRACE_DEBUG("data_or_mask:%d\n", ind_cmd.dat_or_mask); + ZXIC_COMM_TRACE_DEBUG("block_idx:%d\n", ind_cmd.ram_sel); + ZXIC_COMM_TRACE_DEBUG("addr:0x%08x\n", ind_cmd.addr); + + rc = dpp_reg_write(dev, STAT_ETCAM_CPU_IND_CTRL_TMP0r, 0, 0, &ind_cmd); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("EX_FUNC") +/***********************************************************/ +/** 获取etcam中每个block的cpu操作fifo将满信号,若将满则cpu不能再操作当前block +* @param dev_id 设备号 +* @param block_idx etcam中的block编号,范围0~15 +* @param p_cpu_afull +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2015/02/07 +************************************************************/ +DPP_STATUS dpp_etcam_cpu_afull_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_cpu_afull) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_ETCAM_ETCAM_CPU_FL_T cpu_fl = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cpu_afull); + + rc = dpp_reg_read(dev, STAT_ETCAM_ETCAM_CPU_FLr, 0, 0, &cpu_fl); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_cpu_afull = (cpu_fl.etcam_cpu_fl >> block_idx) & 0x1; + + return DPP_OK; +} + +/***********************************************************/ +/** 校验etcam中每个block的cpu操作fifo将满信号,若将满则cpu不能再操作当前block +* @param dev_id 设备号 +* @param block_idx etcam中的block编号,范围0~7 +* +* @return +* @remark 无 +* @see +* @author wll @date 2019/04/15 +************************************************************/ +DPP_STATUS dpp_etcam_cpu_afull_check(DPP_DEV_T *dev, ZXIC_UINT32 block_idx) +{ + ZXIC_UINT32 read_cnt = 0; + ZXIC_UINT32 cpu_afull = 1; + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + + while (cpu_afull) { + rc = dpp_etcam_cpu_afull_get(dev, block_idx, &cpu_afull); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_etcam_cpu_afull_get"); + + if (!cpu_afull) { + break; + } + + read_cnt++; + + if (read_cnt > DPP_RD_CNT_MAX * DPP_RD_CNT_MAX) { + ZXIC_COMM_TRACE_ERROR( + "Error!!! 
dpp_etcam_cpu_afull_check is overtime!\n"); + return DPP_ERR; + } + + /* zxic_comm_usleep(100); */ + } + + return DPP_OK; +} + +/***********************************************************/ +/** 添加eTcam表条目 +* @param dev_id 设备号 +* @param addr 每个block中的ram地址,位宽为8*80bit +* @param block_idx block编号,范围0~7 +* @param wr_mask 写表掩码,共8bit,每bit控制ram中对应位置的80bit数据是否有效 +* @param opr_type etcam操作类型,详见 DPP_ETCAM_OPR_TYPE_E +* @param p_entry 条目数据,data和mask +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_entry_add(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, ZXIC_UINT32 wr_mask, + ZXIC_UINT32 opr_type, DPP_ETCAM_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + // ZXIC_UINT32 i = 0; + // ZXIC_UINT32 tbl_id = 0; + // ZXIC_UINT32 handle_row = 0; + // ZXIC_UINT32 handle = 0; + // ZXIC_UINT32 basea_addr = 0; + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_MUTEX_T *p_etcam_mutex = NULL; + DPP_ETCAM_ENTRY_T entry_xy = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), addr, 0, + DPP_ETCAM_RAM_DEPTH - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mask, 0, + DPP_ETCAM_WR_MASK_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), opr_type, DPP_ETCAM_OPR_DM, + DPP_ETCAM_OPR_XY); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry); + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_ETCAM, &p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + /* check cpu fifo is afull */ + rc = dpp_etcam_cpu_afull_check(dev, block_idx); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rc, 
"dpp_etcam_cpu_afull_check", p_etcam_mutex); + + ZXIC_COMM_ASSERT(p_entry->p_data && p_entry->p_mask); + + entry_xy.p_data = temp_data; + entry_xy.p_mask = temp_mask; + + if (opr_type == DPP_ETCAM_OPR_DM) { + /* convert user D/M data to X/Y */ + rc = dpp_etcam_dm_to_xy( + p_entry, &entry_xy, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rc, "dpp_etcam_dm_to_xy", p_etcam_mutex); + } else { + ZXIC_COMM_MEMCPY(entry_xy.p_data, p_entry->p_data, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + ZXIC_COMM_MEMCPY(entry_xy.p_mask, p_entry->p_mask, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + } + + /* write data X */ + rc = dpp_etcam_ind_data_set(dev, wr_mask, entry_xy.p_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_data_set", + p_etcam_mutex); + + rc = dpp_etcam_ind_cmd_set(dev, addr, block_idx, DPP_ETCAM_DTYPE_DATA, + wr_mask, DPP_ETCAM_OPR_WR, 0, 0, 1, 0); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_cmd_set", + p_etcam_mutex); + + /* write mask Y */ + rc = dpp_etcam_ind_data_set(dev, wr_mask, entry_xy.p_mask); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_data_set", + p_etcam_mutex); + + rc = dpp_etcam_ind_cmd_set(dev, addr, block_idx, DPP_ETCAM_DTYPE_MASK, + wr_mask, DPP_ETCAM_OPR_WR, 0, 0, 1, 0xFF); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_cmd_set", + p_etcam_mutex); + + rc = zxic_comm_mutex_unlock(p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 删除eTcam表项条目 +* @param dev_id 设备号 +* @param addr 每个block中的ram地址,位宽为8*80bit +* @param block_idx block的编号,范围0~7 +* @param wr_mask 写表掩码,共8bit,每bit控制ram中对应位置的80bit数据是否有效 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_etcam_entry_del(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 
block_idx, ZXIC_UINT32 wr_mask) +{ + DPP_STATUS rc = DPP_OK; + // ZXIC_UINT32 i = 0; + // ZXIC_UINT32 tbl_id = 0; + // ZXIC_UINT32 handle_row = 0; + // ZXIC_UINT32 handle = 0; + // ZXIC_UINT32 basea_addr = 0; + + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { + 0xff, + }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { + 0, + }; + ZXIC_MUTEX_T *p_etcam_mutex = NULL; + DPP_ETCAM_ENTRY_T entry_xy = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), addr, 0, + DPP_ETCAM_RAM_DEPTH - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mask, 0, + DPP_ETCAM_WR_MASK_MAX); + + ZXIC_COMM_MEMSET(temp_data, 0xff, DPP_ETCAM_WIDTH_MAX / 8); + ZXIC_COMM_MEMSET(temp_mask, 0, DPP_ETCAM_WIDTH_MAX / 8); + + entry_xy.p_data = temp_data; + entry_xy.p_mask = temp_mask; + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_ETCAM, &p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + /* check cpu fifo is afull */ + rc = dpp_etcam_cpu_afull_check(dev, block_idx); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rc, "dpp_etcam_cpu_afull_check", p_etcam_mutex); + + /* write data X */ + rc = dpp_etcam_ind_data_set(dev, wr_mask, entry_xy.p_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_data_set", + p_etcam_mutex); + + rc = dpp_etcam_ind_cmd_set(dev, addr, block_idx, DPP_ETCAM_DTYPE_DATA, + wr_mask, DPP_ETCAM_OPR_WR, 0, 0, 1, 0xFF); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_cmd_set", + p_etcam_mutex); + + /* write mask Y */ + rc = dpp_etcam_ind_data_set(dev, wr_mask, entry_xy.p_mask); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_data_set", + p_etcam_mutex); + + rc = dpp_etcam_ind_cmd_set(dev, 
addr, block_idx, DPP_ETCAM_DTYPE_MASK, + wr_mask, DPP_ETCAM_OPR_WR, 0, 0, 1, 0xFF); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_etcam_ind_cmd_set", + p_etcam_mutex); + + rc = zxic_comm_mutex_unlock(p_etcam_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 比较一组data/mask和一组X/Y数据内容是否相异 +* @param p_entry_dm 待比较的data/mask数据 +* @param p_entry_xy 待比较的一组X/Y组合数据 +* +* @return 1-不同,0-相同 +* @remark 无 +* @see +* @author 王春雷 @date 2015/04/23 +************************************************************/ +ZXIC_UINT32 dpp_etcam_entry_cmp(DPP_ETCAM_ENTRY_T *p_entry_dm, + DPP_ETCAM_ENTRY_T *p_entry_xy) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 data_len = 0; + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + DPP_ETCAM_ENTRY_T entry_xy_temp = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_entry_dm); + ZXIC_COMM_CHECK_POINT(p_entry_xy); + + entry_xy_temp.mode = p_entry_dm->mode; + entry_xy_temp.p_data = temp_data; + entry_xy_temp.p_mask = temp_mask; + data_len = DPP_ETCAM_ENTRY_SIZE_GET(entry_xy_temp.mode); + + if (data_len > 80) { + return 1; + } + + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT(3U, entry_xy_temp.mode); + rc = dpp_etcam_dm_to_xy(p_entry_dm, &entry_xy_temp, data_len); + ZXIC_COMM_CHECK_RC(rc, "dpp_etcam_dm_to_xy"); + + if ((ZXIC_COMM_MEMCMP(entry_xy_temp.p_data, p_entry_xy->p_data, + data_len) != 0) || + (ZXIC_COMM_MEMCMP(entry_xy_temp.p_mask, p_entry_xy->p_mask, + data_len) != 0)) { + return 1; + } + + return 0; +} + +/***********************************************************/ +/** 配置block的业务号,table_id +* @param dev_id 设备号 +* @param block_idx block的索引值。范围[0~7] +* @param tbl_id 表号,范围[0~7] +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/08 +************************************************************/ +DPP_STATUS dpp_etcam_block_tbl_id_set(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, 
+ ZXIC_UINT32 tbl_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 reg_offset = 0; + ZXIC_UINT32 bit_offset = 0; + ZXIC_UINT32 *p_temp = 0; + DPP_STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFG_T block_tbl_id = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), tbl_id, 0, + DPP_ETCAM_TBLID_NUM - 1); + + /*reg_offset 表示第几个寄存器进行配置,共四组配置寄存器*/ + reg_offset = block_idx / TBLID_CFG_SETP; + /*bit_offset 一个寄存器可以配置四组port_id,本位表示寄存器内配置的偏移*/ + bit_offset = block_idx % TBLID_CFG_SETP; + + /*取出当前的配置*/ + rc = dpp_reg_read(dev, STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFGr + reg_offset, + 0, 0, &block_tbl_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + /*计算出来配置当前的port_id所处的地址*/ + p_temp = (ZXIC_UINT32 *)(&block_tbl_id) + 7 - bit_offset; + + *p_temp = tbl_id; + + rc = dpp_reg_write(dev, STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFGr + reg_offset, + 0, 0, &block_tbl_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取block的业务号,table_id +* @param dev_id +* @param block_idx +* @param p_tbl_id +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/08 +************************************************************/ +DPP_STATUS dpp_etcam_block_tbl_id_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_tbl_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 reg_offset = 0; + ZXIC_UINT32 bit_offset = 0; + ZXIC_UINT32 *p_temp = 0; + DPP_STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFG_T block_tbl_id = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_tbl_id); + + reg_offset = block_idx / TBLID_CFG_SETP; + bit_offset = 
block_idx % TBLID_CFG_SETP; + + rc = dpp_reg_read(dev, STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFGr + reg_offset, + 0, 0, &block_tbl_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_temp = (ZXIC_UINT32 *)(&block_tbl_id) + TBLID_CFG_SETP - 1 - + bit_offset; + + *p_tbl_id = *p_temp; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置block的基地址。 +* @param dev_id 设备号 +* @param block_idx block编号 +* @param base_addr 基地址。 +* 配置规则:每个寄存器寄存器位宽是7bit,逻辑实现为{base_addr[6:0],9'b0}, +* 80 bit键值模式下,base_addr格式遵循{base_addr[6,3],3'0} +* 160bit键值模式下,base_addr格式遵循{1'b0,base_addr[5,2],2'0} +* 320bit键值模式下,base_addr格式遵循{2'b0,base_addr[4,1],1'0} +* 640bit键值模式下,base_addr格式遵循{3'b0,base_addr[3,0]} +* @return +* @remark 无 +* @see 共有8组配置寄存器,每组寄存器支持两组block基地址的配置 +* 其中第0组支持0和1号block基地址配置,第1组支持1和2号block基地址配置 ,依次类推 +* @author 王春雷 @date 2014/04/08 +************************************************************/ +DPP_STATUS dpp_etcam_block_baddr_set(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 base_addr) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 reg_offset = 0; + ZXIC_UINT32 bit_offset = 0; + ZXIC_UINT32 *p_temp = 0; + DPP_STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFG_T block_baddr = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + + reg_offset = block_idx / BADDR_CFG_SETP; + bit_offset = block_idx % BADDR_CFG_SETP; + + rc = dpp_reg_read(dev, + STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFGr + reg_offset, 0, + 0, &block_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_temp = (ZXIC_UINT32 *)(&block_baddr) + 3 - bit_offset; + + *p_temp = base_addr; + + rc = dpp_reg_write(dev, + STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFGr + reg_offset, 0, + 0, &block_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 获取block的基地址 +* @param dev_id +* @param block_idx +* @param p_base_addr +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/08 +************************************************************/ +DPP_STATUS dpp_etcam_block_baddr_get(DPP_DEV_T *dev, ZXIC_UINT32 block_idx, + ZXIC_UINT32 *p_base_addr) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 reg_offset = 0; + ZXIC_UINT32 bit_offset = 0; + ZXIC_UINT32 *p_temp = 0; + DPP_STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFG_T block_baddr = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_base_addr); + + reg_offset = block_idx / BADDR_CFG_SETP; + bit_offset = block_idx % BADDR_CFG_SETP; + + rc = dpp_reg_read(dev, + STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFGr + reg_offset, 0, + 0, &block_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_temp = (ZXIC_UINT32 *)&block_baddr + BADDR_CFG_SETP - 1 - bit_offset; + + *p_base_addr = *p_temp; + + return DPP_OK; +} + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_se.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_se.c new file mode 100644 index 000000000000..cd0602c1bb35 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_se.c @@ -0,0 +1,1166 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_se.c +* 文件标识 : +* 内容摘要 : 芯片se模块基本接口函数实现 +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/03/11 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_reg.h" 
+#include "dpp_se_api.h" +#include "dpp_etcam.h" +#include "dpp_se.h" +#include "dpp_dev.h" +#include "dpp_se_cfg.h" +#include "dpp_sdt.h" +#include "dpp_smmu14k_reg.h" +#include "dpp_se4k_reg.h" + +#define SE_OPR_WR (0) +#define SE_OPR_RD (1) +#define SE_CLS_MAX (8) +#define SE_SMMU1_PAGE_MAX (5) +#define SE_LPMID_OFF (4) + +static SMMU1_KSCHD_HASH_DDR_CFG_T + g_smmu1_kschd_hash[DPP_DEV_CHANNEL_MAX][DPP_HASH_ID_NUM] + [HASH_BULK_NUM] = { { { { 0 } } } }; + +ZXIC_UINT32 g_lpm_dat_wr_type_flag = 2; /* 1:开启DMA 2:REG mode */ +ZXIC_UINT8 g_lpm_hw_dat_buf[LPM_HW_DAT_BUFF_SIZE_MAX] = { 0 }; +ZXIC_UINT32 g_lpm_hw_dat_offset; + +#define GET_SMMU1_KSCHD_HASH_CFG(dev_id, hash_id, bulk_id) \ + (&g_smmu1_kschd_hash[dev_id][hash_id][bulk_id]) + +#define SMMU1_WDAT0_R (SYS_SE_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0) +#define SMMU1_CMD0_R (SYS_SE_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x40) +#define SMMU1_CMD1_R (SYS_SE_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x44) +#define SMMU1_CMD0_F_ADDR_START (30) +#define SMMU1_CMD0_F_ADDR_WIDTH (2) +#define SMMU1_CMD0_F_ECC_EN_START (5) +#define SMMU1_CMD0_F_ECC_EN_WIDTH (1) +#define SMMU1_CMD0_F_DDR_MODE_START (3) +#define SMMU1_CMD0_F_DDR_MODE_WIDTH (2) +#define SMMU1_CMD0_F_BK_INFO_START (0) +#define SMMU1_CMD0_F_BK_INFO_WIDTH (3) + +#define SMMU1_CMD1_F_DDR_WR_START (30) +#define SMMU1_CMD1_F_DDR_WR_WIDTH (1) +#define SMMU1_CMD1_F_ADDR_START (0) +#define SMMU1_CMD1_F_ADDR_WIDTH (30) + +#if ZXIC_REAL("SMMU0") +/***********************************************************/ +/** SE通用完成状态检查 +* @param dev_id 设备号 +* @param reg_no 寄存器编号 +* @param pos 所要读的寄存器的done_flag的位置(只支持1bit) +* +* @return +* @remark 无 +* @see +* @author wyt @date 2018/07/09 +************************************************************/ +DPP_STATUS dpp_se_done_status_check(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 pos) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 data = 0; + ZXIC_UINT32 rd_cnt = 0; + ZXIC_UINT32 done_flag = 0; + + /* 打桩使用 */ +#ifdef 
DPP_FOR_LLT + ZXIC_UINT32 done_sig = 0xffffffff; + + rc = dpp_reg_write32(dev_id, reg_no, done_sig); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write32"); +#endif + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + while (!done_flag) { + rc = dpp_reg_read32(dev, reg_no, 0, 0, &data); + if (ZXIC_OK != rc) { + ZXIC_COMM_TRACE_ERROR( + "\n [ErrorCode:0x%x] !-- dpp_reg_read32 Fail!\n", + rc); + return rc; + } + + done_flag = (data >> pos) & 0x1; + + if (done_flag) { + break; + } + + if (rd_cnt > DPP_RD_CNT_MAX * DPP_RD_CNT_MAX) { + ZXIC_COMM_TRACE_ERROR( + "Error!!! dpp se rd reg_no [%d] is overtime!\n", + reg_no); + return DPP_ERR; + } + + rd_cnt++; + /*zxic_comm_usleep(1000);*/ + } + + return rc; +} + +/***********************************************************/ +/** 写eRam +* @param dev_id 设备号 +* @param base_addr 基地址,以128bit为单位 +* @param index 条目索引 +* @param wrt_mode 数据位宽模式, 取值参考ERAM128_OPR_MODE_E的定义 +* @param p_data 数据 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/16 +************************************************************/ +DPP_STATUS dpp_se_smmu0_ind_write(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 index, ZXIC_UINT32 wrt_mode, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 temp_idx = 0; + ZXIC_MUTEX_T *p_ind_mutex = NULL; + + DPP_SMMU0_SMMU0_CPU_IND_CMD_T cpu_ind_cmd = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wrt_mode, ERAM128_OPR_128b, + ERAM128_OPR_1b); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), base_addr, 0, + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_SMMU0, &p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, 
"zxic_comm_mutex_lock"); + + rc = dpp_se_done_status_check(dev, SMMU0_SMMU0_WR_ARB_CPU_RDYr, 0); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, + "dpp_se_done_status_check", p_ind_mutex); + + switch (wrt_mode) { + case ERAM128_OPR_128b: { + if ((0xFFFFFFFF - (base_addr)) < (index)) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", + __FILE__, __LINE__, base_addr, index, + __FUNCTION__); + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + if ((base_addr + index) > (SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1)) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_write : index out of range !\n"); + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + temp_idx = index << 7; + + for (i = 0; i < 4; i++) { + rc = dpp_reg_write(dev, SMMU0_SMMU0_CPU_IND_WDAT0r + i, + 0, 0, p_data + 3 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rc, "dpp_reg_write", p_ind_mutex); + } + + break; + } + + case ERAM128_OPR_64b: { + if ((base_addr + (index >> 1)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_write : index out of range !\n"); + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + temp_idx = index << 6; + + for (i = 0; i < 2; i++) { + rc = dpp_reg_write(dev, SMMU0_SMMU0_CPU_IND_WDAT0r + i, + 0, 0, p_data + 1 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rc, "dpp_reg_write", p_ind_mutex); + } + + break; + } + + case ERAM128_OPR_1b: { + if ((base_addr + (index >> 7)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_write : index out of range !\n"); + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; 
+ } + + temp_idx = index; + rc = dpp_reg_write(dev, SMMU0_SMMU0_CPU_IND_WDAT0r, 0, 0, + p_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_write", + p_ind_mutex); + break; + } + } + + cpu_ind_cmd.cpu_ind_rw = SE_OPR_WR; + cpu_ind_cmd.cpu_req_mode = wrt_mode; + if ((0xFFFFFFFF - (temp_idx)) < + ((base_addr << 7) & DPP_ERAM128_BADDR_MASK)) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", + __FILE__, __LINE__, temp_idx, + ((base_addr << 7) & DPP_ERAM128_BADDR_MASK), + __FUNCTION__); + zxic_comm_mutex_unlock(p_ind_mutex); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + cpu_ind_cmd.cpu_ind_addr = + ((base_addr << 7) & DPP_ERAM128_BADDR_MASK) + temp_idx; + + rc = dpp_reg_write(dev, SMMU0_SMMU0_CPU_IND_CMDr, 0, 0, &cpu_ind_cmd); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_write", + p_ind_mutex); + + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读eRam +* @param dev_id 设备号 +* @param base_addr 基地址,以128bit为单位 +* @param index 条目索引,支持128、64、32和1bit的索引值 +* @param rd_mode 读eRam模式, 取值参照ERAM128_OPR_MODE_E定义,读清模式下不支持1bit模式 +* @param rd_clr_mode eRam读清模式, 取值参照ERAM128_RD_CLR_MODE_E定义 +* @param p_data 返回数据缓存的指针 +* +* @return +* @remark 无 +* @see +* @author wcl @date 2015/01/30 +************************************************************/ +DPP_STATUS dpp_se_smmu0_ind_read(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 index, ZXIC_UINT32 rd_mode, + ZXIC_UINT32 rd_clr_mode, ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + ZXIC_UINT32 temp_data[4] = { 0 }; + ZXIC_UINT32 *p_temp_data = NULL; + + DPP_SMMU0_SMMU0_CPU_IND_CMD_T cpu_ind_cmd = { 0 }; + + ZXIC_MUTEX_T *p_ind_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + 
ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_clr_mode, RD_MODE_HOLD, + RD_MODE_CLEAR); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_mode, ERAM128_OPR_128b, + ERAM128_OPR_32b); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), base_addr, 0, + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + + rc = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_SMMU0, &p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_lock"); + + rc = dpp_se_done_status_check(dev, SMMU0_SMMU0_WR_ARB_CPU_RDYr, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, "time_out", + p_ind_mutex); + + /** 正常模式读数据,仅能以128bit读出数据,软件提取数据*/ + if (RD_MODE_HOLD == rd_clr_mode) { + cpu_ind_cmd.cpu_ind_rw = SE_OPR_RD; + cpu_ind_cmd.cpu_ind_rd_mode = RD_MODE_HOLD; + cpu_ind_cmd.cpu_req_mode = ERAM128_OPR_128b; + + /* 先以128bit模式读出数据 */ + switch (rd_mode) { + case ERAM128_OPR_128b: { + if ((0xFFFFFFFF - (base_addr)) < (index)) { + zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", + __FILE__, __LINE__, base_addr, index, + __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + if (base_addr + index > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 7) & DPP_ERAM128_BADDR_MASK; + break; + } + + case ERAM128_OPR_64b: { + if ((base_addr + (index >> 1)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 6) & DPP_ERAM128_BADDR_MASK; + col_index = index & 0x1; + break; + } + + case ERAM128_OPR_32b: { + if ((base_addr + (index >> 2)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 5) & DPP_ERAM128_BADDR_MASK; + col_index = index & 0x3; + break; + } + + case ERAM128_OPR_1b: { + if ((base_addr + (index >> 7)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = index & DPP_ERAM128_BADDR_MASK; + col_index = index & 0x7F; + break; + } + } + + cpu_ind_cmd.cpu_ind_addr = + ((base_addr << 7) & DPP_ERAM128_BADDR_MASK) + row_index; + } + /** 读清模式*/ + else { + cpu_ind_cmd.cpu_ind_rw = SE_OPR_RD; + cpu_ind_cmd.cpu_ind_rd_mode = RD_MODE_CLEAR; + + switch (rd_mode) { + case ERAM128_OPR_128b: { + if ((0xFFFFFFFF - (base_addr)) < (index)) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", + __FILE__, __LINE__, base_addr, index, + __FUNCTION__); + zxic_comm_mutex_unlock(p_ind_mutex); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + if (base_addr + index > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 7); + cpu_ind_cmd.cpu_req_mode = ERAM128_OPR_128b; + break; + } + + case ERAM128_OPR_64b: { + if ((base_addr + (index >> 1)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 6); + cpu_ind_cmd.cpu_req_mode = 2; + break; + } + + case ERAM128_OPR_32b: { + if ((base_addr + (index >> 2)) > + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) { + ZXIC_COMM_PRINT( + "dpp_se_smmu0_ind_read : index out of range !\n"); + zxic_comm_mutex_unlock(p_ind_mutex); + return DPP_ERR; + } + + row_index = (index << 5); + cpu_ind_cmd.cpu_req_mode = 1; + break; + } + + /** 1bit读清模式硬件不支持 */ + case ERAM128_OPR_1b: { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "Param Error! 
rd_clr_mode[%d] or rd_mode[%d] error!\n ", + rd_clr_mode, rd_mode); + zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_ASSERT(0); + return DPP_ERR; + } + } + + cpu_ind_cmd.cpu_ind_addr = + ((base_addr << 7) & DPP_ERAM128_BADDR_MASK) + row_index; + } + + rc = dpp_reg_write(dev, SMMU0_SMMU0_CPU_IND_CMDr, 0, 0, &cpu_ind_cmd); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_write", + p_ind_mutex); + + rc = dpp_se_done_status_check(dev, SMMU0_SMMU0_CPU_IND_RD_DONEr, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, "time_out", + p_ind_mutex); + + p_temp_data = temp_data; + for (i = 0; i < 4; i++) { + /* + rc = dpp_reg_read(dev_id, + SMMU0_SMMU0_CPU_IND_RDAT0r + i, + 0, + 0, + temp_data + 3 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(dev_id, rc, "dpp_reg_read", p_ind_mutex); +*/ + rc = dpp_reg_read(dev, SMMU0_SMMU0_CPU_IND_RDAT0r + i, 0, 0, + p_temp_data + 3 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rc, "dpp_reg_read", + p_ind_mutex); + } + + /** 正常模式读数据,仅能以128bit读出数据,软件提取数据*/ + if (RD_MODE_HOLD == rd_clr_mode) { + switch (rd_mode) { + case ERAM128_OPR_128b: { + /* ZXIC_COMM_MEMCPY(p_data, &temp_data[0], (128 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, p_temp_data, (128 / 8)); + break; + } + + case ERAM128_OPR_64b: { + /* ZXIC_COMM_MEMCPY(p_data, &temp_data[(1 - col_index) << 1], (64 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, + p_temp_data + ((1 - col_index) << 1), + (64 / 8)); + break; + } + + case ERAM128_OPR_32b: { + /* ZXIC_COMM_MEMCPY(p_data, &temp_data[(3 - col_index)], (32 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, + p_temp_data + ((3 - col_index)), + (32 / 8)); + break; + } + + case ERAM128_OPR_1b: { + /* ZXIC_COMM_UINT32_GET_BITS(p_data[0], temp_data[3 - col_index / 32], (col_index % 32), 1); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_UINT32_GET_BITS( + p_data[0], + *(p_temp_data + (3 - col_index / 32)), + 
(col_index % 32), 1); + break; + } + } + } else { /** 读清模式*/ + switch (rd_mode) { + case ERAM128_OPR_128b: { + /* ZXIC_COMM_MEMCPY(p_data, temp_data, (128 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, p_temp_data, (128 / 8)); + break; + } + + case ERAM128_OPR_64b: { + /* ZXIC_COMM_MEMCPY(p_data, temp_data, (64 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, p_temp_data, (64 / 8)); + break; + } + + case ERAM128_OPR_32b: { + /* ZXIC_COMM_MEMCPY(p_data, temp_data, (32 / 8)); */ + /* modify by ghm for coverity @20200714 */ + ZXIC_COMM_MEMCPY(p_data, p_temp_data, (64 / 8)); + break; + } + } + } + + rc = zxic_comm_mutex_unlock(p_ind_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return rc; +} + +#endif + +#if ZXIC_REAL("SMMU1") +/***********************************************************/ +/** dpp hash的smmu1属性设置 +* @param dev_id 设备号 +* @param hash_id hash引擎号 +* @param tbl_id hash表号 +* @param ecc_en ecc使能 +* @param baddr ddr基地址 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/12 +************************************************************/ +DPP_STATUS dpp_se_smmu1_hash_tbl_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 tbl_id, ZXIC_UINT32 ecc_en, + ZXIC_UINT32 baddr) +{ + DPP_STATUS rc = DPP_OK; + + DPP_SMMU14K_SE_SMMU1_HASH0_TBL0_CFG_T hash_tbl_cfg = { 0 }; + + SMMU1_KSCHD_HASH_DDR_CFG_T *p_hash_ddr_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_INDEX_UPPER(tbl_id, HASH_BULK_ID_MAX); + ZXIC_COMM_CHECK_INDEX_UPPER(ecc_en, 1); + ZXIC_COMM_CHECK_INDEX_UPPER(baddr, DPP_SE_SMMU1_MAX_BADDR_NO_SHARE); + + /* save to soft buffer */ + p_hash_ddr_cfg = GET_SMMU1_KSCHD_HASH_CFG(DEV_ID(dev), hash_id, tbl_id); + p_hash_ddr_cfg->baddr = baddr; + p_hash_ddr_cfg->crcen = ecc_en; + 
p_hash_ddr_cfg->mode = + SMMU1_DDR_SRH_256b; /* alg search ddr mode, not write mode */ + + hash_tbl_cfg.hash0_tbl0_ecc_en = ecc_en; + hash_tbl_cfg.hash0_tbl0_baddr = baddr; + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_reg_write(dev, + SMMU14K_SE_SMMU1_HASH0_TBL0_CFGr + + hash_id * HASH_BULK_NUM + tbl_id, + 0, 0, &hash_tbl_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); +#endif + + return rc; +} + +/***********************************************************/ +/** 获取hash算法访问DDR空间的属性,从软件获取(待优化) +* @param dev_id 设备号 +* @param hash_id hash引擎号 +* @param bulk_id Hash引擎存储资源划分块数的ID号 +* @param p_ecc_en 使能ECC校验 +* @param p_base_addr DDR空间基地址 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author tf @date 2016/06/15 +************************************************************/ +DPP_STATUS dpp_se_smmu1_hash_tbl_soft_cfg_get(DPP_DEV_T *dev, + ZXIC_UINT32 hash_id, + ZXIC_UINT32 bulk_id, + ZXIC_UINT32 *p_ecc_en, + ZXIC_UINT32 *p_base_addr) +{ + SMMU1_KSCHD_HASH_DDR_CFG_T *p_schd_hash = NULL; + + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_base_addr); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ecc_en); + + /* read from soft buffer */ + p_schd_hash = GET_SMMU1_KSCHD_HASH_CFG(DEV_ID(dev), hash_id, bulk_id); + *p_base_addr = p_schd_hash->baddr; + *p_ecc_en = p_schd_hash->crcen; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("ALG") +/***********************************************************/ +/** 设置zblock service信息 +* @param dev_id +* @param zblk_idx zblock索引(2bit zgroup + 3bit zblock) +* @param serv_sel 0-lpm, 1-hash +* @param hash_id zblock对应的hash_id, lpm不关心此字段 +* @param enable 业务表使能标志 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/07/26 +************************************************************/ +DPP_STATUS dpp_se_zblk_serv_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 
zblk_idx, + ZXIC_UINT32 serv_sel, ZXIC_UINT32 hash_id, + ZXIC_UINT32 enable) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_ZBLOCK_SERVICE_CONFIGURE_T zblk_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + zblk_cfg.hash_channel_sel = hash_id; + zblk_cfg.service_sel = serv_sel; + zblk_cfg.st_en = enable; + rtn = dpp_reg_write(dev, SE4K_SE_ALG_ZBLOCK_SERVICE_CONFIGUREr, + ((ZBLK_ADDR_CONV(zblk_idx) >> 3) & 0x3), + (ZBLK_ADDR_CONV(zblk_idx) & 0x7), &zblk_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 设置业务表独占的zcell +* @param dev_id +* @param zblk_idx zblock索引(2bit zgroup + 3bit zblock) +* @param zcell0_bulk_id 独占zcell0的业务表号 +* @param zcell0_mono_flag zcell0被独占的标志 +* @param zcell1_bulk_id 独占zcell1的业务表号 +* @param zcell1_mono_flag zcell1被独占的标志 +* @param zcell2_bulk_id 独占zcell2的业务表号 +* @param zcell2_mono_flag zcell2被独占的标志 +* @param zcell3_bulk_id 独占zcell3的业务表号 +* @param zcell3_mono_flag zcell3被独占的标志 +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/04/26 +************************************************************/ +DPP_STATUS dpp_se_zcell_mono_cfg_set( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 zcell0_bulk_id, + ZXIC_UINT32 zcell0_mono_flag, ZXIC_UINT32 zcell1_bulk_id, + ZXIC_UINT32 zcell1_mono_flag, ZXIC_UINT32 zcell2_bulk_id, + ZXIC_UINT32 zcell2_mono_flag, ZXIC_UINT32 zcell3_bulk_id, + ZXIC_UINT32 zcell3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONO_T zblk_zcell_mono_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + zblk_zcell_mono_cfg.ha_zcell0_tbl_id = zcell0_bulk_id; + zblk_zcell_mono_cfg.ha_zcell0_mono_flag = zcell0_mono_flag; + zblk_zcell_mono_cfg.ha_zcell1_tbl_id = zcell1_bulk_id; + zblk_zcell_mono_cfg.ha_zcell1_mono_flag = zcell1_mono_flag; + 
zblk_zcell_mono_cfg.ha_zcell2_tbl_id = zcell2_bulk_id; + zblk_zcell_mono_cfg.ha_zcell2_mono_flag = zcell2_mono_flag; + zblk_zcell_mono_cfg.ha_zcell3_tbl_id = zcell3_bulk_id; + zblk_zcell_mono_cfg.ha_zcell3_mono_flag = zcell3_mono_flag; + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONOr, + ((ZBLK_ADDR_CONV(zblk_idx) >> 3) & 0x3), + (ZBLK_ADDR_CONV(zblk_idx) & 0x7), + &zblk_zcell_mono_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取业务表独占的zcell +* @param dev_id +* @param zblk_idx +* @param *zcell0_bulk_id +* @param *zcell0_mono_flag +* @param *zcell1_bulk_id +* @param *zcell1_mono_flag +* @param *zcell2_bulk_id +* @param *zcell2_mono_flag +* @param *zcell3_bulk_id +* @param *zcell3_mono_flag +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/04/26 +************************************************************/ +DPP_STATUS dpp_se_zcell_mono_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 *zcell0_bulk_id, + ZXIC_UINT32 *zcell0_mono_flag, ZXIC_UINT32 *zcell1_bulk_id, + ZXIC_UINT32 *zcell1_mono_flag, ZXIC_UINT32 *zcell2_bulk_id, + ZXIC_UINT32 *zcell2_mono_flag, ZXIC_UINT32 *zcell3_bulk_id, + ZXIC_UINT32 *zcell3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONO_T zblk_zcell_mono_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell0_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell0_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell1_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell1_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell2_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell2_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell3_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zcell3_mono_flag); + + rtn = dpp_reg_read(dev, 
SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONOr, + ((ZBLK_ADDR_CONV(zblk_idx) >> 3) & 0x3), + (ZBLK_ADDR_CONV(zblk_idx) & 0x7), + &zblk_zcell_mono_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + *zcell0_bulk_id = zblk_zcell_mono_cfg.ha_zcell0_tbl_id; + *zcell0_mono_flag = zblk_zcell_mono_cfg.ha_zcell0_mono_flag; + *zcell1_bulk_id = zblk_zcell_mono_cfg.ha_zcell1_tbl_id; + *zcell1_mono_flag = zblk_zcell_mono_cfg.ha_zcell1_mono_flag; + *zcell2_bulk_id = zblk_zcell_mono_cfg.ha_zcell2_tbl_id; + *zcell2_mono_flag = zblk_zcell_mono_cfg.ha_zcell2_mono_flag; + *zcell3_bulk_id = zblk_zcell_mono_cfg.ha_zcell3_tbl_id; + *zcell3_mono_flag = zblk_zcell_mono_cfg.ha_zcell3_mono_flag; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置业务表独占的zreg +* @param dev_id +* @param zblk_idx zblock索引(2bit zgroup + 3bit zblock) +* @param zreg0_bulk_id 独占zreg0的业务表号 +* @param zreg0_mono_flag zreg0被独占的标志 +* @param zreg1_bulk_id 独占zreg1的业务表号 +* @param zreg1_mono_flag zreg1被独占的标志 +* @param zreg2_bulk_id 独占zreg2的业务表号 +* @param zreg2_mono_flag zreg2被独占的标志 +* @param zreg3_bulk_id 独占zreg3的业务表号 +* @param zreg3_mono_flag zreg3被独占的标志 +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/04/27 +************************************************************/ +DPP_STATUS +dpp_se_zreg_mono_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, + ZXIC_UINT32 zreg0_bulk_id, ZXIC_UINT32 zreg0_mono_flag, + ZXIC_UINT32 zreg1_bulk_id, ZXIC_UINT32 zreg1_mono_flag, + ZXIC_UINT32 zreg2_bulk_id, ZXIC_UINT32 zreg2_mono_flag, + ZXIC_UINT32 zreg3_bulk_id, ZXIC_UINT32 zreg3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONO_T zblk_zreg_mono_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + zblk_zreg_mono_cfg.ha_zreg0_tbl_id = zreg0_bulk_id; + zblk_zreg_mono_cfg.ha_zreg0_mono_flag = zreg0_mono_flag; + zblk_zreg_mono_cfg.ha_zreg1_tbl_id = zreg1_bulk_id; + 
zblk_zreg_mono_cfg.ha_zreg1_mono_flag = zreg1_mono_flag; + zblk_zreg_mono_cfg.ha_zreg2_tbl_id = zreg2_bulk_id; + zblk_zreg_mono_cfg.ha_zreg2_mono_flag = zreg2_mono_flag; + zblk_zreg_mono_cfg.ha_zreg3_tbl_id = zreg3_bulk_id; + zblk_zreg_mono_cfg.ha_zreg3_mono_flag = zreg3_mono_flag; + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONOr, + ((ZBLK_ADDR_CONV(zblk_idx) >> 3) & 0x3), + (ZBLK_ADDR_CONV(zblk_idx) & 0x7), + &zblk_zreg_mono_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取业务表独占的zcell +* @param dev_id +* @param zblk_idx +* @param *zreg0_bulk_id +* @param *zreg0_mono_flag +* @param *zreg1_bulk_id +* @param *zreg1_mono_flag +* @param *zreg2_bulk_id +* @param *zreg2_mono_flag +* @param *zreg3_bulk_id +* @param *zreg3_mono_flag +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/04/27 +************************************************************/ +DPP_STATUS dpp_se_zreg_mono_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 zblk_idx, ZXIC_UINT32 *zreg0_bulk_id, + ZXIC_UINT32 *zreg0_mono_flag, ZXIC_UINT32 *zreg1_bulk_id, + ZXIC_UINT32 *zreg1_mono_flag, ZXIC_UINT32 *zreg2_bulk_id, + ZXIC_UINT32 *zreg2_mono_flag, ZXIC_UINT32 *zreg3_bulk_id, + ZXIC_UINT32 *zreg3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONO_T zblk_zreg_mono_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg0_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg0_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg1_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg1_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg2_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg2_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg3_bulk_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), zreg3_mono_flag); + + rtn = 
dpp_reg_read(dev, SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONOr, + ((ZBLK_ADDR_CONV(zblk_idx) >> 3) & 0x3), + (ZBLK_ADDR_CONV(zblk_idx) & 0x7), + &zblk_zreg_mono_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + *zreg0_bulk_id = zblk_zreg_mono_cfg.ha_zreg0_tbl_id; + *zreg0_mono_flag = zblk_zreg_mono_cfg.ha_zreg0_mono_flag; + *zreg1_bulk_id = zblk_zreg_mono_cfg.ha_zreg1_tbl_id; + *zreg1_mono_flag = zblk_zreg_mono_cfg.ha_zreg1_mono_flag; + *zreg2_bulk_id = zblk_zreg_mono_cfg.ha_zreg2_tbl_id; + *zreg2_mono_flag = zblk_zreg_mono_cfg.ha_zreg2_mono_flag; + *zreg3_bulk_id = zblk_zreg_mono_cfg.ha_zreg3_tbl_id; + *zreg3_mono_flag = zblk_zreg_mono_cfg.ha_zreg3_mono_flag; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置hash访问片外DDR的属性 +* @param dev_id +* @param hash0_mono_flag +* @param hash1_mono_flag +* @param hash2_mono_flag +* @param hash3_mono_flag +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/05/7 +************************************************************/ +DPP_STATUS dpp_se_hash_zcam_mono_flags_set(DPP_DEV_T *dev, + ZXIC_UINT32 hash0_mono_flag, + ZXIC_UINT32 hash1_mono_flag, + ZXIC_UINT32 hash2_mono_flag, + ZXIC_UINT32 hash3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH_MONO_FLAG_T hash_mono_flag = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + + hash_mono_flag.hash0_mono_flag = hash0_mono_flag; + hash_mono_flag.hash1_mono_flag = hash1_mono_flag; + hash_mono_flag.hash2_mono_flag = hash2_mono_flag; + hash_mono_flag.hash3_mono_flag = hash3_mono_flag; + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_HASH_MONO_FLAGr, 0, 0, + &hash_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 设置hash访问片外DDR的属性 +* @param dev_id +* @param hash0_mono_flag +* @param hash1_mono_flag +* @param hash2_mono_flag +* @param hash3_mono_flag 
+* +* @return +* @remark 无 +* @see +* @author tf @date 2016/05/7 +************************************************************/ +DPP_STATUS dpp_se_hash_zcam_mono_flags_get(DPP_DEV_T *dev, + ZXIC_UINT32 *hash0_mono_flag, + ZXIC_UINT32 *hash1_mono_flag, + ZXIC_UINT32 *hash2_mono_flag, + ZXIC_UINT32 *hash3_mono_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH_MONO_FLAG_T hash_mono_flag = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash0_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash1_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash2_mono_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash3_mono_flag); + + rtn = dpp_reg_read(dev, SE4K_SE_ALG_HASH_MONO_FLAGr, 0, 0, + &hash_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + *hash0_mono_flag = hash_mono_flag.hash0_mono_flag; + *hash1_mono_flag = hash_mono_flag.hash1_mono_flag; + *hash2_mono_flag = hash_mono_flag.hash2_mono_flag; + *hash3_mono_flag = hash_mono_flag.hash3_mono_flag; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置hash访问片外DDR的属性 +* @param dev_id +* @param hash_id +* @param ext_mode +* @param flag +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/07/26 +************************************************************/ +DPP_STATUS dpp_se_hash_ext_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 ext_mode, ZXIC_UINT32 flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH0_EXT_CFG_RGT_T hash_ext_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + + hash_ext_cfg.hash0_ext_flag = flag; + hash_ext_cfg.hash0_ext_mode = ext_mode; + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_HASH0_EXT_CFG_RGTr + hash_id, 0, 0, + &hash_ext_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, 
"dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取hash访问片外DDR的属性 +* @param dev_id 设备号 +* @param hash_id hash引擎,范围0~3 +* @param p_content_type 每个hash表项读一次ddr3还是两次;1-读512b的宽度,读两次 0-读256宽度,读一次 +* @param p_flag 片外ddr3是否存储hash表,1-片外存储hash表 0-片外不存储hash表 +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/07/26 +************************************************************/ +DPP_STATUS dpp_se_hash_ext_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 hash_id, + ZXIC_UINT32 *p_content_type, + ZXIC_UINT32 *p_flag) +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH0_EXT_CFG_RGT_T hash_ext_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_content_type); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_flag); + + rtn = dpp_reg_read(dev, SE4K_SE_ALG_HASH0_EXT_CFG_RGTr + hash_id, 0, 0, + &hash_ext_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_read"); + + *p_content_type = hash_ext_cfg.hash0_ext_mode; + *p_flag = hash_ext_cfg.hash0_ext_flag; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置hash业务表深度 +* @param dev_id +* @param hash_id +* @param depth_bit +* @param content_type +* @param flag +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/01/28 +************************************************************/ +DPP_STATUS dpp_se_hash_tbl_depth_set( + DPP_DEV_T *dev, ZXIC_UINT32 hash_id, ZXIC_UINT32 hash_tbl0_depth, + ZXIC_UINT32 hash_tbl1_depth, ZXIC_UINT32 hash_tbl2_depth, + ZXIC_UINT32 hash_tbl3_depth, ZXIC_UINT32 hash_tbl4_depth, + ZXIC_UINT32 hash_tbl5_depth, ZXIC_UINT32 hash_tbl6_depth, + ZXIC_UINT32 hash_tbl7_depth) + +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH0_TBL30_DEPTH_T hash_tbl30_depth = { 0 }; + DPP_SE4K_SE_ALG_HASH0_TBL74_DEPTH_T hash_tbl74_depth = { 0 }; + + 
ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + + hash_tbl30_depth.hash0_tbl0_depth = hash_tbl0_depth; + hash_tbl30_depth.hash0_tbl1_depth = hash_tbl1_depth; + hash_tbl30_depth.hash0_tbl2_depth = hash_tbl2_depth; + hash_tbl30_depth.hash0_tbl3_depth = hash_tbl3_depth; + hash_tbl74_depth.hash0_tbl4_depth = hash_tbl4_depth; + hash_tbl74_depth.hash0_tbl5_depth = hash_tbl5_depth; + hash_tbl74_depth.hash0_tbl6_depth = hash_tbl6_depth; + hash_tbl74_depth.hash0_tbl7_depth = hash_tbl7_depth; + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_HASH0_TBL30_DEPTHr + 2 * hash_id, + 0, 0, &hash_tbl30_depth); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + rtn = dpp_reg_write(dev, SE4K_SE_ALG_HASH0_TBL74_DEPTHr + 2 * hash_id, + 0, 0, &hash_tbl74_depth); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取hash业务表深度 +* @param dev_id +* @param hash_id +* @param p_content_type +* @param p_flag +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/01/28 +************************************************************/ +DPP_STATUS dpp_se_hash_tbl_depth_get( + DPP_DEV_T *dev, ZXIC_UINT32 hash_id, ZXIC_UINT32 *hash_tbl0_depth, + ZXIC_UINT32 *hash_tbl1_depth, ZXIC_UINT32 *hash_tbl2_depth, + ZXIC_UINT32 *hash_tbl3_depth, ZXIC_UINT32 *hash_tbl4_depth, + ZXIC_UINT32 *hash_tbl5_depth, ZXIC_UINT32 *hash_tbl6_depth, + ZXIC_UINT32 *hash_tbl7_depth) + +{ + DPP_STATUS rtn = DPP_OK; + + DPP_SE4K_SE_ALG_HASH0_TBL30_DEPTH_T hash_tbl30_depth = { 0 }; + DPP_SE4K_SE_ALG_HASH0_TBL74_DEPTH_T hash_tbl74_depth = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_id, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl0_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl1_depth); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl2_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl3_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl4_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl5_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl6_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_tbl7_depth); + + rtn = dpp_reg_read(dev, SE4K_SE_ALG_HASH0_TBL30_DEPTHr + 2 * hash_id, 0, + 0, &hash_tbl30_depth); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_read"); + + rtn = dpp_reg_read(dev, SE4K_SE_ALG_HASH0_TBL74_DEPTHr + 2 * hash_id, 0, + 0, &hash_tbl74_depth); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_reg_read"); + + *hash_tbl0_depth = hash_tbl30_depth.hash0_tbl0_depth; + *hash_tbl1_depth = hash_tbl30_depth.hash0_tbl1_depth; + *hash_tbl2_depth = hash_tbl30_depth.hash0_tbl2_depth; + *hash_tbl3_depth = hash_tbl30_depth.hash0_tbl3_depth; + *hash_tbl4_depth = hash_tbl74_depth.hash0_tbl4_depth; + *hash_tbl5_depth = hash_tbl74_depth.hash0_tbl5_depth; + *hash_tbl6_depth = hash_tbl74_depth.hash0_tbl6_depth; + *hash_tbl7_depth = hash_tbl74_depth.hash0_tbl7_depth; + + return DPP_OK; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_car.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_car.c new file mode 100644 index 000000000000..425ceb610fbb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_car.c @@ -0,0 +1,4544 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_stat_car.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : XXX +* 完成日期 : 2017/02/09 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_reg.h" +// #include "dpp_devmng_api.h" +#include 
"dpp_stat_car.h" +#include "dpp_stat_api.h" + +#if ZXIC_REAL("Global Variable") + +/** 软复位相关 */ +static DPP_CAR_SOFT_RESET_DATA_T g_car_store_data[DPP_DEV_CHANNEL_MAX] = { + { { 0 } } +}; + +#define GET_DPP_CAR_SOFT_RESET_INFO(dev_id) (&g_car_store_data[dev_id]) + +#define DPP_CAR1_REG_OFFSET (0) + +#endif + +#if 0 +/***********************************************************/ +/** car 模块初始化 +* @param dev_id 设备号 +* @param p_car_cfg CAR配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/05 +************************************************************/ +DPP_STATUS dpp_stat_car_init(ZXIC_UINT32 dev_id, DPP_CAR_CFG_T *p_car_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 car_type = 0; + ZXIC_UINT32 car_mono_mode = 0; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, p_car_cfg->car0_mono_mode[dev_id], CAR_SMMU0_MONO_MODE_NONE, CAR_SMMU0_MONO_MODE_MAX - 1); + /* 已经初始化完成,待后添加重新初始化的代码 */ + if (p_car_cfg->is_init[dev_id]) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Error! 
CAR cfg has been initialized!\n"); + } + + car_mono_mode = p_car_cfg->car0_mono_mode[dev_id]; + + for (car_type = STAT_CAR_A_TYPE; car_type < STAT_CAR_MAX_TYPE; car_type++) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_stat_car_hardware_init(car_type[%d]) begin\n", car_type); + rc = dpp_stat_car_hardware_init(dev_id, + car_type, + car_mono_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_hardware_init"); + } + + p_car_cfg->is_init[dev_id] = 1; + + /* 软复位相关 */ + ZXIC_COMM_MEMSET(&g_car_store_data[dev_id], 0, sizeof(DPP_CAR_SOFT_RESET_DATA_T)); + g_car_store_data[dev_id].is_init = 1; + + return rc; +} +#endif +#if ZXIC_REAL("Basic Reg Operation ") +/***********************************************************/ +/** car A的流设置 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param drop_flag 丢弃标志 +* @param plcr_en 监管使能 +* @param profile_id 监管模板号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARA_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), drop_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), plcr_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + + queue_cfg.cara_drop = drop_flag; + queue_cfg.cara_plcr_en = plcr_en; + queue_cfg.cara_profile_id = profile_id; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ 
+/** 获取car A的流设置 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_cara_queue_cfg car A流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_cara_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_A_QUEUE_CFG_T *p_cara_queue_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARA_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_queue_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_cara_queue_cfg->flow_id = flow_id; + p_cara_queue_cfg->drop_flag = queue_cfg.cara_drop; + p_cara_queue_cfg->plcr_en = queue_cfg.cara_plcr_en; + p_cara_queue_cfg->profile_id = queue_cfg.cara_profile_id; + + p_cara_queue_cfg->tq = ZXIC_COMM_COUNTER64_BUILD(queue_cfg.cara_tq_h, + queue_cfg.cara_tq_l); + + p_cara_queue_cfg->ted = queue_cfg.cara_ted; + p_cara_queue_cfg->tcd = queue_cfg.cara_tcd; + p_cara_queue_cfg->tei = queue_cfg.cara_tei; + p_cara_queue_cfg->tci = queue_cfg.cara_tci; + + return rc; +} + +/***********************************************************/ +/** 获取car A 包长监管流配置 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_cara_queue_cfg car A流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_pkt_queue_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_A_PKT_QUEUE_CFG_T *p_cara_queue_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARA_QUEUE_RAM0_159_0_PKT_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), 
DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_queue_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_QUEUE_RAM0_159_0_PKTr, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_cara_queue_cfg->flow_id = flow_id; + p_cara_queue_cfg->plcr_en = queue_cfg.cara_plcr_en; + p_cara_queue_cfg->profile_id = queue_cfg.cara_profile_id; + + p_cara_queue_cfg->tq = ZXIC_COMM_COUNTER64_BUILD(queue_cfg.cara_tq_h, + queue_cfg.cara_tq_l); + + p_cara_queue_cfg->dc = ZXIC_COMM_COUNTER64_BUILD(queue_cfg.cara_dc_high, + queue_cfg.cara_dc_low); + p_cara_queue_cfg->tc = queue_cfg.cara_tc; + + return rc; +} + +/***********************************************************/ +/** car A 字节限速监管模板设定 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_cara_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_cara_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + + DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_profile_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->profile_id, + 0, DPP_CAR_A_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->pkt_sign, 0, + 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cf, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cm, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cd, + 
CAR_CD_MODE_SRTCM, CAR_CD_MODE_INVALID - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cir, 0, + DPP_CAR_MAX_CIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->eir, 0, + DPP_CAR_MAX_EIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cbs, 0, + DPP_CAR_MAX_CBS_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->ebs, 0, + DPP_CAR_MAX_EBS_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX( + DEV_ID(dev), p_cara_profile_cfg->random_disc_e, 0, 0xffffffff); + ZXIC_COMM_CHECK_DEV_INDEX( + DEV_ID(dev), p_cara_profile_cfg->random_disc_c, 0, 0xffffffff); + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_cara_profile_cfg->e_yellow_pri[0], 0, + DPP_CAR_MAX_PRI_VALUE); + + for (i = 1; i < DPP_CAR_PRI_MAX; i++) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_cara_profile_cfg->c_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_cara_profile_cfg->e_green_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_cara_profile_cfg->e_yellow_pri[i], + 0, DPP_CAR_MAX_PRI_VALUE); + } + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "==> dpp_stat_cara_profile_cfg_set profile_id[%d]: \n", + profile_id); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| -------------------------------------------------------------- | \n"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| %-5s | %-5s | %-5s | %-10s | %-10s | %-10s | %-10s | \n", + "cd", "cf", "cm", "cir", "cbs", "eir", "ebs"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| %-5d | %-5d | %-5d | %-10d | %-10d | %-10d | %-10d | \n", + p_cara_profile_cfg->cd, p_cara_profile_cfg->cf, + p_cara_profile_cfg->cm, p_cara_profile_cfg->cir, + p_cara_profile_cfg->cbs, p_cara_profile_cfg->eir, + p_cara_profile_cfg->ebs); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| ----- | ----- | ----- | ---------- | ---------- | ---------- | ---------- | \n"); + + profile_cfg.cara_e_y_pri7 = p_cara_profile_cfg->e_yellow_pri[7]; + profile_cfg.cara_e_y_pri6 = 
p_cara_profile_cfg->e_yellow_pri[6]; + profile_cfg.cara_e_y_pri5 = p_cara_profile_cfg->e_yellow_pri[5]; + profile_cfg.cara_e_y_pri4 = p_cara_profile_cfg->e_yellow_pri[4]; + profile_cfg.cara_e_y_pri3 = p_cara_profile_cfg->e_yellow_pri[3]; + profile_cfg.cara_e_y_pri2 = p_cara_profile_cfg->e_yellow_pri[2]; + profile_cfg.cara_e_y_pri1 = p_cara_profile_cfg->e_yellow_pri[1]; + profile_cfg.cara_e_y_pri0 = p_cara_profile_cfg->e_yellow_pri[0]; + + profile_cfg.cara_e_g_pri7 = p_cara_profile_cfg->e_green_pri[7]; + profile_cfg.cara_e_g_pri6 = p_cara_profile_cfg->e_green_pri[6]; + profile_cfg.cara_e_g_pri5 = p_cara_profile_cfg->e_green_pri[5]; + profile_cfg.cara_e_g_pri4 = p_cara_profile_cfg->e_green_pri[4]; + profile_cfg.cara_e_g_pri3 = p_cara_profile_cfg->e_green_pri[3]; + profile_cfg.cara_e_g_pri2 = p_cara_profile_cfg->e_green_pri[2]; + profile_cfg.cara_e_g_pri1 = p_cara_profile_cfg->e_green_pri[1]; + + profile_cfg.cara_c_pri7 = p_cara_profile_cfg->c_pri[7]; + profile_cfg.cara_c_pri6 = p_cara_profile_cfg->c_pri[6]; + profile_cfg.cara_c_pri5 = p_cara_profile_cfg->c_pri[5]; + profile_cfg.cara_c_pri4 = p_cara_profile_cfg->c_pri[4]; + profile_cfg.cara_c_pri3 = p_cara_profile_cfg->c_pri[3]; + profile_cfg.cara_c_pri2 = p_cara_profile_cfg->c_pri[2]; + profile_cfg.cara_c_pri1 = p_cara_profile_cfg->c_pri[1]; + + profile_cfg.cara_cbs = p_cara_profile_cfg->cbs; + profile_cfg.cara_ebs_pbs = p_cara_profile_cfg->ebs; + profile_cfg.cara_cir = p_cara_profile_cfg->cir; + profile_cfg.cara_eir = p_cara_profile_cfg->eir; + + profile_cfg.cara_cd = p_cara_profile_cfg->cd; + profile_cfg.cara_cf = p_cara_profile_cfg->cf; + profile_cfg.cara_cm = p_cara_profile_cfg->cm; + profile_cfg.cara_pkt_sign = p_cara_profile_cfg->pkt_sign; + + profile_cfg.cara_profile_wr = 0; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ 
+/** 获取car A包的监管模板配置 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_cara_profile_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_cara_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_profile_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_cara_profile_cfg->profile_id = profile_id; + p_cara_profile_cfg->pkt_sign = profile_cfg.cara_pkt_sign; + p_cara_profile_cfg->cd = profile_cfg.cara_cd; + p_cara_profile_cfg->cf = profile_cfg.cara_cf; + p_cara_profile_cfg->cm = profile_cfg.cara_cm; + p_cara_profile_cfg->eir = profile_cfg.cara_eir; + p_cara_profile_cfg->cir = profile_cfg.cara_cir; + p_cara_profile_cfg->ebs = profile_cfg.cara_ebs_pbs; + p_cara_profile_cfg->cbs = profile_cfg.cara_cbs; + + p_cara_profile_cfg->c_pri[1] = profile_cfg.cara_c_pri1; + p_cara_profile_cfg->c_pri[2] = profile_cfg.cara_c_pri2; + p_cara_profile_cfg->c_pri[3] = profile_cfg.cara_c_pri3; + p_cara_profile_cfg->c_pri[4] = profile_cfg.cara_c_pri4; + p_cara_profile_cfg->c_pri[5] = profile_cfg.cara_c_pri5; + p_cara_profile_cfg->c_pri[6] = profile_cfg.cara_c_pri6; + p_cara_profile_cfg->c_pri[7] = profile_cfg.cara_c_pri7; + + p_cara_profile_cfg->e_green_pri[1] = profile_cfg.cara_e_g_pri1; + p_cara_profile_cfg->e_green_pri[2] = profile_cfg.cara_e_g_pri2; + p_cara_profile_cfg->e_green_pri[3] = profile_cfg.cara_e_g_pri3; + 
p_cara_profile_cfg->e_green_pri[4] = profile_cfg.cara_e_g_pri4; + p_cara_profile_cfg->e_green_pri[5] = profile_cfg.cara_e_g_pri5; + p_cara_profile_cfg->e_green_pri[6] = profile_cfg.cara_e_g_pri6; + p_cara_profile_cfg->e_green_pri[7] = profile_cfg.cara_e_g_pri7; + + p_cara_profile_cfg->e_yellow_pri[0] = profile_cfg.cara_e_y_pri0; + p_cara_profile_cfg->e_yellow_pri[1] = profile_cfg.cara_e_y_pri1; + p_cara_profile_cfg->e_yellow_pri[2] = profile_cfg.cara_e_y_pri2; + p_cara_profile_cfg->e_yellow_pri[3] = profile_cfg.cara_e_y_pri3; + p_cara_profile_cfg->e_yellow_pri[4] = profile_cfg.cara_e_y_pri4; + p_cara_profile_cfg->e_yellow_pri[5] = profile_cfg.cara_e_y_pri5; + p_cara_profile_cfg->e_yellow_pri[6] = profile_cfg.cara_e_y_pri6; + p_cara_profile_cfg->e_yellow_pri[7] = profile_cfg.cara_e_y_pri7; + + return rc; +} + +/***********************************************************/ +/** car A包限速监管模板设定 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_pkt_profile_cfg_set( + DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_cara_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + + DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKT_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_profile_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->pkt_sign, 0, + 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cir, 0, + DPP_CAR_MAX_PKT_CIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_cara_profile_cfg->cbs, 0, + DPP_CAR_MAX_PKT_CBS_VALUE); + + for (i = 0; i < DPP_CAR_PRI_MAX; i++) { + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_cara_profile_cfg->pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + } + + profile_cfg.cara_pri7 = p_cara_profile_cfg->pri[7]; + profile_cfg.cara_pri6 = p_cara_profile_cfg->pri[6]; + profile_cfg.cara_pri5 = p_cara_profile_cfg->pri[5]; + profile_cfg.cara_pri4 = p_cara_profile_cfg->pri[4]; + profile_cfg.cara_pri3 = p_cara_profile_cfg->pri[3]; + profile_cfg.cara_pri2 = p_cara_profile_cfg->pri[2]; + profile_cfg.cara_pri1 = p_cara_profile_cfg->pri[1]; + profile_cfg.cara_pri0 = p_cara_profile_cfg->pri[0]; + + profile_cfg.cara_pkt_cbs = p_cara_profile_cfg->cbs; + profile_cfg.cara_pkt_cir = p_cara_profile_cfg->cir; + profile_cfg.cara_pkt_sign = p_cara_profile_cfg->pkt_sign; + + profile_cfg.cara_profile_wr = 0; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKTr, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car A包长监管模板配置 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_cara_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_cara_pkt_profile_cfg_get( + DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_cara_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKT_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_cara_profile_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKTr, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_cara_profile_cfg->profile_id = profile_id; + p_cara_profile_cfg->pkt_sign = 
profile_cfg.cara_pkt_sign; + p_cara_profile_cfg->cir = profile_cfg.cara_pkt_cir; + p_cara_profile_cfg->cbs = profile_cfg.cara_pkt_cbs; + + p_cara_profile_cfg->pri[0] = profile_cfg.cara_pri0; + p_cara_profile_cfg->pri[1] = profile_cfg.cara_pri1; + p_cara_profile_cfg->pri[2] = profile_cfg.cara_pri2; + p_cara_profile_cfg->pri[3] = profile_cfg.cara_pri3; + p_cara_profile_cfg->pri[4] = profile_cfg.cara_pri4; + p_cara_profile_cfg->pri[5] = profile_cfg.cara_pri5; + p_cara_profile_cfg->pri[6] = profile_cfg.cara_pri6; + p_cara_profile_cfg->pri[7] = profile_cfg.cara_pri7; + + return rc; +} + +/***********************************************************/ +/** 配置car A的qvos溢出模式 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), qvos_mode, + CAR_QVOS_MODE_OVERFLOW_0, + CAR_QVOS_MODE_OVERFLOW_MAX - 1); + + qvos_cfg.cara_qovs = qvos_mode; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car A的qvos溢出模式 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + 
ZXIC_UINT32 *p_qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qvos_mode); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qvos_mode = qvos_cfg.cara_qovs; + + return rc; +} + +/***********************************************************/ +/** 设置第一级的映射关系 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param map_flow_id 映射的流号 +* @param map_sp 映射的优先级 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_map_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_LOOK_UP_TABLE1_T map_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_sp, DPP_CAR_PRI0, + DPP_CAR_PRI_MAX - 1); + + map_cfg.cara_flow_id = map_flow_id; + map_cfg.cara_sp = map_sp; + + rc = dpp_reg_write(dev, STAT_CAR0_LOOK_UP_TABLE1r, 0, flow_id, + &map_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取第一级的映射关系 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_map_flow_id 映射流号 +* @param p_map_sp 映射优先级 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 
+************************************************************/ +DPP_STATUS dpp_stat_cara_queue_map_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_LOOK_UP_TABLE1_T map_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_flow_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_sp); + + rc = dpp_reg_read(dev, STAT_CAR0_LOOK_UP_TABLE1r, 0, flow_id, &map_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_map_flow_id = map_cfg.cara_flow_id; + *p_map_sp = map_cfg.cara_sp; + + return rc; +} + +/***********************************************************/ +/** car A指定队列模式配置,仅用于调试 +* @param dev_id 设备号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_appoint_mode_set(DPP_DEV_T *dev, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_APPOINT_QNUM_OR_SP_T appoint_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), global_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sp_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), appoint_sp, DPP_CAR_PRI0, + DPP_CAR_PRI_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), appoint_queue, 0, + DPP_CAR_A_FLOW_ID_MAX); + + appoint_cfg.cara_appoint_qnum_or_not = global_en; + appoint_cfg.cara_appoint_sp_or_not = sp_en; + appoint_cfg.cara_plcr_stat_sp = 
appoint_sp; + appoint_cfg.cara_plcr_stat_qnum = appoint_queue; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_APPOINT_QNUM_OR_SPr, 0, 0, + &appoint_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car A指定队列模式的配置 +* @param dev_id 设备号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* @param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 +************************************************************/ +DPP_STATUS dpp_stat_cara_queue_appoint_mode_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_APPOINT_QNUM_OR_SP_T appoint_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_global_en); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_sp_en); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_appoint_sp); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_appoint_queue); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_APPOINT_QNUM_OR_SPr, 0, 0, + &appoint_cfg); + + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_global_en = appoint_cfg.cara_appoint_qnum_or_not; + *p_sp_en = appoint_cfg.cara_appoint_sp_or_not; + *p_appoint_sp = appoint_cfg.cara_plcr_stat_sp; + *p_appoint_queue = appoint_cfg.cara_plcr_stat_qnum; + + return rc; +} + +/***********************************************************/ +/** car A 调试计数读取模式配置 +* @param dev_id 设备号 +* @param overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param rd_mode 读取模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS 
dpp_stat_cara_dbg_cnt_mode_set(DPP_DEV_T *dev, + ZXIC_UINT32 overflow_mode, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_CFGMT_COUNT_MODE_T cnt_mode_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), overflow_mode, CAR_KEEP_COUNT, + CAR_RE_COUNT); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_mode, CAR_READ_NOT_CLEAR, + CAR_READ_AND_CLEAR); + + cnt_mode_cfg.cara_cfgmt_count_overflow_mode = overflow_mode; + cnt_mode_cfg.cara_cfgmt_count_rd_mode = rd_mode; + + rc = dpp_reg_write(dev, STAT_CAR0_CARA_CFGMT_COUNT_MODEr, 0, 0, + &cnt_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** car A 调试计数读取模式获取 +* @param dev_id 设备号 +* @param p_overflow_mode +* @param p_rd_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_dbg_cnt_mode_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_overflow_mode, + ZXIC_UINT32 *p_rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_CFGMT_COUNT_MODE_T cnt_mode_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_overflow_mode); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_rd_mode); + + rc = dpp_reg_read(dev, STAT_CAR0_CARA_CFGMT_COUNT_MODEr, 0, 0, + &cnt_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_overflow_mode = cnt_mode_cfg.cara_cfgmt_count_overflow_mode; + *p_rd_mode = cnt_mode_cfg.cara_cfgmt_count_rd_mode; + + return rc; +} + +#if 0 +/***********************************************************/ +/** car a 的调试计数获取 +* @param dev_id +* @param p_car_dbg_cnt +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ 
+DPP_STATUS dpp_stat_cara_dbg_cnt_get(ZXIC_UINT32 dev_id, + DPP_STAT_CAR_DBG_CNT_T *p_car_dbg_cnt) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_PKT_DES_I_CNT_T cara_pkt_in_total_cnt_cfg = {0}; + DPP_STAT_CAR0_CARA_PKT_SIZE_CNT_T cara_pkt_size_cnt_cfg = {0}; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 size = 0; + ZXIC_UINT32 *p_tmp_cnt = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_dbg_cnt); + + size = sizeof(DPP_STAT_CAR_DBG_CNT_T) / sizeof(ZXIC_UINT32) - 1; + p_tmp_cnt = (ZXIC_UINT32 *)p_car_dbg_cnt; + + for (i = 0; i < size; i++) { + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARA_PKT_DES_I_CNTr + i, + 0, + 0, + &cara_pkt_in_total_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + ZXIC_COMM_MEMCPY(p_tmp_cnt + i, &(cara_pkt_in_total_cnt_cfg.cara_pkt_des_i_cnt), sizeof(ZXIC_UINT32)); + } + + /* STAT_CAR0_CARA_PKT_SIZE_CNTr 不连续,单独读取 */ + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARA_PKT_SIZE_CNTr, + 0, + 0, + &cara_pkt_size_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + p_car_dbg_cnt->pkt_size_cnt = cara_pkt_size_cnt_cfg.cara_pkt_size_cnt; + + return rc; +} + +/***********************************************************/ +/** 获取car A的初始化状态 +* @param dev_id 设备号 +* @param p_init_done 初始化完成使能 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_init_done_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_init_done) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARA_PLCR_INIT_DONT_T cara_init_done_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_init_done); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARA_PLCR_INIT_DONTr, + 0, + 0, + &cara_init_done_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_init_done = cara_init_done_cfg.cara_plcr_init_done; + + return rc; +} + 
+#endif +/***********************************************************/ +/** car B的流设置 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param drop_flag 丢弃标志 +* @param plcr_en 监管使能 +* @param profile_id 监管模板号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARB_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), drop_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), plcr_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + + queue_cfg.carb_drop = drop_flag; + queue_cfg.carb_plcr_en = plcr_en; + queue_cfg.carb_profile_id = profile_id; + + rc = dpp_reg_write(dev, STAT_CAR0_CARB_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car B的流设置 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_carb_queue_cfg car A流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carb_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_B_QUEUE_CFG_T *p_carb_queue_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARB_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_carb_queue_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARB_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_carb_queue_cfg->flow_id = flow_id; + p_carb_queue_cfg->drop_flag = queue_cfg.carb_drop; + p_carb_queue_cfg->plcr_en = queue_cfg.carb_plcr_en; + p_carb_queue_cfg->profile_id = queue_cfg.carb_profile_id; + + p_carb_queue_cfg->tq = ZXIC_COMM_COUNTER64_BUILD(queue_cfg.carb_tq_h, + queue_cfg.carb_tq_l); + + p_carb_queue_cfg->tce_flag = queue_cfg.carb_ted; + p_carb_queue_cfg->tce = queue_cfg.carb_tcd; + p_carb_queue_cfg->te = queue_cfg.carb_tei; + p_carb_queue_cfg->tc = queue_cfg.carb_tci; + + return rc; +} + +/***********************************************************/ +/** car B监管模板设定 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_carb_profile_cfg 监管模板配置 +* pkt_sign 包限速标记写死成0,防止用户在carB包限速 +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carb_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_carb_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + + DPP_STAT_CAR0_CARB_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_carb_profile_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->pkt_sign, 0, + 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->cf, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->cm, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->random_disc_e, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->random_disc_c, 0, 1); + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->cd, + CAR_CD_MODE_SRTCM, CAR_CD_MODE_INVALID - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->cir, 0, + DPP_CAR_MAX_CIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->eir, 0, + DPP_CAR_MAX_EIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->cbs, 0, + DPP_CAR_MAX_CBS_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carb_profile_cfg->ebs, 0, + DPP_CAR_MAX_EBS_VALUE); + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->e_yellow_pri[0], 0, + DPP_CAR_MAX_PRI_VALUE); + + for (i = 1; i < DPP_CAR_PRI_MAX; i++) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->c_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->e_green_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carb_profile_cfg->e_yellow_pri[i], + 0, DPP_CAR_MAX_PRI_VALUE); + } + + profile_cfg.carb_e_y_pri7 = p_carb_profile_cfg->e_yellow_pri[7]; + profile_cfg.carb_e_y_pri6 = p_carb_profile_cfg->e_yellow_pri[6]; + profile_cfg.carb_e_y_pri5 = p_carb_profile_cfg->e_yellow_pri[5]; + profile_cfg.carb_e_y_pri4 = p_carb_profile_cfg->e_yellow_pri[4]; + profile_cfg.carb_e_y_pri3 = p_carb_profile_cfg->e_yellow_pri[3]; + profile_cfg.carb_e_y_pri2 = p_carb_profile_cfg->e_yellow_pri[2]; + profile_cfg.carb_e_y_pri1 = p_carb_profile_cfg->e_yellow_pri[1]; + profile_cfg.carb_e_y_pri0 = p_carb_profile_cfg->e_yellow_pri[0]; + + profile_cfg.carb_e_g_pri7 = p_carb_profile_cfg->e_green_pri[7]; + profile_cfg.carb_e_g_pri6 = p_carb_profile_cfg->e_green_pri[6]; + profile_cfg.carb_e_g_pri5 = p_carb_profile_cfg->e_green_pri[5]; + profile_cfg.carb_e_g_pri4 = p_carb_profile_cfg->e_green_pri[4]; + profile_cfg.carb_e_g_pri3 = p_carb_profile_cfg->e_green_pri[3]; + profile_cfg.carb_e_g_pri2 = p_carb_profile_cfg->e_green_pri[2]; + profile_cfg.carb_e_g_pri1 = p_carb_profile_cfg->e_green_pri[1]; + + profile_cfg.carb_c_pri7 = 
p_carb_profile_cfg->c_pri[7]; + profile_cfg.carb_c_pri6 = p_carb_profile_cfg->c_pri[6]; + profile_cfg.carb_c_pri5 = p_carb_profile_cfg->c_pri[5]; + profile_cfg.carb_c_pri4 = p_carb_profile_cfg->c_pri[4]; + profile_cfg.carb_c_pri3 = p_carb_profile_cfg->c_pri[3]; + profile_cfg.carb_c_pri2 = p_carb_profile_cfg->c_pri[2]; + profile_cfg.carb_c_pri1 = p_carb_profile_cfg->c_pri[1]; + + profile_cfg.carb_cbs = p_carb_profile_cfg->cbs; + profile_cfg.carb_ebs_pbs = p_carb_profile_cfg->ebs; + profile_cfg.carb_cir = p_carb_profile_cfg->cir; + profile_cfg.carb_eir = p_carb_profile_cfg->eir; + + profile_cfg.carb_cd = p_carb_profile_cfg->cd; + profile_cfg.carb_cf = p_carb_profile_cfg->cf; + profile_cfg.carb_cm = p_carb_profile_cfg->cm; + + profile_cfg.carb_random_discard_en_e = + p_carb_profile_cfg->random_disc_e; + profile_cfg.carb_random_discard_en_c = + p_carb_profile_cfg->random_disc_c; + /* B级car包限速标记写死为0,以免用户配置成1对CARB进行包限速 */ + profile_cfg.carb_pkt_sign = 0; + + profile_cfg.carb_profile_wr = 0; + + rc = dpp_reg_write(dev, STAT_CAR0_CARB_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car B的监管模板配置 +* @param dev_id 设备号 +* @param profile_id 监管模板号 +* @param p_carb_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carb_profile_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_carb_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARB_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_carb_profile_cfg); + + rc = 
dpp_reg_read(dev, STAT_CAR0_CARB_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_carb_profile_cfg->profile_id = profile_id; + p_carb_profile_cfg->pkt_sign = profile_cfg.carb_pkt_sign; + p_carb_profile_cfg->cd = profile_cfg.carb_cd; + p_carb_profile_cfg->cf = profile_cfg.carb_cf; + p_carb_profile_cfg->cm = profile_cfg.carb_cm; + p_carb_profile_cfg->eir = profile_cfg.carb_eir; + p_carb_profile_cfg->cir = profile_cfg.carb_cir; + p_carb_profile_cfg->ebs = profile_cfg.carb_ebs_pbs; + p_carb_profile_cfg->cbs = profile_cfg.carb_cbs; + p_carb_profile_cfg->random_disc_e = + profile_cfg.carb_random_discard_en_e; + p_carb_profile_cfg->random_disc_c = + profile_cfg.carb_random_discard_en_c; + + p_carb_profile_cfg->c_pri[1] = profile_cfg.carb_c_pri1; + p_carb_profile_cfg->c_pri[2] = profile_cfg.carb_c_pri2; + p_carb_profile_cfg->c_pri[3] = profile_cfg.carb_c_pri3; + p_carb_profile_cfg->c_pri[4] = profile_cfg.carb_c_pri4; + p_carb_profile_cfg->c_pri[5] = profile_cfg.carb_c_pri5; + p_carb_profile_cfg->c_pri[6] = profile_cfg.carb_c_pri6; + p_carb_profile_cfg->c_pri[7] = profile_cfg.carb_c_pri7; + + p_carb_profile_cfg->e_green_pri[1] = profile_cfg.carb_e_g_pri1; + p_carb_profile_cfg->e_green_pri[2] = profile_cfg.carb_e_g_pri2; + p_carb_profile_cfg->e_green_pri[3] = profile_cfg.carb_e_g_pri3; + p_carb_profile_cfg->e_green_pri[4] = profile_cfg.carb_e_g_pri4; + p_carb_profile_cfg->e_green_pri[5] = profile_cfg.carb_e_g_pri5; + p_carb_profile_cfg->e_green_pri[6] = profile_cfg.carb_e_g_pri6; + p_carb_profile_cfg->e_green_pri[7] = profile_cfg.carb_e_g_pri7; + + p_carb_profile_cfg->e_yellow_pri[0] = profile_cfg.carb_e_y_pri0; + p_carb_profile_cfg->e_yellow_pri[1] = profile_cfg.carb_e_y_pri1; + p_carb_profile_cfg->e_yellow_pri[2] = profile_cfg.carb_e_y_pri2; + p_carb_profile_cfg->e_yellow_pri[3] = profile_cfg.carb_e_y_pri3; + p_carb_profile_cfg->e_yellow_pri[4] = profile_cfg.carb_e_y_pri4; + 
p_carb_profile_cfg->e_yellow_pri[5] = profile_cfg.carb_e_y_pri5; + p_carb_profile_cfg->e_yellow_pri[6] = profile_cfg.carb_e_y_pri6; + p_carb_profile_cfg->e_yellow_pri[7] = profile_cfg.carb_e_y_pri7; + + return rc; +} + +/***********************************************************/ +/** 配置car B的qvos溢出模式 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), qvos_mode, + CAR_QVOS_MODE_OVERFLOW_0, + CAR_QVOS_MODE_OVERFLOW_MAX - 1); + + qvos_cfg.carb_qovs = qvos_mode; + + rc = dpp_reg_write(dev, STAT_CAR0_CARB_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} +/***********************************************************/ +/** 获取car B的qvos溢出模式 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qvos_mode); + + rc = 
dpp_reg_read(dev, STAT_CAR0_CARB_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qvos_mode = qvos_cfg.carb_qovs; + + return rc; +} + +/***********************************************************/ +/** 设置第二级的映射关系 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param map_flow_id 映射的流号 +* @param map_sp 映射的优先级 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_map_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_LOOK_UP_TABLE2_T map_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_sp, DPP_CAR_PRI0, + DPP_CAR_PRI_MAX - 1); + + map_cfg.carb_flow_id = map_flow_id; + map_cfg.carb_sp = map_sp; + + rc = dpp_reg_write(dev, STAT_CAR0_LOOK_UP_TABLE2r, 0, flow_id, + &map_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取第二级的映射关系 +* @param dev_id 设备号 +* @param flow_id 流号 +* @param p_map_flow_id 映射流号 +* @param p_map_sp 映射优先级 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_map_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_LOOK_UP_TABLE2_T map_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_flow_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_sp); + + rc = dpp_reg_read(dev, STAT_CAR0_LOOK_UP_TABLE2r, 0, flow_id, &map_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_map_flow_id = map_cfg.carb_flow_id; + *p_map_sp = map_cfg.carb_sp; + + return rc; +} + +#if 0 +/***********************************************************/ +/** car B指定队列模式配置,仅用于调试 +* @param dev_id 设备号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_appoint_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_APPOINT_QNUM_OR_SP_T appoint_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, global_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sp_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, appoint_sp, DPP_CAR_PRI0, DPP_CAR_PRI_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, appoint_queue, 0, DPP_CAR_B_FLOW_ID_MAX); + + appoint_cfg.carb_appoint_qnum_or_not = global_en; + appoint_cfg.carb_appoint_sp_or_not = sp_en; + appoint_cfg.carb_plcr_stat_sp = appoint_sp; + appoint_cfg.carb_plcr_stat_qnum = appoint_queue; + + rc = dpp_reg_write(dev_id, + STAT_CAR0_CARB_APPOINT_QNUM_OR_SPr, + 0, + 0, + &appoint_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car B指定队列模式的配置 +* @param dev_id 设备号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* 
@param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 +************************************************************/ +DPP_STATUS dpp_stat_carb_queue_appoint_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_APPOINT_QNUM_OR_SP_T appoint_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_global_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sp_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_appoint_sp); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_appoint_queue); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARB_APPOINT_QNUM_OR_SPr, + 0, + 0, + &appoint_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_global_en = appoint_cfg.carb_appoint_qnum_or_not; + *p_sp_en = appoint_cfg.carb_appoint_sp_or_not; + *p_appoint_sp = appoint_cfg.carb_plcr_stat_sp; + *p_appoint_queue = appoint_cfg.carb_plcr_stat_qnum; + + return rc; +} + +/***********************************************************/ +/** car B 调试计数读取模式配置 +* @param dev_id 设备号 +* @param overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param rd_mode 读取模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_dbg_cnt_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 overflow_mode, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_CFGMT_COUNT_MODE_T cnt_mode_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, overflow_mode, CAR_KEEP_COUNT, CAR_RE_COUNT); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, rd_mode, CAR_READ_NOT_CLEAR, CAR_READ_AND_CLEAR); + + cnt_mode_cfg.carb_cfgmt_count_overflow_mode = overflow_mode; + cnt_mode_cfg.carb_cfgmt_count_rd_mode = 
rd_mode; + + rc = dpp_reg_write(dev_id, + STAT_CAR0_CARB_CFGMT_COUNT_MODEr, + 0, + 0, + &cnt_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id +* @param p_overflow_mode +* @param p_rd_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_dbg_cnt_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_overflow_mode, + ZXIC_UINT32 *p_rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_CFGMT_COUNT_MODE_T cnt_mode_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_overflow_mode); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rd_mode); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARB_CFGMT_COUNT_MODEr, + 0, + 0, + &cnt_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_overflow_mode = cnt_mode_cfg.carb_cfgmt_count_overflow_mode; + *p_rd_mode = cnt_mode_cfg.carb_cfgmt_count_rd_mode; + + return rc; +} + +/***********************************************************/ +/** car b 的调试计数获取 +* @param dev_id +* @param p_car_dbg_cnt +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_carb_dbg_cnt_get(ZXIC_UINT32 dev_id, + DPP_STAT_CAR_DBG_CNT_T *p_car_dbg_cnt) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 size = 0; + ZXIC_UINT32 *p_tmp_cnt = NULL; + + DPP_STAT_CAR0_CARB_PKT_SIZE_CNT_T carb_pkt_size_cnt_cfg = {0}; + DPP_STAT_CAR0_CARB_PKT_DES_I_CNT_T carb_pkt_in_total_cnt_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_dbg_cnt); + + size = sizeof(DPP_STAT_CAR_DBG_CNT_T) / sizeof(ZXIC_UINT32) - 1; + p_tmp_cnt = (ZXIC_UINT32 *)p_car_dbg_cnt; + + 
for (i = 0; i < size; i++) { + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARB_PKT_DES_I_CNTr + i, + 0, + 0, + &carb_pkt_in_total_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + ZXIC_COMM_MEMCPY(p_tmp_cnt + i, &(carb_pkt_in_total_cnt_cfg.carb_pkt_des_i_cnt), sizeof(ZXIC_UINT32)); + } + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARB_PKT_SIZE_CNTr, + 0, + 0, + &carb_pkt_size_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + p_car_dbg_cnt->pkt_size_cnt = carb_pkt_size_cnt_cfg.carb_pkt_size_cnt; + + return rc; +} + + +/***********************************************************/ +/** 获取car B的初始化状态 +* @param dev_id 设备号 car编号 +* @param p_init_done 初始化完成使能 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_init_done_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_init_done) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_PLCR_INIT_DONT_T carb_init_done_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_init_done); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARB_PLCR_INIT_DONTr, + 0, + 0, + &carb_init_done_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_init_done = carb_init_done_cfg.carb_plcr_init_done; + + return rc; +} + +#endif +/***********************************************************/ +/** +* @param dev_id +* @param profile_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carb_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT64 para0_temp = 0; + ZXIC_UINT64 para2_temp = 0; + ZXIC_UINT64 para4_temp = 0; + DPP_STAT_CAR0_CARB_RANDOM_RAM_T carb_random_ram_cfg = { 0 }; + + 
ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p1, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p2, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p3, 0, 100); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p1, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p2, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p3, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_RANDOM_MAX); + + para0_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->t2)) - + ((ZXIC_UINT64)(p_random_ram_e->t1))) * + ((ZXIC_UINT64)(p_random_ram_e->p1))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para0_l_e = (para0_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para0_h_e = (para0_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para1_e = ((p_random_ram_e->p2 - p_random_ram_e->p1) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para2_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->t3)) - + ((ZXIC_UINT64)(p_random_ram_e->t2))) * + ((ZXIC_UINT64)(p_random_ram_e->p2))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para2_l_e = (para2_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para2_h_e = (para2_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para3_e = ((p_random_ram_e->p3 - p_random_ram_e->p2) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para4_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->tc)) - + ((ZXIC_UINT64)(p_random_ram_e->t3))) * + ((ZXIC_UINT64)(p_random_ram_e->p3))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para4_l_e = (para4_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para4_h_e = (para4_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para5_e = + ((100 - 
p_random_ram_e->p3) << DPP_CAR_RANDOM_OFFSET_VAL) / 100; + carb_random_ram_cfg.para6_e = p_random_ram_e->t1; + carb_random_ram_cfg.para7_e = p_random_ram_e->t2; + carb_random_ram_cfg.para8_e = p_random_ram_e->t3; + + /* para0_temp = 0; + para2_temp = 0; + para4_temp = 0; */ + + para0_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->t2)) - + ((ZXIC_UINT64)(p_random_ram_c->t1))) * + ((ZXIC_UINT64)(p_random_ram_c->p1))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para0_l_c = (para0_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para0_h_c = (para0_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para1_c = ((p_random_ram_c->p2 - p_random_ram_c->p1) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para2_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->t3)) - + ((ZXIC_UINT64)(p_random_ram_c->t2))) * + ((ZXIC_UINT64)(p_random_ram_c->p2))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para2_l_c = (para2_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para2_h_c = (para2_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para3_c = ((p_random_ram_c->p3 - p_random_ram_c->p2) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para4_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->tc)) - + ((ZXIC_UINT64)(p_random_ram_c->t3))) * + ((ZXIC_UINT64)(p_random_ram_c->p3))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carb_random_ram_cfg.para4_l_c = (para4_temp & 0xFFFFFFFF); + carb_random_ram_cfg.para4_h_c = (para4_temp >> 32) & 0xFFFFFFFF; + + carb_random_ram_cfg.para5_c = + ((100 - p_random_ram_c->p3) << DPP_CAR_RANDOM_OFFSET_VAL) / 100; + carb_random_ram_cfg.para6_c = p_random_ram_c->t1; + carb_random_ram_cfg.para7_c = p_random_ram_c->t2; + carb_random_ram_cfg.para8_c = p_random_ram_c->t3; + + rc = dpp_reg_write(dev, STAT_CAR0_CARB_RANDOM_RAMr, 0, profile_id, + &carb_random_ram_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id +* @param 
p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carb_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_RANDOM_RAM_T carb_random_ram_cfg = { 0 }; + ZXIC_UINT32 tmp_val = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_RANDOM_MAX); + + rc = dpp_reg_read(dev, STAT_CAR0_CARB_RANDOM_RAMr, 0, profile_id, + &carb_random_ram_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_random_ram_e->t1 = carb_random_ram_cfg.para6_e; + p_random_ram_e->t2 = carb_random_ram_cfg.para7_e; + p_random_ram_e->t3 = carb_random_ram_cfg.para8_e; + tmp_val = (carb_random_ram_cfg.para5_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p3 = 100 - tmp_val; + tmp_val = (carb_random_ram_cfg.para3_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p2 = p_random_ram_e->p3 - tmp_val; + tmp_val = (carb_random_ram_cfg.para1_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p1 = p_random_ram_e->p2 - tmp_val; + + p_random_ram_c->t1 = carb_random_ram_cfg.para6_c; + p_random_ram_c->t2 = carb_random_ram_cfg.para7_c; + p_random_ram_c->t3 = carb_random_ram_cfg.para8_c; + tmp_val = (carb_random_ram_cfg.para5_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p3 = 100 - tmp_val; + tmp_val = (carb_random_ram_cfg.para3_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p2 = p_random_ram_c->p3 - tmp_val; + tmp_val = (carb_random_ram_cfg.para1_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p1 = p_random_ram_c->p2 - tmp_val; + + return 
rc; +} + +/***********************************************************/ +/** car C的流设置 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param drop_flag 丢弃标志 +* @param plcr_en 监管使能 +* @param profile_id 监管模板号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARC_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), drop_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), plcr_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_MAX); + + queue_cfg.carc_drop = drop_flag; + queue_cfg.carc_plcr_en = plcr_en; + queue_cfg.carc_profile_id = profile_id; + + rc = dpp_reg_write(dev, STAT_CAR0_CARC_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car C的流设置 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param p_carc_queue_cfg car c流配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carc_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_STAT_CAR_C_QUEUE_CFG_T *p_carc_queue_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARC_QUEUE_RAM0_159_0_T queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + 
DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_carc_queue_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARC_QUEUE_RAM0_159_0r, 0, flow_id, + &queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_carc_queue_cfg->flow_id = flow_id; + p_carc_queue_cfg->drop_flag = queue_cfg.carc_drop; + p_carc_queue_cfg->plcr_en = queue_cfg.carc_plcr_en; + p_carc_queue_cfg->profile_id = queue_cfg.carc_profile_id; + + p_carc_queue_cfg->tq = ZXIC_COMM_COUNTER64_BUILD(queue_cfg.carc_tq_h, + queue_cfg.carc_tq_l); + + p_carc_queue_cfg->tce_flag = queue_cfg.carc_ted; + p_carc_queue_cfg->tce = queue_cfg.carc_tcd; + p_carc_queue_cfg->te = queue_cfg.carc_tei; + p_carc_queue_cfg->tc = queue_cfg.carc_tci; + + return rc; +} + +/***********************************************************/ +/** car C监管模板设定 +* @param dev_id 设备号 car编号 +* @param profile_id 监管模板号 +* @param p_carc_profile_cfg 监管模板配置 +* pkt_sign 包限速标记写死成0,防止用户在carC包限速 +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carc_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_carc_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + + DPP_STAT_CAR0_CARC_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_carc_profile_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->pkt_sign, 0, + 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->cf, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->cm, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->random_disc_e, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->random_disc_c, 
0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->cd, + CAR_CD_MODE_SRTCM, CAR_CD_MODE_INVALID - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->cir, 0, + DPP_CAR_MAX_CIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->eir, 0, + DPP_CAR_MAX_EIR_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->cbs, 0, + DPP_CAR_MAX_CBS_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_carc_profile_cfg->ebs, 0, + DPP_CAR_MAX_EBS_VALUE); + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->e_yellow_pri[0], 0, + DPP_CAR_MAX_PRI_VALUE); + + for (i = 1; i < DPP_CAR_PRI_MAX; i++) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->c_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->e_green_pri[i], 0, + DPP_CAR_MAX_PRI_VALUE); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), + p_carc_profile_cfg->e_yellow_pri[i], + 0, DPP_CAR_MAX_PRI_VALUE); + } + + profile_cfg.carc_e_y_pri7 = p_carc_profile_cfg->e_yellow_pri[7]; + profile_cfg.carc_e_y_pri6 = p_carc_profile_cfg->e_yellow_pri[6]; + profile_cfg.carc_e_y_pri5 = p_carc_profile_cfg->e_yellow_pri[5]; + profile_cfg.carc_e_y_pri4 = p_carc_profile_cfg->e_yellow_pri[4]; + profile_cfg.carc_e_y_pri3 = p_carc_profile_cfg->e_yellow_pri[3]; + profile_cfg.carc_e_y_pri2 = p_carc_profile_cfg->e_yellow_pri[2]; + profile_cfg.carc_e_y_pri1 = p_carc_profile_cfg->e_yellow_pri[1]; + profile_cfg.carc_e_y_pri0 = p_carc_profile_cfg->e_yellow_pri[0]; + + profile_cfg.carc_e_g_pri7 = p_carc_profile_cfg->e_green_pri[7]; + profile_cfg.carc_e_g_pri6 = p_carc_profile_cfg->e_green_pri[6]; + profile_cfg.carc_e_g_pri5 = p_carc_profile_cfg->e_green_pri[5]; + profile_cfg.carc_e_g_pri4 = p_carc_profile_cfg->e_green_pri[4]; + profile_cfg.carc_e_g_pri3 = p_carc_profile_cfg->e_green_pri[3]; + profile_cfg.carc_e_g_pri2 = p_carc_profile_cfg->e_green_pri[2]; + profile_cfg.carc_e_g_pri1 = p_carc_profile_cfg->e_green_pri[1]; + + 
profile_cfg.carc_c_pri7 = p_carc_profile_cfg->c_pri[7]; + profile_cfg.carc_c_pri6 = p_carc_profile_cfg->c_pri[6]; + profile_cfg.carc_c_pri5 = p_carc_profile_cfg->c_pri[5]; + profile_cfg.carc_c_pri4 = p_carc_profile_cfg->c_pri[4]; + profile_cfg.carc_c_pri3 = p_carc_profile_cfg->c_pri[3]; + profile_cfg.carc_c_pri2 = p_carc_profile_cfg->c_pri[2]; + profile_cfg.carc_c_pri1 = p_carc_profile_cfg->c_pri[1]; + + profile_cfg.carc_cbs = p_carc_profile_cfg->cbs; + profile_cfg.carc_ebs_pbs = p_carc_profile_cfg->ebs; + profile_cfg.carc_cir = p_carc_profile_cfg->cir; + profile_cfg.carc_eir = p_carc_profile_cfg->eir; + + profile_cfg.carc_cd = p_carc_profile_cfg->cd; + profile_cfg.carc_cf = p_carc_profile_cfg->cf; + profile_cfg.carc_cm = p_carc_profile_cfg->cm; + profile_cfg.carc_random_discard_en_e = + p_carc_profile_cfg->random_disc_e; + profile_cfg.carc_random_discard_en_c = + p_carc_profile_cfg->random_disc_c; + /* C级car包限速标记写死为0,以免用户配置成1对CARC进行包限速 */ + profile_cfg.carc_pkt_sign = 0; + + profile_cfg.carc_profile_wr = 0; + + rc = dpp_reg_write(dev, STAT_CAR0_CARC_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car C的监管模板配置 +* @param dev_id 设备号 car编号 +* @param profile_id 监管模板号 +* @param p_carc_profile_cfg 监管模板配置 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/05 +************************************************************/ +DPP_STATUS +dpp_stat_carc_profile_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *p_carc_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_PROFILE_RAM1_255_0_T profile_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_MAX); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), 
p_carc_profile_cfg); + + rc = dpp_reg_read(dev, STAT_CAR0_CARC_PROFILE_RAM1_255_0r, 0, + profile_id, &profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_carc_profile_cfg->profile_id = profile_id; + p_carc_profile_cfg->pkt_sign = profile_cfg.carc_pkt_sign; + p_carc_profile_cfg->cd = profile_cfg.carc_cd; + p_carc_profile_cfg->cf = profile_cfg.carc_cf; + p_carc_profile_cfg->cm = profile_cfg.carc_cm; + + p_carc_profile_cfg->eir = profile_cfg.carc_eir; + p_carc_profile_cfg->cir = profile_cfg.carc_cir; + p_carc_profile_cfg->ebs = profile_cfg.carc_ebs_pbs; + p_carc_profile_cfg->cbs = profile_cfg.carc_cbs; + + p_carc_profile_cfg->random_disc_e = + profile_cfg.carc_random_discard_en_e; + p_carc_profile_cfg->random_disc_c = + profile_cfg.carc_random_discard_en_c; + + p_carc_profile_cfg->c_pri[1] = profile_cfg.carc_c_pri1; + p_carc_profile_cfg->c_pri[2] = profile_cfg.carc_c_pri2; + p_carc_profile_cfg->c_pri[3] = profile_cfg.carc_c_pri3; + p_carc_profile_cfg->c_pri[4] = profile_cfg.carc_c_pri4; + p_carc_profile_cfg->c_pri[5] = profile_cfg.carc_c_pri5; + p_carc_profile_cfg->c_pri[6] = profile_cfg.carc_c_pri6; + p_carc_profile_cfg->c_pri[7] = profile_cfg.carc_c_pri7; + + p_carc_profile_cfg->e_green_pri[1] = profile_cfg.carc_e_g_pri1; + p_carc_profile_cfg->e_green_pri[2] = profile_cfg.carc_e_g_pri2; + p_carc_profile_cfg->e_green_pri[3] = profile_cfg.carc_e_g_pri3; + p_carc_profile_cfg->e_green_pri[4] = profile_cfg.carc_e_g_pri4; + p_carc_profile_cfg->e_green_pri[5] = profile_cfg.carc_e_g_pri5; + p_carc_profile_cfg->e_green_pri[6] = profile_cfg.carc_e_g_pri6; + p_carc_profile_cfg->e_green_pri[7] = profile_cfg.carc_e_g_pri7; + + p_carc_profile_cfg->e_yellow_pri[0] = profile_cfg.carc_e_y_pri0; + p_carc_profile_cfg->e_yellow_pri[1] = profile_cfg.carc_e_y_pri1; + p_carc_profile_cfg->e_yellow_pri[2] = profile_cfg.carc_e_y_pri2; + p_carc_profile_cfg->e_yellow_pri[3] = profile_cfg.carc_e_y_pri3; + p_carc_profile_cfg->e_yellow_pri[4] = 
profile_cfg.carc_e_y_pri4; + p_carc_profile_cfg->e_yellow_pri[5] = profile_cfg.carc_e_y_pri5; + p_carc_profile_cfg->e_yellow_pri[6] = profile_cfg.carc_e_y_pri6; + p_carc_profile_cfg->e_yellow_pri[7] = profile_cfg.carc_e_y_pri7; + + return rc; +} + +/***********************************************************/ +/** 配置car C的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param qvos_mode 溢出模式,参见DPP_CAR_QVOS_MODE_E +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_qvos_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), qvos_mode, + CAR_QVOS_MODE_OVERFLOW_0, + CAR_QVOS_MODE_OVERFLOW_MAX - 1); + + qvos_cfg.carc_qovs = qvos_mode; + + rc = dpp_reg_write(dev, STAT_CAR0_CARC_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} +/***********************************************************/ +/** 获取car C的qvos溢出模式 +* @param dev_id 设备号 car编号 +* @param flow_id 流号 +* @param p_qvos_mode qvos溢出模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_qvos_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_qvos_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_QOVS_RAM_RAM2_T qvos_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qvos_mode); + + rc = dpp_reg_read(dev, STAT_CAR0_CARC_QOVS_RAM_RAM2r, 0, flow_id, + &qvos_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qvos_mode = qvos_cfg.carc_qovs; + + return rc; +} + +#if 0 +/***********************************************************/ +/** car C指定队列模式配置,仅用于调试 +* @param dev_id 设备号 car编号 +* @param global_en 全局队列使能,0-不使能,1-使能 +* @param sp_en 优先级队列使能,0-不使能,1-使能 +* @param appoint_sp 指定的优先级 +* @param appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_queue_appoint_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 global_en, + ZXIC_UINT32 sp_en, + ZXIC_UINT32 appoint_sp, + ZXIC_UINT32 appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_APPOINT_QNUM_OR_SP_T appoint_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, global_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sp_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, appoint_sp, DPP_CAR_PRI0, DPP_CAR_PRI_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, appoint_queue, 0, DPP_CAR_C_FLOW_ID_MAX); + + appoint_cfg.carc_appoint_qnum_or_not = global_en; + appoint_cfg.carc_appoint_sp_or_not = sp_en; + appoint_cfg.carc_plcr_stat_sp = appoint_sp; + appoint_cfg.carc_plcr_stat_qnum = appoint_queue; + + rc = dpp_reg_write(dev_id, + STAT_CAR0_CARC_APPOINT_QNUM_OR_SPr, + 0, + 0, + &appoint_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car C指定队列模式的配置 +* @param dev_id 设备号 car编号 +* @param p_global_en 全局队列使能,0-不使能,1-使能 +* @param p_sp_en 优先级队列使能,0-不使能,1-使能 +* @param p_appoint_sp 指定的优先级 +* @param p_appoint_queue 指定的队列号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/07 
+************************************************************/ +DPP_STATUS dpp_stat_carc_queue_appoint_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_global_en, + ZXIC_UINT32 *p_sp_en, + ZXIC_UINT32 *p_appoint_sp, + ZXIC_UINT32 *p_appoint_queue) +{ + DPP_STATUS rc = DPP_OK; + + DPP_STAT_CAR0_CARC_APPOINT_QNUM_OR_SP_T appoint_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_global_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sp_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_appoint_sp); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_appoint_queue); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARC_APPOINT_QNUM_OR_SPr, + 0, + 0, + &appoint_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_global_en = appoint_cfg.carc_appoint_qnum_or_not; + *p_sp_en = appoint_cfg.carc_appoint_sp_or_not; + *p_appoint_sp = appoint_cfg.carc_plcr_stat_sp; + *p_appoint_queue = appoint_cfg.carc_plcr_stat_qnum; + + return rc; +} + +/***********************************************************/ +/** car C 调试计数读取模式配置 +* @param dev_id 设备号 car编号 +* @param overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param rd_mode 读取模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_dbg_cnt_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 overflow_mode, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_CFGMT_COUNT_MODE_T cnt_mode_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, overflow_mode, CAR_KEEP_COUNT, CAR_RE_COUNT); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, rd_mode, CAR_READ_NOT_CLEAR, CAR_READ_AND_CLEAR); + + cnt_mode_cfg.carc_cfgmt_count_overflow_mode = overflow_mode; + cnt_mode_cfg.carc_cfgmt_count_rd_mode = rd_mode; + + rc = dpp_reg_write(dev_id, + STAT_CAR0_CARC_CFGMT_COUNT_MODEr, + 0, + 0, + &cnt_mode_cfg); + 
ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** car C 调试计数读取模式获取 +* @param dev_id 设备号 car编号 +* @param p_overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param p_rd_mode 读清模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_dbg_cnt_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_overflow_mode, + ZXIC_UINT32 *p_rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_CFGMT_COUNT_MODE_T cnt_mode_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_overflow_mode); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rd_mode); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARC_CFGMT_COUNT_MODEr, + 0, + 0, + &cnt_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_overflow_mode = cnt_mode_cfg.carc_cfgmt_count_overflow_mode; + *p_rd_mode = cnt_mode_cfg.carc_cfgmt_count_rd_mode; + + return rc; +} + +/***********************************************************/ +/** car b 的调试计数获取 +* @param dev_id +* @param p_car_dbg_cnt +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_carc_dbg_cnt_get(ZXIC_UINT32 dev_id, + DPP_STAT_CAR_DBG_CNT_T *p_car_dbg_cnt) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 size = 0; + ZXIC_UINT32 *p_tmp_cnt = NULL; + + DPP_STAT_CAR0_CARC_PKT_SIZE_CNT_T carc_pkt_size_cnt_cfg = {0}; + DPP_STAT_CAR0_CARC_PKT_DES_I_CNT_T carc_pkt_in_total_cnt_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_dbg_cnt); + + size = sizeof(DPP_STAT_CAR_DBG_CNT_T) / sizeof(ZXIC_UINT32) - 1; + p_tmp_cnt = (ZXIC_UINT32 *)p_car_dbg_cnt; + + for (i = 0; i < size; i++) { + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARC_PKT_DES_I_CNTr + i, + 0, + 0, + 
&carc_pkt_in_total_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + ZXIC_COMM_MEMCPY(p_tmp_cnt + i, &(carc_pkt_in_total_cnt_cfg.carc_pkt_des_i_cnt), sizeof(ZXIC_UINT32)); + } + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARC_PKT_SIZE_CNTr, + 0, + 0, + &carc_pkt_size_cnt_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + p_car_dbg_cnt->pkt_size_cnt = carc_pkt_size_cnt_cfg.carc_pkt_size_cnt; + + return rc; +} + +/***********************************************************/ +/** 获取car C的初始化状态 +* @param dev_id 设备号 car编号 +* @param p_init_done 初始化完成使能 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_init_done_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_init_done) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARC_PLCR_INIT_DONT_T carc_init_done_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_init_done); + + rc = dpp_reg_read(dev_id, + STAT_CAR0_CARC_PLCR_INIT_DONTr, + 0, + 0, + &carc_init_done_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read"); + + *p_init_done = carc_init_done_cfg.carc_plcr_init_done; + + return rc; +} + +#endif +/***********************************************************/ +/** +* @param dev_id +* @param profile_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_carc_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT64 para0_temp = 0; + ZXIC_UINT64 para2_temp = 0; + ZXIC_UINT64 para4_temp = 0; + DPP_STAT_CAR0_CARC_RANDOM_RAM_T carc_random_ram_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p1, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p2, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_e->p3, 0, 100); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p1, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p2, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_random_ram_c->p3, 0, 100); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_RANDOM_MAX); + + para0_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->t2)) - + ((ZXIC_UINT64)(p_random_ram_e->t1))) * + ((ZXIC_UINT64)(p_random_ram_e->p1))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para0_l_e = (para0_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para0_h_e = (para0_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para1_e = ((p_random_ram_e->p2 - p_random_ram_e->p1) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para2_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->t3)) - + ((ZXIC_UINT64)(p_random_ram_e->t2))) * + ((ZXIC_UINT64)(p_random_ram_e->p2))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para2_l_e = (para2_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para2_h_e = (para2_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para3_e = ((p_random_ram_e->p3 - p_random_ram_e->p2) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para4_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_e->tc)) - + ((ZXIC_UINT64)(p_random_ram_e->t3))) * + ((ZXIC_UINT64)(p_random_ram_e->p3))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para4_l_e = (para4_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para4_h_e = (para4_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para5_e = + ((100 - p_random_ram_e->p3) << DPP_CAR_RANDOM_OFFSET_VAL) / 100; + carc_random_ram_cfg.para6_e = p_random_ram_e->t1; + 
carc_random_ram_cfg.para7_e = p_random_ram_e->t2; + carc_random_ram_cfg.para8_e = p_random_ram_e->t3; + + /* para0_temp = 0; + para2_temp = 0; + para4_temp = 0; */ + + para0_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->t2)) - + ((ZXIC_UINT64)(p_random_ram_c->t1))) * + ((ZXIC_UINT64)(p_random_ram_c->p1))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para0_l_c = (para0_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para0_h_c = (para0_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para1_c = ((p_random_ram_c->p2 - p_random_ram_c->p1) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para2_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->t3)) - + ((ZXIC_UINT64)(p_random_ram_c->t2))) * + ((ZXIC_UINT64)(p_random_ram_c->p2))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para2_l_c = (para2_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para2_h_c = (para2_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para3_c = ((p_random_ram_c->p3 - p_random_ram_c->p2) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + + para4_temp = ((ZXIC_UINT64)((((ZXIC_UINT64)(p_random_ram_c->tc)) - + ((ZXIC_UINT64)(p_random_ram_c->t3))) * + ((ZXIC_UINT64)(p_random_ram_c->p3))) + << DPP_CAR_RANDOM_OFFSET_VAL) / + 100; + carc_random_ram_cfg.para4_l_c = (para4_temp & 0xFFFFFFFF); + carc_random_ram_cfg.para4_h_c = (para4_temp >> 32) & 0xFFFFFFFF; + + carc_random_ram_cfg.para5_c = + ((100 - p_random_ram_c->p3) << DPP_CAR_RANDOM_OFFSET_VAL) / 100; + carc_random_ram_cfg.para6_c = p_random_ram_c->t1; + carc_random_ram_cfg.para7_c = p_random_ram_c->t2; + carc_random_ram_cfg.para8_c = p_random_ram_c->t3; + + rc = dpp_reg_write(dev, STAT_CAR0_CARC_RANDOM_RAMr, 0, profile_id, + &carc_random_ram_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id +* @param p_random_ram +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 
+************************************************************/ +DPP_STATUS dpp_stat_carc_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CARB_RANDOM_RAM_T carc_random_ram_cfg = { 0 }; + ZXIC_UINT32 tmp_val = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_RANDOM_MAX); + + rc = dpp_reg_read(dev, STAT_CAR0_CARC_RANDOM_RAMr, 0, profile_id, + &carc_random_ram_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + p_random_ram_e->t1 = carc_random_ram_cfg.para6_e; + p_random_ram_e->t2 = carc_random_ram_cfg.para7_e; + p_random_ram_e->t3 = carc_random_ram_cfg.para8_e; + tmp_val = (carc_random_ram_cfg.para5_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p3 = 100 - tmp_val; + tmp_val = (carc_random_ram_cfg.para3_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p2 = p_random_ram_e->p3 - tmp_val; + tmp_val = (carc_random_ram_cfg.para1_e * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_e->p1 = p_random_ram_e->p2 - tmp_val; + + p_random_ram_c->t1 = carc_random_ram_cfg.para6_c; + p_random_ram_c->t2 = carc_random_ram_cfg.para7_c; + p_random_ram_c->t3 = carc_random_ram_cfg.para8_c; + tmp_val = (carc_random_ram_cfg.para5_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p3 = 100 - tmp_val; + tmp_val = (carc_random_ram_cfg.para3_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p2 = p_random_ram_c->p3 - tmp_val; + tmp_val = (carc_random_ram_cfg.para1_c * 100) >> + DPP_CAR_RANDOM_OFFSET_VAL; + p_random_ram_c->p1 = p_random_ram_c->p2 - tmp_val; + + return rc; +} + +/***********************************************************/ +/** 
配置car的层级模式 +* @param dev_id +* @param mode 2 - 三级car, 第一级支持16K +* 1 - 两级car, 第一级扩展为17K +* 0 - 一级car, 第一级扩展为21K +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/28 +************************************************************/ +DPP_STATUS dpp_stat_car_en_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CAR_HIERARCHY_MODE_T car_en_mode_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), mode, DPP_CAR_EN_MODE_BOTH_EN, + DPP_CAR_EN_MODE_INVALID - 1); + + car_en_mode_cfg.car_hierarchy_mode = mode; + + rc = dpp_reg_write(dev, STAT_CAR0_CAR_HIERARCHY_MODEr, 0, 0, + &car_en_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id +* @param p_mode +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/28 +************************************************************/ +DPP_STATUS dpp_stat_car_en_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_CAR_HIERARCHY_MODE_T car_en_mode_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mode); + + rc = dpp_reg_read(dev, STAT_CAR0_CAR_HIERARCHY_MODEr, 0, 0, + &car_en_mode_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_mode = car_en_mode_cfg.car_hierarchy_mode; + + return rc; +} + +/***********************************************************/ +/** 配置car的包长偏移 +* @param dev_id +* @param pkt_size_off +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_pkt_size_offset_set(DPP_DEV_T *dev, + ZXIC_UINT32 
pkt_size_off) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_PKT_SIZE_OFFSET_T car_pkt_size_offset = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pkt_size_off, 0, 0xffffffff); + + car_pkt_size_offset.pkt_size_offset = pkt_size_off; + + rc = dpp_reg_write(dev, STAT_CAR0_PKT_SIZE_OFFSETr, 0, 0, + &car_pkt_size_offset); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取car的包长偏移 +* @param dev_id +* @param p_pkt_size_off +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_pkt_size_offset_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_pkt_size_off) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_PKT_SIZE_OFFSET_T car_pkt_size_offset = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_pkt_size_off); + + rc = dpp_reg_read(dev, STAT_CAR0_PKT_SIZE_OFFSETr, 0, 0, + &car_pkt_size_offset); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_pkt_size_off = car_pkt_size_offset.pkt_size_offset; + + return rc; +} + +/***********************************************************/ +/** 配置cara的最大包长 +* @param dev_id +* @param max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_MAX_PKT_SIZE_A_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), max_pkt_size, 0, 
0x3fff); + + car_max_pkt_size.max_pkt_size_a = max_pkt_size; + + rc = dpp_reg_write(dev, STAT_CAR0_MAX_PKT_SIZE_Ar, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取cara的最大包长 +* @param dev_id +* @param p_max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_cara_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_MAX_PKT_SIZE_A_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_max_pkt_size); + + rc = dpp_reg_read(dev, STAT_CAR0_MAX_PKT_SIZE_Ar, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_max_pkt_size = car_max_pkt_size.max_pkt_size_a; + + return rc; +} + +/***********************************************************/ +/** 配置carb的最大包长 +* @param dev_id +* @param max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_MAX_PKT_SIZE_B_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), max_pkt_size, 0, 0x3fff); + + car_max_pkt_size.max_pkt_size_b = max_pkt_size; + + rc = dpp_reg_write(dev, STAT_CAR0_MAX_PKT_SIZE_Br, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取carb的最大包长 +* 
@param dev_id +* @param p_max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carb_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_MAX_PKT_SIZE_B_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_max_pkt_size); + + rc = dpp_reg_read(dev, STAT_CAR0_MAX_PKT_SIZE_Br, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_max_pkt_size = car_max_pkt_size.max_pkt_size_b; + + return rc; +} + +/***********************************************************/ +/** 配置carc的最大包长 +* @param dev_id +* @param max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_max_pkt_size_set(DPP_DEV_T *dev, + ZXIC_UINT32 max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR0_MAX_PKT_SIZE_C_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), max_pkt_size, 0, 0x3fff); + + car_max_pkt_size.max_pkt_size_c = max_pkt_size; + + rc = dpp_reg_write(dev, STAT_CAR0_MAX_PKT_SIZE_Cr, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** 获取carc的最大包长 +* @param dev_id +* @param p_max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_carc_max_pkt_size_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + 
DPP_STAT_CAR0_MAX_PKT_SIZE_C_T car_max_pkt_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_max_pkt_size); + + rc = dpp_reg_read(dev, STAT_CAR0_MAX_PKT_SIZE_Cr, 0, 0, + &car_max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_max_pkt_size = car_max_pkt_size.max_pkt_size_c; + + return rc; +} + +#endif + +#if ZXIC_REAL("Advanced Function") +#if 0 +/***********************************************************/ +/** car硬件初始化 +* @param dev_id 设备号 car编号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param car_mono_mode car独占mono模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/04/27 +************************************************************/ +DPP_STATUS dpp_stat_car_hardware_init(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + ZXIC_UINT32 car_mono_mode) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 init_done = 0; + + DPP_STAT_CAR_A_QUEUE_CFG_T car_a_queue_cfg = {0}; + DPP_STAT_CAR_B_QUEUE_CFG_T car_b_queue_cfg = {0}; + DPP_STAT_CAR_C_QUEUE_CFG_T car_c_queue_cfg = {0}; + DPP_STAT_CAR_PROFILE_CFG_T car_profile_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, car_type, STAT_CAR_A_TYPE, STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, car_mono_mode, CAR_SMMU0_MONO_MODE_NONE, CAR_SMMU0_MONO_MODE_MAX - 1); + + ZXIC_COMM_MEMSET(&car_profile_cfg, 0, sizeof(DPP_STAT_CAR_PROFILE_CFG_T)); + + rc = dpp_se_smmu0_cfg_car_mono_set(dev_id, car_mono_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_smmu0_cfg_car_mono_set"); + + switch (car_type) { + case STAT_CAR_A_TYPE: + { + rc = dpp_stat_cara_init_done_get(dev_id, &init_done); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_init_done_get"); + + if (0 == init_done) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Error! 
Get car_a init done fail!\n"); + return DPP_RC_CAR_INIT_FAIL; + } + + ZXIC_COMM_MEMSET(&car_a_queue_cfg, 0, sizeof(DPP_STAT_CAR_A_QUEUE_CFG_T)); + + /** 清空queue的配置 和绑定队列号配置 稍后补全 */ + for (i = 0; i <= DPP_CAR_A_FLOW_ID_MAX; i++) { + rc = dpp_stat_cara_queue_cfg_set(dev_id, + i, + 0, + 0, + 0); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_queue_cfg_set"); + } + + /** 清空profile的配置 */ + for (i = 0; i <= DPP_CAR_A_PROFILE_ID_MAX; i++) { + rc = dpp_stat_cara_profile_cfg_set(dev_id, + i, + &car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_profile_cfg_set"); + } + } + break; + + case STAT_CAR_B_TYPE: + { + rc = dpp_stat_carb_init_done_get(dev_id, &init_done); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carb_init_done_get"); + + if (0 == init_done) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Error! Get car_b init done fail!\n"); + return DPP_RC_CAR_INIT_FAIL; + } + + ZXIC_COMM_MEMSET(&car_b_queue_cfg, 0, sizeof(DPP_STAT_CAR_B_QUEUE_CFG_T)); + + /** 清空queue的配置 和绑定队列号配置 稍后补全 */ + + for (i = 0; i <= DPP_CAR_B_FLOW_ID_MAX; i++) { + rc = dpp_stat_carb_queue_cfg_set(dev_id, + i, + 0, + 0, + 0); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_queue_cfg_set"); + } + + /** 清空profile的配置 */ + for (i = 0; i <= DPP_CAR_B_PROFILE_ID_MAX; i++) { + rc = dpp_stat_carb_profile_cfg_set(dev_id, + i, + &car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carb_profile_cfg_set"); + } + } + break; + + case STAT_CAR_C_TYPE: + { + rc = dpp_stat_carc_init_done_get(dev_id, + &init_done); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_init_done_get"); + + if (0 == init_done) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Error! 
Get car_c init done fail!\n"); + return DPP_RC_CAR_INIT_FAIL; + } + + ZXIC_COMM_MEMSET(&car_c_queue_cfg, 0, sizeof(DPP_STAT_CAR_C_QUEUE_CFG_T)); + + /** 清空queue的配置 和绑定队列号配置 稍后补全 */ + + for (i = 0; i <= DPP_CAR_C_FLOW_ID_MAX; i++) { + rc = dpp_stat_carc_queue_cfg_set(dev_id, + i, + 0, + 0, + 0); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_queue_cfg_set"); + } + + /** 清空profile的配置 */ + for (i = 0; i <= DPP_CAR_C_PROFILE_ID_MAX; i++) { + rc = dpp_stat_carc_profile_cfg_set(dev_id, + i, + &car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_profile_cfg_set"); + } + } + break; + + default: + { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "car_type[%d] error!\n", car_type); + return DPP_ERR; + } + } + + return rc; +} +#endif +/***********************************************************/ +/** car 模块流配置 +* @param dev_id 设备号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param drop_flag 丢弃标志 +* @param plcr_en 限速使能 +* @param profile_id 模板编号 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/06 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 drop_flag, + ZXIC_UINT32 plcr_en, + ZXIC_UINT32 profile_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 flow_num = 0; + + DPP_CAR_SOFT_RESET_DATA_T *p_restore_data = NULL; /* 软复位相关 */ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + } else if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_MAX); + } + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), drop_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), plcr_en, 0, 1); + + p_restore_data = + GET_DPP_CAR_SOFT_RESET_INFO(DEV_ID(dev)); /* 软复位相关 */ + + switch (car_type) { + case STAT_CAR_A_TYPE: { + rc = dpp_stat_cara_queue_cfg_set(dev, flow_id, drop_flag, + plcr_en, profile_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_queue_cfg_set"); + + /* 软复位相关 */ + flow_num = p_restore_data->cara_flow_num; + if (flow_num < DPP_CAR_A_FLOW_ID_NUM) { + p_restore_data->cara_item[flow_num].flow_id = flow_id; + p_restore_data->cara_item[flow_num].profile_id = + profile_id; + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW( + DEV_ID(dev), p_restore_data->cara_flow_num, 1); + p_restore_data->cara_flow_num++; + } + + } break; + + case STAT_CAR_B_TYPE: { + rc = dpp_stat_carb_queue_cfg_set(dev, flow_id, drop_flag, + plcr_en, profile_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_queue_cfg_set"); + + /* 软复位相关 */ + flow_num = p_restore_data->carb_flow_num; + if (flow_num < DPP_CAR_B_FLOW_ID_NUM) { + p_restore_data->carb_item[flow_num].flow_id = flow_id; + p_restore_data->carb_item[flow_num].profile_id = + profile_id; + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW( + DEV_ID(dev), p_restore_data->carb_flow_num, 1); + p_restore_data->carb_flow_num++; + } + + } break; + + case STAT_CAR_C_TYPE: { + rc = dpp_stat_carc_queue_cfg_set(dev, flow_id, drop_flag, + plcr_en, profile_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_queue_cfg_set"); + + /* 软复位相关 */ + flow_num = p_restore_data->carc_flow_num; + if (flow_num < DPP_CAR_C_FLOW_ID_NUM) { + p_restore_data->carc_item[flow_num].flow_id = flow_id; + p_restore_data->carc_item[flow_num].profile_id = + 
profile_id; + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), p_restore_data->carc_flow_num, 1); + p_restore_data->carc_flow_num++; + } + + } break; + } + + return rc; +} + +/***********************************************************/ +/**队列设置参数获取 +* @param dev_id 设备号 car编号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param pkt_sign 限速标志,1-包限速,0-字节限速 +* @param flow_id 队列号 +* @param p_data 获取队列配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, ZXIC_UINT32 flow_id, + ZXIC_VOID *p_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pkt_sign, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + } else if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + } + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + switch (car_type) { + case STAT_CAR_A_TYPE: { + if (0 == pkt_sign) { + rc = dpp_stat_cara_queue_cfg_get( + dev, flow_id, + (DPP_STAT_CAR_A_QUEUE_CFG_T *)p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_queue_cfg_get"); + } else { + rc = dpp_stat_cara_pkt_queue_cfg_get( + dev, flow_id, + (DPP_STAT_CAR_A_PKT_QUEUE_CFG_T *)p_data); + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "dpp_stat_cara_pkt_queue_cfg_get"); + } + + } break; + + case STAT_CAR_B_TYPE: { + rc = dpp_stat_carb_queue_cfg_get( + dev, flow_id, (DPP_STAT_CAR_B_QUEUE_CFG_T *)p_data); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_queue_cfg_get"); + } break; + + case STAT_CAR_C_TYPE: { + rc = dpp_stat_carc_queue_cfg_get( + dev, flow_id, (DPP_STAT_CAR_C_QUEUE_CFG_T *)p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_queue_cfg_get"); + } break; + } + + return rc; +} + +/***********************************************************/ +/** car 模块流配置获取 +* @param dev_id 设备号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param p_drop_flag drop标记 +* @param p_plcr_en 监管使能信号 +* @param p_profile_id 模板id +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/08/19 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_drop_flag, + ZXIC_UINT32 *p_plcr_en, + ZXIC_UINT32 *p_profile_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR_A_QUEUE_CFG_T car_a_queue_cfg = { 0 }; + DPP_STAT_CAR_B_QUEUE_CFG_T car_b_queue_cfg = { 0 }; + DPP_STAT_CAR_C_QUEUE_CFG_T car_c_queue_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + } else if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + } + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_drop_flag); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_plcr_en); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_profile_id); + + switch (car_type) { + case STAT_CAR_A_TYPE: { + rc = dpp_stat_cara_queue_cfg_get(dev, flow_id, + &car_a_queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_queue_cfg_get"); + 
+ *p_profile_id = car_a_queue_cfg.profile_id; + *p_plcr_en = car_a_queue_cfg.plcr_en; + *p_drop_flag = car_a_queue_cfg.drop_flag; + } break; + + case STAT_CAR_B_TYPE: { + rc = dpp_stat_carb_queue_cfg_get(dev, flow_id, + &car_b_queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_queue_cfg_get"); + + *p_profile_id = car_b_queue_cfg.profile_id; + *p_plcr_en = car_b_queue_cfg.plcr_en; + *p_drop_flag = car_b_queue_cfg.drop_flag; + } break; + + case STAT_CAR_C_TYPE: { + rc = dpp_stat_carc_queue_cfg_get(dev, flow_id, + &car_c_queue_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_queue_cfg_get"); + + *p_profile_id = car_c_queue_cfg.profile_id; + *p_plcr_en = car_c_queue_cfg.plcr_en; + *p_drop_flag = car_c_queue_cfg.drop_flag; + } break; + } + + return rc; +} + +/***********************************************************/ +/** car profile硬件写入 +* @param dev_id 设备号 car编号 +* @param car_type car模式,参见STAT_CAR_TYPE_E +* @param pkt_sign 限速模式,1-包限速,0-字节限速 +* @param profile_id 模板号 +* @param p_car_profile_cfg 模板配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/05/06 +************************************************************/ +DPP_STATUS dpp_stat_car_profile_cfg_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + + DPP_CAR_SOFT_RESET_DATA_T *p_restore_data = NULL; /* 软复位相关 */ + DPP_STAT_CAR_PROFILE_CFG_T *p_stat_car_profile_cfg = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_stat_pkt_car_profile_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pkt_sign, 0, 1); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + } else if 
(STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_MAX); + } + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_car_profile_cfg); + + p_restore_data = + GET_DPP_CAR_SOFT_RESET_INFO(DEV_ID(dev)); /* 软复位相关 */ + + if ((STAT_CAR_A_TYPE == car_type) && (1 == pkt_sign)) { + p_stat_pkt_car_profile_cfg = + (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)p_car_profile_cfg; + + rc = dpp_stat_cara_pkt_profile_cfg_set( + dev, profile_id, p_stat_pkt_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_pkt_profile_cfg_set"); + + /* 软复位相关 */ + p_restore_data->car_pkt_sign[profile_id] = ZXIC_TRUE; + + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), p_restore_data->car0_pkt_num, 1); + p_restore_data->car0_pkt_num++; + } else { + p_stat_car_profile_cfg = + (DPP_STAT_CAR_PROFILE_CFG_T *)p_car_profile_cfg; + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), "==> dpp_stat_car_profile_cfg_set : \n"); + ZXIC_COMM_TRACE_DEV_INFO(DEV_ID(dev), + "| %-10s | %-10s | %-10s | %-10s | \n", + "profile_id", "car_id", "car_type", + "pkt_sign"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), "| %-10d | %-10d | %-10d | 0x%-8x | \n", + profile_id, 0, car_type, pkt_sign); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| ------------------------------------------------- | \n"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| %-5s | %-5s | %-5s | %-10s | %-10s | %-10s | %-10s | \n", + "cd", "cf", "cm", "cir", "cbs", "eir", "ebs"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "| %-5d | %-5d | %-5d | %-10d | %-10d | %-10d | %-10d | \n", + p_stat_car_profile_cfg->cd, p_stat_car_profile_cfg->cf, + p_stat_car_profile_cfg->cm, p_stat_car_profile_cfg->cir, + p_stat_car_profile_cfg->cbs, + p_stat_car_profile_cfg->eir, + p_stat_car_profile_cfg->ebs); + + if (STAT_CAR_A_TYPE == car_type) { + rc = dpp_stat_cara_profile_cfg_set( + dev, profile_id, 
p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_profile_cfg_set"); + + /* 软复位相关 */ + if (ZXIC_TRUE == + p_restore_data->car_pkt_sign[profile_id]) { + p_restore_data->car0_pkt_num--; + p_restore_data->car_pkt_sign[profile_id] = + ZXIC_FALSE; + } + } else if (STAT_CAR_B_TYPE == car_type) { + rc = dpp_stat_carb_profile_cfg_set( + dev, profile_id, p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_profile_cfg_set"); + } else { + rc = dpp_stat_carc_profile_cfg_set( + dev, profile_id, p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_profile_cfg_set"); + } + } + + return rc; +} + +/***********************************************************/ +/**获取监管模板配置 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param pkt_sign 限速模式,1-包限速,0-字节限速 +* @param profile_id 监管模板id +* @param p_car_profile_cfg 模板配置信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/08/29 +************************************************************/ +DPP_STATUS dpp_stat_car_profile_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 pkt_sign, + ZXIC_UINT32 profile_id, + ZXIC_VOID *p_car_profile_cfg) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_CAR_PROFILE_CFG_T *p_stat_car_profile_cfg = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_stat_pkt_car_profile_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), pkt_sign, 0, 1); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_A_PROFILE_ID_MAX); + } else if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_MAX); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + 
DPP_CAR_C_PROFILE_ID_MAX); + } + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_car_profile_cfg); + + if ((STAT_CAR_A_TYPE == car_type) && (1 == pkt_sign)) { + p_stat_pkt_car_profile_cfg = + (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)p_car_profile_cfg; + rc = dpp_stat_cara_pkt_profile_cfg_get( + dev, profile_id, p_stat_pkt_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_pkt_profile_cfg_get"); + } else { + p_stat_car_profile_cfg = + (DPP_STAT_CAR_PROFILE_CFG_T *)p_car_profile_cfg; + + if (STAT_CAR_A_TYPE == car_type) { + rc = dpp_stat_cara_profile_cfg_get( + dev, profile_id, p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_profile_cfg_get"); + } else if (STAT_CAR_B_TYPE == car_type) { + rc = dpp_stat_carb_profile_cfg_get( + dev, profile_id, p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_profile_cfg_get"); + } else { + rc = dpp_stat_carc_profile_cfg_get( + dev, profile_id, p_stat_car_profile_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_profile_cfg_get"); + } + } + + return rc; +} + +/***********************************************************/ +/** car 队列映射关系配置 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param map_flow_id 映射队列号 +* @param map_sp 映射sp +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_map_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 map_flow_id, + ZXIC_UINT32 map_sp) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_B_TYPE); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + 
ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_sp, DPP_CAR_PRI0, + DPP_CAR_PRI_MAX - 1); + rc = dpp_stat_cara_queue_map_set(dev, flow_id, map_flow_id, + map_sp); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_queue_map_set"); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_flow_id, 0, + DPP_CAR_C_FLOW_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), map_sp, DPP_CAR_PRI0, + DPP_CAR_PRI_MAX - 1); + rc = dpp_stat_carb_queue_map_set(dev, flow_id, map_flow_id, + map_sp); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_queue_map_set"); + } + + return rc; +} + +/***********************************************************/ +/** 获取 car 流号的绑定关系 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param flow_id 队列号 +* @param p_map_flow_id 映射队列号 +* @param p_map_sp 映射sp +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_queue_map_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 flow_id, + ZXIC_UINT32 *p_map_flow_id, + ZXIC_UINT32 *p_map_sp) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_B_TYPE); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_flow_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_map_sp); + + if (STAT_CAR_A_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_A_FLOW_ID_MAX); + rc = dpp_stat_cara_queue_map_get(dev, flow_id, p_map_flow_id, + p_map_sp); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_queue_map_get"); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flow_id, 0, + DPP_CAR_B_FLOW_ID_MAX); + rc = 
dpp_stat_carb_queue_map_get(dev, flow_id, p_map_flow_id, + p_map_sp); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_queue_map_get"); + } + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id 设备ID +* @param profile_id 模板ID +* @param p_random_ram_e E桶概率丢弃配置参数 +* @param p_random_ram_c C桶概率丢弃配置参数 +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_car_random_ram_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_B_TYPE, + STAT_CAR_C_TYPE); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + + if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_RANDOM_MAX); + rc = dpp_stat_carb_random_ram_set( + dev, profile_id, p_random_ram_e, p_random_ram_c); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_random_ram_set"); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_RANDOM_MAX); + rc = dpp_stat_carc_random_ram_set( + dev, profile_id, p_random_ram_e, p_random_ram_c); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_random_ram_set"); + } + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id 设备ID +* @param profile_id 模板ID +* @param p_random_ram_e E桶概率丢弃配置参数 +* @param p_random_ram_c C桶概率丢弃配置参数 +* +* @return +* @remark 无 +* @see +* @author YXH @date 2019/04/01 +************************************************************/ +DPP_STATUS dpp_stat_car_random_ram_get(DPP_DEV_T *dev, ZXIC_UINT32 
car_type, + ZXIC_UINT32 profile_id, + DPP_CAR_RANDOM_RAM_T *p_random_ram_e, + DPP_CAR_RANDOM_RAM_T *p_random_ram_c) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_B_TYPE, + STAT_CAR_C_TYPE); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_e); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_random_ram_c); + + if (STAT_CAR_B_TYPE == car_type) { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_B_PROFILE_ID_RANDOM_MAX); + rc = dpp_stat_carb_random_ram_get( + dev, profile_id, p_random_ram_e, p_random_ram_c); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_random_ram_get"); + } else { + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), profile_id, 0, + DPP_CAR_C_PROFILE_ID_RANDOM_MAX); + rc = dpp_stat_carc_random_ram_get( + dev, profile_id, p_random_ram_e, p_random_ram_c); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_random_ram_get"); + } + + return rc; +} + +#if 0 +/***********************************************************/ +/**car模块dbg计数模式设置 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param rd_mode 读取模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_mode_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + ZXIC_UINT32 overflow_mode, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, car_type, STAT_CAR_A_TYPE, STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, overflow_mode, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, rd_mode, 0, 1); + + switch (car_type) { + case STAT_CAR_A_TYPE: + { + rc = dpp_stat_cara_dbg_cnt_mode_set(dev_id, + overflow_mode, + 
rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_dbg_cnt_mode_set"); + } + break; + + case STAT_CAR_B_TYPE: + { + rc = dpp_stat_carb_dbg_cnt_mode_set(dev_id, + overflow_mode, + rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carb_dbg_cnt_mode_set"); + } + break; + + case STAT_CAR_C_TYPE: + { + rc = dpp_stat_carc_dbg_cnt_mode_set(dev_id, + overflow_mode, + rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_dbg_cnt_mode_set"); + } + break; + } + + return rc; +} + +/***********************************************************/ +/**car模块dbg计数模式获取 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param p_overflow_mode 溢出模式,0-计数最大保持,1-计数最大翻转 +* @param p_rd_mode 读取模式,0-不读清,1-读清模式 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_mode_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_overflow_mode, + ZXIC_UINT32 *p_rd_mode) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, car_type, STAT_CAR_A_TYPE, STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_overflow_mode); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rd_mode); + + switch (car_type) { + case STAT_CAR_A_TYPE: + { + rc = dpp_stat_cara_dbg_cnt_mode_get(dev_id, + p_overflow_mode, + p_rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_dbg_cnt_mode_get"); + } + break; + + case STAT_CAR_B_TYPE: + { + rc = dpp_stat_carb_dbg_cnt_mode_get(dev_id, + p_overflow_mode, + p_rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carb_dbg_cnt_mode_get"); + } + break; + + case STAT_CAR_C_TYPE: + { + rc = dpp_stat_carc_dbg_cnt_mode_get(dev_id, + p_overflow_mode, + p_rd_mode); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_dbg_cnt_mode_get"); + } + break; + } + + return rc; +} + 
+/***********************************************************/ +/** car 模块调试计数 获取 +* @param dev_id 设备号 +* @param car_type car模式类型,参见STAT_CAR_TYPE_E +* @param p_car_dbg_cnt dbg计数信息 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/09/27 +************************************************************/ +DPP_STATUS dpp_stat_car_dbg_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 car_type, + DPP_STAT_CAR_DBG_CNT_T *p_car_dbg_cnt) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, car_type, STAT_CAR_A_TYPE, STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_dbg_cnt); + + switch (car_type) { + case STAT_CAR_A_TYPE: + { + rc = dpp_stat_cara_dbg_cnt_get(dev_id, + p_car_dbg_cnt); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_cara_dbg_cnt_get"); + } + break; + + case STAT_CAR_B_TYPE: + { + rc = dpp_stat_carb_dbg_cnt_get(dev_id, + p_car_dbg_cnt); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carb_dbg_cnt_get"); + } + break; + + case STAT_CAR_C_TYPE: + { + rc = dpp_stat_carc_dbg_cnt_get(dev_id, + p_car_dbg_cnt); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_carc_dbg_cnt_get"); + } + break; + } + + return rc; +} +#endif +/***********************************************************/ +/** 获取最大包长 +* @param dev_id +* @param car_type +* @param p_max_pkt_len +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_max_pkt_size_get(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 *p_max_pkt_len) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 pkt_len = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_max_pkt_len); + + switch (car_type) { + 
case STAT_CAR_A_TYPE: { + rc = dpp_stat_cara_max_pkt_size_get(dev, &pkt_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_max_pkt_size_get"); + } break; + + case STAT_CAR_B_TYPE: { + rc = dpp_stat_carb_max_pkt_size_get(dev, &pkt_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_max_pkt_size_get"); + } break; + + case STAT_CAR_C_TYPE: { + rc = dpp_stat_carc_max_pkt_size_get(dev, &pkt_len); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_max_pkt_size_get"); + } break; + } + + *p_max_pkt_len = pkt_len; + + return rc; +} + +/***********************************************************/ +/** 配置最大包长 +* @param dev_id +* @param car_type +* @param max_pkt_size +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author wll @date 2019/06/06 +************************************************************/ +DPP_STATUS dpp_stat_car_max_pkt_size_set(DPP_DEV_T *dev, ZXIC_UINT32 car_type, + ZXIC_UINT32 max_pkt_size) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), car_type, STAT_CAR_A_TYPE, + STAT_CAR_MAX_TYPE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), max_pkt_size, 0, 0x3fff); + + switch (car_type) { + case STAT_CAR_A_TYPE: { + rc = dpp_stat_cara_max_pkt_size_set(dev, max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_cara_max_pkt_size_set"); + } break; + + case STAT_CAR_B_TYPE: { + rc = dpp_stat_carb_max_pkt_size_set(dev, max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carb_max_pkt_size_set"); + } break; + + case STAT_CAR_C_TYPE: { + rc = dpp_stat_carc_max_pkt_size_set(dev, max_pkt_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stat_carc_max_pkt_size_set"); + } break; + } + + return rc; +} + +/***********************************************************/ +/** STAT CAR复位获取全局变量大小函数 +* @param dev_id +* @param p_size +* +* @return +* @remark 无 +* @see +* @author yxh @date 2018/06/26 
+************************************************************/ +DPP_STATUS dpp_stat_car_glb_size_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_size) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 queue_num = 0; + ZXIC_UINT32 profile_num = 0; + ZXIC_UINT32 pkt_profile_num = 0; + + DPP_CAR_SOFT_RESET_DATA_T *p_g_restore_data = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_size); + + p_g_restore_data = GET_DPP_CAR_SOFT_RESET_INFO(DEV_ID(dev)); + + if (0 == p_g_restore_data->is_init) { + ZXIC_COMM_PRINT("Not init!!!\n"); + *p_size = sizeof(ZXIC_UINT32); + } else { + /* CAR A */ + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), queue_num, + p_g_restore_data->cara_flow_num); + queue_num += p_g_restore_data->cara_flow_num; + + /* CAR B */ + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), queue_num, + p_g_restore_data->carb_flow_num); + queue_num += p_g_restore_data->carb_flow_num; + + /* CAR C */ + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), queue_num, + p_g_restore_data->carc_flow_num); + queue_num += p_g_restore_data->carc_flow_num; + + pkt_profile_num = p_g_restore_data->car0_pkt_num; + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT( + DEV_ID(dev), + (DPP_CAR_A_PROFILE_ID_MAX + DPP_CAR_B_PROFILE_ID_MAX + + DPP_CAR_C_PROFILE_ID_MAX + 3), + pkt_profile_num); + profile_num = DPP_CAR_A_PROFILE_ID_MAX + + DPP_CAR_B_PROFILE_ID_MAX + + DPP_CAR_C_PROFILE_ID_MAX + 3 - pkt_profile_num; + /**占用的格式如下: + (ZXIC_UINT32) (ZXIC_UINT32) (ZXIC_UINT32)(CAR0) (ZXIC_UINT32)(CAR1) (ZXIC_UINT32) + is_init queue_num queue_info pkt_profile_num pkt_profile_num pkt_profile_cfg profile_num profile_cfg + */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + DEV_ID(dev), queue_num, + ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T))); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + DEV_ID(dev), pkt_profile_num, + 
((ZXIC_UINT32)ZXIC_SIZEOF( + DPP_STAT_CAR_PKT_PROFILE_CFG_T))); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + DEV_ID(dev), profile_num, + ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_STAT_CAR_PROFILE_CFG_T))); + *p_size = ((ZXIC_UINT32)ZXIC_SIZEOF(ZXIC_UINT32)) * 5 + + queue_num * ((ZXIC_UINT32)ZXIC_SIZEOF( + DPP_CAR_SOFT_RESET_QUEUE_T)) + + pkt_profile_num * + ((ZXIC_UINT32)ZXIC_SIZEOF( + DPP_STAT_CAR_PKT_PROFILE_CFG_T)) + + profile_num * ((ZXIC_UINT32)ZXIC_SIZEOF( + DPP_STAT_CAR_PROFILE_CFG_T)); + ZXIC_COMM_PRINT("glb_size = %d!!!\n", *p_size); + } + + return rc; +} + +#if 0 +/***********************************************************/ +/** STAT CAR复位设置全局变量函数 +* @param dev_id 设备号 +* @param size 大小,字节数 +* @param p_data_buff 全局变量数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2018/06/26 +************************************************************/ +DPP_STATUS dpp_stat_car_glb_mgr_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 size, ZXIC_UINT8 *p_data_buff) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 is_init = 0; + ZXIC_UINT32 queue_num = 0; + ZXIC_UINT32 profile_num = 0; + ZXIC_UINT32 cara_profile_num = 0; + ZXIC_UINT32 car0_pkt_profile_num = 0; + ZXIC_UINT32 buff_offset = 0; + ZXIC_UINT32 size_of_stat_car = 0; + + DPP_CAR_SOFT_RESET_QUEUE_T *p_car_glb_queue_info = NULL; + DPP_STAT_CAR_PROFILE_CFG_T *p_car_glb_profile_info = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_car_glb_pkt_profile_info = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data_buff); + + rc = dpp_stat_car_glb_size_get(dev_id, &size_of_stat_car); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_glb_size_get"); + + if (size < size_of_stat_car) { + ZXIC_COMM_TRACE_ERROR("dpp stat car glb mgr recovery, size of buffer is smaller than defaultss\n"); + return DPP_ERR; + } + + ZXIC_COMM_MEMCPY(&is_init, p_data_buff, size); + buff_offset = sizeof(ZXIC_UINT32); + + if (0 == 
is_init) { + ZXIC_COMM_PRINT("Not init!!!\n"); + } else { + /* 获取queue的数目 */ + ZXIC_COMM_MEMCPY(&queue_num, p_data_buff + buff_offset, sizeof(ZXIC_UINT32)); + buff_offset += sizeof(ZXIC_UINT32); + p_car_glb_queue_info = (DPP_CAR_SOFT_RESET_QUEUE_T *)(p_data_buff + buff_offset); + + /* 设置flow_id的信息 */ + for (i = 0; i < queue_num; i++) { + rc = dpp_stat_car_queue_cfg_set(dev_id, + p_car_glb_queue_info->car_type, + p_car_glb_queue_info->flow_id, + p_car_glb_queue_info->drop_flag, + p_car_glb_queue_info->plcr_en, + p_car_glb_queue_info->profile_id); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_queue_cfg_set"); + } + + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, buff_offset, queue_num * ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T)); + buff_offset += (queue_num * ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T)) & 0xffffffff; + + ZXIC_COMM_MEMCPY(&car0_pkt_profile_num, p_data_buff + buff_offset, sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, buff_offset, sizeof(ZXIC_UINT32)); + buff_offset += sizeof(ZXIC_UINT32); + + /* 设置pkt_frofile的信息 */ + p_car_glb_pkt_profile_info = (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)(p_data_buff + buff_offset); + + for (i = 0; i < car0_pkt_profile_num; i++) { + rc = dpp_stat_car_profile_cfg_set(dev_id, + STAT_CAR_A_TYPE, + ZXIC_TRUE, + p_car_glb_pkt_profile_info->profile_id, + p_car_glb_pkt_profile_info); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_profile_cfg_set"); + p_car_glb_pkt_profile_info++; + } + + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, buff_offset, car0_pkt_profile_num * ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T)); + buff_offset += (car0_pkt_profile_num * ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T)) & 0xffffffff; + + ZXIC_COMM_MEMCPY(&profile_num, p_data_buff + buff_offset, sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, buff_offset, sizeof(ZXIC_UINT32)); + buff_offset += sizeof(ZXIC_UINT32); + + /* 设置profile的信息 */ + p_car_glb_profile_info = 
(DPP_STAT_CAR_PROFILE_CFG_T *)(p_data_buff + buff_offset); + + cara_profile_num = DPP_CAR_PKT_PROFILE_ID_MAX - car0_pkt_profile_num; + + ZXIC_COMM_PRINT(">>>>>>>>>>> Set CAR_A_PROFILE info start!\n"); + + for (j = 0; j < cara_profile_num; j++) { + rc = dpp_stat_car_profile_cfg_set(dev_id, + STAT_CAR_A_TYPE, + ZXIC_FALSE, + p_car_glb_profile_info->profile_id, + p_car_glb_profile_info); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_profile_cfg_set"); + p_car_glb_profile_info++; + } + + ZXIC_COMM_PRINT(">>>>>>>>>>> Set car CAR_B_PROFILE info start!\n"); + + for (j = 0; j <= DPP_CAR_B_PROFILE_ID_MAX; j++) { + rc = dpp_stat_car_profile_cfg_set(dev_id, + STAT_CAR_B_TYPE, + ZXIC_FALSE, + p_car_glb_profile_info->profile_id, + p_car_glb_profile_info); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_profile_cfg_set"); + p_car_glb_profile_info++; + } + + ZXIC_COMM_PRINT(">>>>>>>>>>> Set car CAR_C_PROFILE info start!\n"); + + for (j = 0; j <= DPP_CAR_C_PROFILE_ID_MAX; j++) { + rc = dpp_stat_car_profile_cfg_set(dev_id, + STAT_CAR_C_TYPE, + ZXIC_FALSE, + p_car_glb_profile_info->profile_id, + p_car_glb_profile_info); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_car_profile_cfg_set"); + p_car_glb_profile_info++; + } + + } /* end (1 == is_init) */ + + return rc; +} + +/***********************************************************/ +/** STAT CAR复位获取全局变量函数 +* @param dev_id 设备号 +* @param p_flag 释放使能,1-需要手动free,0-不需要手动free +* @param p_size 数据大小 +* @param pp_data_buff 全局变量数据 +* +* @return NPE_OK-成功,NPE_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2018/06/26 +************************************************************/ +DPP_STATUS dpp_stat_car_glb_mgr_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_flag, + ZXIC_UINT32 *p_size, + ZXIC_UINT8 **pp_data_buff) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 j = 0; + ZXIC_UINT32 is_init = 0; + ZXIC_UINT32 flow_num = 0; + ZXIC_UINT32 flow_num_total = 0; + ZXIC_UINT32 plcr_en = 0; + ZXIC_UINT32 drop_flag = 0; + ZXIC_UINT32 profile_id = 0; + 
ZXIC_UINT32 profile_num_total = 0; + ZXIC_UINT32 profile_pkt_total = 0; + ZXIC_UINT32 buff_offset = 0; + ZXIC_UINT32 size = 0; + DPP_CAR_SOFT_RESET_DATA_T *p_g_restore_data = NULL; + DPP_CAR_SOFT_RESET_QUEUE_T *p_car_glb_queue_info = NULL; + DPP_CAR_SOFT_RESET_QUEUE_T *p_car_glb_queue_info_temp = NULL; + DPP_STAT_CAR_PROFILE_CFG_T *p_car_glb_profile_info = NULL; + DPP_STAT_CAR_PROFILE_CFG_T *p_car_glb_profile_info_temp = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_car_glb_pkt_profile_info = NULL; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *p_car_glb_pkt_profile_info_temp = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_size); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_flag); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pp_data_buff); + + p_g_restore_data = GET_DPP_CAR_SOFT_RESET_INFO(dev_id); + + is_init = p_g_restore_data->is_init; + + if (0 == is_init) { + *p_size = sizeof(ZXIC_UINT32); + *p_flag = ZXIC_TRUE; + + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: No init\n"); + + *pp_data_buff = ZXIC_COMM_MALLOC(*p_size); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, *pp_data_buff); + ZXIC_COMM_MEMSET(*pp_data_buff, 0, *p_size); + + ZXIC_COMM_MEMCPY(*pp_data_buff, &is_init, sizeof(ZXIC_UINT32)); + } else { + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: Inited\n"); + + /** queue 绑定的信息 */ + /* CAR A */ + flow_num = p_g_restore_data->cara_flow_num; + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "[dpp_stat_car_glb_mgr_get] cara_queue_num : 0x%08x\n", flow_num); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, flow_num_total, flow_num); + flow_num_total += flow_num; + + /* CAR B */ + flow_num = p_g_restore_data->carb_flow_num; + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "[dpp_stat_car_glb_mgr_get] carb_queue_num : 0x%08x\n", flow_num); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, flow_num_total, flow_num); + flow_num_total += flow_num; + + /* CAR C */ + flow_num = p_g_restore_data->carc_flow_num; + 
ZXIC_COMM_TRACE_DEV_INFO(dev_id, "[dpp_stat_car_glb_mgr_get] carc_queue_num : 0x%08x\n", flow_num); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, flow_num_total, flow_num); + flow_num_total += flow_num; + + profile_pkt_total = p_g_restore_data->car0_pkt_num; + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, DPP_CAR_PROFILE_ID_TOTAL, profile_pkt_total); + profile_num_total = DPP_CAR_PROFILE_ID_TOTAL - profile_pkt_total; + + /**占用的格式如下: + (ZXIC_UINT32) (ZXIC_UINT32) (ZXIC_UINT32)(CAR0) (ZXIC_UINT32)(CAR1) (ZXIC_UINT32) + is_init queue_num queue_info pkt_profile_num pkt_profile_num pkt_profile_cfg profile_num profile_cfg + */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, flow_num_total, ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T))); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, profile_pkt_total, ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T))); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, profile_num_total, ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_STAT_CAR_PROFILE_CFG_T))); + *p_size = ((ZXIC_UINT32)ZXIC_SIZEOF(ZXIC_UINT32)) * 5 + flow_num_total * ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T)) + + profile_pkt_total * ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T)) + + profile_num_total * ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_STAT_CAR_PROFILE_CFG_T)); + + /* 存放init_flag */ + *pp_data_buff = ZXIC_COMM_MALLOC(*p_size); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, *pp_data_buff); + ZXIC_COMM_MEMSET(*pp_data_buff, 0, *p_size); + ZXIC_COMM_MEMCPY(*pp_data_buff, &is_init, sizeof(ZXIC_UINT32)); + buff_offset = sizeof(ZXIC_UINT32); + + /* 存放queue_num */ + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, &flow_num_total, sizeof(ZXIC_UINT32)); + buff_offset += sizeof(ZXIC_UINT32); + + /* 获取、存放queue 绑定的信息 */ + if (0 != flow_num_total) { + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: Get queue info start!\n"); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, flow_num_total, 
((ZXIC_UINT32)ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T))); + p_car_glb_queue_info = (DPP_CAR_SOFT_RESET_QUEUE_T *)ZXIC_COMM_MALLOC(flow_num_total * ((ZXIC_UINT32)ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T))); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_car_glb_queue_info); + ZXIC_COMM_MEMSET(p_car_glb_queue_info, 0, flow_num_total * sizeof(DPP_CAR_SOFT_RESET_QUEUE_T)); + p_car_glb_queue_info_temp = p_car_glb_queue_info; + + /* CAR A */ + flow_num = p_g_restore_data->cara_flow_num; + + for (j = 0; j < flow_num; j++) { + rc = dpp_stat_car_queue_cfg_get(dev_id, + STAT_CAR_A_TYPE, + p_g_restore_data->cara_item[j].flow_id, + &drop_flag, + &plcr_en, + &profile_id); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_queue_cfg_get"); + return rc; + } + + p_car_glb_queue_info_temp->car_type = STAT_CAR_A_TYPE; + p_car_glb_queue_info_temp->flow_id = p_g_restore_data->cara_item[j].flow_id; + p_car_glb_queue_info_temp->drop_flag = drop_flag; + p_car_glb_queue_info_temp->plcr_en = plcr_en; + p_car_glb_queue_info_temp->profile_id = profile_id; + p_car_glb_queue_info_temp++; + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "carA used flow_id[%d] : 0x%08x\n", j, p_g_restore_data->cara_item[j].flow_id); + } + + /* CAR B */ + flow_num = p_g_restore_data->carb_flow_num; + + for (j = 0; j < flow_num; j++) { + rc = dpp_stat_car_queue_cfg_get(dev_id, + STAT_CAR_B_TYPE, + p_g_restore_data->carb_item[j].flow_id, + &drop_flag, + &plcr_en, + &profile_id); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_queue_cfg_get"); + return rc; + } + + p_car_glb_queue_info_temp->car_type = STAT_CAR_B_TYPE; + p_car_glb_queue_info_temp->flow_id = p_g_restore_data->carb_item[j].flow_id; + p_car_glb_queue_info_temp->drop_flag 
= drop_flag; + p_car_glb_queue_info_temp->plcr_en = plcr_en; + p_car_glb_queue_info_temp->profile_id = profile_id; + p_car_glb_queue_info_temp++; + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "carB used flow_id[%d] : 0x%08x\n", j, p_g_restore_data->carb_item[j].flow_id); + } + + /* CAR C */ + flow_num = p_g_restore_data->carc_flow_num; + + for (j = 0; j < flow_num; j++) { + rc = dpp_stat_car_queue_cfg_get(dev_id, + STAT_CAR_C_TYPE, + p_g_restore_data->carc_item[j].flow_id, + &drop_flag, + &plcr_en, + &profile_id); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_queue_cfg_get"); + return rc; + } + + p_car_glb_queue_info_temp->car_type = STAT_CAR_C_TYPE; + p_car_glb_queue_info_temp->flow_id = p_g_restore_data->carc_item[j].flow_id; + p_car_glb_queue_info_temp->drop_flag = drop_flag; + p_car_glb_queue_info_temp->plcr_en = plcr_en; + p_car_glb_queue_info_temp->profile_id = profile_id; + p_car_glb_queue_info_temp++; + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "carC used flow_id[%d] : 0x%08x\n", j, p_g_restore_data->carb_item[j].flow_id); + } + + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, p_car_glb_queue_info, flow_num_total * sizeof(DPP_CAR_SOFT_RESET_QUEUE_T)); + if ((0xFFFFFFFF - (buff_offset)) < (flow_num_total * ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T))) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", __FILE__, __LINE__, buff_offset, flow_num_total * ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T), __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + buff_offset += (flow_num_total * ZXIC_SIZEOF(DPP_CAR_SOFT_RESET_QUEUE_T)) % 0xffffffff; + } + + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: total flow_num = %d !\n", flow_num_total); + + /** profile 的信息 */ + if (0 != profile_pkt_total) { + p_car_glb_pkt_profile_info = (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)ZXIC_COMM_MALLOC(profile_pkt_total * ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T)); + if (NULL == (p_car_glb_pkt_profile_info)) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + + ZXIC_COMM_MEMSET(p_car_glb_pkt_profile_info, 0, profile_pkt_total * sizeof(DPP_STAT_CAR_PKT_PROFILE_CFG_T)); + p_car_glb_pkt_profile_info_temp = p_car_glb_pkt_profile_info; + } + size = ZXIC_SIZEOF(DPP_STAT_CAR_PROFILE_CFG_T); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, profile_num_total, size); + p_car_glb_profile_info = (DPP_STAT_CAR_PROFILE_CFG_T *)ZXIC_COMM_MALLOC(profile_num_total * size); + if (NULL == (p_car_glb_profile_info)) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + + ZXIC_COMM_MEMSET(p_car_glb_profile_info, 0, profile_num_total * sizeof(DPP_STAT_CAR_PROFILE_CFG_T)); + p_car_glb_profile_info_temp = p_car_glb_profile_info; + + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: Get profile info start!\n"); + ZXIC_COMM_PRINT(">>>>>>>>>>> Get car CAR_A_PROFILE info start!\n"); + + for (j = 0; j <= DPP_CAR_A_PROFILE_ID_MAX; j++) { + /* (0 != profile_pkt_total) -- Coverity err : 递增 null 指针 p_car_glb_pkt_profile_info_temp */ + if ((0 != profile_pkt_total) && (ZXIC_TRUE == p_g_restore_data->car_pkt_sign[j])) { + ZXIC_COMM_TRACE_DEV_INFO(dev_id, "car profile_id[%d] is pkt_profile!!!!\n", j); + rc = dpp_stat_car_profile_cfg_get(dev_id, + STAT_CAR_A_TYPE, + ZXIC_TRUE, + j, + p_car_glb_pkt_profile_info_temp); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_FREE(p_car_glb_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_profile_cfg_get"); + return rc; + } + + p_car_glb_pkt_profile_info_temp++; + } + } + + for (j = 0; j <= DPP_CAR_A_PROFILE_ID_MAX; j++) { + { + { + rc = dpp_stat_car_profile_cfg_get(dev_id, { + ZXIC_FALSE, + j, + p_car_glb_profile_info_temp); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_profile_cfg_get"); + return rc; + } + + p_car_glb_profile_info_temp++; + } + } + + ZXIC_COMM_PRINT(">>>>>>>>>>> Get car CAR_B_PROFILE info start!\n"); + + for (j = 0; j <= DPP_CAR_B_PROFILE_ID_MAX; j++) { + rc = dpp_stat_car_profile_cfg_get(dev_id, + STAT_CAR_B_TYPE, + ZXIC_FALSE, { + p_car_glb_profile_info_temp); + if (DPP_OK != rc) { + 
ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_FREE(p_car_glb_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_profile_cfg_get"); + { + } + + p_car_glb_profile_info_temp++; + } + + ZXIC_COMM_PRINT(">>>>>>>>>>> Get car CAR_C_PROFILE info start!\n"); + + for (j = 0; j <= DPP_CAR_C_PROFILE_ID_MAX; j++) { + rc = dpp_stat_car_profile_cfg_get(dev_id, + STAT_CAR_C_TYPE, + ZXIC_FALSE, + j, + p_car_glb_profile_info_temp); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_FREE(p_car_glb_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", __FILE__, __LINE__, rc, __FUNCTION__, "dpp_stat_car_profile_cfg_get"); { + } + p_car_glb_profile_info_temp++; + } + + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, &(p_g_restore_data->car0_pkt_num), sizeof(ZXIC_UINT32)); + buff_offset += sizeof(ZXIC_UINT32); + + if ((0xFFFFFFFF - (buff_offset)) < (sizeof(ZXIC_UINT32))) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_profile_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! 
FUNCTION :%s !\n", __FILE__, __LINE__, buff_offset, sizeof(ZXIC_UINT32), __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + buff_offset += sizeof(ZXIC_UINT32); + + if (0 != profile_pkt_total) { + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, p_car_glb_pkt_profile_info, profile_pkt_total * sizeof(DPP_STAT_CAR_PKT_PROFILE_CFG_T)); + buff_offset += profile_pkt_total * ZXIC_SIZEOF(DPP_STAT_CAR_PKT_PROFILE_CFG_T); + } + + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: total pkt_profile = %d !\n", profile_pkt_total); + + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, &profile_num_total, sizeof(ZXIC_UINT32)); + if ((0xFFFFFFFF - (buff_offset)) < (sizeof(ZXIC_UINT32))) { + ZXIC_COMM_FREE(p_car_glb_queue_info); + ZXIC_COMM_FREE(p_car_glb_profile_info); + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n", __FILE__, __LINE__, buff_offset, sizeof(ZXIC_UINT32), __FUNCTION__); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + buff_offset += sizeof(ZXIC_UINT32); + + if (0 != profile_num_total) { + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: total profile = %d !\n", profile_num_total); + ZXIC_COMM_MEMCPY(*pp_data_buff + buff_offset, p_car_glb_profile_info, profile_num_total * sizeof(DPP_STAT_CAR_PROFILE_CFG_T)); + buff_offset += profile_num_total * ZXIC_SIZEOF(DPP_STAT_CAR_PROFILE_CFG_T); + } + + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: total profile = %d !\n", profile_num_total); + ZXIC_COMM_PRINT(">>>>>>>>>>>[dpp_stat_car_glb_mgr_get]: buff_offset= %d !\n", buff_offset); + + *p_flag = ZXIC_TRUE; + + ZXIC_COMM_FREE(p_car_glb_queue_info); + + if (0 != profile_pkt_total) { + ZXIC_COMM_FREE(p_car_glb_pkt_profile_info); + } + + ZXIC_COMM_FREE(p_car_glb_profile_info); + } + { + + return rc; +} + +#endif +#endif + +#if ZXIC_REAL("TEST") + +#if 0 +/***********************************************************/ +/** +* @param p_rb_cfg +* +* @return +* 
@remark 无 +* @see +* @author XXX @date 2017/10/31 +************************************************************/ +DPP_STATUS dpp_stat_queue_rb_root_prt(ZXIC_RB_CFG *p_rb_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dev_id = 0; + + DPP_CAR_QUEUE_RB_KEY_T *p_tmp_car_queue_rb_key = NULL; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_cfg); + + if (NULL != p_rb_cfg->p_root) { + p_tmp_car_queue_rb_key = (DPP_CAR_QUEUE_RB_KEY_T *)p_rb_cfg->p_root->p_key; + + if (NULL != p_tmp_car_queue_rb_key) { + ZXIC_COMM_PRINT("dpp_stat_queue_rb_root_prt[GET] car_queue_cfg:\n"); + + for (i = 0; i < DPP_CAR_QUEUE_CFG_ZXIC_UINT8; i++) { + ZXIC_COMM_PRINT("%02x\t", p_tmp_car_queue_rb_key->profile_cfg[i]); + + if (0 == (i + 1) % 4) { + { + } + } + + ZXIC_COMM_PRINT("\n"); { + } + + } + { +} + +/***********************************************************/ +/** +* @param p_rb_cfg +* +* @return +* @remark 无 +* @see +* @author XXX @date 2017/10/31 +************************************************************/ +DPP_STATUS dpp_stat_car_profile_id_rb_root_prt(ZXIC_UINT32 dev_id, ZXIC_RB_CFG *p_rb_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + + DPP_CAR_PROFILE_ID_RB_KEY_T *p_tmp_car_profile_id_rb_key = NULL; + ZXIC_RB_TN *p_car_rb_node = NULL; + DPP_CAR_PROFILE_RB_KEY_T *p_car_profile_rb_key = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_cfg); + + if (NULL != p_rb_cfg->p_root) { + p_tmp_car_profile_id_rb_key = (DPP_CAR_PROFILE_ID_RB_KEY_T *)p_rb_cfg->p_root->p_key; + + if (NULL != p_tmp_car_profile_id_rb_key) { + /* ZXIC_COMM_PRINT("dpp_stat_car_profile_id_rb_root_prt pp_car_node ADDRESS: 0x%08x\n", (ZXIC_UINT32)p_tmp_car_profile_id_rb_key->p_car_node); */ + p_car_rb_node = (ZXIC_RB_TN *)p_tmp_car_profile_id_rb_key->p_car_node; + + if (NULL != p_car_rb_node) { + p_car_profile_rb_key = (DPP_CAR_PROFILE_RB_KEY_T *)p_car_rb_node->p_key; + { + { + for (i = 0; i < 
DPP_CAR_PROFILE_CFG_ZXIC_UINT32; i++) { + ZXIC_COMM_PRINT("%08x\t", p_car_profile_rb_key->profile_cfg[i]); { + if (0 == (i + 1) % 4) { + ZXIC_COMM_PRINT("\n"); + } + } { + ZXIC_COMM_PRINT("\n"); + } + } + } { + + return rc; { { +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_cfg.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_cfg.c new file mode 100644 index 000000000000..28fe573adad2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se/dpp_stat_cfg.c @@ -0,0 +1,288 @@ +/************************************************************** +* Ȩ (C)2013-2015, ͨѶɷ޹˾ +* ļ : dpp_stat_cfg.c +* ļʶ : +* ժҪ : +* ˵ : +* ǰ汾 : +* : ls +* : 2016/03/29 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* ޸ļ¼1: +* ޸: +* : +* : +* ޸: +***************************************************************/ +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_dev.h" +#include "dpp_stat_reg.h" +#include "dpp_stat_cfg.h" +#include "dpp_stat_api.h" +#include "dpp_se_api.h" +#include "dpp_se.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" + +PPU_STAT_CFG_T g_ppu_stat_cfg = { 0 }; + +#if ZXIC_REAL("Basic Reg Operation") + +/***********************************************************/ +/** ȡppuͳƬ +* @param dev_id 豸 +* @param p_ppu_eram_depth ppuͳƬ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_depth_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_eram_depth) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_STAT_CFG_PPU_ERAM_DEPTH_T ppu_eram_depth_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ppu_eram_depth); + + rc = dpp_reg_read(dev, STAT_STAT_CFG_PPU_ERAM_DEPTHr, 0, 0, + &ppu_eram_depth_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_ppu_eram_depth = ppu_eram_depth_cfg.ppu_eram_depth; + + 
return rc; +} + +/***********************************************************/ +/** ȡppuͳ ERAMַ +* @param dev_id 豸 +* @param p_ppu_eram_baddr ppuͳƬڻַ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_baddr_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_eram_baddr) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_STAT_CFG_PPU_ERAM_BASE_ADDR_T ppu_eram_baddr_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ppu_eram_baddr); + + rc = dpp_reg_read(dev, STAT_STAT_CFG_PPU_ERAM_BASE_ADDRr, 0, 0, + &ppu_eram_baddr_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_ppu_eram_baddr = ppu_eram_baddr_cfg.ppu_eram_base_addr; + + return rc; +} + +/***********************************************************/ +/** ȡppuͳ ddrַ +* @param dev_id 豸 +* @param p_ppu_ddr_baddr ppuͳƬַ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_ddr_baddr_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_ppu_ddr_baddr) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_STAT_CFG_PPU_DDR_BASE_ADDR_T ppu_ddr_baddr_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ppu_ddr_baddr); + + rc = dpp_reg_read(dev, STAT_STAT_CFG_PPU_DDR_BASE_ADDRr, 0, 0, + &ppu_ddr_baddr_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_ppu_ddr_baddr = ppu_ddr_baddr_cfg.ppu_ddr_base_addr; + + return rc; +} +#endif + +#if ZXIC_REAL("Advanced Function") +/***********************************************************/ +/** ppuֵȡ +* @param dev_id 豸 +* @param rd_mode ȡλģʽμSTAT_CNT_MODE_E0-64bit1-128bit +* @param index λμrd_mode +* @param clr_mode ģʽμSTAT_RD_CLR_MODE_E0-壬1- +* @param p_data Σȡ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/07/11 
+************************************************************/ +DPP_STATUS dpp_stat_ppu_cnt_get(DPP_DEV_T *dev, STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 index, ZXIC_UINT32 clr_mode, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 ppu_eram_baddr = 0; + ZXIC_UINT32 ppu_eram_depth = 0; + ZXIC_UINT32 ppu_ddr_baddr = 0; + ZXIC_UINT32 eram_rd_mode = 0; + ZXIC_UINT32 eram_clr_mode = 0; + // ZXIC_UINT32 ddr_rd_mode = 0; + // ZXIC_UINT32 ddr_clr_mode = 0; + // ZXIC_UINT32 ddr_index = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_mode, STAT_64_MODE, + STAT_MAX_MODE - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), clr_mode, STAT_RD_CLR_MODE_UNCLR, + STAT_RD_CLR_MODE_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + rc = dpp_stat_ppu_eram_depth_get(dev, &ppu_eram_depth); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stat_ppu_eram_depth_get"); + + rc = dpp_stat_ppu_eram_baddr_get(dev, &ppu_eram_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stat_ppu_eram_baddr_get"); + + rc = dpp_stat_ppu_ddr_baddr_get(dev, &ppu_ddr_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stat_ppu_ddr_baddr_get"); + + /** Ƭڴ洢 */ + if ((index >> (STAT_128_MODE - rd_mode)) < ppu_eram_depth) { + if (STAT_128_MODE == rd_mode) { + eram_rd_mode = ERAM128_OPR_128b; + } else { + eram_rd_mode = ERAM128_OPR_64b; + } + + if (STAT_RD_CLR_MODE_UNCLR == clr_mode) { + eram_clr_mode = RD_MODE_HOLD; + } else { + eram_clr_mode = RD_MODE_CLEAR; + } + + rc = dpp_se_smmu0_ind_read(dev, ppu_eram_baddr, index, + eram_rd_mode, eram_clr_mode, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_se_smmu0_ind_read"); + } + /** Ƭ洢 */ + else { + // if (STAT_128_MODE == rd_mode) + // { + // ddr_rd_mode = CMMU_RD_MODE_128; + // } + // else + // { + // ddr_rd_mode = CMMU_RD_MODE_64; + // } + + // if (STAT_RD_CLR_MODE_UNCLR == clr_mode) + // { + // ddr_clr_mode = 
CMMU_RD_CLR_MODE_UNCLR; + // } + // else + // { + // ddr_clr_mode = CMMU_RD_CLR_MODE_CLR; + // } + + // ddr_index = index - (ppu_eram_depth << (STAT_128_MODE - rd_mode)); + + // rc = dpp_se_cmmu_ddr_read(dev_id, + // ppu_ddr_baddr, + // ddr_rd_mode, + // ddr_clr_mode, + // ddr_index, + // p_data); + // ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_cmmu_ddr_read"); + } + + return rc; +} + +/***********************************************************/ +/** ppuͳƬ +* @param dev_id 豸 +* @param ppu_eram_depth ppuͳƬ,128bitΪλ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_depth_set(DPP_DEV_T *dev, + ZXIC_UINT32 ppu_eram_depth) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_STAT_CFG_PPU_ERAM_DEPTH_T ppu_eram_depth_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), ppu_eram_depth, 0, + DPP_STAT_PPU_ERAM_DEPTH_MAX); + + ppu_eram_depth_cfg.ppu_eram_depth = ppu_eram_depth; + + rc = dpp_reg_write(dev, STAT_STAT_CFG_PPU_ERAM_DEPTHr, 0, 0, + &ppu_eram_depth_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + g_ppu_stat_cfg.eram_depth = ppu_eram_depth; + + return rc; +} + +/***********************************************************/ +/** ppuͳ ERAMַ +* @param dev_id 豸 +* @param ppu_eram_baddr ppuͳeRamַ,128bitΪλ +* +* @return NPE_OK-ɹNPE_ERR-ʧ +* @remark +* @see +* @author ls @date 2016/03/31 +************************************************************/ +DPP_STATUS dpp_stat_ppu_eram_baddr_set(DPP_DEV_T *dev, + ZXIC_UINT32 ppu_eram_baddr) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_STAT_CFG_PPU_ERAM_BASE_ADDR_T ppu_eram_baddr_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), ppu_eram_baddr, 0, + 
DPP_STAT_PPU_ERAM_BADDR_MAX); + + ppu_eram_baddr_cfg.ppu_eram_base_addr = ppu_eram_baddr; + + rc = dpp_reg_write(dev, STAT_STAT_CFG_PPU_ERAM_BASE_ADDRr, 0, 0, + &ppu_eram_baddr_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_reg_write"); + + g_ppu_stat_cfg.eram_baddr = ppu_eram_baddr; + + return rc; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/Kbuild.include new file mode 100644 index 000000000000..8a8b8ccd5b83 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/se_apt/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_acl.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_acl.c new file mode 100644 index 000000000000..630b5a0160a0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_acl.c @@ -0,0 +1,426 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_acl.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 陈勤00181032 +* 完成日期 : 2023/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se.h" +#include "dpp_dev.h" +#include "dpp_sdt.h" +#include "dpp_acl.h" +#include "dpp_dtb_table.h" + +static DPP_ACL_CFG_EX_T *g_apt_acl_cfg[DPP_PCIE_SLOT_MAX] = { NULL }; + +DPP_ACL_CFG_EX_T *dpp_apt_get_acl_cfg(DPP_DEV_T *dev) +{ + ZXIC_UINT32 slot = 0; + if (NULL == dev) { + return NULL; + } + slot = DEV_PCIE_SLOT(dev); + if (slot < DPP_PCIE_SLOT_MAX) { + return g_apt_acl_cfg[slot]; + } + return NULL; +} + +/***********************************************************/ 
+/** acl资源初始化 +* @param dev_id 设备号 +* @param tbl_num etcam对应的sdt表个数 +* @param pAclTblRes acl表资源信息,包括SDT配置信息,acl资源(条目数,存放方式和占用的block)和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_acl_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_ACL_TABLE_T *pAclTblRes) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 index = 0; + ZXIC_UINT32 slot = 0; + DPP_APT_ACL_TABLE_T *pTempAclTbl = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(tbl_num, DPP_ETCAM_TBLID_NUM); + ZXIC_COMM_CHECK_POINT(pAclTblRes); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + if (NULL == g_apt_acl_cfg[slot]) { + g_apt_acl_cfg[slot] = (DPP_ACL_CFG_EX_T *)ZXIC_COMM_MALLOC( + sizeof(DPP_ACL_CFG_EX_T)); + ZXIC_COMM_CHECK_POINT(g_apt_acl_cfg[slot]); + } + + rc = dpp_acl_cfg_init_ex(dev, g_apt_acl_cfg[slot], + (ZXIC_VOID *)ZXIC_COMM_VAL_TO_PTR(DEV_ID(dev)), + DPP_ACL_FLAG_ETCAM0_EN, NULL); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_acl_cfg_init_ex"); + + for (index = 0; index < tbl_num; index++) { + pTempAclTbl = pAclTblRes + index; + rc = dpp_sdt_tbl_write(dev, pTempAclTbl->sdtNo, + pTempAclTbl->aclSdt.table_type, + &(pTempAclTbl->aclSdt), SDT_OPER_ADD); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_sdt_tbl_write"); + + rc = dpp_acl_tbl_init_ex(g_apt_acl_cfg[slot], + pTempAclTbl->aclSdt.etcam_table_id, + pTempAclTbl->aclSdt.as_en, + pTempAclTbl->aclRes.entry_num, + pTempAclTbl->aclRes.pri_mode, + pTempAclTbl->aclSdt.etcam_key_mode, + pTempAclTbl->aclSdt.as_rsp_mode, + pTempAclTbl->aclSdt.as_eram_baddr, + pTempAclTbl->aclRes.block_num, + pTempAclTbl->aclRes.block_index); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_acl_tbl_init_ex"); + + rc = dpp_apt_set_callback(dev, pTempAclTbl->sdtNo, + pTempAclTbl->aclSdt.table_type, + (ZXIC_VOID *)pTempAclTbl); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_apt_set_callback"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** acl软件资源释放 +* @param dev 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/30 +************************************************************/ +DPP_STATUS dpp_apt_acl_soft_res_uninit(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 pri_mode = 0; + ZXIC_UINT32 slot = 0; + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + rc = dpp_acl_cfg_get(dev, &p_acl_cfg); //获取ACL表资源配置 + ZXIC_COMM_CHECK_RC(rc, "dpp_acl_cfg_get"); + + if (!p_acl_cfg->acl_etcamids.is_valid) { + ZXIC_COMM_TRACE_ERROR("etcam is not init!\n"); + return DPP_ACL_RC_ETCAMID_NOT_INIT; + } + + for (table_id = DPP_ACL_TBL_ID_MIN; table_id <= DPP_ACL_TBL_ID_MAX; + table_id++) { + p_tbl_cfg = p_acl_cfg->acl_tbls + table_id; + if (!p_tbl_cfg->is_used) { + ZXIC_COMM_TRACE_DEBUG("table_id[ %d ] is not used!\n", + table_id); + continue; + } + + rc = (DPP_STATUS)zxic_comm_rb_destroy(&(p_tbl_cfg->acl_rb)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_rb_destroy"); + + rc = zxic_comm_indexfill_destroy(&(p_tbl_cfg->index_mng)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_indexfill_destroy"); + + as_enable = p_tbl_cfg->as_enable; + if (as_enable) { + if (p_tbl_cfg->as_rslt_buff) { + ZXIC_COMM_FREE(p_tbl_cfg->as_rslt_buff); + p_tbl_cfg->as_rslt_buff = NULL; + } + } + + pri_mode = p_tbl_cfg->pri_mode; + if ((pri_mode == DPP_ACL_PRI_EXPLICIT) || + (pri_mode == DPP_ACL_PRI_IMPLICIT)) { + if (p_tbl_cfg->acl_key_buff) { + ZXIC_COMM_FREE(p_tbl_cfg->acl_key_buff); + p_tbl_cfg->acl_key_buff = NULL; + } + } + + if (p_tbl_cfg->block_array) { + ZXIC_COMM_FREE(p_tbl_cfg->block_array); + 
p_tbl_cfg->block_array = NULL; + } + } + + if (NULL != g_apt_acl_cfg[slot]) { + ZXIC_COMM_FREE(g_apt_acl_cfg[slot]); + g_apt_acl_cfg[slot] = NULL; + dpp_acl_cfg_set(dev, NULL); + } + + return DPP_OK; +} + +/***********************************************************/ +/** acl表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 业务插入表项内容,具体结构体由业务确定(结构体的第一个字段必须为index),SDK不感知 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[16] = { 0 }; /*128bit*/ + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_ACL_ENTRY_EX_T aclEntry = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + //DPP_SDTTBL_ETCAM_T sdt_acl_info = {0}; /*SDT内容*/ + SE_APT_CALLBACK_T *pAptCallback = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET(data, 0x0, sizeof(data)); + ZXIC_COMM_MEMSET(mask, 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET(rst, 0x0, sizeof(rst)); + ZXIC_COMM_MEMSET(&aclEntry, 0x0, sizeof(DPP_ACL_ENTRY_EX_T)); + ZXIC_COMM_MEMSET(&tDtbAclEntry, 0x0, sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + aclEntry.key_data = data; + aclEntry.key_mask = mask; + aclEntry.p_as_rslt = rst; + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + ZXIC_COMM_CHECK_DEV_POINT( + DEV_ID(dev), pAptCallback->se_func_info.aclFunc.acl_set_func); + + rc = pAptCallback->se_func_info.aclFunc.acl_set_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + tDtbAclEntry.handle = aclEntry.pri; + 
tDtbAclEntry.key_data = aclEntry.key_data; + tDtbAclEntry.key_mask = aclEntry.key_mask; + tDtbAclEntry.p_as_rslt = aclEntry.p_as_rslt; + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, 1, &tDtbAclEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_dma_insert"); + + return rc; +} + +/***********************************************************/ +/** acl表项删除 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 删除业务表项内容,仅需填入index信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/25 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_del(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[16] = { 0 }; /*128bit*/ + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_ACL_ENTRY_EX_T aclEntry = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET(data, 0xff, sizeof(data)); + ZXIC_COMM_MEMSET(mask, 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET(rst, 0x0, sizeof(rst)); + ZXIC_COMM_MEMSET(&aclEntry, 0x0, sizeof(DPP_ACL_ENTRY_EX_T)); + ZXIC_COMM_MEMSET(&tDtbAclEntry, 0x0, sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + ZXIC_COMM_CHECK_DEV_POINT( + DEV_ID(dev), pAptCallback->se_func_info.aclFunc.acl_set_func); + + rc = pAptCallback->se_func_info.aclFunc.acl_set_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + tDtbAclEntry.handle = aclEntry.pri; + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + 
tDtbAclEntry.p_as_rslt = rst; + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, 1, &tDtbAclEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_dma_insert"); + + return rc; +} + +/***********************************************************/ +/** acl表项查找(handle+data+mask有效) +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no sdt号 0~255 +* @param pData 查找表项 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/21 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_search(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[16] = { 0 }; /*128bit*/ + ZXIC_UINT32 rc = DPP_OK; + + DPP_ACL_ENTRY_EX_T aclEntry = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + //DPP_SDTTBL_ETCAM_T sdt_acl_info = {0}; /*SDT内容*/ + SE_APT_CALLBACK_T *pAptCallback = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET(data, 0x0, sizeof(data)); + ZXIC_COMM_MEMSET(mask, 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET(rst, 0x0, sizeof(rst)); + ZXIC_COMM_MEMSET(&aclEntry, 0x0, sizeof(DPP_ACL_ENTRY_EX_T)); + ZXIC_COMM_MEMSET(&tDtbAclEntry, 0x0, sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + aclEntry.key_data = data; + aclEntry.key_mask = mask; + aclEntry.p_as_rslt = rst; + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + ZXIC_COMM_CHECK_DEV_POINT( + DEV_ID(dev), pAptCallback->se_func_info.aclFunc.acl_set_func); + + rc = pAptCallback->se_func_info.aclFunc.acl_set_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + tDtbAclEntry.handle = aclEntry.pri; + tDtbAclEntry.key_data = aclEntry.key_data; + 
tDtbAclEntry.key_mask = aclEntry.key_mask; + tDtbAclEntry.p_as_rslt = aclEntry.p_as_rslt; + + rc = dpp_dtb_acl_data_get(dev, queue_id, sdt_no, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_acl_data_get"); + + aclEntry.pri = tDtbAclEntry.handle; + aclEntry.key_data = tDtbAclEntry.key_data; + aclEntry.key_mask = tDtbAclEntry.key_mask; + aclEntry.p_as_rslt = tDtbAclEntry.p_as_rslt; + + rc = pAptCallback->se_func_info.aclFunc.acl_get_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + return rc; +} + +/***********************************************************/ +/** 根据handle获取到acl表项信息 +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no sdt号 0~255 +* @param pData 查找表项 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/21 +************************************************************/ +DPP_STATUS dpp_apt_dtb_acl_entry_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; /*640bit*/ + ZXIC_UINT8 rst[16] = { 0 }; /*128bit*/ + ZXIC_UINT32 rc = DPP_OK; + + DPP_ACL_ENTRY_EX_T aclEntry = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T tDtbAclEntry = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pData); + + ZXIC_COMM_MEMSET(data, 0x0, sizeof(data)); + ZXIC_COMM_MEMSET(mask, 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET(rst, 0x0, sizeof(rst)); + ZXIC_COMM_MEMSET(&aclEntry, 0x0, sizeof(DPP_ACL_ENTRY_EX_T)); + ZXIC_COMM_MEMSET(&tDtbAclEntry, 0x0, sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + ZXIC_COMM_CHECK_DEV_POINT( + DEV_ID(dev), pAptCallback->se_func_info.aclFunc.acl_set_func); + + rc = 
pAptCallback->se_func_info.aclFunc.acl_set_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + tDtbAclEntry.handle = aclEntry.pri; + tDtbAclEntry.key_data = data; + tDtbAclEntry.key_mask = mask; + tDtbAclEntry.p_as_rslt = rst; + + rc = dpp_dtb_etcam_data_get(dev, queue_id, sdt_no, &tDtbAclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_etcam_data_get"); + + aclEntry.pri = tDtbAclEntry.handle; + aclEntry.key_data = tDtbAclEntry.key_data; + aclEntry.key_mask = tDtbAclEntry.key_mask; + aclEntry.p_as_rslt = tDtbAclEntry.p_as_rslt; + rc = pAptCallback->se_func_info.aclFunc.acl_get_func((void *)pData, + &aclEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "acl_entry_func"); + + return rc; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_comm.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_comm.c new file mode 100644 index 000000000000..66d8d7028258 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_comm.c @@ -0,0 +1,391 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_common.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 陈勤00181032 +* 完成日期 : 2023/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se.h" +#include "dpp_dev.h" +#include "dpp_dtb_table_api.h" +#include "dpp_dtb_table.h" +#include "dpp_kernel_init.h" +#include "dpp_drv_sdt.h" +#include "dpp_tbl_comm.h" +#include "dpp_dtb_cfg.h" +#include "dpp_dtb.h" + +#define DTB_QUEUE_ACK_SIZE (16) +#define DTB_QUEUE_ELEMENT_NUM (32) +#define DTB_QUEUE_ELEMENT_DATA_SIZE (16 * 1024 + DTB_QUEUE_ACK_SIZE) //16k+16 +#define DTB_QUEUE_DATA_SIZE \ + (DTB_QUEUE_ELEMENT_DATA_SIZE * DTB_QUEUE_ELEMENT_NUM) +#define DTB_QUEUE_ELEMENT_DUMP_SIZE (16 * 1024 + 
DTB_QUEUE_ACK_SIZE) //16K+16 +#define DTB_QUEUE_DUMP_SIZE \ + (DTB_QUEUE_ELEMENT_DUMP_SIZE * DTB_QUEUE_ELEMENT_NUM) +#define DTB_QUEUE_DMA_SIZE (DTB_QUEUE_DATA_SIZE + DTB_QUEUE_DUMP_SIZE) + +SE_APT_CALLBACK_T g_apt_se_callback[DPP_PCIE_SLOT_MAX][DPP_DEV_SDT_ID_MAX] = { + { { 0 } } +}; + +/***********************************************************/ +/** 根据设备id和sdt号获取指针 +* @param p_new_key 新键值 +* @param p_old_key 旧键值 +* @param key_len 键值长度 +* @return 比较结果 +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/24 +************************************************************/ +ZXIC_SINT32 dpp_apt_table_key_cmp(void *p_new_key, void *p_old_key, + ZXIC_UINT32 key_len) +{ + ZXIC_COMM_CHECK_POINT(p_new_key); + ZXIC_COMM_CHECK_POINT(p_old_key); + /* 仅比较index */ + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT( + key_len, (ZXIC_UINT32)ZXIC_SIZEOF(ZXIC_UINT32)); + return ZXIC_COMM_MEMCMP((ZXIC_UINT32 *)p_new_key, + (ZXIC_UINT32 *)p_old_key, + ZXIC_SIZEOF(ZXIC_UINT32)); +} + +/***********************************************************/ +/** 根据设备id和sdt号获取指针 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/24 +************************************************************/ +SE_APT_CALLBACK_T *dpp_apt_get_func(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT_RETURN_NULL(dev); + ZXIC_COMM_CHECK_INDEX_RETURN_NULL_NO_ASSERT(sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX_RETURN_NULL_NO_ASSERT(slot, 0, + DPP_PCIE_SLOT_MAX - 1); + + return &g_apt_se_callback[slot][sdt_no]; +} + +/***********************************************************/ +/** 根据设备id和sdt号获取级联sdt +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/19 +************************************************************/ +ZXIC_UINT32 dpp_apt_get_sdt_partner(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + 
SE_APT_CALLBACK_T *pAptCallback = NULL; + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + if (ZXIC_NULL == pAptCallback) { + return ZXIC_UINT32_MAX; + } + + if (DPP_SDT_TBLT_eTCAM == pAptCallback->table_type) { + return pAptCallback->se_func_info.aclFunc.sdt_partner; + } + + return ZXIC_UINT32_MAX; +} + +/***********************************************************/ +/** 保存回调参数信息 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param table_type SDT属性中的表类型,取值参考DPP_SDT_TABLE_TYPE_E的定义(仅添加操作时有效) +* @param pData 需保存的回调信息,由table_type确定此ZXIC_VOID型指针对应的数据结果 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/24 +************************************************************/ +DPP_STATUS dpp_apt_set_callback(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 table_type, ZXIC_VOID *pData) +{ + SE_APT_CALLBACK_T *aptFunc = NULL; + + aptFunc = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_POINT(aptFunc); + + aptFunc->sdtNo = sdt_no; + aptFunc->table_type = table_type; + + switch (table_type) { + case DPP_SDT_TBLT_eRAM: { + aptFunc->se_func_info.eramFunc.opr_mode = + ((DPP_APT_ERAM_TABLE_T *)pData)->opr_mode; + aptFunc->se_func_info.eramFunc.rd_mode = + ((DPP_APT_ERAM_TABLE_T *)pData)->rd_mode; + aptFunc->se_func_info.eramFunc.eram_set_func = + ((DPP_APT_ERAM_TABLE_T *)pData)->eram_set_func; + aptFunc->se_func_info.eramFunc.eram_get_func = + ((DPP_APT_ERAM_TABLE_T *)pData)->eram_get_func; + break; + } + case DPP_SDT_TBLT_DDR3: { + aptFunc->se_func_info.ddrFunc.ddr_tbl_depth = + ((DPP_APT_DDR_TABLE_T *)pData)->ddr_table_depth; + aptFunc->se_func_info.ddrFunc.ddr_set_func = + ((DPP_APT_DDR_TABLE_T *)pData)->ddr_set_func; + aptFunc->se_func_info.ddrFunc.ddr_get_func = + ((DPP_APT_DDR_TABLE_T *)pData)->ddr_get_func; + break; + } + case DPP_SDT_TBLT_HASH: { + aptFunc->se_func_info.hashFunc.hash_set_func = + ((DPP_APT_HASH_TABLE_T *)pData)->hash_set_func; + aptFunc->se_func_info.hashFunc.hash_get_func = + ((DPP_APT_HASH_TABLE_T *)pData)->hash_get_func; + 
break; + } + case DPP_SDT_TBLT_eTCAM: { + aptFunc->se_func_info.aclFunc.sdt_partner = + ((DPP_APT_ACL_TABLE_T *)pData)->sdt_partner; + aptFunc->se_func_info.aclFunc.acl_set_func = + ((DPP_APT_ACL_TABLE_T *)pData)->acl_set_func; + aptFunc->se_func_info.aclFunc.acl_get_func = + ((DPP_APT_ACL_TABLE_T *)pData)->acl_get_func; + break; + } + default: { + ZXIC_COMM_TRACE_ERROR( + "dpp_apt_se_set_callback table_type[ %d ] is invalid!\n", + table_type); + return DPP_ERR; + } + } + + return DPP_OK; +} + +DPP_STATUS dpp_apt_sw_list_insert(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT8 *p_rb_key = NULL; + ZXIC_RB_TN *p_rb_new = NULL; + ZXIC_RB_TN *p_rb_rtn = NULL; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(rb_cfg); + ZXIC_COMM_CHECK_POINT(pData); + + p_rb_key = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(len); + ZXIC_COMM_CHECK_POINT(p_rb_key); + ZXIC_COMM_MEMSET(p_rb_key, 0x0, len); + ZXIC_COMM_MEMCPY(p_rb_key, pData, len); + + p_rb_new = (ZXIC_RB_TN *)ZXIC_COMM_MALLOC(sizeof(ZXIC_RB_TN)); + if (NULL == (p_rb_new)) { + ZXIC_COMM_FREE(p_rb_key); + ZXIC_COMM_TRACE_ERROR( + "\n ICM %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + ZXIC_COMM_MEMSET(p_rb_new, 0, ZXIC_SIZEOF(ZXIC_RB_TN)); + INIT_RBT_TN(p_rb_new, p_rb_key); + + rc = zxic_comm_rb_insert(rb_cfg, p_rb_new, &p_rb_rtn); + if (rc == ZXIC_RBT_RC_UPDATE) { + ZXIC_COMM_CHECK_POINT(p_rb_rtn); + ZXIC_COMM_MEMCPY(p_rb_rtn->p_key, pData, len); + ZXIC_COMM_FREE(p_rb_new); + ZXIC_COMM_FREE(p_rb_key); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "update exist entry!\n"); + return DPP_OK; + } + + return rc; +} + +DPP_STATUS dpp_apt_sw_list_search(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_RB_TN *p_rb_rtn = NULL; + + ZXIC_COMM_CHECK_POINT(rb_cfg); + + rc = zxic_comm_rb_search(rb_cfg, pData, &p_rb_rtn); + if (DPP_OK != rc) { + return rc; + } + //ZXIC_COMM_CHECK_RC_NO_ASSERT( rc, "zxic_comm_rb_search"); + + ZXIC_COMM_MEMCPY(pData, p_rb_rtn->p_key, len); + return rc; +} + +DPP_STATUS dpp_apt_sw_list_delete(ZXIC_RB_CFG *rb_cfg, void *pData, + ZXIC_UINT32 len) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_RB_TN *p_rb_rtn = NULL; + + ZXIC_COMM_CHECK_POINT(rb_cfg); + + rc = zxic_comm_rb_delete(rb_cfg, pData, &p_rb_rtn); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_rb_delete"); + ZXIC_COMM_FREE(p_rb_rtn->p_key); + ZXIC_COMM_FREE(p_rb_rtn); + + return rc; +} + +DPP_STATUS dpp_apt_get_zblock_index(ZXIC_UINT32 zblock_bitmap, + ZXIC_UINT32 *zblk_idx) +{ + ZXIC_UINT32 index0 = 0; + ZXIC_UINT32 index1 = 0; + + ZXIC_COMM_CHECK_POINT(zblk_idx); + + for (index0 = 0; index0 < 32; index0++) { + if ((zblock_bitmap >> index0) & 0x1) { + *(zblk_idx + index1) = index0; + index1++; + } + } + + return DPP_OK; +} + +DPP_STATUS dpp_apt_dtb_res_init(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue_id = 0; + ZXIC_UINT32 dma_size = 2 * DTB_QUEUE_DMA_SIZE; + ZXIC_UINT16 vport = 0; + ZXIC_MUTEX_T *p_self_recover_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + DTB_QUEUE_DMA_ADDR_INFO tDmaAddrInfo = { 0 }; + + 
ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + vport = DEV_PCIE_VPORT(dev); + + mutex = DPP_DEV_MUTEX_T_SELF_RECOVER; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, + &p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + //申请队列 + rc = dpp_dtb_queue_requst_ex(dev, "pf", &queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_requst", + p_self_recover_mutex); + + /*分配下表DMA内存*/ + ZXIC_COMM_MEMSET(&tDmaAddrInfo, 0x00, sizeof(DTB_QUEUE_DMA_ADDR_INFO)); + rc = dpp_dtb_queue_dma_mem_alloc(dev, queue_id, dma_size); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_dma_mem_alloc", + p_self_recover_mutex); + + rc = dpp_dtb_queue_dma_mem_get(dev, queue_id, &tDmaAddrInfo); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_dma_mem_get", + p_self_recover_mutex); + + // 配置下表地址空间 + rc = dpp_dtb_queue_down_table_addr_set(dev, queue_id, + tDmaAddrInfo.dma_phy_addr, + tDmaAddrInfo.dma_vir_addr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_down_table_addr_set", + p_self_recover_mutex); + + // 配置dump表地址空间 + rc = dpp_dtb_queue_dump_table_addr_set( + dev, queue_id, tDmaAddrInfo.dma_phy_addr + DTB_QUEUE_DMA_SIZE, + tDmaAddrInfo.dma_vir_addr + DTB_QUEUE_DMA_SIZE); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_dump_table_addr_set", + p_self_recover_mutex); + + rc = dpp_dtb_user_info_set(dev, queue_id, vport, 0); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_user_info_set", + p_self_recover_mutex); + + rc = zxic_comm_mutex_unlock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 根据设备id对表项公共参数初始化 +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/11/09 +************************************************************/ +DPP_STATUS dpp_apt_se_callback_init(DPP_DEV_T *dev) +{ + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 slot = 0; + + 
ZXIC_COMM_CHECK_POINT(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + for (sdt_no = 0; sdt_no < DPP_DEV_SDT_ID_MAX; sdt_no++) { + ZXIC_COMM_MEMSET(&g_apt_se_callback[slot][sdt_no], 0x0, + sizeof(SE_APT_CALLBACK_T)); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 释放sdt资源以及适配资源 +* @param dev_id 设备号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/11/09 +************************************************************/ +DPP_STATUS dpp_apt_sdt_res_deinit(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + /*sdt资源初始化,包括硬件和软件*/ + rc = dpp_sdt_tbl_write(dev, sdt_no, 0, NULL, 1); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_sdt_tbl_write"); + + /*适配资源初始化*/ + ZXIC_COMM_MEMSET(&g_apt_se_callback[DEV_ID(dev)][sdt_no], 0x0, + sizeof(SE_APT_CALLBACK_T)); + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_ddr.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_ddr.c new file mode 100644 index 000000000000..c8ca54340823 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_ddr.c @@ -0,0 +1,59 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_ddr.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 陈勤00181032 +* 完成日期 : 2023/07/26 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se.h" +#include "dpp_dev.h" +#include "dpp_sdt.h" + +/***********************************************************/ +/** DDR表资源初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的DDR表个数 
+* @param pDdrTbl ddr资源信息,包括SDT配置信息,直接表读取位宽和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/07/26 +************************************************************/ +DPP_STATUS dpp_apt_ddr_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_DDR_TABLE_T *pDdrTbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 index = 0; + DPP_APT_DDR_TABLE_T *pTempDdrTbl = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(tbl_num, DPP_DEV_SDT_ID_MAX); + ZXIC_COMM_CHECK_POINT(pDdrTbl); + + for (index = 0; index < tbl_num; index++) { + pTempDdrTbl = pDdrTbl + index; + rc = dpp_sdt_tbl_write(dev, pTempDdrTbl->sdtNo, + pTempDdrTbl->eDdrSdt.table_type, + &(pTempDdrTbl->eDdrSdt), SDT_OPER_ADD); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_sdt_tbl_write"); + + rc = dpp_apt_set_callback(dev, pTempDdrTbl->sdtNo, + pTempDdrTbl->eDdrSdt.table_type, + (ZXIC_VOID *)pTempDdrTbl); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_apt_set_callback"); + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_eram.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_eram.c new file mode 100644 index 000000000000..d605d7ecffc1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_eram.c @@ -0,0 +1,214 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_eram.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 陈勤00181032 +* 完成日期 : 2023/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se.h" +#include "dpp_dev.h" +#include "dpp_sdt.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_apt_se.h" + +/***********************************************************/ +/** 
eram表资源初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的eram表个数 +* @param pEramTbl eram资源信息,包括SDT配置信息,直接表读取位宽和结构体码流转换回调函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_eram_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_ERAM_TABLE_T *pEramTbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 index = 0; + DPP_APT_ERAM_TABLE_T *pTempEramTbl = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(tbl_num, DPP_DEV_SDT_ID_MAX); + ZXIC_COMM_CHECK_POINT(pEramTbl); + + for (index = 0; index < tbl_num; index++) { + pTempEramTbl = pEramTbl + index; + rc = dpp_sdt_tbl_write(dev, pTempEramTbl->sdtNo, + pTempEramTbl->eRamSdt.table_type, + &(pTempEramTbl->eRamSdt), SDT_OPER_ADD); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_sdt_tbl_write"); + + rc = dpp_apt_set_callback(dev, pTempEramTbl->sdtNo, + pTempEramTbl->eRamSdt.table_type, + (ZXIC_VOID *)pTempEramTbl); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_apt_set_callback"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** dtb eram表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @param pData 插入表项内容,由业务确定 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 dump_data[4] = { 0 }; + + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_ERAM_ENTRY_INFO_T dtb_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + 
ZXIC_COMM_MEMSET(dump_data, 0x00, sizeof(dump_data)); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + rc = pAptCallback->se_func_info.eramFunc.eram_set_func(pData, + dump_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "eram_set_func"); + + //dtb配表 + dtb_eram_entry.index = index; + dtb_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, 1, &dtb_eram_entry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_eram_dma_write"); + + return rc; +} + +/***********************************************************/ +/** eram表项数据获取,从软件缓存中获取 +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @param pData 出参,返回业务表项内容 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index, + void *pData) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dump_data[4] = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_ERAM_ENTRY_INFO_T dump_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET(dump_data, 0x00, sizeof(dump_data)); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + dump_eram_entry.index = index; + dump_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_data_get(dev, queue_id, sdt_no, &dump_eram_entry); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_eram_data_get"); + + rc = pAptCallback->se_func_info.eramFunc.eram_get_func( + pData, dump_eram_entry.p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "eram_get_func"); + + return rc; +} + +/***********************************************************/ +/** eram表项删除,软件维护删除 +* @param dev_id 设备号 
+* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 dump_data[4] = { 0 }; + + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_ERAM_ENTRY_INFO_T dtb_eram_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET(dump_data, 0x00, sizeof(dump_data)); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + //dtb配表 + dtb_eram_entry.index = index; + dtb_eram_entry.p_data = dump_data; + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, 1, &dtb_eram_entry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_eram_dma_write"); + + return rc; +} + +/***********************************************************/ +/** eram表项flush +* @param dev_id 设备号 +* @param sdt_no SDT号 0~255 +* @param index 条目index,索引范围随wrt_mode模式不同 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_eram_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + rc = dpp_dtb_eram_table_flush(dev, queue_id, sdt_no); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_eram_table_flush"); + ZXIC_COMM_TRACE_ERROR("dpp_apt_dtb_eram_flush sdt_no %d done.\n", + sdt_no); + + return rc; +} diff --git 
a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_hash.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_hash.c new file mode 100644 index 000000000000..739a64775260 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_hash.c @@ -0,0 +1,594 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_hash.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : chenqin00181032 +* 完成日期 : 2023/02/22 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se.h" +#include "dpp_dev.h" +#include "dpp_hash.h" +#include "dpp_sdt.h" +#include "dpp_dtb_cfg.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" + +static DPP_SE_CFG *g_apt_se_cfg[DPP_PCIE_SLOT_MAX] = { NULL }; + +DPP_SE_CFG *dpp_apt_get_se_cfg(DPP_DEV_T *dev) +{ + ZXIC_UINT32 slot = 0; + if (NULL == dev) { + return NULL; + } + slot = DEV_PCIE_SLOT(dev); + if (slot < DPP_PCIE_SLOT_MAX) { + return g_apt_se_cfg[slot]; + } + return NULL; +} +/***********************************************************/ +/** hash表全局资源初始化 +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_global_res_init(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + if (NULL == g_apt_se_cfg[slot]) { + g_apt_se_cfg[slot] = + (DPP_SE_CFG *)ZXIC_COMM_MALLOC(sizeof(DPP_SE_CFG)); + ZXIC_COMM_CHECK_POINT(g_apt_se_cfg[slot]); + rc = dpp_se_init(dev, g_apt_se_cfg[slot]); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_se_init"); + + rc = 
dpp_se_client_init(g_apt_se_cfg[slot], + ZXIC_COMM_VAL_TO_PTR(DEV_ID(dev))); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_se_client_init"); + + rc = dpp_dev_hash_opr_mutex_create(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_hash_opr_mutex_create"); + } + + return rc; +} + +/***********************************************************/ +/** hash表全局资源去初始化 +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2024/08/01 +************************************************************/ +DPP_STATUS dpp_apt_hash_global_res_uninit(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + if (g_apt_se_cfg[slot] != NULL) { + ZXIC_COMM_FREE(g_apt_se_cfg[slot]); + g_apt_se_cfg[slot] = NULL; + } + + rc = dpp_dev_hash_opr_mutex_destroy(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_hash_opr_mutex_destroy"); + return rc; +} + +/***********************************************************/ +/** hash引擎初始化 +* @param dev_id 设备号 +* @param func_num 需初始化的hash引擎个数 1~4 +* @param pHashFuncRes 每个hash引擎分配的zblock个数和编号,以及分配模式(混合模式或者纯片内模式) +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_func_res_init(DPP_DEV_T *dev, ZXIC_UINT32 func_num, + DPP_APT_HASH_FUNC_RES_T *pHashFuncRes) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 zblk_idx[32] = { 0 }; + DPP_APT_HASH_FUNC_RES_T *pHashFuncResTemp = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(func_num, HASH_FUNC_ID_NUM); + ZXIC_COMM_CHECK_POINT(pHashFuncRes); + + p_se_cfg = dpp_apt_get_se_cfg(dev); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + for (index = 0; index < 
func_num; index++) { + ZXIC_COMM_MEMSET(zblk_idx, 0x0, sizeof(zblk_idx)); + pHashFuncResTemp = pHashFuncRes + index; + rc = dpp_apt_get_zblock_index(pHashFuncResTemp->zblk_bitmap, + zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_apt_get_zblock_index"); + + rc = dpp_hash_init(p_se_cfg, pHashFuncResTemp->func_id, + pHashFuncResTemp->zblk_num, zblk_idx, + pHashFuncResTemp->ddr_dis); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_init"); + } + + return rc; +} + +/***********************************************************/ +/** hash引擎初始化(删除硬件数据) +* @param dev_id 设备号 +* @param func_num 需初始化的hash引擎个数 1~4 +* @param pHashFuncRes 每个hash引擎分配的zblock个数和编号,以及分配模式(混合模式或者纯片内模式) +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS +dpp_apt_hash_func_flush_hardware_all(DPP_DEV_T *dev, ZXIC_UINT32 func_num, + DPP_APT_HASH_FUNC_RES_T *pHashFuncRes, + ZXIC_UINT32 queue_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + DPP_APT_HASH_FUNC_RES_T *pHashFuncResTemp = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(func_num, HASH_FUNC_ID_NUM); + p_se_cfg = dpp_apt_get_se_cfg(dev); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + for (index = 0; index < func_num; index++) { + pHashFuncResTemp = pHashFuncRes + index; + + rc = dpp_dtb_zcam_space_clr(dev, p_se_cfg, queue_id, + pHashFuncResTemp->func_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_zcam_space_clr"); + } + + return rc; +} + +/***********************************************************/ +/** hash引擎bulk空间初始化 +* @param dev_id 设备号 +* @param bulk_num 需初始化的bulk表个数 1~32 +* @param pBulkRes zcell和zreg资源占用信息,如果是混合模式,需进行DDR资源分配 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_bulk_res_init(DPP_DEV_T *dev, 
ZXIC_UINT32 bulk_num, + DPP_APT_HASH_BULK_RES_T *pBulkRes) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 dev_id = 0; + DPP_APT_HASH_BULK_RES_T *pHashBulkResTemp = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + DPP_HASH_DDR_RESC_CFG_T ddr_resc_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_num, HASH_FUNC_ID_NUM * HASH_BULK_NUM); + ZXIC_COMM_CHECK_POINT(pBulkRes); + p_se_cfg = dpp_apt_get_se_cfg(dev); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + for (index = 0; index < bulk_num; index++) { + ZXIC_COMM_MEMSET(&ddr_resc_cfg, 0x0, + sizeof(DPP_HASH_DDR_RESC_CFG_T)); + pHashBulkResTemp = pBulkRes + index; + + ddr_resc_cfg.ddr_baddr = pHashBulkResTemp->ddr_baddr; + ddr_resc_cfg.ddr_item_num = pHashBulkResTemp->ddr_item_num; + ddr_resc_cfg.ddr_width_mode = pHashBulkResTemp->ddr_width_mode; + ddr_resc_cfg.ddr_crc_sel = pHashBulkResTemp->ddr_crc_sel; + ddr_resc_cfg.ddr_ecc_en = pHashBulkResTemp->ddr_ecc_en; + + rc = dpp_hash_bulk_init(p_se_cfg, pHashBulkResTemp->func_id, + pHashBulkResTemp->bulk_id, + &ddr_resc_cfg, + pHashBulkResTemp->zcell_num, + pHashBulkResTemp->zreg_num); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_bulk_init"); + } + + return rc; +} + +/***********************************************************/ +/** hash业务表属性初始化 +* @param dev_id 设备号 +* @param tbl_num 需初始化的业务表表个数 1~128 +* @param pHashTbl sdt配置信息,初始化标记和业务结构体码流转换函数 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_hash_tbl_res_init(DPP_DEV_T *dev, ZXIC_UINT32 tbl_num, + DPP_APT_HASH_TABLE_T *pHashTbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + DPP_APT_HASH_TABLE_T *pHashTblTemp = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(tbl_num, + 
HASH_FUNC_ID_NUM * HASH_TBL_ID_NUM); + ZXIC_COMM_CHECK_POINT(pHashTbl); + p_se_cfg = dpp_apt_get_se_cfg(dev); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + for (index = 0; index < tbl_num; index++) { + pHashTblTemp = pHashTbl + index; + rc = dpp_sdt_tbl_write(dev, pHashTblTemp->sdtNo, + pHashTblTemp->hashSdt.table_type, + &pHashTblTemp->hashSdt, SDT_OPER_ADD); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_sdt_tbl_write"); + + rc = dpp_hash_tbl_id_info_init( + p_se_cfg, pHashTblTemp->hashSdt.hash_id, + pHashTblTemp->hashSdt.hash_table_id, + pHashTblTemp->tbl_flag, + pHashTblTemp->hashSdt.hash_table_width, + pHashTblTemp->hashSdt.key_size); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_tbl_id_info_init_ex"); + + rc = dpp_apt_set_callback(dev, pHashTblTemp->sdtNo, + pHashTblTemp->hashSdt.table_type, + (ZXIC_VOID *)pHashTblTemp); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_apt_set_callback"); + } + + return rc; +} + +/***********************************************************/ +/** dtb hash表项插入/更新 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 插入hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_hash_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 key_valid = 1; + DPP_HASH_ENTRY entry = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + ZXIC_UINT8 aucKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 aucRst[HASH_RST_MAX] = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_HASH_ENTRY_INFO_T tDtbHashEntry = { 0 }; + ZXIC_UINT32 element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); 
+ ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + entry.p_key = aucKey; + entry.p_rst = aucRst; + ZXIC_COMM_MEMSET(entry.p_key, 0x0, sizeof(aucKey)); + ZXIC_COMM_MEMSET(entry.p_rst, 0x0, sizeof(aucRst)); + entry.p_key[0] = DPP_GET_HASH_KEY_CTRL(key_valid, + sdt_hash_info.hash_table_width, + sdt_hash_info.hash_table_id); + rc = pAptCallback->se_func_info.hashFunc.hash_set_func(pData, &entry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "hash_set_func"); + + tDtbHashEntry.p_actu_key = &entry.p_key[1]; + tDtbHashEntry.p_rst = entry.p_rst; + + rc = dpp_dtb_hash_dma_insert(dev, queue_id, sdt_no, 1, &tDtbHashEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_insert"); + + return rc; +} + +/***********************************************************/ +/** dtb hash表项删除 +* @param dev_id 设备号 +* @param sdt_no sdt号 0~255 +* @param pData 删除hash表项信息,由业务传入 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +DPP_STATUS dpp_apt_dtb_hash_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, void *pData) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 key_valid = 1; + DPP_HASH_ENTRY entry = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + ZXIC_UINT8 aucKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 aucRst[HASH_RST_MAX] = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_HASH_ENTRY_INFO_T tDtbHashEntry = { 0 }; + // ZXIC_UINT32 queue_id = 0; + ZXIC_UINT32 element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, 
"dpp_soft_sdt_tbl_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + entry.p_key = aucKey; + entry.p_rst = aucRst; + ZXIC_COMM_MEMSET(entry.p_key, 0x0, sizeof(aucKey)); + ZXIC_COMM_MEMSET(entry.p_rst, 0x0, sizeof(aucRst)); + entry.p_key[0] = DPP_GET_HASH_KEY_CTRL(key_valid, + sdt_hash_info.hash_table_width, + sdt_hash_info.hash_table_id); + rc = pAptCallback->se_func_info.hashFunc.hash_set_func(pData, &entry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "hash_set_func"); + + ZXIC_COMM_MEMSET(&tDtbHashEntry, 0x0, + sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET(entry.p_rst, 0x0, sizeof(aucRst)); + tDtbHashEntry.p_actu_key = &entry.p_key[1]; + tDtbHashEntry.p_rst = entry.p_rst; + rc = dpp_dtb_hash_dma_delete(dev, queue_id, sdt_no, 1, &tDtbHashEntry, + &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_delete"); + + return rc; +} + +/***********************************************************/ +/** dtb hash表项批量插入/更新 +* @param dev_id 设备号 +* @param queue_id 队列id +* @param sdt_no sdt号 0~255 +* @param entry_num 插入条目数 +* @param entry_size 插入条目结构体大小 +* @param pData 插入hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author cq @date 2024/10/23 +************************************************************/ +DPP_STATUS dpp_apt_dtb_multi_hash_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + ZXIC_UINT32 entry_size, + ZXIC_VOID *pData) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 key_valid = 1; + ZXIC_UINT32 entry_index = 0; + DPP_HASH_ENTRY entry = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + ZXIC_UINT8 aucKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 aucRst[HASH_RST_MAX] = { 0 }; + + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_HASH_ENTRY_INFO_T *p_oneHashEntry = NULL; + DPP_DTB_HASH_ENTRY_INFO_T *p_multiHashEntry = NULL; + ZXIC_UINT8 *p_key = NULL; + ZXIC_UINT8 *p_rst = NULL; + ZXIC_UINT8 *p_temp_data = NULL; + ZXIC_UINT32 
element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(entry_num, 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + entry.p_key = aucKey; + entry.p_rst = aucRst; + + p_multiHashEntry = (DPP_DTB_HASH_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_POINT(p_multiHashEntry); + p_key = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(entry_num * HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(p_key, p_multiHashEntry); + p_rst = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(entry_num * HASH_RST_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE2PTR_NO_ASSERT(p_rst, p_key, + p_multiHashEntry); + ZXIC_COMM_MEMSET_S(p_multiHashEntry, + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T), 0x0, + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(p_key, entry_num * HASH_KEY_MAX, 0x0, + entry_num * HASH_KEY_MAX); + ZXIC_COMM_MEMSET_S(p_rst, entry_num * HASH_RST_MAX, 0x0, + entry_num * HASH_RST_MAX); + + for (entry_index = 0; entry_index < entry_num; entry_index++) { + ZXIC_COMM_MEMSET_S(entry.p_key, HASH_KEY_MAX, 0x0, + sizeof(aucKey)); + ZXIC_COMM_MEMSET_S(entry.p_rst, HASH_RST_MAX, 0x0, + sizeof(aucRst)); + entry.p_key[0] = DPP_GET_HASH_KEY_CTRL( + key_valid, sdt_hash_info.hash_table_width, + sdt_hash_info.hash_table_id); + p_temp_data = (ZXIC_UINT8 *)pData + entry_index * entry_size; + rc = pAptCallback->se_func_info.hashFunc.hash_set_func( + (ZXIC_VOID *)p_temp_data, &entry); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "hash_set_func", p_rst, p_key, + p_multiHashEntry); + + p_oneHashEntry = 
p_multiHashEntry + entry_index; + p_oneHashEntry->p_actu_key = p_key + entry_index * HASH_KEY_MAX; + p_oneHashEntry->p_rst = p_rst + entry_index * HASH_RST_MAX; + ZXIC_COMM_MEMCPY_S(p_oneHashEntry->p_actu_key, HASH_KEY_MAX, + &entry.p_key[1], sizeof(aucKey) - 1); + ZXIC_COMM_MEMCPY_S(p_oneHashEntry->p_rst, HASH_RST_MAX, + entry.p_rst, sizeof(aucRst)); + } + + rc = dpp_dtb_hash_dma_insert(dev, queue_id, sdt_no, entry_num, + p_multiHashEntry, &element_id); + ZXIC_COMM_FREE(p_rst); + ZXIC_COMM_FREE(p_key); + ZXIC_COMM_FREE(p_multiHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_insert"); + + return rc; +} + +/***********************************************************/ +/** dtb hash表项批量删除 +* @param dev_id 设备号 +* @param queue_id 队列id +* @param sdt_no sdt号 0~255 +* @param entry_num 删除条目数 +* @param entry_size 删除条目结构体大小 +* @param pData 删除hash表项信息,由业务确定 +* @return +* @remark 无 +* @see +* @author cq @date 2024/10/23 +************************************************************/ +DPP_STATUS dpp_apt_dtb_multi_hash_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + ZXIC_UINT32 entry_size, + ZXIC_VOID *pData) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 key_valid = 1; + ZXIC_UINT32 entry_index = 0; + DPP_HASH_ENTRY entry = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + ZXIC_UINT8 aucKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 aucRst[HASH_RST_MAX] = { 0 }; + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_DTB_HASH_ENTRY_INFO_T *p_oneHashEntry = NULL; + DPP_DTB_HASH_ENTRY_INFO_T *p_multiHashEntry = NULL; + ZXIC_UINT8 *p_key = NULL; + ZXIC_UINT8 *p_rst = NULL; + ZXIC_UINT8 *p_temp_data = NULL; + ZXIC_UINT32 element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(entry_num, 1); + + //从sdt_no中获取SDT配置 + 
rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + + entry.p_key = aucKey; + entry.p_rst = aucRst; + + p_multiHashEntry = (DPP_DTB_HASH_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_POINT(p_multiHashEntry); + p_key = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(entry_num * HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(p_key, p_multiHashEntry); + p_rst = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(entry_num * HASH_RST_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE2PTR_NO_ASSERT(p_rst, p_key, + p_multiHashEntry); + ZXIC_COMM_MEMSET_S(p_multiHashEntry, + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T), 0x0, + entry_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(p_key, entry_num * HASH_KEY_MAX, 0x0, + entry_num * HASH_KEY_MAX); + ZXIC_COMM_MEMSET_S(p_rst, entry_num * HASH_RST_MAX, 0x0, + entry_num * HASH_RST_MAX); + + for (entry_index = 0; entry_index < entry_num; entry_index++) { + ZXIC_COMM_MEMSET_S(entry.p_key, HASH_KEY_MAX, 0x0, + sizeof(aucKey)); + ZXIC_COMM_MEMSET_S(entry.p_rst, HASH_RST_MAX, 0x0, + sizeof(aucRst)); + entry.p_key[0] = DPP_GET_HASH_KEY_CTRL( + key_valid, sdt_hash_info.hash_table_width, + sdt_hash_info.hash_table_id); + p_temp_data = (ZXIC_UINT8 *)pData + entry_index * entry_size; + rc = pAptCallback->se_func_info.hashFunc.hash_set_func( + (ZXIC_VOID *)p_temp_data, &entry); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE3PTR_NO_ASSERT( + DEV_ID(dev), rc, "hash_set_func", p_rst, p_key, + p_multiHashEntry); + + p_oneHashEntry = p_multiHashEntry + entry_index; + p_oneHashEntry->p_actu_key = p_key + entry_index * HASH_KEY_MAX; + p_oneHashEntry->p_rst = p_rst + entry_index * HASH_RST_MAX; + ZXIC_COMM_MEMCPY_S(p_oneHashEntry->p_actu_key, HASH_KEY_MAX, + &entry.p_key[1], sizeof(aucKey) - 1); + } + + rc = dpp_dtb_hash_dma_delete(dev, queue_id, 
sdt_no, entry_num, + p_multiHashEntry, &element_id); + ZXIC_COMM_FREE(p_rst); + ZXIC_COMM_FREE(p_key); + ZXIC_COMM_FREE(p_multiHashEntry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_hash_dma_delete"); + + return rc; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_res.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_res.c new file mode 100644 index 000000000000..abc09198648e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/se_apt/dpp_apt_se_res.c @@ -0,0 +1,1323 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_apt_se_res.c +* 文件标识 : +* 内容摘要 : 所有表资源获取接口文件 +* 其它说明 : +* 当前版本 : +* 作 者 : cq +* 完成日期 : 2024/11/14 +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "dpp_apt_se_api.h" +#include "dpp_apt_se.h" +#include "dpp_stat_api.h" +#include "dpp_agent_channel.h" +#include "dpp_sdt.h" +#include "dpp_dtb_table_api.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb.h" +#include "dpp_kernel_init.h" + +ZXIC_UINT32 dpp_get_se_buff_size(ZXIC_UINT32 opr) +{ + ZXIC_UINT32 buff_size = 0; + + switch (opr) { + case HASH_FUNC_BULK_REQ: { + buff_size = ZXIC_SIZEOF(SE_HASH_FUNC_BULK_T); + break; + } + case HASH_TBL_REQ: { + buff_size = ZXIC_SIZEOF(SE_HASH_TBL_T); + break; + } + case ERAM_TBL_REQ: { + buff_size = ZXIC_SIZEOF(SE_ERAM_TBL_T); + break; + } + case ACL_TBL_REQ: { + buff_size = ZXIC_SIZEOF(SE_ACL_TBL_T); + break; + } + case LPM_TBL_REQ: { + buff_size = ZXIC_SIZEOF(SE_LPM_TBL_T); + break; + } + case DDR_TBL_REQ: { + buff_size = ZXIC_SIZEOF(SE_DDR_TBL_T); + break; + } + case STAT_CFG_REQ: { + buff_size = ZXIC_SIZEOF(SE_STAT_CFG_T); + break; + } + default: + break; + } + + return buff_size; +} + +static DPP_STATUS 
dpp_hash_func_bulk_set(DPP_APT_HASH_RES_INIT_T *pHashResInit, + SE_HASH_FUNC_BULK_T *p_func_bulk) +{ + ZXIC_UINT32 index = 0; + DPP_APT_HASH_FUNC_RES_T *p_func_res = NULL; + DPP_APT_HASH_BULK_RES_T *p_bulk_res = NULL; + + ZXIC_COMM_CHECK_POINT(pHashResInit); + ZXIC_COMM_CHECK_POINT(p_func_bulk); + ZXIC_COMM_CHECK_POINT(pHashResInit->func_res); + ZXIC_COMM_CHECK_POINT(pHashResInit->bulk_res); + ZXIC_COMM_CHECK_INDEX_UPPER(p_func_bulk->func_num, HASH_FUNC_MAX_NUM); + ZXIC_COMM_CHECK_INDEX_UPPER(p_func_bulk->bulk_num, HASH_BULK_MAX_NUM); + + pHashResInit->func_num = p_func_bulk->func_num; + pHashResInit->bulk_num = p_func_bulk->bulk_num; + for (index = 0; index < (pHashResInit->func_num); index++) { + p_func_res = pHashResInit->func_res + index; + + p_func_res->func_id = p_func_bulk->fun[index].func_id; + p_func_res->ddr_dis = p_func_bulk->fun[index].ddr_dis; + p_func_res->zblk_num = p_func_bulk->fun[index].zblk_num; + p_func_res->zblk_bitmap = p_func_bulk->fun[index].zblk_bitmap; + } + + for (index = 0; index < (pHashResInit->bulk_num); index++) { + p_bulk_res = pHashResInit->bulk_res + index; + + p_bulk_res->func_id = p_func_bulk->bulk[index].func_id; + p_bulk_res->bulk_id = p_func_bulk->bulk[index].bulk_id; + p_bulk_res->zcell_num = p_func_bulk->bulk[index].zcell_num; + p_bulk_res->zreg_num = p_func_bulk->bulk[index].zreg_num; + p_bulk_res->ddr_baddr = p_func_bulk->bulk[index].ddr_baddr; + p_bulk_res->ddr_item_num = + p_func_bulk->bulk[index].ddr_item_num; + p_bulk_res->ddr_width_mode = + p_func_bulk->bulk[index].ddr_width_mode; + p_bulk_res->ddr_crc_sel = p_func_bulk->bulk[index].ddr_crc_sel; + p_bulk_res->ddr_ecc_en = p_func_bulk->bulk[index].ddr_ecc_en; + } + + return DPP_OK; +} + +static DPP_STATUS dpp_hash_tbl_set(DPP_APT_HASH_RES_INIT_T *pHashResInit, + SE_HASH_TBL_T *p_hash_tbl) +{ + ZXIC_UINT32 index = 0; + DPP_APT_HASH_TABLE_T *p_tbl_res = NULL; + + ZXIC_COMM_CHECK_POINT(pHashResInit); + ZXIC_COMM_CHECK_POINT(p_hash_tbl); + 
ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_tbl->tbl_num, HASH_TABLE_MAX_NUM); + + pHashResInit->tbl_num = p_hash_tbl->tbl_num; + for (index = 0; index < (pHashResInit->tbl_num); index++) { + p_tbl_res = pHashResInit->tbl_res + index; + + p_tbl_res->sdtNo = p_hash_tbl->table[index].sdtNo; + p_tbl_res->sdt_partner = p_hash_tbl->table[index].sdt_partner; + p_tbl_res->tbl_flag = p_hash_tbl->table[index].tbl_flag; + p_tbl_res->hashSdt.table_type = + p_hash_tbl->table[index].hashSdt.table_type; + p_tbl_res->hashSdt.hash_id = + p_hash_tbl->table[index].hashSdt.hash_id; + p_tbl_res->hashSdt.hash_table_width = + p_hash_tbl->table[index].hashSdt.hash_table_width; + p_tbl_res->hashSdt.key_size = + p_hash_tbl->table[index].hashSdt.key_size; + p_tbl_res->hashSdt.hash_table_id = + p_hash_tbl->table[index].hashSdt.hash_table_id; + p_tbl_res->hashSdt.learn_en = + p_hash_tbl->table[index].hashSdt.learn_en; + p_tbl_res->hashSdt.keep_alive = + p_hash_tbl->table[index].hashSdt.keep_alive; + p_tbl_res->hashSdt.keep_alive_baddr = + p_hash_tbl->table[index].hashSdt.keep_alive_baddr; + p_tbl_res->hashSdt.rsp_mode = + p_hash_tbl->table[index].hashSdt.rsp_mode; + p_tbl_res->hashSdt.hash_clutch_en = + p_hash_tbl->table[index].hashSdt.hash_clutch_en; + } + + return DPP_OK; +} + +static DPP_STATUS dpp_eram_tbl_set(DPP_APT_ERAM_RES_INIT_T *pEramResInit, + SE_ERAM_TBL_T *p_eram_tbl) +{ + ZXIC_UINT32 index = 0; + DPP_APT_ERAM_TABLE_T *p_eram_res = NULL; + + ZXIC_COMM_CHECK_POINT(pEramResInit); + ZXIC_COMM_CHECK_POINT(p_eram_tbl); + ZXIC_COMM_CHECK_POINT(pEramResInit->eram_res); + ZXIC_COMM_CHECK_INDEX_UPPER(p_eram_tbl->tbl_num, ERAM_MAX_NUM); + + pEramResInit->tbl_num = p_eram_tbl->tbl_num; + for (index = 0; index < (pEramResInit->tbl_num); index++) { + p_eram_res = pEramResInit->eram_res + index; + + p_eram_res->sdtNo = p_eram_tbl->eram[index].sdtNo; + p_eram_res->opr_mode = p_eram_tbl->eram[index].opr_mode; + p_eram_res->rd_mode = p_eram_tbl->eram[index].rd_mode; + p_eram_res->eRamSdt.table_type = + 
p_eram_tbl->eram[index].eRamSdt.table_type; + p_eram_res->eRamSdt.eram_mode = + p_eram_tbl->eram[index].eRamSdt.eram_mode; + p_eram_res->eRamSdt.eram_base_addr = + p_eram_tbl->eram[index].eRamSdt.eram_base_addr; + p_eram_res->eRamSdt.eram_table_depth = + p_eram_tbl->eram[index].eRamSdt.eram_table_depth; + p_eram_res->eRamSdt.eram_clutch_en = + p_eram_tbl->eram[index].eRamSdt.eram_clutch_en; + } + + return DPP_OK; +} + +static DPP_STATUS dpp_acl_tbl_set(DPP_APT_ACL_RES_INIT_T *pAclResInit, + SE_ACL_TBL_T *p_acl_tbl) +{ + ZXIC_UINT32 index = 0; + DPP_APT_ACL_TABLE_T *p_acl_res = NULL; + + ZXIC_COMM_CHECK_POINT(pAclResInit); + ZXIC_COMM_CHECK_POINT(p_acl_tbl); + ZXIC_COMM_CHECK_POINT(pAclResInit->acl_res); + ZXIC_COMM_CHECK_INDEX_UPPER(p_acl_tbl->tbl_num, ETCAM_MAX_NUM); + + pAclResInit->tbl_num = p_acl_tbl->tbl_num; + for (index = 0; index < (p_acl_tbl->tbl_num); index++) { + p_acl_res = pAclResInit->acl_res + index; + + p_acl_res->sdtNo = p_acl_tbl->acl[index].sdtNo; + p_acl_res->sdt_partner = p_acl_tbl->acl[index].sdt_partner; + p_acl_res->aclRes.block_num = + p_acl_tbl->acl[index].aclRes.block_num; + p_acl_res->aclRes.entry_num = + p_acl_tbl->acl[index].aclRes.entry_num; + p_acl_res->aclRes.pri_mode = + p_acl_tbl->acl[index].aclRes.pri_mode; + ZXIC_COMM_MEMCPY_S(p_acl_res->aclRes.block_index, + sizeof(ZXIC_UINT32) * DPP_ETCAM_BLOCK_NUM, + p_acl_tbl->acl[index].aclRes.block_index, + sizeof(ZXIC_UINT32) * ETCAM_BLOCK_NUM); + p_acl_res->aclSdt.table_type = + p_acl_tbl->acl[index].aclSdt.table_type; + p_acl_res->aclSdt.etcam_id = + p_acl_tbl->acl[index].aclSdt.etcam_id; + p_acl_res->aclSdt.etcam_key_mode = + p_acl_tbl->acl[index].aclSdt.etcam_key_mode; + p_acl_res->aclSdt.etcam_table_id = + p_acl_tbl->acl[index].aclSdt.etcam_table_id; + p_acl_res->aclSdt.no_as_rsp_mode = + p_acl_tbl->acl[index].aclSdt.no_as_rsp_mode; + p_acl_res->aclSdt.as_en = p_acl_tbl->acl[index].aclSdt.as_en; + p_acl_res->aclSdt.as_eram_baddr = + p_acl_tbl->acl[index].aclSdt.as_eram_baddr; + 
p_acl_res->aclSdt.as_rsp_mode = + p_acl_tbl->acl[index].aclSdt.as_rsp_mode; + p_acl_res->aclSdt.etcam_table_depth = + p_acl_tbl->acl[index].aclSdt.etcam_table_depth; + p_acl_res->aclSdt.etcam_clutch_en = + p_acl_tbl->acl[index].aclSdt.etcam_clutch_en; + } + + return DPP_OK; +} +#if 0 +static DPP_STATUS dpp_lpm_tbl_set(DPP_APT_LPM_RES_INIT_T *pLpmResInit, SE_LPM_TBL_T *p_lpm_tbl) +{ + ZXIC_UINT32 index = 0; + DPP_APT_LPM_TABLE_T *p_lpm_res = NULL; + + ZXIC_COMM_CHECK_POINT(pLpmResInit); + ZXIC_COMM_CHECK_POINT(p_lpm_tbl); + ZXIC_COMM_CHECK_POINT(pLpmResInit->glb_res); + ZXIC_COMM_CHECK_POINT(pLpmResInit->lpm_res); + ZXIC_COMM_CHECK_INDEX_UPPER(p_lpm_tbl->tbl_num, LPM_MAX_NUM); + + pLpmResInit->glb_res->lpm_flags = p_lpm_tbl->glb_res.lpm_flags; + pLpmResInit->glb_res->zblk_num = p_lpm_tbl->glb_res.zblk_num; + pLpmResInit->glb_res->zblk_bitmap = p_lpm_tbl->glb_res.zblk_bitmap; + pLpmResInit->glb_res->mono_ipv4_zblk_num = p_lpm_tbl->glb_res.mono_ipv4_zblk_num; + pLpmResInit->glb_res->mono_ipv4_zblk_bitmap = p_lpm_tbl->glb_res.mono_ipv4_zblk_bitmap; + pLpmResInit->glb_res->mono_ipv6_zblk_num = p_lpm_tbl->glb_res.mono_ipv6_zblk_num; + pLpmResInit->glb_res->mono_ipv6_zblk_bitmap = p_lpm_tbl->glb_res.mono_ipv6_zblk_bitmap; + pLpmResInit->glb_res->ddr4_item_num = p_lpm_tbl->glb_res.ddr4_item_num; + pLpmResInit->glb_res->ddr4_baddr = p_lpm_tbl->glb_res.ddr4_baddr; + pLpmResInit->glb_res->ddr4_base_offset = p_lpm_tbl->glb_res.ddr4_base_offset; + pLpmResInit->glb_res->ddr4_ecc_en = p_lpm_tbl->glb_res.ddr4_ecc_en; + pLpmResInit->glb_res->ddr6_item_num = p_lpm_tbl->glb_res.ddr6_item_num; + pLpmResInit->glb_res->ddr6_baddr = p_lpm_tbl->glb_res.ddr6_baddr; + pLpmResInit->glb_res->ddr6_base_offset = p_lpm_tbl->glb_res.ddr6_base_offset; + pLpmResInit->glb_res->ddr6_ecc_en = p_lpm_tbl->glb_res.ddr6_ecc_en; + pLpmResInit->tbl_num = p_lpm_tbl->tbl_num; + + for (index = 0; index < (p_lpm_tbl->tbl_num); index++) { + p_lpm_res = pLpmResInit->lpm_res + index; + + p_lpm_res->sdtNo = 
p_lpm_tbl->lpm_res[index].sdtNo; + p_lpm_res->lpmSdt.table_type = p_lpm_tbl->lpm_res[index].lpmSdt.table_type; + p_lpm_res->lpmSdt.lpm_v46_id = p_lpm_tbl->lpm_res[index].lpmSdt.lpm_v46_id; + p_lpm_res->lpmSdt.rsp_mode = p_lpm_tbl->lpm_res[index].lpmSdt.rsp_mode; + p_lpm_res->lpmSdt.lpm_table_depth = p_lpm_tbl->lpm_res[index].lpmSdt.lpm_table_depth; + p_lpm_res->lpmSdt.lpm_clutch_en = p_lpm_tbl->lpm_res[index].lpmSdt.lpm_clutch_en; + ZXIC_COMM_MEMCPY_S(p_lpm_res->as_eram_cfg, sizeof(DPP_ROUTE_AS_ERAM_T)*DPP_SMMU0_LPM_AS_TBL_ID_NUM, + p_lpm_tbl->lpm_res[index].as_eram_cfg, sizeof(ROUTE_AS_ERAM_T)*SMMU0_LPM_AS_TBL_ID_NUM); + + p_lpm_res->as_ddr_cfg.baddr = p_lpm_tbl->lpm_res[index].as_ddr_cfg.baddr; + p_lpm_res->as_ddr_cfg.rsp_len = p_lpm_tbl->lpm_res[index].as_ddr_cfg.rsp_len; + p_lpm_res->as_ddr_cfg.ecc_en = p_lpm_tbl->lpm_res[index].as_ddr_cfg.ecc_en; + } + + return DPP_OK; +} + +static DPP_STATUS dpp_ddr_tbl_set(DPP_APT_DDR_RES_INIT_T *pDdrResInit, SE_DDR_TBL_T *p_ddr_tbl) +{ + ZXIC_UINT32 index = 0; + DPP_APT_DDR_TABLE_T *p_ddr_res = NULL; + + ZXIC_COMM_CHECK_POINT(pDdrResInit); + ZXIC_COMM_CHECK_POINT(p_ddr_tbl); + ZXIC_COMM_CHECK_INDEX_UPPER(p_ddr_tbl->tbl_num, DDR_MAX_NUM); + + pDdrResInit->tbl_num = p_ddr_tbl->tbl_num; + for (index = 0; index < (p_ddr_tbl->tbl_num); index++) { + p_ddr_res = pDdrResInit->ddr_res + index; + + p_ddr_res->sdtNo = p_ddr_tbl->ddr[index].sdtNo; + p_ddr_res->ddr_table_depth = p_ddr_tbl->ddr[index].ddr_table_depth; + p_ddr_res->eDdrSdt.table_type = p_ddr_tbl->ddr[index].eDdrSdt.table_type; + p_ddr_res->eDdrSdt.ddr3_base_addr = p_ddr_tbl->ddr[index].eDdrSdt.ddr3_base_addr; + p_ddr_res->eDdrSdt.ddr3_share_type = p_ddr_tbl->ddr[index].eDdrSdt.ddr3_share_type; + p_ddr_res->eDdrSdt.ddr3_rw_len = p_ddr_tbl->ddr[index].eDdrSdt.ddr3_rw_len; + p_ddr_res->eDdrSdt.ddr3_sdt_num = p_ddr_tbl->ddr[index].eDdrSdt.ddr3_sdt_num; + p_ddr_res->eDdrSdt.ddr3_ecc_en = p_ddr_tbl->ddr[index].eDdrSdt.ddr3_ecc_en; + p_ddr_res->eDdrSdt.ddr3_clutch_en = 
p_ddr_tbl->ddr[index].eDdrSdt.ddr3_clutch_en; + } + + return DPP_OK; +} +#endif +static DPP_STATUS dpp_stat_cfg_set(DPP_APT_STAT_RES_INIT_T *pStatResInit, + SE_STAT_CFG_T *p_stat_cfg) +{ + ZXIC_COMM_CHECK_POINT(pStatResInit); + ZXIC_COMM_CHECK_POINT(p_stat_cfg); + + pStatResInit->eram_baddr = p_stat_cfg->eram_baddr; + pStatResInit->eram_depth = p_stat_cfg->eram_depth; + pStatResInit->ddr_baddr = p_stat_cfg->ddr_baddr; + pStatResInit->ppu_ddr_offset = p_stat_cfg->ppu_ddr_offset; + + return DPP_OK; +} + +/***********************************************************/ +/** stat资源初始化 +* @param dev 设备 +* @param stat_res_init stat资源 +* @return +* @remark 无 +* @see +* @author chenqin00181032 @date 2023/02/22 +************************************************************/ +static DPP_STATUS dpp_apt_stat_res_init(DPP_DEV_T *dev, + DPP_APT_STAT_RES_INIT_T *stat_res_init) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, stat_res_init); + + // init stat + rc = dpp_stat_ppu_eram_baddr_set(dev, stat_res_init->eram_baddr); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_ppu_eram_baddr_set"); + + rc = dpp_stat_ppu_eram_depth_set( + dev, stat_res_init->eram_depth); /* unit: 128bits */ + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_ppu_eram_depth_set"); + + return rc; +} + +/***********************************************************/ +/** 消息通道获取hash表func/bulk信息 +* @param dev NP设备 +* @param type 资源类型0:标卡 1:非标卡 +* @param pHashResInit 获取到的func/bulk信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +ZXIC_UINT32 dpp_agent_hash_func_bulk_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + DPP_APT_HASH_RES_INIT_T *pHashResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = HASH_FUNC_BULK_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + 
ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_HASH_FUNC_BULK_T *p_func_bulk = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pHashResInit); + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr) + sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + ZXIC_COMM_MEMSET_S(p_rsp_buff, buff_size, 0x0, buff_size); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG : + RES_OFFLOAD_MSG; + + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, + buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "hash func&bulk res get fail rc=0x%x. 
\n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_func_bulk = (SE_HASH_FUNC_BULK_T *)(p_rsp_buff + 1); + rc = dpp_hash_func_bulk_set(pHashResInit, p_func_bulk); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_hash_func_bulk_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取hash表table信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pHashResInit 获取到的table信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +ZXIC_UINT32 dpp_agent_hash_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + DPP_APT_HASH_RES_INIT_T *pHashResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = HASH_TBL_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_HASH_TBL_T *p_hash_tbl = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pHashResInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr) + sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? 
RES_STD_NIC_MSG : + RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, + buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "hash table res get fail rc=0x%x. \n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_hash_tbl = (SE_HASH_TBL_T *)(p_rsp_buff + 1); + rc = dpp_hash_tbl_set(pHashResInit, p_hash_tbl); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_hash_tbl_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取eram表table信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pEramResInit 获取到的eram信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +static ZXIC_UINT32 dpp_agent_eram_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + DPP_APT_ERAM_RES_INIT_T *pEramResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = ERAM_TBL_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_ERAM_TBL_T *p_eram_tbl = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pEramResInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr) + 
sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG : + RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, + buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "eram table res get fail rc=0x%x. \n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_eram_tbl = (SE_ERAM_TBL_T *)(p_rsp_buff + 1); + rc = dpp_eram_tbl_set(pEramResInit, p_eram_tbl); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_eram_tbl_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取acl表table信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pAclResInit 获取到的acl信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +static ZXIC_UINT32 dpp_agent_acl_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + DPP_APT_ACL_RES_INIT_T *pAclResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = ACL_TBL_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_ACL_TBL_T *p_acl_tbl = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pAclResInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr) + sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG : + RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, + buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "acl table res get fail rc=0x%x. \n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_acl_tbl = (SE_ACL_TBL_T *)(p_rsp_buff + 1); + rc = dpp_acl_tbl_set(pAclResInit, p_acl_tbl); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_acl_tbl_set"); + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 消息通道获取lpm表table信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pLpmResInit 获取到的lpm信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +static ZXIC_UINT32 dpp_agent_lpm_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 type, DPP_APT_LPM_RES_INIT_T *pLpmResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = LPM_TBL_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_LPM_TBL_T *p_lpm_tbl = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + 
ZXIC_COMM_CHECK_DEV_POINT(dev_id, pLpmResInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr)+sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG:RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "lpm table res get fail rc=0x%x. \n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_lpm_tbl = (SE_LPM_TBL_T *)(p_rsp_buff + 1); + rc = dpp_lpm_tbl_set(pLpmResInit, p_lpm_tbl); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_lpm_tbl_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取ddr表table信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pDdrResInit 获取到的ddr信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +static ZXIC_UINT32 dpp_agent_ddr_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 type, DPP_APT_DDR_RES_INIT_T *pDdrResInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = DDR_TBL_REQ; + ZXIC_UINT32 sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_DDR_TBL_T *p_ddr_tbl = NULL; + 
DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pDdrResInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr)+sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG:RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ddr table res get fail rc=0x%x. \n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_ddr_tbl = (SE_DDR_TBL_T *)(p_rsp_buff + 1); + rc = dpp_ddr_tbl_set(pDdrResInit, p_ddr_tbl); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_ddr_tbl_set"); + + return DPP_OK; +} +#endif + +/***********************************************************/ +/** 消息通道获取stat cfg信息 +* @param devId NP设备号 +* @param type 资源类型0:标卡 1:非标卡 +* @param pStatCfgInit 获取到的统计信息 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +static ZXIC_UINT32 dpp_agent_stat_cfg_get(DPP_DEV_T *dev, ZXIC_UINT32 type, + DPP_APT_STAT_RES_INIT_T *pStatCfgInit) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 opr = STAT_CFG_REQ; + ZXIC_UINT32 
sub_type = RES_STD_NIC_MSG; + ZXIC_UINT32 buff_size = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + ZXIC_UINT32 *p_rsp_buff = NULL; + SE_STAT_CFG_T *p_stat_cfg = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pStatCfgInit); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + buff_size = dpp_get_se_buff_size(opr) + sizeof(ZXIC_UINT32); + p_rsp_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(buff_size); + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rsp_buff, p_dtb_mutex); + + sub_type = (type == SE_STD_NIC_RES_TYPE) ? RES_STD_NIC_MSG : + RES_OFFLOAD_MSG; + rc = dpp_agent_channel_se_res_get(dev, sub_type, opr, p_rsp_buff, + buff_size); + if (rc != DPP_OK) { + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, + "stat res get fail rc=0x%x. 
\n", rc); + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + dev_id, rc, "zxic_comm_mutex_unlock", p_rsp_buff); + + p_stat_cfg = (SE_STAT_CFG_T *)(p_rsp_buff + 1); + rc = dpp_stat_cfg_set(pStatCfgInit, p_stat_cfg); + ZXIC_COMM_FREE(p_rsp_buff); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_stat_cfg_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 分配流表资源缓存空间 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_mem_alloc(DPP_DEV_T *dev) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + if (p_se_res == NULL) { + p_se_res = (DPP_APT_SE_RES_T *)ZXIC_COMM_MALLOC( + sizeof(DPP_APT_SE_RES_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + ZXIC_COMM_MEMSET_S(p_se_res, sizeof(DPP_APT_SE_RES_T), 0x0, + sizeof(DPP_APT_SE_RES_T)); + dpp_dev_set_se_res_ptr(dev, (ZXIC_VOID *)p_se_res); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 释放流表资源缓存空间 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_mem_free(DPP_DEV_T *dev) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT16 slot = 0; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + p_se_res = (DPP_APT_SE_RES_T 
*)dpp_dev_get_se_res_ptr(dev); + if (p_se_res) { + ZXIC_COMM_FREE(p_se_res); + dpp_dev_set_se_res_ptr(dev, NULL); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取指定类型的所有流表资源 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_agent_se_res_get(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 type = SE_STD_NIC_RES_TYPE; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_APT_HASH_RES_INIT_T hash_res = { 0 }; + DPP_APT_ERAM_RES_INIT_T eram_res = { 0 }; + DPP_APT_ACL_RES_INIT_T acl_res = { 0 }; + //DPP_APT_LPM_RES_INIT_T lpm_res = {0}; + //DPP_APT_DDR_RES_INIT_T ddr_res = {0}; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + + if (p_se_res->valid) { + ZXIC_COMM_PRINT("slot[0x%x] res status ready\n", + DEV_PCIE_SLOT(dev)); + return DPP_OK; + } + + ZXIC_COMM_MEMSET_S(&hash_res, sizeof(DPP_APT_HASH_RES_INIT_T), 0x0, + sizeof(DPP_APT_HASH_RES_INIT_T)); + ZXIC_COMM_MEMSET_S(&eram_res, sizeof(DPP_APT_ERAM_RES_INIT_T), 0x0, + sizeof(DPP_APT_ERAM_RES_INIT_T)); + ZXIC_COMM_MEMSET_S(&acl_res, sizeof(DPP_APT_ACL_RES_INIT_T), 0x0, + sizeof(DPP_APT_ACL_RES_INIT_T)); + //ZXIC_COMM_MEMSET_S(&lpm_res,sizeof(DPP_APT_LPM_RES_INIT_T),0x0,sizeof(DPP_APT_LPM_RES_INIT_T)); + //ZXIC_COMM_MEMSET_S(&ddr_res,sizeof(DPP_APT_DDR_RES_INIT_T),0x0,sizeof(DPP_APT_DDR_RES_INIT_T)); + + hash_res.func_res = p_se_res->hash_func; + hash_res.bulk_res = p_se_res->hash_bulk; + hash_res.tbl_res = p_se_res->hash_tbl; + rc = dpp_agent_hash_func_bulk_get(dev, type, &hash_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_hash_func_bulk_get"); + rc = dpp_agent_hash_tbl_get(dev, type, &hash_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, 
"dpp_agent_hash_tbl_get"); + p_se_res->hash_func_num = hash_res.func_num; + p_se_res->hash_bulk_num = hash_res.bulk_num; + p_se_res->hash_tbl_num = hash_res.tbl_num; + + eram_res.eram_res = p_se_res->eram_tbl; + rc = dpp_agent_eram_tbl_get(dev, type, &eram_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_eram_tbl_get"); + p_se_res->eram_num = eram_res.tbl_num; + + acl_res.acl_res = p_se_res->acl_tbl; + rc = dpp_agent_acl_tbl_get(dev, type, &acl_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_acl_tbl_get"); + p_se_res->acl_num = acl_res.tbl_num; + +#if 0 + lpm_res.glb_res = &p_se_res->lpm_global_res; + lpm_res.lpm_res = p_se_res->lpm_tbl; + rc = dpp_agent_lpm_tbl_get(dev, type, &lpm_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_lpm_tbl_get"); + p_se_res->lpm_num = lpm_res.tbl_num; + + ddr_res.ddr_res = p_se_res->ddr_tbl; + rc = dpp_agent_ddr_tbl_get(dev, type, &ddr_res); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_ddr_tbl_get"); + p_se_res->ddr_num = ddr_res.tbl_num; +#endif + + rc = dpp_agent_stat_cfg_get(dev, type, &(p_se_res->stat_cfg)); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_stat_cfg_get"); + + p_se_res->valid = 1; + + return DPP_OK; +} + +/***********************************************************/ +/** 初始化流表资源 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_init(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_APT_HASH_RES_INIT_T tHashResInit = { 0 }; + DPP_APT_ERAM_RES_INIT_T tEramResInit = { 0 }; + DPP_APT_ACL_RES_INIT_T tAclResInit = { 0 }; + //DPP_APT_DDR_RES_INIT_T tDdrResInit = {0}; + //DPP_APT_LPM_RES_INIT_T tLpmResInit = {0}; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_MEMSET(&tHashResInit, 0x0, sizeof(DPP_APT_HASH_RES_INIT_T)); + 
ZXIC_COMM_MEMSET(&tEramResInit, 0x0, sizeof(DPP_APT_ERAM_RES_INIT_T)); + ZXIC_COMM_MEMSET(&tAclResInit, 0x0, sizeof(DPP_APT_ACL_RES_INIT_T)); + //ZXIC_COMM_MEMSET(&tDdrResInit,0x0,sizeof(DPP_APT_DDR_RES_INIT_T)); + //ZXIC_COMM_MEMSET(&tLpmResInit,0x0,sizeof(DPP_APT_LPM_RES_INIT_T)); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + if (!p_se_res->valid) { + ZXIC_COMM_TRACE_ERROR("dpp_se_res_init:res invlaid!\n"); + return DPP_ERR; + } + + tHashResInit.func_num = p_se_res->hash_func_num; + tHashResInit.bulk_num = p_se_res->hash_bulk_num; + tHashResInit.tbl_num = p_se_res->hash_tbl_num; + tHashResInit.func_res = p_se_res->hash_func; + tHashResInit.bulk_res = p_se_res->hash_bulk; + tHashResInit.tbl_res = p_se_res->hash_tbl; + tEramResInit.tbl_num = p_se_res->eram_num; + tEramResInit.eram_res = p_se_res->eram_tbl; + tAclResInit.tbl_num = p_se_res->acl_num; + tAclResInit.acl_res = p_se_res->acl_tbl; + //tLpmResInit.tbl_num = p_se_res->lpm_num; + //tLpmResInit.glb_res = &p_se_res->lpm_global_res; + //tLpmResInit.lpm_res = p_se_res->lpm_tbl; + //tDdrResInit.tbl_num = p_se_res->ddr_num; + //tDdrResInit.ddr_res = p_se_res->ddr_tbl; + + // hash init + rc = dpp_apt_hash_global_res_init(dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_global_res_init"); + + if (tHashResInit.func_num) { + rc = dpp_apt_hash_func_res_init(dev, tHashResInit.func_num, + tHashResInit.func_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_func_res_init"); + } + + if (tHashResInit.bulk_num) { + rc = dpp_apt_hash_bulk_res_init(dev, tHashResInit.bulk_num, + tHashResInit.bulk_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_bulk_res_init"); + } + + // tbl-res must be initialized after fun-res and buld-res + if (tHashResInit.tbl_num) { + rc = dpp_apt_hash_tbl_res_init(dev, tHashResInit.tbl_num, + tHashResInit.tbl_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_hash_tbl_res_init"); + } + + // eram init + if (tEramResInit.tbl_num) { + rc = dpp_apt_eram_res_init(dev, 
tEramResInit.tbl_num, + tEramResInit.eram_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_eram_res_init"); + } + + // init acl + if (tAclResInit.tbl_num) { + rc = dpp_apt_acl_res_init(dev, tAclResInit.tbl_num, + tAclResInit.acl_res); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_acl_res_init"); + } + + // init stat + rc = dpp_apt_stat_res_init(dev, &(p_se_res->stat_cfg)); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_stat_res_init"); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取指定类型流表资源&流表资源初始化 +* @param dev NP设备 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/11 +************************************************************/ +DPP_STATUS dpp_se_res_get_and_init(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + rc = dpp_agent_se_res_get(dev); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_agent_se_res_get"); + + rc = dpp_se_res_init(dev); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_res_init"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取sdt对应的hash最大条目数 +* @param dev NP设备 +* @param sdt_no +* @param max_num 出参,获取的条目数上限 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2024/12/03 +************************************************************/ +DPP_STATUS dpp_hash_max_item_num_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 *max_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 func_id = 0; + ZXIC_UINT32 bulk_id = 0; + ZXIC_UINT32 index = 0; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_APT_HASH_BULK_RES_T *p_bulk = NULL; + DPP_APT_HASH_BULK_RES_T *p_temp_bulk = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(max_num); + + ZXIC_COMM_MEMSET_S(&sdt_hash_info, sizeof(DPP_SDTTBL_HASH_T), 0x0, + sizeof(DPP_SDTTBL_HASH_T)); + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_soft_sdt_tbl_get"); + + func_id = sdt_hash_info.hash_id; + bulk_id = (sdt_hash_info.hash_table_id >> 2) & 0x7; + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + + p_bulk = p_se_res->hash_bulk; + for (index = 0; index < HASH_BULK_MAX_NUM; index++) { + p_temp_bulk = p_bulk + index; + if ((p_temp_bulk->func_id == func_id) && + (p_temp_bulk->bulk_id == bulk_id)) { + *max_num = p_temp_bulk->ddr_item_num; + return DPP_OK; + } + } + return DPP_ERR; +} + +/***********************************************************/ +/** 解析统计项的信息 +* @param dev NP设备 +* @param sdt_no 统计属性对应的sdt号 +* @param entry_num dump出的条目数 +* @param p_dump_data_arr dump出的数据 +* @param p_stat_item_num 出参,统计项个数 +* @param p_stat_item 出参,解析后的统计项信息 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2025/02/15 +************************************************************/ +static DPP_STATUS dpp_stat_tbl_parse(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr, + ZXIC_UINT32 *p_stat_item_num, + DPP_APT_STAT_ITEM_T *p_stat_item) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 valid_item_num = 0; + ZXIC_UINT32 *p_data = NULL; + SE_APT_CALLBACK_T *pAptCallback = NULL; + DPP_APT_STAT_ITEM_T *p_temp_stat_item = NULL; + ZXDH_STAT_ATTR_T stat_attr = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_data_arr); + ZXIC_COMM_CHECK_POINT(p_stat_item_num); + ZXIC_COMM_CHECK_POINT(p_stat_item); + + pAptCallback = dpp_apt_get_func(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pAptCallback); + ZXIC_COMM_CHECK_DEV_POINT( + DEV_ID(dev), pAptCallback->se_func_info.eramFunc.eram_get_func); + + for (i = 0; (i < entry_num) && (i < STAT_ITEM_MAX_NUM); i++) { + p_data = p_dump_data_arr[i].p_data; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + rc = pAptCallback->se_func_info.eramFunc.eram_get_func( + (ZXIC_VOID *)&stat_attr, p_data); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "eram_get_func"); + + if (stat_attr.valid) { + valid_item_num++; + p_temp_stat_item = p_stat_item + i; + p_temp_stat_item->mode = stat_attr.mode; + p_temp_stat_item->addr_offset = stat_attr.addr_offset; + p_temp_stat_item->depth = stat_attr.depth; + } + } + + *p_stat_item_num = valid_item_num; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取统计项的信息 +* @param dev NP设备 +* @param p_se_res 流表资源 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2025/02/15 +************************************************************/ +DPP_STATUS dpp_stat_tbl_get(DPP_DEV_T *dev, DPP_APT_SE_RES_T *p_se_res) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 queue_id = 0; + ZXIC_UINT32 stat_sdt_no = ZXDH_SDT_STAT_ATTR_TABLE; + ZXIC_UINT32 exist_flag = 0; + ZXIC_UINT32 eram_table_depth = 0; + ZXIC_UINT32 byte_num = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 entry_num = 0; + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr = NULL; + ZXIC_UINT8 *data_buff = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + + rc = dpp_apt_sdt_is_exist(p_se_res, DPP_SDT_TBLT_eRAM, stat_sdt_no, + &exist_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_sdt_is_exist"); + if (!exist_flag) { + ZXIC_COMM_PRINT( + "sdt_no:%d is not exsit, can not get stat item info.\n", + stat_sdt_no); + return DPP_OK; + } + + rc = dpp_soft_sdt_tbl_get(dev, stat_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_RC(rc, "dpp_soft_sdt_tbl_get"); + + rc = dpp_dtb_queue_id_get(dev, &queue_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_eram.eram_mode, ERAM128_TBL_64b, + ERAM128_TBL_128b); + byte_num = (sdt_eram.eram_mode == ERAM128_TBL_64b) ? 
8 : 16; + eram_table_depth = sdt_eram.eram_table_depth; + p_dump_data_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + eram_table_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dump_data_arr); + ZXIC_COMM_MEMSET_S( + p_dump_data_arr, + eram_table_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T), 0, + eram_table_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(byte_num * eram_table_depth); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, data_buff, + p_dump_data_arr); + ZXIC_COMM_MEMSET_S(data_buff, eram_table_depth * byte_num, 0, + eram_table_depth * byte_num); + + for (i = 0; i < eram_table_depth; i++) { + p_dump_data_arr[i].index = i; + p_dump_data_arr[i].p_data = + (ZXIC_UINT32 *)(data_buff + i * byte_num); + } + + rc = dpp_dtb_eram_dump(dev, queue_id, stat_sdt_no, + (ZXIC_UINT8 *)p_dump_data_arr, &entry_num); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE2PTR_NO_ASSERT( + dev_id, rc, "dpp_dtb_eram_dump", data_buff, p_dump_data_arr); + + /*解析dump出来的eram数据*/ + rc = dpp_stat_tbl_parse(dev, stat_sdt_no, entry_num, p_dump_data_arr, + &p_se_res->stat_item_num, p_se_res->stat_item); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(p_dump_data_arr); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_stat_tbl_parse"); + + return DPP_OK; +} + +/***********************************************************/ +/** 查看当前sdt号是否存在 +* @param p_se_res 表资源 +* @param sdt_type 表类型 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/02/17 +************************************************************/ +DPP_STATUS dpp_apt_sdt_is_exist(DPP_APT_SE_RES_T *p_se_res, + DPP_SDT_TABLE_TYPE_E sdt_type, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 *p_is_exist) +{ + ZXIC_UINT8 index = 0; + + ZXIC_COMM_CHECK_POINT(p_se_res); + + *p_is_exist = 0; + if (sdt_type == DPP_SDT_TBLT_eRAM) { + for (index = 0; + (index < p_se_res->eram_num) && (index < ERAM_MAX_NUM); + index++) { + if (p_se_res->eram_tbl[index].sdtNo == sdt_no) { + 
*p_is_exist = 1; + break; + } + } + } else if (sdt_type == DPP_SDT_TBLT_eTCAM) { + for (index = 0; + (index < p_se_res->acl_num) && (index < ETCAM_MAX_NUM); + index++) { + if (p_se_res->acl_tbl[index].sdtNo == sdt_no) { + *p_is_exist = 1; + break; + } + } + } else if (sdt_type == DPP_SDT_TBLT_HASH) { + for (index = 0; (index < p_se_res->hash_tbl_num) && + (index < HASH_TABLE_MAX_NUM); + index++) { + if (p_se_res->hash_tbl[index].sdtNo == sdt_no) { + *p_is_exist = 1; + break; + } + } + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/Kbuild.include new file mode 100644 index 000000000000..bb99f1869be9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/sdk/source/dev/module/table/ +subdirs := sdt/ se/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/Kbuild.include new file mode 100644 index 000000000000..c3eee0509d4f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/table/sdt/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt.c new file mode 100644 index 000000000000..68084323b0e7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt.c @@ -0,0 +1,611 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : 
dpp_sdt.c +* 文件标识 : sdt配置接口实现文件 +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2015/06/25 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" + +#include "dpp_dev.h" +#include "dpp_se.h" +#include "dpp_sdt_def.h" +#include "dpp_sdt_mgr.h" +#include "dpp_sdt.h" + +/** 获取低n位数据 */ +#define DPP_SDT_GET_LOW_DATA(source_value, low_width) \ + (source_value & ((1 << low_width) - 1)) + +#if ZXIC_REAL("function for FCM_FTM ") +/***********************************************************/ +/** 初始化SDT表配置管理 +* @param dev_num 设备数目 +* @param dev_id_array 设备dev_id数组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_init(ZXIC_UINT32 dev_num, ZXIC_UINT32 *dev_id_array) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_INDEX(dev_num, 1, DPP_DEV_CHANNEL_MAX); + ZXIC_COMM_CHECK_POINT(dev_id_array); + + for (i = 0; i < dev_num; i++) { + ZXIC_COMM_CHECK_INDEX(*(dev_id_array + i), 0, + DPP_DEV_CHANNEL_MAX - 1); + } + + dpp_sdt_mgr_init(); + + for (i = 0; i < dev_num; i++) { + rc = dpp_sdt_mgr_create(dev_id_array[i]); + ZXIC_COMM_CHECK_RC(rc, "dpp_sdt_mgr_create"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 解析从硬件读取的64bit SDT属性 +* @param sdt_hig32 硬件表中存储的SDT属性高32bit +* @param sdt_low32 硬件表中存储的SDT属性低32bit +* @param p_sdt_info 解析之后的SDT属性,根据SDT属性中的table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR3_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、\n +* DPP_SDTTBL_ETCAM_T、DPP_SDTTBL_PORTTBL_T。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_data_parser(DPP_DEV_T *dev, ZXIC_UINT32 sdt_hig32, + ZXIC_UINT32 sdt_low32, 
ZXIC_VOID *p_sdt_info) +{ + ZXIC_UINT32 tmp = 0; + ZXIC_UINT32 tbl_type = 0; + ZXIC_UINT32 clutch_en = 0; + + DPP_SDTTBL_ERAM_T *p_sdt_eram = NULL; + DPP_SDTTBL_DDR3_T *p_sdt_ddr3 = NULL; + DPP_SDTTBL_HASH_T *p_sdt_hash = NULL; + DPP_SDTTBL_LPM_T *p_sdt_lpm = NULL; + DPP_SDTTBL_ETCAM_T *p_sdt_etcam = NULL; + DPP_SDTTBL_PORTTBL_T *p_sdt_porttbl = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_sdt_info); + + ZXIC_COMM_UINT32_GET_BITS(tbl_type, sdt_hig32, + DPP_SDT_H_TBL_TYPE_BT_POS, + DPP_SDT_H_TBL_TYPE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(clutch_en, sdt_low32, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + + /* 根据表类型解析数据 */ + switch (tbl_type) { + case DPP_SDT_TBLT_eRAM: { + p_sdt_eram = (DPP_SDTTBL_ERAM_T *)p_sdt_info; + p_sdt_eram->table_type = tbl_type; + ZXIC_COMM_UINT32_GET_BITS(p_sdt_eram->eram_mode, sdt_hig32, + DPP_SDT_H_ERAM_MODE_BT_POS, + DPP_SDT_H_ERAM_MODE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_eram->eram_base_addr, sdt_hig32, + DPP_SDT_H_ERAM_BASE_ADDR_BT_POS, + DPP_SDT_H_ERAM_BASE_ADDR_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_eram->eram_table_depth, + sdt_low32, + DPP_SDT_L_ERAM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_ERAM_TABLE_DEPTH_BT_LEN); + p_sdt_eram->eram_clutch_en = clutch_en; + break; + } + + case DPP_SDT_TBLT_DDR3: { + p_sdt_ddr3 = (DPP_SDTTBL_DDR3_T *)p_sdt_info; + p_sdt_ddr3->table_type = tbl_type; + ZXIC_COMM_UINT32_GET_BITS(p_sdt_ddr3->ddr3_base_addr, sdt_hig32, + DPP_SDT_H_DDR3_BASE_ADDR_BT_POS, + DPP_SDT_H_DDR3_BASE_ADDR_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_ddr3->ddr3_share_type, + sdt_hig32, + DPP_SDT_H_DDR3_SHARE_TYPE_BT_POS, + DPP_SDT_H_DDR3_SHARE_TYPE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_ddr3->ddr3_rw_len, sdt_hig32, + DPP_SDT_H_DDR3_RW_LEN_BT_POS, + DPP_SDT_H_DDR3_RW_LEN_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(tmp, sdt_hig32, + DPP_SDT_H_DDR3_SDT_NUM_BT_POS, + DPP_SDT_H_DDR3_SDT_NUM_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_ddr3->ddr3_sdt_num, sdt_low32, + DPP_SDT_L_DDR3_SDT_NUM_BT_POS, + 
DPP_SDT_L_DDR3_SDT_NUM_BT_LEN); + p_sdt_ddr3->ddr3_sdt_num += + (tmp << DPP_SDT_L_DDR3_SDT_NUM_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_ddr3->ddr3_ecc_en, sdt_low32, + DPP_SDT_L_DDR3_ECC_EN_BT_POS, + DPP_SDT_L_DDR3_ECC_EN_BT_LEN); + p_sdt_ddr3->ddr3_clutch_en = clutch_en; + break; + } + + case DPP_SDT_TBLT_HASH: { + p_sdt_hash = (DPP_SDTTBL_HASH_T *)p_sdt_info; + p_sdt_hash->table_type = tbl_type; + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->hash_id, sdt_hig32, + DPP_SDT_H_HASH_ID_BT_POS, + DPP_SDT_H_HASH_ID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->hash_table_width, + sdt_hig32, + DPP_SDT_H_HASH_TABLE_WIDTH_BT_POS, + DPP_SDT_H_HASH_TABLE_WIDTH_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->key_size, sdt_hig32, + DPP_SDT_H_HASH_KEY_SIZE_BT_POS, + DPP_SDT_H_HASH_KEY_SIZE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->hash_table_id, sdt_hig32, + DPP_SDT_H_HASH_TABLE_ID_BT_POS, + DPP_SDT_H_HASH_TABLE_ID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->learn_en, sdt_hig32, + DPP_SDT_H_LEARN_EN_BT_POS, + DPP_SDT_H_LEARN_EN_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->keep_alive, sdt_hig32, + DPP_SDT_H_KEEP_ALIVE_BT_POS, + DPP_SDT_H_KEEP_ALIVE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(tmp, sdt_hig32, + DPP_SDT_H_KEEP_ALIVE_BADDR_BT_POS, + DPP_SDT_H_KEEP_ALIVE_BADDR_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->keep_alive_baddr, + sdt_low32, + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_POS, + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN); + p_sdt_hash->keep_alive_baddr += + (tmp << DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_hash->rsp_mode, sdt_low32, + DPP_SDT_L_RSP_MODE_BT_POS, + DPP_SDT_L_RSP_MODE_BT_LEN); + p_sdt_hash->hash_clutch_en = clutch_en; + break; + } + + case DPP_SDT_TBLT_LPM: { + p_sdt_lpm = (DPP_SDTTBL_LPM_T *)p_sdt_info; + p_sdt_lpm->table_type = tbl_type; + ZXIC_COMM_UINT32_GET_BITS(p_sdt_lpm->lpm_v46_id, sdt_hig32, + DPP_SDT_H_LPM_V46ID_BT_POS, + DPP_SDT_H_LPM_V46ID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_lpm->rsp_mode, sdt_hig32, + 
DPP_SDT_H_LPM_RSP_MODE_BT_POS, + DPP_SDT_H_LPM_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_lpm->lpm_table_depth, sdt_low32, + DPP_SDT_L_LPM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_LPM_TABLE_DEPTH_BT_LEN); + p_sdt_lpm->lpm_clutch_en = clutch_en; + break; + } + + case DPP_SDT_TBLT_eTCAM: { + p_sdt_etcam = (DPP_SDTTBL_ETCAM_T *)p_sdt_info; + p_sdt_etcam->table_type = tbl_type; + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->etcam_id, sdt_hig32, + DPP_SDT_H_ETCAM_ID_BT_POS, + DPP_SDT_H_ETCAM_ID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->etcam_key_mode, + sdt_hig32, + DPP_SDT_H_ETCAM_KEY_MODE_BT_POS, + DPP_SDT_H_ETCAM_KEY_MODE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->etcam_table_id, + sdt_hig32, + DPP_SDT_H_ETCAM_TABLE_ID_BT_POS, + DPP_SDT_H_ETCAM_TABLE_ID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->no_as_rsp_mode, + sdt_hig32, + DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_POS, + DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->as_en, sdt_hig32, + DPP_SDT_H_ETCAM_AS_EN_BT_POS, + DPP_SDT_H_ETCAM_AS_EN_BT_LEN); + + ZXIC_COMM_UINT32_GET_BITS(tmp, sdt_hig32, + DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_POS, + DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->as_eram_baddr, sdt_low32, + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_POS, + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + p_sdt_etcam->as_eram_baddr, + (tmp << DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN)); + p_sdt_etcam->as_eram_baddr += + (tmp << DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN); + + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->as_rsp_mode, sdt_low32, + DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_POS, + DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(p_sdt_etcam->etcam_table_depth, + sdt_low32, + DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_LEN); + p_sdt_etcam->etcam_clutch_en = clutch_en; + break; + } + + case DPP_SDT_TBLT_PORTTBL: { + p_sdt_porttbl = (DPP_SDTTBL_PORTTBL_T *)p_sdt_info; + 
p_sdt_porttbl->table_type = tbl_type; + p_sdt_porttbl->porttbl_clutch_en = clutch_en; + break; + } + + default: { + ZXIC_COMM_TRACE_ERROR("SDT table_type[ %d ] is invalid!\n", + tbl_type); + ZXIC_COMM_ASSERT(0); + return DPP_ERR; + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** 从软件缓存中获取table data信息 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号,0-255 +* @param p_sdt_data sdt表信息 +* +* @return +* @remark 无 +* @see +* @author lim @date 2020/04/16 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_data_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + DPP_SDT_TBL_DATA_T *p_sdt_data) +{ + return dpp_sdt_mgr_sdt_item_srh(dev, sdt_no, &p_sdt_data->data_high32, + &p_sdt_data->data_low32); +} + +/***********************************************************/ +/** 从软件缓存中获取sdt信息 +* @param device_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param p_sdt_info 写入的SDT属性。由table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、 +* DPP_SDTTBL_ETCAM_T。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lim @date 2020/04/16 +************************************************************/ +DPP_STATUS dpp_soft_sdt_tbl_get(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_VOID *p_sdt_info) +{ + DPP_STATUS rc = 0; + + DPP_SDT_TBL_DATA_T sdt_tbl = { 0 }; + + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_sdt_info); + + /** 从软件缓存读取sdt信息 */ + rc = dpp_sdt_tbl_data_get(dev, sdt_no, &sdt_tbl); + ZXIC_COMM_CHECK_RC(rc, "dpp_sdt_tbl_data_get"); + + rc = dpp_sdt_tbl_data_parser(dev, sdt_tbl.data_high32, + sdt_tbl.data_low32, p_sdt_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_sdt_tbl_data_parser"); + + return rc; +} + +/***********************************************************/ +/** 写SDT属性表条目到硬件表,同时向8个cluster写入 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param table_type 
SDT属性中的表类型,取值参考DPP_SDT_TABLE_TYPE_E的定义(仅添加操作时有效) +* @param p_sdt_info 写入的SDT属性(仅添加操作时有效)。由table_type确定此ZXIC_VOID型指针对应的数据结构, 包括: \n +* DPP_SDTTBL_ERAM_T、DPP_SDTTBL_DDR_T、DPP_SDTTBL_HASH_T、DPP_SDTTBL_LPM_T、\n +* DPP_SDTTBL_ETCAM_T、DPP_SDTTBL_PORTTBL_T。 +* @param opr_type 操作类型: 0-添加条目,1-删除条目. +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/11 +************************************************************/ +DPP_STATUS dpp_sdt_tbl_write(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 table_type, ZXIC_VOID *p_sdt_info, + ZXIC_UINT32 opr_type) +{ +#ifdef DPP_FLOW_HW_INIT + ZXIC_UINT32 i = 0; +#endif + DPP_STATUS rtn = 0; + DPP_SDT_TBL_DATA_T sdt_tbl = { 0 }; + DPP_SDTTBL_ERAM_T *p_sdt_eram = NULL; + DPP_SDTTBL_DDR3_T *p_sdt_ddr3 = NULL; + DPP_SDTTBL_HASH_T *p_sdt_hash = NULL; + DPP_SDTTBL_LPM_T *p_sdt_lpm = NULL; + DPP_SDTTBL_ETCAM_T *p_sdt_etcam = NULL; + DPP_SDTTBL_PORTTBL_T *p_sdt_porttbl = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + + ZXIC_COMM_MEMSET_S(&sdt_tbl, sizeof(DPP_SDT_TBL_DATA_T), 0, + sizeof(DPP_SDT_TBL_DATA_T)); + + /* 根据表类型解析数据 */ + if (opr_type) { + rtn = dpp_sdt_mgr_sdt_item_del(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "dpp_sdt_mgr_sdt_item_del"); + } else { + /* add sdt item*/ + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_sdt_info); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), table_type, + DPP_SDT_TBLT_eRAM, + DPP_SDT_TBLT_PORTTBL); + + switch (table_type) { + case DPP_SDT_TBLT_eRAM: { + p_sdt_eram = (DPP_SDTTBL_ERAM_T *)p_sdt_info; + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, + p_sdt_eram->eram_mode, + DPP_SDT_H_ERAM_MODE_BT_POS, + DPP_SDT_H_ERAM_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_eram->eram_base_addr, + DPP_SDT_H_ERAM_BASE_ADDR_BT_POS, + DPP_SDT_H_ERAM_BASE_ADDR_BT_LEN); + 
ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + p_sdt_eram->eram_table_depth, + DPP_SDT_L_ERAM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_ERAM_TABLE_DEPTH_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_low32, + p_sdt_eram->eram_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } + + case DPP_SDT_TBLT_DDR3: { + p_sdt_ddr3 = (DPP_SDTTBL_DDR3_T *)p_sdt_info; + + /** 添加操作必须保证sdt号和ddr存储的sdt号一致 */ + ZXIC_COMM_ASSERT(sdt_no == p_sdt_ddr3->ddr3_sdt_num); + + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_ddr3->ddr3_base_addr, + DPP_SDT_H_DDR3_BASE_ADDR_BT_POS, + DPP_SDT_H_DDR3_BASE_ADDR_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + p_sdt_ddr3->ddr3_share_type, + DPP_SDT_H_DDR3_SHARE_TYPE_BT_POS, + DPP_SDT_H_DDR3_SHARE_TYPE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_ddr3->ddr3_rw_len, + DPP_SDT_H_DDR3_RW_LEN_BT_POS, + DPP_SDT_H_DDR3_RW_LEN_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + ((p_sdt_ddr3->ddr3_sdt_num) >> + DPP_SDT_L_DDR3_SDT_NUM_BT_LEN), + DPP_SDT_H_DDR3_SDT_NUM_BT_POS, + DPP_SDT_H_DDR3_SDT_NUM_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + DPP_SDT_GET_LOW_DATA( + (p_sdt_ddr3->ddr3_sdt_num), + DPP_SDT_L_DDR3_SDT_NUM_BT_LEN), + DPP_SDT_L_DDR3_SDT_NUM_BT_POS, + DPP_SDT_L_DDR3_SDT_NUM_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, p_sdt_ddr3->ddr3_ecc_en, + DPP_SDT_L_DDR3_ECC_EN_BT_POS, + DPP_SDT_L_DDR3_ECC_EN_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_low32, + p_sdt_ddr3->ddr3_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } + + case DPP_SDT_TBLT_HASH: { + p_sdt_hash = (DPP_SDTTBL_HASH_T *)p_sdt_info; + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, + p_sdt_hash->hash_id, + DPP_SDT_H_HASH_ID_BT_POS, + DPP_SDT_H_HASH_ID_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + p_sdt_hash->hash_table_width, + DPP_SDT_H_HASH_TABLE_WIDTH_BT_POS, + 
DPP_SDT_H_HASH_TABLE_WIDTH_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_hash->key_size, + DPP_SDT_H_HASH_KEY_SIZE_BT_POS, + DPP_SDT_H_HASH_KEY_SIZE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_hash->hash_table_id, + DPP_SDT_H_HASH_TABLE_ID_BT_POS, + DPP_SDT_H_HASH_TABLE_ID_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, + p_sdt_hash->learn_en, + DPP_SDT_H_LEARN_EN_BT_POS, + DPP_SDT_H_LEARN_EN_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_hash->keep_alive, + DPP_SDT_H_KEEP_ALIVE_BT_POS, + DPP_SDT_H_KEEP_ALIVE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + ((p_sdt_hash->keep_alive_baddr) >> + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN), + DPP_SDT_H_KEEP_ALIVE_BADDR_BT_POS, + DPP_SDT_H_KEEP_ALIVE_BADDR_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + DPP_SDT_GET_LOW_DATA( + (p_sdt_hash->keep_alive_baddr), + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN), + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_POS, + DPP_SDT_L_KEEP_ALIVE_BADDR_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_low32, + p_sdt_hash->rsp_mode, + DPP_SDT_L_RSP_MODE_BT_POS, + DPP_SDT_L_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_low32, + p_sdt_hash->hash_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } + + case DPP_SDT_TBLT_LPM: { + p_sdt_lpm = (DPP_SDTTBL_LPM_T *)p_sdt_info; + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, + p_sdt_lpm->lpm_v46_id, + DPP_SDT_H_LPM_V46ID_BT_POS, + DPP_SDT_H_LPM_V46ID_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_lpm->rsp_mode, + DPP_SDT_H_LPM_RSP_MODE_BT_POS, + DPP_SDT_H_LPM_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, p_sdt_lpm->lpm_table_depth, + DPP_SDT_L_LPM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_LPM_TABLE_DEPTH_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_low32, + p_sdt_lpm->lpm_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } + + case 
DPP_SDT_TBLT_eTCAM: { + p_sdt_etcam = (DPP_SDTTBL_ETCAM_T *)p_sdt_info; + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, + p_sdt_etcam->etcam_id, + DPP_SDT_H_ETCAM_ID_BT_POS, + DPP_SDT_H_ETCAM_ID_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + p_sdt_etcam->etcam_key_mode, + DPP_SDT_H_ETCAM_KEY_MODE_BT_POS, + DPP_SDT_H_ETCAM_KEY_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + p_sdt_etcam->etcam_table_id, + DPP_SDT_H_ETCAM_TABLE_ID_BT_POS, + DPP_SDT_H_ETCAM_TABLE_ID_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + p_sdt_etcam->no_as_rsp_mode, + DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_POS, + DPP_SDT_H_ETCAM_NOAS_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, p_sdt_etcam->as_en, + DPP_SDT_H_ETCAM_AS_EN_BT_POS, + DPP_SDT_H_ETCAM_AS_EN_BT_LEN); + + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_high32, + ((p_sdt_etcam->as_eram_baddr) >> + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN), + DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_POS, + DPP_SDT_H_ETCAM_AS_ERAM_BADDR_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + DPP_SDT_GET_LOW_DATA( + (p_sdt_etcam->as_eram_baddr), + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN), + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_POS, + DPP_SDT_L_ETCAM_AS_ERAM_BADDR_BT_LEN); + + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, p_sdt_etcam->as_rsp_mode, + DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_POS, + DPP_SDT_L_ETCAM_AS_RSP_MODE_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + p_sdt_etcam->etcam_table_depth, + DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_POS, + DPP_SDT_L_ETCAM_TABLE_DEPTH_BT_LEN); + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + p_sdt_etcam->etcam_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } + + case DPP_SDT_TBLT_PORTTBL: { + p_sdt_porttbl = (DPP_SDTTBL_PORTTBL_T *)p_sdt_info; + ZXIC_COMM_UINT32_WRITE_BITS( + sdt_tbl.data_low32, + p_sdt_porttbl->porttbl_clutch_en, + DPP_SDT_L_CLUTCH_EN_BT_POS, + DPP_SDT_L_CLUTCH_EN_BT_LEN); + break; + } 
+ + default: { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "SDT table_type[ %d ] is invalid!\n", + table_type); + return DPP_ERR; + } + } + + ZXIC_COMM_UINT32_WRITE_BITS(sdt_tbl.data_high32, table_type, + DPP_SDT_H_TBL_TYPE_BT_POS, + DPP_SDT_H_TBL_TYPE_BT_LEN); + + /* 缓存到软件 */ + rtn = dpp_sdt_mgr_sdt_item_add(dev, sdt_no, sdt_tbl.data_high32, + sdt_tbl.data_low32); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "dpp_sdt_mgr_sdt_item_add"); + } +#ifdef DPP_FLOW_HW_INIT + for (i = 0; i < DPP_PPU_CLUSTER_NUM; i++) { + /*cluster 未启用,不需要配置该cluster相关的寄存器*/ + if (!dpp_ppu_cls_use_get(DEV_ID(dev), i)) { + continue; + } + + rtn = dpp_ppu_sdt_tbl_write(dev, i, sdt_no, &sdt_tbl); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, + "dpp_ppu_sdt_tbl_write"); + } +#endif + return DPP_OK; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt_mgr.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt_mgr.c new file mode 100644 index 000000000000..e738d08e36b9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/sdt/dpp_sdt_mgr.c @@ -0,0 +1,328 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_sdt_mgr.c +* 文件标识 : +* 内容摘要 : SDT属性软件缓存,以及接收上层配置并下发给底层设备 +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2015/06/25 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" + +#include "dpp_dev.h" +#include "dpp_sdt_def.h" +#include "dpp_sdt.h" +#include "dpp_sdt_mgr.h" +#include "dpp_se_api.h" + +static DPP_SDT_MGR_T g_sdt_mgr = { 0 }; + +#define DPP_SDT_MGR_PTR_GET() (&g_sdt_mgr) + +#define DPP_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id]) + +ZXIC_UINT32 dpp_sdt_mgr_init(ZXIC_VOID) +{ + if (!g_sdt_mgr.is_init) { + g_sdt_mgr.channel_num = 0; + g_sdt_mgr.is_init = 1; + // 
g_sdt_mgr.p_sdt_mgr_smmu0_mux = dpp_tbl_dir_sdt_smmu0_mux; + // g_sdt_mgr.p_sdt_mgr_smmu1_mux = dpp_tbl_dir_sdt_smmu1_mux; + // g_sdt_mgr.p_sdt_mgr_hash_mux = dpp_tbl_dir_sdt_hash_mux; + // g_sdt_mgr.p_sdt_mgr_lpm_mux = dpp_tbl_dir_sdt_lpm_mux; + // g_sdt_mgr.p_sdt_mgr_etcam_mux = dpp_tbl_dir_sdt_etcam_mux; + ZXIC_COMM_MEMSET(g_sdt_mgr.sdt_tbl_array, 0, + DPP_DEV_CHANNEL_MAX * + sizeof(DPP_SDT_SOFT_TABLE_T *)); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_sdt_mgr_create(ZXIC_UINT32 dev_id) +{ + DPP_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL; + DPP_SDT_MGR_T *p_sdt_mgr = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + p_sdt_mgr = DPP_SDT_MGR_PTR_GET(); + + if (DPP_SDT_SOFT_TBL_GET(dev_id) == NULL) { + p_sdt_tbl_temp = ZXIC_COMM_MALLOC(sizeof(DPP_SDT_SOFT_TABLE_T)); + ZXIC_COMM_CHECK_DEV_POINT( + dev_id, p_sdt_tbl_temp); /* mod for KW_0411 # 474 */ + + p_sdt_tbl_temp->device_id = dev_id; + ZXIC_COMM_MEMSET(p_sdt_tbl_temp->sdt_array, 0, + DPP_PCIE_SLOT_MAX * DPP_DEV_SDT_ID_MAX * + sizeof(DPP_SDT_ITEM_T)); + + DPP_SDT_SOFT_TBL_GET(dev_id) = p_sdt_tbl_temp; + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + dev_id, p_sdt_mgr->channel_num, 1); + p_sdt_mgr->channel_num++; + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_create for dev[%d] is called repeatedly!\n", + dev_id); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_sdt_mgr_destroy(ZXIC_UINT32 dev_id) +{ + DPP_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL; + DPP_SDT_MGR_T *p_sdt_mgr = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + p_sdt_tbl_temp = DPP_SDT_SOFT_TBL_GET(dev_id); + p_sdt_mgr = DPP_SDT_MGR_PTR_GET(); + + if (NULL != p_sdt_tbl_temp) { + ZXIC_COMM_FREE(p_sdt_tbl_temp); + } + + DPP_SDT_SOFT_TBL_GET(dev_id) = NULL; + + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT( + dev_id, p_sdt_mgr->channel_num, 1); + p_sdt_mgr->channel_num--; + + return DPP_OK; +} + +#if 1 
+/***********************************************************/ +/** 向软件缓存中添加SDT表条目 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param sdt_hig32 SDT属性高32bit +* @param sdt_low32 SDT属性低32bit +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_mgr_sdt_item_add(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 sdt_hig32, + ZXIC_UINT32 sdt_low32) +{ + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 dev_id = 0; + DPP_SDT_SOFT_TABLE_T *p_sdt_soft_tbl = NULL; + DPP_SDT_ITEM_T *p_sdt_item = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + slot = DEV_PCIE_SLOT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + p_sdt_soft_tbl = DPP_SDT_SOFT_TBL_GET(dev_id); + + if (NULL == p_sdt_soft_tbl) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_sdt_item_add soft sdt table not Init! \n"); + ZXIC_COMM_ASSERT(0); + return DPP_RC_TABLE_SDT_MGR_INVALID; + } + + if (dev_id != p_sdt_soft_tbl->device_id) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_sdt_item_add soft sdt table Item Invalid! 
\n"); + ZXIC_COMM_ASSERT(0); + return DPP_RC_TABLE_PARA_INVALID; + } + + /*添加SDT表项*/ + p_sdt_item = &(p_sdt_soft_tbl->sdt_array[slot][sdt_no]); + p_sdt_item->valid = DPP_SDT_VALID; + p_sdt_item->table_cfg[0] = sdt_hig32; /* hig32 */ + p_sdt_item->table_cfg[1] = sdt_low32; /* low32 */ + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, + "dpp_sdt_mgr_sdt_item_add 0x%08x 0x%08x \n", + p_sdt_item->table_cfg[0], + p_sdt_item->table_cfg[1]); + + return DPP_OK; +} + +/***********************************************************/ +/** 从软件缓存中读取SDT表条目 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* @param p_sdt_hig32 SDT属性高32bit +* @param p_sdt_low32 SDT属性低32bit +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_mgr_sdt_item_srh(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 *p_sdt_hig32, + ZXIC_UINT32 *p_sdt_low32) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + DPP_SDT_SOFT_TABLE_T *p_sdt_soft_tbl = NULL; + DPP_SDT_ITEM_T *p_sdt_item = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sdt_hig32); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sdt_low32); + + p_sdt_soft_tbl = DPP_SDT_SOFT_TBL_GET(dev_id); + + if (NULL == p_sdt_soft_tbl) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_sdt_item_srh Soft Table not Init! \n"); + ZXIC_COMM_ASSERT(0); + return DPP_RC_TABLE_SDT_MGR_INVALID; + } + + if (dev_id != p_sdt_soft_tbl->device_id) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_sdt_item_srh Soft Table Item Invalid ! 
\n"); + ZXIC_COMM_ASSERT(0); + return DPP_RC_TABLE_PARA_INVALID; + } + + /* 获取SDT表项 */ + p_sdt_item = &p_sdt_soft_tbl->sdt_array[slot][sdt_no]; + + if (DPP_SDT_VALID == p_sdt_item->valid) { + *p_sdt_hig32 = p_sdt_item->table_cfg[0]; + *p_sdt_low32 = p_sdt_item->table_cfg[1]; + } else { + *p_sdt_hig32 = 0xFFFFFFFF; + *p_sdt_low32 = 0xFFFFFFFF; + } + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "dpp_sdt_mgr_sdt_item_srh is %s: sdt_no: 0x%08x sdt_hig32:0x%08x sdt_low32:0x%08x \n", + ((DPP_SDT_VALID == p_sdt_item->valid) ? ("success") : ("fail")), + sdt_no, *p_sdt_hig32, *p_sdt_low32); + + return rc; +} + +/***********************************************************/ +/** 从软件缓存中删除SDT表条目 +* @param dev_id 设备号 +* @param sdt_no 业务表对应的sdt号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/07/13 +************************************************************/ +DPP_STATUS dpp_sdt_mgr_sdt_item_del(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + DPP_SDT_SOFT_TABLE_T *p_sdt_soft_tbl = NULL; + DPP_SDT_ITEM_T *p_sdt_item = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + p_sdt_soft_tbl = DPP_SDT_SOFT_TBL_GET(dev_id); + if (NULL != p_sdt_soft_tbl) { + if (dev_id != p_sdt_soft_tbl->device_id) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Error: dpp_sdt_mgr_sdt_item_del Soft Table Item Invalid ! 
\n"); + ZXIC_COMM_ASSERT(0); + return DPP_RC_TABLE_PARA_INVALID; + } + + p_sdt_item = &p_sdt_soft_tbl->sdt_array[slot][sdt_no]; + p_sdt_item->valid = DPP_SDT_INVALID; + p_sdt_item->table_cfg[0] = 0; + p_sdt_item->table_cfg[1] = 0; + } + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "dpp_sdt_mgr_sdt_item_del sdt_no: 0x%08x \n", sdt_no); + return DPP_OK; +} + +#endif + +#if 1 +/********************************************************************* + * 函数名称:NpeSdtMgr_GetTblType + * + * 功能描述: + * + * 输入参数: + * 输出参数: + * 返 回 值: + * 全局变量: + * 注 释: + ============================================================ + * 修改记录: + * 修改日期 版本号 修改人 修改内容 + * 20120327 v1.0 石金锋 创建 + ============================================================ + * + *********************************************************************/ +DPP_TBL_TYPE_E dpp_sdt_mgr_get_tbl_type(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 rtn = 0; + DPP_TBL_TYPE_E table_type = 0; + ZXIC_UINT32 table_cfg[DPP_SDT_CFG_LEN] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + rtn = dpp_sdt_mgr_sdt_item_srh(dev, sdt_no, &(table_cfg[0]), + &(table_cfg[1])); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_sdt_mgr_sdt_item_srh"); + + table_type = (DPP_TBL_TYPE_E)((table_cfg[0] >> 29) & 0x7); + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "dpp_sdt_mgr_get_tbl_type: dev_id: %d, sdt_no: %d, table_type: %d. 
\n", + dev_id, sdt_no, table_type); + + return table_type; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/Kbuild.include new file mode 100644 index 000000000000..31709689b937 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/table/se/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_acl.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_acl.c new file mode 100644 index 000000000000..a2dce08c3057 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_acl.c @@ -0,0 +1,703 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_acl.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/12/17 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" + +#include "dpp_dev.h" +#include "dpp_acl.h" +#include "dpp_etcam.h" +#include "dpp_se.h" +#include "dpp_se_reg.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" + +#define BLOCK_IDXBASE_BIT_OFF (9) +#define BLOCK_IDXBASE_BIT_MASK (0x7f) +#define ACL_AS_IDX_OFFSET_MAX (32 * 1024) +#define ACL_IMPLICIT_PRI (0) + +#define DPP_ACL_KEYSIZE_GET(key_mode) (2 * DPP_ETCAM_ENTRY_SIZE_GET(key_mode)) +#define DPP_ACL_ENTRY_WRMODE_GET(key_mode, entry_pos) \ + ((((1U << (8U >> (key_mode))) - 1) \ + << ((8U >> (key_mode)) * (entry_pos))) & \ + 0xFF) + +#define DPP_ACL_AS_RSLT_INFO_GET(buff_base, index, size) \ + (((ZXIC_UINT8 *)(buff_base) + (index) * (size))) + +#define MEM_OFF_NOT_NULL(type, member) \ + 
(ZXIC_COMM_PTR_TO_VAL(&(((type *)4)->member)) - \ + ZXIC_COMM_PTR_TO_VAL(((type *)4))) + +/*根据当前双链表的指针,找到本节点的指针*/ +#define GET_STRUCT_ENTRY_POINT(ptr, type, member) \ + ((type *)(ZXIC_COMM_PTR_TO_VAL(ptr) - MEM_OFF_NOT_NULL(type, member))) + +static DPP_ACL_CFG_EX_T *g_p_acl_ex_cfg[DPP_PCIE_SLOT_MAX] = { NULL }; + +/***********************************************************/ +/** 根据业务表总条目数计数需要的block数 +* @param entry_num +* @param key_mode +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/23 +************************************************************/ +ZXIC_UINT32 dpp_acl_entrynum_to_blocknum(ZXIC_UINT32 entry_num, + ZXIC_UINT32 key_mode) +{ + ZXIC_UINT32 value = 0; + + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT( + DPP_ETCAM_RAM_DEPTH, ((ZXIC_UINT32)1 << key_mode)); + value = entry_num % + (DPP_ETCAM_RAM_DEPTH * ((ZXIC_UINT32)1 << key_mode)); + + if (value == 0) { + return (entry_num / + (DPP_ETCAM_RAM_DEPTH * ((ZXIC_UINT32)1 << key_mode))); + } else { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + entry_num / (DPP_ETCAM_RAM_DEPTH * + ((ZXIC_UINT32)1 << key_mode)), + 1); + return (entry_num / (DPP_ETCAM_RAM_DEPTH * + ((ZXIC_UINT32)1 << key_mode)) + + 1); + } +} + +/***********************************************************/ +/** 优先级比较函数,用于显示优先级模式比较优先级 +* @param p_new_key +* @param p_old_key +* @param key_len +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/24 +************************************************************/ +ZXIC_SINT32 dpp_acl_entry_pri_cmp(void *p_new_key, void *p_old_key, + ZXIC_UINT32 key_len) +{ + if ((*(ZXIC_UINT32 *)p_new_key) > (*(ZXIC_UINT32 *)p_old_key)) { + return 1; + } else if ((*(ZXIC_UINT32 *)p_new_key) < (*(ZXIC_UINT32 *)p_old_key)) { + return -1; + } + + return 0; +} + +/***********************************************************/ +/** 本地缓存acl键值比较,用于rb树回调 +* @param p_new_key +* @param p_old_key +* @param key_len +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/23 
+************************************************************/ +ZXIC_SINT32 dpp_acl_key_cmp(void *p_new_key, void *p_old_key, + ZXIC_UINT32 key_len) +{ + ZXIC_COMM_CHECK_POINT(p_new_key); + ZXIC_COMM_CHECK_POINT(p_old_key); + /* 相同data+mask,但优先级不同,则为两个不同条目,因此软件需要比较pri+data+mask, handle不参与比较 */ + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT( + key_len, (ZXIC_UINT32)ZXIC_SIZEOF(ZXIC_UINT32)); + return ZXIC_COMM_MEMCMP(&(((DPP_ACL_KEY_INFO_T *)p_new_key)->pri), + &(((DPP_ACL_KEY_INFO_T *)p_old_key)->pri), + key_len - ZXIC_SIZEOF(ZXIC_UINT32)); +} + +/***********************************************************/ +/** 根据业务条目索引计算写etcam硬件的地址 +* @param p_tbl_cfg +* @param handle +* @param p_block_idx +* @param p_addr +* @param p_wr_mask +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/23 +************************************************************/ +DPP_STATUS dpp_acl_hdw_addr_get(DPP_ACL_TBL_CFG_T *p_tbl_cfg, + ZXIC_UINT32 handle, ZXIC_UINT32 *p_block_idx, + ZXIC_UINT32 *p_addr, ZXIC_UINT32 *p_wr_mask) +{ + ZXIC_UINT32 block_entry_num = 0; + ZXIC_UINT32 entry_pos = 0; + ZXIC_COMM_CHECK_POINT(p_tbl_cfg); + ZXIC_COMM_CHECK_POINT(p_block_idx); + ZXIC_COMM_CHECK_POINT(p_addr); + ZXIC_COMM_CHECK_POINT(p_wr_mask); + + block_entry_num = DPP_ACL_ENTRY_MAX_GET(p_tbl_cfg->key_mode, 1); + *p_block_idx = p_tbl_cfg->block_array[handle / block_entry_num]; + *p_addr = (handle % block_entry_num) / (1U << p_tbl_cfg->key_mode); + entry_pos = (handle % block_entry_num) % (1U << p_tbl_cfg->key_mode); + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(entry_pos, 8); + *p_wr_mask = DPP_ACL_ENTRY_WRMODE_GET(p_tbl_cfg->key_mode, entry_pos); + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_tbl_cfg +* @param handle +* @param p_block_idx +* @param p_addr +* @param p_wr_mask +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/08/03 +************************************************************/ +DPP_STATUS 
dpp_acl_hdw_addr_get_ex(DPP_ACL_TBL_CFG_T *p_tbl_cfg, + ZXIC_UINT32 handle, ZXIC_UINT32 *p_block_idx, + ZXIC_UINT32 *p_addr, ZXIC_UINT32 *p_wr_mask) +{ + ZXIC_UINT32 block_entry_num = 0; + ZXIC_UINT32 entry_pos = 0; + DPP_STATUS rc = DPP_OK; + + if (0 == (p_tbl_cfg && p_block_idx && p_addr && p_wr_mask)) { + ZXIC_COMM_TRACE_ERROR( + "\n ICM %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + rc = ZXIC_PAR_CHK_POINT_NULL; + return rc; + } + block_entry_num = DPP_ACL_ENTRY_MAX_GET(p_tbl_cfg->key_mode, 1); + *p_block_idx = p_tbl_cfg->block_array[handle / block_entry_num]; + *p_addr = (handle % block_entry_num) / (1U << p_tbl_cfg->key_mode); + entry_pos = (handle % block_entry_num) % (1U << p_tbl_cfg->key_mode); + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(entry_pos, 8); + *p_wr_mask = DPP_ACL_ENTRY_WRMODE_GET(p_tbl_cfg->key_mode, entry_pos); + return rc; +} + +/***********************************************************/ +/** acl重排写硬件回调函数 +* @param old_index +* @param new_index +* @param p_cfg +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/23 +************************************************************/ +DPP_STATUS dpp_acl_entry_swap(ZXIC_UINT32 old_index, ZXIC_UINT32 new_index, + ZXIC_VOID *p_cfg) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 block_idx = 0; + ZXIC_UINT32 ram_addr = 0; + ZXIC_UINT32 wr_mask = 0; + ZXIC_UINT8 *p_old_rslt_temp = NULL; + ZXIC_UINT8 *p_new_rslt_temp = NULL; + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + DPP_ACL_CFG_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + DPP_ACL_KEY_INFO_T *p_acl_key = NULL; + DPP_ETCAM_ENTRY_T etcam_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cfg); + + p_tbl_cfg = STRUCT_ENTRY_POINT(p_cfg, DPP_ACL_TBL_CFG_T, index_mng); + ZXIC_COMM_CHECK_INDEX(p_tbl_cfg->table_id, 0, + DPP_ACL_TBL_ID_NUM - + 1); /* modify coverity yinxh 2021.03.10*/ + p_acl_cfg = GET_STRUCT_ENTRY_POINT(p_tbl_cfg, 
DPP_ACL_CFG_T, + acl_tbls[p_tbl_cfg->table_id]); + + ZXIC_COMM_CHECK_INDEX_UPPER( + old_index, + p_tbl_cfg->entry_num); /* modify coverity yinxh 2021.03.10*/ + p_acl_key = p_tbl_cfg->acl_key_buff[old_index]; + + etcam_entry.p_data = temp_data; + etcam_entry.p_mask = temp_mask; + etcam_entry.mode = p_tbl_cfg->key_mode; + ZXIC_COMM_CHECK_DEV_INDEX( + p_acl_cfg->dev_id, p_tbl_cfg->key_mode, DPP_ACL_KEY_640b, + DPP_ACL_KEY_INVALID - 1); /* modify coverity yinxh 2021.03.10*/ + ZXIC_COMM_MEMCPY(etcam_entry.p_data, p_acl_key->key, + DPP_ETCAM_ENTRY_SIZE_GET(p_tbl_cfg->key_mode)); + ZXIC_COMM_MEMCPY(etcam_entry.p_mask, + p_acl_key->key + + DPP_ETCAM_ENTRY_SIZE_GET(p_tbl_cfg->key_mode), + DPP_ETCAM_ENTRY_SIZE_GET(p_tbl_cfg->key_mode)); + + /* update new as result */ + if (p_tbl_cfg->as_enable) { + /* eTcam result table as to eRam. */ + ZXIC_COMM_CHECK_DEV_INDEX(p_acl_cfg->dev_id, p_tbl_cfg->as_mode, + DPP_ACL_AS_MODE_16b, + DPP_ACL_AS_MODE_INVALID - 1); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + p_acl_cfg->dev_id, old_index, + (2U << (p_tbl_cfg->as_mode))); + p_old_rslt_temp = DPP_ACL_AS_RSLT_INFO_GET( + p_tbl_cfg->as_rslt_buff, old_index, + DPP_ACL_AS_RSLT_SIZE_GET_EX(p_tbl_cfg->as_mode)); + rc = p_acl_cfg->p_as_rslt_write_fun( + p_acl_cfg->dev_id, + p_tbl_cfg->as_eRam_base + p_tbl_cfg->as_idx_base, + new_index, p_tbl_cfg->as_mode, p_old_rslt_temp); + ZXIC_COMM_CHECK_RC(rc, "acl_as_rslt_write_fun"); + + /* update result buffer */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + p_acl_cfg->dev_id, new_index, 2U << p_tbl_cfg->as_mode); + p_new_rslt_temp = DPP_ACL_AS_RSLT_INFO_GET( + p_tbl_cfg->as_rslt_buff, new_index, + DPP_ACL_AS_RSLT_SIZE_GET_EX(p_tbl_cfg->as_mode)); + ZXIC_COMM_MEMCPY(p_new_rslt_temp, p_old_rslt_temp, + (ZXIC_UINT32)DPP_ACL_AS_RSLT_SIZE_GET_EX( + p_tbl_cfg->as_mode)); + } else if (p_tbl_cfg->is_as_ddr) { + /* eTcam result table as to DDR. 
*/ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + p_acl_cfg->dev_id, old_index, 2U << p_tbl_cfg->as_mode); + p_old_rslt_temp = DPP_ACL_AS_RSLT_INFO_GET( + p_tbl_cfg->as_rslt_buff, old_index, + DPP_ACL_AS_RSLT_SIZE_GET_EX(p_tbl_cfg->as_mode)); + + rc = p_tbl_cfg->p_as_ddr_wr_fun( + p_acl_cfg->dev_id, p_tbl_cfg->tbl_type, + p_tbl_cfg->table_id, p_tbl_cfg->dir_tbl_share_type, + p_tbl_cfg->ddr_baddr, p_tbl_cfg->ddr_ecc_en, + p_tbl_cfg->idx_offset, p_tbl_cfg->as_mode, + p_old_rslt_temp); + ZXIC_COMM_CHECK_RC(rc, "acl_as_ddr_rslt_writ_fun"); + + /* update result buffer */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + p_acl_cfg->dev_id, new_index, 2U << p_tbl_cfg->as_mode); + p_new_rslt_temp = DPP_ACL_AS_RSLT_INFO_GET( + p_tbl_cfg->as_rslt_buff, new_index, + DPP_ACL_AS_RSLT_SIZE_GET_EX(p_tbl_cfg->as_mode)); + ZXIC_COMM_MEMCPY(p_new_rslt_temp, p_old_rslt_temp, + (ZXIC_UINT32)DPP_ACL_AS_RSLT_SIZE_GET_EX( + p_tbl_cfg->as_mode)); + } + + /* add new entry */ + rc = dpp_acl_hdw_addr_get_ex(p_tbl_cfg, new_index, &block_idx, + &ram_addr, &wr_mask); + ZXIC_COMM_CHECK_RC(rc, "dpp_acl_hdw_addr_get_ex"); + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_etcam_entry_add(p_acl_cfg->dev, ram_addr, block_idx, wr_mask, + DPP_ETCAM_OPR_DM, &etcam_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_etcam_entry_add"); +#endif + + /* delete old entry */ + rc = dpp_acl_hdw_addr_get_ex(p_tbl_cfg, old_index, &block_idx, + &ram_addr, &wr_mask); + ZXIC_COMM_CHECK_RC(rc, "dpp_acl_hdw_addr_get_ex"); + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_etcam_entry_del(p_acl_cfg->dev, ram_addr, block_idx, wr_mask); + ZXIC_COMM_CHECK_RC(rc, "dpp_etcam_entry_del"); +#endif + + p_acl_key->handle = new_index; + p_tbl_cfg->acl_key_buff[new_index] = p_acl_key; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取ACL全局配置 +* @param p_acl_cfg ACL公共管理数据结构指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +************************************************************/ +DPP_STATUS dpp_acl_cfg_get(DPP_DEV_T *dev, 
DPP_ACL_CFG_EX_T **p_acl_cfg) +{ + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + if (NULL == g_p_acl_ex_cfg[slot]) { + ZXIC_COMM_TRACE_DEBUG( + "dpp_acl_cfg_get[%d] fail, etcam_is not init!\n", slot); + return DPP_ACL_RC_ETCAMID_NOT_INIT; + } + + *p_acl_cfg = g_p_acl_ex_cfg[slot]; + + return DPP_OK; +} + +/***********************************************************/ +/** 设置ACL全局配置 +* @param p_acl_cfg ACL公共管理数据结构指针 +* +* @return +************************************************************/ +ZXIC_VOID dpp_acl_cfg_set(DPP_DEV_T *dev, DPP_ACL_CFG_EX_T *p_acl_cfg) +{ + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT_RETURN_NONE(dev); + + slot = DEV_PCIE_SLOT(dev); + if (slot < DPP_PCIE_SLOT_MAX) { + g_p_acl_ex_cfg[slot] = p_acl_cfg; + } + + return; +} + +#if ZXIC_REAL("init_ex MUL_PRI") + +/***********************************************************/ +/** +* @param p_acl_cfg +* @param p_client +* @param flags +* @param p_as_wrt_fun +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/12/29 +************************************************************/ +DPP_STATUS dpp_acl_cfg_init_ex(DPP_DEV_T *dev, DPP_ACL_CFG_EX_T *p_acl_cfg, + ZXIC_VOID *p_client, ZXIC_UINT32 flags, + ACL_AS_RSLT_WRT_FUNCTION p_as_wrt_fun) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + ZXIC_COMM_MEMSET(p_acl_cfg, 0, ZXIC_SIZEOF(DPP_ACL_CFG_EX_T)); + g_p_acl_ex_cfg[slot] = p_acl_cfg; + + p_acl_cfg->p_client = p_client; + // p_acl_cfg->dev_id = (ZXIC_UINT32)(ZXIC_COMM_PTR_TO_VAL(p_acl_cfg->p_client) & 0xFFFFFFFF); + p_acl_cfg->dev_id = DEV_ID(dev); + p_acl_cfg->dev = dev; + p_acl_cfg->flags = flags; + + if (flags & DPP_ACL_FLAG_ETCAM0_EN) { + p_acl_cfg->acl_etcamids.is_valid = 1; + 
+ /* if (flags & DPP_ACL_FLAG_ETCAM0_AS)*/ + /* {*/ + /* p_acl_cfg->acl_etcamids[0].as_enable = 1;*/ + p_acl_cfg->acl_etcamids.as_eRam_base = 0; + /* }*/ + + rc = zxic_comm_double_link_init( + DPP_ACL_TBL_ID_NUM, + &(p_acl_cfg->acl_etcamids.tbl_list)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_init"); + } + + if (p_as_wrt_fun == NULL) { + // p_acl_cfg->p_as_rslt_write_fun = dpp_acl_as_rslt_write; + // p_acl_cfg->p_as_rslt_read_fun = dpp_acl_as_rslt_read; + } else { + p_acl_cfg->p_as_rslt_write_fun = p_as_wrt_fun; + } + + return DPP_OK; +} + +/***********************************************************/ +/** acl业务表初始化,注意分配给一个table的多个block_idx + 必须按从小到大的顺序给定。支持多个优先级模式,暂不对外开放。 +* @param p_acl_cfg +* @param table_id +* @param as_enable +* @param entry_num +* @param pri_mode +* @param key_mode +* @param as_mode +* @param block_num +* @param p_block_idx +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/12/23 +************************************************************/ +DPP_STATUS dpp_acl_tbl_init_ex(DPP_ACL_CFG_EX_T *p_acl_cfg, + ZXIC_UINT32 table_id, ZXIC_UINT32 as_enable, + ZXIC_UINT32 entry_num, + DPP_ACL_PRI_MODE_E pri_mode, + ZXIC_UINT32 key_mode, DPP_ACL_AS_MODE_E as_mode, + ZXIC_UINT32 as_baddr, ZXIC_UINT32 block_num, + ZXIC_UINT32 *p_block_idx) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 acl_key_buff_size = 0; + ZXIC_UINT32 slot = 0; + /* ZXIC_UINT32 as_idx_base = 0;*/ + + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + ZXIC_COMM_CHECK_INDEX(table_id, DPP_ACL_TBL_ID_MIN, DPP_ACL_TBL_ID_MAX); + ZXIC_COMM_CHECK_INDEX(as_enable, 0, 1); + ZXIC_COMM_CHECK_INDEX(pri_mode, DPP_ACL_PRI_EXPLICIT, + DPP_ACL_PRI_SPECIFY); + ZXIC_COMM_CHECK_INDEX(key_mode, DPP_ACL_KEY_640b, DPP_ACL_KEY_80b); + ZXIC_COMM_CHECK_INDEX(as_mode, DPP_ACL_AS_MODE_16b, + DPP_ACL_AS_MODE_128b); + ZXIC_COMM_CHECK_INDEX(block_num, 0, DPP_ETCAM_BLOCK_NUM); + ZXIC_COMM_CHECK_POINT(p_block_idx); + ZXIC_COMM_CHECK_POINT(p_acl_cfg->dev); + + slot = p_acl_cfg->dev->pcie_channel.slot; + 
ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + g_p_acl_ex_cfg[slot] = p_acl_cfg; + + if (p_acl_cfg->acl_tbls[table_id].is_used) { + ZXIC_COMM_TRACE_ERROR("table_id[ %d ] is already used!\n", + table_id); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_INVALID_TBLID; + } + + if (!p_acl_cfg->acl_etcamids.is_valid) { + ZXIC_COMM_TRACE_ERROR("etcam is not init!\n"); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_ETCAMID_NOT_INIT; + } + + if (dpp_acl_entrynum_to_blocknum(entry_num, key_mode) > block_num) { + ZXIC_COMM_TRACE_ERROR( + "key_mode[ %d ], the etcam block_num[ %d ] is not enough for entry_num[ 0x%x ].\n", + key_mode, block_num, entry_num); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_INVALID_BLOCKNUM; + } else if (dpp_acl_entrynum_to_blocknum(entry_num, key_mode) < + block_num) { + ZXIC_COMM_TRACE_DEBUG( + "key_mode[ %d ], the etcam block_num[ %d ] is more than entry_num[ 0x%x ], better to reduce block_num in order to match with entry_num.\n", + key_mode, block_num, entry_num); + } else { + ZXIC_COMM_TRACE_DEBUG( + "key_mode[ %d ], the etcam block_num[ %d ] is match with entry_num[ 0x%x ].\n", + key_mode, block_num, entry_num); + } + + p_acl_cfg->acl_tbls[table_id].as_enable = as_enable; + + /* + if ((!p_acl_cfg->acl_etcamids[etcam_id].as_enable && as_enable) || + (p_acl_cfg->acl_etcamids[etcam_id].as_enable && !as_enable)) { + ZXIC_COMM_TRACE_ERROR( "tbl's as_enable is not according to ETCAM_id's.\n"); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_INVALID_PARA; + } + */ + + if (as_enable) { + p_acl_cfg->acl_tbls[table_id].as_idx_base = as_baddr; + p_acl_cfg->acl_tbls[table_id].as_rslt_buff = ZXIC_COMM_MALLOC( + entry_num * DPP_ACL_AS_RSLT_SIZE_GET_EX(as_mode)); + ZXIC_COMM_CHECK_POINT( + p_acl_cfg->acl_tbls[table_id].as_rslt_buff); + } + + if ((pri_mode == DPP_ACL_PRI_EXPLICIT) || + (pri_mode == DPP_ACL_PRI_IMPLICIT)) { + rc = (DPP_STATUS)zxic_comm_indexfill_init( + &(p_acl_cfg->acl_tbls[table_id].index_mng), entry_num, + dpp_acl_entry_pri_cmp, 
dpp_acl_entry_swap, + ZXIC_SIZEOF(ZXIC_UINT32)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_indexfill_init"); + + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT( + p_acl_cfg->dev_id, entry_num, + (ZXIC_UINT32)ZXIC_SIZEOF(DPP_ACL_KEY_INFO_T *)); + acl_key_buff_size = + (entry_num * ZXIC_SIZEOF(DPP_ACL_KEY_INFO_T *)) & + 0xffffffff; + p_acl_cfg->acl_tbls[table_id].acl_key_buff = + (DPP_ACL_KEY_INFO_T **)ZXIC_COMM_MALLOC( + acl_key_buff_size); + ZXIC_COMM_CHECK_POINT( + p_acl_cfg->acl_tbls[table_id].acl_key_buff); + } + + rc = (DPP_STATUS)zxic_comm_rb_init( + &(p_acl_cfg->acl_tbls[table_id].acl_rb), 0, + ZXIC_SIZEOF(DPP_ACL_KEY_INFO_T) + DPP_ACL_KEYSIZE_GET(key_mode), + dpp_acl_key_cmp); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_rb_init"); + + p_acl_cfg->acl_tbls[table_id].table_id = table_id; + p_acl_cfg->acl_tbls[table_id].pri_mode = pri_mode; + p_acl_cfg->acl_tbls[table_id].key_mode = key_mode; + p_acl_cfg->acl_tbls[table_id].entry_num = entry_num; + p_acl_cfg->acl_tbls[table_id].as_mode = as_mode; + p_acl_cfg->acl_tbls[table_id].is_used = 1; + + // ZXIC_COMM_TRACE_ERROR("p_acl_cfg->acl_tbls[%u].table_id [%d]\n", table_id, p_acl_cfg->acl_tbls[table_id].table_id); + // ZXIC_COMM_TRACE_ERROR("p_acl_cfg->acl_tbls[%u].pri_mode [%d]\n", table_id, p_acl_cfg->acl_tbls[table_id].pri_mode); + // ZXIC_COMM_TRACE_ERROR("p_acl_cfg->acl_tbls[%u].key_mode [%d]\n", table_id, p_acl_cfg->acl_tbls[table_id].key_mode); + // ZXIC_COMM_TRACE_ERROR("p_acl_cfg->acl_tbls[%u].entry_num [%d]\n", table_id, p_acl_cfg->acl_tbls[table_id].entry_num); + // ZXIC_COMM_TRACE_ERROR("p_acl_cfg->acl_tbls[%u].as_mode [%d]\n", table_id, p_acl_cfg->acl_tbls[table_id].as_mode); + + INIT_D_NODE(&(p_acl_cfg->acl_tbls[table_id].entry_dn), + &(p_acl_cfg->acl_tbls[table_id])); + rc = (DPP_STATUS)zxic_comm_double_link_insert_last( + &(p_acl_cfg->acl_tbls[table_id].entry_dn), + &(p_acl_cfg->acl_etcamids.tbl_list)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_double_link_insert_last"); + + p_acl_cfg->acl_tbls[table_id].block_num = 
block_num; + p_acl_cfg->acl_tbls[table_id].block_array = + ZXIC_COMM_MALLOC(block_num * ZXIC_SIZEOF(ZXIC_UINT32)); + ZXIC_COMM_CHECK_POINT(p_acl_cfg->acl_tbls[table_id].block_array); + + for (i = 0; i < block_num; i++) { + if (p_acl_cfg->acl_blocks[p_block_idx[i]].is_used) { + ZXIC_COMM_TRACE_ERROR( + "the block[ %d ] is already used by table[ %d ]!\n", + p_block_idx[i], + p_acl_cfg->acl_blocks[p_block_idx[i]].tbl_id); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_INVALID_BLOCKID; + } + + p_acl_cfg->acl_tbls[table_id].block_array[i] = p_block_idx[i]; + p_acl_cfg->acl_blocks[p_block_idx[i]].is_used = 1; + p_acl_cfg->acl_blocks[p_block_idx[i]].tbl_id = table_id; + p_acl_cfg->acl_blocks[p_block_idx[i]].idx_base = + ((DPP_ACL_ENTRY_MAX_GET(key_mode, i)) >> + BLOCK_IDXBASE_BIT_OFF) & + BLOCK_IDXBASE_BIT_MASK; + +#ifdef DPP_FLOW_HW_INIT + /* cfg block table_id */ + rc = dpp_etcam_block_tbl_id_set(p_acl_cfg->dev, p_block_idx[i], + table_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_etcam_block_tbl_id_set"); + + /* cfg block base_addr */ + rc = dpp_etcam_block_baddr_set( + p_acl_cfg->dev, p_block_idx[i], + p_acl_cfg->acl_blocks[p_block_idx[i]].idx_base); + ZXIC_COMM_CHECK_RC(rc, "dpp_etcam_block_baddr_set"); +#endif + } + + return DPP_OK; +} + +DPP_STATUS dpp_acl_res_destroy(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 pri_mode = 0; + ZXIC_UINT32 slot = 0; + DPP_DEV_T dev = { 0 }; + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + for (slot = 0; slot < DPP_PCIE_SLOT_MAX; slot++) { + dev.device_id = dev_id; + dev.pcie_channel.slot = slot; + rc = dpp_acl_cfg_get(&dev, &p_acl_cfg); //获取ACL表资源配置 + if ((rc != DPP_OK) || (p_acl_cfg == NULL)) { + continue; + } + + if (!p_acl_cfg->acl_etcamids.is_valid) { + ZXIC_COMM_TRACE_ERROR("etcam is not init!\n"); + return DPP_ACL_RC_ETCAMID_NOT_INIT; + } + + for (table_id = 
DPP_ACL_TBL_ID_MIN; + table_id <= DPP_ACL_TBL_ID_MAX; table_id++) { + p_tbl_cfg = p_acl_cfg->acl_tbls + table_id; + if (!p_tbl_cfg->is_used) { + ZXIC_COMM_TRACE_DEBUG( + "table_id[ %d ] is not used!\n", + table_id); + continue; + } + + rc = (DPP_STATUS)zxic_comm_rb_destroy( + &(p_tbl_cfg->acl_rb)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_rb_destroy"); + + rc = zxic_comm_indexfill_destroy( + &(p_tbl_cfg->index_mng)); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_indexfill_destroy"); + + as_enable = p_tbl_cfg->as_enable; + if (as_enable) { + if (p_tbl_cfg->as_rslt_buff) { + ZXIC_COMM_FREE(p_tbl_cfg->as_rslt_buff); + p_tbl_cfg->as_rslt_buff = NULL; + } + } + + pri_mode = p_tbl_cfg->pri_mode; + if ((pri_mode == DPP_ACL_PRI_EXPLICIT) || + (pri_mode == DPP_ACL_PRI_IMPLICIT)) { + if (p_tbl_cfg->acl_key_buff) { + ZXIC_COMM_FREE(p_tbl_cfg->acl_key_buff); + p_tbl_cfg->acl_key_buff = NULL; + } + } + + if (p_tbl_cfg->block_array) { + ZXIC_COMM_FREE(p_tbl_cfg->block_array); + p_tbl_cfg->block_array = NULL; + } + } + } + + return DPP_OK; +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table.c new file mode 100644 index 000000000000..ff2976744bcb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table.c @@ -0,0 +1,8359 @@ +#include "zxic_private_top.h" +#include "zxic_common.h" +#include "dpp_dtb_table_api.h" +#include "dpp_dtb_table.h" +#include "dpp_se_api.h" +#include "dpp_se_cfg.h" +#include "dpp_hash_crc.h" +#include "dpp_hash.h" +#include "dpp_acl.h" +#include "dpp_etcam.h" +#include "dpp_se.h" +#include "dpp_dtb.h" +#include "dpp_dtb_cfg.h" +#include "dpp_dev.h" +#include "dpp_sdt.h" +#include "dpp_dtb.h" +#include "dpp_apt_se.h" +#include "dpp_hash.h" +#include "dpp_agent_channel.h" +#include "dpp_kernel_init.h" +#include "dpp_stat_cfg.h" + +extern ZXIC_UINT32 g_lpm_hw_dat_buf[LPM_HW_DAT_BUFF_SIZE_MAX]; 
/* LPM hardware data offset; defined in another translation unit. */
extern ZXIC_UINT32 g_lpm_hw_dat_offset;

/* Manager for mixed DTB tables. */
DPP_DTB_MIXED_TABLE_T *p_dtb_mixed_table_mgr;

ZXIC_UINT32 g_dpp_dtb_int_enable = DISABLE; /* interrupt disabled by default */
ZXIC_UINT32 g_dtb_srh_mode = 1; /* search mode, hardware by default; 0: software lookup, 1: hardware lookup */

static ZXIC_UINT32 g_dtb_cmd_endian; /* cmd endianness; 0: little endian, 1: big endian */

/* Bit-field layouts of the 128-bit DTB download commands.
 * Each row is { name, lsb_pos, len } as consumed by dpp_dtb_write_table_cmd().
 */
DPP_DTB_FIELD_T g_dtb_ddr_table_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "rw_len", 123, 2 },
	{ "v46_flag", 121, 1 },
	{ "lpm_wr_vld", 120, 1 },
	{ "baddr", 119, 20 },
	{ "ecc_en", 99, 1 },
	{ "rw_addr", 29, 30 },
};

DPP_DTB_FIELD_T g_dtb_eram_table_cmd_1_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "data_mode", 123, 2 },
	{ "cpu_wr", 121, 1 },
	{ "cpu_rd", 120, 1 },
	{ "cpu_rd_mode", 119, 1 },
	{ "addr", 113, 26 },
	{ "data_h", 0, 1 },
};

DPP_DTB_FIELD_T g_dtb_eram_table_cmd_64_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "data_mode", 123, 2 },
	{ "cpu_wr", 121, 1 },
	{ "cpu_rd", 120, 1 },
	{ "cpu_rd_mode", 119, 1 },
	{ "addr", 113, 26 },
	{ "data_h", 63, 32 },
	{ "data_l", 31, 32 },
};

DPP_DTB_FIELD_T g_dtb_eram_table_cmd_128_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "data_mode", 123, 2 },
	{ "cpu_wr", 121, 1 },
	{ "cpu_rd", 120, 1 },
	{ "cpu_rd_mode", 119, 1 },
	{ "addr", 113, 26 },
};

DPP_DTB_FIELD_T g_dtb_zcam_table_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "ram_reg_flag", 123, 1 },
	{ "zgroup_id", 122, 2 },
	{ "zblock_id", 120, 3 },
	{ "zcell_id", 117, 2 },
	{ "mask", 115, 4 },
	{ "sram_addr", 111, 9 },
};

DPP_DTB_FIELD_T g_dtb_etcam_table_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "block_sel", 123, 3 },
	{ "init_en", 120, 1 },
	{ "row_or_col_msk", 119, 1 },
	{ "vben", 118, 1 },
	{ "reg_tcam_flag", 117, 1 },
	{ "uload", 116, 8 },
	{ "rd_wr", 108, 1 },
	{ "wr_mode", 107, 8 },
	{ "data_or_mask", 99, 1 },
	{ "addr", 98, 9 },
	{ "vbit", 89, 8 },
};

DPP_DTB_FIELD_T g_dtb_mc_hash_table_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "type_mode", 126, 3 },
	{ "std_h", 63, 32 },
	{ "std_l", 31, 32 },
};

/* Download-table descriptors: { name, type, field count, field layout }. */
DPP_DTB_TABLE_T g_dpp_dtb_table_info[] = {
	{
		"ddr",
		DTB_TABLE_DDR,
		8,
		g_dtb_ddr_table_cmd_info,
	},
	{
		"eram 1 bit",
		DTB_TABLE_ERAM_1,
		8,
		g_dtb_eram_table_cmd_1_info,
	},
	{
		"eram 64 bit",
		DTB_TABLE_ERAM_64,
		9,
		g_dtb_eram_table_cmd_64_info,
	},
	{
		"eram 128 bit",
		DTB_TABLE_ERAM_128,
		7,
		g_dtb_eram_table_cmd_128_info,
	},
	{
		"zcam",
		DTB_TABLE_ZCAM,
		8,
		g_dtb_zcam_table_cmd_info,
	},
	{
		"etcam",
		DTB_TABLE_ETCAM,
		13,
		g_dtb_etcam_table_cmd_info,
	},
	{ "mc_hash", DTB_TABLE_MC_HASH, 4, g_dtb_mc_hash_table_cmd_info },
};

/* Bit-field layouts of the 128-bit DTB dump (upload) commands. */
DPP_DTB_FIELD_T g_dtb_eram_dump_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "up_type", 126, 2 },
	{ "base_addr", 106, 19 },
	{ "tb_depth", 83, 20 },
	{ "tb_dst_addr_h", 63, 32 },
	{ "tb_dst_addr_l", 31, 32 },
};

DPP_DTB_FIELD_T g_dtb_ddr_dump_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "up_type", 126, 2 },
	{ "base_addr", 117, 30 },
	{ "tb_depth", 83, 20 },
	{ "tb_dst_addr_h", 63, 32 },
	{ "tb_dst_addr_l", 31, 32 },
};

DPP_DTB_FIELD_T g_dtb_zcam_dump_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "up_type", 126, 2 },
	{ "zgroup_id", 124, 2 },
	{ "zblock_id", 122, 3 },
	{ "ram_reg_flag", 119, 1 },
	{ "z_reg_cell_id", 118, 2 },
	{ "sram_addr", 116, 9 },
	{ "tb_depth", 97, 10 },
	{ "tb_width", 65, 2 },
	{ "tb_dst_addr_h", 63, 32 },
	{ "tb_dst_addr_l", 31, 32 },
};

DPP_DTB_FIELD_T g_dtb_etcam_dump_cmd_info[] = {
	{ "valid", 127, 1 },
	{ "up_type", 126, 2 },
	{ "block_sel", 124, 3 },
	{ "addr", 121, 9 },
	{ "rd_mode", 112, 8 },
	{ "data_or_mask", 104, 1 },
	{ "tb_depth", 91, 10 },
	{ "tb_width", 81, 2 },
	{ "tb_dst_addr_h", 63, 32 },
	{ "tb_dst_addr_l", 31, 32 },
};

/* Dump-table descriptors: { name, type, field count, field layout }. */
DPP_DTB_TABLE_T g_dpp_dtb_dump_info[] = {
	{
		"eram",
		DTB_DUMP_ERAM,
		6,
		g_dtb_eram_dump_cmd_info,
	},
	{
		"ddr",
		DTB_DUMP_DDR,
		6,
		g_dtb_ddr_dump_cmd_info,
	},
	{
		"zcam",
		DTB_DUMP_ZCAM,
		11,
		g_dtb_zcam_dump_cmd_info,
	},
	{
		"etcam",
		DTB_DUMP_ETCAM,
		10,
		g_dtb_etcam_dump_cmd_info,
	},
};

/** Configure the DTB interrupt enable state.
 * @param int_enable interrupt enable flag (stored as-is)
 * @return DPP_OK
 */
DPP_STATUS dpp_dtb_interrupt_status_set(ZXIC_UINT32 int_enable)
{
	g_dpp_dtb_int_enable = int_enable;

	return DPP_OK;
}

/** Get the current DTB interrupt enable state.
 * @return the value last stored by dpp_dtb_interrupt_status_set()
 */
ZXIC_UINT32 dpp_dtb_interrupt_status_get(ZXIC_VOID)
{
	return g_dpp_dtb_int_enable;
}

/** Set the DTB cmd endianness.
 * @param endian 0: little endian, 1: big endian
 * @return DPP_OK
 */
DPP_STATUS dpp_dtb_cmd_endian_status_set(ZXIC_UINT32 endian)
{
	g_dtb_cmd_endian = endian;

	return DPP_OK;
}

/** Get the DTB cmd endianness.
 * NOTE(review): declared DPP_STATUS but returns the endian flag itself —
 * the interrupt counterpart returns ZXIC_UINT32; confirm intended.
 */
DPP_STATUS dpp_dtb_cmd_endian_status_get(ZXIC_VOID)
{
	return g_dtb_cmd_endian;
}

/** Look up the DTB download-table descriptor for a table type.
 * @param table_type DTB table type (DTB_TABLE_*)
 * @return descriptor pointer, or NULL if table_type is out of range
 */
DPP_DTB_TABLE_T *dpp_table_info_get(ZXIC_UINT32 table_type)
{
	ZXIC_COMM_CHECK_INDEX_RETURN_NULL(table_type, 0,
					  DTB_TABLE_ENUM_MAX - 1);

	return &g_dpp_dtb_table_info[table_type];
}

/** Look up the DTB dump-table descriptor for an upload type.
 * @param up_type DTB dump type (DTB_DUMP_*)
 * @return descriptor pointer, or NULL if up_type is out of range
 */
DPP_DTB_TABLE_T *dpp_dump_info_get(ZXIC_UINT32 up_type)
{
	ZXIC_COMM_CHECK_INDEX_RETURN_NULL(up_type, 0, DTB_DUMP_ENUM_MAX - 1);

	return &g_dpp_dtb_dump_info[up_type];
}

/** Assemble a 128-bit DTB download command.
 * @param dev_id     device id
 * @param table_type DTB table type, selects the field layout
 * @param p_cmd_data per-field command values, one ZXIC_UINT32 per field
 * @param p_cmd_buff output buffer for the packed 128-bit command
 * @return DPP_OK on success, error code otherwise
 */
DPP_STATUS dpp_dtb_write_table_cmd(ZXIC_UINT32 dev_id,
				   DPP_DTB_TABLE_INFO_E table_type,
				   ZXIC_VOID *p_cmd_data, ZXIC_VOID *p_cmd_buff)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 field_cnt = 0;

	DPP_DTB_TABLE_T *p_table_info;
	DPP_DTB_FIELD_T *p_field_info = NULL;
	ZXIC_UINT32 temp_data = 0;

	ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, table_type, 0,
				  DTB_TABLE_ENUM_MAX - 1);
	ZXIC_COMM_CHECK_POINT(p_cmd_data);
	ZXIC_COMM_CHECK_POINT(p_cmd_buff);

	p_table_info = dpp_table_info_get(table_type);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_table_info);
	p_field_info = p_table_info->p_fields;
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_field_info);

	/* Extract each field value and pack it at its bit position/width. */
	for (field_cnt = 0; field_cnt < p_table_info->field_num; field_cnt++) {
		temp_data = *((ZXIC_UINT32 *)p_cmd_data + field_cnt) &
			    ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32,
						   p_field_info[field_cnt].len);

		rc = zxic_comm_write_bits_ex((ZXIC_UINT8 *)p_cmd_buff,
					     DTB_TABLE_CMD_SIZE_BIT, temp_data,
					     p_field_info[field_cnt].lsb_pos,
					     p_field_info[field_cnt].len);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_write_bits");
	}

	return DPP_OK;
}

/** Assemble a 128-bit DTB dump (upload) command.
 * @param dev_id     device id
 * @param dump_type  DTB dump type, selects the field layout
 * @param p_cmd_data per-field command values, one ZXIC_UINT32 per field
 * @param p_cmd_buff output buffer for the packed 128-bit command
 * @return DPP_OK on success, error code otherwise
 */
DPP_STATUS dpp_dtb_write_dump_cmd(ZXIC_UINT32 dev_id,
				  DPP_DTB_DUMP_INFO_E dump_type,
				  ZXIC_VOID *p_cmd_data, ZXIC_VOID *p_cmd_buff)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 field_cnt = 0;

	DPP_DTB_TABLE_T *p_table_info;
	DPP_DTB_FIELD_T *p_field_info = NULL;
	ZXIC_UINT32 temp_data = 0;

	ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dump_type, 0, DTB_DUMP_ENUM_MAX - 1);
	ZXIC_COMM_CHECK_POINT(p_cmd_data);
	ZXIC_COMM_CHECK_POINT(p_cmd_buff);

	p_table_info = dpp_dump_info_get(dump_type);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_table_info);
	p_field_info = p_table_info->p_fields;
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_field_info);

	/* Extract each field value and pack it at its bit position/width. */
	for (field_cnt = 0; field_cnt < p_table_info->field_num; field_cnt++) {
		temp_data = *((ZXIC_UINT32 *)p_cmd_data + field_cnt) &
			    ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32,
						   p_field_info[field_cnt].len);

		rc = zxic_comm_write_bits_ex((ZXIC_UINT8 *)p_cmd_buff,
					     DTB_TABLE_CMD_SIZE_BIT, temp_data,
					     p_field_info[field_cnt].lsb_pos,
					     p_field_info[field_cnt].len);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_write_bits");
	}

	return DPP_OK;
}

/** Copy one assembled entry (command + optional data) into the download buffer.
 * If entry->data_in_cmd_flag is 1 the data is already embedded in the command
 * and only the command is copied.
 * @param p_data_buff buffer head pointer
 * @param addr_offset byte offset (relative to buffer head) to write at
 * @param entry       entry to serialize
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_data_write(ZXIC_UINT8 *p_data_buff, ZXIC_UINT32 addr_offset,
			      DPP_DTB_ENTRY_T *entry)
{
	/* NOTE(review): p_data_buff and entry are dereferenced in these
	 * initializers before the NULL checks below run — consider moving
	 * the checks ahead of the declarations.
	 */
	ZXIC_UINT8 *p_cmd = p_data_buff + addr_offset;
	ZXIC_UINT32 cmd_size = DTB_TABLE_CMD_SIZE_BIT / 8;

	ZXIC_UINT8 *p_data = p_cmd + cmd_size;
	ZXIC_UINT32 data_size = entry->data_size;

	ZXIC_UINT8 *cmd = (ZXIC_UINT8 *)entry->cmd;
	ZXIC_UINT8 *data = (ZXIC_UINT8 *)entry->data;

	ZXIC_COMM_CHECK_POINT(p_data_buff);
	ZXIC_COMM_CHECK_POINT(entry);

	/* write the command */
	ZXIC_COMM_MEMCPY_S(p_cmd, cmd_size, cmd, cmd_size);

	/* write the data unless it is embedded in the command */
	if (!entry->data_in_cmd_flag) {
		zxic_comm_swap(data, data_size);
		ZXIC_COMM_MEMCPY_S(p_data, data_size, data, data_size);
	}

	return DPP_OK;
}

/** Push a prepared download buffer to the DTB command registers,
 * retrying once on timeout.
 * @param dev               device
 * @param queue_id          queue id
 * @param down_table_len    data length in bytes
 * @param p_down_table_buff download data buffer
 * @param p_element_id      out: element id used for the download
 * @return DPP_OK, or ZXIC_PAR_CHK_DEV_STATUS_OFF when the device is off
 */
DPP_STATUS dpp_dtb_write_down_table_data(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
					 ZXIC_UINT32 down_table_len,
					 ZXIC_UINT8 *p_down_table_buff,
					 ZXIC_UINT32 *p_element_id)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 dtb_interrupt_status = 0;
	ZXIC_UINT32 dtb_down_check_times = 2;
	ZXIC_UINT32 element_id = 0;

	ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev);

	if ((!(dev->pcie_channel.dev_status)) ||
	    (!dtb_table_function_switch_get())) {
		ZXIC_COMM_PRINT("slot[%u] vport[0x%x] dev status off!\n",
				dev->pcie_channel.slot,
				dev->pcie_channel.vport);
		return ZXIC_PAR_CHK_DEV_STATUS_OFF;
	}

	dtb_interrupt_status = dpp_dtb_interrupt_status_get();

	/* Down the table; on timeout retry once more. */
	while (dtb_down_check_times) {
		rc = dpp_dtb_tab_down_info_set(
			dev, queue_id, dtb_interrupt_status, down_table_len / 4,
			(ZXIC_UINT32 *)p_down_table_buff, &element_id);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_down_info_set");

		rc = dpp_dtb_tab_down_success_status_check(dev, queue_id,
							   element_id);

		if (rc != DPP_RC_DTB_OVER_TIME) {
			break;
		}

		dtb_down_check_times--;

		if (dtb_down_check_times > 0) {
			ZXIC_COMM_PRINT(
				"DTB DOWN TABLE OVERTIME, DOWN TABLE AGAIN----%d!\n",
				dtb_down_check_times);
		}
	}

	*p_element_id = element_id;

	/* NOTE(review): returns DPP_OK even if the final status check still
	 * timed out — confirm intended. */
	ZXIC_COMM_TRACE_INFO("down slot: %d, queue_id: %d, element id: %d\n",
			     DEV_PCIE_SLOT(dev), queue_id, *p_element_id);

	return DPP_OK;
}

/** Convert an eram index (in eram_mode units) into a 128-bit row and a
 * column within that row.
 * @param dev         device
 * @param eram_mode   eram width mode (ERAM128_TBL_*)
 * @param index       index in eram_mode units
 * @param p_row_index out: row
 * @param p_col_index out: column
 * @return DPP_OK on success
 */
DPP_STATUS
dtb_eram_index_cal(DPP_DEV_T *dev, ZXIC_UINT32 eram_mode,
		   ZXIC_UINT32 index, ZXIC_UINT32 *p_row_index,
		   ZXIC_UINT32 *p_col_index)
{
	DPP_STATUS rc = DPP_OK;

	ZXIC_UINT32 row_index = 0;
	ZXIC_UINT32 col_index = 0;

	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_row_index);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_col_index);

	switch (eram_mode) {
	case ERAM128_TBL_128b: {
		/* one index per 128-bit row */
		row_index = index;
		break;
	}

	case ERAM128_TBL_64b: {
		/* two 64-bit entries per row */
		row_index = (index >> 1);
		col_index = index & 0x1;
		break;
	}

	case ERAM128_TBL_1b: {
		/* 128 one-bit entries per row */
		row_index = (index >> 7);
		col_index = index & 0x7F;
		break;
	}
	}

	*p_row_index = row_index;
	*p_col_index = col_index;

	return rc;
}

#if ZXIC_REAL("DTB BASE INTERFACE")

/** Assemble one SMMU0 (eram) download entry from fields and data.
 * @param dev     device
 * @param mode    width mode (ERAM128_OPR_*)
 * @param addr    index in 1-bit units
 * @param p_data  data to write
 * @param p_entry assembled entry (caller-allocated)
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_smmu0_write_entry_data(DPP_DEV_T *dev, ZXIC_UINT32 mode,
					  ZXIC_UINT32 addr, ZXIC_UINT32 *p_data,
					  DPP_DTB_ENTRY_T *p_entry)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 i = 0;
	DPP_DTB_ERAM_TABLE_FORM_T dtb_eram_form_info = { 0 };

	ZXIC_COMM_CHECK_POINT(dev);
	ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), mode, ERAM128_OPR_128b,
				  ERAM128_OPR_32b);

	dtb_eram_form_info.valid = DTB_TABLE_VALID;
	dtb_eram_form_info.type_mode = DTB_TABLE_MODE_ERAM;
	dtb_eram_form_info.data_mode = mode;
	dtb_eram_form_info.cpu_wr = 1;
	dtb_eram_form_info.addr = addr;
	dtb_eram_form_info.cpu_rd = 0;
	dtb_eram_form_info.cpu_rd_mode = 0;

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("dpp eram form info: \n");
		ZXIC_COMM_DBGCNT32_PRINT("valid", dtb_eram_form_info.valid);
		ZXIC_COMM_DBGCNT32_PRINT("type_mode",
					 dtb_eram_form_info.type_mode);
		ZXIC_COMM_DBGCNT32_PRINT("data_mode",
					 dtb_eram_form_info.data_mode);
		ZXIC_COMM_DBGCNT32_PRINT("cpu_wr", dtb_eram_form_info.cpu_wr);
		ZXIC_COMM_DBGCNT32_PRINT("addr", dtb_eram_form_info.addr);
		ZXIC_COMM_DBGCNT32_PRINT("cpu_rd", dtb_eram_form_info.cpu_rd);
		ZXIC_COMM_DBGCNT32_PRINT("cpu_rd_mode",
					 dtb_eram_form_info.cpu_rd_mode);
	}

	/* Fill p_entry according to the write mode. */
	if (ERAM128_OPR_128b == mode) {
		p_entry->data_in_cmd_flag = 0;
		p_entry->data_size = 128 / 8;

		rc = dpp_dtb_write_table_cmd(DEV_ID(dev), DTB_TABLE_ERAM_128,
					     &dtb_eram_form_info, p_entry->cmd);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");

		ZXIC_COMM_MEMCPY(p_entry->data, p_data, 128 / 8);
	} else if (ERAM128_OPR_64b == mode) {
		/* 64-bit data rides inside the command itself */
		p_entry->data_in_cmd_flag = 1;
		p_entry->data_size = 64 / 8;
		dtb_eram_form_info.data_l = *(p_data + 1);
		dtb_eram_form_info.data_h = *(p_data);

		rc = dpp_dtb_write_table_cmd(DEV_ID(dev), DTB_TABLE_ERAM_64,
					     &dtb_eram_form_info, p_entry->cmd);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");

	} else if (ERAM128_OPR_1b == mode) {
		p_entry->data_in_cmd_flag = 1;
		p_entry->data_size = 1;
		dtb_eram_form_info.data_h = *(p_data);

		rc = dpp_dtb_write_table_cmd(DEV_ID(dev), DTB_TABLE_ERAM_1,
					     &dtb_eram_form_info, p_entry->cmd);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");
	}

	/* dump the 128-bit cmd buffer contents */
	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("table type: %d\n", DTB_TABLE_MODE_ERAM);
		ZXIC_COMM_PRINT("cmd: ");
		for (i = 0; i < 4; i++) {
			ZXIC_COMM_PRINT(
				"0x%08x ",
				ZXIC_COMM_CONVERT32(*((
					ZXIC_UINT32 *)(p_entry->cmd +
						       4 * i)))); /* shown big-endian */
		}

		if (p_entry->data_in_cmd_flag == 0) {
			if (p_entry->data) {
				ZXIC_COMM_PRINT("\ndata:");
				for (i = 0; i < 4; i++) {
					ZXIC_COMM_PRINT(
						"0x%08x ",
						*((ZXIC_UINT32 *)(p_entry->data +
								  4 * i)));
				}
				ZXIC_COMM_PRINT("\n");
			}
		}
	}

	return DPP_OK;
}

/**
 * Assemble one SMMU1 (DDR) download entry from fields and data.
 * @param dev_id     chip id
 * @param rw_len     width: 0-128bit, 1-256bit, 2-384bit, 3-512bit
 * @param v46_flag   IPv4/IPv6 flag
 * @param lpm_wr_vld LPM-table write flag, 1 when writing LPM data
 * @param base_addr  base address in units of 4K*128
 * @param index      index in rw_len width units
 * @param ecc_en     ECC enable
 * @param p_data     data to write
 * @param p_entry    assembled entry (caller-allocated; data area is 512 bits)
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_smmu1_write_entry_data(
	ZXIC_UINT32 dev_id, ZXIC_UINT32 rw_len, ZXIC_UINT32 v46_flag,
	ZXIC_UINT32 lpm_wr_vld, ZXIC_UINT32 base_addr, ZXIC_UINT32 index,
	ZXIC_UINT32 ecc_en, ZXIC_UINT8 *p_data, DPP_DTB_ENTRY_T *p_entry)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 i = 0;
	ZXIC_UINT32 j = 0;

	DPP_DTB_DDR_TABLE_FORM_T dtb_ddr_form_info = { 0 }; /* DDR cmd format */

	ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_INDEX_UPPER(rw_len, SMMU1_DDR_WRT_512b);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_entry);

	/* fill the DTB download command fields */
	dtb_ddr_form_info.valid = DTB_TABLE_VALID;
	dtb_ddr_form_info.type_mode = DTB_TABLE_MODE_DDR;
	dtb_ddr_form_info.rw_len = rw_len;
	dtb_ddr_form_info.v46_flag = v46_flag;
	dtb_ddr_form_info.lpm_wr_vld = lpm_wr_vld;
	dtb_ddr_form_info.baddr = base_addr;
	dtb_ddr_form_info.ecc_en = ecc_en;
	dtb_ddr_form_info.rw_addr = index;

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("dtb_ddr_form_info:\n");
		ZXIC_COMM_DBGCNT32_PRINT("valid", dtb_ddr_form_info.valid);
		ZXIC_COMM_DBGCNT32_PRINT("type_mode",
					 dtb_ddr_form_info.type_mode);
		ZXIC_COMM_DBGCNT32_PRINT("rw_len", dtb_ddr_form_info.rw_len);
		ZXIC_COMM_DBGCNT32_PRINT("v46_flag",
					 dtb_ddr_form_info.v46_flag);
		ZXIC_COMM_DBGCNT32_PRINT("lpm_wr_vld",
					 dtb_ddr_form_info.lpm_wr_vld);
		ZXIC_COMM_DBGCNT32_PRINT("baddr", dtb_ddr_form_info.baddr);
		ZXIC_COMM_DBGCNT32_PRINT("ecc_en", dtb_ddr_form_info.ecc_en);
		ZXIC_COMM_DBGCNT32_PRINT("rw_addr", dtb_ddr_form_info.rw_addr);
	}

	p_entry->data_in_cmd_flag = 0;
	p_entry->data_size = DTB_LEN_POS_SETP * (rw_len + 1);

	rc = dpp_dtb_write_table_cmd(dev_id, DTB_TABLE_DDR, &dtb_ddr_form_info,
				     p_entry->cmd);
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");

	ZXIC_COMM_MEMCPY(p_entry->data, p_data,
			 DTB_LEN_POS_SETP * (rw_len + 1));

	/* dump the 128-bit cmd buffer contents */
	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("table type: %d\n", DTB_TABLE_MODE_DDR);
		ZXIC_COMM_PRINT("cmd: ");
		for (i = 0; i < 4; i++) {
			ZXIC_COMM_PRINT(
				"0x%08x ",
				ZXIC_COMM_CONVERT32(*((
					ZXIC_UINT32 *)(p_entry->cmd +
						       4 * i)))); /* shown big-endian */
		}

		if (p_entry->data) {
			ZXIC_COMM_PRINT("\ndata:");
			for (j = 0; j < rw_len + 1; j++) {
				for (i = 0; i < 4; i++) {
					ZXIC_COMM_PRINT(
						"0x%08x ",
						*((ZXIC_UINT32 *)(p_entry->data +
								  16 * j +
								  4 * i)));
				}
				ZXIC_COMM_PRINT("\n");
			}
		}
	}

	return DPP_OK;
}

/** Assemble one ZCAM download entry from fields and data.
 * @param dev_id        chip id
 * @param reg_sram_flag 1: access ZCAM registers, 0: access sram
 * @param zgroup_id     group id
 * @param zblock_id     block id
 * @param zcell_id      cell id
 * @param sram_addr     address in 512-bit units (low 9 bits used)
 * @param mask          write mask, mask[3:0] select the four 128-bit lanes:
 *                      mask[0] -> [127:0], mask[1] -> [255:128],
 *                      mask[2] -> [383:256], mask[3] -> [512:384]
 * @param p_data        data to write
 * @param p_entry       assembled entry (caller-allocated; data area 512 bits)
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_zcam_write_entry_data(
	ZXIC_UINT32 dev_id, ZXIC_UINT32 reg_sram_flag, ZXIC_UINT32 zgroup_id,
	ZXIC_UINT32 zblock_id, ZXIC_UINT32 zcell_id, ZXIC_UINT32 sram_addr,
	ZXIC_UINT32 mask, ZXIC_UINT8 *p_data, DPP_DTB_ENTRY_T *p_entry)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 i = 0;
	ZXIC_UINT32 j = 0;

	DPP_DTB_ZCAM_TABLE_FORM_T dtb_zcam_form_info = { 0 }; /* ZCAM cmd format */

	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX -
1);
	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zgroup_id, 0, SE_ZGRP_NUM - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zblock_id, 0, ZBLK_NUM_PER_ZGRP - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zcell_id, 0, SE_ZCELL_NUM - 1);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data);
	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_entry);

	dtb_zcam_form_info.valid = DTB_TABLE_VALID;
	dtb_zcam_form_info.type_mode = DTB_TABLE_MODE_ZCAM;
	dtb_zcam_form_info.ram_reg_flag = reg_sram_flag;
	dtb_zcam_form_info.zgroup_id = zgroup_id;
	dtb_zcam_form_info.zblock_id = zblock_id;
	dtb_zcam_form_info.zcell_id = zcell_id;
	dtb_zcam_form_info.mask = mask;
	dtb_zcam_form_info.sram_addr = sram_addr & 0x1FF;

	p_entry->data_in_cmd_flag = 0;
	p_entry->data_size = DTB_LEN_POS_SETP * (DTB_ZCAM_LEN_SIZE - 1);

	rc = dpp_dtb_write_table_cmd(dev_id, DTB_TABLE_ZCAM,
				     &dtb_zcam_form_info, p_entry->cmd);
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");

	ZXIC_COMM_MEMCPY(p_entry->data, p_data,
			 DTB_LEN_POS_SETP * (DTB_ZCAM_LEN_SIZE - 1));

	/* dump the 128-bit cmd buffer contents */
	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("table type: %d\n", DTB_TABLE_MODE_ZCAM);
		ZXIC_COMM_PRINT("cmd: ");
		for (i = 0; i < 4; i++) {
			ZXIC_COMM_PRINT(
				"0x%08x ",
				ZXIC_COMM_CONVERT32(*((
					ZXIC_UINT32 *)(p_entry->cmd +
						       4 * i)))); /* original comment said little-endian display; sibling paths say big-endian */
		}

		if (p_entry->data) {
			ZXIC_COMM_PRINT("\ndata:");
			for (j = 0; j < DTB_ZCAM_LEN_SIZE - 1; j++) {
				for (i = 0; i < 4; i++) {
					ZXIC_COMM_PRINT(
						"0x%08x ",
						*((ZXIC_UINT32 *)(p_entry->data +
								  16 * j +
								  4 * i)));
				}
				ZXIC_COMM_PRINT("\n");
			}
		}
	}

	return DPP_OK;
}

/** Assemble one ETCAM download entry from fields and data.
 * @param dev            device
 * @param block_idx      block index (0-7)
 * @param row_or_col_msk 1: write row mask reg, 0: write col mask reg
 * @param vben           enable the valid bit addressed by addr
 * @param reg_tcam_flag  1: configure internal row/col mask registers,
 *                       0: read/write the tcam
 * @param flush          per-80bit flush enable bits (pairs with wr_mode)
 * @param rd_wr          0: write, 1: read
 * @param wr_mode        8-bit write mask; a set bit selects the matching
 *                       80-bit lane
 * @param data_or_mask   1: write x (data), 0: write y (mask)
 * @param ram_addr       etcam address (0-511)
 * @param vbit           valid bit input
 * @param p_data         data to write
 * @param p_entry        assembled entry (caller-allocated; data area 640 bits)
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_etcam_write_entry_data(
	DPP_DEV_T *dev, ZXIC_UINT32 block_idx, ZXIC_UINT32 row_or_col_msk,
	ZXIC_UINT32 vben, ZXIC_UINT32 reg_tcam_flag, ZXIC_UINT32 flush,
	ZXIC_UINT32 rd_wr, ZXIC_UINT32 wr_mode, ZXIC_UINT32 data_or_mask,
	ZXIC_UINT32 ram_addr, ZXIC_UINT32 vbit, ZXIC_UINT8 *p_data,
	DPP_DTB_ENTRY_T *p_entry)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 i = 0;
	ZXIC_UINT32 offset = 0;
	ZXIC_UINT8 *p_temp = NULL;

	ZXIC_UINT8 buff[DPP_ETCAM_WIDTH_MAX / 8] = { 0 };
	DPP_DTB_ETCAM_TABLE_FORM_T dtb_etcam_form_info = { 0 }; /* ETCAM cmd format */

	ZXIC_COMM_CHECK_POINT(dev);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0,
				  DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0,
				  DPP_ETCAM_BLOCK_NUM - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), row_or_col_msk, 0, 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), vben, 0, 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), reg_tcam_flag, 0, 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), flush, 0, DPP_ETCAM_WR_MASK_MAX);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_wr, 0, 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mode, 0,
				  DPP_ETCAM_WR_MASK_MAX);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), data_or_mask, 0, 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), ram_addr, 0,
				  DPP_ETCAM_RAM_DEPTH - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), vbit, 0, 0xff);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry);

	/* fill the ETCAM download command fields */
	dtb_etcam_form_info.valid = DTB_TABLE_VALID;
	dtb_etcam_form_info.type_mode = DTB_TABLE_MODE_ETCAM;
	dtb_etcam_form_info.block_sel = block_idx;
	dtb_etcam_form_info.init_en = 0;
	dtb_etcam_form_info.row_or_col_msk = row_or_col_msk;
	dtb_etcam_form_info.vben = vben;
	dtb_etcam_form_info.reg_tcam_flag = reg_tcam_flag;
	dtb_etcam_form_info.uload = flush;
	dtb_etcam_form_info.rd_wr = rd_wr; /* 0: write; 1: read */
	dtb_etcam_form_info.wr_mode = wr_mode;
	dtb_etcam_form_info.data_or_mask = data_or_mask;
	dtb_etcam_form_info.addr = ram_addr;
	dtb_etcam_form_info.vbit = vbit;

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("dpp etcam form info: \n");
		ZXIC_COMM_DBGCNT32_PRINT("valid", dtb_etcam_form_info.valid);
		ZXIC_COMM_DBGCNT32_PRINT("type_mode",
					 dtb_etcam_form_info.type_mode);
		ZXIC_COMM_DBGCNT32_PRINT("block_sel",
					 dtb_etcam_form_info.block_sel);
		ZXIC_COMM_DBGCNT32_PRINT("init_en",
					 dtb_etcam_form_info.init_en);
		ZXIC_COMM_DBGCNT32_PRINT("row_or_col_msk",
					 dtb_etcam_form_info.row_or_col_msk);
		ZXIC_COMM_DBGCNT32_PRINT("vben", dtb_etcam_form_info.vben);
		ZXIC_COMM_DBGCNT32_PRINT("reg_tcam_flag",
					 dtb_etcam_form_info.reg_tcam_flag);
		ZXIC_COMM_DBGCNT32_PRINT("uload", dtb_etcam_form_info.uload);
		ZXIC_COMM_DBGCNT32_PRINT("rd_wr", dtb_etcam_form_info.rd_wr);
		ZXIC_COMM_DBGCNT32_PRINT("wr_mode",
					 dtb_etcam_form_info.wr_mode);
		ZXIC_COMM_DBGCNT32_PRINT("data_or_mask",
					 dtb_etcam_form_info.data_or_mask);
		ZXIC_COMM_DBGCNT32_PRINT("addr", dtb_etcam_form_info.addr);
		ZXIC_COMM_DBGCNT32_PRINT("vbit", dtb_etcam_form_info.vbit);
	}

	p_entry->data_in_cmd_flag = 0;
	p_entry->data_size = DTB_LEN_POS_SETP * (DTB_ETCAM_LEN_SIZE - 1);

	rc = dpp_dtb_write_table_cmd(DEV_ID(dev), DTB_TABLE_ETCAM,
				     &dtb_etcam_form_info, p_entry->cmd);
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_table_cmd");

	p_temp = p_data;

	/* Write the data into the ETCAM entry. */
	/* 160bit key: high 80bit in tcam_ram1, low 80bit in tcam_ram0, and so on.
*/
	for (i = 0; i < DPP_ETCAM_RAM_NUM; i++) {
		offset = i * ((ZXIC_UINT32)DPP_ETCAM_WIDTH_MIN / 8);

		/* copy only the 80-bit lanes selected by wr_mode (MSB first) */
		if ((wr_mode >> (DPP_ETCAM_RAM_NUM - 1 - i)) & 0x1) {
			ZXIC_COMM_MEMCPY(buff + offset, p_temp,
					 DPP_ETCAM_WIDTH_MIN / 8);
			p_temp += DPP_ETCAM_WIDTH_MIN / 8;
		}
	}

	zxic_comm_swap((ZXIC_UINT8 *)buff,
		       DTB_LEN_POS_SETP * (DTB_ETCAM_LEN_SIZE - 1));

	ZXIC_COMM_MEMCPY(p_entry->data, buff,
			 DTB_LEN_POS_SETP * (DTB_ETCAM_LEN_SIZE - 1));

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("dpp_dtb_etcam_write_entry_data: \n");
		ZXIC_COMM_PRINT("wr_mode:0x%08x\n", wr_mode);
		for (i = 0; i < 20; i++) {
			ZXIC_COMM_PRINT("0x%08x ",
					*((ZXIC_UINT32 *)(buff + 4 * i)));
		}
		ZXIC_COMM_PRINT("\n");
	}

	return DPP_OK;
}

/** Write one eram entry: compute the 1-bit-unit DTB address and assemble
 * the entry.
 * @param dev       device
 * @param base_addr base address, in 128-bit units
 * @param index     entry index, in wrt_mode units
 * @param wrt_mode  data width mode: 0: 128bit, 1: 64bit, 2: 1bit
 * @param p_data    data to write
 * @param p_entry   assembled entry (caller-allocated)
 * @return DPP_OK, or an error code when the index is invalid/out of range
 */
DPP_STATUS dpp_dtb_se_smmu0_ind_write(DPP_DEV_T *dev, ZXIC_UINT32 base_addr,
				      ZXIC_UINT32 index, ZXIC_UINT32 wrt_mode,
				      ZXIC_UINT32 *p_data,
				      DPP_DTB_ENTRY_T *p_entry)
{
	DPP_STATUS rc = DPP_OK;
	ZXIC_UINT32 temp_idx = 0;
	ZXIC_UINT32 dtb_ind_addr = 0;

	ZXIC_COMM_CHECK_POINT(dev);
	ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wrt_mode, ERAM128_OPR_128b,
				  ERAM128_OPR_1b);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), base_addr, 0,
				  SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1);

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_PRINT("dpp_dtb_se_smmu0_ind_write: \n");
		ZXIC_COMM_PRINT("base addr: 0x%08x\n", base_addr);
		ZXIC_COMM_PRINT("index: 0x%08x\n", index);
		ZXIC_COMM_PRINT("write mode: %d 0-128bit 1-64bit 2-1bit\n",
				wrt_mode);
	}

	/* Range-check index for the chosen width, then scale to 1-bit units. */
	switch (wrt_mode) {
	case ERAM128_OPR_128b: {
		if ((0xFFFFFFFF - (base_addr)) < (index)) {
			ZXIC_COMM_TRACE_DEV_ERROR(
				DEV_ID(dev),
				"ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n",
				__FILE__, __LINE__, base_addr, index,
				__FUNCTION__);

			return ZXIC_PAR_CHK_INVALID_INDEX;
		}
		if (base_addr + index > SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
			ZXIC_COMM_TRACE_DEV_ERROR(
				DEV_ID(dev),
				"dpp_dtb_se_smmu0_ind_write : index out of range !\n");
			return DPP_ERR;
		}

		temp_idx = index << 7;

		break;
	}

	case ERAM128_OPR_64b: {
		if ((base_addr + (index >> 1)) >
		    SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
			ZXIC_COMM_TRACE_DEV_ERROR(
				DEV_ID(dev),
				"dpp_dtb_se_smmu0_ind_write : index out of range !\n");
			return DPP_ERR;
		}

		temp_idx = index << 6;

		break;
	}

	case ERAM128_OPR_1b: {
		if ((base_addr + (index >> 7)) >
		    SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
			ZXIC_COMM_TRACE_DEV_ERROR(
				DEV_ID(dev),
				"dpp_dtb_se_smmu0_ind_write : index out of range !\n");
			return DPP_ERR;
		}

		temp_idx = index; /* last case: no break needed */
	}
	}

	/* overflow guard before forming the final 1-bit-unit address */
	if ((0xFFFFFFFF - (temp_idx)) <
	    ((base_addr << 7) & DPP_ERAM128_BADDR_MASK)) {
		ZXIC_COMM_TRACE_DEV_ERROR(
			DEV_ID(dev),
			"ICM %s:%d[Error:VALUE[val0=0x%x] INVALID] [val1=0x%x] ! FUNCTION :%s !\n",
			__FILE__, __LINE__, temp_idx,
			((base_addr << 7) & DPP_ERAM128_BADDR_MASK),
			__FUNCTION__);
		return ZXIC_PAR_CHK_INVALID_INDEX;
	}

	dtb_ind_addr = ((base_addr << 7) & DPP_ERAM128_BADDR_MASK) + temp_idx;

	if (dpp_dtb_prt_get()) {
		ZXIC_COMM_DBGCNT32_PRINT(" dtb eram item 1bit addr",
					 dtb_ind_addr);
	}

	rc = dpp_dtb_smmu0_write_entry_data(dev, wrt_mode, dtb_ind_addr, p_data,
					    p_entry);
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_write_entry_data");

	return DPP_OK;
}

/** Download a batch of SMMU0 entries in one DTB transaction
 * (data must fit the 16K buffer).
 * @param dev             device
 * @param queue_id        queue id
 * @param smmu0_base_addr SMMU0 base address, in 128-bit units
 * @param smmu0_wr_mode   write mode (DPP_ERAM128_OPR_MODE_E);
 *                        only 128bit/64bit/1bit supported
 * @param entry_num       number of entries to download
 * @param p_entry_arr     array of entries to download
 * @param element_id      out: element id used for the download
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_smmu0_data_write_cycle(
	DPP_DEV_T *dev, ZXIC_UINT32 queue_id, ZXIC_UINT32 smmu0_base_addr,
	ZXIC_UINT32 smmu0_wr_mode, ZXIC_UINT32 entry_num,
	DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr, ZXIC_UINT32 *element_id)
{
	DPP_STATUS rc = DPP_OK;

	ZXIC_UINT32 item_cnt = 0;
	ZXIC_UINT32 addr_offset = 0;
	ZXIC_UINT32 dtb_len = 0;
	ZXIC_UINT32 index = 0;

	ZXIC_UINT32 *p_entry_data = NULL;
	ZXIC_UINT8 *table_data_buff = NULL;
	ZXIC_UINT32 entry_data_buff[4] = { 0 };
	ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 };
	DPP_DTB_ENTRY_T dtb_one_entry = { 0 };

	ZXIC_COMM_CHECK_POINT(dev);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0,
				  DPP_DEV_CHANNEL_MAX - 1);
	ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1);
	ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1);
	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_arr);

	/* allocate the 16K download buffer */
	table_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(
		DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8));
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), table_data_buff);
	ZXIC_COMM_MEMSET(table_data_buff, 0,
			 DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8));

	dtb_one_entry.cmd = cmd_buff;
	dtb_one_entry.data = (ZXIC_UINT8 *)entry_data_buff;

	for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) {
		p_entry_data = (ZXIC_UINT32 *)p_entry_arr[item_cnt].p_data;
		ZXIC_COMM_CHECK_POINT_MEMORY_FREE_NO_ASSERT(p_entry_data,
							    table_data_buff);
		index = p_entry_arr[item_cnt].index;

		/* assemble one entry */
		rc = dpp_dtb_se_smmu0_ind_write(dev, smmu0_base_addr, index,
						smmu0_wr_mode, p_entry_data,
						&dtb_one_entry);
		ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(
			rc, "dpp_dtb_se_smmu0_ind_write", table_data_buff);

		/* dtb_len is in 128-bit units: cmd (+ data for 128-bit mode) */
		switch (smmu0_wr_mode) {
		case ERAM128_OPR_128b: {
			dtb_len += 2;
			addr_offset = item_cnt * DTB_LEN_POS_SETP * 2;
			break;
		}

		case ERAM128_OPR_64b: {
			dtb_len += 1;
			addr_offset = item_cnt * DTB_LEN_POS_SETP;
			break;
		}

		case ERAM128_OPR_1b: {
			dtb_len += 1;
			addr_offset = item_cnt * DTB_LEN_POS_SETP;
			break;
		}
		}

		/* append command and data into the download buffer */
		rc = dpp_dtb_data_write(table_data_buff, addr_offset,
					&dtb_one_entry);
		ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(
			rc, "dpp_dtb_data_write", table_data_buff);
		ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8);
		ZXIC_COMM_MEMSET(entry_data_buff, 0, 4 * sizeof(ZXIC_UINT32));
	}

	if (dpp_dtb_prt_get()) {
		dpp_data_buff_print(table_data_buff, dtb_len * 16);
		ZXIC_COMM_PRINT(
			"start down table to dtb : queue_id: %d, down_data_len: %d\n",
			queue_id, dtb_len * 16);
	}

	rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16,
					   table_data_buff, element_id);
	ZXIC_COMM_FREE(table_data_buff);
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_down_table_data");

	return DPP_OK;
}

/** Download SMMU0 entries of arbitrary count, splitting the work into
 * per-mode batches that fit one DTB transaction.
 * @param dev             device
 * @param queue_id        queue id
 * @param smmu0_base_addr SMMU0 base address, in 128-bit units
 * @param smmu0_wr_mode   write mode (DPP_ERAM128_OPR_MODE_E);
 *                        only 128bit/64bit/1bit supported
 * @param entry_num       number of entries to download
 * @param p_entry_arr     array of entries to download
 * @param element_id      out: element id used by the last batch
 * @return DPP_OK on success
 */
DPP_STATUS dpp_dtb_smmu0_data_write(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				    ZXIC_UINT32 smmu0_base_addr,
				    ZXIC_UINT32 smmu0_wr_mode,
				    ZXIC_UINT32 entry_num,
				    DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
				    ZXIC_UINT32 *element_id)
{
	DPP_STATUS rc = DPP_OK;

	ZXIC_UINT32 i = 0;
	ZXIC_UINT32 entry_num_max = 0;
	ZXIC_UINT32 entry_cycle = 0;
	ZXIC_UINT32 entry_remains = 0;

	DPP_DTB_ERAM_ENTRY_INFO_T *p_entry = NULL;

	ZXIC_COMM_CHECK_POINT(dev);
	ZXIC_COMM_CHECK_POINT(p_entry_arr);

	/* max entries per batch depends on per-entry size in the 16K buffer */
	switch (smmu0_wr_mode) {
	case ERAM128_OPR_128b: {
		entry_num_max = 0x1ff;
		break;
	}

	case ERAM128_OPR_64b: {
		entry_num_max = 0x3ff;
		break;
	}

	case ERAM128_OPR_1b: {
		entry_num_max = 0x3ff;
		break;
	}
	}

	/* unsupported mode leaves entry_num_max at 0 and is rejected here */
	ZXIC_COMM_CHECK_INDEX_EQUAL(entry_num_max, 0);
	entry_cycle = entry_num / entry_num_max;
	entry_remains = entry_num % entry_num_max;

	for (i = 0; i < entry_cycle; ++i) {
		p_entry = p_entry_arr + entry_num_max * i;
		rc = dpp_dtb_smmu0_data_write_cycle(
			dev, queue_id, smmu0_base_addr, smmu0_wr_mode,
			entry_num_max, p_entry, element_id);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc,
					     "dpp_dtb_smmu0_data_write_cycle");

		ZXIC_COMM_TRACE_INFO(
			"dpp_dtb_smmu0_data_write_cycle[%d]: element_id = %d\n",
			i, *element_id);
	}

	if (entry_remains) {
		p_entry = p_entry_arr + entry_num_max * entry_cycle;
		rc = dpp_dtb_smmu0_data_write_cycle(
			dev, queue_id, smmu0_base_addr, smmu0_wr_mode,
			entry_remains, p_entry, element_id);
		ZXIC_COMM_CHECK_RC_NO_ASSERT(rc,
					     "dpp_dtb_smmu0_data_write_cycle");

		ZXIC_COMM_TRACE_INFO(
			"dpp_dtb_smmu0_data_write_cycle: element_id = %d\n",
			*element_id);
	}

	return DPP_OK;
}

/** Flush (zero) SMMU0 entries, data fitting one 16K batch.
 * @param dev             device
 * @param queue_id        queue id
 * @param smmu0_base_addr SMMU0 base address, in 128-bit units
 * @param smmu0_wr_mode   write mode (DPP_ERAM128_OPR_MODE_E);
 *                        only 128bit/64bit/1bit supported
 * @param start_index     first entry to flush
 * @param entry_num       number of entries to flush
 * @param element_id      out: element id used for the download
 * @return result of the underlying batch write
 */
DPP_STATUS dpp_dtb_smmu0_flush_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
				     ZXIC_UINT32 smmu0_base_addr,
				     ZXIC_UINT32 smmu0_wr_mode,
				     ZXIC_UINT32 start_index,
				     ZXIC_UINT32 entry_num,
				     ZXIC_UINT32 *element_id)
{
	DPP_STATUS rc = DPP_OK;

	ZXIC_UINT32 index = 0;
	ZXIC_UINT32 current_index = 0;
	ZXIC_UINT32 entry_data_buff[4] = { 0 };
	DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr = NULL;

	ZXIC_COMM_CHECK_POINT(dev);

	p_entry_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC(
		entry_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T));
	ZXIC_COMM_CHECK_POINT(p_entry_arr);
	ZXIC_COMM_MEMSET(p_entry_arr, 0,
			 entry_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T));

	/* every entry points at the same zeroed data buffer */
	for (index = 0; index < entry_num; index++) {
		current_index = start_index + index;

		p_entry_arr[index].index = current_index;
		p_entry_arr[index].p_data = entry_data_buff;
	}

	rc = dpp_dtb_smmu0_data_write_cycle(dev, queue_id, smmu0_base_addr,
					    smmu0_wr_mode, entry_num,
					    p_entry_arr, element_id);
	ZXIC_COMM_FREE(p_entry_arr);
	/* NOTE(review): log tag says "smmu1" but the call above is the smmu0
	 * cycle — likely a copy/paste slip in the message string. */
	ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_data_write_cycle");

	return rc;
}

/** Flush (zero) SMMU0 entries, large data volumes.
 * @param dev             device
 * @param queue_id        queue id
 * @param smmu0_base_addr SMMU0 base address, in 128-bit units
 * @param smmu0_wr_mode   write mode (DPP_ERAM128_OPR_MODE_E);
 *                        only 128bit/64bit/1bit supported
 * @param start_index     first entry to flush
 * @param entry_num       number of entries to flush
 * @param element_id      out: element id used for the download
 */
DPP_STATUS dpp_dtb_smmu0_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id,
			       ZXIC_UINT32 smmu0_base_addr,
			       ZXIC_UINT32 smmu0_wr_mode,
			       ZXIC_UINT32 start_index,
ZXIC_UINT32 entry_num, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 entry_num_max = 0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 temp_start_index = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + switch (smmu0_wr_mode) { + case ERAM128_OPR_128b: { + entry_num_max = 0x1ff; + break; + } + + case ERAM128_OPR_64b: { + entry_num_max = 0x3ff; + break; + } + + case ERAM128_OPR_1b: { + entry_num_max = 0x3ff; + break; + } + } + + ZXIC_COMM_CHECK_INDEX_EQUAL(entry_num_max, 0); + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + temp_start_index = entry_num_max * i + start_index; + + rc = dpp_dtb_smmu0_flush_cycle(dev, queue_id, smmu0_base_addr, + smmu0_wr_mode, temp_start_index, + entry_num_max, element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_flush_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_smmu0_flush_cycle[%d] element_id = %d\n", i, + *element_id); + } + + if (entry_remains) { + temp_start_index = entry_num_max * entry_cycle + start_index; + rc = dpp_dtb_smmu0_flush_cycle(dev, queue_id, smmu0_base_addr, + smmu0_wr_mode, temp_start_index, + entry_remains, element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_flush_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_smmu0_flush_cycle: element_id = %d\n", + *element_id); + } + + return 0; +} + +/** ddr直接表 写数据 +* @param dev_id 芯片id +* @param base_addr 基地址,以4K*128bit为单位 +* @param rw_len 数据位宽模式,位宽 0-128bit, 1-256bit, 2-384bit, 3-512bit,取值参考SMMU1_DDR_WRT_MODE_E的定义 +* @param index 条目索引,以mode为单位 +* @param ecc_en 直接表ECC使能位 +* @param p_data 待写入的数据指针 +* @param p_entry 组装好的条目(已分配好空间) +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30/ +************************************************************/ +DPP_STATUS +dpp_dtb_ddr_dir_table_data_write(ZXIC_UINT32 dev_id, ZXIC_UINT32 base_addr, + ZXIC_UINT32 rw_len, ZXIC_UINT32 index, + ZXIC_UINT32 ecc_en, 
ZXIC_UINT8 *p_data, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 ipv4_v6_flag = 0; + ZXIC_UINT32 lpm_vld = 0; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(rw_len, SMMU1_DDR_WRT_512b); + ZXIC_COMM_CHECK_INDEX_UPPER(ecc_en, 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data); + + rc = dpp_dtb_smmu1_write_entry_data(dev_id, rw_len, ipv4_v6_flag, + lpm_vld, base_addr, index, ecc_en, + p_data, p_entry); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_write_entry_data"); + + return DPP_OK; +} + +/** ddr直接表 写数据 +* @param dev_id 芯片id +* @param base_addr 基地址,以4K*128bit为单位 +* @param rw_len 数据位宽模式,位宽 0-128bit, 1-256bit, 2-384bit, 3-512bit,取值参考SMMU1_DDR_WRT_MODE_E的定义 +* @param index 条目索引,以mode为单位 +* @param ecc_en ecc是否使能 +* @param p_data 待写入的数据指针 +* @param p_entry 组装好的条目(已分配好空间) +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30/ +************************************************************/ +DPP_STATUS +dpp_dtb_ddr_hash_table_data_write(ZXIC_UINT32 dev_id, ZXIC_UINT32 base_addr, + ZXIC_UINT32 rw_len, ZXIC_UINT32 index, + ZXIC_UINT32 ecc_en, ZXIC_UINT8 *p_data, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 ipv4_v6_flag = 0; + ZXIC_UINT32 lpm_vld = 0; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(rw_len, SMMU1_DDR_WRT_512b); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_entry); + + rc = dpp_dtb_smmu1_write_entry_data(dev_id, rw_len, ipv4_v6_flag, + lpm_vld, base_addr, index, ecc_en, + p_data, p_entry); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_write_entry_data"); + + return DPP_OK; +} + +/** dtb 通道 se alg间接写zcam空间 +* @param dev_id 芯片id +* @param addr 基地址,以4K*128bit为单位 +* @param p_data 待写入的数据指针 +* @param p_entry 组装好的条目(已分配好空间) +* +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30/ +************************************************************/ +DPP_STATUS 
dpp_dtb_se_alg_zcam_data_write(ZXIC_UINT32 dev_id, ZXIC_UINT32 addr, + ZXIC_UINT8 *p_data, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 reg_sram_flag = 0; + ZXIC_UINT32 zgroup_id = 0; + ZXIC_UINT32 zblock_id = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 mask = 0; + ZXIC_UINT32 sram_addr = 0; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_entry); + + mask = (addr >> 17) & 0xF; + reg_sram_flag = (addr >> 16) & 0x1; + zgroup_id = (addr >> 14) & 0x3; + zblock_id = (addr >> 11) & 0x7; + zcell_id = (addr >> 9) & 0x3; + sram_addr = addr & 0x1FF; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dpp_dtb_se_alg_zcam_data_write: \n"); + ZXIC_COMM_DBGCNT32_PRINT("addr", addr); + ZXIC_COMM_DBGCNT32_PRINT("mask", mask); + ZXIC_COMM_DBGCNT32_PRINT("reg_sram_flag", reg_sram_flag); + ZXIC_COMM_DBGCNT32_PRINT("zgroup_id", zgroup_id); + ZXIC_COMM_DBGCNT32_PRINT("zblock_id", zblock_id); + ZXIC_COMM_DBGCNT32_PRINT("zcell_id", zcell_id); + ZXIC_COMM_DBGCNT32_PRINT("sram_addr", sram_addr); + } + + rc = dpp_dtb_zcam_write_entry_data(dev_id, reg_sram_flag, zgroup_id, + zblock_id, zcell_id, sram_addr, mask, + p_data, p_entry); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_zcam_write_entry_data"); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("DOWN_TABLE") +/** 将HASH表数据格式写入form_buff中 +* @param p_hash_cfg hash表配置指针 +* @param p_rbkey_rtn 红黑树新节点信息指针 +* @param entry_data 条目数据缓存缓存 +* @param opr_mode 配置模式 0:add/update 1:delete +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_hash_form_write(DPP_HASH_CFG *p_hash_cfg, + DPP_HASH_RBKEY_INFO *p_rbkey_new, + ZXIC_UINT32 actu_key_size, + DPP_DTB_ENTRY_T *p_entry, + ZXIC_UINT32 opr_mode) +{ + ZXIC_UINT8 table_id = 0; + ZXIC_UINT32 key_type = 0; + ZXIC_UINT32 key_by_size = 0; + ZXIC_UINT32 rst_by_size = 0; + ZXIC_UINT32 
byte_offset = 0; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 bulk_id = 0; + ZXIC_UINT32 temp_mask = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 addr; + + D_NODE *p_entry_dn = NULL; + HASH_DDR_CFG *p_ddr_cfg = NULL; + SE_ITEM_CFG *p_item_info = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + ZXIC_UINT8 entry_data[SE_ENTRY_WIDTH_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + ZXIC_COMM_CHECK_POINT(p_rbkey_new); + + dev_id = p_hash_cfg->p_se_info->dev_id; + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_MEMSET(entry_data, 0x0, sizeof(entry_data)); + p_item_info = p_rbkey_new->p_item_info; + ZXIC_COMM_CHECK_POINT(p_item_info); + + if (p_item_info->item_type == ITEM_DDR_256 || + p_item_info->item_type == ITEM_DDR_512) { //hash条目存在DDR中 + table_id = DPP_GET_HASH_TBL_ID(p_rbkey_new->key); + bulk_id = ((table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_NUM - 1); + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[bulk_id]; + key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey_new->key); + key_by_size = DPP_GET_KEY_SIZE(actu_key_size); + + switch (key_type) { + case HASH_KEY_128b: { + rst_by_size = 16U - + DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) - + HASH_KEY_CTR_SIZE; + break; + } + case HASH_KEY_256b: { + rst_by_size = 32U - + DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) - + HASH_KEY_CTR_SIZE; + break; + } + case HASH_KEY_512b: { + rst_by_size = 64U - + DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size) - + HASH_KEY_CTR_SIZE; + break; + } + default: { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "\n ErrorCode[%x]: Invalid key type.", + DPP_HASH_RC_INVALID_KEY_TYPE); + return DPP_HASH_RC_INVALID_KEY_TYPE; + } + } + + /* 计算index */ + if (DDR_WIDTH_256b == p_ddr_cfg->width_mode) { + if (HASH_KEY_128b == key_type) { + index = (p_item_info->hw_addr << 1) + + p_rbkey_new->entry_pos; + } else if (HASH_KEY_256b == key_type) { + index = p_item_info->hw_addr; + } + } else if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + if 
(HASH_KEY_128b == key_type) { + index = (p_item_info->hw_addr << 2) + + p_rbkey_new->entry_pos; + } else if (HASH_KEY_256b == key_type) { + index = (p_item_info->hw_addr << 2) + + p_rbkey_new->entry_pos; + index = index >> 1; + } else { + index = p_item_info->hw_addr; + } + } + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "ddr index(unit by key_type) is 0x%x \n", + index); + + ddr_wr_mode = DPP_GET_DDR_WR_MODE(key_type); + + /*将数据组合成一个数组中并写入缓存中进行保存*/ + if (DTB_ITEM_ADD_OR_UPDATE == opr_mode) { + ZXIC_COMM_MEMCPY(entry_data, p_rbkey_new->key, + key_by_size); + ZXIC_COMM_MEMCPY( + entry_data + key_by_size, p_rbkey_new->rst, + ((rst_by_size > HASH_RST_MAX) ? HASH_RST_MAX : + rst_by_size)); + zxic_comm_swap(entry_data, SE_ENTRY_WIDTH_MAX); + } + + dpp_dtb_ddr_hash_table_data_write(dev_id, p_ddr_cfg->ddr_baddr, + ddr_wr_mode, index, + p_ddr_cfg->ddr_ecc_en, + entry_data, p_entry); + ZXIC_COMM_TRACE_DEBUG("entry_data is:"); + + for (i = 0; i < SE_ENTRY_WIDTH_MAX; i++) { + ZXIC_COMM_TRACE_DEBUG("0x%02x ", entry_data[i]); + } + + ZXIC_COMM_TRACE_DEBUG("\n"); + + } else { /* hash条目存在zcam上*/ + /*写ZCAM表格式*/ + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "zcam p_item_info->hw_addr is 0x%x \n", + p_item_info->hw_addr); + addr = p_item_info->hw_addr; + + /*将数据组合成一个数组中并写入缓存中进行保存*/ + p_entry_dn = p_item_info->item_list.p_next; + + while (p_entry_dn) { + p_rbkey = (DPP_HASH_RBKEY_INFO *)(p_entry_dn->data); + // table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + key_by_size = DPP_GET_KEY_SIZE(actu_key_size); + ZXIC_COMM_CHECK_INDEX_UPPER(key_by_size, HASH_KEY_MAX); + rst_by_size = DPP_GET_RST_SIZE(key_type, actu_key_size); + + byte_offset = p_rbkey->entry_pos * HASH_ENTRY_POS_STEP; + ZXIC_COMM_MEMCPY(entry_data + byte_offset, p_rbkey->key, + key_by_size); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + dev_id, byte_offset, key_by_size); + byte_offset += key_by_size; + ZXIC_COMM_MEMCPY(entry_data + byte_offset, p_rbkey->rst, + ((rst_by_size > 
HASH_RST_MAX) ? + HASH_RST_MAX : + rst_by_size)); + + temp_mask |= + ((((1U << (p_rbkey->entry_size / 16U)) - 1U) + << (4U - p_rbkey->entry_size / 16U - + p_rbkey->entry_pos)) & + 0xF); //计算掩码 + + p_entry_dn = p_entry_dn->next; + } + + zxic_comm_swap(entry_data, SE_ENTRY_WIDTH_MAX); + + dpp_dtb_se_alg_zcam_data_write(dev_id, addr, entry_data, + p_entry); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "zcam_item_data is:"); + + for (i = 0; i < SE_ITEM_WIDTH_MAX; i++) { + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "0x%02x ", + entry_data[i]); + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "\n"); + } + + return DPP_OK; +} + +/** 增加hash表条目时写buff函数 +* @param dev_id 设备号 +* @param p_hash_entry_cfg 该hash条目的格式内容信息 +* @param p_data_buff 保存用于dtb下表数据的buff +* @param index 该hash条目的编号 +* @param p_dtb_len 下发完该hash表条目后dtb数据的长度 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/06/03 +************************************************************/ +DPP_STATUS dpp_dtb_add_hash_buf_write(ZXIC_UINT32 dev_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_data_buff, + ZXIC_UINT32 index, ZXIC_UINT32 *p_dtb_len) +{ + DPP_STATUS rc = DPP_OK; + SE_ITEM_CFG *p_item_info = NULL; + ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 addr_offset = 0; + DPP_DTB_ENTRY_T dtb_one_entry = { 0 }; /*条目结构*/ + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; /*命令格式缓存*/ + ZXIC_UINT8 hash_entry_data[SE_ENTRY_WIDTH_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg->p_rbkey_new); + ZXIC_COMM_CHECK_POINT(p_dtb_len); + + ZXIC_COMM_MEMSET(cmd_buff, 0x0, sizeof(cmd_buff)); + ZXIC_COMM_MEMSET(hash_entry_data, 0x0, sizeof(hash_entry_data)); + dtb_one_entry.cmd = cmd_buff; + dtb_one_entry.data = hash_entry_data; + + /*将位置信息和数据信息写入buff中*/ + rc = dpp_dtb_hash_form_write( + p_hash_entry_cfg->p_hash_cfg, p_hash_entry_cfg->p_rbkey_new, + p_hash_entry_cfg->actu_key_size, &dtb_one_entry, + DTB_ITEM_ADD_OR_UPDATE); /*计算一个条目的位置信息,并保存起来*/ + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, 
"dpp_dtb_hash_form_write"); + + p_item_info = p_hash_entry_cfg->p_rbkey_new->p_item_info; + ZXIC_COMM_CHECK_POINT(p_item_info); + ddr_wr_mode = DPP_GET_DDR_WR_MODE(p_hash_entry_cfg->key_type); + + /*将数据写入缓存中,并更新长度统计*/ + if (p_item_info->item_type == ITEM_DDR_256 || + p_item_info->item_type == ITEM_DDR_512) { + dtb_one_entry.data_size = DTB_LEN_POS_SETP * (ddr_wr_mode + 1); + addr_offset = index * (ddr_wr_mode + 2) * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + (*p_dtb_len) += (ddr_wr_mode + 2); //dtb长度进行计数 + } else { /*ram or reg*/ + dtb_one_entry.data_size = + DTB_LEN_POS_SETP * (DTB_ZCAM_LEN_SIZE - 1); + addr_offset = index * DTB_ZCAM_LEN_SIZE * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + (*p_dtb_len) += DTB_ZCAM_LEN_SIZE; //dtb长度进行计数 + } + + rc = dpp_dtb_data_write(p_data_buff, addr_offset, + &dtb_one_entry); /*将数据写入缓存中*/ + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_data_write"); + + return rc; +} + +/** 删除hash表条目时写buff函数 +* @param dev_id 设备号 +* @param p_hash_entry_cfg 该hash条目的格式内容信息 +* @param p_data_buff 保存用于dtb下表数据的buff +* @param index 该hash条目的编号 +* @param p_dtb_len 下发完该hash表条目后dtb数据的长度 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/06/03 +************************************************************/ +DPP_STATUS dpp_dtb_delete_hash_buf_write(ZXIC_UINT32 dev_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_data_buff, + ZXIC_UINT32 index, + ZXIC_UINT32 *p_dtb_len) +{ + DPP_STATUS rc = DPP_OK; + SE_ITEM_CFG *p_item_info = NULL; + ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 addr_offset = 0; + DPP_DTB_ENTRY_T dtb_one_entry = { 0 }; /*条目结构*/ + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; /*命令格式缓存*/ + ZXIC_UINT8 hash_entry_data[SE_ENTRY_WIDTH_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg->p_rbkey_new); + ZXIC_COMM_CHECK_POINT(p_dtb_len); + + ZXIC_COMM_MEMSET(cmd_buff, 0x0, sizeof(cmd_buff)); + ZXIC_COMM_MEMSET(hash_entry_data, 0x0, sizeof(hash_entry_data)); + dtb_one_entry.cmd = cmd_buff; + 
dtb_one_entry.data = hash_entry_data; + + /*将位置信息和数据信息写入buff中*/ + rc = dpp_dtb_hash_form_write( + p_hash_entry_cfg->p_hash_cfg, p_hash_entry_cfg->p_rbkey_new, + p_hash_entry_cfg->actu_key_size, &dtb_one_entry, + DTB_ITEM_DELETE); /*计算一个条目的位置信息,并保存起来*/ + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dtb_hash_form_write"); + + p_item_info = p_hash_entry_cfg->p_rbkey_new->p_item_info; + ZXIC_COMM_CHECK_POINT(p_item_info); + ddr_wr_mode = DPP_GET_DDR_WR_MODE(p_hash_entry_cfg->key_type); + + /*将数据写入缓存中,并更新长度统计*/ + if (p_item_info->item_type == ITEM_DDR_256 || + p_item_info->item_type == ITEM_DDR_512) { + dtb_one_entry.data_size = DTB_LEN_POS_SETP * (ddr_wr_mode + 1); + addr_offset = index * (ddr_wr_mode + 2) * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + (*p_dtb_len) += (ddr_wr_mode + 2); //dtb长度进行计数 + } else { /*ram or reg*/ + dtb_one_entry.data_size = + DTB_LEN_POS_SETP * (DTB_ZCAM_LEN_SIZE - 1); + addr_offset = index * DTB_ZCAM_LEN_SIZE * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + (*p_dtb_len) += DTB_ZCAM_LEN_SIZE; //dtb长度进行计数 + } + + // ZXIC_COMM_MEMSET(dtb_one_entry.data, 0x0, dtb_one_entry.data_size);//清除下表数据 + + rc = dpp_dtb_data_write(p_data_buff, addr_offset, + &dtb_one_entry); /*将数据写入缓存中*/ + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_data_write"); + + return rc; +} + +/***********************************************************/ +/** dtb 添加eTcam表条目,将etcam条目内容写入到entry中 +* @param dev_id 设备号 +* @param addr 每个block中的ram地址,位宽为8*80bit +* @param block_idx block编号,范围0~7 +* @param wr_mask 写表掩码,共8bit,每bit控制ram中对应位置的80bit数据是否有效 +* @param opr_type etcam操作类型,详见 DPP_ETCAM_OPR_TYPE_E +* @param p_entry 条目数据,data和mask +* @param p_entry_data 组装好的数据条目(已分配好空间) +* @param p_entry_mask 组装好的掩码条目(已分配好空间) +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_entry_add(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 block_idx, ZXIC_UINT32 wr_mask, + ZXIC_UINT32 opr_type, + DPP_ETCAM_ENTRY_T 
*p_entry, + DPP_DTB_ENTRY_T *p_entry_data, + DPP_DTB_ENTRY_T *p_entry_mask) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + DPP_ETCAM_ENTRY_T entry_xy = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), addr, 0, + DPP_ETCAM_RAM_DEPTH - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), wr_mask, 0, + DPP_ETCAM_WR_MASK_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), opr_type, DPP_ETCAM_OPR_DM, + DPP_ETCAM_OPR_XY); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_mask); + + ZXIC_COMM_ASSERT(p_entry->p_data && p_entry->p_mask); + + entry_xy.p_data = temp_data; + entry_xy.p_mask = temp_mask; + + if (opr_type == DPP_ETCAM_OPR_DM) { + /* convert user D/M data to X/Y */ + rc = dpp_etcam_dm_to_xy( + p_entry, &entry_xy, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_etcam_dm_to_xy"); + } else { + ZXIC_COMM_MEMCPY(entry_xy.p_data, p_entry->p_data, + DPP_ETCAM_ENTRY_SIZE_GET( + p_entry->mode)); //复制出实际的数据 + ZXIC_COMM_MEMCPY(entry_xy.p_mask, p_entry->p_mask, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + } + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("etcam xy:/n"); + dpp_acl_data_print(entry_xy.p_data, entry_xy.p_mask, + p_entry->mode); + } + + /*组装data格式*/ + rc = dpp_dtb_etcam_write_entry_data(dev, block_idx, 0, 1, 0, 0, + 0, //0:写;1读 + wr_mask, DPP_ETCAM_DTYPE_DATA, addr, + 0, entry_xy.p_data, p_entry_data); + + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_etcam_write_entry_data"); + + /*组装mask格式*/ + rc = dpp_dtb_etcam_write_entry_data(dev, block_idx, 0, 1, 0, 0, 0, + wr_mask, DPP_ETCAM_DTYPE_MASK, addr, + 0xFF, entry_xy.p_mask, + p_entry_mask); + + 
ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_etcam_write_entry_data"); + + return DPP_OK; +} + +/** dtb写eRam表 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_dma_write_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 entry_num, + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 wrt_mode; + ZXIC_UINT32 base_addr; + ZXIC_UINT32 index; + ZXIC_UINT32 item_cnt = 0; + ZXIC_UINT32 addr_offset = 0; + ZXIC_UINT32 *p_entry_data = NULL; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT32 eram_table_depth = 0; + + ZXIC_UINT8 *table_data_buff = NULL; + ZXIC_UINT32 entry_data_buff[4] = { 0 }; + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + DPP_SDTTBL_ERAM_T sdt_eram_info = { 0 }; + DPP_DTB_ENTRY_T dtb_one_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_arr); + + //分配下表数据缓存 16K + table_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), table_data_buff); + ZXIC_COMM_MEMSET(table_data_buff, 0, + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + + dtb_one_entry.cmd = cmd_buff; + dtb_one_entry.data = (ZXIC_UINT8 *)entry_data_buff; + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram_info); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get", 
table_data_buff); + base_addr = sdt_eram_info.eram_base_addr; + wrt_mode = sdt_eram_info.eram_mode; //3:128 + eram_table_depth = sdt_eram_info.eram_table_depth; + ZXIC_COMM_CHECK_DEV_INDEX_LOWER_MEMORY_FREE( + DEV_ID(dev), eram_table_depth, 1, table_data_buff); + + switch (wrt_mode) { + case ERAM128_TBL_128b: { + wrt_mode = ERAM128_OPR_128b; + break; + } + + case ERAM128_TBL_64b: { + wrt_mode = ERAM128_OPR_64b; + break; + } + + case ERAM128_TBL_1b: { + wrt_mode = ERAM128_OPR_1b; + break; + } + } + + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + p_entry_data = (ZXIC_UINT32 *)p_entry_arr[item_cnt].p_data; + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(DEV_ID(dev), p_entry_data, + table_data_buff); + index = p_entry_arr[item_cnt].index; + ZXIC_COMM_CHECK_DEV_INDEX_MEMORY_FREE_NO_ASSERT( + DEV_ID(dev), index, 0, eram_table_depth - 1, + table_data_buff); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT( + "dpp_dtb_eram_dma_write_cycle : the item index is %d !\n", + index); + } + + //将一个数据写入entry + rc = dpp_dtb_se_smmu0_ind_write(dev, base_addr, index, wrt_mode, + p_entry_data, &dtb_one_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_se_smmu0_ind_write", table_data_buff); + + switch (wrt_mode) { + case ERAM128_OPR_128b: { + dtb_len += 2; + addr_offset = item_cnt * DTB_LEN_POS_SETP * 2; + break; + } + + case ERAM128_OPR_64b: { + dtb_len += 1; + addr_offset = item_cnt * DTB_LEN_POS_SETP; + break; + } + + case ERAM128_OPR_1b: { + dtb_len += 1; + addr_offset = item_cnt * DTB_LEN_POS_SETP; + break; + } + } + + /*将表格式和数据写入缓存buff中*/ + rc = dpp_dtb_data_write(table_data_buff, addr_offset, + &dtb_one_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", table_data_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + ZXIC_COMM_MEMSET(entry_data_buff, 0, 4 * sizeof(ZXIC_UINT32)); + } + + if (dpp_dtb_prt_get()) { + dpp_data_buff_print(table_data_buff, dtb_len * 16); + ZXIC_COMM_PRINT( + "start down table to dtb : 
queue_id: %d, down_data_len: %d\n", + queue_id, dtb_len * 16); + } + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + table_data_buff, element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_down_table_data", table_data_buff); + + ZXIC_COMM_FREE(table_data_buff); + + return DPP_OK; +} + +/** dtb写ddr中的数据,数据长度在16K范围内 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param ddr_base_addr ddr基地址,以4K*128bit为单位 +* @param ddr_wr_mode ddr写模式 0-128bit, 1-256bit, 2-384bit, 3-512bit,取值参考SMMU1_DDR_WRT_MODE_E的定义 +* @param ddr_ecc_en ddr ECC使能 +* @param entry_num 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_smmu1_data_write_cycle( + DPP_DEV_T *dev, ZXIC_UINT32 queue_id, ZXIC_UINT32 ddr_base_addr, + ZXIC_UINT32 ddr_wr_mode, ZXIC_UINT32 ddr_ecc_en, ZXIC_UINT32 entry_num, + DPP_DTB_DDR_ENTRY_INFO_T *p_entry_arr, ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 index; + ZXIC_UINT32 item_cnt = 0; + ZXIC_UINT32 addr_offset = 0; + ZXIC_UINT32 *p_entry_data = NULL; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT8 *table_data_buff = NULL; + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; /*命令格式缓存*/ + ZXIC_UINT32 entry_data_buff[DPP_DIR_TBL_BUF_MAX_NUM] = { 0 }; + + DPP_DTB_ENTRY_T dtb_one_entry = { 0 }; /*条目结构*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_arr); + + //分配下表数据缓存 16K + table_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), table_data_buff); + ZXIC_COMM_MEMSET(table_data_buff, 0, + 
DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + + dtb_one_entry.cmd = cmd_buff; + dtb_one_entry.data = (ZXIC_UINT8 *)entry_data_buff; + + /*对每一个条目进行处理*/ + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + p_entry_data = (ZXIC_UINT32 *)p_entry_arr[item_cnt].p_data; + ZXIC_COMM_CHECK_POINT_MEMORY_FREE_NO_ASSERT(p_entry_data, + table_data_buff); + + index = p_entry_arr[item_cnt].index; + + //将一个数据写入entry + rc = dpp_dtb_ddr_dir_table_data_write( + DEV_ID(dev), ddr_base_addr, ddr_wr_mode, index, + ddr_ecc_en, (ZXIC_UINT8 *)p_entry_data, &dtb_one_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_ddr_dir_table_data_write", + table_data_buff); + + dtb_len += (ddr_wr_mode + 2); + addr_offset = item_cnt * (ddr_wr_mode + 2) * DTB_LEN_POS_SETP; + + /*数据写入缓存中*/ + rc = dpp_dtb_data_write(table_data_buff, addr_offset, + &dtb_one_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", table_data_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + ZXIC_COMM_MEMSET(entry_data_buff, 0, + DPP_DIR_TBL_BUF_MAX_NUM * sizeof(ZXIC_UINT32)); + } + + if (dpp_dtb_prt_get()) { + dpp_data_buff_print(table_data_buff, dtb_len * 16); + ZXIC_COMM_PRINT( + "start down table to dtb : queue_id: %d, down_data_len: %d\n", + queue_id, dtb_len * 16); + } + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + table_data_buff, element_id); + ZXIC_COMM_FREE(table_data_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_down_table_data"); + + return DPP_OK; +} + +/** dtb写HASH表,在插入条目时如果冲突,则对冲突条目进行记录 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 是否是有一个写不成功就返回,还是继续进行下一个条目并记录错误的条目 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS +dpp_dtb_hash_dma_insert_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 
sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 temp_key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 end_flag = 0; + //ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 item_cnt = 0; + ZXIC_UINT8 key_valid = 1; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + + DPP_SE_CFG *p_se_cfg = NULL; + ZXIC_RB_TN *p_rb_tn_new = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_new = NULL; + FUNC_ID_INFO *p_func_info = NULL; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + DPP_HASH_ENTRY entry = { 0 }; /* hash条目结构体*/ + ZXIC_UINT8 *p_data_buff = NULL; + ZXIC_MUTEX_T *p_hash_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_arr_hash_entry); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_read"); + + hash_entry_cfg.fun_id = sdt_hash_info.hash_id; /*hash引擎*/ + ZXIC_COMM_CHECK_INDEX(hash_entry_cfg.fun_id, HASH_FUNC_ID_MIN, + HASH_FUNC_ID_NUM - 1); + hash_entry_cfg.table_id = sdt_hash_info.hash_table_id; /*hash表号*/ + ZXIC_COMM_CHECK_INDEX_UPPER(hash_entry_cfg.table_id, + HASH_TBL_ID_NUM - 1); + hash_entry_cfg.bulk_id = ((hash_entry_cfg.table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_INDEX_UPPER(hash_entry_cfg.bulk_id, HASH_BULK_NUM - 1); + hash_entry_cfg.key_type = sdt_hash_info.hash_table_width; /*表宽度*/ + ZXIC_COMM_CHECK_INDEX(hash_entry_cfg.key_type, HASH_KEY_128b, + HASH_KEY_512b); + hash_entry_cfg.actu_key_size = + 
sdt_hash_info.key_size; /*业务表键值长度*/ + ZXIC_COMM_CHECK_INDEX(hash_entry_cfg.actu_key_size, HASH_ACTU_KEY_MIN, + HASH_ACTU_KEY_MAX); + hash_entry_cfg.key_by_size = + DPP_GET_KEY_SIZE(hash_entry_cfg.actu_key_size); + hash_entry_cfg.rst_by_size = DPP_GET_RST_SIZE( + hash_entry_cfg.key_type, hash_entry_cfg.actu_key_size); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, hash_entry_cfg.fun_id); + DPP_SE_CHECK_FUN(p_func_info, hash_entry_cfg.fun_id, FUN_HASH); + hash_entry_cfg.p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), hash_entry_cfg.p_hash_cfg); + + /*条目数上限检查*/ + //ddr_wr_mode = DPP_GET_DDR_WR_MODE(hash_entry_cfg.key_type); + //ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), entry_num, 1, DTB_DATA_SIZE_BIT / ( (ddr_wr_mode + 2) * DTB_TABLE_CMD_SIZE_BIT)); + + entry.p_key = key; + entry.p_rst = rst; + entry.p_key[0] = (ZXIC_UINT8)(((key_valid & 0x1) << 7) | + ((hash_entry_cfg.key_type & 0x3) << 5) | + (hash_entry_cfg.table_id & 0x1f)); + + p_data_buff = + (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(DPP_DTB_TABLE_DATA_BUFF_SIZE); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data_buff); + ZXIC_COMM_MEMSET(p_data_buff, 0x0, DPP_DTB_TABLE_DATA_BUFF_SIZE); + + rc = dpp_dev_hash_opr_mutex_get(dev, hash_entry_cfg.fun_id, + &p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dev_opr_mutex_get", + p_data_buff); + rc = zxic_comm_mutex_lock(p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "zxic_comm_mutex_lock", + p_data_buff); + + /*对每一个条目进行处理*/ + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + end_flag = 0; + /*组装数据*/ + ZXIC_COMM_MEMCPY(&entry.p_key[1], + p_arr_hash_entry[item_cnt].p_actu_key, + hash_entry_cfg.actu_key_size); + ZXIC_COMM_MEMCPY(&entry.p_rst[0], + p_arr_hash_entry[item_cnt].p_rst, + ((hash_entry_cfg.rst_by_size > HASH_RST_MAX) ? 
+ HASH_RST_MAX : + hash_entry_cfg.rst_by_size)); + + rc = dpp_hash_red_black_node_alloc(dev, &p_rb_tn_new, + &p_rbkey_new); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_hash_red_black_node_alloc", p_data_buff, + p_hash_mutex); + ZXIC_COMM_MEMCPY(p_rbkey_new->key, entry.p_key, + hash_entry_cfg.key_by_size); + hash_entry_cfg.p_rbkey_new = p_rbkey_new; + hash_entry_cfg.p_rb_tn_new = p_rb_tn_new; + + rc = dpp_hash_rb_insert(dev, &hash_entry_cfg, &entry); + if (DPP_OK != rc) { + if (DPP_HASH_RC_ADD_UPDATE == rc) { + /*将位置信息和数据信息写入buff中*/ + rc = dpp_dtb_add_hash_buf_write( + DEV_ID(dev), &hash_entry_cfg, + p_data_buff, item_cnt, &dtb_len); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_dtb_add_hash_buf_write", + p_data_buff, p_hash_mutex); + } + continue; + } + + /*insert new hash item*/ + /*1 first form the new key(calc crc)*/ + rc = dpp_hash_set_crc_key(dev, &hash_entry_cfg, &entry, + temp_key); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_hash_set_crc_key", p_data_buff, p_hash_mutex); + + /*2 if DDR is valid, first insert into DDR.*/ + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + if (p_hash_cfg->ddr_valid) { + rc = dpp_hash_insert_ddr(dev, &hash_entry_cfg, temp_key, + &end_flag); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_hash_insert_ddr", p_data_buff, + p_hash_mutex); + } + + /*3 if insert into DDR is fail, insert into ZCAM. */ + if (!end_flag) { + rc = dpp_hash_insert_zcell(dev, p_se_cfg, + &hash_entry_cfg, temp_key, + &end_flag); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_hash_insert_zcell", p_data_buff, + p_hash_mutex); + } + + /*4 if insert into ZCAM is fail, insert into ZBLK Reg. 
*/ + if (!end_flag) { + rc = dpp_hash_insert_zreg(dev, &hash_entry_cfg, + temp_key, &end_flag); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_hash_insert_zreg", p_data_buff, + p_hash_mutex); + } + + if (!end_flag) { + p_hash_cfg->hash_stat.insert_fail++; + /* recycle rb tree node */ + ZXIC_COMM_MEMCPY(temp_key, entry.p_key, + hash_entry_cfg.key_by_size); + rc = zxic_comm_rb_delete(&p_hash_cfg->hash_rb, + p_rbkey_new, &p_rb_tn_rtn); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "zxic_comm_rb_delete", p_data_buff, + p_hash_mutex); + ZXIC_COMM_ASSERT(p_rb_tn_new == p_rb_tn_rtn); + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_rtn); + ZXIC_COMM_FREE(p_data_buff); + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + dpp_dtb_data_print(temp_key, + hash_entry_cfg.key_by_size); + ZXIC_COMM_TRACE_ERROR("DPP_HASH_RC_TBL_FULL.\n"); + return DPP_RC_DTB_DOWN_HASH_CONFLICT; + } + + /*将位置信息和数据信息写入buff中*/ + rc = dpp_dtb_add_hash_buf_write(DEV_ID(dev), &hash_entry_cfg, + p_data_buff, item_cnt, + &dtb_len); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_dtb_add_hash_buf_write", p_data_buff, + p_hash_mutex); + + p_hash_cfg->hash_stat.insert_ok++; + } + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "zxic_comm_mutex_unlock", + p_data_buff); + + if (dpp_dtb_prt_get()) { + dpp_data_buff_print(p_data_buff, dtb_len * 16); + } + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + p_data_buff, element_id); + ZXIC_COMM_FREE(p_data_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_down_table_data"); + + return DPP_OK; +} + +/** dtb写ACL表 (SPECIFY模式,条目中指定handle,支持级联64bit/128bit) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no ACL表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 是否是有一个写不成功就返回,还是继续进行下一个条目并记录错误的条目 +* @see +* 
@author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS +dpp_dtb_acl_dma_insert_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr, + ZXIC_UINT32 *element_id) +{ + /* + 1、条目的handle值是指定的; + 2、根据级联配置在etcam中保存级联结果值 + */ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 etcam_key_mode; + ZXIC_UINT32 as_eram_baddr; + ZXIC_UINT32 as_enable; + ZXIC_UINT32 etcam_table_id; + ZXIC_UINT32 etcam_as_mode; + ZXIC_UINT32 block_idx = 0; + ZXIC_UINT32 ram_addr = 0; + ZXIC_UINT32 etcam_wr_mode = 0; + ZXIC_UINT32 eram_wrt_mode = 0; + ZXIC_UINT32 eram_index; + + ZXIC_UINT32 item_cnt = 0; + ZXIC_UINT32 addr_offset_bk = 0; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT32 as_addr_offset = 0; + ZXIC_UINT32 as_dtb_len = 0; + + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + DPP_DTB_ACL_ENTRY_INFO_T *p_acl_entry = NULL; + ZXIC_UINT32 *p_as_eram_data = NULL; + ZXIC_UINT8 *table_data_buff = NULL; + DPP_ETCAM_ENTRY_T etcam_entry = { 0 }; + + ZXIC_UINT8 entry_data_buff[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 entry_mask_buff[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT32 as_eram_data_buff[4] = { 0 }; + ZXIC_UINT8 entry_data_cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + ZXIC_UINT8 entry_mask_cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + ZXIC_UINT8 as_eram_cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; + DPP_DTB_ENTRY_T entry_data = { 0 }; + DPP_DTB_ENTRY_T entry_mask = { 0 }; + DPP_DTB_ENTRY_T dtb_as_data_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), 
p_acl_entry_arr); + + entry_data.cmd = entry_data_cmd_buff; + entry_data.data = (ZXIC_UINT8 *)entry_data_buff; + + entry_mask.cmd = entry_mask_cmd_buff; + entry_mask.data = (ZXIC_UINT8 *)entry_mask_buff; + + dtb_as_data_entry.cmd = as_eram_cmd_buff; + dtb_as_data_entry.data = (ZXIC_UINT8 *)as_eram_data_buff; + + //分配下表数据缓存 16K + table_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), table_data_buff); + ZXIC_COMM_MEMSET(table_data_buff, 0, + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get", table_data_buff); + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(etcam_key_mode, + DPP_ACL_KEY_640b, + DPP_ACL_KEY_80b, + table_data_buff); + etcam_as_mode = sdt_etcam_info.as_rsp_mode; + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(etcam_as_mode, + DPP_ACL_AS_MODE_64b, + DPP_ACL_AS_MODE_INVALID - 1, + table_data_buff); + etcam_table_id = sdt_etcam_info.etcam_table_id; + ZXIC_COMM_CHECK_INDEX_MEMORY_FREE_NO_ASSERT(etcam_table_id, + DPP_ACL_TBL_ID_MIN, + DPP_ACL_TBL_ID_MAX, + table_data_buff); + as_enable = sdt_etcam_info.as_en; + as_eram_baddr = sdt_etcam_info.as_eram_baddr; + + //级联数据下发 + if (as_enable) { + //mode转换 + switch (etcam_as_mode) { + case ERAM128_TBL_128b: { + eram_wrt_mode = ERAM128_OPR_128b; + break; + } + + case ERAM128_TBL_64b: { + eram_wrt_mode = ERAM128_OPR_64b; + break; + } + + case ERAM128_TBL_1b: { + eram_wrt_mode = ERAM128_OPR_1b; + break; + } + } + } + + rc = dpp_acl_cfg_get(dev, &p_acl_cfg); //获取ACL表资源配置 + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_acl_cfg_get", table_data_buff); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(p_acl_cfg, table_data_buff); + + p_tbl_cfg = + p_acl_cfg->acl_tbls + etcam_table_id; //得到该acl表的资源配置 + + if (!p_tbl_cfg->is_used) { + 
ZXIC_COMM_TRACE_ERROR("table[ %d ] is not init!\n", + etcam_table_id); + ZXIC_COMM_FREE(table_data_buff); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_TBL_NOT_INIT; + } + + /*对每一个条目进行处理*/ + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + p_acl_entry = p_acl_entry_arr + item_cnt; + + /* write etcam key */ + etcam_entry.mode = p_tbl_cfg->key_mode; + etcam_entry.p_data = p_acl_entry->key_data; + etcam_entry.p_mask = p_acl_entry->key_mask; + + /*计算地址等信息*/ + rc = dpp_acl_hdw_addr_get(p_tbl_cfg, p_acl_entry->handle, + &block_idx, &ram_addr, + &etcam_wr_mode); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_acl_hdw_addr_get", + table_data_buff); + + rc = dpp_dtb_etcam_entry_add(dev, ram_addr, block_idx, + etcam_wr_mode, DPP_ETCAM_OPR_DM, + &etcam_entry, &entry_data, + &entry_mask); + + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_etcam_entry_add", table_data_buff); + + /*data数据写入缓存*/ + dtb_len += DTB_ETCAM_LEN_SIZE; + rc = dpp_dtb_data_write(table_data_buff, addr_offset_bk, + &entry_data); /*将数据写入缓存中*/ + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", table_data_buff); + ZXIC_COMM_MEMSET(entry_data_cmd_buff, 0, + DTB_TABLE_CMD_SIZE_BIT / 8); + ZXIC_COMM_MEMSET(entry_data_buff, 0, DPP_ETCAM_WIDTH_MAX / 8); + addr_offset_bk = + addr_offset_bk + + DTB_ETCAM_LEN_SIZE * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + + /*mask数据写入缓存*/ + dtb_len += DTB_ETCAM_LEN_SIZE; + rc = dpp_dtb_data_write(table_data_buff, addr_offset_bk, + &entry_mask); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", table_data_buff); + ZXIC_COMM_MEMSET(entry_mask_cmd_buff, 0, + DTB_TABLE_CMD_SIZE_BIT / 8); + ZXIC_COMM_MEMSET(entry_mask_buff, 0, 640 / 8); + addr_offset_bk = + addr_offset_bk + + DTB_ETCAM_LEN_SIZE * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + + //处理级联数据 + if (as_enable) { + p_as_eram_data = + (ZXIC_UINT32 *)(p_acl_entry->p_as_rslt); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE_NO_ASSERT( + DEV_ID(dev), p_as_eram_data, table_data_buff); + 
eram_index = p_acl_entry->handle; + rc = dpp_dtb_se_smmu0_ind_write( + dev, as_eram_baddr, eram_index, eram_wrt_mode, + p_as_eram_data, &dtb_as_data_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_se_smmu0_ind_write", + table_data_buff); + + switch (eram_wrt_mode) { + case ERAM128_OPR_128b: { + as_dtb_len = 2; + as_addr_offset = DTB_LEN_POS_SETP * 2; + break; + } + + case ERAM128_OPR_64b: { + as_dtb_len = 1; + as_addr_offset = DTB_LEN_POS_SETP; + break; + } + + case ERAM128_OPR_1b: { + as_dtb_len = 1; + as_addr_offset = DTB_LEN_POS_SETP; + break; + } + } + + /*将表格式和数据写入缓存buff中*/ + rc = dpp_dtb_data_write(table_data_buff, addr_offset_bk, + &dtb_as_data_entry); + addr_offset_bk = + addr_offset_bk + + as_addr_offset; /*在缓存中相对于0的offset*/ + dtb_len += as_dtb_len; + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", table_data_buff); + ZXIC_COMM_MEMSET(as_eram_cmd_buff, 0, + DTB_TABLE_CMD_SIZE_BIT / 8); + ZXIC_COMM_MEMSET(as_eram_data_buff, 0, + 4 * sizeof(ZXIC_UINT32)); + } + } + + if (dpp_dtb_prt_get()) { + dpp_data_buff_print(table_data_buff, dtb_len * 16); + } + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + table_data_buff, element_id); + ZXIC_COMM_FREE(table_data_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_down_table_data"); + + return DPP_OK; +} + +#endif /*DTB BASE INTERFACE*/ + +#if ZXIC_REAL("DTB_DOWN_INTERFACE") +/** dtb写eRam表--支持大数据量 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param entry_cnt 下发的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_dma_write(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 wrt_mode; + ZXIC_UINT32 entry_num_max = 
0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 i = 0; + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry = NULL; + + DPP_SDTTBL_ERAM_T sdt_eram_info = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + + wrt_mode = sdt_eram_info.eram_mode; //3:128 + + switch (wrt_mode) { + case ERAM128_TBL_128b: { + entry_num_max = 0x1ff; + break; + } + + case ERAM128_TBL_64b: { + entry_num_max = 0x3ff; + break; + } + + case ERAM128_TBL_1b: { + entry_num_max = 0x3ff; + break; + } + } + + ZXIC_COMM_CHECK_INDEX_EQUAL(entry_num_max, 0); + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + p_entry = p_entry_arr + entry_num_max * i; + rc = dpp_dtb_eram_dma_write_cycle(dev, queue_id, sdt_no, + entry_num_max, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_eram_dma_write_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_eram_dma_write_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } + + if (entry_remains) { + p_entry = p_entry_arr + entry_num_max * entry_cycle; + rc = dpp_dtb_eram_dma_write_cycle(dev, queue_id, sdt_no, + entry_remains, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_eram_dma_write_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_eram_dma_write_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } + return DPP_OK; +} + +DPP_STATUS dpp_dtb_hash_dma_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 entry_num_max = 0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 i = 0; + DPP_DTB_HASH_ENTRY_INFO_T *p_entry = NULL; + + 
ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + entry_num_max = 0xcc; //按512bit数据进行计算 + + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + p_entry = p_arr_hash_entry + entry_num_max * i; + rc = dpp_dtb_hash_dma_insert_cycle(dev, queue_id, sdt_no, + entry_num_max, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_hash_dma_insert_cycle"); + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_dma_insert_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } + + if (entry_remains) { + p_entry = p_arr_hash_entry + entry_num_max * entry_cycle; + rc = dpp_dtb_hash_dma_insert_cycle(dev, queue_id, sdt_no, + entry_remains, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_hash_dma_insert_cycle"); + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_dma_insert_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } + + return DPP_OK; +} + +DPP_STATUS dpp_dtb_hash_dma_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 entry_num_max = 0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 i = 0; + DPP_DTB_HASH_ENTRY_INFO_T *p_entry = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + + entry_num_max = 0xcc; //按512bit数据进行计算 + + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + p_entry = p_arr_hash_entry + entry_num_max * i; + rc = dpp_dtb_hash_dma_delete_cycle(dev, queue_id, sdt_no, + entry_num_max, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_hash_dma_delete_cycle"); + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_dma_delete_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } 
+ + if (entry_remains) { + p_entry = p_arr_hash_entry + entry_num_max * entry_cycle; + rc = dpp_dtb_hash_dma_delete_cycle(dev, queue_id, sdt_no, + entry_remains, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_hash_dma_delete_cycle"); + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_hash_dma_delete_cycle: slot [%d] queue [%d] element_id [%d] done.\n", + DEV_PCIE_SLOT(dev), queue_id, *element_id); + } + + return DPP_OK; +} + +/** dtb删除HASH表 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 删除的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author @date 2023/03/14 +************************************************************/ +DPP_STATUS +dpp_dtb_hash_dma_delete_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT8 key_valid = 1; + ZXIC_UINT32 item_cnt = 0; + //ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 dtb_len = 0; + + SE_ITEM_CFG *p_item = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_rtn = NULL; + DPP_HASH_RBKEY_INFO temp_rbkey = { { 0 } }; + ZXIC_UINT8 *p_data_buff = NULL; + ZXIC_MUTEX_T *p_hash_mutex = NULL; + + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_LOWER(DEV_ID(dev), entry_num, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_arr_hash_entry); + + //从sdt_no中获取hash配置 + rc = dpp_hash_get_hash_info_from_sdt(dev, sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = 
hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + + /*条目数上限检查*/ + //ddr_wr_mode = DPP_GET_DDR_WR_MODE(hash_entry_cfg.key_type); + //ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), entry_num, 1, DTB_DATA_SIZE_BIT / ( (ddr_wr_mode + 2) * DTB_TABLE_CMD_SIZE_BIT)); + + p_data_buff = + (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(DPP_DTB_TABLE_DATA_BUFF_SIZE); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data_buff); + ZXIC_COMM_MEMSET(p_data_buff, 0x0, DPP_DTB_TABLE_DATA_BUFF_SIZE); + + rc = dpp_dev_hash_opr_mutex_get(dev, p_hash_cfg->fun_id, &p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dev_opr_mutex_get", + p_data_buff); + rc = zxic_comm_mutex_lock(p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "zxic_comm_mutex_lock", + p_data_buff); + + /*对每一个条目进行处理*/ + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + ZXIC_COMM_MEMSET(&temp_rbkey, 0x0, sizeof(DPP_HASH_RBKEY_INFO)); + temp_rbkey.key[0] = + (ZXIC_UINT8)(((key_valid & 0x1) << 7) | + ((hash_entry_cfg.key_type & 0x3) << 5) | + (hash_entry_cfg.table_id & 0x1f)); + ZXIC_COMM_MEMCPY(&temp_rbkey.key[1], + p_arr_hash_entry[item_cnt].p_actu_key, + hash_entry_cfg.actu_key_size); + rc = zxic_comm_rb_delete(&p_hash_cfg->hash_rb, &temp_rbkey, + &p_rb_tn_rtn); + if (ZXIC_RBT_RC_SRHFAIL == rc) { + p_hash_cfg->hash_stat.delete_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "Error!there is not item in hash!\n"); + continue; + } + + if (NULL == p_rb_tn_rtn) { + ZXIC_COMM_FREE(p_data_buff); + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return ZXIC_PAR_CHK_POINT_NULL; + } + + p_rbkey_rtn = (DPP_HASH_RBKEY_INFO *)(p_rb_tn_rtn->p_key); + ZXIC_COMM_MEMSET(p_rbkey_rtn->rst, 0x0, + sizeof(p_rbkey_rtn->rst)); + hash_entry_cfg.p_rbkey_new = p_rbkey_rtn; + hash_entry_cfg.p_rb_tn_new = p_rb_tn_rtn; + + p_item = p_rbkey_rtn->p_item_info; + rc = zxic_comm_double_link_del(&(p_rbkey_rtn->entry_dn), + 
&(p_item->item_list)); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_data_buff); + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return rc; + } + p_item->wrt_mask &= + ~(DPP_GET_HASH_ENTRY_MASK(p_rbkey_rtn->entry_size, + p_rbkey_rtn->entry_pos)) & + 0xF; + + /*将位置信息和数据信息写入buff中*/ + rc = dpp_dtb_delete_hash_buf_write(DEV_ID(dev), &hash_entry_cfg, + p_data_buff, item_cnt, + &dtb_len); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_data_buff); + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return rc; + } + + if (0 == p_item->item_list.used) { + if ((ITEM_DDR_256 == p_item->item_type) || + (ITEM_DDR_512 == p_item->item_type)) { + /* modify coverity by yinxh 2021.03.10*,以256bit为单位。暂不考虑512bit的情况*/ + if ((p_item->item_index) > + (p_hash_cfg + ->p_bulk_ddr_info[hash_entry_cfg + .bulk_id] + ->item_num)) { + ZXIC_COMM_FREE(p_data_buff); + rc = zxic_comm_mutex_unlock( + p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + p_hash_cfg + ->p_bulk_ddr_info[hash_entry_cfg.bulk_id] + ->p_item_array[p_item->item_index] = + NULL; + ZXIC_COMM_FREE(p_item); + } else { + p_item->valid = 0; + } + } + + ZXIC_COMM_FREE(p_rbkey_rtn); + ZXIC_COMM_FREE(p_rb_tn_rtn); + + p_hash_cfg->hash_stat.delete_ok++; + } + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "zxic_comm_mutex_unlock", + p_data_buff); + + if (dtb_len) { + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + p_data_buff, element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_down_table_data", p_data_buff); + } + ZXIC_COMM_FREE(p_data_buff); + + return DPP_OK; +} + +/** dtb删除所有的HASH表(硬件查找,硬件删除) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no hash表sdt表号 +* @param entry_cnt 删除的条目数 +* @param p_entry_arr 待下发表项内容结构体数组指针 +* @param 
element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/02 +************************************************************/ +DPP_STATUS +dpp_dtb_hash_dma_delete_hardware(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_HASH_ENTRY_INFO_T *p_arr_hash_entry, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + //ZXIC_UINT32 ddr_wr_mode = 0; + ZXIC_UINT32 item_cnt = 0; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + ZXIC_UINT8 entry_cmd[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + ZXIC_UINT8 entry_data[DPP_ETCAM_WIDTH_MAX / 8] = { + 0 + }; /*兼容所有表项,按照最大位宽分配,acl最大为640bit(80B)*/ + ZXIC_UINT8 *p_data_buff = NULL; + ZXIC_UINT32 dev_id = 0; + + DPP_HASH_ENTRY entry = { 0 }; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_DTB_ENTRY_T dtb_one_entry = { 0 }; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + dev_id = DEV_ID(dev); + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_arr_hash_entry); + + dtb_one_entry.cmd = entry_cmd; + dtb_one_entry.data = entry_data; + entry.p_key = key; + entry.p_rst = rst; + + //从sdt_no中获取hash配置 + rc = dpp_hash_get_hash_info_from_sdt(dev, sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_cfg); + + /*条目数上限检查*/ + //ddr_wr_mode = DPP_GET_DDR_WR_MODE(hash_entry_cfg.key_type); + //ZXIC_COMM_CHECK_DEV_INDEX(dev_id, entry_num, 1, DTB_DATA_SIZE_BIT / ( (ddr_wr_mode + 2) * DTB_TABLE_CMD_SIZE_BIT)); + + p_data_buff = + (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(DPP_DTB_TABLE_DATA_BUFF_SIZE); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data_buff); + 
ZXIC_COMM_MEMSET(p_data_buff, 0x0, DPP_DTB_TABLE_DATA_BUFF_SIZE); + + /*对每一个条目进行处理*/ + for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) { + srh_succ = 0; + ZXIC_COMM_MEMSET(key, 0x0, sizeof(key)); + ZXIC_COMM_MEMSET(rst, 0x0, sizeof(rst)); + ZXIC_COMM_MEMSET(entry_cmd, 0x0, sizeof(entry_cmd)); + ZXIC_COMM_MEMSET(entry_data, 0x0, sizeof(entry_data)); + ZXIC_COMM_MEMCPY(entry.p_key, + p_arr_hash_entry[item_cnt].p_actu_key, + hash_entry_cfg.key_by_size); + + rc = dpp_dtb_hash_zcam_delete_hardware(dev, queue_id, + &hash_entry_cfg, &entry, + &dtb_one_entry, + &srh_succ); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_hash_zcam_delete_hardware", p_data_buff); + + if (srh_succ) { + rc = dpp_dtb_data_write(p_data_buff, 0, &dtb_one_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", p_data_buff); + dtb_len = + dtb_one_entry.data_size / DTB_LEN_POS_SETP + 1; + + rc = dpp_dtb_write_down_table_data( + dev, queue_id, dtb_len * DTB_LEN_POS_SETP, + p_data_buff, element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_down_table_data", + p_data_buff); + } + } + + ZXIC_COMM_FREE(p_data_buff); + + return DPP_OK; +} + +/***********************************************************/ +/** 将hash表DDR存储位宽(按照256bit/512bit划分)转换为hash在DDR中的实际位置(按照128/256/512bit划分) +* @param index hash表DDR存储位宽索引(按照256bit/512bit划分) +* @param width_mode DDR位宽(256bit/512bit) +* @param key_type hash表项位宽DPP_HASH_KEY_TYPE +* @param byte_offset hash表项在一个DDR存储位宽(256bit/512bit)里的偏移,单位为字节 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_ddr_index_calc(ZXIC_UINT32 index, ZXIC_UINT32 width_mode, + ZXIC_UINT32 key_type, ZXIC_UINT32 byte_offset) +{ + ZXIC_UINT32 ddr_index = 0; /*按照hash位宽128/256/512bit进行划分*/ + ZXIC_UINT32 entry_size = 0; + + entry_size = DPP_GET_HASH_ENTRY_SIZE(key_type); + if (0 == entry_size) { + return ddr_index; + } + + if (DDR_WIDTH_256b == 
width_mode) { + if (HASH_KEY_128b == key_type) { + ddr_index = (index << 1) + + ((byte_offset / entry_size) & 0x1); + } else if (HASH_KEY_256b == key_type) { + ddr_index = index; + } + } else if (DDR_WIDTH_512b == width_mode) { + if (HASH_KEY_128b == key_type) { + ddr_index = (index << 2) + + ((byte_offset / entry_size) & 0x3); + } else if (HASH_KEY_256b == key_type) { + ddr_index = (index << 1) + + ((byte_offset / entry_size) & 0x1); + } else if (HASH_KEY_512b == key_type) { + ddr_index = index; + } + } + + return ddr_index; +} + +/***********************************************************/ +/** 查找存储在ZCAM空间的hash表项并删除(硬件处理) +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param p_hash_entry_cfg hash表项配置信息 +* @param p_hash_entry 查找键值信息 +* @param p_entry dtb描述符 +* @param p_srh_succ 出参,查找是否成功 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_hash_zcam_delete_hardware(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + DPP_DTB_ENTRY_T *p_entry, + ZXIC_UINT8 *p_srh_succ) +{ + DPP_STATUS rc = DPP_OK; + DPP_HASH_RBKEY_INFO srh_rbkey = { 0 }; + DPP_HASH_CFG *p_hash_cfg = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 pre_zblk_idx = 0xFFFFFFFF; /* -1; */ + ZXIC_UINT16 crc16_value = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 item_idx = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 byte_offset = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 temp_key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rd_buff[SE_ITEM_WIDTH_MAX] = { 0 }; + + D_NODE *p_zblk_dn = NULL; + D_NODE *p_zcell_dn = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, 
DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_srh_succ); + ZXIC_COMM_CHECK_POINT(p_hash_entry); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_rst); + + ZXIC_COMM_MEMSET(rd_buff, 0x0, sizeof(rd_buff)); + ZXIC_COMM_MEMSET(&srh_rbkey, 0x0, sizeof(DPP_HASH_RBKEY_INFO)); + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->key_by_size, + HASH_KEY_MAX); + ZXIC_COMM_MEMCPY(srh_rbkey.key, p_hash_entry->p_key, + p_hash_entry_cfg->key_by_size); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_cfg); + + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->key_by_size, + HASH_KEY_MAX); + rc = dpp_hash_set_crc_key(dev, p_hash_entry_cfg, p_hash_entry, + temp_key); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "hash_set_crc_key"); + + /*查找所有的zcell*/ + p_zcell_dn = p_hash_cfg->hash_shareram.zcell_free_list.p_next; + while (p_zcell_dn) { + p_zcell = (SE_ZCELL_CFG *)p_zcell_dn->data; + zblk_idx = GET_ZBLK_IDX(p_zcell->zcell_idx); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zblk_idx, 0, SE_ZBLK_NUM - 1); + p_zblk = &(p_se_cfg->zblk_info[zblk_idx]); + + if (zblk_idx != pre_zblk_idx) { + pre_zblk_idx = zblk_idx; + crc16_value = p_hash_cfg->p_hash16_fun( + temp_key, p_hash_entry_cfg->key_by_size, + p_zblk->hash_arg); + } + + zcell_id = GET_ZCELL_IDX(p_zcell->zcell_idx); + item_idx = GET_ZCELL_CRC_VAL(zcell_id, crc16_value); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, item_idx, 0, + SE_RAM_DEPTH - 1); + addr = ZBLK_ITEM_ADDR_CALC(p_zcell->zcell_idx, item_idx); + rc = dpp_dtb_se_zcam_dma_dump(dev, queue_id, addr, + DTB_DUMP_ZCAM_512b, 1, + (ZXIC_UINT32 *)rd_buff, + &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_se_zcam_dma_dump"); + zxic_comm_swap(rd_buff, sizeof(rd_buff)); + + rc = dpp_dtb_hash_data_parse(ITEM_RAM, + p_hash_entry_cfg->key_by_size, 
+ p_hash_entry, rd_buff, + &byte_offset); + if (DPP_OK == rc) { + ZXIC_COMM_TRACE_DEBUG( + "Hash search hardware succ in zcell. \n"); + srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + break; + } + + p_zcell_dn = p_zcell_dn->next; + } + + /*zcell查找失败,则查找所有的zreg*/ + if (0 == srh_succ) { + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblk_idx = p_zblk->zblk_idx; + + for (i = 0; i < SE_ZREG_NUM; i++) { + item_idx = i; + addr = ZBLK_HASH_LIST_REG_ADDR_CALC(zblk_idx, + item_idx); + rc = dpp_dtb_se_zcam_dma_dump( + dev, queue_id, addr, DTB_DUMP_ZCAM_512b, + 1, (ZXIC_UINT32 *)rd_buff, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT( + rc, "dpp_dtb_se_zcam_dma_dump"); + zxic_comm_swap(rd_buff, sizeof(rd_buff)); + + rc = dpp_dtb_hash_data_parse( + ITEM_RAM, p_hash_entry_cfg->key_by_size, + p_hash_entry, rd_buff, &byte_offset); + if (DPP_OK == rc) { + ZXIC_COMM_TRACE_DEBUG( + "Hash search hardware succ in zreg. \n"); + srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + break; + } + } + p_zblk_dn = p_zblk_dn->next; + } + } + + /*查找成功,则将对应表项删除*/ + if (srh_succ) { + ZXIC_COMM_CHECK_INDEX_UPPER(byte_offset, SE_ITEM_WIDTH_MAX - 1); + ZXIC_COMM_MEMSET_S( + rd_buff + byte_offset, SE_ITEM_WIDTH_MAX - byte_offset, + 0x0, + DPP_GET_HASH_ENTRY_SIZE(p_hash_entry_cfg->key_type)); + zxic_comm_swap(rd_buff, sizeof(rd_buff)); + rc = dpp_dtb_se_alg_zcam_data_write(dev_id, addr, rd_buff, + p_entry); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_dtb_se_alg_zcam_data_write"); + p_hash_cfg->hash_stat.delete_ok++; + } + + *p_srh_succ = srh_succ; + + return DPP_OK; +} + +/** dtb写ACL表 (SPECIFY模式,条目中指定handle,支持级联64bit/128bit) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no ACL表sdt表号 +* @param entry_num 下发的条目数 +* @param p_acl_entry_arr 待下发表项内容结构体数组指针 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 是否是有一个写不成功就返回,还是继续进行下一个条目并记录错误的条目 +* @see +* @author cbb @date 2022/08/30 
+************************************************************/ +DPP_STATUS dpp_dtb_acl_dma_insert(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 entry_num, + DPP_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 as_enable; + ZXIC_UINT32 etcam_as_mode; + ZXIC_UINT32 entry_num_max = 0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 i = 0; + DPP_DTB_ACL_ENTRY_INFO_T *p_entry = NULL; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + + as_enable = sdt_etcam_info.as_en; + etcam_as_mode = sdt_etcam_info.as_rsp_mode; + + if (!as_enable) { + entry_num_max = 0x55; + } else { + if (etcam_as_mode == ERAM128_TBL_128b) { + entry_num_max = 0x49; + } else { + entry_num_max = 0x4e; + } + } + + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + p_entry = p_acl_entry_arr + entry_num_max * i; + rc = dpp_dtb_acl_dma_insert_cycle(dev, queue_id, sdt_no, + entry_num_max, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_acl_dma_insert_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_acl_dma_insert_cycle[%d]: element_id = %d\n", + i, *element_id); + } + + if (entry_remains) { + p_entry = p_acl_entry_arr + entry_num_max * entry_cycle; + rc = dpp_dtb_acl_dma_insert_cycle(dev, queue_id, sdt_no, + entry_remains, p_entry, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_acl_dma_insert_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_acl_dma_insert_cycle: element_id = %d\n", + *element_id); + } + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("DTB DUMP BASE") + +/** smmu0 组装dump描述符函数 +* @param dev_id 设备号 +* @param base_addr smmu0空间基地址,以128bit为单位 +* @param depth dump的深度以128bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param 
addr_low32 dump缓存地址低32bit +* @param p_dump_info dump描述符指针(已分配好空间128bit) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_smmu0_dump_info_write(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 depth, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + ZXIC_UINT32 *p_dump_info) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + DPP_DTB_ERAM_DUMP_FORM_T dtb_eram_dump_form_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_info); + + //组装dump描述符 + dtb_eram_dump_form_info.valid = DTB_TABLE_VALID; + dtb_eram_dump_form_info.up_type = DTB_DUMP_MODE_ERAM; + dtb_eram_dump_form_info.base_addr = base_addr; + dtb_eram_dump_form_info.tb_depth = depth; + dtb_eram_dump_form_info.tb_dst_addr_h = addr_high32; + dtb_eram_dump_form_info.tb_dst_addr_l = addr_low32; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dpp_dtb_smmu0_dump_info_write: \n"); + ZXIC_COMM_DBGCNT32_PRINT("valid", + dtb_eram_dump_form_info.valid); + ZXIC_COMM_DBGCNT32_PRINT("up_type", + dtb_eram_dump_form_info.up_type); + ZXIC_COMM_DBGCNT32_PRINT("base_addr", + dtb_eram_dump_form_info.base_addr); + ZXIC_COMM_DBGCNT32_PRINT("tb_depth", + dtb_eram_dump_form_info.tb_depth); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_h", + dtb_eram_dump_form_info.tb_dst_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_l", + dtb_eram_dump_form_info.tb_dst_addr_l); + } + + /*组装dump描述符*/ + rc = dpp_dtb_write_dump_cmd(DEV_ID(dev), DTB_DUMP_ERAM, + &dtb_eram_dump_form_info, p_dump_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_cmd"); + + //打印出cmd buff中内容128bit + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump type: %d\n", DTB_DUMP_MODE_ERAM); + ZXIC_COMM_PRINT("cmd: "); + for (i = 0; i < 4; i++) { + ZXIC_COMM_PRINT( + "0x%08x ", + ZXIC_COMM_CONVERT32(*( + (ZXIC_UINT32 *)(p_dump_info + + i)))); //转换成大端显示 + } + ZXIC_COMM_PRINT("\n"); + } + return rc; +} + +/** smmu1 组装dump描述符函数 +* @param dev_id 设备号 
+* @param base_addr dump基地址,以512it为单位 +* @param depth dump的深度以512bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_dump_info dump描述符指针(已分配好空间128bit) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_smmu1_dump_info_write(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 depth, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + ZXIC_UINT32 *p_dump_info) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + DPP_DTB_DDR_DUMP_FORM_T dtb_ddr_dump_form_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_info); + + //组装dump描述符 + dtb_ddr_dump_form_info.valid = DTB_TABLE_VALID; + dtb_ddr_dump_form_info.up_type = DTB_DUMP_MODE_DDR; + dtb_ddr_dump_form_info.base_addr = base_addr; + dtb_ddr_dump_form_info.tb_depth = depth; + dtb_ddr_dump_form_info.tb_dst_addr_h = addr_high32; + dtb_ddr_dump_form_info.tb_dst_addr_l = addr_low32; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dpp_dtb_smmu1_dump_info_write: \n"); + ZXIC_COMM_DBGCNT32_PRINT("valid", dtb_ddr_dump_form_info.valid); + ZXIC_COMM_DBGCNT32_PRINT("up_type", + dtb_ddr_dump_form_info.up_type); + ZXIC_COMM_DBGCNT32_PRINT("base_addr", + dtb_ddr_dump_form_info.base_addr); + ZXIC_COMM_DBGCNT32_PRINT("tb_depth", + dtb_ddr_dump_form_info.tb_depth); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_h", + dtb_ddr_dump_form_info.tb_dst_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_l", + dtb_ddr_dump_form_info.tb_dst_addr_l); + } + + /*组装dump描述符*/ + rc = dpp_dtb_write_dump_cmd(DEV_ID(dev), DTB_DUMP_DDR, + &dtb_ddr_dump_form_info, p_dump_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_cmd"); + + //打印出cmd buff中内容128bit + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump type: %d\n", DTB_DUMP_MODE_ERAM); + ZXIC_COMM_PRINT("cmd: "); + for (i = 0; i < 4; i++) { + ZXIC_COMM_PRINT( + "0x%08x ", + ZXIC_COMM_CONVERT32(*( + (ZXIC_UINT32 *)(p_dump_info + + i)))); 
//转换成大端显示 + } + ZXIC_COMM_PRINT("\n"); + } + + return rc; +} + +/** zcam 组装dump描述符函数 +* @param dev_id 设备号 +* @param addr zcam内部ram地址,包括掩码(512bit为单位) +* @param tb_width dump数据的宽度 00:128bit 01:256bit 10:512bit +* @param depth dump的深度以512bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_dump_info dump描述符指针(已分配好空间128bit) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_zcam_dump_info_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 tb_width, ZXIC_UINT32 depth, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + ZXIC_UINT32 *p_dump_info) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + DPP_DTB_ZCAM_DUMP_FORM_T dtb_zcam_dump_form_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_info); + + //组装dump描述符 + dtb_zcam_dump_form_info.valid = DTB_TABLE_VALID; + dtb_zcam_dump_form_info.up_type = DTB_DUMP_MODE_ZCAM; + dtb_zcam_dump_form_info.tb_width = tb_width; + dtb_zcam_dump_form_info.sram_addr = addr & 0x1FF; //512bit为单位 + dtb_zcam_dump_form_info.ram_reg_flag = (addr >> 16) & 0x1; + dtb_zcam_dump_form_info.z_reg_cell_id = (addr >> 9) & 0x3; + dtb_zcam_dump_form_info.zblock_id = (addr >> 11) & 0x7; + dtb_zcam_dump_form_info.zgroup_id = (addr >> 14) & 0x3; + dtb_zcam_dump_form_info.tb_depth = depth; + dtb_zcam_dump_form_info.tb_dst_addr_h = addr_high32; + dtb_zcam_dump_form_info.tb_dst_addr_l = addr_low32; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dpp_dtb_zcam_dump_info_write: \n"); + ZXIC_COMM_DBGCNT32_PRINT("addr", addr); + ZXIC_COMM_DBGCNT32_PRINT("valid", + dtb_zcam_dump_form_info.valid); + ZXIC_COMM_DBGCNT32_PRINT("up_type", + dtb_zcam_dump_form_info.up_type); + ZXIC_COMM_DBGCNT32_PRINT("zgroup_id", + dtb_zcam_dump_form_info.zgroup_id); + ZXIC_COMM_DBGCNT32_PRINT("zblock_id", + dtb_zcam_dump_form_info.zblock_id); + ZXIC_COMM_DBGCNT32_PRINT("reg_sram_flag", + 
dtb_zcam_dump_form_info.ram_reg_flag); + ZXIC_COMM_DBGCNT32_PRINT("zcell_id", + dtb_zcam_dump_form_info.z_reg_cell_id); + ZXIC_COMM_DBGCNT32_PRINT("tb_width", + dtb_zcam_dump_form_info.tb_width); + ZXIC_COMM_DBGCNT32_PRINT("sram_addr", + dtb_zcam_dump_form_info.sram_addr); + ZXIC_COMM_DBGCNT32_PRINT("tb_depth", + dtb_zcam_dump_form_info.tb_depth); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_h", + dtb_zcam_dump_form_info.tb_dst_addr_h); + ZXIC_COMM_DBGCNT32_PRINT("tb_dst_addr_l", + dtb_zcam_dump_form_info.tb_dst_addr_l); + } + + //将描述符写入dump缓存中 + rc = dpp_dtb_write_dump_cmd(DEV_ID(dev), DTB_DUMP_ZCAM, + &dtb_zcam_dump_form_info, p_dump_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_cmd"); + + //打印出cmd buff中内容128bit + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump type: %d\n", DTB_DUMP_MODE_ERAM); + ZXIC_COMM_PRINT("cmd: "); + for (i = 0; i < 4; i++) { + ZXIC_COMM_PRINT( + "0x%08x ", + ZXIC_COMM_CONVERT32(*( + (ZXIC_UINT32 *)(p_dump_info + + i)))); //转换成大端显示 + } + ZXIC_COMM_PRINT("\n"); + } + + return rc; +} + +/** etcam 组装dump描述符函数 +* @param dev_id 设备号 +* @param p_etcam_dump_info etcam dump内容信息 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_dump_info dump描述符指针(已分配好空间128bit) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_dump_info_write(DPP_DEV_T *dev, + ETCAM_DUMP_INFO_T *p_etcam_dump_info, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + ZXIC_UINT32 *p_dump_info) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + DPP_DTB_ETCAM_DUMP_FORM_T dtb_etcam_dump_form_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_info); + ZXIC_COMM_CHECK_POINT(p_etcam_dump_info); + + //组装dump描述符 + dtb_etcam_dump_form_info.valid = DTB_TABLE_VALID; + dtb_etcam_dump_form_info.up_type = DTB_DUMP_MODE_ETCAM; + dtb_etcam_dump_form_info.block_sel = p_etcam_dump_info->block_sel; + 
dtb_etcam_dump_form_info.addr = p_etcam_dump_info->addr; + dtb_etcam_dump_form_info.rd_mode = p_etcam_dump_info->rd_mode; + dtb_etcam_dump_form_info.data_or_mask = p_etcam_dump_info->data_or_mask; + dtb_etcam_dump_form_info.tb_depth = p_etcam_dump_info->tb_depth; + dtb_etcam_dump_form_info.tb_width = + p_etcam_dump_info + ->tb_width; //00:80bit 01:160bit 10:320bit 11:640bit + dtb_etcam_dump_form_info.tb_dst_addr_h = addr_high32; + dtb_etcam_dump_form_info.tb_dst_addr_l = addr_low32; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dpp_dtb_etcam_dump_info_write: \n"); + ZXIC_COMM_DBGCNT32_PRINT("valid", + dtb_etcam_dump_form_info.valid); + ZXIC_COMM_DBGCNT32_PRINT("up_type", + dtb_etcam_dump_form_info.up_type); + ZXIC_COMM_DBGCNT32_PRINT("block_sel", + dtb_etcam_dump_form_info.block_sel); + ZXIC_COMM_DBGCNT32_PRINT("addr", dtb_etcam_dump_form_info.addr); + ZXIC_COMM_DBGCNT32_PRINT("rd_mode", + dtb_etcam_dump_form_info.rd_mode); + ZXIC_COMM_DBGCNT32_PRINT("data_or_mask", + dtb_etcam_dump_form_info.data_or_mask); + ZXIC_COMM_DBGCNT32_PRINT("tb_depth", + dtb_etcam_dump_form_info.tb_depth); + ZXIC_COMM_DBGCNT32_PRINT("tb_width", + dtb_etcam_dump_form_info.tb_width); + ZXIC_COMM_DBGCNT32_PRINT( + "tb_dst_addr_h", + dtb_etcam_dump_form_info.tb_dst_addr_h); + ZXIC_COMM_DBGCNT32_PRINT( + "tb_dst_addr_l", + dtb_etcam_dump_form_info.tb_dst_addr_l); + } + + //将描述符写入dump缓存中 + rc = dpp_dtb_write_dump_cmd(DEV_ID(dev), DTB_DUMP_ETCAM, + &dtb_etcam_dump_form_info, p_dump_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_cmd"); + + //打印出cmd buff中内容128bit + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump type: %d\n", DTB_DUMP_MODE_ERAM); + ZXIC_COMM_PRINT("cmd: "); + for (i = 0; i < 4; i++) { + ZXIC_COMM_PRINT( + "0x%08x ", + ZXIC_COMM_CONVERT32(*( + (ZXIC_UINT32 *)(p_dump_info + + i)))); //转换成大端显示 + } + ZXIC_COMM_PRINT("\n"); + } + + return rc; +} + +/** eram 生成dump entry,输入dump描述符,输出一个entry +* @param dev_id 设备号 +* @param base_addr smmu0空间基地址,以128bit为单位 +* @param depth 
dump的深度以128bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_entry entry指针(已分配好cmd空间128bit,用来存放dump描述符) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_smmu0_dump_entry(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 depth, ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_entry); + + rc = dpp_dtb_smmu0_dump_info_write(dev, base_addr, depth, addr_high32, + addr_low32, + (ZXIC_UINT32 *)p_entry->cmd); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_dump_info_write"); + p_entry->data_in_cmd_flag = 1; + + return rc; +} + +/** smmu1生成dump entry,输入dump描述符,输出一个entry +* @param dev_id 设备号 +* @param base_addr dump基地址,以128bit为单位 +* @param depth dump的深度以512bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_entry entry指针(已分配好cmd空间128bit,用来存放dump描述符) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_smmu1_dump_entry(DPP_DEV_T *dev, ZXIC_UINT32 base_addr, + ZXIC_UINT32 depth, ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_entry); + + rc = dpp_dtb_smmu1_dump_info_write(dev, base_addr, depth, addr_high32, + addr_low32, + (ZXIC_UINT32 *)p_entry->cmd); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_dump_info_write"); + p_entry->data_in_cmd_flag = 1; + + return rc; +} + +/** zcam 组装dump描述符函数 +* @param dev_id 设备号 +* @param addr zcam内部ram地址,包括掩码(512bit为单位) +* @param tb_width dump数据的宽度 00:128bit 01:256bit 10:512bit +* @param depth dump的深度以512bit为单位 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_entry entry指针(已分配好cmd空间128bit,用来存放dump描述符) 
+* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_zcam_dump_entry(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 tb_width, ZXIC_UINT32 depth, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_entry); + + rc = dpp_dtb_zcam_dump_info_write(dev, addr, tb_width, depth, + addr_high32, addr_low32, + (ZXIC_UINT32 *)p_entry->cmd); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_zcam_dump_info_write"); + p_entry->data_in_cmd_flag = 1; + + return rc; +} + +/** etcam 生成dump entry,输入dump描述符,输出一个entry +* @param dev_id 设备号 +* @param p_etcam_dump_info etcam dump内容信息 +* @param addr_high32 dump缓存地址高32bit +* @param addr_low32 dump缓存地址低32bit +* @param p_entry entry指针(已分配好cmd空间128bit,用来存放dump描述符) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_dump_entry(DPP_DEV_T *dev, + ETCAM_DUMP_INFO_T *p_etcam_dump_info, + ZXIC_UINT32 addr_high32, + ZXIC_UINT32 addr_low32, + DPP_DTB_ENTRY_T *p_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_entry); + + rc = dpp_dtb_etcam_dump_info_write(dev, p_etcam_dump_info, addr_high32, + addr_low32, + (ZXIC_UINT32 *)p_entry->cmd); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_etcam_dump_info_write"); + p_entry->data_in_cmd_flag = 1; + + return rc; +} + +/** 下发dump描述符,dump出数据 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param queue_element_id 元素编号 +* @param p_dump_info dump描述符 +* @param data_len dump出数据的长度(32bit为单位) +* @param desc_len dump描述符的长度(32bit为单位) +* @param p_dump_data dump出的数据缓存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_write_dump_desc_info(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 
queue_element_id, + ZXIC_UINT32 *p_dump_info, + ZXIC_UINT32 data_len, + ZXIC_UINT32 desc_len, + ZXIC_UINT32 *p_dump_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dtb_interrupt_status = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + if ((!(dev->pcie_channel.dev_status)) || + (!dtb_table_function_switch_get())) { + ZXIC_COMM_PRINT("slot[%u] vport[0x%x] dev status off!\n", + dev->pcie_channel.slot, + dev->pcie_channel.vport); + dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + queue_element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + return ZXIC_PAR_CHK_DEV_STATUS_OFF; + } + + /*获取中断配置*/ + dtb_interrupt_status = dpp_dtb_interrupt_status_get(); + + /*下发dump描述符*/ + rc = dpp_dtb_tab_up_info_set(dev, queue_id, queue_element_id, + dtb_interrupt_status, data_len, desc_len, + p_dump_info); + if (DPP_OK != rc) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "the queue %d element id %d dump info set failed!", + queue_id, queue_element_id); + dpp_dtb_item_ack_wr(dev, queue_id, DPP_DTB_DIR_UP_TYPE, + queue_element_id, 0, + DPP_DTB_TAB_ACK_UNUSED_MASK); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_tab_up_info_set"); + } + + /*查询是否dump完成,完成后取出数据 需要在dpp_dtb.c中补充查询一个元素ack是否成功函数*/ + if (!dtb_interrupt_status) { + rc = dpp_dtb_tab_up_success_status_check(dev, queue_id, + queue_element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_dtb_tab_up_success_status_check"); + + /*取出数据*/ + rc = dpp_dtb_tab_up_data_get(dev, queue_id, queue_element_id, + data_len, p_dump_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_tab_up_data_get"); + } else { + rc = dpp_dtb_tab_up_success_status_check(dev, queue_id, + queue_element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_dtb_tab_up_success_status_check"); + + /*取出数据*/ + rc = dpp_dtb_tab_up_data_get(dev, queue_id, queue_element_id, + data_len, p_dump_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_tab_up_data_get"); + /*清中断*/ + 
dpp_dtb_finish_interrupt_event_state_clr(dev, queue_id); + } + + return DPP_OK; +} + +/** smmu0 dump 只写一个dump描述符的接口 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param base_addr 要dump的内容的基地址,以128bit为单位 +* @param depth dump的深度以128bit为单位 +* @param p_data dump出数据缓存(128bit * depth) +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_se_smmu0_dma_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 base_addr, ZXIC_UINT32 depth, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dump_dst_phy_haddr = 0; + ZXIC_UINT32 dump_dst_phy_laddr = 0; + ZXIC_UINT32 queue_item_index = 0; + ZXIC_UINT32 data_len = 0; + ZXIC_UINT32 desc_len = 0; + + ZXIC_UINT8 form_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(depth, 1); + + /*获取队列中可用的元素编号*/ + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &queue_item_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_free_item_get"); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump smmu0:queue %d,item_index: %d\n", + queue_id, queue_item_index); + } + + *element_id = queue_item_index; //保存获取的item_index + + /*获取地址*/ + rc = dpp_dtb_tab_up_item_addr_get(dev, queue_id, queue_item_index, + &dump_dst_phy_haddr, + &dump_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + rc = dpp_dtb_smmu0_dump_info_write(dev, base_addr, depth, + dump_dst_phy_haddr, + dump_dst_phy_laddr, + (ZXIC_UINT32 *)form_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_dump_info_write"); + + /*组装下表命令格式*/ + + data_len = depth * 128 / 32; + desc_len = DTB_LEN_POS_SETP / 4; + + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, queue_item_index, + 
(ZXIC_UINT32 *)form_buff, data_len, + desc_len, p_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info"); + + return DPP_OK; +} + +/** dtb dump DDR表项内容 返回内容以512bit为单位 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param base_addr dump基地址,以512bit为单位 +* @param depth dump的深度以512bit为单位 +* @param p_data dump出数据缓存(512bit * depth) +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_se_smmu1_dma_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 base_addr, ZXIC_UINT32 depth, + ZXIC_UINT32 *p_data, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dump_dst_phy_haddr = 0; + ZXIC_UINT32 dump_dst_phy_laddr = 0; + + ZXIC_UINT32 queue_item_index = 0; + ZXIC_UINT32 data_len = 0; + ZXIC_UINT32 desc_len = 0; + + ZXIC_UINT8 form_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(depth, 1); + + /*获取队列元素*/ + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &queue_item_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_free_item_get"); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump smmu1:queue %d,item_index: %d\n", + queue_id, queue_item_index); + } + + *element_id = queue_item_index; //保存获取的item_index + + /*获取dump dma phy地址*/ + rc = dpp_dtb_tab_up_item_addr_get(dev, queue_id, queue_item_index, + &dump_dst_phy_haddr, + &dump_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + rc = dpp_dtb_smmu1_dump_info_write(dev, base_addr, depth, + dump_dst_phy_haddr, + dump_dst_phy_laddr, + (ZXIC_UINT32 *)form_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_dump_info_write"); + + /*组装下表命令格式*/ + data_len = depth * 512 / 32; //要提取的数据长度,以32bit为单位 + desc_len = 
DTB_LEN_POS_SETP / 4; //描述符长度,以32bit为单位 + + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, queue_item_index, + (ZXIC_UINT32 *)form_buff, data_len, + desc_len, p_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info"); + + return DPP_OK; +} + +/** dtb dump zcam表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param addr zcam内部ram地址,包括掩码 +* @param tb_width dump数据的宽度 +* @param depth dump的深度以tb_width为单位 +* @param p_data dump出数据缓存(tb_width * depth) +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_se_zcam_dma_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 addr, ZXIC_UINT32 tb_width, + ZXIC_UINT32 depth, ZXIC_UINT32 *p_data, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dump_dst_phy_haddr = 0; + ZXIC_UINT32 dump_dst_phy_laddr = 0; + + ZXIC_UINT32 queue_item_index = 0; + ZXIC_UINT32 data_len = 0; + ZXIC_UINT32 desc_len = 0; + ZXIC_UINT32 tb_width_len = 0; + + ZXIC_UINT8 form_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), tb_width, DTB_DUMP_ZCAM_128b, + DTB_DUMP_ZCAM_RSV - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(depth, 1); + + /*获取队列元素*/ + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &queue_item_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_free_item_get"); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump smmu0:queue %d,item_index: %d\n", + queue_id, queue_item_index); + } + + *element_id = queue_item_index; //保存获取的item_index + + /*获取dump dma phy地址*/ + rc = dpp_dtb_tab_up_item_addr_get(dev, queue_id, queue_item_index, + &dump_dst_phy_haddr, + &dump_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + rc = 
dpp_dtb_zcam_dump_info_write(dev, addr, tb_width, depth, + dump_dst_phy_haddr, + dump_dst_phy_laddr, + (ZXIC_UINT32 *)form_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_zcam_dump_info_write"); + + /*组装下表命令格式*/ + tb_width_len = DTB_LEN_POS_SETP << tb_width; + data_len = depth * tb_width_len / 4; //要提取的数据长度,以32bit为单位 + desc_len = DTB_LEN_POS_SETP / 4; //描述符长度,以32bit为单位 + + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, queue_item_index, + (ZXIC_UINT32 *)form_buff, data_len, + desc_len, p_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info"); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("DTB GET") +/***********************************************************/ +/** 配置数据获取模式 +* @param srh_mode 0:软件获取 1:硬件获取 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_VOID dpp_dtb_srh_mode_set(ZXIC_UINT32 srh_mode) +{ + g_dtb_srh_mode = srh_mode; +} + +/***********************************************************/ +/** 获取查找方式 +* @param +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_srh_mode_get(ZXIC_VOID) +{ + return g_dtb_srh_mode; +} + +/***********************************************************/ +/** hash表项查找校验(软件获取数据) +* @param p_entry 入参:hash表项键值 出参:hash表项结果 +* @param key_by_size 键值大小 +* @param rst_by_size 返回值大小 +* @param p_item_info 512bit单元数据 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_software_item_check(DPP_HASH_ENTRY *p_entry, + ZXIC_UINT32 key_by_size, + ZXIC_UINT32 rst_by_size, + SE_ITEM_CFG *p_item_info) +{ + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 temp_key_type = 0; + ZXIC_UINT8 srh_key_type = 0; + D_NODE *p_entry_dn = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + + ZXIC_COMM_CHECK_INDEX(key_by_size, 1, HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT(p_entry); + 
ZXIC_COMM_CHECK_POINT(p_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_entry->p_rst); + ZXIC_COMM_CHECK_POINT(p_item_info); + + srh_key_type = DPP_GET_HASH_KEY_TYPE(p_entry->p_key); + p_entry_dn = p_item_info->item_list.p_next; + while (p_entry_dn) { + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_entry_dn->data; + ZXIC_COMM_CHECK_POINT(p_rbkey); + ZXIC_COMM_CHECK_POINT(p_rbkey->key); + ZXIC_COMM_CHECK_POINT(p_rbkey->rst); + + ZXIC_COMM_ASSERT(p_rbkey->p_item_info == p_item_info); + + temp_key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + + if (DPP_GET_HASH_KEY_VALID(p_rbkey->key) && + (srh_key_type == temp_key_type)) { + if (0 == ZXIC_COMM_MEMCMP(p_entry->p_key, p_rbkey->key, + key_by_size)) { + srh_succ = 1; + break; + } + } + + p_entry_dn = p_entry_dn->next; + } + + if (NULL == p_rbkey) { + return ZXIC_PAR_CHK_POINT_NULL; + } + + if (!srh_succ) { + ZXIC_COMM_TRACE_DEBUG( + "dpp_dtb_hash_software_item_check fail!\n"); + return DPP_HASH_RC_MATCH_ITEM_FAIL; + } + + /* copy result */ + ZXIC_COMM_MEMCPY(p_entry->p_rst, p_rbkey->rst, + (rst_by_size > HASH_RST_MAX) ? 
HASH_RST_MAX : + rst_by_size); + + return DPP_OK; +} + +/** hash表项解析,在512bit存储单元里查找并比对 +* @param item_type +* @param key_by_size 键值大小 +* @param p_entry 入参:hash表项键值 出参:hash表项结果 +* @param p_item_data 查找键值信息 +* @param p_data_offset 查找键值在512bit里的偏移位置,单位为字节 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_hash_data_parse(ZXIC_UINT32 item_type, + ZXIC_UINT32 key_by_size, + DPP_HASH_ENTRY *p_entry, + ZXIC_UINT8 *p_item_data, + ZXIC_UINT8 *p_data_offset) +{ + ZXIC_UINT32 data_offset = 0; + ZXIC_UINT8 temp_key_valid = 0; + ZXIC_UINT8 temp_key_type = 0; + ZXIC_UINT32 temp_entry_size = 0; + ZXIC_UINT8 srh_key_type = 0; + ZXIC_UINT32 srh_entry_size = 0; + ZXIC_UINT32 rst_by_size = 0; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT32 item_width = SE_ITEM_WIDTH_MAX; + ZXIC_UINT8 *p_srh_key = NULL; + ZXIC_UINT8 *p_temp_key = NULL; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(p_item_data); + ZXIC_COMM_CHECK_POINT(p_entry); + ZXIC_COMM_CHECK_POINT(p_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_data_offset); + ZXIC_COMM_CHECK_INDEX_UPPER(key_by_size, HASH_KEY_MAX); + + if (ITEM_DDR_256 == item_type) { + item_width = item_width / 2; + } + + p_temp_key = p_item_data; + p_srh_key = p_entry->p_key; + srh_key_type = DPP_GET_HASH_KEY_TYPE(p_srh_key); + srh_entry_size = DPP_GET_HASH_ENTRY_SIZE(srh_key_type); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("srh_key:0x"); + + for (i = 0; i < key_by_size; i++) { + ZXIC_COMM_PRINT("%02x ", p_srh_key[i]); + } + ZXIC_COMM_PRINT("\n"); + } + + while (data_offset < item_width) { + temp_key_valid = DPP_GET_HASH_KEY_VALID(p_temp_key); + temp_key_type = DPP_GET_HASH_KEY_TYPE(p_temp_key); + + if (temp_key_valid && (srh_key_type == temp_key_type)) { + if (0 == ZXIC_COMM_MEMCMP(p_srh_key, p_temp_key, + key_by_size)) { + ZXIC_COMM_TRACE_DEBUG( + "Hash search hardware successfully. 
\n"); + srh_succ = 1; + break; + } + + data_offset += srh_entry_size; + } else if (temp_key_valid && (srh_key_type != temp_key_type)) { + temp_entry_size = + DPP_GET_HASH_ENTRY_SIZE(temp_key_type); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + data_offset, temp_entry_size); + if (temp_entry_size == 0) { + ZXIC_COMM_TRACE_ERROR( + "key type %d srh entry size is zero\n", + temp_key_type); + return DPP_ERR; + } + data_offset += temp_entry_size; + } else { + data_offset += 16; /* 偏移最小的步长128bit */ + } + + p_temp_key = p_item_data; + p_temp_key += data_offset; + } + + if (!srh_succ) { + ZXIC_COMM_TRACE_DEBUG("Hash search hardware fail. \n"); + return DPP_HASH_RC_MATCH_ITEM_FAIL; + } + + /* copy result */ + rst_by_size = srh_entry_size - key_by_size; + ZXIC_COMM_MEMCPY(p_entry->p_rst, p_temp_key + key_by_size, + (rst_by_size > HASH_RST_MAX) ? HASH_RST_MAX : + rst_by_size); + *p_data_offset = data_offset; + + return DPP_OK; +} + +/***********************************************************/ +/** 查找存储在DDR空间的hash表项 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param p_hash_entry_cfg hash表项配置信息 +* @param p_hash_entry 查找键值信息 +* @param p_srh_succ 出参,查找是否成功 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_hash_ddr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT8 *p_srh_succ) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 item_idx = 0xFFFFFFFF; /* -1 */ + ZXIC_UINT32 item_type = 0; + ZXIC_UINT32 crc_value = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 index_offset = 0; + ZXIC_UINT32 hw_addr = 0; /*单位512bit*/ + ZXIC_UINT32 base_addr = 0; /*单位2k*256bit*/ + ZXIC_UINT32 ecc_en = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 byte_len = 0; + ZXIC_UINT8 byte_offset = 0; + ZXIC_UINT8 temp_key[HASH_KEY_MAX] = { 0 }; + HASH_DDR_CFG *p_ddr_cfg = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + ZXIC_UINT32 
rd_buff[DPP_DIR_TBL_BUF_MAX_NUM] = { 0 }; + ZXIC_UINT8 temp_data[DPP_DIR_TBL_BUF_MAX_NUM * 4] = { 0 }; + SE_ITEM_CFG *p_item = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_hash_entry); + ZXIC_COMM_CHECK_POINT(p_srh_succ); + + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)rd_buff, 0x0, sizeof(rd_buff)); + ZXIC_COMM_MEMSET(temp_data, 0x0, sizeof(temp_data)); + + rc = dpp_se_smmu1_hash_tbl_soft_cfg_get(dev, p_hash_entry_cfg->fun_id, + p_hash_entry_cfg->bulk_id, + &ecc_en, &base_addr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_se_smmu1_hash_tbl_soft_cfg_get"); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->bulk_id, + HASH_BULK_ID_MAX); + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[p_hash_entry_cfg->bulk_id]; + ZXIC_COMM_CHECK_POINT(p_ddr_cfg); + + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->key_by_size, + HASH_KEY_MAX); + rc = dpp_hash_set_crc_key(dev, p_hash_entry_cfg, p_hash_entry, + temp_key); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "hash_set_crc_key"); + + crc_value = p_hash_cfg->p_hash32_fun(temp_key, + p_hash_entry_cfg->key_by_size, + p_ddr_cfg->hash_ddr_arg); + item_idx = crc_value % p_ddr_cfg->item_num; + index = p_ddr_cfg->hw_baddr + item_idx; + if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + item_type = ITEM_DDR_512; + hw_addr = (base_addr << 10) + index; + index_offset = 0; + byte_len = 512 / 8; + } else { + item_type = ITEM_DDR_256; + hw_addr = (base_addr << 10) + (index >> 1); + index_offset = index & 0x1; + byte_len = 256 / 8; + } + + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "Hash search in ITEM_DDR_%s, CRC32 index is: 0x%x.\n", + ((item_type == ITEM_DDR_256) ? 
"256" : "512"), item_idx); + if (dpp_dtb_srh_mode_get()) { + rc = dpp_dtb_se_smmu1_dma_dump(dev, queue_id, hw_addr, 1, + rd_buff, &element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_se_smmu1_dma_dump"); + + zxic_comm_swap((ZXIC_UINT8 *)(rd_buff + index_offset * 8), + byte_len); + ZXIC_COMM_MEMCPY(temp_data, rd_buff + index_offset * 8, + byte_len); + + rc = dpp_dtb_hash_data_parse(item_type, + p_hash_entry_cfg->key_by_size, + p_hash_entry, temp_data, + &byte_offset); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_hash_data_parse"); + + ZXIC_COMM_TRACE_DEBUG("Hash search hardware succ in ddr. \n"); + } else { + ZXIC_COMM_CHECK_INDEX_UPPER(item_idx, p_ddr_cfg->item_num - 1); + p_item = p_ddr_cfg->p_item_array[item_idx]; + ZXIC_COMM_CHECK_POINT_NO_ASSERT(p_item); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), p_ddr_cfg->hw_baddr, item_idx); + p_item->hw_addr = p_ddr_cfg->hw_baddr + item_idx; + p_item->item_type = item_type; + p_item->item_index = item_idx; + + rc = dpp_dtb_hash_software_item_check( + p_hash_entry, p_hash_entry_cfg->key_by_size, + p_hash_entry_cfg->rst_by_size, p_item); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_software_item_check"); + } + + *p_srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + + return DPP_OK; +} + +/***********************************************************/ +/** 查找存储在ZCAM空间的hash表项 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param p_hash_entry_cfg hash表项配置信息 +* @param p_hash_entry 查找键值信息 +* @param p_srh_succ 出参,查找是否成功 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_hash_zcam_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT32 srh_mode, ZXIC_UINT8 *p_srh_succ) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 
1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_srh_succ); + ZXIC_COMM_CHECK_POINT(p_hash_entry); + + if (HASH_SRH_MODE_HDW == srh_mode) { + rc = dpp_dtb_hash_zcam_get_hardware(dev, queue_id, + p_hash_entry_cfg, + p_hash_entry, p_srh_succ); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "dpp_dtb_hash_zcam_get_hardware"); + } else { + rc = dpp_dtb_hash_get_software(dev, p_hash_entry_cfg, + p_hash_entry, p_srh_succ); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_hash_get_software"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 硬件查找存储在ZCAM空间的hash表项 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param p_hash_entry_cfg hash表项配置信息 +* @param p_hash_entry 查找键值信息(查找成功,填充rst信息) +* @param p_srh_succ 出参,查找是否成功 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_zcam_get_hardware(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT8 *p_srh_succ) +{ + DPP_STATUS rc = DPP_OK; + DPP_HASH_CFG *p_hash_cfg = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 pre_zblk_idx = 0xFFFFFFFF; /* -1; */ + ZXIC_UINT16 crc16_value = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 item_idx = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 byte_offset = 0; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 temp_key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rd_buff[SE_ITEM_WIDTH_MAX] = { 0 }; + + D_NODE *p_zblk_dn = NULL; + D_NODE *p_zcell_dn = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_srh_succ); + 
ZXIC_COMM_CHECK_POINT(p_hash_entry); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_rst); + + ZXIC_COMM_MEMSET_S(rd_buff, sizeof(rd_buff), 0, sizeof(rd_buff)); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + /* 取出se配置 */ + p_se_cfg = p_hash_entry_cfg->p_se_cfg; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + + rc = dpp_hash_set_crc_key(dev, p_hash_entry_cfg, p_hash_entry, + temp_key); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "hash_set_crc_key"); + + /*查找所有的zcell*/ + p_zcell_dn = p_hash_cfg->hash_shareram.zcell_free_list.p_next; + while (p_zcell_dn) { + p_zcell = (SE_ZCELL_CFG *)p_zcell_dn->data; + zblk_idx = GET_ZBLK_IDX(p_zcell->zcell_idx); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), zblk_idx, 0, + SE_ZBLK_NUM - 1); + p_zblk = &(p_se_cfg->zblk_info[zblk_idx]); + + if (zblk_idx != pre_zblk_idx) { + pre_zblk_idx = zblk_idx; + crc16_value = p_hash_cfg->p_hash16_fun( + temp_key, p_hash_entry_cfg->key_by_size, + p_zblk->hash_arg); + } + + zcell_id = GET_ZCELL_IDX(p_zcell->zcell_idx); + item_idx = GET_ZCELL_CRC_VAL(zcell_id, crc16_value); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), item_idx, 0, + SE_RAM_DEPTH - 1); + addr = ZBLK_ITEM_ADDR_CALC(p_zcell->zcell_idx, item_idx); + rc = dpp_dtb_se_zcam_dma_dump(dev, queue_id, addr, + DTB_DUMP_ZCAM_512b, 1, + (ZXIC_UINT32 *)rd_buff, + &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_se_zcam_dma_dump"); + zxic_comm_swap(rd_buff, sizeof(rd_buff)); + + rc = dpp_dtb_hash_data_parse(ITEM_RAM, + p_hash_entry_cfg->key_by_size, + p_hash_entry, rd_buff, + (ZXIC_UINT8 *)(&byte_offset)); + if (DPP_OK == rc) { + ZXIC_COMM_TRACE_DEBUG( + "Hash search hardware succ in zcell. 
\n"); + srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + break; + } + + p_zcell_dn = p_zcell_dn->next; + } + + /*zcell查找失败,则查找所有的zreg*/ + if (0 == srh_succ) { + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblk_idx = p_zblk->zblk_idx; + + for (i = 0; i < SE_ZREG_NUM; i++) { + item_idx = i; + addr = ZBLK_HASH_LIST_REG_ADDR_CALC(zblk_idx, + item_idx); + rc = dpp_dtb_se_zcam_dma_dump( + dev, queue_id, addr, DTB_DUMP_ZCAM_512b, + 1, (ZXIC_UINT32 *)rd_buff, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT( + rc, "dpp_dtb_se_zcam_dma_dump"); + zxic_comm_swap(rd_buff, sizeof(rd_buff)); + + rc = dpp_dtb_hash_data_parse( + ITEM_RAM, p_hash_entry_cfg->key_by_size, + p_hash_entry, rd_buff, + (ZXIC_UINT8 *)(&byte_offset)); + if (DPP_OK == rc) { + ZXIC_COMM_TRACE_DEBUG( + "Hash search hardware succ in zreg. \n"); + srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + break; + } + } + p_zblk_dn = p_zblk_dn->next; + } + } + + *p_srh_succ = srh_succ; + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump data:\n"); + for (i = 0; i < SE_ITEM_WIDTH_MAX; i++) { + ZXIC_COMM_PRINT("%02x ", rd_buff[i]); + if ((i + 1) % 16 == 0) { + ZXIC_COMM_PRINT("\n"); + } + } + ZXIC_COMM_PRINT("\n"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 软件查找存储在ZCAM空间的hash表项 +* @param dev_id 设备号,支持多芯片 +* @param p_hash_entry_cfg hash表项配置信息 +* @param p_hash_entry 查找键值信息(查找成功后,填充rst) +* @param p_srh_succ 出参,查找是否成功 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_get_software(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_hash_entry, + ZXIC_UINT8 *p_srh_succ) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + + DPP_HASH_RBKEY_INFO srh_rbkey = { 0 }; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + SE_ITEM_CFG *p_item = NULL; + 
DPP_HASH_CFG *p_hash_cfg = NULL; + ZXIC_MUTEX_T *p_hash_mutex = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_srh_succ); + ZXIC_COMM_CHECK_POINT(p_hash_entry); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_hash_entry->p_rst); + + ZXIC_COMM_MEMSET_S(&srh_rbkey, sizeof(DPP_HASH_RBKEY_INFO), 0, + sizeof(DPP_HASH_RBKEY_INFO)); + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->key_by_size, + HASH_KEY_MAX); + ZXIC_COMM_MEMCPY(srh_rbkey.key, p_hash_entry->p_key, + p_hash_entry_cfg->key_by_size); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_cfg); + + rc = dpp_dev_hash_opr_mutex_get(dev, p_hash_cfg->fun_id, &p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dev_opr_mutex_get"); + rc = zxic_comm_mutex_lock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_lock"); + + rc = zxic_comm_rb_search(&p_hash_cfg->hash_rb, (ZXIC_VOID *)&srh_rbkey, + (ZXIC_VOID *)(&p_rb_tn_rtn)); + if (ZXIC_RBT_RC_SRHFAIL == rc) { + ZXIC_COMM_TRACE_DEBUG("zxic_comm_rb_search fail. \n"); + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; + } + + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rb_tn_rtn, p_hash_mutex); + p_rbkey = p_rb_tn_rtn->p_key; + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rbkey, p_hash_mutex); + p_item = p_rbkey->p_item_info; + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_item, p_hash_mutex); + + rc = dpp_dtb_hash_software_item_check(p_hash_entry, + p_hash_entry_cfg->key_by_size, + p_hash_entry_cfg->rst_by_size, + p_item); + if (DPP_OK == rc) { + ZXIC_COMM_TRACE_DEBUG("Hash search software succ. 
\n"); + *p_srh_succ = 1; + p_hash_cfg->hash_stat.search_ok++; + } + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/** dtb根据etcam key计算dump出数据占用的数据长度 +* @param dev_id 设备号 +* @param etcam_key_mode etcam条目位宽,参照DPP_ETCAM_ENTRY_MODE_E定义 +* @param p_etcam_dump_len dtb dump出etcam的数据长度(单位:字节) +* @param p_etcam_dump_inerval dump出etcam数据的间隔数据长度(单位:字节) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dtb_etcam_dump_data_len(DPP_DEV_T *dev, ZXIC_UINT32 etcam_key_mode, + ZXIC_UINT32 *p_etcam_dump_len, + ZXIC_UINT32 *p_etcam_dump_inerval) +{ + ZXIC_UINT32 dump_data_len = 0; + ZXIC_UINT8 etcam_dump_inerval = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_etcam_dump_len); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_etcam_dump_inerval); + + if (DPP_ETCAM_KEY_640b == etcam_key_mode) { + dump_data_len = 5 * DTB_LEN_POS_SETP; + etcam_dump_inerval = 0; + } else if (DPP_ETCAM_KEY_320b == etcam_key_mode) { + dump_data_len = 3 * DTB_LEN_POS_SETP; + etcam_dump_inerval = 8; + } else if (DPP_ETCAM_KEY_160b == etcam_key_mode) { + dump_data_len = 2 * DTB_LEN_POS_SETP; + etcam_dump_inerval = 12; + } else if (DPP_ETCAM_KEY_80b == etcam_key_mode) { + dump_data_len = 1 * DTB_LEN_POS_SETP; + etcam_dump_inerval = 6; + } + + *p_etcam_dump_len = dump_data_len; + *p_etcam_dump_inerval = etcam_dump_inerval; + + return DPP_OK; +} + +/** dtb 从dump数据中提取到xy数据 +* @param dev_id 设备号 +* @param p_data dump出的etcam data 指针 +* @param p_mask dump出的etcam mask 指针 +* @param p_etcam_dump_len dtb dump出etcam的数据长度(单位:字节) +* @param p_etcam_dump_inerval dump出etcam数据的间隔数据长度(单位:字节) +* @param p_entry_xy 保存etcam xy数据(已分配内存) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_get_etcam_xy_from_dump_data(DPP_DEV_T *dev, + ZXIC_UINT8 
*p_data, + ZXIC_UINT8 *p_mask, + ZXIC_UINT32 etcam_dump_len, + ZXIC_UINT32 etcam_dump_inerval, + DPP_ETCAM_ENTRY_T *p_entry_xy) +{ + ZXIC_UINT8 *p_entry_data = NULL; + ZXIC_UINT8 *p_entry_mask = NULL; + + //先对数据进行翻转再进行数据的截取,最后进行数据的复制 + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mask); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_xy); + + zxic_comm_swap(p_data, etcam_dump_len); + zxic_comm_swap(p_mask, etcam_dump_len); + + p_entry_data = p_data + etcam_dump_inerval; + p_entry_mask = p_mask + etcam_dump_inerval; + + ZXIC_COMM_MEMCPY(p_entry_xy->p_data, p_entry_data, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry_xy->mode)); + ZXIC_COMM_MEMCPY(p_entry_xy->p_mask, p_entry_mask, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry_xy->mode)); + + return DPP_OK; +} + +/** dtb dump etcam直接表表项内容 级联eram支持64bit/128bit +* @param dev_id 设备号 +* @param p_in_data 长度为640bit的输入数据 +* @param rd_mode 读模式 +* @param p_out_data 按照读模式读出的数据 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_ind_data_get(DPP_DEV_T *dev, ZXIC_UINT8 *p_in_data, + ZXIC_UINT32 rd_mode, + ZXIC_UINT8 *p_out_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT8 *p_temp = NULL; + ZXIC_UINT32 offset = 0; + ZXIC_UINT8 buff[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_in_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_out_data); + + p_temp = p_out_data; + ZXIC_COMM_MEMCPY(buff, p_in_data, DPP_ETCAM_WIDTH_MAX / 8); + + zxic_comm_swap(buff, DPP_ETCAM_WIDTH_MAX / 8); + + for (i = 0; i < DPP_ETCAM_RAM_NUM; i++) { + offset = i * (DPP_ETCAM_WIDTH_MIN / 8); + + if ((rd_mode >> (DPP_ETCAM_RAM_NUM - 1 - i)) & 0x1) { + ZXIC_COMM_MEMCPY(p_temp, buff + offset, + DPP_ETCAM_WIDTH_MIN / 8); + p_temp += DPP_ETCAM_WIDTH_MIN / 8; + } + } + + return rc; +} + +/** dtb dump etcam表项内容 +* @param dev_id 设备号 
+* @param queue_id 队列号 +* @param block_idx block的索引,范围0-7 +* @param addr 单个block中的RAM地址,范围0~511 +* @param rd_mode 读block RAM行的位图,共8bit,每比特表示一个RAM中的80bit +* @param opr_type 读取的数据类型,0:data/mask格式,1:xy格式 +* @param as_en 级联eram使能 +* @param as_eram_baddr 级联eram基地址 +* @param as_eram_index 级联eram 条目index +* @param as_rsp_mode 级联返回位宽,参见DPP_DIAG_ERAM_MODE_E +* @param p_entry 读取的etcam数据 +* @param p_as_rslt 读取的级联数据(分配128bit存储空间) +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_etcam_entry_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 block_idx, ZXIC_UINT32 addr, + ZXIC_UINT32 rd_mode, ZXIC_UINT32 opr_type, + ZXIC_UINT32 as_en, ZXIC_UINT32 as_eram_baddr, + ZXIC_UINT32 as_eram_index, + ZXIC_UINT32 as_rsp_mode, //128:3 64:2 + DPP_ETCAM_ENTRY_T *p_entry, + ZXIC_UINT8 *p_as_rslt) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 etcam_key_mode = 0; + + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + DPP_ETCAM_ENTRY_T entry_xy = { 0 }; + + ZXIC_UINT32 etcam_data_dst_phy_haddr = 0; + ZXIC_UINT32 etcam_data_dst_phy_laddr = 0; + ZXIC_UINT32 etcam_mask_dst_phy_haddr = 0; + ZXIC_UINT32 etcam_mask_dst_phy_laddr = 0; + ZXIC_UINT32 as_rst_dst_phy_haddr = 0; + ZXIC_UINT32 as_rst_dst_phy_laddr = 0; + + ZXIC_UINT32 dump_element_id = 0; + ZXIC_UINT32 etcam_dump_one_data_len = 0; + ZXIC_UINT32 etcam_dump_inerval = 0; + ZXIC_UINT32 dtb_desc_addr_offset = 0; + ZXIC_UINT32 dump_data_len = 0; + ZXIC_UINT32 dtb_desc_len = 0; + + ZXIC_UINT32 eram_dump_base_addr = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + + ZXIC_UINT8 *p_data = NULL; + ZXIC_UINT8 *p_mask = NULL; + ZXIC_UINT8 *p_rst = NULL; + ZXIC_UINT8 *temp_dump_out_data = NULL; + ZXIC_UINT8 *dump_info_buff = NULL; + ETCAM_DUMP_INFO_T etcam_dump_info = { 0 }; + DPP_DTB_ENTRY_T dtb_dump_entry = { 0 }; + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + 
ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), addr, 0, + DPP_ETCAM_RAM_DEPTH - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), block_idx, 0, + DPP_ETCAM_BLOCK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), rd_mode, 0, + DPP_ETCAM_WR_MASK_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), opr_type, DPP_ETCAM_OPR_DM, + DPP_ETCAM_OPR_XY); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry); + + dump_info_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dump_info_buff); + ZXIC_COMM_MEMSET(dump_info_buff, 0, + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * + sizeof(ZXIC_UINT8)); + + dtb_dump_entry.cmd = cmd_buff; + + entry_xy.p_data = temp_data; + entry_xy.p_mask = temp_mask; + + etcam_key_mode = p_entry->mode; + + etcam_dump_info.block_sel = block_idx; + etcam_dump_info.addr = addr; + etcam_dump_info.tb_width = 3 - etcam_key_mode; + etcam_dump_info.rd_mode = rd_mode; + etcam_dump_info.tb_depth = 1; + + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &dump_element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_free_item_get", dump_info_buff); + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "table up item queue_element_id is: %d.\n", + dump_element_id); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump etcam:queue %d,element id: %d\n", + queue_id, dump_element_id); + } + + rc = dtb_etcam_dump_data_len(dev, etcam_key_mode, + &etcam_dump_one_data_len, + &etcam_dump_inerval); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dtb_etcam_dump_data_len", + dump_info_buff); + + //etcam data dump描述符 + etcam_dump_info.data_or_mask = DPP_ETCAM_DTYPE_DATA; + rc = dpp_dtb_tab_up_item_offset_addr_get(dev, queue_id, dump_element_id, + dump_data_len, + &etcam_data_dst_phy_haddr, + &etcam_data_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, 
"dpp_dtb_tab_up_item_offset_addr_get", dump_info_buff); + rc = dpp_dtb_etcam_dump_entry(dev, &etcam_dump_info, + etcam_data_dst_phy_haddr, + etcam_data_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dtb_etcam_dump_entry", + dump_info_buff); + + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dtb_data_write", + dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += etcam_dump_one_data_len; + + //etcam mask dump描述符 + etcam_dump_info.data_or_mask = DPP_ETCAM_DTYPE_MASK; + rc = dpp_dtb_tab_up_item_offset_addr_get(dev, queue_id, dump_element_id, + dump_data_len, + &etcam_mask_dst_phy_haddr, + &etcam_mask_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_item_offset_addr_get", dump_info_buff); + rc = dpp_dtb_etcam_dump_entry(dev, &etcam_dump_info, + etcam_mask_dst_phy_haddr, + etcam_mask_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dtb_etcam_dump_entry", + dump_info_buff); + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dtb_data_write", + dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += etcam_dump_one_data_len; + + //级联数据dump描述符生成 + if (as_en) { + rc = dtb_eram_index_cal(dev, as_rsp_mode, as_eram_index, + &row_index, &col_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dtb_eram_index_cal", dump_info_buff); + + eram_dump_base_addr = as_eram_baddr + row_index; + ZXIC_COMM_TRACE_INFO("eram_dump_base_addr : 0x%x\n", + eram_dump_base_addr); + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, dump_element_id, dump_data_len, + &as_rst_dst_phy_haddr, 
&as_rst_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_item_offset_addr_get", + dump_info_buff); + + rc = dpp_dtb_smmu0_dump_entry(dev, eram_dump_base_addr, 1, + as_rst_dst_phy_haddr, + as_rst_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_smmu0_dump_entry", dump_info_buff); + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += DTB_LEN_POS_SETP; + } + + //申请data空间 + temp_dump_out_data = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(dump_data_len * + sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE_NO_ASSERT(temp_dump_out_data, + dump_info_buff); + ZXIC_COMM_MEMSET(temp_dump_out_data, 0, + dump_data_len * sizeof(ZXIC_UINT8)); + p_data = temp_dump_out_data; + /*下发dump描述符*/ + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, dump_element_id, + (ZXIC_UINT32 *)dump_info_buff, + dump_data_len / 4, dtb_desc_len * 4, + (ZXIC_UINT32 *)temp_dump_out_data); + ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_NO_ASSERT( + rc, "dpp_dtb_write_dump_desc_info", dump_info_buff, + temp_dump_out_data); + + //解析数据 + p_data = temp_dump_out_data; + p_mask = p_data + etcam_dump_one_data_len; + + rc = dpp_dtb_get_etcam_xy_from_dump_data(dev, p_data, p_mask, + etcam_dump_one_data_len, + etcam_dump_inerval, &entry_xy); + ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_NO_ASSERT( + rc, "dpp_dtb_get_etcam_xy_from_dump_data", dump_info_buff, + temp_dump_out_data); + + if (opr_type == DPP_ETCAM_OPR_DM) { + /* convert hardware data X/Y to user D/M */ + rc = dpp_etcam_xy_to_dm( + p_entry, &entry_xy, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_NO_ASSERT( + rc, "dpp_etcam_xy_to_dm", dump_info_buff, + temp_dump_out_data); + } else { + 
ZXIC_COMM_MEMCPY(p_entry->p_data, entry_xy.p_data, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + ZXIC_COMM_MEMCPY(p_entry->p_mask, entry_xy.p_mask, + DPP_ETCAM_ENTRY_SIZE_GET(p_entry->mode)); + } + + if (as_en) { + //解析dump数据 + p_rst = p_mask + etcam_dump_one_data_len; + ZXIC_COMM_MEMCPY(p_as_rslt, p_rst, (128 / 8)); + } + + ZXIC_COMM_FREE(dump_info_buff); + ZXIC_COMM_FREE(temp_dump_out_data); + + return rc; +} +#endif + +#if ZXIC_REAL("DTB GET INTERFACE") +/** dtb dump eram直接表表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_eram_entry eram数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_eram_entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 rd_mode = 0; + ZXIC_UINT32 eram_base_addr = 0; + ZXIC_UINT32 eram_table_depth = 0; + ZXIC_UINT32 eram_dump_base_addr = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + ZXIC_UINT32 temp_data[4] = { 0 }; + ZXIC_UINT32 element_id = 0; + + ZXIC_UINT32 index = p_dump_eram_entry->index; + ZXIC_UINT32 *p_data = p_dump_eram_entry->p_data; + + DPP_SDTTBL_ERAM_T sdt_eram_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + eram_base_addr = sdt_eram_info.eram_base_addr; + rd_mode = sdt_eram_info.eram_mode; //0:1bit;2:64bit;3:128, + eram_table_depth = sdt_eram_info.eram_table_depth; + ZXIC_COMM_CHECK_DEV_INDEX_LOWER_NO_ASSERT(DEV_ID(dev), eram_table_depth, + 1); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), index, 0, + eram_table_depth - 1); + + rc = dtb_eram_index_cal(dev, rd_mode, index, &row_index, &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + + eram_dump_base_addr = eram_base_addr + row_index; + + rc = dpp_dtb_se_smmu0_dma_dump(dev, queue_id, eram_dump_base_addr, 1, + temp_data, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_se_smmu0_dma_dump"); + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dtb dump eram done, the element id is %d.\n", + element_id); + } + + //提取数据 + switch (rd_mode) { + case ERAM128_TBL_128b: { + ZXIC_COMM_MEMCPY(p_data, temp_data, (128 / 8)); + break; + } + + case ERAM128_TBL_64b: { + ZXIC_COMM_MEMCPY(p_data, temp_data + ((1 - col_index) << 1), + (64 / 8)); + break; + } + + case ERAM128_TBL_1b: { + ZXIC_COMM_UINT32_GET_BITS(p_data[0], + *(temp_data + (3 - col_index / 32)), + (col_index % 32), 1); + break; + } + } + + return rc; +} + +/** dtb dump eram直接表表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_eram_entry eram数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_stat_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 base_addr, + ZXIC_UINT32 rd_mode, ZXIC_UINT32 index, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 eram_dump_base_addr = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + ZXIC_UINT32 temp_data[4] = { 0 }; + ZXIC_UINT32 element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + + rc = dtb_eram_index_cal(dev, rd_mode, index, &row_index, &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + + eram_dump_base_addr = base_addr + 
row_index; + + rc = dpp_dtb_se_smmu0_dma_dump(dev, queue_id, eram_dump_base_addr, 1, + temp_data, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_se_smmu0_dma_dump"); + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dtb dump eram done, the element id is %d.\n", + element_id); + } + + //提取数据 + switch (rd_mode) { + case ERAM128_TBL_128b: { + ZXIC_COMM_MEMCPY(p_data, temp_data, (128 / 8)); + break; + } + + case ERAM128_TBL_64b: { + ZXIC_COMM_MEMCPY(p_data, temp_data + ((1 - col_index) << 1), + (64 / 8)); + break; + } + + case ERAM128_TBL_1b: { + ZXIC_COMM_UINT32_GET_BITS(p_data[0], + *(temp_data + (3 - col_index / 32)), + (col_index % 32), 1); + break; + } + } + + return rc; +} + +/** dtb dump ddr直接表表项内容 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_ddr_entry ddr 数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_ddr_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_DDR_ENTRY_INFO_T *p_dump_ddr_entry) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 rd_mode = 0; + ZXIC_UINT32 ddr_base_addr = 0; + ZXIC_UINT32 ddr_dump_base_addr_512bit = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + ZXIC_UINT32 rd_buff[DPP_DIR_TBL_BUF_MAX_NUM] = { 0 }; + ZXIC_UINT32 element_id = 0; + + ZXIC_UINT32 index = p_dump_ddr_entry->index; + ZXIC_UINT32 *p_data = p_dump_ddr_entry->p_data; + + DPP_SDTTBL_DDR3_T sdt_ddr_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_dump_ddr_entry); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_ddr_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + ddr_base_addr = sdt_ddr_info.ddr3_base_addr; + ZXIC_COMM_CHECK_INDEX_UPPER(ddr_base_addr, DPP_SMMU1_TOTAL_MAX_BADDR); + rd_mode = sdt_ddr_info.ddr3_rw_len; + + if (SMMU1_DDR_SRH_128b == rd_mode) { + row_index = index >> 2; + col_index = ((index & 
0x3)) << 2; + } else if (SMMU1_DDR_SRH_256b == rd_mode) { + row_index = index >> 1; + col_index = ((index & 0x1)) << 3; + } else if (SMMU1_DDR_SRH_512b == rd_mode) { + row_index = index; + col_index = 0; + } + + ddr_dump_base_addr_512bit = + (ddr_base_addr << 10) + row_index; /**转换成512bit为单位的地址*/ + + rc = dpp_dtb_se_smmu1_dma_dump(dev, queue_id, ddr_dump_base_addr_512bit, + 1, rd_buff, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_se_smmu1_dma_dump"); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("dump ddr done ,the element is is %d\n", + element_id); + } + + ZXIC_COMM_MEMCPY((ZXIC_UINT8 *)p_data, rd_buff + col_index, + DTB_LEN_POS_SETP << rd_mode); + + return rc; +} + +/** 根据键值查找hash表 +* @param dev_id 设备号,支持多芯片 +* @param queue_id 队列id +* @param sdt_no 0~255 +* @param p_dtb_hash_entry 出参,返回描述符信息 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +ZXIC_UINT32 dpp_dtb_hash_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_HASH_ENTRY_INFO_T *p_dtb_hash_entry, + ZXIC_UINT32 srh_mode) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 key_valid = 1; + DPP_HASH_CFG *p_hash_cfg = NULL; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + DPP_HASH_ENTRY hash_entry = { 0 }; + ZXIC_UINT8 aucKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 aucRst[HASH_RST_MAX] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_dtb_hash_entry); + ZXIC_COMM_CHECK_POINT(p_dtb_hash_entry->p_actu_key); + ZXIC_COMM_CHECK_POINT(p_dtb_hash_entry->p_rst); + + ZXIC_COMM_MEMSET_S(&hash_entry, sizeof(DPP_HASH_ENTRY), 0, + sizeof(DPP_HASH_ENTRY)); + + //从sdt_no中获取hash配置 + rc = dpp_hash_get_hash_info_from_sdt(dev, sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + 
"dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + hash_entry.p_key = aucKey; + hash_entry.p_rst = aucRst; + ZXIC_COMM_MEMSET_S(hash_entry.p_key, sizeof(aucKey), 0, sizeof(aucKey)); + ZXIC_COMM_MEMSET_S(hash_entry.p_rst, sizeof(aucRst), 0, sizeof(aucRst)); + hash_entry.p_key[0] = DPP_GET_HASH_KEY_CTRL( + key_valid, hash_entry_cfg.key_type, hash_entry_cfg.table_id); + ZXIC_COMM_CHECK_INDEX(hash_entry_cfg.actu_key_size, HASH_ACTU_KEY_MIN, + HASH_ACTU_KEY_MAX); + ZXIC_COMM_MEMCPY(&hash_entry.p_key[1], p_dtb_hash_entry->p_actu_key, + hash_entry_cfg.actu_key_size); + + // if(p_hash_cfg->ddr_valid) + // { + // rc = dpp_dtb_hash_ddr_get(dev_id,queue_id,&hash_entry_cfg,&hash_entry,srh_mode,&srh_succ); + // ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dtb_hash_ddr_get"); + // } + + rc = dpp_dtb_hash_zcam_get(dev, queue_id, &hash_entry_cfg, &hash_entry, + srh_mode, &srh_succ); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_dtb_hash_zcam_get"); + + if (!srh_succ) { + p_hash_cfg->hash_stat.search_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "Hash search key fail!\n"); + return DPP_HASH_RC_SRH_FAIL; + } + + ZXIC_COMM_MEMCPY(p_dtb_hash_entry->p_rst, hash_entry.p_rst, + 1 << (hash_entry_cfg.rsp_mode + 2)); + + return DPP_OK; +} + +/** dtb 通过key和mask获取ACL表级联结果 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_acl_entry etcam 数据结构,数据已分配相应内存,需要输入key和mask +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_acl_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_acl_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 block_idx = 0; + ZXIC_UINT32 ram_addr = 0; + ZXIC_UINT32 etcam_wr_mode = 0; + + ZXIC_UINT32 etcam_key_mode = 0; + ZXIC_UINT32 etcam_table_id = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 
as_eram_baddr = 0; + ZXIC_UINT32 etcam_as_mode = 0; + + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + + DPP_ETCAM_ENTRY_T etcam_entry_dm = { 0 }; + DPP_ETCAM_ENTRY_T etcam_entry_xy = { 0 }; + ZXIC_UINT32 as_eram_data[4] = { 0 }; + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry->key_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry->key_mask); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + etcam_as_mode = sdt_etcam_info.as_rsp_mode; + etcam_table_id = sdt_etcam_info.etcam_table_id; + as_enable = sdt_etcam_info.as_en; + as_eram_baddr = sdt_etcam_info.as_eram_baddr; + + if (as_enable) { + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), + p_dump_acl_entry->p_as_rslt); + } + + etcam_entry_xy.mode = etcam_key_mode; + etcam_entry_xy.p_data = temp_data; + etcam_entry_xy.p_mask = temp_mask; + etcam_entry_dm.mode = etcam_key_mode; + etcam_entry_dm.p_data = p_dump_acl_entry->key_data; + etcam_entry_dm.p_mask = p_dump_acl_entry->key_mask; + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT("acl get DM:/n"); + dpp_acl_data_print(etcam_entry_dm.p_data, etcam_entry_dm.p_mask, + etcam_entry_dm.mode); + } + + rc = dpp_acl_cfg_get(dev, &p_acl_cfg); //获取ACL表资源配置 + ZXIC_COMM_CHECK_RC(rc, "dpp_acl_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + + p_tbl_cfg = + p_acl_cfg->acl_tbls + etcam_table_id; //得到该acl表的资源配置 + + if (!p_tbl_cfg->is_used) { + ZXIC_COMM_TRACE_ERROR("table[ %d ] is not init!\n", + etcam_table_id); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_TBL_NOT_INIT; + } + + 
/*计算地址等信息*/ + rc = dpp_acl_hdw_addr_get(p_tbl_cfg, p_dump_acl_entry->handle, + &block_idx, &ram_addr, &etcam_wr_mode); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_acl_hdw_addr_get"); + + rc = dpp_dtb_etcam_entry_get(dev, queue_id, block_idx, ram_addr, + etcam_wr_mode, DPP_ETCAM_OPR_XY, as_enable, + as_eram_baddr, p_dump_acl_entry->handle, + etcam_as_mode, //128:3 64:2 + &etcam_entry_xy, + (ZXIC_UINT8 *)as_eram_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_etcam_entry_get"); + + if (dpp_etcam_entry_cmp(&etcam_entry_dm, &etcam_entry_xy) == 0) { + ZXIC_COMM_PRINT( + "Acl table[ %d ] search in hardware success: handle[ 0x%x ], block[ %d ], ram_addr[ %d ], rd_mode[ %x ].\n", + p_tbl_cfg->table_id, p_dump_acl_entry->handle, + block_idx, ram_addr, etcam_wr_mode); + } else { + ZXIC_COMM_PRINT( + "Acl table[ %d ] search in hardware fail: handle[ 0x%x ], block[ %d ], ram_addr[ %d ], rd_mode[ %x ].\n", + p_tbl_cfg->table_id, p_dump_acl_entry->handle, + block_idx, ram_addr, etcam_wr_mode); + + return DPP_ERR; + } + + if (as_enable) { + rc = dtb_eram_index_cal(dev, etcam_as_mode, + p_dump_acl_entry->handle, &row_index, + &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + switch (etcam_as_mode) { + case ERAM128_TBL_128b: { + ZXIC_COMM_MEMCPY(p_dump_acl_entry->p_as_rslt, + as_eram_data, (128 / 8)); + break; + } + + case ERAM128_TBL_64b: { + ZXIC_COMM_MEMCPY(p_dump_acl_entry->p_as_rslt, + as_eram_data + ((1 - col_index) << 1), + (64 / 8)); + break; + } + + case ERAM128_TBL_1b: { + ZXIC_COMM_UINT32_GET_BITS( + *(ZXIC_UINT32 *)p_dump_acl_entry->p_as_rslt, + *(as_eram_data + (3 - col_index / 32)), + (col_index % 32), 1); + break; + } + } + } + + return rc; +} + +/** dtb etcam 数据get接口,通过handle值获取etcam数据 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param p_dump_acl_entry etcam 数据结构,数据已分配相应内存 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS 
dpp_dtb_etcam_data_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_acl_entry) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 block_idx = 0; + ZXIC_UINT32 ram_addr = 0; + ZXIC_UINT32 etcam_wr_mode = 0; + + ZXIC_UINT32 etcam_key_mode = 0; + ZXIC_UINT32 etcam_table_id = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 as_eram_baddr = 0; + ZXIC_UINT32 etcam_as_mode = 0; + + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + + DPP_ETCAM_ENTRY_T etcam_entry_dm = { 0 }; + ZXIC_UINT32 as_eram_data[4] = { 0 }; + + ZXIC_UINT8 temp_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 temp_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry->key_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_dump_acl_entry->key_mask); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + etcam_as_mode = sdt_etcam_info.as_rsp_mode; + etcam_table_id = sdt_etcam_info.etcam_table_id; + as_enable = sdt_etcam_info.as_en; + as_eram_baddr = sdt_etcam_info.as_eram_baddr; + + if (as_enable) { + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), + p_dump_acl_entry->p_as_rslt); + } + + etcam_entry_dm.mode = etcam_key_mode; + etcam_entry_dm.p_data = temp_data; + etcam_entry_dm.p_mask = temp_mask; + + rc = dpp_acl_cfg_get(dev, &p_acl_cfg); //获取ACL表资源配置 + ZXIC_COMM_CHECK_RC(rc, "dpp_acl_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + + p_tbl_cfg = + p_acl_cfg->acl_tbls + etcam_table_id; //得到该acl表的资源配置 + + if (!p_tbl_cfg->is_used) { + ZXIC_COMM_TRACE_ERROR("table[ %d ] is not init!\n", + etcam_table_id); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_TBL_NOT_INIT; + 
} + + /*计算地址等信息*/ + rc = dpp_acl_hdw_addr_get(p_tbl_cfg, p_dump_acl_entry->handle, + &block_idx, &ram_addr, &etcam_wr_mode); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_acl_hdw_addr_get"); + + rc = dpp_dtb_etcam_entry_get(dev, queue_id, block_idx, ram_addr, + etcam_wr_mode, DPP_ETCAM_OPR_DM, as_enable, + as_eram_baddr, p_dump_acl_entry->handle, + etcam_as_mode, //128:3 64:2 + &etcam_entry_dm, + (ZXIC_UINT8 *)as_eram_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_etcam_entry_get"); + + ZXIC_COMM_MEMCPY(p_dump_acl_entry->key_data, etcam_entry_dm.p_data, + DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode)); + ZXIC_COMM_MEMCPY(p_dump_acl_entry->key_mask, etcam_entry_dm.p_mask, + DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode)); + + if (as_enable) { + rc = dtb_eram_index_cal(dev, etcam_as_mode, + p_dump_acl_entry->handle, &row_index, + &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + switch (etcam_as_mode) { + case ERAM128_TBL_128b: { + ZXIC_COMM_MEMCPY(p_dump_acl_entry->p_as_rslt, + as_eram_data, (128 / 8)); + break; + } + + case ERAM128_TBL_64b: { + ZXIC_COMM_MEMCPY(p_dump_acl_entry->p_as_rslt, + as_eram_data + ((1 - col_index) << 1), + (64 / 8)); + break; + } + + case ERAM128_TBL_1b: { + ZXIC_COMM_UINT32_GET_BITS( + *(ZXIC_UINT32 *)p_dump_acl_entry->p_as_rslt, + *(as_eram_data + (3 - col_index / 32)), + (col_index % 32), 1); + break; + } + } + } + + return rc; +} + +#endif /**DTB GET INTERFACE*/ + +#if ZXIC_REAL("DTB FLUSH INTERFACE") + +/***********************************************************/ +/** DTB dd 整个流表清空Flush +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param ddr_base_addr ddr基地址,以4K*128bit为单位 +* @param ddr_wr_mode ddr写模式 0-128bit, 1-256bit, 2-384bit, 3-512bit,取值参考SMMU1_DDR_WRT_MODE_E的定义 +* @param ddr_ecc_en ddr ECC使能 +* @param start_index flush开始的条目 +* @param entry_num 下发的条目数 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ 
+DPP_STATUS +dpp_dtb_smmu1_flush_cycle(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 ddr_base_addr, ZXIC_UINT32 ddr_wr_mode, + ZXIC_UINT32 ddr_ecc_en, ZXIC_UINT32 start_index, + ZXIC_UINT32 entry_num, ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 index = 0; + ZXIC_UINT32 current_index = 0; + ZXIC_UINT32 ddr_entry_len = 0; + ZXIC_UINT32 *data_buff = NULL; + DPP_DTB_DDR_ENTRY_INFO_T *p_entry_arr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + ddr_entry_len = 4 * (ddr_wr_mode + 1); //计算数据长度 + + p_entry_arr = (DPP_DTB_DDR_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + entry_num * sizeof(DPP_DTB_DDR_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_arr); + + data_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(ddr_entry_len * + sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE_NO_ASSERT(DEV_ID(dev), data_buff, + p_entry_arr); + + ZXIC_COMM_MEMSET(data_buff, 0, ddr_entry_len * sizeof(ZXIC_UINT32)); + + // 下表数据准备 + for (index = 0; index < entry_num; index++) { + current_index = start_index + index; + + p_entry_arr[index].index = current_index; + p_entry_arr[index].p_data = data_buff; + } + + rc = dpp_dtb_smmu1_data_write_cycle(dev, queue_id, ddr_base_addr, + ddr_wr_mode, ddr_ecc_en, entry_num, + p_entry_arr, element_id); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(p_entry_arr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_data_write_cycle"); + + return rc; +} + +/***********************************************************/ +/** DTB dd 整个流表清空Flush +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param ddr_base_addr ddr基地址,以4K*128bit为单位 +* @param ddr_wr_mode ddr写模式 0-128bit, 1-256bit, 2-384bit, 3-512bit,取值参考SMMU1_DDR_WRT_MODE_E的定义 +* @param ddr_ecc_en ddr ECC使能 +* @param start_index flush开始的条目 +* @param entry_num 下发的条目数 +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +DPP_STATUS dpp_dtb_smmu1_flush(DPP_DEV_T *dev, ZXIC_UINT32 
queue_id, + ZXIC_UINT32 ddr_base_addr, + ZXIC_UINT32 ddr_wr_mode, ZXIC_UINT32 ddr_ecc_en, + ZXIC_UINT32 entry_num, ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 entry_num_max = 0; + ZXIC_UINT32 entry_cycle = 0; + ZXIC_UINT32 entry_remains = 0; + ZXIC_UINT32 start_index = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_base_addr is %d.\n", __FUNCTION__, + __LINE__, ddr_base_addr); + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_wr_mode is %d.\n", __FUNCTION__, + __LINE__, ddr_wr_mode); + ZXIC_COMM_TRACE_INFO("%s--[%d]:entry_num is %d.\n", __FUNCTION__, + __LINE__, entry_num); + + switch (ddr_wr_mode) { + case SMMU1_DDR_WRT_128b: //128bit + { + entry_num_max = 0x1ff; + break; + } + + case SMMU1_DDR_WRT_256b: //256bit + { + entry_num_max = 0x155; + break; + } + + case SMMU1_DDR_WRT_384b: //384bit + { + entry_num_max = 0xcc; + break; + } + + case SMMU1_DDR_WRT_512b: //512bit + { + entry_num_max = 0xcc; + break; + } + } + + ZXIC_COMM_CHECK_INDEX_EQUAL(entry_num_max, 0); + entry_cycle = entry_num / entry_num_max; + entry_remains = entry_num % entry_num_max; + + for (i = 0; i < entry_cycle; ++i) { + start_index = entry_num_max * i; + + rc = dpp_dtb_smmu1_flush_cycle(dev, queue_id, ddr_base_addr, + ddr_wr_mode, ddr_ecc_en, + start_index, entry_num_max, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_flush_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_smmu1_flush_cycle[%d] element_id = %d\n", i, + *element_id); + } + + if (entry_remains) { + start_index = entry_num_max * entry_cycle; + rc = dpp_dtb_smmu1_flush_cycle(dev, queue_id, ddr_base_addr, + ddr_wr_mode, ddr_ecc_en, + start_index, entry_remains, + element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu1_flush_cycle"); + + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_smmu1_flush_cycle: element_id = %d\n", + *element_id); + } + + return DPP_OK; +} + +/***********************************************************/ +/** flush指定ZCELL指定范围空间 +* @param dev_id 
设备id +* @param queue_id 队列id +* @param zcell_id zcell id 0~127 +* @param start_index 起始索引 0~511 +* @param num 条目数 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_zcell_range_clr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 zcell_id, + ZXIC_UINT32 start_index, + ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 zcdep = 0; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 addr_offset = 0; + ZXIC_UINT32 dtb_len = 0; + ZXIC_UINT32 data[512 / 32] = { 0 }; + DPP_DTB_ENTRY_T entry = { 0 }; + ZXIC_UINT32 entry_data_buff[512 / 32] = { 0 }; + ZXIC_UINT8 entry_cmd_buff[DTB_TABLE_CMD_SIZE_BYTE] = { 0 }; + ZXIC_UINT8 *p_data_buff = NULL; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 end_index = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), zcell_id, 0, + SE_ZCELL_TOTAL_NUM - 1); + ZXIC_COMM_CHECK_INDEX_UPPER(start_index, SE_RAM_DEPTH - 1); + + entry.cmd = entry_cmd_buff; + entry.data = (ZXIC_UINT8 *)entry_data_buff; + ZXIC_COMM_MEMSET(entry_cmd_buff, 0, sizeof(entry_cmd_buff)); + ZXIC_COMM_MEMSET(entry_data_buff, 0, sizeof(entry_data_buff)); + ZXIC_COMM_MEMSET(data, 0, sizeof(data)); + + p_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data_buff); + ZXIC_COMM_MEMSET(p_data_buff, 0, + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + + end_index = start_index + num; + end_index = (end_index > SE_RAM_DEPTH) ? 
SE_RAM_DEPTH : end_index; + + for (zcdep = start_index; zcdep < end_index; zcdep++) { + addr = ZBLK_ITEM_ADDR_CALC(zcell_id, zcdep); + rc = dpp_dtb_se_alg_zcam_data_write(DEV_ID(dev), addr, + (ZXIC_UINT8 *)data, &entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_se_alg_zcam_data_write", p_data_buff); + + dtb_len += DTB_ZCAM_LEN_SIZE; + addr_offset = (zcdep - start_index) * DTB_ZCAM_LEN_SIZE * + DTB_LEN_POS_SETP; /*在缓存中相对于0的offset*/ + rc = dpp_dtb_data_write(p_data_buff, addr_offset, &entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_data_write", p_data_buff); + ZXIC_COMM_MEMSET(entry_cmd_buff, 0x0, sizeof(entry_cmd_buff)); + ZXIC_COMM_MEMSET(entry_data_buff, 0x0, sizeof(entry_data_buff)); + } + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + p_data_buff, &element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_down_table_data", p_data_buff); + + ZXIC_COMM_FREE(p_data_buff); + return DPP_OK; +} + +/***********************************************************/ +/** flush指定ZCELL空间 +* @param dev_id 设备id +* @param queue_id 队列id +* @param zcell_id zcell id 0~127 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_zcell_clr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 zcell_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 max_num = 0xcc; + ZXIC_UINT32 start_index = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), zcell_id, 0, + SE_ZCELL_TOTAL_NUM - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + + start_index = 0; + rc = dpp_dtb_hash_zcell_range_clr(dev, queue_id, zcell_id, start_index, + max_num); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_hash_zcell_range_clr"); + + start_index = max_num; + rc = dpp_dtb_hash_zcell_range_clr(dev, queue_id, zcell_id, start_index, + max_num); + 
ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_hash_zcell_range_clr"); + + start_index = max_num * 2; + rc = dpp_dtb_hash_zcell_range_clr(dev, queue_id, zcell_id, start_index, + max_num); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_hash_zcell_range_clr"); + + return DPP_OK; +} + +/***********************************************************/ +/** flush指定ZREG空间 +* @param dev_id 设备id +* @param queue_id 队列id +* @param zblk_id zblock id 0~31 +* @param zreg_id zreg id 0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_zreg_clr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 zblk_id, ZXIC_UINT32 zreg_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 wr_data[16] = { 0 }; + ZXIC_UINT32 dtb_len = 0; + + DPP_DTB_ENTRY_T entry = { 0 }; + ZXIC_UINT32 entry_data_buff[512 / 32] = { 0 }; + ZXIC_UINT8 entry_cmd_buff[DTB_TABLE_CMD_SIZE_BYTE] = { 0 }; + ZXIC_UINT8 *p_data_buff = NULL; + ZXIC_UINT32 element_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), zblk_id, 0, SE_ZBLK_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), zreg_id, 0, SE_ZREG_NUM - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + + entry.cmd = entry_cmd_buff; + entry.data = (ZXIC_UINT8 *)entry_data_buff; + ZXIC_COMM_MEMSET(entry_cmd_buff, 0, sizeof(entry_cmd_buff)); + ZXIC_COMM_MEMSET(entry_data_buff, 0, sizeof(entry_data_buff)); + ZXIC_COMM_MEMSET(wr_data, 0x0, sizeof(wr_data)); + + addr = ZBLK_HASH_LIST_REG_ADDR_CALC(zblk_id, zreg_id); + rc = dpp_dtb_se_alg_zcam_data_write(DEV_ID(dev), addr, + (ZXIC_UINT8 *)wr_data, &entry); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_se_alg_zcam_data_write"); + + p_data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data_buff); + 
ZXIC_COMM_MEMSET(p_data_buff, 0x0, + DPP_DTB_TABLE_DATA_BUFF_SIZE * sizeof(ZXIC_UINT8)); + + dtb_len += DTB_ZCAM_LEN_SIZE; + rc = dpp_dtb_data_write(p_data_buff, 0, &entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT(rc, "dpp_dtb_data_write", + p_data_buff); + + rc = dpp_dtb_write_down_table_data(dev, queue_id, dtb_len * 16, + p_data_buff, &element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_down_table_data", p_data_buff); + + ZXIC_COMM_FREE(p_data_buff); + return DPP_OK; +} + +/***********************************************************/ +/** flush指定ZCAM空间(独占模式) +* @param p_se_cfg 全局数据结构 +* @param queue_id 队列id +* @param fun_id hash引擎0~3 +* @param bulk_id bulk id 0~7 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_specify_zcam_space_clr(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 zblock_id = 0; + + D_NODE *p_zblk_dn = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + SE_ZREG_CFG *p_zreg = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), fun_id, 0, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), bulk_id, 0, HASH_BULK_NUM - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + + // dev_id = p_se_cfg->dev_id; + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN(p_func_info, fun_id, FUN_HASH); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblock_id = p_zblk->zblk_idx; + + for (i = 0; i < SE_ZCELL_NUM; i++) { + p_zcell = 
&(p_zblk->zcell_info[i]); + if ((p_zcell->flag & DPP_ZCELL_FLAG_IS_MONO) && + (p_zcell->bulk_id == bulk_id)) { + rc = dpp_dtb_hash_zcell_clr(dev, queue_id, + p_zcell->zcell_idx); + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "dpp_dtb_hash_zcell_clr"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "the Zblock[%d]'s Mono Zcell_id :%d \n", + zblock_id, p_zcell->zcell_idx); + } + } + + for (i = 0; i < SE_ZREG_NUM; i++) { + p_zreg = &(p_zblk->zreg_info[i]); + if ((p_zreg->flag & DPP_ZREG_FLAG_IS_MONO) && + (p_zreg->bulk_id == bulk_id)) { + rc = dpp_dtb_hash_zreg_clr(dev, queue_id, + zblock_id, i); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "npe_hash_zreg_clr"); + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "the Zblock[%d]'s Mono Zreg_id :%d \n", + zblock_id, i); + } + } + + p_zblk_dn = p_zblk_dn->next; + } + + return DPP_OK; +} + +/***********************************************************/ +/** flush当前hash引擎占用的ZCAM空间 +* @param p_se_cfg 全局数据结构 +* @param queue_id 队列id +* @param fun_id hash引擎0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_zcam_space_clr(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 queue_id, ZXIC_UINT32 fun_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 zblock_id = 0; + + D_NODE *p_zblk_dn = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), fun_id, 0, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + + // dev_id = p_se_cfg->dev_id; + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN(p_func_info, fun_id, FUN_HASH); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + p_zblk_dn = 
p_hash_cfg->hash_shareram.zblk_list.p_next; + + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblock_id = p_zblk->zblk_idx; + for (index = 0; index < SE_ZCELL_NUM; index++) { + p_zcell = &(p_zblk->zcell_info[index]); + rc = dpp_dtb_hash_zcell_clr(dev, queue_id, + p_zcell->zcell_idx); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_hash_zcell_clr"); + } + + for (index = 0; index < SE_ZREG_NUM; index++) { + rc = dpp_dtb_hash_zreg_clr(dev, queue_id, zblock_id, + index); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_hash_zreg_clr"); + } + p_zblk_dn = p_zblk_dn->next; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 清除指定空间的hash表项(清除软件配置) +* @param p_se_cfg +* @param hash_id +* @param bulk_id +* +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/18 +************************************************************/ +DPP_STATUS dpp_hash_specify_entry_delete(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 hash_id, + ZXIC_UINT32 bulk_id) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT8 key_valid = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 temp_bulk_id = 0; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + D_HEAD *p_head_hash_rb = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_rtn = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + SE_ITEM_CFG *p_item = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(hash_id, 0, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(bulk_id, 0, HASH_BULK_NUM - 1); + + dev_id = p_se_cfg->dev_id; + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, hash_id); + DPP_SE_CHECK_FUN(p_func_info, hash_id, FUN_HASH); + + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + p_head_hash_rb = &p_hash_cfg->hash_rb.tn_list; + + p_node = p_head_hash_rb->p_next; + while (p_node) { + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = (DPP_HASH_RBKEY_INFO 
*)p_rb_tn->p_key; + key_valid = DPP_GET_HASH_KEY_VALID(p_rbkey->key); + table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + temp_bulk_id = ((table_id >> 2) & 0x7); + if ((!key_valid) || (temp_bulk_id != bulk_id)) { + p_node = p_node->next; + continue; + } + + p_node = p_node->next; /*zxic_comm_rb_delete删除操作之前执行*/ + rc = zxic_comm_rb_delete(&p_hash_cfg->hash_rb, p_rbkey, + &p_rb_tn_rtn); + if (ZXIC_RBT_RC_SRHFAIL == rc) { + p_hash_cfg->hash_stat.delete_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "Error!there is not item in hash!\n"); + return DPP_HASH_RC_DEL_SRHFAIL; + } + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_tn_rtn); + p_rbkey_rtn = (DPP_HASH_RBKEY_INFO *)(p_rb_tn_rtn->p_key); + p_item = p_rbkey_rtn->p_item_info; + + rc = zxic_comm_double_link_del(&(p_rbkey_rtn->entry_dn), + &(p_item->item_list)); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_del"); + p_item->wrt_mask &= + ~(DPP_GET_HASH_ENTRY_MASK(p_rbkey_rtn->entry_size, + p_rbkey_rtn->entry_pos)) & + 0xF; + + if (0 == p_item->item_list.used) { + if ((ITEM_DDR_256 == p_item->item_type) || + (ITEM_DDR_512 == p_item->item_type)) { + /* modify coverity by yinxh 2021.03.10*,以256bit为单位。暂不考虑512bit的情况*/ + ZXIC_COMM_CHECK_INDEX_UPPER( + p_item->item_index, + p_hash_cfg->p_bulk_ddr_info[bulk_id] + ->item_num); + p_hash_cfg->p_bulk_ddr_info[bulk_id] + ->p_item_array[p_item->item_index] = + NULL; + ZXIC_COMM_FREE(p_item); + } else { + p_item->valid = 0; + } + } + + ZXIC_COMM_FREE(p_rbkey_rtn); + ZXIC_COMM_FREE(p_rb_tn_rtn); + p_hash_cfg->hash_stat.delete_ok++; + } + + return DPP_OK; +} + +/***********************************************************/ +/** flush指定eram空间 +* @param dev_id 设备id +* @param queue_id 队列id +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_eram_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 eram_depth 
= 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT8 *pBuff = NULL; + DPP_DTB_ERAM_ENTRY_INFO_T *p_entry_arr = NULL; + DPP_DTB_ERAM_ENTRY_INFO_T *p_temp_entry_arr = NULL; + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + eram_depth = sdt_eram.eram_table_depth; + p_entry_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + eram_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_POINT(p_entry_arr); + pBuff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(eram_depth * 4 * + sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(pBuff, p_entry_arr); + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)p_entry_arr, 0x0, + eram_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET(pBuff, 0x0, eram_depth * 4 * sizeof(ZXIC_UINT32)); + for (index = 0; index < eram_depth; index++) { + p_temp_entry_arr = p_entry_arr + index; + p_temp_entry_arr->index = index; + p_temp_entry_arr->p_data = + (ZXIC_UINT32 *)(pBuff + + (index * 4 * sizeof(ZXIC_UINT32))); + } + + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, eram_depth, + p_entry_arr, &element_id); + ZXIC_COMM_CHECK_RC_MEMORY_FREE2PTR_NO_ASSERT( + rc, "dpp_dtb_eram_dma_write", pBuff, p_entry_arr); + + ZXIC_COMM_FREE(pBuff); + ZXIC_COMM_FREE(p_entry_arr); + return DPP_OK; +} + +/***********************************************************/ +/** flush指定hash空间(DDR/ZCAM) +* @param dev_id 设备id +* @param queue_id 队列id +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/02 +************************************************************/ +DPP_STATUS dpp_dtb_hash_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT8 
bulk_id = 0; + ZXIC_UINT8 table_id = 0; + ZXIC_UINT32 ddr_baddr = 0; + ZXIC_UINT32 ddr_item_num = 0; + ZXIC_UINT32 ddr_tbl_wr_mode = 0; + ZXIC_UINT32 element_id = 0; + + DPP_SE_CFG *p_se_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + HASH_DDR_CFG *p_ddr_cfg = NULL; + DPP_SDTTBL_HASH_T sdt_hash = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, sdt_hash.hash_id); + DPP_SE_CHECK_FUN(p_func_info, sdt_hash.hash_id, FUN_HASH); + + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + table_id = sdt_hash.hash_table_id; + bulk_id = ((table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), bulk_id, 0, HASH_BULK_NUM - 1); + + /*独占模式下才执行flush操作*/ + if (!p_hash_cfg->bulk_ram_mono[bulk_id]) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "fush error:hash[%u] bulk[%u] is not monopolize!\n", + sdt_hash.hash_id, bulk_id); + return DPP_HASH_RC_INVALID_PARA; + } + + /*混合模式,先清除DDR空间*/ + if (p_hash_cfg->ddr_valid) { + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[bulk_id]; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ddr_cfg); + ddr_baddr = p_ddr_cfg->ddr_baddr; + ddr_item_num = + p_ddr_cfg->item_num; /*ddr存储单元数目,以DDR位宽为单位*/ + ddr_tbl_wr_mode = SMMU1_DDR_WRT_256b; + if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + ddr_tbl_wr_mode = SMMU1_DDR_WRT_512b; + } + + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_baddr is %d.\n", + __FUNCTION__, __LINE__, ddr_baddr); + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_item_num is 
%d.\n", + __FUNCTION__, __LINE__, ddr_item_num); + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_tbl_wr_mode is %d.\n", + __FUNCTION__, __LINE__, ddr_tbl_wr_mode); + + rc = dpp_dtb_smmu1_flush(dev, queue_id, ddr_baddr, + ddr_tbl_wr_mode, p_ddr_cfg->ddr_ecc_en, + ddr_item_num, &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_dtb_smmu1_flush"); + } + ZXIC_COMM_TRACE_INFO("dpp_dtb_hash_table_flush: DDR DONE!!!\n"); + + /*清除zcam空间*/ + rc = dpp_dtb_specify_zcam_space_clr(dev, p_se_cfg, queue_id, + sdt_hash.hash_id, bulk_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_specify_zcam_space_clr"); + ZXIC_COMM_TRACE_INFO("dpp_dtb_specify_zcam_space_clr DONE!!!\n"); + + /*软件删除表项*/ + rc = dpp_hash_specify_entry_delete(dev, p_se_cfg, sdt_hash.hash_id, + bulk_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_specify_entry_delete"); + ZXIC_COMM_TRACE_INFO("dpp_hash_specify_entry_delete DONE!!!\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** 清除hash表资源(硬件和软件,硬件通过dtb方式清除) +* @param dev_id 设备id +* @param queue_id 队列id +* @param hash_id hash引擎 0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/09/26 +************************************************************/ +DPP_STATUS dpp_dtb_hash_all_entry_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 hash_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 bulk_id = 0; + ZXIC_UINT32 ddr_baddr = 0; + ZXIC_UINT32 ddr_item_num = 0; + ZXIC_UINT32 ddr_tbl_wr_mode = 0; + ZXIC_UINT32 element_id = 0; + + DPP_SE_CFG *p_se_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + HASH_DDR_CFG *p_ddr_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(hash_id, 0, HASH_FUNC_ID_NUM - 1); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_se_cfg_get"); + 
ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, hash_id); + DPP_SE_CHECK_FUN(p_func_info, hash_id, FUN_HASH); + + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + + /*混合模式,先清除DDR空间*/ + if (p_hash_cfg->ddr_valid) { + for (bulk_id = 0; bulk_id < HASH_BULK_NUM; bulk_id++) { + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[bulk_id]; + if (NULL == p_ddr_cfg) { + continue; + } + ddr_baddr = p_ddr_cfg->ddr_baddr; + ddr_item_num = + p_ddr_cfg->item_num; /*ddr存储单元数目,以DDR位宽为单位*/ + ddr_tbl_wr_mode = SMMU1_DDR_WRT_256b; + if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + ddr_tbl_wr_mode = SMMU1_DDR_WRT_512b; + } + + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_baddr is %d.\n", + __FUNCTION__, __LINE__, ddr_baddr); + ZXIC_COMM_TRACE_INFO("%s--[%d]:ddr_item_num is %d.\n", + __FUNCTION__, __LINE__, + ddr_item_num); + ZXIC_COMM_TRACE_INFO( + "%s--[%d]:ddr_tbl_wr_mode is %d.\n", + __FUNCTION__, __LINE__, ddr_tbl_wr_mode); + + rc = dpp_dtb_smmu1_flush(dev, queue_id, ddr_baddr, + ddr_tbl_wr_mode, + p_ddr_cfg->ddr_ecc_en, + ddr_item_num, &element_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_smmu1_flush"); + } + } + + /*清除hash引擎占用的整个zcam空间*/ + rc = dpp_dtb_zcam_space_clr(dev, p_se_cfg, queue_id, hash_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_specify_zcam_space_clr"); + ZXIC_COMM_PRINT("dpp_dtb_zcam_space_clr hash id: %d DONE!!!\n", + hash_id); + + /*软件删除表项*/ + rc = dpp_hash_soft_all_entry_delete(p_se_cfg, hash_id); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_soft_all_entry_delete"); + ZXIC_COMM_PRINT("dpp_hash_soft_all_entry_delete hash id %d DONE!!!\n", + hash_id); + + return DPP_OK; +} + +/***********************************************************/ +/** DTB etcam 整个流表清空Flush +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 
+************************************************************/ +DPP_STATUS dpp_dtb_etcam_table_flush(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) +{ + // 按照std 号的深度进行etcam的内容的清理; + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 data_byte_size = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 etcam_key_mode = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 etcam_table_depth = 0; + ZXIC_UINT32 element_id = 0; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + + ZXIC_UINT8 *data_buff = NULL; + ZXIC_UINT8 *mask_buff = NULL; + ZXIC_UINT32 *eram_buff = NULL; + DPP_DTB_ACL_ENTRY_INFO_T *p_entry_arr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + as_enable = sdt_etcam_info.as_en; + etcam_table_depth = sdt_etcam_info.etcam_table_depth; + + data_byte_size = DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode); //80/40 + + //组装数据 + p_entry_arr = (DPP_DTB_ACL_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + etcam_table_depth * sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_entry_arr); + ZXIC_COMM_MEMSET(p_entry_arr, 0, + etcam_table_depth * sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size * + sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE_NO_ASSERT(DEV_ID(dev), data_buff, + p_entry_arr); + ZXIC_COMM_MEMSET(data_buff, 0xFF, data_byte_size * sizeof(ZXIC_UINT8)); + + mask_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size * + sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE2PTR_NO_ASSERT( + DEV_ID(dev), mask_buff, p_entry_arr, data_buff); + ZXIC_COMM_MEMSET(mask_buff, 0, data_byte_size * sizeof(ZXIC_UINT8)); + + if (as_enable) { + eram_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC( + 4 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE3PTR_NO_ASSERT( + 0, eram_buff, mask_buff, p_entry_arr, data_buff); + 
ZXIC_COMM_MEMSET(eram_buff, 0, 4 * sizeof(ZXIC_UINT32)); + } + + for (index = 0; index < etcam_table_depth; index++) { + p_entry_arr[index].handle = index; + p_entry_arr[index].key_data = data_buff; + p_entry_arr[index].key_mask = mask_buff; + + if (as_enable) { + p_entry_arr[index].p_as_rslt = (ZXIC_UINT8 *)eram_buff; + } + } + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, etcam_table_depth, + p_entry_arr, &element_id); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(mask_buff); + if (eram_buff) { + ZXIC_COMM_FREE(eram_buff); + } + ZXIC_COMM_FREE(p_entry_arr); + ZXIC_COMM_CHECK_DEV_RC(0, rc, "dpp_dtb_acl_dma_insert"); + + return rc; +} + +#endif + +#if ZXIC_REAL("DTB DUMP INTERFACE") + +/***********************************************************/ +/** 根据队列号和sdt号分配元素id和dma地址 +* @param dev_id 设备id +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param element_id 出参,元素id +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/22 +************************************************************/ +DPP_STATUS dpp_dtb_dump_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dump_element_id = 0; + ZXIC_UINT64 phy_addr = 0; + ZXIC_UINT64 vir_addr = 0; + ZXIC_UINT32 size = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + /*获取dump目的地址*/ + rc = dpp_dtb_dump_sdt_addr_get(dev, queue_id, sdt_no, &phy_addr, + &vir_addr, &size); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_sdt_addr_get"); + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)vir_addr, 0, size); + + /*获取队列可用元素*/ + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &dump_element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_free_item_get"); + ZXIC_COMM_TRACE_DEV_INFO(DEV_ID(dev), + "table up item queue_element_id is: %d.\n", + dump_element_id); + + 
/*在相应队列的元素上配置用户dma地址信息*/ + rc = dpp_dtb_tab_up_item_user_addr_set(dev, queue_id, dump_element_id, + phy_addr, vir_addr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_set"); + + *element_id = dump_element_id; + + return DPP_OK; +} + +/***********************************************************/ +/** 将dump数据解析为hash条目格式 +* @param p_hash_entry_cfg hash配置参数 +* @param item_type 条目类型,见枚举SE_ITEM_TYPE +* @param pdata dump数据 +* @param dump_len dump有效长度 +* @param pOutData 出参,连续内存,hash条目信息DPP_HASH_ENTRY +* @param p_item_num 出参,解析出的hash有效条目数 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/22 +************************************************************/ +DPP_STATUS dpp_dtb_dump_hash_parse(HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT32 item_type, ZXIC_UINT8 *pdata, + ZXIC_UINT32 dump_len, ZXIC_UINT8 *pOutData, + ZXIC_UINT32 *p_item_num) +{ + ZXIC_UINT32 item_num = 0; + ZXIC_UINT32 data_offset = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT8 temp_key_valid = 0; + ZXIC_UINT8 temp_key_type = 0; + ZXIC_UINT8 temp_tbl_id = 0; + ZXIC_UINT32 srh_entry_size = 0; + ZXIC_UINT32 item_width = SE_ITEM_WIDTH_MAX; + ZXIC_UINT8 *p_temp_key = NULL; + ZXIC_UINT8 *p_hash_item = NULL; + DPP_HASH_ENTRY *p_dtb_hash_entry = NULL; + DPP_HASH_ENTRY *p_temp_entry = NULL; + + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(pdata); + ZXIC_COMM_CHECK_POINT(pOutData); + ZXIC_COMM_CHECK_POINT(p_item_num); + + if (ITEM_DDR_256 == item_type) { + item_width = item_width / 2; + } + + p_dtb_hash_entry = (DPP_HASH_ENTRY *)pOutData; + srh_entry_size = DPP_GET_HASH_ENTRY_SIZE(p_hash_entry_cfg->key_type); + if (srh_entry_size == 0) { + ZXIC_COMM_TRACE_ERROR("key type %d srh entry size is zero\n", + p_hash_entry_cfg->key_type); + return DPP_ERR; + } + + for (index = 0; index < (dump_len / item_width); index++) { + data_offset = 0; + p_hash_item = pdata + index * item_width; + while (data_offset < item_width) { + p_temp_key = p_hash_item + data_offset; + temp_key_valid = 
DPP_GET_HASH_KEY_VALID(p_temp_key); + temp_key_type = DPP_GET_HASH_KEY_TYPE(p_temp_key); + temp_tbl_id = DPP_GET_HASH_TBL_ID(p_temp_key); + p_temp_entry = p_dtb_hash_entry + item_num; + ZXIC_COMM_CHECK_POINT(p_temp_entry); + ZXIC_COMM_CHECK_POINT(p_temp_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_temp_entry->p_rst); + if (temp_key_valid && + (temp_key_type == p_hash_entry_cfg->key_type) && + (temp_tbl_id == p_hash_entry_cfg->table_id)) { + ZXIC_COMM_MEMCPY( + p_temp_entry->p_key, p_temp_key, + p_hash_entry_cfg->key_by_size + 1); + ZXIC_COMM_MEMCPY( + p_temp_entry->p_rst, + p_temp_key + 1 + + p_hash_entry_cfg->key_by_size, + p_hash_entry_cfg->rst_by_size); + item_num++; + } + + data_offset += srh_entry_size; + } + } + + *p_item_num = item_num; + return DPP_OK; +} + +/** dtb dump eram整个表项内容 输入128bit单位index,输出128bit为单位的数据 +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param start_index 要dump的起始index 单位是128bit +* @param depth dump的深度以128bit为单位 +* @param p_data dump出数据缓存(大小128bit * depth) +* @param element_id 返回下表使用的元素id +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_sdt_eram_table_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 start_index, + ZXIC_UINT32 depth, ZXIC_UINT32 *p_data, + ZXIC_UINT32 *element_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 eram_base_addr = 0; + ZXIC_UINT32 dump_addr_128bit = 0; + ZXIC_UINT32 dump_item_index = 0; + ZXIC_UINT32 dump_data_len = 0; + ZXIC_UINT32 dump_desc_len = 0; + + ZXIC_ADDR_T dump_sdt_phy_addr = 0; + ZXIC_ADDR_T dump_sdt_vir_addr = 0; + ZXIC_UINT32 dump_addr_size = 0; + + ZXIC_UINT32 dump_dst_phy_haddr = 0; + ZXIC_UINT32 dump_dst_phy_laddr = 0; + + ZXIC_UINT8 form_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + /*获取sdt信息*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 
1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), element_id); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_soft_sdt_tbl_get"); + + eram_base_addr = sdt_eram.eram_base_addr; + dump_addr_128bit = eram_base_addr + start_index; + + //获取dump目的地址 + rc = dpp_dtb_dump_sdt_addr_get(dev, queue_id, sdt_no, + &dump_sdt_phy_addr, &dump_sdt_vir_addr, + &dump_addr_size); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_sdt_addr_get"); + + ZXIC_COMM_MEMSET((ZXIC_UINT8 *)dump_sdt_vir_addr, 0, dump_addr_size); + + rc = dpp_dtb_tab_up_free_item_get(dev, queue_id, &dump_item_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_free_item_get"); + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "dump queue id %d, element_id is: %d.\n", + queue_id, dump_item_index); + + *element_id = dump_item_index; //保存获取的item_index + + ZXIC_COMM_TRACE_INFO("eram dump eram_base_addr %x\n", eram_base_addr); + ZXIC_COMM_TRACE_INFO("eram dump start_index %x\n", start_index); + ZXIC_COMM_TRACE_INFO("eram dump queue %d,item_index: %d\n", queue_id, + dump_item_index); + + //在相应队列的元素上配置用户dma地址信息 + rc = dpp_dtb_tab_up_item_user_addr_set(dev, queue_id, dump_item_index, + dump_sdt_phy_addr, + dump_sdt_vir_addr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_set"); + + /*获取地址*/ + rc = dpp_dtb_tab_up_item_addr_get(dev, queue_id, dump_item_index, + &dump_dst_phy_haddr, + &dump_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get"); + + rc = dpp_dtb_smmu0_dump_info_write(dev, dump_addr_128bit, depth, + dump_dst_phy_haddr, + dump_dst_phy_laddr, + (ZXIC_UINT32 *)form_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_smmu0_dump_info_write"); + + /*组装下表命令格式*/ + dump_data_len = depth * 128 / 32; + dump_desc_len = 
DTB_LEN_POS_SETP / 4; + + if (dump_data_len * 4 > dump_addr_size) { + ZXIC_COMM_TRACE_ERROR("eram dump size is too small!\n"); + return DPP_RC_DTB_DUMP_SIZE_SMALL; + } + + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, dump_item_index, + (ZXIC_UINT32 *)form_buff, + dump_data_len, dump_desc_len, p_data); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info"); + + return DPP_OK; +} + +/** dtb dump eram直接表表项内容 支持64bit/128bit +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no eram表sdt表号 +* @param start_index 要dump的起始index,单位是sdt_no该表的mode +* @param p_dump_data_arr 本次dump出的数据,数据格式与下表格式相同 +* @param entry_num 本次dump实际的条目数 +* @param next_start_index 下次dump是开始的index +* @param finish_flag 整个表dump完成标志,1表示完成,0表示未完成 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_eram_table_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_DUMP_INDEX_T start_index, + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr, + ZXIC_UINT32 *entry_num, + DPP_DTB_DUMP_INDEX_T *next_start_index, + ZXIC_UINT32 *finish_flag) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dump_mode = 0; + ZXIC_UINT32 eram_table_depth = 0; + ZXIC_UINT32 start_index_128bit = 0; + ZXIC_UINT32 row_index = 0; + ZXIC_UINT32 col_index = 0; + ZXIC_UINT32 dump_depth_128bit = 0; + ZXIC_UINT32 dump_depth = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 *dump_data_buff = NULL; + ZXIC_UINT8 *temp_data = NULL; + ZXIC_UINT32 remain = 0; + ZXIC_UINT32 *buff = NULL; + + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_user_data = NULL; + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_soft_sdt_tbl_get"); + + dump_mode = sdt_eram.eram_mode; //0:1bit;2:64bit;3:128, + eram_table_depth = sdt_eram.eram_table_depth; + + rc = dtb_eram_index_cal(dev, dump_mode, 
eram_table_depth, + &dump_depth_128bit, &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + + rc = dtb_eram_index_cal(dev, dump_mode, start_index.index, + &start_index_128bit, &col_index); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dtb_eram_index_cal"); + + dump_depth = dump_depth_128bit - start_index_128bit; + + dump_data_buff = + (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(dump_depth * DTB_LEN_POS_SETP); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), dump_data_buff); + ZXIC_COMM_MEMSET(dump_data_buff, 0, dump_depth * DTB_LEN_POS_SETP); + + rc = dpp_dtb_sdt_eram_table_dump(dev, queue_id, sdt_no, + start_index_128bit, dump_depth, + (ZXIC_UINT32 *)dump_data_buff, + &element_id); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_sdt_eram_table_dump", dump_data_buff); + ZXIC_COMM_TRACE_INFO( + " dpp_dtb_sdt_eram_table_dump done queue %d element %d.\n", + queue_id, element_id); + + if (dump_mode == ERAM128_TBL_128b) { + for (i = 0; i < dump_depth; i++) { + p_dump_user_data = p_dump_data_arr + i; + temp_data = dump_data_buff + i * DTB_LEN_POS_SETP; + if ((p_dump_user_data == NULL) || + (p_dump_user_data->p_data == NULL)) { + ZXIC_COMM_TRACE_ERROR( + "eram index 0x%x data user buff is NULL!\n", + start_index.index + i); + ZXIC_COMM_VFREE(dump_data_buff); + return DPP_ERR; + } + + p_dump_user_data->index = start_index.index + i; + ZXIC_COMM_MEMCPY(p_dump_user_data->p_data, temp_data, + (128 / 8)); + } + } else if (dump_mode == ERAM128_TBL_64b) { + remain = start_index.index % 2; + for (i = 0; i < eram_table_depth - start_index.index; i++) { + rc = dtb_eram_index_cal(dev, dump_mode, remain, + &row_index, &col_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dtb_eram_index_cal", dump_data_buff); + temp_data = + dump_data_buff + row_index * DTB_LEN_POS_SETP; + + buff = (ZXIC_UINT32 *)temp_data; + p_dump_user_data = p_dump_data_arr + i; + if (p_dump_user_data->p_data == NULL) { + ZXIC_COMM_TRACE_ERROR( + "eram index 0x%x data point is NULL!\n", + 
start_index.index + i); + ZXIC_COMM_VFREE(dump_data_buff); + return DPP_ERR; + } + + p_dump_user_data->index = start_index.index + i; + ZXIC_COMM_MEMCPY(p_dump_user_data->p_data, + buff + ((1 - col_index) << 1), + (64 / 8)); + + remain++; + } + } + + *entry_num = eram_table_depth - start_index.index; + *finish_flag = 1; + ZXIC_COMM_TRACE_INFO( + " eram table dump entry num %d, finish flag %d\n\n", *entry_num, + *finish_flag); + + ZXIC_COMM_VFREE(dump_data_buff); + + return DPP_OK; +} + +/***********************************************************/ +/** dump eram表内容 +* @param dev 设备 +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2025/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_eram_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT64 dma_phy_addr = 0; + ZXIC_UINT64 dma_vir_addr = 0; + ZXIC_UINT32 dma_size = DTB_SDT_DUMP_SIZE / 2; + ZXIC_MUTEX_T *p_mutex = NULL; + DPP_DTB_DUMP_INDEX_T start_index = { 0 }; + DPP_DTB_DUMP_INDEX_T next_start_index = { 0 }; + ZXIC_UINT32 finish_flag = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pDumpData); + ZXIC_COMM_CHECK_POINT(p_entry_num); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB_RB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_dtb_opr_mutex_get"); + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + rc = dpp_dev_dump_dma_mem_get(dev, &dma_size, &dma_phy_addr, + &dma_vir_addr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dev_dump_dma_mem_get", p_mutex); + + rc = dpp_dtb_dump_sdt_addr_set(dev, queue_id, sdt_no, dma_phy_addr, + dma_vir_addr, dma_size); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_dump_sdt_addr_set", p_mutex); + + start_index.index = 0; + rc = 
dpp_dtb_eram_table_dump(dev, queue_id, sdt_no, start_index, + (DPP_DTB_ERAM_ENTRY_INFO_T *)pDumpData, + p_entry_num, &next_start_index, + &finish_flag); + rc |= dpp_dtb_dump_sdt_addr_clear(dev, queue_id, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_eram_table_dump", p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** dump指定hash表占用的zcam数据 +* @param dev_id 设备id +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param fun_id 需dump的zblock个数 +* @param bulk_id zblock索引 +* @param p_data 出参,dump数据 +* @param p_dump_len 出参,dump长度 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/22 +************************************************************/ +DPP_STATUS +dpp_dtb_sdt_hash_zcam_mono_space_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, ZXIC_UINT8 *p_data, + ZXIC_UINT32 *p_dump_len) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 zblock_id = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 start_addr = 0; + ZXIC_UINT32 dtb_desc_len = 0; + ZXIC_UINT32 dump_pa_h = 0; + ZXIC_UINT32 dump_pa_l = 0; + ZXIC_UINT32 dma_addr_offset = 0; + ZXIC_UINT32 desc_addr_offset = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 *p_dump_desc_buf = NULL; + + DPP_SE_CFG *p_se_cfg = NULL; + D_NODE *p_zblk_dn = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + SE_ZREG_CFG *p_zreg = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + + DPP_DTB_ENTRY_T dtb_dump_entry = { 0 }; + ZXIC_UINT8 cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), fun_id, 0, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), bulk_id, 0, HASH_BULK_NUM - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_POINT(p_data); + 
ZXIC_COMM_CHECK_POINT(p_dump_len); + + rc = dpp_dtb_dump_addr_set(dev, queue_id, sdt_no, &element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_addr_set"); + + p_dump_desc_buf = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC( + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_POINT(p_dump_desc_buf); + ZXIC_COMM_MEMSET(p_dump_desc_buf, 0, + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * + sizeof(ZXIC_UINT8)); + + dtb_dump_entry.cmd = cmd_buff; + + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE_NO_ASSERT( + DEV_ID(dev), rc, "dpp_se_cfg_get", p_dump_desc_buf); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE_NO_ASSERT(DEV_ID(dev), p_se_cfg, + p_dump_desc_buf); + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN_MEMORY_FREE(p_func_info, fun_id, FUN_HASH, + p_dump_desc_buf); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblock_id = p_zblk->zblk_idx; + + //mono zcell dump + for (i = 0; i < SE_ZCELL_NUM; i++) { + p_zcell = &(p_zblk->zcell_info[i]); + + if ((p_zcell->flag & DPP_ZCELL_FLAG_IS_MONO) && + (p_zcell->bulk_id == bulk_id)) { + zcell_id = p_zcell->zcell_idx; + + start_addr = ZBLK_ITEM_ADDR_CALC(zcell_id, 0); + + /*获取dma上送指定条目的物理地址*/ + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, element_id, + dma_addr_offset, &dump_pa_h, + &dump_pa_l); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, + "dpp_dtb_tab_up_item_offset_addr_get", + p_dump_desc_buf); + + rc = dpp_dtb_zcam_dump_entry( + dev, start_addr, DTB_DUMP_ZCAM_512b, + SE_RAM_DEPTH, dump_pa_h, dump_pa_l, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_zcam_dump_entry", + p_dump_desc_buf); + + rc = dpp_dtb_data_write(p_dump_desc_buf, + desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, 
"dpp_dtb_zcam_dump_entry", + p_dump_desc_buf); + + dtb_desc_len++; + dma_addr_offset += SE_RAM_DEPTH * 512 / 8; + desc_addr_offset += DTB_LEN_POS_SETP; + + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "the Zblock[%d]'s bulk_id:%d Mono Zcell_id :%d \n", + zblock_id, bulk_id, zcell_id); + } + } + + //mono zreg dump + for (i = 0; i < SE_ZREG_NUM; i++) { + p_zreg = &(p_zblk->zreg_info[i]); + + if ((p_zreg->flag & DPP_ZREG_FLAG_IS_MONO) && + (p_zreg->bulk_id == bulk_id)) { + start_addr = ZBLK_HASH_LIST_REG_ADDR_CALC( + zblock_id, i); + + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, element_id, + dma_addr_offset, &dump_pa_h, + &dump_pa_l); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, + "dpp_dtb_tab_up_item_offset_addr_get", + p_dump_desc_buf); + + rc = dpp_dtb_zcam_dump_entry( + dev, start_addr, DTB_DUMP_ZCAM_512b, 1, + dump_pa_h, dump_pa_l, &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_zcam_dump_entry", + p_dump_desc_buf); + + rc = dpp_dtb_data_write(p_dump_desc_buf, + desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_zcam_dump_entry", + p_dump_desc_buf); + + dtb_desc_len++; + dma_addr_offset += 512 / 8; + desc_addr_offset += DTB_LEN_POS_SETP; + + ZXIC_COMM_TRACE_DEV_INFO( + DEV_ID(dev), + "the Zblock[%d]'s bulk_id:%d Mono Zreg_id :%d \n", + zblock_id, p_zreg->bulk_id, i); + } + } + + p_zblk_dn = p_zblk_dn->next; + } + + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, element_id, + (ZXIC_UINT32 *)p_dump_desc_buf, + dma_addr_offset / 4, dtb_desc_len * 4, + (ZXIC_UINT32 *)p_data); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_NO_ASSERT( + rc, "dpp_dtb_write_dump_desc_info", p_dump_desc_buf); + ZXIC_COMM_TRACE_INFO( + " dpp_dtb_hash_table_zcam_dump done queue %d element %d.\n", + queue_id, element_id); + + zxic_comm_swap(p_data, dma_addr_offset); + ZXIC_COMM_FREE(p_dump_desc_buf); + + *p_dump_len = dma_addr_offset; + + return DPP_OK; +} + 
+/***********************************************************/ +/** 只dump hash表的zcam内容 +* @param dev_id 设备id +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/22 +************************************************************/ +DPP_STATUS dpp_dtb_hash_table_only_zcam_dump(DPP_DEV_T *dev, + ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *entryNum) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT8 *p_data = NULL; + ZXIC_UINT32 data_len = 0; + ZXIC_UINT32 entry_num = 0; + ZXIC_UINT32 bulk_id = 0; + DPP_SE_CFG *p_se_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_SDTTBL_HASH_T sdt_hash = { 0 }; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(pDumpData); + ZXIC_COMM_CHECK_POINT(entryNum); + + ZXIC_COMM_TRACE_INFO("dump hash sdt no: %d\n", sdt_no); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_soft_sdt_tbl_get"); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_cfg); + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, sdt_hash.hash_id); + DPP_SE_CHECK_FUN(p_func_info, sdt_hash.hash_id, FUN_HASH); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_hash_cfg); + + ZXIC_COMM_MEMSET(&hash_entry_cfg, 0x0, sizeof(HASH_ENTRY_CFG)); + hash_entry_cfg.key_by_size = sdt_hash.key_size; + hash_entry_cfg.key_type = sdt_hash.hash_table_width; + hash_entry_cfg.rst_by_size = 1 << (sdt_hash.rsp_mode + 2); + 
hash_entry_cfg.table_id = sdt_hash.hash_table_id; + bulk_id = ((hash_entry_cfg.table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_NUM - 1); + + //申请data空间 + p_data = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(DTB_DMUP_DATA_MAX); + ZXIC_COMM_CHECK_POINT(p_data); + ZXIC_COMM_MEMSET_S(p_data, DTB_DMUP_DATA_MAX, 0, DTB_DMUP_DATA_MAX); + + rc = dpp_dtb_sdt_hash_zcam_mono_space_dump(dev, queue_id, sdt_no, + sdt_hash.hash_id, bulk_id, + p_data, &data_len); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_sdt_hash_zcam_mono_space_dump", p_data); + + rc = dpp_dtb_dump_hash_parse(&hash_entry_cfg, ITEM_RAM, p_data, + data_len, pDumpData, &entry_num); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT(rc, "dpp_dtb_dump_hash_parse", + p_data); + + *entryNum = entry_num; + + ZXIC_COMM_TRACE_INFO("hash table dump entry num %d end.\n\n", + *entryNum); + + ZXIC_COMM_VFREE(p_data); + + return rc; +} + +/***********************************************************/ +/** dump hash表内容 +* @param dev 设备 +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2025/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_hash_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT64 dma_phy_addr = 0; + ZXIC_UINT64 dma_vir_addr = 0; + ZXIC_UINT32 dma_size = DTB_SDT_DUMP_SIZE; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pDumpData); + ZXIC_COMM_CHECK_POINT(p_entry_num); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB_RB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_dtb_opr_mutex_get"); + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + rc = dpp_dev_dump_dma_mem_get(dev, &dma_size, &dma_phy_addr, + 
&dma_vir_addr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dev_dump_dma_mem_get", p_mutex); + + rc = dpp_dtb_dump_sdt_addr_set(dev, queue_id, sdt_no, dma_phy_addr, + dma_vir_addr, dma_size); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_dump_sdt_addr_set", p_mutex); + + rc = dpp_dtb_hash_table_only_zcam_dump(dev, queue_id, sdt_no, pDumpData, + p_entry_num); + rc |= dpp_dtb_dump_sdt_addr_clear(dev, queue_id, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_hash_table_only_zcam_dump", + p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} +/** dtb dump etcam直接表表项内容 级联eram支持64bit/128bit +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no acl表sdt表号 +* @param start_index 要dump的起始index,单位是sdt_no该表的mode +* @param p_dump_data_arr 本次dump出的数据,数据格式与下表格式相同 +* @param entry_num 本次dump实际的条目数 +* @param next_start_index 下次dump是开始的index +* @param finish_flag 整个表dump完成标志,1表示完成,0表示未完成 +* @return +* @remark 无 +* @see +* @author cbb @date 2022/08/30 +************************************************************/ +DPP_STATUS dpp_dtb_acl_table_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, + DPP_DTB_DUMP_INDEX_T start_index, + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_data_arr, + ZXIC_UINT32 *entry_num, + DPP_DTB_DUMP_INDEX_T *next_start_index, + ZXIC_UINT32 *finish_flag) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 handle = 0; + + ZXIC_UINT32 dump_element_id = 0; + + ZXIC_UINT8 *temp_dump_out_data = NULL; + ZXIC_UINT8 *dump_info_buff = NULL; + ZXIC_UINT8 *p_data_start = NULL; + ZXIC_UINT8 *p_data_640bit = NULL; + ZXIC_UINT8 *p_mask_start = NULL; + ZXIC_UINT8 *p_mask_640bit = NULL; + ZXIC_UINT8 *p_rst_start = NULL; + ZXIC_UINT8 *p_rst_128bit = NULL; + ZXIC_UINT32 *eram_buff = NULL; + + ZXIC_UINT32 addr_640bit = 0; + ZXIC_UINT32 rd_mask = 0; + ZXIC_UINT32 dump_eram_depth_128bit = 0; + ZXIC_UINT32 eram_row_index = 0; + ZXIC_UINT32 eram_col_index = 0; + + ZXIC_UINT8 
cmd_buff[DTB_TABLE_CMD_SIZE_BIT / 8] = { 0 }; + ZXIC_UINT8 xy_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 xy_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 dm_data[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 dm_mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + DPP_ETCAM_ENTRY_T entry_xy = { 0 }; + DPP_ETCAM_ENTRY_T entry_dm = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T *p_dump_user_data = NULL; + + ZXIC_UINT32 block_num = 0; + ZXIC_UINT32 etcam_key_mode = 0; + ZXIC_UINT32 etcam_table_id = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 as_eram_baddr = 0; + ZXIC_UINT32 etcam_as_mode = 0; + ZXIC_UINT32 etcam_table_depth = 0; + ZXIC_UINT32 block_idx = 0; + + ZXIC_UINT32 etcam_data_dst_phy_haddr = 0; + ZXIC_UINT32 etcam_data_dst_phy_laddr = 0; + ZXIC_UINT32 etcam_mask_dst_phy_haddr = 0; + ZXIC_UINT32 etcam_mask_dst_phy_laddr = 0; + ZXIC_UINT32 as_rst_dst_phy_haddr = 0; + ZXIC_UINT32 as_rst_dst_phy_laddr = 0; + + ZXIC_UINT32 dtb_desc_addr_offset = 0; + ZXIC_UINT32 dump_data_len = 0; + ZXIC_UINT32 dtb_desc_len = 0; + + ZXIC_UINT32 etcam_data_len_offset = 0; + ZXIC_UINT32 etcam_mask_len_offset = 0; + + DPP_ACL_CFG_EX_T *p_acl_cfg = NULL; + DPP_ACL_TBL_CFG_T *p_tbl_cfg = NULL; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + ETCAM_DUMP_INFO_T etcam_dump_info = { 0 }; + DPP_DTB_ENTRY_T dtb_dump_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + dtb_dump_entry.cmd = cmd_buff; + entry_xy.p_data = xy_data; + entry_xy.p_mask = xy_mask; + entry_dm.p_data = dm_data; + entry_dm.p_mask = dm_mask; + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + etcam_as_mode = sdt_etcam_info.as_rsp_mode; + etcam_table_id = sdt_etcam_info.etcam_table_id; + as_enable = sdt_etcam_info.as_en; + as_eram_baddr = sdt_etcam_info.as_eram_baddr; + etcam_table_depth = sdt_etcam_info.etcam_table_depth; + + rc = dpp_acl_cfg_get(dev, &p_acl_cfg); //获取ACL表资源配置 + 
ZXIC_COMM_CHECK_RC(rc, "dpp_acl_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_acl_cfg); + + p_tbl_cfg = + p_acl_cfg->acl_tbls + etcam_table_id; //得到该acl表的资源配置 + + if (!p_tbl_cfg->is_used) { + ZXIC_COMM_TRACE_ERROR("table[ %d ] is not init!\n", + etcam_table_id); + ZXIC_COMM_ASSERT(0); + return DPP_ACL_RC_TBL_NOT_INIT; + } + + block_num = p_tbl_cfg->block_num; + + rc = dpp_dtb_dump_addr_set(dev, queue_id, sdt_no, &dump_element_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_addr_set"); + + ZXIC_COMM_TRACE_INFO("etcam_key_mode: 0x%x\n", etcam_key_mode); + ZXIC_COMM_TRACE_INFO("etcam_table_id: 0x%x\n", etcam_table_id); + ZXIC_COMM_TRACE_INFO("as_enable: 0x%x\n", as_enable); + ZXIC_COMM_TRACE_INFO("as_eram_baddr: 0x%x\n", as_eram_baddr); + ZXIC_COMM_TRACE_INFO("etcam_as_mode: 0x%x\n", etcam_as_mode); + ZXIC_COMM_TRACE_INFO("block_num: 0x%x\n", block_num); + ZXIC_COMM_TRACE_INFO("dump_element_id: 0x%x\n", dump_element_id); + + dump_info_buff = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC( + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dump_info_buff); + ZXIC_COMM_MEMSET(dump_info_buff, 0, + DPP_DTB_TABLE_DUMP_INFO_BUFF_SIZE * + sizeof(ZXIC_UINT8)); + + //etcam data dump描述符,以block为单位,深度是512 起始地址是0 rd_mode = 0xff + for (i = 0; i < block_num; i++) { + block_idx = p_tbl_cfg->block_array[i]; + + ZXIC_COMM_TRACE_INFO("block_idx: %d\n", block_idx); + + etcam_dump_info.block_sel = block_idx; + etcam_dump_info.addr = 0; + etcam_dump_info.tb_width = 3; + etcam_dump_info.rd_mode = 0xFF; + etcam_dump_info.tb_depth = DPP_ETCAM_RAM_DEPTH; + etcam_dump_info.data_or_mask = DPP_ETCAM_DTYPE_DATA; + + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, dump_element_id, dump_data_len, + &etcam_data_dst_phy_haddr, &etcam_data_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_item_offset_addr_get", + dump_info_buff); + rc = dpp_dtb_etcam_dump_entry(dev, &etcam_dump_info, + etcam_data_dst_phy_haddr, + 
etcam_data_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_etcam_dump_entry", dump_info_buff); + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_data_write", dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += DPP_ETCAM_RAM_DEPTH * 640 / 8; + } + + etcam_data_len_offset = dump_data_len; + // etcam mask dump描述符,以block为单位,深度是512 起始地址是0 rd_mode = 0xff + for (i = 0; i < block_num; i++) { + block_idx = p_tbl_cfg->block_array[i]; + + ZXIC_COMM_TRACE_INFO("mask: block_idx: %d\n", block_idx); + + etcam_dump_info.block_sel = block_idx; + etcam_dump_info.addr = 0; + etcam_dump_info.tb_width = 3; + etcam_dump_info.rd_mode = 0xFF; + etcam_dump_info.tb_depth = DPP_ETCAM_RAM_DEPTH; + etcam_dump_info.data_or_mask = DPP_ETCAM_DTYPE_MASK; + + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, dump_element_id, dump_data_len, + &etcam_mask_dst_phy_haddr, &etcam_mask_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_item_offset_addr_get", + dump_info_buff); + rc = dpp_dtb_etcam_dump_entry(dev, &etcam_dump_info, + etcam_mask_dst_phy_haddr, + etcam_mask_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_etcam_dump_entry", dump_info_buff); + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_data_write", dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += DPP_ETCAM_RAM_DEPTH * 640 / 8; + } + etcam_mask_len_offset = dump_data_len; + + //补充级联描述符 + if (as_enable) { + rc = dtb_eram_index_cal(dev, etcam_as_mode, etcam_table_depth, + &dump_eram_depth_128bit, + 
&eram_col_index); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dtb_eram_index_cal", dump_info_buff); + + rc = dpp_dtb_tab_up_item_offset_addr_get( + dev, queue_id, dump_element_id, dump_data_len, + &as_rst_dst_phy_haddr, &as_rst_dst_phy_laddr); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_tab_up_item_offset_addr_get", + dump_info_buff); + + rc = dpp_dtb_smmu0_dump_entry(dev, as_eram_baddr, + dump_eram_depth_128bit, + as_rst_dst_phy_haddr, + as_rst_dst_phy_laddr, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_smmu0_dump_entry", dump_info_buff); + rc = dpp_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, + &dtb_dump_entry); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_data_write", dump_info_buff); + ZXIC_COMM_MEMSET(cmd_buff, 0, DTB_TABLE_CMD_SIZE_BIT / 8); + dtb_desc_len += 1; + dtb_desc_addr_offset += DTB_LEN_POS_SETP; + dump_data_len += dump_eram_depth_128bit * 128 / 8; + } + + ZXIC_COMM_TRACE_INFO("dtb_desc_len: 0x%x\n", dtb_desc_len); + ZXIC_COMM_TRACE_INFO("dtb_desc_addr_offset: 0x%x\n", + dtb_desc_addr_offset); + ZXIC_COMM_TRACE_INFO("dump_data_len: 0x%x\n", dump_data_len); + + //申请data空间 + temp_dump_out_data = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC( + dump_data_len * sizeof(ZXIC_UINT8)); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(temp_dump_out_data, + dump_info_buff); + ZXIC_COMM_MEMSET(temp_dump_out_data, 0, + dump_data_len * sizeof(ZXIC_UINT8)); + + /*下发dump描述符*/ + rc = dpp_dtb_write_dump_desc_info(dev, queue_id, dump_element_id, + (ZXIC_UINT32 *)dump_info_buff, + dump_data_len / 4, dtb_desc_len * 4, + (ZXIC_UINT32 *)temp_dump_out_data); + ZXIC_COMM_VFREE(dump_info_buff); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_dtb_write_dump_desc_info", temp_dump_out_data); + + //解析数据 + p_data_start = temp_dump_out_data; + p_mask_start = temp_dump_out_data + etcam_data_len_offset; + if (as_enable) { + p_rst_start = temp_dump_out_data + etcam_mask_len_offset; + } + + for (handle = 0; 
handle < etcam_table_depth; handle++) { + p_dump_user_data = p_dump_data_arr + handle; + + if ((p_dump_user_data == NULL) || + (p_dump_user_data->key_data == NULL) || + (p_dump_user_data->key_mask == NULL)) { + ZXIC_COMM_TRACE_ERROR( + "etcam handle 0x%x data user buff is NULL!\n", + handle); + ZXIC_COMM_VFREE(temp_dump_out_data); + return DPP_ERR; + } + + if (as_enable) { + if (p_dump_user_data->p_as_rslt == NULL) { + ZXIC_COMM_TRACE_ERROR( + "etcam handle 0x%x as data buff is NULL!\n", + handle); + ZXIC_COMM_VFREE(temp_dump_out_data); + return DPP_ERR; + } + } + + p_dump_user_data->handle = handle; + + addr_640bit = handle / (1U << etcam_key_mode); + rd_mask = (((1U << (8U >> etcam_key_mode)) - 1) + << ((8U >> etcam_key_mode) * + (handle % (1U << etcam_key_mode)))) & + 0xFF; + + p_data_640bit = p_data_start + addr_640bit * 640 / 8; + p_mask_640bit = p_mask_start + addr_640bit * 640 / 8; + + dpp_dtb_etcam_ind_data_get(dev, p_data_640bit, rd_mask, + entry_xy.p_data); + dpp_dtb_etcam_ind_data_get(dev, p_mask_640bit, rd_mask, + entry_xy.p_mask); + + rc = dpp_etcam_xy_to_dm( + &entry_dm, &entry_xy, + DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dpp_etcam_xy_to_dm", temp_dump_out_data); + + ZXIC_COMM_MEMCPY(p_dump_user_data->key_data, entry_dm.p_data, + DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode)); + ZXIC_COMM_MEMCPY(p_dump_user_data->key_mask, entry_dm.p_mask, + DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode)); + + if (as_enable) { + rc = dtb_eram_index_cal(dev, etcam_as_mode, handle, + &eram_row_index, + &eram_col_index); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_NO_ASSERT( + rc, "dtb_eram_index_cal", temp_dump_out_data); + p_rst_128bit = + p_rst_start + eram_row_index * DTB_LEN_POS_SETP; + + eram_buff = (ZXIC_UINT32 *)p_rst_128bit; + + if (etcam_as_mode == ERAM128_TBL_128b) { + ZXIC_COMM_MEMCPY(p_dump_user_data->p_as_rslt, + eram_buff, (128 / 8)); + } else if (etcam_as_mode == ERAM128_TBL_64b) { + ZXIC_COMM_MEMCPY( + 
p_dump_user_data->p_as_rslt, + eram_buff + ((1 - eram_col_index) << 1), + (64 / 8)); + } + } + } + + *entry_num = etcam_table_depth; + *finish_flag = 1; + ZXIC_COMM_TRACE_INFO( + " etcam table dump entry num %d, finish flag %d\n\n", + *entry_num, *finish_flag); + + ZXIC_COMM_VFREE(temp_dump_out_data); + + return DPP_OK; +} + +/***********************************************************/ +/** dump eram表内容 +* @param dev 设备 +* @param queue_id 队列id 0~127 +* @param sdt_no sdt号 0~255 +* @param pDumpData 出参,dump数据,内存由用户分配,结构体DPP_HASH_ENTRY +* @param entryNum 出参,dump出的有效hash条目 +* @return +* @remark 无 +* @see +* @author cq @date 2025/04/03 +************************************************************/ +DPP_STATUS dpp_dtb_acl_dump(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT8 *pDumpData, + ZXIC_UINT32 *p_entry_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT64 dma_phy_addr = 0; + ZXIC_UINT64 dma_vir_addr = 0; + ZXIC_UINT32 dma_size = DTB_SDT_DUMP_SIZE; + ZXIC_MUTEX_T *p_mutex = NULL; + DPP_DTB_DUMP_INDEX_T start_index = { 0 }; + DPP_DTB_DUMP_INDEX_T next_start_index = { 0 }; + ZXIC_UINT32 finish_flag = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pDumpData); + ZXIC_COMM_CHECK_POINT(p_entry_num); + + rc = dpp_dev_dtb_opr_mutex_get(dev, DPP_DEV_MUTEX_T_DTB_RB, queue_id, + &p_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_dtb_opr_mutex_get"); + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + rc = dpp_dev_dump_dma_mem_get(dev, &dma_size, &dma_phy_addr, + &dma_vir_addr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dev_dump_dma_mem_get", p_mutex); + + rc = dpp_dtb_dump_sdt_addr_set(dev, queue_id, sdt_no, dma_phy_addr, + dma_vir_addr, dma_size); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_dump_sdt_addr_set", p_mutex); + + start_index.index = 0; + rc = dpp_dtb_acl_table_dump(dev, queue_id, sdt_no, start_index, + (DPP_DTB_ACL_ENTRY_INFO_T *)pDumpData, + p_entry_num, &next_start_index, + &finish_flag); + rc |= 
dpp_dtb_dump_sdt_addr_clear(dev, queue_id, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_table_dump", p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 释放vport下的所有index +* @param dev_id NP设备号 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param index 需要释放的索引值 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_release_by_vport(DPP_DEV_T *dev, + ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 eram_sdt_no = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + DPP_SDTTBL_ETCAM_T sdt_acl = { 0 }; /*SDT内容*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_acl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_acl.table_type != DPP_SDT_TBLT_eTCAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not etcam table!\n", + sdt_no, sdt_acl.table_type); + return DPP_ERR; + } + + eram_sdt_no = dpp_apt_get_sdt_partner(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_eram.table_type != DPP_SDT_TBLT_eRAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not eram table!\n", + eram_sdt_no, sdt_eram.table_type); + return DPP_ERR; + } + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_acl_index_release(dev, ACL_INDEX_VPORT_REL, + sdt_no, vport, 0); + if (rc == DPP_ACL_RC_SRH_FAIL) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ACL_INDEX_VPORT_REL[vport:0x%x] index is not exist. \n", + vport); + } + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_agent_channel_acl_index_release", p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + + return rc; +} + +/***********************************************************/ +/** 获取当前vport下分配的所有index +* @param dev 设备 +* @param queue_id 队列号 +* @param eram_sdt_no 维护index的eram直接表号 +* @param vport 端口号 +* @param index_num 出参,当前vport分配的index个数 +* @param p_index_array 出参,当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_parse(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 eram_sdt_no, ZXIC_UINT32 vport, + ZXIC_UINT32 *index_num, + ZXIC_UINT32 *p_index_array) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 eram_table_depth = 0; + ZXIC_UINT32 byte_num = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 entry_num = 0; + ZXIC_UINT32 valid_entry_num = 0; + ZXIC_UINT8 valid = 0; + ZXIC_UINT32 temp_vport = 0; + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + DPP_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr = NULL; + ZXIC_UINT8 *data_buff = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, index_num); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + 
DPP_DEV_SDT_ID_MAX - 1); + + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_eram.eram_mode, ERAM128_TBL_64b, + ERAM128_TBL_128b); + byte_num = (sdt_eram.eram_mode == ERAM128_TBL_64b) ? 8 : 16; + eram_table_depth = sdt_eram.eram_table_depth; + p_dump_data_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + eram_table_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dump_data_arr); + ZXIC_COMM_MEMSET(p_dump_data_arr, 0, + eram_table_depth * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(byte_num * eram_table_depth); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, data_buff, + p_dump_data_arr); + ZXIC_COMM_MEMSET(data_buff, 0, eram_table_depth * byte_num); + + for (i = 0; i < eram_table_depth; i++) { + p_dump_data_arr[i].index = i; + p_dump_data_arr[i].p_data = + (ZXIC_UINT32 *)(data_buff + i * byte_num); + } + + rc = dpp_dtb_eram_dump(dev, queue_id, eram_sdt_no, + (ZXIC_UINT8 *)p_dump_data_arr, &entry_num); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE2PTR_NO_ASSERT( + dev_id, rc, "dpp_dtb_eram_dump", data_buff, p_dump_data_arr); + + for (i = 0; i < entry_num; i++) { + valid = (p_dump_data_arr[i].p_data[0] >> 31) & 0x1; + temp_vport = p_dump_data_arr[i].p_data[0] & 0x7fffffff; + if (valid && (temp_vport == vport)) { + p_index_array[valid_entry_num] = i; + valid_entry_num++; + } + } + + *index_num = valid_entry_num; + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(p_dump_data_arr); + + if (dpp_dtb_prt_get()) { + ZXIC_COMM_PRINT( + "dpp_dtb_acl_index_parse vport=0x%x index_num=%u,index:\n", + vport, valid_entry_num); + for (i = 0; i < valid_entry_num; i++) { + ZXIC_COMM_PRINT("[%u] ", p_index_array[i]); + if ((i + 1) % 16 == 0) { + ZXIC_COMM_PRINT("\n"); + } + } + ZXIC_COMM_PRINT("\n"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 清除指定index的所有eram表项 
+* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no 维护index的eram直接表号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_eram_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 i = 0; + + DPP_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL; + ZXIC_UINT8 *data_buff = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + p_eram_data_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + index_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_eram_data_arr); + ZXIC_COMM_MEMSET(p_eram_data_arr, 0, + index_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(4 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, data_buff, + p_eram_data_arr); + ZXIC_COMM_MEMSET(data_buff, 0, 4 * sizeof(ZXIC_UINT32)); + + for (i = 0; i < index_num; i++) { + p_eram_data_arr[i].index = p_index_array[i]; + p_eram_data_arr[i].p_data = (ZXIC_UINT32 *)data_buff; + } + + rc = dpp_dtb_eram_dma_write(dev, queue_id, sdt_no, index_num, + p_eram_data_arr, &element_id); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(p_eram_data_arr); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dtb_eram_dma_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** dtb方式清除指定index的所有统计项 +* @param dev 设备 +* @param queue_id 队列号 +* @param counter_id 统计编号,对应微码中的address +* @param rd_mode 
统计读取方式 0:64bit 1:128bit +* @param sdt_no 维护index的eram直接表号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_eram_stat_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 counter_id, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 wrt_mode = 0; + ZXIC_UINT32 start_addr = 0; + ZXIC_UINT32 counter_id_128bit = 0; + ZXIC_UINT32 ppu_eram_baddr = 0; + + DPP_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL; + ZXIC_UINT8 *data_buff = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + rc = dpp_stat_ppu_eram_baddr_get(dev, &ppu_eram_baddr); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stat_ppu_eram_baddr_get"); + + p_eram_data_arr = (DPP_DTB_ERAM_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + index_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_eram_data_arr); + ZXIC_COMM_MEMSET(p_eram_data_arr, 0, + index_num * sizeof(DPP_DTB_ERAM_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(4 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, data_buff, + p_eram_data_arr); + ZXIC_COMM_MEMSET(data_buff, 0, 4 * sizeof(ZXIC_UINT32)); + + for (i = 0; i < index_num; i++) { + p_eram_data_arr[i].index = p_index_array[i]; + p_eram_data_arr[i].p_data = (ZXIC_UINT32 *)data_buff; + } + + wrt_mode = (rd_mode == STAT_128_MODE) ? ERAM128_OPR_128b : + ERAM128_OPR_64b; + counter_id_128bit = (rd_mode == STAT_128_MODE) ? 
counter_id : + (counter_id >> 1); + start_addr = ppu_eram_baddr + counter_id_128bit; + ZXIC_COMM_TRACE_INFO( + "dpp_dtb_eram_stat_data_clear:ppu_eram_baddr=0x%x start_addr=0x%x\n", + ppu_eram_baddr, start_addr); + rc = dpp_dtb_smmu0_data_write(dev, queue_id, start_addr, wrt_mode, + index_num, p_eram_data_arr, &element_id); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(p_eram_data_arr); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dtb_smmu0_data_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 清除指定index的所有acl表项 +* @param dev 设备 +* @param queue_id 队列号 +* @param sdt_no acl表项的sdt号 +* @param index_num 当前vport分配的index个数 +* @param p_index_array 当前vport分配的index数组 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/18 +************************************************************/ +DPP_STATUS dpp_dtb_acl_data_clear(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index_num, + ZXIC_UINT32 *p_index_array) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 data_byte_size = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 etcam_key_mode = 0; + ZXIC_UINT32 as_enable = 0; + ZXIC_UINT32 etcam_table_depth = 0; + ZXIC_UINT32 element_id = 0; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + + ZXIC_UINT8 *data_buff = NULL; + ZXIC_UINT8 *mask_buff = NULL; + ZXIC_UINT32 *eram_buff = NULL; + DPP_DTB_ACL_ENTRY_INFO_T *p_entry_arr = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_soft_sdt_tbl_get"); + + etcam_key_mode = sdt_etcam_info.etcam_key_mode; + as_enable = sdt_etcam_info.as_en; + etcam_table_depth = 
sdt_etcam_info.etcam_table_depth; + data_byte_size = DPP_ETCAM_ENTRY_SIZE_GET(etcam_key_mode); //80/40 + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, index_num, 1, etcam_table_depth); + + //组装数据 + p_entry_arr = (DPP_DTB_ACL_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + index_num * sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_entry_arr); + ZXIC_COMM_MEMSET_S(p_entry_arr, + index_num * sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0, + index_num * sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE(dev_id, data_buff, p_entry_arr); + ZXIC_COMM_MEMSET_S(data_buff, data_byte_size, 0, data_byte_size); + + mask_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE2PTR_NO_ASSERT( + dev_id, mask_buff, p_entry_arr, data_buff); + ZXIC_COMM_MEMSET_S(mask_buff, data_byte_size, 0, data_byte_size); + + if (as_enable) { + eram_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC( + 4 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_DEV_POINT_MEMORY_FREE3PTR_NO_ASSERT( + dev_id, eram_buff, p_entry_arr, data_buff, mask_buff); + ZXIC_COMM_MEMSET_S(eram_buff, 4 * sizeof(ZXIC_UINT32), 0, + 4 * sizeof(ZXIC_UINT32)); + } + + for (index = 0; index < index_num; index++) { + p_entry_arr[index].handle = p_index_array[index]; + p_entry_arr[index].key_data = data_buff; + p_entry_arr[index].key_mask = mask_buff; + + if (as_enable) { + p_entry_arr[index].p_as_rslt = (ZXIC_UINT8 *)eram_buff; + } + } + + rc = dpp_dtb_acl_dma_insert(dev, queue_id, sdt_no, index_num, + p_entry_arr, &element_id); + ZXIC_COMM_FREE(data_buff); + ZXIC_COMM_FREE(mask_buff); + if (eram_buff) { + ZXIC_COMM_FREE(eram_buff); + } + ZXIC_COMM_FREE(p_entry_arr); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dtb_acl_data_clear"); + + return rc; +} + +/***********************************************************/ +/** 指定vport的统计计数读清 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param rd_mode 
读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_counter_id 统计起始编号,对应微码中的address +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_stat_cnt_clr(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_counter_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_acl_stat_clr(dev, sdt_no, vport, + start_counter_id, rd_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_agent_channel_acl_stat_clr", p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + + return rc; +} + +#endif + +#if ZXIC_REAL("DTB DEBUG PRINT") + +/*打印buff中数据,size 单位 字节*/ +ZXIC_VOID dpp_data_buff_print(ZXIC_UINT8 *buff, ZXIC_UINT32 size) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT8 *temp_buff = NULL; + + ZXIC_COMM_PRINT("buff data is:\n"); + + for (i = 0; i < size / 4 / 4; i++) { + temp_buff = buff + 16 * i; + for (j = 0; j < 4; j++) { + ZXIC_COMM_PRINT("0x%08x ", + *((ZXIC_UINT32 *)(temp_buff + 4 * j))); + } + ZXIC_COMM_PRINT("\n"); + } + + ZXIC_COMM_PRINT("\n"); +} + +ZXIC_VOID dpp_acl_data_print(ZXIC_UINT8 *p_data, ZXIC_UINT8 *p_mask, + ZXIC_UINT32 etcam_mode) +{ + int i = 0; + int data_len = 0; + + data_len = DPP_ETCAM_ENTRY_SIZE_GET(etcam_mode); + + if (data_len > 80) { + return; + } + + ZXIC_COMM_PRINT("%s:", "data"); + + for (i = 
0; i < data_len; i++) { + if ((i % 10) == 0) { + ZXIC_COMM_PRINT("\n"); + } + + ZXIC_COMM_PRINT("%02x", p_data[i]); + } + + ZXIC_COMM_PRINT("\n"); + + ZXIC_COMM_PRINT("%s:", "mask"); + + for (i = 0; i < data_len; i++) { + if ((i % 10) == 0) { + ZXIC_COMM_PRINT("\n"); + } + + ZXIC_COMM_PRINT("%02x", p_mask[i]); + } + ZXIC_COMM_PRINT("\n"); +} + +ZXIC_VOID dpp_dtb_data_print(ZXIC_UINT8 *p_data, ZXIC_UINT32 len) +{ + int i = 0; + int cycle = len / 4; + int remain = len % 4; + + ZXIC_COMM_TRACE_INFO("%s:", "data:\n"); + + for (i = 0; i < cycle; i++) { + ZXIC_COMM_TRACE_INFO("0x%02x %02x %02x %02x \n", p_data[4 * i], + p_data[4 * i + 1], p_data[4 * i + 2], + p_data[4 * i + 3]); + } + + for (i = 0; i < remain; i++) { + ZXIC_COMM_TRACE_INFO("0x%02x", p_data[cycle * 4 + i]); + } + ZXIC_COMM_TRACE_INFO("\n"); +} + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table_api.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table_api.c new file mode 100644 index 000000000000..7080d98eeb0f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_dtb_table_api.c @@ -0,0 +1,1368 @@ +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_dtb_table_api.h" +#include "dpp_dtb_cfg.h" +#include "dpp_apt_se.h" +#include "zxic_comm_rb_tree.h" +#include "dpp_dtb_table.h" +#include "dpp_sdt.h" +#include "dpp_agent_channel.h" +#include "dpp_dtb.h" +#include "dpp_kernel_init.h" + +#define EP_NUM (5) +static ZXIC_UINT32 msix_interrupt_mode = 1; //1为dbi中断,0为match中断 +static ZXIC_RB_CFG + g_dtb_queue_dump_addr_rb[DPP_DEV_SLOT_MAX][DPP_DEV_CHANNEL_MAX] + [DPP_DTB_QUEUE_NUM_MAX] = { { { { 0 } } } }; +static ZXIC_UINT32 hardware_ep_id[EP_NUM] = { 5, 6, 7, 8, 9 }; + +typedef struct dpp_dtb_dump_addr_info_t { + ZXIC_UINT32 sdt_no; + ZXIC_UINT64 phyAddr; + ZXIC_UINT64 virAddr; + ZXIC_UINT32 size; +} DPP_DTB_DUMP_ADDR_INFO_T; + +ZXIC_UINT32 dpp_dtb_ep_id_get(ZXIC_UINT32 soft_ep_id) +{ + 
return hardware_ep_id[soft_ep_id]; +} + +ZXIC_RB_CFG *dpp_dtb_dump_addr_rb_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + ZXIC_COMM_CHECK_POINT_RETURN_NULL(dev); + ZXIC_COMM_CHECK_INDEX_RETURN_NULL(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_RETURN_NULL(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + return &g_dtb_queue_dump_addr_rb[DEV_PCIE_SLOT(dev)][DEV_ID(dev)] + [queue_id]; +} + +ZXIC_UINT32 dpp_dtb_dump_addr_rb_init(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_RB_CFG *p_dtb_dump_addr_rb = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + p_dtb_dump_addr_rb = dpp_dtb_dump_addr_rb_get(dev, queue_id); + + rc = zxic_comm_rb_init(p_dtb_dump_addr_rb, 0, + ZXIC_SIZEOF(DPP_DTB_DUMP_ADDR_INFO_T), + dpp_apt_table_key_cmp); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_rb_init"); + + return rc; +} + +ZXIC_UINT32 dpp_dtb_dump_addr_rb_destroy(DPP_DEV_T *dev, ZXIC_UINT32 queue_id) +{ + ZXIC_UINT32 rc = DPP_OK; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + DPP_DTB_DUMP_ADDR_INFO_T *p_rbkey = NULL; + D_HEAD *p_head_dtb_rb = NULL; + ZXIC_RB_CFG *p_dtb_dump_addr_rb = NULL; + + ZXIC_UINT32 sdt_no = 0; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + p_dtb_dump_addr_rb = dpp_dtb_dump_addr_rb_get(dev, queue_id); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(p_dtb_dump_addr_rb); + + p_head_dtb_rb = &p_dtb_dump_addr_rb->tn_list; + ZXIC_COMM_CHECK_POINT_NO_ASSERT(p_head_dtb_rb); + + while (p_head_dtb_rb->used) { + p_node = p_head_dtb_rb->p_next; + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = (DPP_DTB_DUMP_ADDR_INFO_T *)p_rb_tn->p_key; + + sdt_no = p_rbkey->sdt_no; + rc = 
dpp_dtb_dump_sdt_addr_clear(dev, queue_id, sdt_no); + if (DPP_HASH_RC_DEL_SRHFAIL == rc) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dtb dump delete key is not exist, std:%d\n", + sdt_no); + } else { + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_dtb_dump_sdt_addr_clear"); + } + } + + rc = zxic_comm_rb_destroy(p_dtb_dump_addr_rb); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_rb_init"); + + return rc; +} + +/*dump地址信息获取*/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_get(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT64 *phy_addr, + ZXIC_UINT64 *vir_addr, ZXIC_UINT32 *size) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_DTB_DUMP_ADDR_INFO_T dtb_dump_addr_info = { 0 }; + ZXIC_RB_CFG *p_dtb_dump_addr_rb = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + dtb_dump_addr_info.sdt_no = sdt_no; + p_dtb_dump_addr_rb = dpp_dtb_dump_addr_rb_get(dev, queue_id); + rc = dpp_apt_sw_list_search(p_dtb_dump_addr_rb, &dtb_dump_addr_info, + sizeof(DPP_DTB_DUMP_ADDR_INFO_T)); + if (rc == DPP_OK) { + ZXIC_COMM_TRACE_INFO("search sdt_no %d success.\n", sdt_no); + } else { + ZXIC_COMM_TRACE_ERROR("search sdt_no %d fail.\n", sdt_no); + return rc; + } + + *phy_addr = dtb_dump_addr_info.phyAddr; + *vir_addr = dtb_dump_addr_info.virAddr; + *size = dtb_dump_addr_info.size; + + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_get: queue :%d\n", + queue_id); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_get: sdt_no :%d\n", + sdt_no); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_get: phyAddr :0x%016llx\n", + dtb_dump_addr_info.phyAddr); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_get: vir_addr :0x%016llx\n", + dtb_dump_addr_info.virAddr); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_get: size :0x%x\n", + dtb_dump_addr_info.size); + + return rc; +} + +/***********************************************************/ +/** 
DTB通道申请 +* @param devId NP设备号 +* @param pName 申请DTB通道的唯一设备名(最大32字符) +* @param pQueueId 申请到的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_requst(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT16 vPort, ZXIC_UINT32 *pQueueId) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue_id = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + ZXIC_UINT32 vport_info = (ZXIC_UINT32)vPort; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pName); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pQueueId); + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_dtb_queue_request(dev, pName, vport_info, + &queue_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_agent_channel_dtb_queue_request", + p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + rc = dpp_dtb_dump_addr_rb_init(dev, queue_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_addr_rb_init"); + + *pQueueId = queue_id; + + ZXIC_COMM_PRINT( + "dpp_dtb_queue_requst:slot %d name %s vport 0x%x queue_id %d\n", + DEV_PCIE_SLOT(dev), pName, vport_info, queue_id); + + return rc; +} + +/***********************************************************/ +/** DTB通道队列申请(申请到的队列在软件维护和硬件维护上都处于初态) +* @param devId NP设备号 +* @param pName 申请DTB通道的唯一设备名(最大32字符) +* @param pQueueId 申请到的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/05 +************************************************************/ +DPP_STATUS 
dpp_dtb_queue_requst_ex(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT32 *p_queue_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 init_flag = 0; + ZXIC_UINT32 queue_id = 0; + ZXIC_UINT32 count = 0; + ZXIC_UINT32 vport = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = DPP_DEV_MUTEX_T_DTB; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(pName); + ZXIC_COMM_CHECK_POINT(p_queue_id); + + vport = DEV_PCIE_VPORT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + //使用代理通道要加锁 + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + do { + rc = dpp_agent_channel_dtb_queue_request(dev, pName, vport, + &queue_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_agent_channel_dtb_queue_request", + p_dtb_mutex); + + /*从固件申请到的队列,在软件上已占用,则释放当前队列,重新申请*/ + rc = dpp_dtb_queue_init_flag_get(dev, queue_id, &init_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_dtb_queue_init_flag_get", p_dtb_mutex); + + if (init_flag) { + rc = dpp_agent_channel_dtb_queue_release(dev, pName, + queue_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, + "dpp_agent_channel_dtb_queue_release", + p_dtb_mutex); + + ZXIC_COMM_TRACE_NOTICE( + "[%s]:slot[%u] vport[0x%x] count[%u] release queue[%u] succ!\n", + __FUNCTION__, DEV_PCIE_SLOT(dev), vport, count, + queue_id); + } else { + ZXIC_COMM_PRINT( + "[%s]:slot[%u] vport[0x%x] count[%u] request queue succ!\n", + __FUNCTION__, DEV_PCIE_SLOT(dev), vport, count); + break; + } + count++; + } while ((init_flag == 1) && (count < DPP_DTB_QUEUE_NUM_MAX)); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + if (init_flag) { + ZXIC_COMM_TRACE_ERROR( + "[%s]:slot[%u] vport[0x%x] 
request queue fail!\n", + __FUNCTION__, DEV_PCIE_SLOT(dev), vport); + return DPP_ERR; + } + + rc = dpp_dtb_dump_addr_rb_init(dev, queue_id); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_addr_rb_init"); + + *p_queue_id = queue_id; + + return DPP_OK; +} + +/***********************************************************/ +/** DTB通道释放 +* @param devId NP设备号 +* @param pName 要释放DTB通道的唯一设备名(最大32字符) +* @param pQueueId 要释放的DTB通道编号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT32 queueId) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pName); + + ZXIC_COMM_TRACE_INFO("dpp_dtb_queue_release:queue_id %d\n", queueId); + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_dtb_queue_release(dev, pName, queueId); + + if (rc == DPP_RC_DTB_QUEUE_NOT_ALLOC) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "dtb slot %d queue id %d not request. \n", + DEV_PCIE_SLOT(dev), queueId); + } + + if (rc == DPP_RC_DTB_QUEUE_NAME_ERROR) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "dtb slot %d queue %d name error. 
\n", + DEV_PCIE_SLOT(dev), queueId); + } + + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_agent_channel_dtb_queue_release", + p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + rc = dpp_dtb_queue_id_free(dev, queueId); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_id_free"); + + rc = dpp_dtb_dump_addr_rb_destroy(dev, queueId); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_addr_rb_destroy"); + + ZXIC_COMM_PRINT("dpp_dtb_queue_release:slot %d name %s queue_id %d\n", + DEV_PCIE_SLOT(dev), pName, queueId); + + return rc; +} + +/***********************************************************/ +/** DTB通道释放(增加锁保护) +* @param devId NP设备号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release_ex(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue_id = 0; + ZXIC_MUTEX_T *p_self_recover_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + + mutex = DPP_DEV_MUTEX_T_SELF_RECOVER; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, + &p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + rc = dpp_dtb_queue_id_get(dev, &queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_id_get", + p_self_recover_mutex); + + rc = dpp_dtb_queue_release(dev, "pf", queue_id); + if (rc == DPP_OK) { + rc = dpp_dtb_queue_dma_mem_release(dev, queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_dma_mem_release", + p_self_recover_mutex); + } else { + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_release", + p_self_recover_mutex); + } + + rc = zxic_comm_mutex_unlock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ 
+/** DTB队列软件资源释放 +* @param dev NP设备 +* @return +* @remark 无 +* @see +* @author cq @date 2025/06/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_release_soft(DPP_DEV_T *dev) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue_id = 0; + ZXIC_MUTEX_T *p_self_recover_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX(DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "slot %d ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DEV_PCIE_SLOT(dev), DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + mutex = DPP_DEV_MUTEX_T_SELF_RECOVER; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, + &p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + rc = dpp_dtb_queue_id_get(dev, &queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_id_get", + p_self_recover_mutex); + + rc = dpp_dtb_dump_addr_rb_destroy(dev, queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_dump_addr_rb_destroy", + p_self_recover_mutex); + + p_dtb_mgr->queue_info[queue_id].init_flag = 0; + ZXIC_COMM_MEMSET_S(&(p_dtb_mgr->queue_info[queue_id].tab_up), + sizeof(DPP_DTB_TAB_UP_INFO_T), 0, + sizeof(DPP_DTB_TAB_UP_INFO_T)); + ZXIC_COMM_MEMSET_S(&(p_dtb_mgr->queue_info[queue_id].tab_down), + sizeof(DPP_DTB_TAB_DOWN_INFO_T), 0, + sizeof(DPP_DTB_TAB_DOWN_INFO_T)); + + rc = dpp_dtb_queue_dma_mem_release(dev, queue_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_queue_dma_mem_release", + p_self_recover_mutex); + + rc = zxic_comm_mutex_unlock(p_self_recover_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + 
return DPP_OK; +} + +/***********************************************************/ +/** 通知固件通道信息,保证固件与驱动配置一致 +* @param devId NP设备号 +* @param pName 要释放DTB通道的唯一设备名(最大32字符) +* @param vPort 端口号 +* @param pQueueId 同步队列 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/06 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_sync_cfg(DPP_DEV_T *dev, ZXIC_CONST ZXIC_UINT8 *pName, + ZXIC_UINT16 vPort, ZXIC_UINT32 queueId) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pName); + + ZXIC_COMM_TRACE_INFO("dpp_dtb_queue_sync_cfg:queue_id %d\n", queueId); + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_dtb_queue_sync_cfg(dev, pName, vPort, queueId); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_agent_channel_dtb_queue_sync_cfg", + p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + rc = dpp_dtb_user_info_set(dev, queueId, vPort, 0); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_user_info_set"); + + ZXIC_COMM_PRINT( + "dpp_dtb_queue_sync_cfg:slot %d name %s vport 0x%x queue_id %d\n", + DEV_PCIE_SLOT(dev), pName, vPort, queueId); + + return rc; +} + +/***********************************************************/ +/** DTB通道用户信息配置 +* @param devId NP设备号 +* @param queueId DTB通道编号 +* @param vPort vport信息 +* @param vector 中断号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 
+************************************************************/ +ZXIC_UINT32 dpp_dtb_user_info_set(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT16 vPort, ZXIC_UINT32 vector) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_UINT32 temp_epid; + ZXIC_UINT32 temp_func_active; + ZXIC_UINT32 temp_func_num; + ZXIC_UINT32 temp_vfunc_num; + ZXIC_UINT32 virtioPort = (ZXIC_UINT32)vPort; + + DPP_DTB_QUEUE_VM_INFO_T vm_info = { 0 }; + DPP_DTB_MGR_T *p_dtb_mgr = ZXIC_NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + p_dtb_mgr = dpp_dtb_mgr_get(DEV_PCIE_SLOT(dev), DEV_ID(dev)); + if (p_dtb_mgr == ZXIC_NULL) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ErrorCode[0x%x]: DTB Manager is not exist!!!\n", + DPP_RC_DTB_MGR_NOT_EXIST); + return DPP_RC_DTB_MGR_NOT_EXIST; + } + + ZXIC_COMM_UINT32_GET_BITS(temp_epid, virtioPort, VPORT_EPID_BT_START, + VPORT_EPID_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(temp_func_active, virtioPort, + VPORT_FUNC_ACTIVE_BT_START, + VPORT_FUNC_ACTIVE_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(temp_func_num, virtioPort, + VPORT_FUNC_NUM_BT_START, + VPORT_FUNC_NUM_BT_LEN); + ZXIC_COMM_UINT32_GET_BITS(temp_vfunc_num, virtioPort, + VPORT_VFUNC_NUM_BT_START, + VPORT_VFUNC_NUM_BT_LEN); + + rc = dpp_dtb_queue_vm_info_get(dev, queueId, &vm_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_vm_info_get"); + + vm_info.dbi_en = msix_interrupt_mode; + vm_info.epid = dpp_dtb_ep_id_get(temp_epid); + vm_info.vfunc_num = temp_vfunc_num; + vm_info.func_num = temp_func_num; + vm_info.vfunc_active = temp_func_active; + vm_info.vector = vector; + + ZXIC_COMM_TRACE_NOTICE("[%s]:queue %d vport 0x%x, vector:%x\n", + __FUNCTION__, queueId, vPort, vector); + ZXIC_COMM_TRACE_NOTICE("[%s]:dbi_en 0x%x\n", __FUNCTION__, + vm_info.dbi_en); + ZXIC_COMM_TRACE_NOTICE("[%s]:epid 0x%x\n", __FUNCTION__, vm_info.epid); + 
ZXIC_COMM_TRACE_NOTICE("[%s]:vfunc_num 0x%x\n", __FUNCTION__, + vm_info.vfunc_num); + ZXIC_COMM_TRACE_NOTICE("[%s]:func_num 0x%x\n", __FUNCTION__, + vm_info.func_num); + ZXIC_COMM_TRACE_NOTICE("[%s]:vfunc_active 0x%x\n", __FUNCTION__, + vm_info.vfunc_active); + + p_dtb_mgr->queue_info[queueId].vport = vPort; + p_dtb_mgr->queue_info[queueId].vector = vector; + + rc = dpp_dtb_queue_vm_info_set(dev, queueId, &vm_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_queue_vm_info_set"); + + return rc; +} + +/***********************************************************/ +/** DTB通道下表空间地址设置,空间大小[32*(16+16*1024)B] +* @param devId NP设备号 +* @param queueId DTB通道编号 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_queue_down_table_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queueId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + ZXIC_COMM_PRINT( + "dpp_dtb_queue_down_table_addr_set:slot %d queue %d phyAddr 0x%llx\n", + DEV_PCIE_SLOT(dev), queueId, phyAddr); + ZXIC_COMM_TRACE_NOTICE( + "dpp_dtb_queue_down_table_addr_set:slot %d queue %d virAddr 0x%llx\n", + DEV_PCIE_SLOT(dev), queueId, virAddr); + + rc = dpp_dtb_down_channel_addr_set(dev, queueId, phyAddr, virAddr, 0); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_down_channel_addr_set"); + + return rc; +} + +/***********************************************************/ +/** DTB通道dump空间地址设置,空间大小[32*(16+16*1024)B] +* @param devId NP设备号 +* @param pName 要释放DTB通道的设备名 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 
dpp_dtb_queue_dump_table_addr_set(DPP_DEV_T *dev, + ZXIC_UINT32 queueId, + ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + ZXIC_COMM_PRINT( + "dpp_dtb_queue_dump_table_addr_set:slot %d queue %d phyAddr 0x%llx\n", + DEV_PCIE_SLOT(dev), queueId, phyAddr); + ZXIC_COMM_TRACE_NOTICE( + "dpp_dtb_queue_dump_table_addr_set:slot %d queue %d virAddr 0x%llx\n", + DEV_PCIE_SLOT(dev), queueId, virAddr); + + rc = dpp_dtb_dump_channel_addr_set(dev, queueId, phyAddr, virAddr, 0); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_dump_channel_addr_set"); + + return rc; +} + +/***********************************************************/ +/** 大批量dump一个流表使用的地址空间配置 +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @param phyAddr 物理地址 +* @param virAddr 虚拟地址 +* @param size (最大64MB) +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_set(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT32 sdtNo, ZXIC_UINT64 phyAddr, + ZXIC_UINT64 virAddr, ZXIC_UINT32 size) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_DTB_DUMP_ADDR_INFO_T dtb_dump_addr_info = { 0 }; + ZXIC_RB_CFG *p_dtb_dump_addr_rb = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: slotId :0x%x\n", + DEV_PCIE_SLOT(dev)); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: queueId :0x%x\n", + queueId); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: sdtNo :0x%x\n", sdtNo); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: phyAddr :0x%016llx\n", + phyAddr); + 
ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: virAddr :0x%016llx\n", + virAddr); + ZXIC_COMM_TRACE_INFO("dpp_dtb_dump_sdt_addr_set: size :0x%x\n", size); + + dtb_dump_addr_info.sdt_no = sdtNo; + dtb_dump_addr_info.phyAddr = phyAddr; + dtb_dump_addr_info.virAddr = virAddr; + dtb_dump_addr_info.size = size; + + p_dtb_dump_addr_rb = dpp_dtb_dump_addr_rb_get(dev, queueId); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(p_dtb_dump_addr_rb); + ZXIC_COMM_CHECK_INDEX_UPPER_NO_ASSERT( + p_dtb_dump_addr_rb->key_size, + (ZXIC_UINT32)sizeof(DPP_DTB_DUMP_ADDR_INFO_T)); + + rc = dpp_apt_sw_list_insert(p_dtb_dump_addr_rb, &dtb_dump_addr_info, + sizeof(DPP_DTB_DUMP_ADDR_INFO_T)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_apt_sw_list_insert"); + + return rc; +} + +/***********************************************************/ +/** 清除大批量dump一个流表使用的地址空间配置 +* @param devId NP设备号 +* @param queueId DTB队列编号 +* @param sdtNo 流表std号 +* @return +* @remark 无 +* @see +* @author cbb @date 2023/07/03 +************************************************************/ +ZXIC_UINT32 dpp_dtb_dump_sdt_addr_clear(DPP_DEV_T *dev, ZXIC_UINT32 queueId, + ZXIC_UINT32 sdtNo) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_DTB_DUMP_ADDR_INFO_T dtb_dump_addr_info = { 0 }; + ZXIC_RB_CFG *p_dtb_dump_addr_rb = NULL; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), queueId, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + + dtb_dump_addr_info.sdt_no = sdtNo; + + p_dtb_dump_addr_rb = dpp_dtb_dump_addr_rb_get(dev, queueId); + rc = dpp_apt_sw_list_delete(p_dtb_dump_addr_rb, &dtb_dump_addr_info, + sizeof(DPP_DTB_DUMP_ADDR_INFO_T)); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_apt_sw_list_delete"); + + return rc; +} + +/***********************************************************/ +/** 释放当前sdt下的所有流表(硬件方式) +* (适用于进程启动后,仅配置流表资源,软件未配置流表,但需要删除硬件上已配置流表的场景) +* @param dev +* @param queue_id 队列号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* 
@author cq @date 2023/12/04 +************************************************************/ +DPP_STATUS dpp_dtb_hash_offline_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) + +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 entryNum = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 max_item_num = DTB_DUMP_MULTICAST_MAC_DUMP_NUM; + ZXIC_UINT8 *pDumpData = NULL; + ZXIC_UINT8 *pKey = NULL; + ZXIC_UINT8 *pRst = NULL; + ZXIC_UINT32 element_id = 0; + + DPP_DTB_HASH_ENTRY_INFO_T *p_dtb_hash_entry = NULL; + DPP_DTB_HASH_ENTRY_INFO_T *p_temp_entry = NULL; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + ZXIC_COMM_CHECK_INDEX_NO_ASSERT(DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_soft_sdt_tbl_get"); + + rc = dpp_hash_max_item_num_get(dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_hash_max_item_num_get"); + + pDumpData = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC( + max_item_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pDumpData); + pKey = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pKey, pDumpData); + pRst = (ZXIC_UINT8 *)ZXIC_COMM_VMALLOC(max_item_num * HASH_RST_MAX); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT(pRst, pKey, pDumpData); + + ZXIC_COMM_MEMSET_S(pDumpData, + max_item_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T), + 0x0, + max_item_num * sizeof(DPP_DTB_HASH_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(pKey, max_item_num * HASH_KEY_MAX, 0x0, + max_item_num * HASH_KEY_MAX); + ZXIC_COMM_MEMSET_S(pRst, max_item_num * HASH_RST_MAX, 0x0, + max_item_num * HASH_RST_MAX); + + p_dtb_hash_entry = (DPP_DTB_HASH_ENTRY_INFO_T *)pDumpData; + for (index = 0; index < max_item_num; index++) { + p_temp_entry = p_dtb_hash_entry + index; + p_temp_entry->p_actu_key = pKey + index * HASH_KEY_MAX; + 
p_temp_entry->p_rst = pRst + index * HASH_RST_MAX; + } + + rc = dpp_dtb_hash_dump(dev, queue_id, sdt_no, pDumpData, &entryNum); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE3PTR_NO_ASSERT(rc, "dpp_dtb_hash_dump", + pRst, pKey, pDumpData); + + ZXIC_COMM_TRACE_INFO("dpp_dtb_hash_dump valid entry num is %u\n", + entryNum); + + for (index = 0; index < entryNum; index++) { + p_temp_entry = p_dtb_hash_entry + index; + // //打印数据 + dpp_dtb_data_print( + p_temp_entry->p_actu_key, + DPP_GET_ACTU_KEY_BY_SIZE(sdt_hash_info.key_size) + 1); + dpp_dtb_data_print(p_temp_entry->p_rst, + 4 * (0x1 << sdt_hash_info.rsp_mode)); + } + + rc = dpp_dtb_hash_dma_delete_hardware(dev, queue_id, sdt_no, entryNum, + p_dtb_hash_entry, &element_id); + ZXIC_COMM_VFREE(p_dtb_hash_entry); + ZXIC_COMM_VFREE(pKey); + ZXIC_COMM_VFREE(pRst); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_hash_dma_delete_hardware"); + + return DPP_OK; +} + +/***********************************************************/ +/** 在线释放当前sdt下的所有流表表项 +* (适用于进程正常退出前删除表项,软件上有存储表项) +* @param dev_id 设备号 +* @param queue_id 队列号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/04 +************************************************************/ +DPP_STATUS dpp_dtb_hash_online_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT8 key_valid = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 key_type = 0; + ZXIC_UINT32 element_id = 0; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + D_HEAD *p_head_hash_rb = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + DPP_DTB_HASH_ENTRY_INFO_T hashEntry = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, queue_id, 0, DTB_QUEUE_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取hash配置 + rc = dpp_hash_get_hash_info_from_sdt(dev, 
sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_cfg); + + p_head_hash_rb = &p_hash_cfg->hash_rb.tn_list; + p_node = p_head_hash_rb->p_next; + while (p_node) { + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_rb_tn->p_key; + hashEntry.p_actu_key = p_rbkey->key + 1; + hashEntry.p_rst = p_rbkey->key; + + key_valid = DPP_GET_HASH_KEY_VALID(p_rbkey->key); + table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + if ((!key_valid) || (table_id != hash_entry_cfg.table_id) || + (key_type != hash_entry_cfg.key_type)) { + p_node = p_node->next; + continue; + } + p_node = p_node->next; + + rc = dpp_dtb_hash_dma_delete(dev, queue_id, sdt_no, 1, + &hashEntry, &element_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "dpp_dtb_hash_dma_delete"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** acl index资源申请 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param p_index 申请到的索引值,acl下表时使用 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_request(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, ZXIC_UINT32 *p_index) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 eram_sdt_no = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + DPP_SDTTBL_ETCAM_T sdt_acl = { 0 }; /*SDT内容*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, 
&sdt_acl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_acl.table_type != DPP_SDT_TBLT_eTCAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not etcam table!\n", + sdt_no, sdt_acl.table_type); + return DPP_ERR; + } + + eram_sdt_no = dpp_apt_get_sdt_partner(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_eram.table_type != DPP_SDT_TBLT_eRAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not eram table!\n", + eram_sdt_no, sdt_eram.table_type); + return DPP_ERR; + } + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_acl_index_request(dev, sdt_no, vport, &index); + if (rc == DPP_ACL_RC_INDEX_RES_FULL) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "acl index is full. 
\n"); + } + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_agent_channel_acl_index_request", + p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + *p_index = index; + + ZXIC_COMM_PRINT( + "dpp_dtb_acl_index_request:slot %d vport 0x%x index %d\n", + DEV_PCIE_SLOT(dev), vport, index); + + return rc; +} + +/***********************************************************/ +/** acl index资源释放 +* @param dev NP设备 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param index 需要释放的索引值 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_index_release(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 vport, ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 eram_sdt_no = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + DPP_SDTTBL_ETCAM_T sdt_acl = { 0 }; /*SDT内容*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_acl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_acl.table_type != DPP_SDT_TBLT_eTCAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not etcam table!\n", + sdt_no, sdt_acl.table_type); + return DPP_ERR; + } + + eram_sdt_no = dpp_apt_get_sdt_partner(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_eram.table_type != DPP_SDT_TBLT_eRAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not eram table!\n", + eram_sdt_no, 
sdt_eram.table_type); + return DPP_ERR; + } + + ZXIC_COMM_CHECK_DEV_INDEX_UPPER(dev_id, index, + sdt_eram.eram_table_depth); + + //使用代理通道要加锁 + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_acl_index_release(dev, ACL_INDEX_RELEASE, sdt_no, + vport, index); + if (rc == DPP_ACL_RC_SRH_FAIL) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "slot %d vport 0x%x index %d is not exist. \n", + DEV_PCIE_SLOT(dev), vport, index); + } + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_agent_channel_acl_index_release", p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_mutex_unlock"); + + ZXIC_COMM_PRINT( + "dpp_dtb_acl_index_release:slot %d vport 0x%x index %d\n", + DEV_PCIE_SLOT(dev), vport, index); + + return rc; +} + +/***********************************************************/ +/** 离线删除与vport关联的acl表项和索引值 +* @param dev_id NP设备号 +* @param queue_id dtb通道队列号(0~127) +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param counter_id 统计编号,对应微码中的address +* @param rd_mode 统计读取方式 0:64bit 1:128bit +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_offline_delete(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 vport, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 index_num = 0; + ZXIC_UINT32 eram_sdt_no = 0; + ZXIC_UINT32 *p_index_array = NULL; + + DPP_SDTTBL_ETCAM_T sdt_acl = { 0 }; /*SDT内容*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, 
queue_id, 0, + DPP_DTB_QUEUE_NUM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_acl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_acl.table_type != DPP_SDT_TBLT_eTCAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not etcam table!\n", + sdt_no, sdt_acl.table_type); + return DPP_ERR; + } + + eram_sdt_no = dpp_apt_get_sdt_partner(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_eram.table_type != DPP_SDT_TBLT_eRAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not eram table!\n", + eram_sdt_no, sdt_eram.table_type); + return DPP_ERR; + } + + p_index_array = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC( + sizeof(ZXIC_UINT32) * (sdt_eram.eram_table_depth)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + /*解析有效的valid数据*/ + rc = dpp_dtb_acl_index_parse(dev, queue_id, eram_sdt_no, vport, + &index_num, p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, "dpp_dtb_acl_index_parse", p_index_array); + + if (!index_num) { + ZXIC_COMM_TRACE_INFO("SDT[%d] vport[0x%x] item num is zero!\n", + sdt_no, vport); + ZXIC_COMM_FREE(p_index_array); + return DPP_OK; + } + + /*delete acl数据*/ + rc = dpp_dtb_acl_data_clear(dev, queue_id, sdt_no, index_num, + p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE(dev_id, rc, "dpp_dtb_acl_data_clear", + p_index_array); + + /*delete eram数据*/ + rc = dpp_dtb_eram_data_clear(dev, queue_id, eram_sdt_no, index_num, + p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, "dpp_dtb_eram_data_clear", p_index_array); + + /*删除统计数据*/ + rc = dpp_dtb_eram_stat_data_clear(dev, queue_id, counter_id, rd_mode, + index_num, p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, 
"dpp_dtb_eram_stat_data_clear", p_index_array); + + ZXIC_COMM_FREE(p_index_array); + + /*发送消息给固件清除index标志位*/ + rc = dpp_dtb_acl_index_release_by_vport(dev, sdt_no, vport); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_dtb_acl_index_release_by_vport"); + + ZXIC_COMM_PRINT( + "dpp_dtb_acl_offline_delete:slot %u vport 0x%x acl_num %u\n", + DEV_PCIE_SLOT(dev), vport, index_num); + + return rc; +} + +/***********************************************************/ +/** 统计计数读清 +* @param dev NP设备 +* @param queue_id 队列号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_count_id 统计起始编号,对应微码中的address +* @param num 统计项个数 +* @return +* @remark 无 +* @see +* @author cq @date 2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_stat_ppu_cnt_clr(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_count_id, ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 *p_index_array = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX_LOWER(num, 1); + + p_index_array = + (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(sizeof(ZXIC_UINT32) * num); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + for (i = 0; i < num; i++) { + p_index_array[i] = i; + } + + /*删除统计数据*/ + rc = dpp_dtb_eram_stat_data_clear(dev, queue_id, start_count_id, + rd_mode, num, p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, "dpp_dtb_eram_stat_data_clear", p_index_array); + + ZXIC_COMM_FREE(p_index_array); + + return rc; +} + +/***********************************************************/ +/** 清除指定vport的对应的stat统计(dtb方式) +* @param dev NP设备 +* @param queue_id 队列号 +* @param sdt_no 流表sdt号(0~255) +* @param vport 端口号 +* @param rd_mode 读取位宽模式,参见STAT_CNT_MODE_E,0-64bit,1-128bit +* @param start_counter_id 统计起始编号,对应微码中的address +* @return +* @remark 无 +* @see +* @author cq @date 
2024/09/14 +************************************************************/ +DPP_STATUS dpp_dtb_acl_stat_clr_by_vport(DPP_DEV_T *dev, ZXIC_UINT32 queue_id, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 vport, + STAT_CNT_MODE_E rd_mode, + ZXIC_UINT32 start_counter_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 eram_sdt_no = 0; + ZXIC_UINT32 index_num = 0; + ZXIC_UINT32 *p_index_array = NULL; + + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; /*SDT内容*/ + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_soft_sdt_tbl_get"); + if (sdt_etcam_info.table_type != DPP_SDT_TBLT_eTCAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not etcam table!\n", + sdt_no, sdt_etcam_info.table_type); + return DPP_ERR; + } + + eram_sdt_no = dpp_apt_get_sdt_partner(dev, sdt_no); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, eram_sdt_no, 0, + DPP_DEV_SDT_ID_MAX - 1); + rc = dpp_soft_sdt_tbl_get(dev, eram_sdt_no, &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_get"); + if (sdt_eram.table_type != DPP_SDT_TBLT_eRAM) { + ZXIC_COMM_TRACE_ERROR( + "SDT[%d] table_type[ %d ] is not eram table!\n", + eram_sdt_no, sdt_eram.table_type); + return DPP_ERR; + } + + p_index_array = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC( + sizeof(ZXIC_UINT32) * (sdt_eram.eram_table_depth)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_index_array); + + /*解析有效的valid数据*/ + rc = dpp_dtb_acl_index_parse(dev, queue_id, eram_sdt_no, vport, + &index_num, p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, "dpp_dtb_acl_index_parse", p_index_array); + + if (!index_num) { + ZXIC_COMM_TRACE_INFO("SDT[%d] vport[0x%x] item num is zero!\n", + sdt_no, vport); + ZXIC_COMM_FREE(p_index_array); + return DPP_OK; + } + + 
/*删除统计数据*/ + rc = dpp_dtb_eram_stat_data_clear(dev, queue_id, start_counter_id, + rd_mode, index_num, p_index_array); + ZXIC_COMM_CHECK_DEV_RC_MEMORY_FREE( + dev_id, rc, "dpp_dtb_eram_stat_data_clear", p_index_array); + + ZXIC_COMM_FREE(p_index_array); + + ZXIC_COMM_PRINT( + " dpp_dtb_acl_stat_clr_by_vport sdt_no[%u] start_index[0x%x] vport[0x%x]\n", + sdt_no, start_counter_id, vport); + + return DPP_OK; +} + +/***********************************************************/ +/** 消息通道获取pcie bar消息数目 +* @param dev NP设备 +* @param p_bar_msg_num 出参,获取pcie bar数目 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/16 +************************************************************/ +DPP_STATUS dpp_pcie_bar_msg_num_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_bar_msg_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_MUTEX_T *p_dtb_mutex = NULL; + DPP_DEV_MUTEX_TYPE_E mutex = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_bar_msg_num); + + mutex = DPP_DEV_MUTEX_T_DTB; + rc = dpp_dev_opr_mutex_get(dev, (ZXIC_UINT32)mutex, &p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_pcie_bar_request(dev, p_bar_msg_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + dev_id, rc, "dpp_agent_channel_pcie_bar_request", p_dtb_mutex); + + rc = zxic_comm_mutex_unlock(p_dtb_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash.c new file mode 100644 index 000000000000..005b132e8d2f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash.c @@ -0,0 
+1,3716 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_hash.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : wcl +* 完成日期 : 2014/02/08 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" + +#include "dpp_dev.h" +#include "dpp_module.h" +#include "dpp_se_cfg.h" +#include "dpp_hash_crc.h" +#include "dpp_hash.h" +#include "dpp_se.h" +#include "dpp_se_reg.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" +#include "dpp_se4k_reg.h" +#include "dpp_sdt.h" + +#define HASH_CMP_ZCELL (1) +#define HASH_CMP_ZBLK (2) + +#define HASH_ENTRY_STAT + +/* 全局变量定义区 */ +static ZXIC_UINT32 g_ddr_hash_arg[HASH_DDR_CRC_NUM] = { 0x04C11DB7, 0xF4ACFB13, + 0x20044009, + 0x00210801 }; + +static DPP_HASH_TBL_ID_INFO g_tbl_id_info[DPP_PCIE_SLOT_MAX][HASH_FUNC_ID_NUM] + [HASH_TBL_ID_NUM] = { { { { 0 } } } }; + +/** 4个hash引擎,每个最多使用32个zblock */ +static ZXIC_UINT32 g_hash_zblk_idx[DPP_PCIE_SLOT_MAX][HASH_FUNC_ID_NUM] + [HASH_TBL_ID_NUM] = { { { 0 } } }; + +static DPP_HASH_SOFT_RESET_STOR_DAT g_hash_store_dat[DPP_PCIE_SLOT_MAX] = { + { { 0 } } +}; + +#if DPP_WRITE_FILE_EN +static DPP_HASH_FILE_REG_T g_hash_file_reg = { { 0x55550000, 0xffffaaaa }, + { 1, 1, 1, 1 }, + { { 0x12121212, 0x12121212 }, + { 0x12121212, 0x12121212 }, + { 0x12121212, 0x12121212 }, + { 0x12121212, 0x12121212 } }, + 0, + 0xffff, + 0, + { 0 }, + { 0 } }; +#endif +/* 宏函数定义区 */ +#define GET_DDR_HASH_ARG(ddr_crc_sel) (g_ddr_hash_arg[ddr_crc_sel]) +#define GET_HASH_TBL_ID_INFO(slot_id, fun_id, tbl_id) \ + (&g_tbl_id_info[slot_id][fun_id][tbl_id]) +#define GET_ACTU_KEY_SIZE_BY_TBLID(slot_id, fun_id, tbl_id) \ + (g_tbl_id_info[slot_id][fun_id][tbl_id].actu_key_size) + +#define HASH_TBL_ID_INFO_CHECK(slot_id, fun_id, tbl_id, key_type) \ + do { \ + if (!g_tbl_id_info[slot_id][fun_id][tbl_id].is_init || \ + 
(g_tbl_id_info[slot_id][fun_id][tbl_id].key_type != \ + key_type)) { \ + ZXIC_COMM_PRINT( \ + "init [%d], config ketype[%d] parameter key_type[%d].\n", \ + g_tbl_id_info[slot_id][fun_id][tbl_id].is_init, \ + g_tbl_id_info[slot_id][fun_id][tbl_id] \ + .key_type, \ + key_type); \ + ZXIC_COMM_ASSERT(0); \ + return DPP_HASH_RC_INVALID_TBL_ID_INFO; \ + } \ + } while (0) + +#define GET_HASH_DDR_HW_ADDR(base_addr, item_idx) ((base_addr) + (item_idx)) + +/* 9:zcell depth */ +#define GET_HASH_ZCAM_HW_ADDR(base_addr, zblk_idx, item_idx) \ + ((base_addr) + ((zblk_idx) << 9) + (item_idx)) + +#define DPP_GET_HASH_FILE_REG() (&g_hash_file_reg) + +/* 局部函数声明区 */ + +/* 函数实现区 */ + +#if ZXIC_REAL("inter func.") +/***********************************************************/ +/** zblock资源链表排序比较函数 +* @param data1 待比较的数据1 +* @param data2 待比较的数据2 +* @param data 排序比较的类型,1-zcell比较,2-zblock比较 +* +* @return 1-data1比data2的索引大 + 0-data1和data2索引相同 + -1-data1比data2的索引小 +* @remark 无 +* @see +* @author wcl @date 2014/11/10 +************************************************************/ +ZXIC_SINT32 dpp_hash_list_cmp(D_NODE *data1, D_NODE *data2, void *data) +{ + ZXIC_UINT32 flag = 0; + ZXIC_UINT32 data_new = 0; + ZXIC_UINT32 data_pre = 0; + + ZXIC_COMM_CHECK_POINT(data1); + ZXIC_COMM_CHECK_POINT(data2); + ZXIC_COMM_CHECK_POINT(data); + + flag = *(ZXIC_UINT32 *)data; + + if (flag == HASH_CMP_ZCELL) { + data_new = ((SE_ZCELL_CFG *)data1->data)->zcell_idx; + data_pre = ((SE_ZCELL_CFG *)data2->data)->zcell_idx; + } else if (flag == HASH_CMP_ZBLK) { + data_new = ((SE_ZBLK_CFG *)data1->data)->zblk_idx; + data_pre = ((SE_ZBLK_CFG *)data2->data)->zblk_idx; + } + + if (data_new > data_pre) { + return 1; + } else if (data_new == data_pre) { + return 0; + } else { + return -1; + } +} + +/***********************************************************/ +/** hash键值红黑树插入比较函数 +* @param p_new +* @param p_old +* @param key_size +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/11/10 
+************************************************************/ +ZXIC_SINT32 dpp_hash_rb_key_cmp(ZXIC_VOID *p_new, ZXIC_VOID *p_old, + ZXIC_UINT32 key_size) +{ + DPP_HASH_RBKEY_INFO *p_rbkey_new = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_old = NULL; + + ZXIC_COMM_CHECK_POINT(p_new); + ZXIC_COMM_CHECK_POINT(p_old); + p_rbkey_new = (DPP_HASH_RBKEY_INFO *)(p_new); + p_rbkey_old = (DPP_HASH_RBKEY_INFO *)(p_old); + + return ZXIC_COMM_MEMCMP(p_rbkey_new->key, p_rbkey_old->key, + HASH_KEY_MAX); +} + +/***********************************************************/ +/** hash键值红黑树插入比较函数 +* @param p_new +* @param p_old +* @param key_size +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/03/26 +************************************************************/ +ZXIC_SINT32 dpp_hash_ddr_cfg_rb_key_cmp(ZXIC_VOID *p_new, ZXIC_VOID *p_old, + ZXIC_UINT32 key_size) +{ + HASH_DDR_CFG *p_rbkey_new = NULL; + HASH_DDR_CFG *p_rbkey_old = NULL; + + ZXIC_COMM_CHECK_POINT(p_new); + ZXIC_COMM_CHECK_POINT(p_old); + p_rbkey_new = (HASH_DDR_CFG *)(p_new); + p_rbkey_old = (HASH_DDR_CFG *)(p_old); + + return ZXIC_COMM_MEMCMP(&p_rbkey_new->ddr_baddr, + &p_rbkey_old->ddr_baddr, sizeof(ZXIC_UINT32)); +} + +/***********************************************************/ +/** 获取位宽 +* @param ddr_item_num +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/11/10 +************************************************************/ +ZXIC_UINT32 dpp_hash_ddr_depth_conv(ZXIC_UINT32 ddr_item_num) +{ + ZXIC_UINT32 count = 0; + + while (ddr_item_num > ((ZXIC_UINT32)1 << count)) { + count++; + } + + return count; +} + +/***********************************************************/ +/** 片内资源初始化 +* @param p_hash_cfg +* @param zblk_num +* @param zblk_idx_array +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/10/13 +************************************************************/ +DPP_STATUS dpp_hash_zcam_resource_init(DPP_HASH_CFG *p_hash_cfg, + ZXIC_UINT32 zblk_num, + ZXIC_UINT32 
*zblk_idx_array) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 cmp_type = 0; + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 zcell_idx = 0; + ZXIC_UINT32 dev_id = 0; + + D_HEAD *p_zblk_list = NULL; + D_HEAD *p_zcell_free = NULL; + SE_ZBLK_CFG *p_zblk_cfg = NULL; + SE_ZCELL_CFG *p_zcell_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + ZXIC_COMM_CHECK_INDEX(zblk_num, 0, SE_ZBLK_NUM); + ZXIC_COMM_CHECK_POINT(zblk_idx_array); + + dev_id = p_hash_cfg->p_se_info->dev_id; + + /* init zblock list */ + p_zblk_list = &p_hash_cfg->hash_shareram.zblk_list; + rc = zxic_comm_double_link_init(SE_ZBLK_NUM, p_zblk_list); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_init"); + + /* init zcell list */ + p_zcell_free = &p_hash_cfg->hash_shareram.zcell_free_list; + rc = zxic_comm_double_link_init(SE_ZBLK_NUM * SE_ZCELL_NUM, + p_zcell_free); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_init"); + + for (i = 0; i < zblk_num; i++) { + zblk_idx = zblk_idx_array[i]; + /* debug start */ + //ZXIC_COMM_PRINT("zblk_idx is [%d]\n", zblk_idx); /* t */ + /* debug end */ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zblk_idx, 0, SE_ZBLK_NUM - 1); + p_zblk_cfg = + DPP_SE_GET_ZBLK_CFG(p_hash_cfg->p_se_info, zblk_idx); + + if (p_zblk_cfg->is_used) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode:[0x%x], ZBlock[%d] is already used by other function!\n", + DPP_HASH_RC_INVALID_ZBLCK, zblk_idx); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_ZBLCK; + } + + for (j = 0; j < SE_ZCELL_NUM; j++) { + zcell_idx = p_zblk_cfg->zcell_info[j].zcell_idx; + p_zcell_cfg = DPP_SE_GET_ZCELL_CFG( + p_hash_cfg->p_se_info, zcell_idx); + + if (p_zcell_cfg->is_used) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode:[0x%x], ZBlk[%d], ZCell[%d] is already used by other function!\n", + DPP_HASH_RC_INVALID_ZCELL, zblk_idx, + zcell_idx); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_ZCELL; + } + + p_zcell_cfg->is_used = 1; + + /* insert to free zcell free 
list */ + cmp_type = HASH_CMP_ZCELL; + rc = zxic_comm_double_link_insert_sort( + &p_zcell_cfg->zcell_dn, p_zcell_free, + dpp_hash_list_cmp, &cmp_type); + ZXIC_COMM_CHECK_DEV_RC( + dev_id, rc, + "zxic_comm_double_link_insert_sort"); + } + + /* insert to zblock list */ + p_zblk_cfg->is_used = 1; + cmp_type = HASH_CMP_ZBLK; + rc = zxic_comm_double_link_insert_sort(&p_zblk_cfg->zblk_dn, + p_zblk_list, + dpp_hash_list_cmp, + &cmp_type); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "zxic_comm_double_link_insert_last"); + + /* config zblock */ + rc = dpp_hash_zblkcfg_write(p_hash_cfg->p_se_info, + p_hash_cfg->fun_id, p_zblk_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_zblkcfg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_hash_cfg +* +* @return +* @remark 无 +* @see +* @author XCX @date 2018/03/22 +************************************************************/ +DPP_STATUS dpp_hash_zcam_resource_deinit(DPP_HASH_CFG *p_hash_cfg) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dev_id = 0; + + D_NODE *p_node = NULL; + D_HEAD *p_head = NULL; + SE_ZBLK_CFG *p_zblk_cfg = NULL; + SE_ZCELL_CFG *p_zcell_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + dev_id = p_hash_cfg->p_se_info->dev_id; + + /*delete zcell node*/ + p_head = &p_hash_cfg->hash_shareram.zcell_free_list; + + while (p_head->used) { + p_node = p_head->p_next; + + rc = zxic_comm_double_link_del(p_node, p_head); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_del"); + p_zcell_cfg = (SE_ZCELL_CFG *)p_node->data; + p_zcell_cfg->is_used = 0; + } + + /*delete zblk node*/ + p_head = &p_hash_cfg->hash_shareram.zblk_list; + + while (p_head->used) { + p_node = p_head->p_next; + + rc = zxic_comm_double_link_del(p_node, p_head); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_del"); + + ZXIC_COMM_CHECK_DEV_INDEX( + dev_id, ((SE_ZBLK_CFG *)p_node->data)->zblk_idx, 0, + HASH_ZBLK_ID_MAX); + p_zblk_cfg = DPP_SE_GET_ZBLK_CFG( + 
p_hash_cfg->p_se_info, + ((SE_ZBLK_CFG *)p_node->data)->zblk_idx); + p_zblk_cfg->is_used = 0; + + /*clear zblk config*/ + /* dpp_hash_zblkcfg_clr(p_hash_cfg->p_se_info, p_hash_cfg->fun_id, p_zblk_cfg); */ + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param item_entry_max +* @param wrt_mask +* @param entry_size +* +* @return +* @remark 无 +* @see +* @author XCX @date 2018/03/22 +************************************************************/ +ZXIC_UINT32 dpp_hash_get_item_free_pos(ZXIC_UINT32 item_entry_max, + ZXIC_UINT32 wrt_mask, + ZXIC_UINT32 entry_size) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 pos = 0xFFFFFFFF; /* -1; */ + ZXIC_UINT32 mask = 0; + + for (i = 0; i < item_entry_max; i += entry_size / HASH_ENTRY_POS_STEP) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + i, entry_size / HASH_ENTRY_POS_STEP); + mask = DPP_GET_HASH_ENTRY_MASK(entry_size, i); + + if (0 == (mask & wrt_mask)) { + pos = i; + break; + } + } + + return pos; +} + +/***********************************************************/ +/** +* @param p_hash_cfg +* @param p_rbkey +* @param p_item +* @param item_idx +* @param item_type +* @param insrt_key_type +* +* @return +* @remark 无 +* @see +* @author XCX @date 2018/03/22 +************************************************************/ +DPP_STATUS dpp_hash_insrt_to_item(DPP_HASH_CFG *p_hash_cfg, + DPP_HASH_RBKEY_INFO *p_rbkey, + SE_ITEM_CFG *p_item, ZXIC_UINT32 item_idx, + ZXIC_UINT32 item_type, + ZXIC_UINT32 insrt_key_type) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 free_pos = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 item_entry_max = ITEM_ENTRY_NUM_4; + + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + ZXIC_COMM_CHECK_POINT(p_rbkey); + ZXIC_COMM_CHECK_POINT(p_item); + + dev_id = p_hash_cfg->p_se_info->dev_id; + + if (ITEM_DDR_256 == item_type) { + item_entry_max = ITEM_ENTRY_NUM_2; + } + + if (!p_item->valid) { /* item is not used */ + rc = zxic_comm_double_link_init(item_entry_max, + &p_item->item_list); + 
ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "zxic_comm_double_link_init"); + + p_rbkey->entry_pos = HASH_ITEM_POS_0; + p_item->wrt_mask = DPP_GET_HASH_ENTRY_MASK(p_rbkey->entry_size, + p_rbkey->entry_pos); + p_item->item_index = item_idx; + p_item->item_type = item_type; + p_item->valid = 1; + } else { + free_pos = dpp_hash_get_item_free_pos( + item_entry_max, p_item->wrt_mask, p_rbkey->entry_size); + + if (0xFFFFFFFF == free_pos) { + return DPP_HASH_RC_ITEM_FULL; + } else { + p_rbkey->entry_pos = free_pos; + p_item->wrt_mask |= DPP_GET_HASH_ENTRY_MASK( + p_rbkey->entry_size, p_rbkey->entry_pos); + } + } + + /* debug*/ + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "Entry in item pos is: [%d], entry size is: [%d].\n", + free_pos, p_rbkey->entry_size); + + rc = zxic_comm_double_link_insert_last(&p_rbkey->entry_dn, + &p_item->item_list); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_insert_last"); + + p_rbkey->p_item_info = p_item; + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("External Func.") +/***********************************************************/ +/** 单个hash引擎初始化 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param zblk_num 分配给此hash引擎的zblock数目 +* @param zblk_idx 分配给此hash引擎的zblock编号 +* @param ddr_dis DDR关闭位,0-不关闭片外DDR, 1-关闭片外DDR +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 zblk_num, ZXIC_UINT32 *zblk_idx, + ZXIC_UINT32 ddr_dis) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(zblk_idx); + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(zblk_num, 0, SE_ZBLK_NUM); + ZXIC_COMM_CHECK_INDEX(ddr_dis, 0, 1); + + dev_id = 
p_se_cfg->dev_id; + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + slot_id = p_se_cfg->dev.pcie_channel.slot; + ZXIC_COMM_CHECK_INDEX_UPPER(slot_id, DPP_PCIE_SLOT_MAX - 1); + + rc = dpp_se_fun_init(p_se_cfg, (fun_id & 0xff), FUN_HASH); + if (DPP_SE_RC_FUN_INVALID == rc) { + return DPP_OK; + } + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_fun_init"); + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + p_hash_cfg->fun_id = fun_id; + p_hash_cfg->p_hash32_fun = dpp_crc32_calc; + p_hash_cfg->p_hash16_fun = dpp_crc16_calc; + p_hash_cfg->p_se_info = p_se_cfg; + + /* 判定hash引擎是片内模式还是混合模式*/ + if (ddr_dis == 1) { + /* disable ddr */ + p_hash_cfg->ddr_valid = 0; + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_ext_cfg_clr(p_se_cfg, fun_id); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_ddrcfg_clr"); + + rc = dpp_hash_tbl_depth_clr(p_se_cfg, fun_id); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_tbl_depth_clr"); +#endif + + } else { + p_hash_cfg->ddr_valid = 1; + } + + /* zcam初始化*/ + p_hash_cfg->hash_stat.zblock_num = zblk_num; + rc = dpp_hash_zcam_resource_init(p_hash_cfg, zblk_num, zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_zcam_resource_init"); + + /* 将zblock_id记录在软件缓存中 */ + for (i = 0; i < zblk_num; i++) { + p_hash_cfg->hash_stat.zblock_array[i] = zblk_idx[i]; + } + + /* dynamic alloc rb_tree node by user. */ + + rc = (DPP_STATUS)zxic_comm_rb_init(&p_hash_cfg->hash_rb, 0, + sizeof(DPP_HASH_RBKEY_INFO), + dpp_hash_rb_key_cmp); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_rb_init"); + + /* dynamic alloc rb_tree node by user. 
*/ + rc = (DPP_STATUS)zxic_comm_rb_init(&p_hash_cfg->ddr_cfg_rb, 0, + sizeof(HASH_DDR_CFG), + dpp_hash_ddr_cfg_rb_key_cmp); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_rb_init"); + + /** for 软复位 */ + /** 保存zblock内容 */ + for (i = 0; i < zblk_num; i++) { + g_hash_zblk_idx[slot_id][fun_id][i] = zblk_idx[i]; + } + + g_hash_store_dat[slot_id].ddr_dis_flag[fun_id] = ddr_dis; + g_hash_store_dat[slot_id].zblk_num[fun_id] = zblk_num; + g_hash_store_dat[slot_id].zblk_idx_start[fun_id] = + g_hash_zblk_idx[dev_id][fun_id]; + g_hash_store_dat[slot_id].hash_id_valid |= + (ZXIC_UINT32)((1U << fun_id) & 0xffffffff); + + return DPP_OK; +} + +/***********************************************************/ +/** 初始化单个hash引擎内的某个业务表,此接口支持为该业务表分配独占的zcell。 +* 必须先初始化hash引擎,再初始化业务表。 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param bulk_id 每个Hash引擎资源划分的空间ID号 +* @param p_ddr_resc_cfg 分配给hash引擎此资源空间的ddr资源属性 +* @param zcell_num 分配给hash引擎此资源空间的zcell数量 +* @param zreg_num 分配给hash引擎此资源空间的zreg数量 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_bulk_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, + DPP_HASH_DDR_RESC_CFG_T *p_ddr_resc_cfg, + ZXIC_UINT32 zcell_num, ZXIC_UINT32 zreg_num) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 ddr_item_num = 0; + + D_NODE *p_zblk_dn = NULL; + D_NODE *p_zcell_dn = NULL; + ZXIC_RB_TN *p_rb_tn_new = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + SE_ZBLK_CFG *p_zblk_cfg = NULL; + SE_ZREG_CFG *p_zreg_cfg = NULL; + HASH_DDR_CFG *p_ddr_cfg = NULL; + HASH_DDR_CFG *p_rbkey_new = NULL; + HASH_DDR_CFG *p_rbkey_rtn = NULL; + SE_ZCELL_CFG *p_zcell_cfg = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + SE_ITEM_CFG **p_item_array = NULL; + DPP_HASH_BULK_ZCAM_STAT *p_bulk_zcam_mono = NULL; + /* 
D_HEAD *p_zcell_free = NULL;*/ + /* D_HEAD *p_zblk_free = NULL;*/ + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_ddr_resc_cfg); + ZXIC_COMM_CHECK_INDEX(p_ddr_resc_cfg->ddr_crc_sel, 0, + HASH_DDR_CRC_NUM - 1); + ZXIC_COMM_CHECK_INDEX(fun_id, DPP_HASH_ID_MIN, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_INDEX(bulk_id, HASH_BULK_ID_MIN, HASH_BULK_ID_MAX); + ZXIC_COMM_CHECK_INDEX(zcell_num, 0, SE_ZBLK_NUM * SE_ZCELL_NUM); + ZXIC_COMM_CHECK_INDEX(zreg_num, 0, SE_ZBLK_NUM * SE_ZREG_NUM); + + dev_id = p_se_cfg->dev_id; + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + ZXIC_COMM_TRACE_INFO("p_hash_cfg->ddr_valid = %d!\n", + p_hash_cfg->ddr_valid); + if (NULL != p_hash_cfg->hash_stat.p_bulk_zcam_mono[bulk_id]) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "slot[%d] fun_id[%u] bulk_id[%u] is already init,do not init again!\n", + p_se_cfg->dev.pcie_channel.slot, fun_id, bulk_id); + return DPP_OK; + } + + if (1 == p_hash_cfg->ddr_valid) { + /* [a long time ago] hash存储深度不能超过26bit,不然会导致硬件逻辑crc index不一致,微码查表不中 */ + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, p_ddr_resc_cfg->ddr_item_num, + HASH_DDR_ITEM_MIN, HASH_DDR_ITEM_MAX); + + ddr_item_num = p_ddr_resc_cfg->ddr_item_num; + + if (DDR_WIDTH_512b == p_ddr_resc_cfg->ddr_width_mode) { + ddr_item_num = p_ddr_resc_cfg->ddr_item_num >> 1; + ZXIC_COMM_CHECK_DEV_INDEX( + dev_id, ddr_item_num, HASH_DDR_ITEM_MIN, + HASH_DDR_ITEM_MAX); /* 512bit存储时,hash存储深度不能少于14bit */ + } + + /** red&black key*/ + p_rbkey_new = + (HASH_DDR_CFG *)ZXIC_COMM_MALLOC(sizeof(HASH_DDR_CFG)); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rbkey_new); + ZXIC_COMM_MEMSET(p_rbkey_new, 0, sizeof(HASH_DDR_CFG)); + + p_rbkey_new->ddr_baddr = p_ddr_resc_cfg->ddr_baddr; + + /** red&black tree node*/ + p_rb_tn_new = + (ZXIC_RB_TN *)ZXIC_COMM_MALLOC(sizeof(ZXIC_RB_TN)); + if (NULL == (p_rb_tn_new)) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM 
%s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + INIT_RBT_TN(p_rb_tn_new, p_rbkey_new); + + rc = zxic_comm_rb_insert(&p_hash_cfg->ddr_cfg_rb, + (ZXIC_VOID *)p_rb_tn_new, + (ZXIC_VOID *)(&p_rb_tn_rtn)); + if (ZXIC_RBT_RC_FULL == rc) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "The red black tree is full!\n"); + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_RB_TREE_FULL; + } else if (ZXIC_RBT_RC_UPDATE == rc) { + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "some bulk_id share one bulk!\n"); + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_tn_rtn); + p_rbkey_rtn = (HASH_DDR_CFG *)(p_rb_tn_rtn->p_key); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, p_rbkey_rtn->bulk_id, + 0, HASH_BULK_NUM - 1); + p_ddr_cfg = + p_hash_cfg->p_bulk_ddr_info + [p_rbkey_rtn->bulk_id]; /* 已经初始化过的ddr_bulk_id属性 */ + + if (p_ddr_cfg->hash_ddr_arg != + GET_DDR_HASH_ARG( + p_ddr_resc_cfg->ddr_crc_sel) || + p_ddr_cfg->width_mode != + p_ddr_resc_cfg->ddr_width_mode || + p_ddr_cfg->ddr_ecc_en != + p_ddr_resc_cfg->ddr_ecc_en || + p_ddr_cfg->item_num != ddr_item_num) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "The base address is same but other ddr attribute is different\n"); + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_PARA; + } + + p_hash_cfg->p_bulk_ddr_info[bulk_id] = + p_hash_cfg + ->p_bulk_ddr_info[p_rbkey_rtn->bulk_id]; + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "bulk id init bulk_ddr_cfg ptr is:"); + + for (i = 0; i < HASH_BULK_NUM; i++) { + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "%p ", + p_hash_cfg->p_bulk_ddr_info[i]); + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "\n"); + + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + } else { + p_item_array = (SE_ITEM_CFG **)ZXIC_COMM_MALLOC( + ddr_item_num * sizeof(SE_ITEM_CFG *)); + if (NULL == (p_item_array)) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + 
ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + ZXIC_COMM_MEMSET(p_item_array, 0, + ddr_item_num * sizeof(SE_ITEM_CFG *)); + + p_rbkey_new->p_item_array = p_item_array; + p_rbkey_new->bulk_id = bulk_id; + p_rbkey_new->hw_baddr = 0; + p_rbkey_new->width_mode = + p_ddr_resc_cfg->ddr_width_mode; + p_rbkey_new->item_num = ddr_item_num; + p_rbkey_new->ddr_ecc_en = p_ddr_resc_cfg->ddr_ecc_en; + p_rbkey_new->hash_ddr_arg = + GET_DDR_HASH_ARG(p_ddr_resc_cfg->ddr_crc_sel); + p_rbkey_new->bulk_use = 1; + p_rbkey_new->zcell_num = zcell_num; + p_rbkey_new->zreg_num = zreg_num; + p_hash_cfg->p_bulk_ddr_info[bulk_id] = p_rbkey_new; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, + "one new ddr_bulk init!\n"); + } +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_tbl_crc_poly_write(p_hash_cfg->p_se_info, + p_hash_cfg->fun_id, bulk_id, + p_ddr_resc_cfg->ddr_crc_sel); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + "dpp_hash_tbl_crc_poly_write"); + return rc; + } + + rc = dpp_hash_ext_cfg_write( + p_hash_cfg->p_se_info, p_hash_cfg->fun_id, bulk_id, + p_hash_cfg->p_bulk_ddr_info[bulk_id]); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + "dpp_hash_ext_cfg_write"); + return rc; + } + rc = dpp_hash_tbl_depth_write( + p_hash_cfg->p_se_info, p_hash_cfg->fun_id, bulk_id, + p_hash_cfg->p_bulk_ddr_info[bulk_id]); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + 
"dpp_hash_tbl_depth_write"); + return rc; + } +#endif + rc = dpp_se_smmu1_hash_tbl_cfg_set(&p_se_cfg->dev, + p_hash_cfg->fun_id, bulk_id, + p_ddr_resc_cfg->ddr_ecc_en, + p_ddr_resc_cfg->ddr_baddr); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + "dpp_se_smmu1_hash_tbl_cfg_set"); + return rc; + } + } + + p_bulk_zcam_mono = (DPP_HASH_BULK_ZCAM_STAT *)ZXIC_COMM_MALLOC( + sizeof(DPP_HASH_BULK_ZCAM_STAT)); + if (NULL == (p_bulk_zcam_mono)) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_FREE(p_item_array); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + ZXIC_COMM_MEMSET(p_bulk_zcam_mono, 0, sizeof(DPP_HASH_BULK_ZCAM_STAT)); + (&p_hash_cfg->hash_stat)->p_bulk_zcam_mono[bulk_id] = p_bulk_zcam_mono; + + for (i = 0; i < SE_ZBLK_NUM * SE_ZCELL_NUM; i++) { + p_bulk_zcam_mono->zcell_mono_idx[i] = 0xffffffff; + } + + for (i = 0; i < SE_ZBLK_NUM; i++) { + for (j = 0; j < SE_ZREG_NUM; j++) { + p_bulk_zcam_mono->zreg_mono_id[i][j].zblk_id = + 0xffffffff; + p_bulk_zcam_mono->zreg_mono_id[i][j].zreg_id = + 0xffffffff; + } + } + + if (zcell_num > 0) { + p_hash_cfg->bulk_ram_mono[bulk_id] = 1; + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_bulk_mono_flags_write(p_se_cfg, fun_id, bulk_id); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_FREE(p_item_array); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + "dpp_hash_bulk_mono_flags_write"); + return rc; + } +#endif + + /* 初始分配此业务表独占的zcell,只能在非独占的zcell中分配 */ + /* p_zcell_free = &(p_hash_cfg->hash_shareram.zcell_free_list);*/ + p_zcell_dn = 
p_hash_cfg->hash_shareram.zcell_free_list.p_next; + + i = 0; + + while (p_zcell_dn) { + p_zcell_cfg = (SE_ZCELL_CFG *)p_zcell_dn->data; + + if (p_zcell_cfg->is_used) { + if (!(p_zcell_cfg->flag & + DPP_ZCELL_FLAG_IS_MONO)) { + p_zcell_cfg->flag |= + DPP_ZCELL_FLAG_IS_MONO; + p_zcell_cfg->bulk_id = bulk_id; + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_zcell_mono_write( + p_se_cfg, + p_zcell_cfg); /* 写入硬件 */ + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_FREE(p_item_array); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, + __FUNCTION__, + "dpp_hash_zcell_mono_write"); + return rc; + } +#endif + + ZXIC_COMM_CHECK_DEV_INDEX( + dev_id, p_zcell_cfg->zcell_idx, + 0, SE_ZCELL_TOTAL_NUM - 1); + p_bulk_zcam_mono->zcell_mono_idx + [p_zcell_cfg->zcell_idx] = + p_zcell_cfg + ->zcell_idx; /* 统计独占的zcell */ + + if (++i >= zcell_num) { + break; + } + } + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "zcell[ %d ] is not init before used!\n", + p_zcell_cfg->zcell_idx); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_PARA; + } + + p_zcell_dn = p_zcell_dn->next; + } + + if (i < zcell_num) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Input param 'zcell_num' is [ %d ], actually bulk[ %d ]monopolize zcells is [ %d ]!\n", + zcell_num, bulk_id, i); + } + } + + if (zreg_num > 0) { + p_hash_cfg->bulk_ram_mono[bulk_id] = 1; + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_bulk_mono_flags_write(p_se_cfg, fun_id, bulk_id); + if (DPP_OK != rc) { + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_FREE(p_item_array); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, __LINE__, rc, __FUNCTION__, + "dpp_hash_bulk_mono_flags_write"); + return rc; + } +#endif + /* p_tbl_id_info->zreg_num= zreg_num;*/ + + /* 初始分配此业务表独占的zreg,只能在非独占的zreg中分配 */ + /* p_zblk_free = &(p_hash_cfg->hash_shareram.zblk_list);*/ + 
p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + j = 0; + + while (p_zblk_dn) { + p_zblk_cfg = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblk_idx = p_zblk_cfg->zblk_idx; + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zblk_idx, 0, + SE_ZBLK_NUM - 1); + + if (p_zblk_cfg->is_used) { + for (i = 0; i < SE_ZREG_NUM; i++) { + p_zreg_cfg = + &(p_zblk_cfg->zreg_info[i]); + + if (!(p_zreg_cfg->flag & + DPP_ZREG_FLAG_IS_MONO)) { + p_zreg_cfg->flag = + DPP_ZREG_FLAG_IS_MONO; + p_zreg_cfg->bulk_id = bulk_id; +#ifdef DPP_FLOW_HW_INIT + rc = dpp_hash_zreg_mono_write( + p_se_cfg, bulk_id, + zblk_idx, + i); /* 写入硬件 */ + if (DPP_OK != rc) { + ZXIC_COMM_FREE( + p_rbkey_new); + ZXIC_COMM_FREE( + p_rb_tn_new); + ZXIC_COMM_FREE( + p_item_array); + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n ICM %s:%d [ErrorCode:0x%x] !-- %s Call %s Fail!\n", + __FILE__, + __LINE__, rc, + __FUNCTION__, + "dpp_hash_zreg_mono_write"); + return rc; + } +#endif + p_bulk_zcam_mono + ->zreg_mono_id[zblk_idx] + [i] + .zblk_id = zblk_idx; + p_bulk_zcam_mono + ->zreg_mono_id[zblk_idx] + [i] + .zreg_id = i; + + if (++j >= zreg_num) { + break; + } + } + } + + if (j >= zreg_num) { + break; + } + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "zblk [ %d ] is not init before used!\n", + p_zblk_cfg->zblk_idx); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_PARA; + } + + p_zblk_dn = p_zblk_dn->next; + } + + if (j < zreg_num) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "Input param 'zreg_num' is [ %d ], actually bulk[ %d ]monopolize zregs is [ %d ]!\n", + zreg_num, bulk_id, j); + } + } + + /** for 软复位 */ + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(fun_id, 0, 3); + ZXIC_COMM_CHECK_INDEX(bulk_id, 0, 7); + g_hash_store_dat[dev_id].ddr_item_num[fun_id][bulk_id] = + p_ddr_resc_cfg->ddr_item_num; + + return DPP_OK; +} + +/***********************************************************/ +/** 初始化单个hash引擎内的某个业务表,此接口支持为该业务表分配独占的zcell。 +* 必须先初始化hash引擎,如果是片内+片外模式还必须先初始化dpp_hash_ddr_bulk_init, +* 
再初始化业务表。 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param tbl_id 业务表 +* @param tbl_flag 初始化标记, bitmap的形式使用,如:HASH_TBL_FLAG_AGE等 +* @param key_type hash条目类型,取值参照DPP_HASH_KEY_TYPE的定义 +* @param actu_key_size 业务键值有效长度: 8bit*N,N=1~48 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_tbl_id_info_init(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 tbl_id, ZXIC_UINT32 tbl_flag, + ZXIC_UINT32 key_type, + ZXIC_UINT32 actu_key_size) +{ + ZXIC_UINT32 key_by_size = 0; + ZXIC_UINT32 entry_size = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + + DPP_HASH_TBL_ID_INFO *p_tbl_id_info = NULL; + + ZXIC_COMM_CHECK_INDEX(tbl_id, 0, HASH_TBL_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(actu_key_size, HASH_ACTU_KEY_MIN, + HASH_ACTU_KEY_MAX); + ZXIC_COMM_CHECK_INDEX(key_type, HASH_KEY_128b, HASH_KEY_512b); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + dev_id = p_se_cfg->dev_id; + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + key_by_size = DPP_GET_KEY_SIZE(actu_key_size); + entry_size = DPP_GET_HASH_ENTRY_SIZE(key_type); + + if (key_by_size > entry_size) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrorCode[%x]: actu_key_size[%d] not match to key_type[%d].\n", + DPP_HASH_RC_INVALID_PARA, key_by_size, entry_size); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_PARA; + } + + slot_id = p_se_cfg->dev.pcie_channel.slot; + ZXIC_COMM_CHECK_INDEX_UPPER(slot_id, DPP_PCIE_SLOT_MAX - 1); + p_tbl_id_info = GET_HASH_TBL_ID_INFO(slot_id, fun_id, tbl_id); + + if (p_tbl_id_info->is_init) { + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "slot[%d] fun_id[%d],table_id[%d] is already init, do not init again!\n", + slot_id, fun_id, tbl_id); + //return DPP_HASH_RC_REPEAT_INIT; + return DPP_OK; + } + + p_tbl_id_info->fun_id = fun_id; + p_tbl_id_info->actu_key_size = actu_key_size; 
+ p_tbl_id_info->key_type = key_type; + p_tbl_id_info->is_init = 1; + + if (tbl_flag & HASH_TBL_FLAG_AGE) { + p_tbl_id_info->is_age = 1; + } + + if (tbl_flag & HASH_TBL_FLAG_LEARN) { + p_tbl_id_info->is_lrn = 1; + } + + if (tbl_flag & HASH_TBL_FLAG_MC_WRT) { + p_tbl_id_info->is_mc_wrt = 1; + } + + return DPP_OK; +} + +DPP_STATUS dpp_hash_red_black_node_alloc(DPP_DEV_T *dev, + ZXIC_RB_TN **p_rb_tn_new, + DPP_HASH_RBKEY_INFO **p_rbkey_new) +{ + ZXIC_RB_TN *p_rb_tn_new_temp = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_new_temp = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + + /** red&black key*/ + p_rbkey_new_temp = (DPP_HASH_RBKEY_INFO *)ZXIC_COMM_MALLOC( + sizeof(DPP_HASH_RBKEY_INFO)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_rbkey_new_temp); + ZXIC_COMM_MEMSET(p_rbkey_new_temp, 0, sizeof(DPP_HASH_RBKEY_INFO)); + + /** red&black tree node*/ + p_rb_tn_new_temp = (ZXIC_RB_TN *)ZXIC_COMM_MALLOC(sizeof(ZXIC_RB_TN)); + if (NULL == p_rb_tn_new_temp) { + ZXIC_COMM_FREE(p_rbkey_new_temp); + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "\n ICM %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", + __FILE__, __LINE__, __FUNCTION__); + return ZXIC_PAR_CHK_POINT_NULL; + } + + INIT_RBT_TN(p_rb_tn_new_temp, p_rbkey_new_temp); + + *p_rb_tn_new = p_rb_tn_new_temp; + *p_rbkey_new = p_rbkey_new_temp; + + return DPP_OK; +} + +DPP_STATUS dpp_hash_get_hash_info_from_sdt(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no, + HASH_ENTRY_CFG *p_hash_entry_cfg) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + FUNC_ID_INFO *p_func_info = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_entry_cfg); + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_soft_sdt_tbl_read"); + + p_hash_entry_cfg->fun_id = sdt_hash_info.hash_id; /*hash引擎*/ + ZXIC_COMM_CHECK_INDEX(p_hash_entry_cfg->fun_id, HASH_FUNC_ID_MIN, + HASH_FUNC_ID_NUM - 1); + + p_hash_entry_cfg->table_id = sdt_hash_info.hash_table_id; /*hash表号*/ + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->table_id, + HASH_TBL_ID_NUM - 1); + + p_hash_entry_cfg->bulk_id = ((p_hash_entry_cfg->table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_INDEX_UPPER(p_hash_entry_cfg->bulk_id, + HASH_BULK_NUM - 1); + + p_hash_entry_cfg->key_type = sdt_hash_info.hash_table_width; /*表宽度*/ + ZXIC_COMM_CHECK_INDEX(p_hash_entry_cfg->key_type, HASH_KEY_128b, + HASH_KEY_512b); + + p_hash_entry_cfg->actu_key_size = + sdt_hash_info.key_size; /*业务表键值长度*/ + ZXIC_COMM_CHECK_INDEX(p_hash_entry_cfg->actu_key_size, + HASH_ACTU_KEY_MIN, HASH_ACTU_KEY_MAX); + p_hash_entry_cfg->key_by_size = + DPP_GET_KEY_SIZE(p_hash_entry_cfg->actu_key_size); + p_hash_entry_cfg->rst_by_size = DPP_GET_RST_SIZE( + p_hash_entry_cfg->key_type, p_hash_entry_cfg->actu_key_size); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + 
ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_cfg); + p_hash_entry_cfg->p_se_cfg = p_se_cfg; + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, p_hash_entry_cfg->fun_id); + DPP_SE_CHECK_FUN(p_func_info, p_hash_entry_cfg->fun_id, FUN_HASH); + + p_hash_entry_cfg->p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_entry_cfg->p_hash_cfg); + + return DPP_OK; +} + +DPP_STATUS dpp_hash_rb_insert(DPP_DEV_T *dev, HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_entry) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_HASH_RBKEY_INFO *p_rbkey_rtn = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_new = NULL; + ZXIC_RB_TN *p_rb_tn_new = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + ZXIC_UINT32 rst_actual_size = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg->p_hash_cfg); + ZXIC_COMM_CHECK_POINT(p_entry); + ZXIC_COMM_CHECK_POINT(p_entry->p_rst); + + p_rbkey_new = p_hash_entry_cfg->p_rbkey_new; + p_rb_tn_new = p_hash_entry_cfg->p_rb_tn_new; + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + rst_actual_size = ((p_hash_entry_cfg->rst_by_size) > HASH_RST_MAX) ? + HASH_RST_MAX : + p_hash_entry_cfg->rst_by_size; + rc = zxic_comm_rb_insert(&p_hash_cfg->hash_rb, (ZXIC_VOID *)p_rb_tn_new, + (ZXIC_VOID *)(&p_rb_tn_rtn)); + if (ZXIC_RBT_RC_FULL == rc) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "The red black tree is full!\n"); + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_RB_TREE_FULL; + } else if (ZXIC_RBT_RC_UPDATE == rc) { + p_hash_cfg->hash_stat.insert_same++; + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "Hash update exist entry!\n"); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_rb_tn_rtn); + p_rbkey_rtn = (DPP_HASH_RBKEY_INFO *)(p_rb_tn_rtn->p_key); + + /* when the result is more than 256bit, get first 256bit valid data. 
*/ + ZXIC_COMM_MEMCPY(p_rbkey_rtn->rst, p_entry->p_rst, + rst_actual_size); + + ZXIC_COMM_FREE(p_rbkey_new); + ZXIC_COMM_FREE(p_rb_tn_new); + p_hash_entry_cfg->p_rbkey_new = p_rbkey_rtn; + p_hash_entry_cfg->p_rb_tn_new = p_rb_tn_rtn; + + return DPP_HASH_RC_ADD_UPDATE; + } else { + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "Hash insert new entry!\n"); + /* when the result is more than 256bit, get first 256bit valid data. */ + ZXIC_COMM_MEMCPY(p_rbkey_new->rst, p_entry->p_rst, + rst_actual_size); + p_rbkey_new->entry_size = + DPP_GET_HASH_ENTRY_SIZE(p_hash_entry_cfg->key_type); + INIT_D_NODE(&p_rbkey_new->entry_dn, p_rbkey_new); + } + + return DPP_OK; +} + +DPP_STATUS dpp_hash_set_crc_key(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + DPP_HASH_ENTRY *p_entry, ZXIC_UINT8 *p_temp_key) +{ + ZXIC_UINT32 key_by_size = 0; + ZXIC_UINT8 temp_tbl_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_entry); + ZXIC_COMM_CHECK_POINT(p_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_temp_key); + + key_by_size = p_hash_entry_cfg->key_by_size; + ZXIC_COMM_MEMCPY(p_temp_key, p_entry->p_key, key_by_size); + + /* 约定: 参与CRC运算的key, 需要将tbl_id的前面补3bit的0,然后放到actu_key的后面. 
*/ + temp_tbl_id = (*p_temp_key) & 0x1F; + memmove(p_temp_key, p_temp_key + 1, key_by_size - HASH_KEY_CTR_SIZE); + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT( + DEV_ID(dev), key_by_size, HASH_KEY_CTR_SIZE); + p_temp_key[key_by_size - HASH_KEY_CTR_SIZE] = temp_tbl_id; + + return DPP_OK; +} + +DPP_STATUS dpp_hash_insert_ddr(DPP_DEV_T *dev, HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, ZXIC_UINT8 *p_end_flag) +{ + DPP_HASH_CFG *p_hash_cfg = NULL; + ZXIC_UINT8 bulk_id = 0; + ZXIC_UINT8 key_type = 0; + ZXIC_UINT8 table_id = 0; + ZXIC_UINT32 key_by_size = 0; + ZXIC_UINT32 crc_value = 0; + ZXIC_UINT32 item_idx = 0xFFFFFFFF; /* -1 */ + ZXIC_UINT32 item_type = 0; + HASH_DDR_CFG *p_ddr_cfg = NULL; + SE_ITEM_CFG *p_item = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_new = NULL; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_temp_key); + ZXIC_COMM_CHECK_POINT(p_end_flag); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + bulk_id = p_hash_entry_cfg->bulk_id; + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_NUM - 1); + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[bulk_id]; + ZXIC_COMM_CHECK_POINT(p_ddr_cfg); + table_id = p_hash_entry_cfg->table_id; + ZXIC_COMM_CHECK_INDEX_UPPER(table_id, HASH_TBL_ID_NUM - 1); + p_rbkey_new = p_hash_entry_cfg->p_rbkey_new; + ZXIC_COMM_CHECK_POINT(p_rbkey_new); + + key_type = p_hash_entry_cfg->key_type; + if ((HASH_KEY_512b == key_type) && + (DDR_WIDTH_256b == p_ddr_cfg->width_mode)) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "ErrorCode[0x%x]: Hash DDR width mode is not match to the key type.\n", + DPP_HASH_RC_DDR_WIDTH_MODE_ERR); + return DPP_HASH_RC_DDR_WIDTH_MODE_ERR; + } + + key_by_size = p_hash_entry_cfg->key_by_size; + crc_value = p_hash_cfg->p_hash32_fun(p_temp_key, key_by_size, + p_ddr_cfg->hash_ddr_arg); + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "hash ddr arg is: 0x%x.crc_value is 0x%x\n", + 
p_ddr_cfg->hash_ddr_arg, crc_value); /* t */ + item_idx = crc_value % p_ddr_cfg->item_num; /*按照256bit进行划分*/ + item_type = ITEM_DDR_256; + if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + item_idx = + crc_value % + p_ddr_cfg->item_num; /* 512b模式时,item_num已经在bulk初始化做过转换 ,按照512bit进行划分*/ + item_type = ITEM_DDR_512; + } + + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), "Hash insert in ITEM_DDR_%s, item_idx is: 0x%x.\n", + ((item_type == ITEM_DDR_256) ? "256" : "512"), item_idx); + + ZXIC_COMM_CHECK_INDEX_UPPER( + item_idx, + p_ddr_cfg->item_num); /* modify coverity yinxh 2021.03.10*/ + p_item = p_ddr_cfg->p_item_array[item_idx]; + if (NULL == p_item) { + p_item = (SE_ITEM_CFG *)ZXIC_COMM_MALLOC(sizeof(SE_ITEM_CFG)); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_item); + ZXIC_COMM_MEMSET(p_item, 0, sizeof(SE_ITEM_CFG)); + p_ddr_cfg->p_item_array[item_idx] = p_item; + } + + rc = dpp_hash_insrt_to_item(p_hash_cfg, p_hash_entry_cfg->p_rbkey_new, + p_item, item_idx, item_type, key_type); + + if (DPP_HASH_RC_ITEM_FULL != rc) { + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_insrt_to_item"); + *p_end_flag = 1; +#ifdef HASH_ENTRY_STAT + p_hash_cfg->hash_stat.insert_ddr++; + p_hash_cfg->hash_stat.insert_table[table_id].ddr++; +#endif + /* calc the item hardware address in DDR */ + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), p_ddr_cfg->hw_baddr, item_idx); + p_item->hw_addr = + GET_HASH_DDR_HW_ADDR(p_ddr_cfg->hw_baddr, item_idx); + p_item->bulk_id = p_hash_entry_cfg->bulk_id; + } + +#if OBTAIN_CONFLICT_KEY + /* debug start 获取冲突键值*/ + if (DPP_HASH_RC_ITEM_FULL == rc) { + p_item->hw_addr = + GET_HASH_DDR_HW_ADDR(p_ddr_cfg->hw_baddr, item_idx); + ZXIC_COMM_PRINT( + "ddr conflict item_idx is:0x%x,p_item->hw_addr is:0x%x\n", + item_idx, p_item->hw_addr); + ZXIC_COMM_PRINT("ddr conflict key is:"); + + for (index = 0; index < 10; index++) { + ZXIC_COMM_PRINT("0x%02x ", p_rbkey_new->key[index]); + } + + ZXIC_COMM_PRINT("\n"); + } + + /* debug end*/ +#endif + + return 
DPP_OK; +} + +DPP_STATUS dpp_hash_insert_zcell(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, ZXIC_UINT8 *p_end_flag) +{ + ZXIC_UINT8 bulk_id = 0; + D_NODE *p_zcell_dn = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 pre_zblk_idx = 0xFFFFFFFF; /* -1; */ + SE_ITEM_CFG *p_item = NULL; + ZXIC_UINT32 item_idx = 0xFFFFFFFF; /* -1 */ + ZXIC_UINT32 item_type = 0; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 crc_value = 0; + ZXIC_UINT8 table_id = 0; + DPP_HASH_CFG *p_hash_cfg = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_temp_key); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_end_flag); + + /* if insert into DDR is fail, insert into ZCAM. */ + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), "insert zcell start\n"); + bulk_id = p_hash_entry_cfg->bulk_id; + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_NUM - 1); + table_id = p_hash_entry_cfg->table_id; + ZXIC_COMM_CHECK_INDEX_UPPER(table_id, HASH_TBL_ID_NUM - 1); + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + p_zcell_dn = p_hash_cfg->hash_shareram.zcell_free_list.p_next; + + while (p_zcell_dn) { + p_zcell = (SE_ZCELL_CFG *)p_zcell_dn->data; + ZXIC_COMM_CHECK_POINT(p_zcell); + + /* 只在hash引擎开启zcell独占模式下,才进行相关的处理 */ + if (((p_zcell->flag & DPP_ZCELL_FLAG_IS_MONO) && + (p_zcell->bulk_id != bulk_id)) || + ((!(p_zcell->flag & DPP_ZCELL_FLAG_IS_MONO)) && + (p_hash_cfg->bulk_ram_mono[bulk_id]))) { + p_zcell_dn = p_zcell_dn->next; + continue; + } + + zblk_idx = GET_ZBLK_IDX(p_zcell->zcell_idx); + ZXIC_COMM_CHECK_INDEX_UPPER(zblk_idx, SE_ZBLK_NUM - 1); + p_zblk = &(p_se_cfg->zblk_info[zblk_idx]); + if (zblk_idx != pre_zblk_idx) { + pre_zblk_idx = zblk_idx; + crc_value = p_hash_cfg->p_hash16_fun( + p_temp_key, p_hash_entry_cfg->key_by_size, + p_zblk->hash_arg); + } + + 
ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "zblk_idx is [0x%x],p_zblk->hash_arg is [0x%x],crc_value is [0x%x]\n", + zblk_idx, p_zblk->hash_arg, crc_value); /* t */ + + zcell_id = GET_ZCELL_IDX(p_zcell->zcell_idx); + item_idx = GET_ZCELL_CRC_VAL(zcell_id, crc_value); + ZXIC_COMM_CHECK_INDEX_UPPER(item_idx, SE_RAM_DEPTH - 1); + p_item = &(p_zcell->item_info[item_idx]); + item_type = ITEM_RAM; + + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), "zcell_id is [0x%x],item_idx is [0x%x]\n", + zcell_id, item_idx); /* t */ + + rc = dpp_hash_insrt_to_item(p_hash_cfg, + p_hash_entry_cfg->p_rbkey_new, + p_item, item_idx, item_type, + p_hash_entry_cfg->key_type); + + if (DPP_HASH_RC_ITEM_FULL == rc) { + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "The item is full, check next. \n"); +#if OBTAIN_CONFLICT_KEY + + /* debug start 获取冲突键值*/ + if (DPP_HASH_RC_ITEM_FULL == rc) { + p_item->hw_addr = ZBLK_ITEM_ADDR_CALC( + p_zcell->zcell_idx, item_idx); + ZXIC_COMM_PRINT( + "zcell conflict item_idx is:0x%x,p_item->hw_addr is:0x%x\n", + item_idx, p_item->hw_addr); + ZXIC_COMM_PRINT("zcell conflict key is:"); + + for (index = 0; index < 10; index++) { + ZXIC_COMM_PRINT( + "0x%x ", + p_rbkey_new->key[index]); + } + + ZXIC_COMM_PRINT("\n"); + } + + /* debug end*/ +#endif + } else { + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_hash_insrt_to_item"); + *p_end_flag = 1; +#ifdef HASH_ENTRY_STAT + p_hash_cfg->hash_stat.insert_zcell++; + p_hash_cfg->hash_stat.insert_table[table_id].zcell++; +#endif + + /* calc the item hardware address in ZCAM. 
*/ + p_item->hw_addr = ZBLK_ITEM_ADDR_CALC( + p_zcell->zcell_idx, item_idx); + /* debug start 获取冲突键值*/ + /* ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), "ZCAM hw_addr is [0x%x]\n", p_item->hw_addr); t*/ + /* ZXIC_COMM_PRINT("after ddr conflict item_idx is:%d,p_item->hw_addr is:%d \n", item_idx, p_item->hw_addr);*/ + /* ZXIC_COMM_PRINT("after ddr conflict zcell_id is [0x%x],zblk_idx is [0x%x]\n", zcell_id, zblk_idx); t;*/ + /* debug end*/ + break; + } + + p_zcell_dn = p_zcell_dn->next; + } + + return DPP_OK; +} + +DPP_STATUS dpp_hash_insert_zreg(DPP_DEV_T *dev, + HASH_ENTRY_CFG *p_hash_entry_cfg, + ZXIC_UINT8 *p_temp_key, ZXIC_UINT8 *p_end_flag) +{ + DPP_HASH_CFG *p_hash_cfg = NULL; + D_NODE *p_zblk_dn = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + SE_ZREG_CFG *p_zreg = NULL; + SE_ITEM_CFG *p_item = NULL; + ZXIC_UINT8 reg_index = 0; + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT8 bulk_id = 0; + ZXIC_UINT32 item_idx = 0xFFFFFFFF; /* -1; */ + ZXIC_UINT32 item_type = 0; + ZXIC_UINT32 table_id = 0; + DPP_HASH_RBKEY_INFO *p_rbkey_new = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_hash_entry_cfg); + ZXIC_COMM_CHECK_POINT(p_temp_key); + ZXIC_COMM_CHECK_POINT(p_end_flag); + + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), "insert zreg start\n"); /* t */ + bulk_id = p_hash_entry_cfg->bulk_id; + ZXIC_COMM_CHECK_INDEX_UPPER(bulk_id, HASH_BULK_NUM - 1); + table_id = p_hash_entry_cfg->table_id; + ZXIC_COMM_CHECK_INDEX_UPPER(table_id, HASH_TBL_ID_NUM - 1); + p_rbkey_new = p_hash_entry_cfg->p_rbkey_new; + ZXIC_COMM_CHECK_POINT(p_rbkey_new); + + p_hash_cfg = p_hash_entry_cfg->p_hash_cfg; + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblk_idx = p_zblk->zblk_idx; + + for (reg_index = 0; reg_index < SE_ZREG_NUM; reg_index++) { + p_zreg = &(p_zblk->zreg_info[reg_index]); + + if (((p_zreg->flag & DPP_ZREG_FLAG_IS_MONO) && + (p_zreg->bulk_id != bulk_id)) || + ((!(p_zreg->flag & 
DPP_ZREG_FLAG_IS_MONO)) && + (p_hash_cfg->bulk_ram_mono[bulk_id]))) { + continue; + } + + p_item = &(p_zblk->zreg_info[reg_index].item_info); + item_type = ITEM_REG; + item_idx = reg_index; + rc = dpp_hash_insrt_to_item( + p_hash_cfg, p_hash_entry_cfg->p_rbkey_new, + p_item, item_idx, item_type, + p_hash_entry_cfg->key_type); + + if (DPP_HASH_RC_ITEM_FULL == rc) { + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "The item is full, check next. \n"); +#if OBTAIN_CONFLICT_KEY + /* debug start 获取冲突键值*/ + ZXIC_COMM_PRINT( + "zreg_mono conflict,not inserted,reg_num is:0x%x,zblk_idx is:0x%x,p_item->hw_addr is:0x%x\n", + reg_index, zblk_idx, p_item->hw_addr); + ZXIC_COMM_PRINT("zreg_mono conflict,key is:"); + + for (index = 0; index < 8; index++) { + ZXIC_COMM_PRINT( + "0x%x ", + p_rbkey_new->key[index]); + } + + ZXIC_COMM_PRINT("\n"); + /* debug end*/ +#endif + } else { + ZXIC_COMM_CHECK_DEV_RC( + DEV_ID(dev), rc, + "dpp_hash_insrt_to_item"); + *p_end_flag = 1; +#ifdef HASH_ENTRY_STAT + p_hash_cfg->hash_stat.insert_zreg++; + p_hash_cfg->hash_stat.insert_table[table_id] + .zreg++; +#endif + + /* calc the item hardware address in ZBLK Reg. 
*/ + p_item->hw_addr = ZBLK_HASH_LIST_REG_ADDR_CALC( + zblk_idx, reg_index); +#if OBTAIN_CONFLICT_KEY + /* debug start 获取冲突键值*/ + ZXIC_COMM_PRINT( + "zreg_mono conflict,inserted,reg_num is:0x%x,zblk_idx is:0x%x,p_item->hw_addr is:0x%x\n", + reg_index, zblk_idx, p_item->hw_addr); + ZXIC_COMM_PRINT("zreg_mono conflict,key is:"); + + for (index = 0; index < 8; index++) { + ZXIC_COMM_PRINT( + "0x%x ", + p_rbkey_new->key[index]); + } + + ZXIC_COMM_PRINT("\n"); + /* debug end*/ +#endif + break; + } + } + + if (*p_end_flag) { + break; + } + + p_zblk_dn = p_zblk_dn->next; + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param p_zblk_cfg +* +* @return +* @remark 无 +* @see +* @author XCX @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_hash_zblkcfg_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + SE_ZBLK_CFG *p_zblk_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT8 ram_buf[SE_RAM_WIDTH / 8] = { 0 }; + + ZXIC_UINT32 dev_id = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 hw_addr = 0; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_zblk_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + + dev_id = p_se_cfg->dev_id; + + /* write hash type*/ + rc = zxic_comm_write_bits_ex(ram_buf, SE_RAM_WIDTH, 1, + DPP_SE_ZBLK_SERVICE_TYPE_START, + DPP_SE_ZBLK_SERVICE_TYPE_START - + DPP_SE_ZBLK_SERVICE_TYPE_END + 1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_write_bits_ex"); + + /* write hash channel*/ + rc = zxic_comm_write_bits_ex( + ram_buf, SE_RAM_WIDTH, fun_id, DPP_SE_ZBLK_HASH_CHAN_START, + DPP_SE_ZBLK_HASH_CHAN_START - DPP_SE_ZBLK_HASH_CHAN_END + 1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_write_bits_ex"); + + /* write enable flag*/ + rc = zxic_comm_write_bits_ex( + ram_buf, SE_RAM_WIDTH, 1, DPP_SE_ZBLK_HW_POS_EN_START, + DPP_SE_ZBLK_HW_POS_EN_START - 
DPP_SE_ZBLK_HW_POS_EN_END + 1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_write_bits_ex"); + +#if DPP_WRITE_FILE_EN + hw_addr = ZBLK_REG_ADDR_CALC(p_zblk_cfg->zblk_idx, 0); + + rc = dpp_data_w2f(hw_addr, ram_buf, FILE_TYPE_ZBLK_CFG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + +#ifdef DPP_FLOW_HW_INIT + rc = dpp_se_zblk_serv_cfg_set(&p_se_cfg->dev, p_zblk_cfg->zblk_idx, + ALG_ZBLK_SERV_HASH, fun_id, 1); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_zblk_serv_cfg_set"); +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param hash_id hash引擎号 +* @param bulk_id +* +* @return +* @remark 无 +* @see +* @author XCX @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_hash_bulk_mono_flags_write(DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 hash_id, + ZXIC_UINT32 bulk_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 hash0_mono_flag = 0; + ZXIC_UINT32 hash1_mono_flag = 0; + ZXIC_UINT32 hash2_mono_flag = 0; + ZXIC_UINT32 hash3_mono_flag = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 hash0_mono_flag_file = 0; + ZXIC_UINT32 hash1_mono_flag_file = 0; + ZXIC_UINT32 hash2_mono_flag_file = 0; + ZXIC_UINT32 hash3_mono_flag_file = 0; + ZXIC_UINT32 hash_mono_flags_file_reg = 0; + ZXIC_UINT32 hash_mono_flags_file_reg_addr = 0; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(hash_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(bulk_id, HASH_BULK_ID_MIN, HASH_BULK_ID_MAX); + + dev_id = p_se_cfg->dev_id; + + rc = dpp_se_hash_zcam_mono_flags_get(&p_se_cfg->dev, &hash0_mono_flag, + &hash1_mono_flag, &hash2_mono_flag, + &hash3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_zcam_mono_flags_get"); + + switch (hash_id) { + case 0: { + hash0_mono_flag |= (1U << bulk_id); + break; + } + + case 1: { + hash1_mono_flag |= (1U << 
bulk_id); + break; + } + + case 2: { + hash2_mono_flag |= (1U << bulk_id); + break; + } + + case 3: { + hash3_mono_flag |= (1U << bulk_id); + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n hash id is out of rang."); + ZXIC_COMM_ASSERT(0); + } + } + + rc = dpp_se_hash_zcam_mono_flags_set(&p_se_cfg->dev, hash0_mono_flag, + hash1_mono_flag, hash2_mono_flag, + hash3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_zcam_mono_flags_set"); + +#if DPP_WRITE_FILE_EN + hash_mono_flags_file_reg_addr = 0x1c4; + + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + hash_mono_flags_file_reg = p_hash_file_reg->hash_mono_flags_file_reg; + + ZXIC_COMM_UINT32_GET_BITS(hash0_mono_flag_file, + hash_mono_flags_file_reg, + HASH0_MONO_FLAG_BT_START, + HASH0_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash1_mono_flag_file, + hash_mono_flags_file_reg, + HASH1_MONO_FLAG_BT_START, + HASH1_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash2_mono_flag_file, + hash_mono_flags_file_reg, + HASH2_MONO_FLAG_BT_START, + HASH2_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash3_mono_flag_file, + hash_mono_flags_file_reg, + HASH3_MONO_FLAG_BT_START, + HASH3_MONO_FLAG_BT_WIDTH); + + switch (hash_id) { + case 0: { + hash0_mono_flag_file |= (1U << bulk_id); + break; + } + + case 1: { + hash1_mono_flag_file |= (1U << bulk_id); + break; + } + + case 2: { + hash2_mono_flag_file |= (1U << bulk_id); + break; + } + + case 3: { + hash3_mono_flag_file |= (1U << bulk_id); + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n hash id is out of rang."); + ZXIC_COMM_ASSERT(0); + } + } + + ZXIC_COMM_UINT32_WRITE_BITS(hash_mono_flags_file_reg, + hash0_mono_flag_file, + HASH0_MONO_FLAG_BT_START, + HASH0_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_mono_flags_file_reg, + hash1_mono_flag_file, + HASH1_MONO_FLAG_BT_START, + HASH1_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_mono_flags_file_reg, + hash2_mono_flag_file, + HASH2_MONO_FLAG_BT_START, + 
HASH2_MONO_FLAG_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_mono_flags_file_reg, + hash3_mono_flag_file, + HASH3_MONO_FLAG_BT_START, + HASH3_MONO_FLAG_BT_WIDTH); + + p_hash_file_reg->hash_mono_flags_file_reg = hash_mono_flags_file_reg; + + rc = dpp_data_w2f(p_se_cfg->reg_base + hash_mono_flags_file_reg_addr, + &p_hash_file_reg->hash_mono_flags_file_reg, + FILE_TYPE_REG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param p_zcell_cfg +* +* @return +* @remark 无 +* @see +* @author XCX @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_hash_zcell_mono_write(DPP_SE_CFG *p_se_cfg, + SE_ZCELL_CFG *p_zcell_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 zcell0_bulk_id = 0; + ZXIC_UINT32 zcell0_mono_flag = 0; + ZXIC_UINT32 zcell1_bulk_id = 0; + ZXIC_UINT32 zcell1_mono_flag = 0; + ZXIC_UINT32 zcell2_bulk_id = 0; + ZXIC_UINT32 zcell2_mono_flag = 0; + ZXIC_UINT32 zcell3_bulk_id = 0; + ZXIC_UINT32 zcell3_mono_flag = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 zcell0_bulk_id_file = 0; + ZXIC_UINT32 zcell0_mono_flag_file = 0; + ZXIC_UINT32 zcell1_bulk_id_file = 0; + ZXIC_UINT32 zcell1_mono_flag_file = 0; + ZXIC_UINT32 zcell2_bulk_id_file = 0; + ZXIC_UINT32 zcell2_mono_flag_file = 0; + ZXIC_UINT32 zcell3_bulk_id_file = 0; + ZXIC_UINT32 zcell3_mono_flag_file = 0; + ZXIC_UINT32 zcell_mono_file_ram_addr = 0; + + ZXIC_UINT8 *zcell_mono_file_ram = NULL; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_zcell_cfg); + + dev_id = p_se_cfg->dev_id; + + zblk_idx = GET_ZBLK_IDX(p_zcell_cfg->zcell_idx); + zcell_id = p_zcell_cfg->zcell_idx & 0x3; + rc = dpp_se_zcell_mono_cfg_get(&p_se_cfg->dev, zblk_idx, + &zcell0_bulk_id, &zcell0_mono_flag, + 
&zcell1_bulk_id, &zcell1_mono_flag, + &zcell2_bulk_id, &zcell2_mono_flag, + &zcell3_bulk_id, &zcell3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_zcell_mono_cfg_get"); + + switch (zcell_id) { + case 0: { + zcell0_bulk_id = p_zcell_cfg->bulk_id; + zcell0_mono_flag = 1; + break; + } + + case 1: { + zcell1_bulk_id = p_zcell_cfg->bulk_id; + zcell1_mono_flag = 1; + break; + } + + case 2: { + zcell2_bulk_id = p_zcell_cfg->bulk_id; + zcell2_mono_flag = 1; + break; + } + + case 3: { + zcell3_bulk_id = p_zcell_cfg->bulk_id; + zcell3_mono_flag = 1; + break; + } + } + + rc = dpp_se_zcell_mono_cfg_set(&p_se_cfg->dev, zblk_idx, zcell0_bulk_id, + zcell0_mono_flag, zcell1_bulk_id, + zcell1_mono_flag, zcell2_bulk_id, + zcell2_mono_flag, zcell3_bulk_id, + zcell3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_zcell_mono_cfg_set"); + +#if DPP_WRITE_FILE_EN + zcell_mono_file_ram_addr = ZBLK_REG_ADDR_CALC(zblk_idx, 0x14); + + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + zcell_mono_file_ram = p_hash_file_reg->zcell_mono_file_ram; + + if (0 == zcell_id) { + ZXIC_COMM_MEMSET(zcell_mono_file_ram, 0, (SE_RAM_WIDTH / 8)); + } + + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell0_bulk_id_file, ZCELL0_BULK_ID_BT_START, + ZCELL0_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell0_mono_flag_file, + ZCELL0_MONO_FLAG_BT_START, + ZCELL0_MONO_FLAG_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell1_bulk_id_file, ZCELL1_BULK_ID_BT_START, + ZCELL1_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell1_mono_flag_file, + ZCELL1_MONO_FLAG_BT_START, + ZCELL1_MONO_FLAG_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell2_bulk_id_file, ZCELL2_BULK_ID_BT_START, + ZCELL2_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell2_mono_flag_file, + ZCELL2_MONO_FLAG_BT_START, + ZCELL2_MONO_FLAG_BT_WIDTH); + 
zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell3_bulk_id_file, ZCELL3_BULK_ID_BT_START, + ZCELL3_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + &zcell3_mono_flag_file, + ZCELL3_MONO_FLAG_BT_START, + ZCELL3_MONO_FLAG_BT_WIDTH); + + switch (zcell_id) { + case 0: { + zcell0_bulk_id_file = p_zcell_cfg->bulk_id; + zcell0_mono_flag_file = 1; + break; + } + + case 1: { + zcell1_bulk_id_file = p_zcell_cfg->bulk_id; + zcell1_mono_flag_file = 1; + break; + } + + case 2: { + zcell2_bulk_id_file = p_zcell_cfg->bulk_id; + zcell2_mono_flag_file = 1; + break; + } + + case 3: { + zcell3_bulk_id_file = p_zcell_cfg->bulk_id; + zcell3_mono_flag_file = 1; + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, + "\n zcell id is out of rang."); + ZXIC_COMM_ASSERT(0); + } + } + + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell0_bulk_id_file, ZCELL0_BULK_ID_BT_START, + ZCELL0_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell0_mono_flag_file, + ZCELL0_MONO_FLAG_BT_START, + ZCELL0_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell1_bulk_id_file, ZCELL1_BULK_ID_BT_START, + ZCELL1_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell1_mono_flag_file, + ZCELL1_MONO_FLAG_BT_START, + ZCELL1_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell2_bulk_id_file, ZCELL2_BULK_ID_BT_START, + ZCELL2_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell2_mono_flag_file, + ZCELL2_MONO_FLAG_BT_START, + ZCELL2_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell3_bulk_id_file, ZCELL3_BULK_ID_BT_START, + ZCELL3_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zcell_mono_file_ram, SE_RAM_WIDTH, + zcell3_mono_flag_file, + ZCELL3_MONO_FLAG_BT_START, + ZCELL3_MONO_FLAG_BT_WIDTH); + + rc = 
dpp_data_w2f(zcell_mono_file_ram_addr, + &p_hash_file_reg->zcell_mono_file_ram, + FILE_TYPE_ZBLK_CFG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param bulk_id +* @param zblk_idx +* @param zreg_id +* +* @return +* @remark 无 +* @see +* @author XCX @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_hash_zreg_mono_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 bulk_id, + ZXIC_UINT32 zblk_idx, ZXIC_UINT32 zreg_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 zreg0_bulk_id = 0; + ZXIC_UINT32 zreg0_mono_flag = 0; + ZXIC_UINT32 zreg1_bulk_id = 0; + ZXIC_UINT32 zreg1_mono_flag = 0; + ZXIC_UINT32 zreg2_bulk_id = 0; + ZXIC_UINT32 zreg2_mono_flag = 0; + ZXIC_UINT32 zreg3_bulk_id = 0; + ZXIC_UINT32 zreg3_mono_flag = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 zreg0_bulk_id_file = 0; + ZXIC_UINT32 zreg0_mono_flag_file = 0; + ZXIC_UINT32 zreg1_bulk_id_file = 0; + ZXIC_UINT32 zreg1_mono_flag_file = 0; + ZXIC_UINT32 zreg2_bulk_id_file = 0; + ZXIC_UINT32 zreg2_mono_flag_file = 0; + ZXIC_UINT32 zreg3_bulk_id_file = 0; + ZXIC_UINT32 zreg3_mono_flag_file = 0; + ZXIC_UINT8 *zreg_mono_file_ram = NULL; + ZXIC_UINT32 zreg_mono_file_ram_addr = 0; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + dev_id = p_se_cfg->dev_id; + + rc = dpp_se_zreg_mono_cfg_get(&p_se_cfg->dev, zblk_idx, &zreg0_bulk_id, + &zreg0_mono_flag, &zreg1_bulk_id, + &zreg1_mono_flag, &zreg2_bulk_id, + &zreg2_mono_flag, &zreg3_bulk_id, + &zreg3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_zreg_mono_cfg_get"); + + switch (zreg_id) { + case 0: { + zreg0_bulk_id = bulk_id; + zreg0_mono_flag = 1; + break; + } + + case 1: { + zreg1_bulk_id = bulk_id; + zreg1_mono_flag = 1; + break; + } + + case 2: { + zreg2_bulk_id = bulk_id; + zreg2_mono_flag = 1; + break; + } + 
+ case 3: { + zreg3_bulk_id = bulk_id; + zreg3_mono_flag = 1; + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "\n zreg id is out of rang."); + ZXIC_COMM_ASSERT(0); + } + } + + rc = dpp_se_zreg_mono_cfg_set(&p_se_cfg->dev, zblk_idx, zreg0_bulk_id, + zreg0_mono_flag, zreg1_bulk_id, + zreg1_mono_flag, zreg2_bulk_id, + zreg2_mono_flag, zreg3_bulk_id, + zreg3_mono_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_zreg_mono_cfg_set"); + +#if DPP_WRITE_FILE_EN + zreg_mono_file_ram_addr = ZBLK_REG_ADDR_CALC(zblk_idx, 0x15); + + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + zreg_mono_file_ram = p_hash_file_reg->zreg_mono_file_ram; + + if (0 == zreg_id) { + ZXIC_COMM_MEMSET(zreg_mono_file_ram, 0, (SE_RAM_WIDTH / 8)); + } + + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg0_bulk_id_file, ZREG0_BULK_ID_BT_START, + ZREG0_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg0_mono_flag_file, ZREG0_MONO_FLAG_BT_START, + ZREG0_MONO_FLAG_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg1_bulk_id_file, ZREG1_BULK_ID_BT_START, + ZREG1_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg1_mono_flag_file, ZREG1_MONO_FLAG_BT_START, + ZREG1_MONO_FLAG_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg2_bulk_id_file, ZREG2_BULK_ID_BT_START, + ZREG2_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg2_mono_flag_file, ZREG2_MONO_FLAG_BT_START, + ZREG2_MONO_FLAG_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg3_bulk_id_file, ZREG3_BULK_ID_BT_START, + ZREG3_BULK_ID_BT_WIDTH); + zxic_comm_read_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + &zreg3_mono_flag_file, ZREG3_MONO_FLAG_BT_START, + ZREG3_MONO_FLAG_BT_WIDTH); + + switch (zreg_id) { + case 0: { + zreg0_bulk_id_file = bulk_id; + zreg0_mono_flag_file = 1; + break; + } + + case 1: { + zreg1_bulk_id_file = bulk_id; + 
zreg1_mono_flag_file = 1; + break; + } + + case 2: { + zreg2_bulk_id_file = bulk_id; + zreg2_mono_flag_file = 1; + break; + } + + case 3: { + zreg3_bulk_id_file = bulk_id; + zreg3_mono_flag_file = 1; + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, + "\n zcell id is out of rang."); + ZXIC_COMM_ASSERT(0); + } + } + + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg0_bulk_id_file, ZREG0_BULK_ID_BT_START, + ZREG0_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg0_mono_flag_file, ZREG0_MONO_FLAG_BT_START, + ZREG0_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg1_bulk_id_file, ZREG1_BULK_ID_BT_START, + ZREG1_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg1_mono_flag_file, ZREG1_MONO_FLAG_BT_START, + ZREG1_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg2_bulk_id_file, ZREG2_BULK_ID_BT_START, + ZREG2_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg2_mono_flag_file, ZREG2_MONO_FLAG_BT_START, + ZREG2_MONO_FLAG_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg3_bulk_id_file, ZREG3_BULK_ID_BT_START, + ZREG3_BULK_ID_BT_WIDTH); + zxic_comm_write_bits_ex(zreg_mono_file_ram, SE_RAM_WIDTH, + zreg3_mono_flag_file, ZREG3_MONO_FLAG_BT_START, + ZREG3_MONO_FLAG_BT_WIDTH); + + rc = dpp_data_w2f(zreg_mono_file_ram_addr, + &p_hash_file_reg->zreg_mono_file_ram, + FILE_TYPE_ZBLK_CFG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** hash ddr参数配置 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param bulk_id +* @param p_ddr_cfg +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/07/31 +************************************************************/ +DPP_STATUS dpp_hash_ext_cfg_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, 
+ ZXIC_UINT32 bulk_id, HASH_DDR_CFG *p_ddr_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 ext_mode = 0; + ZXIC_UINT32 ext_flag = 0; + ZXIC_UINT32 dev_id = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 ext_mode_file = 0; + ZXIC_UINT32 ext_flag_file = 0; + ZXIC_UINT32 ext_cfg_file_reg = 0; + ZXIC_UINT32 ext_cfg_file_reg_addr = 0; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_ddr_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, DPP_HASH_ID_MIN, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_INDEX(bulk_id, HASH_BULK_ID_MIN, HASH_BULK_ID_MAX); + + dev_id = p_se_cfg->dev_id; + + rc = dpp_se_hash_ext_cfg_get(&p_se_cfg->dev, fun_id, &ext_mode, + &ext_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_ext_cfg_get"); + + ext_flag = 1; + + if (p_ddr_cfg->width_mode == DDR_WIDTH_256b) { + ext_mode = (ext_mode & (~(1U << bulk_id))); + } else if (p_ddr_cfg->width_mode == DDR_WIDTH_512b) { + ext_mode = (ext_mode | (1U << bulk_id)); + } + +#if DPP_WRITE_FILE_EN + ext_cfg_file_reg_addr = 0xbc + fun_id * 4; + + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + ext_cfg_file_reg = p_hash_file_reg->ext_cfg_file_reg[fun_id]; + + ZXIC_COMM_UINT32_GET_BITS(ext_mode_file, ext_cfg_file_reg, + HASH_EXT_MODE_BT_START, + HASH_EXT_MODE_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(ext_flag_file, ext_cfg_file_reg, + HASH_EXT_FLAG_BT_START, + HASH_EXT_FLAG_BT_WIDTH); + + ext_flag_file = 1; + + if (DDR_WIDTH_256b == p_ddr_cfg->width_mode) { + ext_mode_file = (ext_mode_file & (~(1U << bulk_id))); + } else if (p_ddr_cfg->width_mode == DDR_WIDTH_512b) { + ext_mode_file = (ext_mode_file | (1U << bulk_id)); + } + + ZXIC_COMM_UINT32_WRITE_BITS(ext_cfg_file_reg, ext_mode_file, + HASH_EXT_MODE_BT_START, + HASH_EXT_MODE_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(ext_cfg_file_reg, ext_flag_file, + HASH_EXT_FLAG_BT_START, + HASH_EXT_FLAG_BT_WIDTH); + + p_hash_file_reg->ext_cfg_file_reg[fun_id] = ext_cfg_file_reg; + + rc = dpp_data_w2f(p_se_cfg->reg_base + 
ext_cfg_file_reg_addr, + &p_hash_file_reg->ext_cfg_file_reg[fun_id], + FILE_TYPE_REG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + rc = dpp_se_hash_ext_cfg_set(&p_se_cfg->dev, fun_id, ext_mode, + ext_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_ext_cfg_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* +* @return +* @remark 无 +* @see +* @author wcl @date 2014/07/31 +************************************************************/ +DPP_STATUS dpp_hash_ext_cfg_clr(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 ext_mode = 0; + ZXIC_UINT32 ext_flag = 0; + ZXIC_UINT32 dev_id = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 ddr_cfg_dat = 0; + ZXIC_UINT32 ext_cfg_reg_addr = 0; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, DPP_HASH_ID_MIN, DPP_HASH_ID_MAX); + + dev_id = p_se_cfg->dev_id; + +#if DPP_WRITE_FILE_EN + ext_cfg_reg_addr = 0xbc + fun_id * 4; + + rc = dpp_data_w2f(p_se_cfg->reg_base + ext_cfg_reg_addr, &ddr_cfg_dat, + FILE_TYPE_REG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + rc = dpp_se_hash_ext_cfg_set(&p_se_cfg->dev, fun_id, ext_mode, + ext_flag); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_ext_cfg_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** hash 业务表ddr深度配置 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param bulk_id +* @param p_ddr_cfg +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/01/28 +************************************************************/ +DPP_STATUS dpp_hash_tbl_depth_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, + HASH_DDR_CFG *p_ddr_cfg) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 ext_depth = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 hash_tbl0_depth = 0; + ZXIC_UINT32 hash_tbl1_depth = 0; + ZXIC_UINT32 
hash_tbl2_depth = 0; + ZXIC_UINT32 hash_tbl3_depth = 0; + ZXIC_UINT32 hash_tbl4_depth = 0; + ZXIC_UINT32 hash_tbl5_depth = 0; + ZXIC_UINT32 hash_tbl6_depth = 0; + ZXIC_UINT32 hash_tbl7_depth = 0; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 hash_tbl0_depth_file = 0; + ZXIC_UINT32 hash_tbl1_depth_file = 0; + ZXIC_UINT32 hash_tbl2_depth_file = 0; + ZXIC_UINT32 hash_tbl3_depth_file = 0; + ZXIC_UINT32 hash_tbl4_depth_file = 0; + ZXIC_UINT32 hash_tbl5_depth_file = 0; + ZXIC_UINT32 hash_tbl6_depth_file = 0; + ZXIC_UINT32 hash_tbl7_depth_file = 0; + ZXIC_UINT32 tbl03_depth_file_reg = 0; + ZXIC_UINT32 tbl47_depth_file_reg = 0; + ZXIC_UINT32 hash_tbl_depth_file_reg_addr = 0; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_UINT32 hash_tbl_depth_array[HASH_BULK_NUM] = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(p_ddr_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, DPP_HASH_ID_MIN, DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_INDEX(bulk_id, HASH_BULK_ID_MIN, HASH_BULK_ID_MAX); + + dev_id = p_se_cfg->dev_id; + + ext_depth = dpp_hash_ddr_depth_conv(p_ddr_cfg->item_num); + + if (((ZXIC_UINT32)1 << ext_depth) != p_ddr_cfg->item_num) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ErrCode[ 0x%x ]: Hash DDR item num:[ %d ] is not N power of 2.\n", + DPP_HASH_RC_INVALID_PARA, p_ddr_cfg->item_num); + ZXIC_COMM_ASSERT(0); + return DPP_HASH_RC_INVALID_PARA; + } + + rc = dpp_se_hash_tbl_depth_get(&p_se_cfg->dev, fun_id, &hash_tbl0_depth, + &hash_tbl1_depth, &hash_tbl2_depth, + &hash_tbl3_depth, &hash_tbl4_depth, + &hash_tbl5_depth, &hash_tbl6_depth, + &hash_tbl7_depth); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_tbl_depth_get"); + + hash_tbl_depth_array[0] = hash_tbl0_depth; + hash_tbl_depth_array[1] = hash_tbl1_depth; + hash_tbl_depth_array[2] = hash_tbl2_depth; + hash_tbl_depth_array[3] = hash_tbl3_depth; + hash_tbl_depth_array[4] = hash_tbl4_depth; + hash_tbl_depth_array[5] = hash_tbl5_depth; + hash_tbl_depth_array[6] = hash_tbl6_depth; + hash_tbl_depth_array[7] = 
hash_tbl7_depth; + + hash_tbl_depth_array[bulk_id] = ext_depth; + + hash_tbl0_depth = hash_tbl_depth_array[0]; + hash_tbl1_depth = hash_tbl_depth_array[1]; + hash_tbl2_depth = hash_tbl_depth_array[2]; + hash_tbl3_depth = hash_tbl_depth_array[3]; + hash_tbl4_depth = hash_tbl_depth_array[4]; + hash_tbl5_depth = hash_tbl_depth_array[5]; + hash_tbl6_depth = hash_tbl_depth_array[6]; + hash_tbl7_depth = hash_tbl_depth_array[7]; + +#if DPP_WRITE_FILE_EN + hash_tbl_depth_file_reg_addr = 0x01a4 + fun_id * 8; + + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + tbl03_depth_file_reg = p_hash_file_reg->hash_depth_file_regs[fun_id] + .tbl03_depth_file_reg; + tbl47_depth_file_reg = p_hash_file_reg->hash_depth_file_regs[fun_id] + .tbl47_depth_file_reg; + + ZXIC_COMM_UINT32_GET_BITS(hash_tbl0_depth_file, tbl03_depth_file_reg, + HASH_TBL0_DEPTH_BT_START, + HASH_TBL0_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl1_depth_file, tbl03_depth_file_reg, + HASH_TBL1_DEPTH_BT_START, + HASH_TBL1_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl2_depth_file, tbl03_depth_file_reg, + HASH_TBL2_DEPTH_BT_START, + HASH_TBL2_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl3_depth_file, tbl03_depth_file_reg, + HASH_TBL3_DEPTH_BT_START, + HASH_TBL3_DEPTH_BT_WIDTH); + + ZXIC_COMM_UINT32_GET_BITS(hash_tbl4_depth_file, tbl47_depth_file_reg, + HASH_TBL4_DEPTH_BT_START, + HASH_TBL4_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl5_depth_file, tbl47_depth_file_reg, + HASH_TBL5_DEPTH_BT_START, + HASH_TBL5_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl6_depth_file, tbl47_depth_file_reg, + HASH_TBL6_DEPTH_BT_START, + HASH_TBL6_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(hash_tbl7_depth_file, tbl47_depth_file_reg, + HASH_TBL7_DEPTH_BT_START, + HASH_TBL7_DEPTH_BT_WIDTH); + + hash_tbl_depth_array[0] = hash_tbl0_depth_file; + hash_tbl_depth_array[1] = hash_tbl1_depth_file; + hash_tbl_depth_array[2] = hash_tbl2_depth_file; + hash_tbl_depth_array[3] = hash_tbl3_depth_file; + 
hash_tbl_depth_array[4] = hash_tbl4_depth_file; + hash_tbl_depth_array[5] = hash_tbl5_depth_file; + hash_tbl_depth_array[6] = hash_tbl6_depth_file; + hash_tbl_depth_array[7] = hash_tbl7_depth_file; + + hash_tbl_depth_array[bulk_id] = ext_depth; + + hash_tbl0_depth_file = hash_tbl_depth_array[0]; + hash_tbl1_depth_file = hash_tbl_depth_array[1]; + hash_tbl2_depth_file = hash_tbl_depth_array[2]; + hash_tbl3_depth_file = hash_tbl_depth_array[3]; + hash_tbl4_depth_file = hash_tbl_depth_array[4]; + hash_tbl5_depth_file = hash_tbl_depth_array[5]; + hash_tbl6_depth_file = hash_tbl_depth_array[6]; + hash_tbl7_depth_file = hash_tbl_depth_array[7]; + + ZXIC_COMM_UINT32_WRITE_BITS(tbl03_depth_file_reg, hash_tbl0_depth_file, + HASH_TBL0_DEPTH_BT_START, + HASH_TBL0_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl03_depth_file_reg, hash_tbl1_depth_file, + HASH_TBL1_DEPTH_BT_START, + HASH_TBL1_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl03_depth_file_reg, hash_tbl2_depth_file, + HASH_TBL2_DEPTH_BT_START, + HASH_TBL2_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl03_depth_file_reg, hash_tbl3_depth_file, + HASH_TBL3_DEPTH_BT_START, + HASH_TBL3_DEPTH_BT_WIDTH); + + ZXIC_COMM_UINT32_WRITE_BITS(tbl47_depth_file_reg, hash_tbl4_depth_file, + HASH_TBL4_DEPTH_BT_START, + HASH_TBL4_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl47_depth_file_reg, hash_tbl5_depth_file, + HASH_TBL5_DEPTH_BT_START, + HASH_TBL5_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl47_depth_file_reg, hash_tbl6_depth_file, + HASH_TBL6_DEPTH_BT_START, + HASH_TBL6_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(tbl47_depth_file_reg, hash_tbl7_depth_file, + HASH_TBL7_DEPTH_BT_START, + HASH_TBL7_DEPTH_BT_WIDTH); + + p_hash_file_reg->hash_depth_file_regs[fun_id].tbl03_depth_file_reg = + tbl03_depth_file_reg; + p_hash_file_reg->hash_depth_file_regs[fun_id].tbl47_depth_file_reg = + tbl47_depth_file_reg; + + rc = dpp_data_w2f(p_se_cfg->reg_base + hash_tbl_depth_file_reg_addr, + &tbl03_depth_file_reg, 
FILE_TYPE_REG); + rc = dpp_data_w2f(p_se_cfg->reg_base + hash_tbl_depth_file_reg_addr + 4, + &tbl47_depth_file_reg, FILE_TYPE_REG) + + rc; + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + rc = dpp_se_hash_tbl_depth_set(&p_se_cfg->dev, fun_id, hash_tbl0_depth, + hash_tbl1_depth, hash_tbl2_depth, + hash_tbl3_depth, hash_tbl4_depth, + hash_tbl5_depth, hash_tbl6_depth, + hash_tbl7_depth); + + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_tbl_depth_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** hash 业务表ddr深度恢复默认值 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/01/28 +************************************************************/ +DPP_STATUS dpp_hash_tbl_depth_clr(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 hash_tbl0_depth = 0x12; + ZXIC_UINT32 hash_tbl1_depth = 0x12; + ZXIC_UINT32 hash_tbl2_depth = 0x12; + ZXIC_UINT32 hash_tbl3_depth = 0x12; + ZXIC_UINT32 hash_tbl4_depth = 0x12; + ZXIC_UINT32 hash_tbl5_depth = 0x12; + ZXIC_UINT32 hash_tbl6_depth = 0x12; + ZXIC_UINT32 hash_tbl7_depth = 0x12; + +#if DPP_WRITE_FILE_EN + ZXIC_UINT32 hash_tbl_depth_reg_file_addr = 0; + ZXIC_UINT32 hash_tbl03_depth_reg_file_dat = 0; + ZXIC_UINT32 hash_tbl47_depth_reg_file_dat = 0; +#endif + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, DPP_HASH_ID_MIN, DPP_HASH_ID_MAX); + + dev_id = p_se_cfg->dev_id; + +#if DPP_WRITE_FILE_EN + hash_tbl_depth_reg_file_addr = 0x01a4 + fun_id * 8; + + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl03_depth_reg_file_dat, + hash_tbl0_depth, HASH_TBL0_DEPTH_BT_START, + HASH_TBL0_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl03_depth_reg_file_dat, + hash_tbl1_depth, HASH_TBL1_DEPTH_BT_START, + HASH_TBL1_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl03_depth_reg_file_dat, + hash_tbl2_depth, HASH_TBL2_DEPTH_BT_START, + 
HASH_TBL2_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl03_depth_reg_file_dat, + hash_tbl3_depth, HASH_TBL3_DEPTH_BT_START, + HASH_TBL3_DEPTH_BT_WIDTH); + + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl47_depth_reg_file_dat, + hash_tbl4_depth, HASH_TBL4_DEPTH_BT_START, + HASH_TBL4_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl47_depth_reg_file_dat, + hash_tbl5_depth, HASH_TBL5_DEPTH_BT_START, + HASH_TBL5_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl47_depth_reg_file_dat, + hash_tbl6_depth, HASH_TBL6_DEPTH_BT_START, + HASH_TBL6_DEPTH_BT_WIDTH); + ZXIC_COMM_UINT32_WRITE_BITS(hash_tbl47_depth_reg_file_dat, + hash_tbl7_depth, HASH_TBL7_DEPTH_BT_START, + HASH_TBL7_DEPTH_BT_WIDTH); + + rc = dpp_data_w2f(p_se_cfg->reg_base + hash_tbl_depth_reg_file_addr, + &hash_tbl03_depth_reg_file_dat, FILE_TYPE_REG); + rc = dpp_data_w2f(p_se_cfg->reg_base + hash_tbl_depth_reg_file_addr + 4, + &hash_tbl47_depth_reg_file_dat, FILE_TYPE_REG) + + rc; + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_data_w2f"); +#endif + + rc = dpp_se_hash_tbl_depth_set(&p_se_cfg->dev, fun_id, hash_tbl0_depth, + hash_tbl1_depth, hash_tbl2_depth, + hash_tbl3_depth, hash_tbl4_depth, + hash_tbl5_depth, hash_tbl6_depth, + hash_tbl7_depth); + + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_hash_tbl_depth_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** hash CRC多项式选择 +* @param fun_id hash引擎号 +* @param crc_sel +* +* @return +* @remark 无 +* @see +* @author tf @date 2016/01/28 +************************************************************/ +DPP_STATUS dpp_hash_tbl_crc_poly_write(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + ZXIC_UINT32 bulk_id, ZXIC_UINT32 crc_sel) +{ + DPP_STATUS rtn = DPP_OK; + + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 hash_tbl_crc_cfg_bt_start = 0; + ZXIC_UINT32 hash_tbl_crc_cfg_bt_width = 2; + DPP_SE4K_SE_ALG_HASH10_EXT_CRC_CFG_T hash01_ext_crc_cfg = { 0 }; + DPP_SE4K_SE_ALG_HASH32_EXT_CRC_CFG_T hash23_ext_crc_cfg = { 0 }; + +#if 
DPP_WRITE_FILE_EN + ZXIC_UINT32 ext_crc_cfg_file_reg = 0; + ZXIC_UINT32 ext_crc_cfg_file_reg_addr = 0; + DPP_HASH_FILE_REG_T *p_hash_file_reg = NULL; +#endif + + ZXIC_COMM_ASSERT(p_se_cfg); + + dev_id = p_se_cfg->dev_id; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, fun_id, DPP_HASH_ID_MIN, + DPP_HASH_ID_MAX); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, bulk_id, HASH_BULK_ID_MIN, + HASH_BULK_ID_MAX); + + hash_tbl_crc_cfg_bt_start = bulk_id * 2; + + if (fun_id == 0 || fun_id == 1) { + rtn = dpp_reg_read(&p_se_cfg->dev, + SE4K_SE_ALG_HASH10_EXT_CRC_CFGr, 0, 0, + &hash01_ext_crc_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_reg_read"); + + if (fun_id == 0) { + ZXIC_COMM_UINT32_WRITE_BITS( + hash01_ext_crc_cfg.hash0_crc_cfg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + } else { + ZXIC_COMM_UINT32_WRITE_BITS( + hash01_ext_crc_cfg.hash1_crc_cfg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + } + + rtn = dpp_reg_write(&p_se_cfg->dev, + SE4K_SE_ALG_HASH10_EXT_CRC_CFGr, 0, 0, + &hash01_ext_crc_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_reg_write"); + + } else { + rtn = dpp_reg_read(&p_se_cfg->dev, + SE4K_SE_ALG_HASH32_EXT_CRC_CFGr, 0, 0, + &hash23_ext_crc_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_reg_read"); + + if (fun_id == 2) { + ZXIC_COMM_UINT32_WRITE_BITS( + hash23_ext_crc_cfg.hash2_crc_cfg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + } else { + ZXIC_COMM_UINT32_WRITE_BITS( + hash23_ext_crc_cfg.hash3_crc_cfg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + } + + rtn = dpp_reg_write(&p_se_cfg->dev, + SE4K_SE_ALG_HASH32_EXT_CRC_CFGr, 0, 0, + &hash23_ext_crc_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_reg_write"); + } + +#if DPP_WRITE_FILE_EN + p_hash_file_reg = DPP_GET_HASH_FILE_REG(); + + if (fun_id == 0 || fun_id == 1) { + ext_crc_cfg_file_reg_addr = 0x01cc; + + hash_tbl_crc_cfg_bt_start = fun_id * 16 + bulk_id * 2; + + ext_crc_cfg_file_reg = 
p_hash_file_reg->ext_crc_cfg_file_reg[0]; + ZXIC_COMM_UINT32_WRITE_BITS(ext_crc_cfg_file_reg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + + p_hash_file_reg->ext_crc_cfg_file_reg[0] = ext_crc_cfg_file_reg; + + rtn = dpp_data_w2f(p_se_cfg->reg_base + + ext_crc_cfg_file_reg_addr, + &p_hash_file_reg->ext_crc_cfg_file_reg[0], + FILE_TYPE_REG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_data_w2f"); + + } else { + ext_crc_cfg_file_reg_addr = 0x01d0; + + hash_tbl_crc_cfg_bt_start = (fun_id - 2) * 16 + bulk_id * 2; + + ext_crc_cfg_file_reg = p_hash_file_reg->ext_crc_cfg_file_reg[1]; + ZXIC_COMM_UINT32_WRITE_BITS(ext_crc_cfg_file_reg, crc_sel, + hash_tbl_crc_cfg_bt_start, + hash_tbl_crc_cfg_bt_width); + + p_hash_file_reg->ext_crc_cfg_file_reg[1] = ext_crc_cfg_file_reg; + + rtn = dpp_data_w2f(p_se_cfg->reg_base + + ext_crc_cfg_file_reg_addr, + &p_hash_file_reg->ext_crc_cfg_file_reg[1], + FILE_TYPE_REG); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rtn, "dpp_data_w2f"); + } + +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** 清除hash引擎的所有hash表项(清除软件配置) +* @param p_se_cfg +* @param hash_id +* @param bulk_id +* +* @return +* @remark 无 +* @see +* @author cq @date 2023/08/18 +************************************************************/ +DPP_STATUS dpp_hash_soft_all_entry_delete(DPP_SE_CFG *p_se_cfg, + ZXIC_UINT32 hash_id) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT8 table_id = 0; + ZXIC_UINT32 bulk_id = 0; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + D_HEAD *p_head_hash_rb = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_rtn = NULL; + SE_ITEM_CFG *p_item = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(hash_id, 0, HASH_FUNC_ID_NUM - 1); + + /* Fix: dev_id was never assigned and stayed 0, so every ZXIC_COMM_CHECK_DEV_*() + * and ZXIC_COMM_TRACE_DEV_*() call below attributed errors to device 0. + * Sibling functions (e.g. dpp_hash_tbl_depth_write, dpp_hash_ext_cfg_clr) + * all fetch the real device id this way. */ + dev_id = p_se_cfg->dev_id; + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, hash_id); + DPP_SE_CHECK_FUN(p_func_info, hash_id, 
FUN_HASH); + + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + p_head_hash_rb = &p_hash_cfg->hash_rb.tn_list; + + while (p_head_hash_rb->used) { + p_node = p_head_hash_rb->p_next; + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_rb_tn->p_key; + table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + bulk_id = ((table_id >> 2) & 0x7); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, bulk_id, 0, + HASH_BULK_NUM - 1); + + rc = zxic_comm_rb_delete(&p_hash_cfg->hash_rb, p_rbkey, + &p_rb_tn_rtn); + if (ZXIC_RBT_RC_SRHFAIL == rc) { + p_hash_cfg->hash_stat.delete_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "Error!there is not item in hash!\n"); + return DPP_HASH_RC_DEL_SRHFAIL; + } + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_tn_rtn); + p_rbkey_rtn = (DPP_HASH_RBKEY_INFO *)(p_rb_tn_rtn->p_key); + p_item = p_rbkey_rtn->p_item_info; + + rc = zxic_comm_double_link_del(&(p_rbkey_rtn->entry_dn), + &(p_item->item_list)); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_double_link_del"); + p_item->wrt_mask &= + ~(DPP_GET_HASH_ENTRY_MASK(p_rbkey_rtn->entry_size, + p_rbkey_rtn->entry_pos)) & + 0xF; + + if (0 == p_item->item_list.used) { + if ((ITEM_DDR_256 == p_item->item_type) || + (ITEM_DDR_512 == p_item->item_type)) { + /* modify coverity by yinxh 2021.03.10*,以256bit为单位。暂不考虑512bit的情况*/ + ZXIC_COMM_CHECK_INDEX_UPPER( + p_item->item_index, + p_hash_cfg->p_bulk_ddr_info[bulk_id] + ->item_num); + p_hash_cfg->p_bulk_ddr_info[bulk_id] + ->p_item_array[p_item->item_index] = + NULL; + ZXIC_COMM_FREE(p_item); + } else { + p_item->valid = 0; + } + } + + ZXIC_COMM_FREE(p_rbkey_rtn); + ZXIC_COMM_FREE(p_rb_tn_rtn); + p_hash_cfg->hash_stat.delete_ok++; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 卸载软件上分配的hash资源(主要针对malloc释放) +* @param dev_id 设备号 +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/20 +************************************************************/ +DPP_STATUS dpp_hash_soft_uninstall(DPP_DEV_T *dev) +{ + 
DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 hash_id = 0; + ZXIC_UINT32 dev_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + for (hash_id = 0; hash_id < HASH_FUNC_ID_NUM; hash_id++) { + rc = dpp_one_hash_soft_uninstall(dev, hash_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "dpp_one_hash_soft_uninstall"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 释放当前sdt下的所有hash流表表项(仅删除软件表项,不操作硬件) +* @param dev_id 设备号 +* @param sdt_no sdt号 +* @return +* @remark 无 +* @see +* @author cq @date 2024/11/05 +************************************************************/ +DPP_STATUS dpp_hash_soft_delete_by_sdt(DPP_DEV_T *dev, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT8 key_valid = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 key_type = 0; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + D_HEAD *p_head_hash_rb = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey_rtn = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + SE_ITEM_CFG *p_item = NULL; + ZXIC_MUTEX_T *p_hash_mutex = NULL; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT_NO_ASSERT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + //从sdt_no中获取hash配置 + rc = dpp_hash_get_hash_info_from_sdt(dev, sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_cfg); + + rc = dpp_dev_hash_opr_mutex_get(dev, p_hash_cfg->fun_id, &p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_dev_hash_opr_mutex_get"); + rc = zxic_comm_mutex_lock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_lock"); + + p_head_hash_rb = &p_hash_cfg->hash_rb.tn_list; + p_node = p_head_hash_rb->p_next; + 
while (p_node) { + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_rb_tn->p_key; + + key_valid = DPP_GET_HASH_KEY_VALID(p_rbkey->key); + table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + if ((!key_valid) || (table_id != hash_entry_cfg.table_id) || + (key_type != hash_entry_cfg.key_type)) { + p_node = p_node->next; + continue; + } + p_node = p_node->next; + + rc = zxic_comm_rb_delete(&p_hash_cfg->hash_rb, p_rbkey, + &p_rb_tn_rtn); + if (ZXIC_RBT_RC_SRHFAIL == rc) { + p_hash_cfg->hash_stat.delete_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "Error!there is not item in hash!\n"); + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "zxic_comm_mutex_unlock"); + return DPP_HASH_RC_DEL_SRHFAIL; + } + + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_rb_tn_rtn, + p_hash_mutex); + p_rbkey_rtn = (DPP_HASH_RBKEY_INFO *)(p_rb_tn_rtn->p_key); + p_item = p_rbkey_rtn->p_item_info; + ZXIC_COMM_CHECK_DEV_POINT_UNLOCK(dev_id, p_item, p_hash_mutex); + + rc = zxic_comm_double_link_del(&(p_rbkey_rtn->entry_dn), + &(p_item->item_list)); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + dev_id, rc, "zxic_comm_double_link_del", p_hash_mutex); + p_item->wrt_mask &= + ~(DPP_GET_HASH_ENTRY_MASK(p_rbkey_rtn->entry_size, + p_rbkey_rtn->entry_pos)) & + 0xF; + if (0 == p_item->item_list.used) { + p_item->valid = 0; + } + + ZXIC_COMM_FREE(p_rbkey_rtn); + ZXIC_COMM_FREE(p_rb_tn_rtn); + p_hash_cfg->hash_stat.delete_ok++; + } + + rc = zxic_comm_mutex_unlock(p_hash_mutex); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 卸载指定hashid上软件分配的hash资源(主要针对malloc释放) +* @param dev_id 设备号 +* @param hash_id hash id 0~3 +* @return +* @remark 无 +* @see +* @author cq @date 2023/12/20 +************************************************************/ +DPP_STATUS dpp_one_hash_soft_uninstall(DPP_DEV_T *dev, ZXIC_UINT32 hash_id) 
+{ + ZXIC_UINT32 rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dev_id = 0; + + D_NODE *p_node = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + ZXIC_RB_TN *p_rb_tn_rtn = NULL; + HASH_DDR_CFG *p_rbkey = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + D_HEAD *p_head_ddr_cfg_rb = NULL; + HASH_DDR_CFG *p_temp_rbkey = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX(dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_INDEX(hash_id, 0, HASH_FUNC_ID_NUM - 1); + + /* 取出se配置 */ + rc = dpp_se_cfg_get(dev, &p_se_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, hash_id); + if (0 == p_func_info->is_used) { + ZXIC_COMM_TRACE_DEBUG("Error[0x%x], fun_id [%d] is not init\n!", + DPP_SE_RC_FUN_INVALID, hash_id); + return DPP_OK; + } + + /*软件删除表项,删除红黑树节点*/ + rc = dpp_hash_soft_all_entry_delete(p_se_cfg, hash_id); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_soft_all_entry_delete"); + + /*释放独占标识内存*/ + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + for (i = 0; i < HASH_BULK_NUM; i++) { + if ((&p_hash_cfg->hash_stat)->p_bulk_zcam_mono[i] != NULL) { + ZXIC_COMM_FREE( + (&p_hash_cfg->hash_stat)->p_bulk_zcam_mono[i]); + (&p_hash_cfg->hash_stat)->p_bulk_zcam_mono[i] = NULL; + } + } + + /*释放DDR分配的所有内存*/ + p_head_ddr_cfg_rb = &p_hash_cfg->ddr_cfg_rb.tn_list; + while (p_head_ddr_cfg_rb->used) { + p_node = p_head_ddr_cfg_rb->p_next; + + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_rbkey = p_rb_tn->p_key; + + rc = zxic_comm_rb_delete(&p_hash_cfg->ddr_cfg_rb, p_rbkey, + &p_rb_tn_rtn); + + if (ZXIC_RBT_RC_SRHFAIL == rc) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "ddr_cfg_rb delete key is not exist, key: 0x"); + } else { + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "zxic_comm_rb_delete"); + } + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_tn_rtn); + p_temp_rbkey = (HASH_DDR_CFG *)(p_rb_tn_rtn->p_key); + 
ZXIC_COMM_FREE(p_temp_rbkey->p_item_array); + p_temp_rbkey->p_item_array = NULL; + ZXIC_COMM_FREE(p_temp_rbkey); + ZXIC_COMM_FREE(p_rb_tn_rtn); + } + + rc = dpp_hash_zcam_resource_deinit(p_hash_cfg); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_hash_zcam_resource_deinit"); + + rc = dpp_se_fun_deinit(p_se_cfg, (hash_id & 0xff), FUN_HASH); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_se_fun_deinit"); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param p_hash_cfg +* @param p_entry +* @param key_by_size +* @param rst_by_size +* @param p_item_info +* @param p_wrt_lrn_rsp +* +* @return +* @remark 无 +* @see +* @author XCX @date 2018/03/22 +************************************************************/ +DPP_STATUS dpp_hash_software_item_check(DPP_HASH_CFG *p_hash_cfg, + DPP_HASH_ENTRY *p_entry, + ZXIC_UINT32 key_by_size, + ZXIC_UINT32 rst_by_size, + SE_ITEM_CFG *p_item_info, + DPP_HASH_WRT_LRN_RSP *p_wrt_lrn_rsp) +{ + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT8 srh_key_type = 0; + ZXIC_UINT8 srh_entry_size = 0; + ZXIC_UINT8 temp_key_type = 0; + ZXIC_UINT8 srh_key[HASH_KEY_MAX] = { 0 }; + + ZXIC_UINT32 tbl_id = 0; + ZXIC_UINT32 free_pos = 0xFFFFFFFF; /* ** -1; */ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + ZXIC_UINT32 item_entry_max = ITEM_ENTRY_NUM_4; + + D_NODE *p_entry_dn = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + DPP_HASH_TBL_ID_INFO *p_tbl_id_info = NULL; + + ZXIC_COMM_CHECK_INDEX(key_by_size, 0, HASH_KEY_MAX); + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + ZXIC_COMM_CHECK_POINT(p_entry); + ZXIC_COMM_CHECK_POINT(p_item_info); + ZXIC_COMM_CHECK_POINT(p_wrt_lrn_rsp); + ZXIC_COMM_CHECK_POINT(p_hash_cfg->p_se_info); + + dev_id = p_hash_cfg->p_se_info->dev_id; + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + tbl_id = DPP_GET_HASH_TBL_ID(p_entry->p_key); + slot_id = p_hash_cfg->p_se_info->dev.pcie_channel.slot; + ZXIC_COMM_CHECK_INDEX_UPPER(slot_id, DPP_PCIE_SLOT_MAX - 1); + p_tbl_id_info = + 
GET_HASH_TBL_ID_INFO(slot_id, p_hash_cfg->fun_id, tbl_id); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_tbl_id_info); + + srh_key_type = DPP_GET_HASH_KEY_TYPE(p_entry->p_key); + ZXIC_COMM_MEMCPY(srh_key, p_entry->p_key, + 1); /* wr_flag + key_type + tbl_id */ + ZXIC_COMM_MEMCPY(srh_key + 1, + p_entry->p_key + 1 + (HASH_KEY_MAX - key_by_size), + key_by_size - 1); /* actural_key */ + srh_key[0] = + (ZXIC_UINT8)((srh_key[0] | 0x80) & 0xFF); /* set valid bit */ + srh_entry_size = DPP_GET_HASH_ENTRY_SIZE(srh_key_type); + + p_entry_dn = p_item_info->item_list.p_next; + + while (p_entry_dn) { + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_entry_dn->data; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rbkey); + + ZXIC_COMM_ASSERT(p_rbkey->p_item_info == p_item_info); + + temp_key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + + if (DPP_GET_HASH_KEY_VALID(p_rbkey->key) && + (srh_key_type == temp_key_type)) { + if (0 == ZXIC_COMM_MEMCMP(srh_key, p_rbkey->key, + key_by_size)) { + srh_succ = 1; + break; + } + } + + p_entry_dn = p_entry_dn->next; + } + + if (NULL == p_rbkey) { + return ZXIC_PAR_CHK_POINT_NULL; + } + + if (!srh_succ) { + if ((p_tbl_id_info->is_lrn || p_tbl_id_info->is_mc_wrt) && + !p_wrt_lrn_rsp->space_vld) { /* mod by tf 2016-5-25 14:39:36 */ + if (ITEM_DDR_256 == p_item_info->item_type) { + item_entry_max = ITEM_ENTRY_NUM_2; + } + + free_pos = dpp_hash_get_item_free_pos( + item_entry_max, p_item_info->wrt_mask, + srh_entry_size); + + if (free_pos != 0xFFFFFFFF) { + /* modify coverity by yinxh 2021.03.10*/ + // ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT(4, srh_entry_size/16); + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW_NO_ASSERT( + 4U - srh_entry_size / 16U, free_pos); + p_wrt_lrn_rsp->space_vld = 1; + p_wrt_lrn_rsp->wrt_mask = + DPP_GET_HASH_ENTRY_MASK(srh_entry_size, + free_pos); + + if (ITEM_DDR_256 == p_item_info->item_type) { + p_wrt_lrn_rsp->ext_flag = 1; + p_wrt_lrn_rsp->width_flag = 0; + p_wrt_lrn_rsp->lrn_addr = + p_item_info->hw_addr & + ZXIC_COMM_GET_BIT_MASK( + ZXIC_UINT32, + 
HASH_ADDR_DDR_BT_LEN); + } else if (ITEM_DDR_512 == + p_item_info->item_type) { + p_wrt_lrn_rsp->ext_flag = 1; + p_wrt_lrn_rsp->width_flag = 1; + p_wrt_lrn_rsp->lrn_addr = + (p_item_info->hw_addr) & + ZXIC_COMM_GET_BIT_MASK( + ZXIC_UINT32, + HASH_ADDR_DDR_BT_LEN); + } else { + p_wrt_lrn_rsp->ext_flag = 0; + p_wrt_lrn_rsp->width_flag = 1; + p_wrt_lrn_rsp->lrn_addr = + p_item_info->hw_addr & + ZXIC_COMM_GET_BIT_MASK( + ZXIC_UINT32, + HASH_ADDR_ZCAM_BT_LEN); + } + } + } + + return DPP_HASH_RC_MATCH_ITEM_FAIL; + } + + /* search success */ + if (p_tbl_id_info->is_mc_wrt) { + p_wrt_lrn_rsp->space_vld = 0; + p_wrt_lrn_rsp->wrt_mask = DPP_GET_HASH_ENTRY_MASK( + p_rbkey->entry_size, p_rbkey->entry_pos); + + if (ITEM_DDR_256 == p_item_info->item_type) { + p_wrt_lrn_rsp->ext_flag = 1; + p_wrt_lrn_rsp->width_flag = 0; + p_wrt_lrn_rsp->lrn_addr = + p_item_info->hw_addr & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, + HASH_ADDR_DDR_BT_LEN); + } else if (ITEM_DDR_512 == p_item_info->item_type) { + p_wrt_lrn_rsp->ext_flag = 1; + p_wrt_lrn_rsp->width_flag = 1; + p_wrt_lrn_rsp->lrn_addr = + (p_item_info->hw_addr) & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, + HASH_ADDR_DDR_BT_LEN); + } else { + p_wrt_lrn_rsp->ext_flag = 0; + p_wrt_lrn_rsp->width_flag = 1; + p_wrt_lrn_rsp->lrn_addr = + p_item_info->hw_addr & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, + HASH_ADDR_ZCAM_BT_LEN); + } + } + + /* copy result */ + ZXIC_COMM_MEMCPY(p_entry->p_rst, p_rbkey->rst, + (rst_by_size > HASH_RST_MAX) ? 
HASH_RST_MAX : + rst_by_size); + + return DPP_OK; +} + +/***********************************************************/ +/** 单个hash引擎查找调试函数 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* @param fun_id hash引擎号 +* @param p_entry hash条目,包括key和result,查找时的key的位宽为392bit,格式为:wr_flag(1bit) + key_type(2bit) + tbl_id(5bit) + reserve(M bit)+ actu_key(8*N bit) \n +* result用于在硬件学习使能的情况下,返回空闲位置的地址。p_entry->p_rst需要预留 HASH_RST_MAX 字节的空间 +* @param p_space_vld 学习使能时,是否有空闲空间 +* @param srh_mode 查找模式,取值参考DPP_HASH_SRH_MODE的定义 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wcl @date 2015/05/15 +************************************************************/ +DPP_STATUS dpp_hash_search(DPP_SE_CFG *p_se_cfg, ZXIC_UINT32 fun_id, + DPP_HASH_ENTRY *p_entry, ZXIC_UINT32 *p_space_vld, + ZXIC_UINT32 srh_mode) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT8 tbl_id = 0; + ZXIC_UINT8 wr_flag = 0; + ZXIC_UINT8 bulk_id = 0; + ZXIC_UINT8 key_type = 0; + ZXIC_UINT8 srh_succ = 0; + ZXIC_UINT32 key_by_size = 0; + ZXIC_UINT32 rst_by_size = 0; + ZXIC_UINT8 temp_tbl_id = 0; + ZXIC_UINT32 actu_key_size = 0; + ZXIC_UINT8 temp_key[HASH_KEY_MAX] = { 0 }; + + ZXIC_UINT16 crc16_value = 0; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 l = 0; + ZXIC_UINT32 hw_addr = 0; + ZXIC_UINT32 zcell_id = 0; + ZXIC_UINT32 rsp_addr = 0; + ZXIC_UINT32 zblk_idx = 0; + ZXIC_UINT32 item_idx = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot_id = 0; + ZXIC_UINT32 item_type = 0; + ZXIC_UINT32 crc32_value = 0; + ZXIC_UINT32 pre_zblk_idx = 0xFFFFFFFF; /* -1; */ + + D_NODE *p_zblk_dn = NULL; + D_NODE *p_zcell_dn = NULL; + SE_ITEM_CFG *p_item = NULL; + SE_ZBLK_CFG *p_zblk = NULL; + SE_ZCELL_CFG *p_zcell = NULL; + HASH_DDR_CFG *p_ddr_cfg = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + DPP_HASH_TBL_ID_INFO *p_tbl_id_info = NULL; + DPP_HASH_WRT_LRN_RSP wrt_lrn_rsp = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_POINT(p_entry); + 
ZXIC_COMM_CHECK_POINT(p_space_vld); + ZXIC_COMM_CHECK_INDEX(srh_mode, HASH_SRH_MODE_SOFT, HASH_SRH_MODE_HDW); + ZXIC_COMM_CHECK_POINT(p_entry->p_key); + ZXIC_COMM_CHECK_POINT(p_entry->p_rst); + + *p_space_vld = 0; + dev_id = p_se_cfg->dev_id; + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN(p_func_info, fun_id, FUN_HASH); + + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + wr_flag = DPP_GET_HASH_KEY_VALID(p_entry->p_key); + + tbl_id = DPP_GET_HASH_TBL_ID(p_entry->p_key); + slot_id = p_se_cfg->dev.pcie_channel.slot; + ZXIC_COMM_CHECK_INDEX_UPPER(slot_id, DPP_PCIE_SLOT_MAX - 1); + p_tbl_id_info = GET_HASH_TBL_ID_INFO(slot_id, fun_id, tbl_id); + + key_type = DPP_GET_HASH_KEY_TYPE(p_entry->p_key); + HASH_TBL_ID_INFO_CHECK(slot_id, fun_id, tbl_id, key_type); + + actu_key_size = GET_ACTU_KEY_SIZE_BY_TBLID(slot_id, fun_id, tbl_id); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + dev_id, DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size), + HASH_KEY_CTR_SIZE); + key_by_size = DPP_GET_KEY_SIZE(actu_key_size); + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT( + dev_id, DPP_GET_HASH_ENTRY_SIZE(key_type), + DPP_GET_ACTU_KEY_BY_SIZE(actu_key_size)); + rst_by_size = DPP_GET_RST_SIZE(key_type, actu_key_size); + + /* Coverity err fixed (参数key_by_size(其值为0)传递下标越界给被调用方在-2处的数组) */ + if (key_by_size < (HASH_ACTU_KEY_MIN + HASH_KEY_CTR_SIZE) || + key_by_size > HASH_KEY_MAX) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "\n dpp_hash_search key_by_size[%d] INVALID !\n", + key_by_size); + return ZXIC_PAR_CHK_INVALID_INDEX; + } + + /* 查找时接收392bit的key格式为: wr_flag + key_type + tbl_id + reserve + actural_key, + * 需要先将格式调整为:wr_flag + key_type + tbl_id + actural_key + reserve. 
+ */ + /* wr_flag + key_type + tbl_id */ + ZXIC_COMM_MEMCPY(temp_key, p_entry->p_key, 1); + /* actural_key */ + ZXIC_COMM_MEMCPY(temp_key + 1, + p_entry->p_key + 1 + (HASH_KEY_MAX - key_by_size), + key_by_size - 1); + + /* 约定: 参与CRC运算的key, 需要将tbl_id的前面补3bit的0,然后放到actu_key的后面. */ + temp_tbl_id = temp_key[0] & 0x1F; + memmove(&temp_key[0], &temp_key[1], key_by_size - HASH_KEY_CTR_SIZE); + temp_key[key_by_size - HASH_KEY_CTR_SIZE] = temp_tbl_id; + bulk_id = ((temp_tbl_id >> 2) & 0x7); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, bulk_id, 0, HASH_BULK_NUM - 1); + + /* [1]. search in DDR */ + if (p_hash_cfg->ddr_valid) { + item_type = ITEM_DDR_256; + p_ddr_cfg = p_hash_cfg->p_bulk_ddr_info[bulk_id]; + crc32_value = p_hash_cfg->p_hash32_fun(temp_key, key_by_size, + p_ddr_cfg->hash_ddr_arg); + item_idx = crc32_value % p_ddr_cfg->item_num; + + if (DDR_WIDTH_512b == p_ddr_cfg->width_mode) { + item_idx = crc32_value % p_ddr_cfg->item_num; + item_type = ITEM_DDR_512; + } + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "Hash search in ITEM_DDR_%s, CRC32 index is: 0x%x.\n", + ((item_type == ITEM_DDR_256) ? 
"256" : "512"), + item_idx); + + if (HASH_SRH_MODE_HDW == srh_mode) { + /* search hardware mode */ + } else { + /* search software mode */ + ZXIC_COMM_CHECK_INDEX_UPPER(item_idx, + p_ddr_cfg->item_num - 1); + p_item = p_ddr_cfg->p_item_array[item_idx]; + + if (p_item != NULL) { + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + dev_id, p_ddr_cfg->hw_baddr, item_idx); + p_item->hw_addr = GET_HASH_DDR_HW_ADDR( + p_ddr_cfg->hw_baddr, item_idx); + p_item->item_type = item_type; + p_item->item_index = item_idx; + rc = dpp_hash_software_item_check( + p_hash_cfg, p_entry, key_by_size, + rst_by_size, p_item, &wrt_lrn_rsp); + + if (DPP_OK == rc) { + srh_succ = 1; + } + } else { + wrt_lrn_rsp.space_vld = 1; + wrt_lrn_rsp.ext_flag = 1; + wrt_lrn_rsp.wrt_mask = DPP_GET_HASH_ENTRY_MASK( + DPP_GET_HASH_ENTRY_SIZE(key_type), 0); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + dev_id, p_ddr_cfg->hw_baddr, item_idx); + hw_addr = GET_HASH_DDR_HW_ADDR( + p_ddr_cfg->hw_baddr, item_idx); + + if (ITEM_DDR_256 == item_type) { + wrt_lrn_rsp.width_flag = 0; + wrt_lrn_rsp.lrn_addr = + hw_addr & + ZXIC_COMM_GET_BIT_MASK( + ZXIC_UINT32, + HASH_ADDR_DDR_BT_LEN); + } else { + wrt_lrn_rsp.width_flag = 1; + wrt_lrn_rsp.lrn_addr = + (hw_addr)&ZXIC_COMM_GET_BIT_MASK( + ZXIC_UINT32, + HASH_ADDR_DDR_BT_LEN); + } + } + } + } + + if (!srh_succ) { + /* [2]. 
search in ZCAM */ + item_type = ITEM_RAM; + p_zcell_dn = p_hash_cfg->hash_shareram.zcell_free_list.p_next; + + while (p_zcell_dn) { + p_zcell = (SE_ZCELL_CFG *)p_zcell_dn->data; + zblk_idx = GET_ZBLK_IDX(p_zcell->zcell_idx); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, zblk_idx, 0, + SE_ZBLK_NUM - 1); + p_zblk = &(p_se_cfg->zblk_info[zblk_idx]); + + if (zblk_idx != pre_zblk_idx) { + pre_zblk_idx = zblk_idx; + crc16_value = p_hash_cfg->p_hash16_fun( + temp_key, key_by_size, + p_zblk->hash_arg); + } + + zcell_id = GET_ZCELL_IDX(p_zcell->zcell_idx); + item_idx = GET_ZCELL_CRC_VAL(zcell_id, crc16_value); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, item_idx, 0, + SE_RAM_DEPTH - 1); + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "Hash search in ZCAM_RAM, zblk[%d], zcell[%d], CRC16 ram_index[0x%x].\n", + zblk_idx, zcell_id, item_idx); + + if (HASH_SRH_MODE_HDW == srh_mode) { + /* search hardware mode */ + } else { + /* search software mode */ + p_item = &(p_zcell->item_info[item_idx]); + p_item->hw_addr = ZBLK_ITEM_ADDR_CALC( + p_zcell->zcell_idx, item_idx); + p_item->item_type = item_type; + p_item->item_index = item_idx; + rc = dpp_hash_software_item_check( + p_hash_cfg, p_entry, key_by_size, + rst_by_size, p_item, &wrt_lrn_rsp); + + if (DPP_OK == rc) { + srh_succ = 1; + break; + } + } + + p_zcell_dn = p_zcell_dn->next; + } + } + + if (!srh_succ) { + /* [3]. 
search in ZBLK Reg */ + item_type = ITEM_REG; + p_zblk_dn = p_hash_cfg->hash_shareram.zblk_list.p_next; + + while (p_zblk_dn) { + p_zblk = (SE_ZBLK_CFG *)p_zblk_dn->data; + zblk_idx = p_zblk->zblk_idx; + + for (i = 0; i < SE_ZREG_NUM; i++) { + item_idx = i; + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, + "Hash search in ZCAM_REG, zblk[%d], reg_index[0x%x].\n", + zblk_idx, item_idx); + + if (HASH_SRH_MODE_HDW == srh_mode) { + /* search hardware mode */ + } else { + /* search software mode */ + p_item = &( + p_zblk->zreg_info[i].item_info); + p_item->hw_addr = + ZBLK_HASH_LIST_REG_ADDR_CALC( + zblk_idx, item_idx); + p_item->item_type = item_type; + p_item->item_index = item_idx; + rc = dpp_hash_software_item_check( + p_hash_cfg, p_entry, + key_by_size, rst_by_size, + p_item, &wrt_lrn_rsp); + + if (DPP_OK == rc) { + srh_succ = 1; + break; + } + } + } + + if (srh_succ == 1) { + break; + } + + p_zblk_dn = p_zblk_dn->next; + } + } + + if (!srh_succ) { + /* if hardware learn enable or mcode write table enable, return space valid and address. 
*/ + if (p_tbl_id_info->is_lrn || + (wr_flag && p_tbl_id_info->is_mc_wrt)) { + *p_space_vld = wrt_lrn_rsp.space_vld; + rsp_addr = (((ZXIC_UINT32)(wrt_lrn_rsp.ext_flag & 0x1) + << HASH_ADDR_EXT_FLAG_BT_OFF) | + ((ZXIC_UINT32)(wrt_lrn_rsp.wrt_mask & 0xF) + << HASH_ADDR_WRT_MASK_BT_OFF) | + ((ZXIC_UINT32)wrt_lrn_rsp.lrn_addr + << HASH_ADDR_BT_OFF) | + (wrt_lrn_rsp.width_flag & 0x1)); + + zxic_comm_swap((ZXIC_UINT8 *)&rsp_addr, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMSET_S(p_entry->p_rst, HASH_RST_MAX, 0, + HASH_RST_MAX); + ZXIC_COMM_MEMCPY(p_entry->p_rst, &rsp_addr, + sizeof(ZXIC_UINT32)); + + ZXIC_COMM_TRACE_DEV_DEBUG( + dev_id, "search fun p_entry->p_rst is:"); + + for (l = 0; l < 4; l++) { + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "0x%x ", + p_entry->p_rst[l]); + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "\n"); + } + + p_hash_cfg->hash_stat.search_fail++; + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "Hash search key fail!\n"); + return DPP_HASH_RC_SRH_FAIL; + } +#if 0 + if ((wr_flag) && (p_tbl_id_info->is_mc_wrt)) { + /* mcode update table */ + *p_space_vld = wrt_lrn_rsp.space_vld; + rsp_addr = (((wrt_lrn_rsp.ext_flag & 0x1) << HASH_ADDR_EXT_FLAG_BT_OFF) | + ((wrt_lrn_rsp.wrt_mask & 0xF) << HASH_ADDR_WRT_MASK_BT_OFF) | + (wrt_lrn_rsp.lrn_addr << HASH_ADDR_BT_OFF) | + (wrt_lrn_rsp.width_flag & 0x1)); + + zxic_comm_swap((ZXIC_UINT8 *)&rsp_addr, sizeof(ZXIC_UINT32)); + + ZXIC_COMM_MEMSET(p_entry->p_rst, 0, HASH_RST_MAX); + ZXIC_COMM_MEMCPY(p_entry->p_rst, &rsp_addr, sizeof(ZXIC_UINT32)); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "search fun p_entry->p_rst is:"); + + for (l = 0; l < 4; l++) { + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "0x%x ", p_entry->p_rst[l]); + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "\n"); + } +#endif + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "Hash search successfully.\n"); + p_hash_cfg->hash_stat.search_ok++; + + return DPP_OK; +} + +DPP_STATUS dpp_hash_tbl_clr(ZXIC_UINT32 dev_id) +{ + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + 
ZXIC_COMM_MEMSET_S(&g_tbl_id_info[0][0][0], sizeof(g_tbl_id_info), 0, + sizeof(g_tbl_id_info)); + + return DPP_OK; +} +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash_crc.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash_crc.c new file mode 100644 index 000000000000..70fb4d99d608 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_hash_crc.c @@ -0,0 +1,376 @@ +#include "zxic_common.h" +#include "dpp_hash_crc.h" + +ZXIC_UINT16 g_crc16_rst[256][8] = { + { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + { 0x1021, 0x8005, 0x3d65, 0xab47, 0x3453, 0x0357, 0x0589, 0xa02b }, + { 0x2042, 0x800f, 0x7aca, 0xfdc9, 0x68a6, 0x06ae, 0x0b12, 0xe07d }, + { 0x3063, 0x000a, 0x47af, 0x568e, 0x5cf5, 0x05f9, 0x0e9b, 0x4056 }, + { 0x4084, 0x801b, 0xf594, 0x50d5, 0xd14c, 0x0d5c, 0x1624, 0x60d1 }, + { 0x50a5, 0x001e, 0xc8f1, 0xfb92, 0xe51f, 0x0e0b, 0x13ad, 0xc0fa }, + { 0x60c6, 0x0014, 0x8f5e, 0xad1c, 0xb9ea, 0x0bf2, 0x1d36, 0x80ac }, + { 0x70e7, 0x8011, 0xb23b, 0x065b, 0x8db9, 0x08a5, 0x18bf, 0x2087 }, + { 0x8108, 0x8033, 0xd64d, 0xa1aa, 0x96cb, 0x1ab8, 0x2c48, 0xc1a2 }, + { 0x9129, 0x0036, 0xeb28, 0x0aed, 0xa298, 0x19ef, 0x29c1, 0x6189 }, + { 0xa14a, 0x003c, 0xac87, 0x5c63, 0xfe6d, 0x1c16, 0x275a, 0x21df }, + { 0xb16b, 0x8039, 0x91e2, 0xf724, 0xca3e, 0x1f41, 0x22d3, 0x81f4 }, + { 0xc18c, 0x0028, 0x23d9, 0xf17f, 0x4787, 0x17e4, 0x3a6c, 0xa173 }, + { 0xd1ad, 0x802d, 0x1ebc, 0x5a38, 0x73d4, 0x14b3, 0x3fe5, 0x0158 }, + { 0xe1ce, 0x8027, 0x5913, 0x0cb6, 0x2f21, 0x114a, 0x317e, 0x410e }, + { 0xf1ef, 0x0022, 0x6476, 0xa7f1, 0x1b72, 0x121d, 0x34f7, 0xe125 }, + { 0x1231, 0x8063, 0x91ff, 0xe813, 0x19c5, 0x3570, 0x5890, 0x236f }, + { 0x0210, 0x0066, 0xac9a, 0x4354, 0x2d96, 0x3627, 0x5d19, 0x8344 }, + { 0x3273, 0x006c, 0xeb35, 0x15da, 0x7163, 0x33de, 0x5382, 0xc312 }, + { 0x2252, 0x8069, 0xd650, 0xbe9d, 0x4530, 0x3089, 0x560b, 0x6339 }, + { 0x52b5, 0x0078, 0x646b, 
0xb8c6, 0xc889, 0x382c, 0x4eb4, 0x43be }, + { 0x4294, 0x807d, 0x590e, 0x1381, 0xfcda, 0x3b7b, 0x4b3d, 0xe395 }, + { 0x72f7, 0x8077, 0x1ea1, 0x450f, 0xa02f, 0x3e82, 0x45a6, 0xa3c3 }, + { 0x62d6, 0x0072, 0x23c4, 0xee48, 0x947c, 0x3dd5, 0x402f, 0x03e8 }, + { 0x9339, 0x0050, 0x47b2, 0x49b9, 0x8f0e, 0x2fc8, 0x74d8, 0xe2cd }, + { 0x8318, 0x8055, 0x7ad7, 0xe2fe, 0xbb5d, 0x2c9f, 0x7151, 0x42e6 }, + { 0xb37b, 0x805f, 0x3d78, 0xb470, 0xe7a8, 0x2966, 0x7fca, 0x02b0 }, + { 0xa35a, 0x005a, 0x001d, 0x1f37, 0xd3fb, 0x2a31, 0x7a43, 0xa29b }, + { 0xd3bd, 0x804b, 0xb226, 0x196c, 0x5e42, 0x2294, 0x62fc, 0x821c }, + { 0xc39c, 0x004e, 0x8f43, 0xb22b, 0x6a11, 0x21c3, 0x6775, 0x2237 }, + { 0xf3ff, 0x0044, 0xc8ec, 0xe4a5, 0x36e4, 0x243a, 0x69ee, 0x6261 }, + { 0xe3de, 0x8041, 0xf589, 0x4fe2, 0x02b7, 0x276d, 0x6c67, 0xc24a }, + { 0x2462, 0x80c3, 0x1e9b, 0x7b61, 0x338a, 0x6ae0, 0xb120, 0x46de }, + { 0x3443, 0x00c6, 0x23fe, 0xd026, 0x07d9, 0x69b7, 0xb4a9, 0xe6f5 }, + { 0x0420, 0x00cc, 0x6451, 0x86a8, 0x5b2c, 0x6c4e, 0xba32, 0xa6a3 }, + { 0x1401, 0x80c9, 0x5934, 0x2def, 0x6f7f, 0x6f19, 0xbfbb, 0x0688 }, + { 0x64e6, 0x00d8, 0xeb0f, 0x2bb4, 0xe2c6, 0x67bc, 0xa704, 0x260f }, + { 0x74c7, 0x80dd, 0xd66a, 0x80f3, 0xd695, 0x64eb, 0xa28d, 0x8624 }, + { 0x44a4, 0x80d7, 0x91c5, 0xd67d, 0x8a60, 0x6112, 0xac16, 0xc672 }, + { 0x5485, 0x00d2, 0xaca0, 0x7d3a, 0xbe33, 0x6245, 0xa99f, 0x6659 }, + { 0xa56a, 0x00f0, 0xc8d6, 0xdacb, 0xa541, 0x7058, 0x9d68, 0x877c }, + { 0xb54b, 0x80f5, 0xf5b3, 0x718c, 0x9112, 0x730f, 0x98e1, 0x2757 }, + { 0x8528, 0x80ff, 0xb21c, 0x2702, 0xcde7, 0x76f6, 0x967a, 0x6701 }, + { 0x9509, 0x00fa, 0x8f79, 0x8c45, 0xf9b4, 0x75a1, 0x93f3, 0xc72a }, + { 0xe5ee, 0x80eb, 0x3d42, 0x8a1e, 0x740d, 0x7d04, 0x8b4c, 0xe7ad }, + { 0xf5cf, 0x00ee, 0x0027, 0x2159, 0x405e, 0x7e53, 0x8ec5, 0x4786 }, + { 0xc5ac, 0x00e4, 0x4788, 0x77d7, 0x1cab, 0x7baa, 0x805e, 0x07d0 }, + { 0xd58d, 0x80e1, 0x7aed, 0xdc90, 0x28f8, 0x78fd, 0x85d7, 0xa7fb }, + { 0x3653, 0x00a0, 0x8f64, 0x9372, 0x2a4f, 0x5f90, 0xe9b0, 0x65b1 
}, + { 0x2672, 0x80a5, 0xb201, 0x3835, 0x1e1c, 0x5cc7, 0xec39, 0xc59a }, + { 0x1611, 0x80af, 0xf5ae, 0x6ebb, 0x42e9, 0x593e, 0xe2a2, 0x85cc }, + { 0x0630, 0x00aa, 0xc8cb, 0xc5fc, 0x76ba, 0x5a69, 0xe72b, 0x25e7 }, + { 0x76d7, 0x80bb, 0x7af0, 0xc3a7, 0xfb03, 0x52cc, 0xff94, 0x0560 }, + { 0x66f6, 0x00be, 0x4795, 0x68e0, 0xcf50, 0x519b, 0xfa1d, 0xa54b }, + { 0x5695, 0x00b4, 0x003a, 0x3e6e, 0x93a5, 0x5462, 0xf486, 0xe51d }, + { 0x46b4, 0x80b1, 0x3d5f, 0x9529, 0xa7f6, 0x5735, 0xf10f, 0x4536 }, + { 0xb75b, 0x8093, 0x5929, 0x32d8, 0xbc84, 0x4528, 0xc5f8, 0xa413 }, + { 0xa77a, 0x0096, 0x644c, 0x999f, 0x88d7, 0x467f, 0xc071, 0x0438 }, + { 0x9719, 0x009c, 0x23e3, 0xcf11, 0xd422, 0x4386, 0xceea, 0x446e }, + { 0x8738, 0x8099, 0x1e86, 0x6456, 0xe071, 0x40d1, 0xcb63, 0xe445 }, + { 0xf7df, 0x0088, 0xacbd, 0x620d, 0x6dc8, 0x4874, 0xd3dc, 0xc4c2 }, + { 0xe7fe, 0x808d, 0x91d8, 0xc94a, 0x599b, 0x4b23, 0xd655, 0x64e9 }, + { 0xd79d, 0x8087, 0xd677, 0x9fc4, 0x056e, 0x4eda, 0xd8ce, 0x24bf }, + { 0xc7bc, 0x0082, 0xeb12, 0x3483, 0x313d, 0x4d8d, 0xdd47, 0x8494 }, + { 0x48c4, 0x8183, 0x3d36, 0xf6c2, 0x6714, 0xd5c0, 0x67c9, 0x8dbc }, + { 0x58e5, 0x0186, 0x0053, 0x5d85, 0x5347, 0xd697, 0x6240, 0x2d97 }, + { 0x6886, 0x018c, 0x47fc, 0x0b0b, 0x0fb2, 0xd36e, 0x6cdb, 0x6dc1 }, + { 0x78a7, 0x8189, 0x7a99, 0xa04c, 0x3be1, 0xd039, 0x6952, 0xcdea }, + { 0x0840, 0x0198, 0xc8a2, 0xa617, 0xb658, 0xd89c, 0x71ed, 0xed6d }, + { 0x1861, 0x819d, 0xf5c7, 0x0d50, 0x820b, 0xdbcb, 0x7464, 0x4d46 }, + { 0x2802, 0x8197, 0xb268, 0x5bde, 0xdefe, 0xde32, 0x7aff, 0x0d10 }, + { 0x3823, 0x0192, 0x8f0d, 0xf099, 0xeaad, 0xdd65, 0x7f76, 0xad3b }, + { 0xc9cc, 0x01b0, 0xeb7b, 0x5768, 0xf1df, 0xcf78, 0x4b81, 0x4c1e }, + { 0xd9ed, 0x81b5, 0xd61e, 0xfc2f, 0xc58c, 0xcc2f, 0x4e08, 0xec35 }, + { 0xe98e, 0x81bf, 0x91b1, 0xaaa1, 0x9979, 0xc9d6, 0x4093, 0xac63 }, + { 0xf9af, 0x01ba, 0xacd4, 0x01e6, 0xad2a, 0xca81, 0x451a, 0x0c48 }, + { 0x8948, 0x81ab, 0x1eef, 0x07bd, 0x2093, 0xc224, 0x5da5, 0x2ccf }, + { 0x9969, 0x01ae, 0x238a, 0xacfa, 
0x14c0, 0xc173, 0x582c, 0x8ce4 }, + { 0xa90a, 0x01a4, 0x6425, 0xfa74, 0x4835, 0xc48a, 0x56b7, 0xccb2 }, + { 0xb92b, 0x81a1, 0x5940, 0x5133, 0x7c66, 0xc7dd, 0x533e, 0x6c99 }, + { 0x5af5, 0x01e0, 0xacc9, 0x1ed1, 0x7ed1, 0xe0b0, 0x3f59, 0xaed3 }, + { 0x4ad4, 0x81e5, 0x91ac, 0xb596, 0x4a82, 0xe3e7, 0x3ad0, 0x0ef8 }, + { 0x7ab7, 0x81ef, 0xd603, 0xe318, 0x1677, 0xe61e, 0x344b, 0x4eae }, + { 0x6a96, 0x01ea, 0xeb66, 0x485f, 0x2224, 0xe549, 0x31c2, 0xee85 }, + { 0x1a71, 0x81fb, 0x595d, 0x4e04, 0xaf9d, 0xedec, 0x297d, 0xce02 }, + { 0x0a50, 0x01fe, 0x6438, 0xe543, 0x9bce, 0xeebb, 0x2cf4, 0x6e29 }, + { 0x3a33, 0x01f4, 0x2397, 0xb3cd, 0xc73b, 0xeb42, 0x226f, 0x2e7f }, + { 0x2a12, 0x81f1, 0x1ef2, 0x188a, 0xf368, 0xe815, 0x27e6, 0x8e54 }, + { 0xdbfd, 0x81d3, 0x7a84, 0xbf7b, 0xe81a, 0xfa08, 0x1311, 0x6f71 }, + { 0xcbdc, 0x01d6, 0x47e1, 0x143c, 0xdc49, 0xf95f, 0x1698, 0xcf5a }, + { 0xfbbf, 0x01dc, 0x004e, 0x42b2, 0x80bc, 0xfca6, 0x1803, 0x8f0c }, + { 0xeb9e, 0x81d9, 0x3d2b, 0xe9f5, 0xb4ef, 0xfff1, 0x1d8a, 0x2f27 }, + { 0x9b79, 0x01c8, 0x8f10, 0xefae, 0x3956, 0xf754, 0x0535, 0x0fa0 }, + { 0x8b58, 0x81cd, 0xb275, 0x44e9, 0x0d05, 0xf403, 0x00bc, 0xaf8b }, + { 0xbb3b, 0x81c7, 0xf5da, 0x1267, 0x51f0, 0xf1fa, 0x0e27, 0xefdd }, + { 0xab1a, 0x01c2, 0xc8bf, 0xb920, 0x65a3, 0xf2ad, 0x0bae, 0x4ff6 }, + { 0x6ca6, 0x0140, 0x23ad, 0x8da3, 0x549e, 0xbf20, 0xd6e9, 0xcb62 }, + { 0x7c87, 0x8145, 0x1ec8, 0x26e4, 0x60cd, 0xbc77, 0xd360, 0x6b49 }, + { 0x4ce4, 0x814f, 0x5967, 0x706a, 0x3c38, 0xb98e, 0xddfb, 0x2b1f }, + { 0x5cc5, 0x014a, 0x6402, 0xdb2d, 0x086b, 0xbad9, 0xd872, 0x8b34 }, + { 0x2c22, 0x815b, 0xd639, 0xdd76, 0x85d2, 0xb27c, 0xc0cd, 0xabb3 }, + { 0x3c03, 0x015e, 0xeb5c, 0x7631, 0xb181, 0xb12b, 0xc544, 0x0b98 }, + { 0x0c60, 0x0154, 0xacf3, 0x20bf, 0xed74, 0xb4d2, 0xcbdf, 0x4bce }, + { 0x1c41, 0x8151, 0x9196, 0x8bf8, 0xd927, 0xb785, 0xce56, 0xebe5 }, + { 0xedae, 0x8173, 0xf5e0, 0x2c09, 0xc255, 0xa598, 0xfaa1, 0x0ac0 }, + { 0xfd8f, 0x0176, 0xc885, 0x874e, 0xf606, 0xa6cf, 0xff28, 0xaaeb }, + { 
0xcdec, 0x017c, 0x8f2a, 0xd1c0, 0xaaf3, 0xa336, 0xf1b3, 0xeabd }, + { 0xddcd, 0x8179, 0xb24f, 0x7a87, 0x9ea0, 0xa061, 0xf43a, 0x4a96 }, + { 0xad2a, 0x0168, 0x0074, 0x7cdc, 0x1319, 0xa8c4, 0xec85, 0x6a11 }, + { 0xbd0b, 0x816d, 0x3d11, 0xd79b, 0x274a, 0xab93, 0xe90c, 0xca3a }, + { 0x8d68, 0x8167, 0x7abe, 0x8115, 0x7bbf, 0xae6a, 0xe797, 0x8a6c }, + { 0x9d49, 0x0162, 0x47db, 0x2a52, 0x4fec, 0xad3d, 0xe21e, 0x2a47 }, + { 0x7e97, 0x8123, 0xb252, 0x65b0, 0x4d5b, 0x8a50, 0x8e79, 0xe80d }, + { 0x6eb6, 0x0126, 0x8f37, 0xcef7, 0x7908, 0x8907, 0x8bf0, 0x4826 }, + { 0x5ed5, 0x012c, 0xc898, 0x9879, 0x25fd, 0x8cfe, 0x856b, 0x0870 }, + { 0x4ef4, 0x8129, 0xf5fd, 0x333e, 0x11ae, 0x8fa9, 0x80e2, 0xa85b }, + { 0x3e13, 0x0138, 0x47c6, 0x3565, 0x9c17, 0x870c, 0x985d, 0x88dc }, + { 0x2e32, 0x813d, 0x7aa3, 0x9e22, 0xa844, 0x845b, 0x9dd4, 0x28f7 }, + { 0x1e51, 0x8137, 0x3d0c, 0xc8ac, 0xf4b1, 0x81a2, 0x934f, 0x68a1 }, + { 0x0e70, 0x0132, 0x0069, 0x63eb, 0xc0e2, 0x82f5, 0x96c6, 0xc88a }, + { 0xff9f, 0x0110, 0x641f, 0xc41a, 0xdb90, 0x90e8, 0xa231, 0x29af }, + { 0xefbe, 0x8115, 0x597a, 0x6f5d, 0xefc3, 0x93bf, 0xa7b8, 0x8984 }, + { 0xdfdd, 0x811f, 0x1ed5, 0x39d3, 0xb336, 0x9646, 0xa923, 0xc9d2 }, + { 0xcffc, 0x011a, 0x23b0, 0x9294, 0x8765, 0x9511, 0xacaa, 0x69f9 }, + { 0xbf1b, 0x810b, 0x918b, 0x94cf, 0x0adc, 0x9db4, 0xb415, 0x497e }, + { 0xaf3a, 0x010e, 0xacee, 0x3f88, 0x3e8f, 0x9ee3, 0xb19c, 0xe955 }, + { 0x9f59, 0x0104, 0xeb41, 0x6906, 0x627a, 0x9b1a, 0xbf07, 0xa903 }, + { 0x8f78, 0x8101, 0xd624, 0xc241, 0x5629, 0x984d, 0xba8e, 0x0928 }, + { 0x9188, 0x8303, 0x7a6c, 0x46c3, 0xce28, 0xa8d7, 0xcf92, 0xbb53 }, + { 0x81a9, 0x0306, 0x4709, 0xed84, 0xfa7b, 0xab80, 0xca1b, 0x1b78 }, + { 0xb1ca, 0x030c, 0x00a6, 0xbb0a, 0xa68e, 0xae79, 0xc480, 0x5b2e }, + { 0xa1eb, 0x8309, 0x3dc3, 0x104d, 0x92dd, 0xad2e, 0xc109, 0xfb05 }, + { 0xd10c, 0x0318, 0x8ff8, 0x1616, 0x1f64, 0xa58b, 0xd9b6, 0xdb82 }, + { 0xc12d, 0x831d, 0xb29d, 0xbd51, 0x2b37, 0xa6dc, 0xdc3f, 0x7ba9 }, + { 0xf14e, 0x8317, 0xf532, 0xebdf, 0x77c2, 
0xa325, 0xd2a4, 0x3bff }, + { 0xe16f, 0x0312, 0xc857, 0x4098, 0x4391, 0xa072, 0xd72d, 0x9bd4 }, + { 0x1080, 0x0330, 0xac21, 0xe769, 0x58e3, 0xb26f, 0xe3da, 0x7af1 }, + { 0x00a1, 0x8335, 0x9144, 0x4c2e, 0x6cb0, 0xb138, 0xe653, 0xdada }, + { 0x30c2, 0x833f, 0xd6eb, 0x1aa0, 0x3045, 0xb4c1, 0xe8c8, 0x9a8c }, + { 0x20e3, 0x033a, 0xeb8e, 0xb1e7, 0x0416, 0xb796, 0xed41, 0x3aa7 }, + { 0x5004, 0x832b, 0x59b5, 0xb7bc, 0x89af, 0xbf33, 0xf5fe, 0x1a20 }, + { 0x4025, 0x032e, 0x64d0, 0x1cfb, 0xbdfc, 0xbc64, 0xf077, 0xba0b }, + { 0x7046, 0x0324, 0x237f, 0x4a75, 0xe109, 0xb99d, 0xfeec, 0xfa5d }, + { 0x6067, 0x8321, 0x1e1a, 0xe132, 0xd55a, 0xbaca, 0xfb65, 0x5a76 }, + { 0x83b9, 0x0360, 0xeb93, 0xaed0, 0xd7ed, 0x9da7, 0x9702, 0x983c }, + { 0x9398, 0x8365, 0xd6f6, 0x0597, 0xe3be, 0x9ef0, 0x928b, 0x3817 }, + { 0xa3fb, 0x836f, 0x9159, 0x5319, 0xbf4b, 0x9b09, 0x9c10, 0x7841 }, + { 0xb3da, 0x036a, 0xac3c, 0xf85e, 0x8b18, 0x985e, 0x9999, 0xd86a }, + { 0xc33d, 0x837b, 0x1e07, 0xfe05, 0x06a1, 0x90fb, 0x8126, 0xf8ed }, + { 0xd31c, 0x037e, 0x2362, 0x5542, 0x32f2, 0x93ac, 0x84af, 0x58c6 }, + { 0xe37f, 0x0374, 0x64cd, 0x03cc, 0x6e07, 0x9655, 0x8a34, 0x1890 }, + { 0xf35e, 0x8371, 0x59a8, 0xa88b, 0x5a54, 0x9502, 0x8fbd, 0xb8bb }, + { 0x02b1, 0x8353, 0x3dde, 0x0f7a, 0x4126, 0x871f, 0xbb4a, 0x599e }, + { 0x1290, 0x0356, 0x00bb, 0xa43d, 0x7575, 0x8448, 0xbec3, 0xf9b5 }, + { 0x22f3, 0x035c, 0x4714, 0xf2b3, 0x2980, 0x81b1, 0xb058, 0xb9e3 }, + { 0x32d2, 0x8359, 0x7a71, 0x59f4, 0x1dd3, 0x82e6, 0xb5d1, 0x19c8 }, + { 0x4235, 0x0348, 0xc84a, 0x5faf, 0x906a, 0x8a43, 0xad6e, 0x394f }, + { 0x5214, 0x834d, 0xf52f, 0xf4e8, 0xa439, 0x8914, 0xa8e7, 0x9964 }, + { 0x6277, 0x8347, 0xb280, 0xa266, 0xf8cc, 0x8ced, 0xa67c, 0xd932 }, + { 0x7256, 0x0342, 0x8fe5, 0x0921, 0xcc9f, 0x8fba, 0xa3f5, 0x7919 }, + { 0xb5ea, 0x03c0, 0x64f7, 0x3da2, 0xfda2, 0xc237, 0x7eb2, 0xfd8d }, + { 0xa5cb, 0x83c5, 0x5992, 0x96e5, 0xc9f1, 0xc160, 0x7b3b, 0x5da6 }, + { 0x95a8, 0x83cf, 0x1e3d, 0xc06b, 0x9504, 0xc499, 0x75a0, 0x1df0 }, + { 0x8589, 
0x03ca, 0x2358, 0x6b2c, 0xa157, 0xc7ce, 0x7029, 0xbddb }, + { 0xf56e, 0x83db, 0x9163, 0x6d77, 0x2cee, 0xcf6b, 0x6896, 0x9d5c }, + { 0xe54f, 0x03de, 0xac06, 0xc630, 0x18bd, 0xcc3c, 0x6d1f, 0x3d77 }, + { 0xd52c, 0x03d4, 0xeba9, 0x90be, 0x4448, 0xc9c5, 0x6384, 0x7d21 }, + { 0xc50d, 0x83d1, 0xd6cc, 0x3bf9, 0x701b, 0xca92, 0x660d, 0xdd0a }, + { 0x34e2, 0x83f3, 0xb2ba, 0x9c08, 0x6b69, 0xd88f, 0x52fa, 0x3c2f }, + { 0x24c3, 0x03f6, 0x8fdf, 0x374f, 0x5f3a, 0xdbd8, 0x5773, 0x9c04 }, + { 0x14a0, 0x03fc, 0xc870, 0x61c1, 0x03cf, 0xde21, 0x59e8, 0xdc52 }, + { 0x0481, 0x83f9, 0xf515, 0xca86, 0x379c, 0xdd76, 0x5c61, 0x7c79 }, + { 0x7466, 0x03e8, 0x472e, 0xccdd, 0xba25, 0xd5d3, 0x44de, 0x5cfe }, + { 0x6447, 0x83ed, 0x7a4b, 0x679a, 0x8e76, 0xd684, 0x4157, 0xfcd5 }, + { 0x5424, 0x83e7, 0x3de4, 0x3114, 0xd283, 0xd37d, 0x4fcc, 0xbc83 }, + { 0x4405, 0x03e2, 0x0081, 0x9a53, 0xe6d0, 0xd02a, 0x4a45, 0x1ca8 }, + { 0xa7db, 0x83a3, 0xf508, 0xd5b1, 0xe467, 0xf747, 0x2622, 0xdee2 }, + { 0xb7fa, 0x03a6, 0xc86d, 0x7ef6, 0xd034, 0xf410, 0x23ab, 0x7ec9 }, + { 0x8799, 0x03ac, 0x8fc2, 0x2878, 0x8cc1, 0xf1e9, 0x2d30, 0x3e9f }, + { 0x97b8, 0x83a9, 0xb2a7, 0x833f, 0xb892, 0xf2be, 0x28b9, 0x9eb4 }, + { 0xe75f, 0x03b8, 0x009c, 0x8564, 0x352b, 0xfa1b, 0x3006, 0xbe33 }, + { 0xf77e, 0x83bd, 0x3df9, 0x2e23, 0x0178, 0xf94c, 0x358f, 0x1e18 }, + { 0xc71d, 0x83b7, 0x7a56, 0x78ad, 0x5d8d, 0xfcb5, 0x3b14, 0x5e4e }, + { 0xd73c, 0x03b2, 0x4733, 0xd3ea, 0x69de, 0xffe2, 0x3e9d, 0xfe65 }, + { 0x26d3, 0x0390, 0x2345, 0x741b, 0x72ac, 0xedff, 0x0a6a, 0x1f40 }, + { 0x36f2, 0x8395, 0x1e20, 0xdf5c, 0x46ff, 0xeea8, 0x0fe3, 0xbf6b }, + { 0x0691, 0x839f, 0x598f, 0x89d2, 0x1a0a, 0xeb51, 0x0178, 0xff3d }, + { 0x16b0, 0x039a, 0x64ea, 0x2295, 0x2e59, 0xe806, 0x04f1, 0x5f16 }, + { 0x6657, 0x838b, 0xd6d1, 0x24ce, 0xa3e0, 0xe0a3, 0x1c4e, 0x7f91 }, + { 0x7676, 0x038e, 0xebb4, 0x8f89, 0x97b3, 0xe3f4, 0x19c7, 0xdfba }, + { 0x4615, 0x0384, 0xac1b, 0xd907, 0xcb46, 0xe60d, 0x175c, 0x9fec }, + { 0x5634, 0x8381, 0x917e, 0x7240, 0xff15, 0xe55a, 
0x12d5, 0x3fc7 }, + { 0xd94c, 0x0280, 0x475a, 0xb001, 0xa93c, 0x7d17, 0xa85b, 0x36ef }, + { 0xc96d, 0x8285, 0x7a3f, 0x1b46, 0x9d6f, 0x7e40, 0xadd2, 0x96c4 }, + { 0xf90e, 0x828f, 0x3d90, 0x4dc8, 0xc19a, 0x7bb9, 0xa349, 0xd692 }, + { 0xe92f, 0x028a, 0x00f5, 0xe68f, 0xf5c9, 0x78ee, 0xa6c0, 0x76b9 }, + { 0x99c8, 0x829b, 0xb2ce, 0xe0d4, 0x7870, 0x704b, 0xbe7f, 0x563e }, + { 0x89e9, 0x029e, 0x8fab, 0x4b93, 0x4c23, 0x731c, 0xbbf6, 0xf615 }, + { 0xb98a, 0x0294, 0xc804, 0x1d1d, 0x10d6, 0x76e5, 0xb56d, 0xb643 }, + { 0xa9ab, 0x8291, 0xf561, 0xb65a, 0x2485, 0x75b2, 0xb0e4, 0x1668 }, + { 0x5844, 0x82b3, 0x9117, 0x11ab, 0x3ff7, 0x67af, 0x8413, 0xf74d }, + { 0x4865, 0x02b6, 0xac72, 0xbaec, 0x0ba4, 0x64f8, 0x819a, 0x5766 }, + { 0x7806, 0x02bc, 0xebdd, 0xec62, 0x5751, 0x6101, 0x8f01, 0x1730 }, + { 0x6827, 0x82b9, 0xd6b8, 0x4725, 0x6302, 0x6256, 0x8a88, 0xb71b }, + { 0x18c0, 0x02a8, 0x6483, 0x417e, 0xeebb, 0x6af3, 0x9237, 0x979c }, + { 0x08e1, 0x82ad, 0x59e6, 0xea39, 0xdae8, 0x69a4, 0x97be, 0x37b7 }, + { 0x3882, 0x82a7, 0x1e49, 0xbcb7, 0x861d, 0x6c5d, 0x9925, 0x77e1 }, + { 0x28a3, 0x02a2, 0x232c, 0x17f0, 0xb24e, 0x6f0a, 0x9cac, 0xd7ca }, + { 0xcb7d, 0x82e3, 0xd6a5, 0x5812, 0xb0f9, 0x4867, 0xf0cb, 0x1580 }, + { 0xdb5c, 0x02e6, 0xebc0, 0xf355, 0x84aa, 0x4b30, 0xf542, 0xb5ab }, + { 0xeb3f, 0x02ec, 0xac6f, 0xa5db, 0xd85f, 0x4ec9, 0xfbd9, 0xf5fd }, + { 0xfb1e, 0x82e9, 0x910a, 0x0e9c, 0xec0c, 0x4d9e, 0xfe50, 0x55d6 }, + { 0x8bf9, 0x02f8, 0x2331, 0x08c7, 0x61b5, 0x453b, 0xe6ef, 0x7551 }, + { 0x9bd8, 0x82fd, 0x1e54, 0xa380, 0x55e6, 0x466c, 0xe366, 0xd57a }, + { 0xabbb, 0x82f7, 0x59fb, 0xf50e, 0x0913, 0x4395, 0xedfd, 0x952c }, + { 0xbb9a, 0x02f2, 0x649e, 0x5e49, 0x3d40, 0x40c2, 0xe874, 0x3507 }, + { 0x4a75, 0x02d0, 0x00e8, 0xf9b8, 0x2632, 0x52df, 0xdc83, 0xd422 }, + { 0x5a54, 0x82d5, 0x3d8d, 0x52ff, 0x1261, 0x5188, 0xd90a, 0x7409 }, + { 0x6a37, 0x82df, 0x7a22, 0x0471, 0x4e94, 0x5471, 0xd791, 0x345f }, + { 0x7a16, 0x02da, 0x4747, 0xaf36, 0x7ac7, 0x5726, 0xd218, 0x9474 }, + { 0x0af1, 0x82cb, 
0xf57c, 0xa96d, 0xf77e, 0x5f83, 0xcaa7, 0xb4f3 }, + { 0x1ad0, 0x02ce, 0xc819, 0x022a, 0xc32d, 0x5cd4, 0xcf2e, 0x14d8 }, + { 0x2ab3, 0x02c4, 0x8fb6, 0x54a4, 0x9fd8, 0x592d, 0xc1b5, 0x548e }, + { 0x3a92, 0x82c1, 0xb2d3, 0xffe3, 0xab8b, 0x5a7a, 0xc43c, 0xf4a5 }, + { 0xfd2e, 0x8243, 0x59c1, 0xcb60, 0x9ab6, 0x17f7, 0x197b, 0x7031 }, + { 0xed0f, 0x0246, 0x64a4, 0x6027, 0xaee5, 0x14a0, 0x1cf2, 0xd01a }, + { 0xdd6c, 0x024c, 0x230b, 0x36a9, 0xf210, 0x1159, 0x1269, 0x904c }, + { 0xcd4d, 0x8249, 0x1e6e, 0x9dee, 0xc643, 0x120e, 0x17e0, 0x3067 }, + { 0xbdaa, 0x0258, 0xac55, 0x9bb5, 0x4bfa, 0x1aab, 0x0f5f, 0x10e0 }, + { 0xad8b, 0x825d, 0x9130, 0x30f2, 0x7fa9, 0x19fc, 0x0ad6, 0xb0cb }, + { 0x9de8, 0x8257, 0xd69f, 0x667c, 0x235c, 0x1c05, 0x044d, 0xf09d }, + { 0x8dc9, 0x0252, 0xebfa, 0xcd3b, 0x170f, 0x1f52, 0x01c4, 0x50b6 }, + { 0x7c26, 0x0270, 0x8f8c, 0x6aca, 0x0c7d, 0x0d4f, 0x3533, 0xb193 }, + { 0x6c07, 0x8275, 0xb2e9, 0xc18d, 0x382e, 0x0e18, 0x30ba, 0x11b8 }, + { 0x5c64, 0x827f, 0xf546, 0x9703, 0x64db, 0x0be1, 0x3e21, 0x51ee }, + { 0x4c45, 0x027a, 0xc823, 0x3c44, 0x5088, 0x08b6, 0x3ba8, 0xf1c5 }, + { 0x3ca2, 0x826b, 0x7a18, 0x3a1f, 0xdd31, 0x0013, 0x2317, 0xd142 }, + { 0x2c83, 0x026e, 0x477d, 0x9158, 0xe962, 0x0344, 0x269e, 0x7169 }, + { 0x1ce0, 0x0264, 0x00d2, 0xc7d6, 0xb597, 0x06bd, 0x2805, 0x313f }, + { 0x0cc1, 0x8261, 0x3db7, 0x6c91, 0x81c4, 0x05ea, 0x2d8c, 0x9114 }, + { 0xef1f, 0x0220, 0xc83e, 0x2373, 0x8373, 0x2287, 0x41eb, 0x535e }, + { 0xff3e, 0x8225, 0xf55b, 0x8834, 0xb720, 0x21d0, 0x4462, 0xf375 }, + { 0xcf5d, 0x822f, 0xb2f4, 0xdeba, 0xebd5, 0x2429, 0x4af9, 0xb323 }, + { 0xdf7c, 0x022a, 0x8f91, 0x75fd, 0xdf86, 0x277e, 0x4f70, 0x1308 }, + { 0xaf9b, 0x823b, 0x3daa, 0x73a6, 0x523f, 0x2fdb, 0x57cf, 0x338f }, + { 0xbfba, 0x023e, 0x00cf, 0xd8e1, 0x666c, 0x2c8c, 0x5246, 0x93a4 }, + { 0x8fd9, 0x0234, 0x4760, 0x8e6f, 0x3a99, 0x2975, 0x5cdd, 0xd3f2 }, + { 0x9ff8, 0x8231, 0x7a05, 0x2528, 0x0eca, 0x2a22, 0x5954, 0x73d9 }, + { 0x6e17, 0x8213, 0x1e73, 0x82d9, 0x15b8, 0x383f, 0x6da3, 
0x92fc }, + { 0x7e36, 0x0216, 0x2316, 0x299e, 0x21eb, 0x3b68, 0x682a, 0x32d7 }, + { 0x4e55, 0x021c, 0x64b9, 0x7f10, 0x7d1e, 0x3e91, 0x66b1, 0x7281 }, + { 0x5e74, 0x8219, 0x59dc, 0xd457, 0x494d, 0x3dc6, 0x6338, 0xd2aa }, + { 0x2e93, 0x0208, 0xebe7, 0xd20c, 0xc4f4, 0x3563, 0x7b87, 0xf22d }, + { 0x3eb2, 0x820d, 0xd682, 0x794b, 0xf0a7, 0x3634, 0x7e0e, 0x5206 }, + { 0x0ed1, 0x8207, 0x912d, 0x2fc5, 0xac52, 0x33cd, 0x7095, 0x1250 }, + { 0x1ef0, 0x0202, 0xac48, 0x8482, 0x9801, 0x309a, 0x751c, 0xb27b }, +}; + +ZXIC_UINT32 dpp_crc32_calc(ZXIC_UINT8 *pInputKey, ZXIC_UINT32 dwByteNum, + ZXIC_UINT32 dwCrcPoly) +{ + ZXIC_UINT32 dwResult = 0; + ZXIC_UINT32 dwDataType = 0; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + + while (i < dwByteNum) { + dwDataType = (ZXIC_UINT32)((dwResult & 0xff000000) ^ + (pInputKey[i] << 24)); + for (j = 0; j < 8; j++) { + if (dwDataType & 0x80000000) { + dwDataType <<= 1; + dwDataType ^= dwCrcPoly; + } else { + dwDataType <<= 1; + } + } + dwResult <<= 8; + dwResult ^= dwDataType; + + i++; + } + + return dwResult; +} + +ZXIC_UINT16 dpp_crc16_calc(ZXIC_UINT8 *pInputKey, ZXIC_UINT32 dwByteNum, + ZXIC_UINT16 dwCrcPoly) +{ + ZXIC_UINT16 dwResult = 0; + ZXIC_UINT16 dwDataType = 0; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + + while (i < dwByteNum) { + dwDataType = (ZXIC_UINT16)(((dwResult & 0xff00) ^ + (pInputKey[i] << 8)) & + 0xFFFF); + for (j = 0; j < 8; j++) { + if (dwDataType & 0x8000) { + dwDataType <<= 1; + dwDataType ^= dwCrcPoly; + } else { + dwDataType <<= 1; + } + } + dwResult <<= 8; + dwResult ^= dwDataType; + + i++; + } + + return dwResult; +} + +ZXIC_UINT16 dpp_crc16_get_idx(ZXIC_UINT16 crc_val) +{ + switch (crc_val) { + case 0x1021: + return 0; + case 0x8005: + return 1; + case 0x3d65: + return 2; + case 0xab47: + return 3; + case 0x3453: + return 4; + case 0x0357: + return 5; + case 0x0589: + return 6; + case 0xa02b: + return 7; + default: + return 0xFFFF; + } +} + +//查表法 +ZXIC_UINT16 dpp_crc16_table_lookup(ZXIC_UINT8 *pInputKey, 
ZXIC_UINT32 dwByteNum, + ZXIC_UINT16 dwCrcPoly) +{ + ZXIC_UINT16 dwResult = 0; + ZXIC_UINT16 dwDataType = 0; + ZXIC_UINT16 tmp_index = 0; + ZXIC_UINT32 i = 0; + + ZXIC_UINT32 crc_idx = dpp_crc16_get_idx(dwCrcPoly); + + ZXIC_COMM_CHECK_INDEX(crc_idx, 0, 7); + + while (i < dwByteNum) { + dwDataType = ((dwResult & 0xff00) ^ (pInputKey[i] << 8)) & + 0xFFFF; + + tmp_index = dwDataType >> 8; + tmp_index = tmp_index & 0xff; + ZXIC_COMM_CHECK_INDEX(tmp_index, 0, 255); + ZXIC_COMM_CHECK_INDEX(crc_idx, 0, 7); + dwDataType = g_crc16_rst[tmp_index][crc_idx]; + + dwResult <<= 8; + dwResult ^= dwDataType; + + i++; + } + + return dwResult; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_se_cfg.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_se_cfg.c new file mode 100644 index 000000000000..a635dcaa0547 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/table/se/dpp_se_cfg.c @@ -0,0 +1,284 @@ +/***************************************************************************** + * 版权所有 (C)2001-2015, 深圳市中兴通讯股份有限公司。 + * + * 文件名称: dpp_se_cfg.c + * 文件标识: SE配置部分源文件 + * 内容摘要: + * 其它说明: + * 当前版本: + * 作 者: ChenWei10088471 + * 完成日期: + * 当前责任人-1: + * 当前责任人-2: + * + * DEPARTMENT : ASIC_FPGA_R&D_Dept + * MANUAL_PERCENT : 100% + *****************************************************************************/ + +#include "zxic_common.h" +#include "dpp_dev.h" +#include "dpp_module.h" +#include "dpp_se.h" +#include "dpp_se_cfg.h" +#include "dpp_hash.h" +#include "dpp_acl.h" + +DPP_SE_CFG *dpp_se_cfg[DPP_PCIE_SLOT_MAX] = { 0 }; + +static ZXIC_UINT16 g_lpm_crc[SE_ZBLK_NUM] = { + 0x1021, 0x8005, 0x3D65, 0xab47, 0x3453, 0x0357, 0x0589, 0xa02b, + 0x1021, 0x8005, 0x3D65, 0xab47, 0x3453, 0x0357, 0x0589, 0xa02b, + 0x1021, 0x8005, 0x3D65, 0xab47, 0x3453, 0x0357, 0x0589, 0xa02b, + 0x1021, 0x8005, 0x3D65, 0xab47, 0x3453, 0x0357, 0x0589, 0xa02b +}; + +#if DPP_WRITE_FILE_EN +static ZXIC_CHAR *prefix_str = "./dpp_se_data"; 
+#endif + +/***********************************************************/ +/** 配置g_se_cfg 的值 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2019/02/22 +************************************************************/ +DPP_STATUS dpp_se_cfg_set(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg) +{ + ZXIC_UINT64 se_cfg_ptr = 0; + ZXIC_UINT32 slot = 0; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_POINT(dev); + + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + dpp_se_cfg[slot] = p_se_cfg; + ZXIC_COMM_CHECK_POINT(dpp_se_cfg[slot]); + + se_cfg_ptr = ZXIC_COMM_PTR_TO_VAL(p_se_cfg); + ZXIC_COMM_TRACE_INFO("p_se_cfg address 0x%llx.\n", se_cfg_ptr); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取g_se_cfg 的值 +* @param p_se_cfg 算法模块公共管理数据结构指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yxh @date 2019/02/22 +************************************************************/ +DPP_STATUS dpp_se_cfg_get(DPP_DEV_T *dev, DPP_SE_CFG **p_se_cfg) +{ + ZXIC_UINT32 slot = 0; + ZXIC_COMM_CHECK_POINT(dev); + slot = DEV_PCIE_SLOT(dev); + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + *p_se_cfg = dpp_se_cfg[slot]; + ZXIC_COMM_CHECK_POINT(*p_se_cfg); + + ZXIC_COMM_TRACE_DEBUG("p_se_cfg address %p.\n", (*p_se_cfg)); + + return DPP_OK; +} + +DPP_STATUS dpp_se_init(DPP_DEV_T *dev, DPP_SE_CFG *p_se_cfg) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 dev_id = 0; + +#if LPM_THREAD_HW_WRITE_EN + ZXIC_UINT32 rtn = 0; +#endif + + SE_ZBLK_CFG *p_zblk_cfg = NULL; + SE_ZCELL_CFG *p_zcell_cfg = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + // dev_id = ZXIC_COMM_PTR_TO_VAL(p_se_cfg->p_client) & 0xFFFFFFFF; + dev_id = DEV_ID(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_MEMSET(p_se_cfg, 0, sizeof(DPP_SE_CFG)); + ZXIC_COMM_MEMCPY_S(&p_se_cfg->dev, 
sizeof(DPP_DEV_T), dev, + sizeof(DPP_DEV_T)); + p_se_cfg->dev_id = dev_id; + dpp_se_cfg_set(dev, p_se_cfg); + + //p_se_cfg->p_as_rslt_wrt_fun = dpp_se_lpm_as_rslt_write; + p_se_cfg->p_client = ZXIC_COMM_VAL_TO_PTR(dev_id); + + for (i = 0; i < SE_ZBLK_NUM; i++) { + p_zblk_cfg = DPP_SE_GET_ZBLK_CFG(p_se_cfg, i); + + p_zblk_cfg->zblk_idx = i; + p_zblk_cfg->is_used = 0; + p_zblk_cfg->hash_arg = g_lpm_crc[i]; + p_zblk_cfg->zcell_bm = 0; + INIT_D_NODE(&p_zblk_cfg->zblk_dn, p_zblk_cfg); + + for (j = 0; j < SE_ZCELL_NUM; j++) { + p_zcell_cfg = &p_zblk_cfg->zcell_info[j]; + + p_zcell_cfg->zcell_idx = (i << 2) + j; + p_zcell_cfg->item_used = 0; + p_zcell_cfg->mask_len = 0; + + INIT_D_NODE(&p_zcell_cfg->zcell_dn, p_zcell_cfg); + + p_zcell_cfg->zcell_avl.p_key = p_zcell_cfg; + } + } + +#if DPP_WRITE_FILE_EN + /* icm_trace_set_log_dir(prefix_str);*/ + dpp_se_file_mng_init(); +#else + +#endif + +#if LPM_THREAD_HW_WRITE_EN + for (i = 0; i < MAX_ITEM_INFO_BAK_NUM; i++) { + rtn = zxic_comm_mutex_create(&(p_se_cfg->cache_index_mutex[i])); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_mutex_create"); + } + + rtn = zxic_comm_liststack_creat(MAX_ITEM_INFO_BAK_NUM, + &p_se_cfg->p_thread_liststack_mng); + ZXIC_COMM_CHECK_RC(rtn, "zxic_comm_liststack_creat"); +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** 初始化算法管理数据结构用户自定义的数据指针,当前仅用于传入设备号的值 +* @param p_se_cfg 算法管理数据结构指针 +* @param p_client 用户自定义的数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2015/05/20 +************************************************************/ +DPP_STATUS dpp_se_client_init(DPP_SE_CFG *p_se_cfg, ZXIC_VOID *p_client) +{ + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + p_se_cfg->p_client = p_client; + p_se_cfg->reg_base = + SYS_SE_BASE_ADDR + + MODULE_SE_ALG_BASE_ADDR; /* 算法模块芯片内相对基地址 */ + // p_se_cfg->p_write32_fun = dpp_se_reg_write32; + // p_se_cfg->p_read32_fun = dpp_se_reg_read32; + + return DPP_OK; +} + +DPP_STATUS dpp_se_fun_init(DPP_SE_CFG 
*p_se_cfg, ZXIC_UINT8 id, + ZXIC_UINT32 fun_type) +{ + FUNC_ID_INFO *p_fun_info = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(id, 0, MAX_FUN_NUM - 1); + ZXIC_COMM_CHECK_INDEX(fun_type, FUN_HASH, FUN_MAX - 1); + + p_fun_info = DPP_GET_FUN_INFO(p_se_cfg, id); + + if (p_fun_info->is_used) { + ZXIC_COMM_TRACE_ERROR( + "\n Error[0x%x], fun_id [%d] is already used!", + DPP_SE_RC_FUN_INVALID, id); + return DPP_SE_RC_FUN_INVALID; + } + + p_fun_info->fun_id = id; + p_fun_info->is_used = 1; + + switch (fun_type) { + case (FUN_LPM): { + /* + p_fun_info->fun_type = FUN_LPM; + p_fun_info->fun_ptr = ZXIC_COMM_MALLOC(sizeof(DPP_ROUTE_CFG)); + ZXIC_COMM_CHECK_POINT(p_fun_info->fun_ptr); + ZXIC_COMM_MEMSET(p_fun_info->fun_ptr,0,sizeof(DPP_ROUTE_CFG)); + + ((DPP_ROUTE_CFG*)(p_fun_info->fun_ptr))->p_se_cfg = p_se_cfg; + */ + + //dpp_func_lpm_create(p_se_cfg, id); + + } break; + + case (FUN_HASH): { + p_fun_info->fun_type = FUN_HASH; + p_fun_info->fun_ptr = ZXIC_COMM_MALLOC(sizeof(DPP_HASH_CFG)); + ZXIC_COMM_CHECK_POINT(p_fun_info->fun_ptr); + ZXIC_COMM_MEMSET(p_fun_info->fun_ptr, 0, sizeof(DPP_HASH_CFG)); + ((DPP_HASH_CFG *)(p_fun_info->fun_ptr))->p_se_info = p_se_cfg; + } break; + + default: { + ZXIC_COMM_TRACE_ERROR("\n Error,unrecgnized fun_type[ %d] ", + fun_type); + ZXIC_COMM_ASSERT(0); + return DPP_SE_RC_BASE; + } break; + } + + return DPP_OK; +} + +DPP_STATUS dpp_se_fun_deinit(DPP_SE_CFG *p_se_cfg, ZXIC_UINT8 id, + ZXIC_UINT32 fun_type) +{ + FUNC_ID_INFO *p_fun_info = NULL; + + ZXIC_COMM_CHECK_POINT(p_se_cfg); + ZXIC_COMM_CHECK_INDEX(id, 0, MAX_FUN_NUM - 1); + ZXIC_COMM_CHECK_INDEX(fun_type, FUN_HASH, FUN_MAX - 1); + + p_fun_info = DPP_GET_FUN_INFO(p_se_cfg, id); + + if (0 == p_fun_info->is_used) { + ZXIC_COMM_TRACE_ERROR( + "\n Error[0x%x], fun_id [%d] is already deinit!", + DPP_SE_RC_FUN_INVALID, id); + return DPP_SE_RC_FUN_INVALID; + } + + switch (fun_type) { + case (FUN_LPM): { + /* dpp_func_lpm_destory(p_se_cfg, id);*/ + } break; + + case 
(FUN_HASH): { + if (p_fun_info->fun_ptr) { + ZXIC_COMM_FREE(p_fun_info->fun_ptr); + p_fun_info->fun_ptr = NULL; + } + } break; + + default: { + ZXIC_COMM_TRACE_ERROR("\n Error,unrecgnized fun_type[ %d] ", + fun_type); + ZXIC_COMM_ASSERT(0); + return DPP_SE_RC_BASE; + } break; + } + + p_fun_info->fun_id = id; + p_fun_info->is_used = 0; + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/Kbuild.include new file mode 100644 index 000000000000..ba92bf3c1af0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/module/tm/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/dpp_tm.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/dpp_tm.c new file mode 100644 index 000000000000..c21f7f15be91 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/module/tm/dpp_tm.c @@ -0,0 +1,20004 @@ +/************************************************************** +* 版权所有 (C)2013-2015,深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tm.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : djf +* 完成日期 : 2014/02/17 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +/****************************************************************************** + * START: 头文件 * + *****************************************************************************/ +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" +#include "dpp_etm_reg.h" +#include "dpp_module.h" +#include "dpp_tm_api.h" +//#include "dpp_tm_diag.h" +#include "dpp_tm.h" +#include "dpp_dev.h" + 
+/****************************************************************************** + * END: 头文件 * + *****************************************************************************/ + +/****************************************************************************** + * START: 常量定义 * + *****************************************************************************/ + +DPP_TM_SHAPE_PARA_TABLE g_dpp_etm_shape_para_table + [DPP_PCIE_SLOT_MAX][DPP_ETM_SHAP_TABEL_ID_MAX][DPP_TM_SHAP_MAP_ID_MAX] = { + { { { 0 } } } + }; /* coverity告警修改:单一变量不能超过10000字节 */ + +/* 全局变量读写互斥锁 */ +ZXIC_MUTEX_T g_dpp_tm_global_var_rw_mutex; +ZXIC_UINT32 g_dpp_tm_global_var_rw_mutex_flag; + +/***********************************************************/ +/** 全局变量读写互斥锁初始化 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/23 +************************************************************/ +DPP_STATUS dpp_tm_global_var_mutex_init(ZXIC_VOID) +{ + DPP_STATUS rc = DPP_OK; + + if (!g_dpp_tm_global_var_rw_mutex_flag) { + rc = zxic_comm_mutex_create(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_create"); + + g_dpp_tm_global_var_rw_mutex_flag = 1; + } + + return DPP_OK; +} + +/****************************************************************************** + * END: 常量定义 * + *****************************************************************************/ + +#if ZXIC_REAL("TM_REG") +#if 0 +/***********************************************************/ +/** 写TM寄存器 +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* @param data 写入的数据 +* +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_wr_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 addr, ZXIC_UINT32 data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 wr_data = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_CFGMT, MODULE_TM_MAX - 1); + + wr_data = data; + rc = dpp_tm_write(dev_id, 
module_id, addr, &wr_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读TM寄存器 +* @param tm_type 0-ETM,1-FTM +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_rd_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 addr) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 rd_data = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_CFGMT, MODULE_TM_MAX - 1); + + rc = dpp_tm_read(dev_id, module_id, addr, &rd_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_read"); + + ZXIC_COMM_PRINT("[0x%08x] 0x%08x\n", addr, rd_data); + + return DPP_OK; +} + +/***********************************************************/ +/** 写一片连续的TM寄存器 +* @param module_id 区分TM子模块 +* @param first_addr 起始寄存器的地址 +* @param reg_num 总共读取的寄存器数 +* +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/26 +************************************************************/ +DPP_STATUS dpp_tm_wr_more_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 first_addr, ZXIC_UINT32 first_data, ZXIC_UINT32 data_step, ZXIC_UINT32 reg_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 data = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_CFGMT, MODULE_TM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, first_addr, reg_num); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, data_step, reg_num); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, first_data, (data_step * reg_num)); + + for (i = 0; i < reg_num; i++) { + addr = first_addr + i; + data = first_data + (data_step * i); + rc = dpp_tm_wr_reg(dev_id, module_id, addr, data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_wr_reg"); + } + + return DPP_OK; +} + 
+/***********************************************************/ +/** 读一片连续的TM寄存器 +* @param module_id 区分TM子模块 +* @param first_addr 起始寄存器的地址 +* @param reg_num 总共读取的寄存器数 +* +* @return +* @remark 无 +* @see +* @author yjd @date 2017/07/26 +************************************************************/ +DPP_STATUS dpp_tm_rd_more_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 first_addr, ZXIC_UINT32 reg_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_CFGMT, MODULE_TM_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, first_addr, reg_num); + + for (i = 0; i < reg_num; i++) { + addr = first_addr + i; + rc = dpp_tm_rd_reg(dev_id, module_id, addr); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_rd_reg"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 写tm模块二层间接寄存器(仅crdt/shap模块使用) +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* @param data 写入的数据 +* +* @return +* @remark 无 +* @see +* @author whuashan @date 2019/02/25 +************************************************************/ +DPP_STATUS dpp_tm_ind_wr_reg(ZXIC_UINT32 dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 addr, ZXIC_UINT64 data) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 wr_data[2] = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_SHAP, MODULE_TM_CRDT); + + wr_data[1] = (data & 0xffffffff); + wr_data[0] = (data >> 32) & 0xffffffff; + + rc = dpp_tm_ind_write(dev_id, module_id, addr, wr_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_ind_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读tm模块二层间接寄存器(仅crdt/shap模块使用) +* @param module_id 区分TM子模块 +* @param addr 基于子模块的地址 +* +* @return +* @remark 无 +* @see +* @author whuashan @date 2019/02/25 +************************************************************/ +DPP_STATUS dpp_tm_ind_rd_reg(ZXIC_UINT32 
dev_id, ZXIC_UINT32 module_id, ZXIC_UINT32 addr) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 rd_data[2] = {0}; + ZXIC_UINT64 tmp_data = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, module_id, MODULE_TM_SHAP, MODULE_TM_CRDT); + + rc = dpp_tm_ind_read(dev_id, module_id, addr, rd_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_ind_read"); + + tmp_data = ((tmp_data | rd_data[0]) << 32) | (rd_data[1]); + ZXIC_COMM_PRINT("[0x%08x] 0x%016llx\n", addr, tmp_data); + + return DPP_OK; +} +#endif + +#endif + +#if ZXIC_REAL("TM_CFGMT") + +#if 0 +/***********************************************************/ +/** 校验子系统初始化就绪,所有子系统均初始化就绪,p_rdy值为1 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_rdy 初始化就绪标记,1-就绪,0-未就绪 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_subsystem_rdy_check(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 p_rdy = 0; /*tm子系统是否初始化完成判断*/ + ZXIC_UINT32 read_times = 50; + DPP_ETM_CFGMT_SUBSYSTEM_RDY_REG_T subsystem_rdy = {0}; + + + /* 循环判定TM子系统是否初始化完成 */ + do { + rc = dpp_reg_read(dev_id, + ETM_CFGMT_SUBSYSTEM_RDY_REGr, + 0, + 0, + &subsystem_rdy); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_rdy = (subsystem_rdy.olif_rdy) && + (subsystem_rdy.qmu_rdy) && + (subsystem_rdy.tmmu_rdy) && + (subsystem_rdy.cgavd_rdy) && + (subsystem_rdy.shap_rdy) && + (subsystem_rdy.crdt_rdy); + + if (p_rdy) { + rc = dpp_tm_cfgmt_subsystem_rdy_print(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_subsystem_rdy_print"); + break; + } + + read_times--; + zxic_comm_sleep(100); + } while (read_times > 0); + + if (read_times == 0) { + rc = dpp_tm_cfgmt_subsystem_rdy_print(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_subsystem_rdy_print"); + ZXIC_COMM_PRINT("dpp_tm_cfgmt_subsystem_rdy_check:TM INIT FAILED !!\n"); + return DPP_ERR; + } + + return 
DPP_OK; +} +#endif + +/***********************************************************/ +/** cpu读写通道验证,其读出值等于写入值。读出值不等于写入值时,返回err +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cpu_check(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 input = 0x5a5a5a5a; + ZXIC_UINT32 output = 0; + DPP_ETM_CFGMT_CPU_CHECK_REG_T cpu_access_input = { 0 }; + DPP_ETM_CFGMT_CPU_CHECK_REG_T cpu_access_output = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + cpu_access_input.cpu_check_reg = input; + rc = dpp_reg_write(dev, ETM_CFGMT_CPU_CHECK_REGr, 0, 0, + &cpu_access_input); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_read(dev, ETM_CFGMT_CPU_CHECK_REGr, 0, 0, + &cpu_access_output); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + output = cpu_access_output.cpu_check_reg; + + /* 判断读出值是否等于写入值 */ + if (input != output) { + ZXIC_COMM_TRACE_ERROR("dpp_tm_cpu_check :input != output"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置内置TM的工作模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 配置的值,0-TM模式,1-SA模式 +*ETM仅工作在TM模式,FTM可以工作TM或SA模式 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_sa_work_mode_set(DPP_DEV_T *dev, + DPP_TM_WORK_MODE_E mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_TM_SA_WORK_MODE_T tm_sa_mode = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), mode, DPP_TM_WORK_MODE_TM, DPP_TM_WORK_MODE_TM); + + tm_sa_mode.tm_sa_work_mode = mode; + rc = dpp_reg_write(dev, ETM_CFGMT_TM_SA_WORK_MODEr, 0, 0, &tm_sa_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + 
return DPP_OK; +} + +/***********************************************************/ +/** 读取内置TM的工作模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 读取的值,0-TM模式,1-SA模式 +*ETM仅工作在TM模式,FTM可以工作TM或SA模式 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_sa_work_mode_get(DPP_DEV_T *dev, + DPP_TM_WORK_MODE_E *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_TM_SA_WORK_MODE_T tm_sa_mode = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mode); + + *p_mode = DPP_TM_WORK_MODE_INVALID; + + rc = dpp_reg_read(dev, ETM_CFGMT_TM_SA_WORK_MODEr, 0, 0, &tm_sa_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_mode = tm_sa_mode.tm_sa_work_mode; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置ddr3挂接组数,共10bit[0-9],每bit对应1组ddr,TM最多使用其中8组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_num 单个tm使用的ddr组:bit[0-9]每bit代表一组ddr,如使用4567组,则配置0xf0 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/06/07 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_ddr_attach_set(DPP_DEV_T *dev, ZXIC_UINT32 ddr_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_DDR_ATTACH_T ddr_attach = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), ddr_num, 0, 0x3FF); + + ddr_attach.cfgmt_ddr_attach = ddr_num; + rc = dpp_reg_write(dev, ETM_CFGMT_CFGMT_DDR_ATTACHr, 0, 0, &ddr_attach); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取ddr3挂接组数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_num ddr组数,1-6组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 
+************************************************************/ +DPP_STATUS dpp_tm_cfgmt_ddr_attach_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_ddr_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_DDR_ATTACH_T ddr_attach = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_ddr_num); + + rc = dpp_reg_read(dev, ETM_CFGMT_CFGMT_DDR_ATTACHr, 0, 0, &ddr_attach); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + if (ddr_attach.cfgmt_ddr_attach == 1) { + *p_ddr_num = 1; + } else if (ddr_attach.cfgmt_ddr_attach == 3) { + *p_ddr_num = 2; + } else if (ddr_attach.cfgmt_ddr_attach == 7) { + *p_ddr_num = 3; + } else if (ddr_attach.cfgmt_ddr_attach == 15) { + *p_ddr_num = 4; + } else if (ddr_attach.cfgmt_ddr_attach == 31) { + *p_ddr_num = 5; + } else if (ddr_attach.cfgmt_ddr_attach == 63) { + *p_ddr_num = 6; + } else if (ddr_attach.cfgmt_ddr_attach == 127) { + *p_ddr_num = 7; + } else if (ddr_attach.cfgmt_ddr_attach == 255) { + *p_ddr_num = 8; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU工作模式,0:8 block工作模式,1:16 block工作模式 +*** (即一个chunk中block个数) 影响tm总可用的缓存节点数。 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 0-128/256K节点,1-256/512K节点,目前固定配1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_qmu_work_mode_set(DPP_DEV_T *dev, + DPP_TM_QMU_WORK_MODE_E mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_QMU_WORK_MODE_T qmu_mode = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, + DPP_TM_QMU_WORK_MODE_2M, + DPP_TM_QMU_WORK_MODE_4M); + + qmu_mode.qmu_work_mode = mode; + rc = dpp_reg_write(dev, ETM_CFGMT_QMU_WORK_MODEr, 0, 0, &qmu_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 
读取QMU工作模式,0:8 block工作模式,1:16 block工作模式 +*** (即一个chunk中block个数) 影响tm总可用的缓存节点数。 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param mode 0-128/256K节点,1-256/512K节点,目前固定配1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_qmu_work_mode_get(DPP_DEV_T *dev, + DPP_TM_QMU_WORK_MODE_E *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_QMU_WORK_MODE_T qmu_mode = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mode); + + rc = dpp_reg_read(dev, ETM_CFGMT_QMU_WORK_MODEr, 0, 0, &qmu_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_mode = qmu_mode.qmu_work_mode; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置包存储的CRC功能是否使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-禁止CRC功能,1-允许CRC功能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_crc_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_CRC_EN_T crc_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + crc_en.cfgmt_crc_en = en; + rc = dpp_reg_write(dev_id, + ETM_CFGMT_CFGMT_CRC_ENr, + 0, + 0, + &crc_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取包存储的CRC功能是否使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-禁止CRC功能,1-允许CRC功能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_crc_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_CRC_EN_T crc_en = {0}; + + 
ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + + *p_en = 0xffffffff; + rc = dpp_reg_read(dev_id, + ETM_CFGMT_CFGMT_CRC_ENr, + 0, + 0, + &crc_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = crc_en.cfgmt_crc_en; + + return DPP_OK; +} +#endif +/***********************************************************/ +/** 配置block长度模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param size block长度模式:256/512/1024 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_blk_size_set(DPP_DEV_T *dev, ZXIC_UINT32 size) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_BLKSIZE_T blk_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + switch (size) { + case 256: { + blk_size.cfgmt_blksize = DPP_ETM_BLK_SIZE_256_B; + break; + } + + case 512: { + blk_size.cfgmt_blksize = DPP_ETM_BLK_SIZE_512_B; + break; + } + + case 1024: { + blk_size.cfgmt_blksize = DPP_ETM_BLK_SIZE_1024_B; + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_tm_cfgmt_blk_size_set:TM set block size error!\n"); + return DPP_ERR; + } + } + + rc = dpp_reg_write(dev, ETM_CFGMT_CFGMT_BLKSIZEr, 0, 0, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取block长度模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_size block长度模式,256/512/1024 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_blk_size_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_size) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_BLKSIZE_T blk_size = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_size); + + rc = dpp_reg_read(dev, ETM_CFGMT_CFGMT_BLKSIZEr, 0, 0, &blk_size); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + switch (blk_size.cfgmt_blksize) { + case DPP_ETM_BLK_SIZE_256_B: { + *p_size = 256; + break; + } + + case DPP_ETM_BLK_SIZE_512_B: { + *p_size = 512; + break; + } + + case DPP_ETM_BLK_SIZE_1024_B: { + *p_size = 1024; + break; + } + + default: { + *p_size = 256; + } + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置计数模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 计数模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cnt_mode_set(ZXIC_UINT32 dev_id, DPP_TM_CNT_MODE_T *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CNT_MODE_REG_T cnt_mode = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_mode); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_mode->fc_count_mode, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_mode->count_rd_mode, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_mode->count_overflow_mode, 0, 1); + + cnt_mode.cfgmt_fc_count_mode = p_mode->fc_count_mode; + cnt_mode.cfgmt_count_rd_mode = p_mode->count_rd_mode; + cnt_mode.cfgmt_count_overflow_mode = p_mode->count_overflow_mode; + rc = dpp_reg_write(dev_id, + ETM_CFGMT_CNT_MODE_REGr, + 0, + 0, + &cnt_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取计数模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 计数模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_cnt_mode_get(ZXIC_UINT32 dev_id, DPP_TM_CNT_MODE_T *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CNT_MODE_REG_T cnt_mode = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_mode); + + rc = 
dpp_reg_read(dev_id, + ETM_CFGMT_CNT_MODE_REGr, + 0, + 0, + &cnt_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_mode->fc_count_mode = cnt_mode.cfgmt_fc_count_mode; + p_mode->count_rd_mode = cnt_mode.cfgmt_count_rd_mode; + p_mode->count_overflow_mode = cnt_mode.cfgmt_count_overflow_mode; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断屏蔽 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_mask_set(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_REG_INT_MASK_REG_T int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + int_mask.shap_int_mask = p_para->shap_int; + int_mask.crdt_int_mask = p_para->crdt_int; + int_mask.tmmu_int_mask = p_para->mmu_int; + int_mask.qmu_int_mask = p_para->qmu_int; + int_mask.cgavd_int_mask = p_para->cgavd_int; + int_mask.olif_int_mask = p_para->olif_int; + int_mask.cfgmt_int_buf_mask = p_para->cfgmt_int; + + rc = dpp_reg_write(dev_id, + ETM_CFGMT_REG_INT_MASK_REGr, + 0, + 0, + &int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断屏蔽 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_mask_get(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_REG_INT_MASK_REG_T int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc = dpp_reg_read(dev_id, + ETM_CFGMT_REG_INT_MASK_REGr, + 0, + 0, + &int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_read"); + + p_para->shap_int = int_mask.shap_int_mask; + p_para->crdt_int = int_mask.crdt_int_mask; + p_para->mmu_int = int_mask.tmmu_int_mask; + p_para->qmu_int = int_mask.qmu_int_mask; + p_para->cgavd_int = int_mask.cgavd_int_mask; + p_para->olif_int = int_mask.olif_int_mask; + p_para->cfgmt_int = int_mask.cfgmt_int_buf_mask; + + return DPP_OK; +} + +/***********************************************************/ +/** 读取中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断状态 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/04/09 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_state_get(ZXIC_UINT32 dev_id, DPP_TM_INT_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_REG_INT_STATE_REG_T int_state = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc = dpp_reg_read(dev_id, + ETM_CFGMT_REG_INT_STATE_REGr, + 0, + 0, + &int_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->shap_int = int_state.shap_int; + p_para->crdt_int = int_state.crdt_int; + p_para->mmu_int = int_state.mmu_int; + p_para->qmu_int = int_state.qmu_int; + p_para->cgavd_int = int_state.cgavd_int; + p_para->olif_int = int_state.olif_int; + p_para->cfgmt_int = int_state.cfgmt_int_buf; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置tm时钟门控是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm时钟门控,1-使能tm时钟门控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_clkgate_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CLKGATE_EN_T clkgate_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + clkgate_en.clkgate_en = en; + rc = dpp_reg_write(dev_id, + ETM_CFGMT_CLKGATE_ENr, + 0, + 0, + &clkgate_en); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取tm时钟门控是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm时钟门控,1-使能tm时钟门控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 + +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_clkgate_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CLKGATE_EN_T clkgate_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + + *p_en = 0xffffffff; + rc = dpp_reg_read(dev_id, + ETM_CFGMT_CLKGATE_ENr, + 0, + 0, + &clkgate_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = clkgate_en.clkgate_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置tm软复位是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm软复位,1-使能tm软复位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_softrst_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_SOFTRST_EN_T softrst_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + softrst_en.softrst_en = en; + rc = dpp_reg_write(dev_id, + ETM_CFGMT_SOFTRST_ENr, + 0, + 0, + &softrst_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取tm软复位是否使能 +* @param dev_id 设备编号 +* @param en 配置的值,0-禁止tm软复位,1-使能tm软复位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 + +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_softrst_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_SOFTRST_EN_T softrst_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, 
p_en); + + *p_en = 0xffffffff; + rc = dpp_reg_read(dev_id, + ETM_CFGMT_SOFTRST_ENr, + 0, + 0, + &softrst_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = softrst_en.softrst_en; + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_CGAVD") + +#if 0 +/***********************************************************/ +/** 配置各级搬移功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ +#ifdef ETM_REAL + +DPP_STATUS dpp_tm_cgavd_move_en_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CGAVD_CGAVD_CFG_MOVE_T cgavd_cfg_move = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, SYS_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_CGAVD_CFG_MOVEr, + 0, + 0, + &cgavd_cfg_move); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + if (level == QUEUE_LEVEL) { + cgavd_cfg_move.cfgmt_flow_move_en = en; + } else if (level == PP_LEVEL) { + cgavd_cfg_move.cfgmt_port_move_en = en; + } else if (level == SYS_LEVEL) { + cgavd_cfg_move.cfgmt_sys_move_en = en; + } + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_CGAVD_CFG_MOVEr, + 0, + 0, + &cgavd_cfg_move); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取各级搬移功能使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 要配置的拥塞避免层次号,0:队列级,1:端口级,2:系统级 +* @param p_en 读出的使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2016/10/18 +************************************************************/ 
+DPP_STATUS dpp_tm_cgavd_move_en_get(ZXIC_UINT32 dev_id,
+	DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 *p_en)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CGAVD_CFG_MOVE_T cgavd_cfg_move = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, SYS_LEVEL);
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en);
+
+	/* Poison the output first so a caller that ignores the return
+	 * code cannot mistake stale memory for a valid enable flag. */
+	*p_en = 0xffffffff;
+
+	rc = dpp_reg_read(dev_id, ETM_CGAVD_CGAVD_CFG_MOVEr, 0, 0,
+		&cgavd_cfg_move);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	if (level == QUEUE_LEVEL) {
+		*p_en = cgavd_cfg_move.cfgmt_flow_move_en;
+	} else if (level == PP_LEVEL) {
+		*p_en = cgavd_cfg_move.cfgmt_port_move_en;
+	} else if (level == SYS_LEVEL) {
+		*p_en = cgavd_cfg_move.cfgmt_sys_move_en;
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure the move threshold for one CGAVD level.
+* @param dev_id  device index
+* @param level   congestion-avoidance level: 0 queue, 1 port, 2 system
+* @param id      queue or port number; ignored at system level
+* @param value   port/system level: threshold in NPPU storage units (256B);
+*                queue level: move profile id, 0~15
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_move_th_set(ZXIC_UINT32 dev_id,
+	DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 id,
+	ZXIC_UINT32 value)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 q_move_profile_reg_index = 0;
+	ZXIC_UINT32 pp_move_th_reg_index = 0;
+	ZXIC_UINT32 sys_move_th_reg_index = 0;
+
+	DPP_ETM_CGAVD_MOVE_FLOW_TH_PROFILE_T q_move_profile = {0};
+	DPP_ETM_CGAVD_MV_PORT_TH_T pp_move_th = {0};
+	DPP_ETM_CGAVD_CFGMT_TOTAL_TH_T sys_move_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, SYS_LEVEL);
+
+	q_move_profile_reg_index = ETM_CGAVD_MOVE_FLOW_TH_PROFILEr;
+	pp_move_th_reg_index = ETM_CGAVD_MV_PORT_THr;
+	sys_move_th_reg_index = ETM_CGAVD_CFGMT_TOTAL_THr;
+
+	if (level == QUEUE_LEVEL) {
+		/* at queue level "value" is a profile id, not a byte count */
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, value, 0, DPP_TM_CGAVD_MOVE_PROFILE_NUM - 1);
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_ETM_Q_NUM - 1);
+
+		q_move_profile.move_drop_profile = value;
+		rc = dpp_reg_write(dev_id, q_move_profile_reg_index, 0, id,
+			&q_move_profile);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+	} else if (level == PP_LEVEL) {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_TM_PP_NUM - 1);
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, value, 0, 0x3fff);
+
+		pp_move_th.port_th = value;
+		rc = dpp_reg_write(dev_id, pp_move_th_reg_index, 0, id,
+			&pp_move_th);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+	} else {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, value, 0, 0x3fff);
+
+		sys_move_th.cfgmt_total_th = value;
+		rc = dpp_reg_write(dev_id, sys_move_th_reg_index, 0, 0,
+			&sys_move_th);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the move threshold of one CGAVD level.
+* @param dev_id   device index
+* @param level    congestion-avoidance level: 0 queue, 1 port, 2 system
+* @param id       queue or port number; ignored at system level
+* @param p_value  port/system level: threshold in NPPU storage units (256B);
+*                 queue level: move profile id, 0~15
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_move_th_get(ZXIC_UINT32 dev_id,
+	DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 id,
+	ZXIC_UINT32 *p_value)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 q_move_profile_reg_index = 0;
+	ZXIC_UINT32 pp_move_th_reg_index = 0;
+	ZXIC_UINT32 sys_move_th_reg_index = 0;
+
+	DPP_ETM_CGAVD_MOVE_FLOW_TH_PROFILE_T q_move_profile = {0};
+	DPP_ETM_CGAVD_MV_PORT_TH_T pp_move_th = {0};
+	DPP_ETM_CGAVD_CFGMT_TOTAL_TH_T sys_move_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, SYS_LEVEL);
+
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_value);
+
+	q_move_profile_reg_index = ETM_CGAVD_MOVE_FLOW_TH_PROFILEr;
+	pp_move_th_reg_index = ETM_CGAVD_MV_PORT_THr;
+	sys_move_th_reg_index = ETM_CGAVD_CFGMT_TOTAL_THr;
+
+	if (level == QUEUE_LEVEL) {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_ETM_Q_NUM - 1);
+
+		rc = dpp_reg_read(dev_id, q_move_profile_reg_index, 0, id,
+			&q_move_profile);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+		*p_value = q_move_profile.move_drop_profile;
+	} else if (level == PP_LEVEL) {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_TM_PP_NUM - 1);
+		rc = dpp_reg_read(dev_id, pp_move_th_reg_index, 0, id,
+			&pp_move_th);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+		*p_value = pp_move_th.port_th;
+	} else {
+		rc = dpp_reg_read(dev_id, sys_move_th_reg_index, 0, 0,
+			&sys_move_th);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+		*p_value = sys_move_th.cfgmt_total_th;
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure a flow-level move profile (threshold group).
+* @param dev_id        device index
+* @param move_profile  flow-level move threshold profile index, 0~15
+* @param th            flow-level move threshold, in KB
+* @return DPP_OK on success, DPP_ERR on failure
+* @remark In block mode the byte threshold is converted to blocks,
+*         rounded up, before being written to hardware.
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_flow_move_profile_set(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 move_profile,
+	ZXIC_UINT32 th)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 flow_move_th_reg_index = 0;
+	ZXIC_UINT32 blk_size = 0;
+	ZXIC_UINT32 move_th = 0;
+	ZXIC_UINT32 cgavd_cfg_mode = 0;
+	DPP_ETM_CGAVD_MOVE_FLOW_TH_T flow_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1);
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, move_profile, 0, DPP_TM_CGAVD_MOVE_PROFILE_NUM - 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, th, DPP_TM_CGAVD_KILO_UL);
+	move_th = th * DPP_TM_CGAVD_KILO_UL;
+
+	flow_move_th_reg_index = ETM_CGAVD_MOVE_FLOW_THr;
+
+	rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get");
+
+	if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) {
+		rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get");
+
+		if (blk_size != 0) {
+			/* ceil(move_th / blk_size): the remainder must be
+			 * tested on the original byte count. The previous
+			 * code divided first and then tested the quotient's
+			 * remainder, so non-multiple thresholds were
+			 * rounded incorrectly. */
+			move_th = (move_th % blk_size == 0) ?
+				(move_th / blk_size) :
+				(move_th / blk_size + 1);
+		}
+	}
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, move_th, 0, 0x1fffffff);
+
+	flow_th.move_drop_flow_th = move_th;
+	rc = dpp_reg_write(dev_id, flow_move_th_reg_index, 0, move_profile,
+		&flow_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back a flow-level move profile (threshold group).
+* @param dev_id        device index
+* @param move_profile  flow-level move threshold profile index, 0~15
+* @param p_th          flow-level move threshold, in KB
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_flow_move_profile_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 move_profile,
+	ZXIC_UINT32 *p_th)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 flow_move_th_reg_index = 0;
+	ZXIC_UINT32 blk_size = 0;
+	ZXIC_UINT32 move_th = 0;
+	ZXIC_UINT32 cgavd_cfg_mode = 0;
+
+	DPP_ETM_CGAVD_MOVE_FLOW_TH_T flow_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, move_profile, 0, DPP_TM_CGAVD_MOVE_PROFILE_NUM - 1);
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_th);
+
+	flow_move_th_reg_index = ETM_CGAVD_MOVE_FLOW_THr;
+
+	rc = dpp_reg_read(dev_id, flow_move_th_reg_index, 0, move_profile,
+		&flow_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+	move_th = flow_th.move_drop_flow_th;
+
+	rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get");
+
+	if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) {
+		rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get");
+
+		/* hardware stores blocks; convert back to KB for the caller */
+		*p_th = (move_th * blk_size) / DPP_TM_CGAVD_KILO_UL;
+	} else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) {
+		*p_th = (move_th / DPP_TM_CGAVD_KILO_UL);
+	} else {
+		ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_flow_move_profile_get:cgavd_cfg_mode is err!!\n");
+		return DPP_ERR;
+	}
+
+	return DPP_OK;
+}
+#endif
+/***********************************************************/
+/** Configure the port-shared move threshold.
+* @param dev_id  device index
+* @param th      port-shared move threshold in NPPU storage units (256B)
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_port_share_th_set(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 th)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 port_share_th_reg_index = 0;
+
+	DPP_ETM_CGAVD_CFGMT_PORT_SHARE_TH_T port_share_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1);
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, th, 0, 0x3fff);
+
+	port_share_th_reg_index = ETM_CGAVD_CFGMT_PORT_SHARE_THr;
+
+	port_share_th.cfgmt_port_share_th = th;
+
+	rc = dpp_reg_write(dev_id, port_share_th_reg_index, 0, 0,
+		&port_share_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the port-shared move threshold.
+* @param dev_id  device index
+* @param p_th    port-shared move threshold in NPPU storage units (256B)
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_port_share_th_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 *p_th)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 port_share_th_reg_index = 0;
+
+	
DPP_ETM_CGAVD_CFGMT_PORT_SHARE_TH_T port_share_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_th);
+
+	port_share_th_reg_index = ETM_CGAVD_CFGMT_PORT_SHARE_THr;
+
+	rc = dpp_reg_read(dev_id, port_share_th_reg_index, 0, 0,
+		&port_share_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+	*p_th = port_share_th.cfgmt_port_share_th;
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure the per-priority FIFO threshold used by the QMU when
+* receiving data from the NPPU.
+* @param dev_id  device index
+* @param sp      priority, 0~7
+* @param th      FIFO threshold for that priority, 0~511 entries
+*                (FIFO depth is 512)
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_move_drop_sp_th_set(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 sp,
+	ZXIC_UINT32 th)
+{
+	DPP_STATUS rc = DPP_OK;
+
+	DPP_ETM_CGAVD_MV_DROP_SP_TH_T sp_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, th, 0, 0x1ff);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sp, 0, 7);
+
+	sp_th.mvdrop_sp_th = th;
+
+	rc = dpp_reg_write(dev_id, ETM_CGAVD_MV_DROP_SP_THr, 0, sp,
+		&sp_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the per-priority FIFO threshold used by the QMU when
+* receiving data from the NPPU.
+* @param dev_id  device index
+* @param sp      priority, 0~7
+* @param p_th    FIFO threshold for that priority, 0~511 entries
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_move_drop_sp_th_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 sp,
+	ZXIC_UINT32 *p_th)
+{
+	DPP_STATUS rc = DPP_OK;
+
+	DPP_ETM_CGAVD_MV_DROP_SP_TH_T sp_th = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sp, 0, 7);
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_th);
+
+	rc = dpp_reg_read(dev_id, ETM_CGAVD_MV_DROP_SP_THr, 0, sp,
+		&sp_th);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+	*p_th = sp_th.mvdrop_sp_th;
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Force packet storage on-chip or off-chip.
+* @param dev_id  device index
+* @param en      1: force enabled, 0: disabled
+* @param mode    1: omem (force off-chip), 0: imem (force on-chip)
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_imem_omem_set(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 en,
+	ZXIC_UINT32 mode)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CGAVD_FORCE_IMEM_OMEM_T cgavd_imem_omem_mode = {0};
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, mode, 0, 1);
+
+	/* read-modify-write: preserve the other fields of the register */
+	rc = dpp_reg_read(dev_id, ETM_CGAVD_CGAVD_FORCE_IMEM_OMEMr, 0, 0,
+		&cgavd_imem_omem_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	cgavd_imem_omem_mode.choose_imem_omem = mode;
+	cgavd_imem_omem_mode.imem_omem_force_en = en;
+
+	rc = dpp_reg_write(dev_id, ETM_CGAVD_CGAVD_FORCE_IMEM_OMEMr, 0, 0,
+		&cgavd_imem_omem_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the forced on-chip/off-chip storage setting.
+* @param dev_id  device index
+* @param p_en    1: force enabled, 0: disabled
+* @param p_mode  1: omem (force off-chip), 0: imem (force on-chip)
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_imem_omem_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 *p_en,
+	ZXIC_UINT32 *p_mode)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CGAVD_FORCE_IMEM_OMEM_T cgavd_imem_omem_mode = {0};
+
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_mode);
+	/* p_en is dereferenced below as well; the original code only
+	 * validated p_mode, leaving a NULL-pointer dereference open. */
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en);
+
+	rc = dpp_reg_read(dev_id, ETM_CGAVD_CGAVD_FORCE_IMEM_OMEMr, 0, 0,
+		&cgavd_imem_omem_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	*p_mode = cgavd_imem_omem_mode.choose_imem_omem;
+	*p_en = cgavd_imem_omem_mode.imem_omem_force_en;
+
+	return DPP_OK;
+}
+
+#endif
+
+/***********************************************************/
+/** Configure the CGAVD accounting mode.
+* @param dev   device handle
+* @param mode  0: block mode, 1: byte mode
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_cfg_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CFGMT_BYTE_MODE_T cgavd_cfg_mode = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1);
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_CFGMT_BYTE_MODEr, 0, 0,
+			  &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	cgavd_cfg_mode.cfgmt_byte_mode = mode;
+
+	rc = dpp_reg_write(dev, ETM_CGAVD_CFGMT_BYTE_MODEr, 0, 0,
+			   &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the CGAVD accounting mode.
+* @param dev     device handle
+* @param p_mode  0: block mode, 1: byte mode
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_cfg_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CFGMT_BYTE_MODE_T cgavd_cfg_mode = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mode);
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_CFGMT_BYTE_MODEr, 0, 0,
+			  &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	*p_mode = cgavd_cfg_mode.cfgmt_byte_mode;
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Enable or disable congestion avoidance for one level.
+* @param dev    device handle
+* @param level  0: queue, 1: port, 2: system, 3: SA
+* @param en     enable flag, 0: disabled, 1: enabled
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_en_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 en)
+{
+	DPP_STATUS rc = DPP_OK;
+
+	DPP_ETM_CGAVD_CGAVD_SUB_EN_T cgavd_sub_en = { 0 };
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0,
+					    DPP_DEV_CHANNEL_MAX - 1);
+	ZXIC_COMM_CHECK_POINT(dev);
+
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SA_LEVEL);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), en, 0, 1);
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_CGAVD_SUB_ENr, 0, 0, &cgavd_sub_en);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	if (level == QUEUE_LEVEL) {
+		cgavd_sub_en.cgavd_flow_sub_en = en;
+	} else if (level == PP_LEVEL) {
+		cgavd_sub_en.cgavd_pp_sub_en = en;
+	} else if (level == SYS_LEVEL) {
+		cgavd_sub_en.cgavd_sys_sub_en = en;
+	} else if (level == SA_LEVEL) {
+		/* SA_LEVEL is accepted by the range check above and is
+		 * readable via dpp_tm_cgavd_en_get(); the original code
+		 * silently ignored it here and wrote the register back
+		 * unchanged. */
+		cgavd_sub_en.cgavd_sa_sub_en = en;
+	}
+
+	rc = dpp_reg_write(dev, ETM_CGAVD_CGAVD_SUB_ENr, 0, 0, &cgavd_sub_en);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the congestion-avoidance enable flag for one level.
+* @param dev    device handle
+* @param level  0: queue, 1: port, 2: system, 3: SA
+* @param p_en   enable flag, 0: disabled, 1: enabled
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_en_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 *p_en)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CGAVD_SUB_EN_T cgavd_sub_en = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SA_LEVEL);
+	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_en);
+
+	/* poison the output so an ignored error cannot look valid */
+	*p_en = 0xffffffff;
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_CGAVD_SUB_ENr, 0, 0, &cgavd_sub_en);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	if (level == QUEUE_LEVEL) {
+		*p_en = cgavd_sub_en.cgavd_flow_sub_en;
+	} else if (level == PP_LEVEL) {
+		*p_en = cgavd_sub_en.cgavd_pp_sub_en;
+	} else if (level == SYS_LEVEL) {
+		*p_en = cgavd_sub_en.cgavd_sys_sub_en;
+	} else if (level == SA_LEVEL) {
+		*p_en = cgavd_sub_en.cgavd_sa_sub_en;
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Select the DP (drop precedence) source for one level.
+* @param dev     device handle
+* @param level   0: queue, 1: port, 2: system
+* @param dp_sel  DP source: 0 dp, 1 tc, 2 pkt_len[2:0]
+* @return DPP_OK on success, DPP_ERR on failure
+* @remark The three per-level high/mid/low fields form a one-hot
+*         select; exactly one of them is set for each source.
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_dp_sel_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	DPP_TM_CGAVD_DP_SEL_E dp_sel)
+{
+	DPP_STATUS rc = DPP_OK;
+	DPP_ETM_CGAVD_CGAVD_DP_SEL_T cgavd_dp_sel = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SYS_LEVEL);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), dp_sel, DP_SEL_DP,
+					    DP_SEL_PKT_LEN);
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_CGAVD_DP_SELr, 0, 0, &cgavd_dp_sel);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	if (level == QUEUE_LEVEL) {
+		if (dp_sel == DP_SEL_DP) {
+			cgavd_dp_sel.flow_dp_sel_high = 0;
+			cgavd_dp_sel.flow_dp_sel_mid = 0;
+			cgavd_dp_sel.flow_dp_sel_low = 1;
+		} else if (dp_sel == DP_SEL_TC) {
+			cgavd_dp_sel.flow_dp_sel_high = 0;
+			cgavd_dp_sel.flow_dp_sel_mid = 1;
+			cgavd_dp_sel.flow_dp_sel_low = 0;
+		} else if (dp_sel == DP_SEL_PKT_LEN) {
+			cgavd_dp_sel.flow_dp_sel_high = 1;
+			cgavd_dp_sel.flow_dp_sel_mid = 0;
+			cgavd_dp_sel.flow_dp_sel_low = 0;
+		}
+	} else if (level == PP_LEVEL) {
+		if (dp_sel == DP_SEL_DP) {
+			cgavd_dp_sel.pp_dp_sel_high = 0;
+			cgavd_dp_sel.pp_dp_sel_mid = 0;
+			cgavd_dp_sel.pp_dp_sel_low = 1;
+		} else if (dp_sel == DP_SEL_TC) {
+			cgavd_dp_sel.pp_dp_sel_high = 0;
+			cgavd_dp_sel.pp_dp_sel_mid = 1;
+			cgavd_dp_sel.pp_dp_sel_low = 0;
+		} else if (dp_sel == DP_SEL_PKT_LEN) {
+			cgavd_dp_sel.pp_dp_sel_high = 1;
+			cgavd_dp_sel.pp_dp_sel_mid = 0;
+			cgavd_dp_sel.pp_dp_sel_low = 0;
+		}
+	} else if (level == SYS_LEVEL) {
+		if (dp_sel == DP_SEL_DP) {
+			cgavd_dp_sel.sys_dp_sel_high = 0;
+			cgavd_dp_sel.sys_dp_sel_mid = 0;
+			cgavd_dp_sel.sys_dp_sel_low = 1;
+		} else if (dp_sel == DP_SEL_TC) {
+			cgavd_dp_sel.sys_dp_sel_high = 0;
+			cgavd_dp_sel.sys_dp_sel_mid = 1;
+			cgavd_dp_sel.sys_dp_sel_low = 0;
+		} else if (dp_sel == DP_SEL_PKT_LEN) {
+			cgavd_dp_sel.sys_dp_sel_high = 1;
+			cgavd_dp_sel.sys_dp_sel_mid = 0;
+			cgavd_dp_sel.sys_dp_sel_low = 0;
+		}
+	}
+
+	rc = dpp_reg_write(dev, ETM_CGAVD_CGAVD_DP_SELr, 0, 0, &cgavd_dp_sel);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write");
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure the congestion-avoidance algorithm.
+* @param dev     device handle
+* @param level   0: queue, 1: port, 2: system
+* @param id      queue or port number; ignored at system level
+* @param method  0: TD (tail drop), 1: WRED/GRED
+* @return DPP_OK on success, DPP_ERR on failure
+* @remark For TD, configure the TD threshold before selecting TD;
+*         for WRED/GRED, configure the average queue depth first.
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_method_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 id, DPP_TM_CGAVD_METHOD_E method)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 q_avg_q_len_reg_index = 0;
+	ZXIC_UINT32 q_td_th_reg_index = 0;
+	ZXIC_UINT32 q_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 pp_avg_q_len_reg_index = 0;
+	ZXIC_UINT32 pp_td_th_reg_index = 0;
+	ZXIC_UINT32 pp_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 sys_avg_q_len_reg_index = 0;
+	ZXIC_UINT32 sys_td_th_reg_index = 0;
+	ZXIC_UINT32 sys_ca_mtd_reg_index = 0;
+
+	DPP_ETM_CGAVD_FLOW_CA_MTD_T q_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_PP_CA_MTD_T pp_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_SYS_CGAVD_METD_T sys_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_FLOW_AVG_Q_LEN_T flow_avg_q_len = { 0 };
+	DPP_ETM_CGAVD_PP_AVG_Q_LEN_T pp_avg_q_len = { 0 };
+	DPP_ETM_CGAVD_SYS_AVG_Q_LEN_T sys_avg_q_len = { 0 };
+	DPP_ETM_CGAVD_FLOW_TD_TH_T q_td_th = { 0 };
+	DPP_ETM_CGAVD_PP_TD_TH_T pp_td_th = { 0 };
+	DPP_ETM_CGAVD_SYS_TD_TH_T sys_td_th = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0,
+					    DPP_DEV_CHANNEL_MAX - 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SYS_LEVEL);
+
+	q_avg_q_len_reg_index = ETM_CGAVD_FLOW_AVG_Q_LENr;
+	q_td_th_reg_index = ETM_CGAVD_FLOW_TD_THr;
+	q_ca_mtd_reg_index = ETM_CGAVD_FLOW_CA_MTDr;
+	pp_avg_q_len_reg_index = ETM_CGAVD_PP_AVG_Q_LENr;
+	pp_td_th_reg_index = ETM_CGAVD_PP_TD_THr;
+	pp_ca_mtd_reg_index = ETM_CGAVD_PP_CA_MTDr;
+	sys_avg_q_len_reg_index = ETM_CGAVD_SYS_AVG_Q_LENr;
+	sys_td_th_reg_index = ETM_CGAVD_SYS_TD_THr;
+	sys_ca_mtd_reg_index = ETM_CGAVD_SYS_CGAVD_METDr;
+
+	switch (level) {
+	case (QUEUE_LEVEL): {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0,
+						    DPP_ETM_Q_NUM - 1);
+
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(
+			DEV_ID(dev), method, TD_METHOD, WRED_GRED_METHOD);
+
+		if (1 == method) {
+			/* WRED: read back and rewrite the average queue
+			 * depth before selecting WRED.
+			 * NOTE(review): writes back the same value that
+			 * was read — appears to (re)latch the RAM entry;
+			 * confirm against the hardware spec. */
+			rc = dpp_reg_read(dev, q_avg_q_len_reg_index, 0, id,
+					  &flow_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+
+			rc = dpp_reg_write(dev, q_avg_q_len_reg_index, 0, id,
+					   &flow_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		if (0 == method) {
+			/* TD: read back and rewrite the tail-drop
+			 * threshold before selecting TD */
+			rc = dpp_reg_read(dev, q_td_th_reg_index, 0, id,
+					  &q_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+
+			rc = dpp_reg_write(dev, q_td_th_reg_index, 0, id,
+					   &q_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		/* register encoding: 0 - TD, 1 - WRED */
+		q_cgavd_method.flow_ca_mtd = method;
+		rc = dpp_reg_write(dev, q_ca_mtd_reg_index, 0, id,
+				   &q_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		break;
+	}
+
+	case (PP_LEVEL): {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0,
+						    DPP_TM_PP_NUM - 1);
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(
+			DEV_ID(dev), method, TD_METHOD, WRED_GRED_METHOD);
+
+		if (1 == method) {
+			/* WRED: touch the average queue depth first */
+			rc = dpp_reg_read(dev, pp_avg_q_len_reg_index, 0, id,
+					  &pp_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+
+			rc = dpp_reg_write(dev, pp_avg_q_len_reg_index, 0, id,
+					   &pp_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		if (method == 0) {
+			/* TD: touch the tail-drop threshold first */
+			rc = dpp_reg_read(dev, pp_td_th_reg_index, 0, id,
+					  &pp_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+			rc = dpp_reg_write(dev, pp_td_th_reg_index, 0, id,
+					   &pp_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		/* register encoding: 0 - TD, 1 - WRED */
+		pp_cgavd_method.pp_ca_mtd = method;
+		rc = dpp_reg_write(dev, pp_ca_mtd_reg_index, 0, id,
+				   &pp_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		break;
+	}
+
+	case (SYS_LEVEL): {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(
+			DEV_ID(dev), method, TD_METHOD, WRED_GRED_METHOD);
+
+		if (1 == method) {
+			/* GRED: touch the average queue depth first */
+			rc = dpp_reg_read(dev, sys_avg_q_len_reg_index, 0, 0,
+					  &sys_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+
+			rc = dpp_reg_write(dev, sys_avg_q_len_reg_index, 0, 0,
+					   &sys_avg_q_len);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		if (method == 0) {
+			/* TD: touch the tail-drop threshold first */
+			rc = dpp_reg_read(dev, sys_td_th_reg_index, 0, 0,
+					  &sys_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_read");
+
+			rc = dpp_reg_write(dev, sys_td_th_reg_index, 0, 0,
+					   &sys_td_th);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		/* register encoding: 0 - TD, 1 - GRED */
+		sys_cgavd_method.sys_cgavd_metd = method;
+		rc = dpp_reg_write(dev, sys_ca_mtd_reg_index, 0, 0,
+				   &sys_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		break;
+	}
+
+	default: {
+		/* unreachable after the range check above; the switch is
+		 * on "level", so report level (the original message
+		 * mislabelled the value as "method") */
+		ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "level=%u error!\n",
+					  level);
+		return DPP_ERR;
+	}
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read back the congestion-avoidance algorithm.
+* @param dev       device handle
+* @param level     0: queue, 1: port, 2: system
+* @param id        queue or port number; ignored at system level
+* @param p_method  0: TD (tail drop), 1: WRED/GRED
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_method_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 id,
+	DPP_TM_CGAVD_METHOD_E *p_method)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 q_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 pp_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 sys_ca_mtd_reg_index = 0;
+
+	DPP_ETM_CGAVD_FLOW_CA_MTD_T q_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_PP_CA_MTD_T pp_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_SYS_CGAVD_METD_T sys_cgavd_method = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SYS_LEVEL);
+	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_method);
+
+	*p_method = INVALID_METHOD;
+
+	q_ca_mtd_reg_index = ETM_CGAVD_FLOW_CA_MTDr;
+	pp_ca_mtd_reg_index = ETM_CGAVD_PP_CA_MTDr;
+	sys_ca_mtd_reg_index = ETM_CGAVD_SYS_CGAVD_METDr;
+
+	if (level == QUEUE_LEVEL) {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0,
+						    DPP_ETM_Q_NUM - 1);
+
+		rc = dpp_reg_read(dev, q_ca_mtd_reg_index, 0, id,
+				  &q_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_read");
+		*p_method = 
q_cgavd_method.flow_ca_mtd;
+	} else if (level == PP_LEVEL) {
+		ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0,
+						    DPP_TM_PP_NUM - 1);
+
+		rc = dpp_reg_read(dev, pp_ca_mtd_reg_index, 0, id,
+				  &pp_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_read");
+		*p_method = pp_cgavd_method.pp_ca_mtd;
+	} else {
+		rc = dpp_reg_read(dev, sys_ca_mtd_reg_index, 0, 0,
+				  &sys_cgavd_method);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_read");
+
+		*p_method = sys_cgavd_method.sys_cgavd_metd;
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure the CPU-supplied queue depth for one level.
+* @param dev                  device handle
+* @param level                0: queue, 1: port, 2: system
+* @param q_len_use_cpu_set_en 0: use the depth read from RAM;
+*                             1: use q_len_cpu_set
+* @param q_len_cpu_set        CPU-supplied queue depth, in blocks
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_q_len_use_cpu_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 q_len_use_cpu_set_en,
+	ZXIC_UINT32 q_len_cpu_set)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 rd_cpu_or_ram_reg_index = 0;
+	ZXIC_UINT32 q_cpu_set_q_len_reg_index = 0;
+	ZXIC_UINT32 pp_cpu_set_q_len_reg_index = 0;
+	ZXIC_UINT32 sys_cpu_set_q_len_reg_index = 0;
+
+	DPP_ETM_CGAVD_RD_CPU_OR_RAM_T len_use_cpu_set_en = { 0 };
+	DPP_ETM_CGAVD_FLOW_CPU_SET_Q_LEN_T flow_q_len_cpu_set = { 0 };
+	DPP_ETM_CGAVD_PP_CPU_SET_Q_LEN_T pp_q_len_cpu_set = { 0 };
+	DPP_ETM_CGAVD_SYS_CPU_SET_Q_LEN_T sys_q_len_cpu_set = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SYS_LEVEL);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_len_use_cpu_set_en,
+					    0, 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_len_cpu_set, 0,
+					    0x1ffffff);
+
+	rd_cpu_or_ram_reg_index = ETM_CGAVD_RD_CPU_OR_RAMr;
+	q_cpu_set_q_len_reg_index = ETM_CGAVD_FLOW_CPU_SET_Q_LENr;
+	pp_cpu_set_q_len_reg_index = ETM_CGAVD_PP_CPU_SET_Q_LENr;
+	sys_cpu_set_q_len_reg_index = ETM_CGAVD_SYS_CPU_SET_Q_LENr;
+
+	/* read-modify-write: the select register holds all levels */
+	rc = dpp_reg_read(dev, rd_cpu_or_ram_reg_index, 0, 0,
+			  &len_use_cpu_set_en);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	switch (level) {
+	case (QUEUE_LEVEL): {
+		len_use_cpu_set_en.cpu_sel_flow_q_len_en = q_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_len_use_cpu_set_en == 1) {
+			flow_q_len_cpu_set.flow_cpu_set_q_len = q_len_cpu_set;
+			rc = dpp_reg_write(dev, q_cpu_set_q_len_reg_index, 0, 0,
+					   &flow_q_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	case (PP_LEVEL): {
+		len_use_cpu_set_en.cpu_sel_pp_q_len_en = q_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_len_use_cpu_set_en == 1) {
+			pp_q_len_cpu_set.pp_cpu_set_q_len = q_len_cpu_set;
+			rc = dpp_reg_write(dev, pp_cpu_set_q_len_reg_index, 0,
+					   0, &pp_q_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	case (SYS_LEVEL): {
+		len_use_cpu_set_en.cpu_sel_sys_q_len_en = q_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_len_use_cpu_set_en == 1) {
+			sys_q_len_cpu_set.sys_cpu_set_q_len = q_len_cpu_set;
+			rc = dpp_reg_write(dev, sys_cpu_set_q_len_reg_index, 0,
+					   0, &sys_q_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	default: {
+		ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "level=%u error!\n",
+					  level);
+		return DPP_ERR;
+	}
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Configure the CPU-supplied average queue depth for one level.
+* @param dev                      device handle
+* @param level                    0: queue, 1: port, 2: system
+* @param q_avg_len_use_cpu_set_en 0: use the depth read from RAM;
+*                                 1: use q_avg_len_cpu_set
+* @param q_avg_len_cpu_set        CPU-supplied average queue depth
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_q_avg_len_use_cpu_set(DPP_DEV_T *dev,
+	DPP_TM_CGAVD_LEVEL_E level,
+	ZXIC_UINT32 q_avg_len_use_cpu_set_en,
+	ZXIC_UINT32 q_avg_len_cpu_set)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 rd_cpu_or_ram_reg_index = 0;
+	ZXIC_UINT32 q_cpu_set_avg_len_reg_index = 0;
+	ZXIC_UINT32 pp_cpu_set_avg_len_reg_index = 0;
+	ZXIC_UINT32 sys_cpu_set_avg_len_reg_index = 0;
+
+	DPP_ETM_CGAVD_RD_CPU_OR_RAM_T avg_len_use_cpu_set_en = { 0 };
+	DPP_ETM_CGAVD_FLOW_CPU_SET_AVG_LEN_T flow_q_avg_len_cpu_set = { 0 };
+	DPP_ETM_CGAVD_PP_CPU_SET_AVG_Q_LEN_T pp_q_avg_len_cpu_set = { 0 };
+	DPP_ETM_CGAVD_SYS_CPU_SET_AVG_LEN_T sys_q_avg_len_cpu_set = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL,
+					    SYS_LEVEL);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev),
+					    q_avg_len_use_cpu_set_en, 0, 1);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_avg_len_cpu_set, 0,
+					    0x1ffffff);
+
+	rd_cpu_or_ram_reg_index = ETM_CGAVD_RD_CPU_OR_RAMr;
+	q_cpu_set_avg_len_reg_index = ETM_CGAVD_FLOW_CPU_SET_AVG_LENr;
+	pp_cpu_set_avg_len_reg_index = ETM_CGAVD_PP_CPU_SET_AVG_Q_LENr;
+	sys_cpu_set_avg_len_reg_index = ETM_CGAVD_SYS_CPU_SET_AVG_LENr;
+
+	/* read-modify-write: the select register holds all levels */
+	rc = dpp_reg_read(dev, rd_cpu_or_ram_reg_index, 0, 0,
+			  &avg_len_use_cpu_set_en);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	switch (level) {
+	case (QUEUE_LEVEL): {
+		avg_len_use_cpu_set_en.cpu_sel_flow_avg_q_len_en =
+			q_avg_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &avg_len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_avg_len_use_cpu_set_en == 1) {
+			flow_q_avg_len_cpu_set.flow_cpu_set_avg_len =
+				q_avg_len_cpu_set;
+			rc = dpp_reg_write(dev, q_cpu_set_avg_len_reg_index, 0,
+					   0, &flow_q_avg_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	case (PP_LEVEL): {
+		avg_len_use_cpu_set_en.cpu_sel_pp_avg_q_len_en =
+			q_avg_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &avg_len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_avg_len_use_cpu_set_en == 1) {
+			pp_q_avg_len_cpu_set.pp_cpu_set_avg_q_len =
+				q_avg_len_cpu_set;
+			rc = dpp_reg_write(dev, pp_cpu_set_avg_len_reg_index, 0,
+					   0, &pp_q_avg_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	case (SYS_LEVEL): {
+		avg_len_use_cpu_set_en.cpu_sel_sys_avg_q_len_en =
+			q_avg_len_use_cpu_set_en;
+		rc = dpp_reg_write(dev, rd_cpu_or_ram_reg_index, 0, 0,
+				   &avg_len_use_cpu_set_en);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_reg_write");
+
+		if (q_avg_len_use_cpu_set_en == 1) {
+			sys_q_avg_len_cpu_set.sys_cpu_set_avg_len =
+				q_avg_len_cpu_set;
+			rc = dpp_reg_write(dev, sys_cpu_set_avg_len_reg_index,
+					   0, 0, &sys_q_avg_len_cpu_set);
+			ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+							 "dpp_reg_write");
+		}
+
+		break;
+	}
+
+	default: {
+		ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "level=%u error!\n",
+					  level);
+		return DPP_ERR;
+	}
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read the flow (queue) level queue depth.
+* @param dev     device handle
+* @param que_id  queue number
+* @param p_len   queue depth, in KB
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_flow_que_len_get(DPP_DEV_T *dev,
+	ZXIC_UINT32 que_id,
+	ZXIC_UINT32 *p_len)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 blk_size = 0;
+	ZXIC_UINT32 cgavd_cfg_mode = 0;
+
+	DPP_ETM_CGAVD_FLOW_Q_LEN_T dpp_tm_flow_len = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(dev);
+	ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_len);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), que_id, 0,
+					    DPP_ETM_Q_NUM - 1);
+
+	rc = dpp_reg_read(dev, ETM_CGAVD_FLOW_Q_LENr, 0, que_id,
+			  &dpp_tm_flow_len);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read");
+
+	rc = dpp_tm_cgavd_cfg_mode_get(dev, &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+					 "dpp_tm_cgavd_cfg_mode_get");
+
+	if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) {
+		rc = dpp_tm_cfgmt_blk_size_get(dev, &blk_size);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc,
+						 "dpp_tm_cfgmt_blk_size_get");
+
+		/* hardware counts blocks; convert to KB for the caller */
+		*p_len = ((dpp_tm_flow_len.flow_q_len * blk_size) /
+			  DPP_TM_CGAVD_KILO_UL);
+	} else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) {
+		*p_len = ((dpp_tm_flow_len.flow_q_len) / DPP_TM_CGAVD_KILO_UL);
+	} else {
+		ZXIC_COMM_TRACE_DEV_ERROR(
+			DEV_ID(dev),
+			"dpp_tm_flow_que_len_get:cgavd_cfg_mode is err!!\n");
+		return DPP_ERR;
+	}
+
+	return DPP_OK;
+}
+
+#if 0
+/***********************************************************/
+/** Read the port-level queue depth.
+* @param dev_id  device index
+* @param pp_id   port number
+* @param pp_len  queue depth, in KB
+* @return DPP_OK on success, DPP_ERR on failure
+************************************************************/
+DPP_STATUS dpp_tm_port_que_len_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 pp_id,
+	ZXIC_UINT32 *pp_len)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 blk_size = 0;
+	DPP_ETM_CGAVD_PP_Q_LEN_T dpp_tm_pp_len = {0};
+	ZXIC_UINT32 cgavd_cfg_mode = 0;
+
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, pp_len);
+	ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, pp_id, 0, DPP_TM_PP_NUM - 1);
+
+	/* Read the PP (port) queue-length register; the original code
+	 * read ETM_CGAVD_FLOW_Q_LENr (copy-paste from the flow variant)
+	 * into a PP_Q_LEN structure, returning the wrong queue's depth. */
+	rc = dpp_reg_read(dev_id, ETM_CGAVD_PP_Q_LENr, 0, pp_id,
+		&dpp_tm_pp_len);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get");
+
+	if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) {
+		rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get");
+
+		*pp_len = ((dpp_tm_pp_len.pp_q_len * blk_size) / DPP_TM_CGAVD_KILO_UL);
+	} else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) {
+		*pp_len = ((dpp_tm_pp_len.pp_q_len) / DPP_TM_CGAVD_KILO_UL);
+	} else {
+		ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_port_que_len_get err!!\n");
+		return DPP_ERR;
+	}
+
+	return DPP_OK;
+}
+
+/***********************************************************/
+/** Read the system-level queue depth.
+* @param dev_id            device index
+* @param sys_len           system-level depth
+* @param sys_protocol_len  system-level depth including protocol queues
+* @return DPP_OK on success, DPP_ERR on failure
+* @remark NOTE(review): the original header said sys_len is in blocks,
+*         but both outputs are divided by DPP_TM_CGAVD_KILO_UL like the
+*         KB-based getters — confirm the intended unit with the HW spec.
+************************************************************/
+DPP_STATUS dpp_tm_sys_que_len_get(ZXIC_UINT32 dev_id,
+	ZXIC_UINT32 *sys_len,
+	ZXIC_UINT32 *sys_protocol_len)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 blk_size = 0;
+	ZXIC_UINT32 sys_q_len_reg_index = 0;
+	ZXIC_UINT32 sys_q_len_l_reg_index = 0;
+	DPP_ETM_CGAVD_SYS_Q_LEN_T dpp_tm_sys_len = {0};
+	DPP_ETM_CGAVD_CGAVD_SYS_Q_LEN_L_T dpp_tm_sys_protocol_len = {0};
+	ZXIC_UINT32 cgavd_cfg_mode = 0;
+
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, sys_protocol_len);
+	ZXIC_COMM_CHECK_DEV_POINT(dev_id, sys_len);
+
+	sys_q_len_reg_index = ETM_CGAVD_SYS_Q_LENr;
+	sys_q_len_l_reg_index = ETM_CGAVD_CGAVD_SYS_Q_LEN_Lr;
+
+	rc = dpp_reg_read(dev_id, sys_q_len_reg_index, 0, 0,
+		&dpp_tm_sys_len);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	rc = dpp_reg_read(dev_id, sys_q_len_l_reg_index, 0, 0,
+		&dpp_tm_sys_protocol_len);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read");
+
+	rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode);
+	ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get");
+
+	if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) {
+		rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size);
+		ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get");
+
+		*sys_len = ((dpp_tm_sys_len.sys_q_len * blk_size) / DPP_TM_CGAVD_KILO_UL);
+		*sys_protocol_len = ((dpp_tm_sys_protocol_len.cgavd_sys_q_len_l * blk_size) / DPP_TM_CGAVD_KILO_UL);
+	} else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) {
+		*sys_len = ((dpp_tm_sys_len.sys_q_len) / DPP_TM_CGAVD_KILO_UL);
+		*sys_protocol_len = ((dpp_tm_sys_protocol_len.cgavd_sys_q_len_l) / DPP_TM_CGAVD_KILO_UL);
+	} else {
+		ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_sys_que_len_get err!!\n");
+		return DPP_ERR;
+	}
+
+	return DPP_OK;
+}
+
+#endif
+
+/***********************************************************/
+/** 配置TD拥塞避免模式下的丢弃门限值
+* @param dev_id 设备编号
+* @param tm_type 0-ETM,1-FTM
+* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级
+* @param id 队列号或端口号,系统级时,id参数无效
+* @param byte_block_th 配置的丢弃门限值,ZXIC_UINT8/BLOCK单位写入寄存器
+*
+* @return DPP_OK-成功,DPP_ERR-失败
+* @remark 无
+* @see
+* @author cy @date 2016/07/29
+************************************************************/
+DPP_STATUS dpp_tm_cgavd_td_byte_block_th_set(DPP_DEV_T *dev,
+					     DPP_TM_CGAVD_LEVEL_E level,
+					     ZXIC_UINT32 id,
+					     ZXIC_UINT32 byte_block_th)
+{
+	DPP_STATUS rc = DPP_OK;
+	ZXIC_UINT32 q_td_th_reg_index = 0;
+	ZXIC_UINT32 q_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 pp_td_th_reg_index = 0;
+	ZXIC_UINT32 pp_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 sys_td_th_reg_index = 0;
+	ZXIC_UINT32 sys_ca_mtd_reg_index = 0;
+	ZXIC_UINT32 read_times = 50;
+	DPP_ETM_CGAVD_FLOW_CA_MTD_T q_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_FLOW_TD_TH_T q_td_th = { 0 };
+	DPP_ETM_CGAVD_PP_CA_MTD_T pp_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_PP_TD_TH_T pp_td_th = { 0 };
+	DPP_ETM_CGAVD_SYS_CGAVD_METD_T sys_cgavd_method = { 0 };
+	DPP_ETM_CGAVD_SYS_TD_TH_T sys_td_th
= { 0 }; + ZXIC_UINT32 qlist_clr_done_flag = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + SYS_LEVEL); + + q_td_th_reg_index = ETM_CGAVD_FLOW_TD_THr; + q_ca_mtd_reg_index = ETM_CGAVD_FLOW_CA_MTDr; + pp_td_th_reg_index = ETM_CGAVD_PP_TD_THr; + pp_ca_mtd_reg_index = ETM_CGAVD_PP_CA_MTDr; + sys_td_th_reg_index = ETM_CGAVD_SYS_TD_THr; + sys_ca_mtd_reg_index = ETM_CGAVD_SYS_CGAVD_METDr; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), byte_block_th, + 0, 0x1fffffff); + + q_td_th.flow_td_th = byte_block_th; + rc = dpp_reg_write(dev, q_td_th_reg_index, 0, id, &q_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + q_cgavd_method.flow_ca_mtd = TD_METHOD; + + rc = dpp_reg_write(dev, q_ca_mtd_reg_index, 0, id, + &q_cgavd_method); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + /* add by zhmy begin@20151103 */ + if (byte_block_th == 0) { + do { + rc = dpp_tm_qmu_qlist_qcfg_clr_done_get( + dev, &qlist_clr_done_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_qmu_qlist_qcfg_clr_done_get"); + + read_times--; + + if (qlist_clr_done_flag == 1) { + break; + } + + zxic_comm_delay(10); + } while (read_times > 0); + + if (read_times == 0) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_tm_qmu_qlist_qcfg_clr_done_get time out\n"); + return DPP_ERR; + } + } + + /* add by zhmy end@20151103 */ + } + + else if (level == PP_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), byte_block_th, + 0, 0x1fffffff); + + pp_td_th.pp_td_th = byte_block_th; + rc = dpp_reg_write(dev, pp_td_th_reg_index, 0, id, &pp_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + pp_cgavd_method.pp_ca_mtd = TD_METHOD; + rc = 
dpp_reg_write(dev, pp_ca_mtd_reg_index, 0, id, + &pp_cgavd_method); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), byte_block_th, + 0, 0x1fffffff); + + sys_td_th.sys_td_th = byte_block_th; + rc = dpp_reg_write(dev, sys_td_th_reg_index, 0, 0, &sys_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + sys_cgavd_method.sys_cgavd_metd = TD_METHOD; + + rc = dpp_reg_write(dev, sys_ca_mtd_reg_index, 0, 0, + &sys_cgavd_method); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param td_th 配置的丢弃门限值,用户配置门限值单位为Kbyte,需要转化为Block或者ZXIC_UINT8单位写入寄存器 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_set(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 blk_size = 0; + ZXIC_UINT32 blk_th = 0; + ZXIC_UINT32 cgavd_cfg_mode = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + SYS_LEVEL); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), td_th, 0, 512 * 1024); + + td_th = (ZXIC_UINT32)(td_th * DPP_TM_CGAVD_KILO_UL); + + rc = dpp_tm_cgavd_cfg_mode_get(dev, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cfgmt_blk_size_get"); + + if 
(blk_size != 0) { + blk_th = (td_th / blk_size); + blk_th = (td_th % blk_size == 0) ? (blk_th) : + ((blk_th) + 1); + } + + rc = dpp_tm_cgavd_td_byte_block_th_set(dev, level, id, blk_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_tm_cgavd_td_byte_block_th_set"); + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + rc = dpp_tm_cgavd_td_byte_block_th_set(dev, level, id, td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_tm_cgavd_td_byte_block_th_set"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "dpp_tm_cgavd_td_th_set err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_byte_block_th 配置的丢弃门限值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_byte_block_th_get(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + ZXIC_UINT32 *p_byte_block_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q_td_th_reg_index = 0; + ZXIC_UINT32 pp_td_th_reg_index = 0; + ZXIC_UINT32 sys_td_th_reg_index = 0; + DPP_ETM_CGAVD_FLOW_TD_TH_T q_td_th = { 0 }; + DPP_ETM_CGAVD_PP_TD_TH_T pp_td_th = { 0 }; + DPP_ETM_CGAVD_SYS_TD_TH_T sys_td_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + SYS_LEVEL); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_byte_block_th); + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_Q_NUM - 1); + } + + else if (level == PP_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_TM_PP_NUM - 1); + } + + else { + ZXIC_COMM_PRINT("sys:id is not to be checked!!\n"); + } + + q_td_th_reg_index = 
ETM_CGAVD_FLOW_TD_THr; + pp_td_th_reg_index = ETM_CGAVD_PP_TD_THr; + sys_td_th_reg_index = ETM_CGAVD_SYS_TD_THr; + + if (level == QUEUE_LEVEL) { + rc = dpp_reg_read(dev, q_td_th_reg_index, 0, id, &q_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + *p_byte_block_th = q_td_th.flow_td_th; + } + + else if (level == PP_LEVEL) { + rc = dpp_reg_read(dev, pp_td_th_reg_index, 0, id, &pp_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + *p_byte_block_th = pp_td_th.pp_td_th; + } + + else { + rc = dpp_reg_read(dev, sys_td_th_reg_index, 0, 0, &sys_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + *p_byte_block_th = sys_td_th.sys_td_th; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取TD拥塞避免模式下的丢弃门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 队列号或端口号,系统级时,id参数无效 +* @param p_td_th 配置的丢弃门限值KZXIC_UINT8单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/07/29 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_get(DPP_DEV_T *dev, DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 *p_td_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 block_byte_th = 0; + ZXIC_UINT32 blk_size = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + SYS_LEVEL); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_td_th); + + *p_td_th = 0; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_Q_NUM - 1); + } + + else if (level == PP_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_TM_PP_NUM - 1); + } + + else { + ZXIC_COMM_PRINT("sys:id is not to be checked!!\n"); + } + + rc = dpp_tm_cgavd_td_byte_block_th_get(dev, level, id, &block_byte_th); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_td_byte_block_th_get"); + + rc = dpp_tm_cgavd_cfg_mode_get(dev, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cfgmt_blk_size_get"); + + *p_td_th = (block_byte_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + *p_td_th = (block_byte_th / DPP_TM_CGAVD_KILO_UL); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "dpp_tm_cgavd_td_th_get err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置指定端口或队列绑定的WRED GROUP ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param wred_id 配置的WRED GROUP ID +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_id_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + ZXIC_UINT32 wred_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q_wred_grp_reg_index = 0; + ZXIC_UINT32 pp_wrd_grp_th_en_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_WRED_GRP_T q_wred_group = {0}; + DPP_ETM_CGAVD_PP_WRED_GRP_TH_EN_T pp_wred_group = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + + q_wred_grp_reg_index = ETM_CGAVD_FLOW_WRED_GRPr; + pp_wrd_grp_th_en_reg_index = ETM_CGAVD_PP_WRED_GRP_TH_ENr; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + + q_wred_group.flow_wred_grp = wred_id; + rc = 
dpp_reg_write(dev_id, + q_wred_grp_reg_index, + 0, + id, + &q_wred_group); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + + rc = dpp_reg_read(dev_id, + pp_wrd_grp_th_en_reg_index, + 0, + id, + &pp_wred_group); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + pp_wred_group.pp_wred_grp = wred_id; + rc = dpp_reg_write(dev_id, + pp_wrd_grp_th_en_reg_index, + 0, + id, + &pp_wred_group); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取指定端口或队列绑定的WRED GROUP ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param p_wred_id 配置的WRED GROUP ID +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_id_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, + ZXIC_UINT32 *p_wred_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q_wred_grp_reg_index = 0; + ZXIC_UINT32 pp_wrd_grp_th_en_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_WRED_GRP_T q_wred_group = {0}; + DPP_ETM_CGAVD_PP_WRED_GRP_TH_EN_T pp_wred_group = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_wred_id); + + q_wred_grp_reg_index = ETM_CGAVD_FLOW_WRED_GRPr; + pp_wrd_grp_th_en_reg_index = ETM_CGAVD_PP_WRED_GRP_TH_ENr; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_ETM_Q_NUM - 1); + rc = dpp_reg_read(dev_id, + q_wred_grp_reg_index, + 0, + id, + &q_wred_group); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_wred_id = q_wred_group.flow_wred_grp; + } + + else { + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, DPP_TM_PP_NUM - 1); + rc = dpp_reg_read(dev_id, + pp_wrd_grp_th_en_reg_index, + 0, + id, + &pp_wred_group); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_wred_id = pp_wred_group.pp_wred_grp; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,包含以下五个参数 + max_th 平均队列深度上限阈值ZXIC_UINT8/BLOCK单位 + min_th 平均队列深度下限阈值ZXIC_UINT8/BLOCK单位 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_block_byte_para_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + DPP_TM_WRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 q_wred_max_th_reg_index = 0; + ZXIC_UINT32 q_wred_min_th_reg_index = 0; + ZXIC_UINT32 q_wred_cfg_para_reg_index = 0; + ZXIC_UINT32 q_wq_reg_index = 0; + ZXIC_UINT32 q_wred_len_th_reg_index = 0; + + ZXIC_UINT32 pp_wred_max_th_reg_index = 0; + ZXIC_UINT32 pp_wred_min_th_reg_index = 0; + ZXIC_UINT32 pp_wred_cfg_para_reg_index = 0; + ZXIC_UINT32 pp_wq_reg_index = 0; + ZXIC_UINT32 pp_wred_len_th_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_WRED_MAX_TH_T q_max_th = {0}; + DPP_ETM_CGAVD_FLOW_WRED_MIN_TH_T q_min_th = {0}; + DPP_ETM_CGAVD_FLOW_WRED_CFG_PARA_T q_cfg_para = {0}; + DPP_ETM_CGAVD_FLOW_WQ_T q_wq = {0}; + DPP_ETM_CGAVD_FLOW_WRED_Q_LEN_TH_T q_len_th = {0}; + DPP_ETM_CGAVD_PP_WRED_MAX_TH_T pp_max_th = {0}; + DPP_ETM_CGAVD_PP_WRED_MIN_TH_T pp_min_th = {0}; + DPP_ETM_CGAVD_PP_CFG_PARA_T pp_cfg_para = {0}; + DPP_ETM_CGAVD_PP_WQ_T pp_wq = {0}; + 
DPP_ETM_CGAVD_PP_WRED_Q_LEN_TH_T pp_len_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->min_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->q_len_th, 0, 0x1fffffff); + + q_wred_max_th_reg_index = ETM_CGAVD_FLOW_WRED_MAX_THr; + q_wred_min_th_reg_index = ETM_CGAVD_FLOW_WRED_MIN_THr; + q_wred_cfg_para_reg_index = ETM_CGAVD_FLOW_WRED_CFG_PARAr; + q_wq_reg_index = ETM_CGAVD_FLOW_WQr; + q_wred_len_th_reg_index = ETM_CGAVD_FLOW_WRED_Q_LEN_THr; + + pp_wred_max_th_reg_index = ETM_CGAVD_PP_WRED_MAX_THr; + pp_wred_min_th_reg_index = ETM_CGAVD_PP_WRED_MIN_THr; + pp_wred_cfg_para_reg_index = ETM_CGAVD_PP_CFG_PARAr; + pp_wq_reg_index = ETM_CGAVD_PP_WQr; + pp_wred_len_th_reg_index = ETM_CGAVD_PP_WRED_Q_LEN_THr; + + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW(wred_id, DPP_TM_DP_NUM); + index = wred_id * DPP_TM_DP_NUM + dp; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + + /* q_max_th */ + q_max_th.flow_wred_max_th = p_para->max_th; + rc = dpp_reg_write(dev_id, + q_wred_max_th_reg_index, + 0, + index, + &q_max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* q_min_th */ + q_min_th.flow_wred_min_th = p_para->min_th; + rc = dpp_reg_write(dev_id, + q_wred_min_th_reg_index, + 0, + index, + &q_min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* q_cfg_para,乘以100用来换算百分比,用户配置的是值是1-100 */ + if (p_para->max_p != 0) { + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, 
q_max_th.flow_wred_max_th + 1, q_min_th.flow_wred_min_th); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, 100, (q_max_th.flow_wred_max_th - q_min_th.flow_wred_min_th)); + q_cfg_para.flow_wred_cfg_para = (ZXIC_UINT32)(100 * (q_max_th.flow_wred_max_th - q_min_th.flow_wred_min_th) / p_para->max_p); + } else { + q_cfg_para.flow_wred_cfg_para = DPP_TM_Q_WRED_MAX_CFG_PARA; + } + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, q_cfg_para.flow_wred_cfg_para, 0, 0xffffffff); + + rc = dpp_reg_write(dev_id, + q_wred_cfg_para_reg_index, + 0, + index, + &q_cfg_para); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 必须先写q_wq */ + q_wq.wq_flow = p_para->weight; + rc = dpp_reg_write(dev_id, + q_wq_reg_index, + 0, + wred_id, + &q_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 再写q_len_th */ + q_len_th.flow_wred_q_len_th = p_para->q_len_th; + rc = dpp_reg_write(dev_id, + q_wred_len_th_reg_index, + 0, + wred_id, + &q_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + + /* pp_max_th */ + pp_max_th.pp_wred_max_th = p_para->max_th; + rc = dpp_reg_write(dev_id, + pp_wred_max_th_reg_index, + 0, + index, + &pp_max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* pp_min_th */ + pp_min_th.pp_wred_min_th = p_para->min_th; + rc = dpp_reg_write(dev_id, + pp_wred_min_th_reg_index, + 0, + index, + &pp_min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* pp_cfg_para,乘以100用来换算百分比,用户配置的是值是1-100 */ + if (p_para->max_p != 0) { + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, pp_max_th.pp_wred_max_th + 1, pp_min_th.pp_wred_min_th); + pp_cfg_para.pp_cfg_para = (ZXIC_UINT32)(100 * (pp_max_th.pp_wred_max_th - pp_min_th.pp_wred_min_th) / p_para->max_p); + } else { + pp_cfg_para.pp_cfg_para = DPP_TM_PP_WRED_MAX_CFG_PARA; + } + + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, pp_cfg_para.pp_cfg_para, 0, 0xffffffff); + + rc = dpp_reg_write(dev_id, + pp_wred_cfg_para_reg_index, + 0, + index, + &pp_cfg_para); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 必须先写pp_len_th */ + pp_len_th.pp_wred_q_len_th = p_para->q_len_th; + rc = dpp_reg_write(dev_id, + pp_wred_len_th_reg_index, + 0, + wred_id, + &pp_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 再写pp_wq */ + pp_wq.wq_pp = p_para->weight; + rc = dpp_reg_write(dev_id, + pp_wq_reg_index, + 0, + wred_id, + &pp_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,用户配置门限值单位为Kbyte,需要转化为Block或者ZXIC_UINT8单位写入寄存器 + 包含以下五个参数 + max_th 平均队列深度上限阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_para_set(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + DPP_TM_WRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 blk_size = 0; + DPP_TM_WRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + } else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + } + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + + p_para->max_th = p_para->max_th * DPP_TM_CGAVD_KILO_UL; + p_para->min_th = p_para->min_th * DPP_TM_CGAVD_KILO_UL; + + if ((p_para->q_len_th) * DPP_TM_CGAVD_KILO_UL > 0x1fffffff) { + p_para->q_len_th = 0x1fffffff; + } else { + p_para->q_len_th = (ZXIC_UINT32)(p_para->q_len_th) * DPP_TM_CGAVD_KILO_UL; + } + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_th, p_para->min_th, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->min_th, 0, 0x1fffffff); + + para.max_p = p_para->max_p; + para.weight = p_para->weight; + + if (p_para->max_th == p_para->min_th) { + para.weight = 0; + } + + rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get"); + + if (blk_size != 0) { + para.max_th = ((p_para->max_th) / blk_size); + para.max_th = ((p_para->max_th) % blk_size == 0) ? (para.max_th) : ((para.max_th) + 1); + para.min_th = ((p_para->min_th) / blk_size); + para.min_th = ((p_para->min_th) % blk_size == 0) ? (para.min_th) : ((para.min_th) + 1); + para.q_len_th = ((p_para->q_len_th) / blk_size); + para.q_len_th = ((p_para->q_len_th) % blk_size == 0) ? 
(para.q_len_th) : ((para.q_len_th) + 1); + } + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + para.max_th = p_para->max_th; + para.min_th = p_para->min_th; + para.q_len_th = p_para->q_len_th; + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_wred_dp_line_para_set err!!\n"); + return DPP_ERR; + } + + rc = dpp_tm_cgavd_wred_dp_line_block_byte_para_set(dev_id, level, wred_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_wred_dp_line_block_byte_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,包含以下五个参数 + max_th 平均队列深度上限阈值ZXIC_UINT8/BLOCK单位 + min_th 平均队列深度下限阈值ZXIC_UINT8/BLOCK单位 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_block_byte_para_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + DPP_TM_WRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 q_wred_max_th_reg_index = 0; + ZXIC_UINT32 q_wred_min_th_reg_index = 0; + ZXIC_UINT32 q_wred_cfg_para_reg_index = 0; + ZXIC_UINT32 q_wq_reg_index = 0; + ZXIC_UINT32 q_wred_len_th_reg_index = 0; + + ZXIC_UINT32 pp_wred_max_th_reg_index = 0; + ZXIC_UINT32 pp_wred_min_th_reg_index = 0; + ZXIC_UINT32 pp_wred_cfg_para_reg_index = 0; + ZXIC_UINT32 pp_wq_reg_index = 0; + ZXIC_UINT32 pp_wred_len_th_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_WRED_MAX_TH_T q_max_th = {0}; + DPP_ETM_CGAVD_FLOW_WRED_MIN_TH_T q_min_th = {0}; + DPP_ETM_CGAVD_FLOW_WRED_CFG_PARA_T q_cfg_para = {0}; + DPP_ETM_CGAVD_FLOW_WQ_T q_wq = {0}; + 
DPP_ETM_CGAVD_FLOW_WRED_Q_LEN_TH_T q_len_th = {0}; + DPP_ETM_CGAVD_PP_WRED_MAX_TH_T pp_max_th = {0}; + DPP_ETM_CGAVD_PP_WRED_MIN_TH_T pp_min_th = {0}; + DPP_ETM_CGAVD_PP_CFG_PARA_T pp_cfg_para = {0}; + DPP_ETM_CGAVD_PP_WQ_T pp_wq = {0}; + DPP_ETM_CGAVD_PP_WRED_Q_LEN_TH_T pp_len_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + q_wred_max_th_reg_index = ETM_CGAVD_FLOW_WRED_MAX_THr; + q_wred_min_th_reg_index = ETM_CGAVD_FLOW_WRED_MIN_THr; + q_wred_cfg_para_reg_index = ETM_CGAVD_FLOW_WRED_CFG_PARAr; + q_wq_reg_index = ETM_CGAVD_FLOW_WQr; + q_wred_len_th_reg_index = ETM_CGAVD_FLOW_WRED_Q_LEN_THr; + + pp_wred_max_th_reg_index = ETM_CGAVD_PP_WRED_MAX_THr; + pp_wred_min_th_reg_index = ETM_CGAVD_PP_WRED_MIN_THr; + pp_wred_cfg_para_reg_index = ETM_CGAVD_PP_CFG_PARAr; + pp_wq_reg_index = ETM_CGAVD_PP_WQr; + pp_wred_len_th_reg_index = ETM_CGAVD_PP_WRED_Q_LEN_THr; + + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, wred_id, DPP_TM_DP_NUM); + index = wred_id * DPP_TM_DP_NUM + dp; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + + /* q_max_th */ + rc = dpp_reg_read(dev_id, + q_wred_max_th_reg_index, + 0, + index, + &q_max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* q_min_th */ + rc = dpp_reg_read(dev_id, + q_wred_min_th_reg_index, + 0, + index, + &q_min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* q_cfg_para */ + rc = dpp_reg_read(dev_id, + q_wred_cfg_para_reg_index, + 0, + index, + &q_cfg_para); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* q_wq */ + rc = dpp_reg_read(dev_id, + q_wq_reg_index, + 0, + wred_id, + &q_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* q_len_th */ + rc = dpp_reg_read(dev_id, + q_wred_len_th_reg_index, + 0, 
+ wred_id, + &q_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->max_th = q_max_th.flow_wred_max_th; + p_para->min_th = q_min_th.flow_wred_min_th; + + if (q_cfg_para.flow_wred_cfg_para != 0) { + p_para->max_p = (ZXIC_UINT32)(100 * (q_max_th.flow_wred_max_th - q_min_th.flow_wred_min_th) / q_cfg_para.flow_wred_cfg_para); + } else { + p_para->max_p = 0; + } + + p_para->q_len_th = q_len_th.flow_wred_q_len_th; + p_para->weight = q_wq.wq_flow; + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + + /* pp_max_th */ + rc = dpp_reg_read(dev_id, + pp_wred_max_th_reg_index, + 0, + index, + &pp_max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* pp_min_th */ + rc = dpp_reg_read(dev_id, + pp_wred_min_th_reg_index, + 0, + index, + &pp_min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* pp_cfg_para */ + rc = dpp_reg_read(dev_id, + pp_wred_cfg_para_reg_index, + 0, + index, + &pp_cfg_para); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* pp_wq */ + rc = dpp_reg_read(dev_id, + pp_wq_reg_index, + 0, + wred_id, + &pp_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* q_len_th */ + rc = dpp_reg_read(dev_id, + pp_wred_len_th_reg_index, + 0, + wred_id, + &pp_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->max_th = pp_max_th.pp_wred_max_th; + p_para->min_th = pp_min_th.pp_wred_min_th; + + if (pp_cfg_para.pp_cfg_para != 0) { + p_para->max_p = (ZXIC_UINT32)(100 * (pp_max_th.pp_wred_max_th - pp_min_th.pp_wred_min_th) / pp_cfg_para.pp_cfg_para); + } else { + p_para->max_p = 0; + } + + p_para->q_len_th = pp_len_th.pp_wred_q_len_th; + p_para->weight = pp_wq.wq_pp; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取WRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param 
wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的WRED组参数值,包含以下五个参数 + max_th 平均队列深度上限阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_dp_line_para_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + DPP_TM_WRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 blk_size = 0; + ZXIC_UINT32 cgavd_cfg_mode = 0; + DPP_TM_WRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + } + + rc = dpp_tm_cgavd_wred_dp_line_block_byte_para_get(dev_id, level, wred_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_wred_dp_line_block_byte_para_set"); + + p_para->max_p = para.max_p; + p_para->weight = para.weight; + + rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, blk_size, 256, 1024); + + p_para->max_th = (para.max_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + p_para->min_th = (para.min_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + p_para->q_len_th = (para.q_len_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + p_para->max_th = 
(para.max_th / DPP_TM_CGAVD_KILO_UL); + p_para->min_th = (para.min_th / DPP_TM_CGAVD_KILO_UL); + p_para->q_len_th = (para.q_len_th / DPP_TM_CGAVD_KILO_UL); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_wred_dp_line_para_get err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,包含以下六个参数 + max_th 平均队列深度上限阈值ZXIC_UINT8/BLOCK单位 + mid_th 平均队列深度中间阈值ZXIC_UINT8/BLOCK单位 + min_th 平均队列深度下限阈值ZXIC_UINT8/BLOCK单位 + max_p 最大丢弃概率[1-99] + weight 平均队列深度计算权重 + q_len_th 队列深度阈值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/02 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_gred_dp_line_block_byte_para_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 gred_max_th_reg_index = 0; + ZXIC_UINT32 gred_mid_th_reg_index = 0; + ZXIC_UINT32 gred_min_th_reg_index = 0; + ZXIC_UINT32 gred_cfg_para0_reg_index = 0; + ZXIC_UINT32 gred_cfg_para1_reg_index = 0; + ZXIC_UINT32 gred_cfg_para2_reg_index = 0; + ZXIC_UINT32 gred_sys_wfq_reg_index = 0; + ZXIC_UINT32 gred_sys_cfg_q_grp_para_reg_index = 0; + + DPP_ETM_CGAVD_GRED_MAX_TH_T max_th = {0}; + DPP_ETM_CGAVD_GRED_MID_TH_T mid_th = {0}; + DPP_ETM_CGAVD_GRED_MIN_TH_T min_th = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA0_T cfg_para0 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA1_T cfg_para1 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA2_T cfg_para2 = {0}; + DPP_ETM_CGAVD_SYS_WQ_T sys_wq = {0}; + DPP_ETM_CGAVD_SYS_CFG_Q_GRP_PARA_T sys_len_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->weight, 
0, DPP_TM_CGAVD_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->mid_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->min_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->q_len_th, 0, 0x1fffffff); + + gred_max_th_reg_index = ETM_CGAVD_GRED_MAX_THr; + gred_mid_th_reg_index = ETM_CGAVD_GRED_MID_THr; + gred_min_th_reg_index = ETM_CGAVD_GRED_MIN_THr; + gred_cfg_para0_reg_index = ETM_CGAVD_GRED_CFG_PARA0r; + gred_cfg_para1_reg_index = ETM_CGAVD_GRED_CFG_PARA1r; + gred_cfg_para2_reg_index = ETM_CGAVD_GRED_CFG_PARA2r; + gred_sys_wfq_reg_index = ETM_CGAVD_SYS_WQr; + gred_sys_cfg_q_grp_para_reg_index = ETM_CGAVD_SYS_CFG_Q_GRP_PARAr; + + /* max_th */ + max_th.gred_max_th = p_para->max_th; + rc = dpp_reg_write(dev_id, + gred_max_th_reg_index, + 0, + dp, + &max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* mid_th */ + mid_th.gred_mid_th = p_para->mid_th; + rc = dpp_reg_write(dev_id, + gred_mid_th_reg_index, + 0, + dp, + &mid_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* min_th */ + min_th.gred_min_th = p_para->min_th; + rc = dpp_reg_write(dev_id, + gred_min_th_reg_index, + 0, + dp, + &min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para0,乘以100用来换算百分比,用户配置的是值是1-100 */ + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, mid_th.gred_mid_th + 1, min_th.gred_min_th); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, 100, (mid_th.gred_mid_th - min_th.gred_min_th)); + cfg_para0.gred_cfg_para0 = (ZXIC_UINT32)(100 * (mid_th.gred_mid_th - min_th.gred_min_th) / p_para->max_p); + + rc = dpp_reg_write(dev_id, + gred_cfg_para0_reg_index, + 0, + dp, + &cfg_para0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para1 */ + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, max_th.gred_max_th + 1, 
mid_th.gred_mid_th); + cfg_para1.gred_cfg_para1 = (ZXIC_UINT32)(100 * (max_th.gred_max_th - mid_th.gred_mid_th) / (100 - p_para->max_p)); + + rc = dpp_reg_write(dev_id, + gred_cfg_para1_reg_index, + 0, + dp, + &cfg_para1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para2 */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, p_para->max_p, cfg_para1.gred_cfg_para1); + cfg_para2.gred_cfg_para2 = (ZXIC_UINT32)(p_para->max_p * cfg_para1.gred_cfg_para1 / 100); + + rc = dpp_reg_write(dev_id, + gred_cfg_para2_reg_index, + 0, + dp, + &cfg_para2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 必须先写sys_len_th */ + sys_len_th.gred_q_len_th_sys = p_para->q_len_th; + rc = dpp_reg_write(dev_id, + gred_sys_cfg_q_grp_para_reg_index, + 0, + 0, + &sys_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 再写sys_wq */ + sys_wq.wq_sys = p_para->weight; + rc = dpp_reg_write(dev_id, + gred_sys_wfq_reg_index, + 0, + 0, + &sys_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,用户配置门限值单位为Kbyte,需要转化为Block或者ZXIC_UINT8单位写入寄存器 + 包含以下六个参数 + max_th 平均队列深度上限阈值 + mid_th 平均队列深度中间阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/02 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_gred_dp_line_para_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 blk_size = 0; + DPP_TM_GRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + + p_para->max_th = p_para->max_th * DPP_TM_CGAVD_KILO_UL; + p_para->mid_th = p_para->mid_th * DPP_TM_CGAVD_KILO_UL; + p_para->min_th = p_para->min_th * DPP_TM_CGAVD_KILO_UL; + if ((p_para->q_len_th) * DPP_TM_CGAVD_KILO_UL > 0x1fffffff) { + p_para->q_len_th = 0x1fffffff; + } else { + p_para->q_len_th = (p_para->q_len_th) * DPP_TM_CGAVD_KILO_UL; + } + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->max_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->mid_th, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, p_para->min_th, 0, 0x1fffffff); + + para.max_p = p_para->max_p; + para.weight = p_para->weight; + + rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get"); + + if (blk_size != 0) { + para.max_th = ((p_para->max_th) / blk_size); + para.max_th = ((p_para->max_th) % blk_size == 0) ? (para.max_th) : ((para.max_th) + 1); + para.mid_th = ((p_para->mid_th) / blk_size); + para.mid_th = ((p_para->mid_th) % blk_size == 0) ? (para.mid_th) : ((para.mid_th) + 1); + para.min_th = ((p_para->min_th) / blk_size); + para.min_th = ((p_para->min_th) % blk_size == 0) ? (para.min_th) : ((para.min_th) + 1); + para.q_len_th = ((p_para->q_len_th) / blk_size); + para.q_len_th = ((p_para->q_len_th) % blk_size == 0) ? 
(para.q_len_th) : ((para.q_len_th) + 1); + } + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + para.max_th = p_para->max_th; + para.mid_th = p_para->mid_th; + para.min_th = p_para->min_th; + para.q_len_th = p_para->q_len_th; + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_gred_dp_line_para_set err!!\n"); + return DPP_ERR; + } + + rc = dpp_tm_cgavd_gred_dp_line_block_byte_para_set(dev_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_gred_dp_line_block_byte_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置系统级阶梯TD 丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param td_th TD 门限 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/02 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_ladtd_dp_line_para_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + ZXIC_UINT32 td_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 gred_max_th_reg_index = 0; + ZXIC_UINT32 gred_mid_th_reg_index = 0; + ZXIC_UINT32 gred_min_th_reg_index = 0; + ZXIC_UINT32 gred_cfg_para0_reg_index = 0; + ZXIC_UINT32 gred_cfg_para1_reg_index = 0; + ZXIC_UINT32 gred_cfg_para2_reg_index = 0; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 blk_size = 0; + + DPP_ETM_CGAVD_GRED_MAX_TH_T max_th = {0}; + DPP_ETM_CGAVD_GRED_MID_TH_T mid_th = {0}; + DPP_ETM_CGAVD_GRED_MIN_TH_T min_th = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA0_T cfg_para0 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA1_T cfg_para1 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA2_T cfg_para2 = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + + td_th = td_th * DPP_TM_CGAVD_KILO_UL; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, td_th, 0, 0x1fffffff); + + rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == 
DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get"); + + if (blk_size != 0) { + max_th.gred_max_th = ((td_th) / blk_size); + max_th.gred_max_th = ((td_th) % blk_size == 0) ? (max_th.gred_max_th) : ((max_th.gred_max_th) + 1); + mid_th.gred_mid_th = ((td_th) / blk_size); + mid_th.gred_mid_th = ((td_th) % blk_size == 0) ? (mid_th.gred_mid_th) : ((mid_th.gred_mid_th) + 1); + min_th.gred_min_th = ((td_th) / blk_size); + min_th.gred_min_th = ((td_th) % blk_size == 0) ? (min_th.gred_min_th) : ((min_th.gred_min_th) + 1); + } + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + max_th.gred_max_th = td_th; + mid_th.gred_mid_th = td_th; + min_th.gred_min_th = td_th; + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_gred_dp_line_para_get err!!\n"); + return DPP_ERR; + } + + gred_max_th_reg_index = ETM_CGAVD_GRED_MAX_THr; + gred_mid_th_reg_index = ETM_CGAVD_GRED_MID_THr; + gred_min_th_reg_index = ETM_CGAVD_GRED_MIN_THr; + gred_cfg_para0_reg_index = ETM_CGAVD_GRED_CFG_PARA0r; + gred_cfg_para1_reg_index = ETM_CGAVD_GRED_CFG_PARA1r; + gred_cfg_para2_reg_index = ETM_CGAVD_GRED_CFG_PARA2r; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, td_th, 0, 0x1fffffff); + + /* max_th */ + rc = dpp_reg_write(dev_id, + gred_max_th_reg_index, + 0, + dp, + &max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* mid_th */ + rc = dpp_reg_write(dev_id, + gred_mid_th_reg_index, + 0, + dp, + &mid_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* min_th */ + rc = dpp_reg_write(dev_id, + gred_min_th_reg_index, + 0, + dp, + &min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para0,0 */ + cfg_para0.gred_cfg_para0 = 0; + + rc = dpp_reg_write(dev_id, + gred_cfg_para0_reg_index, + 0, + dp, + &cfg_para0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para1 */ + 
cfg_para1.gred_cfg_para1 = 0; + + rc = dpp_reg_write(dev_id, + gred_cfg_para1_reg_index, + 0, + dp, + &cfg_para1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* cfg_para2 */ + cfg_para2.gred_cfg_para2 = 0; + + rc = dpp_reg_write(dev_id, + gred_cfg_para2_reg_index, + 0, + dp, + &cfg_para2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,包含以下六个参数 + max_th 平均队列深度上限阈值ZXIC_UINT8/BLOCK单位 + mid_th 平均队列深度中间阈值ZXIC_UINT8/BLOCK单位 + min_th 平均队列深度下限阈值ZXIC_UINT8/BLOCK单位 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值ZXIC_UINT8/BLOCK单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/02 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_gred_dp_line_block_byte_para_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 gred_max_th_reg_index = 0; + ZXIC_UINT32 gred_mid_th_reg_index = 0; + ZXIC_UINT32 gred_min_th_reg_index = 0; + ZXIC_UINT32 gred_cfg_para0_reg_index = 0; + ZXIC_UINT32 gred_cfg_para1_reg_index = 0; + ZXIC_UINT32 gred_cfg_para2_reg_index = 0; + ZXIC_UINT32 gred_sys_wfq_reg_index = 0; + ZXIC_UINT32 gred_sys_cfg_q_grp_para_reg_index = 0; + + DPP_ETM_CGAVD_GRED_MAX_TH_T max_th = {0}; + DPP_ETM_CGAVD_GRED_MID_TH_T mid_th = {0}; + DPP_ETM_CGAVD_GRED_MIN_TH_T min_th = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA0_T cfg_para0 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA1_T cfg_para1 = {0}; + DPP_ETM_CGAVD_GRED_CFG_PARA2_T cfg_para2 = {0}; + DPP_ETM_CGAVD_SYS_WQ_T sys_wq = {0}; + DPP_ETM_CGAVD_SYS_CFG_Q_GRP_PARA_T sys_len_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + gred_max_th_reg_index = 
ETM_CGAVD_GRED_MAX_THr; + gred_mid_th_reg_index = ETM_CGAVD_GRED_MID_THr; + gred_min_th_reg_index = ETM_CGAVD_GRED_MIN_THr; + gred_cfg_para0_reg_index = ETM_CGAVD_GRED_CFG_PARA0r; + gred_cfg_para1_reg_index = ETM_CGAVD_GRED_CFG_PARA1r; + gred_cfg_para2_reg_index = ETM_CGAVD_GRED_CFG_PARA2r; + gred_sys_wfq_reg_index = ETM_CGAVD_SYS_WQr; + gred_sys_cfg_q_grp_para_reg_index = ETM_CGAVD_SYS_CFG_Q_GRP_PARAr; + + /* max_th */ + rc = dpp_reg_read(dev_id, + gred_max_th_reg_index, + 0, + dp, + &max_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* mid_th */ + rc = dpp_reg_read(dev_id, + gred_mid_th_reg_index, + 0, + dp, + &mid_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* min_th */ + rc = dpp_reg_read(dev_id, + gred_min_th_reg_index, + 0, + dp, + &min_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* cfg_para0 */ + rc = dpp_reg_read(dev_id, + gred_cfg_para0_reg_index, + 0, + dp, + &cfg_para0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* cfg_para1 */ + rc = dpp_reg_read(dev_id, + gred_cfg_para1_reg_index, + 0, + dp, + &cfg_para1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* cfg_para2 */ + rc = dpp_reg_read(dev_id, + gred_cfg_para2_reg_index, + 0, + dp, + &cfg_para2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* sys_len_th */ + rc = dpp_reg_read(dev_id, + gred_sys_cfg_q_grp_para_reg_index, + 0, + 0, + &sys_len_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* sys_wq */ + rc = dpp_reg_read(dev_id, + gred_sys_wfq_reg_index, + 0, + 0, + &sys_wq); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->max_th = max_th.gred_max_th; + p_para->mid_th = mid_th.gred_mid_th; + p_para->min_th = min_th.gred_min_th; + + if (cfg_para0.gred_cfg_para0) { + p_para->max_p = (ZXIC_UINT32)(100 * (mid_th.gred_mid_th - min_th.gred_min_th) / cfg_para0.gred_cfg_para0); + } + + p_para->weight = 
sys_wq.wq_sys; + p_para->q_len_th = sys_len_th.gred_q_len_th_sys; + + return DPP_OK; +} + +/***********************************************************/ +/** 读取系统级GRED丢弃曲线对应的参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param p_para 配置的GRED丢弃曲线参数值,包含以下六个参数 + max_th 平均队列深度上限阈值 + mid_th 平均队列深度中间阈值 + min_th 平均队列深度下限阈值 + max_p 最大丢弃概率 + weight 平均队列深度计算权重 + q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_gred_dp_line_para_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + DPP_TM_GRED_DP_LINE_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 blk_size = 0; + DPP_TM_GRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc = dpp_tm_cgavd_gred_dp_line_block_byte_para_get(dev_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_gred_dp_line_block_byte_para_get"); + + p_para->max_p = para.max_p; + p_para->weight = para.weight; + + rc = dpp_tm_cgavd_cfg_mode_get(dev_id, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev_id, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_get"); + + p_para->max_th = (para.max_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + p_para->mid_th = (para.mid_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + p_para->min_th = (para.min_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + p_para->q_len_th = (para.q_len_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + p_para->max_th = (para.max_th / DPP_TM_CGAVD_KILO_UL); + p_para->mid_th = (para.mid_th / DPP_TM_CGAVD_KILO_UL); + p_para->min_th = (para.min_th / 
DPP_TM_CGAVD_KILO_UL); + p_para->q_len_th = (para.q_len_th / DPP_TM_CGAVD_KILO_UL); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_cgavd_gred_dp_line_para_get err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置指定端口或队列是否支持动态门限机制 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param en 配置的值,0-不支持动态门限机制,1-支持动态门限机制 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dyn_th_en_set(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q_dyn_th_en_reg_index = 0; + ZXIC_UINT32 pp_wrd_grp_th_en_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_DYNAMIC_TH_EN_T q_dyn_th_en = { 0 }; + DPP_ETM_CGAVD_PP_WRED_GRP_TH_EN_T pp_dyn_th_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), en, 0, 1); + + q_dyn_th_en_reg_index = ETM_CGAVD_FLOW_DYNAMIC_TH_ENr; + pp_wrd_grp_th_en_reg_index = ETM_CGAVD_PP_WRED_GRP_TH_ENr; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_Q_NUM - 1); + + q_dyn_th_en.flow_dynamic_th_en = en; + rc = dpp_reg_write(dev, q_dyn_th_en_reg_index, 0, id, + &q_dyn_th_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_TM_PP_NUM - 1); + + rc = dpp_reg_read(dev, pp_wrd_grp_th_en_reg_index, 0, id, + &pp_dyn_th_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + pp_dyn_th_en.pp_wred_grp_th_en = en; + rc = dpp_reg_write(dev, pp_wrd_grp_th_en_reg_index, 0, id, + &pp_dyn_th_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + 
"dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取指定端口或队列是否支持动态门限机制 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param p_en 读取的值,0-不支持动态门限机制,1-支持动态门限机制 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dyn_th_en_get(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q_dyn_th_en_reg_index = 0; + ZXIC_UINT32 pp_wrd_grp_th_en_reg_index = 0; + + DPP_ETM_CGAVD_FLOW_DYNAMIC_TH_EN_T q_dyn_th_en = { 0 }; + DPP_ETM_CGAVD_PP_WRED_GRP_TH_EN_T pp_dyn_th_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + PP_LEVEL); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_en); + + q_dyn_th_en_reg_index = ETM_CGAVD_FLOW_DYNAMIC_TH_ENr; + pp_wrd_grp_th_en_reg_index = ETM_CGAVD_PP_WRED_GRP_TH_ENr; + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev, q_dyn_th_en_reg_index, 0, id, + &q_dyn_th_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + *p_en = q_dyn_th_en.flow_dynamic_th_en; + } + + else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_TM_PP_NUM - 1); + + rc = dpp_reg_read(dev, pp_wrd_grp_th_en_reg_index, 0, id, + &pp_dyn_th_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + *p_en = pp_dyn_th_en.pp_wred_grp_th_en; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置等价包长使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_en_set(DPP_DEV_T *dev, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_EQUAL_PKT_LEN_EN_T equal_pkt_len_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), en, 0, 1); + + equal_pkt_len_en.equal_pkt_len_en = en; + rc = dpp_reg_write(dev, ETM_CGAVD_EQUAL_PKT_LEN_ENr, 0, 0, + &equal_pkt_len_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取等价包长使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 使能标记,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_EQUAL_PKT_LEN_EN_T equal_pkt_len_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_en); + + *p_en = 0xffffffff; + rc = dpp_reg_read(dev, ETM_CGAVD_EQUAL_PKT_LEN_ENr, 0, 0, + &equal_pkt_len_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_en = equal_pkt_len_en.equal_pkt_len_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置等价包长 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len 等价包长 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_para_set( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_PARA_T *p_equal_pkt_len) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_EQUAL_PKT_LEN0_T equal_pkt_len0 = { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), 
p_equal_pkt_len); + + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), p_equal_pkt_len->equal_pkt_len[i], 0x0, + 0x7fff); + equal_pkt_len0.equal_pkt_len0 = + p_equal_pkt_len->equal_pkt_len[i]; + rc = dpp_reg_write(dev, ETM_CGAVD_EQUAL_PKT_LEN0r + i, 0, 0, + &equal_pkt_len0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取等价包长 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len 等价包长 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_para_get( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_PARA_T *p_equal_pkt_len) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_EQUAL_PKT_LEN0_T equal_pkt_len0 = { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_equal_pkt_len); + + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + ETM_CGAVD_EQUAL_PKT_LEN0r, i); + rc = dpp_reg_read(dev, ETM_CGAVD_EQUAL_PKT_LEN0r + i, 0, 0, + &equal_pkt_len0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + p_equal_pkt_len->equal_pkt_len[i] = + equal_pkt_len0.equal_pkt_len0; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置等价包长阈值 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len_th 等价包长阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_th_para_set( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_TH_PARA_T *p_equal_pkt_len_th) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH0_T equal_pkt_len_th0 
= { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_equal_pkt_len_th); + + for (i = 0; i < 7; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), p_equal_pkt_len_th->equal_pkt_len_th[i], + 0x0, 0x7fff); + + if (i <= 5) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), + p_equal_pkt_len_th->equal_pkt_len_th[i + 1], + p_equal_pkt_len_th->equal_pkt_len_th[i], + 0x7fff); + } + + equal_pkt_len_th0.equal_pkt_len_th0 = + p_equal_pkt_len_th->equal_pkt_len_th[i]; + rc = dpp_reg_write(dev, ETM_CGAVD_EQUAL_PKT_LEN_TH0r + i, 0, 0, + &equal_pkt_len_th0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取等价包长阈值 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_equal_pkt_len_th 等价包长阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_equal_pkt_len_th_para_get( + DPP_DEV_T *dev, DPP_ETM_EQUAL_PKT_LEN_TH_PARA_T *p_equal_pkt_len_th) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_EQUAL_PKT_LEN_TH0_T equal_pkt_len_th0 = { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_equal_pkt_len_th); + + for (i = 0; i < 7; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), i, ETM_CGAVD_EQUAL_PKT_LEN_TH0r); + rc = dpp_reg_read(dev, ETM_CGAVD_EQUAL_PKT_LEN_TH0r + i, 0, 0, + &equal_pkt_len_th0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + p_equal_pkt_len_th->equal_pkt_len_th[i] = + equal_pkt_len_th0.equal_pkt_len_th0; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 动态门限放大因子参数配置 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_amplify_gene_para 动态门限放大因子参数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* 
@remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_amplify_gene_para_set( + DPP_DEV_T *dev, DPP_ETM_AMPLIFY_GENE_PARA_T *p_amplify_gene_para) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_AMPLIFY_GENE0_T amplify_gene0 = { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_amplify_gene_para); + + for (i = 0; i < 16; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), p_amplify_gene_para->amplify_gene[i], 0x0, + 0xfff); + amplify_gene0.amplify_gene0 = + p_amplify_gene_para->amplify_gene[i]; + rc = dpp_reg_write(dev, ETM_CGAVD_AMPLIFY_GENE0r + i, 0, 0, + &lify_gene0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 动态门限放大因子参数获取 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_amplify_gene_para 动态门限放大因子参数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/28 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_amplify_gene_para_get( + DPP_DEV_T *dev, DPP_ETM_AMPLIFY_GENE_PARA_T *p_amplify_gene_para) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_AMPLIFY_GENE0_T amplify_gene0 = { 0 }; + + /* 入参检查 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_amplify_gene_para); + + for (i = 0; i < 16; i++) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + ETM_CGAVD_AMPLIFY_GENE0r, i); + rc = dpp_reg_read(dev, ETM_CGAVD_AMPLIFY_GENE0r + i, 0, 0, + &lify_gene0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + p_amplify_gene_para->amplify_gene[i] = + amplify_gene0.amplify_gene0; + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置默认队列使能 +* @param dev_id 
设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_DEFAULT_QUEUE_EN_T default_que_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + default_que_en.default_queue_en = en; + rc = dpp_reg_write(dev_id, + ETM_CGAVD_DEFAULT_QUEUE_ENr, + 0, + 0, + &default_que_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取默认队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_DEFAULT_QUEUE_EN_T default_que_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_DEFAULT_QUEUE_ENr, + 0, + 0, + &default_que_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = default_que_en.default_queue_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置默认队列起始末尾 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param def_start_que 起始默认队列block/byte单位 +* @param def_finish_que 结束默认队列block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 def_start_queue, ZXIC_UINT32 def_finish_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_DEFAULT_START_QUEUE_T 
def_start_que = {0}; + DPP_ETM_CGAVD_DEFAULT_FINISH_QUEUE_T def_finish_que = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, def_start_queue, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, def_finish_queue, 0, 0x1fffffff); + + def_start_que.default_start_queue = def_start_queue; + def_finish_que.default_finish_queue = def_finish_queue; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_DEFAULT_START_QUEUEr, + 0, + 0, + &def_start_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + rc = dpp_reg_write(dev_id, + ETM_CGAVD_DEFAULT_FINISH_QUEUEr, + 0, + 0, + &def_finish_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取默认队列起始末尾值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_def_start_que 默认队列起始值block/byte单位 +* @param p_def_finish_que 默认队列结束值block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_default_queue_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_def_start_queue, ZXIC_UINT32 *p_def_finish_queue) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_DEFAULT_START_QUEUE_T def_start_que = {0}; + DPP_ETM_CGAVD_DEFAULT_FINISH_QUEUE_T def_finish_que = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_def_finish_queue); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_def_start_queue); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_DEFAULT_START_QUEUEr, + 0, + 0, + &def_start_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_DEFAULT_FINISH_QUEUEr, + 0, + 0, + &def_finish_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_def_start_queue = def_start_que.default_start_queue; + *p_def_finish_queue = def_finish_que.default_finish_queue; + + return DPP_OK; +} + +/***********************************************************/ +/** 
配置协议队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能默认队列,1-使能默认队列, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_PROTOCOL_QUEUE_EN_T protocol_que_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + protocol_que_en.protocol_queue_en = en; + rc = dpp_reg_write(dev_id, + ETM_CGAVD_PROTOCOL_QUEUE_ENr, + 0, + 0, + &protocol_que_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取协议队列使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_PROTOCOL_QUEUE_EN_T protocol_que_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_PROTOCOL_QUEUE_ENr, + 0, + 0, + &protocol_que_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = protocol_que_en.protocol_queue_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置协议队列起始末尾 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param protocol_start_que 起始协议队列block/byte单位 +*@param protocol_-finish_que 末尾协议队列block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 protocol_start_que, ZXIC_UINT32 protocol_finish_que) +{ + DPP_STATUS rc 
= DPP_OK; + DPP_ETM_CGAVD_PROTOCOL_START_QUEUE_T pro_start_que = {0}; + DPP_ETM_CGAVD_PROTOCOL_FINISH_QUEUE_T pro_finish_que = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, protocol_start_que, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, protocol_finish_que, 0, 0x1fffffff); + + pro_start_que.protocol_start_queue = protocol_start_que; + pro_finish_que.protocol_finish_queue = protocol_finish_que; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_PROTOCOL_START_QUEUEr, + 0, + 0, + &pro_start_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + rc = dpp_reg_write(dev_id, + ETM_CGAVD_PROTOCOL_FINISH_QUEUEr, + 0, + 0, + &pro_finish_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取协议队列起始末尾值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_protocol_start_que 协议认队列起始值block/byte单位 +* @param p_protocol_finish_que 协议认队列末尾值block/byte单位 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/08/16 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_protocol_queue_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_protocol_start_que, ZXIC_UINT32 *p_protocol_finish_que) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_PROTOCOL_START_QUEUE_T pro_start_que = {0}; + DPP_ETM_CGAVD_PROTOCOL_FINISH_QUEUE_T pro_finish_que = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_protocol_finish_que); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_protocol_start_que); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_PROTOCOL_START_QUEUEr, + 0, + 0, + &pro_start_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_PROTOCOL_FINISH_QUEUEr, + 0, + 0, + &pro_finish_que); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_protocol_start_que = pro_start_que.protocol_start_queue; + *p_protocol_finish_que = 
pro_finish_que.protocol_finish_queue; + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置通用门限使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_en_set(DPP_DEV_T *dev, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_UNIFORM_TD_TH_EN_T uniform_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), en, 0, 1); + + uniform_en.uniform_td_th_en = en; + rc = dpp_reg_write(dev, ETM_CGAVD_UNIFORM_TD_TH_ENr, 0, 0, &uniform_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取通用门限使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读取的值,0-不使能通用门限,1-使能通用门限, +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_UNIFORM_TD_TH_EN_T uniform_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_en); + + rc = dpp_reg_read(dev, ETM_CGAVD_UNIFORM_TD_TH_ENr, 0, 0, &uniform_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_en = uniform_en.uniform_td_th_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param byte_block_uni_th 通用门限值block/byte单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS 
dpp_tm_cgavd_uniform_byte_block_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 byte_block_uni_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_UNIFORM_TD_TH_T uniform_block_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), byte_block_uni_th, 0, + 0x1fffffff); + + uniform_block_th.uniform_td_th = byte_block_uni_th; + rc = dpp_reg_write(dev, ETM_CGAVD_UNIFORM_TD_THr, 0, 0, + &uniform_block_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param th 通用门限值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_set(DPP_DEV_T *dev, ZXIC_UINT32 uni_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 blk_size = 0; + ZXIC_UINT32 blk_th = 0; + ZXIC_UINT32 cgavd_cfg_mode = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW(DEV_ID(dev), uni_th, + DPP_TM_CGAVD_KILO_UL); + if ((uni_th * DPP_TM_CGAVD_KILO_UL) > 0x1fffffff) { + uni_th = 0x1fffffff; + } else { + uni_th = (uni_th * DPP_TM_CGAVD_KILO_UL); + } + + rc = dpp_tm_cgavd_cfg_mode_get(dev, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cfgmt_blk_size_get"); + + if (blk_size != 0) { + blk_th = (uni_th / blk_size); + blk_th = (uni_th % blk_size == 0) ? 
(blk_th) : + ((blk_th) + 1); + } + + rc = dpp_tm_cgavd_uniform_byte_block_th_set(dev, blk_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_cgavd_uniform_byte_block_th_set"); + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + rc = dpp_tm_cgavd_uniform_byte_block_th_set(dev, uni_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_cgavd_uniform_byte_block_th_set"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "dpp_tm_cgavd_uniform_th_set err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_byte_block_uni_th 通用门限值block/byte单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS +dpp_tm_cgavd_uniform_byte_block_th_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_byte_block_uni_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_UNIFORM_TD_TH_T uniform_block_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_byte_block_uni_th); + + rc = dpp_reg_read(dev, ETM_CGAVD_UNIFORM_TD_THr, 0, 0, + &uniform_block_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_byte_block_uni_th = uniform_block_th.uniform_td_th; + + return DPP_OK; +} + +/***********************************************************/ +/** 读取通用门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_uni_th 通用门限值kbyte单位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/08/01 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_uniform_th_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_uni_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 blk_size = 0; + ZXIC_UINT32 cgavd_cfg_mode = 0; + ZXIC_UINT32 block_byte_th = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_uni_th); 
+ + rc = dpp_tm_cgavd_uniform_byte_block_th_get(dev, &block_byte_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_tm_cgavd_uniform_byte_block_th_get"); + + rc = dpp_tm_cgavd_cfg_mode_get(dev, &cgavd_cfg_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_cfg_mode_get"); + + if (cgavd_cfg_mode == DPP_TM_CGAVD_BLOCK_MODE) { + rc = dpp_tm_cfgmt_blk_size_get(dev, &blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cfgmt_blk_size_get"); + + *p_uni_th = (block_byte_th * blk_size) / DPP_TM_CGAVD_KILO_UL; + + } else if (cgavd_cfg_mode == DPP_TM_CGAVD_ZXIC_UINT8_MODE) { + *p_uni_th = (block_byte_th / DPP_TM_CGAVD_KILO_UL); + } else { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "dpp_tm_cgavd_uniform_th_get err!!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置流队列所属优先级 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param pri 配置的优先级,0~4 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_pri_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 pri) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_Q_PRI_T q_pri = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), pri, 0, 4); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_id, 0, + DPP_ETM_Q_NUM - 1); + + q_pri.qpri_flow_cfg_din = pri; + rc = dpp_reg_write(dev, ETM_CGAVD_Q_PRIr, 0, q_id, &q_pri); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置TM模式下流队列挂接的端口号;SA模式下流队列映射的目的芯片ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param pp_id 配置的端口号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/18 
+************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_map_pp_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 pp_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_PP_NUM_T pp_num = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_id, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), pp_id, 0, + DPP_TM_PP_NUM - 1); + + pp_num.pp_num = pp_id; + rc = dpp_reg_write(dev, ETM_CGAVD_PP_NUMr, 0, q_id, &pp_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取TM模式下流队列挂接的端口号;SA模式下流队列映射的目的芯片ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* @param p_pp_id 读取的端口号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/18 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_q_map_pp_get(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 *p_pp_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_PP_NUM_T pp_num = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_pp_id); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_id, 0, + DPP_ETM_Q_NUM - 1); + + *p_pp_id = 0xffffffff; + rc = dpp_reg_read(dev, ETM_CGAVD_PP_NUMr, 0, q_id, &pp_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_pp_id = pp_num.pp_num; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置TM模式tc到flow的映射 +* @param dev_id 设备编号 +* @param tc_id itmd tc优先级(0~7) +* @param flow_id 映射的flowid号 (0~4095) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/07/04 +************************************************************/ +DPP_STATUS dpp_tm_tc_map_flow_set(DPP_DEV_T *dev, ZXIC_UINT32 tc_id, + 
ZXIC_UINT32 flow_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CFG_TC_FLOWID_DAT_T cfg_tc_flow = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), tc_id, 0, + DPP_TM_TC_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_Q_NUM - 1); + + cfg_tc_flow.cfg_tc_flowid_dat = flow_id; + rc = dpp_reg_write(dev, ETM_CGAVD_CFG_TC_FLOWID_DATr, 0, tc_id, + &cfg_tc_flow); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取配置TM模式tc到flow的映射 +* @param dev_id 设备编号 +* @param tc_id itmd tc优先级(0~7) +* @param flow_id 读取映射的flowid号 (0~4095) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/07/04 +************************************************************/ +DPP_STATUS dpp_tm_tc_map_flow_get(DPP_DEV_T *dev, ZXIC_UINT32 tc_id, + ZXIC_UINT32 *flow_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CFG_TC_FLOWID_DAT_T cfg_tc_flow = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), flow_id); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), tc_id, 0, + DPP_TM_TC_NUM - 1); + + *flow_id = 0xffffffff; + rc = dpp_reg_read(dev, ETM_CGAVD_CFG_TC_FLOWID_DATr, 0, tc_id, + &cfg_tc_flow); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *flow_id = cfg_tc_flow.cfg_tc_flowid_dat; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 系统级缓存使用上下限阈值配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param th_h: 系统级缓存使用上限阈值 +* @param th_l: 系统级缓存使用下限阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_sys_window_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 th_h, + ZXIC_UINT32 th_l) +{ + 
DPP_STATUS rc = DPP_OK; + + DPP_ETM_CGAVD_SYS_WINDOW_TH_H_T sys_window_th_h = {0}; + DPP_ETM_CGAVD_SYS_WINDOW_TH_L_T sys_window_th_l = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, th_h, 0, 0x1fffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, th_l, 0, 0x1fffffff); + + if (th_l > th_h) { + ZXIC_COMM_PRINT("input error th_l > th_h"); + return DPP_ERR; + } + + sys_window_th_h.sys_window_th_h = th_h; + sys_window_th_l.sys_window_th_l = th_l; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_SYS_WINDOW_TH_Hr, + 0, + 0, + &sys_window_th_h); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_SYS_WINDOW_TH_Lr, + 0, + 0, + &sys_window_th_l); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置QMU查询队列Qos开关 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id: 队列号 +* @param qos_sign: qos开关 0:关闭 1:开启 +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/03 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qos_sign_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id, + ZXIC_UINT32 qos_sign) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CGAVD_QOS_SIGN_T qmu_qos_sign = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), qos_sign, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_id, 0, + DPP_ETM_Q_NUM - 1); + + qmu_qos_sign.qos_sign_flow_cfg_din = qos_sign; + + rc = dpp_reg_write(dev, ETM_CGAVD_QOS_SIGNr, 0, q_id, &qmu_qos_sign); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置cgavd强制反压 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_fc: 0:不强制反压 1:强制反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 
2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_fc_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 cgavd_fc) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CGAVD_CFG_FC_T cgavd_cfg_fc_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cgavd_fc, 0, 1); + + cgavd_cfg_fc_t.cgavd_cfg_fc = cgavd_fc; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_CGAVD_CFG_FCr, + 0, + 0, + &cgavd_cfg_fc_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取cgavd强制反压状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_fc: 0:不强制反压 1:强制反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_fc_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *cgavd_fc) + +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CGAVD_CFG_FC_T cgavd_cfg_fc_t = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, cgavd_fc); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_CGAVD_CFG_FCr, + 0, + 0, + &cgavd_cfg_fc_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *cgavd_fc = cgavd_cfg_fc_t.cgavd_cfg_fc; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置cgavd强制不反压 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_no_fc: 0:不强制 1:强制不反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_no_fc_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 cgavd_no_fc) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CGAVD_CFG_NO_FC_T cgavd_cfg_no_fc_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cgavd_no_fc, 0, 1); + + cgavd_cfg_no_fc_t.cgavd_cfg_no_fc = cgavd_no_fc; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_CGAVD_CFG_NO_FCr, + 0, + 0, + 
&cgavd_cfg_no_fc_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取cgavd强制不反压状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_no_fc: 0:不强制 1:强制不反压 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/07/03 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_cfg_no_fc_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *cgavd_no_fc) + +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CGAVD_CFG_NO_FC_T cgavd_cfg_no_fc_t = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, cgavd_no_fc); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_CGAVD_CFG_NO_FCr, + 0, + 0, + &cgavd_cfg_no_fc_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *cgavd_no_fc = cgavd_cfg_no_fc_t.cgavd_cfg_no_fc; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置cgavd平均队列深度归零 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en: 0:关闭 1:使能 +* @param +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/05 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_avg_qlen_return_zero_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_AVG_QLEN_RETURN_ZERO_EN_T return_zero_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + return_zero_en.avg_qlen_return_zero_en = en; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_AVG_QLEN_RETURN_ZERO_ENr, + 0, + 0, + &return_zero_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_QMU") +#if 0 +/***********************************************************/ +/** QMU MMU 配置清除 +* @param dev_id +* @param tm_type +* +* @return +* @remark 无 +* @see +* @author XXX @date 2020/04/13 
+************************************************************/ +DPP_STATUS dpp_tm_qmu_mmu_cfg_clr(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_CFGMT_DDR_ATTACH_T attach = {0}; + + ZXIC_UINT32 bdep[64] = {0}; + ZXIC_UINT32 bhead[64] = {0}; + ZXIC_UINT32 btail[64] = {0}; + ZXIC_UINT32 ddr_in_mmu[8] = {0}; + ZXIC_UINT32 ddr_in_qmu[10] = {0}; + ZXIC_UINT32 bank_to_mmu[64] = {0}; + ZXIC_UINT32 bank_to_qmu[80] = {0}; + ZXIC_UINT32 active[16] = {0}; + ZXIC_UINT32 random_grp[64] = {0}; + ZXIC_UINT32 random_ddr0[64] = {0}; + ZXIC_UINT32 random_ddr1[64] = {0}; + ZXIC_UINT32 random_ddr2[64] = {0}; + ZXIC_UINT32 random_ddr3[64] = {0}; + ZXIC_UINT32 random_ddr4[64] = {0}; + ZXIC_UINT32 random_ddr5[64] = {0}; + ZXIC_UINT32 random_ddr6[64] = {0}; + ZXIC_UINT32 random_ddr7[64] = {0}; + //ZXIC_UINT32 mmu_addr[128] = {0}; + + ZXIC_UINT32 i = 0; + + ZXIC_UINT32 reg_attach = 0; + ZXIC_UINT32 reg_ddr_in_mmu = 0; + ZXIC_UINT32 reg_ddr_in_qmu = 0; + ZXIC_UINT32 reg_bank_to_mmu = 0; + ZXIC_UINT32 reg_bank_to_qmu = 0; + ZXIC_UINT32 reg_bdep = 0; + ZXIC_UINT32 reg_bhead = 0; + ZXIC_UINT32 reg_btail = 0; + ZXIC_UINT32 reg_active = 0; + ZXIC_UINT32 reg_random_grp = 0; + ZXIC_UINT32 reg_random_ddr0 = 0; + ZXIC_UINT32 reg_random_ddr1 = 0; + ZXIC_UINT32 reg_random_ddr2 = 0; + ZXIC_UINT32 reg_random_ddr3 = 0; + ZXIC_UINT32 reg_random_ddr4 = 0; + ZXIC_UINT32 reg_random_ddr5 = 0; + ZXIC_UINT32 reg_random_ddr6 = 0; + ZXIC_UINT32 reg_random_ddr7 = 0; + //ZXIC_UINT32 reg_mmu_addr = 0; + + reg_attach = ETM_CFGMT_CFGMT_DDR_ATTACHr; + reg_ddr_in_mmu = ETM_QMU_CFGMT_DDR_IN_MMU_CFGr; + reg_ddr_in_qmu = ETM_QMU_CFGMT_DDR_IN_QMU_CFGr; + reg_bank_to_mmu = ETM_QMU_CFGMT_BANK_TO_MMU_CFGr; + reg_bank_to_qmu = ETM_QMU_CFGMT_BANK_TO_QMU_CFGr; + reg_bdep = ETM_QMU_QCFG_QLIST_BDEPr; + reg_bhead = ETM_QMU_QCFG_QLIST_BHEADr; + reg_btail = ETM_QMU_QCFG_QLIST_BTAILr; + reg_active = ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr; + reg_random_grp = ETM_QMU_QCFG_QLIST_GRPr; + reg_random_ddr0 = 
ETM_QMU_QCFG_QLIST_GRP0_BANKr; + reg_random_ddr1 = ETM_QMU_QCFG_QLIST_GRP1_BANKr; + reg_random_ddr2 = ETM_QMU_QCFG_QLIST_GRP2_BANKr; + reg_random_ddr3 = ETM_QMU_QCFG_QLIST_GRP3_BANKr; + reg_random_ddr4 = ETM_QMU_QCFG_QLIST_GRP4_BANKr; + reg_random_ddr5 = ETM_QMU_QCFG_QLIST_GRP5_BANKr; + reg_random_ddr6 = ETM_QMU_QCFG_QLIST_GRP6_BANKr; + reg_random_ddr7 = ETM_QMU_QCFG_QLIST_GRP7_BANKr; + + rc = dpp_reg_write(dev_id, reg_attach, 0, 0, &attach); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + for (i = 0; i < 8; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_ddr_in_mmu, 0, i, ddr_in_mmu[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 10; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_ddr_in_qmu, 0, i, ddr_in_qmu[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 64; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_bank_to_mmu, 0, i, bank_to_mmu[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 80; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_bank_to_qmu, 0, i, bank_to_qmu[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 64; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_bhead, 0, i, bhead[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_btail, 0, i, btail[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_bdep, 0, i, bdep[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 16; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_active, 0, i, active[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + for (i = 0; i < 64; i++) { + rc = dpp_reg_write32_bymn(dev_id, reg_random_grp, 0, i, random_grp[i]); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr0, 0, i, random_ddr0[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr1, 0, i, random_ddr1[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr2, 0, i, random_ddr2[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr3, 0, i, random_ddr3[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr4, 0, i, random_ddr4[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr5, 0, i, random_ddr5[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr6, 0, i, random_ddr6[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + + rc = dpp_reg_write32_bymn(dev_id, reg_random_ddr7, 0, i, random_ddr7[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + } + + /*for (i = 0; i < 128; i++) + { + rc = dpp_reg_write32_bymn(dev_id, reg_mmu_addr + i, 0, 0, mmu_addr[i]); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write32_bymn"); + }*/ + + return rc; +} + +/***********************************************************/ +/** 配置QMU队列授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是533Byte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_credit_value_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 credit_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CREDIT_VALUE_T credit_val = {0}; + + 
credit_val.qcfg_qsch_credit_value = credit_value; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CREDIT_VALUEr, + 0, + 0, + &credit_val); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 读取QMU队列授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_credit_value 授权价值,默认值是533Byte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_credit_value_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_credit_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CREDIT_VALUE_T credit_val = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_credit_value); + + *p_credit_value = 0; + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_QSCH_CREDIT_VALUEr, 0, 0, + &credit_val); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_credit_value = credit_val.qcfg_qsch_credit_value; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置授权盈余初始化值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crbal_initial_value 授权盈余初始化值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_initial_value_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 crbal_initial_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRBAL_INIT_VALUE_T crbal_init_val = {0}; + + crbal_init_val.qcfg_qsch_crbal_init_value = crbal_initial_value; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRBAL_INIT_VALUEr, + 0, + 0, + &crbal_init_val); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 配置CRS过滤使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* 
@param en 配置的值,0-不使能过滤,1-使能过滤 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_filter_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_FILTER_T crs_filter_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + crs_filter_en.qcfg_qsch_crs_filter = en; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_FILTERr, + 0, + 0, + &crs_filter_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS发送强制使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-不使能,1-使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_EN_T crs_force_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + crs_force_en.qcfg_qsch_crs_force_en = en; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_FORCE_ENr, + 0, + 0, + &crs_force_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS发送强制的队列 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_q_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 q_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_QNUM_T crs_force_q = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, q_id, 0, DPP_ETM_Q_NUM - 1); + + crs_force_q.qcfg_qsch_crs_force_qnum = q_id; + rc = 
dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_FORCE_QNUMr, + 0, + 0, + &crs_force_q); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 配置CRS发送强置的状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crs_state CRS发送强置的状态 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_force_state_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 crs_state) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_FORCE_CRS_T crs_force_crs = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crs_state, 0, 1); + + crs_force_crs.qcfg_qsch_crs_force_crs = crs_state; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_FORCE_CRSr, + 0, + 0, + &crs_force_crs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置特定队列发送特定CRS +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* qcfg_qsch_crs_force_crs:CRS状态(0:off;1:normal。) + qcfg_qsch_crs_force_en:CRS发送强置使能(0:不使能;1:使能。) +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qnum_crs_force(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 qcfg_qsch_crs_force_crs, + ZXIC_UINT32 qcfg_qsch_crs_force_en) + +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qcfg_qsch_crs_force_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qcfg_qsch_crs_force_crs, 0, 1); + + rc = dpp_tm_qmu_crs_force_q_set(dev_id, qnum); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_crs_force_q_set"); + + rc = dpp_tm_qmu_crs_force_state_set(dev_id, qcfg_qsch_crs_force_crs); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_crs_force_state_set"); + + rc = dpp_tm_qmu_crs_force_en_set(dev_id, qcfg_qsch_crs_force_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_crs_force_en_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU空闲链表:TM独享模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_num ddr组数,1-8组 +* @param bank_vld bank有效信号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/06/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qlist_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ddr_num, + ZXIC_UINT32 bank_num_para, + ZXIC_UINT32 bank_vld, + ZXIC_UINT32 gene_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 reg_index = 0; + ZXIC_UINT32 bdep_reg_index = 0; + ZXIC_UINT32 bhead_reg_index = 0; + ZXIC_UINT32 btail_reg_index = 0; + ZXIC_UINT32 qmu_cfgmt_ddr_in_mmu_index = 0; + ZXIC_UINT32 qmu_cfgmt_ddr_in_qmu_index = 0; + ZXIC_UINT32 qmu_cfgmt_bank_to_mmu_index = 0; + ZXIC_UINT32 qmu_cfgmt_bank_to_qmu_index = 0; + ZXIC_UINT32 qmu_cfgmt_active_to_bank_index = 0; + ZXIC_UINT32 qmu_qcfg_qlist_grp0_bank_index = 0; + ZXIC_UINT32 qmu_qcfg_qlist_grp1_bank_index = 0; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {1, 2, 3, 4, 5, 6, 7, 0}; + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP1_BANK_T qlist_grp1_bank = {0}; + + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ddr_num, 1, 8); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, bank_num_para, 1, 8); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, bank_vld, 0, 1); + + bdep_reg_index = ETM_QMU_QCFG_QLIST_BDEPr; + bhead_reg_index = ETM_QMU_QCFG_QLIST_BHEADr; + btail_reg_index = ETM_QMU_QCFG_QLIST_BTAILr; + qmu_cfgmt_ddr_in_mmu_index = ETM_QMU_CFGMT_DDR_IN_MMU_CFGr; + qmu_cfgmt_ddr_in_qmu_index = ETM_QMU_CFGMT_DDR_IN_QMU_CFGr; + qmu_cfgmt_bank_to_mmu_index = ETM_QMU_CFGMT_BANK_TO_MMU_CFGr; + qmu_cfgmt_bank_to_qmu_index = ETM_QMU_CFGMT_BANK_TO_QMU_CFGr; + qmu_cfgmt_active_to_bank_index = ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr; + qmu_qcfg_qlist_grp0_bank_index = ETM_QMU_QCFG_QLIST_GRP0_BANKr; + qmu_qcfg_qlist_grp1_bank_index = ETM_QMU_QCFG_QLIST_GRP1_BANKr; + + /* cfgmt配置ddr_num组ddr */ + rc = dpp_tm_cfgmt_ddr_attach_set(dev_id, ddr_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_ddr_attach_set"); + + /* ddr组映射: qmu <--> mmu: ddr0~1 映射 0~1 */ + for (i = 0; i < ddr_num; i++) { + /* qmu --> mmu */ + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = i; + rc = dpp_reg_write(dev_id, + qmu_cfgmt_ddr_in_mmu_index, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* mmu --> qmu */ + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + rc = dpp_reg_write(dev_id, + qmu_cfgmt_ddr_in_qmu_index, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + + /* qmu链表首尾指针及bank深度配置 */ + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_tm_qmu_qlist_set starting++++++\n"); + + for (i = 0; i < ddr_num; i++) { + for (j = 0; j < bank_num_para; j++) { + reg_index = (i * 8 + j); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ reg_index=%d ++++++\n", reg_index); + + qlist_bdep.qcfg_qlist_bdep = gene_para; + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bdep.qcfg_qlist_bdep=0x%x ++++++\n", qlist_bdep.qcfg_qlist_bdep); + rc = dpp_reg_write(dev_id, + bdep_reg_index, + 0, + reg_index, + 
&qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + qlist_bhead.bank_vld = bank_vld; + qlist_bhead.qcfg_qlist_bhead = (gene_para * (i * bank_num_para + j)); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.bank_vld=0x%x ++++++\n", qlist_bhead.bank_vld); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.qcfg_qlist_bhead=0x%x ++++++\n", qlist_bhead.qcfg_qlist_bhead); + rc = dpp_reg_write(dev_id, + bhead_reg_index, + 0, + reg_index, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, gene_para, ((i * bank_num_para + j) + 1)); + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, (gene_para * ((i * bank_num_para + j) + 1)), 1); + qlist_btail.qcfg_qlist_btail = ((gene_para * ((i * bank_num_para + j) + 1)) - 1); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_btail.qcfg_qlist_btail=0x%x ++++++\n", qlist_btail.qcfg_qlist_btail); + rc = dpp_reg_write(dev_id, + btail_reg_index, + 0, + reg_index, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ j=%d ++++++\n", j); + + if (j != 8) { + for (k = bank_num_para; k < 8; k++) { + reg_index = (i * 8 + k); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ reg_index=%d ++++++\n", reg_index); + qlist_bhead.bank_vld = 0; + qlist_bhead.qcfg_qlist_bhead = 0; + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.bank_vld=0x%x ++++++\n", qlist_bhead.bank_vld); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.qcfg_qlist_bhead=0x%x ++++++\n", qlist_bhead.qcfg_qlist_bhead); + rc = dpp_reg_write(dev_id, + bhead_reg_index, + 0, + reg_index, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ i=%d ++++++\n", i); + + if (i != 8) { + for (k = ddr_num; k < 8; k++) { + for (j = 0; j < 8; j++) { + reg_index = (k * 8 + j); + 
ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ reg_index=%d ++++++\n", reg_index); + qlist_bhead.bank_vld = 0; + qlist_bhead.qcfg_qlist_bhead = 0; + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.bank_vld=0x%x ++++++\n", qlist_bhead.bank_vld); + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++ qlist_bhead.qcfg_qlist_bhead=0x%x ++++++\n", qlist_bhead.qcfg_qlist_bhead); + rc = dpp_reg_write(dev_id, + bhead_reg_index, + 0, + reg_index, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + } + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_tm_qmu_qlist_set end++++++\n"); + + /*** bank号映射: + qmu-->mmu: bank0~7 映射 bank0~7 + mmu-->qmu: bank0~7 映射 bank0~7 *****/ + for (j = 0; j < ddr_num; j++) { + for (i = 0; i < bank_num_para; i++) { + /* qmu-->mmu: bank0~7 映射 bank0~7 */ + k = j * 8 + i; + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = i; + rc = dpp_reg_write(dev_id, + qmu_cfgmt_bank_to_mmu_index, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* mmu-->qmu: bank0~7 映射 bank0~7 */ + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + qmu_cfgmt_bank_to_qmu_index, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram */ + for (j = 0; j < ddr_num; j++) { + for (i = 0; i < bank_num_para; i++) { + k = j * bank_num_para + i; + + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i; + + rc = dpp_reg_write(dev_id, + qmu_cfgmt_active_to_bank_index, + 0, + k, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < ddr_num; j++) { + for (i = 0; i < bank_num_para; i++) { + k = j * 8 + i; + + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + qmu_qcfg_qlist_grp0_bank_index, + 0, + k, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j 
= 0; j < ddr_num; j++) { + for (i = 0; i < bank_num_para; i++) { + k = j * 8 + i; + + qlist_grp1_bank.qcfg_qlist_grp1_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + qmu_qcfg_qlist_grp1_bank_index, + 0, + k, + &qlist_grp1_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* qmu配置完成 */ + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU DDR随机模式时,DDR随机组配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_num ddr组数,1-6组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_rand_grp_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 bank_no[6][16] = {{0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7}, + {0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15}, + {0, 8, 16, 1, 9, 17, 2, 10, 18, 3, 11, 19, 4, 12, 20, 5}, + {0, 8, 16, 24, 1, 9, 17, 25, 2, 10, 18, 26, 3, 11, 19, 27}, + {0, 8, 16, 24, 32, 1, 9, 17, 25, 33, 2, 10, 18, 26, 34, 3}, + {0, 8, 16, 24, 32, 40, 1, 9, 17, 25, 33, 41, 2, 10, 18, 26} + }; + + DPP_ETM_QMU_QCFG_QLIST_GRP_T grp = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T bank = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ddr_num, 1, 6); + + for (i = 0; i < 64; i++) { + grp.qcfg_qlist_grp_wr = i % ddr_num; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + i, + &grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + for (i = 0; i < 16; i++) { + bank.cfgmt_active_to_bank_cfg = bank_no[ddr_num - 1][i]; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置QMU DDR BANK随机模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_random 模式:0-轮询模式;1-随机模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_random_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_random) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QLIST_DDR_RANDOM_T ddr_rand = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ddr_random, 0, 1); + + ddr_rand.qcfg_qlist_ddr_random = ddr_random; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_DDR_RANDOMr, + 0, + 0, + &ddr_rand); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU DDR BANK随机模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_random 模式:0-轮询模式;1-随机模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_ddr_random_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_ddr_random) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QLIST_DDR_RANDOM_T ddr_rand = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_ddr_random); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_DDR_RANDOMr, + 0, + 0, + &ddr_rand); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_ddr_random = ddr_rand.qcfg_qlist_ddr_random; + + return DPP_OK; +} + +/***********************************************************/ +/** QMU配置完成寄存器,在QMU链表和DDR随机模式寄存器写入后,将此寄存器写1,完成QMU配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/05/08 +************************************************************/ +DPP_STATUS dpp_tm_qmu_cfg_done_set(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QLIST_CFG_DONE_T 
cfg_done = {0}; + + cfg_done.qcfg_qlist_cfg_done = 1; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_CFG_DONEr, + 0, + 0, + &cfg_done); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS的e桶产生的crbal门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index crs组数:0~15 +* @param crs_th CRS产生的crbal门限值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author xuhb @date 2021/02/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_eir_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 index, + ZXIC_UINT32 crs_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_EIR_TH_T crs_eir_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 0xf); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crs_th, 0, 0x3ffff); + + crs_eir_th.qcfg_qsch_crs_eir_th = crs_th; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_EIR_THr, + 0, + index, + &crs_eir_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS产生的crbal门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index crs组数:0~15 +* @param crs_th CRS产生的crbal门限值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_th_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 index, + ZXIC_UINT32 crs_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_TH1_T crs_th1 = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 0xf); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crs_th, 0, 0xffffffff); + + crs_th1.qcfg_qsch_crs_th1 = crs_th; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_TH1r, + 0, + index, + &crs_th1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置CRS产生的空队列确保门限值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param que_type 队列类型编号(0~15) +* @param empty_que_ack_th 空队列确保授权门限 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_th2_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 que_type, + ZXIC_UINT32 empty_que_ack_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_CRS_TH2_T crs_th2 = {0}; + ZXIC_UINT32 rem_bit_sum = 4; + ZXIC_UINT32 rem = 0; + ZXIC_UINT32 exp = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_type, 0, 15); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, empty_que_ack_th, 0, 0x78000); + + rc = dpp_tm_rem_and_exp_translate(empty_que_ack_th, + rem_bit_sum, + &rem, + &exp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + crs_th2.qcfg_qsch_crs_th2 = (rem << 4) + (exp & 0xf); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QSCH_CRS_TH2r, + 0, + que_type, + &crs_th2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS发送的速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sent_cyc CRS发送的间隔(单位:时钟周期) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_sent_rate_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 sent_cyc) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_QMU_CFGMT_CRS_INTERVAL_T crs_interval = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sent_cyc, 1, 0xffffffff); + + crs_interval.cfgmt_crs_interval = sent_cyc; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_CRS_INTERVALr, + 0, + 0, + &crs_interval); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ 
+/** 获取CRS发送的速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param p_sent_cyc CRS发送的间隔(单位:时钟周期) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_sent_rate_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_sent_cyc) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_CRS_INTERVAL_T crs_interval = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sent_cyc); + + *p_sent_cyc = 0; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFGMT_CRS_INTERVALr, + 0, + 0, + &crs_interval); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_sent_cyc = crs_interval.cfgmt_crs_interval; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU端口间交织模式 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param pkt_blk_mode 交织模式: 1-按包交织; 0-按block交织 SA模式只能配置为1 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_blk_mode_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 pkt_blk_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_PKT_BLK_MODE_T csw_pkt_blk_mode = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, pkt_blk_mode, 0, 1); + + csw_pkt_blk_mode.qcfg_csw_pkt_blk_mode = pkt_blk_mode; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_CSW_PKT_BLK_MODEr, + 0, + 0, + &csw_pkt_blk_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取QMU端口间交织模式 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param p_pkt_blk_mode 交织模式: 0-按包交织 ; 1-按block交织SA模式只能配置为1 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_blk_mode_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_pkt_blk_mode) +{ + DPP_STATUS 
rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_PKT_BLK_MODE_T csw_pkt_blk_mode = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_pkt_blk_mode); + + *p_pkt_blk_mode = 0; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_CSW_PKT_BLK_MODEr, + 0, + 0, + &csw_pkt_blk_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_pkt_blk_mode = csw_pkt_blk_mode.qcfg_csw_pkt_blk_mode; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置SA模式下各个版本的授权价值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sa_ver_id 版本号(0~7) +* @param sa_credit_value 授权价值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_sa_credit_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sa_ver_id, + ZXIC_UINT32 sa_credit_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0_T qmu_sa_credit_value = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sa_ver_id, 0, 7); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sa_credit_value, 0, 0x3ffff); + + qmu_sa_credit_value.cfg_qsch_sa_credit_value_0 = sa_credit_value; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0r + sa_ver_id, + 0, + 0, + &qmu_sa_credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取SA模式下各个版本的授权价值 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sa_ver_id 版本号(0~7) +* @param p_sa_credit_value 授权价值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author szq @date 2015/03/25 +************************************************************/ +DPP_STATUS dpp_tm_qmu_sa_credit_value_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sa_ver_id, + ZXIC_UINT32 *p_sa_credit_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0_T qmu_sa_credit_value = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, 
sa_ver_id, 0, 7); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_sa_credit_value); + + *p_sa_credit_value = 0; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0r + sa_ver_id, + 0, + 0, + &qmu_sa_credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_sa_credit_value = qmu_sa_credit_value.cfg_qsch_sa_credit_value_0; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置多播授权令牌添加个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param token_add_num 令牌添加时,每次增加的令牌数目,取值范围为1~255,默认为1;禁止配置为0,配置为0时,将不会产生授权。 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_token_gen_num_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 token_add_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_MUL_TOKEN_GEN_NUM_T token_gen_num = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, token_add_num, 1, 255); + + token_gen_num.cfg_qsch_mul_token_gen_num = token_add_num; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_MUL_TOKEN_GEN_NUMr, + 0, + 0, + &token_gen_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置多播授权整形桶参数和使能参数 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param q3_lb_control_en 3号队列整形功能开启使能。0:关闭;1:开启。 +* @param q012_lb_control_en 0~2号队列整形功能开启使能。0:关闭;1:开启。 +* @param q3_lb_max_cnt 3号队列整形桶桶深。 +* @param q012_lb_max_cnt 0~2号队列整形桶桶深。 +* @param q3_lb_add_rate 3号队列令牌添加速率,时钟周期为单位。不可配置为0,配置为0整形使能时,不能产生队列3授权调度信号。 +* @param q012_lb_add_rate 0~2号队列令牌添加速率,以时钟周期单位。不可配置为0,配置为0并整形使能时,不能产生队列0、1、2授权调度信号。 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_ack_lb_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 q3_lb_control_en, + ZXIC_UINT32 q012_lb_control_en, + 
ZXIC_UINT32 q3_lb_max_cnt, + ZXIC_UINT32 q012_lb_max_cnt, + ZXIC_UINT32 q3_lb_add_rate, + ZXIC_UINT32 q012_lb_add_rate) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 q3_crdt_lb_ctrl_en_reg_index = 0; + ZXIC_UINT32 q012_crdt_lb_ctrl_en_reg_index = 0; + ZXIC_UINT32 q3_crdt_lb_max_cnt_reg_index = 0; + ZXIC_UINT32 q012_crdt_lb_max_cnt_reg_index = 0; + ZXIC_UINT32 q3_crdt_lb_add_rate_reg_index = 0; + ZXIC_UINT32 q012_crdt_lb_add_rate_reg_index = 0; + DPP_ETM_QMU_CFG_QSCH_Q3_CREDIT_LB_CONTROL_EN_T q3_en = {0}; + DPP_ETM_QMU_CFG_QSCH_Q012_CREDIT_LB_CONTROL_EN_T q012_en = {0}; + DPP_ETM_QMU_CFG_QSCH_Q3CREDITLBMAXCNT_T q3_max_cnt = {0}; + DPP_ETM_QMU_CFG_QSCH_Q012CREDITLBMAXCNT_T q012_max_cnt = {0}; + DPP_ETM_QMU_CFG_QSCH_Q3LBADDRATE_T q3_rate = {0}; + DPP_ETM_QMU_CFG_QSCH_Q012LBADDRATE_T q012_rate = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q3_lb_control_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q012_lb_control_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q3_lb_max_cnt, 0, 0xff); + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q012_lb_max_cnt, 0, 0xff); + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q3_lb_add_rate, 0, 0xfffffff); + ZXIC_COMM_CHECK_DEV_INDEX_NONE(dev_id, q012_lb_add_rate, 0, 0xfffffff); + + q3_crdt_lb_ctrl_en_reg_index = ETM_QMU_CFG_QSCH_Q3_CREDIT_LB_CONTROL_ENr; + q012_crdt_lb_ctrl_en_reg_index = ETM_QMU_CFG_QSCH_Q012_CREDIT_LB_CONTROL_ENr; + q3_crdt_lb_max_cnt_reg_index = ETM_QMU_CFG_QSCH_Q3CREDITLBMAXCNTr; + q012_crdt_lb_max_cnt_reg_index = ETM_QMU_CFG_QSCH_Q012CREDITLBMAXCNTr; + q3_crdt_lb_add_rate_reg_index = ETM_QMU_CFG_QSCH_Q3LBADDRATEr; + q012_crdt_lb_add_rate_reg_index = ETM_QMU_CFG_QSCH_Q012LBADDRATEr; + + q3_en.cfg_qsch_q3_credit_lb_control_en = q3_lb_control_en; + rc = dpp_reg_write(dev_id, + q3_crdt_lb_ctrl_en_reg_index, + 0, + 0, + &q3_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + q012_en.cfg_qsch_q012_credit_lb_control_en = q012_lb_control_en; + rc = dpp_reg_write(dev_id, + q012_crdt_lb_ctrl_en_reg_index, + 
0, + 0, + &q012_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + q3_max_cnt.cfg_qsch_q3creditlbmaxcnt = q3_lb_max_cnt; + rc = dpp_reg_write(dev_id, + q3_crdt_lb_max_cnt_reg_index, + 0, + 0, + &q3_max_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + q012_max_cnt.cfg_qsch_q012creditlbmaxcnt = q012_lb_max_cnt; + rc = dpp_reg_write(dev_id, + q012_crdt_lb_max_cnt_reg_index, + 0, + 0, + &q012_max_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + q3_rate.cfg_qsch_q3lbaddrate = q3_lb_add_rate; + rc = dpp_reg_write(dev_id, + q3_crdt_lb_add_rate_reg_index, + 0, + 0, + &q3_rate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + q012_rate.cfg_qsch_q012lbaddrate = q012_lb_add_rate; + rc = dpp_reg_write(dev_id, + q012_crdt_lb_add_rate_reg_index, + 0, + 0, + &q012_rate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置0,1号队列挂接1或2号MCN漏桶信息 +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备索引编号 +* @param mcn_lb_sel 0:0,1号队列挂接1号MCN漏桶 1:0,1号队列挂接2号MCN漏桶 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mcn_lb_sel_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 mcn_lb_sel) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_Q01_ATTACH_EN_T q01_attach_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, mcn_lb_sel, 0, 1); + + q01_attach_en.cfg_qsch_q01_attach_en = mcn_lb_sel; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_Q01_ATTACH_ENr, + 0, + 0, + &q01_attach_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置多播队列0~2的授权输出SP、DWRR +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param sp_or_dwrr SP、DWRR模式选择。0:SP;1:DWRR。 +* @param dwrr_w0 0号队列DWRR权重(0~127) +* 
@param dwrr_w1 1号队列DWRR权重(0~127) +* @param dwrr_w2 2号队列DWRR权重(0~127) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_mul_sp_dwrr_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 sp_or_dwrr, + ZXIC_UINT32 dwrr_w0, + ZXIC_UINT32 dwrr_w1, + ZXIC_UINT32 dwrr_w2) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SP_DWRR_EN_T sp_dwrr_en = {0}; + DPP_ETM_QMU_CFG_QSCH_W0_T qsch_w0 = {0}; + DPP_ETM_QMU_CFG_QSCH_W1_T qsch_w1 = {0}; + DPP_ETM_QMU_CFG_QSCH_W2_T qsch_w2 = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, sp_or_dwrr, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dwrr_w0, 0, 0x7f); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dwrr_w1, 0, 0x7f); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dwrr_w2, 0, 0x7f); + + sp_dwrr_en.cfg_qsch_sp_dwrr_en = sp_or_dwrr; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SP_DWRR_ENr, + 0, + 0, + &sp_dwrr_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + qsch_w0.cfg_qsch_w0 = dwrr_w0; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_W0r, + 0, + 0, + &qsch_w0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + qsch_w1.cfg_qsch_w1 = dwrr_w1; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_W1r, + 0, + 0, + &qsch_w1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + qsch_w2.cfg_qsch_w2 = dwrr_w2; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_W2r, + 0, + 0, + &qsch_w2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置分目的SA整形打开或关闭 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param shap_en 分目的SA整形使能开关 0:表示关闭 1:表示打开 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shap_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 
shap_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_QMU_SASHAP_EN_T sa_shap_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shap_en, 0, 1); + + sa_shap_en.cfgmt_qmu_sashap_en = shap_en; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_QMU_SASHAP_ENr, + 0, + 0, + &sa_shap_en); + + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置轮转扫描使能和扫描速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_en 轮转扫描使能。0:关闭,1:开启 +* @param scan_rate 轮转扫描速率,配置扫描周期不得少于256个周期 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_rate_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 scan_en, + ZXIC_UINT32 scan_rate) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SCAN_EN_T qsch_scan_en = {0}; + DPP_ETM_QMU_CFG_QSCH_SCANRATE_T qsch_scanrate = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, scan_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, scan_rate, 0x100, 0xfffff); + + qsch_scan_en.cfg_qsch_scan_en = scan_en; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SCAN_ENr, + 0, + 0, + &qsch_scan_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + qsch_scanrate.cfg_qsch_scanrate = scan_rate; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SCANRATEr, + 0, + 0, + &qsch_scanrate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得轮转扫描使能和扫描速率 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_en 轮转扫描使能。0:关闭,1:开启 +* @param scan_rate 轮转扫描速率,配置扫描周期不得少于256个周期 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS 
dpp_tm_qmu_scan_rate_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_scan_en, + ZXIC_UINT32 *p_scan_rate) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SCAN_EN_T qsch_scan_en = {0}; + DPP_ETM_QMU_CFG_QSCH_SCANRATE_T qsch_scanrate = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_scan_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_scan_rate); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_QSCH_SCAN_ENr, + 0, + 0, + &qsch_scan_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_scan_en = qsch_scan_en.cfg_qsch_scan_en; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_QSCH_SCANRATEr, + 0, + 0, + &qsch_scanrate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_scan_rate = qsch_scanrate.cfg_qsch_scanrate; + + return DPP_OK; + +} + +/***********************************************************/ +/** 配置轮转扫描队列范围 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 起始队列号 +* @param last_que 终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan @date 2019/09/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_que_range_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 first_que, + ZXIC_UINT32 last_que) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SCANFRSTQUE_T qsch_scanfirstque_t = {0}; + DPP_ETM_QMU_CFG_QSCH_SCANLASTQUE_T qsch_scanlastque_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, first_que, 0, DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, last_que, 0, DPP_ETM_Q_NUM - 1); + + qsch_scanfirstque_t.cfg_qsch_scanfrstque = first_que; + qsch_scanlastque_t.cfg_qsch_scanlastque = last_que; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SCANFRSTQUEr, + 0, + 0, + &qsch_scanfirstque_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_QSCH_SCANLASTQUEr, + 0, + 0, + &qsch_scanlastque_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 获取轮转扫描队列范围 +* @param dev_id 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 起始队列号 +* @param last_que 终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan @date 2019/09/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_scan_que_range_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *first_que, + ZXIC_UINT32 *last_que) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_SCANFRSTQUE_T qsch_scanfirstque_t = {0}; + DPP_ETM_QMU_CFG_QSCH_SCANLASTQUE_T qsch_scanlastque_t = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, first_que); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, last_que); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_QSCH_SCANFRSTQUEr, + 0, + 0, + &qsch_scanfirstque_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_QSCH_SCANLASTQUEr, + 0, + 0, + &qsch_scanlastque_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *first_que = qsch_scanfirstque_t.cfg_qsch_scanfrstque; + *last_que = qsch_scanlastque_t.cfg_qsch_scanlastque; + + return DPP_OK; +} +#endif + +/***********************************************************/ +/** 配置读命令老化使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param aged_en 读命令老化使能:0:不使能;1:使能 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_en_set(DPP_DEV_T *dev, ZXIC_UINT32 aged_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSCH_AGED_CFG_T aged_cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aged_en, 0, 1); + + aged_cfg.qcfg_csch_aged_cfg = aged_en; + + rc = dpp_reg_write(dev, ETM_QMU_QCFG_CSCH_AGED_CFGr, 0, 0, &aged_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置读命令老化速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param scan_time 读命令老化速率(扫描间隔时间) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_scan_time_set(DPP_DEV_T *dev, + ZXIC_UINT32 scan_time) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSCH_AGED_SCAN_TIME_T aged_scan_time = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), scan_time, 0, + 0xffffffff); + + aged_scan_time.qcfg_csch_aged_scan_time = scan_time; + + rc = dpp_reg_write(dev, ETM_QMU_QCFG_CSCH_AGED_SCAN_TIMEr, 0, 0, + &aged_scan_time); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得读命令老化速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_scan_time 读命令老化速率(扫描间隔时间) +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_qmu_wr_aged_scan_time_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_scan_time) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSCH_AGED_SCAN_TIME_T aged_scan_time = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_scan_time); + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_CSCH_AGED_SCAN_TIMEr, 0, 0, + &aged_scan_time); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_scan_time = aged_scan_time.qcfg_csch_aged_scan_time; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取QMU清空状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_clr_done_flag 队列是否清空完成 +* +* @return +* @remark 无 +* @see +* @author szq @date 2015/05/21 +************************************************************/ +DPP_STATUS 
dpp_tm_qmu_qlist_qcfg_clr_done_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_clr_done_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QLIST_QCFG_CLR_DONE_T clr_done_flag = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_clr_done_flag); + + rc = dpp_reg_read(dev, ETM_QMU_QLIST_QCFG_CLR_DONEr, 0, 0, + &clr_done_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_clr_done_flag = clr_done_flag.qlist_qcfg_clr_done; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置qsch调度分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_en 整形使能 +* @param token_add_num [23:12]:添加令牌数目 +* @param token_gap [11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param token_depth 桶深,单位B,范围[0-0x1EE00] +*公式:(1000*8*token_num)/(gap+1) = X Mbps +* 主频= 600 MHz +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author xuhb 2020-5-15 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qsch_port_shape_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 token_add_num, + ZXIC_UINT32 token_gap, + ZXIC_UINT32 token_depth, + ZXIC_UINT32 shape_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QSCH_SHAP_PARAM_T qsch_shap_param = { 0 }; + DPP_ETM_QMU_QCFG_QSCH_SHAP_TOKEN_T qsch_shap_token_depth = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), shape_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_add_num, 0, + 0xfff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_gap, 0, 0xfff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_depth, 0, + 0x1f000); + + /* 配置整形桶深 */ + qsch_shap_token_depth.qcfg_qsch_shap_token = token_depth; + rc = dpp_reg_write(dev, ETM_QMU_QCFG_QSCH_SHAP_TOKENr, 0, port_id, + &qsch_shap_token_depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + 
qsch_shap_param.qcfg_qsch_shap_en = shape_en; + qsch_shap_param.qcfg_qsch_shap_param1 = token_add_num; + qsch_shap_param.qcfg_qsch_shap_param2 = token_gap; + rc = dpp_reg_write(dev, ETM_QMU_QCFG_QSCH_SHAP_PARAMr, 0, port_id, + &qsch_shap_param); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_en 整形使能 +* @param token_add_num [23:12]:添加令牌数目 +* @param token_gap [11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param token_depth 桶深,单位B,范围[0-0x1EE00] +*公式:(1000*8*token_num)/(gap+1) = X Mbps +* 主频= 600 MHz +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_shape_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 token_add_num, + ZXIC_UINT32 token_gap, + ZXIC_UINT32 token_depth, + ZXIC_UINT32 shape_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_SHAP_PARAMETER_T csw_shap_param = { 0 }; + DPP_ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTH_T csw_shap_token_depth = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), shape_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_add_num, 0, + 0xfff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_gap, 0, 0xfff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_depth, 0, + 0x1ee00); + + /* 配置整形桶深 */ + csw_shap_token_depth.qcfg_csw_shap_token_depth = token_depth; + rc = dpp_reg_write(dev, ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTHr, 0, port_id, + &csw_shap_token_depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + csw_shap_param.qcfg_csw_shap_en = shape_en; + csw_shap_param.qcfg_csw_shap_parameter = (token_add_num << 12) | + token_gap; + rc = dpp_reg_write(dev, 
ETM_QMU_QCFG_CSW_SHAP_PARAMETERr, 0, port_id, + &csw_shap_param); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_shape_en 整形使能 +* @param p_token_add_num [23:12]:添加令牌数目 +* @param p_token_gap [11:0]:添加令牌间隔,其中实际间隔为配置间隔+1 +* @param p_token_depth 桶深,单位B +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_shape_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_token_add_num, + ZXIC_UINT32 *p_token_gap, + ZXIC_UINT32 *p_token_depth, + ZXIC_UINT32 *p_shape_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_SHAP_PARAMETER_T csw_shap_param = { 0 }; + DPP_ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTH_T csw_shap_token_depth = { 0 }; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_token_add_num); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_token_gap); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_token_depth); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_shape_en); + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_CSW_SHAP_PARAMETERr, 0, port_id, + &csw_shap_param); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_shape_en = csw_shap_param.qcfg_csw_shap_en; + *p_token_add_num = (csw_shap_param.qcfg_csw_shap_parameter >> 12) & + 0xfff; + *p_token_gap = csw_shap_param.qcfg_csw_shap_parameter & 0xfff; + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTHr, 0, port_id, + &csw_shap_token_depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_token_depth = csw_shap_token_depth.qcfg_csw_shap_token_depth; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 
配置CMD_SW分端口(qmu出端口)整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_cir 整形值,单位Mbps,范围[0-160000] +* @param shape_cbs 桶深, 单位B,范围[0-0x1EE00] +* @param shape_en 整形使能 +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author whuashan 2020-3-17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_egress_shape_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_id, + ZXIC_UINT32 shape_cir, + ZXIC_UINT32 shape_cbs, + ZXIC_UINT32 shape_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_SHAP_PARAMETER_T csw_shap_param = {0}; + DPP_ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTH_T csw_shap_token_depth = {0}; + QMU_PORT_SHAPE_PARA qmu_port_shape_para[100] = {{0}}; + + ZXIC_UINT32 token_add_num = 0; + ZXIC_UINT32 token_gap = 0; + ZXIC_UINT32 shape_value_amplified = 0; + ZXIC_UINT32 compare_value = 0; + ZXIC_UINT32 shape_para_cnt = 0; + ZXIC_UINT32 shape_para_final_cnt = 0; + ZXIC_UINT32 shape_min_value = 0; + ZXIC_UINT32 shape_min_value_num = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, port_id, 0, DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shape_cir, 0, 400000); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shape_cbs, 0, 0x1EE00); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shape_en, 0, 1); + + /* 获取token_add_num、token_gap */ + for (token_gap = 10; token_gap <= 0xfff; token_gap++) { + for (token_add_num = 1; token_add_num <= 0xfff; token_add_num++) { + shape_value_amplified = (1000 * 8 * token_add_num) / (token_gap + 1); + compare_value = (shape_value_amplified - (shape_cir * DPP_TM_QMU_PORT_SHAP_MAG)); + + /* 0~20的范围,避免获取token_add_num、token_gap失败 */ + if ((compare_value > 0) && (compare_value < 20) && (shape_para_cnt < 100)) { + qmu_port_shape_para[shape_para_cnt].shape_value_amplified = shape_value_amplified; + qmu_port_shape_para[shape_para_cnt].token_add_num = token_add_num; + 
qmu_port_shape_para[shape_para_cnt].token_gap = token_gap; + shape_para_final_cnt = shape_para_cnt + 1; + shape_para_cnt++; + } + } + } + + /* 获取最小的整形值参数 */ + shape_min_value = qmu_port_shape_para[0].shape_value_amplified; + + for (shape_para_cnt = 0; shape_para_cnt < shape_para_final_cnt; shape_para_cnt++) { + if (shape_min_value > qmu_port_shape_para[shape_para_cnt].shape_value_amplified) { + shape_min_value = qmu_port_shape_para[shape_para_cnt].shape_value_amplified; + shape_min_value_num = shape_para_cnt; + } + } + + /* 配置整形桶深 */ + csw_shap_token_depth.qcfg_csw_shap_token_depth = shape_cbs; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTHr, + 0, + port_id, + &csw_shap_token_depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 配置整形速率 */ + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shape_min_value_num, 0, 99); + csw_shap_param.qcfg_csw_shap_en = shape_en; + csw_shap_param.qcfg_csw_shap_parameter = (qmu_port_shape_para[shape_min_value_num].token_add_num << 12) | qmu_port_shape_para[shape_min_value_num].token_gap; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_CSW_SHAP_PARAMETERr, + 0, + port_id, + &csw_shap_param); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取CMD_SW分端口整形速率和使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_vlue 整形值,单位Mbps +* @param shape_en 整形使能 +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author zmy @20151217 +************************************************************/ +DPP_STATUS dpp_tm_qmu_egress_shape_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_id, + ZXIC_UINT32 *shape_value, + ZXIC_UINT32 *shape_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSW_SHAP_PARAMETER_T csw_shap_param = {0}; + ZXIC_UINT32 token_add_num = 0; + ZXIC_UINT32 token_gap = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, port_id, 0, DPP_TM_PP_NUM - 1); + + rc = dpp_reg_read(dev_id, + 
ETM_QMU_QCFG_CSW_SHAP_PARAMETERr, + 0, + port_id, + &csw_shap_param); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + token_add_num = (csw_shap_param.qcfg_csw_shap_parameter >> 12) & 0xfff; + token_gap = csw_shap_param.qcfg_csw_shap_parameter & 0xfff; + + *shape_value = (((600 * 8 * token_add_num) / (token_gap + 1)) / DPP_TM_QMU_PORT_SHAP_MAG); + + *shape_en = csw_shap_param.qcfg_csw_shap_en; + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置需要检测的特定队列号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 需要检测统计的特定的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_qnum_set(DPP_DEV_T *dev, ZXIC_UINT32 qnum) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_QNUM_SET_T observe_qnum = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), qnum, 0, + DPP_ETM_Q_NUM - 1); + + observe_qnum.observe_qnum_set = qnum; + rc = dpp_reg_write(dev, ETM_QMU_OBSERVE_QNUM_SETr, 0, 0, &observe_qnum); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得特定的队列号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_qnum 需要检测统计的特定的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_qnum_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_qnum) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_QNUM_SET_T observe_qnum = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qnum); + + rc = dpp_reg_read(dev, ETM_QMU_OBSERVE_QNUM_SETr, 0, 0, &observe_qnum); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qnum = observe_qnum.observe_qnum_set; + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置需要检测的队列组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param group_num 需要检测统计的特定的队列组。这里按取q的低3bit +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_group_set(DPP_DEV_T *dev, ZXIC_UINT32 group_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_BATCH_SET_T observe_batch = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), group_num, 0, 7); + + observe_batch.observe_batch_set = group_num; + rc = dpp_reg_write(dev, ETM_QMU_OBSERVE_BATCH_SETr, 0, 0, + &observe_batch); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得需要检测的队列组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_group_num 需要检测统计的特定的队列组。这里按取q的低3bit +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_group_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_group_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_BATCH_SET_T observe_batch = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_group_num); + + rc = dpp_reg_read(dev, ETM_QMU_OBSERVE_BATCH_SETr, 0, 0, + &observe_batch); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_group_num = observe_batch.observe_batch_set; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置出队暂存使用的进程总数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param used_inall 出队暂存使用的进程总数=19-N,默认3表示使用16个进程 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_use_set(ZXIC_UINT32 
dev_id, ZXIC_UINT32 used_inall) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_PID_USE_INALL_T pid_use_inall = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, used_inall, 0, 19); + + pid_use_inall.cfgmt_nod_rd_buf_0_aful_th = 19 - used_inall; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_PID_USE_INALLr, + 0, + 0, + &pid_use_inall); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得出队暂存使用的进程总数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_used_inall 出队暂存使用的进程总数=19-N,默认3表示使用16个进程 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_use_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_used_inall) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_PID_USE_INALL_T pid_use_inall = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_used_inall); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_PID_USE_INALLr, + 0, + 0, + &pid_use_inall); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_used_inall = 19 - pid_use_inall.cfgmt_nod_rd_buf_0_aful_th; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置出队暂存自回加进程总数阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param round_th 出队暂存自回加进程总数阈值=19-N,默认4表示使用15个进程就自回加 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pid_round_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 round_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_PID_ROUND_TH_T pid_round_th = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_SUB_OVERFLOW_NO_ASSERT(dev_id, 19, round_th); + pid_round_th.cfgmt_nod_rd_buf_1_aful_th = 19 - round_th; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFG_PID_ROUND_THr, + 0, + 0, + &pid_round_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得出队暂存自回加进程总数阈值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_round_th 出队暂存自回加进程总数阈值=19-N,默认4表示使用15个进程就自回加 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ + +DPP_STATUS dpp_tm_qmu_pid_round_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_round_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_PID_ROUND_TH_T pid_round_th = {0}; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFG_PID_ROUND_THr, + 0, + 0, + &pid_round_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_round_th = 19 - pid_round_th.cfgmt_nod_rd_buf_1_aful_th; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置队列授权盈余 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param value 授权盈余 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_value_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QSCH_RW_CRBAL_T qsch_rw_crbal = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, value, 0, 0x1ffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + qsch_rw_crbal.qsch_rw_crbal = value; + rc = dpp_reg_write(dev_id, + ETM_QMU_QSCH_RW_CRBALr, + 0, + qnum, + &qsch_rw_crbal); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得队列授权盈余 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 授权盈余 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS 
dpp_tm_qmu_crbal_value_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QSCH_RW_CRBAL_T qsch_rw_crbal = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_value); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QSCH_RW_CRBALr, + 0, + qnum, + &qsch_rw_crbal); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_value = qsch_rw_crbal.qsch_rw_crbal & 0x1ffff; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置分目的SA整形桶深上、下限参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param max_value 分目的SA整形桶深上限,必须配置为正值 +* @param min_value 分目的SA整形桶深下限,必须配置为负值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/18 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shape_para_set(ZXIC_UINT32 dev_id, + ZXIC_SINT32 max_value, + ZXIC_SINT32 min_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MAX_T sashap_token_max = {0}; + DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MIN_T sashap_token_min = {0}; + + if (max_value < 0) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "[dev_id %d] max_value < 0, err!!!\n", dev_id); + ZXIC_COMM_ASSERT(0); + return DPP_ERR; + } + + if (min_value > 0) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "[dev_id %d] min_value > 0, err!!!\n", dev_id); + ZXIC_COMM_ASSERT(0); + return DPP_ERR; + } + + sashap_token_max.cfgmt_sashap_token_max = max_value; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_SASHAP_TOKEN_MAXr, + 0, + 0, + &sashap_token_max); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + sashap_token_min.cfgmt_sashap_token_min = min_value; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_SASHAP_TOKEN_MINr, + 0, + 0, + &sashap_token_min); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 
获得分目的SA整形桶深上、下限参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_max_value 分目的SA整形桶深上限,必须配置为正值 +* @param p_min_value 分目的SA整形桶深下限,必须配置为负值 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_dest_sa_shape_para_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_max_value, + ZXIC_UINT32 *p_min_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MAX_T sashap_token_max = {0}; + DPP_ETM_QMU_CFGMT_SASHAP_TOKEN_MIN_T sashap_token_min = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_max_value); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_min_value); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFGMT_SASHAP_TOKEN_MAXr, + 0, + 0, + &sashap_token_max); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_max_value = sashap_token_max.cfgmt_sashap_token_max; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFGMT_SASHAP_TOKEN_MINr, + 0, + 0, + &sashap_token_min); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_min_value = sashap_token_min.cfgmt_sashap_token_min; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CRS状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* @param state +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_state_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 state) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QSCH_RW_CRS_T qsch_rw_crs = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, state, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + qsch_rw_crs.qsch_rw_crs = state; + rc = dpp_reg_write(dev_id, + ETM_QMU_QSCH_RW_CRSr, + 0, + qnum, + &qsch_rw_crs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 获得CRS状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 队列号 +* @param p_state +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crs_state_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_state) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QSCH_RW_CRS_T qsch_rw_crs = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_state); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QSCH_RW_CRSr, + 0, + qnum, + &qsch_rw_crs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_state = qsch_rw_crs.qsch_rw_crs; + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置自动授权队列范围 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param first_que 自授权起始队列号 +* @param last_que 自授权终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_que_set(DPP_DEV_T *dev, ZXIC_UINT32 first_que, + ZXIC_UINT32 last_que) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_AUTOCRFRSTQUE_T qsch_autocrfrstque = { 0 }; + DPP_ETM_QMU_CFG_QSCH_AUTOCRLASTQUE_T qsch_autocrlastque = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), first_que, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), last_que, 0, + DPP_ETM_Q_NUM - 1); + + if (first_que > last_que) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "[dev_id %d] first_que > last_que, err!!!\n", + DEV_ID(dev)); + + return DPP_ERR; + } + + qsch_autocrfrstque.cfg_qsch_autocrfrstque = first_que; + rc = dpp_reg_write(dev, ETM_QMU_CFG_QSCH_AUTOCRFRSTQUEr, 0, 0, + &qsch_autocrfrstque); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, 
"dpp_reg_write"); + + qsch_autocrlastque.cfg_qsch_autocrlastque = last_que; + rc = dpp_reg_write(dev, ETM_QMU_CFG_QSCH_AUTOCRLASTQUEr, 0, 0, + &qsch_autocrlastque); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得自动授权队列范围 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_first_que 自授权起始队列号 +* @param p_last_que 自授权终止队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_que_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_first_que, + ZXIC_UINT32 *p_last_que) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_AUTOCRFRSTQUE_T qsch_autocrfrstque = { 0 }; + DPP_ETM_QMU_CFG_QSCH_AUTOCRLASTQUE_T qsch_autocrlastque = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_first_que); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_last_que); + + rc = dpp_reg_read(dev, ETM_QMU_CFG_QSCH_AUTOCRFRSTQUEr, 0, 0, + &qsch_autocrfrstque); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_first_que = qsch_autocrfrstque.cfg_qsch_autocrfrstque; + + rc = dpp_reg_read(dev, ETM_QMU_CFG_QSCH_AUTOCRLASTQUEr, 0, 0, + &qsch_autocrlastque); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_last_que = qsch_autocrlastque.cfg_qsch_autocrlastque; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置自动授权开启使能及扫描速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param auto_crdt_en 自动授权开启使能,默认关闭。0:关闭;1:开启 +* @param auto_crdt_rate 自授权速率配置 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_rate_set(DPP_DEV_T *dev, + ZXIC_UINT32 auto_crdt_en, + ZXIC_UINT32 auto_crdt_rate) +{ + DPP_STATUS rc = DPP_OK; + 
DPP_ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_EN_T credit_control_en = { 0 }; + DPP_ETM_QMU_CFG_QSCH_AUTOCREDITRATE_T autocredit_rate = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), auto_crdt_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), auto_crdt_rate, 0, + 0xfffff); + + credit_control_en.cfg_qsch_auto_credit_control_en = auto_crdt_en; + rc = dpp_reg_write(dev, ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_ENr, 0, 0, + &credit_control_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + autocredit_rate.cfg_qsch_autocreditrate = auto_crdt_rate; + rc = dpp_reg_write(dev, ETM_QMU_CFG_QSCH_AUTOCREDITRATEr, 0, 0, + &autocredit_rate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得自动授权开启使能及扫描速率 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_auto_crdt_en 自动授权开启使能,默认关闭。0:关闭;1:开启 +* @param p_auto_crdt_rate 自授权速率配置 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_auto_credit_rate_get(DPP_DEV_T *dev, + ZXIC_UINT32 *p_auto_crdt_en, + ZXIC_UINT32 *p_auto_crdt_rate) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_EN_T credit_control_en = { 0 }; + DPP_ETM_QMU_CFG_QSCH_AUTOCREDITRATE_T autocredit_rate = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_auto_crdt_en); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_auto_crdt_rate); + + rc = dpp_reg_read(dev, ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_ENr, 0, 0, + &credit_control_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_auto_crdt_en = credit_control_en.cfg_qsch_auto_credit_control_en; + + rc = dpp_reg_read(dev, ETM_QMU_CFG_QSCH_AUTOCREDITRATEr, 0, 0, + &autocredit_rate); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, 
"dpp_reg_read"); + *p_auto_crdt_rate = autocredit_rate.cfg_qsch_autocreditrate; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置授权丢弃使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param all_drop_en 所有授权丢弃使能:1:允许丢弃所有授权;0:仅允许丢弃拥塞授权 +* @param drop_en 授权丢弃使能:1:允许丢弃授权;0:禁止丢弃授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_drop_en_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 all_drop_en, + ZXIC_UINT32 drop_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_QSCH_CRBAL_DROP_EN_T crbal_drop_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, all_drop_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, drop_en, 0, 1); + + crbal_drop_en.cfgmt_qsch_all_crbal_drop_en = all_drop_en; + crbal_drop_en.cfgmt_qsch_crbal_drop_en = drop_en; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_QSCH_CRBAL_DROP_ENr, + 0, + 0, + &crbal_drop_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得授权丢弃使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_all_drop_en 所有授权丢弃使能:1:允许丢弃所有授权;0:仅允许丢弃拥塞授权 +* @param p_drop_en 授权丢弃使能:1:允许丢弃授权;0:禁止丢弃授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/06/23 +************************************************************/ +DPP_STATUS dpp_tm_qmu_crbal_drop_en_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 *p_all_drop_en, + ZXIC_UINT32 *p_drop_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_QSCH_CRBAL_DROP_EN_T crbal_drop_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_all_drop_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_drop_en); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFGMT_QSCH_CRBAL_DROP_ENr, + 0, + 0, + &crbal_drop_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_all_drop_en = 
crbal_drop_en.cfgmt_qsch_all_crbal_drop_en; + *p_drop_en = crbal_drop_en.cfgmt_qsch_crbal_drop_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取特定队列发送的crs normal的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param 注 须先设置统计的特定队列 +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_crs_normal_cnt(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_que_crs_normal_cnt) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_SPEC_Q_CRS_NORMAL_CNT_T crs_normal_cnt; + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRS_NORMAL_CNTr, + 0, + 0, + &crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_que_crs_normal_cnt = crs_normal_cnt.spec_q_crs_normal_cnt; + return DPP_OK; +} + +/***********************************************************/ +/** 获取特定队列发送的crs off的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param 注 须先设置统计的特定队列 +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_crs_off_cnt(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_que_crs_off_cnt) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_SPEC_Q_CRS_OFF_CNT_T crs_off_cnt; + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRS_OFF_CNTr, + 0, + 0, + &crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_que_crs_off_cnt = crs_off_cnt.spec_q_crs_off_cnt; + return DPP_OK; +} + +#endif +/***********************************************************/ +/**设置自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_congest_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 qmu_congest_th) +{ + DPP_STATUS rc = DPP_OK; + 
DPP_ETM_QMU_QCFG_CSCH_CONGEST_TH_T qcfg_csch_congest_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), qmu_congest_th, 0, + 0x1ffff); + + qcfg_csch_congest_th.qcfg_csch_congest_th = qmu_congest_th; + rc = dpp_reg_write(dev, ETM_QMU_QCFG_CSCH_CONGEST_THr, 0, port_id, + &qcfg_csch_congest_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/**获取自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_congest_th_get(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_qmu_congest_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_CSCH_CONGEST_TH_T qcfg_csch_congest_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qmu_congest_th); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_CSCH_CONGEST_THr, 0, port_id, + &qcfg_csch_congest_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qmu_congest_th = qcfg_csch_congest_th.qcfg_csch_congest_th; + + return DPP_OK; +} + +/***********************************************************/ +/**设置CMD_SCH分优先级反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_sp_fc_th_set(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 q_pri, + ZXIC_UINT32 qmu_sp_fc_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + DPP_ETM_QMU_QCFG_CSCH_SP_FC_TH_T qcfg_csch_sp_fc_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_pri, 0, 4); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), qmu_sp_fc_th, 0, + 0x1ffff); + + index = port_id * 5 + q_pri; + qcfg_csch_sp_fc_th.qcfg_csch_sp_fc_th = qmu_sp_fc_th; + + rc = dpp_reg_write(dev, ETM_QMU_QCFG_CSCH_SP_FC_THr, 0, index, + &qcfg_csch_sp_fc_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/**获取自然拥塞反压门限值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qcfg_csch_sp_fc_th_get(DPP_DEV_T *dev, + ZXIC_UINT32 port_id, + ZXIC_UINT32 q_pri, + ZXIC_UINT32 *p_qmu_sp_fc_th) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + DPP_ETM_QMU_QCFG_CSCH_SP_FC_TH_T qcfg_csch_sp_fc_th = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_pri, 0, 4); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_qmu_sp_fc_th); + + index = port_id * 5 + q_pri; + + rc = dpp_reg_read(dev, ETM_QMU_QCFG_CSCH_SP_FC_THr, 0, index, + &qcfg_csch_sp_fc_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_qmu_sp_fc_th = qcfg_csch_sp_fc_th.qcfg_csch_sp_fc_th; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/**每隔10s获取crs状态的个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param +* @return +* @remark 无 +* @see +* @author zmy @date 2015/08/07 +************************************************************/ + +DPP_STATUS dpp_tm_crs_statics(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_CNT_MODE_T que_get_mode = {0}; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 
q_crs_normal_cnt = 0; + ZXIC_UINT32 q_crs_off_cnt = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + rc = dpp_tm_qmu_spec_qnum_set(dev_id, que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_qnum_set"); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + for (i = 0; i <= 2; i++) { + zxic_comm_sleep(10000); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + ZXIC_COMM_PRINT("q_crs_normal_cnt is %d\n ", q_crs_normal_cnt); + + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + ZXIC_COMM_PRINT("q_crs_off_cnt is %d\n ", q_crs_off_cnt); + } + + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 统计QMU发送和CRDT模块指定授权流接收的CRS计数(10s内) +* @param dev_id 设备编号 +* @param que_id QMU队列号 +* @param ackflow_id 授权流号 +* @param valid_flag 0:队列发送和授权流接收都统计; 1:只关注队列发送,2:只关注授权流接收。 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 
2017/05/12 +************************************************************/ +DPP_STATUS dpp_tm_crs_cnt_prt(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, ZXIC_UINT32 ackflow_id, ZXIC_UINT32 valid_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_CNT_MODE_T que_get_mode = {0}; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 q_crs_normal_cnt = 0; + ZXIC_UINT32 q_crs_off_cnt = 0; + ZXIC_UINT32 crdt_crs_que_id_index = 0; + ZXIC_UINT32 all_crs_normal_cnt_index = 0; + ZXIC_UINT32 all_crs_off_cnt_index = 0; + ZXIC_UINT32 que_crs_normal_cnt_index = 0; + ZXIC_UINT32 que_crd_off_cnt_index = 0; + ZXIC_UINT32 crs_end_state_index = 0; + + + /* 结构体变量定义 */ + DPP_ETM_CRDT_CRS_QUE_ID_T crdt_crs_que_id = {0}; + DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNT_T all_crs_normal_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNT_T all_crs_off_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNT_T que_crs_normal_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNT_T que_crd_off_cnt = {0}; + DPP_ETM_CRDT_QMU_CRS_END_STATE_T crs_end_state = {0}; + + crdt_crs_que_id_index = ETM_CRDT_CRS_QUE_IDr; + all_crs_normal_cnt_index = ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNTr; + all_crs_off_cnt_index = ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNTr; + que_crs_normal_cnt_index = ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNTr; + que_crd_off_cnt_index = ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNTr; + crs_end_state_index = ETM_CRDT_QMU_CRS_END_STATEr; + + if (valid_flag == 0) { + /* 设置统计CRDT CRS 接收个数的队列号 */ + crdt_crs_que_id.crs_que_id = ackflow_id; + rc = dpp_reg_write(dev_id, + crdt_crs_que_id_index, + 0, + 0, + &crdt_crs_que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 1.先配置读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 队列发送crs读清 */ + + rc = dpp_tm_qmu_spec_qnum_set(dev_id, 
que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_qnum_set"); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + /* 2.连读两次统计CRS发送和接收 */ + for (i = 0; i < 2; i++) { + zxic_comm_sleep(10000); + + ZXIC_COMM_PRINT("------(%d th)qmu_send & crdt_recv crs_cnt in 10s------\n ", i + 1); + /* 统计CRS发送 */ + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_normal_cnt: 0x%08x\n ", que_id, q_crs_normal_cnt); + + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_off_cnt: 0x%08x\n ", que_id, q_crs_off_cnt); + + /* 统计CRS接收 */ + + /* 统计CRDT接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + all_crs_off_cnt_index, + 0, + 0, + &all_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRDT指定队列接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + que_crd_off_cnt_index, + 0, + 0, + &que_crd_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* 统计CRDT接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + all_crs_normal_cnt_index, + 0, + 0, + &all_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + + /* 统计CRDT指定队列接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + que_crs_normal_cnt_index, + 0, + 0, + &que_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + + /* 统计CRDT指定队列接收到的crs最后的状态 */ + rc = 
dpp_reg_read(dev_id, + crs_end_state_index, + 0, + 0, + &crs_end_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + /* 打印统计信息 */ + ZXIC_COMM_PRINT("crdt_recv_all_crs_normal_cnt: 0x%08x\n ", all_crs_normal_cnt.fifo_out_all_crs_normal_cnt); + ZXIC_COMM_PRINT("crdt_recv_all_crs_off_cnt: 0x%08x\n ", all_crs_off_cnt.fifo_out_all_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_normal_cnt: 0x%08x\n ", ackflow_id, que_crs_normal_cnt.fifo_out_que_crs_normal_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_off_cnt: 0x%08x\n ", ackflow_id, que_crd_off_cnt.fifo_out_que_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_end_state: %d\n ", ackflow_id, crs_end_state.qmu_crs_end_state); + } + + /* 3.配置成crs非读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + } else if (1 == valid_flag) { + /* 1.先配置读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 队列发送crs读清 */ + + rc = dpp_tm_qmu_spec_qnum_set(dev_id, que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_qnum_set"); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + /* 
2.连读两次统计CRS发送和接收 */ + for (i = 0; i < 2; i++) { + zxic_comm_sleep(10000); + + ZXIC_COMM_PRINT("------(%d th)qmu_send crs_cnt in 10s------\n ", i + 1); + /* 统计CRS发送 */ + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_normal_cnt: 0x%08x\n ", que_id, q_crs_normal_cnt); + + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_off_cnt: 0x%08x\n ", que_id, q_crs_off_cnt); + } + + /* 3.配置成crs非读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + } + + else if (valid_flag == 2) { + /* 设置统计CRDT CRS 接收个数的队列号 */ + crdt_crs_que_id.crs_que_id = ackflow_id; + rc = dpp_reg_write(dev_id, + crdt_crs_que_id_index, + 0, + 0, + &crdt_crs_que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 2.连读两次统计CRS发送和接收 */ + for (i = 0; i < 2; i++) { + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + zxic_comm_sleep(10000); + + ZXIC_COMM_PRINT("------(%d th)crdt_recv crs_cnt in 10s------\n ", i + 1); + + /* 统计CRS接收 */ + /* 统计CRDT指定队列接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + que_crd_off_cnt_index, + 0, + 0, + &que_crd_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* 统计CRDT接收到的CRS off 总数 */ + rc = dpp_reg_read(dev_id, + all_crs_off_cnt_index, + 0, + 0, + &all_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRDT接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + all_crs_normal_cnt_index, + 0, 
+ 0, + &all_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + + /* 统计CRDT指定队列接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + que_crs_normal_cnt_index, + 0, + 0, + &que_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRDT指定队列接收到的crs最后的状态 */ + rc = dpp_reg_read(dev_id, + crs_end_state_index, + 0, + 0, + &crs_end_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + /* 打印统计信息 */ + ZXIC_COMM_PRINT("crdt_recv_all_crs_normal_cnt: 0x%08x\n ", all_crs_normal_cnt.fifo_out_all_crs_normal_cnt); + ZXIC_COMM_PRINT("crdt_recv_all_crs_off_cnt: 0x%08x\n ", all_crs_off_cnt.fifo_out_all_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_normal_cnt: 0x%08x\n ", ackflow_id, que_crs_normal_cnt.fifo_out_que_crs_normal_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_off_cnt: 0x%08x\n ", ackflow_id, que_crd_off_cnt.fifo_out_que_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_end_state: %d\n ", ackflow_id, crs_end_state.qmu_crs_end_state); + } + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_crs_cnt_prt:valid_flag_error!!: 0: print crs of que and ackflow;1:print que crs only; 2:print ackflow crs only!\n"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 带停流的统计QMU发送和CRDT模块指定授权流接收的CRS计数 +* @param dev_id 设备编号 +* @param que_id QMU队列号 +* @param ackflow_id 授权流号 +* @param valid_flag 0:默认队列发送和授权流接收都统计,此时队列授权都在本板; +* 1:只关注队列发送,2:只关注授权流接收,需要与源端队列停流配合使用, + 先停流,运行该函数;或者直接不停流得到的是某段时间的计数。 +* @param sleep_time 统计多长时间内的crs计数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/05/12 +************************************************************/ +DPP_STATUS dpp_tm_crs_cnt_prt_1(ZXIC_UINT32 dev_id, + ZXIC_UINT32 que_id, + ZXIC_UINT32 ackflow_id, + ZXIC_UINT32 valid_flag, + 
ZXIC_UINT32 sleep_time) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_CNT_MODE_T que_get_mode = {0}; + ZXIC_UINT32 flow_td_th = 0; + ZXIC_UINT32 q_crs_normal_cnt = 0; + ZXIC_UINT32 q_crs_off_cnt = 0; + ZXIC_UINT32 crdt_crs_que_id_index = 0; + ZXIC_UINT32 all_crs_normal_cnt_index = 0; + ZXIC_UINT32 all_crs_off_cnt_index = 0; + ZXIC_UINT32 que_crs_normal_cnt_index = 0; + ZXIC_UINT32 que_crd_off_cnt_index = 0; + ZXIC_UINT32 crs_end_state_index = 0; + + /* 结构体变量定义 */ + DPP_ETM_CRDT_CRS_QUE_ID_T crdt_crs_que_id = {0}; + DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNT_T all_crs_normal_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNT_T all_crs_off_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNT_T que_crs_normal_cnt = {0}; + DPP_ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNT_T que_crd_off_cnt = {0}; + DPP_ETM_CRDT_QMU_CRS_END_STATE_T crs_end_state = {0}; + + crdt_crs_que_id_index = ETM_CRDT_CRS_QUE_IDr; + all_crs_normal_cnt_index = ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNTr; + all_crs_off_cnt_index = ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNTr; + que_crs_normal_cnt_index = ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNTr; + que_crd_off_cnt_index = ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNTr; + crs_end_state_index = ETM_CRDT_QMU_CRS_END_STATEr; + + rc = dpp_tm_cgavd_td_th_get(dev_id, QUEUE_LEVEL, que_id, &flow_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_get"); + + if (valid_flag == 0) { + /* 设置统计CRDT CRS 接收个数的队列号 */ + crdt_crs_que_id.crs_que_id = ackflow_id; + rc = dpp_reg_write(dev_id, + crdt_crs_que_id_index, + 0, + 0, + &crdt_crs_que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + + /* 1.停流配置读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 队列发送crs读清 */ + + rc = dpp_tm_qmu_spec_qnum_set(dev_id, que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_qnum_set"); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + /* 2.统计CRS发送和接收 */ + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, flow_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + + zxic_comm_sleep(sleep_time); + + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + /* 统计CRDT接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + all_crs_off_cnt_index, + 0, + 0, + &all_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* 统计CRDT指定队列接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + que_crd_off_cnt_index, + 0, + 0, + &que_crd_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + zxic_comm_sleep(1000); + + /* 统计CRS发送 */ + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_normal_cnt: 0x%08x\n ", que_id, q_crs_normal_cnt); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_off_cnt: 0x%08x\n ", que_id, q_crs_off_cnt); + /* 统计CRS接收 */ + /* 统计CRDT接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + all_crs_normal_cnt_index, + 0, + 0, + 
&all_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRDT指定队列接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + que_crs_normal_cnt_index, + 0, + 0, + &que_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + + /* 统计CRDT指定队列接收到的crs最后的状态 */ + rc = dpp_reg_read(dev_id, + crs_end_state_index, + 0, + 0, + &crs_end_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + /* 打印统计信息 */ + ZXIC_COMM_PRINT("crdt_recv_all_crs_normal_cnt: 0x%08x\n", all_crs_normal_cnt.fifo_out_all_crs_normal_cnt); + ZXIC_COMM_PRINT("crdt_recv_all_crs_off_cnt: 0x%08x\n", all_crs_off_cnt.fifo_out_all_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_normal_cnt: 0x%08x\n", ackflow_id, que_crs_normal_cnt.fifo_out_que_crs_normal_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_off_cnt: 0x%08x\n", ackflow_id, que_crd_off_cnt.fifo_out_que_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_end_state: %d\n", ackflow_id, crs_end_state.qmu_crs_end_state); + + + /* 4.配置成crs非读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 5.恢复通流 */ + rc = dpp_tm_cgavd_td_th_set(dev_id, 0, que_id, flow_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + } else if (1 == valid_flag) { /* 仅统计停流清零再发流该段时间发送的CRS */ + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + + /* 1.停流配置读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 队列发送crs读清 */ + + rc = dpp_tm_qmu_spec_qnum_set(dev_id, que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_qnum_set"); + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + /* 2.开流设定时间后停流统计CRS发送 */ + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, flow_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + + zxic_comm_sleep(sleep_time); + + rc = dpp_tm_qmu_spec_q_crs_off_cnt(dev_id, &q_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_off_cnt"); + + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + zxic_comm_sleep(1000); + + /* 统计CRS发送 */ + rc = dpp_tm_qmu_spec_q_crs_normal_cnt(dev_id, &q_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_spec_q_crs_normal_cnt"); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_normal_cnt: 0x%08x\n ", que_id, q_crs_normal_cnt); + ZXIC_COMM_PRINT("que_id(0x%08x)qmu_send_crs_off_cnt: 0x%08x\n ", que_id, q_crs_off_cnt); + + /* 3.配置成crs非读清 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 4.恢复通流 */ + rc = dpp_tm_cgavd_td_th_set(dev_id, QUEUE_LEVEL, que_id, flow_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_tm_cgavd_td_th_set"); + } + + else if (valid_flag == 2) { /* 仅统计接收,需要源端停流之后运行。*/ + /* 设置统计CRDT CRS 接收个数的队列号 */ + crdt_crs_que_id.crs_que_id = ackflow_id; + rc = dpp_reg_write(dev_id, + crdt_crs_que_id_index, + 0, + 0, + &crdt_crs_que_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 1.停流配置读清 */ + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + zxic_comm_sleep(sleep_time); + + /* 统计CRDT接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + all_crs_off_cnt_index, + 0, + 0, + &all_crs_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* 统计CRDT指定队列接收到的CRS off总数 */ + rc = dpp_reg_read(dev_id, + que_crd_off_cnt_index, + 0, + 0, + &que_crd_off_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRS接收 */ + /* 统计CRDT接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + all_crs_normal_cnt_index, + 0, + 0, + &all_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* 统计CRDT指定队列接收到的CRS normal总数 */ + rc = dpp_reg_read(dev_id, + que_crs_normal_cnt_index, + 0, + 0, + &que_crs_normal_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + + /* 统计CRDT指定队列接收到的crs最后的状态 */ + rc = dpp_reg_read(dev_id, + crs_end_state_index, + 0, + 0, + &crs_end_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + /* crdt接收crs读清 */ + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + /* 打印统计信息 */ + ZXIC_COMM_PRINT("crdt_recv_all_crs_normal_cnt: 0x%08x\n", all_crs_normal_cnt.fifo_out_all_crs_normal_cnt); + ZXIC_COMM_PRINT("crdt_recv_all_crs_off_cnt: 0x%08x\n", all_crs_off_cnt.fifo_out_all_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_normal_cnt: 0x%08x\n", ackflow_id, que_crs_normal_cnt.fifo_out_que_crs_normal_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_off_cnt: 0x%08x\n", ackflow_id, 
que_crd_off_cnt.fifo_out_que_crs_off_cnt); + ZXIC_COMM_PRINT("ackflow_id(0x%08x)recv_crs_end_state: %d\n", ackflow_id, crs_end_state.qmu_crs_end_state); + + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "dpp_tm_crs_cnt_prt_1:valid_flag_error!!: 0: print crs of que and ackflow;1:print que crs only; 2:print ackflow crs only\n"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取qlist入队及出队状态监控 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/08/26 +************************************************************/ +DPP_STATUS dpp_tm_qmu_qlist_state_query(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QMU_QLIST_STATE_QUERY_T qcfg_qmu_qlist_state_query = {0}; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QMU_QLIST_STATE_QUERYr, + 0, + 0, + &qcfg_qmu_qlist_state_query); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + ZXIC_COMM_PRINT("pkt_age_req_fifo_afull : %d\n", qcfg_qmu_qlist_state_query.pkt_age_req_fifo_afull); + ZXIC_COMM_PRINT("rd_release_fwft_afull : %d\n", qcfg_qmu_qlist_state_query.rd_release_fwft_afull); + ZXIC_COMM_PRINT("drop_imem_fwft_afull : %d\n", qcfg_qmu_qlist_state_query.drop_imem_fwft_afull); + ZXIC_COMM_PRINT("pkt_age_req_fifo_empty : %d\n", qcfg_qmu_qlist_state_query.pkt_age_req_fifo_empty); + ZXIC_COMM_PRINT("rd_release_fwft_empty : %d\n", qcfg_qmu_qlist_state_query.rd_release_fwft_empty); + ZXIC_COMM_PRINT("drop_imem_fwft_empty : %d\n", qcfg_qmu_qlist_state_query.drop_imem_fwft_empty); + ZXIC_COMM_PRINT("mmu_qmu_sop_rd_rdy : %d\n", qcfg_qmu_qlist_state_query.mmu_qmu_sop_rd_rdy); + ZXIC_COMM_PRINT("big_fifo_empty : %d\n", qcfg_qmu_qlist_state_query.big_fifo_empty); + ZXIC_COMM_PRINT("qmu_mmu_rd_release_rdy : %d\n", qcfg_qmu_qlist_state_query.qmu_mmu_rd_release_rdy); + ZXIC_COMM_PRINT("xsw_qmu_crs_rdy : %d\n", qcfg_qmu_qlist_state_query.xsw_qmu_crs_rdy); + ZXIC_COMM_PRINT("mmu_qmu_rdy : 
%d\n", qcfg_qmu_qlist_state_query.mmu_qmu_rdy); + ZXIC_COMM_PRINT("mmu_ql_wr_rdy : %d\n", qcfg_qmu_qlist_state_query.mmu_ql_wr_rdy); + ZXIC_COMM_PRINT("mmu_ql_rd_rdy : %d\n", qcfg_qmu_qlist_state_query.mmu_ql_rd_rdy); + ZXIC_COMM_PRINT("csw_ql_rdy : %d\n", qcfg_qmu_qlist_state_query.csw_ql_rdy); + ZXIC_COMM_PRINT("ql_init_done : %d\n", qcfg_qmu_qlist_state_query.ql_init_done); + ZXIC_COMM_PRINT("free_addr_ready : %d\n", qcfg_qmu_qlist_state_query.free_addr_ready); + ZXIC_COMM_PRINT("bank_group_afull : %d\n", qcfg_qmu_qlist_state_query.bank_group_afull); + ZXIC_COMM_PRINT("pds_fwft_empty : %d\n", qcfg_qmu_qlist_state_query.pds_fwft_empty); + ZXIC_COMM_PRINT("enq_rpt_fwft_afull : %d\n", qcfg_qmu_qlist_state_query.enq_rpt_fwft_afull); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置QMU流控计数模式 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param mode 流控模式,0-电平流控;1-边沿流控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_fc_cnt_mode_set(DPP_DEV_T *dev, ZXIC_UINT32 mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_FC_CNT_MODE_T fc_cnt_mode_reg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + + fc_cnt_mode_reg.fc_cnt_mode = mode; + rc = dpp_reg_write(dev, ETM_QMU_FC_CNT_MODEr, 0, 0, &fc_cnt_mode_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取QMU流控计数模式 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param p_mode 流控模式,0-电平流控;1-边沿流控 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_fc_cnt_mode_get(DPP_DEV_T *dev, ZXIC_UINT32 *p_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_FC_CNT_MODE_T 
fc_cnt_mode_reg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_mode); + + rc = dpp_reg_read(dev, ETM_QMU_FC_CNT_MODEr, 0, 0, &fc_cnt_mode_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *p_mode = fc_cnt_mode_reg.fc_cnt_mode; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU需要检测流控的端口号 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_portfc_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_PORTFC_SPEC_T observe_portfc_reg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + + observe_portfc_reg.observe_portfc_spec = port_id; + rc = dpp_reg_write(dev, ETM_QMU_OBSERVE_PORTFC_SPECr, 0, 0, + &observe_portfc_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置QMU需要统计的队列号 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param q_id 队列号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_qnum_set(DPP_DEV_T *dev, ZXIC_UINT32 q_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_QNUM_SET_T observe_qnum_reg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), q_id, 0, + DPP_ETM_Q_NUM - 1); + + observe_qnum_reg.observe_qnum_set = q_id; + rc = dpp_reg_write(dev, ETM_QMU_OBSERVE_PORTFC_SPECr, 0, 0, + &observe_qnum_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 配置QMU需要统计的队列组 +* @param dev_id 设备号 +* @param tm_type 0-ETM,1-FTM +* @param batch_id 队列组 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/05/07 +************************************************************/ +DPP_STATUS dpp_tm_qmu_observe_batch_set(DPP_DEV_T *dev, ZXIC_UINT32 batch_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_BATCH_SET_T observe_batch_reg = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), batch_id, 0, 7); + + observe_batch_reg.observe_batch_set = batch_id; + rc = dpp_reg_write(dev, ETM_QMU_OBSERVE_BATCH_SETr, 0, 0, + &observe_batch_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/****************************************************************************** +*包老化配置 +* @param: dev_id: 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* aging_en: 包老化使能:1表示包老化功能使能;0表示包老化功能关闭。 +* aging_interval: 普通老化两次的间隔配置 +* aging_step_interval: 普通老化的老化时间的步进配置值 +* aging_start_qnum: 老化起始队列 +* aging_end_qnum: 老化结束队列 +* aging_req_aful_th: 普通老化FIFO的将满阈值 +* aging_pkt_num: 一次老化的包个数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/05/10 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_aging_set(DPP_DEV_T *dev, ZXIC_UINT32 aging_en, + ZXIC_UINT32 aging_interval, + ZXIC_UINT32 aging_step_interval, + ZXIC_UINT32 aging_start_qnum, + ZXIC_UINT32 aging_end_qnum, + ZXIC_UINT32 aging_pkt_num, + ZXIC_UINT32 aging_req_aful_th) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 age_pkt_num_reg_index = 0; + ZXIC_UINT32 age_step_interval_reg_index = 0; + ZXIC_UINT32 age_interval_reg_index = 0; + ZXIC_UINT32 age_qnum_reg_index = 0; + ZXIC_UINT32 age_req_aful_th_reg_index = 0; + ZXIC_UINT32 age_en_reg_index = 0; + + /* 结构体变量定义 */ + DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_EN_T cfg_age_en = { 0 }; + 
DPP_ETM_QMU_CFGMT_PKT_AGE_STEP_INTERVAL_T cfg_age_step_interval = { 0 }; + DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_INTERVAL_T cfg_age_interval = { 0 }; + DPP_ETM_QMU_CFGMT_QMU_PKT_AGE_START_END_T cfg_age_qnum = { 0 }; + DPP_ETM_QMU_CFGMT_PKT_AGE_REQ_AFUL_TH_T cfg_age_req_aful_th = { 0 }; + DPP_ETM_QMU_CFGMT_AGE_PKT_NUM_T cfg_age_pkt_num = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_step_interval, 0, + 0xff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_interval, 0, + 0xffff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_req_aful_th, 0, + 0x3f); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_pkt_num, 0, 0xf); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_start_qnum, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_end_qnum, 0, + DPP_ETM_Q_NUM - 1); + age_pkt_num_reg_index = ETM_QMU_CFGMT_AGE_PKT_NUMr; + age_step_interval_reg_index = ETM_QMU_CFGMT_PKT_AGE_STEP_INTERVALr; + age_interval_reg_index = ETM_QMU_CFGMT_QMU_PKT_AGE_INTERVALr; + age_qnum_reg_index = ETM_QMU_CFGMT_QMU_PKT_AGE_START_ENDr; + age_req_aful_th_reg_index = ETM_QMU_CFGMT_PKT_AGE_REQ_AFUL_THr; + age_en_reg_index = ETM_QMU_CFGMT_QMU_PKT_AGE_ENr; + + cfg_age_en.cfgmt_qmu_pkt_age_en = aging_en; + cfg_age_step_interval.cfgmt_pkt_age_step_interval = aging_step_interval; + cfg_age_interval.cfgmt_qmu_pkt_age_interval = aging_interval; + cfg_age_qnum.cfgmt_qmu_pkt_age_start = aging_start_qnum; + cfg_age_qnum.cfgmt_qmu_pkt_age_end = aging_end_qnum; + cfg_age_req_aful_th.cfgmt_pkt_age_req_aful_th = aging_req_aful_th; + cfg_age_pkt_num.cfgmt_age_pkt_num = aging_pkt_num; + + rc = dpp_reg_write(dev, age_pkt_num_reg_index, 0, 0, &cfg_age_pkt_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, age_step_interval_reg_index, 0, 0, + &cfg_age_step_interval); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, age_interval_reg_index, 0, 0, + &cfg_age_interval); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, age_qnum_reg_index, 0, 0, &cfg_age_qnum); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, age_req_aful_th_reg_index, 0, 0, + &cfg_age_req_aful_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, age_en_reg_index, 0, 0, &cfg_age_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/****************************************************************************** +*配置老化一个包的时间,一次老化一个包,老化队列范围为可配 +* @param: dev_id: 设备索引编号 +* @param tm_type 0: etm; 1: ftm; +* aging_en: 包老化使能:1表示包老化功能使能;0表示包老化功能关闭。 +* aging_time: 老化一个包的时间,单位ms + aging_que_start:老化起始队列 + aging_que_start:老化终止队列 +老化时间=2*aging_interval*step_interval*q_num +aging_interval = (aging_time * 600000) / (2 * 1 * DPP_TM_Q_NUM); +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/08 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pkt_age_time_set(DPP_DEV_T *dev, ZXIC_UINT32 aging_en, + ZXIC_UINT32 aging_time, + ZXIC_UINT32 aging_que_start, + ZXIC_UINT32 aging_que_end) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 aging_interval = 0; + //DPP_CRM_CSR_PLL_CLK_SEL_T pll_clk_sel_t = {0}; + // ZXIC_UINT32 sys_clk = 0; + // ZXIC_UINT32 sys_clk_temp[8]={200,250,300,500,600,800,1000,1200}; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_en, 0, 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_que_start, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), aging_que_end, 0, + DPP_ETM_Q_NUM - 1); 
+#if 0 + rc = dpp_reg_read(dev_id, + CRM_CSR_PLL_CLK_SELr, + 0, + 0, + &pll_clk_sel_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, pll_clk_sel_t.sys_clk_2x_sel, CRM_SYS_CLK_200M, CRM_SYS_CLK_INVALID - 1); + + sys_clk = sys_clk_temp[pll_clk_sel_t.sys_clk_2x_sel] / 2; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, aging_time, sys_clk); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, aging_time * sys_clk, DPP_TM_KILO_ULL); + aging_interval = (aging_time * sys_clk * DPP_TM_KILO_ULL * DPP_TM_KILO_ULL / 1000) / (2 * 1 * (aging_que_end - aging_que_start + 1)); +#endif + + if (aging_interval == 0) { + aging_interval = 1; /* 防止算出来的是小数,导致写入0 */ + } + + rc = dpp_tm_qmu_pkt_aging_set(dev, aging_en, aging_interval, 1, + aging_que_start, aging_que_end, 1, 0xa); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_qmu_pkt_aging_set"); + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 获得队列空标志查询 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 队列空标志查询 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_ept_flag_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QCFG_QLIST_EPT_RD_T qcfg_qlist_ept_rd = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_value); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_EPT_RDr, + 0, + qnum, + &qcfg_qlist_ept_rd); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_value = qcfg_qlist_ept_rd.qcfg_qlist_ept_rd; + + return DPP_OK; +} + +/***********************************************************/ +/** 获得队列深度计数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param 
p_value 队列深度计数 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_r_bcnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qnum, + ZXIC_UINT32 *p_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_QLIST_R_BCNT_T qlist_r_bcnt = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_value); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qnum, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QLIST_R_BCNTr, + 0, + qnum, + &qlist_r_bcnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_value = qlist_r_bcnt.qlist_r_bcnt; + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置pfc使能 +* @param dev_id 设备编号 +* @param pfc_en 配置的值,0-不使能pfc,1-使能pfc +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pfc_en_set(DPP_DEV_T *dev, ZXIC_UINT32 pfc_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_QMU_PFC_EN_T qmu_pfc_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), pfc_en, 0, 1); + + qmu_pfc_en.cfgmt_qmu_pfc_en = pfc_en; + rc = dpp_reg_write(dev, ETM_QMU_CFGMT_QMU_PFC_ENr, 0, 0, &qmu_pfc_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取pfc使能 +* @param dev_id 设备编号 +* @param pfc_en 配置的值,0-不使能pfc,1-使能pfc +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_pfc_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *pfc_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CFGMT_QMU_PFC_EN_T qmu_pfc_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), pfc_en); + + *pfc_en = 0xffffffff; + rc = 
dpp_reg_read(dev, ETM_QMU_CFGMT_QMU_PFC_ENr, 0, 0, &qmu_pfc_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *pfc_en = qmu_pfc_en.cfgmt_qmu_pfc_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置端口pfc掩码 +* @param dev_id 设备编号 +* @param port_id 端口号:0~63 +* @param port_en 端口掩码配置,1pfc模式下该端口接收olif的优先级反压, +* 0pfc模式下该端口不接受olif的优先级反压,并将反压信号全部置1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_pfc_make_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_1_T pfc_mask_31_0 = { 0 }; + DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_2_T pfc_mask_63_32 = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_en, 0, 1); + + if (port_id <= 31) { + /* port_id:[0-31] */ + rc = dpp_reg_read(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_1r, 0, 0, + &pfc_mask_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = pfc_mask_31_0.cfgmt_qmu_pfc_mask_1; + + if (port_en == 0) { + value = value & (~(1u << port_id)); + } else { + value = value | (1u << port_id); + } + + pfc_mask_31_0.cfgmt_qmu_pfc_mask_1 = value; + + rc = dpp_reg_write(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_1r, 0, 0, + &pfc_mask_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + } else { + /* port_id:[32-63] */ + rc = dpp_reg_read(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_2r, 0, 0, + &pfc_mask_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = pfc_mask_63_32.cfgmt_qmu_pfc_mask_2; + + if (port_en == 0) { + value = value & (~(1u << (port_id - 32))); + } else { + value = value | (1u << (port_id - 32)); + } + + pfc_mask_63_32.cfgmt_qmu_pfc_mask_2 = 
value; + + rc = dpp_reg_write(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_2r, 0, 0, + &pfc_mask_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获得端口pfc掩码 +* @param dev_id 设备编号 +* @param port_id 端口号:0~63 +* @param port_en 端口掩码配置,1pfc模式下该端口接收olif的优先级反压, +* 0pfc模式下该端口不接受olif的优先级反压,并将反压信号全部置1 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author lsy @date 2022/08/22 +************************************************************/ +DPP_STATUS dpp_tm_qmu_port_pfc_make_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_1_T pfc_mask_31_0 = { 0 }; + DPP_ETM_QMU_CFGMT_QMU_PFC_MASK_2_T pfc_mask_63_32 = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_port_en); + + if (port_id <= 31) { + /* port_id:[0-31] */ + rc = dpp_reg_read(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_1r, 0, 0, + &pfc_mask_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = pfc_mask_31_0.cfgmt_qmu_pfc_mask_1; + + *p_port_en = 1 & (value >> port_id); + } else { + /* port_id:[32-63] */ + rc = dpp_reg_read(dev, ETM_QMU_CFGMT_QMU_PFC_MASK_2r, 0, 0, + &pfc_mask_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = pfc_mask_63_32.cfgmt_qmu_pfc_mask_2; + + *p_port_en = 1 & (value >> (port_id - 32)); + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** QMU初始化配置场景 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param case_no QMU初始化场景编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 case_no_temp) 
+{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 case_no = 0; + ZXIC_UINT32 ftm_ddr_no = 0; + ZXIC_UINT32 etm_ddr_no = 0; + + g_qmu_init_case_no = case_no_temp; + + if ((case_no_temp & 0xff) == 0xef && (case_no_temp >> 8) != 0 && (case_no_temp >> + 16 != 0)) { + case_no = 0xef; + ZXIC_COMM_PRINT("Here get in qmu init:%d case_num is %d\n", case_no); + /*低8-15bit作为FTM的DDR0-9中的编号,高16-23bit作为ETM的DDR0-9中的编号(only case_16 use)*/ + ftm_ddr_no = (case_no_temp >> 8) & 0xff; + etm_ddr_no = (case_no_temp >> 16) & 0xff; + } else if ((case_no_temp & 0xf) == 0xc) { + /*etm 和ftm共享指定1组ddr 8bank,其中etm 0-3bank ftm 4-7bank*/ + case_no = 0xc; + } else if ((case_no_temp & 0xf) == 0xd) { + /*一个tm独享指定4组8bank*/ + case_no = 0xd; + } else if (((case_no_temp & 0xf) == 0xe) || ((case_no_temp & 0xf) == 0xf) \ + ((case_no_temp & 0xf) == 0xb)) { + /*提取case_no_temp的低4bit作为case_no*/ + case_no = case_no_temp & 0xf; + ZXIC_COMM_PRINT("Here get in qmu init:%d case_num is %d\n", case_no); + + /*中间4bit作为ETM的DDR0-9中的编号,最高4bit作为FTM的DDR0-9中的编号(only case_11 14 15 use)*/ + ftm_ddr_no = (case_no_temp >> 4) & 0xf; + etm_ddr_no = (case_no_temp >> 8) & 0xf; + } else { + case_no = case_no_temp & 0xf; + } + /**clear mmu qmu**/ + rc = dpp_tm_qmu_mmu_cfg_clr(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_mmu_cfg_clr"); + + if (case_no == 0) { + /**纯片内pd16k模式**/ + //rc = dpp_tm_qmu_init_set_pd16k_2(dev_id, 64); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_pd16k_2"); + + rc = dpp_tm_qmu_init_set_chuk32(dev_id, 512); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_chuk32"); + } else if (case_no == 1) { + /* 场景1:每个tm用8组*4bank,MMU实际分配ftm:0-7组(0145bank); etm:2-9组(2367bank) */ + rc = dpp_tm_qmu_init_set_1(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_1"); + } else if (case_no == 2) { + /* 场景2:tm独享8组ddr:ftm为0-7组ddr的0123bank,etm为0-7组ddr 4567bank */ + rc = dpp_tm_qmu_init_set_2(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_tm_qmu_init_set_2"); + } else if (case_no == 3) { + /* 场景3:TM独享8组ddr:FTM为0-3组ddr的0~3bank,ETM为4-7组ddr 0~3bank */ + rc = dpp_tm_qmu_init_set_3(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_3"); + } else if (case_no == 4) { + /* 场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank */ + /*需开启IP_rotation、mmu multi_burst,关闭mmu_rotation_en*/ + rc = dpp_tm_qmu_init_set_4(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_4"); + } else if (case_no == 5) { + /* 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank */ + /*需开启IP_rotation、mmu multi_burst,关闭mmu_rotation_en*/ + rc = dpp_tm_qmu_init_set_5(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_5"); + } else if (case_no == 6) { + /*场景6:4组*2bank,MMU实际分配4,6,7,9; etm使用01bank,ftm使用23bank*/ + /*需开启IP_rotation、mmu multi_burst,关闭mmu_rotation_en*/ + rc = dpp_tm_qmu_init_set_6(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_6"); + } else if (case_no == 7) { + /* 关闭rotation*/ + //rc = dpp_mmu_init(0, 0, 0, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_init"); + /* 场景7:每个tm用8组*4bank,MMU实际分配ftm:2-9组(0145bank); etm:2-9组(2367bank) */ + rc = dpp_tm_qmu_init_set_7(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_7"); + } else if (case_no == 8) { + /* 场景8:每个tm用8组*4bank,MMU实际分配ftm:2-9组(01bank); etm:2-9组(23bank) */ + /*需开启IP_rotation、mmu multi_burst,关闭mmu_rotation_en*/ + rc = dpp_tm_qmu_init_set_8(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_8"); + } else if (case_no == 9) { + /*场景9:4组*2bank,MMU实际分配6,7,8,9; etm使用01bank,ftm使用23bank*/ + /*需开启IP_rotation、mmu multi_burst,关闭mmu_rotation_en*/ + rc = dpp_tm_qmu_init_set_9(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_9"); + } else if (case_no == 10) { + /* 关闭rotation*/ + //rc = dpp_mmu_init(0, 0, 0, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_init"); + 
/*场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank*/ + rc = dpp_tm_qmu_init_set_10(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_10"); + } else if (case_no == 11) { + /*场景11:TM共享2组ddr:FTM为ddr_no1,ddr_no2组ddr的01bank,ETM为ddr_no1,ddr_no2组ddr 23bank*/ + rc = dpp_tm_qmu_init_set_11(dev_id, ftm_ddr_no, etm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_11"); + } else if (case_no == 0xc) { + /*etm 和ftm共享指定1组ddr 8bank,其中etm 0-3bank ftm 4-7bank*/ + rc = dpp_tm_qmu_init_set_12(dev_id, case_no_temp >> 4); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_12"); + } else if (case_no == 0xd) { + /* 关闭rotation 配置8bank必须关掉,不然会出现crc错包*/ + //rc = dpp_mmu_init(0, 0, 0, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_init"); + /*场景13:TM独享4组0-7bank,ddr编号来自传参,如0x6789 即代表独享6 7 8 + 9组ddr的全部bank*/ + rc = dpp_tm_qmu_init_set_13(dev_id, case_no_temp >> 4); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_13"); + } else if (case_no == 0xe) { + /* 关闭rotation*/ + //rc = dpp_mmu_init(dev_id, 0, 0, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_init"); + + /* 场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) */ + rc = dpp_tm_qmu_init_set_14(dev_id, etm_ddr_no, ftm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_8"); + } else if (case_no == 0xf) { + /* 场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) */ + rc = dpp_tm_qmu_init_set_15(dev_id, etm_ddr_no, ftm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_8"); + } else { /* if (case_no == 0xef)*/ + /* 关闭rotation*/ + //rc = dpp_mmu_init(0, 0, 0, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_init"); + /* 场景15:每个tm只用2组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) */ + rc = dpp_tm_qmu_init_set_16(dev_id, etm_ddr_no, ftm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set_16"); 
+ } + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景1:每个tm用8组*4bank,MMU实际分配ftm:0-7组(0145bank); etm:2-9组(2367bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_1(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {2, 3, 6, 7}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1, 2, 3}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~7映射成2~9 */ + for (i = 0; i < 8; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = (i + 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,2~9映射成0~7 */ + for (i = 2; i < 10; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = (i - 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~3映射成bank2、3、6、7 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = 
bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3、6、7映射成bank0、1、2、3 */ + for (j = 2; j < 10; j++) { + for (i = 0; i < 4; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, j * 8, bank_to_mmu_cfg_map[i]); + k = j * 8 + bank_to_mmu_cfg_map[i]; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + k = j * 8 + i; + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 4 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i 
= 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 4]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景1:每个tm用8组*4bank,MMU实际分配ftm:0-7组(0145bank); etm:2-9组(2367bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_1(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm mmu基址配置:ddr2-9组,bank号2367,bank深度1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x3fc, 0xcc, 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_1(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU/MMU初始化配置场景二:tm独享8组ddr:ftm为0-7组ddr的0123bank,etm为0-7组ddr 4567bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/16 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_2(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 1024; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {1, 2, 3, 0}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + 
DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set starting++++++\n"); + + /* ddr组从qmu到mmu映射,0~7映射成0~7 */ + for (i = 0; i < 8; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,0~7映射成0~7 */ + for (i = 0; i < 8; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组bank0、1、2、3映射成bank4567,组0~7循环配置 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = (i + 4); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组bank4567映射成bank0、1、2、3,组0~7循环配置 */ + for (j = 0; j < 8; j++) { + for (i = 4; i < 8; i++) { + k = j * 8 + i; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = (i - 4); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 
首尾深度指针配置:j为4组循环 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 4 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定ddr内bank的轮询顺序:使用几组ddr需要配置几组 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 4]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set end++++++\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景二:tm独享8组ddr:ftm为0-7组ddr的0123bank,etm为0-7组ddr 4567bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_2(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm 
mmu基址配置:ddr0-7组,bank号4567,bank深度1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0xff, 0xf0, 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_2(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* depth=64,代表16k +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_pd16k_2(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {1, 2, 3, 0}; + ZXIC_UINT32 ddr_num = 4; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP1_BANK_T qlist_grp1_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP2_BANK_T qlist_grp2_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP3_BANK_T qlist_grp3_bank = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set starting++++++\n"); + + /* cfgmt配置4组ddr */ + rc = dpp_tm_cfgmt_ddr_attach_set(dev_id, ddr_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_ddr_attach_set"); + + /* ddr组从qmu到mmu映射,0~3映射成4~7 */ + for (i = 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = (i + 4); + + rc = dpp_reg_write(dev_id, + 
ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,4~7映射成0~3 */ + for (i = 4; i < 8; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = (i - 4); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组bank0、1、2、3映射成bank2、3、6、7,组0~3循环配置 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + if (i < 2) { + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = (i + 2); + } else { + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = (i + 4); + } + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组bank2、3、6、7映射成bank0、1、2、3,组4~7循环配置 */ + for (j = 4; j < 8; j++) { + for (i = 2; i < 6; i++) { + if (i < 4) { + k = j * 8 + i; + } else { + k = j * 8 + i + 2; + } + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = (i - 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + k = j * 4 + i; + + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + k, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* 首尾深度指针配置:j为4组循环 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 4 + i + 1) * depth - 1); + 
qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 16; j++) { + for (i = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr, + 0, + k, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp1_bank.qcfg_qlist_grp1_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP1_BANKr, + 0, + k, + &qlist_grp1_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp2_bank.qcfg_qlist_grp2_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP2_BANKr, + 0, + k, + &qlist_grp2_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp3_bank.qcfg_qlist_grp3_bank_wr = qlist_grp0_bank_data[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP3_BANKr, + 0, + k, + &qlist_grp3_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set end++++++\n"); + + return 
DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,tm看到4组*4bank,对应DDR分配4~7组,每组2~3,6~7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_pd16k_2(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_etm_qmu_init_set_pd16k_2(dev_id, depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +DPP_STATUS dpp_qmu_init_info(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 ddr_num = 0; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP1_BANK_T qlist_grp1_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP2_BANK_T qlist_grp2_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP3_BANK_T qlist_grp3_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP4_BANK_T qlist_grp4_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP5_BANK_T qlist_grp5_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP6_BANK_T qlist_grp6_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP7_BANK_T qlist_grp7_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + dpp_tm_cfgmt_ddr_attach_get(0, &ddr_num); + ZXIC_COMM_PRINT("dpp_tm_cfgmt_ddr_attach_get ddr_num:%d\n", ddr_num); + + /* random映射ram */ + for (j = 0; j < 4; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + active_to_bank_cfg.cfgmt_active_to_bank_cfg = 0; + + rc = dpp_reg_read(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + k, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + 
ZXIC_COMM_PRINT("cfgmt_active_to_bank_cfg: cfgmt_active_to_bank_cfg-%d value:%d\n", k, active_to_bank_cfg.cfgmt_active_to_bank_cfg); + } + } + + /* 首尾深度指针配置: */ + for (j = 0, k = 0; j < 8; j++) { + k = j * 8; + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_bhead: qlist_bhead-%d bank_vld:%d qcfg_qlist_bhead:%d\n", k, qlist_bhead.bank_vld, qlist_bhead.qcfg_qlist_bhead); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_btail: qlist_btail-%d qcfg_qlist_btail:%d\n", k, qlist_btail.qcfg_qlist_btail); + + } + for (j = 0; j < 64; j++) { + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + j, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_bdep: qlist_bdep-%d qcfg_qlist_bdep:%d\n", j, qlist_bdep.qcfg_qlist_bdep); + } + + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr, + 0, + k, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp0_bank: qlist_grp0_bank-%d qcfg_qlist_grp0_bank_wr:%d\n", k, qlist_grp0_bank.qcfg_qlist_grp0_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP1_BANKr, + 0, + k, + &qlist_grp1_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp1_bank: qlist_grp1_bank-%d qcfg_qlist_grp1_bank_wr:%d\n", k, qlist_grp1_bank.qcfg_qlist_grp1_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP2_BANKr, + 0, + k, + 
&qlist_grp2_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp2_bank: qlist_grp2_bank-%d qcfg_qlist_grp2_bank_wr:%d\n", k, qlist_grp2_bank.qcfg_qlist_grp2_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP3_BANKr, + 0, + k, + &qlist_grp3_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp3_bank: qlist_grp3_bank-%d qcfg_qlist_grp3_bank_wr:%d\n", k, qlist_grp3_bank.qcfg_qlist_grp3_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP4_BANKr, + 0, + k, + &qlist_grp4_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp4_bank: qlist_grp4_bank-%d qcfg_qlist_grp4_bank_wr:%d\n", k, qlist_grp4_bank.qcfg_qlist_grp4_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP5_BANKr, + 0, + k, + &qlist_grp5_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp5_bank: qlist_grp5_bank-%d qcfg_qlist_grp5_bank_wr:%d\n", k, qlist_grp5_bank.qcfg_qlist_grp5_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP6_BANKr, + 0, + k, + &qlist_grp6_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qlist_grp6_bank: qlist_grp6_bank-%d qcfg_qlist_grp6_bank_wr:%d\n", k, qlist_grp6_bank.qcfg_qlist_grp6_bank_wr); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRP7_BANKr, + 0, + k, + &qlist_grp7_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + 
ZXIC_COMM_PRINT("qlist_grp7_bank: qlist_grp7_bank-%d qcfg_qlist_grp7_bank_wr:%d\n", k, qlist_grp7_bank.qcfg_qlist_grp7_bank_wr); + } + } + + for (j = 0; j < 8; j++) { + for (i = 0, k = 0; i < 8; i++) { + k = j * 8 + i; + + rc = dpp_reg_read(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + k, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + ZXIC_COMM_PRINT("qcfg_qlist_grp: qcfg_qlist_grp-%d qcfg_qlist_grp_wr:%d\n", k, qcfg_qlist_grp.qcfg_qlist_grp_wr); + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景二:ddr3模式,8组*8bank +* depth=512,代表32k +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/04/12 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_chuk32(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + ZXIC_UINT32 qlist_grp_bank_data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 ddr_num = 0xff; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP1_BANK_T qlist_grp1_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP2_BANK_T qlist_grp2_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP3_BANK_T qlist_grp3_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP4_BANK_T qlist_grp4_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP5_BANK_T qlist_grp5_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP6_BANK_T qlist_grp6_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP7_BANK_T qlist_grp7_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set starting++++++\n"); + + /* cfgmt配置8组ddr 
*/ + rc = dpp_tm_cfgmt_ddr_attach_set(dev_id, ddr_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_ddr_attach_set"); + + /* 首尾深度指针配置: */ + for (j = 0, k = 0; j < 8; j++) { + k = j * 8; + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = j * depth; + qlist_btail.qcfg_qlist_btail = 511 + j * depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + for (j = 0; j < 64; j++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + j, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[0]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr, + 0, + k, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp1_bank.qcfg_qlist_grp1_bank_wr = qlist_grp0_bank_data[1]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP1_BANKr, + 0, + k, + &qlist_grp1_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp2_bank.qcfg_qlist_grp2_bank_wr = qlist_grp0_bank_data[2]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP2_BANKr, + 0, + k, + &qlist_grp2_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp3_bank.qcfg_qlist_grp3_bank_wr = qlist_grp0_bank_data[3]; 
+ + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP3_BANKr, + 0, + k, + &qlist_grp3_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp4_bank.qcfg_qlist_grp4_bank_wr = qlist_grp0_bank_data[4]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP4_BANKr, + 0, + k, + &qlist_grp4_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp5_bank.qcfg_qlist_grp5_bank_wr = qlist_grp0_bank_data[5]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP5_BANKr, + 0, + k, + &qlist_grp5_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp6_bank.qcfg_qlist_grp6_bank_wr = qlist_grp0_bank_data[6]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP6_BANKr, + 0, + k, + &qlist_grp6_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 16; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + qlist_grp7_bank.qcfg_qlist_grp7_bank_wr = qlist_grp0_bank_data[7]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP7_BANKr, + 0, + k, + &qlist_grp7_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 8; j++) { + for (i = 0, k = 0; i < 8; i++) { + k = j * 8 + i; + qcfg_qlist_grp.qcfg_qlist_grp_wr = qlist_grp_bank_data[i]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + k, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram */ + for (j = 0; j < 4; j++) { + for (i = 0, k = 0; i < 4; i++) { + k = j * 4 + i; + + active_to_bank_cfg.cfgmt_active_to_bank_cfg = 0; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + k, + 
&active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + zxic_comm_sleep(5); + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set end++++++\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景:ddr3模式,8组ddr*8bank +* @param dev_id 设备编号 +* @param depth bank depth +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author sun @date 2023/04/12 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_chuk32(ZXIC_UINT32 dev_id, ZXIC_UINT32 depth) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_etm_qmu_init_set_chuk32(dev_id, depth); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景二:TM独享8组ddr:FTM为0-3组ddr的0~3bank,ETM为4-7组ddr 0~3bank +* @param dev_id 设备编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_3(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 2048; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp 
= {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set starting++++++\n"); + + /* ddr组从qmu到mmu映射,0~3映射成4~7 */ + for (i = 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = i + 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,4~7映射成0~3 */ + for (i = 4; i < 8; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i - 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,组0~3的bank0-3映射成bank0-3 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,组4~7的bank0-7映射成bank0-7 */ + for (j = 4; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置:j为8组循环 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 
4 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定ddr内bank的轮询顺序:使用几组ddr需要配置几组 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 4]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 4; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set end++++++\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景三:TM独享8组ddr:FTM为0-3组ddr的0~3bank,ETM为4-7组ddr 0~3bank +* MMU开启rotatjon +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_3(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_RANDOM_BYPASS_EN_T etm_bypass = {0}; +// DPP_FTM_QMU_RANDOM_BYPASS_EN_T ftm_bypass = {0}; + + /* etm mmu基址配置:ddr4-7组,bank号0-3,bank深度2048 */ + 
//rc = dpp_mmu_bank_base_addr_set(dev_id, 0xf0, 0xf, 2048); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_3(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_1"); + + /* 关闭etm映射配置 */ + etm_bypass.random_bypass_en = 0; + rc = dpp_reg_write(dev_id, + ETM_QMU_RANDOM_BYPASS_ENr, + 0, + 0, + &etm_bypass); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_4(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 8 * 1024; + ZXIC_UINT32 ddr_to_mmu_cfg_map[2] = {2, 4}; + ZXIC_UINT32 bank_to_mmu_cfg_map[2] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[2] = {0, 1}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_4 start========n"); + + /* ddr组从qmu到mmu映射,0~1映射成2 4 */ + for (i = 0; i < 2; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + 
&ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,2 4映射成0~1 */ + for (i = 0; i < 2; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_to_mmu_cfg_map[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, ddr_to_mmu_cfg_map[j], 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, ddr_to_mmu_cfg_map[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_to_mmu_cfg_map[j] * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i + 1), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * 
depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 2; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_4 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为2、4组ddr的01bank,ETM为2、4组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_4(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = 
DPP_OK; + DPP_ETM_QMU_RANDOM_BYPASS_EN_T etm_bypass = {0}; + //DPP_FTM_QMU_RANDOM_BYPASS_EN_T ftm_bypass = {0}; + + /* etm mmu基址配置:ddr2、4组,bank号23,bank深度8*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x14, 0xc, 8 * 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_4(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_4"); + + /* 关闭etm映射配置 */ + etm_bypass.random_bypass_en = 0; + rc = dpp_reg_write(dev_id, + ETM_QMU_RANDOM_BYPASS_ENr, + 0, + 0, + &etm_bypass); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_5(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 4 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[2] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[2] = {0, 1}; + ZXIC_UINT32 ddr_no[4] = {1, 2, 3, 4}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_5 start========n"); + + /* ddr组从qmu到mmu映射,0~3映射成1234 */ + for (i 
= 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,1234映射成0~3 */ + for (i = 0; i < 4; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, ddr_no[j], 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, ddr_no[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_no[j] * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i + 1), depth); + 
qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 4; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_5 END=======\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** 场景5:TM共享4组ddr:FTM为1234组ddr的01bank,ETM为1234组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 
+************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_5(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm mmu基址配置:ddr1234组,bank号23,bank深度4*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x1e, 0xc, 4*1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_5(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_5"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景6:4组*2bank,MMU实际分配ftm:4,6,7,9组(01bank); etm:4,6,7,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/05/13 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_6(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 4 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1}; + ZXIC_UINT32 ddr_no[4] = {4, 6, 7, 9}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~3映射成4679 */ + for (i = 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + 
&ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,4679映射成0~3 */ + for (i = 0; i < 4; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, ddr_no[j], 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, ddr_no[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_no[j] * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i + 1), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + 
+ rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 4; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU初始化配置场景6:4组*2bank,MMU实际分配ftm:4,6,7,9组(01bank); etm:4,6,7,9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/05/13 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_6(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm 
mmu基址配置:ddr4,6,7,9组,bank号01,bank深度4*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x2d0, 0xc, 4*1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_6(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU/MMU初始化配置场景7:每个tm用8组*4bank,MMU实际分配ftm:2-9组(0145bank); etm:2-9组(2367bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_7(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {2, 3, 6, 7}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1, 2, 3}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~7映射成2~9 */ + for (i = 0; i < 8; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = (i + 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,2~9映射成0~7 */ + for (i = 2; i < 10; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg 
= (i - 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~3映射成bank2、3、6、7 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3、6、7映射成bank0、1、2、3 */ + for (j = 2; j < 10; j++) { + for (i = 0; i < 4; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, j, 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, j * 8, bank_to_mmu_cfg_map[i]); + k = j * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 4; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 4 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + 
ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 4]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 8; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景7:每个tm用8组*4bank,MMU实际分配ftm:2-9组(0145bank); etm:2-9组(2367bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_7(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + + /*关闭随机模式*/ + rc = dpp_tm_qmu_ddr_random_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_ddr_random_set"); + + /* etm mmu基址配置:ddr2-9组,bank号2367,bank深度1024 */ + //rc = 
dpp_mmu_bank_base_addr_set(dev_id, 0x3fc, 0xcc, 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_7(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU/MMU初始化配置场景8:每个tm用8组*4bank,MMU实际分配ftm:2-9组(01bank); etm:2-9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_8(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 2048; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~7映射成2~9 */ + for (i = 0; i < 8; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = (i + 2); + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,2~9映射成0~7 */ + for (i = 2; i < 10; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = (i - 2); + + rc = dpp_reg_write(dev_id, + 
ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 2; j < 10; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, j, 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, j * 8, bank_to_mmu_cfg_map[i]); + k = j * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 8; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景8:每个tm用8组*4bank,MMU实际分配ftm:2-9组(01bank); etm:2-9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_8(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm mmu基址配置:ddr2-9组,bank号23,bank深度1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x3fc, 0xc, 2048); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_8(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景9:4组*2bank,MMU实际分配ftm:6,7,8,9组(01bank); etm:6,7,8,9组(23bank) +* @param 
dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/09/28 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_9(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 4 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1}; + ZXIC_UINT32 ddr_no[4] = {6, 7, 8, 9}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~3映射成4679 */ + for (i = 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,4679映射成0~3 */ + for (i = 0; i < 4; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + 
&bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(ddr_no[j], 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ddr_no[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_no[j] * 8 + bank_to_mmu_cfg_map[i]; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 4; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU/MMU初始化配置场景8:每个tm用8组*4bank,MMU实际分配ftm:2-9组(01bank); etm:2-9组(23bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/10/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_9(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm mmu基址配置:ddr2-9组,bank号23,bank深度1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x3fc, 0xc, 2048); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_8(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 
+************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_10(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 2 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 ddr_no[2] = {1, 2}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,0~1映射成12 */ + for (i = 0; i < 2; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,12映射成0~1 */ + for (i = 0; i < 2; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~7映射成bank0-7 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* 
bank号从mmu到qmu映射,每组的bank0-7映射成bank0-7 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(ddr_no[j], 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ddr_no[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_no[j] * 8 + bank_to_mmu_cfg_map[i]; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 8; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i + 1), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 8 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 8 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + 
ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 8]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 2; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景10:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_10(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + /* etm mmu基址配置:ddr1,2组,bank号0-7,bank深度2*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, 0x6, 0xff, 2*1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_10(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_10"); + + return DPP_OK; +} + +/***********************************************************/ +/**场景11:TM共享2组ddr:FTM为ddr_no1、ddr_no2组ddr的01bank,ETM为ddr_no1、ddr_no2组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS 
dpp_etm_qmu_init_set_11(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no1, ZXIC_UINT32 ddr_no2) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 8 * 1024; + ZXIC_UINT32 ddr_to_mmu_cfg_map[2] = {ddr_no1, ddr_no2}; + ZXIC_UINT32 bank_to_mmu_cfg_map[2] = {2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[2] = {0, 1}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_11 start========n"); + + /* ddr组从qmu到mmu映射,0~1映射成ddr_no1 ddr_no2 */ + for (i = 0; i < 2; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,ddr_no1 ddr_no2映射成0~1 */ + for (i = 0; i < 2; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_to_mmu_cfg_map[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,每组的bank0~1映射成bank2、3 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* 
bank号从mmu到qmu映射,每组的bank2、3映射成bank0、1 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(ddr_to_mmu_cfg_map[j], 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ddr_to_mmu_cfg_map[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_to_mmu_cfg_map[j] * 8 + bank_to_mmu_cfg_map[i]; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 2; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 2; i++) { + k = j * 8 + i; + + qlist_bhead.bank_vld = 1; + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT((j * 2 + i), depth); + qlist_bhead.qcfg_qlist_bhead = (j * 2 + i) * depth; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 2 + i), depth); + qlist_btail.qcfg_qlist_btail = ((j * 2 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + 
ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 2]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 2; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_11 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/**场景4:TM共享2组ddr:FTM为ddr_no1,ddr_no2组ddr的01bank,ETM为ddr_no1,ddr_no2组ddr 23bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/06/06 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_11(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no1, ZXIC_UINT32 ddr_no2) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_RANDOM_BYPASS_EN_T etm_bypass = {0}; + //DPP_FTM_QMU_RANDOM_BYPASS_EN_T ftm_bypass = {0}; + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT((1U << ddr_no1), (1U << ddr_no2)); + + /* etm mmu基址配置:ddr_no1,ddr_no2组,bank号23,bank深度8*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, ((1U<> 4) & 0xf; + ddr_no_data[1] = (ddr_no >> 8) & 0xf; + ddr_no_data[0] = (ddr_no >> 12) & 0xf; + + /* ddr组从qmu到mmu映射,0~3映射成ddr_no */ + for (i = 0; i < 4; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no_data[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,ddr_no映射成0~3 */ + for (i = 0; i < 4; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no_data[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从qmu到mmu映射,组0~3的bank0-7映射成bank0-7 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,组ddr_no的bank0-7映射成bank0-7 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(ddr_no_data[j], 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ddr_no_data[j] * 8, i); + k = ddr_no_data[j] * 8 + i; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 8; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置:j为4组循环 */ + for (j = 0; j < 4; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 8 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 8 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定ddr内bank的轮询顺序:使用几组ddr需要配置几组 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 8]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 4; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "++++++dpp_etm_qmu_init_set end++++++\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景13:TM独享4组ddr:FTM为指定4组ddr的0~7bank + ETM为指定4组ddr的0-7bank +* MMU开启rotatjon +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2016/11/17 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_13(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_no) +{ + DPP_STATUS rc = DPP_OK; + DPP_STATUS ddr_no_temp = 0; + DPP_ETM_QMU_RANDOM_BYPASS_EN_T 
etm_bypass = {0}; + //DPP_FTM_QMU_RANDOM_BYPASS_EN_T ftm_bypass = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ((ddr_no >> 0) & 0xfU), 0, 9); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ((ddr_no >> 4) & 0xfU), 0, 9); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ((ddr_no >> 8) & 0xfU), 0, 9); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ((ddr_no >> 12) & 0xfU), 0, 9); + + ddr_no_temp = ((1U << (ddr_no & 0xfU)) + (1U << ((ddr_no >> 4) & 0xfU)) + \ + (1U << ((ddr_no >> 8) & 0xfU)) + (1U << ((ddr_no >> 12) & 0xfU))) & 0x3ff; + + /* etm mmu基址配置:ddr_no中的4组,bank号0-7,bank深度1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, ddr_no_temp, 0xff, 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_13(dev_id, ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_13"); + + /* 关闭etm映射配置 */ + etm_bypass.random_bypass_en = 0; + rc = dpp_reg_write(dev_id, + ETM_QMU_RANDOM_BYPASS_ENr, + 0, + 0, + &etm_bypass); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_14(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 4 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg 
= {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + /* ddr组从qmu到mmu映射,将0映射成etm_ddr_no */ + i = 0; + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = etm_ddr_no; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* ddr组从mmu到qmu映射,etm_ddr_no映射成0 */ + i = etm_ddr_no; + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = 0; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* bank号从qmu到mmu映射,每组的bank0-7映射成bank0-7 */ + j = 0; + + for (i = 0; i < 8; i++) { + k = j * 8 + i; + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从mmu到qmu映射,每组的bank0-7映射成bank0-7 */ + j = etm_ddr_no; + + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT(j, 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(j * 8, bank_to_mmu_cfg_map[i]); + k = j * 8 + bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 8; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + 
&active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + j = 0; + + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i + 1), depth); + k = j * 8 + i; + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 8 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 8 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 8]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = 0; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, 
"========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景14:每个tm只用1组*8bank,MMU实际分配ftm:0-9中指定组(0-7bank); etm:0-9中指定组(0-7bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_14(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, ZXIC_UINT32 ftm_ddr_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 etm_ddr_no_temp = 0; /*ETM使用的ddr,bit位表示*/ + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, etm_ddr_no, 0, 9); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ftm_ddr_no, 0, 9); + + /*etm ftm不能使用同一组ddr*/ + if (etm_ddr_no == ftm_ddr_no) { + ZXIC_COMM_PRINT("dpp_tm_qmu_init_set_15: etm_ddr_no can't equal to ftm_ddr_no!!!\n"); + } + + ZXIC_COMM_ASSERT(etm_ddr_no != ftm_ddr_no); + + /*将etm ftm使用的ddr号转换成对应的bit位,bit[0-9]每bit代表一组ddr */ + etm_ddr_no_temp = 0x1u << etm_ddr_no; + + /*开启随机模式*/ + rc = dpp_tm_qmu_ddr_random_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_ddr_random_set"); + + /* etm mmu基址配置:ddr指定etm_ddr_no组,bank号0-7bank,bank深度4K */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, etm_ddr_no_temp, 0xff, 4 * 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_14(dev_id, etm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU/MMU初始化配置场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_15(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no) +{ + DPP_STATUS rc = 
DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 8 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[4] = {0, 1, 2, 3}; + ZXIC_UINT32 qlist_grp0_bank_data[4] = {0, 1, 2, 3}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 start========n"); + + + /* ddr组从qmu到mmu映射,将0映射成etm_ddr_no */ + i = 0; + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = etm_ddr_no; + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* ddr组从mmu到qmu映射,etm_ddr_no映射成0 */ + i = etm_ddr_no; + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = 0; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + i, + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* bank号从qmu到mmu映射,每组的bank0123映射成bank0123 */ + j = 0; + + for (i = 0; i < 4; i++) { + k = j * 8 + i; + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* bank号从mmu到qmu映射,每组的bank0123映射成bank0123 */ + j = etm_ddr_no; + + for (i = 0; i < 4; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, j, 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, j * 8, bank_to_mmu_cfg_map[i]); + k = j * 8 
+ bank_to_mmu_cfg_map[i]; + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 4; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + j = 0; + + for (i = 0; i < 4; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 4 + i + 1), depth); + k = j * 8 + i; + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 4 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 4 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 4]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = 0; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_1 END=======\n"); + + return DPP_OK; +} + + +/***********************************************************/ +/** QMU/MMU初始化配置场景15:每个tm只用1组*4bank,MMU实际分配ftm:0-9中指定组(0123bank); etm:0-9中指定组(0123bank) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/4/14 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_15(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, ZXIC_UINT32 ftm_ddr_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 etm_ddr_no_temp = 0; /*ETM使用的ddr,bit位表示*/ + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, etm_ddr_no, 0, 9); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ftm_ddr_no, 0, 9); + + /*etm ftm不能使用同一组ddr*/ + if (etm_ddr_no == ftm_ddr_no) { + ZXIC_COMM_PRINT("dpp_tm_qmu_init_set_15: etm_ddr_no can't equal to ftm_ddr_no!!!\n"); + } + + ZXIC_COMM_ASSERT(etm_ddr_no == ftm_ddr_no); + + /*将etm ftm使用的ddr号转换成对应的bit位,bit[0-9]每bit代表一组ddr */ + etm_ddr_no_temp = 0x1u << etm_ddr_no; + + /*关闭随机模式*/ + rc = dpp_tm_qmu_ddr_random_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_ddr_random_set"); + + /* etm mmu基址配置:ddr指定etm_ddr_no组,bank号0123,bank深度8K */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, etm_ddr_no_temp, 0xf, 8 * 1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_15(dev_id, etm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** QMU初始化配置场景16:独享2组*8bank,MMU实际分配etm_ddr_no; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_etm_qmu_init_set_16(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 depth = 2 * 1024; + ZXIC_UINT32 bank_to_mmu_cfg_map[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 qlist_grp0_bank_data[8] = {0, 1, 2, 3, 4, 5, 6, 7}; + ZXIC_UINT32 ddr_no[2] = {(etm_ddr_no >> 4) & 0xf, etm_ddr_no & 0xf}; + + DPP_ETM_QMU_QCFG_QLIST_BDEP_T qlist_bdep = {0}; + DPP_ETM_QMU_QCFG_QLIST_BHEAD_T qlist_bhead = {0}; + DPP_ETM_QMU_QCFG_QLIST_BTAIL_T qlist_btail = {0}; + + DPP_ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFG_T active_to_bank_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_MMU_CFG_T ddr_in_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_DDR_IN_QMU_CFG_T ddr_in_qmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_MMU_CFG_T bank_to_mmu_cfg = {0}; + DPP_ETM_QMU_CFGMT_BANK_TO_QMU_CFG_T bank_to_qmu_cfg = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP0_BANK_T qlist_grp0_bank = {0}; + DPP_ETM_QMU_QCFG_QLIST_GRP_T qcfg_qlist_grp = {0}; + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_16 start========n"); + + /* ddr组从qmu到mmu映射,0~1映射成12 */ + for (i = 0; i < 2; i++) { + ddr_in_mmu_cfg.cfgmt_ddr_in_mmu_cfg = ddr_no[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + 0, + i, + &ddr_in_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* ddr组从mmu到qmu映射,12映射成0~1 */ + for (i = 0; i < 2; i++) { + ddr_in_qmu_cfg.cfgmt_ddr_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + 0, + ddr_no[i], + &ddr_in_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 
bank号从qmu到mmu映射,每组的bank0~7映射成bank0-7 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + + bank_to_mmu_cfg.cfgmt_bank_in_mmu_cfg = bank_to_mmu_cfg_map[i]; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + 0, + k, + &bank_to_mmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* bank号从mmu到qmu映射,每组的bank0-7映射成bank0-7 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, ddr_no[j], 8); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ddr_no[j] * 8, bank_to_mmu_cfg_map[i]); + k = ddr_no[j] * 8 + bank_to_mmu_cfg_map[i]; + + bank_to_qmu_cfg.cfgmt_bank_in_qmu_cfg = i; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + 0, + k, + &bank_to_qmu_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + /* random映射ram:配置一个chunk中,bank的轮询顺序 */ + for (i = 0; i < 16; i++) { + active_to_bank_cfg.cfgmt_active_to_bank_cfg = i % 8; + + rc = dpp_reg_write(dev_id, + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + 0, + i, + &active_to_bank_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 首尾深度指针配置 */ + for (j = 0; j < 2; j++) { + for (i = 0; i < 8; i++) { + k = j * 8 + i; + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i), depth); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(dev_id, (j * 8 + i + 1), depth); + qlist_bhead.bank_vld = 1; + qlist_bhead.qcfg_qlist_bhead = (j * 8 + i) * depth; + qlist_btail.qcfg_qlist_btail = ((j * 8 + i + 1) * depth - 1); + qlist_bdep.qcfg_qlist_bdep = depth; + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BHEADr, + 0, + k, + &qlist_bhead); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BTAILr, + 0, + k, + &qlist_btail); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + k, + 
&qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (i = 0; i < 64; i++) { + qlist_bdep.qcfg_qlist_bdep = depth; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_BDEPr, + 0, + i, + &qlist_bdep); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(ETM_QMU_QCFG_QLIST_GRP0_BANKr, 8); + /* 随机表配置,决定每组bank的轮询顺序 */ + for (j = 0; j < 8; j++) { + for (i = 0; i < 64; i++) { + qlist_grp0_bank.qcfg_qlist_grp0_bank_wr = qlist_grp0_bank_data[i % 8]; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRP0_BANKr + j, + 0, + i, + &qlist_grp0_bank); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + } + + for (j = 0; j < 64; j++) { + qcfg_qlist_grp.qcfg_qlist_grp_wr = j % 2; + rc = dpp_reg_write(dev_id, + ETM_QMU_QCFG_QLIST_GRPr, + 0, + j, + &qcfg_qlist_grp); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + rc = dpp_tm_qmu_cfg_done_set(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_cfg_done_set"); + + ZXIC_COMM_TRACE_DEV_DEBUG(dev_id, "========dpp_etm_qmu_init_set_16 END=======\n"); + + return DPP_OK; +} + +/***********************************************************/ +/** QMU初始化配置场景16:独享2组*8bank,MMU实际分配1,2; etm or ftm使用0-7bank +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/12/16 +************************************************************/ +DPP_STATUS dpp_tm_qmu_init_set_16(ZXIC_UINT32 dev_id, ZXIC_UINT32 etm_ddr_no, ZXIC_UINT32 ftm_ddr_no) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, etm_ddr_no, 0, 0xff); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ftm_ddr_no, 0, 0xff); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, (1UL << (etm_ddr_no >> 4 & 0xf)), (1UL << (etm_ddr_no & 0xf))); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, (1UL << (ftm_ddr_no >> 4 & 0xf)), (1UL << 
(ftm_ddr_no & 0xf))); + + /* etm mmu基址配置:ddr1,2组,bank号0-7,bank深度2*1024 */ + //rc = dpp_mmu_bank_base_addr_set(dev_id, ((1UL<<(etm_ddr_no>>4&0xf))+(1UL<<(etm_ddr_no&0xf)))&0x3ff, 0xff, 2*1024); + //ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_mmu_bank_base_addr_set"); + + rc = dpp_etm_qmu_init_set_16(dev_id, etm_ddr_no); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_etm_qmu_init_set_16"); + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_TMMU") + +#if 0 +/***********************************************************/ +/** TMMU TM纯片内模式配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param imem_en 1纯片内 +* +* @return +* @remark 说明:高有效,表示使能打开,TMMU不会再发起对MMU的读写操作,用户需要保证Cache PD全部命中。 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_imem_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 imem_en) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_TMMU_CFGMT_TM_PURE_IMEM_EN_T pure_imem_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, imem_en, 0, 1); + + pure_imem_t.cfgmt_tm_pure_imem_en = imem_en; + rc = dpp_reg_write(dev_id, + ETM_TMMU_CFGMT_TM_PURE_IMEM_ENr, + 0, + 0, + &pure_imem_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** TMMU TM纯片内模式配置获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_imem_en 1纯片内 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_imem_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_imem_en) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_TMMU_CFGMT_TM_PURE_IMEM_EN_T pure_imem_t = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_imem_en); + + rc = dpp_reg_read(dev_id, + ETM_TMMU_CFGMT_TM_PURE_IMEM_ENr, + 0, + 0, + &pure_imem_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_imem_en = pure_imem_t.cfgmt_tm_pure_imem_en; + + 
return DPP_OK; +} + +/***********************************************************/ +/** TMMU 强制DDR RDY配置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ddr_force_rdy 1、如果bit【0】配置为1,则QMU看到的DDR0 RDY一直为1。 + 2、bit【0】代表DDR0,bit【7】代表DDR7。 + 3、纯片内模式需要配置为8'hff,排除DDR干扰。 +* +* @return +* @remark +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_ddr_force_rdy_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 ddr_force_rdy) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFG_T ddr_force_rdy_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, ddr_force_rdy, 0, 0x3FF); + + ddr_force_rdy_t.cfgmt_force_ddr_rdy_cfg = ddr_force_rdy; + rc = dpp_reg_write(dev_id, + ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFGr, + 0, + 0, + &ddr_force_rdy_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** TMMU 强制DDR RDY配置获取 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_ddr_force_rdy 1、如果bit【0】配置为1,则QMU看到的DDR0 RDY一直为1。 + 2、bit【0】代表DDR0,bit【7】代表DDR7。 + 3、纯片内模式需要配置为8'hff,排除DDR干扰。 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/14 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_ddr_force_rdy_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_ddr_force_rdy) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFG_T ddr_force_rdy_t = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_ddr_force_rdy); + + rc = dpp_reg_read(dev_id, + ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFGr, + 0, + 0, + &ddr_force_rdy_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_ddr_force_rdy = ddr_force_rdy_t.cfgmt_force_ddr_rdy_cfg; + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_CRDT") + +#if 0 +/***********************************************************/ +/** crdt ram初始化 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return 
DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/22 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ram_init(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 read_times = 0; + DPP_ETM_CRDT_CRDT_CFG_RAM_INIT_T crdt_ram_cfg_init_t = {0}; + DPP_ETM_CRDT_CRDT_STA_RAM_INIT_T crdt_ram_sta_init_t = {0}; + + /**RAM初始化**/ + crdt_ram_cfg_init_t.cfg_ram_init_en = 1; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRDT_CFG_RAM_INITr, + 0, + 0, + &crdt_ram_cfg_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + crdt_ram_sta_init_t.sta_ram_init_en = 1; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRDT_STA_RAM_INITr, + 0, + 0, + &crdt_ram_sta_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + + /**初始化done确认**/ + do { + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRDT_CFG_RAM_INITr, + 0, + 0, + &crdt_ram_cfg_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRDT_STA_RAM_INITr, + 0, + 0, + &crdt_ram_sta_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + read_times++; + zxic_comm_usleep(100); + } while (0 == crdt_ram_cfg_init_t.cfg_ram_init_done || 0 == crdt_ram_sta_init_t.sta_ram_init_done); + + ZXIC_COMM_PRINT("wait_crdt_ram_done_times=%d\n", read_times); + ZXIC_COMM_PRINT("crdt_ram_cfg_init_t.cfg_ram_init_done=%d, crdt_ram_sta_init_t.sta_ram_init_done=%d\n", + crdt_ram_cfg_init_t.cfg_ram_init_done, crdt_ram_sta_init_t.sta_ram_init_done); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 分配etm-FQ类型调度器资源:fq/fq2/fq4/fq8 个数,(共16K= 16384) +* @param dev_id 设备编号 +* @param fq_num FQ调度器个数,须是8的倍数 +* @param fq2_num FQ2调度器个数,须是4的倍数 +* @param fq4_num FQ4调度器个数,须是2的倍数 +* @param fq8_num FQ8调度器个数 +* 调度器总数不能超过:16K= 16384 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/26 
+************************************************************/ +DPP_STATUS dpp_etm_crdt_fq_set(DPP_DEV_T *dev, ZXIC_UINT32 fq_num, + ZXIC_UINT32 fq2_num, ZXIC_UINT32 fq4_num, + ZXIC_UINT32 fq8_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 total_fq_num = 0; + DPP_ETM_CRDT_TH_WFQ_FQ_T etm_crdt_th_wfqfq_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ2_FQ2_T etm_crdt_th_wfqfq2_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ4_FQ4_T etm_crdt_th_wfqfq4_t = { 0 }; + ZXIC_UINT32 th_wfq_fq_index = ETM_CRDT_TH_WFQ_FQr; + ZXIC_UINT32 th_wfq_fq2_index = ETM_CRDT_TH_WFQ2_FQ2r; + ZXIC_UINT32 th_wfq_fq4_index = ETM_CRDT_TH_WFQ4_FQ4r; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + /* 参数合法性检查:fq调度器总数校验 */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), fq2_num, + 2); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), fq4_num, + 4); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), fq8_num, + 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), fq_num, + fq2_num * 2); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), fq_num + fq2_num * 2, fq4_num * 4); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), fq_num + fq2_num * 2 + fq4_num * 4, fq8_num * 8); + + total_fq_num = (fq_num + fq2_num * 2 + fq4_num * 4 + fq8_num * 8); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), total_fq_num, 0, + DPP_ETM_FQ_NUM); + + /* 各调度器数整齐性校验 */ + if ((fq_num != 0 && fq_num % 8 != 0) || + (fq2_num != 0 && fq2_num % 4 != 0) || + (fq4_num != 0 && fq4_num % 2 != 0)) { + //ZXIC_COMM__TRACE_ERR("Bad parameter: sp_num or wfq_num %8 != 0 !"); + return DPP_ERR; + } + + /* 开始调度器阈值配置:th_fq参数配置:th_fq = fq_num/8 */ + rc = dpp_reg_read(dev, th_wfq_fq_index, 0, 0, &etm_crdt_th_wfqfq_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq_t.th_fq = (fq_num / 8); + rc = dpp_reg_write(dev, th_wfq_fq_index, 0, 0, &etm_crdt_th_wfqfq_t); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* th_fq2参数配置:th_fq2 = (th_fq + fq2_num/4) */ + rc = dpp_reg_read(dev, th_wfq_fq2_index, 0, 0, &etm_crdt_th_wfqfq2_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq2_t.th_fq2 = (etm_crdt_th_wfqfq_t.th_fq + fq2_num / 4); + rc = dpp_reg_write(dev, th_wfq_fq2_index, 0, 0, &etm_crdt_th_wfqfq2_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* th_fq4参数配置:th_fq4 = (th_fq2 + fq4_num/2) */ + rc = dpp_reg_read(dev, th_wfq_fq4_index, 0, 0, &etm_crdt_th_wfqfq4_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq4_t.th_fq4 = + (etm_crdt_th_wfqfq2_t.th_fq2 + fq4_num / 2); + rc = dpp_reg_write(dev, th_wfq_fq4_index, 0, 0, &etm_crdt_th_wfqfq4_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 分配TM-SP/WFQ类型调度器资源:sp/wfq/wfq2/wfq4/wfq8 个数,(etm共9K=9216,ftm共1920个) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param sp_num SP调度器个数,须是8的倍数 +* @param wfq_num WFQ调度器个数,须是8的倍数 +* @param wfq2_num WFQ2调度器个数,须是4的倍数 +* @param wfq4_num WFQ4调度器个数,须是2的倍数 +* @param wfq8_num WFQ8调度器个数 +* 调度器总数不能超过:ETM= 9216; FTM= 1920 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/26 +************************************************************/ +DPP_STATUS dpp_tm_crdt_wfqsp_set(DPP_DEV_T *dev, ZXIC_UINT32 sp_num, + ZXIC_UINT32 wfq_num, ZXIC_UINT32 wfq2_num, + ZXIC_UINT32 wfq4_num, ZXIC_UINT32 wfq8_num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 total_wfqsp_num = 0; + DPP_ETM_CRDT_TH_SP_T etm_crdt_th_sp_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ_FQ_T etm_crdt_th_wfqfq_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ2_FQ2_T etm_crdt_th_wfqfq2_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ4_FQ4_T etm_crdt_th_wfqfq4_t = { 0 }; + ZXIC_UINT32 th_sp_index = ETM_CRDT_TH_SPr; + ZXIC_UINT32 th_wfq_fq_index = 
ETM_CRDT_TH_WFQ_FQr; + ZXIC_UINT32 th_wfq_fq2_index = ETM_CRDT_TH_WFQ2_FQ2r; + ZXIC_UINT32 th_wfq_fq4_index = ETM_CRDT_TH_WFQ4_FQ4r; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + /* 参数合法性检查 */ + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), wfq2_num, + 2); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), wfq4_num, + 4); + ZXIC_COMM_CHECK_DEV_INDEX_MUL_OVERFLOW_NO_ASSERT(DEV_ID(dev), wfq8_num, + 8); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), sp_num, + wfq_num); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), sp_num + wfq_num, wfq2_num * 2); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), sp_num + wfq_num + wfq2_num * 2, wfq4_num * 4); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), sp_num + wfq_num + wfq2_num * 2 + wfq4_num * 4, + wfq8_num * 8); + /* 调度器总数校验 */ + total_wfqsp_num = + (sp_num + wfq_num + wfq2_num * 2 + wfq4_num * 4 + wfq8_num * 8); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), total_wfqsp_num, 0, + DPP_ETM_WFQSP_NUM); + + /* 各调度器数整齐性校验 */ + if ((sp_num != 0 && sp_num % 8 != 0) || + (wfq_num != 0 && wfq_num % 8 != 0) || + (wfq2_num != 0 && wfq2_num % 4 != 0) || + (wfq4_num != 0 && wfq4_num % 2 != 0)) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "Bad parameter: sp_num or wfq_num mod 8 != 0 !\n"); + return DPP_ERR; + } + + th_sp_index = ETM_CRDT_TH_SPr; + th_wfq_fq_index = ETM_CRDT_TH_WFQ_FQr; + th_wfq_fq2_index = ETM_CRDT_TH_WFQ2_FQ2r; + th_wfq_fq4_index = ETM_CRDT_TH_WFQ4_FQ4r; + + /* 开始调度器阈值配置:th_sp参数配置:th_sp= sp_num/8 */ + rc = dpp_reg_read(dev, th_sp_index, 0, 0, &etm_crdt_th_sp_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_sp_t.th_sp = (sp_num / 8); + rc = dpp_reg_write(dev, th_sp_index, 0, 0, &etm_crdt_th_sp_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* th_wfq参数配置:th_wfq = 
(th_sp + wfq_num/8) */ + rc = dpp_reg_read(dev, th_wfq_fq_index, 0, 0, &etm_crdt_th_wfqfq_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq_t.th_wfq = (etm_crdt_th_sp_t.th_sp + wfq_num / 8); + rc = dpp_reg_write(dev, th_wfq_fq_index, 0, 0, &etm_crdt_th_wfqfq_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* th_wfq2参数配置:th_wfq2 = (th_wfq + wfq2_num/4) */ + rc = dpp_reg_read(dev, th_wfq_fq2_index, 0, 0, &etm_crdt_th_wfqfq2_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq2_t.th_wfq2 = + (etm_crdt_th_wfqfq_t.th_wfq + wfq2_num / 4); + rc = dpp_reg_write(dev, th_wfq_fq2_index, 0, 0, &etm_crdt_th_wfqfq2_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* th_wfq4参数配置:th_wfq4 = (th_wfq2 + wfq4_num/2) */ + rc = dpp_reg_read(dev, th_wfq_fq4_index, 0, 0, &etm_crdt_th_wfqfq4_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + etm_crdt_th_wfqfq4_t.th_wfq4 = + (etm_crdt_th_wfqfq2_t.th_wfq2 + wfq4_num / 2); + rc = dpp_reg_write(dev, th_wfq_fq4_index, 0, 0, &etm_crdt_th_wfqfq4_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取各调度器的起始编号(etm共25K=25600,ftm共1920个) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_spwfq_start_num 调度器起始编号结构体 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/26 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_wfqsp_get(DPP_DEV_T *dev, + DPP_TM_CRDT_SPWFQ_START_NUM_T *p_spwfq_start_num) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_TH_SP_T etm_crdt_th_sp_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ_FQ_T etm_crdt_th_wfqfq_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ2_FQ2_T etm_crdt_th_wfqfq2_t = { 0 }; + DPP_ETM_CRDT_TH_WFQ4_FQ4_T etm_crdt_th_wfqfq4_t = { 0 }; + ZXIC_UINT32 th_sp_index = ETM_CRDT_TH_SPr; + 
ZXIC_UINT32 th_wfq_fq_index = ETM_CRDT_TH_WFQ_FQr; + ZXIC_UINT32 th_wfq_fq2_index = ETM_CRDT_TH_WFQ2_FQ2r; + ZXIC_UINT32 th_wfq_fq4_index = ETM_CRDT_TH_WFQ4_FQ4r; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_spwfq_start_num); + + /* 读取调度器阈值配置:th_sp参数配置:th_sp= sp_num/8 */ + rc = dpp_reg_read(dev, th_sp_index, 0, 0, &etm_crdt_th_sp_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + /* th_wfq阈值读取:th_wfq = (th_sp + wfq_num/8) */ + rc = dpp_reg_read(dev, th_wfq_fq_index, 0, 0, &etm_crdt_th_wfqfq_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + /* th_wfq2阈值读取:th_wfq2 = (th_wfq + wfq2_num/4) */ + rc = dpp_reg_read(dev, th_wfq_fq2_index, 0, 0, &etm_crdt_th_wfqfq2_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + /* th_wfq4阈值读取:th_wfq4 = (th_wfq2 + wfq4_num/2) */ + rc = dpp_reg_read(dev, th_wfq_fq4_index, 0, 0, &etm_crdt_th_wfqfq4_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + /* 各调度器起始编号计算 */ + p_spwfq_start_num->start_num_fq = 0; + p_spwfq_start_num->start_num_fq2 = etm_crdt_th_wfqfq_t.th_fq * 8; + p_spwfq_start_num->start_num_fq4 = etm_crdt_th_wfqfq2_t.th_fq2 * 8; + p_spwfq_start_num->start_num_fq8 = etm_crdt_th_wfqfq4_t.th_fq4 * 8; + p_spwfq_start_num->start_num_sp = DPP_ETM_WFQSP_OFFSET; + p_spwfq_start_num->start_num_wfq = + (DPP_ETM_WFQSP_OFFSET + etm_crdt_th_sp_t.th_sp * 8); + p_spwfq_start_num->start_num_wfq2 = + (DPP_ETM_WFQSP_OFFSET + etm_crdt_th_wfqfq_t.th_wfq * 8); + p_spwfq_start_num->start_num_wfq4 = + (DPP_ETM_WFQSP_OFFSET + etm_crdt_th_wfqfq2_t.th_wfq2 * 8); + p_spwfq_start_num->start_num_wfq8 = + (DPP_ETM_WFQSP_OFFSET + etm_crdt_th_wfqfq4_t.th_wfq4 * 8); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取调度器类型 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param item_num 调度器中包含的子调度器个数 +* @param sch_type_num 调度器类型编号 +* @return DPP_OK-成功,DPP_ERR-失败 +* 
@remark 无 +* @see +* @author whuashan @date 2019/02/26 +************************************************************/ +DPP_STATUS dpp_tm_crdt_sch_type_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 *item_num, + ZXIC_UINT32 *sch_type_num) +{ + DPP_STATUS rc = DPP_OK; + + DPP_TM_CRDT_SPWFQ_START_NUM_T spwfq_start_num_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), item_num); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + rc = dpp_tm_crdt_wfqsp_get(dev, &spwfq_start_num_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_wfqsp_get"); + + if (se_id < spwfq_start_num_t.start_num_fq2) { + *item_num = 1; + *sch_type_num = 5; + } else if (se_id < spwfq_start_num_t.start_num_fq4) { + *item_num = 2; + *sch_type_num = 6; + } else if (se_id < spwfq_start_num_t.start_num_fq8) { + *item_num = 4; + *sch_type_num = 7; + } else if (se_id < spwfq_start_num_t.start_num_sp) { + *item_num = 8; + *sch_type_num = 8; + } else if (se_id < spwfq_start_num_t.start_num_wfq) { + *item_num = 1; + *sch_type_num = 0; + } else if (se_id < spwfq_start_num_t.start_num_wfq2) { + *item_num = 1; + *sch_type_num = 1; + } else if (se_id < spwfq_start_num_t.start_num_wfq4) { + *item_num = 2; + *sch_type_num = 2; + } else if (se_id < spwfq_start_num_t.start_num_wfq8) { + *item_num = 4; + *sch_type_num = 3; + } else { + *item_num = 8; + *sch_type_num = 4; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获取pp->dev挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param pp_id 0~63 +* @param p_weight 0~127 +* @param p_sp_mapping 0~7 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/20 +************************************************************/ +DPP_STATUS dpp_tm_crdt_pp_para_get(DPP_DEV_T *dev, ZXIC_UINT32 pp_id, + ZXIC_UINT32 *p_weight, + ZXIC_UINT32 *p_sp_mapping) +{ + DPP_STATUS rc = DPP_OK; + + 
DPP_ETM_CRDT_PP_CFG_T pp_cfg_r = { 0 }; + DPP_ETM_CRDT_PP_WEIGHT_T pp_weight_r = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), pp_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_weight); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_sp_mapping); + + rc = dpp_reg_read(dev, ETM_CRDT_PP_WEIGHTr, 0, pp_id, &pp_weight_r); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_weight = pp_weight_r.pp_weight; + + rc = dpp_reg_read(dev, ETM_CRDT_PP_CFGr, 0, pp_id, &pp_cfg_r); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + *p_sp_mapping = pp_cfg_r.pp_cfg; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置se->pp->dev挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 往端口挂接的调度器id +* @param pp_id [0-63] +* @param weight [1-511] +* @param sp_mapping [0~8] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/3/4 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_pp_link_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pp_id, ZXIC_UINT32 weight, + ZXIC_UINT32 sp_mapping) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 delay_time = 10; + + DPP_ETM_CRDT_PP_CFG_T pp_cfg = { 0 }; + DPP_ETM_CRDT_PP_WEIGHT_T pp_weight = { 0 }; + DPP_ETM_CRDT_PP_CFG_T pp_cfg_r = { 0 }; + DPP_ETM_CRDT_PP_WEIGHT_T pp_weight_r = { 0 }; + DPP_TM_SCH_SE_PARA_T sch_se_para_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), pp_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), weight, 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), sp_mapping, + DPP_TM_SCH_SP_0, DPP_TM_SCH_SP_8); + + /* 
参数赋值:仅需端口号 ,配置调度器到端口的挂接 */ + sch_se_para_t.se_linkid = DPP_ETM_PORT_LINKID_BASE + pp_id; + + rc = dpp_tm_crdt_se_link_wr(dev, se_id, &sch_se_para_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_se_link_wr"); + + /* 检测CRDT寄存器是否空闲 */ + rc = dpp_tm_crdt_idle_check(dev); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_idle_check"); + + pp_weight.pp_weight = weight; + rc = dpp_reg_write(dev, ETM_CRDT_PP_WEIGHTr, 0, pp_id, &pp_weight); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* 写入后,重新读出来校验 */ +#if (ETM_WRITE_CHECK) + { + zxic_comm_delay(delay_time); + rc = dpp_reg_read(dev, ETM_CRDT_PP_WEIGHTr, 0, pp_id, + &pp_weight_r); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + if (pp_weight_r.pp_weight != pp_weight.pp_weight) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_tm_crdt_pp_para_set pp[0x%x] wt_pp_weight[0x%x] rd_pp_weight[0x%x]\n", + pp_id, pp_weight.pp_weight, + pp_weight_r.pp_weight); + return DPP_ERR; + } + } +#endif + + /* 检测CRDT寄存器是否空闲 */ + rc = dpp_tm_crdt_idle_check(dev); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_idle_check"); + + pp_cfg.pp_cfg = sp_mapping; + rc = dpp_reg_write(dev, ETM_CRDT_PP_CFGr, 0, pp_id, &pp_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + /* 写入后,重新读出来校验 */ +#if (ETM_WRITE_CHECK) + { + zxic_comm_delay(delay_time); + rc = dpp_reg_read(dev, ETM_CRDT_PP_CFGr, 0, pp_id, &pp_cfg_r); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + if (pp_cfg_r.pp_cfg != pp_cfg.pp_cfg) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_tm_crdt_pp_para_set pp[0x%x] wt_pp_cfg[0x%x] rd_pp_cfg[0x%x]\n", + pp_id, pp_cfg.pp_cfg, pp_cfg_r.pp_cfg); + return DPP_ERR; + } + } +#endif + + return DPP_OK; +} + +/***********************************************************/ +/** 配置flow级流队列的挂接关系(flow到上级调度器的挂接) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* 
@param c_linkid c桶要挂接到的上级调度器id +* @param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_wr(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + DPP_TM_SCH_FLOW_PARA_T *p_flow_para) + +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_id_e = 0; + ZXIC_UINT32 c_linkid; + ZXIC_UINT32 c_weight; + ZXIC_UINT32 c_sp; + ZXIC_UINT32 mode; + ZXIC_UINT32 e_linkid; + ZXIC_UINT32 e_weight; + ZXIC_UINT32 e_sp; + DPP_ETM_CRDT_FLOWQUE_PARA_TBL_T etm_crdt_flow_para_tbl_t = { 0 }; + + /* 取配置参数 */ + c_linkid = p_flow_para->c_linkid; + c_weight = p_flow_para->c_weight; + c_sp = p_flow_para->c_sp; + mode = p_flow_para->mode; + e_linkid = p_flow_para->e_linkid; + e_weight = p_flow_para->e_weight; + e_sp = p_flow_para->e_sp; + /* 参数校验 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_flow_para); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), c_weight, 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), e_weight, 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), c_sp, 0, + DPP_TM_SCH_SP_NUM); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), e_sp, 0, + DPP_TM_SCH_SP_NUM); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), c_linkid, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), e_linkid, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + /* 开始流级挂接配置 */ + + if (mode == 1) { + flow_id_e = (flow_id + 0x2400); + } + + /* c桶挂接配置 */ + 
etm_crdt_flow_para_tbl_t.flowque_link = c_linkid; + etm_crdt_flow_para_tbl_t.flowque_w = c_weight; + etm_crdt_flow_para_tbl_t.flowque_pri = c_sp; + + rc = dpp_reg_write(dev, ETM_CRDT_FLOWQUE_PARA_TBLr, 0, flow_id, + &etm_crdt_flow_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + zxic_comm_delay(5); + + /* mode-1 需要配置双桶挂接 */ + if (mode == 1) { + /* e桶挂接配置 */ + etm_crdt_flow_para_tbl_t.flowque_link = e_linkid; + etm_crdt_flow_para_tbl_t.flowque_w = e_weight; + etm_crdt_flow_para_tbl_t.flowque_pri = e_sp; + + rc = dpp_reg_write(dev, ETM_CRDT_FLOWQUE_PARA_TBLr, 0, + flow_id_e, &etm_crdt_flow_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + zxic_comm_delay(5); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置flow级流队列的挂接关系(flow到上级调度器的挂接) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* @param c_linkid c桶要挂接到的上级调度器id +* @param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 c_linkid, ZXIC_UINT32 c_weight, + ZXIC_UINT32 c_sp, ZXIC_UINT32 mode, + ZXIC_UINT32 e_linkid, ZXIC_UINT32 e_weight, + ZXIC_UINT32 e_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_SCH_FLOW_PARA_T sch_flow_para_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + /* 参数赋值 */ + sch_flow_para_t.c_linkid = c_linkid; + sch_flow_para_t.c_weight = c_weight; + sch_flow_para_t.c_sp = c_sp; + sch_flow_para_t.mode = mode; + 
sch_flow_para_t.e_linkid = e_linkid; + sch_flow_para_t.e_weight = e_weight; + sch_flow_para_t.e_sp = e_sp; + + rc = dpp_tm_crdt_flow_link_wr(dev, flow_id, &sch_flow_para_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_flow_link_wr"); + + return DPP_OK; +} + +/***********************************************************/ +/** 批量配置flow级流队列的挂接关系 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id_s 起始流队列号 +* @param flow_id_e 终止流队列号 +* @param c_linkid c桶要挂接到的上级调度器id +* @param c_weight c桶挂接到上级调度器的权重[1~511] +* @param c_sp c桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param mode 挂接模式:0-单桶 1-双桶。配置单桶时无需关注后续参数,配0即可 +* @param e_linkid e桶要挂接到的上级调度器id +* @param e_weight e桶挂接到上级调度器的权重[1~511] +* @param e_sp e桶挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_flow_link_more_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id_s, + ZXIC_UINT32 flow_id_e, ZXIC_UINT32 c_linkid, + ZXIC_UINT32 c_weight, ZXIC_UINT32 c_sp, + ZXIC_UINT32 mode, ZXIC_UINT32 e_linkid, + ZXIC_UINT32 e_weight, ZXIC_UINT32 e_sp) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_id = 0; + DPP_TM_SCH_FLOW_PARA_T sch_flow_para_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + if (flow_id_s > flow_id_e) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "Bad parameters! 
flow_id_s > flow_id_e !\n"); + return DPP_ERR; + } + + /* 参数赋值 */ + sch_flow_para_t.c_linkid = c_linkid; + sch_flow_para_t.c_weight = c_weight; + sch_flow_para_t.c_sp = c_sp; + sch_flow_para_t.mode = mode; + sch_flow_para_t.e_linkid = e_linkid; + sch_flow_para_t.e_weight = e_weight; + sch_flow_para_t.e_sp = e_sp; + + for (flow_id = flow_id_s; flow_id <= flow_id_e; flow_id++) { + rc = dpp_tm_crdt_flow_link_wr(dev, flow_id, &sch_flow_para_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_flow_link_wr"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:非优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* 对于FQX/WFQX必须是调度单元中首个调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight 挂接到上级调度器的权重[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开。该参数不传递直接配0 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_wr(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + DPP_TM_SCH_SE_PARA_T *p_sch_se_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 se_linkid = 0; + ZXIC_UINT32 se_weight = 0; + ZXIC_UINT32 se_sp = 0; + ZXIC_UINT32 se_insw = 0; /* 优先级传递关闭 */ + ZXIC_UINT32 item_num = 0; /* 调度单元中调度器的个数 */ + ZXIC_UINT32 sch_type_num = 0; + ZXIC_UINT32 i = 0; + DPP_ETM_CRDT_SE_PARA_TBL_T etm_crdt_se_para_tbl_t = { 0 }; + + /* 取配置参数 */ + se_linkid = p_sch_se_para->se_linkid; + se_weight = p_sch_se_para->se_weight; + se_sp = p_sch_se_para->se_sp; + + /* 参数校验 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight, 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_sp, 0, + DPP_TM_SCH_SP_NUM); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_insw, 0, 0); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 
1); + + if (se_linkid > DPP_ETM_FQSPWFQ_NUM) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_linkid, + DPP_TM_PP_LINKID_PORT0, + DPP_TM_PP_LINKID_PORT63); + } else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_linkid, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + } + + /* 开始调度器挂接配置 */ + + /* 先区分调度器类型:sp/fq/wfq调度器挂接方式相同,wfqx/fqx=2/4/8是另一种挂接方式 */ + rc = dpp_tm_crdt_sch_type_get(dev, se_id, &item_num, &sch_type_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_sch_type_get"); + + /* 非优先级传递挂接:各调度器参数须相同 */ + etm_crdt_se_para_tbl_t.se_link = se_linkid; + etm_crdt_se_para_tbl_t.se_w = se_weight; + etm_crdt_se_para_tbl_t.se_pri = se_sp; + etm_crdt_se_para_tbl_t.se_insw = se_insw; + etm_crdt_se_para_tbl_t.cp_token_en = 1; + + for (i = 0; i < item_num; i++) { + rc = dpp_reg_write(dev, ETM_CRDT_SE_PARA_TBLr, 0, se_id + i, + &etm_crdt_se_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:非优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* 对于FQX/WFQX必须是调度单元中首个调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight 挂接到上级调度器的权重[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-8],共9级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 
该参数不传递直接配0 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_SCH_SE_PARA_T sch_se_para_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + /* 参数赋值 */ + sch_se_para_t.se_linkid = se_linkid; + sch_se_para_t.se_weight = se_weight; + sch_se_para_t.se_sp = se_sp; + + rc = dpp_tm_crdt_se_link_wr(dev, se_id, &sch_se_para_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_se_link_wr"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-3],最多4级,优先级按调度单元分配, +* 每个调度单元内部调度器优先级相同! +* @param se_weight0-7 WFQ8中各调度器权重值[1~511],若是WFQ2/4 只取前面对应值,后面无效 +* @param se_insw 优先级传递使能:0-关 1-开. 
该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS +dpp_tm_crdt_se_link_insw_wr(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + DPP_TM_SCH_SE_PARA_INSW_T *p_sch_se_para_insw) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 se_linkid; /** 要挂接到的上级调度器id */ + + ZXIC_UINT32 + se_sp; /** 挂接到上级调度器的sp优先级,有效值[0-3],共4级,优先级依次降低,优先级按调度单元分配 */ + ZXIC_UINT32 se_weight[8] = { + 0 + }; /** WFQ8中各调度器权重值[1~511],若是WFQ2/4 只取前面对应值,后面无效 */ + ZXIC_UINT32 se_insw = 1; /* 优先级传递开启 */ + ZXIC_UINT32 item_num = 1; /* 调度单元中调度器的个数 */ + ZXIC_UINT32 sch_type_num = 0; + ZXIC_UINT32 item_num_link = 0; /* 上级调度单元中调度器的个数 */ + ZXIC_UINT32 sch_type_num_link = 0; + ZXIC_UINT32 i = 0; + DPP_ETM_CRDT_SE_PARA_TBL_T etm_crdt_se_para_tbl_t = { 0 }; + + /* 取配置参数 */ + se_linkid = p_sch_se_para_insw->se_linkid; + se_sp = p_sch_se_para_insw->se_sp; + se_weight[0] = p_sch_se_para_insw->se_weight[0]; + se_weight[1] = p_sch_se_para_insw->se_weight[1]; + se_weight[2] = p_sch_se_para_insw->se_weight[2]; + se_weight[3] = p_sch_se_para_insw->se_weight[3]; + se_weight[4] = p_sch_se_para_insw->se_weight[4]; + se_weight[5] = p_sch_se_para_insw->se_weight[5]; + se_weight[6] = p_sch_se_para_insw->se_weight[6]; + se_weight[7] = p_sch_se_para_insw->se_weight[7]; + + /* 参数校验 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_sp, 0, + DPP_TM_SCH_SP_NUM - 5); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_insw, 1, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[0], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[1], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[2], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[3], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[4], 0, + DPP_TM_SCH_WEIGHT_MAX); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[5], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[6], 0, + DPP_TM_SCH_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight[7], 0, + DPP_TM_SCH_WEIGHT_MAX); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_linkid, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + /* 开始调度器挂接配置 */ + + /* 先区分调度器类型:sp/fq/wfq调度器挂接方式相同,wfqx/fqx=2/4/8是另一种挂接方式 */ + rc = dpp_tm_crdt_sch_type_get(dev, se_id, &item_num, &sch_type_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_sch_type_get"); + rc = dpp_tm_crdt_sch_type_get(dev, se_linkid, &item_num_link, + &sch_type_num_link); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_sch_type_get"); + + /* 优先级传递挂接合法性检查:下级传入调度器需是首编号,且下级<=上级 */ + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), item_num, 1, 8); + + if (se_id % item_num != 0 || item_num > item_num_link) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "dpp_tm_crdt_se_link_insw_wr: NOT CORRECT,bad parameters!\n"); + return DPP_ERR; + } + + /* 优先级传递挂接:各调度器挂接的se_linkid依次递增1,需相邻不能错开 + se_weight不限,取值[1-511] + se_sp:根据下级往上级挂接情况,取值[0-3],调度单元内部须相同 + se_insw 写死为1 */ + etm_crdt_se_para_tbl_t.se_pri = se_sp; + etm_crdt_se_para_tbl_t.se_insw = se_insw; + etm_crdt_se_para_tbl_t.cp_token_en = 1; + + for (i = 0; i < item_num; i++) { + etm_crdt_se_para_tbl_t.se_link = (se_linkid + i); + etm_crdt_se_para_tbl_t.se_w = se_weight[i]; + + rc = dpp_reg_write(dev, ETM_CRDT_SE_PARA_TBLr, 0, se_id + i, + &etm_crdt_se_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight 
WFQ2/4/8中各调度器权重值[1~511],取相等的值 +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-7],共8级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_insw_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, + ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_TM_SCH_SE_PARA_INSW_T sch_se_para_insw_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + /* 参数赋值 */ + sch_se_para_insw_t.se_linkid = se_linkid; + sch_se_para_insw_t.se_sp = se_sp; + sch_se_para_insw_t.se_weight[0] = se_weight; + sch_se_para_insw_t.se_weight[1] = se_weight; + sch_se_para_insw_t.se_weight[2] = se_weight; + sch_se_para_insw_t.se_weight[3] = se_weight; + sch_se_para_insw_t.se_weight[4] = se_weight; + sch_se_para_insw_t.se_weight[5] = se_weight; + sch_se_para_insw_t.se_weight[6] = se_weight; + sch_se_para_insw_t.se_weight[7] = se_weight; + + rc = dpp_tm_crdt_se_link_insw_wr(dev, se_id, &sch_se_para_insw_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_se_link_insw_wr"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置调度器层次化QOS的挂接关系:优先级传递,单个调度器挂接 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 本级调度器id +* @param se_linkid 要挂接到的上级调度器id +* @param se_weight WFQ8中对应调度器权重值[1~511] +* @param se_sp 挂接到上级调度器的sp优先级,有效值[0-7],共8级,优先级依次降低 +* @param se_insw 优先级传递使能:0-关 1-开. 
该参数不传递直接配1 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_insw_single_set(DPP_DEV_T *dev, + ZXIC_UINT32 se_id, + ZXIC_UINT32 se_linkid, + ZXIC_UINT32 se_weight, + ZXIC_UINT32 se_sp) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 se_insw = 1; /* 优先级传递开启 */ + DPP_ETM_CRDT_SE_PARA_TBL_T etm_crdt_se_para_tbl_t = { 0 }; + + /* 参数校验 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_sp, 0, + DPP_TM_SCH_SP_NUM - 5); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_insw, 1, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_weight, 0, + DPP_TM_SCH_WEIGHT_MAX); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_linkid, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + /* 开始调度器挂接配置 */ + + /* 优先级传递挂接:各调度器挂接的se_linkid依次递增1,需相邻不能错开 + se_weight不限,取值[1-511] + se_sp:根据下级往上级挂接情况,取值[0-3],调度单元内部须相同 + se_insw 写死为1 */ + etm_crdt_se_para_tbl_t.se_pri = se_sp; + etm_crdt_se_para_tbl_t.se_insw = se_insw; + etm_crdt_se_para_tbl_t.cp_token_en = 1; + etm_crdt_se_para_tbl_t.se_link = se_linkid; + etm_crdt_se_para_tbl_t.se_w = se_weight; + + rc = dpp_reg_write(dev, ETM_CRDT_SE_PARA_TBLr, 0, se_id, + &etm_crdt_se_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取调度器挂接配置参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param p_se_para_tbl 调度器参数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_para_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + 
DPP_ETM_CRDT_SE_PARA_TBL_T *p_se_para_tbl) +{ + DPP_STATUS rc = DPP_OK; + + /* 参数校验 */ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_para_tbl); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + rc = dpp_reg_read(dev, ETM_CRDT_SE_PARA_TBLr, 0, se_id, p_se_para_tbl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取流队列入链状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 +* @param link_state 0-未入链 1-在调度器链表中 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_state_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 *link_state) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_FLOWQUE_INS_TBL_T crdt_flow_ins_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), link_state); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_CRDT_NUM); + + rc = dpp_reg_read(dev, ETM_CRDT_FLOWQUE_INS_TBLr, 0, flow_id, + &crdt_flow_ins_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *link_state = crdt_flow_ins_tbl_t.flowque_ins; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取调度器入链状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号 +* @param link_state 0-未入链 1-在调度器链表中 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_state_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 *link_state) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_SE_INS_TBL_T crdt_se_ins_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), link_state); 
+ + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + + rc = dpp_reg_read(dev, ETM_CRDT_SE_INS_TBLr, 0, se_id, + &crdt_se_ins_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *link_state = crdt_se_ins_tbl_t.se_ins_flag; + + return DPP_OK; +} + +/***********************************************************/ +/** 判断crdt流删除命令是否空闲 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_cmd_idle(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 read_times = 30; + DPP_ETM_CRDT_FLOW_DEL_CMD_T crdt_del_cmd_busy = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + do { + rc = dpp_reg_read(dev, ETM_CRDT_FLOW_DEL_CMDr, 0, 0, + &crdt_del_cmd_busy); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + read_times--; + zxic_comm_delay(5); + } while ((0 != (crdt_del_cmd_busy.flow_del_busy)) && (read_times > 0)); + + if (read_times == 0) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "CRDT Del command busy!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +/***********************************************************/ +/** 删除流/调度器挂接关系(调度器编号非从0开始,需要偏移) +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 要删除的流号或调度器id +* ETM范围:0--0xABFF; FTM范围:0-0x177F +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 c_sta = 0; + ZXIC_UINT32 e_sta = 0; + ZXIC_UINT32 flow_e = 0; + ZXIC_UINT32 link_state = 0; /*流或调度器入链状态*/ + ZXIC_UINT32 read_times = 300; + ZXIC_UINT32 crdt_del_cmd_reg_index = 0; + DPP_ETM_CRDT_FLOW_DEL_CMD_T crdt_del_cmd_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_SCH_DEL_NUM); + flow_e = DPP_ETM_Q_NUM; + + /* 循环判断入链状态是否为1 */ + do { + /* 判断当前流或调度器入链状态:非入链情况才能删除挂接 */ + if (id <= DPP_ETM_CRDT_NUM) { + /* 流入链状态 */ + rc = dpp_tm_crdt_flow_link_state_get(dev, id, &c_sta); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_crdt_flow_link_state_get"); + + rc = dpp_tm_crdt_flow_link_state_get( + dev, id + DPP_ETM_Q_NUM, &e_sta); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_crdt_flow_link_state_get"); + + link_state = c_sta || e_sta; + } else { + /*调度器入链状态*/ + rc = dpp_tm_crdt_se_link_state_get( + dev, id - DPP_ETM_SHAP_SEID_BASE, &link_state); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, + "dpp_tm_crdt_se_link_state_get"); + } + + if (link_state == 0) { + break; + } + read_times--; + zxic_comm_delay(1); + } while (read_times > 0); + + if (read_times == 0) { + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(id, flow_e); + ZXIC_COMM_TRACE_ERROR( + "id: 0x%08x ins_flag is always 1 (Maybe it's because cir equal zero) !!!\n", + id); + /*此时要 继续往下走,执行强删!如果不删除队列没释放下次直接覆写问题更严重。zhaoyan*/ + } + + /*删挂接命令是否空闲*/ + rc = dpp_tm_crdt_del_cmd_idle(dev); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_del_cmd_idle"); + + /*进行流或调度器删除操作*/ + crdt_del_cmd_reg_index = ETM_CRDT_FLOW_DEL_CMDr; + crdt_del_cmd_t.flow_alt_cmd = 1; + crdt_del_cmd_t.flow_alt_ind = id; + + /* 流删除:如果删除c桶,需要同时删除e桶 */ + if (id < DPP_ETM_Q_NUM) { + rc = dpp_reg_write(dev, crdt_del_cmd_reg_index, 0, 0, + &crdt_del_cmd_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + /*删挂e桶:判断命令是否空闲*/ + rc = dpp_tm_crdt_del_cmd_idle(dev); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_del_cmd_idle"); + crdt_del_cmd_t.flow_alt_ind = (id + DPP_ETM_Q_NUM); + rc = dpp_reg_write(dev, crdt_del_cmd_reg_index, 0, 0, + &crdt_del_cmd_t); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } else { + rc = dpp_reg_write(dev, crdt_del_cmd_reg_index, 0, 0, + &crdt_del_cmd_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 删除调度器挂接关系(调度器编号从0开始):对外API +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id_s 要删除的起始调度器id +* @param se_id_e 要删除的终止调度器id +* ETM范围:0--0x63FF; FTM范围:0-0x77F +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_se_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 se_id_offset = 0; + ZXIC_UINT32 id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id_s, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id_e, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + se_id_offset = DPP_ETM_SHAP_SEID_BASE; + + if (id_s > id_e) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "Bad parameters! 
id_s > id_e!\n"); + return DPP_ERR; + } + + for (id = id_s; id <= id_e; id++) { + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT( + DEV_ID(dev), id, se_id_offset); + rc = dpp_tm_crdt_del_link_set(dev, id + se_id_offset); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_del_link_set"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 删除流挂接关系:对外API +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id_s 要删除的流号或调度器起始id +* @param id_e 要删除的流号或调度器终止id +* ETM范围:0--0x47FF; FTM范围:0-0xFFF +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/02/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_del_flow_link_set(DPP_DEV_T *dev, ZXIC_UINT32 id_s, + ZXIC_UINT32 id_e) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 id = 0; + ZXIC_UINT32 q_td_th = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id_s, 0, + DPP_ETM_CRDT_NUM); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id_e, 0, + DPP_ETM_CRDT_NUM); + + if (id_s > id_e) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), + "Bad parameters! id_s > id_e !\n"); + return DPP_ERR; + } + + for (id = id_s; id <= id_e; id++) { + rc = dpp_tm_cgavd_td_th_get(dev, QUEUE_LEVEL, id, &q_td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_td_th_get"); + + if (q_td_th != 0) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), + "queue TD_TH is not equal 0 ! 
q_td_th != 0 !\n"); + return DPP_ERR; + } + } + + for (id = id_s; id <= id_e; id++) { + rc = dpp_tm_crdt_del_link_set(dev, id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_crdt_del_link_set"); + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置授权分发使能或者关闭 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param en 配置的值,0-关闭授权分发,1-使能授权分发 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_credit_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CREDIT_EN_T credit_en = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + + credit_en.credit_en = en; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CREDIT_ENr, + 0, + 0, + &credit_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取授权分发使能或者关闭 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_en 读出的值,0-关闭授权分发,1-使能授权分发 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_credit_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CREDIT_EN_T credit_en = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + + *p_en = 0xffffffff; + rc = dpp_reg_read(dev_id, + ETM_CRDT_CREDIT_ENr, + 0, + 0, + &credit_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_en = credit_en.credit_en; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置授权产生间隔 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_space_choose 授权发送间隔 0:固定16个周期 1:查表 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 
+************************************************************/ +DPP_STATUS dpp_tm_crdt_space_choose_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 crdt_space_choose) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CREDIT_SPACE_SELECT_T space_select = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crdt_space_choose, 0, 1); + + space_select.credit_space_select = crdt_space_choose; + + rc = dpp_reg_write(dev_id, + ETM_CRDT_CREDIT_SPACE_SELECTr, + 0, + 0, + &space_select); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获得授权产生间隔 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_space_choose 授权发送间隔 0:固定16个周期 1:查表 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/10 +************************************************************/ +DPP_STATUS dpp_tm_crdt_space_choose_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_crdt_space_choose) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CREDIT_SPACE_SELECT_T space_select = {0}; + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CREDIT_SPACE_SELECTr, + 0, + 0, + &space_select); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_crdt_space_choose = space_select.credit_space_select; + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置端口拥塞令牌桶使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号:0~63 +* @param port_en 端口拥塞令牌桶使能,1表示不使用拥塞令牌桶的授权,0表示可以使用拥塞令牌桶授权 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author taq @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_crdt_port_congest_en_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 port_en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0_T disable_31_0 = { 0 }; + DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32_T disable_63_32 = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_en, 0, 1); + + if (port_id <= 31) { + /* port_id:[0-31] */ + rc = dpp_reg_read(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0r, 0, + 0, &disable_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = disable_31_0.congest_token_disable_31_0; + + if (port_en == 0) { + value = value & (~(1u << port_id)); + } else { + value = value | (1u << port_id); + } + + disable_31_0.congest_token_disable_31_0 = value; + + rc = dpp_reg_write(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0r, 0, + 0, &disable_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + } else { + /* port_id:[32-63] */ + rc = dpp_reg_read(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32r, 0, + 0, &disable_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = disable_63_32.congest_token_disable_63_32; + + if (port_en == 0) { + value = value & (~(1u << (port_id - 32))); + } else { + value = value | (1u << (port_id - 32)); + } + + disable_63_32.congest_token_disable_63_32 = value; + + rc = dpp_reg_write(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32r, + 0, 0, &disable_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 获得端口拥塞令牌桶使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号:0~120 +* @param p_port_en 端口拥塞令牌桶使能,1表示不使用拥塞令牌桶的授权,0表示可以使用拥塞令牌桶授权 +* +* @return +* @remark 无 +* @see +* @author djf @date 2015/03/11 +************************************************************/ +DPP_STATUS dpp_tm_crdt_port_congest_en_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 *p_port_en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0_T disable_31_0 = { 0 }; + DPP_ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32_T disable_63_32 = { 0 
}; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_port_en); + + if (port_id <= 31) { + /* port_id:[0-31] */ + rc = dpp_reg_read(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0r, 0, + 0, &disable_31_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = disable_31_0.congest_token_disable_31_0; + + *p_port_en = 1 & (value >> port_id); + } else { + /* port_id:[32-63] */ + rc = dpp_reg_read(dev, ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32r, 0, + 0, &disable_63_32); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + + value = disable_63_32.congest_token_disable_63_32; + + *p_port_en = 1 & (value >> (port_id - 32)); + } + + return DPP_OK; +} + +/***********************************************************/ +/** CRDT 模块 读写是否超时检查:只有端口sp优先级的配置需检测cfg_state的状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-空闲,DPP_ERR-忙 +* @remark 无 +* @see +* @author szq @date 2015/05/26 +************************************************************/ +DPP_STATUS dpp_tm_crdt_idle_check(DPP_DEV_T *dev) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 read_times = 30; + DPP_ETM_CRDT_CFG_STATE_T is_idle_flag = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + do { + rc = dpp_reg_read(dev, ETM_CRDT_CFG_STATEr, 0, 0, + &is_idle_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + read_times--; + zxic_comm_delay(5); + + } while ((is_idle_flag.cfg_state == 1) && (read_times > 0)); + + if (read_times == 0) { + ZXIC_COMM_TRACE_DEV_ERROR(DEV_ID(dev), "crdt rw time out\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 授权个数统计寄存器清零 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/06/19 +************************************************************/ +DPP_STATUS 
dpp_tm_crdt_clr_diag(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CNT_CLR_T cnt_clr = {0}; + + /* 不使能CRDT */ + rc = dpp_tm_crdt_credit_en_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_credit_en_set"); + + /* 清零所有的授权数统计寄存器 */ + cnt_clr.cnt_clr = 1; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CNT_CLRr, + 0, + 0, + &cnt_clr); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 保持所有的授权数统计寄存器值 */ + cnt_clr.cnt_clr = 0; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CNT_CLRr, + 0, + 0, + &cnt_clr); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 使能CRDT */ + rc = dpp_tm_crdt_credit_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_credit_en_set"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 打印各级及指定被统计的第0~15个授权流得到的授权个数 stm模式下使用 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/06/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ackcnt_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 delay_ms) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 credit_value = 0; + ZXIC_UINT32 flow_spec_id_offset = 0; + ZXIC_FLOAT traffic_amplified = 0.0; + ZXIC_FLOAT flow_spec_traffic = 0.0; + + DPP_ETM_CRDT_DEV_CREDIT_CNT_T dev_crdit_cnt = {0}; + //DPP_ETM_CRDT_PP_CREDIT_CNT_T pp_crdit_cnt = {0}; + + DPP_ETM_CRDT_STAT_QUE_ID_0_T stat_que_id_0 = {0}; + DPP_ETM_CRDT_STAT_QUE_CREDIT_T que_credit = {0}; + + rc = dpp_tm_crdt_clr_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + zxic_comm_sleep(delay_ms); + + /* dev级接收到的授权总数 */ + rc = dpp_reg_read(dev_id, + ETM_CRDT_DEV_CREDIT_CNTr, + 0, + 0, + &dev_crdit_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + /* pp级接收到的授权总数 + rc = dpp_reg_read(dev_id, + ETM_CRDT_PP_CREDIT_CNTr, + 0, + 0, + &pp_crdit_cnt); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); */ + + /* 读取credit_value */ + rc = dpp_tm_qmu_credit_value_get(dev_id, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_get"); + + traffic_amplified = ((ZXIC_FLOAT)delay_ms * (ZXIC_FLOAT)(1000000.0)) / ((ZXIC_FLOAT)(8.0) * (ZXIC_FLOAT)(credit_value)); + ZXIC_COMM_PRINT("dev: ack_cnt = 0x%08x, traffic = %.6f.(Gb)\n", dev_crdit_cnt.dev_credit_cnt, (ZXIC_FLOAT)(dev_crdit_cnt.dev_credit_cnt) / traffic_amplified); + //ZXIC_COMM_PRINT("pp: ack_cnt = 0x%08x, traffic = %.6f.(G)\n", pp_crdit_cnt.pp_credit_cnt, (ZXIC_FLOAT)(pp_crdit_cnt.pp_credit_cnt) / traffic_amplified); + + for (flow_spec_id_offset = 0; flow_spec_id_offset < 16; flow_spec_id_offset++) { + rc = dpp_reg_read(dev_id, + ETM_CRDT_STAT_QUE_ID_0r + flow_spec_id_offset, + 0, + 0, + &stat_que_id_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_STAT_QUE_CREDITr, + 0, + flow_spec_id_offset, + &que_credit); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + flow_spec_traffic = (ZXIC_FLOAT)(que_credit.stat_que_credit_cnt) / traffic_amplified; + + ZXIC_COMM_PRINT("flow_0x%04x(%5d): ", (stat_que_id_0.stat_que_id_0 & 0xffff), (stat_que_id_0.stat_que_id_0 & 0xffff)); + ZXIC_COMM_PRINT("ack_cnt = 0x%08x, ", que_credit.stat_que_credit_cnt); + ZXIC_COMM_PRINT("traffic = %.6f.(Gb)\n", flow_spec_traffic); + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id queue id +* @param en 1:过滤E桶队列CRS状态为SLOW的入链请求;0:E桶队列CRS SLOW正常入链; +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_EIR_CRS_FILTER_TBL_T eir_crs_filter = {0}; + + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_ETM_Q_NUM - 1); + + eir_crs_filter.eir_crs_filter = en; + rc = dpp_reg_write(dev_id, + ETM_CRDT_EIR_CRS_FILTER_TBLr, + 0, + que_id, + &eir_crs_filter); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return rc; +} + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id_s 起始队列号 +* @param que_id_e 终止队列号 +* @param en 1:过滤E桶队列CRS状态为SLOW的入链请求;0:E桶队列CRS SLOW正常入链; +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_more_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id_s, ZXIC_UINT32 que_id_e, ZXIC_UINT32 en) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + if (que_id_s > que_id_e) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "Bad parameters! 
que_id_s > que_id_e !\n"); + return DPP_ERR; + } + + for (i = que_id_s; i <= que_id_e; i++) { + rc = dpp_tm_crdt_eir_crs_filter_en_set(dev_id, i, en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_eir_crs_filter_en_set"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param que_id queue id +* @param p_en +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 que_id, ZXIC_UINT32 *p_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_EIR_CRS_FILTER_TBL_T eir_crs_filter = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_EIR_CRS_FILTER_TBLr, + 0, + que_id, + &eir_crs_filter); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_en = eir_crs_filter.eir_crs_filter; + + return rc; +} + +/***********************************************************/ +/**cpu配置flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 flow_id, ZXIC_UINT32 en, ZXIC_UINT32 crs_value) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CRDT_CRS_SHEILD_FLOW_ID_CFG_T crs_sheild_flow_id_cfg = {0}; + DPP_ETM_CRDT_CRS_SHEILD_EN_CFG_T crs_sheild_en_cfg = {0}; + DPP_ETM_CRDT_CRS_SHEILD_VALUE_CFG_T crs_sheild_value_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crs_value, 0, 
2); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, flow_id, 0, DPP_ETM_Q_NUM - 1); + + crs_sheild_flow_id_cfg.crs_sheild_flow_id_cfg = flow_id; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRS_SHEILD_FLOW_ID_CFGr, + 0, + 0, + &crs_sheild_flow_id_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + crs_sheild_en_cfg.crs_sheild_en_cfg = en; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRS_SHEILD_EN_CFGr, + 0, + 0, + &crs_sheild_en_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + crs_sheild_value_cfg.crs_sheild_value_cfg = crs_value; + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRS_SHEILD_VALUE_CFGr, + 0, + 0, + &crs_sheild_value_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/**获取flow_id的crs强制为normal或者off开关使能,用于检测SA模式下队列到授权流的多对一问题 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id:流号(和授权流号一一对应) +* en 强制配置crs的使能,0-不使能,1-使能 +* crs_value:强制配置crs的值2'b00:off; 2'b01:low; 2'b10:normal; +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/25 +************************************************************/ +DPP_STATUS dpp_tm_crdt_crs_sheild_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_flow_id, ZXIC_UINT32 *p_en, ZXIC_UINT32 *p_crs_value) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CRDT_CRS_SHEILD_FLOW_ID_CFG_T crs_sheild_flow_id_cfg = {0}; + DPP_ETM_CRDT_CRS_SHEILD_EN_CFG_T crs_sheild_en_cfg = {0}; + DPP_ETM_CRDT_CRS_SHEILD_VALUE_CFG_T crs_sheild_value_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_flow_id); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_crs_value); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRS_SHEILD_FLOW_ID_CFGr, + 0, + 0, + &crs_sheild_flow_id_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_flow_id = crs_sheild_flow_id_cfg.crs_sheild_flow_id_cfg; + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRS_SHEILD_EN_CFGr, + 0, + 
0, + &crs_sheild_en_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_en = crs_sheild_en_cfg.crs_sheild_en_cfg; + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRS_SHEILD_VALUE_CFGr, + 0, + 0, + &crs_sheild_value_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_crs_value = crs_sheild_value_cfg.crs_sheild_value_cfg; + + return DPP_OK; +} + +/***********************************************************/ +/** 控制授权速率的门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index 0~6 +* @param rci_grade_th_0_data +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_rci_grade_th_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, ZXIC_UINT32 rci_grade_th_0_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 6); + + rc = dpp_reg_write(dev_id, + ETM_CRDT_RCI_GRADE_TH_0_CFGr + index, + 0, + 0, + &rci_grade_th_0_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +DPP_STATUS dpp_tm_crdt_rci_grade_th_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, ZXIC_UINT32 *p_rci_grade_th_0_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 6); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rci_grade_th_0_data); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_RCI_GRADE_TH_0_CFGr + index, + 0, + 0, + p_rci_grade_th_0_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 控制授权间隔的门限,建议大于等于0XF,不可取0; +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param index 0~7 +* @param asm_interval_0_data 控制授权间隔的门限,建议大于等于0XF,不可取0; +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_asm_interval_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, 
ZXIC_UINT32 asm_interval_0_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 7); + + rc = dpp_reg_write(dev_id, + ETM_CRDT_ASM_INTERVAL_0_CFGr + index, + 0, + 0, + &asm_interval_0_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +DPP_STATUS dpp_tm_crdt_asm_interval_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 index, ZXIC_UINT32 *p_asm_interval_0_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, index, 0, 7); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_asm_interval_0_data); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_ASM_INTERVAL_0_CFGr + index, + 0, + 0, + p_asm_interval_0_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** rci的级别 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_rci_grade_data +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_rci_grade_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_rci_grade_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rci_grade_data); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_RCI_GRADEr, + 0, + 0, + p_rci_grade_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +DPP_STATUS dpp_tm_crdt_rci_value_r_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_crdt_rci_value_r_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_crdt_rci_value_r_data); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRDT_RCI_VALUE_Rr, + 0, + 0, + p_crdt_rci_value_r_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +DPP_STATUS dpp_tm_crdt_interval_now_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_crdt_interval_now_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_crdt_interval_now_data); + + rc = 
dpp_reg_read(dev_id, + ETM_CRDT_CRDT_INTERVAL_NOWr, + 0, + 0, + p_crdt_interval_now_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置crdt interval使能, +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_interval_en_cfg_data 授权分发间隔使能,1打开,0关闭 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_interval_en_cfg_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 crdt_interval_en_cfg_data) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_write(dev_id, + ETM_CRDT_CRDT_INTERVAL_EN_CFGr, + 0, + 0, + &crdt_interval_en_cfg_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取crdt interval使能 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_interval_en_cfg_data 授权分发间隔使能,1打开,0关闭 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/03/27 +************************************************************/ +DPP_STATUS dpp_tm_crdt_interval_en_cfg_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_crdt_interval_en_cfg_data) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_crdt_interval_en_cfg_data); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_CRDT_INTERVAL_EN_CFGr, + 0, + 0, + p_crdt_interval_en_cfg_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + return DPP_OK; +} + +/***********************************************************/ +/** 屏蔽ucn/asm_rdy的时能信号 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param ucn_rdy_shield_en 是否屏蔽ucn_rdy信号,1屏蔽,0不屏蔽 +* @param asm_rdy_shield_en 是否屏蔽asm_rdy信号,1屏蔽,0不屏蔽 +* +* @return +* @remark 无 +* @see +* @author wush @date 2017/10/17 +************************************************************/ +DPP_STATUS dpp_tm_crdt_ucn_asm_rdy_shield_en_set(ZXIC_UINT32 dev_id, 
ZXIC_UINT32 ucn_rdy_shield_en, ZXIC_UINT32 asm_rdy_shield_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_UCN_ASM_RDY_SHIELD_EN_T ucn_rdy_shield_en_data = {0}; + + + ucn_rdy_shield_en_data.ucn_rdy_shield_en = ucn_rdy_shield_en; + ucn_rdy_shield_en_data.asm_rdy_shield_en = asm_rdy_shield_en; + + rc = dpp_reg_write(dev_id, + ETM_CRDT_UCN_ASM_RDY_SHIELD_ENr, + 0, + 0, + &ucn_rdy_shield_en_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +DPP_STATUS dpp_tm_crdt_ucn_asm_rdy_shield_en_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_ucn_rdy_shield_en, ZXIC_UINT32 *p_asm_rdy_shield_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_UCN_ASM_RDY_SHIELD_EN_T ucn_rdy_shield_en_data = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_ucn_rdy_shield_en); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_asm_rdy_shield_en); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_UCN_ASM_RDY_SHIELD_ENr, + 0, + 0, + &ucn_rdy_shield_en_data); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_ucn_rdy_shield_en = ucn_rdy_shield_en_data.ucn_rdy_shield_en; + *p_asm_rdy_shield_en = ucn_rdy_shield_en_data.asm_rdy_shield_en; + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_SHAPE") + +#if 0 +/***********************************************************/ +/** 把整数分解成(指定位长)最高有效数和(2的)指数位数的形式,data=p_remdata*2^(p_exp) +* @param data 需要转换前的数 +* @param rembitsum 余数的位数 +* @param p_remdata 余数大小 +* @param p_exp 指数大小 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/27 +************************************************************/ +DPP_STATUS dpp_tm_rem_and_exp_translate(ZXIC_UINT32 data, + ZXIC_UINT32 rembitsum, + ZXIC_UINT32 *p_remdata, + ZXIC_UINT32 *p_exp) +{ + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(p_remdata); + ZXIC_COMM_CHECK_POINT(p_exp); + + if ((0 == data) || (rembitsum == 0)) { + { + *p_remdata = 0; + *p_exp = 0; + return DPP_OK; + } + + for (i = 1; i <= 32; i++) { + /* ZXIC_UINT64位长64位,如果ZXIC_UINT32在,左移32位时会有问题 */ + 
if (0 == (data & (((ZXIC_UINT64)0xffffffff) << i))) { + { + break; + } + } + + if (i <= rembitsum) { + *p_remdata = data; + *p_exp = 0; + } else { + *p_remdata = (data >> (i - rembitsum)); + *p_exp = i - rembitsum; + } + + return DPP_OK; +} + +/***********************************************************/ +/** shap ram初始化 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/22 +************************************************************/ +DPP_STATUS dpp_tm_shap_ram_init(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 read_times = 30; + DPP_ETM_SHAP_SHAP_CFG_INIT_CFG_T shap_ram_cfg_init_t = {0}; + DPP_ETM_SHAP_SHAP_STA_INIT_CFG_T shap_ram_sta_init_t = {0}; + + /**RAM初始化**/ + shap_ram_cfg_init_t.cfg_ram_init_en = 1; + rc = dpp_reg_write(dev_id, + ETM_SHAP_SHAP_CFG_INIT_CFGr, + 0, + 0, + &shap_ram_cfg_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + shap_ram_sta_init_t.sta_ram_init_en = 1; + rc = dpp_reg_write(dev_id, + ETM_SHAP_SHAP_STA_INIT_CFGr, + 0, + 0, + &shap_ram_sta_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + + /**初始化done确认**/ + do { + rc = dpp_reg_read(dev_id, + ETM_SHAP_SHAP_CFG_INIT_CFGr, + 0, + 0, + &shap_ram_cfg_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_SHAP_SHAP_STA_INIT_CFGr, + 0, + 0, + &shap_ram_sta_init_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + read_times--; + zxic_comm_usleep(100); + } while ((0 == shap_ram_cfg_init_t.cfg_ram_init_done || 0 == shap_ram_sta_init_t.sta_ram_init_done) && (read_times > 0)); + + if (read_times == 0) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "SHAP RAM init failed!\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 配置流队列双桶整形使能及模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param 
db_en 双桶整形使能 +* @param mode 0:c+e模式,1:c+p模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_db_en_set(DPP_DEV_T *dev, ZXIC_UINT32 db_en, + ZXIC_UINT32 mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_DB_TOKEN_T tm_shape_db_en_t = { 0 }; + DPP_ETM_SHAP_TOKEN_MODE_SWITCH_T tm_shap_db_mode_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), db_en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + + tm_shape_db_en_t.db_token = db_en; + tm_shap_db_mode_t.token_mode_switch = mode; + + rc = dpp_reg_write(dev, ETM_CRDT_DB_TOKENr, 0, 0, &tm_shape_db_en_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev, ETM_SHAP_TOKEN_MODE_SWITCHr, 0, 0, + &tm_shap_db_mode_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取流队列双桶整形使能及模式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param db_en 双桶整形使能 +* @param mode 0:c+e模式,1:c+p模式 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_db_en_get(DPP_DEV_T *dev, ZXIC_UINT32 *db_en, + ZXIC_UINT32 *mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_DB_TOKEN_T tm_shape_db_en_t = { 0 }; + DPP_ETM_SHAP_TOKEN_MODE_SWITCH_T tm_shap_db_mode_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), db_en); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), mode); + + rc = dpp_reg_read(dev, ETM_CRDT_DB_TOKENr, 0, 0, &tm_shape_db_en_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev, 
ETM_SHAP_TOKEN_MODE_SWITCHr, 0, 0, + &tm_shap_db_mode_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *db_en = tm_shape_db_en_t.db_token; + *mode = tm_shap_db_mode_t.token_mode_switch; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置桶深最小单位配置:共8档:0-7 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param token_grain 3’d0:最小单位为128K +* 3’d1:最小单位为64k +* 3’d2:最小单位为32k +* 3’d3:最小单位为16k +* 3’d4:最小单位为8k +* 3’d5:最小单位为4k +* 3’d6:最小单位为2k +* 3’d7:最小单位为1k +* 默认为0,即128K +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_grain_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 token_grain) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_SHAP_TOKEN_GRAIN_T tm_shape_token_grain_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, token_grain, 0, 7); + + tm_shape_token_grain_t.token_grain = token_grain; + + rc = dpp_reg_write(dev_id, + ETM_SHAP_TOKEN_GRAINr, + 0, + 0, + &tm_shape_token_grain_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 获取桶深最小单位配置:共8档:0-7 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param token_grain 3’d0:最小单位为128K +* 3’d1:最小单位为64k +* 3’d2:最小单位为32k +* 3’d3:最小单位为16k +* 3’d4:最小单位为8k +* 3’d5:最小单位为4k +* 3’d6:最小单位为2k +* 3’d7:最小单位为1k +* 默认为0,即128K +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_grain_get(DPP_DEV_T *dev, + ZXIC_UINT32 *token_grain) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_SHAP_TOKEN_GRAIN_T tm_shape_token_grain_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), token_grain); + + rc = dpp_reg_read(dev, ETM_SHAP_TOKEN_GRAINr, 0, 0, + &tm_shape_token_grain_t); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *token_grain = tm_shape_token_grain_t.token_grain; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置流或调度器映射到整形参数表的某个ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 流或调度器编号ETM:0-ABFF,FTM:0-177F +* @param profile_id 整形参数表id索引:[0-127] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_map_table_set(DPP_DEV_T *dev, ZXIC_UINT32 id, + ZXIC_UINT32 profile_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_SHAP_SHAP_BUCKET_MAP_TBL_T tm_shape_map_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), profile_id, 0, 127); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_SCH_DEL_NUM); + + tm_shape_map_tbl_t.shap_map = profile_id; + + rc = dpp_reg_write(dev, ETM_SHAP_SHAP_BUCKET_MAP_TBLr, 0, id, + &tm_shape_map_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取流或调度器映射到整形参数表的配置ID +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 流或调度器编号ETM:0-ABFF,FTM:0-177F +* @param profile_id 整形参数表:[0-127] +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_map_table_get(DPP_DEV_T *dev, ZXIC_UINT32 id, + ZXIC_UINT32 *profile_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_SHAP_SHAP_BUCKET_MAP_TBL_T tm_shape_map_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), profile_id); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), id, 0, + DPP_ETM_SCH_DEL_NUM); + + rc = dpp_reg_read(dev, ETM_SHAP_SHAP_BUCKET_MAP_TBLr, 0, id, + &tm_shape_map_tbl_t); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + *profile_id = tm_shape_map_tbl_t.shap_map; + + return DPP_OK; +} + +/***********************************************************/ +/** 根据流或调度器id查找对应配置表中的模板id + 找到直接进行整形配置并返回1;未找到返回0 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id 流或调度器编号 ETM:0-ABFF,FTM:0-AFF +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* @return 找到:1,未找到:0 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_find_map_id(DPP_DEV_T *dev, ZXIC_UINT32 id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 table_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 2); + + /* 根据id计算归属哪张表:每2K对应一个128项表 */ + table_id = id / 2048; + + for (i = 1; i < 128; i++) { + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id][i] + .shape_cir == cir && + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id][i] + .shape_cbs == cbs) { + rc = dpp_tm_shape_map_table_set(dev, id, i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set"); + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [i] + .shape_num++; + + return 1; + } + } + + return 0; +} + +/***********************************************************/ +/** 配置流级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param db_en 双桶整形使能,0-单桶,1-双桶 +* @param eir eir速率,单位Kb,范围同cir +* @param ebs ebs桶深,单位KB,范围同cbs +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_para_set(DPP_DEV_T *dev, ZXIC_UINT32 
flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + rc = dpp_etm_shape_flow_para_set(dev, flow_id, cir, cbs, db_en, eir, + ebs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_etm_shape_flow_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取流级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param mode_e 整形模式,0-获取c桶参数,1-获取对应e桶参数 +* @param p_para_id 整形模板索引:ETM=[0-AFF],FTM=[0-17F] +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_flow_para_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 mode, ZXIC_UINT32 *p_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_flow_para_tbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_id_e = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 profile_id = 0; + ZXIC_UINT32 bucket_para_n = 0; + ZXIC_UINT32 bucket_depth = + 0; /* 实际写入寄存器的桶深,为多少个调节单位 */ + ZXIC_UINT32 bucket_rate = + 0; /* 实际写入寄存器的速率,为每4096周期添加的字节数 */ + ZXIC_UINT32 token_grain = 0; /* 令牌桶调节档位 */ + ZXIC_UINT32 token_grain_kb[8] = { + 128, 64, 32, 16, 8, 4, 2, 1 + }; /* 档位对应值 */ + DPP_ETM_SHAP_BKT_PARA_TBL_T shap_para_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_flow_para_tbl); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_Q_NUM - 1); + + flow_id_e = flow_id + DPP_ETM_Q_NUM; + + table_id = flow_id / 2048; + + /*获取流的profile_id*/ + if (mode) { + 
table_id = flow_id_e / 2048; + rc = dpp_tm_shape_map_table_get(dev, flow_id_e, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } else { + rc = dpp_tm_shape_map_table_get(dev, flow_id, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } + + /*从寄存器读取流配置参数*/ + bucket_para_n = table_id * 128 + profile_id; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), bucket_para_n, 0, + 0xAFF); + + rc = dpp_reg_read(dev, ETM_SHAP_BKT_PARA_TBLr, 0, bucket_para_n, + &shap_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + bucket_depth = shap_para_tbl_t.bucket_depth; + bucket_rate = shap_para_tbl_t.bucket_rate; + + /*数据转换处理*/ + rc = dpp_tm_shape_token_grain_get(dev, &token_grain); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_token_grain_get"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_grain, 0, 7); + + *p_para_id = bucket_para_n; + p_flow_para_tbl->shape_cbs = bucket_depth * token_grain_kb[token_grain]; + p_flow_para_tbl->shape_cir = (ZXIC_UINT64)bucket_rate * DPP_TM_SYS_HZ * + 8 / + ((ZXIC_UINT64)4096 * DPP_TM_KILO_ULL * 64); + + return DPP_OK; +} + +/***********************************************************/ +/** etm配置流级整形参数 +* @param dev_id 设备编号 +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[20Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param db_en 双桶整形使能,0-单桶,1-双桶 +* @param eir eir速率,单位Kb,范围同cir +* @param ebs ebs桶深,单位KB,范围同cbs +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_etm_shape_flow_para_set(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 eir, + ZXIC_UINT32 ebs) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 
profile_id = 0; + ZXIC_UINT32 total_para_id = 0; + ZXIC_UINT32 get_profile_success_flag_c = + 0; /* 当前已配置表中是否找到需要的整形模板 */ + ZXIC_UINT32 get_profile_success_flag_e = + 0; /* 当前已配置表中是否找到需要的整形模板 */ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 2); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_Q_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), cir, DPP_TM_SHAPE_CIR_MIN, DPP_TM_SHAPE_CIR_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), cbs, DPP_TM_SHAPE_CBS_MIN, DPP_TM_SHAPE_CBS_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), db_en, 0, 1); + + if (db_en) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), eir, + DPP_TM_SHAPE_CIR_MIN, + DPP_TM_SHAPE_CIR_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), ebs, + DPP_TM_SHAPE_CBS_MIN, + DPP_TM_SHAPE_CBS_MAX); + } + + rc = dpp_tm_global_var_mutex_init(); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_global_var_mutex_init"); + + rc = zxic_comm_mutex_lock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_lock"); + + /**双桶开关配置**/ + rc = dpp_tm_shape_flow_db_en_set(dev, db_en, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_tm_shape_flow_db_en_set", + &g_dpp_tm_global_var_rw_mutex); + + /******STEP1:先解除原profile_id映射******/ + /**处理c桶**/ + rc = dpp_tm_shape_map_table_get(dev, flow_id, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get", + &g_dpp_tm_global_var_rw_mutex); + + if (profile_id > 0 && (profile_id < DPP_TM_SHAP_MAP_ID_MAX)) { + /***表示当前有配置整形: 根据id计算归属哪张表:每2K队列对应一个128项表***/ + table_id = flow_id / 2048; + + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num != 0) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num--; + + if (0 == 
g_dpp_etm_shape_para_table[DEV_PCIE_SLOT( + dev)][table_id][profile_id] + .shape_num) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_cbs = + 0; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_cir = + 0; + } + } + } + + /**处理e桶**/ + rc = dpp_tm_shape_map_table_get(dev, (flow_id + DPP_ETM_Q_NUM), + &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get", + &g_dpp_tm_global_var_rw_mutex); + + if (profile_id > 0 && (profile_id < DPP_TM_SHAP_MAP_ID_MAX)) { + /*表示当前有配置整形*/ + table_id = (flow_id + DPP_ETM_Q_NUM) / 2048; + + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num != 0) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num--; + + if (0 == g_dpp_etm_shape_para_table[DEV_PCIE_SLOT( + dev)][table_id][profile_id] + .shape_num) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_cbs = + 0; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_cir = + 0; + } + } + } + + /******STEP2:整形关闭的处理******/ + if (cbs == 0) { + /*关闭c桶e桶整形,并返回函数*/ + rc = dpp_tm_shape_map_table_set(dev, (flow_id), 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + rc = dpp_tm_shape_map_table_set(dev, (flow_id + DPP_ETM_Q_NUM), + 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_OK; + } + + if (ebs == 0 || db_en == 0) { + /*关闭e桶整形*/ + rc = dpp_tm_shape_map_table_set(dev, (flow_id + DPP_ETM_Q_NUM), + 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + 
get_profile_success_flag_e = 1; + } + + /******STEP3:整形配置处理,先在现有模板中查找******/ + /* 单桶整形 :c桶 */ + if (ebs == 0) { + /* 此时只有cbs>0,仅开启c桶整形:先查找c桶整形profile配置 */ + rc = dpp_tm_shape_find_map_id(dev, flow_id, cir, cbs); + + if (rc) { + get_profile_success_flag_c = 1; + } else { + get_profile_success_flag_c = 0; + } + } + /* 双桶整形 :c+e桶 */ + else { + /* 此时cbs>0,ebs>0:先查找c桶整形profile配置 */ + rc = dpp_tm_shape_find_map_id(dev, flow_id, cir, cbs); + + if (rc) { + get_profile_success_flag_c = 1; + } else { + get_profile_success_flag_c = 0; + } + + /* 查找e桶整形profile配置 */ + rc = dpp_tm_shape_find_map_id(dev, flow_id + DPP_ETM_Q_NUM, eir, + ebs); + + if (rc) { + get_profile_success_flag_e = 1; + } else { + get_profile_success_flag_e = 0; + } + } + + /******STEP4:现有整形模板中未找到所需profile******/ + if (!get_profile_success_flag_c) { + /* 根据id计算归属哪张表:每2K对应一个128项表 */ + table_id = flow_id / 2048; + + for (i = 1; i < 128; i++) { + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num == 0) { + /**********映射模板id********/ + rc = dpp_tm_shape_map_table_set(dev, flow_id, + i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + /*********整形参数配置********/ + total_para_id = table_id * 128 + i; + rc = dpp_tm_shape_para_set(dev, total_para_id, + cir, cbs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_para_set", + &g_dpp_tm_global_var_rw_mutex); + + /********同步更新全局数组******/ + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cir = + cir; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cbs = + cbs; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num++; + + get_profile_success_flag_c = 1; + break; + } + } + } + + if (!get_profile_success_flag_e && ebs) { + /* 根据id计算归属哪张表:每2K对应一个128项表 */ + table_id = (flow_id + DPP_ETM_Q_NUM) / 2048; + + for (i = 1; i < 128; i++) { + if 
(g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num == 0) { + /**********映射模板id********/ + rc = dpp_tm_shape_map_table_set( + dev, (flow_id + DPP_ETM_Q_NUM), i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + /*********整形参数配置********/ + total_para_id = table_id * 128 + i; + rc = dpp_tm_shape_para_set(dev, total_para_id, + eir, ebs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_para_set", + &g_dpp_tm_global_var_rw_mutex); + + /********同步更新全局数组******/ + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cir = + eir; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cbs = + ebs; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num++; + + get_profile_success_flag_e = 1; + break; + } + } + } + + if (!get_profile_success_flag_c || !get_profile_success_flag_e) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Failure! 
Profile resource are FULL!\n"); + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** tm配置调度器整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器编号号,用户看到:ETM 0-0x63FF, FTM 0-0x77F +* 实际:ETM:0x4800-0xABFF,FTM:0x1000-0x177F +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs pbs总桶深,单位KB,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir [0-3]调度器cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs [0-3]调度器cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_se_para_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + rc = dpp_etm_shape_se_para_set(dev, se_id, pir, pbs, db_en, cir, cbs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_etm_shape_se_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取调度单元整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param se_id 调度器单元号 ETM:0-63FF,FTM:0-77F +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param mode 整形模式,0-获取p桶参数,1-获取对应c桶参数(仅FQ8/WFQ8支持) +* @param p_para_id 整形模板索引:ETM=[0-AFF],FTM=[0-17F] +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 
+************************************************************/ +DPP_STATUS dpp_tm_shape_se_para_get(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 mode, ZXIC_UINT32 *p_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_se_para_tbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 real_se_id = 0; + ZXIC_UINT32 se_id_c = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 profile_id = 0; + ZXIC_UINT32 bucket_para_n = 0; + ZXIC_UINT32 bucket_depth = + 0; /* 实际写入寄存器的桶深,为多少个调节单位 */ + ZXIC_UINT32 bucket_rate = + 0; /* 实际写入寄存器的速率,为每4096周期添加的字节数 */ + ZXIC_UINT32 token_grain = 0; /* 令牌桶调节档位 */ + ZXIC_UINT32 token_grain_kb[8] = { + 128, 64, 32, 16, 8, 4, 2, 1 + }; /* 档位对应值 */ + DPP_ETM_SHAP_BKT_PARA_TBL_T shap_para_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para_id); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_se_para_tbl); + + real_se_id = (se_id + DPP_ETM_SHAP_SEID_BASE); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), real_se_id, 0, + DPP_ETM_SCH_DEL_NUM); + + se_id_c = real_se_id + 4; + + table_id = real_se_id / 2048; + + /*获取调度器的profile_id*/ + if (mode) { + rc = dpp_tm_shape_map_table_get(dev, se_id_c, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } else { + rc = dpp_tm_shape_map_table_get(dev, real_se_id, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } + + /*从寄存器读取调度器配置参数*/ + bucket_para_n = table_id * 128 + profile_id; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), bucket_para_n, 0, + 0xAFF); + + rc = dpp_reg_read(dev, ETM_SHAP_BKT_PARA_TBLr, 0, bucket_para_n, + &shap_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + bucket_depth = shap_para_tbl_t.bucket_depth; + bucket_rate = shap_para_tbl_t.bucket_rate; + + /*数据转换处理*/ + rc = dpp_tm_shape_token_grain_get(dev, &token_grain); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + 
"dpp_tm_shape_token_grain_get"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_grain, 0, 7); + + *p_para_id = bucket_para_n; + p_se_para_tbl->shape_cbs = bucket_depth * token_grain_kb[token_grain]; + p_se_para_tbl->shape_cir = (ZXIC_UINT64)bucket_rate * DPP_TM_SYS_HZ * + 8 / + ((ZXIC_UINT64)4096 * DPP_TM_KILO_ULL * 64); + + return DPP_OK; +} + +/***********************************************************/ +/** etm配置调度器整形参数 +* @param dev_id 设备编号 +* @param se_id 调度器编号号 ETM 0-0x63FF, FTM 0-0x77F +* 实际:ETM:0x4800-0xABFF,FTM:0x1000-0x177F +* @param pir pir总速率,单位Kb,范围同cir +* @param pbs pbs总桶深,单位Kb,范围同cbs +* @param db_en 整形模式,0-单桶,1-双桶,仅FQ8/WFQ8有效 +* @param cir [0-3]调度器cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs [0-3]调度器cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_etm_shape_se_para_set(DPP_DEV_T *dev, ZXIC_UINT32 se_id, + ZXIC_UINT32 pir, ZXIC_UINT32 pbs, + ZXIC_UINT32 db_en, ZXIC_UINT32 cir, + ZXIC_UINT32 cbs) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 real_se_id = 0; + ZXIC_UINT32 sch_type = 0; + ZXIC_UINT32 sch_type_num = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 profile_id = 0; + ZXIC_UINT32 total_para_id = 0; + ZXIC_UINT32 get_profile_success_flag_p = + 0; /* 当前已配置表中是否找到需要的p桶整形模板 */ + ZXIC_UINT32 get_profile_success_flag_c = + 0; /* 当前已配置表中是否找到需要的c桶整形模板 */ + DPP_ETM_CRDT_SE_PARA_TBL_T crdt_se_para_tabl_t = { + 0 + }; /*配置cp双桶模式,仅FQ8/WFQ8使用*/ + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 2); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), se_id, 0, + DPP_ETM_FQSPWFQ_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), pir, DPP_TM_SHAPE_CIR_MIN, DPP_TM_SHAPE_CIR_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), pbs, DPP_TM_SHAPE_CBS_MIN, DPP_TM_SHAPE_CBS_MAX); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), db_en, 0, 1); + + real_se_id = (se_id + DPP_ETM_SHAP_SEID_BASE); + + if (db_en) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), cir, + DPP_TM_SHAPE_CIR_MIN, + DPP_TM_SHAPE_CIR_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), cbs, + DPP_TM_SHAPE_CBS_MIN, + DPP_TM_SHAPE_CBS_MAX); + } + + rc = dpp_tm_global_var_mutex_init(); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_global_var_mutex_init"); + + rc = zxic_comm_mutex_lock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_lock"); + + rc = dpp_tm_crdt_sch_type_get(dev, se_id, &sch_type, &sch_type_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_tm_crdt_sch_type_get", + &g_dpp_tm_global_var_rw_mutex); + + /**双桶模式配置:crdt模块se_id不用转换**/ + if (sch_type == 8) { + for (i = 0; i < 8; i++) { + rc = dpp_reg_read(dev, ETM_CRDT_SE_PARA_TBLr, 0, + se_id + i, &crdt_se_para_tabl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_reg_read", + &g_dpp_tm_global_var_rw_mutex); + + crdt_se_para_tabl_t.cp_token_en = db_en; + + rc = dpp_reg_write(dev, ETM_CRDT_SE_PARA_TBLr, 0, + se_id + i, &crdt_se_para_tabl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_reg_write", + &g_dpp_tm_global_var_rw_mutex); + } + } + + /******STEP1:先解除原profile_id映射******/ + /**处理p桶**/ + rc = dpp_tm_shape_map_table_get(dev, real_se_id, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get", + &g_dpp_tm_global_var_rw_mutex); + + if (profile_id > 0 && (profile_id < DPP_TM_SHAP_MAP_ID_MAX)) { + /***表示当前有配置整形: 根据id计算归属哪张表:每2K队列对应一个128项表***/ + table_id = real_se_id / 2048; + + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num != 0) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_num--; + } + } + + if (sch_type == 8) { + /**处理c桶**/ + rc = 
dpp_tm_shape_map_table_get(dev, (real_se_id + 4), + &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_get", + &g_dpp_tm_global_var_rw_mutex); + + if (profile_id > 0 && (profile_id < DPP_TM_SHAP_MAP_ID_MAX)) { + /*表示当前有配置整形*/ + table_id = (real_se_id + 4) / 2048; + + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_num != 0) { + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][profile_id] + .shape_num--; + } + } + } + + /******STEP2:整形关闭的处理******/ + if (sch_type < 8) { + /**非FQ8/WFQ8类型调度器:无双桶模式**/ + if (pbs == 0) { + /*关闭p桶整形,并返回函数*/ + rc = dpp_tm_shape_map_table_set(dev, (real_se_id), 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + rc = zxic_comm_mutex_unlock( + &g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; + } + } else { + /**FQ8/WFQ8类型调度器:考虑双桶**/ + if (pbs == 0 && db_en == 0) { + /*单桶模式:关闭p桶整形,并返回函数*/ + rc = dpp_tm_shape_map_table_set(dev, (real_se_id), 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + rc = zxic_comm_mutex_unlock( + &g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; + } + + if (pbs == 0 && db_en == 1 && cbs == 0) { + /*双桶模式:关闭p+c桶整形,并返回函数*/ + rc = dpp_tm_shape_map_table_set(dev, (real_se_id), 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + rc = dpp_tm_shape_map_table_set(dev, (real_se_id + 4), + 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + rc = zxic_comm_mutex_unlock( + &g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT( + 
DEV_ID(dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; + } + } + + /******STEP3:整形配置处理,先在现有模板中查找******/ + /* 非FQ8/WFQ8类型:仅处理p桶,c桶flag直接至1 */ + if (sch_type < 8) { + /* 查找p桶整形profile配置 */ + rc = dpp_tm_shape_find_map_id(dev, real_se_id, pir, pbs); + + if (rc) { + get_profile_success_flag_p = 1; + } else { + get_profile_success_flag_p = 0; + } + + get_profile_success_flag_c = 1; + } else { + /* FQ8/WFQ8类型:p+c桶 */ + if (db_en == 0) { + /* 单桶模式:仅查找p桶整形profile配置 */ + rc = dpp_tm_shape_find_map_id(dev, real_se_id, pir, + pbs); + + if (rc) { + get_profile_success_flag_p = 1; + } else { + get_profile_success_flag_p = 0; + } + + get_profile_success_flag_c = 1; + } else { + /* 双桶模式:先查找p桶整形profile配置 */ + if (pbs == 0) { + rc = dpp_tm_shape_map_table_set(dev, real_se_id, + 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + get_profile_success_flag_p = 1; + } else { + rc = dpp_tm_shape_find_map_id(dev, real_se_id, + pir, pbs); + + if (rc) { + get_profile_success_flag_p = 1; + } else { + get_profile_success_flag_p = 0; + } + } + + /* 查找c桶整形profile配置 */ + if (cbs == 0) { + rc = dpp_tm_shape_map_table_set( + dev, (real_se_id + 4), 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + get_profile_success_flag_c = 1; + } else { + rc = dpp_tm_shape_find_map_id( + dev, (real_se_id + 4), cir, cbs); + + if (rc) { + get_profile_success_flag_c = 1; + } else { + get_profile_success_flag_c = 0; + } + } + } + } + + /******STEP4:现有整形模板中未找到所需profile******/ + if (!get_profile_success_flag_p) { + /* 根据id计算归属哪张表:每2K对应一个128项表 */ + table_id = real_se_id / 2048; + + for (i = 1; i < 128; i++) { + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num == 0) { + /**********映射模板id********/ + rc = dpp_tm_shape_map_table_set(dev, real_se_id, + i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, 
+ "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + /*********整形参数配置********/ + total_para_id = table_id * 128 + i; + rc = dpp_tm_shape_para_set(dev, total_para_id, + pir, pbs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_para_set", + &g_dpp_tm_global_var_rw_mutex); + + /********同步更新全局数组******/ + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cir = + pir; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cbs = + pbs; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num++; + + get_profile_success_flag_p = 1; + break; + } + } + } + + if (!get_profile_success_flag_c) { + /* 根据id计算归属哪张表:每2K对应一个128项表 */ + table_id = (real_se_id + 4) / 2048; + + for (i = 1; i < 128; i++) { + if (g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num == 0) { + /**********映射模板id********/ + rc = dpp_tm_shape_map_table_set( + dev, (real_se_id + 4), i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_map_table_set", + &g_dpp_tm_global_var_rw_mutex); + + /*********整形参数配置********/ + total_para_id = table_id * 128 + i; + rc = dpp_tm_shape_para_set(dev, total_para_id, + cir, cbs); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK( + DEV_ID(dev), rc, + "dpp_tm_shape_para_set", + &g_dpp_tm_global_var_rw_mutex); + + /********同步更新全局数组******/ + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cir = + cir; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_cbs = + cbs; + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)] + [table_id][i] + .shape_num++; + + get_profile_success_flag_c = 1; + break; + } + } + } + + if (!get_profile_success_flag_p || !get_profile_success_flag_c) { + ZXIC_COMM_TRACE_DEV_ERROR( + DEV_ID(dev), "Failure! 
Profile resource are FULL!\n"); + + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + return DPP_ERR; + } + + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 写入流/调度器整形参数配置表 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param total_para_id 整形参数表中模板索引id ETM:0-AFF,FTM:0-17F +* @param cir 整形速率(c/e桶统一) +* @param cbs 桶深 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_set(DPP_DEV_T *dev, ZXIC_UINT32 total_para_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 bucket_depth = + 0; /* 实际写入寄存器的桶深,为多少个调节单位 */ + ZXIC_UINT32 bucket_rate = + 0; /* 实际写入寄存器的速率,为每4096周期添加的字节数 */ + ZXIC_UINT32 token_grain = 0; /* 令牌桶调节档位 */ + ZXIC_UINT32 token_grain_kb[8] = { + 128, 64, 32, 16, 8, 4, 2, 1 + }; /* 档位对应值 */ + DPP_ETM_SHAP_BKT_PARA_TBL_T shap_para_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), cir, DPP_TM_SHAPE_CIR_MIN, DPP_TM_SHAPE_CIR_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), cbs, DPP_TM_SHAPE_CBS_MIN, DPP_TM_SHAPE_CBS_MAX); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), total_para_id, 0, + 0xAFF); + + /********* 数据转换处理:Begin ********/ + + rc = dpp_tm_shape_token_grain_get(dev, &token_grain); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_token_grain_get"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_grain, 0, 7); + + if (cbs < token_grain_kb[token_grain] && (cbs != 0)) { + bucket_depth = 1; /* 最小为1个桶深调节单位 */ + } else { + bucket_depth = cbs / token_grain_kb[token_grain]; + } + + /* 寄存器最大可写范围为[0-2047] */ + if (bucket_depth > 
DPP_TM_SHAPE_CBS_REG_MAX) { + bucket_depth = DPP_TM_SHAPE_CBS_REG_MAX; + } + + /* 平均每周期添加cir*1/64bit,即每4096周期添加的字节数 */ + //bucket_rate = (ZXIC_UINT64)4096 * cir * DPP_TM_KILO_ULL * 64 / (((ZXIC_UINT64)DPP_TM_SYS_HZ / (ZXIC_UINT64)13393) * 8); + bucket_rate = (ZXIC_UINT64)4096 * cir * DPP_TM_KILO_ULL * 64 / + ((ZXIC_UINT64)DPP_TM_SYS_HZ * 8); + shap_para_tbl_t.bucket_rate = bucket_rate; + shap_para_tbl_t.bucket_depth = bucket_depth; + + /******** 数据转换处理:End *********/ + + rc = dpp_reg_write(dev, ETM_SHAP_BKT_PARA_TBLr, 0, total_para_id, + &shap_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取流/调度器整形参数配置表 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param total_para_id 整形参数表中模板索引id ETM:0-AFF,FTM:0-17F +* @param cir 整形速率(c/e桶统一) +* @param cbs 桶深 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/03/11 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_get(DPP_DEV_T *dev, ZXIC_UINT32 total_para_id, + DPP_TM_SHAPE_PARA_TABLE *p_shap_para_tbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 token_grain = 0; /* 令牌桶调节档位 */ + ZXIC_UINT32 token_grain_kb[8] = { + 128, 64, 32, 16, 8, 4, 2, 1 + }; /* 档位对应值 */ + DPP_ETM_SHAP_BKT_PARA_TBL_T shap_para_tbl_t = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_shap_para_tbl); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), total_para_id, 0, + 0xAFF); + + /*读取配置*/ + rc = dpp_reg_read(dev, ETM_SHAP_BKT_PARA_TBLr, 0, total_para_id, + &shap_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + /********* 数据转换处理: ********/ + rc = dpp_tm_shape_token_grain_get(dev, &token_grain); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_token_grain_get"); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), token_grain, 0, 7); + + p_shap_para_tbl->shape_cbs 
= + shap_para_tbl_t.bucket_depth * token_grain_kb[token_grain]; + p_shap_para_tbl->shape_cir = (ZXIC_UINT64)shap_para_tbl_t.bucket_rate * + DPP_TM_SYS_HZ * 8 / + ((ZXIC_UINT64)4096 * DPP_TM_KILO_ULL * 64); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置端口级整形参数 更改整形转换公式 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* @param p_para 整形信息:CIR/CBS/EN +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_set(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + const DPP_TM_SHAPE_PP_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cir = 0; /* 颗粒度的倍数 */ + ZXIC_UINT32 cbs = 0; /* Credit的倍数 */ + ZXIC_UINT32 qmu_credit_value = 0; + //ZXIC_FLOAT DPP_TM_SHAPE_CIR_STEP_TEST = 160.069565217 * 1000 * 1000 * 1000 / 0x3FFFFFE;/*测试使用 by xuhb*/ + + DPP_ETM_CRDT_PP_WEIGHT_RAM_T pp_weight = { 0 }; + DPP_ETM_CRDT_PP_CBS_SHAPE_EN_RAM_T pp_cbs_shape_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), p_para->c_en, 0, 1); + + if (p_para->c_en == 0) { + rc = dpp_reg_read(dev, ETM_CRDT_PP_CBS_SHAPE_EN_RAMr, 0, + port_id, &pp_cbs_shape_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_read"); + pp_cbs_shape_en.pp_c_shap_en = p_para->c_en; + rc = dpp_reg_write(dev, ETM_CRDT_PP_CBS_SHAPE_EN_RAMr, 0, + port_id, &pp_cbs_shape_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), p_para->cir, + DPP_TM_SHAPE_CIR_MIN, + DPP_TM_SHAPE_CIR_MAX); + + /* 读取授权价值 */ + rc = dpp_tm_qmu_credit_value_get(dev, &qmu_credit_value); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_qmu_credit_value_get"); + + cir = p_para->cir; + //cir = (ZXIC_UINT32)(((ZXIC_UINT64)cir * DPP_TM_KILO_ULL) / DPP_TM_SHAPE_CIR_STEP)*(crdt_credit_value / qmu_credit_value); + cir = (ZXIC_UINT32)(((ZXIC_UINT64)cir * DPP_TM_KILO_ULL) / + DPP_TM_SHAPE_CIR_STEP); + + /* 解决160Gbps设置出错的问题add by cuiy at 2016-4-15 */ + if (cir > 0x3FFFFFE) { + cir = 0x3FFFFFE; + } + + /* 检查以kbyte为单位的CBS */ + if (qmu_credit_value != 0) { + cbs = p_para->cbs; + cbs = cbs * DPP_TM_KILO_UL / qmu_credit_value; + } + + /* 寄存器写入CBS的最小值为20,小于该值时,整形不准 */ + if (cbs < DPP_TM_SHAPE_DEFAULT_CBS) { + cbs = DPP_TM_SHAPE_DEFAULT_CBS; + } + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT( + DEV_ID(dev), cbs, DPP_TM_SHAPE_DEFAULT_CBS, 0x1ffff); + + pp_cbs_shape_en.pp_cbs = cbs; + pp_weight.pp_c_weight = cir; + pp_cbs_shape_en.pp_c_shap_en = p_para->c_en; + + rc = dpp_reg_write(dev, ETM_CRDT_PP_WEIGHT_RAMr, 0, port_id, + &pp_weight); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + + rc = dpp_reg_write(dev, ETM_CRDT_PP_CBS_SHAPE_EN_RAMr, 0, + port_id, &pp_cbs_shape_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 读取端口级整形参数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号 +* @param p_para 整形信息:CIR/CBS/EN +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_get(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + DPP_TM_SHAPE_PP_PARA_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 cbs = 0; /* Credit的倍数 */ + ZXIC_UINT32 cir = 0; /* 颗粒度的倍数 */ + ZXIC_UINT32 qmu_credit_value = 0; + + DPP_ETM_CRDT_PP_WEIGHT_RAM_T pp_weight = { 0 }; + DPP_ETM_CRDT_PP_CBS_SHAPE_EN_RAM_T pp_cbs_shape_en = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_para); 
+ ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + + /* 读取授权价值 */ + rc = dpp_tm_qmu_credit_value_get(dev, &qmu_credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_qmu_credit_value_get"); + + rc = dpp_reg_read(dev, ETM_CRDT_PP_WEIGHT_RAMr, 0, port_id, &pp_weight); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev, ETM_CRDT_PP_CBS_SHAPE_EN_RAMr, 0, port_id, + &pp_cbs_shape_en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, "dpp_reg_read"); + + cbs = pp_cbs_shape_en.pp_cbs; + cir = pp_weight.pp_c_weight; + p_para->c_en = pp_cbs_shape_en.pp_c_shap_en; + p_para->cir = (ZXIC_UINT32)((ZXIC_UINT64)cir * DPP_TM_SHAPE_CIR_STEP / + DPP_TM_KILO_ULL); + p_para->cbs = (cbs * qmu_credit_value / DPP_TM_KILO_UL); + + return DPP_OK; +} + +/***********************************************************/ +/** 写入端口级整形信息 +* @param tm_type 0-ETM,1-FTM +* @param port_id 端口号0-63 +* @param cir 单位Kb +* @param cbs 单位KB +* @param c_en c桶使能 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/03 +************************************************************/ +DPP_STATUS dpp_tm_shape_pp_para_wr(DPP_DEV_T *dev, ZXIC_UINT32 port_id, + ZXIC_UINT32 cir, ZXIC_UINT32 cbs, + ZXIC_UINT32 c_en) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_SHAPE_PP_PARA_T para = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), port_id, 0, + DPP_TM_PP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), c_en, 0, 1); + + para.cir = cir; + para.cbs = cbs; + para.c_en = c_en; + + rc = dpp_tm_shape_pp_para_set(dev, port_id, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_pp_para_set"); + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置第0~15个被统计得到令牌个数的端口号 +* @param dev_id 设备编号 +* 
@param tm_type 0-ETM,1-FTM +* @param port_id 被统计得到令牌个数的端口号 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/3/15 STM模式下使用 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_pp_cfg(ZXIC_UINT32 dev_id, + ZXIC_UINT32 port_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 tmp_port = 0; + ZXIC_UINT32 i = 0; + DPP_ETM_CRDT_Q_TOKEN_STAUE_CFG_T crdt_q_token_staue_cfg = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, port_id, 0, 63); + + tmp_port = port_id; + + if (tmp_port > 48) { + tmp_port = 48; + } + + for (i = 0; i < 16; i++) { + crdt_q_token_staue_cfg.test_token_q_id = tmp_port + i; + rc = dpp_reg_write(dev_id, + ETM_CRDT_Q_TOKEN_STAUE_CFGr, + 0, + i, + &crdt_q_token_staue_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 打印被指定统计的第0~15个端口消耗的令牌个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM + +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 统计时间为2s,其中c桶统计1s,e桶统计1s +* @see +* @author whuashan @date 2019/03/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_dec_cnt_diag(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dec_num[16] = {0}; + ZXIC_UINT32 credit_value = 0; + ZXIC_UINT32 port_num = 0; + ZXIC_FLOAT traffic_amplified = 0.0; + ZXIC_UINT32 shape_token_cycle_reg_index = 0; + ZXIC_UINT32 shape_q_token_sta_cfg_reg_index = 0; + ZXIC_UINT32 shape_test_token_calc_ctrl_reg_index = 0; + ZXIC_UINT32 shape_q_token_dec_cnt_reg_index = 0; + + DPP_TM_WORK_MODE_E sa_work_mode = 0; + DPP_TM_CNT_MODE_T que_get_mode = {0}; + DPP_ETM_CRDT_TEST_TOKEN_CALC_CTRL_T shap_test_token_calc_ctrl = {0}; + DPP_ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUM_T test_token_sample_cycle_num = {0}; + DPP_ETM_CRDT_Q_TOKEN_STAUE_CFG_T q_token_staue_cfg = {0}; + DPP_ETM_CRDT_Q_TOKEN_DEC_CNT_T q_token_dec_cnt = {0}; + + 
shape_token_cycle_reg_index = ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUMr; + shape_q_token_sta_cfg_reg_index = ETM_CRDT_Q_TOKEN_STAUE_CFGr; + shape_test_token_calc_ctrl_reg_index = ETM_CRDT_TEST_TOKEN_CALC_CTRLr; + shape_q_token_dec_cnt_reg_index = ETM_CRDT_Q_TOKEN_DEC_CNTr; + + /* 配置寄存器为读清模式 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 获取端口号 */ + rc = dpp_reg_read(dev_id, + shape_q_token_sta_cfg_reg_index, + 0, + 0, + &q_token_staue_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + port_num = q_token_staue_cfg.test_token_q_id; + + /* 配置统计的时间,单位是令牌下发周期,令牌下发频率为600M/32 */ + test_token_sample_cycle_num.sample_cycle_num = 18750000; + rc = dpp_reg_write(dev_id, + shape_token_cycle_reg_index, + 0, + 0, + &test_token_sample_cycle_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 启动统计功能 */ + shap_test_token_calc_ctrl.test_token_calc_trigger = 1; + rc = dpp_reg_write(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + /* 等待统计完成 */ + rc = dpp_reg_read(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + while (!shap_test_token_calc_ctrl.test_token_calc_state) { + rc = dpp_reg_read(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + } + + /* 关闭统计功能 */ + shap_test_token_calc_ctrl.test_token_calc_trigger = 0; + rc = dpp_reg_write(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_write"); + + /* 读取计数器的计数 */ + for (i = 0; i < 16; i++) { + rc = dpp_reg_read(dev_id, + shape_q_token_dec_cnt_reg_index, + 0, + i, + &q_token_dec_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + dec_num[i] = q_token_dec_cnt.q_token_dec_counter; + } + + rc = dpp_tm_cfgmt_sa_work_mode_get(dev_id, &sa_work_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_sa_work_mode_get"); + + if (sa_work_mode == 0) { + rc = dpp_tm_qmu_credit_value_get(dev_id, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_get"); + } else if (1 == sa_work_mode) { + rc = dpp_tm_qmu_sa_credit_value_get(dev_id, 4, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_sa_credit_value_get"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "SA work mode error\n"); + return DPP_ERR; + } + + /* 计算各级消耗令牌速率:cnt*8*credit_value/(2*cycle_num*32/600M) */ + traffic_amplified = (ZXIC_FLOAT)(credit_value) * (ZXIC_FLOAT)(8.0) / ((ZXIC_FLOAT)(2.0) * (ZXIC_FLOAT)(18750000.0)); + + for (i = 0; i < 16; i++) { + ZXIC_COMM_PRINT("pp_%d: traffic = %.6f.(M)\n", (port_num + i), (ZXIC_FLOAT)(dec_num[i]) * traffic_amplified); + } + + /* 配置寄存器为不读清模式 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + + que_get_mode.count_rd_mode = 0; + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印被指定统计的第0~15个端口接收的令牌个数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 统计时间为2s,其中c桶统计1s,e桶统计1s +* @see +* @author whuashan @date 2019/03/15 +************************************************************/ +DPP_STATUS dpp_tm_shape_token_dist_cnt_diag(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 
dist_num[16] = {0}; + ZXIC_UINT32 credit_value = 0; + ZXIC_UINT32 port_num = 0; + ZXIC_FLOAT traffic_amplified = 0.0; + ZXIC_UINT32 shape_token_cycle_reg_index = 0; + ZXIC_UINT32 shape_q_token_sta_cfg_reg_index = 0; + ZXIC_UINT32 shape_test_token_calc_ctrl_reg_index = 0; + ZXIC_UINT32 shape_q_token_dist_cnt_reg_index = 0; + DPP_TM_WORK_MODE_E sa_work_mode = 0; + DPP_TM_CNT_MODE_T que_get_mode = {0}; + DPP_ETM_CRDT_TEST_TOKEN_CALC_CTRL_T shap_test_token_calc_ctrl = {0}; + DPP_ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUM_T test_token_sample_cycle_num = {0}; + DPP_ETM_CRDT_Q_TOKEN_STAUE_CFG_T q_token_staue_cfg = {0}; + DPP_ETM_CRDT_Q_TOKEN_DIST_CNT_T q_token_dist_cnt = {0}; + + shape_token_cycle_reg_index = ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUMr; + shape_q_token_sta_cfg_reg_index = ETM_CRDT_Q_TOKEN_STAUE_CFGr; + shape_test_token_calc_ctrl_reg_index = ETM_CRDT_TEST_TOKEN_CALC_CTRLr; + shape_q_token_dist_cnt_reg_index = ETM_CRDT_Q_TOKEN_DIST_CNTr; + + /* 配置寄存器为读清模式 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 1; + + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 获取端口号 */ + rc = dpp_reg_read(dev_id, + shape_q_token_sta_cfg_reg_index, + 0, + 0, + &q_token_staue_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + port_num = q_token_staue_cfg.test_token_q_id; + + /* 配置统计的时间,单位是令牌下发周期,令牌下发频率为600M/32 */ + test_token_sample_cycle_num.sample_cycle_num = 18750000; + rc = dpp_reg_write(dev_id, + shape_token_cycle_reg_index, + 0, + 0, + &test_token_sample_cycle_num); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 启动统计功能 */ + shap_test_token_calc_ctrl.test_token_calc_trigger = 1; + rc = dpp_reg_write(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_write"); + /* 等待统计完成 */ + rc = dpp_reg_read(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + while (!shap_test_token_calc_ctrl.test_token_calc_state) { + rc = dpp_reg_read(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + } + + /* 关闭统计功能 */ + shap_test_token_calc_ctrl.test_token_calc_trigger = 0; + rc = dpp_reg_write(dev_id, + shape_test_token_calc_ctrl_reg_index, + 0, + 0, + &shap_test_token_calc_ctrl); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + /* 读取c桶计数器的计数 */ + for (i = 0; i < 16; i++) { + rc = dpp_reg_read(dev_id, + shape_q_token_dist_cnt_reg_index, + 0, + i, + &q_token_dist_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + dist_num[i] = q_token_dist_cnt.q_token_dist_counter; + } + + rc = dpp_tm_qmu_credit_value_get(dev_id, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_get"); + rc = dpp_tm_cfgmt_sa_work_mode_get(dev_id, &sa_work_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_sa_work_mode_get"); + + if (sa_work_mode == 0) { + rc = dpp_tm_qmu_credit_value_get(dev_id, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_get"); + } else if (1 == sa_work_mode) { + rc = dpp_tm_qmu_sa_credit_value_get(dev_id, 4, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_sa_credit_value_get"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "SA work mode error\n"); + return DPP_ERR; + } + + /* 计算各级消耗令牌速率:cnt*8*credit_value/(2*cycle_num*32/600M) */ + traffic_amplified = (ZXIC_FLOAT)(credit_value) * (ZXIC_FLOAT)(8.0) / ((ZXIC_FLOAT)(2.0) * (ZXIC_FLOAT)(18750000.0)); + + for (i = 0; i < 16; i++) { + ZXIC_COMM_PRINT("pp_%d: traffic = %.6f.(M)\n", (port_num + i), (ZXIC_FLOAT)(dist_num[i]) * 
traffic_amplified); + } + + /* 配置寄存器为不读清模式 */ + rc = dpp_tm_cfgmt_cnt_mode_get(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_get"); + que_get_mode.count_rd_mode = 0; + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &que_get_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印指定的全局数组值以及清空全局数组 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param para_x 数组index_x +* @param para_y 数组index_y +* @param clear_flag 清空shape全局数组 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author xuhb @date 2019/06/10 +************************************************************/ +DPP_STATUS dpp_tm_shape_para_array_prt(ZXIC_UINT32 dev_id, ZXIC_UINT32 para_x, ZXIC_UINT32 para_y, ZXIC_UINT32 clear_flag) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 2); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, para_x, 0, 21); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, para_y, 0, 127); + ZXIC_COMM_PRINT("cir(kb):%d cbs(KB):%d num:%d\n", g_dpp_etm_shape_para_table[dev_id][para_x][para_y].shape_cir, + g_dpp_etm_shape_para_table[dev_id][para_x][para_y].shape_cbs, + g_dpp_etm_shape_para_table[dev_id][para_x][para_y].shape_num); + + if (clear_flag) { + rc = dpp_tm_clr_shape_para(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_clr_shape_para"); + } + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 获取全局数组中用户实际配置的整形值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param flow_id 流队列号 ETM:0-9215,FTM:0-2047 +* @param cir cir速率,单位Kb,范围[64Kb - 160Gb] +* @param cbs cbs桶深,单位KB,范围[1KB - 64M] +* 注:cbs=0 表示关闭整形,即不限速 +* @param mode_e 整形模式,0-获取c桶参数,1-获取对应e桶参数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark +* @see +* @author xuhb @date 2020/09/22 +************************************************************/ 
+DPP_STATUS +dpp_tm_shape_flow_para_array_get(DPP_DEV_T *dev, ZXIC_UINT32 flow_id, + ZXIC_UINT32 mode, + DPP_TM_SHAPE_PARA_TABLE *p_flow_para_tbl) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 flow_id_e = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 profile_id = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), mode, 0, 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_flow_para_tbl); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), flow_id, 0, + DPP_ETM_Q_NUM - 1); + + flow_id_e = flow_id + DPP_ETM_Q_NUM; + + table_id = flow_id / 2048; + + /*获取流的profile_id*/ + if (mode) { + table_id = flow_id_e / 2048; + rc = dpp_tm_shape_map_table_get(dev, flow_id_e, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } else { + rc = dpp_tm_shape_map_table_get(dev, flow_id, &profile_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_shape_map_table_get"); + } + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), profile_id, 0, + DPP_TM_SHAP_MAP_ID_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), table_id, 0, + DPP_ETM_SHAP_TABEL_ID_MAX - 1); + p_flow_para_tbl->shape_cbs = + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_cbs; + p_flow_para_tbl->shape_cir = + g_dpp_etm_shape_para_table[DEV_PCIE_SLOT(dev)][table_id] + [profile_id] + .shape_cir; + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置shap模块中 crd_grain授权价值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是0x5feByte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/13 +************************************************************/ +DPP_STATUS dpp_tm_shap_crd_grain_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 credit_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_SHAP_CRD_GRAIN_T credit_val = {0}; + + credit_val.crd_grain = credit_value; + rc = dpp_reg_write(dev_id, + 
ETM_SHAP_CRD_GRAINr, + 0, + 0, + &credit_val); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_EXTEND_API") + +#if 0 +/***********************************************************/ +/** 打印队列空标志查询 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_ept_flag_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + + rc = dpp_tm_qlist_ept_flag_get(dev_id, qnum, &value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qlist_ept_flag_get"); + ZXIC_COMM_PRINT("qlist_ept_flag is %d\n", value); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印队列深度计数 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qnum 配置的队列号 +* @param p_value 队列深度计数 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author cy @date 2016/06/20 +************************************************************/ +DPP_STATUS dpp_tm_qlist_r_bcnt_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 qnum) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 value = 0; + + rc = dpp_tm_qlist_r_bcnt_get(dev_id, qnum, &value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qlist_r_bcnt_get"); + ZXIC_COMM_PRINT("qlist_r_bcnt is 0x%x\n", value); + + return DPP_OK; +} + +/***********************************************************/ +/** CMDSCH中分端口分优先级的BLOCK计数 +* @param dev_id 设备编号 +* @param pri +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_csch_r_block_cnt_get(ZXIC_UINT32 dev_id, + ZXIC_UINT32 pri, + ZXIC_UINT32 *p_value) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_CSCH_R_BLOCK_CNT_T csch_r_block_cnt = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, 
p_value); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CSCH_R_BLOCK_CNTr, + 0, + pri, + &csch_r_block_cnt); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + *p_value = csch_r_block_cnt.csch_r_block_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 打印CMDSCH中分端口分优先级的BLOCK计数 +* @param dev_id 设备编号 +* @param port +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_csch_r_block_cnt_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 port) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 pri = 0; + ZXIC_UINT32 pri_th = 0; + + for (int i = 0; i < 8; ++i) { + pri = port * 8 + i; + rc = dpp_tm_csch_r_block_cnt_get(dev_id, pri, &pri_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_csch_r_block_cnt_get"); + ZXIC_COMM_PRINT("csch_r_block_pri 0x%x value is 0x%x\n", pri, pri_th); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 打印队列入链状态 +* @param dev_id 设备编号 +* @param flow_id +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_flow_link_state_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 flow_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 link_status = 0; + DPP_ETM_CRDT_FLOWQUE_INS_TBL_T crdt_flow_ins_tbl_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, flow_id, 0, DPP_ETM_CRDT_NUM); + rc = dpp_tm_crdt_flow_link_state_get(dev_id, flow_id, &crdt_flow_ins_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_flow_link_state_get"); + + link_status = crdt_flow_ins_tbl_t.flowque_ins; + ZXIC_COMM_PRINT("flowque_ins_tbl flow 0x%x linkstatus is 0x%x\n", flow_id, link_status); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印调度器入链状态 +* @param dev_id 设备编号 +* @param flow_id +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* 
@see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_crdt_se_link_state_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 se_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 link_status = 0; + DPP_ETM_CRDT_SE_INS_TBL_T crdt_se_ins_tbl_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, se_id, 0, DPP_ETM_FQSPWFQ_NUM - 1); + rc = dpp_tm_crdt_se_link_state_get(dev_id, se_id, &crdt_se_ins_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_se_link_state_get"); + + link_status = crdt_se_ins_tbl_t.se_ins_flag; + ZXIC_COMM_PRINT("flowque_ins_tbl se_id 0x%x linkstatus is 0x%x\n", se_id, link_status); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印olif的fifo是否空状态 +* @param dev_id 设备编号 +* @param +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author sun @date 2023/09/19 +************************************************************/ +DPP_STATUS dpp_tm_olif_fifo_empty_state_get_diag(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 qmu_para_fifo_empty = 0; + ZXIC_UINT32 emem_empty = 0; + ZXIC_UINT32 imem_empty = 0; + DPP_ETM_OLIF_OLIF_FIFO_EMPTY_STATE_T empty_status = {0}; + + rc = dpp_reg_read(dev_id, + ETM_OLIF_OLIF_FIFO_EMPTY_STATEr, + 0, + 0, + &empty_status); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + qmu_para_fifo_empty = empty_status.qmu_para_fifo_empty; + emem_empty = empty_status.emem_empty; + imem_empty = empty_status.imem_empty; + ZXIC_COMM_PRINT("qmu_para_fifo_empty 0x%x, emem_empty 0x%x, imem_empty 0x%x\n", qmu_para_fifo_empty, emem_empty, imem_empty); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id +* @param tm_type +* @param que_id +* +* @return +* @remark 无 +* @see +* @author XXX @date 2019/05/08 +************************************************************/ +DPP_STATUS dpp_tm_crdt_eir_crs_filter_en_get_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 
que_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_EIR_CRS_FILTER_TBL_T eir_crs_filter = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_ETM_Q_NUM - 1); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_EIR_CRS_FILTER_TBLr, + 0, + que_id, + &eir_crs_filter); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + ZXIC_COMM_PRINT("Tm type:%d [0:ftm 1:etm] eir_crs_filter_en:%d [1:enable 0:disable]", eir_crs_filter.eir_crs_filter); + + return rc; +} + + +/***********************************************************/ +/** 读取指定队列获得授权个数(只打印授权非零的队列号) +* @param dev_id 设备编号 +* @param ackflow_start 授权起始流号 +* @param ackflow_end 授权终止流号 +* @param sleep_time_ms 等待时间 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2017/01/19 +************************************************************/ +DPP_STATUS dpp_etm_crdt_traffic_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 ackflow_start, + ZXIC_UINT32 ackflow_end, + ZXIC_UINT32 sleep_time_ms) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 ackflow_id = 0; + ZXIC_UINT32 credit_value = 0; + ZXIC_FLOAT traffic_amplified = 0.0; + ZXIC_FLOAT flow_spec_traffic = 0.0; + + DPP_ETM_CRDT_STAT_QUE_CREDIT_T que_credit = {0}; + DPP_TM_WORK_MODE_E sa_work_mode = {0}; + DPP_ETM_CRDT_STAT_QUE_ID_0_T stat_que_id_0 = {0}; + + if (ackflow_start > DPP_ETM_Q_NUM - 1) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ackflow_start is out of range!!!\n"); + return DPP_ERR; + } + + if (ackflow_end > DPP_ETM_Q_NUM - 1) { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "ackflow_end is out of range!!!\n"); + return DPP_ERR; + } + + rc = dpp_tm_cfgmt_sa_work_mode_get(dev_id, &sa_work_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_sa_work_mode_get"); + + if (sa_work_mode == 0) { + rc = dpp_tm_qmu_credit_value_get(dev_id, &credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_get"); + } else if (1 == sa_work_mode) { + rc = dpp_tm_qmu_sa_credit_value_get(dev_id, 4, 
&credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_sa_credit_value_get"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "SA work mode error\n"); + return DPP_ERR; + } + + traffic_amplified = ((ZXIC_FLOAT)sleep_time_ms * (ZXIC_FLOAT)(1000000.0)) / ((ZXIC_FLOAT)(8.0) * (ZXIC_FLOAT)(credit_value)); + + for (ackflow_id = ackflow_start; ackflow_id <= ackflow_end; ackflow_id += 16) { + /* 配置16条授权流 */ + for (index = 0; index < 16; index++) { + stat_que_id_0.stat_que_id_0 = ackflow_id + index; + rc = dpp_reg_write(dev_id, + ETM_CRDT_STAT_QUE_ID_0r + index, + 0, + 0, + &stat_que_id_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + } + + /* 授权个数统计计数器清零 */ + rc = dpp_tm_crdt_clr_diag(0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_clr_diag"); + + zxic_comm_sleep(sleep_time_ms); + /* 不使能CRDT */ + rc = dpp_tm_crdt_credit_en_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_credit_en_set"); + + for (index = 0; index < 16; index++) { + rc = dpp_reg_read(dev_id, + ETM_CRDT_STAT_QUE_ID_0r + index, + 0, + 0, + &stat_que_id_0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "DPP_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_CRDT_STAT_QUE_CREDITr, + 0, + index, + &que_credit); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "DPP_reg_read"); + + flow_spec_traffic = (ZXIC_FLOAT)(que_credit.stat_que_credit_cnt) / traffic_amplified; + + if (que_credit.stat_que_credit_cnt != 0) { + ZXIC_COMM_PRINT("flow_0x%04x(%5d): ", (stat_que_id_0.stat_que_id_0), (stat_que_id_0.stat_que_id_0)); + ZXIC_COMM_PRINT("ack_cnt = 0x%08x, ", que_credit.stat_que_credit_cnt); + ZXIC_COMM_PRINT("traffic = %.6f.(G)\n", flow_spec_traffic); + } + } + } + + /* 使能CRDT */ + rc = dpp_tm_crdt_credit_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_credit_en_set"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 读取QMU所有队列的统计信息 +* @param dev_id 设备编号 +* @param 
p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_stat_get(ZXIC_UINT32 dev_id, DPP_ETM_QMU_STAT_INFO_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_FC_CNT_MODE_T fc_cnt_mode_reg = {0}; + DPP_ETM_QMU_MMU_QMU_WR_FC_CNT_T mmu_qmu_wr_fc_cnt_reg = {0}; + DPP_ETM_QMU_MMU_QMU_RD_FC_CNT_T mmu_qmu_rd_fc_cnt_reg = {0}; + DPP_ETM_QMU_QMU_CGAVD_FC_CNT_T qmu_cgavd_fc_cnt_reg = {0}; + DPP_ETM_QMU_CGAVD_QMU_PKT_CNT_T cgavd_qmu_pkt_cnt_reg = {0}; + DPP_ETM_QMU_CGAVD_QMU_PKTLEN_ALL_T cgavd_qmu_pktlen_all_reg = {0}; + DPP_ETM_QMU_LAST_DROP_QNUM_GET_T last_drop_qnum_reg = {0}; + DPP_ETM_QMU_CRDT_QMU_CREDIT_CNT_T crdt_qmu_credit_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_QSCH_REPORT_CNT_T qmu_to_qsch_report_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_CGAVD_REPORT_CNT_T qmu_to_cgavd_report_cnt_reg = {0}; + DPP_ETM_QMU_QMU_CRDT_CRS_NORMAL_CNT_T qmu_crdt_crs_normal_cnt_reg = {0}; + DPP_ETM_QMU_QMU_CRDT_CRS_OFF_CNT_T qmu_crdt_crs_off_cnt_reg = {0}; + DPP_ETM_QMU_QSCH_QLIST_SHEDULE_CNT_T qsch_qlist_shedule_cnt_reg = {0}; + DPP_ETM_QMU_QSCH_QLIST_SCH_EPT_CNT_T qsch_qlist_sch_ept_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_MMU_BLK_WR_CNT_T qmu_to_mmu_blk_wr_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_CSW_BLK_RD_CNT_T qmu_to_csw_blk_rd_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_MMU_SOP_WR_CNT_T qmu_to_mmu_sop_wr_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_MMU_EOP_WR_CNT_T qmu_to_mmu_eop_wr_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_MMU_DROP_WR_CNT_T qmu_to_mmu_drop_wr_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_CSW_SOP_RD_CNT_T qmu_to_csw_sop_rd_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_CSW_EOP_RD_CNT_T qmu_to_csw_eop_rd_cnt_reg = {0}; + DPP_ETM_QMU_QMU_TO_CSW_DROP_RD_CNT_T qmu_to_csw_drop_rd_cnt_reg = {0}; + DPP_ETM_QMU_MMU_TO_QMU_WR_RELEASE_CNT_T mmu_to_qmu_wr_release_cnt_reg = {0}; + DPP_ETM_QMU_MMU_TO_QMU_RD_RELEASE_CNT_T mmu_to_qmu_rd_release_cnt_reg = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc 
= dpp_reg_read(dev_id, + ETM_QMU_FC_CNT_MODEr, + 0, + 0, + &fc_cnt_mode_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_MMU_QMU_WR_FC_CNTr, + 0, + 0, + &mmu_qmu_wr_fc_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_MMU_QMU_RD_FC_CNTr, + 0, + 0, + &mmu_qmu_rd_fc_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_CGAVD_FC_CNTr, + 0, + 0, + &qmu_cgavd_fc_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CGAVD_QMU_PKT_CNTr, + 0, + 0, + &cgavd_qmu_pkt_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CGAVD_QMU_PKTLEN_ALLr, + 0, + 0, + &cgavd_qmu_pktlen_all_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_LAST_DROP_QNUM_GETr, + 0, + 0, + &last_drop_qnum_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_CRDT_QMU_CREDIT_CNTr, + 0, + 0, + &crdt_qmu_credit_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_QSCH_REPORT_CNTr, + 0, + 0, + &qmu_to_qsch_report_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_CGAVD_REPORT_CNTr, + 0, + 0, + &qmu_to_cgavd_report_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_CRDT_CRS_NORMAL_CNTr, + 0, + 0, + &qmu_crdt_crs_normal_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_CRDT_CRS_OFF_CNTr, + 0, + 0, + &qmu_crdt_crs_off_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + 
ETM_QMU_QSCH_QLIST_SHEDULE_CNTr, + 0, + 0, + &qsch_qlist_shedule_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QSCH_QLIST_SCH_EPT_CNTr, + 0, + 0, + &qsch_qlist_sch_ept_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_MMU_BLK_WR_CNTr, + 0, + 0, + &qmu_to_mmu_blk_wr_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_CSW_BLK_RD_CNTr, + 0, + 0, + &qmu_to_csw_blk_rd_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_MMU_SOP_WR_CNTr, + 0, + 0, + &qmu_to_mmu_sop_wr_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_MMU_EOP_WR_CNTr, + 0, + 0, + &qmu_to_mmu_eop_wr_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_MMU_DROP_WR_CNTr, + 0, + 0, + &qmu_to_mmu_drop_wr_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_CSW_SOP_RD_CNTr, + 0, + 0, + &qmu_to_csw_sop_rd_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_CSW_EOP_RD_CNTr, + 0, + 0, + &qmu_to_csw_eop_rd_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_QMU_TO_CSW_DROP_RD_CNTr, + 0, + 0, + &qmu_to_csw_drop_rd_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_MMU_TO_QMU_WR_RELEASE_CNTr, + 0, + 0, + &mmu_to_qmu_wr_release_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_MMU_TO_QMU_RD_RELEASE_CNTr, + 0, + 0, + &mmu_to_qmu_rd_release_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, 
rc, "dpp_reg_read"); + + p_para->fc_cnt_mode = fc_cnt_mode_reg.fc_cnt_mode; + p_para->mmu_qmu_wr_fc_cnt = mmu_qmu_wr_fc_cnt_reg.mmu_qmu_wr_fc_cnt; + p_para->mmu_qmu_rd_fc_cnt = mmu_qmu_rd_fc_cnt_reg.mmu_qmu_rd_fc_cnt; + p_para->qmu_cgavd_fc_cnt = qmu_cgavd_fc_cnt_reg.qmu_cgavd_fc_cnt; + p_para->cgavd_qmu_pkt_cnt = cgavd_qmu_pkt_cnt_reg.cgavd_qmu_pkt_cnt; + p_para->cgavd_qmu_pktlen_all = cgavd_qmu_pktlen_all_reg.cgavd_qmu_pktlen_all; + p_para->cgavd_qmu_drop_tap = last_drop_qnum_reg.cgavd_qmu_drop_tap; + p_para->last_drop_qnum = last_drop_qnum_reg.last_drop_qnum; + p_para->crdt_qmu_credit_cnt = crdt_qmu_credit_cnt_reg.crdt_qmu_credit_cnt; + p_para->qmu_to_qsch_report_cnt = qmu_to_qsch_report_cnt_reg.qmu_to_qsch_report_cnt; + p_para->qmu_to_cgavd_report_cnt = qmu_to_cgavd_report_cnt_reg.qmu_to_cgavd_report_cnt; + p_para->qmu_crdt_crs_normal_cnt = qmu_crdt_crs_normal_cnt_reg.qmu_crdt_crs_normal_cnt; + p_para->qmu_crdt_crs_off_cnt = qmu_crdt_crs_off_cnt_reg.qmu_crdt_crs_off_cnt; + p_para->qsch_qlist_shedule_cnt = qsch_qlist_shedule_cnt_reg.qsch_qlist_shedule_cnt; + p_para->qsch_qlist_sch_ept_cnt = qsch_qlist_sch_ept_cnt_reg.qsch_qlist_sch_ept_cnt; + p_para->qmu_to_mmu_blk_wr_cnt = qmu_to_mmu_blk_wr_cnt_reg.qmu_to_mmu_blk_wr_cnt; + p_para->qmu_to_csw_blk_rd_cnt = qmu_to_csw_blk_rd_cnt_reg.qmu_to_csw_blk_rd_cnt; + p_para->qmu_to_mmu_sop_wr_cnt = qmu_to_mmu_sop_wr_cnt_reg.qmu_to_mmu_sop_wr_cnt; + p_para->qmu_to_mmu_eop_wr_cnt = qmu_to_mmu_eop_wr_cnt_reg.qmu_to_mmu_eop_wr_cnt; + p_para->qmu_to_mmu_drop_wr_cnt = qmu_to_mmu_drop_wr_cnt_reg.qmu_to_mmu_drop_wr_cnt; + p_para->qmu_to_csw_sop_rd_cnt = qmu_to_csw_sop_rd_cnt_reg.qmu_to_csw_sop_rd_cnt; + p_para->qmu_to_csw_eop_rd_cnt = qmu_to_csw_eop_rd_cnt_reg.qmu_to_csw_eop_rd_cnt; + p_para->qmu_to_csw_drop_rd_cnt = qmu_to_csw_drop_rd_cnt_reg.qmu_to_csw_drop_rd_cnt; + p_para->mmu_to_qmu_wr_release_cnt = mmu_to_qmu_wr_release_cnt_reg.mmu_to_qmu_wr_release_cnt; + p_para->mmu_to_qmu_rd_release_cnt = 
mmu_to_qmu_rd_release_cnt_reg.mmu_to_qmu_rd_release_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 读取QMU指定队列的计数信息 +* @param dev_id 设备编号 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_q_stat_get(ZXIC_UINT32 dev_id, DPP_ETM_QMU_SPEC_Q_STAT_INFO_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_PORTFC_SPEC_T observe_portfc_spec_reg = {0}; + DPP_ETM_QMU_SPEC_LIF_PORTFC_COUNT_T spec_lif_portfc_count_reg = {0}; + DPP_ETM_QMU_OBSERVE_QNUM_SET_T observe_qnum_set_reg = {0}; + DPP_ETM_QMU_SPEC_Q_PKT_RECEIVED_T spec_q_pkt_received_reg = {0}; + DPP_ETM_QMU_SPEC_Q_PKT_DROPPED_T spec_q_pkt_dropped_reg = {0}; + DPP_ETM_QMU_SPEC_Q_PKT_SCHEDULED_T spec_q_pkt_scheduled_reg = {0}; + DPP_ETM_QMU_SPEC_Q_WR_CMD_SENT_T spec_q_wr_cmd_sent_reg = {0}; + DPP_ETM_QMU_SPEC_Q_RD_CMD_SENT_T spec_q_rd_cmd_sent_reg = {0}; + DPP_ETM_QMU_SPEC_Q_PKT_ENQ_T spec_q_pkt_enq_reg = {0}; + DPP_ETM_QMU_SPEC_Q_PKT_DEQ_T spec_q_pkt_deq_reg = {0}; + DPP_ETM_QMU_SPEC_Q_CRDT_UNCON_RECEIVED_T spec_q_crdt_uncon_received_reg = {0}; + DPP_ETM_QMU_SPEC_Q_CRDT_CONG_RECEIVED_T spec_q_crdt_cong_received_reg = {0}; + DPP_ETM_QMU_SPEC_Q_CRS_NORMAL_CNT_T spec_q_crs_normal_cnt_reg = {0}; + DPP_ETM_QMU_SPEC_Q_CRS_OFF_CNT_T spec_q_crs_off_cnt_reg = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc = dpp_reg_read(dev_id, + ETM_QMU_OBSERVE_PORTFC_SPECr, + 0, + 0, + &observe_portfc_spec_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_LIF_PORTFC_COUNTr, + 0, + 0, + &spec_lif_portfc_count_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_OBSERVE_QNUM_SETr, + 0, + 0, + &observe_qnum_set_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, 
+ ETM_QMU_SPEC_Q_PKT_RECEIVEDr, + 0, + 0, + &spec_q_pkt_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_PKT_DROPPEDr, + 0, + 0, + &spec_q_pkt_dropped_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_PKT_SCHEDULEDr, + 0, + 0, + &spec_q_pkt_scheduled_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_WR_CMD_SENTr, + 0, + 0, + &spec_q_wr_cmd_sent_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_RD_CMD_SENTr, + 0, + 0, + &spec_q_rd_cmd_sent_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_PKT_ENQr, + 0, + 0, + &spec_q_pkt_enq_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_PKT_DEQr, + 0, + 0, + &spec_q_pkt_deq_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRDT_UNCON_RECEIVEDr, + 0, + 0, + &spec_q_crdt_uncon_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRDT_CONG_RECEIVEDr, + 0, + 0, + &spec_q_crdt_cong_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRS_NORMAL_CNTr, + 0, + 0, + &spec_q_crs_normal_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_Q_CRS_OFF_CNTr, + 0, + 0, + &spec_q_crs_off_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->observe_portfc_spec = observe_portfc_spec_reg.observe_portfc_spec; + p_para->spec_lif_portfc_count = spec_lif_portfc_count_reg.spec_lif_portfc_count; + p_para->observe_qnum_set = 
observe_qnum_set_reg.observe_qnum_set; + p_para->spec_q_pkt_received = spec_q_pkt_received_reg.spec_q_pkt_received; + p_para->spec_q_pkt_dropped = spec_q_pkt_dropped_reg.spec_q_pkt_dropped; + p_para->spec_q_pkt_scheduled = spec_q_pkt_scheduled_reg.spec_q_pkt_scheduled; + p_para->spec_q_wr_cmd_sent = spec_q_wr_cmd_sent_reg.spec_q_wr_cmd_sent; + p_para->spec_q_rd_cmd_sent = spec_q_rd_cmd_sent_reg.spec_q_rd_cmd_sent; + p_para->spec_q_pkt_enq = spec_q_pkt_enq_reg.spec_q_pkt_enq; + p_para->spec_q_pkt_deq = spec_q_pkt_deq_reg.spec_q_pkt_deq; + p_para->spec_q_crdt_uncon_received = spec_q_crdt_uncon_received_reg.spec_q_crdt_uncon_received; + p_para->spec_q_crdt_cong_received = spec_q_crdt_cong_received_reg.spec_q_crdt_cong_received; + p_para->spec_q_crs_normal_cnt = spec_q_crs_normal_cnt_reg.spec_q_crs_normal_cnt; + p_para->spec_q_crs_off_cnt = spec_q_crs_off_cnt_reg.spec_q_crs_off_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 读取QMU指定队列组的计数信息 +* @param dev_id 设备编号 +* @param p_para 获得的统计信息 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/03/04 +************************************************************/ +DPP_STATUS dpp_tm_qmu_spec_bat_stat_get(ZXIC_UINT32 dev_id, DPP_ETM_QMU_SPEC_BAT_STAT_INFO_T *p_para) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_QMU_OBSERVE_BATCH_SET_T observe_batch_set_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_PKT_RECEIVED_T spec_bat_pkt_received_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_PKT_DROPPED_T spec_bat_pkt_dropped_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_BLK_SCHEDULED_T spec_bat_blk_scheduled_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_WR_CMD_SENT_T spec_bat_wr_cmd_sent_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_RD_CMD_SENT_T spec_bat_rd_cmd_sent_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_PKT_ENQ_T spec_bat_pkt_enq_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_PKT_DEQ_T spec_bat_pkt_deq_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_CRDT_UNCON_RECEIVED_T spec_bat_crdt_uncon_received_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_CRDT_CONG_RECEIVED_T 
spec_bat_crdt_cong_received_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_CRS_NORMAL_CNT_T spec_bat_crs_normal_cnt_reg = {0}; + DPP_ETM_QMU_SPEC_BAT_CRS_OFF_CNT_T spec_bat_crs_off_cnt_reg = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_para); + + rc = dpp_reg_read(dev_id, + ETM_QMU_OBSERVE_BATCH_SETr, + 0, + 0, + &observe_batch_set_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_PKT_RECEIVEDr, + 0, + 0, + &spec_bat_pkt_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_PKT_DROPPEDr, + 0, + 0, + &spec_bat_pkt_dropped_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_BLK_SCHEDULEDr, + 0, + 0, + &spec_bat_blk_scheduled_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_WR_CMD_SENTr, + 0, + 0, + &spec_bat_wr_cmd_sent_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_RD_CMD_SENTr, + 0, + 0, + &spec_bat_rd_cmd_sent_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_PKT_ENQr, + 0, + 0, + &spec_bat_pkt_enq_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_PKT_DEQr, + 0, + 0, + &spec_bat_pkt_deq_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_CRDT_UNCON_RECEIVEDr, + 0, + 0, + &spec_bat_crdt_uncon_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_CRDT_CONG_RECEIVEDr, + 0, + 0, + &spec_bat_crdt_cong_received_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_CRS_NORMAL_CNTr, + 0, + 0, + 
&spec_bat_crs_normal_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + rc = dpp_reg_read(dev_id, + ETM_QMU_SPEC_BAT_CRS_OFF_CNTr, + 0, + 0, + &spec_bat_crs_off_cnt_reg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + + p_para->observe_batch_set = observe_batch_set_reg.observe_batch_set; + p_para->spec_bat_pkt_received = spec_bat_pkt_received_reg.spec_bat_pkt_received; + p_para->spec_bat_pkt_dropped = spec_bat_pkt_dropped_reg.spec_bat_pkt_dropped; + p_para->spec_bat_blk_scheduled = spec_bat_blk_scheduled_reg.spec_bat_blk_scheduled; + p_para->spec_bat_wr_cmd_sent = spec_bat_wr_cmd_sent_reg.spec_bat_wr_cmd_sent; + p_para->spec_bat_rd_cmd_sent = spec_bat_rd_cmd_sent_reg.spec_bat_rd_cmd_sent; + p_para->spec_bat_pkt_enq = spec_bat_pkt_enq_reg.spec_bat_pkt_enq; + p_para->spec_bat_pkt_deq = spec_bat_pkt_deq_reg.spec_bat_pkt_deq; + p_para->spec_bat_crdt_uncon_received = spec_bat_crdt_uncon_received_reg.spec_bat_crdt_uncon_received; + p_para->spec_bat_crdt_cong_received = spec_bat_crdt_cong_received_reg.spec_bat_crdt_cong_received; + p_para->spec_bat_crs_normal_cnt = spec_bat_crs_normal_cnt_reg.spec_bat_crs_normal_cnt; + p_para->spec_bat_crs_off_cnt = spec_bat_crs_off_cnt_reg.spec_bat_crs_off_cnt; + + return DPP_OK; +} + +/***********************************************************/ +/** 连续配置各级搬移门限 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param start_id 为起始 队列号或端口号,系统级时,id参数无效 +* @param value 端口级和系统级时,为搬移门限值,单位为NPPU存包的单位,256B; + 流级时为搬移profile_id,0~15 +* @param num 为队列或端口个数 +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author wush @date 2016/11/19 +************************************************************/ +#ifdef ETM_REAL +DPP_STATUS dpp_tm_cgavd_move_th_together_wr(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 start_id, + ZXIC_UINT32 value, + ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, SYS_LEVEL); + + for (i = 0; i < num; i++) { + ZXIC_COMM_CHECK_INDEX_SUB_OVERFLOW(start_id, i); + rc = dpp_tm_cgavd_move_th_set(dev_id, level, start_id + i, value); + zxic_comm_usleep(100); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_move_th_set"); + } + + return DPP_OK; +} +#endif +#endif +/***********************************************************/ +/** 连续设置多个队列或端口的TD门限 +* @param level 层次号,0:队列级,1:端口级 +* @param tm_type 0-ETM,1-FTM +* @param id 起始队列号或端口号 +* @param td_th TD门限,单位是KB,转换为block写入寄存器 +* @param num 需要设置的队列或端口数量 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_together_wr(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 td_th, + ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), id, num); + + for (i = 0; i < num; i++) { + rc = dpp_tm_cgavd_td_th_set(dev, level, id + i, td_th); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_td_th_set"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 连续获取多个队列或端口的TD门限 +* @param level 层次号,0:队列级,1:端口级 +* @param tm_type 0-ETM,1-FTM +* @param level 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* @param id 起始队列号或端口号 +* @param num 需要设置的队列或端口数量 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2016/03/22 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_td_th_together_get(DPP_DEV_T *dev, ZXIC_UINT32 level, + ZXIC_UINT32 id, ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 td_th = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), 
level, QUEUE_LEVEL, + PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), id, num); + + for (i = 0; i < num; i++) { + rc = dpp_tm_cgavd_td_th_get(dev, level, id + i, &td_th); + zxic_comm_delay(5); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_td_th_get"); + + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), + id, i); + if (level == QUEUE_LEVEL) { + ZXIC_COMM_PRINT(" flow_id: 0x%x, td_th: 0x%x\n", + id + i, td_th); + } else { + ZXIC_COMM_PRINT(" pp_id: 0x%x, td_th: 0x%x\n", + id + i, td_th); + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** 连续配置多个队列或端口是否支持动态门限机制 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param id 队列号或端口号 +* @param en 配置的值,0-不支持动态门限机制,1-支持动态门限机制 +* @param num 连续配置的队列数 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author djf @date 2014/02/17 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_dyn_th_en_set_more(DPP_DEV_T *dev, + DPP_TM_CGAVD_LEVEL_E level, + ZXIC_UINT32 id, ZXIC_UINT32 en, + ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 i = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), level, QUEUE_LEVEL, + PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), en, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(DEV_ID(dev), id, num); + + for (i = 0; i < num; i++) { + rc = dpp_tm_cgavd_dyn_th_en_set(dev, level, id + i, en); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(DEV_ID(dev), rc, + "dpp_tm_cgavd_dyn_th_en_set"); + } + + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置各级WRED丢弃曲线对应的参数 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param max_th 平均队列深度上限阈值 +* @param min_th 平均队列深度下限阈值 +* @param max_p 最大丢弃概率 +* @param weight 平均队列深度计算权重 +* 
@param q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS dpp_tm_wred_dp_line_para_wr(ZXIC_UINT32 dev_id, + ZXIC_UINT32 level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + ZXIC_UINT32 max_th, + ZXIC_UINT32 min_th, + ZXIC_UINT32 max_p, + ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_WRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + } else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + } + + para.max_th = max_th; + para.min_th = min_th; + para.max_p = max_p; + para.weight = weight; + para.q_len_th = q_len_th; + + rc = dpp_tm_cgavd_wred_dp_line_para_set(dev_id, level, wred_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_wred_dp_line_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置各级WRED丢弃曲线对应的参数 +* @param tm_type 0-ETM,1-FTM +* @param level WRED支持层次号,0:队列级,1:端口级 +* @param wred_id 队列级共支持16个WRED组0-15,端口级支持8组0-7 +* @param dp 共支持8个dp,取值0-7 +* @param max_th 平均队列深度上限阈值 +* @param min_th 平均队列深度下限阈值 +* @param max_p 最大丢弃概率 +* @param weight 平均队列深度计算权重 +* @param q_len_th 队列深度阈值 +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS dpp_tm_wred_dp_line_para_flag_wr(ZXIC_UINT32 dev_id, + ZXIC_UINT32 level, + ZXIC_UINT32 wred_id, + ZXIC_UINT32 dp, + 
ZXIC_UINT32 max_th, + ZXIC_UINT32 min_th, + ZXIC_UINT32 max_p, + ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th, + ZXIC_UINT32 flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_WRED_DP_LINE_PARA_T para = {0}; + DPP_ETM_CGAVD_PKE_LEN_CALC_SIGN_T cgavd_pke_len_calc_sign = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, level, QUEUE_LEVEL, PP_LEVEL); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, max_p, 1, DPP_TM_RED_P_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, flag, 0, 1); + + if (level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_Q_WRED_NUM - 1); + } else { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, wred_id, 0, DPP_TM_PP_WRED_NUM - 1); + } + + para.max_th = max_th; + para.min_th = min_th; + para.max_p = max_p; + para.weight = weight; + para.q_len_th = q_len_th; + + cgavd_pke_len_calc_sign.pke_len_calc_sign = flag; + rc = dpp_reg_write(dev_id, + ETM_CGAVD_PKE_LEN_CALC_SIGNr, + 0, + 0, + &cgavd_pke_len_calc_sign); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_tm_cgavd_wred_dp_line_para_set(dev_id, level, wred_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_wred_dp_line_para_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置CPU设置的报文长度是否参与计算丢弃概率的使能 +* @param tm_type 0-ETM,1-FTM +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_pke_len_calc_sign_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 flag) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CGAVD_PKE_LEN_CALC_SIGN_T cgavd_pke_len_calc_sign = {0}; + + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, flag, 0, 1); + + cgavd_pke_len_calc_sign.pke_len_calc_sign = flag; + rc = dpp_reg_write(dev_id, + ETM_CGAVD_PKE_LEN_CALC_SIGNr, + 0, + 0, + &cgavd_pke_len_calc_sign); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取配置CPU设置的报文长度是否参与计算丢弃概率的使能 +* @param tm_type 0-ETM,1-FTM +* @param flag 忽略乘法里的当前包长和最大包长比标志位:1为忽略 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cy @date 2015/11/9 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_wred_pke_len_calc_sign_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_flag) +{ + DPP_STATUS rc = DPP_OK; + + DPP_ETM_CGAVD_PKE_LEN_CALC_SIGN_T cgavd_pke_len_calc_sign = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_flag); + + rc = dpp_reg_read(dev_id, + ETM_CGAVD_PKE_LEN_CALC_SIGNr, + 0, + 0, + &cgavd_pke_len_calc_sign); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + *p_flag = cgavd_pke_len_calc_sign.pke_len_calc_sign; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置系统级GRED丢弃曲线对应的参数 +* @param tm_type 0-ETM,1-FTM +* @param dp 共支持8个dp,取值0-7 +* @param max_th 平均队列深度上限阈值 +* @param mid_th 平均队列深度中间阈值 +* @param min_th 平均队列深度下限阈值 +* @param max_p 最大丢弃概率 1~99 +* @param weight 平均队列深度计算权重 +* @param q_len_th 队列深度阈值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author taq @date 2015/04/20 +************************************************************/ +DPP_STATUS dpp_tm_gred_dp_line_para_wr(ZXIC_UINT32 dev_id, + ZXIC_UINT32 dp, + ZXIC_UINT32 max_th, + ZXIC_UINT32 mid_th, + ZXIC_UINT32 min_th, + ZXIC_UINT32 max_p, + ZXIC_UINT32 weight, + ZXIC_UINT32 q_len_th) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_GRED_DP_LINE_PARA_T para = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dp, 0, DPP_TM_DP_NUM - 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, max_p, 1, DPP_TM_RED_P_MAX); + 
ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, weight, 0, DPP_TM_CGAVD_WEIGHT_MAX); + + para.max_th = max_th; + para.mid_th = mid_th; + para.min_th = min_th; + para.max_p = max_p; + + para.weight = weight; + para.q_len_th = q_len_th; + + rc = dpp_tm_cgavd_gred_dp_line_para_set(dev_id, dp, ¶); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_gred_dp_line_para_set"); + + return DPP_OK; +} + + +/***********************************************************/ +/** 配置olif统计组信息 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param id olif统计组号 +* @param all_or_by_port 0-统计所有,1-统计某一端口或某一dest_id +* @param i_or_e_sel 10-统计片外,01-统计片内,其他值-统计所有 +* @param port_or_dest_id_sel 0-统计port,1-统计dest_id +* @param port_dest_id port号或dest_id号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author cuiy @date 2016/04/21 +************************************************************/ +DPP_STATUS dpp_tm_olif_stat_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 id, + ZXIC_UINT32 all_or_by_port, + ZXIC_UINT32 i_or_e_sel, + ZXIC_UINT32 port_or_dest_id_sel, + ZXIC_UINT32 port_dest_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_OLIF_TM_LIF_STAT_CFG_T tm_lif_stat_cft = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, id, 0, 15); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, all_or_by_port, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, i_or_e_sel, 0, 3); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, port_or_dest_id_sel, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, port_dest_id, 0, 255); + + tm_lif_stat_cft.all_or_by_port = all_or_by_port; + tm_lif_stat_cft.i_or_e_sel = i_or_e_sel; + tm_lif_stat_cft.port_or_dest_id_sel = port_or_dest_id_sel; + tm_lif_stat_cft.port_dest_id = port_dest_id; + rc = dpp_reg_write(dev_id, + ETM_OLIF_TM_LIF_STAT_CFGr, + 0, + id, + &tm_lif_stat_cft); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param dev_id +* @param tm_type +* 
@param i_or_e_sel +* @param port_or_dest_id_sel +* @param start_id +* @param start_port_dest_id +* @param num +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xjw @date 2018/02/01 +************************************************************/ +DPP_STATUS dpp_tm_olif_stat_set_mul(ZXIC_UINT32 dev_id, + ZXIC_UINT32 i_or_e_sel, + ZXIC_UINT32 port_or_dest_id_sel, + ZXIC_UINT32 start_id, + ZXIC_UINT32 start_port_dest_id, + ZXIC_UINT32 num) +{ + DPP_STATUS rt = DPP_OK; + ZXIC_UINT32 i = 0; + + for (i = 0; i < num; i++) { + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, start_port_dest_id, i); + ZXIC_COMM_CHECK_DEV_INDEX_ADD_OVERFLOW_NO_ASSERT(dev_id, start_id, i); + + rt = dpp_tm_olif_stat_set(dev_id, start_id + i, + 1, i_or_e_sel, port_or_dest_id_sel, start_port_dest_id + i); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rt, "dpp_reg_write"); + } + + return rt; +} + +DPP_STATUS dpp_tm_mr_init(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_tm_qmu_qos_sign_set(dev_id, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_qos_sign_set"); + rc = dpp_tm_cgavd_q_map_pp_set(dev_id, 0, 60); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_q_map_pp_set"); + rc = dpp_tm_cgavd_td_byte_block_th_get_diag(dev_id, 1, 60); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_byte_block_th_get_diag"); + rc = dpp_tm_cgavd_td_byte_block_th_set(dev_id, 0, 0, 1024); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_byte_block_th_set"); + rc = dpp_tm_cgavd_td_th_set(dev_id, 0, 0, 200); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_td_th_set"); + // rc = dpp_tm_shape_pp_para_wr(dev_id, 60, 1000000, 1000, 1); + // ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shape_pp_para_wr"); + rc = dpp_tm_qmu_port_shape_set(dev_id, 60, 4, 31, 8192, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_port_shape_set"); + + return rc; +} + +#if 0 
+/***********************************************************/ +/** 配置TM模式下初始化代码 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_tm_init_info 配置TM模式下初始化信息包括以下 +* blk_size 配置qmu block大小:512B[default]/1024B +* case_num QMU初始化场景编号:0/1/2/3... +* imem_omem; 0-片内外混合; 1-纯片内;2-纯片外 +* mode 0-TM; 1-SA +* +* @return +* @remark 无 +* @see +* @author whuashan @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_tm_asic_init(ZXIC_UINT32 dev_id, DPP_TM_ASIC_INIT_INFO_T *p_tm_asic_init_info) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 case_num = 0; + ZXIC_UINT32 blk_size = 0; + ZXIC_UINT32 imem_omem = 0; + ZXIC_UINT32 mode = 0; + //ZXIC_UINT32 port_id = 0; + /*ZXIC_UINT32 flow_id_index = 0;*/ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 sys_cgavd_td = 0; + DPP_TM_CNT_MODE_T cfgmt_count_mode = {0}; + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_tm_asic_init_info); + +/* + if (tm_type == DPP_ETM) { + flow_id_index = DPP_ETM_Q_NUM; + } + else if (tm_type == DPP_FTM) { + flow_id_index = DPP_FTM_Q_NUM; + } +*/ + case_num = p_tm_asic_init_info->case_num; + ZXIC_COMM_PRINT("case_num =:%d TM ASIC INIT START! <======\n", case_num); + blk_size = p_tm_asic_init_info->blk_size; + imem_omem = p_tm_asic_init_info->imem_omem; + mode = p_tm_asic_init_info->mode; + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, mode, 0, 1); + + ZXIC_COMM_PRINT("======>dev_id:%d TM ASIC INIT START! 
<======\n", dev_id); + + /*开启tm时钟门控使能*/ + rc = dpp_tm_cfgmt_clkgate_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_clkgate_en_set"); + ZXIC_COMM_PRINT("DPP tm clk enable ok\n"); + zxic_comm_sleep(5); + /*开启tm软复位使能*/ + rc = dpp_tm_cfgmt_softrst_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_softrst_en_set"); + ZXIC_COMM_PRINT("DPP tm softrst enable ok\n"); + zxic_comm_sleep(5); + + DPP_ETM_CFGMT_CLKGATE_EN_T timeout = {0}; + + timeout.clkgate_en = 0xfff; + rc = dpp_reg_write(dev_id, + ETM_CFGMT_TIMEOUT_LIMITr, + 0, + 0, + &timeout); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + + /* 整包和交织模式:TM模式配置成交织模式 */ + rc = dpp_tm_qmu_pkt_blk_mode_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_pkt_blk_mode_set"); + ZXIC_COMM_PRINT("DPP tm mode cfg ok\n"); + + /* 配置block大小 */ + rc = dpp_tm_cfgmt_blk_size_set(dev_id, blk_size); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_blk_size_set"); + + /* 启动包存储CRC使能 */ + rc = dpp_tm_cfgmt_crc_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_crc_en_set"); + + /* 配置QMU的每chunk节点数:目前配置1chk=8 block */ + rc = dpp_tm_cfgmt_qmu_work_mode_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_qmu_work_mode_set"); + ZXIC_COMM_PRINT("DPP tm qmu_work_mode_set ok\n"); + /* 计数模式配置 */ + cfgmt_count_mode.fc_count_mode = 1; /* 翻转 */ + cfgmt_count_mode.count_rd_mode = 0; /* 非读清 */ + cfgmt_count_mode.count_overflow_mode = 1; /* 允许溢出翻转 */ + rc = dpp_tm_cfgmt_cnt_mode_set(dev_id, &cfgmt_count_mode); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_cnt_mode_set"); + + /* 总缓存字节数=节点数量*节点大小ftm256k etm512k:配置总缓存90% */ + sys_cgavd_td = (512 * 1024) * blk_size; + sys_cgavd_td = sys_cgavd_td / 1024; + sys_cgavd_td = sys_cgavd_td / 100 * 90; + rc = dpp_tm_cgavd_td_th_set(dev_id, SYS_LEVEL, 0, sys_cgavd_td); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_tm_cgavd_td_th_set"); + + /* 纯片内模式 */ + dpp_tm_tmmu_imem_en_set(dev_id, 1); + dpp_tm_cgavd_imem_omem_set(dev_id, 1, 0); + dpp_tm_tmmu_ddr_force_rdy_set(dev_id, 0x3ff); + + /* QMU链表配置:保证16K配置 */ + rc = dpp_tm_qmu_init_set(dev_id, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_init_set"); + + /* 空队列确保门限值配置 */ + rc = dpp_tm_qmu_crs_th2_set(dev_id, 0, 200); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_crs_th2_set"); + + /*crs的e桶产生门限:每个队列可映射16种配置, + 映射表是cgavd的流队列WRED队列策略组*/ + for (index = 0 ; index < 16; index++) { + rc = dpp_tm_qmu_crs_eir_th_set(dev_id, index, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_crs_eir_th_set"); + } + + /* CRS限速 */ + + /* 普通老化使能和配置 */ + rc = dpp_tm_qmu_pkt_aging_set(dev_id, 0, 0x1ff, 0xff, 0, DPP_ETM_Q_NUM - 1, 1, 0xa); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_pkt_aging_set"); + + /* CRDT SHAP配置 */ + rc = dpp_tm_crdt_ram_init(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_ram_init"); + rc = dpp_tm_shap_ram_init(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shap_ram_init"); + /* CRDT 授权使能打开 */ + rc = dpp_tm_crdt_credit_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_credit_en_set"); + ZXIC_COMM_PRINT("dev_id:%d DPP_TM crdt credit enable set success!!!\n", dev_id); + + /* CRDT开启E桶CRS过滤使能(不能打开,会导致延时大) */ + /*for (index = 0; index < flow_id_index; index++) + { + rc = dpp_tm_ind_write32(dev_id, MODULE_TM_CRDT, 0x600000 + index, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_move_th_set"); + }*/ + + /*打开flow db_token,配置成c+e*/ + rc = dpp_tm_shape_flow_db_en_set(dev_id, 1, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shape_flow_db_en_set"); + + /*开启cgavd平均队列深度归零使能*/ + rc = dpp_tm_cgavd_avg_qlen_return_zero_en_set(dev_id, 1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_avg_qlen_return_zero_en_set"); + + rc = dpp_tm_qmu_qlist_cfgmt_ram_init_done_print(dev_id); + 
ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_qlist_cfgmt_ram_init_done_print"); + + /*开启tm时钟门控使能*/ + // rc = dpp_tm_cfgmt_clkgate_en_set(dev_id, 1); + // ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_clkgate_en_set"); + + // /*开启tm软复位使能*/ + // rc = dpp_tm_cfgmt_softrst_en_set(dev_id, 1); + // ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_softrst_en_set"); + + /* 子系统就绪判断 */ + rc = dpp_tm_cfgmt_subsystem_rdy_check(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_subsystem_rdy_check"); + + ZXIC_COMM_PRINT("=====>dev_id:%d TM ASIC INIT END! <======\n", dev_id); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置TM模式下初始化代码 +* @param dev_id +* @param tm_type 0-ETM,1-FTM +* @param p_tm_init_info 配置TM模式下初始化信息包括以下 +* blk_size 配置qmu block大小:512B[default]/1024B +* case_num QMU初始化场景编号:0/1/2/3... +* imem_omem; 0-片内外混合; 1-纯片内;2-纯片外 +* mode 0-TM; 1-SA +* +* @return +* @remark 无 +* @see +* @author szq @date 2015/03/26 +************************************************************/ +DPP_STATUS dpp_tm_asic_init_diag(ZXIC_UINT32 dev_id, ZXIC_UINT32 blk_size, ZXIC_UINT32 case_num, ZXIC_UINT32 imem_omem, ZXIC_UINT32 mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_TM_ASIC_INIT_INFO_T tm_asic_init_info = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, blk_size, 256, 1024); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, case_num, 0, 0x99F); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, imem_omem, 0, 2); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, mode, 0, 1); + + tm_asic_init_info.blk_size = blk_size; + tm_asic_init_info.case_num = case_num; + tm_asic_init_info.imem_omem = imem_omem; + tm_asic_init_info.mode = mode; + rc = dpp_tm_asic_init(dev_id, &tm_asic_init_info); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_asic_init"); + + return DPP_OK; +} + +#endif 
+/**************************************************************************** +* 函数名称: dpp_tm_avg_que_len_get +* 功能描述: 各级平均队列深度获取 +* 输入参数: dev_id: 设备索引编号 +* @param tm_type 0-ETM,1-FTM +* cgavd_level: 拥塞避免支持层次号,0:队列级,1:端口级,2:系统级 +* que_id: 本级别层次内的队列编号。 +* 输出参数: p_avg_len: 平均队列深度,单位为BLOCK。 +* 返 回 值: DPP_OK-成功,DPP_ERR-失败 +* 其它说明: +* author cy @date 2015/06/29 +*****************************************************************************/ +DPP_STATUS dpp_tm_avg_que_len_get(ZXIC_UINT32 dev_id, + DPP_TM_CGAVD_LEVEL_E cgavd_level, + ZXIC_UINT32 que_id, + ZXIC_UINT32 *p_avg_len) +{ + /* 返回值变量定义 */ + DPP_STATUS rc = DPP_OK; + + /* 结构体变量定义 */ + DPP_ETM_CGAVD_PP_AVG_Q_LEN_T cgavd_pp_avg_q_len = {0}; + DPP_ETM_CGAVD_SYS_AVG_Q_LEN_T cgavd_sys_avg_q_len = {0}; + DPP_ETM_CGAVD_FLOW_AVG_Q_LEN_T cgavd_flow_avg_q_len = {0}; + + /* 入参检查 */ + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cgavd_level, QUEUE_LEVEL, SYS_LEVEL); + + if (cgavd_level == QUEUE_LEVEL) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_ETM_Q_NUM - 1); + } else if (PP_LEVEL == cgavd_level) { + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, que_id, 0, DPP_TM_PP_NUM - 1); + } else { + } + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_avg_len); + + switch (cgavd_level) { + case QUEUE_LEVEL: + { + rc = dpp_reg_read(dev_id, ETM_CGAVD_FLOW_AVG_Q_LENr, 0, que_id, &cgavd_flow_avg_q_len); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_avg_len = cgavd_flow_avg_q_len.flow_avg_q_len; + break; + } + + case PP_LEVEL: + { + rc = dpp_reg_read(dev_id, ETM_CGAVD_PP_AVG_Q_LENr, 0, que_id, &cgavd_pp_avg_q_len); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_avg_len = cgavd_pp_avg_q_len.pp_avg_q_len; + break; + } + + case SYS_LEVEL: + { + rc = dpp_reg_read(dev_id, ETM_CGAVD_SYS_AVG_Q_LENr, 0, 0, &cgavd_sys_avg_q_len); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_read"); + *p_avg_len = cgavd_sys_avg_q_len.sys_avg_q_len; + break; + } + + default: + { + 
ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "[dev_id %d] cgavd_level is out of dev_id!!!\n", dev_id); + return DPP_ERR; + } + } + + return DPP_OK; +} + +#endif +/***********************************************************/ +/** 清除整形表格里面的值 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/16 +************************************************************/ +DPP_STATUS dpp_tm_clr_shape_para(DPP_DEV_T *dev) +{ + /*DPP_STATUS rc = DPP_OK;*/ + + /* rc = dpp_tm_global_var_mutex_init(); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_global_var_mutex_init"); + + rc = zxic_comm_mutex_lock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock");*/ + + // ZXIC_COMM_CHECK_POINT(dev); + // ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(DEV_ID(dev), DEV_ID(dev), 0, DPP_DEV_CHANNEL_MAX - 2); + + memset(g_dpp_etm_shape_para_table, 0, + sizeof(g_dpp_etm_shape_para_table)); + + /* rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock");*/ + return DPP_OK; +} + +#if 0 +/***********************************************************/ +/** 配置CFGMT中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shap_int_mask_flag shap模块中断屏蔽位 0:不屏蔽 1:屏蔽 +* @param crdt_int_mask_flag crdt模块中断屏蔽位 +* @param mmu_int_mask_flag mmu模块中断屏蔽位 +* @param qmu_int_mask_flag qmu模块中断屏蔽位 +* @param cgavd_int_mask_flag cgavd模块中断屏蔽位 +* @param olif_int_mask_flag olif模块中断屏蔽位 +* @param cfgmt_int_buf_mask_flag cfgmt模块中断屏蔽位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_mask_set_diag(ZXIC_UINT32 dev_id, + ZXIC_UINT32 shap_int_mask_flag, + ZXIC_UINT32 crdt_int_mask_flag, + ZXIC_UINT32 mmu_int_mask_flag, + ZXIC_UINT32 qmu_int_mask_flag, + ZXIC_UINT32 cgavd_int_mask_flag, + ZXIC_UINT32 
olif_int_mask_flag, + ZXIC_UINT32 cfgmt_int_buf_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CFGMT_REG_INT_MASK_REG_T int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shap_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crdt_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, mmu_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qmu_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cgavd_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, olif_int_mask_flag, 0, 1); + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cfgmt_int_buf_mask_flag, 0, 1); + + int_mask.shap_int_mask = shap_int_mask_flag; + int_mask.crdt_int_mask = crdt_int_mask_flag; + int_mask.tmmu_int_mask = mmu_int_mask_flag; + int_mask.qmu_int_mask = qmu_int_mask_flag; + int_mask.cgavd_int_mask = cgavd_int_mask_flag; + int_mask.olif_int_mask = olif_int_mask_flag; + int_mask.cfgmt_int_buf_mask = cfgmt_int_buf_mask_flag; + + rc = dpp_reg_write(dev_id, + ETM_CFGMT_REG_INT_MASK_REGr, + 0, + 0, + &int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取CFGMT中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断状态 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_cfgmt_int_state_get_diag(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_CFGMT_REG_INT_STATE_REGr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取olif中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 
+************************************************************/ +DPP_STATUS dpp_tm_olif_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_OLIF_ITMHRAM_PARITY_ERR_2_INTr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置OLIF中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crcram_parity_err_mask 0:不屏蔽 1:屏蔽 +* @param itmhram_parity_err_mask +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_olif_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 olif_int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_OLIF_OLIF_INT_MASK_T int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, olif_int_mask_flag, 0, 1); + + int_mask.crcram_parity_err_mask = olif_int_mask_flag; + int_mask.emem_fifo_ecc_mask = olif_int_mask_flag; + int_mask.emem_fifo_ovf_mask = olif_int_mask_flag; + int_mask.emem_fifo_udf_mask = olif_int_mask_flag; + int_mask.imem_fifo_ecc_mask = olif_int_mask_flag; + int_mask.imem_fifo_ovf_mask = olif_int_mask_flag; + int_mask.imem_fifo_udf_mask = olif_int_mask_flag; + int_mask.itmh_ecc_double_err_mask = olif_int_mask_flag; + int_mask.itmh_ecc_single_err_mask = olif_int_mask_flag; + int_mask.order_fifo_ovf_mask = olif_int_mask_flag; + int_mask.order_fifo_parity_err_mask = olif_int_mask_flag; + int_mask.order_fifo_udf_mask = olif_int_mask_flag; + int_mask.para_fifo_ecc_mask = olif_int_mask_flag; + int_mask.para_fifo_ovf_mask = olif_int_mask_flag; + int_mask.para_fifo_udf_mask = olif_int_mask_flag; + + rc = dpp_reg_write(dev_id, + ETM_OLIF_OLIF_INT_MASKr, + 0, + 0, + &int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 
配置cgavd中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param cgavd_int_mask_flag 0:不屏蔽 1:屏蔽 +* @param 写1 屏蔽cgavd中断寄存器和cgavd_ram_err寄存器所有位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 cgavd_int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CGAVD_CGAVD_INT_MASK_T cgavd_int_mask = {0}; + DPP_ETM_CGAVD_CGAVD_RAM_ERR_INT_MASK_T cgavd_ram_err_int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, cgavd_int_mask_flag, 0, 1); + + cgavd_int_mask.cgavd_int_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_qlen_inta_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_qlen_intb_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_qnum_inta_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_qnum_intb_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_tdth_inta_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.flow_tdth_intb_mask = cgavd_int_mask_flag; + + cgavd_ram_err_int_mask.pds_deal_fifo_ov_int_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.pds_deal_fifo_uv_int_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.pp_qlen_inta_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.pp_qlen_intb_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.pp_tdth_int_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.qmu_cgavd_fifo_ov_int_mask = cgavd_int_mask_flag; + cgavd_ram_err_int_mask.qmu_cgavd_fifo_uv_int_mask = cgavd_int_mask_flag; + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_CGAVD_INT_MASKr, + 0, + 0, + &cgavd_int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + ETM_CGAVD_CGAVD_RAM_ERR_INT_MASKr, + 0, + 0, + &cgavd_ram_err_int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + 
+/***********************************************************/ +/** 读取cgavd中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断状态 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_cgavd_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_CGAVD_CGAVD_INTr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_CGAVD_CGAVD_RAM_ERRr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取tmmu中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param p_para 中断状态 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/08 +************************************************************/ +DPP_STATUS dpp_tm_tmmu_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_TMMU_TMMU_STATES_1r, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置crdt中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param crdt_int_mask_flag 0:不屏蔽 1:屏蔽 +* @param 写1 屏蔽ETM_CGAVD_RD_CPU_OR_RAMr +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/4/10 +************************************************************/ +DPP_STATUS dpp_tm_crdt_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 crdt_int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_CRDT_INT_MASK_T crdt_int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, crdt_int_mask_flag, 0, 1); + + crdt_int_mask.crdt_int_mask = crdt_int_mask_flag; + rc = 
dpp_reg_write(dev_id, + ETM_CRDT_CRDT_INT_MASKr, + 0, + 0, + &crdt_int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取crdt中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/4/10 +************************************************************/ +DPP_STATUS dpp_tm_crdt_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_CRDT_CRDT_INT_BUSr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置qmu中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param qmu_int_mask_flag 0:不屏蔽 1:屏蔽 +* @param 写1 屏蔽qmu_int_mask5的所有位 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 qmu_int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 qmu_qsch_crbal_init_mask_reg_index = 0; + ZXIC_UINT32 qmu_int_mask1_reg_index = 0; + ZXIC_UINT32 qmu_int_mask2_reg_index = 0; + ZXIC_UINT32 qmu_int_mask3_reg_index = 0; + ZXIC_UINT32 qmu_int_mask4_reg_index = 0; + ZXIC_UINT32 qmu_int_mask5_reg_index = 0; + + DPP_ETM_QMU_QCFG_QSCH_CRBAL_INIT_MASK_T qmu_qsch_crbal_init_mask = {0}; + DPP_ETM_QMU_QMU_INT_MASK1_T qmu_int_mask1 = {0}; + DPP_ETM_QMU_QMU_INT_MASK2_T qmu_int_mask2 = {0}; + DPP_ETM_QMU_QMU_INT_MASK3_T qmu_int_mask3 = {0}; + DPP_ETM_QMU_QMU_INT_MASK4_T qmu_int_mask4 = {0}; + DPP_ETM_QMU_QMU_INT_MASK5_T qmu_int_mask5 = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, qmu_int_mask_flag, 0, 1); + + qmu_qsch_crbal_init_mask_reg_index = ETM_QMU_QCFG_QSCH_CRBAL_INIT_MASKr; + qmu_int_mask1_reg_index = ETM_QMU_QMU_INT_MASK1r; + 
qmu_int_mask2_reg_index = ETM_QMU_QMU_INT_MASK2r; + qmu_int_mask3_reg_index = ETM_QMU_QMU_INT_MASK3r; + qmu_int_mask4_reg_index = ETM_QMU_QMU_INT_MASK4r; + qmu_int_mask5_reg_index = ETM_QMU_QMU_INT_MASK5r; + + qmu_qsch_crbal_init_mask.qcfg_qsch_crbal_init_mask = qmu_int_mask_flag; + + qmu_int_mask1.qmu_int_mask1 = qmu_int_mask_flag; + qmu_int_mask2.qmu_int_mask2 = qmu_int_mask_flag; + qmu_int_mask3.qmu_int_mask3 = qmu_int_mask_flag; + qmu_int_mask4.qmu_int_mask4 = qmu_int_mask_flag; + qmu_int_mask5.qmu_int_mask5 = qmu_int_mask_flag; + + rc = dpp_reg_write(dev_id, + qmu_qsch_crbal_init_mask_reg_index, + 0, + 0, + &qmu_qsch_crbal_init_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + qmu_int_mask1_reg_index, + 0, + 0, + &qmu_int_mask1); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + qmu_int_mask2_reg_index, + 0, + 0, + &qmu_int_mask2); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + qmu_int_mask3_reg_index, + 0, + 0, + &qmu_int_mask3); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + qmu_int_mask4_reg_index, + 0, + 0, + &qmu_int_mask4); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + rc = dpp_reg_write(dev_id, + qmu_int_mask5_reg_index, + 0, + 0, + &qmu_int_mask5); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取qmu中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_qmu_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_QMU_QLIST_CFGMT_FIFO_STATEr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, 
"dpp_reg_fields_print_with_def"); + rc = dpp_reg_fields_print_with_def(dev_id, ETM_QMU_CMD_SCH_CFGMT_FIFO_STATEr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置shape中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param shape_int_mask_flag 0:不屏蔽 1:屏蔽 +* @param 写1 屏蔽ETM_CGAVD_RD_CPU_OR_RAMr +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/4/10 +************************************************************/ +DPP_STATUS dpp_tm_shape_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 shape_int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_SHAP_INT_MASK_REG_T shap_int_mask = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, shape_int_mask_flag, 0, 1); + + shap_int_mask.pp_c_token_min_int_mask = shape_int_mask_flag; + rc = dpp_reg_write(dev_id, + ETM_CRDT_SHAP_INT_MASK_REGr, + 0, + 0, + &shap_int_mask); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_write"); + + return DPP_OK; +} + +/***********************************************************/ +/** 读取shape中断状态 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author whuashan @date 2019/4/10 +************************************************************/ +DPP_STATUS dpp_tm_shape_int_state_get(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + DPP_ETM_CRDT_SHAP_INT_REG_T shap_int_reg_t = {0}; + + rc = dpp_reg_fields_print_with_def(dev_id, ETM_CRDT_SHAP_INT_REGr, 0, 0); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_reg_fields_print_with_def"); + + ZXIC_COMM_PRINT("pp_c_token_min_int : %d\n", shap_int_reg_t.pp_c_token_min_int); + + return DPP_OK; +} + +/***********************************************************/ +/** 配置dpp tm所有模块中断屏蔽 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param int_mask_flag 0:不屏蔽 1:屏蔽 +* @param +* +* @return 
DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS dpp_tm_int_mask_set(ZXIC_UINT32 dev_id, + ZXIC_UINT32 int_mask_flag) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, int_mask_flag, 0, 1); + + rc = dpp_tm_cfgmt_int_mask_set_diag(dev_id, + int_mask_flag, + int_mask_flag, + int_mask_flag, + int_mask_flag, + int_mask_flag, + int_mask_flag, + int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_int_mask_set_diag"); + + rc = dpp_tm_olif_int_mask_set(dev_id, int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_olif_int_mask_set"); + + rc = dpp_tm_cgavd_int_mask_set(dev_id, int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_int_mask_set"); + + rc = dpp_tm_shape_int_mask_set(dev_id, int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shape_int_mask_set"); + + rc = dpp_tm_crdt_int_mask_set(dev_id, int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_int_mask_set"); + + rc = dpp_tm_qmu_int_mask_set(dev_id, int_mask_flag); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_int_mask_set"); + + return DPP_OK; +} + +/***********************************************************/ +/** 打印dpp tm所有模块中断状态 +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备编号 +* @param +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author yjd @date 2015/07/09 +************************************************************/ +DPP_STATUS diag_dpp_tm_int(ZXIC_UINT32 dev_id) +{ + DPP_STATUS rc = DPP_OK; + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm cfgmt int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_cfgmt_int_state_get_diag(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cfgmt_int_state_get_diag"); + + 
ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm olif int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_olif_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_olif_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm cgavd int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_cgavd_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_cgavd_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm tmmu int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_tmmu_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_tmmu_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm shap int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_shape_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shape_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm crdt int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_crdt_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_crdt_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm qmu int state\n"); + ZXIC_COMM_PRINT("**************************************\n"); + rc = dpp_tm_qmu_int_state_get(dev_id); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_int_state_get"); + + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("dpp tm qmu int case_no\n"); + ZXIC_COMM_PRINT("**************************************\n"); + ZXIC_COMM_PRINT("g_qmu_init_case_no = 0x%x\n", 
g_qmu_init_case_no); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取tm.c中qmu_init_set中配置的case_num +* @param tm_type 0-ETM,1-FTM +* @param dev_id 设备编号 +* @param +* @param +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/04/15 +************************************************************/ +DPP_STATUS dpp_tm_case_no_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *case_no) +{ + ZXIC_COMM_CHECK_DEV_POINT(dev_id, case_no); + + *case_no = g_qmu_init_case_no; + + return DPP_OK; +} + +/***********************************************************/ +/** 配置tm授权价值总接口,包含qmu授权价值、shap授权价值、FTM对应授权价值版本设置 +* @param dev_id 设备编号 +* @param tm_type 0-ETM,1-FTM +* @param credit_value 授权价值,默认值是0x5feByte +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author xuhb @date 2020/08/13 +************************************************************/ +DPP_STATUS dpp_tm_credit_value_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 credit_value) +{ + DPP_STATUS rc = DPP_OK; + + rc = dpp_tm_qmu_credit_value_set(dev_id, credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_qmu_credit_value_set"); + + rc = dpp_tm_shap_crd_grain_set(dev_id, credit_value); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "dpp_tm_shap_crd_grain_set"); + + return DPP_OK; +} + +#endif +#endif + +#if ZXIC_REAL("TM_CPU_SOFT_RESET") +#if 0 +/***********************************************************/ +/** 设置TM的全局变量,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param size data_buff的长度 +* @param p_data_buff 需要恢复的内容 +* +* @return +* @remark 无 +* @see +* @author XXX @date 2017/03/09 +************************************************************/ +DPP_STATUS dpp_tm_glb_mgr_set(ZXIC_UINT32 dev_id, ZXIC_UINT32 size, ZXIC_UINT8 *p_data_buff) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 shape_table_profile_num = 128; + static ZXIC_UINT32 etm_shape_num[22][128] = {{0}}; + ZXIC_UINT32 total_para_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 
0; + DPP_TM_SHAPE_PARA_TABLE shap_para_tbl_t = {0}; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 2); + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_data_buff); + + rc = zxic_comm_mutex_lock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_lock"); + + if (size != (22 * 128 * sizeof(ZXIC_UINT32))) { + { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, "date is not complete.size[%d] err!!\n", size); + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_ERR; + } + + ZXIC_COMM_MEMCPY(etm_shape_num, p_data_buff, 22 * 128 * sizeof(ZXIC_UINT32)); + + /* ETM: set flow and se profile to cpu mem */ + for (i = 0; i < 22; i++) { + for (j = 0; j < shape_table_profile_num; j++) { + total_para_id = i * shape_table_profile_num + j; + + rc = dpp_tm_shape_para_get(dev_id, total_para_id, &shap_para_tbl_t); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT_UNLOCK(dev_id, rc, "dpp_tm_shape_para_get", &g_dpp_tm_global_var_rw_mutex); + + g_dpp_etm_shape_para_table[dev_id][i][j].shape_num = etm_shape_num[i][j]; + g_dpp_etm_shape_para_table[dev_id][i][j].shape_cbs = shap_para_tbl_t.shape_cbs; + g_dpp_etm_shape_para_table[dev_id][i][j].shape_cir = shap_para_tbl_t.shape_cir; + } + } + + rc = zxic_comm_mutex_unlock(&g_dpp_tm_global_var_rw_mutex); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** 获取TM的全局变量,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param p_flag 上层释放data_buff的标志,1:需要上层free,0:不需要上层free +* @param p_size data_buff的长度 +* @param pp_data_buff 二级指针(指向函数内部malloc空间的地址) +* +* @return +* @remark 无 +* @see +* @author XXX @date 2017/03/09 +************************************************************/ +DPP_STATUS dpp_tm_glb_mgr_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_flag, ZXIC_UINT32 *p_size, ZXIC_UINT8 
**pp_data_buff) +{ + ZXIC_UINT32 size = 0; + static ZXIC_UINT32 etm_shape_num[22][128] = {{0}}; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 j = 0; + + ZXIC_COMM_CHECK_DEV_INDEX_NO_ASSERT(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 2); + + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_flag); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_size); + + size = 22 * 128 * sizeof(ZXIC_UINT32); + *pp_data_buff = ZXIC_COMM_MALLOC(size); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, *pp_data_buff); + + for (i = 0; i < 22; i++) { + for (j = 0; j < 128; j++) { + etm_shape_num[i][j] = g_dpp_etm_shape_para_table[dev_id][i][j].shape_num; + } + } + + ZXIC_COMM_MEMCPY(*pp_data_buff, etm_shape_num, 22 * 128 * sizeof(ZXIC_UINT32)); + + *p_flag = 1; + *p_size = size; + + return DPP_OK; +} + +/***********************************************************/ +/** 获取TM的全局变量大小,shape_para只保存profile被使用的数量,整形相关参数从寄存器中重新读取 +* @param dev_id +* @param p_size data_buff的长度 +* +* @return +* @remark 无 +* @see +* @author XXX @date 2017/03/09 +************************************************************/ +DPP_STATUS dpp_tm_glb_size_get(ZXIC_UINT32 dev_id, ZXIC_UINT32 *p_size) +{ + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_size); + + *p_size = 22 * 128 * sizeof(ZXIC_UINT32); + + return DPP_OK; +} + +#endif +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/Kbuild.include new file mode 100644 index 000000000000..08426d2c85a9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/sdk/source/dev/reg/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_module.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_module.c new file mode 100644 index 000000000000..51b544dfb745 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_module.c @@ -0,0 +1,311 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_module.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 石金锋 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_module.h" +#include "dpp_dev.h" +#include "dpp_reg_struct.h" +#include "dpp_reg_api.h" + +/***********************************************************/ +/** dpp通用读寄存器函数 +* @param dev_id 设备id +* @param addr 地址读地址 +* @param p_data 返回数据 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author 石金锋 @date 2014/01/28 +************************************************************/ +DPP_STATUS dpp_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + return dpp_dev_read_channel(dev, addr, 1, p_data); +} + +/***********************************************************/ +/** dpp通用写寄存器函数 +* @param dev_id 设备id +* @param addr 地址写地址 +* @param p_data 写入数据 +* +* @return 0表示成功 非0表示操作失败 +* @remark 无 +* @see +* @author 石金锋 @date 2014/01/28 +************************************************************/ +DPP_STATUS dpp_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + return dpp_dev_write_channel(dev, addr, 1, p_data); +} + +#if ZXIC_REAL("SE") + +/***********************************************************/ +/** SE读接口 +* @param dev_id 设备号 +* @param addr 地址 +* @param p_data 数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/11/15 +************************************************************/ +DPP_STATUS dpp_se_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + return dpp_read(dev, addr, p_data); +} + 
+/***********************************************************/ +/** SE写接口 +* @param dev_id 设备号 +* @param addr 地址 +* @param p_data 数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/11/15 +************************************************************/ +DPP_STATUS dpp_se_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + return dpp_write(dev, addr, p_data); +} +#endif + +#if ZXIC_REAL("SE ALG") +/***********************************************************/ +/** se alg模块读寄存器函数 +* @param dev_id 设备id +* @param addr 地址 读地址 +* @param p_data 返回数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_se_alg_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rtn = DPP_OK; + + ZXIC_UINT32 cpu_rd_rdy_addr = + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0048; + ZXIC_UINT32 ind_data0_addr = + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x004c; + ZXIC_UINT32 ind_cmd_addr = + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0004; + ZXIC_UINT32 cpu_rd_rdy_reg = 0; + ZXIC_UINT32 ind_data0_reg = 0; + ZXIC_UINT32 ind_cmd_reg = 0; + ZXIC_UINT32 cmd_data = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 cpu_rdy = 0; + ZXIC_UINT32 read_cnt = 0; + ZXIC_UINT32 recheck_flag = 20; + + ZXIC_MUTEX_T *p_alg_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + ind_data0_reg = dpp_reg_addr_convert( + DEV_ID(dev), SE4K, DPP_REG_FLAG_DIRECT, ind_data0_addr); + ind_cmd_reg = dpp_reg_addr_convert(DEV_ID(dev), SE4K, + DPP_REG_FLAG_DIRECT, ind_cmd_addr); + cpu_rd_rdy_reg = dpp_reg_addr_convert( + DEV_ID(dev), SE4K, DPP_REG_FLAG_DIRECT, cpu_rd_rdy_addr); + + rtn = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_REG, 
&p_alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_dev_opr_mutex_get"); + + rtn = zxic_comm_mutex_lock(p_alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "zxic_comm_mutex_lock"); + + /* dpp_module_get_se_alg_baseaddr(&base_addr); */ + + cmd_data = ((ZXIC_UINT32)0x1 << 31) | ((ZXIC_UINT32)0xF << 17) | addr; + rtn = dpp_dev_write_channel(dev, ind_cmd_reg, 1, &cmd_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rtn, "dpp_dev_write_channel", + p_alg_mutex); + + while (!(cpu_rdy & 0x1)) { + rtn = dpp_dev_read_channel(dev, cpu_rd_rdy_reg, 1, &cpu_rdy); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rtn, "dpp_dev_read_channel", p_alg_mutex); + + read_cnt++; + /* zxic_comm_sleep(10); */ + + if (read_cnt > DPP_RD_CNT_MAX * DPP_RD_CNT_MAX) { + if (recheck_flag > 0) { + recheck_flag--; + read_cnt = 0; + rtn = dpp_dev_write_channel(dev, ind_cmd_reg, 1, + &cmd_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rtn, + "dpp_dev_write_channel", p_alg_mutex); + } else { + ZXIC_COMM_PRINT( + "Error!!! 
dpp_se_alg_read get cpu_rd_rdone failed!!!\n"); + zxic_comm_mutex_unlock(p_alg_mutex); + /* ZXIC_COMM_ASSERT(0); */ /* xjw mod for OLT to not assert because of causing reboot at 18.8.7 */ + return DPP_ERR; + } + } + + /* ZXIC_COMM_CHECK_DEV_INDEX(dev_id, read_cnt, 0, DPP_RD_CNT_MAX); */ + } + + for (i = 0; i < 16; i++) { + rtn = dpp_dev_read_channel(dev, ind_data0_reg + 4 * i, 1, + p_data + 15 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rtn, "dpp_dev_read_channel", p_alg_mutex); + } + + rtn = zxic_comm_mutex_unlock(p_alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +/***********************************************************/ +/** se alg模块写寄存器函数 +* @param dev_id 设备id +* @param addr 地址 读地址 +* @param p_data 返回数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/03 +************************************************************/ +DPP_STATUS dpp_se_alg_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rtn = DPP_OK; + + ZXIC_UINT32 ind_data0_addr = + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0008; + ZXIC_UINT32 cmd_data = 0; + ZXIC_UINT32 ind_cmd_addr = + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0004; + ZXIC_UINT32 ind_data0_reg = 0; + ZXIC_UINT32 ind_cmd_reg = 0; + ZXIC_UINT32 i = 0; + ZXIC_MUTEX_T *p_alg_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), DEV_ID(dev), 0, + DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + ind_data0_reg = dpp_reg_addr_convert( + DEV_ID(dev), SE4K, DPP_REG_FLAG_DIRECT, ind_data0_addr); + ind_cmd_reg = dpp_reg_addr_convert(DEV_ID(dev), SE4K, + DPP_REG_FLAG_DIRECT, ind_cmd_addr); + + /* dpp_module_get_se_alg_baseaddr(&base_addr); */ + rtn = dpp_dev_opr_mutex_get(dev, DPP_DEV_MUTEX_T_REG, &p_alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "dpp_dev_opr_mutex_get"); + + rtn = zxic_comm_mutex_lock(p_alg_mutex); + 
ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "zxic_comm_mutex_lock"); + + /* write data */ + for (i = 0; i < 16; i++) { + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "dpp_se_alg_write: addr=0x%08x, data=0x%08x.\n", + ind_data0_reg + 4 * i, *(p_data + 15 - i)); + rtn = dpp_dev_write_channel(dev, ind_data0_reg + 4 * i, 1, + p_data + 15 - i); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK( + DEV_ID(dev), rtn, "dpp_dev_write_channel", p_alg_mutex); + } + + /* write cmd */ + /* cmd_data = ((ZXIC_UINT32)0xF << 17) | addr; */ + cmd_data = addr & 0x1fffff; /* mod by tf */ + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), "dpp_se_alg_write: addr=0x%08x, data=0x%08x.\n", + ind_cmd_reg, cmd_data); + rtn = dpp_dev_write_channel(dev, ind_cmd_reg, 1, &cmd_data); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(dev), rtn, "dpp_dev_write_channel", + p_alg_mutex); + + rtn = zxic_comm_mutex_unlock(p_alg_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rtn, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +#endif + +#if ZXIC_REAL("PPU") +/***********************************************************/ +/** PPU读接口 +* @param dev_id 设备号 +* @param addr 地址 +* @param p_data 数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/11/15 +************************************************************/ +DPP_STATUS dpp_ppu_read(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + return dpp_read(dev, addr, p_data); +} + +/***********************************************************/ +/** PPU写接口 +* @param dev_id 设备号 +* @param addr 地址 +* @param p_data 数据 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author ls @date 2016/11/15 +************************************************************/ +DPP_STATUS dpp_ppu_write(DPP_DEV_T *dev, ZXIC_UINT32 addr, ZXIC_UINT32 *p_data) +{ + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_data); + + return dpp_write(dev, addr, p_data); +} +#endif \ No 
newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_pci.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_pci.c new file mode 100644 index 000000000000..00bdb8f517f1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_pci.c @@ -0,0 +1,90 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_pci.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 石金锋 +* 完成日期 : 2014/02/10 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: 代码规范性修改 +* 修改日期: 2014/02/10 +* 版 本 号: +* 修 改 人: 丁金凤 +* 修改内容: +***************************************************************/ +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_pci.h" +#include "dpp_dev.h" + +/***********************************************************/ +/** +* @param abs_addr +* @param p_data +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/14 +************************************************************/ +ZXIC_UINT32 dpp_pci_write32(DPP_DEV_T *dev, ZXIC_ADDR_T abs_addr, + ZXIC_UINT32 *p_data) +{ + /* ZXIC_UINT32 rtn = 0; */ + ZXIC_UINT32 data = 0; + ZXIC_UINT64 addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_data); + + data = *p_data; + + if (zxic_comm_is_big_endian()) { + data = ZXIC_COMM_CONVERT32(data); + } + + addr = abs_addr + SYS_VF_NP_BASE_OFFSET; + *((ZXIC_VOL ZXIC_UINT32 *)addr) = data; + + return DPP_OK; +} + +/***********************************************************/ +/** +* @param abs_addr +* @param p_data +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/14 +************************************************************/ +ZXIC_UINT32 dpp_pci_read32(DPP_DEV_T *dev, ZXIC_ADDR_T abs_addr, + ZXIC_UINT32 *p_data) +{ + ZXIC_UINT32 data = 0; + ZXIC_UINT64 addr = 0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_POINT(p_data); + + addr = abs_addr + SYS_VF_NP_BASE_OFFSET; + data = *((ZXIC_VOL 
ZXIC_UINT32 *)addr); + + if (zxic_comm_is_big_endian()) { + data = ZXIC_COMM_CONVERT32(data); + } + *p_data = data; + + if (0xdadedade == *p_data) { + ZXIC_COMM_TRACE_DEBUG( + "PCIE time out err happening at addr[0x%llx]\n", + abs_addr); + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_api.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_api.c new file mode 100644 index 000000000000..72ac1bd32b85 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_api.c @@ -0,0 +1,571 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_reg.c +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : 王春雷 +* 完成日期 : 2014/02/12 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_module.h" +#include "dpp_dev.h" +#include "dpp_reg_api.h" +#include "dpp_reg_info.h" +#include "dpp_agent_channel.h" +#include "dpp_pci.h" + +#define REG_DATA_MAX (512 / 32) + +static DPP_REG_OFFSET_ADDR g_module_offset_addr[] = { + { DTB4K, BAR_4K_DTB, SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR }, + { STAT4K, BAR_4K_ETCAM, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR }, + { PPU4K, BAR_4K_CLS0, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4000 }, + { SE4K, BAR_4K_SE, SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR }, + { SMMU14K, BAR_4K_SMMU1, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR } +}; + +/***********************************************************/ +/** 获取寄存器属性 +* @param reg_no 寄存器编号 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/04/17 +************************************************************/ +DPP_REG_T *dpp_reg_info_get(ZXIC_UINT32 reg_no) +{ + ZXIC_COMM_CHECK_INDEX_RETURN_NULL(reg_no, 0, 
REG_ENUM_MAX_VALUE - 1); + + return &g_dpp_reg_info[reg_no]; +} + +/***********************************************************/ +/** 根据寄存器编号获得寄存器芯片内绝对地址 +* @param reg_no +* @param m_offset +* @param n_offset +* +* @return +* @remark 无 +* @see +* @author 王春雷 @date 2014/03/19 +************************************************************/ +ZXIC_UINT32 dpp_reg_get_reg_addr(ZXIC_UINT32 reg_no, ZXIC_UINT32 m_offset, + ZXIC_UINT32 n_offset) +{ + ZXIC_UINT32 addr = 0; + DPP_REG_T *p_reg_info = NULL; + + ZXIC_COMM_CHECK_INDEX(reg_no, 0, REG_ENUM_MAX_VALUE - 1); + + p_reg_info = dpp_reg_info_get(reg_no); + ZXIC_COMM_CHECK_POINT(p_reg_info); + + /* 计算写地址 */ + addr = p_reg_info->addr; + + if (p_reg_info->array_type & DPP_REG_UNI_ARRAY) { + if (n_offset > (p_reg_info->n_size - 1)) + ZXIC_COMM_TRACE_ERROR( + "reg n_offset is out of range, reg_no:%d, n:%d, size:%d\n", + reg_no, n_offset, p_reg_info->n_size - 1); + + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + addr, n_offset * p_reg_info->n_step); + addr += n_offset * p_reg_info->n_step; + } else if (p_reg_info->array_type & DPP_REG_BIN_ARRAY) { + if ((n_offset > (p_reg_info->n_size - 1)) || + (m_offset > (p_reg_info->m_size - 1))) + ZXIC_COMM_TRACE_ERROR( + "reg n_offset or m_offset is out of range, reg_no:%d, n:%d, n_size:%d, m:%d, m_size:%d,\n", + reg_no, n_offset, p_reg_info->n_size - 1, + m_offset, p_reg_info->m_size - 1); + + ZXIC_COMM_CHECK_INDEX_MUL_OVERFLOW_NO_ASSERT( + m_offset, p_reg_info->m_step); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + (m_offset * (p_reg_info->m_step)), + (n_offset * (p_reg_info->n_step))); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT( + addr, (m_offset * (p_reg_info->m_step)) + + (n_offset * (p_reg_info->n_step))); + addr += (m_offset * (p_reg_info->m_step)) + + (n_offset * (p_reg_info->n_step)); + } + + return addr; +} + +/***********************************************************/ +/** 判断是否为4K寄存器 +* @param reg_module +* +* @return +* @remark 无 +* @see +* @author cq @date 
2023/11/29 +************************************************************/ +BOOLEAN dpp_4k_reg(ZXIC_UINT32 reg_module) +{ + if ((DTB4K <= reg_module) && (SMMU14K >= reg_module)) { + return ZXIC_TRUE; + } + + return ZXIC_FALSE; +} + +/***********************************************************/ +/** 获取NP对应模块的映射地址偏移(riscv或者非4K寄存器不做转换,host根据映射情况做转换) +* @param dev_id +* @param reg_module +* @param flags 标志位,DPP_REG_FLAG_INDIRECT DPP_REG_FLAG_DIRECT +* @param addr +* +* @return 映射地址 +* @remark 无 +* @see +* @author cq @date 2023/11/29 +************************************************************/ +ZXIC_UINT32 dpp_reg_addr_convert(ZXIC_UINT32 dev_id, ZXIC_UINT32 reg_module, + ZXIC_UINT32 flags, ZXIC_UINT32 addr) +{ + ZXIC_UINT32 convert_addr = addr; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 cluster_index = 0; + ZXIC_UINT32 index_4k = 0; + ZXIC_UINT32 size_4k = 4096; + ZXIC_UINT32 module_addr_offset = 0; + ZXIC_UINT32 dtb_addr_offset = + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR; + + if (DPP_REG_FLAG_INDIRECT == flags) { + return addr; + } + + for (i = 0; + i < (sizeof(g_module_offset_addr) / sizeof(DPP_REG_OFFSET_ADDR)); + i++) { + if (reg_module == g_module_offset_addr[i].reg_module) { + module_addr_offset = + g_module_offset_addr[i].addr_offset; + if (PPU4K == reg_module) { + cluster_index = (addr - module_addr_offset) / + DPP_PPU_CLUSTER_SPACE_SIZE; + index_4k = g_module_offset_addr[i].index_4k + + cluster_index; + module_addr_offset += + cluster_index * + DPP_PPU_CLUSTER_SPACE_SIZE; + } else { + index_4k = g_module_offset_addr[i].index_4k; + } + convert_addr = + ((addr + (size_4k * index_4k) + + dtb_addr_offset) > module_addr_offset) ? 
+ (addr + (size_4k * index_4k) + + dtb_addr_offset - module_addr_offset) : + addr; + } + } + + return convert_addr; +} + +/***********************************************************/ +/** 通用寄存器写函数 +* @param dev_id 设备号,支持多芯片 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/12 +************************************************************/ +DPP_STATUS dpp_reg_write(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_VOID *p_data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 addr = 0; + +#ifdef DPP_FLOW_HW_INIT + ZXIC_UINT32 convert_addr = 0; +#endif + + ZXIC_UINT32 p_buff[REG_DATA_MAX] = { 0 }; + ZXIC_UINT32 temp_data = 0; + ZXIC_UINT32 reg_type = 0; + ZXIC_UINT32 reg_module = 0; + ZXIC_UINT32 reg_width = 0; + DPP_REG_T *p_reg_info = NULL; + DPP_FIELD_T *p_field_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), reg_no, 0, + REG_ENUM_MAX_VALUE - 1); + ZXIC_COMM_CHECK_POINT(p_data); + + p_reg_info = dpp_reg_info_get(reg_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info); + p_field_info = p_reg_info->p_fields; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_field_info); + reg_type = p_reg_info->flags; + reg_module = p_reg_info->module_no; + reg_width = p_reg_info->width; + ZXIC_COMM_CHECK_INDEX_UPPER(reg_width, REG_DATA_MAX * 4); + +#ifndef ZXIC_OS_WIN +#ifdef DPP_FOR_LLT + if (dpp_stump_reg_en_check(DEV_ID(dev), reg_no) && + (p_reg_info->flags == DPP_REG_FLAG_DIRECT || + p_reg_info->flags == DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT)) { + rc = dpp_stump_reg_write(DEV_ID(dev), reg_no, m_offset, + n_offset, p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stump_reg_write"); + + return DPP_OK; + } +#endif +#endif + + /* 提取各字段数据,按各字段实际bit位宽进行拼装 */ + for (i = 0; i < 
p_reg_info->field_num; i++) { + if (p_field_info[i].len <= 32) { + /* lint -e64 */ + temp_data = *((ZXIC_UINT32 *)p_data + i) & + ZXIC_COMM_GET_BIT_MASK(ZXIC_UINT32, + p_field_info[i].len); + rc = zxic_comm_write_bits_ex((ZXIC_UINT8 *)p_buff, + p_reg_info->width * 8, + temp_data, + p_field_info[i].msb_pos, + p_field_info[i].len); + /* lint +e64 */ + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, + "zxic_comm_write_bits_ex"); + } + } + + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "zxic_comm_write_bits_ex data = 0x%08x.\n", + p_buff[0]); + /* 若host cpu为小端字节序,则以4字节为单位对数据进行字节序转换 */ + if (!zxic_comm_is_big_endian()) { + for (i = 0; i < ((p_reg_info->width) / 4); i++) { + p_buff[i] = ZXIC_COMM_CONVERT32(p_buff[i]); + + /* for debug */ + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), + "ZXIC_COMM_CONVERT32 data = 0x%08x.\n", + p_buff[i]); + } + } + + /* 计算写地址 */ + addr = dpp_reg_get_reg_addr( + reg_no, m_offset, + n_offset); /* 通过寄存器编号等三个参数获得寄存器的基地址 */ + + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), + "reg_no = %d. m_offset = %d n_offset = %d\n", + reg_no, m_offset, n_offset); + ZXIC_COMM_TRACE_DEV_DEBUG(DEV_ID(dev), "baseaddr = 0x%08x.\n", addr); + +#ifdef DPP_FLOW_HW_INIT + if (dpp_4k_reg(reg_module)) { + /* 调用寄存器写接口 */ + convert_addr = dpp_reg_addr_convert(DEV_ID(dev), reg_module, + reg_type, addr); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info->p_write_fun); + rc = p_reg_info->p_write_fun(dev, convert_addr, p_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "p_reg_info->p_write_fun"); + } +#else + if (DTB4K == reg_module) { + /* 调用寄存器写接口 */ + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info->p_write_fun); + rc = p_reg_info->p_write_fun(dev, addr, p_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "p_reg_info->p_write_fun"); + } +#endif + else { + /* 调用代理通道寄存器写接口 */ + rc = dpp_agent_channel_reg_write(dev, reg_type, reg_no, + reg_width, addr, p_buff); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_agent_channel_reg_write"); + } + + return DPP_OK; +} + 
+/***********************************************************/ +/** 通用寄存器读函数 +* @param dev_id 设备号,支持多芯片 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 数据指针 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 王春雷 @date 2014/02/12 +************************************************************/ +DPP_STATUS dpp_reg_read(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_VOID *p_data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 addr = 0; +#ifdef DPP_FLOW_HW_INIT + ZXIC_UINT32 convert_addr = 0; +#endif + ZXIC_UINT32 reg_type = 0; + ZXIC_UINT32 p_buff[REG_DATA_MAX] = { 0 }; + ZXIC_UINT32 reg_module = 0; + ZXIC_UINT32 reg_width = 0; + DPP_REG_T *p_reg_info = NULL; + DPP_FIELD_T *p_field_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), reg_no, 0, + REG_ENUM_MAX_VALUE - 1); + ZXIC_COMM_CHECK_POINT(p_data); + + p_reg_info = dpp_reg_info_get(reg_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info); + p_field_info = p_reg_info->p_fields; + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_field_info); + reg_type = p_reg_info->flags; + reg_module = p_reg_info->module_no; + reg_width = p_reg_info->width; + ZXIC_COMM_CHECK_INDEX_UPPER(reg_width, REG_DATA_MAX * 4); + +#ifndef ZXIC_OS_WIN +#ifdef DPP_FOR_LLT + if (dpp_stump_reg_en_check(DEV_ID(dev), reg_no) && + (p_reg_info->flags == DPP_REG_FLAG_DIRECT || + p_reg_info->flags == DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT)) { + rc = dpp_stump_reg_read(DEV_ID(dev), reg_no, m_offset, n_offset, + p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stump_reg_read"); + + return DPP_OK; + } +#endif +#endif + + /* 计算读地址 */ + addr = dpp_reg_get_reg_addr(reg_no, m_offset, n_offset); +#ifdef DPP_FLOW_HW_INIT + if (dpp_4k_reg(reg_module)) { + /* 调用寄存器读接口 */ + convert_addr = dpp_reg_addr_convert(DEV_ID(dev), reg_module, + 
reg_type, addr); + ZXIC_COMM_CHECK_POINT(p_reg_info->p_read_fun); + rc = p_reg_info->p_read_fun(dev, convert_addr, p_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "p_reg_info->p_read_fun"); + } +#else + if (DTB4K == reg_module) { + /* 调用寄存器读接口 */ + ZXIC_COMM_CHECK_POINT(p_reg_info->p_read_fun); + rc = p_reg_info->p_read_fun(dev, addr, p_buff); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "p_reg_info->p_read_fun"); + } +#endif + else { + /* 调用代理通道寄存器读接口 */ + rc = dpp_agent_channel_reg_read(dev, reg_type, reg_no, + reg_width, addr, p_buff); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_agent_channel_reg_read"); + } + + /* 若host cpu为小端字节序,则以4字节为单位对数据进行字节序转换 */ + if (!zxic_comm_is_big_endian()) { + for (i = 0; i < ((p_reg_info->width) / 4); i++) { + /* for debug */ + + //printf("dpp_reg_read data = 0x%08x.\n", p_buff[i]); + ZXIC_COMM_TRACE_DEV_DEBUG( + DEV_ID(dev), "dpp_reg_read data = 0x%08x.\n", + p_buff[i]); + + p_buff[i] = ZXIC_COMM_CONVERT32(p_buff[i]); + } + } + + /* 提取各字段数据,每字段以ZXIC_UINT32形式返回 */ + for (i = 0; i < p_reg_info->field_num; i++) { + /* lint -e64 */ + rc = zxic_comm_read_bits_ex((ZXIC_UINT8 *)p_buff, + p_reg_info->width * 8, + (ZXIC_UINT32 *)p_data + i, + p_field_info[i].msb_pos, + p_field_info[i].len); + ZXIC_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_read_bits_ex"); + /* lint +e64 */ + } + + return DPP_OK; +} + +/***********************************************************/ +/** 通过寄存器编号配置寄存器,仅适用于32bit位宽 + 的常规寄存器 +* @param dev_id 设备号 +* @param reg_no 寄存器编号 +* @param data 数据,32bit +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author XXX @date 2019/07/10 +************************************************************/ +DPP_STATUS dpp_reg_write32(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, ZXIC_UINT32 data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 addr = 0; + DPP_REG_T *p_reg_info = NULL; + ZXIC_UINT32 value = data; + ZXIC_UINT32 j = 0; + ZXIC_UINT32 k = 0; + ZXIC_UINT32 m_size = 0; + ZXIC_UINT32 n_size = 0; + ZXIC_UINT32 reg_type = 0; + // ZXIC_UINT32 reg_module = 
0; + + ZXIC_COMM_CHECK_POINT(dev); + ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), reg_no, 0, + REG_ENUM_MAX_VALUE - 1); + + p_reg_info = dpp_reg_info_get(reg_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info); + reg_type = p_reg_info->flags; + // reg_module = p_reg_info->module_no; + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_reg_info->width, 4, + 4); /* width must be 32bit */ + + m_size = (p_reg_info->m_size == 0) ? (1) : (p_reg_info->m_size); + n_size = (p_reg_info->n_size == 0) ? (1) : (p_reg_info->n_size); + + /* 计算读地址 */ + for (j = 0; j < m_size; j++) { + for (k = 0; k < n_size; k++) { +#ifndef ZXIC_OS_WIN +#ifdef DPP_FOR_LLT + if (dpp_stump_reg_en_check(DEV_ID(dev), reg_no) && + (p_reg_info->flags == DPP_REG_FLAG_DIRECT)) { + rc = dpp_stump_reg_write(DEV_ID(dev), reg_no, j, + k, &data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_stump_reg_write"); + + return DPP_OK; + } +#endif +#endif + /* 计算写地址 */ + addr = dpp_reg_get_reg_addr(reg_no, j, k); + + /* 调用代理通道寄存器写接口 */ + rc = dpp_agent_channel_reg_write(dev, reg_type, reg_no, + 4, addr, &value); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, + "dpp_agent_channel_reg_write"); + } + } + + return DPP_OK; +} + +/***********************************************************/ +/** 通过寄存器编号读取寄存器的值,仅适用于32bit位宽的常规寄存器 +* @param dev_id 设备号 +* @param reg_no 寄存器编号 +* @param m_offset 二元寄存器的m偏移 +* @param n_offset 一元寄存器或二元寄存器的n偏移 +* @param p_data 出参,返回读取寄存器数值 +* +* @return DPP_OK-成功,DPP_ERR-失败 +* @remark 无 +* @see +* @author 石金锋 @date 2015/03/09 +************************************************************/ +DPP_STATUS dpp_reg_read32(DPP_DEV_T *dev, ZXIC_UINT32 reg_no, + ZXIC_UINT32 m_offset, ZXIC_UINT32 n_offset, + ZXIC_UINT32 *p_data) +{ + DPP_STATUS rc = 0; + ZXIC_UINT32 addr = 0; + ZXIC_UINT32 reg_type = 0; + // ZXIC_UINT32 reg_module = 0; + ZXIC_UINT32 p_buff[REG_DATA_MAX] = { 0 }; + + DPP_REG_T *p_reg_info = NULL; + + ZXIC_COMM_CHECK_POINT(dev); + 
ZXIC_COMM_CHECK_INDEX_UPPER(DEV_ID(dev), DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), reg_no, 0, + REG_ENUM_MAX_VALUE - 1); + ZXIC_COMM_CHECK_POINT(p_data); + + p_reg_info = dpp_reg_info_get(reg_no); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(dev), p_reg_info); + reg_type = p_reg_info->flags; + // reg_module = p_reg_info->module_no; + + ZXIC_COMM_CHECK_DEV_INDEX(DEV_ID(dev), p_reg_info->width, 4, + 4); /* width must be 32bit */ + +#ifndef ZXIC_OS_WIN +#ifdef DPP_FOR_LLT + if (dpp_stump_reg_en_check(DEV_ID(dev), reg_no) && + (p_reg_info->flags == DPP_REG_FLAG_DIRECT)) { + rc = dpp_stump_reg_read(DEV_ID(dev), reg_no, m_offset, n_offset, + p_data); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_stump_reg_read"); + + return DPP_OK; + } +#endif +#endif + + /* 计算读地址 */ + addr = dpp_reg_get_reg_addr(reg_no, m_offset, n_offset); + + /* 调用代理通道寄存器读接口 */ + rc = dpp_agent_channel_reg_read(dev, reg_type, reg_no, 4, addr, p_buff); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(dev), rc, "dpp_agent_channel_reg_read"); + *p_data = p_buff[0]; + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_info.c b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_info.c new file mode 100644 index 000000000000..7fb416d8918d --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/sdk/source/dev/reg/dpp_reg_info.c @@ -0,0 +1,98226 @@ +#include "dpp_module.h" +#include "dpp_reg_struct.h" +#include "dpp_reg_info.h" + +DPP_FIELD_T g_etm_cfgmt_cpu_check_reg_reg[] = { + { "cpu_check_reg", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cfgmt_blksize_reg[] = { + { "cfgmt_blksize", DPP_FIELD_FLAG_RW, 1, 2, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_reg_int_state_reg_reg[] = { + { "shap_int", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "crdt_int", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "mmu_int", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "qmu_int", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "cgavd_int", DPP_FIELD_FLAG_RO, 2, 1, 
0x0, 0x0 }, + { "olif_int", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "cfgmt_int_buf", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_reg_int_mask_reg_reg[] = { + { "shap_int_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "crdt_int_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "tmmu_int_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "qmu_int_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "cgavd_int_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "olif_int_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "cfgmt_int_buf_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_timeout_limit_reg[] = { + { "timeout_limit", DPP_FIELD_FLAG_RW, 15, 16, 0xfff, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_subsystem_rdy_reg_reg[] = { + { "olif_rdy", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "qmu_rdy", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "cgavd_rdy", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "tmmu_rdy", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "shap_rdy", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "crdt_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_subsystem_en_reg_reg[] = { + { "subsystem_en_buf_31_28", DPP_FIELD_FLAG_RW, 31, 4, 0x0, 0x0 }, + { "subsystem_en_buf_25_0", DPP_FIELD_FLAG_RW, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cfgmt_int_reg_reg[] = { + { "cfgmt_int_buf", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_qmu_work_mode_reg[] = { + { "qmu_work_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cfgmt_ddr_attach_reg[] = { + { "cfgmt_ddr_attach", DPP_FIELD_FLAG_RW, 9, 10, 0xf, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cnt_mode_reg_reg[] = { + { "cfgmt_fc_count_mode", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "cfgmt_count_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_clkgate_en_reg[] = { + { "clkgate_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_cfgmt_softrst_en_reg[] = { + { "softrst_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_imem_prog_full_reg[] = { + { "imem_prog_full_assert", DPP_FIELD_FLAG_RW, 24, 9, 0x9c, 0x0 }, + { "imem_prog_full_negate", DPP_FIELD_FLAG_RW, 8, 9, 0x94, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_para_prog_full_reg[] = { + { "qmu_para_prog_full_assert", DPP_FIELD_FLAG_RW, 26, 11, 0x370, 0x0 }, + { "qmu_para_prog_full_negate", DPP_FIELD_FLAG_RW, 10, 11, 0x370, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_int_mask_reg[] = { + { "emem_dat_sop_err_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "emem_dat_eop_err_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "imem_dat_sop_err_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "imem_dat_eop_err_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "crcram_parity_err_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "emem_fifo_ecc_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "imem_fifo_ecc_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "emem_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "emem_fifo_udf_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "imem_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "imem_fifo_udf_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "para_fifo_ecc_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "para_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "para_fifo_udf_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "itmh_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "itmh_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "order_fifo_parity_err_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "order_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "order_fifo_udf_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_itmhram_parity_err_2_int_reg[] = { + { "emem_dat_sop_err", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "emem_dat_eop_err", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "imem_dat_sop_err", 
DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "imem_dat_eop_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "crcram_parity_err_1_int", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "emem_fifo_ecc_single_err_int", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "emem_fifo_ecc_double_err_int", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "imem_fifo_ecc_single_err_int", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "imem_fifo_ecc_double_err_int", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "emem_fifo_ovf_int", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "emem_fifo_udf_int", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "imem_fifo_ovf_int", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "imem_fifo_udf_int", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "para_fifo_ecc_single_err_int", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "para_fifo_ecc_double_err_int", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "para_fifo_ovf_int", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "para_fifo_udf_int", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "itmh_ecc_single_err_int", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "itmh_ecc_double_err_int", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "order_fifo_parity_err_int", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "order_fifo_ovf_int", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "order_fifo_udf_int", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_mask_h_reg[] = { + { "lif0_port_rdy_mask_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_mask_l_reg[] = { + { "lif0_port_rdy_mask_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_cfg_h_reg[] = { + { "lif0_port_rdy_cfg_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_cfg_l_reg[] = { + { "lif0_port_rdy_cfg_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_link_rdy_mask_cfg_reg[] = { + { "lif0_link_rdy_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "lif0_link_rdy_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_stat_cfg_reg[] = { + { "all_or_by_port", DPP_FIELD_FLAG_RW, 24, 1, 0x0, 0x0 }, + { "i_or_e_sel", DPP_FIELD_FLAG_RW, 17, 2, 0x0, 0x0 }, + { "port_or_dest_id_sel", DPP_FIELD_FLAG_RW, 8, 1, 0x0, 0x0 }, + { "port_dest_id", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x1 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_sop_stat_reg[] = { + { "tm_lif_sop_stat", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_eop_stat_reg[] = { + { "tm_lif_eop_stat", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_vld_stat_reg[] = { + { "tm_lif_vld_stat", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_prog_full_assert_cfg_reg[] = { + { "prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 31, 16, 0xdc, 0x0 }, + { "prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 15, 16, 0xc8, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_int_reg[] = { + { "cgavd_int", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_ram_err_reg[] = { + { "flow_qnum_intb", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "flow_qnum_inta", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "pp_qlen_inta", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "pp_qlen_intb", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "pp_tdth_int", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "flow_tdth_inta", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "flow_tdth_intb", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "flow_qlen_inta", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "flow_qlen_intb", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "qmu_cgavd_fifo_uv_int", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "qmu_cgavd_fifo_ov_int", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "pds_deal_fifo_ov_int", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "pds_deal_fifo_uv_int", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_int_mask_reg[] = { + { "cgavd_int_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_ram_err_int_mask_reg[] = { + { "flow_qnum_inta_mask", 
DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "flow_qnum_intb_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "pp_qlen_inta_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "pp_qlen_intb_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "pp_tdth_int_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "flow_tdth_inta_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "flow_tdth_intb_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "flow_qlen_inta_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "flow_qlen_intb_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "qmu_cgavd_fifo_uv_int_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "qmu_cgavd_fifo_ov_int_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pds_deal_fifo_ov_int_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pds_deal_fifo_uv_int_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cfgmt_byte_mode_reg[] = { + { "cfgmt_byte_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_avg_qlen_return_zero_en_reg[] = { + { "avg_qlen_return_zero_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wred_q_len_th_reg[] = { + { "flow_wred_q_len_th", DPP_FIELD_FLAG_RW, 28, 29, 0x400, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wq_reg[] = { + { "wq_flow", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wred_max_th_reg[] = { + { "flow_wred_max_th", DPP_FIELD_FLAG_RW, 28, 29, 0x1000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wred_min_th_reg[] = { + { "flow_wred_min_th", DPP_FIELD_FLAG_RW, 28, 29, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wred_cfg_para_reg[] = { + { "flow_wred_cfg_para", DPP_FIELD_FLAG_RW, 31, 32, 0x13ec, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_avg_q_len_reg[] = { + { "pp_avg_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_td_th_reg[] = { + { "pp_td_th", DPP_FIELD_FLAG_RW, 28, 29, 0x400, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_ca_mtd_reg[] = { + { "pp_ca_mtd", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_etm_cgavd_pp_wred_grp_th_en_reg[] = { + { "pp_wred_grp", DPP_FIELD_FLAG_RW, 3, 3, 0x0, 0x0 }, + { "pp_wred_grp_th_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_wred_q_len_th_reg[] = { + { "pp_wred_q_len_th", DPP_FIELD_FLAG_RW, 28, 29, 0x100000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_wq_reg[] = { + { "wq_pp", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_wred_max_th_reg[] = { + { "pp_wred_max_th", DPP_FIELD_FLAG_RW, 28, 29, 0x80000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_wred_min_th_reg[] = { + { "pp_wred_min_th", DPP_FIELD_FLAG_RW, 28, 29, 0x800, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_cfg_para_reg[] = { + { "pp_cfg_para", DPP_FIELD_FLAG_RW, 31, 32, 0x9f600, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_avg_q_len_reg[] = { + { "sys_avg_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_td_th_reg[] = { + { "sys_td_th", DPP_FIELD_FLAG_RW, 28, 29, 0x1ff00, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_cgavd_metd_reg[] = { + { "sys_cgavd_metd", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_cfg_q_grp_para_reg[] = { + { "gred_q_len_th_sys", DPP_FIELD_FLAG_RW, 28, 29, 0x1fffe00, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_wq_reg[] = { + { "wq_sys", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_max_th_reg[] = { + { "gred_max_th", DPP_FIELD_FLAG_RW, 28, 29, 0x1000000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_mid_th_reg[] = { + { "gred_mid_th", DPP_FIELD_FLAG_RW, 28, 29, 0x600000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_min_th_reg[] = { + { "gred_min_th", DPP_FIELD_FLAG_RW, 28, 29, 0x80000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_cfg_para0_reg[] = { + { "gred_cfg_para0", DPP_FIELD_FLAG_RW, 31, 32, 0x92aaaa, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_cfg_para1_reg[] = { + { "gred_cfg_para1", DPP_FIELD_FLAG_RW, 31, 32, 0x1900000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_gred_cfg_para2_reg[] = { + { "gred_cfg_para2", DPP_FIELD_FLAG_RW, 31, 32, 0xf00000, 0x0 
}, +}; +DPP_FIELD_T g_etm_cgavd_sys_window_th_h_reg[] = { + { "sys_window_th_h", DPP_FIELD_FLAG_RW, 28, 29, 0x1d00000, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_window_th_l_reg[] = { + { "sys_window_th_l", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene0_reg[] = { + { "amplify_gene0", DPP_FIELD_FLAG_RW, 11, 12, 0x200, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene1_reg[] = { + { "amplify_gene1", DPP_FIELD_FLAG_RW, 11, 12, 0x82, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene2_reg[] = { + { "amplify_gene2", DPP_FIELD_FLAG_RW, 11, 12, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene3_reg[] = { + { "amplify_gene3", DPP_FIELD_FLAG_RW, 11, 12, 0x7e, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene4_reg[] = { + { "amplify_gene4", DPP_FIELD_FLAG_RW, 11, 12, 0x7c, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene5_reg[] = { + { "amplify_gene5", DPP_FIELD_FLAG_RW, 11, 12, 0x78, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene6_reg[] = { + { "amplify_gene6", DPP_FIELD_FLAG_RW, 11, 12, 0x70, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene7_reg[] = { + { "amplify_gene7", DPP_FIELD_FLAG_RW, 11, 12, 0x68, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene8_reg[] = { + { "amplify_gene8", DPP_FIELD_FLAG_RW, 11, 12, 0x60, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene9_reg[] = { + { "amplify_gene9", DPP_FIELD_FLAG_RW, 11, 12, 0x58, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene10_reg[] = { + { "amplify_gene10", DPP_FIELD_FLAG_RW, 11, 12, 0x50, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene11_reg[] = { + { "amplify_gene11", DPP_FIELD_FLAG_RW, 11, 12, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene12_reg[] = { + { "amplify_gene12", DPP_FIELD_FLAG_RW, 11, 12, 0x20, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene13_reg[] = { + { "amplify_gene13", DPP_FIELD_FLAG_RW, 11, 12, 0x8, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_amplify_gene14_reg[] = { + { "amplify_gene14", DPP_FIELD_FLAG_RW, 11, 12, 0x2, 0x0 }, +}; +DPP_FIELD_T 
g_etm_cgavd_amplify_gene15_reg[] = { + { "amplify_gene15", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_en_reg[] = { + { "equal_pkt_len_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th0_reg[] = { + { "equal_pkt_len_th0", DPP_FIELD_FLAG_RW, 14, 15, 0x4, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th1_reg[] = { + { "equal_pkt_len_th1", DPP_FIELD_FLAG_RW, 14, 15, 0x8, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th2_reg[] = { + { "equal_pkt_len_th2", DPP_FIELD_FLAG_RW, 14, 15, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th3_reg[] = { + { "equal_pkt_len_th3", DPP_FIELD_FLAG_RW, 14, 15, 0x18, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th4_reg[] = { + { "equal_pkt_len_th4", DPP_FIELD_FLAG_RW, 14, 15, 0x20, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th5_reg[] = { + { "equal_pkt_len_th5", DPP_FIELD_FLAG_RW, 14, 15, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len_th6_reg[] = { + { "equal_pkt_len_th6", DPP_FIELD_FLAG_RW, 14, 15, 0x60, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len0_reg[] = { + { "equal_pkt_len0", DPP_FIELD_FLAG_RW, 14, 15, 0x20, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len1_reg[] = { + { "equal_pkt_len1", DPP_FIELD_FLAG_RW, 14, 15, 0x30, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len2_reg[] = { + { "equal_pkt_len2", DPP_FIELD_FLAG_RW, 14, 15, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len3_reg[] = { + { "equal_pkt_len3", DPP_FIELD_FLAG_RW, 14, 15, 0x50, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len4_reg[] = { + { "equal_pkt_len4", DPP_FIELD_FLAG_RW, 14, 15, 0x60, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len5_reg[] = { + { "equal_pkt_len5", DPP_FIELD_FLAG_RW, 14, 15, 0x70, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len6_reg[] = { + { "equal_pkt_len6", DPP_FIELD_FLAG_RW, 14, 15, 0x78, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_equal_pkt_len7_reg[] = { + { "equal_pkt_len7", DPP_FIELD_FLAG_RW, 14, 15, 0x80, 0x0 }, +}; 
+DPP_FIELD_T g_etm_cgavd_flow_cpu_set_avg_len_reg[] = { + { "flow_cpu_set_avg_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_cpu_set_q_len_reg[] = { + { "flow_cpu_set_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_cpu_set_avg_q_len_reg[] = { + { "pp_cpu_set_avg_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_cpu_set_q_len_reg[] = { + { "pp_cpu_set_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_cpu_set_avg_len_reg[] = { + { "sys_cpu_set_avg_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_cpu_set_q_len_reg[] = { + { "sys_cpu_set_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pke_len_calc_sign_reg[] = { + { "pke_len_calc_sign", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_rd_cpu_or_ram_reg[] = { + { "cpu_sel_sys_q_len_en", DPP_FIELD_FLAG_RW, 5, 1, 0x0, 0x0 }, + { "cpu_sel_sys_avg_q_len_en", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "cpu_sel_pp_q_len_en", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "cpu_sel_pp_avg_q_len_en", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "cpu_sel_flow_q_len_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cpu_sel_flow_avg_q_len_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_q_len_update_disable_reg[] = { + { "q_len_sys_update_en", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "q_len_pp_update_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "q_len_flow_update_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_dp_sel_reg[] = { + { "flow_dp_sel_high", DPP_FIELD_FLAG_RW, 8, 1, 0x0, 0x0 }, + { "flow_dp_sel_mid", DPP_FIELD_FLAG_RW, 7, 1, 0x0, 0x0 }, + { "flow_dp_sel_low", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pp_dp_sel_high", DPP_FIELD_FLAG_RW, 5, 1, 0x0, 0x0 }, + { "pp_dp_sel_mid", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "pp_dp_sel_low", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "sys_dp_sel_high", 
DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "sys_dp_sel_mid", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "sys_dp_sel_low", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_sub_en_reg[] = { + { "cgavd_sa_sub_en", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "cgavd_sys_sub_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "cgavd_pp_sub_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cgavd_flow_sub_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_default_start_queue_reg[] = { + { "default_start_queue", DPP_FIELD_FLAG_RW, 13, 14, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_default_finish_queue_reg[] = { + { "default_finish_queue", DPP_FIELD_FLAG_RW, 13, 14, 0x1ff, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_protocol_start_queue_reg[] = { + { "protocol_start_queue", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_protocol_finish_queue_reg[] = { + { "protocol_finish_queue", DPP_FIELD_FLAG_RW, 13, 14, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_uniform_td_th_reg[] = { + { "uniform_td_th", DPP_FIELD_FLAG_RW, 28, 29, 0x200, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_uniform_td_th_en_reg[] = { + { "uniform_td_th_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_cfg_fc_reg[] = { + { "cgavd_cfg_fc", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_cfg_no_fc_reg[] = { + { "cgavd_cfg_no_fc", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_force_imem_omem_reg[] = { + { "imem_omem_force_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "choose_imem_omem", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_sys_q_len_l_reg[] = { + { "cgavd_sys_q_len_l", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_default_queue_en_reg[] = { + { "default_queue_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_protocol_queue_en_reg[] = { + { "protocol_queue_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_cgavd_cfg_tc_flowid_dat_reg[] = { + { "cfg_tc_flowid_dat", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_td_th_reg[] = { + { "flow_td_th", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_ca_mtd_reg[] = { + { "flow_ca_mtd", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_dynamic_th_en_reg[] = { + { "flow_dynamic_th_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_num_reg[] = { + { "pp_num", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_q_len_reg[] = { + { "flow_q_len", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_wred_grp_reg[] = { + { "flow_wred_grp", DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_avg_q_len_reg[] = { + { "flow_avg_q_len", DPP_FIELD_FLAG_RW, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_qos_sign_reg[] = { + { "qos_sign_flow_cfg_din", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_q_pri_reg[] = { + { "qpri_flow_cfg_din", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_tm_itmd_rd_low_reg[] = { + { "odma_tm_itmd_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_tm_itmd_rd_mid_reg[] = { + { "odma_tm_itmd_mid", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_tm_itmd_rd_high_reg[] = { + { "odma_tm_itmd_high", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_stat_pkt_len_reg[] = { + { "expect_deq_pkt_len", DPP_FIELD_FLAG_RW, 25, 10, 0x0, 0x0 }, + { "expect_enq_pkt_len", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_stat_qnum_reg[] = { + { "cgavd_unexcept_qnum", DPP_FIELD_FLAG_RW, 29, 14, 0x0, 0x0 }, + { "cgavd_except_qnum", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_stat_dp_reg[] = { + { "cgavd_stat_dp", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_num0_reg[] = { + { 
"flow_num0", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_num1_reg[] = { + { "flow_num1", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_num2_reg[] = { + { "flow_num2", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_num3_reg[] = { + { "flow_num3", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow_num4_reg[] = { + { "flow_num4", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow0_imem_cnt_reg[] = { + { "flow0_imem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow1_imem_cnt_reg[] = { + { "flow1_imem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow2_imem_cnt_reg[] = { + { "flow2_imem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow3_imem_cnt_reg[] = { + { "flow3_imem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow4_imem_cnt_reg[] = { + { "flow4_imem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow0_drop_cnt_reg[] = { + { "flow0_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow1_drop_cnt_reg[] = { + { "flow1_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow2_drop_cnt_reg[] = { + { "flow2_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow3_drop_cnt_reg[] = { + { "flow3_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow4_drop_cnt_reg[] = { + { "flow4_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_fc_count_mode_reg[] = { + { "fc_count_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_qmu_cgavd_fc_num_reg[] = { + { "qmu_cgavd_fc_state", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "qmu_cgavd_fc_num", DPP_FIELD_FLAG_RO, 30, 31, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_odma_fc_num_reg[] = { + { "cgavd_lif_fc_state", 
DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "cgavd_lif_fc_num", DPP_FIELD_FLAG_RO, 30, 31, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cfg_offset_reg[] = { + { "cfg_offset", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_init_done_reg[] = { + { "tmmu_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_int_mask_1_reg[] = { + { "imem_enq_rd_fifo_full_mask", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "imem_enq_rd_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 25, 1, 0x1, + 0x0 }, + { "imem_enq_rd_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 24, 1, 0x1, + 0x0 }, + { "imem_enq_drop_fifo_full_mask", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "imem_enq_drop_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 22, 1, 0x1, + 0x0 }, + { "imem_enq_drop_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 21, 1, 0x1, + 0x0 }, + { "imem_deq_rd_fifo_full_mask", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "imem_deq_rd_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, + 0x0 }, + { "imem_deq_rd_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, + 0x0 }, + { "imem_deq_drop_fifo_full_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "imem_deq_drop_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, + 0x0 }, + { "imem_deq_drop_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, + 0x0 }, + { "dma_data_fifo_full_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "dma_data_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "dma_data_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "wr_cmd_fifo_full_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "wr_cmd_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "wr_cmd_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "cached_pd_fifo_full_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "cached_pd_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "cached_pd_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "emem_pd_fifo_full_mask", DPP_FIELD_FLAG_RW, 5, 1, 
0x1, 0x0 }, + { "emem_pd_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "emem_pd_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "pd_order_fifo_full_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pd_order_fifo_overflow_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pd_order_fifo_underflow_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_int_mask_2_reg[] = { + { "dma_data_fifo_parity_err_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "imem_enq_rd_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, + 0x0 }, + { "imem_enq_rd_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, + 0x0 }, + { "imem_enq_drop_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 13, 1, + 0x1, 0x0 }, + { "imem_enq_drop_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 12, 1, + 0x1, 0x0 }, + { "imem_deq_rd_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, + 0x0 }, + { "imem_deq_rd_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, + 0x0 }, + { "imem_deq_drop_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 9, 1, + 0x1, 0x0 }, + { "imem_deq_drop_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 8, 1, + 0x1, 0x0 }, + { "wr_cmd_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, + 0x0 }, + { "wr_cmd_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, + 0x0 }, + { "pd_cache_ram_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, + 0x0 }, + { "pd_cache_ram_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "cached_pd_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "cached_pd_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, + 0x0 }, + { "emem_pd_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, + 0x0 }, + { "emem_pd_fifo_ecc_double_err_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_tm_pure_imem_en_reg[] = { + { "cfgmt_tm_pure_imem_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_force_ddr_rdy_cfg_reg[] = { + 
{ "cfgmt_force_ddr_rdy_cfg", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_pd_order_fifo_aful_th_reg[] = { + { "pd_order_fifo_aful_th", DPP_FIELD_FLAG_RW, 10, 11, 0x384, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cached_pd_fifo_aful_th_reg[] = { + { "cached_pd_fifo_aful_th", DPP_FIELD_FLAG_RW, 9, 10, 0x1f0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_wr_cmd_fifo_aful_th_reg[] = { + { "wr_cmd_fifo_aful_th", DPP_FIELD_FLAG_RW, 9, 10, 0x190, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_imem_enq_rd_fifo_aful_th_reg[] = { + { "imem_enq_rd_fifo_aful_th", DPP_FIELD_FLAG_RW, 6, 7, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_imem_enq_drop_fifo_aful_th_reg[] = { + { "imem_enq_drop_fifo_aful_th", DPP_FIELD_FLAG_RW, 6, 7, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_imem_deq_drop_fifo_aful_th_reg[] = { + { "imem_deq_drop_fifo_aful_th", DPP_FIELD_FLAG_RW, 6, 7, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_imem_deq_rd_fifo_aful_th_reg[] = { + { "imem_deq_rd_fifo_aful_th", DPP_FIELD_FLAG_RW, 10, 11, 0x320, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_states_1_reg[] = { + { "imem_enq_rd_fifo_full", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "imem_enq_rd_fifo_overflow", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "imem_enq_rd_fifo_underflow", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "imem_enq_drop_fifo_full", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "imem_enq_drop_fifo_overflow", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "imem_enq_drop_fifo_underflow", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "imem_deq_rd_fifo_full", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "imem_deq_rd_fifo_overflow", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "imem_deq_rd_fifo_underflow", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "imem_deq_drop_fifo_full", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "imem_deq_drop_fifo_overflow", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "imem_deq_drop_fifo_underflow", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "dma_data_fifo_full", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "dma_data_fifo_overflow", 
DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "dma_data_fifo_underflow", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "wr_cmd_fifo_full", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "wr_cmd_fifo_overflow", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "wr_cmd_fifo_underflow", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "cached_pd_fifo_full", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "cached_pd_fifo_overflow", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "cached_pd_fifo_underflow", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "emem_pd_fifo_full", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "emem_pd_fifo_overflow", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "emem_pd_fifo_underflow", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "pd_order_fifo_full", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "pd_order_fifo_overflow", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "pd_order_fifo_underflow", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_states_2_reg[] = { + { "dma_data_fifo_parity_err", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "imem_enq_rd_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 15, 1, 0x0, + 0x0 }, + { "imem_enq_rd_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "imem_enq_drop_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, + 0x0 }, + { "imem_enq_drop_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, + 0x0 }, + { "imem_deq_rd_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, + 0x0 }, + { "imem_deq_rd_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, + 0x0 }, + { "imem_deq_drop_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, + 0x0 }, + { "imem_deq_drop_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, + 0x0 }, + { "wr_cmd_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "wr_cmd_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "pd_cache_ram_ecc_single_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "pd_cache_ram_ecc_double_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "cached_pd_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 3, 
1, 0x0, 0x0 }, + { "cached_pd_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "emem_pd_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "emem_pd_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_ind_cmd_reg[] = { + { "rd", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_ind_sta_reg[] = { + { "indirectaccessdone", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_ind_data0_reg[] = { + { "indirectdata0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_ind_data1_reg[] = { + { "indirectdata1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_full_threshold_reg[] = { + { "full_threshold", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_empty_threshold_reg[] = { + { "empty_threshold", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_sta_init_cfg_reg[] = { + { "sta_ram_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "sta_ram_init_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_cfg_init_cfg_reg[] = { + { "cfg_ram_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "cfg_ram_init_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_token_mode_switch_reg[] = { + { "token_mode_switch", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_token_grain_reg[] = { + { "token_grain", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_crd_grain_reg[] = { + { "crd_grain", DPP_FIELD_FLAG_RW, 20, 21, 0x8fc, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_stat_ctrl_reg[] = { + { "shap_stat_ctrl", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_token_stat_id_reg[] = { + { "token_stat_id", DPP_FIELD_FLAG_RW, 10, 11, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_token_stat_reg[] = { + { "token_stat", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_stat_clk_cnt_reg[] = { + { "shap_stat_clk_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_shap_bucket_map_tbl_reg[] = { + { "shap_map", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_bkt_para_tbl_reg[] = { + { "bucket_depth", DPP_FIELD_FLAG_RW, 35, 11, 0x0, 0x0 }, + { "bucket_rate", DPP_FIELD_FLAG_RW, 24, 25, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_credit_en_reg[] = { + { "credit_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crt_inter1_reg[] = { + { "crd_inter1", DPP_FIELD_FLAG_RW, 8, 9, 0x2d, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_db_token_reg[] = { + { "db_token", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crs_flt_cfg_reg[] = { + { "crs_flt_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_th_sp_reg[] = { + { "th_sp", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_th_wfq_fq_reg[] = { + { "th_fq", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "th_wfq", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_th_wfq2_fq2_reg[] = { + { "th_fq2", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "th_wfq2", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_th_wfq4_fq4_reg[] = { + { "th_fq4", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "th_wfq4", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_cfg_state_reg[] = { + { "cfg_state", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_ind_cmd_reg[] = { + { "rd", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_ind_sta_reg[] = { + { "indirectaccessdone", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_ind_data0_reg[] = { + { "indirectdata0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_ind_data1_reg[] = { + 
{ "indirectdata1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_state_reg[] = { + { "crdt_int", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "crdt_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_0_reg[] = { + { "stat_que_id_0", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_1_reg[] = { + { "stat_que_id_1", DPP_FIELD_FLAG_RW, 13, 14, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_2_reg[] = { + { "stat_que_id_2", DPP_FIELD_FLAG_RW, 13, 14, 0x2, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_3_reg[] = { + { "stat_que_id_3", DPP_FIELD_FLAG_RW, 13, 14, 0x3, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_4_reg[] = { + { "stat_que_id_4", DPP_FIELD_FLAG_RW, 13, 14, 0x4, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_5_reg[] = { + { "stat_que_id_5", DPP_FIELD_FLAG_RW, 13, 14, 0x5, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_6_reg[] = { + { "stat_que_id_6", DPP_FIELD_FLAG_RW, 13, 14, 0x6, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_7_reg[] = { + { "stat_que_id_7", DPP_FIELD_FLAG_RW, 13, 14, 0x7, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_8_reg[] = { + { "stat_que_id_8", DPP_FIELD_FLAG_RW, 13, 14, 0x8, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_9_reg[] = { + { "stat_que_id_9", DPP_FIELD_FLAG_RW, 13, 14, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_10_reg[] = { + { "stat_que_id_10", DPP_FIELD_FLAG_RW, 13, 14, 0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_11_reg[] = { + { "stat_que_id_11", DPP_FIELD_FLAG_RW, 13, 14, 0xb, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_12_reg[] = { + { "stat_que_id_12", DPP_FIELD_FLAG_RW, 13, 14, 0xc, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_13_reg[] = { + { "stat_que_id_13", DPP_FIELD_FLAG_RW, 13, 14, 0xd, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_14_reg[] = { + { "stat_que_id_14", DPP_FIELD_FLAG_RW, 13, 14, 0xe, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_id_15_reg[] = { + { "stat_que_id_15", DPP_FIELD_FLAG_RW, 
13, 14, 0xf, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_credit_reg[] = { + { "stat_que_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_cfg_ram_init_reg[] = { + { "cfg_ram_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "cfg_ram_init_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_sta_ram_init_reg[] = { + { "sta_ram_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "sta_ram_init_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crs_que_id_reg[] = { + { "crs_que_id", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_qmu_crs_end_state_reg[] = { + { "qmu_crs_end_state", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_shap_rdy_reg[] = { + { "shap_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_shap_int_reg_reg[] = { + { "pp_c_token_min_int", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_shap_int_mask_reg_reg[] = { + { "pp_c_token_min_int_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_token_state_almost_empty_th_reg[] = { + { "token_state_almost_empty_th", DPP_FIELD_FLAG_RW, 16, 17, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_token_state_empty_th_reg[] = { + { "token_state_empty_th", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_full_th_reg[] = { + { "token_state_full_th", DPP_FIELD_FLAG_RW, 16, 17, 0x2, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_c_level_shap_en_reg[] = { + { "pp_c_level_shap_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_enq_token_th_reg[] = { + { "enq_token_th", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_tokenq_level1_qstate_weight_cir_reg[] = { + { "pp_pp_q_state_cir", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "pp_pp_q_weight_wfq_l1_cir", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_idle_weight_level1_cir_reg[] = { + { "pp_idle_q_weight_wfq_l1_cir", DPP_FIELD_FLAG_RO, 25, 26, 
0x1ffffff, + 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_0_cfg_reg[] = { + { "rci_grade_th_0_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x177, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_1_cfg_reg[] = { + { "rci_grade_th_1_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x2ee, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_2_cfg_reg[] = { + { "rci_grade_th_2_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x465, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_3_cfg_reg[] = { + { "rci_grade_th_3_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x47e, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_4_cfg_reg[] = { + { "rci_grade_th_4_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x753, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_5_cfg_reg[] = { + { "rci_grade_th_5_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x8ca, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_th_6_cfg_reg[] = { + { "rci_grade_th_6_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0xa41, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_flow_del_cmd_reg[] = { + { "flow_del_busy", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "flow_alt_cmd", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "flow_alt_ind", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_cnt_clr_reg[] = { + { "cnt_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_int_bus_reg[] = { + { "ldstr_fifo15_ovf_int", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "ldstr_fifo14_ovf_int", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "ldstr_fifo13_ovf_int", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "ldstr_fifo12_ovf_int", DPP_FIELD_FLAG_RO, 28, 1, 0x0, 0x0 }, + { "ldstr_fifo11_ovf_int", DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "ldstr_fifo10_ovf_int", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "ldstr_fifo9_ovf_int", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "ldstr_fifo8_ovf_int", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "ldstr_fifo7_ovf_int", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "ldstr_fifo6_ovf_int", DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "ldstr_fifo5_ovf_int", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { 
"ldstr_fifo4_ovf_int", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "ldstr_fifo3_ovf_int", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "ldstr_fifo2_ovf_int", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "ldstr_fifo1_ovf_int", DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "ldstr_fifo0_ovf_int", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "cfg_del_err_int", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "flwin_secrs_fifo_ovf_int", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "flwin_voqcrs_fifo_ovf_int", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_int_mask_reg[] = { + { "crdt_int_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_cfg_weight_together_reg[] = { + { "cfg_weight_together", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_weight_reg[] = { + { "c_weight", DPP_FIELD_FLAG_RW, 22, 7, 0x0, 0x0 }, + { "e_weight", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_sp_state_reg[] = { + { "dev_sp_state", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_crs_reg[] = { + { "dev_crs", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_congest_token_disable_31_0_reg[] = { + { "congest_token_disable_31_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_congest_token_disable_63_32_reg[] = { + { "congest_token_disable_63_32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_interval_en_cfg_reg[] = { + { "crdt_interval_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_q_token_staue_cfg_reg[] = { + { "test_token_q_id", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_q_token_dist_cnt_reg[] = { + { "q_token_dist_counter", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_q_token_dec_cnt_reg[] = { + { "q_token_dec_counter", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_weight_ram_reg[] = { + { "pp_c_weight", DPP_FIELD_FLAG_RW, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_crdt_pp_cbs_shape_en_ram_reg[] = { + { "pp_cbs", DPP_FIELD_FLAG_RW, 17, 17, 0x0, 0x0 }, + { "pp_c_shap_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_next_pc_q_state_ram_reg[] = { + { "pp_next_pc", DPP_FIELD_FLAG_RO, 24, 6, 0x0, 0x0 }, + { "pp_token_num", DPP_FIELD_FLAG_RO, 18, 18, 0x20000, 0x0 }, + { "pp_q_state", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_interval_reg[] = { + { "dev_interval", DPP_FIELD_FLAG_RW, 21, 22, 0xf, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_wfq_cnt_reg[] = { + { "dev_wfq_cnt", DPP_FIELD_FLAG_RO, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_wfq_state_reg[] = { + { "dev_wfq_state", DPP_FIELD_FLAG_RO, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_active_head_ptr_reg[] = { + { "dev_active_head_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_active_tail_ptr_reg[] = { + { "dev_active_tail_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_unactive_head_ptr_reg[] = { + { "dev_unactive_head_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_unactive_tail_ptr_reg[] = { + { "dev_unactive_tail_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_weight_reg[] = { + { "pp_weight", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_que_state_reg[] = { + { "pp_enque_flag", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "pp_cir", DPP_FIELD_FLAG_RO, 15, 2, 0x2, 0x0 }, + { "pp_congest_cir", DPP_FIELD_FLAG_RO, 13, 2, 0x0, 0x0 }, + { "pp_crs", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "dev_sp", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_next_ptr_reg[] = { + { "pp_next_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_cfg_reg[] = { + { "pp_cfg", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_up_ptr_reg[] = { + { "pp_up_ptr", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_credit_drop_num_reg[] = { + { 
"credit_drop_num", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_id_lv0_reg[] = { + { "se_id_out_lv0", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_id_lv1_reg[] = { + { "se_id_out_lv1", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_id_lv2_reg[] = { + { "se_id_out_lv2", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_id_lv3_reg[] = { + { "se_id_out_lv3", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_id_lv4_reg[] = { + { "se_id_out_lv4", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_que_id_reg[] = { + { "que_id_out", DPP_FIELD_FLAG_RO, 17, 18, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_info_lv0_reg[] = { + { "se_shape_lv0", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "se_ins_out_lv0", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "se_state_out_lv0", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "se_new_state_out_lv0", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_info_lv1_reg[] = { + { "se_shape_lv1", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "se_ins_out_lv1", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "se_state_out_lv1", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "se_new_state_out_lv1", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_info_lv2_reg[] = { + { "se_shape_lv2", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "se_ins_out_lv2", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "se_state_out_lv2", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "se_new_state_out_lv2", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_info_lv3_reg[] = { + { "se_shape_lv3", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "se_ins_out_lv3", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "se_state_out_lv3", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "se_new_state_out_lv3", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_info_lv4_reg[] = { + { "se_shape_lv4", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { 
"se_ins_out_lv4", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "se_state_out_lv4", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "se_new_state_out_lv4", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_que_state_reg[] = { + { "que_state_out", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_eir_off_in_advance_reg[] = { + { "eir_crs_filter", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_double_level_shap_prevent_reg[] = { + { "double_level_shap_prevent", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_add_store_cycle_reg[] = { + { "add_store_cycle", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_tflag2_wr_flag_sum_reg[] = { + { "tflag2_wr_flag_sum", DPP_FIELD_FLAG_RO, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_flowque_para_tbl_reg[] = { + { "flowque_link", DPP_FIELD_FLAG_RW, 30, 15, 0x7fff, 0x0 }, + { "flowque_w", DPP_FIELD_FLAG_RW, 14, 11, 0x0, 0x0 }, + { "flowque_pri", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_para_tbl_reg[] = { + { "se_insw", DPP_FIELD_FLAG_RW, 33, 1, 0x0, 0x0 }, + { "se_link", DPP_FIELD_FLAG_RW, 30, 15, 0x7fff, 0x0 }, + { "cp_token_en", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "se_w", DPP_FIELD_FLAG_RW, 14, 11, 0x0, 0x0 }, + { "se_pri", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_flowque_ins_tbl_reg[] = { + { "flowque_ins", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_se_ins_tbl_reg[] = { + { "se_ins_flag", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "se_ins_priority", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_eir_crs_filter_tbl_reg[] = { + { "eir_crs_filter", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_cfg_done_reg[] = { + { "qcfg_qlist_cfg_done", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_credit_value_reg[] = { + { "qcfg_qsch_credit_value", DPP_FIELD_FLAG_RW, 13, 14, 0x8fc, 0x0 }, +}; +DPP_FIELD_T 
g_etm_qmu_qcfg_qsch_crbal_init_value_reg[] = { + { "qcfg_qsch_crbal_init_value", DPP_FIELD_FLAG_RW, 16, 17, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crbal_init_mask_reg[] = { + { "qcfg_qsch_crbal_init_mask", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cmdsch_rd_cmd_aful_th_reg[] = { + { "cmdsch_rd_cmd_aful_th", DPP_FIELD_FLAG_RW, 9, 10, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_port_fc_interval_reg[] = { + { "cfg_port_fc_interval", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csch_aged_cfg_reg[] = { + { "qcfg_csch_aged_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csch_aged_scan_time_reg[] = { + { "qcfg_csch_aged_scan_time", DPP_FIELD_FLAG_RW, 31, 32, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qmu_qlist_state_query_reg[] = { + { "pkt_age_req_fifo_afull", DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "rd_release_fwft_afull", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "drop_imem_fwft_afull", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "pkt_age_req_fifo_empty", DPP_FIELD_FLAG_RO, 24, 1, 0x1, 0x0 }, + { "rd_release_fwft_empty", DPP_FIELD_FLAG_RO, 23, 1, 0x1, 0x0 }, + { "drop_imem_fwft_empty", DPP_FIELD_FLAG_RO, 22, 1, 0x1, 0x0 }, + { "mmu_qmu_sop_rd_rdy", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "big_fifo_empty", DPP_FIELD_FLAG_RO, 20, 1, 0x1, 0x0 }, + { "qmu_mmu_rd_release_rdy", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "xsw_qmu_crs_rdy", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "mmu_qmu_rdy", DPP_FIELD_FLAG_RO, 17, 10, 0x0, 0x0 }, + { "mmu_ql_wr_rdy", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "mmu_ql_rd_rdy", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "csw_ql_rdy", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ql_init_done", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "free_addr_ready", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "bank_group_afull", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "pds_fwft_empty", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "enq_rpt_fwft_afull", DPP_FIELD_FLAG_RO, 0, 1, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qsch_crbal_drop_en_reg[] = { + { "cfgmt_qsch_all_crbal_drop_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cfgmt_qsch_crbal_drop_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_wlist_qnum_fifo_aful_th_reg[] = { + { "cfgmt_wlist_qnum_fifo_aful_th", DPP_FIELD_FLAG_RW, 3, 4, 0x4, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csw_pkt_blk_mode_reg[] = { + { "qcfg_csw_pkt_blk_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_ram_init_cancel_reg[] = { + { "qcfg_qlist_ram_init_cancel", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crbal_transfer_mode_reg[] = { + { "qcfg_qsch_crbal_transfer_mode", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "qcfg_qsch_crbal_transfer_value", DPP_FIELD_FLAG_RW, 15, 16, 0x3e8, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_qclr_interval_reg[] = { + { "qcfg_qlist_qclr_interval", DPP_FIELD_FLAG_RW, 31, 32, 0x400, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_qclr_rate_reg[] = { + { "qcfg_qsch_qclr_rate", DPP_FIELD_FLAG_RW, 31, 32, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_ddr_random_reg[] = { + { "qcfg_qlist_ddr_random", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qlist_pds_fifo_afull_th_reg[] = { + { "cfgmt_qlist_pds_fifo_afull_th", DPP_FIELD_FLAG_RW, 5, 6, 0xc, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_sop_cmd_fifo_afull_th_reg[] = { + { "cfgmt_sop_cmd_fifo_afull_th", DPP_FIELD_FLAG_RW, 7, 8, 0x78, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_non_sop_cmd_fifo_afull_th_reg[] = { + { "cfgmt_non_sop_cmd_fifo_afull_th", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_mmu_data_fifo_afull_th_reg[] = { + { "cfgmt_mmu_data_fifo_afull_th", DPP_FIELD_FLAG_RW, 9, 10, 0xf0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_bank_ept_th_reg[] = { + { "qcfg_qlist_bank_ept_th", DPP_FIELD_FLAG_RW, 17, 18, 0x3, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_random_bypass_en_reg[] = { + { 
"random_bypass_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_crs_spd_bypass_reg[] = { + { "cfgmt_crs_spd_bypass", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_crs_interval_reg[] = { + { "cfgmt_crs_interval", DPP_FIELD_FLAG_RW, 31, 32, 0x8, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_auto_credit_control_en_reg[] = { + { "cfg_qsch_auto_credit_control_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_autocrfrstque_reg[] = { + { "cfg_qsch_autocrfrstque", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_autocrlastque_reg[] = { + { "cfg_qsch_autocrlastque", DPP_FIELD_FLAG_RW, 15, 16, 0x3ff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_autocreditrate_reg[] = { + { "cfg_qsch_autocreditrate", DPP_FIELD_FLAG_RW, 19, 20, 0x20, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_scanfrstque_reg[] = { + { "cfg_qsch_scanfrstque", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_scanlastque_reg[] = { + { "cfg_qsch_scanlastque", DPP_FIELD_FLAG_RW, 13, 14, 0x3ff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_scanrate_reg[] = { + { "cfg_qsch_scanrate", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_scan_en_reg[] = { + { "cfg_qsch_scan_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qsch_rd_credit_fifo_rate_reg[] = { + { "cfgmt_qsch_rd_credit_fifo_rate", DPP_FIELD_FLAG_RW, 23, 24, 0xa, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_bdep_reg[] = { + { "qcfg_qlist_bdep", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_bhead_reg[] = { + { "bank_vld", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "qcfg_qlist_bhead", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_btail_reg[] = { + { "qcfg_qlist_btail", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_shap_param_reg[] = { + { "qcfg_qsch_shap_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, 
+ { "qcfg_qsch_shap_param1", DPP_FIELD_FLAG_RW, 23, 12, 0x0, 0x0 }, + { "qcfg_qsch_shap_param2", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_shap_token_reg[] = { + { "qcfg_qsch_shap_token", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_shap_offset_reg[] = { + { "qcfg_qsch_shap_offset", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_eir_th_reg[] = { + { "qcfg_qsch_crs_eir_th", DPP_FIELD_FLAG_RW, 18, 19, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_th1_reg[] = { + { "qcfg_qsch_crs_th1", DPP_FIELD_FLAG_RW, 31, 32, 0x57584c4c, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_th2_reg[] = { + { "qcfg_qsch_crs_th2", DPP_FIELD_FLAG_RW, 31, 32, 0x200, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csch_congest_th_reg[] = { + { "qcfg_csch_congest_th", DPP_FIELD_FLAG_RW, 13, 14, 0xc8, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csch_sp_fc_th_reg[] = { + { "qcfg_csch_sp_fc_th", DPP_FIELD_FLAG_RW, 13, 14, 0x12c, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csw_shap_parameter_reg[] = { + { "qcfg_csw_shap_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "qcfg_csw_shap_parameter", DPP_FIELD_FLAG_RW, 23, 24, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_rd_release_aful_th_reg[] = { + { "cfgmt_rd_release_aful_th", DPP_FIELD_FLAG_RW, 8, 9, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_drop_imem_release_fifo_aful_th_reg[] = { + { "cfgmt_drop_imem_release_fifo_aful_th", DPP_FIELD_FLAG_RW, 8, 9, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_nnh_rd_buf_aful_th_reg[] = { + { "cfgmt_nnh_rd_buf_aful_th", DPP_FIELD_FLAG_RW, 5, 6, 0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_pid_use_inall_reg[] = { + { "cfgmt_nod_rd_buf_0_aful_th", DPP_FIELD_FLAG_RW, 4, 5, 0x3, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_pid_round_th_reg[] = { + { "cfgmt_nod_rd_buf_1_aful_th", DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_credit_fifo_afull_th_reg[] = { + { "cfgmt_credit_fifo_afull_th", DPP_FIELD_FLAG_RW, 5, 6, 
0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_scan_fifo_afull_th_reg[] = { + { "cfgmt_scan_fifo_afull_th", DPP_FIELD_FLAG_RW, 5, 6, 0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_small_fifo_aful_th_reg[] = { + { "cfgmt_small_fifo_aful_th", DPP_FIELD_FLAG_RW, 5, 6, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_free_addr_fifo_aful_th_reg[] = { + { "cfgmt_free_addr_fifo_aful_th", DPP_FIELD_FLAG_RW, 5, 6, 0x16, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_enq_rpt_fifo_aful_th_reg[] = { + { "cfgmt_enq_rpt_fifo_aful_th", DPP_FIELD_FLAG_RW, 9, 10, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csw_shap_token_depth_reg[] = { + { "qcfg_csw_shap_token_depth", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csw_shap_offset_value_reg[] = { + { "qcfg_csw_shap_offset_value", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_csw_fc_offset_value_reg[] = { + { "qcfg_csw_fc_offset_value", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_init_done_state_reg[] = { + { "csch_qcfg_init_done", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "qsch_qcfg_init_done", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "qlist_qcfg_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "qcsr_ram_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_qcfg_port_shap_rdy_0_reg[] = { + { "csw_qcfg_port_shap_rdy_0", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_qcfg_port_shap_rdy_1_reg[] = { + { "csw_qcfg_port_shap_rdy_1", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_cfgmt_ram_init_done_reg[] = { + { "qlist_qcfg_qds_ram_init_done", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "qlist_qcfg_chk_ram_init_done", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "qlist_qcfg_ept_ram_init_done", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "qlist_qcfg_cti_ram_init_done", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "qlist_qcfg_cto_ram_init_done", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { 
"qlist_qcfg_bcnt_ram_init_done", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "qlist_qcfg_biu_ram_init_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "qlist_qcfg_baram_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_cfgmt_ram_ecc_err_reg[] = { + { "qds_ram_parity_err", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "qcsr_qnum_fifo_parity_err", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "sa_id_ram_parity_err", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "enq_rpt_fifo_parity_err", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "bcnts_parity_err", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "baram_parity_err_a", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "baram_parity_err_b", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "bcntm_ram_parity_err", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "biu_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "chk_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "cmd_sch_cmd_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 19, 1, 0x0, + 0x0 }, + { "cmd_sch_list_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, + 0x0 }, + { "cmd_sch_hp_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "cmd_sch_tp_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "cmd_sch_enq_active_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 15, 1, + 0x0, 0x0 }, + { "cmd_sch_deq_active_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 14, 1, + 0x0, 0x0 }, + { "cmd_sch_empty_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, + 0x0 }, + { "cmd_sch_eop_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, + 0x0 }, + { "cmd_sch_blkcnt_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, + 0x0 }, + { "biu_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "chk_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "cmd_sch_cmd_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "cmd_sch_list_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "cmd_sch_hp_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 6, 
1, 0x0, 0x0 }, + { "cmd_sch_tp_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "cmd_sch_enq_active_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "cmd_sch_deq_active_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, + 0x0 }, + { "cmd_sch_empty_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, + 0x0 }, + { "cmd_sch_eop_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "cmd_sch_blkcnt_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_cfgmt_ram_slot_err_reg[] = { + { "qds_ram_enq_rd_slot_err", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "qds_ram_deq_rd_slot_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "qds_ram_enq_wr_slot_err", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "qds_ram_deq_wr_slot_err", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "chk_ram_enq_rd_slot_err", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "chk_ram_deq_rd_slot_err", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "chk_ram_enq_wr_slot_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "chk_ram_deq_wr_slot_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ept_ram_enq_rd_slot_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ept_ram_deq_rd_slot_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ept_ram_enq_wr_slot_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ept_ram_deq_wr_slot_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "cti_ram_enq_rd_slot_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "cti_ram_deq_rd_slot_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "cti_ram_enq_wr_slot_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "cti_ram_deq_wr_slot_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "cto_ram_enq_rd_slot_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "cto_ram_deq_rd_slot_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "cto_ram_enq_wr_slot_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "cto_ram_deq_wr_slot_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfgmt_ram_ecc_reg[] = { + { 
"crbal_rama_parity_error", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "crbal_ramb_parity_error", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "crs_ram_parity_error", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "wlist_flag_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "wlist_next_single_ecc_err", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "wlist_wactive_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 21, 1, 0x0, + 0x0 }, + { "wlist_ractive_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 20, 1, 0x0, + 0x0 }, + { "wlist_tp1_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "wlist_tp2_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "wlist_empty1_ram_single_ecc_err_a", DPP_FIELD_FLAG_RC, 17, 1, 0x0, + 0x0 }, + { "wlist_empty1_ram_single_ecc_err_b", DPP_FIELD_FLAG_RC, 16, 1, 0x0, + 0x0 }, + { "wlist_empty2_ram_single_ecc_err_a", DPP_FIELD_FLAG_RC, 15, 1, 0x0, + 0x0 }, + { "wlist_empty2_ram_single_ecc_err_b", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "wlist_hp_ram_single_ecc_err_a", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "wlist_hp_ram_single_ecc_err_b", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "wlist_flag_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "wlist_next_double_ecc_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "wlist_wactive_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, + 0x0 }, + { "wlist_ractive_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, + 0x0 }, + { "wlist_tp1_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "wlist_tp2_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "wlist_empty1_ram_double_ecc_err_a", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "wlist_empty1_ram_double_ecc_err_b", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "wlist_empty2_ram_double_ecc_err_a", DPP_FIELD_FLAG_RC, 3, 1, 0x0, + 0x0 }, + { "wlist_empty2_ram_double_ecc_err_b", DPP_FIELD_FLAG_RC, 2, 1, 0x0, + 0x0 }, + { "wlist_hp_ram_double_ecc_err_a", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { 
"wlist_hp_ram_double_ecc_err_b", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_cfgmt_fifo_state_reg[] = { + { "pkt_age_req_fifo_overflow", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "pkt_age_req_fifo_underflow", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "qcsr_big_fifo_ovfl", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "qcsr_small_fifo_overflow", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "enq_rpt_fifo_overflow", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "enq_rpt_fifo_underflow", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "pds_fwft_overflow", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "pds_fwft_underflow", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_addr_fifo_overflow", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_addr_fifo_underflow", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "rd_release_fwft_overflow", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "rd_release_fwft_underflow", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "pid_free_list_overflow", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "pid_free_list_underflow", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "pid_prp_list_overflow", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "pid_prp_list_underflow", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "pid_rdy_list_overflow", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "pid_rdy_list_underflow", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "drop_imem_release_fwft_overflow", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "drop_imem_release_fwft_underflow", DPP_FIELD_FLAG_RC, 6, 1, 0x0, + 0x0 }, + { "nnh_rd_buf_fifo_overflow", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "nnh_rd_buf_fifo_underflow", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "nod_rd_buf_0_fifo_overflow", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "nod_rd_buf_0_fifo_underflow", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "nod_rd_buf_1_fifo_overflow", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "nod_rd_buf_1_fifo_underflow", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_qcfg_clr_done_reg[] = { + { 
"qlist_qcfg_clr_done", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask1_reg[] = { + { "qmu_int_mask1", DPP_FIELD_FLAG_RW, 29, 30, 0x3fffffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask2_reg[] = { + { "qmu_int_mask2", DPP_FIELD_FLAG_RW, 19, 20, 0xfffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask3_reg[] = { + { "qmu_int_mask3", DPP_FIELD_FLAG_RW, 26, 27, 0x7ffffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask4_reg[] = { + { "qmu_int_mask4", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask5_reg[] = { + { "qmu_int_mask5", DPP_FIELD_FLAG_RW, 30, 31, 0x7fffffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_int_mask6_reg[] = { + { "qmu_int_mask6", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cmd_sch_cfgmt_fifo_state_reg[] = { + { "nsop_fifo_parity_err", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "cmdsch_rd_cmd_fifo_parity_err", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "sop_fifo_afull", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "sop_fifo_empty", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "sop_fifo_overflow", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "sop_fifo_underflow", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "mmu_data_fifo_afull", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "mmu_data_fifo_empty", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "mmudat_fifo_overflow", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "mmudat_fifo_underflow", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "non_sop_fifo_afull", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "non_sop_fifo_empty", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "nsop_fifo_overflow", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "nsop_fifo_underflow", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "cmdsch_rd_cmd_fifo_afull", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "cmdsch_rd_cmd_fifo_empty", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "cmdsch_rd_cmd_fifo_overflow", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "cmdsch_rd_cmd_fifo_underflow", DPP_FIELD_FLAG_RC, 
12, 1, 0x0, 0x0 }, + { "wlist_qnum_fifo_overflow", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "wlist_qnum_fifo_underflow", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "qsch_scan_fifo_overflow", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "qsch_scan_fifo_underflow", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "qsch_credit_fifo_overflow", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "qsch_credit_fifo_underflow", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "qsch_credit_fifo2_overflow", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "qsch_credit_fifo2_underflow", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_r_bcnt_reg[] = { + { "qlist_r_bcnt", DPP_FIELD_FLAG_RO, 21, 22, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_rw_crbal_reg[] = { + { "qsch_rw_crbal", DPP_FIELD_FLAG_RW, 16, 17, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_rw_crs_reg[] = { + { "qsch_rw_crs", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_r_wlist_empty_reg[] = { + { "qsch_r_wlist_empty", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_baram_rd_reg[] = { + { "qcfg_qlist_baram_rd", DPP_FIELD_FLAG_RO, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crbal_fb_rw_reg[] = { + { "qcfg_qlist_crbal_fb_rw", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp0_bank_reg[] = { + { "qcfg_qlist_grp0_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp1_bank_reg[] = { + { "qcfg_qlist_grp1_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp2_bank_reg[] = { + { "qcfg_qlist_grp2_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp3_bank_reg[] = { + { "qcfg_qlist_grp3_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp4_bank_reg[] = { + { "qcfg_qlist_grp4_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp5_bank_reg[] = { + { "qcfg_qlist_grp5_bank_wr", 
DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp6_bank_reg[] = { + { "qcfg_qlist_grp6_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp7_bank_reg[] = { + { "qcfg_qlist_grp7_bank_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_grp_reg[] = { + { "qcfg_qlist_grp_wr", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_active_to_bank_cfg_reg[] = { + { "cfgmt_active_to_bank_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x1 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_ddr_in_mmu_cfg_reg[] = { + { "cfgmt_ddr_in_mmu_cfg", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x1 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_ddr_in_qmu_cfg_reg[] = { + { "cfgmt_ddr_in_qmu_cfg", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_bank_to_mmu_cfg_reg[] = { + { "cfgmt_bank_in_mmu_cfg", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x1 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_bank_to_qmu_cfg_reg[] = { + { "cfgmt_bank_in_qmu_cfg", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x1 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_grp_ram_n_clr_thd_reg[] = { + { "cfgmt_grp_ram_n_clr_thd", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_age_pkt_num_reg[] = { + { "cfgmt_age_pkt_num", DPP_FIELD_FLAG_RW, 3, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_age_multi_interval_reg[] = { + { "cfgmt_age_multi_interval", DPP_FIELD_FLAG_RW, 15, 16, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pkt_age_en_reg[] = { + { "cfgmt_qmu_pkt_age_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pkt_age_interval_reg[] = { + { "cfgmt_qmu_pkt_age_interval", DPP_FIELD_FLAG_RW, 31, 32, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pkt_age_start_end_reg[] = { + { "cfgmt_qmu_pkt_age_end", DPP_FIELD_FLAG_RW, 31, 16, 0x23ff, 0x0 }, + { "cfgmt_qmu_pkt_age_start", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_pkt_age_req_aful_th_reg[] = { + { "cfgmt_pkt_age_req_aful_th", DPP_FIELD_FLAG_RW, 5, 6, 
0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_pkt_age_step_interval_reg[] = { + { "cfgmt_pkt_age_step_interval", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_age_mode_reg[] = { + { "cfgmt_qmu_imem_age_en", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "cfgmt_qmu_imem_age_qlen_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cfgmt_qmu_imem_age_time_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_qlen_age_interval_reg[] = { + { "cfgmt_qmu_imem_qlen_age_interval", DPP_FIELD_FLAG_RW, 15, 16, 0xff, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_time_age_interval_reg[] = { + { "cfgmt_qmu_imem_time_age_interval", DPP_FIELD_FLAG_RW, 31, 32, 0xff, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_qlen_age_thd_reg[] = { + { "cfgmt_qmu_imem_qlen_age_thd", DPP_FIELD_FLAG_RW, 13, 14, 0x1000, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_imem_age_step_interval_reg[] = { + { "cfgmt_imem_age_step_interval", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_ecc_bypass_read_reg[] = { + { "cfgmt_qmu_ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_resp_stat_fc_en_reg[] = { + { "cfgmt_qmu_resp_stat_fc_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_bank_xoff_pds_mode_reg[] = { + { "cfgmt_qmu_bank_xoff_pds_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_stat_offset_reg[] = { + { "cfgmt_qmu_stat_offset", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_fc_cnt_mode_reg[] = { + { "fc_cnt_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_mmu_qmu_wr_fc_cnt_reg[] = { + { "mmu_qmu_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_mmu_qmu_rd_fc_cnt_reg[] = { + { "mmu_qmu_rd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_cgavd_fc_cnt_reg[] = { + { "qmu_cgavd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_etm_qmu_cgavd_qmu_pkt_cnt_reg[] = { + { "cgavd_qmu_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cgavd_qmu_pktlen_all_reg[] = { + { "cgavd_qmu_pktlen_all", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_observe_portfc_spec_reg[] = { + { "observe_portfc_spec", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_lif_portfc_count_reg[] = { + { "spec_lif_portfc_count", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pfc_en_reg[] = { + { "cfgmt_qmu_pfc_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pfc_mask_1_reg[] = { + { "cfgmt_qmu_pfc_mask_1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_pfc_mask_2_reg[] = { + { "cfgmt_qmu_pfc_mask_2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_int_repeat_reg[] = { + { "int_repeat", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_size_reg[] = { + { "dma_up_size", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_soc_wr_time_out_thresh_reg[] = { + { "soc_wr_time_out_thresh", DPP_FIELD_FLAG_RW, 31, 32, 0xfa0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_cfg_shap_param_reg[] = { + { "shap_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "shap_rate", DPP_FIELD_FLAG_RW, 30, 31, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_cfg_shap_token_reg[] = { + { "cfg_shap_plen_offset", DPP_FIELD_FLAG_RW, 31, 8, 0x00, 0x0 }, + { "cfg_shap_token", DPP_FIELD_FLAG_RW, 16, 17, 0xf0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_idle_ptr_fifo_aful_th_reg[] = { + { "idle_ptr3_fifo_aful_th", DPP_FIELD_FLAG_RW, 31, 8, 0x5a, 0x0 }, + { "idle_ptr2_fifo_aful_th", DPP_FIELD_FLAG_RW, 23, 8, 0x5a, 0x0 }, + { "idle_ptr1_fifo_aful_th", DPP_FIELD_FLAG_RW, 15, 8, 0x5a, 0x0 }, + { "idle_ptr0_fifo_aful_th", DPP_FIELD_FLAG_RW, 7, 8, 0x5a, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos_port_cfg_reg[] = { + { "cos3_port_cfg", DPP_FIELD_FLAG_RW, 31, 8, 
0x3f, 0x0 }, + { "cos2_port_cfg", DPP_FIELD_FLAG_RW, 23, 8, 0x3e, 0x0 }, + { "cos1_port_cfg", DPP_FIELD_FLAG_RW, 15, 8, 0x3d, 0x0 }, + { "cos0_port_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x3c, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 22, 3, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data1_reg[] = { + { "ind_dat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data2_reg[] = { + { "ind_dat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data3_reg[] = { + { "ind_dat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data4_reg[] = { + { "ind_dat4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data5_reg[] = { + { "ind_dat5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data6_reg[] = { + { "ind_dat6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_ind_data7_reg[] = { + { "ind_dat7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_tcam_0_cmd_reg[] = { + { "cfg_vben", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "cfg_vbi", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "cfg_t_strwc", DPP_FIELD_FLAG_RW, 22, 2, 0x1, 0x0 }, + { "tcam0_sm", DPP_FIELD_FLAG_RW, 20, 6, 0x0, 0x0 }, + { "tcam0_smen", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "tcam0_rm", DPP_FIELD_FLAG_RW, 13, 2, 0x2, 0x0 }, + { "tcam0_rmen", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "tcam0_enable", DPP_FIELD_FLAG_RW, 10, 1, 0x0, 0x0 }, + { "tcam0_flush", DPP_FIELD_FLAG_WO, 9, 1, 0x0, 0x0 }, + { "tcam0_unload", DPP_FIELD_FLAG_WO, 8, 
1, 0x0, 0x0 }, + { "tcam0_unload_addr", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_tcam_1_cmd_reg[] = { + { "tcam1_sm", DPP_FIELD_FLAG_RW, 20, 6, 0x0, 0x0 }, + { "tcam1_smen", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "tcam1_rm", DPP_FIELD_FLAG_RW, 13, 2, 0x2, 0x0 }, + { "tcam1_rmen", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "tcam1_enable", DPP_FIELD_FLAG_RW, 10, 1, 0x0, 0x0 }, + { "tcam1_flush", DPP_FIELD_FLAG_WO, 9, 1, 0x0, 0x0 }, + { "tcam1_unload", DPP_FIELD_FLAG_WO, 8, 1, 0x0, 0x0 }, + { "tcam1_unload_addr", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_en_0_reg[] = { + { "cfg_isch_port_en_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_en_1_reg[] = { + { "cfg_isch_port_en_1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_en_2_reg[] = { + { "cfg_isch_port_en_2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_en_3_reg[] = { + { "cfg_port_change_en_0", DPP_FIELD_FLAG_RW, 26, 1, 0x0, 0x0 }, + { "cfg_port_change_en_1", DPP_FIELD_FLAG_RW, 25, 1, 0x0, 0x0 }, + { "cfg_isch_port_en_3", DPP_FIELD_FLAG_RW, 22, 23, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_0_reg[] = { + { "cfg_port_l2_offset_mode_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_1_reg[] = { + { "cfg_port_l2_offset_mode_1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_2_reg[] = { + { "cfg_port_l2_offset_mode_2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_3_reg[] = { + { "cfg_port_l2_offset_mode_3", DPP_FIELD_FLAG_RW, 22, 23, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_0_reg[] = { + { "cfg_isch_fc_mode_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_1_reg[] = { + { "cfg_isch_fc_mode_1", DPP_FIELD_FLAG_RW, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_2_reg[] = { + { "cfg_isch_fc_mode_2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_3_reg[] = { + { "cfg_isch_fc_mode_3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_4_reg[] = { + { "cfg_isch_fc_mode_4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_5_reg[] = { + { "cfg_isch_fc_mode_5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_6_reg[] = { + { "cfg_isch_fc_mode_6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_port_fc_mode_7_reg[] = { + { "cfg_pfu_aging_en", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "cfg_isch_aging_en", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "cfg_isch_fc_mode_7", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_isch_aging_th_reg[] = { + { "cfg_pfu_delay_cycle", DPP_FIELD_FLAG_RW, 31, 16, 0x8000, 0x0 }, + { "cfg_isch_aging_th", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_fifo_th_0_reg[] = { + { "cfg_sch_fifo3_fc_th", DPP_FIELD_FLAG_RW, 29, 6, 0x30, 0x0 }, + { "cfg_sch_fifo2_fc_th", DPP_FIELD_FLAG_RW, 21, 6, 0x30, 0x0 }, + { "cfg_sch_fifo1_fc_th", DPP_FIELD_FLAG_RW, 13, 6, 0x30, 0x0 }, + { "cfg_sch_fifo0_fc_th", DPP_FIELD_FLAG_RW, 5, 6, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_cfg_1_reg[] = { + { "cfg_parser_max_len_en", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "cfg_parser_max_len", DPP_FIELD_FLAG_RW, 30, 15, 0x3e80, 0x0 }, + { "cfg_parser_min_len_en", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "cfg_parser_min_len", DPP_FIELD_FLAG_RW, 14, 7, 0x3c, 0x0 }, + { "sp_sch_sel", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_tcam_0_vld_reg[] = { + { "cfg_tcam0_vld", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_tcam_1_vld_reg[] = { + { "cfg_tcam1_vld", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cpu_port_en_mask_reg[] = { + { "cpu_port_en_mask", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_glbal_cfg_0_reg[] = { + { "pktrx_glbal_cfg_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_glbal_cfg_1_reg[] = { + { "pktrx_glbal_cfg_1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_glbal_cfg_2_reg[] = { + { "pktrx_glbal_cfg_2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_glbal_cfg_3_reg[] = { + { "pktrx_glbal_cfg_3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_nppu_start_reg[] = { + { "nppu_start", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_stat_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_stat_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 22, 3, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_stat_ind_data0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_debug_cnt_ovfl_mode_reg[] = { + { "debug_cnt_ovfl_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_ind_data0_reg[] = { + { "ind_data0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + 
{ "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data0_reg[] = { + { "ind_data0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data1_reg[] = { + { "ind_data1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data2_reg[] = { + { "ind_data2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data3_reg[] = { + { "ind_data3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data4_reg[] = { + { "ind_data4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data5_reg[] = { + { "ind_data5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data6_reg[] = { + { "ind_data6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ind_data7_reg[] = { + { "ind_data7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_idma_public_th_reg[] = { + { "idma_public_th", DPP_FIELD_FLAG_RW, 14, 15, 0x400, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_lif_public_th_reg[] = { + { "lif_public_th", DPP_FIELD_FLAG_RW, 14, 15, 0x400, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_idma_total_th_reg[] = { + { "idma_total_th", DPP_FIELD_FLAG_RW, 14, 15, 0x3ffc, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_lif_total_th_reg[] = { + { "lif_total_th", DPP_FIELD_FLAG_RW, 14, 15, 0x3fa2, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_total_th_reg[] = { + { "mc_total_th", DPP_FIELD_FLAG_RW, 14, 15, 0x400, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_cos10_th_reg[] = { + { "mc_cos1_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "mc_cos0_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "mc_cos1_th", DPP_FIELD_FLAG_RW, 29, 15, 0x80, 0x0 }, + { "mc_cos0_th", DPP_FIELD_FLAG_RW, 14, 15, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_cos32_th_reg[] = { + { "mc_cos3_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "mc_cos2_mode", 
DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "mc_cos3_th", DPP_FIELD_FLAG_RW, 29, 15, 0x80, 0x0 }, + { "mc_cos2_th", DPP_FIELD_FLAG_RW, 14, 15, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_cos54_th_reg[] = { + { "mc_cos5_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "mc_cos4_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "mc_cos5_th", DPP_FIELD_FLAG_RW, 29, 15, 0x80, 0x0 }, + { "mc_cos4_th", DPP_FIELD_FLAG_RW, 14, 15, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_cos76_th_reg[] = { + { "mc_cos7_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "mc_cos6_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "mc_cos7_th", DPP_FIELD_FLAG_RW, 29, 15, 0x80, 0x0 }, + { "mc_cos6_th", DPP_FIELD_FLAG_RW, 14, 15, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_debug_cnt_ovfl_mode_reg[] = { + { "debug_cnt_ovfl_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_se_key_aful_negate_cfg_reg[] = { + { "se_key_aful_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_sa_flag_reg[] = { + { "sa_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_ind_data_reg[] = { + { "ind_data", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_total_cnt_reg[] = { + { "total_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_idma_pub_cnt_reg[] = { + { "idma_pub_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_lif_pub_cnt_reg[] = { + { "lif_pub_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_mc_total_cnt_reg[] = { + { "mc_total_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_pbu_stat_pbu_thram_init_done_reg[] = { + { "pbu_thram_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_ifb_fptr_init_done_reg[] = { + { "ifb_fptr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_normal_uc_reg[] = { + { "weight_normal_uc", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_fabric_or_saip_reg[] = { + { "fabric_or_saip", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_ind_dat0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ind_access_done_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ind_command_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ind_dat0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ind_dat1_reg[] = { + { "ind_dat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_fabric_or_saip_reg[] = { + { "fabric_or_saip", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_max_pkt_len_reg[] = { + { "max_pkt_len", DPP_FIELD_FLAG_RW, 14, 15, 0x3f00, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_age_en_reg[] = { + { "age_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_age_mode_reg[] = { + { "age_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_age_value_time_reg[] = { + { "age_value_time", 
DPP_FIELD_FLAG_RW, 31, 32, 0xe4e1c0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_age_value_room_reg[] = { + { "age_value_room", DPP_FIELD_FLAG_RW, 31, 32, 0x1e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_age_out_cnt_reg[] = { + { "age_out_cnt", DPP_FIELD_FLAG_RW, 6, 7, 0xa, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_token_value_a_reg[] = { + { "token_value_a", DPP_FIELD_FLAG_RW, 31, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_token_value_b_reg[] = { + { "token_value_b", DPP_FIELD_FLAG_RW, 31, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_cfg_shap_en_p0_reg[] = { + { "cfg_shap_en_p0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_cfg_shap_en_p1_reg[] = { + { "cfg_shap_en_p1", DPP_FIELD_FLAG_RW, 31, 28, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_cfg_shap_en_tm_reg[] = { + { "cfg_shap_en_tm", DPP_FIELD_FLAG_RW, 31, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_stat_ind_status_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_stat_ind_cmd_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 27, 8, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_stat_ind_data0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_stat_debug_cnt_cfg_reg[] = { + { "debug_cnt_ovf_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "debug_cnt_rdclr_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "user_cnt_value", DPP_FIELD_FLAG_RW, 29, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_bfd_firstchk_th_reg[] = { + { "bfd_firstchk_th", DPP_FIELD_FLAG_RW, 18, 19, 0xc350, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_memid_0_pbu_fc_idmath_ram_reg[] = { + { "lif_th_15", DPP_FIELD_FLAG_RW, 164, 15, 0x0, 0x0 }, + { "lif_prv_15", DPP_FIELD_FLAG_RW, 149, 15, 0x0, 0x0 }, + { "idma_prv_15", DPP_FIELD_FLAG_RW, 134, 15, 0x0, 0x0 }, + { "idma_th_cos0_15", DPP_FIELD_FLAG_RW, 119, 15, 0x0, 0x0 }, + { 
"idma_th_cos1_15", DPP_FIELD_FLAG_RW, 104, 15, 0x0, 0x0 }, + { "idma_th_cos2_15", DPP_FIELD_FLAG_RW, 89, 15, 0x0, 0x0 }, + { "idma_th_cos3_15", DPP_FIELD_FLAG_RW, 74, 15, 0x0, 0x0 }, + { "idma_th_cos4_15", DPP_FIELD_FLAG_RW, 59, 15, 0x0, 0x0 }, + { "idma_th_cos5_15", DPP_FIELD_FLAG_RW, 44, 15, 0x0, 0x0 }, + { "idma_th_cos6_15", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "idma_th_cos7_15", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_memid_1_pbu_fc_macth_ram_reg[] = { + { "cos7_th", DPP_FIELD_FLAG_RW, 119, 15, 0x0, 0x0 }, + { "cos6_th", DPP_FIELD_FLAG_RW, 104, 15, 0x0, 0x0 }, + { "cos5_th", DPP_FIELD_FLAG_RW, 89, 15, 0x0, 0x0 }, + { "cos4_th", DPP_FIELD_FLAG_RW, 74, 15, 0x0, 0x0 }, + { "cos3_th", DPP_FIELD_FLAG_RW, 59, 15, 0x0, 0x0 }, + { "cos2_th", DPP_FIELD_FLAG_RW, 44, 15, 0x0, 0x0 }, + { "cos1_th", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "cos0_th", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_1_all_kind_port_cnt_reg[] = { + { "peak_port_cnt", DPP_FIELD_FLAG_RO, 29, 15, 0x0, 0x0 }, + { "current_port_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_ppu_pbu_ifb_req_vld_cnt_reg[] = { + { "ppu_pbu_ifb_req_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_ifb_rsp_vld_cnt_reg[] = { + { "pbu_ppu_ifb_rsp_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_odma_pbu_recy_ptr_vld_cnt_reg[] = { + { "odma_pbu_recy_ptr_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_ppu_pbu_mcode_pf_req_cnt_reg[] = { + { "ppu_pbu_mcode_pf_req_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_rsp_cnt_reg[] = { + { "pbu_ppu_mcode_pf_rsp_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_ppu_pbu_logic_pf_req_cnt_reg[] = { + { "ppu_pbu_logic_pf_req_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_rsp_cnt_reg[] = { + { "pbu_ppu_logic_pf_rsp_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_ppu_use_ptr_pulse_cnt_reg[] = { + { "ppu_use_ptr_pulse_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_ppu_pbu_wb_vld_cnt_reg[] = { + { "ppu_pbu_wb_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_reorder_para_vld_cnt_reg[] = { + { "pbu_ppu_reorder_para_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_se_pbu_dpi_key_vld_cnt_reg[] = { + { "se_pbu_dpi_key_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_se_dpi_rsp_datvld_cnt_reg[] = { + { "pbu_se_dpi_rsp_datvld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd1_cnt_reg[] = { + { "odma_pbu_ifb_rd1_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd2_cnt_reg[] = { + { "odma_pbu_ifb_rd2_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_no_rsp_cnt_reg[] = { + { "pbu_ppu_mcode_pf_no_rsp_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_no_rsp_cnt_reg[] = { + { "pbu_ppu_logic_pf_no_rsp_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_3_cpu_rd_ifb_data_reg[] = { + { "cpu_rd_ifb_data", DPP_FIELD_FLAG_RO, 2047, 2048, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_4_mux_sel_rgt_reg[] = { + { "current_port_cnt", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_memid_5_port_pub_cnt_reg[] = { + { "port_pub_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_1_idma_o_isu_pkt_pulse_total_cnt_reg[] = { + { "idma_o_isu_pkt_pulse_total_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 
}, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_1_idma_o_isu_epkt_pulse_total_cnt_reg[] = { + { "idma_o_isu_epkt_pulse_total_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_1_idma_dispkt_pulse_total_cnt_reg[] = { + { "idma_dispkt_pulse_total_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_0_idma_o_isu_pkt_pulse_cnt_reg[] = { + { "idma_o_isu_pkt_pulse_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_0_idma_o_isu_epkt_pulse_cnt_reg[] = { + { "idma_o_isu_epkt_pulse_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_stat_memid_0_idma_dispkt_pulse_cnt_reg[] = { + { "idma_dispkt_pulse_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_test_r_reg[] = { + { "test_r", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_debug_en_r_reg[] = { + { "debug_en_r", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_csr_dup_table_wr_data_reg[] = { + { "item_vld", DPP_FIELD_FLAG_RW, 24, 1, 0x0, 0x0 }, + { "flownum_vld", DPP_FIELD_FLAG_RW, 23, 1, 0x0, 0x0 }, + { "start_pc", DPP_FIELD_FLAG_RW, 22, 15, 0x0, 0x0 }, + { "flownum", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_csr_dup_table_rd_data_reg[] = { + { "item_vld", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "flownum_vld", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "start_pc", DPP_FIELD_FLAG_RO, 22, 15, 0x0, 0x0 }, + { "flownum", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_csr_dup_table_addr_reg[] = { + { "csr_dup_table_operation", DPP_FIELD_FLAG_WO, 6, 1, 0x0, 0x0 }, + { "csr_dup_table_addr", DPP_FIELD_FLAG_WO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_debug_vld_reg[] = { + { "ppu_debug_vld", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_319_288_reg[] = { + { "rsk_319_288", DPP_FIELD_FLAG_RW, 31, 32, 0x6d5a56da, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_287_256_reg[] = { + 
{ "rsk_287_256", DPP_FIELD_FLAG_RW, 31, 32, 0x255b0ec2, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_255_224_reg[] = { + { "rsk_255_224", DPP_FIELD_FLAG_RW, 31, 32, 0x4167253d, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_223_192_reg[] = { + { "rsk_223_192", DPP_FIELD_FLAG_RW, 31, 32, 0x43a38fb0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_191_160_reg[] = { + { "rsk_191_160", DPP_FIELD_FLAG_RW, 31, 32, 0xd0ca2bcb, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_159_128_reg[] = { + { "rsk_159_128", DPP_FIELD_FLAG_RW, 31, 32, 0xae7b30b4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_127_096_reg[] = { + { "rsk_127_096", DPP_FIELD_FLAG_RW, 31, 32, 0x77cb2da3, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_095_064_reg[] = { + { "rsk_095_064", DPP_FIELD_FLAG_RW, 31, 32, 0x8030f20c, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_063_032_reg[] = { + { "rsk_063_032", DPP_FIELD_FLAG_RW, 31, 32, 0x6a42b73b, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cop_thash_rsk_031_000_reg[] = { + { "rsk_031_000", DPP_FIELD_FLAG_RW, 31, 32, 0xbeac01fa, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cfg_ipv4_ipid_start_value_reg[] = { + { "cfg_ipv4_ipid_start_value", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cfg_ipv4_ipid_end_value_reg[] = { + { "cfg_ipv4_ipid_end_value", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cluster_mf_in_en_reg[] = { + { "cluster_mf_in_en", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_empty_reg[] = { + { "ppu_empty", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_addr_reg[] = { + { "instrmem_w_addr", DPP_FIELD_FLAG_WO, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_191_160_reg[] = { + { "instrmem_w_data_191_160", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_159_128_reg[] = { + { "instrmem_w_data_159_128", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_127_96_reg[] = { + { 
"instrmem_w_data_127_96", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_95_64_reg[] = { + { "instrmem_w_data_95_64", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_63_32_reg[] = { + { "instrmem_w_data_63_32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_w_data_31_0_reg[] = { + { "instrmem_w_data_31_0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_fwft_mf_fifo_prog_full_assert_cfg_reg[] = { + { "isu_fwft_mf_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x1e, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_fwft_mf_fifo_prog_full_negate_cfg_reg[] = { + { "isu_fwft_mf_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x1e, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_mask_reg[] = { + { "me7_interrupt_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "me6_interrupt_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "me5_interrupt_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "me4_interrupt_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "me3_interrupt_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "me2_interrupt_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "me1_interrupt_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "me0_interrupt_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu4k_cluster_wr_high_data_r_mex_reg[] = { + { "wr_high_data_r_mex", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu4k_cluster_wr_low_data_r_mex_reg[] = { + { "wr_low_data_r_mex", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu4k_cluster_addr_r_mex_reg[] = { + { "operate_type", DPP_FIELD_FLAG_WO, 8, 1, 0x0, 0x0 }, + { "addr_r_mex", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu4k_cluster_sdt_tbl_ind_access_done_reg[] = { + { "rd_addr_r_mex", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu4k_cluster_rd_high_data_r_mex_reg[] = { + { "rd_high_data_r_mex", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_ppu4k_cluster_rd_low_data_r_mex_reg[] = { + { "rd_low_data_r_mex", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_init_ok_reg[] = { + { "init_ok", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_rdy_reg[] = { + { "cpu_rd_rdy", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp0_reg[] = { + { "cpu_rd_data_tmp0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp1_reg[] = { + { "cpu_rd_data_tmp1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp2_reg[] = { + { "cpu_rd_data_tmp2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp3_reg[] = { + { "cpu_rd_data_tmp3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp4_reg[] = { + { "cpu_rd_data_tmp4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp5_reg[] = { + { "cpu_rd_data_tmp5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp6_reg[] = { + { "cpu_rd_data_tmp6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp7_reg[] = { + { "cpu_rd_data_tmp7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp8_reg[] = { + { "cpu_rd_data_tmp8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp9_reg[] = { + { "cpu_rd_data_tmp9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp10_reg[] = { + { "cpu_rd_data_tmp10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp11_reg[] = { + { "cpu_rd_data_tmp11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp12_reg[] = { + { "cpu_rd_data_tmp12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp13_reg[] = { + { "cpu_rd_data_tmp13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_cpu_rd_data_tmp14_reg[] = { + { "cpu_rd_data_tmp14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_cpu_rd_data_tmp15_reg[] = { + { "cpu_rd_data_tmp15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_v4_config_rgt_reg[] = { + { "lpm_v4_shift_sel", DPP_FIELD_FLAG_RW, 4, 2, 0x0, 0x0 }, + { "lpm_v4_sram_cmp_flag", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "lpm_v4_ddr3_addr_sel", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_v6_config_rgt_reg[] = { + { "lpm_v6_shift_sel", DPP_FIELD_FLAG_RW, 5, 2, 0x0, 0x0 }, + { "lpm_v6_sram_cmp_flag", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "lpm_v6_ddr3_addr_sel", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u0_pfull_ast_reg[] = { + { "lpm_ext_rsp_fifo_u0_pfull_ast", DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash_age_pat_cfg_reg[] = { + { "hash_age_pat_cfg", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_learn_rdy_cfg_reg[] = { + { "learn_rdy_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_pful_cfg_reg[] = { + { "kschd_as_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1326, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_dir_pful_cfg_reg[] = { + { "kschd_dir_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1326, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_ept_cfg_reg[] = { + { "kschd_as_ept_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0xc18, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_cpu_arbi_pful_cfg_reg[] = { + { "cpu_arbi_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1326, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_pbu_pful_cfg_reg[] = { + { "kschd_pbu_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1326, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_dir_pful_cfg_reg[] = { + { "rschd_dir_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00360036, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_dir_ept_cfg_reg[] = { + { "rschd_dir_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x000a000a, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_cmd_rgt_reg[] = { + { 
"rd_flag", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "mask", DPP_FIELD_FLAG_RW, 20, 4, 0x0, 0x0 }, + { "reg_sram_flag", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "zgroup_id", DPP_FIELD_FLAG_RW, 15, 2, 0x0, 0x0 }, + { "zblock_id", DPP_FIELD_FLAG_RW, 13, 3, 0x0, 0x0 }, + { "zcell_id", DPP_FIELD_FLAG_RW, 10, 2, 0x0, 0x0 }, + { "addr", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp0_reg[] = { + { "cpu_wr_data_tmp0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp1_reg[] = { + { "cpu_wr_data_tmp1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp2_reg[] = { + { "cpu_wr_data_tmp2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp3_reg[] = { + { "cpu_wr_data_tmp3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp4_reg[] = { + { "cpu_wr_data_tmp4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp5_reg[] = { + { "cpu_wr_data_tmp5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp6_reg[] = { + { "cpu_wr_data_tmp6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp7_reg[] = { + { "cpu_wr_data_tmp7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp8_reg[] = { + { "cpu_wr_data_tmp8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp9_reg[] = { + { "cpu_wr_data_tmp9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp10_reg[] = { + { "cpu_wr_data_tmp10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp11_reg[] = { + { "cpu_wr_data_tmp11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp12_reg[] = { + { "cpu_wr_data_tmp12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se4k_se_alg_cpu_wr_data_tmp13_reg[] = { + { "cpu_wr_data_tmp13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp14_reg[] = { + { "cpu_wr_data_tmp14", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_wr_data_tmp15_reg[] = { + { "cpu_wr_data_tmp15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_rdy_reg[] = { + { "cpu_rd_rdy", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp0_reg[] = { + { "cpu_rd_data_tmp0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp1_reg[] = { + { "cpu_rd_data_tmp1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp2_reg[] = { + { "cpu_rd_data_tmp2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp3_reg[] = { + { "cpu_rd_data_tmp3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp4_reg[] = { + { "cpu_rd_data_tmp4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp5_reg[] = { + { "cpu_rd_data_tmp5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp6_reg[] = { + { "cpu_rd_data_tmp6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp7_reg[] = { + { "cpu_rd_data_tmp7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp8_reg[] = { + { "cpu_rd_data_tmp8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp9_reg[] = { + { "cpu_rd_data_tmp9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp10_reg[] = { + { "cpu_rd_data_tmp10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp11_reg[] = { + { "cpu_rd_data_tmp11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp12_reg[] = { + { "cpu_rd_data_tmp12", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp13_reg[] = { + { "cpu_rd_data_tmp13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp14_reg[] = { + { "cpu_rd_data_tmp14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_cpu_rd_data_tmp15_reg[] = { + { "cpu_rd_data_tmp15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash0_ext_cfg_rgt_reg[] = { + { "hash0_ext_mode", DPP_FIELD_FLAG_RW, 8, 8, 0x0, 0x0 }, + { "hash0_ext_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash1_ext_cfg_rgt_reg[] = { + { "hash1_ext_mode", DPP_FIELD_FLAG_RW, 8, 8, 0x0, 0x0 }, + { "hash1_ext_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash2_ext_cfg_rgt_reg[] = { + { "hash2_ext_mode", DPP_FIELD_FLAG_RW, 8, 8, 0x0, 0x0 }, + { "hash2_ext_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash3_ext_cfg_rgt_reg[] = { + { "hash3_ext_mode", DPP_FIELD_FLAG_RW, 8, 8, 0x0, 0x0 }, + { "hash3_ext_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash0_tbl30_depth_reg[] = { + { "hash0_tbl3_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash0_tbl2_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash0_tbl1_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash0_tbl0_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash0_tbl74_depth_reg[] = { + { "hash0_tbl7_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash0_tbl6_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash0_tbl5_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash0_tbl4_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash1_tbl30_depth_reg[] = { + { "hash1_tbl3_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash1_tbl2_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash1_tbl1_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { 
"hash1_tbl0_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash1_tbl74_depth_reg[] = { + { "hash1_tbl7_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash1_tbl6_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash1_tbl5_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash1_tbl4_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash2_tbl30_depth_reg[] = { + { "hash2_tbl3_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash2_tbl2_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash2_tbl1_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash2_tbl0_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash2_tbl74_depth_reg[] = { + { "hash2_tbl7_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash2_tbl6_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash2_tbl5_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash2_tbl4_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash3_tbl30_depth_reg[] = { + { "hash3_tbl3_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash3_tbl2_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash3_tbl1_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash3_tbl0_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash3_tbl74_depth_reg[] = { + { "hash3_tbl7_depth", DPP_FIELD_FLAG_RW, 31, 8, 0x12, 0x0 }, + { "hash3_tbl6_depth", DPP_FIELD_FLAG_RW, 23, 8, 0x12, 0x0 }, + { "hash3_tbl5_depth", DPP_FIELD_FLAG_RW, 15, 8, 0x12, 0x0 }, + { "hash3_tbl4_depth", DPP_FIELD_FLAG_RW, 7, 8, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_wr_rsp_cfg_reg[] = { + { "wr_rsp_fifo_cfg", DPP_FIELD_FLAG_RW, 9, 10, 0x18c, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash_mono_flag_reg[] = { + { "hash3_mono_flag", DPP_FIELD_FLAG_RW, 31, 8, 0x0, 0x0 }, + { "hash2_mono_flag", DPP_FIELD_FLAG_RW, 23, 8, 0x0, 0x0 }, + { "hash1_mono_flag", DPP_FIELD_FLAG_RW, 15, 8, 0x0, 0x0 }, + { "hash0_mono_flag", 
DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash10_ext_crc_cfg_reg[] = { + { "hash1_crc_cfg", DPP_FIELD_FLAG_RW, 31, 16, 0x5555, 0x0 }, + { "hash0_crc_cfg", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_hash32_ext_crc_cfg_reg[] = { + { "hash3_crc_cfg", DPP_FIELD_FLAG_RW, 31, 16, 0xffff, 0x0 }, + { "hash2_crc_cfg", DPP_FIELD_FLAG_RW, 15, 16, 0xaaaa, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_zblock_service_configure_reg[] = { + { "service_sel", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "hash_channel_sel", DPP_FIELD_FLAG_RW, 2, 2, 0x0, 0x0 }, + { "st_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_zblock_hash_zcell_mono_reg[] = { + { "ha_zcell3_mono_flag", DPP_FIELD_FLAG_RW, 27, 1, 0x0, 0x0 }, + { "ha_zcell3_tbl_id", DPP_FIELD_FLAG_RW, 26, 3, 0x0, 0x0 }, + { "ha_zcell2_mono_flag", DPP_FIELD_FLAG_RW, 19, 1, 0x0, 0x0 }, + { "ha_zcell2_tbl_id", DPP_FIELD_FLAG_RW, 18, 3, 0x0, 0x0 }, + { "ha_zcell1_mono_flag", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "ha_zcell1_tbl_id", DPP_FIELD_FLAG_RW, 10, 3, 0x0, 0x0 }, + { "ha_zcell0_mono_flag", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "ha_zcell0_tbl_id", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se4k_se_alg_zlock_hash_zreg_mono_reg[] = { + { "ha_zreg3_mono_flag", DPP_FIELD_FLAG_RW, 27, 1, 0x0, 0x0 }, + { "ha_zreg3_tbl_id", DPP_FIELD_FLAG_RW, 26, 3, 0x0, 0x0 }, + { "ha_zreg2_mono_flag", DPP_FIELD_FLAG_RW, 19, 1, 0x0, 0x0 }, + { "ha_zreg2_tbl_id", DPP_FIELD_FLAG_RW, 18, 3, 0x0, 0x0 }, + { "ha_zreg1_mono_flag", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "ha_zreg1_tbl_id", DPP_FIELD_FLAG_RW, 10, 3, 0x0, 0x0 }, + { "ha_zreg0_mono_flag", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "ha_zreg0_tbl_id", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_init_done_reg[] = { + { "init_done", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_wdat0_reg[] = { + { "cpu_ind_wdat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_smmu0_smmu0_cpu_ind_wdat1_reg[] = { + { "cpu_ind_wdat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_wdat2_reg[] = { + { "cpu_ind_wdat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_wdat3_reg[] = { + { "cpu_ind_wdat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_cmd_reg[] = { + { "cpu_ind_rw", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "cpu_ind_rd_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "cpu_req_mode", DPP_FIELD_FLAG_RW, 27, 2, 0x0, 0x0 }, + { "cpu_ind_addr", DPP_FIELD_FLAG_RW, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rd_done_reg[] = { + { "cpu_ind_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rdat0_reg[] = { + { "cpu_ind_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rdat1_reg[] = { + { "cpu_ind_rdat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rdat2_reg[] = { + { "cpu_ind_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rdat3_reg[] = { + { "cpu_ind_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_plcr_mono_reg[] = { + { "cfg_plcr_mono", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_wr_arb_cpu_rdy_reg[] = { + { "wr_arb_cpu_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_tm_stat_en_cfg_reg[] = { + { "tm_stat_en_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat0_reg[] = { + { "ddr_wdat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_arbi_ser_rpful_reg[] = { + { "dir_arbi_ser_rpful", DPP_FIELD_FLAG_RW, 9, 10, 0x14a, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_wr_arbi_pful2_reg[] = { + { "hash_wr_pful", DPP_FIELD_FLAG_RW, 19, 10, 0x14a, 0x0 }, + { "dir_wr_pful", DPP_FIELD_FLAG_RW, 9, 10, 0x14a, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_etm_tbl_cfg_reg[] = 
{ + { "etm_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_cash_addr_pful_reg[] = { + { "cfg_cash_addr_pful", DPP_FIELD_FLAG_RW, 19, 20, 0x7e1f8, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_rfifo_cfg_reg[] = { + { "brst_fwft_fifo_prog_empty_assert", DPP_FIELD_FLAG_RW, 31, 10, 0x2, + 0x0 }, + { "brst_fwft_fifo_prog_empty_negate", DPP_FIELD_FLAG_RW, 21, 10, 0x2, + 0x0 }, + { "brst_fwft_fifo_prog_full_assert", DPP_FIELD_FLAG_RW, 11, 6, 0x18, + 0x0 }, + { "brst_fwft_fifo_prog_full_negate", DPP_FIELD_FLAG_RW, 5, 6, 0x18, + 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache_req_fifo_cfg_reg[] = { + { "srch_fifo_pfull_assert", DPP_FIELD_FLAG_RW, 9, 5, 0xc, 0x0 }, + { "srch_fifo_pfull_negate", DPP_FIELD_FLAG_RW, 4, 5, 0xc, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_wdat0_reg[] = { + { "cpu_ind_eram_wdat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_port_sel_cfg_reg[] = { + { "etm_port0_sel_cfg", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "etm_port1_sel_cfg", DPP_FIELD_FLAG_RW, 14, 5, 0x1, 0x0 }, + { "etm_port2_sel_cfg", DPP_FIELD_FLAG_RW, 9, 5, 0xf, 0x0 }, + { "etm_port3_sel_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_stat_cfg_reg[] = { + { "stat_overflow_mode", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "tm_stat_mode_cfg", DPP_FIELD_FLAG_RW, 6, 3, 0x4, 0x0 }, + { "tm_flow_control_cfg", DPP_FIELD_FLAG_RW, 3, 4, 0xf, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_eram_depth_reg[] = { + { "ppu_eram_depth", DPP_FIELD_FLAG_RW, 18, 19, 0x38000, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_eram_base_addr_reg[] = { + { "ppu_eram_base_addr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_ddr_base_addr_reg[] = { + { "ppu_ddr_base_addr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_plcr0_base_addr_reg[] = { + { "plcr0_base_addr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_start_addr_cfg_reg[] = { + { 
"etm_stat_start_addr_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_depth_cfg_reg[] = { + { "etm_stat_depth_cfg", DPP_FIELD_FLAG_RW, 2, 3, 0x5, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cycle_mov_en_cfg_reg[] = { + { "cycle_mov_en_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat0_reg[] = { + { "wdat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_ctrl_tmp0_reg[] = { + { "reg_tcam_flag", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "flush", DPP_FIELD_FLAG_RW, 29, 8, 0x0, 0x0 }, + { "rd_wr", DPP_FIELD_FLAG_RW, 21, 1, 0x0, 0x0 }, + { "wr_mode", DPP_FIELD_FLAG_RW, 20, 8, 0x0, 0x0 }, + { "dat_or_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x0, 0x0 }, + { "ram_sel", DPP_FIELD_FLAG_RW, 11, 3, 0x0, 0x0 }, + { "addr", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_ctrl_tmp1_reg[] = { + { "row_or_col_msk", DPP_FIELD_FLAG_RW, 9, 1, 0x0, 0x0 }, + { "vben", DPP_FIELD_FLAG_RW, 8, 1, 0x0, 0x0 }, + { "vbit", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_rd_done_reg[] = { + { "cpu_ind_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat0_reg[] = { + { "cpu_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat1_reg[] = { + { "cpu_rdat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat2_reg[] = { + { "cpu_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat3_reg[] = { + { "cpu_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat4_reg[] = { + { "cpu_rdat4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat5_reg[] = { + { "cpu_rdat5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat6_reg[] = { + { "cpu_rdat6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat7_reg[] = { + { "cpu_rdat7", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat8_reg[] = { + { "cpu_rdat8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat9_reg[] = { + { "cpu_rdat9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat10_reg[] = { + { "cpu_rdat10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat11_reg[] = { + { "cpu_rdat11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat12_reg[] = { + { "cpu_rdat12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat13_reg[] = { + { "cpu_rdat13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat14_reg[] = { + { "cpu_rdat14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat15_reg[] = { + { "cpu_rdat15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat16_reg[] = { + { "cpu_rdat16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat17_reg[] = { + { "cpu_rdat17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat18_reg[] = { + { "cpu_rdat18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_rdat19_reg[] = { + { "cpu_rdat19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_qvbo_reg[] = { + { "qvbo", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cnt_overflow_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_queue_ram0_159_0_reg[] = { + { "cara_drop", DPP_FIELD_FLAG_RW, 147, 1, 0x0, 0x0 }, + { "cara_plcr_en", DPP_FIELD_FLAG_RW, 146, 1, 0x0, 0x0 }, + { "cara_profile_id", DPP_FIELD_FLAG_RW, 145, 9, 0x0, 0x0 }, + { "cara_tq_h", DPP_FIELD_FLAG_RO, 136, 13, 0x0, 0x0 }, + { "cara_tq_l", DPP_FIELD_FLAG_RO, 123, 32, 0x0, 0x0 }, + { "cara_ted", DPP_FIELD_FLAG_RO, 91, 19, 
0x0, 0x0 }, + { "cara_tcd", DPP_FIELD_FLAG_RO, 72, 19, 0x0, 0x0 }, + { "cara_tei", DPP_FIELD_FLAG_RO, 53, 27, 0x0, 0x0 }, + { "cara_tci", DPP_FIELD_FLAG_RO, 26, 27, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_profile_ram1_255_0_reg[] = { + { "cara_profile_wr", DPP_FIELD_FLAG_RW, 224, 1, 0x0, 0x0 }, + { "cara_pkt_sign", DPP_FIELD_FLAG_RW, 216, 1, 0x0, 0x0 }, + { "cara_cd", DPP_FIELD_FLAG_RW, 215, 2, 0x0, 0x0 }, + { "cara_cf", DPP_FIELD_FLAG_RW, 213, 1, 0x0, 0x0 }, + { "cara_cm", DPP_FIELD_FLAG_RW, 212, 1, 0x0, 0x0 }, + { "cara_eir", DPP_FIELD_FLAG_RW, 211, 24, 0x0, 0x0 }, + { "cara_cir", DPP_FIELD_FLAG_RW, 187, 24, 0x0, 0x0 }, + { "cara_ebs_pbs", DPP_FIELD_FLAG_RW, 163, 27, 0x0, 0x0 }, + { "cara_cbs", DPP_FIELD_FLAG_RW, 136, 27, 0x0, 0x0 }, + { "cara_c_pri1", DPP_FIELD_FLAG_RW, 109, 5, 0x0, 0x0 }, + { "cara_c_pri2", DPP_FIELD_FLAG_RW, 104, 5, 0x0, 0x0 }, + { "cara_c_pri3", DPP_FIELD_FLAG_RW, 99, 5, 0x0, 0x0 }, + { "cara_c_pri4", DPP_FIELD_FLAG_RW, 94, 5, 0x0, 0x0 }, + { "cara_c_pri5", DPP_FIELD_FLAG_RW, 89, 5, 0x0, 0x0 }, + { "cara_c_pri6", DPP_FIELD_FLAG_RW, 84, 5, 0x0, 0x0 }, + { "cara_c_pri7", DPP_FIELD_FLAG_RW, 79, 5, 0x0, 0x0 }, + { "cara_e_g_pri1", DPP_FIELD_FLAG_RW, 74, 5, 0x0, 0x0 }, + { "cara_e_g_pri2", DPP_FIELD_FLAG_RW, 69, 5, 0x0, 0x0 }, + { "cara_e_g_pri3", DPP_FIELD_FLAG_RW, 64, 5, 0x0, 0x0 }, + { "cara_e_g_pri4", DPP_FIELD_FLAG_RW, 59, 5, 0x0, 0x0 }, + { "cara_e_g_pri5", DPP_FIELD_FLAG_RW, 54, 5, 0x0, 0x0 }, + { "cara_e_g_pri6", DPP_FIELD_FLAG_RW, 49, 5, 0x0, 0x0 }, + { "cara_e_g_pri7", DPP_FIELD_FLAG_RW, 44, 5, 0x0, 0x0 }, + { "cara_e_y_pri0", DPP_FIELD_FLAG_RW, 39, 5, 0x0, 0x0 }, + { "cara_e_y_pri1", DPP_FIELD_FLAG_RW, 34, 5, 0x0, 0x0 }, + { "cara_e_y_pri2", DPP_FIELD_FLAG_RW, 29, 5, 0x0, 0x0 }, + { "cara_e_y_pri3", DPP_FIELD_FLAG_RW, 24, 5, 0x0, 0x0 }, + { "cara_e_y_pri4", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "cara_e_y_pri5", DPP_FIELD_FLAG_RW, 14, 5, 0x0, 0x0 }, + { "cara_e_y_pri6", DPP_FIELD_FLAG_RW, 9, 5, 0x0, 0x0 }, + { "cara_e_y_pri7", 
DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_qovs_ram_ram2_reg[] = { + { "cara_qovs", DPP_FIELD_FLAG_RW, 1, 2, 0x2, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_look_up_table1_reg[] = { + { "cara_flow_id", DPP_FIELD_FLAG_RW, 14, 12, 0x0, 0x0 }, + { "cara_sp", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_pkt_des_i_cnt_reg[] = { + { "cara_pkt_des_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_green_pkt_i_cnt_reg[] = { + { "cara_green_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_yellow_pkt_i_cnt_reg[] = { + { "cara_yellow_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_red_pkt_i_cnt_reg[] = { + { "cara_red_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_pkt_des_o_cnt_reg[] = { + { "cara_pkt_des_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_green_pkt_o_cnt_reg[] = { + { "cara_green_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_yellow_pkt_o_cnt_reg[] = { + { "cara_yellow_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_red_pkt_o_cnt_reg[] = { + { "cara_red_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_pkt_des_fc_for_cfg_cnt_reg[] = { + { "cara_pkt_des_fc_for_cfg_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_appoint_qnum_or_sp_reg[] = { + { "cara_appoint_qnum_or_not", DPP_FIELD_FLAG_RW, 19, 1, 0x0, 0x0 }, + { "cara_appoint_sp_or_not", DPP_FIELD_FLAG_RW, 18, 1, 0x0, 0x0 }, + { "cara_plcr_stat_sp", DPP_FIELD_FLAG_RW, 17, 3, 0x0, 0x0 }, + { "cara_plcr_stat_qnum", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_cfgmt_count_mode_reg[] = { + { "cara_cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "cara_cfgmt_count_rd_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; 
+DPP_FIELD_T g_stat_car0_cara_pkt_size_cnt_reg[] = { + { "cara_pkt_size_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_plcr_init_dont_reg[] = { + { "cara_plcr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_queue_ram0_159_0_reg[] = { + { "carb_drop", DPP_FIELD_FLAG_RW, 147, 1, 0x0, 0x0 }, + { "carb_plcr_en", DPP_FIELD_FLAG_RW, 146, 1, 0x0, 0x0 }, + { "carb_profile_id", DPP_FIELD_FLAG_RW, 145, 9, 0x0, 0x0 }, + { "carb_tq_h", DPP_FIELD_FLAG_RO, 136, 13, 0x0, 0x0 }, + { "carb_tq_l", DPP_FIELD_FLAG_RO, 123, 32, 0x0, 0x0 }, + { "carb_ted", DPP_FIELD_FLAG_RO, 91, 19, 0x0, 0x0 }, + { "carb_tcd", DPP_FIELD_FLAG_RO, 72, 19, 0x0, 0x0 }, + { "carb_tei", DPP_FIELD_FLAG_RO, 53, 27, 0x0, 0x0 }, + { "carb_tci", DPP_FIELD_FLAG_RO, 26, 27, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_profile_ram1_255_0_reg[] = { + { "carb_profile_wr", DPP_FIELD_FLAG_RW, 224, 1, 0x0, 0x0 }, + { "carb_random_discard_en_e", DPP_FIELD_FLAG_RW, 218, 1, 0x0, 0x0 }, + { "carb_random_discard_en_c", DPP_FIELD_FLAG_RW, 217, 1, 0x0, 0x0 }, + { "carb_pkt_sign", DPP_FIELD_FLAG_RW, 216, 1, 0x0, 0x0 }, + { "carb_cd", DPP_FIELD_FLAG_RW, 215, 2, 0x0, 0x0 }, + { "carb_cf", DPP_FIELD_FLAG_RW, 213, 1, 0x0, 0x0 }, + { "carb_cm", DPP_FIELD_FLAG_RW, 212, 1, 0x0, 0x0 }, + { "carb_eir", DPP_FIELD_FLAG_RW, 211, 24, 0x0, 0x0 }, + { "carb_cir", DPP_FIELD_FLAG_RW, 187, 24, 0x0, 0x0 }, + { "carb_ebs_pbs", DPP_FIELD_FLAG_RW, 163, 27, 0x0, 0x0 }, + { "carb_cbs", DPP_FIELD_FLAG_RW, 136, 27, 0x0, 0x0 }, + { "carb_c_pri1", DPP_FIELD_FLAG_RW, 109, 5, 0x0, 0x0 }, + { "carb_c_pri2", DPP_FIELD_FLAG_RW, 104, 5, 0x0, 0x0 }, + { "carb_c_pri3", DPP_FIELD_FLAG_RW, 99, 5, 0x0, 0x0 }, + { "carb_c_pri4", DPP_FIELD_FLAG_RW, 94, 5, 0x0, 0x0 }, + { "carb_c_pri5", DPP_FIELD_FLAG_RW, 89, 5, 0x0, 0x0 }, + { "carb_c_pri6", DPP_FIELD_FLAG_RW, 84, 5, 0x0, 0x0 }, + { "carb_c_pri7", DPP_FIELD_FLAG_RW, 79, 5, 0x0, 0x0 }, + { "carb_e_g_pri1", DPP_FIELD_FLAG_RW, 74, 5, 0x0, 0x0 }, + { 
"carb_e_g_pri2", DPP_FIELD_FLAG_RW, 69, 5, 0x0, 0x0 }, + { "carb_e_g_pri3", DPP_FIELD_FLAG_RW, 64, 5, 0x0, 0x0 }, + { "carb_e_g_pri4", DPP_FIELD_FLAG_RW, 59, 5, 0x0, 0x0 }, + { "carb_e_g_pri5", DPP_FIELD_FLAG_RW, 54, 5, 0x0, 0x0 }, + { "carb_e_g_pri6", DPP_FIELD_FLAG_RW, 49, 5, 0x0, 0x0 }, + { "carb_e_g_pri7", DPP_FIELD_FLAG_RW, 44, 5, 0x0, 0x0 }, + { "carb_e_y_pri0", DPP_FIELD_FLAG_RW, 39, 5, 0x0, 0x0 }, + { "carb_e_y_pri1", DPP_FIELD_FLAG_RW, 34, 5, 0x0, 0x0 }, + { "carb_e_y_pri2", DPP_FIELD_FLAG_RW, 29, 5, 0x0, 0x0 }, + { "carb_e_y_pri3", DPP_FIELD_FLAG_RW, 24, 5, 0x0, 0x0 }, + { "carb_e_y_pri4", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "carb_e_y_pri5", DPP_FIELD_FLAG_RW, 14, 5, 0x0, 0x0 }, + { "carb_e_y_pri6", DPP_FIELD_FLAG_RW, 9, 5, 0x0, 0x0 }, + { "carb_e_y_pri7", DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_qovs_ram_ram2_reg[] = { + { "carb_qovs", DPP_FIELD_FLAG_RW, 1, 2, 0x2, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_look_up_table2_reg[] = { + { "carb_flow_id", DPP_FIELD_FLAG_RW, 12, 10, 0x0, 0x0 }, + { "carb_sp", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_pkt_des_i_cnt_reg[] = { + { "carb_pkt_des_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_green_pkt_i_cnt_reg[] = { + { "carb_green_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_yellow_pkt_i_cnt_reg[] = { + { "carb_yellow_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_red_pkt_i_cnt_reg[] = { + { "carb_red_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_pkt_des_o_cnt_reg[] = { + { "carb_pkt_des_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_green_pkt_o_cnt_reg[] = { + { "carb_green_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_yellow_pkt_o_cnt_reg[] = { + { "carb_yellow_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_stat_car0_carb_red_pkt_o_cnt_reg[] = { + { "carb_red_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_pkt_des_fc_for_cfg_cnt_reg[] = { + { "carb_pkt_des_fc_for_cfg_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_appoint_qnum_or_sp_reg[] = { + { "carb_appoint_qnum_or_not", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "carb_appoint_sp_or_not", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "carb_plcr_stat_sp", DPP_FIELD_FLAG_RW, 14, 3, 0x0, 0x0 }, + { "carb_plcr_stat_qnum", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_cfgmt_count_mode_reg[] = { + { "carb_cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "carb_cfgmt_count_rd_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_pkt_size_cnt_reg[] = { + { "carb_pkt_size_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_plcr_init_dont_reg[] = { + { "carb_plcr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_queue_ram0_159_0_reg[] = { + { "carc_drop", DPP_FIELD_FLAG_RW, 147, 1, 0x0, 0x0 }, + { "carc_plcr_en", DPP_FIELD_FLAG_RW, 146, 1, 0x0, 0x0 }, + { "carc_profile_id", DPP_FIELD_FLAG_RW, 145, 9, 0x0, 0x0 }, + { "carc_tq_h", DPP_FIELD_FLAG_RO, 136, 13, 0x0, 0x0 }, + { "carc_tq_l", DPP_FIELD_FLAG_RO, 123, 32, 0x0, 0x0 }, + { "carc_ted", DPP_FIELD_FLAG_RO, 91, 19, 0x0, 0x0 }, + { "carc_tcd", DPP_FIELD_FLAG_RO, 72, 19, 0x0, 0x0 }, + { "carc_tei", DPP_FIELD_FLAG_RO, 53, 27, 0x0, 0x0 }, + { "carc_tci", DPP_FIELD_FLAG_RO, 26, 27, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_profile_ram1_255_0_reg[] = { + { "carc_profile_wr", DPP_FIELD_FLAG_RW, 224, 1, 0x0, 0x0 }, + { "carc_random_discard_en_e", DPP_FIELD_FLAG_RW, 218, 1, 0x0, 0x0 }, + { "carc_random_discard_en_c", DPP_FIELD_FLAG_RW, 217, 1, 0x0, 0x0 }, + { "carc_pkt_sign", DPP_FIELD_FLAG_RW, 216, 1, 0x0, 0x0 }, + { "carc_cd", DPP_FIELD_FLAG_RW, 215, 2, 0x0, 0x0 }, + { "carc_cf", 
DPP_FIELD_FLAG_RW, 213, 1, 0x0, 0x0 }, + { "carc_cm", DPP_FIELD_FLAG_RW, 212, 1, 0x0, 0x0 }, + { "carc_eir", DPP_FIELD_FLAG_RW, 211, 24, 0x0, 0x0 }, + { "carc_cir", DPP_FIELD_FLAG_RW, 187, 24, 0x0, 0x0 }, + { "carc_ebs_pbs", DPP_FIELD_FLAG_RW, 163, 27, 0x0, 0x0 }, + { "carc_cbs", DPP_FIELD_FLAG_RW, 136, 27, 0x0, 0x0 }, + { "carc_c_pri1", DPP_FIELD_FLAG_RW, 109, 5, 0x0, 0x0 }, + { "carc_c_pri2", DPP_FIELD_FLAG_RW, 104, 5, 0x0, 0x0 }, + { "carc_c_pri3", DPP_FIELD_FLAG_RW, 99, 5, 0x0, 0x0 }, + { "carc_c_pri4", DPP_FIELD_FLAG_RW, 94, 5, 0x0, 0x0 }, + { "carc_c_pri5", DPP_FIELD_FLAG_RW, 89, 5, 0x0, 0x0 }, + { "carc_c_pri6", DPP_FIELD_FLAG_RW, 84, 5, 0x0, 0x0 }, + { "carc_c_pri7", DPP_FIELD_FLAG_RW, 79, 5, 0x0, 0x0 }, + { "carc_e_g_pri1", DPP_FIELD_FLAG_RW, 74, 5, 0x0, 0x0 }, + { "carc_e_g_pri2", DPP_FIELD_FLAG_RW, 69, 5, 0x0, 0x0 }, + { "carc_e_g_pri3", DPP_FIELD_FLAG_RW, 64, 5, 0x0, 0x0 }, + { "carc_e_g_pri4", DPP_FIELD_FLAG_RW, 59, 5, 0x0, 0x0 }, + { "carc_e_g_pri5", DPP_FIELD_FLAG_RW, 54, 5, 0x0, 0x0 }, + { "carc_e_g_pri6", DPP_FIELD_FLAG_RW, 49, 5, 0x0, 0x0 }, + { "carc_e_g_pri7", DPP_FIELD_FLAG_RW, 44, 5, 0x0, 0x0 }, + { "carc_e_y_pri0", DPP_FIELD_FLAG_RW, 39, 5, 0x0, 0x0 }, + { "carc_e_y_pri1", DPP_FIELD_FLAG_RW, 34, 5, 0x0, 0x0 }, + { "carc_e_y_pri2", DPP_FIELD_FLAG_RW, 29, 5, 0x0, 0x0 }, + { "carc_e_y_pri3", DPP_FIELD_FLAG_RW, 24, 5, 0x0, 0x0 }, + { "carc_e_y_pri4", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "carc_e_y_pri5", DPP_FIELD_FLAG_RW, 14, 5, 0x0, 0x0 }, + { "carc_e_y_pri6", DPP_FIELD_FLAG_RW, 9, 5, 0x0, 0x0 }, + { "carc_e_y_pri7", DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_qovs_ram_ram2_reg[] = { + { "carc_qovs", DPP_FIELD_FLAG_RW, 1, 2, 0x2, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_pkt_des_i_cnt_reg[] = { + { "carc_pkt_des_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_green_pkt_i_cnt_reg[] = { + { "carc_green_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_stat_car0_carc_yellow_pkt_i_cnt_reg[] = { + { "carc_yellow_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_red_pkt_i_cnt_reg[] = { + { "carc_red_pkt_i_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_pkt_des_o_cnt_reg[] = { + { "carc_pkt_des_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_green_pkt_o_cnt_reg[] = { + { "carc_green_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_yellow_pkt_o_cnt_reg[] = { + { "carc_yellow_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_red_pkt_o_cnt_reg[] = { + { "carc_red_pkt_o_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_pkt_des_fc_for_cfg_cnt_reg[] = { + { "carc_pkt_des_fc_for_cfg_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_appoint_qnum_or_sp_reg[] = { + { "carc_appoint_qnum_or_not", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "carc_appoint_sp_or_not", DPP_FIELD_FLAG_RW, 13, 1, 0x0, 0x0 }, + { "carc_plcr_stat_sp", DPP_FIELD_FLAG_RW, 12, 3, 0x0, 0x0 }, + { "carc_plcr_stat_qnum", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_cfgmt_count_mode_reg[] = { + { "carc_cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "carc_cfgmt_count_rd_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_pkt_size_cnt_reg[] = { + { "carc_pkt_size_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_plcr_init_dont_reg[] = { + { "carc_plcr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_random_ram_reg[] = { + { "para8_e", DPP_FIELD_FLAG_RW, 491, 27, 0x0, 0x0 }, + { "para7_e", DPP_FIELD_FLAG_RW, 464, 27, 0x0, 0x0 }, + { "para6_e", DPP_FIELD_FLAG_RW, 437, 27, 0x0, 0x0 }, + { "para5_e", DPP_FIELD_FLAG_RW, 410, 14, 0x0, 0x0 }, + { "para4_h_e", DPP_FIELD_FLAG_RW, 396, 9, 0x0, 0x0 }, + { 
"para4_l_e", DPP_FIELD_FLAG_RW, 387, 32, 0x0, 0x0 }, + { "para3_e", DPP_FIELD_FLAG_RW, 355, 14, 0x0, 0x0 }, + { "para2_h_e", DPP_FIELD_FLAG_RW, 341, 9, 0x0, 0x0 }, + { "para2_l_e", DPP_FIELD_FLAG_RW, 332, 32, 0x0, 0x0 }, + { "para1_e", DPP_FIELD_FLAG_RW, 300, 14, 0x0, 0x0 }, + { "para0_h_e", DPP_FIELD_FLAG_RW, 286, 9, 0x0, 0x0 }, + { "para0_l_e", DPP_FIELD_FLAG_RW, 277, 32, 0x0, 0x0 }, + { "para8_c", DPP_FIELD_FLAG_RW, 245, 27, 0x0, 0x0 }, + { "para7_c", DPP_FIELD_FLAG_RW, 218, 27, 0x0, 0x0 }, + { "para6_c", DPP_FIELD_FLAG_RW, 191, 27, 0x0, 0x0 }, + { "para5_c", DPP_FIELD_FLAG_RW, 164, 14, 0x0, 0x0 }, + { "para4_h_c", DPP_FIELD_FLAG_RW, 150, 9, 0x0, 0x0 }, + { "para4_l_c", DPP_FIELD_FLAG_RW, 141, 32, 0x0, 0x0 }, + { "para3_c", DPP_FIELD_FLAG_RW, 109, 14, 0x0, 0x0 }, + { "para2_h_c", DPP_FIELD_FLAG_RW, 95, 9, 0x0, 0x0 }, + { "para2_l_c", DPP_FIELD_FLAG_RW, 86, 32, 0x0, 0x0 }, + { "para1_c", DPP_FIELD_FLAG_RW, 54, 14, 0x0, 0x0 }, + { "para0_h_c", DPP_FIELD_FLAG_RW, 40, 9, 0x0, 0x0 }, + { "para0_l_c", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_random_ram_reg[] = { + { "para8_e", DPP_FIELD_FLAG_RW, 491, 27, 0x0, 0x0 }, + { "para7_e", DPP_FIELD_FLAG_RW, 464, 27, 0x0, 0x0 }, + { "para6_e", DPP_FIELD_FLAG_RW, 437, 27, 0x0, 0x0 }, + { "para5_e", DPP_FIELD_FLAG_RW, 410, 14, 0x0, 0x0 }, + { "para4_h_e", DPP_FIELD_FLAG_RW, 396, 9, 0x0, 0x0 }, + { "para4_l_e", DPP_FIELD_FLAG_RW, 387, 32, 0x0, 0x0 }, + { "para3_e", DPP_FIELD_FLAG_RW, 355, 14, 0x0, 0x0 }, + { "para2_h_e", DPP_FIELD_FLAG_RW, 341, 9, 0x0, 0x0 }, + { "para2_l_e", DPP_FIELD_FLAG_RW, 332, 32, 0x0, 0x0 }, + { "para1_e", DPP_FIELD_FLAG_RW, 300, 14, 0x0, 0x0 }, + { "para0_h_e", DPP_FIELD_FLAG_RW, 286, 9, 0x0, 0x0 }, + { "para0_l_e", DPP_FIELD_FLAG_RW, 277, 32, 0x0, 0x0 }, + { "para8_c", DPP_FIELD_FLAG_RW, 245, 27, 0x0, 0x0 }, + { "para7_c", DPP_FIELD_FLAG_RW, 218, 27, 0x0, 0x0 }, + { "para6_c", DPP_FIELD_FLAG_RW, 191, 27, 0x0, 0x0 }, + { "para5_c", DPP_FIELD_FLAG_RW, 164, 14, 0x0, 0x0 }, + 
{ "para4_h_c", DPP_FIELD_FLAG_RW, 150, 9, 0x0, 0x0 }, + { "para4_l_c", DPP_FIELD_FLAG_RW, 141, 32, 0x0, 0x0 }, + { "para3_c", DPP_FIELD_FLAG_RW, 109, 14, 0x0, 0x0 }, + { "para2_h_c", DPP_FIELD_FLAG_RW, 95, 9, 0x0, 0x0 }, + { "para2_l_c", DPP_FIELD_FLAG_RW, 86, 32, 0x0, 0x0 }, + { "para1_c", DPP_FIELD_FLAG_RW, 54, 14, 0x0, 0x0 }, + { "para0_h_c", DPP_FIELD_FLAG_RW, 40, 9, 0x0, 0x0 }, + { "para0_l_c", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_begin_flow_id_reg[] = { + { "cara_begin_flow_id", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carb_begin_flow_id_reg[] = { + { "carb_begin_flow_id", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_carc_begin_flow_id_reg[] = { + { "carc_begin_flow_id", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_prog_full_assert_cfg_w_reg[] = { + { "prog_full_assert_cfg_w", DPP_FIELD_FLAG_RW, 7, 8, 0xe0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_prog_full_negate_cfg_w_reg[] = { + { "prog_full_negate_cfg_w", DPP_FIELD_FLAG_RW, 7, 8, 0xdf, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_timeout_limit_reg[] = { + { "timeout_limit", DPP_FIELD_FLAG_RW, 15, 16, 0x100, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_overflow_reg[] = { + { "pkt_des_fifo_overflow", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_underflow_reg[] = { + { "pkt_des_fifo_underflow", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_prog_full_reg[] = { + { "pkt_des_fifo_prog_full", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_prog_empty_reg[] = { + { "pkt_des_fifo_prog_empty", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_full_reg[] = { + { "pkt_des_fifo_full", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_empty_reg[] = { + { "pkt_des_fifo_empty", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_size_offset_reg[] = { + 
{ "pkt_size_offset", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_car_plcr_init_dont_reg[] = { + { "plcr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_max_pkt_size_a_reg[] = { + { "max_pkt_size_a", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_max_pkt_size_b_reg[] = { + { "max_pkt_size_b", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_max_pkt_size_c_reg[] = { + { "max_pkt_size_c", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_car_hierarchy_mode_reg[] = { + { "car_hierarchy_mode", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_prog_empty_assert_cfg_w_reg[] = { + { "prog_empty_assert_cfg_w", DPP_FIELD_FLAG_RW, 7, 8, 0x3, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_prog_empty_negate_cfg_w_reg[] = { + { "prog_empty_negate_cfg_w", DPP_FIELD_FLAG_RW, 7, 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_ovf_int_reg[] = { + { "pkt_des_fifo_ovf_int", DPP_FIELD_FLAG_RO, 1, 2, 0x00, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_data_count_reg[] = { + { "pkt_des_fifo_data_count", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_pkt_des_fifo_udf_int_reg[] = { + { "pkt_des_fifo_udf_int", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_queue_ram0_159_0_pkt_reg[] = { + { "cara_drop", DPP_FIELD_FLAG_RW, 147, 1, 0x0, 0x0 }, + { "cara_plcr_en", DPP_FIELD_FLAG_RW, 146, 1, 0x0, 0x0 }, + { "cara_profile_id", DPP_FIELD_FLAG_RW, 145, 9, 0x0, 0x0 }, + { "cara_tq_h", DPP_FIELD_FLAG_RO, 136, 13, 0x0, 0x0 }, + { "cara_tq_l", DPP_FIELD_FLAG_RO, 123, 32, 0x0, 0x0 }, + { "cara_dc_high", DPP_FIELD_FLAG_RO, 50, 5, 0x0, 0x0 }, + { "cara_dc_low", DPP_FIELD_FLAG_RO, 45, 32, 0x0, 0x0 }, + { "cara_tc", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_car0_cara_profile_ram1_255_0_pkt_reg[] = { + { "cara_profile_wr", DPP_FIELD_FLAG_RW, 224, 1, 0x0, 0x0 }, + { "cara_pkt_sign", DPP_FIELD_FLAG_RW, 216, 1, 0x0, 0x0 
}, + { "cara_pkt_cir", DPP_FIELD_FLAG_RW, 189, 30, 0x0, 0x0 }, + { "cara_pkt_cbs", DPP_FIELD_FLAG_RW, 150, 14, 0x0, 0x0 }, + { "cara_pri0", DPP_FIELD_FLAG_RW, 39, 5, 0x0, 0x0 }, + { "cara_pri1", DPP_FIELD_FLAG_RW, 34, 5, 0x0, 0x0 }, + { "cara_pri2", DPP_FIELD_FLAG_RW, 29, 5, 0x0, 0x0 }, + { "cara_pri3", DPP_FIELD_FLAG_RW, 24, 5, 0x0, 0x0 }, + { "cara_pri4", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "cara_pri5", DPP_FIELD_FLAG_RW, 14, 5, 0x0, 0x0 }, + { "cara_pri6", DPP_FIELD_FLAG_RW, 9, 5, 0x0, 0x0 }, + { "cara_pri7", DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat4k_etcam_block0_7_port_id_cfg_reg[] = { + { "block7_port_id", DPP_FIELD_FLAG_RW, 31, 4, 0x0, 0x0 }, + { "block6_port_id", DPP_FIELD_FLAG_RW, 27, 4, 0x0, 0x0 }, + { "block5_port_id", DPP_FIELD_FLAG_RW, 23, 4, 0x0, 0x0 }, + { "block4_port_id", DPP_FIELD_FLAG_RW, 19, 4, 0x0, 0x0 }, + { "block3_port_id", DPP_FIELD_FLAG_RW, 15, 4, 0x0, 0x0 }, + { "block2_port_id", DPP_FIELD_FLAG_RW, 11, 4, 0x0, 0x0 }, + { "block1_port_id", DPP_FIELD_FLAG_RW, 7, 4, 0x0, 0x0 }, + { "block0_port_id", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat4k_etcam_block0_3_base_addr_cfg_reg[] = { + { "block3_base_addr_cfg", DPP_FIELD_FLAG_RW, 30, 7, 0x0, 0x0 }, + { "block2_base_addr_cfg", DPP_FIELD_FLAG_RW, 22, 7, 0x0, 0x0 }, + { "block1_base_addr_cfg", DPP_FIELD_FLAG_RW, 14, 7, 0x0, 0x0 }, + { "block0_base_addr_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat4k_etcam_block4_7_base_addr_cfg_reg[] = { + { "block7_base_addr_cfg", DPP_FIELD_FLAG_RW, 30, 7, 0x0, 0x0 }, + { "block6_base_addr_cfg", DPP_FIELD_FLAG_RW, 22, 7, 0x0, 0x0 }, + { "block5_base_addr_cfg", DPP_FIELD_FLAG_RW, 14, 7, 0x0, 0x0 }, + { "block4_base_addr_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_eram_wr_interval_cnt_reg[] = { + { "cfg_eram_wr_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_zcam_wr_interval_cnt_reg[] = { + { "cfg_zcam_wr_interval_cnt", 
DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_tcam_wr_interval_cnt_reg[] = { + { "cfg_zcam_wr_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_ddr_wr_interval_cnt_reg[] = { + { "cfg_ddr_wr_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_hash_wr_interval_cnt_reg[] = { + { "cfg_hash_wr_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_eram_rd_interval_cnt_reg[] = { + { "cfg_eram_rd_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_zcam_rd_interval_cnt_reg[] = { + { "cfg_zcam_rd_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_tcam_rd_interval_cnt_reg[] = { + { "cfg_tcam_rd_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_ddr_rd_interval_cnt_reg[] = { + { "cfg_ddr_rd_interval_cnt", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_queue_lock_state_0_3_reg[] = { + { "cfg_dtb_queue_lock_state", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axim0_w_convert_0_mode_reg[] = { + { "w_convert_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axim0_r_convert_0_mode_reg[] = { + { "r_convert_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axim0_aximr_os_reg[] = { + { "aximr_os", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axim1_w_convert_1_mode_reg[] = { + { "w_convert_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axim1_r_convert_1_mode_reg[] = { + { "r_convert_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_axis_axis_convert_mode_reg[] = { + { "w_r_convert_mode", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb4k_dtb_enq_cfg_queue_dtb_addr_h_0_127_reg[] = { + { "cfg_queue_dtb_addr_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_dtb4k_dtb_enq_cfg_queue_dtb_addr_l_0_127_reg[] = { + { "cfg_queue_dtb_addr_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb4k_dtb_enq_cfg_queue_dtb_len_0_127_reg[] = { + { "cfg_dtb_cmd_type", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "cfg_dtb_cmd_int_en", DPP_FIELD_FLAG_RW, 29, 1, 0x0, 0x0 }, + { "cfg_queue_dtb_len", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb4k_dtb_enq_info_queue_buf_space_left_0_127_reg[] = { + { "info_queue_buf_space_left", DPP_FIELD_FLAG_RO, 5, 6, 0x20, 0x0 }, +}; +DPP_FIELD_T g_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_reg[] = { + { "dbi_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "queue_en", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "cfg_epid", DPP_FIELD_FLAG_RW, 27, 4, 0x0, 0x0 }, + { "cfg_vfunc_num", DPP_FIELD_FLAG_RW, 23, 8, 0x0, 0x0 }, + { "cfg_vector", DPP_FIELD_FLAG_RW, 14, 7, 0x0, 0x0 }, + { "cfg_func_num", DPP_FIELD_FLAG_RW, 7, 3, 0x0, 0x0 }, + { "cfg_vfunc_active", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_ms_en_reg[] = { + { "cpu_trpgrx_ms_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_port_en_reg[] = { + { "cpu_trpgrx_port_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_look_en_reg[] = { + { "cpu_trpgrx_look_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_ram_almost_full_reg[] = { + { "cpu_trpgrx_ram_almost_full", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_ram_test_en_reg[] = { + { "cpu_trpgrx_ram_test_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_inmod_pfc_rdy_en_reg[] = { + { "cpu_trpgrx_inmod_pfc_rdy_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_h_reg[] = { + { "cpu_trpgrx_pkt_num_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_l_reg[] = { + { 
"cpu_trpgrx_pkt_num_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_h_reg[] = { + { "cpu_trpgrx_pkt_byte_num_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_l_reg[] = { + { "cpu_trpgrx_pkt_byte_num_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_cnt_clr_reg[] = { + { "cpu_trpgrx_pkt_cnt_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_fc_clk_freq_reg[] = { + { "cpu_trpgrx_fc_clk_freq", DPP_FIELD_FLAG_RW, 10, 11, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_fc_en_reg[] = { + { "cpu_trpgrx_fc_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_fc_token_add_num_reg[] = { + { "cpu_trpgrx_fc_token_add_num", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_fc_token_max_num_reg[] = { + { "cpu_trpgrx_fc_token_max_num", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_port_state_info_reg[] = { + { "cpu_trpgrx_port_state_info", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_reg[] = { + { "cpu_trpgrx_ram_past_max_dep", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_clr_reg[] = { + { "cpu_trpgrx_ram_past_max_dep_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_reg[] = { + { "cpu_trpgrx_pkt_past_max_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_clr_reg[] = { + { "cpu_trpgrx_pkt_past_max_len_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_reg[] = { + { "cpu_trpgrx_pkt_past_min_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_clr_reg[] = { + { "cpu_trpgrx_pkt_past_min_len_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_ram_trpg_rx_data_ram_reg[] = { + { "trpg_rx_data_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_ram_trpg_rx_info_ram_reg[] = { + { "trpg_rx_info_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_ms_en_reg[] = { + { "cpu_trpgtx_ms_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_port_en_reg[] = { + { "cpu_trpgtx_port_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_look_en_reg[] = { + { "cpu_trpgtx_look_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_ram_almost_full_reg[] = { + { "cpu_trpgtx_ram_almost_full", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_ram_test_en_reg[] = { + { "cpu_trpgtx_ram_test_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_h_reg[] = { + { "cpu_trpgtx_pkt_num_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_l_reg[] = { + { "cpu_trpgtx_pkt_num_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_h_reg[] = { + { "cpu_trpgtx_pkt_byte_num_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_l_reg[] = { + { "cpu_trpgtx_pkt_byte_num_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_cnt_clr_reg[] = { + { "cpu_trpgtx_pkt_cnt_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_fc_clk_freq_reg[] = { + { "cpu_trpgtx_fc_clk_freq", DPP_FIELD_FLAG_RW, 10, 11, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_fc_en_reg[] = { + { "cpu_trpgtx_fc_en", DPP_FIELD_FLAG_RW, 0, 1, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_fc_token_add_num_reg[] = { + { "cpu_trpgtx_fc_token_add_num", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_fc_token_max_num_reg[] = { + { "cpu_trpgtx_fc_token_max_num", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_port_state_info_reg[] = { + { "cpu_trpgtx_port_state_info", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_reg[] = { + { "cpu_trpgtx_ram_past_max_dep", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_clr_reg[] = { + { "cpu_trpgtx_ram_past_max_dep_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_reg[] = { + { "cpu_trpgtx_pkt_past_max_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_clr_reg[] = { + { "cpu_trpgtx_pkt_past_max_len_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_reg[] = { + { "cpu_trpgtx_pkt_past_min_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_clr_reg[] = { + { "cpu_trpgtx_pkt_past_min_len_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_almost_full_reg[] = { + { "cpu_trpgtx_etm_ram_almost_full", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_test_en_reg[] = { + { "cpu_trpgtx_etm_ram_test_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_glb_cpu_todtime_update_int_mask_reg[] = { + { "cpu_todtime_update_int_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_glb_cpu_todtime_update_int_clr_reg[] = { + { "cpu_todtime_update_int_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_trpg_trpg_tx_glb_cpu_todtime_ram_test_en_reg[] = { + { "cpu_todtime_ram_test_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_ram_trpg_tx_data_ram_reg[] = { + { "trpg_tx_data_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_ram_trpg_tx_info_ram_reg[] = { + { "trpg_tx_info_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_etm_ram_trpg_tx_etm_data_ram_reg[] = { + { "trpg_tx_etm_data_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_etm_ram_trpg_tx_etm_info_ram_reg[] = { + { "trpg_tx_etm_info_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_chip_version_reg_reg[] = { + { "chip_version_reg", DPP_FIELD_FLAG_RO, 31, 4, 0x1, 0x0 }, + { "chip_sub_reg", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "chip_type_reg", DPP_FIELD_FLAG_RO, 23, 24, 0x211650, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_chip_date_reg_reg[] = { + { "chip_date_reg", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cfgmt_crc_en_reg[] = { + { "cfgmt_crc_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_cfg_port_transfer_en_reg[] = { + { "cfg_port_transfer_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_tm_sa_work_mode_reg[] = { + { "tm_sa_work_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cfgmt_local_sa_id_reg[] = { + { "local_sa_id", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_rdy_reg[] = { + { "cfgmt_block_mode", DPP_FIELD_FLAG_RO, 5, 2, 0x2, 0x0 }, + { "cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RO, 3, 1, 0x1, 0x0 }, + { "cfgmt_count_rd_mode", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "olif_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_emem_prog_full_reg[] = { + { "emem_prog_full_assert", DPP_FIELD_FLAG_RW, 24, 9, 0xb9, 0x0 }, + { "emem_prog_full_negate", DPP_FIELD_FLAG_RW, 8, 9, 0xac, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_port_order_fifo_full_reg[] = 
{ + { "port_order_fifo_full_assert", DPP_FIELD_FLAG_RW, 22, 7, 0x32, 0x0 }, + { "port_order_fifo_full_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x32, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_release_last_reg[] = { + { "olif_release_last_addr", DPP_FIELD_FLAG_RO, 24, 19, 0x0, 0x0 }, + { "olif_release_last_bank", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_fifo_empty_state_reg[] = { + { "qmu_para_fifo_empty", DPP_FIELD_FLAG_RO, 2, 1, 0x1, 0x0 }, + { "emem_empty", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "imem_empty", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_release_fc_cnt_reg[] = { + { "qmu_olif_release_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_qmu_link_fc_cnt_reg[] = { + { "olif_qmu_link_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_link_fc_cnt_reg[] = { + { "lif0_link_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_tmmu_fc_cnt_reg[] = { + { "olif_tmmu_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_mmu_fc_cnt_reg[] = { + { "olif_mmu_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_qmu_port_rdy_h_reg[] = { + { "olif_qmu_port_rdy_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_qmu_port_rdy_l_reg[] = { + { "olif_qmu_port_rdy_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_h_reg[] = { + { "lif0_port_rdy_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_lif0_port_rdy_l_reg[] = { + { "lif0_port_rdy_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_sop_cnt_reg[] = { + { "qmu_olif_rd_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_eop_cnt_reg[] = { + { "qmu_olif_rd_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_vld_cnt_reg[] = { + { "qmu_olif_rd_vld_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_blk_cnt_reg[] = { + { "qmu_olif_rd_blk_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_mmu_tm_data_sop_cnt_reg[] = { + { "mmu_tm_data_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_mmu_tm_data_eop_cnt_reg[] = { + { "mmu_tm_data_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_mmu_tm_data_vld_cnt_reg[] = { + { "mmu_tm_data_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_odma_tm_data_sop_cnt_reg[] = { + { "odma_tm_data_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_odma_tm_data_eop_cnt_reg[] = { + { "odma_tm_data_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_odma_tm_deq_vld_cnt_reg[] = { + { "odma_tm_deq_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_olif_qmu_release_vld_cnt_reg[] = { + { "olif_qmu_release_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_emem_dat_vld_cnt_reg[] = { + { "emem_dat_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_imem_dat_vld_cnt_reg[] = { + { "imem_dat_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_emem_dat_rd_cnt_reg[] = { + { "emem_dat_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_imem_dat_rd_cnt_reg[] = { + { "imem_dat_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_sop_emem_cnt_reg[] = { + { "qmu_olif_rd_sop_emem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_rd_vld_emem_cnt_reg[] = { + { "qmu_olif_rd_vld_emem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_cpu_last_wr_addr_reg[] = { + { "cpu_last_wr_addr", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_cpu_last_wr_data_reg[] = { + { "cpu_last_wr_data", DPP_FIELD_FLAG_RO, 
31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_cpu_last_rd_addr_reg[] = { + { "cpu_last_rd_addr", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_last_port_reg[] = { + { "qmu_olif_last_port", DPP_FIELD_FLAG_RO, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_last_addr_reg[] = { + { "qmu_olif_last_addr", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_qmu_olif_last_bank_reg[] = { + { "qmu_olif_last_bank", DPP_FIELD_FLAG_RO, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_byte_stat_reg[] = { + { "tm_lif_byte_stat", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_olif_tm_lif_err_stat_reg[] = { + { "tm_lif_err_stat", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_port_share_cnt_reg[] = { + { "port_share_cnt", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_total_imem_cnt_reg[] = { + { "total_imem_cnt", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_pp_q_len_reg[] = { + { "pp_q_len", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sys_q_len_reg[] = { + { "sys_q_len", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_cfg_error_warning_reg[] = { + { "error_correction_11", DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "error_correction_10", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "error_correction_9", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "error_correction_8", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "error_correction_7", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "error_correction_6", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "error_correction5", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "error_correction_4", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "error_correction_3", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "error_correction_2", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "error_correction_1", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "error_correction_0", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_etm_cgavd_mult_qlen_th_en_reg[] = { + { "mult_qlen_th", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_mult_qlen_th_reg[] = { + { "mult_qlen_th", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_cfg_move_reg[] = { + { "cfgmt_sys_move_en", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "cfgmt_port_move_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cfgmt_flow_move_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cfgmt_total_th_reg[] = { + { "cfgmt_total_th", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cfgmt_port_share_th_reg[] = { + { "cfgmt_port_share_th", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_sa_unreach_state_reg[] = { + { "sa_unreach_state", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_mv_port_th_reg[] = { + { "port_th", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_mv_drop_sp_th_reg[] = { + { "mvdrop_sp_th", DPP_FIELD_FLAG_RW, 8, 9, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_state_warning_reg[] = { + { "deq_q_num_warning", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "deq_pkt_len_warning", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "enq_pkt_dp_warning", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "unenq_q_num_warning", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "enq_q_num_warning", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "enq_pkt_len_warning", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_reg[] = { + { "tmmu_cgavd_dma_fifo_cnt", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_max_reg[] = { + { "tmmu_cgavd_dma_fifo_cnt_max", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_imem_total_cnt_reg[] = { + { "imem_total_cnt", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_imem_total_cnt_max_reg[] = { + { "imem_total_cnt_max", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_cgavd_flow0_omem_cnt_reg[] = { + { "flow0_omem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow1_omem_cnt_reg[] = { + { "flow1_omem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow2_omem_cnt_reg[] = { + { "flow2_omem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow3_omem_cnt_reg[] = { + { "flow3_omem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_flow4_omem_cnt_reg[] = { + { "flow4_omem_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_appoint_flow_num_message_1_reg[] = { + { "appoint_flow_num_en_1", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "appoint_flow_num_1", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_appoint_flow_num_message_2_reg[] = { + { "appoint_flow_num_en_2", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "appoint_flow_num_2", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_cgavd_pkt_num_1_reg[] = { + { "odma_cgavd_pkt_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_cgavd_byte_num_1_reg[] = { + { "odma_cgavd_byte_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_enqueue_pkt_num_1_reg[] = { + { "cgavd_enqueue_pkt_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_dequeue_pkt_num_1_reg[] = { + { "cgavd_dequeue_pkt_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_imem_num_1_reg[] = { + { "cgavd_qmu_pkt_imem_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_omem_num_1_reg[] = { + { "cgavd_qmu_pkt_omem_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_imem_num_1_reg[] = { + { "cgavd_qmu_byte_imem_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_omem_num_1_reg[] = { + { "cgavd_qmu_byte_omem_1", DPP_FIELD_FLAG_RO, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_drop_num_1_reg[] = { + { "cgavd_qmu_pkt_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_drop_num_1_reg[] = { + { "cgavd_qmu_byte_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_forbid_drop_num_1_reg[] = { + { "cgavd_qmu_forbid_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_td_drop_num_1_reg[] = { + { "cgavd_qmu_flow_td_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_wred_drop_num_1_reg[] = { + { "cgavd_qmu_flow_wred_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_1_reg[] = { + { "cgavd_qmu_flow_wred_dp_drop_num1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_td_num_1_reg[] = { + { "cgavd_qmu_pp_td_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_wred_drop_num_1_reg[] = { + { "cgavd_qmu_pp_wred_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_1_reg[] = { + { "cgavd_qmu_pp_wred_dp_drop_num1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_td_drop_num_1_reg[] = { + { "cgavd_qmu_sys_td_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_gred_drop_num_1_reg[] = { + { "cgavd_qmu_sys_gred_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num1_reg[] = { + { "cgavd_qmu_sys_gred_dp_drop_num1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sa_drop_num_1_reg[] = { + { "cgavd_qmu_sa_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_move_drop_num_1_reg[] = { + { "cgavd_qmu_move_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 
}, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_tm_mult_drop_num_1_reg[] = { + { "cgavd_qmu_tm_mult_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_tm_error_drop_num_1_reg[] = { + { "cgavd_qmu_tm_error_drop_num_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_cgavd_pkt_num_2_reg[] = { + { "odma_cgavd_pkt_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_odma_cgavd_byte_num_2_reg[] = { + { "odma_cgavd_byte_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_enqueue_pkt_num_2_reg[] = { + { "cgavd_enqueue_pkt_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_dequeue_pkt_num_2_reg[] = { + { "cgavd_dequeue_pkt_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_imem_num_2_reg[] = { + { "cgavd_qmu_pkt_imem_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_omem_num_2_reg[] = { + { "cgavd_qmu_pkt_omem_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_imem_num_2_reg[] = { + { "cgavd_qmu_byte_imem_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_omem_num_2_reg[] = { + { "cgavd_qmu_byte_omem_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pkt_drop_num_2_reg[] = { + { "cgavd_qmu_pkt_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_byte_drop_num_2_reg[] = { + { "cgavd_qmu_byte_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_forbid_drop_num_2_reg[] = { + { "cgavd_qmu_forbid_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_td_drop_num_2_reg[] = { + { "cgavd_qmu_flow_td_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_wred_drop_num_2_reg[] = { + { 
"cgavd_qmu_flow_wred_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_2_reg[] = { + { "cgavd_qmu_flow_wred_dp_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_td_num_2_reg[] = { + { "cgavd_qmu_pp_td_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_wred_drop_num_2_reg[] = { + { "cgavd_qmu_pp_wred_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_2_reg[] = { + { "cgavd_qmu_pp_wred_dp_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_td_drop_num_2_reg[] = { + { "cgavd_qmu_sys_td_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_gred_drop_num_2_reg[] = { + { "cgavd_qmu_sys_gred_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num_2_reg[] = { + { "cgavd_qmu_sys_gred_dp_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_sa_drop_num_2_reg[] = { + { "cgavd_qmu_sa_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_move_drop_num_2_reg[] = { + { "cgavd_qmu_move_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_tm_mult_drop_num_2_reg[] = { + { "cgavd_qmu_tm_mult_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_cgavd_qmu_tm_error_drop_num_2_reg[] = { + { "cgavd_qmu_tm_error_drop_num_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_move_flow_th_profile_reg[] = { + { "move_drop_profile", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_cgavd_move_flow_th_reg[] = { + { "move_drop_flow_th", DPP_FIELD_FLAG_RW, 28, 29, 0x50, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_emem_pd_fifo_aful_th_reg[] = { + { "emem_pd_fifo_aful_th", DPP_FIELD_FLAG_RW, 
7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_dma_data_fifo_aful_th_reg[] = { + { "dma_data_fifo_aful_th", DPP_FIELD_FLAG_RW, 9, 10, 0x190, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_states_0_reg[] = { + { "tm_odma_pkt_rdy", DPP_FIELD_FLAG_RO, 30, 1, 0x1, 0x0 }, + { "dma_data_fifo_empty", DPP_FIELD_FLAG_RO, 29, 1, 0x1, 0x0 }, + { "imem_enq_rd_fifo_empty", DPP_FIELD_FLAG_RO, 28, 1, 0x1, 0x0 }, + { "imem_enq_drop_fifo_empty", DPP_FIELD_FLAG_RO, 27, 1, 0x1, 0x0 }, + { "imem_deq_rd_fifo_empty", DPP_FIELD_FLAG_RO, 26, 1, 0x1, 0x0 }, + { "imem_deq_drop_fifo_empty", DPP_FIELD_FLAG_RO, 25, 1, 0x1, 0x0 }, + { "wr_cmd_fifo_empty", DPP_FIELD_FLAG_RO, 24, 1, 0x1, 0x0 }, + { "cached_pd_fifo_empty", DPP_FIELD_FLAG_RO, 23, 1, 0x1, 0x0 }, + { "emem_pd_fifo_empty", DPP_FIELD_FLAG_RO, 22, 1, 0x1, 0x0 }, + { "pd_order_fifo_empty", DPP_FIELD_FLAG_RO, 21, 1, 0x1, 0x0 }, + { "odma_tm_data_rdy", DPP_FIELD_FLAG_RO, 20, 1, 0x1, 0x0 }, + { "odma_tm_discard_rdy", DPP_FIELD_FLAG_RO, 19, 1, 0x1, 0x0 }, + { "olif_tmmu_rdy", DPP_FIELD_FLAG_RO, 18, 1, 0x1, 0x0 }, + { "mmu_tm_cmd_wr_rdy", DPP_FIELD_FLAG_RO, 17, 1, 0x1, 0x0 }, + { "mmu_tm_data_wr_rdy", DPP_FIELD_FLAG_RO, 16, 1, 0x1, 0x0 }, + { "mmu_tm_rd_rdy", DPP_FIELD_FLAG_RO, 15, 1, 0x1, 0x0 }, + { "mmu_tm_sop_rd_rdy", DPP_FIELD_FLAG_RO, 14, 1, 0x1, 0x0 }, + { "qmu_tmmu_sop_data_rdy", DPP_FIELD_FLAG_RO, 13, 1, 0x1, 0x0 }, + { "tmmu_cmdsw_imem_release_rdy", DPP_FIELD_FLAG_RO, 12, 1, 0x1, 0x0 }, + { "imem_age_release_rdy", DPP_FIELD_FLAG_RO, 11, 1, 0x1, 0x0 }, + { "tmmu_qmu_wr_rdy", DPP_FIELD_FLAG_RO, 10, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_7", DPP_FIELD_FLAG_RO, 9, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_6", DPP_FIELD_FLAG_RO, 8, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_5", DPP_FIELD_FLAG_RO, 7, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_4", DPP_FIELD_FLAG_RO, 6, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_3", DPP_FIELD_FLAG_RO, 5, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_2", DPP_FIELD_FLAG_RO, 4, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_1", DPP_FIELD_FLAG_RO, 3, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_0", 
DPP_FIELD_FLAG_RO, 2, 1, 0x1, 0x0 }, + { "tmmu_qmu_rd_rdy", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "tmmu_qmu_sop_rd_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_wr_sop_cnt_reg[] = { + { "qmu_tmmu_wr_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_wr_eop_cnt_reg[] = { + { "qmu_tmmu_wr_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_wr_drop_cnt_reg[] = { + { "qmu_tmmu_wr_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_wr_emem_cnt_reg[] = { + { "qmu_tmmu_wr_emem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_wr_imem_cnt_reg[] = { + { "qmu_tmmu_wr_imem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_mmu_wr_sop_cnt_reg[] = { + { "tmmu_mmu_wr_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_mmu_wr_eop_cnt_reg[] = { + { "tmmu_mmu_wr_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_rd_sop_cnt_reg[] = { + { "qmu_tmmu_rd_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_rd_eop_cnt_reg[] = { + { "qmu_tmmu_rd_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_rd_drop_cnt_reg[] = { + { "qmu_tmmu_rd_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_rd_emem_cnt_reg[] = { + { "qmu_tmmu_rd_emem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_tmmu_rd_imem_cnt_reg[] = { + { "qmu_tmmu_rd_imem_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_mmu_rd_sop_cnt_reg[] = { + { "tmmu_mmu_rd_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_mmu_rd_eop_cnt_reg[] = { + { "tmmu_mmu_rd_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_in_sop_cnt_reg[] = { + { "tmmu_odma_in_sop_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_in_eop_cnt_reg[] = { + { "tmmu_odma_in_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_vld_cnt_reg[] = { + { "tmmu_odma_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_qmu_pd_in_cnt_reg[] = { + { "qmu_pd_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_pd_hit_cnt_reg[] = { + { "tmmu_pd_hit_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_pd_out_cnt_reg[] = { + { "tmmu_pd_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_wr_cmd_fifo_wr_cnt_reg[] = { + { "tmmu_wr_cmd_fifo_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_imem_age_cnt_reg[] = { + { "tmmu_imem_age_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_cmdsch_rd_cnt_reg[] = { + { "tmmu_cmdsch_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_cmdsch_drop_cnt_reg[] = { + { "tmmu_cmdsch_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_cmdsw_drop_cnt_reg[] = { + { "tmmu_cmdsw_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_enq_rd_cnt_reg[] = { + { "tmmu_odma_enq_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_enq_drop_cnt_reg[] = { + { "tmmu_odma_enq_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_imem_age_cnt_reg[] = { + { "tmmu_odma_imem_age_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_deq_rd_cnt_reg[] = { + { "tmmu_odma_deq_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tmmu_odma_deq_drop_cnt_reg[] = { + { "tmmu_odma_deq_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_olif_tmmu_xoff_cnt_reg[] = { + { "olif_tmmu_xoff_cnt", DPP_FIELD_FLAG_RC, 
31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_odma_tm_data_xoff_cnt_reg[] = { + { "odma_tm_data_xoff_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tm_odma_pkt_xoff_cnt_reg[] = { + { "tm_odma_pkt_xoff_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_tm_state_3_reg[] = { + { "tmmu_qmu_rdy_9", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "tmmu_qmu_rdy_8", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_cmd_reg[] = { + { "cfgmt_pd_cache_addr", DPP_FIELD_FLAG_RW, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_rd_done_reg[] = { + { "cfgmt_pd_cache_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_rd_data_0_reg[] = { + { "cfgmt_pd_cache_rd_data_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_rd_data_1_reg[] = { + { "cfgmt_pd_cache_rd_data_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_rd_data_2_reg[] = { + { "cfgmt_pd_cache_rd_data_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_pd_cache_rd_data_3_reg[] = { + { "cfgmt_pd_cache_rd_data_3", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_tmmu_to_odma_para_reg[] = { + { "cfgmt_tmmu_to_odma_para", DPP_FIELD_FLAG_RO, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_dma_data_fifo_cnt_reg[] = { + { "cfgmt_dma_data_fifo_cnt", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit0_offset_reg[] = { + { "cfgmt_cache_tag_bit0_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xd, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit1_offset_reg[] = { + { "cfgmt_cache_tag_bit1_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xe, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit2_offset_reg[] = { + { "cfgmt_cache_tag_bit2_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xf, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit3_offset_reg[] = { + { 
"cfgmt_cache_tag_bit3_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit4_offset_reg[] = { + { "cfgmt_cache_tag_bit4_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x11, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_tag_bit5_offset_reg[] = { + { "cfgmt_cache_tag_bit5_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x12, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit0_offset_reg[] = { + { "cfgmt_cache_index_bit0_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit1_offset_reg[] = { + { "cfgmt_cache_index_bit1_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit2_offset_reg[] = { + { "cfgmt_cache_index_bit2_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x2, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit3_offset_reg[] = { + { "cfgmt_cache_index_bit3_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x3, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit4_offset_reg[] = { + { "cfgmt_cache_index_bit4_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit5_offset_reg[] = { + { "cfgmt_cache_index_bit5_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x5, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit6_offset_reg[] = { + { "cfgmt_cache_index_bit6_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x6, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit7_offset_reg[] = { + { "cfgmt_cache_index_bit7_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x7, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit8_offset_reg[] = { + { "cfgmt_cache_index_bit8_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x8, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit9_offset_reg[] = { + { "cfgmt_cache_index_bit9_offset", DPP_FIELD_FLAG_RW, 4, 5, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit10_offset_reg[] = { + { "cfgmt_cache_index_bit10_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xa, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit11_offset_reg[] = { + { 
"cfgmt_cache_index_bit11_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xb, 0x0 }, +}; +DPP_FIELD_T g_etm_tmmu_cfgmt_cache_index_bit12_offset_reg[] = { + { "cfgmt_cache_index_bit12_offset", DPP_FIELD_FLAG_RW, 4, 5, 0xc, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_bktfull_fifo_full_flagregister_reg[] = { + { "bktfull_fifo_full_flag_core", DPP_FIELD_FLAG_RC, 21, 22, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_fifo_full_regregister_reg[] = { + { "fifo_full_reg", DPP_FIELD_FLAG_RO, 21, 22, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_fifo_empty_regregister_reg[] = { + { "fifo_empty_reg", DPP_FIELD_FLAG_RO, 21, 22, 0x7, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_fifo_almost_full_regregister_reg[] = { + { "fifo_almost_full_reg", DPP_FIELD_FLAG_RO, 21, 22, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_shap_fifo_almost_empty_regregister_reg[] = { + { "fifo_almost_empty_reg", DPP_FIELD_FLAG_RO, 21, 22, 0x7, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_credit_space_select_reg[] = { + { "credit_space_select", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_space_max_reg[] = { + { "stat_space_max", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_space_min_reg[] = { + { "stat_space_min", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_space_credit_reg[] = { + { "stat_space_credit", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_stat_que_step8_credit_reg[] = { + { "stat_que_step8_credit", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_special_que_reg[] = { + { "special_que_id", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_special_que_credit_reg[] = { + { "special_que_credit", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_lif_congest_credit_cnt_reg[] = { + { "lif_congest_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_lif_port_congest_credit_cnt_reg[] = { + { "lif_port_congest_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_crdt_crdt_congest_credit_cnt_reg[] = { + { "crdt_congest_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_port_congest_credit_cnt_reg[] = { + { "crdt_port_congest_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_congest_port_id_reg[] = { + { "congest_port_id", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_link_control_reg[] = { + { "dev_link_control", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_sa_port_rdy_reg[] = { + { "crdt_sa_port_rdy", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_congest_mode_select_reg[] = { + { "crdt_congest_mode_selectr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_fifo_out_all_crs_normal_cnt_reg[] = { + { "fifo_out_all_crs_normal_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_fifo_out_all_crs_off_cnt_reg[] = { + { "fifo_out_all_crs_off_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_fifo_out_que_crs_normal_cnt_reg[] = { + { "fifo_out_que_crs_normal_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_fifo_out_que_crs_off_cnt_reg[] = { + { "fifo_out_que_crs_off_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_mode_add_60g_reg[] = { + { "mode_add_60g", DPP_FIELD_FLAG_RW, 15, 16, 0xff, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_token_add_reg[] = { + { "pp_token_add_cir", DPP_FIELD_FLAG_RO, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_cir_token_total_dist_cnt_reg[] = { + { "pp_cir_token_total_dist_counter", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_pp_cir_token_total_dec_cnt_reg[] = { + { "pp_cir_token_total_dec_counter", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_dev_credit_cnt_reg[] = { + { "dev_credit_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_no_credit_cnt1_reg[] = { + { "no_credit_cnt1", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_no_credit_cnt2_reg[] = { + { "no_credit_cnt2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_0_cfg_reg[] = { + { "asm_interval_0_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0xf, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_1_cfg_reg[] = { + { "asm_interval_1_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_2_cfg_reg[] = { + { "asm_interval_2_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x2f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_3_cfg_reg[] = { + { "asm_interval_3_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_4_cfg_reg[] = { + { "asm_interval_4_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x4f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_5cfg_reg[] = { + { "asm_interval_5_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x5f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_6_cfg_reg[] = { + { "asm_interval_6_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x6f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_asm_interval_7_cfg_reg[] = { + { "asm_interval_7_cfg", DPP_FIELD_FLAG_RW, 21, 22, 0x7f, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_total_congest_mode_cfg_reg[] = { + { "crdt_total_congest_mode_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_fifo_ini_deep_cfg_reg[] = { + { "rci_fifo_ini_deep_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0x1f4, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_ecc_reg[] = { + { "seinfo_wfq_single_ecc_err", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "seinfo_wfq_double_ecc_err", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "seinfo_fq_single_ecc_err", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "seinfo_fq_double_ecc_err", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ecc_bypass", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_ucn_asm_rdy_shield_en_reg[] = { + { "ucn_rdy_shield_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "asm_rdy_shield_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T 
g_etm_crdt_ucn_asm_rdy_reg[] = { + { "ucn_rdy", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "asm_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_rci_grade_reg[] = { + { "rci_grade", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_rci_value_r_reg[] = { + { "crdt_rci_value_r", DPP_FIELD_FLAG_RO, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crdt_interval_now_reg[] = { + { "crdt_interval_now", DPP_FIELD_FLAG_RO, 21, 22, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crs_sheild_flow_id_cfg_reg[] = { + { "crs_sheild_flow_id_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crs_sheild_en_cfg_reg[] = { + { "crs_sheild_en_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_crs_sheild_value_cfg_reg[] = { + { "crs_sheild_value_cfg", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_test_token_calc_ctrl_reg[] = { + { "test_token_calc_state", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "test_token_calc_trigger", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_test_token_sample_cycle_num_reg[] = { + { "sample_cycle_num", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_q_state_0_7_reg[] = { + { "q_token_state_7", DPP_FIELD_FLAG_RO, 29, 2, 0x0, 0x0 }, + { "q_token_state_6", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "q_token_state_5", DPP_FIELD_FLAG_RO, 21, 2, 0x0, 0x0 }, + { "q_token_state_4", DPP_FIELD_FLAG_RO, 17, 2, 0x0, 0x0 }, + { "q_token_state_3", DPP_FIELD_FLAG_RO, 13, 2, 0x0, 0x0 }, + { "q_token_state_2", DPP_FIELD_FLAG_RO, 9, 2, 0x0, 0x0 }, + { "q_token_state_1", DPP_FIELD_FLAG_RO, 5, 2, 0x0, 0x0 }, + { "q_token_state_0", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_crdt_q_state_8_15_reg[] = { + { "q_token_state_15", DPP_FIELD_FLAG_RO, 29, 2, 0x0, 0x0 }, + { "q_token_state_14", DPP_FIELD_FLAG_RO, 25, 2, 0x0, 0x0 }, + { "q_token_state_13", DPP_FIELD_FLAG_RO, 21, 2, 0x0, 0x0 }, + { "q_token_state_12", DPP_FIELD_FLAG_RO, 17, 2, 0x0, 
0x0 }, + { "q_token_state_11", DPP_FIELD_FLAG_RO, 13, 2, 0x0, 0x0 }, + { "q_token_state_10", DPP_FIELD_FLAG_RO, 9, 2, 0x0, 0x0 }, + { "q_token_state_9", DPP_FIELD_FLAG_RO, 5, 2, 0x0, 0x0 }, + { "q_token_state_8", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_csch_rd_cmd_cnt_reg[] = { + { "csw_csch_rd_cmd_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_csch_rd_sop_cnt_reg[] = { + { "csw_csch_rd_sop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_csch_rd_eop_cnt_reg[] = { + { "csw_csch_rd_eop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_csch_rd_drop_cnt_reg[] = { + { "csw_csch_rd_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_mmu_rd_cmd_cnt_reg[] = { + { "csch_mmu_rd_cmd_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_mmu_rd_sop_cnt_reg[] = { + { "csch_mmu_rd_sop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_mmu_rd_eop_cnt_reg[] = { + { "csch_mmu_rd_eop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_mmu_rd_drop_cnt_reg[] = { + { "csch_mmu_rd_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_filter_reg[] = { + { "qcfg_qsch_crs_filter", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_force_en_reg[] = { + { "qcfg_qsch_crs_force_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_force_qnum_reg[] = { + { "qcfg_qsch_crs_force_qnum", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qsch_crs_force_crs_reg[] = { + { "qcfg_qsch_crs_force_crs", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_oshp_sgmii_shap_mode_reg[] = { + { "cfgmt_oshp_sgmii_shap_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_sashap_en_reg[] = { + { "cfgmt_qmu_sashap_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_sashap_token_max_reg[] = { + { "cfgmt_sashap_token_max", DPP_FIELD_FLAG_RW, 31, 32, 0x2000, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_sashap_token_min_reg[] = { + { "cfgmt_sashap_token_min", DPP_FIELD_FLAG_RW, 31, 32, 0xffffe000, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q3lbaddrate_reg[] = { + { "cfg_qsch_q3lbaddrate", DPP_FIELD_FLAG_RW, 27, 28, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q012lbaddrate_reg[] = { + { "cfg_qsch_q012lbaddrate", DPP_FIELD_FLAG_RW, 27, 28, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q3creditlbmaxcnt_reg[] = { + { "cfg_qsch_q3creditlbmaxcnt", DPP_FIELD_FLAG_RW, 7, 8, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q012creditlbmaxcnt_reg[] = { + { "cfg_qsch_q012creditlbmaxcnt", DPP_FIELD_FLAG_RW, 7, 8, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mul_token_gen_num_reg[] = { + { "cfg_qsch_mul_token_gen_num", DPP_FIELD_FLAG_RW, 7, 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q3_credit_lb_control_en_reg[] = { + { "cfg_qsch_q3_credit_lb_control_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q012_credit_lb_control_en_reg[] = { + { "cfg_qsch_q012_credit_lb_control_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sp_dwrr_en_reg[] = { + { "cfg_qsch_sp_dwrr_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q01_attach_en_reg[] = { + { "cfg_qsch_q01_attach_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_w0_reg[] = { + { "cfg_qsch_w0", DPP_FIELD_FLAG_RW, 6, 7, 0x10, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_w1_reg[] = { + { "cfg_qsch_w1", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_w2_reg[] = { + { "cfg_qsch_w2", DPP_FIELD_FLAG_RW, 6, 7, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktmaxcnt1_reg[] = { + { "cfg_qsch_lkybktmaxcnt1", DPP_FIELD_FLAG_RW, 31, 32, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktmaxcnt2_reg[] = { + 
{ "cfg_qsch_lkybktmaxcnt2", DPP_FIELD_FLAG_RW, 31, 32, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktdcrrate1_reg[] = { + { "cfg_qsch_lkybktdcrrate1", DPP_FIELD_FLAG_RW, 31, 32, 0x80000020, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktdcrrate2_reg[] = { + { "cfg_qsch_lkybktdcrrate2", DPP_FIELD_FLAG_RW, 31, 32, 0x80000010, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktdcrrate3_reg[] = { + { "cfg_qsch_lkybktdcrrate3", DPP_FIELD_FLAG_RW, 31, 32, 0x80000004, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybktmaxcnt3_reg[] = { + { "cfg_qsch_lkybktmaxcnt3", DPP_FIELD_FLAG_RW, 31, 32, 0x80, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_qmu_mul_auto_sa_version_reg[] = { + { "cfg_qsch_qmu_mul_auto_sa_version", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_0_reg[] = { + { "cfg_qsch_sa_credit_value_0", DPP_FIELD_FLAG_RW, 13, 14, 0x85, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_1_reg[] = { + { "cfg_qsch_sa_credit_value_1", DPP_FIELD_FLAG_RW, 13, 14, 0x29a, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_2_reg[] = { + { "cfg_qsch_sa_credit_value_2", DPP_FIELD_FLAG_RW, 13, 14, 0x190, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_3_reg[] = { + { "cfg_qsch_sa_credit_value_3", DPP_FIELD_FLAG_RW, 13, 14, 0x4e2, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_4_reg[] = { + { "cfg_qsch_sa_credit_value_4", DPP_FIELD_FLAG_RW, 13, 14, 0x5fe, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_5_reg[] = { + { "cfg_qsch_sa_credit_value_5", DPP_FIELD_FLAG_RW, 13, 14, 0x8a0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_6_reg[] = { + { "cfg_qsch_sa_credit_value_6", DPP_FIELD_FLAG_RW, 13, 14, 0x8a0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_sa_credit_value_7_reg[] = { + { "cfg_qsch_sa_credit_value_7", DPP_FIELD_FLAG_RW, 13, 14, 0x8a0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_remote_credit_fifo_almost_full_th_reg[] = { + { 
"cfg_qsch_remote_credit_fifo_almost_full_th", DPP_FIELD_FLAG_RW, 10, + 11, 0x7ff, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_auto_credit_fifo_almost_full_th_reg[] = { + { "cfg_qsch_auto_credit_fifo_almost_full_th", DPP_FIELD_FLAG_RW, 5, 6, + 0x1c, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q3_credit_fifo_almost_full_th_reg[] = { + { "cfg_qsch_q3_credit_fifo_almost_full_th", DPP_FIELD_FLAG_RW, 5, 6, + 0x1c, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_q012_credit_fifo_almost_full_th_reg[] = { + { "cfg_qsch_q012_credit_fifo_almost_full_th", DPP_FIELD_FLAG_RW, 5, 6, + 0x1c, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mul_fc_res_en_reg[] = { + { "cfg_qsch_mul_fc_res_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_mul_ovf_udf_flg_query_reg[] = { + { "qsch_cfg_remote_credit_fifo_full", DPP_FIELD_FLAG_RO, 18, 1, 0x0, + 0x0 }, + { "qsch_cfg_remote_credit_fifo_empty", DPP_FIELD_FLAG_RO, 17, 1, 0x1, + 0x0 }, + { "qsch_cfg_remote_credit_fifo_overflow", DPP_FIELD_FLAG_RO, 16, 1, 0x0, + 0x0 }, + { "qsch_cfg_remote_credit_fifo_underflow", DPP_FIELD_FLAG_RO, 15, 1, + 0x0, 0x0 }, + { "qsch_cfg_auto_credit_fifo_full", DPP_FIELD_FLAG_RO, 14, 1, 0x0, + 0x0 }, + { "qsch_cfg_auto_credit_fifo_empty", DPP_FIELD_FLAG_RO, 13, 1, 0x1, + 0x0 }, + { "qsch_cfg_auto_credit_fifo_overflow", DPP_FIELD_FLAG_RO, 12, 1, 0x0, + 0x0 }, + { "qsch_cfg_auto_credit_fifo_underflow", DPP_FIELD_FLAG_RO, 11, 1, 0x0, + 0x0 }, + { "qsch_cfg_q3_credit_fifo_full", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "qsch_cfg_q3_credit_fifo_empty", DPP_FIELD_FLAG_RO, 9, 1, 0x1, 0x0 }, + { "qsch_cfg_q3_credit_fifo_overflow", DPP_FIELD_FLAG_RO, 8, 1, 0x0, + 0x0 }, + { "qsch_cfg_q3_credit_fifo_underflow", DPP_FIELD_FLAG_RO, 7, 1, 0x0, + 0x0 }, + { "qsch_cfg_q012_credit_fifo_full", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "qsch_cfg_q012_credit_fifo_empty", DPP_FIELD_FLAG_RO, 5, 1, 0x1, + 0x0 }, + { "qsch_cfg_q012_credit_fifo_overflow", DPP_FIELD_FLAG_RO, 4, 1, 0x0, + 0x0 }, + { 
"qsch_cfg_q012_credit_fifo_underflow", DPP_FIELD_FLAG_RO, 3, 1, 0x0, + 0x0 }, + { "qsch_cfg_lkybktoverflow1", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "qsch_cfg_lkybktoverflow2", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "qsch_cfg_lkybktoverflow3", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_mul_cng_flg_query_reg[] = { + { "qsch_cfg_q3cngflag", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "qsch_cfg_q012cngflag", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "qsch_cfg_cngflag1", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "qsch_cfg_cngflag2", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "qsch_cfg_cngflag3", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfg_lkybktval1_reg[] = { + { "qsch_cfg_lkybktval1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfg_lkybktval2_reg[] = { + { "qsch_cfg_lkybktval2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfg_lkybktval3_reg[] = { + { "qsch_cfg_lkybktval3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfg_q3lbval_reg[] = { + { "qsch_cfg_q3lbval", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_cfg_q012lbval_reg[] = { + { "qsch_cfg_q012lbval", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qlist_cfgmt_ram_ecc_err2_reg[] = { + { "qlist_imem_pd_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 15, 1, 0x0, + 0x0 }, + { "qlist_imem_pd_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "qlist_imem_up_ptr_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, + 0x0 }, + { "qlist_imem_up_ptr_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, + 0x0 }, + { "qlist_imem_down_ptr_ram_single_ecc_err", DPP_FIELD_FLAG_RC, 11, 1, + 0x0, 0x0 }, + { "qlist_imem_down_ptr_ram_double_ecc_err", DPP_FIELD_FLAG_RC, 10, 1, + 0x0, 0x0 }, + { "cmdsw_sop_fifo_single_ecc_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "cmdsw_sop_fifo_double_ecc_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { 
"cmdsw_nsop_fifo_single_ecc_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "cmdsw_nsop_fifo_double_ecc_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "cmdsw_mmudat_fifo_single_ecc_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "cmdsw_mmudat_fifo_double_ecc_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "qlist_rd_release_fwft_single_ecc_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, + 0x0 }, + { "qlist_rd_release_fwft_double_ecc_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, + 0x0 }, + { "qlist_drop_imem_fwft_single_ecc_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, + 0x0 }, + { "qlist_drop_imem_fwft_double_ecc_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_aged_cmd_cnt_reg[] = { + { "csch_aged_cmd_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_csch_congest_cnt_reg[] = { + { "csch_qcfg_csch_congest_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_qlist_csch_sop_cnt_reg[] = { + { "csch_qcfg_qlist_csch_sop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_qlist_csch_eop_cnt_reg[] = { + { "csch_qcfg_qlist_csch_eop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_csch_csw_sop_cnt_reg[] = { + { "csch_qcfg_csch_csw_sop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_csch_csw_eop_cnt_reg[] = { + { "csch_qcfg_csch_csw_eop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_qlist_csch_drop_cnt_reg[] = { + { "csch_qcfg_qlist_csch_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_qcfg_csch_csw_drop_cnt_reg[] = { + { "csch_qcfg_csch_csw_drop_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csw_mmu_sop_cmd_cnt_reg[] = { + { "csw_mmu_sop_cmd_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_mmu_csw_sop_data_cnt_reg[] = { + { "mmu_csw_sop_data_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_etm_qmu_csw_qsch_feedb_cnt_reg[] = { + { "csw_qsch_feedb_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_crdt_port_fc_cnt_reg[] = { + { "qmu_crdt_port_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_csch_r_block_cnt_reg[] = { + { "csch_r_block_cnt", DPP_FIELD_FLAG_RO, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_qds_head_rd_reg[] = { + { "qcfg_qlist_qds_head_rd", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_qds_tail_rd_reg[] = { + { "qcfg_qlist_qds_tail_rd", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_ept_rd_reg[] = { + { "qcfg_qlist_ept_rd", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_age_flag_rd_reg[] = { + { "qcfg_qlist_age_flag_rd", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_cti_rd_reg[] = { + { "qcfg_qlist_cti_rd", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_cto_rd_reg[] = { + { "qcfg_qlist_cto_rd", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_chk_rd_reg[] = { + { "qcfg_qlist_chk_rd", DPP_FIELD_FLAG_RO, 18, 19, 0x1, 0x1 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_nod_rd_reg[] = { + { "qcfg_qlist_nod_rd", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_qlist_biu_rd_reg[] = { + { "qcfg_qlist_biu_rd", DPP_FIELD_FLAG_RO, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_r_wlist_flag_reg[] = { + { "qsch_r_wlist_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qcfg_crs_flg_rd_reg[] = { + { "qcfg_crs_flg_rd", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_age_qds_reg[] = { + { "cfgmt_qmu_imem_tp", DPP_FIELD_FLAG_RO, 30, 15, 0x0, 0x0 }, + { "cfgmt_qmu_imem_hp", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_age_qlen_reg[] = { + { "cfgmt_qmu_imem_no_empty", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { 
"cfgmt_qmu_imem_qlen", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_pd_ram_low_reg[] = { + { "cfgmt_qmu_imem_pd_ram_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_pd_ram_high_reg[] = { + { "cfgmt_qmu_imem_pd_ram_high", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_up_ptr_reg[] = { + { "cfgmt_qmu_imem_up_ptr", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_down_ptr_reg[] = { + { "cfgmt_qmu_imem_down_ptr", DPP_FIELD_FLAG_RO, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfgmt_qmu_imem_age_flag_reg[] = { + { "cfgmt_qmu_imem_age_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybkt2cngth_reg[] = { + { "cfg_qsch_lkybkt2cngth", DPP_FIELD_FLAG_RW, 31, 32, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybkt1cngth_reg[] = { + { "cfg_qsch_lkybkt1cngth", DPP_FIELD_FLAG_RW, 31, 32, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_lkybkt3cngth_reg[] = { + { "cfg_qsch_lkybkt3cngth", DPP_FIELD_FLAG_RW, 31, 32, 0x40, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn1_credit_value_reg[] = { + { "cfg_qsch_rm_mul_mcn1_credit_value", DPP_FIELD_FLAG_RW, 13, 14, 0x5fe, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn2_credit_value_reg[] = { + { "cfg_qsch_rm_mul_mcn2_credit_value", DPP_FIELD_FLAG_RW, 13, 14, 0x5fe, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn3_credit_value_reg[] = { + { "cfg_qsch_rm_mul_mcn3_credit_value", DPP_FIELD_FLAG_RW, 13, 14, 0x5fe, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_ansr_seed_reg[] = { + { "cfg_qsch_rm_mul_mcn1_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, + 0x0 }, + { "cfg_qsch_rm_mul_mcn1_rand_ansr_seed", DPP_FIELD_FLAG_RW, 7, 8, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_ansr_seed_reg[] = { + { "cfg_qsch_rm_mul_mcn2_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, + 0x0 }, + { "cfg_qsch_rm_mul_mcn2_rand_ansr_seed", 
DPP_FIELD_FLAG_RW, 7, 8, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_ansr_seed_reg[] = { + { "cfg_qsch_rm_mul_mcn3_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, + 0x0 }, + { "cfg_qsch_rm_mul_mcn3_rand_ansr_seed", DPP_FIELD_FLAG_RW, 7, 8, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_ansr_th_reg[] = { + { "cfg_qsch_rm_mul_mcn1_rand_ansr_th", DPP_FIELD_FLAG_RW, 7, 8, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_ansr_th_reg[] = { + { "cfg_qsch_rm_mul_mcn2_rand_ansr_th", DPP_FIELD_FLAG_RW, 7, 8, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_ansr_th_reg[] = { + { "cfg_qsch_rm_mul_mcn3_rand_ansr_th", DPP_FIELD_FLAG_RW, 7, 8, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_hold_base_reg[] = { + { "cfg_qsch_rm_mul_mcn1_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x1, + 0x0 }, + { "cfg_qsch_rm_mul_mcn1_rand_hold_base", DPP_FIELD_FLAG_RW, 23, 24, + 0x1000, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_hold_base_reg[] = { + { "cfg_qsch_rm_mul_mcn2_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x1, + 0x0 }, + { "cfg_qsch_rm_mul_mcn2_rand_hold_base", DPP_FIELD_FLAG_RW, 23, 24, + 0x1000, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_hold_base_reg[] = { + { "cfg_qsch_rm_mul_mcn3_rand_mchsm_en", DPP_FIELD_FLAG_RW, 31, 1, 0x1, + 0x0 }, + { "cfg_qsch_rm_mul_mcn3_rand_hold_base", DPP_FIELD_FLAG_RW, 23, 24, + 0x1000, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_sel_mask_reg[] = { + { "cfg_qsch_rm_mul_mcn1_rand_sel_mask", DPP_FIELD_FLAG_RW, 8, 9, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_sel_mask_reg[] = { + { "cfg_qsch_rm_mul_mcn2_rand_sel_mask", DPP_FIELD_FLAG_RW, 8, 9, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_sel_mask_reg[] = { + { "cfg_qsch_rm_mul_mcn3_rand_sel_mask", DPP_FIELD_FLAG_RW, 8, 9, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg0_reg[] = { + { "rm_mul_mcn1_rand_sel_seed7", DPP_FIELD_FLAG_RW, 31, 4, 0x8, 0x0 }, + { 
"rm_mul_mcn1_rand_sel_seed6", DPP_FIELD_FLAG_RW, 27, 4, 0x7, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed5", DPP_FIELD_FLAG_RW, 23, 4, 0x6, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed4", DPP_FIELD_FLAG_RW, 19, 4, 0x5, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed3", DPP_FIELD_FLAG_RW, 15, 4, 0x4, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed2", DPP_FIELD_FLAG_RW, 11, 4, 0x3, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed1", DPP_FIELD_FLAG_RW, 7, 4, 0x2, 0x0 }, + { "rm_mul_mcn1_rand_sel_seed0", DPP_FIELD_FLAG_RW, 3, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg1_reg[] = { + { "rm_mul_mcn1_rand_sel_seed8", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg0_reg[] = { + { "rm_mul_mcn2_rand_sel_seed7", DPP_FIELD_FLAG_RW, 31, 4, 0x8, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed6", DPP_FIELD_FLAG_RW, 27, 4, 0x7, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed5", DPP_FIELD_FLAG_RW, 23, 4, 0x6, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed4", DPP_FIELD_FLAG_RW, 19, 4, 0x5, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed3", DPP_FIELD_FLAG_RW, 15, 4, 0x4, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed2", DPP_FIELD_FLAG_RW, 11, 4, 0x3, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed1", DPP_FIELD_FLAG_RW, 7, 4, 0x2, 0x0 }, + { "rm_mul_mcn2_rand_sel_seed0", DPP_FIELD_FLAG_RW, 3, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg1_reg[] = { + { "rm_mul_mcn2_rand_sel_seed8", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg0_reg[] = { + { "rm_mul_mcn3_rand_sel_seed7", DPP_FIELD_FLAG_RW, 31, 4, 0x8, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed6", DPP_FIELD_FLAG_RW, 27, 4, 0x7, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed5", DPP_FIELD_FLAG_RW, 23, 4, 0x6, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed4", DPP_FIELD_FLAG_RW, 19, 4, 0x5, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed3", DPP_FIELD_FLAG_RW, 15, 4, 0x4, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed2", DPP_FIELD_FLAG_RW, 11, 4, 0x3, 0x0 }, + { "rm_mul_mcn3_rand_sel_seed1", DPP_FIELD_FLAG_RW, 7, 4, 0x2, 0x0 }, + { 
"rm_mul_mcn3_rand_sel_seed0", DPP_FIELD_FLAG_RW, 3, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg1_reg[] = { + { "rm_mul_mcn3_rand_sel_seed8", DPP_FIELD_FLAG_RW, 3, 4, 0x9, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th1_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th1", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th2_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th2", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th3_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th3", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th4_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th4", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th5_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th5", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th6_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th6", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn1_step_wait_th7_reg[] = { + { "cfg_qsch_rm_mul_mcn1_step_wait_th7", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th1_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th1", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th2_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th2", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th3_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th3", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th4_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th4", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th5_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th5", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T 
g_etm_qmu_rm_mul_mcn2_step_wait_th6_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th6", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn2_step_wait_th7_reg[] = { + { "cfg_qsch_rm_mul_mcn2_step_wait_th7", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th1_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th1", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th2_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th2", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th3_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th3", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th4_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th4", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th5_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th5", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3_step_wait_th6_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th6", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_rm_mul_mcn3step_wait_th7_reg[] = { + { "cfg_qsch_rm_mul_mcn3_step_wait_th7", DPP_FIELD_FLAG_RW, 15, 16, 0x80, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate0_reg[] = { + { "cfg_qsch_mulcrdcntrate0", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate1_reg[] = { + { "cfg_qsch_mulcrdcntrate1", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate2_reg[] = { + { "cfg_qsch_mulcrdcntrate2", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate3_reg[] = { + { "cfg_qsch_mulcrdcntrate3", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate4_reg[] = { + { "cfg_qsch_mulcrdcntrate4", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T 
g_etm_qmu_cfg_qsch_mulcrdcntrate5_reg[] = { + { "cfg_qsch_mulcrdcntrate5", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate6_reg[] = { + { "cfg_qsch_mulcrdcntrate6", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate7_reg[] = { + { "cfg_qsch_mulcrdcntrate7", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate8_reg[] = { + { "cfg_qsch_mulcrdcntrate8", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate9_reg[] = { + { "cfg_qsch_mulcrdcntrate9", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate10_reg[] = { + { "cfg_qsch_mulcrdcntrate10", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate11_reg[] = { + { "cfg_qsch_mulcrdcntrate11", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate12_reg[] = { + { "cfg_qsch_mulcrdcntrate12", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate13_reg[] = { + { "cfg_qsch_mulcrdcntrate13", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate14_reg[] = { + { "cfg_qsch_mulcrdcntrate14", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate15_reg[] = { + { "cfg_qsch_mulcrdcntrate15", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate16_reg[] = { + { "cfg_qsch_mulcrdcntrate16", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate17_reg[] = { + { "cfg_qsch_mulcrdcntrate17", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate18_reg[] = { + { "cfg_qsch_mulcrdcntrate18", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate19_reg[] = { + { "cfg_qsch_mulcrdcntrate19", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, 
+}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate20_reg[] = { + { "cfg_qsch_mulcrdcntrate20", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate21_reg[] = { + { "cfg_qsch_mulcrdcntrate21", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate22_reg[] = { + { "cfg_qsch_mulcrdcntrate22", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate23_reg[] = { + { "cfg_qsch_mulcrdcntrate23", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate24_reg[] = { + { "cfg_qsch_mulcrdcntrate24", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate25_reg[] = { + { "cfg_qsch_mulcrdcntrate25", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate26_reg[] = { + { "cfg_qsch_mulcrdcntrate26", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate27_reg[] = { + { "cfg_qsch_mulcrdcntrate27", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate28_reg[] = { + { "cfg_qsch_mulcrdcntrate28", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate29_reg[] = { + { "cfg_qsch_mulcrdcntrate29", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate30_reg[] = { + { "cfg_qsch_mulcrdcntrate30", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate31_reg[] = { + { "cfg_qsch_mulcrdcntrate31", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate32_reg[] = { + { "cfg_qsch_mulcrdcntrate32", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate33_reg[] = { + { "cfg_qsch_mulcrdcntrate33", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate34_reg[] = { + { "cfg_qsch_mulcrdcntrate34", 
DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate35_reg[] = { + { "cfg_qsch_mulcrdcntrate35", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_mulcrdcntrate36_reg[] = { + { "cfg_qsch_mulcrdcntrate36", DPP_FIELD_FLAG_RW, 19, 20, 0x100, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn1_rand_hold_shift_reg[] = { + { "cfg_qsch_rm_mul_mcn1_rand_hold_shift", DPP_FIELD_FLAG_RW, 2, 3, 0x7, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn2_rand_hold_shift_reg[] = { + { "cfg_qsch_rm_mul_mcn2_rand_hold_shift", DPP_FIELD_FLAG_RW, 2, 3, 0x7, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_cfg_qsch_rm_mul_mcn3_rand_hold_shift_reg[] = { + { "cfg_qsch_rm_mul_mcn3_rand_hold_shift", DPP_FIELD_FLAG_RW, 2, 3, 0x7, + 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_last_drop_qnum_get_reg[] = { + { "cgavd_qmu_drop_tap", DPP_FIELD_FLAG_RC, 31, 4, 0x0, 0x0 }, + { "last_drop_qnum", DPP_FIELD_FLAG_RC, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_crdt_qmu_credit_cnt_reg[] = { + { "crdt_qmu_credit_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_qsch_report_cnt_reg[] = { + { "qmu_to_qsch_report_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_cgavd_report_cnt_reg[] = { + { "qmu_to_cgavd_report_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_crdt_crs_normal_cnt_reg[] = { + { "qmu_crdt_crs_normal_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_crdt_crs_off_cnt_reg[] = { + { "qmu_crdt_crs_off_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_qlist_shedule_cnt_reg[] = { + { "qsch_qlist_shedule_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qsch_qlist_sch_ept_cnt_reg[] = { + { "qsch_qlist_sch_ept_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_mmu_blk_wr_cnt_reg[] = { + { "qmu_to_mmu_blk_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_etm_qmu_qmu_to_csw_blk_rd_cnt_reg[] = { + { "qmu_to_csw_blk_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_mmu_sop_wr_cnt_reg[] = { + { "qmu_to_mmu_sop_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_mmu_eop_wr_cnt_reg[] = { + { "qmu_to_mmu_eop_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_mmu_drop_wr_cnt_reg[] = { + { "qmu_to_mmu_drop_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_csw_sop_rd_cnt_reg[] = { + { "qmu_to_csw_sop_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_csw_eop_rd_cnt_reg[] = { + { "qmu_to_csw_eop_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_qmu_to_csw_drop_rd_cnt_reg[] = { + { "qmu_to_csw_drop_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_mmu_to_qmu_wr_release_cnt_reg[] = { + { "mmu_to_qmu_wr_release_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_mmu_to_qmu_rd_release_cnt_reg[] = { + { "mmu_to_qmu_rd_release_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_observe_qnum_set_reg[] = { + { "observe_qnum_set", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_pkt_received_reg[] = { + { "spec_q_pkt_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_pkt_dropped_reg[] = { + { "spec_q_pkt_dropped", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_pkt_scheduled_reg[] = { + { "spec_q_pkt_scheduled", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_wr_cmd_sent_reg[] = { + { "spec_q_wr_cmd_sent", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_rd_cmd_sent_reg[] = { + { "spec_q_rd_cmd_sent", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_pkt_enq_reg[] = { + { "spec_q_pkt_enq", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_etm_qmu_spec_q_pkt_deq_reg[] = { + { "spec_q_pkt_deq", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crdt_uncon_received_reg[] = { + { "spec_q_crdt_uncon_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crdt_cong_received_reg[] = { + { "spec_q_crdt_cong_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crs_normal_cnt_reg[] = { + { "spec_q_crs_normal_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crs_off_cnt_reg[] = { + { "spec_q_crs_off_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_observe_batch_set_reg[] = { + { "observe_batch_set", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_pkt_received_reg[] = { + { "spec_bat_pkt_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_pkt_dropped_reg[] = { + { "spec_bat_pkt_dropped", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_blk_scheduled_reg[] = { + { "spec_bat_blk_scheduled", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_wr_cmd_sent_reg[] = { + { "spec_bat_wr_cmd_sent", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_rd_cmd_sent_reg[] = { + { "spec_bat_rd_cmd_sent", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_pkt_enq_reg[] = { + { "spec_bat_pkt_enq", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_pkt_deq_reg[] = { + { "spec_bat_pkt_deq", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_crdt_uncon_received_reg[] = { + { "spec_bat_crdt_uncon_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_crdt_cong_received_reg[] = { + { "spec_bat_crdt_cong_received", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_crs_normal_cnt_reg[] = { + { "spec_bat_crs_normal_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_bat_crs_off_cnt_reg[] = { + { "spec_bat_crs_off_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_bcntm_ovfl_qnum_get_reg[] = { + { "bcntm_ovfl_qnum_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_crbal_a_ovf_qnum_get_reg[] = { + { "crbal_a_ovf_qnum_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_crbal_b_ovf_qnum_get_reg[] = { + { "crbal_b_ovf_qnum_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_crbal_drop_qnum_get_reg[] = { + { "crbal_drop_qnum_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_deq_flg_report_cnt_reg[] = { + { "deq_flg_report_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crs_get_reg[] = { + { "spec_q_crs_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crs_in_get_reg[] = { + { "spec_q_crs_in_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_spec_q_crs_flg_csol_get_reg[] = { + { "spec_q_crs_flg_csol_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_etm_qmu_ept_sch_qnum_get_reg[] = { + { "ept_sch_qnum_get", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_pcie_ddr_switch_reg[] = { + { "pcie_ddr_switch", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user0_int_en_reg[] = { + { "user_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user0_int_mask_reg[] = { + { "user_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user0_int_status_reg[] = { + { "user_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user1_int_en_reg[] = { + { "user_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user1_int_mask_reg[] = { + { "user_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_pcie_user1_int_status_reg[] = { + { "user_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user2_int_en_reg[] = { + { "user_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user2_int_mask_reg[] = { + { "user_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_user2_int_status_reg[] = { + { "user_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_1b_int_en_reg[] = { + { "ecc_1b_int_en", DPP_FIELD_FLAG_RW, 2, 3, 0xfffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_1b_int_mask_reg[] = { + { "ecc_1b_int_mask", DPP_FIELD_FLAG_RW, 2, 3, 0xfffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_1b_int_status_reg[] = { + { "ecc_1b_int_status", DPP_FIELD_FLAG_RC, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_2b_int_en_reg[] = { + { "ecc_2b_int_en", DPP_FIELD_FLAG_RW, 2, 3, 0xfffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_2b_int_mask_reg[] = { + { "ecc_2b_int_mask", DPP_FIELD_FLAG_RW, 2, 3, 0xfffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_ecc_2b_int_status_reg[] = { + { "ecc_2b_int_status", DPP_FIELD_FLAG_RC, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_cfg_int_status_reg[] = { + { "cfg_int_status", DPP_FIELD_FLAG_RC, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_i_core_to_cntl_reg[] = { + { "i_core_to_cntl", DPP_FIELD_FLAG_RW, 15, 16, 0xaaaa, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_in_low_reg[] = { + { "test_in_low", DPP_FIELD_FLAG_RW, 31, 32, 0x00004000, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_in_high_reg[] = { + { "test_in_high", DPP_FIELD_FLAG_RW, 31, 32, 0x00000000, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_local_interrupt_out_reg[] = { + { "local_interrupt_out", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_pl_ltssm_reg[] = { + { "pl_ltssm", DPP_FIELD_FLAG_RO, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out0_reg[] = { + { "test_out0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out1_reg[] = { + { 
"test_out1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out2_reg[] = { + { "test_out2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out3_reg[] = { + { "test_out3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out4_reg[] = { + { "test_out4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out5_reg[] = { + { "test_out5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out6_reg[] = { + { "test_out6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out7_reg[] = { + { "test_out7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_core_status_reg[] = { + { "sync_o_core_status", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_alert_dbe_reg[] = { + { "sync_o_alert_dbe", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_alert_sbe_reg[] = { + { "sync_o_alert_sbe", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_link_loopback_en_reg[] = { + { "sync_o_link_loopback_en", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_local_fs_lf_valid_reg[] = { + { "sync_o_local_fs_lf_valid", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_rx_idle_detect_reg[] = { + { "sync_o_rx_idle_detect", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_rx_rdy_reg[] = { + { "sync_o_rx_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_sync_o_tx_rdy_reg[] = { + { "sync_o_tx_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_pcie_link_up_cnt_reg[] = { + { "pcie_link_up_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie0_reg[] = { + { "test_out_pcie0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie1_reg[] = { + { "test_out_pcie1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_cfg_pcie_test_out_pcie2_reg[] = { + { "test_out_pcie2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie3_reg[] = { + { "test_out_pcie3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie4_reg[] = { + { "test_out_pcie4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie5_reg[] = { + { "test_out_pcie5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie6_reg[] = { + { "test_out_pcie6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie7_reg[] = { + { "test_out_pcie7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie8_reg[] = { + { "test_out_pcie8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie9_reg[] = { + { "test_out_pcie9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie10_reg[] = { + { "test_out_pcie10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie11_reg[] = { + { "test_out_pcie11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie12_reg[] = { + { "test_out_pcie12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie13_reg[] = { + { "test_out_pcie13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie14_reg[] = { + { "test_out_pcie14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_test_out_pcie15_reg[] = { + { "test_out_pcie15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_int_repeat_en_reg[] = { + { "int_repeat_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awid_axi_mst_reg[] = { + { "dbg_awid_axi_mst", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awaddr_axi_mst0_reg[] = { + { "dbg_awaddr_axi_mst0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_pcie_dbg_awaddr_axi_mst1_reg[] = { + { "dbg_awaddr_axi_mst1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awlen_axi_mst_reg[] = { + { "dbg_awlen_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awsize_axi_mst_reg[] = { + { "dbg_awid_axi_mst", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awburst_axi_mst_reg[] = { + { "dbg_awburst_axi_mst", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awlock_axi_mst_reg[] = { + { "dbg_awlock_axi_mst", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awcache_axi_mst_reg[] = { + { "dbg_awcache_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awprot_axi_mst_reg[] = { + { "dbg_awprot_axi_mst", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wid_axi_mst_reg[] = { + { "dbg_wid_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wdata_axi_mst0_reg[] = { + { "dbg_wdata_axi_mst0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wdata_axi_mst1_reg[] = { + { "dbg_wdata_axi_mst1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wdata_axi_mst2_reg[] = { + { "dbg_wdata_axi_mst2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wdata_axi_mst3_reg[] = { + { "dbg_wdata_axi_mst3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wstrb_axi_mst_reg[] = { + { "dbg_wstrb_axi_mst", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wlast_axi_mst_reg[] = { + { "dbg_wlast_axi_mst", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arid_axi_mst_reg[] = { + { "dbg_arid_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_araddr_axi_mst0_reg[] = { + { "dbg_araddr_axi_mst0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_araddr_axi_mst1_reg[] = { + { "dbg_araddr_axi_mst1", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arlen_axi_mst_reg[] = { + { "dbg_arlen_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arsize_axi_mst_reg[] = { + { "dbg_arsize_axi_mst", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arburst_axi_mst_reg[] = { + { "dbg_arburst_axi_mst", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arlock_axi_mst_reg[] = { + { "dbg_arlock_axi_mst", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arcache_axi_mst_reg[] = { + { "dbg_arcache_axi_mst", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_arprot_axi_mst_reg[] = { + { "dbg_arprot_axi_mst", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_rdata_axi_mst0_reg[] = { + { "dbg_rdata_axi_mst0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_rdata_axi_mst1_reg[] = { + { "dbg_rdata_axi_mst1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_rdata_axi_mst2_reg[] = { + { "dbg_rdata_axi_mst2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_rdata_axi_mst3_reg[] = { + { "dbg_rdata_axi_mst3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axi_mst_state_reg[] = { + { "axi_mst_state", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axi_cfg_state_reg[] = { + { "axi_cfg_state", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axi_slv_rd_state_reg[] = { + { "axi_slv_rd_state", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axi_slv_wr_state_reg[] = { + { "axi_slv_wr_state", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axim_delay_en_reg[] = { + { "axim_delay_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axim_delay_reg[] = { + { "axim_delay", DPP_FIELD_FLAG_RW, 31, 32, 0xff, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axim_speed_wr_reg[] = { + { "axim_speed_wr", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_axim_speed_rd_reg[] = { + { "axim_speed_rd", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awaddr_axi_slv0_reg[] = { + { "dbg_awaddr_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awaddr_axi_slv1_reg[] = { + { "dbg_awaddr_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_wdata_axi_slv0_reg[] = { + { "dbg0_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_wdata_axi_slv1_reg[] = { + { "dbg0_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_wdata_axi_slv2_reg[] = { + { "dbg0_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_wdata_axi_slv3_reg[] = { + { "dbg0_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_wdata_axi_slv0_reg[] = { + { "dbg1_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_wdata_axi_slv1_reg[] = { + { "dbg1_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_wdata_axi_slv2_reg[] = { + { "dbg1_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_wdata_axi_slv3_reg[] = { + { "dbg1_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg2_wdata_axi_slv0_reg[] = { + { "dbg2_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg2_wdata_axi_slv1_reg[] = { + { "dbg2_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg2_wdata_axi_slv2_reg[] = { + { "dbg2_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg2_wdata_axi_slv3_reg[] = { + { "dbg2_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg3_wdata_axi_slv0_reg[] = { + { "dbg3_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg3_wdata_axi_slv1_reg[] = { + { "dbg3_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg3_wdata_axi_slv2_reg[] = { + { "dbg3_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg3_wdata_axi_slv3_reg[] = { + { "dbg3_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg4_wdata_axi_slv0_reg[] = { + { "dbg4_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg4_wdata_axi_slv1_reg[] = { + { "dbg4_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg4_wdata_axi_slv2_reg[] = { + { "dbg4_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg4_wdata_axi_slv3_reg[] = { + { "dbg4_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg5_wdata_axi_slv0_reg[] = { + { "dbg5_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg5_wdata_axi_slv1_reg[] = { + { "dbg5_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg5_wdata_axi_slv2_reg[] = { + { "dbg5_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg5_wdata_axi_slv3_reg[] = { + { "dbg5_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg6_wdata_axi_slv0_reg[] = { + { "dbg6_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg6_wdata_axi_slv1_reg[] = { + { "dbg6_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg6_wdata_axi_slv2_reg[] = { + { "dbg6_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg6_wdata_axi_slv3_reg[] = { + { "dbg6_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg7_wdata_axi_slv0_reg[] = { + { "dbg7_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_cfg_pcie_dbg7_wdata_axi_slv1_reg[] = { + { "dbg7_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg7_wdata_axi_slv2_reg[] = { + { "dbg7_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg7_wdata_axi_slv3_reg[] = { + { "dbg7_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg8_wdata_axi_slv0_reg[] = { + { "dbg8_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg8_wdata_axi_slv1_reg[] = { + { "dbg8_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg8_wdata_axi_slv2_reg[] = { + { "dbg8_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg8_wdata_axi_slv3_reg[] = { + { "dbg8_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg9_wdata_axi_slv0_reg[] = { + { "dbg9_wdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg9_wdata_axi_slv1_reg[] = { + { "dbg9_wdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg9_wdata_axi_slv2_reg[] = { + { "dbg9_wdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg9_wdata_axi_slv3_reg[] = { + { "dbg9_wdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_awlen_axi_slv_reg[] = { + { "dbg_awlen_axi_slv", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_wlast_axi_slv_reg[] = { + { "dbg_wlast_axi_slv", DPP_FIELD_FLAG_RO, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_araddr_axi_slv0_reg[] = { + { "dbg_araddr_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_araddr_axi_slv1_reg[] = { + { "dbg_araddr_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_rdata_axi_slv0_reg[] = { + { "dbg0_rdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_pcie_dbg0_rdata_axi_slv1_reg[] = { + { "dbg0_rdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_rdata_axi_slv2_reg[] = { + { "dbg0_rdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg0_rdata_axi_slv3_reg[] = { + { "dbg0_rdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_rdata_axi_slv0_reg[] = { + { "dbg1_rdata_axi_slv0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_rdata_axi_slv1_reg[] = { + { "dbg1_rdata_axi_slv1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_rdata_axi_slv2_reg[] = { + { "dbg1_rdata_axi_slv2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg1_rdata_axi_slv3_reg[] = { + { "dbg1_rdata_axi_slv3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_pcie_dbg_rlast_axi_slv_reg[] = { + { "dbg_rlast_axi_slv", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_enable_reg[] = { + { "dma_enable", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_up_req_reg[] = { + { "up_req", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_current_state_reg[] = { + { "dma_up_current_state", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_req_ack_reg[] = { + { "dma_up_req_ack", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_done_latch_reg[] = { + { "done_latch", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_cpu_addr_low32_reg[] = { + { "dma_up_cpu_addr_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_cpu_addr_high32_reg[] = { + { "dma_up_cpu_addr_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_se_addr_reg[] = { + { "dma_up_se_addr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_done_int_reg[] = { + { "dma_done_int", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_dma_sp_cfg_reg[] = { + { "sp_cfg", DPP_FIELD_FLAG_RW, 1, 2, 0x1, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_ing_reg[] = { + { "dma_ing", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_rd_timeout_thresh_reg[] = { + { "rd_timeout_thresh", DPP_FIELD_FLAG_RW, 31, 32, 0x7d0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_tab_sta_up_fifo_gap_reg[] = { + { "dma_tab_sta_up_fifo_gap", DPP_FIELD_FLAG_RW, 8, 9, 0xfa, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cfg_mac_tim_reg[] = { + { "cfg_mac_tim", DPP_FIELD_FLAG_RW, 31, 32, 0x50, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cfg_mac_num_reg[] = { + { "cfg_mac_num", DPP_FIELD_FLAG_RW, 8, 9, 0x28, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_init_bd_addr_reg[] = { + { "init_bd_addr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_mac_up_bd_addr1_low32_reg[] = { + { "mac_up_bd_addr1_low32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_mac_up_bd_addr1_high32_reg[] = { + { "mac_up_bd_addr1_high32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_mac_up_bd_addr2_low32_reg[] = { + { "mac_up_bd_addr2_low32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_mac_up_bd_addr2_high32_reg[] = { + { "mac_up_bd_addr2_high32", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cfg_mac_max_num_reg[] = { + { "cfg_mac_max_num", DPP_FIELD_FLAG_RW, 8, 9, 0x1ff, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_wbuf_ff_empty_reg[] = { + { "dma_wbuf_ff_empty", DPP_FIELD_FLAG_RO, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_wbuf_state_reg[] = { + { "dma_wbuf_state", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_mac_bd_addr_low32_reg[] = { + { "dma_mac_bd_addr_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_mac_bd_addr_high32_reg[] = { + { "dma_mac_bd_addr_high32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_mac_up_enable_reg[] = { + { "mac_up_enable", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_dma_mac_endian_reg[] = { + { "mac_endian", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_up_endian_reg[] = { + { "up_endian", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_rd_cnt_latch_reg[] = { + { "dma_up_rd_cnt_latch", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_rcv_cnt_latch_reg[] = { + { "dma_up_rcv_cnt_latch", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_cnt_latch_reg[] = { + { "dma_up_cnt_latch", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cpu_rd_bd_pulse_reg[] = { + { "cpu_rd_bd_pulse", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cpu_bd_threshold_reg[] = { + { "cpu_bd_threshold", DPP_FIELD_FLAG_RW, 31, 32, 0x64, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cpu_bd_used_cnt_reg[] = { + { "cpu_bd_used_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_rcv_status_reg[] = { + { "dma_up_rcv_status", DPP_FIELD_FLAG_RO, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_slv_rid_err_en_reg[] = { + { "slv_rid_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_slv_rresp_err_en_reg[] = { + { "slv_rresp_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_se_rdbk_ff_full_reg[] = { + { "se_rdbk_ff_full", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_up_data_count_reg[] = { + { "dma_up_data_count", DPP_FIELD_FLAG_RO, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_mwr_fifo_afull_gap_reg[] = { + { "dma_mwr_fifo_afull_gap", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_info_fifo_afull_gap_reg[] = { + { "dma_info_fifo_afull_gap", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_rd_timeout_set_reg[] = { + { "dma_rd_timeout_set", DPP_FIELD_FLAG_RW, 31, 32, 0x3ffffff, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_bd_dat_err_en_reg[] = { + { "dma_bd_dat_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_cfg_dma_dma_repeat_cnt_reg[] = { + { "dma_repeat_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_rd_timeout_en_reg[] = { + { "dma_rd_timeout_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_repeat_read_reg[] = { + { "dma_repeat_read", DPP_FIELD_FLAG_RW, 31, 32, 0xa, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_repeat_read_en_reg[] = { + { "dma_repeat_read_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_bd_ctl_state_reg[] = { + { "bd_ctl_state", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_done_int_cnt_wr_reg[] = { + { "dma_done_int_cnt_wr", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_done_int_cnt_mac_reg[] = { + { "dma_done_int_cnt_mac", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_current_mac_num_reg[] = { + { "current_mac_num", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_cfg_mac_afifo_afull_reg[] = { + { "cfg_mac_afifo_afull", DPP_FIELD_FLAG_RW, 8, 9, 0xc8, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_dma_mac_ff_full_reg[] = { + { "dma_mac_ff_full", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_dma_user_axi_mst_reg[] = { + { "user_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "cfg_epid", DPP_FIELD_FLAG_RW, 27, 4, 0x0, 0x0 }, + { "cfg_vfunc_num", DPP_FIELD_FLAG_RW, 23, 8, 0x0, 0x0 }, + { "cfg_func_num", DPP_FIELD_FLAG_RW, 7, 3, 0x0, 0x0 }, + { "cfg_vfunc_active", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_sbus_state_reg[] = { + { "sbus_state", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_en_reg[] = { + { "mst_debug_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_sbus_command_sel_reg[] = { + { "sbus_command_sel", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_soc_rd_time_out_thresh_reg[] = { + { "soc_rd_time_out_thresh", DPP_FIELD_FLAG_RW, 31, 32, 0x2710, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_big_little_byte_order_reg[] = 
{ + { "big_little_byte_order", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_ecc_bypass_read_reg[] = { + { "ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_ahb_async_wr_fifo_afull_gap_reg[] = { + { "ahb_async_wr_fifo_afull_gap", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_ahb_async_rd_fifo_afull_gap_reg[] = { + { "ahb_async_rd_fifo_afull_gap", DPP_FIELD_FLAG_RW, 4, 5, 0xc, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_ahb_async_cpl_fifo_afull_gap_reg[] = { + { "ahb_async_cpl_fifo_afull_gap", DPP_FIELD_FLAG_RW, 4, 5, 0xc, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data0_high26_reg[] = { + { "mst_debug_data0_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data0_low32_reg[] = { + { "mst_debug_data0_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data1_high26_reg[] = { + { "mst_debug_data1_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data1_low32_reg[] = { + { "mst_debug_data1_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data2_high26_reg[] = { + { "mst_debug_data2_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data2_low32_reg[] = { + { "mst_debug_data2_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data3_high26_reg[] = { + { "mst_debug_data3_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data3_low32_reg[] = { + { "mst_debug_data3_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data4_high26_reg[] = { + { "mst_debug_data4_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data4_low32_reg[] = { + { "mst_debug_data4_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data5_high26_reg[] = { + { "mst_debug_data5_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data5_low32_reg[] = { + { "mst_debug_data5_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data6_high26_reg[] = { + { "mst_debug_data6_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data6_low32_reg[] = { + { "mst_debug_data6_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data7_high26_reg[] = { + { "mst_debug_data7_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data7_low32_reg[] = { + { "mst_debug_data7_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data8_high26_reg[] = { + { "mst_debug_data8_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data8_low32_reg[] = { + { "mst_debug_data8_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data9_high26_reg[] = { + { "mst_debug_data9_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data9_low32_reg[] = { + { "mst_debug_data9_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data10_high26_reg[] = { + { "mst_debug_data10_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data10_low32_reg[] = { + { "mst_debug_data10_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data11_high26_reg[] = { + { "mst_debug_data11_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data11_low32_reg[] = { + { "mst_debug_data11_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data12_high26_reg[] = { + { "mst_debug_data12_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data12_low32_reg[] = { + { "mst_debug_data12_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data13_high26_reg[] = { 
+ { "mst_debug_data13_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data13_low32_reg[] = { + { "mst_debug_data13_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data14_high26_reg[] = { + { "mst_debug_data14_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data14_low32_reg[] = { + { "mst_debug_data14_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data15_high26_reg[] = { + { "mst_debug_data15_high26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_cfg_csr_mst_debug_data15_low32_reg[] = { + { "mst_debug_data15_low32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_ind_access_states_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_ind_access_cmd0_reg[] = { + { "wr_mode", DPP_FIELD_FLAG_RW, 29, 1, 0x0, 0x0 }, + { "rd_or_wr", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "ind_access_addr0", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_ind_access_data0_reg[] = { + { "ind_access_data0", DPP_FIELD_FLAG_RW, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_ind_access_data1_reg[] = { + { "ind_access_data1", DPP_FIELD_FLAG_RW, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_ind_access_cmd1_reg[] = { + { "ind_access_addr1", DPP_FIELD_FLAG_RW, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_init_done_reg[] = { + { "mr_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_cnt_mode_reg_reg[] = { + { "cfgmt_count_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "cfgmt_count_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_cfg_ecc_bypass_read_reg[] = { + { "cfg_ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_cfg_rep_mod_reg[] = { + { "cfg_rep_mod", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_mr_cfg_block_ptr_fifo_aful_th_reg[] = { + { "block_ptr3_fifo_aful_th", DPP_FIELD_FLAG_RW, 31, 8, 0xa0, 0x0 }, + { "block_ptr2_fifo_aful_th", DPP_FIELD_FLAG_RW, 23, 8, 0xa0, 0x0 }, + { "block_ptr1_fifo_aful_th", DPP_FIELD_FLAG_RW, 15, 8, 0xa0, 0x0 }, + { "block_ptr0_fifo_aful_th", DPP_FIELD_FLAG_RW, 7, 8, 0xa0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_pre_rcv_ptr_fifo_aful_th_reg[] = { + { "pre_rcv_ptr3_fifo_aful_th", DPP_FIELD_FLAG_RW, 31, 8, 0xa0, 0x0 }, + { "pre_rcv_ptr2_fifo_aful_th", DPP_FIELD_FLAG_RW, 23, 8, 0xa0, 0x0 }, + { "pre_rcv_ptr1_fifo_aful_th", DPP_FIELD_FLAG_RW, 15, 8, 0xa0, 0x0 }, + { "pre_rcv_ptr0_fifo_aful_th", DPP_FIELD_FLAG_RW, 7, 8, 0xa0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mgid_fifo_aful_th_reg[] = { + { "mgid3_fifo_aful_th", DPP_FIELD_FLAG_RW, 31, 8, 0xa0, 0x0 }, + { "mgid2_fifo_aful_th", DPP_FIELD_FLAG_RW, 23, 8, 0xa0, 0x0 }, + { "mgid1_fifo_aful_th", DPP_FIELD_FLAG_RW, 15, 8, 0xa0, 0x0 }, + { "mgid0_fifo_aful_th", DPP_FIELD_FLAG_RW, 7, 8, 0xa0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_rep_cmd_fifo_aful_th_reg[] = { + { "rep_cmd3_fifo_aful_th", DPP_FIELD_FLAG_RW, 31, 8, 0x14, 0x0 }, + { "rep_cmd2_fifo_aful_th", DPP_FIELD_FLAG_RW, 23, 8, 0x14, 0x0 }, + { "rep_cmd1_fifo_aful_th", DPP_FIELD_FLAG_RW, 15, 8, 0x14, 0x0 }, + { "rep_cmd0_fifo_aful_th", DPP_FIELD_FLAG_RW, 7, 8, 0x14, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_int_mask_1_reg[] = { + { "free_ptr0_fifo_full_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "free_ptr1_fifo_full_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "free_ptr2_fifo_full_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "free_ptr3_fifo_full_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "block_ptr0_fifo_full_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "block_ptr1_fifo_full_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "block_ptr2_fifo_full_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "block_ptr3_fifo_full_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "mgid0_fifo_full_mask", DPP_FIELD_FLAG_RW, 11, 
1, 0x1, 0x0 }, + { "mgid1_fifo_full_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "mgid2_fifo_full_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "mgid3_fifo_full_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pre_rcv_ptr0_fifo_full_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pre_rcv_ptr1_fifo_full_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pre_rcv_ptr2_fifo_full_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "pre_rcv_ptr3_fifo_full_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "rep_cmd0_fifo_full_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "rep_cmd1_fifo_full_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "rep_cmd2_fifo_full_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "rep_cmd3_fifo_full_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_int_mask_2_reg[] = { + { "free_ptr0_fifo_udf_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "free_ptr1_fifo_udf_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "free_ptr2_fifo_udf_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "free_ptr3_fifo_udf_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "block_ptr0_fifo_udf_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "block_ptr1_fifo_udf_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "block_ptr2_fifo_udf_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "block_ptr3_fifo_udf_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "mgid0_fifo_udf_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "mgid1_fifo_udf_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "mgid2_fifo_udf_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "mgid3_fifo_udf_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pre_rcv_ptr0_fifo_udf_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pre_rcv_ptr1_fifo_udf_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pre_rcv_ptr2_fifo_udf_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "pre_rcv_ptr3_fifo_udf_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "rep_cmd0_fifo_udf_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { 
"rep_cmd1_fifo_udf_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "rep_cmd2_fifo_udf_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "rep_cmd3_fifo_udf_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_int_mask_3_reg[] = { + { "free_ptr0_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "free_ptr1_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "free_ptr2_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "free_ptr3_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "block_ptr0_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "block_ptr1_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "block_ptr2_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "block_ptr3_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "mgid0_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "mgid1_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "mgid2_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "mgid3_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pre_rcv_ptr0_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pre_rcv_ptr1_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pre_rcv_ptr2_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "pre_rcv_ptr3_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "rep_cmd0_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "rep_cmd1_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "rep_cmd2_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "rep_cmd3_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_int_mask_4_reg[] = { + { "data_buf0_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "data_buf1_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "data_buf2_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "data_buf3_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { 
"mlt_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "free_ptr0_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, + 0x0 }, + { "free_ptr1_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, + 0x0 }, + { "free_ptr2_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, + 0x0 }, + { "free_ptr3_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, + 0x0 }, + { "block_ptr0_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, + 0x0 }, + { "block_ptr1_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, + 0x0 }, + { "block_ptr2_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, + 0x0 }, + { "block_ptr3_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, + 0x0 }, + { "mgid0_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, + 0x0 }, + { "mgid1_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, + 0x0 }, + { "mgid2_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "mgid3_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pre_rcv_ptr0_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, + 0x0 }, + { "pre_rcv_ptr1_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, + 0x0 }, + { "pre_rcv_ptr2_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, + 0x0 }, + { "pre_rcv_ptr3_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "rep_cmd0_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "rep_cmd1_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, + 0x0 }, + { "rep_cmd2_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, + 0x0 }, + { "rep_cmd3_fifo_ecc_single_err_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_1_reg[] = { + { "free_ptr0_fifo_full", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "free_ptr1_fifo_full", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_ptr2_fifo_full", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_ptr3_fifo_full", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, 
+ { "block_ptr0_fifo_full", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "block_ptr1_fifo_full", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "block_ptr2_fifo_full", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "block_ptr3_fifo_full", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "mgid0_fifo_full", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mgid1_fifo_full", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mgid2_fifo_full", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mgid3_fifo_full", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "pre_rcv_ptr0_fifo_full", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "pre_rcv_ptr1_fifo_full", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "pre_rcv_ptr2_fifo_full", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "pre_rcv_ptr3_fifo_full", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "rep_cmd0_fifo_full", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rep_cmd1_fifo_full", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rep_cmd2_fifo_full", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rep_cmd3_fifo_full", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_2_reg[] = { + { "free_ptr0_fifo_udf", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "free_ptr1_fifo_udf", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_ptr2_fifo_udf", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_ptr3_fifo_udf", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "block_ptr0_fifo_udf", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "block_ptr1_fifo_udf", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "block_ptr2_fifo_udf", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "block_ptr3_fifo_udf", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "mgid0_fifo_udf", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mgid1_fifo_udf", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mgid2_fifo_udf", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mgid3_fifo_udf", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "pre_rcv_ptr0_fifo_udf", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "pre_rcv_ptr1_fifo_udf", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "pre_rcv_ptr2_fifo_udf", 
DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "pre_rcv_ptr3_fifo_udf", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "rep_cmd0_fifo_udf", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rep_cmd1_fifo_udf", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rep_cmd2_fifo_udf", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rep_cmd3_fifo_udf", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_3_reg[] = { + { "free_ptr0_fifo_ovf", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "free_ptr1_fifo_ovf", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_ptr2_fifo_ovf", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_ptr3_fifo_ovf", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "block_ptr0_fifo_ovf", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "block_ptr1_fifo_ovf", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "block_ptr2_fifo_ovf", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "block_ptr3_fifo_ovf", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "mgid0_fifo_ovf", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mgid1_fifo_ovf", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mgid2_fifo_ovf", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mgid3_fifo_ovf", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "pre_rcv_ptr0_fifo_ovf", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "pre_rcv_ptr1_fifo_ovf", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "pre_rcv_ptr2_fifo_ovf", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "pre_rcv_ptr3_fifo_ovf", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "rep_cmd0_fifo_ovf", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rep_cmd1_fifo_ovf", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rep_cmd2_fifo_ovf", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rep_cmd3_fifo_ovf", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_4_reg[] = { + { "data_buf0_ram_parity_err", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "data_buf1_ram_parity_err", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "data_buf2_ram_parity_err", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "data_buf3_ram_parity_err", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 
}, + { "mlt_ecc_single_err", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "free_ptr0_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "free_ptr1_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_ptr2_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_ptr3_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "block_ptr0_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 15, 1, 0x0, + 0x0 }, + { "block_ptr1_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "block_ptr2_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, + 0x0 }, + { "block_ptr3_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, + 0x0 }, + { "mgid0_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mgid1_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mgid2_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mgid3_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "pre_rcv_ptr0_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr1_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr2_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr3_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "rep_cmd0_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rep_cmd1_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rep_cmd2_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rep_cmd3_fifo_ecc_single_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_5_reg[] = { + { "mlt_ecc_double_err", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "free_ptr0_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "free_ptr1_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "free_ptr2_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "free_ptr3_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { 
"block_ptr0_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 15, 1, 0x0, + 0x0 }, + { "block_ptr1_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "block_ptr2_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 13, 1, 0x0, + 0x0 }, + { "block_ptr3_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 12, 1, 0x0, + 0x0 }, + { "mgid0_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mgid1_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mgid2_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mgid3_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "pre_rcv_ptr0_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr1_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 6, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr2_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr3_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "rep_cmd0_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rep_cmd1_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rep_cmd2_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rep_cmd3_fifo_ecc_double_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_6_reg[] = { + { "free_ptr0_fifo_empty", DPP_FIELD_FLAG_RO, 19, 1, 0x1, 0x0 }, + { "free_ptr1_fifo_empty", DPP_FIELD_FLAG_RO, 18, 1, 0x1, 0x0 }, + { "free_ptr2_fifo_empty", DPP_FIELD_FLAG_RO, 17, 1, 0x1, 0x0 }, + { "free_ptr3_fifo_empty", DPP_FIELD_FLAG_RO, 16, 1, 0x1, 0x0 }, + { "block_ptr0_fifo_empty", DPP_FIELD_FLAG_RO, 15, 1, 0x1, 0x0 }, + { "block_ptr1_fifo_empty", DPP_FIELD_FLAG_RO, 14, 1, 0x1, 0x0 }, + { "block_ptr2_fifo_empty", DPP_FIELD_FLAG_RO, 13, 1, 0x1, 0x0 }, + { "block_ptr3_fifo_empty", DPP_FIELD_FLAG_RO, 12, 1, 0x1, 0x0 }, + { "mgid0_fifo_empty", DPP_FIELD_FLAG_RO, 11, 1, 0x1, 0x0 }, + { "mgid1_fifo_empty", DPP_FIELD_FLAG_RO, 10, 1, 0x1, 0x0 }, + { "mgid2_fifo_empty", DPP_FIELD_FLAG_RO, 9, 1, 0x1, 0x0 }, + { "mgid3_fifo_empty", 
DPP_FIELD_FLAG_RO, 8, 1, 0x1, 0x0 }, + { "pre_rcv_ptr0_fifo_empty", DPP_FIELD_FLAG_RO, 7, 1, 0x1, 0x0 }, + { "pre_rcv_ptr1_fifo_empty", DPP_FIELD_FLAG_RO, 6, 1, 0x1, 0x0 }, + { "pre_rcv_ptr2_fifo_empty", DPP_FIELD_FLAG_RO, 5, 1, 0x1, 0x0 }, + { "pre_rcv_ptr3_fifo_empty", DPP_FIELD_FLAG_RO, 4, 1, 0x1, 0x0 }, + { "rep_cmd0_fifo_empty", DPP_FIELD_FLAG_RO, 3, 1, 0x1, 0x0 }, + { "rep_cmd1_fifo_empty", DPP_FIELD_FLAG_RO, 2, 1, 0x1, 0x0 }, + { "rep_cmd2_fifo_empty", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "rep_cmd3_fifo_empty", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_7_reg[] = { + { "cos0_is_rep_busy", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "cos1_is_rep_busy", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "cos2_is_rep_busy", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "cos3_is_rep_busy", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "block_ptr0_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 17, 1, 0x0, + 0x0 }, + { "block_ptr1_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 16, 1, 0x0, + 0x0 }, + { "block_ptr2_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 15, 1, 0x0, + 0x0 }, + { "block_ptr3_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 14, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr0_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 13, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr1_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 12, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr2_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 11, 1, 0x0, + 0x0 }, + { "pre_rcv_ptr3_fifo_non_sop_ren_rdy", DPP_FIELD_FLAG_RO, 10, 1, 0x0, + 0x0 }, + { "port_shap_rdy", DPP_FIELD_FLAG_RO, 9, 1, 0x1, 0x0 }, + { "mr_lif_group0_rdy_3", DPP_FIELD_FLAG_RO, 8, 1, 0x1, 0x0 }, + { "mr_lif_group0_rdy_2", DPP_FIELD_FLAG_RO, 7, 1, 0x1, 0x0 }, + { "mr_lif_group0_rdy_1", DPP_FIELD_FLAG_RO, 6, 1, 0x1, 0x0 }, + { "mr_lif_group0_rdy_0", DPP_FIELD_FLAG_RO, 5, 1, 0x1, 0x0 }, + { "pktrx_pfc_rdy_3", DPP_FIELD_FLAG_RO, 4, 1, 0x1, 0x0 }, + { "pktrx_pfc_rdy_2", DPP_FIELD_FLAG_RO, 3, 1, 0x1, 0x0 }, + { "pktrx_pfc_rdy_1", DPP_FIELD_FLAG_RO, 2, 1, 0x1, 0x0 }, 
+ { "pktrx_pfc_rdy_0", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "pktrx_link_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_states_8_reg[] = { + { "mr_head", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_sop_in_cnt_reg[] = { + { "mr_sop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_eop_in_cnt_reg[] = { + { "mr_eop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_sop_out_cnt_reg[] = { + { "mr_sop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_eop_out_cnt_reg[] = { + { "mr_eop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_in_cnt_reg[] = { + { "mr_cos0_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_in_cnt_reg[] = { + { "mr_cos1_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_in_cnt_reg[] = { + { "mr_cos2_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_in_cnt_reg[] = { + { "mr_cos3_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_out_cnt_reg[] = { + { "mr_cos0_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_out_cnt_reg[] = { + { "mr_cos1_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_out_cnt_reg[] = { + { "mr_cos2_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_out_cnt_reg[] = { + { "mr_cos3_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_err_in_cnt_reg[] = { + { "mr_err_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_sop_in_cnt_reg[] = { + { "mr_cos0_sop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_eop_in_cnt_reg[] = { + { "mr_cos0_eop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_sop_in_cnt_reg[] = { + { "mr_cos1_sop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_eop_in_cnt_reg[] = { + { "mr_cos1_eop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_sop_in_cnt_reg[] = { + { "mr_cos2_sop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_eop_in_cnt_reg[] = { + { "mr_cos2_eop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_sop_in_cnt_reg[] = { + { "mr_cos3_sop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_eop_in_cnt_reg[] = { + { "mr_cos3_eop_in_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_in_err_cnt_reg[] = { + { "mr_cos0_in_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_in_err_cnt_reg[] = { + { "mr_cos1_in_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_in_err_cnt_reg[] = { + { "mr_cos2_in_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_in_err_cnt_reg[] = { + { "mr_cos3_in_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_sop_out_cnt_reg[] = { + { "mr_cos0_sop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos0_eop_out_cnt_reg[] = { + { "mr_cos0_eop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_sop_out_cnt_reg[] = { + { "mr_cos1_sop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos1_eop_out_cnt_reg[] = { + { "mr_cos1_eop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_sop_out_cnt_reg[] = { + { "mr_cos2_sop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos2_eop_out_cnt_reg[] = { + { "mr_cos2_eop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_sop_out_cnt_reg[] = { + { "mr_cos3_sop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_cos3_eop_out_cnt_reg[] = { + { "mr_cos3_eop_out_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_mlt_unvld_cnt_reg[] = { + { "mr_mlt_unvld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_sop_eop_match_cfg_reg[] = { + { "mr_sop_eop_macth_en", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "mr_sop_eop_macth_dicard_th", DPP_FIELD_FLAG_RW, 7, 8, 0xbc, 0x0 }, +}; +DPP_FIELD_T g_nppu_mr_cfg_mr_mlt_unvld_mgid_reg[] = { + { "mr_mlt_unvld_mgid", DPP_FIELD_FLAG_RO, 16, 17, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_fifo_th_1_reg[] = { + { "cfg_sch_fifo7_fc_th", DPP_FIELD_FLAG_RW, 29, 6, 0x30, 0x0 }, + { "cfg_sch_fifo6_fc_th", DPP_FIELD_FLAG_RW, 21, 6, 0x30, 0x0 }, + { "cfg_sch_fifo5_fc_th", DPP_FIELD_FLAG_RW, 13, 6, 0x30, 0x0 }, + { "cfg_sch_fifo4_fc_th", DPP_FIELD_FLAG_RW, 5, 6, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_fifo_th_2_reg[] = { + { "cfg_sch_fifo3_drop_th", DPP_FIELD_FLAG_RW, 29, 6, 0x38, 0x0 }, + { "cfg_sch_fifo1_drop_th", DPP_FIELD_FLAG_RW, 21, 6, 0x38, 0x0 }, + { "cfg_sch_fifo0_drop_th", DPP_FIELD_FLAG_RW, 13, 6, 0x38, 0x0 }, + { "cfg_sch_fifo8_fc_th", DPP_FIELD_FLAG_RW, 5, 6, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_fifo_th_3_reg[] = { + { "cfg_sch_fifo6_drop_th", DPP_FIELD_FLAG_RW, 29, 6, 0x38, 0x0 }, + { "cfg_sch_fifo5_drop_th", DPP_FIELD_FLAG_RW, 21, 6, 0x38, 0x0 }, + { "cfg_sch_fifo4_drop_th", DPP_FIELD_FLAG_RW, 13, 6, 0x38, 0x0 }, + { "cfg_sch_fifo2_drop_th", DPP_FIELD_FLAG_RW, 5, 6, 0x38, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_fifo_th_4_reg[] = { + { "cfg_sch_fifo9_fc_th", DPP_FIELD_FLAG_RW, 29, 6, 0x30, 0x0 }, + { "cfg_sch_fifo9_drop_th", DPP_FIELD_FLAG_RW, 21, 6, 0x38, 0x0 }, + { "cfg_sch_fifo8_drop_th", DPP_FIELD_FLAG_RW, 13, 6, 0x38, 0x0 }, + { "cfg_sch_fifo7_drop_th", DPP_FIELD_FLAG_RW, 5, 
6, 0x38, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_isch_cfg_0_reg[] = { + { "cfg_sch_wrr1_weight1", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_ex_tpid_0_reg[] = { + { "cfg_type0", DPP_FIELD_FLAG_RW, 31, 16, 0x8100, 0x0 }, + { "cfg_type1", DPP_FIELD_FLAG_RW, 15, 16, 0x9100, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_ex_tpid_1_reg[] = { + { "cfg_type2", DPP_FIELD_FLAG_RW, 31, 16, 0x88a8, 0x0 }, + { "cfg_type3", DPP_FIELD_FLAG_RW, 15, 16, 0x9200, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_int_tpid_0_reg[] = { + { "cfg_inner_type0", DPP_FIELD_FLAG_RW, 31, 16, 0x8100, 0x0 }, + { "cfg_inner_type1", DPP_FIELD_FLAG_RW, 15, 16, 0x9100, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_int_tpid_1_reg[] = { + { "cfg_inner_type2", DPP_FIELD_FLAG_RW, 31, 16, 0x88a8, 0x0 }, + { "cfg_inner_type3", DPP_FIELD_FLAG_RW, 15, 16, 0x9200, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_hdlc_0_reg[] = { + { "hdlc_cfg0_type", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "hdlc_cfg1_type", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_hdlc_1_reg[] = { + { "hdlc_cfg2_type", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "hdlc_cfg3_type", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l3type_0_reg[] = { + { "cfg_l3_type0", DPP_FIELD_FLAG_RW, 31, 16, 0xffff, 0x0 }, + { "cfg_l3_type1", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l3type_1_reg[] = { + { "cfg_l3_type2", DPP_FIELD_FLAG_RW, 31, 16, 0xffff, 0x0 }, + { "cfg_l3_type3", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l3type_2_reg[] = { + { "cfg_l3_type4", DPP_FIELD_FLAG_RW, 31, 16, 0xffff, 0x0 }, + { "cfg_l3_type5", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l3type_3_reg[] = { + { "cfg_l3_type6", DPP_FIELD_FLAG_RW, 31, 16, 0xffff, 0x0 }, + { "cfg_l3_type7", DPP_FIELD_FLAG_RW, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_pktrx_cfg_hdu_udf_l4type_0_reg[] = { + { "cfg_l4_type0", DPP_FIELD_FLAG_RW, 31, 8, 0xff, 0x0 }, + { "cfg_l4_type1", DPP_FIELD_FLAG_RW, 23, 8, 0xff, 0x0 }, + { "cfg_l4_type2", DPP_FIELD_FLAG_RW, 15, 8, 0xff, 0x0 }, + { "cfg_l4_type3", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l4type_1_reg[] = { + { "cfg_l4_type4", DPP_FIELD_FLAG_RW, 31, 8, 0xff, 0x0 }, + { "cfg_l4_type5", DPP_FIELD_FLAG_RW, 23, 8, 0xff, 0x0 }, + { "cfg_l4_type6", DPP_FIELD_FLAG_RW, 15, 8, 0xff, 0x0 }, + { "cfg_l4_type7", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_hdu_udf_l4type_2_reg[] = { + { "cfg_l4_type8", DPP_FIELD_FLAG_RW, 23, 8, 0xff, 0x0 }, + { "cfg_l4_type9", DPP_FIELD_FLAG_RW, 15, 8, 0xff, 0x0 }, + { "cfg_l4_type10", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_slot_no_cfg_reg[] = { + { "cfg_parser_slot_no", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_int_en_0_reg[] = { + { "pktrx_int_en_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "pktrx_int_en_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "pktrx_int_en_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "pktrx_int_en_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "pktrx_int_en_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "pktrx_int_en_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "pktrx_int_en_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "pktrx_int_en_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "pktrx_int_en_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "pktrx_int_en_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "pktrx_int_en_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "pktrx_int_en_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "pktrx_int_en_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "pktrx_int_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "pktrx_int_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "pktrx_int_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "pktrx_int_en_15", 
DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "pktrx_int_en_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "pktrx_int_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "pktrx_int_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "pktrx_int_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "pktrx_int_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "pktrx_int_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "pktrx_int_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pktrx_int_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pktrx_int_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pktrx_int_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "pktrx_int_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "pktrx_int_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "pktrx_int_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pktrx_int_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pktrx_int_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_int_en_1_reg[] = { + { "pktrx_int_en_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "pktrx_int_en_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pktrx_int_en_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pktrx_int_en_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_int_mask_0_reg[] = { + { "pktrx_int_mask_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "pktrx_int_mask_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "pktrx_int_mask_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "pktrx_int_mask_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "pktrx_int_mask_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "pktrx_int_mask_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "pktrx_int_mask_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "pktrx_int_mask_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "pktrx_int_mask_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "pktrx_int_mask_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "pktrx_int_mask_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { 
"pktrx_int_mask_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "pktrx_int_mask_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "pktrx_int_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "pktrx_int_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "pktrx_int_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "pktrx_int_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "pktrx_int_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "pktrx_int_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "pktrx_int_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "pktrx_int_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "pktrx_int_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "pktrx_int_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "pktrx_int_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "pktrx_int_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pktrx_int_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "pktrx_int_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "pktrx_int_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "pktrx_int_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "pktrx_int_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pktrx_int_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pktrx_int_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_int_mask_1_reg[] = { + { "pktrx_int_mask_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "pktrx_int_mask_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "pktrx_int_mask_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "pktrx_int_mask_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_int_status_reg[] = { + { "int_status", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_port_rdy0_reg[] = { + { "pktrx_trpgrx_r1_rdy", DPP_FIELD_FLAG_RO, 31, 10, 0x3ff, 0x0 }, + { "pktrx_trpgrx_r2_rdy", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy0_reg[] = { + { 
"pktrx_trpgrx_r1_pfc_rdy_0", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy1_reg[] = { + { "pktrx_trpgrx_r1_pfc_rdy_1", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy2_reg[] = { + { "pktrx_trpgrx_r1_pfc_rdy_2", DPP_FIELD_FLAG_RO, 31, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy3_reg[] = { + { "pktrx_trpgrx_r2_pfc_rdy_3", DPP_FIELD_FLAG_RO, 31, 8, 0xf, 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy4_reg[] = { + { "pktrx_trpgrx_r2_pfc_rdy_4", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy5_reg[] = { + { "pktrx_trpgrx_r2_pfc_rdy_5", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy6_reg[] = { + { "pktrx_trpgrx_r2_pfc_rdy_6", DPP_FIELD_FLAG_RO, 31, 24, 0xffffff, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_reg[] = { + { "cfg_port_l2_offset_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_int_ram_en_reg[] = { + { "phy_sts_parity_err", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ptr_buf_parity_err", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_int_ram_mask_reg[] = { + { "phy_sts_parity_err", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ptr_buf_parity_err", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_int_ram_status_reg[] = { + { "phy_sts_parity_err", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ptr_buf_parity_err", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_subsys_int_mask_flag_reg[] = { + { "subsys_int_mask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_subsys_int_unmask_flag_reg[] = { + { "subsys_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_idma_cfg_debug_cnt_rdclr_mode_reg[] = { + { "debug_cnt_rdclr_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 
}, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_ram_en0_reg[] = { + { "int_ram_en_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "int_ram_en_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "int_ram_en_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "int_ram_en_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "int_ram_en_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "int_ram_en_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "int_ram_en_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "int_ram_en_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "int_ram_en_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "int_ram_en_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "int_ram_en_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "int_ram_en_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "int_ram_en_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "int_ram_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "int_ram_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "int_ram_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "int_ram_en_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "int_ram_en_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "int_ram_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "int_ram_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "int_ram_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "int_ram_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "int_ram_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "int_ram_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "int_ram_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "int_ram_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "int_ram_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "int_ram_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "int_ram_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_ram_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_ram_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_ram_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_ram_mask0_reg[] = { + { 
"int_ram_mask_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "int_ram_mask_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "int_ram_mask_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "int_ram_mask_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "int_ram_mask_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "int_ram_mask_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "int_ram_mask_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "int_ram_mask_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "int_ram_mask_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "int_ram_mask_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "int_ram_mask_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "int_ram_mask_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "int_ram_mask_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "int_ram_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "int_ram_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "int_ram_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "int_ram_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "int_ram_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "int_ram_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "int_ram_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "int_ram_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "int_ram_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "int_ram_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "int_ram_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "int_ram_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "int_ram_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "int_ram_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "int_ram_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "int_ram_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_ram_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_ram_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_ram_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_ram_status0_reg[] = { + { 
"int_ram_status_31", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "int_ram_status_30", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "int_ram_status_29", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "int_ram_status_28", DPP_FIELD_FLAG_RO, 28, 1, 0x0, 0x0 }, + { "int_ram_status_27", DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "int_ram_status_26", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "int_ram_status_25", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "int_ram_status_24", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "int_ram_status_23", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "int_ram_status_22", DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "int_ram_status_21", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "int_ram_status_20", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "int_ram_status_19", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "int_ram_status_18", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "int_ram_status_17", DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "int_ram_status_16", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "int_ram_status_15", DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "int_ram_status_14", DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "int_ram_status_13", DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "int_ram_status_12", DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "int_ram_status_11", DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "int_ram_status_10", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "int_ram_status_9", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "int_ram_status_8", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "int_ram_status_7", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "int_ram_status_6", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "int_ram_status_5", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "int_ram_status_4", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "int_ram_status_3", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "int_ram_status_2", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "int_ram_status_1", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "int_ram_status_0", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_nppu_pbu_cfg_int_fifo_en0_reg[] = { + { "int_fifo_en_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "int_fifo_en_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "int_fifo_en_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "int_fifo_en_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "int_fifo_en_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "int_fifo_en_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "int_fifo_en_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "int_fifo_en_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "int_fifo_en_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "int_fifo_en_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "int_fifo_en_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "int_fifo_en_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "int_fifo_en_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "int_fifo_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "int_fifo_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "int_fifo_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "int_fifo_en_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "int_fifo_en_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "int_fifo_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "int_fifo_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "int_fifo_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "int_fifo_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "int_fifo_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "int_fifo_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "int_fifo_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "int_fifo_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "int_fifo_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "int_fifo_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "int_fifo_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_fifo_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_fifo_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_fifo_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_pbu_cfg_int_fifo_en1_reg[] = { + { "int_fifo_en_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_fifo_en_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_fifo_en_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_fifo_en_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_fifo_mask0_reg[] = { + { "int_fifo_mask_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "int_fifo_mask_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "int_fifo_mask_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "int_fifo_mask_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "int_fifo_mask_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "int_fifo_mask_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "int_fifo_mask_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "int_fifo_mask_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "int_fifo_mask_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "int_fifo_mask_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "int_fifo_mask_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "int_fifo_mask_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "int_fifo_mask_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "int_fifo_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "int_fifo_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "int_fifo_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "int_fifo_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "int_fifo_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "int_fifo_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "int_fifo_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "int_fifo_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "int_fifo_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "int_fifo_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "int_fifo_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "int_fifo_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "int_fifo_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "int_fifo_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { 
"int_fifo_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "int_fifo_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_fifo_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_fifo_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_fifo_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_fifo_mask1_reg[] = { + { "int_fifo_mask_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "int_fifo_mask_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "int_fifo_mask_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "int_fifo_mask_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_fifo_status0_reg[] = { + { "int_fifo_status_31", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "int_fifo_status_30", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "int_fifo_status_29", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "int_fifo_status_28", DPP_FIELD_FLAG_RO, 28, 1, 0x0, 0x0 }, + { "int_fifo_status_27", DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "int_fifo_status_26", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "int_fifo_status_25", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "int_fifo_status_24", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "int_fifo_status_23", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "int_fifo_status_22", DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "int_fifo_status_21", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "int_fifo_status_20", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "int_fifo_status_19", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "int_fifo_status_18", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "int_fifo_status_17", DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "int_fifo_status_16", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "int_fifo_status_15", DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "int_fifo_status_14", DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "int_fifo_status_13", DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "int_fifo_status_12", DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "int_fifo_status_11", DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, 
+ { "int_fifo_status_10", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "int_fifo_status_9", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "int_fifo_status_8", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "int_fifo_status_7", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "int_fifo_status_6", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "int_fifo_status_5", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "int_fifo_status_4", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "int_fifo_status_3", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "int_fifo_status_2", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "int_fifo_status_1", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "int_fifo_status_0", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_int_fifo_status1_reg[] = { + { "int_fifo_status_35", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "int_fifo_status_34", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "int_fifo_status_33", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "int_fifo_status_32", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_subsys_int_mask_flag_reg[] = { + { "subsys_int_mask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_subsys_int_unmask_flag_reg[] = { + { "subsys_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_sa_ip_en_reg[] = { + { "sa_ip_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_debug_cnt_rdclr_mode_reg[] = { + { "debug_cnt_rdclr_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_fptr_fifo_aful_assert_cfg_reg[] = { + { "fptr_fifo_aful_assert_cfg", DPP_FIELD_FLAG_RW, 12, 13, 0xff0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_fptr_fifo_aful_negate_cfg_reg[] = { + { "fptr_fifo_aful_negate_cfg", DPP_FIELD_FLAG_RW, 12, 13, 0xff0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_pf_fifo_aful_assert_cfg_reg[] = { + { "pf_fifo_aful_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x8, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_pf_fifo_aful_negate_cfg_reg[] = { + { 
"pf_fifo_aful_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x8, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_pf_fifo_aept_assert_cfg_reg[] = { + { "pf_fifo_aept_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_pf_fifo_aept_negate_cfg_reg[] = { + { "pf_fifo_aept_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_wb_aful_assert_cfg_reg[] = { + { "wb_aful_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x12, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_wb_aful_negate_cfg_reg[] = { + { "wb_aful_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x12, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_se_key_aful_assert_cfg_reg[] = { + { "se_key_aful_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_se_aful_assert_cfg_reg[] = { + { "ifbrd_se_aful_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_se_aful_negate_cfg_reg[] = { + { "ifbrd_se_aful_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_odma_aful_assert_cfg_reg[] = { + { "ifbrd_odma_aful_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_odma_aful_negate_cfg_reg[] = { + { "ifbrd_odma_aful_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_ppu_aful_assert_cfg_reg[] = { + { "ifbrd_ppu_aful_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_ifbrd_ppu_aful_negate_cfg_reg[] = { + { "ifbrd_ppu_aful_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_logic_aful_assert_cfg_reg[] = { + { "mc_logic_aful_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_logic_aful_negate_cfg_reg[] = { + { "mc_logic_aful_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_mc_logic_diff_reg[] = { + { "mc_logic_diff", DPP_FIELD_FLAG_RW, 7, 8, 0xa, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_cfg_peak_port_cnt_clr_reg[] = { 
+ { "cfg_peak_port_cnt_clr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_crdt_th_reg[] = { + { "ftm_crdt_port_cng_th", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "ftm_crdt_port_th", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_link_th_01_reg[] = { + { "total_congest_th1", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "total_congest_th0", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_link_th_23_reg[] = { + { "total_congest_th3", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "total_congest_th2", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_link_th_45_reg[] = { + { "total_congest_th5", DPP_FIELD_FLAG_RW, 29, 15, 0x0, 0x0 }, + { "total_congest_th4", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_link_th_6_reg[] = { + { "total_congest_th6", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_all_ftm_total_congest_th_reg[] = { + { "all_ftm_total_congest_th", DPP_FIELD_FLAG_RW, 14, 15, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_cfg_crdt_mode_reg[] = { + { "cfg_crdt_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_cfg_pfc_rdy_high_time_reg[] = { + { "cfg_pfc_rdy_high_time", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_cfg_cfg_pfc_rdy_low_time_reg[] = { + { "cfg_pfc_rdy_low_time", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_fc_rdy_reg[] = { + { "pbu_oam_send_fc_rdy", DPP_FIELD_FLAG_RO, 7, 2, 0x0, 0x0 }, + { "pbu_odma_fc_rdy", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "pbu_tm_fc_rdy", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "pbu_idma_cos_rdy", DPP_FIELD_FLAG_RO, 1, 2, 0x2, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_lif_group0_rdy0_reg[] = { + { "pbu_ipg1_rdy", DPP_FIELD_FLAG_RO, 27, 3, 0x0, 0x0 }, + { "pbu_ipg0_rdy", DPP_FIELD_FLAG_RO, 24, 9, 0x0, 0x0 }, + { "pbu_trpgrx_xge_rdy", DPP_FIELD_FLAG_RO, 9, 2, 0x0, 0x0 }, 
+ { "pbu_trpgrx_cge1_rdy", DPP_FIELD_FLAG_RO, 7, 4, 0x0, 0x0 }, + { "pbu_trpgrx_cge0_rdy", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_lif_group0_rdy1_reg[] = { + { "pbu_lif_group0_rdy1", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_lif_group1_rdy_reg[] = { + { "pbu_lif_group1_rdy1", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_lif_group0_pfc_rdy_reg[] = { + { "pbu_lif_group0_pfc_rdy", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_lif_group1_pfc_rdy_reg[] = { + { "pbu_lif_group1_pfc_rdy", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_sa_port_rdy_0_31_reg[] = { + { "pbu_sa_port_rdy_0_31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_sa_port_rdy_32_50_reg[] = { + { "pbu_sa_port_rdy_32_50", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_pktrx_mr_pfc_rdy_reg[] = { + { "pbu_pktrx_mr_pfc_rdy", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_0_31_reg[] = { + { "pbu_ftm_crdt_port_rdy_0_31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_32_47_reg[] = { + { "pbu_ftm_crdt_port_rdy_32_47", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_0_31_reg[] = { + { "pbu_ftm_crdt_port_cng_rdy_0_31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_32_47_reg[] = { + { "pbu_ftm_crdt_port_cng_rdy_32_47", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_nppu_pbu_stat_pbu_ftm_crdt_sys_info_reg[] = { + { "pbu_ftm_crdt_sys_info", DPP_FIELD_FLAG_RO, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_normal_mc_reg[] = { + { "weight_normal_mc", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_sa_mc_reg[] = { + { "weight_sa_mc", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 
0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_etm_reg[] = { + { "weight_etm", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_lp_mc_reg[] = { + { "weight_lp_mc", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_oam_reg[] = { + { "weight_oam", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_lif_ctrl1_reg[] = { + { "weight_lif_ctrl1", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_weight_lif_ctrl2_reg[] = { + { "weight_lif_ctrl2", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_ecc_bypass_read_reg[] = { + { "eccbypass", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_isu_int_mask_reg[] = { + { "isu_int_mask", DPP_FIELD_FLAG_RW, 26, 27, 0x7ffffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_cfg_crdt_cycle_reg[] = { + { "cfg_cycle", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_cfg_crdt_value_reg[] = { + { "cfg_value", DPP_FIELD_FLAG_RW, 13, 14, 0x216, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_isu_int_en_reg[] = { + { "isu_int_en", DPP_FIELD_FLAG_RW, 26, 27, 0x7ffffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_isu_ppu_fifo_fc_reg[] = { + { "isu_ppu_fifo_fc", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_isu_int_status_reg[] = { + { "isu_int_status_26", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "isu_int_status_25", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "isu_int_status_24", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "isu_int_status_23", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "isu_int_status_22", DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "isu_int_status_21", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "isu_int_status_20", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "isu_int_status_19", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "isu_int_status_18", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "isu_int_status_17", DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "isu_int_status_16", DPP_FIELD_FLAG_RO, 
16, 1, 0x0, 0x0 }, + { "isu_int_status_15", DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "isu_int_status_14", DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "isu_int_status_13", DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "isu_int_status_12", DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "isu_int_status_11", DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "isu_int_status_10", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "isu_int_status_9", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "isu_int_status_8", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "isu_int_status_7", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "isu_int_status_6", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "isu_int_status_5", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "isu_int_status_4", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "isu_int_status_3", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "isu_int_status_2", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "isu_int_status_1", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "isu_int_status_0", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_fd_prog_full_assert_cfg_reg[] = { + { "fd_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_fd_prog_full_negate_cfg_reg[] = { + { "fd_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_lp_prog_full_assert_cfg_reg[] = { + { "lp_prog_ept_assert_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_cfg_lp_prog_full_negate_cfg_reg[] = { + { "lp_prog_ept_negate_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat0_reg[] = { + { "debug_cnt_dat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat1_reg[] = { + { "debug_cnt_dat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat2_reg[] = { + { "debug_cnt_dat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat3_reg[] = { + { "debug_cnt_dat3", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat4_reg[] = { + { "debug_cnt_dat4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat5_reg[] = { + { "debug_cnt_dat5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat6_reg[] = { + { "debug_cnt_dat6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat7_reg[] = { + { "debug_cnt_dat7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat8_reg[] = { + { "debug_cnt_dat8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat9_reg[] = { + { "debug_cnt_dat9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat10_reg[] = { + { "debug_cnt_dat10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat11_reg[] = { + { "debug_cnt_dat11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat12_reg[] = { + { "debug_cnt_dat12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat13_reg[] = { + { "debug_cnt_dat13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat14_reg[] = { + { "debug_cnt_dat14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat15_reg[] = { + { "debug_cnt_dat15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat16_reg[] = { + { "debug_cnt_dat16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat17_reg[] = { + { "debug_cnt_dat17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat18_reg[] = { + { "debug_cnt_dat18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_isu_stat_debug_cnt_dat19_reg[] = { + { "debug_cnt_dat18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_isu_stat_debug_cnt_cfg_reg[] = { + { "debug_cnt_ovf_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "debug_cnt_rdclr_mode", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "user_cnt_value", DPP_FIELD_FLAG_RW, 29, 4, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_exsa_tdm_offset_reg[] = { + { "exsa_tdm_offset", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ecc_bypass_readt_reg[] = { + { "ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_en_0_reg[] = { + { "odma_int_en_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "odma_int_en_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "odma_int_en_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "odma_int_en_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "odma_int_en_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_en_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "odma_int_en_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "odma_int_en_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_en_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "odma_int_en_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_en_1_reg[] = { + { "odma_int_en_63", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "odma_int_en_62", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "odma_int_en_61", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "odma_int_en_59", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_en_58", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "odma_int_en_57", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "odma_int_en_56", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_en_55", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "odma_int_en_54", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "odma_int_en_53", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_en_52", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "odma_int_en_51", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "odma_int_en_49", 
DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "odma_int_en_47", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "odma_int_en_45", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "odma_int_en_39", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "odma_int_en_38", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "odma_int_en_37", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "odma_int_en_36", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "odma_int_en_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "odma_int_en_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "odma_int_en_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "odma_int_en_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_en_2_reg[] = { + { "odma_int_en_91", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_en_88", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_en_85", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_en_82", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "odma_int_en_79", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "odma_int_en_75", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "odma_int_en_74", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "odma_int_en_71", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "odma_int_en_65", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "odma_int_en_64", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_en_3_reg[] = { + { "odma_int_en_115", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "odma_int_en_114", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "odma_int_en_112", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "odma_int_en_110", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "odma_int_en_109", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "odma_int_en_108", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "odma_int_en_107", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "odma_int_en_106", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "odma_int_en_102", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "odma_int_en_101", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "odma_int_en_100", DPP_FIELD_FLAG_RW, 4, 1, 
0x1, 0x0 }, + { "odma_int_en_98", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "odma_int_en_96", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_mask_0_reg[] = { + { "odma_int_mask_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "odma_int_mask_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "odma_int_mask_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "odma_int_mask_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "odma_int_mask_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_mask_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "odma_int_mask_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "odma_int_mask_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_mask_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "odma_int_mask_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_mask_1_reg[] = { + { "odma_int_mask_63", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "odma_int_mask_62", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "odma_int_mask_61", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "odma_int_mask_59", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_mask_58", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "odma_int_mask_57", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "odma_int_mask_56", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_mask_55", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "odma_int_mask_54", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "odma_int_mask_53", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_mask_52", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "odma_int_mask_51", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "odma_int_mask_50", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "odma_int_mask_49", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "odma_int_mask_47", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "odma_int_mask_45", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "odma_int_mask_39", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { 
"odma_int_mask_38", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "odma_int_mask_37", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "odma_int_mask_36", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "odma_int_mask_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "odma_int_mask_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "odma_int_mask_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "odma_int_mask_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_mask_2_reg[] = { + { "odma_int_mask_91", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "odma_int_mask_88", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "odma_int_mask_85", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "odma_int_mask_82", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "odma_int_mask_79", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "odma_int_mask_75", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "odma_int_mask_74", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "odma_int_mask_71", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "odma_int_mask_65", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "odma_int_mask_64", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_mask_3_reg[] = { + { "odma_int_mask_115", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "odma_int_mask_114", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "odma_int_mask_112", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "odma_int_mask_110", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "odma_int_mask_109", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "odma_int_mask_108", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "odma_int_mask_107", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "odma_int_mask_106", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "odma_int_mask_102", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "odma_int_mask_101", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "odma_int_mask_100", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "odma_int_mask_98", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "odma_int_mask_96", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_odma_cfg_odma_int_status_0_reg[] = { + { "odma_int_status_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "odma_int_status_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "odma_int_status_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "odma_int_status_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "odma_int_status_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "odma_int_status_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "odma_int_status_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "odma_int_status_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "odma_int_status_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "odma_int_status_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "odma_int_status_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_status_1_reg[] = { + { "odma_int_status_63", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "odma_int_status_62", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "odma_int_status_61", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "odma_int_status_59", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "odma_int_status_58", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "odma_int_status_57", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "odma_int_status_56", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "odma_int_status_55", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "odma_int_status_54", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "odma_int_status_53", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "odma_int_status_52", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "odma_int_status_51", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "odma_int_status_49", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "odma_int_status_47", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "odma_int_status_45", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "odma_int_status_39", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "odma_int_status_38", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "odma_int_status_37", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "odma_int_status_36", 
DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "odma_int_status_35", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "odma_int_status_34", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "odma_int_status_33", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "odma_int_status_32", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_status_2_reg[] = { + { "odma_int_status_91", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "odma_int_status_88", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "odma_int_status_85", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "odma_int_status_82", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "odma_int_status_79", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "odma_int_status_75", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "odma_int_status_74", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "odma_int_status_71", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "odma_int_status_65", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "odma_int_status_64", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_odma_int_status_3_reg[] = { + { "odma_int_status_117", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "odma_int_status_116", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "odma_int_status_115", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "odma_int_status_114", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "odma_int_status_112", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "odma_int_status_110", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "odma_int_status_109", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "odma_int_status_108", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "odma_int_status_107", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "odma_int_status_106", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "odma_int_status_102", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "odma_int_status_101", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "odma_int_status_100", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "odma_int_status_98", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "odma_int_status_96", DPP_FIELD_FLAG_RC, 
0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_sp_tdm_err_nor_cfg_reg[] = { + { "sp_tdm_err_nor_cfg", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_a_reg[] = { + { "etm_dis_ptr_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_n_reg[] = { + { "etm_dis_ptr_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_a_reg[] = { + { "ftm_dis_ptr_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_n_reg[] = { + { "ftm_dis_ptr_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_a_reg[] = { + { "tm_dis_fifo_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_n_reg[] = { + { "tm_dis_fifo_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_err_prog_full_cfg_a_reg[] = { + { "err_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_err_prog_full_cfg_n_reg[] = { + { "err_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmuc_prog_full_cfg_a_reg[] = { + { "tdmuc_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmuc_prog_full_cfg_n_reg[] = { + { "tdmuc_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_a_reg[] = { + { "tdmmc_groupid_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, + 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_n_reg[] = { + { "tdmmc_groupid_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, + 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_a_reg[] = { + { "tdmmc_no_bitmap_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, + 0x0 }, +}; 
+DPP_FIELD_T g_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_n_reg[] = { + { "tdmmc_no_bitmap_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, + 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmmc_prog_full_cfg_a_reg[] = { + { "tdmmc_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tdmmc_prog_full_cfg_n_reg[] = { + { "tdmmc_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_desc_prog_full_cfg_a_reg[] = { + { "desc_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x64, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_desc_prog_full_cfg_n_reg[] = { + { "desc_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_dly_prog_full_cfg_a_reg[] = { + { "dly_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_dly_prog_full_cfg_n_reg[] = { + { "dly_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_rsp_prog_full_cfg_a_reg[] = { + { "rsp_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_rsp_prog_full_cfg_n_reg[] = { + { "rsp_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_nor_prog_full_cfg_a_reg[] = { + { "nor_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_nor_prog_full_cfg_n_reg[] = { + { "nor_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 10, 11, 0x3e8, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_nor_prog_full_cfg_a_reg[] = { + { "etm_nor_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_nor_prog_full_cfg_n_reg[] = { + { "etm_nor_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_nor_prog_full_cfg_a_reg[] = { + { "ftm_nor_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_nor_prog_full_cfg_n_reg[] = { + { "ftm_nor_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 
}, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_prog_full_cfg_a_reg[] = { + { "etm_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_prog_full_cfg_n_reg[] = { + { "etm_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_prog_full_cfg_a_reg[] = { + { "ftm_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_prog_full_cfg_n_reg[] = { + { "ftm_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_a_reg[] = { + { "etm_nrdcnt_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_n_reg[] = { + { "etm_nrdcnt_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_a_reg[] = { + { "ftm_nrdcnt_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_n_reg[] = { + { "ftm_nrdcnt_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_pp_prog_full_cfg_a_reg[] = { + { "pp_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_pp_prog_full_cfg_n_reg[] = { + { "pp_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x6e, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_tm_weight_reg[] = { + { "tm_weight", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_pp_weight_reg[] = { + { "pp_weight", DPP_FIELD_FLAG_RW, 6, 7, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ifbcmd_prog_full_cfg_a_reg[] = { + { "ifbcmd_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 6, 7, 0x32, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_ifbcmd_prog_full_cfg_n_reg[] = { + { "ifbcmd_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 6, 7, 0x32, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_mccnt_prog_full_cfg_a_reg[] = { + { "mccnt_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_odma_cfg_mccnt_prog_full_cfg_n_reg[] = { + { "mccnt_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 5, 6, 0x19, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_int_or_pon_reg[] = { + { "int_or_pon", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_quemng_cnt_in_err_cnt_reg[] = { + { "quemng_cnt_in_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_lif0_port_eop_cnt_reg[] = { + { "lif0_port_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_lif1_port_eop_cnt_reg[] = { + { "lif1_port_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_lifc_port0_eop_cnt_reg[] = { + { "lifc_port0_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_lifc_port1_eop_cnt_reg[] = { + { "lifc_port1_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_fptr_fifo_prog_ept_cfg_n_reg[] = { + { "fptr_fifo_prog_ept_cfg_n", DPP_FIELD_FLAG_RW, 14, 15, 0x40, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_isu_fifo_prog_full_cfg_a_reg[] = { + { "isu_fifo_prog_full_cfg_a", DPP_FIELD_FLAG_RW, 7, 8, 0x60, 0x0 }, +}; +DPP_FIELD_T g_nppu_odma_cfg_isu_fifo_prog_full_cfg_n_reg[] = { + { "isu_fifo_prog_full_cfg_n", DPP_FIELD_FLAG_RW, 7, 8, 0x60, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_access_done_reg[] = { + { "ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_access_command_reg[] = { + { "ind_rd_or_wr", DPP_FIELD_FLAG_RW, 21, 1, 0x0, 0x0 }, + { "ind_mem_mask", DPP_FIELD_FLAG_RW, 20, 4, 0x0, 0x0 }, + { "ind_mem_id", DPP_FIELD_FLAG_RW, 16, 4, 0x0, 0x0 }, + { "ind_mem_addr", DPP_FIELD_FLAG_RW, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat0_reg[] = { + { "ind_dat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat1_reg[] = { + { "ind_dat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat2_reg[] = { + { "ind_dat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat3_reg[] = { + { "ind_dat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_tx_main_en_reg[] = { + { "oam_tx_main_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tx_total_num_reg[] = { + { "tx_total_num", DPP_FIELD_FLAG_RW, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_chk_main_en_reg[] = { + { "oam_chk_main_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_chk_total_num0_reg[] = { + { "chk_total_num0", DPP_FIELD_FLAG_RW, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ma_chk_main_en_reg[] = { + { "oam_chk_main_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_chk_total_num1_reg[] = { + { "chk_total_num0", DPP_FIELD_FLAG_RW, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tx_stat_en_reg[] = { + { "tx_stat_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_rec_stat_en_reg[] = { + { "rec_stat_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_stat_oam_rdy_mask_reg[] = { + { "stat_oam_rdy_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_session_grading0_reg[] = { + { "session_grading0", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_session_grading1_reg[] = { + { "session_grading1", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_session_grading2_reg[] = { + { "session_grading2", DPP_FIELD_FLAG_RW, 2, 3, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_session_grading3_reg[] = { + { "session_grading3", DPP_FIELD_FLAG_RW, 2, 3, 0x7, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_bfd_chk_haddr_reg[] = { + { "bfd_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ethccm_chk_haddr_reg[] = { + { "ethccm_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpbfd_chk_haddr_reg[] = { + { "tpbfd_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_nppu_oam_cfg_tpoam_ccm_chk_haddr_reg[] = { + { "tpoam_ccm_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_bfd_tx_haddr_reg[] = { + { "bfd_tx_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ethccm_tx_haddr_reg[] = { + { "ethccm_tx_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpbfd_tx_haddr_reg[] = { + { "tpbfd_tx_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpoam_ccm_tx_haddr_reg[] = { + { "tpoam_ccm_tx_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ethccm_ma_chk_haddr_reg[] = { + { "ethccm_ma_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpccm_ma_chk_haddr_reg[] = { + { "tpccm_ma_chk_haddr", DPP_FIELD_FLAG_RW, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_groupnum_ram_clr_reg[] = { + { "groupnum_ram_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_index_ram0_clr_reg[] = { + { "index_ram0_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_index_ram1_clr_reg[] = { + { "index_ram1_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_rmep_ram_clr_reg[] = { + { "rmep_ram_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ma_ram_clr_reg[] = { + { "ma_ram_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ram_init_done_reg[] = { + { "ram_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_rec_bfd_debug_en_reg[] = { + { "rec_bfd_debug_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_session_int_reg[] = { + { "tpma_int", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ethma_int", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "bfd_int", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ethoam_int", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "tpbfd_int", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { 
"tpoam_int", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_int_reg[] = { + { "fifo_int", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "pon_protect_int", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_int_clr_reg[] = { + { "oam_int_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_type_int_clr0_reg[] = { + { "tpma_int_clr", DPP_FIELD_FLAG_WO, 5, 1, 0x0, 0x0 }, + { "ethma_int_clr", DPP_FIELD_FLAG_WO, 4, 1, 0x0, 0x0 }, + { "bfd_int_clr", DPP_FIELD_FLAG_WO, 3, 1, 0x0, 0x0 }, + { "ethoam_int_clr", DPP_FIELD_FLAG_WO, 2, 1, 0x0, 0x0 }, + { "tpbfd_int_clr", DPP_FIELD_FLAG_WO, 1, 1, 0x0, 0x0 }, + { "tpoam_int_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_type_int_clr1_reg[] = { + { "fifo_int_clr", DPP_FIELD_FLAG_WO, 1, 1, 0x0, 0x0 }, + { "pon_protect_int_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_interrupt_mask_reg[] = { + { "fifo_interrupt_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "pon_protect_interruptmask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "tpma_interrupt_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "ethma_interrupt_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "bfd_interrupt_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ethoam_interrupt_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "tpbfd_interrupt_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "tpoam_interrupt_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_int0_index_reg[] = { + { "int0_index0", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_int1_index_reg[] = { + { "int1_index0", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_int0_index_region_reg[] = { + { "int0_index_region", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_int1_index_region_reg[] = { + { "int1_index_region", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_oam_cfg_bdiinfo_fwft_fifo_th_reg[] = { + { "bdiinfo_fwft_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_recsec_fwft_fifo_th_reg[] = { + { "recsec_fwft_fifo_th", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_timing_chk_info0_fwft_fifo_th_reg[] = { + { "timing_chk_info0_fwft_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_recma_fwft_fifo_th_reg[] = { + { "recma_fwft_fifo_th", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_timing_chk_info1_fwft_fifo_th_reg[] = { + { "timing_chk_info1_fwft_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_txinst_fifo_th_reg[] = { + { "oam_txinst_fifo_th", DPP_FIELD_FLAG_RW, 10, 11, 0x300, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_rdinfo_fwft_fifo_th_reg[] = { + { "oam_rdinfo_fwft_fifo_th", DPP_FIELD_FLAG_RW, 9, 10, 0x180, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_lm_cnt_fwft_fifo_th_reg[] = { + { "lm_cnt_fwft_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_pkt_fifo_th_reg[] = { + { "oam_pkt_fifo_th", DPP_FIELD_FLAG_RW, 9, 10, 0x180, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_reclm_stat_fifo_th_reg[] = { + { "reclm_stat_fifo_th", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txlm_stat_fifo_th_reg[] = { + { "txlm_stat_fifo_th", DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_chk_fwft_fifo_th_reg[] = { + { "oam_chk_fwft_fifo_th", DPP_FIELD_FLAG_RW, 9, 10, 0x180, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txoam_stat_fifo_th_reg[] = { + { "txoam_stat_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_recoam_stat_fifo_th_reg[] = { + { "recoam_stat_fifo_th", DPP_FIELD_FLAG_RW, 6, 7, 0x30, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txpkt_data_fwft_fifo_th_reg[] = { + { "txpkt_data_fwft_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0xc0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tstpkt_fwft_fifo_th_reg[] = { 
+ { "tstpkt_fwft_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0x80, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tst_txinst_fwft_fifo_th_reg[] = { + { "tst_txinst_fwft_fifo_th", DPP_FIELD_FLAG_RW, 4, 5, 0x8, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tstrx_main_en_reg[] = { + { "tstrx_main_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tsttx_cfg_para_tbl2_reg[] = { + { "ddr_self_test_tx_en", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "tm_self_test_tx_en", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "fast_aging_tx_en", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "timing_aging_tx_en", DPP_FIELD_FLAG_RW, 13, 1, 0x0, 0x0 }, + { "backgroud_flow_tx_en", DPP_FIELD_FLAG_RW, 12, 1, 0x0, 0x0 }, + { "tsttx_tx_en", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "tx_freq", DPP_FIELD_FLAG_RW, 10, 3, 0x1, 0x0 }, + { "tx_offset", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tsttx_cfg_para_tbl1_reg[] = { + { "tx_count", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tsttx_cfg_para_tbl0_reg[] = { + { "fast_tx_mode_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "tsttx_tx_head_len", DPP_FIELD_FLAG_RW, 30, 15, 0x0, 0x0 }, + { "tsttx_tx_interval", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tstrx_cfg_para_reg[] = { + { "tstrx_session_num", DPP_FIELD_FLAG_RW, 16, 16, 0x0, 0x0 }, + { "tstrx_session_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_en_0_reg[] = { + { "fifo_status_int_en_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "fifo_status_int_en_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "fifo_status_int_en_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "fifo_status_int_en_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "fifo_status_int_en_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "fifo_status_int_en_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "fifo_status_int_en_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "fifo_status_int_en_24", DPP_FIELD_FLAG_RW, 24, 1, 
0x1, 0x0 }, + { "fifo_status_int_en_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "fifo_status_int_en_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "fifo_status_int_en_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "fifo_status_int_en_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "fifo_status_int_en_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "fifo_status_int_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "fifo_status_int_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "fifo_status_int_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "fifo_status_int_en_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "fifo_status_int_en_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "fifo_status_int_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "fifo_status_int_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "fifo_status_int_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "fifo_status_int_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "fifo_status_int_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "fifo_status_int_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "fifo_status_int_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "fifo_status_int_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "fifo_status_int_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "fifo_status_int_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "fifo_status_int_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "fifo_status_int_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "fifo_status_int_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "fifo_status_int_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_en_1_reg[] = { + { "fifo_status_int_en_41", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "fifo_status_int_en_40", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "fifo_status_int_en_39", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "fifo_status_int_en_38", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "fifo_status_int_en_37", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { 
"fifo_status_int_en_36", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "fifo_status_int_en_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "fifo_status_int_en_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "fifo_status_int_en_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "fifo_status_int_en_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_mask_0_reg[] = { + { "fifo_status_int_mask_31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { 
"fifo_status_int_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_mask_1_reg[] = { + { "fifo_status_int_mask_41", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_40", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_39", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_38", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_37", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_36", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_35", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_34", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_33", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "fifo_status_int_mask_32", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_status_reg[] = { + { "fifo_status_int_status", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_main_frequency_reg[] = { + { "main_frequency", DPP_FIELD_FLAG_RW, 9, 10, 0x258, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_cfg_type_reg[] = { + { "oam_cfg_type", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fst_swch_eth_head0_reg[] = { + { "fst_swch_eth_head", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fst_swch_eth_head1_reg[] = { + { "fst_swch_eth_head1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_nppu_oam_cfg_fst_swch_eth_head2_reg[] = { + { "fst_swch_eth_head2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fst_swch_eth_head3_reg[] = { + { "fst_swch_eth_head3", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_fs_txinst_fifo_th_reg[] = { + { "oam_fs_txinst_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0x1f4, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_ma_fs_txinst_fifo_th_reg[] = { + { "oam_ma_fs_txinst_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0x1f4, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_int_ram_clr_reg[] = { + { "pon_int_ram_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_p_int_index_reg[] = { + { "pon_p_int_index", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_protect_pkt_fifo_th_reg[] = { + { "pon_protect_pkt_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0x1f4, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_laser_off_en_reg[] = { + { "pon_laser_off_en", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_prtct_pkt_tx_en_reg[] = { + { "pon_prtct_pkt_tx_en", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_cfg_pon_master_reg[] = { + { "cfg_pon_master", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_level_mode_reg[] = { + { "level_mode", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_interrupt_en_reg[] = { + { "interrupt_en", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_pon_laser_on_en_reg[] = { + { "pon_laser_on_en", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ti_pon_sd_reg[] = { + { "ti_pon_sd", DPP_FIELD_FLAG_RW, 7, 8, 0x2, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ti_pon_los_reg[] = { + { "ti_pon_los", DPP_FIELD_FLAG_RW, 7, 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat4_reg[] = { + { "ind_dat4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat5_reg[] = { + { "ind_dat5", 
DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat6_reg[] = { + { "ind_dat6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat7_reg[] = { + { "ind_dat7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat8_reg[] = { + { "ind_dat8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat9_reg[] = { + { "ind_dat9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat10_reg[] = { + { "ind_dat10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat11_reg[] = { + { "ind_dat11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat12_reg[] = { + { "ind_dat12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat13_reg[] = { + { "ind_dat13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat14_reg[] = { + { "ind_dat14", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ind_dat15_reg[] = { + { "ind_dat15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_2544_pkt_fifo_th_reg[] = { + { "oam_2544_pkt_fifo_th", DPP_FIELD_FLAG_RW, 8, 9, 0xc0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txinfo_ram_clr_reg[] = { + { "txinfo_ram_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txinfo_ram_init_done_reg[] = { + { "txinfo_ram_init_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_status40_reg[] = { + { "fifo_status_int_status40", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_fifo_status_int_status41_reg[] = { + { "fifo_status_int_status41", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_2544_fun_en_reg[] = { + { "oam_2544_fun_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_oam_2544_stat_clr_reg[] = { + { "oam_2544_stat_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_nppu_oam_cfg_txdis_default_reg[] = { + { "txdis_default", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_txdis_default_en_reg[] = { + { "txdis_default_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpbfd_firstchk_th_reg[] = { + { "tpbfd_firstchk_th", DPP_FIELD_FLAG_RW, 18, 19, 0xc350, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_ethccm_firstchk_th_reg[] = { + { "ethccm_firstchk_th", DPP_FIELD_FLAG_RW, 18, 19, 0xc350, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_tpccm_firstchk_th_reg[] = { + { "tpccm_firstchk_th", DPP_FIELD_FLAG_RW, 18, 19, 0xc350, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_txstat_req_cnt_reg[] = { + { "txstat_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_chkstat_req_cnt_reg[] = { + { "chkstat_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_stat_oam_fc_cnt_reg[] = { + { "stat1_oam_fc_cnt", DPP_FIELD_FLAG_RC, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_bfdseq_req_cnt_reg[] = { + { "bfdseq_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_lmcnt_req_cnt_reg[] = { + { "lmcnt_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_stat_oam_lm_rsp_cnt_reg[] = { + { "stat2_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_stat_oam_lm_fc_cnt_reg[] = { + { "stat2_oam_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_se_req_cnt_reg[] = { + { "se_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_se_rsp_cnt_reg[] = { + { "se_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_se_oam_fc_cnt_reg[] = { + { "se_oam_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_se_fc_cnt_reg[] = { + { "oam_se_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_pktrx_sop_cnt_reg[] = { + { 
"oam_pktrx_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_pktrx_eop_cnt_reg[] = { + { "oam_pktrx_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_pktrx_oam_fc_cnt_reg[] = { + { "pktrx_oam_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_pktrx_oam_tst_fc_cnt_reg[] = { + { "pktrx_oam_tst_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_odma_oam_sop_cnt_reg[] = { + { "odma_oam_sop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_odma_oam_eop_cnt_reg[] = { + { "odma_oam_eop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_odma_fc_cnt_reg[] = { + { "oam_odma_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_ma_pkt_illegal_cnt_reg[] = { + { "rec_ma_pkt_illegal_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_rmep_pkt_illegal_cnt_reg[] = { + { "rec_rmep_pkt_illegal_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_eth_ais_pkt_cnt_reg[] = { + { "rec_eth_ais_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_ais_pkt_cnt_reg[] = { + { "rec_tp_ais_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_csf_pkt_cnt_reg[] = { + { "rec_tp_csf_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_eth_level_defect_cnt_reg[] = { + { "rec_eth_level_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_eth_megid_defect_cnt_reg[] = { + { "rec_eth_megid_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_eth_mepid_defect_cnt_reg[] = { + { "rec_eth_mepid_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_eth_interval_defect_cnt_reg[] = { + { "rec_eth_interval_defect_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_sess_unenable_cnt_reg[] = { + { "rec_sess_unenable_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_2544_rd_pkt_cnt_reg[] = { + { "oam_2544_rd_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_debug_cnt_clr_reg[] = { + { "debug_cnt_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_pktrx_catch_data_reg[] = { + { "oam_pktrx_catch_data", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_odma_oam_catch_data_reg[] = { + { "odma_oam_catch_data", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_tst_session_tx_cnt_reg[] = { + { "tst_session_tx_cnt", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_tst_session_rx_cnt_reg[] = { + { "tst_session_rx_cnt", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_tstrx_lost_cnt_reg[] = { + { "tstrx_lost_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_bfdseq_wr_cnt_reg[] = { + { "bfdseq_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_bfdtime_wr_cnt_reg[] = { + { "bfdtime_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_lmcnt_wr_cnt_reg[] = { + { "lmcnt_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_fs_pkt_cnt_reg[] = { + { "oam_fs_pkt_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_oam_ma_fs_pkt_cnt_reg[] = { + { "lmcnt_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_level_defect_cnt_reg[] = { + { "rec_tp_level_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_megid_defect_cnt_reg[] = { + { "rec_tp_megid_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_mepid_defect_cnt_reg[] = { + { 
"rec_tp_mepid_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rec_tp_interval_defect_cnt_reg[] = { + { "rec_tp_interval_defect_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rd_reg_clear_mode_reg[] = { + { "rd_clear_mode_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_stat_rd_data_reg_clear_mode_reg[] = { + { "rd_data_reg_clear_mode_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_int_status_ram_0_reg[] = { + { "bfd_diag_value_bit4", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "bfd_diag_value_bit3", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "bfd_diag_value_bit2", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "bfd_diag_value_bit1", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "bfd_diag_value_bit0", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "dloc_int", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "drdi_int", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_int_status_ram1_reg[] = { + { "sticky_error_level_defect", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "sticky_error_megid_defect", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "sticky_error_mepid_defect", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "sticky_error_inter_defect", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "sticky_ais_defect", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "sticky_csf_defect", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "current_error_level_defect", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "current_error_megid_defect", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "current_error_mepid_defect", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "current_error_inter_defect", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "current_ais_defect", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "current_csf_defect", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_tst_pkt_tx_para_ram_reg[] = { + { "ddr_self_test_tx_en", DPP_FIELD_FLAG_RW, 80, 1, 0x0, 0x0 }, + { 
"tm_self_test_tx_en", DPP_FIELD_FLAG_RW, 79, 1, 0x0, 0x0 }, + { "fast_aging_tx_en", DPP_FIELD_FLAG_RW, 78, 1, 0x0, 0x0 }, + { "timing_aging_tx_en", DPP_FIELD_FLAG_RW, 77, 1, 0x0, 0x0 }, + { "backgroud_flow_tx_en", DPP_FIELD_FLAG_RW, 76, 1, 0x0, 0x0 }, + { "tsttx_session_en", DPP_FIELD_FLAG_RW, 75, 1, 0x0, 0x0 }, + { "tx_freq", DPP_FIELD_FLAG_RW, 74, 3, 0x0, 0x0 }, + { "tx_offset", DPP_FIELD_FLAG_RW, 71, 8, 0x0, 0x0 }, + { "tx_count", DPP_FIELD_FLAG_RW, 63, 32, 0x0, 0x0 }, + { "fast_tx_mode_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "tsttx_pkthead_len", DPP_FIELD_FLAG_RW, 30, 15, 0x0, 0x0 }, + { "tsttx_interval", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_groupnumram_reg[] = { + { "mep_down_num", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_tx_tbl_ram_reg[] = { + { "oam_tx_en", DPP_FIELD_FLAG_RW, 62, 1, 0x0, 0x0 }, + { "oam_tx_type", DPP_FIELD_FLAG_RW, 61, 4, 0x0, 0x0 }, + { "oam_fetch_len", DPP_FIELD_FLAG_RW, 57, 14, 0x0, 0x0 }, + { "bfd_seq_tx_en", DPP_FIELD_FLAG_RW, 43, 1, 0x0, 0x0 }, + { "tx_para", DPP_FIELD_FLAG_RW, 42, 7, 0x0, 0x0 }, + { "oam_tx_interval", DPP_FIELD_FLAG_RW, 35, 16, 0x0, 0x0 }, + { "hd_ena_flag", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "last_tx_time", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_chk_tbl_ram_reg[] = { + { "fast_switch_en", DPP_FIELD_FLAG_RW, 55, 1, 0x0, 0x0 }, + { "oam_chk_en", DPP_FIELD_FLAG_RW, 54, 1, 0x0, 0x0 }, + { "oam_chk_type", DPP_FIELD_FLAG_RW, 53, 4, 0x0, 0x0 }, + { "ccm_predel_flag", DPP_FIELD_FLAG_RW, 49, 1, 0x0, 0x0 }, + { "lm_chk_en", DPP_FIELD_FLAG_RW, 48, 1, 0x0, 0x0 }, + { "ccm_group_id", DPP_FIELD_FLAG_RW, 47, 12, 0x0, 0x0 }, + { "oam_chk_internal", DPP_FIELD_FLAG_RW, 35, 16, 0x0, 0x0 }, + { "fist_chk_flag", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "last_chk_time", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_ma_chk_tbl_ram_reg[] = { + { "ma_fast_switch_en", 
DPP_FIELD_FLAG_RW, 141, 1, 0x0, 0x0 }, + { "ma_chk_en", DPP_FIELD_FLAG_RW, 140, 1, 0x0, 0x0 }, + { "ma_type", DPP_FIELD_FLAG_RW, 139, 1, 0x0, 0x0 }, + { "error_level_defect_en", DPP_FIELD_FLAG_RW, 138, 1, 0x0, 0x0 }, + { "error_megid_defect_en", DPP_FIELD_FLAG_RW, 137, 1, 0x0, 0x0 }, + { "error_mepid_defect_en", DPP_FIELD_FLAG_RW, 136, 1, 0x0, 0x0 }, + { "error_inter_defect_en", DPP_FIELD_FLAG_RW, 135, 1, 0x0, 0x0 }, + { "ais_defect_en", DPP_FIELD_FLAG_RW, 134, 1, 0x0, 0x0 }, + { "csf_defect_en", DPP_FIELD_FLAG_RW, 133, 1, 0x0, 0x0 }, + { "error_level_defect_ccm", DPP_FIELD_FLAG_RW, 132, 3, 0x0, 0x0 }, + { "error_megid_defect_ccm", DPP_FIELD_FLAG_RW, 129, 3, 0x0, 0x0 }, + { "error_mepid_defect_ccm", DPP_FIELD_FLAG_RW, 126, 3, 0x0, 0x0 }, + { "error_inter_defect_ccm", DPP_FIELD_FLAG_RW, 123, 3, 0x0, 0x0 }, + { "ais_defect_ccm", DPP_FIELD_FLAG_RW, 120, 3, 0x0, 0x0 }, + { "csf_defect_ccm", DPP_FIELD_FLAG_RW, 117, 3, 0x0, 0x0 }, + { "ma_predel_en", DPP_FIELD_FLAG_RW, 114, 1, 0x0, 0x0 }, + { "error_level_defect_ts", DPP_FIELD_FLAG_RO, 113, 19, 0x0, 0x0 }, + { "error_megid_defect_ts", DPP_FIELD_FLAG_RO, 94, 19, 0x0, 0x0 }, + { "error_mepid_defect_ts", DPP_FIELD_FLAG_RO, 75, 19, 0x0, 0x0 }, + { "error_inter_defect_ts", DPP_FIELD_FLAG_RO, 56, 19, 0x0, 0x0 }, + { "ais_defect_ts", DPP_FIELD_FLAG_RO, 37, 19, 0x0, 0x0 }, + { "csf_defect_ts", DPP_FIELD_FLAG_RO, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_nppu_oam_cfg_indir_oam_2544_tx_ram_reg[] = { + { "tx_en_2544", DPP_FIELD_FLAG_RW, 46, 1, 0x0, 0x0 }, + { "tx_cfg_times_2544", DPP_FIELD_FLAG_RW, 45, 16, 0x0, 0x0 }, + { "current_times", DPP_FIELD_FLAG_RW, 29, 16, 0x0, 0x0 }, + { "slice_num", DPP_FIELD_FLAG_RW, 13, 7, 0x0, 0x0 }, + { "pkt_mty", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_interrupt_en_r_reg[] = { + { "interrupt_en_r", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mec_host_interrupt_reg[] = { + { "mec_host_interrupt", DPP_FIELD_FLAG_RO, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_dbg_rtl_date_reg[] = { + { "dbg_rtl_date", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_start_num_cfg_reg[] = { + { "dup_start_num_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_debug_data_write_complete_reg[] = { + { "debug_data_write_complete", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_uc_mc_wrr_cfg_reg[] = { + { "uc_mc_wrr_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_debug_pkt_send_en_reg[] = { + { "debug_pkt_send_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_tbl_ind_access_done_reg[] = { + { "dup_tbl_ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_ppu_demux_fifo_interrupt_mask_reg[] = { + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 1, 1, + 0x1, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, 0, + 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_multicast_fifo_interrupt_mask_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, + 17, 1, 0x1, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, + 16, 1, 0x1, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, + 15, 1, 0x1, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, + 14, 1, 0x1, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 12, 1, 
0x1, 0x0 }, + { "se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, + 9, 1, 0x1, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 7, + 1, 0x1, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, 6, + 1, 0x1, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "car_flag_fifo_32x1_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, 1, 1, + 0x1, 0x0 }, + { "car_flag_fifo_32x1_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, 0, 1, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_in_schedule_fifo_interrupt_mask_reg[] = { + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 3, + 1, 0x1, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, 2, + 1, 0x1, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 1, + 1, 0x1, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, 0, + 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mf_out_fifo_interrupt_mask_reg[] = { + { "ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { 
"ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_mask_reg[] = { + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { 
"ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_mask_reg[] = { + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mccnt_fifo_interrupt_mask_reg[] = { + { "ppu_mccnt_fifo_32x15_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 5, + 1, 0x1, 0x0 }, + { "ppu_mccnt_fifo_32x15_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, + 4, 1, 0x1, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_overflow_mask", DPP_FIELD_FLAG_RW, 1, + 1, 0x1, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_underflow_mask", DPP_FIELD_FLAG_RW, 0, + 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_mask_l_reg[] = { + { "mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { 
"mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { 
"mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_mask_m_reg[] = { + { "ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 29, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 28, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 27, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 26, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 25, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 24, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { 
"ppu_cop_delay_fifo_16x16_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 21, 1, 0x1, 0x0 }, + { "ppu_cop_delay_fifo_16x16_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 20, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { 
"mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_mask_h_reg[] = { + { "coprocessor_fwft_fifo_16x80_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "coprocessor_fwft_fifo_16x80_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_ram_check_err_mask_reg[] = { + { "parity_err_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_fifo_interrupt_mask_reg[] = { + { "instrmem2_wr_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "instrmem2_wr_fifo_udf_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "instrmem2_rd_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "instrmem2_rd_fifo_udf_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "instrmem1_wr_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "instrmem1_wr_fifo_udf_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "instrmem1_rd_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "instrmem1_rd_fifo_udf_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "instrmem0_wr_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "instrmem0_wr_fifo_udf_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "instrmem0_rd_fifo_ovf_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "instrmem0_rd_fifo_udf_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_ppu_demux_fifo_interrupt_sta_reg[] = { + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 
}, + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 1, 1, + 0x0, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_multicast_fifo_interrupt_sta_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 17, 1, 0x0, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, + 16, 1, 0x0, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 15, 1, 0x0, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, + 14, 1, 0x0, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 13, 1, 0x0, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 9, 1, 0x0, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, + 8, 1, 0x0, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 7, + 1, 0x0, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 6, + 1, 0x0, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { 
"dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "car_flag_fifo_32x1_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, 1, 1, + 0x0, 0x0 }, + { "car_flag_fifo_32x1_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_in_schedule_fifo_interrupt_sta_reg[] = { + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 3, 1, + 0x0, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 2, + 1, 0x0, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 1, 1, + 0x0, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mf_out_fifo_interrupt_sta_reg[] = { + { "ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { 
"ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_sta_reg[] = { + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_sta_reg[] = { + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mccnt_fifo_interrupt_sta_reg[] = { + { 
"ppu_mccnt_fifo_32x15_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 5, + 1, 0x0, 0x0 }, + { "ppu_mccnt_fifo_32x15_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 4, + 1, 0x0, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 3, 1, 0x0, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, 1, + 1, 0x0, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_underflow_sta", DPP_FIELD_FLAG_RO, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_sta_l_reg[] = { + { "mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 28, 1, 0x0, 0x0 }, + { "mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { 
"mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_coprocessor_fifo_interrupt_sta_m_reg[] = { + { "ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, + 29, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, + 28, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, + 27, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, + 26, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, + 25, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, + 24, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x16_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, + 21, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x16_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, + 20, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 12, 
1, 0x0, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flg_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flg_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_sta_h_reg[] = { + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_sta", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_fifo_interrupt_sta_reg[] = { + { "instrmem1_wr_fifo_ovf_sta", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "instrmem1_wr_fifo_udf_sta", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "instrmem1_rd_fifo_ovf_sta", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "instrmem1_rd_fifo_udf_sta", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "instrmem0_wr_fifo_ovf_sta", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "instrmem0_wr_fifo_udf_sta", 
DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "instrmem0_rd_fifo_ovf_sta", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "instrmem0_rd_fifo_udf_sta", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_ram_check_ecc_err_flag_1_reg[] = { + { "ecc_single_err_sa_para_fifo_int_flag", DPP_FIELD_FLAG_RC, 25, 1, 0x0, + 0x0 }, + { "ecc_double_err_sa_para_fifo_int_flag", DPP_FIELD_FLAG_RC, 24, 1, 0x0, + 0x0 }, + { "ecc_single_err_dup_para_fifo_int_flag", DPP_FIELD_FLAG_RC, 23, 1, + 0x0, 0x0 }, + { "ecc_double_err_dup_para_fifo_int_flag", DPP_FIELD_FLAG_RC, 22, 1, + 0x0, 0x0 }, + { "ecc_single_err_pf_rsp_fifo_int_flag", DPP_FIELD_FLAG_RC, 21, 1, 0x0, + 0x0 }, + { "ecc_double_err_pf_rsp_fifo_int_flag", DPP_FIELD_FLAG_RC, 20, 1, 0x0, + 0x0 }, + { "ecc_single_err_pf_req_fifo_int_flag", DPP_FIELD_FLAG_RC, 19, 1, 0x0, + 0x0 }, + { "ecc_double_err_pf_req_fifo_int_flag", DPP_FIELD_FLAG_RC, 18, 1, 0x0, + 0x0 }, + { "ecc_single_err_ppu_reorder_link_ram0_int_flag", DPP_FIELD_FLAG_RC, + 17, 1, 0x0, 0x0 }, + { "ecc_double_err_ppu_reorder_link_ram0_int_flag", DPP_FIELD_FLAG_RC, + 16, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_link_ram1_int_flag", DPP_FIELD_FLAG_RC, + 15, 1, 0x0, 0x0 }, + { "ecc_double_err_ppu_reorder_link_ram1_int_flag", DPP_FIELD_FLAG_RC, + 14, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_link_flag_array_ram0_int_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_link_flag_array_ram1_int_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_ifb_ram_int_flag", DPP_FIELD_FLAG_RC, 11, + 1, 0x0, 0x0 }, + { "ecc_double_err_ppu_reorder_ifb_ram_int_flag", DPP_FIELD_FLAG_RC, 10, + 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_flag_array_ram0_int_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_flag_array_ram1_int_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ecc_single_err_ppu_reorder_flag_ram0_int_flag", DPP_FIELD_FLAG_RC, 7, + 1, 0x0, 0x0 }, + { 
"ecc_single_err_ppu_reorder_flag_ram1_int_flag", DPP_FIELD_FLAG_RC, 6, + 1, 0x0, 0x0 }, + { "ecc_single_err_uc_mf_fifo_int_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "ecc_double_err_uc_mf_fifo_int_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "ecc_single_err_mc_mf_fifo_int_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, + 0x0 }, + { "ecc_double_err_mc_mf_fifo_int_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, + 0x0 }, + { "ecc_single_err_free_global_num_fifo_int_flag", DPP_FIELD_FLAG_RC, 1, + 1, 0x0, 0x0 }, + { "ecc_double_err_free_global_num_fifo_int_flag", DPP_FIELD_FLAG_RC, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_ppu_demux_fifo_interrupt_flag_reg[] = { + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "isu_in_para_fwft_fifo_32x81_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 1, 1, + 0x0, 0x0 }, + { "isu_in_fifo_64x81_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_multicast_fifo_interrupt_flag_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ppu_pktrx_mc_ptr_fifo_16384x17_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, + 17, 1, 0x0, 0x0 }, + { "pf_req_fwft_fifo_16x36_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, + 16, 1, 0x0, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RO, + 15, 1, 0x0, 0x0 }, + { "pf_rsp_fwft_fifo_32x34_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, + 14, 1, 0x0, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "dup_para_fwft_fifo_16x35_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "se_mc_rsp_fwft_fifo_32x17_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { 
"se_mc_rsp_fwft_fifo_32x17_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, + 9, 1, 0x0, 0x0 }, + { "sa_para_fwft_fifo_64x17_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 7, + 1, 0x0, 0x0 }, + { "group_id_fifo_64x16_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, 6, + 1, 0x0, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "isu_mc_para_fwft_fifo_128x34_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "dup_freeptr_fwft_fifo_128x7_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "dup_freeptr_fwft_fifo_128x7_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "car_flag_fifo_32x1_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, 1, 1, + 0x0, 0x0 }, + { "car_flag_fifo_32x1_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_in_schedule_fifo_interrupt_flag_reg[] = { + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "free_global_num_fwft_fifo_8192x13_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 3, + 1, 0x0, 0x0 }, + { "mc_mf_fifo_16x2048_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, 2, + 1, 0x0, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 1, + 1, 0x0, 0x0 }, + { "uc_mf_fifo_96x2048_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mf_out_fifo_interrupt_flag_reg[] = { + { "ppu_cluster5_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ppu_cluster5_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { 
"ppu_cluster4_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ppu_cluster4_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ppu_cluster3_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ppu_cluster2_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ppu_cluster1_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ppu_cluster0_mf_out_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_cluster0_mf_out_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_flag_reg[] = { + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ppu_cluster5_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ppu_cluster4_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { 
"ppu_cluster3_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ppu_cluster2_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ppu_cluster1_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_cluster0_pbu_mcode_pf_req_afifo_32x15_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_flag_reg[] = { + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0r_underflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_afifo_64x16_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_mccnt_fifo_interrupt_flag_reg[] = { + { "ppu_mccnt_fifo_32x15_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 5, + 1, 0x0, 0x0 }, + { "ppu_mccnt_fifo_32x15_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, + 4, 1, 0x0, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ppu_wb_data_fifo_32x2048_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_overflow_flag", DPP_FIELD_FLAG_RC, 1, + 1, 0x0, 0x0 }, + { "mccnt_rsp_fifo_32x1_wrapper_u0_underflow_flag", DPP_FIELD_FLAG_RC, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_flag_l_reg[] = { + { "mec3_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "mec3_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "mec3_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "mec3_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "mec3_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + 
DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "mec3_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "mec2_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "mec2_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "mec2_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "mec2_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "mec2_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "mec1_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "mec1_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mec1_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mec1_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mec1_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "mec0_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { 
"mec0_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "mec0_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "mec0_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "mec0_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_coprocessor_fifo_interrupt_flag_m_reg[] = { + { "ppu_cop_result_fwft_fifo_80x80_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_80x80_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 29, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_48x16_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 28, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 27, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x48_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 26, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 25, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x32_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 24, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_96x80_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x16_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 21, 1, 0x0, 0x0 }, + { "ppu_cop_delay_fifo_16x16_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 20, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_overflow_flag", + 
DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_32x80_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ppu_cop_result_fwft_fifo_16x80_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "mec5_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "mec5_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "mec5_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "mec5_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "mec4_cop_key_crc_fifo_32x625_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "mec4_cop_key_checksum_fifo_32x180_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "mec4_cop_key_mul_fifo_32x52_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "mec4_cop_key_mul_fifo_32x52_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "mec4_cop_key_random_mod_fifo_32x44_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_coprocessor_fifo_interrupt_flag_h_reg[] = { + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_cop_random_mod_para_delay_fifo_48x16_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_fifo_interrupt_flag_reg[] = { + { "instrmem2_wr_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "instrmem2_wr_fifo_udf_flag", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "instrmem2_rd_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "instrmem2_rd_fifo_udf_flag", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "instrmem1_wr_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "instrmem1_wr_fifo_udf_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "instrmem1_rd_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "instrmem1_rd_fifo_udf_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "instrmem0_wr_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "instrmem0_wr_fifo_udf_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "instrmem0_rd_fifo_ovf_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "instrmem0_rd_fifo_udf_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_ram_int_out_reg[] = { + { "instrmem2_bank3_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 11, 1, + 0x0, 0x0 }, + { "instrmem2_bank2_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 10, 1, + 0x0, 0x0 }, + { "instrmem2_bank1_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 9, 1, + 0x0, 0x0 }, + { "instrmem2_bank0_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 8, 1, + 0x0, 0x0 }, + { "instrmem1_bank3_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 7, 1, + 0x0, 0x0 }, + { "instrmem1_bank2_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 6, 1, + 0x0, 0x0 }, + { "instrmem1_bank1_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 5, 1, + 0x0, 0x0 }, + { "instrmem1_bank0_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 4, 1, + 0x0, 0x0 }, + { "instrmem0_bank3_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 
3, 1, + 0x0, 0x0 }, + { "instrmem0_bank2_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 2, 1, + 0x0, 0x0 }, + { "instrmem0_bank1_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 1, 1, + 0x0, 0x0 }, + { "instrmem0_bank0_ram_parity_err_int_out", DPP_FIELD_FLAG_RO, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_ram_int_mask_reg[] = { + { "instrmem2_bank3_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, + 0x0 }, + { "instrmem2_bank2_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, + 0x0 }, + { "instrmem2_bank1_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, + 0x0 }, + { "instrmem2_bank0_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, + 0x0 }, + { "instrmem1_bank3_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, + 0x0 }, + { "instrmem1_bank2_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, + 0x0 }, + { "instrmem1_bank1_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, + 0x0 }, + { "instrmem1_bank0_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "instrmem0_bank3_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "instrmem0_bank2_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, + 0x0 }, + { "instrmem0_bank1_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, + 0x0 }, + { "instrmem0_bank0_ram_parity_err_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_ram_int_stat_reg[] = { + { "instrmem2_bank3_ram_parity_errstat", DPP_FIELD_FLAG_RO, 11, 1, 0x0, + 0x0 }, + { "instrmem2_bank2_ram_parity_errstat", DPP_FIELD_FLAG_RO, 10, 1, 0x0, + 0x0 }, + { "instrmem2_bank1_ram_parity_errstat", DPP_FIELD_FLAG_RO, 9, 1, 0x0, + 0x0 }, + { "instrmem2_bank0_ram_parity_errstat", DPP_FIELD_FLAG_RO, 8, 1, 0x0, + 0x0 }, + { "instrmem1_bank3_ram_parity_errstat", DPP_FIELD_FLAG_RO, 7, 1, 0x0, + 0x0 }, + { "instrmem1_bank2_ram_parity_errstat", DPP_FIELD_FLAG_RO, 6, 1, 0x0, + 0x0 }, + { "instrmem1_bank1_ram_parity_errstat", DPP_FIELD_FLAG_RO, 5, 1, 0x0, + 0x0 }, + { "instrmem1_bank0_ram_parity_errstat", 
DPP_FIELD_FLAG_RO, 4, 1, 0x0, + 0x0 }, + { "instrmem0_bank3_ram_parity_errstat", DPP_FIELD_FLAG_RO, 3, 1, 0x0, + 0x0 }, + { "instrmem0_bank2_ram_parity_errstat", DPP_FIELD_FLAG_RO, 2, 1, 0x0, + 0x0 }, + { "instrmem0_bank1_ram_parity_errstat", DPP_FIELD_FLAG_RO, 1, 1, 0x0, + 0x0 }, + { "instrmem0_bank0_ram_parity_errstat", DPP_FIELD_FLAG_RO, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_ram_int_flag_reg[] = { + { "instrmem2_bank3_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 11, 1, 0x0, + 0x0 }, + { "instrmem2_bank2_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 10, 1, 0x0, + 0x0 }, + { "instrmem2_bank1_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 9, 1, 0x0, + 0x0 }, + { "instrmem2_bank0_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 8, 1, 0x0, + 0x0 }, + { "instrmem1_bank3_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "instrmem1_bank2_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, + 0x0 }, + { "instrmem1_bank1_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, + 0x0 }, + { "instrmem1_bank0_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, + 0x0 }, + { "instrmem0_bank3_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, + 0x0 }, + { "instrmem0_bank2_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, + 0x0 }, + { "instrmem0_bank1_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, + 0x0 }, + { "instrmem0_bank0_ram_parity_err_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_count_cfg_reg[] = { + { "ppu_count_overflow_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_count_rd_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_statics_cfg_reg[] = { + { "csr_statics_mc_type", DPP_FIELD_FLAG_RW, 24, 1, 0x0, 0x0 }, + { "csr_statics_bufnum", DPP_FIELD_FLAG_RW, 22, 7, 0x0, 0x0 }, + { "csr_statics_portnum1", DPP_FIELD_FLAG_RW, 15, 8, 0xc1, 0x0 }, + { "csr_statics_portnum0", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_statics_wb_cfg_reg[] = { + { "csr_statics_wb_halt_send_type", 
DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "csr_statics_wb_mf_type", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "csr_statics_wb_halt_continue_end", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "csr_statics_wb_dup_flag", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "csr_statics_wb_last_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "csr_statics_wb_dis_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_wr_table_self_rsp_en_cfg_reg[] = { + { "wr_table_self_rsp_en_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_random_arbiter_8to1_cfg_reg[] = { + { "ppu_random_arbiter_8to1_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_reorder_bypass_flow_num_cfg_reg[] = { + { "ppu_reorder_bypass_flow_num_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cos_meter_cfg_h_reg[] = { + { "cbs", DPP_FIELD_FLAG_RW, 22, 10, 0x3ff, 0x0 }, + { "pbs", DPP_FIELD_FLAG_RW, 12, 10, 0x3ff, 0x0 }, + { "green_action", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "yellow_action", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "red_action", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_cos_meter_cfg_l_reg[] = { + { "cir", DPP_FIELD_FLAG_RW, 20, 10, 0x24a, 0x0 }, + { "pir", DPP_FIELD_FLAG_RW, 10, 10, 0x24a, 0x0 }, + { "car_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_rdy_reg[] = { + { "instrmem_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_addr_reg[] = { + { "instrmem_operate", DPP_FIELD_FLAG_WO, 13, 1, 0x0, 0x0 }, + { "instrmem_addr", DPP_FIELD_FLAG_WO, 12, 13, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_ind_access_done_reg[] = { + { "instrmem_ind_access_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr0_data_l_reg[] = { + { "instrmem_instr0_data_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr0_data_h_reg[] = { + { "instrmem_instr0_data_h", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr1_data_l_reg[] = { + { "instrmem_instr1_data_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr1_data_h_reg[] = { + { "instrmem_instr1_data_h", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr2_data_l_reg[] = { + { "instrmem_instr2_data_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr2_data_h_reg[] = { + { "instrmem_instr2_data_h", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr3_data_l_reg[] = { + { "instrmem_instr3_data_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_instr3_data_h_reg[] = { + { "instrmem_instr3_data_h", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr0_data_l_reg[] = { + { "instrmem_read_instr0_data_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr0_data_h_reg[] = { + { "instrmem_read_instr0_data_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr1_data_l_reg[] = { + { "instrmem_read_instr1_data_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr1_data_h_reg[] = { + { "instrmem_read_instr1_data_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr2_data_l_reg[] = { + { "instrmem_read_instr2_data_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr2_data_h_reg[] = { + { "instrmem_read_instr2_data_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr3_data_l_reg[] = { + { "instrmem_read_instr3_data_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_instrmem_read_instr3_data_h_reg[] = { + { "instrmem_read_instr3_data_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_ppu_mc_srh_fc_cnt_h_reg[] = { + { "se_ppu_mc_srh_fc_cnt_h", 
DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_ppu_mc_srh_fc_cnt_l_reg[] = { + { "se_ppu_mc_srh_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_se_mc_srh_fc_cnt_h_reg[] = { + { "ppu_se_mc_srh_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_se_mc_srh_fc_cnt_l_reg[] = { + { "ppu_se_mc_srh_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_se_mc_srh_vld_cnt_h_reg[] = { + { "ppu_se_mc_srh_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_se_mc_srh_vld_cnt_l_reg[] = { + { "ppu_se_mc_srh_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_ppu_mc_srh_vld_cnt_h_reg[] = { + { "se_ppu_mc_srh_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_ppu_mc_srh_vld_cnt_l_reg[] = { + { "se_ppu_mc_srh_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_h_reg[] = { + { "pbu_ppu_logic_pf_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_l_reg[] = { + { "pbu_ppu_logic_pf_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_h_reg[] = { + { "ppu_pbu_logic_rsp_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_l_reg[] = { + { "ppu_pbu_logic_rsp_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_h_reg[] = { + { "ppu_pbu_logic_pf_req_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_l_reg[] = { + { "ppu_pbu_logic_pf_req_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_h_reg[] = { + { "pbu_ppu_logic_pf_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_l_reg[] 
= { + { "pbu_ppu_logic_pf_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_h_reg[] = { + { "pbu_ppu_ifb_rd_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_l_reg[] = { + { "pbu_ppu_ifb_rd_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_wb_fc_cnt_h_reg[] = { + { "pbu_ppu_wb_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_wb_fc_cnt_l_reg[] = { + { "pbu_ppu_wb_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_h_reg[] = { + { "ppu_pbu_mcode_pf_req_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_l_reg[] = { + { "ppu_pbu_mcode_pf_req_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_h_reg[] = { + { "pbu_ppu_mcode_pf_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_l_reg[] = { + { "pbu_ppu_mcode_pf_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_odma_ppu_para_fc_cnt_h_reg[] = { + { "odma_ppu_para_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_odma_ppu_para_fc_cnt_l_reg[] = { + { "odma_ppu_para_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_h_reg[] = { + { "odma_ppu_mccnt_wr_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_l_reg[] = { + { "odma_ppu_mccnt_wr_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_h_reg[] = { + { "ppu_odma_mccnt_wr_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_l_reg[] = { + { "ppu_odma_mccnt_wr_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_h_reg[] = { + { "odma_ppu_mccnt_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_l_reg[] = { + { "odma_ppu_mccnt_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_uc_fc_cnt_h_reg[] = { + { "ppu_pktrx_uc_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_uc_fc_cnt_l_reg[] = { + { "ppu_pktrx_uc_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_fc_cnt_h_reg[] = { + { "ppu_pktrx_mc_fc_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_fc_cnt_l_reg[] = { + { "ppu_pktrx_mc_fc_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pktrx_ppu_desc_vld_cnt_h_reg[] = { + { "pktrx_ppu_desc_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pktrx_ppu_desc_vld_cnt_l_reg[] = { + { "pktrx_ppu_desc_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_h_reg[] = { + { "ppu_pbu_ifb_req_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_l_reg[] = { + { "ppu_pbu_ifb_req_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_h_reg[] = { + { "pbu_ppu_ifb_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_l_reg[] = { + { "pbu_ppu_ifb_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_wb_vld_cnt_h_reg[] = { + { "ppu_pbu_wb_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_wb_vld_cnt_l_reg[] = { + { "ppu_pbu_wb_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_h_reg[] = { + { "pbu_ppu_reorder_para_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; 
+DPP_FIELD_T g_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_l_reg[] = { + { "pbu_ppu_reorder_para_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_odma_para_vld_cnt_h_reg[] = { + { "ppu_odma_para_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_odma_para_vld_cnt_l_reg[] = { + { "ppu_odma_para_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_mc_vld_cnt_h_reg[] = { + { "statics_isu_ppu_mc_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_mc_vld_cnt_l_reg[] = { + { "statics_isu_ppu_mc_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_h_reg[] = { + { "statics_isu_ppu_mc_loop_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_l_reg[] = { + { "statics_isu_ppu_mc_loop_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_uc_vld_cnt_h_reg[] = { + { "statics_isu_ppu_uc_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_uc_vld_cnt_l_reg[] = { + { "statics_isu_ppu_uc_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_h_reg[] = { + { "statics_isu_ppu_uc_bufnumis0_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_l_reg[] = { + { "statics_isu_ppu_uc_bufnumis0_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_vld_cnt_h_reg[] = { + { "statics_demux_schedule_mc_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_vld_cnt_l_reg[] = { + { "statics_demux_schedule_mc_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_h_reg[] = { + 
{ "statics_demux_schedule_mc_bufnumis0_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_l_reg[] = { + { "statics_demux_schedule_mc_bufnumis0_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_h_reg[] = { + { "statics_demux_schedule_mc_srcportis0_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_l_reg[] = { + { "statics_demux_schedule_mc_srcportis0_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_h_reg[] = { + { "statics_demux_schedule_mc_srcportis1_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_l_reg[] = { + { "statics_demux_schedule_mc_srcportis1_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_vld_cnt_h_reg[] = { + { "statics_demux_schedule_uc_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_vld_cnt_l_reg[] = { + { "statics_demux_schedule_uc_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_h_reg[] = { + { "statics_demux_schedule_uc_bufnumis0_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_l_reg[] = { + { "statics_demux_schedule_uc_bufnumis0_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_h_reg[] = { + { "statics_demux_schedule_uc_srcportis0_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_l_reg[] = { + { "statics_demux_schedule_uc_srcportis0_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_h_reg[] = { + { "statics_demux_schedule_uc_srcportis1_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_l_reg[] = { + { "statics_demux_schedule_uc_srcportis1_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_vld_cnt_h_reg[] = { + { "statics_ppu_wb_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_vld_cnt_l_reg[] = { + { "statics_ppu_wb_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_h_reg[] = { + { "statics_ppu_wb_bufnumis0_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_l_reg[] = { + { "statics_ppu_wb_bufnumis0_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_h_reg[] = { + { "statics_ppu_wb_srcportis0_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_l_reg[] = { + { "statics_ppu_wb_srcportis0_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_h_reg[] = { + { "statics_ppu_wb_srcportis1_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_l_reg[] = { + { "statics_ppu_wb_srcportis1_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_h_reg[] = { + { "statics_ppu_wb_halt_send_type_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_l_reg[] = { + { "statics_ppu_wb_halt_send_type_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_h_reg[] = { + { 
"statics_ppu_wb_mf_type_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_l_reg[] = { + { "statics_ppu_wb_mf_type_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_h_reg[] = { + { "statics_ppu_wb_halt_continue_end_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, + 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_l_reg[] = { + { "statics_ppu_wb_halt_continue_end_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_h_reg[] = { + { "statics_ppu_wb_dup_flag_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_l_reg[] = { + { "statics_ppu_wb_dup_flag_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_h_reg[] = { + { "statics_ppu_wb_last_flag_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_l_reg[] = { + { "statics_ppu_wb_last_flag_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_h_reg[] = { + { "statics_ppu_wb_dis_flag_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_l_reg[] = { + { "statics_ppu_wb_dis_flag_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h_reg[] = { + { "statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h", DPP_FIELD_FLAG_RO, + 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l_reg[] = { + { "statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_h_reg[] = { + { "statics_pbu_ppu_reorder_mf_type_vld_cnt_h", 
DPP_FIELD_FLAG_RO, 15, + 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_l_reg[] = { + { "statics_pbu_ppu_reorder_mf_type_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h_reg[] = { + { "statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h", + DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l_reg[] = { + { "statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l", + DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_green_pkt_vld_cnt_h_reg[] = { + { "car_green_pkt_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_green_pkt_vld_cnt_l_reg[] = { + { "car_green_pkt_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_yellow_pkt_vld_cnt_h_reg[] = { + { "car_yellow_pkt_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_yellow_pkt_vld_cnt_l_reg[] = { + { "car_yellow_pkt_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_red_pkt_vld_cnt_h_reg[] = { + { "car_red_pkt_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_red_pkt_vld_cnt_l_reg[] = { + { "car_red_pkt_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_drop_pkt_vld_cnt_h_reg[] = { + { "car_drop_pkt_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_drop_pkt_vld_cnt_l_reg[] = { + { "car_drop_pkt_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_h_reg[] = { + { "ppu_pktrx_mc_ptr_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_l_reg[] = { + { "ppu_pktrx_mc_ptr_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_ppu_loopback_fc_cnt_h_reg[] = { + { 
"ppu_pktrx_mc_ptr_vld_cnt_h", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_ppu_loopback_fc_cnt_l_reg[] = { + { "ppu_pktrx_mc_ptr_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg_reg[] = { + { "ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg_reg[] = { + { "ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg_reg[] = { + { "ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg_reg[] = { + { "ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg_reg[] = { + { "ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, + 0x20, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg_reg[] = { + { "ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, + 0x20, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg_reg[] = { + { "ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg_reg[] = { + { "ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mccnt_fifo_prog_full_assert_cfg_reg[] = { + { "mccnt_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mccnt_fifo_prog_full_negate_cfg_reg[] = { + { "mccnt_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mccnt_fifo_prog_empty_assert_cfg_reg[] = { + { 
"mccnt_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mccnt_fifo_prog_empty_negate_cfg_reg[] = { + { "mccnt_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_uc_mf_fifo_prog_full_assert_cfg_reg[] = { + { "uc_mf_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_uc_mf_fifo_prog_full_negate_cfg_reg[] = { + { "uc_mf_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_uc_mf_fifo_prog_empty_assert_cfg_reg[] = { + { "uc_mf_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_uc_mf_fifo_prog_empty_negate_cfg_reg[] = { + { "uc_mf_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mc_mf_fifo_prog_full_assert_cfg_reg[] = { + { "mc_mf_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xa, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mc_mf_fifo_prog_full_negate_cfg_reg[] = { + { "mc_mf_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xa, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mc_mf_fifo_prog_empty_assert_cfg_reg[] = { + { "mc_mf_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_mc_mf_fifo_prog_empty_negate_cfg_reg[] = { + { "mc_mf_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mf_fifo_prog_full_assert_cfg_reg[] = { + { "isu_mf_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mf_fifo_prog_full_negate_cfg_reg[] = { + { "isu_mf_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mf_fifo_prog_empty_assert_cfg_reg[] = { + { "isu_mf_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mf_fifo_prog_empty_negate_cfg_reg[] = { + { "isu_mf_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 
0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_fwft_mf_fifo_prog_empty_assert_cfg_reg[] = { + { "isu_fwft_mf_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_fwft_mf_fifo_prog_empty_negate_cfg_reg[] = { + { "isu_fwft_mf_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mc_para_mf_fifo_prog_full_assert_cfg_reg[] = { + { "isu_mc_para_mf_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x50, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mc_para_mf_fifo_prog_full_negate_cfg_reg[] = { + { "isu_mc_para_mf_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x50, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_assert_cfg_reg[] = { + { "isu_mc_para_mf_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_negate_cfg_reg[] = { + { "isu_mc_para_mf_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_group_id_fifo_prog_full_assert_cfg_reg[] = { + { "group_id_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x3c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_group_id_fifo_prog_full_negate_cfg_reg[] = { + { "group_id_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x3c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_group_id_fifo_prog_empty_assert_cfg_reg[] = { + { "group_id_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_group_id_fifo_prog_empty_negate_cfg_reg[] = { + { "group_id_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_sa_para_fifo_prog_full_assert_cfg_reg[] = { + { "sa_para_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x3c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_sa_para_fifo_prog_full_negate_cfg_reg[] = { + { "sa_para_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x3c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_sa_para_fifo_prog_empty_assert_cfg_reg[] = { + { 
"sa_para_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_sa_para_fifo_prog_empty_negate_cfg_reg[] = { + { "sa_para_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_mc_rsp_fifo_prog_full_assert_cfg_reg[] = { + { "se_mc_rsp_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0xa, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_mc_rsp_fifo_prog_full_negate_cfg_reg[] = { + { "se_mc_rsp_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0xa, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_mc_rsp_fifo_prog_empty_assert_cfg_reg[] = { + { "se_mc_rsp_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_se_mc_rsp_fifo_prog_empty_negate_cfg_reg[] = { + { "se_mc_rsp_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_para_fifo_prog_full_assert_cfg_reg[] = { + { "dup_para_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xc, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_para_fifo_prog_full_negate_cfg_reg[] = { + { "dup_para_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xc, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_para_fifo_prog_empty_assert_cfg_reg[] = { + { "dup_para_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_para_fifo_prog_empty_negate_cfg_reg[] = { + { "dup_para_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_rsp_fifo_prog_full_assert_cfg_reg[] = { + { "pf_rsp_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_rsp_fifo_prog_full_negate_cfg_reg[] = { + { "pf_rsp_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_rsp_fifo_prog_empty_assert_cfg_reg[] = { + { "pf_rsp_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_rsp_fifo_prog_empty_negate_cfg_reg[] = { + { 
"pf_rsp_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_freeptr_fifo_prog_full_assert_cfg_reg[] = { + { "dup_freeptr_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x7c, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_freeptr_fifo_prog_full_negate_cfg_reg[] = { + { "dup_freeptr_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x7c, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_freeptr_fifo_prog_empty_assert_cfg_reg[] = { + { "dup_freeptr_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x58, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_dup_freeptr_fifo_prog_empty_negate_cfg_reg[] = { + { "dup_freeptr_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x58, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_req_fifo_prog_full_assert_cfg_reg[] = { + { "pf_req_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xc, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_req_fifo_prog_full_negate_cfg_reg[] = { + { "pf_req_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0xc, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_req_fifo_prog_empty_assert_cfg_reg[] = { + { "pf_req_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pf_req_fifo_prog_empty_negate_cfg_reg[] = { + { "pf_req_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_flag_fifo_prog_full_assert_cfg_reg[] = { + { "car_flag_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_flag_fifo_prog_full_negate_cfg_reg[] = { + { "car_flag_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x18, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_flag_fifo_prog_empty_assert_cfg_reg[] = { + { "car_flag_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_car_flag_fifo_prog_empty_negate_cfg_reg[] = { + { "car_flag_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_assert_cfg_reg[] = { + { "ppu_cluster_mf_out_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_negate_cfg_reg[] = { + { "ppu_cluster_mf_out_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cluster_mf_out_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, + 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cluster_mf_out_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, + 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_key_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_key_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x18, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_key_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_key_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x18, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_key_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_key_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_key_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_key_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_random_mod_para_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_random_mod_para_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg_reg[] = { + { 
"ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x4, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_random_mod_result_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x28, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_random_mod_result_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x28, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x4, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_checksum_result_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0xa, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_checksum_result_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0xa, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_checksum_result_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_checksum_result_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_crc_first_para_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0xe, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_crc_first_para_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0xe, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_assert_cfg_reg[] = { + { "ppu_cop_crc_bypass_delay_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 4, + 5, 0xe, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_negate_cfg_reg[] = { + { "ppu_cop_crc_bypass_delay_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 4, + 5, 0xe, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_crc_bypass_delay_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_crc_bypass_delay_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_crc_second_para_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_crc_second_para_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 6, 7, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x30, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x30, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg", + DPP_FIELD_FLAG_RW, 7, 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_multiply_para_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0xa, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_multiply_para_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0xa, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_multiply_para_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_multiply_para_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, + 4, 5, 0x4, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x18, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg", + DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T +g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg", 
+ DPP_FIELD_FLAG_RW, 5, 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_free_global_num_fwft_fifo_prog_full_assert_cfg_reg[] = { + { "free_global_num_fwft_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, + 12, 13, 0xffc, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_free_global_num_fwft_fifo_prog_full_negate_cfg_reg[] = { + { "free_global_num_fwft_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, + 12, 13, 0xffc, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_free_global_num_fwft_fifo_prog_empty_assert_cfg_reg[] = { + { "free_global_num_fwft_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, + 12, 13, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_free_global_num_fwft_fifo_prog_empty_negate_cfg_reg[] = { + { "free_global_num_fwft_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, + 12, 13, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 14, + 15, 0x3ff6, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 14, + 15, 0x3ff6, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 14, + 15, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 14, + 15, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data0_reg[] = { + { "pkt_data0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data1_reg[] = { + { "pkt_data1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data2_reg[] = { + { "pkt_data2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data3_reg[] = { + { "pkt_data3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data4_reg[] = { + { "pkt_data4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data5_reg[] = { + { "pkt_data5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data6_reg[] = { + { "pkt_data6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data7_reg[] = { + { "pkt_data7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data8_reg[] = { + { "pkt_data8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data9_reg[] = { + { "pkt_data9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data10_reg[] = { + { "pkt_data10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data11_reg[] = { + { "pkt_data11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data12_reg[] = { + { "pkt_data12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data13_reg[] = { + { "pkt_data13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data14_reg[] = { + { "pkt_data14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data15_reg[] = { + { "pkt_data15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data16_reg[] = { + { "pkt_data16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data17_reg[] = { + { "pkt_data17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data18_reg[] = { + { "pkt_data18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data19_reg[] = { + { "pkt_data19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data20_reg[] = { + { "pkt_data20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data21_reg[] = { + { "pkt_data21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data22_reg[] = { + { "pkt_data22", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data23_reg[] = { + { "pkt_data23", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data24_reg[] = { + { "pkt_data24", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data25_reg[] = { + { "pkt_data25", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data26_reg[] = { + { "pkt_data26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data27_reg[] = { + { "pkt_data27", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data28_reg[] = { + { "pkt_data28", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data29_reg[] = { + { "pkt_data29", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data30_reg[] = { + { "pkt_data30", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data31_reg[] = { + { "pkt_data31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data32_reg[] = { + { "pkt_data32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data33_reg[] = { + { "pkt_data33", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data34_reg[] = { + { "pkt_data34", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data35_reg[] = { + { "pkt_data35", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data36_reg[] = { + { "pkt_data36", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data37_reg[] = { + { "pkt_data37", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data38_reg[] = { + { "pkt_data38", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data39_reg[] = { + { "pkt_data39", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data40_reg[] = { + { "pkt_data40", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data41_reg[] = { + { "pkt_data41", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data42_reg[] = { + { "pkt_data42", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data43_reg[] = { + { "pkt_data43", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data44_reg[] = { + { "pkt_data44", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data45_reg[] = { + { "pkt_data45", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data46_reg[] = { + { "pkt_data46", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data47_reg[] = { + { "pkt_data47", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data48_reg[] = { + { "pkt_data48", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data49_reg[] = { + { "pkt_data49", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data50_reg[] = { + { "pkt_data50", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data51_reg[] = { + { "pkt_data51", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data52_reg[] = { + { "pkt_data52", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data53_reg[] = { + { "pkt_data53", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data54_reg[] = { + { "pkt_data54", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data55_reg[] = { + { "pkt_data55", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data56_reg[] = { + { "pkt_data56", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data57_reg[] = { + { "pkt_data57", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data58_reg[] = { + { "pkt_data58", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data59_reg[] = { + { "pkt_data59", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data60_reg[] = { + { "pkt_data60", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data61_reg[] = { + { "pkt_data61", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data62_reg[] = { + { "pkt_data62", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data63_reg[] = { + { "pkt_data63", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data64_reg[] = { + { "pkt_data64", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data65_reg[] = { + { "pkt_data65", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data66_reg[] = { + { "pkt_data66", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data67_reg[] = { + { "pkt_data67", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data68_reg[] = { + { "pkt_data68", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data69_reg[] = { + { "pkt_data69", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data70_reg[] = { + { "pkt_data70", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data71_reg[] = { + { "pkt_data71", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data72_reg[] = { + { "pkt_data72", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data73_reg[] = { + { "pkt_data73", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data74_reg[] = { + { "pkt_data74", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data75_reg[] = { + { "pkt_data75", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data76_reg[] = { + { "pkt_data76", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data77_reg[] = { + { "pkt_data77", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data78_reg[] = { + { "pkt_data78", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data79_reg[] = { + { "pkt_data79", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data80_reg[] = { + { "pkt_data80", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data81_reg[] = { + { "pkt_data81", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data82_reg[] = { + { "pkt_data82", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data83_reg[] = { + { "pkt_data83", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data84_reg[] = { + { "pkt_data84", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data85_reg[] = { + { "pkt_data85", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data86_reg[] = { + { "pkt_data86", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data87_reg[] = { + { "pkt_data87", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data88_reg[] = { + { "pkt_data88", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data89_reg[] = { + { "pkt_data89", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data90_reg[] = { + { "pkt_data90", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data91_reg[] = { + { "pkt_data91", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data92_reg[] = { + { "pkt_data92", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data93_reg[] = { + { "pkt_data93", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data94_reg[] = { + { "pkt_data94", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data95_reg[] = { + { "pkt_data95", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data96_reg[] = { + { "pkt_data96", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data97_reg[] = { + { "pkt_data97", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data98_reg[] = { + { "pkt_data98", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data99_reg[] = { + { "pkt_data99", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_pkt_data100_reg[] = { + { "pkt_data100", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data101_reg[] = { + { "pkt_data101", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data102_reg[] = { + { "pkt_data102", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data103_reg[] = { + { "pkt_data103", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data104_reg[] = { + { "pkt_data104", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data105_reg[] = { + { "pkt_data105", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data106_reg[] = { + { "pkt_data106", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data107_reg[] = { + { "pkt_data107", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data108_reg[] = { + { "pkt_data108", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data109_reg[] = { + { "pkt_data109", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data110_reg[] = { + { "pkt_data110", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data111_reg[] = { + { "pkt_data111", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data112_reg[] = { + { "pkt_data112", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data113_reg[] = { + { "pkt_data113", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data114_reg[] = { + { "pkt_data114", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data115_reg[] = { + { "pkt_data115", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data116_reg[] = { + { "pkt_data116", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data117_reg[] = { + { "pkt_data117", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data118_reg[] = { + { "pkt_data118", DPP_FIELD_FLAG_RO, 31, 
32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data119_reg[] = { + { "pkt_data119", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data120_reg[] = { + { "pkt_data120", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data121_reg[] = { + { "pkt_data121", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data122_reg[] = { + { "pkt_data122", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data123_reg[] = { + { "pkt_data123", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data124_reg[] = { + { "pkt_data124", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data125_reg[] = { + { "pkt_data125", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data126_reg[] = { + { "pkt_data126", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_pkt_data127_reg[] = { + { "pkt_data127", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr0_reg[] = { + { "spr0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr1_reg[] = { + { "spr1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr2_reg[] = { + { "spr2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr3_reg[] = { + { "spr3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr4_reg[] = { + { "spr4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr5_reg[] = { + { "spr5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr6_reg[] = { + { "spr6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr7_reg[] = { + { "spr7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr8_reg[] = { + { "spr8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr9_reg[] = { + { "spr9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr10_reg[] = { + { "spr10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr11_reg[] = { + { "spr11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr12_reg[] = { + { "spr12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr13_reg[] = { + { "spr13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr14_reg[] = { + { "spr14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr15_reg[] = { + { "spr15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr16_reg[] = { + { "spr16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr17_reg[] = { + { "spr17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr18_reg[] = { + { "spr18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr19_reg[] = { + { "spr19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr20_reg[] = { + { "spr20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr21_reg[] = { + { "spr21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr22_reg[] = { + { "spr22", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr23_reg[] = { + { "spr23", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr24_reg[] = { + { "spr24", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr25_reg[] = { + { "spr25", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr26_reg[] = { + { "spr26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr27_reg[] = { + { "spr27", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr28_reg[] = { + { "spr28", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr29_reg[] = { + { "spr29", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr30_reg[] = { + { "spr30", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_spr31_reg[] = { + { "spr31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 
}, +}; +DPP_FIELD_T g_ppu_ppu_rsp0_reg[] = { + { "rsp0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp1_reg[] = { + { "rsp1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp2_reg[] = { + { "rsp2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp3_reg[] = { + { "rsp3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp4_reg[] = { + { "rsp4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp5_reg[] = { + { "rsp5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp6_reg[] = { + { "rsp6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp7_reg[] = { + { "rsp7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp8_reg[] = { + { "rsp8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp9_reg[] = { + { "rsp9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp10_reg[] = { + { "rsp10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp11_reg[] = { + { "rsp11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp12_reg[] = { + { "rsp12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp13_reg[] = { + { "rsp13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp14_reg[] = { + { "rsp14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp15_reg[] = { + { "rsp15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp16_reg[] = { + { "rsp16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp17_reg[] = { + { "rsp17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp18_reg[] = { + { "rsp18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp19_reg[] = { + { "rsp19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp20_reg[] = { + { "rsp20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_ppu_rsp21_reg[] = { + { "rsp21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp22_reg[] = { + { "rsp22", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp23_reg[] = { + { "rsp23", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp24_reg[] = { + { "rsp24", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp25_reg[] = { + { "rsp25", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp26_reg[] = { + { "rsp26", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp27_reg[] = { + { "rsp27", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp28_reg[] = { + { "rsp28", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp29_reg[] = { + { "rsp29", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp30_reg[] = { + { "rsp30", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_rsp31_reg[] = { + { "rsp31", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key0_reg[] = { + { "key0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key1_reg[] = { + { "key1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key2_reg[] = { + { "key2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key3_reg[] = { + { "key3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key4_reg[] = { + { "key4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key5_reg[] = { + { "key5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key6_reg[] = { + { "key6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key7_reg[] = { + { "key7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key8_reg[] = { + { "key8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key9_reg[] = { + { "key9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key10_reg[] = 
{ + { "key10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key11_reg[] = { + { "key11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key12_reg[] = { + { "key12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key13_reg[] = { + { "key13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key14_reg[] = { + { "key14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key15_reg[] = { + { "key15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key16_reg[] = { + { "key16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key17_reg[] = { + { "key17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key18_reg[] = { + { "key18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_key19_reg[] = { + { "key19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_ppu_flag_reg[] = { + { "me_num", DPP_FIELD_FLAG_RO, 26, 3, 0x0, 0x0 }, + { "thread_num", DPP_FIELD_FLAG_RO, 19, 4, 0x0, 0x0 }, + { "flag", DPP_FIELD_FLAG_RO, 11, 12, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_flag_reg[] = { + { "me7_interrupt_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "me6_interrupt_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "me5_interrupt_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "me4_interrupt_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "me3_interrupt_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "me2_interrupt_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "me1_interrupt_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "me0_interrupt_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_bp_instr_l_reg[] = { + { "bp_instr_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_bp_instr_h_reg[] = { + { "bp_instr_h", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_bp_addr_reg[] = { + { "bp_addr", DPP_FIELD_FLAG_RO, 14, 15, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_ppu_cluster_drr_reg[] = { + { "drr", DPP_FIELD_FLAG_RO, 4, 5, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_dsr_reg[] = { + { "dsr", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_dbg_rtl_date_reg[] = { + { "dbg_rtl_date", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_continue_reg[] = { + { "me_continue", DPP_FIELD_FLAG_WO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_step_reg[] = { + { "me_step", DPP_FIELD_FLAG_WO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_refresh_reg[] = { + { "me_refresh", DPP_FIELD_FLAG_WO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_drr_clr_reg[] = { + { "drr_clr", DPP_FIELD_FLAG_WO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_busy_thresold_reg[] = { + { "me_busy_thresold", DPP_FIELD_FLAG_RW, 15, 16, 0x7fff, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_sta_reg[] = { + { "me7_interrupt_sta", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "me6_interrupt_sta", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "me5_interrupt_sta", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "me4_interrupt_sta", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "me3_interrupt_sta", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "me2_interrupt_sta", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "me1_interrupt_sta", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "me0_interrupt_sta", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_me_fifo_mask_l_reg[] = { + { "me_free_pkt_q_overflow_mask", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "me_free_pkt_q_underflow_mask", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "me_free_thread_q_overflow_mask", DPP_FIELD_FLAG_RW, 29, 1, 0x1, + 0x0 }, + { "me_free_thread_q_underflow_mask", DPP_FIELD_FLAG_RW, 28, 1, 0x1, + 0x0 }, + { "me_pkt_in_overflow_mask", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "me_pkt_in_underflow_mask", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "me_rdy_q_overflow_mask", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "me_rdy_q_underflow_mask", 
DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "me_pkt_out_q_overflow_mask", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "me_pkt_out_q_underflow_mask", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "me_continue_q_overflow_mask", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "me_continue_q_underflow_mask", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "me_esrh_q_overflow_mask", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "me_esrh_q_underflow_mask", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "me_isrh_q_overflow_mask", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "me_isrh_q_underflow_mask", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "me_cache_miss_q_overflow_mask", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "me_cache_miss_q_underflow_mask", DPP_FIELD_FLAG_RW, 14, 1, 0x1, + 0x0 }, + { "me_base_q_u0_overflow_mask", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "me_base_q_u0_underflow_mask", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "me_base_q_u1_overflow_mask", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "me_base_q_u1_underflow_mask", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "me_base_q_u2_overflow_mask", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "me_base_q_u2_underflow_mask", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "me_base_q_u3_overflow_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "me_base_q_u3_underflow_mask", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "me_reg_pc_q_overflow_mask", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "me_reg_pc_q_underflow_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "me_branch_q_overflow_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "me_branch_q_underflow_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "me_pkt_base_q_overflow_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "me_pkt_base_q_underflow_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_me_fifo_mask_h_reg[] = { + { "me_except_refetch_pc_overflow_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x1, + 0x0 }, + { "me_except_refetch_pc_underflow_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x1, + 0x0 }, +}; 
+DPP_FIELD_T g_ppu_cluster_me_fifo_interrupt_flag_l_reg[] = { + { "me_free_pkt_q_overflow_flag", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "me_free_pkt_q_underflow_flag", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "me_free_thread_q_overflow_flag", DPP_FIELD_FLAG_RC, 29, 1, 0x0, + 0x0 }, + { "me_free_thread_q_underflow_flag", DPP_FIELD_FLAG_RC, 28, 1, 0x0, + 0x0 }, + { "me_pkt_in_overflow_flag", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "me_pkt_in_underflow_flag", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "me_rdy_q_overflow_flag", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "me_rdy_q_underflow_flag", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "me_pkt_out_q_overflow_flag", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "me_pkt_out_q_underflow_flag", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "me_continue_q_overflow_flag", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "me_continue_q_underflow_flag", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "me_esrh_q_overflow_flag", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "me_esrh_q_underflow_flag", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "me_isrh_q_overflow_flag", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "me_isrh_q_underflow_flag", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "me_cache_miss_q_overflow_flag", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "me_cache_miss_q_underflow_flag", DPP_FIELD_FLAG_RC, 14, 1, 0x0, + 0x0 }, + { "me_base_q_u0_overflow_flag", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "me_base_q_u0_underflow_flag", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "me_base_q_u1_overflow_flag", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "me_base_q_u1_underflow_flag", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "me_base_q_u2_overflow_flag", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "me_base_q_u2_underflow_flag", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "me_base_q_u3_overflow_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "me_base_q_u3_underflow_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "me_reg_pc_q_overflow_flag", DPP_FIELD_FLAG_RC, 5, 1, 
0x0, 0x0 }, + { "me_reg_pc_q_underflow_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "me_branch_q_overflow_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "me_branch_q_underflow_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "me_pkt_base_q_overflow_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "me_pkt_base_q_underflow_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_fifo_interrupt_flag_h_reg[] = { + { "me_except_refetch_pc_overflow_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, + 0x0 }, + { "me_except_refetch_pc_underflow_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_fifo_interrupt_sta_l_reg[] = { + { "me_free_pkt_q_overflow_sta", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "me_free_pkt_q_underflow_sta", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "me_free_thread_q_overflow_sta", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "me_free_thread_q_underflow_sta", DPP_FIELD_FLAG_RO, 28, 1, 0x0, + 0x0 }, + { "me_pkt_in_overflow_sta", DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "me_pkt_in_underflow_sta", DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "me_rdy_q_overflow_sta", DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "me_rdy_q_underflow_sta", DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "me_pkt_out_q_overflow_sta", DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "me_pkt_out_q_underflow_sta", DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "me_continue_q_overflow_sta", DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "me_continue_q_underflow_sta", DPP_FIELD_FLAG_RO, 20, 1, 0x0, 0x0 }, + { "me_esrh_q_overflow_sta", DPP_FIELD_FLAG_RO, 19, 1, 0x0, 0x0 }, + { "me_esrh_q_underflow_sta", DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "me_isrh_q_overflow_sta", DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "me_isrh_q_underflow_sta", DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "me_cache_miss_q_overflow_sta", DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "me_cache_miss_q_underflow_sta", DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "me_base_q_u0_overflow_sta", DPP_FIELD_FLAG_RO, 13, 
1, 0x0, 0x0 }, + { "me_base_q_u0_underflow_sta", DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { "me_base_q_u1_overflow_sta", DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "me_base_q_u1_underflow_sta", DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "me_base_q_u2_overflow_sta", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "me_base_q_u2_underflow_sta", DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "me_base_q_u3_overflow_sta", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "me_base_q_u3_underflow_sta", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "me_reg_pc_q_overflow_sta", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "me_reg_pc_q_underflow_sta", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "me_branch_q_overflow_sta", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "me_branch_q_underflow_sta", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "me_pkt_base_q_overflow_sta", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "me_pkt_base_q_underflow_sta", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_fifo_interrupt_sta_h_reg[] = { + { "me_except_refetch_pc_overflow_sta", DPP_FIELD_FLAG_RO, 1, 1, 0x0, + 0x0 }, + { "me_except_refetch_pc_underflow_sta", DPP_FIELD_FLAG_RO, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_mask_l_reg[] = { + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_mask", + DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_mask", + DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_mask", + DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_mask", + DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_mask", + DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_mask", + DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_mask", + DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { 
"ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_mask", + DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_mask", + DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_mask", + DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_overflow_mask", DPP_FIELD_FLAG_RW, 20, 1, + 0x1, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_underflow_mask", DPP_FIELD_FLAG_RW, 19, 1, + 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_mask", + DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_mask", + DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_mask", + DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_mask", + DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_mask", + DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_mask", + DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_mask", + DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_mask", + 
DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 2, 1, 0x1, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_mask_h_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, 18, + 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 17, 1, 0x1, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, + 16, 1, 0x1, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_mask", + DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "ese_rsp_ram_free_ptr_u0_overflow_mask", DPP_FIELD_FLAG_RW, 7, 1, 0x1, + 0x0 }, + { "ese_rsp_ram_free_ptr_u0_underflow_mask", DPP_FIELD_FLAG_RW, 6, 1, + 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { 
"ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_flag_l_reg[] = { + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag", + DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag", + DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag", + DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag", + DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag", + DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag", + DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_flag", + DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_flag", + DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_flag", + DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_flag", + DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_overflow_flag", DPP_FIELD_FLAG_RC, 20, 1, + 0x0, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_underflow_flag", DPP_FIELD_FLAG_RC, 19, 1, + 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 18, 1, 0x0, 
0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag", + DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag", + DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag", + DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_flag", + DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_flag", + DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 2, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_flag_h_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_overflow_flag", 
DPP_FIELD_FLAG_RC, 18, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 17, 1, 0x0, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, + 16, 1, 0x0, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_flag", + DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ese_rsp_ram_free_ptr_u0_overflow_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, + 0x0 }, + { "ese_rsp_ram_free_ptr_u0_underflow_flag", DPP_FIELD_FLAG_RC, 6, 1, + 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_flag", + DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_flag", + DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_stat_l_reg[] = { + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_stat", + DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 
}, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_stat", + DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_stat", + DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_stat", + DPP_FIELD_FLAG_RO, 28, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_stat", + DPP_FIELD_FLAG_RO, 27, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_stat", + DPP_FIELD_FLAG_RO, 26, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_stat", + DPP_FIELD_FLAG_RO, 25, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_stat", + DPP_FIELD_FLAG_RO, 24, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_stat", + DPP_FIELD_FLAG_RO, 23, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_stat", + DPP_FIELD_FLAG_RO, 22, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_underflow_stat", + DPP_FIELD_FLAG_RO, 21, 1, 0x0, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_overflow_stat", DPP_FIELD_FLAG_RO, 20, 1, + 0x0, 0x0 }, + { "ise_rsp_ram_free_ptr_u0_underflow_stat", DPP_FIELD_FLAG_RO, 19, 1, + 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_stat", + DPP_FIELD_FLAG_RO, 18, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_stat", + DPP_FIELD_FLAG_RO, 17, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_stat", + DPP_FIELD_FLAG_RO, 16, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_stat", + DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_stat", + DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_stat", + DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_overflow_stat", + DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { 
"ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u3_underflow_stat", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_overflow_stat", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u4_underflow_stat", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_overflow_stat", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u5_underflow_stat", + DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_overflow_stat", + DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u6_underflow_stat", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_overflow_stat", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_ptr_fwft_fifo_128x7_wrapper_u7_underflow_stat", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_underflow_stat", DPP_FIELD_FLAG_RO, + 2, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_overflow_stat", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_fwft_fifo_128x79_wrapper_underflow_stat", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_1200m_cluster_mex_fifo_stat_h_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_overflow_stat", DPP_FIELD_FLAG_RO, 18, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_overflow_stat", DPP_FIELD_FLAG_RO, + 17, 1, 0x0, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_overflow_stat", DPP_FIELD_FLAG_RO, + 16, 1, 0x0, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_underflow_stat", + DPP_FIELD_FLAG_RO, 15, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_overflow_stat", + DPP_FIELD_FLAG_RO, 14, 1, 0x0, 0x0 }, + { "ppu_pbu_mcode_pf_rsp_fifo_32x13_wrapper_underflow_stat", + DPP_FIELD_FLAG_RO, 13, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fifo_32x77_wrapper_overflow_stat", + DPP_FIELD_FLAG_RO, 12, 1, 0x0, 0x0 }, + { 
"ppu_coprocess_rsp_fifo_32x77_wrapper_underflow_stat", + DPP_FIELD_FLAG_RO, 11, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_overflow_stat", + DPP_FIELD_FLAG_RO, 10, 1, 0x0, 0x0 }, + { "ppu_coprocess_rsp_fwft_fifo_128x78_wrapper_underflow_stat", + DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_underflow_stat", + DPP_FIELD_FLAG_RO, 8, 1, 0x0, 0x0 }, + { "ese_rsp_ram_free_ptr_u0_overflow_stat", DPP_FIELD_FLAG_RO, 7, 1, 0x0, + 0x0 }, + { "ese_rsp_ram_free_ptr_u0_underflow_stat", DPP_FIELD_FLAG_RO, 6, 1, + 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_overflow_stat", + DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u0_underflow_stat", + DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_overflow_stat", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u1_underflow_stat", + DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_overflow_stat", + DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_ptr_fwft_fifo_128x7_wrapper_u2_underflow_stat", + DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_statics_wb_exception_cfg_reg[] = { + { "csr_statics_wb_exception_code5", DPP_FIELD_FLAG_RW, 22, 3, 0x7, + 0x0 }, + { "csr_statics_wb_exception_code4", DPP_FIELD_FLAG_RW, 18, 3, 0x6, + 0x0 }, + { "csr_statics_wb_exception_code3", DPP_FIELD_FLAG_RW, 14, 3, 0x5, + 0x0 }, + { "csr_statics_wb_exception_code2", DPP_FIELD_FLAG_RW, 10, 3, 0x4, + 0x0 }, + { "csr_statics_wb_exception_code1", DPP_FIELD_FLAG_RW, 6, 3, 0x3, 0x0 }, + { "csr_statics_wb_exception_code0", DPP_FIELD_FLAG_RW, 2, 3, 0x2, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_thread_switch_en_reg[] = { + { "thread_switch_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_is_me_not_idle_reg[] = { + { "me7_is_not_idle", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "me6_is_not_idle", DPP_FIELD_FLAG_RO, 
6, 1, 0x0, 0x0 }, + { "me5_is_not_idle", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "me4_is_not_idle", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "me3_is_not_idle", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "me2_is_not_idle", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "me1_is_not_idle", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "me0_is_not_idle", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cluster_mf_in_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cluster_mf_in_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_rsp_afifo_prog_empty_assert_cfg_reg[] = { + { "ese_rsp_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0xe, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_rsp_afifo_prog_empty_negate_cfg_reg[] = { + { "ese_rsp_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0xe, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ise_rsp_afifo_prog_empty_assert_cfg_reg[] = { + { "ise_rsp_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x20, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ise_rsp_afifo_prog_empty_negate_cfg_reg[] = { + { "ise_rsp_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x20, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x6, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg_reg[] = { + { 
"ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x6, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg_reg[] = { + { "ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_sta_rsp_afifo_prog_empty_assert_cfg_reg[] = { + { "sta_rsp_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_sta_rsp_afifo_prog_empty_negate_cfg_reg[] = { + { "sta_rsp_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cop_rsp_fifo_prog_full_assert_cfg_reg[] = { + { "cop_rsp_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x17, + 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_cop_rsp_fifo_prog_full_negate_cfg_reg[] = { + { "cop_rsp_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x17, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cop_rsp_fifo_prog_empty_assert_cfg_reg[] = { + { "cop_rsp_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cop_rsp_fifo_prog_empty_negate_cfg_reg[] = { + { "cop_rsp_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, 0x4, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mcode_pf_rsp_fifo_prog_full_assert_cfg_reg[] = { + { "mcode_pf_rsp_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x18, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mcode_pf_rsp_fifo_prog_full_negate_cfg_reg[] = { + { "mcode_pf_rsp_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x18, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_assert_cfg_reg[] = { + { "mcode_pf_rsp_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_negate_cfg_reg[] = { + { "mcode_pf_rsp_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg_reg[] = { + { "ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg_reg[] = { + { "ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 7, 8, + 0x78, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg_reg[] = { + { "ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg_reg[] = { + { "ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 7, + 8, 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ise_key_afifo_prog_full_assert_cfg_reg[] = { + { "ppu_ise_key_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_ppu_ise_key_afifo_prog_full_negate_cfg_reg[] = { + { "ppu_ise_key_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ese_key_afifo_prog_full_assert_cfg_reg[] = { + { "ppu_ese_key_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ese_key_afifo_prog_full_negate_cfg_reg[] = { + { "ppu_ese_key_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_key_afifo_prog_full_assert_cfg_reg[] = { + { "ppu_sta_key_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_key_afifo_prog_full_negate_cfg_reg[] = { + { "ppu_sta_key_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x14, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_int_600m_cluster_mex_fifo_mask_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, 6, + 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 5, 1, 0x1, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_underflow_mask", DPP_FIELD_FLAG_RW, + 4, 1, 0x1, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_overflow_mask", DPP_FIELD_FLAG_RW, 0, + 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_mex_fifo_600m_interrupt_flag_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, 6, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 5, 1, 0x0, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_underflow_flag", DPP_FIELD_FLAG_RC, + 4, 1, 0x0, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_flag", + DPP_FIELD_FLAG_RC, 3, 1, 0x0, 
0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_flag", + DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_overflow_flag", DPP_FIELD_FLAG_RC, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_mex_fifo_600m_interrupt_sta_reg[] = { + { "ppu_se_key_afifo_32x54_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, 6, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, 5, + 1, 0x0, 0x0 }, + { "ppu_sta_key_afifo_32x110_wrapper_underflow_sta", DPP_FIELD_FLAG_RO, + 4, 1, 0x0, 0x0 }, + { "ppu_cluster_mf_in_afifo_32x2048_wrapper_overflow_sta", + DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ppu_ese_rsp_afifo_64x271_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 2, 1, 0x0, 0x0 }, + { "ppu_ise_rsp_afifo_64x143_wrapper_u0_overflow_sta", DPP_FIELD_FLAG_RO, + 1, 1, 0x0, 0x0 }, + { "ppu_sta_rsp_afifo_64x79_wrapper_overflow_sta", DPP_FIELD_FLAG_RO, 0, + 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_cnt_cfg_reg[] = { + { "csr_count_overflow_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "csr_count_rd_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T +g_ppu_cluster_int_600m_cluster_mex_ram_ecc_error_interrupt_mask_reg[] = { + { "ppu_sta_key_ram_1r1w_32x110_ecc_double_err_mask", + DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_double_err_mask", + DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_double_err_mask", + DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "ppu_sta_key_ram_1r1w_32x110_ecc_single_err_flag", + DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_single_err_mask", + DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_single_err_mask", + DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_flag_reg[] = { + { "ppu_sta_key_ram_1r1w_32x110_ecc_double_err_flag", 
DPP_FIELD_FLAG_RC, + 5, 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_double_err_flag", DPP_FIELD_FLAG_RC, 4, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_double_err_flag", DPP_FIELD_FLAG_RC, 3, 1, + 0x0, 0x0 }, + { "ppu_sta_key_ram_1r1w_32x110_ecc_single_err_flag", DPP_FIELD_FLAG_RC, + 2, 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_single_err_flag", DPP_FIELD_FLAG_RC, 1, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_single_err_flag", DPP_FIELD_FLAG_RC, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_sta_reg[] = { + { "ppu_sta_key_ram_1r1w_32x110_ecc_double_err_stat", DPP_FIELD_FLAG_RO, + 5, 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_double_err_stat", DPP_FIELD_FLAG_RO, 4, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_double_err_stat", DPP_FIELD_FLAG_RO, 3, 1, + 0x0, 0x0 }, + { "ppu_sta_key_ram_1r1w_32x110_ecc_single_err_stat", DPP_FIELD_FLAG_RO, + 2, 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x665_ecc_single_err_stat", DPP_FIELD_FLAG_RO, 1, + 1, 0x0, 0x0 }, + { "ppu_se_key_afifo_32x54_ecc_single_err_stat", DPP_FIELD_FLAG_RO, 0, 1, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_assert_cfg_reg[] = { + { "ppu_cluster_mf_in_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_negate_cfg_reg[] = { + { "ppu_cluster_mf_in_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 5, + 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_rsp_afifo_prog_full_assert_cfg_reg[] = { + { "ese_rsp_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x2c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_rsp_afifo_prog_full_negate_cfg_reg[] = { + { "ese_rsp_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x2c, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ise_rsp_afifo_prog_full_assert_cfg_reg[] = { + { "ise_rsp_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x29, + 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_ise_rsp_afifo_prog_full_negate_cfg_reg[] = { + { "ise_rsp_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x29, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_sta_rsp_afifo_prog_full_assert_cfg_reg[] = { + { "sta_rsp_afifo_prog_full_assert_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x27, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_sta_rsp_afifo_prog_full_negate_cfg_reg[] = { + { "sta_rsp_afifo_prog_full_negate_cfg", DPP_FIELD_FLAG_RW, 6, 7, 0x27, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ise_key_afifo_prog_empty_assert_cfg_reg[] = { + { "ppu_ise_key_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ise_key_afifo_prog_empty_negate_cfg_reg[] = { + { "ppu_ise_key_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ese_key_afifo_prog_empty_assert_cfg_reg[] = { + { "ppu_ese_key_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_ese_key_afifo_prog_empty_negate_cfg_reg[] = { + { "ppu_ese_key_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_key_afifo_prog_empty_assert_cfg_reg[] = { + { "ppu_sta_key_afifo_prog_empty_assert_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_sta_key_afifo_prog_empty_negate_cfg_reg[] = { + { "ppu_sta_key_afifo_prog_empty_negate_cfg", DPP_FIELD_FLAG_RW, 5, 6, + 0x4, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_vld_cnt_h_reg[] = { + { "ppu_cluster_mf_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ppu_cluster_mf_vld_cnt_l_reg[] = { + { "ppu_cluster_mf_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ise_key_out_vld_cnt_reg[] = { + { "cluster_ise_key_out_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ise_cluster_rsp_in_vld_cnt_reg[] = { + { "ise_cluster_rsp_in_vld_cnt", DPP_FIELD_FLAG_RO, 
31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ese_key_out_vld_cnt_reg[] = { + { "cluster_ese_key_out_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_cluster_rsp_in_vld_cnt_reg[] = { + { "ese_cluster_rsp_in_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_stat_cmd_vld_cnt_reg[] = { + { "cluster_stat_cmd_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_stat_cluster_rsp_vld_cnt_reg[] = { + { "stat_cluster_rsp_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_debug_key_vld_cnt_reg[] = { + { "mex_debug_key_vld_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ise_cluster_key_fc_cnt_reg[] = { + { "ise_cluster_key_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_ese_cluster_key_fc_cnt_reg[] = { + { "ese_cluster_key_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ise_rsp_fc_cnt_reg[] = { + { "cluster_ise_rsp_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ese_rsp_fc_cnt_reg[] = { + { "cluster_ese_rsp_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_stat_cluster_cmd_fc_cnt_reg[] = { + { "stat_cluster_cmd_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_stat_rsp_fc_cnt_reg[] = { + { "cluster_stat_rsp_fc_cnt", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ppu_mf_vld_cnt_l_reg[] = { + { "cluster_ppu_mf_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_ppu_mf_vld_cnt_h_reg[] = { + { "cluster_ppu_mf_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_cop_key_vld_cnt_l_reg[] = { + { "cluster_cop_key_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cluster_cop_key_vld_cnt_h_reg[] = { + { 
"cluster_cop_key_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cop_cluster_rsp_vld_cnt_l_reg[] = { + { "cop_cluster_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_cop_cluster_rsp_vld_cnt_h_reg[] = { + { "cop_cluster_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_sop_cnt_l_reg[] = { + { "mex_me_pkt_in_sop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_sop_cnt_h_reg[] = { + { "mex_me_pkt_in_sop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_eop_cnt_l_reg[] = { + { "mex_me_pkt_in_eop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_eop_cnt_h_reg[] = { + { "mex_me_pkt_in_eop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_vld_cnt_l_reg[] = { + { "mex_me_pkt_in_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_pkt_in_vld_cnt_h_reg[] = { + { "mex_me_pkt_in_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_sop_cnt_l_reg[] = { + { "me_mex_pkt_out_sop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_sop_cnt_h_reg[] = { + { "me_mex_pkt_out_sop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_eop_cnt_l_reg[] = { + { "me_mex_pkt_out_eop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_eop_cnt_h_reg[] = { + { "me_mex_pkt_out_eop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_vld_cnt_l_reg[] = { + { "me_mex_pkt_out_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_pkt_out_vld_cnt_h_reg[] = { + { "me_mex_pkt_out_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_me_mex_i_key_out_sop_cnt_l_reg[] = { + { "me_mex_i_key_out_sop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_i_key_out_sop_cnt_h_reg[] = { + { "me_mex_i_key_out_sop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_i_key_out_eop_cnt_l_reg[] = { + { "me_mex_i_key_out_eop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_i_key_out_eop_cnt_h_reg[] = { + { "me_mex_i_key_out_eop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_i_key_out_vld_cnt_l_reg[] = { + { "me_mex_i_key_out_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_i_key_out_vld_cnt_h_reg[] = { + { "me_mex_i_key_out_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_sop_cnt_l_reg[] = { + { "me_mex_e_key_out_sop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_sop_cnt_h_reg[] = { + { "me_mex_e_key_out_sop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_eop_cnt_l_reg[] = { + { "me_mex_e_key_out_eop_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_eop_cnt_h_reg[] = { + { "me_mex_e_key_out_eop_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_vld_cnt_l_reg[] = { + { "me_mex_e_key_out_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_e_key_out_vld_cnt_h_reg[] = { + { "me_mex_e_key_out_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_ise_key_vld_cnt_l_reg[] = { + { "me_mex_demux_ise_key_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_ise_key_vld_cnt_h_reg[] = { + { "me_mex_demux_ise_key_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_me_mex_demux_ese_key_vld_cnt_l_reg[] = { + { "me_mex_demux_ese_key_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_ese_key_vld_cnt_h_reg[] = { + { "me_mex_demux_ese_key_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_sta_key_vld_cnt_l_reg[] = { + { "me_mex_demux_sta_key_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_sta_key_vld_cnt_h_reg[] = { + { "me_mex_demux_sta_key_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_cop_key_vld_cnt_l_reg[] = { + { "me_mex_demux_cop_key_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_mex_demux_cop_key_vld_cnt_h_reg[] = { + { "me_mex_demux_cop_key_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_l_reg[] = { + { "mex_me_demux_ise_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_h_reg[] = { + { "mex_me_demux_ise_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_l_reg[] = { + { "mex_me_demux_ese_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_h_reg[] = { + { "mex_me_demux_ese_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_l_reg[] = { + { "mex_me_demux_sta_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_h_reg[] = { + { "mex_me_demux_sta_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_l_reg[] = { + { "mex_me_demux_cop_rsp_vld_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T 
g_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_h_reg[] = { + { "mex_me_demux_cop_rsp_vld_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code0_cnt_l_reg[] = { + { "me_exception_code0_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code0_cnt_h_reg[] = { + { "me_exception_code0_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code1_cnt_l_reg[] = { + { "me_exception_code1_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code1_cnt_h_reg[] = { + { "me_exception_code1_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code2_cnt_l_reg[] = { + { "me_exception_code2_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code2_cnt_h_reg[] = { + { "me_exception_code2_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code3_cnt_l_reg[] = { + { "me_exception_code3_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code3_cnt_h_reg[] = { + { "me_exception_code3_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code4_cnt_l_reg[] = { + { "me_exception_code4_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code4_cnt_h_reg[] = { + { "me_exception_code4_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code5_cnt_l_reg[] = { + { "me_exception_code5_cnt_l", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ppu_cluster_me_exception_code5_cnt_h_reg[] = { + { "me_exception_code5_cnt_h", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ppu_soft_rst_reg[] = { + { "ppu_soft_rst", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ept_flag_reg[] = { + { "ept_flag", DPP_FIELD_FLAG_RO, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T 
g_se_cfg_ddr_key_lk0_3_reg[] = { + { "ddr_key_lk0_3", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk0_2_reg[] = { + { "ddr_key_lk0_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk0_1_reg[] = { + { "ddr_key_lk0_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk0_0_reg[] = { + { "ddr_key_lk0_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk1_3_reg[] = { + { "ddr_key_lk1_3", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk1_2_reg[] = { + { "ddr_key_lk1_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk1_1_reg[] = { + { "ddr_key_lk1_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_ddr_key_lk1_0_reg[] = { + { "ddr_key_lk1_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_18_reg[] = { + { "hash_key_lk0_18", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_17_reg[] = { + { "hash_key_lk0_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_16_reg[] = { + { "hash_key_lk0_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_15_reg[] = { + { "hash_key_lk0_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_14_reg[] = { + { "hash_key_lk0_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_13_reg[] = { + { "hash_key_lk0_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_12_reg[] = { + { "hash_key_lk0_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_11_reg[] = { + { "hash_key_lk0_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_10_reg[] = { + { "hash_key_lk0_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_9_reg[] = { + { "hash_key_lk0_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_8_reg[] = { + { "hash_key_lk0_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_7_reg[] = { + { "hash_key_lk0_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_6_reg[] = { + { "hash_key_lk0_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_5_reg[] = { + { "hash_key_lk0_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_4_reg[] = { + { "hash_key_lk0_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_3_reg[] = { + { "hash_key_lk0_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_2_reg[] = { + { "hash_key_lk0_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_1_reg[] = { + { "hash_key_lk0_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk0_0_reg[] = { + { "hash_key_lk0_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_18_reg[] = { + { "hash_key_lk1_18", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_17_reg[] = { + { "hash_key_lk1_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_16_reg[] = { + { "hash_key_lk1_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_15_reg[] = { + { "hash_key_lk1_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_14_reg[] = { + { "hash_key_lk1_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_13_reg[] = { + { "hash_key_lk1_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_12_reg[] = { + { "hash_key_lk1_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_11_reg[] = { + { "hash_key_lk1_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_10_reg[] = { + { "hash_key_lk1_10", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_9_reg[] = { + { "hash_key_lk1_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_8_reg[] = { + { "hash_key_lk1_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_7_reg[] = { + { "hash_key_lk1_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_6_reg[] = { + { "hash_key_lk1_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_5_reg[] = { + { "hash_key_lk1_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_4_reg[] = { + { "hash_key_lk1_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_3_reg[] = { + { "hash_key_lk1_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_2_reg[] = { + { "hash_key_lk1_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_1_reg[] = { + { "hash_key_lk1_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk1_0_reg[] = { + { "hash_key_lk1_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_18_reg[] = { + { "hash_key_lk2_18", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_17_reg[] = { + { "hash_key_lk2_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_16_reg[] = { + { "hash_key_lk2_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_15_reg[] = { + { "hash_key_lk2_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_14_reg[] = { + { "hash_key_lk2_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_13_reg[] = { + { "hash_key_lk2_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_12_reg[] = { + { "hash_key_lk2_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_cfg_hash_key_lk2_11_reg[] = { + { "hash_key_lk2_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_10_reg[] = { + { "hash_key_lk2_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_9_reg[] = { + { "hash_key_lk2_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_8_reg[] = { + { "hash_key_lk2_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_7_reg[] = { + { "hash_key_lk2_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_6_reg[] = { + { "hash_key_lk2_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_5_reg[] = { + { "hash_key_lk2_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_4_reg[] = { + { "hash_key_lk2_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_3_reg[] = { + { "hash_key_lk2_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_2_reg[] = { + { "hash_key_lk2_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_1_reg[] = { + { "hash_key_lk2_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk2_0_reg[] = { + { "hash_key_lk2_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_18_reg[] = { + { "hash_key_lk3_18", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_17_reg[] = { + { "hash_key_lk3_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_16_reg[] = { + { "hash_key_lk3_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_15_reg[] = { + { "hash_key_lk3_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_14_reg[] = { + { "hash_key_lk3_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_13_reg[] = { + { "hash_key_lk3_13", DPP_FIELD_FLAG_RO, 31, 
32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_12_reg[] = { + { "hash_key_lk3_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_11_reg[] = { + { "hash_key_lk3_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_10_reg[] = { + { "hash_key_lk3_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_9_reg[] = { + { "hash_key_lk3_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_8_reg[] = { + { "hash_key_lk3_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_7_reg[] = { + { "hash_key_lk3_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_6_reg[] = { + { "hash_key_lk3_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_5_reg[] = { + { "hash_key_lk3_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_4_reg[] = { + { "hash_key_lk3_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_3_reg[] = { + { "hash_key_lk3_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_2_reg[] = { + { "hash_key_lk3_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_1_reg[] = { + { "hash_key_lk3_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_hash_key_lk3_0_reg[] = { + { "hash_key_lk3_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_6_reg[] = { + { "lpm_key_lk0_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_5_reg[] = { + { "lpm_key_lk0_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_4_reg[] = { + { "lpm_key_lk0_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_3_reg[] = { + { "lpm_key_lk0_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_2_reg[] = { + { "lpm_key_lk0_2", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_1_reg[] = { + { "lpm_key_lk0_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk0_0_reg[] = { + { "lpm_key_lk0_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_6_reg[] = { + { "lpm_key_lk1_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_5_reg[] = { + { "lpm_key_lk1_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_4_reg[] = { + { "lpm_key_lk1_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_3_reg[] = { + { "lpm_key_lk1_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_2_reg[] = { + { "lpm_key_lk1_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_1_reg[] = { + { "lpm_key_lk1_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk1_0_reg[] = { + { "lpm_key_lk1_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_6_reg[] = { + { "lpm_key_lk2_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_5_reg[] = { + { "lpm_key_lk2_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_4_reg[] = { + { "lpm_key_lk2_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_3_reg[] = { + { "lpm_key_lk2_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_2_reg[] = { + { "lpm_key_lk2_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_1_reg[] = { + { "lpm_key_lk2_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk2_0_reg[] = { + { "lpm_key_lk2_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_6_reg[] = { + { "lpm_key_lk3_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_5_reg[] = { + { "lpm_key_lk3_5", DPP_FIELD_FLAG_RO, 
31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_4_reg[] = { + { "lpm_key_lk3_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_3_reg[] = { + { "lpm_key_lk3_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_2_reg[] = { + { "lpm_key_lk3_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_1_reg[] = { + { "lpm_key_lk3_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_lpm_key_lk3_0_reg[] = { + { "lpm_key_lk3_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_22_reg[] = { + { "etcam_key_lk0_22", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_21_reg[] = { + { "etcam_key_lk0_21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_20_reg[] = { + { "etcam_key_lk0_20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_19_reg[] = { + { "etcam_key_lk0_19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_18_reg[] = { + { "etcam_key_lk0_18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_17_reg[] = { + { "etcam_key_lk0_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_16_reg[] = { + { "etcam_key_lk0_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_15_reg[] = { + { "etcam_key_lk0_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_14_reg[] = { + { "etcam_key_lk0_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_13_reg[] = { + { "etcam_key_lk0_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_12_reg[] = { + { "etcam_key_lk0_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_11_reg[] = { + { "etcam_key_lk0_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_cfg_etcam_key_lk0_10_reg[] = { + { "etcam_key_lk0_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_9_reg[] = { + { "etcam_key_lk0_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_8_reg[] = { + { "etcam_key_lk0_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_7_reg[] = { + { "etcam_key_lk0_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_6_reg[] = { + { "etcam_key_lk0_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_5_reg[] = { + { "etcam_key_lk0_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_4_reg[] = { + { "etcam_key_lk0_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_3_reg[] = { + { "etcam_key_lk0_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_2_reg[] = { + { "etcam_key_lk0_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_1_reg[] = { + { "etcam_key_lk0_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk0_0_reg[] = { + { "etcam_key_lk0_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_22_reg[] = { + { "etcam_key_lk1_22", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_21_reg[] = { + { "etcam_key_lk1_21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_20_reg[] = { + { "etcam_key_lk1_20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_19_reg[] = { + { "etcam_key_lk1_19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_18_reg[] = { + { "etcam_key_lk1_18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_17_reg[] = { + { "etcam_key_lk1_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_16_reg[] = { + { 
"etcam_key_lk1_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_15_reg[] = { + { "etcam_key_lk1_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_14_reg[] = { + { "etcam_key_lk1_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_13_reg[] = { + { "etcam_key_lk1_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_12_reg[] = { + { "etcam_key_lk1_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_11_reg[] = { + { "etcam_key_lk1_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_10_reg[] = { + { "etcam_key_lk1_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_9_reg[] = { + { "etcam_key_lk1_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_8_reg[] = { + { "etcam_key_lk1_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_7_reg[] = { + { "etcam_key_lk1_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_6_reg[] = { + { "etcam_key_lk1_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_5_reg[] = { + { "etcam_key_lk1_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_4_reg[] = { + { "etcam_key_lk1_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_3_reg[] = { + { "etcam_key_lk1_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_2_reg[] = { + { "etcam_key_lk1_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_1_reg[] = { + { "etcam_key_lk1_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk1_0_reg[] = { + { "etcam_key_lk1_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_22_reg[] = { + { "etcam_key_lk2_22", DPP_FIELD_FLAG_RO, 15, 16, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_21_reg[] = { + { "etcam_key_lk2_21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_20_reg[] = { + { "etcam_key_lk2_20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_19_reg[] = { + { "etcam_key_lk2_19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_18_reg[] = { + { "etcam_key_lk2_18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_17_reg[] = { + { "etcam_key_lk2_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_16_reg[] = { + { "etcam_key_lk2_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_15_reg[] = { + { "etcam_key_lk2_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_14_reg[] = { + { "etcam_key_lk2_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_13_reg[] = { + { "etcam_key_lk2_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_12_reg[] = { + { "etcam_key_lk2_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_11_reg[] = { + { "etcam_key_lk2_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_10_reg[] = { + { "etcam_key_lk2_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_9_reg[] = { + { "etcam_key_lk2_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_8_reg[] = { + { "etcam_key_lk2_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_7_reg[] = { + { "etcam_key_lk2_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_6_reg[] = { + { "etcam_key_lk2_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_5_reg[] = { + { "etcam_key_lk2_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_cfg_etcam_key_lk2_4_reg[] = { + { "etcam_key_lk2_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_3_reg[] = { + { "etcam_key_lk2_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_2_reg[] = { + { "etcam_key_lk2_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_1_reg[] = { + { "etcam_key_lk2_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk2_0_reg[] = { + { "etcam_key_lk2_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_22_reg[] = { + { "etcam_key_lk3_22", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_21_reg[] = { + { "etcam_key_lk3_21", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_20_reg[] = { + { "etcam_key_lk3_20", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_19_reg[] = { + { "etcam_key_lk3_19", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_18_reg[] = { + { "etcam_key_lk3_18", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_17_reg[] = { + { "etcam_key_lk3_17", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_16_reg[] = { + { "etcam_key_lk3_16", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_15_reg[] = { + { "etcam_key_lk3_15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_14_reg[] = { + { "etcam_key_lk3_14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_13_reg[] = { + { "etcam_key_lk3_13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_12_reg[] = { + { "etcam_key_lk3_12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_11_reg[] = { + { "etcam_key_lk3_11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_10_reg[] = { + 
{ "etcam_key_lk3_10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_9_reg[] = { + { "etcam_key_lk3_9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_8_reg[] = { + { "etcam_key_lk3_8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_7_reg[] = { + { "etcam_key_lk3_7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_6_reg[] = { + { "etcam_key_lk3_6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_5_reg[] = { + { "etcam_key_lk3_5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_4_reg[] = { + { "etcam_key_lk3_4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_3_reg[] = { + { "etcam_key_lk3_3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_2_reg[] = { + { "etcam_key_lk3_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_1_reg[] = { + { "etcam_key_lk3_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_etcam_key_lk3_0_reg[] = { + { "etcam_key_lk3_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk0_3_reg[] = { + { "pbu_key_lk0_3", DPP_FIELD_FLAG_RO, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk0_2_reg[] = { + { "pbu_key_lk0_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk0_1_reg[] = { + { "pbu_key_lk0_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk0_0_reg[] = { + { "pbu_key_lk0_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk1_3_reg[] = { + { "pbu_key_lk1_3", DPP_FIELD_FLAG_RO, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk1_2_reg[] = { + { "pbu_key_lk1_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk1_1_reg[] = { + { "pbu_key_lk1_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_cfg_pbu_key_lk1_0_reg[] = { + { "pbu_key_lk1_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk2_3_reg[] = { + { "pbu_key_lk2_3", DPP_FIELD_FLAG_RO, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk2_2_reg[] = { + { "pbu_key_lk2_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk2_1_reg[] = { + { "pbu_key_lk2_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk2_0_reg[] = { + { "pbu_key_lk2_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk3_3_reg[] = { + { "pbu_key_lk3_3", DPP_FIELD_FLAG_RO, 20, 21, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk3_2_reg[] = { + { "pbu_key_lk3_2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk3_1_reg[] = { + { "pbu_key_lk3_1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cfg_pbu_key_lk3_0_reg[] = { + { "pbu_key_lk3_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_learn_fifo_pfull_ast_reg[] = { + { "schd_learn_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_learn_fifo_pfull_neg_reg[] = { + { "schd_learn_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash0_fifo_pfull_ast_reg[] = { + { "schd_hash0_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash0_fifo_pfull_neg_reg[] = { + { "schd_hash0_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash1_fifo_pfull_ast_reg[] = { + { "schd_hash1_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash1_fifo_pfull_neg_reg[] = { + { "schd_hash1_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash2_fifo_pfull_ast_reg[] = { + { "schd_hash2_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash2_fifo_pfull_neg_reg[] = { + { "schd_hash2_fifo_pfull_neg", 
DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash3_fifo_pfull_ast_reg[] = { + { "schd_hash3_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash3_fifo_pfull_neg_reg[] = { + { "schd_hash3_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_lpm_fifo_pfull_ast_reg[] = { + { "schd_lpm_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_lpm_fifo_pfull_neg_reg[] = { + { "schd_lpm_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_key_fifo_pfull_ast_reg[] = { + { "hash0_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_key_fifo_pfull_neg_reg[] = { + { "hash0_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_sreq_fifo_pfull_ast_reg[] = { + { "hash0_sreq_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_sreq_fifo_pfull_neg_reg[] = { + { "hash0_sreq_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_rsp_fifo_pfull_ast_reg[] = { + { "hash0_int_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_rsp_fifo_pfull_neg_reg[] = { + { "hash0_int_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_ext_rsp_fifo_pfull_ast_reg[] = { + { "hash0_ext_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_ext_rsp_fifo_pfull_neg_reg[] = { + { "hash0_ext_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x1e, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_key_fifo_pfull_ast_reg[] = { + { "hash1_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_key_fifo_pfull_neg_reg[] = { + { "hash1_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_sreq_fifo_pfull_ast_reg[] = { + { 
"hash1_sreq_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_sreq_fifo_pfull_neg_reg[] = { + { "hash1_sreq_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_rsp_fifo_pfull_ast_reg[] = { + { "hash1_int_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_rsp_fifo_pfull_neg_reg[] = { + { "hash1_int_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_ext_rsp_fifo_pfull_ast_reg[] = { + { "hash1_ext_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_ext_rsp_fifo_pfull_neg_reg[] = { + { "hash1_ext_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x1e, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_key_fifo_pfull_ast_reg[] = { + { "hash2_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_key_fifo_pfull_neg_reg[] = { + { "hash2_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_sreq_fifo_pfull_ast_reg[] = { + { "hash2_sreq_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_sreq_fifo_pfull_neg_reg[] = { + { "hash2_sreq_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_rsp_fifo_pfull_ast_reg[] = { + { "hash2_int_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_rsp_fifo_pfull_neg_reg[] = { + { "hash2_int_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_ext_rsp_fifo_pfull_ast_reg[] = { + { "hash2_ext_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_ext_rsp_fifo_pfull_neg_reg[] = { + { "hash2_ext_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x1e, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_key_fifo_pfull_ast_reg[] = { + { "hash3_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, 0x0 }, +}; 
+DPP_FIELD_T g_se_alg_hash3_key_fifo_pfull_neg_reg[] = { + { "hash3_key_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_sreq_fifo_pfull_ast_reg[] = { + { "hash3_sreq_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_sreq_fifo_pfull_neg_reg[] = { + { "hash3_sreq_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_rsp_fifo_pfull_ast_reg[] = { + { "hash3_int_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1cc, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_rsp_fifo_pfull_neg_reg[] = { + { "hash3_int_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1c8, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_ext_rsp_fifo_pfull_ast_reg[] = { + { "hash3_ext_rsp_fifo_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_ext_rsp_fifo_pfull_neg_reg[] = { + { "hash3_ext_rsp_fifo_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x1e, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_info_reg[] = { + { "lpm_as_type", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "lpm_as_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u0_pfull_neg_reg[] = { + { "lpm_ext_rsp_fifo_u0_pfull_neg", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u2_pfull_ast_reg[] = { + { "lpm_ext_rsp_fifo_u2_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1e4, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u2_pfull_neg_reg[] = { + { "lpm_ext_rsp_fifo_u2_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1e0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u3_pfull_ast_reg[] = { + { "lpm_ext_rsp_fifo_u3_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1e4, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u3_pfull_neg_reg[] = { + { "lpm_ext_rsp_fifo_u3_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1e0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_u4_pfull_ast_reg[] = { + { "lpm_ext_rsp_fifo_u4_pfull_ast", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_lpm_ext_rsp_fifo_u4_pfull_neg_reg[] = { + { "lpm_ext_rsp_fifo_u4_pfull_neg", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_rsp_fifo_u0_pfull_ast_reg[] = { + { "lpm_as_rsp_fifo_u0_pfull_ast", DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_rsp_fifo_u0_pfull_neg_reg[] = { + { "lpm_as_rsp_fifo_u0_pfull_neg", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_rsp_fifo_u1_pfull_ast_reg[] = { + { "lpm_as_rsp_fifo_u1_pfull_ast", DPP_FIELD_FLAG_RW, 9, 10, 0x1e2, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_rsp_fifo_u1_pfull_neg_reg[] = { + { "lpm_as_rsp_fifo_u1_pfull_neg", DPP_FIELD_FLAG_RW, 9, 10, 0x1dc, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_v4_ddr3_base_addr_reg[] = { + { "lpm_v4_ddr3_base_addr", DPP_FIELD_FLAG_RW, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_v6_ddr3_base_addr_reg[] = { + { "lpm_v6_ddr3_base_addr", DPP_FIELD_FLAG_RW, 25, 26, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p0_key_vld_cnt_reg[] = { + { "hash_p0_key_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p1_key_vld_cnt_reg[] = { + { "hash_p1_key_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p2_key_vld_cnt_reg[] = { + { "hash_p2_key_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p3_key_vld_cnt_reg[] = { + { "hash_p3_key_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_p0_key_vld_cnt_reg[] = { + { "lpm_p0_key_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p0_rsp_vld_cnt_reg[] = { + { "hash_p0_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p1_rsp_vld_cnt_reg[] = { + { "hash_p1_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_hash_p2_rsp_vld_cnt_reg[] = { + { "hash_p2_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p3_rsp_vld_cnt_reg[] = { + { "hash_p3_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_p0_rsp_vld_cnt_reg[] = { + { "lpm_p0_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p0_smf_cnt_reg[] = { + { "hash_p0_smf_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p1_smf_cnt_reg[] = { + { "hash_p1_smf_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p2_smf_cnt_reg[] = { + { "hash_p2_smf_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p3_smf_cnt_reg[] = { + { "hash_p3_smf_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_p0_smf_cnt_reg[] = { + { "lpm_p0_smf_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p0_spacevld_cnt_reg[] = { + { "hash_p0_spacevld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p1_spacevld_cnt_reg[] = { + { "hash_p1_spacevld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p2_spacevld_cnt_reg[] = { + { "hash_p2_spacevld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash_p3_spacevld_cnt_reg[] = { + { "hash_p3_spacevld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p0_req_vld_cnt_reg[] = { + { "smmu1_p0_req_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p1_req_vld_cnt_reg[] = { + { "smmu1_p1_req_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p2_req_vld_cnt_reg[] = { + { "smmu1_p2_req_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p3_req_vld_cnt_reg[] = { + { "smmu1_p3_req_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p4_req_vld_cnt_reg[] = { + { "smmu1_p4_req_vld_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p5_req_vld_cnt_reg[] = { + { "smmu1_p5_req_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p0_rsp_vld_cnt_reg[] = { + { "smmu1_p0_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p1_rsp_vld_cnt_reg[] = { + { "smmu1_p1_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p2_rsp_vld_cnt_reg[] = { + { "smmu1_p2_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p3_rsp_vld_cnt_reg[] = { + { "smmu1_p3_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p4_rsp_vld_cnt_reg[] = { + { "smmu1_p4_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_smmu1_p5_rsp_vld_cnt_reg[] = { + { "smmu1_p5_rsp_vld_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_learn_fifo_int_cnt_reg[] = { + { "schd_learn_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash0_fifo_int_cnt_reg[] = { + { "schd_hash0_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash1_fifo_int_cnt_reg[] = { + { "schd_hash1_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash2_fifo_int_cnt_reg[] = { + { "schd_hash2_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash3_fifo_int_cnt_reg[] = { + { "schd_hash3_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_lpm_fifo_int_cnt_reg[] = { + { "schd_lpm_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_learn_fifo_parity_err_cnt_reg[] = { + { "schd_learn_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash0_fifo_parity_err_cnt_reg[] = { + { "schd_hash0_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_schd_hash1_fifo_parity_err_cnt_reg[] = { + { "schd_hash1_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash2_fifo_parity_err_cnt_reg[] = { + { "schd_hash2_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_hash3_fifo_parity_err_cnt_reg[] = { + { "schd_hash3_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_lpm_fifo_parity_err_cnt_reg[] = { + { "schd_lpm_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_rd_init_cft_cnt_reg[] = { + { "rd_init_cft_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk0_ecc_err_cnt_reg[] = { + { "zgp0_zblk0_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk1_ecc_err_cnt_reg[] = { + { "zgp0_zblk1_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk2_ecc_err_cnt_reg[] = { + { "zgp0_zblk2_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk3_ecc_err_cnt_reg[] = { + { "zgp0_zblk3_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk4_ecc_err_cnt_reg[] = { + { "zgp0_zblk4_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk5_ecc_err_cnt_reg[] = { + { "zgp0_zblk5_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk6_ecc_err_cnt_reg[] = { + { "zgp0_zblk6_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp0_zblk7_ecc_err_cnt_reg[] = { + { "zgp0_zblk7_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk0_ecc_err_cnt_reg[] = { + { "zgp1_zblk0_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk1_ecc_err_cnt_reg[] = { + { "zgp1_zblk1_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_zgp1_zblk2_ecc_err_cnt_reg[] = { + { "zgp1_zblk2_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk3_ecc_err_cnt_reg[] = { + { "zgp1_zblk3_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk4_ecc_err_cnt_reg[] = { + { "zgp1_zblk4_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk5_ecc_err_cnt_reg[] = { + { "zgp1_zblk5_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk6_ecc_err_cnt_reg[] = { + { "zgp1_zblk6_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp1_zblk7_ecc_err_cnt_reg[] = { + { "zgp1_zblk7_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk0_ecc_err_cnt_reg[] = { + { "zgp2_zblk0_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk1_ecc_err_cnt_reg[] = { + { "zgp2_zblk1_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk2_ecc_err_cnt_reg[] = { + { "zgp2_zblk2_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk3_ecc_err_cnt_reg[] = { + { "zgp2_zblk3_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk4_ecc_err_cnt_reg[] = { + { "zgp2_zblk4_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk5_ecc_err_cnt_reg[] = { + { "zgp2_zblk5_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk6_ecc_err_cnt_reg[] = { + { "zgp2_zblk6_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp2_zblk7_ecc_err_cnt_reg[] = { + { "zgp2_zblk7_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk0_ecc_err_cnt_reg[] = { + { "zgp3_zblk0_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk1_ecc_err_cnt_reg[] = { + { "zgp3_zblk1_ecc_err_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk2_ecc_err_cnt_reg[] = { + { "zgp3_zblk2_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk3_ecc_err_cnt_reg[] = { + { "zgp3_zblk3_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk4_ecc_err_cnt_reg[] = { + { "zgp3_zblk4_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk5_ecc_err_cnt_reg[] = { + { "zgp3_zblk5_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk6_ecc_err_cnt_reg[] = { + { "zgp3_zblk6_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zgp3_zblk7_ecc_err_cnt_reg[] = { + { "zgp3_zblk7_ecc_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zcam_hash_p0_err_cnt_reg[] = { + { "zcam_hash_p0_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zcam_hash_p1_err_cnt_reg[] = { + { "zcam_hash_p1_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zcam_hash_p2_err_cnt_reg[] = { + { "zcam_hash_p2_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zcam_hash_p3_err_cnt_reg[] = { + { "zcam_hash_p3_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zcam_lpm_err_cnt_reg[] = { + { "zcam_lpm_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_sreq_fifo_parity_err_cnt_reg[] = { + { "hash0_sreq_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_sreq_fifo_int_cnt_reg[] = { + { "hash0_sreq_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_key_fifo_int_cnt_reg[] = { + { "hash0_key_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_rsp_fifo_parity_err_cnt_reg[] = { + { "hash0_int_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_hash0_ext_rsp_fifo_parity_err_cnt_reg[] = { + { "hash0_ext_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_ext_rsp_fifo_int_cnt_reg[] = { + { "hash0_ext_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_rsp_fifo_int_cnt_reg[] = { + { "hash0_int_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_sreq_fifo_parity_err_cnt_reg[] = { + { "hash1_sreq_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_sreq_fifo_int_cnt_reg[] = { + { "hash1_sreq_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_key_fifo_int_cnt_reg[] = { + { "hash1_key_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_rsp_fifo_parity_err_cnt_reg[] = { + { "hash1_int_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_ext_rsp_fifo_parity_err_cnt_reg[] = { + { "hash1_ext_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_ext_rsp_fifo_int_cnt_reg[] = { + { "hash1_ext_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_rsp_fifo_int_cnt_reg[] = { + { "hash1_int_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_sreq_fifo_parity_err_cnt_reg[] = { + { "hash2_sreq_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_sreq_fifo_int_cnt_reg[] = { + { "hash2_sreq_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_key_fifo_int_cnt_reg[] = { + { "hash2_key_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_rsp_fifo_parity_err_cnt_reg[] = { + { "hash2_int_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T 
g_se_alg_hash2_ext_rsp_fifo_parity_err_cnt_reg[] = { + { "hash2_ext_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_ext_rsp_fifo_int_cnt_reg[] = { + { "hash2_ext_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_rsp_fifo_int_cnt_reg[] = { + { "hash2_int_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_sreq_fifo_parity_err_cnt_reg[] = { + { "hash3_sreq_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_sreq_fifo_int_cnt_reg[] = { + { "hash3_sreq_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_key_fifo_int_cnt_reg[] = { + { "hash3_key_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_rsp_fifo_parity_err_cnt_reg[] = { + { "hash3_int_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_ext_rsp_fifo_parity_err_cnt_reg[] = { + { "hash3_ext_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_ext_rsp_fifo_int_cnt_reg[] = { + { "hash3_ext_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_rsp_fifo_int_cnt_reg[] = { + { "hash3_int_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_int_cnt_reg[] = { + { "lpm_ext_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_v6_fifo_int_cnt_reg[] = { + { "lpm_ext_v6_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_v4_fifo_int_cnt_reg[] = { + { "lpm_ext_v4_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_addr_fifo_int_cnt_reg[] = { + { "lpm_ext_addr_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_v4_fifo_parity_err_cnt_reg[] = { + { 
"lpm_ext_v4_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_v6_fifo_parity_err_cnt_reg[] = { + { "lpm_ext_v6_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_ext_rsp_fifo_parity_err_cnt_reg[] = { + { "lpm_ext_rsp_fifo_parity_err_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_req_fifo_int_cnt_reg[] = { + { "lpm_as_req_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_as_int_rsp_fifo_int_cnt_reg[] = { + { "lpm_as_int_rsp_fifo_int_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_se_alg_int_status_reg[] = { + { "schd_int_unmask_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "zblk_ecc_int_unmask_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "hash0_int_unmask_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "hash1_int_unmask_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "hash2_int_unmask_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "hash3_int_unmask_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "lpm_int_unmask_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_int_en_reg[] = { + { "wr_rsp_fifo_ovfl", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "init_rd_cft_en", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "schd_lpm_fifo_parity_errl", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "schd_hash3_fifo_parity_err", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "schd_hash2_fifo_parity_err", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "schd_hash1_fifo_parity_err", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "schd_hash0_fifo_parity_err", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "schd_learn_fifo_parity_err", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "schd_lpm_fifo_ovfl", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "schd_hash3_fifo_ovfl", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "schd_hash2_fifo_unfl", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "schd_hash1_fifo_ovfl", DPP_FIELD_FLAG_RW, 2, 1, 
0x1, 0x0 }, + { "schd_hash0_fifo_ovfl", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "schd_learn_fifo_ovfl", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_int_mask_reg[] = { + { "schd_int_mask", DPP_FIELD_FLAG_RW, 13, 14, 0x3fff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_schd_int_status_reg[] = { + { "schd_int_status", DPP_FIELD_FLAG_RC, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblk_ecc_int_en_reg[] = { + { "zblk_ecc_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblk_ecc_int_mask_reg[] = { + { "zblk_ecc_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblk_ecc_int_status_reg[] = { + { "zblk_ecc_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_en_reg[] = { + { "zcam_hash_p0_err_en", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "hash0_agree_int_fifo_ovf_en", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "hash0_agree_ext_fifo_ovf_en", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "hash0_agree_ext_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "hash0_agree_int_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "hash0_key_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "hash0_sreq_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "hash0_key_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_mask_reg[] = { + { "hash0_int_mask", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash0_int_status_reg[] = { + { "hash0_int_status", DPP_FIELD_FLAG_RC, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_en_reg[] = { + { "zcam_hash_p1_err_en", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "hash1_agree_int_fifo_ovf_en", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "hash1_agree_ext_fifo_ovf_en", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "hash1_agree_ext_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "hash1_agree_int_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 
0x0 }, + { "hash1_key_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "hash1_sreq_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "hash1_key_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_mask_reg[] = { + { "hash1_int_mask", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash1_int_status_reg[] = { + { "hash1_int_status", DPP_FIELD_FLAG_RC, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_en_reg[] = { + { "zcam_hash_p2_err_en", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "hash2_agree_int_fifo_ovf_en", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "hash2_agree_ext_fifo_ovf_en", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "hash2_agree_ext_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "hash2_agree_int_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "hash2_key_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "hash2_sreq_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "hash2_key_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_mask_reg[] = { + { "hash2_int_mask", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash2_int_status_reg[] = { + { "hash2_int_status", DPP_FIELD_FLAG_RC, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_en_reg[] = { + { "zcam_hash_p3_err_en", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "hash3_agree_int_fifo_ovf_en", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "hash3_agree_ext_fifo_ovf_en", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "hash3_agree_ext_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 4, 1, 0x1, + 0x0 }, + { "hash3_agree_int_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 3, 1, 0x1, + 0x0 }, + { "hash3_key_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "hash3_sreq_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "hash3_key_fifo_parity_err_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_mask_reg[] = { + { "hash3_int_mask", 
DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_hash3_int_status_reg[] = { + { "hash3_int_status", DPP_FIELD_FLAG_RC, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_int_en_reg[] = { + { "zcam_lpm_err_en", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "lpm_as_int_rsp_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "lpm_as_req_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "lpm_ext_ddr_rsp_fifo_parity_en", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "lpm_ext_v6_key_parity_en", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "lpm_ext_v4_key_parity_en", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "lpm_ext_addr_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "lpm_ext_v4_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "lpm_ext_v6_fifo_ovfl_en", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "lpm_ext_ddr_rsp_ovf_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_int_mask_reg[] = { + { "lpm_int_mask", DPP_FIELD_FLAG_RW, 9, 10, 0x3ff, 0x0 }, +}; +DPP_FIELD_T g_se_alg_lpm_int_status_reg[] = { + { "lpm_int_status", DPP_FIELD_FLAG_RC, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_mask0_reg[] = { + { "vpn_id_mask", DPP_FIELD_FLAG_RW, 143, 16, 0x0, 0x0 }, + { "prefix0_mask", DPP_FIELD_FLAG_RW, 127, 32, 0x0, 0x0 }, + { "prefix1_mask", DPP_FIELD_FLAG_RW, 95, 32, 0x0, 0x0 }, + { "prefix2_mask", DPP_FIELD_FLAG_RW, 63, 32, 0x0, 0x0 }, + { "prefix3_mask", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_mask1_reg[] = { + { "vpn_id_mask", DPP_FIELD_FLAG_RW, 143, 16, 0x0, 0x0 }, + { "prefix0_mask", DPP_FIELD_FLAG_RW, 127, 32, 0x0, 0x0 }, + { "prefix1_mask", DPP_FIELD_FLAG_RW, 95, 32, 0x0, 0x0 }, + { "prefix2_mask", DPP_FIELD_FLAG_RW, 63, 32, 0x0, 0x0 }, + { "prefix3_mask", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_mask2_reg[] = { + { "vpn_id_mask", DPP_FIELD_FLAG_RW, 143, 16, 0x0, 0x0 }, + { "prefix0_mask", DPP_FIELD_FLAG_RW, 127, 32, 0x0, 0x0 }, + { "prefix1_mask", 
DPP_FIELD_FLAG_RW, 95, 32, 0x0, 0x0 }, + { "prefix2_mask", DPP_FIELD_FLAG_RW, 63, 32, 0x0, 0x0 }, + { "prefix3_mask", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_mask3_reg[] = { + { "vpn_id_mask", DPP_FIELD_FLAG_RW, 143, 16, 0x0, 0x0 }, + { "prefix0_mask", DPP_FIELD_FLAG_RW, 127, 32, 0x0, 0x0 }, + { "prefix1_mask", DPP_FIELD_FLAG_RW, 95, 32, 0x0, 0x0 }, + { "prefix2_mask", DPP_FIELD_FLAG_RW, 63, 32, 0x0, 0x0 }, + { "prefix3_mask", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route0_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route1_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route2_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route3_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route4_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route5_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { 
"vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route6_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_default_route7_reg[] = { + { "vpn_id", DPP_FIELD_FLAG_RW, 39, 16, 0x0, 0x0 }, + { "vpn_dresult", DPP_FIELD_FLAG_RW, 23, 22, 0x0, 0x0 }, + { "vpn_flag", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "vpn_vld", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_hash_listtable_item0_reg[] = { + { "hash_item", DPP_FIELD_FLAG_RW, 511, 512, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_hash_listtable_item1_reg[] = { + { "hash_item", DPP_FIELD_FLAG_RW, 511, 512, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_hash_listtable_item2_reg[] = { + { "hash_item", DPP_FIELD_FLAG_RW, 511, 512, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_hash_listtable_item3_reg[] = { + { "hash_item", DPP_FIELD_FLAG_RW, 511, 512, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_ecc_err_status_reg[] = { + { "sram3_ecc_err", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "sram2_ecc_err", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "sram1_ecc_err", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "sram0_ecc_err", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_v6_sram_cmp_reg[] = { + { "sram_cmp_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_alg_zblock_lpm_v4_sram_cmp_reg[] = { + { "sram_cmp_flag", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_kschd_pful_cfg_reg[] = { + { "kschd_pful_assert", DPP_FIELD_FLAG_RW, 11, 6, 0xc, 0x0 }, + { "kschd_pful_negate", DPP_FIELD_FLAG_RW, 5, 6, 0xc, 0x0 }, +}; +DPP_FIELD_T g_se_parser_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 
0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_parser_parser_int_en_reg[] = { + { "parser_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_parser_parser_int_mask_reg[] = { + { "parser_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_parser_parser_int_status_reg[] = { + { "parser_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_parser_int_unmask_flag_reg[] = { + { "parser_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_ecc_bypass_read_reg[] = { + { "ecc_bypass_read", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_se_parser_mex0_5_req_cnt_reg[] = { + { "mex0_5_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_kschd_req0_5_cnt_reg[] = { + { "kschd_req0_5_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_kschd_parser_fc0_5_cnt_reg[] = { + { "kschd_parser_fc0_5_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_se_ppu_mex0_5_fc_cnt_reg[] = { + { "se_ppu_mex0_5_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_smmu0_marc_fc_cnt_reg[] = { + { "smmu0_marc_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_smmu0_marc_key_cnt_reg[] = { + { "smmu0_marc_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_cmmu_key_cnt_reg[] = { + { "cmmu_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_cmmu_parser_fc_cnt_reg[] = { + { "cmmu_parser_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_marc_tab_type_err_mex0_5_cnt_reg[] = { + { "marc_tab_type_err_mex0_5_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_parser_eram_fulladdr_drop_cnt_reg[] = { + { "eram_fulladdr_drop_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash0_pful_cfg_reg[] = { + { "hash0_pful_cfg", 
DPP_FIELD_FLAG_RW, 19, 20, 0x7d9f6, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash1_pful_cfg_reg[] = { + { "hash1_pful_cfg", DPP_FIELD_FLAG_RW, 19, 20, 0x7d9f6, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash2_pful_cfg_reg[] = { + { "hash2_pful_cfg", DPP_FIELD_FLAG_RW, 19, 20, 0x7d9f6, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash3_pful_cfg_reg[] = { + { "hash3_pful_cfg", DPP_FIELD_FLAG_RW, 19, 20, 0x7d9f6, 0x0 }, +}; +DPP_FIELD_T g_se_as_pbu_pful_cfg_reg[] = { + { "pbu_pful_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x69a, 0x0 }, +}; +DPP_FIELD_T g_se_as_lpm_pful_cfg_reg[] = { + { "lpm_pful_cfg", DPP_FIELD_FLAG_RW, 19, 20, 0x7d9f6, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam_pful_cfg_reg[] = { + { "etcam_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1c38, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_learn0_fifo_cfg_reg[] = { + { "as_learn1_pful_negate", DPP_FIELD_FLAG_RW, 27, 7, 0x38, 0x0 }, + { "as_learn1_pful_asert", DPP_FIELD_FLAG_RW, 20, 7, 0x38, 0x0 }, + { "as_learn0_pful_negate", DPP_FIELD_FLAG_RW, 13, 7, 0x38, 0x0 }, + { "as_learn0_pful_asert", DPP_FIELD_FLAG_RW, 6, 7, 0x38, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_learn1_fifo_cfg_reg[] = { + { "as_learn3_pful_negate", DPP_FIELD_FLAG_RW, 27, 7, 0x38, 0x0 }, + { "as_learn3_pful_asert", DPP_FIELD_FLAG_RW, 20, 7, 0x38, 0x0 }, + { "as_learn2_pful_negate", DPP_FIELD_FLAG_RW, 13, 7, 0x38, 0x0 }, + { "as_learn2_pful_asert", DPP_FIELD_FLAG_RW, 6, 7, 0x38, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_dma_fifo_cfg_reg[] = { + { "as_dma_fifo_cfg", DPP_FIELD_FLAG_RW, 15, 16, 0x7272, 0x0 }, +}; +DPP_FIELD_T g_se_as_age_pful_cfg_reg[] = { + { "age_pful_cfg", DPP_FIELD_FLAG_RW, 11, 12, 0x618, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam_rsp_cfg_reg[] = { + { "eram_rsp_pful_negate", DPP_FIELD_FLAG_RW, 27, 7, 0x24, 0x0 }, + { "eram_rsp_pful_assert", DPP_FIELD_FLAG_RW, 20, 7, 0x24, 0x0 }, + { "etcam_rsp_pful_negate", DPP_FIELD_FLAG_RW, 13, 7, 0x21, 0x0 }, + { "etcam_rsp_pful_assert", DPP_FIELD_FLAG_RW, 6, 7, 0x21, 0x0 }, +}; +DPP_FIELD_T g_se_as_pbu_ecc_bypass_read_reg[] = { + { "pbu_ecc_bypass_read", 
DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam0_ecc_bypass_read_reg[] = { + { "etcam0_ecc_bypass_read", DPP_FIELD_FLAG_RW, 2, 3, 0x7, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam1_ecc_bypass_read_reg[] = { + { "etcam1_ecc_bypass_read", DPP_FIELD_FLAG_RW, 2, 3, 0x7, 0x0 }, +}; +DPP_FIELD_T g_se_as_lpm_ecc_bypass_read_reg[] = { + { "lpm_ecc_bypass_read", DPP_FIELD_FLAG_RW, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash_ecc_bypass_read_reg[] = { + { "hash3_ecc_bypass_read", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "hash2_ecc_bypass_read", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "hash1_ecc_bypass_read", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "hash0_ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash_learn_ecc_bypass_read_reg[] = { + { "hash_learn_ecc_bypass_read", DPP_FIELD_FLAG_RW, 3, 4, 0xf, 0x0 }, +}; +DPP_FIELD_T g_se_as_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_0_en_reg[] = { + { "as_int_0_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_0_mask_reg[] = { + { "as_int_0_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_1_en_reg[] = { + { "as_int_1_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_1_mask_reg[] = { + { "as_int_1_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_2_en_reg[] = { + { "as_int_2_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_2_mask_reg[] = { + { "as_int_2_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_0_status_reg[] = { + { "port0_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_1_status_reg[] = { + { "port1_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_int_2_status_reg[] = { + { 
"port2_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_se_as_int_status_reg[] = { + { "as_int_2_unmask_flag", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "as_int_1_unmask_flag", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "as_int_0_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_hash0_3_wr_req_cnt_reg[] = { + { "hash0_3_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_smmu0_etcam0_1_fc_cnt_reg[] = { + { "smmu0_etcam0_1_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam0_1_smmu0_req_cnt_reg[] = { + { "etcam0_1_smmu0_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_smmu0_etcam0_1_rsp_cnt_reg[] = { + { "smmu0_etcam0_1_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_hla_hash_p0_3_key_cnt_reg[] = { + { "as_hla_hash_p0_3_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_hla_lpm_p0_key_cnt_reg[] = { + { "as_hla_lpm_p0_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_hash_p0_3_rsp_cnt_reg[] = { + { "alg_as_hash_p0_3_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_hash_p0_3_smf_rsp_cnt_reg[] = { + { "alg_as_hash_p0_3_smf_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_lpm_p0_rsp_cnt_reg[] = { + { "alg_as_lpm_p0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_lpm_p0_3_smf_rsp_cnt_reg[] = { + { "alg_as_lpm_p0_3_smf_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_pbu_key_cnt_reg[] = { + { "as_pbu_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_pbu_se_dpi_rsp_dat_cnt_reg[] = { + { "pbu_se_dpi_rsp_dat_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_etcam_ctrl_req0_cnt_reg[] = { + { "as_etcam_ctrl_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_as_etcam_ctrl_as_index0_1_cnt_reg[] = { + { "etcam_ctrl_as_index0_1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam_ctrl_as_hit0_1_cnt_reg[] = { + { "etcam_ctrl_as_hit0_1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_smmu0_req_cnt_reg[] = { + { "as_smmu0_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_learn_hla_wr_cnt_reg[] = { + { "learn_hla_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_smmu1_req_cnt_reg[] = { + { "as_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_se_cfg_mac_dat_cnt_reg[] = { + { "se_cfg_mac_dat_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_hash_p0_3_fc_cnt_reg[] = { + { "alg_as_hash_p0_3_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_as_lpm_p0_fc_cnt_reg[] = { + { "alg_as_lpm_p0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_alg_hash_p0_3_fc_cnt_reg[] = { + { "as_alg_hash_p0_3_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_alg_lpm_p0_fc_cnt_reg[] = { + { "as_alg_lpm_p0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_pbu_fc_cnt_reg[] = { + { "as_pbu_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_pbu_se_dpi_key_fc_cnt_reg[] = { + { "pbu_se_dpi_key_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_as_etcam_ctrl_fc0_1_cnt_reg[] = { + { "as_etcam_ctrl_fc0_1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_etcam_ctrl_as_fc0_1_cnt_reg[] = { + { "etcam_ctrl_as_fc0_1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_smmu0_as_mac_age_fc_cnt_reg[] = { + { "smmu0_as_mac_age_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_alg_learn_fc_cnt_reg[] = { + { "alg_learn_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_smmu1_as_fc_cnt_reg[] 
= { + { "smmu1_as_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_as_cfg_se_mac_fc_cnt_reg[] = { + { "cfg_se_mac_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_cpu_rdy_reg[] = { + { "kschd_cpu_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_ppu0_ecc_bypass_read_reg[] = { + { "ppu0_ecc_bypass_read", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_pbu_ecc_bypass_read_reg[] = { + { "pbu_ecc_bypass_read", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_smmu1_ecc_bypass_read_reg[] = { + { "u3_smmu1_ecc_bypass_read", DPP_FIELD_FLAG_RO, 23, 6, 0x3f, 0x0 }, + { "u2_smmu1_ecc_bypass_read", DPP_FIELD_FLAG_RO, 17, 6, 0x3f, 0x0 }, + { "u1_smmu1_ecc_bypass_read", DPP_FIELD_FLAG_RO, 11, 6, 0x3f, 0x0 }, + { "u0_smmu1_ecc_bypass_read", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_ass_ecc_bypass_read_reg[] = { + { "ass_ecc_bypass_read", DPP_FIELD_FLAG_RW, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_sdt_h_reg[] = { + { "sdt_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_sdt_l_reg[] = { + { "sdt_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key15_reg[] = { + { "dma_en", DPP_FIELD_FLAG_RW, 25, 1, 0x0, 0x0 }, + { "delete_en", DPP_FIELD_FLAG_RW, 24, 1, 0x0, 0x0 }, + { "hash_key15", DPP_FIELD_FLAG_RW, 23, 24, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key14_reg[] = { + { "hash_key14", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key13_reg[] = { + { "hash_key13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key12_reg[] = { + { "hash_key12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key11_reg[] = { + { "hash_key11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key10_reg[] = { + { "hash_key10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key9_reg[] = { + { 
"hash_key9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key8_reg[] = { + { "hash_key8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key7_reg[] = { + { "hash_key7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key6_reg[] = { + { "hash_key6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key5_reg[] = { + { "hash_key5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key4_reg[] = { + { "hash_key4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key3_reg[] = { + { "hash_key3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key2_reg[] = { + { "hash_key2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key1_reg[] = { + { "hash_key1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_hash_key0_reg[] = { + { "hash_key0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_0_en_reg[] = { + { "port0_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_0_mask_reg[] = { + { "port0_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_1_en_reg[] = { + { "port1_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_1_mask_reg[] = { + { "port1_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_2_en_reg[] = { + { "port2_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_2_mask_reg[] = { + { "port2_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_3_en_reg[] = { + { "port3_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_3_mask_reg[] = { + { "port3_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_4_en_reg[] = { 
+ { "port4_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_4_mask_reg[] = { + { "port4_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_0_status_reg[] = { + { "port0_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_1_status_reg[] = { + { "port1_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_2_status_reg[] = { + { "port2_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_3_status_reg[] = { + { "port3_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_schd_int_4_status_reg[] = { + { "port4_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_se_kschd_int_status_reg[] = { + { "schd_int4_unmask_flag", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "schd_int3_unmask_flag", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "schd_int2_unmask_flag", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "schd_int1_unmask_flag", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "schd_int0_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_se_parser_kschd_key0_3_cnt_reg[] = { + { "se_parser_kschd_key0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_se_smmu1_key0_3_cnt_reg[] = { + { "se_smmu1_key0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key0_cnt_reg[] = { + { "kschd_as_key0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key1_cnt_reg[] = { + { "kschd_as_key1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key2_cnt_reg[] = { + { "kschd_as_key2_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_kschd_kschd_as_key3_cnt_reg[] = { + { "kschd_as_key3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key4_cnt_reg[] = { + { "kschd_as_key4_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key5_cnt_reg[] = { + { "kschd_as_key5_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key6_cnt_reg[] = { + { "kschd_as_key6_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_as_key9_cnt_reg[] = { + { "kschd_as_key9_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_kschd_se_parser_fc0_3_cnt_reg[] = { + { "kschd_se_parser_fc0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_smmu1_se_fc0_3_cnt_reg[] = { + { "smmu1_se_fc0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt0_reg[] = { + { "as_kschd_fc_cnt0", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt1_reg[] = { + { "as_kschd_fc_cnt1", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt2_reg[] = { + { "as_kschd_fc_cnt2", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt3_reg[] = { + { "as_kschd_fc_cnt3", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt4_reg[] = { + { "as_kschd_fc_cnt4", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt5_reg[] = { + { "as_kschd_fc_cnt5", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt6_reg[] = { + { "as_kschd_fc_cnt6", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_kschd_as_kschd_fc_cnt9_reg[] = { + { "as_kschd_fc_cnt9", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_hash_pful_cfg_reg[] = { + { "rschd_hash_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00370037, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_hash_ept_cfg_reg[] = { + { "rschd_hash_ept_cfg", 
DPP_FIELD_FLAG_RW, 31, 32, 0x00090009, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_pbu_pful_cfg_reg[] = { + { "rschd_pbu_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x002d002d, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_pbu_ept_cfg_reg[] = { + { "rschd_pbu_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00130013, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_lpm_pful_cfg_reg[] = { + { "rschd_lpm_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00240024, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_lpm_ept_cfg_reg[] = { + { "rschd_lpm_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x001c001c, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_etcam_pful_cfg_reg[] = { + { "rschd_etcam_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00110011, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_etcam_ept_cfg_reg[] = { + { "rschd_etcam_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x002f002f, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu0_wb_pful_cfg_reg[] = { + { "smmu0_wb_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00330033, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu0_wb_ept_cfg_reg[] = { + { "smmu0_wb_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x000d000d, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu1_wb_pful_cfg_reg[] = { + { "smmu1_wb_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x003a003a, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu1_wb_ept_cfg_reg[] = { + { "smmu1_wb_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00060006, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_alg_wb_pful_cfg_reg[] = { + { "alg_wb_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x003a003a, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_alg_wb_ept_cfg_reg[] = { + { "alg_wb_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00060006, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_wr_rsp_vld_en_reg[] = { + { "wr_rsp_vld_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_nppu_wb_pful_cfg_reg[] = { + { "nppu_wb_pful_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00380038, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_nppu_wb_ept_cfg_reg[] = { + { "nppu_wb_ept_cfg", DPP_FIELD_FLAG_RW, 31, 32, 0x00080008, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port0_int_en_reg[] = { + { "port0_int_en", 
DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port0_int_mask_reg[] = { + { "port0_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port1_int_en_reg[] = { + { "port1_int_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port1_int_mask_reg[] = { + { "port1_int_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port0_int_status_reg[] = { + { "port0_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_port1_int_status_reg[] = { + { "port1_int_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_rschd_int_status_reg[] = { + { "port1_int_unmask_flag", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "port0_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_ppu_mex0_5_rsp1_cnt_reg[] = { + { "se_ppu_mex0_5_rsp1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp0_cnt_reg[] = { + { "as_rschd_rsp0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp1_cnt_reg[] = { + { "as_rschd_rsp1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp2_cnt_reg[] = { + { "as_rschd_rsp2_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp3_cnt_reg[] = { + { "as_rschd_rsp3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp4_cnt_reg[] = { + { "as_rschd_rsp4_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp5_cnt_reg[] = { + { "as_rschd_rsp5_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_as_rschd_rsp6_cnt_reg[] = { + { "as_rschd_rsp6_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_rschd_as_rschd_rsp9_cnt_reg[] = { + { "as_rschd_rsp9_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu1_se_rsp0_3_cnt_reg[] = { + { "smmu1_se_rsp0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_ppu_se_mex0_3_fc_cnt_reg[] = { + { "ppu_se_mex0_3_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt0_reg[] = { + { "rschd_as_fc_cnt0", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt1_reg[] = { + { "rschd_as_fc_cnt1", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt2_reg[] = { + { "rschd_as_fc_cnt2", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt3_reg[] = { + { "rschd_as_fc_cnt3", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt4_reg[] = { + { "rschd_as_fc_cnt4", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt5_reg[] = { + { "rschd_as_fc_cnt5", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt6_reg[] = { + { "rschd_as_fc_cnt6", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_rschd_as_fc_cnt9_reg[] = { + { "rschd_as_fc_cnt9", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_smmu1_fc0_3_cnt_reg[] = { + { "se_smmu1_fc0_3_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu0_se_wr_done_cnt_reg[] = { + { "smmu0_se_wr_done_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_smmu0_wr_done_fc_cnt_reg[] = { + { "se_smmu0_wr_done_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_smmu1_se_wr_rsp_cnt_reg[] = { + { "smmu1_se_wr_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_smmu1_wr_rsp_fc_cnt_reg[] = { + { "se_smmu1_wr_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_alg_se_wr_rsp_cnt_reg[] = { + { 
"alg_se_wr_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_rschd_se_alg_wr_rsp_fc_cnt_reg[] = { + { "se_alg_wr_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_pful_cfg0_reg[] = { + { "kschd_pful_assert0_1", DPP_FIELD_FLAG_RW, 27, 7, 0x18, 0x0 }, + { "kschd_pful_negate0_1", DPP_FIELD_FLAG_RW, 20, 7, 0x18, 0x0 }, + { "kschd_pful_assert0_0", DPP_FIELD_FLAG_RW, 13, 7, 0x20, 0x0 }, + { "kschd_pful_negate0_0", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_pful_cfg1_reg[] = { + { "kschd_pful_assert1_1", DPP_FIELD_FLAG_RW, 25, 7, 0x20, 0x0 }, + { "kschd_pful_negate1_1", DPP_FIELD_FLAG_RW, 18, 7, 0x20, 0x0 }, + { "kschd_pful_assert1_0", DPP_FIELD_FLAG_RW, 11, 6, 0x0c, 0x0 }, + { "kschd_pful_negate1_0", DPP_FIELD_FLAG_RW, 5, 6, 0x0c, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_pful1_cfg_reg[] = { + { "ctrl_pful1_assert", DPP_FIELD_FLAG_RW, 7, 4, 0x3, 0x0 }, + { "ctrl_pful1_negate", DPP_FIELD_FLAG_RW, 3, 4, 0x2, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_pful2_cfg_reg[] = { + { "ctrl_pful2_assert", DPP_FIELD_FLAG_RW, 9, 5, 0x0a, 0x0 }, + { "ctrl_pful2_negate", DPP_FIELD_FLAG_RW, 4, 5, 0x08, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_pful3_cfg_reg[] = { + { "ctrl_pful3_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x38, 0x0 }, + { "ctrl_pful3_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x36, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_pful_cfg_reg[] = { + { "rschd_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x24, 0x0 }, + { "rschd_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_ept_cfg_reg[] = { + { "rschd_ept_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x1c, 0x0 }, + { "rschd_ept_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_alucmd_pful_cfg_reg[] = { + { "alucmd_pful_assert", DPP_FIELD_FLAG_RW, 11, 6, 0x18, 0x0 }, + { "alucmd_pful_negate", DPP_FIELD_FLAG_RW, 5, 6, 0x16, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_aluwr_pful_cfg_reg[] = { + { 
"aluwr_pful_assert", DPP_FIELD_FLAG_RW, 7, 4, 0x5, 0x0 }, + { "aluwr_pful_negate", DPP_FIELD_FLAG_RW, 3, 4, 0x4, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_wr_arb_pful_cfg0_reg[] = { + { "wr_arb_pful0_assert", DPP_FIELD_FLAG_RW, 11, 6, 0x15, 0x0 }, + { "wr_arb_pful0_negate", DPP_FIELD_FLAG_RW, 5, 6, 0x15, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_wr_arb_pful_cfg1_reg[] = { + { "wr_arb_pful1_assert", DPP_FIELD_FLAG_RW, 15, 8, 0x64, 0x0 }, + { "wr_arb_pful1_negate", DPP_FIELD_FLAG_RW, 7, 8, 0x64, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ord_pful_cfg_reg[] = { + { "ord_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x2a, 0x0 }, + { "ord_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x2a, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_dma_baddr_reg[] = { + { "cfg_dma_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_odma0_baddr_reg[] = { + { "cfg_odma0_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_odma1_baddr_reg[] = { + { "cfg_odma1_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_odma2_baddr_reg[] = { + { "cfg_odma2_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_odma_tdm_baddr_reg[] = { + { "cfg_odma_tdm_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_mcast_baddr_reg[] = { + { "cfg_mcast_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm0_reg[] = { + { "lpm0_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm0_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm1_reg[] = { + { "lpm1_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm1_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm2_reg[] = { + { "lpm2_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm2_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm3_reg[] = { + { "lpm3_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + 
{ "lpm3_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm4_reg[] = { + { "lpm4_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm4_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm5_reg[] = { + { "lpm5_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm5_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm6_reg[] = { + { "lpm6_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm6_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_lpm7_reg[] = { + { "lpm7_rsp_mode", DPP_FIELD_FLAG_RW, 21, 3, 0x0, 0x0 }, + { "lpm7_baddr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_stat_overflow_mode_reg[] = { + { "stat_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_init_en_cfg_tmp_reg[] = { + { "init_en_cfg_tmp31", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp30", DPP_FIELD_FLAG_RW, 30, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp29", DPP_FIELD_FLAG_RW, 29, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp28", DPP_FIELD_FLAG_RW, 28, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp27", DPP_FIELD_FLAG_RW, 27, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp26", DPP_FIELD_FLAG_RW, 26, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp25", DPP_FIELD_FLAG_RW, 25, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp24", DPP_FIELD_FLAG_RW, 24, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp23", DPP_FIELD_FLAG_RW, 23, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp22", DPP_FIELD_FLAG_RW, 22, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp21", DPP_FIELD_FLAG_RW, 21, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp20", DPP_FIELD_FLAG_RW, 20, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp19", DPP_FIELD_FLAG_RW, 19, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp18", DPP_FIELD_FLAG_RW, 18, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp17", DPP_FIELD_FLAG_RW, 
17, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp16", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp15", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp14", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp13", DPP_FIELD_FLAG_RW, 13, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp12", DPP_FIELD_FLAG_RW, 12, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp11", DPP_FIELD_FLAG_RW, 11, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp10", DPP_FIELD_FLAG_RW, 10, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp9", DPP_FIELD_FLAG_RW, 9, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp8", DPP_FIELD_FLAG_RW, 8, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp7", DPP_FIELD_FLAG_RW, 7, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp6", DPP_FIELD_FLAG_RW, 6, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp5", DPP_FIELD_FLAG_RW, 5, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp4", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp3", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp2", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp1", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "init_en_cfg_tmp0", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int_unmask_flag_reg[] = { + { "smmu0_int0_31_unmask_flag", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int0_en_reg[] = { + { "smmu0_int0_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int0_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int0_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int0_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int0_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int0_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int0_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int0_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int0_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int0_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int0_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int0_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { 
"smmu0_int0_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int0_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int0_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int0_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int0_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int0_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int0_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int0_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int0_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int0_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int0_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int0_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int0_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int0_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int0_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int0_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int0_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int0_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int0_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int0_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int0_mask_reg[] = { + { "smmu0_int0_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int0_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int0_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int0_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int0_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int0_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int0_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int0_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int0_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int0_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int0_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int0_mask20", DPP_FIELD_FLAG_RW, 20, 1, 
0x1, 0x0 }, + { "smmu0_int0_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int0_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int0_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int0_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int0_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int0_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int0_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int0_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int0_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int0_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int0_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int0_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int0_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int0_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int0_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int0_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int0_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int0_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int0_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int0_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int0_status_reg[] = { + { "smmu0_int0_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int0_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int0_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int0_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int0_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int0_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int0_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int0_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int0_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int0_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int0_status21", 
DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int0_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int0_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int0_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int0_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int0_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int0_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int0_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int0_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int0_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int0_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int0_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int0_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int0_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int0_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int0_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int0_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int0_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int0_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int0_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int0_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int0_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int1_en_reg[] = { + { "smmu0_int1_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int1_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int1_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int1_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int1_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int1_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int1_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int1_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int1_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { 
"smmu0_int1_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int1_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int1_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int1_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int1_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int1_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int1_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int1_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int1_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int1_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int1_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int1_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int1_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int1_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int1_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int1_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int1_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int1_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int1_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int1_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int1_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int1_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int1_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int1_mask_reg[] = { + { "smmu0_int1_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int1_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int1_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int1_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int1_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int1_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int1_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int1_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int1_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 
}, + { "smmu0_int1_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int1_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int1_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int1_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int1_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int1_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int1_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int1_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int1_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int1_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int1_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int1_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int1_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int1_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int1_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int1_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int1_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int1_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int1_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int1_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int1_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int1_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int1_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int1_status_reg[] = { + { "smmu0_int1_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int1_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int1_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int1_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int1_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int1_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int1_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int1_status24", DPP_FIELD_FLAG_RC, 24, 1, 
0x0, 0x0 }, + { "smmu0_int1_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int1_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int1_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int1_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int1_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int1_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int1_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int1_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int1_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int1_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int1_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int1_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int1_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int1_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int1_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int1_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int1_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int1_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int1_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int1_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int1_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int1_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int1_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int1_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int2_en_reg[] = { + { "smmu0_int2_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int2_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int2_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int2_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int2_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int2_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int2_en25", 
DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int2_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int2_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int2_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int2_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int2_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int2_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int2_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int2_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int2_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int2_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int2_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int2_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int2_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int2_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int2_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int2_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int2_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int2_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int2_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int2_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int2_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int2_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int2_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int2_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int2_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int2_mask_reg[] = { + { "smmu0_int2_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int2_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int2_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int2_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int2_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int2_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { 
"smmu0_int2_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int2_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int2_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int2_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int2_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int2_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int2_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int2_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int2_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int2_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int2_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int2_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int2_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int2_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int2_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int2_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int2_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int2_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int2_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int2_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int2_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int2_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int2_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int2_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int2_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int2_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int2_status_reg[] = { + { "smmu0_int2_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int2_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int2_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int2_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int2_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + 
{ "smmu0_int2_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int2_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int2_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int2_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int2_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int2_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int2_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int2_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int2_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int2_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int2_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int2_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int2_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int2_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int2_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int2_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int2_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int2_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int2_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int2_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int2_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int2_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int2_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int2_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int2_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int2_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int2_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int3_en_reg[] = { + { "smmu0_int3_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int3_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int3_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int3_en28", 
DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int3_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int3_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int3_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int3_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int3_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int3_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int3_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int3_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int3_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int3_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int3_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int3_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int3_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int3_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int3_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int3_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int3_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int3_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int3_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int3_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int3_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int3_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int3_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int3_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int3_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int3_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int3_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int3_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int3_mask_reg[] = { + { "smmu0_int3_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int3_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int3_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int3_mask28", 
DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int3_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int3_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int3_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int3_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int3_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int3_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int3_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int3_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int3_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int3_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int3_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int3_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int3_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int3_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int3_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int3_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int3_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int3_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int3_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int3_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int3_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int3_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int3_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int3_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int3_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int3_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int3_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int3_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int3_status_reg[] = { + { "smmu0_int3_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int3_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int3_status29", 
DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int3_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int3_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int3_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int3_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int3_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int3_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int3_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int3_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int3_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int3_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int3_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int3_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int3_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int3_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int3_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int3_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int3_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int3_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int3_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int3_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int3_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int3_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int3_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int3_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int3_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int3_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int3_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int3_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int3_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int4_en_reg[] = { + { "smmu0_int4_en31", DPP_FIELD_FLAG_RW, 
31, 1, 0x1, 0x0 }, + { "smmu0_int4_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int4_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int4_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int4_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int4_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int4_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int4_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int4_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int4_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int4_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int4_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int4_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int4_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int4_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int4_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int4_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int4_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int4_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int4_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int4_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int4_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int4_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int4_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int4_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int4_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int4_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int4_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int4_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int4_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int4_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int4_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int4_mask_reg[] = { + { "smmu0_int4_mask31", DPP_FIELD_FLAG_RW, 31, 1, 
0x1, 0x0 }, + { "smmu0_int4_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int4_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int4_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int4_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int4_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int4_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int4_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int4_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int4_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int4_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int4_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int4_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int4_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int4_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int4_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int4_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int4_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int4_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int4_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int4_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int4_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int4_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int4_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int4_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int4_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int4_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int4_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int4_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int4_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int4_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int4_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int4_status_reg[] 
= { + { "smmu0_int4_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int4_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int4_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int4_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int4_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int4_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int4_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int4_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int4_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int4_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int4_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int4_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int4_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int4_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int4_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int4_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int4_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int4_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int4_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int4_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int4_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int4_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int4_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int4_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int4_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int4_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int4_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int4_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int4_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int4_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int4_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 
}, + { "smmu0_int4_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int5_en_reg[] = { + { "smmu0_int5_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int5_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int5_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int5_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int5_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int5_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int5_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int5_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int5_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int5_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int5_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int5_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int5_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int5_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int5_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int5_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int5_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int5_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int5_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int5_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int5_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int5_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int5_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int5_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int5_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int5_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int5_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int5_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int5_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int5_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int5_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { 
"smmu0_int5_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int5_mask_reg[] = { + { "smmu0_int5_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int5_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int5_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int5_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int5_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int5_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int5_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int5_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int5_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int5_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int5_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int5_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int5_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int5_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int5_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int5_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int5_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int5_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int5_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int5_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int5_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int5_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int5_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int5_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int5_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int5_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int5_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int5_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int5_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int5_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { 
"smmu0_int5_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int5_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int5_status_reg[] = { + { "smmu0_int5_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int5_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int5_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int5_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int5_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int5_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int5_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int5_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int5_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int5_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int5_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int5_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int5_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int5_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int5_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int5_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int5_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int5_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int5_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int5_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int5_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int5_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int5_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int5_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int5_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int5_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int5_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int5_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { 
"smmu0_int5_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int5_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int5_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int5_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int6_en_reg[] = { + { "smmu0_int6_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int6_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int6_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int6_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int6_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int6_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int6_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int6_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int6_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int6_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int6_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int6_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int6_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int6_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int6_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int6_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int6_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int6_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int6_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int6_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int6_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int6_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int6_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int6_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int6_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int6_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int6_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int6_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, 
+ { "smmu0_int6_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int6_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int6_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int6_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int6_mask_reg[] = { + { "smmu0_int6_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int6_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int6_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int6_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int6_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int6_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int6_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int6_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int6_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int6_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int6_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int6_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int6_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int6_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int6_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int6_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int6_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int6_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int6_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int6_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int6_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int6_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int6_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int6_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int6_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int6_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int6_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { 
"smmu0_int6_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int6_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int6_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int6_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int6_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int6_status_reg[] = { + { "smmu0_int6_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int6_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int6_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int6_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int6_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int6_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int6_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int6_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int6_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int6_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int6_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int6_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int6_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int6_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int6_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int6_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int6_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int6_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int6_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int6_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int6_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int6_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int6_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int6_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int6_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { 
"smmu0_int6_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int6_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int6_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int6_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int6_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int6_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int6_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int7_en_reg[] = { + { "smmu0_int7_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int7_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int7_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int7_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int7_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int7_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int7_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int7_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int7_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int7_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int7_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int7_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int7_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int7_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int7_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int7_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int7_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int7_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int7_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int7_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int7_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int7_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int7_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int7_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int7_en7", DPP_FIELD_FLAG_RW, 7, 1, 
0x1, 0x0 }, + { "smmu0_int7_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int7_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int7_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int7_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int7_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int7_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int7_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int7_mask_reg[] = { + { "smmu0_int7_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int7_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int7_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int7_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int7_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int7_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int7_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int7_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int7_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int7_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int7_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int7_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int7_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int7_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int7_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int7_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int7_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int7_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int7_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int7_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int7_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int7_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int7_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int7_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { 
"smmu0_int7_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int7_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int7_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int7_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int7_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int7_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int7_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int7_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int7_status_reg[] = { + { "smmu0_int7_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int7_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int7_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int7_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int7_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int7_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int7_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int7_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int7_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int7_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int7_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int7_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int7_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int7_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int7_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int7_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int7_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int7_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int7_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int7_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int7_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int7_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int7_status9", 
DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int7_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int7_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int7_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int7_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int7_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int7_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int7_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int7_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int7_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int8_en_reg[] = { + { "smmu0_int8_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int8_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int8_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int8_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int8_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int8_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int8_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int8_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int8_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int8_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int8_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int8_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int8_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int8_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int8_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int8_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int8_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int8_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int8_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int8_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int8_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int8_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 
}, + { "smmu0_int8_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int8_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int8_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int8_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int8_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int8_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int8_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int8_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int8_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int8_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int8_mask_reg[] = { + { "smmu0_int8_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int8_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int8_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int8_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int8_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int8_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int8_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int8_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int8_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int8_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int8_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int8_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int8_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int8_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int8_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int8_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int8_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int8_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int8_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int8_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int8_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int8_mask10", 
DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int8_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int8_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int8_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int8_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int8_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int8_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int8_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int8_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int8_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int8_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int8_status_reg[] = { + { "smmu0_int8_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int8_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int8_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int8_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int8_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int8_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int8_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int8_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int8_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int8_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int8_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int8_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int8_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int8_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int8_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int8_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int8_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int8_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int8_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int8_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 
0x0 }, + { "smmu0_int8_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int8_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int8_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int8_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int8_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int8_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int8_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int8_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int8_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int8_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int8_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int8_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int9_en_reg[] = { + { "smmu0_int8_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int8_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int8_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int8_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int8_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int8_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int8_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int8_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int8_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int8_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int8_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int8_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int9_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int9_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int9_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int9_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int9_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int9_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int9_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { 
"smmu0_int9_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int9_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int9_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int9_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int9_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int9_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int9_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int9_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int9_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int9_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int9_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int9_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int9_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int9_mask_reg[] = { + { "smmu0_int9_mask0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int9_status_reg[] = { + { "smmu0_int9_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int10_en_reg[] = { + { "smmu0_int10_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "smmu0_int10_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "smmu0_int10_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "smmu0_int10_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "smmu0_int10_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "smmu0_int10_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "smmu0_int10_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "smmu0_int10_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "smmu0_int10_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "smmu0_int10_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "smmu0_int10_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "smmu0_int10_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "smmu0_int10_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int10_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int10_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 
0x0 }, + { "smmu0_int10_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int10_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int10_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int10_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int10_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int10_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int10_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int10_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int10_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int10_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int10_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int10_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int10_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int10_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int10_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int10_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int10_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int10_mask_reg[] = { + { "smmu0_int10_mask0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int10_status_reg[] = { + { "smmu0_int10_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int11_en_reg[] = { + { "smmu0_int11_en0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int11_mask_reg[] = { + { "smmu0_int11_mask0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int11_status_reg[] = { + { "smmu0_int11_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int12_en_reg[] = { + { "smmu0_int12_en0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int12_mask_reg[] = { + { "smmu0_int12_mask0_31", DPP_FIELD_FLAG_RW, 31, 32, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int12_status_reg[] = { + { 
"smmu0_int12_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int13_en_reg[] = { + { "smmu0_int13_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int13_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int13_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int13_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int13_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int13_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int13_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int13_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int13_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int13_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int13_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int13_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int13_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int13_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int13_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int13_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int13_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int13_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int13_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int13_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int13_mask_reg[] = { + { "smmu0_int13_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "smmu0_int13_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "smmu0_int13_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "smmu0_int13_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int13_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int13_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int13_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int13_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int13_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { 
"smmu0_int13_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int13_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int13_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int13_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int13_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int13_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int13_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int13_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int13_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int13_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int13_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int13_status_reg[] = { + { "smmu0_int13_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int13_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int13_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int13_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int13_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int13_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int13_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int13_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int13_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int13_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int13_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int13_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int13_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int13_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int13_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int13_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int13_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int13_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int13_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { 
"smmu0_int13_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int14_en_reg[] = { + { "smmu0_int14_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int14_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int14_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int14_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int14_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int14_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int14_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int14_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int14_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int14_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int14_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int14_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int14_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int14_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int14_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int14_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int14_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int14_mask_reg[] = { + { "smmu0_int14_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "smmu0_int14_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "smmu0_int14_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "smmu0_int14_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "smmu0_int14_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "smmu0_int14_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "smmu0_int14_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "smmu0_int14_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "smmu0_int14_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "smmu0_int14_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "smmu0_int14_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "smmu0_int14_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "smmu0_int14_mask4", 
DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "smmu0_int14_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "smmu0_int14_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "smmu0_int14_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "smmu0_int14_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int14_status_reg[] = { + { "smmu0_int14_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int14_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int14_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int14_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int14_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int14_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int14_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int14_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int14_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int14_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int14_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int14_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int14_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int14_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int14_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int14_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int14_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_ecc_unmask_flag_reg[] = { + { "smmu0_int53_unmask_flag", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int52_unmask_flag", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int51_unmask_flag", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int50_unmask_flag", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int49_unmask_flag", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int48_unmask_flag", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int47_unmask_flag", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { 
"smmu0_int46_unmask_flag", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int45_unmask_flag", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int44_unmask_flag", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int43_unmask_flag", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int42_unmask_flag", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int41_unmask_flag", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int40_unmask_flag", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int39_unmask_flag", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int38_unmask_flag", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int37_unmask_flag", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int36_unmask_flag", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int35_unmask_flag", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int34_unmask_flag", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int33_unmask_flag", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int32_unmask_flag", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int15_en_reg[] = { + { "smmu0_int15_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int15_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int15_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int15_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int15_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int15_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int15_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int15_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int15_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int15_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int15_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int15_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int15_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int15_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int15_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 
0x0 }, + { "smmu0_int15_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int15_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int15_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int15_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int15_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int15_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int15_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int15_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int15_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int15_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int15_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int15_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int15_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int15_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int15_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int15_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int15_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int15_mask_reg[] = { + { "smmu0_int15_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int15_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int15_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int15_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int15_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int15_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int15_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int15_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int15_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int15_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int15_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int15_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int15_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int15_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { 
"smmu0_int15_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int15_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int15_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int15_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int15_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int15_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int15_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int15_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int15_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int15_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int15_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int15_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int15_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int15_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int15_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int15_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int15_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int15_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int15_status_reg[] = { + { "smmu0_int15_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int15_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int15_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int15_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int15_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int15_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int15_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int15_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int15_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int15_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int15_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int15_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { 
"smmu0_int15_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int15_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int15_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int15_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int15_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int15_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int15_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int15_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int15_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int15_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int15_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int15_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int15_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int15_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int15_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int15_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int15_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int15_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int15_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int15_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int16_en_reg[] = { + { "smmu0_int16_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int16_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int16_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int16_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int16_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int16_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int16_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int16_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int16_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int16_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int16_en21", 
DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int16_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int16_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int16_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int16_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int16_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int16_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int16_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int16_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int16_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int16_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int16_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int16_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int16_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int16_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int16_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int16_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int16_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int16_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int16_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int16_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int16_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int16_mask_reg[] = { + { "smmu0_int16_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int16_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int16_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int16_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int16_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int16_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int16_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int16_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int16_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int16_mask22", DPP_FIELD_FLAG_RC, 
22, 1, 0x1, 0x0 }, + { "smmu0_int16_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int16_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int16_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int16_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int16_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int16_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int16_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int16_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int16_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int16_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int16_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int16_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int16_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int16_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int16_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int16_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int16_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int16_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int16_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int16_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int16_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int16_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int16_status_reg[] = { + { "smmu0_int16_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int16_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int16_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int16_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int16_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int16_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int16_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int16_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { 
"smmu0_int16_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int16_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int16_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int16_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int16_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int16_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int16_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int16_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int16_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int16_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int16_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int16_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int16_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int16_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int16_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int16_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int16_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int16_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int16_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int16_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int16_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int16_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int16_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int16_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int17_en_reg[] = { + { "smmu0_int17_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int17_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int17_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int17_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int17_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int17_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { 
"smmu0_int17_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int17_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int17_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int17_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int17_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int17_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int17_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int17_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int17_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int17_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int17_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int17_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int17_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int17_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int17_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int17_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int17_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int17_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int17_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int17_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int17_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int17_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int17_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int17_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int17_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int17_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int17_mask_reg[] = { + { "smmu0_int17_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int17_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int17_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int17_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int17_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int17_mask26", 
DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int17_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int17_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int17_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int17_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int17_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int17_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int17_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int17_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int17_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int17_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int17_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int17_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int17_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int17_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int17_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int17_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int17_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int17_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int17_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int17_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int17_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int17_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int17_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int17_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int17_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int17_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int17_status_reg[] = { + { "smmu0_int17_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int17_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int17_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int17_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 
0x0 }, + { "smmu0_int17_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int17_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int17_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int17_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int17_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int17_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int17_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int17_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int17_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int17_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int17_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int17_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int17_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int17_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int17_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int17_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int17_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int17_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int17_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int17_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int17_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int17_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int17_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int17_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int17_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int17_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int17_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int17_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int18_en_reg[] = { + { "smmu0_int18_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int18_en30", DPP_FIELD_FLAG_RC, 30, 
1, 0x1, 0x0 }, + { "smmu0_int18_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int18_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int18_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int18_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int18_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int18_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int18_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int18_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int18_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int18_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int18_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int18_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int18_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int18_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int18_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int18_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int18_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int18_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int18_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int18_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int18_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int18_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int18_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int18_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int18_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int18_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int18_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int18_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int18_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int18_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int18_mask_reg[] = { + { "smmu0_int18_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { 
"smmu0_int18_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int18_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int18_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int18_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int18_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int18_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int18_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int18_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int18_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int18_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int18_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int18_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int18_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int18_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int18_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int18_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int18_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int18_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int18_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int18_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int18_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int18_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int18_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int18_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int18_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int18_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int18_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int18_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int18_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int18_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int18_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_smmu0_int18_status_reg[] = { + { "smmu0_int18_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int18_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int18_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int18_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int18_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int18_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int18_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int18_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int18_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int18_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int18_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int18_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int18_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int18_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int18_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int18_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int18_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int18_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int18_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int18_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int18_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int18_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int18_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int18_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int18_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int18_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int18_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int18_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int18_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int18_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 
0x0 }, + { "smmu0_int18_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int18_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int19_en_reg[] = { + { "smmu0_int19_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int19_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int19_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int19_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int19_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int19_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int19_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int19_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int19_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int19_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int19_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int19_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int19_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int19_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int19_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int19_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int19_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int19_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int19_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int19_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int19_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int19_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int19_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int19_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int19_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int19_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int19_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int19_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int19_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int19_en2", 
DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int19_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int19_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int19_mask_reg[] = { + { "smmu0_int19_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int19_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int19_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int19_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int19_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int19_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int19_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int19_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int19_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int19_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int19_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int19_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int19_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int19_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int19_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int19_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int19_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int19_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int19_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int19_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int19_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int19_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int19_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int19_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int19_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int19_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int19_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int19_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { 
"smmu0_int19_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int19_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int19_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int19_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int19_status_reg[] = { + { "smmu0_int19_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int19_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int19_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int19_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int19_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int19_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int19_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int19_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int19_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int19_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int19_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int19_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int19_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int19_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int19_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int19_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int19_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int19_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int19_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int19_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int19_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int19_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int19_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int19_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int19_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int19_status6", DPP_FIELD_FLAG_RC, 6, 1, 
0x0, 0x0 }, + { "smmu0_int19_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int19_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int19_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int19_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int19_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int19_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int20_en_reg[] = { + { "smmu0_int20_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int20_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int20_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int20_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int20_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int20_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int20_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int20_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int20_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int20_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int20_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int20_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int20_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int20_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int20_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int20_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int20_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int20_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int20_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int20_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int20_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int20_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int20_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int20_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int20_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { 
"smmu0_int20_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int20_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int20_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int20_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int20_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int20_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int20_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int20_mask_reg[] = { + { "smmu0_int20_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int20_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int20_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int20_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int20_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int20_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int20_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int20_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int20_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int20_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int20_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int20_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int20_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int20_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int20_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int20_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int20_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int20_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int20_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int20_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int20_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int20_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int20_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int20_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 
0x0 }, + { "smmu0_int20_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int20_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int20_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int20_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int20_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int20_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int20_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int20_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int20_status_reg[] = { + { "smmu0_int20_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int20_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int20_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int20_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int20_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int20_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int20_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int20_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int20_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int20_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int20_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int20_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int20_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int20_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int20_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int20_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int20_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int20_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int20_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int20_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int20_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int20_status10", DPP_FIELD_FLAG_RC, 10, 
1, 0x0, 0x0 }, + { "smmu0_int20_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int20_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int20_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int20_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int20_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int20_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int20_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int20_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int20_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int20_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int21_en_reg[] = { + { "smmu0_int21_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int21_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int21_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int21_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int21_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int21_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int21_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int21_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int21_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int21_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int21_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int21_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int21_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int21_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int21_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int21_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int21_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int21_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int21_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int21_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int21_en11", DPP_FIELD_FLAG_RC, 11, 
1, 0x1, 0x0 }, + { "smmu0_int21_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int21_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int21_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int21_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int21_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int21_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int21_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int21_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int21_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int21_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int21_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int21_mask_reg[] = { + { "smmu0_int21_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int21_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int21_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int21_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int21_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int21_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int21_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int21_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int21_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int21_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int21_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int21_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int21_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int21_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int21_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int21_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int21_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int21_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int21_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int21_mask12", DPP_FIELD_FLAG_RC, 12, 
1, 0x1, 0x0 }, + { "smmu0_int21_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int21_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int21_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int21_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int21_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int21_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int21_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int21_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int21_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int21_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int21_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int21_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int21_status_reg[] = { + { "smmu0_int21_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int21_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int21_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int21_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int21_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int21_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int21_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int21_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int21_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int21_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int21_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int21_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int21_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int21_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int21_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int21_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int21_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int21_status14", DPP_FIELD_FLAG_RC, 14, 1, 
0x0, 0x0 }, + { "smmu0_int21_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int21_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int21_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int21_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int21_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int21_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int21_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int21_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int21_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int21_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int21_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int21_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int21_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int21_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int22_en_reg[] = { + { "smmu0_int22_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int22_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int22_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int22_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int22_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int22_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int22_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int22_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int22_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int22_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int22_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int22_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int22_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int22_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int22_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int22_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int22_en15", 
DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int22_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int22_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int22_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int22_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int22_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int22_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int22_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int22_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int22_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int22_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int22_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int22_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int22_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int22_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int22_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int22_mask_reg[] = { + { "smmu0_int22_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int22_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int22_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int22_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int22_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int22_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int22_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int22_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int22_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int22_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int22_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int22_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int22_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int22_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int22_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int22_mask16", 
DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int22_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int22_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int22_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int22_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int22_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int22_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int22_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int22_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int22_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int22_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int22_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int22_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int22_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int22_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int22_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int22_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int22_status_reg[] = { + { "smmu0_int22_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int22_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int22_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int22_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int22_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int22_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int22_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int22_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int22_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int22_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int22_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int22_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int22_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int22_status18", 
DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int22_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int22_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int22_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int22_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int22_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int22_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int22_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int22_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int22_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int22_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int22_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int22_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int22_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int22_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int22_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int22_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int22_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int22_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int23_en_reg[] = { + { "smmu0_int23_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int23_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int23_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int23_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int23_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int23_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int23_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int23_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int23_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int23_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int23_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int23_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 
0x0 }, + { "smmu0_int23_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int23_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int23_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int23_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int23_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int23_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int23_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int23_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int23_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int23_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int23_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int23_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int23_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int23_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int23_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int23_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int23_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int23_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int23_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int23_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int23_mask_reg[] = { + { "smmu0_int23_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int23_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int23_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int23_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int23_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int23_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int23_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int23_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int23_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int23_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int23_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { 
"smmu0_int23_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int23_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int23_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int23_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int23_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int23_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int23_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int23_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int23_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int23_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int23_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int23_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int23_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int23_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int23_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int23_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int23_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int23_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int23_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int23_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int23_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int23_status_reg[] = { + { "smmu0_int23_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int23_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int23_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int23_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int23_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int23_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int23_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int23_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int23_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { 
"smmu0_int23_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int23_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int23_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int23_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int23_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int23_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int23_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int23_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int23_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int23_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int23_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int23_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int23_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int23_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int23_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int23_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int23_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int23_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int23_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int23_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int23_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int23_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int23_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int24_en_reg[] = { + { "smmu0_int24_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int24_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int24_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int24_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int24_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int24_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int24_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { 
"smmu0_int24_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int24_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int24_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int24_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int24_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int24_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int24_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int24_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int24_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int24_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int24_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int24_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int24_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int24_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int24_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int24_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int24_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int24_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int24_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int24_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int24_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int24_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int24_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int24_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int24_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int24_mask_reg[] = { + { "smmu0_int24_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int24_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int24_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int24_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int24_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int24_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int24_mask25", 
DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int24_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int24_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int24_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int24_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int24_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int24_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int24_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int24_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int24_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int24_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int24_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int24_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int24_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int24_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int24_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int24_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int24_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int24_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int24_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int24_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int24_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int24_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int24_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int24_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int24_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int24_status_reg[] = { + { "smmu0_int24_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int24_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int24_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int24_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int24_status27", DPP_FIELD_FLAG_RC, 27, 1, 
0x0, 0x0 }, + { "smmu0_int24_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int24_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int24_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int24_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int24_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int24_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int24_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int24_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int24_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int24_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int24_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int24_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int24_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int24_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int24_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int24_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int24_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int24_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int24_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int24_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int24_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int24_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int24_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int24_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int24_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int24_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int24_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int25_en_reg[] = { + { "smmu0_int25_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int25_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int25_en29", DPP_FIELD_FLAG_RC, 
29, 1, 0x1, 0x0 }, + { "smmu0_int25_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int25_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int25_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int25_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int25_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int25_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int25_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int25_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int25_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int25_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int25_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int25_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int25_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int25_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int25_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int25_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int25_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int25_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int25_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int25_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int25_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int25_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int25_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int25_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int25_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int25_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int25_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int25_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int25_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int25_mask_reg[] = { + { "smmu0_int25_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int25_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { 
"smmu0_int25_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int25_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int25_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int25_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int25_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int25_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int25_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int25_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int25_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int25_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int25_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int25_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int25_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int25_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int25_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int25_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int25_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int25_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int25_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int25_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int25_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int25_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int25_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int25_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int25_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int25_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int25_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int25_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int25_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int25_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int25_status_reg[] = { + { "smmu0_int25_status31", 
DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int25_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int25_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int25_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int25_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int25_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int25_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int25_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int25_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int25_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int25_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int25_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int25_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int25_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int25_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int25_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int25_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int25_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int25_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int25_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int25_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int25_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int25_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int25_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int25_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int25_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int25_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int25_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int25_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int25_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int25_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, 
+ { "smmu0_int25_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int26_en_reg[] = { + { "smmu0_int26_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int26_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int26_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int26_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int26_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int26_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int26_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int26_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int26_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int26_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int26_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int26_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int26_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int26_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int26_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int26_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int26_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int26_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int26_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int26_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int26_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int26_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int26_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int26_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int26_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int26_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int26_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int26_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int26_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int26_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int26_en1", 
DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int26_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int26_mask_reg[] = { + { "smmu0_int26_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int26_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int26_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int26_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int26_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int26_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int26_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int26_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int26_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int26_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int26_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int26_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int26_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int26_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int26_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int26_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int26_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int26_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int26_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int26_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int26_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int26_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int26_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int26_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int26_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int26_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int26_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int26_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int26_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + 
{ "smmu0_int26_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int26_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int26_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int26_status_reg[] = { + { "smmu0_int26_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int26_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int26_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int26_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int26_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int26_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int26_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int26_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int26_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int26_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int26_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int26_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int26_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int26_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int26_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int26_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int26_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int26_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int26_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int26_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int26_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int26_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int26_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int26_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int26_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int26_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int26_status5", DPP_FIELD_FLAG_RC, 
5, 1, 0x0, 0x0 }, + { "smmu0_int26_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int26_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int26_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int26_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int26_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int27_en_reg[] = { + { "smmu0_int27_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int27_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int27_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int27_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int27_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int27_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int27_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int27_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int27_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int27_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int27_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int27_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int27_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int27_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int27_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int27_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int27_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int27_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int27_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int27_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int27_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int27_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int27_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int27_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int27_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int27_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + 
{ "smmu0_int27_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int27_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int27_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int27_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int27_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int27_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int27_mask_reg[] = { + { "smmu0_int27_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int27_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int27_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int27_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int27_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int27_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int27_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int27_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int27_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int27_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int27_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int27_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int27_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int27_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int27_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int27_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int27_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int27_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int27_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int27_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int27_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int27_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int27_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int27_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int27_mask7", DPP_FIELD_FLAG_RC, 7, 1, 
0x1, 0x0 }, + { "smmu0_int27_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int27_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int27_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int27_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int27_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int27_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int27_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int27_status_reg[] = { + { "smmu0_int27_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int27_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int27_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int27_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int27_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int27_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int27_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int27_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int27_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int27_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int27_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int27_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int27_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int27_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int27_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int27_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int27_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int27_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int27_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int27_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int27_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int27_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int27_status9", 
DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int27_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int27_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int27_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int27_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int27_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int27_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int27_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int27_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int27_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int28_en_reg[] = { + { "smmu0_int28_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int28_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int28_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int28_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int28_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int28_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int28_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int28_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int28_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int28_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int28_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int28_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int28_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int28_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int28_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int28_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int28_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int28_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int28_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int28_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int28_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int28_en10", 
DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int28_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int28_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int28_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int28_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int28_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int28_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int28_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int28_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int28_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int28_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int28_mask_reg[] = { + { "smmu0_int28_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int28_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int28_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int28_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int28_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int28_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int28_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int28_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int28_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int28_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int28_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int28_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int28_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int28_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int28_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int28_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int28_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int28_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int28_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int28_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { 
"smmu0_int28_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int28_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int28_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int28_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int28_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int28_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int28_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int28_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int28_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int28_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int28_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int28_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int28_status_reg[] = { + { "smmu0_int28_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int28_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int28_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int28_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int28_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int28_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int28_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int28_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int28_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int28_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int28_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int28_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int28_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int28_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int28_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int28_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int28_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int28_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { 
"smmu0_int28_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int28_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int28_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int28_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int28_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int28_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int28_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int28_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int28_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int28_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int28_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int28_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int28_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int28_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int29_en_reg[] = { + { "smmu0_int29_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int29_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int29_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int29_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int29_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int29_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int29_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int29_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int29_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int29_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int29_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int29_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int29_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int29_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int29_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int29_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int29_en15", DPP_FIELD_FLAG_RC, 15, 1, 
0x1, 0x0 }, + { "smmu0_int29_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int29_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int29_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int29_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int29_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int29_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int29_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int29_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int29_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int29_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int29_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int29_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int29_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int29_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int29_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int29_mask_reg[] = { + { "smmu0_int29_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int29_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int29_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int29_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int29_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int29_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int29_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int29_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int29_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int29_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int29_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int29_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int29_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int29_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int29_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int29_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 
0x0 }, + { "smmu0_int29_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int29_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int29_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int29_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int29_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int29_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int29_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int29_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int29_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int29_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int29_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int29_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int29_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int29_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int29_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int29_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int29_status_reg[] = { + { "smmu0_int29_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int29_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int29_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int29_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int29_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int29_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int29_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int29_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int29_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int29_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int29_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int29_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int29_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int29_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { 
"smmu0_int29_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int29_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int29_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int29_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int29_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int29_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int29_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int29_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int29_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int29_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int29_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int29_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int29_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int29_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int29_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int29_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int29_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int29_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int30_en_reg[] = { + { "smmu0_int30_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int30_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int30_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int30_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int30_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int30_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int30_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int30_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int30_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int30_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int30_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int30_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int30_en19", 
DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int30_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int30_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int30_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int30_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int30_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int30_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int30_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int30_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int30_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int30_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int30_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int30_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int30_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int30_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int30_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int30_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int30_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int30_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int30_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int30_mask_reg[] = { + { "smmu0_int30_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int30_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int30_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int30_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int30_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int30_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int30_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int30_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int30_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int30_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int30_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int30_mask20", 
DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int30_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int30_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int30_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int30_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int30_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int30_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int30_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int30_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int30_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int30_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int30_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int30_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int30_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int30_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int30_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int30_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int30_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int30_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int30_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int30_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int30_status_reg[] = { + { "smmu0_int30_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int30_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int30_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int30_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int30_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int30_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int30_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int30_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int30_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int30_status22", DPP_FIELD_FLAG_RC, 
22, 1, 0x0, 0x0 }, + { "smmu0_int30_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int30_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int30_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int30_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int30_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int30_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int30_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int30_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int30_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int30_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int30_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int30_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int30_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int30_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int30_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int30_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int30_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int30_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int30_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int30_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int30_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int30_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int31_en_reg[] = { + { "smmu0_int31_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int31_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int31_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int31_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int31_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int31_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int31_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int31_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 
}, + { "smmu0_int31_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int31_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int31_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int31_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int31_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int31_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int31_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int31_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int31_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int31_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int31_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int31_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int31_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int31_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int31_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int31_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int31_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int31_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int31_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int31_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int31_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int31_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int31_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int31_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int31_mask_reg[] = { + { "smmu0_int31_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int31_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int31_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int31_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int31_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int31_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int31_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { 
"smmu0_int31_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int31_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int31_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int31_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int31_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int31_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int31_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int31_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int31_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int31_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int31_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int31_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int31_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int31_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int31_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int31_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int31_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int31_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int31_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int31_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int31_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int31_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int31_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int31_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int31_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int31_status_reg[] = { + { "smmu0_int31_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int31_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int31_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int31_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int31_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int31_status26", 
DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int31_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int31_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int31_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int31_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int31_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int31_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int31_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int31_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int31_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int31_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int31_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int31_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int31_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int31_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int31_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int31_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int31_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int31_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int31_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int31_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int31_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int31_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int31_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int31_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int31_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int31_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int32_en_reg[] = { + { "smmu0_int32_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int32_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int32_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int32_en28", 
DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int32_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int32_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int32_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int32_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int32_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int32_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int32_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int32_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int32_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int32_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int32_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int32_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int32_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int32_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int32_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int32_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int32_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int32_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int32_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int32_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int32_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int32_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int32_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int32_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int32_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int32_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int32_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int32_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int32_mask_reg[] = { + { "smmu0_int32_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int32_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int32_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 
0x0 }, + { "smmu0_int32_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int32_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int32_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int32_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int32_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int32_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int32_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int32_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int32_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int32_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int32_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int32_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int32_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int32_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int32_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int32_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int32_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int32_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int32_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int32_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int32_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int32_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int32_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int32_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int32_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int32_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int32_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int32_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int32_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int32_status_reg[] = { + { "smmu0_int32_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int32_status30", 
DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int32_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int32_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int32_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int32_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int32_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int32_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int32_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int32_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int32_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int32_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int32_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int32_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int32_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int32_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int32_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int32_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int32_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int32_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int32_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int32_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int32_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int32_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int32_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int32_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int32_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int32_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int32_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int32_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int32_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int32_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int33_en_reg[] = { + { "smmu0_int33_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int33_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int33_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int33_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int33_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int33_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int33_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int33_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int33_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int33_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int33_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int33_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int33_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int33_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int33_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int33_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int33_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int33_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int33_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int33_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int33_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int33_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int33_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int33_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int33_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int33_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int33_en5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int33_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int33_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int33_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int33_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int33_en0", 
DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int33_mask_reg[] = { + { "smmu0_int33_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int33_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int33_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int33_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int33_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int33_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int33_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int33_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int33_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int33_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int33_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int33_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int33_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int33_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int33_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int33_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int33_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int33_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int33_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int33_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int33_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int33_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int33_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int33_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int33_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int33_mask6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int33_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int33_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int33_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int33_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, 
+ { "smmu0_int33_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int33_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int33_status_reg[] = { + { "smmu0_int33_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int33_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int33_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int33_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int33_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int33_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int33_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int33_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int33_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int33_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int33_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int33_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int33_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int33_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int33_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int33_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int33_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int33_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int33_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int33_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int33_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int33_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int33_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "smmu0_int33_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int33_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int33_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int33_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int33_status4", 
DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int33_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int33_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int33_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int33_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int34_en_reg[] = { + { "smmu0_int34_en31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int34_en30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int34_en29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int34_en28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int34_en27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int34_en26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int34_en25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int34_en24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int34_en23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int34_en22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int34_en21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int34_en20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int34_en19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int34_en18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int34_en17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int34_en16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int34_en15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int34_en14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int34_en13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int34_en12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int34_en11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int34_en10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int34_en9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int34_en8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int34_en7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int34_en6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int34_en5", DPP_FIELD_FLAG_RC, 5, 
1, 0x1, 0x0 }, + { "smmu0_int34_en4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int34_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int34_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int34_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int34_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int34_mask_reg[] = { + { "smmu0_int34_mask31", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "smmu0_int34_mask30", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "smmu0_int34_mask29", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "smmu0_int34_mask28", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "smmu0_int34_mask27", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "smmu0_int34_mask26", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "smmu0_int34_mask25", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "smmu0_int34_mask24", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "smmu0_int34_mask23", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "smmu0_int34_mask22", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "smmu0_int34_mask21", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "smmu0_int34_mask20", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "smmu0_int34_mask19", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "smmu0_int34_mask18", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "smmu0_int34_mask17", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "smmu0_int34_mask16", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "smmu0_int34_mask15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "smmu0_int34_mask14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "smmu0_int34_mask13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "smmu0_int34_mask12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "smmu0_int34_mask11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "smmu0_int34_mask10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "smmu0_int34_mask9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "smmu0_int34_mask8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "smmu0_int34_mask7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "smmu0_int34_mask6", 
DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "smmu0_int34_mask5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "smmu0_int34_mask4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "smmu0_int34_mask3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int34_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int34_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int34_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int34_status_reg[] = { + { "smmu0_int34_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "smmu0_int34_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "smmu0_int34_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "smmu0_int34_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "smmu0_int34_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "smmu0_int34_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "smmu0_int34_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "smmu0_int34_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "smmu0_int34_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "smmu0_int34_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "smmu0_int34_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "smmu0_int34_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "smmu0_int34_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "smmu0_int34_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "smmu0_int34_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "smmu0_int34_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "smmu0_int34_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "smmu0_int34_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "smmu0_int34_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "smmu0_int34_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "smmu0_int34_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "smmu0_int34_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "smmu0_int34_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { 
"smmu0_int34_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "smmu0_int34_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "smmu0_int34_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "smmu0_int34_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "smmu0_int34_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "smmu0_int34_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int34_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int34_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int34_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int35_en_reg[] = { + { "smmu0_int35_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int35_mask_reg[] = { + { "smmu0_int35_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int35_status_reg[] = { + { "smmu0_int35_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int36_en_reg[] = { + { "smmu0_int36_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int36_mask_reg[] = { + { "smmu0_int36_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int36_status_reg[] = { + { "smmu0_int36_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int37_en_reg[] = { + { "smmu0_int37_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int37_mask_reg[] = { + { "smmu0_int37_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int37_status_reg[] = { + { "smmu0_int37_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int38_en_reg[] = { + { "smmu0_int38_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int38_mask_reg[] = { + { "smmu0_int38_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, 
+}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int38_status_reg[] = { + { "smmu0_int38_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int39_en_reg[] = { + { "smmu0_int39_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int39_mask_reg[] = { + { "smmu0_int39_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int39_status_reg[] = { + { "smmu0_int39_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int40_en_reg[] = { + { "smmu0_int40_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int40_mask_reg[] = { + { "smmu0_int40_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int40_status_reg[] = { + { "smmu0_int40_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int41_en_reg[] = { + { "smmu0_int41_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int41_mask_reg[] = { + { "smmu0_int41_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int41_status_reg[] = { + { "smmu0_int41_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int42_en_reg[] = { + { "smmu0_int42_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int42_mask_reg[] = { + { "smmu0_int42_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int42_status_reg[] = { + { "smmu0_int42_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int43_en_reg[] = { + { "smmu0_int43_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int43_mask_reg[] = { + { "smmu0_int43_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_smmu0_int43_status_reg[] = { + { "smmu0_int43_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int44_en_reg[] = { + { "smmu0_int44_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int44_mask_reg[] = { + { "smmu0_int44_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int44_status_reg[] = { + { "smmu0_int44_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int45_en_reg[] = { + { "smmu0_int45_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int45_mask_reg[] = { + { "smmu0_int45_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int45_status_reg[] = { + { "smmu0_int45_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int46_en_reg[] = { + { "smmu0_int46_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int46_mask_reg[] = { + { "smmu0_int46_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int46_status_reg[] = { + { "smmu0_int46_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int47_en_reg[] = { + { "smmu0_int47_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int47_mask_reg[] = { + { "smmu0_int47_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int47_status_reg[] = { + { "smmu0_int47_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int48_en_reg[] = { + { "smmu0_int48_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int48_mask_reg[] = { + { "smmu0_int48_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int48_status_reg[] = { + { 
"smmu0_int48_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int49_en_reg[] = { + { "smmu0_int49_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int49_mask_reg[] = { + { "smmu0_int49_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int49_status_reg[] = { + { "smmu0_int49_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int50_en_reg[] = { + { "smmu0_int50_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int50_mask_reg[] = { + { "smmu0_int50_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int50_status_reg[] = { + { "smmu0_int50_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int51_en_reg[] = { + { "smmu0_int51_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int51_mask_reg[] = { + { "smmu0_int51_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int51_status_reg[] = { + { "smmu0_int51_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int52_en_reg[] = { + { "smmu0_int52_en0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int52_mask_reg[] = { + { "smmu0_int52_mask0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int52_status_reg[] = { + { "smmu0_int52_status0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int53_en_reg[] = { + { "smmu0_int53_en3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int53_en2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int53_en1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int53_en0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int53_mask_reg[] = { + { "smmu0_int53_mask3", 
DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "smmu0_int53_mask2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "smmu0_int53_mask1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_int53_mask0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_int53_status_reg[] = { + { "smmu0_int53_status15", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_int53_status14", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_int53_status13", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_int53_status12", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl0_arbiter_ecc_bypass_reg[] = { + { "ctrl1_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl1_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { 
"ctrl0_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl0_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl2_arbiter_ecc_bypass_reg[] = { + { "ctrl3_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 
0x0 }, + { "ctrl3_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl3_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl2_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl4_arbiter_ecc_bypass_reg[] = { + { "ctrl5_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 
23, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl5_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl4_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl6_arbiter_ecc_bypass_reg[] = { + { "ctrl7_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_12", 
DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl7_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl6_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { 
"ctrl6_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl8_arbiter_ecc_bypass_reg[] = { + { "ctrl9_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl9_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 
1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl8_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl10_arbiter_ecc_bypass_reg[] = { + { "ctrl11_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl11_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { 
"ctrl10_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl10_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl12_arbiter_ecc_bypass_reg[] = { + { "ctrl13_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_1", 
DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl13_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl12_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl14_arbiter_ecc_bypass_reg[] = { + { "ctrl15_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 
0x0 }, + { "ctrl15_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl15_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl14_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl16_arbiter_ecc_bypass_reg[] = { + { "ctrl17_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { 
"ctrl17_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl17_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { 
"ctrl16_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl16_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl18_arbiter_ecc_bypass_reg[] = { + { "ctrl19_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl19_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { 
"ctrl18_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl18_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl20_arbiter_ecc_bypass_reg[] = { + { "ctrl21_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl21_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_13", 
DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl20_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl22_arbiter_ecc_bypass_reg[] = { + { "ctrl23_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 
}, + { "ctrl23_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl23_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl22_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl24_arbiter_ecc_bypass_reg[] = { + { "ctrl25_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { 
"ctrl25_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl25_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl24_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl26_arbiter_ecc_bypass_reg[] = { + { "ctrl27_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_14", 
DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl27_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 
0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl26_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl28_arbiter_ecc_bypass_reg[] = { + { "ctrl29_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl29_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { 
"ctrl28_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl28_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl30_arbiter_ecc_bypass_reg[] = { + { "ctrl31_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 31, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_14", DPP_FIELD_FLAG_RC, 30, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 29, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 28, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 27, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 26, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 25, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 24, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 23, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 22, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 21, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 20, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 19, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 18, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 17, 1, 0x1, 0x0 }, + { "ctrl31_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 16, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_15", DPP_FIELD_FLAG_RC, 15, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_14", 
DPP_FIELD_FLAG_RC, 14, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_13", DPP_FIELD_FLAG_RC, 13, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_12", DPP_FIELD_FLAG_RC, 12, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_11", DPP_FIELD_FLAG_RC, 11, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_10", DPP_FIELD_FLAG_RC, 10, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_9", DPP_FIELD_FLAG_RC, 9, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_8", DPP_FIELD_FLAG_RC, 8, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_7", DPP_FIELD_FLAG_RC, 7, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_6", DPP_FIELD_FLAG_RC, 6, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_5", DPP_FIELD_FLAG_RC, 5, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_4", DPP_FIELD_FLAG_RC, 4, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_3", DPP_FIELD_FLAG_RC, 3, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_2", DPP_FIELD_FLAG_RC, 2, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "ctrl30_arbiter_ecc_bypass_0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_req_ecc_bypass_reg[] = { + { "ctrl_req_ecc_bypass_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_info_ecc_bypass_reg[] = { + { "ctrl_info_ecc_bypass_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_rschd_ecc_bypass_reg[] = { + { "smmu0_rschd_ecc_bypass_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0xffffffff, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_wr_ecc_bypass_reg[] = { + { "smmu0_wr_ecc_bypass1", DPP_FIELD_FLAG_RC, 1, 1, 0x1, 0x0 }, + { "smmu0_wr_ecc_bypass0", DPP_FIELD_FLAG_RC, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl0_arbiter_ecc_err_reg[] = { + { "ctrl0_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 
28, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl0_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_ctrl1_arbiter_ecc_err_reg[] = { + { "ctrl1_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { 
"ctrl1_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl1_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl2_arbiter_ecc_err_reg[] = { + { "ctrl2_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 
0x0 }, + { "ctrl2_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl2_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl3_arbiter_ecc_err_reg[] = { + { "ctrl3_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 
1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl3_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl4_arbiter_ecc_err_reg[] = { + { "ctrl4_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 
16, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl4_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl5_arbiter_ecc_err_reg[] = { + { "ctrl5_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_20", 
DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl5_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl6_arbiter_ecc_err_reg[] = { + { "ctrl6_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { 
"ctrl6_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl6_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl7_arbiter_ecc_err_reg[] = { + { "ctrl7_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, 
+ { "ctrl7_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl7_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl8_arbiter_ecc_err_reg[] = { + { "ctrl8_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_4", 
DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl8_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl9_arbiter_ecc_err_reg[] = { + { "ctrl9_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { 
"ctrl9_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl9_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl10_arbiter_ecc_err_reg[] = { + { "ctrl10_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_13", 
DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl10_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl11_arbiter_ecc_err_reg[] = { + { "ctrl11_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, 
+ { "ctrl11_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl11_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl12_arbiter_ecc_err_reg[] = { + { "ctrl12_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_22", 
DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl12_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl13_arbiter_ecc_err_reg[] = { + { "ctrl13_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, 
+ { "ctrl13_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl13_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl14_arbiter_ecc_err_reg[] = { + { "ctrl14_arbiter_ecc_err_31", 
DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 
0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl14_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl15_arbiter_ecc_err_reg[] = { + { "ctrl15_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { 
"ctrl15_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl15_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl16_arbiter_ecc_err_reg[] = { + { "ctrl16_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_12", 
DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl16_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl17_arbiter_ecc_err_reg[] = { + { "ctrl17_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, 
+ { "ctrl17_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl17_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl18_arbiter_ecc_err_reg[] = { + { "ctrl18_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_21", 
DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl18_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl19_arbiter_ecc_err_reg[] = { + { "ctrl19_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, 
+ { "ctrl19_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl19_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl20_arbiter_ecc_err_reg[] = { + { "ctrl20_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_30", 
DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 
0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl20_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl21_arbiter_ecc_err_reg[] = { + { "ctrl21_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { 
"ctrl21_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl21_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl22_arbiter_ecc_err_reg[] = { + { "ctrl22_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_11", 
DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl22_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl23_arbiter_ecc_err_reg[] = { + { "ctrl23_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, 
+ { "ctrl23_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl23_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl24_arbiter_ecc_err_reg[] = { + { "ctrl24_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_20", 
DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl24_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl25_arbiter_ecc_err_reg[] = { + { "ctrl25_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, 
+ { "ctrl25_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl25_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl26_arbiter_ecc_err_reg[] = { + { "ctrl26_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_29", 
DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 
0x0, 0x0 }, + { "ctrl26_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl27_arbiter_ecc_err_reg[] = { + { "ctrl27_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { 
"ctrl27_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl27_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl28_arbiter_ecc_err_reg[] = { + { "ctrl28_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_10", 
DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl28_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl29_arbiter_ecc_err_reg[] = { + { "ctrl29_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, 
+ { "ctrl29_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl29_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl30_arbiter_ecc_err_reg[] = { + { "ctrl30_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_19", 
DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl30_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl31_arbiter_ecc_err_reg[] = { + { "ctrl31_arbiter_ecc_err_31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, 
+ { "ctrl31_arbiter_ecc_err_23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "ctrl31_arbiter_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_req_ecc_single_err_reg[] = { + { "ctrl_req_ecc_single_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_req_ecc_double_err_reg[] = { + { "ctrl_req_ecc_double_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_ctrl_info_ecc_single_err_reg[] = { + { "ctrl_info_ecc_single_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_info_ecc_double_err_reg[] = { + { "ctrl_info_ecc_double_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_wr_ecc_err_reg[] = { + { "smmu0_wr_ecc_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "smmu0_wr_ecc_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "smmu0_wr_ecc_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "smmu0_wr_ecc_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_rschd_ecc_single_err_reg[] = { + { "smmu0_rschd_ecc_single_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_rschd_ecc_double_err_reg[] = { + { "smmu0_rschd_ecc_double_err_0_31", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ord_fifo_empty_reg[] = { + { "ord_fifo_empty", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_wr_arb_fifo_empty_reg[] = { + { "wr_arb_fifo_empty", DPP_FIELD_FLAG_RO, 3, 4, 0xf, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty0_reg[] = { + { "ctrl_fifo_empty0_5", DPP_FIELD_FLAG_RO, 29, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty0_4", DPP_FIELD_FLAG_RO, 24, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty0_3", DPP_FIELD_FLAG_RO, 19, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty0_2", DPP_FIELD_FLAG_RO, 14, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty0_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty0_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty1_reg[] = { + { "ctrl_fifo_empty1_5", DPP_FIELD_FLAG_RO, 29, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty1_4", DPP_FIELD_FLAG_RO, 24, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty1_3", DPP_FIELD_FLAG_RO, 19, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty1_2", DPP_FIELD_FLAG_RO, 14, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty1_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty1_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 
0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty2_reg[] = { + { "ctrl_fifo_empty2_5", DPP_FIELD_FLAG_RO, 29, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty2_4", DPP_FIELD_FLAG_RO, 24, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty2_3", DPP_FIELD_FLAG_RO, 19, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty2_2", DPP_FIELD_FLAG_RO, 14, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty2_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty2_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty3_reg[] = { + { "ctrl_fifo_empty3_5", DPP_FIELD_FLAG_RO, 29, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty3_4", DPP_FIELD_FLAG_RO, 24, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty3_3", DPP_FIELD_FLAG_RO, 19, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty3_2", DPP_FIELD_FLAG_RO, 14, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty3_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty3_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty4_reg[] = { + { "ctrl_fifo_empty4_5", DPP_FIELD_FLAG_RO, 29, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty4_4", DPP_FIELD_FLAG_RO, 24, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty4_3", DPP_FIELD_FLAG_RO, 19, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty4_2", DPP_FIELD_FLAG_RO, 14, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty4_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty4_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ctrl_fifo_empty5_reg[] = { + { "ctrl_fifo_empty5_1", DPP_FIELD_FLAG_RO, 9, 5, 0x1f, 0x0 }, + { "ctrl_fifo_empty5_0", DPP_FIELD_FLAG_RO, 4, 5, 0x1f, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty0_reg[] = { + { "kschd_fifo_empty0", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty1_reg[] = { + { "kschd_fifo_empty1", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty2_reg[] = { + { "kschd_fifo_empty2", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty3_reg[] = { + { "kschd_fifo_empty3", 
DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty4_reg[] = { + { "kschd_fifo_empty4", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty5_reg[] = { + { "kschd_fifo_empty5", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty6_reg[] = { + { "kschd_fifo_empty6", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty7_reg[] = { + { "kschd_fifo_empty7", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty8_reg[] = { + { "kschd_fifo_empty8", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty9_reg[] = { + { "kschd_fifo_empty9", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty10_reg[] = { + { "kschd_fifo_empty10", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty11_reg[] = { + { "kschd_fifo_empty11", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty12_reg[] = { + { "kschd_fifo_empty12", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty13_reg[] = { + { "kschd_fifo_empty13", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty14_reg[] = { + { "kschd_fifo_empty14", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty15_reg[] = { + { "kschd_fifo_empty15", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty16_reg[] = { + { "kschd_fifo_empty16", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty17_reg[] = { + { "kschd_fifo_empty17", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty18_reg[] = { + { "kschd_fifo_empty18", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_kschd_fifo_empty19_reg[] = { + { "kschd_fifo_empty19", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty20_reg[] = { + { "kschd_fifo_empty20", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty21_reg[] = { + { "kschd_fifo_empty21", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty22_reg[] = { + { "kschd_fifo_empty22", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty23_reg[] = { + { "kschd_fifo_empty23", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty24_reg[] = { + { "kschd_fifo_empty24", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty25_reg[] = { + { "kschd_fifo_empty25", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty26_reg[] = { + { "kschd_fifo_empty26", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty27_reg[] = { + { "kschd_fifo_empty27", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty28_reg[] = { + { "kschd_fifo_empty28", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty29_reg[] = { + { "kschd_fifo_empty29", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty30_reg[] = { + { "kschd_fifo_empty30", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_kschd_fifo_empty31_reg[] = { + { "kschd_fifo_empty31", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty0_reg[] = { + { "rschd_fifo_empty0", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty1_reg[] = { + { "rschd_fifo_empty1", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty2_reg[] = { + { "rschd_fifo_empty2", 
DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty3_reg[] = { + { "rschd_fifo_empty3", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty4_reg[] = { + { "rschd_fifo_empty4", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty5_reg[] = { + { "rschd_fifo_empty5", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty6_reg[] = { + { "rschd_fifo_empty6", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty7_reg[] = { + { "rschd_fifo_empty7", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty8_reg[] = { + { "rschd_fifo_empty8", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty9_reg[] = { + { "rschd_fifo_empty9", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty10_reg[] = { + { "rschd_fifo_empty10", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty11_reg[] = { + { "rschd_fifo_empty11", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty12_reg[] = { + { "rschd_fifo_empty12", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty13_reg[] = { + { "rschd_fifo_empty13", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty14_reg[] = { + { "rschd_fifo_empty14", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty15_reg[] = { + { "rschd_fifo_empty15", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty16_reg[] = { + { "rschd_fifo_empty16", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty17_reg[] = { + { "rschd_fifo_empty17", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T 
g_smmu0_smmu0_rschd_fifo_empty18_reg[] = { + { "rschd_fifo_empty18", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty19_reg[] = { + { "rschd_fifo_empty19", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty20_reg[] = { + { "rschd_fifo_empty20", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty21_reg[] = { + { "rschd_fifo_empty21", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty22_reg[] = { + { "rschd_fifo_empty22", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty23_reg[] = { + { "rschd_fifo_empty23", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty24_reg[] = { + { "rschd_fifo_empty24", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty25_reg[] = { + { "rschd_fifo_empty25", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty26_reg[] = { + { "rschd_fifo_empty26", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty27_reg[] = { + { "rschd_fifo_empty27", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty28_reg[] = { + { "rschd_fifo_empty28", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty29_reg[] = { + { "rschd_fifo_empty29", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty30_reg[] = { + { "rschd_fifo_empty30", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_rschd_fifo_empty31_reg[] = { + { "rschd_fifo_empty31", DPP_FIELD_FLAG_RO, 14, 15, 0x7ffe, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ept_flag_reg[] = { + { "ept_flag8", DPP_FIELD_FLAG_RO, 8, 1, 0x1, 0x0 }, + { "ept_flag7", DPP_FIELD_FLAG_RO, 7, 1, 0x1, 0x0 }, + { "ept_flag6", DPP_FIELD_FLAG_RO, 6, 1, 0x1, 0x0 }, + { 
"ept_flag5", DPP_FIELD_FLAG_RO, 5, 1, 0x1, 0x0 }, + { "ept_flag4", DPP_FIELD_FLAG_RO, 4, 1, 0x1, 0x0 }, + { "ept_flag3", DPP_FIELD_FLAG_RO, 3, 1, 0x1, 0x0 }, + { "ept_flag2", DPP_FIELD_FLAG_RO, 2, 1, 0x1, 0x0 }, + { "ept_flag1", DPP_FIELD_FLAG_RO, 1, 1, 0x1, 0x0 }, + { "ept_flag0", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ppu_soft_rst_reg[] = { + { "ppu_soft_rst", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_as_mac_age_fc_cnt_reg[] = { + { "smmu0_as_mac_age_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_marc_se_parser_fc_cnt_reg[] = { + { "smmu0_marc_se_parser_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_wr_arb_cpu_fc_cnt_reg[] = { + { "wr_arb_cpu_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_lpm_as_fc_cnt_reg[] = { + { "smmu0_lpm_as_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_lpm_as_smmu0_fc_cnt_reg[] = { + { "lpm_as_smmu0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_etcam1_0_as_fc_cnt_reg[] = { + { "smmu0_etcam1_0_as_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_as_etcam1_0_smmu0_fc_cnt_reg[] = { + { "as_etcam1_0_smmu0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_ppu_mcast_fc_cnt_reg[] = { + { "smmu0_ppu_mcast_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ppu_smmu0_mcast_fc_cnt_reg[] = { + { "ppu_smmu0_mcast_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_odma_smmu0_tdm_fc_rsp_fc_cnt_reg[] = { + { "odma_smmu0_tdm_fc_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_odma_tdm_fc_key_fc_cnt_reg[] = { + { "smmu0_odma_tdm_fc_key_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_odma_fc_cnt_reg[] = { + { "smmu0_odma_fc_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_cfg_tab_rd_fc_cnt_reg[] = { + { "smmu0_cfg_tab_rd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_stat_fc15_0_cnt_reg[] = { + { "smmu0_stat_fc15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_stat_smmu0_fc15_0_cnt_reg[] = { + { "stat_smmu0_fc15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_ppu_mex5_0_fc_cnt_reg[] = { + { "smmu0_ppu_mex5_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ppu_smmu0_mex5_0_fc_cnt_reg[] = { + { "ppu_smmu0_mex5_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_as_smmu0_mac_age_req_cnt_reg[] = { + { "as_smmu0_mac_age_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_se_parser_smmu0_marc_key_cnt_reg[] = { + { "se_parser_smmu0_marc_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rdat_cnt_reg[] = { + { "cpu_ind_rdat_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_rd_req_cnt_reg[] = { + { "cpu_ind_rd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cpu_ind_wr_req_cnt_reg[] = { + { "cpu_ind_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_plcr_rsp0_cnt_reg[] = { + { "smmu0_plcr_rsp0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_plcr_smmu0_req0_cnt_reg[] = { + { "plcr_smmu0_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_lpm_as_rsp_cnt_reg[] = { + { "smmu0_lpm_as_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_lpm_as_smmu0_req_cnt_reg[] = { + { "lpm_as_smmu0_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_etcam1_0_as_rsp_cnt_reg[] = { + { "smmu0_etcam1_0_as_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_etcam1_0_as_smmu0_req_cnt_reg[] = { + { "etcam1_0_as_smmu0_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_ppu_mcast_rsp_cnt_reg[] = { + { "smmu0_ppu_mcast_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ppu_smmu0_mcast_key_cnt_reg[] = { + { "ppu_smmu0_mcast_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_odma_tdm_mc_rsp_cnt_reg[] = { + { "smmu0_odma_tdm_mc_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_odma_smmu0_tdm_mc_key_cnt_reg[] = { + { "odma_smmu0_tdm_mc_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_odma_rsp_cnt_reg[] = { + { "smmu0_odma_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_odma_smmu0_cmd_cnt_reg[] = { + { "odma_smmu0_cmd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_cfg_tab_rdat_cnt_reg[] = { + { "smmu0_cfg_tab_rdat_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_cfg_smmu0_tab_rd_cnt_reg[] = { + { "cfg_smmu0_tab_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_stat_rsp15_0_cnt_reg[] = { + { "smmu0_stat_rsp15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_stat_smmu0_req15_0_cnt_reg[] = { + { "stat_smmu0_req15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_smmu0_ppu_mex5_0_rsp_cnt_reg[] = { + { "smmu0_ppu_mex5_0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ppu_smmu0_mex5_0_key_cnt_reg[] = { + { "ppu_smmu0_mex5_0_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ftm_stat_smmu0_req0_cnt_reg[] = { + { "ftm_stat_smmu0_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_ftm_stat_smmu0_req1_cnt_reg[] = { + { "ftm_stat_smmu0_req1_cnt", 
DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_etm_stat_smmu0_req0_cnt_reg[] = { + { "etm_stat_smmu0_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_etm_stat_smmu0_req1_cnt_reg[] = { + { "etm_stat_smmu0_req1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_req_eram0_31_rd_cnt_reg[] = { + { "req_eram0_31_rd_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu0_smmu0_req_eram0_31_wr_cnt_reg[] = { + { "req_eram0_31_wr_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat1_reg[] = { + { "ddr_wdat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat2_reg[] = { + { "ddr_wdat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat3_reg[] = { + { "ddr_wdat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat4_reg[] = { + { "ddr_wdat4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat5_reg[] = { + { "ddr_wdat5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat6_reg[] = { + { "ddr_wdat6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat7_reg[] = { + { "ddr_wdat7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat8_reg[] = { + { "ddr_wdat8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat9_reg[] = { + { "ddr_wdat9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat10_reg[] = { + { "ddr_wdat10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat11_reg[] = { + { "ddr_wdat11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat12_reg[] = { + { "ddr_wdat12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat13_reg[] = { + { "ddr_wdat13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat14_reg[] = { + { "ddr_wdat14", 
DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_wdat15_reg[] = { + { "ddr_wdat15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_en_reg[] = { + { "cnt_stat_cache_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_clr_reg[] = { + { "cnt_stat_cache_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_req_63_32_reg[] = { + { "cnt_stat_cache_req_63_32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_req_31_0_reg[] = { + { "cnt_stat_cache_req_31_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_hit_63_32_reg[] = { + { "cnt_stat_cache_hit_63_32", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cnt_stat_cache_hit_31_0_reg[] = { + { "cnt_stat_cache_hit_31_0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_cmd0_reg[] = { + { "ecc_en", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "rw_len", DPP_FIELD_FLAG_RW, 21, 2, 0x0, 0x0 }, + { "baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_info_addr_reg[] = { + { "info_addr", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_cmd1_reg[] = { + { "rw_flag", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "rw_addr", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_clr_start_addr_reg[] = { + { "clr_start_addr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_clr_end_addr_reg[] = { + { "clr_end_addr", DPP_FIELD_FLAG_RW, 31, 32, 0xffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_clr_tbl_en_reg[] = { + { "cfg_init_en", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "clr_tbl_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_init_done_reg[] = { + { 
"cache_init_done", DPP_FIELD_FLAG_RO, 7, 1, 0x0, 0x0 }, + { "clr_done", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "init_ok", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rsp_rd_done_reg[] = { + { "cpu_rsp_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ksch_oam_sp_en_reg[] = { + { "ksch_oam_sp_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_cache_en_reg[] = { + { "cfg_cache_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache_age_en_reg[] = { + { "cache_age_en", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat0_reg[] = { + { "cpu_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat1_reg[] = { + { "cpu_rdat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat2_reg[] = { + { "cpu_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat3_reg[] = { + { "cpu_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat4_reg[] = { + { "cpu_rdat4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat5_reg[] = { + { "cpu_rdat5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat6_reg[] = { + { "cpu_rdat6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat7_reg[] = { + { "cpu_rdat7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat8_reg[] = { + { "cpu_rdat8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat9_reg[] = { + { "cpu_rdat9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat10_reg[] = { + { "cpu_rdat10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat11_reg[] = { + { "cpu_rdat11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat12_reg[] = { + { "cpu_rdat12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_cpu_rdat13_reg[] = { + { "cpu_rdat13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat14_reg[] = { + { "cpu_rdat14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rdat15_reg[] = { + { "cpu_rdat15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_cpu_rd_rdy_reg[] = { + { "ctrl_cpu_rd_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_warbi_rdy_cfg_reg[] = { + { "cpu_warbi_rdy_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_arbi_cpu_rpful_reg[] = { + { "smmu1_cfg_rpful", DPP_FIELD_FLAG_RW, 15, 8, 0x44, 0x0 }, + { "smmu1_cfg_wpful", DPP_FIELD_FLAG_RW, 7, 8, 0x44, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_arbi_wpful_reg[] = { + { "smmu1_ser_wdir_pful", DPP_FIELD_FLAG_RW, 17, 10, 0x14a, 0x0 }, + { "smmu1_cfg_wdir_pful", DPP_FIELD_FLAG_RW, 7, 8, 0x22, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_wr_arbi_pful0_reg[] = { + { "arbi_out_pful", DPP_FIELD_FLAG_RW, 17, 12, 0x71c, 0x0 }, + { "cpu_wr_pful", DPP_FIELD_FLAG_RW, 5, 6, 0x22, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_wr_arbi_pful1_reg[] = { + { "tm_wr_pful", DPP_FIELD_FLAG_RW, 23, 12, 0x4d3, 0x0 }, + { "stat_wr_pful", DPP_FIELD_FLAG_RW, 11, 12, 0x555, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_wdone_pful_cfg_reg[] = { + { "smmu1_wdone_pful_cfg", DPP_FIELD_FLAG_RW, 19, 20, 0x7e1f8, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_stat_rate_cfg_cnt_reg[] = { + { "stat_rate_cfg_cnt", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ftm_rate_cfg_cnt_reg[] = { + { "ftm_rate_cfg_cnt", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_etm_rate_cfg_cnt_reg[] = { + { "etm_rate_cfg_cnt", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_rate_cfg_cnt_reg[] = { + { "dir_rate_cfg_cnt", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash_rate_cfg_cnt_reg[] = { + { "hash_rate_cfg_cnt", DPP_FIELD_FLAG_RW, 9, 10, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_ftm_tbl_cfg_reg[] = { + { "ftm_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_v4_as_tbl_cfg_reg[] = { + { "lpm_v4_as_rsp_len", DPP_FIELD_FLAG_RW, 22, 2, 0x0, 0x0 }, + { "lpm_v4_as_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "lpm_v4_as_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_v4_tbl_cfg_reg[] = { + { "lpm_v4_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "lpm_v4_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "lpm_v4_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_v6_tbl_cfg_reg[] = { + { "lpm_v6_len", DPP_FIELD_FLAG_RO, 22, 2, 0x2, 0x0 }, + { "lpm_v6_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "lpm_v6_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_v6_as_tbl_cfg_reg[] = { + { "lpm_v6_as_rsp_len", DPP_FIELD_FLAG_RW, 22, 2, 0x0, 0x0 }, + { "lpm_v6_as_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "lpm_v6_as_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dma_tbl_cfg_reg[] = { + { "dma_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_stat_mode_cfg_reg[] = { + { "stat_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_rpar_cpu_pful_reg[] = { + { "ctrl_rpar_cpu_pful", DPP_FIELD_FLAG_RW, 7, 8, 0x44, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_dir_pful_reg[] = { + { "cfg_ksch_dir_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3434, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_hash_pful_reg[] = { + { "cfg_ksch_hash_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3030, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_lpm_pful_reg[] = { + { "cfg_ksch_lpm_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3030, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_lpm_as_pful_reg[] = { + { "cfg_ksch_lpm_as_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3030, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_stat_pful_reg[] = { + { "cfg_ksch_stat_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3434, 0x0 }, +}; 
+DPP_FIELD_T g_se_smmu1_cfg_ksch_tm_pful_reg[] = { + { "cfg_ksch_tm_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x3232, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_oam_pful_reg[] = { + { "cfg_ksch_oam_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x2a2a, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cfg_ksch_dma_pful_reg[] = { + { "cfg_ksch_dma_pful", DPP_FIELD_FLAG_RW, 15, 16, 0x2727, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_wfifo_cfg_reg[] = { + { "ctrl_wfifo_cfg", DPP_FIELD_FLAG_RW, 9, 10, 0x18c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rsch_hash_ptr_cfg_reg[] = { + { "rsch_hash_ptr_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0xe, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rsch_lpm_ptr_cfg_reg[] = { + { "rsch_lpm_ptr_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0xe, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rsch_lpm_as_ptr_cfg_reg[] = { + { "rsch_lpm_as_ptr_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0xe, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rsch_stat_ptr_cfg_reg[] = { + { "rsch_stat_ptr_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0xe, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rsch_oam_ptr_cfg_reg[] = { + { "rsch_oam_ptr_cfg", DPP_FIELD_FLAG_RW, 8, 9, 0x12, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rschd_fifo_pept_cfg_reg[] = { + { "rschd_fifo_pept_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x204, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_fifo_pful_cfg_reg[] = { + { "dir_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash_fifo_pful_cfg_reg[] = { + { "hash_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_fifo_pful_cfg_reg[] = { + { "lpm_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_fifo_pful_cfg_reg[] = { + { "lpm_as_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_stat_fifo_pful_cfg_reg[] = { + { "stat_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ftm_fifo_pful_cfg_reg[] = { + { "ftm_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_etm_fifo_pful_cfg_reg[] = { 
+ { "etm_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_fifo_pful_cfg_reg[] = { + { "oam_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dma_fifo_pful_cfg_reg[] = { + { "dma_fifo_pful_cfg", DPP_FIELD_FLAG_RW, 13, 14, 0x1e3c, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache_rsp_rr_fifo_cfg_reg[] = { + { "rr_pfull_assert0", DPP_FIELD_FLAG_RW, 11, 6, 0x14, 0x0 }, + { "rr_pfull_negate0", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr_rsp_rr_fifo_cfg_reg[] = { + { "rr_pfull_assert1", DPP_FIELD_FLAG_RW, 13, 7, 0x20, 0x0 }, + { "rr_pfull_negate1", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_cahce_fifo_cfg_reg[] = { + { "smmu1_cahce_fwft_fifo_pfull_assert", DPP_FIELD_FLAG_RW, 7, 4, 0x6, + 0x0 }, + { "smmu1_cahce_fwft_fifo_pfull_negate", DPP_FIELD_FLAG_RW, 3, 4, 0x6, + 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache_rsp_fifo_cfg_reg[] = { + { "rschd_fifo_pfull_assert", DPP_FIELD_FLAG_RW, 11, 6, 0x10, 0x0 }, + { "rschd_fifo_pfull_negate", DPP_FIELD_FLAG_RW, 5, 6, 0x10, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_test_state_reg[] = { + { "test_state", DPP_FIELD_FLAG_RW, 31, 32, 0x11111111, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache_fifo_ept_reg[] = { + { "cache_fifo_ept", DPP_FIELD_FLAG_RO, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rr_fifo_ept_reg[] = { + { "rr_fifo_ept", DPP_FIELD_FLAG_RO, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_wr_fifo_ept_reg[] = { + { "dir_arbi_ept", DPP_FIELD_FLAG_RO, 6, 7, 0x7f, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_wdone_fifo_ept_reg[] = { + { "wdone_fifo_ept", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_kschd_fifo_ept0_reg[] = { + { "kschd_fifo_ept0", DPP_FIELD_FLAG_RO, 14, 15, 0x7fff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cash_fifo_ept_reg[] = { + { "cash_fifo_ept", DPP_FIELD_FLAG_RO, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_fifo_ept_reg[] = { + { "ctrl_fifo_ept", DPP_FIELD_FLAG_RO, 2, 3, 0x7, 0x0 }, +}; 
+DPP_FIELD_T g_se_smmu1_smmu1_rschd_ept3_reg[] = { + { "rschd_fifo_ept3", DPP_FIELD_FLAG_RO, 31, 32, 0xffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_rschd_ept2_reg[] = { + { "rschd_fifo_ept2", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_rschd_ept1_reg[] = { + { "rschd_fifo_ept1", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_rschd_ept0_reg[] = { + { "rschd_fifo_ept0", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cash0_ecc_err_addr_reg[] = { + { "cash0_ecc_err_addr", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_arbi_cpu_wr_rdy_reg[] = { + { "arbi_cpu_wr_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_0_en_reg[] = { + { "smmu1_int_0_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_0_mask_reg[] = { + { "smmu1_int_0_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_1_en_reg[] = { + { "smmu1_int_1_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_1_mask_reg[] = { + { "smmu1_int_1_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_2_en_reg[] = { + { "smmu1_int_2_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_2_mask_reg[] = { + { "smmu1_int_2_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_3_en_reg[] = { + { "smmu1_int_3_en", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_3_mask_reg[] = { + { "smmu1_int_3_mask", DPP_FIELD_FLAG_RW, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_0_status_reg[] = { + { "smmu1_int_0_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_1_status_reg[] = { + { "smmu1_int_1_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_smmu1_int_2_status_reg[] = { + { "smmu1_int_2_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_3_status_reg[] = { + { "smmu1_int_3_status", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_int_status_reg[] = { + { "smmu1_int_status", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_to_cash7_0_fc_cnt_reg[] = { + { "ctrl_to_cash7_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cash7_0_to_ctrl_req_cnt_reg[] = { + { "cash7_0_to_ctrl_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_rschd_to_cache7_fc_cnt_reg[] = { + { "rschd_to_cache7_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cash7_to_cache_rsp_cnt_reg[] = { + { "cash7_to_cache_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cash7_to_ctrl_fc_cnt_reg[] = { + { "cash7_to_ctrl_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl_to_cash7_0_rsp_cnt_reg[] = { + { "ctrl_to_cash7_0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_kschd_to_cache7_0_req_cnt_reg[] = { + { "kschd_to_cache7_0_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache7_0_to_kschd_fc_cnt_reg[] = { + { "cache7_0_to_kschd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dma_to_smmu1_rd_req_cnt_reg[] = { + { "dma_to_smmu1_rd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_to_kschd_req_cnt_reg[] = { + { "oam_to_kschd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_rr_state_rsp_cnt_reg[] = { + { "oam_rr_state_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_clash_info_cnt_reg[] = { + { "oam_clash_info_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_to_rr_req_cnt_reg[] = { + { "oam_to_rr_req_cnt", DPP_FIELD_FLAG_RC, 
31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_to_kschd_req_cnt_reg[] = { + { "lpm_as_to_kschd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_rr_state_rsp_cnt_reg[] = { + { "lpm_as_rr_state_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_clash_info_cnt_reg[] = { + { "lpm_as_clash_info_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_to_rr_req_cnt_reg[] = { + { "lpm_as_to_rr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_to_kschd_req_cnt_reg[] = { + { "lpm_to_kschd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_rr_state_rsp_cnt_reg[] = { + { "lpm_rr_state_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_clash_info_cnt_reg[] = { + { "lpm_clash_info_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_to_rr_req_cnt_reg[] = { + { "lpm_to_rr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_to_kschd_req_cnt_reg[] = { + { "hash3_0_to_kschd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_rr_state_rsp_cnt_reg[] = { + { "hash3_0_rr_state_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_clash_info_cnt_reg[] = { + { "hash3_0_clash_info_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_to_rr_req_cnt_reg[] = { + { "hash3_0_to_rr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir3_0_to_kschd_req_cnt_reg[] = { + { "dir3_0_to_kschd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir3_0_clash_info_cnt_reg[] = { + { "dir3_0_clash_info_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir_tbl_wr_req_cnt_reg[] = { + { "dir_tbl_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_warbi_to_dir_tbl_warbi_fc_cnt_reg[] = { + { "warbi_to_dir_tbl_warbi_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir3_0_to_bank_rr_req_cnt_reg[] = { + { "dir3_0_to_bank_rr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_kschd_to_dir3_0_fc_cnt_reg[] = { + { "kschd_to_dir3_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_dir3_0_rr_state_rsp_cnt_reg[] = { + { "dir3_0_rr_state_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_wr_done_to_warbi_fc_cnt_reg[] = { + { "wr_done_to_warbi_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_wr_done_ptr_req_cnt_reg[] = { + { "wr_done_ptr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl7_0_to_warbi_fc_cnt_reg[] = { + { "ctrl7_0_to_warbi_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_warbi_to_ctrl7_0_wr_req_cnt_reg[] = { + { "warbi_to_ctrl7_0_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_warbi_to_cash7_0_wr_req_cnt_reg[] = { + { "warbi_to_cash7_0_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_warbi_to_cpu_wr_fc_cnt_reg[] = { + { "warbi_to_cpu_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_wr_req_cnt_reg[] = { + { "cpu_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ctrl7_0_to_cpu_rd_rsp_cnt_reg[] = { + { "ctrl7_0_to_cpu_rd_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_to_ctrl7_0_rd_req_cnt_reg[] = { + { "cpu_to_ctrl7_0_rd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_rd_dir_tbl_rsp_cnt_reg[] = { + { "cpu_rd_dir_tbl_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cpu_to_dir_tbl_rd_wr_req_cnt_reg[] = { + { "cpu_to_dir_tbl_rd_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_smmu1_to_mmu_7_0_rsp_fc_cnt_reg[] = { + { "smmu1_to_mmu_7_0_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_mmu_7_0_to_smmu1_rd_rsp_cnt_reg[] = { + { "mmu_7_0_to_smmu1_rd_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_mmu_7_0_to_smmu1_rd_fc_cnt_reg[] = { + { "mmu_7_0_to_smmu1_rd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_mmu_7_rd_req_cnt_reg[] = { + { "smmu1_to_mmu_7_rd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_mmu_7_to_smmu1_wr_fc_cnt_reg[] = { + { "mmu_7_to_smmu1_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_mmu_7_0_wr_req_cnt_reg[] = { + { "smmu1_to_mmu_7_0_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_to_smmu1_wr_rsp_fc_cnt_reg[] = { + { "se_to_smmu1_wr_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_se_wr_rsp_cnt_reg[] = { + { "smmu1_to_se_wr_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ddr7_0_wr_rsp_cnt_reg[] = { + { "ddr7_0_wr_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_as_fc_cnt_reg[] = { + { "smmu1_to_as_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_as_to_smmu1_wr_req_cnt_reg[] = { + { "as_to_smmu1_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_se_parser_fc_cnt_reg[] = { + { "smmu1_to_se_parser_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_parser_to_smmu1_req_cnt_reg[] = { + { "se_parser_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_etm_wr_fc_cnt_reg[] = { + { "smmu1_to_etm_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_etm_wr_req_cnt_reg[] = { + { "etm_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_smmu1_to_ftm_wr_fc_cnt_reg[] = { + { "smmu1_to_ftm_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ftm_wr_req_cnt_reg[] = { + { "ftm_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_state_wr_fc_cnt_reg[] = { + { "smmu1_to_state_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_state_wr_req_cnt_reg[] = { + { "state_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_to_dma_rsp_cnt_reg[] = { + { "se_to_dma_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_to_dma_fc_cnt_reg[] = { + { "se_to_dma_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_to_smmu1_fc_cnt_reg[] = { + { "oam_to_smmu1_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_oam_rsp_cnt_reg[] = { + { "smmu1_to_oam_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_oam_fc_cnt_reg[] = { + { "smmu1_to_oam_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_oam_to_smmu1_req_cnt_reg[] = { + { "oam_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_etm_rsp_cnt_reg[] = { + { "smmu1_to_etm_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_ftm_rsp_cnt_reg[] = { + { "smmu1_to_ftm_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_etm_fc_cnt_reg[] = { + { "smmu1_to_etm_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_etm_to_smmu1_req_cnt_reg[] = { + { "etm_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_ftm_fc_cnt_reg[] = { + { "smmu1_to_ftm_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_ftm_to_smmu1_req_cnt_reg[] = { + { "ftm_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_se_smmu1_smmu1_to_stat_rsp_cnt_reg[] = { + { "smmu1_to_stat_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_stat_fc_cnt_reg[] = { + { "smmu1_to_stat_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_stat_to_smmu1_req_cnt_reg[] = { + { "stat_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_to_smmu1_fc_cnt_reg[] = { + { "lpm_as_to_smmu1_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_to_smmu1_fc_cnt_reg[] = { + { "lpm_to_smmu1_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_lpm_as_rsp_cnt_reg[] = { + { "smmu1_to_lpm_as_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_lpm_rsp_cnt_reg[] = { + { "smmu1_to_lpm_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_lpm_as_fc_cnt_reg[] = { + { "smmu1_to_lpm_as_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_lpm_fc_cnt_reg[] = { + { "smmu1_to_lpm_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_as_to_smmu1_req_cnt_reg[] = { + { "lpm_as_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_lpm_to_smmu1_req_cnt_reg[] = { + { "lpm_to_smmu1_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_to_smmu1_fc_cnt_reg[] = { + { "hash3_0_to_smmu1_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_hash3_0_rsp_cnt_reg[] = { + { "smmu1_to_hash3_0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_hash3_0_fc_cnt_reg[] = { + { "smmu1_to_hash3_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_hash3_0_to_smmu1_cnt_reg[] = { + { "hash3_0_to_smmu1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_to_smmu1_dir3_0_rsp_fc_cnt_reg[] = { + { 
"se_to_smmu1_dir3_0_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_se_dir3_0_rsp_cnt_reg[] = { + { "smmu1_to_se_dir3_0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_smmu1_to_se_dir3_0_fc_cnt_reg[] = { + { "smmu1_to_se_dir3_0_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_se_to_smmu1_dir3_0_cnt_reg[] = { + { "se_to_smmu1_dir3_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_smmu1_cache7_0_to_rschd_rsp_cnt_reg[] = { + { "cache7_0_to_rschd_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_ddr_rw_addr_reg[] = { + { "ddr_wr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_ddr_rw_mode_reg[] = { + { "ddr_rw_flag", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "ddr_rw_mode", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cp_cmd_reg[] = { + { "stat_tbl_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ind_rd_done_reg[] = { + { "cpu_ind_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ind_rdat0_reg[] = { + { "cpu_ind_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ind_rdat1_reg[] = { + { "cpu_ind_rdat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ind_rdat2_reg[] = { + { "cpu_ind_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ind_rdat3_reg[] = { + { "cpu_ind_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cpu_ddr_fifo_almful_reg[] = { + { "cpu_ddr_fifo_almful", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_pful_cfg_reg[] = { + { "alu_cmd_pful_negate", DPP_FIELD_FLAG_RW, 17, 9, 0xf8, 0x0 }, + { "alu_cmd_pful_assert", DPP_FIELD_FLAG_RW, 8, 9, 
0xf8, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_stat_pful_cfg_reg[] = { + { "cmmu_stat_pful_negate", DPP_FIELD_FLAG_RW, 11, 6, 0x14, 0x0 }, + { "cmmu_stat_pful_assert", DPP_FIELD_FLAG_RW, 5, 6, 0x14, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_stat_overflow_mode_reg[] = { + { "stat_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_cp_fifo_pful_reg[] = { + { "cmmu_cp_fifo_pful", DPP_FIELD_FLAG_RW, 7, 8, 0x66, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_ddr_wr_dat0_reg[] = { + { "ddr_wr_dat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_ddr_wr_dat1_reg[] = { + { "ddr_wr_dat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_int_unmask_flag_reg[] = { + { "cmmu_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_int_en_reg[] = { + { "cmmu_int_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "cmmu_int_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "cmmu_int_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "cmmu_int_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "cmmu_int_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "cmmu_int_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "cmmu_int_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "cmmu_int_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "cmmu_int_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "cmmu_int_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "cmmu_int_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "cmmu_int_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "cmmu_int_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_int_mask_reg[] = { + { "cmmu_int_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "cmmu_int_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "cmmu_int_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "cmmu_int_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "cmmu_int_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "cmmu_int_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { 
"cmmu_int_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "cmmu_int_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "cmmu_int_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "cmmu_int_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "cmmu_int_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "cmmu_int_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "cmmu_int_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_int_status_reg[] = { + { "cmmu_int_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "cmmu_int_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "cmmu_int_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "cmmu_int_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "cmmu_int_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "cmmu_int_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "cmmu_int_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "cmmu_int_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "cmmu_int_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "cmmu_int_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "cmmu_int_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "cmmu_int_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "cmmu_int_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_stat_cmmu_req_cnt_reg[] = { + { "stat_cmmu_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_fc0_cnt_reg[] = { + { "cmmu_stat_rdy", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_fc1_cnt_reg[] = { + { "smmu1_cmmu_wr_rdy", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_se_cmmu_cmmu_fc2_cnt_reg[] = { + { "smmu1_cmmu_rd_rdy", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl0_cfg_reg[] = { + { "hash0_tbl0_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl0_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl0_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu14k_se_smmu1_hash0_tbl1_cfg_reg[] = { + { "hash0_tbl1_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl1_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl1_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl2_cfg_reg[] = { + { "hash0_tbl2_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl2_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl2_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl3_cfg_reg[] = { + { "hash0_tbl3_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl3_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl3_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl4_cfg_reg[] = { + { "hash0_tbl4_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl4_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl4_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl5_cfg_reg[] = { + { "hash0_tbl5_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl5_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl5_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl6_cfg_reg[] = { + { "hash0_tbl6_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl6_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl6_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash0_tbl7_cfg_reg[] = { + { "hash0_tbl7_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash0_tbl7_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash0_tbl7_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl0_cfg_reg[] = { + { "hash1_tbl0_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl0_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl0_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu14k_se_smmu1_hash1_tbl1_cfg_reg[] = { + { "hash1_tbl1_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl1_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl1_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl2_cfg_reg[] = { + { "hash1_tbl2_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl2_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl2_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl3_cfg_reg[] = { + { "hash1_tbl3_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl3_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl3_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl4_cfg_reg[] = { + { "hash1_tbl4_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl4_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl4_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl5_cfg_reg[] = { + { "hash1_tbl5_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl5_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl5_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl6_cfg_reg[] = { + { "hash1_tbl6_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl6_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl6_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash1_tbl7_cfg_reg[] = { + { "hash1_tbl7_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash1_tbl7_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash1_tbl7_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl0_cfg_reg[] = { + { "hash2_tbl0_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl0_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl0_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu14k_se_smmu1_hash2_tbl1_cfg_reg[] = { + { "hash2_tbl1_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl1_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl1_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl2_cfg_reg[] = { + { "hash2_tbl2_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl2_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl2_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl3_cfg_reg[] = { + { "hash2_tbl3_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl3_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl3_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl4_cfg_reg[] = { + { "hash2_tbl4_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl4_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl4_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl5_cfg_reg[] = { + { "hash2_tbl5_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl5_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl5_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl6_cfg_reg[] = { + { "hash2_tbl6_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl6_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl6_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash2_tbl7_cfg_reg[] = { + { "hash2_tbl7_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash2_tbl7_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash2_tbl7_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl0_cfg_reg[] = { + { "hash3_tbl0_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl0_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl0_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_smmu14k_se_smmu1_hash3_tbl1_cfg_reg[] = { + { "hash3_tbl1_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl1_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl1_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl2_cfg_reg[] = { + { "hash3_tbl2_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl2_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl2_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl3_cfg_reg[] = { + { "hash3_tbl3_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl3_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl3_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl4_cfg_reg[] = { + { "hash3_tbl4_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl4_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl4_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl5_cfg_reg[] = { + { "hash3_tbl5_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl5_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl5_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl6_cfg_reg[] = { + { "hash3_tbl6_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl6_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl6_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_smmu14k_se_smmu1_hash3_tbl7_cfg_reg[] = { + { "hash3_tbl7_len", DPP_FIELD_FLAG_RO, 22, 2, 0x1, 0x0 }, + { "hash3_tbl7_ecc_en", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "hash3_tbl7_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_wdat1_reg[] = { + { "cpu_ind_eram_wdat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_wdat2_reg[] = { + { "cpu_ind_eram_wdat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_stat_stat_cfg_cpu_ind_eram_wdat3_reg[] = { + { "cpu_ind_eram_wdat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_req_info_reg[] = { + { "rw_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "read_mode", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { "tm_cs", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "queue_cs", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "rw_addr", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_rd_done_reg[] = { + { "cpu_ind_eram_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_rdat0_reg[] = { + { "cpu_ind_eram_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_rdat1_reg[] = { + { "cpu_ind_eram_rdat1", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_rdat2_reg[] = { + { "cpu_ind_eram_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_eram_rdat3_reg[] = { + { "cpu_ind_eram_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_alu_eram_cpu_rdy_reg[] = { + { "tm_alu_eram_cpu_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_stat_cfg_reg[] = { + { "oam_flow_control_cfg", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "oam_lm_flow_control_cfg", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "oam_in_eram_cfg", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ftm_port_sel_cfg_reg[] = { + { "ftm_port0_sel_cfg", DPP_FIELD_FLAG_RW, 19, 5, 0x0, 0x0 }, + { "ftm_port1_sel_cfg", DPP_FIELD_FLAG_RW, 14, 5, 0x1, 0x0 }, + { "ftm_port2_sel_cfg", DPP_FIELD_FLAG_RW, 9, 5, 0xf, 0x0 }, + { "ftm_port3_sel_cfg", DPP_FIELD_FLAG_RW, 4, 5, 0x10, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_eram_base_addr_reg[] = { + { "oam_eram_base_addr", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_lm_eram_base_addr_reg[] = { + { "oam_lm_eram_base_addr", 
DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_ddr_base_addr_reg[] = { + { "oam_ddr_base_addr", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_plcr0_schd_pful_cfg_reg[] = { + { "plcr0_schd_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x2c, 0x0 }, + { "plcr0_schd_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x2c, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_lm_ord_pful_cfg_reg[] = { + { "oam_lm_ord_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x28, 0x0 }, + { "oam_lm_ord_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x28, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ddr_schd_pful_cfg_reg[] = { + { "ddr_schd_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x2c, 0x0 }, + { "ddr_schd_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x2c, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_pful_cfg_reg[] = { + { "eram_schd_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x28, 0x0 }, + { "eram_schd_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x28, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_pept_cfg_reg[] = { + { "eram_schd_pept_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x18, 0x0 }, + { "eram_schd_pept_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x18, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_oam_pful_cfg_reg[] = { + { "eram_schd_oam_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x28, 0x0 }, + { "eram_schd_oam_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x28, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_oam_pept_cfg_reg[] = { + { "eram_schd_oam_pept_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x18, 0x0 }, + { "eram_schd_oam_pept_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x18, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_oam_lm_pful_cfg_reg[] = { + { "eram_schd_oam_lm_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x28, 0x0 }, + { "eram_schd_oam_lm_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x28, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_oam_lm_pept_cfg_reg[] = { + { "eram_schd_oam_lm_pept_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x18, 0x0 }, + { "eram_schd_oam_lm_pept_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x18, 
0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_pful_cfg_reg[] = { + { "rschd_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x24, 0x0 }, + { "rschd_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x24, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_pept_cfg_reg[] = { + { "rschd_pept_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x1c, 0x0 }, + { "rschd_pept_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_plcr_pful_cfg_reg[] = { + { "rschd_plcr_pful_assert", DPP_FIELD_FLAG_RW, 15, 8, 0x40, 0x0 }, + { "rschd_plcr_pful_negate", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_plcr_pept_cfg_reg[] = { + { "rschd_plcr_pept_assert", DPP_FIELD_FLAG_RW, 15, 8, 0x40, 0x0 }, + { "rschd_plcr_pept_negate", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_plcr_info_pful_cfg_reg[] = { + { "rschd_plcr_info_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x38, 0x0 }, + { "rschd_plcr_info_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x38, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_alu_arb_cpu_pful_cfg_reg[] = { + { "alu_arb_cpu_pful_assert", DPP_FIELD_FLAG_RW, 9, 5, 0x08, 0x0 }, + { "alu_arb_cpu_pful_negate", DPP_FIELD_FLAG_RW, 4, 5, 0x08, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_alu_arb_user_pful_cfg_reg[] = { + { "alu_arb_user_pful_assert", DPP_FIELD_FLAG_RW, 9, 5, 0x0d, 0x0 }, + { "alu_arb_user_pful_negate", DPP_FIELD_FLAG_RW, 4, 5, 0x0d, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_alu_arb_stat_pful_cfg_reg[] = { + { "alu_arb_stat_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x20, 0x0 }, + { "alu_arb_stat_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x20, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cycmov_dat_pful_cfg_reg[] = { + { "cycmov_dat_pful_assert", DPP_FIELD_FLAG_RW, 9, 5, 0x0d, 0x0 }, + { "cycmov_dat_pful_negate", DPP_FIELD_FLAG_RW, 4, 5, 0x0d, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ddr_opr_pful_cfg_reg[] = { + { "ddr_opr_pful_assert", DPP_FIELD_FLAG_RW, 9, 5, 0xc, 0x0 }, + { "ddr_opr_pful_negate", DPP_FIELD_FLAG_RW, 4, 5, 0xc, 0x0 }, +}; 
+DPP_FIELD_T g_stat_stat_cfg_cycle_mov_pful_cfg_reg[] = { + { "cycle_mov_pful_assert", DPP_FIELD_FLAG_RW, 11, 6, 0x1c, 0x0 }, + { "cycle_mov_pful_negate", DPP_FIELD_FLAG_RW, 5, 6, 0x1c, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cntovf_pful_cfg_reg[] = { + { "cntovf_pful_assert", DPP_FIELD_FLAG_RW, 13, 7, 0x32, 0x0 }, + { "cntovf_pful_negate", DPP_FIELD_FLAG_RW, 6, 7, 0x32, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_plcr_pful_cfg_reg[] = { + { "eram_schd_plcr_pful_assert", DPP_FIELD_FLAG_RW, 15, 8, 0x40, 0x0 }, + { "eram_schd_plcr_pful_negate", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_plcr_pept_cfg_reg[] = { + { "eram_schd_plcr_pept_assert", DPP_FIELD_FLAG_RW, 15, 8, 0x40, 0x0 }, + { "eram_schd_plcr_pept_negate", DPP_FIELD_FLAG_RW, 7, 8, 0x40, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_debug_cnt_mode_reg[] = { + { "cnt_rd_mode", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "cnt_overflow_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_mov_period_cfg_reg[] = { + { "etm_mov_period_cfg", DPP_FIELD_FLAG_RW, 15, 8, 0x3c, 0x0 }, + { "ftm_mov_period_cfg", DPP_FIELD_FLAG_RW, 7, 8, 0x3c, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_alu_ddr_cpu_req_pful_cfg_reg[] = { + { "alu_ddr_cpu_req_pful_assert", DPP_FIELD_FLAG_RW, 7, 4, 0x4, 0x0 }, + { "alu_ddr_cpu_req_pful_negate", DPP_FIELD_FLAG_RW, 3, 4, 0x4, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cycmov_addr_pful_cfg_reg[] = { + { "cycmov_addr_pful_assert", DPP_FIELD_FLAG_RW, 9, 5, 0xd, 0x0 }, + { "cycmov_addr_pful_negate", DPP_FIELD_FLAG_RW, 4, 5, 0xd, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ord_ddr_plcr_fifo_empty_reg[] = { + { "ord_oam_lm_empty", DPP_FIELD_FLAG_RO, 19, 1, 0x1, 0x0 }, + { "ddr_schd_fifo_empty", DPP_FIELD_FLAG_RO, 18, 7, 0x7f, 0x0 }, + { "plcr0_schd_fifo_empty", DPP_FIELD_FLAG_RO, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_stat_fifo_empty_reg[] = { + { "tm_stat_fifo_empty", DPP_FIELD_FLAG_RO, 13, 14, 0x3fff, 0x0 }, +}; +DPP_FIELD_T 
g_stat_stat_cfg_eram_schd_fifo_empty_0_1_reg[] = { + { "eram_schd_fifo_empty1", DPP_FIELD_FLAG_RO, 31, 16, 0xffff, 0x0 }, + { "eram_schd_fifo_empty0", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_fifo_empty_2_3_reg[] = { + { "eram_schd_fifo_empty3", DPP_FIELD_FLAG_RO, 31, 16, 0xffff, 0x0 }, + { "eram_schd_fifo_empty2", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_fifo_empty_4_5_reg[] = { + { "eram_schd_fifo_empty5", DPP_FIELD_FLAG_RO, 31, 16, 0xffff, 0x0 }, + { "eram_schd_fifo_empty4", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_fifo_empty_6_7_reg[] = { + { "eram_schd_fifo_empty7", DPP_FIELD_FLAG_RO, 31, 16, 0xffff, 0x0 }, + { "eram_schd_fifo_empty6", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_eram_schd_fifo_empty_free_8_reg[] = { + { "eram_schd_free_fifo_empty8", DPP_FIELD_FLAG_RO, 24, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty7", DPP_FIELD_FLAG_RO, 23, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty6", DPP_FIELD_FLAG_RO, 22, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty5", DPP_FIELD_FLAG_RO, 21, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty4", DPP_FIELD_FLAG_RO, 20, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty3", DPP_FIELD_FLAG_RO, 19, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty2", DPP_FIELD_FLAG_RO, 18, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty1", DPP_FIELD_FLAG_RO, 17, 1, 0x1, 0x0 }, + { "eram_schd_free_fifo_empty0", DPP_FIELD_FLAG_RO, 16, 1, 0x1, 0x0 }, + { "eram_schd_fifo_empty8", DPP_FIELD_FLAG_RO, 15, 16, 0xffff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_fifo_empty_0_3_reg[] = { + { "rschd_fifo_empty3", DPP_FIELD_FLAG_RO, 31, 8, 0xff, 0x0 }, + { "rschd_fifo_empty2", DPP_FIELD_FLAG_RO, 23, 8, 0xff, 0x0 }, + { "rschd_fifo_empty1", DPP_FIELD_FLAG_RO, 15, 8, 0xff, 0x0 }, + { "rschd_fifo_empty0", DPP_FIELD_FLAG_RO, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_fifo_empty_4_7_reg[] = { + { 
"rschd_fifo_empty7", DPP_FIELD_FLAG_RO, 31, 8, 0xff, 0x0 }, + { "rschd_fifo_empty6", DPP_FIELD_FLAG_RO, 23, 8, 0xff, 0x0 }, + { "rschd_fifo_empty5", DPP_FIELD_FLAG_RO, 15, 8, 0xff, 0x0 }, + { "rschd_fifo_empty4", DPP_FIELD_FLAG_RO, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_fifo_empty_8_11_reg[] = { + { "rschd_fifo_empty11", DPP_FIELD_FLAG_RO, 31, 8, 0xff, 0x0 }, + { "rschd_fifo_empty10", DPP_FIELD_FLAG_RO, 23, 8, 0xff, 0x0 }, + { "rschd_fifo_empty9", DPP_FIELD_FLAG_RO, 15, 8, 0xff, 0x0 }, + { "rschd_fifo_empty8", DPP_FIELD_FLAG_RO, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_fifo_empty_12_15_reg[] = { + { "rschd_fifo_empty15", DPP_FIELD_FLAG_RO, 31, 8, 0xff, 0x0 }, + { "rschd_fifo_empty14", DPP_FIELD_FLAG_RO, 23, 8, 0xff, 0x0 }, + { "rschd_fifo_empty13", DPP_FIELD_FLAG_RO, 15, 8, 0xff, 0x0 }, + { "rschd_fifo_empty12", DPP_FIELD_FLAG_RO, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_fifo_empty_plcr_16_17_reg[] = { + { "rschd_fifo_empty_plcr", DPP_FIELD_FLAG_RO, 16, 1, 0x1, 0x0 }, + { "rschd_fifo_empty17", DPP_FIELD_FLAG_RO, 15, 8, 0xff, 0x0 }, + { "rschd_fifo_empty16", DPP_FIELD_FLAG_RO, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int_unmask_flag_reg[] = { + { "stat_int5_unmask_flag", DPP_FIELD_FLAG_RO, 5, 1, 0x0, 0x0 }, + { "stat_int4_unmask_flag", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "stat_int3_unmask_flag", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "stat_int2_unmask_flag", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "stat_int1_unmask_flag", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "stat_int0_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int0_en_reg[] = { + { "stat_int0_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int0_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int0_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int0_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int0_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { 
"stat_int0_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int0_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int0_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int0_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int0_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int0_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int0_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int0_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int0_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int0_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int0_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int0_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int0_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int0_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int0_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int0_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int0_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int0_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int0_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int0_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int0_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int0_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int0_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int0_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int0_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int0_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int0_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int0_mask_reg[] = { + { "stat_int0_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int0_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int0_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int0_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int0_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int0_mask26", 
DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int0_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int0_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int0_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int0_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int0_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int0_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int0_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int0_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int0_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int0_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int0_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int0_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int0_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int0_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int0_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int0_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int0_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int0_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int0_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int0_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int0_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int0_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int0_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int0_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int0_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int0_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int0_status_reg[] = { + { "stat_int0_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "stat_int0_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "stat_int0_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "stat_int0_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "stat_int0_status27", DPP_FIELD_FLAG_RC, 27, 1, 
0x0, 0x0 }, + { "stat_int0_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "stat_int0_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "stat_int0_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "stat_int0_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "stat_int0_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "stat_int0_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "stat_int0_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "stat_int0_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "stat_int0_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int0_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int0_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int0_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int0_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int0_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int0_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int0_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int0_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int0_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int0_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int0_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int0_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int0_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int0_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int0_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int0_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int0_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int0_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int1_en_reg[] = { + { "stat_int1_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int1_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int1_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int1_en28", DPP_FIELD_FLAG_RW, 
28, 1, 0x1, 0x0 }, + { "stat_int1_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int1_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int1_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int1_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int1_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int1_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int1_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int1_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int1_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int1_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int1_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int1_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int1_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int1_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int1_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int1_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int1_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int1_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int1_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int1_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int1_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int1_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int1_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int1_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int1_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int1_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int1_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int1_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int1_mask_reg[] = { + { "stat_int1_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int1_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int1_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int1_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { 
"stat_int1_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int1_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int1_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int1_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int1_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int1_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int1_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int1_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int1_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int1_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int1_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int1_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int1_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int1_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int1_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int1_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int1_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int1_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int1_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int1_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int1_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int1_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int1_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int1_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int1_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int1_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int1_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int1_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int1_status_reg[] = { + { "stat_int1_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "stat_int1_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "stat_int1_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "stat_int1_status28", 
DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "stat_int1_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "stat_int1_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "stat_int1_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "stat_int1_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "stat_int1_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "stat_int1_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "stat_int1_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "stat_int1_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "stat_int1_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "stat_int1_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int1_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int1_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int1_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int1_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int1_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int1_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int1_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int1_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int1_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int1_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int1_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int1_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int1_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int1_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int1_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int1_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int1_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int1_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int2_en_reg[] = { + { "stat_int2_en31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int2_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { 
"stat_int2_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int2_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int2_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int2_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int2_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int2_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int2_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int2_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int2_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int2_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int2_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int2_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int2_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int2_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int2_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int2_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int2_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int2_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int2_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int2_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int2_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int2_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int2_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int2_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int2_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int2_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int2_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int2_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int2_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int2_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int2_mask_reg[] = { + { "stat_int2_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int2_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int2_mask29", 
DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int2_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int2_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int2_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int2_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int2_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int2_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int2_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int2_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int2_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int2_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int2_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int2_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int2_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int2_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int2_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int2_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int2_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int2_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int2_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int2_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int2_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int2_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int2_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int2_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int2_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int2_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int2_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int2_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int2_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int2_status_reg[] = { + { "stat_int2_status31", DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "stat_int2_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 
0x0 }, + { "stat_int2_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "stat_int2_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "stat_int2_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "stat_int2_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "stat_int2_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "stat_int2_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "stat_int2_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "stat_int2_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "stat_int2_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "stat_int2_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "stat_int2_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "stat_int2_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int2_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int2_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int2_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int2_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int2_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int2_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int2_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int2_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int2_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int2_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int2_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int2_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int2_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int2_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int2_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int2_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int2_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int2_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int3_en_reg[] = { + { "stat_int3_en31", 
DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + { "stat_int3_en30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int3_en29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int3_en28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int3_en27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int3_en26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int3_en25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int3_en24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int3_en23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int3_en22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int3_en21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int3_en20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int3_en19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int3_en18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int3_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int3_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int3_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int3_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int3_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int3_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int3_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int3_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int3_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int3_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int3_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int3_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int3_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int3_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int3_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int3_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int3_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int3_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int3_mask_reg[] = { + { "stat_int3_mask31", DPP_FIELD_FLAG_RW, 31, 1, 0x1, 0x0 }, + 
{ "stat_int3_mask30", DPP_FIELD_FLAG_RW, 30, 1, 0x1, 0x0 }, + { "stat_int3_mask29", DPP_FIELD_FLAG_RW, 29, 1, 0x1, 0x0 }, + { "stat_int3_mask28", DPP_FIELD_FLAG_RW, 28, 1, 0x1, 0x0 }, + { "stat_int3_mask27", DPP_FIELD_FLAG_RW, 27, 1, 0x1, 0x0 }, + { "stat_int3_mask26", DPP_FIELD_FLAG_RW, 26, 1, 0x1, 0x0 }, + { "stat_int3_mask25", DPP_FIELD_FLAG_RW, 25, 1, 0x1, 0x0 }, + { "stat_int3_mask24", DPP_FIELD_FLAG_RW, 24, 1, 0x1, 0x0 }, + { "stat_int3_mask23", DPP_FIELD_FLAG_RW, 23, 1, 0x1, 0x0 }, + { "stat_int3_mask22", DPP_FIELD_FLAG_RW, 22, 1, 0x1, 0x0 }, + { "stat_int3_mask21", DPP_FIELD_FLAG_RW, 21, 1, 0x1, 0x0 }, + { "stat_int3_mask20", DPP_FIELD_FLAG_RW, 20, 1, 0x1, 0x0 }, + { "stat_int3_mask19", DPP_FIELD_FLAG_RW, 19, 1, 0x1, 0x0 }, + { "stat_int3_mask18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int3_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int3_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int3_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int3_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int3_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int3_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int3_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int3_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int3_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int3_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int3_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int3_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int3_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int3_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int3_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int3_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int3_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int3_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int3_status_reg[] = { + { "stat_int3_status31", 
DPP_FIELD_FLAG_RC, 31, 1, 0x0, 0x0 }, + { "stat_int3_status30", DPP_FIELD_FLAG_RC, 30, 1, 0x0, 0x0 }, + { "stat_int3_status29", DPP_FIELD_FLAG_RC, 29, 1, 0x0, 0x0 }, + { "stat_int3_status28", DPP_FIELD_FLAG_RC, 28, 1, 0x0, 0x0 }, + { "stat_int3_status27", DPP_FIELD_FLAG_RC, 27, 1, 0x0, 0x0 }, + { "stat_int3_status26", DPP_FIELD_FLAG_RC, 26, 1, 0x0, 0x0 }, + { "stat_int3_status25", DPP_FIELD_FLAG_RC, 25, 1, 0x0, 0x0 }, + { "stat_int3_status24", DPP_FIELD_FLAG_RC, 24, 1, 0x0, 0x0 }, + { "stat_int3_status23", DPP_FIELD_FLAG_RC, 23, 1, 0x0, 0x0 }, + { "stat_int3_status22", DPP_FIELD_FLAG_RC, 22, 1, 0x0, 0x0 }, + { "stat_int3_status21", DPP_FIELD_FLAG_RC, 21, 1, 0x0, 0x0 }, + { "stat_int3_status20", DPP_FIELD_FLAG_RC, 20, 1, 0x0, 0x0 }, + { "stat_int3_status19", DPP_FIELD_FLAG_RC, 19, 1, 0x0, 0x0 }, + { "stat_int3_status18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int3_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int3_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int3_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int3_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int3_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int3_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int3_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int3_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int3_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int3_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int3_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int3_status6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int3_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int3_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int3_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int3_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int3_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int3_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 
}, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int4_en_reg[] = { + { "stat_int4_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int4_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int4_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int4_en_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int4_en_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int4_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int4_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int4_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int4_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int4_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int4_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int4_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int4_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int4_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int4_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int4_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int4_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int4_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int4_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int4_mask_reg[] = { + { "stat_int4_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int4_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int4_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int4_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int4_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int4_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int4_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int4_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int4_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int4_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int4_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int4_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 
0x0 }, + { "stat_int4_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int4_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int4_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int4_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int4_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int4_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int4_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int4_status_reg[] = { + { "stat_int4_mask_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int4_mask_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int4_mask_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int4_mask_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int4_mask_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int4_mask_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int4_mask_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int4_mask_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int4_mask_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int4_mask_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int4_mask_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int4_mask_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int4_mask_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int4_mask_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int4_mask_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int4_mask_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int4_mask_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int4_mask_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int4_mask_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int5_en_reg[] = { + { "stat_int5_en_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int5_en_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int5_en_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int5_en_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int5_en_14", 
DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int5_en_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int5_en_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int5_en_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int5_en_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int5_en_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int5_en_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int5_en_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int5_en_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int5_en_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int5_en_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int5_en_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int5_en_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "stat_int5_en_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int5_en_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int5_mask_reg[] = { + { "stat_int5_mask_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "stat_int5_mask_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "stat_int5_mask_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "stat_int5_mask_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "stat_int5_mask_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "stat_int5_mask_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "stat_int5_mask_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "stat_int5_mask_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "stat_int5_mask_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "stat_int5_mask_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "stat_int5_mask_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "stat_int5_mask_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "stat_int5_mask_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "stat_int5_mask_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "stat_int5_mask_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "stat_int5_mask_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "stat_int5_mask_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { 
"stat_int5_mask_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "stat_int5_mask_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_int5_status_reg[] = { + { "stat_int5_mask_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "stat_int5_mask_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "stat_int5_mask_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "stat_int5_mask_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "stat_int5_mask_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "stat_int5_mask_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "stat_int5_mask_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "stat_int5_mask_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "stat_int5_mask_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "stat_int5_mask_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "stat_int5_mask_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "stat_int5_mask_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "stat_int5_mask_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "stat_int5_mask_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "stat_int5_mask_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "stat_int5_mask_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "stat_int5_mask_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "stat_int5_mask_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "stat_int5_mask_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_ecc_bypass_reg[] = { + { "rschd_ecc_bypass_18", DPP_FIELD_FLAG_RW, 18, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, 
+ { "rschd_ecc_bypass_9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "rschd_ecc_bypass_0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_rschd_ecc_single_err_reg[] = { + { "rschd_ecc_single_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rschd_ecc_single_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_stat_stat_cfg_rschd_ecc_double_err_reg[] = { + { "rschd_ecc_double_err_18", DPP_FIELD_FLAG_RC, 18, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_6", DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "rschd_ecc_double_err_0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat0_reg[] = { + { "cpu_ind_ddr_wdat0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat1_reg[] = { + { "cpu_ind_ddr_wdat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat2_reg[] = { + { "cpu_ind_ddr_wdat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat3_reg[] = { + { "cpu_ind_ddr_wdat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat4_reg[] = { + { "cpu_ind_ddr_wdat4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_stat_stat_cfg_cpu_ind_ddr_wdat5_reg[] = { + { "cpu_ind_ddr_wdat5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat6_reg[] = { + { "cpu_ind_ddr_wdat6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat7_reg[] = { + { "cpu_ind_ddr_wdat7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat8_reg[] = { + { "cpu_ind_ddr_wdat8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat9_reg[] = { + { "cpu_ind_ddr_wdat9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat10_reg[] = { + { "cpu_ind_ddr_wdat10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat11_reg[] = { + { "cpu_ind_ddr_wdat11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat12_reg[] = { + { "cpu_ind_ddr_wdat12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat13_reg[] = { + { "cpu_ind_ddr_wdat13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat14_reg[] = { + { "cpu_ind_ddr_wdat14", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_wdat15_reg[] = { + { "cpu_ind_ddr_wdat15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_req_info_reg[] = { + { "rw_mode", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "read_mode", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "tm_cs", DPP_FIELD_FLAG_RW, 14, 1, 0x0, 0x0 }, + { "rw_addr", DPP_FIELD_FLAG_RW, 13, 14, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rd_done_reg[] = { + { "cpu_ind_ddr_rd_done", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat0_reg[] = { + { "cpu_ind_ddr_rdat0", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat1_reg[] = { + { "cpu_ind_ddr_rdat1", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat2_reg[] = { + { "cpu_ind_ddr_rdat2", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat3_reg[] = { + { "cpu_ind_ddr_rdat3", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat4_reg[] = { + { "cpu_ind_ddr_rdat4", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat5_reg[] = { + { "cpu_ind_ddr_rdat5", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat6_reg[] = { + { "cpu_ind_ddr_rdat6", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat7_reg[] = { + { "cpu_ind_ddr_rdat7", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat8_reg[] = { + { "cpu_ind_ddr_rdat8", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat9_reg[] = { + { "cpu_ind_ddr_rdat9", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat10_reg[] = { + { "cpu_ind_ddr_rdat10", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat11_reg[] = { + { "cpu_ind_ddr_rdat11", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat12_reg[] = { + { "cpu_ind_ddr_rdat12", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat13_reg[] = { + { "cpu_ind_ddr_rdat13", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat14_reg[] = { + { "cpu_ind_ddr_rdat14", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_ind_ddr_rdat15_reg[] = { + { "cpu_ind_ddr_rdat15", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_alu_ddr_cpu_rdy_reg[] = { + { "tm_alu_ddr_cpu_rdy", DPP_FIELD_FLAG_RO, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ept_flag_reg[] = { + { "ept_flag", 
DPP_FIELD_FLAG_RO, 5, 6, 0x3f, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_soft_rst_reg[] = { + { "ppu_soft_rst", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_smmu0_fc15_0_cnt_reg[] = { + { "stat_smmu0_fc15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_smmu0_stat_fc15_0_cnt_reg[] = { + { "smmu0_stat_fc15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_smmu0_stat_rsp15_0_cnt_reg[] = { + { "smmu0_stat_rsp15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_smmu0_req15_0_cnt_reg[] = { + { "stat_smmu0_req15_0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_stat_mec5_0_rsp_fc_cnt_reg[] = { + { "ppu_stat_mec5_0_rsp_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_ppu_mec5_0_key_fc_cnt_reg[] = { + { "stat_ppu_mec5_0_key_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_ppu_mec5_0_rsp_cnt_reg[] = { + { "stat_ppu_mec5_0_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu_stat_mec5_0_key_cnt_reg[] = { + { "ppu_stat_mec5_0_key_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_ppu5_0_no_exist_opcd_ex_cnt_reg[] = { + { "ppu5_0_no_exist_opcd_ex_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_se_etm_stat_wr_fc_cnt_reg[] = { + { "se_etm_stat_wr_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_se_etm_stat_rd_fc_cnt_reg[] = { + { "se_etm_stat_rd_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_etm_deq_fc_cnt_reg[] = { + { "stat_etm_deq_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_etm_enq_fc_cnt_reg[] = { + { "stat_etm_enq_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_oam_lm_fc_cnt_reg[] = { + { 
"stat_oam_lm_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_stat_lm_fc_cnt_reg[] = { + { "oam_stat_lm_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_oam_fc_cnt_reg[] = { + { "stat_oam_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cmmu_stat_fc_cnt_reg[] = { + { "cmmu_stat_fc_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_cmmu_req_cnt_reg[] = { + { "stat_cmmu_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_smmu0_plcr_rsp0_cnt_reg[] = { + { "smmu0_plcr_rsp0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_plcr_smmu0_req0_cnt_reg[] = { + { "plcr_smmu0_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_stat_oam_lm_rsp_cnt_reg[] = { + { "stat_oam_lm_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_stat_lm_req_cnt_reg[] = { + { "oam_stat_lm_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_oam_stat_req_cnt_reg[] = { + { "oam_stat_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_se_etm_stat_rsp_cnt_reg[] = { + { "se_etm_stat_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_se_wr_req_cnt_reg[] = { + { "etm_stat_se_wr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_se_rd_req_cnt_reg[] = { + { "etm_stat_se_rd_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_smmu0_req_cnt0_reg[] = { + { "etm_stat_smmu0_req_cnt0", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_etm_stat_smmu0_req_cnt1_reg[] = { + { "etm_stat_smmu0_req_cnt1", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_stat_eram_cpu_rsp_cnt_reg[] = { + { "tm_stat_eram_cpu_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 
0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_rd_eram_req_cnt_reg[] = { + { "cpu_rd_eram_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_wr_eram_req_cnt_reg[] = { + { "cpu_wr_eram_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_tm_stat_ddr_cpu_rsp_cnt_reg[] = { + { "tm_stat_ddr_cpu_rsp_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_rd_ddr_req_cnt_reg[] = { + { "cpu_rd_ddr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_stat_cfg_cpu_wr_ddr_req_cnt_reg[] = { + { "cpu_wr_ddr_req_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat1_reg[] = { + { "wdat1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat2_reg[] = { + { "wdat2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat3_reg[] = { + { "wdat3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat4_reg[] = { + { "wdat4", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat5_reg[] = { + { "wdat5", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat6_reg[] = { + { "wdat6", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat7_reg[] = { + { "wdat7", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat8_reg[] = { + { "wdat8", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat9_reg[] = { + { "wdat9", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat10_reg[] = { + { "wdat10", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat11_reg[] = { + { "wdat11", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat12_reg[] = { + { "wdat12", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat13_reg[] = { + { 
"wdat13", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat14_reg[] = { + { "wdat14", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat15_reg[] = { + { "wdat15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat16_reg[] = { + { "wdat16", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat17_reg[] = { + { "wdat17", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat18_reg[] = { + { "wdat18", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_cpu_ind_wdat19_reg[] = { + { "wdat19", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_t_strwc_cfg_reg[] = { + { "t_strwc_cfg", DPP_FIELD_FLAG_RW, 1, 2, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_int_unmask_flag_reg[] = { + { "etcam_int_unmask_flag", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_int_en0_reg[] = { + { "etcam_int_en17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "etcam_int_en16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "etcam_int_en15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "etcam_int_en14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "etcam_int_en13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "etcam_int_en12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "etcam_int_en11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "etcam_int_en10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "etcam_int_en9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "etcam_int_en8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "etcam_int_en7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "etcam_int_en6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "etcam_int_en5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "etcam_int_en4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "etcam_int_en3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "etcam_int_en2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "etcam_int_en1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { 
"etcam_int_en0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_int_mask0_reg[] = { + { "etcam_int_mask17", DPP_FIELD_FLAG_RW, 17, 1, 0x1, 0x0 }, + { "etcam_int_mask16", DPP_FIELD_FLAG_RW, 16, 1, 0x1, 0x0 }, + { "etcam_int_mask15", DPP_FIELD_FLAG_RW, 15, 1, 0x1, 0x0 }, + { "etcam_int_mask14", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "etcam_int_mask13", DPP_FIELD_FLAG_RW, 13, 1, 0x1, 0x0 }, + { "etcam_int_mask12", DPP_FIELD_FLAG_RW, 12, 1, 0x1, 0x0 }, + { "etcam_int_mask11", DPP_FIELD_FLAG_RW, 11, 1, 0x1, 0x0 }, + { "etcam_int_mask10", DPP_FIELD_FLAG_RW, 10, 1, 0x1, 0x0 }, + { "etcam_int_mask9", DPP_FIELD_FLAG_RW, 9, 1, 0x1, 0x0 }, + { "etcam_int_mask8", DPP_FIELD_FLAG_RW, 8, 1, 0x1, 0x0 }, + { "etcam_int_mask7", DPP_FIELD_FLAG_RW, 7, 1, 0x1, 0x0 }, + { "etcam_int_mask6", DPP_FIELD_FLAG_RW, 6, 1, 0x1, 0x0 }, + { "etcam_int_mask5", DPP_FIELD_FLAG_RW, 5, 1, 0x1, 0x0 }, + { "etcam_int_mask4", DPP_FIELD_FLAG_RW, 4, 1, 0x1, 0x0 }, + { "etcam_int_mask3", DPP_FIELD_FLAG_RW, 3, 1, 0x1, 0x0 }, + { "etcam_int_mask2", DPP_FIELD_FLAG_RW, 2, 1, 0x1, 0x0 }, + { "etcam_int_mask1", DPP_FIELD_FLAG_RW, 1, 1, 0x1, 0x0 }, + { "etcam_int_mask0", DPP_FIELD_FLAG_RW, 0, 1, 0x1, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_int_status_reg[] = { + { "etcam_int_status17", DPP_FIELD_FLAG_RC, 17, 1, 0x0, 0x0 }, + { "etcam_int_status16", DPP_FIELD_FLAG_RC, 16, 1, 0x0, 0x0 }, + { "etcam_int_status15", DPP_FIELD_FLAG_RC, 15, 1, 0x0, 0x0 }, + { "etcam_int_status14", DPP_FIELD_FLAG_RC, 14, 1, 0x0, 0x0 }, + { "etcam_int_status13", DPP_FIELD_FLAG_RC, 13, 1, 0x0, 0x0 }, + { "etcam_int_status12", DPP_FIELD_FLAG_RC, 12, 1, 0x0, 0x0 }, + { "etcam_int_status11", DPP_FIELD_FLAG_RC, 11, 1, 0x0, 0x0 }, + { "etcam_int_status10", DPP_FIELD_FLAG_RC, 10, 1, 0x0, 0x0 }, + { "etcam_int_status9", DPP_FIELD_FLAG_RC, 9, 1, 0x0, 0x0 }, + { "etcam_int_status8", DPP_FIELD_FLAG_RC, 8, 1, 0x0, 0x0 }, + { "etcam_int_status7", DPP_FIELD_FLAG_RC, 7, 1, 0x0, 0x0 }, + { "etcam_int_status6", 
DPP_FIELD_FLAG_RC, 6, 1, 0x0, 0x0 }, + { "etcam_int_status5", DPP_FIELD_FLAG_RC, 5, 1, 0x0, 0x0 }, + { "etcam_int_status4", DPP_FIELD_FLAG_RC, 4, 1, 0x0, 0x0 }, + { "etcam_int_status3", DPP_FIELD_FLAG_RC, 3, 1, 0x0, 0x0 }, + { "etcam_int_status2", DPP_FIELD_FLAG_RC, 2, 1, 0x0, 0x0 }, + { "etcam_int_status1", DPP_FIELD_FLAG_RC, 1, 1, 0x0, 0x0 }, + { "etcam_int_status0", DPP_FIELD_FLAG_RC, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_int_tb_ini_ok_reg[] = { + { "int_tb_ini_ok", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_clk_en_reg[] = { + { "etcam_clk_en", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_as_etcam_req0_cnt_reg[] = { + { "as_etcam_req0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_as_etcam_req1_cnt_reg[] = { + { "as_etcam_req1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_as_index0_cnt_reg[] = { + { "etcam_as_index0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_as_index1_cnt_reg[] = { + { "etcam_as_index1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_not_hit0_cnt_reg[] = { + { "etcam_not_hit0_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_not_hit1_cnt_reg[] = { + { "etcam_not_hit1_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_table_id_not_match_cnt_reg[] = { + { "table_id_not_match_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_table_id_clash01_cnt_reg[] = { + { "table_id_clash01_cnt", DPP_FIELD_FLAG_RC, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_cpu_fl_reg[] = { + { "etcam_cpu_fl", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_stat_etcam_etcam_arb_empty_reg[] = { + { "etcam_arb_empty", DPP_FIELD_FLAG_RO, 31, 32, 0xffffffff, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_event0_reg[] = { + { "cfg_finish_int_event0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_event1_reg[] = { + { "cfg_finish_int_event1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_event2_reg[] = { + { "cfg_finish_int_event2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_event3_reg[] = { + { "cfg_finish_int_event3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_maks0_reg[] = { + { "cfg_finish_int_mask0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_maks1_reg[] = { + { "cfg_finish_int_mask1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_maks2_reg[] = { + { "cfg_finish_int_mask2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_maks3_reg[] = { + { "cfg_finish_int_mask3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_test0_reg[] = { + { "cfg_finish_int_test0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_test1_reg[] = { + { "cfg_finish_int_test1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_test2_reg[] = { + { "cfg_finish_int_test2", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_finish_int_test3_reg[] = { + { "cfg_finish_int_test3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_int_to_riscv_sel_reg[] = { + { "cfg_dtb_int_to_riscv_sel0", DPP_FIELD_FLAG_RW, 6, 7, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_ep_int_msix_enable_reg[] = { + { "cfg_dtb_ep_int_msix_enable", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_h_0_15_reg[] = { + { "cfg_dtb_ep_doorbell_addr_h_0_15", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_l_0_15_reg[] = { + { "cfg_dtb_ep_doorbell_addr_l_0_15", DPP_FIELD_FLAG_RW, 31, 
32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_debug_mode_en_reg[] = { + { "cfg_dtb_debug_mode_en", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_table_addr_high_reg[] = { + { "info_axi_last_rd_table_addr_high", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_table_addr_low_reg[] = { + { "info_axi_last_rd_table_addr_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_table_len_reg[] = { + { "info_axi_last_rd_table_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_table_user_reg[] = { + { "info_rd_table_user_en", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "info_rd_table_epid", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "info_rd_table_vfunc_num", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "info_rd_table_func_num", DPP_FIELD_FLAG_RO, 7, 3, 0x0, 0x0 }, + { "info_rd_table_vfunc_active", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_table_onload_cnt_reg[] = { + { "info_axi_last_rd_table_onload_cnt", DPP_FIELD_FLAG_RO, 3, 4, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_axi_rd_table_resp_err_reg[] = { + { "cnt_axi_rd_table_resp_err", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_pd_addr_high_reg[] = { + { "info_axi_last_rd_pd_addr_high", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_pd_addr_low_reg[] = { + { "info_axi_last_rd_pd_addr_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_pd_len_reg[] = { + { "info_axi_last_rd_pd_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_pd_user_reg[] = { + { "info_rd_pd_user_en", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "info_rd_pd_epid", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "info_rd_pd_vfunc_num", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { 
"info_rd_pd_func_num", DPP_FIELD_FLAG_RO, 7, 3, 0x0, 0x0 }, + { "info_rd_pd_vfunc_active", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_rd_pd_onload_cnt_reg[] = { + { "info_axi_last_rd_pd_onload_cnt", DPP_FIELD_FLAG_RO, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_axi_rd_pd_resp_err_reg[] = { + { "cnt_axi_rd_pd_resp_err", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_high_reg[] = { + { "info_axi_last_wr_ctrl_addr_high", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_low_reg[] = { + { "info_axi_last_wr_ctrl_addr_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ctrl_len_reg[] = { + { "info_axi_last_wr_ctrl_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ctrl_user_reg[] = { + { "info_wr_ctrl_user_en", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "info_wr_ctrl_epid", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "info_wr_ctrl_vfunc_num", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "info_wr_ctrl_func_num", DPP_FIELD_FLAG_RO, 7, 3, 0x0, 0x0 }, + { "info_wr_ctrl_vfunc_active", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ctrl_onload_cnt_reg[] = { + { "info_axi_last_wr_ctrl_onload_cnt", DPP_FIELD_FLAG_RO, 3, 4, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_axi_wr_ctrl_resp_err_reg[] = { + { "cnt_axi_wr_ctrl_resp_err", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_high_reg[] = { + { "info_axi_last_wr_ddr_addr_high", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_low_reg[] = { + { "info_axi_last_wr_ddr_addr_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ddr_len_reg[] = { + { "info_axi_last_wr_ddr_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_dtb_dtb_cfg_info_axi_last_wr_ddr_user_reg[] = { + { "info_wr_ddr_user_en", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "info_wr_ddr_epid", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "info_wr_ddr_vfunc_num", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "info_wr_ddr_func_num", DPP_FIELD_FLAG_RO, 7, 3, 0x0, 0x0 }, + { "info_wr_ddr_vfunc_active", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_ddr_onload_cnt_reg[] = { + { "info_axi_last_wr_ddr_onload_cnt", DPP_FIELD_FLAG_RO, 3, 4, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_axi_wr_ddr_resp_err_reg[] = { + { "cnt_axi_wr_ddr_resp_err", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_fin_addr_high_reg[] = { + { "info_axi_last_wr_fin_addr_high", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_fin_addr_low_reg[] = { + { "info_axi_last_wr_fin_addr_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_fin_len_reg[] = { + { "info_axi_last_wr_fin_len", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_fin_user_reg[] = { + { "info_wr_fin_user_en", DPP_FIELD_FLAG_RO, 31, 1, 0x0, 0x0 }, + { "info_wr_fin_epid", DPP_FIELD_FLAG_RO, 27, 4, 0x0, 0x0 }, + { "info_wr_fin_vfunc_num", DPP_FIELD_FLAG_RO, 23, 8, 0x0, 0x0 }, + { "info_wr_fin_func_num", DPP_FIELD_FLAG_RO, 7, 3, 0x0, 0x0 }, + { "info_wr_fin_vfunc_active", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_axi_last_wr_fin_onload_cnt_reg[] = { + { "info_axi_last_wr_fin_onload_cnt", DPP_FIELD_FLAG_RO, 3, 4, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_axi_wr_fin_resp_err_reg[] = { + { "cnt_axi_wr_fin_resp_err", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_high_reg[] = { + { "cnt_dtb_wr_smmu0_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_low_reg[] = { + { 
"cnt_dtb_wr_smmu0_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_high_reg[] = { + { "cnt_dtb_wr_smmu1_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_low_reg[] = { + { "cnt_dtb_wr_smmu1_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_high_reg[] = { + { "cnt_dtb_wr_zcam_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_low_reg[] = { + { "cnt_dtb_wr_zcam_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_high_reg[] = { + { "cnt_dtb_wr_etcam_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_low_reg[] = { + { "cnt_dtb_wr_etcam_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_hash_table_high_reg[] = { + { "cnt_dtb_wr_hash_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_wr_hash_table_low_reg[] = { + { "cnt_dtb_wr_hash_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_high_reg[] = { + { "cnt_dtb_rd_smmu0_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_low_reg[] = { + { "cnt_dtb_rd_smmu0_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_high_reg[] = { + { "cnt_dtb_rd_smmu1_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_low_reg[] = { + { "cnt_dtb_rd_smmu1_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_high_reg[] = { + { "cnt_dtb_rd_zcam_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_low_reg[] = { + { 
"cnt_dtb_rd_zcam_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_high_reg[] = { + { "cnt_dtb_rd_etcam_table_high", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_low_reg[] = { + { "cnt_dtb_rd_etcam_table_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_wr_ctrl_state_reg[] = { + { "info_wr_ctrl_state", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_rd_table_state_reg[] = { + { "info_rd_table_state", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_rd_pd_state_reg[] = { + { "info_rd_pd_state", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_dump_cmd_state_reg[] = { + { "info_dump_cmd_state", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_info_wr_ddr_state_reg[] = { + { "info_wr_ddr_state", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_cfg_cfg_dtb_debug_info_clr_reg[] = { + { "cfg_dtb_debug_info_clr", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_stat_dump_thrd_0_15_reg[] = { + { "cfg_ddos_stat_dump_thrd", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_stat_dump_thrd_comp_en_reg[] = { + { "cfg_ddos_stat_dump_thrd_comp_en", DPP_FIELD_FLAG_RW, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_dump_stat_num_reg[] = { + { "cfg_ddos_dump_stat_num", DPP_FIELD_FLAG_RW, 23, 24, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_even_hash_table_baddr_reg[] = { + { "cfg_ddos_even_hash_table_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_odd_hash_table_baddr_reg[] = { + { "cfg_ddos_odd_hash_table_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_stat_index_offset_reg[] = { + { "cfg_ddos_stat_index_offset", DPP_FIELD_FLAG_RW, 18, 19, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_dtb_ddos_cfg_ddos_ns_flag_cnt_reg[] = { + { "cfg_ddos_ns_flag_cnt", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_even_stat_table_baddr_reg[] = { + { "cfg_ddos_even_stat_table_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_odd_stat_table_baddr_reg[] = { + { "cfg_ddos_odd_stat_table_baddr", DPP_FIELD_FLAG_RW, 19, 20, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_even_stat_dump_daddr_h_reg[] = { + { "cfg_ddos_even_stat_dump_daddr_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_even_stat_dump_daddr_l_reg[] = { + { "cfg_ddos_even_stat_dump_daddr_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_h_reg[] = { + { "cfg_ddos_odd_stat_dump_daddr_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_l_reg[] = { + { "cfg_ddos_odd_stat_dump_daddr_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_work_mode_enable_reg[] = { + { "cfg_ddos_mode_work_enable", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_stat_table_len_reg[] = { + { "cfg_ddos_stat_table_len", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_ddos_cfg_ddos_hash_table_len_reg[] = { + { "cfg_ddos_hash_table_len", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram0_0_255_reg[] = { + { "traf_ctrl_ram0_0_255", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram1_0_255_reg[] = { + { "traf_ctrl_ram1_0_255", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram2_0_255_reg[] = { + { "traf_ctrl_ram2_0_255", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram3_0_255_reg[] = { + { "traf_ctrl_ram3_0_255", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram4_0_255_reg[] = { + { 
"traf_ctrl_ram4_0_255", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_traf_ctrl_ram5_0_63_reg[] = { + { "traf_ctrl_ram5_0_63", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_dump_pd_ram_0_2047_reg[] = { + { "dump_pd_ram_0_2047", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_rd_ctrl_ram_0_4095_reg[] = { + { "rd_ctrl_ram_0_4095", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_rd_table_ram_0_8191_reg[] = { + { "rd_table_ram_0_8191", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_dtb_dtb_ram_dtb_cmd_man_ram_0_16383_reg[] = { + { "dtb_cmd_man_ram_0_16383", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_ms_st_reg[] = { + { "cpu_trpgrx_ms_st", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_ms_ind_reg[] = { + { "cpu_trpgrx_ms_ind", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpg_ms_slave_ind_reg[] = { + { "cpu_trpgrx_ms_slave_ind", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_up_water_level_reg[] = { + { "cpu_trpgrx_up_water_level", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_rx_port_cpu_trpgrx_low_water_level_reg[] = { + { "cpu_trpgrx_low_water_level", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_ms_st_reg[] = { + { "cpu_trpgtx_ms_st", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_ms_ind_reg[] = { + { "cpu_trpgtx_ms_ind", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_port_cpu_trpg_ms_slave_ind_reg[] = { + { "cpu_trpgtx_ms_slave_ind", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_glb_cpu_todtime_update_int_event_reg[] = { + { "cpu_todtime_update_int_event", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_glb_cpu_todtime_update_int_test_reg[] = { + { 
"cpu_todtime_update_int_test", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_glb_cpu_todtime_update_int_addr_reg[] = { + { "cpu_todtime_update_int_addr", DPP_FIELD_FLAG_RO, 10, 11, 0x0, 0x0 }, +}; +DPP_FIELD_T g_trpg_trpg_tx_todtime_ram_trpg_tx_todtime_ram_reg[] = { + { "trpg_tx_todtime_ram", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_test_reg_reg[] = { + { "cfg_tsn_test_reg", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_qbv_enable_reg[] = { + { "cfg_tsn_port_qbv_enable", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_phy_port_sel_reg[] = { + { "cfg_tsn_phy_port_sel", DPP_FIELD_FLAG_RW, 3, 4, 0xf, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_time_sel_reg[] = { + { "cfg_tsn_port_time_sel", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_clk_freq_reg[] = { + { "en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "cfg_tsn_clk_freq", DPP_FIELD_FLAG_RW, 30, 31, 0x64, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_read_ram_n_reg[] = { + { "cfg_tsn_data", DPP_FIELD_FLAG_RO, 15, 8, 0x0, 0x0 }, + { "cfg_tsn_read_status", DPP_FIELD_FLAG_RO, 5, 4, 0x0, 0x0 }, + { "cfg_tsn_read_ram_n", DPP_FIELD_FLAG_RO, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_exe_time_reg[] = { + { "cfg_tsn_exe_time", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_itr_shift_reg[] = { + { "cfg_tsn_port_itr_shift", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_base_time_h_reg[] = { + { "cfg_tsn_port_base_time_h", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_base_time_l_reg[] = { + { "cfg_tsn_port_base_time_l", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_cycle_time_h_reg[] = { + { "cfg_tsn_port_cycle_time_h", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_tsn_tsn_port_cfg_tsn_port_cycle_time_l_reg[] = { + { "cfg_tsn_port_cycle_time_l", DPP_FIELD_FLAG_RW, 19, 20, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_guard_band_time_reg[] = { + { "cfg_tsn_port_guard_band_time", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_default_gate_en_reg[] = { + { "cfg_tsn_port_default_gate_en", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_change_gate_en_reg[] = { + { "cfg_tsn_port_change_gate_en", DPP_FIELD_FLAG_RW, 7, 8, 0xff, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_init_finish_reg[] = { + { "cfg_tsn_port_init_finish", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_change_en_reg[] = { + { "cfg_tsn_port_change_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_gcl_num0_reg[] = { + { "cfg_tsn_port_gcl_num0", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_gcl_num1_reg[] = { + { "cfg_tsn_port_gcl_num1", DPP_FIELD_FLAG_RW, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_gcl_value0_reg[] = { + { "cfg_tsn_port_gcl_gate_control0", DPP_FIELD_FLAG_RW, 31, 8, 0x0, + 0x0 }, + { "cfg_tsn_port_gcl_interval_time0", DPP_FIELD_FLAG_RW, 23, 24, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_tsn_tsn_port_cfg_tsn_port_gcl_value1_reg[] = { + { "cfg_tsn_port_gcl_gate_control1", DPP_FIELD_FLAG_RW, 31, 8, 0x0, + 0x0 }, + { "cfg_tsn_port_gcl_interval_time1", DPP_FIELD_FLAG_RW, 23, 24, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_cfg_epid_v_func_num_reg[] = { + { "user_en", DPP_FIELD_FLAG_RW, 31, 1, 0x0, 0x0 }, + { "cfg_epid", DPP_FIELD_FLAG_RW, 27, 4, 0x0, 0x0 }, + { "cfg_vfunc_num", DPP_FIELD_FLAG_RW, 23, 8, 0x0, 0x0 }, + { "cfg_func_num", DPP_FIELD_FLAG_RW, 7, 3, 0x0, 0x0 }, + { "cfg_vfunc_active", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_info_axim_rw_hsk_cnt_reg[] = { + { "axim_rd_handshake_cnt", DPP_FIELD_FLAG_RO, 24, 9, 
0x0, 0x0 }, + { "axim_wr_handshake_cnt", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_info_axim_last_wr_id_reg[] = { + { "axim_rd_id", DPP_FIELD_FLAG_RO, 24, 9, 0x0, 0x0 }, + { "axim_wr_id", DPP_FIELD_FLAG_RO, 8, 9, 0x0, 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_info_axim_last_wr_addr_h_reg[] = { + { "aximlastwraddrhigh", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_info_axim_last_wr_addr_l_reg[] = { + { "aximlastrdaddrlow", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_axi_axi_conv_cfg_debug_info_clr_en_reg[] = { + { "cfg_global_clr_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pp1s_interrupt_reg[] = { + { "int_state", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "int_test", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "int_clr", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "int_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pp1s_external_select_reg[] = { + { "pp1s_external_select", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pp1s_out_select_reg[] = { + { "pp1s_out_sel", DPP_FIELD_FLAG_RW, 1, 2, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_test_pp1s_select_reg[] = { + { "test_pp1s_sel", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_local_pp1s_en_reg[] = { + { "local_pp1s_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_local_pp1s_adjust_reg[] = { + { "local_pp1s_adjust_sel", DPP_FIELD_FLAG_RW, 2, 2, 0x0, 0x0 }, + { "local_pp1s_adjust_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_local_pp1s_adjust_value_reg[] = { + { "local_pp1s_adjust_value", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pp1s_to_np_select_reg[] = { + { "pp1s_to_np_sel", DPP_FIELD_FLAG_RW, 1, 2, 0x3, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u1_sel_reg[] = { + { "pd_u1_sel1", DPP_FIELD_FLAG_RW, 5, 3, 0x0, 0x0 }, + { "pd_u1_sel0", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u1_pd0_shift_reg[] = { + { "pd_u1_pd0_shift", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u1_pd1_shift_reg[] = { + { "pd_u1_pd1_shift", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u1_result_reg[] = { + { "pd_u1_result_sign", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "pd_u1_overflow", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "pd_u1_result", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u2_sel_reg[] = { + { "pd_u2_sel1", DPP_FIELD_FLAG_RW, 5, 3, 0x0, 0x0 }, + { "pd_u2_sel0", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u2_pd0_shift_reg[] = { + { "pd_u2_pd0_shift", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u2_pd1_shift_reg[] = { + { "pd_u2_pd1_shift", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_pd_u2_result_reg[] = { + { "pd_u2_result_sign", DPP_FIELD_FLAG_RO, 30, 1, 0x0, 0x0 }, + { "pd_u2_overflow", DPP_FIELD_FLAG_RO, 29, 1, 0x0, 0x0 }, + { "pd_u2_result", DPP_FIELD_FLAG_RO, 28, 29, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_nanosecond_delay0_reg[] = { + { "tsn_group_nanosecond_delay0", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_fracnanosecond_delay0_reg[] = { + { "tsn_group_fracnanosecond_delay0", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_nanosecond_delay1_reg[] = { + { "tsn_group_nanosecond_delay1", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_fracnanosecond_delay1_reg[] = { + { "tsn_group_fracnanosecond_delay1", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_nanosecond_delay2_reg[] = { + { "tsn_group_nanosecond_delay2", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_fracnanosecond_delay2_reg[] = { + { "tsn_group_fracnanosecond_delay2", DPP_FIELD_FLAG_RW, 31, 
32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_nanosecond_delay3_reg[] = { + { "tsn_group_nanosecond_delay3", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_group_fracnanosecond_delay3_reg[] = { + { "tsn_group_fracnanosecond_delay3", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_tsn_ptp1588_rdma_nanosecond_delay_reg[] = { + { "ptp1588_rdma_nanosecond_delay", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_ptp1588_rdma_fracnanosecond_delay_reg[] = { + { "ptp1588_rdma_fracnanosecond_delay", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_ptp1588_np_nanosecond_delay_reg[] = { + { "ptp1588_np_nanosecond_delay", DPP_FIELD_FLAG_RW, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_ptp1588_np_fracnanosecond_delay_reg[] = { + { "ptp1588_np_fracnanosecond_delay", DPP_FIELD_FLAG_RW, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptp_top_time_sync_period_reg[] = { + { "time_sync_period", DPP_FIELD_FLAG_RW, 2, 3, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_module_id_reg[] = { + { "module_id", DPP_FIELD_FLAG_RO, 15, 16, 0x89, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_module_version_reg[] = { + { "module_major_version", DPP_FIELD_FLAG_RO, 15, 8, 0x3, 0x0 }, + { "module_minor_version", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_module_date_reg[] = { + { "year", DPP_FIELD_FLAG_RO, 31, 16, 0x2017, 0x0 }, + { "month", DPP_FIELD_FLAG_RO, 15, 8, 0x1, 0x0 }, + { "date", DPP_FIELD_FLAG_RO, 7, 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_interrupt_status_reg[] = { + { "pps_in_status", DPP_FIELD_FLAG_RO, 4, 1, 0x0, 0x0 }, + { "fifo_almost_full_status", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "fifo_no_empty_status", DPP_FIELD_FLAG_RO, 2, 1, 0x0, 0x0 }, + { "trigger_output_status", DPP_FIELD_FLAG_RO, 1, 1, 0x0, 0x0 }, + { "trigger_input_status", DPP_FIELD_FLAG_RO, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_interrupt_event_reg[] = { + { 
"pps_in_event", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "fifo_almost_full_event", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "fifo_no_empty_event", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "trigger_output_event", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "trigger_input_event", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_interrupt_mask_reg[] = { + { "pps_in_event_mask", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "fifo_almost_full_event_mask", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "fifo_no_empty_event_mask", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "trigger_output_event_mask", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "trigger_input_eventt_mask", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_interrupt_test_reg[] = { + { "trigger_pps_in_event_test", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "trigger_fifo_almost_full_event_test", DPP_FIELD_FLAG_RW, 3, 1, 0x0, + 0x0 }, + { "trigger_fifo_no_empty_event_test", DPP_FIELD_FLAG_RW, 2, 1, 0x0, + 0x0 }, + { "trigger_output_event_test", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "trigger_input_event_test", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_hw_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_hw_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_hw_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_hw_clock_cycle", DPP_FIELD_FLAG_RW, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_ptp_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_ptp_clock_cycle", DPP_FIELD_FLAG_RW, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp_configuration_reg[] = { + { "trig_oe", DPP_FIELD_FLAG_RW, 18, 1, 0x0, 0x0 }, + { "hw_time_update_en", DPP_FIELD_FLAG_RW, 17, 1, 0x0, 0x0 }, + { "ptp1588_tod_time_update_en", DPP_FIELD_FLAG_RW, 16, 1, 0x0, 0x0 }, + { 
"timer_enable", DPP_FIELD_FLAG_RW, 15, 1, 0x0, 0x0 }, + { "pps_output_enable", DPP_FIELD_FLAG_RW, 14, 1, 0x1, 0x0 }, + { "pp1_output_enable", DPP_FIELD_FLAG_RW, 13, 1, 0x0, 0x0 }, + { "pp2_output_enable", DPP_FIELD_FLAG_RW, 12, 1, 0x0, 0x0 }, + { "enable_writing_timestamps_to_the_fifo", DPP_FIELD_FLAG_RW, 11, 1, + 0x0, 0x0 }, + { "l2s_time_output_select", DPP_FIELD_FLAG_RW, 10, 1, 0x0, 0x0 }, + { "reserved_9", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "pps_input_select", DPP_FIELD_FLAG_RW, 8, 1, 0x0, 0x0 }, + { "pp_output_select", DPP_FIELD_FLAG_RW, 7, 1, 0x0, 0x0 }, + { "reserved_6", DPP_FIELD_FLAG_RO, 6, 1, 0x0, 0x0 }, + { "timer_run_mode", DPP_FIELD_FLAG_RW, 5, 2, 0x0, 0x0 }, + { "update_command_select", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "trigger_out_enable", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "trigger_in_enable", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "timer_capture_slave_mode", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_timer_control_reg[] = { + { "ptpmoutputsynchroningstate", DPP_FIELD_FLAG_RO, 3, 1, 0x0, 0x0 }, + { "ptp1588_fifo_read_command", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "adjust_the_timer", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pps_income_delay_reg[] = { + { "pps_income_delay_nanosecond", DPP_FIELD_FLAG_RW, 31, 16, 0x0, 0x0 }, + { "pps_income_delay_frac_nanosecond", DPP_FIELD_FLAG_RW, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_clock_cycle_update_reg[] = { + { "tsn3_clock_cycle_update_enable", DPP_FIELD_FLAG_RW, 4, 1, 0x0, 0x0 }, + { "tsn2_clock_cycle_update_enable", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { "tsn1_clock_cycle_update_enable", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "tsn0_clock_cycle_update_enable", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "ptp1588_clock_cycle_update_enable", DPP_FIELD_FLAG_RW, 0, 1, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_cycle_time_of_output_period_pulse_1_reg[] = { + { "clock_number_of_output_period_pulse_1", DPP_FIELD_FLAG_RW, 
31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_cycle_time_of_output_period_pulse_2_reg[] = { + { "clock_number_of_output_period_pulse_2", DPP_FIELD_FLAG_RW, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_timer_latch_en_reg[] = { + { "latch_the_timer_en", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_timer_latch_sel_reg[] = { + { "timer_latch_sel", DPP_FIELD_FLAG_RW, 5, 6, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_tod_nanosecond_reg[] = { + { "trigger_in_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_lower_tod_second_reg[] = { + { "trigger_in_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_high_tod_second_reg[] = { + { "trigger_in_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_fracnanosecond_reg[] = { + { "trigger_in_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_hardware_time_low_reg[] = { + { "trigger_in_hardware_time_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_in_hardware_time_high_reg[] = { + { "trigger_in_hardware_time_high", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_out_tod_nanosecond_reg[] = { + { "trigger_out_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_out_lower_tod_second_reg[] = { + { "trigger_out_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_out_high_tod_second_reg[] = { + { "trigger_out_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_out_hardware_time_low_reg[] = { + { "trigger_out_hardware_time_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_trigger_out_hardware_time_high_reg[] = { + { "trigger_out_hardware_time_high", DPP_FIELD_FLAG_RO, 15, 16, 0x0, + 
0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_tod_nanosecond_reg[] = { + { "adjust_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_lower_tod_second_reg[] = { + { "adjust_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_high_tod_second_reg[] = { + { "adjust_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_fracnanosecond_reg[] = { + { "adjust_fracnanosecond", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_hardware_time_low_reg[] = { + { "adjust_hardware_time_low", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_adjust_hardware_time_high_reg[] = { + { "adjust_hardware_time_high", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_tod_nanosecond_reg[] = { + { "latch_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_lower_tod_second_reg[] = { + { "latch_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_high_tod_second_reg[] = { + { "latch_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_fracnanosecond_reg[] = { + { "latch_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_hardware_time_low_reg[] = { + { "latch_hardware_time_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_latch_hardware_time_high_reg[] = { + { "latch_hardware_time_high", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tod_nanosecond_reg[] = { + { "real_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_lower_tod_second_reg[] = { + { "real_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_high_tod_second_reg[] = { + { "real_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; 
+DPP_FIELD_T g_ptptm_ptptm_real_hardware_time_low_reg[] = { + { "real_hardware_time_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_hardware_time_high_reg[] = { + { "real_hardware_time_high", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp1588_event_message_port_reg[] = { + { "ptp1588_event_message_port", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp1588_event_message_timestamp_low_reg[] = { + { "ptp1588_event_message_timestamp_low", DPP_FIELD_FLAG_RO, 31, 32, 0x0, + 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp1588_event_message_timestamp_high_reg[] = { + { "ptp1588_event_message_timestamp_high", DPP_FIELD_FLAG_RO, 31, 32, + 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_ptp1588_event_message_fifo_status_reg[] = { + { "fifo_full", DPP_FIELD_FLAG_RO, 9, 1, 0x0, 0x0 }, + { "fifo_empty", DPP_FIELD_FLAG_RO, 8, 1, 0x1, 0x0 }, + { "timestamps_count", DPP_FIELD_FLAG_RO, 7, 8, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tod_nanosecond_reg[] = { + { "latch_1588tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_lower_tod_second_reg[] = { + { "latch_lower_1588tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_high_tod_second_reg[] = { + { "latch_high_1588tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_fracnanosecond_reg[] = { + { "latch_1588fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn_time_configuration_reg[] = { + { "tsn_pps_enable", DPP_FIELD_FLAG_RW, 19, 4, 0x0, 0x0 }, + { "tsn_timer_enable", DPP_FIELD_FLAG_RW, 15, 4, 0x0, 0x0 }, + { "tsn_timer_run_mode", DPP_FIELD_FLAG_RW, 11, 8, 0x0, 0x0 }, + { "timer_capture_slave_mode", DPP_FIELD_FLAG_RW, 3, 4, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn_timer_control_reg[] = { + { "adjust_the_tsn3_timer", DPP_FIELD_FLAG_RW, 3, 1, 0x0, 0x0 }, + { 
"adjust_the_tsn2_timer", DPP_FIELD_FLAG_RW, 2, 1, 0x0, 0x0 }, + { "adjust_the_tsn1_timer", DPP_FIELD_FLAG_RW, 1, 1, 0x0, 0x0 }, + { "adjust_the_tsn0_timer", DPP_FIELD_FLAG_RW, 0, 1, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_tsn0_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_tsn0_clock_cycle", DPP_FIELD_FLAG_RW, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_tsn1_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_tsn1_clock_cycle", DPP_FIELD_FLAG_RW, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_tsn2_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_tsn2_clock_cycle", DPP_FIELD_FLAG_RW, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_tsn3_clock_cycle", DPP_FIELD_FLAG_RW, 7, 8, + 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_tsn3_clock_cycle", DPP_FIELD_FLAG_RW, 31, + 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_adjust_tod_nanosecond_reg[] = { + { "tsn0_adjust_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_adjust_lower_tod_second_reg[] = { + { "tsn0_adjust_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_adjust_high_tod_second_reg[] = { + { "tsn0_adjust_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_adjust_fracnanosecond_reg[] = { + { "tsn0_adjust_fracnanosecond", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 
0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_adjust_tod_nanosecond_reg[] = { + { "tsn1_adjust_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_adjust_lower_tod_second_reg[] = { + { "tsn1_adjust_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_adjust_high_tod_second_reg[] = { + { "tsn1_adjust_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_adjust_fracnanosecond_reg[] = { + { "tsn1_adjust_fracnanosecond", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_adjust_tod_nanosecond_reg[] = { + { "tsn2_adjust_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_adjust_lower_tod_second_reg[] = { + { "tsn2_adjust_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_adjust_high_tod_second_reg[] = { + { "tsn2_adjust_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_adjust_fracnanosecond_reg[] = { + { "tsn2_adjust_fracnanosecond", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_adjust_tod_nanosecond_reg[] = { + { "tsn3_adjust_tod_nanosecond", DPP_FIELD_FLAG_RW, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_adjust_lower_tod_second_reg[] = { + { "tsn3_adjust_lower_tod_second", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_adjust_high_tod_second_reg[] = { + { "tsn3_adjust_high_tod_second", DPP_FIELD_FLAG_RW, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_adjust_fracnanosecond_reg[] = { + { "tsn3_adjust_fracnanosecond", DPP_FIELD_FLAG_RW, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_latch_tod_nanosecond_reg[] = { + { "tsn0_latch_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_latch_lower_tod_second_reg[] = { + { "tsn0_latch_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_latch_high_tod_second_reg[] = { + { "tsn0_latch_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_latch_fracnanosecond_reg[] = { + { "tsn0_latch_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_latch_tod_nanosecond_reg[] = { + { "tsn1_latch_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_latch_lower_tod_second_reg[] = { + { "tsn1_latch_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_latch_high_tod_second_reg[] = { + { "tsn1_latch_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_latch_fracnanosecond_reg[] = { + { "tsn1_latch_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_latch_tod_nanosecond_reg[] = { + { "tsn2_latch_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_latch_lower_tod_second_reg[] = { + { "tsn2_latch_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_latch_high_tod_second_reg[] = { + { "tsn2_latch_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_latch_fracnanosecond_reg[] = { + { "tsn2_latch_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_latch_tod_nanosecond_reg[] = { + { "tsn3_latch_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_latch_lower_tod_second_reg[] = { + { "tsn3_latch_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_latch_high_tod_second_reg[] = { + { "tsn3_latch_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_latch_fracnanosecond_reg[] = { + { "tsn3_latch_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T 
g_ptptm_ptptm_pp1s_latch_tsn0_tod_nanosecond_reg[] = { + { "latch_tsn0_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn0_lower_tod_second_reg[] = { + { "latch_lower_tsn0_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn0_high_tod_second_reg[] = { + { "latch_high_tsn0_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn0_fracnanosecond_reg[] = { + { "latch_tsn0_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn1_tod_nanosecond_reg[] = { + { "latch_tsn1_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn1_lower_tod_second_reg[] = { + { "latch_lower_tsn1_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn1_high_tod_second_reg[] = { + { "latch_high_tsn1_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn1_fracnanosecond_reg[] = { + { "latch_tsn1_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn2_tod_nanosecond_reg[] = { + { "latch_tsn2_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn2_lower_tod_second_reg[] = { + { "latch_lower_tsn2_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn2_high_tod_second_reg[] = { + { "latch_high_tsn2_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn2_fracnanosecond_reg[] = { + { "latch_tsn2_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn3_tod_nanosecond_reg[] = { + { "latch_tsn3_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn3_lower_tod_second_reg[] = { + { "latch_lower_tsn3_tod_second", 
DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn3_high_tod_second_reg[] = { + { "latch_high_tsn3_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_pp1s_latch_tsn3_fracnanosecond_reg[] = { + { "latch_tsn3_fracnanosecond", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_real_tod_nanosecond_reg[] = { + { "tsn0_real_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_real_lower_tod_second_reg[] = { + { "tsn0_real_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn0_real_high_tod_second_reg[] = { + { "tsn0_real_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_real_tod_nanosecond_reg[] = { + { "tsn1_real_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_real_lower_tod_second_reg[] = { + { "tsn1_real_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn1_real_high_tod_second_reg[] = { + { "tsn1_real_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_real_tod_nanosecond_reg[] = { + { "tsn2_real_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_real_lower_tod_second_reg[] = { + { "tsn2_real_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn2_real_high_tod_second_reg[] = { + { "tsn2_real_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_real_tod_nanosecond_reg[] = { + { "tsn3_real_tod_nanosecond", DPP_FIELD_FLAG_RO, 29, 30, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_real_lower_tod_second_reg[] = { + { "tsn3_real_lower_tod_second", DPP_FIELD_FLAG_RO, 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_tsn3_real_high_tod_second_reg[] = { + { "tsn3_real_high_tod_second", DPP_FIELD_FLAG_RO, 15, 16, 0x0, 0x0 }, 
+}; +DPP_FIELD_T g_ptptm_ptptm_real_ptp_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_real_ptp_clock_cycle", DPP_FIELD_FLAG_RO, 7, + 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_ptp_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_real_ptp_clock_cycle", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn0_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_real_tsn0_clock_cycle", DPP_FIELD_FLAG_RO, 7, + 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn0_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_real_tsn0_clock_cycle", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn1_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_real_tsn1_clock_cycle", DPP_FIELD_FLAG_RO, 7, + 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn1_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_real_tsn1_clock_cycle", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn2_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_real_tsn2_clock_cycle", DPP_FIELD_FLAG_RO, 7, + 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn2_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_real_tsn2_clock_cycle", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn3_clock_cycle_integer_reg[] = { + { "integeral_nanosecond_of_real_tsn3_clock_cycle", DPP_FIELD_FLAG_RO, 7, + 8, 0x1, 0x0 }, +}; +DPP_FIELD_T g_ptptm_ptptm_real_tsn3_clock_cycle_fraction_reg[] = { + { "fractional_nanosecond_of_real_tsn3_clock_cycle", DPP_FIELD_FLAG_RO, + 31, 32, 0x0, 0x0 }, +}; +DPP_REG_T g_dpp_reg_info[] = { + { + "cpu_check_reg", + ETM_CFGMT_CPU_CHECK_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x60, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cpu_check_reg_reg, + NULL, + NULL, + }, + { + "cfgmt_blksize", + ETM_CFGMT_CFGMT_BLKSIZEr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x70, + (32 
/ 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cfgmt_blksize_reg, + NULL, + NULL, + }, + { + "reg_int_state_reg", + ETM_CFGMT_REG_INT_STATE_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x90, + (32 / 8), + 0, + 0, + 0, + 0, + 7, + g_etm_cfgmt_reg_int_state_reg_reg, + NULL, + NULL, + }, + { + "reg_int_mask_reg", + ETM_CFGMT_REG_INT_MASK_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0xa0, + (32 / 8), + 0, + 0, + 0, + 0, + 7, + g_etm_cfgmt_reg_int_mask_reg_reg, + NULL, + NULL, + }, + { + "timeout_limit", + ETM_CFGMT_TIMEOUT_LIMITr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0xb0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_timeout_limit_reg, + NULL, + NULL, + }, + { + "subsystem_rdy_reg", + ETM_CFGMT_SUBSYSTEM_RDY_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0xc0, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_etm_cfgmt_subsystem_rdy_reg_reg, + NULL, + NULL, + }, + { + "subsystem_en_reg", + ETM_CFGMT_SUBSYSTEM_EN_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0xd0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cfgmt_subsystem_en_reg_reg, + NULL, + NULL, + }, + { + "cfgmt_int_reg", + ETM_CFGMT_CFGMT_INT_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0xe0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cfgmt_int_reg_reg, + NULL, + NULL, + }, + { + "qmu_work_mode", + ETM_CFGMT_QMU_WORK_MODEr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_qmu_work_mode_reg, + NULL, + NULL, + }, + { + "cfgmt_ddr_attach", + ETM_CFGMT_CFGMT_DDR_ATTACHr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x120, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cfgmt_ddr_attach_reg, + NULL, + NULL, + }, + { + "cnt_mode_reg", + ETM_CFGMT_CNT_MODE_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x140, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_cfgmt_cnt_mode_reg_reg, + NULL, + NULL, + }, + { + "clkgate_en", + ETM_CFGMT_CLKGATE_ENr, + ETM, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x1c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_clkgate_en_reg, + NULL, + NULL, + }, + { + "softrst_en", + ETM_CFGMT_SOFTRST_ENr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x1d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_softrst_en_reg, + NULL, + NULL, + }, + { + "imem_prog_full", + ETM_OLIF_IMEM_PROG_FULLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_imem_prog_full_reg, + NULL, + NULL, + }, + { + "qmu_para_prog_full", + ETM_OLIF_QMU_PARA_PROG_FULLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_qmu_para_prog_full_reg, + NULL, + NULL, + }, + { + "olif_int_mask", + ETM_OLIF_OLIF_INT_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x8, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_etm_olif_olif_int_mask_reg, + NULL, + NULL, + }, + { + "itmhram_parity_err_2_int", + ETM_OLIF_ITMHRAM_PARITY_ERR_2_INTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x9, + (32 / 8), + 0, + 0, + 0, + 0, + 22, + g_etm_olif_itmhram_parity_err_2_int_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_mask_h", + ETM_OLIF_LIF0_PORT_RDY_MASK_Hr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_port_rdy_mask_h_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_mask_l", + ETM_OLIF_LIF0_PORT_RDY_MASK_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x31, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_port_rdy_mask_l_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_cfg_h", + ETM_OLIF_LIF0_PORT_RDY_CFG_Hr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x32, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_port_rdy_cfg_h_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_cfg_l", + ETM_OLIF_LIF0_PORT_RDY_CFG_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x33, + (32 / 8), + 0, + 0, + 0, + 
0, + 1, + g_etm_olif_lif0_port_rdy_cfg_l_reg, + NULL, + NULL, + }, + { + "lif0_link_rdy_mask_cfg", + ETM_OLIF_LIF0_LINK_RDY_MASK_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x34, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_lif0_link_rdy_mask_cfg_reg, + NULL, + NULL, + }, + { + "tm_lif_stat_cfg", + ETM_OLIF_TM_LIF_STAT_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x40, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 4, + g_etm_olif_tm_lif_stat_cfg_reg, + NULL, + NULL, + }, + { + "tm_lif_sop_stat", + ETM_OLIF_TM_LIF_SOP_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x50, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 1, + g_etm_olif_tm_lif_sop_stat_reg, + NULL, + NULL, + }, + { + "tm_lif_eop_stat", + ETM_OLIF_TM_LIF_EOP_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x60, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 1, + g_etm_olif_tm_lif_eop_stat_reg, + NULL, + NULL, + }, + { + "tm_lif_vld_stat", + ETM_OLIF_TM_LIF_VLD_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 1, + g_etm_olif_tm_lif_vld_stat_reg, + NULL, + NULL, + }, + { + "prog_full_assert_cfg", + ETM_CGAVD_PROG_FULL_ASSERT_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "cgavd_int", + ETM_CGAVD_CGAVD_INTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_int_reg, + NULL, + NULL, + }, + { + "cgavd_ram_err", + ETM_CGAVD_CGAVD_RAM_ERRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_etm_cgavd_cgavd_ram_err_reg, + NULL, + NULL, + }, + { + "cgavd_int_mask", + ETM_CGAVD_CGAVD_INT_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_int_mask_reg, + NULL, + NULL, + }, + { + 
"cgavd_ram_err_int_mask", + ETM_CGAVD_CGAVD_RAM_ERR_INT_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_etm_cgavd_cgavd_ram_err_int_mask_reg, + NULL, + NULL, + }, + { + "cfgmt_byte_mode", + ETM_CGAVD_CFGMT_BYTE_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cfgmt_byte_mode_reg, + NULL, + NULL, + }, + { + "avg_qlen_return_zero_en", + ETM_CGAVD_AVG_QLEN_RETURN_ZERO_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_avg_qlen_return_zero_en_reg, + NULL, + NULL, + }, + { + "flow_wred_q_len_th", + ETM_CGAVD_FLOW_WRED_Q_LEN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x20, + (32 / 8), + 0, + 0x1f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wred_q_len_th_reg, + NULL, + NULL, + }, + { + "flow_wq", + ETM_CGAVD_FLOW_WQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x40, + (32 / 8), + 0, + 0x1f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wq_reg, + NULL, + NULL, + }, + { + "flow_wred_max_th", + ETM_CGAVD_FLOW_WRED_MAX_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x100, + (32 / 8), + 0, + 0xff + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wred_max_th_reg, + NULL, + NULL, + }, + { + "flow_wred_min_th", + ETM_CGAVD_FLOW_WRED_MIN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x200, + (32 / 8), + 0, + 0xff + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wred_min_th_reg, + NULL, + NULL, + }, + { + "flow_wred_cfg_para", + ETM_CGAVD_FLOW_WRED_CFG_PARAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x300, + (32 / 8), + 0, + 0xff + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wred_cfg_para_reg, + NULL, + NULL, + }, + { + "pp_avg_q_len", + ETM_CGAVD_PP_AVG_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x400, + (32 / 8), + 0, + 0x7f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_avg_q_len_reg, + NULL, + NULL, + }, + { + "pp_td_th", + 
ETM_CGAVD_PP_TD_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x600, + (32 / 8), + 0, + 0x7f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_td_th_reg, + NULL, + NULL, + }, + { + "pp_ca_mtd", + ETM_CGAVD_PP_CA_MTDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x800, + (32 / 8), + 0, + 0x7f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_ca_mtd_reg, + NULL, + NULL, + }, + { + "pp_wred_grp_th_en", + ETM_CGAVD_PP_WRED_GRP_TH_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xc00, + (32 / 8), + 0, + 0x7f + 1, + 0, + 0x1, + 2, + g_etm_cgavd_pp_wred_grp_th_en_reg, + NULL, + NULL, + }, + { + "pp_wred_q_len_th", + ETM_CGAVD_PP_WRED_Q_LEN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xe00, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_wred_q_len_th_reg, + NULL, + NULL, + }, + { + "pp_wq", + ETM_CGAVD_PP_WQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xe08, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_wq_reg, + NULL, + NULL, + }, + { + "pp_wred_max_th", + ETM_CGAVD_PP_WRED_MAX_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1000, + (32 / 8), + 0, + 0x3f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_wred_max_th_reg, + NULL, + NULL, + }, + { + "pp_wred_min_th", + ETM_CGAVD_PP_WRED_MIN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1080, + (32 / 8), + 0, + 0x3f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_wred_min_th_reg, + NULL, + NULL, + }, + { + "pp_cfg_para", + ETM_CGAVD_PP_CFG_PARAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1100, + (32 / 8), + 0, + 0x3f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_cfg_para_reg, + NULL, + NULL, + }, + { + "sys_avg_q_len", + ETM_CGAVD_SYS_AVG_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_avg_q_len_reg, + NULL, + NULL, + }, + { + "sys_td_th", + ETM_CGAVD_SYS_TD_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1202, + (32 / 8), + 0, + 0, + 0, + 0, + 
1, + g_etm_cgavd_sys_td_th_reg, + NULL, + NULL, + }, + { + "sys_cgavd_metd", + ETM_CGAVD_SYS_CGAVD_METDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_cgavd_metd_reg, + NULL, + NULL, + }, + { + "sys_cfg_q_grp_para", + ETM_CGAVD_SYS_CFG_Q_GRP_PARAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_cfg_q_grp_para_reg, + NULL, + NULL, + }, + { + "sys_wq", + ETM_CGAVD_SYS_WQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_wq_reg, + NULL, + NULL, + }, + { + "gred_max_th", + ETM_CGAVD_GRED_MAX_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1218, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_max_th_reg, + NULL, + NULL, + }, + { + "gred_mid_th", + ETM_CGAVD_GRED_MID_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1228, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_mid_th_reg, + NULL, + NULL, + }, + { + "gred_min_th", + ETM_CGAVD_GRED_MIN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1238, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_min_th_reg, + NULL, + NULL, + }, + { + "gred_cfg_para0", + ETM_CGAVD_GRED_CFG_PARA0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1248, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_cfg_para0_reg, + NULL, + NULL, + }, + { + "gred_cfg_para1", + ETM_CGAVD_GRED_CFG_PARA1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1258, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_cfg_para1_reg, + NULL, + NULL, + }, + { + "gred_cfg_para2", + ETM_CGAVD_GRED_CFG_PARA2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1268, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_gred_cfg_para2_reg, + NULL, + NULL, + }, + { + "sys_window_th_h", + ETM_CGAVD_SYS_WINDOW_TH_Hr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1278, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_window_th_h_reg, + NULL, + NULL, + }, + { + "sys_window_th_l", + ETM_CGAVD_SYS_WINDOW_TH_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x127a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_window_th_l_reg, + NULL, + NULL, + }, + { + "amplify_gene0", + ETM_CGAVD_AMPLIFY_GENE0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x127c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene0_reg, + NULL, + NULL, + }, + { + "amplify_gene1", + ETM_CGAVD_AMPLIFY_GENE1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x127d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene1_reg, + NULL, + NULL, + }, + { + "amplify_gene2", + ETM_CGAVD_AMPLIFY_GENE2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x127e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene2_reg, + NULL, + NULL, + }, + { + "amplify_gene3", + ETM_CGAVD_AMPLIFY_GENE3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x127f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene3_reg, + NULL, + NULL, + }, + { + "amplify_gene4", + ETM_CGAVD_AMPLIFY_GENE4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1280, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene4_reg, + NULL, + NULL, + }, + { + "amplify_gene5", + ETM_CGAVD_AMPLIFY_GENE5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1281, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene5_reg, + NULL, + NULL, + }, + { + "amplify_gene6", + ETM_CGAVD_AMPLIFY_GENE6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1282, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene6_reg, + NULL, + NULL, + }, + { + "amplify_gene7", + ETM_CGAVD_AMPLIFY_GENE7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1283, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene7_reg, + NULL, + NULL, + }, + 
{ + "amplify_gene8", + ETM_CGAVD_AMPLIFY_GENE8r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1284, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene8_reg, + NULL, + NULL, + }, + { + "amplify_gene9", + ETM_CGAVD_AMPLIFY_GENE9r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1285, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene9_reg, + NULL, + NULL, + }, + { + "amplify_gene10", + ETM_CGAVD_AMPLIFY_GENE10r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1286, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene10_reg, + NULL, + NULL, + }, + { + "amplify_gene11", + ETM_CGAVD_AMPLIFY_GENE11r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1287, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene11_reg, + NULL, + NULL, + }, + { + "amplify_gene12", + ETM_CGAVD_AMPLIFY_GENE12r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1288, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene12_reg, + NULL, + NULL, + }, + { + "amplify_gene13", + ETM_CGAVD_AMPLIFY_GENE13r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1289, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene13_reg, + NULL, + NULL, + }, + { + "amplify_gene14", + ETM_CGAVD_AMPLIFY_GENE14r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene14_reg, + NULL, + NULL, + }, + { + "amplify_gene15", + ETM_CGAVD_AMPLIFY_GENE15r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_amplify_gene15_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_en", + ETM_CGAVD_EQUAL_PKT_LEN_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_en_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th0", + ETM_CGAVD_EQUAL_PKT_LEN_TH0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128d, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th0_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th1", + ETM_CGAVD_EQUAL_PKT_LEN_TH1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th1_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th2", + ETM_CGAVD_EQUAL_PKT_LEN_TH2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x128f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th2_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th3", + ETM_CGAVD_EQUAL_PKT_LEN_TH3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1290, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th3_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th4", + ETM_CGAVD_EQUAL_PKT_LEN_TH4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1291, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th4_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th5", + ETM_CGAVD_EQUAL_PKT_LEN_TH5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1292, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th5_reg, + NULL, + NULL, + }, + { + "equal_pkt_len_th6", + ETM_CGAVD_EQUAL_PKT_LEN_TH6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1293, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len_th6_reg, + NULL, + NULL, + }, + { + "equal_pkt_len0", + ETM_CGAVD_EQUAL_PKT_LEN0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1294, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len0_reg, + NULL, + NULL, + }, + { + "equal_pkt_len1", + ETM_CGAVD_EQUAL_PKT_LEN1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1295, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len1_reg, + NULL, + NULL, + }, + { + "equal_pkt_len2", + ETM_CGAVD_EQUAL_PKT_LEN2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1296, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len2_reg, + NULL, 
+ NULL, + }, + { + "equal_pkt_len3", + ETM_CGAVD_EQUAL_PKT_LEN3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1297, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len3_reg, + NULL, + NULL, + }, + { + "equal_pkt_len4", + ETM_CGAVD_EQUAL_PKT_LEN4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1298, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len4_reg, + NULL, + NULL, + }, + { + "equal_pkt_len5", + ETM_CGAVD_EQUAL_PKT_LEN5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1299, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len5_reg, + NULL, + NULL, + }, + { + "equal_pkt_len6", + ETM_CGAVD_EQUAL_PKT_LEN6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x129a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len6_reg, + NULL, + NULL, + }, + { + "equal_pkt_len7", + ETM_CGAVD_EQUAL_PKT_LEN7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x129b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_equal_pkt_len7_reg, + NULL, + NULL, + }, + { + "flow_cpu_set_avg_len", + ETM_CGAVD_FLOW_CPU_SET_AVG_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x129c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_cpu_set_avg_len_reg, + NULL, + NULL, + }, + { + "flow_cpu_set_q_len", + ETM_CGAVD_FLOW_CPU_SET_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x129d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_cpu_set_q_len_reg, + NULL, + NULL, + }, + { + "pp_cpu_set_avg_q_len", + ETM_CGAVD_PP_CPU_SET_AVG_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ef, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_pp_cpu_set_avg_q_len_reg, + NULL, + NULL, + }, + { + "pp_cpu_set_q_len", + ETM_CGAVD_PP_CPU_SET_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x129e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_pp_cpu_set_q_len_reg, + NULL, + NULL, + }, + { + "sys_cpu_set_avg_len", + 
ETM_CGAVD_SYS_CPU_SET_AVG_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_cpu_set_avg_len_reg, + NULL, + NULL, + }, + { + "sys_cpu_set_q_len", + ETM_CGAVD_SYS_CPU_SET_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12a2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_cpu_set_q_len_reg, + NULL, + NULL, + }, + { + "pke_len_calc_sign", + ETM_CGAVD_PKE_LEN_CALC_SIGNr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12a3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_pke_len_calc_sign_reg, + NULL, + NULL, + }, + { + "rd_cpu_or_ram", + ETM_CGAVD_RD_CPU_OR_RAMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12a4, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_etm_cgavd_rd_cpu_or_ram_reg, + NULL, + NULL, + }, + { + "q_len_update_disable", + ETM_CGAVD_Q_LEN_UPDATE_DISABLEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12a5, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_cgavd_q_len_update_disable_reg, + NULL, + NULL, + }, + { + "cgavd_dp_sel", + ETM_CGAVD_CGAVD_DP_SELr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ad, + (32 / 8), + 0, + 0, + 0, + 0, + 9, + g_etm_cgavd_cgavd_dp_sel_reg, + NULL, + NULL, + }, + { + "cgavd_sub_en", + ETM_CGAVD_CGAVD_SUB_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e6, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_cgavd_cgavd_sub_en_reg, + NULL, + NULL, + }, + { + "default_start_queue", + ETM_CGAVD_DEFAULT_START_QUEUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_default_start_queue_reg, + NULL, + NULL, + }, + { + "default_finish_queue", + ETM_CGAVD_DEFAULT_FINISH_QUEUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_default_finish_queue_reg, + NULL, + NULL, + }, + { + "protocol_start_queue", + ETM_CGAVD_PROTOCOL_START_QUEUEr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_protocol_start_queue_reg, + NULL, + NULL, + }, + { + "protocol_finish_queue", + ETM_CGAVD_PROTOCOL_FINISH_QUEUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ea, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_protocol_finish_queue_reg, + NULL, + NULL, + }, + { + "uniform_td_th", + ETM_CGAVD_UNIFORM_TD_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12eb, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_uniform_td_th_reg, + NULL, + NULL, + }, + { + "uniform_td_th_en", + ETM_CGAVD_UNIFORM_TD_TH_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_uniform_td_th_en_reg, + NULL, + NULL, + }, + { + "cgavd_cfg_fc", + ETM_CGAVD_CGAVD_CFG_FCr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ed, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_cfg_fc_reg, + NULL, + NULL, + }, + { + "cgavd_cfg_no_fc", + ETM_CGAVD_CGAVD_CFG_NO_FCr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12ee, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_cfg_no_fc_reg, + NULL, + NULL, + }, + { + "cgavd_force_imem_omem", + ETM_CGAVD_CGAVD_FORCE_IMEM_OMEMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12f0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_cgavd_force_imem_omem_reg, + NULL, + NULL, + }, + { + "cgavd_sys_q_len_l", + ETM_CGAVD_CGAVD_SYS_Q_LEN_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12f1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_sys_q_len_l_reg, + NULL, + NULL, + }, + { + "default_queue_en", + ETM_CGAVD_DEFAULT_QUEUE_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12f5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_default_queue_en_reg, + NULL, + NULL, + }, + { + "protocol_queue_en", + ETM_CGAVD_PROTOCOL_QUEUE_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12f6, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_protocol_queue_en_reg, + NULL, + NULL, + }, + { + "cfg_tc_flowid_dat", + ETM_CGAVD_CFG_TC_FLOWID_DATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1400, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cfg_tc_flowid_dat_reg, + NULL, + NULL, + }, + { + "flow_td_th", + ETM_CGAVD_FLOW_TD_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x40000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_td_th_reg, + NULL, + NULL, + }, + { + "flow_ca_mtd", + ETM_CGAVD_FLOW_CA_MTDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_ca_mtd_reg, + NULL, + NULL, + }, + { + "flow_dynamic_th_en", + ETM_CGAVD_FLOW_DYNAMIC_TH_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xc0000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_dynamic_th_en_reg, + NULL, + NULL, + }, + { + "pp_num", + ETM_CGAVD_PP_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x100000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_num_reg, + NULL, + NULL, + }, + { + "flow_q_len", + ETM_CGAVD_FLOW_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x140000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_q_len_reg, + NULL, + NULL, + }, + { + "flow_wred_grp", + ETM_CGAVD_FLOW_WRED_GRPr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x180000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_wred_grp_reg, + NULL, + NULL, + }, + { + "flow_avg_q_len", + ETM_CGAVD_FLOW_AVG_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1c0000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_flow_avg_q_len_reg, + NULL, + NULL, + }, + { + "qos_sign", + ETM_CGAVD_QOS_SIGNr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x200000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_qos_sign_reg, + 
NULL, + NULL, + }, + { + "q_pri", + ETM_CGAVD_Q_PRIr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x240000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_q_pri_reg, + NULL, + NULL, + }, + { + "odma_tm_itmd_rd_low", + ETM_CGAVD_ODMA_TM_ITMD_RD_LOWr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_tm_itmd_rd_low_reg, + NULL, + NULL, + }, + { + "odma_tm_itmd_rd_mid", + ETM_CGAVD_ODMA_TM_ITMD_RD_MIDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_tm_itmd_rd_mid_reg, + NULL, + NULL, + }, + { + "odma_tm_itmd_rd_high", + ETM_CGAVD_ODMA_TM_ITMD_RD_HIGHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_tm_itmd_rd_high_reg, + NULL, + NULL, + }, + { + "cgavd_stat_pkt_len", + ETM_CGAVD_CGAVD_STAT_PKT_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250003, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_cgavd_stat_pkt_len_reg, + NULL, + NULL, + }, + { + "cgavd_stat_qnum", + ETM_CGAVD_CGAVD_STAT_QNUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250004, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_cgavd_stat_qnum_reg, + NULL, + NULL, + }, + { + "cgavd_stat_dp", + ETM_CGAVD_CGAVD_STAT_DPr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_stat_dp_reg, + NULL, + NULL, + }, + { + "flow_num0", + ETM_CGAVD_FLOW_NUM0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_num0_reg, + NULL, + NULL, + }, + { + "flow_num1", + ETM_CGAVD_FLOW_NUM1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_num1_reg, + NULL, + NULL, + }, + { + "flow_num2", + ETM_CGAVD_FLOW_NUM2r, + ETM, + DPP_REG_FLAG_INDIRECT, + 
DPP_REG_NUL_ARRAY, + 0x260002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_num2_reg, + NULL, + NULL, + }, + { + "flow_num3", + ETM_CGAVD_FLOW_NUM3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_num3_reg, + NULL, + NULL, + }, + { + "flow_num4", + ETM_CGAVD_FLOW_NUM4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow_num4_reg, + NULL, + NULL, + }, + { + "flow0_imem_cnt", + ETM_CGAVD_FLOW0_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow0_imem_cnt_reg, + NULL, + NULL, + }, + { + "flow1_imem_cnt", + ETM_CGAVD_FLOW1_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260011, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow1_imem_cnt_reg, + NULL, + NULL, + }, + { + "flow2_imem_cnt", + ETM_CGAVD_FLOW2_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260012, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow2_imem_cnt_reg, + NULL, + NULL, + }, + { + "flow3_imem_cnt", + ETM_CGAVD_FLOW3_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260013, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow3_imem_cnt_reg, + NULL, + NULL, + }, + { + "flow4_imem_cnt", + ETM_CGAVD_FLOW4_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow4_imem_cnt_reg, + NULL, + NULL, + }, + { + "flow0_drop_cnt", + ETM_CGAVD_FLOW0_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow0_drop_cnt_reg, + NULL, + NULL, + }, + { + "flow1_drop_cnt", + ETM_CGAVD_FLOW1_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260031, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow1_drop_cnt_reg, + NULL, + NULL, + }, + { + 
"flow2_drop_cnt", + ETM_CGAVD_FLOW2_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260032, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow2_drop_cnt_reg, + NULL, + NULL, + }, + { + "flow3_drop_cnt", + ETM_CGAVD_FLOW3_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260033, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow3_drop_cnt_reg, + NULL, + NULL, + }, + { + "flow4_drop_cnt", + ETM_CGAVD_FLOW4_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow4_drop_cnt_reg, + NULL, + NULL, + }, + { + "fc_count_mode", + ETM_CGAVD_FC_COUNT_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x264000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_fc_count_mode_reg, + NULL, + NULL, + }, + { + "qmu_cgavd_fc_num", + ETM_CGAVD_QMU_CGAVD_FC_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x264100, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_qmu_cgavd_fc_num_reg, + NULL, + NULL, + }, + { + "cgavd_odma_fc_num", + ETM_CGAVD_CGAVD_ODMA_FC_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x264101, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_cgavd_odma_fc_num_reg, + NULL, + NULL, + }, + { + "cfg_offset", + ETM_CGAVD_CFG_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x290000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cfg_offset_reg, + NULL, + NULL, + }, + { + "tmmu_init_done", + ETM_TMMU_TMMU_INIT_DONEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_init_done_reg, + NULL, + NULL, + }, + { + "tmmu_int_mask_1", + ETM_TMMU_TMMU_INT_MASK_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002, + (32 / 8), + 0, + 0, + 0, + 0, + 27, + g_etm_tmmu_tmmu_int_mask_1_reg, + NULL, + NULL, + }, + { + "tmmu_int_mask_2", + ETM_TMMU_TMMU_INT_MASK_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0003, + 
(32 / 8), + 0, + 0, + 0, + 0, + 17, + g_etm_tmmu_tmmu_int_mask_2_reg, + NULL, + NULL, + }, + { + "cfgmt_tm_pure_imem_en", + ETM_TMMU_CFGMT_TM_PURE_IMEM_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_tm_pure_imem_en_reg, + NULL, + NULL, + }, + { + "cfgmt_force_ddr_rdy_cfg", + ETM_TMMU_CFGMT_FORCE_DDR_RDY_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_force_ddr_rdy_cfg_reg, + NULL, + NULL, + }, + { + "pd_order_fifo_aful_th", + ETM_TMMU_PD_ORDER_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_pd_order_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "cached_pd_fifo_aful_th", + ETM_TMMU_CACHED_PD_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cached_pd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "wr_cmd_fifo_aful_th", + ETM_TMMU_WR_CMD_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_wr_cmd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "imem_enq_rd_fifo_aful_th", + ETM_TMMU_IMEM_ENQ_RD_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_imem_enq_rd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "imem_enq_drop_fifo_aful_th", + ETM_TMMU_IMEM_ENQ_DROP_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_imem_enq_drop_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "imem_deq_drop_fifo_aful_th", + ETM_TMMU_IMEM_DEQ_DROP_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_imem_deq_drop_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "imem_deq_rd_fifo_aful_th", + 
ETM_TMMU_IMEM_DEQ_RD_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_imem_deq_rd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "tmmu_states_1", + ETM_TMMU_TMMU_STATES_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0011, + (32 / 8), + 0, + 0, + 0, + 0, + 27, + g_etm_tmmu_tmmu_states_1_reg, + NULL, + NULL, + }, + { + "tmmu_states_2", + ETM_TMMU_TMMU_STATES_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0012, + (32 / 8), + 0, + 0, + 0, + 0, + 17, + g_etm_tmmu_tmmu_states_2_reg, + NULL, + NULL, + }, + { + "shap_ind_cmd", + ETM_SHAP_SHAP_IND_CMDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000a, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_shap_shap_ind_cmd_reg, + NULL, + NULL, + }, + { + "shap_ind_sta", + ETM_SHAP_SHAP_IND_STAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_shap_ind_sta_reg, + NULL, + NULL, + }, + { + "shap_ind_data0", + ETM_SHAP_SHAP_IND_DATA0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_shap_ind_data0_reg, + NULL, + NULL, + }, + { + "shap_ind_data1", + ETM_SHAP_SHAP_IND_DATA1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_shap_ind_data1_reg, + NULL, + NULL, + }, + { + "full_threshold", + ETM_SHAP_FULL_THRESHOLDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_full_threshold_reg, + NULL, + NULL, + }, + { + "empty_threshold", + ETM_SHAP_EMPTY_THRESHOLDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_empty_threshold_reg, + NULL, + NULL, + }, + { + "shap_sta_init_cfg", + ETM_SHAP_SHAP_STA_INIT_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1e, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_etm_shap_shap_sta_init_cfg_reg, + NULL, + NULL, + }, + { + "shap_cfg_init_cfg", + ETM_SHAP_SHAP_CFG_INIT_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1f, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_shap_shap_cfg_init_cfg_reg, + NULL, + NULL, + }, + { + "token_mode_switch", + ETM_SHAP_TOKEN_MODE_SWITCHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_token_mode_switch_reg, + NULL, + NULL, + }, + { + "token_grain", + ETM_SHAP_TOKEN_GRAINr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x21, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_token_grain_reg, + NULL, + NULL, + }, + { + "crd_grain", + ETM_SHAP_CRD_GRAINr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x22, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_crd_grain_reg, + NULL, + NULL, + }, + { + "shap_stat_ctrl", + ETM_SHAP_SHAP_STAT_CTRLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x37, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_shap_stat_ctrl_reg, + NULL, + NULL, + }, + { + "token_stat_id", + ETM_SHAP_TOKEN_STAT_IDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x38, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_token_stat_id_reg, + NULL, + NULL, + }, + { + "token_stat", + ETM_SHAP_TOKEN_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x39, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_token_stat_reg, + NULL, + NULL, + }, + { + "shap_stat_clk_cnt", + ETM_SHAP_SHAP_STAT_CLK_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x74, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_shap_stat_clk_cnt_reg, + NULL, + NULL, + }, + { + "shap_bucket_map_tbl", + ETM_SHAP_SHAP_BUCKET_MAP_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80000000 + 0x000000, + (64 / 8), + 0, + 0xABFF + 1, + 0, + 1, + 1, + g_etm_shap_shap_bucket_map_tbl_reg, + NULL, + NULL, + }, + { + "bkt_para_tbl", + ETM_SHAP_BKT_PARA_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, 
+ DPP_REG_UNI_ARRAY, + 0x80000000 + 0x100000, + (64 / 8), + 0, + 0xAFF + 1, + 0, + 1, + 2, + g_etm_shap_bkt_para_tbl_reg, + NULL, + NULL, + }, + { + "credit_en", + ETM_CRDT_CREDIT_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_credit_en_reg, + NULL, + NULL, + }, + { + "crt_inter1", + ETM_CRDT_CRT_INTER1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crt_inter1_reg, + NULL, + NULL, + }, + { + "db_token", + ETM_CRDT_DB_TOKENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_db_token_reg, + NULL, + NULL, + }, + { + "crs_flt_cfg", + ETM_CRDT_CRS_FLT_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crs_flt_cfg_reg, + NULL, + NULL, + }, + { + "th_sp", + ETM_CRDT_TH_SPr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_th_sp_reg, + NULL, + NULL, + }, + { + "th_wfq_fq", + ETM_CRDT_TH_WFQ_FQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0006, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_th_wfq_fq_reg, + NULL, + NULL, + }, + { + "th_wfq2_fq2", + ETM_CRDT_TH_WFQ2_FQ2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0007, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_th_wfq2_fq2_reg, + NULL, + NULL, + }, + { + "th_wfq4_fq4", + ETM_CRDT_TH_WFQ4_FQ4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_th_wfq4_fq4_reg, + NULL, + NULL, + }, + { + "cfg_state", + ETM_CRDT_CFG_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_cfg_state_reg, + NULL, + NULL, + }, + { + "crdt_ind_cmd", + ETM_CRDT_CRDT_IND_CMDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000a, + (32 / 8), + 0, + 
0, + 0, + 0, + 3, + g_etm_crdt_crdt_ind_cmd_reg, + NULL, + NULL, + }, + { + "crdt_ind_sta", + ETM_CRDT_CRDT_IND_STAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_ind_sta_reg, + NULL, + NULL, + }, + { + "crdt_ind_data0", + ETM_CRDT_CRDT_IND_DATA0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_ind_data0_reg, + NULL, + NULL, + }, + { + "crdt_ind_data1", + ETM_CRDT_CRDT_IND_DATA1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_ind_data1_reg, + NULL, + NULL, + }, + { + "crdt_state", + ETM_CRDT_CRDT_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000f, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_crdt_state_reg, + NULL, + NULL, + }, + { + "stat_que_id_0", + ETM_CRDT_STAT_QUE_ID_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_0_reg, + NULL, + NULL, + }, + { + "stat_que_id_1", + ETM_CRDT_STAT_QUE_ID_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x11, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_1_reg, + NULL, + NULL, + }, + { + "stat_que_id_2", + ETM_CRDT_STAT_QUE_ID_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_2_reg, + NULL, + NULL, + }, + { + "stat_que_id_3", + ETM_CRDT_STAT_QUE_ID_3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x13, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_3_reg, + NULL, + NULL, + }, + { + "stat_que_id_4", + ETM_CRDT_STAT_QUE_ID_4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_4_reg, + NULL, + NULL, + }, + { + "stat_que_id_5", + ETM_CRDT_STAT_QUE_ID_5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x15, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_5_reg, + NULL, + NULL, + }, + { + "stat_que_id_6", + ETM_CRDT_STAT_QUE_ID_6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x16, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_6_reg, + NULL, + NULL, + }, + { + "stat_que_id_7", + ETM_CRDT_STAT_QUE_ID_7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x17, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_7_reg, + NULL, + NULL, + }, + { + "stat_que_id_8", + ETM_CRDT_STAT_QUE_ID_8r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x18, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_8_reg, + NULL, + NULL, + }, + { + "stat_que_id_9", + ETM_CRDT_STAT_QUE_ID_9r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x19, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_9_reg, + NULL, + NULL, + }, + { + "stat_que_id_10", + ETM_CRDT_STAT_QUE_ID_10r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_10_reg, + NULL, + NULL, + }, + { + "stat_que_id_11", + ETM_CRDT_STAT_QUE_ID_11r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_11_reg, + NULL, + NULL, + }, + { + "stat_que_id_12", + ETM_CRDT_STAT_QUE_ID_12r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_12_reg, + NULL, + NULL, + }, + { + "stat_que_id_13", + ETM_CRDT_STAT_QUE_ID_13r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_13_reg, + NULL, + NULL, + }, + { + "stat_que_id_14", + ETM_CRDT_STAT_QUE_ID_14r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_14_reg, + NULL, + NULL, + }, + { + "stat_que_id_15", + ETM_CRDT_STAT_QUE_ID_15r, + ETM, + DPP_REG_FLAG_INDIRECT, + 
DPP_REG_NUL_ARRAY, + 0x1f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_stat_que_id_15_reg, + NULL, + NULL, + }, + { + "stat_que_credit", + ETM_CRDT_STAT_QUE_CREDITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x20, + (32 / 8), + 0, + 0x0f + 1, + 0, + 1, + 1, + g_etm_crdt_stat_que_credit_reg, + NULL, + NULL, + }, + { + "crdt_cfg_ram_init", + ETM_CRDT_CRDT_CFG_RAM_INITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7a, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_crdt_cfg_ram_init_reg, + NULL, + NULL, + }, + { + "crdt_sta_ram_init", + ETM_CRDT_CRDT_STA_RAM_INITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7b, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_crdt_sta_ram_init_reg, + NULL, + NULL, + }, + { + "crs_que_id", + ETM_CRDT_CRS_QUE_IDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x86, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crs_que_id_reg, + NULL, + NULL, + }, + { + "qmu_crs_end_state", + ETM_CRDT_QMU_CRS_END_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x87, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_qmu_crs_end_state_reg, + NULL, + NULL, + }, + { + "shap_rdy", + ETM_CRDT_SHAP_RDYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x90, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_shap_rdy_reg, + NULL, + NULL, + }, + { + "shap_int_reg", + ETM_CRDT_SHAP_INT_REGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x91, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_shap_int_reg_reg, + NULL, + NULL, + }, + { + "shap_int_mask_reg", + ETM_CRDT_SHAP_INT_MASK_REGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x92, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_shap_int_mask_reg_reg, + NULL, + NULL, + }, + { + "token_state_almost_empty_th", + ETM_CRDT_TOKEN_STATE_ALMOST_EMPTY_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x93, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_token_state_almost_empty_th_reg, + NULL, + NULL, + 
}, + { + "token_state_empty_th", + ETM_CRDT_TOKEN_STATE_EMPTY_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x94, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_token_state_empty_th_reg, + NULL, + NULL, + }, + { + "full_th", + ETM_CRDT_FULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x95, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_full_th_reg, + NULL, + NULL, + }, + { + "pp_c_level_shap_en", + ETM_CRDT_PP_C_LEVEL_SHAP_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x96, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_pp_c_level_shap_en_reg, + NULL, + NULL, + }, + { + "enq_token_th", + ETM_CRDT_ENQ_TOKEN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x97, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_enq_token_th_reg, + NULL, + NULL, + }, + { + "pp_tokenq_level1_qstate_weight_cir", + ETM_CRDT_PP_TOKENQ_LEVEL1_QSTATE_WEIGHT_CIRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x98, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_pp_tokenq_level1_qstate_weight_cir_reg, + NULL, + NULL, + }, + { + "pp_idle_weight_level1_cir", + ETM_CRDT_PP_IDLE_WEIGHT_LEVEL1_CIRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x99, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_pp_idle_weight_level1_cir_reg, + NULL, + NULL, + }, + { + "rci_grade_th_0_cfg", + ETM_CRDT_RCI_GRADE_TH_0_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_0_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_1_cfg", + ETM_CRDT_RCI_GRADE_TH_1_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_1_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_2_cfg", + ETM_CRDT_RCI_GRADE_TH_2_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_2_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_3_cfg", 
+ ETM_CRDT_RCI_GRADE_TH_3_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_3_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_4_cfg", + ETM_CRDT_RCI_GRADE_TH_4_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_4_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_5_cfg", + ETM_CRDT_RCI_GRADE_TH_5_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_5_cfg_reg, + NULL, + NULL, + }, + { + "rci_grade_th_6_cfg", + ETM_CRDT_RCI_GRADE_TH_6_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_th_6_cfg_reg, + NULL, + NULL, + }, + { + "flow_del_cmd", + ETM_CRDT_FLOW_DEL_CMDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00f1, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_crdt_flow_del_cmd_reg, + NULL, + NULL, + }, + { + "cnt_clr", + ETM_CRDT_CNT_CLRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00f2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_cnt_clr_reg, + NULL, + NULL, + }, + { + "crdt_int_bus", + ETM_CRDT_CRDT_INT_BUSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00f3, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_etm_crdt_crdt_int_bus_reg, + NULL, + NULL, + }, + { + "crdt_int_mask", + ETM_CRDT_CRDT_INT_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_int_mask_reg, + NULL, + NULL, + }, + { + "cfg_weight_together", + ETM_CRDT_CFG_WEIGHT_TOGETHERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00f9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_cfg_weight_together_reg, + NULL, + NULL, + }, + { + "weight", + ETM_CRDT_WEIGHTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x00fa, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_etm_crdt_weight_reg, + NULL, + NULL, + }, + { + "dev_sp_state", + ETM_CRDT_DEV_SP_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x11e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_dev_sp_state_reg, + NULL, + NULL, + }, + { + "dev_crs", + ETM_CRDT_DEV_CRSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x11f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_dev_crs_reg, + NULL, + NULL, + }, + { + "congest_token_disable_31_0", + ETM_CRDT_CONGEST_TOKEN_DISABLE_31_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_congest_token_disable_31_0_reg, + NULL, + NULL, + }, + { + "congest_token_disable_63_32", + ETM_CRDT_CONGEST_TOKEN_DISABLE_63_32r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x131, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_congest_token_disable_63_32_reg, + NULL, + NULL, + }, + { + "crdt_interval_en_cfg", + ETM_CRDT_CRDT_INTERVAL_EN_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0139, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_interval_en_cfg_reg, + NULL, + NULL, + }, + { + "q_token_staue_cfg", + ETM_CRDT_Q_TOKEN_STAUE_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x150, + (32 / 8), + 0, + 0xf + 1, + 0, + 1, + 1, + g_etm_crdt_q_token_staue_cfg_reg, + NULL, + NULL, + }, + { + "q_token_dist_cnt", + ETM_CRDT_Q_TOKEN_DIST_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x160, + (32 / 8), + 0, + 0xf + 1, + 0, + 1, + 1, + g_etm_crdt_q_token_dist_cnt_reg, + NULL, + NULL, + }, + { + "q_token_dec_cnt", + ETM_CRDT_Q_TOKEN_DEC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x170, + (32 / 8), + 0, + 0xf + 1, + 0, + 1, + 1, + g_etm_crdt_q_token_dec_cnt_reg, + NULL, + NULL, + }, + { + "pp_weight_ram", + ETM_CRDT_PP_WEIGHT_RAMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x41000000, + (32 / 8), + 0, + 0x003F + 1, + 0, + 1, + 1, + g_etm_crdt_pp_weight_ram_reg, + NULL, + NULL, + 
}, + { + "pp_cbs_shape_en_ram", + ETM_CRDT_PP_CBS_SHAPE_EN_RAMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x42000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 2, + g_etm_crdt_pp_cbs_shape_en_ram_reg, + NULL, + NULL, + }, + { + "pp_next_pc_q_state_ram", + ETM_CRDT_PP_NEXT_PC_Q_STATE_RAMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x43000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 3, + g_etm_crdt_pp_next_pc_q_state_ram_reg, + NULL, + NULL, + }, + { + "dev_interval", + ETM_CRDT_DEV_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x50000000, + (32 / 8), + 0, + 0x0127 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_interval_reg, + NULL, + NULL, + }, + { + "dev_wfq_cnt", + ETM_CRDT_DEV_WFQ_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x51000000, + (32 / 8), + 0, + 0x0007 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_wfq_cnt_reg, + NULL, + NULL, + }, + { + "dev_wfq_state", + ETM_CRDT_DEV_WFQ_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x52000000, + (32 / 8), + 0, + 0x0007 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_wfq_state_reg, + NULL, + NULL, + }, + { + "dev_active_head_ptr", + ETM_CRDT_DEV_ACTIVE_HEAD_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x53000000, + (32 / 8), + 0, + 0x0007 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_active_head_ptr_reg, + NULL, + NULL, + }, + { + "dev_active_tail_ptr", + ETM_CRDT_DEV_ACTIVE_TAIL_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x54000000, + (32 / 8), + 0, + 0x0007 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_active_tail_ptr_reg, + NULL, + NULL, + }, + { + "dev_unactive_head_ptr", + ETM_CRDT_DEV_UNACTIVE_HEAD_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x55000000, + (32 / 8), + 0, + 0x0037 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_unactive_head_ptr_reg, + NULL, + NULL, + }, + { + "dev_unactive_tail_ptr", + ETM_CRDT_DEV_UNACTIVE_TAIL_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x56000000, + (32 / 8), + 0, + 0x0037 + 1, + 0, + 
1, + 1, + g_etm_crdt_dev_unactive_tail_ptr_reg, + NULL, + NULL, + }, + { + "pp_weight", + ETM_CRDT_PP_WEIGHTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x44000000, + (32 / 8), + 0, + 0x003F + 1, + 0, + 1, + 1, + g_etm_crdt_pp_weight_reg, + NULL, + NULL, + }, + { + "pp_que_state", + ETM_CRDT_PP_QUE_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x46000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 5, + g_etm_crdt_pp_que_state_reg, + NULL, + NULL, + }, + { + "pp_next_ptr", + ETM_CRDT_PP_NEXT_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x49000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 1, + g_etm_crdt_pp_next_ptr_reg, + NULL, + NULL, + }, + { + "pp_cfg", + ETM_CRDT_PP_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x4a000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 1, + g_etm_crdt_pp_cfg_reg, + NULL, + NULL, + }, + { + "pp_up_ptr", + ETM_CRDT_PP_UP_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x4b000000, + (32 / 8), + 0, + 0x003f + 1, + 0, + 1, + 1, + g_etm_crdt_pp_up_ptr_reg, + NULL, + NULL, + }, + { + "credit_drop_num", + ETM_CRDT_CREDIT_DROP_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_credit_drop_num_reg, + NULL, + NULL, + }, + { + "se_id_lv0", + ETM_CRDT_SE_ID_LV0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x181, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_se_id_lv0_reg, + NULL, + NULL, + }, + { + "se_id_lv1", + ETM_CRDT_SE_ID_LV1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x182, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_se_id_lv1_reg, + NULL, + NULL, + }, + { + "se_id_lv2", + ETM_CRDT_SE_ID_LV2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x183, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_se_id_lv2_reg, + NULL, + NULL, + }, + { + "se_id_lv3", + ETM_CRDT_SE_ID_LV3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x184, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 1, + g_etm_crdt_se_id_lv3_reg, + NULL, + NULL, + }, + { + "se_id_lv4", + ETM_CRDT_SE_ID_LV4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x185, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_se_id_lv4_reg, + NULL, + NULL, + }, + { + "que_id", + ETM_CRDT_QUE_IDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x186, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_que_id_reg, + NULL, + NULL, + }, + { + "se_info_lv0", + ETM_CRDT_SE_INFO_LV0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x187, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_crdt_se_info_lv0_reg, + NULL, + NULL, + }, + { + "se_info_lv1", + ETM_CRDT_SE_INFO_LV1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x188, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_crdt_se_info_lv1_reg, + NULL, + NULL, + }, + { + "se_info_lv2", + ETM_CRDT_SE_INFO_LV2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x189, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_crdt_se_info_lv2_reg, + NULL, + NULL, + }, + { + "se_info_lv3", + ETM_CRDT_SE_INFO_LV3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x18a, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_crdt_se_info_lv3_reg, + NULL, + NULL, + }, + { + "se_info_lv4", + ETM_CRDT_SE_INFO_LV4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x18b, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_crdt_se_info_lv4_reg, + NULL, + NULL, + }, + { + "que_state", + ETM_CRDT_QUE_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x18c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_que_state_reg, + NULL, + NULL, + }, + { + "eir_off_in_advance", + ETM_CRDT_EIR_OFF_IN_ADVANCEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_eir_off_in_advance_reg, + NULL, + NULL, + }, + { + "double_level_shap_prevent", + ETM_CRDT_DOUBLE_LEVEL_SHAP_PREVENTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x192, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_etm_crdt_double_level_shap_prevent_reg, + NULL, + NULL, + }, + { + "add_store_cycle", + ETM_CRDT_ADD_STORE_CYCLEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x193, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_add_store_cycle_reg, + NULL, + NULL, + }, + { + "tflag2_wr_flag_sum", + ETM_CRDT_TFLAG2_WR_FLAG_SUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_tflag2_wr_flag_sum_reg, + NULL, + NULL, + }, + { + "flowque_para_tbl", + ETM_CRDT_FLOWQUE_PARA_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x00000000 + 0x200000, + (32 / 8), + 0, + 0x47FF + 1, + 0, + 1, + 3, + g_etm_crdt_flowque_para_tbl_reg, + NULL, + NULL, + }, + { + "se_para_tbl", + ETM_CRDT_SE_PARA_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80000000 + 0x300000, + (64 / 8), + 0, + 0x63FF + 1, + 0, + 1, + 5, + g_etm_crdt_se_para_tbl_reg, + NULL, + NULL, + }, + { + "flowque_ins_tbl", + ETM_CRDT_FLOWQUE_INS_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x00000000 + 0x400000, + (32 / 8), + 0, + 0x47FF + 1, + 0, + 1, + 1, + g_etm_crdt_flowque_ins_tbl_reg, + NULL, + NULL, + }, + { + "se_ins_tbl", + ETM_CRDT_SE_INS_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x00000000 + 0x500000, + (32 / 8), + 0, + 0x63FF + 1, + 0, + 1, + 2, + g_etm_crdt_se_ins_tbl_reg, + NULL, + NULL, + }, + { + "eir_crs_filter_tbl", + ETM_CRDT_EIR_CRS_FILTER_TBLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x00000000 + 0x600000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 1, + 1, + g_etm_crdt_eir_crs_filter_tbl_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_cfg_done", + ETM_QMU_QCFG_QLIST_CFG_DONEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qlist_cfg_done_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_credit_value", + ETM_QMU_QCFG_QSCH_CREDIT_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x11, + (32 / 8), + 0, 
+ 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_credit_value_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crbal_init_value", + ETM_QMU_QCFG_QSCH_CRBAL_INIT_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crbal_init_value_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crbal_init_mask", + ETM_QMU_QCFG_QSCH_CRBAL_INIT_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x13, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crbal_init_mask_reg, + NULL, + NULL, + }, + { + "cmdsch_rd_cmd_aful_th", + ETM_QMU_CMDSCH_RD_CMD_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x22, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cmdsch_rd_cmd_aful_th_reg, + NULL, + NULL, + }, + { + "cfg_port_fc_interval", + ETM_QMU_CFG_PORT_FC_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x23, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_port_fc_interval_reg, + NULL, + NULL, + }, + { + "qcfg_csch_aged_cfg", + ETM_QMU_QCFG_CSCH_AGED_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x24, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_csch_aged_cfg_reg, + NULL, + NULL, + }, + { + "qcfg_csch_aged_scan_time", + ETM_QMU_QCFG_CSCH_AGED_SCAN_TIMEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x25, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_csch_aged_scan_time_reg, + NULL, + NULL, + }, + { + "qcfg_qmu_qlist_state_query", + ETM_QMU_QCFG_QMU_QLIST_STATE_QUERYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x26, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_etm_qmu_qcfg_qmu_qlist_state_query_reg, + NULL, + NULL, + }, + { + "cfgmt_qsch_crbal_drop_en", + ETM_QMU_CFGMT_QSCH_CRBAL_DROP_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x27, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_cfgmt_qsch_crbal_drop_en_reg, + NULL, + NULL, + }, + { + "cfgmt_wlist_qnum_fifo_aful_th", + ETM_QMU_CFGMT_WLIST_QNUM_FIFO_AFUL_THr, + 
ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x29, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_wlist_qnum_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "qcfg_csw_pkt_blk_mode", + ETM_QMU_QCFG_CSW_PKT_BLK_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x28, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_csw_pkt_blk_mode_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_ram_init_cancel", + ETM_QMU_QCFG_QLIST_RAM_INIT_CANCELr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qlist_ram_init_cancel_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crbal_transfer_mode", + ETM_QMU_QCFG_QSCH_CRBAL_TRANSFER_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2b, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_qcfg_qsch_crbal_transfer_mode_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_qclr_interval", + ETM_QMU_QCFG_QLIST_QCLR_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qlist_qclr_interval_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_qclr_rate", + ETM_QMU_QCFG_QSCH_QCLR_RATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_qclr_rate_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_ddr_random", + ETM_QMU_QCFG_QLIST_DDR_RANDOMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qlist_ddr_random_reg, + NULL, + NULL, + }, + { + "cfgmt_qlist_pds_fifo_afull_th", + ETM_QMU_CFGMT_QLIST_PDS_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qlist_pds_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "cfgmt_sop_cmd_fifo_afull_th", + ETM_QMU_CFGMT_SOP_CMD_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x31, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_etm_qmu_cfgmt_sop_cmd_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "cfgmt_non_sop_cmd_fifo_afull_th", + ETM_QMU_CFGMT_NON_SOP_CMD_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x32, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_non_sop_cmd_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "cfgmt_mmu_data_fifo_afull_th", + ETM_QMU_CFGMT_MMU_DATA_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x33, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_mmu_data_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_bank_ept_th", + ETM_QMU_QCFG_QLIST_BANK_EPT_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x34, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qlist_bank_ept_th_reg, + NULL, + NULL, + }, + { + "random_bypass_en", + ETM_QMU_RANDOM_BYPASS_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x36, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_random_bypass_en_reg, + NULL, + NULL, + }, + { + "cfgmt_crs_spd_bypass", + ETM_QMU_CFGMT_CRS_SPD_BYPASSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x37, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_crs_spd_bypass_reg, + NULL, + NULL, + }, + { + "cfgmt_crs_interval", + ETM_QMU_CFGMT_CRS_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x38, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_crs_interval_reg, + NULL, + NULL, + }, + { + "cfg_qsch_auto_credit_control_en", + ETM_QMU_CFG_QSCH_AUTO_CREDIT_CONTROL_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_auto_credit_control_en_reg, + NULL, + NULL, + }, + { + "cfg_qsch_autocrfrstque", + ETM_QMU_CFG_QSCH_AUTOCRFRSTQUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_autocrfrstque_reg, + NULL, + NULL, + }, + { + "cfg_qsch_autocrlastque", + ETM_QMU_CFG_QSCH_AUTOCRLASTQUEr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_autocrlastque_reg, + NULL, + NULL, + }, + { + "cfg_qsch_autocreditrate", + ETM_QMU_CFG_QSCH_AUTOCREDITRATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_autocreditrate_reg, + NULL, + NULL, + }, + { + "cfg_qsch_scanfrstque", + ETM_QMU_CFG_QSCH_SCANFRSTQUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x51, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_scanfrstque_reg, + NULL, + NULL, + }, + { + "cfg_qsch_scanlastque", + ETM_QMU_CFG_QSCH_SCANLASTQUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x52, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_scanlastque_reg, + NULL, + NULL, + }, + { + "cfg_qsch_scanrate", + ETM_QMU_CFG_QSCH_SCANRATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x53, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_scanrate_reg, + NULL, + NULL, + }, + { + "cfg_qsch_scan_en", + ETM_QMU_CFG_QSCH_SCAN_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x54, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_scan_en_reg, + NULL, + NULL, + }, + { + "cfgmt_qsch_rd_credit_fifo_rate", + ETM_QMU_CFGMT_QSCH_RD_CREDIT_FIFO_RATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qsch_rd_credit_fifo_rate_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_bdep", + ETM_QMU_QCFG_QLIST_BDEPr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x9a7, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_bdep_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_bhead", + ETM_QMU_QCFG_QLIST_BHEADr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x9e7, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 2, + g_etm_qmu_qcfg_qlist_bhead_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_btail", + ETM_QMU_QCFG_QLIST_BTAILr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xa27, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_btail_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_shap_param", + ETM_QMU_QCFG_QSCH_SHAP_PARAMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xfc, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 3, + g_etm_qmu_qcfg_qsch_shap_param_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_shap_token", + ETM_QMU_QCFG_QSCH_SHAP_TOKENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x185, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_shap_token_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_shap_offset", + ETM_QMU_QCFG_QSCH_SHAP_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x20e, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_shap_offset_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_eir_th", + ETM_QMU_QCFG_QSCH_CRS_EIR_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x250, + (32 / 8), + 0, + 0xF + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_crs_eir_th_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_th1", + ETM_QMU_QCFG_QSCH_CRS_TH1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x297, + (32 / 8), + 0, + 0xF + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_crs_th1_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_th2", + ETM_QMU_QCFG_QSCH_CRS_TH2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2a7, + (32 / 8), + 0, + 0xF + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_crs_th2_reg, + NULL, + NULL, + }, + { + "qcfg_csch_congest_th", + ETM_QMU_QCFG_CSCH_CONGEST_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2b7, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_csch_congest_th_reg, + NULL, + NULL, + }, + { + "qcfg_csch_sp_fc_th", + ETM_QMU_QCFG_CSCH_SP_FC_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x330, + (32 / 8), + 0, + 0x140 + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_csch_sp_fc_th_reg, + NULL, + NULL, + }, + { + "qcfg_csw_shap_parameter", + 
ETM_QMU_QCFG_CSW_SHAP_PARAMETERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x5d0, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 2, + g_etm_qmu_qcfg_csw_shap_parameter_reg, + NULL, + NULL, + }, + { + "cfgmt_rd_release_aful_th", + ETM_QMU_CFGMT_RD_RELEASE_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x58e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_rd_release_aful_th_reg, + NULL, + NULL, + }, + { + "cfgmt_drop_imem_release_fifo_aful_th", + ETM_QMU_CFGMT_DROP_IMEM_RELEASE_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x58f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_drop_imem_release_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "cfgmt_nnh_rd_buf_aful_th", + ETM_QMU_CFGMT_NNH_RD_BUF_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x590, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_nnh_rd_buf_aful_th_reg, + NULL, + NULL, + }, + { + "cfg_pid_use_inall", + ETM_QMU_CFG_PID_USE_INALLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x591, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_pid_use_inall_reg, + NULL, + NULL, + }, + { + "cfg_pid_round_th", + ETM_QMU_CFG_PID_ROUND_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x592, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_pid_round_th_reg, + NULL, + NULL, + }, + { + "cfgmt_credit_fifo_afull_th", + ETM_QMU_CFGMT_CREDIT_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x593, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_credit_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "cfgmt_scan_fifo_afull_th", + ETM_QMU_CFGMT_SCAN_FIFO_AFULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x594, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_scan_fifo_afull_th_reg, + NULL, + NULL, + }, + { + "cfgmt_small_fifo_aful_th", + ETM_QMU_CFGMT_SMALL_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x595, + (32 / 8), + 0, + 0, + 0, + 0, + 1, 
+ g_etm_qmu_cfgmt_small_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "cfgmt_free_addr_fifo_aful_th", + ETM_QMU_CFGMT_FREE_ADDR_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x596, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_free_addr_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "cfgmt_enq_rpt_fifo_aful_th", + ETM_QMU_CFGMT_ENQ_RPT_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x597, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_enq_rpt_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "qcfg_csw_shap_token_depth", + ETM_QMU_QCFG_CSW_SHAP_TOKEN_DEPTHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x656, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_csw_shap_token_depth_reg, + NULL, + NULL, + }, + { + "qcfg_csw_shap_offset_value", + ETM_QMU_QCFG_CSW_SHAP_OFFSET_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x6cf, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_csw_shap_offset_value_reg, + NULL, + NULL, + }, + { + "qcfg_csw_fc_offset_value", + ETM_QMU_QCFG_CSW_FC_OFFSET_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x750, + (32 / 8), + 0, + 0x3F + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_csw_fc_offset_value_reg, + NULL, + NULL, + }, + { + "qmu_init_done_state", + ETM_QMU_QMU_INIT_DONE_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2000, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_etm_qmu_qmu_init_done_state_reg, + NULL, + NULL, + }, + { + "csw_qcfg_port_shap_rdy_0", + ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_qcfg_port_shap_rdy_0_reg, + NULL, + NULL, + }, + { + "csw_qcfg_port_shap_rdy_1", + ETM_QMU_CSW_QCFG_PORT_SHAP_RDY_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_qcfg_port_shap_rdy_1_reg, + NULL, + NULL, + }, + { + "qlist_cfgmt_ram_init_done", + 
ETM_QMU_QLIST_CFGMT_RAM_INIT_DONEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2015, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_qmu_qlist_cfgmt_ram_init_done_reg, + NULL, + NULL, + }, + { + "qlist_cfgmt_ram_ecc_err", + ETM_QMU_QLIST_CFGMT_RAM_ECC_ERRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2016, + (32 / 8), + 0, + 0, + 0, + 0, + 30, + g_etm_qmu_qlist_cfgmt_ram_ecc_err_reg, + NULL, + NULL, + }, + { + "qlist_cfgmt_ram_slot_err", + ETM_QMU_QLIST_CFGMT_RAM_SLOT_ERRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2017, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_etm_qmu_qlist_cfgmt_ram_slot_err_reg, + NULL, + NULL, + }, + { + "qsch_cfgmt_ram_ecc", + ETM_QMU_QSCH_CFGMT_RAM_ECCr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2018, + (32 / 8), + 0, + 0, + 0, + 0, + 27, + g_etm_qmu_qsch_cfgmt_ram_ecc_reg, + NULL, + NULL, + }, + { + "qlist_cfgmt_fifo_state", + ETM_QMU_QLIST_CFGMT_FIFO_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2019, + (32 / 8), + 0, + 0, + 0, + 0, + 26, + g_etm_qmu_qlist_cfgmt_fifo_state_reg, + NULL, + NULL, + }, + { + "qlist_qcfg_clr_done", + ETM_QMU_QLIST_QCFG_CLR_DONEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qlist_qcfg_clr_done_reg, + NULL, + NULL, + }, + { + "qmu_int_mask1", + ETM_QMU_QMU_INT_MASK1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask1_reg, + NULL, + NULL, + }, + { + "qmu_int_mask2", + ETM_QMU_QMU_INT_MASK2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask2_reg, + NULL, + NULL, + }, + { + "qmu_int_mask3", + ETM_QMU_QMU_INT_MASK3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask3_reg, + NULL, + NULL, + }, + { + "qmu_int_mask4", + ETM_QMU_QMU_INT_MASK4r, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask4_reg, + NULL, + NULL, + }, + { + "qmu_int_mask5", + ETM_QMU_QMU_INT_MASK5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x201f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask5_reg, + NULL, + NULL, + }, + { + "qmu_int_mask6", + ETM_QMU_QMU_INT_MASK6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2013, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_int_mask6_reg, + NULL, + NULL, + }, + { + "cmd_sch_cfgmt_fifo_state", + ETM_QMU_CMD_SCH_CFGMT_FIFO_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2020, + (32 / 8), + 0, + 0, + 0, + 0, + 26, + g_etm_qmu_cmd_sch_cfgmt_fifo_state_reg, + NULL, + NULL, + }, + { + "qlist_r_bcnt", + ETM_QMU_QLIST_R_BCNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x60000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qlist_r_bcnt_reg, + NULL, + NULL, + }, + { + "qsch_rw_crbal", + ETM_QMU_QSCH_RW_CRBALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x70000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qsch_rw_crbal_reg, + NULL, + NULL, + }, + { + "qsch_rw_crs", + ETM_QMU_QSCH_RW_CRSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qsch_rw_crs_reg, + NULL, + NULL, + }, + { + "qsch_r_wlist_empty", + ETM_QMU_QSCH_R_WLIST_EMPTYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xa0000, + (32 / 8), + 0, + 0xfff + 1, + 0, + 1, + 1, + g_etm_qmu_qsch_r_wlist_empty_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_baram_rd", + ETM_QMU_QCFG_QLIST_BARAM_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xe0000, + (32 / 8), + 0, + 0x7ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_baram_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crbal_fb_rw", + ETM_QMU_QCFG_QSCH_CRBAL_FB_RWr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xb0000, + (32 / 8), + 
0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qsch_crbal_fb_rw_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp0_bank", + ETM_QMU_QCFG_QLIST_GRP0_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x7c1, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp0_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp1_bank", + ETM_QMU_QCFG_QLIST_GRP1_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x801, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp1_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp2_bank", + ETM_QMU_QCFG_QLIST_GRP2_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x841, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp2_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp3_bank", + ETM_QMU_QCFG_QLIST_GRP3_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x881, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp3_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp4_bank", + ETM_QMU_QCFG_QLIST_GRP4_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x8c1, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp4_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp5_bank", + ETM_QMU_QCFG_QLIST_GRP5_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x967, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp5_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp6_bank", + ETM_QMU_QCFG_QLIST_GRP6_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xc00, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp6_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp7_bank", + ETM_QMU_QCFG_QLIST_GRP7_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xc40, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp7_bank_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_grp", + ETM_QMU_QCFG_QLIST_GRPr, + ETM, + DPP_REG_FLAG_INDIRECT, 
+ DPP_REG_UNI_ARRAY, + 0x901, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_grp_reg, + NULL, + NULL, + }, + { + "cfgmt_active_to_bank_cfg", + ETM_QMU_CFGMT_ACTIVE_TO_BANK_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x941, + (32 / 8), + 0, + 0xf + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_active_to_bank_cfg_reg, + NULL, + NULL, + }, + { + "cfgmt_ddr_in_mmu_cfg", + ETM_QMU_CFGMT_DDR_IN_MMU_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x951, + (32 / 8), + 0, + 0x7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_ddr_in_mmu_cfg_reg, + NULL, + NULL, + }, + { + "cfgmt_ddr_in_qmu_cfg", + ETM_QMU_CFGMT_DDR_IN_QMU_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x959, + (32 / 8), + 0, + 0x9 + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_ddr_in_qmu_cfg_reg, + NULL, + NULL, + }, + { + "cfgmt_bank_to_mmu_cfg", + ETM_QMU_CFGMT_BANK_TO_MMU_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xb00, + (32 / 8), + 0, + 0x3f + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_bank_to_mmu_cfg_reg, + NULL, + NULL, + }, + { + "cfgmt_bank_to_qmu_cfg", + ETM_QMU_CFGMT_BANK_TO_QMU_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xb40, + (32 / 8), + 0, + 0x4f + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_bank_to_qmu_cfg_reg, + NULL, + NULL, + }, + { + "cfgmt_grp_ram_n_clr_thd", + ETM_QMU_CFGMT_GRP_RAM_N_CLR_THDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe0a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_grp_ram_n_clr_thd_reg, + NULL, + NULL, + }, + { + "cfgmt_age_pkt_num", + ETM_QMU_CFGMT_AGE_PKT_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe00, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_age_pkt_num_reg, + NULL, + NULL, + }, + { + "cfgmt_age_multi_interval", + ETM_QMU_CFGMT_AGE_MULTI_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe01, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_age_multi_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pkt_age_en", + 
ETM_QMU_CFGMT_QMU_PKT_AGE_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe02, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_pkt_age_en_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pkt_age_interval", + ETM_QMU_CFGMT_QMU_PKT_AGE_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe03, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_pkt_age_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pkt_age_start_end", + ETM_QMU_CFGMT_QMU_PKT_AGE_START_ENDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe04, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_cfgmt_qmu_pkt_age_start_end_reg, + NULL, + NULL, + }, + { + "cfgmt_pkt_age_req_aful_th", + ETM_QMU_CFGMT_PKT_AGE_REQ_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe05, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_pkt_age_req_aful_th_reg, + NULL, + NULL, + }, + { + "cfgmt_pkt_age_step_interval", + ETM_QMU_CFGMT_PKT_AGE_STEP_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe12, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_pkt_age_step_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_age_mode", + ETM_QMU_CFGMT_QMU_IMEM_AGE_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe06, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_qmu_cfgmt_qmu_imem_age_mode_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_qlen_age_interval", + ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe07, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_imem_qlen_age_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_time_age_interval", + ETM_QMU_CFGMT_QMU_IMEM_TIME_AGE_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe08, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_imem_time_age_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_qlen_age_thd", + ETM_QMU_CFGMT_QMU_IMEM_QLEN_AGE_THDr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe09, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_imem_qlen_age_thd_reg, + NULL, + NULL, + }, + { + "cfgmt_imem_age_step_interval", + ETM_QMU_CFGMT_IMEM_AGE_STEP_INTERVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe13, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_imem_age_step_interval_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_ecc_bypass_read", + ETM_QMU_CFGMT_QMU_ECC_BYPASS_READr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe0b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_resp_stat_fc_en", + ETM_QMU_CFGMT_QMU_RESP_STAT_FC_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe0c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_resp_stat_fc_en_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_bank_xoff_pds_mode", + ETM_QMU_CFGMT_QMU_BANK_XOFF_PDS_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_bank_xoff_pds_mode_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_stat_offset", + ETM_QMU_CFGMT_QMU_STAT_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe11, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_stat_offset_reg, + NULL, + NULL, + }, + { + "fc_cnt_mode", + ETM_QMU_FC_CNT_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2800, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_fc_cnt_mode_reg, + NULL, + NULL, + }, + { + "mmu_qmu_wr_fc_cnt", + ETM_QMU_MMU_QMU_WR_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2801, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_mmu_qmu_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "mmu_qmu_rd_fc_cnt", + ETM_QMU_MMU_QMU_RD_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2802, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_mmu_qmu_rd_fc_cnt_reg, + NULL, + NULL, + }, + { + "qmu_cgavd_fc_cnt", + 
ETM_QMU_QMU_CGAVD_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2803, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_cgavd_fc_cnt_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_cnt", + ETM_QMU_CGAVD_QMU_PKT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2804, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cgavd_qmu_pkt_cnt_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pktlen_all", + ETM_QMU_CGAVD_QMU_PKTLEN_ALLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2805, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cgavd_qmu_pktlen_all_reg, + NULL, + NULL, + }, + { + "observe_portfc_spec", + ETM_QMU_OBSERVE_PORTFC_SPECr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2818, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_observe_portfc_spec_reg, + NULL, + NULL, + }, + { + "spec_lif_portfc_count", + ETM_QMU_SPEC_LIF_PORTFC_COUNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2819, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_lif_portfc_count_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pfc_en", + ETM_QMU_CFGMT_QMU_PFC_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_pfc_en_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pfc_mask_1", + ETM_QMU_CFGMT_QMU_PFC_MASK_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20e1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_pfc_mask_1_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_pfc_mask_2", + ETM_QMU_CFGMT_QMU_PFC_MASK_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20e2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_pfc_mask_2_reg, + NULL, + NULL, + }, + { + "int_repeat", + CFG_PCIE_INT_REPEATr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_int_repeat_reg, + NULL, + NULL, + }, + { + "dma_up_size", + CFG_DMA_DMA_UP_SIZEr, 
+ CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_size_reg, + NULL, + NULL, + }, + { + "soc_wr_time_out_thresh", + CFG_CSR_SOC_WR_TIME_OUT_THRESHr, + CFG, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_soc_wr_time_out_thresh_reg, + NULL, + NULL, + }, + { + "cfg_shap_param", + NPPU_MR_CFG_CFG_SHAP_PARAMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_mr_cfg_cfg_shap_param_reg, + NULL, + NULL, + }, + { + "cfg_shap_token", + NPPU_MR_CFG_CFG_SHAP_TOKENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_mr_cfg_cfg_shap_token_reg, + NULL, + NULL, + }, + { + "idle_ptr_fifo_aful_th", + NPPU_MR_CFG_IDLE_PTR_FIFO_AFUL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_idle_ptr_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "mr_cos_port_cfg", + NPPU_MR_CFG_MR_COS_PORT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0104, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_mr_cos_port_cfg_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_PKTRX_CFG_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_PKTRX_CFG_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 
3, + g_nppu_pktrx_cfg_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_data0", + NPPU_PKTRX_CFG_IND_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data0_reg, + NULL, + NULL, + }, + { + "ind_data1", + NPPU_PKTRX_CFG_IND_DATA1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data1_reg, + NULL, + NULL, + }, + { + "ind_data2", + NPPU_PKTRX_CFG_IND_DATA2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data2_reg, + NULL, + NULL, + }, + { + "ind_data3", + NPPU_PKTRX_CFG_IND_DATA3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data3_reg, + NULL, + NULL, + }, + { + "ind_data4", + NPPU_PKTRX_CFG_IND_DATA4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data4_reg, + NULL, + NULL, + }, + { + "ind_data5", + NPPU_PKTRX_CFG_IND_DATA5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data5_reg, + NULL, + NULL, + }, + { + "ind_data6", + NPPU_PKTRX_CFG_IND_DATA6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data6_reg, + NULL, + NULL, + }, + { + "ind_data7", + NPPU_PKTRX_CFG_IND_DATA7r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_ind_data7_reg, + NULL, + NULL, + }, + { + "tcam_0_cmd", + NPPU_PKTRX_CFG_TCAM_0_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 11, + g_nppu_pktrx_cfg_tcam_0_cmd_reg, + NULL, + NULL, + }, + { + "tcam_1_cmd", + NPPU_PKTRX_CFG_TCAM_1_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_nppu_pktrx_cfg_tcam_1_cmd_reg, + NULL, + NULL, + }, + { + "port_en_0", + NPPU_PKTRX_CFG_PORT_EN_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_en_0_reg, + NULL, + NULL, + }, + { + "port_en_1", + NPPU_PKTRX_CFG_PORT_EN_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_en_1_reg, + NULL, + NULL, + }, + { + "port_en_2", + NPPU_PKTRX_CFG_PORT_EN_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_en_2_reg, + NULL, + NULL, + }, + { + "port_en_3", + NPPU_PKTRX_CFG_PORT_EN_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pktrx_cfg_port_en_3_reg, + NULL, + NULL, + }, + { + "cfg_port_l2_offset_mode_0", + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_0_reg, + NULL, + NULL, + }, + { + 
"cfg_port_l2_offset_mode_1", + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_1_reg, + NULL, + NULL, + }, + { + "cfg_port_l2_offset_mode_2", + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_2_reg, + NULL, + NULL, + }, + { + "cfg_port_l2_offset_mode_3", + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODE_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_3_reg, + NULL, + NULL, + }, + { + "port_fc_mode_0", + NPPU_PKTRX_CFG_PORT_FC_MODE_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_0_reg, + NULL, + NULL, + }, + { + "port_fc_mode_1", + NPPU_PKTRX_CFG_PORT_FC_MODE_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_1_reg, + NULL, + NULL, + }, + { + "port_fc_mode_2", + NPPU_PKTRX_CFG_PORT_FC_MODE_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_2_reg, + NULL, + NULL, + }, + { + "port_fc_mode_3", + NPPU_PKTRX_CFG_PORT_FC_MODE_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_3_reg, + NULL, + NULL, + }, + 
{ + "port_fc_mode_4", + NPPU_PKTRX_CFG_PORT_FC_MODE_4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_4_reg, + NULL, + NULL, + }, + { + "port_fc_mode_5", + NPPU_PKTRX_CFG_PORT_FC_MODE_5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_5_reg, + NULL, + NULL, + }, + { + "port_fc_mode_6", + NPPU_PKTRX_CFG_PORT_FC_MODE_6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_port_fc_mode_6_reg, + NULL, + NULL, + }, + { + "port_fc_mode_7", + NPPU_PKTRX_CFG_PORT_FC_MODE_7r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pktrx_cfg_port_fc_mode_7_reg, + NULL, + NULL, + }, + { + "cfg_isch_aging_th", + NPPU_PKTRX_CFG_CFG_ISCH_AGING_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_cfg_isch_aging_th_reg, + NULL, + NULL, + }, + { + "isch_fifo_th_0", + NPPU_PKTRX_CFG_ISCH_FIFO_TH_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_isch_fifo_th_0_reg, + NULL, + NULL, + }, + { + "isch_cfg_1", + NPPU_PKTRX_CFG_ISCH_CFG_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_nppu_pktrx_cfg_isch_cfg_1_reg, + NULL, + NULL, + }, + { + "tcam_0_vld", + NPPU_PKTRX_CFG_TCAM_0_VLDr, + NPPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00d4, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_nppu_pktrx_cfg_tcam_0_vld_reg, + NULL, + NULL, + }, + { + "tcam_1_vld", + NPPU_PKTRX_CFG_TCAM_1_VLDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00f4, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_nppu_pktrx_cfg_tcam_1_vld_reg, + NULL, + NULL, + }, + { + "cpu_port_en_mask", + NPPU_PKTRX_CFG_CPU_PORT_EN_MASKr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cpu_port_en_mask_reg, + NULL, + NULL, + }, + { + "pktrx_glbal_cfg_0", + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_glbal_cfg_0_reg, + NULL, + NULL, + }, + { + "pktrx_glbal_cfg_1", + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_glbal_cfg_1_reg, + NULL, + NULL, + }, + { + "pktrx_glbal_cfg_2", + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_glbal_cfg_2_reg, + NULL, + NULL, + }, + { + "pktrx_glbal_cfg_3", + NPPU_PKTRX_CFG_PKTRX_GLBAL_CFG_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_glbal_cfg_3_reg, + NULL, + NULL, + }, + { + "nppu_start", + NPPU_PKTRX_CFG_NPPU_STARTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_nppu_start_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_PKTRX_STAT_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_STAT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_stat_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_PKTRX_STAT_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_STAT_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pktrx_stat_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_data0", + NPPU_PKTRX_STAT_IND_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_STAT_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_stat_ind_data0_reg, + NULL, + NULL, + }, + { + "debug_cnt_ovfl_mode", + NPPU_IDMA_CFG_DEBUG_CNT_OVFL_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x4f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_cfg_debug_cnt_ovfl_mode_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_IDMA_STAT_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_STAT_BASE_ADDR + 0x0400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_stat_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_IDMA_STAT_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_STAT_BASE_ADDR + 0x0404, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_idma_stat_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_data0", + NPPU_IDMA_STAT_IND_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_STAT_BASE_ADDR + 0x0408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_stat_ind_data0_reg, + NULL, + NULL, + }, + { + "ind_status", + 
NPPU_PBU_CFG_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_PBU_CFG_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x404, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pbu_cfg_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_data0", + NPPU_PBU_CFG_IND_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data0_reg, + NULL, + NULL, + }, + { + "ind_data1", + NPPU_PBU_CFG_IND_DATA1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x40c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data1_reg, + NULL, + NULL, + }, + { + "ind_data2", + NPPU_PBU_CFG_IND_DATA2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data2_reg, + NULL, + NULL, + }, + { + "ind_data3", + NPPU_PBU_CFG_IND_DATA3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x414, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data3_reg, + NULL, + NULL, + }, + { + "ind_data4", + NPPU_PBU_CFG_IND_DATA4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x418, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data4_reg, + NULL, + NULL, + }, + { + "ind_data5", + NPPU_PBU_CFG_IND_DATA5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x41c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data5_reg, + NULL, + NULL, + }, + { + "ind_data6", + 
NPPU_PBU_CFG_IND_DATA6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x420, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data6_reg, + NULL, + NULL, + }, + { + "ind_data7", + NPPU_PBU_CFG_IND_DATA7r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x424, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ind_data7_reg, + NULL, + NULL, + }, + { + "idma_public_th", + NPPU_PBU_CFG_IDMA_PUBLIC_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_idma_public_th_reg, + NULL, + NULL, + }, + { + "lif_public_th", + NPPU_PBU_CFG_LIF_PUBLIC_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_lif_public_th_reg, + NULL, + NULL, + }, + { + "idma_total_th", + NPPU_PBU_CFG_IDMA_TOTAL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_idma_total_th_reg, + NULL, + NULL, + }, + { + "lif_total_th", + NPPU_PBU_CFG_LIF_TOTAL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_lif_total_th_reg, + NULL, + NULL, + }, + { + "mc_total_th", + NPPU_PBU_CFG_MC_TOTAL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_mc_total_th_reg, + NULL, + NULL, + }, + { + "mc_cos10_th", + NPPU_PBU_CFG_MC_COS10_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4dc, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + 
g_nppu_pbu_cfg_mc_cos10_th_reg, + NULL, + NULL, + }, + { + "mc_cos32_th", + NPPU_PBU_CFG_MC_COS32_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4e0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_mc_cos32_th_reg, + NULL, + NULL, + }, + { + "mc_cos54_th", + NPPU_PBU_CFG_MC_COS54_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4e4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_mc_cos54_th_reg, + NULL, + NULL, + }, + { + "mc_cos76_th", + NPPU_PBU_CFG_MC_COS76_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4e8, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_mc_cos76_th_reg, + NULL, + NULL, + }, + { + "debug_cnt_ovfl_mode", + NPPU_PBU_CFG_DEBUG_CNT_OVFL_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_debug_cnt_ovfl_mode_reg, + NULL, + NULL, + }, + { + "se_key_aful_negate_cfg", + NPPU_PBU_CFG_SE_KEY_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x530, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_se_key_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "sa_flag", + NPPU_PBU_CFG_SA_FLAGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x724, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_sa_flag_reg, + NULL, + NULL, + }, + { + "ind_data", + NPPU_PBU_STAT_IND_DATAr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 127 + 1, + 0, + 4, + 1, + g_nppu_pbu_stat_ind_data_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_PBU_STAT_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x0400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_PBU_STAT_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x0404, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pbu_stat_ind_cmd_reg, + NULL, + NULL, + }, + { + "total_cnt", + NPPU_PBU_STAT_TOTAL_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x558, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_total_cnt_reg, + NULL, + NULL, + }, + { + "idma_pub_cnt", + NPPU_PBU_STAT_IDMA_PUB_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x55c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_idma_pub_cnt_reg, + NULL, + NULL, + }, + { + "lif_pub_cnt", + NPPU_PBU_STAT_LIF_PUB_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x560, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_lif_pub_cnt_reg, + NULL, + NULL, + }, + { + "mc_total_cnt", + NPPU_PBU_STAT_MC_TOTAL_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x564, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_mc_total_cnt_reg, + NULL, + NULL, + }, + { + "pbu_thram_init_done", + NPPU_PBU_STAT_PBU_THRAM_INIT_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x568, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_thram_init_done_reg, + NULL, + NULL, + }, + { + "ifb_fptr_init_done", + NPPU_PBU_STAT_IFB_FPTR_INIT_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x56c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_ifb_fptr_init_done_reg, + NULL, + NULL, + 
}, + { + "weight_normal_uc", + NPPU_ISU_CFG_WEIGHT_NORMAL_UCr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_normal_uc_reg, + NULL, + NULL, + }, + { + "fabric_or_saip", + NPPU_ISU_CFG_FABRIC_OR_SAIPr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_fabric_or_saip_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_ISU_STAT_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_ISU_STAT_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_isu_stat_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_dat0", + NPPU_ISU_STAT_IND_DAT0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_ind_dat0_reg, + NULL, + NULL, + }, + { + "ind_access_done", + NPPU_ODMA_CFG_IND_ACCESS_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ind_access_done_reg, + NULL, + NULL, + }, + { + "ind_command", + NPPU_ODMA_CFG_IND_COMMANDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_odma_cfg_ind_command_reg, + NULL, + NULL, + }, + { + "ind_dat0", + NPPU_ODMA_CFG_IND_DAT0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0008, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ind_dat0_reg, + NULL, + NULL, + }, + { + "ind_dat1", + NPPU_ODMA_CFG_IND_DAT1r, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ind_dat1_reg, + NULL, + NULL, + }, + { + "fabric_or_saip", + NPPU_ODMA_CFG_FABRIC_OR_SAIPr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_fabric_or_saip_reg, + NULL, + NULL, + }, + { + "max_pkt_len", + NPPU_ODMA_CFG_MAX_PKT_LENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_max_pkt_len_reg, + NULL, + NULL, + }, + { + "age_en", + NPPU_ODMA_CFG_AGE_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_age_en_reg, + NULL, + NULL, + }, + { + "age_mode", + NPPU_ODMA_CFG_AGE_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_age_mode_reg, + NULL, + NULL, + }, + { + "age_value_time", + NPPU_ODMA_CFG_AGE_VALUE_TIMEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_age_value_time_reg, + NULL, + NULL, + }, + { + "age_value_room", + NPPU_ODMA_CFG_AGE_VALUE_ROOMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_age_value_room_reg, + NULL, + NULL, + }, + { + "age_out_cnt", + NPPU_ODMA_CFG_AGE_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_age_out_cnt_reg, + NULL, + NULL, + }, + { + "token_value_a", + NPPU_ODMA_CFG_TOKEN_VALUE_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0114, + (32 / 8), + 0, + 59 + 1, + 0, + 4, + 1, + g_nppu_odma_cfg_token_value_a_reg, + NULL, + NULL, + }, + { + "token_value_b", + NPPU_ODMA_CFG_TOKEN_VALUE_Br, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0204, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, + g_nppu_odma_cfg_token_value_b_reg, + NULL, + NULL, + }, + { + "cfg_shap_en_p0", + NPPU_ODMA_CFG_CFG_SHAP_EN_P0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_cfg_shap_en_p0_reg, + NULL, + NULL, + }, + { + "cfg_shap_en_p1", + NPPU_ODMA_CFG_CFG_SHAP_EN_P1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_cfg_shap_en_p1_reg, + NULL, + NULL, + }, + { + "cfg_shap_en_tm", + NPPU_ODMA_CFG_CFG_SHAP_EN_TMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_cfg_shap_en_tm_reg, + NULL, + NULL, + }, + { + "ind_status", + NPPU_ODMA_STAT_IND_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_STAT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_stat_ind_status_reg, + NULL, + NULL, + }, + { + "ind_cmd", + NPPU_ODMA_STAT_IND_CMDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_STAT_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + 
g_nppu_odma_stat_ind_cmd_reg, + NULL, + NULL, + }, + { + "ind_data0", + NPPU_ODMA_STAT_IND_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_STAT_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_stat_ind_data0_reg, + NULL, + NULL, + }, + { + "debug_cnt_cfg", + NPPU_ODMA_STAT_DEBUG_CNT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_STAT_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_odma_stat_debug_cnt_cfg_reg, + NULL, + NULL, + }, + { + "bfd_firstchk_th", + NPPU_OAM_CFG_BFD_FIRSTCHK_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_bfd_firstchk_th_reg, + NULL, + NULL, + }, + { + "pbu_fc_idmath_ram", + NPPU_PBU_CFG_MEMID_0_PBU_FC_IDMATH_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (192 / 8), + 0, + 127 + 1, + 0, + 1, + 11, + g_nppu_pbu_cfg_memid_0_pbu_fc_idmath_ram_reg, + NULL, + NULL, + }, + { + "pbu_fc_macth_ram", + NPPU_PBU_CFG_MEMID_1_PBU_FC_MACTH_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (128 / 8), + 0, + 56 + 1, + 0, + 1, + 8, + g_nppu_pbu_cfg_memid_1_pbu_fc_macth_ram_reg, + NULL, + NULL, + }, + { + "all_kind_port_cnt", + NPPU_PBU_STAT_MEMID_1_ALL_KIND_PORT_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (32 / 8), + 0, + 127 + 1, + 0, + 1, + 2, + g_nppu_pbu_stat_memid_1_all_kind_port_cnt_reg, + NULL, + NULL, + }, + { + "ppu_pbu_ifb_req_vld_cnt", + NPPU_PBU_STAT_MEMID_2_PPU_PBU_IFB_REQ_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_ppu_pbu_ifb_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_ifb_rsp_vld_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_PPU_IFB_RSP_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0005, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_ifb_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "odma_pbu_recy_ptr_vld_cnt", + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_RECY_PTR_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_odma_pbu_recy_ptr_vld_cnt_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_req_cnt", + NPPU_PBU_STAT_MEMID_2_PPU_PBU_MCODE_PF_REQ_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_ppu_pbu_mcode_pf_req_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_mcode_pf_rsp_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_RSP_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ppu_pbu_logic_pf_req_cnt", + NPPU_PBU_STAT_MEMID_2_PPU_PBU_LOGIC_PF_REQ_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_ppu_pbu_logic_pf_req_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_rsp_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_RSP_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ppu_use_ptr_pulse_cnt", + NPPU_PBU_STAT_MEMID_2_PPU_USE_PTR_PULSE_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_ppu_use_ptr_pulse_cnt_reg, + NULL, + NULL, + }, + { + "ppu_pbu_wb_vld_cnt", + NPPU_PBU_STAT_MEMID_2_PPU_PBU_WB_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_ppu_pbu_wb_vld_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_reorder_para_vld_cnt", + 
NPPU_PBU_STAT_MEMID_2_PBU_PPU_REORDER_PARA_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_reorder_para_vld_cnt_reg, + NULL, + NULL, + }, + { + "se_pbu_dpi_key_vld_cnt", + NPPU_PBU_STAT_MEMID_2_SE_PBU_DPI_KEY_VLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_se_pbu_dpi_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "pbu_se_dpi_rsp_datvld_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_SE_DPI_RSP_DATVLD_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_se_dpi_rsp_datvld_cnt_reg, + NULL, + NULL, + }, + { + "odma_pbu_ifb_rd1_cnt", + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD1_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd1_cnt_reg, + NULL, + NULL, + }, + { + "odma_pbu_ifb_rd2_cnt", + NPPU_PBU_STAT_MEMID_2_ODMA_PBU_IFB_RD2_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0011, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_odma_pbu_ifb_rd2_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_mcode_pf_no_rsp_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_PPU_MCODE_PF_NO_RSP_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0015, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_mcode_pf_no_rsp_cnt_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_no_rsp_cnt", + NPPU_PBU_STAT_MEMID_2_PBU_PPU_LOGIC_PF_NO_RSP_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0016, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_2_pbu_ppu_logic_pf_no_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cpu_rd_ifb_data", + NPPU_PBU_STAT_MEMID_3_CPU_RD_IFB_DATAr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (2048 / 8), + 0, + 16383 + 1, + 0, + 1, + 1, + 
g_nppu_pbu_stat_memid_3_cpu_rd_ifb_data_reg, + NULL, + NULL, + }, + { + "mux_sel_rgt", + NPPU_PBU_STAT_MEMID_4_MUX_SEL_RGTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_memid_4_mux_sel_rgt_reg, + NULL, + NULL, + }, + { + "port_pub_cnt", + NPPU_PBU_STAT_MEMID_5_PORT_PUB_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (32 / 8), + 0, + 127 + 1, + 0, + 1, + 1, + g_nppu_pbu_stat_memid_5_port_pub_cnt_reg, + NULL, + NULL, + }, + { + "idma_o_isu_pkt_pulse_total_cnt", + NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_PKT_PULSE_TOTAL_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0012, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_stat_memid_1_idma_o_isu_pkt_pulse_total_cnt_reg, + NULL, + NULL, + }, + { + "idma_o_isu_epkt_pulse_total_cnt", + NPPU_IDMA_STAT_MEMID_1_IDMA_O_ISU_EPKT_PULSE_TOTAL_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0013, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_stat_memid_1_idma_o_isu_epkt_pulse_total_cnt_reg, + NULL, + NULL, + }, + { + "idma_dispkt_pulse_total_cnt", + NPPU_IDMA_STAT_MEMID_1_IDMA_DISPKT_PULSE_TOTAL_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_stat_memid_1_idma_dispkt_pulse_total_cnt_reg, + NULL, + NULL, + }, + { + "idma_o_isu_pkt_pulse_cnt", + NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_PKT_PULSE_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0000, + (32 / 8), + 0, + 127 + 1, + 0, + 1, + 1, + g_nppu_idma_stat_memid_0_idma_o_isu_pkt_pulse_cnt_reg, + NULL, + NULL, + }, + { + "idma_o_isu_epkt_pulse_cnt", + NPPU_IDMA_STAT_MEMID_0_IDMA_O_ISU_EPKT_PULSE_CNTr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0080, + (32 / 8), + 0, + 127 + 1, + 0, + 1, + 1, + g_nppu_idma_stat_memid_0_idma_o_isu_epkt_pulse_cnt_reg, + NULL, + NULL, + }, + { + "idma_dispkt_pulse_cnt", + NPPU_IDMA_STAT_MEMID_0_IDMA_DISPKT_PULSE_CNTr, + NPPU, 
+ DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x0100, + (32 / 8), + 0, + 127 + 1, + 0, + 1, + 1, + g_nppu_idma_stat_memid_0_idma_dispkt_pulse_cnt_reg, + NULL, + NULL, + }, + { + "test_r", + PPU_PPU_TEST_Rr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_test_r_reg, + NULL, + NULL, + }, + { + "ppu_debug_en_r", + PPU_PPU_PPU_DEBUG_EN_Rr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x00c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_debug_en_r_reg, + NULL, + NULL, + }, + { + "csr_dup_table_wr_data", + PPU_PPU_CSR_DUP_TABLE_WR_DATAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x034, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_csr_dup_table_wr_data_reg, + NULL, + NULL, + }, + { + "csr_dup_table_rd_data", + PPU_PPU_CSR_DUP_TABLE_RD_DATAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x038, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_csr_dup_table_rd_data_reg, + NULL, + NULL, + }, + { + "csr_dup_table_addr", + PPU_PPU_CSR_DUP_TABLE_ADDRr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x03c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_csr_dup_table_addr_reg, + NULL, + NULL, + }, + { + "ppu_debug_vld", + PPU_PPU_PPU_DEBUG_VLDr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_debug_vld_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_319_288", + PPU_PPU_COP_THASH_RSK_319_288r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_319_288_reg, + NULL, + NULL, + }, + { + 
"cop_thash_rsk_287_256", + PPU_PPU_COP_THASH_RSK_287_256r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_287_256_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_255_224", + PPU_PPU_COP_THASH_RSK_255_224r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_255_224_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_223_192", + PPU_PPU_COP_THASH_RSK_223_192r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x05c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_223_192_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_191_160", + PPU_PPU_COP_THASH_RSK_191_160r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_191_160_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_159_128", + PPU_PPU_COP_THASH_RSK_159_128r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_159_128_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_127_096", + PPU_PPU_COP_THASH_RSK_127_096r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_127_096_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_095_064", + PPU_PPU_COP_THASH_RSK_095_064r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x06c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_095_064_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_063_032", + PPU_PPU_COP_THASH_RSK_063_032r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_063_032_reg, + NULL, + NULL, + }, + { + "cop_thash_rsk_031_000", + PPU_PPU_COP_THASH_RSK_031_000r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cop_thash_rsk_031_000_reg, + NULL, + NULL, + }, + { + "cfg_ipv4_ipid_start_value", + PPU_PPU_CFG_IPV4_IPID_START_VALUEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cfg_ipv4_ipid_start_value_reg, + NULL, + NULL, + }, + { + "cfg_ipv4_ipid_end_value", + PPU_PPU_CFG_IPV4_IPID_END_VALUEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x07c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cfg_ipv4_ipid_end_value_reg, + NULL, + NULL, + }, + { + "cluster_mf_in_en", + PPU_PPU_CLUSTER_MF_IN_ENr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x150, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_cluster_mf_in_en_reg, + NULL, + NULL, + }, + { + "ppu_empty", + PPU_PPU_PPU_EMPTYr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x154, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_empty_reg, + NULL, + NULL, + }, + { + "instrmem_w_addr", + PPU_PPU_INSTRMEM_W_ADDRr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x514, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_addr_reg, + NULL, + NULL, + }, + { + "instrmem_w_data_191_160", + PPU_PPU_INSTRMEM_W_DATA_191_160r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x518, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_191_160_reg, + NULL, + 
NULL, + }, + { + "instrmem_w_data_159_128", + PPU_PPU_INSTRMEM_W_DATA_159_128r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x51c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_159_128_reg, + NULL, + NULL, + }, + { + "instrmem_w_data_127_96", + PPU_PPU_INSTRMEM_W_DATA_127_96r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x520, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_127_96_reg, + NULL, + NULL, + }, + { + "instrmem_w_data_95_64", + PPU_PPU_INSTRMEM_W_DATA_95_64r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x524, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_95_64_reg, + NULL, + NULL, + }, + { + "instrmem_w_data_63_32", + PPU_PPU_INSTRMEM_W_DATA_63_32r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x528, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_63_32_reg, + NULL, + NULL, + }, + { + "instrmem_w_data_31_0", + PPU_PPU_INSTRMEM_W_DATA_31_0r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x52c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_instrmem_w_data_31_0_reg, + NULL, + NULL, + }, + { + "isu_fwft_mf_fifo_prog_full_assert_cfg", + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_fwft_mf_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_fwft_mf_fifo_prog_full_negate_cfg", + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_fwft_mf_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { 
+ "int_1200m_mask", + PPU_CLUSTER_INT_1200M_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x98, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 8, + g_ppu_cluster_int_1200m_mask_reg, + NULL, + NULL, + }, + { + "wr_high_data_r_mex", + PPU4K_CLUSTER_WR_HIGH_DATA_R_MEXr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4000, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu4k_cluster_wr_high_data_r_mex_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "wr_low_data_r_mex", + PPU4K_CLUSTER_WR_LOW_DATA_R_MEXr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4004, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu4k_cluster_wr_low_data_r_mex_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "addr_r_mex", + PPU4K_CLUSTER_ADDR_R_MEXr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4008, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 2, + g_ppu4k_cluster_addr_r_mex_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "sdt_tbl_ind_access_done", + PPU4K_CLUSTER_SDT_TBL_IND_ACCESS_DONEr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x400c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu4k_cluster_sdt_tbl_ind_access_done_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "rd_high_data_r_mex", + PPU4K_CLUSTER_RD_HIGH_DATA_R_MEXr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4010, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu4k_cluster_rd_high_data_r_mex_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "rd_low_data_r_mex", + 
PPU4K_CLUSTER_RD_LOW_DATA_R_MEXr, + PPU4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4014, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu4k_cluster_rd_low_data_r_mex_reg, + dpp_ppu_write, + dpp_ppu_read, + }, + { + "init_ok", + SE_ALG_INIT_OKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_init_ok_reg, + NULL, + NULL, + }, + { + "cpu_rd_rdy", + SE_ALG_CPU_RD_RDYr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_rdy_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp0", + SE_ALG_CPU_RD_DATA_TMP0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp0_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp1", + SE_ALG_CPU_RD_DATA_TMP1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp1_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp2", + SE_ALG_CPU_RD_DATA_TMP2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp2_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp3", + SE_ALG_CPU_RD_DATA_TMP3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp3_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp4", + SE_ALG_CPU_RD_DATA_TMP4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp4_reg, + NULL, + 
NULL, + }, + { + "cpu_rd_data_tmp5", + SE_ALG_CPU_RD_DATA_TMP5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp5_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp6", + SE_ALG_CPU_RD_DATA_TMP6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp6_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp7", + SE_ALG_CPU_RD_DATA_TMP7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp7_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp8", + SE_ALG_CPU_RD_DATA_TMP8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp8_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp9", + SE_ALG_CPU_RD_DATA_TMP9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp9_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp10", + SE_ALG_CPU_RD_DATA_TMP10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp10_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp11", + SE_ALG_CPU_RD_DATA_TMP11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp11_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp12", + SE_ALG_CPU_RD_DATA_TMP12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp12_reg, 
+ NULL, + NULL, + }, + { + "cpu_rd_data_tmp13", + SE_ALG_CPU_RD_DATA_TMP13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp13_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp14", + SE_ALG_CPU_RD_DATA_TMP14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp14_reg, + NULL, + NULL, + }, + { + "cpu_rd_data_tmp15", + SE_ALG_CPU_RD_DATA_TMP15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_cpu_rd_data_tmp15_reg, + NULL, + NULL, + }, + { + "lpm_v4_config_rgt", + SE_ALG_LPM_V4_CONFIG_RGTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x014c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_alg_lpm_v4_config_rgt_reg, + NULL, + NULL, + }, + { + "lpm_v6_config_rgt", + SE_ALG_LPM_V6_CONFIG_RGTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0150, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_alg_lpm_v6_config_rgt_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u0_pfull_ast", + SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x015c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u0_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash_age_pat_cfg", + SE_AS_HASH_AGE_PAT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash_age_pat_cfg_reg, + NULL, + NULL, + }, + { + "learn_rdy_cfg", + SE_AS_LEARN_RDY_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001f8, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_se_as_learn_rdy_cfg_reg, + NULL, + NULL, + }, + { + "kschd_as_pful_cfg", + SE_KSCHD_KSCHD_AS_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_pful_cfg_reg, + NULL, + NULL, + }, + { + "kschd_dir_pful_cfg", + SE_KSCHD_KSCHD_DIR_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_dir_pful_cfg_reg, + NULL, + NULL, + }, + { + "kschd_as_ept_cfg", + SE_KSCHD_KSCHD_AS_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_ept_cfg_reg, + NULL, + NULL, + }, + { + "cpu_arbi_pful_cfg", + SE_KSCHD_CPU_ARBI_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_cpu_arbi_pful_cfg_reg, + NULL, + NULL, + }, + { + "kschd_pbu_pful_cfg", + SE_KSCHD_KSCHD_PBU_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_pbu_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_dir_pful_cfg", + SE_RSCHD_RSCHD_DIR_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_dir_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_dir_ept_cfg", + SE_RSCHD_RSCHD_DIR_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_dir_ept_cfg_reg, + NULL, + NULL, + }, + { + "cpu_cmd_rgt", + SE4K_SE_ALG_CPU_CMD_RGTr, + SE4K, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 7, + g_se4k_se_alg_cpu_cmd_rgt_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp0", + SE4K_SE_ALG_CPU_WR_DATA_TMP0r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp0_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp1", + SE4K_SE_ALG_CPU_WR_DATA_TMP1r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp1_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp2", + SE4K_SE_ALG_CPU_WR_DATA_TMP2r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp2_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp3", + SE4K_SE_ALG_CPU_WR_DATA_TMP3r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp3_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp4", + SE4K_SE_ALG_CPU_WR_DATA_TMP4r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp4_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp5", + SE4K_SE_ALG_CPU_WR_DATA_TMP5r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp5_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp6", + SE4K_SE_ALG_CPU_WR_DATA_TMP6r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 
0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp6_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp7", + SE4K_SE_ALG_CPU_WR_DATA_TMP7r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp7_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp8", + SE4K_SE_ALG_CPU_WR_DATA_TMP8r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp8_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp9", + SE4K_SE_ALG_CPU_WR_DATA_TMP9r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp9_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp10", + SE4K_SE_ALG_CPU_WR_DATA_TMP10r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp10_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp11", + SE4K_SE_ALG_CPU_WR_DATA_TMP11r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp11_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp12", + SE4K_SE_ALG_CPU_WR_DATA_TMP12r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp12_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp13", + SE4K_SE_ALG_CPU_WR_DATA_TMP13r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se4k_se_alg_cpu_wr_data_tmp13_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp14", + SE4K_SE_ALG_CPU_WR_DATA_TMP14r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp14_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_wr_data_tmp15", + SE4K_SE_ALG_CPU_WR_DATA_TMP15r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_wr_data_tmp15_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_rdy", + SE4K_SE_ALG_CPU_RD_RDYr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_rdy_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp0", + SE4K_SE_ALG_CPU_RD_DATA_TMP0r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp0_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp1", + SE4K_SE_ALG_CPU_RD_DATA_TMP1r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp1_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp2", + SE4K_SE_ALG_CPU_RD_DATA_TMP2r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp2_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp3", + SE4K_SE_ALG_CPU_RD_DATA_TMP3r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp3_reg, + dpp_se_write, + dpp_se_read, + }, 
+ { + "cpu_rd_data_tmp4", + SE4K_SE_ALG_CPU_RD_DATA_TMP4r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp4_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp5", + SE4K_SE_ALG_CPU_RD_DATA_TMP5r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp5_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp6", + SE4K_SE_ALG_CPU_RD_DATA_TMP6r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp6_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp7", + SE4K_SE_ALG_CPU_RD_DATA_TMP7r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp7_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp8", + SE4K_SE_ALG_CPU_RD_DATA_TMP8r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp8_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp9", + SE4K_SE_ALG_CPU_RD_DATA_TMP9r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp9_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp10", + SE4K_SE_ALG_CPU_RD_DATA_TMP10r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp10_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp11", + 
SE4K_SE_ALG_CPU_RD_DATA_TMP11r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp11_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp12", + SE4K_SE_ALG_CPU_RD_DATA_TMP12r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp12_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp13", + SE4K_SE_ALG_CPU_RD_DATA_TMP13r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp13_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp14", + SE4K_SE_ALG_CPU_RD_DATA_TMP14r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp14_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_rd_data_tmp15", + SE4K_SE_ALG_CPU_RD_DATA_TMP15r, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_cpu_rd_data_tmp15_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_ext_cfg_rgt", + SE4K_SE_ALG_HASH0_EXT_CFG_RGTr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0bc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash0_ext_cfg_rgt_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_ext_cfg_rgt", + SE4K_SE_ALG_HASH1_EXT_CFG_RGTr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0c0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash1_ext_cfg_rgt_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_ext_cfg_rgt", + SE4K_SE_ALG_HASH2_EXT_CFG_RGTr, + SE4K, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash2_ext_cfg_rgt_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_ext_cfg_rgt", + SE4K_SE_ALG_HASH3_EXT_CFG_RGTr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0c8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash3_ext_cfg_rgt_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl30_depth", + SE4K_SE_ALG_HASH0_TBL30_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01a4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash0_tbl30_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl74_depth", + SE4K_SE_ALG_HASH0_TBL74_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01a8, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash0_tbl74_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl30_depth", + SE4K_SE_ALG_HASH1_TBL30_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01ac, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash1_tbl30_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl74_depth", + SE4K_SE_ALG_HASH1_TBL74_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01b0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash1_tbl74_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl30_depth", + SE4K_SE_ALG_HASH2_TBL30_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01b4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash2_tbl30_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl74_depth", + SE4K_SE_ALG_HASH2_TBL74_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, 
+ SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01b8, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash2_tbl74_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl30_depth", + SE4K_SE_ALG_HASH3_TBL30_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01bc, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash3_tbl30_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl74_depth", + SE4K_SE_ALG_HASH3_TBL74_DEPTHr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01c0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash3_tbl74_depth_reg, + dpp_se_write, + dpp_se_read, + }, + { + "wr_rsp_cfg", + SE4K_SE_ALG_WR_RSP_CFGr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se4k_se_alg_wr_rsp_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash_mono_flag", + SE4K_SE_ALG_HASH_MONO_FLAGr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se4k_se_alg_hash_mono_flag_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash10_ext_crc_cfg", + SE4K_SE_ALG_HASH10_EXT_CRC_CFGr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01cc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash10_ext_crc_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash32_ext_crc_cfg", + SE4K_SE_ALG_HASH32_EXT_CRC_CFGr, + SE4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01d0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se4k_se_alg_hash32_ext_crc_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "zblock_service_configure", + SE4K_SE_ALG_ZBLOCK_SERVICE_CONFIGUREr, + SE4K, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10000, + (512 / 8), + 3 + 1, + 7 + 1, + 
0x4000, + 0x800, + 3, + g_se4k_se_alg_zblock_service_configure_reg, + dpp_se_alg_write, + dpp_se_alg_read, + }, + { + "zblock_hash_zcell_mono", + SE4K_SE_ALG_ZBLOCK_HASH_ZCELL_MONOr, + SE4K, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10014, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 8, + g_se4k_se_alg_zblock_hash_zcell_mono_reg, + dpp_se_alg_write, + dpp_se_alg_read, + }, + { + "zlock_hash_zreg_mono", + SE4K_SE_ALG_ZLOCK_HASH_ZREG_MONOr, + SE4K, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10015, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 8, + g_se4k_se_alg_zlock_hash_zreg_mono_reg, + dpp_se_alg_write, + dpp_se_alg_read, + }, + { + "init_done", + SMMU0_SMMU0_INIT_DONEr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_init_done_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat0", + SMMU0_SMMU0_CPU_IND_WDAT0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_wdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat1", + SMMU0_SMMU0_CPU_IND_WDAT1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_wdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat2", + SMMU0_SMMU0_CPU_IND_WDAT2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_wdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat3", + SMMU0_SMMU0_CPU_IND_WDAT3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_wdat3_reg, + NULL, + NULL, + }, + { + 
"cpu_ind_cmd", + SMMU0_SMMU0_CPU_IND_CMDr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_cpu_ind_cmd_reg, + NULL, + NULL, + }, + { + "cpu_ind_rd_done", + SMMU0_SMMU0_CPU_IND_RD_DONEr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rd_done_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat0", + SMMU0_SMMU0_CPU_IND_RDAT0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat1", + SMMU0_SMMU0_CPU_IND_RDAT1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat2", + SMMU0_SMMU0_CPU_IND_RDAT2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat3", + SMMU0_SMMU0_CPU_IND_RDAT3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rdat3_reg, + NULL, + NULL, + }, + { + "cfg_plcr_mono", + SMMU0_SMMU0_CFG_PLCR_MONOr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_plcr_mono_reg, + NULL, + NULL, + }, + { + "wr_arb_cpu_rdy", + SMMU0_SMMU0_WR_ARB_CPU_RDYr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x0000010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_wr_arb_cpu_rdy_reg, + NULL, + NULL, + }, + { + "tm_stat_en_cfg", + SMMU0_SMMU0_TM_STAT_EN_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000110, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_tm_stat_en_cfg_reg, + NULL, + NULL, + }, + { + "ddr_wdat0", + SE_SMMU1_DDR_WDAT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat0_reg, + NULL, + NULL, + }, + { + "dir_arbi_ser_rpful", + SE_SMMU1_DIR_ARBI_SER_RPFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dir_arbi_ser_rpful_reg, + NULL, + NULL, + }, + { + "cfg_wr_arbi_pful2", + SE_SMMU1_CFG_WR_ARBI_PFUL2r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000120, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cfg_wr_arbi_pful2_reg, + NULL, + NULL, + }, + { + "etm_tbl_cfg", + SE_SMMU1_ETM_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_etm_tbl_cfg_reg, + NULL, + NULL, + }, + { + "cfg_cash_addr_pful", + SE_SMMU1_CFG_CASH_ADDR_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_cash_addr_pful_reg, + NULL, + NULL, + }, + { + "ctrl_rfifo_cfg", + SE_SMMU1_CTRL_RFIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000234, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_smmu1_ctrl_rfifo_cfg_reg, + NULL, + NULL, + }, + { + 
"cache_req_fifo_cfg", + SE_SMMU1_CACHE_REQ_FIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000288, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cache_req_fifo_cfg_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_wdat0", + STAT_STAT_CFG_CPU_IND_ERAM_WDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_wdat0_reg, + NULL, + NULL, + }, + { + "etm_port_sel_cfg", + STAT_STAT_CFG_ETM_PORT_SEL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_etm_port_sel_cfg_reg, + NULL, + NULL, + }, + { + "tm_stat_cfg", + STAT_STAT_CFG_TM_STAT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000004c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_stat_stat_cfg_tm_stat_cfg_reg, + NULL, + NULL, + }, + { + "ppu_eram_depth", + STAT_STAT_CFG_PPU_ERAM_DEPTHr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_ppu_eram_depth_reg, + NULL, + NULL, + }, + { + "ppu_eram_base_addr", + STAT_STAT_CFG_PPU_ERAM_BASE_ADDRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_ppu_eram_base_addr_reg, + NULL, + NULL, + }, + { + "ppu_ddr_base_addr", + STAT_STAT_CFG_PPU_DDR_BASE_ADDRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_ppu_ddr_base_addr_reg, + NULL, + NULL, + }, + { + "plcr0_base_addr", + STAT_STAT_CFG_PLCR0_BASE_ADDRr, + STAT, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_plcr0_base_addr_reg, + NULL, + NULL, + }, + { + "etm_stat_start_addr_cfg", + STAT_STAT_CFG_ETM_STAT_START_ADDR_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_start_addr_cfg_reg, + NULL, + NULL, + }, + { + "etm_stat_depth_cfg", + STAT_STAT_CFG_ETM_STAT_DEPTH_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_depth_cfg_reg, + NULL, + NULL, + }, + { + "cycle_mov_en_cfg", + STAT_STAT_CFG_CYCLE_MOV_EN_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cycle_mov_en_cfg_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat0", + STAT_ETCAM_CPU_IND_WDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_ctrl_tmp0", + STAT_ETCAM_CPU_IND_CTRL_TMP0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000054, + (32 / 8), + 0, + 0, + 0, + 0, + 7, + g_stat_etcam_cpu_ind_ctrl_tmp0_reg, + NULL, + NULL, + }, + { + "cpu_ind_ctrl_tmp1", + STAT_ETCAM_CPU_IND_CTRL_TMP1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000058, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_stat_etcam_cpu_ind_ctrl_tmp1_reg, + NULL, + NULL, + }, + { + "cpu_ind_rd_done", + STAT_ETCAM_CPU_IND_RD_DONEr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_ETCAM_BASE_ADDR + 0x000001fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_rd_done_reg, + NULL, + NULL, + }, + { + "cpu_rdat0", + STAT_ETCAM_CPU_RDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_rdat1", + STAT_ETCAM_CPU_RDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_rdat2", + STAT_ETCAM_CPU_RDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_rdat3", + STAT_ETCAM_CPU_RDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat3_reg, + NULL, + NULL, + }, + { + "cpu_rdat4", + STAT_ETCAM_CPU_RDAT4r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat4_reg, + NULL, + NULL, + }, + { + "cpu_rdat5", + STAT_ETCAM_CPU_RDAT5r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat5_reg, + NULL, + NULL, + }, + { + "cpu_rdat6", + STAT_ETCAM_CPU_RDAT6r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat6_reg, + NULL, + NULL, + }, + { + "cpu_rdat7", + STAT_ETCAM_CPU_RDAT7r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_ETCAM_BASE_ADDR + 0x0000021c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat7_reg, + NULL, + NULL, + }, + { + "cpu_rdat8", + STAT_ETCAM_CPU_RDAT8r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000220, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat8_reg, + NULL, + NULL, + }, + { + "cpu_rdat9", + STAT_ETCAM_CPU_RDAT9r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000224, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat9_reg, + NULL, + NULL, + }, + { + "cpu_rdat10", + STAT_ETCAM_CPU_RDAT10r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000228, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat10_reg, + NULL, + NULL, + }, + { + "cpu_rdat11", + STAT_ETCAM_CPU_RDAT11r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000022c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat11_reg, + NULL, + NULL, + }, + { + "cpu_rdat12", + STAT_ETCAM_CPU_RDAT12r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000230, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat12_reg, + NULL, + NULL, + }, + { + "cpu_rdat13", + STAT_ETCAM_CPU_RDAT13r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000234, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat13_reg, + NULL, + NULL, + }, + { + "cpu_rdat14", + STAT_ETCAM_CPU_RDAT14r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000238, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat14_reg, + NULL, + NULL, + }, + { + "cpu_rdat15", + STAT_ETCAM_CPU_RDAT15r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000023c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat15_reg, + NULL, + NULL, + }, + { + "cpu_rdat16", + STAT_ETCAM_CPU_RDAT16r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000240, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat16_reg, + NULL, + NULL, + }, + { + "cpu_rdat17", + STAT_ETCAM_CPU_RDAT17r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000244, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat17_reg, + NULL, + NULL, + }, + { + "cpu_rdat18", + STAT_ETCAM_CPU_RDAT18r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000248, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat18_reg, + NULL, + NULL, + }, + { + "cpu_rdat19", + STAT_ETCAM_CPU_RDAT19r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000024c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_rdat19_reg, + NULL, + NULL, + }, + { + "qvbo", + STAT_ETCAM_QVBOr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000250, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_qvbo_reg, + NULL, + NULL, + }, + { + "cnt_overflow_mode", + STAT_ETCAM_CNT_OVERFLOW_MODEr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x000003ec, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_etcam_cnt_overflow_mode_reg, + NULL, + NULL, + }, + { + "cara_queue_ram0_159_0", + STAT_CAR0_CARA_QUEUE_RAM0_159_0r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x000000 + 0x14000000, + (160 / 8), + 0, + 0x7FFF + 1, + 0, + 8, + 9, + g_stat_car0_cara_queue_ram0_159_0_reg, + NULL, + NULL, + }, + { + "cara_profile_ram1_255_0", + STAT_CAR0_CARA_PROFILE_RAM1_255_0r, + 
STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x040000 + 0x20000000, + (256 / 8), + 0, + 0x1FF + 1, + 0, + 8, + 31, + g_stat_car0_cara_profile_ram1_255_0_reg, + NULL, + NULL, + }, + { + "cara_qovs_ram_ram2", + STAT_CAR0_CARA_QOVS_RAM_RAM2r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x80000, + (32 / 8), + 0, + 0x7FFF + 1, + 0, + 1, + 1, + g_stat_car0_cara_qovs_ram_ram2_reg, + NULL, + NULL, + }, + { + "look_up_table1", + STAT_CAR0_LOOK_UP_TABLE1r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xc0000, + (32 / 8), + 0, + 0x7FFF + 1, + 0, + 1, + 2, + g_stat_car0_look_up_table1_reg, + NULL, + NULL, + }, + { + "cara_pkt_des_i_cnt", + STAT_CAR0_CARA_PKT_DES_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_pkt_des_i_cnt_reg, + NULL, + NULL, + }, + { + "cara_green_pkt_i_cnt", + STAT_CAR0_CARA_GREEN_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_green_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "cara_yellow_pkt_i_cnt", + STAT_CAR0_CARA_YELLOW_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_yellow_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "cara_red_pkt_i_cnt", + STAT_CAR0_CARA_RED_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_red_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "cara_pkt_des_o_cnt", + STAT_CAR0_CARA_PKT_DES_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_pkt_des_o_cnt_reg, + NULL, + NULL, + }, + { + "cara_green_pkt_o_cnt", + STAT_CAR0_CARA_GREEN_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_green_pkt_o_cnt_reg, + NULL, + NULL, + }, + 
{ + "cara_yellow_pkt_o_cnt", + STAT_CAR0_CARA_YELLOW_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_yellow_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "cara_red_pkt_o_cnt", + STAT_CAR0_CARA_RED_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_red_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "cara_pkt_des_fc_for_cfg_cnt", + STAT_CAR0_CARA_PKT_DES_FC_FOR_CFG_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_pkt_des_fc_for_cfg_cnt_reg, + NULL, + NULL, + }, + { + "cara_appoint_qnum_or_sp", + STAT_CAR0_CARA_APPOINT_QNUM_OR_SPr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c0009, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_car0_cara_appoint_qnum_or_sp_reg, + NULL, + NULL, + }, + { + "cara_cfgmt_count_mode", + STAT_CAR0_CARA_CFGMT_COUNT_MODEr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c000a, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_car0_cara_cfgmt_count_mode_reg, + NULL, + NULL, + }, + { + "cara_pkt_size_cnt", + STAT_CAR0_CARA_PKT_SIZE_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_pkt_size_cnt_reg, + NULL, + NULL, + }, + { + "cara_plcr_init_dont", + STAT_CAR0_CARA_PLCR_INIT_DONTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_plcr_init_dont_reg, + NULL, + NULL, + }, + { + "carb_queue_ram0_159_0", + STAT_CAR0_CARB_QUEUE_RAM0_159_0r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x100000 + 0x14000000, + (160 / 8), + 0, + 0xFFF + 1, + 0, + 8, + 9, + g_stat_car0_carb_queue_ram0_159_0_reg, + NULL, + NULL, + }, + { + "carb_profile_ram1_255_0", + STAT_CAR0_CARB_PROFILE_RAM1_255_0r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 
0x140000 + 0x20000000, + (256 / 8), + 0, + 0x7F + 1, + 0, + 8, + 33, + g_stat_car0_carb_profile_ram1_255_0_reg, + NULL, + NULL, + }, + { + "carb_qovs_ram_ram2", + STAT_CAR0_CARB_QOVS_RAM_RAM2r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x180000, + (32 / 8), + 0, + 0xFFF + 1, + 0, + 1, + 1, + g_stat_car0_carb_qovs_ram_ram2_reg, + NULL, + NULL, + }, + { + "look_up_table2", + STAT_CAR0_LOOK_UP_TABLE2r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1c0000, + (32 / 8), + 0, + 0xFFF + 1, + 0, + 1, + 2, + g_stat_car0_look_up_table2_reg, + NULL, + NULL, + }, + { + "carb_pkt_des_i_cnt", + STAT_CAR0_CARB_PKT_DES_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_pkt_des_i_cnt_reg, + NULL, + NULL, + }, + { + "carb_green_pkt_i_cnt", + STAT_CAR0_CARB_GREEN_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_green_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carb_yellow_pkt_i_cnt", + STAT_CAR0_CARB_YELLOW_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_yellow_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carb_red_pkt_i_cnt", + STAT_CAR0_CARB_RED_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_red_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carb_pkt_des_o_cnt", + STAT_CAR0_CARB_PKT_DES_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_pkt_des_o_cnt_reg, + NULL, + NULL, + }, + { + "carb_green_pkt_o_cnt", + STAT_CAR0_CARB_GREEN_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_green_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carb_yellow_pkt_o_cnt", + 
STAT_CAR0_CARB_YELLOW_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_yellow_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carb_red_pkt_o_cnt", + STAT_CAR0_CARB_RED_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_red_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carb_pkt_des_fc_for_cfg_cnt", + STAT_CAR0_CARB_PKT_DES_FC_FOR_CFG_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_pkt_des_fc_for_cfg_cnt_reg, + NULL, + NULL, + }, + { + "carb_appoint_qnum_or_sp", + STAT_CAR0_CARB_APPOINT_QNUM_OR_SPr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c1009, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_car0_carb_appoint_qnum_or_sp_reg, + NULL, + NULL, + }, + { + "carb_cfgmt_count_mode", + STAT_CAR0_CARB_CFGMT_COUNT_MODEr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c100a, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_car0_carb_cfgmt_count_mode_reg, + NULL, + NULL, + }, + { + "carb_pkt_size_cnt", + STAT_CAR0_CARB_PKT_SIZE_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c100b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_pkt_size_cnt_reg, + NULL, + NULL, + }, + { + "carb_plcr_init_dont", + STAT_CAR0_CARB_PLCR_INIT_DONTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c100c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_plcr_init_dont_reg, + NULL, + NULL, + }, + { + "carc_queue_ram0_159_0", + STAT_CAR0_CARC_QUEUE_RAM0_159_0r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x200000 + 0x14000000, + (160 / 8), + 0, + 0x3FF + 1, + 0, + 8, + 9, + g_stat_car0_carc_queue_ram0_159_0_reg, + NULL, + NULL, + }, + { + "carc_profile_ram1_255_0", + STAT_CAR0_CARC_PROFILE_RAM1_255_0r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x240000 + 0x20000000, + (256 
/ 8), + 0, + 0x1F + 1, + 0, + 8, + 33, + g_stat_car0_carc_profile_ram1_255_0_reg, + NULL, + NULL, + }, + { + "carc_qovs_ram_ram2", + STAT_CAR0_CARC_QOVS_RAM_RAM2r, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x280000, + (32 / 8), + 0, + 0x3FF + 1, + 0, + 1, + 1, + g_stat_car0_carc_qovs_ram_ram2_reg, + NULL, + NULL, + }, + { + "carc_pkt_des_i_cnt", + STAT_CAR0_CARC_PKT_DES_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_pkt_des_i_cnt_reg, + NULL, + NULL, + }, + { + "carc_green_pkt_i_cnt", + STAT_CAR0_CARC_GREEN_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_green_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carc_yellow_pkt_i_cnt", + STAT_CAR0_CARC_YELLOW_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_yellow_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carc_red_pkt_i_cnt", + STAT_CAR0_CARC_RED_PKT_I_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_red_pkt_i_cnt_reg, + NULL, + NULL, + }, + { + "carc_pkt_des_o_cnt", + STAT_CAR0_CARC_PKT_DES_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_pkt_des_o_cnt_reg, + NULL, + NULL, + }, + { + "carc_green_pkt_o_cnt", + STAT_CAR0_CARC_GREEN_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_green_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carc_yellow_pkt_o_cnt", + STAT_CAR0_CARC_YELLOW_PKT_O_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_yellow_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carc_red_pkt_o_cnt", + STAT_CAR0_CARC_RED_PKT_O_CNTr, + STAT, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_red_pkt_o_cnt_reg, + NULL, + NULL, + }, + { + "carc_pkt_des_fc_for_cfg_cnt", + STAT_CAR0_CARC_PKT_DES_FC_FOR_CFG_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_pkt_des_fc_for_cfg_cnt_reg, + NULL, + NULL, + }, + { + "carc_appoint_qnum_or_sp", + STAT_CAR0_CARC_APPOINT_QNUM_OR_SPr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c2009, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_car0_carc_appoint_qnum_or_sp_reg, + NULL, + NULL, + }, + { + "carc_cfgmt_count_mode", + STAT_CAR0_CARC_CFGMT_COUNT_MODEr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c200a, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_car0_carc_cfgmt_count_mode_reg, + NULL, + NULL, + }, + { + "carc_pkt_size_cnt", + STAT_CAR0_CARC_PKT_SIZE_CNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c200b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_pkt_size_cnt_reg, + NULL, + NULL, + }, + { + "carc_plcr_init_dont", + STAT_CAR0_CARC_PLCR_INIT_DONTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c200c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_plcr_init_dont_reg, + NULL, + NULL, + }, + { + "carb_random_ram", + STAT_CAR0_CARB_RANDOM_RAMr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2d0000 + 0x40000000, + (512 / 8), + 0, + 0x1f + 1, + 0, + 16, + 24, + g_stat_car0_carb_random_ram_reg, + NULL, + NULL, + }, + { + "carc_random_ram", + STAT_CAR0_CARC_RANDOM_RAMr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2e0000 + 0x40000000, + (512 / 8), + 0, + 0x7 + 1, + 0, + 16, + 24, + g_stat_car0_carc_random_ram_reg, + NULL, + NULL, + }, + { + "cara_begin_flow_id", + STAT_CAR0_CARA_BEGIN_FLOW_IDr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_cara_begin_flow_id_reg, + 
NULL, + NULL, + }, + { + "carb_begin_flow_id", + STAT_CAR0_CARB_BEGIN_FLOW_IDr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carb_begin_flow_id_reg, + NULL, + NULL, + }, + { + "carc_begin_flow_id", + STAT_CAR0_CARC_BEGIN_FLOW_IDr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_carc_begin_flow_id_reg, + NULL, + NULL, + }, + { + "prog_full_assert_cfg_w", + STAT_CAR0_PROG_FULL_ASSERT_CFG_Wr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_prog_full_assert_cfg_w_reg, + NULL, + NULL, + }, + { + "prog_full_negate_cfg_w", + STAT_CAR0_PROG_FULL_NEGATE_CFG_Wr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_prog_full_negate_cfg_w_reg, + NULL, + NULL, + }, + { + "timeout_limit", + STAT_CAR0_TIMEOUT_LIMITr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_timeout_limit_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_overflow", + STAT_CAR0_PKT_DES_FIFO_OVERFLOWr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_overflow_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_underflow", + STAT_CAR0_PKT_DES_FIFO_UNDERFLOWr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_underflow_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_prog_full", + STAT_CAR0_PKT_DES_FIFO_PROG_FULLr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_prog_full_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_prog_empty", + STAT_CAR0_PKT_DES_FIFO_PROG_EMPTYr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300009, + (32 / 8), + 0, 
+ 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_prog_empty_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_full", + STAT_CAR0_PKT_DES_FIFO_FULLr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30000a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_full_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_empty", + STAT_CAR0_PKT_DES_FIFO_EMPTYr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_empty_reg, + NULL, + NULL, + }, + { + "pkt_size_offset", + STAT_CAR0_PKT_SIZE_OFFSETr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30000e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_size_offset_reg, + NULL, + NULL, + }, + { + "car_plcr_init_dont", + STAT_CAR0_CAR_PLCR_INIT_DONTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x30000f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_car_plcr_init_dont_reg, + NULL, + NULL, + }, + { + "max_pkt_size_a", + STAT_CAR0_MAX_PKT_SIZE_Ar, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_max_pkt_size_a_reg, + NULL, + NULL, + }, + { + "max_pkt_size_b", + STAT_CAR0_MAX_PKT_SIZE_Br, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300011, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_max_pkt_size_b_reg, + NULL, + NULL, + }, + { + "max_pkt_size_c", + STAT_CAR0_MAX_PKT_SIZE_Cr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300012, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_max_pkt_size_c_reg, + NULL, + NULL, + }, + { + "car_hierarchy_mode", + STAT_CAR0_CAR_HIERARCHY_MODEr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300013, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_car_hierarchy_mode_reg, + NULL, + NULL, + }, + { + "prog_empty_assert_cfg_w", + STAT_CAR0_PROG_EMPTY_ASSERT_CFG_Wr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_stat_car0_prog_empty_assert_cfg_w_reg, + NULL, + NULL, + }, + { + "prog_empty_negate_cfg_w", + STAT_CAR0_PROG_EMPTY_NEGATE_CFG_Wr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300015, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_prog_empty_negate_cfg_w_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_ovf_int", + STAT_CAR0_PKT_DES_FIFO_OVF_INTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300016, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_ovf_int_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_data_count", + STAT_CAR0_PKT_DES_FIFO_DATA_COUNTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300017, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_data_count_reg, + NULL, + NULL, + }, + { + "pkt_des_fifo_udf_int", + STAT_CAR0_PKT_DES_FIFO_UDF_INTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x300018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_car0_pkt_des_fifo_udf_int_reg, + NULL, + NULL, + }, + { + "cara_queue_ram0_159_0_pkt", + STAT_CAR0_CARA_QUEUE_RAM0_159_0_PKTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x000000 + 0x14000000, + (160 / 8), + 0, + 0x7FFF + 1, + 0, + 8, + 8, + g_stat_car0_cara_queue_ram0_159_0_pkt_reg, + NULL, + NULL, + }, + { + "cara_profile_ram1_255_0_pkt", + STAT_CAR0_CARA_PROFILE_RAM1_255_0_PKTr, + STAT, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x040000 + 0x20000000, + (256 / 8), + 0, + 0x1FF + 1, + 0, + 8, + 12, + g_stat_car0_cara_profile_ram1_255_0_pkt_reg, + NULL, + NULL, + }, + { + "block0_7_port_id_cfg", + STAT4K_ETCAM_BLOCK0_7_PORT_ID_CFGr, + STAT4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000005c, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_stat4k_etcam_block0_7_port_id_cfg_reg, + dpp_write, + dpp_read, + }, + { + "block0_3_base_addr_cfg", + STAT4K_ETCAM_BLOCK0_3_BASE_ADDR_CFGr, + STAT4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_ETCAM_BASE_ADDR + 0x00000064, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat4k_etcam_block0_3_base_addr_cfg_reg, + dpp_write, + dpp_read, + }, + { + "block4_7_base_addr_cfg", + STAT4K_ETCAM_BLOCK4_7_BASE_ADDR_CFGr, + STAT4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000068, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat4k_etcam_block4_7_base_addr_cfg_reg, + dpp_write, + dpp_read, + }, + { + "cfg_eram_wr_interval_cnt", + DTB_DTB_CFG_CFG_ERAM_WR_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_eram_wr_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_zcam_wr_interval_cnt", + DTB_DTB_CFG_CFG_ZCAM_WR_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_zcam_wr_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_tcam_wr_interval_cnt", + DTB_DTB_CFG_CFG_TCAM_WR_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_tcam_wr_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_ddr_wr_interval_cnt", + DTB_DTB_CFG_CFG_DDR_WR_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_ddr_wr_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_hash_wr_interval_cnt", + DTB_DTB_CFG_CFG_HASH_WR_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_hash_wr_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_eram_rd_interval_cnt", + DTB_DTB_CFG_CFG_ERAM_RD_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_eram_rd_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_zcam_rd_interval_cnt", + DTB_DTB_CFG_CFG_ZCAM_RD_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_zcam_rd_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_tcam_rd_interval_cnt", + DTB_DTB_CFG_CFG_TCAM_RD_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_tcam_rd_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_ddr_rd_interval_cnt", + DTB_DTB_CFG_CFG_DDR_RD_INTERVAL_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_ddr_rd_interval_cnt_reg, + NULL, + NULL, + }, + { + "cfg_dtb_queue_lock_state_0_3", + DTB_DTB_CFG_CFG_DTB_QUEUE_LOCK_STATE_0_3r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_dtb_dtb_cfg_cfg_dtb_queue_lock_state_0_3_reg, + NULL, + NULL, + }, + { + "w_convert_0_mode", + DTB_DTB_AXIM0_W_CONVERT_0_MODEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_AXIM0_BASE_ADDR + 0x6060, + (32 / 8), + 0, + 1 + 1, + 0, + 256, + 1, + g_dtb_dtb_axim0_w_convert_0_mode_reg, + NULL, + NULL, + }, + { + "r_convert_0_mode", + DTB_DTB_AXIM0_R_CONVERT_0_MODEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_AXIM0_BASE_ADDR + 0x2060, + (32 / 8), + 0, + 1 + 1, + 0, + 256, + 1, + g_dtb_dtb_axim0_r_convert_0_mode_reg, + NULL, + NULL, + }, + { + "aximr_os", + DTB_DTB_AXIM0_AXIMR_OSr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXIM0_BASE_ADDR + 0x2000, + (32 / 8), + 0, + 0, + 0, + 0, + 
1, + g_dtb_dtb_axim0_aximr_os_reg, + NULL, + NULL, + }, + { + "w_convert_1_mode", + DTB_DTB_AXIM1_W_CONVERT_1_MODEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_AXIM1_BASE_ADDR + 0x6060, + (32 / 8), + 0, + 2 + 1, + 0, + 256, + 1, + g_dtb_dtb_axim1_w_convert_1_mode_reg, + NULL, + NULL, + }, + { + "r_convert_1_mode", + DTB_DTB_AXIM1_R_CONVERT_1_MODEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_AXIM1_BASE_ADDR + 0x2060, + (32 / 8), + 0, + 2 + 1, + 0, + 256, + 1, + g_dtb_dtb_axim1_r_convert_1_mode_reg, + NULL, + NULL, + }, + { + "axis_convert_mode", + DTB_DTB_AXIS_AXIS_CONVERT_MODEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXIS_BASE_ADDR + 0x0450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_axis_axis_convert_mode_reg, + NULL, + NULL, + }, + { + "cfg_queue_dtb_addr_h_0_127", + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_H_0_127r, + DTB4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 127 + 1, + 0, + 32, + 1, + g_dtb4k_dtb_enq_cfg_queue_dtb_addr_h_0_127_reg, + dpp_write, + dpp_read, + }, + { + "cfg_queue_dtb_addr_l_0_127", + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_ADDR_L_0_127r, + DTB4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 127 + 1, + 0, + 32, + 1, + g_dtb4k_dtb_enq_cfg_queue_dtb_addr_l_0_127_reg, + dpp_write, + dpp_read, + }, + { + "cfg_queue_dtb_len_0_127", + DTB4K_DTB_ENQ_CFG_QUEUE_DTB_LEN_0_127r, + DTB4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 127 + 1, + 0, + 32, + 3, + g_dtb4k_dtb_enq_cfg_queue_dtb_len_0_127_reg, + dpp_write, + dpp_read, + }, + { + "info_queue_buf_space_left_0_127", + DTB4K_DTB_ENQ_INFO_QUEUE_BUF_SPACE_LEFT_0_127r, + DTB4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 127 + 1, + 0, + 32, + 1, + 
g_dtb4k_dtb_enq_info_queue_buf_space_left_0_127_reg, + dpp_write, + dpp_read, + }, + { + "cfg_epid_v_func_num_0_127", + DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127r, + DTB4K, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_ENQ_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 127 + 1, + 0, + 32, + 7, + g_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_reg, + dpp_write, + dpp_read, + }, + { + "cpu_trpg_ms_en", + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0004, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_ms_en_reg, + NULL, + NULL, + }, + { + "cpu_trpg_port_en", + TRPG_TRPG_RX_PORT_CPU_TRPG_PORT_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0010, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_port_en_reg, + NULL, + NULL, + }, + { + "cpu_trpg_look_en", + TRPG_TRPG_RX_PORT_CPU_TRPG_LOOK_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0014, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_look_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_ram_almost_full", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_ALMOST_FULLr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0028, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_ram_almost_full_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_ram_test_en", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_TEST_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x002c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_ram_test_en_reg, + 
NULL, + NULL, + }, + { + "cpu_trpgrx_inmod_pfc_rdy_en", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_INMOD_PFC_RDY_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0030, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_inmod_pfc_rdy_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_num_h", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_Hr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0034, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_h_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_num_l", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_NUM_Lr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0038, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_num_l_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_byte_num_h", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_Hr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x003c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_h_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_byte_num_l", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_BYTE_NUM_Lr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0040, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_byte_num_l_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_cnt_clr", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_CNT_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0044, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + 
g_trpg_trpg_rx_port_cpu_trpgrx_pkt_cnt_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_fc_clk_freq", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_CLK_FREQr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0048, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_fc_clk_freq_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_fc_en", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x004c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_fc_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_fc_token_add_num", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_ADD_NUMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0050, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_fc_token_add_num_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_fc_token_max_num", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_FC_TOKEN_MAX_NUMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0054, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_fc_token_max_num_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_port_state_info", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PORT_STATE_INFOr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0058, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_port_state_info_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_ram_past_max_dep", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEPr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x005c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + 
DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_ram_past_max_dep_clr", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_RAM_PAST_MAX_DEP_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0060, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_ram_past_max_dep_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_past_max_len", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0064, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_past_max_len_clr", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MAX_LEN_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0068, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_max_len_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_past_min_len", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x006c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_pkt_past_min_len_clr", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_PKT_PAST_MIN_LEN_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0070, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_pkt_past_min_len_clr_reg, + NULL, + NULL, + }, + { + "trpg_rx_data_ram", + TRPG_TRPG_RX_RAM_TRPG_RX_DATA_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_RAM_BASE_ADDR + 0x0000, + (32 / 8), + DPP_TRPG_RAM_NUM, + 2047 + 1, + DPP_TRPG_RAM_SPACE_SIZE, + 4, + 1, + g_trpg_trpg_rx_ram_trpg_rx_data_ram_reg, + NULL, + NULL, + }, + { + "trpg_rx_info_ram", + TRPG_TRPG_RX_RAM_TRPG_RX_INFO_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_RAM_BASE_ADDR + 0x2000, + (32 / 8), + DPP_TRPG_RAM_NUM, + 255 + 1, + DPP_TRPG_RAM_SPACE_SIZE, + 4, + 1, + g_trpg_trpg_rx_ram_trpg_rx_info_ram_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_en", + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0004, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_ms_en_reg, + NULL, + NULL, + }, + { + "cpu_trpg_port_en", + TRPG_TRPG_TX_PORT_CPU_TRPG_PORT_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0010, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_port_en_reg, + NULL, + NULL, + }, + { + "cpu_trpg_look_en", + TRPG_TRPG_TX_PORT_CPU_TRPG_LOOK_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0014, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_look_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_ram_almost_full", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_ALMOST_FULLr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0018, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_ram_almost_full_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_ram_test_en", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_TEST_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + 
MODULE_TRPG_TX_BASE_ADDR + 0x001c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_ram_test_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_num_h", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_Hr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0034, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_h_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_num_l", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_NUM_Lr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0038, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_num_l_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_byte_num_h", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_Hr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x003c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_h_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_byte_num_l", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_BYTE_NUM_Lr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0040, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_byte_num_l_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_cnt_clr", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_CNT_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0044, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_cnt_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_fc_clk_freq", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_CLK_FREQr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + 
SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0048, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_fc_clk_freq_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_fc_en", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x004c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_fc_en_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_fc_token_add_num", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_ADD_NUMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0050, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_fc_token_add_num_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_fc_token_max_num", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_FC_TOKEN_MAX_NUMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0054, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_fc_token_max_num_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_port_state_info", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PORT_STATE_INFOr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0058, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_port_state_info_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_ram_past_max_dep", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEPr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x005c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_ram_past_max_dep_clr", + 
TRPG_TRPG_TX_PORT_CPU_TRPGTX_RAM_PAST_MAX_DEP_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0060, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_ram_past_max_dep_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_past_max_len", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0064, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_past_max_len_clr", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MAX_LEN_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0068, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_max_len_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_past_min_len", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x006c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_pkt_past_min_len_clr", + TRPG_TRPG_TX_PORT_CPU_TRPGTX_PKT_PAST_MIN_LEN_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0070, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpgtx_pkt_past_min_len_clr_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_etm_ram_almost_full", + TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_ALMOST_FULLr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_ETM_PORT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_almost_full_reg, + NULL, + NULL, + }, + { + "cpu_trpgtx_etm_ram_test_en", + TRPG_TRPG_TX_ETM_PORT_CPU_TRPGTX_ETM_RAM_TEST_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_ETM_PORT_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_etm_port_cpu_trpgtx_etm_ram_test_en_reg, + NULL, + NULL, + }, + { + "cpu_todtime_update_int_mask", + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_MASKr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_update_int_mask_reg, + NULL, + NULL, + }, + { + "cpu_todtime_update_int_clr", + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_CLRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_update_int_clr_reg, + NULL, + NULL, + }, + { + "cpu_todtime_ram_test_en", + TRPG_TRPG_TX_GLB_CPU_TODTIME_RAM_TEST_ENr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_ram_test_en_reg, + NULL, + NULL, + }, + { + "trpg_tx_data_ram", + TRPG_TRPG_TX_RAM_TRPG_TX_DATA_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_RAM_BASE_ADDR + 0x0000, + (32 / 8), + DPP_TRPG_RAM_NUM, + 12031 + 1, + DPP_TRPG_RAM_SPACE_SIZE, + 4, + 1, + g_trpg_trpg_tx_ram_trpg_tx_data_ram_reg, + NULL, + NULL, + }, + { + "trpg_tx_info_ram", + TRPG_TRPG_TX_RAM_TRPG_TX_INFO_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_RAM_BASE_ADDR + 0xc000, + (32 / 8), + DPP_TRPG_RAM_NUM, + 751 + 1, + DPP_TRPG_RAM_SPACE_SIZE, + 4, + 1, + g_trpg_trpg_tx_ram_trpg_tx_info_ram_reg, + NULL, + NULL, + }, + { + 
"trpg_tx_etm_data_ram", + TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_DATA_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_ETM_RAM_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 511 + 1, + 0, + 4, + 1, + g_trpg_trpg_tx_etm_ram_trpg_tx_etm_data_ram_reg, + NULL, + NULL, + }, + { + "trpg_tx_etm_info_ram", + TRPG_TRPG_TX_ETM_RAM_TRPG_TX_ETM_INFO_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_ETM_RAM_BASE_ADDR + 0x800, + (32 / 8), + 0, + 31 + 1, + 0, + 4, + 1, + g_trpg_trpg_tx_etm_ram_trpg_tx_etm_info_ram_reg, + NULL, + NULL, + }, + { + "chip_version_reg", + ETM_CFGMT_CHIP_VERSION_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x40, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_cfgmt_chip_version_reg_reg, + NULL, + NULL, + }, + { + "chip_date_reg", + ETM_CFGMT_CHIP_DATE_REGr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x50, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_chip_date_reg_reg, + NULL, + NULL, + }, + { + "cfgmt_crc_en", + ETM_CFGMT_CFGMT_CRC_ENr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x80, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cfgmt_crc_en_reg, + NULL, + NULL, + }, + { + "cfg_port_transfer_en", + ETM_CFGMT_CFG_PORT_TRANSFER_ENr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_cfg_port_transfer_en_reg, + NULL, + NULL, + }, + { + "tm_sa_work_mode", + ETM_CFGMT_TM_SA_WORK_MODEr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_tm_sa_work_mode_reg, + NULL, + NULL, + }, + { + "local_sa_id", + ETM_CFGMT_LOCAL_SA_IDr, + ETM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 0x190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cfgmt_local_sa_id_reg, + NULL, + NULL, + }, + { + "olif_rdy", + ETM_OLIF_OLIF_RDYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + 
g_etm_olif_olif_rdy_reg, + NULL, + NULL, + }, + { + "emem_prog_full", + ETM_OLIF_EMEM_PROG_FULLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_emem_prog_full_reg, + NULL, + NULL, + }, + { + "port_order_fifo_full", + ETM_OLIF_PORT_ORDER_FIFO_FULLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_port_order_fifo_full_reg, + NULL, + NULL, + }, + { + "olif_release_last", + ETM_OLIF_OLIF_RELEASE_LASTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x6, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_olif_olif_release_last_reg, + NULL, + NULL, + }, + { + "olif_fifo_empty_state", + ETM_OLIF_OLIF_FIFO_EMPTY_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xa, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_olif_olif_fifo_empty_state_reg, + NULL, + NULL, + }, + { + "qmu_olif_release_fc_cnt", + ETM_OLIF_QMU_OLIF_RELEASE_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xb, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_release_fc_cnt_reg, + NULL, + NULL, + }, + { + "olif_qmu_link_fc_cnt", + ETM_OLIF_OLIF_QMU_LINK_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_qmu_link_fc_cnt_reg, + NULL, + NULL, + }, + { + "lif0_link_fc_cnt", + ETM_OLIF_LIF0_LINK_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xd, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_link_fc_cnt_reg, + NULL, + NULL, + }, + { + "olif_tmmu_fc_cnt", + ETM_OLIF_OLIF_TMMU_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_tmmu_fc_cnt_reg, + NULL, + NULL, + }, + { + "olif_mmu_fc_cnt", + ETM_OLIF_OLIF_MMU_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xf, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_mmu_fc_cnt_reg, + NULL, + NULL, + }, + { + 
"olif_qmu_port_rdy_h", + ETM_OLIF_OLIF_QMU_PORT_RDY_Hr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_qmu_port_rdy_h_reg, + NULL, + NULL, + }, + { + "olif_qmu_port_rdy_l", + ETM_OLIF_OLIF_QMU_PORT_RDY_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x11, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_qmu_port_rdy_l_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_h", + ETM_OLIF_LIF0_PORT_RDY_Hr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_port_rdy_h_reg, + NULL, + NULL, + }, + { + "lif0_port_rdy_l", + ETM_OLIF_LIF0_PORT_RDY_Lr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x13, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_lif0_port_rdy_l_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_sop_cnt", + ETM_OLIF_QMU_OLIF_RD_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_sop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_eop_cnt", + ETM_OLIF_QMU_OLIF_RD_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x15, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_eop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_vld_cnt", + ETM_OLIF_QMU_OLIF_RD_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x16, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_vld_cnt_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_blk_cnt", + ETM_OLIF_QMU_OLIF_RD_BLK_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x17, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_blk_cnt_reg, + NULL, + NULL, + }, + { + "mmu_tm_data_sop_cnt", + ETM_OLIF_MMU_TM_DATA_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x18, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_mmu_tm_data_sop_cnt_reg, + NULL, + NULL, + }, + { + "mmu_tm_data_eop_cnt", + 
ETM_OLIF_MMU_TM_DATA_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x19, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_mmu_tm_data_eop_cnt_reg, + NULL, + NULL, + }, + { + "mmu_tm_data_vld_cnt", + ETM_OLIF_MMU_TM_DATA_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_mmu_tm_data_vld_cnt_reg, + NULL, + NULL, + }, + { + "odma_tm_data_sop_cnt", + ETM_OLIF_ODMA_TM_DATA_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_odma_tm_data_sop_cnt_reg, + NULL, + NULL, + }, + { + "odma_tm_data_eop_cnt", + ETM_OLIF_ODMA_TM_DATA_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_odma_tm_data_eop_cnt_reg, + NULL, + NULL, + }, + { + "odma_tm_deq_vld_cnt", + ETM_OLIF_ODMA_TM_DEQ_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_odma_tm_deq_vld_cnt_reg, + NULL, + NULL, + }, + { + "olif_qmu_release_vld_cnt", + ETM_OLIF_OLIF_QMU_RELEASE_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_olif_qmu_release_vld_cnt_reg, + NULL, + NULL, + }, + { + "emem_dat_vld_cnt", + ETM_OLIF_EMEM_DAT_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_emem_dat_vld_cnt_reg, + NULL, + NULL, + }, + { + "imem_dat_vld_cnt", + ETM_OLIF_IMEM_DAT_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x21, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_imem_dat_vld_cnt_reg, + NULL, + NULL, + }, + { + "emem_dat_rd_cnt", + ETM_OLIF_EMEM_DAT_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x22, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_emem_dat_rd_cnt_reg, + NULL, + NULL, + }, + { + "imem_dat_rd_cnt", + ETM_OLIF_IMEM_DAT_RD_CNTr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x23, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_imem_dat_rd_cnt_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_sop_emem_cnt", + ETM_OLIF_QMU_OLIF_RD_SOP_EMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x24, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_sop_emem_cnt_reg, + NULL, + NULL, + }, + { + "qmu_olif_rd_vld_emem_cnt", + ETM_OLIF_QMU_OLIF_RD_VLD_EMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x25, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_rd_vld_emem_cnt_reg, + NULL, + NULL, + }, + { + "cpu_last_wr_addr", + ETM_OLIF_CPU_LAST_WR_ADDRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x26, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_cpu_last_wr_addr_reg, + NULL, + NULL, + }, + { + "cpu_last_wr_data", + ETM_OLIF_CPU_LAST_WR_DATAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x27, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_cpu_last_wr_data_reg, + NULL, + NULL, + }, + { + "cpu_last_rd_addr", + ETM_OLIF_CPU_LAST_RD_ADDRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x28, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_cpu_last_rd_addr_reg, + NULL, + NULL, + }, + { + "qmu_olif_last_port", + ETM_OLIF_QMU_OLIF_LAST_PORTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x35, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_last_port_reg, + NULL, + NULL, + }, + { + "qmu_olif_last_addr", + ETM_OLIF_QMU_OLIF_LAST_ADDRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x36, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_last_addr_reg, + NULL, + NULL, + }, + { + "qmu_olif_last_bank", + ETM_OLIF_QMU_OLIF_LAST_BANKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x37, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_olif_qmu_olif_last_bank_reg, + NULL, + NULL, + }, + { + "tm_lif_byte_stat", + ETM_OLIF_TM_LIF_BYTE_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, 
+ 0x70, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 1, + g_etm_olif_tm_lif_byte_stat_reg, + NULL, + NULL, + }, + { + "tm_lif_err_stat", + ETM_OLIF_TM_LIF_ERR_STATr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x90, + (32 / 8), + 0, + 0xf + 1, + 0, + 0x1, + 1, + g_etm_olif_tm_lif_err_stat_reg, + NULL, + NULL, + }, + { + "port_share_cnt", + ETM_CGAVD_PORT_SHARE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_port_share_cnt_reg, + NULL, + NULL, + }, + { + "total_imem_cnt", + ETM_CGAVD_TOTAL_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xa, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_total_imem_cnt_reg, + NULL, + NULL, + }, + { + "pp_q_len", + ETM_CGAVD_PP_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xa00, + (32 / 8), + 0, + 0x7f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_pp_q_len_reg, + NULL, + NULL, + }, + { + "sys_q_len", + ETM_CGAVD_SYS_Q_LENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1205, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_sys_q_len_reg, + NULL, + NULL, + }, + { + "cgavd_cfg_error_warning", + ETM_CGAVD_CGAVD_CFG_ERROR_WARNINGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12de, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_etm_cgavd_cgavd_cfg_error_warning_reg, + NULL, + NULL, + }, + { + "mult_qlen_th_en", + ETM_CGAVD_MULT_QLEN_TH_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12df, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_mult_qlen_th_en_reg, + NULL, + NULL, + }, + { + "mult_qlen_th", + ETM_CGAVD_MULT_QLEN_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_mult_qlen_th_reg, + NULL, + NULL, + }, + { + "cgavd_cfg_move", + ETM_CGAVD_CGAVD_CFG_MOVEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e1, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_etm_cgavd_cgavd_cfg_move_reg, + NULL, + NULL, + }, + { + 
"cfgmt_total_th", + ETM_CGAVD_CFGMT_TOTAL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cfgmt_total_th_reg, + NULL, + NULL, + }, + { + "cfgmt_port_share_th", + ETM_CGAVD_CFGMT_PORT_SHARE_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x12e3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cfgmt_port_share_th_reg, + NULL, + NULL, + }, + { + "sa_unreach_state", + ETM_CGAVD_SA_UNREACH_STATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1300, + (32 / 8), + 0, + 0x3 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_sa_unreach_state_reg, + NULL, + NULL, + }, + { + "mv_port_th", + ETM_CGAVD_MV_PORT_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2000, + (32 / 8), + 0, + 0x3f + 1, + 0, + 0x1, + 1, + g_etm_cgavd_mv_port_th_reg, + NULL, + NULL, + }, + { + "mv_drop_sp_th", + ETM_CGAVD_MV_DROP_SP_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x3000, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_mv_drop_sp_th_reg, + NULL, + NULL, + }, + { + "cgavd_state_warning", + ETM_CGAVD_CGAVD_STATE_WARNINGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250006, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_etm_cgavd_cgavd_state_warning_reg, + NULL, + NULL, + }, + { + "tmmu_cgavd_dma_fifo_cnt", + ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_cgavd_dma_fifo_cnt_max", + ETM_CGAVD_TMMU_CGAVD_DMA_FIFO_CNT_MAXr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x250009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_tmmu_cgavd_dma_fifo_cnt_max_reg, + NULL, + NULL, + }, + { + "imem_total_cnt", + ETM_CGAVD_IMEM_TOTAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x25000a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_imem_total_cnt_reg, + NULL, + NULL, + }, + { + 
"imem_total_cnt_max", + ETM_CGAVD_IMEM_TOTAL_CNT_MAXr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x25000b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_imem_total_cnt_max_reg, + NULL, + NULL, + }, + { + "flow0_omem_cnt", + ETM_CGAVD_FLOW0_OMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow0_omem_cnt_reg, + NULL, + NULL, + }, + { + "flow1_omem_cnt", + ETM_CGAVD_FLOW1_OMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260021, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow1_omem_cnt_reg, + NULL, + NULL, + }, + { + "flow2_omem_cnt", + ETM_CGAVD_FLOW2_OMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260022, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow2_omem_cnt_reg, + NULL, + NULL, + }, + { + "flow3_omem_cnt", + ETM_CGAVD_FLOW3_OMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260023, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow3_omem_cnt_reg, + NULL, + NULL, + }, + { + "flow4_omem_cnt", + ETM_CGAVD_FLOW4_OMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x260024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_flow4_omem_cnt_reg, + NULL, + NULL, + }, + { + "appoint_flow_num_message_1", + ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x261000, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_appoint_flow_num_message_1_reg, + NULL, + NULL, + }, + { + "appoint_flow_num_message_2", + ETM_CGAVD_APPOINT_FLOW_NUM_MESSAGE_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x261001, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_cgavd_appoint_flow_num_message_2_reg, + NULL, + NULL, + }, + { + "odma_cgavd_pkt_num_1", + ETM_CGAVD_ODMA_CGAVD_PKT_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_cgavd_pkt_num_1_reg, + NULL, + NULL, + }, + { + 
"odma_cgavd_byte_num_1", + ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_cgavd_byte_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_enqueue_pkt_num_1", + ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_enqueue_pkt_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_dequeue_pkt_num_1", + ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_dequeue_pkt_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_imem_num_1", + ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_imem_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_omem_num_1", + ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_omem_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_imem_num_1", + ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_imem_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_omem_num_1", + ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_omem_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_drop_num_1", + ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_drop_num_1", + ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_1r, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_forbid_drop_num_1", + ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_forbid_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_td_drop_num_1", + ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262011, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_flow_td_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_wred_drop_num_1", + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262012, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_flow_wred_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_wred_dp_drop_num_1", + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x262100, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_td_num_1", + ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pp_td_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_wred_drop_num_1", + ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262201, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pp_wred_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_wred_dp_drop_num_1", + ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x262300, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_td_drop_num_1", + 
ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sys_td_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_gred_drop_num_1", + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262401, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sys_gred_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_gred_dp_drop_num1", + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x262500, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sa_drop_num_1", + ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262600, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sa_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_move_drop_num_1", + ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262601, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_move_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_tm_mult_drop_num_1", + ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262602, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_tm_mult_drop_num_1_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_tm_error_drop_num_1", + ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x262603, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_tm_error_drop_num_1_reg, + NULL, + NULL, + }, + { + "odma_cgavd_pkt_num_2", + ETM_CGAVD_ODMA_CGAVD_PKT_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_cgavd_pkt_num_2_reg, + NULL, + NULL, + }, + { + "odma_cgavd_byte_num_2", + 
ETM_CGAVD_ODMA_CGAVD_BYTE_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263001, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_odma_cgavd_byte_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_enqueue_pkt_num_2", + ETM_CGAVD_CGAVD_ENQUEUE_PKT_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263002, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_enqueue_pkt_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_dequeue_pkt_num_2", + ETM_CGAVD_CGAVD_DEQUEUE_PKT_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263003, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_dequeue_pkt_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_imem_num_2", + ETM_CGAVD_CGAVD_QMU_PKT_IMEM_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_imem_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_omem_num_2", + ETM_CGAVD_CGAVD_QMU_PKT_OMEM_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263005, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_omem_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_imem_num_2", + ETM_CGAVD_CGAVD_QMU_BYTE_IMEM_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263006, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_imem_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_omem_num_2", + ETM_CGAVD_CGAVD_QMU_BYTE_OMEM_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263007, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_omem_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pkt_drop_num_2", + ETM_CGAVD_CGAVD_QMU_PKT_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pkt_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_byte_drop_num_2", + ETM_CGAVD_CGAVD_QMU_BYTE_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + 
DPP_REG_NUL_ARRAY, + 0x263009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_byte_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_forbid_drop_num_2", + ETM_CGAVD_CGAVD_QMU_FORBID_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_forbid_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_td_drop_num_2", + ETM_CGAVD_CGAVD_QMU_FLOW_TD_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263011, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_flow_td_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_wred_drop_num_2", + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263012, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_flow_wred_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_flow_wred_dp_drop_num_2", + ETM_CGAVD_CGAVD_QMU_FLOW_WRED_DP_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x263100, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_flow_wred_dp_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_td_num_2", + ETM_CGAVD_CGAVD_QMU_PP_TD_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pp_td_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_wred_drop_num_2", + ETM_CGAVD_CGAVD_QMU_PP_WRED_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263201, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_pp_wred_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_pp_wred_dp_drop_num_2", + ETM_CGAVD_CGAVD_QMU_PP_WRED_DP_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x263300, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_pp_wred_dp_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_td_drop_num_2", + ETM_CGAVD_CGAVD_QMU_SYS_TD_DROP_NUM_2r, 
+ ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sys_td_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_gred_drop_num_2", + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263401, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sys_gred_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sys_gred_dp_drop_num_2", + ETM_CGAVD_CGAVD_QMU_SYS_GRED_DP_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x263500, + (32 / 8), + 0, + 0x7 + 1, + 0, + 0x1, + 1, + g_etm_cgavd_cgavd_qmu_sys_gred_dp_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_sa_drop_num_2", + ETM_CGAVD_CGAVD_QMU_SA_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263600, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_sa_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_move_drop_num_2", + ETM_CGAVD_CGAVD_QMU_MOVE_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263601, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_move_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_tm_mult_drop_num_2", + ETM_CGAVD_CGAVD_QMU_TM_MULT_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263602, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_tm_mult_drop_num_2_reg, + NULL, + NULL, + }, + { + "cgavd_qmu_tm_error_drop_num_2", + ETM_CGAVD_CGAVD_QMU_TM_ERROR_DROP_NUM_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x263603, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_cgavd_cgavd_qmu_tm_error_drop_num_2_reg, + NULL, + NULL, + }, + { + "move_flow_th_profile", + ETM_CGAVD_MOVE_FLOW_TH_PROFILEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x270000, + (32 / 8), + 0, + 0x23FF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_move_flow_th_profile_reg, + NULL, + NULL, + }, + { + "move_flow_th", + ETM_CGAVD_MOVE_FLOW_THr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x280000, + (32 / 8), + 0, + 0xF + 1, + 0, + 0x1, + 1, + g_etm_cgavd_move_flow_th_reg, + NULL, + NULL, + }, + { + "emem_pd_fifo_aful_th", + ETM_TMMU_EMEM_PD_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0009, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_emem_pd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "dma_data_fifo_aful_th", + ETM_TMMU_DMA_DATA_FIFO_AFUL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_dma_data_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "tmmu_states_0", + ETM_TMMU_TMMU_STATES_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 31, + g_etm_tmmu_tmmu_states_0_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_wr_sop_cnt", + ETM_TMMU_QMU_TMMU_WR_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0013, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_wr_sop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_wr_eop_cnt", + ETM_TMMU_QMU_TMMU_WR_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_wr_eop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_wr_drop_cnt", + ETM_TMMU_QMU_TMMU_WR_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0015, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_wr_drop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_wr_emem_cnt", + ETM_TMMU_QMU_TMMU_WR_EMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0016, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_wr_emem_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_wr_imem_cnt", + ETM_TMMU_QMU_TMMU_WR_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0017, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_wr_imem_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_mmu_wr_sop_cnt", + ETM_TMMU_TMMU_MMU_WR_SOP_CNTr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_mmu_wr_sop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_mmu_wr_eop_cnt", + ETM_TMMU_TMMU_MMU_WR_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0019, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_mmu_wr_eop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_rd_sop_cnt", + ETM_TMMU_QMU_TMMU_RD_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_rd_sop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_rd_eop_cnt", + ETM_TMMU_QMU_TMMU_RD_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_rd_eop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_rd_drop_cnt", + ETM_TMMU_QMU_TMMU_RD_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_rd_drop_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_rd_emem_cnt", + ETM_TMMU_QMU_TMMU_RD_EMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_rd_emem_cnt_reg, + NULL, + NULL, + }, + { + "qmu_tmmu_rd_imem_cnt", + ETM_TMMU_QMU_TMMU_RD_IMEM_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_tmmu_rd_imem_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_mmu_rd_sop_cnt", + ETM_TMMU_TMMU_MMU_RD_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x001f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_mmu_rd_sop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_mmu_rd_eop_cnt", + ETM_TMMU_TMMU_MMU_RD_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_mmu_rd_eop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_in_sop_cnt", + ETM_TMMU_TMMU_ODMA_IN_SOP_CNTr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0021, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_in_sop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_in_eop_cnt", + ETM_TMMU_TMMU_ODMA_IN_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0022, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_in_eop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_vld_cnt", + ETM_TMMU_TMMU_ODMA_VLD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0023, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_vld_cnt_reg, + NULL, + NULL, + }, + { + "qmu_pd_in_cnt", + ETM_TMMU_QMU_PD_IN_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_qmu_pd_in_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_pd_hit_cnt", + ETM_TMMU_TMMU_PD_HIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0025, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_pd_hit_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_pd_out_cnt", + ETM_TMMU_TMMU_PD_OUT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0026, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_pd_out_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_wr_cmd_fifo_wr_cnt", + ETM_TMMU_TMMU_WR_CMD_FIFO_WR_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0027, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_wr_cmd_fifo_wr_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_imem_age_cnt", + ETM_TMMU_TMMU_IMEM_AGE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_imem_age_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_cmdsch_rd_cnt", + ETM_TMMU_TMMU_CMDSCH_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0029, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_cmdsch_rd_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_cmdsch_drop_cnt", + ETM_TMMU_TMMU_CMDSCH_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 
0x002a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_cmdsch_drop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_cmdsw_drop_cnt", + ETM_TMMU_TMMU_CMDSW_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_cmdsw_drop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_enq_rd_cnt", + ETM_TMMU_TMMU_ODMA_ENQ_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_enq_rd_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_enq_drop_cnt", + ETM_TMMU_TMMU_ODMA_ENQ_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_enq_drop_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_imem_age_cnt", + ETM_TMMU_TMMU_ODMA_IMEM_AGE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_imem_age_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_deq_rd_cnt", + ETM_TMMU_TMMU_ODMA_DEQ_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x002f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_deq_rd_cnt_reg, + NULL, + NULL, + }, + { + "tmmu_odma_deq_drop_cnt", + ETM_TMMU_TMMU_ODMA_DEQ_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tmmu_odma_deq_drop_cnt_reg, + NULL, + NULL, + }, + { + "olif_tmmu_xoff_cnt", + ETM_TMMU_OLIF_TMMU_XOFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0031, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_olif_tmmu_xoff_cnt_reg, + NULL, + NULL, + }, + { + "odma_tm_data_xoff_cnt", + ETM_TMMU_ODMA_TM_DATA_XOFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0032, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_odma_tm_data_xoff_cnt_reg, + NULL, + NULL, + }, + { + "tm_odma_pkt_xoff_cnt", + ETM_TMMU_TM_ODMA_PKT_XOFF_CNTr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0033, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_tm_odma_pkt_xoff_cnt_reg, + NULL, + NULL, + }, + { + "tm_state_3", + ETM_TMMU_TM_STATE_3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_tmmu_tm_state_3_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_cmd", + ETM_TMMU_CFGMT_PD_CACHE_CMDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_cmd_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_rd_done", + ETM_TMMU_CFGMT_PD_CACHE_RD_DONEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0051, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_rd_done_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_rd_data_0", + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0052, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_rd_data_0_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_rd_data_1", + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0053, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_rd_data_1_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_rd_data_2", + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_rd_data_2_reg, + NULL, + NULL, + }, + { + "cfgmt_pd_cache_rd_data_3", + ETM_TMMU_CFGMT_PD_CACHE_RD_DATA_3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0055, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_pd_cache_rd_data_3_reg, + NULL, + NULL, + }, + { + "cfgmt_tmmu_to_odma_para", + ETM_TMMU_CFGMT_TMMU_TO_ODMA_PARAr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0056, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_tmmu_to_odma_para_reg, + NULL, + NULL, + }, + { + 
"cfgmt_dma_data_fifo_cnt", + ETM_TMMU_CFGMT_DMA_DATA_FIFO_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0057, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_dma_data_fifo_cnt_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit0_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT0_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit0_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit1_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT1_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0061, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit1_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit2_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT2_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0062, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit2_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit3_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT3_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0063, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit3_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit4_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT4_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit4_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_tag_bit5_offset", + ETM_TMMU_CFGMT_CACHE_TAG_BIT5_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0065, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_tag_bit5_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit0_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT0_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0066, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit0_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit1_offset", + 
ETM_TMMU_CFGMT_CACHE_INDEX_BIT1_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0067, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit1_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit2_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT2_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit2_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit3_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT3_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0069, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit3_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit4_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT4_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit4_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit5_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT5_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit5_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit6_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT6_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit6_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit7_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT7_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit7_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit8_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT8_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit8_offset_reg, + NULL, + NULL, + }, + { + 
"cfgmt_cache_index_bit9_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT9_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x006f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit9_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit10_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT10_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit10_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit11_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT11_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0071, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit11_offset_reg, + NULL, + NULL, + }, + { + "cfgmt_cache_index_bit12_offset", + ETM_TMMU_CFGMT_CACHE_INDEX_BIT12_OFFSETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0072, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_tmmu_cfgmt_cache_index_bit12_offset_reg, + NULL, + NULL, + }, + { + "bktfull_fifo_full_flagregister", + ETM_SHAP_BKTFULL_FIFO_FULL_FLAGREGISTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x26, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_bktfull_fifo_full_flagregister_reg, + NULL, + NULL, + }, + { + "fifo_full_regregister", + ETM_SHAP_FIFO_FULL_REGREGISTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x29, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_fifo_full_regregister_reg, + NULL, + NULL, + }, + { + "fifo_empty_regregister", + ETM_SHAP_FIFO_EMPTY_REGREGISTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_fifo_empty_regregister_reg, + NULL, + NULL, + }, + { + "fifo_almost_full_regregister", + ETM_SHAP_FIFO_ALMOST_FULL_REGREGISTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_fifo_almost_full_regregister_reg, + NULL, + NULL, + }, + { + "fifo_almost_empty_regregister", + 
ETM_SHAP_FIFO_ALMOST_EMPTY_REGREGISTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x32, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_shap_fifo_almost_empty_regregister_reg, + NULL, + NULL, + }, + { + "credit_space_select", + ETM_CRDT_CREDIT_SPACE_SELECTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x000e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_credit_space_select_reg, + NULL, + NULL, + }, + { + "stat_space_max", + ETM_CRDT_STAT_SPACE_MAXr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x30, + (32 / 8), + 0, + 0x0f + 1, + 0, + 1, + 1, + g_etm_crdt_stat_space_max_reg, + NULL, + NULL, + }, + { + "stat_space_min", + ETM_CRDT_STAT_SPACE_MINr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x40, + (32 / 8), + 0, + 0x0f + 1, + 0, + 1, + 1, + g_etm_crdt_stat_space_min_reg, + NULL, + NULL, + }, + { + "stat_space_credit", + ETM_CRDT_STAT_SPACE_CREDITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x50, + (32 / 8), + 0, + 0x0f + 1, + 0, + 1, + 1, + g_etm_crdt_stat_space_credit_reg, + NULL, + NULL, + }, + { + "stat_que_step8_credit", + ETM_CRDT_STAT_QUE_STEP8_CREDITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x60, + (32 / 8), + 0, + 0x07 + 1, + 0, + 1, + 1, + g_etm_crdt_stat_que_step8_credit_reg, + NULL, + NULL, + }, + { + "special_que", + ETM_CRDT_SPECIAL_QUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x68, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_special_que_reg, + NULL, + NULL, + }, + { + "special_que_credit", + ETM_CRDT_SPECIAL_QUE_CREDITr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x69, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_special_que_credit_reg, + NULL, + NULL, + }, + { + "lif_congest_credit_cnt", + ETM_CRDT_LIF_CONGEST_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x70, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_lif_congest_credit_cnt_reg, + NULL, + NULL, + }, + { + "lif_port_congest_credit_cnt", + 
ETM_CRDT_LIF_PORT_CONGEST_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x71, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_lif_port_congest_credit_cnt_reg, + NULL, + NULL, + }, + { + "crdt_congest_credit_cnt", + ETM_CRDT_CRDT_CONGEST_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x72, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_congest_credit_cnt_reg, + NULL, + NULL, + }, + { + "crdt_port_congest_credit_cnt", + ETM_CRDT_CRDT_PORT_CONGEST_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x73, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_port_congest_credit_cnt_reg, + NULL, + NULL, + }, + { + "congest_port_id", + ETM_CRDT_CONGEST_PORT_IDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x74, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_congest_port_id_reg, + NULL, + NULL, + }, + { + "dev_link_control", + ETM_CRDT_DEV_LINK_CONTROLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x75, + (32 / 8), + 0, + 0x1 + 1, + 0, + 1, + 1, + g_etm_crdt_dev_link_control_reg, + NULL, + NULL, + }, + { + "crdt_sa_port_rdy", + ETM_CRDT_CRDT_SA_PORT_RDYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x77, + (32 / 8), + 0, + 0x1 + 1, + 0, + 1, + 1, + g_etm_crdt_crdt_sa_port_rdy_reg, + NULL, + NULL, + }, + { + "crdt_congest_mode_select", + ETM_CRDT_CRDT_CONGEST_MODE_SELECTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x81, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_congest_mode_select_reg, + NULL, + NULL, + }, + { + "fifo_out_all_crs_normal_cnt", + ETM_CRDT_FIFO_OUT_ALL_CRS_NORMAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x82, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_fifo_out_all_crs_normal_cnt_reg, + NULL, + NULL, + }, + { + "fifo_out_all_crs_off_cnt", + ETM_CRDT_FIFO_OUT_ALL_CRS_OFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x83, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_etm_crdt_fifo_out_all_crs_off_cnt_reg, + NULL, + NULL, + }, + { + "fifo_out_que_crs_normal_cnt", + ETM_CRDT_FIFO_OUT_QUE_CRS_NORMAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x84, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_fifo_out_que_crs_normal_cnt_reg, + NULL, + NULL, + }, + { + "fifo_out_que_crs_off_cnt", + ETM_CRDT_FIFO_OUT_QUE_CRS_OFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x85, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_fifo_out_que_crs_off_cnt_reg, + NULL, + NULL, + }, + { + "mode_add_60g", + ETM_CRDT_MODE_ADD_60Gr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x9a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_mode_add_60g_reg, + NULL, + NULL, + }, + { + "pp_token_add", + ETM_CRDT_PP_TOKEN_ADDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x9b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_pp_token_add_reg, + NULL, + NULL, + }, + { + "pp_cir_token_total_dist_cnt", + ETM_CRDT_PP_CIR_TOKEN_TOTAL_DIST_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x9c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_pp_cir_token_total_dist_cnt_reg, + NULL, + NULL, + }, + { + "pp_cir_token_total_dec_cnt", + ETM_CRDT_PP_CIR_TOKEN_TOTAL_DEC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x9d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_pp_cir_token_total_dec_cnt_reg, + NULL, + NULL, + }, + { + "dev_credit_cnt", + ETM_CRDT_DEV_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xb0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_dev_credit_cnt_reg, + NULL, + NULL, + }, + { + "no_credit_cnt1", + ETM_CRDT_NO_CREDIT_CNT1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xb7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_no_credit_cnt1_reg, + NULL, + NULL, + }, + { + "no_credit_cnt2", + ETM_CRDT_NO_CREDIT_CNT2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xb8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_etm_crdt_no_credit_cnt2_reg, + NULL, + NULL, + }, + { + "asm_interval_0_cfg", + ETM_CRDT_ASM_INTERVAL_0_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_0_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_1_cfg", + ETM_CRDT_ASM_INTERVAL_1_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_1_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_2_cfg", + ETM_CRDT_ASM_INTERVAL_2_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xc9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_2_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_3_cfg", + ETM_CRDT_ASM_INTERVAL_3_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xca, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_3_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_4_cfg", + ETM_CRDT_ASM_INTERVAL_4_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xcb, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_4_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_5cfg", + ETM_CRDT_ASM_INTERVAL_5CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xcc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_5cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_6_cfg", + ETM_CRDT_ASM_INTERVAL_6_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xcd, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_6_cfg_reg, + NULL, + NULL, + }, + { + "asm_interval_7_cfg", + ETM_CRDT_ASM_INTERVAL_7_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xce, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_asm_interval_7_cfg_reg, + NULL, + NULL, + }, + { + "crdt_total_congest_mode_cfg", + ETM_CRDT_CRDT_TOTAL_CONGEST_MODE_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xcf, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_total_congest_mode_cfg_reg, 
+ NULL, + NULL, + }, + { + "rci_fifo_ini_deep_cfg", + ETM_CRDT_RCI_FIFO_INI_DEEP_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xd0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_fifo_ini_deep_cfg_reg, + NULL, + NULL, + }, + { + "crdt_ecc", + ETM_CRDT_CRDT_ECCr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x121, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_etm_crdt_crdt_ecc_reg, + NULL, + NULL, + }, + { + "ucn_asm_rdy_shield_en", + ETM_CRDT_UCN_ASM_RDY_SHIELD_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x013a, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_ucn_asm_rdy_shield_en_reg, + NULL, + NULL, + }, + { + "ucn_asm_rdy", + ETM_CRDT_UCN_ASM_RDYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x013b, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_ucn_asm_rdy_reg, + NULL, + NULL, + }, + { + "rci_grade", + ETM_CRDT_RCI_GRADEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x013c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_rci_grade_reg, + NULL, + NULL, + }, + { + "crdt_rci_value_r", + ETM_CRDT_CRDT_RCI_VALUE_Rr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x013d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_rci_value_r_reg, + NULL, + NULL, + }, + { + "crdt_interval_now", + ETM_CRDT_CRDT_INTERVAL_NOWr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x013e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crdt_interval_now_reg, + NULL, + NULL, + }, + { + "crs_sheild_flow_id_cfg", + ETM_CRDT_CRS_SHEILD_FLOW_ID_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crs_sheild_flow_id_cfg_reg, + NULL, + NULL, + }, + { + "crs_sheild_en_cfg", + ETM_CRDT_CRS_SHEILD_EN_CFGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0141, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crs_sheild_en_cfg_reg, + NULL, + NULL, + }, + { + "crs_sheild_value_cfg", + ETM_CRDT_CRS_SHEILD_VALUE_CFGr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x0142, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_crs_sheild_value_cfg_reg, + NULL, + NULL, + }, + { + "test_token_calc_ctrl", + ETM_CRDT_TEST_TOKEN_CALC_CTRLr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_crdt_test_token_calc_ctrl_reg, + NULL, + NULL, + }, + { + "test_token_sample_cycle_num", + ETM_CRDT_TEST_TOKEN_SAMPLE_CYCLE_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_crdt_test_token_sample_cycle_num_reg, + NULL, + NULL, + }, + { + "q_state_0_7", + ETM_CRDT_Q_STATE_0_7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14e, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_crdt_q_state_0_7_reg, + NULL, + NULL, + }, + { + "q_state_8_15", + ETM_CRDT_Q_STATE_8_15r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14f, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_crdt_q_state_8_15_reg, + NULL, + NULL, + }, + { + "csw_csch_rd_cmd_cnt", + ETM_QMU_CSW_CSCH_RD_CMD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x14, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_csch_rd_cmd_cnt_reg, + NULL, + NULL, + }, + { + "csw_csch_rd_sop_cnt", + ETM_QMU_CSW_CSCH_RD_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x15, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_csch_rd_sop_cnt_reg, + NULL, + NULL, + }, + { + "csw_csch_rd_eop_cnt", + ETM_QMU_CSW_CSCH_RD_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x16, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_csch_rd_eop_cnt_reg, + NULL, + NULL, + }, + { + "csw_csch_rd_drop_cnt", + ETM_QMU_CSW_CSCH_RD_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x17, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_csch_rd_drop_cnt_reg, + NULL, + NULL, + }, + { + "csch_mmu_rd_cmd_cnt", + ETM_QMU_CSCH_MMU_RD_CMD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 
0x18, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_mmu_rd_cmd_cnt_reg, + NULL, + NULL, + }, + { + "csch_mmu_rd_sop_cnt", + ETM_QMU_CSCH_MMU_RD_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x19, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_mmu_rd_sop_cnt_reg, + NULL, + NULL, + }, + { + "csch_mmu_rd_eop_cnt", + ETM_QMU_CSCH_MMU_RD_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_mmu_rd_eop_cnt_reg, + NULL, + NULL, + }, + { + "csch_mmu_rd_drop_cnt", + ETM_QMU_CSCH_MMU_RD_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_mmu_rd_drop_cnt_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_filter", + ETM_QMU_QCFG_QSCH_CRS_FILTERr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crs_filter_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_force_en", + ETM_QMU_QCFG_QSCH_CRS_FORCE_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crs_force_en_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_force_qnum", + ETM_QMU_QCFG_QSCH_CRS_FORCE_QNUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crs_force_qnum_reg, + NULL, + NULL, + }, + { + "qcfg_qsch_crs_force_crs", + ETM_QMU_QCFG_QSCH_CRS_FORCE_CRSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x1f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qcfg_qsch_crs_force_crs_reg, + NULL, + NULL, + }, + { + "cfgmt_oshp_sgmii_shap_mode", + ETM_QMU_CFGMT_OSHP_SGMII_SHAP_MODEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_oshp_sgmii_shap_mode_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_sashap_en", + ETM_QMU_CFGMT_QMU_SASHAP_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + 
DPP_REG_NUL_ARRAY, + 0x21, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_qmu_sashap_en_reg, + NULL, + NULL, + }, + { + "cfgmt_sashap_token_max", + ETM_QMU_CFGMT_SASHAP_TOKEN_MAXr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x39, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_sashap_token_max_reg, + NULL, + NULL, + }, + { + "cfgmt_sashap_token_min", + ETM_QMU_CFGMT_SASHAP_TOKEN_MINr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfgmt_sashap_token_min_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q3lbaddrate", + ETM_QMU_CFG_QSCH_Q3LBADDRATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x3f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q3lbaddrate_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q012lbaddrate", + ETM_QMU_CFG_QSCH_Q012LBADDRATEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x40, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q012lbaddrate_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q3creditlbmaxcnt", + ETM_QMU_CFG_QSCH_Q3CREDITLBMAXCNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x41, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q3creditlbmaxcnt_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q012creditlbmaxcnt", + ETM_QMU_CFG_QSCH_Q012CREDITLBMAXCNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x42, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q012creditlbmaxcnt_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mul_token_gen_num", + ETM_QMU_CFG_QSCH_MUL_TOKEN_GEN_NUMr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x43, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mul_token_gen_num_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q3_credit_lb_control_en", + ETM_QMU_CFG_QSCH_Q3_CREDIT_LB_CONTROL_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x44, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q3_credit_lb_control_en_reg, + NULL, + NULL, + }, + { + 
"cfg_qsch_q012_credit_lb_control_en", + ETM_QMU_CFG_QSCH_Q012_CREDIT_LB_CONTROL_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x45, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q012_credit_lb_control_en_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sp_dwrr_en", + ETM_QMU_CFG_QSCH_SP_DWRR_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x46, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sp_dwrr_en_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q01_attach_en", + ETM_QMU_CFG_QSCH_Q01_ATTACH_ENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x47, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q01_attach_en_reg, + NULL, + NULL, + }, + { + "cfg_qsch_w0", + ETM_QMU_CFG_QSCH_W0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x48, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_w0_reg, + NULL, + NULL, + }, + { + "cfg_qsch_w1", + ETM_QMU_CFG_QSCH_W1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x49, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_w1_reg, + NULL, + NULL, + }, + { + "cfg_qsch_w2", + ETM_QMU_CFG_QSCH_W2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_w2_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktmaxcnt1", + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktmaxcnt1_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktmaxcnt2", + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktmaxcnt2_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktdcrrate1", + ETM_QMU_CFG_QSCH_LKYBKTDCRRATE1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktdcrrate1_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktdcrrate2", + 
ETM_QMU_CFG_QSCH_LKYBKTDCRRATE2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktdcrrate2_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktdcrrate3", + ETM_QMU_CFG_QSCH_LKYBKTDCRRATE3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x4f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktdcrrate3_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybktmaxcnt3", + ETM_QMU_CFG_QSCH_LKYBKTMAXCNT3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x50, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_lkybktmaxcnt3_reg, + NULL, + NULL, + }, + { + "cfg_qsch_qmu_mul_auto_sa_version", + ETM_QMU_CFG_QSCH_QMU_MUL_AUTO_SA_VERSIONr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x55, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_qmu_mul_auto_sa_version_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_0", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x56, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_0_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_1", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x57, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_1_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_2", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x58, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_2_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_3", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x59, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_3_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_4", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5a, + (32 
/ 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_4_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_5", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_5_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_6", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_6_reg, + NULL, + NULL, + }, + { + "cfg_qsch_sa_credit_value_7", + ETM_QMU_CFG_QSCH_SA_CREDIT_VALUE_7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x5d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_sa_credit_value_7_reg, + NULL, + NULL, + }, + { + "cfg_qsch_remote_credit_fifo_almost_full_th", + ETM_QMU_CFG_QSCH_REMOTE_CREDIT_FIFO_ALMOST_FULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x76, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_remote_credit_fifo_almost_full_th_reg, + NULL, + NULL, + }, + { + "cfg_qsch_auto_credit_fifo_almost_full_th", + ETM_QMU_CFG_QSCH_AUTO_CREDIT_FIFO_ALMOST_FULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x77, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_auto_credit_fifo_almost_full_th_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q3_credit_fifo_almost_full_th", + ETM_QMU_CFG_QSCH_Q3_CREDIT_FIFO_ALMOST_FULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x78, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q3_credit_fifo_almost_full_th_reg, + NULL, + NULL, + }, + { + "cfg_qsch_q012_credit_fifo_almost_full_th", + ETM_QMU_CFG_QSCH_Q012_CREDIT_FIFO_ALMOST_FULL_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x79, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_q012_credit_fifo_almost_full_th_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mul_fc_res_en", + ETM_QMU_CFG_QSCH_MUL_FC_RES_ENr, + 
ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mul_fc_res_en_reg, + NULL, + NULL, + }, + { + "cfgmt_mul_ovf_udf_flg_query", + ETM_QMU_CFGMT_MUL_OVF_UDF_FLG_QUERYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7d, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_etm_qmu_cfgmt_mul_ovf_udf_flg_query_reg, + NULL, + NULL, + }, + { + "cfgmt_mul_cng_flg_query", + ETM_QMU_CFGMT_MUL_CNG_FLG_QUERYr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7e, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_etm_qmu_cfgmt_mul_cng_flg_query_reg, + NULL, + NULL, + }, + { + "qsch_cfg_lkybktval1", + ETM_QMU_QSCH_CFG_LKYBKTVAL1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x7f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_cfg_lkybktval1_reg, + NULL, + NULL, + }, + { + "qsch_cfg_lkybktval2", + ETM_QMU_QSCH_CFG_LKYBKTVAL2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x80, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_cfg_lkybktval2_reg, + NULL, + NULL, + }, + { + "qsch_cfg_lkybktval3", + ETM_QMU_QSCH_CFG_LKYBKTVAL3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x81, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_cfg_lkybktval3_reg, + NULL, + NULL, + }, + { + "qsch_cfg_q3lbval", + ETM_QMU_QSCH_CFG_Q3LBVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x82, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_cfg_q3lbval_reg, + NULL, + NULL, + }, + { + "qsch_cfg_q012lbval", + ETM_QMU_QSCH_CFG_Q012LBVALr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x83, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_cfg_q012lbval_reg, + NULL, + NULL, + }, + { + "qlist_cfgmt_ram_ecc_err2", + ETM_QMU_QLIST_CFGMT_RAM_ECC_ERR2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2014, + (32 / 8), + 0, + 0, + 0, + 0, + 16, + g_etm_qmu_qlist_cfgmt_ram_ecc_err2_reg, + NULL, + NULL, + }, + { + "csch_aged_cmd_cnt", + ETM_QMU_CSCH_AGED_CMD_CNTr, + ETM, 
+ DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2838, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_aged_cmd_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_csch_congest_cnt", + ETM_QMU_CSCH_QCFG_CSCH_CONGEST_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2839, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_csch_congest_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_qlist_csch_sop_cnt", + ETM_QMU_CSCH_QCFG_QLIST_CSCH_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_qlist_csch_sop_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_qlist_csch_eop_cnt", + ETM_QMU_CSCH_QCFG_QLIST_CSCH_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_qlist_csch_eop_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_csch_csw_sop_cnt", + ETM_QMU_CSCH_QCFG_CSCH_CSW_SOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_csch_csw_sop_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_csch_csw_eop_cnt", + ETM_QMU_CSCH_QCFG_CSCH_CSW_EOP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_csch_csw_eop_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_qlist_csch_drop_cnt", + ETM_QMU_CSCH_QCFG_QLIST_CSCH_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_qlist_csch_drop_cnt_reg, + NULL, + NULL, + }, + { + "csch_qcfg_csch_csw_drop_cnt", + ETM_QMU_CSCH_QCFG_CSCH_CSW_DROP_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x283f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csch_qcfg_csch_csw_drop_cnt_reg, + NULL, + NULL, + }, + { + "csw_mmu_sop_cmd_cnt", + ETM_QMU_CSW_MMU_SOP_CMD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2840, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 1, + g_etm_qmu_csw_mmu_sop_cmd_cnt_reg, + NULL, + NULL, + }, + { + "mmu_csw_sop_data_cnt", + ETM_QMU_MMU_CSW_SOP_DATA_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2841, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_mmu_csw_sop_data_cnt_reg, + NULL, + NULL, + }, + { + "csw_qsch_feedb_cnt", + ETM_QMU_CSW_QSCH_FEEDB_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2842, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_csw_qsch_feedb_cnt_reg, + NULL, + NULL, + }, + { + "qmu_crdt_port_fc_cnt", + ETM_QMU_QMU_CRDT_PORT_FC_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2843, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_crdt_port_fc_cnt_reg, + NULL, + NULL, + }, + { + "csch_r_block_cnt", + ETM_QMU_CSCH_R_BLOCK_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x3400, + (32 / 8), + 0, + 0x1ff + 1, + 0, + 1, + 1, + g_etm_qmu_csch_r_block_cnt_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_qds_head_rd", + ETM_QMU_QCFG_QLIST_QDS_HEAD_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x10000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_qds_head_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_qds_tail_rd", + ETM_QMU_QCFG_QLIST_QDS_TAIL_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x20000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_qds_tail_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_ept_rd", + ETM_QMU_QCFG_QLIST_EPT_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x30000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_ept_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_age_flag_rd", + ETM_QMU_QCFG_QLIST_AGE_FLAG_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x38000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_age_flag_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_cti_rd", + ETM_QMU_QCFG_QLIST_CTI_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 
0x40000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_cti_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_cto_rd", + ETM_QMU_QCFG_QLIST_CTO_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x50000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_cto_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_chk_rd", + ETM_QMU_QCFG_QLIST_CHK_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x100000, + (32 / 8), + 0, + 0x7fff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_chk_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_nod_rd", + ETM_QMU_QCFG_QLIST_NOD_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x140000, + (32 / 8), + 0, + 0x7fff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_nod_rd_reg, + NULL, + NULL, + }, + { + "qcfg_qlist_biu_rd", + ETM_QMU_QCFG_QLIST_BIU_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x180000, + (32 / 8), + 0, + 0x7fff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_qlist_biu_rd_reg, + NULL, + NULL, + }, + { + "qsch_r_wlist_flag", + ETM_QMU_QSCH_R_WLIST_FLAGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x90000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qsch_r_wlist_flag_reg, + NULL, + NULL, + }, + { + "qcfg_crs_flg_rd", + ETM_QMU_QCFG_CRS_FLG_RDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0xd0000, + (32 / 8), + 0, + 0x23ff + 1, + 0, + 1, + 1, + g_etm_qmu_qcfg_crs_flg_rd_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_age_qds", + ETM_QMU_CFGMT_QMU_IMEM_AGE_QDSr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe0d, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_cfgmt_qmu_imem_age_qds_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_age_qlen", + ETM_QMU_CFGMT_QMU_IMEM_AGE_QLENr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0xe0e, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_cfgmt_qmu_imem_age_qlen_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_pd_ram_low", + ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_LOWr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1c0000, + (32 / 8), + 0, + 0x3fff + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_qmu_imem_pd_ram_low_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_pd_ram_high", + ETM_QMU_CFGMT_QMU_IMEM_PD_RAM_HIGHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x1c4000, + (32 / 8), + 0, + 0x3fff + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_qmu_imem_pd_ram_high_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_up_ptr", + ETM_QMU_CFGMT_QMU_IMEM_UP_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x200000, + (32 / 8), + 0, + 0x3fff + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_qmu_imem_up_ptr_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_down_ptr", + ETM_QMU_CFGMT_QMU_IMEM_DOWN_PTRr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x240000, + (32 / 8), + 0, + 0x3fff + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_qmu_imem_down_ptr_reg, + NULL, + NULL, + }, + { + "cfgmt_qmu_imem_age_flag", + ETM_QMU_CFGMT_QMU_IMEM_AGE_FLAGr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x280000, + (32 / 8), + 0, + 0x7ffff + 1, + 0, + 1, + 1, + g_etm_qmu_cfgmt_qmu_imem_age_flag_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybkt2cngth", + ETM_QMU_CFG_QSCH_LKYBKT2CNGTHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2040, + (32 / 8), + 0, + 7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_lkybkt2cngth_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybkt1cngth", + ETM_QMU_CFG_QSCH_LKYBKT1CNGTHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2030, + (32 / 8), + 0, + 7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_lkybkt1cngth_reg, + NULL, + NULL, + }, + { + "cfg_qsch_lkybkt3cngth", + ETM_QMU_CFG_QSCH_LKYBKT3CNGTHr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2050, + (32 / 8), + 0, + 7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_lkybkt3cngth_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn1_credit_value", + ETM_QMU_CFG_QSCH_RM_MUL_MCN1_CREDIT_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2060, + (32 / 8), + 0, + 7 + 
1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn1_credit_value_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn2_credit_value", + ETM_QMU_CFG_QSCH_RM_MUL_MCN2_CREDIT_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2070, + (32 / 8), + 0, + 7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn2_credit_value_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn3_credit_value", + ETM_QMU_CFG_QSCH_RM_MUL_MCN3_CREDIT_VALUEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x2080, + (32 / 8), + 0, + 7 + 1, + 0, + 1, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn3_credit_value_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_ansr_seed", + ETM_QMU_RM_MUL_MCN1_RAND_ANSR_SEEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2090, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn1_rand_ansr_seed_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_rand_ansr_seed", + ETM_QMU_RM_MUL_MCN2_RAND_ANSR_SEEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2091, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn2_rand_ansr_seed_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_ansr_seed", + ETM_QMU_RM_MUL_MCN3_RAND_ANSR_SEEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2092, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn3_rand_ansr_seed_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_ansr_th", + ETM_QMU_RM_MUL_MCN1_RAND_ANSR_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2093, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_rand_ansr_th_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_rand_ansr_th", + ETM_QMU_RM_MUL_MCN2_RAND_ANSR_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_rand_ansr_th_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_ansr_th", + ETM_QMU_RM_MUL_MCN3_RAND_ANSR_THr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2095, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_etm_qmu_rm_mul_mcn3_rand_ansr_th_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_hold_base", + ETM_QMU_RM_MUL_MCN1_RAND_HOLD_BASEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2096, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn1_rand_hold_base_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_rand_hold_base", + ETM_QMU_RM_MUL_MCN2_RAND_HOLD_BASEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2097, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn2_rand_hold_base_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_hold_base", + ETM_QMU_RM_MUL_MCN3_RAND_HOLD_BASEr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2098, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_rm_mul_mcn3_rand_hold_base_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_sel_mask", + ETM_QMU_RM_MUL_MCN1_RAND_SEL_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2099, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_rand_sel_mask_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_rand_sel_mask", + ETM_QMU_RM_MUL_MCN2_RAND_SEL_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_rand_sel_mask_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_sel_mask", + ETM_QMU_RM_MUL_MCN3_RAND_SEL_MASKr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_rand_sel_mask_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_sel_seed_reg0", + ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209c, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg0_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_rand_sel_seed_reg1", + ETM_QMU_RM_MUL_MCN1_RAND_SEL_SEED_REG1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_rand_sel_seed_reg1_reg, + NULL, + NULL, + }, + { 
+ "rm_mul_mcn2_rand_sel_seed_reg0", + ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209e, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg0_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_rand_sel_seed_reg1", + ETM_QMU_RM_MUL_MCN2_RAND_SEL_SEED_REG1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x209f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_rand_sel_seed_reg1_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_sel_seed_reg0", + ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a0, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg0_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_rand_sel_seed_reg1", + ETM_QMU_RM_MUL_MCN3_RAND_SEL_SEED_REG1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_rand_sel_seed_reg1_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th1", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th1_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th2", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th2_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th3", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th3_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th4", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th4_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th5", + 
ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th5_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th6", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th6_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn1_step_wait_th7", + ETM_QMU_RM_MUL_MCN1_STEP_WAIT_TH7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn1_step_wait_th7_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th1", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20a9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th1_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th2", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20aa, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th2_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th3", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ab, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th3_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th4", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th4_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th5", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ad, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th5_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th6", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ae, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th6_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn2_step_wait_th7", + ETM_QMU_RM_MUL_MCN2_STEP_WAIT_TH7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20af, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn2_step_wait_th7_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th1", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th1_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th2", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th2_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th3", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th3_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th4", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th4_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th5", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th5_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3_step_wait_th6", + ETM_QMU_RM_MUL_MCN3_STEP_WAIT_TH6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3_step_wait_th6_reg, + NULL, + NULL, + }, + { + "rm_mul_mcn3step_wait_th7", + ETM_QMU_RM_MUL_MCN3STEP_WAIT_TH7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_rm_mul_mcn3step_wait_th7_reg, + NULL, + NULL, + }, + { + 
"cfg_qsch_mulcrdcntrate0", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE0r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate0_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate1", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE1r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate1_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate2", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE2r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20b9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate2_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate3", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE3r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ba, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate3_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate4", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE4r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20bb, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate4_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate5", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE5r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate5_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate6", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE6r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20bd, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate6_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate7", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE7r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20be, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate7_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate8", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE8r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20bf, + (32 / 8), + 0, + 0, + 
0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate8_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate9", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE9r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate9_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate10", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE10r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate10_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate11", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE11r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate11_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate12", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE12r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate12_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate13", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE13r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate13_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate14", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE14r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate14_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate15", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE15r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate15_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate16", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE16r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate16_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate17", + 
ETM_QMU_CFG_QSCH_MULCRDCNTRATE17r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate17_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate18", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE18r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20c9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate18_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate19", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE19r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ca, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate19_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate20", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE20r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20cb, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate20_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate21", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE21r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate21_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate22", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE22r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20cd, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate22_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate23", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE23r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20ce, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate23_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate24", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE24r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20cf, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate24_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate25", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE25r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d0, + (32 / 8), + 0, + 0, + 0, + 
0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate25_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate26", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE26r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d1, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate26_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate27", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE27r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d2, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate27_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate28", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE28r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d3, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate28_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate29", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE29r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate29_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate30", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE30r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d5, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate30_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate31", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE31r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d6, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate31_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate32", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE32r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d7, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate32_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate33", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE33r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate33_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate34", + 
ETM_QMU_CFG_QSCH_MULCRDCNTRATE34r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20d9, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate34_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate35", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE35r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20da, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate35_reg, + NULL, + NULL, + }, + { + "cfg_qsch_mulcrdcntrate36", + ETM_QMU_CFG_QSCH_MULCRDCNTRATE36r, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20db, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_mulcrdcntrate36_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn1_rand_hold_shift", + ETM_QMU_CFG_QSCH_RM_MUL_MCN1_RAND_HOLD_SHIFTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn1_rand_hold_shift_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn2_rand_hold_shift", + ETM_QMU_CFG_QSCH_RM_MUL_MCN2_RAND_HOLD_SHIFTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20dd, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn2_rand_hold_shift_reg, + NULL, + NULL, + }, + { + "cfg_qsch_rm_mul_mcn3_rand_hold_shift", + ETM_QMU_CFG_QSCH_RM_MUL_MCN3_RAND_HOLD_SHIFTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x20de, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_cfg_qsch_rm_mul_mcn3_rand_hold_shift_reg, + NULL, + NULL, + }, + { + "last_drop_qnum_get", + ETM_QMU_LAST_DROP_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2806, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_etm_qmu_last_drop_qnum_get_reg, + NULL, + NULL, + }, + { + "crdt_qmu_credit_cnt", + ETM_QMU_CRDT_QMU_CREDIT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2807, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_crdt_qmu_credit_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_qsch_report_cnt", + ETM_QMU_QMU_TO_QSCH_REPORT_CNTr, + ETM, + 
DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2808, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_qsch_report_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_cgavd_report_cnt", + ETM_QMU_QMU_TO_CGAVD_REPORT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2809, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_cgavd_report_cnt_reg, + NULL, + NULL, + }, + { + "qmu_crdt_crs_normal_cnt", + ETM_QMU_QMU_CRDT_CRS_NORMAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_crdt_crs_normal_cnt_reg, + NULL, + NULL, + }, + { + "qmu_crdt_crs_off_cnt", + ETM_QMU_QMU_CRDT_CRS_OFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_crdt_crs_off_cnt_reg, + NULL, + NULL, + }, + { + "qsch_qlist_shedule_cnt", + ETM_QMU_QSCH_QLIST_SHEDULE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_qlist_shedule_cnt_reg, + NULL, + NULL, + }, + { + "qsch_qlist_sch_ept_cnt", + ETM_QMU_QSCH_QLIST_SCH_EPT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qsch_qlist_sch_ept_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_mmu_blk_wr_cnt", + ETM_QMU_QMU_TO_MMU_BLK_WR_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_mmu_blk_wr_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_csw_blk_rd_cnt", + ETM_QMU_QMU_TO_CSW_BLK_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x280f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_csw_blk_rd_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_mmu_sop_wr_cnt", + ETM_QMU_QMU_TO_MMU_SOP_WR_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2810, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_mmu_sop_wr_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_mmu_eop_wr_cnt", + 
ETM_QMU_QMU_TO_MMU_EOP_WR_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2811, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_mmu_eop_wr_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_mmu_drop_wr_cnt", + ETM_QMU_QMU_TO_MMU_DROP_WR_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2812, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_mmu_drop_wr_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_csw_sop_rd_cnt", + ETM_QMU_QMU_TO_CSW_SOP_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2813, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_csw_sop_rd_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_csw_eop_rd_cnt", + ETM_QMU_QMU_TO_CSW_EOP_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2814, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_csw_eop_rd_cnt_reg, + NULL, + NULL, + }, + { + "qmu_to_csw_drop_rd_cnt", + ETM_QMU_QMU_TO_CSW_DROP_RD_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2815, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_qmu_to_csw_drop_rd_cnt_reg, + NULL, + NULL, + }, + { + "mmu_to_qmu_wr_release_cnt", + ETM_QMU_MMU_TO_QMU_WR_RELEASE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2816, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_mmu_to_qmu_wr_release_cnt_reg, + NULL, + NULL, + }, + { + "mmu_to_qmu_rd_release_cnt", + ETM_QMU_MMU_TO_QMU_RD_RELEASE_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2817, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_mmu_to_qmu_rd_release_cnt_reg, + NULL, + NULL, + }, + { + "observe_qnum_set", + ETM_QMU_OBSERVE_QNUM_SETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2820, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_observe_qnum_set_reg, + NULL, + NULL, + }, + { + "spec_q_pkt_received", + ETM_QMU_SPEC_Q_PKT_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2821, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_pkt_received_reg, + NULL, + NULL, + 
}, + { + "spec_q_pkt_dropped", + ETM_QMU_SPEC_Q_PKT_DROPPEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2822, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_pkt_dropped_reg, + NULL, + NULL, + }, + { + "spec_q_pkt_scheduled", + ETM_QMU_SPEC_Q_PKT_SCHEDULEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2823, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_pkt_scheduled_reg, + NULL, + NULL, + }, + { + "spec_q_wr_cmd_sent", + ETM_QMU_SPEC_Q_WR_CMD_SENTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2824, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_wr_cmd_sent_reg, + NULL, + NULL, + }, + { + "spec_q_rd_cmd_sent", + ETM_QMU_SPEC_Q_RD_CMD_SENTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2825, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_rd_cmd_sent_reg, + NULL, + NULL, + }, + { + "spec_q_pkt_enq", + ETM_QMU_SPEC_Q_PKT_ENQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2826, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_pkt_enq_reg, + NULL, + NULL, + }, + { + "spec_q_pkt_deq", + ETM_QMU_SPEC_Q_PKT_DEQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2827, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_pkt_deq_reg, + NULL, + NULL, + }, + { + "spec_q_crdt_uncon_received", + ETM_QMU_SPEC_Q_CRDT_UNCON_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2828, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crdt_uncon_received_reg, + NULL, + NULL, + }, + { + "spec_q_crdt_cong_received", + ETM_QMU_SPEC_Q_CRDT_CONG_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2829, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crdt_cong_received_reg, + NULL, + NULL, + }, + { + "spec_q_crs_normal_cnt", + ETM_QMU_SPEC_Q_CRS_NORMAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crs_normal_cnt_reg, + NULL, + NULL, + }, + { + 
"spec_q_crs_off_cnt", + ETM_QMU_SPEC_Q_CRS_OFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crs_off_cnt_reg, + NULL, + NULL, + }, + { + "observe_batch_set", + ETM_QMU_OBSERVE_BATCH_SETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_observe_batch_set_reg, + NULL, + NULL, + }, + { + "spec_bat_pkt_received", + ETM_QMU_SPEC_BAT_PKT_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282d, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_pkt_received_reg, + NULL, + NULL, + }, + { + "spec_bat_pkt_dropped", + ETM_QMU_SPEC_BAT_PKT_DROPPEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282e, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_pkt_dropped_reg, + NULL, + NULL, + }, + { + "spec_bat_blk_scheduled", + ETM_QMU_SPEC_BAT_BLK_SCHEDULEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x282f, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_blk_scheduled_reg, + NULL, + NULL, + }, + { + "spec_bat_wr_cmd_sent", + ETM_QMU_SPEC_BAT_WR_CMD_SENTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2830, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_wr_cmd_sent_reg, + NULL, + NULL, + }, + { + "spec_bat_rd_cmd_sent", + ETM_QMU_SPEC_BAT_RD_CMD_SENTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2831, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_rd_cmd_sent_reg, + NULL, + NULL, + }, + { + "spec_bat_pkt_enq", + ETM_QMU_SPEC_BAT_PKT_ENQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2832, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_pkt_enq_reg, + NULL, + NULL, + }, + { + "spec_bat_pkt_deq", + ETM_QMU_SPEC_BAT_PKT_DEQr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2833, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_pkt_deq_reg, + NULL, + NULL, + }, + { + "spec_bat_crdt_uncon_received", 
+ ETM_QMU_SPEC_BAT_CRDT_UNCON_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2834, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_crdt_uncon_received_reg, + NULL, + NULL, + }, + { + "spec_bat_crdt_cong_received", + ETM_QMU_SPEC_BAT_CRDT_CONG_RECEIVEDr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2835, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_crdt_cong_received_reg, + NULL, + NULL, + }, + { + "spec_bat_crs_normal_cnt", + ETM_QMU_SPEC_BAT_CRS_NORMAL_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2836, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_crs_normal_cnt_reg, + NULL, + NULL, + }, + { + "spec_bat_crs_off_cnt", + ETM_QMU_SPEC_BAT_CRS_OFF_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2837, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_bat_crs_off_cnt_reg, + NULL, + NULL, + }, + { + "bcntm_ovfl_qnum_get", + ETM_QMU_BCNTM_OVFL_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2844, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_bcntm_ovfl_qnum_get_reg, + NULL, + NULL, + }, + { + "crbal_a_ovf_qnum_get", + ETM_QMU_CRBAL_A_OVF_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2845, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_crbal_a_ovf_qnum_get_reg, + NULL, + NULL, + }, + { + "crbal_b_ovf_qnum_get", + ETM_QMU_CRBAL_B_OVF_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2846, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_crbal_b_ovf_qnum_get_reg, + NULL, + NULL, + }, + { + "crbal_drop_qnum_get", + ETM_QMU_CRBAL_DROP_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2847, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_crbal_drop_qnum_get_reg, + NULL, + NULL, + }, + { + "deq_flg_report_cnt", + ETM_QMU_DEQ_FLG_REPORT_CNTr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2848, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_deq_flg_report_cnt_reg, + NULL, + NULL, + 
}, + { + "spec_q_crs_get", + ETM_QMU_SPEC_Q_CRS_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x2849, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crs_get_reg, + NULL, + NULL, + }, + { + "spec_q_crs_in_get", + ETM_QMU_SPEC_Q_CRS_IN_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x284a, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crs_in_get_reg, + NULL, + NULL, + }, + { + "spec_q_crs_flg_csol_get", + ETM_QMU_SPEC_Q_CRS_FLG_CSOL_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x284b, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_spec_q_crs_flg_csol_get_reg, + NULL, + NULL, + }, + { + "ept_sch_qnum_get", + ETM_QMU_EPT_SCH_QNUM_GETr, + ETM, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + 0x284c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_etm_qmu_ept_sch_qnum_get_reg, + NULL, + NULL, + }, + { + "pcie_ddr_switch", + CFG_PCIE_PCIE_DDR_SWITCHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_pcie_ddr_switch_reg, + NULL, + NULL, + }, + { + "user0_int_en", + CFG_PCIE_USER0_INT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user0_int_en_reg, + NULL, + NULL, + }, + { + "user0_int_mask", + CFG_PCIE_USER0_INT_MASKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user0_int_mask_reg, + NULL, + NULL, + }, + { + "user0_int_status", + CFG_PCIE_USER0_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x00c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user0_int_status_reg, + NULL, + NULL, + }, + { + "user1_int_en", + CFG_PCIE_USER1_INT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + 
MODULE_CFG_PCIE_BASE_ADDR + 0x010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user1_int_en_reg, + NULL, + NULL, + }, + { + "user1_int_mask", + CFG_PCIE_USER1_INT_MASKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user1_int_mask_reg, + NULL, + NULL, + }, + { + "user1_int_status", + CFG_PCIE_USER1_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user1_int_status_reg, + NULL, + NULL, + }, + { + "user2_int_en", + CFG_PCIE_USER2_INT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x01c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user2_int_en_reg, + NULL, + NULL, + }, + { + "user2_int_mask", + CFG_PCIE_USER2_INT_MASKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user2_int_mask_reg, + NULL, + NULL, + }, + { + "user2_int_status", + CFG_PCIE_USER2_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_user2_int_status_reg, + NULL, + NULL, + }, + { + "ecc_1b_int_en", + CFG_PCIE_ECC_1B_INT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_1b_int_en_reg, + NULL, + NULL, + }, + { + "ecc_1b_int_mask", + CFG_PCIE_ECC_1B_INT_MASKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x02c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_1b_int_mask_reg, + NULL, + NULL, + }, + { + "ecc_1b_int_status", + CFG_PCIE_ECC_1B_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_1b_int_status_reg, + NULL, + NULL, + }, + { + "ecc_2b_int_en", + CFG_PCIE_ECC_2B_INT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_2b_int_en_reg, + NULL, + NULL, + }, + { + "ecc_2b_int_mask", + CFG_PCIE_ECC_2B_INT_MASKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_2b_int_mask_reg, + NULL, + NULL, + }, + { + "ecc_2b_int_status", + CFG_PCIE_ECC_2B_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x03c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_ecc_2b_int_status_reg, + NULL, + NULL, + }, + { + "cfg_int_status", + CFG_PCIE_CFG_INT_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_cfg_int_status_reg, + NULL, + NULL, + }, + { + "i_core_to_cntl", + CFG_PCIE_I_CORE_TO_CNTLr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_i_core_to_cntl_reg, + NULL, + NULL, + }, + { + "test_in_low", + CFG_PCIE_TEST_IN_LOWr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_in_low_reg, + NULL, + NULL, + }, + { + "test_in_high", + CFG_PCIE_TEST_IN_HIGHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x04c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_in_high_reg, + NULL, + NULL, + }, + { + "local_interrupt_out", + CFG_PCIE_LOCAL_INTERRUPT_OUTr, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_local_interrupt_out_reg, + NULL, + NULL, + }, + { + "pl_ltssm", + CFG_PCIE_PL_LTSSMr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_pl_ltssm_reg, + NULL, + NULL, + }, + { + "test_out0", + CFG_PCIE_TEST_OUT0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out0_reg, + NULL, + NULL, + }, + { + "test_out1", + CFG_PCIE_TEST_OUT1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x05c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out1_reg, + NULL, + NULL, + }, + { + "test_out2", + CFG_PCIE_TEST_OUT2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out2_reg, + NULL, + NULL, + }, + { + "test_out3", + CFG_PCIE_TEST_OUT3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out3_reg, + NULL, + NULL, + }, + { + "test_out4", + CFG_PCIE_TEST_OUT4r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out4_reg, + NULL, + NULL, + }, + { + "test_out5", + CFG_PCIE_TEST_OUT5r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x06c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out5_reg, + NULL, + NULL, + }, + { + "test_out6", + CFG_PCIE_TEST_OUT6r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x070, + (32 / 8), + 0, + 0, + 0, + 
0, + 1, + g_cfg_pcie_test_out6_reg, + NULL, + NULL, + }, + { + "test_out7", + CFG_PCIE_TEST_OUT7r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out7_reg, + NULL, + NULL, + }, + { + "sync_o_core_status", + CFG_PCIE_SYNC_O_CORE_STATUSr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_core_status_reg, + NULL, + NULL, + }, + { + "sync_o_alert_dbe", + CFG_PCIE_SYNC_O_ALERT_DBEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x07c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_alert_dbe_reg, + NULL, + NULL, + }, + { + "sync_o_alert_sbe", + CFG_PCIE_SYNC_O_ALERT_SBEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_alert_sbe_reg, + NULL, + NULL, + }, + { + "sync_o_link_loopback_en", + CFG_PCIE_SYNC_O_LINK_LOOPBACK_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_link_loopback_en_reg, + NULL, + NULL, + }, + { + "sync_o_local_fs_lf_valid", + CFG_PCIE_SYNC_O_LOCAL_FS_LF_VALIDr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_local_fs_lf_valid_reg, + NULL, + NULL, + }, + { + "sync_o_rx_idle_detect", + CFG_PCIE_SYNC_O_RX_IDLE_DETECTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x08c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_rx_idle_detect_reg, + NULL, + NULL, + }, + { + "sync_o_rx_rdy", + CFG_PCIE_SYNC_O_RX_RDYr, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_rx_rdy_reg, + NULL, + NULL, + }, + { + "sync_o_tx_rdy", + CFG_PCIE_SYNC_O_TX_RDYr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_sync_o_tx_rdy_reg, + NULL, + NULL, + }, + { + "pcie_link_up_cnt", + CFG_PCIE_PCIE_LINK_UP_CNTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_pcie_link_up_cnt_reg, + NULL, + NULL, + }, + { + "test_out_pcie0", + CFG_PCIE_TEST_OUT_PCIE0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x09c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie0_reg, + NULL, + NULL, + }, + { + "test_out_pcie1", + CFG_PCIE_TEST_OUT_PCIE1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie1_reg, + NULL, + NULL, + }, + { + "test_out_pcie2", + CFG_PCIE_TEST_OUT_PCIE2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie2_reg, + NULL, + NULL, + }, + { + "test_out_pcie3", + CFG_PCIE_TEST_OUT_PCIE3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie3_reg, + NULL, + NULL, + }, + { + "test_out_pcie4", + CFG_PCIE_TEST_OUT_PCIE4r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie4_reg, + NULL, + NULL, + }, + { + "test_out_pcie5", + CFG_PCIE_TEST_OUT_PCIE5r, + CFG, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie5_reg, + NULL, + NULL, + }, + { + "test_out_pcie6", + CFG_PCIE_TEST_OUT_PCIE6r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie6_reg, + NULL, + NULL, + }, + { + "test_out_pcie7", + CFG_PCIE_TEST_OUT_PCIE7r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie7_reg, + NULL, + NULL, + }, + { + "test_out_pcie8", + CFG_PCIE_TEST_OUT_PCIE8r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie8_reg, + NULL, + NULL, + }, + { + "test_out_pcie9", + CFG_PCIE_TEST_OUT_PCIE9r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie9_reg, + NULL, + NULL, + }, + { + "test_out_pcie10", + CFG_PCIE_TEST_OUT_PCIE10r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie10_reg, + NULL, + NULL, + }, + { + "test_out_pcie11", + CFG_PCIE_TEST_OUT_PCIE11r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie11_reg, + NULL, + NULL, + }, + { + "test_out_pcie12", + CFG_PCIE_TEST_OUT_PCIE12r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie12_reg, + NULL, + NULL, + }, + { + "test_out_pcie13", + 
CFG_PCIE_TEST_OUT_PCIE13r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie13_reg, + NULL, + NULL, + }, + { + "test_out_pcie14", + CFG_PCIE_TEST_OUT_PCIE14r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie14_reg, + NULL, + NULL, + }, + { + "test_out_pcie15", + CFG_PCIE_TEST_OUT_PCIE15r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_test_out_pcie15_reg, + NULL, + NULL, + }, + { + "int_repeat_en", + CFG_PCIE_INT_REPEAT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x0e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_int_repeat_en_reg, + NULL, + NULL, + }, + { + "dbg_awid_axi_mst", + CFG_PCIE_DBG_AWID_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awid_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awaddr_axi_mst0", + CFG_PCIE_DBG_AWADDR_AXI_MST0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awaddr_axi_mst0_reg, + NULL, + NULL, + }, + { + "dbg_awaddr_axi_mst1", + CFG_PCIE_DBG_AWADDR_AXI_MST1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awaddr_axi_mst1_reg, + NULL, + NULL, + }, + { + "dbg_awlen_axi_mst", + CFG_PCIE_DBG_AWLEN_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x40c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_cfg_pcie_dbg_awlen_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awsize_axi_mst", + CFG_PCIE_DBG_AWSIZE_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awsize_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awburst_axi_mst", + CFG_PCIE_DBG_AWBURST_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x414, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awburst_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awlock_axi_mst", + CFG_PCIE_DBG_AWLOCK_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x418, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awlock_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awcache_axi_mst", + CFG_PCIE_DBG_AWCACHE_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x41c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awcache_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_awprot_axi_mst", + CFG_PCIE_DBG_AWPROT_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x420, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awprot_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_wid_axi_mst", + CFG_PCIE_DBG_WID_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x424, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wid_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_wdata_axi_mst0", + CFG_PCIE_DBG_WDATA_AXI_MST0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x428, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wdata_axi_mst0_reg, + NULL, + NULL, + }, + { + "dbg_wdata_axi_mst1", + CFG_PCIE_DBG_WDATA_AXI_MST1r, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x42c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wdata_axi_mst1_reg, + NULL, + NULL, + }, + { + "dbg_wdata_axi_mst2", + CFG_PCIE_DBG_WDATA_AXI_MST2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x430, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wdata_axi_mst2_reg, + NULL, + NULL, + }, + { + "dbg_wdata_axi_mst3", + CFG_PCIE_DBG_WDATA_AXI_MST3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x434, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wdata_axi_mst3_reg, + NULL, + NULL, + }, + { + "dbg_wstrb_axi_mst", + CFG_PCIE_DBG_WSTRB_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x438, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wstrb_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_wlast_axi_mst", + CFG_PCIE_DBG_WLAST_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x43c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wlast_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arid_axi_mst", + CFG_PCIE_DBG_ARID_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x440, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arid_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_araddr_axi_mst0", + CFG_PCIE_DBG_ARADDR_AXI_MST0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x444, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_araddr_axi_mst0_reg, + NULL, + NULL, + }, + { + "dbg_araddr_axi_mst1", + CFG_PCIE_DBG_ARADDR_AXI_MST1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x448, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_araddr_axi_mst1_reg, + NULL, + 
NULL, + }, + { + "dbg_arlen_axi_mst", + CFG_PCIE_DBG_ARLEN_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x44c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arlen_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arsize_axi_mst", + CFG_PCIE_DBG_ARSIZE_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arsize_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arburst_axi_mst", + CFG_PCIE_DBG_ARBURST_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x454, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arburst_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arlock_axi_mst", + CFG_PCIE_DBG_ARLOCK_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x458, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arlock_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arcache_axi_mst", + CFG_PCIE_DBG_ARCACHE_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x45c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arcache_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_arprot_axi_mst", + CFG_PCIE_DBG_ARPROT_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x460, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_arprot_axi_mst_reg, + NULL, + NULL, + }, + { + "dbg_rdata_axi_mst0", + CFG_PCIE_DBG_RDATA_AXI_MST0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x464, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_rdata_axi_mst0_reg, + NULL, + NULL, + }, + { + "dbg_rdata_axi_mst1", + CFG_PCIE_DBG_RDATA_AXI_MST1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + 
MODULE_CFG_PCIE_BASE_ADDR + 0x468, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_rdata_axi_mst1_reg, + NULL, + NULL, + }, + { + "dbg_rdata_axi_mst2", + CFG_PCIE_DBG_RDATA_AXI_MST2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x46c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_rdata_axi_mst2_reg, + NULL, + NULL, + }, + { + "dbg_rdata_axi_mst3", + CFG_PCIE_DBG_RDATA_AXI_MST3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x470, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_rdata_axi_mst3_reg, + NULL, + NULL, + }, + { + "axi_mst_state", + CFG_PCIE_AXI_MST_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x474, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axi_mst_state_reg, + NULL, + NULL, + }, + { + "axi_cfg_state", + CFG_PCIE_AXI_CFG_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x478, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axi_cfg_state_reg, + NULL, + NULL, + }, + { + "axi_slv_rd_state", + CFG_PCIE_AXI_SLV_RD_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x47c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axi_slv_rd_state_reg, + NULL, + NULL, + }, + { + "axi_slv_wr_state", + CFG_PCIE_AXI_SLV_WR_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axi_slv_wr_state_reg, + NULL, + NULL, + }, + { + "axim_delay_en", + CFG_PCIE_AXIM_DELAY_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axim_delay_en_reg, + NULL, + NULL, + }, + { + "axim_delay", + CFG_PCIE_AXIM_DELAYr, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axim_delay_reg, + NULL, + NULL, + }, + { + "axim_speed_wr", + CFG_PCIE_AXIM_SPEED_WRr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x48c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axim_speed_wr_reg, + NULL, + NULL, + }, + { + "axim_speed_rd", + CFG_PCIE_AXIM_SPEED_RDr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x490, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_axim_speed_rd_reg, + NULL, + NULL, + }, + { + "dbg_awaddr_axi_slv0", + CFG_PCIE_DBG_AWADDR_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awaddr_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg_awaddr_axi_slv1", + CFG_PCIE_DBG_AWADDR_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awaddr_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg0_wdata_axi_slv0", + CFG_PCIE_DBG0_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg0_wdata_axi_slv1", + CFG_PCIE_DBG0_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg0_wdata_axi_slv2", + CFG_PCIE_DBG0_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + 
"dbg0_wdata_axi_slv3", + CFG_PCIE_DBG0_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg1_wdata_axi_slv0", + CFG_PCIE_DBG1_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg1_wdata_axi_slv1", + CFG_PCIE_DBG1_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg1_wdata_axi_slv2", + CFG_PCIE_DBG1_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg1_wdata_axi_slv3", + CFG_PCIE_DBG1_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg2_wdata_axi_slv0", + CFG_PCIE_DBG2_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg2_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg2_wdata_axi_slv1", + CFG_PCIE_DBG2_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg2_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg2_wdata_axi_slv2", + CFG_PCIE_DBG2_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + 
MODULE_CFG_PCIE_BASE_ADDR + 0x4f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg2_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg2_wdata_axi_slv3", + CFG_PCIE_DBG2_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg2_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg3_wdata_axi_slv0", + CFG_PCIE_DBG3_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg3_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg3_wdata_axi_slv1", + CFG_PCIE_DBG3_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x4fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg3_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg3_wdata_axi_slv2", + CFG_PCIE_DBG3_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x500, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg3_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg3_wdata_axi_slv3", + CFG_PCIE_DBG3_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x504, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg3_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg4_wdata_axi_slv0", + CFG_PCIE_DBG4_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x508, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg4_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg4_wdata_axi_slv1", + CFG_PCIE_DBG4_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x50c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg4_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + 
"dbg4_wdata_axi_slv2", + CFG_PCIE_DBG4_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x510, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg4_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg4_wdata_axi_slv3", + CFG_PCIE_DBG4_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x514, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg4_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg5_wdata_axi_slv0", + CFG_PCIE_DBG5_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x518, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg5_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg5_wdata_axi_slv1", + CFG_PCIE_DBG5_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x51c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg5_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg5_wdata_axi_slv2", + CFG_PCIE_DBG5_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x520, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg5_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg5_wdata_axi_slv3", + CFG_PCIE_DBG5_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x524, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg5_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg6_wdata_axi_slv0", + CFG_PCIE_DBG6_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x528, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg6_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg6_wdata_axi_slv1", + CFG_PCIE_DBG6_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + 
MODULE_CFG_PCIE_BASE_ADDR + 0x52c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg6_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg6_wdata_axi_slv2", + CFG_PCIE_DBG6_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x530, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg6_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg6_wdata_axi_slv3", + CFG_PCIE_DBG6_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x534, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg6_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg7_wdata_axi_slv0", + CFG_PCIE_DBG7_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x538, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg7_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg7_wdata_axi_slv1", + CFG_PCIE_DBG7_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x53c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg7_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg7_wdata_axi_slv2", + CFG_PCIE_DBG7_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x540, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg7_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg7_wdata_axi_slv3", + CFG_PCIE_DBG7_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x544, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg7_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg8_wdata_axi_slv0", + CFG_PCIE_DBG8_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x548, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg8_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + 
"dbg8_wdata_axi_slv1", + CFG_PCIE_DBG8_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x54c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg8_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg8_wdata_axi_slv2", + CFG_PCIE_DBG8_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x550, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg8_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg8_wdata_axi_slv3", + CFG_PCIE_DBG8_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x554, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg8_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg9_wdata_axi_slv0", + CFG_PCIE_DBG9_WDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x558, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg9_wdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg9_wdata_axi_slv1", + CFG_PCIE_DBG9_WDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x55c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg9_wdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg9_wdata_axi_slv2", + CFG_PCIE_DBG9_WDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x560, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg9_wdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg9_wdata_axi_slv3", + CFG_PCIE_DBG9_WDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x564, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg9_wdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg_awlen_axi_slv", + CFG_PCIE_DBG_AWLEN_AXI_SLVr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + 
MODULE_CFG_PCIE_BASE_ADDR + 0x568, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_awlen_axi_slv_reg, + NULL, + NULL, + }, + { + "dbg_wlast_axi_slv", + CFG_PCIE_DBG_WLAST_AXI_SLVr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x56c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_wlast_axi_slv_reg, + NULL, + NULL, + }, + { + "dbg_araddr_axi_slv0", + CFG_PCIE_DBG_ARADDR_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x580, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_araddr_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg_araddr_axi_slv1", + CFG_PCIE_DBG_ARADDR_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x584, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_araddr_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg0_rdata_axi_slv0", + CFG_PCIE_DBG0_RDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x588, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_rdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg0_rdata_axi_slv1", + CFG_PCIE_DBG0_RDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x58c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_rdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg0_rdata_axi_slv2", + CFG_PCIE_DBG0_RDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x590, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_rdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg0_rdata_axi_slv3", + CFG_PCIE_DBG0_RDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x594, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg0_rdata_axi_slv3_reg, + NULL, + NULL, + }, + { + 
"dbg1_rdata_axi_slv0", + CFG_PCIE_DBG1_RDATA_AXI_SLV0r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x598, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_rdata_axi_slv0_reg, + NULL, + NULL, + }, + { + "dbg1_rdata_axi_slv1", + CFG_PCIE_DBG1_RDATA_AXI_SLV1r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x59c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_rdata_axi_slv1_reg, + NULL, + NULL, + }, + { + "dbg1_rdata_axi_slv2", + CFG_PCIE_DBG1_RDATA_AXI_SLV2r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x5a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_rdata_axi_slv2_reg, + NULL, + NULL, + }, + { + "dbg1_rdata_axi_slv3", + CFG_PCIE_DBG1_RDATA_AXI_SLV3r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x5a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg1_rdata_axi_slv3_reg, + NULL, + NULL, + }, + { + "dbg_rlast_axi_slv", + CFG_PCIE_DBG_RLAST_AXI_SLVr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_PCIE_BASE_ADDR + 0x5a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_pcie_dbg_rlast_axi_slv_reg, + NULL, + NULL, + }, + { + "dma_enable", + CFG_DMA_DMA_ENABLEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_enable_reg, + NULL, + NULL, + }, + { + "up_req", + CFG_DMA_UP_REQr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_up_req_reg, + NULL, + NULL, + }, + { + "dma_up_current_state", + CFG_DMA_DMA_UP_CURRENT_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_cfg_dma_dma_up_current_state_reg, + NULL, + NULL, + }, + { + "dma_up_req_ack", + CFG_DMA_DMA_UP_REQ_ACKr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x00c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_req_ack_reg, + NULL, + NULL, + }, + { + "dma_done_latch", + CFG_DMA_DMA_DONE_LATCHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_done_latch_reg, + NULL, + NULL, + }, + { + "dma_up_cpu_addr_low32", + CFG_DMA_DMA_UP_CPU_ADDR_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_cpu_addr_low32_reg, + NULL, + NULL, + }, + { + "dma_up_cpu_addr_high32", + CFG_DMA_DMA_UP_CPU_ADDR_HIGH32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x01c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_cpu_addr_high32_reg, + NULL, + NULL, + }, + { + "dma_up_se_addr", + CFG_DMA_DMA_UP_SE_ADDRr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_se_addr_reg, + NULL, + NULL, + }, + { + "dma_done_int", + CFG_DMA_DMA_DONE_INTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_done_int_reg, + NULL, + NULL, + }, + { + "sp_cfg", + CFG_DMA_SP_CFGr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_sp_cfg_reg, + NULL, + NULL, + }, + { + "dma_ing", + CFG_DMA_DMA_INGr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x03c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_cfg_dma_dma_ing_reg, + NULL, + NULL, + }, + { + "rd_timeout_thresh", + CFG_DMA_RD_TIMEOUT_THRESHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_rd_timeout_thresh_reg, + NULL, + NULL, + }, + { + "dma_tab_sta_up_fifo_gap", + CFG_DMA_DMA_TAB_STA_UP_FIFO_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_tab_sta_up_fifo_gap_reg, + NULL, + NULL, + }, + { + "cfg_mac_tim", + CFG_DMA_CFG_MAC_TIMr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cfg_mac_tim_reg, + NULL, + NULL, + }, + { + "cfg_mac_num", + CFG_DMA_CFG_MAC_NUMr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x04c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cfg_mac_num_reg, + NULL, + NULL, + }, + { + "init_bd_addr", + CFG_DMA_INIT_BD_ADDRr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_init_bd_addr_reg, + NULL, + NULL, + }, + { + "mac_up_bd_addr1_low32", + CFG_DMA_MAC_UP_BD_ADDR1_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_up_bd_addr1_low32_reg, + NULL, + NULL, + }, + { + "mac_up_bd_addr1_high32", + CFG_DMA_MAC_UP_BD_ADDR1_HIGH32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_up_bd_addr1_high32_reg, + NULL, + NULL, + }, + { + "mac_up_bd_addr2_low32", + CFG_DMA_MAC_UP_BD_ADDR2_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR 
+ 0x05c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_up_bd_addr2_low32_reg, + NULL, + NULL, + }, + { + "mac_up_bd_addr2_high32", + CFG_DMA_MAC_UP_BD_ADDR2_HIGH32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_up_bd_addr2_high32_reg, + NULL, + NULL, + }, + { + "cfg_mac_max_num", + CFG_DMA_CFG_MAC_MAX_NUMr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cfg_mac_max_num_reg, + NULL, + NULL, + }, + { + "dma_wbuf_ff_empty", + CFG_DMA_DMA_WBUF_FF_EMPTYr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_wbuf_ff_empty_reg, + NULL, + NULL, + }, + { + "dma_wbuf_state", + CFG_DMA_DMA_WBUF_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x06c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_wbuf_state_reg, + NULL, + NULL, + }, + { + "dma_mac_bd_addr_low32", + CFG_DMA_DMA_MAC_BD_ADDR_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_mac_bd_addr_low32_reg, + NULL, + NULL, + }, + { + "dma_mac_bd_addr_high32", + CFG_DMA_DMA_MAC_BD_ADDR_HIGH32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_mac_bd_addr_high32_reg, + NULL, + NULL, + }, + { + "mac_up_enable", + CFG_DMA_MAC_UP_ENABLEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_up_enable_reg, + NULL, + NULL, + }, + { + "mac_endian", + CFG_DMA_MAC_ENDIANr, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x07c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_mac_endian_reg, + NULL, + NULL, + }, + { + "up_endian", + CFG_DMA_UP_ENDIANr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_up_endian_reg, + NULL, + NULL, + }, + { + "dma_up_rd_cnt_latch", + CFG_DMA_DMA_UP_RD_CNT_LATCHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_rd_cnt_latch_reg, + NULL, + NULL, + }, + { + "dma_up_rcv_cnt_latch", + CFG_DMA_DMA_UP_RCV_CNT_LATCHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_rcv_cnt_latch_reg, + NULL, + NULL, + }, + { + "dma_up_cnt_latch", + CFG_DMA_DMA_UP_CNT_LATCHr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x08c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_cnt_latch_reg, + NULL, + NULL, + }, + { + "cpu_rd_bd_pulse", + CFG_DMA_CPU_RD_BD_PULSEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cpu_rd_bd_pulse_reg, + NULL, + NULL, + }, + { + "cpu_bd_threshold", + CFG_DMA_CPU_BD_THRESHOLDr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cpu_bd_threshold_reg, + NULL, + NULL, + }, + { + "cpu_bd_used_cnt", + CFG_DMA_CPU_BD_USED_CNTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cpu_bd_used_cnt_reg, + NULL, + NULL, + }, + { + "dma_up_rcv_status", + CFG_DMA_DMA_UP_RCV_STATUSr, + CFG, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x09c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_rcv_status_reg, + NULL, + NULL, + }, + { + "slv_rid_err_en", + CFG_DMA_SLV_RID_ERR_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_slv_rid_err_en_reg, + NULL, + NULL, + }, + { + "slv_rresp_err_en", + CFG_DMA_SLV_RRESP_ERR_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_slv_rresp_err_en_reg, + NULL, + NULL, + }, + { + "se_rdbk_ff_full", + CFG_DMA_SE_RDBK_FF_FULLr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_se_rdbk_ff_full_reg, + NULL, + NULL, + }, + { + "dma_up_data_count", + CFG_DMA_DMA_UP_DATA_COUNTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_up_data_count_reg, + NULL, + NULL, + }, + { + "dma_mwr_fifo_afull_gap", + CFG_DMA_DMA_MWR_FIFO_AFULL_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0xb0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_mwr_fifo_afull_gap_reg, + NULL, + NULL, + }, + { + "dma_info_fifo_afull_gap", + CFG_DMA_DMA_INFO_FIFO_AFULL_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0xb4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_info_fifo_afull_gap_reg, + NULL, + NULL, + }, + { + "dma_rd_timeout_set", + CFG_DMA_DMA_RD_TIMEOUT_SETr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_rd_timeout_set_reg, + NULL, + 
NULL, + }, + { + "dma_bd_dat_err_en", + CFG_DMA_DMA_BD_DAT_ERR_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_bd_dat_err_en_reg, + NULL, + NULL, + }, + { + "dma_repeat_cnt", + CFG_DMA_DMA_REPEAT_CNTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_repeat_cnt_reg, + NULL, + NULL, + }, + { + "dma_rd_timeout_en", + CFG_DMA_DMA_RD_TIMEOUT_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_rd_timeout_en_reg, + NULL, + NULL, + }, + { + "dma_repeat_read", + CFG_DMA_DMA_REPEAT_READr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_repeat_read_reg, + NULL, + NULL, + }, + { + "dma_repeat_read_en", + CFG_DMA_DMA_REPEAT_READ_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_repeat_read_en_reg, + NULL, + NULL, + }, + { + "bd_ctl_state", + CFG_DMA_BD_CTL_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_bd_ctl_state_reg, + NULL, + NULL, + }, + { + "dma_done_int_cnt_wr", + CFG_DMA_DMA_DONE_INT_CNT_WRr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_done_int_cnt_wr_reg, + NULL, + NULL, + }, + { + "dma_done_int_cnt_mac", + CFG_DMA_DMA_DONE_INT_CNT_MACr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_cfg_dma_dma_done_int_cnt_mac_reg, + NULL, + NULL, + }, + { + "current_mac_num", + CFG_DMA_CURRENT_MAC_NUMr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_current_mac_num_reg, + NULL, + NULL, + }, + { + "cfg_mac_afifo_afull", + CFG_DMA_CFG_MAC_AFIFO_AFULLr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_cfg_mac_afifo_afull_reg, + NULL, + NULL, + }, + { + "dma_mac_ff_full", + CFG_DMA_DMA_MAC_FF_FULLr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_dma_dma_mac_ff_full_reg, + NULL, + NULL, + }, + { + "user_axi_mst", + CFG_DMA_USER_AXI_MSTr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_DMA_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_cfg_dma_user_axi_mst_reg, + NULL, + NULL, + }, + { + "sbus_state", + CFG_CSR_SBUS_STATEr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_sbus_state_reg, + NULL, + NULL, + }, + { + "mst_debug_en", + CFG_CSR_MST_DEBUG_ENr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_en_reg, + NULL, + NULL, + }, + { + "sbus_command_sel", + CFG_CSR_SBUS_COMMAND_SELr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_sbus_command_sel_reg, + NULL, + NULL, + }, + { + "soc_rd_time_out_thresh", + CFG_CSR_SOC_RD_TIME_OUT_THRESHr, + CFG, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x02c, + (32 / 8), + 0, + 0, + 0, 
+ 0, + 1, + g_cfg_csr_soc_rd_time_out_thresh_reg, + NULL, + NULL, + }, + { + "big_little_byte_order", + CFG_CSR_BIG_LITTLE_BYTE_ORDERr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_big_little_byte_order_reg, + NULL, + NULL, + }, + { + "ecc_bypass_read", + CFG_CSR_ECC_BYPASS_READr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "ahb_async_wr_fifo_afull_gap", + CFG_CSR_AHB_ASYNC_WR_FIFO_AFULL_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_ahb_async_wr_fifo_afull_gap_reg, + NULL, + NULL, + }, + { + "ahb_async_rd_fifo_afull_gap", + CFG_CSR_AHB_ASYNC_RD_FIFO_AFULL_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x03c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_ahb_async_rd_fifo_afull_gap_reg, + NULL, + NULL, + }, + { + "ahb_async_cpl_fifo_afull_gap", + CFG_CSR_AHB_ASYNC_CPL_FIFO_AFULL_GAPr, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_ahb_async_cpl_fifo_afull_gap_reg, + NULL, + NULL, + }, + { + "mst_debug_data0_high26", + CFG_CSR_MST_DEBUG_DATA0_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data0_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data0_low32", + CFG_CSR_MST_DEBUG_DATA0_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data0_low32_reg, + NULL, + NULL, + }, + { 
+ "mst_debug_data1_high26", + CFG_CSR_MST_DEBUG_DATA1_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data1_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data1_low32", + CFG_CSR_MST_DEBUG_DATA1_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x08c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data1_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data2_high26", + CFG_CSR_MST_DEBUG_DATA2_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data2_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data2_low32", + CFG_CSR_MST_DEBUG_DATA2_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data2_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data3_high26", + CFG_CSR_MST_DEBUG_DATA3_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data3_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data3_low32", + CFG_CSR_MST_DEBUG_DATA3_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x09c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data3_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data4_high26", + CFG_CSR_MST_DEBUG_DATA4_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data4_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data4_low32", + CFG_CSR_MST_DEBUG_DATA4_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data4_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data5_high26", + CFG_CSR_MST_DEBUG_DATA5_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data5_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data5_low32", + CFG_CSR_MST_DEBUG_DATA5_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data5_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data6_high26", + CFG_CSR_MST_DEBUG_DATA6_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data6_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data6_low32", + CFG_CSR_MST_DEBUG_DATA6_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data6_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data7_high26", + CFG_CSR_MST_DEBUG_DATA7_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data7_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data7_low32", + CFG_CSR_MST_DEBUG_DATA7_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data7_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data8_high26", + CFG_CSR_MST_DEBUG_DATA8_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0c0, + (32 / 8), + 0, + 0, + 0, + 
0, + 1, + g_cfg_csr_mst_debug_data8_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data8_low32", + CFG_CSR_MST_DEBUG_DATA8_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data8_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data9_high26", + CFG_CSR_MST_DEBUG_DATA9_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data9_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data9_low32", + CFG_CSR_MST_DEBUG_DATA9_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data9_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data10_high26", + CFG_CSR_MST_DEBUG_DATA10_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data10_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data10_low32", + CFG_CSR_MST_DEBUG_DATA10_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data10_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data11_high26", + CFG_CSR_MST_DEBUG_DATA11_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data11_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data11_low32", + CFG_CSR_MST_DEBUG_DATA11_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data11_low32_reg, + NULL, + NULL, + }, + { + 
"mst_debug_data12_high26", + CFG_CSR_MST_DEBUG_DATA12_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data12_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data12_low32", + CFG_CSR_MST_DEBUG_DATA12_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data12_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data13_high26", + CFG_CSR_MST_DEBUG_DATA13_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data13_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data13_low32", + CFG_CSR_MST_DEBUG_DATA13_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data13_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data14_high26", + CFG_CSR_MST_DEBUG_DATA14_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data14_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data14_low32", + CFG_CSR_MST_DEBUG_DATA14_LOW32r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data14_low32_reg, + NULL, + NULL, + }, + { + "mst_debug_data15_high26", + CFG_CSR_MST_DEBUG_DATA15_HIGH26r, + CFG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data15_high26_reg, + NULL, + NULL, + }, + { + "mst_debug_data15_low32", + CFG_CSR_MST_DEBUG_DATA15_LOW32r, + CFG, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_CFG_BASE_ADDR + MODULE_CFG_CSR_BASE_ADDR + 0x0fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_cfg_csr_mst_debug_data15_low32_reg, + NULL, + NULL, + }, + { + "ind_access_states", + NPPU_MR_CFG_IND_ACCESS_STATESr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_ind_access_states_reg, + NULL, + NULL, + }, + { + "ind_access_cmd0", + NPPU_MR_CFG_IND_ACCESS_CMD0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_mr_cfg_ind_access_cmd0_reg, + NULL, + NULL, + }, + { + "ind_access_data0", + NPPU_MR_CFG_IND_ACCESS_DATA0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_ind_access_data0_reg, + NULL, + NULL, + }, + { + "ind_access_data1", + NPPU_MR_CFG_IND_ACCESS_DATA1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_ind_access_data1_reg, + NULL, + NULL, + }, + { + "ind_access_cmd1", + NPPU_MR_CFG_IND_ACCESS_CMD1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_ind_access_cmd1_reg, + NULL, + NULL, + }, + { + "mr_init_done", + NPPU_MR_CFG_MR_INIT_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_init_done_reg, + NULL, + NULL, + }, + { + "cnt_mode_reg", + NPPU_MR_CFG_CNT_MODE_REGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_nppu_mr_cfg_cnt_mode_reg_reg, + NULL, + NULL, + }, + { + "cfg_ecc_bypass_read", + NPPU_MR_CFG_CFG_ECC_BYPASS_READr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_cfg_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "cfg_rep_mod", + NPPU_MR_CFG_CFG_REP_MODr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_cfg_rep_mod_reg, + NULL, + NULL, + }, + { + "block_ptr_fifo_aful_th", + NPPU_MR_CFG_BLOCK_PTR_FIFO_AFUL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_block_ptr_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "pre_rcv_ptr_fifo_aful_th", + NPPU_MR_CFG_PRE_RCV_PTR_FIFO_AFUL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_pre_rcv_ptr_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "mgid_fifo_aful_th", + NPPU_MR_CFG_MGID_FIFO_AFUL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_mgid_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "rep_cmd_fifo_aful_th", + NPPU_MR_CFG_REP_CMD_FIFO_AFUL_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_mr_cfg_rep_cmd_fifo_aful_th_reg, + NULL, + NULL, + }, + { + "mr_int_mask_1", + NPPU_MR_CFG_MR_INT_MASK_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x00c0, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_int_mask_1_reg, + NULL, + NULL, + }, + { + 
"mr_int_mask_2", + NPPU_MR_CFG_MR_INT_MASK_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x00c4, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_int_mask_2_reg, + NULL, + NULL, + }, + { + "mr_int_mask_3", + NPPU_MR_CFG_MR_INT_MASK_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x00c8, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_int_mask_3_reg, + NULL, + NULL, + }, + { + "mr_int_mask_4", + NPPU_MR_CFG_MR_INT_MASK_4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x00cc, + (32 / 8), + 0, + 0, + 0, + 0, + 25, + g_nppu_mr_cfg_mr_int_mask_4_reg, + NULL, + NULL, + }, + { + "mr_states_1", + NPPU_MR_CFG_MR_STATES_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_states_1_reg, + NULL, + NULL, + }, + { + "mr_states_2", + NPPU_MR_CFG_MR_STATES_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_states_2_reg, + NULL, + NULL, + }, + { + "mr_states_3", + NPPU_MR_CFG_MR_STATES_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_states_3_reg, + NULL, + NULL, + }, + { + "mr_states_4", + NPPU_MR_CFG_MR_STATES_4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 25, + g_nppu_mr_cfg_mr_states_4_reg, + NULL, + NULL, + }, + { + "mr_states_5", + NPPU_MR_CFG_MR_STATES_5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 21, + 
g_nppu_mr_cfg_mr_states_5_reg, + NULL, + NULL, + }, + { + "mr_states_6", + NPPU_MR_CFG_MR_STATES_6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_nppu_mr_cfg_mr_states_6_reg, + NULL, + NULL, + }, + { + "mr_states_7", + NPPU_MR_CFG_MR_STATES_7r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 22, + g_nppu_mr_cfg_mr_states_7_reg, + NULL, + NULL, + }, + { + "mr_states_8", + NPPU_MR_CFG_MR_STATES_8r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_states_8_reg, + NULL, + NULL, + }, + { + "mr_sop_in_cnt", + NPPU_MR_CFG_MR_SOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_sop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_eop_in_cnt", + NPPU_MR_CFG_MR_EOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_eop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_sop_out_cnt", + NPPU_MR_CFG_MR_SOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_sop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_eop_out_cnt", + NPPU_MR_CFG_MR_EOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_eop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_in_cnt", + NPPU_MR_CFG_MR_COS0_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_in_cnt", + NPPU_MR_CFG_MR_COS1_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_in_cnt", + NPPU_MR_CFG_MR_COS2_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_in_cnt", + NPPU_MR_CFG_MR_COS3_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_out_cnt", + NPPU_MR_CFG_MR_COS0_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_out_cnt", + NPPU_MR_CFG_MR_COS1_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_out_cnt", + NPPU_MR_CFG_MR_COS2_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_out_cnt", + NPPU_MR_CFG_MR_COS3_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_out_cnt_reg, + NULL, + NULL, + }, + { + 
"mr_err_in_cnt", + NPPU_MR_CFG_MR_ERR_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_err_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_sop_in_cnt", + NPPU_MR_CFG_MR_COS0_SOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_sop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_eop_in_cnt", + NPPU_MR_CFG_MR_COS0_EOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0144, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_eop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_sop_in_cnt", + NPPU_MR_CFG_MR_COS1_SOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0148, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_sop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_eop_in_cnt", + NPPU_MR_CFG_MR_COS1_EOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x014c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_eop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_sop_in_cnt", + NPPU_MR_CFG_MR_COS2_SOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0150, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_sop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_eop_in_cnt", + NPPU_MR_CFG_MR_COS2_EOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0154, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_eop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_sop_in_cnt", + NPPU_MR_CFG_MR_COS3_SOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0158, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_sop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_eop_in_cnt", + NPPU_MR_CFG_MR_COS3_EOP_IN_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x015c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_eop_in_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_in_err_cnt", + NPPU_MR_CFG_MR_COS0_IN_ERR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0160, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_in_err_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_in_err_cnt", + NPPU_MR_CFG_MR_COS1_IN_ERR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0164, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_in_err_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_in_err_cnt", + NPPU_MR_CFG_MR_COS2_IN_ERR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0168, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_in_err_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_in_err_cnt", + NPPU_MR_CFG_MR_COS3_IN_ERR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x016c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_in_err_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_sop_out_cnt", + NPPU_MR_CFG_MR_COS0_SOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0170, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_sop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos0_eop_out_cnt", + NPPU_MR_CFG_MR_COS0_EOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0174, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos0_eop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_sop_out_cnt", + NPPU_MR_CFG_MR_COS1_SOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0178, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_sop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos1_eop_out_cnt", + NPPU_MR_CFG_MR_COS1_EOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x017c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos1_eop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_sop_out_cnt", + NPPU_MR_CFG_MR_COS2_SOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_sop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos2_eop_out_cnt", + NPPU_MR_CFG_MR_COS2_EOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos2_eop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_sop_out_cnt", + NPPU_MR_CFG_MR_COS3_SOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0188, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_sop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_cos3_eop_out_cnt", + NPPU_MR_CFG_MR_COS3_EOP_OUT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x018c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_cos3_eop_out_cnt_reg, + NULL, + NULL, + }, + { + "mr_mlt_unvld_cnt", + NPPU_MR_CFG_MR_MLT_UNVLD_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0190, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_mlt_unvld_cnt_reg, + NULL, + NULL, + }, + { + "mr_sop_eop_match_cfg", + NPPU_MR_CFG_MR_SOP_EOP_MATCH_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_mr_cfg_mr_sop_eop_match_cfg_reg, + NULL, + NULL, + }, + { + "mr_mlt_unvld_mgid", + NPPU_MR_CFG_MR_MLT_UNVLD_MGIDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_MR_CFG_BASE_ADDR + 0x0108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_mr_cfg_mr_mlt_unvld_mgid_reg, + NULL, + NULL, + }, + { + "isch_fifo_th_1", + NPPU_PKTRX_CFG_ISCH_FIFO_TH_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_isch_fifo_th_1_reg, + NULL, + NULL, + }, + { + "isch_fifo_th_2", + NPPU_PKTRX_CFG_ISCH_FIFO_TH_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_isch_fifo_th_2_reg, + NULL, + NULL, + }, + { + "isch_fifo_th_3", + NPPU_PKTRX_CFG_ISCH_FIFO_TH_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_isch_fifo_th_3_reg, + NULL, + NULL, + }, + { + "isch_fifo_th_4", + NPPU_PKTRX_CFG_ISCH_FIFO_TH_4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_isch_fifo_th_4_reg, + NULL, + NULL, + }, + { + "isch_cfg_0", + NPPU_PKTRX_CFG_ISCH_CFG_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_isch_cfg_0_reg, + NULL, + NULL, + }, + { + 
"hdu_ex_tpid_0", + NPPU_PKTRX_CFG_HDU_EX_TPID_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_ex_tpid_0_reg, + NULL, + NULL, + }, + { + "hdu_ex_tpid_1", + NPPU_PKTRX_CFG_HDU_EX_TPID_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_ex_tpid_1_reg, + NULL, + NULL, + }, + { + "hdu_int_tpid_0", + NPPU_PKTRX_CFG_HDU_INT_TPID_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0098, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_int_tpid_0_reg, + NULL, + NULL, + }, + { + "hdu_int_tpid_1", + NPPU_PKTRX_CFG_HDU_INT_TPID_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x009c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_int_tpid_1_reg, + NULL, + NULL, + }, + { + "hdu_hdlc_0", + NPPU_PKTRX_CFG_HDU_HDLC_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00a0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_hdlc_0_reg, + NULL, + NULL, + }, + { + "hdu_hdlc_1", + NPPU_PKTRX_CFG_HDU_HDLC_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00a4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_hdlc_1_reg, + NULL, + NULL, + }, + { + "hdu_udf_l3type_0", + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00a8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_udf_l3type_0_reg, + NULL, + NULL, + }, + { + "hdu_udf_l3type_1", + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00ac, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_udf_l3type_1_reg, + NULL, + NULL, + }, + { + "hdu_udf_l3type_2", + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00b0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_udf_l3type_2_reg, + NULL, + NULL, + }, + { + "hdu_udf_l3type_3", + NPPU_PKTRX_CFG_HDU_UDF_L3TYPE_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00b4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_hdu_udf_l3type_3_reg, + NULL, + NULL, + }, + { + "hdu_udf_l4type_0", + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00b8, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_hdu_udf_l4type_0_reg, + NULL, + NULL, + }, + { + "hdu_udf_l4type_1", + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00bc, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_hdu_udf_l4type_1_reg, + NULL, + NULL, + }, + { + "hdu_udf_l4type_2", + NPPU_PKTRX_CFG_HDU_UDF_L4TYPE_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00c0, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_pktrx_cfg_hdu_udf_l4type_2_reg, + NULL, + NULL, + }, + { + "slot_no_cfg", + NPPU_PKTRX_CFG_SLOT_NO_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x00d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_slot_no_cfg_reg, + NULL, + NULL, + }, + { + "pktrx_int_en_0", + NPPU_PKTRX_CFG_PKTRX_INT_EN_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0114, + (32 / 
8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pktrx_cfg_pktrx_int_en_0_reg, + NULL, + NULL, + }, + { + "pktrx_int_en_1", + NPPU_PKTRX_CFG_PKTRX_INT_EN_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0118, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_pktrx_int_en_1_reg, + NULL, + NULL, + }, + { + "pktrx_int_mask_0", + NPPU_PKTRX_CFG_PKTRX_INT_MASK_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x011c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pktrx_cfg_pktrx_int_mask_0_reg, + NULL, + NULL, + }, + { + "pktrx_int_mask_1", + NPPU_PKTRX_CFG_PKTRX_INT_MASK_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0120, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pktrx_cfg_pktrx_int_mask_1_reg, + NULL, + NULL, + }, + { + "pktrx_int_status", + NPPU_PKTRX_CFG_PKTRX_INT_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x0124, + (32 / 8), + 0, + 35 + 1, + 0, + 4, + 1, + g_nppu_pktrx_cfg_pktrx_int_status_reg, + NULL, + NULL, + }, + { + "pktrx_port_rdy0", + NPPU_PKTRX_CFG_PKTRX_PORT_RDY0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01b4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pktrx_cfg_pktrx_port_rdy0_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy0", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy0_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy1", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy1_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy2", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy2_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy3", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy3_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy4", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy4_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy5", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy5_reg, + NULL, + NULL, + }, + { + "pktrx_lif0_pfc_rdy6", + NPPU_PKTRX_CFG_PKTRX_LIF0_PFC_RDY6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_pktrx_lif0_pfc_rdy6_reg, + NULL, + NULL, + }, + { + "cfg_port_l2_offset_mode", + NPPU_PKTRX_CFG_CFG_PORT_L2_OFFSET_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PKTRX_CFG_BASE_ADDR + 0x01d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pktrx_cfg_cfg_port_l2_offset_mode_reg, + NULL, + NULL, + }, + { + "int_ram_en", + NPPU_IDMA_CFG_INT_RAM_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_nppu_idma_cfg_int_ram_en_reg, + NULL, + NULL, + }, + { + "int_ram_mask", + NPPU_IDMA_CFG_INT_RAM_MASKr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_idma_cfg_int_ram_mask_reg, + NULL, + NULL, + }, + { + "int_ram_status", + NPPU_IDMA_CFG_INT_RAM_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_idma_cfg_int_ram_status_reg, + NULL, + NULL, + }, + { + "subsys_int_mask_flag", + NPPU_IDMA_CFG_SUBSYS_INT_MASK_FLAGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_cfg_subsys_int_mask_flag_reg, + NULL, + NULL, + }, + { + "subsys_int_unmask_flag", + NPPU_IDMA_CFG_SUBSYS_INT_UNMASK_FLAGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_cfg_subsys_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "debug_cnt_rdclr_mode", + NPPU_IDMA_CFG_DEBUG_CNT_RDCLR_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_IDMA_CFG_BASE_ADDR + 0x4f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_idma_cfg_debug_cnt_rdclr_mode_reg, + NULL, + NULL, + }, + { + "int_ram_en0", + NPPU_PBU_CFG_INT_RAM_EN0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x000, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_ram_en0_reg, + NULL, + NULL, + }, + { + "int_ram_mask0", + NPPU_PBU_CFG_INT_RAM_MASK0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_ram_mask0_reg, + NULL, + NULL, + }, + { + "int_ram_status0", + 
NPPU_PBU_CFG_INT_RAM_STATUS0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_ram_status0_reg, + NULL, + NULL, + }, + { + "int_fifo_en0", + NPPU_PBU_CFG_INT_FIFO_EN0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x0c0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_fifo_en0_reg, + NULL, + NULL, + }, + { + "int_fifo_en1", + NPPU_PBU_CFG_INT_FIFO_EN1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_int_fifo_en1_reg, + NULL, + NULL, + }, + { + "int_fifo_mask0", + NPPU_PBU_CFG_INT_FIFO_MASK0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x100, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_fifo_mask0_reg, + NULL, + NULL, + }, + { + "int_fifo_mask1", + NPPU_PBU_CFG_INT_FIFO_MASK1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x104, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_int_fifo_mask1_reg, + NULL, + NULL, + }, + { + "int_fifo_status0", + NPPU_PBU_CFG_INT_FIFO_STATUS0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x140, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_pbu_cfg_int_fifo_status0_reg, + NULL, + NULL, + }, + { + "int_fifo_status1", + NPPU_PBU_CFG_INT_FIFO_STATUS1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x144, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_cfg_int_fifo_status1_reg, + NULL, + NULL, + }, + { + "subsys_int_mask_flag", + NPPU_PBU_CFG_SUBSYS_INT_MASK_FLAGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_subsys_int_mask_flag_reg, + NULL, + NULL, + }, + { + "subsys_int_unmask_flag", + NPPU_PBU_CFG_SUBSYS_INT_UNMASK_FLAGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_subsys_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "sa_ip_en", + NPPU_PBU_CFG_SA_IP_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_sa_ip_en_reg, + NULL, + NULL, + }, + { + "debug_cnt_rdclr_mode", + NPPU_PBU_CFG_DEBUG_CNT_RDCLR_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x4f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_debug_cnt_rdclr_mode_reg, + NULL, + NULL, + }, + { + "fptr_fifo_aful_assert_cfg", + NPPU_PBU_CFG_FPTR_FIFO_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x50c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_fptr_fifo_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "fptr_fifo_aful_negate_cfg", + NPPU_PBU_CFG_FPTR_FIFO_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x510, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_fptr_fifo_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "pf_fifo_aful_assert_cfg", + NPPU_PBU_CFG_PF_FIFO_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x514, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_pf_fifo_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_fifo_aful_negate_cfg", + NPPU_PBU_CFG_PF_FIFO_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x518, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_pf_fifo_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "pf_fifo_aept_assert_cfg", + NPPU_PBU_CFG_PF_FIFO_AEPT_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x51c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_pf_fifo_aept_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_fifo_aept_negate_cfg", + NPPU_PBU_CFG_PF_FIFO_AEPT_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x520, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_pf_fifo_aept_negate_cfg_reg, + NULL, + NULL, + }, + { + "wb_aful_assert_cfg", + NPPU_PBU_CFG_WB_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x524, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_wb_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "wb_aful_negate_cfg", + NPPU_PBU_CFG_WB_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x528, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_wb_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "se_key_aful_assert_cfg", + NPPU_PBU_CFG_SE_KEY_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x52c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_se_key_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_se_aful_assert_cfg", + NPPU_PBU_CFG_IFBRD_SE_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x534, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_se_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_se_aful_negate_cfg", + NPPU_PBU_CFG_IFBRD_SE_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x538, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_se_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_odma_aful_assert_cfg", + NPPU_PBU_CFG_IFBRD_ODMA_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x53c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_odma_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_odma_aful_negate_cfg", + NPPU_PBU_CFG_IFBRD_ODMA_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x540, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_odma_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_ppu_aful_assert_cfg", + NPPU_PBU_CFG_IFBRD_PPU_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x544, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_ppu_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "ifbrd_ppu_aful_negate_cfg", + NPPU_PBU_CFG_IFBRD_PPU_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x548, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_ifbrd_ppu_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "mc_logic_aful_assert_cfg", + NPPU_PBU_CFG_MC_LOGIC_AFUL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x54c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_mc_logic_aful_assert_cfg_reg, + NULL, + NULL, + }, + { + "mc_logic_aful_negate_cfg", + NPPU_PBU_CFG_MC_LOGIC_AFUL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x550, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_mc_logic_aful_negate_cfg_reg, + NULL, + NULL, + }, + { + "mc_logic_diff", + 
NPPU_PBU_CFG_MC_LOGIC_DIFFr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x5e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_mc_logic_diff_reg, + NULL, + NULL, + }, + { + "cfg_peak_port_cnt_clr", + NPPU_PBU_CFG_CFG_PEAK_PORT_CNT_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x0600, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_nppu_pbu_cfg_cfg_peak_port_cnt_clr_reg, + NULL, + NULL, + }, + { + "all_ftm_crdt_th", + NPPU_PBU_CFG_ALL_FTM_CRDT_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x0640, + (32 / 8), + 0, + 47 + 1, + 0, + 4, + 2, + g_nppu_pbu_cfg_all_ftm_crdt_th_reg, + NULL, + NULL, + }, + { + "all_ftm_link_th_01", + NPPU_PBU_CFG_ALL_FTM_LINK_TH_01r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x700, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pbu_cfg_all_ftm_link_th_01_reg, + NULL, + NULL, + }, + { + "all_ftm_link_th_23", + NPPU_PBU_CFG_ALL_FTM_LINK_TH_23r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x704, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pbu_cfg_all_ftm_link_th_23_reg, + NULL, + NULL, + }, + { + "all_ftm_link_th_45", + NPPU_PBU_CFG_ALL_FTM_LINK_TH_45r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x708, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_pbu_cfg_all_ftm_link_th_45_reg, + NULL, + NULL, + }, + { + "all_ftm_link_th_6", + NPPU_PBU_CFG_ALL_FTM_LINK_TH_6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x70c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_all_ftm_link_th_6_reg, + NULL, + NULL, + }, + { + "all_ftm_total_congest_th", + NPPU_PBU_CFG_ALL_FTM_TOTAL_CONGEST_THr, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x710, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_all_ftm_total_congest_th_reg, + NULL, + NULL, + }, + { + "cfg_crdt_mode", + NPPU_PBU_CFG_CFG_CRDT_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x720, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_cfg_crdt_mode_reg, + NULL, + NULL, + }, + { + "cfg_pfc_rdy_high_time", + NPPU_PBU_CFG_CFG_PFC_RDY_HIGH_TIMEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x728, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_cfg_pfc_rdy_high_time_reg, + NULL, + NULL, + }, + { + "cfg_pfc_rdy_low_time", + NPPU_PBU_CFG_CFG_PFC_RDY_LOW_TIMEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_CFG_BASE_ADDR + 0x72c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_cfg_cfg_pfc_rdy_low_time_reg, + NULL, + NULL, + }, + { + "pbu_fc_rdy", + NPPU_PBU_STAT_PBU_FC_RDYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x580, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_pbu_stat_pbu_fc_rdy_reg, + NULL, + NULL, + }, + { + "pbu_lif_group0_rdy0", + NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x5c0, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_nppu_pbu_stat_pbu_lif_group0_rdy0_reg, + NULL, + NULL, + }, + { + "pbu_lif_group0_rdy1", + NPPU_PBU_STAT_PBU_LIF_GROUP0_RDY1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x5c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_lif_group0_rdy1_reg, + NULL, + NULL, + }, + { + "pbu_lif_group1_rdy", + NPPU_PBU_STAT_PBU_LIF_GROUP1_RDYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR 
+ MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x5e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_lif_group1_rdy_reg, + NULL, + NULL, + }, + { + "pbu_lif_group0_pfc_rdy", + NPPU_PBU_STAT_PBU_LIF_GROUP0_PFC_RDYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x0600, + (32 / 8), + 0, + 11 + 1, + 0, + 4, + 1, + g_nppu_pbu_stat_pbu_lif_group0_pfc_rdy_reg, + NULL, + NULL, + }, + { + "pbu_lif_group1_pfc_rdy", + NPPU_PBU_STAT_PBU_LIF_GROUP1_PFC_RDYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x0640, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, + g_nppu_pbu_stat_pbu_lif_group1_pfc_rdy_reg, + NULL, + NULL, + }, + { + "pbu_sa_port_rdy_0_31", + NPPU_PBU_STAT_PBU_SA_PORT_RDY_0_31r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x680, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_sa_port_rdy_0_31_reg, + NULL, + NULL, + }, + { + "pbu_sa_port_rdy_32_50", + NPPU_PBU_STAT_PBU_SA_PORT_RDY_32_50r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x684, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_sa_port_rdy_32_50_reg, + NULL, + NULL, + }, + { + "pbu_pktrx_mr_pfc_rdy", + NPPU_PBU_STAT_PBU_PKTRX_MR_PFC_RDYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_pktrx_mr_pfc_rdy_reg, + NULL, + NULL, + }, + { + "pbu_ftm_crdt_port_rdy_0_31", + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_0_31r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_0_31_reg, + NULL, + NULL, + }, + { + "pbu_ftm_crdt_port_rdy_32_47", + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_RDY_32_47r, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_ftm_crdt_port_rdy_32_47_reg, + NULL, + NULL, + }, + { + "pbu_ftm_crdt_port_cng_rdy_0_31", + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_0_31r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_0_31_reg, + NULL, + NULL, + }, + { + "pbu_ftm_crdt_port_cng_rdy_32_47", + NPPU_PBU_STAT_PBU_FTM_CRDT_PORT_CNG_RDY_32_47r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_ftm_crdt_port_cng_rdy_32_47_reg, + NULL, + NULL, + }, + { + "pbu_ftm_crdt_sys_info", + NPPU_PBU_STAT_PBU_FTM_CRDT_SYS_INFOr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_PBU_STAT_BASE_ADDR + 0x6b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_pbu_stat_pbu_ftm_crdt_sys_info_reg, + NULL, + NULL, + }, + { + "weight_normal_mc", + NPPU_ISU_CFG_WEIGHT_NORMAL_MCr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_normal_mc_reg, + NULL, + NULL, + }, + { + "weight_sa_mc", + NPPU_ISU_CFG_WEIGHT_SA_MCr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_sa_mc_reg, + NULL, + NULL, + }, + { + "weight_etm", + NPPU_ISU_CFG_WEIGHT_ETMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_etm_reg, + NULL, + NULL, + }, + { + "weight_lp_mc", + NPPU_ISU_CFG_WEIGHT_LP_MCr, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_lp_mc_reg, + NULL, + NULL, + }, + { + "weight_oam", + NPPU_ISU_CFG_WEIGHT_OAMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_oam_reg, + NULL, + NULL, + }, + { + "weight_lif_ctrl1", + NPPU_ISU_CFG_WEIGHT_LIF_CTRL1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_lif_ctrl1_reg, + NULL, + NULL, + }, + { + "weight_lif_ctrl2", + NPPU_ISU_CFG_WEIGHT_LIF_CTRL2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_weight_lif_ctrl2_reg, + NULL, + NULL, + }, + { + "ecc_bypass_read", + NPPU_ISU_CFG_ECC_BYPASS_READr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "isu_int_mask", + NPPU_ISU_CFG_ISU_INT_MASKr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_isu_int_mask_reg, + NULL, + NULL, + }, + { + "cfg_crdt_cycle", + NPPU_ISU_CFG_CFG_CRDT_CYCLEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_cfg_crdt_cycle_reg, + NULL, + NULL, + }, + { + "cfg_crdt_value", + NPPU_ISU_CFG_CFG_CRDT_VALUEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, 
+ g_nppu_isu_cfg_cfg_crdt_value_reg, + NULL, + NULL, + }, + { + "isu_int_en", + NPPU_ISU_CFG_ISU_INT_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_isu_int_en_reg, + NULL, + NULL, + }, + { + "isu_ppu_fifo_fc", + NPPU_ISU_CFG_ISU_PPU_FIFO_FCr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_isu_ppu_fifo_fc_reg, + NULL, + NULL, + }, + { + "isu_int_status", + NPPU_ISU_CFG_ISU_INT_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 27, + g_nppu_isu_cfg_isu_int_status_reg, + NULL, + NULL, + }, + { + "fd_prog_full_assert_cfg", + NPPU_ISU_CFG_FD_PROG_FULL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_fd_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "fd_prog_full_negate_cfg", + NPPU_ISU_CFG_FD_PROG_FULL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_fd_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "lp_prog_full_assert_cfg", + NPPU_ISU_CFG_LP_PROG_FULL_ASSERT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_lp_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "lp_prog_full_negate_cfg", + NPPU_ISU_CFG_LP_PROG_FULL_NEGATE_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_cfg_lp_prog_full_negate_cfg_reg, 
+ NULL, + NULL, + }, + { + "debug_cnt_dat0", + NPPU_ISU_STAT_DEBUG_CNT_DAT0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat0_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat1", + NPPU_ISU_STAT_DEBUG_CNT_DAT1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat1_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat2", + NPPU_ISU_STAT_DEBUG_CNT_DAT2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat2_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat3", + NPPU_ISU_STAT_DEBUG_CNT_DAT3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat3_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat4", + NPPU_ISU_STAT_DEBUG_CNT_DAT4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat4_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat5", + NPPU_ISU_STAT_DEBUG_CNT_DAT5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat5_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat6", + NPPU_ISU_STAT_DEBUG_CNT_DAT6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat6_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat7", + NPPU_ISU_STAT_DEBUG_CNT_DAT7r, + NPPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat7_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat8", + NPPU_ISU_STAT_DEBUG_CNT_DAT8r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat8_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat9", + NPPU_ISU_STAT_DEBUG_CNT_DAT9r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat9_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat10", + NPPU_ISU_STAT_DEBUG_CNT_DAT10r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat10_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat11", + NPPU_ISU_STAT_DEBUG_CNT_DAT11r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat11_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat12", + NPPU_ISU_STAT_DEBUG_CNT_DAT12r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat12_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat13", + NPPU_ISU_STAT_DEBUG_CNT_DAT13r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat13_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat14", + NPPU_ISU_STAT_DEBUG_CNT_DAT14r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat14_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat15", + NPPU_ISU_STAT_DEBUG_CNT_DAT15r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat15_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat16", + NPPU_ISU_STAT_DEBUG_CNT_DAT16r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat16_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat17", + NPPU_ISU_STAT_DEBUG_CNT_DAT17r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat17_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat18", + NPPU_ISU_STAT_DEBUG_CNT_DAT18r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat18_reg, + NULL, + NULL, + }, + { + "debug_cnt_dat19", + NPPU_ISU_STAT_DEBUG_CNT_DAT19r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_isu_stat_debug_cnt_dat19_reg, + NULL, + NULL, + }, + { + "debug_cnt_cfg", + NPPU_ISU_STAT_DEBUG_CNT_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ISU_STAT_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_nppu_isu_stat_debug_cnt_cfg_reg, + NULL, + NULL, + }, + { + "exsa_tdm_offset", + NPPU_ODMA_CFG_EXSA_TDM_OFFSETr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_exsa_tdm_offset_reg, + NULL, + NULL, + }, + { + 
"ecc_bypass_readt", + NPPU_ODMA_CFG_ECC_BYPASS_READTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ecc_bypass_readt_reg, + NULL, + NULL, + }, + { + "odma_int_en_0", + NPPU_ODMA_CFG_ODMA_INT_EN_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 11, + g_nppu_odma_cfg_odma_int_en_0_reg, + NULL, + NULL, + }, + { + "odma_int_en_1", + NPPU_ODMA_CFG_ODMA_INT_EN_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 23, + g_nppu_odma_cfg_odma_int_en_1_reg, + NULL, + NULL, + }, + { + "odma_int_en_2", + NPPU_ODMA_CFG_ODMA_INT_EN_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_nppu_odma_cfg_odma_int_en_2_reg, + NULL, + NULL, + }, + { + "odma_int_en_3", + NPPU_ODMA_CFG_ODMA_INT_EN_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_nppu_odma_cfg_odma_int_en_3_reg, + NULL, + NULL, + }, + { + "odma_int_mask_0", + NPPU_ODMA_CFG_ODMA_INT_MASK_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 11, + g_nppu_odma_cfg_odma_int_mask_0_reg, + NULL, + NULL, + }, + { + "odma_int_mask_1", + NPPU_ODMA_CFG_ODMA_INT_MASK_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 24, + g_nppu_odma_cfg_odma_int_mask_1_reg, + NULL, + NULL, + }, + { + "odma_int_mask_2", + NPPU_ODMA_CFG_ODMA_INT_MASK_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_nppu_odma_cfg_odma_int_mask_2_reg, + NULL, + NULL, + }, + { + "odma_int_mask_3", + NPPU_ODMA_CFG_ODMA_INT_MASK_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_nppu_odma_cfg_odma_int_mask_3_reg, + NULL, + NULL, + }, + { + "odma_int_status_0", + NPPU_ODMA_CFG_ODMA_INT_STATUS_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 11, + g_nppu_odma_cfg_odma_int_status_0_reg, + NULL, + NULL, + }, + { + "odma_int_status_1", + NPPU_ODMA_CFG_ODMA_INT_STATUS_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 23, + g_nppu_odma_cfg_odma_int_status_1_reg, + NULL, + NULL, + }, + { + "odma_int_status_2", + NPPU_ODMA_CFG_ODMA_INT_STATUS_2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_nppu_odma_cfg_odma_int_status_2_reg, + NULL, + NULL, + }, + { + "odma_int_status_3", + NPPU_ODMA_CFG_ODMA_INT_STATUS_3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 15, + g_nppu_odma_cfg_odma_int_status_3_reg, + NULL, + NULL, + }, + { + "sp_tdm_err_nor_cfg", + NPPU_ODMA_CFG_SP_TDM_ERR_NOR_CFGr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_sp_tdm_err_nor_cfg_reg, + NULL, + NULL, + }, + { + "etm_dis_ptr_prog_full_cfg_a", + NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "etm_dis_ptr_prog_full_cfg_n", + NPPU_ODMA_CFG_ETM_DIS_PTR_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_dis_ptr_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "ftm_dis_ptr_prog_full_cfg_a", + NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "ftm_dis_ptr_prog_full_cfg_n", + NPPU_ODMA_CFG_FTM_DIS_PTR_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_dis_ptr_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tm_dis_fifo_prog_full_cfg_a", + NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "tm_dis_fifo_prog_full_cfg_n", + NPPU_ODMA_CFG_TM_DIS_FIFO_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tm_dis_fifo_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "err_prog_full_cfg_a", + NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_err_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + 
"err_prog_full_cfg_n", + NPPU_ODMA_CFG_ERR_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_err_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tdmuc_prog_full_cfg_a", + NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmuc_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "tdmuc_prog_full_cfg_n", + NPPU_ODMA_CFG_TDMUC_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmuc_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tdmmc_groupid_prog_full_cfg_a", + NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "tdmmc_groupid_prog_full_cfg_n", + NPPU_ODMA_CFG_TDMMC_GROUPID_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x009c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_groupid_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tdmmc_no_bitmap_prog_full_cfg_a", + NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "tdmmc_no_bitmap_prog_full_cfg_n", + NPPU_ODMA_CFG_TDMMC_NO_BITMAP_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_no_bitmap_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tdmmc_prog_full_cfg_a", + NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "tdmmc_prog_full_cfg_n", + NPPU_ODMA_CFG_TDMMC_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tdmmc_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "desc_prog_full_cfg_a", + NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_desc_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "desc_prog_full_cfg_n", + NPPU_ODMA_CFG_DESC_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_desc_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "dly_prog_full_cfg_a", + NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_dly_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "dly_prog_full_cfg_n", + NPPU_ODMA_CFG_DLY_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_dly_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "rsp_prog_full_cfg_a", + NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_rsp_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "rsp_prog_full_cfg_n", + NPPU_ODMA_CFG_RSP_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_rsp_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "nor_prog_full_cfg_a", + NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_nor_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "nor_prog_full_cfg_n", + NPPU_ODMA_CFG_NOR_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_nor_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "etm_nor_prog_full_cfg_a", + NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_nor_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "etm_nor_prog_full_cfg_n", + NPPU_ODMA_CFG_ETM_NOR_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_nor_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "ftm_nor_prog_full_cfg_a", + NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_nor_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "ftm_nor_prog_full_cfg_n", + NPPU_ODMA_CFG_FTM_NOR_PROG_FULL_CFG_Nr, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_nor_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "etm_prog_full_cfg_a", + NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "etm_prog_full_cfg_n", + NPPU_ODMA_CFG_ETM_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "ftm_prog_full_cfg_a", + NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "ftm_prog_full_cfg_n", + NPPU_ODMA_CFG_FTM_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "etm_nrdcnt_prog_full_cfg_a", + NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "etm_nrdcnt_prog_full_cfg_n", + NPPU_ODMA_CFG_ETM_NRDCNT_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_etm_nrdcnt_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "ftm_nrdcnt_prog_full_cfg_a", + 
NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "ftm_nrdcnt_prog_full_cfg_n", + NPPU_ODMA_CFG_FTM_NRDCNT_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x00fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ftm_nrdcnt_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "pp_prog_full_cfg_a", + NPPU_ODMA_CFG_PP_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_pp_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "pp_prog_full_cfg_n", + NPPU_ODMA_CFG_PP_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_pp_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "tm_weight", + NPPU_ODMA_CFG_TM_WEIGHTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_tm_weight_reg, + NULL, + NULL, + }, + { + "pp_weight", + NPPU_ODMA_CFG_PP_WEIGHTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0110, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_pp_weight_reg, + NULL, + NULL, + }, + { + "ifbcmd_prog_full_cfg_a", + NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ifbcmd_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "ifbcmd_prog_full_cfg_n", + 
NPPU_ODMA_CFG_IFBCMD_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x021c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_ifbcmd_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "mccnt_prog_full_cfg_a", + NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0220, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_mccnt_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "mccnt_prog_full_cfg_n", + NPPU_ODMA_CFG_MCCNT_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0224, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_mccnt_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "int_or_pon", + NPPU_ODMA_CFG_INT_OR_PONr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x228, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_int_or_pon_reg, + NULL, + NULL, + }, + { + "quemng_cnt_in_err_cnt", + NPPU_ODMA_CFG_QUEMNG_CNT_IN_ERR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x22c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_quemng_cnt_in_err_cnt_reg, + NULL, + NULL, + }, + { + "lif0_port_eop_cnt", + NPPU_ODMA_CFG_LIF0_PORT_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x240, + (32 / 8), + 0, + 47 + 1, + 0, + 4, + 1, + g_nppu_odma_cfg_lif0_port_eop_cnt_reg, + NULL, + NULL, + }, + { + "lif1_port_eop_cnt", + NPPU_ODMA_CFG_LIF1_PORT_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x300, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_nppu_odma_cfg_lif1_port_eop_cnt_reg, + NULL, + NULL, + }, + { + "lifc_port0_eop_cnt", + 
NPPU_ODMA_CFG_LIFC_PORT0_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x320, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_lifc_port0_eop_cnt_reg, + NULL, + NULL, + }, + { + "lifc_port1_eop_cnt", + NPPU_ODMA_CFG_LIFC_PORT1_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x324, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_lifc_port1_eop_cnt_reg, + NULL, + NULL, + }, + { + "fptr_fifo_prog_ept_cfg_n", + NPPU_ODMA_CFG_FPTR_FIFO_PROG_EPT_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_fptr_fifo_prog_ept_cfg_n_reg, + NULL, + NULL, + }, + { + "isu_fifo_prog_full_cfg_a", + NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_Ar, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x0408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_isu_fifo_prog_full_cfg_a_reg, + NULL, + NULL, + }, + { + "isu_fifo_prog_full_cfg_n", + NPPU_ODMA_CFG_ISU_FIFO_PROG_FULL_CFG_Nr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_ODMA_CFG_BASE_ADDR + 0x040c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_odma_cfg_isu_fifo_prog_full_cfg_n_reg, + NULL, + NULL, + }, + { + "ind_access_done", + NPPU_OAM_CFG_IND_ACCESS_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_access_done_reg, + NULL, + NULL, + }, + { + "ind_access_command", + NPPU_OAM_CFG_IND_ACCESS_COMMANDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_nppu_oam_cfg_ind_access_command_reg, + NULL, + NULL, + }, + { + "ind_dat0", + 
NPPU_OAM_CFG_IND_DAT0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat0_reg, + NULL, + NULL, + }, + { + "ind_dat1", + NPPU_OAM_CFG_IND_DAT1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat1_reg, + NULL, + NULL, + }, + { + "ind_dat2", + NPPU_OAM_CFG_IND_DAT2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat2_reg, + NULL, + NULL, + }, + { + "ind_dat3", + NPPU_OAM_CFG_IND_DAT3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat3_reg, + NULL, + NULL, + }, + { + "oam_tx_main_en", + NPPU_OAM_CFG_OAM_TX_MAIN_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_tx_main_en_reg, + NULL, + NULL, + }, + { + "tx_total_num", + NPPU_OAM_CFG_TX_TOTAL_NUMr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tx_total_num_reg, + NULL, + NULL, + }, + { + "oam_chk_main_en", + NPPU_OAM_CFG_OAM_CHK_MAIN_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_chk_main_en_reg, + NULL, + NULL, + }, + { + "chk_total_num0", + NPPU_OAM_CFG_CHK_TOTAL_NUM0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_nppu_oam_cfg_chk_total_num0_reg, + NULL, + NULL, + }, + { + "ma_chk_main_en", + NPPU_OAM_CFG_MA_CHK_MAIN_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ma_chk_main_en_reg, + NULL, + NULL, + }, + { + "chk_total_num1", + NPPU_OAM_CFG_CHK_TOTAL_NUM1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_chk_total_num1_reg, + NULL, + NULL, + }, + { + "tx_stat_en", + NPPU_OAM_CFG_TX_STAT_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tx_stat_en_reg, + NULL, + NULL, + }, + { + "rec_stat_en", + NPPU_OAM_CFG_REC_STAT_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_rec_stat_en_reg, + NULL, + NULL, + }, + { + "stat_oam_rdy_mask", + NPPU_OAM_CFG_STAT_OAM_RDY_MASKr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_stat_oam_rdy_mask_reg, + NULL, + NULL, + }, + { + "session_grading0", + NPPU_OAM_CFG_SESSION_GRADING0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_session_grading0_reg, + NULL, + NULL, + }, + { + "session_grading1", + NPPU_OAM_CFG_SESSION_GRADING1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_session_grading1_reg, + NULL, + NULL, + }, + { + "session_grading2", + NPPU_OAM_CFG_SESSION_GRADING2r, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_session_grading2_reg, + NULL, + NULL, + }, + { + "session_grading3", + NPPU_OAM_CFG_SESSION_GRADING3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_session_grading3_reg, + NULL, + NULL, + }, + { + "bfd_chk_haddr", + NPPU_OAM_CFG_BFD_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_bfd_chk_haddr_reg, + NULL, + NULL, + }, + { + "ethccm_chk_haddr", + NPPU_OAM_CFG_ETHCCM_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ethccm_chk_haddr_reg, + NULL, + NULL, + }, + { + "tpbfd_chk_haddr", + NPPU_OAM_CFG_TPBFD_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpbfd_chk_haddr_reg, + NULL, + NULL, + }, + { + "tpoam_ccm_chk_haddr", + NPPU_OAM_CFG_TPOAM_CCM_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpoam_ccm_chk_haddr_reg, + NULL, + NULL, + }, + { + "bfd_tx_haddr", + NPPU_OAM_CFG_BFD_TX_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_bfd_tx_haddr_reg, + NULL, + NULL, + }, + { + "ethccm_tx_haddr", + NPPU_OAM_CFG_ETHCCM_TX_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0064, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ethccm_tx_haddr_reg, + NULL, + NULL, + }, + { + "tpbfd_tx_haddr", + NPPU_OAM_CFG_TPBFD_TX_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpbfd_tx_haddr_reg, + NULL, + NULL, + }, + { + "tpoam_ccm_tx_haddr", + NPPU_OAM_CFG_TPOAM_CCM_TX_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpoam_ccm_tx_haddr_reg, + NULL, + NULL, + }, + { + "ethccm_ma_chk_haddr", + NPPU_OAM_CFG_ETHCCM_MA_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ethccm_ma_chk_haddr_reg, + NULL, + NULL, + }, + { + "tpccm_ma_chk_haddr", + NPPU_OAM_CFG_TPCCM_MA_CHK_HADDRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpccm_ma_chk_haddr_reg, + NULL, + NULL, + }, + { + "groupnum_ram_clr", + NPPU_OAM_CFG_GROUPNUM_RAM_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_groupnum_ram_clr_reg, + NULL, + NULL, + }, + { + "index_ram0_clr", + NPPU_OAM_CFG_INDEX_RAM0_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_index_ram0_clr_reg, + NULL, + NULL, + }, + { + "index_ram1_clr", + NPPU_OAM_CFG_INDEX_RAM1_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_index_ram1_clr_reg, + NULL, + NULL, + }, + { 
+ "rmep_ram_clr", + NPPU_OAM_CFG_RMEP_RAM_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_rmep_ram_clr_reg, + NULL, + NULL, + }, + { + "ma_ram_clr", + NPPU_OAM_CFG_MA_RAM_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ma_ram_clr_reg, + NULL, + NULL, + }, + { + "ram_init_done", + NPPU_OAM_CFG_RAM_INIT_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ram_init_done_reg, + NULL, + NULL, + }, + { + "rec_bfd_debug_en", + NPPU_OAM_CFG_REC_BFD_DEBUG_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_rec_bfd_debug_en_reg, + NULL, + NULL, + }, + { + "oam_session_int", + NPPU_OAM_CFG_OAM_SESSION_INTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0098, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_nppu_oam_cfg_oam_session_int_reg, + NULL, + NULL, + }, + { + "pon_int", + NPPU_OAM_CFG_PON_INTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x009c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_oam_cfg_pon_int_reg, + NULL, + NULL, + }, + { + "oam_int_clr", + NPPU_OAM_CFG_OAM_INT_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_int_clr_reg, + NULL, + NULL, + }, + { + "type_int_clr0", + NPPU_OAM_CFG_TYPE_INT_CLR0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00a4, + (32 / 8), + 
0, + 0, + 0, + 0, + 6, + g_nppu_oam_cfg_type_int_clr0_reg, + NULL, + NULL, + }, + { + "type_int_clr1", + NPPU_OAM_CFG_TYPE_INT_CLR1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00a8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_nppu_oam_cfg_type_int_clr1_reg, + NULL, + NULL, + }, + { + "interrupt_mask", + NPPU_OAM_CFG_INTERRUPT_MASKr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00ac, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_nppu_oam_cfg_interrupt_mask_reg, + NULL, + NULL, + }, + { + "int0_index", + NPPU_OAM_CFG_INT0_INDEXr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00b0, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_nppu_oam_cfg_int0_index_reg, + NULL, + NULL, + }, + { + "int1_index", + NPPU_OAM_CFG_INT1_INDEXr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00c0, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_nppu_oam_cfg_int1_index_reg, + NULL, + NULL, + }, + { + "int0_index_region", + NPPU_OAM_CFG_INT0_INDEX_REGIONr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_int0_index_region_reg, + NULL, + NULL, + }, + { + "int1_index_region", + NPPU_OAM_CFG_INT1_INDEX_REGIONr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_int1_index_region_reg, + NULL, + NULL, + }, + { + "bdiinfo_fwft_fifo_th", + NPPU_OAM_CFG_BDIINFO_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_bdiinfo_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "recsec_fwft_fifo_th", + 
NPPU_OAM_CFG_RECSEC_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_recsec_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "timing_chk_info0_fwft_fifo_th", + NPPU_OAM_CFG_TIMING_CHK_INFO0_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_timing_chk_info0_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "recma_fwft_fifo_th", + NPPU_OAM_CFG_RECMA_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_recma_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "timing_chk_info1_fwft_fifo_th", + NPPU_OAM_CFG_TIMING_CHK_INFO1_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_timing_chk_info1_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "oam_txinst_fifo_th", + NPPU_OAM_CFG_OAM_TXINST_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_txinst_fifo_th_reg, + NULL, + NULL, + }, + { + "oam_rdinfo_fwft_fifo_th", + NPPU_OAM_CFG_OAM_RDINFO_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_rdinfo_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "lm_cnt_fwft_fifo_th", + NPPU_OAM_CFG_LM_CNT_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_lm_cnt_fwft_fifo_th_reg, + NULL, + NULL, + 
}, + { + "oam_pkt_fifo_th", + NPPU_OAM_CFG_OAM_PKT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_pkt_fifo_th_reg, + NULL, + NULL, + }, + { + "reclm_stat_fifo_th", + NPPU_OAM_CFG_RECLM_STAT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_reclm_stat_fifo_th_reg, + NULL, + NULL, + }, + { + "txlm_stat_fifo_th", + NPPU_OAM_CFG_TXLM_STAT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x00100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txlm_stat_fifo_th_reg, + NULL, + NULL, + }, + { + "oam_chk_fwft_fifo_th", + NPPU_OAM_CFG_OAM_CHK_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_chk_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "txoam_stat_fifo_th", + NPPU_OAM_CFG_TXOAM_STAT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txoam_stat_fifo_th_reg, + NULL, + NULL, + }, + { + "recoam_stat_fifo_th", + NPPU_OAM_CFG_RECOAM_STAT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_recoam_stat_fifo_th_reg, + NULL, + NULL, + }, + { + "txpkt_data_fwft_fifo_th", + NPPU_OAM_CFG_TXPKT_DATA_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0110, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txpkt_data_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "tstpkt_fwft_fifo_th", + 
NPPU_OAM_CFG_TSTPKT_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0114, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tstpkt_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "tst_txinst_fwft_fifo_th", + NPPU_OAM_CFG_TST_TXINST_FWFT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0118, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tst_txinst_fwft_fifo_th_reg, + NULL, + NULL, + }, + { + "tstrx_main_en", + NPPU_OAM_CFG_TSTRX_MAIN_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x011c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tstrx_main_en_reg, + NULL, + NULL, + }, + { + "tsttx_cfg_para_tbl2", + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0120, + (32 / 8), + 0, + 9 + 1, + 0, + 12, + 8, + g_nppu_oam_cfg_tsttx_cfg_para_tbl2_reg, + NULL, + NULL, + }, + { + "tsttx_cfg_para_tbl1", + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0124, + (32 / 8), + 0, + 9 + 1, + 0, + 12, + 1, + g_nppu_oam_cfg_tsttx_cfg_para_tbl1_reg, + NULL, + NULL, + }, + { + "tsttx_cfg_para_tbl0", + NPPU_OAM_CFG_TSTTX_CFG_PARA_TBL0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0128, + (32 / 8), + 0, + 9 + 1, + 0, + 12, + 3, + g_nppu_oam_cfg_tsttx_cfg_para_tbl0_reg, + NULL, + NULL, + }, + { + "tstrx_cfg_para", + NPPU_OAM_CFG_TSTRX_CFG_PARAr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0198, + (32 / 8), + 0, + 0x9 + 1, + 0, + 4, + 2, + g_nppu_oam_cfg_tstrx_cfg_para_reg, + NULL, + NULL, + }, + { + "fifo_status_int_en_0", + 
NPPU_OAM_CFG_FIFO_STATUS_INT_EN_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x01c0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_oam_cfg_fifo_status_int_en_0_reg, + NULL, + NULL, + }, + { + "fifo_status_int_en_1", + NPPU_OAM_CFG_FIFO_STATUS_INT_EN_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x01c4, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_nppu_oam_cfg_fifo_status_int_en_1_reg, + NULL, + NULL, + }, + { + "fifo_status_int_mask_0", + NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_nppu_oam_cfg_fifo_status_int_mask_0_reg, + NULL, + NULL, + }, + { + "fifo_status_int_mask_1", + NPPU_OAM_CFG_FIFO_STATUS_INT_MASK_1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x01cc, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_nppu_oam_cfg_fifo_status_int_mask_1_reg, + NULL, + NULL, + }, + { + "fifo_status_int_status", + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x01d0, + (32 / 8), + 0, + 39 + 1, + 0, + 4, + 1, + g_nppu_oam_cfg_fifo_status_int_status_reg, + NULL, + NULL, + }, + { + "main_frequency", + NPPU_OAM_CFG_MAIN_FREQUENCYr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0270, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_main_frequency_reg, + NULL, + NULL, + }, + { + "oam_cfg_type", + NPPU_OAM_CFG_OAM_CFG_TYPEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0274, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_cfg_type_reg, + NULL, + NULL, + }, + { + "fst_swch_eth_head0", + 
NPPU_OAM_CFG_FST_SWCH_ETH_HEAD0r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0278, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fst_swch_eth_head0_reg, + NULL, + NULL, + }, + { + "fst_swch_eth_head1", + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD1r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x027c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fst_swch_eth_head1_reg, + NULL, + NULL, + }, + { + "fst_swch_eth_head2", + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD2r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0280, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fst_swch_eth_head2_reg, + NULL, + NULL, + }, + { + "fst_swch_eth_head3", + NPPU_OAM_CFG_FST_SWCH_ETH_HEAD3r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0284, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fst_swch_eth_head3_reg, + NULL, + NULL, + }, + { + "oam_fs_txinst_fifo_th", + NPPU_OAM_CFG_OAM_FS_TXINST_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0288, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_fs_txinst_fifo_th_reg, + NULL, + NULL, + }, + { + "oam_ma_fs_txinst_fifo_th", + NPPU_OAM_CFG_OAM_MA_FS_TXINST_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x028c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_ma_fs_txinst_fifo_th_reg, + NULL, + NULL, + }, + { + "pon_int_ram_clr", + NPPU_OAM_CFG_PON_INT_RAM_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0290, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_int_ram_clr_reg, + NULL, + NULL, + }, + { + "pon_p_int_index", + NPPU_OAM_CFG_PON_P_INT_INDEXr, + NPPU, 
+ DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0294, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_p_int_index_reg, + NULL, + NULL, + }, + { + "pon_protect_pkt_fifo_th", + NPPU_OAM_CFG_PON_PROTECT_PKT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0298, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_protect_pkt_fifo_th_reg, + NULL, + NULL, + }, + { + "pon_laser_off_en", + NPPU_OAM_CFG_PON_LASER_OFF_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x029c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_laser_off_en_reg, + NULL, + NULL, + }, + { + "pon_prtct_pkt_tx_en", + NPPU_OAM_CFG_PON_PRTCT_PKT_TX_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_prtct_pkt_tx_en_reg, + NULL, + NULL, + }, + { + "cfg_pon_master", + NPPU_OAM_CFG_CFG_PON_MASTERr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_cfg_pon_master_reg, + NULL, + NULL, + }, + { + "level_mode", + NPPU_OAM_CFG_LEVEL_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_level_mode_reg, + NULL, + NULL, + }, + { + "interrupt_en", + NPPU_OAM_CFG_INTERRUPT_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_interrupt_en_reg, + NULL, + NULL, + }, + { + "pon_laser_on_en", + NPPU_OAM_CFG_PON_LASER_ON_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 
0x02b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_pon_laser_on_en_reg, + NULL, + NULL, + }, + { + "ti_pon_sd", + NPPU_OAM_CFG_TI_PON_SDr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ti_pon_sd_reg, + NULL, + NULL, + }, + { + "ti_pon_los", + NPPU_OAM_CFG_TI_PON_LOSr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ti_pon_los_reg, + NULL, + NULL, + }, + { + "ind_dat4", + NPPU_OAM_CFG_IND_DAT4r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat4_reg, + NULL, + NULL, + }, + { + "ind_dat5", + NPPU_OAM_CFG_IND_DAT5r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat5_reg, + NULL, + NULL, + }, + { + "ind_dat6", + NPPU_OAM_CFG_IND_DAT6r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat6_reg, + NULL, + NULL, + }, + { + "ind_dat7", + NPPU_OAM_CFG_IND_DAT7r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat7_reg, + NULL, + NULL, + }, + { + "ind_dat8", + NPPU_OAM_CFG_IND_DAT8r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat8_reg, + NULL, + NULL, + }, + { + "ind_dat9", + NPPU_OAM_CFG_IND_DAT9r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 
0x02d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat9_reg, + NULL, + NULL, + }, + { + "ind_dat10", + NPPU_OAM_CFG_IND_DAT10r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat10_reg, + NULL, + NULL, + }, + { + "ind_dat11", + NPPU_OAM_CFG_IND_DAT11r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat11_reg, + NULL, + NULL, + }, + { + "ind_dat12", + NPPU_OAM_CFG_IND_DAT12r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat12_reg, + NULL, + NULL, + }, + { + "ind_dat13", + NPPU_OAM_CFG_IND_DAT13r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat13_reg, + NULL, + NULL, + }, + { + "ind_dat14", + NPPU_OAM_CFG_IND_DAT14r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat14_reg, + NULL, + NULL, + }, + { + "ind_dat15", + NPPU_OAM_CFG_IND_DAT15r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ind_dat15_reg, + NULL, + NULL, + }, + { + "oam_2544_pkt_fifo_th", + NPPU_OAM_CFG_OAM_2544_PKT_FIFO_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_2544_pkt_fifo_th_reg, + NULL, + NULL, + }, + { + "txinfo_ram_clr", + NPPU_OAM_CFG_TXINFO_RAM_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txinfo_ram_clr_reg, + NULL, + NULL, + }, + { + "txinfo_ram_init_done", + NPPU_OAM_CFG_TXINFO_RAM_INIT_DONEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txinfo_ram_init_done_reg, + NULL, + NULL, + }, + { + "fifo_status_int_status40", + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS40r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fifo_status_int_status40_reg, + NULL, + NULL, + }, + { + "fifo_status_int_status41", + NPPU_OAM_CFG_FIFO_STATUS_INT_STATUS41r, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x02fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_fifo_status_int_status41_reg, + NULL, + NULL, + }, + { + "oam_2544_fun_en", + NPPU_OAM_CFG_OAM_2544_FUN_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0300, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_2544_fun_en_reg, + NULL, + NULL, + }, + { + "oam_2544_stat_clr", + NPPU_OAM_CFG_OAM_2544_STAT_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0304, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_oam_2544_stat_clr_reg, + NULL, + NULL, + }, + { + "txdis_default", + NPPU_OAM_CFG_TXDIS_DEFAULTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0308, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txdis_default_reg, + NULL, + NULL, + }, + { + "txdis_default_en", + NPPU_OAM_CFG_TXDIS_DEFAULT_ENr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + 
MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x030c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_txdis_default_en_reg, + NULL, + NULL, + }, + { + "tpbfd_firstchk_th", + NPPU_OAM_CFG_TPBFD_FIRSTCHK_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0310, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpbfd_firstchk_th_reg, + NULL, + NULL, + }, + { + "ethccm_firstchk_th", + NPPU_OAM_CFG_ETHCCM_FIRSTCHK_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0314, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_ethccm_firstchk_th_reg, + NULL, + NULL, + }, + { + "tpccm_firstchk_th", + NPPU_OAM_CFG_TPCCM_FIRSTCHK_THr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_CFG_BASE_ADDR + 0x0318, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_cfg_tpccm_firstchk_th_reg, + NULL, + NULL, + }, + { + "txstat_req_cnt", + NPPU_OAM_STAT_TXSTAT_REQ_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_txstat_req_cnt_reg, + NULL, + NULL, + }, + { + "chkstat_req_cnt", + NPPU_OAM_STAT_CHKSTAT_REQ_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_chkstat_req_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_fc_cnt", + NPPU_OAM_STAT_STAT_OAM_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_stat_oam_fc_cnt_reg, + NULL, + NULL, + }, + { + "bfdseq_req_cnt", + NPPU_OAM_STAT_BFDSEQ_REQ_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_nppu_oam_stat_bfdseq_req_cnt_reg, + NULL, + NULL, + }, + { + "lmcnt_req_cnt", + NPPU_OAM_STAT_LMCNT_REQ_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_lmcnt_req_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_lm_rsp_cnt", + NPPU_OAM_STAT_STAT_OAM_LM_RSP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_stat_oam_lm_rsp_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_lm_fc_cnt", + NPPU_OAM_STAT_STAT_OAM_LM_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_stat_oam_lm_fc_cnt_reg, + NULL, + NULL, + }, + { + "se_req_cnt", + NPPU_OAM_STAT_SE_REQ_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_se_req_cnt_reg, + NULL, + NULL, + }, + { + "se_rsp_cnt", + NPPU_OAM_STAT_SE_RSP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_se_rsp_cnt_reg, + NULL, + NULL, + }, + { + "se_oam_fc_cnt", + NPPU_OAM_STAT_SE_OAM_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_se_oam_fc_cnt_reg, + NULL, + NULL, + }, + { + "oam_se_fc_cnt", + NPPU_OAM_STAT_OAM_SE_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_se_fc_cnt_reg, + NULL, + NULL, + }, + { + "oam_pktrx_sop_cnt", + NPPU_OAM_STAT_OAM_PKTRX_SOP_CNTr, + 
NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_pktrx_sop_cnt_reg, + NULL, + NULL, + }, + { + "oam_pktrx_eop_cnt", + NPPU_OAM_STAT_OAM_PKTRX_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_pktrx_eop_cnt_reg, + NULL, + NULL, + }, + { + "pktrx_oam_fc_cnt", + NPPU_OAM_STAT_PKTRX_OAM_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_pktrx_oam_fc_cnt_reg, + NULL, + NULL, + }, + { + "pktrx_oam_tst_fc_cnt", + NPPU_OAM_STAT_PKTRX_OAM_TST_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_pktrx_oam_tst_fc_cnt_reg, + NULL, + NULL, + }, + { + "odma_oam_sop_cnt", + NPPU_OAM_STAT_ODMA_OAM_SOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_odma_oam_sop_cnt_reg, + NULL, + NULL, + }, + { + "odma_oam_eop_cnt", + NPPU_OAM_STAT_ODMA_OAM_EOP_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_odma_oam_eop_cnt_reg, + NULL, + NULL, + }, + { + "oam_odma_fc_cnt", + NPPU_OAM_STAT_OAM_ODMA_FC_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_odma_fc_cnt_reg, + NULL, + NULL, + }, + { + "rec_ma_pkt_illegal_cnt", + NPPU_OAM_STAT_REC_MA_PKT_ILLEGAL_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, 
+ SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_ma_pkt_illegal_cnt_reg, + NULL, + NULL, + }, + { + "rec_rmep_pkt_illegal_cnt", + NPPU_OAM_STAT_REC_RMEP_PKT_ILLEGAL_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_rmep_pkt_illegal_cnt_reg, + NULL, + NULL, + }, + { + "rec_eth_ais_pkt_cnt", + NPPU_OAM_STAT_REC_ETH_AIS_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_eth_ais_pkt_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_ais_pkt_cnt", + NPPU_OAM_STAT_REC_TP_AIS_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_ais_pkt_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_csf_pkt_cnt", + NPPU_OAM_STAT_REC_TP_CSF_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_csf_pkt_cnt_reg, + NULL, + NULL, + }, + { + "rec_eth_level_defect_cnt", + NPPU_OAM_STAT_REC_ETH_LEVEL_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_eth_level_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_eth_megid_defect_cnt", + NPPU_OAM_STAT_REC_ETH_MEGID_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_eth_megid_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_eth_mepid_defect_cnt", + NPPU_OAM_STAT_REC_ETH_MEPID_DEFECT_CNTr, + NPPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_eth_mepid_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_eth_interval_defect_cnt", + NPPU_OAM_STAT_REC_ETH_INTERVAL_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_eth_interval_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_sess_unenable_cnt", + NPPU_OAM_STAT_REC_SESS_UNENABLE_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_sess_unenable_cnt_reg, + NULL, + NULL, + }, + { + "oam_2544_rd_pkt_cnt", + NPPU_OAM_STAT_OAM_2544_RD_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_2544_rd_pkt_cnt_reg, + NULL, + NULL, + }, + { + "debug_cnt_clr", + NPPU_OAM_STAT_DEBUG_CNT_CLRr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_debug_cnt_clr_reg, + NULL, + NULL, + }, + { + "oam_pktrx_catch_data", + NPPU_OAM_STAT_OAM_PKTRX_CATCH_DATAr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0x1F + 1, + 0, + 4, + 1, + g_nppu_oam_stat_oam_pktrx_catch_data_reg, + NULL, + NULL, + }, + { + "odma_oam_catch_data", + NPPU_OAM_STAT_ODMA_OAM_CATCH_DATAr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x00f8, + (32 / 8), + 0, + 0x1F + 1, + 0, + 4, + 1, + g_nppu_oam_stat_odma_oam_catch_data_reg, + NULL, + NULL, + }, + { + "tst_session_tx_cnt", + 
NPPU_OAM_STAT_TST_SESSION_TX_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0178, + (32 / 8), + 0, + 0x9 + 1, + 0, + 4, + 1, + g_nppu_oam_stat_tst_session_tx_cnt_reg, + NULL, + NULL, + }, + { + "tst_session_rx_cnt", + NPPU_OAM_STAT_TST_SESSION_RX_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01a0, + (32 / 8), + 0, + 0x9 + 1, + 0, + 4, + 1, + g_nppu_oam_stat_tst_session_rx_cnt_reg, + NULL, + NULL, + }, + { + "tstrx_lost_cnt", + NPPU_OAM_STAT_TSTRX_LOST_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0x9 + 1, + 0, + 4, + 1, + g_nppu_oam_stat_tstrx_lost_cnt_reg, + NULL, + NULL, + }, + { + "bfdseq_wr_cnt", + NPPU_OAM_STAT_BFDSEQ_WR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_bfdseq_wr_cnt_reg, + NULL, + NULL, + }, + { + "bfdtime_wr_cnt", + NPPU_OAM_STAT_BFDTIME_WR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_bfdtime_wr_cnt_reg, + NULL, + NULL, + }, + { + "lmcnt_wr_cnt", + NPPU_OAM_STAT_LMCNT_WR_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_lmcnt_wr_cnt_reg, + NULL, + NULL, + }, + { + "oam_fs_pkt_cnt", + NPPU_OAM_STAT_OAM_FS_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x01fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_fs_pkt_cnt_reg, + NULL, + NULL, + }, + { + "oam_ma_fs_pkt_cnt", + NPPU_OAM_STAT_OAM_MA_FS_PKT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_oam_ma_fs_pkt_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_level_defect_cnt", + NPPU_OAM_STAT_REC_TP_LEVEL_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_level_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_megid_defect_cnt", + NPPU_OAM_STAT_REC_TP_MEGID_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_megid_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_mepid_defect_cnt", + NPPU_OAM_STAT_REC_TP_MEPID_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_mepid_defect_cnt_reg, + NULL, + NULL, + }, + { + "rec_tp_interval_defect_cnt", + NPPU_OAM_STAT_REC_TP_INTERVAL_DEFECT_CNTr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rec_tp_interval_defect_cnt_reg, + NULL, + NULL, + }, + { + "rd_reg_clear_mode", + NPPU_OAM_STAT_RD_REG_CLEAR_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rd_reg_clear_mode_reg, + NULL, + NULL, + }, + { + "rd_data_reg_clear_mode", + NPPU_OAM_STAT_RD_DATA_REG_CLEAR_MODEr, + NPPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_NPPU_BASE_ADDR + MODULE_NPPU_OAM_STAT_BASE_ADDR + 0x0218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_nppu_oam_stat_rd_data_reg_clear_mode_reg, + NULL, + NULL, + }, + { + "oam_int_status_ram_0", + 
NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM_0r, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x00000000, + (32 / 8), + 0, + 4095 + 1, + 0, + 1, + 7, + g_nppu_oam_cfg_indir_oam_int_status_ram_0_reg, + NULL, + NULL, + }, + { + "oam_int_status_ram1", + NPPU_OAM_CFG_INDIR_OAM_INT_STATUS_RAM1r, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x10000000, + (32 / 8), + 0, + 4095 + 1, + 0, + 1, + 12, + g_nppu_oam_cfg_indir_oam_int_status_ram1_reg, + NULL, + NULL, + }, + { + "tst_pkt_tx_para_ram", + NPPU_OAM_CFG_INDIR_TST_PKT_TX_PARA_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x20000000, + (96 / 8), + 0, + 127 + 1, + 0, + 1, + 12, + g_nppu_oam_cfg_indir_tst_pkt_tx_para_ram_reg, + NULL, + NULL, + }, + { + "groupnumram", + NPPU_OAM_CFG_INDIR_GROUPNUMRAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x30000000, + (32 / 8), + 0, + 4095 + 1, + 0, + 1, + 1, + g_nppu_oam_cfg_indir_groupnumram_reg, + NULL, + NULL, + }, + { + "oam_tx_tbl_ram", + NPPU_OAM_CFG_INDIR_OAM_TX_TBL_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x40000000, + (64 / 8), + 0, + 4095 + 1, + 0, + 1, + 8, + g_nppu_oam_cfg_indir_oam_tx_tbl_ram_reg, + NULL, + NULL, + }, + { + "oam_chk_tbl_ram", + NPPU_OAM_CFG_INDIR_OAM_CHK_TBL_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x50000000, + (64 / 8), + 0, + 4095 + 1, + 0, + 1, + 9, + g_nppu_oam_cfg_indir_oam_chk_tbl_ram_reg, + NULL, + NULL, + }, + { + "oam_ma_chk_tbl_ram", + NPPU_OAM_CFG_INDIR_OAM_MA_CHK_TBL_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x60000000, + (160 / 8), + 0, + 4095 + 1, + 0, + 1, + 22, + g_nppu_oam_cfg_indir_oam_ma_chk_tbl_ram_reg, + NULL, + NULL, + }, + { + "oam_2544_tx_ram", + NPPU_OAM_CFG_INDIR_OAM_2544_TX_RAMr, + NPPU, + DPP_REG_FLAG_INDIRECT, + DPP_REG_UNI_ARRAY, + 0x90000000, + (64 / 8), + 0, + 127 + 1, + 0, + 1, + 5, + g_nppu_oam_cfg_indir_oam_2544_tx_ram_reg, + NULL, + NULL, + }, + { + "interrupt_en_r", + PPU_PPU_INTERRUPT_EN_Rr, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_interrupt_en_r_reg, + NULL, + NULL, + }, + { + "mec_host_interrupt", + PPU_PPU_MEC_HOST_INTERRUPTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mec_host_interrupt_reg, + NULL, + NULL, + }, + { + "dbg_rtl_date", + PPU_PPU_DBG_RTL_DATEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dbg_rtl_date_reg, + NULL, + NULL, + }, + { + "dup_start_num_cfg", + PPU_PPU_DUP_START_NUM_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x02c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_start_num_cfg_reg, + NULL, + NULL, + }, + { + "debug_data_write_complete", + PPU_PPU_DEBUG_DATA_WRITE_COMPLETEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_debug_data_write_complete_reg, + NULL, + NULL, + }, + { + "uc_mc_wrr_cfg", + PPU_PPU_UC_MC_WRR_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_uc_mc_wrr_cfg_reg, + NULL, + NULL, + }, + { + "debug_pkt_send_en", + PPU_PPU_DEBUG_PKT_SEND_ENr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_debug_pkt_send_en_reg, + NULL, + NULL, + }, + { + "dup_tbl_ind_access_done", + PPU_PPU_DUP_TBL_IND_ACCESS_DONEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x04c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_tbl_ind_access_done_reg, + NULL, + NULL, 
+ }, + { + "isu_ppu_demux_fifo_interrupt_mask", + PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x080, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_isu_ppu_demux_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "ppu_multicast_fifo_interrupt_mask", + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x084, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_ppu_ppu_ppu_multicast_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "ppu_in_schedule_fifo_interrupt_mask", + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x088, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_in_schedule_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "ppu_mf_out_fifo_interrupt_mask", + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x08c, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_ppu_mf_out_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_req_schedule_fifo_interrupt_mask", + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x090, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_rsp_schedule_fifo_interrupt_mask", + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x094, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "ppu_mccnt_fifo_interrupt_mask", + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_MASKr, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x098, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_mccnt_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_mask_l", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x09c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_mask_l_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_mask_m", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Mr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0a0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_mask_m_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_mask_h", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_MASK_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_coprocessor_fifo_interrupt_mask_h_reg, + NULL, + NULL, + }, + { + "ppu_ram_check_err_mask", + PPU_PPU_PPU_RAM_CHECK_ERR_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_ram_check_err_mask_reg, + NULL, + NULL, + }, + { + "instrmem_fifo_interrupt_mask", + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0ac, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_fifo_interrupt_mask_reg, + NULL, + NULL, + }, + { + "isu_ppu_demux_fifo_interrupt_sta", + PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xc0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_isu_ppu_demux_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + 
"ppu_multicast_fifo_interrupt_sta", + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0c4, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_ppu_ppu_ppu_multicast_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "ppu_in_schedule_fifo_interrupt_sta", + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0c8, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_in_schedule_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "ppu_mf_out_fifo_interrupt_sta", + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xcc, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_ppu_mf_out_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_req_schedule_fifo_interrupt_sta", + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0d0, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_rsp_schedule_fifo_interrupt_sta", + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0d4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "ppu_mccnt_fifo_interrupt_sta", + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0d8, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_mccnt_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_sta_l", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0dc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_sta_l_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_sta_m", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Mr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0e0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_sta_m_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_sta_h", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_STA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xe4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_coprocessor_fifo_interrupt_sta_h_reg, + NULL, + NULL, + }, + { + "instrmem_fifo_interrupt_sta", + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0e8, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_ppu_ppu_instrmem_fifo_interrupt_sta_reg, + NULL, + NULL, + }, + { + "ppu_ram_check_ecc_err_flag_1", + PPU_PPU_PPU_RAM_CHECK_ECC_ERR_FLAG_1r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x0f0, + (32 / 8), + 0, + 0, + 0, + 0, + 26, + g_ppu_ppu_ppu_ram_check_ecc_err_flag_1_reg, + NULL, + NULL, + }, + { + "isu_ppu_demux_fifo_interrupt_flag", + PPU_PPU_ISU_PPU_DEMUX_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x100, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_isu_ppu_demux_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "ppu_multicast_fifo_interrupt_flag", + PPU_PPU_PPU_MULTICAST_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x104, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_ppu_ppu_ppu_multicast_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + 
"ppu_in_schedule_fifo_interrupt_flag", + PPU_PPU_PPU_IN_SCHEDULE_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x108, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_in_schedule_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "ppu_mf_out_fifo_interrupt_flag", + PPU_PPU_PPU_MF_OUT_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x10c, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_ppu_mf_out_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_req_schedule_fifo_interrupt_flag", + PPU_PPU_PBU_MCODE_PF_REQ_SCHEDULE_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x110, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_pbu_mcode_pf_req_schedule_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "pbu_mcode_pf_rsp_schedule_fifo_interrupt_flag", + PPU_PPU_PBU_MCODE_PF_RSP_SCHEDULE_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x114, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_pbu_mcode_pf_rsp_schedule_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "ppu_mccnt_fifo_interrupt_flag", + PPU_PPU_PPU_MCCNT_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x118, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_mccnt_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_flag_l", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x11c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_flag_l_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_flag_m", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Mr, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x120, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_ppu_ppu_coprocessor_fifo_interrupt_flag_m_reg, + NULL, + NULL, + }, + { + "coprocessor_fifo_interrupt_flag_h", + PPU_PPU_COPROCESSOR_FIFO_INTERRUPT_FLAG_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x124, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_coprocessor_fifo_interrupt_flag_h_reg, + NULL, + NULL, + }, + { + "instrmem_fifo_interrupt_flag", + PPU_PPU_INSTRMEM_FIFO_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x128, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_fifo_interrupt_flag_reg, + NULL, + NULL, + }, + { + "instrmem_ram_int_out", + PPU_PPU_INSTRMEM_RAM_INT_OUTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x140, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_ram_int_out_reg, + NULL, + NULL, + }, + { + "instrmem_ram_int_mask", + PPU_PPU_INSTRMEM_RAM_INT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x144, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_ram_int_mask_reg, + NULL, + NULL, + }, + { + "instrmem_ram_int_stat", + PPU_PPU_INSTRMEM_RAM_INT_STATr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x148, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_ram_int_stat_reg, + NULL, + NULL, + }, + { + "instrmem_ram_int_flag", + PPU_PPU_INSTRMEM_RAM_INT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x14c, + (32 / 8), + 0, + 0, + 0, + 0, + 12, + g_ppu_ppu_instrmem_ram_int_flag_reg, + NULL, + NULL, + }, + { + "ppu_count_cfg", + PPU_PPU_PPU_COUNT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x158, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ppu_ppu_ppu_count_cfg_reg, + NULL, + NULL, + }, + { + "ppu_statics_cfg", + PPU_PPU_PPU_STATICS_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x15c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ppu_ppu_ppu_statics_cfg_reg, + NULL, + NULL, + }, + { + "ppu_statics_wb_cfg", + PPU_PPU_PPU_STATICS_WB_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x160, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_ppu_ppu_ppu_statics_wb_cfg_reg, + NULL, + NULL, + }, + { + "wr_table_self_rsp_en_cfg", + PPU_PPU_WR_TABLE_SELF_RSP_EN_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x164, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_wr_table_self_rsp_en_cfg_reg, + NULL, + NULL, + }, + { + "ppu_random_arbiter_8to1_cfg", + PPU_PPU_PPU_RANDOM_ARBITER_8TO1_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x168, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_random_arbiter_8to1_cfg_reg, + NULL, + NULL, + }, + { + "ppu_reorder_bypass_flow_num_cfg", + PPU_PPU_PPU_REORDER_BYPASS_FLOW_NUM_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x16c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_reorder_bypass_flow_num_cfg_reg, + NULL, + NULL, + }, + { + "cos_meter_cfg_h", + PPU_PPU_COS_METER_CFG_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x170, + (32 / 8), + 0, + 7 + 1, + 0, + 8, + 5, + g_ppu_ppu_cos_meter_cfg_h_reg, + NULL, + NULL, + }, + { + "cos_meter_cfg_l", + PPU_PPU_COS_METER_CFG_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x174, + (32 / 8), + 0, + 7 + 1, + 0, + 8, + 3, + 
g_ppu_ppu_cos_meter_cfg_l_reg, + NULL, + NULL, + }, + { + "instrmem_rdy", + PPU_PPU_INSTRMEM_RDYr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x1c0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_rdy_reg, + NULL, + NULL, + }, + { + "instrmem_addr", + PPU_PPU_INSTRMEM_ADDRr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x1d0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 2, + g_ppu_ppu_instrmem_addr_reg, + NULL, + NULL, + }, + { + "instrmem_ind_access_done", + PPU_PPU_INSTRMEM_IND_ACCESS_DONEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x1e0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_ind_access_done_reg, + NULL, + NULL, + }, + { + "instrmem_instr0_data_l", + PPU_PPU_INSTRMEM_INSTR0_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x1f0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr0_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_instr0_data_h", + PPU_PPU_INSTRMEM_INSTR0_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x200, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr0_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_instr1_data_l", + PPU_PPU_INSTRMEM_INSTR1_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x210, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr1_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_instr1_data_h", + PPU_PPU_INSTRMEM_INSTR1_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x220, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr1_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_instr2_data_l", + 
PPU_PPU_INSTRMEM_INSTR2_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x230, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr2_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_instr2_data_h", + PPU_PPU_INSTRMEM_INSTR2_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x240, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr2_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_instr3_data_l", + PPU_PPU_INSTRMEM_INSTR3_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x250, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr3_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_instr3_data_h", + PPU_PPU_INSTRMEM_INSTR3_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x260, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_instr3_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr0_data_l", + PPU_PPU_INSTRMEM_READ_INSTR0_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x270, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr0_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr0_data_h", + PPU_PPU_INSTRMEM_READ_INSTR0_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x280, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr0_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr1_data_l", + PPU_PPU_INSTRMEM_READ_INSTR1_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x290, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr1_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr1_data_h", + 
PPU_PPU_INSTRMEM_READ_INSTR1_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x2a0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr1_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr2_data_l", + PPU_PPU_INSTRMEM_READ_INSTR2_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x2b0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr2_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr2_data_h", + PPU_PPU_INSTRMEM_READ_INSTR2_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x2c0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr2_data_h_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr3_data_l", + PPU_PPU_INSTRMEM_READ_INSTR3_DATA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x2d0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr3_data_l_reg, + NULL, + NULL, + }, + { + "instrmem_read_instr3_data_h", + PPU_PPU_INSTRMEM_READ_INSTR3_DATA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x2e0, + (32 / 8), + 0, + 2 + 1, + 0, + 4, + 1, + g_ppu_ppu_instrmem_read_instr3_data_h_reg, + NULL, + NULL, + }, + { + "se_ppu_mc_srh_fc_cnt_h", + PPU_PPU_SE_PPU_MC_SRH_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x300, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_ppu_mc_srh_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "se_ppu_mc_srh_fc_cnt_l", + PPU_PPU_SE_PPU_MC_SRH_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x304, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_ppu_mc_srh_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_se_mc_srh_fc_cnt_h", + 
PPU_PPU_PPU_SE_MC_SRH_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x308, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_se_mc_srh_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_se_mc_srh_fc_cnt_l", + PPU_PPU_PPU_SE_MC_SRH_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x30c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_se_mc_srh_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_se_mc_srh_vld_cnt_h", + PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x310, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_se_mc_srh_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_se_mc_srh_vld_cnt_l", + PPU_PPU_PPU_SE_MC_SRH_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x314, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_se_mc_srh_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "se_ppu_mc_srh_vld_cnt_h", + PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x318, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_ppu_mc_srh_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "se_ppu_mc_srh_vld_cnt_l", + PPU_PPU_SE_PPU_MC_SRH_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x31c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_ppu_mc_srh_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_fc_cnt_h", + PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x320, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_fc_cnt_l", + PPU_PPU_PBU_PPU_LOGIC_PF_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x324, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_logic_pf_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pbu_logic_rsp_fc_cnt_h", + PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x328, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pbu_logic_rsp_fc_cnt_l", + PPU_PPU_PPU_PBU_LOGIC_RSP_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x32c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_logic_rsp_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pbu_logic_pf_req_vld_cnt_h", + PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x330, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pbu_logic_pf_req_vld_cnt_l", + PPU_PPU_PPU_PBU_LOGIC_PF_REQ_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x334, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_logic_pf_req_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_rsp_vld_cnt_h", + PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x338, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_logic_pf_rsp_vld_cnt_l", + PPU_PPU_PBU_PPU_LOGIC_PF_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x33c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_logic_pf_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_ifb_rd_fc_cnt_h", + PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_Hr, + 
PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x340, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_ifb_rd_fc_cnt_l", + PPU_PPU_PBU_PPU_IFB_RD_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x344, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_ifb_rd_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_wb_fc_cnt_h", + PPU_PPU_PBU_PPU_WB_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x348, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_wb_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_wb_fc_cnt_l", + PPU_PPU_PBU_PPU_WB_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x34c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_wb_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_req_vld_cnt_h", + PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x350, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_req_vld_cnt_l", + PPU_PPU_PPU_PBU_MCODE_PF_REQ_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x354, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_req_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_mcode_pf_rsp_vld_cnt_h", + PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x358, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_mcode_pf_rsp_vld_cnt_l", + PPU_PPU_PBU_PPU_MCODE_PF_RSP_VLD_CNT_Lr, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x35c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_mcode_pf_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "odma_ppu_para_fc_cnt_h", + PPU_PPU_ODMA_PPU_PARA_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x360, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_para_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "odma_ppu_para_fc_cnt_l", + PPU_PPU_ODMA_PPU_PARA_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x364, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_para_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "odma_ppu_mccnt_wr_fc_cnt_h", + PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x368, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "odma_ppu_mccnt_wr_fc_cnt_l", + PPU_PPU_ODMA_PPU_MCCNT_WR_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x36c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_mccnt_wr_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_odma_mccnt_wr_vld_cnt_h", + PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x370, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_odma_mccnt_wr_vld_cnt_l", + PPU_PPU_PPU_ODMA_MCCNT_WR_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x374, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_odma_mccnt_wr_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "odma_ppu_mccnt_rsp_vld_cnt_h", + PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, 
+ DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x378, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "odma_ppu_mccnt_rsp_vld_cnt_l", + PPU_PPU_ODMA_PPU_MCCNT_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x37c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_odma_ppu_mccnt_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_uc_fc_cnt_h", + PPU_PPU_PPU_PKTRX_UC_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x380, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_uc_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_uc_fc_cnt_l", + PPU_PPU_PPU_PKTRX_UC_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x384, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_uc_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_fc_cnt_h", + PPU_PPU_PPU_PKTRX_MC_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x388, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_fc_cnt_l", + PPU_PPU_PPU_PKTRX_MC_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x38c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "pktrx_ppu_desc_vld_cnt_h", + PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x390, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pktrx_ppu_desc_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "pktrx_ppu_desc_vld_cnt_l", + PPU_PPU_PKTRX_PPU_DESC_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 
0x394, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pktrx_ppu_desc_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pbu_ifb_req_vld_cnt_h", + PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x398, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pbu_ifb_req_vld_cnt_l", + PPU_PPU_PPU_PBU_IFB_REQ_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x39c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_ifb_req_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_ifb_rsp_vld_cnt_h", + PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_ifb_rsp_vld_cnt_l", + PPU_PPU_PBU_PPU_IFB_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_ifb_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pbu_wb_vld_cnt_h", + PPU_PPU_PPU_PBU_WB_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_wb_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pbu_wb_vld_cnt_l", + PPU_PPU_PPU_PBU_WB_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_wb_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "pbu_ppu_reorder_para_vld_cnt_h", + PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "pbu_ppu_reorder_para_vld_cnt_l", + PPU_PPU_PBU_PPU_REORDER_PARA_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pbu_ppu_reorder_para_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_odma_para_vld_cnt_h", + PPU_PPU_PPU_ODMA_PARA_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_odma_para_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_odma_para_vld_cnt_l", + PPU_PPU_PPU_ODMA_PARA_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x3bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_odma_para_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_mc_vld_cnt_h", + PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_mc_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_mc_vld_cnt_l", + PPU_PPU_STATICS_ISU_PPU_MC_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_mc_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_mc_loop_vld_cnt_h", + PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_mc_loop_vld_cnt_l", + PPU_PPU_STATICS_ISU_PPU_MC_LOOP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x414, + (32 
/ 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_mc_loop_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_uc_vld_cnt_h", + PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x418, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_uc_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_uc_vld_cnt_l", + PPU_PPU_STATICS_ISU_PPU_UC_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x41c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_uc_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_uc_bufnumis0_vld_cnt_h", + PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x420, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_isu_ppu_uc_bufnumis0_vld_cnt_l", + PPU_PPU_STATICS_ISU_PPU_UC_BUFNUMIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x424, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_isu_ppu_uc_bufnumis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x438, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x43c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_bufnumis0_vld_cnt_h", 
+ PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x440, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_bufnumis0_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_BUFNUMIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x444, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_bufnumis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_srcportis0_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x448, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_srcportis0_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x44c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_srcportis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_srcportis1_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_mc_srcportis1_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_MC_SRCPORTIS1_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x454, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_mc_srcportis1_vld_cnt_l_reg, + NULL, + NULL, + 
}, + { + "statics_demux_schedule_uc_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x458, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x45c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_bufnumis0_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x460, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_bufnumis0_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_BUFNUMIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x464, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_bufnumis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_srcportis0_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x468, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_srcportis0_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x46c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_srcportis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { 
+ "statics_demux_schedule_uc_srcportis1_vld_cnt_h", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x470, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_demux_schedule_uc_srcportis1_vld_cnt_l", + PPU_PPU_STATICS_DEMUX_SCHEDULE_UC_SRCPORTIS1_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x474, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_demux_schedule_uc_srcportis1_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x478, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x47c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_bufnumis0_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_bufnumis0_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_BUFNUMIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_bufnumis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_srcportis0_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, 
+ SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_srcportis0_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_SRCPORTIS0_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x48c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_srcportis0_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_srcportis1_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x490, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_srcportis1_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_SRCPORTIS1_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x494, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_srcportis1_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_halt_send_type_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x498, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_halt_send_type_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_HALT_SEND_TYPE_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x49c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_halt_send_type_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_mf_type_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_mf_type_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_MF_TYPE_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_mf_type_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_halt_continue_end_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_halt_continue_end_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_HALT_CONTINUE_END_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_halt_continue_end_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_dup_flag_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_dup_flag_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_DUP_FLAG_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_dup_flag_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_last_flag_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_last_flag_vld_cnt_l", + 
PPU_PPU_STATICS_PPU_WB_LAST_FLAG_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_last_flag_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_dis_flag_vld_cnt_h", + PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_ppu_wb_dis_flag_vld_cnt_l", + PPU_PPU_STATICS_PPU_WB_DIS_FLAG_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_ppu_wb_dis_flag_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h", + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l", + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_SEND_TYPE_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_halt_send_type_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_mf_type_vld_cnt_h", + PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_mf_type_vld_cnt_l", + 
PPU_PPU_STATICS_PBU_PPU_REORDER_MF_TYPE_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_mf_type_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h", + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l", + PPU_PPU_STATICS_PBU_PPU_REORDER_HALT_CONTINUE_END_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_statics_pbu_ppu_reorder_halt_continue_end_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "car_green_pkt_vld_cnt_h", + PPU_PPU_CAR_GREEN_PKT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_green_pkt_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "car_green_pkt_vld_cnt_l", + PPU_PPU_CAR_GREEN_PKT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_green_pkt_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "car_yellow_pkt_vld_cnt_h", + PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_yellow_pkt_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "car_yellow_pkt_vld_cnt_l", + PPU_PPU_CAR_YELLOW_PKT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4ec, 
+ (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_yellow_pkt_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "car_red_pkt_vld_cnt_h", + PPU_PPU_CAR_RED_PKT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_red_pkt_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "car_red_pkt_vld_cnt_l", + PPU_PPU_CAR_RED_PKT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_red_pkt_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "car_drop_pkt_vld_cnt_h", + PPU_PPU_CAR_DROP_PKT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_drop_pkt_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "car_drop_pkt_vld_cnt_l", + PPU_PPU_CAR_DROP_PKT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x4fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_drop_pkt_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_vld_cnt_h", + PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x500, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_vld_cnt_l", + PPU_PPU_PPU_PKTRX_MC_PTR_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x504, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "isu_ppu_loopback_fc_cnt_h", + PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x508, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_isu_ppu_loopback_fc_cnt_h_reg, + NULL, + NULL, + }, + { + "isu_ppu_loopback_fc_cnt_l", + PPU_PPU_ISU_PPU_LOOPBACK_FC_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x50c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_ppu_loopback_fc_cnt_l_reg, + NULL, + NULL, + }, + { + "ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg", + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x540, + (32 / 8), + 0, + 0x5 + 1, + 0, + 4, + 1, + g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg", + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x558, + (32 / 8), + 0, + 0x5 + 1, + 0, + 4, + 1, + g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg", + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x570, + (32 / 8), + 0, + 0x5 + 1, + 0, + 4, + 1, + g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg", + PPU_PPU_PPU_CULSTER_PBU_MCODE_PF_REQ_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x588, + (32 / 8), + 0, + 0x5 + 1, + 0, + 4, + 1, + g_ppu_ppu_ppu_culster_pbu_mcode_pf_req_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg", + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5a0, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg", + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg", + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg", + PPU_PPU_PPU_PBU_MCODE_PF_RSP_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pbu_mcode_pf_rsp_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "mccnt_fifo_prog_full_assert_cfg", + PPU_PPU_MCCNT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mccnt_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "mccnt_fifo_prog_full_negate_cfg", + PPU_PPU_MCCNT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mccnt_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "mccnt_fifo_prog_empty_assert_cfg", + PPU_PPU_MCCNT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mccnt_fifo_prog_empty_assert_cfg_reg, + NULL, + 
NULL, + }, + { + "mccnt_fifo_prog_empty_negate_cfg", + PPU_PPU_MCCNT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mccnt_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "uc_mf_fifo_prog_full_assert_cfg", + PPU_PPU_UC_MF_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_uc_mf_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "uc_mf_fifo_prog_full_negate_cfg", + PPU_PPU_UC_MF_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_uc_mf_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "uc_mf_fifo_prog_empty_assert_cfg", + PPU_PPU_UC_MF_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_uc_mf_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "uc_mf_fifo_prog_empty_negate_cfg", + PPU_PPU_UC_MF_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_uc_mf_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "mc_mf_fifo_prog_full_assert_cfg", + PPU_PPU_MC_MF_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mc_mf_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "mc_mf_fifo_prog_full_negate_cfg", + PPU_PPU_MC_MF_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 
0x5d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mc_mf_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "mc_mf_fifo_prog_empty_assert_cfg", + PPU_PPU_MC_MF_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mc_mf_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "mc_mf_fifo_prog_empty_negate_cfg", + PPU_PPU_MC_MF_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_mc_mf_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "isu_mf_fifo_prog_full_assert_cfg", + PPU_PPU_ISU_MF_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mf_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_mf_fifo_prog_full_negate_cfg", + PPU_PPU_ISU_MF_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mf_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "isu_mf_fifo_prog_empty_assert_cfg", + PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mf_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_mf_fifo_prog_empty_negate_cfg", + PPU_PPU_ISU_MF_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mf_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "isu_fwft_mf_fifo_prog_empty_assert_cfg", + 
PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_fwft_mf_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_fwft_mf_fifo_prog_empty_negate_cfg", + PPU_PPU_ISU_FWFT_MF_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x5fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_fwft_mf_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "isu_mc_para_mf_fifo_prog_full_assert_cfg", + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x600, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mc_para_mf_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_mc_para_mf_fifo_prog_full_negate_cfg", + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x604, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mc_para_mf_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "isu_mc_para_mf_fifo_prog_empty_assert_cfg", + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x608, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "isu_mc_para_mf_fifo_prog_empty_negate_cfg", + PPU_PPU_ISU_MC_PARA_MF_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x60c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_isu_mc_para_mf_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "group_id_fifo_prog_full_assert_cfg", + PPU_PPU_GROUP_ID_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x610, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_group_id_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "group_id_fifo_prog_full_negate_cfg", + PPU_PPU_GROUP_ID_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x614, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_group_id_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "group_id_fifo_prog_empty_assert_cfg", + PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x618, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_group_id_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "group_id_fifo_prog_empty_negate_cfg", + PPU_PPU_GROUP_ID_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x61c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_group_id_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "sa_para_fifo_prog_full_assert_cfg", + PPU_PPU_SA_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x620, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_sa_para_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "sa_para_fifo_prog_full_negate_cfg", + PPU_PPU_SA_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x624, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_sa_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "sa_para_fifo_prog_empty_assert_cfg", + PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x628, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_sa_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "sa_para_fifo_prog_empty_negate_cfg", + PPU_PPU_SA_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x62c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_sa_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "se_mc_rsp_fifo_prog_full_assert_cfg", + PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x630, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_mc_rsp_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "se_mc_rsp_fifo_prog_full_negate_cfg", + PPU_PPU_SE_MC_RSP_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x634, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_mc_rsp_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "se_mc_rsp_fifo_prog_empty_assert_cfg", + PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x638, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_mc_rsp_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "se_mc_rsp_fifo_prog_empty_negate_cfg", + PPU_PPU_SE_MC_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x63c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_se_mc_rsp_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "dup_para_fifo_prog_full_assert_cfg", + PPU_PPU_DUP_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x640, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_para_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "dup_para_fifo_prog_full_negate_cfg", + 
PPU_PPU_DUP_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x644, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "dup_para_fifo_prog_empty_assert_cfg", + PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x648, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "dup_para_fifo_prog_empty_negate_cfg", + PPU_PPU_DUP_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x64c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "pf_rsp_fifo_prog_full_assert_cfg", + PPU_PPU_PF_RSP_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x650, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_rsp_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_rsp_fifo_prog_full_negate_cfg", + PPU_PPU_PF_RSP_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x654, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_rsp_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "pf_rsp_fifo_prog_empty_assert_cfg", + PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x658, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_rsp_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_rsp_fifo_prog_empty_negate_cfg", + PPU_PPU_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x65c, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_rsp_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "dup_freeptr_fifo_prog_full_assert_cfg", + PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x660, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_freeptr_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "dup_freeptr_fifo_prog_full_negate_cfg", + PPU_PPU_DUP_FREEPTR_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x664, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_freeptr_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "dup_freeptr_fifo_prog_empty_assert_cfg", + PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x668, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_freeptr_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "dup_freeptr_fifo_prog_empty_negate_cfg", + PPU_PPU_DUP_FREEPTR_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x66c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_dup_freeptr_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "pf_req_fifo_prog_full_assert_cfg", + PPU_PPU_PF_REQ_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x670, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_req_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_req_fifo_prog_full_negate_cfg", + PPU_PPU_PF_REQ_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x674, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_req_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + 
"pf_req_fifo_prog_empty_assert_cfg", + PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x678, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_req_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "pf_req_fifo_prog_empty_negate_cfg", + PPU_PPU_PF_REQ_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x67c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pf_req_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "car_flag_fifo_prog_full_assert_cfg", + PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x680, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_flag_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "car_flag_fifo_prog_full_negate_cfg", + PPU_PPU_CAR_FLAG_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x684, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_flag_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "car_flag_fifo_prog_empty_assert_cfg", + PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x688, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_flag_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "car_flag_fifo_prog_empty_negate_cfg", + PPU_PPU_CAR_FLAG_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x68c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_car_flag_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_out_afifo_prog_full_assert_cfg", + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x690, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_out_afifo_prog_full_negate_cfg", + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x694, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_out_afifo_prog_empty_assert_cfg", + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x698, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_out_afifo_prog_empty_negate_cfg", + PPU_PPU_PPU_CLUSTER_MF_OUT_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x69c, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cluster_mf_out_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_key_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x6f0, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cop_key_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_key_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_KEY_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x6f4, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cop_key_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_key_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_ASSERT_CFGr, 
+ PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x6f8, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cop_key_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_key_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_KEY_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x6fc, + (32 / 8), + 0, + 0x5 + 1, + 0, + 16, + 1, + g_ppu_ppu_ppu_cop_key_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_para_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x750, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_para_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x754, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x758, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x75c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + 
"ppu_cop_random_mod_result_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x760, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_result_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x764, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x768, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_RANDOM_MOD_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x76c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_random_mod_result_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_checksum_result_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x770, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_checksum_result_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_PPU_CSR_BASE_ADDR + 0x774, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_checksum_result_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x778, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_checksum_result_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_CHECKSUM_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x77c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_checksum_result_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_first_para_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x780, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_first_para_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x784, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x788, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg", + 
PPU_PPU_PPU_COP_CRC_FIRST_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x78c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_first_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_bypass_delay_prog_full_assert_cfg", + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x790, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_bypass_delay_prog_full_negate_cfg", + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x794, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_bypass_delay_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x798, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_bypass_delay_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_CRC_BYPASS_DELAY_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x79c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_bypass_delay_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_second_para_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_assert_cfg_reg, + 
NULL, + NULL, + }, + { + "ppu_cop_crc_second_para_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_CRC_SECOND_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_second_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_PPU_CSR_BASE_ADDR + 0x7b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_CRC_RESULT_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_crc_result_fwft_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg", + 
PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_COP_MULTIPLY_PARA_RESULT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_cop_multiply_para_result_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "free_global_num_fwft_fifo_prog_full_assert_cfg", + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_free_global_num_fwft_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "free_global_num_fwft_fifo_prog_full_negate_cfg", + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7e4, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 1, + g_ppu_ppu_free_global_num_fwft_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "free_global_num_fwft_fifo_prog_empty_assert_cfg", + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_free_global_num_fwft_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "free_global_num_fwft_fifo_prog_empty_negate_cfg", + PPU_PPU_FREE_GLOBAL_NUM_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_free_global_num_fwft_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg", + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg", + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg", + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x7f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg", + PPU_PPU_PPU_PKTRX_MC_PTR_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR 
+ 0x7fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_ppu_pktrx_mc_ptr_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "pkt_data0", + PPU_PPU_PKT_DATA0r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x800, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data0_reg, + NULL, + NULL, + }, + { + "pkt_data1", + PPU_PPU_PKT_DATA1r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x804, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data1_reg, + NULL, + NULL, + }, + { + "pkt_data2", + PPU_PPU_PKT_DATA2r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x808, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data2_reg, + NULL, + NULL, + }, + { + "pkt_data3", + PPU_PPU_PKT_DATA3r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x80c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data3_reg, + NULL, + NULL, + }, + { + "pkt_data4", + PPU_PPU_PKT_DATA4r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x810, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data4_reg, + NULL, + NULL, + }, + { + "pkt_data5", + PPU_PPU_PKT_DATA5r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x814, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data5_reg, + NULL, + NULL, + }, + { + "pkt_data6", + PPU_PPU_PKT_DATA6r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x818, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data6_reg, + NULL, + NULL, + }, + { + "pkt_data7", + PPU_PPU_PKT_DATA7r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x81c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data7_reg, + NULL, + NULL, + }, + { + 
"pkt_data8", + PPU_PPU_PKT_DATA8r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x820, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data8_reg, + NULL, + NULL, + }, + { + "pkt_data9", + PPU_PPU_PKT_DATA9r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x824, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data9_reg, + NULL, + NULL, + }, + { + "pkt_data10", + PPU_PPU_PKT_DATA10r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x828, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data10_reg, + NULL, + NULL, + }, + { + "pkt_data11", + PPU_PPU_PKT_DATA11r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x82c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data11_reg, + NULL, + NULL, + }, + { + "pkt_data12", + PPU_PPU_PKT_DATA12r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x830, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data12_reg, + NULL, + NULL, + }, + { + "pkt_data13", + PPU_PPU_PKT_DATA13r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x834, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data13_reg, + NULL, + NULL, + }, + { + "pkt_data14", + PPU_PPU_PKT_DATA14r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x838, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data14_reg, + NULL, + NULL, + }, + { + "pkt_data15", + PPU_PPU_PKT_DATA15r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x83c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data15_reg, + NULL, + NULL, + }, + { + "pkt_data16", + PPU_PPU_PKT_DATA16r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_PPU_CSR_BASE_ADDR + 0x840, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data16_reg, + NULL, + NULL, + }, + { + "pkt_data17", + PPU_PPU_PKT_DATA17r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x844, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data17_reg, + NULL, + NULL, + }, + { + "pkt_data18", + PPU_PPU_PKT_DATA18r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x848, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data18_reg, + NULL, + NULL, + }, + { + "pkt_data19", + PPU_PPU_PKT_DATA19r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x84c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data19_reg, + NULL, + NULL, + }, + { + "pkt_data20", + PPU_PPU_PKT_DATA20r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x850, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data20_reg, + NULL, + NULL, + }, + { + "pkt_data21", + PPU_PPU_PKT_DATA21r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x854, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data21_reg, + NULL, + NULL, + }, + { + "pkt_data22", + PPU_PPU_PKT_DATA22r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x858, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data22_reg, + NULL, + NULL, + }, + { + "pkt_data23", + PPU_PPU_PKT_DATA23r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x85c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data23_reg, + NULL, + NULL, + }, + { + "pkt_data24", + PPU_PPU_PKT_DATA24r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x860, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data24_reg, + NULL, + 
NULL, + }, + { + "pkt_data25", + PPU_PPU_PKT_DATA25r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x864, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data25_reg, + NULL, + NULL, + }, + { + "pkt_data26", + PPU_PPU_PKT_DATA26r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x868, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data26_reg, + NULL, + NULL, + }, + { + "pkt_data27", + PPU_PPU_PKT_DATA27r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x86c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data27_reg, + NULL, + NULL, + }, + { + "pkt_data28", + PPU_PPU_PKT_DATA28r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x870, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data28_reg, + NULL, + NULL, + }, + { + "pkt_data29", + PPU_PPU_PKT_DATA29r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x874, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data29_reg, + NULL, + NULL, + }, + { + "pkt_data30", + PPU_PPU_PKT_DATA30r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x878, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data30_reg, + NULL, + NULL, + }, + { + "pkt_data31", + PPU_PPU_PKT_DATA31r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x87c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data31_reg, + NULL, + NULL, + }, + { + "pkt_data32", + PPU_PPU_PKT_DATA32r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x880, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data32_reg, + NULL, + NULL, + }, + { + "pkt_data33", + PPU_PPU_PKT_DATA33r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x884, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data33_reg, + NULL, + NULL, + }, + { + "pkt_data34", + PPU_PPU_PKT_DATA34r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x888, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data34_reg, + NULL, + NULL, + }, + { + "pkt_data35", + PPU_PPU_PKT_DATA35r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x88c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data35_reg, + NULL, + NULL, + }, + { + "pkt_data36", + PPU_PPU_PKT_DATA36r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x890, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data36_reg, + NULL, + NULL, + }, + { + "pkt_data37", + PPU_PPU_PKT_DATA37r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x894, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data37_reg, + NULL, + NULL, + }, + { + "pkt_data38", + PPU_PPU_PKT_DATA38r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x898, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data38_reg, + NULL, + NULL, + }, + { + "pkt_data39", + PPU_PPU_PKT_DATA39r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x89c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data39_reg, + NULL, + NULL, + }, + { + "pkt_data40", + PPU_PPU_PKT_DATA40r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data40_reg, + NULL, + NULL, + }, + { + "pkt_data41", + PPU_PPU_PKT_DATA41r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_pkt_data41_reg, + NULL, + NULL, + }, + { + "pkt_data42", + PPU_PPU_PKT_DATA42r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data42_reg, + NULL, + NULL, + }, + { + "pkt_data43", + PPU_PPU_PKT_DATA43r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data43_reg, + NULL, + NULL, + }, + { + "pkt_data44", + PPU_PPU_PKT_DATA44r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data44_reg, + NULL, + NULL, + }, + { + "pkt_data45", + PPU_PPU_PKT_DATA45r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data45_reg, + NULL, + NULL, + }, + { + "pkt_data46", + PPU_PPU_PKT_DATA46r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data46_reg, + NULL, + NULL, + }, + { + "pkt_data47", + PPU_PPU_PKT_DATA47r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data47_reg, + NULL, + NULL, + }, + { + "pkt_data48", + PPU_PPU_PKT_DATA48r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data48_reg, + NULL, + NULL, + }, + { + "pkt_data49", + PPU_PPU_PKT_DATA49r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data49_reg, + NULL, + NULL, + }, + { + "pkt_data50", + PPU_PPU_PKT_DATA50r, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data50_reg, + NULL, + NULL, + }, + { + "pkt_data51", + PPU_PPU_PKT_DATA51r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data51_reg, + NULL, + NULL, + }, + { + "pkt_data52", + PPU_PPU_PKT_DATA52r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data52_reg, + NULL, + NULL, + }, + { + "pkt_data53", + PPU_PPU_PKT_DATA53r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data53_reg, + NULL, + NULL, + }, + { + "pkt_data54", + PPU_PPU_PKT_DATA54r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data54_reg, + NULL, + NULL, + }, + { + "pkt_data55", + PPU_PPU_PKT_DATA55r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data55_reg, + NULL, + NULL, + }, + { + "pkt_data56", + PPU_PPU_PKT_DATA56r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data56_reg, + NULL, + NULL, + }, + { + "pkt_data57", + PPU_PPU_PKT_DATA57r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data57_reg, + NULL, + NULL, + }, + { + "pkt_data58", + PPU_PPU_PKT_DATA58r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8e8, + (32 / 8), + 
0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data58_reg, + NULL, + NULL, + }, + { + "pkt_data59", + PPU_PPU_PKT_DATA59r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data59_reg, + NULL, + NULL, + }, + { + "pkt_data60", + PPU_PPU_PKT_DATA60r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data60_reg, + NULL, + NULL, + }, + { + "pkt_data61", + PPU_PPU_PKT_DATA61r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data61_reg, + NULL, + NULL, + }, + { + "pkt_data62", + PPU_PPU_PKT_DATA62r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data62_reg, + NULL, + NULL, + }, + { + "pkt_data63", + PPU_PPU_PKT_DATA63r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x8fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data63_reg, + NULL, + NULL, + }, + { + "pkt_data64", + PPU_PPU_PKT_DATA64r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x900, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data64_reg, + NULL, + NULL, + }, + { + "pkt_data65", + PPU_PPU_PKT_DATA65r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x904, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data65_reg, + NULL, + NULL, + }, + { + "pkt_data66", + PPU_PPU_PKT_DATA66r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x908, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data66_reg, + NULL, + NULL, + }, + { + "pkt_data67", + 
PPU_PPU_PKT_DATA67r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x90c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data67_reg, + NULL, + NULL, + }, + { + "pkt_data68", + PPU_PPU_PKT_DATA68r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x910, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data68_reg, + NULL, + NULL, + }, + { + "pkt_data69", + PPU_PPU_PKT_DATA69r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x914, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data69_reg, + NULL, + NULL, + }, + { + "pkt_data70", + PPU_PPU_PKT_DATA70r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x918, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data70_reg, + NULL, + NULL, + }, + { + "pkt_data71", + PPU_PPU_PKT_DATA71r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x91c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data71_reg, + NULL, + NULL, + }, + { + "pkt_data72", + PPU_PPU_PKT_DATA72r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x920, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data72_reg, + NULL, + NULL, + }, + { + "pkt_data73", + PPU_PPU_PKT_DATA73r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x924, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data73_reg, + NULL, + NULL, + }, + { + "pkt_data74", + PPU_PPU_PKT_DATA74r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x928, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data74_reg, + NULL, + NULL, + }, + { + "pkt_data75", + PPU_PPU_PKT_DATA75r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_PPU_CSR_BASE_ADDR + 0x92c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data75_reg, + NULL, + NULL, + }, + { + "pkt_data76", + PPU_PPU_PKT_DATA76r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x930, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data76_reg, + NULL, + NULL, + }, + { + "pkt_data77", + PPU_PPU_PKT_DATA77r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x934, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data77_reg, + NULL, + NULL, + }, + { + "pkt_data78", + PPU_PPU_PKT_DATA78r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x938, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data78_reg, + NULL, + NULL, + }, + { + "pkt_data79", + PPU_PPU_PKT_DATA79r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x93c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data79_reg, + NULL, + NULL, + }, + { + "pkt_data80", + PPU_PPU_PKT_DATA80r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x940, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data80_reg, + NULL, + NULL, + }, + { + "pkt_data81", + PPU_PPU_PKT_DATA81r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x944, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data81_reg, + NULL, + NULL, + }, + { + "pkt_data82", + PPU_PPU_PKT_DATA82r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x948, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data82_reg, + NULL, + NULL, + }, + { + "pkt_data83", + PPU_PPU_PKT_DATA83r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x94c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data83_reg, + NULL, + 
NULL, + }, + { + "pkt_data84", + PPU_PPU_PKT_DATA84r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x950, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data84_reg, + NULL, + NULL, + }, + { + "pkt_data85", + PPU_PPU_PKT_DATA85r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x954, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data85_reg, + NULL, + NULL, + }, + { + "pkt_data86", + PPU_PPU_PKT_DATA86r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x958, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data86_reg, + NULL, + NULL, + }, + { + "pkt_data87", + PPU_PPU_PKT_DATA87r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x95c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data87_reg, + NULL, + NULL, + }, + { + "pkt_data88", + PPU_PPU_PKT_DATA88r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x960, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data88_reg, + NULL, + NULL, + }, + { + "pkt_data89", + PPU_PPU_PKT_DATA89r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x964, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data89_reg, + NULL, + NULL, + }, + { + "pkt_data90", + PPU_PPU_PKT_DATA90r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x968, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data90_reg, + NULL, + NULL, + }, + { + "pkt_data91", + PPU_PPU_PKT_DATA91r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x96c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data91_reg, + NULL, + NULL, + }, + { + "pkt_data92", + PPU_PPU_PKT_DATA92r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x970, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data92_reg, + NULL, + NULL, + }, + { + "pkt_data93", + PPU_PPU_PKT_DATA93r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x974, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data93_reg, + NULL, + NULL, + }, + { + "pkt_data94", + PPU_PPU_PKT_DATA94r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x978, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data94_reg, + NULL, + NULL, + }, + { + "pkt_data95", + PPU_PPU_PKT_DATA95r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x97c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data95_reg, + NULL, + NULL, + }, + { + "pkt_data96", + PPU_PPU_PKT_DATA96r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x980, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data96_reg, + NULL, + NULL, + }, + { + "pkt_data97", + PPU_PPU_PKT_DATA97r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x984, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data97_reg, + NULL, + NULL, + }, + { + "pkt_data98", + PPU_PPU_PKT_DATA98r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x988, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data98_reg, + NULL, + NULL, + }, + { + "pkt_data99", + PPU_PPU_PKT_DATA99r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x98c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data99_reg, + NULL, + NULL, + }, + { + "pkt_data100", + PPU_PPU_PKT_DATA100r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x990, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_pkt_data100_reg, + NULL, + NULL, + }, + { + "pkt_data101", + PPU_PPU_PKT_DATA101r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x994, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data101_reg, + NULL, + NULL, + }, + { + "pkt_data102", + PPU_PPU_PKT_DATA102r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x998, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data102_reg, + NULL, + NULL, + }, + { + "pkt_data103", + PPU_PPU_PKT_DATA103r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x99c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data103_reg, + NULL, + NULL, + }, + { + "pkt_data104", + PPU_PPU_PKT_DATA104r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data104_reg, + NULL, + NULL, + }, + { + "pkt_data105", + PPU_PPU_PKT_DATA105r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data105_reg, + NULL, + NULL, + }, + { + "pkt_data106", + PPU_PPU_PKT_DATA106r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data106_reg, + NULL, + NULL, + }, + { + "pkt_data107", + PPU_PPU_PKT_DATA107r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data107_reg, + NULL, + NULL, + }, + { + "pkt_data108", + PPU_PPU_PKT_DATA108r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data108_reg, + NULL, + NULL, + }, + { + "pkt_data109", + 
PPU_PPU_PKT_DATA109r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data109_reg, + NULL, + NULL, + }, + { + "pkt_data110", + PPU_PPU_PKT_DATA110r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data110_reg, + NULL, + NULL, + }, + { + "pkt_data111", + PPU_PPU_PKT_DATA111r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data111_reg, + NULL, + NULL, + }, + { + "pkt_data112", + PPU_PPU_PKT_DATA112r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data112_reg, + NULL, + NULL, + }, + { + "pkt_data113", + PPU_PPU_PKT_DATA113r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data113_reg, + NULL, + NULL, + }, + { + "pkt_data114", + PPU_PPU_PKT_DATA114r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data114_reg, + NULL, + NULL, + }, + { + "pkt_data115", + PPU_PPU_PKT_DATA115r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data115_reg, + NULL, + NULL, + }, + { + "pkt_data116", + PPU_PPU_PKT_DATA116r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data116_reg, + NULL, + NULL, + }, + { + "pkt_data117", + PPU_PPU_PKT_DATA117r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data117_reg, + NULL, + NULL, + }, + { + "pkt_data118", + PPU_PPU_PKT_DATA118r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data118_reg, + NULL, + NULL, + }, + { + "pkt_data119", + PPU_PPU_PKT_DATA119r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data119_reg, + NULL, + NULL, + }, + { + "pkt_data120", + PPU_PPU_PKT_DATA120r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data120_reg, + NULL, + NULL, + }, + { + "pkt_data121", + PPU_PPU_PKT_DATA121r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data121_reg, + NULL, + NULL, + }, + { + "pkt_data122", + PPU_PPU_PKT_DATA122r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data122_reg, + NULL, + NULL, + }, + { + "pkt_data123", + PPU_PPU_PKT_DATA123r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data123_reg, + NULL, + NULL, + }, + { + "pkt_data124", + PPU_PPU_PKT_DATA124r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data124_reg, + NULL, + NULL, + }, + { + "pkt_data125", + PPU_PPU_PKT_DATA125r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9f4, + (32 / 8), + 0, + 0, + 0, + 0, + 
1, + g_ppu_ppu_pkt_data125_reg, + NULL, + NULL, + }, + { + "pkt_data126", + PPU_PPU_PKT_DATA126r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data126_reg, + NULL, + NULL, + }, + { + "pkt_data127", + PPU_PPU_PKT_DATA127r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0x9fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_pkt_data127_reg, + NULL, + NULL, + }, + { + "spr0", + PPU_PPU_SPR0r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa00, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr0_reg, + NULL, + NULL, + }, + { + "spr1", + PPU_PPU_SPR1r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa04, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr1_reg, + NULL, + NULL, + }, + { + "spr2", + PPU_PPU_SPR2r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa08, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr2_reg, + NULL, + NULL, + }, + { + "spr3", + PPU_PPU_SPR3r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa0c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr3_reg, + NULL, + NULL, + }, + { + "spr4", + PPU_PPU_SPR4r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr4_reg, + NULL, + NULL, + }, + { + "spr5", + PPU_PPU_SPR5r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa14, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr5_reg, + NULL, + NULL, + }, + { + "spr6", + PPU_PPU_SPR6r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa18, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 1, + g_ppu_ppu_spr6_reg, + NULL, + NULL, + }, + { + "spr7", + PPU_PPU_SPR7r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr7_reg, + NULL, + NULL, + }, + { + "spr8", + PPU_PPU_SPR8r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa20, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr8_reg, + NULL, + NULL, + }, + { + "spr9", + PPU_PPU_SPR9r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa24, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr9_reg, + NULL, + NULL, + }, + { + "spr10", + PPU_PPU_SPR10r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa28, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr10_reg, + NULL, + NULL, + }, + { + "spr11", + PPU_PPU_SPR11r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa2c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr11_reg, + NULL, + NULL, + }, + { + "spr12", + PPU_PPU_SPR12r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa30, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr12_reg, + NULL, + NULL, + }, + { + "spr13", + PPU_PPU_SPR13r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa34, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr13_reg, + NULL, + NULL, + }, + { + "spr14", + PPU_PPU_SPR14r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa38, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr14_reg, + NULL, + NULL, + }, + { + "spr15", + PPU_PPU_SPR15r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa3c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_spr15_reg, + NULL, + NULL, + }, + { + "spr16", + PPU_PPU_SPR16r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa40, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr16_reg, + NULL, + NULL, + }, + { + "spr17", + PPU_PPU_SPR17r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa44, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr17_reg, + NULL, + NULL, + }, + { + "spr18", + PPU_PPU_SPR18r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa48, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr18_reg, + NULL, + NULL, + }, + { + "spr19", + PPU_PPU_SPR19r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa4c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr19_reg, + NULL, + NULL, + }, + { + "spr20", + PPU_PPU_SPR20r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa50, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr20_reg, + NULL, + NULL, + }, + { + "spr21", + PPU_PPU_SPR21r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa54, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr21_reg, + NULL, + NULL, + }, + { + "spr22", + PPU_PPU_SPR22r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa58, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr22_reg, + NULL, + NULL, + }, + { + "spr23", + PPU_PPU_SPR23r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa5c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr23_reg, + NULL, + NULL, + }, + { + "spr24", + PPU_PPU_SPR24r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa60, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_spr24_reg, + NULL, + NULL, + }, + { + "spr25", + PPU_PPU_SPR25r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa64, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr25_reg, + NULL, + NULL, + }, + { + "spr26", + PPU_PPU_SPR26r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa68, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr26_reg, + NULL, + NULL, + }, + { + "spr27", + PPU_PPU_SPR27r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa6c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr27_reg, + NULL, + NULL, + }, + { + "spr28", + PPU_PPU_SPR28r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa70, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr28_reg, + NULL, + NULL, + }, + { + "spr29", + PPU_PPU_SPR29r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa74, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr29_reg, + NULL, + NULL, + }, + { + "spr30", + PPU_PPU_SPR30r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa78, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr30_reg, + NULL, + NULL, + }, + { + "spr31", + PPU_PPU_SPR31r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa7c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_spr31_reg, + NULL, + NULL, + }, + { + "rsp0", + PPU_PPU_RSP0r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa80, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp0_reg, + NULL, + NULL, + }, + { + "rsp1", + PPU_PPU_RSP1r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa84, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ppu_ppu_rsp1_reg, + NULL, + NULL, + }, + { + "rsp2", + PPU_PPU_RSP2r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa88, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp2_reg, + NULL, + NULL, + }, + { + "rsp3", + PPU_PPU_RSP3r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa8c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp3_reg, + NULL, + NULL, + }, + { + "rsp4", + PPU_PPU_RSP4r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa90, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp4_reg, + NULL, + NULL, + }, + { + "rsp5", + PPU_PPU_RSP5r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa94, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp5_reg, + NULL, + NULL, + }, + { + "rsp6", + PPU_PPU_RSP6r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa98, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp6_reg, + NULL, + NULL, + }, + { + "rsp7", + PPU_PPU_RSP7r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xa9c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp7_reg, + NULL, + NULL, + }, + { + "rsp8", + PPU_PPU_RSP8r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaa0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp8_reg, + NULL, + NULL, + }, + { + "rsp9", + PPU_PPU_RSP9r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaa4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp9_reg, + NULL, + NULL, + }, + { + "rsp10", + PPU_PPU_RSP10r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaa8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp10_reg, + NULL, + 
NULL, + }, + { + "rsp11", + PPU_PPU_RSP11r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp11_reg, + NULL, + NULL, + }, + { + "rsp12", + PPU_PPU_RSP12r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xab0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp12_reg, + NULL, + NULL, + }, + { + "rsp13", + PPU_PPU_RSP13r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xab4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp13_reg, + NULL, + NULL, + }, + { + "rsp14", + PPU_PPU_RSP14r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xab8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp14_reg, + NULL, + NULL, + }, + { + "rsp15", + PPU_PPU_RSP15r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xabc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp15_reg, + NULL, + NULL, + }, + { + "rsp16", + PPU_PPU_RSP16r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xac0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp16_reg, + NULL, + NULL, + }, + { + "rsp17", + PPU_PPU_RSP17r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xac4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp17_reg, + NULL, + NULL, + }, + { + "rsp18", + PPU_PPU_RSP18r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xac8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp18_reg, + NULL, + NULL, + }, + { + "rsp19", + PPU_PPU_RSP19r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xacc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp19_reg, + NULL, + NULL, + }, 
+ { + "rsp20", + PPU_PPU_RSP20r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xad0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp20_reg, + NULL, + NULL, + }, + { + "rsp21", + PPU_PPU_RSP21r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xad4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp21_reg, + NULL, + NULL, + }, + { + "rsp22", + PPU_PPU_RSP22r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xad8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp22_reg, + NULL, + NULL, + }, + { + "rsp23", + PPU_PPU_RSP23r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xadc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp23_reg, + NULL, + NULL, + }, + { + "rsp24", + PPU_PPU_RSP24r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xae0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp24_reg, + NULL, + NULL, + }, + { + "rsp25", + PPU_PPU_RSP25r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xae4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp25_reg, + NULL, + NULL, + }, + { + "rsp26", + PPU_PPU_RSP26r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xae8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp26_reg, + NULL, + NULL, + }, + { + "rsp27", + PPU_PPU_RSP27r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp27_reg, + NULL, + NULL, + }, + { + "rsp28", + PPU_PPU_RSP28r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaf0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp28_reg, + NULL, + NULL, + }, + { + 
"rsp29", + PPU_PPU_RSP29r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaf4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp29_reg, + NULL, + NULL, + }, + { + "rsp30", + PPU_PPU_RSP30r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xaf8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp30_reg, + NULL, + NULL, + }, + { + "rsp31", + PPU_PPU_RSP31r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xafc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_rsp31_reg, + NULL, + NULL, + }, + { + "key0", + PPU_PPU_KEY0r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb00, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key0_reg, + NULL, + NULL, + }, + { + "key1", + PPU_PPU_KEY1r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb04, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key1_reg, + NULL, + NULL, + }, + { + "key2", + PPU_PPU_KEY2r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb08, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key2_reg, + NULL, + NULL, + }, + { + "key3", + PPU_PPU_KEY3r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb0c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key3_reg, + NULL, + NULL, + }, + { + "key4", + PPU_PPU_KEY4r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb10, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key4_reg, + NULL, + NULL, + }, + { + "key5", + PPU_PPU_KEY5r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb14, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key5_reg, + NULL, + NULL, + }, + { + "key6", + PPU_PPU_KEY6r, + 
PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb18, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key6_reg, + NULL, + NULL, + }, + { + "key7", + PPU_PPU_KEY7r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb1c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key7_reg, + NULL, + NULL, + }, + { + "key8", + PPU_PPU_KEY8r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb20, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key8_reg, + NULL, + NULL, + }, + { + "key9", + PPU_PPU_KEY9r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb24, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key9_reg, + NULL, + NULL, + }, + { + "key10", + PPU_PPU_KEY10r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb28, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key10_reg, + NULL, + NULL, + }, + { + "key11", + PPU_PPU_KEY11r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb2c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key11_reg, + NULL, + NULL, + }, + { + "key12", + PPU_PPU_KEY12r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb30, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key12_reg, + NULL, + NULL, + }, + { + "key13", + PPU_PPU_KEY13r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb34, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key13_reg, + NULL, + NULL, + }, + { + "key14", + PPU_PPU_KEY14r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb38, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key14_reg, + NULL, + NULL, + }, + { + "key15", + PPU_PPU_KEY15r, + PPU, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb3c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key15_reg, + NULL, + NULL, + }, + { + "key16", + PPU_PPU_KEY16r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb40, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key16_reg, + NULL, + NULL, + }, + { + "key17", + PPU_PPU_KEY17r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb44, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key17_reg, + NULL, + NULL, + }, + { + "key18", + PPU_PPU_KEY18r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb48, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key18_reg, + NULL, + NULL, + }, + { + "key19", + PPU_PPU_KEY19r, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb4c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ppu_ppu_key19_reg, + NULL, + NULL, + }, + { + "flag", + PPU_PPU_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_PPU_CSR_BASE_ADDR + 0xb50, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ppu_ppu_flag_reg, + NULL, + NULL, + }, + { + "int_1200m_flag", + PPU_CLUSTER_INT_1200M_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x00, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 8, + g_ppu_cluster_int_1200m_flag_reg, + NULL, + NULL, + }, + { + "bp_instr_l", + PPU_CLUSTER_BP_INSTR_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x04, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_bp_instr_l_reg, + NULL, + NULL, + }, + { + "bp_instr_h", + PPU_CLUSTER_BP_INSTR_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_CLUSTER0_BASE_ADDR + 0x08, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_bp_instr_h_reg, + NULL, + NULL, + }, + { + "bp_addr", + PPU_CLUSTER_BP_ADDRr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x0c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 1, + g_ppu_cluster_bp_addr_reg, + NULL, + NULL, + }, + { + "drr", + PPU_CLUSTER_DRRr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 1, + g_ppu_cluster_drr_reg, + NULL, + NULL, + }, + { + "dsr", + PPU_CLUSTER_DSRr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_dsr_reg, + NULL, + NULL, + }, + { + "dbg_rtl_date", + PPU_CLUSTER_DBG_RTL_DATEr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x50, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_dbg_rtl_date_reg, + NULL, + NULL, + }, + { + "me_continue", + PPU_CLUSTER_ME_CONTINUEr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x80, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_me_continue_reg, + NULL, + NULL, + }, + { + "me_step", + PPU_CLUSTER_ME_STEPr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x84, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_me_step_reg, + NULL, + NULL, + }, + { + "me_refresh", + PPU_CLUSTER_ME_REFRESHr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + 
DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x88, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_me_refresh_reg, + NULL, + NULL, + }, + { + "drr_clr", + PPU_CLUSTER_DRR_CLRr, + PPU, + DPP_REG_FLAG_WO | DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x8c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_drr_clr_reg, + NULL, + NULL, + }, + { + "me_busy_thresold", + PPU_CLUSTER_ME_BUSY_THRESOLDr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x90, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_me_busy_thresold_reg, + NULL, + NULL, + }, + { + "int_1200m_sta", + PPU_CLUSTER_INT_1200M_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x94, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 8, + g_ppu_cluster_int_1200m_sta_reg, + NULL, + NULL, + }, + { + "int_1200m_me_fifo_mask_l", + PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 32, + g_ppu_cluster_int_1200m_me_fifo_mask_l_reg, + NULL, + NULL, + }, + { + "int_1200m_me_fifo_mask_h", + PPU_CLUSTER_INT_1200M_ME_FIFO_MASK_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xe0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 2, + g_ppu_cluster_int_1200m_me_fifo_mask_h_reg, + NULL, + NULL, + }, + { + "me_fifo_interrupt_flag_l", + PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x100, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 
+ 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 32, + g_ppu_cluster_me_fifo_interrupt_flag_l_reg, + NULL, + NULL, + }, + { + "me_fifo_interrupt_flag_h", + PPU_CLUSTER_ME_FIFO_INTERRUPT_FLAG_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x120, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 2, + g_ppu_cluster_me_fifo_interrupt_flag_h_reg, + NULL, + NULL, + }, + { + "me_fifo_interrupt_sta_l", + PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x140, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 32, + g_ppu_cluster_me_fifo_interrupt_sta_l_reg, + NULL, + NULL, + }, + { + "me_fifo_interrupt_sta_h", + PPU_CLUSTER_ME_FIFO_INTERRUPT_STA_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x160, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0x7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 4, + 2, + g_ppu_cluster_me_fifo_interrupt_sta_h_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_mask_l", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x800, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 32, + g_ppu_cluster_int_1200m_cluster_mex_fifo_mask_l_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_mask_h", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_MASK_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x804, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 19, + g_ppu_cluster_int_1200m_cluster_mex_fifo_mask_h_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_flag_l", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_CLUSTER0_BASE_ADDR + 0x808, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 32, + g_ppu_cluster_int_1200m_cluster_mex_fifo_flag_l_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_flag_h", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_FLAG_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x80c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 19, + g_ppu_cluster_int_1200m_cluster_mex_fifo_flag_h_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_stat_l", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x810, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 32, + g_ppu_cluster_int_1200m_cluster_mex_fifo_stat_l_reg, + NULL, + NULL, + }, + { + "int_1200m_cluster_mex_fifo_stat_h", + PPU_CLUSTER_INT_1200M_CLUSTER_MEX_FIFO_STAT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x814, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 19, + g_ppu_cluster_int_1200m_cluster_mex_fifo_stat_h_reg, + NULL, + NULL, + }, + { + "ppu_statics_wb_exception_cfg", + PPU_CLUSTER_PPU_STATICS_WB_EXCEPTION_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x824, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 6, + g_ppu_cluster_ppu_statics_wb_exception_cfg_reg, + NULL, + NULL, + }, + { + "thread_switch_en", + PPU_CLUSTER_THREAD_SWITCH_ENr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x8c0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_thread_switch_en_reg, + NULL, + NULL, + }, + { + "is_me_not_idle", + PPU_CLUSTER_IS_ME_NOT_IDLEr, + PPU, + DPP_REG_FLAG_DIRECT, 
+ DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x8c4, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 8, + g_ppu_cluster_is_me_not_idle_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_in_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc00, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_in_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc04, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ese_rsp_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc08, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_rsp_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ese_rsp_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc0c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_rsp_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ise_rsp_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc10, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + 
DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_rsp_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ise_rsp_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc14, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_rsp_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc18, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc1c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc20, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO0_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc24, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + 
g_ppu_cluster_ppu_rsp_ptr_fwft_fifo0_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc28, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc2c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc30, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_RSP_PTR_FWFT_FIFO1_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc34, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_rsp_ptr_fwft_fifo1_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "sta_rsp_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc38, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_sta_rsp_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + 
}, + { + "sta_rsp_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_STA_RSP_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc3c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_sta_rsp_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc40, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc44, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc48, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_STA_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc4c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_rsp_fwft_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "cop_rsp_fifo_prog_full_assert_cfg", + 
PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc50, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_rsp_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "cop_rsp_fifo_prog_full_negate_cfg", + PPU_CLUSTER_COP_RSP_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc54, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_rsp_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "cop_rsp_fifo_prog_empty_assert_cfg", + PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc58, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_rsp_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "cop_rsp_fifo_prog_empty_negate_cfg", + PPU_CLUSTER_COP_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc5c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_rsp_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "mcode_pf_rsp_fifo_prog_full_assert_cfg", + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc60, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_mcode_pf_rsp_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "mcode_pf_rsp_fifo_prog_full_negate_cfg", + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc64, + (32 / 8), + 
DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_mcode_pf_rsp_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "mcode_pf_rsp_fifo_prog_empty_assert_cfg", + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc68, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "mcode_pf_rsp_fifo_prog_empty_negate_cfg", + PPU_CLUSTER_MCODE_PF_RSP_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc6c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_mcode_pf_rsp_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc70, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc74, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc78, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + 
g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_COP_RSP_FWFT_FIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc7c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cop_rsp_fwft_fifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ise_key_afifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc80, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ise_key_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ise_key_afifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc84, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ise_key_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ese_key_afifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc88, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ese_key_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ese_key_afifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc8c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ese_key_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + 
"ppu_sta_key_afifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc90, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_key_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_key_afifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0xc94, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_key_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "int_600m_cluster_mex_fifo_mask", + PPU_CLUSTER_INT_600M_CLUSTER_MEX_FIFO_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4034, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 7, + g_ppu_cluster_int_600m_cluster_mex_fifo_mask_reg, + NULL, + NULL, + }, + { + "cluster_mex_fifo_600m_interrupt_flag", + PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4038, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 7, + g_ppu_cluster_cluster_mex_fifo_600m_interrupt_flag_reg, + NULL, + NULL, + }, + { + "cluster_mex_fifo_600m_interrupt_sta", + PPU_CLUSTER_CLUSTER_MEX_FIFO_600M_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x403c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 7, + g_ppu_cluster_cluster_mex_fifo_600m_interrupt_sta_reg, + NULL, + NULL, + }, + { + "mex_cnt_cfg", + PPU_CLUSTER_MEX_CNT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4050, + (32 / 8), + 
DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 2, + g_ppu_cluster_mex_cnt_cfg_reg, + NULL, + NULL, + }, + { + "int_600m_cluster_mex_ram_ecc_error_interrupt_mask", + PPU_CLUSTER_INT_600M_CLUSTER_MEX_RAM_ECC_ERROR_INTERRUPT_MASKr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4064, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 6, + g_ppu_cluster_int_600m_cluster_mex_ram_ecc_error_interrupt_mask_reg, + NULL, + NULL, + }, + { + "cluster_mex_ram_600m_ecc_error_interrupt_flag", + PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_FLAGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4068, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 6, + g_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_flag_reg, + NULL, + NULL, + }, + { + "cluster_mex_ram_600m_ecc_error_interrupt_sta", + PPU_CLUSTER_CLUSTER_MEX_RAM_600M_ECC_ERROR_INTERRUPT_STAr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x406c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 6, + g_ppu_cluster_cluster_mex_ram_600m_ecc_error_interrupt_sta_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_in_afifo_prog_full_assert_cfg", + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4400, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_in_afifo_prog_full_negate_cfg", + PPU_CLUSTER_PPU_CLUSTER_MF_IN_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4410, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 
0, + 1, + g_ppu_cluster_ppu_cluster_mf_in_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ese_rsp_afifo_prog_full_assert_cfg", + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4420, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_rsp_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ese_rsp_afifo_prog_full_negate_cfg", + PPU_CLUSTER_ESE_RSP_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4430, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_rsp_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ise_rsp_afifo_prog_full_assert_cfg", + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4440, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_rsp_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "ise_rsp_afifo_prog_full_negate_cfg", + PPU_CLUSTER_ISE_RSP_AFIFO_PROG_FULL_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4450, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_rsp_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "sta_rsp_afifo_prog_full_assert_cfg", + PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4460, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_sta_rsp_afifo_prog_full_assert_cfg_reg, + NULL, + NULL, + }, + { + "sta_rsp_afifo_prog_full_negate_cfg", + PPU_CLUSTER_STA_RSP_AFIFO_PROG_FULL_NEGATE_CFGr, + 
PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4470, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_sta_rsp_afifo_prog_full_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ise_key_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4480, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ise_key_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ise_key_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_ISE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x4490, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ise_key_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ese_key_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x44a0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ese_key_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_ese_key_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_ESE_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x44b0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_ese_key_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_key_afifo_prog_empty_assert_cfg", + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_ASSERT_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x44c0, + (32 / 8), + 
DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_key_afifo_prog_empty_assert_cfg_reg, + NULL, + NULL, + }, + { + "ppu_sta_key_afifo_prog_empty_negate_cfg", + PPU_CLUSTER_PPU_STA_KEY_AFIFO_PROG_EMPTY_NEGATE_CFGr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x44d0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_sta_key_afifo_prog_empty_negate_cfg_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_vld_cnt_h", + PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6000, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cluster_mf_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_cluster_mf_vld_cnt_l", + PPU_CLUSTER_PPU_CLUSTER_MF_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6004, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ppu_cluster_mf_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "cluster_ise_key_out_vld_cnt", + PPU_CLUSTER_CLUSTER_ISE_KEY_OUT_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6008, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ise_key_out_vld_cnt_reg, + NULL, + NULL, + }, + { + "ise_cluster_rsp_in_vld_cnt", + PPU_CLUSTER_ISE_CLUSTER_RSP_IN_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x600c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_cluster_rsp_in_vld_cnt_reg, + NULL, + NULL, + }, + { + "cluster_ese_key_out_vld_cnt", + PPU_CLUSTER_CLUSTER_ESE_KEY_OUT_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + 
SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6010, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ese_key_out_vld_cnt_reg, + NULL, + NULL, + }, + { + "ese_cluster_rsp_in_vld_cnt", + PPU_CLUSTER_ESE_CLUSTER_RSP_IN_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6014, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_cluster_rsp_in_vld_cnt_reg, + NULL, + NULL, + }, + { + "cluster_stat_cmd_vld_cnt", + PPU_CLUSTER_CLUSTER_STAT_CMD_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6018, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_stat_cmd_vld_cnt_reg, + NULL, + NULL, + }, + { + "stat_cluster_rsp_vld_cnt", + PPU_CLUSTER_STAT_CLUSTER_RSP_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x601c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_stat_cluster_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "mex_debug_key_vld_cnt", + PPU_CLUSTER_MEX_DEBUG_KEY_VLD_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6020, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_mex_debug_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "ise_cluster_key_fc_cnt", + PPU_CLUSTER_ISE_CLUSTER_KEY_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6024, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ise_cluster_key_fc_cnt_reg, + NULL, + NULL, + }, + { + "ese_cluster_key_fc_cnt", + PPU_CLUSTER_ESE_CLUSTER_KEY_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_CLUSTER0_BASE_ADDR + 0x6028, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_ese_cluster_key_fc_cnt_reg, + NULL, + NULL, + }, + { + "cluster_ise_rsp_fc_cnt", + PPU_CLUSTER_CLUSTER_ISE_RSP_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x602c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ise_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "cluster_ese_rsp_fc_cnt", + PPU_CLUSTER_CLUSTER_ESE_RSP_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6030, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ese_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_cluster_cmd_fc_cnt", + PPU_CLUSTER_STAT_CLUSTER_CMD_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6034, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_stat_cluster_cmd_fc_cnt_reg, + NULL, + NULL, + }, + { + "cluster_stat_rsp_fc_cnt", + PPU_CLUSTER_CLUSTER_STAT_RSP_FC_CNTr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x6038, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_stat_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "cluster_ppu_mf_vld_cnt_l", + PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2000, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ppu_mf_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "cluster_ppu_mf_vld_cnt_h", + PPU_CLUSTER_CLUSTER_PPU_MF_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 
0x2004, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_ppu_mf_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "cluster_cop_key_vld_cnt_l", + PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2008, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_cop_key_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "cluster_cop_key_vld_cnt_h", + PPU_CLUSTER_CLUSTER_COP_KEY_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x200c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cluster_cop_key_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "cop_cluster_rsp_vld_cnt_l", + PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2010, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_cluster_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "cop_cluster_rsp_vld_cnt_h", + PPU_CLUSTER_COP_CLUSTER_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2014, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 0, + DPP_PPU_CLUSTER_SPACE_SIZE, + 0, + 1, + g_ppu_cluster_cop_cluster_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_sop_cnt_l", + PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2018, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_sop_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_sop_cnt_h", + PPU_CLUSTER_MEX_ME_PKT_IN_SOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 
0x201c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_sop_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_eop_cnt_l", + PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2020, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_eop_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_eop_cnt_h", + PPU_CLUSTER_MEX_ME_PKT_IN_EOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2024, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_eop_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_vld_cnt_l", + PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2028, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_pkt_in_vld_cnt_h", + PPU_CLUSTER_MEX_ME_PKT_IN_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x202c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_mex_me_pkt_in_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_sop_cnt_l", + PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x20d8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_sop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_sop_cnt_h", + PPU_CLUSTER_ME_MEX_PKT_OUT_SOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_CLUSTER0_BASE_ADDR + 0x20dc, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_sop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_eop_cnt_l", + PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x20e0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_eop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_eop_cnt_h", + PPU_CLUSTER_ME_MEX_PKT_OUT_EOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x20e4, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_eop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_vld_cnt_l", + PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x20e8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_pkt_out_vld_cnt_h", + PPU_CLUSTER_ME_MEX_PKT_OUT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x20ec, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_pkt_out_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_sop_cnt_l", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2198, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_sop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_sop_cnt_h", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_SOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x219c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_sop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_eop_cnt_l", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x21a0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_eop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_eop_cnt_h", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_EOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x21a4, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_eop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_vld_cnt_l", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x21a8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_i_key_out_vld_cnt_h", + PPU_CLUSTER_ME_MEX_I_KEY_OUT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x21ac, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_i_key_out_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_sop_cnt_l", + PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2258, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_sop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_sop_cnt_h", + 
PPU_CLUSTER_ME_MEX_E_KEY_OUT_SOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x225c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_sop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_eop_cnt_l", + PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2260, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_eop_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_eop_cnt_h", + PPU_CLUSTER_ME_MEX_E_KEY_OUT_EOP_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2264, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_eop_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_vld_cnt_l", + PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2268, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_e_key_out_vld_cnt_h", + PPU_CLUSTER_ME_MEX_E_KEY_OUT_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x226c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_mex_e_key_out_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_demux_ise_key_vld_cnt_l", + PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2318, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + 
g_ppu_cluster_me_mex_demux_ise_key_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_demux_ise_key_vld_cnt_h", + PPU_CLUSTER_ME_MEX_DEMUX_ISE_KEY_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x231c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_ise_key_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_demux_ese_key_vld_cnt_l", + PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2358, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_ese_key_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_demux_ese_key_vld_cnt_h", + PPU_CLUSTER_ME_MEX_DEMUX_ESE_KEY_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x235c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_ese_key_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_demux_sta_key_vld_cnt_l", + PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2398, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_sta_key_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_demux_sta_key_vld_cnt_h", + PPU_CLUSTER_ME_MEX_DEMUX_STA_KEY_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x239c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_sta_key_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_mex_demux_cop_key_vld_cnt_l", + PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + 
MODULE_CLUSTER0_BASE_ADDR + 0x23d8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_cop_key_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "me_mex_demux_cop_key_vld_cnt_h", + PPU_CLUSTER_ME_MEX_DEMUX_COP_KEY_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x23dc, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_me_mex_demux_cop_key_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_demux_ise_rsp_vld_cnt_l", + PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2418, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_demux_ise_rsp_vld_cnt_h", + PPU_CLUSTER_MEX_ME_DEMUX_ISE_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x241c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_ise_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_demux_ese_rsp_vld_cnt_l", + PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2458, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_demux_ese_rsp_vld_cnt_h", + PPU_CLUSTER_MEX_ME_DEMUX_ESE_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x245c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_ese_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_demux_sta_rsp_vld_cnt_l", + 
PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2498, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_demux_sta_rsp_vld_cnt_h", + PPU_CLUSTER_MEX_ME_DEMUX_STA_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x249c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_sta_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "mex_me_demux_cop_rsp_vld_cnt_l", + PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x24d8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_l_reg, + NULL, + NULL, + }, + { + "mex_me_demux_cop_rsp_vld_cnt_h", + PPU_CLUSTER_MEX_ME_DEMUX_COP_RSP_VLD_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x24dc, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 8, + 1, + g_ppu_cluster_mex_me_demux_cop_rsp_vld_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code0_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2518, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code0_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code0_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE0_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x251c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + 
g_ppu_cluster_me_exception_code0_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code1_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2520, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code1_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code1_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE1_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2524, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code1_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code2_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x2528, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code2_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code2_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE2_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x252c, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code2_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code3_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25d8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code3_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code3_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE3_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25dc, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + 
DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code3_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code4_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25e0, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code4_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code4_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE4_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25e4, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code4_cnt_h_reg, + NULL, + NULL, + }, + { + "me_exception_code5_cnt_l", + PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_Lr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25e8, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code5_cnt_l_reg, + NULL, + NULL, + }, + { + "me_exception_code5_cnt_h", + PPU_CLUSTER_ME_EXCEPTION_CODE5_CNT_Hr, + PPU, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_PPU_BASE_ADDR + MODULE_CLUSTER0_BASE_ADDR + 0x25ec, + (32 / 8), + DPP_PPU_CLUSTER_NUM, + 7 + 1, + DPP_PPU_CLUSTER_SPACE_SIZE, + 24, + 1, + g_ppu_cluster_me_exception_code5_cnt_h_reg, + NULL, + NULL, + }, + { + "ppu_soft_rst", + SE_CFG_PPU_SOFT_RSTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ppu_soft_rst_reg, + NULL, + NULL, + }, + { + "ept_flag", + SE_CFG_EPT_FLAGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ept_flag_reg, + NULL, + NULL, + }, + { + "ddr_key_lk0_3", + SE_CFG_DDR_KEY_LK0_3r, + SE, + DPP_REG_FLAG_DIRECT, 
+ DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk0_3_reg, + NULL, + NULL, + }, + { + "ddr_key_lk0_2", + SE_CFG_DDR_KEY_LK0_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk0_2_reg, + NULL, + NULL, + }, + { + "ddr_key_lk0_1", + SE_CFG_DDR_KEY_LK0_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk0_1_reg, + NULL, + NULL, + }, + { + "ddr_key_lk0_0", + SE_CFG_DDR_KEY_LK0_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk0_0_reg, + NULL, + NULL, + }, + { + "ddr_key_lk1_3", + SE_CFG_DDR_KEY_LK1_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk1_3_reg, + NULL, + NULL, + }, + { + "ddr_key_lk1_2", + SE_CFG_DDR_KEY_LK1_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk1_2_reg, + NULL, + NULL, + }, + { + "ddr_key_lk1_1", + SE_CFG_DDR_KEY_LK1_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk1_1_reg, + NULL, + NULL, + }, + { + "ddr_key_lk1_0", + SE_CFG_DDR_KEY_LK1_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_ddr_key_lk1_0_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_18", + SE_CFG_HASH_KEY_LK0_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_18_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_17", + SE_CFG_HASH_KEY_LK0_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_17_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_16", + SE_CFG_HASH_KEY_LK0_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_16_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_15", + SE_CFG_HASH_KEY_LK0_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_15_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_14", + SE_CFG_HASH_KEY_LK0_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_14_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_13", + SE_CFG_HASH_KEY_LK0_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_13_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_12", + SE_CFG_HASH_KEY_LK0_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_12_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_11", + SE_CFG_HASH_KEY_LK0_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000009c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_11_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_10", + SE_CFG_HASH_KEY_LK0_10r, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_10_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_9", + SE_CFG_HASH_KEY_LK0_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_9_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_8", + SE_CFG_HASH_KEY_LK0_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_8_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_7", + SE_CFG_HASH_KEY_LK0_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_7_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_6", + SE_CFG_HASH_KEY_LK0_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_6_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_5", + SE_CFG_HASH_KEY_LK0_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_5_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_4", + SE_CFG_HASH_KEY_LK0_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_4_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_3", + SE_CFG_HASH_KEY_LK0_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_3_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_2", + SE_CFG_HASH_KEY_LK0_2r, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_2_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_1", + SE_CFG_HASH_KEY_LK0_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_1_reg, + NULL, + NULL, + }, + { + "hash_key_lk0_0", + SE_CFG_HASH_KEY_LK0_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk0_0_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_18", + SE_CFG_HASH_KEY_LK1_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_18_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_17", + SE_CFG_HASH_KEY_LK1_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_17_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_16", + SE_CFG_HASH_KEY_LK1_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_16_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_15", + SE_CFG_HASH_KEY_LK1_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_15_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_14", + SE_CFG_HASH_KEY_LK1_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_14_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_13", + SE_CFG_HASH_KEY_LK1_13r, + SE, + DPP_REG_FLAG_DIRECT, 
+ DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_13_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_12", + SE_CFG_HASH_KEY_LK1_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_12_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_11", + SE_CFG_HASH_KEY_LK1_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_11_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_10", + SE_CFG_HASH_KEY_LK1_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_10_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_9", + SE_CFG_HASH_KEY_LK1_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_9_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_8", + SE_CFG_HASH_KEY_LK1_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_8_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_7", + SE_CFG_HASH_KEY_LK1_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_7_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_6", + SE_CFG_HASH_KEY_LK1_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000000fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_6_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_5", + SE_CFG_HASH_KEY_LK1_5r, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_5_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_4", + SE_CFG_HASH_KEY_LK1_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_4_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_3", + SE_CFG_HASH_KEY_LK1_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_3_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_2", + SE_CFG_HASH_KEY_LK1_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_2_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_1", + SE_CFG_HASH_KEY_LK1_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000110, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_1_reg, + NULL, + NULL, + }, + { + "hash_key_lk1_0", + SE_CFG_HASH_KEY_LK1_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000114, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk1_0_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_18", + SE_CFG_HASH_KEY_LK2_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000118, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_18_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_17", + SE_CFG_HASH_KEY_LK2_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000011c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_17_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_16", + SE_CFG_HASH_KEY_LK2_16r, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000120, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_16_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_15", + SE_CFG_HASH_KEY_LK2_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000124, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_15_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_14", + SE_CFG_HASH_KEY_LK2_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000128, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_14_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_13", + SE_CFG_HASH_KEY_LK2_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000012c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_13_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_12", + SE_CFG_HASH_KEY_LK2_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_12_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_11", + SE_CFG_HASH_KEY_LK2_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000134, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_11_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_10", + SE_CFG_HASH_KEY_LK2_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000138, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_10_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_9", + SE_CFG_HASH_KEY_LK2_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000013c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_9_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_8", + SE_CFG_HASH_KEY_LK2_8r, + SE, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_8_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_7", + SE_CFG_HASH_KEY_LK2_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000144, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_7_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_6", + SE_CFG_HASH_KEY_LK2_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000148, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_6_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_5", + SE_CFG_HASH_KEY_LK2_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000014c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_5_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_4", + SE_CFG_HASH_KEY_LK2_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000150, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_4_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_3", + SE_CFG_HASH_KEY_LK2_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000154, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_3_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_2", + SE_CFG_HASH_KEY_LK2_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000158, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_2_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_1", + SE_CFG_HASH_KEY_LK2_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000015c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_1_reg, + NULL, + NULL, + }, + { + "hash_key_lk2_0", + SE_CFG_HASH_KEY_LK2_0r, + SE, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000160, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk2_0_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_18", + SE_CFG_HASH_KEY_LK3_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000164, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_18_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_17", + SE_CFG_HASH_KEY_LK3_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000168, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_17_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_16", + SE_CFG_HASH_KEY_LK3_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000016c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_16_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_15", + SE_CFG_HASH_KEY_LK3_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000170, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_15_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_14", + SE_CFG_HASH_KEY_LK3_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000174, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_14_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_13", + SE_CFG_HASH_KEY_LK3_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000178, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_13_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_12", + SE_CFG_HASH_KEY_LK3_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000017c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_12_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_11", + SE_CFG_HASH_KEY_LK3_11r, 
+ SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_11_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_10", + SE_CFG_HASH_KEY_LK3_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_10_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_9", + SE_CFG_HASH_KEY_LK3_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000188, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_9_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_8", + SE_CFG_HASH_KEY_LK3_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000018c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_8_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_7", + SE_CFG_HASH_KEY_LK3_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_7_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_6", + SE_CFG_HASH_KEY_LK3_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_6_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_5", + SE_CFG_HASH_KEY_LK3_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000198, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_5_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_4", + SE_CFG_HASH_KEY_LK3_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000019c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_4_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_3", + SE_CFG_HASH_KEY_LK3_3r, + SE, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000001a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_3_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_2", + SE_CFG_HASH_KEY_LK3_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000001a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_2_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_1", + SE_CFG_HASH_KEY_LK3_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000001a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_1_reg, + NULL, + NULL, + }, + { + "hash_key_lk3_0", + SE_CFG_HASH_KEY_LK3_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000001ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_hash_key_lk3_0_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_6", + SE_CFG_LPM_KEY_LK0_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_6_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_5", + SE_CFG_LPM_KEY_LK0_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_5_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_4", + SE_CFG_LPM_KEY_LK0_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_4_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_3", + SE_CFG_LPM_KEY_LK0_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_3_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_2", + SE_CFG_LPM_KEY_LK0_2r, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_2_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_1", + SE_CFG_LPM_KEY_LK0_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_1_reg, + NULL, + NULL, + }, + { + "lpm_key_lk0_0", + SE_CFG_LPM_KEY_LK0_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk0_0_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_6", + SE_CFG_LPM_KEY_LK1_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000002fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_6_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_5", + SE_CFG_LPM_KEY_LK1_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000300, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_5_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_4", + SE_CFG_LPM_KEY_LK1_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000304, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_4_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_3", + SE_CFG_LPM_KEY_LK1_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000308, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_3_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_2", + SE_CFG_LPM_KEY_LK1_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000030c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_2_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_1", + SE_CFG_LPM_KEY_LK1_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_CFG_BASE_ADDR + 0x00000310, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_1_reg, + NULL, + NULL, + }, + { + "lpm_key_lk1_0", + SE_CFG_LPM_KEY_LK1_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000314, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk1_0_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_6", + SE_CFG_LPM_KEY_LK2_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000318, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_6_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_5", + SE_CFG_LPM_KEY_LK2_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000031c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_5_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_4", + SE_CFG_LPM_KEY_LK2_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000320, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_4_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_3", + SE_CFG_LPM_KEY_LK2_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000324, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_3_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_2", + SE_CFG_LPM_KEY_LK2_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000328, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_2_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_1", + SE_CFG_LPM_KEY_LK2_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000032c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_1_reg, + NULL, + NULL, + }, + { + "lpm_key_lk2_0", + SE_CFG_LPM_KEY_LK2_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000330, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk2_0_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_6", + SE_CFG_LPM_KEY_LK3_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000334, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_6_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_5", + SE_CFG_LPM_KEY_LK3_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000338, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_5_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_4", + SE_CFG_LPM_KEY_LK3_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000033c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_4_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_3", + SE_CFG_LPM_KEY_LK3_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000340, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_3_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_2", + SE_CFG_LPM_KEY_LK3_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000344, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_2_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_1", + SE_CFG_LPM_KEY_LK3_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000348, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_1_reg, + NULL, + NULL, + }, + { + "lpm_key_lk3_0", + SE_CFG_LPM_KEY_LK3_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000034c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_lpm_key_lk3_0_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_22", + SE_CFG_ETCAM_KEY_LK0_22r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003c0, + (32 / 8), + 0, + 0, + 0, + 0, + 
1, + g_se_cfg_etcam_key_lk0_22_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_21", + SE_CFG_ETCAM_KEY_LK0_21r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_21_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_20", + SE_CFG_ETCAM_KEY_LK0_20r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_20_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_19", + SE_CFG_ETCAM_KEY_LK0_19r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_19_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_18", + SE_CFG_ETCAM_KEY_LK0_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_18_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_17", + SE_CFG_ETCAM_KEY_LK0_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_17_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_16", + SE_CFG_ETCAM_KEY_LK0_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_16_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_15", + SE_CFG_ETCAM_KEY_LK0_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_15_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_14", + SE_CFG_ETCAM_KEY_LK0_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003e0, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_14_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_13", + SE_CFG_ETCAM_KEY_LK0_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_13_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_12", + SE_CFG_ETCAM_KEY_LK0_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_12_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_11", + SE_CFG_ETCAM_KEY_LK0_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_11_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_10", + SE_CFG_ETCAM_KEY_LK0_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_10_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_9", + SE_CFG_ETCAM_KEY_LK0_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_9_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_8", + SE_CFG_ETCAM_KEY_LK0_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_8_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_7", + SE_CFG_ETCAM_KEY_LK0_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000003fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_7_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_6", + SE_CFG_ETCAM_KEY_LK0_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_CFG_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_6_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_5", + SE_CFG_ETCAM_KEY_LK0_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_5_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_4", + SE_CFG_ETCAM_KEY_LK0_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_4_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_3", + SE_CFG_ETCAM_KEY_LK0_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000040c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_3_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_2", + SE_CFG_ETCAM_KEY_LK0_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_2_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_1", + SE_CFG_ETCAM_KEY_LK0_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000414, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_1_reg, + NULL, + NULL, + }, + { + "etcam_key_lk0_0", + SE_CFG_ETCAM_KEY_LK0_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000418, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk0_0_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_22", + SE_CFG_ETCAM_KEY_LK1_22r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000041c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_22_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_21", + SE_CFG_ETCAM_KEY_LK1_21r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000420, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_21_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_20", + SE_CFG_ETCAM_KEY_LK1_20r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000424, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_20_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_19", + SE_CFG_ETCAM_KEY_LK1_19r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000428, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_19_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_18", + SE_CFG_ETCAM_KEY_LK1_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000042c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_18_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_17", + SE_CFG_ETCAM_KEY_LK1_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000430, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_17_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_16", + SE_CFG_ETCAM_KEY_LK1_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000434, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_16_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_15", + SE_CFG_ETCAM_KEY_LK1_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000438, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_15_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_14", + SE_CFG_ETCAM_KEY_LK1_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000043c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_14_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_13", + SE_CFG_ETCAM_KEY_LK1_13r, + SE, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000440, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_13_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_12", + SE_CFG_ETCAM_KEY_LK1_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000444, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_12_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_11", + SE_CFG_ETCAM_KEY_LK1_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000448, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_11_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_10", + SE_CFG_ETCAM_KEY_LK1_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000044c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_10_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_9", + SE_CFG_ETCAM_KEY_LK1_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_9_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_8", + SE_CFG_ETCAM_KEY_LK1_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000454, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_8_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_7", + SE_CFG_ETCAM_KEY_LK1_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000458, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_7_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_6", + SE_CFG_ETCAM_KEY_LK1_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000045c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_6_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_5", + 
SE_CFG_ETCAM_KEY_LK1_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000460, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_5_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_4", + SE_CFG_ETCAM_KEY_LK1_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000464, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_4_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_3", + SE_CFG_ETCAM_KEY_LK1_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000468, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_3_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_2", + SE_CFG_ETCAM_KEY_LK1_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000046c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_2_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_1", + SE_CFG_ETCAM_KEY_LK1_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000470, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_1_reg, + NULL, + NULL, + }, + { + "etcam_key_lk1_0", + SE_CFG_ETCAM_KEY_LK1_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000474, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk1_0_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_22", + SE_CFG_ETCAM_KEY_LK2_22r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000478, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_22_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_21", + SE_CFG_ETCAM_KEY_LK2_21r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000047c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_21_reg, + NULL, + NULL, + }, + { + 
"etcam_key_lk2_20", + SE_CFG_ETCAM_KEY_LK2_20r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_20_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_19", + SE_CFG_ETCAM_KEY_LK2_19r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_19_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_18", + SE_CFG_ETCAM_KEY_LK2_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_18_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_17", + SE_CFG_ETCAM_KEY_LK2_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000048c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_17_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_16", + SE_CFG_ETCAM_KEY_LK2_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000490, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_16_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_15", + SE_CFG_ETCAM_KEY_LK2_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000494, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_15_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_14", + SE_CFG_ETCAM_KEY_LK2_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000498, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_14_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_13", + SE_CFG_ETCAM_KEY_LK2_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000049c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_cfg_etcam_key_lk2_13_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_12", + SE_CFG_ETCAM_KEY_LK2_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_12_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_11", + SE_CFG_ETCAM_KEY_LK2_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_11_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_10", + SE_CFG_ETCAM_KEY_LK2_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_10_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_9", + SE_CFG_ETCAM_KEY_LK2_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_9_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_8", + SE_CFG_ETCAM_KEY_LK2_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_8_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_7", + SE_CFG_ETCAM_KEY_LK2_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_7_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_6", + SE_CFG_ETCAM_KEY_LK2_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_6_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_5", + SE_CFG_ETCAM_KEY_LK2_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004bc, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_5_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_4", + SE_CFG_ETCAM_KEY_LK2_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_4_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_3", + SE_CFG_ETCAM_KEY_LK2_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_3_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_2", + SE_CFG_ETCAM_KEY_LK2_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_2_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_1", + SE_CFG_ETCAM_KEY_LK2_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_1_reg, + NULL, + NULL, + }, + { + "etcam_key_lk2_0", + SE_CFG_ETCAM_KEY_LK2_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk2_0_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_22", + SE_CFG_ETCAM_KEY_LK3_22r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_22_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_21", + SE_CFG_ETCAM_KEY_LK3_21r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_21_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_20", + SE_CFG_ETCAM_KEY_LK3_20r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004dc, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_20_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_19", + SE_CFG_ETCAM_KEY_LK3_19r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_19_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_18", + SE_CFG_ETCAM_KEY_LK3_18r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_18_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_17", + SE_CFG_ETCAM_KEY_LK3_17r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_17_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_16", + SE_CFG_ETCAM_KEY_LK3_16r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_16_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_15", + SE_CFG_ETCAM_KEY_LK3_15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_15_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_14", + SE_CFG_ETCAM_KEY_LK3_14r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_14_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_13", + SE_CFG_ETCAM_KEY_LK3_13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000004f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_13_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_12", + SE_CFG_ETCAM_KEY_LK3_12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_CFG_BASE_ADDR + 0x000004fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_12_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_11", + SE_CFG_ETCAM_KEY_LK3_11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000500, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_11_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_10", + SE_CFG_ETCAM_KEY_LK3_10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000504, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_10_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_9", + SE_CFG_ETCAM_KEY_LK3_9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000508, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_9_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_8", + SE_CFG_ETCAM_KEY_LK3_8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000050c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_8_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_7", + SE_CFG_ETCAM_KEY_LK3_7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000510, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_7_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_6", + SE_CFG_ETCAM_KEY_LK3_6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000514, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_6_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_5", + SE_CFG_ETCAM_KEY_LK3_5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000518, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_5_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_4", + SE_CFG_ETCAM_KEY_LK3_4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000051c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_4_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_3", + SE_CFG_ETCAM_KEY_LK3_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000520, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_3_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_2", + SE_CFG_ETCAM_KEY_LK3_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000524, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_2_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_1", + SE_CFG_ETCAM_KEY_LK3_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x00000528, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_1_reg, + NULL, + NULL, + }, + { + "etcam_key_lk3_0", + SE_CFG_ETCAM_KEY_LK3_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x0000052c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_etcam_key_lk3_0_reg, + NULL, + NULL, + }, + { + "pbu_key_lk0_3", + SE_CFG_PBU_KEY_LK0_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk0_3_reg, + NULL, + NULL, + }, + { + "pbu_key_lk0_2", + SE_CFG_PBU_KEY_LK0_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk0_2_reg, + NULL, + NULL, + }, + { + "pbu_key_lk0_1", + SE_CFG_PBU_KEY_LK0_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk0_1_reg, + NULL, + NULL, + }, + { + "pbu_key_lk0_0", + SE_CFG_PBU_KEY_LK0_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk0_0_reg, + NULL, + NULL, + }, + { + "pbu_key_lk1_3", + SE_CFG_PBU_KEY_LK1_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk1_3_reg, + NULL, + NULL, + }, + { + "pbu_key_lk1_2", + SE_CFG_PBU_KEY_LK1_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk1_2_reg, + NULL, + NULL, + }, + { + "pbu_key_lk1_1", + SE_CFG_PBU_KEY_LK1_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk1_1_reg, + NULL, + NULL, + }, + { + "pbu_key_lk1_0", + SE_CFG_PBU_KEY_LK1_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk1_0_reg, + NULL, + NULL, + }, + { + "pbu_key_lk2_3", + SE_CFG_PBU_KEY_LK2_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk2_3_reg, + NULL, + NULL, + }, + { + "pbu_key_lk2_2", + SE_CFG_PBU_KEY_LK2_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk2_2_reg, + NULL, + NULL, + }, + { + "pbu_key_lk2_1", + SE_CFG_PBU_KEY_LK2_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk2_1_reg, + NULL, + NULL, + }, + { + "pbu_key_lk2_0", + SE_CFG_PBU_KEY_LK2_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_CFG_BASE_ADDR + 0x000006cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk2_0_reg, + NULL, + NULL, + }, + { + "pbu_key_lk3_3", + SE_CFG_PBU_KEY_LK3_3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk3_3_reg, + NULL, + NULL, + }, + { + "pbu_key_lk3_2", + SE_CFG_PBU_KEY_LK3_2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk3_2_reg, + NULL, + NULL, + }, + { + "pbu_key_lk3_1", + SE_CFG_PBU_KEY_LK3_1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk3_1_reg, + NULL, + NULL, + }, + { + "pbu_key_lk3_0", + SE_CFG_PBU_KEY_LK3_0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_CFG_BASE_ADDR + 0x000006dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cfg_pbu_key_lk3_0_reg, + NULL, + NULL, + }, + { + "schd_learn_fifo_pfull_ast", + SE_ALG_SCHD_LEARN_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_learn_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_learn_fifo_pfull_neg", + SE_ALG_SCHD_LEARN_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_learn_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "schd_hash0_fifo_pfull_ast", + SE_ALG_SCHD_HASH0_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash0_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_hash0_fifo_pfull_neg", + 
SE_ALG_SCHD_HASH0_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash0_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "schd_hash1_fifo_pfull_ast", + SE_ALG_SCHD_HASH1_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x009c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash1_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_hash1_fifo_pfull_neg", + SE_ALG_SCHD_HASH1_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash1_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "schd_hash2_fifo_pfull_ast", + SE_ALG_SCHD_HASH2_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash2_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_hash2_fifo_pfull_neg", + SE_ALG_SCHD_HASH2_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash2_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "schd_hash3_fifo_pfull_ast", + SE_ALG_SCHD_HASH3_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash3_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_hash3_fifo_pfull_neg", + SE_ALG_SCHD_HASH3_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash3_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "schd_lpm_fifo_pfull_ast", + SE_ALG_SCHD_LPM_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_lpm_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "schd_lpm_fifo_pfull_neg", + SE_ALG_SCHD_LPM_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_lpm_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash0_key_fifo_pfull_ast", + SE_ALG_HASH0_KEY_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_key_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash0_key_fifo_pfull_neg", + SE_ALG_HASH0_KEY_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_key_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash0_sreq_fifo_pfull_ast", + SE_ALG_HASH0_SREQ_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_sreq_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash0_sreq_fifo_pfull_neg", + SE_ALG_HASH0_SREQ_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_sreq_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash0_int_rsp_fifo_pfull_ast", + SE_ALG_HASH0_INT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash0_int_rsp_fifo_pfull_neg", + SE_ALG_HASH0_INT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 
0x0e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash0_ext_rsp_fifo_pfull_ast", + SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_ext_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash0_ext_rsp_fifo_pfull_neg", + SE_ALG_HASH0_EXT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_ext_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash1_key_fifo_pfull_ast", + SE_ALG_HASH1_KEY_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_key_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash1_key_fifo_pfull_neg", + SE_ALG_HASH1_KEY_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_key_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash1_sreq_fifo_pfull_ast", + SE_ALG_HASH1_SREQ_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_sreq_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash1_sreq_fifo_pfull_neg", + SE_ALG_HASH1_SREQ_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_sreq_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash1_int_rsp_fifo_pfull_ast", + SE_ALG_HASH1_INT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_hash1_int_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash1_int_rsp_fifo_pfull_neg", + SE_ALG_HASH1_INT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_int_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash1_ext_rsp_fifo_pfull_ast", + SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_ext_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash1_ext_rsp_fifo_pfull_neg", + SE_ALG_HASH1_EXT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_ext_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash2_key_fifo_pfull_ast", + SE_ALG_HASH2_KEY_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x010c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_key_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash2_key_fifo_pfull_neg", + SE_ALG_HASH2_KEY_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0110, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_key_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash2_sreq_fifo_pfull_ast", + SE_ALG_HASH2_SREQ_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0114, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_sreq_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash2_sreq_fifo_pfull_neg", + SE_ALG_HASH2_SREQ_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0118, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_hash2_sreq_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash2_int_rsp_fifo_pfull_ast", + SE_ALG_HASH2_INT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x011c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash2_int_rsp_fifo_pfull_neg", + SE_ALG_HASH2_INT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0120, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash2_ext_rsp_fifo_pfull_ast", + SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0124, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_ext_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash2_ext_rsp_fifo_pfull_neg", + SE_ALG_HASH2_EXT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0128, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_ext_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash3_key_fifo_pfull_ast", + SE_ALG_HASH3_KEY_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x012c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_key_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash3_key_fifo_pfull_neg", + SE_ALG_HASH3_KEY_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_key_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash3_sreq_fifo_pfull_ast", + SE_ALG_HASH3_SREQ_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0134, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_hash3_sreq_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash3_sreq_fifo_pfull_neg", + SE_ALG_HASH3_SREQ_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0138, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_sreq_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash3_int_rsp_fifo_pfull_ast", + SE_ALG_HASH3_INT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x013c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash3_int_rsp_fifo_pfull_neg", + SE_ALG_HASH3_INT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "hash3_ext_rsp_fifo_pfull_ast", + SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0144, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_ext_rsp_fifo_pfull_ast_reg, + NULL, + NULL, + }, + { + "hash3_ext_rsp_fifo_pfull_neg", + SE_ALG_HASH3_EXT_RSP_FIFO_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0148, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_ext_rsp_fifo_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_as_info", + SE_ALG_LPM_AS_INFOr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0158, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_alg_lpm_as_info_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u0_pfull_neg", + SE_ALG_LPM_EXT_RSP_FIFO_U0_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0160, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u0_pfull_neg_reg, + NULL, + 
NULL, + }, + { + "lpm_ext_rsp_fifo_u2_pfull_ast", + SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x016c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u2_pfull_ast_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u2_pfull_neg", + SE_ALG_LPM_EXT_RSP_FIFO_U2_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0170, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u2_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u3_pfull_ast", + SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0174, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u3_pfull_ast_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u3_pfull_neg", + SE_ALG_LPM_EXT_RSP_FIFO_U3_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0178, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u3_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u4_pfull_ast", + SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x017c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u4_pfull_ast_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_u4_pfull_neg", + SE_ALG_LPM_EXT_RSP_FIFO_U4_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_u4_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_as_rsp_fifo_u0_pfull_ast", + SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x018c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_lpm_as_rsp_fifo_u0_pfull_ast_reg, + NULL, + NULL, + }, + { + "lpm_as_rsp_fifo_u0_pfull_neg", + SE_ALG_LPM_AS_RSP_FIFO_U0_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_as_rsp_fifo_u0_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_as_rsp_fifo_u1_pfull_ast", + SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_ASTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_as_rsp_fifo_u1_pfull_ast_reg, + NULL, + NULL, + }, + { + "lpm_as_rsp_fifo_u1_pfull_neg", + SE_ALG_LPM_AS_RSP_FIFO_U1_PFULL_NEGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x0198, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_as_rsp_fifo_u1_pfull_neg_reg, + NULL, + NULL, + }, + { + "lpm_v4_ddr3_base_addr", + SE_ALG_LPM_V4_DDR3_BASE_ADDRr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x019c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_v4_ddr3_base_addr_reg, + NULL, + NULL, + }, + { + "lpm_v6_ddr3_base_addr", + SE_ALG_LPM_V6_DDR3_BASE_ADDRr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_v6_ddr3_base_addr_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_ALG_DEBUG_CNT_MODEr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x01d4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_alg_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "hash_p0_key_vld_cnt", + SE_ALG_HASH_P0_KEY_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p0_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p1_key_vld_cnt", + 
SE_ALG_HASH_P1_KEY_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p1_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p2_key_vld_cnt", + SE_ALG_HASH_P2_KEY_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p2_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p3_key_vld_cnt", + SE_ALG_HASH_P3_KEY_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x100c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p3_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "lpm_p0_key_vld_cnt", + SE_ALG_LPM_P0_KEY_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_p0_key_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p0_rsp_vld_cnt", + SE_ALG_HASH_P0_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p0_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p1_rsp_vld_cnt", + SE_ALG_HASH_P1_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p1_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p2_rsp_vld_cnt", + SE_ALG_HASH_P2_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x101c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p2_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p3_rsp_vld_cnt", + SE_ALG_HASH_P3_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_hash_p3_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "lpm_p0_rsp_vld_cnt", + SE_ALG_LPM_P0_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_p0_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p0_smf_cnt", + SE_ALG_HASH_P0_SMF_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p0_smf_cnt_reg, + NULL, + NULL, + }, + { + "hash_p1_smf_cnt", + SE_ALG_HASH_P1_SMF_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x102c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p1_smf_cnt_reg, + NULL, + NULL, + }, + { + "hash_p2_smf_cnt", + SE_ALG_HASH_P2_SMF_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p2_smf_cnt_reg, + NULL, + NULL, + }, + { + "hash_p3_smf_cnt", + SE_ALG_HASH_P3_SMF_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p3_smf_cnt_reg, + NULL, + NULL, + }, + { + "lpm_p0_smf_cnt", + SE_ALG_LPM_P0_SMF_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_p0_smf_cnt_reg, + NULL, + NULL, + }, + { + "hash_p0_spacevld_cnt", + SE_ALG_HASH_P0_SPACEVLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x103c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p0_spacevld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p1_spacevld_cnt", + SE_ALG_HASH_P1_SPACEVLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1040, + (32 / 8), + 0, + 0, + 0, 
+ 0, + 1, + g_se_alg_hash_p1_spacevld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p2_spacevld_cnt", + SE_ALG_HASH_P2_SPACEVLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p2_spacevld_cnt_reg, + NULL, + NULL, + }, + { + "hash_p3_spacevld_cnt", + SE_ALG_HASH_P3_SPACEVLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash_p3_spacevld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p0_req_vld_cnt", + SE_ALG_SMMU1_P0_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x104c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p0_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p1_req_vld_cnt", + SE_ALG_SMMU1_P1_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p1_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p2_req_vld_cnt", + SE_ALG_SMMU1_P2_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p2_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p3_req_vld_cnt", + SE_ALG_SMMU1_P3_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p3_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p4_req_vld_cnt", + SE_ALG_SMMU1_P4_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x105c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p4_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p5_req_vld_cnt", + SE_ALG_SMMU1_P5_REQ_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p5_req_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p0_rsp_vld_cnt", + SE_ALG_SMMU1_P0_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p0_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p1_rsp_vld_cnt", + SE_ALG_SMMU1_P1_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p1_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p2_rsp_vld_cnt", + SE_ALG_SMMU1_P2_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x106c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p2_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p3_rsp_vld_cnt", + SE_ALG_SMMU1_P3_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p3_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p4_rsp_vld_cnt", + SE_ALG_SMMU1_P4_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p4_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_p5_rsp_vld_cnt", + SE_ALG_SMMU1_P5_RSP_VLD_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_smmu1_p5_rsp_vld_cnt_reg, + NULL, + NULL, + }, + { + "schd_learn_fifo_int_cnt", + SE_ALG_SCHD_LEARN_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x107c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_learn_fifo_int_cnt_reg, + NULL, 
+ NULL, + }, + { + "schd_hash0_fifo_int_cnt", + SE_ALG_SCHD_HASH0_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash0_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash1_fifo_int_cnt", + SE_ALG_SCHD_HASH1_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash1_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash2_fifo_int_cnt", + SE_ALG_SCHD_HASH2_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash2_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash3_fifo_int_cnt", + SE_ALG_SCHD_HASH3_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x108c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash3_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "schd_lpm_fifo_int_cnt", + SE_ALG_SCHD_LPM_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_lpm_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "schd_learn_fifo_parity_err_cnt", + SE_ALG_SCHD_LEARN_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_learn_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash0_fifo_parity_err_cnt", + SE_ALG_SCHD_HASH0_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash0_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash1_fifo_parity_err_cnt", + 
SE_ALG_SCHD_HASH1_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x109c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash1_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash2_fifo_parity_err_cnt", + SE_ALG_SCHD_HASH2_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash2_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "schd_hash3_fifo_parity_err_cnt", + SE_ALG_SCHD_HASH3_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_hash3_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "schd_lpm_fifo_parity_err_cnt", + SE_ALG_SCHD_LPM_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_lpm_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "rd_init_cft_cnt", + SE_ALG_RD_INIT_CFT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_rd_init_cft_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk0_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK0_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk0_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk1_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK1_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk1_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk2_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK2_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk2_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk3_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK3_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk3_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk4_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK4_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk4_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk5_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK5_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk5_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk6_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK6_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk6_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp0_zblk7_ecc_err_cnt", + SE_ALG_ZGP0_ZBLK7_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp0_zblk7_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk0_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK0_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk0_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk1_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK1_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_zgp1_zblk1_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk2_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK2_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk2_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk3_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK3_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk3_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk4_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK4_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk4_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk5_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK5_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk5_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk6_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK6_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk6_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp1_zblk7_ecc_err_cnt", + SE_ALG_ZGP1_ZBLK7_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp1_zblk7_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk0_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK0_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk0_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk1_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK1_ECC_ERR_CNTr, 
+ SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk1_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk2_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK2_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk2_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk3_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK3_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x10fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk3_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk4_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK4_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk4_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk5_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK5_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk5_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk6_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK6_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk6_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp2_zblk7_ecc_err_cnt", + SE_ALG_ZGP2_ZBLK7_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x110c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp2_zblk7_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk0_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK0_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1110, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk0_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk1_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK1_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1114, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk1_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk2_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK2_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1118, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk2_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk3_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK3_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x111c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk3_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk4_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK4_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1120, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk4_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk5_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK5_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1124, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk5_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk6_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK6_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1128, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk6_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zgp3_zblk7_ecc_err_cnt", + SE_ALG_ZGP3_ZBLK7_ECC_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x112c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zgp3_zblk7_ecc_err_cnt_reg, + NULL, + NULL, + }, + { + "zcam_hash_p0_err_cnt", + 
SE_ALG_ZCAM_HASH_P0_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zcam_hash_p0_err_cnt_reg, + NULL, + NULL, + }, + { + "zcam_hash_p1_err_cnt", + SE_ALG_ZCAM_HASH_P1_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1134, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zcam_hash_p1_err_cnt_reg, + NULL, + NULL, + }, + { + "zcam_hash_p2_err_cnt", + SE_ALG_ZCAM_HASH_P2_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1138, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zcam_hash_p2_err_cnt_reg, + NULL, + NULL, + }, + { + "zcam_hash_p3_err_cnt", + SE_ALG_ZCAM_HASH_P3_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x113c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zcam_hash_p3_err_cnt_reg, + NULL, + NULL, + }, + { + "zcam_lpm_err_cnt", + SE_ALG_ZCAM_LPM_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zcam_lpm_err_cnt_reg, + NULL, + NULL, + }, + { + "hash0_sreq_fifo_parity_err_cnt", + SE_ALG_HASH0_SREQ_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1144, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_sreq_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash0_sreq_fifo_int_cnt", + SE_ALG_HASH0_SREQ_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1148, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_sreq_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash0_key_fifo_int_cnt", + SE_ALG_HASH0_KEY_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x114c, 
+ (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_key_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash0_int_rsp_fifo_parity_err_cnt", + SE_ALG_HASH0_INT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1150, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash0_ext_rsp_fifo_parity_err_cnt", + SE_ALG_HASH0_EXT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1154, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_ext_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash0_ext_rsp_fifo_int_cnt", + SE_ALG_HASH0_EXT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1158, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_ext_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash0_int_rsp_fifo_int_cnt", + SE_ALG_HASH0_INT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x115c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash1_sreq_fifo_parity_err_cnt", + SE_ALG_HASH1_SREQ_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1160, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_sreq_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash1_sreq_fifo_int_cnt", + SE_ALG_HASH1_SREQ_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1164, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_sreq_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash1_key_fifo_int_cnt", + SE_ALG_HASH1_KEY_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1168, + (32 
/ 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_key_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash1_int_rsp_fifo_parity_err_cnt", + SE_ALG_HASH1_INT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x116c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_int_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash1_ext_rsp_fifo_parity_err_cnt", + SE_ALG_HASH1_EXT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1170, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_ext_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash1_ext_rsp_fifo_int_cnt", + SE_ALG_HASH1_EXT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1174, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_ext_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash1_int_rsp_fifo_int_cnt", + SE_ALG_HASH1_INT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1178, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_int_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash2_sreq_fifo_parity_err_cnt", + SE_ALG_HASH2_SREQ_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x117c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_sreq_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash2_sreq_fifo_int_cnt", + SE_ALG_HASH2_SREQ_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_sreq_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash2_key_fifo_int_cnt", + SE_ALG_HASH2_KEY_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1184, + (32 / 8), 
+ 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_key_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash2_int_rsp_fifo_parity_err_cnt", + SE_ALG_HASH2_INT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1188, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash2_ext_rsp_fifo_parity_err_cnt", + SE_ALG_HASH2_EXT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x118c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_ext_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash2_ext_rsp_fifo_int_cnt", + SE_ALG_HASH2_EXT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_ext_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash2_int_rsp_fifo_int_cnt", + SE_ALG_HASH2_INT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash3_sreq_fifo_parity_err_cnt", + SE_ALG_HASH3_SREQ_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1198, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_sreq_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash3_sreq_fifo_int_cnt", + SE_ALG_HASH3_SREQ_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x119c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_sreq_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash3_key_fifo_int_cnt", + SE_ALG_HASH3_KEY_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11a0, + (32 / 8), + 0, + 
0, + 0, + 0, + 1, + g_se_alg_hash3_key_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash3_int_rsp_fifo_parity_err_cnt", + SE_ALG_HASH3_INT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash3_ext_rsp_fifo_parity_err_cnt", + SE_ALG_HASH3_EXT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_ext_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "hash3_ext_rsp_fifo_int_cnt", + SE_ALG_HASH3_EXT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_ext_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "hash3_int_rsp_fifo_int_cnt", + SE_ALG_HASH3_INT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_int_cnt", + SE_ALG_LPM_EXT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_v6_fifo_int_cnt", + SE_ALG_LPM_EXT_V6_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_v6_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_v4_fifo_int_cnt", + SE_ALG_LPM_EXT_V4_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_alg_lpm_ext_v4_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_addr_fifo_int_cnt", + SE_ALG_LPM_EXT_ADDR_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_addr_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_v4_fifo_parity_err_cnt", + SE_ALG_LPM_EXT_V4_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_v4_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_v6_fifo_parity_err_cnt", + SE_ALG_LPM_EXT_V6_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_v6_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "lpm_ext_rsp_fifo_parity_err_cnt", + SE_ALG_LPM_EXT_RSP_FIFO_PARITY_ERR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_ext_rsp_fifo_parity_err_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_req_fifo_int_cnt", + SE_ALG_LPM_AS_REQ_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_as_req_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_int_rsp_fifo_int_cnt", + SE_ALG_LPM_AS_INT_RSP_FIFO_INT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x11d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_as_int_rsp_fifo_int_cnt_reg, + NULL, + NULL, + }, + { + "se_alg_int_status", + SE_ALG_SE_ALG_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1800, + (32 / 8), + 0, + 0, + 0, + 0, + 7, + 
g_se_alg_se_alg_int_status_reg, + NULL, + NULL, + }, + { + "schd_int_en", + SE_ALG_SCHD_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1804, + (32 / 8), + 0, + 0, + 0, + 0, + 14, + g_se_alg_schd_int_en_reg, + NULL, + NULL, + }, + { + "schd_int_mask", + SE_ALG_SCHD_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1808, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_int_mask_reg, + NULL, + NULL, + }, + { + "schd_int_status", + SE_ALG_SCHD_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x180c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_schd_int_status_reg, + NULL, + NULL, + }, + { + "zblk_ecc_int_en", + SE_ALG_ZBLK_ECC_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1810, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zblk_ecc_int_en_reg, + NULL, + NULL, + }, + { + "zblk_ecc_int_mask", + SE_ALG_ZBLK_ECC_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1814, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zblk_ecc_int_mask_reg, + NULL, + NULL, + }, + { + "zblk_ecc_int_status", + SE_ALG_ZBLK_ECC_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1818, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_zblk_ecc_int_status_reg, + NULL, + NULL, + }, + { + "hash0_int_en", + SE_ALG_HASH0_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x181c, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_se_alg_hash0_int_en_reg, + NULL, + NULL, + }, + { + "hash0_int_mask", + SE_ALG_HASH0_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1820, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_mask_reg, + 
NULL, + NULL, + }, + { + "hash0_int_status", + SE_ALG_HASH0_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1824, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash0_int_status_reg, + NULL, + NULL, + }, + { + "hash1_int_en", + SE_ALG_HASH1_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1828, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_se_alg_hash1_int_en_reg, + NULL, + NULL, + }, + { + "hash1_int_mask", + SE_ALG_HASH1_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x182c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_int_mask_reg, + NULL, + NULL, + }, + { + "hash1_int_status", + SE_ALG_HASH1_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1830, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash1_int_status_reg, + NULL, + NULL, + }, + { + "hash2_int_en", + SE_ALG_HASH2_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1834, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_se_alg_hash2_int_en_reg, + NULL, + NULL, + }, + { + "hash2_int_mask", + SE_ALG_HASH2_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1838, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_mask_reg, + NULL, + NULL, + }, + { + "hash2_int_status", + SE_ALG_HASH2_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x183c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash2_int_status_reg, + NULL, + NULL, + }, + { + "hash3_int_en", + SE_ALG_HASH3_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1840, + (32 / 8), + 0, + 0, + 0, + 0, + 8, + g_se_alg_hash3_int_en_reg, + NULL, + NULL, + }, + { + "hash3_int_mask", + 
SE_ALG_HASH3_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1844, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_mask_reg, + NULL, + NULL, + }, + { + "hash3_int_status", + SE_ALG_HASH3_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1848, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_hash3_int_status_reg, + NULL, + NULL, + }, + { + "lpm_int_en", + SE_ALG_LPM_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x184c, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_se_alg_lpm_int_en_reg, + NULL, + NULL, + }, + { + "lpm_int_mask", + SE_ALG_LPM_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1850, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_int_mask_reg, + NULL, + NULL, + }, + { + "lpm_int_status", + SE_ALG_LPM_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_ALG_BASE_ADDR + 0x1854, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_alg_lpm_int_status_reg, + NULL, + NULL, + }, + { + "zblock_lpm_mask0", + SE_ALG_ZBLOCK_LPM_MASK0r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10001, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 5, + g_se_alg_zblock_lpm_mask0_reg, + NULL, + NULL, + }, + { + "zblock_lpm_mask1", + SE_ALG_ZBLOCK_LPM_MASK1r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10002, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 5, + g_se_alg_zblock_lpm_mask1_reg, + NULL, + NULL, + }, + { + "zblock_lpm_mask2", + SE_ALG_ZBLOCK_LPM_MASK2r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10003, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 5, + g_se_alg_zblock_lpm_mask2_reg, + NULL, + NULL, + }, + { + "zblock_lpm_mask3", + SE_ALG_ZBLOCK_LPM_MASK3r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10004, + (512 / 8), + 3 + 1, 
+ 7 + 1, + 0x4000, + 0x800, + 5, + g_se_alg_zblock_lpm_mask3_reg, + NULL, + NULL, + }, + { + "zblock_default_route0", + SE_ALG_ZBLOCK_DEFAULT_ROUTE0r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10005, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route0_reg, + NULL, + NULL, + }, + { + "zblock_default_route1", + SE_ALG_ZBLOCK_DEFAULT_ROUTE1r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10006, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route1_reg, + NULL, + NULL, + }, + { + "zblock_default_route2", + SE_ALG_ZBLOCK_DEFAULT_ROUTE2r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10007, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route2_reg, + NULL, + NULL, + }, + { + "zblock_default_route3", + SE_ALG_ZBLOCK_DEFAULT_ROUTE3r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10008, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route3_reg, + NULL, + NULL, + }, + { + "zblock_default_route4", + SE_ALG_ZBLOCK_DEFAULT_ROUTE4r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10009, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route4_reg, + NULL, + NULL, + }, + { + "zblock_default_route5", + SE_ALG_ZBLOCK_DEFAULT_ROUTE5r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000a, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route5_reg, + NULL, + NULL, + }, + { + "zblock_default_route6", + SE_ALG_ZBLOCK_DEFAULT_ROUTE6r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000b, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route6_reg, + NULL, + NULL, + }, + { + "zblock_default_route7", + SE_ALG_ZBLOCK_DEFAULT_ROUTE7r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000c, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_default_route7_reg, + 
NULL, + NULL, + }, + { + "zblock_hash_listtable_item0", + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM0r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000d, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_hash_listtable_item0_reg, + NULL, + NULL, + }, + { + "zblock_hash_listtable_item1", + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM1r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000e, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_hash_listtable_item1_reg, + NULL, + NULL, + }, + { + "zblock_hash_listtable_item2", + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM2r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x1000f, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_hash_listtable_item2_reg, + NULL, + NULL, + }, + { + "zblock_hash_listtable_item3", + SE_ALG_ZBLOCK_HASH_LISTTABLE_ITEM3r, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10010, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_hash_listtable_item3_reg, + NULL, + NULL, + }, + { + "zblock_ecc_err_status", + SE_ALG_ZBLOCK_ECC_ERR_STATUSr, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10011, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 4, + g_se_alg_zblock_ecc_err_status_reg, + NULL, + NULL, + }, + { + "zblock_lpm_v6_sram_cmp", + SE_ALG_ZBLOCK_LPM_V6_SRAM_CMPr, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10012, + (512 / 8), + 3 + 1, + 7 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_lpm_v6_sram_cmp_reg, + NULL, + NULL, + }, + { + "zblock_lpm_v4_sram_cmp", + SE_ALG_ZBLOCK_LPM_V4_SRAM_CMPr, + SE, + DPP_REG_FLAG_INDIRECT, + DPP_REG_BIN_ARRAY, + 0x10013, + (512 / 8), + 3 + 1, + 5 + 1, + 0x4000, + 0x800, + 1, + g_se_alg_zblock_lpm_v4_sram_cmp_reg, + NULL, + NULL, + }, + { + "kschd_pful_cfg", + SE_PARSER_KSCHD_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_se_parser_kschd_pful_cfg_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_PARSER_DEBUG_CNT_MODEr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_parser_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "parser_int_en", + SE_PARSER_PARSER_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_parser_int_en_reg, + NULL, + NULL, + }, + { + "parser_int_mask", + SE_PARSER_PARSER_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_parser_int_mask_reg, + NULL, + NULL, + }, + { + "parser_int_status", + SE_PARSER_PARSER_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_parser_int_status_reg, + NULL, + NULL, + }, + { + "parser_int_unmask_flag", + SE_PARSER_PARSER_INT_UNMASK_FLAGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_parser_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "ecc_bypass_read", + SE_PARSER_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "mex0_5_req_cnt", + SE_PARSER_MEX0_5_REQ_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_parser_mex0_5_req_cnt_reg, + NULL, + NULL, + }, + { + "kschd_req0_5_cnt", + SE_PARSER_KSCHD_REQ0_5_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, 
+ SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000420, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_parser_kschd_req0_5_cnt_reg, + NULL, + NULL, + }, + { + "kschd_parser_fc0_5_cnt", + SE_PARSER_KSCHD_PARSER_FC0_5_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000440, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_parser_kschd_parser_fc0_5_cnt_reg, + NULL, + NULL, + }, + { + "se_ppu_mex0_5_fc_cnt", + SE_PARSER_SE_PPU_MEX0_5_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000460, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_parser_se_ppu_mex0_5_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_marc_fc_cnt", + SE_PARSER_SMMU0_MARC_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_smmu0_marc_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_marc_key_cnt", + SE_PARSER_SMMU0_MARC_KEY_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_smmu0_marc_key_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_key_cnt", + SE_PARSER_CMMU_KEY_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_cmmu_key_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_parser_fc_cnt", + SE_PARSER_CMMU_PARSER_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x0000048c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_cmmu_parser_fc_cnt_reg, + NULL, + NULL, + }, + { + "marc_tab_type_err_mex0_5_cnt", + SE_PARSER_MARC_TAB_TYPE_ERR_MEX0_5_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x00000490, + (32 / 8), + 0, + 5 + 1, + 
0, + 4, + 1, + g_se_parser_marc_tab_type_err_mex0_5_cnt_reg, + NULL, + NULL, + }, + { + "eram_fulladdr_drop_cnt", + SE_PARSER_ERAM_FULLADDR_DROP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_PARSER_BASE_ADDR + 0x000004b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_parser_eram_fulladdr_drop_cnt_reg, + NULL, + NULL, + }, + { + "hash0_pful_cfg", + SE_AS_HASH0_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash0_pful_cfg_reg, + NULL, + NULL, + }, + { + "hash1_pful_cfg", + SE_AS_HASH1_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash1_pful_cfg_reg, + NULL, + NULL, + }, + { + "hash2_pful_cfg", + SE_AS_HASH2_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash2_pful_cfg_reg, + NULL, + NULL, + }, + { + "hash3_pful_cfg", + SE_AS_HASH3_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash3_pful_cfg_reg, + NULL, + NULL, + }, + { + "pbu_pful_cfg", + SE_AS_PBU_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_pbu_pful_cfg_reg, + NULL, + NULL, + }, + { + "lpm_pful_cfg", + SE_AS_LPM_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_lpm_pful_cfg_reg, + NULL, + NULL, + }, + { + "etcam_pful_cfg", + SE_AS_ETCAM_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001dc, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 1, + g_se_as_etcam_pful_cfg_reg, + NULL, + NULL, + }, + { + "as_learn0_fifo_cfg", + SE_AS_AS_LEARN0_FIFO_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001e0, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_as_as_learn0_fifo_cfg_reg, + NULL, + NULL, + }, + { + "as_learn1_fifo_cfg", + SE_AS_AS_LEARN1_FIFO_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001e4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_as_as_learn1_fifo_cfg_reg, + NULL, + NULL, + }, + { + "as_dma_fifo_cfg", + SE_AS_AS_DMA_FIFO_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_dma_fifo_cfg_reg, + NULL, + NULL, + }, + { + "age_pful_cfg", + SE_AS_AGE_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_age_pful_cfg_reg, + NULL, + NULL, + }, + { + "etcam_rsp_cfg", + SE_AS_ETCAM_RSP_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001f4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_as_etcam_rsp_cfg_reg, + NULL, + NULL, + }, + { + "pbu_ecc_bypass_read", + SE_AS_PBU_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000001fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_pbu_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "etcam0_ecc_bypass_read", + SE_AS_ETCAM0_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_etcam0_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "etcam1_ecc_bypass_read", + SE_AS_ETCAM1_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 
0x00000204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_etcam1_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "lpm_ecc_bypass_read", + SE_AS_LPM_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_lpm_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "hash_ecc_bypass_read", + SE_AS_HASH_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000020c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_as_hash_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "hash_learn_ecc_bypass_read", + SE_AS_HASH_LEARN_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_hash_learn_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_AS_DEBUG_CNT_MODEr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000003e8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_as_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "as_int_0_en", + SE_AS_AS_INT_0_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000440, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_0_en_reg, + NULL, + NULL, + }, + { + "as_int_0_mask", + SE_AS_AS_INT_0_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000444, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_0_mask_reg, + NULL, + NULL, + }, + { + "as_int_1_en", + SE_AS_AS_INT_1_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000448, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_1_en_reg, + NULL, + NULL, + }, + { + "as_int_1_mask", + SE_AS_AS_INT_1_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_AS_BASE_ADDR + 0x0000044c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_1_mask_reg, + NULL, + NULL, + }, + { + "as_int_2_en", + SE_AS_AS_INT_2_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_2_en_reg, + NULL, + NULL, + }, + { + "as_int_2_mask", + SE_AS_AS_INT_2_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000454, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_2_mask_reg, + NULL, + NULL, + }, + { + "as_int_0_status", + SE_AS_AS_INT_0_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000458, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_0_status_reg, + NULL, + NULL, + }, + { + "as_int_1_status", + SE_AS_AS_INT_1_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000045c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_1_status_reg, + NULL, + NULL, + }, + { + "as_int_2_status", + SE_AS_AS_INT_2_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000460, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_int_2_status_reg, + NULL, + NULL, + }, + { + "se_as_int_status", + SE_AS_SE_AS_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000464, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_as_se_as_int_status_reg, + NULL, + NULL, + }, + { + "hash0_3_wr_req_cnt", + SE_AS_HASH0_3_WR_REQ_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000082c, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_as_hash0_3_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_etcam0_1_fc_cnt", + SE_AS_SMMU0_ETCAM0_1_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_AS_BASE_ADDR + 0x0000083c, + (32 / 8), + 0, + 0 + 1, + 0, + 4, + 1, + g_se_as_smmu0_etcam0_1_fc_cnt_reg, + NULL, + NULL, + }, + { + "etcam0_1_smmu0_req_cnt", + SE_AS_ETCAM0_1_SMMU0_REQ_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000844, + (32 / 8), + 0, + 0 + 1, + 0, + 8, + 1, + g_se_as_etcam0_1_smmu0_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_etcam0_1_rsp_cnt", + SE_AS_SMMU0_ETCAM0_1_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000848, + (32 / 8), + 0, + 0 + 1, + 0, + 8, + 1, + g_se_as_smmu0_etcam0_1_rsp_cnt_reg, + NULL, + NULL, + }, + { + "as_hla_hash_p0_3_key_cnt", + SE_AS_AS_HLA_HASH_P0_3_KEY_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000854, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_as_as_hla_hash_p0_3_key_cnt_reg, + NULL, + NULL, + }, + { + "as_hla_lpm_p0_key_cnt", + SE_AS_AS_HLA_LPM_P0_KEY_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000864, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_hla_lpm_p0_key_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_hash_p0_3_rsp_cnt", + SE_AS_ALG_AS_HASH_P0_3_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000868, + (32 / 8), + 0, + 3 + 1, + 0, + 8, + 1, + g_se_as_alg_as_hash_p0_3_rsp_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_hash_p0_3_smf_rsp_cnt", + SE_AS_ALG_AS_HASH_P0_3_SMF_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000086c, + (32 / 8), + 0, + 3 + 1, + 0, + 8, + 1, + g_se_as_alg_as_hash_p0_3_smf_rsp_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_lpm_p0_rsp_cnt", + SE_AS_ALG_AS_LPM_P0_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000888, + (32 / 8), + 0, + 0, + 0, + 0, 
+ 1, + g_se_as_alg_as_lpm_p0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_lpm_p0_3_smf_rsp_cnt", + SE_AS_ALG_AS_LPM_P0_3_SMF_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000088c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_alg_as_lpm_p0_3_smf_rsp_cnt_reg, + NULL, + NULL, + }, + { + "as_pbu_key_cnt", + SE_AS_AS_PBU_KEY_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000890, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_pbu_key_cnt_reg, + NULL, + NULL, + }, + { + "pbu_se_dpi_rsp_dat_cnt", + SE_AS_PBU_SE_DPI_RSP_DAT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000894, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_pbu_se_dpi_rsp_dat_cnt_reg, + NULL, + NULL, + }, + { + "as_etcam_ctrl_req0_cnt", + SE_AS_AS_ETCAM_CTRL_REQ0_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000898, + (32 / 8), + 0, + 0 + 1, + 0, + 4, + 1, + g_se_as_as_etcam_ctrl_req0_cnt_reg, + NULL, + NULL, + }, + { + "etcam_ctrl_as_index0_1_cnt", + SE_AS_ETCAM_CTRL_AS_INDEX0_1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008a0, + (32 / 8), + 0, + 0 + 1, + 0, + 8, + 1, + g_se_as_etcam_ctrl_as_index0_1_cnt_reg, + NULL, + NULL, + }, + { + "etcam_ctrl_as_hit0_1_cnt", + SE_AS_ETCAM_CTRL_AS_HIT0_1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008a4, + (32 / 8), + 0, + 0 + 1, + 0, + 8, + 1, + g_se_as_etcam_ctrl_as_hit0_1_cnt_reg, + NULL, + NULL, + }, + { + "as_smmu0_req_cnt", + SE_AS_AS_SMMU0_REQ_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_smmu0_req_cnt_reg, + NULL, + NULL, + }, + { + "learn_hla_wr_cnt", + 
SE_AS_LEARN_HLA_WR_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_learn_hla_wr_cnt_reg, + NULL, + NULL, + }, + { + "as_smmu1_req_cnt", + SE_AS_AS_SMMU1_REQ_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "se_cfg_mac_dat_cnt", + SE_AS_SE_CFG_MAC_DAT_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_se_cfg_mac_dat_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_hash_p0_3_fc_cnt", + SE_AS_ALG_AS_HASH_P0_3_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008c0, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_as_alg_as_hash_p0_3_fc_cnt_reg, + NULL, + NULL, + }, + { + "alg_as_lpm_p0_fc_cnt", + SE_AS_ALG_AS_LPM_P0_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_alg_as_lpm_p0_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_alg_hash_p0_3_fc_cnt", + SE_AS_AS_ALG_HASH_P0_3_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008d4, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_as_as_alg_hash_p0_3_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_alg_lpm_p0_fc_cnt", + SE_AS_AS_ALG_LPM_P0_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_as_alg_lpm_p0_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_pbu_fc_cnt", + SE_AS_AS_PBU_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008e8, + (32 / 8), + 0, + 0, + 0, + 0, + 
1, + g_se_as_as_pbu_fc_cnt_reg, + NULL, + NULL, + }, + { + "pbu_se_dpi_key_fc_cnt", + SE_AS_PBU_SE_DPI_KEY_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_pbu_se_dpi_key_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_etcam_ctrl_fc0_1_cnt", + SE_AS_AS_ETCAM_CTRL_FC0_1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008f0, + (32 / 8), + 0, + 0 + 1, + 0, + 4, + 1, + g_se_as_as_etcam_ctrl_fc0_1_cnt_reg, + NULL, + NULL, + }, + { + "etcam_ctrl_as_fc0_1_cnt", + SE_AS_ETCAM_CTRL_AS_FC0_1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x000008f8, + (32 / 8), + 0, + 0 + 1, + 0, + 4, + 1, + g_se_as_etcam_ctrl_as_fc0_1_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_as_mac_age_fc_cnt", + SE_AS_SMMU0_AS_MAC_AGE_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000900, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_smmu0_as_mac_age_fc_cnt_reg, + NULL, + NULL, + }, + { + "alg_learn_fc_cnt", + SE_AS_ALG_LEARN_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000904, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_alg_learn_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_as_fc_cnt", + SE_AS_SMMU1_AS_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x00000908, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_smmu1_as_fc_cnt_reg, + NULL, + NULL, + }, + { + "cfg_se_mac_fc_cnt", + SE_AS_CFG_SE_MAC_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_AS_BASE_ADDR + 0x0000090c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_as_cfg_se_mac_fc_cnt_reg, + NULL, + NULL, + }, + { + "kschd_cpu_rdy", + SE_KSCHD_KSCHD_CPU_RDYr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_cpu_rdy_reg, + NULL, + NULL, + }, + { + "ppu0_ecc_bypass_read", + SE_KSCHD_PPU0_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_ppu0_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "pbu_ecc_bypass_read", + SE_KSCHD_PBU_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_pbu_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "smmu1_ecc_bypass_read", + SE_KSCHD_SMMU1_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000002c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_se_kschd_smmu1_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "ass_ecc_bypass_read", + SE_KSCHD_ASS_ECC_BYPASS_READr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_ass_ecc_bypass_read_reg, + NULL, + NULL, + }, + { + "sdt_h", + SE_KSCHD_SDT_Hr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_sdt_h_reg, + NULL, + NULL, + }, + { + "sdt_l", + SE_KSCHD_SDT_Lr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_sdt_l_reg, + NULL, + NULL, + }, + { + "hash_key15", + SE_KSCHD_HASH_KEY15r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_kschd_hash_key15_reg, + NULL, + NULL, + }, + { + "hash_key14", + SE_KSCHD_HASH_KEY14r, + SE, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key14_reg, + NULL, + NULL, + }, + { + "hash_key13", + SE_KSCHD_HASH_KEY13r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key13_reg, + NULL, + NULL, + }, + { + "hash_key12", + SE_KSCHD_HASH_KEY12r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key12_reg, + NULL, + NULL, + }, + { + "hash_key11", + SE_KSCHD_HASH_KEY11r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key11_reg, + NULL, + NULL, + }, + { + "hash_key10", + SE_KSCHD_HASH_KEY10r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key10_reg, + NULL, + NULL, + }, + { + "hash_key9", + SE_KSCHD_HASH_KEY9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key9_reg, + NULL, + NULL, + }, + { + "hash_key8", + SE_KSCHD_HASH_KEY8r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key8_reg, + NULL, + NULL, + }, + { + "hash_key7", + SE_KSCHD_HASH_KEY7r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key7_reg, + NULL, + NULL, + }, + { + "hash_key6", + SE_KSCHD_HASH_KEY6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + 
MODULE_SE_KSCHD_BASE_ADDR + 0x0000006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key6_reg, + NULL, + NULL, + }, + { + "hash_key5", + SE_KSCHD_HASH_KEY5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key5_reg, + NULL, + NULL, + }, + { + "hash_key4", + SE_KSCHD_HASH_KEY4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key4_reg, + NULL, + NULL, + }, + { + "hash_key3", + SE_KSCHD_HASH_KEY3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key3_reg, + NULL, + NULL, + }, + { + "hash_key2", + SE_KSCHD_HASH_KEY2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x0000007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key2_reg, + NULL, + NULL, + }, + { + "hash_key1", + SE_KSCHD_HASH_KEY1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key1_reg, + NULL, + NULL, + }, + { + "hash_key0", + SE_KSCHD_HASH_KEY0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_hash_key0_reg, + NULL, + NULL, + }, + { + "schd_int_0_en", + SE_KSCHD_SCHD_INT_0_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_0_en_reg, + NULL, + NULL, + }, + { + "schd_int_0_mask", + SE_KSCHD_SCHD_INT_0_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 
0, + 0, + 1, + g_se_kschd_schd_int_0_mask_reg, + NULL, + NULL, + }, + { + "schd_int_1_en", + SE_KSCHD_SCHD_INT_1_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_1_en_reg, + NULL, + NULL, + }, + { + "schd_int_1_mask", + SE_KSCHD_SCHD_INT_1_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_1_mask_reg, + NULL, + NULL, + }, + { + "schd_int_2_en", + SE_KSCHD_SCHD_INT_2_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_2_en_reg, + NULL, + NULL, + }, + { + "schd_int_2_mask", + SE_KSCHD_SCHD_INT_2_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_2_mask_reg, + NULL, + NULL, + }, + { + "schd_int_3_en", + SE_KSCHD_SCHD_INT_3_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_3_en_reg, + NULL, + NULL, + }, + { + "schd_int_3_mask", + SE_KSCHD_SCHD_INT_3_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_3_mask_reg, + NULL, + NULL, + }, + { + "schd_int_4_en", + SE_KSCHD_SCHD_INT_4_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_4_en_reg, + NULL, + NULL, + }, + { + "schd_int_4_mask", + SE_KSCHD_SCHD_INT_4_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 
0x000000e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_4_mask_reg, + NULL, + NULL, + }, + { + "schd_int_0_status", + SE_KSCHD_SCHD_INT_0_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_0_status_reg, + NULL, + NULL, + }, + { + "schd_int_1_status", + SE_KSCHD_SCHD_INT_1_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_1_status_reg, + NULL, + NULL, + }, + { + "schd_int_2_status", + SE_KSCHD_SCHD_INT_2_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_2_status_reg, + NULL, + NULL, + }, + { + "schd_int_3_status", + SE_KSCHD_SCHD_INT_3_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_3_status_reg, + NULL, + NULL, + }, + { + "schd_int_4_status", + SE_KSCHD_SCHD_INT_4_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000000f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_schd_int_4_status_reg, + NULL, + NULL, + }, + { + "se_kschd_int_status", + SE_KSCHD_SE_KSCHD_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000148, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_se_kschd_se_kschd_int_status_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_KSCHD_DEBUG_CNT_MODEr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000003ec, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_kschd_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "se_parser_kschd_key0_3_cnt", + 
SE_KSCHD_SE_PARSER_KSCHD_KEY0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000878, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_kschd_se_parser_kschd_key0_3_cnt_reg, + NULL, + NULL, + }, + { + "se_smmu1_key0_3_cnt", + SE_KSCHD_SE_SMMU1_KEY0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000890, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_kschd_se_smmu1_key0_3_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key0_cnt", + SE_KSCHD_KSCHD_AS_KEY0_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key0_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key1_cnt", + SE_KSCHD_KSCHD_AS_KEY1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key1_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key2_cnt", + SE_KSCHD_KSCHD_AS_KEY2_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key2_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key3_cnt", + SE_KSCHD_KSCHD_AS_KEY3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key3_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key4_cnt", + SE_KSCHD_KSCHD_AS_KEY4_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key4_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key5_cnt", + SE_KSCHD_KSCHD_AS_KEY5_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR 
+ 0x000008b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key5_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key6_cnt", + SE_KSCHD_KSCHD_AS_KEY6_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key6_cnt_reg, + NULL, + NULL, + }, + { + "kschd_as_key9_cnt", + SE_KSCHD_KSCHD_AS_KEY9_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_kschd_as_key9_cnt_reg, + NULL, + NULL, + }, + { + "kschd_se_parser_fc0_3_cnt", + SE_KSCHD_KSCHD_SE_PARSER_FC0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008c0, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_kschd_kschd_se_parser_fc0_3_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_se_fc0_3_cnt", + SE_KSCHD_SMMU1_SE_FC0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008d8, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_kschd_smmu1_se_fc0_3_cnt_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt0", + SE_KSCHD_AS_KSCHD_FC_CNT0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt0_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt1", + SE_KSCHD_AS_KSCHD_FC_CNT1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt1_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt2", + SE_KSCHD_AS_KSCHD_FC_CNT2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt2_reg, + NULL, + NULL, + }, + { + 
"as_kschd_fc_cnt3", + SE_KSCHD_AS_KSCHD_FC_CNT3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt3_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt4", + SE_KSCHD_AS_KSCHD_FC_CNT4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt4_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt5", + SE_KSCHD_AS_KSCHD_FC_CNT5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x000008fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt5_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt6", + SE_KSCHD_AS_KSCHD_FC_CNT6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000900, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt6_reg, + NULL, + NULL, + }, + { + "as_kschd_fc_cnt9", + SE_KSCHD_AS_KSCHD_FC_CNT9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_KSCHD_BASE_ADDR + 0x00000904, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_kschd_as_kschd_fc_cnt9_reg, + NULL, + NULL, + }, + { + "rschd_hash_pful_cfg", + SE_RSCHD_RSCHD_HASH_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_hash_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_hash_ept_cfg", + SE_RSCHD_RSCHD_HASH_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_hash_ept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_pbu_pful_cfg", + SE_RSCHD_RSCHD_PBU_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000010, 
+ (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_pbu_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_pbu_ept_cfg", + SE_RSCHD_RSCHD_PBU_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_pbu_ept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_lpm_pful_cfg", + SE_RSCHD_RSCHD_LPM_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_lpm_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_lpm_ept_cfg", + SE_RSCHD_RSCHD_LPM_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_lpm_ept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_etcam_pful_cfg", + SE_RSCHD_RSCHD_ETCAM_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_etcam_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_etcam_ept_cfg", + SE_RSCHD_RSCHD_ETCAM_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_etcam_ept_cfg_reg, + NULL, + NULL, + }, + { + "smmu0_wb_pful_cfg", + SE_RSCHD_SMMU0_WB_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu0_wb_pful_cfg_reg, + NULL, + NULL, + }, + { + "smmu0_wb_ept_cfg", + SE_RSCHD_SMMU0_WB_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu0_wb_ept_cfg_reg, + NULL, + NULL, + }, + { + "smmu1_wb_pful_cfg", + 
SE_RSCHD_SMMU1_WB_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu1_wb_pful_cfg_reg, + NULL, + NULL, + }, + { + "smmu1_wb_ept_cfg", + SE_RSCHD_SMMU1_WB_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu1_wb_ept_cfg_reg, + NULL, + NULL, + }, + { + "alg_wb_pful_cfg", + SE_RSCHD_ALG_WB_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_alg_wb_pful_cfg_reg, + NULL, + NULL, + }, + { + "alg_wb_ept_cfg", + SE_RSCHD_ALG_WB_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_alg_wb_ept_cfg_reg, + NULL, + NULL, + }, + { + "wr_rsp_vld_en", + SE_RSCHD_WR_RSP_VLD_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_wr_rsp_vld_en_reg, + NULL, + NULL, + }, + { + "nppu_wb_pful_cfg", + SE_RSCHD_NPPU_WB_PFUL_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_nppu_wb_pful_cfg_reg, + NULL, + NULL, + }, + { + "nppu_wb_ept_cfg", + SE_RSCHD_NPPU_WB_EPT_CFGr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_nppu_wb_ept_cfg_reg, + NULL, + NULL, + }, + { + "port0_int_en", + SE_RSCHD_PORT0_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port0_int_en_reg, 
+ NULL, + NULL, + }, + { + "port0_int_mask", + SE_RSCHD_PORT0_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port0_int_mask_reg, + NULL, + NULL, + }, + { + "port1_int_en", + SE_RSCHD_PORT1_INT_ENr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port1_int_en_reg, + NULL, + NULL, + }, + { + "port1_int_mask", + SE_RSCHD_PORT1_INT_MASKr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port1_int_mask_reg, + NULL, + NULL, + }, + { + "port0_int_status", + SE_RSCHD_PORT0_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port0_int_status_reg, + NULL, + NULL, + }, + { + "port1_int_status", + SE_RSCHD_PORT1_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_port1_int_status_reg, + NULL, + NULL, + }, + { + "se_rschd_int_status", + SE_RSCHD_SE_RSCHD_INT_STATUSr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000018c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_rschd_se_rschd_int_status_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_RSCHD_DEBUG_CNT_MODEr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000003ec, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_rschd_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "se_ppu_mex0_5_rsp1_cnt", + SE_RSCHD_SE_PPU_MEX0_5_RSP1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000880, 
+ (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_rschd_se_ppu_mex0_5_rsp1_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp0_cnt", + SE_RSCHD_AS_RSCHD_RSP0_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000898, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp0_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp1_cnt", + SE_RSCHD_AS_RSCHD_RSP1_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000089c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp1_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp2_cnt", + SE_RSCHD_AS_RSCHD_RSP2_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp2_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp3_cnt", + SE_RSCHD_AS_RSCHD_RSP3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp3_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp4_cnt", + SE_RSCHD_AS_RSCHD_RSP4_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp4_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp5_cnt", + SE_RSCHD_AS_RSCHD_RSP5_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp5_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp6_cnt", + SE_RSCHD_AS_RSCHD_RSP6_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp6_cnt_reg, + NULL, + NULL, + }, + { + "as_rschd_rsp9_cnt", + 
SE_RSCHD_AS_RSCHD_RSP9_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_as_rschd_rsp9_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_se_rsp0_3_cnt", + SE_RSCHD_SMMU1_SE_RSP0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008b8, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_rschd_smmu1_se_rsp0_3_cnt_reg, + NULL, + NULL, + }, + { + "ppu_se_mex0_3_fc_cnt", + SE_RSCHD_PPU_SE_MEX0_3_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008c4, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_se_rschd_ppu_se_mex0_3_fc_cnt_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt0", + SE_RSCHD_RSCHD_AS_FC_CNT0r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt0_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt1", + SE_RSCHD_RSCHD_AS_FC_CNT1r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt1_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt2", + SE_RSCHD_RSCHD_AS_FC_CNT2r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt2_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt3", + SE_RSCHD_RSCHD_AS_FC_CNT3r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt3_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt4", + SE_RSCHD_RSCHD_AS_FC_CNT4r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008f0, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt4_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt5", + SE_RSCHD_RSCHD_AS_FC_CNT5r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt5_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt6", + SE_RSCHD_RSCHD_AS_FC_CNT6r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt6_reg, + NULL, + NULL, + }, + { + "rschd_as_fc_cnt9", + SE_RSCHD_RSCHD_AS_FC_CNT9r, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x000008fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_rschd_as_fc_cnt9_reg, + NULL, + NULL, + }, + { + "se_smmu1_fc0_3_cnt", + SE_RSCHD_SE_SMMU1_FC0_3_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000900, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_rschd_se_smmu1_fc0_3_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_se_wr_done_cnt", + SE_RSCHD_SMMU0_SE_WR_DONE_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000910, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu0_se_wr_done_cnt_reg, + NULL, + NULL, + }, + { + "se_smmu0_wr_done_fc_cnt", + SE_RSCHD_SE_SMMU0_WR_DONE_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000914, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_se_smmu0_wr_done_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_se_wr_rsp_cnt", + SE_RSCHD_SMMU1_SE_WR_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000918, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_smmu1_se_wr_rsp_cnt_reg, + NULL, + NULL, + }, + { + "se_smmu1_wr_rsp_fc_cnt", + 
SE_RSCHD_SE_SMMU1_WR_RSP_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x0000091c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_se_smmu1_wr_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "alg_se_wr_rsp_cnt", + SE_RSCHD_ALG_SE_WR_RSP_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000920, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_alg_se_wr_rsp_cnt_reg, + NULL, + NULL, + }, + { + "se_alg_wr_rsp_fc_cnt", + SE_RSCHD_SE_ALG_WR_RSP_FC_CNTr, + SE, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_BASE_ADDR + MODULE_SE_RSCHD_BASE_ADDR + 0x00000924, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_rschd_se_alg_wr_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "kschd_pful_cfg0", + SMMU0_SMMU0_KSCHD_PFUL_CFG0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_kschd_pful_cfg0_reg, + NULL, + NULL, + }, + { + "kschd_pful_cfg1", + SMMU0_SMMU0_KSCHD_PFUL_CFG1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_kschd_pful_cfg1_reg, + NULL, + NULL, + }, + { + "ctrl_pful1_cfg", + SMMU0_SMMU0_CTRL_PFUL1_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000088, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_ctrl_pful1_cfg_reg, + NULL, + NULL, + }, + { + "ctrl_pful2_cfg", + SMMU0_SMMU0_CTRL_PFUL2_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_ctrl_pful2_cfg_reg, + NULL, + NULL, + }, + { + "ctrl_pful3_cfg", + SMMU0_SMMU0_CTRL_PFUL3_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000090, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_ctrl_pful3_cfg_reg, + NULL, + NULL, + }, + { + "rschd_pful_cfg", + SMMU0_SMMU0_RSCHD_PFUL_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000094, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_rschd_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_ept_cfg", + SMMU0_SMMU0_RSCHD_EPT_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000098, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_rschd_ept_cfg_reg, + NULL, + NULL, + }, + { + "alucmd_pful_cfg", + SMMU0_SMMU0_ALUCMD_PFUL_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000009c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_alucmd_pful_cfg_reg, + NULL, + NULL, + }, + { + "aluwr_pful_cfg", + SMMU0_SMMU0_ALUWR_PFUL_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000a0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_aluwr_pful_cfg_reg, + NULL, + NULL, + }, + { + "wr_arb_pful_cfg0", + SMMU0_SMMU0_WR_ARB_PFUL_CFG0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000a4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_wr_arb_pful_cfg0_reg, + NULL, + NULL, + }, + { + "wr_arb_pful_cfg1", + SMMU0_SMMU0_WR_ARB_PFUL_CFG1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000a8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_wr_arb_pful_cfg1_reg, + NULL, + NULL, + }, + { + "ord_pful_cfg", + SMMU0_SMMU0_ORD_PFUL_CFGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000ac, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + 
g_smmu0_smmu0_ord_pful_cfg_reg, + NULL, + NULL, + }, + { + "cfg_dma_baddr", + SMMU0_SMMU0_CFG_DMA_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_dma_baddr_reg, + NULL, + NULL, + }, + { + "cfg_odma0_baddr", + SMMU0_SMMU0_CFG_ODMA0_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_odma0_baddr_reg, + NULL, + NULL, + }, + { + "cfg_odma1_baddr", + SMMU0_SMMU0_CFG_ODMA1_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_odma1_baddr_reg, + NULL, + NULL, + }, + { + "cfg_odma2_baddr", + SMMU0_SMMU0_CFG_ODMA2_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_odma2_baddr_reg, + NULL, + NULL, + }, + { + "cfg_odma_tdm_baddr", + SMMU0_SMMU0_CFG_ODMA_TDM_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_odma_tdm_baddr_reg, + NULL, + NULL, + }, + { + "cfg_mcast_baddr", + SMMU0_SMMU0_CFG_MCAST_BADDRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_mcast_baddr_reg, + NULL, + NULL, + }, + { + "cfg_lpm0", + SMMU0_SMMU0_CFG_LPM0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000d8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm0_reg, + NULL, + NULL, + }, + { + "cfg_lpm1", + SMMU0_SMMU0_CFG_LPM1r, + SMMU0, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000dc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm1_reg, + NULL, + NULL, + }, + { + "cfg_lpm2", + SMMU0_SMMU0_CFG_LPM2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000e0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm2_reg, + NULL, + NULL, + }, + { + "cfg_lpm3", + SMMU0_SMMU0_CFG_LPM3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000e4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm3_reg, + NULL, + NULL, + }, + { + "cfg_lpm4", + SMMU0_SMMU0_CFG_LPM4r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000e8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm4_reg, + NULL, + NULL, + }, + { + "cfg_lpm5", + SMMU0_SMMU0_CFG_LPM5r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000ec, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm5_reg, + NULL, + NULL, + }, + { + "cfg_lpm6", + SMMU0_SMMU0_CFG_LPM6r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000f0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm6_reg, + NULL, + NULL, + }, + { + "cfg_lpm7", + SMMU0_SMMU0_CFG_LPM7r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000000f4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_cfg_lpm7_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SMMU0_SMMU0_DEBUG_CNT_MODEr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000100, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "stat_overflow_mode", 
+ SMMU0_SMMU0_STAT_OVERFLOW_MODEr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_stat_overflow_mode_reg, + NULL, + NULL, + }, + { + "init_en_cfg_tmp", + SMMU0_SMMU0_INIT_EN_CFG_TMPr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000013c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_init_en_cfg_tmp_reg, + NULL, + NULL, + }, + { + "smmu0_int_unmask_flag", + SMMU0_SMMU0_SMMU0_INT_UNMASK_FLAGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000140, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "smmu0_int0_en", + SMMU0_SMMU0_SMMU0_INT0_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000144, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int0_en_reg, + NULL, + NULL, + }, + { + "smmu0_int0_mask", + SMMU0_SMMU0_SMMU0_INT0_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000148, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int0_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int0_status", + SMMU0_SMMU0_SMMU0_INT0_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000014c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int0_status_reg, + NULL, + NULL, + }, + { + "smmu0_int1_en", + SMMU0_SMMU0_SMMU0_INT1_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000150, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int1_en_reg, + NULL, + NULL, + }, + { + "smmu0_int1_mask", + SMMU0_SMMU0_SMMU0_INT1_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000154, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int1_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int1_status", + SMMU0_SMMU0_SMMU0_INT1_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000158, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int1_status_reg, + NULL, + NULL, + }, + { + "smmu0_int2_en", + SMMU0_SMMU0_SMMU0_INT2_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000015c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int2_en_reg, + NULL, + NULL, + }, + { + "smmu0_int2_mask", + SMMU0_SMMU0_SMMU0_INT2_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000160, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int2_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int2_status", + SMMU0_SMMU0_SMMU0_INT2_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000164, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int2_status_reg, + NULL, + NULL, + }, + { + "smmu0_int3_en", + SMMU0_SMMU0_SMMU0_INT3_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000168, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int3_en_reg, + NULL, + NULL, + }, + { + "smmu0_int3_mask", + SMMU0_SMMU0_SMMU0_INT3_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000016c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int3_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int3_status", + SMMU0_SMMU0_SMMU0_INT3_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 
0x00000170, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int3_status_reg, + NULL, + NULL, + }, + { + "smmu0_int4_en", + SMMU0_SMMU0_SMMU0_INT4_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000174, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int4_en_reg, + NULL, + NULL, + }, + { + "smmu0_int4_mask", + SMMU0_SMMU0_SMMU0_INT4_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000178, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int4_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int4_status", + SMMU0_SMMU0_SMMU0_INT4_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000017c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int4_status_reg, + NULL, + NULL, + }, + { + "smmu0_int5_en", + SMMU0_SMMU0_SMMU0_INT5_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000180, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int5_en_reg, + NULL, + NULL, + }, + { + "smmu0_int5_mask", + SMMU0_SMMU0_SMMU0_INT5_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000184, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int5_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int5_status", + SMMU0_SMMU0_SMMU0_INT5_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000188, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int5_status_reg, + NULL, + NULL, + }, + { + "smmu0_int6_en", + SMMU0_SMMU0_SMMU0_INT6_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000018c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int6_en_reg, + 
NULL, + NULL, + }, + { + "smmu0_int6_mask", + SMMU0_SMMU0_SMMU0_INT6_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000190, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int6_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int6_status", + SMMU0_SMMU0_SMMU0_INT6_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000194, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int6_status_reg, + NULL, + NULL, + }, + { + "smmu0_int7_en", + SMMU0_SMMU0_SMMU0_INT7_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000198, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int7_en_reg, + NULL, + NULL, + }, + { + "smmu0_int7_mask", + SMMU0_SMMU0_SMMU0_INT7_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000019c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int7_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int7_status", + SMMU0_SMMU0_SMMU0_INT7_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001a0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int7_status_reg, + NULL, + NULL, + }, + { + "smmu0_int8_en", + SMMU0_SMMU0_SMMU0_INT8_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001a4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int8_en_reg, + NULL, + NULL, + }, + { + "smmu0_int8_mask", + SMMU0_SMMU0_SMMU0_INT8_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001a8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int8_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int8_status", + SMMU0_SMMU0_SMMU0_INT8_STATUSr, + SMMU0, 
+ DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001ac, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int8_status_reg, + NULL, + NULL, + }, + { + "smmu0_int9_en", + SMMU0_SMMU0_SMMU0_INT9_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001b0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int9_en_reg, + NULL, + NULL, + }, + { + "smmu0_int9_mask", + SMMU0_SMMU0_SMMU0_INT9_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int9_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int9_status", + SMMU0_SMMU0_SMMU0_INT9_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int9_status_reg, + NULL, + NULL, + }, + { + "smmu0_int10_en", + SMMU0_SMMU0_SMMU0_INT10_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001bc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int10_en_reg, + NULL, + NULL, + }, + { + "smmu0_int10_mask", + SMMU0_SMMU0_SMMU0_INT10_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int10_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int10_status", + SMMU0_SMMU0_SMMU0_INT10_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int10_status_reg, + NULL, + NULL, + }, + { + "smmu0_int11_en", + SMMU0_SMMU0_SMMU0_INT11_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x000001c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int11_en_reg, + NULL, + NULL, + }, + { + "smmu0_int11_mask", + SMMU0_SMMU0_SMMU0_INT11_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int11_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int11_status", + SMMU0_SMMU0_SMMU0_INT11_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int11_status_reg, + NULL, + NULL, + }, + { + "smmu0_int12_en", + SMMU0_SMMU0_SMMU0_INT12_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int12_en_reg, + NULL, + NULL, + }, + { + "smmu0_int12_mask", + SMMU0_SMMU0_SMMU0_INT12_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int12_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int12_status", + SMMU0_SMMU0_SMMU0_INT12_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int12_status_reg, + NULL, + NULL, + }, + { + "smmu0_int13_en", + SMMU0_SMMU0_SMMU0_INT13_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001e0, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_smmu0_smmu0_smmu0_int13_en_reg, + NULL, + NULL, + }, + { + "smmu0_int13_mask", + SMMU0_SMMU0_SMMU0_INT13_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001e4, + (32 / 8), + 0, + 0, + 0, + 0, + 
20, + g_smmu0_smmu0_smmu0_int13_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int13_status", + SMMU0_SMMU0_SMMU0_INT13_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001e8, + (32 / 8), + 0, + 0, + 0, + 0, + 20, + g_smmu0_smmu0_smmu0_int13_status_reg, + NULL, + NULL, + }, + { + "smmu0_int14_en", + SMMU0_SMMU0_SMMU0_INT14_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001ec, + (32 / 8), + 0, + 0, + 0, + 0, + 17, + g_smmu0_smmu0_smmu0_int14_en_reg, + NULL, + NULL, + }, + { + "smmu0_int14_mask", + SMMU0_SMMU0_SMMU0_INT14_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001f0, + (32 / 8), + 0, + 0, + 0, + 0, + 17, + g_smmu0_smmu0_smmu0_int14_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int14_status", + SMMU0_SMMU0_SMMU0_INT14_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001f4, + (32 / 8), + 0, + 0, + 0, + 0, + 17, + g_smmu0_smmu0_smmu0_int14_status_reg, + NULL, + NULL, + }, + { + "smmu0_ecc_unmask_flag", + SMMU0_SMMU0_SMMU0_ECC_UNMASK_FLAGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001f8, + (32 / 8), + 0, + 0, + 0, + 0, + 22, + g_smmu0_smmu0_smmu0_ecc_unmask_flag_reg, + NULL, + NULL, + }, + { + "smmu0_int15_en", + SMMU0_SMMU0_SMMU0_INT15_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000001fc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int15_en_reg, + NULL, + NULL, + }, + { + "smmu0_int15_mask", + SMMU0_SMMU0_SMMU0_INT15_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000200, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int15_mask_reg, + NULL, + 
NULL, + }, + { + "smmu0_int15_status", + SMMU0_SMMU0_SMMU0_INT15_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000204, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int15_status_reg, + NULL, + NULL, + }, + { + "smmu0_int16_en", + SMMU0_SMMU0_SMMU0_INT16_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000208, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int16_en_reg, + NULL, + NULL, + }, + { + "smmu0_int16_mask", + SMMU0_SMMU0_SMMU0_INT16_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000020c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int16_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int16_status", + SMMU0_SMMU0_SMMU0_INT16_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000210, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int16_status_reg, + NULL, + NULL, + }, + { + "smmu0_int17_en", + SMMU0_SMMU0_SMMU0_INT17_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000214, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int17_en_reg, + NULL, + NULL, + }, + { + "smmu0_int17_mask", + SMMU0_SMMU0_SMMU0_INT17_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000218, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int17_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int17_status", + SMMU0_SMMU0_SMMU0_INT17_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000021c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int17_status_reg, + NULL, + NULL, + }, + { + "smmu0_int18_en", + 
SMMU0_SMMU0_SMMU0_INT18_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000220, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int18_en_reg, + NULL, + NULL, + }, + { + "smmu0_int18_mask", + SMMU0_SMMU0_SMMU0_INT18_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000224, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int18_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int18_status", + SMMU0_SMMU0_SMMU0_INT18_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000228, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int18_status_reg, + NULL, + NULL, + }, + { + "smmu0_int19_en", + SMMU0_SMMU0_SMMU0_INT19_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000022c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int19_en_reg, + NULL, + NULL, + }, + { + "smmu0_int19_mask", + SMMU0_SMMU0_SMMU0_INT19_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000230, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int19_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int19_status", + SMMU0_SMMU0_SMMU0_INT19_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000234, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int19_status_reg, + NULL, + NULL, + }, + { + "smmu0_int20_en", + SMMU0_SMMU0_SMMU0_INT20_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000238, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int20_en_reg, + NULL, + NULL, + }, + { + "smmu0_int20_mask", + SMMU0_SMMU0_SMMU0_INT20_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000023c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int20_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int20_status", + SMMU0_SMMU0_SMMU0_INT20_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000240, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int20_status_reg, + NULL, + NULL, + }, + { + "smmu0_int21_en", + SMMU0_SMMU0_SMMU0_INT21_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000244, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int21_en_reg, + NULL, + NULL, + }, + { + "smmu0_int21_mask", + SMMU0_SMMU0_SMMU0_INT21_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000248, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int21_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int21_status", + SMMU0_SMMU0_SMMU0_INT21_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000024c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int21_status_reg, + NULL, + NULL, + }, + { + "smmu0_int22_en", + SMMU0_SMMU0_SMMU0_INT22_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000250, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int22_en_reg, + NULL, + NULL, + }, + { + "smmu0_int22_mask", + SMMU0_SMMU0_SMMU0_INT22_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000254, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int22_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int22_status", + SMMU0_SMMU0_SMMU0_INT22_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000258, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int22_status_reg, + NULL, + NULL, + }, + { + "smmu0_int23_en", + SMMU0_SMMU0_SMMU0_INT23_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000025c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int23_en_reg, + NULL, + NULL, + }, + { + "smmu0_int23_mask", + SMMU0_SMMU0_SMMU0_INT23_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000260, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int23_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int23_status", + SMMU0_SMMU0_SMMU0_INT23_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000264, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int23_status_reg, + NULL, + NULL, + }, + { + "smmu0_int24_en", + SMMU0_SMMU0_SMMU0_INT24_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000268, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int24_en_reg, + NULL, + NULL, + }, + { + "smmu0_int24_mask", + SMMU0_SMMU0_SMMU0_INT24_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000026c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int24_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int24_status", + SMMU0_SMMU0_SMMU0_INT24_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000270, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int24_status_reg, + NULL, + NULL, + }, + { + "smmu0_int25_en", + SMMU0_SMMU0_SMMU0_INT25_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000274, + (32 / 8), + 0, + 0, + 0, + 
0, + 32, + g_smmu0_smmu0_smmu0_int25_en_reg, + NULL, + NULL, + }, + { + "smmu0_int25_mask", + SMMU0_SMMU0_SMMU0_INT25_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000278, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int25_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int25_status", + SMMU0_SMMU0_SMMU0_INT25_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000027c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int25_status_reg, + NULL, + NULL, + }, + { + "smmu0_int26_en", + SMMU0_SMMU0_SMMU0_INT26_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000280, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int26_en_reg, + NULL, + NULL, + }, + { + "smmu0_int26_mask", + SMMU0_SMMU0_SMMU0_INT26_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000284, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int26_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int26_status", + SMMU0_SMMU0_SMMU0_INT26_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000288, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int26_status_reg, + NULL, + NULL, + }, + { + "smmu0_int27_en", + SMMU0_SMMU0_SMMU0_INT27_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000028c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int27_en_reg, + NULL, + NULL, + }, + { + "smmu0_int27_mask", + SMMU0_SMMU0_SMMU0_INT27_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000290, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int27_mask_reg, + NULL, + NULL, + }, + 
{ + "smmu0_int27_status", + SMMU0_SMMU0_SMMU0_INT27_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000294, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int27_status_reg, + NULL, + NULL, + }, + { + "smmu0_int28_en", + SMMU0_SMMU0_SMMU0_INT28_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000298, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int28_en_reg, + NULL, + NULL, + }, + { + "smmu0_int28_mask", + SMMU0_SMMU0_SMMU0_INT28_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000029c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int28_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int28_status", + SMMU0_SMMU0_SMMU0_INT28_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002a0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int28_status_reg, + NULL, + NULL, + }, + { + "smmu0_int29_en", + SMMU0_SMMU0_SMMU0_INT29_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002a4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int29_en_reg, + NULL, + NULL, + }, + { + "smmu0_int29_mask", + SMMU0_SMMU0_SMMU0_INT29_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002a8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int29_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int29_status", + SMMU0_SMMU0_SMMU0_INT29_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002ac, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int29_status_reg, + NULL, + NULL, + }, + { + "smmu0_int30_en", + SMMU0_SMMU0_SMMU0_INT30_ENr, + SMMU0, 
+ DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002b0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int30_en_reg, + NULL, + NULL, + }, + { + "smmu0_int30_mask", + SMMU0_SMMU0_SMMU0_INT30_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002b4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int30_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int30_status", + SMMU0_SMMU0_SMMU0_INT30_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002b8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int30_status_reg, + NULL, + NULL, + }, + { + "smmu0_int31_en", + SMMU0_SMMU0_SMMU0_INT31_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002bc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int31_en_reg, + NULL, + NULL, + }, + { + "smmu0_int31_mask", + SMMU0_SMMU0_SMMU0_INT31_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002c0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int31_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int31_status", + SMMU0_SMMU0_SMMU0_INT31_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002c4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int31_status_reg, + NULL, + NULL, + }, + { + "smmu0_int32_en", + SMMU0_SMMU0_SMMU0_INT32_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002c8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int32_en_reg, + NULL, + NULL, + }, + { + "smmu0_int32_mask", + SMMU0_SMMU0_SMMU0_INT32_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR 
+ MODULE_SE_SMMU0_BASE_ADDR + 0x000002cc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int32_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int32_status", + SMMU0_SMMU0_SMMU0_INT32_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002d0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int32_status_reg, + NULL, + NULL, + }, + { + "smmu0_int33_en", + SMMU0_SMMU0_SMMU0_INT33_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002d4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int33_en_reg, + NULL, + NULL, + }, + { + "smmu0_int33_mask", + SMMU0_SMMU0_SMMU0_INT33_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002d8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int33_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int33_status", + SMMU0_SMMU0_SMMU0_INT33_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002dc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int33_status_reg, + NULL, + NULL, + }, + { + "smmu0_int34_en", + SMMU0_SMMU0_SMMU0_INT34_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002e0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int34_en_reg, + NULL, + NULL, + }, + { + "smmu0_int34_mask", + SMMU0_SMMU0_SMMU0_INT34_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002e4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int34_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int34_status", + SMMU0_SMMU0_SMMU0_INT34_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002e8, + (32 / 8), + 0, + 0, 
+ 0, + 0, + 32, + g_smmu0_smmu0_smmu0_int34_status_reg, + NULL, + NULL, + }, + { + "smmu0_int35_en", + SMMU0_SMMU0_SMMU0_INT35_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int35_en_reg, + NULL, + NULL, + }, + { + "smmu0_int35_mask", + SMMU0_SMMU0_SMMU0_INT35_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int35_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int35_status", + SMMU0_SMMU0_SMMU0_INT35_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int35_status_reg, + NULL, + NULL, + }, + { + "smmu0_int36_en", + SMMU0_SMMU0_SMMU0_INT36_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int36_en_reg, + NULL, + NULL, + }, + { + "smmu0_int36_mask", + SMMU0_SMMU0_SMMU0_INT36_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000002fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int36_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int36_status", + SMMU0_SMMU0_SMMU0_INT36_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000300, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int36_status_reg, + NULL, + NULL, + }, + { + "smmu0_int37_en", + SMMU0_SMMU0_SMMU0_INT37_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000304, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int37_en_reg, + NULL, + NULL, + }, + { 
+ "smmu0_int37_mask", + SMMU0_SMMU0_SMMU0_INT37_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000308, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int37_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int37_status", + SMMU0_SMMU0_SMMU0_INT37_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000030c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int37_status_reg, + NULL, + NULL, + }, + { + "smmu0_int38_en", + SMMU0_SMMU0_SMMU0_INT38_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000310, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int38_en_reg, + NULL, + NULL, + }, + { + "smmu0_int38_mask", + SMMU0_SMMU0_SMMU0_INT38_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000314, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int38_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int38_status", + SMMU0_SMMU0_SMMU0_INT38_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000318, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int38_status_reg, + NULL, + NULL, + }, + { + "smmu0_int39_en", + SMMU0_SMMU0_SMMU0_INT39_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000036c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int39_en_reg, + NULL, + NULL, + }, + { + "smmu0_int39_mask", + SMMU0_SMMU0_SMMU0_INT39_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000370, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int39_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int39_status", + SMMU0_SMMU0_SMMU0_INT39_STATUSr, + SMMU0, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000374, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int39_status_reg, + NULL, + NULL, + }, + { + "smmu0_int40_en", + SMMU0_SMMU0_SMMU0_INT40_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000378, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int40_en_reg, + NULL, + NULL, + }, + { + "smmu0_int40_mask", + SMMU0_SMMU0_SMMU0_INT40_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000037c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int40_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int40_status", + SMMU0_SMMU0_SMMU0_INT40_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000380, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int40_status_reg, + NULL, + NULL, + }, + { + "smmu0_int41_en", + SMMU0_SMMU0_SMMU0_INT41_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000384, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int41_en_reg, + NULL, + NULL, + }, + { + "smmu0_int41_mask", + SMMU0_SMMU0_SMMU0_INT41_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000388, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int41_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int41_status", + SMMU0_SMMU0_SMMU0_INT41_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000038c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int41_status_reg, + NULL, + NULL, + }, + { + "smmu0_int42_en", + SMMU0_SMMU0_SMMU0_INT42_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000390, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int42_en_reg, + NULL, + NULL, + }, + { + "smmu0_int42_mask", + SMMU0_SMMU0_SMMU0_INT42_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000394, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int42_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int42_status", + SMMU0_SMMU0_SMMU0_INT42_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000398, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int42_status_reg, + NULL, + NULL, + }, + { + "smmu0_int43_en", + SMMU0_SMMU0_SMMU0_INT43_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000039c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int43_en_reg, + NULL, + NULL, + }, + { + "smmu0_int43_mask", + SMMU0_SMMU0_SMMU0_INT43_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int43_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int43_status", + SMMU0_SMMU0_SMMU0_INT43_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int43_status_reg, + NULL, + NULL, + }, + { + "smmu0_int44_en", + SMMU0_SMMU0_SMMU0_INT44_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int44_en_reg, + NULL, + NULL, + }, + { + "smmu0_int44_mask", + SMMU0_SMMU0_SMMU0_INT44_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, 
+ g_smmu0_smmu0_smmu0_int44_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int44_status", + SMMU0_SMMU0_SMMU0_INT44_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int44_status_reg, + NULL, + NULL, + }, + { + "smmu0_int45_en", + SMMU0_SMMU0_SMMU0_INT45_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int45_en_reg, + NULL, + NULL, + }, + { + "smmu0_int45_mask", + SMMU0_SMMU0_SMMU0_INT45_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int45_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int45_status", + SMMU0_SMMU0_SMMU0_INT45_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int45_status_reg, + NULL, + NULL, + }, + { + "smmu0_int46_en", + SMMU0_SMMU0_SMMU0_INT46_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int46_en_reg, + NULL, + NULL, + }, + { + "smmu0_int46_mask", + SMMU0_SMMU0_SMMU0_INT46_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int46_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int46_status", + SMMU0_SMMU0_SMMU0_INT46_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int46_status_reg, + NULL, + NULL, + }, + { + 
"smmu0_int47_en", + SMMU0_SMMU0_SMMU0_INT47_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int47_en_reg, + NULL, + NULL, + }, + { + "smmu0_int47_mask", + SMMU0_SMMU0_SMMU0_INT47_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int47_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int47_status", + SMMU0_SMMU0_SMMU0_INT47_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int47_status_reg, + NULL, + NULL, + }, + { + "smmu0_int48_en", + SMMU0_SMMU0_SMMU0_INT48_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int48_en_reg, + NULL, + NULL, + }, + { + "smmu0_int48_mask", + SMMU0_SMMU0_SMMU0_INT48_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int48_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int48_status", + SMMU0_SMMU0_SMMU0_INT48_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int48_status_reg, + NULL, + NULL, + }, + { + "smmu0_int49_en", + SMMU0_SMMU0_SMMU0_INT49_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int49_en_reg, + NULL, + NULL, + }, + { + "smmu0_int49_mask", + SMMU0_SMMU0_SMMU0_INT49_MASKr, + SMMU0, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int49_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int49_status", + SMMU0_SMMU0_SMMU0_INT49_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int49_status_reg, + NULL, + NULL, + }, + { + "smmu0_int50_en", + SMMU0_SMMU0_SMMU0_INT50_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int50_en_reg, + NULL, + NULL, + }, + { + "smmu0_int50_mask", + SMMU0_SMMU0_SMMU0_INT50_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int50_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int50_status", + SMMU0_SMMU0_SMMU0_INT50_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int50_status_reg, + NULL, + NULL, + }, + { + "smmu0_int51_en", + SMMU0_SMMU0_SMMU0_INT51_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000003fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int51_en_reg, + NULL, + NULL, + }, + { + "smmu0_int51_mask", + SMMU0_SMMU0_SMMU0_INT51_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int51_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int51_status", + SMMU0_SMMU0_SMMU0_INT51_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int51_status_reg, + NULL, + NULL, + }, + { + "smmu0_int52_en", + SMMU0_SMMU0_SMMU0_INT52_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int52_en_reg, + NULL, + NULL, + }, + { + "smmu0_int52_mask", + SMMU0_SMMU0_SMMU0_INT52_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000040c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int52_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int52_status", + SMMU0_SMMU0_SMMU0_INT52_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_int52_status_reg, + NULL, + NULL, + }, + { + "smmu0_int53_en", + SMMU0_SMMU0_SMMU0_INT53_ENr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000414, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_smmu0_int53_en_reg, + NULL, + NULL, + }, + { + "smmu0_int53_mask", + SMMU0_SMMU0_SMMU0_INT53_MASKr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000418, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_smmu0_int53_mask_reg, + NULL, + NULL, + }, + { + "smmu0_int53_status", + SMMU0_SMMU0_SMMU0_INT53_STATUSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000041c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_smmu0_int53_status_reg, + NULL, + NULL, + }, + { + "ctrl0_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL0_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000031c, + (32 / 8), + 0, 
+ 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl0_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl2_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL2_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000320, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl2_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl4_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL4_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000324, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl4_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl6_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL6_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000328, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl6_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl8_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL8_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000032c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl8_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl10_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL10_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000330, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl10_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl12_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL12_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000334, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl12_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl14_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL14_ARBITER_ECC_BYPASSr, + SMMU0, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000338, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl14_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl16_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL16_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000033c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl16_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl18_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL18_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000340, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl18_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl20_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL20_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000344, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl20_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl22_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL22_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000348, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl22_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl24_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL24_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000034c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl24_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl26_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL26_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000350, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + 
g_smmu0_smmu0_ctrl26_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl28_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL28_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000354, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl28_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl30_arbiter_ecc_bypass", + SMMU0_SMMU0_CTRL30_ARBITER_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000358, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl30_arbiter_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl_req_ecc_bypass", + SMMU0_SMMU0_CTRL_REQ_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000035c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_req_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl_info_ecc_bypass", + SMMU0_SMMU0_CTRL_INFO_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000360, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_info_ecc_bypass_reg, + NULL, + NULL, + }, + { + "smmu0_rschd_ecc_bypass", + SMMU0_SMMU0_SMMU0_RSCHD_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000368, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_rschd_ecc_bypass_reg, + NULL, + NULL, + }, + { + "smmu0_wr_ecc_bypass", + SMMU0_SMMU0_SMMU0_WR_ECC_BYPASSr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000364, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_smmu0_wr_ecc_bypass_reg, + NULL, + NULL, + }, + { + "ctrl0_arbiter_ecc_err", + SMMU0_SMMU0_CTRL0_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 
0x00000f50, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl0_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl1_arbiter_ecc_err", + SMMU0_SMMU0_CTRL1_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f54, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl1_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl2_arbiter_ecc_err", + SMMU0_SMMU0_CTRL2_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f58, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl2_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl3_arbiter_ecc_err", + SMMU0_SMMU0_CTRL3_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f5c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl3_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl4_arbiter_ecc_err", + SMMU0_SMMU0_CTRL4_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f60, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl4_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl5_arbiter_ecc_err", + SMMU0_SMMU0_CTRL5_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f64, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl5_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl6_arbiter_ecc_err", + SMMU0_SMMU0_CTRL6_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f68, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl6_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl7_arbiter_ecc_err", + SMMU0_SMMU0_CTRL7_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f6c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl7_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl8_arbiter_ecc_err", + SMMU0_SMMU0_CTRL8_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f70, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl8_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl9_arbiter_ecc_err", + SMMU0_SMMU0_CTRL9_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f74, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl9_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl10_arbiter_ecc_err", + SMMU0_SMMU0_CTRL10_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f78, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl10_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl11_arbiter_ecc_err", + SMMU0_SMMU0_CTRL11_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f7c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl11_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl12_arbiter_ecc_err", + SMMU0_SMMU0_CTRL12_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f80, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl12_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl13_arbiter_ecc_err", + SMMU0_SMMU0_CTRL13_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f84, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl13_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl14_arbiter_ecc_err", + SMMU0_SMMU0_CTRL14_ARBITER_ECC_ERRr, + 
SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f88, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl14_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl15_arbiter_ecc_err", + SMMU0_SMMU0_CTRL15_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f8c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl15_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl16_arbiter_ecc_err", + SMMU0_SMMU0_CTRL16_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f90, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl16_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl17_arbiter_ecc_err", + SMMU0_SMMU0_CTRL17_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f94, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl17_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl18_arbiter_ecc_err", + SMMU0_SMMU0_CTRL18_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f98, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl18_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl19_arbiter_ecc_err", + SMMU0_SMMU0_CTRL19_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000f9c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl19_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl20_arbiter_ecc_err", + SMMU0_SMMU0_CTRL20_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fa0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl20_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + 
"ctrl21_arbiter_ecc_err", + SMMU0_SMMU0_CTRL21_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fa4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl21_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl22_arbiter_ecc_err", + SMMU0_SMMU0_CTRL22_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fa8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl22_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl23_arbiter_ecc_err", + SMMU0_SMMU0_CTRL23_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fac, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl23_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl24_arbiter_ecc_err", + SMMU0_SMMU0_CTRL24_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fb0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl24_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl25_arbiter_ecc_err", + SMMU0_SMMU0_CTRL25_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fb4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl25_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl26_arbiter_ecc_err", + SMMU0_SMMU0_CTRL26_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fbc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl26_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl27_arbiter_ecc_err", + SMMU0_SMMU0_CTRL27_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fc0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + 
g_smmu0_smmu0_ctrl27_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl28_arbiter_ecc_err", + SMMU0_SMMU0_CTRL28_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fc4, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl28_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl29_arbiter_ecc_err", + SMMU0_SMMU0_CTRL29_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fc8, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl29_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl30_arbiter_ecc_err", + SMMU0_SMMU0_CTRL30_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fcc, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl30_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl31_arbiter_ecc_err", + SMMU0_SMMU0_CTRL31_ARBITER_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fd0, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_smmu0_smmu0_ctrl31_arbiter_ecc_err_reg, + NULL, + NULL, + }, + { + "ctrl_req_ecc_single_err", + SMMU0_SMMU0_CTRL_REQ_ECC_SINGLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fd4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_req_ecc_single_err_reg, + NULL, + NULL, + }, + { + "ctrl_req_ecc_double_err", + SMMU0_SMMU0_CTRL_REQ_ECC_DOUBLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fd8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_req_ecc_double_err_reg, + NULL, + NULL, + }, + { + "ctrl_info_ecc_single_err", + SMMU0_SMMU0_CTRL_INFO_ECC_SINGLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000fdc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_info_ecc_single_err_reg, + NULL, + NULL, + }, + { + "ctrl_info_ecc_double_err", + SMMU0_SMMU0_CTRL_INFO_ECC_DOUBLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fe0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ctrl_info_ecc_double_err_reg, + NULL, + NULL, + }, + { + "smmu0_wr_ecc_err", + SMMU0_SMMU0_SMMU0_WR_ECC_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fe4, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_smmu0_smmu0_smmu0_wr_ecc_err_reg, + NULL, + NULL, + }, + { + "smmu0_rschd_ecc_single_err", + SMMU0_SMMU0_SMMU0_RSCHD_ECC_SINGLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fe8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_rschd_ecc_single_err_reg, + NULL, + NULL, + }, + { + "smmu0_rschd_ecc_double_err", + SMMU0_SMMU0_SMMU0_RSCHD_ECC_DOUBLE_ERRr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000fec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_rschd_ecc_double_err_reg, + NULL, + NULL, + }, + { + "ord_fifo_empty", + SMMU0_SMMU0_ORD_FIFO_EMPTYr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000440, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ord_fifo_empty_reg, + NULL, + NULL, + }, + { + "wr_arb_fifo_empty", + SMMU0_SMMU0_WR_ARB_FIFO_EMPTYr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000444, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_wr_arb_fifo_empty_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty0", + SMMU0_SMMU0_CTRL_FIFO_EMPTY0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000448, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_smmu0_smmu0_ctrl_fifo_empty0_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty1", + SMMU0_SMMU0_CTRL_FIFO_EMPTY1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000044c, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_smmu0_smmu0_ctrl_fifo_empty1_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty2", + SMMU0_SMMU0_CTRL_FIFO_EMPTY2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000450, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_smmu0_smmu0_ctrl_fifo_empty2_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty3", + SMMU0_SMMU0_CTRL_FIFO_EMPTY3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000454, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_smmu0_smmu0_ctrl_fifo_empty3_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty4", + SMMU0_SMMU0_CTRL_FIFO_EMPTY4r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000560, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_smmu0_smmu0_ctrl_fifo_empty4_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_empty5", + SMMU0_SMMU0_CTRL_FIFO_EMPTY5r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000564, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_smmu0_smmu0_ctrl_fifo_empty5_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty0", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000458, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty0_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty1", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000045c, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty1_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty2", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000460, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty2_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty3", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000464, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty3_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty4", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY4r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000468, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty4_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty5", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY5r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000046c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty5_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty6", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY6r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000470, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty6_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty7", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY7r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000474, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty7_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty8", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY8r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000478, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_smmu0_smmu0_kschd_fifo_empty8_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty9", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY9r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000047c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty9_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty10", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY10r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty10_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty11", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY11r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty11_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty12", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY12r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty12_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty13", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY13r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000048c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty13_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty14", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY14r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000490, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty14_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty15", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY15r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000494, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_smmu0_smmu0_kschd_fifo_empty15_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty16", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY16r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000498, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty16_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty17", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY17r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000049c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty17_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty18", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY18r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty18_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty19", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY19r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty19_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty20", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY20r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty20_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty21", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY21r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty21_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty22", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY22r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_smmu0_smmu0_kschd_fifo_empty22_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty23", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY23r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty23_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty24", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY24r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty24_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty25", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY25r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty25_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty26", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY26r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty26_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty27", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY27r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty27_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty28", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY28r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty28_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty29", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY29r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_smmu0_smmu0_kschd_fifo_empty29_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty30", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY30r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty30_reg, + NULL, + NULL, + }, + { + "kschd_fifo_empty31", + SMMU0_SMMU0_KSCHD_FIFO_EMPTY31r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_kschd_fifo_empty31_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty0", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY0r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty0_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty1", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY1r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty1_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty2", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY2r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty2_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty3", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY3r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty3_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty4", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY4r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty4_reg, + NULL, + 
NULL, + }, + { + "rschd_fifo_empty5", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY5r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty5_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty6", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY6r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty6_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty7", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY7r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty7_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty8", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY8r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty8_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty9", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY9r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000004fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty9_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty10", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY10r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000500, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty10_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty11", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY11r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000504, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty11_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty12", + 
SMMU0_SMMU0_RSCHD_FIFO_EMPTY12r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000508, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty12_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty13", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY13r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000050c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty13_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty14", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY14r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000510, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty14_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty15", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY15r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000514, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty15_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty16", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY16r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000518, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty16_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty17", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY17r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000051c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty17_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty18", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY18r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000520, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty18_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty19", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY19r, 
+ SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000524, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty19_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty20", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY20r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000528, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty20_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty21", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY21r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000052c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty21_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty22", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY22r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000530, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty22_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty23", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY23r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000534, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty23_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty24", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY24r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000538, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty24_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty25", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY25r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000053c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty25_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty26", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY26r, + SMMU0, + DPP_REG_FLAG_DIRECT, 
+ DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000540, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty26_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty27", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY27r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000544, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty27_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty28", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY28r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000548, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty28_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty29", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY29r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000054c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty29_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty30", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY30r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000550, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty30_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty31", + SMMU0_SMMU0_RSCHD_FIFO_EMPTY31r, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000554, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_rschd_fifo_empty31_reg, + NULL, + NULL, + }, + { + "ept_flag", + SMMU0_SMMU0_EPT_FLAGr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000558, + (32 / 8), + 0, + 0, + 0, + 0, + 9, + g_smmu0_smmu0_ept_flag_reg, + NULL, + NULL, + }, + { + "ppu_soft_rst", + SMMU0_SMMU0_PPU_SOFT_RSTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR 
+ 0x0000055c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ppu_soft_rst_reg, + NULL, + NULL, + }, + { + "smmu0_as_mac_age_fc_cnt", + SMMU0_SMMU0_SMMU0_AS_MAC_AGE_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000800, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_as_mac_age_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_marc_se_parser_fc_cnt", + SMMU0_SMMU0_SMMU0_MARC_SE_PARSER_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000804, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_marc_se_parser_fc_cnt_reg, + NULL, + NULL, + }, + { + "wr_arb_cpu_fc_cnt", + SMMU0_SMMU0_WR_ARB_CPU_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000808, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_wr_arb_cpu_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_lpm_as_fc_cnt", + SMMU0_SMMU0_SMMU0_LPM_AS_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000080c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_lpm_as_fc_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_smmu0_fc_cnt", + SMMU0_SMMU0_LPM_AS_SMMU0_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000810, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_lpm_as_smmu0_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_etcam1_0_as_fc_cnt", + SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000814, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_smmu0_etcam1_0_as_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_etcam1_0_smmu0_fc_cnt", + SMMU0_SMMU0_AS_ETCAM1_0_SMMU0_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + 
SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000081c, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_as_etcam1_0_smmu0_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_ppu_mcast_fc_cnt", + SMMU0_SMMU0_SMMU0_PPU_MCAST_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000824, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_ppu_mcast_fc_cnt_reg, + NULL, + NULL, + }, + { + "ppu_smmu0_mcast_fc_cnt", + SMMU0_SMMU0_PPU_SMMU0_MCAST_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000828, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ppu_smmu0_mcast_fc_cnt_reg, + NULL, + NULL, + }, + { + "odma_smmu0_tdm_fc_rsp_fc_cnt", + SMMU0_SMMU0_ODMA_SMMU0_TDM_FC_RSP_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000082c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_odma_smmu0_tdm_fc_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_odma_tdm_fc_key_fc_cnt", + SMMU0_SMMU0_SMMU0_ODMA_TDM_FC_KEY_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000830, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_odma_tdm_fc_key_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_odma_fc_cnt", + SMMU0_SMMU0_SMMU0_ODMA_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000834, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_odma_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_cfg_tab_rd_fc_cnt", + SMMU0_SMMU0_SMMU0_CFG_TAB_RD_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000838, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_cfg_tab_rd_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_stat_fc15_0_cnt", + 
SMMU0_SMMU0_SMMU0_STAT_FC15_0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000083c, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_smmu0_stat_fc15_0_cnt_reg, + NULL, + NULL, + }, + { + "stat_smmu0_fc15_0_cnt", + SMMU0_SMMU0_STAT_SMMU0_FC15_0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000087c, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_stat_smmu0_fc15_0_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_ppu_mex5_0_fc_cnt", + SMMU0_SMMU0_SMMU0_PPU_MEX5_0_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008bc, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_smmu0_ppu_mex5_0_fc_cnt_reg, + NULL, + NULL, + }, + { + "ppu_smmu0_mex5_0_fc_cnt", + SMMU0_SMMU0_PPU_SMMU0_MEX5_0_FC_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008d4, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_ppu_smmu0_mex5_0_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_smmu0_mac_age_req_cnt", + SMMU0_SMMU0_AS_SMMU0_MAC_AGE_REQ_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_as_smmu0_mac_age_req_cnt_reg, + NULL, + NULL, + }, + { + "se_parser_smmu0_marc_key_cnt", + SMMU0_SMMU0_SE_PARSER_SMMU0_MARC_KEY_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_se_parser_smmu0_marc_key_cnt_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat_cnt", + SMMU0_SMMU0_CPU_IND_RDAT_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_smmu0_smmu0_cpu_ind_rdat_cnt_reg, + NULL, + NULL, + }, + { + "cpu_ind_rd_req_cnt", + SMMU0_SMMU0_CPU_IND_RD_REQ_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_rd_req_cnt_reg, + NULL, + NULL, + }, + { + "cpu_ind_wr_req_cnt", + SMMU0_SMMU0_CPU_IND_WR_REQ_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000008fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cpu_ind_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_plcr_rsp0_cnt", + SMMU0_SMMU0_SMMU0_PLCR_RSP0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000900, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_plcr_rsp0_cnt_reg, + NULL, + NULL, + }, + { + "plcr_smmu0_req0_cnt", + SMMU0_SMMU0_PLCR_SMMU0_REQ0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000904, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_plcr_smmu0_req0_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_lpm_as_rsp_cnt", + SMMU0_SMMU0_SMMU0_LPM_AS_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000908, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_lpm_as_rsp_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_smmu0_req_cnt", + SMMU0_SMMU0_LPM_AS_SMMU0_REQ_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000090c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_lpm_as_smmu0_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_etcam1_0_as_rsp_cnt", + SMMU0_SMMU0_SMMU0_ETCAM1_0_AS_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000910, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, 
+ g_smmu0_smmu0_smmu0_etcam1_0_as_rsp_cnt_reg, + NULL, + NULL, + }, + { + "etcam1_0_as_smmu0_req_cnt", + SMMU0_SMMU0_ETCAM1_0_AS_SMMU0_REQ_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000918, + (32 / 8), + 0, + 1 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_etcam1_0_as_smmu0_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_ppu_mcast_rsp_cnt", + SMMU0_SMMU0_SMMU0_PPU_MCAST_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000920, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_ppu_mcast_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ppu_smmu0_mcast_key_cnt", + SMMU0_SMMU0_PPU_SMMU0_MCAST_KEY_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000924, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ppu_smmu0_mcast_key_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_odma_tdm_mc_rsp_cnt", + SMMU0_SMMU0_SMMU0_ODMA_TDM_MC_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000928, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_odma_tdm_mc_rsp_cnt_reg, + NULL, + NULL, + }, + { + "odma_smmu0_tdm_mc_key_cnt", + SMMU0_SMMU0_ODMA_SMMU0_TDM_MC_KEY_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000092c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_odma_smmu0_tdm_mc_key_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_odma_rsp_cnt", + SMMU0_SMMU0_SMMU0_ODMA_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000930, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_odma_rsp_cnt_reg, + NULL, + NULL, + }, + { + "odma_smmu0_cmd_cnt", + SMMU0_SMMU0_ODMA_SMMU0_CMD_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + 
MODULE_SE_SMMU0_BASE_ADDR + 0x00000934, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_odma_smmu0_cmd_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_cfg_tab_rdat_cnt", + SMMU0_SMMU0_SMMU0_CFG_TAB_RDAT_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000938, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_smmu0_cfg_tab_rdat_cnt_reg, + NULL, + NULL, + }, + { + "cfg_smmu0_tab_rd_cnt", + SMMU0_SMMU0_CFG_SMMU0_TAB_RD_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x0000093c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_cfg_smmu0_tab_rd_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_stat_rsp15_0_cnt", + SMMU0_SMMU0_SMMU0_STAT_RSP15_0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000940, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_smmu0_stat_rsp15_0_cnt_reg, + NULL, + NULL, + }, + { + "stat_smmu0_req15_0_cnt", + SMMU0_SMMU0_STAT_SMMU0_REQ15_0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000980, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_stat_smmu0_req15_0_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_ppu_mex5_0_rsp_cnt", + SMMU0_SMMU0_SMMU0_PPU_MEX5_0_RSP_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009c0, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_smmu0_ppu_mex5_0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ppu_smmu0_mex5_0_key_cnt", + SMMU0_SMMU0_PPU_SMMU0_MEX5_0_KEY_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009d8, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_smmu0_smmu0_ppu_smmu0_mex5_0_key_cnt_reg, + NULL, + NULL, + }, + { + "ftm_stat_smmu0_req0_cnt", + SMMU0_SMMU0_FTM_STAT_SMMU0_REQ0_CNTr, + 
SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ftm_stat_smmu0_req0_cnt_reg, + NULL, + NULL, + }, + { + "ftm_stat_smmu0_req1_cnt", + SMMU0_SMMU0_FTM_STAT_SMMU0_REQ1_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_ftm_stat_smmu0_req1_cnt_reg, + NULL, + NULL, + }, + { + "etm_stat_smmu0_req0_cnt", + SMMU0_SMMU0_ETM_STAT_SMMU0_REQ0_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_etm_stat_smmu0_req0_cnt_reg, + NULL, + NULL, + }, + { + "etm_stat_smmu0_req1_cnt", + SMMU0_SMMU0_ETM_STAT_SMMU0_REQ1_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x000009fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_smmu0_smmu0_etm_stat_smmu0_req1_cnt_reg, + NULL, + NULL, + }, + { + "req_eram0_31_rd_cnt", + SMMU0_SMMU0_REQ_ERAM0_31_RD_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000a00, + (32 / 8), + 0, + 31 + 1, + 0, + 8, + 1, + g_smmu0_smmu0_req_eram0_31_rd_cnt_reg, + NULL, + NULL, + }, + { + "req_eram0_31_wr_cnt", + SMMU0_SMMU0_REQ_ERAM0_31_WR_CNTr, + SMMU0, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU0_BASE_ADDR + MODULE_SE_SMMU0_BASE_ADDR + 0x00000a04, + (32 / 8), + 0, + 31 + 1, + 0, + 8, + 1, + g_smmu0_smmu0_req_eram0_31_wr_cnt_reg, + NULL, + NULL, + }, + { + "ddr_wdat1", + SE_SMMU1_DDR_WDAT1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat1_reg, + NULL, + NULL, + }, + { + "ddr_wdat2", + SE_SMMU1_DDR_WDAT2r, + SMMU1, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat2_reg, + NULL, + NULL, + }, + { + "ddr_wdat3", + SE_SMMU1_DDR_WDAT3r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat3_reg, + NULL, + NULL, + }, + { + "ddr_wdat4", + SE_SMMU1_DDR_WDAT4r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat4_reg, + NULL, + NULL, + }, + { + "ddr_wdat5", + SE_SMMU1_DDR_WDAT5r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat5_reg, + NULL, + NULL, + }, + { + "ddr_wdat6", + SE_SMMU1_DDR_WDAT6r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat6_reg, + NULL, + NULL, + }, + { + "ddr_wdat7", + SE_SMMU1_DDR_WDAT7r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat7_reg, + NULL, + NULL, + }, + { + "ddr_wdat8", + SE_SMMU1_DDR_WDAT8r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat8_reg, + NULL, + NULL, + }, + { + "ddr_wdat9", + SE_SMMU1_DDR_WDAT9r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat9_reg, + NULL, + NULL, + }, + { + "ddr_wdat10", + SE_SMMU1_DDR_WDAT10r, + SMMU1, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat10_reg, + NULL, + NULL, + }, + { + "ddr_wdat11", + SE_SMMU1_DDR_WDAT11r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat11_reg, + NULL, + NULL, + }, + { + "ddr_wdat12", + SE_SMMU1_DDR_WDAT12r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat12_reg, + NULL, + NULL, + }, + { + "ddr_wdat13", + SE_SMMU1_DDR_WDAT13r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat13_reg, + NULL, + NULL, + }, + { + "ddr_wdat14", + SE_SMMU1_DDR_WDAT14r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat14_reg, + NULL, + NULL, + }, + { + "ddr_wdat15", + SE_SMMU1_DDR_WDAT15r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ddr_wdat15_reg, + NULL, + NULL, + }, + { + "cnt_stat_cache_en", + SE_SMMU1_CNT_STAT_CACHE_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_en_reg, + NULL, + NULL, + }, + { + "cnt_stat_cache_clr", + SE_SMMU1_CNT_STAT_CACHE_CLRr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_clr_reg, + NULL, + NULL, + }, + 
{ + "cnt_stat_cache_req_63_32", + SE_SMMU1_CNT_STAT_CACHE_REQ_63_32r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_req_63_32_reg, + NULL, + NULL, + }, + { + "cnt_stat_cache_req_31_0", + SE_SMMU1_CNT_STAT_CACHE_REQ_31_0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_req_31_0_reg, + NULL, + NULL, + }, + { + "cnt_stat_cache_hit_63_32", + SE_SMMU1_CNT_STAT_CACHE_HIT_63_32r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_hit_63_32_reg, + NULL, + NULL, + }, + { + "cnt_stat_cache_hit_31_0", + SE_SMMU1_CNT_STAT_CACHE_HIT_31_0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cnt_stat_cache_hit_31_0_reg, + NULL, + NULL, + }, + { + "ddr_cmd0", + SE_SMMU1_DDR_CMD0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000060, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_smmu1_ddr_cmd0_reg, + NULL, + NULL, + }, + { + "info_addr", + SE_SMMU1_INFO_ADDRr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_info_addr_reg, + NULL, + NULL, + }, + { + "ddr_cmd1", + SE_SMMU1_DDR_CMD1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000068, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_ddr_cmd1_reg, + NULL, + NULL, + }, + { + "clr_start_addr", + SE_SMMU1_CLR_START_ADDRr, + SMMU1, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_clr_start_addr_reg, + NULL, + NULL, + }, + { + "clr_end_addr", + SE_SMMU1_CLR_END_ADDRr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_clr_end_addr_reg, + NULL, + NULL, + }, + { + "clr_tbl_en", + SE_SMMU1_CLR_TBL_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000074, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_clr_tbl_en_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_SMMU1_DEBUG_CNT_MODEr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "init_done", + SE_SMMU1_INIT_DONEr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000088, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_smmu1_init_done_reg, + NULL, + NULL, + }, + { + "cpu_rsp_rd_done", + SE_SMMU1_CPU_RSP_RD_DONEr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rsp_rd_done_reg, + NULL, + NULL, + }, + { + "ksch_oam_sp_en", + SE_SMMU1_KSCH_OAM_SP_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ksch_oam_sp_en_reg, + NULL, + NULL, + }, + { + "cfg_cache_en", + SE_SMMU1_CFG_CACHE_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_cache_en_reg, + NULL, + NULL, + }, + { + 
"cache_age_en", + SE_SMMU1_CACHE_AGE_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cache_age_en_reg, + NULL, + NULL, + }, + { + "cpu_rdat0", + SE_SMMU1_CPU_RDAT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_rdat1", + SE_SMMU1_CPU_RDAT1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_rdat2", + SE_SMMU1_CPU_RDAT2r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_rdat3", + SE_SMMU1_CPU_RDAT3r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat3_reg, + NULL, + NULL, + }, + { + "cpu_rdat4", + SE_SMMU1_CPU_RDAT4r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat4_reg, + NULL, + NULL, + }, + { + "cpu_rdat5", + SE_SMMU1_CPU_RDAT5r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat5_reg, + NULL, + NULL, + }, + { + "cpu_rdat6", + SE_SMMU1_CPU_RDAT6r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat6_reg, + NULL, + NULL, + }, + { + 
"cpu_rdat7", + SE_SMMU1_CPU_RDAT7r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat7_reg, + NULL, + NULL, + }, + { + "cpu_rdat8", + SE_SMMU1_CPU_RDAT8r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat8_reg, + NULL, + NULL, + }, + { + "cpu_rdat9", + SE_SMMU1_CPU_RDAT9r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat9_reg, + NULL, + NULL, + }, + { + "cpu_rdat10", + SE_SMMU1_CPU_RDAT10r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat10_reg, + NULL, + NULL, + }, + { + "cpu_rdat11", + SE_SMMU1_CPU_RDAT11r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat11_reg, + NULL, + NULL, + }, + { + "cpu_rdat12", + SE_SMMU1_CPU_RDAT12r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat12_reg, + NULL, + NULL, + }, + { + "cpu_rdat13", + SE_SMMU1_CPU_RDAT13r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat13_reg, + NULL, + NULL, + }, + { + "cpu_rdat14", + SE_SMMU1_CPU_RDAT14r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat14_reg, + NULL, + NULL, + }, + { + 
"cpu_rdat15", + SE_SMMU1_CPU_RDAT15r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000000fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rdat15_reg, + NULL, + NULL, + }, + { + "ctrl_cpu_rd_rdy", + SE_SMMU1_CTRL_CPU_RD_RDYr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ctrl_cpu_rd_rdy_reg, + NULL, + NULL, + }, + { + "cpu_warbi_rdy_cfg", + SE_SMMU1_CPU_WARBI_RDY_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_warbi_rdy_cfg_reg, + NULL, + NULL, + }, + { + "dir_arbi_cpu_rpful", + SE_SMMU1_DIR_ARBI_CPU_RPFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000108, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_dir_arbi_cpu_rpful_reg, + NULL, + NULL, + }, + { + "dir_arbi_wpful", + SE_SMMU1_DIR_ARBI_WPFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000110, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_dir_arbi_wpful_reg, + NULL, + NULL, + }, + { + "cfg_wr_arbi_pful0", + SE_SMMU1_CFG_WR_ARBI_PFUL0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000118, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cfg_wr_arbi_pful0_reg, + NULL, + NULL, + }, + { + "cfg_wr_arbi_pful1", + SE_SMMU1_CFG_WR_ARBI_PFUL1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000011c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cfg_wr_arbi_pful1_reg, + NULL, + NULL, + }, + { + "smmu1_wdone_pful_cfg", + SE_SMMU1_SMMU1_WDONE_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000124, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_wdone_pful_cfg_reg, + NULL, + NULL, + }, + { + "stat_rate_cfg_cnt", + SE_SMMU1_STAT_RATE_CFG_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000128, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_stat_rate_cfg_cnt_reg, + NULL, + NULL, + }, + { + "ftm_rate_cfg_cnt", + SE_SMMU1_FTM_RATE_CFG_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000012c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ftm_rate_cfg_cnt_reg, + NULL, + NULL, + }, + { + "etm_rate_cfg_cnt", + SE_SMMU1_ETM_RATE_CFG_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000130, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_etm_rate_cfg_cnt_reg, + NULL, + NULL, + }, + { + "dir_rate_cfg_cnt", + SE_SMMU1_DIR_RATE_CFG_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000134, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dir_rate_cfg_cnt_reg, + NULL, + NULL, + }, + { + "hash_rate_cfg_cnt", + SE_SMMU1_HASH_RATE_CFG_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000138, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_hash_rate_cfg_cnt_reg, + NULL, + NULL, + }, + { + "ftm_tbl_cfg", + SE_SMMU1_FTM_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ftm_tbl_cfg_reg, + NULL, + NULL, + }, + { + "lpm_v4_as_tbl_cfg", + SE_SMMU1_LPM_V4_AS_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001d0, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + 
g_se_smmu1_lpm_v4_as_tbl_cfg_reg, + NULL, + NULL, + }, + { + "lpm_v4_tbl_cfg", + SE_SMMU1_LPM_V4_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001d4, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_smmu1_lpm_v4_tbl_cfg_reg, + NULL, + NULL, + }, + { + "lpm_v6_tbl_cfg", + SE_SMMU1_LPM_V6_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001d8, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_smmu1_lpm_v6_tbl_cfg_reg, + NULL, + NULL, + }, + { + "lpm_v6_as_tbl_cfg", + SE_SMMU1_LPM_V6_AS_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001dc, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_se_smmu1_lpm_v6_as_tbl_cfg_reg, + NULL, + NULL, + }, + { + "dma_tbl_cfg", + SE_SMMU1_DMA_TBL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dma_tbl_cfg_reg, + NULL, + NULL, + }, + { + "stat_mode_cfg", + SE_SMMU1_STAT_MODE_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_stat_mode_cfg_reg, + NULL, + NULL, + }, + { + "ctrl_rpar_cpu_pful", + SE_SMMU1_CTRL_RPAR_CPU_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ctrl_rpar_cpu_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_dir_pful", + SE_SMMU1_CFG_KSCH_DIR_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_dir_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_hash_pful", + SE_SMMU1_CFG_KSCH_HASH_PFULr, + SMMU1, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_hash_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_lpm_pful", + SE_SMMU1_CFG_KSCH_LPM_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_lpm_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_lpm_as_pful", + SE_SMMU1_CFG_KSCH_LPM_AS_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_lpm_as_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_stat_pful", + SE_SMMU1_CFG_KSCH_STAT_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_stat_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_tm_pful", + SE_SMMU1_CFG_KSCH_TM_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_tm_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_oam_pful", + SE_SMMU1_CFG_KSCH_OAM_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_oam_pful_reg, + NULL, + NULL, + }, + { + "cfg_ksch_dma_pful", + SE_SMMU1_CFG_KSCH_DMA_PFULr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cfg_ksch_dma_pful_reg, + NULL, + NULL, + }, + { + "ctrl_wfifo_cfg", + SE_SMMU1_CTRL_WFIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + 
MODULE_SE_SMMU1_BASE_ADDR + 0x00000230, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ctrl_wfifo_cfg_reg, + NULL, + NULL, + }, + { + "rsch_hash_ptr_cfg", + SE_SMMU1_RSCH_HASH_PTR_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000240, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rsch_hash_ptr_cfg_reg, + NULL, + NULL, + }, + { + "rsch_lpm_ptr_cfg", + SE_SMMU1_RSCH_LPM_PTR_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000244, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rsch_lpm_ptr_cfg_reg, + NULL, + NULL, + }, + { + "rsch_lpm_as_ptr_cfg", + SE_SMMU1_RSCH_LPM_AS_PTR_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000248, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rsch_lpm_as_ptr_cfg_reg, + NULL, + NULL, + }, + { + "rsch_stat_ptr_cfg", + SE_SMMU1_RSCH_STAT_PTR_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000024c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rsch_stat_ptr_cfg_reg, + NULL, + NULL, + }, + { + "rsch_oam_ptr_cfg", + SE_SMMU1_RSCH_OAM_PTR_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000250, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rsch_oam_ptr_cfg_reg, + NULL, + NULL, + }, + { + "rschd_fifo_pept_cfg", + SE_SMMU1_RSCHD_FIFO_PEPT_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000254, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rschd_fifo_pept_cfg_reg, + NULL, + NULL, + }, + { + "dir_fifo_pful_cfg", + SE_SMMU1_DIR_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000258, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_smmu1_dir_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "hash_fifo_pful_cfg", + SE_SMMU1_HASH_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000025c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_hash_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "lpm_fifo_pful_cfg", + SE_SMMU1_LPM_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000260, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "lpm_as_fifo_pful_cfg", + SE_SMMU1_LPM_AS_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000264, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "stat_fifo_pful_cfg", + SE_SMMU1_STAT_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000268, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_stat_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "ftm_fifo_pful_cfg", + SE_SMMU1_FTM_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000026c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ftm_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "etm_fifo_pful_cfg", + SE_SMMU1_ETM_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000270, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_etm_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "oam_fifo_pful_cfg", + SE_SMMU1_OAM_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000274, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + 
"dma_fifo_pful_cfg", + SE_SMMU1_DMA_FIFO_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000278, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dma_fifo_pful_cfg_reg, + NULL, + NULL, + }, + { + "cache_rsp_rr_fifo_cfg", + SE_SMMU1_CACHE_RSP_RR_FIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000027c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cache_rsp_rr_fifo_cfg_reg, + NULL, + NULL, + }, + { + "ddr_rsp_rr_fifo_cfg", + SE_SMMU1_DDR_RSP_RR_FIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000280, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_ddr_rsp_rr_fifo_cfg_reg, + NULL, + NULL, + }, + { + "cpu_cahce_fifo_cfg", + SE_SMMU1_CPU_CAHCE_FIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000284, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cpu_cahce_fifo_cfg_reg, + NULL, + NULL, + }, + { + "cache_rsp_fifo_cfg", + SE_SMMU1_CACHE_RSP_FIFO_CFGr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000028c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_smmu1_cache_rsp_fifo_cfg_reg, + NULL, + NULL, + }, + { + "test_state", + SE_SMMU1_TEST_STATEr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000002c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_test_state_reg, + NULL, + NULL, + }, + { + "cache_fifo_ept", + SE_SMMU1_CACHE_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000002f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cache_fifo_ept_reg, + NULL, + NULL, + }, + { + "rr_fifo_ept", + SE_SMMU1_RR_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000002fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_rr_fifo_ept_reg, + NULL, + NULL, + }, + { + "wr_fifo_ept", + SE_SMMU1_WR_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000304, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_wr_fifo_ept_reg, + NULL, + NULL, + }, + { + "wdone_fifo_ept", + SE_SMMU1_WDONE_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000308, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_wdone_fifo_ept_reg, + NULL, + NULL, + }, + { + "kschd_fifo_ept0", + SE_SMMU1_KSCHD_FIFO_EPT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000318, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_kschd_fifo_ept0_reg, + NULL, + NULL, + }, + { + "cash_fifo_ept", + SE_SMMU1_CASH_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000031c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cash_fifo_ept_reg, + NULL, + NULL, + }, + { + "ctrl_fifo_ept", + SE_SMMU1_CTRL_FIFO_EPTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000320, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ctrl_fifo_ept_reg, + NULL, + NULL, + }, + { + "smmu1_rschd_ept3", + SE_SMMU1_SMMU1_RSCHD_EPT3r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000324, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_rschd_ept3_reg, + NULL, + NULL, + }, + { + "smmu1_rschd_ept2", + SE_SMMU1_SMMU1_RSCHD_EPT2r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000328, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_rschd_ept2_reg, + NULL, + NULL, + }, + { + 
"smmu1_rschd_ept1", + SE_SMMU1_SMMU1_RSCHD_EPT1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000032c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_rschd_ept1_reg, + NULL, + NULL, + }, + { + "smmu1_rschd_ept0", + SE_SMMU1_SMMU1_RSCHD_EPT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000330, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_rschd_ept0_reg, + NULL, + NULL, + }, + { + "cash0_ecc_err_addr", + SE_SMMU1_CASH0_ECC_ERR_ADDRr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000334, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cash0_ecc_err_addr_reg, + NULL, + NULL, + }, + { + "arbi_cpu_wr_rdy", + SE_SMMU1_ARBI_CPU_WR_RDYr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000354, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_arbi_cpu_wr_rdy_reg, + NULL, + NULL, + }, + { + "smmu1_int_0_en", + SE_SMMU1_SMMU1_INT_0_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000480, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_0_en_reg, + NULL, + NULL, + }, + { + "smmu1_int_0_mask", + SE_SMMU1_SMMU1_INT_0_MASKr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000484, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_0_mask_reg, + NULL, + NULL, + }, + { + "smmu1_int_1_en", + SE_SMMU1_SMMU1_INT_1_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000488, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_1_en_reg, + NULL, + NULL, + }, + { + "smmu1_int_1_mask", + SE_SMMU1_SMMU1_INT_1_MASKr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000048c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_1_mask_reg, + NULL, + NULL, + }, + { + "smmu1_int_2_en", + SE_SMMU1_SMMU1_INT_2_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000490, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_2_en_reg, + NULL, + NULL, + }, + { + "smmu1_int_2_mask", + SE_SMMU1_SMMU1_INT_2_MASKr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000494, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_2_mask_reg, + NULL, + NULL, + }, + { + "smmu1_int_3_en", + SE_SMMU1_SMMU1_INT_3_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000498, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_3_en_reg, + NULL, + NULL, + }, + { + "smmu1_int_3_mask", + SE_SMMU1_SMMU1_INT_3_MASKr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000049c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_3_mask_reg, + NULL, + NULL, + }, + { + "smmu1_int_0_status", + SE_SMMU1_SMMU1_INT_0_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000540, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_0_status_reg, + NULL, + NULL, + }, + { + "smmu1_int_1_status", + SE_SMMU1_SMMU1_INT_1_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000544, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_1_status_reg, + NULL, + NULL, + }, + { + "smmu1_int_2_status", + SE_SMMU1_SMMU1_INT_2_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000548, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_smmu1_smmu1_int_2_status_reg, + NULL, + NULL, + }, + { + "smmu1_int_3_status", + SE_SMMU1_SMMU1_INT_3_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000054c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_3_status_reg, + NULL, + NULL, + }, + { + "smmu1_int_status", + SE_SMMU1_SMMU1_INT_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000005a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_int_status_reg, + NULL, + NULL, + }, + { + "ctrl_to_cash7_0_fc_cnt", + SE_SMMU1_CTRL_TO_CASH7_0_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000600, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_ctrl_to_cash7_0_fc_cnt_reg, + NULL, + NULL, + }, + { + "cash7_0_to_ctrl_req_cnt", + SE_SMMU1_CASH7_0_TO_CTRL_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000604, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_cash7_0_to_ctrl_req_cnt_reg, + NULL, + NULL, + }, + { + "rschd_to_cache7_fc_cnt", + SE_SMMU1_RSCHD_TO_CACHE7_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000608, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_rschd_to_cache7_fc_cnt_reg, + NULL, + NULL, + }, + { + "cash7_to_cache_rsp_cnt", + SE_SMMU1_CASH7_TO_CACHE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000060c, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_cash7_to_cache_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cash7_to_ctrl_fc_cnt", + SE_SMMU1_CASH7_TO_CTRL_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000610, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + 
g_se_smmu1_cash7_to_ctrl_fc_cnt_reg, + NULL, + NULL, + }, + { + "ctrl_to_cash7_0_rsp_cnt", + SE_SMMU1_CTRL_TO_CASH7_0_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000614, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_ctrl_to_cash7_0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "kschd_to_cache7_0_req_cnt", + SE_SMMU1_KSCHD_TO_CACHE7_0_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000006c0, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_kschd_to_cache7_0_req_cnt_reg, + NULL, + NULL, + }, + { + "cache7_0_to_kschd_fc_cnt", + SE_SMMU1_CACHE7_0_TO_KSCHD_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000006e0, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_cache7_0_to_kschd_fc_cnt_reg, + NULL, + NULL, + }, + { + "dma_to_smmu1_rd_req_cnt", + SE_SMMU1_DMA_TO_SMMU1_RD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000700, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dma_to_smmu1_rd_req_cnt_reg, + NULL, + NULL, + }, + { + "oam_to_kschd_req_cnt", + SE_SMMU1_OAM_TO_KSCHD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000704, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_to_kschd_req_cnt_reg, + NULL, + NULL, + }, + { + "oam_rr_state_rsp_cnt", + SE_SMMU1_OAM_RR_STATE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000708, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_rr_state_rsp_cnt_reg, + NULL, + NULL, + }, + { + "oam_clash_info_cnt", + SE_SMMU1_OAM_CLASH_INFO_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000070c, + (32 / 8), + 0, 
+ 0, + 0, + 0, + 1, + g_se_smmu1_oam_clash_info_cnt_reg, + NULL, + NULL, + }, + { + "oam_to_rr_req_cnt", + SE_SMMU1_OAM_TO_RR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000710, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_to_rr_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_to_kschd_req_cnt", + SE_SMMU1_LPM_AS_TO_KSCHD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000714, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_to_kschd_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_rr_state_rsp_cnt", + SE_SMMU1_LPM_AS_RR_STATE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000718, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_rr_state_rsp_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_clash_info_cnt", + SE_SMMU1_LPM_AS_CLASH_INFO_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000071c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_clash_info_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_to_rr_req_cnt", + SE_SMMU1_LPM_AS_TO_RR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000720, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_to_rr_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_to_kschd_req_cnt", + SE_SMMU1_LPM_TO_KSCHD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000724, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_to_kschd_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_rr_state_rsp_cnt", + SE_SMMU1_LPM_RR_STATE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000728, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_smmu1_lpm_rr_state_rsp_cnt_reg, + NULL, + NULL, + }, + { + "lpm_clash_info_cnt", + SE_SMMU1_LPM_CLASH_INFO_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000072c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_clash_info_cnt_reg, + NULL, + NULL, + }, + { + "lpm_to_rr_req_cnt", + SE_SMMU1_LPM_TO_RR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000730, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_to_rr_req_cnt_reg, + NULL, + NULL, + }, + { + "hash3_0_to_kschd_req_cnt", + SE_SMMU1_HASH3_0_TO_KSCHD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000734, + (32 / 8), + 0, + 3 + 1, + 0, + 16, + 1, + g_se_smmu1_hash3_0_to_kschd_req_cnt_reg, + NULL, + NULL, + }, + { + "hash3_0_rr_state_rsp_cnt", + SE_SMMU1_HASH3_0_RR_STATE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000738, + (32 / 8), + 0, + 3 + 1, + 0, + 16, + 1, + g_se_smmu1_hash3_0_rr_state_rsp_cnt_reg, + NULL, + NULL, + }, + { + "hash3_0_clash_info_cnt", + SE_SMMU1_HASH3_0_CLASH_INFO_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000073c, + (32 / 8), + 0, + 3 + 1, + 0, + 16, + 1, + g_se_smmu1_hash3_0_clash_info_cnt_reg, + NULL, + NULL, + }, + { + "hash3_0_to_rr_req_cnt", + SE_SMMU1_HASH3_0_TO_RR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000740, + (32 / 8), + 0, + 3 + 1, + 0, + 16, + 1, + g_se_smmu1_hash3_0_to_rr_req_cnt_reg, + NULL, + NULL, + }, + { + "dir3_0_to_kschd_req_cnt", + SE_SMMU1_DIR3_0_TO_KSCHD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000774, + (32 / 8), + 0, + 3 + 1, + 
0, + 8, + 1, + g_se_smmu1_dir3_0_to_kschd_req_cnt_reg, + NULL, + NULL, + }, + { + "dir3_0_clash_info_cnt", + SE_SMMU1_DIR3_0_CLASH_INFO_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000778, + (32 / 8), + 0, + 3 + 1, + 0, + 8, + 1, + g_se_smmu1_dir3_0_clash_info_cnt_reg, + NULL, + NULL, + }, + { + "dir_tbl_wr_req_cnt", + SE_SMMU1_DIR_TBL_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000794, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_dir_tbl_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "warbi_to_dir_tbl_warbi_fc_cnt", + SE_SMMU1_WARBI_TO_DIR_TBL_WARBI_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000798, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_warbi_to_dir_tbl_warbi_fc_cnt_reg, + NULL, + NULL, + }, + { + "dir3_0_to_bank_rr_req_cnt", + SE_SMMU1_DIR3_0_TO_BANK_RR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000079c, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_dir3_0_to_bank_rr_req_cnt_reg, + NULL, + NULL, + }, + { + "kschd_to_dir3_0_fc_cnt", + SE_SMMU1_KSCHD_TO_DIR3_0_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000007ac, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_kschd_to_dir3_0_fc_cnt_reg, + NULL, + NULL, + }, + { + "dir3_0_rr_state_rsp_cnt", + SE_SMMU1_DIR3_0_RR_STATE_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000007bc, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_dir3_0_rr_state_rsp_cnt_reg, + NULL, + NULL, + }, + { + "wr_done_to_warbi_fc_cnt", + SE_SMMU1_WR_DONE_TO_WARBI_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + 
MODULE_SE_SMMU1_BASE_ADDR + 0x000007cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_wr_done_to_warbi_fc_cnt_reg, + NULL, + NULL, + }, + { + "wr_done_ptr_req_cnt", + SE_SMMU1_WR_DONE_PTR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000007d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_wr_done_ptr_req_cnt_reg, + NULL, + NULL, + }, + { + "ctrl7_0_to_warbi_fc_cnt", + SE_SMMU1_CTRL7_0_TO_WARBI_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000007d4, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_ctrl7_0_to_warbi_fc_cnt_reg, + NULL, + NULL, + }, + { + "warbi_to_ctrl7_0_wr_req_cnt", + SE_SMMU1_WARBI_TO_CTRL7_0_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000007f4, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_warbi_to_ctrl7_0_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "warbi_to_cash7_0_wr_req_cnt", + SE_SMMU1_WARBI_TO_CASH7_0_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000814, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_warbi_to_cash7_0_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "warbi_to_cpu_wr_fc_cnt", + SE_SMMU1_WARBI_TO_CPU_WR_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000834, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_warbi_to_cpu_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "cpu_wr_req_cnt", + SE_SMMU1_CPU_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000838, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "ctrl7_0_to_cpu_rd_rsp_cnt", + SE_SMMU1_CTRL7_0_TO_CPU_RD_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000083c, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_ctrl7_0_to_cpu_rd_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cpu_to_ctrl7_0_rd_req_cnt", + SE_SMMU1_CPU_TO_CTRL7_0_RD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000085c, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_cpu_to_ctrl7_0_rd_req_cnt_reg, + NULL, + NULL, + }, + { + "cpu_rd_dir_tbl_rsp_cnt", + SE_SMMU1_CPU_RD_DIR_TBL_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000087c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_rd_dir_tbl_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cpu_to_dir_tbl_rd_wr_req_cnt", + SE_SMMU1_CPU_TO_DIR_TBL_RD_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000880, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_cpu_to_dir_tbl_rd_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_mmu_7_0_rsp_fc_cnt", + SE_SMMU1_SMMU1_TO_MMU_7_0_RSP_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000884, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_smmu1_to_mmu_7_0_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "mmu_7_0_to_smmu1_rd_rsp_cnt", + SE_SMMU1_MMU_7_0_TO_SMMU1_RD_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000888, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_mmu_7_0_to_smmu1_rd_rsp_cnt_reg, + NULL, + NULL, + }, + { + "mmu_7_0_to_smmu1_rd_fc_cnt", + SE_SMMU1_MMU_7_0_TO_SMMU1_RD_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000088c, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_mmu_7_0_to_smmu1_rd_fc_cnt_reg, + NULL, + NULL, + 
}, + { + "smmu1_to_mmu_7_rd_req_cnt", + SE_SMMU1_SMMU1_TO_MMU_7_RD_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000890, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_smmu1_to_mmu_7_rd_req_cnt_reg, + NULL, + NULL, + }, + { + "mmu_7_to_smmu1_wr_fc_cnt", + SE_SMMU1_MMU_7_TO_SMMU1_WR_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000894, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_mmu_7_to_smmu1_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_mmu_7_0_wr_req_cnt", + SE_SMMU1_SMMU1_TO_MMU_7_0_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000898, + (32 / 8), + 0, + 7 + 1, + 0, + 24, + 1, + g_se_smmu1_smmu1_to_mmu_7_0_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "se_to_smmu1_wr_rsp_fc_cnt", + SE_SMMU1_SE_TO_SMMU1_WR_RSP_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000944, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_se_to_smmu1_wr_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_se_wr_rsp_cnt", + SE_SMMU1_SMMU1_TO_SE_WR_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000948, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_se_wr_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ddr7_0_wr_rsp_cnt", + SE_SMMU1_DDR7_0_WR_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000094c, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_ddr7_0_wr_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_as_fc_cnt", + SE_SMMU1_SMMU1_TO_AS_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000096c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_smmu1_smmu1_to_as_fc_cnt_reg, + NULL, + NULL, + }, + { + "as_to_smmu1_wr_req_cnt", + SE_SMMU1_AS_TO_SMMU1_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000970, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_as_to_smmu1_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_se_parser_fc_cnt", + SE_SMMU1_SMMU1_TO_SE_PARSER_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000974, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_se_parser_fc_cnt_reg, + NULL, + NULL, + }, + { + "se_parser_to_smmu1_req_cnt", + SE_SMMU1_SE_PARSER_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000978, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_se_parser_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_etm_wr_fc_cnt", + SE_SMMU1_SMMU1_TO_ETM_WR_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000097c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_etm_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "etm_wr_req_cnt", + SE_SMMU1_ETM_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000980, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_etm_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_ftm_wr_fc_cnt", + SE_SMMU1_SMMU1_TO_FTM_WR_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000984, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_ftm_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "ftm_wr_req_cnt", + SE_SMMU1_FTM_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000988, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_smmu1_ftm_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_state_wr_fc_cnt", + SE_SMMU1_SMMU1_TO_STATE_WR_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000098c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_state_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "state_wr_req_cnt", + SE_SMMU1_STATE_WR_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000990, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_state_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "se_to_dma_rsp_cnt", + SE_SMMU1_SE_TO_DMA_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000994, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_se_to_dma_rsp_cnt_reg, + NULL, + NULL, + }, + { + "se_to_dma_fc_cnt", + SE_SMMU1_SE_TO_DMA_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000998, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_se_to_dma_fc_cnt_reg, + NULL, + NULL, + }, + { + "oam_to_smmu1_fc_cnt", + SE_SMMU1_OAM_TO_SMMU1_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000099c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_to_smmu1_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_oam_rsp_cnt", + SE_SMMU1_SMMU1_TO_OAM_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_oam_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_oam_fc_cnt", + SE_SMMU1_SMMU1_TO_OAM_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_oam_fc_cnt_reg, + NULL, + NULL, + }, + { + 
"oam_to_smmu1_req_cnt", + SE_SMMU1_OAM_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_oam_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_etm_rsp_cnt", + SE_SMMU1_SMMU1_TO_ETM_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_etm_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_ftm_rsp_cnt", + SE_SMMU1_SMMU1_TO_FTM_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_ftm_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_etm_fc_cnt", + SE_SMMU1_SMMU1_TO_ETM_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_etm_fc_cnt_reg, + NULL, + NULL, + }, + { + "etm_to_smmu1_req_cnt", + SE_SMMU1_ETM_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_etm_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_ftm_fc_cnt", + SE_SMMU1_SMMU1_TO_FTM_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_ftm_fc_cnt_reg, + NULL, + NULL, + }, + { + "ftm_to_smmu1_req_cnt", + SE_SMMU1_FTM_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_ftm_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_stat_rsp_cnt", + 
SE_SMMU1_SMMU1_TO_STAT_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_stat_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_stat_fc_cnt", + SE_SMMU1_SMMU1_TO_STAT_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_stat_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_to_smmu1_req_cnt", + SE_SMMU1_STAT_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_stat_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_to_smmu1_fc_cnt", + SE_SMMU1_LPM_AS_TO_SMMU1_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_to_smmu1_fc_cnt_reg, + NULL, + NULL, + }, + { + "lpm_to_smmu1_fc_cnt", + SE_SMMU1_LPM_TO_SMMU1_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_to_smmu1_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_lpm_as_rsp_cnt", + SE_SMMU1_SMMU1_TO_LPM_AS_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_lpm_as_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_lpm_rsp_cnt", + SE_SMMU1_SMMU1_TO_LPM_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_lpm_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_lpm_as_fc_cnt", + 
SE_SMMU1_SMMU1_TO_LPM_AS_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_lpm_as_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_lpm_fc_cnt", + SE_SMMU1_SMMU1_TO_LPM_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_smmu1_to_lpm_fc_cnt_reg, + NULL, + NULL, + }, + { + "lpm_as_to_smmu1_req_cnt", + SE_SMMU1_LPM_AS_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_as_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "lpm_to_smmu1_req_cnt", + SE_SMMU1_LPM_TO_SMMU1_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_smmu1_lpm_to_smmu1_req_cnt_reg, + NULL, + NULL, + }, + { + "hash3_0_to_smmu1_fc_cnt", + SE_SMMU1_HASH3_0_TO_SMMU1_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000009f0, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_hash3_0_to_smmu1_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_hash3_0_rsp_cnt", + SE_SMMU1_SMMU1_TO_HASH3_0_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a00, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_smmu1_to_hash3_0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_hash3_0_fc_cnt", + SE_SMMU1_SMMU1_TO_HASH3_0_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a10, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_smmu1_to_hash3_0_fc_cnt_reg, + NULL, + NULL, + }, + { + 
"hash3_0_to_smmu1_cnt", + SE_SMMU1_HASH3_0_TO_SMMU1_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a20, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_hash3_0_to_smmu1_cnt_reg, + NULL, + NULL, + }, + { + "se_to_smmu1_dir3_0_rsp_fc_cnt", + SE_SMMU1_SE_TO_SMMU1_DIR3_0_RSP_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a30, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_se_to_smmu1_dir3_0_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_se_dir3_0_rsp_cnt", + SE_SMMU1_SMMU1_TO_SE_DIR3_0_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a40, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_smmu1_to_se_dir3_0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "smmu1_to_se_dir3_0_fc_cnt", + SE_SMMU1_SMMU1_TO_SE_DIR3_0_FC_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a50, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_smmu1_to_se_dir3_0_fc_cnt_reg, + NULL, + NULL, + }, + { + "se_to_smmu1_dir3_0_cnt", + SE_SMMU1_SE_TO_SMMU1_DIR3_0_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a60, + (32 / 8), + 0, + 3 + 1, + 0, + 4, + 1, + g_se_smmu1_se_to_smmu1_dir3_0_cnt_reg, + NULL, + NULL, + }, + { + "cache7_0_to_rschd_rsp_cnt", + SE_SMMU1_CACHE7_0_TO_RSCHD_RSP_CNTr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000a70, + (32 / 8), + 0, + 7 + 1, + 0, + 4, + 1, + g_se_smmu1_cache7_0_to_rschd_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ddr_rw_addr", + SE_CMMU_DDR_RW_ADDRr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_se_cmmu_ddr_rw_addr_reg, + NULL, + NULL, + }, + { + "ddr_rw_mode", + SE_CMMU_DDR_RW_MODEr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_cmmu_ddr_rw_mode_reg, + NULL, + NULL, + }, + { + "cp_cmd", + SE_CMMU_CP_CMDr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cp_cmd_reg, + NULL, + NULL, + }, + { + "cpu_ind_rd_done", + SE_CMMU_CPU_IND_RD_DONEr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ind_rd_done_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat0", + SE_CMMU_CPU_IND_RDAT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ind_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat1", + SE_CMMU_CPU_IND_RDAT1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ind_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat2", + SE_CMMU_CPU_IND_RDAT2r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ind_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_rdat3", + SE_CMMU_CPU_IND_RDAT3r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ind_rdat3_reg, + NULL, + NULL, + }, + { + "cpu_ddr_fifo_almful", + SE_CMMU_CPU_DDR_FIFO_ALMFULr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 
0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cpu_ddr_fifo_almful_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + SE_CMMU_DEBUG_CNT_MODEr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000034, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_cmmu_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "cmmu_pful_cfg", + SE_CMMU_CMMU_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000038, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_cmmu_cmmu_pful_cfg_reg, + NULL, + NULL, + }, + { + "cmmu_stat_pful_cfg", + SE_CMMU_CMMU_STAT_PFUL_CFGr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000003c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_se_cmmu_cmmu_stat_pful_cfg_reg, + NULL, + NULL, + }, + { + "stat_overflow_mode", + SE_CMMU_STAT_OVERFLOW_MODEr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_stat_overflow_mode_reg, + NULL, + NULL, + }, + { + "cmmu_cp_fifo_pful", + SE_CMMU_CMMU_CP_FIFO_PFULr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cmmu_cp_fifo_pful_reg, + NULL, + NULL, + }, + { + "ddr_wr_dat0", + SE_CMMU_DDR_WR_DAT0r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_ddr_wr_dat0_reg, + NULL, + NULL, + }, + { + "ddr_wr_dat1", + SE_CMMU_DDR_WR_DAT1r, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000007c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_ddr_wr_dat1_reg, + NULL, + NULL, + }, + { + "cmmu_int_unmask_flag", + 
SE_CMMU_CMMU_INT_UNMASK_FLAGr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cmmu_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "cmmu_int_en", + SE_CMMU_CMMU_INT_ENr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_se_cmmu_cmmu_int_en_reg, + NULL, + NULL, + }, + { + "cmmu_int_mask", + SE_CMMU_CMMU_INT_MASKr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000088, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_se_cmmu_cmmu_int_mask_reg, + NULL, + NULL, + }, + { + "cmmu_int_status", + SE_CMMU_CMMU_INT_STATUSr, + SMMU1, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 13, + g_se_cmmu_cmmu_int_status_reg, + NULL, + NULL, + }, + { + "stat_cmmu_req_cnt", + SE_CMMU_STAT_CMMU_REQ_CNTr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_stat_cmmu_req_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_fc0_cnt", + SE_CMMU_CMMU_FC0_CNTr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cmmu_fc0_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_fc1_cnt", + SE_CMMU_CMMU_FC1_CNTr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x00000408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_se_cmmu_cmmu_fc1_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_fc2_cnt", + SE_CMMU_CMMU_FC2_CNTr, + SMMU1, + DPP_REG_FLAG_INDIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_CMMU_BASE_ADDR + 0x0000040c, + (32 / 8), + 0, + 0, + 0, 
+ 0, + 1, + g_se_cmmu_cmmu_fc2_cnt_reg, + NULL, + NULL, + }, + { + "hash0_tbl0_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL0_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000148, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl0_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl1_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL1_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000014c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl1_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl2_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL2_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000150, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl2_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl3_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL3_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000154, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl3_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl4_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL4_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000158, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl4_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl5_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL5_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000015c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl5_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl6_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL6_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + 
MODULE_SE_SMMU1_BASE_ADDR + 0x00000160, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl6_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash0_tbl7_cfg", + SMMU14K_SE_SMMU1_HASH0_TBL7_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000164, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash0_tbl7_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl0_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL0_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000168, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl0_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl1_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL1_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000016c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl1_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl2_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL2_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000170, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl2_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl3_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL3_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000174, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl3_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl4_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL4_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000178, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl4_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl5_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL5_CFGr, + 
SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000017c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl5_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl6_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL6_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000180, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl6_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash1_tbl7_cfg", + SMMU14K_SE_SMMU1_HASH1_TBL7_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000184, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash1_tbl7_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl0_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL0_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000188, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl0_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl1_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL1_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000018c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl1_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl2_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL2_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000190, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl2_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl3_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL3_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000194, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl3_cfg_reg, + dpp_se_write, + 
dpp_se_read, + }, + { + "hash2_tbl4_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL4_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x00000198, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl4_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl5_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL5_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x0000019c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl5_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl6_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL6_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001a0, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl6_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash2_tbl7_cfg", + SMMU14K_SE_SMMU1_HASH2_TBL7_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001a4, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash2_tbl7_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl0_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL0_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001a8, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl0_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl1_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL1_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001ac, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl1_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl2_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL2_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001b0, + (32 / 8), + 0, + 0, + 
0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl2_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl3_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL3_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001b4, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl3_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl4_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL4_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001b8, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl4_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl5_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL5_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001bc, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl5_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl6_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL6_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001c0, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl6_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "hash3_tbl7_cfg", + SMMU14K_SE_SMMU1_HASH3_TBL7_CFGr, + SMMU14K, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_SE_SMMU1_BASE_ADDR + MODULE_SE_SMMU1_BASE_ADDR + 0x000001c4, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_smmu14k_se_smmu1_hash3_tbl7_cfg_reg, + dpp_se_write, + dpp_se_read, + }, + { + "cpu_ind_eram_wdat1", + STAT_STAT_CFG_CPU_IND_ERAM_WDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_wdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_wdat2", + STAT_STAT_CFG_CPU_IND_ERAM_WDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_GLBL_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_wdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_wdat3", + STAT_STAT_CFG_CPU_IND_ERAM_WDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_wdat3_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_req_info", + STAT_STAT_CFG_CPU_IND_ERAM_REQ_INFOr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000010, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_stat_stat_cfg_cpu_ind_eram_req_info_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_rd_done", + STAT_STAT_CFG_CPU_IND_ERAM_RD_DONEr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_rd_done_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_rdat0", + STAT_STAT_CFG_CPU_IND_ERAM_RDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_rdat1", + STAT_STAT_CFG_CPU_IND_ERAM_RDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_rdat2", + STAT_STAT_CFG_CPU_IND_ERAM_RDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_eram_rdat3", + STAT_STAT_CFG_CPU_IND_ERAM_RDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_GLBL_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_eram_rdat3_reg, + NULL, + NULL, + }, + { + "tm_alu_eram_cpu_rdy", + STAT_STAT_CFG_TM_ALU_ERAM_CPU_RDYr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_tm_alu_eram_cpu_rdy_reg, + NULL, + NULL, + }, + { + "oam_stat_cfg", + STAT_STAT_CFG_OAM_STAT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_stat_stat_cfg_oam_stat_cfg_reg, + NULL, + NULL, + }, + { + "ftm_port_sel_cfg", + STAT_STAT_CFG_FTM_PORT_SEL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_ftm_port_sel_cfg_reg, + NULL, + NULL, + }, + { + "oam_eram_base_addr", + STAT_STAT_CFG_OAM_ERAM_BASE_ADDRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_eram_base_addr_reg, + NULL, + NULL, + }, + { + "oam_lm_eram_base_addr", + STAT_STAT_CFG_OAM_LM_ERAM_BASE_ADDRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_lm_eram_base_addr_reg, + NULL, + NULL, + }, + { + "oam_ddr_base_addr", + STAT_STAT_CFG_OAM_DDR_BASE_ADDRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_ddr_base_addr_reg, + NULL, + NULL, + }, + { + "plcr0_schd_pful_cfg", + STAT_STAT_CFG_PLCR0_SCHD_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000078, + 
(32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_plcr0_schd_pful_cfg_reg, + NULL, + NULL, + }, + { + "oam_lm_ord_pful_cfg", + STAT_STAT_CFG_OAM_LM_ORD_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_oam_lm_ord_pful_cfg_reg, + NULL, + NULL, + }, + { + "ddr_schd_pful_cfg", + STAT_STAT_CFG_DDR_SCHD_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_ddr_schd_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_pful_cfg", + STAT_STAT_CFG_ERAM_SCHD_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000088, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_pept_cfg", + STAT_STAT_CFG_ERAM_SCHD_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_pept_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_oam_pful_cfg", + STAT_STAT_CFG_ERAM_SCHD_OAM_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000090, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_oam_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_oam_pept_cfg", + STAT_STAT_CFG_ERAM_SCHD_OAM_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000094, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_oam_pept_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_oam_lm_pful_cfg", + STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 
0x00000098, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_oam_lm_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_oam_lm_pept_cfg", + STAT_STAT_CFG_ERAM_SCHD_OAM_LM_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000009c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_oam_lm_pept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_pful_cfg", + STAT_STAT_CFG_RSCHD_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000a0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_rschd_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_pept_cfg", + STAT_STAT_CFG_RSCHD_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000a4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_rschd_pept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_plcr_pful_cfg", + STAT_STAT_CFG_RSCHD_PLCR_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000a8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_rschd_plcr_pful_cfg_reg, + NULL, + NULL, + }, + { + "rschd_plcr_pept_cfg", + STAT_STAT_CFG_RSCHD_PLCR_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000ac, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_rschd_plcr_pept_cfg_reg, + NULL, + NULL, + }, + { + "rschd_plcr_info_pful_cfg", + STAT_STAT_CFG_RSCHD_PLCR_INFO_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000b0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_rschd_plcr_info_pful_cfg_reg, + NULL, + NULL, + }, + { + "alu_arb_cpu_pful_cfg", + STAT_STAT_CFG_ALU_ARB_CPU_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 
0x000000b4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_alu_arb_cpu_pful_cfg_reg, + NULL, + NULL, + }, + { + "alu_arb_user_pful_cfg", + STAT_STAT_CFG_ALU_ARB_USER_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000b8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_alu_arb_user_pful_cfg_reg, + NULL, + NULL, + }, + { + "alu_arb_stat_pful_cfg", + STAT_STAT_CFG_ALU_ARB_STAT_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000bc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_alu_arb_stat_pful_cfg_reg, + NULL, + NULL, + }, + { + "cycmov_dat_pful_cfg", + STAT_STAT_CFG_CYCMOV_DAT_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000c0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_cycmov_dat_pful_cfg_reg, + NULL, + NULL, + }, + { + "ddr_opr_pful_cfg", + STAT_STAT_CFG_DDR_OPR_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000c4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_ddr_opr_pful_cfg_reg, + NULL, + NULL, + }, + { + "cycle_mov_pful_cfg", + STAT_STAT_CFG_CYCLE_MOV_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000c8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_cycle_mov_pful_cfg_reg, + NULL, + NULL, + }, + { + "cntovf_pful_cfg", + STAT_STAT_CFG_CNTOVF_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000cc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_cntovf_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_plcr_pful_cfg", + STAT_STAT_CFG_ERAM_SCHD_PLCR_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000d0, + 
(32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_plcr_pful_cfg_reg, + NULL, + NULL, + }, + { + "eram_schd_plcr_pept_cfg", + STAT_STAT_CFG_ERAM_SCHD_PLCR_PEPT_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000d4, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_plcr_pept_cfg_reg, + NULL, + NULL, + }, + { + "debug_cnt_mode", + STAT_STAT_CFG_DEBUG_CNT_MODEr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000d8, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_debug_cnt_mode_reg, + NULL, + NULL, + }, + { + "tm_mov_period_cfg", + STAT_STAT_CFG_TM_MOV_PERIOD_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000dc, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_tm_mov_period_cfg_reg, + NULL, + NULL, + }, + { + "alu_ddr_cpu_req_pful_cfg", + STAT_STAT_CFG_ALU_DDR_CPU_REQ_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000ec, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_alu_ddr_cpu_req_pful_cfg_reg, + NULL, + NULL, + }, + { + "cycmov_addr_pful_cfg", + STAT_STAT_CFG_CYCMOV_ADDR_PFUL_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000000f0, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_cycmov_addr_pful_cfg_reg, + NULL, + NULL, + }, + { + "ord_ddr_plcr_fifo_empty", + STAT_STAT_CFG_ORD_DDR_PLCR_FIFO_EMPTYr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000100, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_stat_stat_cfg_ord_ddr_plcr_fifo_empty_reg, + NULL, + NULL, + }, + { + "tm_stat_fifo_empty", + STAT_STAT_CFG_TM_STAT_FIFO_EMPTYr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 
0x00000104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_tm_stat_fifo_empty_reg, + NULL, + NULL, + }, + { + "eram_schd_fifo_empty_0_1", + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_0_1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000108, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_fifo_empty_0_1_reg, + NULL, + NULL, + }, + { + "eram_schd_fifo_empty_2_3", + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_2_3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000010c, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_fifo_empty_2_3_reg, + NULL, + NULL, + }, + { + "eram_schd_fifo_empty_4_5", + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_4_5r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000110, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_fifo_empty_4_5_reg, + NULL, + NULL, + }, + { + "eram_schd_fifo_empty_6_7", + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_6_7r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000114, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_stat_stat_cfg_eram_schd_fifo_empty_6_7_reg, + NULL, + NULL, + }, + { + "eram_schd_fifo_empty_free_8", + STAT_STAT_CFG_ERAM_SCHD_FIFO_EMPTY_FREE_8r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000118, + (32 / 8), + 0, + 0, + 0, + 0, + 10, + g_stat_stat_cfg_eram_schd_fifo_empty_free_8_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty_0_3", + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_0_3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000011c, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_rschd_fifo_empty_0_3_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty_4_7", + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_4_7r, + STAT, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000120, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_rschd_fifo_empty_4_7_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty_8_11", + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_8_11r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000124, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_rschd_fifo_empty_8_11_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty_12_15", + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_12_15r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000128, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_rschd_fifo_empty_12_15_reg, + NULL, + NULL, + }, + { + "rschd_fifo_empty_plcr_16_17", + STAT_STAT_CFG_RSCHD_FIFO_EMPTY_PLCR_16_17r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000012c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_stat_stat_cfg_rschd_fifo_empty_plcr_16_17_reg, + NULL, + NULL, + }, + { + "stat_int_unmask_flag", + STAT_STAT_CFG_STAT_INT_UNMASK_FLAGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000140, + (32 / 8), + 0, + 0, + 0, + 0, + 6, + g_stat_stat_cfg_stat_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "stat_int0_en", + STAT_STAT_CFG_STAT_INT0_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000144, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int0_en_reg, + NULL, + NULL, + }, + { + "stat_int0_mask", + STAT_STAT_CFG_STAT_INT0_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000148, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int0_mask_reg, + NULL, + NULL, + }, + { + "stat_int0_status", + STAT_STAT_CFG_STAT_INT0_STATUSr, + STAT, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000014c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int0_status_reg, + NULL, + NULL, + }, + { + "stat_int1_en", + STAT_STAT_CFG_STAT_INT1_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000150, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int1_en_reg, + NULL, + NULL, + }, + { + "stat_int1_mask", + STAT_STAT_CFG_STAT_INT1_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000154, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int1_mask_reg, + NULL, + NULL, + }, + { + "stat_int1_status", + STAT_STAT_CFG_STAT_INT1_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000158, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int1_status_reg, + NULL, + NULL, + }, + { + "stat_int2_en", + STAT_STAT_CFG_STAT_INT2_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000015c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int2_en_reg, + NULL, + NULL, + }, + { + "stat_int2_mask", + STAT_STAT_CFG_STAT_INT2_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000160, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int2_mask_reg, + NULL, + NULL, + }, + { + "stat_int2_status", + STAT_STAT_CFG_STAT_INT2_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000164, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int2_status_reg, + NULL, + NULL, + }, + { + "stat_int3_en", + STAT_STAT_CFG_STAT_INT3_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000168, + (32 
/ 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int3_en_reg, + NULL, + NULL, + }, + { + "stat_int3_mask", + STAT_STAT_CFG_STAT_INT3_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000016c, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int3_mask_reg, + NULL, + NULL, + }, + { + "stat_int3_status", + STAT_STAT_CFG_STAT_INT3_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000170, + (32 / 8), + 0, + 0, + 0, + 0, + 32, + g_stat_stat_cfg_stat_int3_status_reg, + NULL, + NULL, + }, + { + "stat_int4_en", + STAT_STAT_CFG_STAT_INT4_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000174, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int4_en_reg, + NULL, + NULL, + }, + { + "stat_int4_mask", + STAT_STAT_CFG_STAT_INT4_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000178, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int4_mask_reg, + NULL, + NULL, + }, + { + "stat_int4_status", + STAT_STAT_CFG_STAT_INT4_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000017c, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int4_status_reg, + NULL, + NULL, + }, + { + "stat_int5_en", + STAT_STAT_CFG_STAT_INT5_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000180, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int5_en_reg, + NULL, + NULL, + }, + { + "stat_int5_mask", + STAT_STAT_CFG_STAT_INT5_MASKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000184, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int5_mask_reg, + NULL, + NULL, + }, + { + 
"stat_int5_status", + STAT_STAT_CFG_STAT_INT5_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000188, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_stat_int5_status_reg, + NULL, + NULL, + }, + { + "rschd_ecc_bypass", + STAT_STAT_CFG_RSCHD_ECC_BYPASSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000198, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_rschd_ecc_bypass_reg, + NULL, + NULL, + }, + { + "rschd_ecc_single_err", + STAT_STAT_CFG_RSCHD_ECC_SINGLE_ERRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000019c, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_rschd_ecc_single_err_reg, + NULL, + NULL, + }, + { + "rschd_ecc_double_err", + STAT_STAT_CFG_RSCHD_ECC_DOUBLE_ERRr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000001a0, + (32 / 8), + 0, + 0, + 0, + 0, + 19, + g_stat_stat_cfg_rschd_ecc_double_err_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat0", + STAT_STAT_CFG_CPU_IND_DDR_WDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat1", + STAT_STAT_CFG_CPU_IND_DDR_WDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat2", + STAT_STAT_CFG_CPU_IND_DDR_WDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat3", + 
STAT_STAT_CFG_CPU_IND_DDR_WDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat3_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat4", + STAT_STAT_CFG_CPU_IND_DDR_WDAT4r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat4_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat5", + STAT_STAT_CFG_CPU_IND_DDR_WDAT5r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat5_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat6", + STAT_STAT_CFG_CPU_IND_DDR_WDAT6r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat6_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat7", + STAT_STAT_CFG_CPU_IND_DDR_WDAT7r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000021c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat7_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat8", + STAT_STAT_CFG_CPU_IND_DDR_WDAT8r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000220, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat8_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat9", + STAT_STAT_CFG_CPU_IND_DDR_WDAT9r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000224, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat9_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat10", + STAT_STAT_CFG_CPU_IND_DDR_WDAT10r, + STAT, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000228, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat10_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat11", + STAT_STAT_CFG_CPU_IND_DDR_WDAT11r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000022c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat11_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat12", + STAT_STAT_CFG_CPU_IND_DDR_WDAT12r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000230, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat12_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat13", + STAT_STAT_CFG_CPU_IND_DDR_WDAT13r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000234, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat13_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat14", + STAT_STAT_CFG_CPU_IND_DDR_WDAT14r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000238, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat14_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_wdat15", + STAT_STAT_CFG_CPU_IND_DDR_WDAT15r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000023c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_wdat15_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_req_info", + STAT_STAT_CFG_CPU_IND_DDR_REQ_INFOr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000240, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_stat_stat_cfg_cpu_ind_ddr_req_info_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rd_done", + STAT_STAT_CFG_CPU_IND_DDR_RD_DONEr, + STAT, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000244, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rd_done_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat0", + STAT_STAT_CFG_CPU_IND_DDR_RDAT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000248, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat0_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat1", + STAT_STAT_CFG_CPU_IND_DDR_RDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000024c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat2", + STAT_STAT_CFG_CPU_IND_DDR_RDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000250, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat3", + STAT_STAT_CFG_CPU_IND_DDR_RDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000254, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat3_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat4", + STAT_STAT_CFG_CPU_IND_DDR_RDAT4r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000258, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat4_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat5", + STAT_STAT_CFG_CPU_IND_DDR_RDAT5r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000025c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat5_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat6", + STAT_STAT_CFG_CPU_IND_DDR_RDAT6r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_GLBL_BASE_ADDR + 0x00000260, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat6_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat7", + STAT_STAT_CFG_CPU_IND_DDR_RDAT7r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000264, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat7_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat8", + STAT_STAT_CFG_CPU_IND_DDR_RDAT8r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000268, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat8_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat9", + STAT_STAT_CFG_CPU_IND_DDR_RDAT9r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000026c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat9_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat10", + STAT_STAT_CFG_CPU_IND_DDR_RDAT10r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000270, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat10_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat11", + STAT_STAT_CFG_CPU_IND_DDR_RDAT11r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000274, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat11_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat12", + STAT_STAT_CFG_CPU_IND_DDR_RDAT12r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000278, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat12_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat13", + STAT_STAT_CFG_CPU_IND_DDR_RDAT13r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000027c, + 
(32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat13_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat14", + STAT_STAT_CFG_CPU_IND_DDR_RDAT14r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000280, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat14_reg, + NULL, + NULL, + }, + { + "cpu_ind_ddr_rdat15", + STAT_STAT_CFG_CPU_IND_DDR_RDAT15r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000284, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_ind_ddr_rdat15_reg, + NULL, + NULL, + }, + { + "tm_alu_ddr_cpu_rdy", + STAT_STAT_CFG_TM_ALU_DDR_CPU_RDYr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000288, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_tm_alu_ddr_cpu_rdy_reg, + NULL, + NULL, + }, + { + "ept_flag", + STAT_STAT_CFG_EPT_FLAGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000003f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_ept_flag_reg, + NULL, + NULL, + }, + { + "ppu_soft_rst", + STAT_STAT_CFG_PPU_SOFT_RSTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000003fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_ppu_soft_rst_reg, + NULL, + NULL, + }, + { + "stat_smmu0_fc15_0_cnt", + STAT_STAT_CFG_STAT_SMMU0_FC15_0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_stat_smmu0_fc15_0_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_stat_fc15_0_cnt", + STAT_STAT_CFG_SMMU0_STAT_FC15_0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000440, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + 
g_stat_stat_cfg_smmu0_stat_fc15_0_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_stat_rsp15_0_cnt", + STAT_STAT_CFG_SMMU0_STAT_RSP15_0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000480, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_smmu0_stat_rsp15_0_cnt_reg, + NULL, + NULL, + }, + { + "stat_smmu0_req15_0_cnt", + STAT_STAT_CFG_STAT_SMMU0_REQ15_0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000004c0, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_stat_smmu0_req15_0_cnt_reg, + NULL, + NULL, + }, + { + "ppu_stat_mec5_0_rsp_fc_cnt", + STAT_STAT_CFG_PPU_STAT_MEC5_0_RSP_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000500, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_ppu_stat_mec5_0_rsp_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_ppu_mec5_0_key_fc_cnt", + STAT_STAT_CFG_STAT_PPU_MEC5_0_KEY_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000518, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_stat_ppu_mec5_0_key_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_ppu_mec5_0_rsp_cnt", + STAT_STAT_CFG_STAT_PPU_MEC5_0_RSP_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000530, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_stat_ppu_mec5_0_rsp_cnt_reg, + NULL, + NULL, + }, + { + "ppu_stat_mec5_0_key_cnt", + STAT_STAT_CFG_PPU_STAT_MEC5_0_KEY_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000548, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_ppu_stat_mec5_0_key_cnt_reg, + NULL, + NULL, + }, + { + "ppu5_0_no_exist_opcd_ex_cnt", + STAT_STAT_CFG_PPU5_0_NO_EXIST_OPCD_EX_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_UNI_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000560, + (32 / 8), + 0, + 5 + 1, + 0, + 4, + 1, + g_stat_stat_cfg_ppu5_0_no_exist_opcd_ex_cnt_reg, + NULL, + NULL, + }, + { + "se_etm_stat_wr_fc_cnt", + STAT_STAT_CFG_SE_ETM_STAT_WR_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000578, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_se_etm_stat_wr_fc_cnt_reg, + NULL, + NULL, + }, + { + "se_etm_stat_rd_fc_cnt", + STAT_STAT_CFG_SE_ETM_STAT_RD_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000057c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_se_etm_stat_rd_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_etm_deq_fc_cnt", + STAT_STAT_CFG_STAT_ETM_DEQ_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000580, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_etm_deq_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_etm_enq_fc_cnt", + STAT_STAT_CFG_STAT_ETM_ENQ_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000584, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_etm_enq_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_lm_fc_cnt", + STAT_STAT_CFG_STAT_OAM_LM_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000588, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_oam_lm_fc_cnt_reg, + NULL, + NULL, + }, + { + "oam_stat_lm_fc_cnt", + STAT_STAT_CFG_OAM_STAT_LM_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000058c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_stat_lm_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_fc_cnt", + STAT_STAT_CFG_STAT_OAM_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000590, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_oam_fc_cnt_reg, + NULL, + NULL, + }, + { + "cmmu_stat_fc_cnt", + STAT_STAT_CFG_CMMU_STAT_FC_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000594, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cmmu_stat_fc_cnt_reg, + NULL, + NULL, + }, + { + "stat_cmmu_req_cnt", + STAT_STAT_CFG_STAT_CMMU_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x00000598, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_cmmu_req_cnt_reg, + NULL, + NULL, + }, + { + "smmu0_plcr_rsp0_cnt", + STAT_STAT_CFG_SMMU0_PLCR_RSP0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x0000059c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_smmu0_plcr_rsp0_cnt_reg, + NULL, + NULL, + }, + { + "plcr_smmu0_req0_cnt", + STAT_STAT_CFG_PLCR_SMMU0_REQ0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_plcr_smmu0_req0_cnt_reg, + NULL, + NULL, + }, + { + "stat_oam_lm_rsp_cnt", + STAT_STAT_CFG_STAT_OAM_LM_RSP_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_stat_oam_lm_rsp_cnt_reg, + NULL, + NULL, + }, + { + "oam_stat_lm_req_cnt", + STAT_STAT_CFG_OAM_STAT_LM_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_stat_lm_req_cnt_reg, + NULL, + NULL, + }, + { + "oam_stat_req_cnt", + STAT_STAT_CFG_OAM_STAT_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_oam_stat_req_cnt_reg, + NULL, + NULL, + }, + { + "se_etm_stat_rsp_cnt", + STAT_STAT_CFG_SE_ETM_STAT_RSP_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_se_etm_stat_rsp_cnt_reg, + NULL, + NULL, + }, + { + "etm_stat_se_wr_req_cnt", + STAT_STAT_CFG_ETM_STAT_SE_WR_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_se_wr_req_cnt_reg, + NULL, + NULL, + }, + { + "etm_stat_se_rd_req_cnt", + STAT_STAT_CFG_ETM_STAT_SE_RD_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_se_rd_req_cnt_reg, + NULL, + NULL, + }, + { + "etm_stat_smmu0_req_cnt0", + STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_smmu0_req_cnt0_reg, + NULL, + NULL, + }, + { + "etm_stat_smmu0_req_cnt1", + STAT_STAT_CFG_ETM_STAT_SMMU0_REQ_CNT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_etm_stat_smmu0_req_cnt1_reg, + NULL, + NULL, + }, + { + "tm_stat_eram_cpu_rsp_cnt", + STAT_STAT_CFG_TM_STAT_ERAM_CPU_RSP_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_tm_stat_eram_cpu_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cpu_rd_eram_req_cnt", + STAT_STAT_CFG_CPU_RD_ERAM_REQ_CNTr, + STAT, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_rd_eram_req_cnt_reg, + NULL, + NULL, + }, + { + "cpu_wr_eram_req_cnt", + STAT_STAT_CFG_CPU_WR_ERAM_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_wr_eram_req_cnt_reg, + NULL, + NULL, + }, + { + "tm_stat_ddr_cpu_rsp_cnt", + STAT_STAT_CFG_TM_STAT_DDR_CPU_RSP_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_tm_stat_ddr_cpu_rsp_cnt_reg, + NULL, + NULL, + }, + { + "cpu_rd_ddr_req_cnt", + STAT_STAT_CFG_CPU_RD_DDR_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_rd_ddr_req_cnt_reg, + NULL, + NULL, + }, + { + "cpu_wr_ddr_req_cnt", + STAT_STAT_CFG_CPU_WR_DDR_REQ_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_GLBL_BASE_ADDR + 0x000005d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_stat_cfg_cpu_wr_ddr_req_cnt_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat1", + STAT_ETCAM_CPU_IND_WDAT1r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat1_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat2", + STAT_ETCAM_CPU_IND_WDAT2r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat2_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat3", + STAT_ETCAM_CPU_IND_WDAT3r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + 
MODULE_STAT_ETCAM_BASE_ADDR + 0x00000010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat3_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat4", + STAT_ETCAM_CPU_IND_WDAT4r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat4_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat5", + STAT_ETCAM_CPU_IND_WDAT5r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat5_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat6", + STAT_ETCAM_CPU_IND_WDAT6r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat6_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat7", + STAT_ETCAM_CPU_IND_WDAT7r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat7_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat8", + STAT_ETCAM_CPU_IND_WDAT8r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat8_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat9", + STAT_ETCAM_CPU_IND_WDAT9r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat9_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat10", + STAT_ETCAM_CPU_IND_WDAT10r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat10_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat11", + 
STAT_ETCAM_CPU_IND_WDAT11r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat11_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat12", + STAT_ETCAM_CPU_IND_WDAT12r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat12_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat13", + STAT_ETCAM_CPU_IND_WDAT13r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat13_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat14", + STAT_ETCAM_CPU_IND_WDAT14r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000003c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat14_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat15", + STAT_ETCAM_CPU_IND_WDAT15r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat15_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat16", + STAT_ETCAM_CPU_IND_WDAT16r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat16_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat17", + STAT_ETCAM_CPU_IND_WDAT17r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat17_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat18", + STAT_ETCAM_CPU_IND_WDAT18r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000004c, + (32 
/ 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat18_reg, + NULL, + NULL, + }, + { + "cpu_ind_wdat19", + STAT_ETCAM_CPU_IND_WDAT19r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_cpu_ind_wdat19_reg, + NULL, + NULL, + }, + { + "t_strwc_cfg", + STAT_ETCAM_T_STRWC_CFGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_t_strwc_cfg_reg, + NULL, + NULL, + }, + { + "etcam_int_unmask_flag", + STAT_ETCAM_ETCAM_INT_UNMASK_FLAGr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_int_unmask_flag_reg, + NULL, + NULL, + }, + { + "etcam_int_en0", + STAT_ETCAM_ETCAM_INT_EN0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000084, + (32 / 8), + 0, + 0, + 0, + 0, + 18, + g_stat_etcam_etcam_int_en0_reg, + NULL, + NULL, + }, + { + "etcam_int_mask0", + STAT_ETCAM_ETCAM_INT_MASK0r, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000008c, + (32 / 8), + 0, + 0, + 0, + 0, + 18, + g_stat_etcam_etcam_int_mask0_reg, + NULL, + NULL, + }, + { + "etcam_int_status", + STAT_ETCAM_ETCAM_INT_STATUSr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000094, + (32 / 8), + 0, + 0, + 0, + 0, + 18, + g_stat_etcam_etcam_int_status_reg, + NULL, + NULL, + }, + { + "int_tb_ini_ok", + STAT_ETCAM_INT_TB_INI_OKr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x000003f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_int_tb_ini_ok_reg, + NULL, + NULL, + }, + { + "etcam_clk_en", + 
STAT_ETCAM_ETCAM_CLK_ENr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x000003fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_clk_en_reg, + NULL, + NULL, + }, + { + "as_etcam_req0_cnt", + STAT_ETCAM_AS_ETCAM_REQ0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000400, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_as_etcam_req0_cnt_reg, + NULL, + NULL, + }, + { + "as_etcam_req1_cnt", + STAT_ETCAM_AS_ETCAM_REQ1_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000404, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_as_etcam_req1_cnt_reg, + NULL, + NULL, + }, + { + "etcam_as_index0_cnt", + STAT_ETCAM_ETCAM_AS_INDEX0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000408, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_as_index0_cnt_reg, + NULL, + NULL, + }, + { + "etcam_as_index1_cnt", + STAT_ETCAM_ETCAM_AS_INDEX1_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000040c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_as_index1_cnt_reg, + NULL, + NULL, + }, + { + "etcam_not_hit0_cnt", + STAT_ETCAM_ETCAM_NOT_HIT0_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000410, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_not_hit0_cnt_reg, + NULL, + NULL, + }, + { + "etcam_not_hit1_cnt", + STAT_ETCAM_ETCAM_NOT_HIT1_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000414, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_not_hit1_cnt_reg, + NULL, + NULL, + }, + { + "table_id_not_match_cnt", + STAT_ETCAM_TABLE_ID_NOT_MATCH_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000418, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_table_id_not_match_cnt_reg, + NULL, + NULL, + }, + { + "table_id_clash01_cnt", + STAT_ETCAM_TABLE_ID_CLASH01_CNTr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000041c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_table_id_clash01_cnt_reg, + NULL, + NULL, + }, + { + "etcam_cpu_fl", + STAT_ETCAM_ETCAM_CPU_FLr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x0000044c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_cpu_fl_reg, + NULL, + NULL, + }, + { + "etcam_arb_empty", + STAT_ETCAM_ETCAM_ARB_EMPTYr, + STAT, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_STAT_BASE_ADDR + MODULE_STAT_ETCAM_BASE_ADDR + 0x00000450, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_stat_etcam_etcam_arb_empty_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_event0", + DTB_DTB_CFG_CFG_FINISH_INT_EVENT0r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_event0_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_event1", + DTB_DTB_CFG_CFG_FINISH_INT_EVENT1r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_event1_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_event2", + DTB_DTB_CFG_CFG_FINISH_INT_EVENT2r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_event2_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_event3", + DTB_DTB_CFG_CFG_FINISH_INT_EVENT3r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 
0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_event3_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_maks0", + DTB_DTB_CFG_CFG_FINISH_INT_MAKS0r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_maks0_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_maks1", + DTB_DTB_CFG_CFG_FINISH_INT_MAKS1r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_maks1_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_maks2", + DTB_DTB_CFG_CFG_FINISH_INT_MAKS2r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_maks2_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_maks3", + DTB_DTB_CFG_CFG_FINISH_INT_MAKS3r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_maks3_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_test0", + DTB_DTB_CFG_CFG_FINISH_INT_TEST0r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_test0_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_test1", + DTB_DTB_CFG_CFG_FINISH_INT_TEST1r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_test1_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_test2", + DTB_DTB_CFG_CFG_FINISH_INT_TEST2r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_dtb_dtb_cfg_cfg_finish_int_test2_reg, + NULL, + NULL, + }, + { + "cfg_finish_int_test3", + DTB_DTB_CFG_CFG_FINISH_INT_TEST3r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_finish_int_test3_reg, + NULL, + NULL, + }, + { + "cfg_dtb_int_to_riscv_sel", + DTB_DTB_CFG_CFG_DTB_INT_TO_RISCV_SELr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_dtb_int_to_riscv_sel_reg, + NULL, + NULL, + }, + { + "cfg_dtb_ep_int_msix_enable", + DTB_DTB_CFG_CFG_DTB_EP_INT_MSIX_ENABLEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_dtb_ep_int_msix_enable_reg, + NULL, + NULL, + }, + { + "cfg_dtb_ep_doorbell_addr_h_0_15", + DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_H_0_15r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_h_0_15_reg, + NULL, + NULL, + }, + { + "cfg_dtb_ep_doorbell_addr_l_0_15", + DTB_DTB_CFG_CFG_DTB_EP_DOORBELL_ADDR_L_0_15r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0140, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_dtb_dtb_cfg_cfg_dtb_ep_doorbell_addr_l_0_15_reg, + NULL, + NULL, + }, + { + "cfg_dtb_debug_mode_en", + DTB_DTB_CFG_CFG_DTB_DEBUG_MODE_ENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_dtb_debug_mode_en_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_table_addr_high", + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + 
MODULE_DTB_CFG_BASE_ADDR + 0x0184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_table_addr_high_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_table_addr_low", + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ADDR_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0188, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_table_addr_low_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_table_len", + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x018c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_table_len_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_table_user", + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_USERr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0190, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_dtb_dtb_cfg_info_axi_last_rd_table_user_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_table_onload_cnt", + DTB_DTB_CFG_INFO_AXI_LAST_RD_TABLE_ONLOAD_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_table_onload_cnt_reg, + NULL, + NULL, + }, + { + "cnt_axi_rd_table_resp_err", + DTB_DTB_CFG_CNT_AXI_RD_TABLE_RESP_ERRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0198, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_axi_rd_table_resp_err_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_pd_addr_high", + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_pd_addr_high_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_pd_addr_low", + 
DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ADDR_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_pd_addr_low_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_pd_len", + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_pd_len_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_pd_user", + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_USERr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01ac, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_dtb_dtb_cfg_info_axi_last_rd_pd_user_reg, + NULL, + NULL, + }, + { + "info_axi_last_rd_pd_onload_cnt", + DTB_DTB_CFG_INFO_AXI_LAST_RD_PD_ONLOAD_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_rd_pd_onload_cnt_reg, + NULL, + NULL, + }, + { + "cnt_axi_rd_pd_resp_err", + DTB_DTB_CFG_CNT_AXI_RD_PD_RESP_ERRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_axi_rd_pd_resp_err_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ctrl_addr_high", + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_high_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ctrl_addr_low", + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ADDR_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_dtb_dtb_cfg_info_axi_last_wr_ctrl_addr_low_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ctrl_len", + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ctrl_len_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ctrl_user", + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_USERr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01c4, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_dtb_dtb_cfg_info_axi_last_wr_ctrl_user_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ctrl_onload_cnt", + DTB_DTB_CFG_INFO_AXI_LAST_WR_CTRL_ONLOAD_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ctrl_onload_cnt_reg, + NULL, + NULL, + }, + { + "cnt_axi_wr_ctrl_resp_err", + DTB_DTB_CFG_CNT_AXI_WR_CTRL_RESP_ERRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_axi_wr_ctrl_resp_err_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ddr_addr_high", + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_high_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ddr_addr_low", + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ADDR_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ddr_addr_low_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ddr_len", + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ddr_len_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ddr_user", + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_USERr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01dc, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_dtb_dtb_cfg_info_axi_last_wr_ddr_user_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_ddr_onload_cnt", + DTB_DTB_CFG_INFO_AXI_LAST_WR_DDR_ONLOAD_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_ddr_onload_cnt_reg, + NULL, + NULL, + }, + { + "cnt_axi_wr_ddr_resp_err", + DTB_DTB_CFG_CNT_AXI_WR_DDR_RESP_ERRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_axi_wr_ddr_resp_err_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_fin_addr_high", + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_fin_addr_high_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_fin_addr_low", + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ADDR_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_fin_addr_low_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_fin_len", + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_fin_len_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_fin_user", + 
DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_USERr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01f4, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_dtb_dtb_cfg_info_axi_last_wr_fin_user_reg, + NULL, + NULL, + }, + { + "info_axi_last_wr_fin_onload_cnt", + DTB_DTB_CFG_INFO_AXI_LAST_WR_FIN_ONLOAD_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_axi_last_wr_fin_onload_cnt_reg, + NULL, + NULL, + }, + { + "cnt_axi_wr_fin_resp_err", + DTB_DTB_CFG_CNT_AXI_WR_FIN_RESP_ERRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x01fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_axi_wr_fin_resp_err_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_smmu0_table_high", + DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_smmu0_table_low", + DTB_DTB_CFG_CNT_DTB_WR_SMMU0_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_smmu0_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_smmu1_table_high", + DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_smmu1_table_low", + DTB_DTB_CFG_CNT_DTB_WR_SMMU1_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_dtb_dtb_cfg_cnt_dtb_wr_smmu1_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_zcam_table_high", + DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_zcam_table_low", + DTB_DTB_CFG_CNT_DTB_WR_ZCAM_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_zcam_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_etcam_table_high", + DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_etcam_table_low", + DTB_DTB_CFG_CNT_DTB_WR_ETCAM_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x021c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_etcam_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_hash_table_high", + DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0220, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_hash_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_wr_hash_table_low", + DTB_DTB_CFG_CNT_DTB_WR_HASH_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0224, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_wr_hash_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_smmu0_table_high", + DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + 
MODULE_DTB_CFG_BASE_ADDR + 0x0228, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_smmu0_table_low", + DTB_DTB_CFG_CNT_DTB_RD_SMMU0_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x022c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_smmu0_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_smmu1_table_high", + DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0230, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_smmu1_table_low", + DTB_DTB_CFG_CNT_DTB_RD_SMMU1_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0234, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_smmu1_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_zcam_table_high", + DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0238, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_zcam_table_low", + DTB_DTB_CFG_CNT_DTB_RD_ZCAM_TABLE_LOWr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x023c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_zcam_table_low_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_etcam_table_high", + DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_HIGHr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0240, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_high_reg, + NULL, + NULL, + }, + { + "cnt_dtb_rd_etcam_table_low", + DTB_DTB_CFG_CNT_DTB_RD_ETCAM_TABLE_LOWr, + DTB, + 
DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0244, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cnt_dtb_rd_etcam_table_low_reg, + NULL, + NULL, + }, + { + "info_wr_ctrl_state", + DTB_DTB_CFG_INFO_WR_CTRL_STATEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0248, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_wr_ctrl_state_reg, + NULL, + NULL, + }, + { + "info_rd_table_state", + DTB_DTB_CFG_INFO_RD_TABLE_STATEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x024c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_rd_table_state_reg, + NULL, + NULL, + }, + { + "info_rd_pd_state", + DTB_DTB_CFG_INFO_RD_PD_STATEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0250, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_rd_pd_state_reg, + NULL, + NULL, + }, + { + "info_dump_cmd_state", + DTB_DTB_CFG_INFO_DUMP_CMD_STATEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0254, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_dump_cmd_state_reg, + NULL, + NULL, + }, + { + "info_wr_ddr_state", + DTB_DTB_CFG_INFO_WR_DDR_STATEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x0258, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_info_wr_ddr_state_reg, + NULL, + NULL, + }, + { + "cfg_dtb_debug_info_clr", + DTB_DTB_CFG_CFG_DTB_DEBUG_INFO_CLRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_CFG_BASE_ADDR + 0x025c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_dtb_cfg_cfg_dtb_debug_info_clr_reg, + NULL, + NULL, + }, + { + "cfg_ddos_stat_dump_thrd_0_15", + DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_0_15r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + 
MODULE_DTB_DDOS_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 15 + 1, + 0, + 4, + 1, + g_dtb_ddos_cfg_ddos_stat_dump_thrd_0_15_reg, + NULL, + NULL, + }, + { + "cfg_ddos_stat_dump_thrd_comp_en", + DTB_DDOS_CFG_DDOS_STAT_DUMP_THRD_COMP_ENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_stat_dump_thrd_comp_en_reg, + NULL, + NULL, + }, + { + "cfg_ddos_dump_stat_num", + DTB_DDOS_CFG_DDOS_DUMP_STAT_NUMr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_dump_stat_num_reg, + NULL, + NULL, + }, + { + "cfg_ddos_even_hash_table_baddr", + DTB_DDOS_CFG_DDOS_EVEN_HASH_TABLE_BADDRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_even_hash_table_baddr_reg, + NULL, + NULL, + }, + { + "cfg_ddos_odd_hash_table_baddr", + DTB_DDOS_CFG_DDOS_ODD_HASH_TABLE_BADDRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_odd_hash_table_baddr_reg, + NULL, + NULL, + }, + { + "cfg_ddos_stat_index_offset", + DTB_DDOS_CFG_DDOS_STAT_INDEX_OFFSETr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_stat_index_offset_reg, + NULL, + NULL, + }, + { + "cfg_ddos_ns_flag_cnt", + DTB_DDOS_CFG_DDOS_NS_FLAG_CNTr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_ns_flag_cnt_reg, + NULL, + NULL, + }, + { + "cfg_ddos_even_stat_table_baddr", + DTB_DDOS_CFG_DDOS_EVEN_STAT_TABLE_BADDRr, + DTB, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_even_stat_table_baddr_reg, + NULL, + NULL, + }, + { + "cfg_ddos_odd_stat_table_baddr", + DTB_DDOS_CFG_DDOS_ODD_STAT_TABLE_BADDRr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_odd_stat_table_baddr_reg, + NULL, + NULL, + }, + { + "cfg_ddos_even_stat_dump_daddr_h", + DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_Hr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_even_stat_dump_daddr_h_reg, + NULL, + NULL, + }, + { + "cfg_ddos_even_stat_dump_daddr_l", + DTB_DDOS_CFG_DDOS_EVEN_STAT_DUMP_DADDR_Lr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_even_stat_dump_daddr_l_reg, + NULL, + NULL, + }, + { + "cfg_ddos_odd_stat_dump_daddr_h", + DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_Hr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_h_reg, + NULL, + NULL, + }, + { + "cfg_ddos_odd_stat_dump_daddr_l", + DTB_DDOS_CFG_DDOS_ODD_STAT_DUMP_DADDR_Lr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_odd_stat_dump_daddr_l_reg, + NULL, + NULL, + }, + { + "cfg_ddos_work_mode_enable", + DTB_DDOS_CFG_DDOS_WORK_MODE_ENABLEr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_work_mode_enable_reg, + NULL, + NULL, + }, + { + 
"cfg_ddos_stat_table_len", + DTB_DDOS_CFG_DDOS_STAT_TABLE_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_stat_table_len_reg, + NULL, + NULL, + }, + { + "cfg_ddos_hash_table_len", + DTB_DDOS_CFG_DDOS_HASH_TABLE_LENr, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_DDOS_BASE_ADDR + 0x0078, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_dtb_ddos_cfg_ddos_hash_table_len_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram0_0_255", + DTB_DTB_RAM_TRAF_CTRL_RAM0_0_255r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x00000, + (32 / 8), + 0, + 255 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram0_0_255_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram1_0_255", + DTB_DTB_RAM_TRAF_CTRL_RAM1_0_255r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x00400, + (32 / 8), + 0, + 255 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram1_0_255_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram2_0_255", + DTB_DTB_RAM_TRAF_CTRL_RAM2_0_255r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x00800, + (32 / 8), + 0, + 255 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram2_0_255_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram3_0_255", + DTB_DTB_RAM_TRAF_CTRL_RAM3_0_255r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x00c00, + (32 / 8), + 0, + 255 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram3_0_255_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram4_0_255", + DTB_DTB_RAM_TRAF_CTRL_RAM4_0_255r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x01000, + (32 / 8), + 0, + 255 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram4_0_255_reg, + NULL, + NULL, + }, + { + "traf_ctrl_ram5_0_63", + 
DTB_DTB_RAM_TRAF_CTRL_RAM5_0_63r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x01400, + (32 / 8), + 0, + 63 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_traf_ctrl_ram5_0_63_reg, + NULL, + NULL, + }, + { + "dump_pd_ram_0_2047", + DTB_DTB_RAM_DUMP_PD_RAM_0_2047r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x02000, + (32 / 8), + 0, + 2047 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_dump_pd_ram_0_2047_reg, + NULL, + NULL, + }, + { + "rd_ctrl_ram_0_4095", + DTB_DTB_RAM_RD_CTRL_RAM_0_4095r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x04000, + (32 / 8), + 0, + 4095 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_rd_ctrl_ram_0_4095_reg, + NULL, + NULL, + }, + { + "rd_table_ram_0_8191", + DTB_DTB_RAM_RD_TABLE_RAM_0_8191r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x08000, + (32 / 8), + 0, + 8191 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_rd_table_ram_0_8191_reg, + NULL, + NULL, + }, + { + "dtb_cmd_man_ram_0_16383", + DTB_DTB_RAM_DTB_CMD_MAN_RAM_0_16383r, + DTB, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_DTB_BASE_ADDR + MODULE_DTB_RAM_BASE_ADDR + 0x10000, + (32 / 8), + 0, + 16383 + 1, + 0, + 4, + 1, + g_dtb_dtb_ram_dtb_cmd_man_ram_0_16383_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_st", + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_STr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0000, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_ms_st_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_ind", + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_INDr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0008, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_ms_ind_reg, + NULL, + NULL, + }, + 
{ + "cpu_trpg_ms_slave_ind", + TRPG_TRPG_RX_PORT_CPU_TRPG_MS_SLAVE_INDr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x000c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpg_ms_slave_ind_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_up_water_level", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_UP_WATER_LEVELr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0020, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_up_water_level_reg, + NULL, + NULL, + }, + { + "cpu_trpgrx_low_water_level", + TRPG_TRPG_RX_PORT_CPU_TRPGRX_LOW_WATER_LEVELr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_RX_BASE_ADDR + 0x0024, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_rx_port_cpu_trpgrx_low_water_level_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_st", + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_STr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0000, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_ms_st_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_ind", + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_INDr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x0008, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_ms_ind_reg, + NULL, + NULL, + }, + { + "cpu_trpg_ms_slave_ind", + TRPG_TRPG_TX_PORT_CPU_TRPG_MS_SLAVE_INDr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_BASE_ADDR + 0x000c, + (32 / 8), + DPP_TRPG_PORT_NUM, + 0, + DPP_TRPG_PORT_SPACE_SIZE, + 0, + 1, + g_trpg_trpg_tx_port_cpu_trpg_ms_slave_ind_reg, + NULL, + NULL, + }, + { + 
"cpu_todtime_update_int_event", + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_EVENTr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_update_int_event_reg, + NULL, + NULL, + }, + { + "cpu_todtime_update_int_test", + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_TESTr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_update_int_test_reg, + NULL, + NULL, + }, + { + "cpu_todtime_update_int_addr", + TRPG_TRPG_TX_GLB_CPU_TODTIME_UPDATE_INT_ADDRr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_GLB_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_trpg_trpg_tx_glb_cpu_todtime_update_int_addr_reg, + NULL, + NULL, + }, + { + "trpg_tx_todtime_ram", + TRPG_TRPG_TX_TODTIME_RAM_TRPG_TX_TODTIME_RAMr, + TRPG, + DPP_REG_FLAG_DIRECT, + DPP_REG_UNI_ARRAY, + SYS_TRPG_BASE_ADDR + MODULE_TRPG_TX_TODTIME_RAM_BASE_ADDR + + 0x0000, + (32 / 8), + 0, + 4095 + 1, + 0, + 4, + 1, + g_trpg_trpg_tx_todtime_ram_trpg_tx_todtime_ram_reg, + NULL, + NULL, + }, + { + "cfg_tsn_test_reg", + TSN_TSN_PORT_CFG_TSN_TEST_REGr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x000, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_test_reg_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_qbv_enable", + TSN_TSN_PORT_CFG_TSN_PORT_QBV_ENABLEr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0004, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_qbv_enable_reg, + NULL, + NULL, + }, + { + "cfg_tsn_phy_port_sel", + TSN_TSN_PORT_CFG_TSN_PHY_PORT_SELr, + TSN, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0008, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_phy_port_sel_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_time_sel", + TSN_TSN_PORT_CFG_TSN_PORT_TIME_SELr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x000c, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_time_sel_reg, + NULL, + NULL, + }, + { + "cfg_tsn_clk_freq", + TSN_TSN_PORT_CFG_TSN_CLK_FREQr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0014, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 2, + g_tsn_tsn_port_cfg_tsn_clk_freq_reg, + NULL, + NULL, + }, + { + "cfg_tsn_read_ram_n", + TSN_TSN_PORT_CFG_TSN_READ_RAM_Nr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0018, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 3, + g_tsn_tsn_port_cfg_tsn_read_ram_n_reg, + NULL, + NULL, + }, + { + "cfg_tsn_exe_time", + TSN_TSN_PORT_CFG_TSN_EXE_TIMEr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x001c, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_exe_time_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_itr_shift", + TSN_TSN_PORT_CFG_TSN_PORT_ITR_SHIFTr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0020, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_itr_shift_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_base_time_h", + TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_Hr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0024, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + 
DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_base_time_h_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_base_time_l", + TSN_TSN_PORT_CFG_TSN_PORT_BASE_TIME_Lr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0028, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_base_time_l_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_cycle_time_h", + TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_Hr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0030, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_cycle_time_h_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_cycle_time_l", + TSN_TSN_PORT_CFG_TSN_PORT_CYCLE_TIME_Lr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0034, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_cycle_time_l_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_guard_band_time", + TSN_TSN_PORT_CFG_TSN_PORT_GUARD_BAND_TIMEr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0040, + (32 / 8), + DPP_TSN_PORT_NUM, + 7 + 1, + DPP_TSN_PORT_SPACE_SIZE, + 4, + 1, + g_tsn_tsn_port_cfg_tsn_port_guard_band_time_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_default_gate_en", + TSN_TSN_PORT_CFG_TSN_PORT_DEFAULT_GATE_ENr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0060, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_default_gate_en_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_change_gate_en", + TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_GATE_ENr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0064, + (32 / 8), + 
DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_change_gate_en_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_init_finish", + TSN_TSN_PORT_CFG_TSN_PORT_INIT_FINISHr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0068, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_init_finish_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_change_en", + TSN_TSN_PORT_CFG_TSN_PORT_CHANGE_ENr, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x006c, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_change_en_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_gcl_num0", + TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM0r, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0070, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_gcl_num0_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_gcl_num1", + TSN_TSN_PORT_CFG_TSN_PORT_GCL_NUM1r, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x0074, + (32 / 8), + DPP_TSN_PORT_NUM, + 0, + DPP_TSN_PORT_SPACE_SIZE, + 0, + 1, + g_tsn_tsn_port_cfg_tsn_port_gcl_num1_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_gcl_value0", + TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE0r, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x1000, + (32 / 8), + DPP_TSN_PORT_NUM, + 255 + 1, + DPP_TSN_PORT_SPACE_SIZE, + 4, + 2, + g_tsn_tsn_port_cfg_tsn_port_gcl_value0_reg, + NULL, + NULL, + }, + { + "cfg_tsn_port_gcl_value1", + TSN_TSN_PORT_CFG_TSN_PORT_GCL_VALUE1r, + TSN, + DPP_REG_FLAG_DIRECT, + DPP_REG_BIN_ARRAY, + SYS_TSN_BASE_ADDR + MODULE_TSN_PORT0_BASE_ADDR + 0x2000, + (32 / 8), + DPP_TSN_PORT_NUM, + 255 + 1, + 
DPP_TSN_PORT_SPACE_SIZE, + 4, + 2, + g_tsn_tsn_port_cfg_tsn_port_gcl_value1_reg, + NULL, + NULL, + }, + { + "cfg_epid_v_func_num", + AXI_AXI_CONV_CFG_EPID_V_FUNC_NUMr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_axi_axi_conv_cfg_epid_v_func_num_reg, + NULL, + NULL, + }, + { + "info_axim_rw_hsk_cnt", + AXI_AXI_CONV_INFO_AXIM_RW_HSK_CNTr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0020, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_axi_axi_conv_info_axim_rw_hsk_cnt_reg, + NULL, + NULL, + }, + { + "info_axim_last_wr_id", + AXI_AXI_CONV_INFO_AXIM_LAST_WR_IDr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0024, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_axi_axi_conv_info_axim_last_wr_id_reg, + NULL, + NULL, + }, + { + "info_axim_last_wr_addr_h", + AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_Hr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_axi_axi_conv_info_axim_last_wr_addr_h_reg, + NULL, + NULL, + }, + { + "info_axim_last_wr_addr_l", + AXI_AXI_CONV_INFO_AXIM_LAST_WR_ADDR_Lr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_axi_axi_conv_info_axim_last_wr_addr_l_reg, + NULL, + NULL, + }, + { + "cfg_debug_info_clr_en", + AXI_AXI_CONV_CFG_DEBUG_INFO_CLR_ENr, + AXI, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_AXI_CONV_BASE_ADDR + 0x0038, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_axi_axi_conv_cfg_debug_info_clr_en_reg, + NULL, + NULL, + }, + { + "pp1s_interrupt", + PTPTM_PTP_TOP_PP1S_INTERRUPTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ptptm_ptp_top_pp1s_interrupt_reg, + NULL, + NULL, + }, + { + "pp1s_external_select", + PTPTM_PTP_TOP_PP1S_EXTERNAL_SELECTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0004, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pp1s_external_select_reg, + NULL, + NULL, + }, + { + "pp1s_out_select", + PTPTM_PTP_TOP_PP1S_OUT_SELECTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pp1s_out_select_reg, + NULL, + NULL, + }, + { + "test_pp1s_select", + PTPTM_PTP_TOP_TEST_PP1S_SELECTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_test_pp1s_select_reg, + NULL, + NULL, + }, + { + "local_pp1s_en", + PTPTM_PTP_TOP_LOCAL_PP1S_ENr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0010, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_local_pp1s_en_reg, + NULL, + NULL, + }, + { + "local_pp1s_adjust", + PTPTM_PTP_TOP_LOCAL_PP1S_ADJUSTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ptptm_ptp_top_local_pp1s_adjust_reg, + NULL, + NULL, + }, + { + "local_pp1s_adjust_value", + PTPTM_PTP_TOP_LOCAL_PP1S_ADJUST_VALUEr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_local_pp1s_adjust_value_reg, + NULL, + NULL, + }, + { + "pp1s_to_np_select", + PTPTM_PTP_TOP_PP1S_TO_NP_SELECTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pp1s_to_np_select_reg, + NULL, + NULL, + }, + { + "pd_u1_sel", + PTPTM_PTP_TOP_PD_U1_SELr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ptptm_ptp_top_pd_u1_sel_reg, + NULL, + NULL, + }, + { + "pd_u1_pd0_shift", + PTPTM_PTP_TOP_PD_U1_PD0_SHIFTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0044, + (32 / 
8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pd_u1_pd0_shift_reg, + NULL, + NULL, + }, + { + "pd_u1_pd1_shift", + PTPTM_PTP_TOP_PD_U1_PD1_SHIFTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pd_u1_pd1_shift_reg, + NULL, + NULL, + }, + { + "pd_u1_result", + PTPTM_PTP_TOP_PD_U1_RESULTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ptptm_ptp_top_pd_u1_result_reg, + NULL, + NULL, + }, + { + "pd_u2_sel", + PTPTM_PTP_TOP_PD_U2_SELr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ptptm_ptp_top_pd_u2_sel_reg, + NULL, + NULL, + }, + { + "pd_u2_pd0_shift", + PTPTM_PTP_TOP_PD_U2_PD0_SHIFTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pd_u2_pd0_shift_reg, + NULL, + NULL, + }, + { + "pd_u2_pd1_shift", + PTPTM_PTP_TOP_PD_U2_PD1_SHIFTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_pd_u2_pd1_shift_reg, + NULL, + NULL, + }, + { + "pd_u2_result", + PTPTM_PTP_TOP_PD_U2_RESULTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ptptm_ptp_top_pd_u2_result_reg, + NULL, + NULL, + }, + { + "tsn_group_nanosecond_delay0", + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY0r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_nanosecond_delay0_reg, + NULL, + NULL, + }, + { + "tsn_group_fracnanosecond_delay0", + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY0r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ptptm_ptp_top_tsn_group_fracnanosecond_delay0_reg, + NULL, + NULL, + }, + { + "tsn_group_nanosecond_delay1", + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY1r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_nanosecond_delay1_reg, + NULL, + NULL, + }, + { + "tsn_group_fracnanosecond_delay1", + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY1r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x008c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_fracnanosecond_delay1_reg, + NULL, + NULL, + }, + { + "tsn_group_nanosecond_delay2", + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY2r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_nanosecond_delay2_reg, + NULL, + NULL, + }, + { + "tsn_group_fracnanosecond_delay2", + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY2r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_fracnanosecond_delay2_reg, + NULL, + NULL, + }, + { + "tsn_group_nanosecond_delay3", + PTPTM_PTP_TOP_TSN_GROUP_NANOSECOND_DELAY3r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x0098, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_nanosecond_delay3_reg, + NULL, + NULL, + }, + { + "tsn_group_fracnanosecond_delay3", + PTPTM_PTP_TOP_TSN_GROUP_FRACNANOSECOND_DELAY3r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x009c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_tsn_group_fracnanosecond_delay3_reg, + NULL, + NULL, + }, + { + "tsn_ptp1588_rdma_nanosecond_delay", + PTPTM_PTP_TOP_TSN_PTP1588_RDMA_NANOSECOND_DELAYr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x00a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ptptm_ptp_top_tsn_ptp1588_rdma_nanosecond_delay_reg, + NULL, + NULL, + }, + { + "ptp1588_rdma_fracnanosecond_delay", + PTPTM_PTP_TOP_PTP1588_RDMA_FRACNANOSECOND_DELAYr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x00a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_ptp1588_rdma_fracnanosecond_delay_reg, + NULL, + NULL, + }, + { + "ptp1588_np_nanosecond_delay", + PTPTM_PTP_TOP_PTP1588_NP_NANOSECOND_DELAYr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x00a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_ptp1588_np_nanosecond_delay_reg, + NULL, + NULL, + }, + { + "ptp1588_np_fracnanosecond_delay", + PTPTM_PTP_TOP_PTP1588_NP_FRACNANOSECOND_DELAYr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x00ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_ptp1588_np_fracnanosecond_delay_reg, + NULL, + NULL, + }, + { + "time_sync_period", + PTPTM_PTP_TOP_TIME_SYNC_PERIODr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP0_BASE_ADDR + 0x00b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptp_top_time_sync_period_reg, + NULL, + NULL, + }, + { + "module_id", + PTPTM_PTPTM_MODULE_IDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0000, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_module_id_reg, + NULL, + NULL, + }, + { + "module_version", + PTPTM_PTPTM_MODULE_VERSIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0008, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ptptm_ptptm_module_version_reg, + NULL, + NULL, + }, + { + "module_date", + PTPTM_PTPTM_MODULE_DATEr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x000c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ptptm_ptptm_module_date_reg, + NULL, + NULL, + }, + { + "interrupt_status", + PTPTM_PTPTM_INTERRUPT_STATUSr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0010, + 
(32 / 8), + 0, + 0, + 0, + 0, + 5, + g_ptptm_ptptm_interrupt_status_reg, + NULL, + NULL, + }, + { + "interrupt_event", + PTPTM_PTPTM_INTERRUPT_EVENTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0014, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_ptptm_ptptm_interrupt_event_reg, + NULL, + NULL, + }, + { + "interrupt_mask", + PTPTM_PTPTM_INTERRUPT_MASKr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0018, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_ptptm_ptptm_interrupt_mask_reg, + NULL, + NULL, + }, + { + "interrupt_test", + PTPTM_PTPTM_INTERRUPT_TESTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x001c, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_ptptm_ptptm_interrupt_test_reg, + NULL, + NULL, + }, + { + "hw_clock_cycle_integer", + PTPTM_PTPTM_HW_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0028, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_hw_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "hw_clock_cycle_fraction", + PTPTM_PTPTM_HW_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x002c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_hw_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "ptp_clock_cycle_integer", + PTPTM_PTPTM_PTP_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0030, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_ptp_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "ptp_clock_cycle_fraction", + PTPTM_PTPTM_PTP_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0034, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_ptp_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "ptp_configuration", + PTPTM_PTPTM_PTP_CONFIGURATIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0040, + (32 / 8), + 0, + 
0, + 0, + 0, + 18, + g_ptptm_ptptm_ptp_configuration_reg, + NULL, + NULL, + }, + { + "timer_control", + PTPTM_PTPTM_TIMER_CONTROLr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0044, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ptptm_ptptm_timer_control_reg, + NULL, + NULL, + }, + { + "pps_income_delay", + PTPTM_PTPTM_PPS_INCOME_DELAYr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0048, + (32 / 8), + 0, + 0, + 0, + 0, + 2, + g_ptptm_ptptm_pps_income_delay_reg, + NULL, + NULL, + }, + { + "clock_cycle_update", + PTPTM_PTPTM_CLOCK_CYCLE_UPDATEr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x004c, + (32 / 8), + 0, + 0, + 0, + 0, + 5, + g_ptptm_ptptm_clock_cycle_update_reg, + NULL, + NULL, + }, + { + "cycle_time_of_output_period_pulse_1", + PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_1r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0050, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_cycle_time_of_output_period_pulse_1_reg, + NULL, + NULL, + }, + { + "cycle_time_of_output_period_pulse_2", + PTPTM_PTPTM_CYCLE_TIME_OF_OUTPUT_PERIOD_PULSE_2r, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0054, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_cycle_time_of_output_period_pulse_2_reg, + NULL, + NULL, + }, + { + "timer_latch_en", + PTPTM_PTPTM_TIMER_LATCH_ENr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0058, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_timer_latch_en_reg, + NULL, + NULL, + }, + { + "timer_latch_sel", + PTPTM_PTPTM_TIMER_LATCH_SELr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x005c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_timer_latch_sel_reg, + NULL, + NULL, + }, + { + "trigger_in_tod_nanosecond", + PTPTM_PTPTM_TRIGGER_IN_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PTP1_BASE_ADDR + 0x0060, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "trigger_in_lower_tod_second", + PTPTM_PTPTM_TRIGGER_IN_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0064, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_lower_tod_second_reg, + NULL, + NULL, + }, + { + "trigger_in_high_tod_second", + PTPTM_PTPTM_TRIGGER_IN_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0068, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_high_tod_second_reg, + NULL, + NULL, + }, + { + "trigger_in_fracnanosecond", + PTPTM_PTPTM_TRIGGER_IN_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x006c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_fracnanosecond_reg, + NULL, + NULL, + }, + { + "trigger_in_hardware_time_low", + PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_LOWr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0070, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_hardware_time_low_reg, + NULL, + NULL, + }, + { + "trigger_in_hardware_time_high", + PTPTM_PTPTM_TRIGGER_IN_HARDWARE_TIME_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0074, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_in_hardware_time_high_reg, + NULL, + NULL, + }, + { + "trigger_out_tod_nanosecond", + PTPTM_PTPTM_TRIGGER_OUT_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0080, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_out_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "trigger_out_lower_tod_second", + PTPTM_PTPTM_TRIGGER_OUT_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0084, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ptptm_ptptm_trigger_out_lower_tod_second_reg, + NULL, + NULL, + }, + { + "trigger_out_high_tod_second", + PTPTM_PTPTM_TRIGGER_OUT_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0088, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_out_high_tod_second_reg, + NULL, + NULL, + }, + { + "trigger_out_hardware_time_low", + PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_LOWr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0090, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_out_hardware_time_low_reg, + NULL, + NULL, + }, + { + "trigger_out_hardware_time_high", + PTPTM_PTPTM_TRIGGER_OUT_HARDWARE_TIME_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0094, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_trigger_out_hardware_time_high_reg, + NULL, + NULL, + }, + { + "adjust_tod_nanosecond", + PTPTM_PTPTM_ADJUST_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "adjust_lower_tod_second", + PTPTM_PTPTM_ADJUST_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_lower_tod_second_reg, + NULL, + NULL, + }, + { + "adjust_high_tod_second", + PTPTM_PTPTM_ADJUST_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_high_tod_second_reg, + NULL, + NULL, + }, + { + "adjust_fracnanosecond", + PTPTM_PTPTM_ADJUST_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_fracnanosecond_reg, + NULL, + NULL, + }, + { + "adjust_hardware_time_low", + PTPTM_PTPTM_ADJUST_HARDWARE_TIME_LOWr, 
+ PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_hardware_time_low_reg, + NULL, + NULL, + }, + { + "adjust_hardware_time_high", + PTPTM_PTPTM_ADJUST_HARDWARE_TIME_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_adjust_hardware_time_high_reg, + NULL, + NULL, + }, + { + "latch_tod_nanosecond", + PTPTM_PTPTM_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "latch_lower_tod_second", + PTPTM_PTPTM_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "latch_high_tod_second", + PTPTM_PTPTM_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "latch_fracnanosecond", + PTPTM_PTPTM_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "latch_hardware_time_low", + PTPTM_PTPTM_LATCH_HARDWARE_TIME_LOWr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_hardware_time_low_reg, + NULL, + NULL, + }, + { + "latch_hardware_time_high", + PTPTM_PTPTM_LATCH_HARDWARE_TIME_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_latch_hardware_time_high_reg, + NULL, + NULL, + }, + { + 
"real_tod_nanosecond", + PTPTM_PTPTM_REAL_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "real_lower_tod_second", + PTPTM_PTPTM_REAL_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_lower_tod_second_reg, + NULL, + NULL, + }, + { + "real_high_tod_second", + PTPTM_PTPTM_REAL_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_high_tod_second_reg, + NULL, + NULL, + }, + { + "real_hardware_time_low", + PTPTM_PTPTM_REAL_HARDWARE_TIME_LOWr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_hardware_time_low_reg, + NULL, + NULL, + }, + { + "real_hardware_time_high", + PTPTM_PTPTM_REAL_HARDWARE_TIME_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_hardware_time_high_reg, + NULL, + NULL, + }, + { + "ptp1588_event_message_port", + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_PORTr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0100, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_ptp1588_event_message_port_reg, + NULL, + NULL, + }, + { + "ptp1588_event_message_timestamp_low", + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_LOWr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0104, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_ptp1588_event_message_timestamp_low_reg, + NULL, + NULL, + }, + { + "ptp1588_event_message_timestamp_high", + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_TIMESTAMP_HIGHr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PTP1_BASE_ADDR + 0x0108, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_ptp1588_event_message_timestamp_high_reg, + NULL, + NULL, + }, + { + "ptp1588_event_message_fifo_status", + PTPTM_PTPTM_PTP1588_EVENT_MESSAGE_FIFO_STATUSr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x010c, + (32 / 8), + 0, + 0, + 0, + 0, + 3, + g_ptptm_ptptm_ptp1588_event_message_fifo_status_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tod_nanosecond", + PTPTM_PTPTM_PP1S_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0120, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_lower_tod_second", + PTPTM_PTPTM_PP1S_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0124, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_high_tod_second", + PTPTM_PTPTM_PP1S_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0128, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_fracnanosecond", + PTPTM_PTPTM_PP1S_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x012c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn_time_configuration", + PTPTM_PTPTM_TSN_TIME_CONFIGURATIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0140, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ptptm_ptptm_tsn_time_configuration_reg, + NULL, + NULL, + }, + { + "tsn_timer_control", + PTPTM_PTPTM_TSN_TIMER_CONTROLr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00144, + (32 / 8), + 0, + 0, + 0, + 0, + 4, + g_ptptm_ptptm_tsn_timer_control_reg, + NULL, + 
NULL, + }, + { + "tsn0_clock_cycle_integer", + PTPTM_PTPTM_TSN0_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x00148, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "tsn0_clock_cycle_fraction", + PTPTM_PTPTM_TSN0_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x014c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "tsn1_clock_cycle_integer", + PTPTM_PTPTM_TSN1_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0150, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "tsn1_clock_cycle_fraction", + PTPTM_PTPTM_TSN1_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0154, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "tsn2_clock_cycle_integer", + PTPTM_PTPTM_TSN2_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0158, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "tsn2_clock_cycle_fraction", + PTPTM_PTPTM_TSN2_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x015c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "tsn3_clock_cycle_integer", + PTPTM_PTPTM_TSN3_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0160, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "tsn3_clock_cycle_fraction", + PTPTM_PTPTM_TSN3_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + 
SYS_PTP1_BASE_ADDR + 0x0164, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "tsn0_adjust_tod_nanosecond", + PTPTM_PTPTM_TSN0_ADJUST_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0180, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_adjust_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn0_adjust_lower_tod_second", + PTPTM_PTPTM_TSN0_ADJUST_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0184, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_adjust_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn0_adjust_high_tod_second", + PTPTM_PTPTM_TSN0_ADJUST_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0188, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_adjust_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn0_adjust_fracnanosecond", + PTPTM_PTPTM_TSN0_ADJUST_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x018c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_adjust_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn1_adjust_tod_nanosecond", + PTPTM_PTPTM_TSN1_ADJUST_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0190, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_adjust_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn1_adjust_lower_tod_second", + PTPTM_PTPTM_TSN1_ADJUST_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0194, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_adjust_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_adjust_high_tod_second", + PTPTM_PTPTM_TSN1_ADJUST_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0198, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ptptm_ptptm_tsn1_adjust_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_adjust_fracnanosecond", + PTPTM_PTPTM_TSN1_ADJUST_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x019c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_adjust_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn2_adjust_tod_nanosecond", + PTPTM_PTPTM_TSN2_ADJUST_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_adjust_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn2_adjust_lower_tod_second", + PTPTM_PTPTM_TSN2_ADJUST_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_adjust_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_adjust_high_tod_second", + PTPTM_PTPTM_TSN2_ADJUST_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01a8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_adjust_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_adjust_fracnanosecond", + PTPTM_PTPTM_TSN2_ADJUST_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01ac, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_adjust_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn3_adjust_tod_nanosecond", + PTPTM_PTPTM_TSN3_ADJUST_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01b0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_adjust_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn3_adjust_lower_tod_second", + PTPTM_PTPTM_TSN3_ADJUST_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01b4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_adjust_lower_tod_second_reg, + NULL, + NULL, + }, + { + 
"tsn3_adjust_high_tod_second", + PTPTM_PTPTM_TSN3_ADJUST_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01b8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_adjust_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn3_adjust_fracnanosecond", + PTPTM_PTPTM_TSN3_ADJUST_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01bc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_adjust_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn0_latch_tod_nanosecond", + PTPTM_PTPTM_TSN0_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01c0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn0_latch_lower_tod_second", + PTPTM_PTPTM_TSN0_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01c4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn0_latch_high_tod_second", + PTPTM_PTPTM_TSN0_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01c8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn0_latch_fracnanosecond", + PTPTM_PTPTM_TSN0_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01cc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn1_latch_tod_nanosecond", + PTPTM_PTPTM_TSN1_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01d0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn1_latch_lower_tod_second", + PTPTM_PTPTM_TSN1_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + 
DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01d4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_latch_high_tod_second", + PTPTM_PTPTM_TSN1_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01d8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_latch_fracnanosecond", + PTPTM_PTPTM_TSN1_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01dc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn2_latch_tod_nanosecond", + PTPTM_PTPTM_TSN2_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01e0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn2_latch_lower_tod_second", + PTPTM_PTPTM_TSN2_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01e4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_latch_high_tod_second", + PTPTM_PTPTM_TSN2_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01e8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_latch_fracnanosecond", + PTPTM_PTPTM_TSN2_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01ec, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn3_latch_tod_nanosecond", + PTPTM_PTPTM_TSN3_LATCH_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01f0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + 
g_ptptm_ptptm_tsn3_latch_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn3_latch_lower_tod_second", + PTPTM_PTPTM_TSN3_LATCH_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01f4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_latch_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn3_latch_high_tod_second", + PTPTM_PTPTM_TSN3_LATCH_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01f8, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_latch_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn3_latch_fracnanosecond", + PTPTM_PTPTM_TSN3_LATCH_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x01fc, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_latch_fracnanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn0_tod_nanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN0_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0200, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn0_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn0_lower_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN0_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0204, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn0_lower_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn0_high_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN0_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0208, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn0_high_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn0_fracnanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN0_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x020c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn0_fracnanosecond_reg, + NULL, + 
NULL, + }, + { + "pp1s_latch_tsn1_tod_nanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN1_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0210, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn1_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn1_lower_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN1_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0214, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn1_lower_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn1_high_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN1_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0218, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn1_high_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn1_fracnanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN1_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x021c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn1_fracnanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn2_tod_nanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN2_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0220, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn2_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn2_lower_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN2_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0224, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn2_lower_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn2_high_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN2_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0228, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn2_high_tod_second_reg, + NULL, + NULL, + }, + { 
+ "pp1s_latch_tsn2_fracnanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN2_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x022c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn2_fracnanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn3_tod_nanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN3_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0230, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn3_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn3_lower_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN3_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0234, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn3_lower_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn3_high_tod_second", + PTPTM_PTPTM_PP1S_LATCH_TSN3_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0238, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn3_high_tod_second_reg, + NULL, + NULL, + }, + { + "pp1s_latch_tsn3_fracnanosecond", + PTPTM_PTPTM_PP1S_LATCH_TSN3_FRACNANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x023c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_pp1s_latch_tsn3_fracnanosecond_reg, + NULL, + NULL, + }, + { + "tsn0_real_tod_nanosecond", + PTPTM_PTPTM_TSN0_REAL_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0240, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_real_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn0_real_lower_tod_second", + PTPTM_PTPTM_TSN0_REAL_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0244, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_real_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn0_real_high_tod_second", + 
PTPTM_PTPTM_TSN0_REAL_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0248, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn0_real_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_real_tod_nanosecond", + PTPTM_PTPTM_TSN1_REAL_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x024c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_real_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn1_real_lower_tod_second", + PTPTM_PTPTM_TSN1_REAL_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0250, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_real_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn1_real_high_tod_second", + PTPTM_PTPTM_TSN1_REAL_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0254, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn1_real_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_real_tod_nanosecond", + PTPTM_PTPTM_TSN2_REAL_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0258, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_real_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn2_real_lower_tod_second", + PTPTM_PTPTM_TSN2_REAL_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x025c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_real_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn2_real_high_tod_second", + PTPTM_PTPTM_TSN2_REAL_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0260, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn2_real_high_tod_second_reg, + NULL, + NULL, + }, + { + "tsn3_real_tod_nanosecond", + PTPTM_PTPTM_TSN3_REAL_TOD_NANOSECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0264, + (32 / 8), + 
0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_real_tod_nanosecond_reg, + NULL, + NULL, + }, + { + "tsn3_real_lower_tod_second", + PTPTM_PTPTM_TSN3_REAL_LOWER_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0268, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_real_lower_tod_second_reg, + NULL, + NULL, + }, + { + "tsn3_real_high_tod_second", + PTPTM_PTPTM_TSN3_REAL_HIGH_TOD_SECONDr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x026c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_tsn3_real_high_tod_second_reg, + NULL, + NULL, + }, + { + "real_ptp_clock_cycle_integer", + PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0280, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_ptp_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "real_ptp_clock_cycle_fraction", + PTPTM_PTPTM_REAL_PTP_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0284, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_ptp_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "real_tsn0_clock_cycle_integer", + PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0288, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn0_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "real_tsn0_clock_cycle_fraction", + PTPTM_PTPTM_REAL_TSN0_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x028c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn0_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "real_tsn1_clock_cycle_integer", + PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0290, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn1_clock_cycle_integer_reg, + 
NULL, + NULL, + }, + { + "real_tsn1_clock_cycle_fraction", + PTPTM_PTPTM_REAL_TSN1_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0294, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn1_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "real_tsn2_clock_cycle_integer", + PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x0298, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn2_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "real_tsn2_clock_cycle_fraction", + PTPTM_PTPTM_REAL_TSN2_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x029c, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn2_clock_cycle_fraction_reg, + NULL, + NULL, + }, + { + "real_tsn3_clock_cycle_integer", + PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_INTEGERr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x02a0, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn3_clock_cycle_integer_reg, + NULL, + NULL, + }, + { + "real_tsn3_clock_cycle_fraction", + PTPTM_PTPTM_REAL_TSN3_CLOCK_CYCLE_FRACTIONr, + PTPTM, + DPP_REG_FLAG_DIRECT, + DPP_REG_NUL_ARRAY, + SYS_PTP1_BASE_ADDR + 0x02a4, + (32 / 8), + 0, + 0, + 0, + 0, + 1, + g_ptptm_ptptm_real_tsn3_clock_cycle_fraction_reg, + NULL, + NULL, + }, +}; diff --git a/drivers/net/ethernet/dinghai/en_np/table/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/table/Kbuild.include new file mode 100644 index 000000000000..df016a9a040b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/Kbuild.include @@ -0,0 +1,4 @@ +cur_dir := en_np/table/ +subdirs := source/ +src_files += +include $(foreach subdir, $(subdirs), $(dinghai_root)/$(cur_dir)$(subdir)/Kbuild.include) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_api.h 
b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_api.h new file mode 100644 index 000000000000..47876f9ba596 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_api.h @@ -0,0 +1,565 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_api.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_API_H +#define DPP_TBL_API_H + +#include "zxic_common.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_acl.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_stat.h" + +// byte[15:16] +// #define SRIOV_VPORT_TPID ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, rsv6) / sizeof(ZXIC_UINT32))) +// byte[13:14] +#define SRIOV_VPORT_1588_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, flag_1588_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_VHCA \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, vhca) / \ + sizeof(ZXIC_UINT32))) +// byte[12] +#define SRIOV_VPORT_RSS_HASH_FACTOR \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, rss_hash_factor) / \ + sizeof(ZXIC_UINT32))) +// byte[11] +#define SRIOV_VPORT_HASH_ALG \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, hash_alg) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_UPLINK_PHY_PORT_ID \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, uplink_phy_port_id) / \ + sizeof(ZXIC_UINT32))) +// byte[9:10] +#define SRIOV_VPORT_LAG_ID \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, lag_id) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_FD_VXLAN_OFFLOAD_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, fd_vxlan_offload_en) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_PF_VQM_VFID \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, pf_vqm_vfid) / \ + sizeof(ZXIC_UINT32))) +// byte[7:8] +#define SRIOV_VPORT_MTU \ + 
((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, mtu) / sizeof(ZXIC_UINT32))) +// byte[5:6] +#define SRIOV_VPORT_HASH_SEARCH_INDEX \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, hash_search_index) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_PORT_BASE_QID \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, port_base_qid) / \ + sizeof(ZXIC_UINT32))) +// byte[4] +#define SRIOV_VPORT_SPOOFCHK_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, spoof_check_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_INGRESS_TM_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_ingress_tm_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_EGRESS_TM_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_egress_tm_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_INGRESS_MODE \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_ingress_meter_mode) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_EGRESS_MODE \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_egress_meter_mode) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_INGRESS_METER_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_ingress_meter_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_NP_EGRESS_METER_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, np_egress_meter_enable) / \ + sizeof(ZXIC_UINT32))) +// byte[3] +#define SRIOV_VPORT_VIRTIO_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, virtio_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_VIRTIO_VERSION \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, virtio_version) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IS_VF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, is_vf) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_VEPA_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, vepa_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_LAG_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, lag_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_FD_EN_OFF \ + 
((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, fd_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_INLINE_SEC_OFFLOAD \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, inline_sec_offload) / \ + sizeof(ZXIC_UINT32))) +// byte[2] +#define SRIOV_VPORT_BUSINESS_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, business_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IS_UP \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, is_up) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_OUTER_IP_CHECKSUM_OFFLOAD \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, \ + outer_ip_checksum_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IP_CHKSUM \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, ip_checksum_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_TCP_UDP_CHKSUM \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, \ + tcp_udp_checksum_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IP_RECOMBINE \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, ip_recombine_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IPV4_TCP_ASSEMBLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, lro_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_IPV6_TCP_ASSEMBLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, lro_offload) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, \ + accelerator_offload_flag) / \ + sizeof(ZXIC_UINT32))) +// byte[1] +#define SRIOV_VPORT_HW_BOND_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, hw_bond_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_RDMA_OFFLOAD_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, rdma_offload_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_PROMISC_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, promisc_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_VLAN_OFFLOAD_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, \ + sriov_vlan_offload_enable) / \ + sizeof(ZXIC_UINT32))) +#define 
SRIOV_VPORT_BUSINESS_VLAN_OFFLOAD_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, \ + sriov_business_vlan_offload_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_RSS_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, rss_enable) / \ + sizeof(ZXIC_UINT32))) +#define SRIOV_VPORT_MTU_OFFLOAD_EN_OFF \ + ((ZXIC_UINT32)(offsetof(ZXDH_SRIOV_VPORT_T, mtu_offload_enable) / \ + sizeof(ZXIC_UINT32))) + +#define UPLINK_PHY_PORT_PF_VQM_VFID \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, pf_vqm_vfid) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_LACP_PF_MEMPORT_QID \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, lacp_pf_memport_qid) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_LACP_PF_VQM_VFID \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, lacp_pf_vqm_vfid) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_IS_UP \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, is_up) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_BOND_LINK_UP \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, bond_link_up) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_HW_BOND_ENABLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, hw_bond_enable) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_MTU \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, mtu) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_MTU_OFFLOAD_ENABLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, mtu_offload_enable) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_TM_BASE_QUEUE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, tm_base_queue) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_PTP_PORT_VFID \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, ptp_port_vfid) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_MAGIC_PACKET_ENABLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, magic_packet_enable) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_TM_SHAPE_ENABLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, tm_shape_enable) / \ 
+ sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_PTP_TC_ENABLE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, ptp_tc_enable) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_TRUST_MODE \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, trust_mode) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_PRIMARY_PF_VQM_VFID \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, primary_pf_vqm_vfid) / \ + sizeof(ZXIC_UINT32))) +#define UPLINK_PHY_PORT_SRIOV_HD_BOND_EN \ + ((ZXIC_UINT32)(offsetof(ZXDH_UPLINK_PHY_PORT_T, sriov_hdbond_enable) / \ + sizeof(ZXIC_UINT32))) + +#define VLAN_SRIOV_VLAN_TCI \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, sriov_vlan_tci) / \ + sizeof(ZXIC_UINT32))) +#define VLAN_SRIOV_VLAN_TPID \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, sriov_vlan_tpid) / \ + sizeof(ZXIC_UINT32))) +#define VLAN_SRIOV_BUSINESS_VLAN_TPID \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, \ + sriov_business_vlan_tpid) / \ + sizeof(ZXIC_UINT32))) +#define VLAN_SRIOV_BUSINESS_VLAN_STRIP_OFFLIAD \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, \ + sriov_business_vlan_strip_offload) / \ + sizeof(ZXIC_UINT32))) +#define VLAN_SRIOV_BUSINESS_QINQ_VLAN_STRIP_OFFLOAD \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, \ + sriov_business_qinq_vlan_strip_offload) / \ + sizeof(ZXIC_UINT32))) +#define VLAN_SRIOV_BUSINESS_VLAN_FILTER \ + ((ZXIC_UINT32)(offsetof(ZXDH_VQM_VFID_VLAN_T, \ + sriov_business_vlan_filter) / \ + sizeof(ZXIC_UINT32))) + +#define DPP_RC_TBL_BASE (DPP_RC_DTB_BASE | 0x80000000) +#define DPP_RC_TBL_IS_FULL (DPP_RC_TBL_BASE | 0x0) + +ZXIC_UINT32 dpp_vport_create(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_create_by_vqm_vfid(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 vqm_vfid); +ZXIC_UINT32 dpp_vport_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_attr_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 attr, + ZXIC_UINT32 value); +ZXIC_UINT32 dpp_vport_attr_get(DPP_PF_INFO_T *pf_info, + ZXDH_SRIOV_VPORT_T *port_attr_entry); +ZXIC_UINT32 
dpp_vport_rx_flow_hash_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 hash_mode); +ZXIC_UINT32 dpp_vport_rx_flow_hash_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *hash_mode); +ZXIC_UINT32 dpp_vport_base_qid_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *base_qid); +ZXIC_UINT32 dpp_vport_hash_index_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *hash_index); +ZXIC_UINT32 dpp_vport_hash_funcs_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 funcs); +ZXIC_UINT32 dpp_vport_rss_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_fd_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_virtio_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_virtio_version_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 version); +ZXIC_UINT32 dpp_vport_promisc_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_business_vlan_offload_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_vlan_offload_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable); + +ZXIC_UINT32 dpp_vlan_filter_init(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_add_vlan_filter(DPP_PF_INFO_T *pf_info, ZXIC_UINT16 vlan_id); +ZXIC_UINT32 dpp_del_vlan_filter(DPP_PF_INFO_T *pf_info, ZXIC_UINT16 vlan_id); + +ZXIC_UINT32 dpp_vport_bond_pf(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_unbond_pf(DPP_PF_INFO_T *pf_info); + +ZXIC_UINT32 dpp_rxfh_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *queue_list, + ZXIC_UINT32 queue_num); +ZXIC_UINT32 dpp_rxfh_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *queue_list, + ZXIC_UINT32 queue_num); +ZXIC_UINT32 dpp_rxfh_del(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_thash_key_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 *hash_key, + ZXIC_UINT32 key_num); +ZXIC_UINT32 dpp_thash_key_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 *hash_key, + ZXIC_UINT32 key_num); +ZXIC_UINT32 dpp_add_mac(DPP_PF_INFO_T *pf_info, ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, ZXIC_UINT16 sriov_vlan_id); +ZXIC_UINT32 dpp_del_mac(DPP_PF_INFO_T *pf_info, 
ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, ZXIC_UINT16 sriov_vlan_id); +ZXIC_UINT32 dpp_unicast_mac_search(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, + ZXIC_UINT16 *current_vport); +ZXIC_UINT32 dpp_batch_add_unicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *l2key); +ZXIC_UINT32 dpp_batch_del_unicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *l2key); +ZXIC_UINT32 dpp_unicast_mac_dump(DPP_PF_INFO_T *pf_info, + MAC_VPORT_INFO *p_mac_arr, + ZXIC_UINT32 *p_mac_num); +ZXIC_UINT32 dpp_unicast_mac_transfer(DPP_PF_INFO_T *pf_info, + DPP_PF_INFO_T *new_pf_info); +ZXIC_UINT32 dpp_unicast_mac_max_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *max_num); +ZXIC_UINT32 dpp_unicast_all_mac_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_unicast_all_mac_online_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_unicast_all_mac_soft_delete(DPP_PF_INFO_T *pf_info); + +ZXIC_UINT32 dpp_multi_mac_add_member(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac); +ZXIC_UINT32 dpp_multi_mac_del_member(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac); +ZXIC_UINT32 dpp_batch_add_multicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *mac); +ZXIC_UINT32 dpp_batch_del_multicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *mac); +ZXIC_UINT32 dpp_multicast_mac_dump(DPP_PF_INFO_T *pf_info, + MAC_VPORT_INFO *p_mac_arr, + ZXIC_UINT32 *p_mac_num); +ZXIC_UINT32 dpp_multicast_mac_transfer(DPP_PF_INFO_T *pf_info, + DPP_PF_INFO_T *new_pf_info); +ZXIC_UINT32 dpp_multicast_mac_max_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *max_num); +ZXIC_UINT32 dpp_multicast_all_mac_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_multicast_all_mac_online_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_multicast_all_mac_soft_delete(DPP_PF_INFO_T *pf_info); + +ZXIC_UINT32 dpp_ptp_port_vfid_set(DPP_PF_INFO_T *pf_info, + 
ZXIC_UINT32 ptp_port_vfid); +ZXIC_UINT32 dpp_ptp_tc_enable_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 ptp_tc_enable); + +ZXIC_UINT32 dpp_ipsec_enc_entry_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT8 *sip, ZXIC_UINT8 *dip, + ZXIC_UINT8 *sip_mask, ZXIC_UINT8 *dip_mask, + ZXIC_UINT32 is_ipv4, ZXIC_UINT32 sa_id); +ZXIC_UINT32 dpp_ipsec_enc_entry_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index); + +ZXIC_UINT32 dpp_lag_group_create(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id); +ZXIC_UINT32 dpp_lag_group_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id); +ZXIC_UINT32 dpp_lag_mode_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 mode); +ZXIC_UINT32 dpp_lag_group_hash_factor_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 lag_id, ZXIC_UINT8 factor); +ZXIC_UINT32 dpp_lag_group_member_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 dpp_lag_group_member_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 dpp_lag_hit_flag_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 *hit_flag); + +ZXIC_UINT32 dpp_uplink_phy_bond_vport(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id); +ZXIC_UINT32 dpp_uplink_phy_hardware_bond_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_uplink_phy_lacp_pf_vqm_vfid_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT16 vqm_vfid); +ZXIC_UINT32 dpp_uplink_phy_lacp_pf_memport_qid_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT16 qid); +ZXIC_UINT32 dpp_uplink_phy_attr_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, ZXIC_UINT32 attr, + ZXIC_UINT32 value); + +ZXIC_UINT32 dpp_vport_uc_promisc_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 enable); +ZXIC_UINT32 dpp_vport_mc_promisc_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 enable); + +ZXIC_UINT32 dpp_stat_cnt_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 
dpp_stat_item_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 stat_item_no, ZXIC_UINT32 index, + ZXIC_UINT32 rd_mode, + DPP_STAT_VALUE_U *p_stat_value); +ZXIC_UINT32 dpp_stat_cnt_get_128(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_mc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_bc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_1588_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_1588_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_1588_packet_drop_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_1588_enc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_1588_enc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_spoof_packet_drop_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_mcode_packet_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_port_RDMA_packet_msg_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_RDMA_packet_msg_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_plcr_packet_drop_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 
*p_pk_cnt); +ZXIC_UINT32 dpp_stat_plcr_packet_drop_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_MTU_packet_msg_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_MTU_packet_msg_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_uc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_uc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_mc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_mc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_bc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_port_bc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); +ZXIC_UINT32 dpp_stat_fd_stat_cnt_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt); + +ZXIC_UINT32 dpp_vport_vhca_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 vhca_id); +ZXIC_UINT32 dpp_vport_vhca_id_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 vhca_id); +ZXIC_UINT32 dpp_add_rdma_trans_item(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + ZXIC_CONST ZXIC_UINT16 vhcaId); +ZXIC_UINT32 dpp_del_rdma_trans_item(DPP_PF_INFO_T *pf_info, 
+ ZXIC_CONST ZXIC_VOID *mac); +ZXIC_UINT32 dpp_rdma_trans_item_soft_delete(DPP_PF_INFO_T *pf_info); + +ZXIC_UINT32 dpp_vqm_vfid_vlan_init(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vqm_vfid_vlan_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vqm_vfid_vlan_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 attr, + ZXIC_UINT32 value); +ZXIC_UINT32 dpp_vqm_vfid_vlan_get(DPP_PF_INFO_T *pf_info, + ZXDH_VQM_VFID_VLAN_T *vqm_vfid_vlan_entry); +ZXIC_UINT32 dpp_fd_acl_index_request(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_index); +ZXIC_UINT32 dpp_fd_acl_index_release(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index); +ZXIC_UINT32 dpp_fd_acl_entry_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result); +ZXIC_UINT32 dpp_fd_acl_entry_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index); +ZXIC_UINT32 dpp_fd_acl_entry_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result); +ZXIC_UINT32 dpp_fd_acl_entry_search(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result); +ZXIC_UINT32 dpp_fd_acl_all_delete(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_fd_acl_stat_clear(DPP_PF_INFO_T *pf_info); + +ZXIC_UINT32 dpp_glb_cfg_set_0(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_0); +ZXIC_UINT32 dpp_glb_cfg_set_1(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_1); +ZXIC_UINT32 dpp_glb_cfg_set_2(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_2); +ZXIC_UINT32 dpp_glb_cfg_set_3(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_3); +ZXIC_UINT32 dpp_glb_cfg_get_0(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_0); +ZXIC_UINT32 dpp_glb_cfg_get_1(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_1); +ZXIC_UINT32 dpp_glb_cfg_get_2(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_2); +ZXIC_UINT32 dpp_glb_cfg_get_3(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_3); +ZXIC_UINT32 dpp_l2d_psn_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 
psn_cfg); +ZXIC_UINT32 dpp_l2d_psn_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *p_psn_cfg); +ZXIC_UINT32 dpp_stat_asn_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_psn_phyport_tx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_psn_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt); +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1); +ZXIC_UINT32 dpp_mcode_feature_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT64 *feature); + +ZXIC_UINT32 dpp_eram_entry_insert(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 index, ZXIC_UINT8 *p_data); +ZXIC_UINT32 dpp_eram_entry_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 index); +ZXIC_UINT32 dpp_eram_entry_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + ZXIC_UINT32 index, ZXIC_UINT8 *p_data); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_bc.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_bc.h new file mode 100644 index 000000000000..2eef1f74247c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_bc.h @@ -0,0 +1,37 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_bc.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: 
+***************************************************************/ + +#ifndef DPP_TBL_BC_H +#define DPP_TBL_BC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define BC_GROUP_NUM (4) +#define BC_MEMBER_NUM_IN_GROUP (64) + +typedef struct dpp_vport_bc_info_t { + ZXIC_UINT64 bc_bitmap[BC_GROUP_NUM]; +} DPP_VPORT_BC_INFO_T; + +typedef struct dpp_vport_bc_table_t { + DPP_VPORT_BC_INFO_T bc_info; +} DPP_VPORT_BC_TABLE_T; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_cfg.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_cfg.h new file mode 100644 index 000000000000..b563917f0aff --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_cfg.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_port.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_CFG_H +#define DPP_TBL_CFG_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_comm.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_comm.h new file mode 100644 index 000000000000..3f31335c2410 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_comm.h @@ -0,0 +1,95 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_comm.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_COMM_H +#define DPP_TBL_COMM_H + +#include "zxic_common.h" 
+#include "dpp_type_api.h" +#include "dpp_tbl_mc.h" +#include "dpp_tbl_mac.h" +#include "dpp_tbl_qid.h" +#include "dpp_tbl_port.h" +#include "dpp_tbl_bc.h" +#include "dpp_tbl_promisc.h" + +#define VF_ACTIVE(VPORT) ((VPORT & 0x0800) >> 11) +#define EPID(VPORT) ((VPORT & 0x7000) >> 12) +#define FUNC_NUM(VPORT) ((VPORT & 0x0700) >> 8) +#define VFUNC_NUM(VPORT) ((VPORT & 0x00FF)) + +#define PF_VQM_VFID_OFFSET (1152) +#define IS_PF(VPORT) (!VF_ACTIVE(VPORT)) +#define VQM_VFID(VPORT) \ + (IS_PF(VPORT) ? \ + (PF_VQM_VFID_OFFSET + (EPID(VPORT) * 8) + FUNC_NUM(VPORT)) : \ + ((EPID(VPORT) * 256) + VFUNC_NUM(VPORT))) + +#define OWNER_PF_VQM_VFID(VPORT) \ + (PF_VQM_VFID_OFFSET + (EPID(VPORT) * 8) + FUNC_NUM(VPORT)) +#define OWNER_PF_VPORT(VPORT) (((EPID(VPORT)) << 12) | ((FUNC_NUM(VPORT)) << 8)) + +#define VQM_VFID_MAX_NUM (2048) + +typedef struct dpp_vport_mgr_t { + DPP_VPORT_BC_TABLE_T bc_table; + DPP_VPORT_MC_TABLE_T mc_table; + DPP_VPORT_PROMISC_TABLE_T uc_promisc_table; + DPP_VPORT_PROMISC_TABLE_T mc_promisc_table; + ZXIC_MUTEX_T *table_lock[DPP_DEV_SDT_ID_MAX]; +} DPP_VPORT_MGR_T; + +typedef struct { + uint8_t addr[6]; + uint16_t vport; + uint16_t sriov_vlan_tpid; + uint16_t sriov_vlan_id; +} MAC_VPORT_INFO; + +typedef struct { + uint8_t mc_addr[6]; + uint16_t pf_flag; +} MC_PF_FLAG_MGR; + +ZXIC_UINT32 dpp_data_print(ZXIC_UINT8 *data, ZXIC_UINT32 len); +ZXIC_UINT32 dpp_vport_attr_value_show(ZXIC_VOID); +ZXIC_UINT32 dpp_vport_mgr_init(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_mgr_release(DPP_PF_INFO_T *pf_info); +ZXIC_UINT32 dpp_vport_table_lock(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + ZXIC_MUTEX_T **table_lock); +ZXIC_UINT32 dpp_vport_table_unlock(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no); +ZXIC_UINT32 dpp_vport_bc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_BC_TABLE_T **bc_table); +ZXIC_UINT32 dpp_vport_mc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_MC_TABLE_T **mc_table); +ZXIC_UINT32 +dpp_vport_uc_promisc_table_get(DPP_PF_INFO_T *pf_info, + 
DPP_VPORT_PROMISC_TABLE_T **promisc_table); +ZXIC_UINT32 +dpp_vport_mc_promisc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_PROMISC_TABLE_T **promisc_table); +ZXIC_UINT32 dpp_vport_get_by_vqm_vfid(ZXIC_UINT16 pf_vport, + ZXIC_UINT32 vqm_vfid, ZXIC_UINT16 *vport); +ZXIC_UINT32 dpp_vport_get_by_mc_bitmap(ZXIC_UINT16 pf_vport, + ZXIC_UINT32 group_id, + ZXIC_UINT64 mc_bitmap, + ZXIC_UINT16 vport[64], + ZXIC_UINT32 *vport_num); +BOOLEAN dpp_vport_in_mc_bitmap(ZXIC_UINT32 vport, ZXIC_UINT64 mc_bitmap); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_diag.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_diag.h new file mode 100644 index 000000000000..45eaab7fd894 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_diag.h @@ -0,0 +1,518 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_diag.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_DIAG_H +#define DPP_TBL_DIAG_H + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_tbl_pkt_cap.h" + +ZXIC_CONST ZXIC_CHAR *dpp_vport_table_attr_name_get(ZXIC_UINT32 attr); +ZXIC_CONST ZXIC_CHAR *dpp_uplink_phy_port_table_attr_name_get(ZXIC_UINT32 attr); +ZXIC_CONST ZXIC_CHAR *dpp_vqm_vfid_vlan_attr_name_get(ZXIC_UINT32 attr); + +ZXIC_UINT32 diag_dpp_sdt_tbl_prt(ZXIC_UINT32 sdt_no); +ZXIC_UINT32 diag_dpp_se_smmu0_wr64(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index, + ZXIC_UINT32 data0, ZXIC_UINT32 data1); +ZXIC_UINT32 diag_dpp_se_smmu0_rd64(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_se_smmu0_wr128(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index, + 
ZXIC_UINT32 data0, ZXIC_UINT32 data1, + ZXIC_UINT32 data2, ZXIC_UINT32 data3); +ZXIC_UINT32 diag_dpp_se_smmu0_rd128(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_vport_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, ZXIC_UINT8 mac0, + ZXIC_UINT8 mac1, ZXIC_UINT8 mac2, + ZXIC_UINT8 mac3, ZXIC_UINT8 mac4, + ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, ZXIC_UINT8 mac0, + ZXIC_UINT8 mac1, ZXIC_UINT8 mac2, + ZXIC_UINT8 mac3, ZXIC_UINT8 mac4, + ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_batch_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT32 vlan_id, ZXIC_UINT16 mac16, + ZXIC_UINT32 mac32); +ZXIC_UINT32 diag_dpp_vport_batch_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT32 vlan_id, ZXIC_UINT16 mac16, + ZXIC_UINT32 mac32); +ZXIC_UINT32 diag_dpp_vport_mac_transter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 new_vport); +ZXIC_UINT32 diag_dpp_vport_mac_max_num(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mac_flush_online(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mac_flush_offline(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mac_search(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_mac_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mc_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_mc_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 
mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_batch_mc_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_batch_mc_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +ZXIC_UINT32 diag_dpp_vport_mc_mac_transter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 new_vport); +ZXIC_UINT32 diag_dpp_vport_mc_mac_max_num(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mc_mac_flush_online(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mc_mac_flush_offline(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mc_mac_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_table_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_table_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 attr, ZXIC_UINT32 value); +ZXIC_UINT32 diag_dpp_vport_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); + +ZXIC_UINT32 diag_dpp_vport_egress_meter_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_egress_meter_en_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_ingress_meter_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_ingress_meter_en_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_egress_meter_mode_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_vport_egress_meter_mode_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_ingress_meter_mode_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 mode); 
+ZXIC_UINT32 diag_dpp_vport_ingress_meter_mode_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); + +ZXIC_UINT32 diag_dpp_vport_rx_flow_hash_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 hash_mode); +ZXIC_UINT32 diag_dpp_vport_rx_flow_hash_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_hash_index_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_hash_funcs_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 funcs); +ZXIC_UINT32 diag_dpp_vport_rss_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_virtio_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_virtio_version_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 version); +ZXIC_UINT32 diag_dpp_vport_promisc_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_business_vlan_offload_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_vlan_offload_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); + +ZXIC_UINT32 diag_dpp_uplink_phy_port_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 attr, + ZXIC_UINT32 value); +ZXIC_UINT32 diag_dpp_uplink_phy_port_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 diag_dpp_uplink_phy_bond_vport(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 diag_dpp_uplink_phy_hardware_bond_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT8 enable); +ZXIC_UINT32 +diag_dpp_uplink_phy_lacp_pf_vqm_vfid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT16 vqm_vfid); +ZXIC_UINT32 +diag_dpp_uplink_phy_lacp_pf_memport_qid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT16 qid); +ZXIC_UINT32 
diag_dpp_ptp_port_vfid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 ptp_port_vfid); +ZXIC_UINT32 diag_dpp_ptp_tc_enable_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 ptp_tc_enable); +ZXIC_UINT32 diag_dpp_tm_flowid_pport_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 flow_id); +ZXIC_UINT32 diag_dpp_tm_flowid_pport_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 +diag_dpp_tm_pport_trust_mode_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 mode); +ZXIC_UINT32 +diag_dpp_tm_pport_trust_mode_table_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 diag_dpp_tm_pport_mcode_switch_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_tm_pport_mcode_switch_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id); + +ZXIC_UINT32 diag_dpp_vport_bc_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_bc_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_uc_promisc_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_uc_promisc_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vport_mc_promisc_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable); +ZXIC_UINT32 diag_dpp_vport_mc_promisc_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_rdma_trans_item_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5, + ZXIC_UINT16 vhcaId); +ZXIC_UINT32 diag_dpp_rdma_trans_item_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5); +DPP_STATUS 
diag_dpp_pcie_channel_prt(ZXIC_VOID); +DPP_STATUS diag_dpp_se_hash_stat_prt(ZXIC_UINT32 slot_id, ZXIC_UINT32 fun_id); +DPP_STATUS diag_dpp_se_hash_stat_clr(ZXIC_UINT32 slot_id, ZXIC_UINT32 fun_id); +DPP_STATUS diag_dpp_hash_item_prt(ZXIC_UINT32 slot, ZXIC_UINT32 sdt_no); +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 attr, ZXIC_UINT32 value); +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_rxfh_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 qid0, ZXIC_UINT32 qid1, + ZXIC_UINT32 qid2, ZXIC_UINT32 qid3, + ZXIC_UINT32 qnum); +ZXIC_UINT32 diag_dpp_rxfh_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_rxfh_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_thash_key_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 key0, ZXIC_UINT8 key1, + ZXIC_UINT8 key2, ZXIC_UINT8 key3, + ZXIC_UINT32 knum); +ZXIC_UINT32 diag_dpp_thash_key_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); + +ZXIC_UINT32 diag_dpp_vport_register_info_prt(ZXIC_VOID); + +ZXIC_UINT32 diag_dpp_stat_mc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_bc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_1588_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_1588_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_1588_packet_drop_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_1588_enc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, 
+ ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_1588_enc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_spoof_packet_drop_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_mcode_packet_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_RDMA_packet_msg_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_RDMA_packet_msg_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_plcr_packet_drop_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_plcr_packet_drop_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_MTU_packet_msg_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_MTU_packet_msg_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_uc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_uc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_mc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_mc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_bc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_port_bc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 
vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_asn_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_psn_phyport_tx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_psn_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); +ZXIC_UINT32 diag_dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode); + +ZXIC_UINT32 diag_dpp_lag_group_create(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id); +ZXIC_UINT32 diag_dpp_lag_group_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id); +ZXIC_UINT32 diag_dpp_lag_mode_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, ZXIC_UINT8 mode); +ZXIC_UINT32 diag_dpp_lag_group_hash_factor_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 factor); +ZXIC_UINT32 diag_dpp_lag_group_member_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 diag_dpp_lag_group_member_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id); +ZXIC_UINT32 diag_dpp_lag_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id); +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id, + ZXIC_UINT32 up_id); +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id); +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id); +ZXIC_UINT32 
diag_dpp_tm_pport_up_map_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id, + ZXIC_UINT32 tc_id); +ZXIC_UINT32 diag_dpp_tm_pport_up_map_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id); +ZXIC_UINT32 diag_dpp_tm_pport_up_map_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id); +ZXIC_UINT32 diag_dpp_vport_vhca_id_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id); +ZXIC_UINT32 diag_dpp_vport_vhca_id_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id); +ZXIC_UINT32 diag_dpp_vport_vhca_id_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id); +ZXIC_UINT32 diag_dpp_vport_reset(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_vlan_filter_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_add_vlan_filter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 vlan_id); +ZXIC_UINT32 diag_dpp_del_vlan_filter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 vlan_id); +ZXIC_UINT32 diag_dpp_vlan_filter_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vlan_group_id); +ZXIC_VOID diag_dpp_fd_cfg_pre1(ZXIC_UINT32 smac, ZXIC_UINT32 dmac, + ZXIC_UINT32 sip, ZXIC_UINT32 dip, + ZXIC_UINT32 sport, ZXIC_UINT32 dport); +ZXIC_VOID diag_dpp_fd_cfg_pre2(ZXIC_UINT32 ethertype, ZXIC_UINT32 cvlan_pri, + ZXIC_UINT32 vlan, ZXIC_UINT32 vxlan_vni, + ZXIC_UINT32 vqm_vfid); +ZXIC_VOID diag_dpp_fd_cfg_pre3(ZXIC_UINT32 action_index, + ZXIC_UINT32 action_index2, ZXIC_UINT32 count_id, + ZXIC_UINT32 hash_alg, + ZXIC_UINT32 rss_hash_factor); +ZXIC_VOID diag_dpp_fd_cfg_pre4(ZXIC_UINT32 uplink_fd_id, ZXIC_UINT32 v_qid); +ZXIC_UINT32 diag_dpp_fd_cfg_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_fd_cfg_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_fd_cfg_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index); +ZXIC_UINT32 
diag_dpp_fd_cfg_search(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_fd_acl_index_req(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_fd_acl_index_rel(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_fd_acl_all_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_VOID diag_dpp_acl_glb_data_prt(ZXIC_VOID); +DPP_STATUS diag_dpp_dtb_stat_ppu_cnt_clr(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 rd_mode, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 num); +DPP_STATUS diag_dpp_fd_acl_stat_clear(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_eram_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_hash_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_acl_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_lpm_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_ddr_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_se_stat_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_VOID diag_dpp_eram_data_stub(ZXIC_UINT32 data0, ZXIC_UINT32 data1, + ZXIC_UINT32 data2, ZXIC_UINT32 data3); +DPP_STATUS diag_dpp_eram_entry_insert(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index); +DPP_STATUS diag_dpp_eram_entry_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index); +DPP_STATUS diag_dpp_eram_entry_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index); +DPP_STATUS diag_dpp_stat_item_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 stat_item_no); +DPP_STATUS diag_dpp_stat_item_prt_all(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +DPP_STATUS diag_dpp_stat_item_cnt_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 stat_item_no, + ZXIC_UINT32 index, ZXIC_UINT32 rd_mode); +ZXIC_UINT32 diag_dpp_glb_cfg_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 glb_cfg_data_0, + ZXIC_UINT32 glb_cfg_data_1, + ZXIC_UINT32 
glb_cfg_data_2, + ZXIC_UINT32 glb_cfg_data_3); +ZXIC_UINT32 diag_dpp_glb_cfg_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_pkt_capture_enable(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXDH_PKT_CAP_POINT capture_pkt_flag); +ZXIC_UINT32 diag_dpp_pkt_capture_disable(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXDH_PKT_CAP_POINT capture_pkt_flag); +ZXIC_UINT32 diag_dpp_pkt_capture_disable_all(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_pkt_capture_enable_status_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_pkt_capture_rule_index_to_tcam_index( + ZXIC_UINT32 rule_index, ZXDH_PKT_CAP_MODE rule_mode, + ZXDH_PKT_CAP_POINT capture_pkt_flag); +ZXIC_UINT32 +diag_dpp_pkt_capture_tcam_index_to_rule_index(ZXIC_UINT32 tcam_index); +ZXIC_UINT32 diag_dpp_pkt_capture_item_l3_set( + ZXIC_UINT32 sip_0, ZXIC_UINT32 sip_1, ZXIC_UINT32 sip_2, + ZXIC_UINT32 sip_3, ZXIC_UINT32 dip_0, ZXIC_UINT32 dip_1, + ZXIC_UINT32 dip_2, ZXIC_UINT32 dip_3, ZXIC_UINT8 protocol); +ZXIC_UINT32 diag_dpp_pkt_capture_item_l2_set(ZXIC_UINT16 dmac_0, + ZXIC_UINT32 dmac_1, + ZXIC_UINT16 smac_0, + ZXIC_UINT32 smac_1, + ZXIC_UINT16 ethtype); +ZXIC_UINT32 diag_dpp_pkt_capture_item_l4_set(ZXIC_UINT16 dport, + ZXIC_UINT16 sport, ZXIC_UINT32 qp); +ZXIC_UINT32 diag_dpp_pkt_capture_item_kw_set(ZXIC_UINT32 kw_0, ZXIC_UINT32 kw_1, + ZXIC_UINT32 kw_2, ZXIC_UINT32 kw_3, + ZXIC_UINT16 kw_off, + ZXIC_UINT8 kw_len); +ZXIC_UINT32 diag_dpp_pkt_capture_item_insert( + ZXIC_UINT16 slot, ZXIC_UINT16 vport, ZXIC_UINT32 tcam_index, + ZXIC_UINT16 rule_config, ZXIC_UINT8 capture_pkt_flag, + ZXIC_UINT8 panel_id, ZXIC_UINT16 vqm_vfid, ZXIC_UINT16 vhca_id); +ZXIC_UINT32 diag_dpp_pkt_capture_item_delete(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 tcam_index); +ZXIC_UINT32 diag_dpp_pkt_capture_table_dump(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_pkt_capture_table_flush(ZXIC_UINT16 slot, + ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_pkt_capture_speed_set(ZXIC_UINT16 
slot, ZXIC_UINT16 vport, + ZXIC_UINT32 speed); +ZXIC_UINT32 diag_dpp_pkt_capture_speed_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_mcode_feature_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index); +ZXIC_UINT32 diag_dpp_pktrx_mcode_glb_cfg_write(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1); +ZXIC_UINT32 diag_dpp_l2d_psn_cfg_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 psn_cfg); +ZXIC_UINT32 diag_dpp_l2d_psn_cfg_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport); +ZXIC_UINT32 diag_dpp_dtb_dump_test(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 num, ZXIC_UINT32 flag); +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_fd.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_fd.h new file mode 100644 index 000000000000..3bcc12874ccd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_fd.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_fd.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2024/09/20 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_IPSEC_H +#define DPP_TBL_IPSEC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ipsec.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ipsec.h new file mode 100644 index 000000000000..b08438e21848 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ipsec.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_ipsec.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: 
ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_IPSEC_H +#define DPP_TBL_IPSEC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_lag.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_lag.h new file mode 100644 index 000000000000..206a4639b1c3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_lag.h @@ -0,0 +1,29 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_lag.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_LAG_H +#define DPP_TBL_LAG_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define LAG_LACP_MODE (2) +#define LAG_STANDBY_MODE (1) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mac.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mac.h new file mode 100644 index 000000000000..1eeea0a24544 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mac.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_mac.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_MAC_H +#define DPP_TBL_MAC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mc.h 
b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mc.h new file mode 100644 index 000000000000..c5290fe9f5f7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_mc.h @@ -0,0 +1,41 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_mc.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_MC_H +#define DPP_TBL_MC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define MC_TABLE_SIZE (1028) +#define MC_GROUP_NUM (4) +#define MC_MEMBER_NUM_IN_GROUP (64) + +typedef struct dpp_vport_mc_info_t { + ZXIC_UINT32 is_valid; + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 mc_pf_enable; + ZXIC_UINT64 mc_bitmap[MC_GROUP_NUM]; +} DPP_VPORT_MC_INFO_T; + +typedef struct dpp_vport_mc_table_t { + DPP_VPORT_MC_INFO_T *mc_info; +} DPP_VPORT_MC_TABLE_T; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_pkt_cap.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_pkt_cap.h new file mode 100644 index 000000000000..dfa4fc27d962 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_pkt_cap.h @@ -0,0 +1,251 @@ +#ifndef _DPP_PKT_CAP_TBL_H_ +#define _DPP_PKT_CAP_TBL_H_ + +#include "zxic_common.h" +#include "dpp_apt_se_api.h" +#include "dpp_drv_acl.h" + +#define DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET (18U) +#define DH_PKT_CAP_POINT_IN_MF_GLOBAL_LENGTH (6U) + +#define DH_PKT_CAP_POINT_NORMAL_RULE_NUM (10U) +#define DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM (2U) + +#define DH_PKT_CAP_TCAM_ITEM_NUM \ + (DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM + \ + DH_PKT_CAP_POINT_RDMA_RX * DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM) + +#define DH_PKT_CAP_SPEED_MIN (0U) +#define DH_PKT_CAP_SPEED_MAX (300000U) +#define DH_PKT_CAP_SPEED_DEFAULT 
(10000U) + +typedef enum zxdh_pkt_cap_point { + DH_PKT_CAP_POINT_PANEL_RX = 0, + DH_PKT_CAP_POINT_PANEL_TX = 1, + DH_PKT_CAP_POINT_VQM_RX = 2, + DH_PKT_CAP_POINT_VQM_TX = 3, + DH_PKT_CAP_POINT_RDMA_RX = 4, + DH_PKT_CAP_POINT_RDMA_TX = 5, + DH_PKT_CAP_POINT_MAX = 6, +} ZXDH_PKT_CAP_POINT; + +typedef enum zxdh_pkt_cap_mode { + DH_PKT_CAP_MODE_NORMAL = 0, + DH_PKT_CAP_MODE_KEY_WORD = 1, + DH_PKT_CAP_MODE_MAX = 2, +} ZXDH_PKT_CAP_MODE; + +typedef struct zxdh_pkt_cap_enable_status { + ZXIC_UINT8 panel_rx_enable_status; + ZXIC_UINT8 panel_tx_enable_status; + ZXIC_UINT8 vqm_rx_enable_status; + ZXIC_UINT8 vqm_tx_enable_status; + ZXIC_UINT8 rdma_rx_enable_status; + ZXIC_UINT8 rdma_tx_enable_status; +} ZXDH_PKT_CAP_ENABLE_STATUS; + +typedef struct zxdh_pkt_cap_normal_configure { + ZXIC_UINT16 rsv : 6; + ZXIC_UINT16 sourceid : 1; + ZXIC_UINT16 dmac : 1; + ZXIC_UINT16 smac : 1; + ZXIC_UINT16 ethtype : 1; + ZXIC_UINT16 sip : 1; + ZXIC_UINT16 dip : 1; + ZXIC_UINT16 sport : 1; + ZXIC_UINT16 dport : 1; + ZXIC_UINT16 protocol : 1; + ZXIC_UINT16 qp : 1; +} ZXDH_PKT_CAP_NORMAL_CONFIG; + +typedef struct zxdh_pkt_cap_rule { + ZXIC_UINT16 dst_vqm_vfid; + ZXDH_PKT_CAP_NORMAL_CONFIG rule_config; + ZXIC_UINT32 tcam_index; + ZXDH_PKT_CAP_KEY pkt_cap_key; +} ZXDH_PKT_CAP_RULE; + +/***********************************************************/ +/** 抓包初始化函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_init(DPP_PF_INFO_T *pf_info); + +/***********************************************************/ +/** 抓包退出函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_uninit(DPP_PF_INFO_T *pf_info); + +/***********************************************************/ +/** 抓包点使能函数 
+* @param pf_info 镜像流上送PF的信息 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_enable(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_POINT capture_pkt_flag); + +/***********************************************************/ +/** 抓包点去使能函数 +* @param pf_info 镜像流上送PF的信息 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_disable(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_POINT capture_pkt_flag); + +/***********************************************************/ +/** 抓包点全去使能函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_disable_all(DPP_PF_INFO_T *pf_info); + +/***********************************************************/ +/** 抓包点使能状态获取函数 +* @param pf_info 镜像流上送PF的信息 +* @param enable_status 抓包点状态 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 +dpp_pkt_capture_enable_status_get(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_ENABLE_STATUS *enable_status); + +/***********************************************************/ +/** 将rule_index转换成tcam_index +* @param rule_index 规则索引 +* @param rule_mode 规则模式 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* @param tcam_index tcam表索引 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 
+************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_rule_index_to_tcam_index( + ZXIC_UINT32 rule_index, ZXDH_PKT_CAP_MODE rule_mode, + ZXDH_PKT_CAP_POINT capture_pkt_flag, ZXIC_UINT32 *tcam_index); + +/***********************************************************/ +/** 将tcam_index转换成rule_index +* @param tcam_index tcam表索引 +* @param rule_mode 规则模式 +* @param rule_index 规则索引 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 +dpp_pkt_capture_tcam_index_to_rule_index(ZXIC_UINT32 tcam_index, + ZXDH_PKT_CAP_MODE *rule_mode, + ZXIC_UINT32 *rule_index); + +/***********************************************************/ +/** 插入表项 +* @param pf_info 镜像流上送PF的信息 +* @param rule 表项信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_item_insert(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_RULE *rule); + +/***********************************************************/ +/** 删除表项 +* @param pf_info 镜像流上送PF的信息 +* @param tcam_index TCAM表索引 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_item_delete(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 tcam_index); + +/***********************************************************/ +/** 获取所有表项 +* @param pf_info 镜像流上送PF的信息 +* @param rule_array rule数组 +* @param entry_num 表项个数 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_table_dump(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_RULE *rule_array, + ZXIC_UINT32 *entry_num); + +/***********************************************************/ +/** 删除所有表项 +* @param 
pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_table_flush(DPP_PF_INFO_T *pf_info); + +/***********************************************************/ +/** 设置镜像流速率 +* @param pf_info 镜像流上送PF的信息 +* @param speed_kbps 镜像流速率kbps +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_speed_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 speed_kbps); + +/***********************************************************/ +/** 获取镜像流速率 +* @param pf_info 镜像流上送PF的信息 +* @param speed_kbps 镜像流速率kbps +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_speed_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *speed_kbps); + +#endif // !_DPP_PKT_CAP_TBL_H_ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_plcr.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_plcr.h new file mode 100644 index 000000000000..cd006fb2648e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_plcr.h @@ -0,0 +1,28 @@ +#ifndef DPP_TBL_PLCR_H +#define DPP_TBL_PLCR_H + +#include "dpp_dev.h" +#include "dpp_tbl_comm.h" + +ZXIC_UINT32 dpp_vport_egress_meter_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_ingress_meter_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable); +ZXIC_UINT32 dpp_vport_egress_meter_mode_set( + DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 + mode); //0表示模式2: 通过vport映射flow_id;1表示模式1: 通过队列映射flow_id +ZXIC_UINT32 dpp_vport_ingress_meter_mode_set( + DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 + mode); //0表示模式2: 通过vport映射flow_id;1表示模式1: 通过队列映射flow_id +ZXIC_UINT32 
dpp_vport_egress_meter_en_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *enable); +ZXIC_UINT32 dpp_vport_ingress_meter_en_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *enable); +ZXIC_UINT32 dpp_vport_egress_meter_mode_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *mode); +ZXIC_UINT32 dpp_vport_ingress_meter_mode_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *mode); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_port.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_port.h new file mode 100644 index 000000000000..af2126055221 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_port.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_port.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_PORT_H +#define DPP_TBL_PORT_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_promisc.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_promisc.h new file mode 100644 index 000000000000..8e0be17c3905 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_promisc.h @@ -0,0 +1,38 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_promisc.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_PROMISC_H +#define DPP_TBL_PROMISC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define PROMISC_GROUP_NUM (4) 
+#define PROMISC_MEMBER_NUM_IN_GROUP (64) + +typedef struct dpp_vport_promisc_info_t { + ZXIC_UINT32 pf_enable; + ZXIC_UINT64 bitmap[PROMISC_GROUP_NUM]; +} DPP_VPORT_PROMISC_INFO_T; + +typedef struct dpp_vport_uc_promisc_table_t { + DPP_VPORT_PROMISC_INFO_T promisc_info; +} DPP_VPORT_PROMISC_TABLE_T; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ptp.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ptp.h new file mode 100644 index 000000000000..eca6ec5e5279 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_ptp.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_port.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_PTP_H +#define DPP_TBL_PTP_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_qid.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_qid.h new file mode 100644 index 000000000000..fdb2832f898c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_qid.h @@ -0,0 +1,28 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_qid.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_QID_H +#define DPP_TBL_QID_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define RSS_TO_VQID_GROUP_NUM (32) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_rdma.h 
b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_rdma.h new file mode 100644 index 000000000000..7a13344f6e5c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_rdma.h @@ -0,0 +1,33 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_mac.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_MAC_H +#define DPP_TBL_MAC_H + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_dev.h" + +ZXIC_UINT32 dpp_add_rdma_trans_item(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + ZXIC_CONST ZXIC_UINT16 vhcaId); +ZXIC_UINT32 dpp_del_rdma_trans_item(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac); + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_stat.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_stat.h new file mode 100644 index 000000000000..a124fc21bc70 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_stat.h @@ -0,0 +1,193 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_stat.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_STAT_H +#define DPP_TBL_STAT_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +typedef enum { + DPP_STAT_ITEM_RX_TOTAL_BYTES = 0, + DPP_STAT_ITEM_TX_TOTAL_BYTES = 1, + DPP_STAT_ITEM_RX_UNICAST_PKTS = 2, + DPP_STAT_ITEM_RX_MULTICAST_PKTS = 3, + DPP_STAT_ITEM_RX_BROADCAST_PKTS = 4, + DPP_STAT_ITEM_TX_UNICAST_PKTS = 5, + DPP_STAT_ITEM_TX_MULTICAST_PKTS = 6, + 
DPP_STAT_ITEM_TX_BROADCAST_PKTS = 7, + DPP_STAT_ITEM_RX_UNDERSIZE_PKTS = 8, + DPP_STAT_ITEM_RX_64_OCTERS_PKTS = 9, + DPP_STAT_ITEM_RX_65_127_OCTERS_PKTS = 10, + DPP_STAT_ITEM_RX_128_255_OCTERS_PKTS = 11, + DPP_STAT_ITEM_RX_256_511_OCTERS_PKTS = 12, + DPP_STAT_ITEM_RX_512_1023_OCTERS_PKTS = 13, + DPP_STAT_ITEM_RX_1024_1522_OCTERS_PKTS = 14, + DPP_STAT_ITEM_RX_1023_9022_OCTERS_PKTS = 15, + DPP_STAT_ITEM_TX_64_OCTERS_PKTS = 16, + DPP_STAT_ITEM_TX_65_127_OCTERS_PKTS = 17, + DPP_STAT_ITEM_TX_128_255_OCTERS_PKTS = 18, + DPP_STAT_ITEM_TX_256_511_OCTERS_PKTS = 19, + DPP_STAT_ITEM_TX_512_1023_OCTERS_PKTS = 20, + DPP_STAT_ITEM_TX_1024_1522_OCTERS_PKTS = 21, + DPP_STAT_ITEM_TX_1523_9022_OCTERS_PKTS = 22, + DPP_STAT_ITEM_RX_NO_ERROR_BYTES = 23, + DPP_STAT_ITEM_RX_ERROR_UNDERSIZE_PKTS = 24, + DPP_STAT_ITEM_RX_ERROR_JABBER_PKTS = 25, + DPP_STAT_ITEM_RX_PF_MULTICAST_PKTS = 26, + DPP_STAT_ITEM_RX_VF_BROADCAST_PKTS = 27, + DPP_STAT_ITEM_RX_1588_PKTS = 28, + DPP_STAT_ITEM_TX_1588_PKTS = 29, + DPP_STAT_ITEM_1588_DROP_PKTS = 30, + DPP_STAT_ITEM_1588_NP_DRS_ENCRYPT_PKTS = 31, + DPP_STAT_ITEM_1588_DRS_NP_ENCRYPT_PKTS = 32, + DPP_STAT_ITEM_SPOOF_DROP_PKTS = 33, + DPP_STAT_ITEM_NP_RX_VPORT_STATE_DROP_STAT = 34, + DPP_STAT_ITEM_RDMA_TX_STAT = 37, + DPP_STAT_ITEM_RDMA_RX_STAT = 38, + DPP_STAT_ITEM_NIC_OUT_RATE_LIMIT_DROP_STAT = 39, + DPP_STAT_ITEM_NIC_IN_RATE_LIMIT_DROP_STAT = 40, + DPP_STAT_ITEM_TX_MTU_DROP_STAT = 41, + DPP_STAT_ITEM_RX_MTU_DROP_STAT = 42, + DPP_STAT_ITEM_MCODE_PPU_PKTS = 43, + DPP_STAT_ITEM_NP_PORT_UNICAST_RX_STAT = 44, + DPP_STAT_ITEM_NP_PORT_UNICAST_TX_STAT = 45, + DPP_STAT_ITEM_NP_PORT_MULTICAST_RX_STAT = 46, + DPP_STAT_ITEM_NP_PORT_MULTICAST_TX_STAT = 47, + DPP_STAT_ITEM_NP_PORT_BROADCAST_RX_STAT = 48, + DPP_STAT_ITEM_NP_PORT_BROADCAST_TX_STAT = 49, + DPP_STAT_ITEM_FD_FLOW_STAT = 50, + DPP_STAT_ITEM_HPCC_STAT = 51, + DPP_STAT_ITEM_ASN_PHYPORT_RX_PKTS = 52, + DPP_STAT_ITEM_PSN_PHYPORT_TX_PKTS = 53, + DPP_STAT_ITEM_PSN_PHYPORT_RX_PKTS = 54, + 
DPP_STAT_ITEM_PSN_ACK_PHYPORT_TX_PKTS = 55, + DPP_STAT_ITEM_PSN_ACK_PHYPORT_RX_PKTS = 56, + DPP_STAT_ITEM_UPPER = 255 +} DPP_STAT_ITEM_E; + +typedef struct { + ZXIC_UINT64 bytes; + ZXIC_UINT64 pkts; +} DPP_STAT_CNT128_T; + +typedef union dpp_stat_value_u { + DPP_STAT_CNT128_T stat_cnt_128; + ZXIC_UINT64 stat_cnt_64; +} DPP_STAT_VALUE_U; + +#define DPP_STAT_MC_PACKET_RX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0x1A)) +#define DPP_STAT_MC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(40)) + +#define DPP_STAT_BC_PACKET_RX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0x42)) +#define DPP_STAT_BC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(40 * 1024)) + +#define DPP_STAT_1588_PACKET_RX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xA042)) +#define DPP_STAT_1588_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_1588_PACKET_TX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xA842)) +#define DPP_STAT_1588_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_1588_PACKET_DROP_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xB042)) +#define DPP_STAT_1588_PACKET_DROP_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_1588_ENC_PACKET_RX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xC042)) +#define DPP_STAT_1588_ENC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_1588_ENC_PACKET_TX_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xB842)) +#define DPP_STAT_1588_ENC_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_SPOOF_PACKET_DROP_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xC842)) +#define DPP_STAT_SPOOF_PACKET_DROP_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x40)) + +#define DPP_STAT_PORT_RDMA_PACKET_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x6C81)) // 128bit +#define DPP_STAT_PORT_RDMA_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x400)) + +#define DPP_STAT_PORT_RDMA_PACKET_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x7081)) // 128bit +#define DPP_STAT_PORT_RDMA_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x400)) + +#define DPP_STAT_PLCR_PACKET_DROP_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x7481)) // 128bit +#define 
DPP_STAT_PLCR_PACKET_DROP_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_PLCR_PACKET_DROP_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x7C81)) // 128bit +#define DPP_STAT_PLCR_PACKET_DROP_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_MTU_PACKET_DROP_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x8481)) // 128bit +#define DPP_STAT_MTU_PACKET_DROP_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_MTU_PACKET_DROP_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x8981)) // 128bit +#define DPP_STAT_MTU_PACKET_DROP_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_MCODE_PACKET_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0x11D02)) // 64bit +#define DPP_STAT_MCODE_PACKET_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x280)) + +#define DPP_STAT_PORT_UC_PACKET_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x8FC1)) // 128bit +#define DPP_STAT_PORT_UC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_PORT_UC_PACKET_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x94C1)) // 128bit +#define DPP_STAT_PORT_UC_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_PORT_MC_PACKET_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x99C1)) // 128bit +#define DPP_STAT_PORT_MC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_PORT_MC_PACKET_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x9EC1)) // 128bit +#define DPP_STAT_PORT_MC_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_PORT_BC_PACKET_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0xA3C1)) // 128bit +#define DPP_STAT_PORT_BC_PACKET_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_PORT_BC_PACKET_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0xA8C1)) // 128bit +#define DPP_STAT_PORT_BC_PACKET_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x500)) + +#define DPP_STAT_FD_ACL_CNT_ERAM_BAADDR ((ZXIC_UINT32)(0xADC1)) // 128bit +#define DPP_STAT_FD_ACL_CNT_ERAM_DEPTH ((ZXIC_UINT32)(0x800)) + +#define DPP_STAT_ASN_PHYPORT_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x17382)) // 64bit +#define DPP_STAT_ASN_PHYPORT_RX_CNT_ERAM_DEPTH 
((ZXIC_UINT32)(10)) + +#define DPP_STAT_PSN_PHYPORT_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x1738C)) // 64bit +#define DPP_STAT_PSN_PHYPORT_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(10)) + +#define DPP_STAT_PSN_PHYPORT_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x17396)) // 64bit +#define DPP_STAT_PSN_PHYPORT_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(10)) + +#define DPP_STAT_PSN_ACK_PHYPORT_TX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x173A0)) // 64bit +#define DPP_STAT_PSN_ACK_PHYPORT_TX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(10)) + +#define DPP_STAT_PSN_ACK_PHYPORT_RX_CNT_ERAM_BAADDR \ + ((ZXIC_UINT32)(0x173AA)) // 64bit +#define DPP_STAT_PSN_ACK_PHYPORT_RX_CNT_ERAM_DEPTH ((ZXIC_UINT32)(10)) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_tm.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_tm.h new file mode 100644 index 000000000000..8e4f191d789e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_tm.h @@ -0,0 +1,60 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_tm.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_TM_H +#define DPP_TBL_TM_H + +#include "zxic_common.h" +#include "dpp_type_api.h" +#include "dpp_dev.h" + +#define TM_BASE_QUEUE_VALID (0x1000) +#define TRUST_MODE_VALID (0x10) +#define UP_VALID (0x10) +#define TC_VALID (0x10) +#define TM_SWITCH_ON (1) +#define TM_SWITCH_OFF (0) + +ZXIC_UINT32 dpp_tm_flowid_pport_table_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 port, ZXIC_UINT32 flow_id); +ZXIC_UINT32 dpp_tm_flowid_pport_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 port); +ZXIC_UINT32 dpp_tm_pport_trust_mode_table_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, + ZXIC_UINT32 mode); +ZXIC_UINT32 
dpp_tm_pport_trust_mode_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port); +ZXIC_UINT32 dpp_tm_pport_dscp_map_table_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id, + ZXIC_UINT32 up_id); +ZXIC_UINT32 dpp_tm_pport_dscp_map_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id); +ZXIC_UINT32 dpp_tm_pport_up_map_table_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, ZXIC_UINT32 up_id, + ZXIC_UINT32 tc_id); +ZXIC_UINT32 dpp_tm_pport_up_map_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, ZXIC_UINT32 up_id); +ZXIC_UINT32 dpp_tm_pport_mcode_switch_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, ZXIC_UINT32 mode); +ZXIC_UINT32 dpp_tm_pport_mcode_switch_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_uplink.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_uplink.h new file mode 100644 index 000000000000..c76f7882870e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_uplink.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_uplink.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_UPLINK_H +#define DPP_TBL_UPLINK_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vlan.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vlan.h new file mode 100644 index 000000000000..cee2dc93dcf9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vlan.h @@ -0,0 +1,29 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : 
dpp_tbl_vlan.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_VLAN_H +#define DPP_TBL_VLAN_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#define VLAN_GROUP_NUM (35) +#define VLAN_ID_NUM_IN_GROUP (120) + +#endif diff --git a/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vqm.h b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vqm.h new file mode 100644 index 000000000000..12052deb30be --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_vqm.h @@ -0,0 +1,26 @@ +/************************************************************** +* 版权所有 (C)2013-2015, 深圳市中兴通讯股份有限公司 +* 文件名称 : dpp_tbl_port.h +* 文件标识 : +* 内容摘要 : +* 其它说明 : +* 当前版本 : +* 作 者 : +* 完成日期 : 2014/01/27 +* DEPARTMENT: ASIC_FPGA_R&D_Dept +* MANUAL_PERCENT: 100% + +* 修改记录1: +* 修改日期: +* 版 本 号: +* 修 改 人: +* 修改内容: +***************************************************************/ + +#ifndef DPP_TBL_VQM_H +#define DPP_TBL_VQM_H + +#include "zxic_common.h" +#include "dpp_type_api.h" + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/Kbuild.include b/drivers/net/ethernet/dinghai/en_np/table/source/Kbuild.include new file mode 100644 index 000000000000..680d62419518 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/Kbuild.include @@ -0,0 +1,2 @@ +cur_dir := en_np/table/source/ +src_files += $(addprefix $(cur_dir),$(notdir $(wildcard $(dinghai_root)/$(cur_dir)*.c))) \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_bc.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_bc.c new file mode 100644 index 000000000000..3fa7fdd67617 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_bc.c @@ -0,0 +1,227 @@ +#include 
"dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_bc.h" + +ZXIC_UINT32 dpp_vport_bc_info_add(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_VPORT_BC_TABLE_T *bc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_vport_bc_table_get(pf_info, &bc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_bc_table_get"); + ZXIC_COMM_CHECK_POINT(bc_table); + + vfunc_num = VFUNC_NUM(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(vfunc_num, 0, + (BC_GROUP_NUM * BC_MEMBER_NUM_IN_GROUP) - 1); + + group_id = vfunc_num / BC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, BC_GROUP_NUM - 1); + + bc_table->bc_info.bc_bitmap[group_id] |= + ((ZXIC_UINT64)(1) << (BC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % BC_MEMBER_NUM_IN_GROUP))); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_bc_info_del(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_VPORT_BC_TABLE_T *bc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_vport_bc_table_get(pf_info, &bc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_bc_table_get"); + ZXIC_COMM_CHECK_POINT(bc_table); + + vfunc_num = VFUNC_NUM(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(vfunc_num, 0, + (BC_GROUP_NUM * BC_MEMBER_NUM_IN_GROUP) - 1); + + group_id = vfunc_num / BC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, BC_GROUP_NUM - 1); + + bc_table->bc_info.bc_bitmap[group_id] &= + ~((ZXIC_UINT64)(1) << (BC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % BC_MEMBER_NUM_IN_GROUP))); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_bc_info_clear_all(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + 
DPP_VPORT_BC_TABLE_T *bc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_vport_bc_table_get(pf_info, &bc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_bc_table_get"); + ZXIC_COMM_CHECK_POINT(bc_table); + + for (group_id = 0; group_id < BC_GROUP_NUM; group_id++) { + bc_table->bc_info.bc_bitmap[group_id] = 0; + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_bc_table_insert(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_BC_TABLE; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_BC_T bc_entry = { 0 }; + DPP_VPORT_BC_TABLE_T *bc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_bc_table_get(pf_info, &bc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_bc_table_get"); + ZXIC_COMM_CHECK_POINT(bc_table); + + for (group_id = 0; group_id < BC_GROUP_NUM; group_id++) { + index = (((OWNER_PF_VQM_VFID(pf_info->vport) - + PF_VQM_VFID_OFFSET) + << 2) | + group_id); + bc_entry.hit_flag = 1; + bc_entry.bc_bitmap = bc_table->bc_info.bc_bitmap[group_id]; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &bc_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_insert"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u group_id: %u index: 0x%02x.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + group_id, index); + ZXIC_COMM_TRACE_NOTICE( + "[%s] bc_bitmap: %02x %02x %02x %02x %02x %02x %02x %02x.\n", + __FUNCTION__, + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 7), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 6), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 5), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 4), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 3), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 2), + *((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 1), + 
*((ZXIC_UINT8 *)(&bc_entry.bc_bitmap) + 0)); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_bond_pf(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_BC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + if (IS_PF(pf_info->vport)) { + rc = dpp_vport_bc_info_clear_all(pf_info); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_bc_info_clear_all", + DEV_PCIE_LOCK(&dev)); + } else { + rc = dpp_vport_bc_info_add(pf_info); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_bc_info_add", + DEV_PCIE_LOCK(&dev)); + } + + rc = dpp_vport_bc_table_insert(pf_info); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_bc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_bond_pf); + +ZXIC_UINT32 dpp_vport_unbond_pf(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_BC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX_EQUAL(IS_PF(pf_info->vport), 1); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_bc_info_del(pf_info); + 
ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_bc_info_del", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_bc_table_insert(pf_info); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_bc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_unbond_pf); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_cfg.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_cfg.c new file mode 100644 index 000000000000..7e327b7660e7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_cfg.c @@ -0,0 +1,256 @@ +#include "dpp_tbl_cfg.h" +#include "dpp_dev.h" +#include "dpp_pktrx_cfg.h" +#include "dpp_pktrx_api.h" +#include "dpp_agent_channel.h" + +ZXIC_UINT32 dpp_glb_cfg_set_0(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_0) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_set_0(&dev, glb_cfg_data_0); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_set_0"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_set_0); + +ZXIC_UINT32 dpp_glb_cfg_set_1(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_set_1(&dev, glb_cfg_data_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_set_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_set_1); + +ZXIC_UINT32 dpp_glb_cfg_set_2(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_2) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_set_2(&dev, glb_cfg_data_2); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_set_2"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_set_2); + +ZXIC_UINT32 dpp_glb_cfg_set_3(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 glb_cfg_data_3) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_set_3(&dev, glb_cfg_data_3); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_set_3"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_set_3); + +ZXIC_UINT32 dpp_glb_cfg_get_0(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_0) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_glb_cfg_data_0); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_get_0(&dev, p_glb_cfg_data_0); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_get_0"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_get_0); + +ZXIC_UINT32 dpp_glb_cfg_get_1(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_glb_cfg_data_1); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_get_1(&dev, p_glb_cfg_data_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_get_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_get_1); + +ZXIC_UINT32 dpp_glb_cfg_get_2(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_2) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_glb_cfg_data_2); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_get_2(&dev, p_glb_cfg_data_2); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_get_2"); + + 
return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_get_2); + +ZXIC_UINT32 dpp_glb_cfg_get_3(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_glb_cfg_data_3) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_glb_cfg_data_3); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_get_3(&dev, p_glb_cfg_data_3); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_get_3"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_glb_cfg_get_3); + +ZXIC_UINT32 dpp_l2d_psn_cfg_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 psn_cfg) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_opr_mutex_get(&dev, DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_1, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "zxic_comm_mutex_lock"); + + rc = dpp_agent_channel_psn_cfg_l2d_write(&dev, psn_cfg); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(&dev), rc, + "dpp_agent_channel_psn_cfg_l2d_write", + p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_l2d_psn_cfg_set); + +ZXIC_UINT32 dpp_l2d_psn_cfg_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *p_psn_cfg) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + ZXIC_MUTEX_T *p_mutex = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_psn_cfg); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_opr_mutex_get(&dev, DPP_DEV_MUTEX_T_PKTRX_MF_GLB_CFG_1, + &p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "dpp_dev_opr_mutex_get"); + + rc = zxic_comm_mutex_lock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "zxic_comm_mutex_lock"); + + rc = 
dpp_agent_channel_psn_cfg_l2d_read(&dev, p_psn_cfg); + ZXIC_COMM_CHECK_DEV_RC_UNLOCK(DEV_ID(&dev), rc, + "dpp_agent_channel_psn_cfg_l2d_read", + p_mutex); + + rc = zxic_comm_mutex_unlock(p_mutex); + ZXIC_COMM_CHECK_DEV_RC(DEV_ID(&dev), rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_l2d_psn_cfg_get); + +ZXIC_UINT32 dpp_pktrx_mcode_glb_cfg_write(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_write_1(&dev, start_bit_no, end_bit_no, + glb_cfg_data_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_write_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pktrx_mcode_glb_cfg_write); + +ZXIC_UINT32 dpp_mcode_feature_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT64 *feature) +{ + ZXIC_UINT32 rc = 0; + DPP_DEV_T dev = { 0 }; + DPP_PKTRX_PHYPORT_UDF_TABLE_T phy_udf_table = { 0 }; + + ZXIC_COMM_CHECK_INDEX(index, 0, DPP_MCODE_FEATURE_LIST_NUM - 1); + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(feature); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_udf_table_get(&dev, 11 + index / 2, &phy_udf_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_udf_table_get"); + + *feature = ZXIC_COMM_COUNTER64_BUILD( + phy_udf_table.port_based_user_data[(index % 2) * 2], + phy_udf_table.port_based_user_data[(index % 2) * 2 + 1]); + + return 0; +} +EXPORT_SYMBOL(dpp_mcode_feature_get); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_comm.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_comm.c new file mode 100644 index 000000000000..1928778af9c5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_comm.c @@ -0,0 +1,429 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include 
"dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_mc.h" +#include "dpp_tbl_bc.h" +#include "dpp_tbl_promisc.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_api.h" + +static DPP_VPORT_MGR_T g_vport_mgr[DPP_PCIE_SLOT_MAX][DPP_PCIE_CHANNEL_MAX] = { + 0 +}; + +ZXIC_UINT32 dpp_data_print(ZXIC_UINT8 *data, ZXIC_UINT32 len) +{ + ZXIC_UINT32 i = 0; + ZXIC_UINT32 loop_cnt = len / 16; + ZXIC_UINT32 last_line_len = len % 16; + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + for (i = 0; i < loop_cnt; i++) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10), *(data + (i * 16) + 11), + *(data + (i * 16) + 12), *(data + (i * 16) + 13), + *(data + (i * 16) + 14), *(data + (i * 16) + 15)); + } + if (last_line_len != 0) { + if (last_line_len == 1) { + ZXIC_COMM_PRINT("%02x\n", *(data + (i * 16) + 0)); + } else if (last_line_len == 2) { + ZXIC_COMM_PRINT("%02x %02x\n", *(data + (i * 16) + 0), + *(data + (i * 16) + 1)); + } else if (last_line_len == 3) { + ZXIC_COMM_PRINT("%02x %02x %02x\n", + *(data + (i * 16) + 0), + *(data + (i * 16) + 1), + *(data + (i * 16) + 2)); + } else if (last_line_len == 4) { + ZXIC_COMM_PRINT("%02x %02x %02x %02x\n", + *(data + (i * 16) + 0), + *(data + (i * 16) + 1), + *(data + (i * 16) + 2), + *(data + (i * 16) + 3)); + } else if (last_line_len == 5) { + ZXIC_COMM_PRINT("%02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), + *(data + (i * 16) + 1), + *(data + (i * 16) + 2), + *(data + (i * 16) + 3), + *(data + (i * 16) + 4)); + } else if 
(last_line_len == 6) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5)); + } else if (last_line_len == 7) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6)); + } else if (last_line_len == 8) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7)); + } else if (last_line_len == 9) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8)); + } else if (last_line_len == 10) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9)); + } else if (last_line_len == 11) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10)); + } else if (last_line_len == 12) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x 
%02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10), + *(data + (i * 16) + 11)); + } else if (last_line_len == 13) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10), + *(data + (i * 16) + 11), + *(data + (i * 16) + 12)); + } else if (last_line_len == 14) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10), + *(data + (i * 16) + 11), + *(data + (i * 16) + 12), + *(data + (i * 16) + 13)); + } else if (last_line_len == 15) { + ZXIC_COMM_PRINT( + "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + *(data + (i * 16) + 0), *(data + (i * 16) + 1), + *(data + (i * 16) + 2), *(data + (i * 16) + 3), + *(data + (i * 16) + 4), *(data + (i * 16) + 5), + *(data + (i * 16) + 6), *(data + (i * 16) + 7), + *(data + (i * 16) + 8), *(data + (i * 16) + 9), + *(data + (i * 16) + 10), + *(data + (i * 16) + 11), + *(data + (i * 16) + 12), + *(data + (i * 16) + 13), + *(data + (i * 16) + 14)); + } + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_mgr_init(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 
channel_id = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_MEMSET(&g_vport_mgr[slot][channel_id], 0x00, + sizeof(DPP_VPORT_MGR_T)); + + g_vport_mgr[slot][channel_id].mc_table.mc_info = + ZXIC_COMM_MALLOC(sizeof(DPP_VPORT_MC_INFO_T) * MC_TABLE_SIZE); + ZXIC_COMM_CHECK_POINT(g_vport_mgr[slot][channel_id].mc_table.mc_info); + ZXIC_COMM_MEMSET(g_vport_mgr[slot][channel_id].mc_table.mc_info, 0x00, + sizeof(DPP_VPORT_MC_INFO_T) * MC_TABLE_SIZE); + + for (sdt_no = 0; sdt_no < DPP_DEV_SDT_ID_MAX; sdt_no++) { + g_vport_mgr[slot][channel_id].table_lock[sdt_no] = + ZXIC_COMM_MALLOC(sizeof(ZXIC_MUTEX_T)); + ZXIC_COMM_CHECK_POINT( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + ZXIC_COMM_MEMSET( + g_vport_mgr[slot][channel_id].table_lock[sdt_no], 0x00, + sizeof(ZXIC_MUTEX_T)); + + rc = zxic_comm_mutex_create( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_create"); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_mgr_release(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 rc = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_FREE(g_vport_mgr[slot][channel_id].mc_table.mc_info); + + for (sdt_no = 0; sdt_no < DPP_DEV_SDT_ID_MAX; sdt_no++) { + rc = zxic_comm_mutex_destroy( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_destroy"); + ZXIC_COMM_FREE( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + } + + 
ZXIC_COMM_MEMSET(&g_vport_mgr[slot][channel_id], 0x00, + sizeof(DPP_VPORT_MGR_T)); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_table_lock(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + ZXIC_MUTEX_T **table_lock) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_POINT(table_lock); + + *table_lock = NULL; + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + rc = zxic_comm_mutex_lock( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_lock"); + + *table_lock = g_vport_mgr[slot][channel_id].table_lock[sdt_no]; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u table lock.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_table_unlock(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u table unlock.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + + rc = zxic_comm_mutex_unlock( + g_vport_mgr[slot][channel_id].table_lock[sdt_no]); + ZXIC_COMM_CHECK_RC(rc, "zxic_comm_mutex_unlock"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_bc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_BC_TABLE_T **bc_table) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + 
ZXIC_COMM_CHECK_POINT(bc_table); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + *bc_table = &g_vport_mgr[slot][channel_id].bc_table; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_mc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_MC_TABLE_T **mc_table) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mc_table); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + *mc_table = &g_vport_mgr[slot][channel_id].mc_table; + + return DPP_OK; +} + +ZXIC_UINT32 +dpp_vport_uc_promisc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_PROMISC_TABLE_T **promisc_table) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(promisc_table); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + *promisc_table = &g_vport_mgr[slot][channel_id].uc_promisc_table; + + return DPP_OK; +} + +ZXIC_UINT32 +dpp_vport_mc_promisc_table_get(DPP_PF_INFO_T *pf_info, + DPP_VPORT_PROMISC_TABLE_T **promisc_table) +{ + ZXIC_UINT16 slot = 0; + ZXIC_UINT16 channel_id = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(promisc_table); + + slot = pf_info->slot; + ZXIC_COMM_CHECK_INDEX(slot, 0, DPP_PCIE_SLOT_MAX - 1); + + channel_id = DPP_PCIE_CHANNEL_ID(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(channel_id, 0, DPP_PCIE_CHANNEL_MAX - 1); + + *promisc_table = &g_vport_mgr[slot][channel_id].mc_promisc_table; + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_get_by_vqm_vfid(ZXIC_UINT16 pf_vport, + ZXIC_UINT32 vqm_vfid, 
ZXIC_UINT16 *vport) +{ + ZXIC_COMM_CHECK_POINT(vport); + + if (vqm_vfid >= PF_VQM_VFID_OFFSET) { + *vport = pf_vport; + } else { + *vport = ((EPID(pf_vport) << 12) | 0x800 | + (FUNC_NUM(pf_vport) << 8) | + (vqm_vfid - (EPID(pf_vport) * 256))); + } + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_get_by_mc_bitmap(ZXIC_UINT16 pf_vport, + ZXIC_UINT32 group_id, + ZXIC_UINT64 mc_bitmap, + ZXIC_UINT16 vport[64], + ZXIC_UINT32 *p_vport_num) +{ + ZXIC_UINT32 i = 0; + + ZXIC_UINT32 vport_num = 0; + + ZXIC_COMM_CHECK_POINT(vport); + ZXIC_COMM_CHECK_POINT(p_vport_num); + + for (i = 0; i < MC_MEMBER_NUM_IN_GROUP; i++) { + if ((mc_bitmap >> i) & 1) { + vport[vport_num] = + ((EPID(pf_vport) << 12) | 0x800 | + (FUNC_NUM(pf_vport) << 8) | + ((group_id * MC_MEMBER_NUM_IN_GROUP) + + MC_MEMBER_NUM_IN_GROUP - 1 - i)); + vport_num++; + } + } + + *p_vport_num = vport_num; + + return DPP_OK; +} + +BOOLEAN dpp_vport_in_mc_bitmap(ZXIC_UINT32 vport, ZXIC_UINT64 mc_bitmap) +{ + ZXIC_UINT32 bit_index = 0; + + bit_index = VFUNC_NUM(vport) % MC_MEMBER_NUM_IN_GROUP; + + if ((VF_ACTIVE(vport)) && + ((mc_bitmap >> (MC_MEMBER_NUM_IN_GROUP - 1 - bit_index)) & 1)) { + return TRUE; + } + + return FALSE; +} diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_diag.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_diag.c new file mode 100644 index 000000000000..1f14d7f47d1a --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_diag.c @@ -0,0 +1,4539 @@ +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_dtb.h" +#include "dpp_drv_sdt.h" +#include "dpp_ppu_api.h" +#include "dpp_ppu.h" +#include "dpp_sdt.h" +#include "dpp_hash.h" +#include "dpp_se_api.h" +#include "dpp_apt_se.h" +#include "dpp_drv_hash.h" +#include "dpp_tbl_plcr.h" +#include "dpp_tbl_tm.h" +#include "dpp_tbl_vlan.h" +#include "dpp_drv_acl.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_cfg.h" +#include "dpp_tbl_pkt_cap.h" +#include "dpp_np_init.h" +#include "dpp_tbl_stat.h" 
+#include "dpp_tbl_fd_cfg.h" + +static ZXDH_FD_CFG_T g_diag_fd_cfg = { 0 }; +static ZXIC_UINT32 g_diag_fd_index; +static ZXIC_UINT32 g_eram_buff[4] = { 0 }; +extern DPP_DEV_MGR_T *dpp_dev_mgr_get(ZXIC_VOID); +extern DPP_SE_CFG *dpp_apt_get_se_cfg(DPP_DEV_T *dev); +ZXIC_CONST ZXIC_CHAR *g_vport_table_attr_name[] = { + // byte[15:16] + "rsv6", + // byte[13:14] + "vhca", "rsv5", + // byte[12] + "rss_hash_factor", + // byte[11] + "hash_alg", "uplink_phy_port_id", + // byte[9:10] + "lag_id", "vxlan_offload_en", "pf_vqm_vfid", "rsv3", + // byte[7:8] + "mtu", + // byte[5:6] + "port_base_qid", "hash_search_index", "rsv2", + // byte[4] + "np_egress_meter_enable", "np_ingress_meter_enable", + "np_egress_meter_mode", "np_ingress_meter_mode", "np_egress_tm_enable", + "np_ingress_tm_enable", "rsv1", "spoof_check_enable", + // byte[3] + "inline_sec_offload", "fd_enable", "lag_enable", "vepa_enable", "is_vf", + "virtio_version", "virtio_enable", + // byte[2] + "accelerator_offload_flag", "lro_offload", "ip_recombine_offload", + "tcp_udp_checksum_offload", "ip_checksum_offload", + "outer_ip_checksum_offload", "is_up", "business_enable", + // byte[1] + "hw_bond_enable", "rdma_offload_enable", "promisc_enable", + "sriov_vlan_offload_enable", "sriov_business_vlan_offload_enable", + "rss_enable", "mtu_offload_enable", "hit_flag", + + // byte[13:14] + "flag_1588_enable" +}; + +ZXIC_CONST ZXIC_CHAR *g_uplink_phy_port_table_attr_name[] = { + "rsv6", + "pf_vqm_vfid", + "rsv5", + "lacp_pf_memport_qid", + "rsv4", + "lacp_pf_vqm_vfid", + "rsv3", + "is_up", + "bond_link_up", + "hw_bond_enable", + "mtu", + "mtu_offload_enable", + "rsv2", + "tm_base_queue", + "ptp_port_vfid", + "rsv1", + "magic_packet_enable", + "tm_shape_enable", + "ptp_tc_enable", + "trust_mode", + "hit_flag", + "primary_pf_vqm_vfid", + "sriov_hdbond_enable" +}; + +ZXIC_CONST ZXIC_CHAR *g_vqm_vfid_vlan_attr_name[] = { + "sriov_vlan_tci", + "sriov_vlan_tpid", + "sriov_business_vlan_tpid", + "rsv", + 
"sriov_business_vlan_strip_offload", + "sriov_business_qinq_vlan_strip_offload", + "sriov_business_vlan_filter", + "hit_flag" +}; + +ZXIC_CONST ZXIC_CHAR *dpp_vport_table_attr_name_get(ZXIC_UINT32 attr) +{ + if (attr >= (sizeof(g_vport_table_attr_name) / sizeof(ZXIC_CHAR *))) { + return NULL; + } + + return g_vport_table_attr_name[attr]; +} + +ZXIC_CONST ZXIC_CHAR *dpp_uplink_phy_port_table_attr_name_get(ZXIC_UINT32 attr) +{ + if (attr >= + (sizeof(g_uplink_phy_port_table_attr_name) / sizeof(ZXIC_CHAR *))) { + return NULL; + } + + return g_uplink_phy_port_table_attr_name[attr]; +} + +ZXIC_CONST ZXIC_CHAR *dpp_vqm_vfid_vlan_attr_name_get(ZXIC_UINT32 attr) +{ + if (attr >= (sizeof(g_vqm_vfid_vlan_attr_name) / sizeof(ZXIC_CHAR *))) { + return NULL; + } + + return g_vqm_vfid_vlan_attr_name[attr]; +} + +ZXIC_UINT32 diag_dpp_sdt_tbl_prt(ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 tbl_type = 0; + ZXIC_UINT32 slot = 0; + + DPP_SDT_TBL_DATA_T sdt_tbl = { 0 }; + DPP_SDTTBL_ERAM_T sdt_eram = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash = { 0 }; + DPP_SDTTBL_ETCAM_T sdt_etcam = { 0 }; + DPP_SDTTBL_PORTTBL_T sdt_porttbl = { 0 }; + DPP_DEV_T dev = { 0 }; + + if (sdt_no > PPU_SDT_IDX_MAX) { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, "sdt_no[%d] error, please check it.\n", sdt_no); + return DPP_ERR; + } + + for (slot = 0; slot < DPP_PCIE_SLOT_MAX; slot++) { + dev.pcie_channel.slot = slot; + dev.device_id = 0; + rc = dpp_sdt_tbl_data_get(&dev, sdt_no, &sdt_tbl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_sdt_tbl_data_get"); + + if ((sdt_tbl.data_low32 == 0xFFFFFFFF) && + (sdt_tbl.data_high32 == 0xFFFFFFFF)) { + continue; + } + + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "slot", slot); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "sdt_no", sdt_no); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "data_high32", + sdt_tbl.data_high32); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "data_low32", + sdt_tbl.data_low32); + ZXIC_COMM_PRINT("\n"); + + /* 获取sdt 类型信息 */ + 
ZXIC_COMM_UINT32_GET_BITS(tbl_type, sdt_tbl.data_high32, + DPP_SDT_H_TBL_TYPE_BT_POS, + DPP_SDT_H_TBL_TYPE_BT_LEN); + + if (tbl_type >= DPP_SDT_TBLT_eRAM && + tbl_type <= DPP_SDT_TBLT_PORTTBL) { + switch (tbl_type) { + case DPP_SDT_TBLT_eRAM: { + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, + &sdt_eram); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_soft_sdt_tbl_get"); + + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "eram_mode", + sdt_eram.eram_mode); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "eram_base_addr", + sdt_eram.eram_base_addr); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "eram_table_depth", + sdt_eram.eram_table_depth); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "eram_clutch_en", + sdt_eram.eram_clutch_en); + break; + } + + case DPP_SDT_TBLT_HASH: { + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, + &sdt_hash); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_soft_sdt_tbl_get"); + + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "hash_id", + sdt_hash.hash_id); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "hash_table_width", + sdt_hash.hash_table_width); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "key_size", + sdt_hash.key_size); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "hash_table_id", + sdt_hash.hash_table_id); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "learn_en", + sdt_hash.learn_en); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "keep_alive", + sdt_hash.keep_alive); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "keep_alive_baddr", + sdt_hash.keep_alive_baddr); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "rsp_mode", + sdt_hash.rsp_mode); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "hash_clutch_en", + sdt_hash.hash_clutch_en); + break; + } + + case DPP_SDT_TBLT_eTCAM: { + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, + &sdt_etcam); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_soft_sdt_tbl_get"); + + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "etcam_id", + sdt_etcam.etcam_id); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "etcam_key_mode", + sdt_etcam.etcam_key_mode); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "etcam_table_id", + sdt_etcam.etcam_table_id); + 
ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "no_as_rsp_mode", + sdt_etcam.no_as_rsp_mode); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", "as_en", + sdt_etcam.as_en); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "as_eram_baddr", + sdt_etcam.as_eram_baddr); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "as_rsp_mode", + sdt_etcam.as_rsp_mode); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "etcam_table_depth", + sdt_etcam.etcam_table_depth); + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "etcam_clutch_en", + sdt_etcam.etcam_clutch_en); + break; + } + + case DPP_SDT_TBLT_PORTTBL: { + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, + &sdt_porttbl); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, + "dpp_soft_sdt_tbl_get"); + + ZXIC_COMM_PRINT("%-30s : 0x%08x\n", + "porttbl_clutch_en", + sdt_porttbl.porttbl_clutch_en); + break; + } + + default: { + ZXIC_COMM_TRACE_DEV_ERROR( + dev_id, + "SDT table_type[ %d ] is invalid!\n", + tbl_type); + return DPP_ERR; + } + } + + ZXIC_COMM_PRINT("\n"); + } else { + ZXIC_COMM_TRACE_DEV_ERROR(dev_id, + "no sdt information \n"); + } + } + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_se_smmu0_wr64(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index, + ZXIC_UINT32 data0, ZXIC_UINT32 data1) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_UINT32 buff[2] = { 0 }; + + ZXIC_COMM_CHECK_INDEX(base_addr, 0, SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + ZXIC_COMM_CHECK_INDEX(data0, 0, 0xffffffff); + ZXIC_COMM_CHECK_INDEX(data1, 0, 0xffffffff); + + buff[0] = data0; + buff[1] = data1; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_se_smmu0_ind_write(&dev, base_addr, index, ERAM128_OPR_64b, + buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_smmu0_ind_write"); + + return rc; +} + +ZXIC_UINT32 diag_dpp_se_smmu0_rd64(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; 
+ + ZXIC_UINT32 buff[2] = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX(base_addr, 0, SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + + rc = dpp_se_smmu0_ind_read(&dev, base_addr, index, ERAM128_OPR_64b, + RD_MODE_HOLD, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_smmu0_ind_read"); + + ZXIC_COMM_PRINT( + "base_addr[0x%08x] index[0x%08x] value[0x%08x 0x%08x] \n", + base_addr, index, buff[0], buff[1]); + + return rc; +} + +ZXIC_UINT32 diag_dpp_se_smmu0_wr128(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index, + ZXIC_UINT32 data0, ZXIC_UINT32 data1, + ZXIC_UINT32 data2, ZXIC_UINT32 data3) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_UINT32 buff[4] = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX(base_addr, 0, SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(base_addr, index); + ZXIC_COMM_CHECK_INDEX(base_addr + index, 0, + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + ZXIC_COMM_CHECK_INDEX(data0, 0, 0xffffffff); + ZXIC_COMM_CHECK_INDEX(data1, 0, 0xffffffff); + ZXIC_COMM_CHECK_INDEX(data2, 0, 0xffffffff); + ZXIC_COMM_CHECK_INDEX(data3, 0, 0xffffffff); + + buff[0] = data0; + buff[1] = data1; + buff[2] = data2; + buff[3] = data3; + + rc = dpp_se_smmu0_ind_write(&dev, base_addr, index, ERAM128_OPR_128b, + buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_smmu0_ind_write"); + + return rc; +} + +ZXIC_UINT32 diag_dpp_se_smmu0_rd128(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 base_addr, ZXIC_UINT32 index) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + DPP_STATUS rc = DPP_OK; + + ZXIC_UINT32 buff[4] = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + ZXIC_COMM_CHECK_INDEX(base_addr, 0, SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + ZXIC_COMM_CHECK_INDEX_ADD_OVERFLOW_NO_ASSERT(base_addr, 
index); + ZXIC_COMM_CHECK_INDEX(base_addr + index, 0, + SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1); + + rc = dpp_se_smmu0_ind_read(&dev, base_addr, index, ERAM128_OPR_128b, + RD_MODE_HOLD, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_smmu0_ind_read"); + + ZXIC_COMM_PRINT( + "base_addr[0x%08x] index[0x%08x] value[0x%08x 0x%08x 0x%08x 0x%08x] \n", + base_addr, index, buff[0], buff[1], buff[2], buff[3]); + + return rc; +} + +ZXIC_UINT32 diag_dpp_vport_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, ZXIC_UINT8 mac0, + ZXIC_UINT8 mac1, ZXIC_UINT8 mac2, + ZXIC_UINT8 mac3, ZXIC_UINT8 mac4, + ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_add_mac(&pf_info, mac, sriov_vlan_tpid, sriov_vlan_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_add_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, ZXIC_UINT8 mac0, + ZXIC_UINT8 mac1, ZXIC_UINT8 mac2, + ZXIC_UINT8 mac3, ZXIC_UINT8 mac4, + ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_del_mac(&pf_info, mac, sriov_vlan_tpid, sriov_vlan_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_del_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_batch_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT32 vlan_id, ZXIC_UINT16 mac16, + ZXIC_UINT32 mac32) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 index = 0; + ZXDH_L2_FWD_KEY *p_key_temp = NULL; + ZXDH_L2_FWD_KEY *p_mac_key = NULL; + ZXIC_UINT32 mac = 0; + ZXIC_UINT32 rc = DPP_OK; + + p_mac_key = (ZXDH_L2_FWD_KEY *)ZXIC_COMM_MALLOC( + mac_num * 
sizeof(ZXDH_L2_FWD_KEY)); + ZXIC_COMM_CHECK_POINT(p_mac_key); + + for (index = 0; index < mac_num; index++) { + p_key_temp = p_mac_key + index; + mac = mac32 + index; + p_key_temp->dmac_addr[0] = (mac16 >> 8) & 0xff; + p_key_temp->dmac_addr[1] = mac16 & 0xff; + p_key_temp->dmac_addr[2] = (mac >> 24) & 0xff; + p_key_temp->dmac_addr[3] = (mac >> 16) & 0xff; + p_key_temp->dmac_addr[4] = (mac >> 8) & 0xff; + p_key_temp->dmac_addr[5] = mac & 0xff; + p_key_temp->sriov_vlan_tpid = (vlan_id >> 16) & 0xffff; + p_key_temp->sriov_vlan_id = (vlan_id & 0xffff) + index; + } + rc = dpp_batch_add_unicast_mac(&pf_info, mac_num, + (ZXIC_VOID *)p_mac_key); + ZXIC_COMM_FREE(p_mac_key); + ZXIC_COMM_CHECK_RC(rc, "dpp_batch_add_unicast_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_batch_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT32 vlan_id, ZXIC_UINT16 mac16, + ZXIC_UINT32 mac32) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 index = 0; + ZXDH_L2_FWD_KEY *p_key_temp = NULL; + ZXDH_L2_FWD_KEY *p_mac_key = NULL; + ZXIC_UINT32 mac = 0; + ZXIC_UINT32 rc = DPP_OK; + + p_mac_key = (ZXDH_L2_FWD_KEY *)ZXIC_COMM_MALLOC( + mac_num * sizeof(ZXDH_L2_FWD_KEY)); + ZXIC_COMM_CHECK_POINT(p_mac_key); + + for (index = 0; index < mac_num; index++) { + p_key_temp = p_mac_key + index; + mac = mac32 + index; + p_key_temp->dmac_addr[0] = (mac16 >> 8) & 0xff; + p_key_temp->dmac_addr[1] = mac16 & 0xff; + p_key_temp->dmac_addr[2] = (mac >> 24) & 0xff; + p_key_temp->dmac_addr[3] = (mac >> 16) & 0xff; + p_key_temp->dmac_addr[4] = (mac >> 8) & 0xff; + p_key_temp->dmac_addr[5] = mac & 0xff; + p_key_temp->sriov_vlan_tpid = (vlan_id >> 16) & 0xffff; + p_key_temp->sriov_vlan_id = (vlan_id & 0xffff) + index; + } + rc = dpp_batch_del_unicast_mac(&pf_info, mac_num, + (ZXIC_VOID *)p_mac_key); + ZXIC_COMM_FREE(p_mac_key); + ZXIC_COMM_CHECK_RC(rc, "dpp_batch_del_unicast_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mac_transter(ZXIC_UINT16 slot, 
ZXIC_UINT16 vport, + ZXIC_UINT16 new_vport) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_PF_INFO_T new_pf_info = { slot, new_vport }; + + rc = dpp_unicast_mac_transfer(&pf_info, &new_pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_unicast_mac_transfer"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mac_max_num(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 max_num = 0; + + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_unicast_mac_max_get(&pf_info, &max_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_multicast_mac_max_get"); + + ZXIC_COMM_PRINT("uc_max_num: %u\n", max_num); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_batch_mc_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 index = 0; + ZXIC_UINT8 *p_mac = NULL; + ZXIC_UINT8 *p_mac_temp = NULL; + ZXIC_UINT32 rc = DPP_OK; + + p_mac = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(mac_num * 6); + ZXIC_COMM_CHECK_POINT(p_mac); + + for (index = 0; index < mac_num; index++) { + p_mac_temp = p_mac + index * 6; + p_mac_temp[0] = mac0; + p_mac_temp[1] = mac1; + p_mac_temp[2] = mac2; + p_mac_temp[3] = mac3; + p_mac_temp[4] = mac4 + ((index >> 8) & 0xff); + p_mac_temp[5] = mac5 + (index & 0xff); + } + rc = dpp_batch_add_multicast_mac(&pf_info, mac_num, p_mac); + ZXIC_COMM_FREE(p_mac); + ZXIC_COMM_CHECK_RC(rc, "dpp_batch_add_multicast_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_batch_mc_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 mac_num, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 index = 0; + ZXIC_UINT8 *p_mac = NULL; + ZXIC_UINT8 *p_mac_temp = NULL; + ZXIC_UINT32 rc = DPP_OK; + + p_mac = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(mac_num * 
6); + ZXIC_COMM_CHECK_POINT(p_mac); + + for (index = 0; index < mac_num; index++) { + p_mac_temp = p_mac + index * 6; + p_mac_temp[0] = mac0; + p_mac_temp[1] = mac1; + p_mac_temp[2] = mac2; + p_mac_temp[3] = mac3; + p_mac_temp[4] = mac4 + ((index >> 8) & 0xff); + p_mac_temp[5] = mac5 + (index & 0xff); + } + rc = dpp_batch_del_multicast_mac(&pf_info, mac_num, p_mac); + ZXIC_COMM_FREE(p_mac); + ZXIC_COMM_CHECK_RC(rc, "dpp_batch_del_multicast_mac"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_mac_transter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 new_vport) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_PF_INFO_T new_pf_info = { slot, new_vport }; + + rc = dpp_multicast_mac_transfer(&pf_info, &new_pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_unicast_mac_transfer"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_mac_max_num(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 max_num = 0; + + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_multicast_mac_max_get(&pf_info, &max_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_multicast_mac_max_get"); + + ZXIC_COMM_PRINT("mc_max_num: %u\n", max_num); + + return DPP_OK; +} + +/* Started by AICoder, pid:51ed8w97c8bf8e214f3608def09084150ce944fb */ +ZXIC_UINT32 diag_dpp_vport_mac_flush_online(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = dpp_unicast_all_mac_online_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_unicast_all_mac_online_delete"); + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mac_flush_offline(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = dpp_unicast_all_mac_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_unicast_all_mac_delete"); + return DPP_OK; +} +/* Ended by AICoder, pid:51ed8w97c8bf8e214f3608def09084150ce944fb */ + +ZXIC_UINT32 diag_dpp_vport_mac_search(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 
sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT16 current_vport = 0; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_unicast_mac_search(&pf_info, mac, sriov_vlan_tpid, + sriov_vlan_id, ¤t_vport); + ZXIC_COMM_CHECK_RC(rc, "dpp_unicast_mac_search"); + + ZXIC_COMM_PRINT("current_mac_vport = 0x%04x\n", current_vport); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mac_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 mac_num = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_PF_INFO_T pf_info = { slot, vport }; + + MAC_VPORT_INFO *p_mac_arr = (MAC_VPORT_INFO *)ZXIC_COMM_MALLOC( + DTB_DUMP_UNICAST_MAC_DUMP_NUM * sizeof(MAC_VPORT_INFO)); + ZXIC_COMM_CHECK_POINT(p_mac_arr); + + rc = dpp_unicast_mac_dump(&pf_info, p_mac_arr, &mac_num); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_unicast_mac_dump", p_mac_arr); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + for (i = 0; i < mac_num; i++) { + ZXIC_COMM_PRINT( + "slot: %u vport: 0x%04x sriov_vlan_tpid: 0x%04x sriov_vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x\n", + slot, p_mac_arr[i].vport, p_mac_arr[i].sriov_vlan_tpid, + p_mac_arr[i].sriov_vlan_id, p_mac_arr[i].addr[0], + p_mac_arr[i].addr[1], p_mac_arr[i].addr[2], + p_mac_arr[i].addr[3], p_mac_arr[i].addr[4], + p_mac_arr[i].addr[5]); + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + ZXIC_COMM_FREE(p_mac_arr); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_mac_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + 
mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_multi_mac_add_member(&pf_info, mac); + ZXIC_COMM_CHECK_RC(rc, "dpp_multi_mac_add_member"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_mac_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_multi_mac_del_member(&pf_info, mac); + ZXIC_COMM_CHECK_RC(rc, "dpp_multi_mac_del_member"); + + return DPP_OK; +} + +/* Started by AICoder, pid:t2b06ldb39ibd58147ae09f7b04d8517f6751d65 */ +ZXIC_UINT32 diag_dpp_vport_mc_mac_flush_online(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = dpp_multicast_all_mac_online_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_multicast_all_mac_online_delete"); + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_mac_flush_offline(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = dpp_multicast_all_mac_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_multicast_all_mac_delete"); + return DPP_OK; +} +/* Ended by AICoder, pid:t2b06ldb39ibd58147ae09f7b04d8517f6751d65 */ + +ZXIC_UINT32 diag_dpp_vport_mc_mac_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 mac_num = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_PF_INFO_T pf_info = { slot, vport }; + + MAC_VPORT_INFO *p_mac_arr = (MAC_VPORT_INFO *)ZXIC_COMM_MALLOC( + DTB_DUMP_MULTICAST_MAC_DUMP_NUM * sizeof(MAC_VPORT_INFO)); + ZXIC_COMM_CHECK_POINT(p_mac_arr); + + rc = dpp_multicast_mac_dump(&pf_info, p_mac_arr, &mac_num); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_multicast_mac_dump", p_mac_arr); + + 
ZXIC_COMM_PRINT("-----------------------------------------------\n"); + for (i = 0; i < mac_num; i++) { + ZXIC_COMM_PRINT( + "slot: %u vport: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x\n", + slot, p_mac_arr[i].vport, p_mac_arr[i].addr[0], + p_mac_arr[i].addr[1], p_mac_arr[i].addr[2], + p_mac_arr[i].addr[3], p_mac_arr[i].addr[4], + p_mac_arr[i].addr[5]); + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + ZXIC_COMM_FREE(p_mac_arr); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_table_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_create(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_create"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_table_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_delete"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 attr, ZXIC_UINT32 value) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_attr_set(&pf_info, attr, value); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXDH_SRIOV_VPORT_T port_table = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_attr_get(&pf_info, &port_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", port_table.hit_flag); + ZXIC_COMM_PRINT("%02u 1588_enable = %u\n", SRIOV_VPORT_1588_EN, + port_table.flag_1588_enable); + ZXIC_COMM_PRINT("%02u mtu_offload_enable = %u\n", + SRIOV_VPORT_MTU_OFFLOAD_EN_OFF, + port_table.mtu_offload_enable); + ZXIC_COMM_PRINT("%02u rss_enable = 
%u\n", SRIOV_VPORT_RSS_EN_OFF, + port_table.rss_enable); + ZXIC_COMM_PRINT("%02u sriov_business_vlan_offload_enable = %u\n", + SRIOV_VPORT_BUSINESS_VLAN_OFFLOAD_EN, + port_table.sriov_business_vlan_offload_enable); + ZXIC_COMM_PRINT("%02u sriov_vlan_offload_enable = %u\n", + SRIOV_VPORT_VLAN_OFFLOAD_EN, + port_table.sriov_vlan_offload_enable); + ZXIC_COMM_PRINT("%02u promisc_enable = %u\n", SRIOV_VPORT_PROMISC_EN, + port_table.promisc_enable); + ZXIC_COMM_PRINT("%02u rdma_offload_enable = %u\n", + SRIOV_VPORT_RDMA_OFFLOAD_EN_OFF, + port_table.rdma_offload_enable); + ZXIC_COMM_PRINT("%02u hw_bond_enable = %u\n", + SRIOV_VPORT_HW_BOND_EN_OFF, port_table.hw_bond_enable); + + ZXIC_COMM_PRINT("%02u business_enable = %u\n", + SRIOV_VPORT_BUSINESS_EN_OFF, + port_table.business_enable); + ZXIC_COMM_PRINT("%02u is_up = %u\n", SRIOV_VPORT_IS_UP, + port_table.is_up); + ZXIC_COMM_PRINT("%02u outer_ip_checksum_offload = %u\n", + SRIOV_VPORT_OUTER_IP_CHECKSUM_OFFLOAD, + port_table.outer_ip_checksum_offload); + ZXIC_COMM_PRINT("%02u ip_checksum_offload = %u\n", + SRIOV_VPORT_IP_CHKSUM, port_table.ip_checksum_offload); + ZXIC_COMM_PRINT("%02u tcp_udp_checksum_offload = %u\n", + SRIOV_VPORT_TCP_UDP_CHKSUM, + port_table.tcp_udp_checksum_offload); + ZXIC_COMM_PRINT("%02u ip_recombine_offload = %u\n", + SRIOV_VPORT_IP_RECOMBINE, + port_table.ip_recombine_offload); + ZXIC_COMM_PRINT("%02u lro_offload = %u\n", + SRIOV_VPORT_IPV6_TCP_ASSEMBLE, port_table.lro_offload); + ZXIC_COMM_PRINT("%02u lro_offload = %u\n", + SRIOV_VPORT_IPV4_TCP_ASSEMBLE, port_table.lro_offload); + ZXIC_COMM_PRINT("%02u accelerator_offload_flag = %u\n", + SRIOV_VPORT_ACCELERATOR_OFFLOAD_FLAG, + port_table.accelerator_offload_flag); + + ZXIC_COMM_PRINT("%02u virtio_enable = %u\n", SRIOV_VPORT_VIRTIO_EN_OFF, + port_table.virtio_enable); + ZXIC_COMM_PRINT("%02u virtio_version = %u\n", + SRIOV_VPORT_VIRTIO_VERSION, port_table.virtio_version); + ZXIC_COMM_PRINT("%02u is_vf = %u\n", SRIOV_VPORT_IS_VF, + 
port_table.is_vf); + ZXIC_COMM_PRINT("%02u vepa_enable = %u\n", SRIOV_VPORT_VEPA_EN_OFF, + port_table.vepa_enable); + ZXIC_COMM_PRINT("%02u lag_enable = %u\n", SRIOV_VPORT_LAG_EN_OFF, + port_table.lag_enable); + ZXIC_COMM_PRINT("%02u fd_enable = %u\n", SRIOV_VPORT_FD_EN_OFF, + port_table.fd_enable); + ZXIC_COMM_PRINT("%02u inline_sec_offload = %u\n", + SRIOV_VPORT_INLINE_SEC_OFFLOAD, + port_table.inline_sec_offload); + + ZXIC_COMM_PRINT("%02u spoof_check_enable = %u\n", + SRIOV_VPORT_SPOOFCHK_EN_OFF, + port_table.spoof_check_enable); + ZXIC_COMM_PRINT("%02u np_ingress_tm_enable = %u\n", + SRIOV_VPORT_NP_INGRESS_TM_EN_OFF, + port_table.np_ingress_tm_enable); + ZXIC_COMM_PRINT("%02u np_egress_tm_enable = %u\n", + SRIOV_VPORT_NP_EGRESS_TM_EN_OFF, + port_table.np_egress_tm_enable); + ZXIC_COMM_PRINT("%02u np_ingress_meter_mode = %u\n", + SRIOV_VPORT_NP_INGRESS_MODE, + port_table.np_ingress_meter_mode); + ZXIC_COMM_PRINT("%02u np_egress_meter_mode = %u\n", + SRIOV_VPORT_NP_EGRESS_MODE, + port_table.np_egress_meter_mode); + ZXIC_COMM_PRINT("%02u np_egress_meter_enable = %u\n", + SRIOV_VPORT_NP_INGRESS_METER_EN_OFF, + port_table.np_egress_meter_enable); + ZXIC_COMM_PRINT("%02u np_ingress_meter_enable = %u\n", + SRIOV_VPORT_NP_EGRESS_METER_EN_OFF, + port_table.np_ingress_meter_enable); + + ZXIC_COMM_PRINT("%02u hash_search_index = %u\n", + SRIOV_VPORT_HASH_SEARCH_INDEX, + port_table.hash_search_index); + ZXIC_COMM_PRINT("%02u port_base_qid = %u\n", SRIOV_VPORT_PORT_BASE_QID, + port_table.port_base_qid); + ZXIC_COMM_PRINT("%02u mtu = %u\n", SRIOV_VPORT_MTU, port_table.mtu); + ZXIC_COMM_PRINT("%02u pf_vqm_vfid = %u\n", SRIOV_VPORT_PF_VQM_VFID, + port_table.pf_vqm_vfid); + ZXIC_COMM_PRINT("%02u lag_id = %u\n", SRIOV_VPORT_LAG_ID, + port_table.lag_id); + ZXIC_COMM_PRINT("%02u fd_vxlan_offload_en = %u\n", + SRIOV_VPORT_FD_VXLAN_OFFLOAD_EN, + port_table.fd_vxlan_offload_en); + ZXIC_COMM_PRINT("%02u uplink_phy_port_id = %u\n", + SRIOV_VPORT_UPLINK_PHY_PORT_ID, + 
port_table.uplink_phy_port_id); + ZXIC_COMM_PRINT("%02u hash_alg = %u\n", SRIOV_VPORT_HASH_ALG, + port_table.hash_alg); + ZXIC_COMM_PRINT("%02u rss_hash_factor = %u\n", + SRIOV_VPORT_RSS_HASH_FACTOR, + port_table.rss_hash_factor); + ZXIC_COMM_PRINT("%02u vhca = %u\n", SRIOV_VPORT_VHCA, port_table.vhca); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_egress_meter_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_egress_meter_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_egress_meter_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_egress_meter_en_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 enable = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_egress_meter_en_get(&pf_info, &enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_egress_meter_en_get"); + + ZXIC_COMM_PRINT("[%s] enable: %u\n", __FUNCTION__, enable); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_ingress_meter_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_ingress_meter_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_ingress_meter_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_ingress_meter_en_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 enable = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_ingress_meter_en_get(&pf_info, &enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_ingress_meter_en_get"); + + ZXIC_COMM_PRINT("[%s] enable: %u\n", __FUNCTION__, enable); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_egress_meter_mode_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; 
+ + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_egress_meter_mode_set(&pf_info, mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_egress_meter_mode_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_egress_meter_mode_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 mode = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_egress_meter_mode_get(&pf_info, &mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_egress_meter_mode_get"); + + ZXIC_COMM_PRINT("[%s] mode: %u\n", __FUNCTION__, mode); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_ingress_meter_mode_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_ingress_meter_mode_set(&pf_info, mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_ingress_meter_mode_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_ingress_meter_mode_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 mode = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_ingress_meter_mode_get(&pf_info, &mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_ingress_meter_mode_get"); + + ZXIC_COMM_PRINT("[%s] mode: %u\n", __FUNCTION__, mode); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_rx_flow_hash_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 hash_mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_rx_flow_hash_set(&pf_info, hash_mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_rx_flow_hash_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_rx_flow_hash_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 hash_mode = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_rx_flow_hash_get(&pf_info, &hash_mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_rx_flow_hash_get"); + + ZXIC_COMM_PRINT("[%s] hash_mode: %u\n", __FUNCTION__, hash_mode); + + return DPP_OK; 
+} + +ZXIC_UINT32 diag_dpp_vport_hash_index_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_hash_index_get(&pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + ZXIC_COMM_PRINT("[%s] hash_index: %u\n", __FUNCTION__, hash_index); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_hash_funcs_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 funcs) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_hash_funcs_set(&pf_info, funcs); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_funcs_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_rss_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_rss_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_rss_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_virtio_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_virtio_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_virtio_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_virtio_version_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 version) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_virtio_version_set(&pf_info, version); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_virtio_version_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_promisc_en_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_promisc_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_promisc_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 
diag_dpp_vport_business_vlan_offload_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_business_vlan_offload_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_business_vlan_offload_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_vlan_offload_en_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_vlan_offload_en_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_vlan_offload_en_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_uplink_phy_port_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 attr, + ZXIC_UINT32 value) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_uplink_phy_attr_set(&pf_info, uplink_phy_port_id, attr, value); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_uplink_phy_port_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_DEV_T dev = { 0 }; + ZXDH_UPLINK_PHY_PORT_T uplink_phy_port_table = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_UPLINK_PHY_PORT_ATTR_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, uplink_phy_port_id, + &uplink_phy_port_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", uplink_phy_port_table.hit_flag); + ZXIC_COMM_PRINT("%02u sriov_dh_bond_en = %u\n", + UPLINK_PHY_PORT_SRIOV_HD_BOND_EN, + 
uplink_phy_port_table.sriov_hdbond_enable); + ZXIC_COMM_PRINT("%02u primary_pf_vqm_vfid = %u\n", + UPLINK_PHY_PORT_PRIMARY_PF_VQM_VFID, + uplink_phy_port_table.primary_pf_vqm_vfid); + ZXIC_COMM_PRINT("%02u trust_mode = %u\n", UPLINK_PHY_PORT_TRUST_MODE, + uplink_phy_port_table.trust_mode); + ZXIC_COMM_PRINT("%02u ptp_tc_enable = %u\n", + UPLINK_PHY_PORT_PTP_TC_ENABLE, + uplink_phy_port_table.ptp_tc_enable); + ZXIC_COMM_PRINT("%02u tm_shape_enable = %u\n", + UPLINK_PHY_PORT_TM_SHAPE_ENABLE, + uplink_phy_port_table.tm_shape_enable); + ZXIC_COMM_PRINT("%02u magic_packet_enable = %u\n", + UPLINK_PHY_PORT_MAGIC_PACKET_ENABLE, + uplink_phy_port_table.magic_packet_enable); + ZXIC_COMM_PRINT("%02u ptp_port_vfid = %u\n", + UPLINK_PHY_PORT_PTP_PORT_VFID, + uplink_phy_port_table.ptp_port_vfid); + ZXIC_COMM_PRINT("%02u tm_base_queue = %u\n", + UPLINK_PHY_PORT_TM_BASE_QUEUE, + uplink_phy_port_table.tm_base_queue); + ZXIC_COMM_PRINT("%02u mtu_offload_enable = %u\n", + UPLINK_PHY_PORT_MTU_OFFLOAD_ENABLE, + uplink_phy_port_table.mtu_offload_enable); + ZXIC_COMM_PRINT("%02u mtu = %u\n", UPLINK_PHY_PORT_MTU, + uplink_phy_port_table.mtu); + ZXIC_COMM_PRINT("%02u hw_bond_enable = %u\n", + UPLINK_PHY_PORT_HW_BOND_ENABLE, + uplink_phy_port_table.hw_bond_enable); + ZXIC_COMM_PRINT("%02u bond_link_up = %u\n", + UPLINK_PHY_PORT_BOND_LINK_UP, + uplink_phy_port_table.bond_link_up); + ZXIC_COMM_PRINT("%02u is_up = %u\n", UPLINK_PHY_PORT_IS_UP, + uplink_phy_port_table.is_up); + ZXIC_COMM_PRINT("%02u lacp_pf_vqm_vfid = %u\n", + UPLINK_PHY_PORT_LACP_PF_VQM_VFID, + uplink_phy_port_table.lacp_pf_vqm_vfid); + ZXIC_COMM_PRINT("%02u lacp_pf_memport_qid = %u\n", + UPLINK_PHY_PORT_LACP_PF_MEMPORT_QID, + uplink_phy_port_table.lacp_pf_memport_qid); + ZXIC_COMM_PRINT("%02u pf_vqm_vfid = %u\n", UPLINK_PHY_PORT_PF_VQM_VFID, + uplink_phy_port_table.pf_vqm_vfid); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 
diag_dpp_uplink_phy_bond_vport(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_uplink_phy_bond_vport(&pf_info, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_bond_vport"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_uplink_phy_hardware_bond_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT8 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_uplink_phy_hardware_bond_set(&pf_info, uplink_phy_port_id, + enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_hardware_bond_set"); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_uplink_phy_lacp_pf_vqm_vfid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT16 vqm_vfid) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_uplink_phy_lacp_pf_vqm_vfid_set(&pf_info, uplink_phy_port_id, + vqm_vfid); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_lacp_pf_vqm_vfid_set"); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_uplink_phy_lacp_pf_memport_qid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT16 qid) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_uplink_phy_lacp_pf_memport_qid_set(&pf_info, + uplink_phy_port_id, qid); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_lacp_pf_memport_qid_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_ptp_port_vfid_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 ptp_port_vfid) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_ptp_port_vfid_set(&pf_info, ptp_port_vfid); + ZXIC_COMM_CHECK_RC(rc, "dpp_ptp_port_vfid_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_ptp_tc_enable_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 ptp_tc_enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = 
DPP_OK; + + rc = dpp_ptp_tc_enable_set(&pf_info, ptp_tc_enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_ptp_tc_enable_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_flowid_pport_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 flow_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_flowid_pport_table_set(&pf_info, uplink_phy_port_id, + flow_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_flowid_pport_table_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_flowid_pport_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_flowid_pport_table_del(&pf_info, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_flowid_pport_table_del"); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_tm_pport_trust_mode_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_trust_mode_table_set(&pf_info, uplink_phy_port_id, + mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_trust_mode_table_set"); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_tm_pport_trust_mode_table_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_trust_mode_table_del(&pf_info, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_trust_mode_table_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_mcode_switch_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_mcode_switch_set(&pf_info, uplink_phy_port_id, mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_mcode_switch_set"); + + return DPP_OK; +} + 
+ZXIC_UINT32 diag_dpp_tm_pport_mcode_switch_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_mcode_switch_del(&pf_info, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_mcode_switch_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_bc_table_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + if (enable == 1) { + rc = dpp_vport_bond_pf(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_bond_pf"); + } else { + rc = dpp_vport_unbond_pf(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_unbond_pf"); + } + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_bc_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_DEV_T dev = { 0 }; + ZXDH_BC_T bc_table = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_BC_TABLE; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + for (group_id = 0; group_id < BC_GROUP_NUM; group_id++) { + index = (((OWNER_PF_VQM_VFID(pf_info.vport) - PF_VQM_VFID_OFFSET) + << 2) | + group_id); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &bc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + if (bc_table.hit_flag == 1) { + for (i = 0; i < BC_MEMBER_NUM_IN_GROUP; i++) { + if ((bc_table.bc_bitmap & + ((ZXIC_UINT64)(1) + << (BC_MEMBER_NUM_IN_GROUP - 1 - i))) != + 0) { + ZXIC_COMM_PRINT( + "vf %u enable\n", + i + (group_id * + BC_MEMBER_NUM_IN_GROUP)); + } + } + } + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; 
+} + +ZXIC_UINT32 diag_dpp_vport_promisc_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no) +{ + DPP_DEV_T dev = { 0 }; + ZXDH_PROMISC_T promisc_table[4] = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + for (group_id = 0; group_id < BC_GROUP_NUM; group_id++) { + index = (((OWNER_PF_VQM_VFID(pf_info.vport) - PF_VQM_VFID_OFFSET) + << 2) | + group_id); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &promisc_table[group_id]); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + if (promisc_table[group_id].hit_flag == 1) { + for (i = 0; i < PROMISC_MEMBER_NUM_IN_GROUP; i++) { + if ((promisc_table[group_id].bitmap & + ((ZXIC_UINT64)(1) + << (PROMISC_MEMBER_NUM_IN_GROUP - 1 - + i))) != 0) { + ZXIC_COMM_PRINT( + "vf %u enable\n", + i + (group_id * + PROMISC_MEMBER_NUM_IN_GROUP)); + } + } + } + } + if ((promisc_table[0].pf_enable == 1) && + (promisc_table[1].pf_enable == 1) && + (promisc_table[2].pf_enable == 1) && + (promisc_table[3].pf_enable == 1)) { + ZXIC_COMM_PRINT("pf enable\n"); + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_uc_promisc_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_uc_promisc_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_uc_promisc_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_uc_promisc_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + diag_dpp_vport_promisc_table_prt(slot, vport, + ZXDH_SDT_UC_PROMISC_TABLE); + + 
return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_promisc_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 enable) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_mc_promisc_set(&pf_info, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mc_promisc_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_mc_promisc_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + diag_dpp_vport_promisc_table_prt(slot, vport, + ZXDH_SDT_MC_PROMISC_TABLE); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_rdma_trans_item_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5, + ZXIC_UINT16 vhcaId) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_add_rdma_trans_item(&pf_info, mac, vhcaId); + ZXIC_COMM_CHECK_RC(rc, "dpp_add_rdma_trans_item"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_rdma_trans_item_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 mac0, ZXIC_UINT8 mac1, + ZXIC_UINT8 mac2, ZXIC_UINT8 mac3, + ZXIC_UINT8 mac4, ZXIC_UINT8 mac5) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT8 mac[6]; + ZXIC_UINT32 rc = DPP_OK; + + mac[0] = mac0; + mac[1] = mac1; + mac[2] = mac2; + mac[3] = mac3; + mac[4] = mac4; + mac[5] = mac5; + + rc = dpp_del_rdma_trans_item(&pf_info, mac); + ZXIC_COMM_CHECK_RC(rc, "dpp_del_rdma_trans_item"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_pcie_channel_prt(ZXIC_VOID) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 slot = 0; + ZXIC_UINT32 channel = 0; + DPP_DEV_CFG_T *p_dev_info = NULL; + DPP_DEV_MGR_T *p_dev_mgr = NULL; + DPP_PCIE_CHANNEL_T *p_pcie = NULL; + DPP_SE_CFG *p_se_cfg = NULL; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_INDEX_UPPER(dev_id, DPP_DEV_CHANNEL_MAX - 1); + + p_dev_mgr = dpp_dev_mgr_get(); + 
ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_mgr); + p_dev_info = p_dev_mgr->p_dev_array[dev_id]; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_dev_info); + + for (slot = 0; slot < DPP_PCIE_SLOT_MAX; slot++) { + for (channel = 0; channel < DPP_PCIE_CHANNEL_MAX; channel++) { + if (p_dev_info->pcie_channel[slot][channel].is_used) { + p_pcie = + &p_dev_info->pcie_channel[slot][channel]; + ZXIC_COMM_PRINT( + "\n------------PCIE SLOT[%d] CHANNEL[%d]--------------------\n", + slot, channel); + ZXIC_COMM_PRINT( + "|slot:%d vport:0x%x pcie_id:0x%x\n", + p_pcie->slot, p_pcie->vport, + p_pcie->pcie_id); + ZXIC_COMM_PRINT( + "|base_addr:0x%llx offset_addr:0x%llx\n", + p_pcie->base_addr, p_pcie->offset_addr); + ZXIC_COMM_PRINT("|hash_index:0x%x\n", + p_pcie->hash_index); + ZXIC_COMM_PRINT( + "|dma_size:0x%x dma_phy_addr:0x%llx dma_vir_addr:0x%llx\n", + p_pcie->dump_dma_size, + p_pcie->dump_dma_phy_addr, + p_pcie->dump_dma_vir_addr); + } + } + } + + for (slot = 0; slot < DPP_PCIE_SLOT_MAX; slot++) { + ZXIC_COMM_MEMSET_S(&dev, sizeof(DPP_DEV_T), 0x0, + sizeof(DPP_DEV_T)); + dev.pcie_channel.slot = slot; + p_se_cfg = dpp_apt_get_se_cfg(&dev); + if (p_se_cfg != NULL) { + p_pcie = &(p_se_cfg->dev.pcie_channel); + ZXIC_COMM_PRINT( + "\n------------SE CFG SLOT[%d] USED[%d]--------------------\n", + slot, p_pcie->is_used); + ZXIC_COMM_PRINT("|slot:%d vport:0x%x pcie_id:0x%x\n", + p_pcie->slot, p_pcie->vport, + p_pcie->pcie_id); + ZXIC_COMM_PRINT( + "|base_addr:0x%llx offset_addr:0x%llx\n", + p_pcie->base_addr, p_pcie->offset_addr); + } + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_hash_stat_prt(ZXIC_UINT32 slot_id, ZXIC_UINT32 fun_id) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_SE_CFG *p_se_cfg = NULL; + DPP_DEV_T dev = { 0 }; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_PCIE_SLOT_MAX - 1); + + /* 获取 g_se_cfg */ + dev.device_id = 0; + dev.pcie_channel.slot = 
slot_id; + rc = dpp_se_cfg_get(&dev, &p_se_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN(p_func_info, fun_id, FUN_HASH); + p_hash_cfg = (DPP_HASH_CFG *)p_func_info->fun_ptr; + + ZXIC_COMM_PRINT("------------slot[%d]--------------------\n", slot_id); + ZXIC_COMM_PRINT("|insert ok num is:%d |\n", + p_hash_cfg->hash_stat.insert_ok); + ZXIC_COMM_PRINT("|insert ddr num is:%d |\n", + p_hash_cfg->hash_stat.insert_ddr); + ZXIC_COMM_PRINT("|insert zcell num is:%d |\n", + p_hash_cfg->hash_stat.insert_zcell); + ZXIC_COMM_PRINT("|insert zreg num is:%d |\n", + p_hash_cfg->hash_stat.insert_zreg); + ZXIC_COMM_PRINT("|insert same num is:%d |\n", + p_hash_cfg->hash_stat.insert_same); + ZXIC_COMM_PRINT("|insert fail num is:%d |\n", + p_hash_cfg->hash_stat.insert_fail); + ZXIC_COMM_PRINT("|delete ok num is:%d |\n", + p_hash_cfg->hash_stat.delete_ok); + ZXIC_COMM_PRINT("|delete fail num is:%d |\n", + p_hash_cfg->hash_stat.delete_fail); + ZXIC_COMM_PRINT("|search ok num is:%d |\n", + p_hash_cfg->hash_stat.search_ok); + ZXIC_COMM_PRINT("|search fail num is:%d |\n", + p_hash_cfg->hash_stat.search_fail); + ZXIC_COMM_PRINT("--------------------------------\n"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_hash_stat_clr(ZXIC_UINT32 slot_id, ZXIC_UINT32 fun_id) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_SE_CFG *p_se_cfg = NULL; + DPP_DEV_T dev = { 0 }; + DPP_HASH_CFG *p_hash_cfg = NULL; + FUNC_ID_INFO *p_func_info = NULL; + + ZXIC_COMM_CHECK_INDEX(fun_id, HASH_FUNC_ID_MIN, HASH_FUNC_ID_NUM - 1); + ZXIC_COMM_CHECK_INDEX(slot_id, 0, DPP_PCIE_SLOT_MAX - 1); + + /* 获取 g_se_cfg */ + dev.device_id = 0; + dev.pcie_channel.slot = slot_id; + rc = dpp_se_cfg_get(&dev, &p_se_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_se_cfg_get"); + ZXIC_COMM_CHECK_POINT(p_se_cfg); + + p_func_info = DPP_GET_FUN_INFO(p_se_cfg, fun_id); + DPP_SE_CHECK_FUN(p_func_info, fun_id, FUN_HASH); + p_hash_cfg = (DPP_HASH_CFG 
*)p_func_info->fun_ptr; + ZXIC_COMM_CHECK_POINT(p_hash_cfg); + + p_hash_cfg->hash_stat.insert_ok = 0; + p_hash_cfg->hash_stat.insert_ddr = 0; + p_hash_cfg->hash_stat.insert_zcell = 0; + p_hash_cfg->hash_stat.insert_zreg = 0; + p_hash_cfg->hash_stat.insert_same = 0; + p_hash_cfg->hash_stat.insert_fail = 0; + p_hash_cfg->hash_stat.delete_ok = 0; + p_hash_cfg->hash_stat.delete_fail = 0; + p_hash_cfg->hash_stat.search_ok = 0; + p_hash_cfg->hash_stat.search_fail = 0; + + return DPP_OK; +} + +DPP_STATUS diag_dpp_hash_item_prt(ZXIC_UINT32 slot, ZXIC_UINT32 sdt_no) +{ + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 rc = 0; + ZXIC_UINT8 key_valid = 0; + ZXIC_UINT32 table_id = 0; + ZXIC_UINT32 key_type = 0; + DPP_DEV_T dev = { 0 }; + + D_NODE *p_node = NULL; + ZXIC_RB_TN *p_rb_tn = NULL; + D_HEAD *p_head_hash_rb = NULL; + DPP_HASH_CFG *p_hash_cfg = NULL; + DPP_HASH_RBKEY_INFO *p_rbkey = NULL; + SE_APT_CALLBACK_T *pAptCallback = NULL; + + DPP_HASH_ENTRY hash_entry = { 0 }; + HASH_ENTRY_CFG hash_entry_cfg = { 0 }; + ZXDH_L2_ENTRY_T l2_entry = { 0 }; + ZXDH_MC_T mc_entry = { 0 }; + ZXDH_RDMA_TRANS_T rdma_trans = { 0 }; + ZXIC_UINT8 key[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 rst[HASH_RST_MAX] = { 0 }; + + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, dev_id, 0, DPP_DEV_CHANNEL_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, sdt_no, 0, DPP_DEV_SDT_ID_MAX - 1); + ZXIC_COMM_CHECK_DEV_INDEX(dev_id, slot, 0, DPP_PCIE_SLOT_MAX - 1); + + ZXIC_COMM_MEMSET_S(&hash_entry, sizeof(DPP_HASH_ENTRY), 0x0, + sizeof(DPP_HASH_ENTRY)); + ZXIC_COMM_MEMSET_S(&hash_entry_cfg, sizeof(HASH_ENTRY_CFG), 0x0, + sizeof(HASH_ENTRY_CFG)); + ZXIC_COMM_MEMSET_S(&l2_entry, sizeof(ZXDH_L2_ENTRY_T), 0x0, + sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_MEMSET_S(&mc_entry, sizeof(ZXDH_MC_T), 0x0, + sizeof(ZXDH_MC_T)); + ZXIC_COMM_MEMSET_S(&rdma_trans, sizeof(ZXDH_RDMA_TRANS_T), 0x0, + sizeof(ZXDH_RDMA_TRANS_T)); + ZXIC_COMM_MEMSET_S(&dev, sizeof(DPP_DEV_T), 0x0, sizeof(DPP_DEV_T)); + ZXIC_COMM_MEMSET_S(key, sizeof(key), 0x0, sizeof(key)); + 
ZXIC_COMM_MEMSET_S(rst, sizeof(rst), 0x0, sizeof(rst)); + + //从sdt_no中获取hash配置 + dev.device_id = dev_id; + dev.pcie_channel.slot = slot; + rc = dpp_hash_get_hash_info_from_sdt(&dev, sdt_no, &hash_entry_cfg); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "dpp_hash_get_hash_info_from_sdt"); + + p_hash_cfg = hash_entry_cfg.p_hash_cfg; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_hash_cfg); + + pAptCallback = dpp_apt_get_func(&dev, sdt_no); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, pAptCallback); + + hash_entry.p_key = key; + hash_entry.p_rst = rst; + + p_head_hash_rb = &p_hash_cfg->hash_rb.tn_list; + p_node = p_head_hash_rb->p_next; + while (p_node) { + p_rb_tn = (ZXIC_RB_TN *)p_node->data; + p_node = p_node->next; + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_rb_tn); + p_rbkey = (DPP_HASH_RBKEY_INFO *)p_rb_tn->p_key; + key_valid = DPP_GET_HASH_KEY_VALID(p_rbkey->key); + table_id = DPP_GET_HASH_TBL_ID(p_rbkey->key); + key_type = DPP_GET_HASH_KEY_TYPE(p_rbkey->key); + if ((!key_valid) || (table_id != hash_entry_cfg.table_id) || + (key_type != hash_entry_cfg.key_type)) { + continue; + } + + ZXIC_COMM_MEMCPY_S(hash_entry.p_key, HASH_KEY_MAX, p_rbkey->key, + HASH_KEY_MAX); + ZXIC_COMM_MEMCPY_S(hash_entry.p_rst, HASH_RST_MAX, p_rbkey->rst, + HASH_RST_MAX); + if ((sdt_no >= ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0) && + (sdt_no <= ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT3)) { + rc = pAptCallback->se_func_info.hashFunc.hash_get_func( + &l2_entry, &hash_entry); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "hash_get_func"); + + ZXIC_COMM_PRINT( + "slot:%d sdt:%d vqm_vfid:0x%x uni-mac:%02x:%02x:%02x:%02x:%02x:%02x\n", + slot, sdt_no, l2_entry.entry.vqm_vfid, + l2_entry.key.dmac_addr[0], + l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], + l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], + l2_entry.key.dmac_addr[5]); + } + + if ((sdt_no >= ZXDH_SDT_MC_TABLE_PHYPORT0) && + (sdt_no <= ZXDH_SDT_MC_TABLE_PHYPORT3)) { + rc = pAptCallback->se_func_info.hashFunc.hash_get_func( + &mc_entry, 
&hash_entry); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "hash_get_func"); + + ZXIC_COMM_PRINT( + "slot:%d sdt:%d bitmap:0x%llx multi-mac:%02x:%02x:%02x:%02x:%02x:%02x\n", + slot, sdt_no, mc_entry.entry.mc_bitmap, + mc_entry.key.mc_mac[0], mc_entry.key.mc_mac[1], + mc_entry.key.mc_mac[2], mc_entry.key.mc_mac[3], + mc_entry.key.mc_mac[4], mc_entry.key.mc_mac[5]); + } + + if (sdt_no == ZXDH_SDT_RDMA_ENTRY_TABLE) { + rc = pAptCallback->se_func_info.hashFunc.hash_get_func( + &rdma_trans, &hash_entry); + ZXIC_COMM_CHECK_DEV_RC_NO_ASSERT(dev_id, rc, + "hash_get_func"); + + ZXIC_COMM_PRINT( + "slot:%d sdt:%d vhca_id:0x%x rdma-mac:%02x:%02x:%02x:%02x:%02x:%02x\n", + slot, sdt_no, rdma_trans.entry.rdma_vhca_id, + rdma_trans.key.mac_addr[0], + rdma_trans.key.mac_addr[1], + rdma_trans.key.mac_addr[2], + rdma_trans.key.mac_addr[3], + rdma_trans.key.mac_addr[4], + rdma_trans.key.mac_addr[5]); + } + } + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vqm_vfid_vlan_init(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vqm_vfid_vlan_init"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vqm_vfid_vlan_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vqm_vfid_vlan_delete"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 attr, ZXIC_UINT32 value) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vqm_vfid_vlan_set(&pf_info, attr, value); + ZXIC_COMM_CHECK_RC(rc, "dpp_vqm_vfid_vlan_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vqm_vfid_vlan_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXDH_VQM_VFID_VLAN_T vqm_vfid_vlan_entry = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = 
DPP_OK; + + rc = dpp_vqm_vfid_vlan_get(&pf_info, &vqm_vfid_vlan_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vqm_vfid_vlan_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", vqm_vfid_vlan_entry.hit_flag); + ZXIC_COMM_PRINT("%02u sriov_business_vlan_filter = %u\n", + VLAN_SRIOV_BUSINESS_VLAN_FILTER, + vqm_vfid_vlan_entry.sriov_business_vlan_filter); + ZXIC_COMM_PRINT( + "%02u sriov_business_qinq_vlan_strip_offload = %u\n", + VLAN_SRIOV_BUSINESS_QINQ_VLAN_STRIP_OFFLOAD, + vqm_vfid_vlan_entry.sriov_business_qinq_vlan_strip_offload); + ZXIC_COMM_PRINT("%02u sriov_business_vlan_strip_offload = %u\n", + VLAN_SRIOV_BUSINESS_VLAN_STRIP_OFFLIAD, + vqm_vfid_vlan_entry.sriov_business_vlan_strip_offload); + ZXIC_COMM_PRINT("%02u sriov_business_vlan_tpid = %u\n", + VLAN_SRIOV_BUSINESS_VLAN_TPID, + vqm_vfid_vlan_entry.sriov_business_vlan_tpid); + ZXIC_COMM_PRINT("%02u sriov_vlan_tpid = %u\n", VLAN_SRIOV_VLAN_TPID, + vqm_vfid_vlan_entry.sriov_vlan_tpid); + ZXIC_COMM_PRINT("%02u sriov_vlan_tci = %u\n", VLAN_SRIOV_VLAN_TCI, + vqm_vfid_vlan_entry.sriov_vlan_tci); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_register_info_prt(ZXIC_VOID) +{ + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { 0 }; + ZXIC_UINT32 print_level = 0; + ZXIC_UINT32 slot_id = 0; + ZXIC_UINT32 ep_id = 0; + ZXIC_UINT32 pf_id = 0; + ZXIC_UINT32 rc = DPP_OK; + print_level = zxic_comm_get_print_level(); + zxic_comm_set_print_level(0); + for (slot_id = 0; slot_id < DPP_PCIE_SLOT_MAX; slot_id++) { + pf_info.slot = slot_id; + for (ep_id = 0; ep_id < 8; ep_id++) { + for (pf_id = 0; pf_id < 8; pf_id++) { + pf_info.vport = ((ep_id << 12) | (pf_id << 8)); + rc = dpp_dev_get(&pf_info, &dev); + if (rc == DPP_OK) { + ZXIC_COMM_PRINT( + "slot: %u vport: 0x%04x device: %s registered.\n", + pf_info.slot, pf_info.vport, + pci_name(dev.pcie_channel + .device)); + } + } + } + } + 
zxic_comm_set_print_level(print_level); + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_mc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_mc_packet_rx_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_mc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_bc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_bc_packet_rx_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_bc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_1588_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_1588_packet_rx_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_1588_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_1588_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_1588_packet_tx_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_1588_packet_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_1588_packet_drop_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 
index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_1588_packet_drop_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_1588_packet_drop_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_1588_enc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_1588_enc_packet_rx_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_1588_enc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_1588_enc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_1588_enc_packet_tx_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_1588_enc_packet_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_spoof_packet_drop_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_spoof_packet_drop_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_spoof_packet_drop_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_mcode_packet_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc 
= DPP_OK; + + rc = dpp_stat_mcode_packet_cnt_get(&pf_info, index, mode, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_mcode_packet_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_RDMA_packet_msg_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_RDMA_packet_msg_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_RDMA_packet_msg_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_RDMA_packet_msg_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_RDMA_packet_msg_rx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_RDMA_packet_msg_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_plcr_packet_drop_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_plcr_packet_drop_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_plcr_packet_drop_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_plcr_packet_drop_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 
index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_plcr_packet_drop_rx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_plcr_packet_drop_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_MTU_packet_msg_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_MTU_packet_msg_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_MTU_packet_msg_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_MTU_packet_msg_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_MTU_packet_msg_rx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_MTU_packet_msg_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_uc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_uc_packet_rx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_uc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", 
__FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_uc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_uc_packet_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_uc_packet_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_mc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_mc_packet_rx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_mc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_mc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_mc_packet_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_mc_packet_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_bc_packet_rx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_bc_packet_rx_cnt_get(&pf_info, index, 
mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_bc_packet_rx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_port_bc_packet_tx_cnt_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT64 byte_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_port_bc_packet_tx_cnt_get(&pf_info, index, mode, + &byte_cnt, &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_port_bc_packet_tx_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu, byte_cnt: %llu\n", __FUNCTION__, + pkt_cnt, byte_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_asn_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_asn_phyport_rx_pkt_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_asn_phyport_rx_pkt_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_psn_phyport_tx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_psn_phyport_tx_pkt_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_psn_phyport_tx_pkt_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_psn_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_psn_phyport_rx_pkt_cnt_get(&pf_info, index, 
mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_psn_phyport_rx_pkt_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_psn_ack_phyport_tx_pkt_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 index, + ZXIC_UINT32 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT64 pkt_cnt = 0; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(&pf_info, index, mode, + &pkt_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_psn_ack_phyport_rx_pkt_cnt_get"); + + ZXIC_COMM_PRINT("[%s] pkt_cnt: %llu\n", __FUNCTION__, pkt_cnt); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_rxfh_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 qid0, ZXIC_UINT32 qid1, + ZXIC_UINT32 qid2, ZXIC_UINT32 qid3, + ZXIC_UINT32 qnum) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 *queue_list = NULL; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + queue_list = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(256 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_POINT(queue_list); + + for (group_id = 0; group_id < RSS_TO_VQID_GROUP_NUM; group_id++) { + queue_list[(group_id * 8) + 0] = qid0; + queue_list[(group_id * 8) + 1] = qid1; + queue_list[(group_id * 8) + 2] = qid2; + queue_list[(group_id * 8) + 3] = qid3; + queue_list[(group_id * 8) + 4] = qid0; + queue_list[(group_id * 8) + 5] = qid1; + queue_list[(group_id * 8) + 6] = qid2; + queue_list[(group_id * 8) + 7] = qid3; + } + + rc = dpp_rxfh_set(&pf_info, 
queue_list, qnum); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_rxfh_set", queue_list); + + ZXIC_COMM_FREE(queue_list); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_rxfh_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_rxfh_del(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_rxfh_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_rxfh_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 *queue_list = NULL; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + queue_list = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC(256 * sizeof(ZXIC_UINT32)); + ZXIC_COMM_CHECK_POINT(queue_list); + + rc = dpp_rxfh_get(&pf_info, queue_list, 256); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_rxfh_get", queue_list); + + for (group_id = 0; group_id < RSS_TO_VQID_GROUP_NUM; group_id++) { + ZXIC_COMM_PRINT( + "[%s] qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x qid%u: 0x%04x\n", + __FUNCTION__, (group_id * 8) + 0, + queue_list[(group_id * 8) + 0], (group_id * 8) + 1, + queue_list[(group_id * 8) + 1], (group_id * 8) + 2, + queue_list[(group_id * 8) + 2], (group_id * 8) + 3, + queue_list[(group_id * 8) + 3], (group_id * 8) + 4, + queue_list[(group_id * 8) + 4], (group_id * 8) + 5, + queue_list[(group_id * 8) + 5], (group_id * 8) + 6, + queue_list[(group_id * 8) + 6], (group_id * 8) + 7, + queue_list[(group_id * 8) + 7]); + } + + ZXIC_COMM_FREE(queue_list); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_thash_key_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 rsk_031_000, ZXIC_UINT32 rsk_063_032, + ZXIC_UINT32 rsk_095_064, ZXIC_UINT32 rsk_127_096, + ZXIC_UINT32 rsk_159_128, ZXIC_UINT32 rsk_191_160, + ZXIC_UINT32 rsk_223_192, ZXIC_UINT32 rsk_255_224, + ZXIC_UINT32 rsk_287_256, ZXIC_UINT32 rsk_319_288) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + DPP_PPU_PPU_COP_THASH_RSK_T ppu_cop_thash_rsk; + 
+ ppu_cop_thash_rsk.rsk_031_000 = rsk_031_000; + ppu_cop_thash_rsk.rsk_063_032 = rsk_063_032; + ppu_cop_thash_rsk.rsk_095_064 = rsk_095_064; + ppu_cop_thash_rsk.rsk_127_096 = rsk_127_096; + ppu_cop_thash_rsk.rsk_159_128 = rsk_159_128; + ppu_cop_thash_rsk.rsk_191_160 = rsk_191_160; + ppu_cop_thash_rsk.rsk_223_192 = rsk_223_192; + ppu_cop_thash_rsk.rsk_255_224 = rsk_255_224; + ppu_cop_thash_rsk.rsk_287_256 = rsk_287_256; + ppu_cop_thash_rsk.rsk_319_288 = rsk_319_288; + + rc = dpp_thash_key_set(&pf_info, (ZXIC_UINT8 *)&ppu_cop_thash_rsk, + sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + ZXIC_COMM_CHECK_RC(rc, "dpp_thash_key_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_thash_key_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_PPU_PPU_COP_THASH_RSK_T ppu_cop_thash_rsk = { 0 }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_thash_key_get(&pf_info, (ZXIC_UINT8 *)&ppu_cop_thash_rsk, + sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + ZXIC_COMM_CHECK_RC(rc, "dpp_thash_key_get"); + + ZXIC_COMM_DBGCNT32_PRINT("rsk_319_288", ppu_cop_thash_rsk.rsk_319_288); + ZXIC_COMM_DBGCNT32_PRINT("rsk_287_256", ppu_cop_thash_rsk.rsk_287_256); + ZXIC_COMM_DBGCNT32_PRINT("rsk_255_224", ppu_cop_thash_rsk.rsk_255_224); + ZXIC_COMM_DBGCNT32_PRINT("rsk_223_192", ppu_cop_thash_rsk.rsk_223_192); + ZXIC_COMM_DBGCNT32_PRINT("rsk_191_160", ppu_cop_thash_rsk.rsk_191_160); + ZXIC_COMM_DBGCNT32_PRINT("rsk_159_128", ppu_cop_thash_rsk.rsk_159_128); + ZXIC_COMM_DBGCNT32_PRINT("rsk_127_096", ppu_cop_thash_rsk.rsk_127_096); + ZXIC_COMM_DBGCNT32_PRINT("rsk_095_064", ppu_cop_thash_rsk.rsk_095_064); + ZXIC_COMM_DBGCNT32_PRINT("rsk_063_032", ppu_cop_thash_rsk.rsk_063_032); + ZXIC_COMM_DBGCNT32_PRINT("rsk_031_000", ppu_cop_thash_rsk.rsk_031_000); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_group_create(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_group_create(&pf_info, lag_id); 
+ ZXIC_COMM_CHECK_RC(rc, "dpp_lag_group_create"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_group_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_group_delete(&pf_info, lag_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_lag_group_delete"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_mode_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, ZXIC_UINT8 mode) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_mode_set(&pf_info, lag_id, mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_lag_mode_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_group_hash_factor_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 factor) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_group_hash_factor_set(&pf_info, lag_id, factor); + ZXIC_COMM_CHECK_RC(rc, "dpp_lag_group_hash_factor_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_group_member_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_group_member_add(&pf_info, lag_id, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_lag_group_member_add"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_group_member_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_lag_group_member_del(&pf_info, lag_id, uplink_phy_port_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_lag_group_member_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_lag_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 lag_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 
rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", lag_entry.hit_flag); + ZXIC_COMM_PRINT("member_num = %u\n", lag_entry.member_num); + ZXIC_COMM_PRINT("bond_mode = %u\n", lag_entry.bond_mode); + ZXIC_COMM_PRINT("hash_factor = %u\n", lag_entry.hash_factor); + ZXIC_COMM_PRINT("member_bitmap = %u\n", lag_entry.member_bitmap); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id, + ZXIC_UINT32 up_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_dscp_map_table_set(&pf_info, port, dscp_id, up_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_dscp_map_table_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_dscp_map_table_del(&pf_info, port, dscp_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_dscp_map_table_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_dscp_map_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_DSCP_TO_UP_TABLE; + ZXIC_UINT32 index = 0x3ff & ((port << 6) | (dscp_id & 0x3f)); + ZXIC_UINT32 rc = 
DPP_OK; + + ZXDH_DSCP_TO_UP_T dscp_to_up = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, &dscp_to_up); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", dscp_to_up.hit_flag); + ZXIC_COMM_PRINT("up = %u\n", dscp_to_up.up); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_up_map_table_set(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id, + ZXIC_UINT32 tc_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_up_map_table_set(&pf_info, port, up_id, tc_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_up_map_table_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_up_map_table_del(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_up_map_table_del(&pf_info, port, up_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_up_map_table_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_tm_pport_up_map_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 port, + ZXIC_UINT32 up_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_UP_TO_TC_TABLE; + ZXIC_UINT32 index = 0x7F & ((port << 3) | (up_id & 0x7)); + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_UP_TO_TC_T up_to_tc = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, &up_to_tc); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", up_to_tc.hit_flag); + ZXIC_COMM_PRINT("tc = %u\n", up_to_tc.tc); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_vhca_id_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_vhca_id_add(&pf_info, vhca_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_vhca_id_add"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_vhca_id_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_vhca_id_del(&pf_info, vhca_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_vhca_id_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_vhca_id_table_prt(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 vhca_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VHCA_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VHCA_T vhca_entry = { 0 }; + + ZXIC_COMM_MEMSET(&vhca_entry, 0, sizeof(ZXDH_VHCA_T)); + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, vhca_id, &vhca_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", vhca_entry.valid); + ZXIC_COMM_PRINT("vqm_vfid = %u\n", vhca_entry.vqm_vfid); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vport_reset(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + 
ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vport_reset(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_reset"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vlan_filter_init(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_vlan_filter_init(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_vlan_filter_init"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_add_vlan_filter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 vlan_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_add_vlan_filter(&pf_info, vlan_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_add_vlan_filter"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_del_vlan_filter(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 vlan_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_del_vlan_filter(&pf_info, vlan_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_del_vlan_filter"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_vlan_filter_table_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 vlan_group_id) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VLAN_FILTER_TABLE; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VLAN_FILTER_T vlan_filter_entry = { 0 }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + index = ((vlan_group_id << 11) | (VQM_VFID(pf_info.vport))); + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_get"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("hit_flag = %u\n", vlan_filter_entry.hit_flag); + for (i = 0; i < sizeof(vlan_filter_entry.vport_bitmap); i++) { + ZXIC_COMM_PRINT("vport_bitmap[%u]: 0x%02x\n", i, + 
vlan_filter_entry.vport_bitmap[i]); + } + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} + +ZXIC_VOID diag_dpp_fd_cfg_pre1(ZXIC_UINT32 smac, ZXIC_UINT32 dmac, + ZXIC_UINT32 sip, ZXIC_UINT32 dip, + ZXIC_UINT32 sport, ZXIC_UINT32 dport) +{ + ZXIC_COMM_MEMSET_S(&g_diag_fd_cfg, sizeof(ZXDH_FD_CFG_T), 0x0, + sizeof(ZXDH_FD_CFG_T)); + + g_diag_fd_cfg.key.dmac[0] = 0x00; + g_diag_fd_cfg.key.dmac[1] = 0x00; + g_diag_fd_cfg.key.dmac[2] = (dmac >> 24) & 0xff; + g_diag_fd_cfg.key.dmac[3] = (dmac >> 16) & 0xff; + g_diag_fd_cfg.key.dmac[4] = (dmac >> 8) & 0xff; + g_diag_fd_cfg.key.dmac[5] = dmac & 0xff; + + g_diag_fd_cfg.key.smac[0] = 0x00; + g_diag_fd_cfg.key.smac[1] = 0x00; + g_diag_fd_cfg.key.smac[2] = (smac >> 24) & 0xff; + g_diag_fd_cfg.key.smac[3] = (smac >> 16) & 0xff; + g_diag_fd_cfg.key.smac[4] = (smac >> 8) & 0xff; + g_diag_fd_cfg.key.smac[5] = smac & 0xff; + + g_diag_fd_cfg.key.dip[0] = (dip >> 24) & 0xff; + g_diag_fd_cfg.key.dip[1] = (dip >> 16) & 0xff; + g_diag_fd_cfg.key.dip[2] = (dip >> 8) & 0xff; + g_diag_fd_cfg.key.dip[3] = dip & 0xff; + + g_diag_fd_cfg.key.sip[0] = (sip >> 24) & 0xff; + g_diag_fd_cfg.key.sip[1] = (sip >> 16) & 0xff; + g_diag_fd_cfg.key.sip[2] = (sip >> 8) & 0xff; + g_diag_fd_cfg.key.sip[3] = sip & 0xff; + + g_diag_fd_cfg.key.dport = dport; + g_diag_fd_cfg.key.sport = sport; + + g_diag_fd_cfg.key.rsv1 = 0xff; + g_diag_fd_cfg.key.rsv2 = 0xffffffff; + g_diag_fd_cfg.key.rsv3 = 0xffff; + + g_diag_fd_cfg.mask.rsv1 = 0x0; + g_diag_fd_cfg.mask.rsv2 = 0x0; + g_diag_fd_cfg.mask.rsv3 = 0x0; +} + +ZXIC_VOID diag_dpp_fd_cfg_pre2(ZXIC_UINT32 ethtype, ZXIC_UINT32 cvlan_pri, + ZXIC_UINT32 vlan, ZXIC_UINT32 vxlan_vni, + ZXIC_UINT32 vqm_vfid) +{ + g_diag_fd_cfg.key.ethtype = ethtype; + g_diag_fd_cfg.key.cvlan_pri = cvlan_pri; + g_diag_fd_cfg.key.cvlanid = vlan; + g_diag_fd_cfg.key.vxlan_vni = vxlan_vni; + g_diag_fd_cfg.key.vqm_vfid = vqm_vfid; +} + +ZXIC_VOID diag_dpp_fd_cfg_pre3(ZXIC_UINT32 action_index, + 
ZXIC_UINT32 action_index2, ZXIC_UINT32 count_id, + ZXIC_UINT32 hash_alg, + ZXIC_UINT32 rss_hash_factor) +{ + g_diag_fd_cfg.as_rlt.hit_flag = 0x00; + g_diag_fd_cfg.as_rlt.action_index = action_index; + g_diag_fd_cfg.as_rlt.action_index2 = action_index2; + g_diag_fd_cfg.as_rlt.count_id = count_id; + g_diag_fd_cfg.as_rlt.hash_alg = hash_alg; + g_diag_fd_cfg.as_rlt.rss_hash_factor = rss_hash_factor; +} + +ZXIC_VOID diag_dpp_fd_cfg_pre4(ZXIC_UINT32 uplink_fd_id, ZXIC_UINT32 v_qid) +{ + g_diag_fd_cfg.as_rlt.uplink_fd_id = uplink_fd_id; + g_diag_fd_cfg.as_rlt.v_qid = v_qid; +} + +ZXIC_UINT32 diag_dpp_fd_cfg_add(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tbl_fd_cfg_add(&pf_info, ZXDH_SDT_FD_CFG_TABLE, + g_diag_fd_index, &g_diag_fd_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_fd_cfg_entry_add"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_cfg_del(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tbl_fd_cfg_del(&pf_info, ZXDH_SDT_FD_CFG_TABLE, index); + ZXIC_COMM_CHECK_RC(rc, "dpp_tbl_fd_cfg_del"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_cfg_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tbl_fd_cfg_get(&pf_info, ZXDH_SDT_FD_CFG_TABLE, index, + &g_diag_fd_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_tbl_fd_cfg_get"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_cfg_search(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tbl_fd_cfg_search(&pf_info, ZXDH_SDT_FD_CFG_TABLE, index, + &g_diag_fd_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_tbl_fd_cfg_search"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_acl_index_req(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = 
DPP_OK; + ZXIC_UINT32 index = 0; + + rc = dpp_fd_acl_index_request(&pf_info, &index); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_acl_index_request"); + + g_diag_fd_index = index; + ZXIC_COMM_PRINT("reuqest index=%u\n", index); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_acl_index_rel(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_fd_acl_index_release(&pf_info, index); + ZXIC_COMM_CHECK_RC(rc, "dpp_fd_acl_index_release"); + + ZXIC_COMM_PRINT("slot[%u] vport[0x%x] release index= %u\n", slot, vport, + index); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_fd_acl_all_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_fd_acl_all_delete(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_fd_acl_all_delete"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_dtb_stat_ppu_cnt_clr(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 rd_mode, + ZXIC_UINT32 counter_id, + ZXIC_UINT32 num) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 queue = 0; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_dtb_stat_ppu_cnt_clr(&dev, queue, rd_mode, counter_id, num); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_stat_ppu_cnt_clr"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_fd_acl_stat_clear(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_fd_acl_stat_clear(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_fd_acl_stat_clear"); + + return DPP_OK; +} + +ZXIC_VOID diag_dpp_acl_glb_data_prt(ZXIC_VOID) +{ + ZXDH_FD_CFG_T *p_fd_cfg = NULL; + + p_fd_cfg = &g_diag_fd_cfg; + ZXIC_COMM_PRINT("key--smac: 0x%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->key.smac[0], p_fd_cfg->key.smac[1], + 
p_fd_cfg->key.smac[2], p_fd_cfg->key.smac[3], + p_fd_cfg->key.smac[4], p_fd_cfg->key.smac[5]); + ZXIC_COMM_PRINT("key--dmac: 0x%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->key.dmac[0], p_fd_cfg->key.dmac[1], + p_fd_cfg->key.dmac[2], p_fd_cfg->key.dmac[3], + p_fd_cfg->key.dmac[4], p_fd_cfg->key.dmac[5]); + ZXIC_COMM_PRINT("key--sip: 0x%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:", + p_fd_cfg->key.sip[0], p_fd_cfg->key.sip[1], + p_fd_cfg->key.sip[2], p_fd_cfg->key.sip[3], + p_fd_cfg->key.sip[4], p_fd_cfg->key.sip[5], + p_fd_cfg->key.sip[6], p_fd_cfg->key.sip[7]); + ZXIC_COMM_PRINT("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->key.sip[8], p_fd_cfg->key.sip[9], + p_fd_cfg->key.sip[10], p_fd_cfg->key.sip[11], + p_fd_cfg->key.sip[12], p_fd_cfg->key.sip[13], + p_fd_cfg->key.sip[14], p_fd_cfg->key.sip[15]); + ZXIC_COMM_PRINT("key--dip: 0x%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:", + p_fd_cfg->key.dip[0], p_fd_cfg->key.dip[1], + p_fd_cfg->key.dip[2], p_fd_cfg->key.dip[3], + p_fd_cfg->key.dip[4], p_fd_cfg->key.dip[5], + p_fd_cfg->key.dip[6], p_fd_cfg->key.dip[7]); + ZXIC_COMM_PRINT("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->key.dip[8], p_fd_cfg->key.dip[9], + p_fd_cfg->key.dip[10], p_fd_cfg->key.dip[11], + p_fd_cfg->key.dip[12], p_fd_cfg->key.dip[13], + p_fd_cfg->key.dip[14], p_fd_cfg->key.dip[15]); + ZXIC_COMM_PRINT("key--ethtype: 0x%04x\n", p_fd_cfg->key.ethtype); + ZXIC_COMM_PRINT("key--cvlan_pri: 0x%02x\n", p_fd_cfg->key.cvlan_pri); + ZXIC_COMM_PRINT("key--cvlanid: 0x%04x\n", p_fd_cfg->key.cvlanid); + ZXIC_COMM_PRINT("key--tos: 0x%02x\n", p_fd_cfg->key.tos); + ZXIC_COMM_PRINT("key--proto: 0x%02x\n", p_fd_cfg->key.proto); + ZXIC_COMM_PRINT("key--fragment: 0x%02x\n", p_fd_cfg->key.fragment); + ZXIC_COMM_PRINT("key--sport: 0x%04x\n", p_fd_cfg->key.sport); + ZXIC_COMM_PRINT("key--dport: 0x%04x\n", p_fd_cfg->key.dport); + ZXIC_COMM_PRINT("key--vxlan_vni: 0x%08x\n", p_fd_cfg->key.vxlan_vni); + ZXIC_COMM_PRINT("key--vqm_vfid: 0x%04x\n", 
p_fd_cfg->key.vqm_vfid); + + ZXIC_COMM_PRINT("mask--smac: 0x%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->mask.smac[0], p_fd_cfg->mask.smac[1], + p_fd_cfg->mask.smac[2], p_fd_cfg->mask.smac[3], + p_fd_cfg->mask.smac[4], p_fd_cfg->mask.smac[5]); + ZXIC_COMM_PRINT("mask--dmac: 0x%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->mask.dmac[0], p_fd_cfg->mask.dmac[1], + p_fd_cfg->mask.dmac[2], p_fd_cfg->mask.dmac[3], + p_fd_cfg->mask.dmac[4], p_fd_cfg->mask.dmac[5]); + ZXIC_COMM_PRINT("mask--sip: 0x%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:", + p_fd_cfg->mask.sip[0], p_fd_cfg->mask.sip[1], + p_fd_cfg->mask.sip[2], p_fd_cfg->mask.sip[3], + p_fd_cfg->mask.sip[4], p_fd_cfg->mask.sip[5], + p_fd_cfg->mask.sip[6], p_fd_cfg->mask.sip[7]); + ZXIC_COMM_PRINT("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->mask.sip[8], p_fd_cfg->mask.sip[9], + p_fd_cfg->mask.sip[10], p_fd_cfg->mask.sip[11], + p_fd_cfg->mask.sip[12], p_fd_cfg->mask.sip[13], + p_fd_cfg->mask.sip[14], p_fd_cfg->mask.sip[15]); + ZXIC_COMM_PRINT("mask--dip: 0x%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:", + p_fd_cfg->mask.dip[0], p_fd_cfg->mask.dip[1], + p_fd_cfg->mask.dip[2], p_fd_cfg->mask.dip[3], + p_fd_cfg->mask.dip[4], p_fd_cfg->mask.dip[5], + p_fd_cfg->mask.dip[6], p_fd_cfg->mask.dip[7]); + ZXIC_COMM_PRINT("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x.\n", + p_fd_cfg->mask.dip[8], p_fd_cfg->mask.dip[9], + p_fd_cfg->mask.dip[10], p_fd_cfg->mask.dip[11], + p_fd_cfg->mask.dip[12], p_fd_cfg->mask.dip[13], + p_fd_cfg->mask.dip[14], p_fd_cfg->mask.dip[15]); + ZXIC_COMM_PRINT("mask--ethtype: 0x%04x\n", p_fd_cfg->mask.ethtype); + ZXIC_COMM_PRINT("mask--cvlan_pri: 0x%02x\n", p_fd_cfg->mask.cvlan_pri); + ZXIC_COMM_PRINT("mask--cvlanid: 0x%04x\n", p_fd_cfg->mask.cvlanid); + ZXIC_COMM_PRINT("mask--tos: 0x%02x\n", p_fd_cfg->mask.tos); + ZXIC_COMM_PRINT("mask--proto: 0x%02x\n", p_fd_cfg->mask.proto); + ZXIC_COMM_PRINT("mask--fragment: 0x%02x\n", p_fd_cfg->mask.fragment); + ZXIC_COMM_PRINT("mask--sport: 0x%04x\n", 
p_fd_cfg->mask.sport); + ZXIC_COMM_PRINT("mask--dport: 0x%04x\n", p_fd_cfg->mask.dport); + ZXIC_COMM_PRINT("mask--vxlan_vni: 0x%08x\n", p_fd_cfg->mask.vxlan_vni); + ZXIC_COMM_PRINT("mask--vqm_vfid: 0x%04x\n", p_fd_cfg->mask.vqm_vfid); + + ZXIC_COMM_PRINT("rst--hit_flag: 0x%02x\n", + p_fd_cfg->as_rlt.hit_flag); + ZXIC_COMM_PRINT("rst--action_index: 0x%02x\n", + p_fd_cfg->as_rlt.action_index); + ZXIC_COMM_PRINT("rst--action_index2: 0x%02x\n", + p_fd_cfg->as_rlt.action_index2); + ZXIC_COMM_PRINT("rst--v_qid: 0x%04x\n", + p_fd_cfg->as_rlt.v_qid); + ZXIC_COMM_PRINT("rst--uplink_fd_id: 0x%08x\n", + p_fd_cfg->as_rlt.uplink_fd_id); + ZXIC_COMM_PRINT("rst--count_id: 0x%08x\n", + p_fd_cfg->as_rlt.count_id); + ZXIC_COMM_PRINT("rst--hash_alg: 0x%02x\n", + p_fd_cfg->as_rlt.hash_alg); + ZXIC_COMM_PRINT("rst--rss_hash_factor:0x%02x\n", + p_fd_cfg->as_rlt.rss_hash_factor); + + return; +} + +DPP_STATUS diag_dpp_se_eram_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_CHAR res_name[32] = "STD_NIC_RES"; + DPP_APT_ERAM_RES_INIT_T tEramResInit = { 0 }; + DPP_APT_ERAM_TABLE_T *pEramResTemp = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + ZXIC_COMM_MEMSET_S(&tEramResInit, sizeof(DPP_APT_ERAM_RES_INIT_T), 0x0, + sizeof(DPP_APT_ERAM_RES_INIT_T)); + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(DEV_ID(&dev), p_se_res); + tEramResInit.eram_res = p_se_res->eram_tbl; + tEramResInit.tbl_num = p_se_res->eram_num; + + ZXIC_COMM_PRINT( + "#######################[%s] ERAM_NUM[%u]#######################\n", + res_name, p_se_res->eram_num); + for (index = 0; index < tEramResInit.tbl_num; index++) { + pEramResTemp = tEramResInit.eram_res + index; + ZXIC_COMM_CHECK_POINT(pEramResTemp); + ZXIC_COMM_PRINT( + 
"#######################sdt_no=%d#######################\n", + pEramResTemp->sdtNo); + ZXIC_COMM_PRINT( + " eram_base_addr=0x%x eram_table_depth=0x%x\n", + pEramResTemp->eRamSdt.eram_base_addr, + pEramResTemp->eRamSdt.eram_table_depth); + ZXIC_COMM_PRINT( + " eram_mode=%u(0:1bit 1:32bit 2:64bit 3:128bit 4:2bit 5:4bit 6:8bit 7:16bit)\n", + pEramResTemp->eRamSdt.eram_mode); + ZXIC_COMM_PRINT( + " opr_mode=%u(0:128bit 1:64bit 2:1bit 3:32bit)\n", + pEramResTemp->opr_mode); + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_hash_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 zblk_idx[32] = { 0 }; + ZXIC_UINT32 ddr_en = 0; + ZXIC_UINT32 array[4] = { 0 }; + ZXIC_CHAR res_name[32] = "STD_NIC_RES"; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + DPP_APT_HASH_RES_INIT_T tHashResInit = { 0 }; + DPP_APT_HASH_FUNC_RES_T *pHashFuncTemp = NULL; + DPP_APT_HASH_BULK_RES_T *pHashBulkTemp = NULL; + DPP_APT_HASH_TABLE_T *pHashResTemp = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + dev_id = DEV_ID(&dev); + + ZXIC_COMM_MEMSET_S(&tHashResInit, sizeof(DPP_APT_HASH_RES_INIT_T), 0x0, + sizeof(DPP_APT_HASH_RES_INIT_T)); + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + tHashResInit.func_res = p_se_res->hash_func; + tHashResInit.bulk_res = p_se_res->hash_bulk; + tHashResInit.tbl_res = p_se_res->hash_tbl; + tHashResInit.func_num = p_se_res->hash_func_num; + tHashResInit.bulk_num = p_se_res->hash_bulk_num; + tHashResInit.tbl_num = p_se_res->hash_tbl_num; + + ZXIC_COMM_PRINT("#######################[%s]#######################\n", + res_name); + ZXIC_COMM_PRINT( + "#######################hash func info#######################\n"); + for (index = 0; index < tHashResInit.func_num; index++) { + pHashFuncTemp = 
tHashResInit.func_res + index; + ZXIC_COMM_CHECK_POINT(pHashFuncTemp); + rc = dpp_apt_get_zblock_index(pHashFuncTemp->zblk_bitmap, + zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "se_apt_get_zblock_index"); + ddr_en = (0 == pHashFuncTemp->ddr_dis) ? 1 : 0; + array[pHashFuncTemp->func_id % 4] = ddr_en; + ZXIC_COMM_PRINT("[hash%u] ddr_en:%u zblock_num:%u zblk_index:", + pHashFuncTemp->func_id, ddr_en, + pHashFuncTemp->zblk_num); + for (i = 0; i < pHashFuncTemp->zblk_num; i++) { + ZXIC_COMM_PRINT("%u ", zblk_idx[i]); + } + ZXIC_COMM_PRINT("\n"); + } + + ZXIC_COMM_PRINT( + "#######################hash bulk info#######################\n"); + for (index = 0; index < tHashResInit.bulk_num; index++) { + pHashBulkTemp = tHashResInit.bulk_res + index; + ZXIC_COMM_CHECK_POINT(pHashBulkTemp); + ZXIC_COMM_PRINT("[hash%u][bulk%u]:zcell_num=%u zreg_num=%u\n", + pHashBulkTemp->func_id, pHashBulkTemp->bulk_id, + pHashBulkTemp->zcell_num, + pHashBulkTemp->zreg_num); + if (array[pHashBulkTemp->func_id % 4]) { + ZXIC_COMM_PRINT( + " ddr_baddr=0x%x item_num=0x%x width_mode=%u(1:256b 2:512b) crc_sel=%u ecc_en=%u\n", + pHashBulkTemp->ddr_baddr, + pHashBulkTemp->ddr_item_num, + pHashBulkTemp->ddr_width_mode, + pHashBulkTemp->ddr_crc_sel, + pHashBulkTemp->ddr_ecc_en); + } + } + + ZXIC_COMM_PRINT( + "#######################hash table num[%u]#######################\n", + p_se_res->hash_tbl_num); + ZXIC_COMM_PRINT( + "------[table_width] 1:128bit 2:256bit 3:512bit-----\n"); + ZXIC_COMM_PRINT( + "------[rsp_mode] 0:32bit 1:64bit 2:128bit 3:256bit-----\n"); + ZXIC_COMM_PRINT( + "------[tbl_flag] bit0:age_en bit1:learn_en bit2:mc_write_en-----\n"); + for (index = 0; index < tHashResInit.tbl_num; index++) { + pHashResTemp = tHashResInit.tbl_res + index; + ZXIC_COMM_CHECK_POINT(pHashResTemp); + ZXIC_COMM_PRINT( + "[sdt%u][hash%u][table%u]:table_width=%u key_size=%u rsp_mode=%u sdt_parter=0x%x\n", + pHashResTemp->sdtNo, pHashResTemp->hashSdt.hash_id, + pHashResTemp->hashSdt.hash_table_id, + 
pHashResTemp->hashSdt.hash_table_width, + pHashResTemp->hashSdt.key_size, + pHashResTemp->hashSdt.rsp_mode, + pHashResTemp->sdt_partner); + ZXIC_COMM_PRINT( + " tbl_flag=%u alive=%u alive_baddr=0x%x learn_en=%u\n", + pHashResTemp->tbl_flag, + pHashResTemp->hashSdt.keep_alive, + pHashResTemp->hashSdt.keep_alive_baddr, + pHashResTemp->hashSdt.learn_en); + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_acl_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dev_id = 0; + DPP_APT_ACL_RES_INIT_T tAclResInit = { 0 }; + DPP_APT_ACL_TABLE_T *pAclResTemp = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + ZXIC_CHAR res_name[32] = "STD_NIC_RES"; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + dev_id = DEV_ID(&dev); + + ZXIC_COMM_MEMSET_S(&tAclResInit, sizeof(DPP_APT_ACL_RES_INIT_T), 0x0, + sizeof(DPP_APT_ACL_RES_INIT_T)); + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + tAclResInit.acl_res = p_se_res->acl_tbl; + tAclResInit.tbl_num = p_se_res->acl_num; + + ZXIC_COMM_PRINT( + "#######################%s:ACL table info ACL_NUM[%u]#######################\n", + res_name, p_se_res->acl_num); + ZXIC_COMM_PRINT( + "------[key_mode]0:640bit 1:320bit 2:160bit 3:80bit------\n"); + ZXIC_COMM_PRINT( + "------[as_rsp_mode]0:1b 1:32b 2:64b 3:128b 4:2b 5:4b 6:8b 7:16b------\n"); + for (index = 0; index < tAclResInit.tbl_num; index++) { + pAclResTemp = tAclResInit.acl_res + index; + ZXIC_COMM_CHECK_POINT(pAclResTemp); + ZXIC_COMM_PRINT("[sdt%u][tbl%u]:block_num=%u block_index=", + pAclResTemp->sdtNo, + pAclResTemp->aclSdt.etcam_table_id, + pAclResTemp->aclRes.block_num); + for (i = 0; (i < (pAclResTemp->aclRes.block_num)) && + (i < DPP_ETCAM_BLOCK_NUM); + i++) { + ZXIC_COMM_PRINT("%u ", + pAclResTemp->aclRes.block_index[i]); + } + 
ZXIC_COMM_PRINT("\n"); + ZXIC_COMM_PRINT( + " key_mode=%u depth=%u entry_num=%u pri_mode=%u as_en=%u as_eram_baddr=0x%x as_rsp_mode=%u\n", + pAclResTemp->aclSdt.etcam_key_mode, + pAclResTemp->aclSdt.etcam_table_depth, + pAclResTemp->aclRes.entry_num, + pAclResTemp->aclRes.pri_mode, pAclResTemp->aclSdt.as_en, + pAclResTemp->aclSdt.as_eram_baddr, + pAclResTemp->aclSdt.as_rsp_mode); + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_ddr_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 dev_id = 0; + DPP_APT_DDR_RES_INIT_T tDdrResInit = { 0 }; + DPP_APT_DDR_TABLE_T *pDdrResTemp = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + ZXIC_CHAR res_name[32] = "STD_NIC_RES"; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + dev_id = DEV_ID(&dev); + + ZXIC_COMM_MEMSET_S(&tDdrResInit, sizeof(DPP_APT_DDR_RES_INIT_T), 0x0, + sizeof(DPP_APT_DDR_RES_INIT_T)); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + tDdrResInit.ddr_res = p_se_res->ddr_tbl; + tDdrResInit.tbl_num = p_se_res->ddr_num; + + ZXIC_COMM_PRINT( + "#######################%s:DDR table info DDR_NUM[%u]#######################\n", + res_name, p_se_res->ddr_num); + for (index = 0; index < tDdrResInit.tbl_num; index++) { + pDdrResTemp = tDdrResInit.ddr_res + index; + ZXIC_COMM_CHECK_POINT(pDdrResTemp); + ZXIC_COMM_PRINT( + "[sdt%u]:baddr=0x%x tbl_depth=0x%x rw_len=%u(0:128b 1:256b 2:512b) ecc_en=%u\n", + pDdrResTemp->sdtNo, pDdrResTemp->eDdrSdt.ddr3_base_addr, + pDdrResTemp->ddr_table_depth, + pDdrResTemp->eDdrSdt.ddr3_rw_len, + pDdrResTemp->eDdrSdt.ddr3_ecc_en); + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_lpm_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 dev_id = 0; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 lpm_flags = 0; + ZXIC_UINT32 
as_en = 0; + ZXIC_UINT32 as_mode = 0; + ZXIC_UINT32 v4_ddr_en = 0; + ZXIC_UINT32 v6_ddr_en = 0; + ZXIC_UINT32 zblk_idx[32] = { 0 }; + + DPP_APT_LPM_RES_INIT_T tLpmResInit = { 0 }; + DPP_APT_LPM_TABLE_T *pLpmResTemp = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + dev_id = DEV_ID(&dev); + + ZXIC_COMM_MEMSET_S(&tLpmResInit, sizeof(DPP_APT_LPM_RES_INIT_T), 0x0, + sizeof(DPP_APT_LPM_RES_INIT_T)); + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + tLpmResInit.lpm_res = p_se_res->lpm_tbl; + tLpmResInit.glb_res = &p_se_res->lpm_global_res; + tLpmResInit.tbl_num = p_se_res->lpm_num; + + lpm_flags = tLpmResInit.glb_res->lpm_flags; + ZXIC_COMM_UINT32_GET_BITS(as_en, lpm_flags, LPM_FLAG_RT_HANDLE_START, + LPM_FLAG_RT_HANDLE_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(v4_ddr_en, lpm_flags, LPM4_FLAG_DDR_EN_START, + LPM4_FLAG_DDR_EN_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(v6_ddr_en, lpm_flags, LPM6_FLAG_DDR_EN_START, + LPM6_FLAG_DDR_EN_WIDTH); + ZXIC_COMM_UINT32_GET_BITS(as_mode, lpm_flags, LPM_FLAG_AS_MODE_START, + LPM_FLAG_AS_MODE_WIDTH); + + ZXIC_COMM_PRINT( + "#######################LPM table info:lpm_flags=%u#######################\n", + tLpmResInit.glb_res->lpm_flags); + if (v4_ddr_en) { + ZXIC_COMM_PRINT( + " ddr4_baddr=0x%x ddr4_item_num=0x%x ecc=%u\n", + tLpmResInit.glb_res->ddr4_baddr, + tLpmResInit.glb_res->ddr4_item_num, + tLpmResInit.glb_res->ddr4_ecc_en); + } + + if (v6_ddr_en) { + ZXIC_COMM_PRINT( + " ddr6_baddr=0x%x ddr6_item_num=0x%x ecc=%u\n", + tLpmResInit.glb_res->ddr6_baddr, + tLpmResInit.glb_res->ddr6_item_num, + tLpmResInit.glb_res->ddr6_ecc_en); + } + + rc = dpp_apt_get_zblock_index(p_se_res->lpm_global_res.zblk_bitmap, + zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "se_apt_get_zblock_index"); + ZXIC_COMM_PRINT("[ipv4&ipv6] share_zblock_num:%u zblk_index:", + 
p_se_res->lpm_global_res.zblk_num); + for (i = 0; i < (p_se_res->lpm_global_res.zblk_num); i++) { + ZXIC_COMM_PRINT("%u ", zblk_idx[i]); + } + ZXIC_COMM_PRINT("\n"); + + rc = dpp_apt_get_zblock_index( + p_se_res->lpm_global_res.mono_ipv4_zblk_bitmap, zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "se_apt_get_zblock_index"); + ZXIC_COMM_PRINT("[ipv4] mono_zblock_num:%u zblk_index:", + p_se_res->lpm_global_res.mono_ipv4_zblk_num); + for (i = 0; i < (p_se_res->lpm_global_res.mono_ipv4_zblk_num); i++) { + ZXIC_COMM_PRINT("%u ", zblk_idx[i]); + } + ZXIC_COMM_PRINT("\n"); + + rc = dpp_apt_get_zblock_index( + p_se_res->lpm_global_res.mono_ipv6_zblk_bitmap, zblk_idx); + ZXIC_COMM_CHECK_DEV_RC(dev_id, rc, "se_apt_get_zblock_index"); + ZXIC_COMM_PRINT("[ipv6] mono_zblock_num:%u zblk_index:", + p_se_res->lpm_global_res.mono_ipv6_zblk_num); + for (i = 0; i < (p_se_res->lpm_global_res.mono_ipv6_zblk_num); i++) { + ZXIC_COMM_PRINT("%u ", zblk_idx[i]); + } + ZXIC_COMM_PRINT("\n"); + + for (index = 0; index < tLpmResInit.tbl_num; index++) { + pLpmResTemp = tLpmResInit.lpm_res + index; + ZXIC_COMM_CHECK_POINT(pLpmResTemp); + ZXIC_COMM_PRINT( + "[sdt%u]:v46_id=%u(0:ipv6 1:ipv4) rsp_mode=%u(0:32b 1:64b 2:128b 3:256b)\n", + pLpmResTemp->sdtNo, pLpmResTemp->lpmSdt.lpm_v46_id, + pLpmResTemp->lpmSdt.rsp_mode); + if (as_en) { + if (as_mode) { + ZXIC_COMM_PRINT( + " as_ddr_baddr=0x%x rsp_mode=%u(0:128b 1:256b) ecc=%u\n", + pLpmResTemp->as_ddr_cfg.baddr, + pLpmResTemp->as_ddr_cfg.rsp_len, + pLpmResTemp->as_ddr_cfg.ecc_en); + } else { + for (i = 0; i < DPP_SMMU0_LPM_AS_TBL_ID_NUM; + i++) { + ZXIC_COMM_PRINT( + " as_eram_baddr=0x%x rsp_mode=%u(0:1b 1:32b 2:64b 3:128b 4:2b 5:4b 6:8b 7:16b)\n", + i, + pLpmResTemp->as_eram_cfg[i] + .baddr, + pLpmResTemp->as_eram_cfg[i] + .rsp_mode); + } + } + } + } + + return DPP_OK; +} + +DPP_STATUS diag_dpp_se_stat_res_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 dev_id = 0; + DPP_APT_SE_RES_T *p_se_res = NULL; + 
DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + dev_id = DEV_ID(&dev); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_DEV_POINT(dev_id, p_se_res); + + ZXIC_COMM_PRINT("eram_baddr(unit:128bit):0x%x\n", + p_se_res->stat_cfg.eram_baddr); + ZXIC_COMM_PRINT("eram_depth(unit:128bit):0x%x\n", + p_se_res->stat_cfg.eram_depth); + ZXIC_COMM_PRINT("ddr_baddr(unit:2k*256bit):0x%x\n", + p_se_res->stat_cfg.ddr_baddr); + ZXIC_COMM_PRINT("ppu_ddr_offset:0x%x\n", + p_se_res->stat_cfg.ppu_ddr_offset); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_stat_item_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT16 stat_item_no) +{ + DPP_STATUS rc = DPP_OK; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + ZXIC_COMM_CHECK_INDEX(stat_item_no, 0, STAT_ITEM_MAX_NUM - 1); + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + + ZXIC_COMM_PRINT("STAT ITEM No. 
%d:\n", stat_item_no); + ZXIC_COMM_PRINT("\tmode: %u(0:64bit 1:128bit)\n", + p_se_res->stat_item[stat_item_no].mode); + ZXIC_COMM_PRINT("\taddr_offset: 0x%x\n", + p_se_res->stat_item[stat_item_no].addr_offset); + ZXIC_COMM_PRINT("\tdepth: 0x%x\n", + p_se_res->stat_item[stat_item_no].depth); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_stat_item_prt_all(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_STATUS rc = DPP_OK; + ZXIC_UINT32 index = 0; + DPP_APT_SE_RES_T *p_se_res = NULL; + DPP_DEV_T dev = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_dev_get(&pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + + if (p_se_res->stat_item_num == 0) { + ZXIC_COMM_PRINT("stat item num is 0!\n"); + return DPP_OK; + } + + for (index = 0; index < STAT_ITEM_MAX_NUM; index++) { + if (p_se_res->stat_item[index].depth) { + ZXIC_COMM_PRINT("STAT ITEM No. %d:\n", index); + ZXIC_COMM_PRINT("\tmode: %u(0:64bit 1:128bit)\n", + p_se_res->stat_item[index].mode); + ZXIC_COMM_PRINT("\taddr_offset: 0x%x\n", + p_se_res->stat_item[index].addr_offset); + ZXIC_COMM_PRINT("\tdepth: 0x%x\n", + p_se_res->stat_item[index].depth); + } + } + + return DPP_OK; +} + +ZXIC_VOID diag_dpp_eram_data_stub(ZXIC_UINT32 data0, ZXIC_UINT32 data1, + ZXIC_UINT32 data2, ZXIC_UINT32 data3) +{ + g_eram_buff[0] = data0; + g_eram_buff[1] = data1; + g_eram_buff[2] = data2; + g_eram_buff[3] = data3; + return; +} + +DPP_STATUS diag_dpp_eram_entry_insert(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_eram_entry_insert(&pf_info, sdt_no, index, + (ZXIC_UINT8 *)g_eram_buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_eram_entry_insert"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_eram_entry_delete(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + 
DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_eram_entry_delete(&pf_info, sdt_no, index); + ZXIC_COMM_CHECK_RC(rc, "dpp_eram_entry_delete"); + + return DPP_OK; +} + +DPP_STATUS diag_dpp_eram_entry_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 sdt_no, ZXIC_UINT32 index) +{ + DPP_STATUS rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 eram_buff[4] = { 0 }; + + ZXIC_COMM_MEMSET_S(eram_buff, sizeof(eram_buff), 0, sizeof(eram_buff)); + rc = dpp_eram_entry_get(&pf_info, sdt_no, index, + (ZXIC_UINT8 *)eram_buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_eram_entry_get"); + + ZXIC_COMM_PRINT("eram_data: 0x%08x-%08x-%08x-%08x\n", eram_buff[0], + eram_buff[1], eram_buff[2], eram_buff[3]); + + return DPP_OK; +} +DPP_STATUS diag_dpp_stat_item_cnt_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 stat_item_no, + ZXIC_UINT32 index, ZXIC_UINT32 rd_mode) +{ + DPP_STATUS rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_stat_item_cnt_get(&pf_info, stat_item_no, index, rd_mode, + &stat_value); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_item_cnt_get"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x stat_item_no: %u index: %u h64_cnt: %llu l64_cnt: %llu success.\n", + __FUNCTION__, slot, vport, stat_item_no, index, + stat_value.stat_cnt_128.pkts, stat_value.stat_cnt_128.bytes); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_glb_cfg_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 glb_cfg_data_0, + ZXIC_UINT32 glb_cfg_data_1, + ZXIC_UINT32 glb_cfg_data_2, + ZXIC_UINT32 glb_cfg_data_3) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_glb_cfg_set_0(&pf_info, glb_cfg_data_0); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_set_0"); + + rc = dpp_glb_cfg_set_1(&pf_info, glb_cfg_data_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_set_1"); + + rc = dpp_glb_cfg_set_2(&pf_info, glb_cfg_data_2); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_set_2"); + + rc = dpp_glb_cfg_set_3(&pf_info, 
glb_cfg_data_3); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_set_3"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_glb_cfg_prt(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 mcode_glb_cfg_0 = 0; + ZXIC_UINT32 mcode_glb_cfg_1 = 0; + ZXIC_UINT32 mcode_glb_cfg_2 = 0; + ZXIC_UINT32 mcode_glb_cfg_3 = 0; + + rc = dpp_glb_cfg_get_0(&pf_info, &mcode_glb_cfg_0); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_get_0"); + + rc = dpp_glb_cfg_get_1(&pf_info, &mcode_glb_cfg_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_get_1"); + + rc = dpp_glb_cfg_get_2(&pf_info, &mcode_glb_cfg_2); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_get_2"); + + rc = dpp_glb_cfg_get_3(&pf_info, &mcode_glb_cfg_3); + ZXIC_COMM_CHECK_RC(rc, "dpp_glb_cfg_get_3"); + + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + ZXIC_COMM_PRINT("pktrx_mcode_glb_cfg_data0 = 0x%08X\n", + mcode_glb_cfg_0); + ZXIC_COMM_PRINT("pktrx_mcode_glb_cfg_data1 = 0x%08X\n", + mcode_glb_cfg_1); + ZXIC_COMM_PRINT("pktrx_mcode_glb_cfg_data2 = 0x%08X\n", + mcode_glb_cfg_2); + ZXIC_COMM_PRINT("pktrx_mcode_glb_cfg_data3 = 0x%08X\n", + mcode_glb_cfg_3); + ZXIC_COMM_PRINT("-----------------------------------------------\n"); + + return DPP_OK; +} +ZXDH_PKT_CAP_RULE g_rule = { 0 }; + +ZXIC_UINT32 diag_dpp_pkt_capture_enable(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXDH_PKT_CAP_POINT capture_pkt_flag) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_enable(&pf_info, capture_pkt_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_enable"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_disable(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXDH_PKT_CAP_POINT capture_pkt_flag) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_disable(&pf_info, capture_pkt_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_disable"); + + return DPP_OK; +} + +ZXIC_UINT32 
diag_dpp_pkt_capture_disable_all(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_disable_all(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_disable_all"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_enable_status_get(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_PKT_CAP_ENABLE_STATUS enable_status = { 0 }; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_enable_status_get(&pf_info, &enable_status); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_enable_status_get"); + + ZXIC_COMM_PRINT("panel_rx_enable_status = %d", + enable_status.panel_rx_enable_status); + ZXIC_COMM_PRINT("panel_tx_enable_status = %d", + enable_status.panel_tx_enable_status); + ZXIC_COMM_PRINT("vqm_rx_enable_status = %d", + enable_status.vqm_rx_enable_status); + ZXIC_COMM_PRINT("vqm_tx_enable_status = %d", + enable_status.vqm_tx_enable_status); + ZXIC_COMM_PRINT("rdma_rx_enable_status = %d", + enable_status.rdma_rx_enable_status); + ZXIC_COMM_PRINT("rdma_tx_enable_status = %d", + enable_status.rdma_tx_enable_status); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_rule_index_to_tcam_index( + ZXIC_UINT32 rule_index, ZXDH_PKT_CAP_MODE rule_mode, + ZXDH_PKT_CAP_POINT capture_pkt_flag) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 tcam_index = 0; + + rc = dpp_pkt_capture_rule_index_to_tcam_index( + rule_index, rule_mode, capture_pkt_flag, &tcam_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_rule_index_to_tcam_index"); + + ZXIC_COMM_PRINT( + "rule index = %d, rule mode = %d, cap_point = %d, tcam index = %d", + rule_index, rule_mode, capture_pkt_flag, tcam_index); + + return DPP_OK; +} + +ZXIC_UINT32 +diag_dpp_pkt_capture_tcam_index_to_rule_index(ZXIC_UINT32 tcam_index) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_PKT_CAP_MODE rule_mode = 0; + ZXIC_UINT32 rule_index = 0; + + rc = dpp_pkt_capture_tcam_index_to_rule_index(tcam_index, &rule_mode, + 
&rule_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_tcam_index_to_rule_index"); + + ZXIC_COMM_PRINT("tcam index = %d, rule mode = %d, rule index = %d", + tcam_index, rule_mode, rule_index); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_l3_set( + ZXIC_UINT32 sip_0, ZXIC_UINT32 sip_1, ZXIC_UINT32 sip_2, + ZXIC_UINT32 sip_3, ZXIC_UINT32 dip_0, ZXIC_UINT32 dip_1, + ZXIC_UINT32 dip_2, ZXIC_UINT32 dip_3, ZXIC_UINT8 protocol) +{ + ZXIC_UINT32 sip[4] = { sip_0, sip_1, sip_2, sip_3 }; + ZXIC_UINT32 dip[4] = { dip_0, dip_1, dip_2, dip_3 }; + + ZXIC_COMM_MEMCPY(&g_rule.pkt_cap_key.sip, sip, 16); + ZXIC_COMM_MEMCPY(&g_rule.pkt_cap_key.dip, dip, 16); + + g_rule.pkt_cap_key.protocol = protocol; + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_l2_set(ZXIC_UINT16 dmac_0, + ZXIC_UINT32 dmac_1, + ZXIC_UINT16 smac_0, + ZXIC_UINT32 smac_1, + ZXIC_UINT16 ethtype) +{ + g_rule.pkt_cap_key.dmac[0] = (dmac_0 >> 8) & 0xFF; + g_rule.pkt_cap_key.dmac[1] = (dmac_0 >> 0) & 0xFF; + g_rule.pkt_cap_key.dmac[2] = (dmac_1 >> 24) & 0xFF; + g_rule.pkt_cap_key.dmac[3] = (dmac_1 >> 16) & 0xFF; + g_rule.pkt_cap_key.dmac[4] = (dmac_1 >> 8) & 0xFF; + g_rule.pkt_cap_key.dmac[5] = (dmac_1 >> 0) & 0xFF; + + g_rule.pkt_cap_key.smac[0] = (smac_0 >> 8) & 0xFF; + g_rule.pkt_cap_key.smac[1] = (smac_0 >> 0) & 0xFF; + g_rule.pkt_cap_key.smac[2] = (smac_1 >> 24) & 0xFF; + g_rule.pkt_cap_key.smac[3] = (smac_1 >> 16) & 0xFF; + g_rule.pkt_cap_key.smac[4] = (smac_1 >> 8) & 0xFF; + g_rule.pkt_cap_key.smac[5] = (smac_1 >> 0) & 0xFF; + + g_rule.pkt_cap_key.ethtype = ethtype; + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_l4_set(ZXIC_UINT16 dport, + ZXIC_UINT16 sport, ZXIC_UINT32 qp) +{ + g_rule.pkt_cap_key.dport = dport; + g_rule.pkt_cap_key.sport = sport; + + g_rule.pkt_cap_key.qp = qp; + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_kw_set(ZXIC_UINT32 kw_0, ZXIC_UINT32 kw_1, + ZXIC_UINT32 kw_2, ZXIC_UINT32 kw_3, + ZXIC_UINT16 kw_off, + ZXIC_UINT8 kw_len) +{ 
+ ZXIC_UINT32 kw[4] = { kw_0, kw_1, kw_2, kw_3 }; + + g_rule.pkt_cap_key.key_word_len = kw_len; + g_rule.pkt_cap_key.key_word_off = kw_off; + + ZXIC_COMM_MEMCPY(&g_rule.pkt_cap_key.key_word, kw, 15); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_insert( + ZXIC_UINT16 slot, ZXIC_UINT16 vport, ZXIC_UINT32 tcam_index, + ZXIC_UINT16 rule_config, ZXIC_UINT8 capture_pkt_flag, + ZXIC_UINT8 panel_id, ZXIC_UINT16 vqm_vfid, ZXIC_UINT16 vhca_id) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + g_rule.tcam_index = tcam_index; + ZXIC_COMM_MEMCPY(&g_rule.rule_config, &rule_config, + sizeof(ZXDH_PKT_CAP_NORMAL_CONFIG)); + + g_rule.dst_vqm_vfid = VQM_VFID(pf_info.vport); + + g_rule.pkt_cap_key.capture_pkt_flag = capture_pkt_flag; + g_rule.pkt_cap_key.panel_id = panel_id; + g_rule.pkt_cap_key.vqm_vfid = vqm_vfid; + g_rule.pkt_cap_key.vhca_id = vhca_id; + + rc = dpp_pkt_capture_item_insert(&pf_info, &g_rule); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_item_insert"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_item_delete(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 tcam_index) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_item_delete(&pf_info, tcam_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_item_delete"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_table_dump(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 dump_num = 68; + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXDH_PKT_CAP_RULE *p_rule_array = NULL; + + p_rule_array = (ZXDH_PKT_CAP_RULE *)ZXIC_COMM_MALLOC( + 68 * sizeof(ZXDH_PKT_CAP_RULE)); + ZXIC_COMM_CHECK_POINT(p_rule_array); + + rc = dpp_pkt_capture_table_dump(&pf_info, p_rule_array, &dump_num); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_pkt_capture_table_dump", + p_rule_array); + + for (i = 0; i < dump_num; i++) { + ZXIC_COMM_PRINT("rule [%u] tcam_index = %u\n", i, + 
p_rule_array[i].tcam_index); + ZXIC_COMM_PRINT("rule [%u] dst_vqm_vfid = %u\n", i, + p_rule_array[i].dst_vqm_vfid); + ZXIC_COMM_PRINT("rule [%u] rule_config.sourceid = %u\n", i, + p_rule_array[i].rule_config.sourceid); + ZXIC_COMM_PRINT("rule [%u] rule_config.dmac = %u\n", i, + p_rule_array[i].rule_config.dmac); + ZXIC_COMM_PRINT("rule [%u] rule_config.smac = %u\n", i, + p_rule_array[i].rule_config.smac); + ZXIC_COMM_PRINT("rule [%u] rule_config.ethtype = %u\n", i, + p_rule_array[i].rule_config.ethtype); + ZXIC_COMM_PRINT("rule [%u] rule_config.sip = %u\n", i, + p_rule_array[i].rule_config.sip); + ZXIC_COMM_PRINT("rule [%u] rule_config.dip = %u\n", i, + p_rule_array[i].rule_config.dip); + ZXIC_COMM_PRINT("rule [%u] rule_config.protocol = %u\n", i, + p_rule_array[i].rule_config.protocol); + ZXIC_COMM_PRINT("rule [%u] rule_config.sport = %u\n", i, + p_rule_array[i].rule_config.sport); + ZXIC_COMM_PRINT("rule [%u] rule_config.dport = %u\n", i, + p_rule_array[i].rule_config.dport); + ZXIC_COMM_PRINT("rule [%u] rule_config.qp = %u\n", i, + p_rule_array[i].rule_config.qp); + ZXIC_COMM_PRINT("rule [%u] l2_info:\n", i); + ZXIC_COMM_PRINT("\t dmac:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.dmac[0], + p_rule_array[i].pkt_cap_key.dmac[1], + p_rule_array[i].pkt_cap_key.dmac[2], + p_rule_array[i].pkt_cap_key.dmac[3], + p_rule_array[i].pkt_cap_key.dmac[4], + p_rule_array[i].pkt_cap_key.dmac[5]); + ZXIC_COMM_PRINT("\t smac:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.smac[0], + p_rule_array[i].pkt_cap_key.smac[1], + p_rule_array[i].pkt_cap_key.smac[2], + p_rule_array[i].pkt_cap_key.smac[3], + p_rule_array[i].pkt_cap_key.smac[4], + p_rule_array[i].pkt_cap_key.smac[5]); + ZXIC_COMM_PRINT("\t ethtype:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x\n", + p_rule_array[i].pkt_cap_key.ethtype); + ZXIC_COMM_PRINT("rule [%u] l3_info:\n", i); + ZXIC_COMM_PRINT("\t sip:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 
0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.sip[0], + p_rule_array[i].pkt_cap_key.sip[1], + p_rule_array[i].pkt_cap_key.sip[2], + p_rule_array[i].pkt_cap_key.sip[3]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.sip[4], + p_rule_array[i].pkt_cap_key.sip[5], + p_rule_array[i].pkt_cap_key.sip[6], + p_rule_array[i].pkt_cap_key.sip[7]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.sip[8], + p_rule_array[i].pkt_cap_key.sip[9], + p_rule_array[i].pkt_cap_key.sip[10], + p_rule_array[i].pkt_cap_key.sip[11]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.sip[12], + p_rule_array[i].pkt_cap_key.sip[13], + p_rule_array[i].pkt_cap_key.sip[14], + p_rule_array[i].pkt_cap_key.sip[15]); + ZXIC_COMM_PRINT("\t dip:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.dip[0], + p_rule_array[i].pkt_cap_key.dip[1], + p_rule_array[i].pkt_cap_key.dip[2], + p_rule_array[i].pkt_cap_key.dip[3]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.dip[4], + p_rule_array[i].pkt_cap_key.dip[5], + p_rule_array[i].pkt_cap_key.dip[6], + p_rule_array[i].pkt_cap_key.dip[7]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.dip[8], + p_rule_array[i].pkt_cap_key.dip[9], + p_rule_array[i].pkt_cap_key.dip[10], + p_rule_array[i].pkt_cap_key.dip[11]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.dip[12], + p_rule_array[i].pkt_cap_key.dip[13], + p_rule_array[i].pkt_cap_key.dip[14], + p_rule_array[i].pkt_cap_key.dip[15]); + ZXIC_COMM_PRINT("\t protocol:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x\n", + p_rule_array[i].pkt_cap_key.protocol); + ZXIC_COMM_PRINT("rule [%u] l4_info:\n", i); + ZXIC_COMM_PRINT("\t dport: 0x%x\n", + p_rule_array[i].pkt_cap_key.dport); + ZXIC_COMM_PRINT("\t sport: 0x%x\n", + p_rule_array[i].pkt_cap_key.sport); + ZXIC_COMM_PRINT("rule [%u] qp: 0x%x\n", i, + 
p_rule_array[i].pkt_cap_key.qp); + ZXIC_COMM_PRINT("rule [%u] pkt_cap_flag: %u\n", i, + p_rule_array[i].pkt_cap_key.capture_pkt_flag); + ZXIC_COMM_PRINT("rule [%u] panel_id: 0x%x\n", i, + p_rule_array[i].pkt_cap_key.panel_id); + ZXIC_COMM_PRINT("rule [%u] vqm_vfid: 0x%x\n", i, + p_rule_array[i].pkt_cap_key.vqm_vfid); + ZXIC_COMM_PRINT("rule [%u] vhca_id: 0x%x\n", i, + p_rule_array[i].pkt_cap_key.vhca_id); + ZXIC_COMM_PRINT("rule [%u] kw_len: 0x%x\n", i, + p_rule_array[i].pkt_cap_key.key_word_len); + ZXIC_COMM_PRINT("rule [%u] kw_off: 0x%x\n", i, + p_rule_array[i].pkt_cap_key.key_word_off); + ZXIC_COMM_PRINT("rule [%u] kw:\n", i); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.key_word[0], + p_rule_array[i].pkt_cap_key.key_word[1], + p_rule_array[i].pkt_cap_key.key_word[2], + p_rule_array[i].pkt_cap_key.key_word[3]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.key_word[4], + p_rule_array[i].pkt_cap_key.key_word[5], + p_rule_array[i].pkt_cap_key.key_word[6], + p_rule_array[i].pkt_cap_key.key_word[7]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.key_word[8], + p_rule_array[i].pkt_cap_key.key_word[9], + p_rule_array[i].pkt_cap_key.key_word[10], + p_rule_array[i].pkt_cap_key.key_word[11]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x\n", + p_rule_array[i].pkt_cap_key.key_word[12], + p_rule_array[i].pkt_cap_key.key_word[13], + p_rule_array[i].pkt_cap_key.key_word[14]); + } + + ZXIC_COMM_FREE(p_rule_array); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_table_flush(ZXIC_UINT16 slot, + ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pkt_capture_table_flush(&pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_table_flush"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_speed_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 speed) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport 
}; + + rc = dpp_pkt_capture_speed_set(&pf_info, speed); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_speed_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pkt_capture_speed_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 speed = 0; + + rc = dpp_pkt_capture_speed_get(&pf_info, &speed); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_speed_get"); + + ZXIC_COMM_PRINT("pkt cap speed is %u\n", speed); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_mcode_feature_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 index) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT64 feature = 0; + + rc = dpp_mcode_feature_get(&pf_info, index, &feature); + ZXIC_COMM_CHECK_RC(rc, "dpp_mcode_feature_get"); + + ZXIC_COMM_PRINT("mcode feature[%d] is 0x%lx\n", index, feature); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_pktrx_mcode_glb_cfg_write(ZXIC_UINT16 slot, + ZXIC_UINT16 vport, + ZXIC_UINT32 start_bit_no, + ZXIC_UINT32 end_bit_no, + ZXIC_UINT32 glb_cfg_data_1) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_pktrx_mcode_glb_cfg_write(&pf_info, start_bit_no, end_bit_no, + glb_cfg_data_1); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_write"); + + ZXIC_COMM_PRINT("diag_dpp_pktrx_mcode_glb_cfg_write success\n"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_l2d_psn_cfg_set(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT8 psn_cfg) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + + rc = dpp_l2d_psn_cfg_set(&pf_info, psn_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_l2d_psn_cfg_set"); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_l2d_psn_cfg_get(ZXIC_UINT16 slot, ZXIC_UINT16 vport) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_PF_INFO_T pf_info = { slot, vport }; + ZXIC_UINT32 psn_cfg = 0; + + rc = dpp_l2d_psn_cfg_get(&pf_info, &psn_cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_l2d_psn_cfg_get"); + + ZXIC_COMM_PRINT("l2d psn cfg is 
0x%x\n", psn_cfg); + + return DPP_OK; +} + +ZXIC_UINT32 diag_dpp_dtb_dump_test(ZXIC_UINT16 slot, ZXIC_UINT16 vport, + ZXIC_UINT32 num, ZXIC_UINT32 flag) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 i = 0; + + switch (flag) { + case 1: { + for (i = 0; i < num; i++) { + rc = diag_dpp_vport_mac_prt(slot, vport); + ZXIC_COMM_CHECK_RC(rc, "diag_dpp_vport_mac_prt"); + } + break; + } + case 2: { + for (i = 0; i < num; i++) { + rc = diag_dpp_vport_mc_mac_prt(slot, vport); + ZXIC_COMM_CHECK_RC(rc, "diag_dpp_vport_mc_mac_prt"); + } + break; + } + case 3: { + for (i = 0; i < num; i++) { + rc = diag_dpp_pkt_capture_table_dump(slot, vport); + ZXIC_COMM_CHECK_RC(rc, + "diag_dpp_pkt_capture_table_dump"); + } + break; + } + case 4: { + for (i = 0; i < num; i++) { + rc = diag_dpp_fd_acl_all_delete(slot, vport); + ZXIC_COMM_CHECK_RC(rc, "diag_dpp_fd_acl_all_delete"); + } + break; + } + default: { + break; + } + } + + return DPP_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_fd.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_fd.c new file mode 100644 index 000000000000..ac1bbd9b2dc0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_fd.c @@ -0,0 +1,322 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_fd.h" +#include "dpp_tbl_stat.h" + +ZXIC_UINT32 dpp_fd_acl_index_request(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *p_index) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_index); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_index_request(&dev, sdt_no, pf_info->vport, p_index); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_index_request", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, *p_index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_index_request); + +ZXIC_UINT32 dpp_fd_acl_index_release(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_index_release(&dev, sdt_no, pf_info->vport, index); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_index_release", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_index_release); + +ZXIC_UINT32 dpp_fd_acl_entry_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result) +{ + DPP_DEV_T dev = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T fd_entry = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(key); + ZXIC_COMM_CHECK_POINT(key_mask); + ZXIC_COMM_CHECK_POINT(result); + + ZXIC_COMM_MEMSET_S(&fd_entry, 
sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + fd_entry.handle = handle; + fd_entry.key_data = key; + fd_entry.key_mask = key_mask; + fd_entry.p_as_rslt = result; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_dma_insert(&dev, queue, sdt_no, 1, &fd_entry, + &element_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_dma_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_entry_add); + +ZXIC_UINT32 dpp_fd_acl_entry_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + ZXIC_UINT32 element_id = 0; + ZXIC_UINT8 data[DPP_ETCAM_WIDTH_MAX / 8] = { 0xff }; + ZXIC_UINT8 mask[DPP_ETCAM_WIDTH_MAX / 8] = { 0 }; + ZXIC_UINT8 as_rlt[16] = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T fd_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_MEMSET(&fd_entry, 0, sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_MEMSET_S(data, sizeof(data), 0xff, sizeof(data)); + ZXIC_COMM_MEMSET_S(mask, sizeof(mask), 0x0, sizeof(mask)); + ZXIC_COMM_MEMSET_S(as_rlt, sizeof(as_rlt), 0xff, sizeof(as_rlt)); + + fd_entry.handle = handle; + fd_entry.key_data = data; + fd_entry.key_mask = mask; + fd_entry.p_as_rslt = as_rlt; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + 
ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_dma_insert(&dev, queue, sdt_no, 1, &fd_entry, + &element_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_dma_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_entry_del); + +ZXIC_UINT32 dpp_fd_acl_entry_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result) +{ + DPP_DEV_T dev = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T fd_entry = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(key); + ZXIC_COMM_CHECK_POINT(key_mask); + ZXIC_COMM_CHECK_POINT(result); + + ZXIC_COMM_MEMSET_S(&fd_entry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + fd_entry.handle = handle; + fd_entry.key_data = key; + fd_entry.key_mask = key_mask; + fd_entry.p_as_rslt = result; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_etcam_data_get(&dev, queue, sdt_no, &fd_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_etcam_data_get", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_entry_get); + +ZXIC_UINT32 dpp_fd_acl_entry_search(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 handle, + ZXIC_UINT8 *key, ZXIC_UINT8 *key_mask, + ZXIC_UINT8 *result) +{ + DPP_DEV_T dev = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T fd_entry = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + 
ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(key); + ZXIC_COMM_CHECK_POINT(key_mask); + ZXIC_COMM_CHECK_POINT(result); + + ZXIC_COMM_MEMSET_S(&fd_entry, sizeof(DPP_DTB_ACL_ENTRY_INFO_T), 0, + sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + fd_entry.handle = handle; + fd_entry.key_data = key; + fd_entry.key_mask = key_mask; + fd_entry.p_as_rslt = result; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_data_get(&dev, queue, sdt_no, &fd_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_data_get", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_entry_search); + +ZXIC_UINT32 dpp_fd_acl_all_delete(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_offline_delete(&dev, queue, sdt_no, pf_info->vport, + DPP_STAT_FD_ACL_CNT_ERAM_BAADDR, 1); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_offline_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_all_delete); + +ZXIC_UINT32 
dpp_fd_acl_stat_clear(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_FD_CFG_TABLE; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_acl_stat_clr_by_vport(&dev, queue, sdt_no, pf_info->vport, + 1, DPP_STAT_FD_ACL_CNT_ERAM_BAADDR); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_acl_stat_clr_by_vport", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_fd_acl_stat_clear); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ipsec.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ipsec.c new file mode 100644 index 000000000000..3ca80d0fe915 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ipsec.c @@ -0,0 +1,125 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_ipsec.h" + +ZXIC_UINT32 dpp_ipsec_enc_entry_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT8 *sip, ZXIC_UINT8 *dip, + ZXIC_UINT8 *sip_mask, ZXIC_UINT8 *dip_mask, + ZXIC_UINT32 is_ipv4, ZXIC_UINT32 sa_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_IPSEC_ENC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_IPSEC_ENC_T ipsec_enc_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + 
ZXIC_COMM_CHECK_POINT(sip); + ZXIC_COMM_CHECK_POINT(dip); + ZXIC_COMM_CHECK_POINT(sip_mask); + ZXIC_COMM_CHECK_POINT(dip_mask); + + ZXIC_COMM_MEMSET(&ipsec_enc_entry, 0, sizeof(ZXDH_IPSEC_ENC_T)); + + ipsec_enc_entry.index = index; + + ZXIC_COMM_MEMCPY(ipsec_enc_entry.key.sip, sip, is_ipv4 ? 4 : 16); + ZXIC_COMM_MEMCPY(ipsec_enc_entry.key.dip, dip, is_ipv4 ? 4 : 16); + ZXIC_COMM_MEMCPY(ipsec_enc_entry.mask.sip, sip_mask, is_ipv4 ? 4 : 16); + ZXIC_COMM_MEMCPY(ipsec_enc_entry.mask.dip, dip_mask, is_ipv4 ? 4 : 16); + + ipsec_enc_entry.entry.sa_id = sa_id; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_acl_entry_insert(&dev, queue, sdt_no, + &ipsec_enc_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u sa_id: %u is_ipv4: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + sa_id, is_ipv4); + + ZXIC_COMM_PRINT("[%s] sip:\n", __FUNCTION__); + dpp_data_print(sip, is_ipv4 ? 4 : 16); + + ZXIC_COMM_PRINT("[%s] sip_mask:\n", __FUNCTION__); + dpp_data_print(sip_mask, is_ipv4 ? 4 : 16); + + ZXIC_COMM_PRINT("[%s] dip:\n", __FUNCTION__); + dpp_data_print(dip, is_ipv4 ? 4 : 16); + + ZXIC_COMM_PRINT("[%s] dip_mask:\n", __FUNCTION__); + dpp_data_print(dip_mask, is_ipv4 ? 
4 : 16); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_ipsec_enc_entry_add); + +ZXIC_UINT32 dpp_ipsec_enc_entry_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_IPSEC_ENC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_IPSEC_ENC_T ipsec_enc_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_MEMSET(&ipsec_enc_entry, 0, sizeof(ZXDH_IPSEC_ENC_T)); + ZXIC_COMM_MEMSET(&ipsec_enc_entry.mask, 0xFF, + sizeof(ZXDH_IPSEC_ENC_MASK)); + + ipsec_enc_entry.index = index; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_acl_entry_insert(&dev, queue, sdt_no, + &ipsec_enc_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_acl_entry_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_ipsec_enc_entry_del); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_lag.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_lag.c new file mode 100644 index 000000000000..badeb27f06cb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_lag.c @@ -0,0 +1,371 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_port.h" 
+#include "dpp_tbl_lag.h" + +ZXIC_UINT32 dpp_lag_group_create(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id); + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + lag_entry.hit_flag = 1; + lag_entry.bond_mode = LAG_LACP_MODE; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_group_create); + +ZXIC_UINT32 dpp_lag_group_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, 
sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_clear(&dev, queue, sdt_no, lag_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_clear", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_group_delete); + +ZXIC_UINT32 dpp_lag_mode_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 mode) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(mode, LAG_STANDBY_MODE, LAG_LACP_MODE); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u mode: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + mode); + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + lag_entry.hit_flag = 1; + lag_entry.bond_mode = mode; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + 
"[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u mode: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + mode); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_mode_set); + +ZXIC_UINT32 dpp_lag_group_hash_factor_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 lag_id, ZXIC_UINT8 factor) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u factor: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + factor); + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + lag_entry.hit_flag = 1; + lag_entry.hash_factor = factor; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u factor: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + factor); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_group_hash_factor_set); + +ZXIC_UINT32 dpp_lag_group_member_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 
ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u uplink_phy_port_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + uplink_phy_port_id); + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + lag_entry.hit_flag = 1; + + if ((lag_entry.member_bitmap & (1 << (15 - uplink_phy_port_id))) == 0) { + lag_entry.member_bitmap |= (1 << (15 - uplink_phy_port_id)); + lag_entry.member_num++; + } + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u uplink_phy_port_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + uplink_phy_port_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_group_member_add); + +ZXIC_UINT32 dpp_lag_group_member_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 uplink_phy_port_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u uplink_phy_port_id: %u 
start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + uplink_phy_port_id); + + ZXIC_COMM_MEMSET(&lag_entry, 0, sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + lag_entry.hit_flag = 1; + + if ((lag_entry.member_bitmap & (1 << (15 - uplink_phy_port_id))) != 0) { + lag_entry.member_bitmap &= ~(1 << (15 - uplink_phy_port_id)); + ZXIC_COMM_CHECK_INDEX_LOWER_UNLOCK(lag_entry.member_num, 1, + DEV_PCIE_LOCK(&dev)); + lag_entry.member_num--; + } + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u uplink_phy_port_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + uplink_phy_port_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_group_member_del); + +ZXIC_UINT32 dpp_lag_hit_flag_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 lag_id, + ZXIC_UINT8 *hit_flag) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_LAG_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_LAG_T lag_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(hit_flag); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id); + + ZXIC_COMM_MEMSET_S(&lag_entry, sizeof(ZXDH_LAG_T), 0, + 
sizeof(ZXDH_LAG_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, lag_id, &lag_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + *hit_flag = (ZXIC_UINT8)lag_entry.hit_flag; + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u lag_id: %u hit_flag: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, lag_id, + *hit_flag); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_lag_hit_flag_get); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mac.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mac.c new file mode 100644 index 000000000000..059efd52fbab --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mac.c @@ -0,0 +1,809 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_mac.h" +#include "dpp_tbl_api.h" +#include "dpp_apt_se.h" +#include "dpp_sdt.h" + +ZXIC_VOID dpp_l2_entry_print(ZXDH_L2_ENTRY_T *l2_entry) +{ + ZXIC_COMM_TRACE_NOTICE("key--vlan_id: 0x%04x\n", + l2_entry->key.sriov_vlan_id); + ZXIC_COMM_TRACE_NOTICE("key--vlan_tpid: 0x%04x\n", + l2_entry->key.sriov_vlan_tpid); + ZXIC_COMM_TRACE_NOTICE( + "key--mac: %02x:%02x:%02x:%02x:%02x:%02x.\n", + l2_entry->key.dmac_addr[0], l2_entry->key.dmac_addr[1], + l2_entry->key.dmac_addr[2], l2_entry->key.dmac_addr[3], 
+ l2_entry->key.dmac_addr[4], l2_entry->key.dmac_addr[5]); + + ZXIC_COMM_TRACE_NOTICE("entry--vqm_vfid: 0x%02x\n", + l2_entry->entry.vqm_vfid); + ZXIC_COMM_TRACE_NOTICE("entry--rsv: 0x%02x\n", l2_entry->entry.rsv); + ZXIC_COMM_TRACE_NOTICE("entry--hit_flag: 0x%02x\n", + l2_entry->entry.hit_flag); +} + +ZXIC_UINT32 dpp_add_mac(DPP_PF_INFO_T *pf_info, ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, ZXIC_UINT16 sriov_vlan_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_L2_ENTRY_T l2_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + ZXIC_COMM_MEMSET(&l2_entry, 0, sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_MEMCPY(l2_entry.key.dmac_addr, mac, 6); + l2_entry.key.sriov_vlan_tpid = sriov_vlan_tpid; + l2_entry.key.sriov_vlan_id = sriov_vlan_id; + + l2_entry.entry.vqm_vfid = VQM_VFID(pf_info->vport); + l2_entry.entry.hit_flag = 0x00; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sriov_vlan_tpid: 0x%04x sriov_vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.sriov_vlan_tpid, l2_entry.key.sriov_vlan_id, + l2_entry.key.dmac_addr[0], l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], l2_entry.key.dmac_addr[5]); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_hash_insert(&dev, queue, sdt_no, &l2_entry); + 
ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sriov_vlan_tpid: 0x%04x sriov_vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.sriov_vlan_tpid, l2_entry.key.sriov_vlan_id, + l2_entry.key.dmac_addr[0], l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], l2_entry.key.dmac_addr[5]); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_add_mac); + +ZXIC_UINT32 dpp_del_mac(DPP_PF_INFO_T *pf_info, ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, ZXIC_UINT16 sriov_vlan_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_L2_ENTRY_T l2_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + ZXIC_COMM_MEMSET(&l2_entry, 0, sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_MEMCPY(l2_entry.key.dmac_addr, mac, 6); + l2_entry.key.sriov_vlan_tpid = sriov_vlan_tpid; + l2_entry.key.sriov_vlan_id = sriov_vlan_id; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sriov_vlan_tpid: 0x%04x sriov_vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.sriov_vlan_tpid, l2_entry.key.sriov_vlan_id, + l2_entry.key.dmac_addr[0], l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], l2_entry.key.dmac_addr[5]); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + 
hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_hash_delete(&dev, queue, sdt_no, &l2_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sriov_vlan_tpid: 0x%04x sriov_vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.sriov_vlan_tpid, l2_entry.key.sriov_vlan_id, + l2_entry.key.dmac_addr[0], l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], l2_entry.key.dmac_addr[5]); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_del_mac); + +ZXIC_UINT32 dpp_batch_add_unicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *l2key) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 entry_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_L2_ENTRY_T *p_multi_l2_entry = NULL; + ZXDH_L2_ENTRY_T *p_one_l2_entry = NULL; + ZXDH_L2_FWD_KEY *p_mac_key = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(l2key); + ZXIC_COMM_CHECK_INDEX_LOWER(mac_num, 1); + + p_multi_l2_entry = (ZXDH_L2_ENTRY_T *)ZXIC_COMM_MALLOC( + mac_num * sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_CHECK_POINT(p_multi_l2_entry); + ZXIC_COMM_MEMSET_S(p_multi_l2_entry, mac_num * sizeof(ZXDH_L2_ENTRY_T), + 0, mac_num * sizeof(ZXDH_L2_ENTRY_T)); + + for (entry_index = 0; entry_index < mac_num; entry_index++) { + p_one_l2_entry = p_multi_l2_entry + entry_index; + p_mac_key = (ZXDH_L2_FWD_KEY *)l2key + entry_index; + p_one_l2_entry->entry.vqm_vfid = VQM_VFID(pf_info->vport); + p_one_l2_entry->entry.hit_flag = 0x00; + ZXIC_COMM_MEMCPY_S((ZXIC_UINT8 
*)&(p_one_l2_entry->key), + sizeof(ZXDH_L2_FWD_KEY), p_mac_key, + sizeof(ZXDH_L2_FWD_KEY)); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x tpid: 0x%04x vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + p_one_l2_entry->key.sriov_vlan_tpid, + p_one_l2_entry->key.sriov_vlan_id, + p_one_l2_entry->key.dmac_addr[0], + p_one_l2_entry->key.dmac_addr[1], + p_one_l2_entry->key.dmac_addr[2], + p_one_l2_entry->key.dmac_addr[3], + p_one_l2_entry->key.dmac_addr[4], + p_one_l2_entry->key.dmac_addr[5]); + } + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dev_get", p_multi_l2_entry); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dtb_queue_id_get", + p_multi_l2_entry); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_hash_index_get", + p_multi_l2_entry); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_table_lock", + p_multi_l2_entry); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(DEV_PCIE_LOCK(&dev), + p_multi_l2_entry); + + rc = dpp_apt_dtb_multi_hash_insert(&dev, queue, sdt_no, mac_num, + sizeof(ZXDH_L2_ENTRY_T), + p_multi_l2_entry); + ZXIC_COMM_FREE(p_multi_l2_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_batch_add_unicast_mac); + +ZXIC_UINT32 dpp_batch_del_unicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *l2key) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 entry_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_L2_ENTRY_T *p_multi_l2_entry = NULL; + ZXDH_L2_ENTRY_T 
*p_one_l2_entry = NULL; + ZXDH_L2_FWD_KEY *p_mac_key = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(l2key); + ZXIC_COMM_CHECK_INDEX_LOWER(mac_num, 1); + + p_multi_l2_entry = (ZXDH_L2_ENTRY_T *)ZXIC_COMM_MALLOC( + mac_num * sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_CHECK_POINT(p_multi_l2_entry); + ZXIC_COMM_MEMSET_S(p_multi_l2_entry, mac_num * sizeof(ZXDH_L2_ENTRY_T), + 0, mac_num * sizeof(ZXDH_L2_ENTRY_T)); + + for (entry_index = 0; entry_index < mac_num; entry_index++) { + p_one_l2_entry = p_multi_l2_entry + entry_index; + p_mac_key = (ZXDH_L2_FWD_KEY *)l2key + entry_index; + ZXIC_COMM_MEMCPY_S((ZXIC_UINT8 *)&(p_one_l2_entry->key), + sizeof(ZXDH_L2_FWD_KEY), p_mac_key, + sizeof(ZXDH_L2_FWD_KEY)); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x tpid: 0x%04x vlan_id: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + p_one_l2_entry->key.sriov_vlan_tpid, + p_one_l2_entry->key.sriov_vlan_id, + p_one_l2_entry->key.dmac_addr[0], + p_one_l2_entry->key.dmac_addr[1], + p_one_l2_entry->key.dmac_addr[2], + p_one_l2_entry->key.dmac_addr[3], + p_one_l2_entry->key.dmac_addr[4], + p_one_l2_entry->key.dmac_addr[5]); + } + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dev_get", p_multi_l2_entry); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dtb_queue_id_get", + p_multi_l2_entry); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_hash_index_get", + p_multi_l2_entry); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_table_lock", + p_multi_l2_entry); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(DEV_PCIE_LOCK(&dev), + p_multi_l2_entry); + + rc = dpp_apt_dtb_multi_hash_delete(&dev, queue, sdt_no, mac_num, + sizeof(ZXDH_L2_ENTRY_T), + p_multi_l2_entry); + 
ZXIC_COMM_FREE(p_multi_l2_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_multi_hash_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_batch_del_unicast_mac); + +ZXIC_UINT32 dpp_unicast_mac_search(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + ZXIC_UINT16 sriov_vlan_tpid, + ZXIC_UINT16 sriov_vlan_id, + ZXIC_UINT16 *current_vport) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT8 key_valid = 1; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 srch_mode = HASH_SRH_MODE_HDW; + ZXIC_UINT32 rc = DPP_OK; + + DPP_HASH_ENTRY entry = { 0 }; + DPP_SDTTBL_HASH_T sdt_hash_info = { 0 }; /*SDT内容*/ + ZXIC_UINT8 actKey[HASH_KEY_MAX] = { 0 }; + ZXIC_UINT8 actRst[HASH_RST_MAX] = { 0 }; + ZXDH_L2_ENTRY_T l2_entry = { 0 }; + DPP_DTB_HASH_ENTRY_INFO_T p_dtb_hash_entry = { 0 }; + + SE_APT_CALLBACK_T *pAptCallback = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + ZXIC_COMM_CHECK_POINT(current_vport); + + ZXIC_COMM_MEMSET(&l2_entry, 0, sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_MEMCPY(l2_entry.key.dmac_addr, mac, 6); + l2_entry.key.sriov_vlan_tpid = sriov_vlan_tpid; + l2_entry.key.sriov_vlan_id = sriov_vlan_id; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x vlan_tpid: 0x%04x vlan_id: 0x%04x.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.sriov_vlan_tpid, l2_entry.key.sriov_vlan_id); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + l2_entry.key.dmac_addr[0], l2_entry.key.dmac_addr[1], + l2_entry.key.dmac_addr[2], l2_entry.key.dmac_addr[3], + l2_entry.key.dmac_addr[4], l2_entry.key.dmac_addr[5]); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_vport_hash_index_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + //从sdt_no中获取SDT配置 + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, &sdt_hash_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_soft_sdt_tbl_get"); + + pAptCallback = dpp_apt_get_func(&dev, sdt_no); + ZXIC_COMM_CHECK_POINT(pAptCallback); + + entry.p_key = actKey; + entry.p_rst = actRst; + entry.p_key[0] = DPP_GET_HASH_KEY_CTRL(key_valid, + sdt_hash_info.hash_table_width, + sdt_hash_info.hash_table_id); + + //结构体转码流 + rc = pAptCallback->se_func_info.hashFunc.hash_set_func(&l2_entry, + &entry); + ZXIC_COMM_CHECK_RC(rc, "hash_set_func"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + p_dtb_hash_entry.p_actu_key = &entry.p_key[1]; + p_dtb_hash_entry.p_rst = entry.p_rst; + rc = dpp_dtb_hash_data_get(&dev, queue, sdt_no, &p_dtb_hash_entry, + srch_mode); + if (rc != DPP_OK) { + if (rc == DPP_HASH_RC_SRH_FAIL) { + ZXIC_COMM_PRINT("There is no such hash!\n"); + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + return DPP_HASH_RC_SRH_FAIL; + } + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + return DPP_ERR; + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + //码流转结构体 + rc = pAptCallback->se_func_info.hashFunc.hash_get_func(&l2_entry, + &entry); + ZXIC_COMM_CHECK_RC(rc, "hash_get_func"); + + rc = dpp_vport_get_by_vqm_vfid(OWNER_PF_VPORT(pf_info->vport), + l2_entry.entry.vqm_vfid, current_vport); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_get_by_vqm_vfid"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u vqm_vfid: %u vport: 0x%04x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + l2_entry.entry.vqm_vfid, 
*current_vport); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_mac_search); + +ZXIC_UINT32 dpp_unicast_mac_dump(DPP_PF_INFO_T *pf_info, + MAC_VPORT_INFO *p_mac_arr, + ZXIC_UINT32 *p_mac_num) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 current_vqm_vfid = 0; + ZXIC_UINT16 current_vport = 0; + + ZXIC_UINT32 max_item_num = DTB_DUMP_UNICAST_MAC_DUMP_NUM; + ZXIC_UINT32 entryNum = 0; + ZXDH_L2_ENTRY_T *pL2DataArr = NULL; + ZXDH_L2_ENTRY_T *p_l2_temp_entry = NULL; + MAC_VPORT_INFO *p_temp_mac_info = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_mac_num); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(&dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + pL2DataArr = (ZXDH_L2_ENTRY_T *)ZXIC_COMM_VMALLOC( + max_item_num * sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pL2DataArr); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE(rc, "dpp_vport_table_lock", pL2DataArr); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE(DEV_PCIE_LOCK(&dev), pL2DataArr); + + rc = dpp_apt_dtb_hash_table_unicast_mac_dump(&dev, queue, sdt_no, + pL2DataArr, &entryNum); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_hash_table_unicast_mac_dump", pL2DataArr, + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE(rc, "dpp_vport_table_unlock", + pL2DataArr); + + ZXIC_COMM_TRACE_NOTICE("unicast mac dump num:0x%x\n", entryNum); + + if (p_mac_arr != ZXIC_NULL) { + 
for (index = 0; index < entryNum; index++) { + p_l2_temp_entry = pL2DataArr + index; + p_temp_mac_info = p_mac_arr + index; + + ZXIC_COMM_TRACE_NOTICE("l2 entry index:0x%x\n", index); + dpp_l2_entry_print(p_l2_temp_entry); + + //从l2中获取mac地址给mac_info,再转换vport信息 + ZXIC_COMM_MEMCPY(p_temp_mac_info->addr, + p_l2_temp_entry->key.dmac_addr, 6); + current_vqm_vfid = p_l2_temp_entry->entry.vqm_vfid; + + p_temp_mac_info->sriov_vlan_tpid = + p_l2_temp_entry->key.sriov_vlan_tpid; + p_temp_mac_info->sriov_vlan_id = + p_l2_temp_entry->key.sriov_vlan_id; + + rc = dpp_vport_get_by_vqm_vfid( + OWNER_PF_VPORT(pf_info->vport), + current_vqm_vfid, ¤t_vport); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_UNLOCK_NO_ASSERT( + rc, "dpp_vport_get_by_vqm_vfid", pL2DataArr, + DEV_PCIE_LOCK(&dev)); + p_temp_mac_info->vport = current_vport; + ZXIC_COMM_TRACE_NOTICE( + "current_vqm_vfid:0x%x --> current_vport:0x%x\n", + current_vqm_vfid, current_vport); + } + } + + *p_mac_num = entryNum; + + ZXIC_COMM_VFREE(pL2DataArr); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_mac_dump); + +ZXIC_UINT32 dpp_unicast_mac_transfer(DPP_PF_INFO_T *pf_info, + DPP_PF_INFO_T *new_pf_info) +{ + DPP_DEV_T dev = { 0 }; + DPP_DEV_T new_dev = { 0 }; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 current_vqm_vfid = 0; + ZXIC_UINT16 current_vport = 0; + + ZXIC_UINT32 max_item_num = DTB_DUMP_UNICAST_MAC_DUMP_NUM; + ZXIC_UINT32 entryNum = 0; + ZXIC_UINT32 transfer_num = 0; + ZXDH_L2_ENTRY_T *pL2DataArr = NULL; + ZXDH_L2_ENTRY_T *pL2DataArrNew = NULL; + ZXDH_L2_ENTRY_T *p_l2_temp_entry = NULL; + ZXDH_L2_ENTRY_T *p_l2_entry_new = NULL; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(new_pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_get(new_pf_info, &new_dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + if ((0 != ZXIC_COMM_MEMCMP(&dev, &new_dev, sizeof(DPP_DEV_T))) 
|| + IS_PF(pf_info->vport) || IS_PF(new_pf_info->vport)) { + ZXIC_COMM_TRACE_ERROR( + "current slot[0x%x] vport[0x%x] & new slot[0x%x] vport[0x%x] belong to different pf or slot\n", + dev.pcie_channel.slot, dev.pcie_channel.vport, + new_dev.pcie_channel.slot, new_dev.pcie_channel.vport); + ZXIC_COMM_TRACE_ERROR( + "current vport is %s, transfre vport is %s\n", + IS_PF(pf_info->vport) ? "PF" : "VF", + IS_PF(new_pf_info->vport) ? "PF" : "VF"); + return DPP_ERR; + } + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(&dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + pL2DataArr = (ZXDH_L2_ENTRY_T *)ZXIC_COMM_VMALLOC( + max_item_num * sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pL2DataArr); + + pL2DataArrNew = (ZXDH_L2_ENTRY_T *)ZXIC_COMM_VMALLOC( + max_item_num * sizeof(ZXDH_L2_ENTRY_T)); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pL2DataArrNew, pL2DataArr); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_NO_ASSERT( + rc, "dpp_vport_table_lock", pL2DataArrNew, pL2DataArr); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT( + DEV_PCIE_LOCK(&dev), pL2DataArrNew, pL2DataArr); + + rc = dpp_apt_dtb_hash_table_unicast_mac_dump(&dev, queue, sdt_no, + pL2DataArr, &entryNum); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_hash_table_unicast_mac_dump", pL2DataArrNew, + pL2DataArr, DEV_PCIE_LOCK(&dev)); + + for (index = 0; index < entryNum; index++) { + p_l2_temp_entry = pL2DataArr + index; + current_vqm_vfid = p_l2_temp_entry->entry.vqm_vfid; + rc = dpp_vport_get_by_vqm_vfid(OWNER_PF_VPORT(pf_info->vport), + current_vqm_vfid, + ¤t_vport); + 
ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_vport_get_by_vqm_vfid", pL2DataArrNew, + pL2DataArr, DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_TRACE_NOTICE( + "[%u] slot[%u],vport[0x%x] cur_vqm_vfid[%u] cur_vqm_vport[0x%x]\n", + index, pf_info->slot, pf_info->vport, current_vqm_vfid, + current_vport); + if (pf_info->vport == current_vport) { + p_l2_entry_new = pL2DataArrNew + transfer_num; + ZXIC_COMM_MEMCPY_S(p_l2_entry_new, + sizeof(ZXDH_L2_ENTRY_T), + p_l2_temp_entry, + sizeof(ZXDH_L2_ENTRY_T)); + p_l2_entry_new->entry.vqm_vfid = + VQM_VFID(new_pf_info->vport); + ZXIC_COMM_TRACE_NOTICE( + "[%u]:new_vport=0x%x new_vqm_vfid=0x%x\n", + transfer_num, new_pf_info->vport, + p_l2_entry_new->entry.vqm_vfid); + transfer_num++; + } + } + + if (entryNum == 0) { + ZXIC_COMM_VFREE(pL2DataArrNew); + ZXIC_COMM_VFREE(pL2DataArr); + ZXIC_COMM_PRINT("[%s] transfer num is 0!\n", __FUNCTION__); + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; + } + + rc = dpp_apt_dtb_multi_hash_insert(&dev, queue, sdt_no, transfer_num, + sizeof(ZXDH_L2_ENTRY_T), + pL2DataArrNew); + ZXIC_COMM_VFREE(pL2DataArrNew); + ZXIC_COMM_VFREE(pL2DataArr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_multi_hash_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_mac_transfer); + +ZXIC_UINT32 dpp_unicast_all_mac_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = 
dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_hash_offline_delete(&dev, queue, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_hash_offline_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_all_mac_delete); + +ZXIC_UINT32 dpp_unicast_all_mac_online_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_hash_online_delete(&dev, queue, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_hash_online_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; 
+} +EXPORT_SYMBOL(dpp_unicast_all_mac_online_delete); + +ZXIC_UINT32 dpp_unicast_mac_max_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *max_num) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(max_num); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(&dev, sdt_no, max_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x max_num: 0x%x get succ.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *max_num); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_mac_max_get); + +ZXIC_UINT32 dpp_unicast_all_mac_soft_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_soft_hash_index_get(&dev, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_soft_hash_index_get"); + sdt_no = ZXDH_SDT_L2_ENTRY_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_hash_soft_delete_by_sdt(&dev, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_hash_soft_delete_by_sdt", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + 
ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt:%u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_unicast_all_mac_soft_delete); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mc.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mc.c new file mode 100644 index 000000000000..f065cba1addf --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_mc.c @@ -0,0 +1,1267 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_mc.h" +#include "dpp_tbl_bc.h" +#include "dpp_tbl_api.h" +#include "dpp_sdt.h" + +ZXIC_VOID dpp_mc_entry_print(ZXDH_MC_T *mc_entry) +{ + ZXIC_COMM_TRACE_NOTICE("key--mc_mac: %02x:%02x:%02x:%02x:%02x:%02x.\n", + mc_entry->key.mc_mac[0], mc_entry->key.mc_mac[1], + mc_entry->key.mc_mac[2], mc_entry->key.mc_mac[3], + mc_entry->key.mc_mac[4], + mc_entry->key.mc_mac[5]); + ZXIC_COMM_TRACE_NOTICE("key--group_id: 0x%02x\n", + mc_entry->key.group_id); + ZXIC_COMM_TRACE_NOTICE("key--rsv: 0x%02x\n", mc_entry->key.rsv); + + ZXIC_COMM_TRACE_NOTICE("entry--mc_bitmap: 0x%016llx\n", + mc_entry->entry.mc_bitmap); + ZXIC_COMM_TRACE_NOTICE("entry--rsv2: 0x%02x\n", mc_entry->entry.rsv2); + ZXIC_COMM_TRACE_NOTICE("entry--rsv1: 0x%02x\n", mc_entry->entry.rsv1); + ZXIC_COMM_TRACE_NOTICE("entry--mc_pf_enable: 0x%02x\n", + mc_entry->entry.mc_pf_enable); + ZXIC_COMM_TRACE_NOTICE("entry--hit_flag: 0x%02x\n", + mc_entry->entry.hit_flag); +} + +ZXIC_UINT32 dpp_vport_mc_info_add(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + DPP_VPORT_MC_INFO_T *mc_info) +{ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 first_free_flag = 0; + ZXIC_UINT32 find_flag = 0; + ZXIC_UINT32 
mc_index = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT32 rc = DPP_OK; + DPP_VPORT_MC_TABLE_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + ZXIC_COMM_CHECK_POINT(mc_info); + + rc = dpp_vport_mc_table_get(pf_info, &mc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mc_table_get"); + ZXIC_COMM_CHECK_POINT(mc_table); + + vfunc_num = VFUNC_NUM(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(vfunc_num, 0, + (MC_GROUP_NUM * MC_MEMBER_NUM_IN_GROUP) - 1); + + group_id = vfunc_num / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, MC_GROUP_NUM - 1); + + for (index = 0; index < MC_TABLE_SIZE; index++) { + if ((first_free_flag == 0) && + (mc_table->mc_info[index].is_valid == 0)) { + first_free_flag = 1; + mc_index = index; + } + + /*update*/ + if ((mc_table->mc_info[index].is_valid == 1) && + (ZXIC_COMM_MEMCMP(mc_table->mc_info[index].mac, mac, 6) == + 0)) { + find_flag = 1; + mc_index = index; + break; + } + } + + if ((first_free_flag == 0) && (find_flag == 0)) { + return DPP_RC_TBL_IS_FULL; + } + + mc_table->mc_info[mc_index].is_valid = 1; + ZXIC_COMM_MEMCPY(mc_table->mc_info[mc_index].mac, mac, 6); + if (IS_PF(pf_info->vport)) { + mc_table->mc_info[mc_index].mc_pf_enable = 1; + } else { + mc_table->mc_info[mc_index].mc_bitmap[group_id] |= + ((ZXIC_UINT64)(1) + << (MC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % MC_MEMBER_NUM_IN_GROUP))); + } + ZXIC_COMM_MEMCPY_S(mc_info, sizeof(DPP_VPORT_MC_INFO_T), + &mc_table->mc_info[mc_index], + sizeof(DPP_VPORT_MC_INFO_T)); + + ZXIC_COMM_TRACE_NOTICE("[%s] mc_index:%u mc_bitmap:0x%016llx\n", + __FUNCTION__, mc_index, + mc_table->mc_info[mc_index].mc_bitmap[group_id]); + + return DPP_OK; +} +ZXIC_UINT32 dpp_vport_mc_info_del(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac) +{ + ZXIC_UINT32 index = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT32 rc = DPP_OK; + + DPP_VPORT_MC_TABLE_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + rc 
= dpp_vport_mc_table_get(pf_info, &mc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mc_table_get"); + ZXIC_COMM_CHECK_POINT(mc_table); + + vfunc_num = VFUNC_NUM(pf_info->vport); + ZXIC_COMM_CHECK_INDEX(vfunc_num, 0, + (MC_GROUP_NUM * MC_MEMBER_NUM_IN_GROUP) - 1); + + group_id = vfunc_num / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, MC_GROUP_NUM - 1); + + for (index = 0; index < MC_TABLE_SIZE; index++) { + if (mc_table->mc_info[index].is_valid == 0) { + continue; + } + if (ZXIC_COMM_MEMCMP(mc_table->mc_info[index].mac, mac, 6) == + 0) { + if (IS_PF(pf_info->vport)) { + mc_table->mc_info[index].mc_pf_enable = 0; + } else { + mc_table->mc_info[index].mc_bitmap[group_id] &= + ~((ZXIC_UINT64)(1) + << (MC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % + MC_MEMBER_NUM_IN_GROUP))); + } + + if ((mc_table->mc_info[index].mc_bitmap[0] == 0) && + (mc_table->mc_info[index].mc_bitmap[1] == 0) && + (mc_table->mc_info[index].mc_bitmap[2] == 0) && + (mc_table->mc_info[index].mc_bitmap[3] == 0) && + (mc_table->mc_info[index].mc_pf_enable == 0)) { + mc_table->mc_info[index].is_valid = 0; + ZXIC_COMM_MEMSET(mc_table->mc_info[index].mac, + 0x00, 6); + } + + break; + } + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_mc_info_update(ZXIC_UINT32 slot, ZXIC_UINT32 vport, + ZXIC_UINT32 new_vport, + ZXIC_CONST ZXIC_VOID *mac, + DPP_VPORT_MC_INFO_T *mc_info) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT32 new_group_id = 0; + ZXIC_UINT32 new_vfunc_num = 0; + + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_PF_INFO_T new_pf_info = { slot, new_vport }; + DPP_VPORT_MC_TABLE_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(mac); + ZXIC_COMM_CHECK_POINT(mc_info); + + rc = dpp_vport_mc_table_get(&pf_info, &mc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mc_table_get"); + ZXIC_COMM_CHECK_POINT(mc_table); + + vfunc_num = VFUNC_NUM(pf_info.vport); + ZXIC_COMM_CHECK_INDEX(vfunc_num, 0, + (MC_GROUP_NUM * 
MC_MEMBER_NUM_IN_GROUP) - 1); + group_id = vfunc_num / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, MC_GROUP_NUM - 1); + + new_vfunc_num = VFUNC_NUM(new_pf_info.vport); + ZXIC_COMM_CHECK_INDEX(new_vfunc_num, 0, + (MC_GROUP_NUM * MC_MEMBER_NUM_IN_GROUP) - 1); + new_group_id = new_vfunc_num / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(new_group_id, 0, MC_GROUP_NUM - 1); + + ZXIC_COMM_TRACE_NOTICE( + "[%s]vfunc_num:%u,group_id:%u,new_vfunc_num:%u,new_group_id:%u\n", + __FUNCTION__, vfunc_num, group_id, new_vfunc_num, new_group_id); + + for (index = 0; index < MC_TABLE_SIZE; index++) { + if ((mc_table->mc_info[index].is_valid == 1) && + (ZXIC_COMM_MEMCMP(mc_table->mc_info[index].mac, mac, 6) == + 0)) { + if (IS_PF(new_pf_info.vport)) { + mc_table->mc_info[index].mc_pf_enable = 1; + } else { + ZXIC_COMM_TRACE_NOTICE( + "[%u] mc_bitmap=0x%016llx\n", index, + mc_table->mc_info[index] + .mc_bitmap[group_id]); + mc_table->mc_info[index].mc_bitmap[group_id] &= + ~((ZXIC_UINT64)(1) + << (MC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % + MC_MEMBER_NUM_IN_GROUP))); + ZXIC_COMM_TRACE_NOTICE( + "mc_bitmap(del vport)=0x%016llx\n", + mc_table->mc_info[index] + .mc_bitmap[new_group_id]); + mc_table->mc_info[index] + .mc_bitmap[new_group_id] |= + ((ZXIC_UINT64)(1) + << (MC_MEMBER_NUM_IN_GROUP - 1 - + (new_vfunc_num % + MC_MEMBER_NUM_IN_GROUP))); + ZXIC_COMM_TRACE_NOTICE( + "mc_bitmap(add new vport)=0x%016llx\n", + mc_table->mc_info[index] + .mc_bitmap[new_group_id]); + } + ZXIC_COMM_MEMCPY_S(mc_info, sizeof(DPP_VPORT_MC_INFO_T), + &mc_table->mc_info[index], + sizeof(DPP_VPORT_MC_INFO_T)); + + ZXIC_COMM_TRACE_NOTICE("[%s] update success !", + __FUNCTION__); + return DPP_OK; + } + } + + ZXIC_COMM_TRACE_ERROR("[%s] update fail !", __FUNCTION__); + return DPP_ERR; +} + +ZXIC_UINT32 dpp_vport_mc_table_insert(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 queue = 0; + 
ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_MC_T mc_entry = { 0 }; + DPP_VPORT_MC_TABLE_T *mc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + ZXIC_COMM_MEMSET(&mc_entry, 0, sizeof(ZXDH_MC_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_mc_table_get(pf_info, &mc_table); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_mc_table_get"); + ZXIC_COMM_CHECK_POINT(mc_table); + + for (index = 0; index < MC_TABLE_SIZE; index++) { + if (mc_table->mc_info[index].is_valid == 0) { + continue; + } + if (ZXIC_COMM_MEMCMP(mc_table->mc_info[index].mac, mac, 6) != + 0) { + continue; + } + for (group_id = 0; group_id < MC_GROUP_NUM; group_id++) { + ZXIC_COMM_MEMCPY(mc_entry.key.mc_mac, mac, 6); + mc_entry.key.group_id = group_id; + mc_entry.entry.hit_flag = 0x00; + mc_entry.entry.mc_pf_enable = + mc_table->mc_info[index].mc_pf_enable; + mc_entry.entry.mc_bitmap = + mc_table->mc_info[index].mc_bitmap[group_id]; + + rc = dpp_apt_dtb_hash_insert(&dev, queue, sdt_no, + &mc_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_hash_insert"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot %u vport: 0x%04x sdt_no: %u group_id: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + sdt_no, group_id); + ZXIC_COMM_TRACE_NOTICE( + "[%s] mac: %02x:%02x:%02x:%02x:%02x:%02x.\n", + __FUNCTION__, mc_entry.key.mc_mac[0], + mc_entry.key.mc_mac[1], mc_entry.key.mc_mac[2], + mc_entry.key.mc_mac[3], mc_entry.key.mc_mac[4], + mc_entry.key.mc_mac[5]); + ZXIC_COMM_TRACE_NOTICE( + "[%s] mc_bitmap: %02x %02x %02x %02x %02x %02x %02x %02x.\n", + __FUNCTION__, + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 7), + *((ZXIC_UINT8 
*)(&mc_entry.entry.mc_bitmap) + + 6), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 5), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 4), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 3), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 2), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 1), + *((ZXIC_UINT8 *)(&mc_entry.entry.mc_bitmap) + + 0)); + } + return DPP_OK; + } + + for (group_id = 0; group_id < MC_GROUP_NUM; group_id++) { + ZXIC_COMM_MEMCPY(mc_entry.key.mc_mac, mac, 6); + mc_entry.key.group_id = group_id; + + rc = dpp_apt_dtb_hash_delete(&dev, queue, sdt_no, &mc_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_hash_insert"); + + ZXIC_COMM_TRACE_NOTICE("[%s] delete mc table.\n", __FUNCTION__); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot %u vport: 0x%04x sdt_no: %u group_id: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + group_id); + ZXIC_COMM_TRACE_NOTICE( + "[%s] mac: %02x:%02x:%02x:%02x:%02x:%02x.\n", + __FUNCTION__, mc_entry.key.mc_mac[0], + mc_entry.key.mc_mac[1], mc_entry.key.mc_mac[2], + mc_entry.key.mc_mac[3], mc_entry.key.mc_mac[4], + mc_entry.key.mc_mac[5]); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_multi_mac_add_member(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + DPP_VPORT_MC_INFO_T mc_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_mc_info_add(pf_info, mac, &mc_info); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_mc_info_add", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_mc_table_insert(pf_info, mac); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_mc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multi_mac_add_member); + +ZXIC_UINT32 dpp_multi_mac_del_member(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_mc_info_del(pf_info, mac); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_mc_info_del", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_mc_table_insert(pf_info, mac); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_mc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + 
return DPP_OK; +} +EXPORT_SYMBOL(dpp_multi_mac_del_member); +ZXIC_UINT32 dpp_batch_add_multicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 group_index = 0; + ZXIC_UINT32 entry_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_MC_T *p_batch_entry = NULL; + ZXDH_MC_T *p_one_entry = NULL; + ZXIC_UINT8 *p_mac = NULL; + DPP_VPORT_MC_INFO_T mc_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + ZXIC_COMM_CHECK_INDEX_LOWER(mac_num, 1); + + p_batch_entry = (ZXDH_MC_T *)ZXIC_COMM_MALLOC(MC_GROUP_NUM * mac_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT(p_batch_entry); + ZXIC_COMM_MEMSET_S(p_batch_entry, + MC_GROUP_NUM * mac_num * sizeof(ZXDH_MC_T), 0, + MC_GROUP_NUM * mac_num * sizeof(ZXDH_MC_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dev_get", p_batch_entry); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dtb_queue_id_get", + p_batch_entry); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_hash_index_get", + p_batch_entry); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_table_lock", + p_batch_entry); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(DEV_PCIE_LOCK(&dev), p_batch_entry); + + for (entry_index = 0; entry_index < mac_num; entry_index++) { + p_mac = (ZXIC_UINT8 *)mac + entry_index * 6; + rc = dpp_vport_mc_info_add(pf_info, p_mac, &mc_info); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_vport_mc_info_add", p_batch_entry, + DEV_PCIE_LOCK(&dev)); + + for (group_index = 0; group_index < MC_GROUP_NUM; + group_index++) { + p_one_entry = p_batch_entry + + MC_GROUP_NUM * entry_index + group_index; + 
p_one_entry->entry.hit_flag = 0x00; + p_one_entry->entry.mc_bitmap = + mc_info.mc_bitmap[group_index]; + p_one_entry->entry.mc_pf_enable = mc_info.mc_pf_enable; + p_one_entry->key.group_id = group_index; + ZXIC_COMM_MEMCPY_S(p_one_entry->key.mc_mac, 6, p_mac, + 6); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x group_id: %u mac: %02x:%02x:%02x:%02x:%02x:%02x bitmap:0x%016llx start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + p_one_entry->key.group_id, + p_one_entry->key.mc_mac[0], + p_one_entry->key.mc_mac[1], + p_one_entry->key.mc_mac[2], + p_one_entry->key.mc_mac[3], + p_one_entry->key.mc_mac[4], + p_one_entry->key.mc_mac[5], + p_one_entry->entry.mc_bitmap); + } + } + + rc = dpp_apt_dtb_multi_hash_insert(&dev, queue, sdt_no, + MC_GROUP_NUM * mac_num, + sizeof(ZXDH_MC_T), p_batch_entry); + ZXIC_COMM_FREE(p_batch_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_batch_add_multicast_mac); + +ZXIC_UINT32 dpp_batch_del_multicast_mac(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 mac_num, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 group_index = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 entry_index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_MC_T *p_batch_entry = NULL; + ZXDH_MC_T *p_one_entry = NULL; + ZXIC_UINT8 *p_mac = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + ZXIC_COMM_CHECK_INDEX_LOWER(mac_num, 1); + + p_batch_entry = (ZXDH_MC_T *)ZXIC_COMM_MALLOC(MC_GROUP_NUM * mac_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT(p_batch_entry); + ZXIC_COMM_MEMSET_S(p_batch_entry, + MC_GROUP_NUM * mac_num * sizeof(ZXDH_MC_T), 0, + MC_GROUP_NUM * mac_num * 
sizeof(ZXDH_MC_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dev_get", p_batch_entry); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_dtb_queue_id_get", + p_batch_entry); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_hash_index_get", + p_batch_entry); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_FREE(rc, "dpp_vport_table_lock", + p_batch_entry); + ZXIC_COMM_CHECK_POINT_MEMORY_FREE(DEV_PCIE_LOCK(&dev), p_batch_entry); + + for (entry_index = 0; entry_index < mac_num; entry_index++) { + p_mac = (ZXIC_UINT8 *)mac + entry_index * 6; + rc = dpp_vport_mc_info_del(pf_info, p_mac); + ZXIC_COMM_CHECK_RC_MEMORY_FREE_UNLOCK_NO_ASSERT( + rc, "dpp_vport_mc_info_del", p_batch_entry, + DEV_PCIE_LOCK(&dev)); + + for (group_index = 0; group_index < MC_GROUP_NUM; + group_index++) { + p_one_entry = p_batch_entry + + MC_GROUP_NUM * entry_index + group_index; + p_one_entry->key.group_id = group_index; + ZXIC_COMM_MEMCPY_S(p_one_entry->key.mc_mac, 6, p_mac, + 6); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x group_id: %u mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + p_one_entry->key.group_id, + p_one_entry->key.mc_mac[0], + p_one_entry->key.mc_mac[1], + p_one_entry->key.mc_mac[2], + p_one_entry->key.mc_mac[3], + p_one_entry->key.mc_mac[4], + p_one_entry->key.mc_mac[5]); + } + } + + rc = dpp_apt_dtb_multi_hash_delete(&dev, queue, sdt_no, + MC_GROUP_NUM * mac_num, + sizeof(ZXDH_MC_T), p_batch_entry); + ZXIC_COMM_FREE(p_batch_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_multi_hash_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x success.\n", + 
__FUNCTION__, pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_batch_del_multicast_mac); + +/*vport belong to the same group*/ +ZXIC_UINT32 dpp_multicast_mac_transfer_intra_group(DPP_DEV_T *dev, + ZXIC_UINT32 slot, + ZXIC_UINT32 vport, + ZXIC_UINT32 new_vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 max_item_num = DTB_DUMP_MULTICAST_MAC_DUMP_NUM; + ZXIC_UINT32 entryNum = 0; + ZXIC_UINT32 transfer_num = 0; + ZXDH_MC_T *pMcDataArr = NULL; + ZXDH_MC_T *pMcDataArrNew = NULL; + ZXDH_MC_T *p_mc_temp_entry = NULL; + ZXDH_MC_T *p_mc_entry_new = NULL; + + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_VPORT_MC_INFO_T mc_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + rc = dpp_dtb_queue_id_get(dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(&pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + group_id = VFUNC_NUM(pf_info.vport) / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, MC_GROUP_NUM - 1); + + pMcDataArr = (ZXDH_MC_T *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pMcDataArr); + + pMcDataArrNew = (ZXDH_MC_T *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pMcDataArrNew, pMcDataArr); + + rc = dpp_vport_table_lock(&pf_info, sdt_no, &DEV_PCIE_LOCK(dev)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_NO_ASSERT( + rc, "dpp_vport_table_lock", pMcDataArrNew, pMcDataArr); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT( + DEV_PCIE_LOCK(dev), pMcDataArrNew, pMcDataArr); + + rc = dpp_apt_dtb_hash_table_multicast_mac_dump(dev, queue, sdt_no, + pMcDataArr, &entryNum); + 
ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_hash_table_unicast_mac_dump", pMcDataArrNew, + pMcDataArr, DEV_PCIE_LOCK(dev)); + + for (index = 0; index < entryNum; index++) { + p_mc_temp_entry = pMcDataArr + index; + if (TRUE == dpp_vport_in_mc_bitmap( + vport, p_mc_temp_entry->entry.mc_bitmap)) { + rc = dpp_vport_mc_info_update( + slot, vport, new_vport, + p_mc_temp_entry->key.mc_mac, &mc_info); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_vport_mc_info_update", pMcDataArrNew, + pMcDataArr, DEV_PCIE_LOCK(dev)); + p_mc_entry_new = pMcDataArrNew + transfer_num; + ZXIC_COMM_MEMCPY_S(p_mc_entry_new, sizeof(ZXDH_MC_T), + p_mc_temp_entry, sizeof(ZXDH_MC_T)); + p_mc_entry_new->entry.hit_flag = 0; + p_mc_entry_new->entry.mc_pf_enable = + mc_info.mc_pf_enable; + p_mc_entry_new->entry.mc_bitmap = + mc_info.mc_bitmap[group_id]; + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x new_vport: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, slot, vport, new_vport, + p_mc_entry_new->key.mc_mac[0], + p_mc_entry_new->key.mc_mac[1], + p_mc_entry_new->key.mc_mac[2], + p_mc_entry_new->key.mc_mac[3], + p_mc_entry_new->key.mc_mac[4], + p_mc_entry_new->key.mc_mac[5]); + ZXIC_COMM_TRACE_NOTICE( + "[%s] group_id: %u mc_bitmap: 0x%016llx new_group_id: %u new_mc_bitmap: 0x%016llx\n", + __FUNCTION__, p_mc_temp_entry->key.group_id, + p_mc_temp_entry->entry.mc_bitmap, + p_mc_entry_new->key.group_id, + p_mc_entry_new->entry.mc_bitmap); + transfer_num++; + } + } + + if (transfer_num == 0) { + ZXIC_COMM_VFREE(pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArr); + ZXIC_COMM_PRINT("[%s] transfer num is 0!\n", __FUNCTION__); + rc = dpp_vport_table_unlock(&pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; + } + + rc = dpp_apt_dtb_multi_hash_insert(dev, queue, sdt_no, transfer_num, + sizeof(ZXDH_MC_T), pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArr); + 
ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_multi_hash_insert", + DEV_PCIE_LOCK(dev)); + + rc = dpp_vport_table_unlock(&pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} + +/*vport belong to the different group*/ +ZXIC_UINT32 dpp_multicast_mac_transfer_inter_group(DPP_DEV_T *dev, + ZXIC_UINT32 slot, + ZXIC_UINT32 vport, + ZXIC_UINT32 new_vport) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 new_group_id = 0; + ZXIC_UINT32 max_item_num = DTB_DUMP_MULTICAST_MAC_DUMP_NUM; + ZXIC_UINT32 entryNum = 0; + ZXDH_MC_T *pMcDataArr = NULL; + ZXDH_MC_T *pMcDataArrNew = NULL; + ZXDH_MC_T *p_mc_temp_entry = NULL; + ZXDH_MC_T *p_mc_entry_new = NULL; + + DPP_PF_INFO_T pf_info = { slot, vport }; + DPP_VPORT_MC_INFO_T mc_info = { 0 }; + + ZXIC_COMM_CHECK_POINT(dev); + + rc = dpp_dtb_queue_id_get(dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(&pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + group_id = VFUNC_NUM(pf_info.vport) / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, MC_GROUP_NUM - 1); + + new_group_id = VFUNC_NUM(new_vport) / MC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(new_group_id, 0, MC_GROUP_NUM - 1); + + pMcDataArr = (ZXDH_MC_T *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pMcDataArr); + + pMcDataArrNew = (ZXDH_MC_T *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(pMcDataArrNew, pMcDataArr); + + rc = dpp_vport_table_lock(&pf_info, sdt_no, &DEV_PCIE_LOCK(dev)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_NO_ASSERT( + rc, "dpp_vport_table_lock", 
pMcDataArrNew, pMcDataArr); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE2PTR_NO_ASSERT( + DEV_PCIE_LOCK(dev), pMcDataArrNew, pMcDataArr); + + rc = dpp_apt_dtb_hash_table_multicast_mac_dump(dev, queue, sdt_no, + pMcDataArr, &entryNum); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_hash_table_unicast_mac_dump", pMcDataArrNew, + pMcDataArr, DEV_PCIE_LOCK(dev)); + + for (index = 0; index < entryNum; index++) { + p_mc_temp_entry = pMcDataArr + index; + p_mc_entry_new = pMcDataArrNew + index; + ZXIC_COMM_MEMCPY_S(p_mc_entry_new, sizeof(ZXDH_MC_T), + p_mc_temp_entry, sizeof(ZXDH_MC_T)); + p_mc_entry_new->entry.hit_flag = 0; + if (TRUE == dpp_vport_in_mc_bitmap( + vport, p_mc_temp_entry->entry.mc_bitmap)) { + rc = dpp_vport_mc_info_update( + slot, vport, new_vport, + p_mc_temp_entry->key.mc_mac, &mc_info); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_vport_mc_info_update", pMcDataArrNew, + pMcDataArr, DEV_PCIE_LOCK(dev)); + p_mc_entry_new->key.group_id = new_group_id; + p_mc_entry_new->entry.mc_pf_enable = + mc_info.mc_pf_enable; + p_mc_entry_new->entry.mc_bitmap = + mc_info.mc_bitmap[new_group_id]; + } else { + if (p_mc_temp_entry->key.group_id == new_group_id) { + p_mc_entry_new->key.group_id = group_id; + } + } + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x new_vport: 0x%04x mac: %02x:%02x:%02x:%02x:%02x:%02x start.\n", + __FUNCTION__, slot, vport, new_vport, + p_mc_entry_new->key.mc_mac[0], + p_mc_entry_new->key.mc_mac[1], + p_mc_entry_new->key.mc_mac[2], + p_mc_entry_new->key.mc_mac[3], + p_mc_entry_new->key.mc_mac[4], + p_mc_entry_new->key.mc_mac[5]); + ZXIC_COMM_TRACE_NOTICE( + "[%s] group_id: %u mc_bitmap: 0x%016llx new_group_id: %u new_mc_bitmap: 0x%016llx\n", + __FUNCTION__, p_mc_temp_entry->key.group_id, + p_mc_temp_entry->entry.mc_bitmap, + p_mc_entry_new->key.group_id, + p_mc_entry_new->entry.mc_bitmap); + } + + if (entryNum == 0) { + ZXIC_COMM_VFREE(pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArr); + 
ZXIC_COMM_PRINT("[%s] transfer num is 0!\n", __FUNCTION__); + rc = dpp_vport_table_unlock(&pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; + } + + rc = dpp_apt_dtb_multi_hash_delete(dev, queue, sdt_no, entryNum, + sizeof(ZXDH_MC_T), pMcDataArr); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_multi_hash_delete", pMcDataArrNew, pMcDataArr, + DEV_PCIE_LOCK(dev)); + + rc = dpp_apt_dtb_multi_hash_insert(dev, queue, sdt_no, entryNum, + sizeof(ZXDH_MC_T), pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArrNew); + ZXIC_COMM_VFREE(pMcDataArr); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_multi_hash_insert", + DEV_PCIE_LOCK(dev)); + + rc = dpp_vport_table_unlock(&pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + return DPP_OK; +} + +ZXIC_UINT32 dpp_multicast_mac_transfer(DPP_PF_INFO_T *pf_info, + DPP_PF_INFO_T *new_pf_info) +{ + DPP_DEV_T dev = { 0 }; + DPP_DEV_T new_dev = { 0 }; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 new_group_id = 0; + + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(new_pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_get(new_pf_info, &new_dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + if ((0 != ZXIC_COMM_MEMCMP(&dev, &new_dev, sizeof(DPP_DEV_T))) || + IS_PF(pf_info->vport) || IS_PF(new_pf_info->vport)) { + ZXIC_COMM_TRACE_ERROR( + "current slot[0x%x] vport[0x%x] & new slot[0x%x] vport[0x%x] belong to different pf or slot\n", + dev.pcie_channel.slot, dev.pcie_channel.vport, + new_dev.pcie_channel.slot, new_dev.pcie_channel.vport); + ZXIC_COMM_TRACE_ERROR( + "current vport is %s, transfre vport is %s\n", + IS_PF(pf_info->vport) ? "PF" : "VF", + IS_PF(new_pf_info->vport) ? 
"PF" : "VF"); + return DPP_ERR; + } + + group_id = VFUNC_NUM(pf_info->vport) / MC_MEMBER_NUM_IN_GROUP; + new_group_id = VFUNC_NUM(new_pf_info->vport) / MC_MEMBER_NUM_IN_GROUP; + if (group_id == new_group_id) { + rc = dpp_multicast_mac_transfer_intra_group(&dev, pf_info->slot, + pf_info->vport, + new_pf_info->vport); + ZXIC_COMM_CHECK_RC(rc, + "dpp_multicast_mac_transfer_intra_group"); + } else { + rc = dpp_multicast_mac_transfer_inter_group(&dev, pf_info->slot, + pf_info->vport, + new_pf_info->vport); + ZXIC_COMM_CHECK_RC(rc, + "dpp_multicast_mac_transfer_inter_group"); + } + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x new_vport: 0x%04x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + new_pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_mac_transfer); + +ZXIC_UINT32 dpp_mc_pf_flag_add(MC_PF_FLAG_MGR *p_flag_mgr, ZXIC_UINT32 index, + ZXIC_UINT8 *mc_addr) +{ + ZXIC_COMM_MEMCPY(p_flag_mgr[index].mc_addr, mc_addr, 6); + p_flag_mgr[index].pf_flag = 1; + return DPP_OK; +} + +ZXIC_UINT32 dpp_mc_pf_flag_search(MC_PF_FLAG_MGR *p_flag_mgr, + ZXIC_UINT8 *mc_addr) +{ + ZXIC_UINT32 index = 0; + + for (index = 0; index < MC_TABLE_SIZE; index++) { + if (ZXIC_COMM_MEMCMP(p_flag_mgr[index].mc_addr, mc_addr, 6) == + 0) { + break; + } + } + + if (index == MC_TABLE_SIZE) { + ZXIC_COMM_TRACE_NOTICE("dpp_mc_pf_flag_search failed\n"); + return DPP_ERR; + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_multicast_mac_dump(DPP_PF_INFO_T *pf_info, + MAC_VPORT_INFO *p_mac_arr, + ZXIC_UINT32 *p_mac_num) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 mac_info_index = 0; + ZXIC_UINT32 num = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + ZXIC_UINT32 current_group_id = 0; + ZXIC_UINT64 current_mc_bitmap = 0; + ZXIC_UINT32 current_mc_pf_enable = 0; + + ZXIC_UINT16 current_vport[64] = { 0 }; + ZXIC_UINT32 current_vport_num = 0; + + MC_PF_FLAG_MGR *p_mc_pf_flag_mgr = NULL; + 
ZXIC_UINT32 pf_flag_count = 0; + + ZXIC_UINT32 max_item_num = DTB_DUMP_MULTICAST_MAC_DUMP_NUM; + ZXIC_UINT32 entryNum = 0; + + ZXDH_MC_T *pMcDataArr = NULL; + ZXDH_MC_T *p_mc_temp_entry = NULL; + MAC_VPORT_INFO *p_temp_mac_info = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_mac_arr); + ZXIC_COMM_CHECK_POINT(p_mac_num); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(&dev, sdt_no, &max_item_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + pMcDataArr = (ZXDH_MC_T *)ZXIC_COMM_VMALLOC(max_item_num * + sizeof(ZXDH_MC_T)); + ZXIC_COMM_CHECK_POINT_NO_ASSERT(pMcDataArr); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE(rc, "dpp_vport_table_lock", pMcDataArr); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE(DEV_PCIE_LOCK(&dev), pMcDataArr); + + rc = dpp_apt_dtb_hash_table_multicast_mac_dump(&dev, queue, sdt_no, + pMcDataArr, &entryNum); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE_UNLOCK_NO_ASSERT( + rc, "dpp_apt_dtb_hash_table_multicast_mac_dump", pMcDataArr, + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE(rc, "dpp_vport_table_unlock", + pMcDataArr); + + ZXIC_COMM_TRACE_NOTICE("multicast mac dump num:0x%x\n", entryNum); + + p_mc_pf_flag_mgr = (MC_PF_FLAG_MGR *)ZXIC_COMM_VMALLOC( + MC_TABLE_SIZE * sizeof(MC_PF_FLAG_MGR)); + ZXIC_COMM_CHECK_POINT_MEMORY_VFREE_NO_ASSERT(p_mc_pf_flag_mgr, + pMcDataArr); + ZXIC_COMM_MEMSET(p_mc_pf_flag_mgr, 0x00, + MC_TABLE_SIZE * sizeof(MC_PF_FLAG_MGR)); + + for (index = 0; index < entryNum; index++) { + p_mc_temp_entry = pMcDataArr + index; + + ZXIC_COMM_TRACE_NOTICE("mc entry index:0x%x\n", 
index); + dpp_mc_entry_print(p_mc_temp_entry); + + // 从mc中获取mac地址给mac_info,再转换group_id 和 bitmap信息 + current_group_id = p_mc_temp_entry->key.group_id; + current_mc_bitmap = p_mc_temp_entry->entry.mc_bitmap; + current_mc_pf_enable = p_mc_temp_entry->entry.mc_pf_enable; + + if (current_mc_pf_enable) { + p_temp_mac_info = p_mac_arr + mac_info_index; + if (dpp_mc_pf_flag_search(p_mc_pf_flag_mgr, + p_mc_temp_entry->key.mc_mac)) { + //没查中 + dpp_mc_pf_flag_add(p_mc_pf_flag_mgr, + pf_flag_count, + p_mc_temp_entry->key.mc_mac); + pf_flag_count++; + + ZXIC_COMM_MEMCPY(p_temp_mac_info->addr, + p_mc_temp_entry->key.mc_mac, + 6); + p_temp_mac_info->vport = + OWNER_PF_VPORT(pf_info->vport); + + mac_info_index = mac_info_index + 1; + } + } + + rc = dpp_vport_get_by_mc_bitmap( + OWNER_PF_VPORT(pf_info->vport), current_group_id, + current_mc_bitmap, current_vport, ¤t_vport_num); + ZXIC_COMM_CHECK_RC_MEMORY_VFREE2PTR_NO_ASSERT( + rc, "dpp_vport_get_by_mc_bitmap", pMcDataArr, + p_mc_pf_flag_mgr); + + ZXIC_COMM_TRACE_NOTICE("index %d get vf num %d\n", index, + current_vport_num); + + for (num = 0; num < current_vport_num; num++) { + p_temp_mac_info = p_mac_arr + mac_info_index; + ZXIC_COMM_MEMCPY(p_temp_mac_info->addr, + p_mc_temp_entry->key.mc_mac, 6); + p_temp_mac_info->vport = current_vport[num]; + mac_info_index = mac_info_index + 1; + } + ZXIC_COMM_TRACE_NOTICE("mac_info_index 0x%x\n", mac_info_index); + } + + ZXIC_COMM_VFREE(pMcDataArr); + ZXIC_COMM_VFREE(p_mc_pf_flag_mgr); + + *p_mac_num = mac_info_index; + + ZXIC_COMM_TRACE_NOTICE("dump mac num: 0x%x\n", *p_mac_num); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_mac_dump); + +ZXIC_UINT32 dpp_multicast_all_mac_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); 
+ + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_hash_offline_delete(&dev, queue, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_hash_offline_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_all_mac_delete); + +ZXIC_UINT32 dpp_multicast_all_mac_online_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_dtb_hash_online_delete(&dev, queue, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_hash_online_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_all_mac_online_delete); + +ZXIC_UINT32 dpp_multicast_mac_max_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *max_num) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(max_num); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_hash_index_get(pf_info, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_hash_index_get"); + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_hash_max_item_num_get(&dev, sdt_no, max_num); + ZXIC_COMM_CHECK_RC(rc, "dpp_hash_max_item_num_get"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x max_num: 0x%x get succ.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *max_num); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_mac_max_get); + +ZXIC_UINT32 dpp_multicast_all_mac_soft_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 sdt_no = 0; + ZXIC_UINT32 hash_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_soft_hash_index_get(&dev, &hash_index); + ZXIC_COMM_CHECK_RC(rc, "dpp_soft_hash_index_get"); + sdt_no = ZXDH_SDT_MC_TABLE_PHYPORT0 + hash_index; + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_hash_soft_delete_by_sdt(&dev, sdt_no); + 
ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_hash_soft_delete_by_sdt", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt:%u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_multicast_all_mac_soft_delete); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_pkt_cap.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_pkt_cap.c new file mode 100644 index 000000000000..c59f458b4978 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_pkt_cap.c @@ -0,0 +1,1315 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_pkt_cap.h" +#include "dpp_nppu_reg.h" +#include "dpp_reg_info.h" +#include "dpp_reg_api.h" +#include "dpp_sdt.h" +#include "dpp_drv_qos.h" +#include "dpp_dtb_table_api.h" +#include "dpp_kernel_init.h" +#include "dpp_dev.h" +#include "dpp_pktrx_api.h" + +static ZXIC_UINT32 g_speed_limit = DH_PKT_CAP_SPEED_DEFAULT; + +static ZXIC_UINT32 dpp_pkt_capture_key_word_mode_table_insert( + DPP_PF_INFO_T *pf_info, ZXIC_UINT32 tcam_index, + ZXIC_UINT16 key_word_offest, ZXIC_UINT8 key_word_length); +static ZXIC_UINT32 +dpp_pkt_capture_key_word_mode_table_delete(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 tcam_index); +static ZXIC_UINT32 dpp_pkt_capture_tcam_index_to_mask( + ZXIC_UINT32 tcam_index, ZXIC_UINT8 capture_pkt_flag, + ZXDH_PKT_CAP_NORMAL_CONFIG *config, ZXDH_PKT_CAP_MASK *rule_mask); + +/***********************************************************/ +/** 抓包初始化函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 
+************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_init(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_pkt_capture_disable_all(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_disable_all"); + + rc = dpp_pkt_capture_table_flush(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_table_flush"); + + rc = dpp_pkt_capture_speed_set(pf_info, DH_PKT_CAP_SPEED_DEFAULT); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_table_flush"); + + return DPP_OK; +} + +/***********************************************************/ +/** 抓包退出函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_uninit(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 last_flag = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_last_check(&dev, &last_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_last_check"); + + if (last_flag) { + rc = dpp_pkt_capture_disable_all(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_disable_all"); + + rc = dpp_pkt_capture_table_flush(pf_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_table_flush"); + } + + return DPP_OK; +} + +/***********************************************************/ +/** 抓包点使能函数 +* @param pf_info 镜像流上送PF的信息 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_enable(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_POINT capture_pkt_flag) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + 
ZXIC_COMM_CHECK_INDEX(capture_pkt_flag, DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_RDMA_TX); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_write_1( + &dev, DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET + capture_pkt_flag, + DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET + capture_pkt_flag, 1); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_write_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_enable); + +/***********************************************************/ +/** 抓包点去使能函数 +* @param pf_info 镜像流上送PF的信息 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_disable(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_POINT capture_pkt_flag) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(capture_pkt_flag, DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_RDMA_TX); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_write_1( + &dev, DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET + capture_pkt_flag, + DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET + capture_pkt_flag, 0); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_write_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_disable); + +/***********************************************************/ +/** 抓包点全去使能函数 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_disable_all(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + 
+ rc = dpp_pktrx_mcode_glb_cfg_write_1( + &dev, DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET, + DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET + + DH_PKT_CAP_POINT_IN_MF_GLOBAL_LENGTH - 1, + 0); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_write_1"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_disable_all); + +/***********************************************************/ +/** 抓包点使能状态获取函数 +* @param pf_info 镜像流上送PF的信息 +* @param enable_status 抓包点状态 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 +dpp_pkt_capture_enable_status_get(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_ENABLE_STATUS *enable_status) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 pktrxGlbalCfg = 0; + DPP_DEV_T dev = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(enable_status); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_pktrx_mcode_glb_cfg_get_1(&dev, &pktrxGlbalCfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_pktrx_mcode_glb_cfg_get_1"); + + pktrxGlbalCfg >>= DH_PKT_CAP_POINT_IN_MF_GLOBAL_OFFSET; + + enable_status->panel_rx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_PANEL_RX) & 1U; + enable_status->panel_tx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_PANEL_TX) & 1U; + enable_status->vqm_rx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_VQM_RX) & 1U; + enable_status->vqm_tx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_VQM_TX) & 1U; + enable_status->rdma_rx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_RDMA_RX) & 1U; + enable_status->rdma_tx_enable_status = + (pktrxGlbalCfg >> DH_PKT_CAP_POINT_RDMA_TX) & 1U; + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_enable_status_get); + +/***********************************************************/ +/** 将rule_index转换成tcam_index +* @param rule_index 规则索引 +* @param rule_mode 规则模式 +* @param capture_pkt_flag 抓包点使能标志位(0:panel_rx, 1:panel_tx, 2:vqm_rx 
3:vqm_tx 4:rdma_rx, 5:rdma_tx) +* @param tcam_index tcam表索引 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_rule_index_to_tcam_index( + ZXIC_UINT32 rule_index, ZXDH_PKT_CAP_MODE rule_mode, + ZXDH_PKT_CAP_POINT capture_pkt_flag, ZXIC_UINT32 *tcam_index) +{ + ZXIC_COMM_CHECK_POINT(tcam_index); + ZXIC_COMM_CHECK_INDEX(rule_mode, DH_PKT_CAP_MODE_NORMAL, + DH_PKT_CAP_MODE_KEY_WORD); + + if (rule_mode == DH_PKT_CAP_MODE_NORMAL) { + ZXIC_COMM_CHECK_INDEX(capture_pkt_flag, + DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_RDMA_TX); + ZXIC_COMM_CHECK_INDEX(rule_index, 0, + DH_PKT_CAP_POINT_NORMAL_RULE_NUM - 1); + *tcam_index = + capture_pkt_flag * DH_PKT_CAP_POINT_NORMAL_RULE_NUM + + rule_index; + } else { + ZXIC_COMM_CHECK_INDEX(capture_pkt_flag, + DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_VQM_TX); + ZXIC_COMM_CHECK_INDEX(rule_index, 0, + DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM - 1); + *tcam_index = + DH_PKT_CAP_POINT_MAX * + DH_PKT_CAP_POINT_NORMAL_RULE_NUM + + capture_pkt_flag * DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM + + rule_index; + } + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_rule_index_to_tcam_index); + +/***********************************************************/ +/** 将tcam_index转换成rule_index +* @param tcam_index tcam表索引 +* @param rule_mode 规则模式 +* @param rule_index 规则索引 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 +dpp_pkt_capture_tcam_index_to_rule_index(ZXIC_UINT32 tcam_index, + ZXDH_PKT_CAP_MODE *rule_mode, + ZXIC_UINT32 *rule_index) +{ + ZXIC_COMM_CHECK_POINT(rule_index); + ZXIC_COMM_CHECK_POINT(rule_mode); + ZXIC_COMM_CHECK_INDEX(tcam_index, 0, DH_PKT_CAP_TCAM_ITEM_NUM - 1); + + if (tcam_index < + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) { + *rule_mode = DH_PKT_CAP_MODE_NORMAL; 
+ *rule_index = tcam_index % DH_PKT_CAP_POINT_NORMAL_RULE_NUM; + } else { + *rule_mode = DH_PKT_CAP_MODE_KEY_WORD; + *rule_index = tcam_index % DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM; + } + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_tcam_index_to_rule_index); + +static ZXIC_UINT32 dpp_pkt_capture_tcam_index_to_mask( + ZXIC_UINT32 tcam_index, ZXIC_UINT8 capture_pkt_flag, + ZXDH_PKT_CAP_NORMAL_CONFIG *config, ZXDH_PKT_CAP_MASK *rule_mask) +{ + ZXIC_COMM_CHECK_POINT(rule_mask); + ZXIC_COMM_CHECK_INDEX(tcam_index, 0, DH_PKT_CAP_TCAM_ITEM_NUM - 1); + + ZXIC_COMM_MEMSET(rule_mask, 0xFF, sizeof(ZXDH_PKT_CAP_MASK)); + + rule_mask->capture_pkt_flag_mask = 0; + + if (tcam_index < + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) { + if (config->sourceid) { + if (capture_pkt_flag <= DH_PKT_CAP_POINT_PANEL_TX) { + rule_mask->panel_id_mask = 0; + } else if (capture_pkt_flag <= + DH_PKT_CAP_POINT_VQM_TX) { + rule_mask->vqm_vfid_mask = 0; + } else if (capture_pkt_flag <= + DH_PKT_CAP_POINT_RDMA_TX) { + rule_mask->vhca_id_mask = 0; + } else { + ZXIC_COMM_TRACE_ERROR( + "capture_pkt_flag = %d is error.\n", + capture_pkt_flag); + return DPP_ERR; + } + } + + if (config->protocol) { + rule_mask->protocol_mask = 0; + } + + if (config->ethtype) { + rule_mask->ethtype_mask = 0; + } + + if (config->dmac) { + ZXIC_COMM_MEMSET(rule_mask->dmac_mask, 0, 6); + } + + if (config->smac) { + ZXIC_COMM_MEMSET(rule_mask->smac_mask, 0, 6); + } + + if (config->sip) { + ZXIC_COMM_MEMSET(rule_mask->sip_mask, 0, 16); + } + + if (config->dip) { + ZXIC_COMM_MEMSET(rule_mask->dip_mask, 0, 16); + } + + if (config->sport) { + rule_mask->sport_mask = 0; + } + + if (config->dport) { + rule_mask->dport_mask = 0; + } + + if (config->qp) { + rule_mask->qp_mask = 0; + } + } else { + rule_mask->capture_pkt_flag_mask = 0; + rule_mask->key_word_len_mask = 0; + rule_mask->key_word_off_mask = 0; + ZXIC_COMM_MEMSET(rule_mask->key_word_mask, 0, 15); + } + + return DPP_OK; +} + +static ZXIC_UINT32 
dpp_pkt_capture_key_word_mode_table_insert( + DPP_PF_INFO_T *pf_info, ZXIC_UINT32 tcam_index, + ZXIC_UINT16 key_word_offest, ZXIC_UINT8 key_word_length) +{ + DPP_DEV_T dev = { 0 }; + ZXDH_PKT_CAP_KW_MODE_T kw_mode = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 eram_index = 0; + ZXIC_UINT32 rule_index = 0; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAP_KEYWORD_ATTR_TABLE; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_COMM_CHECK_INDEX(tcam_index, + DH_PKT_CAP_POINT_MAX * + DH_PKT_CAP_POINT_NORMAL_RULE_NUM, + DH_PKT_CAP_TCAM_ITEM_NUM - 1); + ZXIC_COMM_CHECK_INDEX(key_word_offest, 0, 8191); + ZXIC_COMM_CHECK_INDEX(key_word_length, 1, 15); + + eram_index = (tcam_index - + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) / + DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM; + rule_index = (tcam_index - + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) % + DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM; + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, eram_index, &kw_mode); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + kw_mode.hit_flag = 1; + + if (rule_index == 0) { + kw_mode.rule1_key_word_len = key_word_length; + kw_mode.rule1_key_word_off = key_word_offest; + } else { + kw_mode.rule2_key_word_len = key_word_length; + kw_mode.rule2_key_word_off = key_word_offest; + } + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, eram_index, &kw_mode); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u eram_index: %u rule_index: %u kw_off: %u 
kw_len: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, eram_index, + rule_index, key_word_offest, key_word_length); + + return DPP_OK; +} + +static ZXIC_UINT32 +dpp_pkt_capture_key_word_mode_table_delete(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 tcam_index) +{ + DPP_DEV_T dev = { 0 }; + ZXDH_PKT_CAP_KW_MODE_T kw_mode = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 eram_index = 0; + ZXIC_UINT32 rule_index = 0; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAP_KEYWORD_ATTR_TABLE; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_COMM_CHECK_INDEX(tcam_index, + DH_PKT_CAP_POINT_MAX * + DH_PKT_CAP_POINT_NORMAL_RULE_NUM, + DH_PKT_CAP_TCAM_ITEM_NUM - 1); + + eram_index = (tcam_index - + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) / + DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM; + rule_index = (tcam_index - + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) % + DH_PKT_CAP_POINT_KEY_WORD_RULE_NUM; + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, eram_index, &kw_mode); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + if (rule_index == 0) { + kw_mode.rule1_key_word_len = 0; + kw_mode.rule1_key_word_off = 0; + } else { + kw_mode.rule2_key_word_len = 0; + kw_mode.rule2_key_word_off = 0; + } + + if (kw_mode.rule1_key_word_len == 0 && + kw_mode.rule2_key_word_len == 0) { + kw_mode.hit_flag = 0; + } + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, eram_index, &kw_mode); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x 
sdt_no: %u eram_index: %u rule_index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, eram_index, + rule_index); + + return DPP_OK; +} + +/***********************************************************/ +/** 插入表项 +* @param pf_info 镜像流上送PF的信息 +* @param rule 表项信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_item_insert(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_RULE *rule) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAPTURE_PKT_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_PKT_CAP_T pkt_cap_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(rule); + ZXIC_COMM_CHECK_INDEX(rule->tcam_index, 0, + DH_PKT_CAP_TCAM_ITEM_NUM - 1); + ZXIC_COMM_CHECK_INDEX(rule->pkt_cap_key.capture_pkt_flag, + DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_RDMA_TX); + + ZXIC_COMM_MEMSET(&pkt_cap_entry.key, 0, sizeof(ZXDH_PKT_CAP_KEY)); + ZXIC_COMM_MEMSET(&pkt_cap_entry.mask, 0xFF, sizeof(ZXDH_PKT_CAP_MASK)); + ZXIC_COMM_MEMSET(&pkt_cap_entry.entry, 0, sizeof(ZXDH_PKT_CAP_ENTRY)); + + rc = dpp_pkt_capture_tcam_index_to_mask( + rule->tcam_index, rule->pkt_cap_key.capture_pkt_flag, + &rule->rule_config, &pkt_cap_entry.mask); + ZXIC_COMM_CHECK_RC(rc, "dpp_pkt_capture_tcam_index_to_mask"); + + if (rule->tcam_index >= + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) { + ZXIC_COMM_CHECK_INDEX(rule->pkt_cap_key.capture_pkt_flag, + DH_PKT_CAP_POINT_PANEL_RX, + DH_PKT_CAP_POINT_VQM_TX); + ZXIC_COMM_CHECK_INDEX(rule->pkt_cap_key.key_word_off, 0, 8191); + ZXIC_COMM_CHECK_INDEX(rule->pkt_cap_key.key_word_len, 1, 15); + + rc = dpp_pkt_capture_key_word_mode_table_insert( + pf_info, rule->tcam_index, + rule->pkt_cap_key.key_word_off, + rule->pkt_cap_key.key_word_len); + ZXIC_COMM_CHECK_RC( + rc, "dpp_pkt_capture_key_word_mode_table_insert"); + } + + pkt_cap_entry.index = 
rule->tcam_index; + + rc = ZXIC_COMM_MEMCPY(&(pkt_cap_entry.key), &(rule->pkt_cap_key), + sizeof(ZXDH_PKT_CAP_KEY)); + ZXIC_COMM_CHECK_RC(rc, "ZXIC_COMM_MEMCPY"); + + pkt_cap_entry.entry.hit_flag = 0; + pkt_cap_entry.entry.value_flag = 1; + pkt_cap_entry.entry.index = rule->tcam_index; + pkt_cap_entry.entry.vqm_vfid = rule->dst_vqm_vfid; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_acl_entry_insert(&dev, queue, sdt_no, &pkt_cap_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_acl_entry_insert"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + rule->tcam_index); + + ZXIC_COMM_PRINT("[%s] rule_config:\n", __FUNCTION__); + ZXIC_COMM_PRINT("\t sourceid : %u\n", rule->rule_config.sourceid); + ZXIC_COMM_PRINT("\t dmac : %u\n", rule->rule_config.dmac); + ZXIC_COMM_PRINT("\t smac : %u\n", rule->rule_config.smac); + ZXIC_COMM_PRINT("\t ethtype : %u\n", rule->rule_config.ethtype); + ZXIC_COMM_PRINT("\t sip : %u\n", rule->rule_config.sip); + ZXIC_COMM_PRINT("\t dip : %u\n", rule->rule_config.dip); + ZXIC_COMM_PRINT("\t sport : %u\n", rule->rule_config.sport); + ZXIC_COMM_PRINT("\t dport : %u\n", rule->rule_config.dport); + ZXIC_COMM_PRINT("\t protocol : %u\n", rule->rule_config.protocol); + ZXIC_COMM_PRINT("\t qp : %u\n", rule->rule_config.qp); + + ZXIC_COMM_PRINT("[%s] rule_key:\n", __FUNCTION__); + dpp_data_print((ZXIC_UINT8 *)(&(pkt_cap_entry.key)), + sizeof(ZXDH_PKT_CAP_KEY)); + + ZXIC_COMM_PRINT("[%s] rule_mask:\n", __FUNCTION__); + dpp_data_print((ZXIC_UINT8 *)(&(pkt_cap_entry.mask)), + sizeof(ZXDH_PKT_CAP_MASK)); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_item_insert); + +/***********************************************************/ +/** 删除表项 +* @param pf_info 镜像流上送PF的信息 +* @param tcam_index TCAM表索引 +* +* @return DPP_OK 函数执行成功, 
DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_item_delete(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 tcam_index) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAPTURE_PKT_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_PKT_CAP_T pkt_cap_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(tcam_index, 0, DH_PKT_CAP_TCAM_ITEM_NUM - 1); + + ZXIC_COMM_MEMSET(&pkt_cap_entry.key, 0xFF, sizeof(ZXDH_PKT_CAP_KEY)); + ZXIC_COMM_MEMSET(&pkt_cap_entry.mask, 0, sizeof(ZXDH_PKT_CAP_MASK)); + ZXIC_COMM_MEMSET(&pkt_cap_entry.entry, 0, sizeof(ZXDH_PKT_CAP_ENTRY)); + + if (tcam_index >= + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM) { + rc = dpp_pkt_capture_key_word_mode_table_delete(pf_info, + tcam_index); + ZXIC_COMM_CHECK_RC( + rc, "dpp_pkt_capture_key_word_mode_table_delete"); + } + + pkt_cap_entry.index = tcam_index; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_apt_dtb_acl_entry_insert(&dev, queue, sdt_no, &pkt_cap_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_acl_entry_insert"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + tcam_index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_item_delete); + +/***********************************************************/ +/** 获取所有表项 +* @param pf_info 镜像流上送PF的信息 +* @param rule_array rule数组 +* @param entry_num 表项个数 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_table_dump(DPP_PF_INFO_T *pf_info, + ZXDH_PKT_CAP_RULE *rule_array, + ZXIC_UINT32 *entry_num) +{ + DPP_DEV_T dev = { 0 }; + ZXIC_UINT32 
queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAPTURE_PKT_TABLE; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 i = 0; + ZXIC_UINT32 rule_array_index = 0; + ZXIC_UINT32 dump_num = 0; + ZXIC_UINT32 data_byte_size = 0; + ZXIC_UINT32 table_depth = 0; + ZXDH_PKT_CAP_T pkt_cap_entry = { 0 }; + DPP_ACL_ENTRY_EX_T acl_entry = { 0 }; + DPP_DTB_ACL_ENTRY_INFO_T *p_entry_arr = NULL; + DPP_SDTTBL_ETCAM_T sdt_etcam_info = { 0 }; + ZXIC_UINT8 *data_buff = NULL; + ZXIC_UINT8 *mask_buff = NULL; + ZXIC_UINT32 *eram_buff = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(entry_num); + ZXIC_COMM_CHECK_POINT(rule_array); + ZXIC_COMM_CHECK_INDEX_LOWER(*entry_num, 1); + + ZXIC_COMM_MEMSET(rule_array, 0, *entry_num * sizeof(ZXDH_PKT_CAP_RULE)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_soft_sdt_tbl_get(&dev, sdt_no, &sdt_etcam_info); + ZXIC_COMM_CHECK_RC(rc, "dpp_soft_sdt_tbl_get"); + + table_depth = sdt_etcam_info.etcam_table_depth; + ZXIC_COMM_CHECK_INDEX_LOWER(table_depth, 1); + + data_byte_size = + DPP_ETCAM_ENTRY_SIZE_GET(sdt_etcam_info.etcam_key_mode); + + p_entry_arr = (DPP_DTB_ACL_ENTRY_INFO_T *)ZXIC_COMM_MALLOC( + table_depth * sizeof(DPP_DTB_ACL_ENTRY_INFO_T)); + ZXIC_COMM_CHECK_POINT(p_entry_arr); + + for (i = 0; i < table_depth; i++) { + data_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size * + sizeof(ZXIC_UINT8)); + if (!data_buff) { + ZXIC_COMM_TRACE_ERROR( + "ZXIC_COMM_MALLOC data_buff filed.\n"); + goto err_to_free; + } + ZXIC_COMM_MEMSET(data_buff, 0, + data_byte_size * sizeof(ZXIC_UINT8)); + p_entry_arr[i].key_data = data_buff; + + mask_buff = (ZXIC_UINT8 *)ZXIC_COMM_MALLOC(data_byte_size * + sizeof(ZXIC_UINT8)); + if (!mask_buff) { + ZXIC_COMM_TRACE_ERROR( + "ZXIC_COMM_MALLOC mask_buff filed.\n"); + goto err_to_free; + } + ZXIC_COMM_MEMSET(mask_buff, 0, + data_byte_size * sizeof(ZXIC_UINT8)); + 
p_entry_arr[i].key_mask = mask_buff; + + eram_buff = (ZXIC_UINT32 *)ZXIC_COMM_MALLOC( + 2 * sizeof(ZXIC_UINT32)); + if (!eram_buff) { + ZXIC_COMM_TRACE_ERROR( + "ZXIC_COMM_MALLOC eram_buff filed.\n"); + goto err_to_free; + } + ZXIC_COMM_MEMSET(eram_buff, 0, 2 * sizeof(ZXIC_UINT32)); + p_entry_arr[i].p_as_rslt = (ZXIC_UINT8 *)eram_buff; + + mask_buff = NULL; + data_buff = NULL; + eram_buff = NULL; + } + + rc = dpp_dtb_acl_dump(&dev, queue, sdt_no, (ZXIC_UINT8 *)p_entry_arr, + &dump_num); + if (rc != DPP_OK) { + ZXIC_COMM_TRACE_ERROR("dpp_dtb_acl_dump filed, rc = %d\n", rc); + goto err_to_free; + } + + if (dump_num == table_depth) { + for (i = 0; i < dump_num; i++) { + acl_entry.key_data = p_entry_arr[i].key_data; + acl_entry.key_mask = p_entry_arr[i].key_mask; + acl_entry.p_as_rslt = p_entry_arr[i].p_as_rslt; + acl_entry.pri = p_entry_arr[i].handle; + + rc = dpp_apt_get_pkt_cap_data(&pkt_cap_entry, + &acl_entry); + if (rc != 0) { + ZXIC_COMM_TRACE_ERROR( + "dpp_apt_get_pkt_cap_data filed, rc = %d\n", + rc); + goto err_to_free; + } + + if (pkt_cap_entry.entry.value_flag) { + rule_array[rule_array_index].tcam_index = + pkt_cap_entry.index; + rule_array[rule_array_index].dst_vqm_vfid = + pkt_cap_entry.entry.vqm_vfid; + ZXIC_COMM_MEMCPY(&(rule_array[rule_array_index] + .pkt_cap_key), + &(pkt_cap_entry.key), + sizeof(ZXDH_PKT_CAP_KEY)); + + if (pkt_cap_entry.index < + DH_PKT_CAP_POINT_MAX * + DH_PKT_CAP_POINT_NORMAL_RULE_NUM) { + if (pkt_cap_entry.mask.panel_id_mask != + 0xF || + pkt_cap_entry.mask.vqm_vfid_mask != + 0xFFFF || + pkt_cap_entry.mask.vhca_id_mask != + 0xFFFF) { + rule_array[rule_array_index] + .rule_config.sourceid = + 1; + } + + if (pkt_cap_entry.mask.protocol_mask != + 0xFF) { + rule_array[rule_array_index] + .rule_config.protocol = + 1; + } + + if (pkt_cap_entry.mask.ethtype_mask != + 0xFFFF) { + rule_array[rule_array_index] + .rule_config.ethtype = + 1; + } + + if (pkt_cap_entry.mask.dmac_mask[0] != + 0xFF) { + rule_array[rule_array_index] + 
.rule_config.dmac = 1; + } + + if (pkt_cap_entry.mask.smac_mask[0] != + 0xFF) { + rule_array[rule_array_index] + .rule_config.smac = 1; + } + + if (pkt_cap_entry.mask.sip_mask[0] != + 0xFF) { + rule_array[rule_array_index] + .rule_config.sip = 1; + } + + if (pkt_cap_entry.mask.dip_mask[0] != + 0xFF) { + rule_array[rule_array_index] + .rule_config.dip = 1; + } + + if (pkt_cap_entry.mask.sport_mask != + 0xFFFF) { + rule_array[rule_array_index] + .rule_config.sport = 1; + } + + if (pkt_cap_entry.mask.dport_mask != + 0xFFFF) { + rule_array[rule_array_index] + .rule_config.dport = 1; + } + + if (pkt_cap_entry.mask.qp_mask != + 0xFFFFFF) { + rule_array[rule_array_index] + .rule_config.qp = 1; + } + } + + ZXIC_COMM_PRINT( + "rule [%u] tcam_index = %u\n", + rule_array_index, + rule_array[rule_array_index].tcam_index); + ZXIC_COMM_PRINT("rule [%u] dst_vqm_vfid = %u\n", + rule_array_index, + rule_array[rule_array_index] + .dst_vqm_vfid); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.sourceid = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.sourceid); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.dmac = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.dmac); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.smac = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.smac); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.ethtype = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.ethtype); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.sip = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.sip); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.dip = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.dip); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.protocol = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.protocol); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.sport = %u\n", + rule_array_index, + rule_array[rule_array_index] + 
.rule_config.sport); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.dport = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.dport); + ZXIC_COMM_PRINT( + "rule [%u] rule_config.qp = %u\n", + rule_array_index, + rule_array[rule_array_index] + .rule_config.qp); + ZXIC_COMM_PRINT("rule [%u] l2_info:\n", + rule_array_index); + ZXIC_COMM_PRINT("\t dmac:\n"); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dmac[0], + rule_array[rule_array_index] + .pkt_cap_key.dmac[1], + rule_array[rule_array_index] + .pkt_cap_key.dmac[2], + rule_array[rule_array_index] + .pkt_cap_key.dmac[3], + rule_array[rule_array_index] + .pkt_cap_key.dmac[4], + rule_array[rule_array_index] + .pkt_cap_key.dmac[5]); + ZXIC_COMM_PRINT("\t smac:\n"); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.smac[0], + rule_array[rule_array_index] + .pkt_cap_key.smac[1], + rule_array[rule_array_index] + .pkt_cap_key.smac[2], + rule_array[rule_array_index] + .pkt_cap_key.smac[3], + rule_array[rule_array_index] + .pkt_cap_key.smac[4], + rule_array[rule_array_index] + .pkt_cap_key.smac[5]); + ZXIC_COMM_PRINT("\t ethtype:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.ethtype); + ZXIC_COMM_PRINT("rule [%u] l3_info:\n", + rule_array_index); + ZXIC_COMM_PRINT("\t sip:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.sip[0], + rule_array[rule_array_index] + .pkt_cap_key.sip[1], + rule_array[rule_array_index] + .pkt_cap_key.sip[2], + rule_array[rule_array_index] + .pkt_cap_key.sip[3]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.sip[4], + rule_array[rule_array_index] + .pkt_cap_key.sip[5], + rule_array[rule_array_index] + .pkt_cap_key.sip[6], + rule_array[rule_array_index] + .pkt_cap_key.sip[7]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + 
rule_array[rule_array_index] + .pkt_cap_key.sip[8], + rule_array[rule_array_index] + .pkt_cap_key.sip[9], + rule_array[rule_array_index] + .pkt_cap_key.sip[10], + rule_array[rule_array_index] + .pkt_cap_key.sip[11]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.sip[12], + rule_array[rule_array_index] + .pkt_cap_key.sip[13], + rule_array[rule_array_index] + .pkt_cap_key.sip[14], + rule_array[rule_array_index] + .pkt_cap_key.sip[15]); + ZXIC_COMM_PRINT("\t dip:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dip[0], + rule_array[rule_array_index] + .pkt_cap_key.dip[1], + rule_array[rule_array_index] + .pkt_cap_key.dip[2], + rule_array[rule_array_index] + .pkt_cap_key.dip[3]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dip[4], + rule_array[rule_array_index] + .pkt_cap_key.dip[5], + rule_array[rule_array_index] + .pkt_cap_key.dip[6], + rule_array[rule_array_index] + .pkt_cap_key.dip[7]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dip[8], + rule_array[rule_array_index] + .pkt_cap_key.dip[9], + rule_array[rule_array_index] + .pkt_cap_key.dip[10], + rule_array[rule_array_index] + .pkt_cap_key.dip[11]); + ZXIC_COMM_PRINT("\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dip[12], + rule_array[rule_array_index] + .pkt_cap_key.dip[13], + rule_array[rule_array_index] + .pkt_cap_key.dip[14], + rule_array[rule_array_index] + .pkt_cap_key.dip[15]); + ZXIC_COMM_PRINT("\t protocol:\n"); + ZXIC_COMM_PRINT("\t\t 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.protocol); + ZXIC_COMM_PRINT("rule [%u] l4_info:\n", + rule_array_index); + ZXIC_COMM_PRINT("\t dport: 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.dport); + ZXIC_COMM_PRINT("\t sport: 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.sport); + ZXIC_COMM_PRINT("rule [%u] qp: 0x%x\n", 
+ rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.qp); + ZXIC_COMM_PRINT( + "rule [%u] pkt_cap_flag: %u\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.capture_pkt_flag); + ZXIC_COMM_PRINT("rule [%u] panel_id: 0x%x\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.panel_id); + ZXIC_COMM_PRINT("rule [%u] vqm_vfid: 0x%x\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.vqm_vfid); + ZXIC_COMM_PRINT("rule [%u] vhca_id: 0x%x\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.vhca_id); + ZXIC_COMM_PRINT( + "rule [%u] kw_len: 0x%x\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.key_word_len); + ZXIC_COMM_PRINT( + "rule [%u] kw_off: 0x%x\n", + rule_array_index, + rule_array[rule_array_index] + .pkt_cap_key.key_word_off); + ZXIC_COMM_PRINT("rule [%u] kw:\n", + rule_array_index); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.key_word[0], + rule_array[rule_array_index] + .pkt_cap_key.key_word[1], + rule_array[rule_array_index] + .pkt_cap_key.key_word[2], + rule_array[rule_array_index] + .pkt_cap_key.key_word[3]); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.key_word[4], + rule_array[rule_array_index] + .pkt_cap_key.key_word[5], + rule_array[rule_array_index] + .pkt_cap_key.key_word[6], + rule_array[rule_array_index] + .pkt_cap_key.key_word[7]); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.key_word[8], + rule_array[rule_array_index] + .pkt_cap_key.key_word[9], + rule_array[rule_array_index] + .pkt_cap_key.key_word[10], + rule_array[rule_array_index] + .pkt_cap_key.key_word[11]); + ZXIC_COMM_PRINT( + "\t\t 0x%x 0x%x 0x%x\n", + rule_array[rule_array_index] + .pkt_cap_key.key_word[12], + rule_array[rule_array_index] + .pkt_cap_key.key_word[13], + rule_array[rule_array_index] + .pkt_cap_key.key_word[14]); + + 
rule_array_index++; + *entry_num = (*entry_num) - 1; + + if (*entry_num == 0) { + break; + } + } + } + + *entry_num = rule_array_index; + } else { + ZXIC_COMM_TRACE_ERROR("dpp_dtb_acl_dump filed, dump_num = %d\n", + dump_num); + goto err_to_free; + } + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u pkt cap tcam table dump successed.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + + for (i = 0; i < table_depth; i++) { + if (p_entry_arr[i].key_data) { + ZXIC_COMM_FREE(p_entry_arr[i].key_data); + } + + if (p_entry_arr[i].key_mask) { + ZXIC_COMM_FREE(p_entry_arr[i].key_mask); + } + + if (sdt_etcam_info.as_en) { + if (p_entry_arr[i].p_as_rslt) { + ZXIC_COMM_FREE(p_entry_arr[i].p_as_rslt); + } + } + } + + ZXIC_COMM_FREE(p_entry_arr); + + return DPP_OK; + +err_to_free: + for (i = 0; i < table_depth; i++) { + if (p_entry_arr[i].key_data) { + ZXIC_COMM_FREE(p_entry_arr[i].key_data); + } + + if (p_entry_arr[i].key_mask) { + ZXIC_COMM_FREE(p_entry_arr[i].key_mask); + } + + if (sdt_etcam_info.as_en) { + if (p_entry_arr[i].p_as_rslt) { + ZXIC_COMM_FREE(p_entry_arr[i].p_as_rslt); + } + } + } + + ZXIC_COMM_FREE(p_entry_arr); + return DPP_ERR; +} +EXPORT_SYMBOL(dpp_pkt_capture_table_dump); + +/***********************************************************/ +/** 删除所有表项 +* @param pf_info 镜像流上送PF的信息 +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_table_flush(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_CAPTURE_PKT_TABLE; + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 tcam_index = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_dtb_etcam_table_flush(&dev, queue, sdt_no); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_etcam_table_flush"); + + for (tcam_index = + DH_PKT_CAP_POINT_MAX * DH_PKT_CAP_POINT_NORMAL_RULE_NUM; + tcam_index < DH_PKT_CAP_TCAM_ITEM_NUM; tcam_index++) { + rc = dpp_pkt_capture_key_word_mode_table_delete(pf_info, + tcam_index); + ZXIC_COMM_CHECK_RC( + rc, "dpp_pkt_capture_key_word_mode_table_delete"); + } + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x sdt_no: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_table_flush); + +/***********************************************************/ +/** 设置镜像流速率 +* @param pf_info 镜像流上送PF的信息 +* @param speed_kbps 镜像流速率kbps +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 +************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_speed_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 speed_kbps) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_CAR_PROFILE_CFG_T cfg = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(speed_kbps, DH_PKT_CAP_SPEED_MIN, + DH_PKT_CAP_SPEED_MAX); + + ZXIC_COMM_MEMSET(&cfg, 0, sizeof(DPP_STAT_CAR_PROFILE_CFG_T)); + + cfg.profile_id = 511; + cfg.cir = speed_kbps; + cfg.cbs = 1280000; + + rc = dpp_car_queue_cfg_set(pf_info, 0, 30000, 0, 1, 511); + ZXIC_COMM_CHECK_RC(rc, "dpp_car_queue_cfg_set"); + + rc = dpp_car_profile_cfg_set(pf_info, 0, 0, 511, &cfg); + ZXIC_COMM_CHECK_RC(rc, "dpp_car_profile_cfg_set"); + + // 配置成功后更新限速值g_speed_limit + g_speed_limit = speed_kbps; + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x speed_kbps: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + speed_kbps); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_speed_set); + +/***********************************************************/ +/** 获取镜像流速率 +* @param pf_info 镜像流上送PF的信息 +* @param speed_kbps 镜像流速率kbps +* +* @return DPP_OK 函数执行成功, DPP_ERR 函数执行失败 +* @remark 无 +* @see +* @author sl @date 2024/10/28 
+************************************************************/ +ZXIC_UINT32 dpp_pkt_capture_speed_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *speed_kbps) +{ + // ZXIC_UINT32 rc = DPP_OK; + // DPP_STAT_CAR_PROFILE_CFG_T cfg = {0}; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(speed_kbps); + + // rc = dpp_car_profile_cfg_get(pf_info, 0, 0, 511, &cfg); + // ZXIC_COMM_CHECK_RC(rc, "dpp_car_profile_cfg_set"); + + *speed_kbps = g_speed_limit; + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x speed_kbps: %u.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + *speed_kbps); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_pkt_capture_speed_get); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_plcr.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_plcr.c new file mode 100644 index 000000000000..e880418a6aaa --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_plcr.c @@ -0,0 +1,147 @@ +#include "dpp_tbl_plcr.h" +#include "dpp_dev.h" +#include "dpp_drv_sdt.h" +#include "dpp_drv_eram.h" +#include "dpp_dtb.h" +#include "dpp_apt_se_api.h" +#include "dpp_tbl_api.h" + +ZXIC_UINT32 dpp_vport_egress_meter_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_NP_EGRESS_METER_EN_OFF; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_egress_meter_en_set); + +ZXIC_UINT32 dpp_vport_egress_meter_en_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(enable); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *enable = port_attr_entry.np_egress_meter_enable; + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x egress_meter_enable_status: %u success.\n", + 
__FUNCTION__, pf_info->slot, pf_info->vport, *enable); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_egress_meter_en_get); + +ZXIC_UINT32 dpp_vport_ingress_meter_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_NP_INGRESS_METER_EN_OFF; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_ingress_meter_en_set); + +ZXIC_UINT32 dpp_vport_ingress_meter_en_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(enable); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *enable = port_attr_entry.np_ingress_meter_enable; + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x ingress_meter_enable_status: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *enable); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_ingress_meter_en_get); + +ZXIC_UINT32 dpp_vport_egress_meter_mode_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_NP_EGRESS_MODE; + + rc = dpp_vport_attr_set(pf_info, attr, mode & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_egress_meter_mode_set); + +ZXIC_UINT32 dpp_vport_egress_meter_mode_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mode); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *mode = port_attr_entry.np_egress_meter_mode; + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x egress_meter_mode_status: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *mode); + + return 
DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_egress_meter_mode_get); + +ZXIC_UINT32 dpp_vport_ingress_meter_mode_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_NP_INGRESS_MODE; + + rc = dpp_vport_attr_set(pf_info, attr, mode & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_ingress_meter_mode_set); + +ZXIC_UINT32 dpp_vport_ingress_meter_mode_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mode); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *mode = port_attr_entry.np_ingress_meter_mode; + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x ingress_meter_mode_status: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *mode); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_ingress_meter_mode_get); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_port.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_port.c new file mode 100644 index 000000000000..3c89a14022f7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_port.c @@ -0,0 +1,428 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_port.h" +#include "dpp_tbl_diag.h" + +ZXIC_UINT32 dpp_vport_create(DPP_PF_INFO_T *pf_info) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_vport_create_by_vqm_vfid(pf_info, VQM_VFID(pf_info->vport)); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_vport_create_by_vqm_vfid"); + + rc = dpp_vport_attr_set(pf_info, SRIOV_VPORT_IS_VF, + !IS_PF(pf_info->vport)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + rc = dpp_vport_attr_set(pf_info, SRIOV_VPORT_PF_VQM_VFID, + OWNER_PF_VQM_VFID(pf_info->vport)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_create); + +ZXIC_UINT32 dpp_vport_create_by_vqm_vfid(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 vqm_vfid) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE; + ZXIC_UINT32 index = vqm_vfid; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + ZXIC_COMM_MEMSET(&port_attr_entry, 0, sizeof(ZXDH_SRIOV_VPORT_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + port_attr_entry.hit_flag = 1; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &port_attr_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_create_by_vqm_vfid); + +ZXIC_UINT32 dpp_vport_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + 
ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_clear(&dev, queue, sdt_no, index); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_clear", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_delete); + +ZXIC_UINT32 dpp_vport_attr_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 attr, + ZXIC_UINT32 value) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(attr, 0, + (ZXIC_UINT32)((sizeof(ZXDH_SRIOV_VPORT_T) / + sizeof(ZXIC_UINT32)) - + 1)); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + dpp_vport_table_attr_name_get(attr), attr, value); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, &port_attr_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + port_attr_entry.hit_flag = 1; + *((((ZXIC_UINT32 *)(&port_attr_entry)) + attr)) = value; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &port_attr_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + if (attr == SRIOV_VPORT_HASH_SEARCH_INDEX) { + rc = dpp_soft_hash_index_set(&dev, value); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_soft_hash_index_set", + DEV_PCIE_LOCK(&dev)); + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + dpp_vport_table_attr_name_get(attr), attr, value); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_attr_set); + +ZXIC_UINT32 dpp_vport_attr_get(DPP_PF_INFO_T *pf_info, + ZXDH_SRIOV_VPORT_T *port_attr_entry) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_SRIOV_VPORT_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(port_attr_entry); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x index: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + index); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + 
ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, port_attr_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_INDEX_NOT_EQUAL_UNLOCK(port_attr_entry->hit_flag, 1, + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_attr_get); + +ZXIC_UINT32 dpp_vport_rx_flow_hash_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 hash_mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_RSS_HASH_FACTOR; + + rc = dpp_vport_attr_set(pf_info, attr, hash_mode & 0xff); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_rx_flow_hash_set); + +ZXIC_UINT32 dpp_vport_base_qid_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *base_qid) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(base_qid); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *base_qid = port_attr_entry.port_base_qid; + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x factor: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *base_qid); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_base_qid_get); + +ZXIC_UINT32 dpp_vport_rx_flow_hash_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *hash_mode) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(hash_mode); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc 
= dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *hash_mode = port_attr_entry.rss_hash_factor; + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x factor: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + *hash_mode); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_rx_flow_hash_get); + +ZXIC_UINT32 dpp_vport_hash_index_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 *hash_index) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(hash_index); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + *hash_index = port_attr_entry.hash_search_index; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x hash_search_index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, *hash_index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_hash_index_get); + +ZXIC_UINT32 dpp_vport_hash_funcs_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 funcs) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_HASH_ALG; + + rc = dpp_vport_attr_set(pf_info, attr, funcs & 0x0f); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_hash_funcs_set); + +ZXIC_UINT32 dpp_vport_rss_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_RSS_EN_OFF; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_rss_en_set); + +ZXIC_UINT32 dpp_vport_fd_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_FD_EN_OFF; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return 
DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_fd_en_set); + +ZXIC_UINT32 dpp_vport_virtio_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_VIRTIO_EN_OFF; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_virtio_en_set); + +ZXIC_UINT32 dpp_vport_virtio_version_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 version) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_VIRTIO_VERSION; + + rc = dpp_vport_attr_set(pf_info, attr, version & 0x3); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_virtio_version_set); + +ZXIC_UINT32 dpp_vport_promisc_en_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_PROMISC_EN; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_promisc_en_set); + +ZXIC_UINT32 dpp_vport_business_vlan_offload_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_BUSINESS_VLAN_OFFLOAD_EN; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_business_vlan_offload_en_set); + +ZXIC_UINT32 dpp_vport_vlan_offload_en_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = SRIOV_VPORT_VLAN_OFFLOAD_EN; + + rc = dpp_vport_attr_set(pf_info, attr, enable & 0x1); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_vlan_offload_en_set); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_promisc.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_promisc.c new file mode 100644 index 000000000000..d1cdc100621f --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_promisc.c @@ -0,0 +1,193 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_promisc.h" + +ZXIC_UINT32 dpp_vport_promisc_info_set(DPP_PF_INFO_T *pf_info, + DPP_VPORT_PROMISC_TABLE_T *promisc_table, + ZXIC_UINT32 enable) +{ + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 vfunc_num = 0; + ZXIC_UINT64 bitmap_mask = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(promisc_table); + ZXIC_COMM_CHECK_INDEX(enable, 0, 1); + + vfunc_num = VFUNC_NUM(pf_info->vport); + ZXIC_COMM_CHECK_INDEX( + vfunc_num, 0, + (PROMISC_GROUP_NUM * PROMISC_MEMBER_NUM_IN_GROUP) - 1); + + group_id = vfunc_num / PROMISC_MEMBER_NUM_IN_GROUP; + ZXIC_COMM_CHECK_INDEX(group_id, 0, PROMISC_GROUP_NUM - 1); + + bitmap_mask = ((ZXIC_UINT64)(1) + << (PROMISC_MEMBER_NUM_IN_GROUP - 1 - + (vfunc_num % PROMISC_MEMBER_NUM_IN_GROUP))); + + if (IS_PF(pf_info->vport)) { + promisc_table->promisc_info.pf_enable = enable; + } else { + promisc_table->promisc_info.bitmap[group_id] = + (enable == 1) ? 
+ (promisc_table->promisc_info.bitmap[group_id] | + bitmap_mask) : + (promisc_table->promisc_info.bitmap[group_id] & + ~bitmap_mask); + } + + return DPP_OK; +} + +ZXIC_UINT32 +dpp_vport_promisc_table_insert(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no, + DPP_VPORT_PROMISC_TABLE_T *promisc_table) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_PROMISC_T promisc_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(promisc_table); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + for (group_id = 0; group_id < PROMISC_GROUP_NUM; group_id++) { + index = (((OWNER_PF_VQM_VFID(pf_info->vport) - + PF_VQM_VFID_OFFSET) + << 2) | + group_id); + promisc_entry.hit_flag = 1; + promisc_entry.pf_enable = promisc_table->promisc_info.pf_enable; + promisc_entry.bitmap = + promisc_table->promisc_info.bitmap[group_id]; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &promisc_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_dtb_eram_insert"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u group_id: %u index: 0x%02x.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + group_id, index); + ZXIC_COMM_TRACE_NOTICE( + "[%s] pf_enable: %u bitmap: %02x %02x %02x %02x %02x %02x %02x %02x.\n", + __FUNCTION__, promisc_entry.pf_enable, + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 7), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 6), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 5), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 4), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 3), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 2), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 1), + *((ZXIC_UINT8 *)(&promisc_entry.bitmap) + 0)); + } + + return DPP_OK; +} + +ZXIC_UINT32 dpp_vport_uc_promisc_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 enable) 
+{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_UC_PROMISC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + DPP_VPORT_PROMISC_TABLE_T *promisc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x enable: %u start.\n", __FUNCTION__, + pf_info->slot, pf_info->vport, enable); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_uc_promisc_table_get(pf_info, &promisc_table); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_uc_promisc_table_get", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_promisc_info_set(pf_info, promisc_table, enable); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_promisc_info_set", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_promisc_table_insert(pf_info, sdt_no, promisc_table); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_promisc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x enable: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, enable); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_uc_promisc_set); + +ZXIC_UINT32 dpp_vport_mc_promisc_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 enable) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 sdt_no = ZXDH_SDT_MC_PROMISC_TABLE; + ZXIC_UINT32 rc = DPP_OK; + DPP_VPORT_PROMISC_TABLE_T *promisc_table = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x enable: %u start.\n", __FUNCTION__, + pf_info->slot, pf_info->vport, enable); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + 
ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_mc_promisc_table_get(pf_info, &promisc_table); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_mc_promisc_table_get", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_promisc_info_set(pf_info, promisc_table, enable); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_promisc_info_set", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_promisc_table_insert(pf_info, sdt_no, promisc_table); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_vport_promisc_table_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x enable: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, enable); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_mc_promisc_set); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ptp.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ptp.c new file mode 100644 index 000000000000..7332651d5b54 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_ptp.c @@ -0,0 +1,53 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_ptp.h" + +ZXIC_UINT32 dpp_ptp_port_vfid_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 ptp_port_vfid) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_PTP_PORT_VFID; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + rc = dpp_uplink_phy_attr_set(pf_info, + port_attr_entry.uplink_phy_port_id, attr, + ptp_port_vfid); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_ptp_port_vfid_set); + 
+ZXIC_UINT32 dpp_ptp_tc_enable_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 ptp_tc_enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_PTP_TC_ENABLE; + + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + rc = dpp_vport_attr_get(pf_info, &port_attr_entry); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_get"); + + rc = dpp_uplink_phy_attr_set(pf_info, + port_attr_entry.uplink_phy_port_id, attr, + ptp_tc_enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_ptp_tc_enable_set); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_qid.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_qid.c new file mode 100644 index 000000000000..bd68bf71f398 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_qid.c @@ -0,0 +1,235 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_ppu.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_qid.h" + +ZXIC_UINT32 dpp_rxfh_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *queue_list, + ZXIC_UINT32 queue_num) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RSS_TO_VQID_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_RSS_TO_VQID_T rss_to_vqid_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(queue_list); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + index = VQM_VFID(pf_info->vport) * RSS_TO_VQID_GROUP_NUM; + rss_to_vqid_entry.hit_flag = 1; + + for (group_id = 0; group_id < RSS_TO_VQID_GROUP_NUM; group_id++) { + for (i = 0; i < 8; i++) { + rss_to_vqid_entry.vqm_qid[i] = + queue_list[((group_id * 8) + i) % queue_num]; + } + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, + index + group_id, + &rss_to_vqid_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: 0x%04x.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + index + group_id); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] vqm_qid0: 0x%04x vqm_qid1: 0x%04x vqm_qid2: 0x%04x vqm_qid3: 0x%04x.\n", + __FUNCTION__, rss_to_vqid_entry.vqm_qid[0], + rss_to_vqid_entry.vqm_qid[1], + rss_to_vqid_entry.vqm_qid[2], + rss_to_vqid_entry.vqm_qid[3]); + ZXIC_COMM_TRACE_NOTICE( + "[%s] vqm_qid4: 0x%04x vqm_qid5: 0x%04x vqm_qid6: 0x%04x vqm_qid7: 0x%04x.\n", + __FUNCTION__, rss_to_vqid_entry.vqm_qid[4], + rss_to_vqid_entry.vqm_qid[5], + rss_to_vqid_entry.vqm_qid[6], + rss_to_vqid_entry.vqm_qid[7]); + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_rxfh_set); + +ZXIC_UINT32 dpp_rxfh_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 *queue_list, + ZXIC_UINT32 queue_num) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 i = 0; + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RSS_TO_VQID_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_RSS_TO_VQID_T rss_to_vqid_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(queue_list); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, 
pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + index = VQM_VFID(pf_info->vport) * RSS_TO_VQID_GROUP_NUM; + for (group_id = 0; group_id < RSS_TO_VQID_GROUP_NUM; group_id++) { + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index + group_id, + &rss_to_vqid_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_INDEX_NOT_EQUAL_UNLOCK( + rss_to_vqid_entry.hit_flag, 1, DEV_PCIE_LOCK(&dev)); + + for (i = 0; i < 8; i++) { + queue_list[((group_id * 8) + i) % queue_num] = + rss_to_vqid_entry.vqm_qid[i]; + } + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_rxfh_get); + +ZXIC_UINT32 dpp_rxfh_del(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 group_id = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RSS_TO_VQID_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + index = VQM_VFID(pf_info->vport) * RSS_TO_VQID_GROUP_NUM; + for (group_id = 0; group_id < RSS_TO_VQID_GROUP_NUM; group_id++) { + 
rc = dpp_apt_dtb_eram_clear(&dev, queue, sdt_no, + index + group_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_clear", + DEV_PCIE_LOCK(&dev)); + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_rxfh_del); + +ZXIC_UINT32 dpp_thash_key_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 *hash_key, + ZXIC_UINT32 key_num) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_DEV_T dev = { 0 }; + DPP_PPU_PPU_COP_THASH_RSK_T *thash = + (DPP_PPU_PPU_COP_THASH_RSK_T *)hash_key; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_CHECK_POINT(thash); + ZXIC_COMM_CHECK_INDEX_LOWER( + key_num, (ZXIC_UINT32)sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_ppu_ppu_cop_thash_rsk_set(&dev, thash); + ZXIC_COMM_CHECK_RC(rc, "dpp_ppu_ppu_cop_thash_rsk_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_thash_key_set); + +ZXIC_UINT32 dpp_thash_key_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT8 *hash_key, + ZXIC_UINT32 key_num) +{ + ZXIC_UINT32 rc = DPP_OK; + + DPP_DEV_T dev = { 0 }; + DPP_PPU_PPU_COP_THASH_RSK_T *thash = + (DPP_PPU_PPU_COP_THASH_RSK_T *)hash_key; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_CHECK_POINT(thash); + ZXIC_COMM_CHECK_INDEX_LOWER( + key_num, (ZXIC_UINT32)sizeof(DPP_PPU_PPU_COP_THASH_RSK_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_ppu_ppu_cop_thash_rsk_get(&dev, thash); + ZXIC_COMM_CHECK_RC(rc, "dpp_ppu_ppu_cop_thash_rsk_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_thash_key_get); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_rdma.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_rdma.c new file mode 100644 index 000000000000..bc6ce1ea06ed --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_rdma.c @@ -0,0 +1,173 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_mac.h" +#include "dpp_tbl_api.h" + +ZXIC_VOID dpp_rdma_trans_item_print(ZXDH_RDMA_TRANS_T *rdma_trans) +{ + ZXIC_COMM_TRACE_NOTICE( + "key--mac: %02x:%02x:%02x:%02x:%02x:%02x.\n", + rdma_trans->key.mac_addr[0], rdma_trans->key.mac_addr[1], + rdma_trans->key.mac_addr[2], rdma_trans->key.mac_addr[3], + rdma_trans->key.mac_addr[4], rdma_trans->key.mac_addr[5]); + ZXIC_COMM_TRACE_NOTICE("key--rsv: 0x%02x\n", rdma_trans->key.rsv); + + ZXIC_COMM_TRACE_NOTICE("entry--rdma_vhca_id: 0x%02x\n", + rdma_trans->entry.rdma_vhca_id); + ZXIC_COMM_TRACE_NOTICE("entry--rsv: 0x%02x\n", + rdma_trans->entry.rsv); + ZXIC_COMM_TRACE_NOTICE("entry--hit_flag: 0x%02x\n", + rdma_trans->entry.hit_flag); +} + +ZXIC_UINT32 dpp_add_rdma_trans_item(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac, + ZXIC_CONST ZXIC_UINT16 vhcaId) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RDMA_ENTRY_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_RDMA_TRANS_T rdma_trans = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + ZXIC_COMM_MEMSET(&rdma_trans, 0, sizeof(ZXDH_RDMA_TRANS_T)); + + ZXIC_COMM_MEMCPY(rdma_trans.key.mac_addr, mac, 6); + rdma_trans.entry.rdma_vhca_id = vhcaId & 0x3ff; + rdma_trans.entry.hit_flag = 0x00; + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, 
&DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_hash_insert(&dev, queue, sdt_no, &rdma_trans); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u rdma_vhca_id: %u mac: %02x:%02x:%02x:%02x:%02x:%02x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + rdma_trans.entry.rdma_vhca_id, rdma_trans.key.mac_addr[0], + rdma_trans.key.mac_addr[1], rdma_trans.key.mac_addr[2], + rdma_trans.key.mac_addr[3], rdma_trans.key.mac_addr[4], + rdma_trans.key.mac_addr[5]); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_add_rdma_trans_item); + +ZXIC_UINT32 dpp_del_rdma_trans_item(DPP_PF_INFO_T *pf_info, + ZXIC_CONST ZXIC_VOID *mac) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RDMA_ENTRY_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_RDMA_TRANS_T rdma_trans = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(mac); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + ZXIC_COMM_MEMSET(&rdma_trans, 0, sizeof(ZXDH_RDMA_TRANS_T)); + + ZXIC_COMM_MEMCPY(rdma_trans.key.mac_addr, mac, 6); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_hash_delete(&dev, queue, sdt_no, &rdma_trans); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_hash_delete", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + 
ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u rdma_vhca_id: %u mac: %02x:%02x:%02x:%02x:%02x:%02x success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + rdma_trans.entry.rdma_vhca_id, rdma_trans.key.mac_addr[0], + rdma_trans.key.mac_addr[1], rdma_trans.key.mac_addr[2], + rdma_trans.key.mac_addr[3], rdma_trans.key.mac_addr[4], + rdma_trans.key.mac_addr[5]); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_del_rdma_trans_item); + +ZXIC_UINT32 dpp_rdma_trans_item_soft_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 sdt_no = ZXDH_SDT_RDMA_ENTRY_TABLE; + ZXIC_UINT32 last_flag = 0; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dev_last_check(&dev, &last_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_last_check"); + + if (last_flag) { + rc = dpp_vport_table_lock(pf_info, sdt_no, + &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_hash_soft_delete_by_sdt(&dev, sdt_no); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_hash_soft_delete_by_sdt", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + } + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_rdma_trans_item_soft_delete); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stat.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stat.c new file mode 100644 index 000000000000..1994a4c62d7b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stat.c @@ -0,0 +1,1087 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" 
+#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_stat_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_stat.h" + +ZXIC_UINT32 dpp_stat_cnt_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_cnt) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 buff[2] = { 0 }; + ZXIC_UINT32 rc = DPP_OK; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u mode: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index, mode); + + ZXIC_COMM_CHECK_POINT(p_cnt); + ZXIC_COMM_CHECK_INDEX(mode, STAT_RD_CLR_MODE_UNCLR, + STAT_RD_CLR_MODE_CLR); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + ZXIC_COMM_CHECK_INDEX_LOWER(p_se_res->stat_cfg.eram_depth, 1); + ZXIC_COMM_CHECK_INDEX(index, 0, p_se_res->stat_cfg.eram_depth * 2 - 1); + if (mode == STAT_RD_CLR_MODE_CLR) { + rc = dpp_stat_ppu_cnt_get(&dev, STAT_64_MODE, index, + STAT_RD_CLR_MODE_CLR, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_cnt_get"); + } else { + rc = dpp_dtb_eram_stat_data_get(&dev, queue, + p_se_res->stat_cfg.eram_baddr, + ERAM128_TBL_64b, index, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_eram_stat_data_get"); + } + + *p_cnt = ((ZXIC_UINT64)buff[0] << 32) | buff[1]; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u mode: %u cnt: %llu success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index, mode, + *p_cnt); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_cnt_get); + +ZXIC_UINT32 dpp_stat_cnt_get_128(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_pkB_cnt, + 
ZXIC_UINT64 *p_pk_cnt) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 buff[4] = { 0 }; + ZXIC_UINT32 rc = DPP_OK; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u mode: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index, mode); + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + ZXIC_COMM_CHECK_INDEX(mode, STAT_RD_CLR_MODE_UNCLR, + STAT_RD_CLR_MODE_CLR); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + ZXIC_COMM_CHECK_INDEX_LOWER(p_se_res->stat_cfg.eram_depth, 1); + ZXIC_COMM_CHECK_INDEX(index, 0, p_se_res->stat_cfg.eram_depth - 1); + + if (mode == STAT_RD_CLR_MODE_CLR) { + rc = dpp_stat_ppu_cnt_get(&dev, STAT_128_MODE, index, + STAT_RD_CLR_MODE_CLR, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_ppu_cnt_get"); + } else { + rc = dpp_dtb_eram_stat_data_get(&dev, queue, + p_se_res->stat_cfg.eram_baddr, + ERAM128_TBL_128b, index, buff); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_eram_stat_data_get"); + } + + *p_pk_cnt = ((ZXIC_UINT64)buff[0] << 32) | buff[1]; + *p_pkB_cnt = ((ZXIC_UINT64)buff[2] << 32) | buff[3]; + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u mode: %u h64_cnt: %llu success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index, mode, + *p_pk_cnt); + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u mode: %u l64_cnt: %llu success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index, mode, + *p_pkB_cnt); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_cnt_get_128); + +ZXIC_UINT32 dpp_stat_item_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 stat_item_no, ZXIC_UINT32 index, + ZXIC_UINT32 rd_mode, + DPP_STAT_VALUE_U *p_stat_value) +{ + DPP_STATUS rc = 
DPP_OK; + ZXIC_UINT32 exist_flag = 0; + DPP_DEV_T dev = { 0 }; + DPP_APT_STAT_ITEM_T *p_stat_item = NULL; + DPP_APT_SE_RES_T *p_se_res = NULL; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(p_stat_value); + ZXIC_COMM_CHECK_INDEX(rd_mode, STAT_RD_CLR_MODE_UNCLR, + STAT_RD_CLR_MODE_CLR); + ZXIC_COMM_CHECK_INDEX_UPPER(stat_item_no, STAT_ITEM_MAX_NUM - 1); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + p_se_res = (DPP_APT_SE_RES_T *)dpp_dev_get_se_res_ptr(&dev); + ZXIC_COMM_CHECK_POINT(p_se_res); + + rc = dpp_apt_sdt_is_exist(p_se_res, DPP_SDT_TBLT_eRAM, + ZXDH_SDT_STAT_ATTR_TABLE, &exist_flag); + ZXIC_COMM_CHECK_RC(rc, "dpp_apt_sdt_is_exist"); + if (exist_flag == 0) { + ZXIC_COMM_TRACE_INFO( + "The firmware not support stat item table!\n"); + return DPP_RC_TABLE_SDT_NOT_EXIST; + } + + p_stat_item = &p_se_res->stat_item[stat_item_no]; + ZXIC_COMM_CHECK_INDEX_LOWER(p_stat_item->depth, 1); + ZXIC_COMM_CHECK_INDEX(index, 0, p_stat_item->depth - 1); + + ZXIC_COMM_MEMSET_S(p_stat_value, sizeof(DPP_STAT_VALUE_U), 0x0, + sizeof(DPP_STAT_VALUE_U)); + if (p_stat_item->mode == STAT_64_MODE) { + rc = dpp_stat_cnt_get(pf_info, index + p_stat_item->addr_offset, + rd_mode, &p_stat_value->stat_cnt_64); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + } else { + rc = dpp_stat_cnt_get_128(pf_info, + index + p_stat_item->addr_offset, + rd_mode, + &p_stat_value->stat_cnt_128.bytes, + &p_stat_value->stat_cnt_128.pkts); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + } + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_item_cnt_get); + +ZXIC_UINT32 dpp_stat_mc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RX_PF_MULTICAST_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + 
ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_MC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_MC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_mc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_bc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RX_VF_BROADCAST_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_BC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_BC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_bc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_1588_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RX_1588_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_1588_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_1588_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} 
+EXPORT_SYMBOL(dpp_stat_1588_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_1588_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_TX_1588_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_1588_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_1588_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_1588_packet_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_1588_packet_drop_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_1588_DROP_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_1588_PACKET_DROP_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_1588_PACKET_DROP_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_1588_packet_drop_cnt_get); + +ZXIC_UINT32 dpp_stat_1588_enc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + 
DPP_STAT_ITEM_1588_DRS_NP_ENCRYPT_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_1588_ENC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get( + pf_info, index + DPP_STAT_1588_ENC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_1588_enc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_1588_enc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_1588_NP_DRS_ENCRYPT_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_1588_ENC_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get( + pf_info, index + DPP_STAT_1588_ENC_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_1588_enc_packet_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_spoof_packet_drop_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_SPOOF_DROP_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + 
DPP_STAT_SPOOF_PACKET_DROP_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get( + pf_info, index + DPP_STAT_SPOOF_PACKET_DROP_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_spoof_packet_drop_cnt_get); + +ZXIC_UINT32 dpp_stat_mcode_packet_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_MCODE_PPU_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_MCODE_PACKET_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_MCODE_PACKET_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_mcode_packet_cnt_get); + +ZXIC_UINT32 dpp_stat_port_RDMA_packet_msg_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RDMA_TX_STAT, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_RDMA_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_RDMA_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_RDMA_packet_msg_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_RDMA_packet_msg_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RDMA_RX_STAT, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_RDMA_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_RDMA_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_RDMA_packet_msg_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_plcr_packet_drop_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NIC_OUT_RATE_LIMIT_DROP_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PLCR_PACKET_DROP_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PLCR_PACKET_DROP_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_plcr_packet_drop_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_plcr_packet_drop_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NIC_IN_RATE_LIMIT_DROP_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PLCR_PACKET_DROP_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PLCR_PACKET_DROP_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_plcr_packet_drop_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_MTU_packet_msg_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_TX_MTU_DROP_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_MTU_PACKET_DROP_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_MTU_PACKET_DROP_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + 
ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_MTU_packet_msg_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_MTU_packet_msg_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_RX_MTU_DROP_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_MTU_PACKET_DROP_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_MTU_PACKET_DROP_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_MTU_packet_msg_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_uc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_UNICAST_RX_STAT, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_UC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_UC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_uc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_uc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_UNICAST_TX_STAT, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_UC_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_UC_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_uc_packet_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_mc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_MULTICAST_RX_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_MC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_MC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_mc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_mc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_MULTICAST_TX_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_MC_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_MC_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_mc_packet_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_bc_packet_rx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_BROADCAST_RX_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_BC_PACKET_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_BC_PACKET_RX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_bc_packet_rx_cnt_get); + +ZXIC_UINT32 dpp_stat_port_bc_packet_tx_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_NP_PORT_BROADCAST_TX_STAT, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PORT_BC_PACKET_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128( + pf_info, index + DPP_STAT_PORT_BC_PACKET_TX_CNT_ERAM_BAADDR, + mode, p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_port_bc_packet_tx_cnt_get); + +ZXIC_UINT32 dpp_stat_fd_stat_cnt_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 index, + ZXIC_UINT32 mode, ZXIC_UINT64 *p_pkB_cnt, + ZXIC_UINT64 *p_pk_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_pkB_cnt); + ZXIC_COMM_CHECK_POINT(p_pk_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_FD_FLOW_STAT, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_pkB_cnt = stat_value.stat_cnt_128.bytes; + *p_pk_cnt = stat_value.stat_cnt_128.pkts; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, DPP_STAT_FD_ACL_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get_128(pf_info, + index + DPP_STAT_FD_ACL_CNT_ERAM_BAADDR, mode, + p_pkB_cnt, p_pk_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get_128"); + + return DPP_OK; +} 
+EXPORT_SYMBOL(dpp_stat_fd_stat_cnt_get); + +ZXIC_UINT32 dpp_stat_asn_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_ASN_PHYPORT_RX_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_ASN_PHYPORT_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_ASN_PHYPORT_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_asn_phyport_rx_pkt_cnt_get); + +ZXIC_UINT32 dpp_stat_psn_phyport_tx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_PSN_PHYPORT_TX_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PSN_PHYPORT_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_PSN_PHYPORT_TX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_psn_phyport_tx_pkt_cnt_get); + +ZXIC_UINT32 dpp_stat_psn_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = 
dpp_stat_item_cnt_get(pf_info, DPP_STAT_ITEM_PSN_PHYPORT_RX_PKTS, + index, mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PSN_PHYPORT_RX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get(pf_info, + index + DPP_STAT_PSN_PHYPORT_RX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_psn_phyport_rx_pkt_cnt_get); + +ZXIC_UINT32 dpp_stat_psn_ack_phyport_tx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_PSN_ACK_PHYPORT_TX_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + ZXIC_COMM_CHECK_INDEX(index, 0, + DPP_STAT_PSN_ACK_PHYPORT_TX_CNT_ERAM_DEPTH - 1); + + rc = dpp_stat_cnt_get( + pf_info, index + DPP_STAT_PSN_ACK_PHYPORT_TX_CNT_ERAM_BAADDR, + mode, p_cnt); + ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_stat_psn_ack_phyport_tx_pkt_cnt_get); + +ZXIC_UINT32 dpp_stat_psn_ack_phyport_rx_pkt_cnt_get(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 index, + ZXIC_UINT32 mode, + ZXIC_UINT64 *p_cnt) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_STAT_VALUE_U stat_value = { 0 }; + + ZXIC_COMM_CHECK_POINT(p_cnt); + + /*对接新固件流程*/ + rc = dpp_stat_item_cnt_get(pf_info, + DPP_STAT_ITEM_PSN_ACK_PHYPORT_RX_PKTS, index, + mode, &stat_value); + if (rc != DPP_RC_TABLE_SDT_NOT_EXIST) { + *p_cnt = stat_value.stat_cnt_64; + ZXIC_COMM_TRACE_INFO("dpp_stat_item_cnt_get,rc=0x%x\n", rc); + return rc; + } + + /*对接老固件流程*/ + 
ZXIC_COMM_CHECK_INDEX(index, 0,
+			      DPP_STAT_PSN_ACK_PHYPORT_RX_CNT_ERAM_DEPTH - 1);
+
+	rc = dpp_stat_cnt_get(
+		pf_info, index + DPP_STAT_PSN_ACK_PHYPORT_RX_CNT_ERAM_BAADDR,
+		mode, p_cnt);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_stat_cnt_get");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_stat_psn_ack_phyport_rx_pkt_cnt_get);
\ No newline at end of file
diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stream.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stream.c
new file mode 100644
index 000000000000..691005d99938
--- /dev/null
+++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_stream.c
@@ -0,0 +1,125 @@
+#include "dpp_drv_init.h"
+#include "dpp_drv_eram.h"
+#include "dpp_drv_sdt.h"
+#include "dpp_dev.h"
+#include "dpp_dtb.h"
+#include "dpp_dtb_table.h"
+#include "dpp_tbl_api.h"
+
+ZXIC_UINT32 dpp_eram_entry_insert(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
+				  ZXIC_UINT32 index, ZXIC_UINT8 *p_data)
+{
+	DPP_DEV_T dev = { 0 };
+	DPP_DTB_ERAM_ENTRY_INFO_T eram_entry = { 0 };
+
+	ZXIC_UINT32 queue = 0;
+	ZXIC_UINT32 element_id = 0;
+	ZXIC_UINT32 rc = DPP_OK;
+
+	ZXIC_COMM_CHECK_POINT(pf_info);
+	ZXIC_COMM_CHECK_POINT(p_data);
+
+	ZXIC_COMM_MEMSET_S(&eram_entry, sizeof(DPP_DTB_ERAM_ENTRY_INFO_T), 0,
+			   sizeof(DPP_DTB_ERAM_ENTRY_INFO_T));
+	eram_entry.index = index;
+	eram_entry.p_data = (ZXIC_UINT32 *)p_data;
+
+	rc = dpp_dev_get(pf_info, &dev);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");
+
+	rc = dpp_dtb_queue_id_get(&dev, &queue);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");
+
+	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
+	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_dtb_eram_dma_write(&dev, queue, sdt_no, 1, &eram_entry,
+				    &element_id);
+	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_eram_dma_write",
+				  DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_vport_table_unlock(pf_info, sdt_no);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_eram_entry_insert);
+
+ZXIC_UINT32 dpp_eram_entry_delete(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
+				  ZXIC_UINT32 index)
+{
+	DPP_DEV_T dev = { 0 };
+	ZXIC_UINT8 data[DPP_SMMU0_READ_REG_MAX_NUM * 4] = { 0 };
+	DPP_DTB_ERAM_ENTRY_INFO_T eram_entry = { 0 };
+
+	ZXIC_UINT32 queue = 0;
+	ZXIC_UINT32 element_id = 0;
+	ZXIC_UINT32 rc = DPP_OK;
+
+	ZXIC_COMM_CHECK_POINT(pf_info);
+
+	ZXIC_COMM_MEMSET_S(&eram_entry, sizeof(DPP_DTB_ERAM_ENTRY_INFO_T), 0,
+			   sizeof(DPP_DTB_ERAM_ENTRY_INFO_T));
+	ZXIC_COMM_MEMSET_S(data, sizeof(data), 0, sizeof(data));
+	eram_entry.index = index;
+	eram_entry.p_data = (ZXIC_UINT32 *)data;
+
+	rc = dpp_dev_get(pf_info, &dev);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");
+
+	rc = dpp_dtb_queue_id_get(&dev, &queue);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");
+
+	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
+	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_dtb_eram_dma_write(&dev, queue, sdt_no, 1, &eram_entry,
+				    &element_id);
+	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_eram_dma_write",
+				  DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_vport_table_unlock(pf_info, sdt_no);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_eram_entry_delete);
+
+ZXIC_UINT32 dpp_eram_entry_get(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 sdt_no,
+			       ZXIC_UINT32 index, ZXIC_UINT8 *p_data)
+{
+	DPP_DEV_T dev = { 0 };
+	ZXIC_UINT32 queue = 0;
+	ZXIC_UINT32 rc = DPP_OK;
+	DPP_DTB_ERAM_ENTRY_INFO_T eram_entry = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(pf_info);
+	ZXIC_COMM_CHECK_POINT(p_data);
+
+	ZXIC_COMM_MEMSET_S(&eram_entry, sizeof(DPP_DTB_ERAM_ENTRY_INFO_T), 0,
+			   sizeof(DPP_DTB_ERAM_ENTRY_INFO_T));
+	eram_entry.index = index;
+	eram_entry.p_data = (ZXIC_UINT32 *)p_data;
+
+	rc = dpp_dev_get(pf_info, &dev);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");
+
+	rc = dpp_dtb_queue_id_get(&dev, &queue);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");
+
+	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
+	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_dtb_eram_data_get(&dev, queue, sdt_no, &eram_entry);
+	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_dtb_eram_data_get",
+				  DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_vport_table_unlock(pf_info, sdt_no);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_eram_entry_get);
diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_tm.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_tm.c
new file mode 100644
index 000000000000..f7cba9f59339
--- /dev/null
+++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_tm.c
@@ -0,0 +1,208 @@
+#include "dpp_tbl_tm.h"
+#include "dpp_dev.h"
+#include "dpp_drv_sdt.h"
+#include "dpp_drv_eram.h"
+#include "dpp_drv_qos.h"
+#include "dpp_dtb.h"
+#include "dpp_apt_se_api.h"
+#include "dpp_tbl_api.h"
+
+ZXIC_UINT32 dpp_tm_flowid_pport_table_set(DPP_PF_INFO_T *pf_info,
+					  ZXIC_UINT8 port, ZXIC_UINT32 flow_id)
+{
+	ZXIC_UINT32 rc = DPP_OK;
+	ZXIC_UINT32 attr = UPLINK_PHY_PORT_TM_BASE_QUEUE;
+
+	rc = dpp_uplink_phy_attr_set(pf_info, port, attr, flow_id);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_flowid_pport_table_set);
+
+ZXIC_UINT32 dpp_tm_flowid_pport_table_del(DPP_PF_INFO_T *pf_info,
+					  ZXIC_UINT8 port)
+{
+	ZXIC_UINT32 rc = DPP_OK;
+
+	rc = dpp_tm_flowid_pport_table_set(pf_info, port, TM_BASE_QUEUE_VALID);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_tm_flowid_pport_table_set");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_flowid_pport_table_del);
+
+ZXIC_UINT32 dpp_tm_pport_trust_mode_table_set(DPP_PF_INFO_T *pf_info,
+					      ZXIC_UINT32 port,
+					      ZXIC_UINT32 mode)
+{
+	ZXIC_UINT32 rc = DPP_OK;
+	ZXIC_UINT32 attr = UPLINK_PHY_PORT_TRUST_MODE;
+
+	rc = dpp_uplink_phy_attr_set(pf_info, port, attr, mode);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_pport_trust_mode_table_set); + +ZXIC_UINT32 dpp_tm_pport_trust_mode_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port) +{ + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_trust_mode_table_set(pf_info, port, TRUST_MODE_VALID); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_trust_mode_table_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_tm_pport_trust_mode_table_del); + +ZXIC_UINT32 dpp_tm_pport_dscp_map_table_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id, + ZXIC_UINT32 up_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_DSCP_TO_UP_TABLE; + ZXIC_UINT32 index = 0x3ff & ((port << 6) | (dscp_id & 0x3f)); + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_DSCP_TO_UP_T dscp_to_up = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u up_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + up_id); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + dscp_to_up.hit_flag = 1; + dscp_to_up.up = up_id; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, &dscp_to_up); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u up_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + up_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_tm_pport_dscp_map_table_set); + +ZXIC_UINT32 dpp_tm_pport_dscp_map_table_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, + ZXIC_UINT32 dscp_id) +{ + ZXIC_UINT32 
rc = DPP_OK;
+
+	rc = dpp_tm_pport_dscp_map_table_set(pf_info, port, dscp_id, UP_VALID);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_dscp_map_table_set");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_pport_dscp_map_table_del);
+
+ZXIC_UINT32 dpp_tm_pport_up_map_table_set(DPP_PF_INFO_T *pf_info,
+					  ZXIC_UINT32 port, ZXIC_UINT32 up_id,
+					  ZXIC_UINT32 tc_id)
+{
+	DPP_DEV_T dev = { 0 };
+
+	ZXIC_UINT32 queue = 0;
+	ZXIC_UINT32 sdt_no = ZXDH_SDT_UP_TO_TC_TABLE;
+	ZXIC_UINT32 index = 0x7F & ((port << 3) | (up_id & 0x7));
+	ZXIC_UINT32 rc = DPP_OK;
+
+	ZXDH_UP_TO_TC_T up_to_tc = { 0 };
+
+	ZXIC_COMM_CHECK_POINT(pf_info);
+
+	ZXIC_COMM_TRACE_NOTICE(
+		"[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u tc_id: %u start.\n",
+		__FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index,
+		tc_id);
+
+	rc = dpp_dev_get(pf_info, &dev);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get");
+
+	rc = dpp_dtb_queue_id_get(&dev, &queue);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get");
+
+	rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev));
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock");
+	ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev));
+
+	up_to_tc.hit_flag = 1;
+	up_to_tc.tc = tc_id;
+
+	rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, &up_to_tc);
+	ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert",
+				  DEV_PCIE_LOCK(&dev));
+
+	rc = dpp_vport_table_unlock(pf_info, sdt_no);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock");
+
+	ZXIC_COMM_TRACE_NOTICE(
+		"[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u tc_id: %u success.\n",
+		__FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index,
+		tc_id);
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_pport_up_map_table_set);
+
+ZXIC_UINT32 dpp_tm_pport_up_map_table_del(DPP_PF_INFO_T *pf_info,
+					  ZXIC_UINT32 port, ZXIC_UINT32 up_id)
+{
+	ZXIC_UINT32 rc = DPP_OK;
+
+	rc = dpp_tm_pport_up_map_table_set(pf_info, port, up_id, TC_VALID);
+	ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_up_map_table_set");
+
+	return DPP_OK;
+}
+EXPORT_SYMBOL(dpp_tm_pport_up_map_table_del); + +ZXIC_UINT32 dpp_tm_pport_mcode_switch_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port, ZXIC_UINT32 mode) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_TM_SHAPE_ENABLE; + + rc = dpp_uplink_phy_attr_set(pf_info, port, attr, mode); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_tm_pport_mcode_switch_set); + +ZXIC_UINT32 dpp_tm_pport_mcode_switch_del(DPP_PF_INFO_T *pf_info, + ZXIC_UINT32 port) +{ + ZXIC_UINT32 rc = DPP_OK; + + rc = dpp_tm_pport_mcode_switch_set(pf_info, port, TM_SWITCH_OFF); + ZXIC_COMM_CHECK_RC(rc, "dpp_tm_pport_mcode_switch_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_tm_pport_mcode_switch_del); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_uplink.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_uplink.c new file mode 100644 index 000000000000..3785924302c7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_uplink.c @@ -0,0 +1,137 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_port.h" +#include "dpp_tbl_uplink.h" +#include "dpp_tbl_diag.h" + +ZXIC_UINT32 dpp_uplink_phy_bond_vport(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id) +{ + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + rc = dpp_vport_attr_set(pf_info, SRIOV_VPORT_UPLINK_PHY_PORT_ID, + uplink_phy_id); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_attr_set"); + + rc = dpp_uplink_phy_attr_set(pf_info, uplink_phy_id, + UPLINK_PHY_PORT_PF_VQM_VFID, + OWNER_PF_VQM_VFID(pf_info->vport)); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_uplink_phy_bond_vport); + +ZXIC_UINT32 
dpp_uplink_phy_hardware_bond_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT8 enable) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_HW_BOND_ENABLE; + + rc = dpp_uplink_phy_attr_set(pf_info, uplink_phy_id, attr, enable); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_uplink_phy_hardware_bond_set); + +ZXIC_UINT32 dpp_uplink_phy_lacp_pf_vqm_vfid_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT16 vqm_vfid) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_LACP_PF_VQM_VFID; + + rc = dpp_uplink_phy_attr_set(pf_info, uplink_phy_id, attr, vqm_vfid); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_uplink_phy_lacp_pf_vqm_vfid_set); + +ZXIC_UINT32 dpp_uplink_phy_lacp_pf_memport_qid_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, + ZXIC_UINT16 qid) +{ + ZXIC_UINT32 rc = DPP_OK; + ZXIC_UINT32 attr = UPLINK_PHY_PORT_LACP_PF_MEMPORT_QID; + + rc = dpp_uplink_phy_attr_set(pf_info, uplink_phy_id, attr, qid); + ZXIC_COMM_CHECK_RC(rc, "dpp_uplink_phy_attr_set"); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_uplink_phy_lacp_pf_memport_qid_set); + +ZXIC_UINT32 dpp_uplink_phy_attr_set(DPP_PF_INFO_T *pf_info, + ZXIC_UINT8 uplink_phy_id, ZXIC_UINT32 attr, + ZXIC_UINT32 value) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_UPLINK_PHY_PORT_ATTR_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_UPLINK_PHY_PORT_T uplink_phy_port = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(attr, 0, + (ZXIC_UINT32)((sizeof(ZXDH_UPLINK_PHY_PORT_T) / + sizeof(ZXIC_UINT32)) - + 1)); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + uplink_phy_id, dpp_uplink_phy_port_table_attr_name_get(attr), + attr, value); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, 
"dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, uplink_phy_id, + &uplink_phy_port); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + uplink_phy_port.hit_flag = 1; + *((((ZXIC_UINT32 *)(&uplink_phy_port)) + attr)) = value; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, uplink_phy_id, + &uplink_phy_port); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, + uplink_phy_id, dpp_uplink_phy_port_table_attr_name_get(attr), + attr, value); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_uplink_phy_attr_set); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vhca.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vhca.c new file mode 100644 index 000000000000..512f2dec00ad --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vhca.c @@ -0,0 +1,98 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_port.h" + +ZXIC_UINT32 dpp_vport_vhca_id_add(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 vhca_id) +{ + ZXIC_UINT32 rc = DPP_OK; + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VHCA_TABLE; + + ZXDH_VHCA_T vhca_entry = { 0 }; + 
+ ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u vhca_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, vhca_id); + + ZXIC_COMM_MEMSET(&vhca_entry, 0, sizeof(ZXDH_VHCA_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + vhca_entry.valid = 1; + vhca_entry.vqm_vfid = VQM_VFID(pf_info->vport); + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, vhca_id, &vhca_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u vhca_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, vhca_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_vhca_id_add); + +ZXIC_UINT32 dpp_vport_vhca_id_del(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 vhca_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VHCA_TABLE; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u vhca_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, vhca_id); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_clear(&dev, queue, sdt_no, vhca_id); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, 
"dpp_apt_dtb_eram_clear", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u vhca_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, vhca_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vport_vhca_id_del); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vlan.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vlan.c new file mode 100644 index 000000000000..99cd83762479 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vlan.c @@ -0,0 +1,188 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_vlan.h" + +ZXIC_UINT32 dpp_vlan_filter_init(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VLAN_FILTER_TABLE; + ZXIC_UINT32 vlan_group_id = 0; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VLAN_FILTER_T vlan_filter_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + ZXIC_COMM_MEMSET(&vlan_filter_entry, 0, sizeof(ZXDH_VLAN_FILTER_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + vlan_filter_entry.hit_flag = 1; + + for (vlan_group_id = 0; vlan_group_id < VLAN_GROUP_NUM; + vlan_group_id++) { + index = 
((vlan_group_id << 11) | (VQM_VFID(pf_info->vport))); + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + } + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vlan_filter_init); + +ZXIC_UINT32 dpp_add_vlan_filter(DPP_PF_INFO_T *pf_info, ZXIC_UINT16 vlan_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VLAN_FILTER_TABLE; + + ZXIC_UINT32 vlan_group_id = vlan_id / VLAN_ID_NUM_IN_GROUP; + ZXIC_UINT32 vlan_remainder = vlan_id % VLAN_ID_NUM_IN_GROUP; + + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VLAN_FILTER_T vlan_filter_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(vlan_id, 0, 4095); + + index = ((vlan_group_id << 11) | (VQM_VFID(pf_info->vport))); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: 0x%04x group_id: %u vlan_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + vlan_group_id, vlan_id); + + ZXIC_COMM_MEMSET(&vlan_filter_entry, 0, sizeof(ZXDH_VLAN_FILTER_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + vlan_filter_entry.hit_flag = 1; + vlan_filter_entry.vport_bitmap[vlan_remainder / 8] |= + 1 << (7 - (vlan_remainder % 8)); + + rc = 
dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: 0x%04x group_id: %u vlan_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + vlan_group_id, vlan_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_add_vlan_filter); + +ZXIC_UINT32 dpp_del_vlan_filter(DPP_PF_INFO_T *pf_info, ZXIC_UINT16 vlan_id) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VLAN_FILTER_TABLE; + + ZXIC_UINT32 vlan_group_id = vlan_id / VLAN_ID_NUM_IN_GROUP; + ZXIC_UINT32 vlan_remainder = vlan_id % VLAN_ID_NUM_IN_GROUP; + + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VLAN_FILTER_T vlan_filter_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(vlan_id, 0, 4095); + + index = ((vlan_group_id << 11) | (VQM_VFID(pf_info->vport))); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: 0x%04x group_id: %u vlan_id: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + vlan_group_id, vlan_id); + + ZXIC_COMM_MEMSET(&vlan_filter_entry, 0, sizeof(ZXDH_VLAN_FILTER_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + vlan_filter_entry.hit_flag = 1; + vlan_filter_entry.vport_bitmap[vlan_remainder / 8] &= + ~(1 << (7 - (vlan_remainder % 8))); + 
+ rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &vlan_filter_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: 0x%04x group_id: %u vlan_id: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + vlan_group_id, vlan_id); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_del_vlan_filter); diff --git a/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vqm.c b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vqm.c new file mode 100644 index 000000000000..8ac701fe8ebd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/table/source/dpp_tbl_vqm.c @@ -0,0 +1,207 @@ +#include "dpp_drv_init.h" +#include "dpp_drv_acl.h" +#include "dpp_drv_hash.h" +#include "dpp_drv_eram.h" +#include "dpp_drv_sdt.h" +#include "dpp_dev.h" +#include "dpp_dtb.h" +#include "dpp_hash.h" +#include "dpp_dtb_table.h" +#include "dpp_dtb_table_api.h" +#include "dpp_tbl_comm.h" +#include "dpp_tbl_diag.h" +#include "dpp_tbl_vqm.h" + +ZXIC_UINT32 dpp_vqm_vfid_vlan_init(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VQM_VFID_VLAN_T vqm_vfid_vlan_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport); + + ZXIC_COMM_MEMSET(&vqm_vfid_vlan_entry, 0, sizeof(ZXDH_VQM_VFID_VLAN_T)); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + 
ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + vqm_vfid_vlan_entry.hit_flag = 1; + index = VQM_VFID(pf_info->vport); + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &vqm_vfid_vlan_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT("[%s] slot: %u vport: 0x%04x success.\n", __FUNCTION__, + pf_info->slot, pf_info->vport); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vqm_vfid_vlan_init); + +ZXIC_UINT32 dpp_vqm_vfid_vlan_delete(DPP_PF_INFO_T *pf_info) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXIC_COMM_CHECK_POINT(pf_info); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_clear(&dev, queue, sdt_no, index); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_clear", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vqm_vfid_vlan_delete); + +ZXIC_UINT32 dpp_vqm_vfid_vlan_set(DPP_PF_INFO_T *pf_info, ZXIC_UINT32 attr, + ZXIC_UINT32 value) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = 
ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + ZXDH_VQM_VFID_VLAN_T vqm_vfid_vlan_entry = { 0 }; + + ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_INDEX(attr, 0, + (ZXIC_UINT32)((sizeof(ZXDH_VQM_VFID_VLAN_T) / + sizeof(ZXIC_UINT32)) - + 1)); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + dpp_vqm_vfid_vlan_attr_name_get(attr), attr, value); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + &vqm_vfid_vlan_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + + vqm_vfid_vlan_entry.hit_flag = 1; + *((((ZXIC_UINT32 *)(&vqm_vfid_vlan_entry)) + attr)) = value; + + rc = dpp_apt_dtb_eram_insert(&dev, queue, sdt_no, index, + &vqm_vfid_vlan_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_insert", + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_PRINT( + "[%s] slot: %u vport: 0x%04x sdt_no: %u index: %u attr: %s(%u) value: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, sdt_no, index, + dpp_vqm_vfid_vlan_attr_name_get(attr), attr, value); + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vqm_vfid_vlan_set); + +ZXIC_UINT32 dpp_vqm_vfid_vlan_get(DPP_PF_INFO_T *pf_info, + ZXDH_VQM_VFID_VLAN_T *vqm_vfid_vlan_entry) +{ + DPP_DEV_T dev = { 0 }; + + ZXIC_UINT32 queue = 0; + ZXIC_UINT32 sdt_no = ZXDH_SDT_VQM_VFID_VLAN_ATTR_TABLE; + ZXIC_UINT32 index = 0; + ZXIC_UINT32 rc = DPP_OK; + + 
ZXIC_COMM_CHECK_POINT(pf_info); + ZXIC_COMM_CHECK_POINT(vqm_vfid_vlan_entry); + + index = VQM_VFID(pf_info->vport); + + ZXIC_COMM_TRACE_NOTICE("[%s] slot: %u vport: 0x%04x index: %u start.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, + index); + + rc = dpp_dev_get(pf_info, &dev); + ZXIC_COMM_CHECK_RC(rc, "dpp_dev_get"); + + rc = dpp_dtb_queue_id_get(&dev, &queue); + ZXIC_COMM_CHECK_RC(rc, "dpp_dtb_queue_id_get"); + + rc = dpp_vport_table_lock(pf_info, sdt_no, &DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_lock"); + ZXIC_COMM_CHECK_POINT(DEV_PCIE_LOCK(&dev)); + + rc = dpp_apt_dtb_eram_get(&dev, queue, sdt_no, index, + vqm_vfid_vlan_entry); + ZXIC_COMM_CHECK_RC_UNLOCK(rc, "dpp_apt_dtb_eram_get", + DEV_PCIE_LOCK(&dev)); + ZXIC_COMM_CHECK_INDEX_NOT_EQUAL_UNLOCK(vqm_vfid_vlan_entry->hit_flag, 1, + DEV_PCIE_LOCK(&dev)); + + rc = dpp_vport_table_unlock(pf_info, sdt_no); + ZXIC_COMM_CHECK_RC(rc, "dpp_vport_table_unlock"); + + ZXIC_COMM_TRACE_NOTICE( + "[%s] slot: %u vport: 0x%04x index: %u success.\n", + __FUNCTION__, pf_info->slot, pf_info->vport, index); + + return DPP_OK; +} +EXPORT_SYMBOL(dpp_vqm_vfid_vlan_get); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_np/tools/dpp_tools.py b/drivers/net/ethernet/dinghai/en_np/tools/dpp_tools.py new file mode 100644 index 000000000000..88040f6ad9d8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/tools/dpp_tools.py @@ -0,0 +1,136 @@ +# /usr/bin/env python +# -*- coding: utf-8 -*- +import sys +import re +import string +import xlrd +import xlwt +import sys, os.path +import time +from collections import defaultdict + +file_out_path = './c_code/' + +fp_dpp_drv_eram_c = open(file_out_path + 'dpp_drv_eram.c', 'w') +fp_dpp_drv_eram_c.truncate() + +print('#include "dpp_drv_eram.h"', file=fp_dpp_drv_eram_c) +print('', file=fp_dpp_drv_eram_c) + +fp_dpp_drv_eram_h = open(file_out_path + 'dpp_drv_eram.h', 'w') +fp_dpp_drv_eram_h.truncate() + +print('\n#ifndef DPP_DRV_ERAM_H', 
file=fp_dpp_drv_eram_h) +print('#define DPP_DRV_ERAM_H', file=fp_dpp_drv_eram_h) +print('', file=fp_dpp_drv_eram_h) +print('#include "zxic_common.h"', file=fp_dpp_drv_eram_h) +print('', file=fp_dpp_drv_eram_h) + +def gen_eram_info_c(fp_excel_flie, table_name): + + for i in range(0, len(fp_excel_flie.sheets())): + sheet = fp_excel_flie.sheet_by_index(i) + if (sheet.name != table_name): + continue + + table_name = sheet.cell_value(1, 0) + table_type = sheet.cell_value(1, 1) + table_width = sheet.cell_value(1, 2) + field_bits = sheet.col_values(4) + field_name = sheet.col_values(5) + + if (table_type != '直接表'): + continue + + print('typedef struct zxdh_' + table_name.lower() + '_t', file=fp_dpp_drv_eram_h) + print('{', file=fp_dpp_drv_eram_h) + for value in field_name[1:][::-1]: + if (value.lower() == 'rsv'): + continue + print(' ZXIC_UINT32 ' + value.lower() + ';', file=fp_dpp_drv_eram_h) + print('} ZXDH_' + table_name.upper() + '_T;', file=fp_dpp_drv_eram_h) + + print('ZXIC_UINT32 dpp_apt_set_' + table_name.lower() + '_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4])',\ + file=fp_dpp_drv_eram_c) + print('{', file=fp_dpp_drv_eram_c) + print(' ZXDH_' + table_name.upper() + '_T *attr = (ZXDH_' + table_name.upper() + '_T *)pData;', file=fp_dpp_drv_eram_c) + print('', file=fp_dpp_drv_eram_c) + + for bits, name in zip(field_bits[1:], field_name[1:]): + if (name.lower() == 'rsv'): + continue + + start_bit = int(bits.split(':')[0]) + end_bit = int(bits.split(':')[1]) + + bit_width = start_bit - end_bit + 1 + + byte_index_by_start_bit = 3 - (start_bit // 32) + byte_index_end_bit = 3 - (end_bit // 32) + + start_bit_in_byte = (start_bit % 32) - bit_width + 1 + + if (byte_index_by_start_bit == byte_index_end_bit): + print(f' ZXIC_COMM_UINT32_WRITE_BITS(buff[{byte_index_by_start_bit}], attr->{name.lower()}, {start_bit_in_byte}, {bit_width});', file=fp_dpp_drv_eram_c) + else: + print(f' ZXIC_COMM_UINT32_WRITE_BITS(buff[{byte_index_by_start_bit}], attr->{name.lower()} >> {bit_width - 
(start_bit % 32 + 1)}, {0}, {(start_bit % 32 + 1)});', file=fp_dpp_drv_eram_c) + print(f' ZXIC_COMM_UINT32_WRITE_BITS(buff[{byte_index_end_bit}], attr->{name.lower()}, {end_bit % 32}, {32 - start_bit_in_byte % 32});', file=fp_dpp_drv_eram_c) + + print('', file=fp_dpp_drv_eram_c) + print(' return DPP_OK;', file=fp_dpp_drv_eram_c) + print('}', file=fp_dpp_drv_eram_c) + print('', file=fp_dpp_drv_eram_c) + + + print('ZXIC_UINT32 dpp_apt_get_' + table_name.lower() + '_data(ZXIC_VOID *pData, ZXIC_UINT32 buff[4])',\ + file=fp_dpp_drv_eram_c) + print('{', file=fp_dpp_drv_eram_c) + print(' ZXDH_' + table_name.upper() + '_T *attr = (ZXDH_' + table_name.upper() + '_T *)pData;', file=fp_dpp_drv_eram_c) + print('', file=fp_dpp_drv_eram_c) + + for bits, name in zip(field_bits[1:], field_name[1:]): + if (name.lower() == 'rsv'): + continue + + start_bit = int(bits.split(':')[0]) + end_bit = int(bits.split(':')[1]) + + bit_width = start_bit - end_bit + 1 + + byte_index_by_start_bit = 3 - (start_bit // 32) + byte_index_end_bit = 3 - (end_bit // 32) + + start_bit_in_byte = (start_bit % 32) - bit_width + 1 + + if (byte_index_by_start_bit == byte_index_end_bit): + print(f' ZXIC_COMM_UINT32_GET_BITS(attr->{name.lower()}, buff[{byte_index_by_start_bit}], {start_bit_in_byte}, {bit_width});', file=fp_dpp_drv_eram_c) + else: + print(f' ZXIC_COMM_UINT32_GET_BITS(attr->{name.lower()} >> {bit_width - (start_bit % 32 + 1)}, buff[{byte_index_by_start_bit}], {0}, {(start_bit % 32 + 1)});', file=fp_dpp_drv_eram_c) + print(f' ZXIC_COMM_UINT32_GET_BITS(attr->{name.lower()}, buff[{byte_index_end_bit}], {end_bit % 32}, {32 - start_bit_in_byte % 32});', file=fp_dpp_drv_eram_c) + + print('', file=fp_dpp_drv_eram_c) + print(' return DPP_OK;', file=fp_dpp_drv_eram_c) + print('}', file=fp_dpp_drv_eram_c) + + +file_name = "标卡表项定义说明.xlsx" +table_name = sys.argv[1] + +# 脚本入口 +print("%s 数据结构及码流转换接口编码中..."% table_name) + +if not os.path.isfile(file_name): + print('Error: %s 文件不存在' % (file_name)) + sys.exit() + 
+fp_excel_flie = xlrd.open_workbook(file_name) +gen_eram_info_c(fp_excel_flie, table_name) + +print('', file=fp_dpp_drv_eram_h) +print('#endif', file=fp_dpp_drv_eram_h) + + +fp_dpp_drv_eram_c.close() +fp_dpp_drv_eram_h.close() + +print("%s 数据结构及码流转换接口编码完成!"% table_name) diff --git a/drivers/net/ethernet/dinghai/en_np/tools/readme.txt b/drivers/net/ethernet/dinghai/en_np/tools/readme.txt new file mode 100644 index 000000000000..55d29b72fd4f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_np/tools/readme.txt @@ -0,0 +1,2 @@ + +/usr/bin/python3.6 dpp_tools.py 端口属性表 diff --git a/drivers/net/ethernet/dinghai/en_pf.c b/drivers/net/ethernet/dinghai/en_pf.c new file mode 100644 index 000000000000..d835f7eaccb3 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf.c @@ -0,0 +1,4339 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "en_pf.h" +#include "./en_pf/en_pf_irq.h" +#include "./en_pf/en_pf_eq.h" +#include "./en_pf/en_pf_events.h" +#include "en_aux.h" +#include "en_sf.h" +#include "en_np/init/include/dpp_np_init.h" +#include "en_pf/msg_func.h" +#include "msg_common.h" +#include "slib.h" +#include "en_aux/en_aux_events.h" + +#ifdef CONFIG_ZXDH_SF +#include +#endif + +#ifdef DRIVER_VERSION_VAL +#define DRV_VERSION DRIVER_VERSION_VAL +#else +#define DRV_VERSION "1.0-1" +#endif + +#define DRV_SUMMARY "ZTE(R) zxdh-net driver" + +const char zxdh_pf_driver_version[] = DRV_VERSION; +static const char zxdh_pf_driver_string[] = DRV_SUMMARY; +static const char zxdh_pf_copyright[] = + "Copyright (c) 2022-23, ZTE Corporation."; + +MODULE_AUTHOR("ZTE Corporation"); +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); + +uint32_t dh_debug_mask; +struct slot_id_array dh_slot[DPP_PCIE_SLOT_MAX]; +module_param_named(debug_mask, dh_debug_mask, uint, 0644); 
+MODULE_PARM_DESC( + debug_mask, + "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); +static bool probe_vf = 1; +module_param(probe_vf, bool, 0644); +MODULE_PARM_DESC(probe_vf, "probe_vf: 0 = N, 1 = Y"); + +static const struct pci_device_id dh_pf_pci_table[] = { + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_BSI_VENDOR_ID, ZXDH_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_BSI_VENDOR_ID, ZXDH_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICA_BOND_DEVICE_ID), + 0 }, /* bond */ + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICB_BOND_DEVICE_ID), + 0 }, /* bond */ + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICC_BOND_DEVICE_ID), + 0 }, /* bond */ + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_DPUA_BOND_DEVICE_ID), + 0 }, /* bond */ + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICA_UPF_BOND_DEVICE_ID), + 0 }, /* bond */ + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E310_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E310_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E310_CMCC_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E310_CMCC_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E312_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E312_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_NOF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_INITIATOR1_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_INITIATOR2_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_DPUB_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_UPF_PF_I512_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_UPF_VF_I512_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICA_RDMA_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, 
ZXDH_INICA_RDMA_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E316_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E316_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_E316_XPU_VENDER_ID, ZXDH_PF_E316_XPU_DEVICE_ID), + 0 }, + { PCI_DEVICE(ZXDH_PF_E316_XPU_VENDER_ID, ZXDH_VF_E316_XPU_DEVICE_ID), + 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E311_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E311_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_I511_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_I511_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_BOND0_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_BOND1_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE0_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE0_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE1_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE1_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE2_PF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_INICD_NE2_VF_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E310_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E310_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E310S_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E310S_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E312S_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E312S_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_SRIOV0_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_DPUB_SRIOV1_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_I510_SRIOV_SEC_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_I510_SRIOV_SEC_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E312_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E312_RDMA_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, 
ZXDH_PF_INICA_OFFLOAD_DEVICE_ID), 0 }, + { PCI_DEVICE(CTC_PF_VENDOR_ID, CTC_PF_B512Y_DEVICE_ID), 0 }, + { PCI_DEVICE(CTC_PF_VENDOR_ID, CTC_VF_B512Y_DEVICE_ID), 0 }, + { PCI_DEVICE(CTC_PF_VENDOR_ID, CTC_PF_B522Y_DEVICE_ID), 0 }, + { PCI_DEVICE(CTC_PF_VENDOR_ID, CTC_VF_B522Y_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_PF_E312S_D_DEVICE_ID), 0 }, + { PCI_DEVICE(ZXDH_PF_VENDOR_ID, ZXDH_VF_E312S_D_DEVICE_ID), 0 }, + { + 0, + } +}; + +MODULE_DEVICE_TABLE(pci, dh_pf_pci_table); + +extern struct devlink_ops dh_pf_devlink_ops; +extern struct dh_core_devlink_ops dh_pf_core_devlink_ops; + +#ifdef PTP_DRIVER_INTERFACE_EN +int zxdh_ptp_init(struct dh_core_dev *zxdev); +void zxdh_ptp_stop(struct dh_core_dev *zxdev); +#endif + +/** + * @brief Set the rp cpl timeout mask status object + * + * @param pdev: PF的pci_dev + * @param status: 0为unmask,其余为mask + * @return int32_t 0为正常,其余异常 + */ +static int32_t set_rp_cpl_timeout_mask_status(struct pci_dev *pdev, + uint32_t status) +{ + struct pci_dev *rp_dev = NULL; + int aer = 0; + u32 data = 0; + + rp_dev = pcie_find_root_port(pdev); + if (!rp_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + + aer = pci_find_ext_capability(rp_dev, PCI_EXT_CAP_ID_ERR); + if (!aer) { + LOG_ERR("Can not find RP AER CAP\n"); + return -ENXIO; + } + + pci_read_config_dword(rp_dev, aer + PCI_ERR_UNCOR_MASK, &data); + + if (status) { + data |= PCI_ERR_UNC_COMP_TIME; + } else { + data &= ~PCI_ERR_UNC_COMP_TIME; + } + + pci_write_config_dword(rp_dev, aer + PCI_ERR_UNCOR_MASK, data); + + return 0; +} + +static int32_t zxdh_pf_set_cpl_timeout_mask(struct dh_core_dev *dev, + uint32_t mask) +{ + return set_rp_cpl_timeout_mask_status(dev->pdev, mask); +} + +/** + * @brief Get the rp cpl timeout mask status object + * + * @param pdev: PF的pci_dev + * @return int32_t 1为mask,0为unmask,其余为获取状态失败 + */ +static int32_t get_rp_cpl_timeout_mask_status(struct pci_dev *pdev) +{ + struct pci_dev *rp_dev = NULL; + int aer = 0; + u32 data = 0; + + rp_dev = 
pcie_find_root_port(pdev); + if (!rp_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + + aer = pci_find_ext_capability(rp_dev, PCI_EXT_CAP_ID_ERR); + if (!aer) { + LOG_ERR("Can not find RP AER CAP\n"); + return -ENXIO; + } + + pci_read_config_dword(rp_dev, aer + PCI_ERR_UNCOR_MASK, &data); + + return (data & PCI_ERR_UNC_COMP_TIME) ? 1 : 0; +} + +static int32_t zxdh_pf_get_cpl_timeout_if_mask(struct dh_core_dev *dev) +{ + return get_rp_cpl_timeout_mask_status(dev->pdev); +} + +/** + * @brief Set the rp hp irq ctrl object + * + * @param pdev: PF的pci_dev + * @param status: 0为失能,其余为使能 + * @return int32_t 0为正常,其余异常 + */ +int32_t set_rp_hp_irq_ctrl(struct pci_dev *pdev, uint32_t status) +{ + struct pci_dev *rp_dev = NULL; + int express = 0; + u32 data = 0; + + rp_dev = pcie_find_root_port(pdev); + if (!rp_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + + express = pci_find_capability(rp_dev, PCI_CAP_ID_EXP); + if (!express) { + LOG_ERR("Can not find RP EXPRESS CAP\n"); + return -ENXIO; + } + + pci_read_config_dword(rp_dev, express + PCI_EXP_SLTCTL, &data); + + if (status) { + data |= PCI_EXP_SLTCTL_HPIE; + } else { + data &= ~PCI_EXP_SLTCTL_HPIE; + } + + pci_write_config_dword(rp_dev, express + PCI_EXP_SLTCTL, data); + + return 0; +} + +int32_t zxdh_pf_set_hp_irq_ctrl_status(struct dh_core_dev *dev, uint32_t status) +{ + return set_rp_hp_irq_ctrl(dev->pdev, status); +} + +/** + * @brief Get the rp hp irq ctrl status object + * + * @param pdev: PF的pci_dev + * @return int32_t 1为使能,0为失能,其余为获取状态失败 + */ +int32_t get_rp_hp_irq_ctrl_status(struct pci_dev *pdev) +{ + struct pci_dev *rp_dev = NULL; + u32 data = 0; + int express = 0; + + rp_dev = pcie_find_root_port(pdev); + if (!rp_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + + express = pci_find_capability(rp_dev, PCI_CAP_ID_EXP); + if (!express) { + LOG_ERR("Can not find RP EXPRESS CAP\n"); + return -ENXIO; + } + + pci_read_config_dword(rp_dev, express + PCI_EXP_SLTCTL, &data); + + 
return (data & PCI_EXP_SLTCTL_HPIE) ? 1 : 0; +} + +int32_t zxdh_pf_get_hp_irq_ctrl_status(struct dh_core_dev *dev) +{ + return get_rp_hp_irq_ctrl_status(dev->pdev); +} + +int32_t zxdh_pf_rp_config_init(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int32_t err = 0; + + err = zxdh_pf_pcie_config_store(dev); + if (err) + return err; + + zxdh_pf_set_cpl_timeout_mask(dev, 1); + + if (pf_dev->board_type == DH_STD_E312S || + pf_dev->board_type == DH_STD_E312S_D) { + LOG_INFO("hp_irq no change\n"); + return 0; + } + + zxdh_pf_set_hp_irq_ctrl_status(dev, 0); + return 0; +} + +static bool zxdh_pf_get_rp_link_status(struct dh_core_dev *dev) +{ + struct pci_dev *rp_dev = NULL; + int pcie_cap = 0; + u16 data = 0; + + rp_dev = pcie_find_root_port(dev->pdev); + if (!rp_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + + pcie_cap = pci_find_capability(rp_dev, PCI_CAP_ID_EXP); + if (!pcie_cap) { + LOG_ERR("Can not find PCI Express CAP\n"); + return -ENXIO; + } + + pci_read_config_word(rp_dev, pcie_cap + PCI_EXP_LNKSTA, &data); + return (data & PCI_EXP_LNKSTA_DLLLA) ? true : false; +} + +static bool zxdh_pf_get_upstream_port_link_status(struct dh_core_dev *dev) +{ + struct pci_dev *up_stream_dev = NULL; + int pcie_cap = 0; + u16 data = 0; + + up_stream_dev = pci_upstream_bridge(dev->pdev); + if (!up_stream_dev) { + LOG_ERR("Can not find RP\n"); + return -ENODEV; + } + pcie_cap = pci_find_capability(up_stream_dev, PCI_CAP_ID_EXP); + if (!pcie_cap) { + LOG_ERR("Can not find PCI Express CAP\n"); + return -ENXIO; + } + pci_read_config_word(up_stream_dev, pcie_cap + PCI_EXP_LNKSTA, &data); + return (data & PCI_EXP_LNKSTA_DLLLA) ? 
true : false; +} + +static bool zxdh_pf_check_remove_state(struct dh_core_dev *dev) +{ + if (!zxdh_pf_get_rp_link_status(dev)) + return false; + + return zxdh_pf_get_upstream_port_link_status(dev); +} + +int32_t dh_pf_pci_init(struct dh_core_dev *dev) +{ + int32_t ret = 0; + struct zxdh_pf_device *pf_dev = NULL; + + pci_set_drvdata(dev->pdev, dev); + + ret = pci_enable_device(dev->pdev); + if (ret != 0) { + LOG_ERR("pci_enable_device failed: %d\n", ret); + return -ENOMEM; + } + + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(64)); + if (ret != 0) { + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(32)); + if (ret != 0) { + LOG_ERR("dma_set_mask_and_coherent failed: %d\n", ret); + goto err_pci; + } + } + + ret = pci_request_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM), "dh-pf"); + if (ret != 0) { + LOG_ERR("pci_request_selected_regions failed: %d\n", ret); + goto err_pci; + } + + pci_enable_pcie_error_reporting(dev->pdev); + pci_set_master(dev->pdev); + ret = pci_save_state(dev->pdev); + if (ret != 0) { + LOG_ERR("pci_save_state failed: %d\n", ret); + goto err_pci_save_state; + } + + pf_dev = dh_core_priv(dev); + pf_dev->pci_ioremap_addr[0] = + (uint64_t)(uintptr_t)ioremap(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + if (pf_dev->pci_ioremap_addr[0] == 0) { + ret = -1; + LOG_ERR("ioremap(0x%llx, 0x%llx) failed\n", + (unsigned long long)pci_resource_start(dev->pdev, 0), + (unsigned long long)pci_resource_len(dev->pdev, 0)); + goto err_pci_save_state; + } + + return 0; + +err_pci_save_state: + pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); +err_pci: + pci_disable_device(dev->pdev); + return ret; +} + +void dh_pf_pci_close(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = NULL; + + pf_dev = dh_core_priv(dev); + iounmap((void *)(uintptr_t)pf_dev->pci_ioremap_addr[0]); + 
pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); + pci_disable_device(dev->pdev); + + return; +} + +int32_t zxdh_pf_pci_find_capability(struct pci_dev *pdev, uint8_t cfg_type, + uint32_t ioresource_types, int32_t *bars) +{ + int32_t pos = 0; + uint8_t type = 0; + uint8_t bar = 0; + + for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) { + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), + &type); + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, bar), + &bar); + + /* ignore structures with reserved BAR values */ + if (bar > ZXDH_PF_MAX_BAR_VAL) { + continue; + } + + if (type == cfg_type) { + if (pci_resource_len(pdev, bar) && + pci_resource_flags(pdev, bar) & ioresource_types) { + *bars |= (1 << bar); + return pos; + } + } + } + + return 0; +} + +void __iomem *zxdh_pf_map_capability(struct dh_core_dev *dh_dev, int32_t off, + size_t minlen, uint32_t align, + uint32_t start, uint32_t size, size_t *len, + resource_size_t *pa, uint32_t *bar_off) +{ + struct pci_dev *pdev = dh_dev->pdev; + uint8_t bar = 0; + uint32_t offset = 0; + uint32_t length = 0; + void __iomem *p = NULL; + + pci_read_config_byte(pdev, off + offsetof(struct zxdh_pf_pci_cap, bar), + &bar); + pci_read_config_dword( + pdev, off + offsetof(struct zxdh_pf_pci_cap, offset), &offset); + pci_read_config_dword( + pdev, off + offsetof(struct zxdh_pf_pci_cap, length), &length); + + if (bar_off != NULL) { + *bar_off = offset; + } + + if (length <= start) { + LOG_ERR("bad capability len %u (>%u expected)\n", length, + start); + return NULL; + } + + if (length - start < minlen) { + LOG_ERR("bad capability len %u (>=%zu expected)\n", length, + minlen); + return NULL; + } + + length -= start; + if (start + offset < offset) { + LOG_ERR("map wrap-around %u+%u\n", start, offset); + return NULL; + } + + offset 
+= start; + if (offset & (align - 1)) { + LOG_ERR("offset %u not aligned to %u\n", offset, align); + return NULL; + } + + if (length > size) { + length = size; + } + + if (len) { + *len = length; + } + + if (minlen + offset < minlen || + minlen + offset > pci_resource_len(pdev, bar)) { + LOG_ERR("map custom queue %zu@%u " + "out of range on bar %i length %lu\n", + minlen, offset, bar, + (unsigned long)pci_resource_len(pdev, bar)); + return NULL; + } + + p = pci_iomap_range(pdev, bar, offset, length); + if (unlikely(p == NULL)) { + LOG_ERR("unable to map custom queue %u@%u on bar %i\n", length, + offset, bar); + } else if (pa) { + *pa = pci_resource_start(pdev, bar) + offset; + } + + return p; +} + +int32_t zxdh_pf_common_cfg_init(struct dh_core_dev *dh_dev) +{ + int32_t common = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + /* check for a common config: if not, use legacy mode (bar 0). */ + common = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_COMMON_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &pf_dev->modern_bars); + if (common == 0) { + LOG_ERR("missing capabilities %i, leaving for legacy driver\n", + common); + return -ENODEV; + } + + pf_dev->common = zxdh_pf_map_capability( + dh_dev, common, sizeof(struct zxdh_pf_pci_common_cfg), + ZXDH_PF_ALIGN4, 0, sizeof(struct zxdh_pf_pci_common_cfg), NULL, + NULL, NULL); + if (unlikely(pf_dev->common == NULL)) { + LOG_ERR("pf_dev->common is null\n"); + return -EINVAL; + } + + return 0; +} + +int32_t zxdh_pf_notify_cfg_init(struct dh_core_dev *dh_dev) +{ + int32_t notify = 0; + uint32_t notify_length = 0; + uint32_t notify_offset = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + /* If common is there, these should be too... 
*/ + notify = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_NOTIFY_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &pf_dev->modern_bars); + if (notify == 0) { + LOG_ERR("missing capabilities %i\n", notify); + return -EINVAL; + } + + pci_read_config_dword(pdev, + notify + offsetof(struct zxdh_pf_pci_notify_cap, + notify_off_multiplier), + &pf_dev->notify_offset_multiplier); + pci_read_config_dword(pdev, + notify + offsetof(struct zxdh_pf_pci_notify_cap, + cap.length), + ¬ify_length); + pci_read_config_dword(pdev, + notify + offsetof(struct zxdh_pf_pci_notify_cap, + cap.offset), + ¬ify_offset); + + /* We don't know how many VQs we'll map, ahead of the time. + * If notify length is small, map it all now. Otherwise, map each VQ individually later. */ + if ((uint64_t)notify_length + (notify_offset % PAGE_SIZE) <= + PAGE_SIZE) { + pf_dev->notify_base = zxdh_pf_map_capability( + dh_dev, notify, ZXDH_PF_MAP_MINLEN2, ZXDH_PF_ALIGN2, 0, + notify_length, &pf_dev->notify_len, &pf_dev->notify_pa, + NULL); + if (unlikely(pf_dev->notify_base == NULL)) { + LOG_ERR("pf_dev->notify_base is null\n"); + return -EINVAL; + } + } else { + pf_dev->notify_map_cap = notify; + } + + return 0; +} + +int32_t zxdh_pf_device_cfg_init(struct dh_core_dev *dh_dev) +{ + int32_t device = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + /* Device capability is only mandatory for devices that have device-specific configuration. */ + device = zxdh_pf_pci_find_capability(pdev, ZXDH_PCI_CAP_DEVICE_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &pf_dev->modern_bars); + + /* we don't know how much we should map, but PAGE_SIZE is more than enough for all existing devices. 
*/ + if (device) { + pf_dev->device = zxdh_pf_map_capability( + dh_dev, device, 0, ZXDH_PF_ALIGN4, 0, PAGE_SIZE, + &pf_dev->device_len, NULL, &pf_dev->dev_cfg_bar_off); + if (unlikely(pf_dev->device == NULL)) { + LOG_ERR("pf_dev->device is null\n"); + return -EINVAL; + } + } + return 0; +} + +void zxdh_pf_modern_cfg_uninit(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + if (pf_dev->device) { + pci_iounmap(pdev, pf_dev->device); + } + if (pf_dev->notify_base) { + pci_iounmap(pdev, pf_dev->notify_base); + } + pci_iounmap(pdev, pf_dev->common); +} + +int32_t zxdh_pf_modern_cfg_init(struct dh_core_dev *dh_dev) +{ + int32_t ret = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + ret = zxdh_pf_common_cfg_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_common_cfg_init failed: %d\n", ret); + return -EINVAL; + } + + ret = zxdh_pf_notify_cfg_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_notify_cfg_init failed: %d\n", ret); + goto err_map_notify; + } + + ret = zxdh_pf_device_cfg_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_device_cfg_init failed: %d\n", ret); + goto err_map_device; + } + + return 0; + +err_map_device: + if (pf_dev->notify_base) { + pci_iounmap(pdev, pf_dev->notify_base); + } +err_map_notify: + pci_iounmap(pdev, pf_dev->common); + return -EINVAL; +} + +uint16_t zxdh_pf_get_queue_notify_off(struct dh_core_dev *dh_dev, + uint16_t index) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite16(index, &pf_dev->common->queue_select); + + return ioread16(&pf_dev->common->queue_notify_off); +} + +void __iomem *zxdh_pf_map_vq_notify(struct dh_core_dev *dh_dev, uint32_t index, + resource_size_t *pa) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t off = 0; + + off = zxdh_pf_get_queue_notify_off(dh_dev, index); + + if (pf_dev->notify_base) { + /* offset should not wrap */ + if 
((uint64_t)off * pf_dev->notify_offset_multiplier + 2 > + pf_dev->notify_len) { + LOG_ERR("bad notification offset %u (x %u) " + "for queue %u > %zd", + off, pf_dev->notify_offset_multiplier, index, + pf_dev->notify_len); + return NULL; + } + + if (pa) { + *pa = pf_dev->notify_pa + + off * pf_dev->notify_offset_multiplier; + } + + return pf_dev->notify_base + + off * pf_dev->notify_offset_multiplier; + } else { + return zxdh_pf_map_capability( + dh_dev, pf_dev->notify_map_cap, 2, 2, + off * pf_dev->notify_offset_multiplier, 2, NULL, pa, + NULL); + } +} + +void zxdh_pf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (!pf_dev->notify_base) { + pci_iounmap(dh_dev->pdev, priv); + } +} + +void zxdh_pf_set_status(struct dh_core_dev *dh_dev, uint8_t status) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite8(status, &pf_dev->common->device_status); + + return; +} + +uint8_t zxdh_pf_get_status(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return ioread8(&pf_dev->common->device_status); +} + +static uint8_t zxdh_pf_get_cfg_gen(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t config_generation = 0; + + config_generation = ioread8(&pf_dev->common->config_generation); + LOG_INFO("config_generation is %d\n", config_generation); + + return config_generation; +} + +static uint8_t zxdh_pf_wait_bar_ok(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t config_generation = 0; + uint8_t i = 0; + + for (i = 0; i < 20; i++) { + config_generation = ioread8(&pf_dev->common->config_generation); + + if (!config_generation) { + LOG_INFO("wait %ds, config_generation is %d\n", i, + config_generation); + return 0; + } + + msleep(1000); + } + + return -ETIMEDOUT; +} + +void zxdh_pf_get_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id) +{ + uint32_t 
DEV_MAC_L = 0; + uint16_t DEV_MAC_H = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (pf_dev->pf_sriov_cap_base) { + DEV_MAC_L = ioread32( + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_id + + pf_dev->dev_cfg_bar_off)); + mac[0] = DEV_MAC_L & 0xff; + mac[1] = (DEV_MAC_L >> 8) & 0xff; + mac[2] = (DEV_MAC_L >> 16) & 0xff; + mac[3] = (DEV_MAC_L >> 24) & 0xff; + DEV_MAC_H = ioread16( + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_id + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + mac[4] = DEV_MAC_H & 0xff; + mac[5] = (DEV_MAC_H >> 8) & 0xff; + } + return; +} + +void zxdh_pf_set_vf_mac_reg(struct zxdh_pf_device *pf_dev, uint8_t *mac, + int32_t vf_id) +{ + uint32_t DEV_MAC_L = 0; + uint16_t DEV_MAC_H = 0; + + if (pf_dev->pf_sriov_cap_base) { + DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | + (mac[3] << 24); + DEV_MAC_H = mac[4] | (mac[5] << 8); + iowrite32(DEV_MAC_L, + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_id + + pf_dev->dev_cfg_bar_off)); + iowrite16(DEV_MAC_H, + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_id + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + } + return; +} + +void zxdh_pf_set_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + zxdh_pf_set_vf_mac_reg(pf_dev, mac, vf_id); + return; +} + +void zxdh_set_mac(struct dh_core_dev *dh_dev, uint8_t *mac) +{ + uint32_t DEV_MAC_L = 0; + uint16_t DEV_MAC_H = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + DEV_MAC_L = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24); + DEV_MAC_H = mac[4] | (mac[5] << 8); + iowrite32(DEV_MAC_L, pf_dev->device); + iowrite16(DEV_MAC_H, (void __iomem *)((uint8_t *)pf_dev->device + + ZXDH_DEV_MAC_HIGH_OFFSET)); + return; +} + +void zxdh_get_mac(struct dh_core_dev *dh_dev, uint8_t *mac) +{ + uint32_t DEV_MAC_L 
= 0; + uint16_t DEV_MAC_H = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + DEV_MAC_L = ioread32(pf_dev->device); + mac[0] = DEV_MAC_L & 0xff; + mac[1] = (DEV_MAC_L >> 8) & 0xff; + mac[2] = (DEV_MAC_L >> 16) & 0xff; + mac[3] = (DEV_MAC_L >> 24) & 0xff; + DEV_MAC_H = ioread16((void __iomem *)((uint8_t *)pf_dev->device + + ZXDH_DEV_MAC_HIGH_OFFSET)); + mac[4] = DEV_MAC_H & 0xff; + mac[5] = (DEV_MAC_H >> 8) & 0xff; + return; +} + +uint64_t zxdh_pf_get_features(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint64_t device_feature = 0; + + iowrite32(0, &pf_dev->common->device_feature_select); + device_feature = ioread32(&pf_dev->common->device_feature); + iowrite32(1, &pf_dev->common->device_feature_select); + device_feature |= + ((uint64_t)ioread32(&pf_dev->common->device_feature) << 32); + + return device_feature; +} + +void zxdh_pf_set_features(struct dh_core_dev *dh_dev, uint64_t features) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite32(0, &pf_dev->common->guest_feature_select); + iowrite32((uint32_t)features, &pf_dev->common->guest_feature); + iowrite32(1, &pf_dev->common->guest_feature_select); + iowrite32(features >> 32, &pf_dev->common->guest_feature); + + return; +} + +void zxdh_pf_set_queue_enable(struct dh_core_dev *dh_dev, uint16_t index, + bool enable) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite16(index, &pf_dev->common->queue_select); + iowrite16(enable, &pf_dev->common->queue_enable); +} + +uint16_t zxdh_pf_get_epbdf(struct dh_core_dev *dh_dev) +{ + struct pci_dev *pdev = dh_dev->pdev; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t domain = 0; + uint32_t bus = 0; + uint32_t devid = 0; + uint32_t function = 0; + + if (pdev == NULL) { + LOG_ERR("err: pdev null, return epbdf data 0.\n"); + return 0; + } + + if (sscanf(pci_name(pdev), "%x:%x:%x.%u", &domain, &bus, &devid, + &function) != 4) { + LOG_ERR("failed to get pcie 
bus-info\n"); + return 0; + } + pf_dev->epbdf = BDF_ECAM(bus, devid, function); + return pf_dev->epbdf; +} + +uint64_t zxdh_pf_get_spec_sbdf(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + return pf_dev->spec_sbdf; +} + +bool zxdh_pf_is_multi_ep(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + return pf_dev->is_multi_ep; +} + +uint32_t zxdh_pf_get_rp_sbdf(struct dh_core_dev *dh_dev) +{ + struct pci_dev *pdev = dh_dev->pdev; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *rp_pdev = NULL; + struct pci_dev *uppder_pdev = NULL; + uint32_t domain = 0; + uint32_t bus = 0; + uint32_t devid = 0; + uint32_t function = 0; + + if (pdev == NULL) { + LOG_ERR("err: pdev null, return epbdf data 0\n"); + return 0; + } + + /* 找到当前设备的父节点 */ + uppder_pdev = pci_upstream_bridge(pdev); + if (uppder_pdev == NULL) { + LOG_ERR("err: uppder_pdev null, return rp_sbdf data 0\n"); + return 0; + } + + /* 判断当前是单EP还是多EP */ + if ((uppder_pdev->vendor == ZXDH_PF_VENDOR_ID) && + (uppder_pdev->device == ZXDH_SWITCH_DEVICE_ID)) { + /* switch的上节点 */ + uppder_pdev = pci_upstream_bridge(uppder_pdev); + if (uppder_pdev == NULL) { + LOG_ERR("err: uppder_pdev null, return rp_sbdf data 0\n"); + return 0; + } + /* rp节点 */ + uppder_pdev = pci_upstream_bridge(uppder_pdev); + if (uppder_pdev == NULL) { + LOG_ERR("err: uppder_pdev null, return rp_sbdf data 0\n"); + return 0; + } + rp_pdev = uppder_pdev; + pf_dev->is_multi_ep = true; + } else { + rp_pdev = uppder_pdev; + pf_dev->is_multi_ep = false; + } + + /* 获取rp节点的sbdf号 */ + if (sscanf(pci_name(rp_pdev), "%x:%x:%x.%u", &domain, &bus, &devid, + &function) != 4) { + LOG_ERR("failed to get pcie rp bus-info\n"); + return 0; + } + + pf_dev->rp_sbdf = SBDF_ECAM(domain, bus, devid, function); + LOG_INFO( + "rp: domain %#x, bus %#x, devid %#x, function %#x, rp_sbdf %#x. 
is_multi_ep: %d\n", + domain, bus, devid, function, pf_dev->rp_sbdf, + pf_dev->is_multi_ep); + + return pf_dev->rp_sbdf; +} + +int zxdh_pf_get_pannel_port_num(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->pannel_port_num; +} + +uint16_t zxdh_pf_get_vport(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->vport; +} + +enum dh_coredev_type zxdh_pf_get_coredev_type(struct dh_core_dev *dh_dev) +{ + return dh_dev->coredev_type; +} + +uint16_t zxdh_pf_get_pcie_id(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->pcie_id; +} + +static uint16_t zxdh_pf_get_slot_id(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t slot_id = 0; + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (dh_dev->coredev_type == DH_COREDEV_PF) + return pf_dev->slot_id; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return 0; + } + + msg->payload.hdr.op_code = ZXDH_VF_SLOT_ID_GET; + + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("send_msg_to_pf failed, err: %d\n", err); + kfree(msg); + return 0; + } + slot_id = msg->reps.slot_info.slot_id; + kfree(msg); + + return slot_id; +} + +bool zxdh_pf_is_special_bond(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if (FW_FEATURE_GET(fwcap->fw_feature, FW_FEATURE_STD) == 1) { + return true; + } + + return false; +} + +bool zxdh_pf_suport_np_ext_stats(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if 
(FW_FEATURE_GET(fwcap->fw_feature, FW_FEATURE_NPSTAT) == 1) { + return true; + } + + return false; +} + +static bool zxdh_pf_is_fw_feature_support(struct dh_core_dev *dh_dev, + uint32_t feature) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if (FW_FEATURE_GET(fwcap->fw_feature, feature) == 1) { + return true; + } + + return false; +} + +struct zxdh_np_ext_stats *zxdh_get_np_ext_stats(struct dh_core_dev *dh_dev, + uint8_t panel_id) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t err_offset = 0; + uint32_t disc_offset = 0; + struct zxdh_np_ext_stats *ext_stats = &pf_dev->np_ext_stats; + void __iomem *err_addr = NULL; + void __iomem *disc_addr = NULL; + uint32_t err_cnt = 0; + uint32_t disc_cnt = 0; + + err_offset = 2 * pf_dev->phy_port * sizeof(uint32_t); + disc_offset = (2 * pf_dev->phy_port + 1) * sizeof(uint32_t); + + err_addr = (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_NP_EXT_STATS_OFFSET + + err_offset); + disc_addr = (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_NP_EXT_STATS_OFFSET + + disc_offset); + if ((err_addr >= + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_NP_EXT_STATS_OFFSET + + ZXDH_NP_EXT_STATS_SIZE)) || + (disc_addr >= + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_NP_EXT_STATS_OFFSET + + ZXDH_NP_EXT_STATS_SIZE))) { + LOG_ERR("addr out-off rang, err_addr: %llx\n", + (unsigned long long)(uintptr_t)err_addr); + LOG_ERR("addr out-off rang, disc_addr: %llx\n", + (unsigned long long)(uintptr_t)disc_addr); + return NULL; + } + + err_cnt = ioread32((void __iomem *)err_addr); + disc_cnt = ioread32((void __iomem *)disc_addr); + + ext_stats->rx_vport2np_packets = err_cnt + disc_cnt; + + return ext_stats; +} + +bool zxdh_pf_is_bond(struct dh_core_dev *dh_dev) +{ + bool flags = false; + + if (!dh_core_is_pf(dh_dev)) { + return false; + } + + if (zxdh_pf_is_special_bond(dh_dev)) { + return 
false; + } + + if ((dh_dev->pdev->device == ZXDH_INICA_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICB_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICC_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICA_UPF_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_DPUA_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_BOND0_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_BOND1_DEVICE_ID)) { + flags = true; + } + + return flags; +} + +bool zxdh_pf_is_upf(struct dh_core_dev *dh_dev) +{ + bool flags = false; + + if ((dh_dev->pdev->device == ZXDH_UPF_PF_I512_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_UPF_VF_I512_DEVICE_ID)) { + flags = true; + } + + return flags; +} + +bool zxdh_pf_is_nic(struct dh_core_dev *dh_dev) +{ + if ((dh_dev->pdev->device == ZXDH_PF_E312_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E312_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E316_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E316_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E316_XPU_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E316_XPU_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E310_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E310_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E310_CMCC_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E310_CMCC_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E311_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E311_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E310_RDMA_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E310_RDMA_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E310S_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E310S_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E312S_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E312S_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E312_RDMA_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E312_RDMA_DEVICE_ID) || + (dh_dev->pdev->device == CTC_PF_B512Y_DEVICE_ID) || + (dh_dev->pdev->device == CTC_VF_B512Y_DEVICE_ID) || + 
(dh_dev->pdev->device == CTC_PF_B522Y_DEVICE_ID) || + (dh_dev->pdev->device == CTC_VF_B522Y_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_PF_E312S_D_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_VF_E312S_D_DEVICE_ID)) { + return true; + } + + return false; +} + +bool zxdh_pf_is_rdma_enable(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if (FW_FEATURE_GET(fwcap->fw_feature, FW_FEATURE_RDMA) == 1) { + return true; + } + + return false; +} + +bool zxdh_pf_is_drs_sec_enable(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if (FW_FEATURE_GET(fwcap->fw_feature, FW_FEATURE_SEC) == 1) { + return true; + } + + return false; +} + +uint32_t zxdh_pf_get_dev_type(struct dh_core_dev *dh_dev) +{ + if ((dh_dev->pdev->device == ZXDH_UPF_PF_I512_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_UPF_VF_I512_DEVICE_ID)) { + return ZXDH_DEV_UPF; + } + + if ((dh_dev->pdev->device == ZXDH_INICD_NE0_PF_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_NE0_VF_DEVICE_ID)) { + return ZXDH_DEV_NE0; + } + + if ((dh_dev->pdev->device == ZXDH_INICD_NE1_PF_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_NE1_VF_DEVICE_ID)) { + return ZXDH_DEV_NE1; + } + + return ZXDH_DEV_UNKNOW; +} + +uint8_t zxdh_pf_get_queue_pairs(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->vq_pairs; +} + +struct zxdh_vf_item *zxdh_pf_get_vf_item(struct dh_core_dev *dh_dev, + uint16_t vf_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (dh_dev->coredev_type != DH_COREDEV_PF) { + LOG_ERR("Invalid device\n"); + return ERR_PTR(-EINVAL); + } + + if (vf_idx >= ZXDH_VF_NUM_MAX) { + LOG_ERR("vf idx(%u) out of range(0~255)\n", vf_idx); + return ERR_PTR(-EINVAL); + } + + if (pf_dev->vf_item == NULL) { + LOG_ERR("vf_item is NULL\n"); + return ERR_PTR(-EINVAL); + } 
+ + if (!pf_dev->vf_item[vf_idx].enable) { + LOG_ERR("vf(%u) is disable\n", vf_idx); + return ERR_PTR(-EINVAL); + } + + return &(pf_dev->vf_item[vf_idx]); +} + +int32_t zxdh_vf_compat_check(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + union zxdh_msg *msg = NULL; + int32_t err = 0; + uint64_t msg_idmax = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (dh_dev->coredev_type == DH_COREDEV_PF) + return 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -1; + } + + msg->payload.hdr.op_code = ZXDH_GET_K_CMPAT_VERINFO; + msg->payload.kernel_cmpat_msg.vfid = VQM_VFID(pf_dev->vport); + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, msg, + ¶); + if (err != 0) { + LOG_ERR("send_msg_to_pf failed, err: %d\n", err); + kfree(msg); + return err; + } + + msg_idmax = msg->reps.kernel_cmpat_rsp.k_msg_idmax; + if (msg_idmax < + ZXDH_MSG_TYPE_CNT_MAX) { //vf驱动的消息数量比pf驱动多,做兼容性提示 + LOG_INFO( + "msg_idmax error!, msg_idmax=%lld, ZXDH_MSG_TYPE_CNT_MAX=%d\n", + msg_idmax, ZXDH_MSG_TYPE_CNT_MAX); + LOG_INFO( + "Perhaps the version of the pf device driver is too old.\n"); + } + + kfree(msg); + + return 0; +} + +static int32_t zxdh_get_mcfeature_from_pf(struct dh_core_dev *dh_dev, + uint64_t *mcfeature) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (dh_dev->coredev_type == DH_COREDEV_PF) + return -1; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + LOG_ERR("failed to kzalloc\n"); + return -1; + } + + msg->payload.hdr.op_code = ZXDH_MC_CMPAT_VERINFO; + msg->payload.mcode_feature_msg.dev_id = 0; + msg->payload.mcode_feature_msg.index = 1; + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, msg, + ¶); + if (err != 
0) { + LOG_ERR("send_msg_to_pf failed, err: %d\n", err); + kfree(msg); + return err; + } + + if (msg->reps.mcode_feature_rsp.len != + sizeof(msg->reps.mcode_feature_rsp.feature)) { + LOG_ERR("rsp len error!, len=%lld\n", + msg->reps.mcode_feature_rsp.len); + kfree(msg); + return -1; + } + + *mcfeature = msg->reps.mcode_feature_rsp.feature; + + kfree(msg); + + return 0; +} + +/* 如果识别到微码兼容性问题,调整ZXDH_MCODE_FEATURE_VAL的值与微码feature值对齐 */ +#define ZXDH_MCODE_FEATURE_INDEX (1) +int32_t dh_pf_mcode_compat_check(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + int32_t ret = 0; + uint64_t mcode_feature = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + if (dh_core_is_pf(dh_dev)) { + ret = dpp_mcode_feature_get(&pf_info, ZXDH_MCODE_FEATURE_INDEX, + &mcode_feature); + if (ret > 0) { + LOG_ERR("mcode_feature_get failed! ret=%d\n", ret); + return -ret; + } + } else { + ret = zxdh_get_mcfeature_from_pf(dh_dev, &mcode_feature); + if (ret != 0) { + LOG_ERR("get_mcfeature_from_pf failed! 
/*
 * zxdh_pf_dpp_init() - register this vport with the DPP layer and, on first
 * boot, run the microcode compatibility check.
 * @dh_dev: core device (PF or VF).
 * @boot:   true only on initial bring-up; gates the mcode compat check.
 *
 * Return: 0 on success; the negated dpp_vport_register() status on failure
 * (this code treats positive DPP return values as errors and negates them);
 * or the error from dh_pf_mcode_compat_check().
 *
 * NOTE(review): when the boot-time mcode check fails, the vport registration
 * performed above is not undone here - confirm the caller cleans it up.
 */
int32_t zxdh_pf_dpp_init(struct dh_core_dev *dh_dev, bool boot)
{
	struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev);
	int32_t ret = 0;
	DPP_PF_INFO_T pf_info = { 0 };

	pf_info.slot = pf_dev->slot_id;
	pf_info.vport = pf_dev->vport;

	ret = dpp_vport_register(&pf_info, dh_dev->pdev);
	if (ret > 0) {
		return -ret;
	}

	/* PF-only state: reset the policer table and mark the device up. */
	if (dh_dev->coredev_type == DH_COREDEV_PF) {
		clear_zxdh_plcr_table(&(pf_dev->plcr_table));
		dpp_dev_status_set(&pf_info, 1);
	}

	if (boot) {
		ret = dh_pf_mcode_compat_check(dh_dev);
		if (ret != 0) {
			return ret;
		}
	}

	return ret;
}
LOG_ERR("kvzalloc ip6mac_tbl failed\n"); + return -ENOMEM; + } + + pf_dev->ip6mac_tbl->ip6mact_size = ip6mact_size; + + INIT_LIST_HEAD(&pf_dev->ip6mac_tbl->ip6mac_free_head); + + mutex_init(&pf_dev->ip6mac_tbl->mlock); + + for (i = 0; i < pf_dev->ip6mac_tbl->ip6mact_size; ++i) + INIT_LIST_HEAD(&pf_dev->ip6mac_tbl->hash_list[i]); + + ip6mac_entry_list = kvcalloc(pf_dev->ip6mac_tbl->ip6mact_size, + sizeof(struct zxdh_ipv6_mac_entry), + GFP_KERNEL); + if (!ip6mac_entry_list) { + kvfree(pf_dev->ip6mac_tbl); + LOG_ERR("kvcalloc ip6mac_entry_list failed\n"); + return -ENOMEM; + } + pf_dev->ip6mac_tbl->ip6mac_entry_list = (void *)ip6mac_entry_list; + + for (i = 0; i < pf_dev->ip6mac_tbl->ip6mact_size; i++) { + INIT_LIST_HEAD(&ip6mac_entry_list[i].list); + list_add_tail(&ip6mac_entry_list[i].list, + &pf_dev->ip6mac_tbl->ip6mac_free_head); + } + + return 0; +} + +void zxdh_cleanup_ip6mac_tbl(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_ipv6_mac_tbl *ip6mac_tbl = pf_dev->ip6mac_tbl; + + if (ip6mac_tbl) { + if (ip6mac_tbl->ip6mac_entry_list) { + kvfree(ip6mac_tbl->ip6mac_entry_list); + ip6mac_tbl->ip6mac_entry_list = NULL; + } + kvfree(ip6mac_tbl); + pf_dev->ip6mac_tbl = NULL; + } +} + +struct pci_dev *zxdh_pf_get_pdev(struct dh_core_dev *dh_dev) +{ + return dh_dev->pdev; +} + +uint64_t zxdh_pf_get_bar_virt_addr(struct dh_core_dev *dh_dev, uint8_t bar_num) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->pci_ioremap_addr[bar_num]; +} + +//#define BAR_MSG_RETRY_CNT_MAX (10) + +uint64_t zxdh_pf_get_bar_phy_addr(struct dh_core_dev *dh_dev, uint8_t bar_num) +{ + return pci_resource_start(dh_dev->pdev, bar_num); +} + +uint64_t zxdh_pf_get_bar_size(struct dh_core_dev *dh_dev, uint8_t bar_num) +{ + return pci_resource_len(dh_dev->pdev, bar_num); +} + +int32_t zxdh_pf_msg_send_cmd(struct dh_core_dev *dh_dev, uint16_t module_id, + void *msg, void *ack, + struct zxdh_bar_extra_para *para) +{ + struct 
zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + zxdh_reps_info *reps = (zxdh_reps_info *)(ack); + vqm_rsp_host_data *vqm_reps = (vqm_rsp_host_data *)(ack); + uint64_t vaddr = 0; + int32_t err = 0; + uint32_t i = 0; + + vaddr = (uint64_t)ZXDH_BAR_MSG_BASE(pf_dev->pci_ioremap_addr[0]); + + for (i = 0; i < (para->retrycnt + 1); i++) { + if (pf_dev->quick_remove) + return 0; + + if (!pf_dev->bar_chan_valid) + return -7; + + err = zxdh_send_command(vaddr, pf_dev->pcie_id, module_id, msg, + ack, TRUE); + if (((-err) != BAR_MSG_ERR_LOCK_FAILED) && + ((-err) != BAR_MSG_ERR_TIME_OUT)) + break; + + if ((-err) == BAR_MSG_ERR_LOCK_FAILED) { + LOG_WARN( + "Get lock failed while send msg, try again ...(cnt:%u)", + i); + msleep(200); + } + if ((-err) == BAR_MSG_ERR_TIME_OUT) { + LOG_WARN( + "Timeout while send msg, try again ...(cnt:%u)", + i); + msleep(500); + } + } + + if (err != 0) { + LOG_ERR("zxdh_send_command failed, err=%d\n", err); + return -1; + } + + if (module_id == MODULE_CFG_VQM) { + if (vqm_reps->check_result != 0xaa) { + LOG_ERR("failed vqm_reps->check_result: 0x%x\n", + vqm_reps->check_result); + return -1; + } + return 0; + } + + if (reps->flag != ZXDH_REPS_SUCC) { + if (reps->flag == ZXDH_INVALID_OP_CODE) { + LOG_ERR("msg to vf is invlaid op_code, reps->flag:0x%x\n", + reps->flag); + return ZXDH_INVALID_OP_CODE; + } + LOG_ERR("failed reps->flag: 0x%x\n", reps->flag); + return -1; + } + + return err; +} + +int32_t zxdh_pf_query_port(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct port_message_recv *recv_data = NULL; + struct zxdh_port_msg *payload = NULL; + struct zxdh_pannle_port *pnlport, *recvport; + int32_t ret = 0, idx = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + payload = kzalloc(sizeof(struct zxdh_port_msg), GFP_KERNEL); + if (unlikely(NULL == payload)) { + LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + recv_data = 
/*
 * zxdh_pf_query_fwinfo() - fetch firmware-provided device information.
 * @dh_dev: core device to query.
 *
 * Port layout is currently the only firmware information collected, so this
 * simply forwards the result of zxdh_pf_query_port().
 *
 * Return: 0 on success, the underlying query error otherwise.
 */
int32_t zxdh_pf_query_fwinfo(struct dh_core_dev *dh_dev)
{
	return zxdh_pf_query_port(dh_dev);
}
qnum; +} + +uint16_t zxdh_pf_get_queue_size(struct dh_core_dev *dh_dev, uint16_t index) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t queue_size = 0; + + iowrite16(index, &pf_dev->common->queue_select); + queue_size = ioread16(&pf_dev->common->queue_size); + + return queue_size; +} + +uint16_t zxdh_pf_get_queue_vector(struct dh_core_dev *dh_dev, uint16_t channel, + struct list_head *eqs_list, + uint16_t queue_index, uint16_t vq_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_pf_pci_common_cfg __iomem *cfg = pf_dev->common; + struct dh_eq_vqs *eq_vqs = NULL; + struct dh_eq_vqs *n; + int32_t i = 0; + int32_t msix_vec = ZXDH_MSI_NO_VECTOR; + + iowrite16(queue_index, &cfg->queue_select); + + list_for_each_entry_safe(eq_vqs, n, eqs_list, list) { + if (i++ == channel) { + iowrite16(eq_vqs->vq_s.core.irq->index, + &cfg->queue_msix_vector); + break; + } + } + + msix_vec = ioread16(&cfg->queue_msix_vector); + LOG_DEBUG("%s vq %d mapped to irqn %d\n", pci_name(dh_dev->pdev), + vq_idx, eq_vqs->vq_s.core.irq->irqn); + /* Flush the write out to device */ + return msix_vec; +} + +void zxdh_pf_release_queue_vector(struct dh_core_dev *dh_dev, + int32_t queue_index) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_pf_pci_common_cfg __iomem *cfg = pf_dev->common; + + iowrite16(queue_index, &cfg->queue_select); + iowrite16(ZXDH_MSI_NO_VECTOR, &cfg->queue_msix_vector); +} + +void zxdh_pf_set_queue_size(struct dh_core_dev *dh_dev, uint32_t index, + uint16_t size) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite16(index, &pf_dev->common->queue_select); + iowrite16(size, &pf_dev->common->queue_size); +} + +void zxdh_pf_set_queue_address(struct dh_core_dev *dh_dev, uint32_t index, + uint64_t desc_addr, uint64_t driver_addr, + uint64_t device_addr) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + iowrite16(index, &pf_dev->common->queue_select); + iowrite32((uint32_t)desc_addr, 
/*
 * zxdh_pf_get_phy_vq_info() - split a physical queue index into the index of
 * the 32-bit occupancy register that tracks it and the bit position inside
 * that register.
 * @phy_index:  physical queue index; must be < ZXDH_MAX_QUEUES_NUM.
 * @phy_vq_reg: out - register index (phy_index / ZXDH_PHY_REG_BITS).
 * @vq_bit:     out - bit position  (phy_index % ZXDH_PHY_REG_BITS).
 *
 * Return: 0 on success, -1 when @phy_index is out of range (outputs are left
 * untouched in that case).
 */
int32_t zxdh_pf_get_phy_vq_info(uint32_t phy_index, uint32_t *phy_vq_reg,
				uint32_t *vq_bit)
{
	if (phy_index >= ZXDH_MAX_QUEUES_NUM) {
		LOG_ERR("Invalid phy_index:%u\n", phy_index);
		return -1;
	}

	*phy_vq_reg = phy_index / ZXDH_PHY_REG_BITS;
	*vq_bit = phy_index % ZXDH_PHY_REG_BITS;

	return 0;
}
zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t phy_vq_reg = 0; + uint32_t val = 0; + uint32_t done = 0; + uint32_t j = 0; + uint16_t index = 0; + uint16_t total_queue_num = total_qp * 2; + uint16_t start_qp_id = start_id * 2; + + uint32_t phy_vq_reg_oft = start_qp_id / ZXDH_PHY_REG_BITS; + uint32_t inval_bit = start_qp_id % ZXDH_PHY_REG_BITS; + uint32_t res_bit = (total_queue_num + inval_bit) % ZXDH_PHY_REG_BITS; + uint32_t vq_reg_num = + (total_queue_num + inval_bit) / ZXDH_PHY_REG_BITS + + (res_bit ? 1 : 0); + + LOG_DEBUG( + "phy_vq_reg_oft:%u, inval_bit is %u, res_bit:%u, vq_reg_num:%u\n", + phy_vq_reg_oft, inval_bit, res_bit, vq_reg_num); + + for (phy_vq_reg = 0; phy_vq_reg < vq_reg_num; phy_vq_reg++) { + val = ioread32(( + void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + PHY_VQ_REG_OFFSET + + (phy_vq_reg + + phy_vq_reg_oft) * + 4)); + + if (phy_vq_reg == 0) { + val = val | (((uint32_t)1 << inval_bit) - 1); + } + + if ((phy_vq_reg == (vq_reg_num - 1)) && (res_bit != 0)) { + val = val | (~((uint32_t)1 << res_bit) + 1); + } + + for (j = queue_type; + (j < ZXDH_PHY_REG_BITS) && (index < vq_cnt); + j += ZXDH_PF_POWER_INDEX2) { + if ((val & (ZXDH_PF_GET_PHY_INDEX_BIT << j)) == 0) { + phy_index[queue_type + 2 * index] = + (phy_vq_reg + phy_vq_reg_oft) * + ZXDH_PHY_REG_BITS + + j; + //LOG_DEBUG("phy_index:%u, qp_bit is %u\n", (queue_type + 2*index), ((phy_vq_reg + phy_vq_reg_oft) * ZXDH_PHY_REG_BITS + j)); + index++; + } + } + + if (index == vq_cnt) { + done = ZXDH_PF_GET_PHY_INDEX_DONE; + break; + } + } + + if (done != ZXDH_PF_GET_PHY_INDEX_DONE) { + LOG_ERR("no availd phy queue, Currently can only apply %u %s queues.\n", + index, queue_type ? 
"tx" : "rx"); + return -1; + } + + return 0; +} + +int32_t find_valid_vqs_by_type(struct dh_core_dev *dh_dev, uint8_t queue_type, + uint16_t vq_cnt, int32_t *phy_index) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t phy_vq_reg = 0; + uint32_t vq_reg_num = ZXDH_MAX_QUEUES_NUM / ZXDH_PHY_REG_BITS; + uint32_t val = 0; + uint32_t done = 0; + uint32_t j = 0; + uint16_t index = 0; + + for (phy_vq_reg = 0; phy_vq_reg < vq_reg_num; phy_vq_reg++) { + val = ioread32(( + void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + PHY_VQ_REG_OFFSET + + phy_vq_reg * 4)); + + for (j = queue_type; + (j < ZXDH_PHY_REG_BITS) && (index < vq_cnt); + j += ZXDH_PF_POWER_INDEX2) { + if ((val & (ZXDH_PF_GET_PHY_INDEX_BIT << j)) == 0) { + phy_index[queue_type + 2 * index] = + phy_vq_reg * ZXDH_PHY_REG_BITS + j; + index++; + } + } + + if (index == vq_cnt) { + done = ZXDH_PF_GET_PHY_INDEX_DONE; + break; + } + } + + if (done != ZXDH_PF_GET_PHY_INDEX_DONE) { + LOG_ERR("no availd phy queue, Currently can only apply %u %s queues.\n", + index, queue_type ? 
"tx" : "rx"); + return -1; + } + + return 0; +} + +int32_t zxdh_pf_find_valid_vqs(struct dh_core_dev *dh_dev, uint16_t vq_cnt, + int32_t *phy_index) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_dev_queue_info *dev_qinfo = NULL; + struct zxdh_fw_compat fw_compat = pf_dev->fw_compat; + int32_t ret = 0; + uint32_t pair_cnt = 0; + uint16_t ep_id = 0; + uint16_t pf_idx = 0; + + pair_cnt = vq_cnt / 2; + + if ((fw_compat.patch >= 1) && zxdh_pf_is_nic(dh_dev)) { + ep_id = EPID_GEN_FROM_VPORT(pf_dev->vport); + pf_idx = GLOBAL_PF_IDX(ep_id, pf_dev->vport); + + dev_qinfo = (struct zxdh_dev_queue_info + *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_DEV_QUEUE_INFO_OFFSET + + pf_idx * 4); + LOG_DEBUG( + "pf(vport:0x%x) get queue config: ep_id:%u, pf_idx:%u, total_qp:%u, start_id:%u\n", + pf_dev->vport, ep_id, pf_idx, dev_qinfo->total_qp, + dev_qinfo->start_id); + + ret = find_valid_vqs_by_bit(dh_dev, ZXDH_PF_RQ_TYPE, pair_cnt, + phy_index, dev_qinfo->total_qp, + dev_qinfo->start_id); + if (ret != 0) { + return ret; + } + + ret = find_valid_vqs_by_bit(dh_dev, ZXDH_PF_TQ_TYPE, pair_cnt, + phy_index, dev_qinfo->total_qp, + dev_qinfo->start_id); + if (ret != 0) { + return ret; + } + + return 0; + } + + ret = find_valid_vqs_by_type(dh_dev, ZXDH_PF_RQ_TYPE, pair_cnt, + phy_index); + if (ret != 0) { + return ret; + } + + ret = find_valid_vqs_by_type(dh_dev, ZXDH_PF_TQ_TYPE, pair_cnt, + phy_index); + if (ret != 0) { + return ret; + } + + return 0; +} + +int32_t zxdh_pf_write_vqs_bit(struct dh_core_dev *dh_dev, uint16_t vq_cnt, + uint32_t *phy_index) +{ + uint32_t phy_vq_reg = 0; + uint32_t vq_bit = 0; + uint32_t val = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t i = 0; + + for (i = 0; i < vq_cnt; ++i) { + phy_vq_reg = phy_index[i] / ZXDH_PHY_REG_BITS; + vq_bit = phy_index[i] % ZXDH_PHY_REG_BITS; + + val = ioread32(( + void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + PHY_VQ_REG_OFFSET + + phy_vq_reg * 4)); + val |= 
(ZXDH_PF_GET_PHY_INDEX_BIT << vq_bit); + iowrite32( + val, + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + PHY_VQ_REG_OFFSET + + phy_vq_reg * 4)); + } + + return 0; +} + +int32_t zxdh_pf_write_queue_tlb(struct dh_core_dev *dh_dev, uint16_t vq_cnt, + uint32_t *phy_index, bool need_msgq) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t i = 0; + uint16_t pcieid = pf_dev->pcie_id; + + for (i = 0; i < vq_cnt; ++i) { + pcieid = pf_dev->pcie_id; + if (need_msgq && (i >= (vq_cnt - 2))) { + pcieid |= BIT(15); + } + iowrite16( + pcieid, + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + pf_dev->qtlb_offset + + phy_index[i] * 2)); + } + + return 0; +} + +uint16_t zxdh_pf_get_fw_patch(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->fw_compat.patch; +} + +void zxdh_pf_update_link_info(struct dh_core_dev *dh_dev, + struct link_info_struct *link_info_val) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + if (pf_dev->link_up && link_info_val->speed == SPEED_UNKNOWN) { + LOG_INFO( + "pf_dev->link_up is %d and link_info_val->speed is %d, can't update pf info\n", + pf_dev->link_up, link_info_val->speed); + return; + } + pf_dev->speed = link_info_val->speed; + pf_dev->autoneg_enable = link_info_val->autoneg_enable; + pf_dev->supported_speed_modes = link_info_val->supported_speed_modes; + pf_dev->advertising_speed_modes = + link_info_val->advertising_speed_modes; + pf_dev->duplex = link_info_val->duplex; +} + +int32_t zxdh_pf_get_drv_msg(struct dh_core_dev *dh_dev, uint8_t *drv_version, + uint8_t *drv_version_len) +{ + *drv_version_len = sizeof(zxdh_pf_driver_version); + memcpy(drv_version, zxdh_pf_driver_version, *drv_version_len); + return 0; +} + +void zxdh_pf_set_vepa(struct dh_core_dev *dh_dev, bool setting) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + pf_dev->vepa = setting; +} + +bool zxdh_pf_get_vepa(struct dh_core_dev *dh_dev) +{ + struct 
zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->vepa; +} + +int32_t zxdh_pf_request_port(struct dh_core_dev *dh_dev, void *data) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t port_num = pf_dev->port_resource.pannel_num; + struct zxdh_pannle_port *port; + struct zxdh_pannle_port *req_data = (struct zxdh_pannle_port *)data; + int32_t idx = 0; + + for (idx = 0; idx < port_num; idx++) { + port = &pf_dev->port_resource.port[idx]; + if (!(port->flags & PORT_FLAGS_ALLOC_STAT)) { + req_data->phyport = port->phyport; + req_data->pannel_id = port->pannel_id; + req_data->link_check_bit = port->link_check_bit; + port->flags |= PORT_FLAGS_ALLOC_STAT; + break; + } + } + + if (idx == port_num) { + LOG_ERR("failed to obtain the panel information from the riscv, or this part is not released when the aux is removed\n"); + return -1; + } + + return 0; +} + +int32_t zxdh_pf_release_port(struct dh_core_dev *dh_dev, uint32_t pnl_id) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t port_num = pf_dev->port_resource.pannel_num; + struct zxdh_pannle_port *port; + int32_t idx = 0; + + for (idx = 0; idx < port_num; idx++) { + port = &pf_dev->port_resource.port[idx]; + if (pnl_id == port->pannel_id) { + port->flags &= ~PORT_FLAGS_ALLOC_STAT; + break; + } + } + + return 0; +} + +void zxdh_pf_set_bond_num(struct dh_core_dev *dh_dev, bool add) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (add) { + pf_dev->bond_num++; + } else { + pf_dev->bond_num--; + } +} + +bool zxdh_pf_if_init(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (pf_dev->bond_num == 0) { + return true; + } + + return false; +} + +void zxdh_pf_set_init_comp_flag(struct dh_core_dev *dh_dev, uint8_t flag) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + pf_dev->aux_comp_flag = flag; + return; +} + +struct zxdh_ipv6_mac_tbl *zxdh_pf_get_ip6mac_tbl(struct dh_core_dev *dh_dev) +{ + struct 
zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->ip6mac_tbl; +} + +static int32_t zxdh_pf_events_call_chain(struct dh_core_dev *dh_dev, + unsigned long type, void *data) +{ + struct dh_eq_table *eq_table = &dh_dev->eq_table; + + return atomic_notifier_call_chain(&eq_table->nh[type], type, data); +} + +/* Started by AICoder, pid:cb5f8h4da9v582014e6c0ae8d0bd2b1c27f6a602 */ +uint16_t zxdh_pf_get_ovs_pf_vfid(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t ovs_pf_vfid; // 使用 uint32_t 来存储4字节的数据 + + // 读取4字节的数据 + ovs_pf_vfid = ioread32( + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_OVS_PF_VFID_OFFSET)); + LOG_INFO("pf(vport:0x%x) get ovs pf vfid 0x%x\n", pf_dev->vport, + ovs_pf_vfid); + + // 将高16位截断以适应uint16_t + return (uint16_t)ovs_pf_vfid; +} +/* Ended by AICoder, pid:cb5f8h4da9v582014e6c0ae8d0bd2b1c27f6a602 */ + +uint8_t zxdh_pf_get_board_type(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->board_type; +} + +bool zxdh_pf_is_hwbond(struct dh_core_dev *dh_dev, bool is_hwbond, + bool update_pf) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (update_pf) { + pf_dev->is_hwbond = is_hwbond; + } + + return pf_dev->is_hwbond; +} + +bool zxdh_pf_is_rdma_aux_plug(struct dh_core_dev *dh_dev, bool is_rdma_aux_plug, + bool update_pf) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (update_pf) { + pf_dev->is_rdma_aux_plug = is_rdma_aux_plug; + } + + return pf_dev->is_rdma_aux_plug; +} + +bool zxdh_pf_is_primary_port(struct dh_core_dev *dh_dev, bool is_primary_port, + bool update_pf) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (update_pf) { + pf_dev->is_primary_port = is_primary_port; + } + + return pf_dev->is_primary_port; +} + +int32_t zxdh_pf_update_hb_file_val(struct dh_core_dev *dh_dev, + uint64_t spec_sbdf, const char *file_name, + bool flag); + +void 
zxdh_pf_optim_hardware_bond_time(struct dh_core_dev *dh_dev, bool enable) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + DPP_PF_INFO_T dpp_pf_info = { + .slot = pf_dev->slot_id, + .vport = pf_dev->vport, + }; + if (enable) { + dpp_pktrx_mcode_glb_cfg_write(&dpp_pf_info, 29, 29, 1); + } else { + dpp_pktrx_mcode_glb_cfg_write(&dpp_pf_info, 29, 29, 0); + } +} + +struct zxdh_en_sf_if en_sf_ops = { + .en_sf_map_vq_notify = zxdh_pf_map_vq_notify, + .en_sf_unmap_vq_notify = zxdh_pf_unmap_vq_notify, + .en_sf_set_status = zxdh_pf_set_status, + .en_sf_get_status = zxdh_pf_get_status, + .en_sf_get_cfg_gen = zxdh_pf_get_cfg_gen, + .en_sf_get_rp_link_status = zxdh_pf_check_remove_state, + .en_sf_get_features = zxdh_pf_get_features, + .en_sf_set_features = zxdh_pf_set_features, + .en_sf_set_vf_mac = zxdh_pf_set_vf_mac, + .en_sf_get_vf_mac = zxdh_pf_get_vf_mac, + .en_sf_set_mac = zxdh_set_mac, + .en_sf_get_mac = zxdh_get_mac, + .en_sf_set_queue_enable = zxdh_pf_set_queue_enable, + .en_sf_get_channels_num = zxdh_pf_get_vqs_channels_num, + .en_sf_get_queue_num = zxdh_pf_get_queue_num, + .en_sf_get_queue_size = zxdh_pf_get_queue_size, + .en_sf_get_queue_vector = zxdh_pf_get_queue_vector, + .en_sf_release_queue_vector = zxdh_pf_release_queue_vector, + .en_sf_set_queue_size = zxdh_pf_set_queue_size, + .en_sf_set_queue_address = zxdh_pf_set_queue_address, + .en_sf_vq_irqs_request = zxdh_pf_vq_irqs_request, + .en_sf_affinity_irqs_release = zxdh_pf_affinity_irqs_release, + .en_sf_switch_irq = zxdh_pf_switch_irq, + .en_sf_get_vq_lock = zxdh_pf_get_vq_lock, + .en_sf_release_vq_lock = zxdh_pf_release_vq_lock, + .en_sf_find_valid_vqs = zxdh_pf_find_valid_vqs, + .en_sf_write_vqs_bit = zxdh_pf_write_vqs_bit, + .en_sf_write_queue_tlb = zxdh_pf_write_queue_tlb, + .en_sf_get_fw_patch = zxdh_pf_get_fw_patch, + .en_sf_get_epbdf = zxdh_pf_get_epbdf, + .en_sf_get_spec_sbdf = zxdh_pf_get_spec_sbdf, + .en_sf_is_multi_ep = zxdh_pf_is_multi_ep, + .en_sf_get_vport = zxdh_pf_get_vport, + 
.en_sf_get_coredev_type = zxdh_pf_get_coredev_type, + .en_sf_get_pcie_id = zxdh_pf_get_pcie_id, + .en_sf_get_slot_id = zxdh_pf_get_slot_id, + .en_sf_is_bond = zxdh_pf_is_bond, + .en_sf_is_upf = zxdh_pf_is_upf, + .en_sf_get_pdev = zxdh_pf_get_pdev, + .en_sf_get_bar_virt_addr = zxdh_pf_get_bar_virt_addr, + .en_sf_get_bar_phy_addr = zxdh_pf_get_bar_phy_addr, + .en_sf_get_bar_size = zxdh_pf_get_bar_size, + .en_sf_msg_send_cmd = zxdh_pf_msg_send_cmd, + .en_sf_async_eq_enable = zxdh_pf_async_eq_enable, + .en_sf_nh_attach = zxdh_pf_nh_attach, + .en_sf_get_vf_item = zxdh_pf_get_vf_item, + .en_sf_set_pf_link_up = zxdh_pf_set_pf_link_up, + .en_sf_get_pf_link_up = zxdh_pf_get_pf_link_up, + .en_sf_update_pf_link_info = zxdh_pf_update_link_info, + .en_sf_get_drv_msg = zxdh_pf_get_drv_msg, + .en_sf_get_vepa = zxdh_pf_get_vepa, + .en_sf_set_vepa = zxdh_pf_set_vepa, + .en_sf_set_bond_num = zxdh_pf_set_bond_num, + .en_sf_if_init = zxdh_pf_if_init, + .en_sf_request_port_info = zxdh_pf_request_port, + .en_sf_release_port_info = zxdh_pf_release_port, + .en_sf_get_link_info_from_vqm = zxdh_pf_get_link_info_from_vqm, + .en_sf_set_vf_link_info = zxdh_pf_set_vf_link_info, + .en_sf_get_vf_is_probe = zxdh_pf_get_vf_is_probe, + .en_sf_set_pf_phy_port = zxdh_pf_set_pf_phy_port, + .en_sf_get_pf_phy_port = zxdh_pf_get_pf_phy_port, + .en_sf_set_init_comp_flag = zxdh_pf_set_init_comp_flag, + .en_sf_events_call_chain = zxdh_pf_events_call_chain, + .en_sf_get_ip6mac_tbl = zxdh_pf_get_ip6mac_tbl, + .en_sf_is_nic = zxdh_pf_is_nic, + .en_sf_is_special_bond = zxdh_pf_is_special_bond, + .en_sf_get_queue_pairs = zxdh_pf_get_queue_pairs, + .en_sf_get_cpl_timeout_if_mask = zxdh_pf_get_cpl_timeout_if_mask, + .en_sf_set_cpl_timeout_mask = zxdh_pf_set_cpl_timeout_mask, + .en_sf_get_hp_irq_ctrl_status = zxdh_pf_get_hp_irq_ctrl_status, + .en_sf_set_hp_irq_ctrl_status = zxdh_pf_set_hp_irq_ctrl_status, + .en_sf_is_rdma_enable = zxdh_pf_is_rdma_enable, + .en_sf_get_dev_type = zxdh_pf_get_dev_type, + 
.en_sf_pf_suport_np_ext_stats = zxdh_pf_suport_np_ext_stats, + .en_sf_get_np_ext_stats = zxdh_get_np_ext_stats, + .en_sf_is_drs_sec_enable = zxdh_pf_is_drs_sec_enable, + .en_sf_is_fw_feature_support = zxdh_pf_is_fw_feature_support, + .en_sf_get_ovs_pf_vfid = zxdh_pf_get_ovs_pf_vfid, + .en_sf_get_board_type = zxdh_pf_get_board_type, + .en_sf_is_hwbond = zxdh_pf_is_hwbond, + .en_sf_is_rdma_aux_plug = zxdh_pf_is_rdma_aux_plug, + .en_sf_is_primary_port = zxdh_pf_is_primary_port, + .en_sf_optim_hardware_bond_time = zxdh_pf_optim_hardware_bond_time, + .en_sf_update_hb_file_val = zxdh_pf_update_hb_file_val, +}; + +void zxdh_adev_release(struct device *dev) +{ + return; +} + +static DEFINE_IDA(zxdh_adev_ida); + +int32_t zxdh_plug_aux_dev(struct dh_core_dev *dh_dev, int32_t idx) +{ + struct zxdh_auxiliary_device *adev = NULL; + struct zxdh_pf_device *pf_dev = NULL; + struct zxdh_en_sf_container *sf_con = NULL; + struct zxdh_pf_adev *pf_adevs_table = NULL; + int32_t ret = 0; + + pf_dev = dh_core_priv(dh_dev); + + if (idx >= pf_dev->adevs_num) { + return 0; + } + + pf_adevs_table = &pf_dev->adevs_table[idx]; + if (pf_adevs_table->adev != NULL) { + return 0; + } + + sf_con = kzalloc(sizeof(struct zxdh_en_sf_container), GFP_KERNEL); + + if (unlikely(sf_con == NULL)) { + LOG_ERR("zxadev kzalloc is null\n"); + return -ENOMEM; + } + + pf_adevs_table->aux_idx = ida_alloc(&zxdh_adev_ida, GFP_KERNEL); + if (pf_adevs_table->aux_idx < 0) { + LOG_ERR("failed to allocate device id for aux drvs\n"); + goto free_kzalloc; + } + + adev = &sf_con->adev; + + adev->id = pf_adevs_table->aux_idx; + adev->dev.parent = &dh_dev->pdev->dev; + adev->dev.release = zxdh_adev_release; + adev->name = ZXDH_PF_EN_SF_DEV_ID_NAME; + + pf_adevs_table->adev = adev; + sf_con->dh_dev = dh_dev; + sf_con->ops = &en_sf_ops; + + ret = zxdh_auxiliary_device_init(adev); + if (ret != 0) { + LOG_ERR("zxdh_auxiliary_device_init failed: %d\n", ret); + goto free_ida_alloc; + } + + ret = zxdh_auxiliary_device_add(adev); + if 
(ret != 0) { + LOG_ERR("zxdh_auxiliary_device_add failed: %d\n", ret); + goto release_aux_init; + } + + return 0; + +release_aux_init: + zxdh_auxiliary_device_uninit(adev); +free_ida_alloc: + ida_simple_remove(&zxdh_adev_ida, pf_adevs_table->aux_idx); + pf_adevs_table->aux_idx = -1; +free_kzalloc: + kfree(sf_con); + sf_con = NULL; + return ret; +} + +void zxdh_unplug_aux_dev(struct dh_core_dev *dh_dev, int32_t idx) +{ + struct zxdh_pf_device *pf_dev = NULL; + struct zxdh_en_sf_container *sf_con = NULL; + struct zxdh_pf_adev *pf_adevs_table = NULL; + + pf_dev = dh_core_priv(dh_dev); + if (idx >= pf_dev->adevs_num) { + return; + } + + pf_adevs_table = &pf_dev->adevs_table[idx]; + if (!pf_adevs_table->adev) { + return; + } + + sf_con = container_of(pf_adevs_table->adev, struct zxdh_en_sf_container, + adev); + + zxdh_auxiliary_device_delete(pf_adevs_table->adev); + zxdh_auxiliary_device_uninit(pf_adevs_table->adev); + ida_simple_remove(&zxdh_adev_ida, pf_adevs_table->aux_idx); + pf_adevs_table->aux_idx = -1; + kfree(sf_con); + sf_con = NULL; + + return; +} + +int32_t dh_pf_vf_vport_get(struct dh_core_dev *dev, uint16_t vf_idx, + uint16_t *vport) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + uint16_t pcie_id = 0; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + uint8_t recv_buf[8] = { 0 }; + int32_t ret = 0; + + if (vport == NULL) { + return BAR_MSG_ERR_NULL; + } + + pcie_id = FIND_VF_PCIE_ID(pf_dev->pcie_id, vf_idx); + + in.virt_addr = (uint64_t)ZXDH_BAR_MSG_BASE(pf_dev->pci_ioremap_addr[0]); + in.payload_addr = &pcie_id; + in.payload_len = sizeof(pcie_id); + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_VPORT_GET; + in.src_pcieid = pf_dev->pcie_id; + + result.recv_buffer = recv_buf; + result.buffer_len = sizeof(recv_buf); + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + switch (ret) { + case BAR_MSG_OK: { + *vport = *(uint16_t *)(recv_buf + 4); + LOG_DEBUG("pf(0x%x) get vf(%u) 
vport(0x%x) success\n", + pf_dev->pcie_id, vf_idx, *vport); + break; + } + default: { + LOG_ERR("Failed to pf(0x%x) get vf(%u) vport, ret:%d.\n", + pcie_id, vf_idx, ret); + break; + } + } + + return ret; +} + +int32_t dh_pf_vf_item_init(struct dh_core_dev *dev, uint16_t vf_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_vf_item *vf_item = NULL; + + if (pf_dev->vf_item == NULL) { + LOG_ERR("vf_item is NULL\n"); + return -EINVAL; + } + vf_item = &pf_dev->vf_item[vf_idx]; + vf_item->link_forced = false; + vf_item->vport = pf_dev->vf_item[0].vport + vf_idx; + vf_item->enable = true; + vf_item->spoofchk = false; + mutex_init(&vf_item->lock); + vf_item->init_np_stats = + kzalloc(sizeof(struct zxdh_en_vport_np_stats), GFP_KERNEL); + if (vf_item->init_np_stats == NULL) { + LOG_ERR("pf_dev->vf_item->init_np_stats failed\n"); + return -ENOMEM; + } + return 0; +} + +int32_t dh_pf_vf_item_uninit(struct dh_core_dev *dev, uint16_t vf_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_vf_item *vf_item = NULL; + + if (pf_dev->vf_item == NULL) { + LOG_ERR("vf_item is NULL\n"); + return -EINVAL; + } + vf_item = &pf_dev->vf_item[vf_idx]; + eth_zero_addr(vf_item->mac); + zxdh_pf_set_vf_mac_reg(pf_dev, vf_item->mac, vf_idx); + vf_item->pf_set_mac = false; + vf_item->enable = false; + vf_item->vlan = 0; + vf_item->qos = 0; + vf_item->vlan_proto = 0; + vf_item->spoofchk = false; + mutex_destroy(&vf_item->lock); + kfree(vf_item->init_np_stats); + return 0; +} + +int32_t dh_pf_vf_enable(struct dh_core_dev *dev, int32_t num_vfs) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int32_t vf_idx = 0; + int32_t ret = 0; + + ret = dh_pf_vf_vport_get(dev, 0, &pf_dev->vf_item[0].vport); + if (ret != 0) { + return ret; + } + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + ret = dh_pf_vf_item_init(dev, vf_idx); + if (ret != 0) { + LOG_ERR("Failed to init vf(%d) item\n", vf_idx); + return ret; + } + } + + return ret; +} + +void 
dh_pf_vf_disable(struct dh_core_dev *dev, int32_t num_vfs) +{ + int32_t vf_idx = 0; + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + dh_pf_vf_item_uninit(dev, vf_idx); + } +} + +int32_t dh_pf_sriov_enable(struct pci_dev *pdev, int32_t num_vfs) +{ + struct dh_core_dev *dev = pci_get_drvdata(pdev); + int32_t pre_existing_vfs = pci_num_vf(pdev); + int32_t ret = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + + if ((pre_existing_vfs != 0) && (pre_existing_vfs == num_vfs)) { + return 0; + } + + ret = dh_pf_vf_enable(dev, num_vfs); + if (ret != 0) { + LOG_ERR("Failed to enable vf\n"); + return ret; + } + +#ifdef ZXDH_SRIOV_SYSFS_EN + ret = zxdh_create_vfs_sysfs(dev, num_vfs); + if (ret != 0) { + LOG_ERR("zxdh_create_vfs_sysfs failed : %d\n", ret); + goto err_create_vfs_sysfs; + } +#endif + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret != 0) { + LOG_ERR("pci_enable_sriov failed : %d\n", ret); + goto err_pci_enable_sriov; + } + + LOG_DEBUG("start init_vf_link_info_work"); + zxdh_events_work_enqueue(dev, &pf_dev->init_vf_link_info_work); + return ret; + +err_pci_enable_sriov: +#ifdef ZXDH_SRIOV_SYSFS_EN + zxdh_destroy_vfs_sysfs(dev, num_vfs); +err_create_vfs_sysfs: +#endif + dh_pf_vf_disable(dev, num_vfs); + + return ret; +} + +void dh_pf_sriov_disable(struct pci_dev *pdev) +{ + struct dh_core_dev *dev = pci_get_drvdata(pdev); + int32_t num_vfs = pci_num_vf(pdev); + + pci_disable_sriov(pdev); +#ifdef ZXDH_SRIOV_SYSFS_EN + zxdh_destroy_vfs_sysfs(dev, num_vfs); +#endif + dh_pf_vf_disable(dev, num_vfs); +} + +int32_t dh_pf_vf_item_create(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + + if (dev->coredev_type == DH_COREDEV_PF) { + pf_dev->vf_item = + kzalloc(sizeof(struct zxdh_vf_item) * ZXDH_VF_NUM_MAX, + GFP_KERNEL); + if (pf_dev->vf_item == NULL) { + LOG_ERR("pf_dev->vf_item kzalloc failed\n"); + return -ENOMEM; + } + } + + return 0; +} + +void dh_pf_vf_item_destroy(struct dh_core_dev *dev, bool disable_vf) +{ + struct 
zxdh_pf_device *pf_dev = dh_core_priv(dev); + + if (dev->coredev_type == DH_COREDEV_PF) { + if (disable_vf) { + pci_disable_sriov(dev->pdev); + } + if (pf_dev->vf_item != NULL) { + kfree(pf_dev->vf_item); + pf_dev->vf_item = NULL; + } + } +} + +static bool is_sn_invalid(uint8_t sn_code[]) +{ + bool all_zero = true; + bool all_ff = true; + uint8_t i = 0; + + for (i = 0; i < SN_CODE_LENGTH; ++i) { + if (sn_code[i] != 0) { + all_zero = false; + } + if (sn_code[i] != 0xff) { + all_ff = false; + } + if (!all_zero && !all_ff) { + break; + } + } + + return all_zero || all_ff; +} + +#define DH_SN_OFFSET (0x5690) +static int32_t zxdh_nic_sn_get(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct nic_sn_info sn_info = { 0 }; + uint8_t i = 0; + + memcpy(&sn_info, + (void *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + DH_SN_OFFSET), + sizeof(struct nic_sn_info)); + + if ((sn_info.fixed_sn_valid != 0xaa) && + (sn_info.pseudo_sn_valid != 0xaa)) { + memcpy(sn_info.sn_code, pci_name(dh_dev->pdev), + strlen(pci_name(dh_dev->pdev)) < SN_CODE_LENGTH ? 
+ strlen(pci_name(dh_dev->pdev)) : + SN_CODE_LENGTH); + sn_info.pseudo_sn_valid = 0xaa; + memcpy((void *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + DH_SN_OFFSET), + &sn_info, sizeof(struct nic_sn_info)); + } + + if (is_sn_invalid(sn_info.sn_code)) + return -2; + + memcpy(pf_dev->sn_code, sn_info.sn_code, SN_CODE_LENGTH); + // 打印数组的全部内容 + printk(KERN_CONT "[zxdh_pf][%s][%d] sn_code: ", __func__, __LINE__); + for (i = 0; i < SN_CODE_LENGTH; ++i) { + printk(KERN_CONT "%02x ", pf_dev->sn_code[i]); + } + printk(KERN_CONT "\n"); + + return 0; +} + +static int32_t dh_pf_slot_id_get(struct zxdh_pf_device *pf_dev) +{ + uint16_t i = 0; + + for (i = 1; i < DPP_PCIE_SLOT_MAX; i++) { + if (is_sn_invalid(dh_slot[i].sn_code)) { + memcpy(dh_slot[i].sn_code, pf_dev->sn_code, + SN_CODE_LENGTH); + pf_dev->slot_id = i; + break; + } + + if (memcmp(pf_dev->sn_code, dh_slot[i].sn_code, + SN_CODE_LENGTH) == 0) { + pf_dev->slot_id = i; + break; + } + } + + if (i == DPP_PCIE_SLOT_MAX) { + return -1; + } + + return 0; +} + +int32_t dh_pf_pcie_id_get(struct dh_core_dev *dh_dev) +{ + int32_t pos = 0; + uint8_t type = 0; + uint16_t padding = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + if (zxdh_nic_sn_get(dh_dev)) { + LOG_ERR("zxdh_nic_sn_get failed\n"); + return -1; + } + + if (dh_pf_slot_id_get(pf_dev)) { + LOG_ERR("dh_pf_slot_id_get failed\n"); + return -1; + } + LOG_INFO("slot_id: 0x%x\n", pf_dev->slot_id); + } + for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) { + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), + &type); + + if (type == ZXDH_PCI_CAP_PCI_CFG) { + pci_read_config_word( + pdev, + pos + offsetof(struct zxdh_pf_pci_cap, + padding[0]), + &padding); + pf_dev->pcie_id = padding; + LOG_INFO("pcie_id: 0x%x\n", pf_dev->pcie_id); + return 0; + } + } + + LOG_INFO("the pci_cap 
that meets the requirements is not matched\n"); + return -1; +} + +static uint64_t pci_size(uint64_t base, uint64_t maxbase, uint64_t mask) +{ + uint64_t size = mask & maxbase; + + if (!size) + return 0; + size = size & ~(size - 1); + if (base == maxbase && ((base | (size - 1)) & mask) != mask) + return 0; + return size; +} + +int32_t zxdh_send_pxe_status_to_riscv(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + zxdh_cfg_np_msg msg = { 0 }; + uint64_t vaddr = 0; + int32_t err = 0; + + if (dh_dev->coredev_type != DH_COREDEV_PF) { + return 0; + } + + msg.dev_id = 0; + msg.type = ZXDH_CFG_NPSDK_TYPE; + msg.operate_mode = ZXDH_STOP_PXE_MODE; + + vaddr = (uint64_t)ZXDH_BAR_MSG_BASE(pf_dev->pci_ioremap_addr[0]); + + err = zxdh_send_command(vaddr, pf_dev->pcie_id, MODULE_NPSDK, &msg, + &msg, true); + if (err != 0) { + LOG_ERR("send pxe status to config np failed: %d\n", err); + } + + return err; +} + +int32_t dh_pf_sriov_cap_cfg_init(struct dh_core_dev *dh_dev) +{ + int32_t pos = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + uint32_t bar_address32 = 0; + uint64_t bar_address64 = 0; + uint64_t bar_size64 = 0; + uint32_t bar_size32 = 0; + uint64_t mask64 = 0; + uint32_t mem_type = 0; + uint16_t nr_virtfn = 0; + + if (dh_dev->coredev_type == DH_COREDEV_VF) { + return 0; + } + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos == 0) { + return 0; + } + + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); + if (nr_virtfn == 0) { + return 0; + } + + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR, &bar_address32); + pci_write_config_dword(pdev, pos + PCI_SRIOV_BAR, ~0); + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR, &bar_size32); + pci_write_config_dword(pdev, pos + PCI_SRIOV_BAR, bar_address32); + + bar_size64 = bar_size32 & PCI_BASE_ADDRESS_MEM_MASK; + bar_address64 = bar_address32 & PCI_BASE_ADDRESS_MEM_MASK; + mask64 = 
(uint32_t)PCI_BASE_ADDRESS_MEM_MASK; + mem_type = bar_address32 & PCI_BASE_ADDRESS_MEM_TYPE_MASK; + + if (mem_type == PCI_BASE_ADDRESS_MEM_TYPE_64) { + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, + &bar_address32); + pci_write_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, ~0); + pci_read_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, + &bar_size32); + pci_write_config_dword(pdev, pos + PCI_SRIOV_BAR + 4, + bar_address32); + + bar_size64 |= ((uint64_t)bar_size32 << 32); + bar_address64 |= ((uint64_t)bar_address32 << 32); + mask64 |= ((uint64_t)~0 << 32); + } + + bar_size64 = pci_size(bar_address64, bar_size64, mask64); + if (!bar_size64) { + LOG_ERR("reg 0x%x: invalid BAR (can't size)\n", pos); + } + + if (bar_address64 == 0) { + pf_dev->pf_sriov_cap_base = NULL; + return 0; + } + + pf_dev->pf_sriov_cap_base = + (void __iomem *)ioremap(bar_address64, bar_size64 * nr_virtfn); + if (!pf_dev->pf_sriov_cap_base) { + LOG_ERR("ioremap(0x%llx, 0x%llx) failed\n", bar_address64, + bar_size64 * nr_virtfn); + } + pf_dev->sriov_bar_size = bar_size64; + return 0; +} + +static uint8_t zxdh_pf_fwcap_readb(struct dh_core_dev *dh_dev, uint32_t offset) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint64_t vaddr = (uint64_t)ZXDH_BAR_FWCAP(pf_dev->pci_ioremap_addr[0]); + + return readb( + (const volatile void __iomem *)(uintptr_t)(vaddr + offset)); +} + +static bool zxdh_pf_is_ovs(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t product = pf_dev->product_type; + + if ((product == ZXDH_PRODUCT_OVS) || (product == ZXDH_PRODUCT_NEO) || + (product == ZXDH_PRODUCT_EVB_EP0) || + (product == ZXDH_PRODUCT_EVB_EP0_EP4)) { + return true; + } + + return false; +} + +static bool zxdh_pf_is_bond_pf_in_ovs(struct dh_core_dev *dh_dev) +{ + if (dh_core_is_pf(dh_dev) && zxdh_pf_is_bond(dh_dev) && + zxdh_pf_is_ovs(dh_dev)) + return true; + + return false; +} + +static int32_t zxdh_pf_lag_init(struct dh_core_dev *dh_dev, int32_t 
*port_num) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + pf_dev->pannel_port_num = 1; + + /* BOND PF && 非OVS场景,则产生异常 */ + if (zxdh_pf_is_bond(dh_dev) && !zxdh_pf_is_ovs(dh_dev)) { + LOG_ERR("pf is not ovs\n"); + return -1; + } + + if (!zxdh_pf_is_bond_pf_in_ovs(dh_dev)) + goto out; + + pf_dev->pannel_port_num = pf_dev->port_resource.pannel_num; + zxdh_regitster_ldev(dh_dev); + LOG_INFO("zxdh pf lag init finish(port num %d)\n", + pf_dev->pannel_port_num); + +out: + *port_num = pf_dev->pannel_port_num; + return 0; +} + +static void zxdh_pf_lag_exit(struct dh_core_dev *dh_dev) +{ + if (!zxdh_pf_is_bond_pf_in_ovs(dh_dev)) + return; + + zxdh_unregitster_ldev(dh_dev); +} + +int32_t dh_pf_adevs_table_init(struct dh_core_dev *dh_dev, int32_t nr) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + pf_dev->adevs_table = + kzalloc(sizeof(*(pf_dev->adevs_table)) * nr, GFP_KERNEL); + if (!pf_dev->adevs_table) { + pf_dev->adevs_num = 0; + LOG_ERR("pf_dev->adevs_table kzalloc failed\n"); + return -ENOMEM; + } + + pf_dev->adevs_num = nr; + + return 0; +} + +int32_t zxdh_pf_vf_qpairs_uninit(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_fw_compat fw_compat = pf_dev->fw_compat; + uint8_t vf_qp_user_max = 0; + uint16_t ep_id = 0; + uint16_t pf_idx = 0; + + if (fw_compat.patch < 1) { + return 0; + } + + if (dh_dev->coredev_type != DH_COREDEV_PF) { + return 0; + } + + ep_id = EPID_GEN_FROM_VPORT(pf_dev->vport); + pf_idx = GLOBAL_PF_IDX(ep_id, pf_dev->vport); + + vf_qp_user_max = ioread8( + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_MAX_QUEUE_USER_OFFSET)); + iowrite8(vf_qp_user_max, + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_QUEUE_USER_OFFSET + + pf_idx)); + + return 0; +} + +void dh_pf_adevs_table_destroy(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (pf_dev->adevs_table != NULL) { + 
kfree(pf_dev->adevs_table); + pf_dev->adevs_table = NULL; + pf_dev->adevs_num = 0; + } +} + +void zxdh_unplug_aux_dev_all(struct dh_core_dev *dh_dev) +{ + int32_t idx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + for (idx = 0; idx < pf_dev->adevs_num; idx++) { + zxdh_unplug_aux_dev(dh_dev, idx); + } +} + +int32_t dh_pf_fw_compat_check(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_fw_compat *fw_compat = NULL; + + fw_compat = + (struct zxdh_fw_compat *)((void __iomem *)(uintptr_t) + pf_dev->pci_ioremap_addr[0] + + ZXDH_FW_VER_OFFSET); + memcpy(&pf_dev->fw_compat, (uint8_t *)(uintptr_t)fw_compat, + sizeof(struct zxdh_fw_compat)); + + if (ZXDH_MODULE_ID != fw_compat->module_id) { + LOG_INFO( + "The module id %u from fw version is wrong, ignore fw compat check\n", + fw_compat->module_id); + return 0; + } + + if (ZXDH_MAJOR != fw_compat->major) { + LOG_ERR("drv major:%u is not match fw:%u!\n", ZXDH_MAJOR, + fw_compat->major); + return -1; + } + + if (ZXDH_FW_MINOR > fw_compat->fw_minor) { + LOG_ERR("drv fw_minor:%u is higher than fw:%u!\n", + ZXDH_FW_MINOR, fw_compat->fw_minor); + return -1; + } + + if (ZXDH_DRV_MINOR < fw_compat->drv_minor) { + LOG_ERR("drv drv_minor:%u is lower than fw:%u!\n", + ZXDH_DRV_MINOR, fw_compat->drv_minor); + return -1; + } + + LOG_INFO("%s fw_compat.patch = %d", pci_name(dev->pdev), + pf_dev->fw_compat.patch); + if (pf_dev->fw_compat.patch >= DH_NEW_QUEEU_ALLOC_PATCH) { + pf_dev->qtlb_offset = ioread32(( + void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VQ_TLB_OFFSET + 4)); + pf_dev->qtlb_offset = + (pf_dev->qtlb_offset << 32) + + ioread32(( + void __iomem + *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VQ_TLB_OFFSET)); + LOG_INFO("qtlb_offset: 0x%llx", pf_dev->qtlb_offset); + //防止读出来的地址值有问题,这里需要尽量做保护 + if ((pf_dev->qtlb_offset + 2 * ZXDH_MAX_QUEUES_NUM) > + pci_resource_len(dev->pdev, 0)) { + LOG_ERR("pf_dev->qtlb_offset out-off rang, (pf_dev->qtlb_offset + 
2*ZXDH_MAX_QUEUES_NUM) over BAR0 size: %llx!", + pci_resource_len(dev->pdev, 0)); + + return -1; + } + } + + return 0; +} + +void dh_pf_fwcap_init(struct dh_core_dev *dev) +{ +#define FWCAP_BAR_READ_UNIT (4) + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + uint32_t idx = 0; + uint32_t group = 0; + + group = sizeof(struct firmware_capability) / FWCAP_BAR_READ_UNIT; + + for (idx = 0; idx < group; idx++) { + *((uint32_t *)&pf_dev->fwcap + idx) = + *((uint32_t *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_FW_CAP_OFFSET) + + idx); + } + + pf_dev->board_type = + ioread8((void __iomem *)(uintptr_t)pf_dev->pci_ioremap_addr[0] + + ZXDH_FW_CAP_OFFSET + 1); + pf_dev->product_type = zxdh_pf_fwcap_readb(dev, ZXDH_PRODUCT_TYPE); + LOG_INFO("%s, board_type: %d, product type: %d\n", pci_name(dev->pdev), + pf_dev->board_type, pf_dev->product_type); + + return; +} + +void zxdh_pf_vq_pairs_config(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_pf_queue_info *pf_qinfo = NULL; + + pf_qinfo = (struct zxdh_pf_queue_info + *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_PF_QUEUE_INFO_OFFSET); + pf_dev->vq_pairs = (pf_qinfo->pf_qp < ZXDH_QUEUE_PAIRS_MAX) ? 
+ pf_qinfo->pf_qp : + ZXDH_QUEUE_PAIRS_MAX; + LOG_DEBUG("setup pf(vport:0x%x) queue pairs to %u\n", pf_dev->vport, + pf_dev->vq_pairs); +} + +void zxdh_pf_vf_vq_pairs_config(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint16_t ep_id = 0; + uint16_t vf_idx = 0; + uint8_t *addr = NULL; + uint8_t val = 0; + uint8_t power = 0; + uint8_t vq_pairs = 0; + + /* vport bit[12:14] ep_id */ + ep_id = EPID_GEN_FROM_VPORT(pf_dev->vport); + vf_idx = GLOBAL_VF_IDX(ep_id, pf_dev->vport); + addr = (uint8_t *)(uintptr_t)pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_QUEUE_PAIRS_OFFSET + (vf_idx / 2); + val = ioread8((void __iomem *)addr); + if (vf_idx % 2) { /* 从高4位获取队列的次幂 */ + power = (val & 0xf0) >> 4; + } else { /* 从低4位获取队列的次幂 */ + power = val & 0xf; + } + + vq_pairs = 1 << power; + if (vq_pairs > ZXDH_QUEUE_PAIRS_MAX) { + LOG_ERR("vf(vport:0x%x) get queue pairs:%u exceeds max value, using default:%u\n", + pf_dev->vport, vq_pairs, ZXDH_QUEUE_PAIRS_MAX); + vq_pairs = ZXDH_QUEUE_PAIRS_MAX; + } + pf_dev->vq_pairs = vq_pairs; + LOG_DEBUG("setup vf(vport:0x%x) queue pairs to %u\n", pf_dev->vport, + pf_dev->vq_pairs); +} + +int32_t zxdh_pf_vq_pairs_init(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_fw_compat fw_compat = pf_dev->fw_compat; + + if (zxdh_pf_is_special_bond(dh_dev) && + (dh_dev->coredev_type == DH_COREDEV_PF)) { + zxdh_pf_vq_pairs_config(dh_dev); + return 0; + } + + if (zxdh_pf_is_nic(dh_dev) == false) { + return 0; + } + + if (fw_compat.patch < 1) { + pf_dev->vq_pairs = ZXDH_MAX_QPS_NUM; + return 0; + } + + if (dh_dev->coredev_type == DH_COREDEV_PF) + zxdh_pf_vq_pairs_config(dh_dev); + else + zxdh_pf_vf_vq_pairs_config(dh_dev); + + return 0; +} + +bool zxdh_pf_is_panel_port(struct dh_core_dev *dh_dev) +{ + if ((zxdh_pf_get_dev_type(dh_dev) == ZXDH_DEV_UPF) || + (zxdh_pf_get_dev_type(dh_dev) == ZXDH_DEV_NE0) || + (zxdh_pf_get_dev_type(dh_dev) == ZXDH_DEV_NE1)) { + return false; + } 
+ + return true; +} + +static int create_directory(const char *path) +{ + struct path parent_path; + struct dentry *dentry; + int ret; + char *last_slash; + char parent[128]; + char name[128]; + + // 分离出父目录和最后一级目录名 + strscpy(parent, path, sizeof(parent)); + last_slash = strrchr(parent, '/'); + if (!last_slash) { + return -EINVAL; + } + + // 分割路径,获取父目录和最后一级目录 + *last_slash = '\0'; + strscpy(name, last_slash + 1, sizeof(name)); + + // 获取父目录 + ret = kern_path(parent, LOOKUP_FOLLOW, &parent_path); + DH_LOG_DEBUG(MODULE_PF, "check parent path %s, ret is %d\n", parent, + ret); + if (ret) { + return ret; + } + + // 加锁父目录的 inode,防止并发冲突 + inode_lock_nested(parent_path.dentry->d_inode, I_MUTEX_PARENT); + + // 获取最后一级的 dentry + dentry = lookup_one_len(name, parent_path.dentry, zte_strlen_s(name)); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); + path_put(&parent_path); + DH_LOG_DEBUG(MODULE_PF, "lookup_one_len error, ret is %d\n", + ret); + inode_unlock(parent_path.dentry->d_inode); + return ret; + } + + // 创建目录 + ret = vfs_mkdir(&nop_mnt_idmap, d_inode(parent_path.dentry), dentry, + 0755); + dput(dentry); + + // 解锁父目录的 inode + inode_unlock(parent_path.dentry->d_inode); + path_put(&parent_path); + + DH_LOG_DEBUG(MODULE_PF, "mkdir %s, ret is %d\n", path, ret); + return ret; +} + +static int create_directory_recursion(const char *path) +{ + int ret = 0; + char *temp_path = NULL; + char *slash = NULL; + + // 创建临时路径缓冲区 + temp_path = kmalloc(zte_strlen_s(path) + 1, GFP_KERNEL); + if (!temp_path) { + return -ENOMEM; + } + + // 从根路径开始逐级检查和创建 + ret = zte_snprintf_s(temp_path, zte_strlen_s(path) + 1, "%s", path); + if (ret < 0) { + LOG_ERR("zte_snprintf_s %s failed, ret=%d\n", path, ret); + kfree(temp_path); + return ret; + } + slash = temp_path; + + // 循环切分路径 + while ((slash = strchr(slash + 1, '/')) != NULL) { + *slash = '\0'; + + // 检查当前路径是否存在,不存在则创建 + DH_LOG_DEBUG(MODULE_PF, "start create %s\n", temp_path); + ret = create_directory(temp_path); + if (ret && ret != -EEXIST) { + 
kfree(temp_path); + return ret; + } + + // 恢复路径中的 '/' + *slash = '/'; + } + + // 最后检查并创建完整路径 + ret = create_directory(temp_path); + kfree(temp_path); + return ret; +} + +int32_t zxdh_pf_update_hb_file_val(struct dh_core_dev *dh_dev, + uint64_t spec_sbdf, const char *file_name, + bool flag) +{ + struct file *file = NULL; + int32_t ret = 0; + char dir_path[128]; + char xxx_file_path[128]; + const char *target_content = flag ? "1" : "0"; + loff_t pos = 0; + + zte_snprintf_s(dir_path, sizeof(dir_path), "/etc/dinghai/net/%llx", + spec_sbdf); + zte_snprintf_s(xxx_file_path, sizeof(xxx_file_path), "%s/%s", dir_path, + file_name); + + file = filp_open(xxx_file_path, O_WRONLY | O_TRUNC, 0); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + if (ret == -ENOENT) { + LOG_INFO( + "File %s does not exist, attempting to create it.\n", + xxx_file_path); + } else { + LOG_ERR("Error opening file %s: %d\n", xxx_file_path, + ret); + return ret; + } + + // Create directory if it doesn't exist + ret = create_directory_recursion(dir_path); + if (ret && ret != -EEXIST) { + LOG_ERR("Failed to create directory %s: %d\n", dir_path, + ret); + return ret; + } + + // Reopen file after directory creation + file = filp_open(xxx_file_path, O_WRONLY | O_CREAT | O_TRUNC, + 0666); + if (IS_ERR(file)) { + LOG_ERR("Error creating file %s: %ld\n", xxx_file_path, + PTR_ERR(file)); + return -1; + } + } + + // Write target content to the file + ret = kernel_write(file, target_content, zte_strlen_s(target_content), + &pos); + if (ret < 0) { + LOG_ERR("Failed to write to file %s: %d\n", xxx_file_path, ret); + filp_close(file, NULL); + return ret; + } + + filp_close(file, NULL); + LOG_INFO("Updated content %s to file %s\n", target_content, + xxx_file_path); + return 0; +} + +int32_t zxdh_read_file_val(const char *xxx_file_path) +{ + struct file *file; + ssize_t bytes_read; + loff_t pos = 0; // 文件读取的起始位置 + char buffer[16] = { 0 }; // 初始化 buffer + size_t buffer_size = sizeof(buffer); + int result = -1; // 
默认返回值,表示错误 + + // 打开文件 + file = filp_open(xxx_file_path, O_RDONLY, 0); + if (IS_ERR(file)) { + LOG_ERR("open %s failed\n", xxx_file_path); + return -1; + } + + // 读取文件内容到 buffer 中 + bytes_read = kernel_read(file, buffer, buffer_size - 1, &pos); + + if (bytes_read != 1) { + LOG_ERR("read %s failed, bytes_read %zd\n", xxx_file_path, + bytes_read); + goto cleanup; + } + + // 计算返回值 + result = (buffer[0] == '0') ? 0 : 1; + LOG_INFO("%s buffer val: %s\n", xxx_file_path, buffer); + +cleanup: + filp_close(file, NULL); // 关闭文件 + return result; +} + +void zxdh_hardware_bond_files_process(struct dh_core_dev *dh_dev) +{ + char solid_file_path[128]; + char primary_file_path[128]; + struct path solid_path, primary_path; + bool is_primary_port; + int32_t ret = 0; + uint16_t pf_id = 0; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + zxdh_pf_get_rp_sbdf(dh_dev); + pf_id = (pf_dev->pcie_id >> 8) & 0x7; + pf_dev->spec_sbdf = ((pf_dev->rp_sbdf) << 8) | (pf_id); + LOG_INFO("spec_sbdf: %#llx, pf_id: %d\n", pf_dev->spec_sbdf, pf_id); + + /* do nothing if vf */ + if (zxdh_pf_get_coredev_type(dh_dev) == DH_COREDEV_VF || + zxdh_pf_is_special_bond(dh_dev)) { + pf_dev->is_hwbond = false; + pf_dev->is_rdma_aux_plug = true; + pf_dev->is_primary_port = true; + return; + } + + // 构建文件路径 + zte_snprintf_s(solid_file_path, sizeof(solid_file_path), + "/etc/dinghai/net/%llx/solid", pf_dev->spec_sbdf); + zte_snprintf_s(primary_file_path, sizeof(primary_file_path), + "/etc/dinghai/net/%llx/primary", pf_dev->spec_sbdf); + + // 检查 solid、primary 文件是否都存在 + if (kern_path(solid_file_path, LOOKUP_FOLLOW, &solid_path) || + kern_path(primary_file_path, LOOKUP_FOLLOW, &primary_path)) { + goto no_solid; // 如果有任何一个文件不存在,跳转至 no_solid + } + LOG_INFO("solid and primary file exist\n"); + + // 读取 solid 文件内容 + ret = zxdh_read_file_val(solid_file_path); + if (ret != 1) { + LOG_INFO("solid config is off\n"); + goto no_solid; + } + pf_dev->is_hwbond = true; + + // 读取 primary 文件 + ret = 
zxdh_read_file_val(primary_file_path); + if (ret == -1) { + LOG_INFO("primary config is off\n"); + goto no_solid; + } + // 判断 primary 文件内容 + is_primary_port = (ret == 0) ? false : true; + + // 根据 is_primary_port 的值执行相应操作 + if (is_primary_port) { + pf_dev->is_primary_port = true; + pf_dev->is_rdma_aux_plug = true; + } else { + pf_dev->is_primary_port = false; + pf_dev->is_rdma_aux_plug = false; + } + LOG_INFO("is_hwbond %d, is_primary_port %d, is_rdma_aux_plug %d\n", + pf_dev->is_hwbond, pf_dev->is_primary_port, + pf_dev->is_rdma_aux_plug); + zxdh_pf_optim_hardware_bond_time(dh_dev, true); + goto out; + +no_solid: + // 跳转到这里 + pf_dev->is_hwbond = false; + pf_dev->is_rdma_aux_plug = true; + pf_dev->is_primary_port = true; + zxdh_pf_optim_hardware_bond_time(dh_dev, false); + zxdh_pf_update_hb_file_val(dh_dev, pf_dev->spec_sbdf, "solid", + pf_dev->is_hwbond); + zxdh_pf_update_hb_file_val(dh_dev, pf_dev->spec_sbdf, "primary", + pf_dev->is_primary_port); + LOG_INFO( + "Reached no_solid. Create/Update config file. 
Exiting function\n"); +out: + return; +} + +static int32_t dh_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct dh_core_dev *dh_dev = NULL; + struct zxdh_pf_device *pf_dev = NULL; + struct devlink *devlink = NULL; + int32_t ret = 0; + int32_t idx = 0; + int32_t port_num = 0; + + LOG_INFO("pf level start\n"); +#ifdef ZTE_SAFE_FUNC_TEST + recording_not_safe_func(); +#endif + + if ((GET_COREDEV_TYPE(pdev) != DH_COREDEV_PF) && (probe_vf == 0)) { + LOG_INFO("probe_vf is N, VF is not allowed to probe\n"); + return -1; + } + + devlink = zxdh_devlink_alloc(&pdev->dev, &dh_pf_devlink_ops, + sizeof(struct zxdh_pf_device)); + if (devlink == NULL) { + LOG_ERR("devlink alloc failed\n"); + return -ENOMEM; + } + + dh_dev = devlink_priv(devlink); + dh_dev->device = &pdev->dev; + dh_dev->pdev = pdev; + dh_dev->devlink_ops = &dh_pf_core_devlink_ops; + + pf_dev = dh_core_priv(dh_dev); + pf_dev->bar_chan_valid = false; + pf_dev->vepa = false; + pf_dev->plcr_table.is_init = false; + mutex_init(&dh_dev->lock); + + dh_dev->coredev_type = GET_COREDEV_TYPE(pdev); + LOG_DEBUG("%s device: %s\n", + (dh_dev->coredev_type == DH_COREDEV_PF) ? 
"PF" : "VF", + pci_name(pdev)); + + ret = dh_pf_pci_init(dh_dev); + if (ret != 0) { + LOG_ERR("dh_pf_pci_init failed: %d\n", ret); + goto err_irq_table_init; + } + + ret = zxdh_pf_modern_cfg_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_modern_cfg_init failed: %d\n", ret); + goto err_cfg_init; + } + + ret = zxdh_pf_wait_bar_ok(dh_dev); + if (ret != 0) { + LOG_ERR("%s wait_bar_ok time out\n", pci_name(dh_dev->pdev)); + goto err_pci; + } + + ret = dh_pf_fw_compat_check(dh_dev); + if (ret != 0) { + LOG_ERR("The driver version and firmware version are incompatible\n"); + goto err_pci; + } + + dh_pf_fwcap_init(dh_dev); + + ret = dh_pf_wait_riscv_ready(dh_dev); + if (ret != 0) { + LOG_ERR("%s wait_riscv_ready time out\n", + pci_name(dh_dev->pdev)); + goto err_pci; + } + + ret = dh_pf_pcie_id_get(dh_dev); + if (ret != 0) { + LOG_ERR("dh_pf_pcie_id_get failed: %d\n", ret); + goto err_pci; + } + + ret = dh_pf_vf_item_create(dh_dev); + if (ret != 0) { + LOG_ERR("Failed to alloc vf item\n"); + goto err_cfg_init; + } + + ret = dh_pf_irq_table_init(dh_dev); + if (ret != 0) { + LOG_ERR("Failed to alloc IRQs\n"); + goto err_vf_item; + } + + ret = dh_pf_eq_table_init(dh_dev); + if (ret != 0) { + LOG_ERR("Failed to alloc IRQs\n"); + goto err_eq_table_init; + } + + ret = dh_pf_events_init(dh_dev); + if (ret != 0) { + LOG_ERR("failed to initialize events\n"); + goto err_events_init; + } + + ret = dh_pf_irq_table_create(dh_dev); + if (ret != 0) { + LOG_ERR("Failed to alloc IRQs\n"); + goto err_irq_table_create; + } + + ret = dh_pf_eq_table_create(dh_dev); + if (ret != 0) { + LOG_ERR("Failed to alloc EQs\n"); + goto err_eq_table_create; + } + + ret = dh_pf_sriov_cap_cfg_init(dh_dev); + if (ret != 0) { + LOG_ERR("dh_pf_sriov_cap_cfg_init failed: %d\n", ret); + goto err_sriov_cap_init; + } + + ret = zxdh_send_pxe_status_to_riscv(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_send_pxe_status_to_riscv failed: %d\n", ret); + goto err_send_pxe_status; + } + + zxdh_devlink_register(devlink); 
+ + ret = zxdh_vf_compat_check(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_vf_check_compat failed: %d\n", ret); + goto err_vf_compat; + } + + ret = zxdh_pf_dpp_init(dh_dev, true); + if (ret != 0) { + LOG_ERR("zxdh_pf_dpp_init failed: %d\n", ret); + goto err_dpp_init; + } + + ret = zxdh_pf_query_fwinfo(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_query_fwinfo failed: %d\n", ret); + goto err_query_fwinfo; + } + + ret = zxdh_pf_vq_pairs_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_pf_vq_pairs_init failed: %d\n", ret); + goto err_query_fwinfo; + } + + ret = zxdh_pf_lag_init(dh_dev, &port_num); + if (ret != 0) { + LOG_ERR("zxdh_pf_lag_init failed: %d\n", ret); + goto err_query_fwinfo; + } + +#ifdef PTP_DRIVER_INTERFACE_EN + if (dh_dev->coredev_type == DH_COREDEV_PF) { + ret = zxdh_ptp_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_ptp_init failed: %d\n", ret); + goto err_ptp_init; + } + } +#endif + +#ifdef CONFIG_DINGHAI_TSN + if (zxdh_pf_is_panel_port(dh_dev)) { + ret = zxdh_tsn_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_tsn_init failed: %d\n", ret); + goto err_tsn_init; + } + } +#endif + +#ifdef ZXDH_SRIOV_SYSFS_EN + ret = zxdh_sriov_sysfs_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_sriov_sysfs_init failed: %d, vport = %x\n", ret, + pf_dev->vport); + goto err_sriov_sysfs; + } +#endif + + ret = zxdh_init_ip6mac_tbl(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_init_ip6mac_tbl failed: %d, vport = %x\n", ret, + pf_dev->vport); + goto err_init_ip6mac_tbl; + } + + ret = zxdh_health_init(dh_dev); + if (ret != 0) { + LOG_ERR("zxdh_health_init failed: %d\n", ret); + goto err_health_init; + } + + ret = dh_pf_adevs_table_init(dh_dev, port_num); + if (ret != 0) { + LOG_ERR("dh_pf_adevs_table_init failed: %d\n", ret); + goto err_adevs_tbl_init; + } + + if (!zxdh_pf_is_bond(dh_dev) && zxdh_pf_is_panel_port(dh_dev)) { + zxdh_hardware_bond_files_process(dh_dev); + } + + for (idx = 0; idx < port_num; idx++) { + zxdh_plug_aux_dev(dh_dev, idx); + } + + LOG_INFO("pf level 
completed\n"); + + return 0; + +err_adevs_tbl_init: + zxdh_drain_health_wq(dh_dev); + zxdh_health_cleanup(dh_dev); +err_health_init: + zxdh_cleanup_ip6mac_tbl(dh_dev); +err_init_ip6mac_tbl: +#ifdef ZXDH_SRIOV_SYSFS_EN + zxdh_sriov_sysfs_exit(dh_dev); +err_sriov_sysfs: +#endif +#ifdef CONFIG_DINGHAI_TSN + if (zxdh_pf_is_panel_port(dh_dev)) { + zxdh_tsn_exit(dh_dev); + } +err_tsn_init: +#endif +#ifdef PTP_DRIVER_INTERFACE_EN + if (dh_dev->coredev_type == DH_COREDEV_PF) + zxdh_ptp_stop(dh_dev); +err_ptp_init: +#endif + zxdh_pf_lag_exit(dh_dev); +err_query_fwinfo: + zxdh_pf_dpp_uninit(dh_dev); +err_dpp_init: +err_vf_compat: + zxdh_devlink_unregister(devlink); +err_send_pxe_status: + dh_pf_sriov_cap_cfg_uninit(dh_dev); +err_sriov_cap_init: + dh_pf_eq_table_destroy(dh_dev); +err_eq_table_create: + dh_pf_irq_table_destroy(dh_dev); +err_irq_table_create: + dh_pf_events_uninit(dh_dev); +err_events_init: + dh_eq_table_cleanup(dh_dev); +err_eq_table_init: + dh_irq_table_cleanup(dh_dev); +err_vf_item: + dh_pf_vf_item_destroy(dh_dev, true); +err_pci: + zxdh_pf_modern_cfg_uninit(dh_dev); +err_cfg_init: + dh_pf_pci_close(dh_dev); +err_irq_table_init: + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + pf_dev = NULL; + return -EPERM; +} + +int32_t zxdh_pf_vf_qpairs_init(struct dh_core_dev *dev, int32_t num_vfs); +int zxdh_load_one(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + int ret = 0; + + mutex_lock(&dh_dev->lock); + if (dh_dev->device_state == ZXDH_DEVICE_STATE_UP) + goto unlock; + + if (dh_dev->driver_process == ZXDH_REMOVE) { + ret = -1; + goto unlock; + } + + if (dh_dev->coredev_type == DH_COREDEV_VF) { + ret = zxdh_vf_wait_pf_ok(dh_dev); + if (ret != 0) { + HEAL_ERR("%s zxdh_vf_wait_pf_ok failed: %d\n", + pci_name(dh_dev->pdev), ret); + goto unlock; + } + } else if (dh_dev->coredev_type == DH_COREDEV_PF) { + if (pf_dev->num_vfs > 0) { + ret = zxdh_pf_vf_qpairs_init( 
+ dh_dev, (int32_t)(pf_dev->num_vfs)); + if (ret != 0) { + HEAL_ERR("Failed to recover vf queue pairs\n"); + goto unlock; + } + } + } + + ret = dh_pf_irq_table_create(dh_dev); + if (ret != 0) { + HEAL_ERR("%s Failed to alloc IRQs\n", pci_name(dh_dev->pdev)); + goto unlock; + } + + ret = dh_pf_eq_table_create(dh_dev); + if (ret != 0) { + HEAL_ERR("%s Failed to alloc EQs\n", pci_name(dh_dev->pdev)); + goto irq_table_destroy; + } + + ret = zxdh_pf_dpp_reset(dh_dev); + if (ret != 0) { + HEAL_ERR("%s zxdh_pf_dpp_reset failed: %d\n", + pci_name(dh_dev->pdev), ret); + goto eq_table_destroy; + } + + ret = zxdh_pf_dpp_init(dh_dev, false); + if (ret != 0) { + HEAL_ERR("%s zxdh_pf_dpp_init failed: %d\n", + pci_name(dh_dev->pdev), ret); + goto eq_table_destroy; + } + + ret = zxdh_pf_call_aux_events(dh_dev, DH_EVENT_TYPE_AUX_LOAD); + if (ret != 0) { + HEAL_ERR("%s DH_EVENT_TYPE_AUX_LOAD failed: %d\n", + pci_name(dh_dev->pdev), ret); + goto dpp_uninit; + } + + pf_dev->fast_unload = false; + dh_dev->device_state = ZXDH_DEVICE_STATE_UP; + health->recovery_cnt++; + HEAL_INFO("%s zxdh_load_one success\n", pci_name(dh_dev->pdev)); + mutex_unlock(&dh_dev->lock); + return 0; + +dpp_uninit: + zxdh_pf_dpp_uninit(dh_dev); +eq_table_destroy: + dh_pf_eq_table_destroy(dh_dev); +irq_table_destroy: + dh_pf_irq_table_destroy(dh_dev); +unlock: + mutex_unlock(&dh_dev->lock); + return ret; +} + +static void zxdh_reset_all_vf_item(struct dh_core_dev *dh_dev) +{ + struct zxdh_vf_item *vf_item = NULL; + uint16_t num_vfs = 0; + uint16_t vf_idx = 0; + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return; + + num_vfs = pci_num_vf(dh_dev->pdev); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_idx); + vf_item->is_probed = false; + } +} + +void zxdh_unload_one(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + HEAL_INFO("%s zxdh_unload_one start\n", pci_name(dh_dev->pdev)); + mutex_lock(&dh_dev->lock); + if 
(dh_dev->driver_process == ZXDH_REMOVE) { + mutex_unlock(&dh_dev->lock); + return; + } + pf_dev->fast_unload = true; + pf_dev->aux_comp_flag = 0; + zxdh_pf_call_aux_events(dh_dev, DH_EVENT_TYPE_AUX_UNLOAD); + dh_pf_eq_table_destroy(dh_dev); + dh_pf_irq_table_destroy(dh_dev); + zxdh_reset_all_vf_item(dh_dev); + mutex_unlock(&dh_dev->lock); +} + +static void dh_pf_remove(struct pci_dev *pdev) +{ + struct dh_core_dev *dh_dev = pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dh_dev); + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (pf_dev == NULL) { + return; + } + LOG_INFO("pf level start\n"); + if (!zxdh_pf_check_remove_state(dh_dev)) { + pf_dev->quick_remove = true; + LOG_INFO("%s: quick_remove start\n", pci_name(pdev)); + } + + mutex_lock(&dh_dev->lock); + dh_dev->driver_process = ZXDH_REMOVE; + mutex_unlock(&dh_dev->lock); + zxdh_unplug_aux_dev_all(dh_dev); + dh_pf_adevs_table_destroy(dh_dev); + + zxdh_drain_health_wq(dh_dev); + zxdh_health_cleanup(dh_dev); + + zxdh_cleanup_ip6mac_tbl(dh_dev); +#ifdef ZXDH_SRIOV_SYSFS_EN + zxdh_sriov_sysfs_exit(dh_dev); +#endif +#ifdef CONFIG_DINGHAI_TSN + if (zxdh_pf_is_panel_port(dh_dev)) { + zxdh_tsn_exit(dh_dev); + } +#endif +#ifdef PTP_DRIVER_INTERFACE_EN + if (dh_dev->coredev_type == DH_COREDEV_PF) { + zxdh_ptp_stop(dh_dev); + } +#endif + zxdh_pf_vf_qpairs_uninit(dh_dev); + + zxdh_pf_lag_exit(dh_dev); + zxdh_pf_dpp_uninit(dh_dev); + + zxdh_devlink_unregister(devlink); + dh_pf_sriov_cap_cfg_uninit(dh_dev); + if (!pf_dev->fast_unload) { + dh_pf_eq_table_destroy(dh_dev); + dh_pf_irq_table_destroy(dh_dev); + } + dh_pf_events_uninit(dh_dev); + dh_eq_table_cleanup(dh_dev); + dh_irq_table_cleanup(dh_dev); + dh_pf_vf_item_destroy(dh_dev, true); + zxdh_pf_modern_cfg_uninit(dh_dev); + dh_pf_pci_close(dh_dev); + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + + pci_set_drvdata(pdev, NULL); + LOG_INFO("pf level completed\n"); + + return; +} + +static int32_t dh_pf_suspend(struct pci_dev 
*pdev, pm_message_t state) +{ + return 0; +} + +static int32_t dh_pf_resume(struct pci_dev *pdev) +{ + return 0; +} + +static void dh_pf_shutdown(struct pci_dev *pdev) +{ + struct dh_core_dev *dh_dev = pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dh_dev); + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + LOG_INFO("pf level start\n"); + mutex_lock(&dh_dev->lock); + dh_dev->driver_process = ZXDH_REMOVE; + mutex_unlock(&dh_dev->lock); + + dh_pf_adevs_table_destroy(dh_dev); + + zxdh_drain_health_wq(dh_dev); + zxdh_health_cleanup(dh_dev); + + zxdh_cleanup_ip6mac_tbl(dh_dev); +#ifdef ZXDH_SRIOV_SYSFS_EN + zxdh_sriov_sysfs_exit(dh_dev); +#endif +#ifdef CONFIG_DINGHAI_TSN + if (!zxdh_pf_is_upf(dh_dev)) { + zxdh_tsn_exit(dh_dev); + } +#endif +#ifdef PTP_DRIVER_INTERFACE_EN + if (dh_dev->coredev_type == DH_COREDEV_PF) { + zxdh_ptp_stop(dh_dev); + } +#endif + + zxdh_pf_lag_exit(dh_dev); + if (!pf_dev->fast_unload) + zxdh_pf_dpp_uninit(dh_dev); + + zxdh_devlink_unregister(devlink); + dh_pf_sriov_cap_cfg_uninit(dh_dev); + if (!pf_dev->fast_unload) { + dh_pf_eq_table_destroy(dh_dev); + dh_pf_irq_table_destroy(dh_dev); + } + dh_pf_events_uninit(dh_dev); + dh_eq_table_cleanup(dh_dev); + dh_irq_table_cleanup(dh_dev); + dh_pf_vf_item_destroy(dh_dev, false); + zxdh_pf_modern_cfg_uninit(dh_dev); + + dh_pf_pci_close(dh_dev); + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + + pci_set_drvdata(pdev, NULL); + LOG_INFO("pf level completed\n"); +} + +static pci_ers_result_t dh_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + // LOG_INFO("PCI error detected\n"); + + return state == pci_channel_io_perm_failure ? 
+ PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t dh_pf_pci_slot_reset(struct pci_dev *pdev) +{ + // LOG_INFO("start PCI slot reset\n"); + return PCI_ERS_RESULT_RECOVERED; +} + +static void dh_pf_pci_resume(struct pci_dev *pdev) +{ + // LOG_INFO("start PCI resume\n"); +} + +int32_t zxdh_user_vf_qpairs_update(struct dh_core_dev *dev, uint8_t vf_qp, + uint16_t pf_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + + iowrite8(vf_qp, + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_QUEUE_USER_OFFSET + + pf_idx)); + return 0; +} + +int32_t zxdh_pf_vf_qpairs_update(struct dh_core_dev *dev, uint8_t vf_qp, + int32_t num_vfs) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + uint8_t power = 0; + int32_t ret = 0; + uint16_t vport = 0; + uint16_t vf_idx = 0; + uint16_t ep_id = 0; + int32_t i = 0; + uint8_t *addr = NULL; + uint8_t val = 0; + + if (vf_qp == 0) + return -1; + + /* 求不超过vf_qp的最大2次幂数的指数 */ + while ((1U << power) <= vf_qp) { + power++; + } + power--; + vf_qp = 1 << power; + LOG_DEBUG("pf(vport:0x%x) setup vf queue pairs:%u, power:%u\n", + pf_dev->vport, vf_qp, power); + + ret = dh_pf_vf_vport_get(dev, 0, &vport); + if (ret != 0) { + LOG_ERR("Failed to pf(vport:0x%x) get vf0 vport\n", + pf_dev->vport); + return ret; + } + + /* vport bit[12:14] ep_id */ + ep_id = EPID_GEN_FROM_VPORT(pf_dev->vport); + if (ep_id >= ZXDH_EP_NUM) { + LOG_ERR("vf vport is err, ep_id:%u\n", ep_id); + return -1; + } + + for (i = 0; i < num_vfs; i++) { + vf_idx = GLOBAL_VF_IDX(ep_id, vport) + i; + addr = (uint8_t *)(uintptr_t)pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_QUEUE_PAIRS_OFFSET + (vf_idx / 2); + val = ioread8((void __iomem *)addr); + if (vf_idx % 2) { /* 将队列的次幂更新到高4位 */ + val = (val & 0xf) | (power << 4); + } else { /* 将队列的次幂更新到低4位 */ + val = (val & 0xf0) | power; + } + iowrite8(val, (void __iomem *)addr); + } + + return 0; +} + +int32_t zxdh_pf_vf_qpairs_init(struct dh_core_dev *dev, int32_t num_vfs) +{ + 
struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_dev_queue_info *dev_qinfo = NULL; + struct zxdh_pf_queue_info *pf_qinfo = NULL; + struct zxdh_fw_compat fw_compat = pf_dev->fw_compat; + uint16_t ep_id = 0; + uint16_t pf_idx = 0; + uint16_t vf_qp_flx = 0; + uint8_t vf_qp_user_max = 0; + uint8_t vf_qp = 0; + int32_t ret = 0; + + if ((!zxdh_pf_is_nic(dev)) || (fw_compat.patch < 1)) + return 0; + + /* 更新内核态vf分配队列数信息 */ + ep_id = EPID_GEN_FROM_VPORT(pf_dev->vport); + pf_idx = GLOBAL_PF_IDX(ep_id, pf_dev->vport); + + dev_qinfo = + (struct zxdh_dev_queue_info + *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_DEV_QUEUE_INFO_OFFSET + pf_idx * 4); + pf_qinfo = (struct zxdh_pf_queue_info + *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_PF_QUEUE_INFO_OFFSET); + LOG_DEBUG( + "pf(vport:0x%x) get queue config: total_qp:%u, start_qp_id:%u, pf_qp:%u, vf_qp:%u\n", + pf_dev->vport, dev_qinfo->total_qp, dev_qinfo->start_id, + pf_qinfo->pf_qp, pf_qinfo->vf_qp); + +#ifdef ZXDH_MSGQ + vf_qp_flx = (dev_qinfo->total_qp - pf_dev->vq_pairs - 1) / num_vfs; +#else + vf_qp_flx = (dev_qinfo->total_qp - pf_dev->vq_pairs) / num_vfs; +#endif + + vf_qp = (pf_qinfo->vf_qp < vf_qp_flx) ? pf_qinfo->vf_qp : vf_qp_flx; + ret = zxdh_pf_vf_qpairs_update(dev, vf_qp, num_vfs); + if (ret != 0) { + LOG_DEBUG( + "Failed to pf(vport:0x%x) setup vf queue pairs to %u\n", + pf_dev->vport, vf_qp); + return ret; + } + + /* 更新用户态vf分配队列数信息 */ + vf_qp_user_max = ioread8( + (void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_VF_MAX_QUEUE_USER_OFFSET)); + vf_qp = (vf_qp_user_max < vf_qp_flx) ? 
vf_qp_user_max : vf_qp_flx; + + zxdh_user_vf_qpairs_update(dev, vf_qp, pf_idx); + + return 0; +} + +int32_t dh_pf_sriov_configure(struct pci_dev *pdev, int32_t num_vfs) +{ + struct dh_core_dev *dev = pci_get_drvdata(pdev); + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_rdma_sriov_event_info rdma_sriov_info = { 0 }; + + if (dev->coredev_type != DH_COREDEV_PF) { + LOG_ERR("This device is not capable of SR-IOV\n"); + return -EOPNOTSUPP; + } + + if (!pf_dev->pf_sriov_cap_base) { + LOG_ERR("sriov not enable\n"); + return -EOPNOTSUPP; + } + + if (num_vfs > 0) { + if (zxdh_pf_vf_qpairs_init(dev, num_vfs) != 0) { + LOG_ERR("Failed to init vf queue pairs\n"); + return -1; + } + + if (zxdh_pf_is_rdma_enable(dev)) { + rdma_sriov_info.pdev = pdev; + rdma_sriov_info.bar0_virt_addr = + pf_dev->pci_ioremap_addr[0]; + rdma_sriov_info.vport_id = pf_dev->vport; + rdma_sriov_info.num_vfs = num_vfs; + zxdh_rdma_events_call(NULL, ZXDH_RDMA_SRIOV_EVENT, + &rdma_sriov_info); + } + + if (dh_pf_sriov_enable(pdev, num_vfs) != 0) { + LOG_ERR("Failed to enable sriov, num_vfs:%d\n", + num_vfs); + return -1; + } + } else { + dh_pf_sriov_disable(pdev); + } + + if (zxdh_pf_pcie_config_store(dev)) + LOG_ERR("zxdh_pf_pcie_config_store failed\n"); + + pf_dev->num_vfs = (uint16_t)num_vfs; + + return num_vfs; +} + +static const struct pci_error_handlers dh_pf_err_handler = { + .error_detected = dh_pci_err_detected, + .slot_reset = dh_pf_pci_slot_reset, + .resume = dh_pf_pci_resume +}; + +static struct pci_driver dh_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = dh_pf_pci_table, + .probe = dh_pf_probe, + .remove = dh_pf_remove, + .suspend = dh_pf_suspend, + .resume = dh_pf_resume, + .shutdown = dh_pf_shutdown, + .err_handler = &dh_pf_err_handler, + .sriov_configure = dh_pf_sriov_configure, +}; + +static int32_t __init dh_pf_pci_init_module(void) +{ + int32_t ret = 0; + + LOG_INFO("%s - version %s %s\n", zxdh_pf_driver_string, + zxdh_pf_driver_version, zxdh_pf_copyright); + 
+#ifdef NEED_XARRAY + dh_radix_tree_init(); +#endif + + ret = pci_register_driver(&dh_pf_driver); + if (ret != 0) { + LOG_ERR("pci_register_driver failed: %d\n", ret); + goto err_register_driver; + } + + ret = dh_pf_msg_recv_func_register(); + if (ret != 0) { + LOG_ERR("dh_pf_msg_recv_func_register failed: %d\n", ret); + goto err_msg_recv_func_registe; + } + +#ifdef CONFIG_ZXDH_SF + ret = zxdh_en_sf_driver_register(); + if (ret != 0) { + LOG_ERR("zxdh_en_sf_driver_register failed: %d\n", ret); + goto err_sf_driver_register; + } +#endif + + return 0; + +#ifdef CONFIG_ZXDH_SF +err_sf_driver_register: + dh_pf_msg_recv_func_unregister(); +#endif +err_msg_recv_func_registe: + pci_unregister_driver(&dh_pf_driver); +err_register_driver: + return ret; +} + +static void dh_pf_pci_exit_module(void) +{ + LOG_INFO("%s - version %s %s\n", zxdh_pf_driver_string, + zxdh_pf_driver_version, zxdh_pf_copyright); + +#ifdef CONFIG_ZXDH_SF + zxdh_en_sf_driver_unregister(); +#endif + + dh_pf_msg_recv_func_unregister(); + + pci_unregister_driver(&dh_pf_driver); + + return; +} + +module_init(dh_pf_pci_init_module); +module_exit(dh_pf_pci_exit_module); diff --git a/drivers/net/ethernet/dinghai/en_pf.h b/drivers/net/ethernet/dinghai/en_pf.h new file mode 100644 index 000000000000..06dba81e61cb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf.h @@ -0,0 +1,362 @@ +#ifndef __ZXDH_EN_PF_H__ +#define __ZXDH_EN_PF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include "plcr.h" +#ifdef CONFIG_DINGHAI_TSN +#include "en_tsn/zxdh_tsn.h" +#endif + +/* Common configuration */ +#define ZXDH_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define ZXDH_PCI_CAP_NOTIFY_CFG 2 +/* ISR access */ +#define ZXDH_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define ZXDH_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define ZXDH_PCI_CAP_PCI_CFG 5 + +#define ZXDH_PF_MAX_BAR_VAL 0x5 +#define ZXDH_PF_ALIGN4 4 +#define ZXDH_PF_ALIGN2 2 +#define 
ZXDH_PF_MAP_MINLEN2 2 + +#define ZXDH_DEV_MAC_HIGH_OFFSET 4 +#define ZXDH_DEV_SPEED_OFFSET 0x4c +#define ZXDH_DEV_DUPLEX_OFFSET 0x50 +#define ZXDH_FW_VER_OFFSET 0x5400 +#define ZXDH_QUEUE_INFO_OFFSET 0x5480 +#define ZXDH_FW_CAP_OFFSET 0x1000 + +#define ZXDH_FWSHARE_BASE_ADDR 0x5000 +#define ZXDH_DEV_QUEUE_INFO_OFFSET (ZXDH_FWSHARE_BASE_ADDR + 0x6a0) +#define ZXDH_PF_QUEUE_INFO_OFFSET (ZXDH_FWSHARE_BASE_ADDR + 0x740) +#define ZXDH_VF_QUEUE_PAIRS_OFFSET (ZXDH_FWSHARE_BASE_ADDR + 0x490) +#define ZXDH_VF_QUEUE_USER_OFFSET \ + (ZXDH_FWSHARE_BASE_ADDR + 0x744) //内核态写,vf加载用户态程序时读 +#define ZXDH_VF_MAX_QUEUE_USER_OFFSET \ + (ZXDH_FWSHARE_BASE_ADDR + \ + 0x743) //固件写:vf加载用户态程序时最大支持的队列对数 +#define ZXDH_OVS_PF_VFID_OFFSET \ + (ZXDH_FWSHARE_BASE_ADDR + 0x77C) //I511 OVS_PF的VFID + +#define ZXDH_NP_EXT_STATS_OFFSET (ZXDH_FWSHARE_BASE_ADDR + 0xA00) +#define ZXDH_NP_EXT_STATS_SIZE (512) + +#define EPID_MASK_BIT (12) +#define PFID_MASK_BIT (8) +#define EPID_GEN_FROM_VPORT(a) (((a) & ~BIT(15)) >> EPID_MASK_BIT) +#define GLOBAL_PF_IDX(a, b) \ + ((a)*ZXDH_PF_NUM_PER_EP + (((b)&ZXDH_PF_IDX_MASK) >> PFID_MASK_BIT)) +#define GLOBAL_VF_IDX(a, b) ((a)*ZXDH_VF_NUM_MAX + ((b)&ZXDH_VF_IDX_MASK)) +#define ZXDH_MAX_QPS_NUM (8) + +#define ZXDH_CFG_NPSDK_TYPE 7 +#define ZXDH_STOP_PXE_MODE 1 + +#define ZXDH_PANNEL_PORT_MAX (10) + +#define TO_EP4_ADDR(addr) (((addr & 0xFFFFFFFFFFFF0000) << 4) | (addr & 0xFFFF)) + +#define ZXDH_EP_NUM 4 +#define ZXDH_QUEUE_PAIRS_MAX 32 +#define ZXDH_VF_IDX_MASK 0xff +#define ZXDH_PF_IDX_MASK 0x700 + +#define GET_COREDEV_TYPE(pdev) \ + ((((pdev->device == ZXDH_VF_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E310_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E312_DEVICE_ID) || \ + (pdev->device == ZXDH_UPF_VF_I512_DEVICE_ID) || \ + (pdev->device == ZXDH_INICA_RDMA_VF_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_DPUB_RDMA_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E316_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E316_XPU_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E311_DEVICE_ID) 
|| \ + (pdev->device == ZXDH_VF_I511_DEVICE_ID) || \ + (pdev->device == ZXDH_INICD_NE0_VF_DEVICE_ID) || \ + (pdev->device == ZXDH_INICD_NE1_VF_DEVICE_ID) || \ + (pdev->device == ZXDH_INICD_NE2_VF_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E310_RDMA_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E310S_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E312S_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E312_RDMA_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_I510_SRIOV_SEC_DEVICE_ID) || \ + (pdev->device == CTC_VF_B512Y_DEVICE_ID) || \ + (pdev->device == CTC_VF_B522Y_DEVICE_ID) || \ + (pdev->device == ZXDH_VF_E312S_D_DEVICE_ID)) || \ + (pdev->device == ZXDH_VF_E310_CMCC_DEVICE_ID)) ? \ + DH_COREDEV_VF : \ + DH_COREDEV_PF) + +#define ZXDH_AUX_COMP_FLAG 1 +#define ZXDH_AUX_COMP_FLAG_CHECK(pf_dev) \ + do { \ + if (pf_dev->aux_comp_flag != ZXDH_AUX_COMP_FLAG) { \ + return; \ + } \ + } while (0) + +#define PORT_FLAGS_ALLOC_STAT (1 << 0) + +struct dh_core_dev; + +struct zxdh_pf_adev { + struct zxdh_auxiliary_device *adev; + int32_t aux_idx; +}; + +struct zxdh_pannle_port { + uint8_t pannel_id; + uint8_t phyport; + uint8_t link_check_bit; + uint8_t flags; +} __attribute__((packed)); + +struct nic_sn_info { + uint8_t fixed_sn_valid; + uint8_t pseudo_sn_valid; + uint8_t pseudoed_before; + uint8_t rsv[1]; +#define SN_CODE_LENGTH (12) + uint8_t sn_code[SN_CODE_LENGTH]; +} __attribute__((packed)); + +struct zxdh_port_resource { + uint8_t pannel_num; + uint8_t bond_num; + uint8_t bond_idx; + uint8_t rsv; + struct zxdh_pannle_port port[ZXDH_PANNEL_PORT_MAX]; +} __attribute__((packed)); + +#define DH_HEALTH_ATTR_NUM (5) +struct zxdh_core_health { + struct timer_list timer; + struct core_health riscv; + struct core_health m7; + uint64_t m7_log_offset; + uint64_t riscv_crdump_size; + unsigned long synd; + uint8_t fatal; + uint8_t health_version; + uint16_t recovery_cnt; + bool health_supported; + bool reset_done; + uint8_t fatal_detect_cnt; + uint8_t selfhealing; + uint16_t synd_statics[64]; + /* wq 
spinlock to synchronize draining */ + spinlock_t wq_lock; + struct workqueue_struct *wq; + unsigned long flags; + struct work_struct m7_bbx_saving_work; + struct work_struct riscv_log_saving_work; + struct work_struct riscv_bbx_saving_work; + struct work_struct fw_fatal_err_work; + struct work_struct dh_reset_work; + struct kobj_attribute attrs[DH_HEALTH_ATTR_NUM]; +}; + +struct zxdh_fw_compat { + uint8_t module_id; + uint8_t major; + int8_t fw_minor; + uint8_t drv_minor; + uint16_t patch; + uint16_t rsv; +} __attribute__((packed)); + +/* If feature bits are added, add the following enumeration types. */ +enum fw_feature_bit { + FW_FEATURE_COMPAT = 0, + FW_FEATURE_RDMA = 1, + FW_FEATURE_STD = 2, + FW_FEATURE_NPSTAT = 3, + FW_FEATURE_SEC = 4, + FW_FEATURE_QUEUE_RESET = 5, + FW_FEATURE_PFM = 6, + FW_FEATURE_MAX, +}; + +#define FW_FEATURE_GET(value, bit) (((value) >> (bit)) & 1) + +struct firmware_capability { + uint8_t ddr_aval : 1; + uint8_t multihost_aval : 1; + uint8_t riscv_init_done : 1; + uint8_t board_type; /* enum dh_board_type */ + uint8_t scen_type; + uint8_t bond_pf_pnl_num; + + uint8_t stat_power_mask; + uint8_t ctrl_power_mask; + uint64_t fw_feature; /* enum fw_feature_bit */ + uint16_t fw_feature_extra; /* enum fw_feature_bit */ + uint32_t pf_rate_default; +} __attribute__((packed)); + +struct zxdh_pf_queue_info { + uint8_t pf_qp; /* 配置的PF队列队数 */ + uint8_t vf_qp; /* 配置的VF队列队数 */ +} __attribute__((packed)); + +/* pf域队列总池子,以全局pf编号为索引 */ +struct zxdh_dev_queue_info { + uint16_t total_qp; /* PF及其下VF总共支持的队列队数 */ + uint16_t start_id; /* 起始队列号*/ +} __attribute__((packed)); + +struct zxdh_np_ext_stats { + uint32_t rx_vport2np_packets; +} __attribute__((packed)); + +struct zxdh_pf_device { + struct list_head virtqueues; + + struct zxdh_pf_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + /* Base of vq notifications (non-legacy mode). 
*/ + void __iomem *device; + void __iomem *notify_base; + void __iomem *pf_sriov_cap_base; + /* Physical base of vq notifications */ + resource_size_t notify_pa; + /* Where to read and clear interrupt */ + uint8_t __iomem *isr; + /* So we can sanity-check accesses. */ + size_t notify_len; + size_t device_len; + /* Capability for when we need to map notifications per-vq. */ + int32_t notify_map_cap; + /* Multiply queue_notify_off by this value. (non-legacy mode). */ + uint32_t notify_offset_multiplier; + int32_t modern_bars; + + uint64_t pci_ioremap_addr[6]; + uint64_t qtlb_offset; + + uint32_t speed; + uint32_t autoneg_enable; + uint32_t supported_speed_modes; + uint32_t advertising_speed_modes; + uint8_t duplex; + bool bar_chan_valid; + uint16_t pcie_id; + uint16_t slot_id; + uint16_t vport; + struct zxdh_vf_item *vf_item; + uint16_t num_vfs; + bool vepa; + uint8_t phy_port; + + bool link_up; + bool fast_unload; + struct work_struct riscv_ready_work; + struct work_struct riscv2pf_msg_proc_work; + struct work_struct vf2pf_msg_proc_work; + struct work_struct link_info_irq_update_vf_bond_pf_work; + struct work_struct init_vf_link_info_work; + struct work_struct riscv_ext_pps_work; + struct work_struct riscv_local_pps_work; + struct work_struct mac_info_pf_work; + + uint64_t sriov_bar_size; + + struct zxdh_plcr_table plcr_table; + struct zxdh_sriov_sysfs sriov; + struct zxdh_pf_adev *adevs_table; + int32_t adevs_num; + + struct zxdh_port_resource port_resource; + struct zxdh_lag *ldev; + int32_t pannel_port_num; + + /* initialization completion flag */ + uint8_t aux_comp_flag; + uint8_t bond_num; + struct zxdh_ptp_private *ptp; +#ifdef CONFIG_DINGHAI_TSN + struct zxdh_tsn_private *tsn; +#endif + struct zxdh_ipv6_mac_tbl *ip6mac_tbl; + uint32_t dev_cfg_bar_off; + struct zxdh_core_health health; + struct zxdh_fw_compat fw_compat; + uint8_t sn_code[SN_CODE_LENGTH]; + uint8_t board_type; + uint8_t product_type; + uint8_t vq_pairs; + uint64_t mcode_feature; + struct 
firmware_capability fwcap; + struct zxdh_np_ext_stats np_ext_stats; + uint16_t epbdf; + uint32_t rp_sbdf; + bool is_multi_ep; + bool is_hwbond; + bool is_rdma_aux_plug; + bool is_primary_port; + uint64_t spec_sbdf; /* special bdf, 用于rdma持久化配置文件路径创建 */ + bool quick_remove; +}; + +struct slot_id_array { + uint8_t sn_code[SN_CODE_LENGTH]; +}; + +struct zxdh_ipv6_mac_entry { + spinlock_t lock; + refcount_t refcnt; + struct list_head list; + uint8_t ipv6_mac[ETH_ALEN]; +}; + +struct zxdh_ipv6_mac_tbl { + unsigned int ip6mact_size; + struct mutex mlock; + struct list_head ip6mac_free_head; + void *ip6mac_entry_list; + struct list_head hash_list[]; +}; + +#define IS_MSGQ_DEV(en_dev) \ + (((en_dev->ops->get_coredev_type(en_dev->parent) == \ + DH_COREDEV_PF) && \ + ((!en_dev->ops->is_bond(en_dev->parent)) || \ + (en_dev->ops->is_bond(en_dev->parent) && \ + en_dev->ops->if_init(en_dev->parent))))) +#define NEED_MSGQ(en_dev) (en_dev->need_msgq) + +bool zxdh_pf_is_bond(struct dh_core_dev *dh_dev); +bool zxdh_pf_is_upf(struct dh_core_dev *dh_dev); +int32_t zxdh_pf_msg_send_cmd(struct dh_core_dev *dh_dev, uint16_t module_id, + void *msg, void *ack, + struct zxdh_bar_extra_para *para); +struct zxdh_vf_item *zxdh_pf_get_vf_item(struct dh_core_dev *dh_dev, + uint16_t vf_idx); +int zxdh_pf_get_pannel_port_num(struct dh_core_dev *dh_dev); +void zxdh_pf_set_vf_mac_reg(struct zxdh_pf_device *pf_dev, uint8_t *mac, + int32_t vf_id); +void zxdh_unload_one(struct dh_core_dev *dh_dev); +int zxdh_load_one(struct dh_core_dev *dh_dev); +int zxdh_pf_status_ok(struct dh_core_dev *dh_dev); +int zxdh_vf_wait_pf_ok(struct dh_core_dev *dh_dev); +void zxdh_pf_set_bond_num(struct dh_core_dev *dh_dev, bool add); +int zxdh_pf_pcie_config_store(struct dh_core_dev *dh_dev); +int32_t zxdh_pf_get_hp_irq_ctrl_status(struct dh_core_dev *dev); +int32_t zxdh_pf_rp_config_init(struct dh_core_dev *dev); +uint32_t zxdh_pf_get_dev_type(struct dh_core_dev *dh_dev); +#ifdef __cplusplus +} +#endif + +#endif diff --git 
a/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.c b/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.c new file mode 100644 index 000000000000..9645871a4846 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.c @@ -0,0 +1,126 @@ +#include +#include +#include "en_pf_devlink.h" + +struct devlink_ops dh_pf_devlink_ops = { + +}; + +enum { + DH_PF_PARAMS_MAX, +}; + +static int32_t __attribute__((unused)) sample_check(struct dh_core_dev *dev) +{ + return 1; +} + +enum dh_pf_devlink_param_id { + DH_PF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + DH_PF_DEVLINK_PARAM_ID_SAMPLE, +}; + +static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_REGISTER +static const struct devlink_params { + const char *name; + int32_t (*check)(struct dh_core_dev *dev); + struct devlink_param param; +} devlink_params[] = { [DH_PF_PARAMS_MAX] = { + .name = "sample", + .check = &sample_check, + .param = DEVLINK_PARAM_DRIVER( + DH_PF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, + dh_devlink_sample_set, NULL), + } }; + +static int32_t params_register(struct devlink *devlink) +{ + int32_t i = 0; + int32_t err = 0; + struct dh_core_dev *dh_dev = devlink_priv(devlink); + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + if (devlink_params[i].check(dh_dev)) { + err = devlink_param_register(devlink, + &devlink_params[i].param); + if (err) { + goto rollback; + } + } + } + + return 0; + +rollback: + if (i == 0) { + return err; + } + + for (; i > 0; i--) { + devlink_param_unregister(devlink, 
&devlink_params[i].param); + } + + return err; +} + +static int32_t params_unregister(struct devlink *devlink) +{ + int32_t i = 0; + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return 0; +} +#else +static struct devlink_param devlink_params[] = { + [DH_PF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER( + DH_PF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, dh_devlink_sample_set, NULL), +}; + +static int32_t params_register(struct devlink *devlink) +{ + struct dh_core_dev *__attribute__((unused)) + dh_dev = devlink_priv(devlink); + int32_t err = 0; + + err = devlink_params_register(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return err; +} +static int32_t params_unregister(struct devlink *devlink) +{ + devlink_params_unregister(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return 0; +} +#endif + +struct dh_core_devlink_ops dh_pf_core_devlink_ops = { + .params_register = params_register, + .params_unregister = params_unregister +}; diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.h b/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.h new file mode 100644 index 000000000000..83cc1e6dee27 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_devlink.h @@ -0,0 +1,14 @@ +#ifndef __EN_AUX_DEVLINK_H__ +#define __EN_AUX_DEVLINK_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.c b/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.c new file mode 100644 index 000000000000..d0c466cfebfa --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.c @@ -0,0 +1,606 @@ +#include +#include +#include +#include +#include +#include "en_pf_irq.h" +#include "en_pf_eq.h" +#include "../msg_common.h" +#include "../en_pf.h" + +static int32_t create_async_eqs(struct 
dh_core_dev *dev); + +int32_t dh_pf_eq_table_init(struct dh_core_dev *dev) +{ + struct dh_pf_eq_table *table_priv = NULL; + + table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL); + if (unlikely(table_priv == NULL)) { + LOG_ERR("dh_pf_eq_table kvzalloc failed\n"); + return -ENOMEM; + } + + dh_eq_table_init(dev, table_priv); + + return 0; +} + +uint16_t zxdh_pf_get_vqs_channels_num(struct dh_core_dev *dh_dev) +{ + if ((dh_dev->pdev->device == ZXDH_INICA_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICB_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICC_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICA_UPF_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_DPUA_BOND_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_BOND0_DEVICE_ID) || + (dh_dev->pdev->device == ZXDH_INICD_BOND1_DEVICE_ID)) { + return ZXDH_BOND_VQS_CHANNELS_NUM; + } + + if (dh_dev->coredev_type == DH_COREDEV_VF) { + return ZXDH_VF_VQS_CHANNELS_NUM; + } + return ZXDH_VQS_CHANNELS_NUM; +} + +void zxdh_pf_switch_irq(struct dh_core_dev *dh_dev, int32_t i, int32_t op) +{ + if (op) { + enable_irq(i); + return; + } + + disable_irq(i); + return; +} + +int32_t zxdh_pf_vq_irqs_request(struct dh_core_dev *dh_dev, + struct dh_irq **vq_irqs, int32_t vq_channels, + void *data) +{ + struct dh_irq_table *irq_table = &dh_dev->irq_table; + struct dh_pf_irq_table *pf_irq_table = irq_table->priv; + int32_t ret = 0; + int32_t vqs_irq_num = vq_channels; + int numa = dev_to_node(dh_dev->device); + + pf_irq_table->pf_vq_pool->data = data; + ret = dh_irq_affinity_irqs_request_auto(pf_irq_table->pf_vq_pool, + vq_irqs, vqs_irq_num, numa); + if (ret < vqs_irq_num) { + LOG_ERR("the actual obtain irq_num %d < need request irq_num %d\n", + ret, vqs_irq_num); + return -1; + } + + return ret; +} + +void zxdh_pf_affinity_irqs_release(struct dh_core_dev *dh_dev, + struct dh_irq **vq_irqs, int32_t num_irqs) +{ + struct dh_irq_table *irq_table = &dh_dev->irq_table; + struct dh_pf_irq_table *pf_irq_table = 
irq_table->priv; + + dh_irq_affinity_irqs_release(pf_irq_table->pf_vq_pool, vq_irqs, + num_irqs); +} + +static int32_t destroy_async_eq(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct msix_para in = { 0 }; + int32_t err = 0; + + if (pf_dev->quick_remove) + return 0; + + in.vector_risc = ZXDH_PF_INVALID_MSIX_VEC; + in.vector_pfvf = ZXDH_PF_INVALID_MSIX_VEC; + in.vector_mpf = ZXDH_PF_INVALID_MSIX_VEC; + in.driver_type = MSG_CHAN_END_PF; + in.pdev = dev->pdev; + in.virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; + in.pcie_id = pf_dev->pcie_id; + + mutex_lock(&eq_table->lock); + + err = zxdh_bar_enable_chan(&in, &pf_dev->vport); + if (err != 0) { + LOG_ERR("zxdh_bar_disable_chan failed\n"); + } + + mutex_unlock(&eq_table->lock); + + return err; +} + +int32_t dh_pf_eq_table_create(struct dh_core_dev *dev) +{ + int32_t err = 0; + + err = create_async_eqs(dev); + if (err != 0) { + LOG_ERR("Failed to create async EQs: %d\n", err); + return err; + } + + return 0; +} + +/*create eventq*/ +static int32_t create_async_eq(struct dh_core_dev *dev, struct dh_irq *riscv, + struct dh_irq *pf) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct msix_para in = { 0 }; + int ret = 0; + + in.vector_risc = riscv->index; + in.vector_pfvf = pf->index; + in.vector_mpf = ZXDH_PF_INVALID_MSIX_VEC; + + in.driver_type = MSG_CHAN_END_PF; + in.pdev = dev->pdev; + in.virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; + in.pcie_id = pf_dev->pcie_id; + + /* pf设备初始化时清理自身4k对应的两把硬件锁*/ + if (dev->coredev_type == DH_COREDEV_PF) { + bar_chan_pf_init_spinlock(pf_dev->pcie_id, + pf_dev->pci_ioremap_addr[0]); + } + + mutex_lock(&eq_table->lock); + + LOG_DEBUG("msix vector riscv: %d, pfvf: %d\n", riscv->index, pf->index); + ret = zxdh_bar_enable_chan(&in, &pf_dev->vport); + if (!ret) + pf_dev->bar_chan_valid = true; + + 
mutex_unlock(&eq_table->lock); + + return ret; +} + +static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct dh_eq_table *eq_table = &dev->eq_table; + struct dh_events *events = dev->events; + struct dh_event_nb *event_nb = NULL; + uint64_t virt_addr = 0; + int32_t event_type = 0; + uint16_t event_idx = 0; + uint16_t i = 0; + uint8_t src = MSG_CHAN_END_RISC; + uint8_t dst = MSG_CHAN_END_VF; + + if (dev->coredev_type == DH_COREDEV_PF) { + dst = MSG_CHAN_END_PF; + } + + virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; + event_idx = zxdh_get_event_id(virt_addr, src, dst); + event_type = dh_eq_event_type_get(event_idx); + //LOG_INFO("------------- event_idx: %d, event_type: %d------------\n", event_idx, event_type); + + if (events == NULL) { + LOG_ERR("riscv_irq trigger, events is null\n"); + return 0; + } + + for (i = 0; i < events->evt_num; i++) { + event_nb = &events->notifiers[i]; + if (event_type == event_nb->nb.event_type) { + LOG_DEBUG("en_pf event_type[%d] is called\n", + event_type); + atomic_notifier_call_chain(&eq_table->nh[event_type], + event_type, NULL); + return NOTIFY_STOP_MASK; + } + } + + return 0; +} + +static int32_t dh_eq_async_pf_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct dh_eq_table *eq_table = &dev->eq_table; + struct dh_events *events = dev->events; + struct dh_event_nb *event_nb = NULL; + uint64_t virt_addr = 0; + int32_t event_type = 0; + uint16_t event_idx = 0; + uint16_t i = 0; + + if (dev->coredev_type == DH_COREDEV_VF) 
{ + return 0; + } + + virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET + + ZXDH_BAR_PFVF_MSG_OFFSET; + event_idx = + zxdh_get_event_id(virt_addr, MSG_CHAN_END_VF, MSG_CHAN_END_PF); + event_type = dh_eq_event_type_get(event_idx); + + for (i = 0; i < events->evt_num; i++) { + event_nb = &events->notifiers[i]; + + if (event_type == event_nb->nb.event_type) { + LOG_DEBUG("en_pf async pf/vf irq_handler called\n"); + atomic_notifier_call_chain(&eq_table->nh[event_type], + event_type, NULL); + return NOTIFY_STOP_MASK; + } + } + + return 0; +} + +static int32_t dh_eq_async_link_info_int_bond_pf(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + + if (!zxdh_pf_is_bond(dev)) { + LOG_DEBUG("isn't bond_pf exit\n"); + return 0; + } + + zxdh_events_work_enqueue(dev, + &pf_dev->link_info_irq_update_vf_bond_pf_work); + return 0; +} + +static int32_t dh_eq_async_extpps_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_pps_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_pps_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + &eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_EXT_PPS], + DH_EVENT_TYPE_NOTIFY_RISC_EXT_PPS, NULL); + return 0; +} + +static int32_t dh_eq_async_local_pps_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_pps_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_pps_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + &eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_LOCAL_PPS], + DH_EVENT_TYPE_NOTIFY_RISC_LOCAL_PPS, NULL); + return 0; +} + +static 
int32_t dh_eq_async_mac_info_pf(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (dh_dev->coredev_type == DH_COREDEV_VF) { + return 0; + } + + zxdh_events_work_enqueue(dh_dev, &pf_dev->mac_info_pf_work); + return 0; +} + +static int32_t dh_eq_rsv_int(struct notifier_block *nb, unsigned long action, + void *data) +{ + LOG_DEBUG("dh_eq_rsv_int is called\n"); + return 0; +} + +/* 注意:数组长度不能超过 ZXDH_VF_ASYNC_CHANNELS_NUM */ +static struct dh_pf_async_irq_table dh_pf_async_irq_tbl[] = { + { "link_info", dh_eq_async_link_info_int_bond_pf }, + { "riscv", dh_eq_async_riscv_int }, + { "pf", dh_eq_async_pf_int }, + { "expps", dh_eq_async_extpps_int }, + { "localpps", dh_eq_async_local_pps_int }, + { "rsv", dh_eq_rsv_int }, + { "mac_info", dh_eq_async_mac_info_pf }, +}; + +static struct dh_pf_async_irq_table dh_vf_async_irq_tbl[] = { + { "link_info", dh_eq_async_link_info_int_bond_pf }, + { "riscv", dh_eq_async_riscv_int }, + { "pf", dh_eq_async_pf_int }, + { "expps", dh_eq_async_extpps_int }, + { "localpps", dh_eq_async_local_pps_int }, +}; + +static void cleanup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq, + const char *name) +{ + dh_eq_disable(dev, &eq->core, &eq->irq_nb); +} + +static void destroy_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_pf_eq_table *table_priv = table->priv; + const struct dh_pf_async_irq_table *irq_tbl; + int32_t tbl_size = 0; + int32_t i = 0; + + if (dev->coredev_type == DH_COREDEV_PF) { + irq_tbl = dh_pf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_pf_async_irq_tbl); + } else { + irq_tbl = dh_vf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_vf_async_irq_tbl); + } + + for (i = 0; i < tbl_size; ++i) { + cleanup_async_eq(dev, &table_priv->async_eq_tbl[i], + 
irq_tbl[i].name); + } + destroy_async_eq(dev); + dh_irqs_release_vectors(table_priv->async_irq_tbl, tbl_size); +} + +void dh_pf_eq_table_destroy(struct dh_core_dev *dev) +{ + destroy_async_eqs(dev); +} + +static int32_t create_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_pf_eq_table *table_priv = table->priv; + struct dh_eq_param param = {}; + int32_t err = 0; + const struct dh_pf_async_irq_table *irq_tbl; + int32_t tbl_size = 0; + int32_t i = 0; + int32_t j = 0; + int32_t k = 0; + + if (dev->coredev_type == DH_COREDEV_PF) { + irq_tbl = dh_pf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_pf_async_irq_tbl); + } else { + irq_tbl = dh_vf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_vf_async_irq_tbl); + } + + for (i = 0; i < tbl_size; ++i) { + table_priv->async_irq_tbl[i] = dh_pf_async_irq_request(dev); + if (IS_ERR(table_priv->async_irq_tbl[i])) { + err = PTR_ERR(table_priv->async_irq_tbl[i]); + LOG_ERR("Failed to get async_irq_tbl[%d]\n", i); + goto err_async_irq_request; + } + } + + err = create_async_eq(dev, table_priv->async_irq_tbl[1], + table_priv->async_irq_tbl[2]); + if (err != 0) { + LOG_ERR("Failed to create async_eq\n"); + goto err_async_irq_request; + } + + param.nent = 10; + param.event_type = DH_EVENT_QUEUE_TYPE_RISCV; + for (j = 0; j < tbl_size; ++j) { + param.irq = table_priv->async_irq_tbl[j], + err = setup_async_eq(dev, &table_priv->async_eq_tbl[j], ¶m, + irq_tbl[j].async_int, irq_tbl[j].name, + dev); + if (err != 0) { + LOG_ERR("Failed to setup async_eq_tbl[%d]\n", j); + goto err_setup_async_eq; + } + } + + return 0; + +err_setup_async_eq: + for (k = 0; k < j; ++k) { + cleanup_async_eq(dev, &table_priv->async_eq_tbl[j], + irq_tbl[j].name); + } + destroy_async_eq(dev); +err_async_irq_request: + dh_irqs_release_vectors(table_priv->async_irq_tbl, i); + return err; +} + +int32_t zxdh_pf_async_eq_enable(struct dh_core_dev *dh_dev, + struct dh_eq_async *eq, const char *name, + bool attach) +{ + struct 
dh_eq_table *table = &dh_dev->eq_table; + struct dh_pf_eq_table *table_priv = table->priv; + int32_t err = 0; + const struct dh_pf_async_irq_table *irq_tbl; + int32_t tbl_size = 0; + int32_t i = 0; + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + irq_tbl = dh_pf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_pf_async_irq_tbl); + } else { + irq_tbl = dh_vf_async_irq_tbl; + tbl_size = ARRAY_SIZE(dh_vf_async_irq_tbl); + } + + for (i = 0; i < tbl_size; ++i) { + if (strcmp(irq_tbl[i].name, name) == 0) { + eq->core.irq = table_priv->async_irq_tbl[i]; + break; + } + } + + if (i == tbl_size) { + LOG_ERR("failed to find %s irq\n", name); + return -1; + } + + LOG_DEBUG("%s attach[%d] irq[%d]\n", name, attach, eq->core.irq->index); + if (attach) { + err = dh_eq_enable(dh_dev, &eq->core, &eq->irq_nb); + if (err != 0) { + LOG_WARN("failed to enable EQ %d\n", err); + } + } else { + dh_eq_disable(dh_dev, &eq->core, &eq->irq_nb); + } + + return err; +} + +void zxdh_pf_get_link_info_from_vqm(struct dh_core_dev *dh_dev, + uint8_t *link_up) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t dev_link_up = 0; + + dev_link_up = ioread32((void __iomem *)((uintptr_t)pf_dev->device + + ZXDH_DEV_MAC_HIGH_OFFSET)); + *link_up = (dev_link_up >> 16) & 0xff; + + LOG_DEBUG("dev pcieid:0x%x ******** link_up: %d ********\n", + pf_dev->pcie_id, *link_up); + return; +} + +void zxdh_pf_set_vf_link_info(struct dh_core_dev *dh_dev, uint16_t vf_idx, + uint8_t link_up) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t dev_link_up = 0; + LOG_DEBUG("dev pcieid:0x%x write link_val %d to vf%d\n", + pf_dev->pcie_id, link_up, vf_idx); + + if (pf_dev->pf_sriov_cap_base) { + dev_link_up = ioread32( + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_idx + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + dev_link_up = (dev_link_up & ~(0xFF << 16)) | + ((uint32_t)(link_up) << 16); + iowrite32(dev_link_up, + (void __iomem 
*)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_idx + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + } + return; +} + +bool zxdh_pf_get_vf_is_probe(struct dh_core_dev *dh_dev, uint16_t vf_idx) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint32_t dev_link_up = 0; + struct zxdh_vf_item *vf_item = NULL; + uint16_t num_vfs = 0; + + /* 判断vf是否超过上限 */ + num_vfs = pci_num_vf(dh_dev->pdev); + if (vf_idx >= num_vfs) { + LOG_ERR("invalid VF idx: %d\n", vf_idx); + return false; + } + + if (pf_dev->pf_sriov_cap_base) { + dev_link_up = ioread32( + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_idx + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + } + + vf_item = &pf_dev->vf_item[vf_idx]; + return ((((uint8_t)((dev_link_up >> 16) & 0xff)) != 0xff) && + (vf_item->is_probed)); +} + +void zxdh_pf_set_pf_phy_port(struct dh_core_dev *dh_dev, uint8_t phy_port) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + pf_dev->phy_port = phy_port; + +#ifdef CONFIG_DINGHAI_TSN + if (dh_dev->coredev_type == DH_COREDEV_PF) { + if (pf_dev->tsn == NULL) { + LOG_ERR("pf_dev->tsn is null\n"); + return; + } + pf_dev->tsn->phy_port_id = phy_port; + } +#endif +} + +uint8_t zxdh_pf_get_pf_phy_port(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + return pf_dev->phy_port; +} + +void zxdh_pf_set_pf_link_up(struct dh_core_dev *dh_dev, bool link_up) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + pf_dev->link_up = link_up; +} + +bool zxdh_pf_get_pf_link_up(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + return pf_dev->link_up; +} + +int32_t zxdh_pf_call_aux_events(struct dh_core_dev *dev, int32_t event_type) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + int32_t err = 0; + + atomic_notifier_call_chain(&eq_table->nh[event_type], event_type, &err); + if (err != 0) { + return err; + } + + if (event_type == 
DH_EVENT_TYPE_AUX_LOAD) + zxdh_pf_status_ok(dev); + + return 0; +} + +int32_t zxdh_pf_call_aux_events_with_data(struct dh_core_dev *dev, + int32_t event_type, void *data) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain(&eq_table->nh[event_type], event_type, data); + return 0; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.h b/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.h new file mode 100644 index 000000000000..12ca6e441a10 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_eq.h @@ -0,0 +1,65 @@ +#ifndef __EN_PF_EQ_H__ +#define __EN_PF_EQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include + +#define ZXDH_PF_INVALID_MSIX_VEC 0xffff +#define ZXDH_MAC_FLAG_BAR_OFFSET 0xFB030 +#define ZXDH_EP_FLAG_SIZE 2048 +#define ZXDH_PF_FLAG_SIZE 256 +#define ZXDH_VF_NUM 256 + +int32_t dh_pf_eq_table_create(struct dh_core_dev *dev); +void dh_pf_eq_table_destroy(struct dh_core_dev *dev); +int32_t dh_pf_eq_table_init(struct dh_core_dev *dev); +uint16_t zxdh_pf_get_vqs_channels_num(struct dh_core_dev *dh_dev); + +void zxdh_pf_switch_irq(struct dh_core_dev *dh_dev, int32_t i, int32_t op); +int32_t zxdh_pf_vq_irqs_request(struct dh_core_dev *dh_dev, + struct dh_irq **vq_irqs, int32_t vq_channels, + void *data); +void zxdh_pf_affinity_irqs_release(struct dh_core_dev *dh_dev, + struct dh_irq **vq_irqs, int32_t num_irqs); +void zxdh_enable_irq(struct dh_core_dev *dh_dev, int32_t irq_index); + +int32_t zxdh_pf_async_eq_enable(struct dh_core_dev *dev, struct dh_eq_async *eq, + const char *name, bool attach); +void zxdh_pf_set_pf_link_up(struct dh_core_dev *dh_dev, bool link_up); +bool zxdh_pf_get_pf_link_up(struct dh_core_dev *dh_dev); +void zxdh_pf_set_vf_link_info(struct dh_core_dev *dh_dev, uint16_t vf_idx, + uint8_t link_up); +bool zxdh_pf_get_vf_is_probe(struct dh_core_dev *dh_dev, uint16_t vf_idx); +void zxdh_pf_get_link_info_from_vqm(struct dh_core_dev 
*dh_dev, + uint8_t *link_up); +void zxdh_pf_set_pf_phy_port(struct dh_core_dev *dh_dev, uint8_t phy_port); +uint8_t zxdh_pf_get_pf_phy_port(struct dh_core_dev *dh_dev); +int32_t zxdh_pf_call_aux_events(struct dh_core_dev *dev, int32_t event_type); +int32_t zxdh_pf_call_aux_events_with_data(struct dh_core_dev *dev, + int32_t event_type, void *data); + +struct dh_pf_eq_table { + struct dh_irq **vq_irqs; + int32_t vq_irq_num; + struct list_head vqs_eqs_list; + struct dh_irq *async_irq_tbl[ZXDH_ASYNC_CHANNELS_NUM]; + struct dh_eq_async async_eq_tbl[ZXDH_ASYNC_CHANNELS_NUM]; +}; + +struct dh_pf_async_irq_table { + char name[64]; + notifier_fn_t async_int; +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_events.c b/drivers/net/ethernet/dinghai/en_pf/en_pf_events.c new file mode 100644 index 000000000000..efa1bc7d1ee2 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_events.c @@ -0,0 +1,549 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "en_pf_events.h" +#include "../en_pf.h" +#include "../msg_common.h" +#include "en_pf_eq.h" +#include "../en_aux.h" + +static int32_t riscv2pf_notifier(struct notifier_block *, unsigned long, + void *); +static int32_t riscv_ready_notifier(struct notifier_block *, unsigned long, + void *); +static int32_t vf2pf_notifier(struct notifier_block *, unsigned long, void *); +static int32_t riscv_ext_pps_notifier(struct notifier_block *, unsigned long, + void *); +static int32_t riscv_local_pps_notifier(struct notifier_block *nb, + unsigned long type, void *data); + +#ifdef PTP_DRIVER_INTERFACE_EN +extern irqreturn_t +msix_extern_pps_irq_from_risc_handler(struct zxdh_pf_device *dev); +extern irqreturn_t +msix_local_pps_irq_from_risc_handler(struct zxdh_pf_device *dev); +#endif + +static struct dh_nb pf_events[] = { + { .nb.notifier_call = riscv_ready_notifier, + .event_type = DH_EVENT_TYPE_RISCV_READY }, + { .nb.notifier_call = 
vf2pf_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_VF_TO_PF }, + { .nb.notifier_call = riscv_ext_pps_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_RISC_EXT_PPS }, + { .nb.notifier_call = riscv_local_pps_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_RISC_LOCAL_PPS }, + { .nb.notifier_call = riscv2pf_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_ANY }, +}; + +static int32_t riscv2pf_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + zxdh_events_work_enqueue(dh_dev, &pf_dev->riscv2pf_msg_proc_work); + + return NOTIFY_OK; +} + +static int32_t riscv_ready_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + uint64_t virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; + zxdh_bar_reset_valid(virt_addr); + zxdh_events_work_enqueue(dh_dev, &pf_dev->riscv_ready_work); + + return NOTIFY_OK; +} + +static int32_t vf2pf_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + LOG_DEBUG("is called\n"); + + zxdh_events_work_enqueue(dh_dev, &pf_dev->vf2pf_msg_proc_work); + + return NOTIFY_OK; +} + +static int32_t riscv_ext_pps_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + LOG_DEBUG("is called\n"); + 
+ zxdh_events_work_enqueue(dh_dev, &pf_dev->riscv_ext_pps_work); + + return NOTIFY_OK; +} + +static int32_t riscv_local_pps_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + LOG_DEBUG("is called\n"); + + zxdh_events_work_enqueue(dh_dev, &pf_dev->riscv_local_pps_work); + + return NOTIFY_OK; +} + +extern int32_t zxdh_plug_aux_dev(struct dh_core_dev *dh_dev); + +static void riscv2pf_msg_proc_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = container_of( + _work, struct zxdh_pf_device, riscv2pf_msg_proc_work); + + uint16_t src = MSG_CHAN_END_RISC; + uint16_t dst = MSG_CHAN_END_PF; + uint64_t virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET; + + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + zxdh_bar_irq_recv(src, dst, virt_addr, NULL); +} + +static void vf2pf_msg_proc_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = + container_of(_work, struct zxdh_pf_device, vf2pf_msg_proc_work); + + uint16_t src = MSG_CHAN_END_VF; + uint16_t dst = MSG_CHAN_END_PF; + uint64_t virt_addr = pf_dev->pci_ioremap_addr[0] + ZXDH_BAR_MSG_OFFSET + + ZXDH_BAR_PFVF_MSG_OFFSET; + + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + zxdh_bar_irq_recv(src, dst, virt_addr, pf_dev); +} + +static void riscv_ready_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = + container_of(_work, struct zxdh_pf_device, riscv_ready_work); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + zxdh_plug_aux_dev(dh_dev); +} + +int32_t findFirstSetBit(uint8_t link_up_val) +{ + uint8_t i = 0; + for (; i < 8; i++) { + if (link_up_val & (1 << i)) { + return i; // 返回第一个设置位的位置 + } + } + return -1; // 没有找到 +} + +int32_t get_link_up_phyport(uint8_t 
link_up_val, struct zxdh_pf_device *pf_dev, + uint8_t *phyport_val) +{ + int16_t first_link_up_idx = -1; + uint8_t port_num = pf_dev->port_resource.pannel_num; + struct zxdh_pannle_port *port; + int32_t idx = 0; + + first_link_up_idx = findFirstSetBit(link_up_val); + if (first_link_up_idx < 0) { + return -1; + } + //基于first_link_up_idx找到phyport值TODO:待完善 + for (idx = 0; idx < port_num; idx++) { + port = &pf_dev->port_resource.port[idx]; + if (port->link_check_bit == first_link_up_idx) { + *phyport_val = port->phyport; + LOG_DEBUG("first link_up idx %d <-> phyport 0x%x\n", + first_link_up_idx, port->phyport); + return 0; + } + } + + return -1; +} + +static void +link_info_irq_update_vf_bond_pf_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = + container_of(_work, struct zxdh_pf_device, + link_info_irq_update_vf_bond_pf_work); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_vf_item *vf_item = NULL; + int32_t err = 0; + uint16_t vf_idx = 0; + struct pci_dev *pdev = dh_dev->pdev; + uint16_t num_vfs = 0; + uint8_t link_up_val = 0; + uint8_t phyport_val = 0; + uint8_t link_info = 0; + uint16_t func_no = 0; + uint16_t pf_no = FIND_PF_ID(pf_dev->pcie_id); + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%zu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + zxdh_pf_get_link_info_from_vqm(dh_dev, &link_up_val); + LOG_DEBUG("[pf_level] bond_pf pcie_id:0x%x read from VQM, val: 0x%x\n", + pf_dev->pcie_id, link_up_val); + pf_dev->link_up = (link_up_val == 0) ? 
FALSE : TRUE; + + if (pf_dev->link_up) { + if (get_link_up_phyport(link_up_val, pf_dev, &phyport_val) < + 0) { + LOG_ERR("failed to get link up phyport\n"); + kfree(msg); + return; + } + link_up_val = 1; + } + + link_info = (phyport_val & 0x0F) << 4 | (link_up_val & 0x0F); + msg->payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY; + msg->payload.hdr_to_agt.pcie_id = pf_dev->pcie_id; + num_vfs = pci_num_vf(pdev); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_idx); + if (vf_item->link_forced == FALSE && vf_item->is_probed) { + func_no = GET_FUNC_NO(pf_no, vf_idx); + msg->payload.pcie_msix_msg + .func_no[msg->payload.pcie_msix_msg.num++] = + func_no; + zxdh_pf_set_vf_link_info(dh_dev, vf_idx, link_info); + LOG_DEBUG( + "[pf_level] bond_pf pcie_id:0x%x write phyport[0x%x] and link_up[%d] to VF[%d] VQM[0x%x]\n", + pf_dev->pcie_id, phyport_val, link_up_val, + vf_idx, link_info); + } + } + LOG_DEBUG("vf num:%d\n", msg->payload.pcie_msix_msg.num); + if (msg->payload.pcie_msix_msg.num > 0) { + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_MAC, msg, msg, ¶); + if (err != 0) { + LOG_ERR("failed to update VF link info\n"); + } + } + + kfree(msg); +} + +static void init_vf_link_info_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = container_of( + _work, struct zxdh_pf_device, init_vf_link_info_work); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_vf_item *vf_item = NULL; + int32_t err = 0; + uint16_t vf_idx = 0; + struct pci_dev *pdev = dh_dev->pdev; + uint16_t num_vfs = 0; + uint8_t link_up_val = 0; + uint8_t link_info = 0; + uint16_t func_no = 0; + uint16_t pf_no = FIND_PF_ID(pf_dev->pcie_id); + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + LOG_DEBUG("is called\n"); + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + 
if (msg == NULL) { + LOG_ERR("kzalloc(%zu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return; + } + + zxdh_pf_get_link_info_from_vqm(dh_dev, &link_up_val); + pf_dev->link_up = (link_up_val == 0) ? FALSE : TRUE; + + if (zxdh_pf_is_upf(dh_dev)) { + link_info = (pf_dev->phy_port & 0x0F) << 4 | + (link_up_val & 0x0F); + LOG_DEBUG("upf update vf link_info: %u\n", link_info); + } else { + link_info = pf_dev->link_up ? 1 : 0; + } + + msg->payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY; + msg->payload.hdr_to_agt.pcie_id = pf_dev->pcie_id; + + num_vfs = pci_num_vf(pdev); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_idx); + if (vf_item->link_forced == FALSE && vf_item->is_probed) { + func_no = GET_FUNC_NO(pf_no, vf_idx); + msg->payload.pcie_msix_msg + .func_no[msg->payload.pcie_msix_msg.num++] = + func_no; + zxdh_pf_set_vf_link_info(dh_dev, vf_idx, link_info); + LOG_DEBUG("pcie_id:0x%x init VF[%d] VQM[0x%x]\n", + pf_dev->pcie_id, vf_idx, link_info); + } + } + LOG_DEBUG("pcie_id:0x%x vf num:%d\n", pf_dev->pcie_id, + msg->payload.pcie_msix_msg.num); + if (msg->payload.pcie_msix_msg.num > 0) { + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_MAC, msg, msg, ¶); + if (err != 0) { + LOG_ERR("failed to update VF link info\n"); + } + } + + kfree(msg); +} + +static void riscv_extern_pps_handler(struct work_struct *_work) +{ +#ifdef PTP_DRIVER_INTERFACE_EN + struct zxdh_pf_device *pf_dev = + container_of(_work, struct zxdh_pf_device, riscv_ext_pps_work); + msix_extern_pps_irq_from_risc_handler(pf_dev); +#endif +} + +static void riscv_local_pps_handler(struct work_struct *_work) +{ +#ifdef PTP_DRIVER_INTERFACE_EN + struct zxdh_pf_device *pf_dev = container_of( + _work, struct zxdh_pf_device, riscv_local_pps_work); + msix_local_pps_irq_from_risc_handler(pf_dev); +#endif +} + +static void mac_info_pf_work_handler(struct work_struct *_work) +{ + struct zxdh_pf_device *pf_dev = + container_of(_work, struct zxdh_pf_device, 
mac_info_pf_work); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_vf_item *vf_item = NULL; + struct pci_dev *pdev = dh_dev->pdev; + uint16_t vf_vport = 0; + DPP_PF_INFO_T pf_info = { 0 }; + MAC_VPORT_INFO *unicast_mac_arry = NULL; + uint32_t current_unicast_num = 0; + int32_t retval = 0; + uint32_t i = 0; + uint32_t j = 0; + int32_t num_vfs = 0; + uint32_t val = 0; + uint8_t vf_flag[ZXDH_VF_NUM] = { 0 }; + uint8_t sum_flag = 0; + uint8_t ep_id = (pf_dev->pcie_id >> 12) & 0x7; + uint8_t pf_id = (pf_dev->pcie_id >> 8) & 0x7; + + LOG_INFO("mac_info_pf_work_handler is call\n"); + ZXDH_AUX_COMP_FLAG_CHECK(pf_dev); + + if (dh_dev->coredev_type == DH_COREDEV_VF) { + return; + } + + unicast_mac_arry = (MAC_VPORT_INFO *)kzalloc( + sizeof(MAC_VPORT_INFO) * UNICAST_MAX_NUM, GFP_KERNEL); + if (unicast_mac_arry == NULL) { + LOG_ERR("kzalloc unicast_mac_arry failed \n"); + return; + } + + num_vfs = pci_num_vf(pdev); + if (num_vfs == 0) { + kfree(unicast_mac_arry); + return; + } + + for (i = 0; i < num_vfs; i++) { + val = ioread8(( + void __iomem *)(uintptr_t)(pf_dev->pci_ioremap_addr[0] + + ZXDH_MAC_FLAG_BAR_OFFSET + + ep_id * ZXDH_EP_FLAG_SIZE + + pf_id * ZXDH_PF_FLAG_SIZE + + i)); + if (val == 1) { + vf_flag[sum_flag] = i; + iowrite8( + 0, + (void __iomem + *)(uintptr_t)(pf_dev->pci_ioremap_addr + [0] + + ZXDH_MAC_FLAG_BAR_OFFSET + + ep_id * ZXDH_EP_FLAG_SIZE + + pf_id * ZXDH_PF_FLAG_SIZE + + i)); + sum_flag++; + } + } + + for (i = 0; i < sum_flag; i++) { + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_flag[i]); + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + vf_vport = vf_item->vport; + retval = dpp_unicast_mac_dump(&pf_info, unicast_mac_arry, + ¤t_unicast_num); + if (retval != 0) { + kfree(unicast_mac_arry); + LOG_ERR("dpp_unicast_mac_dump failed, retval:%d\n", + retval); + return; + } + + for (j = 0; j < current_unicast_num; j++) { + if (vf_vport == unicast_mac_arry[j].vport) { + retval = dpp_del_mac( + 
&pf_info, unicast_mac_arry[j].addr, + unicast_mac_arry[j].sriov_vlan_tpid, + unicast_mac_arry[j].sriov_vlan_id); + if (retval != 0) { + LOG_ERR("dpp_del_mac failed, ret: %d\n", + retval); + kfree(unicast_mac_arry); + return; + } + LOG_INFO("dpp_del_mac sucess, vport: 0x%x\n", + vf_vport); + } + } + + vf_item->is_probed = false; + } + + kfree(unicast_mac_arry); + return; +} + +void zxdh_pf_nh_attach(struct dh_core_dev *dev, struct dh_nb *nb, bool attach) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + + if (attach) + dh_eq_notifier_register(eq_table, nb); + else + dh_eq_notifier_unregister(eq_table, nb); +} + +int32_t dh_pf_events_init(struct dh_core_dev *dev) +{ + struct dh_events *events = NULL; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int32_t i = 0; + int32_t ret = 0; + uint32_t evt_num = ARRAY_SIZE(pf_events); + + if (pf_dev->bond_num != 0) { + evt_num -= 1; + } + events = kzalloc( + (sizeof(*events) + evt_num * sizeof(struct dh_event_nb)), + GFP_KERNEL); + if (unlikely(events == NULL)) { + LOG_ERR("events kzalloc failed: %p\n", events); + ret = -ENOMEM; + goto err_events_kzalloc; + } + + events->evt_num = evt_num; + events->dev = dev; + dev->events = events; + events->wq = create_singlethread_workqueue("dh_pf_events"); + if (!events->wq) { + LOG_ERR("events->wq create_singlethread_workqueue failed: %p\n", + events->wq); + ret = -ENOMEM; + goto err_create_wq; + } + + INIT_WORK(&pf_dev->riscv_ready_work, riscv_ready_work_handler); + INIT_WORK(&pf_dev->riscv2pf_msg_proc_work, + riscv2pf_msg_proc_work_handler); + INIT_WORK(&pf_dev->vf2pf_msg_proc_work, vf2pf_msg_proc_work_handler); + INIT_WORK(&pf_dev->link_info_irq_update_vf_bond_pf_work, + link_info_irq_update_vf_bond_pf_work_handler); + INIT_WORK(&pf_dev->init_vf_link_info_work, + init_vf_link_info_work_handler); + INIT_WORK(&pf_dev->riscv_ext_pps_work, riscv_extern_pps_handler); + INIT_WORK(&pf_dev->riscv_local_pps_work, riscv_local_pps_handler); + INIT_WORK(&pf_dev->mac_info_pf_work, 
mac_info_pf_work_handler); + + for (i = 0; i < evt_num; i++) { + events->notifiers[i].nb = pf_events[i]; + events->notifiers[i].ctx = dev; + dh_eq_notifier_register(&dev->eq_table, + &events->notifiers[i].nb); + } + + return 0; + +err_create_wq: + kfree(events); +err_events_kzalloc: + return ret; +} + +void dh_pf_events_uninit(struct dh_core_dev *dev) +{ + struct dh_events *events = dev->events; + int32_t i = 0; + + for (i = events->evt_num - 1; i >= 0; i--) { + dh_eq_notifier_unregister(&dev->eq_table, + &events->notifiers[i].nb); + } + + zxdh_events_cleanup(dev); + return; +} + +void dh_pf_sriov_cap_cfg_uninit(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + if (dev->coredev_type == DH_COREDEV_PF && + pf_dev->pf_sriov_cap_base != NULL) { + iounmap((void *)pf_dev->pf_sriov_cap_base); + pf_dev->pf_sriov_cap_base = NULL; + } + return; +} diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_events.h b/drivers/net/ethernet/dinghai/en_pf/en_pf_events.h new file mode 100644 index 000000000000..14bf2af7c597 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_events.h @@ -0,0 +1,17 @@ +#ifndef __EN_PF_EVENTS_H__ +#define __EN_PF_EVENTS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t dh_pf_events_init(struct dh_core_dev *dev); +void dh_pf_events_uninit(struct dh_core_dev *dev); +void dh_pf_sriov_cap_cfg_uninit(struct dh_core_dev *dev); +void zxdh_pf_nh_attach(struct dh_core_dev *dev, struct dh_nb *nb, bool attach); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.c b/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.c new file mode 100644 index 000000000000..b2d36fa68030 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.c @@ -0,0 +1,190 @@ +#include +#include +#include +#include "en_pf_irq.h" +#include "en_pf_eq.h" + +#define ZXDH_PF_ASYNC_IRQ_MIN_COMP 0 +#define ZXDH_PF_ASYNC_IRQ_MAX_COMP 7 + +#define ZXDH_PF_RDMA_IRQ_MIN 0 +#define ZXDH_PF_RDMA_IRQ_MAX 5 + 
+#define ZXDH_PF_COMP_IRQ_MIN_COMP 0 +#define ZXDH_PF_COMP_IRQ_MAX_COMP 1 + +#define ZXDH_PF_VQ_IRQ_MIN 0 +#define ZXDH_PF_VQ_IRQ_MAX 17 + +struct dh_irq_range { + int32_t start; + int32_t size; +}; + +static int32_t irq_pools_init(struct dh_core_dev *dev, int vq_n, + int pf_async_vec) +{ + struct dh_irq_table *table = &dev->irq_table; + struct dh_pf_irq_table *pf_irq_table = + (struct dh_pf_irq_table *)table->priv; + int32_t err = 0; + struct dh_irq_range irq_range; + + if (vq_n > 0) { + irq_range.start = ZXDH_VQS_IRQ_START_IDX; + if (dev->coredev_type == DH_COREDEV_VF) { + irq_range.start = ZXDH_VF_VQS_IRQ_START_IDX; + } + irq_range.size = vq_n; + + pf_irq_table->pf_vq_pool = irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_pf_vq", + ZXDH_PF_VQ_IRQ_MIN, ZXDH_PF_VQ_IRQ_MAX); + if (IS_ERR_OR_NULL(pf_irq_table->pf_vq_pool)) { + LOG_ERR("pf_irq_table->pf_vq_pool irq_pool_alloc failed\n"); + return PTR_ERR(pf_irq_table->pf_vq_pool); + } + + pf_irq_table->pf_vq_pool->irqs_per_cpu = + kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL); + if (unlikely(pf_irq_table->pf_vq_pool->irqs_per_cpu == NULL)) { + LOG_ERR("pf_irq_table->pf_vq_pool->irqs_per_cpu kcalloc failed\n"); + err = -ENOMEM; + goto err_irqs_per_cpu; + } + } + + if (pf_async_vec > 0) { + irq_range.start = 0; + irq_range.size = pf_async_vec; + pf_irq_table->pf_async_pool = irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_pf_async", + ZXDH_PF_ASYNC_IRQ_MIN_COMP, ZXDH_PF_ASYNC_IRQ_MAX_COMP); + if (IS_ERR_OR_NULL(pf_irq_table->pf_async_pool)) { + LOG_ERR("pf_irq_table->pf_async_pool irq_pool_alloc failed\n"); + err = PTR_ERR(pf_irq_table->pf_async_pool); + goto err_irq_async_pool; + } + } + + irq_range.start = ZXDH_RDMA_IRQ_START_IDX; + if (dev->coredev_type == DH_COREDEV_VF) { + irq_range.start = ZXDH_VF_RDMA_IRQ_START_IDX; + } + irq_range.size = ZXDH_RDMA_CHANNELS_NUM; + pf_irq_table->pf_rdma_pool = irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_pf_rdma", + 
ZXDH_PF_RDMA_IRQ_MIN, ZXDH_PF_RDMA_IRQ_MAX); + if (IS_ERR_OR_NULL(pf_irq_table->pf_rdma_pool)) { + LOG_ERR("pf_irq_table->pf_rdma_pool irq_pool_alloc failed\n"); + err = PTR_ERR(pf_irq_table->pf_rdma_pool); + goto err_irq_rdma_pool; + } + + return 0; + +err_irq_rdma_pool: + if (pf_async_vec > 0) { + irq_pool_free(pf_irq_table->pf_async_pool); + } +err_irq_async_pool: +err_irqs_per_cpu: + if (vq_n > 0) { + irq_pool_free(pf_irq_table->pf_vq_pool); + } + return err; +} + +static void irq_pools_destroy(struct dh_irq_table *table) +{ + struct dh_pf_irq_table *pf_irq_table = NULL; + + pf_irq_table = (struct dh_pf_irq_table *)table->priv; + pf_irq_table->pf_vq_pool ? irq_pool_free(pf_irq_table->pf_vq_pool) : 0; + pf_irq_table->pf_async_pool ? + irq_pool_free(pf_irq_table->pf_async_pool) : + 0; + pf_irq_table->pf_rdma_pool ? irq_pool_free(pf_irq_table->pf_rdma_pool) : + 0; +} + +static int32_t zxdh_get_total_vec(struct dh_core_dev *dev) +{ + if (dev->coredev_type == DH_COREDEV_VF) { + return ZXDH_VF_VQS_CHANNELS_NUM + ZXDH_VF_ASYNC_CHANNELS_NUM + + ZXDH_RDMA_CHANNELS_NUM; + } + return ZXDH_VQS_CHANNELS_NUM + ZXDH_ASYNC_CHANNELS_NUM + + ZXDH_RDMA_CHANNELS_NUM; +} + +int32_t dh_pf_irq_table_create(struct dh_core_dev *dev) +{ + int32_t total_vec = 0; + int32_t err = 0; + + total_vec = zxdh_get_total_vec(dev); + + total_vec = pci_alloc_irq_vectors(dev->pdev, total_vec, total_vec, + PCI_IRQ_MSIX); + if (total_vec < 0) { + LOG_ERR("pci_alloc_irq_vectors failed: %d\n", total_vec); + return total_vec; + } + + if (dev->coredev_type == DH_COREDEV_VF) { + err = irq_pools_init(dev, ZXDH_VF_VQS_CHANNELS_NUM, + ZXDH_VF_ASYNC_CHANNELS_NUM); + } else { + err = irq_pools_init(dev, ZXDH_VQS_CHANNELS_NUM, + ZXDH_ASYNC_CHANNELS_NUM); + } + if (err != 0) { + LOG_ERR("irq_pools_init failed: %d\n", err); + pci_free_irq_vectors(dev->pdev); + } + + return err; +} + +void dh_pf_irq_table_destroy(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + + /* There are cases where 
IRQs still will be in used when we reaching + * to here. Hence, making sure all the irqs are released. + */ + irq_pools_destroy(table); + pci_free_irq_vectors(dev->pdev); +} + +struct dh_irq *dh_pf_async_irq_request(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + struct dh_pf_irq_table *pf_irq_table; + + pf_irq_table = (struct dh_pf_irq_table *)table->priv; + + return pf_irq_table->pf_async_pool ? + zxdh_get_irq_of_pool(dev, pf_irq_table->pf_async_pool) : + NULL; +} + +/* irq_table API */ +int32_t dh_pf_irq_table_init(struct dh_core_dev *dev) +{ + struct dh_irq_table *irq_table; + struct dh_pf_irq_table *pf_irq_table = NULL; + + irq_table = &dev->irq_table; + + pf_irq_table = kvzalloc(sizeof(*pf_irq_table), GFP_KERNEL); + if (unlikely(pf_irq_table == NULL)) { + LOG_ERR("pf_irq_table kvzalloc failed\n"); + return -ENOMEM; + } + + irq_table->priv = pf_irq_table; + + return 0; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.h b/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.h new file mode 100644 index 000000000000..47c92dc5d0ed --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/en_pf_irq.h @@ -0,0 +1,27 @@ +#ifndef __EN_PF_IRQ_H__ +#define __EN_PF_IRQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +struct dh_irq *dh_pf_async_irq_request(struct dh_core_dev *dev); +int32_t dh_pf_irq_table_create(struct dh_core_dev *dev); +void dh_pf_irq_table_destroy(struct dh_core_dev *dev); +int32_t dh_pf_irq_table_init(struct dh_core_dev *dev); + +struct dh_pf_irq_table { + struct dh_irq_pool *sf_comp_pool; + struct dh_irq_pool *pf_async_pool; + struct dh_irq_pool *pf_rdma_pool; + struct dh_irq_pool *pf_vq_pool; +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_pf/msg_func.c b/drivers/net/ethernet/dinghai/en_pf/msg_func.c new file mode 100644 index 000000000000..c693c2163c53 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_pf/msg_func.c @@ -0,0 +1,3207 @@ +#include +#include +#include "../msg_common.h" +#include "../en_pf.h" +#include "../en_aux/en_aux_cmd.h" +#include "../en_aux.h" +#include "../en_np/init/include/dpp_np_init.h" +#include "en_pf_eq.h" +#include "msg_func.h" +#include "../plcr.h" +#include "../en_np/driver/include/dpp_drv_sdt.h" +#include "../slib.h" + +#define FUNC_NAME_SIZE_MAX 32 +#define ZXDH_MAX_VF 256 +#define PF_HAS_MAX_ENCAP1_NUM 256 + +#define ETH_PKT_IPV4 0x0800 +#define ETH_PKT_IPV6 0x86dd + +typedef uint32_t (*zxdh_vf_msg_func)(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev); + +typedef struct { + zxdh_msg_op_code op_code; + uint8_t proc_name[FUNC_NAME_SIZE_MAX]; + zxdh_vf_msg_func msg_proc; +} zxdh_vf_msg_proc; + +extern int debug_print; +void zxdh_u32_array_print(uint32_t *array, uint16_t size) +{ + uint16_t i; + + if (debug_print == 0) + return; + + for (i = 0; i < size; ++i) { + printk(KERN_CONT "%u ", array[i]); + if ((i + 1) % 8 == 0) { + printk(KERN_CONT "\n"); + } + } +} +EXPORT_SYMBOL(zxdh_u32_array_print); + +static void zxdh_vf_link_state_get_proc(struct zxdh_pf_device *pf_dev, + struct zxdh_vf_item *vf_item, + uint16_t vf_idx) +{ + uint32_t dev_link_up_reg = 0; + uint8_t vf_link_up = 0; + + if (vf_item->link_forced) { + vf_link_up = vf_item->link_up ? 1 : 0; + } else { + vf_link_up = pf_dev->link_up ? 
1 : 0; + } + + if (pf_dev->pf_sriov_cap_base) { + dev_link_up_reg = ioread32( + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_idx + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + dev_link_up_reg = (dev_link_up_reg & ~(0xFF << 16)) | + ((uint32_t)(vf_link_up) << 16); + iowrite32(dev_link_up_reg, + (void __iomem *)(pf_dev->pf_sriov_cap_base + + (pf_dev->sriov_bar_size) * vf_idx + + pf_dev->dev_cfg_bar_off + + ZXDH_DEV_MAC_HIGH_OFFSET)); + } + LOG_INFO("vf[%d] link_forced is [%s], link state[%s] update ok.\n", + vf_idx, vf_item->link_forced ? "TRUE" : "FALSE", + (vf_link_up == 1) ? "UP" : "DOWN"); +} + +int32_t zxdh_vf_flush_mac(DPP_PF_INFO_T *pf_info, struct zxdh_vf_item *vf_item) +{ + int32_t err = 0; + uint8_t i = 0; + uint8_t *addr = NULL; + uint16_t sriov_vlan_tpid = 0; + uint16_t sriov_vlan_id = 0; + + mutex_lock(&vf_item->lock); + + sriov_vlan_tpid = vf_item->vlan_proto; + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + /* 删除此VF的所有单播mac地址 */ + for (i = 0; i < DEV_UNICAST_MAX_NUM; ++i) { + addr = vf_item->vf_mac_info.unicast_mac[i].mac_addr; + + if (!is_zero_ether_addr(addr)) { /* mac不全为0 */ + LOG_DEBUG("the deleted unicast mac is %pM\n", addr); + err = dpp_del_mac(pf_info, addr, sriov_vlan_tpid, + sriov_vlan_id); + if (err != 0) { + LOG_ERR("dpp_del_mac failed\n"); + mutex_unlock(&vf_item->lock); + return err; + } + } + } + + /* 删除此VF的所有组播mac地址 */ + for (i = 0; i < DEV_MULTICAST_MAX_NUM; ++i) { + addr = vf_item->vf_mac_info.multicast_mac[i].mac_addr; + + if (!is_zero_ether_addr(addr)) { /* mac不全为0 */ + LOG_DEBUG("the deleted multicasat mac is %pM\n", addr); + err = dpp_multi_mac_del_member(pf_info, addr); + if (err != 0) { + LOG_ERR("dpp_multi_mac_del_member failed\n"); + mutex_unlock(&vf_item->lock); + return err; + } + } + } + + /* 将vf_mac_info结构体全部清零 */ + memset(&vf_item->vf_mac_info, 0, sizeof(vf_item->vf_mac_info)); + + mutex_unlock(&vf_item->lock); + return err; +} + +static int 
zxdh_vf_enable_sriov_vlan_tbl(DPP_PF_INFO_T *pf_info, u16 vlan_tci, + uint16_t vlan_proto) +{ + int ret = 0; + + ret = dpp_vport_vlan_offload_en_set(pf_info, 1); + if (ret != 0) { + goto err; + } + + ret = dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TCI, vlan_tci); + if (ret != 0) { + goto err; + } + + ret = dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_VLAN_TPID, vlan_proto); + if (ret != 0) { + goto err; + } + +err: + return ret; +} + +static int zxdh_vf_init_vlan_recfg(struct zxdh_vf_item *vf_item, + DPP_PF_INFO_T *pf_info) +{ + int ret = 0; + uint16_t vlan_tci = 0; + + /* 先清空vlan 二级表,vqm Vlan表*/ + ret = dpp_vqm_vfid_vlan_init(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_init, ret: %d\n", ret); + goto out; + } + + ret = dpp_vlan_filter_init(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vlan_filter_init failed: %d\n", ret); + goto out; + } + + ret = dpp_add_vlan_filter(pf_info, 0); + if (ret != 0) { + LOG_ERR("dpp_add_vlan_filter 0 failed: %d\n", ret); + goto out; + } + + /* 再次从vf_item中获取vlanID信息, 如果非0就重配*/ + if (vf_item->vlan != 0) { + vlan_tci = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + ret = zxdh_vf_enable_sriov_vlan_tbl(pf_info, vlan_tci, + vf_item->vlan_proto); + if (ret != 0) { + LOG_ERR("zxdh_enable_sriov_vlan_tbl failed, ret: %d\n", + ret); + return ret; + } + + LOG_DEBUG("recover vf vlan: %d.\n", vf_item->vlan); + } + +out: + return ret; +} + +static uint32_t zxdh_vf_port_init(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t ret = 0; + uint8_t mac[6] = { 0 }; + int32_t vf_idx = msg->hdr.pcie_id & (0xff); + uint8_t addr_type = NET_ADDR_PERM; + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + LOG_INFO("zxdh_vf_port_init, vfindex%d\n", vf_idx); + ret = dpp_vport_create(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_create failed, ret: %d\n", ret); + return ret; + } + 
+ if (msg->vf_init_msg.is_upf) { + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_LAG_ID, + 0); /* 不需要配置 VF LAG属性跟随PF */ + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set panel_id %d failed: %d\n", + pf_dev->phy_port, ret); + goto err_init; + } + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_LAG_EN_OFF, 1); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set hash_search_idx %u failed: %d\n", + msg->vf_init_msg.hash_search_idx, ret); + goto err_init; + } + } else { + ret = dpp_vport_attr_set(pf_info, + SRIOV_VPORT_UPLINK_PHY_PORT_ID, + pf_dev->phy_port); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set panel_id %d failed: %d\n", + pf_dev->phy_port, ret); + goto err_init; + } + } + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_HASH_SEARCH_INDEX, + msg->vf_init_msg.hash_search_idx); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set hash_search_idx %u failed: %d\n", + msg->vf_init_msg.hash_search_idx, ret); + goto err_init; + } + + ret = dpp_vport_bond_pf(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_bond_pf failed, ret: %d\n", ret); + goto err_init; + } + + ret = dpp_vport_rss_en_set(pf_info, msg->vf_init_msg.rss_enable); + if (ret != 0) { + LOG_ERR("dpp_vport_rss_en_set failed, ret: %d\n", ret); + goto err_init; + } + + ret = dpp_vport_hash_funcs_set(pf_info, ZXDH_FUNC_CRC32); + if (ret != 0) { + LOG_ERR("dpp_vport_hash_funcs_set failed, ret: %d\n", ret); + goto err_init; + } + + ret = dpp_vport_rx_flow_hash_set(pf_info, ZXDH_NET_RX_FLOW_HASH_SDFNT); + if (ret != 0) { + LOG_ERR("dpp_vport_rx_flow_hash_set failed, ret: %d\n", ret); + goto err_init; + } + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_VEPA_EN_OFF, + (uint32_t)pf_dev->vepa); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set vport(0x%x) %s mode failed: %d\n", + msg->hdr.vport, pf_dev->vepa ? "vepa" : "veb", ret); + goto err_init; + } + LOG_INFO("Initialize vport(0x%x) to %s mode\n", msg->hdr.vport, + pf_dev->vepa ? 
"vepa" : "veb"); + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_PORT_BASE_QID, + msg->vf_init_msg.base_qid); + if (ret != 0) { + LOG_ERR("set_base_qid %d failed: %d\n", + msg->vf_init_msg.base_qid, ret); + goto err_init; + } + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_SPOOFCHK_EN_OFF, + vf_item->spoofchk); + if (0 != ret) { + LOG_ERR("dpp_vport_attr_set spookchk %s failed: %d\n", + vf_item->spoofchk ? "on" : "off", ret); + goto err_init; + } + + ret = zxdh_vf_init_vlan_recfg(vf_item, pf_info); + if (ret != 0) { + LOG_ERR("zxdh_vf_init_vlan_recfg %d\n", ret); + goto err_init; + } + + ret = zxdh_vf_flush_mac(pf_info, vf_item); + if (ret != 0) { + goto err_init; + } + + ret = dpp_fd_acl_all_delete(pf_info); + if (ret != 0) { + LOG_ERR("dpp_fd_acl_all_delete failed! %d\n", ret); + goto err_init; + } + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_FD_VXLAN_OFFLOAD_EN, 0); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set vxlan offload ip checksum failed: %d\n", + ret); + goto err_init; + } + + ether_addr_copy(mac, vf_item->mac); + if (is_zero_ether_addr(mac)) { + get_random_bytes(mac, 6); + mac[0] &= 0xfe; + addr_type = NET_ADDR_RANDOM; + LOG_INFO("vf set random mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } + LOG_INFO("zxdh_vf_port_init mac %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + ret = dpp_add_mac(pf_info, mac, sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("dpp_add_mac failed, ret: %d\n", ret); + goto err_init; + } + vf_item->vf_mac_info.current_unicast_num = 1; + zxdh_pf_set_vf_mac_reg(pf_dev, mac, vf_idx); + dpp_vport_uc_promisc_set(pf_info, 0); + dpp_vport_mc_promisc_set(pf_info, 0); + + ether_addr_copy(vf_item->vf_mac_info.unicast_mac[0].mac_addr, mac); + ether_addr_copy(reps->vf_init_msg.mac_addr, mac); + reps->vf_init_msg.addr_assign_type = addr_type; + reps->vf_init_msg.phy_port = pf_dev->phy_port; + reps->vf_init_msg.link_up = pf_dev->link_up; + 
reps->vf_init_msg.speed = pf_dev->speed; + reps->vf_init_msg.duplex = pf_dev->duplex; + reps->vf_init_msg.autoneg_enable = pf_dev->autoneg_enable; + reps->vf_init_msg.sup_link_modes = pf_dev->supported_speed_modes; + reps->vf_init_msg.adv_link_modes = pf_dev->advertising_speed_modes; + reps->vf_init_msg.vlan_id = vf_item->vlan; + reps->vf_init_msg.vlan_qos = vf_item->qos; + + LOG_INFO("zxdh_vf_port_init plcr %u\n", vf_item->max_tx_rate); + zxdh_plcr_recover_cfg(vf_item, pf_dev, vf_idx); + + zxdh_vf_link_state_get_proc(pf_dev, vf_item, vf_idx); + + vf_item->is_probed = true; + return 0; + +err_init: + dpp_vport_delete(pf_info); + return ret; +} + +static int32_t zxdh_vf_rate_clear(uint16_t vf_idx, struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + zxdh_plcr_rate_limit_paras rate_limit_paras = { 0 }; + int32_t rtn = 0; + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE; + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = 0; + rate_limit_paras.min_rate = 0; + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = vf_idx; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras); + if (PLCR_REMOVE_RATE_LIMIT == rtn || PLCR_DUPLICATE_RATE == rtn) { + return 0; + } + + return rtn; +} + +static int32_t zxdh_vf_rate_limit_health_set(uint16_t vf_idx, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + zxdh_plcr_rate_limit_paras rate_limit_paras = { 0 }; + int32_t rtn = 0; + + PLCR_FUNC_DBG_ENTER(); + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE; + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = vf_item->max_tx_rate; + rate_limit_paras.min_rate = vf_item->min_tx_rate; + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = vf_idx; + 
rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras); + if (PLCR_REMOVE_RATE_LIMIT == rtn || PLCR_DUPLICATE_RATE == rtn) { + return 0; + } + + return rtn; +} + +static uint32_t zxdh_vf_mac_recover(DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item) +{ + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint32_t i = 0; + uint32_t err = 0; + + /* 遍历vf_item的单播数组 */ + for (i = 0; i < DEV_UNICAST_MAX_NUM; ++i) { + if (!is_zero_ether_addr( + vf_item->vf_mac_info.unicast_mac[i].mac_addr)) { + err = dpp_add_mac( + pf_info, + vf_item->vf_mac_info.unicast_mac[i].mac_addr, + sriov_vlan_tpid, sriov_vlan_id); + ZXDH_CHECK_RET_RETURN( + err, "dpp_add_unicast_mac[%d] failed: %d\n", i, + err); + } + } + + /* 遍历vf_item的组播数组 */ + for (i = 0; i < DEV_MULTICAST_MAX_NUM; ++i) { + if (!is_zero_ether_addr( + vf_item->vf_mac_info.multicast_mac[i].mac_addr)) { + err = dpp_multi_mac_add_member( + pf_info, + vf_item->vf_mac_info.multicast_mac[i].mac_addr); + ZXDH_CHECK_RET_RETURN( + err, "dpp_add_multicast_mac[%d] failed: %d\n", + i, err); + } + } + + return 0; +} + +static uint32_t zxdh_vf_item_reload(struct zxdh_pf_device *pf_dev, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + uint16_t vf_idx) +{ + uint32_t ret = 0; + + ret = zxdh_vf_mac_recover(pf_info, vf_item); + ZXDH_CHECK_RET_RETURN(ret, "zxdh_vf_mac_recover failed! %d\n", ret); + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_SPOOFCHK_EN_OFF, + vf_item->spoofchk); + ZXDH_CHECK_RET_RETURN(ret, "spookchk %s failed: %d\n", + vf_item->spoofchk ? 
"on" : "off", ret); + + ret = zxdh_vf_init_vlan_recfg(vf_item, pf_info); + ZXDH_CHECK_RET_RETURN(ret, "zxdh_vf_init_vlan_recfg %d\n", ret); + + vf_item->is_probed = true; + zxdh_vf_link_state_get_proc(pf_dev, vf_item, vf_idx); + + return ret; +} + +int32_t zxdh_vlan_trunk_recover(DPP_PF_INFO_T *pf_info, + uint8_t *vlan_trunk_bitmap) +{ + int ret = 0; + uint16_t vlan_idx = 0; + uint16_t byte_index = 0; + uint8_t bit_idx = 0; + + for (vlan_idx = 0; vlan_idx < 4096; vlan_idx++) { + byte_index = vlan_idx / 8; + bit_idx = vlan_idx % 8; + if (vlan_trunk_bitmap[byte_index] & (1 << bit_idx)) { + ret = dpp_add_vlan_filter(pf_info, vlan_idx); + if (0 != ret) { + LOG_ERR("failed to recover vlan bit %d\n", + vlan_idx); + return -1; + } + LOG_DEBUG("dev-0x%x recover vlan-%d.\n", pf_info->vport, + vlan_idx); + } + } + return ret; +} +EXPORT_SYMBOL(zxdh_vlan_trunk_recover); + +static uint32_t zxdh_vf_port_reload(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint16_t vf_idx = msg->hdr.pcie_id & (0xff); + zxdh_vf_reload_msg *eth_config = &msg->vf_reload_msg; + uint32_t ret = 0; + + LOG_INFO("zxdh_vf_port_reload, vfindex%d\n", vf_idx); + ret = dpp_vport_create(pf_info); + ZXDH_CHECK_RET_RETURN(ret, "dpp_vport_create failed, ret: %d\n", ret); + + if (msg->vf_init_msg.is_upf) { + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_LAG_ID, + 0); /* 不需要配置 VF LAG属性跟随PF */ + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_init, + "dpp_vport_attr_set panel_id %d failed: %d\n", + pf_dev->phy_port, ret); + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_LAG_EN_OFF, 1); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_init, + "dpp_vport_attr_set SRIOV_VPORT_LAG_EN_OFF failed: %d\n", + ret); + } else { + ret = dpp_vport_attr_set(pf_info, + SRIOV_VPORT_UPLINK_PHY_PORT_ID, + pf_dev->phy_port); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_init, + "dpp_vport_attr_set panel_id %d failed: %d\n", + pf_dev->phy_port, ret); + } + + ret = 
dpp_vport_attr_set(pf_info, SRIOV_VPORT_HASH_SEARCH_INDEX, + eth_config->hash_search_idx); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_init, + "dpp_vport_attr_set hash_search_idx %u failed: %d\n", + eth_config->hash_search_idx, ret); + + ret = dpp_vport_bond_pf(pf_info); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "dpp_vport_bond_pf failed, ret: %d\n", ret); + + ret = dpp_vport_hash_funcs_set(pf_info, eth_config->hash_func); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "dpp_vport_hash_funcs_set failed, ret: %d\n", + ret); + + ret = dpp_vport_rx_flow_hash_set(pf_info, eth_config->hash_mode); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "dpp_vport_rx_flow_hash_set failed, ret: %d\n", + ret); + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_VEPA_EN_OFF, + (uint32_t)pf_dev->vepa); + ZXDH_CHECK_RET_GOTO_ERR( + ret, err_init, + "dpp_vport_attr_set vport(0x%x) %s mode failed: %d\n", + msg->hdr.vport, pf_dev->vepa ? "vepa" : "veb", ret); + LOG_INFO("Initialize vport(0x%x) to %s mode\n", msg->hdr.vport, + pf_dev->vepa ? "vepa" : "veb"); + + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_PORT_BASE_QID, + eth_config->base_qid); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, "set_base_qid %d failed: %d\n", + eth_config->base_qid, ret); + + ret = dpp_rxfh_set(pf_info, eth_config->queue_map, ZXDH_INDIR_RQT_SIZE); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, "dpp_rxfh_set failed: %d\n", + ret); + + ret = dpp_fd_acl_all_delete(pf_info); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "dpp_fd_acl_all_delete failed: %d\n", ret); + + if (vf_item->trusted) { + dpp_vport_uc_promisc_set(pf_info, eth_config->uc_promisc); + dpp_vport_mc_promisc_set(pf_info, eth_config->mc_promisc); + if (eth_config->uc_promisc) + dpp_vport_promisc_en_set(pf_info, 1); + } + + //SRIOV_CONFIG + ret = zxdh_vf_item_reload(pf_dev, pf_info, vf_item, vf_idx); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "sriov_config_recover failed! 
%d\n", ret); + + ret = zxdh_vlan_trunk_recover(pf_info, eth_config->vlan_trunk_bitmap); + ZXDH_CHECK_RET_GOTO_ERR(ret, err_init, + "vlan_trunk_tbl_recover failed! %d\n", ret); + + ret = zxdh_vf_rate_clear(vf_idx, vf_item, pf_dev); + ZXDH_CHECK_RET_RETURN(ret, "zxdh_vf_rate_clear failed: %d\n", ret); + + ret = zxdh_vf_rate_limit_health_set(vf_idx, vf_item, pf_dev); + ZXDH_CHECK_RET_RETURN(ret, "zxdh_vf_rate_limit_health_set failed: %d\n", + ret); + + return 0; + +err_init: + dpp_vport_delete(pf_info); + return ret; +} + +static uint32_t zxdh_vf_port_uninit(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t ret = 0; + + dpp_vport_uc_promisc_set(pf_info, 0); + dpp_vport_mc_promisc_set(pf_info, 0); + + ret = dpp_fd_acl_all_delete(pf_info); + if (ret != 0) { + LOG_ERR("dpp_fd_acl_all_delete failed! %d\n", ret); + return ret; + } + /* 退出时, 都先清除vlan相关的配置*/ + ret = dpp_vqm_vfid_vlan_delete(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_vlan_filter_en_set failed, ret: %d\n", ret); + return ret; + } + + ret = dpp_vlan_filter_init(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vlan_filter_init failed: %d\n", ret); + return ret; + } + + ret = zxdh_vf_flush_mac(pf_info, vf_item); + if (ret != 0) { + LOG_ERR("zxdh_vf_flush_macf failed, ret: %d\n", ret); + return ret; + } + + ret = dpp_vport_unbond_pf(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_unbond_pf failed, ret: %d\n", ret); + return ret; + } + + ret = dpp_vport_delete(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_delete failed, ret: %d\n", ret); + return ret; + } + + ret = dpp_vport_unregister(pf_info); + if (ret != 0) { + LOG_ERR("dpp_vport_unregister failed, ret: %d\n", ret); + return ret; + } + + vf_item->is_probed = false; + return ret; +} + +void zxdh_vf_item_mac_add(struct zxdh_vf_item *vf_item, uint8_t *mac_addr, + uint8_t dhtool_mac_set_flag) +{ + uint8_t *addr = NULL; + uint8_t i = 0; + + if 
(is_unicast_ether_addr(mac_addr)) { + for (i = 0; i < DEV_UNICAST_MAX_NUM; ++i) { + if (ether_addr_equal(mac_addr, + vf_item->vf_mac_info.unicast_mac[i] + .mac_addr)) { + /* mac已经存在 */ + vf_item->vf_mac_info.unicast_mac[i] + .dhtool_mac_set_flag = + dhtool_mac_set_flag; + return; + } + } + for (i = 1; i < DEV_UNICAST_MAX_NUM; ++i) { /* 保留本机mac */ + addr = vf_item->vf_mac_info.unicast_mac[i].mac_addr; + if (is_zero_ether_addr(addr)) { /*查询没使用的数组*/ + /* 将此mac添加到zxdh_vf_item中 */ + memcpy(addr, mac_addr, ETH_ALEN); + vf_item->vf_mac_info.unicast_mac[i] + .dhtool_mac_set_flag = + dhtool_mac_set_flag; + vf_item->vf_mac_info.current_unicast_num++; + break; + } + } + } else { + for (i = 0; i < DEV_MULTICAST_MAX_NUM; ++i) { + if (ether_addr_equal(mac_addr, vf_item->vf_mac_info + .multicast_mac[i] + .mac_addr)) { + /* mac已经存在 */ + vf_item->vf_mac_info.multicast_mac[i] + .dhtool_mac_set_flag = + dhtool_mac_set_flag; + return; + } + } + for (i = 0; i < DEV_MULTICAST_MAX_NUM; ++i) { + addr = vf_item->vf_mac_info.multicast_mac[i].mac_addr; + if (is_zero_ether_addr(addr)) { + /* 将此mac添加到zxdh_vf_item中 */ + memcpy(addr, mac_addr, ETH_ALEN); + vf_item->vf_mac_info.multicast_mac[i] + .dhtool_mac_set_flag = + dhtool_mac_set_flag; + vf_item->vf_mac_info.current_multicast_num++; + break; + } + } + } + + return; +} +EXPORT_SYMBOL(zxdh_vf_item_mac_add); + +void zxdh_vf_item_mac_del(struct zxdh_vf_item *vf_item, uint8_t *mac_addr) +{ + uint8_t i = 0; + uint8_t *addr = NULL; + + if (is_unicast_ether_addr(mac_addr)) { + for (i = 1; i < DEV_UNICAST_MAX_NUM; ++i) { + /* 获取此mac地址 */ + addr = vf_item->vf_mac_info.unicast_mac[i].mac_addr; + + if (ether_addr_equal(addr, mac_addr)) { /* 查询到此mac */ + LOG_DEBUG("the mac is %pM\n", addr); + /* 在地址数组中将此mac清空*/ + memset(&vf_item->vf_mac_info.unicast_mac[i], 0, + ETH_ALEN); + vf_item->vf_mac_info.current_unicast_num--; + break; + } + } + } else { + for (i = 0; i < DEV_MULTICAST_MAX_NUM; ++i) { + addr = vf_item->vf_mac_info.multicast_mac[i].mac_addr; + + 
if (ether_addr_equal(addr, mac_addr)) { /* 查询到此mac */ + LOG_DEBUG("the mac is %pM\n", addr); + /* 在地址数组中将此mac清空 */ + memset(&vf_item->vf_mac_info.multicast_mac[i], + 0, ETH_ALEN); + vf_item->vf_mac_info.current_multicast_num--; + break; + } + } + } + + return; +} +EXPORT_SYMBOL(zxdh_vf_item_mac_del); + +static uint32_t zxdh_vf_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t ret = 0; + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + ether_addr_copy(vf_item->vf_mac_info.unicast_mac[0].mac_addr, + msg->mac_addr_set_msg.mac_addr); + ret = dpp_add_mac(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("dpp_add_mac failed, ret: %d\n", ret); + } + + return ret; +} + +static uint32_t zxdh_vf_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t ret = 0; + uint8_t i = 0; + uint8_t *addr = NULL; + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + if (msg->mac_addr_set_msg.mac_flag) { + ret = dpp_del_mac(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (ret != 0) { + LOG_ERR("dpp_del_mac failed, ret: %d\n", ret); + return ret; + } + } else if (msg->mac_addr_set_msg.mac_addr == + vf_item->vf_mac_info.unicast_mac[0].mac_addr) { + for (i = 1; i < DEV_UNICAST_MAX_NUM; ++i) { + addr = vf_item->vf_mac_info.unicast_mac[i].mac_addr; + if (is_zero_ether_addr(addr)) { + memcpy(addr, + vf_item->vf_mac_info.unicast_mac[0] + .mac_addr, + ETH_ALEN); + break; + } + } + } + + return ret; +} + +static uint32_t zxdh_vf_filter_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device 
*pf_dev) +{ + uint32_t err = 0; + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + /* 将获取到的mac地址配置到NP中 */ + LOG_DEBUG("msg->mac_addr_set_msg.mac_addr is %pM\n", + msg->mac_addr_set_msg.mac_addr); + + err = dpp_add_mac(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (err != 0) { + LOG_ERR("dpp_add_mac failed \n"); + return err; + } + + /* 将此mac地址添加到zxdh_vf_item */ + zxdh_vf_item_mac_add(vf_item, msg->mac_addr_set_msg.mac_addr, 0); + + return err; +} + +static uint32_t zxdh_vf_filter_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + + /* 将获取到的mac地址从NP中删除 */ + LOG_DEBUG("msg->mac_addr_set_msg.mac_addr is %pM\n", + msg->mac_addr_set_msg.mac_addr); + + err = dpp_del_mac(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id); + if (err != 0) { + LOG_ERR("dpp_mac_del failed\n"); + return err; + } + + /* 将此mac从zxdh_vf_item中删除 */ + zxdh_vf_item_mac_del(vf_item, msg->mac_addr_set_msg.mac_addr); + + return err; +} + +static uint32_t zxdh_vf_multi_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + /* 将此mac地址添加到np中 */ + LOG_DEBUG("msg->mac_addr_set_msg.mac_addr is %pM\n", + msg->mac_addr_set_msg.mac_addr); + + /* 判断是否超过本机组播mac数量 */ + if (vf_item->vf_mac_info.current_multicast_num >= + VF_MAX_MULTICAST_MAC) { + LOG_ERR("vf multicast mac num:%u beyond 32", + vf_item->vf_mac_info.current_multicast_num); + return ZXDH_REPS_BEYOND_MAC; + } + + err = dpp_multi_mac_add_member(pf_info, msg->mac_addr_set_msg.mac_addr); + if (err != 0) { + if (err == DPP_RC_TBL_IS_FULL) { + LOG_ERR("multicast mac is beyond 
whole transfer num\n"); + return ZXDH_REPS_BEYOND_MAC; + } + LOG_ERR("dpp_multi_mac_add_member failed %d\n", err); + return err; + } + + /* 将此mac添加到zxdh_vf_item中 */ + zxdh_vf_item_mac_add(vf_item, msg->mac_addr_set_msg.mac_addr, 0); + + return err; +} + +static uint32_t zxdh_vf_multi_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + LOG_DEBUG("msg->mac_addr_set_msg.mac_addr is %pM\n", + msg->mac_addr_set_msg.mac_addr); + + /* 将此组播mac地址从np中删除 */ + err = dpp_multi_mac_del_member(pf_info, msg->mac_addr_set_msg.mac_addr); + if (err != 0) { + LOG_INFO("dpp_multi_mac_del_member failed %d\n", err); + return err; + } + + /* 将此mac从zxdh_vf_item中删除 */ + zxdh_vf_item_mac_del(vf_item, msg->mac_addr_set_msg.mac_addr); + + return err; +} + +static bool zxdh_check_item_mac_exists(DPP_PF_INFO_T *pf_info, + struct zxdh_pf_device *pf_dev, + uint16_t vf_idx, + struct zxdh_vf_item *vf_item, + const unsigned char *target_mac) +{ + uint32_t i = 0; + struct zxdh_vf_item *cur_vf_item = NULL; + uint16_t sriov_vlan_id = 0; + uint16_t sriov_vlan_tpid = 0; + struct dh_core_dev *dh_dev = + container_of((void *)(pf_dev), struct dh_core_dev, priv); + struct pci_dev *pdev = dh_dev->pdev; + int num_vfs = pci_num_vf(pdev); + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + sriov_vlan_tpid = vf_item->vlan_proto; + + for (i = 0; i < num_vfs; i++) { + if (i == vf_idx) + continue; + + cur_vf_item = &pf_dev->vf_item[i]; + if (ether_addr_equal(cur_vf_item->mac, target_mac) && + ((ZXDH_VLAN_TCI_GEN(cur_vf_item->vlan, cur_vf_item->qos) == + sriov_vlan_id) && + (cur_vf_item->vlan_proto == sriov_vlan_tpid))) { + LOG_INFO("Mac already exists vf %d\n", i); + return true; + } + } + + return false; +} + +static uint32_t zxdh_vf_all_mac_dump(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + 
uint32_t err = 0; + uint16_t current_vport = 0; + uint16_t vport = vf_item->vport; + uint16_t sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + uint16_t sriov_vlan_tpid = vf_item->vlan_proto; + uint16_t vf_idx = msg->hdr.pcie_id & (0xff); + + /* 遍历整个转发域,判断此转发域是否存在此单播mac地址 */ + err = dpp_unicast_mac_search(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id, + ¤t_vport); + if ((err == 0) && (vport == current_vport)) { + return 0; + } else if ((err == 0) && (vport != current_vport)) { + LOG_ERR("Mac already exists\n"); + return ZXDH_REPS_EXIST_MAC; + } else if ((err != 0) && (err != DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", err); + return 1; + } + + if (zxdh_check_item_mac_exists(pf_info, pf_dev, vf_idx, vf_item, + msg->mac_addr_set_msg.mac_addr)) { + LOG_ERR("Mac already exists\n"); + return ZXDH_REPS_EXIST_MAC; + } + + return 0; +} + +static uint32_t zxdh_vf_all_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + MAC_VPORT_INFO *p_mac_arr = NULL; + uint32_t p_mac_num = 0; + uint16_t current_vport = 0; + uint16_t vport = vf_item->vport; + uint16_t sriov_vlan_id = 0; + uint16_t sriov_vlan_tpid = 0; + uint32_t max_unicast_num = 0; + uint16_t vf_idx = msg->hdr.pcie_id & (0xff); + + /* 配置组播mac*/ + if (!is_unicast_ether_addr(msg->mac_addr_set_msg.mac_addr) && + !is_link_local_ether_addr(msg->mac_addr_set_msg.mac_addr)) { + return zxdh_vf_multi_mac_add(msg, reps, pf_info, vf_item, + pf_dev); + } + + mutex_lock(&vf_item->lock); + sriov_vlan_id = ZXDH_VLAN_TCI_GEN(vf_item->vlan, vf_item->qos); + sriov_vlan_tpid = vf_item->vlan_proto; + + /* 遍历整个转发域,判断此转发域是否存在此单播mac地址 */ + err = dpp_unicast_mac_search(pf_info, msg->mac_addr_set_msg.mac_addr, + sriov_vlan_tpid, sriov_vlan_id, + ¤t_vport); + if ((err == 0) && (vport == current_vport)) { + mutex_unlock(&vf_item->lock); + return 0; + } else if 
((err == 0) && (vport != current_vport)) { + LOG_ERR("Mac already exists\n"); + mutex_unlock(&vf_item->lock); + return ZXDH_REPS_EXIST_MAC; + } else if ((err != 0) && (err != DPP_HASH_RC_SRH_FAIL)) { + LOG_ERR("dpp_unicast_mac_search failed, ret:%d\n", err); + mutex_unlock(&vf_item->lock); + return 1; + } + + if (zxdh_check_item_mac_exists(pf_info, pf_dev, vf_idx, vf_item, + msg->mac_addr_set_msg.mac_addr)) { + LOG_ERR("Mac already exists\n"); + mutex_unlock(&vf_item->lock); + return ZXDH_REPS_EXIST_MAC; + } + + /* 配置本机mac */ + if (msg->mac_addr_set_msg.filter_flag == UNFILTER_MAC) { + err = zxdh_vf_mac_add(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_mac_add failed\n"); + } + mutex_unlock(&vf_item->lock); + return err; + } + + /* 判断是否超过本机单播mac数量 */ + if (vf_item->vf_mac_info.current_unicast_num >= VF_MAX_UNICAST_MAC) { + LOG_ERR("vf unicast mac num:%u beyond 128", + vf_item->vf_mac_info.current_unicast_num); + mutex_unlock(&vf_item->lock); + return ZXDH_REPS_BEYOND_MAC; + } + + /* dump整个转发域已经配置的单播mac数量 */ + err = dpp_unicast_mac_dump(pf_info, p_mac_arr, &p_mac_num); + if (err != 0) { + LOG_ERR("dpp_unicast_mac_dump failed, ret:%d\n", err); + mutex_unlock(&vf_item->lock); + return err; + } + LOG_INFO("p_mac_num is %d\n", p_mac_num); + + /* 获取当前pf级最大单播mac数量 */ + err = dpp_unicast_mac_max_get(pf_info, &max_unicast_num); + if (err != 0) { + LOG_ERR("dpp_unicast_mac_max_get failed %u\n", max_unicast_num); + mutex_unlock(&vf_item->lock); + return err; + } + /* 判断整个转发域配置的单播mac数量是否超过上限 */ + if (p_mac_num >= max_unicast_num) { + LOG_ERR("curr_all_unicast_num is beyond maximum\n"); + mutex_unlock(&vf_item->lock); + return ZXDH_REPS_BEYOND_MAC; + } + + /* 配置过滤用的单播mac */ + err = zxdh_vf_filter_mac_add(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("dpp_unicast_mac_dump failed\n"); + } + mutex_unlock(&vf_item->lock); + return err; +} +static uint32_t zxdh_vf_all_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T 
*pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + /* 删除组播mac */ + if (!is_unicast_ether_addr(msg->mac_addr_set_msg.mac_addr) && + !is_link_local_ether_addr(msg->mac_addr_set_msg.mac_addr)) { + return zxdh_vf_multi_mac_del(msg, reps, pf_info, vf_item, + pf_dev); + } + + mutex_lock(&vf_item->lock); + /* 删除本机mac */ + if (msg->mac_addr_set_msg.filter_flag == UNFILTER_MAC) { + err = zxdh_vf_mac_del(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_mac_del failed\n"); + } + mutex_unlock(&vf_item->lock); + return err; + } + + /* 删除过滤用的单播mac */ + err = zxdh_vf_filter_mac_del(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_filter_mac_del failed\n"); + } + + mutex_unlock(&vf_item->lock); + return err; +} + +static uint32_t zxdh_vf_ipv6_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + mutex_lock(&pf_dev->ip6mac_tbl->mlock); + + err = zxdh_vf_multi_mac_add(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_multi_mac_add failed\n"); + } + + mutex_unlock(&pf_dev->ip6mac_tbl->mlock); + + return err; +} + +static uint32_t zxdh_vf_ipv6_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + mutex_lock(&pf_dev->ip6mac_tbl->mlock); + + err = zxdh_vf_multi_mac_del(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_multi_mac_del failed\n"); + } + + mutex_unlock(&pf_dev->ip6mac_tbl->mlock); + + return err; +} + +static uint32_t zxdh_vf_lacp_mac_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + err = zxdh_vf_multi_mac_add(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + 
LOG_ERR("zxdh_vf_multi_mac_add failed\n"); + } + + return err; +} + +static uint32_t zxdh_vf_lacp_mac_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + err = zxdh_vf_multi_mac_del(msg, reps, pf_info, vf_item, pf_dev); + if (err != 0) { + LOG_ERR("zxdh_vf_multi_mac_del failed\n"); + } + + return err; +} + +static uint32_t zxdh_vf_mac_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + ether_addr_copy(reps->vf_mac_addr_get_msg.mac_addr, vf_item->mac); + return 0; +} + +static uint32_t zxdh_vf_rss_state_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_rss_en_set(pf_info, msg->rss_enable_msg.rss_enable); +} + +static uint32_t zxdh_vf_fd_state_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_fd_en_set(pf_info, msg->vf_fd_enable_msg.fd_enable); +} + +static uint32_t zxdh_vf_rxfh_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_rxfh_set(pf_info, msg->rxfh_set_msg.queue_map, + ZXDH_INDIR_RQT_SIZE); +} + +static uint32_t zxdh_vf_rxfh_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + err = dpp_rxfh_get(pf_info, reps->rxfh_get_msg.queue_map, + ZXDH_INDIR_RQT_SIZE); + if (err != 0) { + LOG_ERR("dpp_rxfh_get failed: %d\n", err); + return err; + } + + return 0; +} + +static uint32_t zxdh_vf_rxfh_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return 
dpp_rxfh_del(pf_info); +} + +static uint32_t zxdh_vf_thash_key_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_thash_key_set(pf_info, msg->thash_key_set_msg.key_map, + ZXDH_NET_HASH_KEY_SIZE); +} + +static uint32_t zxdh_vf_thash_key_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_thash_key_get(pf_info, reps->thash_key_set_msg.key_map, + ZXDH_NET_HASH_KEY_SIZE); +} + +static uint32_t zxdh_vf_hash_funcs_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_hash_funcs_set(pf_info, msg->hfunc_set_msg.func); +} + +static uint32_t zxdh_vf_rx_flow_hash_set(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_rx_flow_hash_set(pf_info, + msg->rx_flow_hash_set_msg.hash_mode); +} + +/** + * zxdh_vf_switch_business_vlan - 配置bussness vlan子开关 + * @pf_info: pf信息 + * @type: vlan二级开关 + * @wanted_feature: 切换的值 + */ +int zxdh_vf_switch_business_vlan(DPP_PF_INFO_T *pf_info, uint8_t type, + uint32_t wanted_feature) +{ + int ret = 0; + ZXDH_VQM_VFID_VLAN_T vf_vlan_attr = { 0 }; + bool old_vport_bit = 0; + bool wanted_vport_bit = 0; + uint32_t *changed_vlan_attr = NULL; + /* sizeof(vf_vlan_attr)/sizeof(vf_vlan_attr.rsv): 结构体成员数量*/ + if (type >= sizeof(vf_vlan_attr) / sizeof(vf_vlan_attr.rsv)) { + LOG_ERR("zxdh_vf_switch_business_vlan para type err: %u.\n", + type); + return -1; + } + changed_vlan_attr = (uint32_t *)&vf_vlan_attr + type; + ret = dpp_vqm_vfid_vlan_get(pf_info, &vf_vlan_attr); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_get failed: %d.\n", ret); + return -1; + } + old_vport_bit = vf_vlan_attr.sriov_business_qinq_vlan_strip_offload | + 
vf_vlan_attr.sriov_business_vlan_filter | + vf_vlan_attr.sriov_business_vlan_strip_offload; + *changed_vlan_attr = wanted_feature; + wanted_vport_bit = vf_vlan_attr.sriov_business_qinq_vlan_strip_offload | + vf_vlan_attr.sriov_business_vlan_filter | + vf_vlan_attr.sriov_business_vlan_strip_offload; + + /* 先将二级开关切换*/ + ret = dpp_vqm_vfid_vlan_set(pf_info, type, wanted_feature); + if (ret != 0) { + LOG_ERR("dpp_vqm_vfid_vlan_set, ret: %d\n", ret); + return -1; + } + + /* 如果vport没有发生变化*/ + if (!(old_vport_bit ^ wanted_vport_bit)) { + return 0; + } + + ret = dpp_vport_business_vlan_offload_en_set(pf_info, wanted_vport_bit); + if (ret != 0) { + LOG_ERR("dpp_vport_business_vlan_offload_en_set, ret: %d\n", + ret); + return -1; + } + + return 0; +} + +static uint32_t zxdh_vf_vlan_strip_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + if (msg->vlan_strip_msg.flag == VLAN_STRIP_MSG_TYPE) { + return zxdh_vf_switch_business_vlan( + pf_info, VLAN_SRIOV_BUSINESS_VLAN_STRIP_OFFLIAD, + msg->vlan_strip_msg.enable); + } else { + return zxdh_vf_switch_business_vlan( + pf_info, VLAN_SRIOV_BUSINESS_QINQ_VLAN_STRIP_OFFLOAD, + msg->vlan_strip_msg.enable); + } +} + +static uint32_t zxdh_vf_vxlan_offload_add(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t mcode_glb_cfg = 0; + uint32_t ret = 0; + + ret = dpp_glb_cfg_get_0(pf_info, &mcode_glb_cfg); + if (ret != 0) { + LOG_ERR("dpp_pktrx_mcode_glb_cfg_get_0 failed: %d\n", ret); + return -1; + } + + mcode_glb_cfg = (mcode_glb_cfg & 0xFFFF0000) | msg->vf_vxlan_port.port; + ret = dpp_glb_cfg_set_0(pf_info, mcode_glb_cfg); + if (ret != 0) { + LOG_ERR("dpp_pktrx_mcode_glb_cfg_set_0 failed: %d\n", ret); + return -1; + } + + return 0; +} + +static uint32_t zxdh_vf_vxlan_offload_del(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct 
zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t mcode_glb_cfg = 0; + uint16_t vxlan_port_cfg = 0; + uint32_t ret = 0; + + ret = dpp_glb_cfg_get_0(pf_info, &mcode_glb_cfg); + if (ret != 0) { + LOG_ERR("dpp_pktrx_mcode_glb_cfg_get_0 failed: %d\n", ret); + return -1; + } + + vxlan_port_cfg = mcode_glb_cfg & 0x0000FFFF; + if (vxlan_port_cfg != msg->vf_vxlan_port.port) { + LOG_ERR("del vxlan offload failed,port[%d] no equals to del_port[%d]\n", + vxlan_port_cfg, msg->vf_vxlan_port.port); + return -1; + } + + mcode_glb_cfg = mcode_glb_cfg & 0xFFFF0000; + ret = dpp_glb_cfg_set_0(pf_info, mcode_glb_cfg); + if (ret != 0) { + LOG_ERR("dpp_pktrx_mcode_glb_cfg_set_0 failed: %d\n", ret); + return -1; + } + + return 0; +} + +static uint32_t zxdh_vf_qinq_tpid_cfg(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vqm_vfid_vlan_set(pf_info, VLAN_SRIOV_BUSINESS_VLAN_TPID, + msg->tpid_cfg_msg.tpid); +} + +static uint32_t zxdh_vf_rx_flow_hash_get(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_rx_flow_hash_get( + pf_info, &reps->rx_flow_hash_set_msg.hash_mode); +} + +static uint32_t zxdh_vf_port_attrs_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + if (msg->port_attr_set_msg.mode == SRIOV_VPORT_TCP_UDP_CHKSUM) { + err = dpp_vport_attr_set(pf_info, SRIOV_VPORT_IP_CHKSUM, + msg->port_attr_set_msg.value); + if (err != 0) { + LOG_ERR("dpp_vport_ip_checksum_set failed: %u\n", err); + return err; + } + } + + return dpp_vport_attr_set(pf_info, msg->port_attr_set_msg.mode, + msg->port_attr_set_msg.value); +} + +static uint32_t zxdh_vf_port_attrs_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + 
struct zxdh_pf_device *pf_dev) +{ + return dpp_vport_attr_get(pf_info, + &reps->port_attr_get_msg.port_attr_entry); +} + +static uint32_t zxdh_vf_promisc_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t err = 0; + + if (!vf_item->trusted) { + LOG_ERR("vf untrusted!\n"); + return 0; + } + + if (msg->promisc_set_msg.mode == ZXDH_PROMISC_MODE) { + LOG_INFO("PROMISC_EN_SET: %d", msg->promisc_set_msg.value); + err = dpp_vport_uc_promisc_set(pf_info, + msg->promisc_set_msg.value); + if (err != 0) { + LOG_ERR("dpp_vport_uc_promisc_set failed: %d\n", err); + return err; + } + err = dpp_vport_promisc_en_set(pf_info, + msg->promisc_set_msg.value); + if (err != 0) { + LOG_ERR("dpp_vport_promisc_en_set failed: %d\n", err); + return err; + } + if (msg->promisc_set_msg.mc_follow != 0) { + LOG_DEBUG("allmulti_follow\n"); + err = dpp_vport_mc_promisc_set( + pf_info, msg->promisc_set_msg.value); + if (err != 0) { + LOG_ERR("dpp_vport_mc_promisc_set failed: %d\n", + err); + return err; + } + } + vf_item->promisc = msg->promisc_set_msg.value; + } else if (msg->promisc_set_msg.mode == ZXDH_ALLMULTI_MODE) { + LOG_INFO("ALLMULTI_EN_SET: %d", msg->promisc_set_msg.value); + err = dpp_vport_mc_promisc_set(pf_info, + msg->promisc_set_msg.value); + if (err != 0) { + LOG_ERR("dpp_vport_mc_promisc_set failed: %d\n", err); + return err; + } + vf_item->mc_promisc = msg->promisc_set_msg.value; + } else { + LOG_ERR("promisc_set_msg.mode[%d] error\n", + msg->promisc_set_msg.mode); + return 1; + } + + return err; +} + +static uint32_t zxdh_vf_vlan_filter_set(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + bool vf_vlan_filter_enable = msg->vlan_filter_set_msg.enable; + + return zxdh_vf_switch_business_vlan(pf_info, + VLAN_SRIOV_BUSINESS_VLAN_FILTER, + vf_vlan_filter_enable); +} + +static uint32_t 
zxdh_vf_rx_vid_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint16_t vid = msg->rx_vid_add_msg.vlan_id; + + return dpp_add_vlan_filter(pf_info, vid); +} + +static uint32_t zxdh_vf_rx_vid_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint16_t vid = msg->rx_vid_del_msg.vlan_id; + + return dpp_del_vlan_filter(pf_info, vid); +} + +static uint32_t zxdh_vf_np_stats_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t vf_id = msg->hdr.vf_id; + LOG_DEBUG( + "zxdh_vf_np_stats_get is called, vport: 0x%x, vf_id %d, pf_info.slot %u\n", + pf_info->vport, vf_id, pf_info->slot); + dpp_stat_port_uc_packet_rx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_rx_vport_unicast_bytes), + &(reps->np_stats_msg.np_rx_vport_unicast_packets)); + dpp_stat_port_uc_packet_tx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_tx_vport_unicast_bytes), + &(reps->np_stats_msg.np_tx_vport_unicast_packets)); + dpp_stat_port_mc_packet_rx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_rx_vport_multicast_bytes), + &(reps->np_stats_msg.np_rx_vport_multicast_packets)); + dpp_stat_port_mc_packet_tx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_tx_vport_multicast_bytes), + &(reps->np_stats_msg.np_tx_vport_multicast_packets)); + dpp_stat_port_bc_packet_rx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_rx_vport_broadcast_bytes), + &(reps->np_stats_msg.np_rx_vport_broadcast_packets)); + dpp_stat_port_bc_packet_tx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_tx_vport_broadcast_bytes), + 
&(reps->np_stats_msg.np_tx_vport_broadcast_packets)); + dpp_stat_MTU_packet_msg_rx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_rx_vport_mtu_drop_bytes), + &(reps->np_stats_msg.np_rx_vport_mtu_drop_packets)); + dpp_stat_MTU_packet_msg_tx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_tx_vport_mtu_drop_bytes), + &(reps->np_stats_msg.np_tx_vport_mtu_drop_packets)); + dpp_stat_plcr_packet_drop_rx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_rx_vport_plcr_drop_bytes), + &(reps->np_stats_msg.np_rx_vport_plcr_drop_packets)); + dpp_stat_plcr_packet_drop_tx_cnt_get( + pf_info, vf_id, msg->np_stats_get_msg.clear_mode, + &(reps->np_stats_msg.np_tx_vport_plcr_drop_bytes), + &(reps->np_stats_msg.np_tx_vport_plcr_drop_packets)); + reps->np_stats_msg.np_tx_vport_ssvpc_packets = 0; + reps->np_stats_msg.rx_vport_idma_drop_packets = + 0; //这里VF来查询统计,对VF不存在的统计需要清零,否则可能是随机数。 + if (msg->np_stats_get_msg.is_init_get) { + memcpy(vf_item->init_np_stats, &reps->np_stats_msg, + sizeof(struct zxdh_en_vport_np_stats)); + } + + return 0; +} + +static uint32_t zxdh_vf_rate_limit_set(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn; + uint16_t vport = msg->hdr.vport; + uint32_t flowid = msg->rate_limit_set_msg.flowid; + uint32_t car_type = msg->rate_limit_set_msg.car_type; + uint32_t max_rate = msg->rate_limit_set_msg.max_rate; + uint32_t min_rate = msg->rate_limit_set_msg.min_rate; + uint32_t is_packet = msg->rate_limit_set_msg.is_packet; + + PLCR_FUNC_DBG_ENTER(); + + rtn = zxdh_plcr_set_rate_limit(pf_dev, is_packet, car_type, vport, + flowid, max_rate, min_rate); + reps->rate_limit_set_rsp.err_code = rtn; + + if (PLCR_REMOVE_RATE_LIMIT == rtn || PLCR_DUPLICATE_RATE == rtn) { + return 0; + } else { + return rtn; + } +} + +static uint32_t zxdh_vf_plcr_uninit(zxdh_msg_info 
*msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint16_t vport; + unsigned long flow_id; + E_PLCR_CAR_TYPE car_index; + struct xarray *xarray_flow; + struct zxdh_plcr_flow *flow = NULL; + + PLCR_FUNC_DBG_ENTER(); + + vport = msg->hdr.vport; + + //deal with car A's flowid + for (car_index = E_PLCR_CAR_A; car_index <= E_PLCR_CAR_B; car_index++) { + xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_index]); + xa_for_each_range(xarray_flow, flow_id, flow, 0, + gaudPlcrCarxFlowIdNum[car_index]) { + if (flow->vport == vport) { + zxdh_plcr_remove_rate_limit(pf_dev, car_index, + (uint32_t)flow_id, + 0); + + //clear vport mappings between car B and car C. + if (E_PLCR_CAR_B == car_index) { + zxdh_plcr_clear_map(pf_dev, car_index, + flow_id); + } + } + } + } + + zxdh_plcr_count_profiles(pf_dev); + + return 0; +} + +static uint32_t zxdh_vf_plcr_flowid_map(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint32_t flowid = 0; + uint32_t map_flowid = 0; + uint32_t map_sp = 0; + + /*提取消息中的参数字段*/ + car_type = msg->plcr_flowid_map_msg.car_type; + flowid = msg->plcr_flowid_map_msg.flowid; + map_flowid = msg->plcr_flowid_map_msg.map_flowid; + map_sp = msg->plcr_flowid_map_msg.sp; + + //前面的流程会判断是否需要进行映射,不会出现vf端口原来是非0group,现在会被group 0覆盖的情况 + PLCR_LOG_INFO( + "dpp_car_queue_map_set: pf_info->vport = 0x%x, car_type = %d, flowid = %d, map_flowid = %d\n", + pf_info->vport, car_type, flowid, map_flowid); + rtn = dpp_car_queue_map_set(pf_info, car_type, flowid, map_flowid, + map_sp); + PLCR_COMM_ASSERT(rtn); + + zxdh_plcr_stroe_map(pf_dev, car_type, flowid, map_flowid); + + return 0; +} + +static uint32_t zxdh_vf_plcr_get_mode(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + 
uint16_t vport = 0; + E_RATE_LIMIT_MODE mode = 0; + + /*提取消息中的参数字段*/ + vport = msg->plcr_work_mode_msg.vport; + + /*获取plcr vport的工作模式*/ + rtn = zxdh_pf_plcr_get_mode(pf_dev, vport, &mode); + PLCR_COMM_ASSERT(rtn); + + reps->plcr_work_mode_rsp.mode = mode; + + return rtn; +} + +static uint32_t zxdh_vf_plcr_set_mode(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint16_t vport = 0; + E_RATE_LIMIT_MODE mode = 0; + + /*提取消息中的参数字段*/ + vport = msg->plcr_work_mode_msg.vport; + mode = msg->plcr_work_mode_msg.mode; + + /*设置plcr vport的工作模式*/ + rtn = zxdh_pf_plcr_set_mode(pf_dev, vport, mode); + PLCR_COMM_ASSERT(rtn); + + return rtn; +} + +static uint32_t zxdh_vf_plcr_flow_init(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int rtn = 0; + uint32_t car_type; + uint32_t flowid; + + car_type = msg->plcr_flow_init_msg.car_type; + flowid = msg->plcr_flow_init_msg.flowid; + pf_info->slot = pf_dev->slot_id; + pf_info->vport = pf_dev->vport; + + //对于vf队列限速,carC flow的初始化时,group_id为0,不需检查目标group中num_vfs是否为0,直接进行初始化 + + PLCR_LOG_INFO( + "dpp_car_queue_cfg_set: vport = 0x%x, car_type = %d, flowid = %d, plcr_en = 0\n", + pf_dev->vport, car_type, flowid); + rtn = dpp_car_queue_cfg_set(pf_info, (uint32_t)car_type, flowid, + DROP_DISABLE, PLCR_DISABLE, 0); + if (rtn) { + PLCR_LOG_ERR("failed to call dpp_car_queue_cfg_set()\n"); + } + + return rtn; +} + +static uint32_t zxdh_vf_plcr_profile_id_add(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint16_t profile_id = 0; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 提取消息中的模板参数*/ + car_type = msg->vf_plcr_profile_id_add_msg.car_type; + + /*2. 
申请新的profile*/ + rtn = zxdh_plcr_req_profile(pf_dev, car_type, &profile_id); + if (rtn) { + LOG_ERR("%s-%d : failed !\n", __FUNCTION__, __LINE__); + return rtn; + } + + /*3. 返回消息*/ + reps->vf_plcr_profile_id_add_rsp.profile_id = profile_id; + + return 0; +} + +static uint32_t zxdh_vf_plcr_profile_id_delete(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint16_t profile_id = 0; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 提取消息中的模板参数*/ + car_type = msg->vf_plcr_profile_id_delete_msg.car_type; + profile_id = msg->vf_plcr_profile_id_delete_msg.profile_id; + + /*2. 限速模板使用计数-1:在下面的解除绑定zxdh_vf_plcr_queue_cfg_set()负责对使用计数-1*/ + + /*3. 释放限速模板资源*/ + rtn = zxdh_plcr_release_profile(pf_dev, car_type, profile_id, 0); + if (rtn) { + LOG_ERR("%s-%d : failed !\n", __FUNCTION__, __LINE__); + return rtn; + } + + /*4. 没有返回消息*/ + + return rtn; +} + +static uint32_t zxdh_vf_plcr_profile_cfg_set(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint32_t pkt_mode = 0; + uint16_t profile_id = 0; + uint32_t max_rate = 0; + uint32_t min_rate = 0; + struct xarray *xarray_profile = NULL; + struct zxdh_plcr_profile *plcr_profile = NULL; + union zxdh_plcr_profile_cfg profile_cfg; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 提取消息中的模板参数*/ + car_type = msg->vf_plcr_profile_cfg_set_msg.car_type; + pkt_mode = msg->vf_plcr_profile_cfg_set_msg.pkt_mode; + profile_id = msg->vf_plcr_profile_cfg_set_msg.profile_id; + + /*2. 校验外面的profile id和里面的profile id必须一致:限速模板联合体下包限速和字节限速结构体的前面2个成员是一样的(profile_id和pkt_sign)*/ + if (profile_id != msg->vf_plcr_profile_cfg_set_msg.profile_cfg + .byte_profile_cfg.profile_id) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + + /*3. 
校验外面的pkt_mode和里面的pkt_sign必须一致:限速模板联合体下包限速和字节限速结构体的前面2个成员是一样的(profile_id和pkt_sign)*/ + if (pkt_mode != msg->vf_plcr_profile_cfg_set_msg.profile_cfg + .byte_profile_cfg.pkt_sign) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + + /*2. 获取profile*/ + xarray_profile = &(pf_dev->plcr_table.plcr_profiles[car_type]); + plcr_profile = xa_load(xarray_profile, profile_id); + + /*3. 包限速处理*/ + if (1 == pkt_mode) { + /*3.1 获取用户传递的字节限速模板参数*/ + profile_cfg.pkt_profile_cfg = + msg->vf_plcr_profile_cfg_set_msg.profile_cfg + .pkt_profile_cfg; + + /*3.2 包模式下直接使用限速模板里的包限速值*/ + max_rate = profile_cfg.pkt_profile_cfg.cir; + min_rate = profile_cfg.pkt_profile_cfg.cir; + + /*3.2 将限速模板的参数,配置到寄存器中去:包限速模板和字节限速模板前面2个成员profile_id和pkt_sign一样,保证了下面接口可以兼容包和字节限速配置*/ + rtn = zxdh_plcr_cfg_profile(pf_dev, car_type, + &profile_cfg.byte_profile_cfg); + if (rtn) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + } + /*4. 字节限速处理*/ + else { + /*4.1 获取用户传递的字节限速模板参数*/ + profile_cfg.byte_profile_cfg = + msg->vf_plcr_profile_cfg_set_msg.profile_cfg + .byte_profile_cfg; + + /*4.2 将寄存器中的配置值,转换成用户限速值(单位:Mbit/s)*/ + max_rate = zxdh_plcr_reg_maxrate_user( + profile_cfg.byte_profile_cfg.eir); + min_rate = zxdh_plcr_reg_maxrate_user( + profile_cfg.byte_profile_cfg.cir); + + /*4.3 将限速模板的参数,配置到寄存器中去*/ + rtn = zxdh_plcr_cfg_profile(pf_dev, car_type, + &profile_cfg.byte_profile_cfg); + if (rtn) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + } + + /*5. 将限速模板参数保存到profile下面*/ + rtn = zxdh_plcr_store_profile(pf_dev, car_type, max_rate, min_rate, + &profile_cfg.byte_profile_cfg); + if (rtn) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + + /*6. 
没有返回消息*/ + + return 0; +} + +static uint32_t zxdh_vf_plcr_profile_cfg_get(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint32_t pkt_mode = 0; + uint16_t profile_id = 0; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 提取消息中的模板参数*/ + car_type = msg->vf_plcr_profile_cfg_get_msg.car_type; + pkt_mode = msg->vf_plcr_profile_cfg_get_msg.pkt_mode; + profile_id = msg->vf_plcr_profile_cfg_get_msg.profile_id; + + /*2. 从寄存器中,获取限速模板的参数*/ + rtn = zxdh_plcr_get_profile( + pf_dev, car_type, pkt_mode, profile_id, + &reps->vf_plcr_profile_cfg_get_rsp.profile_cfg.byte_profile_cfg); + if (rtn) { + LOG_ERR("%s-%d : failed to call zxdh_plcr_cfg_profile()\n", + __FUNCTION__, __LINE__); + return rtn; + } + + /*3. 返回消息:上面已经填充好返回消息*/ + + return 0; +} + +static uint32_t zxdh_vf_plcr_queue_cfg_set(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t car_type = 0; + uint32_t drop_flag = 0; + uint32_t plcr_en = 0; + uint32_t flow_id = 0; + uint32_t profile_id = 0; + uint16_t vport = msg->hdr.vport; + struct xarray *xarray_profile = NULL; + struct zxdh_plcr_flow *plcr_flow = NULL; + struct zxdh_plcr_profile *plcr_profile = NULL; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + LOG_INFO("%s-%d:vport = 0x%x\n", __FUNCTION__, __LINE__, vport); + + /*1. 提取消息中的模板参数*/ + car_type = msg->vf_plcr_queue_cfg_set_msg.car_type; + drop_flag = msg->vf_plcr_queue_cfg_set_msg.drop_flag; + plcr_en = msg->vf_plcr_queue_cfg_set_msg.plcr_en; + flow_id = msg->vf_plcr_queue_cfg_set_msg.flow_id; + profile_id = msg->vf_plcr_queue_cfg_set_msg.profile_id; + + /*2. 
获取xarray中存储的profile*/ + xarray_profile = &(pf_dev->plcr_table.plcr_profiles[car_type]); + plcr_profile = xa_load(xarray_profile, profile_id); + if (NULL == plcr_profile) { + LOG_ERR("%s-%d : failed\n", __FUNCTION__, __LINE__); + return -EINVAL; + } + + /*3. 绑定flow和profile*/ + if (PLCR_ENABLE == plcr_en) { + /*3.1 申请flow结构体,并存储到xarray*/ + rtn = zxdh_plcr_req_flow(pf_dev, car_type, flow_id, &plcr_flow); + if (rtn) { + LOG_ERR("%s-%d : kzalloc failed\n", __FUNCTION__, + __LINE__); + return -EINVAL; + } + + /*3.2 更新flow信息*/ + zxdh_plcr_update_flow(plcr_flow, vport, plcr_profile->max_rate, + plcr_profile->min_rate); + + /*3.3 更新flow信息*/ + plcr_flow->profile_id = profile_id; + + /*3.4 将flow与profile进行绑定*/ + rtn = dpp_car_queue_cfg_set(pf_info, car_type, flow_id, + drop_flag, plcr_en, profile_id); + if (rtn) { + LOG_ERR("%s-%d : failed to call dpp_car_queue_cfg_set()\n", + __FUNCTION__, __LINE__); + + /*释放先前申请的flow*/ + zxdh_plcr_release_flow(pf_dev, car_type, flow_id); + return -EINVAL; + } + + /*3.4 模板使用计数+1*/ + zxdh_plcr_count_up_profile(pf_dev, car_type, profile_id); + } + /*4. 解除绑定flow和profile*/ + else { + /*4.1 解除绑定flow与profile*/ + rtn = dpp_car_queue_cfg_set(pf_info, car_type, flow_id, + drop_flag, plcr_en, profile_id); + if (rtn) { + LOG_ERR("%s-%d : failed to call dpp_car_queue_cfg_set()\n", + __FUNCTION__, __LINE__); + return -EINVAL; + } + + /*4.2 释放flow*/ + zxdh_plcr_release_flow(pf_dev, car_type, flow_id); + + /*4.3 模板使用计数-1*/ + zxdh_plcr_count_down_profile(pf_dev, car_type, profile_id); + } + + /*5. 没有返回消息*/ + + return 0; +} + +static uint32_t zxdh_vf_plcr_port_meter_stat_clr(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint64_t pkB_cnt = 0; + uint64_t pk_cnt = 0; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 
dpp接口函数原型如下:mode = 1,表示读清零*/ + dpp_stat_plcr_packet_drop_tx_cnt_get(pf_info, msg->hdr.vf_id, 1, + &pkB_cnt, &pk_cnt); + dpp_stat_plcr_packet_drop_rx_cnt_get(pf_info, msg->hdr.vf_id, 1, + &pkB_cnt, &pk_cnt); + + /*2. 没有返回消息*/ + return rtn; +} + +static uint32_t zxdh_vf_plcr_port_meter_stat_get(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + uint32_t direction = 0; + uint32_t is_clr = 0; + uint64_t *p_pkB_cnt = NULL; + uint64_t *p_pk_cnt = NULL; + + LOG_INFO("%s-%d:enter\n", __FUNCTION__, __LINE__); + /*1. 提取消息中的参数*/ + direction = msg->vf_plcr_port_meter_stat_get_msg.direction; + is_clr = msg->vf_plcr_port_meter_stat_get_msg.is_clr; + + /*2. 获取返回信息地址*/ + p_pkB_cnt = &(reps->vf_plcr_port_meter_stat_get_rsp.drop_pkB_cnt); + p_pk_cnt = &(reps->vf_plcr_port_meter_stat_get_rsp.drop_pk_cnt); + + /*2. 获取丢包统计*/ + if (1 == direction) { + dpp_stat_plcr_packet_drop_tx_cnt_get( + pf_info, msg->hdr.vf_id, is_clr, p_pkB_cnt, p_pk_cnt); + } else { + dpp_stat_plcr_packet_drop_rx_cnt_get( + pf_info, msg->hdr.vf_id, is_clr, p_pkB_cnt, p_pk_cnt); + } + + /*3. 
返回丢包统计值*/ + + return rtn; +} + +static uint32_t zxdh_vf_call_np_1588(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t vfid = msg->vf_1588_call_np.vfid; + uint32_t interface_num = msg->vf_1588_call_np.call_np_interface_num; + uint32_t opt = msg->vf_1588_call_np.ptp_tc_enable_opt; + + switch (interface_num) { + case PTP_PORT_VFID_SET: { + LOG_INFO("call dpp_ptp_port_vfid_set\n"); + dpp_ptp_port_vfid_set(pf_info, vfid); + break; + } + case PTP_TC_ENABLE_SET: { + LOG_INFO("call dpp_ptp_tc_enable_set\n"); + dpp_ptp_tc_enable_set(pf_info, opt); + break; + } + default: { + LOG_ERR("cannot found the interface_num %u\n", interface_num); + return -1; + } + } + + return 0; +} + +static uint32_t zxdh_vf_slot_id_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + reps->slot_info.slot_id = pf_dev->slot_id; + return 0; +} + +static uint32_t zxdh_vf_mcode_feature_get(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + reps->mcode_feature_rsp.len = sizeof(reps->mcode_feature_rsp.feature); + reps->mcode_feature_rsp.feature = pf_dev->mcode_feature; + return 0; +} + +static uint32_t zxdh_vf_k_cmpat_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + reps->kernel_cmpat_rsp.k_msg_idmax = ZXDH_MSG_TYPE_CNT_MAX; + return 0; +} + +static uint32_t zxdh_vf_1588_enable_proc(zxdh_msg_info *msg, + zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t proc_cmd = 0; + uint32_t enable = 0; + uint32_t ret = 0; + ZXDH_SRIOV_VPORT_T port_attr_entry = { 0 }; + + proc_cmd = msg->vf_1588_enable.proc_cmd; + switch (proc_cmd) { + case ZXDH_VF_1588_ENABLE_SET: { + enable = 
msg->vf_1588_enable.enable_1588_vf; + pf_info->vport = msg->hdr.vport; + ret = dpp_vport_attr_set(pf_info, SRIOV_VPORT_1588_EN, + (uint32_t)enable); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_1588_EN failed, ret:%d\n", + ret); + return ret; + } + break; + } + case ZXDH_VF_1588_ENABLE_GET: { + pf_info->vport = msg->hdr.vport; + ret = dpp_vport_attr_get(pf_info, &port_attr_entry); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_get SRIOV_VPORT_1588_EN failed, ret:%d\n", + ret); + return ret; + } + reps->vf_1588_enable_rsp.enable_1588_vf_rsp = + port_attr_entry.flag_1588_enable; + break; + } + default: { + LOG_ERR("cannot found proc_cmd %u\n", proc_cmd); + break; + } + } + return 0; +} + +static uint32_t zxdh_vf_flow_hw_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t err = 0; + uint32_t handle = 0; + uint8_t *key = NULL; + uint8_t *key_mask = NULL; + uint8_t *result = NULL; + int32_t vf_idx = 0; + uint8_t eth_type_bit = 0; + uint16_t sriov_tunnel_encap0_index = 0; + uint16_t sriov_tunnel_encap1_index = 0; + zxdh_flow_op_msg *f_msg = &msg->flow_msg; + zxdh_flow_op_rsp *f_rsp = &reps->flow_rsp; + + vf_idx = msg->hdr.pcie_id & (0xff); + // 申请handle + err = dpp_fd_acl_index_request(pf_info, &handle); + if (err) { + LOG_ERR("failed to request index!!!\n"); + /* Started by AICoder, pid:u97cfh8123k1f9c14867087ab092ec03e7f1b105 */ + zte_strncpy_s(f_rsp->error.reason, "failed to request index!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + /* Ended by AICoder, pid:u97cfh8123k1f9c14867087ab092ec03e7f1b105 */ + return -EINVAL; + } + + // 配置规则 + key = (uint8_t *)&f_msg->dh_flow.flowentry.fd_flow.key; + key_mask = (uint8_t *)&f_msg->dh_flow.flowentry.fd_flow.key_mask; + if ((f_msg->dh_flow.flowentry.fd_flow.result.action_idx & + (1 << FD_ACTION_COUNT_BIT)) != 0) { + 
f_msg->dh_flow.flowentry.fd_flow.result.countid = handle; + } + result = (uint8_t *)&f_msg->dh_flow.flowentry.fd_flow.result; + + if ((f_msg->dh_flow.flowentry.fd_flow.result.action_idx & + (1 << FD_ACTION_VXLAN_ENCAP)) != 0) { + f_msg->dh_flow.flowentry.fd_flow.result + .sriov_tunnel_encap0_index = handle; + sriov_tunnel_encap0_index = handle; + + if (vf_idx < (ZXDH_MAX_VF - 1)) { + f_msg->dh_flow.flowentry.fd_flow.result + .sriov_tunnel_encap1_index = + f_msg->dh_flow.hash_search_idx * + PF_HAS_MAX_ENCAP1_NUM + + vf_idx + 1; + } else { + LOG_ERR("encap1 vf_index is too big:%d\n", vf_idx); + zte_strncpy_s(f_rsp->error.reason, + "encap1 vf_index is too big!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = + '\0'; + return -EINVAL; + } + + sriov_tunnel_encap1_index = + f_msg->dh_flow.flowentry.fd_flow.result + .sriov_tunnel_encap1_index; + eth_type_bit = f_msg->encap0.eth_type; + err = dpp_eram_entry_insert(pf_info, + ZXDH_SDT_TUNNEL_ENCAP0_TABLE, + sriov_tunnel_encap0_index * 2, + (uint8_t *)&(f_msg->encap0)); + if (err) { + LOG_ERR("dpp_eram_entry_insert encap0 table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert encap0 table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = + '\0'; + return -EINVAL; + } + + err = dpp_eram_entry_insert(pf_info, + ZXDH_SDT_TUNNEL_ENCAP0_TABLE, + sriov_tunnel_encap0_index * 2 + 1, + (uint8_t *)&(f_msg->encap0.dip)); + if (err) { + LOG_ERR("dpp_eram_entry_insert encap0 dip table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert encap0 dip table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = + '\0'; + return -EINVAL; + } + + if (eth_type_bit == 0) { + err = dpp_eram_entry_insert( + pf_info, ZXDH_SDT_TUNNEL_ENCAP1_TABLE, + sriov_tunnel_encap1_index * 4, + (uint8_t *)&(f_msg->encap1)); + if (err) { + 
LOG_ERR("dpp_eram_entry_insert ipv4 encap1 table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert ipv4 encap1 table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - + 1] = '\0'; + return -EINVAL; + } + + err = dpp_eram_entry_insert( + pf_info, ZXDH_SDT_TUNNEL_ENCAP1_TABLE, + sriov_tunnel_encap1_index * 4 + 2, + (uint8_t *)&(f_msg->encap1.sip)); + if (err) { + LOG_ERR("dpp_eram_entry_insert ipv4 encap1 sip table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert ipv4 encap1 sip table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - + 1] = '\0'; + return -EINVAL; + } + } else { + err = dpp_eram_entry_insert( + pf_info, ZXDH_SDT_TUNNEL_ENCAP1_TABLE, + sriov_tunnel_encap1_index * 4 + 1, + (uint8_t *)&(f_msg->encap1)); + if (err) { + LOG_ERR("dpp_eram_entry_insert ipv6 encap1 table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert ipv6 encap1 table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - + 1] = '\0'; + return -EINVAL; + } + + err = dpp_eram_entry_insert( + pf_info, ZXDH_SDT_TUNNEL_ENCAP1_TABLE, + sriov_tunnel_encap1_index * 4 + 3, + (uint8_t *)&(f_msg->encap1.sip)); + if (err) { + LOG_ERR("dpp_eram_entry_insert ipv6 encap1 sip table failed\n"); + zte_strncpy_s( + f_rsp->error.reason, + "dpp_eram_entry_insert ipv6 encap1 sip table failed", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - + 1] = '\0'; + return -EINVAL; + } + } + } + + err = dpp_fd_acl_entry_add(pf_info, handle, key, key_mask, result); + if (err) { + LOG_ERR("failed to call dpp_fd_acl_entry_add()\n"); + zte_strncpy_s(f_rsp->error.reason, + "failed to call dpp_fd_acl_entry_add()", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + // 清空统计空间 + err = 
dpp_stat_fd_stat_cnt_get(pf_info, handle, RD_CLR_MODE_CLR, + &f_rsp->count.bytes, &f_rsp->count.hits); + if (err) { + LOG_ERR("failed to clear fd cnt!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to clear fd cnt!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + f_rsp->dh_flow.flowentry.hw_idx = handle; + + return 0; +} + +static uint32_t zxdh_vf_flow_hw_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t err = 0; + uint32_t handle = 0; + zxdh_flow_op_msg *f_msg = &msg->flow_msg; + zxdh_flow_op_rsp *f_rsp = &reps->flow_rsp; + + handle = f_msg->dh_flow.flowentry.hw_idx; + + // 删表 + err = dpp_fd_acl_entry_del(pf_info, handle); + if (err) { + LOG_ERR("failed to call dpp_fd_acl_entry_del()\n"); + zte_strncpy_s(f_rsp->error.reason, + "failed to call dpp_fd_acl_entry_del()", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + // 清空统计空间 + err = dpp_stat_fd_stat_cnt_get(pf_info, handle, RD_CLR_MODE_CLR, + &f_rsp->count.bytes, &f_rsp->count.hits); + if (err) { + LOG_ERR("failed to clear fd cnt!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to clear fd cnt!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + // 释放handle + err = dpp_fd_acl_index_release(pf_info, handle); + if (err) { + LOG_ERR("failed to release index!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to release index!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + f_rsp->dh_flow.flowentry.fd_flow.result = + f_msg->dh_flow.flowentry.fd_flow.result; + + return 0; +} + +static uint32_t zxdh_vf_flow_hw_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item 
*vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t err = 0; + uint32_t handle = 0; + uint8_t *key = NULL; + uint8_t *key_mask = NULL; + uint8_t *result = NULL; + zxdh_flow_op_msg *f_msg = &msg->flow_msg; + zxdh_flow_op_rsp *f_rsp = &reps->flow_rsp; + + handle = f_msg->dh_flow.flowentry.hw_idx; + + // 查询 + key = (uint8_t *)&f_rsp->dh_flow.flowentry.fd_flow.key; + key_mask = (uint8_t *)&f_rsp->dh_flow.flowentry.fd_flow.key_mask; + result = (uint8_t *)&f_rsp->dh_flow.flowentry.fd_flow.result; + + err = dpp_fd_acl_entry_get(pf_info, handle, key, key_mask, result); + if (err) { + LOG_ERR("failed to get fd rule!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to get fd rule!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + // 统计 + f_rsp->count.bytes = 0; + f_rsp->count.hits = 0; + err = dpp_stat_fd_stat_cnt_get(pf_info, handle, RD_CLR_MODE_UNCLR, + &f_rsp->count.bytes, &f_rsp->count.hits); + if (err) { + LOG_ERR("failed to get fd cnt!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to get fd cnt!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + return 0; +} + +static uint32_t zxdh_vf_flow_hw_flush(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t err = 0; + zxdh_flow_op_rsp *f_rsp = &reps->flow_rsp; + + // 删除所有vport表项 + err = dpp_fd_acl_all_delete(pf_info); + if (err) { + LOG_ERR("failed to detele all fd!!!\n"); + zte_strncpy_s(f_rsp->error.reason, "failed to detele all fd!!!", + sizeof(f_rsp->error.reason) - 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + // 清除所有vport统计 + err = dpp_fd_acl_stat_clear(pf_info); + if (err) { + LOG_ERR("failed to clear fd stat!!!\n"); + zte_strncpy_s(f_rsp->error.reason, + "failed to gclear fd stat!!!", + sizeof(f_rsp->error.reason) 
- 1); + f_rsp->error.reason[sizeof(f_rsp->error.reason) - 1] = '\0'; + return -EINVAL; + } + + return 0; +} + +static int32_t zxdh_flow_table_vf_action_add(struct ethtool_rx_flow_spec *fs, + ZXDH_FD_CFG_T *p_fd_cfg, + DPP_PF_INFO_T *pf_info) +{ + uint8_t vf_id = 0; + uint32_t queue_id = 0; + uint32_t base_qid = 0; + int32_t ret = 0; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_DROP; + return 0; + } + + vf_id = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + queue_id = ethtool_get_flow_spec_ring(fs->ring_cookie); + + if (vf_id > 0) { + LOG_ERR("vf fd action do not support specific vf"); + return -EINVAL; + } + + if (queue_id == QUEUE_RSS) { + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_RSS; + return 0; + } + + ret = dpp_vport_base_qid_get(pf_info, &base_qid); + if (ret) { + LOG_ERR("zxdh_cfg_fd_add: get vf base qid failed"); + return ret; + } + + p_fd_cfg->as_rlt.action_index |= ACTION_TYPE_QUEUE; + p_fd_cfg->as_rlt.v_qid = queue_id * 2 + base_qid; + LOG_INFO("zxdh_cfg_vf_fd_add, phy queue id is %u", + p_fd_cfg->as_rlt.v_qid); + return 0; +} + +void zxdh_flow_table_add(struct ethtool_rx_flow_spec *fs, + ZXDH_FD_CFG_T *p_fd_cfg, DPP_PF_INFO_T *pf_info) +{ + /* 设置默认掩码为不关心 */ + zte_memset_s(&p_fd_cfg->mask, 0xff, sizeof(ZXDH_FD_CFG_MASK)); + + p_fd_cfg->key.vqm_vfid = VQM_VFID(pf_info->vport); + p_fd_cfg->mask.vqm_vfid = ETHTOOL_TRUE_MASK; + + /* 根据不同流类型,设置掩码和键值 */ + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + if (!is_zero_ether_addr(fs->m_u.ether_spec.h_dest)) { + zte_memcpy_s(p_fd_cfg->key.dmac, + fs->h_u.ether_spec.h_dest, ETH_ALEN); + zte_memset_s(p_fd_cfg->mask.dmac, ETHTOOL_TRUE_MASK, + ETH_ALEN); + LOG_INFO("dmac is %pM\n", p_fd_cfg->key.dmac); + } + if (!is_zero_ether_addr(fs->m_u.ether_spec.h_source)) { + zte_memcpy_s(p_fd_cfg->key.smac, + fs->h_u.ether_spec.h_source, ETH_ALEN); + zte_memset_s(p_fd_cfg->mask.smac, ETHTOOL_TRUE_MASK, + ETH_ALEN); + LOG_INFO("smac is %pM\n", 
p_fd_cfg->key.smac); + } + if (fs->m_u.ether_spec.h_proto) { + p_fd_cfg->key.ethtype = + ntohs(fs->h_u.ether_spec.h_proto); + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + } + break; + case IPV4_USER_FLOW: + if (fs->m_u.usr_ip4_spec.ip4src) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.sip + 12, + &fs->h_u.usr_ip4_spec.ip4src, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.sip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("sip: %d.%d.%d.%d\n", p_fd_cfg->key.sip[12], + p_fd_cfg->key.sip[13], p_fd_cfg->key.sip[14], + p_fd_cfg->key.sip[15]); + } + if (fs->m_u.usr_ip4_spec.ip4dst) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.dip + 12, + &fs->h_u.usr_ip4_spec.ip4dst, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.dip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("dip: %d.%d.%d.%d\n", p_fd_cfg->key.dip[12], + p_fd_cfg->key.dip[13], p_fd_cfg->key.dip[14], + p_fd_cfg->key.dip[15]); + } + if (fs->m_u.usr_ip4_spec.proto) { + p_fd_cfg->key.proto = fs->h_u.usr_ip4_spec.proto; + p_fd_cfg->mask.proto = ETHTOOL_TRUE_MASK; + LOG_INFO("proto: %d\n", p_fd_cfg->key.proto); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV4; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + break; + case TCP_V4_FLOW: + if (fs->m_u.tcp_ip4_spec.ip4src) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.sip + 12, + &fs->h_u.tcp_ip4_spec.ip4src, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.sip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("sip: %d.%d.%d.%d\n", p_fd_cfg->key.sip[12], + p_fd_cfg->key.sip[13], p_fd_cfg->key.sip[14], + p_fd_cfg->key.sip[15]); + } + if (fs->m_u.tcp_ip4_spec.ip4dst) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.dip + 12, + &fs->h_u.tcp_ip4_spec.ip4dst, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.dip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("dip: %d.%d.%d.%d\n", p_fd_cfg->key.dip[12], + 
p_fd_cfg->key.dip[13], p_fd_cfg->key.dip[14], + p_fd_cfg->key.dip[15]); + } + if (fs->m_u.tcp_ip4_spec.psrc) { + p_fd_cfg->key.sport = ntohs(fs->h_u.tcp_ip4_spec.psrc); + p_fd_cfg->mask.sport = ETHTOOL_TRUE_MASK; + LOG_INFO("sport is %d\n", p_fd_cfg->key.sport); + } + if (fs->m_u.tcp_ip4_spec.pdst) { + p_fd_cfg->key.dport = ntohs(fs->h_u.tcp_ip4_spec.pdst); + p_fd_cfg->mask.dport = ETHTOOL_TRUE_MASK; + LOG_INFO("dport is %d\n", p_fd_cfg->key.dport); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV4; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + p_fd_cfg->key.proto = IPPROTO_TCP; + p_fd_cfg->mask.proto = ETHTOOL_TRUE_MASK; + LOG_INFO("proto is %d\n", p_fd_cfg->key.proto); + break; + case UDP_V4_FLOW: + if (fs->m_u.udp_ip4_spec.ip4src) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.sip + 12, + &fs->h_u.udp_ip4_spec.ip4src, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.sip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("sip: %d.%d.%d.%d\n", p_fd_cfg->key.sip[12], + p_fd_cfg->key.sip[13], p_fd_cfg->key.sip[14], + p_fd_cfg->key.sip[15]); + } + if (fs->m_u.udp_ip4_spec.ip4dst) { + zte_memcpy_s((uint8_t *)p_fd_cfg->key.dip + 12, + &fs->h_u.udp_ip4_spec.ip4dst, + ETHTOOL_IP4_LEN); + zte_memset_s((uint8_t *)p_fd_cfg->mask.dip + 12, + ETHTOOL_TRUE_MASK, ETHTOOL_IP4_LEN); + LOG_INFO("dip: %d.%d.%d.%d\n", p_fd_cfg->key.dip[12], + p_fd_cfg->key.dip[13], p_fd_cfg->key.dip[14], + p_fd_cfg->key.dip[15]); + } + if (fs->m_u.udp_ip4_spec.psrc) { + p_fd_cfg->key.sport = ntohs(fs->h_u.udp_ip4_spec.psrc); + p_fd_cfg->mask.sport = ETHTOOL_TRUE_MASK; + LOG_INFO("sport is %d\n", p_fd_cfg->key.sport); + } + if (fs->m_u.udp_ip4_spec.pdst) { + p_fd_cfg->key.dport = ntohs(fs->h_u.udp_ip4_spec.pdst); + p_fd_cfg->mask.dport = ETHTOOL_TRUE_MASK; + LOG_INFO("dport is %d\n", p_fd_cfg->key.dport); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV4; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", 
p_fd_cfg->key.ethtype); + p_fd_cfg->key.proto = IPPROTO_UDP; + p_fd_cfg->mask.proto = ETHTOOL_TRUE_MASK; + LOG_INFO("proto is %d\n", p_fd_cfg->key.proto); + break; + case IPV6_USER_FLOW: + if (!ipv6_addr_any( + (struct in6_addr *)fs->m_u.usr_ip6_spec.ip6src)) { + zte_memcpy_s(p_fd_cfg->key.sip, + &fs->h_u.usr_ip6_spec.ip6src, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.sip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "SIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.sip[0], p_fd_cfg->key.sip[1], + p_fd_cfg->key.sip[2], p_fd_cfg->key.sip[3], + p_fd_cfg->key.sip[4], p_fd_cfg->key.sip[5], + p_fd_cfg->key.sip[6], p_fd_cfg->key.sip[7], + p_fd_cfg->key.sip[8], p_fd_cfg->key.sip[9], + p_fd_cfg->key.sip[10], p_fd_cfg->key.sip[11], + p_fd_cfg->key.sip[12], p_fd_cfg->key.sip[13], + p_fd_cfg->key.sip[14], p_fd_cfg->key.sip[15]); + } + if (!ipv6_addr_any( + (struct in6_addr *)fs->m_u.usr_ip6_spec.ip6dst)) { + zte_memcpy_s(p_fd_cfg->key.dip, + &fs->h_u.usr_ip6_spec.ip6dst, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.dip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "DIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.dip[0], p_fd_cfg->key.dip[1], + p_fd_cfg->key.dip[2], p_fd_cfg->key.dip[3], + p_fd_cfg->key.dip[4], p_fd_cfg->key.dip[5], + p_fd_cfg->key.dip[6], p_fd_cfg->key.dip[7], + p_fd_cfg->key.dip[8], p_fd_cfg->key.dip[9], + p_fd_cfg->key.dip[10], p_fd_cfg->key.dip[11], + p_fd_cfg->key.dip[12], p_fd_cfg->key.dip[13], + p_fd_cfg->key.dip[14], p_fd_cfg->key.dip[15]); + } + if (fs->m_u.usr_ip6_spec.l4_proto) { + p_fd_cfg->key.proto = fs->h_u.usr_ip6_spec.l4_proto; + p_fd_cfg->mask.proto = ETHTOOL_TRUE_MASK; + LOG_INFO("proto: %d\n", p_fd_cfg->key.proto); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV6; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + break; + case TCP_V6_FLOW: + if (!ipv6_addr_any( + 
(struct in6_addr *)fs->m_u.tcp_ip6_spec.ip6src)) { + zte_memcpy_s(p_fd_cfg->key.sip, + &fs->h_u.tcp_ip6_spec.ip6src, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.sip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "SIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.sip[0], p_fd_cfg->key.sip[1], + p_fd_cfg->key.sip[2], p_fd_cfg->key.sip[3], + p_fd_cfg->key.sip[4], p_fd_cfg->key.sip[5], + p_fd_cfg->key.sip[6], p_fd_cfg->key.sip[7], + p_fd_cfg->key.sip[8], p_fd_cfg->key.sip[9], + p_fd_cfg->key.sip[10], p_fd_cfg->key.sip[11], + p_fd_cfg->key.sip[12], p_fd_cfg->key.sip[13], + p_fd_cfg->key.sip[14], p_fd_cfg->key.sip[15]); + } + if (!ipv6_addr_any( + (struct in6_addr *)fs->m_u.tcp_ip6_spec.ip6dst)) { + zte_memcpy_s(p_fd_cfg->key.dip, + &fs->h_u.tcp_ip6_spec.ip6dst, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.dip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "DIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.dip[0], p_fd_cfg->key.dip[1], + p_fd_cfg->key.dip[2], p_fd_cfg->key.dip[3], + p_fd_cfg->key.dip[4], p_fd_cfg->key.dip[5], + p_fd_cfg->key.dip[6], p_fd_cfg->key.dip[7], + p_fd_cfg->key.dip[8], p_fd_cfg->key.dip[9], + p_fd_cfg->key.dip[10], p_fd_cfg->key.dip[11], + p_fd_cfg->key.dip[12], p_fd_cfg->key.dip[13], + p_fd_cfg->key.dip[14], p_fd_cfg->key.dip[15]); + } + if (fs->m_u.tcp_ip6_spec.psrc) { + p_fd_cfg->key.sport = ntohs(fs->h_u.tcp_ip6_spec.psrc); + p_fd_cfg->mask.sport = ETHTOOL_TRUE_MASK; + LOG_INFO("sport is %d\n", p_fd_cfg->key.sport); + } + if (fs->m_u.tcp_ip6_spec.pdst) { + p_fd_cfg->key.dport = ntohs(fs->h_u.tcp_ip6_spec.pdst); + p_fd_cfg->mask.dport = ETHTOOL_TRUE_MASK; + LOG_INFO("dport is %d\n", p_fd_cfg->key.dport); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV6; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + p_fd_cfg->key.proto = IPPROTO_TCP; + p_fd_cfg->mask.proto = 
ETHTOOL_TRUE_MASK; + LOG_INFO("proto is %d\n", p_fd_cfg->key.proto); + break; + case UDP_V6_FLOW: + if (!ipv6_addr_any( + (struct in6_addr *)fs->m_u.udp_ip6_spec.ip6src)) { + zte_memcpy_s(p_fd_cfg->key.sip, + &fs->h_u.udp_ip6_spec.ip6src, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.sip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "SIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.sip[0], p_fd_cfg->key.sip[1], + p_fd_cfg->key.sip[2], p_fd_cfg->key.sip[3], + p_fd_cfg->key.sip[4], p_fd_cfg->key.sip[5], + p_fd_cfg->key.sip[6], p_fd_cfg->key.sip[7], + p_fd_cfg->key.sip[8], p_fd_cfg->key.sip[9], + p_fd_cfg->key.sip[10], p_fd_cfg->key.sip[11], + p_fd_cfg->key.sip[12], p_fd_cfg->key.sip[13], + p_fd_cfg->key.sip[14], p_fd_cfg->key.sip[15]); + } + if (!ipv6_addr_any( + (struct in6_addr *)fs->m_u.udp_ip6_spec.ip6dst)) { + zte_memcpy_s(p_fd_cfg->key.dip, + &fs->h_u.udp_ip6_spec.ip6dst, + ETHTOOL_IP6_LEN); + zte_memset_s(p_fd_cfg->mask.dip, ETHTOOL_TRUE_MASK, + ETHTOOL_IP6_LEN); + LOG_INFO( + "DIP: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", + p_fd_cfg->key.dip[0], p_fd_cfg->key.dip[1], + p_fd_cfg->key.dip[2], p_fd_cfg->key.dip[3], + p_fd_cfg->key.dip[4], p_fd_cfg->key.dip[5], + p_fd_cfg->key.dip[6], p_fd_cfg->key.dip[7], + p_fd_cfg->key.dip[8], p_fd_cfg->key.dip[9], + p_fd_cfg->key.dip[10], p_fd_cfg->key.dip[11], + p_fd_cfg->key.dip[12], p_fd_cfg->key.dip[13], + p_fd_cfg->key.dip[14], p_fd_cfg->key.dip[15]); + } + if (fs->m_u.udp_ip6_spec.psrc) { + p_fd_cfg->key.sport = ntohs(fs->h_u.udp_ip6_spec.psrc); + p_fd_cfg->mask.sport = ETHTOOL_TRUE_MASK; + LOG_INFO("sport is %d\n", p_fd_cfg->key.dport); + } + if (fs->m_u.udp_ip6_spec.pdst) { + p_fd_cfg->key.dport = ntohs(fs->h_u.udp_ip6_spec.pdst); + p_fd_cfg->mask.dport = ETHTOOL_TRUE_MASK; + LOG_INFO("dport is %d\n", p_fd_cfg->key.dport); + } + p_fd_cfg->key.ethtype = ETH_PKT_IPV6; + p_fd_cfg->mask.ethtype = ETHTOOL_TRUE_MASK; + 
LOG_INFO("ethertype is 0x%x\n", p_fd_cfg->key.ethtype); + p_fd_cfg->key.proto = IPPROTO_UDP; + p_fd_cfg->mask.proto = ETHTOOL_TRUE_MASK; + LOG_INFO("proto is %d\n", p_fd_cfg->key.proto); + break; + default: + break; + } + + /* 添加Vlan扩展字段 */ + if ((fs->flow_type & FLOW_EXT)) { + LOG_INFO("fs->h_ext.vlan_tci is %d\n", + ntohs(fs->h_ext.vlan_tci)); + if (fs->m_ext.vlan_tci) { + p_fd_cfg->key.cvlan_pri = + (ntohs(fs->h_ext.vlan_tci) & VLAN_PCP_MASK) >> + VLAN_PCP_SHIFT; + p_fd_cfg->mask.cvlan_pri = ETHTOOL_TRUE_MASK; + p_fd_cfg->key.cvlanid = ntohs(fs->h_ext.vlan_tci) & + VLAN_VID_MASK; + p_fd_cfg->mask.cvlanid = ETHTOOL_TRUE_MASK; + LOG_INFO("VLAN TCI: PRI=%u, VID=%u\n", + p_fd_cfg->key.cvlan_pri, + p_fd_cfg->key.cvlanid); + } + } + + /* 添加mac扩展字段 */ + if ((fs->flow_type & FLOW_MAC_EXT) && + (!is_zero_ether_addr(fs->m_ext.h_dest))) { + zte_memcpy_s(p_fd_cfg->key.dmac, fs->h_ext.h_dest, ETH_ALEN); + zte_memset_s(p_fd_cfg->mask.dmac, ETHTOOL_TRUE_MASK, ETH_ALEN); + LOG_INFO("dmac is %pM\n", p_fd_cfg->key.dmac); + } +} +EXPORT_SYMBOL(zxdh_flow_table_add); + +static uint32_t zxdh_vf_fd_add(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + ZXDH_FD_CFG_T p_fd_cfg = { 0 }; + uint32_t handle = 0; + uint32_t err = 0; + LOG_INFO("zxdh_vf_fd_add start\n"); + + /* 填写fd表 */ + zxdh_flow_table_add(&msg->vf_fd_cfg_msg.fs, &p_fd_cfg, pf_info); + err = zxdh_flow_table_vf_action_add(&msg->vf_fd_cfg_msg.fs, &p_fd_cfg, + pf_info); + if (err != 0) { + LOG_ERR("failed to add vf_action!\n"); + return 1; + } + /* 获取index */ + if (msg->vf_fd_cfg_msg.index == DEFAULT_ADD_INDEX) { + /* 申请新的index */ + err = dpp_fd_acl_index_request(pf_info, &handle); + if (err != 0) { + LOG_ERR("failed to request index!\n"); + return 1; + } + } else { + handle = msg->vf_fd_cfg_msg.index; /* 使用旧的index */ + } + reps->fd_cfg_resp.index = handle; + + /* 配置到np */ + err = dpp_tbl_fd_cfg_add(pf_info, ZXDH_SDT_FD_CFG_TABLE, handle, + 
&p_fd_cfg); + if (err != 0) { + LOG_ERR("failed to add fd in np!\n"); + return 1; + } + + return 0; +} + +static uint32_t zxdh_vf_fd_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + ZXDH_FD_CFG_T p_fd_cfg = { 0 }; + uint32_t err = 0; + + LOG_INFO("zxdh_vf_fd_get start\n"); + + err = dpp_tbl_fd_cfg_get(pf_info, ZXDH_SDT_FD_CFG_TABLE, + msg->vf_fd_cfg_msg.index, &p_fd_cfg); + if (err != 0) { + LOG_ERR("failed to get fd in np!\n"); + return 1; + } + return 0; +} + +static uint32_t zxdh_vf_fd_del(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + uint32_t index = 0; + uint32_t err = 0; + + LOG_INFO("zxdh_vf_fd_del start\n"); + + index = msg->vf_fd_cfg_msg.index; + if (index >= ETHTOOL_FD_MAX_NUM) { + LOG_ERR("the index is invlaid: %d\n", index); + return 1; + } + + /* 配置到np */ + err = dpp_tbl_fd_cfg_del(pf_info, ZXDH_SDT_FD_CFG_TABLE, index); + if (err != 0) { + LOG_ERR("failed to del fd in np!\n"); + return 1; + } + + /* 释放index */ + err = dpp_fd_acl_index_release(pf_info, index); + if (err) { + LOG_ERR("failed to release index!\n"); + return EINVAL; + } + + return 0; +} + +static uint32_t zxdh_vf_udp_stats_get(zxdh_msg_info *msg, zxdh_reps_info *reps, + DPP_PF_INFO_T *pf_info, + struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev) +{ + int32_t err = 0; + + err = dpp_stat_asn_phyport_rx_pkt_cnt_get( + pf_info, pf_dev->phy_port, STAT_RD_CLR_MODE_UNCLR, + &reps->udp_phy_stats_msg.rx_arn_phy); + if (err != 0) { + LOG_ERR("dpp_stat_asn_phyport_rx_pkt_cnt_get failed: %d\n", + err); + return err; + } + + err = dpp_stat_psn_phyport_tx_pkt_cnt_get( + pf_info, pf_dev->phy_port, STAT_RD_CLR_MODE_UNCLR, + &reps->udp_phy_stats_msg.tx_psn_phy); + if (err != 0) { + LOG_ERR("dpp_stat_psn_phyport_tx_pkt_cnt_get failed: %d\n", + err); + return err; + } + + err = dpp_stat_psn_phyport_rx_pkt_cnt_get( 
+ pf_info, pf_dev->phy_port, STAT_RD_CLR_MODE_UNCLR, + &reps->udp_phy_stats_msg.rx_psn_phy); + if (err != 0) { + LOG_ERR("dpp_stat_psn_phyport_rx_pkt_cnt_get failed: %d\n", + err); + return err; + } + + err = dpp_stat_psn_ack_phyport_tx_pkt_cnt_get( + pf_info, pf_dev->phy_port, STAT_RD_CLR_MODE_UNCLR, + &reps->udp_phy_stats_msg.tx_psn_ack_phy); + if (err != 0) { + LOG_ERR("dpp_stat_psn_ack_phyport_tx_pkt_cnt_get failed: %d\n", + err); + return err; + } + + err = dpp_stat_psn_ack_phyport_rx_pkt_cnt_get( + pf_info, pf_dev->phy_port, STAT_RD_CLR_MODE_UNCLR, + &reps->udp_phy_stats_msg.rx_psn_ack_phy); + if (err != 0) { + LOG_ERR("dpp_stat_psn_ack_phyport_rx_pkt_cnt_get failed: %d\n", + err); + return err; + } + + return 0; +} + +zxdh_vf_msg_proc vf_msg_proc[] = { + { ZXDH_VF_PORT_INIT, "vf_port_init", zxdh_vf_port_init }, + { ZXDH_VF_PORT_UNINIT, "vf_port_uninit", zxdh_vf_port_uninit }, + { ZXDH_VF_PORT_RELOAD, "vf_port_reload", zxdh_vf_port_reload }, + { ZXDH_MAC_ADD, "vf_all_mac_add", zxdh_vf_all_mac_add }, + { ZXDH_MAC_DEL, "vf_all_mac_del", zxdh_vf_all_mac_del }, + { ZXDH_MAC_DUMP, "vf_all_mac_dump", zxdh_vf_all_mac_dump }, + { ZXDH_IPV6_MAC_ADD, "vf_ipv6_mac_add", zxdh_vf_ipv6_mac_add }, + { ZXDH_IPV6_MAC_DEL, "vf_ipv6_mac_del", zxdh_vf_ipv6_mac_del }, + { ZXDH_LACP_MAC_ADD, "vf_lacp_mac_add", zxdh_vf_lacp_mac_add }, + { ZXDH_LACP_MAC_DEL, "vf_lacp_mac_del", zxdh_vf_lacp_mac_del }, + { ZXDH_MAC_GET, "vf_mac_get", zxdh_vf_mac_get }, + { ZXDH_RSS_EN_SET, "vf_rss_state_set", zxdh_vf_rss_state_set }, + { ZXDH_RXFH_SET, "vf_rxfh_set", zxdh_vf_rxfh_set }, + { ZXDH_RXFH_GET, "vf_rxfh_get", zxdh_vf_rxfh_get }, + { ZXDH_RXFH_DEL, "vf_rxfh_del", zxdh_vf_rxfh_del }, + { ZXDH_THASH_KEY_SET, "vf_thash_key_set", zxdh_vf_thash_key_set }, + { ZXDH_THASH_KEY_GET, "vf_thash_key_get", zxdh_vf_thash_key_get }, + { ZXDH_HASH_FUNC_SET, "vf_hash_funcs_set", zxdh_vf_hash_funcs_set }, + { ZXDH_RX_FLOW_HASH_SET, "vf_rx_flow_hash_set", + zxdh_vf_rx_flow_hash_set }, + { 
ZXDH_RX_FLOW_HASH_GET, "vf_rx_flow_hash_get", + zxdh_vf_rx_flow_hash_get }, + { ZXDH_PORT_ATTRS_SET, "vf_port_attrs_set", zxdh_vf_port_attrs_set }, + { ZXDH_PORT_ATTRS_GET, "vf_port_attrs_get", zxdh_vf_port_attrs_get }, + { ZXDH_PROMISC_SET, "vf_promisc_set", zxdh_vf_promisc_set }, + { ZXDH_VLAN_FILTER_SET, "vf_vlan_filter_set", zxdh_vf_vlan_filter_set }, + { ZXDH_VLAN_FILTER_ADD, "vf_rx_vid_add", zxdh_vf_rx_vid_add }, + { ZXDH_VLAN_FILTER_DEL, "vf_rx_vid_del", zxdh_vf_rx_vid_del }, + { ZXDH_GET_NP_STATS, "vf_np_stats_get", zxdh_vf_np_stats_get }, + { ZXDH_VF_GET_UDP_STATS, "vf_udp_stats_get", zxdh_vf_udp_stats_get }, + { ZXDH_VF_RATE_LIMIT_SET, "vf_rate_limit_set", zxdh_vf_rate_limit_set }, + { ZXDH_PLCR_UNINIT, "vf_plcr_uninit", zxdh_vf_plcr_uninit }, + { ZXDH_MAP_PLCR_FLOWID, "vf_map_plcr_flowid", zxdh_vf_plcr_flowid_map }, + { ZXDH_PLCR_FLOW_INIT, "vf_plcr_flow_init", zxdh_vf_plcr_flow_init }, + { ZXDH_PLCR_GET_MODE, "vf_plcr_get_mode", zxdh_vf_plcr_get_mode }, + { ZXDH_PLCR_SET_MODE, "vf_plcr_set_mode", zxdh_vf_plcr_set_mode }, + { ZXDH_FLOW_HW_ADD, "vf_flow_hw_add", zxdh_vf_flow_hw_add }, + { ZXDH_FLOW_HW_DEL, "vf_flow_hw_del", zxdh_vf_flow_hw_del }, + { ZXDH_FLOW_HW_GET, "vf_flow_hw_get", zxdh_vf_flow_hw_get }, + { ZXDH_FLOW_HW_FLUSH, "vf_flow_hw_flush", zxdh_vf_flow_hw_flush }, + { ZXDH_VLAN_OFFLOAD_SET, "vf_vlan_strip_set", zxdh_vf_vlan_strip_set }, + { ZXDH_VXLAN_OFFLOAD_ADD, "vf_vxlan_offload_add", + zxdh_vf_vxlan_offload_add }, + { ZXDH_VXLAN_OFFLOAD_DEL, "vf_vxlan_offload_del", + zxdh_vf_vxlan_offload_del }, + { ZXDH_SET_TPID, "vf_qinq_tpid_cfg", zxdh_vf_qinq_tpid_cfg }, + { ZXDH_FD_ADD, "vf_fd_add", zxdh_vf_fd_add }, + { ZXDH_FD_GET, "vf_fd_get", zxdh_vf_fd_get }, + { ZXDH_FD_DEL, "vf_fd_del", zxdh_vf_fd_del }, + { ZXDH_FD_EN_SET, "vf_fd_state_set", zxdh_vf_fd_state_set }, + + { ZXDH_PLCR_CAR_PROFILE_ID_ADD, "vf_plcr_profile_id_add", + zxdh_vf_plcr_profile_id_add }, + { ZXDH_PLCR_CAR_PROFILE_ID_DELETE, "vf_plcr_profile_id_detele", + 
zxdh_vf_plcr_profile_id_delete }, + { ZXDH_PLCR_CAR_PROFILE_CFG_SET, "vf_plcr_profile_cfg_set", + zxdh_vf_plcr_profile_cfg_set }, + { ZXDH_PLCR_CAR_PROFILE_CFG_GET, "vf_plcr_profile_cfg_get", + zxdh_vf_plcr_profile_cfg_get }, + { ZXDH_PLCR_CAR_QUEUE_CFG_SET, "vf_plcr_queue_cfg_set", + zxdh_vf_plcr_queue_cfg_set }, + { ZXDH_PORT_METER_STAT_CLR, "vf_plcr_port_meter_stat_clr", + zxdh_vf_plcr_port_meter_stat_clr }, + { ZXDH_PORT_METER_STAT_GET, "vf_plcr_port_meter_stat_get", + zxdh_vf_plcr_port_meter_stat_get }, + { ZXDH_VF_1588_CALL_NP, "vf_1588_call_np", zxdh_vf_call_np_1588 }, + { ZXDH_VF_SLOT_ID_GET, "vf_slot_id_get", zxdh_vf_slot_id_get }, + { ZXDH_MC_CMPAT_VERINFO, "vf_mcode_feature_get", + zxdh_vf_mcode_feature_get }, + { ZXDH_GET_K_CMPAT_VERINFO, "vf_k_cmpat_get", zxdh_vf_k_cmpat_get }, + { ZXDH_VF_1588_ENABLE, "vf_1588_enable_proc", + zxdh_vf_1588_enable_proc }, +}; + +int32_t dh_pf_msg_recv_func(void *pay_load, uint16_t len, void *reps_buffer, + uint16_t *reps_len, void *dev) +{ + zxdh_msg_info *msg = (zxdh_msg_info *)pay_load; + zxdh_reps_info *reps = (zxdh_reps_info *)reps_buffer; + struct zxdh_pf_device *pf_dev = (struct zxdh_pf_device *)dev; + struct zxdh_vf_item *vf_item = NULL; + uint32_t ret = 0; + int32_t i = 0; + int32_t num = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + if (pf_dev == NULL) { + LOG_ERR("dev is NULL\n"); + return -1; + } + + LOG_DEBUG("vport: 0x%x vfitem indx %d\n", msg->hdr.vport, + (msg->hdr.pcie_id & (0xff))); + pf_info.slot = pf_dev->slot_id; + pf_info.vport = msg->hdr.vport; + num = sizeof(vf_msg_proc) / sizeof(zxdh_vf_msg_proc); + vf_item = &pf_dev->vf_item[(msg->hdr.pcie_id & (0xff))]; + for (i = 0; i < num; i++) { + *reps_len = sizeof(union zxdh_msg); + if (msg->hdr.op_code < + ZXDH_GET_SW_STATS) { //op_code=57之前的PF VF兼容性场景考虑 + *reps_len = ZXDH_REPS_MAX_SIZE_BEFORE57; + } + + if (vf_msg_proc[i].op_code == msg->hdr.op_code) { + ret = vf_msg_proc[i].msg_proc(msg, reps, &pf_info, + vf_item, pf_dev); + if (ret != 0) { + if 
((msg->hdr.op_code == ZXDH_MAC_ADD) || + (msg->hdr.op_code == ZXDH_IPV6_MAC_ADD) || + (msg->hdr.op_code == ZXDH_MAC_DUMP) || + (msg->hdr.op_code == ZXDH_LACP_MAC_ADD)) { + if (ret == ZXDH_REPS_BEYOND_MAC) { + reps->vf_mac_set_msg + .mac_err_flag = + ZXDH_REPS_BEYOND_MAC; + } else if (ret == ZXDH_REPS_EXIST_MAC) { + reps->vf_mac_set_msg + .mac_err_flag = + ZXDH_REPS_EXIST_MAC; + } + } + reps->flag = ZXDH_REPS_FAIL; + LOG_ERR("%s failed, ret: %d\n", + vf_msg_proc[i].proc_name, ret); + return -1; + } + + reps->flag = ZXDH_REPS_SUCC; + return 0; + } + } + + LOG_ERR("invalid op_code: [%u]\n", msg->hdr.op_code); + return -2; +} + +int32_t dh_pf_msg_recv_func_register(void) +{ + int32_t ret = 0; + + ret = zxdh_bar_chan_msg_recv_register(MODULE_VF_BAR_MSG_TO_PF, + dh_pf_msg_recv_func); + if (ret != 0) { + LOG_ERR("event_id[%d] register failed: %d\n", + MODULE_VF_BAR_MSG_TO_PF, ret); + return ret; + } + + return ret; +} + +void dh_pf_msg_recv_func_unregister(void) +{ + zxdh_bar_chan_msg_recv_unregister(MODULE_VF_BAR_MSG_TO_PF); +} diff --git a/drivers/net/ethernet/dinghai/en_pf/msg_func.h b/drivers/net/ethernet/dinghai/en_pf/msg_func.h new file mode 100644 index 000000000000..ce9b6cfee8a9 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_pf/msg_func.h @@ -0,0 +1,33 @@ +#ifndef __ZXDH_PF_MSG_FUNC_H__ +#define __ZXDH_PF_MSG_FUNC_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "../en_np/flow/api/include/dpp_tbl_fd_cfg.h" +#include "../en_np/driver/include/dpp_drv_sdt.h" +#define RD_CLR_MODE_UNCLR 0 /* 不读清 */ +#define RD_CLR_MODE_CLR 1 /* 读清 */ + +typedef enum { + PTP_PORT_VFID_SET, + PTP_TC_ENABLE_SET, + + MAX_VF_CALL_NP_NUM, +} vf_call_np_num; + +int32_t dh_pf_msg_recv_func_register(void); +void dh_pf_msg_recv_func_unregister(void); +void zxdh_vf_item_mac_add(struct zxdh_vf_item *vf_item, uint8_t *mac_addr, + uint8_t dhtool_mac_set_flag); +void zxdh_vf_item_mac_del(struct zxdh_vf_item *vf_item, uint8_t *mac_addr); +void zxdh_flow_table_add(struct 
ethtool_rx_flow_spec *fs, + ZXDH_FD_CFG_T *p_fd_cfg, DPP_PF_INFO_T *pf_info); + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_PF_MSG_FUNC_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_ptp/tod_driver.c b/drivers/net/ethernet/dinghai/en_ptp/tod_driver.c new file mode 100644 index 000000000000..3cc502a4cb0c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/tod_driver.c @@ -0,0 +1,525 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tod_driver.h" +//#include "../msg_chan_driver/msg_chan_pub.h" +#include +#include +#include "zxdh_ptp_common.h" + +#define DEVICE_NUM 3 +#define TOD_AGENT_NAME_LEN 15 +#define TOD_DEVICE_NAME "tod-dev" +#define TOD_DEVICE_CLASS "tod_class" + +static uint64_t virt_addr; +static uint64_t pcie_id; +static dev_t tod_device_no; +static struct class *tod_device_class; + +struct tod_device { + struct cdev tod_cdev; + // gps:"/dev/ttyAMA1", recv tod: "/dev/ttyAMA2", send tod: "/dev/ttyAMA3" + char tod_agent_name[TOD_AGENT_NAME_LEN]; + struct file *tod_device_file; +}; + +struct tod_device tod_dev_array[DEVICE_NUM]; + +int32_t tod_device_set_bar_virtual_addr(uint64_t virtaddr, uint16_t pcieid) +{ + virt_addr = virtaddr; + pcie_id = pcieid; + PTP_LOG_INFO("%s: bar msg virtaddr: 0x%llx\n", __FUNCTION__, virtaddr); + return 0; +} +EXPORT_SYMBOL(tod_device_set_bar_virtual_addr); + +static int32_t tod_device_sync_msg_send(uint8_t *req, uint32_t req_len, + uint8_t *resp, uint32_t resp_len) +{ + int32_t result = 0; + uint32_t payload_len = 0; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem out = { 0 }; + + if (req == NULL || req_len < 4) { + PTP_LOG_ERR("%s: arg invalid, req: %px, req_len: %u.\n", + __FUNCTION__, req, req_len); + return -EINVAL; + } + + out.buffer_len = + 4 + 4 + + resp_len; // 4B 消息头 + 4B result + resp_len实际应答消息 + out.recv_buffer = (uint8_t *)kmalloc(out.buffer_len, GFP_KERNEL); + if (out.recv_buffer == NULL) { + PTP_LOG_ERR("%s: no 
space left on device.\n", __FUNCTION__); + return -ENOSPC; + } + memset(out.recv_buffer, 0, out.buffer_len); + + in.virt_addr = virt_addr; + in.event_id = MODULE_TOD; + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.payload_addr = req; + in.payload_len = req_len; + in.src_pcieid = pcie_id; + + if (zxdh_bar_chan_sync_msg_send(&in, &out) != BAR_MSG_OK) { + kfree(out.recv_buffer); + PTP_LOG_ERR("%s: zxdh_bar_chan_sync_msg_send failed.\n", + __FUNCTION__); + return -EINVAL; + } + + // 消息应答格式: header(4B) + payload(nB), payload最大2048 - 12 + // header格式: 0xFF(1B) + payload_len(2B) + rsv(1B) + // payload格式: result(4B) + msg((n - 4)B) + payload_len = *(uint16_t *)((uint8_t *)out.recv_buffer + 1); + if (payload_len < 4) { + kfree(out.recv_buffer); + PTP_LOG_ERR("%s: payload_len: %u check failed.\n", __FUNCTION__, + payload_len); + return -EINVAL; + } + + result = *(int32_t *)((uint8_t *)out.recv_buffer + 4); + if (result != 0) { + kfree(out.recv_buffer); + PTP_LOG_ERR("%s: result: %d check failed.\n", __FUNCTION__, + result); + return result; + } + + if (payload_len > 4 && resp != NULL) { + memcpy(resp, out.recv_buffer + 8, + (((payload_len - 4) > resp_len) ? resp_len : + (payload_len - 4))); + } + + kfree(out.recv_buffer); + + return 0; +} + +static int tod_device_open(struct inode *inode, struct file *file) +{ + int32_t result = 0; + struct tod_device_msg msg; + struct tod_device *tod; + file->private_data = (void *)(container_of( + inode->i_cdev, struct tod_device, tod_cdev)); + tod = (struct tod_device *)file->private_data; + + PTP_LOG_INFO("%s. 
dev_name: %s\n", __FUNCTION__, (tod->tod_agent_name)); + + if (tod->tod_device_file != NULL) { + PTP_LOG_INFO("%s: device already open.\n", __FUNCTION__); + return 0; + } + + memset(&msg, 0x00, sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_OPEN; + memcpy(msg.data, tod->tod_agent_name, strlen(tod->tod_agent_name) + 1); + + result = tod_device_sync_msg_send((uint8_t *)(&msg), + sizeof(struct tod_device_msg), + (uint8_t *)(&tod->tod_device_file), + sizeof(struct file *)); + if (result != 0) { + tod->tod_device_file = NULL; + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + PTP_LOG_INFO("%s: file %px open success.\n", __FUNCTION__, + tod->tod_device_file); + + return 0; +} + +static int tod_device_release(struct inode *inode, struct file *file) +{ + int32_t result = 0; + struct tod_device_msg msg; + struct tod_device *tod; + + tod = (struct tod_device *)file->private_data; + + PTP_LOG_INFO("%s tod_agent_name: %s.\n", __FUNCTION__, + tod->tod_agent_name); + + if (tod->tod_device_file == NULL) { + PTP_LOG_ERR("%s: device already close.\n", __FUNCTION__); + return 0; + } + + memset(&msg, 0x00, sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_CLOSE; + msg.file = tod->tod_device_file; + + result = tod_device_sync_msg_send( + (uint8_t *)(&msg), sizeof(struct tod_device_msg), NULL, 0); + if (result != 0) { + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + PTP_LOG_INFO("%s: file %px close success.\n", __FUNCTION__, + tod->tod_device_file); + tod->tod_device_file = NULL; + + return 0; +} + +static ssize_t tod_device_read(struct file *file, char *buf, size_t count, + loff_t *f_pos) +{ + int32_t result = 0; + uint8_t *resp = NULL; + struct tod_device_msg msg; + struct tod_device *tod; + + tod = (struct tod_device *)file->private_data; + + PTP_LOG_INFO("%s tod_agent_name: %s.\n", __FUNCTION__, + tod->tod_agent_name); + 
+ if (tod->tod_device_file == NULL) { + PTP_LOG_ERR("%s: no such device.\n", __FUNCTION__); + return -ENODEV; + } + + if (count > + (2048 - 12 - sizeof(int32_t) - + sizeof(size_t))) { // common bar: 2048 - 12, result: 4, count: 8. + PTP_LOG_ERR("%s: no space left on device.\n", __FUNCTION__); + return -ENOSPC; + } + + resp = (uint8_t *)kmalloc(sizeof(size_t) + count, GFP_KERNEL); + if (resp == NULL) { + PTP_LOG_ERR("%s: no space left on device.\n", __FUNCTION__); + return -ENOSPC; + } + + memset(&msg, 0x00, sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_READ; + msg.count = count; + msg.file = tod->tod_device_file; + + result = tod_device_sync_msg_send((uint8_t *)(&msg), + sizeof(struct tod_device_msg), resp, + sizeof(size_t) + count); + if (result != 0) { + kfree(resp); + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + + count = *((size_t *)(resp)); + if (count > msg.count) { + PTP_LOG_ERR("%s: no space left on device.\n", __FUNCTION__); + kfree(resp); + return -ENOSPC; + } + + result = copy_to_user(buf, resp + sizeof(size_t), count); + if (result != 0) { + kfree(resp); + PTP_LOG_ERR("%s: copy_to_user failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + + kfree(resp); + PTP_LOG_INFO("%s: file %px read %lu bytes success.\n", __FUNCTION__, + tod->tod_device_file, count); + + return count; +} + +static ssize_t tod_device_write(struct file *file, const char *buf, + size_t count, loff_t *f_pos) +{ + int32_t result = 0; + struct tod_device_msg msg; + struct tod_device *tod; + + tod = (struct tod_device *)file->private_data; + + PTP_LOG_INFO("%s tod_agent_name: %s.\n", __FUNCTION__, + tod->tod_agent_name); + + if (tod->tod_device_file == NULL) { + PTP_LOG_ERR("%s: no such device.\n", __FUNCTION__); + return -ENODEV; + } + + if (count > sizeof(msg.data)) { + PTP_LOG_ERR("%s: no space left on device.\n", __FUNCTION__); + return -ENOSPC; + } + + memset(&msg, 0x00, 
sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_WRITE; + msg.count = count; + msg.file = tod->tod_device_file; + result = copy_from_user(msg.data, buf, count); + if (result != 0) { + PTP_LOG_ERR("%s: copy_from_user failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + + result = tod_device_sync_msg_send((uint8_t *)(&msg), + sizeof(struct tod_device_msg), + (uint8_t *)(&count), sizeof(size_t)); + if (result != 0) { + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + PTP_LOG_INFO("%s: file %px write %lu bytes success.\n", __FUNCTION__, + tod->tod_device_file, count); + + return count; +} + +static __poll_t tod_device_poll(struct file *file, + struct poll_table_struct *wait) +{ + int32_t result = 0; + uint16_t poll_mask = 0; + struct tod_device_msg msg; + struct tod_device *tod; + + tod = (struct tod_device *)file->private_data; + PTP_LOG_INFO("%s tod_agent_name: %s.\n", __FUNCTION__, + tod->tod_agent_name); + + if (tod->tod_device_file == NULL) { + PTP_LOG_ERR("%s: no such device.\n", __FUNCTION__); + return POLLERR; + } + + memset(&msg, 0x00, sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_POLL; + msg.file = tod->tod_device_file; + + result = tod_device_sync_msg_send((uint8_t *)(&msg), + sizeof(struct tod_device_msg), + (uint8_t *)(&poll_mask), + sizeof(uint16_t)); + if (result != 0) { + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return POLLERR; + } + PTP_LOG_INFO("%s: file %px poll mask 0x%04x success.\n", __FUNCTION__, + tod->tod_device_file, poll_mask); + + return poll_mask; +} + +static long tod_device_ioctl(struct file *file, uint32_t request, + unsigned long args) +{ + int32_t result = 0; + uint8_t *resp = NULL; + struct tod_device_msg msg; + struct tod_device *tod; + + tod = (struct tod_device *)file->private_data; + + PTP_LOG_INFO("%s tod_agent_name: %s.\n", __FUNCTION__, + tod->tod_agent_name); + 
+ if (tod->tod_device_file == NULL) { + PTP_LOG_ERR("%s: no such device.\n", __FUNCTION__); + return -ENODEV; + } + + resp = (uint8_t *)kmalloc(sizeof(struct termios), GFP_KERNEL); + if (resp == NULL) { + PTP_LOG_ERR("%s: no space left on device.\n", __FUNCTION__); + return -ENOSPC; + } + + memset(&msg, 0x00, sizeof(struct tod_device_msg)); + msg.type = TOD_DEVICE_MSG_IOCTL; + msg.file = tod->tod_device_file; + msg.command = request; + + if ((struct termios *)args != NULL) { + result = copy_from_user(msg.data, (uint8_t *)args, + sizeof(struct termios)); + if (result != 0) { + PTP_LOG_ERR("%s: copy_from_user failed, result: %d.\n", + __FUNCTION__, result); + kfree(resp); + return -EINVAL; + } + } + + result = tod_device_sync_msg_send((uint8_t *)(&msg), + sizeof(struct tod_device_msg), resp, + sizeof(struct termios)); + if (result != 0) { + kfree(resp); + PTP_LOG_ERR( + "%s: tod_device_sync_msg_send failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + + if ((struct termios *)args != NULL) { + result = copy_to_user((uint8_t *)args, resp, + sizeof(struct termios)); + if (result != 0) { + kfree(resp); + PTP_LOG_ERR("%s: copy_to_user failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + } + + kfree(resp); + PTP_LOG_INFO("%s: file %px ioctl success.\n", __FUNCTION__, + tod->tod_device_file); + + return 0; +} + +struct file_operations tod_device_ops = { .owner = THIS_MODULE, + .open = tod_device_open, + .release = tod_device_release, + .read = tod_device_read, + .write = tod_device_write, + .poll = tod_device_poll, + .unlocked_ioctl = tod_device_ioctl }; + +static int32_t __init tod_device_init(void) +{ + int32_t result = 0; + int32_t i = 0; + int32_t j = 0; + struct device *dev = NULL; + + result = alloc_chrdev_region(&tod_device_no, 0, DEVICE_NUM, + TOD_DEVICE_NAME); + if (result < 0) { + PTP_LOG_ERR("%s: alloc_chrdev_region failed, result: %d.\n", + __FUNCTION__, result); + return -EINVAL; + } + + for (i = 0; i < DEVICE_NUM; i++) { 
+ // 给 struct cdev对象制定操作函数集 + cdev_init(&tod_dev_array[i].tod_cdev, &tod_device_ops); + tod_dev_array[i].tod_cdev.owner = THIS_MODULE; + snprintf(tod_dev_array[i].tod_agent_name, TOD_AGENT_NAME_LEN, + "/dev/ttyAMA%d", i + 1); + + // 将struct cdev对象添加到内核对应的数据结构里 + result = cdev_add(&tod_dev_array[i].tod_cdev, + MKDEV(MAJOR(tod_device_no), + MINOR(tod_device_no) + i), + 1); + if (result != 0) { + PTP_LOG_ERR("%s: cdev_add failed, result: %d.\n", + __FUNCTION__, result); + if (i > 0) { + for (j = i - 1; j >= 0; j--) { + cdev_del(&tod_dev_array[j].tod_cdev); + } + } + goto cdev_add_fail; + } + } + + tod_device_class = class_create(TOD_DEVICE_CLASS); + + if (IS_ERR(tod_device_class)) { + PTP_LOG_ERR("%s: class_create failed, err: %lu.\n", + __FUNCTION__, PTR_ERR(tod_device_class)); + + goto class_create_fail; + } + + for (i = 0; i < DEVICE_NUM; i++) { + dev = device_create(tod_device_class, NULL, + MKDEV(MAJOR(tod_device_no), + MINOR(tod_device_no) + i), + NULL, "tod_device%d", i); + if (IS_ERR(dev)) { + PTP_LOG_ERR("%s: device_create failed, err: %lu.\n", + __FUNCTION__, PTR_ERR(dev)); + if (i > 0) { + for (j = i - 1; j >= 0; j--) { + device_destroy( + tod_device_class, + MKDEV(MAJOR(tod_device_no), + MINOR(tod_device_no) + + j)); + } + } + + goto device_create_fail; + } + } + + return 0; + +device_create_fail: + class_destroy(tod_device_class); +class_create_fail: + for (i = 0; i < DEVICE_NUM; i++) { + cdev_del(&tod_dev_array[i].tod_cdev); + } +cdev_add_fail: + unregister_chrdev_region(tod_device_no, DEVICE_NUM); + return -EINVAL; +} + +static void __exit tod_device_exit(void) +{ + int32_t i; + PTP_LOG_INFO("%s: start.\n", __FUNCTION__); + + for (i = 0; i < DEVICE_NUM; i++) { + device_destroy(tod_device_class, + MKDEV(MAJOR(tod_device_no), + MINOR(tod_device_no) + i)); + } + + class_destroy(tod_device_class); + + for (i = 0; i < DEVICE_NUM; i++) { + cdev_del(&tod_dev_array[i].tod_cdev); + } + + unregister_chrdev_region(tod_device_no, DEVICE_NUM); + + PTP_LOG_ERR("%s: 
success.\n", __FUNCTION__); +} + +module_init(tod_device_init); +module_exit(tod_device_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dinghai/en_ptp/tod_driver.h b/drivers/net/ethernet/dinghai/en_ptp/tod_driver.h new file mode 100644 index 000000000000..65d1868409ee --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/tod_driver.h @@ -0,0 +1,23 @@ +#ifndef _TOD_DRIVER_H_ +#define _TOD_DRIVER_H_ + +#include + +#define TOD_DEVICE_MSG_OPEN ((uint32_t)(0)) +#define TOD_DEVICE_MSG_CLOSE ((uint32_t)(1)) +#define TOD_DEVICE_MSG_READ ((uint32_t)(2)) +#define TOD_DEVICE_MSG_WRITE ((uint32_t)(3)) +#define TOD_DEVICE_MSG_POLL ((uint32_t)(4)) +#define TOD_DEVICE_MSG_IOCTL ((uint32_t)(5)) + +#define TOD_DEVICE_DATA_LEN ((uint32_t)(512)) + +struct tod_device_msg { + uint32_t type; + uint32_t command; + size_t count; + void *file; + uint8_t data[TOD_DEVICE_DATA_LEN]; +}; + +#endif diff --git a/drivers/net/ethernet/dinghai/en_ptp/tod_driver_stub.c b/drivers/net/ethernet/dinghai/en_ptp/tod_driver_stub.c new file mode 100644 index 000000000000..2e8d1feb63dd --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/tod_driver_stub.c @@ -0,0 +1,62 @@ +#include +//#include "../msg_chan_driver/msg_chan_pub.h" +#include +#if 0 +typedef uint16_t (*rsc_recv_func_ptr)(uint8_t *pay_load, uint16_t len, uint8_t *reps_buffer, uint16_t *reps_len); + +static rsc_recv_func_ptr g_riscv_event[100] = {0}; + +uint16_t RSC_MsgProRegister(uint8_t event_id, rsc_recv_func_ptr msg_pro_fun) +{ + if (100 <= event_id) + return 0; + g_riscv_event[event_id] = msg_pro_fun; + return 0; +} +EXPORT_SYMBOL(RSC_MsgProRegister); + +uint16_t RSC_MsgProUnregister(uint8_t event_id) +{ + if (100 <= event_id) + return 0; + g_riscv_event[event_id] = NULL; + return 0; +} +EXPORT_SYMBOL(RSC_MsgProUnregister); + + +// int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result) +// { +// uint16_t reps_len = 0; +// uint8_t *reps_buffer = NULL; +// rsc_recv_func_ptr ptr 
= NULL; + +// reps_buffer = (uint8_t*)kmalloc(2048 - 12 + 4, GFP_KERNEL); +// if (reps_buffer == NULL) +// { +// printk(KERN_ERR "%s: no space left on device.\n", __FUNCTION__); +// return -ENOSPC; +// } +// memset(reps_buffer, 0x00, 2048 - 12 + 4); // 消息应答净荷长度最大2048 - 12, 再加上4B消息头 + +// ptr = g_riscv_event[in->event_id]; +// if (ptr(in->payload_addr, in->payload_len, reps_buffer + 4, &reps_len) != 0) +// { +// kfree(reps_buffer); +// printk(KERN_ERR "%s: rsc_recv_func_ptr failed.\n", __FUNCTION__); +// return -EINVAL; +// } + +// *(uint8_t*)reps_buffer = 0xFF; +// *(uint16_t*)((((uint8_t*)reps_buffer )+ 1)) = reps_len; + +// memcpy(result->recv_buffer, reps_buffer, result->buffer_len); + +// kfree(reps_buffer); + +// return 0; +// } +// EXPORT_SYMBOL(zxdh_bar_chan_sync_msg_send); + +MODULE_LICENSE("GPL"); +#endif diff --git a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c new file mode 100644 index 000000000000..9fbcbce5930c --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c @@ -0,0 +1,1862 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "en_aux.h" +#include "zxdh_ptp.h" +#include "zxdh_ptp_regs.h" +#include "zxdh_ptp_common.h" + +#define ZXDH_PF_BAR0 0 + +char pps[3][15] = { "pp1s_out", "pp1s_1588", "pp1s_external" }; + +spinlock_t global_ptpm_lock; +u_int32_t ptpm_lock_init_stat; +// all tsn timer name: tsn0 tsn1 tsn2 tsn3, so the return value is 0/1/2/3. 
+static int get_tsn_timer_no(char *clock_name) +{ + int ret; + int timer_no; + ret = sscanf(clock_name, "tsn%d", &timer_no); + if (ret != 1) { + PTP_LOG_INFO(" tsn: %s get timer no fail!\n", clock_name); + return -1; + } + return timer_no; +} + +static uint32_t tsn_clock_cycle_integer_reg(int timer_no) +{ + if (0 == timer_no) + return TSN_CLOCK_CYCLE_INTEGER(0); + else if (1 == timer_no) + return TSN_CLOCK_CYCLE_INTEGER(1); + else if (2 == timer_no) + return TSN_CLOCK_CYCLE_INTEGER(2); + else if (3 == timer_no) + return TSN_CLOCK_CYCLE_INTEGER(3); + + return 0; +} + +static uint32_t tsn_clock_cycle_fraction_reg(int timer_no) +{ + if (0 == timer_no) + return TSN_CLOCK_CYCLE_FRACTION(0); + else if (1 == timer_no) + return TSN_CLOCK_CYCLE_FRACTION(1); + else if (2 == timer_no) + return TSN_CLOCK_CYCLE_FRACTION(2); + else if (3 == timer_no) + return TSN_CLOCK_CYCLE_FRACTION(3); + + return 0; +} + +#define GET_TSN_ADJUST_NANO_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + nano_sec_reg = TSN_ADJUST_NANO_SEC(0); \ + else if (1 == tsn_no) \ + nano_sec_reg = TSN_ADJUST_NANO_SEC(1); \ + else if (2 == tsn_no) \ + nano_sec_reg = TSN_ADJUST_NANO_SEC(2); \ + else if (3 == tsn_no) \ + nano_sec_reg = TSN_ADJUST_NANO_SEC(3); \ + }) + +#define GET_TSN_ADJUST_LOW_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + low_sec_reg = TSN_ADJUST_LOW_SECOND(0); \ + else if (1 == tsn_no) \ + low_sec_reg = TSN_ADJUST_LOW_SECOND(1); \ + else if (2 == tsn_no) \ + low_sec_reg = TSN_ADJUST_LOW_SECOND(2); \ + else if (3 == tsn_no) \ + low_sec_reg = TSN_ADJUST_LOW_SECOND(3); \ + }) + +#define GET_TSN_ADJUST_HIGH_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + high_sec_reg = TSN_ADJUST_HIGH_SECOND(0); \ + else if (1 == tsn_no) \ + high_sec_reg = TSN_ADJUST_HIGH_SECOND(1); \ + else if (2 == tsn_no) \ + high_sec_reg = TSN_ADJUST_HIGH_SECOND(2); \ + else if (3 == tsn_no) \ + high_sec_reg = TSN_ADJUST_HIGH_SECOND(3); \ + }) + +#define GET_TSN_ADJUST_FRAC_NANO_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + 
frac_nano_reg = TSN_ADJUST_FRAC_NANO_SEC(0); \ + else if (1 == tsn_no) \ + frac_nano_reg = TSN_ADJUST_FRAC_NANO_SEC(1); \ + else if (2 == tsn_no) \ + frac_nano_reg = TSN_ADJUST_FRAC_NANO_SEC(2); \ + else if (3 == tsn_no) \ + frac_nano_reg = TSN_ADJUST_FRAC_NANO_SEC(3); \ + }) + +#define GET_TSN_LATCH_NANO_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + nano_sec_reg = TSN_LATCH_NANO_SEC(0); \ + else if (1 == tsn_no) \ + nano_sec_reg = TSN_LATCH_NANO_SEC(1); \ + else if (2 == tsn_no) \ + nano_sec_reg = TSN_LATCH_NANO_SEC(2); \ + else if (3 == tsn_no) \ + nano_sec_reg = TSN_LATCH_NANO_SEC(3); \ + }) + +#define GET_TSN_LATCH_LOW_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + low_sec_reg = TSN_LATCH_LOW_SECOND(0); \ + else if (1 == tsn_no) \ + low_sec_reg = TSN_LATCH_LOW_SECOND(1); \ + else if (2 == tsn_no) \ + low_sec_reg = TSN_LATCH_LOW_SECOND(2); \ + else if (3 == tsn_no) \ + low_sec_reg = TSN_LATCH_LOW_SECOND(3); \ + }) + +#define GET_TSN_LATCH_HIGH_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + high_sec_reg = TSN_LATCH_HIGH_SECOND(0); \ + else if (1 == tsn_no) \ + high_sec_reg = TSN_LATCH_HIGH_SECOND(1); \ + else if (2 == tsn_no) \ + high_sec_reg = TSN_LATCH_HIGH_SECOND(2); \ + else if (3 == tsn_no) \ + high_sec_reg = TSN_LATCH_HIGH_SECOND(3); \ + }) + +#define GET_TSN_LATCH_FRAC_NANO_SEC_REG(tsn_no) \ + ({ \ + if (0 == tsn_no) \ + frac_nano_reg = TSN_LATCH_FRAC_NANO_SEC(0); \ + else if (1 == tsn_no) \ + frac_nano_reg = TSN_LATCH_FRAC_NANO_SEC(1); \ + else if (2 == tsn_no) \ + frac_nano_reg = TSN_LATCH_FRAC_NANO_SEC(2); \ + else if (3 == tsn_no) \ + frac_nano_reg = TSN_LATCH_FRAC_NANO_SEC(3); \ + }) + +#define GET_PPS_LATCH_TSN_NANO_REG(tsn_no) \ + ({ \ + if (1 == tsn_no) \ + nano_sec_reg = PPS_LATCH_TSN_NANO_SEC(0); \ + else if (2 == tsn_no) \ + nano_sec_reg = PPS_LATCH_TSN_NANO_SEC(1); \ + else if (3 == tsn_no) \ + nano_sec_reg = PPS_LATCH_TSN_NANO_SEC(2); \ + else if (4 == tsn_no) \ + nano_sec_reg = PPS_LATCH_TSN_NANO_SEC(3); \ + }) + +#define 
GET_PPS_LATCH_TSN_LOW_SEC_REG(tsn_no) \ + ({ \ + if (1 == tsn_no) \ + low_sec_reg = PPS_LATCH_TSN_LOW_SECOND(0); \ + else if (2 == tsn_no) \ + low_sec_reg = PPS_LATCH_TSN_LOW_SECOND(1); \ + else if (3 == tsn_no) \ + low_sec_reg = PPS_LATCH_TSN_LOW_SECOND(2); \ + else if (4 == tsn_no) \ + low_sec_reg = PPS_LATCH_TSN_LOW_SECOND(3); \ + }) + +#define GET_PPS_LATCH_TSN_HIGH_SEC_REG(tsn_no) \ + ({ \ + if (1 == tsn_no) \ + high_sec_reg = PPS_LATCH_TSN_HIGH_SECOND(0); \ + else if (2 == tsn_no) \ + high_sec_reg = PPS_LATCH_TSN_HIGH_SECOND(1); \ + else if (3 == tsn_no) \ + high_sec_reg = PPS_LATCH_TSN_HIGH_SECOND(2); \ + else if (4 == tsn_no) \ + high_sec_reg = PPS_LATCH_TSN_HIGH_SECOND(3); \ + }) + +#define GET_PPS_LATCH_TSN_FRAC_NANO_SEC_REG(tsn_no) \ + ({ \ + if (1 == tsn_no) \ + frac_nano_reg = PPS_LATCH_TSN_FRAC_NANO_SEC(0); \ + else if (2 == tsn_no) \ + frac_nano_reg = PPS_LATCH_TSN_FRAC_NANO_SEC(1); \ + else if (3 == tsn_no) \ + frac_nano_reg = PPS_LATCH_TSN_FRAC_NANO_SEC(2); \ + else if (4 == tsn_no) \ + frac_nano_reg = PPS_LATCH_TSN_FRAC_NANO_SEC(3); \ + }) + +enum reg_module { + PTP_TOP, + PTP_M, + PTP_S0, + PTP_S1, + PTP_S2, +}; + +static inline uint32_t zxdh_read_reg(uint64_t base_addr, uint32_t offset) +{ + return readl((const volatile void *)(base_addr + offset)); +} + +static inline void zxdh_write_reg(uint64_t base_addr, uint32_t offset, + uint32_t val) +{ + writel(val, (volatile void *)(base_addr + offset)); +} + +static struct zxdh_ptp_private * +zxdh_ptp_get_ptp_private(struct zxdh_en_device *en_dev) +{ + struct zxdh_pf_device *pf_dev; + /* Started by AICoder, pid:xb4bc4896fi268d144270abd6054e701bd21ae4f */ + PTP_COMM_CHECK_POINT_RET(en_dev, NULL); + /* Ended by AICoder, pid:xb4bc4896fi268d144270abd6054e701bd21ae4f */ + + pf_dev = dh_core_priv(en_dev->parent->parent); + PTP_COMM_CHECK_POINT_RET(pf_dev, NULL); + + return pf_dev->ptp; +} + +static bool zxdh_pf_is_evb(struct zxdh_pf_device *pf_dev) +{ + uint8_t product; + uint64_t vaddr = 0; + + 
PTP_COMM_CHECK_POINT_RET(pf_dev, false); + vaddr = (uint64_t)ZXDH_BAR_FWCAP(pf_dev->pci_ioremap_addr[0]); + + product = readb( + (const volatile void __iomem *)(vaddr + ZXDH_PRODUCT_TYPE)); + if ((product == ZXDH_PRODUCT_EVB_EP0) || + (product == ZXDH_PRODUCT_EVB_EP0_EP4)) { + return true; + } + return false; +} +static int zx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + unsigned long flags; + int neg_adj = 0; + uint32_t cur_nano, cur_frac_nano; + uint64_t tmp_frac_nano; + uint64_t base_addr; + uint64_t freq_adj; + s32 ppb; + struct zxdh_ptp_private *adapter = NULL; + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + + adapter = container_of(ptp, struct zxdh_ptp_private, ptp_caps[0]); + + if (adapter == NULL) { + PTP_LOG_ERR("zx_ptp_adjfine adapter null\n"); + return -1; + } + ppb = scaled_ppm_to_ppb(scaled_ppm); + PTP_LOG_INFO("name: %s, ppb: %d\n", ptp->name, ppb); + + if (ppb == 0) + return 0; + + if (ppb < 0) { + ppb = -ppb; + neg_adj = 1; + } + + base_addr = adapter->ptpm_addr; + cur_nano = zxdh_read_reg(base_addr, PTP_CLOCK_CYCLE_INTEGER); + cur_frac_nano = zxdh_read_reg(base_addr, PTP_CLOCK_CYCLE_FRACTION); + + tmp_frac_nano = ((unsigned long long)cur_nano << 32) + cur_frac_nano; + + PTP_LOG_INFO("cur_nano: %u, cur_frac_nano: %u, tmp_frac_nano: 0x%llx\n", + cur_nano, cur_frac_nano, tmp_frac_nano); + + /* positive adjust */ + if (0 == neg_adj) { + tmp_frac_nano += tmp_frac_nano * ppb / 1000000000; + } else { /* negative adjust */ + freq_adj = tmp_frac_nano * ppb / 1000000000; + if (tmp_frac_nano > freq_adj) + tmp_frac_nano -= freq_adj; + } + PTP_LOG_INFO("new tmp_frac_nano: 0x%llx\n", tmp_frac_nano); + cur_nano = (uint32_t)(tmp_frac_nano >> 32); + cur_frac_nano = tmp_frac_nano & 0xffffffff; + + PTP_LOG_INFO("cur_nano: %u, cur_frac_nano: %u\n", cur_nano, + cur_frac_nano); + spin_lock_irqsave(&global_ptpm_lock, flags); + + zxdh_write_reg(base_addr, PTP_CLOCK_CYCLE_INTEGER, cur_nano); + zxdh_write_reg(base_addr, PTP_CLOCK_CYCLE_FRACTION, 
cur_frac_nano); + zxdh_write_reg(base_addr, CLOCK_CYCLE_UPDATE, 1); + + spin_unlock_irqrestore(&global_ptpm_lock, flags); + + return 0; +} + +static int zxdh_ptp_adjtime(struct ptp_clock_info *ptp_clock, s64 delta) +{ + unsigned long flags; + uint32_t run_mode; + uint32_t reg_val; + uint64_t adjust; + // s32 rem; + uint64_t sec; + uint32_t nsec; + + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp_clock, PTP_PARA_CHK_POINT_NULL); + PTP_LOG_INFO("name: %s, delta: %lld\n", ptp_clock->name, delta); + + adapter = container_of(ptp_clock, struct zxdh_ptp_private, ptp_caps[0]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_ptp_adjtime adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + spin_lock_irqsave(&global_ptpm_lock, flags); + // timecounter_adjtime(&adapter->tc, delta); + + /* 1588 timer, update mode */ + if (delta > 0) { + run_mode = INCRE_MODE; + adjust = delta; + } else { + run_mode = DECRE_MODE; + adjust = -delta; + } + + /* adjust value */ + sec = div_u64_rem(adjust, NSEC_PER_SEC, &nsec); + PTP_LOG_INFO("sec: %llu, nsec: %u\n", sec, nsec); + // nsec = rem; + zxdh_write_reg(base_addr, ADJUST_HIGH_TOD_SECOND, + (uint32_t)(sec >> 32)); + zxdh_write_reg(base_addr, ADJUST_LOWER_TOD_SECOND, + (uint32_t)(sec & 0xffffffff)); + zxdh_write_reg(base_addr, ADJUST_TOD_NANO_SECOND, nsec); + + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << 4); + reg_val |= ((run_mode << 4) | (1 << TIMER_1588_UPT_EN_BIT)); + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + + /* enable adjust */ + zxdh_write_reg(base_addr, TIMER_CONTROL, 1 << 1); + + spin_unlock_irqrestore(&global_ptpm_lock, flags); + + return 0; +} + +static int zxdh_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + uint32_t ns; + uint64_t s; + uint32_t reg_val; + unsigned long flags; + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + //ptp,ts + PTP_COMM_CHECK_POINT_RET(ptp, 
PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(ts, PTP_PARA_CHK_POINT_NULL); + + adapter = container_of(ptp, struct zxdh_ptp_private, ptp_caps[0]); + PTP_LOG_INFO("name: %s\n", ptp->name); + + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_ptp_gettime adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + mutex_lock(&adapter->ptp_clk_mutex); + spin_lock_irqsave(&global_ptpm_lock, flags); + + // normal mode. + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << PPS_RUN_MODE_BIT); + reg_val |= (NORMAL_MODE << PPS_RUN_MODE_BIT); + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + zxdh_write_reg(base_addr, TIMER_LACTH_SEL, 1 << LATCH_1588_TIMER); + zxdh_write_reg(base_addr, TIMER_LATCH_EN, 1); + + ns = zxdh_read_reg(base_addr, LATCH_TOD_NANO_SECOND); + s = zxdh_read_reg(base_addr, LATCH_LOWER_TOD_SECOND); + s |= ((uint64_t)(zxdh_read_reg(base_addr, LATCH_HIGH_TOD_SECOND)) + << 32); + + spin_unlock_irqrestore(&global_ptpm_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); + + // *ts = ns_to_timespec64(ns); + ts->tv_sec = s; + ts->tv_nsec = ns; + PTP_LOG_INFO("kernel get clock time: %lld.%09ld\n", ts->tv_sec, + ts->tv_nsec); + + return 0; +} + +static int zxdh_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + unsigned long flags; + uint32_t reg_val; + + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(ts, PTP_PARA_CHK_POINT_NULL); + + adapter = container_of(ptp, struct zxdh_ptp_private, ptp_caps[0]); + PTP_LOG_INFO("name: %s, sec: %lld, nsec: %ld\n", ptp->name, ts->tv_sec, + ts->tv_nsec); + + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_ptp_settime adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + mutex_lock(&adapter->ptp_clk_mutex); + + spin_lock_irqsave(&global_ptpm_lock, flags); + + /* adjust value */ + zxdh_write_reg(base_addr, ADJUST_HIGH_TOD_SECOND, + 
(uint32_t)(ts->tv_sec >> 32) & 0xffff); + zxdh_write_reg(base_addr, ADJUST_LOWER_TOD_SECOND, + (uint32_t)(ts->tv_sec & 0xffffffff)); + zxdh_write_reg(base_addr, ADJUST_TOD_NANO_SECOND, ts->tv_nsec); + + /* 1588 timer, update mode */ + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << 4); + reg_val |= UPDATE_MODE << 4 | 1 << TIMER_1588_UPT_EN_BIT; + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + + /* enable adjust */ + zxdh_write_reg(base_addr, TIMER_CONTROL, 1 << 1); + + spin_unlock_irqrestore(&global_ptpm_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); + + return 0; +} + +static int zxdh_ptp_enable_pps(uint64_t base_addr, int on) +{ + uint32_t reg_val; + uint64_t ptpm_base_addr; + + ptpm_base_addr = base_addr; + + // bit14 enable ptp pps output + reg_val = zxdh_read_reg(ptpm_base_addr, PTP_CONFIGURATION); + reg_val &= ~(1 << 14); + reg_val |= on << 14; + zxdh_write_reg(ptpm_base_addr, PTP_CONFIGURATION, reg_val); + return 0; +} + +/** + * zxdh_ptp_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + */ +static int zxdh_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + int ret; + //int pin; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(rq, PTP_PARA_CHK_POINT_NULL); + + PTP_LOG_INFO("name: %s, rq->type: %d\n", ptp->name, rq->type); + ret = 0; + //pin = -1; + adapter = container_of(ptp, struct zxdh_ptp_private, ptp_caps[0]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_ptp_enable adapter null\n"); + return -1; + } + switch (rq->type) { + case PTP_CLK_REQ_PPS: + ret = zxdh_ptp_enable_pps(adapter->ptpm_addr, on); + return ret; + case PTP_CLK_REQ_EXTTS: + if (on) { + // pin = ptp_find_pin(adapter->ptp_clock[0], PTP_PF_EXTTS, + // rq->extts.index); + // if (pin < 0) + // return -EBUSY; + if (rq->extts.index == 0 || + 
rq->extts.index == + 1) { // 选择pps0 or pps1做为capture和中断源 + adapter->pps_channel = rq->extts.index; + zxdh_write_reg(adapter->ptptop_addr, + PP1S_EXTERNAL_SEL, + rq->extts.index); + } + } + return 0; + + case PTP_CLK_REQ_PEROUT: + return 0; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int zxdh_tsn_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + unsigned long flags; + int neg_adj = 0; + uint32_t cur_nano = 0; + uint32_t cur_frac_nano = 0; //初值 + uint64_t tmp_frac_nano = 0; + int timer_no; + uint32_t integer_reg = 0; + uint32_t fraction_reg = 0; + uint64_t base_addr; + uint64_t freq_adj; + s32 ppb; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + ppb = scaled_ppm_to_ppb(scaled_ppm); + PTP_LOG_INFO("name: %s, ppb: %d\n", ptp->name, ppb); + timer_no = get_tsn_timer_no(ptp->name); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + adapter = container_of(ptp, struct zxdh_ptp_private, + ptp_caps[timer_no + 1]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_tsn_adjfine adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + if (ppb == 0) + return 0; + + if (ppb < 0) { + ppb = -ppb; + neg_adj = 1; + } + + integer_reg = tsn_clock_cycle_integer_reg(timer_no); + fraction_reg = tsn_clock_cycle_fraction_reg(timer_no); + + cur_nano = zxdh_read_reg(base_addr, integer_reg); + cur_frac_nano = zxdh_read_reg(base_addr, fraction_reg); + + tmp_frac_nano = ((unsigned long long)cur_nano << 32) + cur_frac_nano; + + PTP_LOG_INFO("cur_nano: %u, cur_frac_nano: %u, tmp_frac_nano: 0x%llx\n", + cur_nano, cur_frac_nano, tmp_frac_nano); + + /* positive adjust */ + if (0 == neg_adj) { + tmp_frac_nano += tmp_frac_nano * ppb / 1000000000; + } else { /* negative adjust */ + freq_adj = tmp_frac_nano * ppb / 1000000000; + if (tmp_frac_nano > freq_adj) + tmp_frac_nano -= freq_adj; + } + + PTP_LOG_INFO("new tmp_frac_nano: 0x%llx\n", tmp_frac_nano); + cur_nano = 
(uint32_t)(tmp_frac_nano >> 32); + cur_frac_nano = tmp_frac_nano & 0xffffffff; + + PTP_LOG_INFO("cur_nano: %u, cur_frac_nano: %u\n", cur_nano, + cur_frac_nano); + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + zxdh_write_reg(base_addr, integer_reg, cur_nano); + zxdh_write_reg(base_addr, fraction_reg, cur_frac_nano); + + zxdh_write_reg(base_addr, CLOCK_CYCLE_UPDATE, 1 << (timer_no + 1)); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +static int zxdh_tsn_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + unsigned long flags = 0; + uint32_t run_mode; + uint32_t reg_val; + uint64_t adjust; + uint64_t sec; + uint32_t nsec; + int timer_no; + uint32_t nano_sec_reg; + uint32_t low_sec_reg; + uint32_t high_sec_reg; + int run_mode_bit_shift; + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_LOG_INFO("name: %s, delta: %lld\n", ptp->name, delta); + timer_no = get_tsn_timer_no(ptp->name); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + adapter = container_of(ptp, struct zxdh_ptp_private, + ptp_caps[timer_no + 1]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_tsn_adjtime adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + /* 1588 timer, update mode */ + if (delta > 0) { + run_mode = INCRE_MODE; + adjust = delta; + } else { + run_mode = DECRE_MODE; + adjust = -delta; + } + + GET_TSN_ADJUST_NANO_REG(timer_no); + GET_TSN_ADJUST_LOW_SEC_REG(timer_no); + GET_TSN_ADJUST_HIGH_SEC_REG(timer_no); + + /* adjust value */ + sec = div_u64_rem(adjust, NSEC_PER_SEC, &nsec); + + PTP_LOG_INFO("sec: %llu, nsec: %u\n", sec, nsec); + // nsec = rem; + zxdh_write_reg(base_addr, high_sec_reg, (uint32_t)(sec >> 32)); + zxdh_write_reg(base_addr, low_sec_reg, (uint32_t)(sec & 0xffffffff)); + zxdh_write_reg(base_addr, nano_sec_reg, nsec); + + run_mode_bit_shift = 4 + 
timer_no * 2; + reg_val = zxdh_read_reg(base_addr, TSN_TIME_CONFIGURATION); + reg_val &= ~(0x3 << run_mode_bit_shift); + reg_val |= run_mode << run_mode_bit_shift; + zxdh_write_reg(base_addr, TSN_TIME_CONFIGURATION, reg_val); + /* enable adjust */ + zxdh_write_reg(base_addr, TSN_TIMER_CONTROL, 1 << timer_no); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + return 0; +} + +static int zxdh_tsn_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + uint32_t ns; + uint64_t s; + unsigned long flags; + int timer_no; + uint32_t reg_val; + int run_mode_bit_shift; + uint32_t nano_sec_reg; + uint32_t low_sec_reg; + uint32_t high_sec_reg; + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(ts, PTP_PARA_CHK_POINT_NULL); + + PTP_LOG_INFO("name: %s\n", ptp->name); + timer_no = get_tsn_timer_no(ptp->name); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + adapter = container_of(ptp, struct zxdh_ptp_private, + ptp_caps[timer_no + 1]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_tsn_gettime adapter null\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + mutex_lock(&adapter->ptp_clk_mutex); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + // bit11~bit4, configure normal mode, should make sure bit15~bit12 enable + // first. 
+ run_mode_bit_shift = 4 + timer_no * 2; + reg_val = zxdh_read_reg(base_addr, TSN_TIME_CONFIGURATION); + reg_val &= ~(0x3 << run_mode_bit_shift); + reg_val |= NORMAL_MODE << run_mode_bit_shift; + zxdh_write_reg(base_addr, TSN_TIME_CONFIGURATION, reg_val); + + // config latch one tsn timer + reg_val = 0; + reg_val = 1 << (timer_no + 2); + zxdh_write_reg(base_addr, TIMER_LACTH_SEL, reg_val); + + // enable latch + zxdh_write_reg(base_addr, TIMER_LATCH_EN, 1); + + GET_TSN_LATCH_NANO_REG(timer_no); + GET_TSN_LATCH_LOW_SEC_REG(timer_no); + GET_TSN_LATCH_HIGH_SEC_REG(timer_no); + + ns = zxdh_read_reg(base_addr, nano_sec_reg); + s = zxdh_read_reg(base_addr, low_sec_reg); + s |= (uint64_t)zxdh_read_reg(base_addr, high_sec_reg) << 32; + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); + + // *ts = ns_to_timespec64(ns); + ts->tv_sec = s; + ts->tv_nsec = ns; + PTP_LOG_INFO("kernel get %s clock time: %lld.%09ld\n", ptp->name, + ts->tv_sec, ts->tv_nsec); + + return 0; +} + +/** + * zxdh_tsn_settime + * @ptp: the ptp clock struct + * @ts: the timespec containing the new time + */ +static int zxdh_tsn_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + unsigned long flags; //检查irq库函数 flag用法 + uint32_t reg_val; + int timer_no; + int run_mode_bit_shift; + uint32_t nano_sec_reg; + uint32_t low_sec_reg; + uint32_t high_sec_reg; + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(ts, PTP_PARA_CHK_POINT_NULL); + PTP_LOG_INFO("name: %s, sec: %lld, nsec: %ld\n", ptp->name, ts->tv_sec, + ts->tv_nsec); + timer_no = get_tsn_timer_no(ptp->name); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + adapter = container_of(ptp, struct zxdh_ptp_private, + ptp_caps[timer_no + 1]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_tsn_settime adapter null\n"); + return -1; + } + base_addr = 
adapter->ptpm_addr; + + mutex_lock(&adapter->ptp_clk_mutex); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + GET_TSN_ADJUST_NANO_REG(timer_no); + GET_TSN_ADJUST_LOW_SEC_REG(timer_no); + GET_TSN_ADJUST_HIGH_SEC_REG(timer_no); + + /* adjust value */ + zxdh_write_reg(base_addr, high_sec_reg, (uint32_t)(ts->tv_sec >> 32)); + zxdh_write_reg(base_addr, low_sec_reg, + (uint32_t)(ts->tv_sec & 0xffffffff)); + zxdh_write_reg(base_addr, nano_sec_reg, ts->tv_nsec); + + run_mode_bit_shift = 4 + timer_no * 2; + reg_val = zxdh_read_reg(base_addr, TSN_TIME_CONFIGURATION); + reg_val &= ~(0x3 << run_mode_bit_shift); + reg_val |= UPDATE_MODE << run_mode_bit_shift; + zxdh_write_reg(base_addr, TSN_TIME_CONFIGURATION, reg_val); + /* enable adjust */ + zxdh_write_reg(base_addr, TSN_TIMER_CONTROL, 1 << timer_no); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex); + + return 0; +} + +static int zxdh_tsn_enable_pps(uint64_t base_addr, int tsn_timer, int on) +{ + uint32_t reg_val = 0; + + uint64_t ptpm_base_addr; + + ptpm_base_addr = base_addr; + + // enable or disable tsn pps + reg_val = zxdh_read_reg(ptpm_base_addr, TSN_TIME_CONFIGURATION); + reg_val &= ~(1 << (16 + tsn_timer)); + reg_val |= on << (16 + tsn_timer); + zxdh_write_reg(ptpm_base_addr, TSN_TIME_CONFIGURATION, reg_val); + + // select tsn pps output from test_1pps + // reg_val = 0x4 + tsn_timer; + // zxdh_write_reg(base_addr, TEST_PP1S_SEL, reg_val); + // 这里先不关联配置test_pps输出,单独用ioctl配置test_pps输出 + return 0; +} + +/** + * zxdh_tsn_enable + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature + * + */ +static int zxdh_tsn_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + int timer_no; + int ret = 0; + //int pin = -1; + struct zxdh_ptp_private *adapter = NULL; + + PTP_COMM_CHECK_POINT_RET(ptp, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(rq, PTP_PARA_CHK_POINT_NULL); + 
timer_no = get_tsn_timer_no(ptp->name); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + adapter = container_of(ptp, struct zxdh_ptp_private, + ptp_caps[timer_no + 1]); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_tsn_enable adapter null\n"); + return -1; + } + PTP_LOG_INFO("name: %s, timer_no: %u, rq->type: %d\n", ptp->name, + timer_no, rq->type); + + switch (rq->type) { + case PTP_CLK_REQ_PPS: + ret = zxdh_tsn_enable_pps(adapter->ptpm_addr, timer_no, on); + return ret; + case PTP_CLK_REQ_EXTTS: + if (on) { + // pin = ptp_find_pin(adapter->ptp_clock[timer_no+1], PTP_PF_EXTTS, + // rq->extts.index); + // if (pin < 0) + // return -EBUSY; + if ((rq->extts.index == 0) || + (rq->extts.index == + 1)) { // 选择pps0 or pps1做为capture和中断源 + adapter->pps_channel = rq->extts.index; + zxdh_write_reg(adapter->ptptop_addr, + PP1S_EXTERNAL_SEL, + rq->extts.index); + } + } + return 0; + + case PTP_CLK_REQ_PEROUT: + return 0; + default: + return -EOPNOTSUPP; + } +} + +/* This function handle the pps interrupt event. 
*/ +irqreturn_t msix_extern_pps_irq_from_risc_handler(struct zxdh_pf_device *dev) +{ + uint32_t high_sec, low_sec, nsec; + struct ptp_clock_event event; + int i; + struct zxdh_pf_device *zxdev = dev; + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr = 0x0; + + __u32 pps_event; + __u32 clear_event; + __u32 pps_mask; + + uint32_t nano_sec_reg; + uint32_t low_sec_reg; + uint32_t high_sec_reg; + // PTP_LOG_INFO("irq: %d\n", irq); + if (dev == NULL) { + return -1; + } + + adapter = zxdev->ptp; + if (adapter == NULL) { + PTP_LOG_ERR("msix_extern_pps_irq_from_risc_handler adapter\n"); + return -1; + } + if (!zxdh_pf_is_evb(zxdev)) { + return IRQ_HANDLED; + } + base_addr = adapter->ptpm_addr; + + pps_event = zxdh_read_reg(base_addr, INTERRUPT_EVENT); // 0x10 + pps_mask = zxdh_read_reg(base_addr, INTERRUPT_MASK); + PTP_LOG_INFO("pps_event: 0x%x,pps_mask: 0x%x, capture_timer: %d\n", + pps_event, pps_mask, zxdev->ptp->interrupt_capture_timer); + + // disable int + for (i = 0; i < PTPM_INTERRUPT_BIT_NUM; i++) { + if (pps_event & (1 << i)) + zxdh_write_reg(base_addr, INTERRUPT_MASK, + pps_mask & (~(1 << i))); + } + + // 清中断event + clear_event = pps_mask & pps_event; // 0x10 + zxdh_write_reg(base_addr, INTERRUPT_EVENT, clear_event); + + if (zxdev->ptp->interrupt_capture_timer > INTERRUPT_CAP_TIMER_MAX_NO) { + PTP_LOG_INFO("capture_timer: %u out of range!\n", + zxdev->ptp->interrupt_capture_timer); + return -1; + } + // 用status不如用event准 + if (pps_event & + (1 + << EXTERNAL_PPS_BIT)) { // 外部PPS信号产生的中断,Capture模式抓到的TOD + // 1588 timer + if (zxdev->ptp->interrupt_capture_timer == 0) { + nsec = zxdh_read_reg(base_addr, + PPS_LATCH_TOD_NANO_SECOND); + low_sec = zxdh_read_reg(base_addr, + PPS_LATCH_LOWER_TOD_SECOND); + high_sec = zxdh_read_reg(base_addr, + PPS_LATCH_HIGH_TOD_SECOND); + } else { // tsn timer + GET_PPS_LATCH_TSN_HIGH_SEC_REG( + zxdev->ptp->interrupt_capture_timer); + GET_PPS_LATCH_TSN_LOW_SEC_REG( + zxdev->ptp->interrupt_capture_timer); + 
GET_PPS_LATCH_TSN_NANO_REG( + zxdev->ptp->interrupt_capture_timer); + nsec = zxdh_read_reg(base_addr, nano_sec_reg); + low_sec = zxdh_read_reg(base_addr, low_sec_reg); + high_sec = zxdh_read_reg(base_addr, high_sec_reg); + } + } else if (pps_event & (1 << TRIGGER_IN_BIT)) { // trigger in信号捕捉的TOD + nsec = zxdh_read_reg(base_addr, TRIGGER_IN_TOD_NANO_SECOND); + low_sec = zxdh_read_reg(base_addr, TRIGGER_IN_LOWER_TOD_SECOND); + high_sec = zxdh_read_reg(base_addr, TRIGGER_IN_HIGH_TOD_SECOND); + } else { + PTP_LOG_INFO("unknown pps irq\n"); + nsec = 0; + low_sec = 0; + high_sec = 0; + } + + event.type = PTP_CLOCK_EXTTS; + event.index = adapter->pps_channel; + event.timestamp = + (((uint64_t)high_sec << 32) | low_sec) * 1000000000ULL + nsec; + PTP_LOG_INFO("nsec: %u, low_sec: %u, high_sec: %u\n", nsec, low_sec, + high_sec); + PTP_LOG_INFO("capture_timer: %u, timestamp: %llu\n", + zxdev->ptp->interrupt_capture_timer, event.timestamp); + ptp_clock_event( + zxdev->ptp->ptp_clock[zxdev->ptp->interrupt_capture_timer], + &event); + + // enable int + zxdh_write_reg(base_addr, INTERRUPT_MASK, pps_mask); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(msix_extern_pps_irq_from_risc_handler); + +irqreturn_t msix_local_pps_irq_from_risc_handler(struct zxdh_pf_device *dev) +{ + struct ptp_clock_event event; + __u32 reg_int; + struct zxdh_pf_device *zxdev = dev; + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr = 0x0; + + if (dev == NULL) { + return -1; + } + + adapter = zxdev->ptp; + if (adapter == NULL) { + PTP_LOG_ERR("msix_local_pps_irq_from_risc_handler adapter\n"); + return -1; + } + if (!zxdh_pf_is_evb(zxdev)) { + return IRQ_HANDLED; + } + base_addr = adapter->ptptop_addr; + + // PTP_LOG_INFO("irq: %d\n", irq); + event.type = PTP_CLOCK_PPS; + + /* adapter->ptp_clock[0]中创建pps, 用pps对应local_pps的中断 */ + if (!zxdev->ptp->ptp_clock[0]) + return IRQ_HANDLED; + + ptp_clock_event(zxdev->ptp->ptp_clock[0], &event); + + // base_addr = bar_addr + PTP_HOST_BAR_OFFSET; + reg_int = 
zxdh_read_reg(base_addr, LOCAL_PPS_INTERRUPT); // 0x10 + PTP_LOG_INFO("reg_int: 0x%x\n", reg_int); + reg_int |= 1 << 1; + zxdh_write_reg(base_addr, LOCAL_PPS_INTERRUPT, reg_int); + +#if 0 // only debug for local pps + uint64_t ns; + uint32_t s; + static uint64_t last_ns; + static uint32_t last_s; + + reg_val = zxdh_read_reg(adapter, PTP_CONFIGURATION, PTP_M); + reg_val &= ~(0x3 << 4); + reg_val |= NORMAL_MODE << 4; + zxdh_write_reg(adapter, PTP_CONFIGURATION, reg_val, PTP_M); + + // config latch 1588 timer + zxdh_write_reg(adapter, TIMER_LACTH_SEL, 1 << LATCH_1588_TIMER, PTP_M); + + // enable latch + zxdh_write_reg(adapter, TIMER_LATCH_EN, 1, PTP_M); + + + ns = zxdh_read_reg(adapter, LATCH_TOD_NANO_SECOND, PTP_M); + s = zxdh_read_reg(adapter, LATCH_LOWER_TOD_SECOND, PTP_M); + // hwts->s |= (uint64_t)zxdh_read_reg(adapter, LATCH_HIGH_TOD_SECOND, PTP_M) << 32; + + // ns += s * 1000000000ULL; + last_ns = ns; + last_s = s; + PTP_LOG_INFO("timestamp: cur:%llu.%09lu\n", low_sec*200, nsec); + +#endif + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(msix_local_pps_irq_from_risc_handler); + +// for net_device driver get timestamp, param ptp get from get_tsn_clock +// function. hw timestamp only use 32 bit. 
+int get_pkt_timestamp(int32_t clock_no, struct zxdh_en_device *en_dev, + struct time_stamps *ts, u32 *hwts) +{ + u32 nano_sec_reg; + u32 low_sec_reg; + u32 high_sec_reg; + int timer_no; + int run_mode_bit_shift; + u32 reg_val; + struct time_stamps temp_ts; + unsigned long flags; + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + struct ptp_clock_info *ptp; + int phcidx; + //uint64_t latch_time = 0; + //uint64_t read_time = 0; + //uint64_t end_time = 0; + if (NULL == en_dev || NULL == ts || NULL == hwts) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + + /* Started by AICoder, pid:z8d8d0b8d9ecc7614bcb09b9001e960b5135e639 */ + if (adapter == NULL) { + PTP_LOG_ERR("get_pkt_timestamp adapter vport 0x%x\n", + en_dev->vport); + return -1; + } + /* Ended by AICoder, pid:z8d8d0b8d9ecc7614bcb09b9001e960b5135e639 */ + for (phcidx = 0; phcidx < ZX_CLOCK_TIMER_NUM; phcidx++) { + if (clock_no == ptp_clock_index(adapter->ptp_clock[phcidx])) { + ptp = &adapter->ptp_caps[phcidx]; + break; + } + } + if (phcidx == ZX_CLOCK_TIMER_NUM) { + PTP_LOG_ERR("get phcindex fail\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + PTP_LOG_INFO("ptp->name: %s\n", ptp->name); + // first 80bit and second 80bit: both are 1588 timestamp + if (0 == strcmp(ptp->name, "ptp0")) { + PTP_LOG_INFO("ptp0\n"); + //spin_lock_irqsave(&adapter->tmreg_lock, flags); + spin_lock_irqsave(&global_ptpm_lock, flags); + // latch_time = ktime_get_ns(); + // PTP_LOG_INFO("latch time %llu pid %d\n",latch_time,current->pid); + + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << PPS_RUN_MODE_BIT); + reg_val |= NORMAL_MODE << PPS_RUN_MODE_BIT; + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + + // config latch 1588 timer and hw timer + zxdh_write_reg(base_addr, TIMER_LACTH_SEL, + 1 << LATCH_1588_TIMER | 1 << LATCH_HW_TIMER); + + // enable latch + zxdh_write_reg(base_addr, TIMER_LATCH_EN, 1); + + // read_time = ktime_get_ns(); + // PTP_LOG_INFO("read 
time %llu pid %d\n",read_time,current->pid); + + temp_ts.ns = zxdh_read_reg(base_addr, LATCH_TOD_NANO_SECOND); + temp_ts.s = zxdh_read_reg(base_addr, LATCH_LOWER_TOD_SECOND); + temp_ts.s |= + (u64)zxdh_read_reg(base_addr, LATCH_HIGH_TOD_SECOND) + << 32; + + ts->ns = temp_ts.ns; + ts->s = temp_ts.s; + + ts++; + ts->ns = temp_ts.ns; + ts->s = temp_ts.s; + + *hwts = zxdh_read_reg(base_addr, LATCH_HARDWARE_TIME_LOW); + //global_ptpm_lock确认其他函数中的使用方式,应该按照全局锁来操作 + //spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + spin_unlock_irqrestore(&global_ptpm_lock, flags); + // end_time = ktime_get_ns(); + // PTP_LOG_INFO("end_time %llu pid %d\n",end_time,current->pid); + // PTP_LOG_INFO("ts[]: 0x%x.%x, hwts: 0x%x\n", ts->s, ts->ns, *hwts); + } else { // first 80bit is 1588 timestamp, second 80bit is one tsn timestamp + timer_no = get_tsn_timer_no(ptp->name); + PTP_LOG_INFO("tsn: %d\n", timer_no); + PTP_CHECK_RANGE_WITH_RETURN(timer_no, TSN_TIMER_NAME_MIN_NO, + TSN_TIMER_NAME_MAX_NO, -1); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << PPS_RUN_MODE_BIT); + reg_val |= NORMAL_MODE << PPS_RUN_MODE_BIT; + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + + // bit11~bit4, configure normal mode, should make sure bit15~bit12 enable + // first. 
+ run_mode_bit_shift = 4 + timer_no * 2; + reg_val = zxdh_read_reg(base_addr, TSN_TIME_CONFIGURATION); + reg_val &= ~(0x3 << run_mode_bit_shift); + reg_val |= NORMAL_MODE << run_mode_bit_shift; + zxdh_write_reg(base_addr, TSN_TIME_CONFIGURATION, reg_val); + + // config latch 1588 and one tsn timer and hw timer + zxdh_write_reg(base_addr, TIMER_LACTH_SEL, + (1 << LATCH_1588_TIMER) | (1 << LATCH_HW_TIMER) | + (1 << (timer_no + 2))); + + // enable latch + zxdh_write_reg(base_addr, TIMER_LATCH_EN, 1); + + // read 1588 timer + ts->ns = zxdh_read_reg(base_addr, LATCH_TOD_NANO_SECOND); + ts->s = zxdh_read_reg(base_addr, LATCH_LOWER_TOD_SECOND); + ts->s |= (u64)zxdh_read_reg(base_addr, LATCH_HIGH_TOD_SECOND) + << 32; + + ts++; + + GET_TSN_LATCH_NANO_REG(timer_no); + GET_TSN_LATCH_LOW_SEC_REG(timer_no); + GET_TSN_LATCH_HIGH_SEC_REG(timer_no); + // read one tsn timer + ts->ns = zxdh_read_reg(base_addr, nano_sec_reg); + ts->s = zxdh_read_reg(base_addr, low_sec_reg); + ts->s |= (u64)zxdh_read_reg(base_addr, high_sec_reg) << 32; + + *hwts = zxdh_read_reg(base_addr, LATCH_HARDWARE_TIME_LOW); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + // PTP_LOG_INFO("ts[]: 0x%x.%x, hwts: 0x%x\n", ts->s, ts->ns, *hwts); + } + + return 0; +} +EXPORT_SYMBOL(get_pkt_timestamp); + +#define PTPS_NUMS 3 +int enable_write_ts_to_fifo(struct zxdh_en_device *en_dev, u32 enable, + u32 mac_number) +{ + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + if (PTPS_NUMS <= mac_number) { + PTP_LOG_ERR("mac number out of range\n"); + return -1; + } + + if (NULL == en_dev) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("enable_write_ts_to_fifo ptp adapter null\n"); + return -1; + } + base_addr = adapter->ptps_addr; + + PTP_LOG_INFO("enable: %u, mac: %u\n", enable, mac_number); + zxdh_write_reg(base_addr, PTPS_CONFIGURATION, enable); + + return 0; +} +EXPORT_SYMBOL(enable_write_ts_to_fifo); + +int get_event_ts_info(struct 
zxdh_en_device *en_dev, struct ptp_buff *p_tsInfo, + u32 mac_number) +{ + u32 count = 0; + int i; + uint64_t base_addr; + // enum reg_module ptps_module; + struct zxdh_ptp_private *adapter = NULL; + + if (PTPS_NUMS <= mac_number) { + PTP_LOG_ERR("mac number out of range\n"); + return -1; + } + + if (NULL == en_dev || NULL == p_tsInfo) { + PTP_LOG_ERR("input pointer null\n"); + return -1; + } + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("get_event_ts_info ptp adapter null\n"); + return -1; + } + base_addr = adapter->ptps_addr; + + // the maximum count is 64 + count = zxdh_read_reg(base_addr, PTP1588_EVENT_MESSAGE_FIFO_STATUS) & + 0xff; + // half is timestamp and half is match info(messageType, sourcePortIdentity, + // sequenceId) + if (count > PTP_ENCRYPTED_MESG_MAX_NUM) { + PTP_LOG_ERR("encrypted ptp message out of range!\n"); + return -1; + } + count /= 2; + PTP_LOG_INFO("count: %d\n", count); + + for (i = 0; i < count; i++) { + zxdh_write_reg(base_addr, PTPS_TIMER_CONTROL, 1); + // read timestamp + p_tsInfo->ptpRegInfo[i].cfVal[0] = + zxdh_read_reg(base_addr, PTP1588_EVENT_MESSAGE_TS_LOW); + p_tsInfo->ptpRegInfo[i].cfVal[1] = + zxdh_read_reg(base_addr, PTP1588_EVENT_MESSAGE_TS_HIGH); + + PTP_LOG_INFO("i: %d, low: 0x%x, high: 0x%x\n", i, + p_tsInfo->ptpRegInfo[i].cfVal[0], + p_tsInfo->ptpRegInfo[i].cfVal[1]); + zxdh_write_reg(base_addr, PTPS_TIMER_CONTROL, 1); + // read messageType, sourcePortIdentity, sequenceId + p_tsInfo->ptpRegInfo[i].matchInfo = + zxdh_read_reg(base_addr, PTP1588_EVENT_MESSAGE_TS_LOW); + PTP_LOG_INFO("i: %d, matchInfo: 0x%x\n", i, + p_tsInfo->ptpRegInfo[i].matchInfo); + } + p_tsInfo->cfCount = count; + PTP_LOG_INFO("success\n"); + return 0; +} +EXPORT_SYMBOL(get_event_ts_info); + +int32_t set_interrupt_capture_timer(struct zxdh_en_device *en_dev, + uint32_t index) +{ + struct zxdh_ptp_private *adapter = NULL; + if (NULL == en_dev) + return -1; + + if (index > INTERRUPT_CAP_TIMER_MAX_NO) { + 
PTP_LOG_INFO("capture_timer: %u out of range!\n", index); + return -1; + } + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("set_interrupt_capture_timer ptp adapter null\n"); + return -1; + } + adapter->interrupt_capture_timer = index; + PTP_LOG_INFO("index: %u\n", index); + PTP_LOG_INFO("pcie_id: 0x%x, vport: 0x%x, phy_port: %u\n", + adapter->pdev->pcie_id, adapter->pdev->vport, + adapter->pdev->phy_port); + return 0; +} +EXPORT_SYMBOL(set_interrupt_capture_timer); + +int32_t zxdh_set_pps_selection(struct zxdh_en_device *en_dev, uint32_t pps_type, + uint32_t selection) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + if (NULL == en_dev) + return -1; + + if (pps_type > PP1S_EXTERNAL || selection > PP1S_TSN3) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_get_pd_value ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptptop_addr; + + PTP_LOG_INFO("pps_type: %s, selection: %u\n", pps[pps_type], selection); + + switch (pps_type) { + case PP1S_OUT: + zxdh_write_reg(base_addr, PP1S_OUT_SEL, selection); + break; + case PP1S_TEST: + zxdh_write_reg(base_addr, TEST_PP1S_SEL, selection); + break; + case PP1S_EXTERNAL: + zxdh_write_reg(base_addr, PP1S_EXTERNAL_SEL, selection); + // 这个用PTP_CLK_REQ_EXTTS来配置 + break; + default: + break; + } + + return 0; +} +EXPORT_SYMBOL(zxdh_set_pps_selection); + +int32_t zxdh_set_pd_detection(struct zxdh_en_device *en_dev, uint32_t pd_index, + uint32_t pd_input1, uint32_t pd_input2) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + uint32_t reg_val; + if (NULL == en_dev) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_get_pd_value ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptptop_addr; + + PTP_LOG_INFO("pd_index: %u, pd_input1: %u, pd_input2: %u\n", pd_index, + pd_input1, pd_input2); + + // bit1~0: Pd_U1_Sel0 bit3~2: 
Pd_U1_Sel1. + reg_val = ((pd_input1) | (pd_input2 << 3)); + + if (PHASE_DETECTION1 == pd_index) { + zxdh_write_reg(base_addr, PD_U1_SEL, reg_val); + } else if (PHASE_DETECTION2 == pd_index) { + zxdh_write_reg(base_addr, PD_U2_SEL, reg_val); + } else { + PTP_LOG_ERR("pd_index error\n"); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(zxdh_set_pd_detection); + +int32_t zxdh_get_pd_value(struct zxdh_en_device *en_dev, uint32_t pd_index, + uint32_t *pd_result) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + if (NULL == en_dev) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_get_pd_value ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptptop_addr; + + if (PHASE_DETECTION1 == pd_index) { + *pd_result = zxdh_read_reg(base_addr, PD_U1_RESULT); + } else if (PHASE_DETECTION2 == pd_index) { + *pd_result = zxdh_read_reg(base_addr, PD_U2_RESULT); + } else { + PTP_LOG_ERR("pd_index error\n"); + return -1; + } + PTP_LOG_INFO("pd_index: %u, pd_result: 0x%x\n", pd_index, *pd_result); + + return 0; +} +EXPORT_SYMBOL(zxdh_get_pd_value); + +int32_t zxdh_get_ptp_clock_index(struct zxdh_en_device *en_dev, + uint32_t *ptp_clock_idx) +{ + struct zxdh_ptp_private *adapter = NULL; + if (NULL == en_dev) + return -1; + PTP_COMM_CHECK_POINT_RET(ptp_clock_idx, PTP_PARA_CHK_POINT_NULL); + PTP_COMM_CHECK_POINT_RET(en_dev->ops, PTP_PARA_CHK_POINT_NULL); + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return 0; + } + adapter = zxdh_ptp_get_ptp_private(en_dev); + + if (adapter == NULL) { + PTP_LOG_ERR("zxdh_get_ptp_clock_index ptp adapter fail\n"); + return -1; + } + + PTP_COMM_CHECK_POINT_RET(adapter->ptp_clock[0], + PTP_PARA_CHK_POINT_NULL); + *ptp_clock_idx = ptp_clock_index(adapter->ptp_clock[0]); + PTP_LOG_INFO("first ptp_clock_idx: %u\n", *ptp_clock_idx); + + return 0; +} +EXPORT_SYMBOL(zxdh_get_ptp_clock_index); + +int32_t zxdh_set_pps_interrupt_support(struct 
zxdh_en_device *en_dev, + uint32_t support) +{ + struct zxdh_ptp_private *adapter = NULL; + if (NULL == en_dev) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + if (adapter == NULL) { + PTP_LOG_ERR( + "zxdh_set_pps_interrupt_support ptp adapter fail\n"); + return -1; + } + /* message from riscv, triggered by SIOCDEVPRIVATE_PPS_FUNC */ + adapter->pps_intr_support = support; + PTP_LOG_INFO("set pps interrupt support: %u\n", support); + + return 0; +} +EXPORT_SYMBOL(zxdh_set_pps_interrupt_support); + +int32_t zxdh_get_pps_interrupt_support(struct zxdh_en_device *en_dev, + uint32_t *support) +{ + struct zxdh_ptp_private *adapter = NULL; + if (NULL == en_dev) + return -1; + PTP_COMM_CHECK_POINT_RET(support, PTP_PARA_CHK_POINT_NULL); + adapter = zxdh_ptp_get_ptp_private(en_dev); + + if (adapter == NULL) { + PTP_LOG_ERR( + "zxdh_get_pps_interrupt_support ptp adapter fail\n"); + return -1; + } + /* message from riscv, triggered by SIOCDEVPRIVATE_PPS_FUNC */ + + *support = adapter->pps_intr_support; + PTP_LOG_INFO("get pps interrupt support: %u\n", *support); + + return 0; +} +EXPORT_SYMBOL(zxdh_get_pps_interrupt_support); + +int32_t zxdh_set_local_pps_interrupt_enable(struct zxdh_en_device *en_dev, + uint32_t enable) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + uint32_t reg_val; + if (NULL == en_dev) + return -1; + + if (enable != ENABLE && enable != DISABLE) + return -1; + adapter = zxdh_ptp_get_ptp_private(en_dev); + + if (adapter == NULL) { + PTP_LOG_ERR( + "zxdh_set_local_pps_interrupt_enable ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptptop_addr; + + reg_val = zxdh_read_reg(base_addr, LOCAL_PPS_INTERRUPT); + reg_val &= ~0x1; + reg_val |= enable; + zxdh_write_reg(base_addr, LOCAL_PPS_INTERRUPT, reg_val); + + PTP_LOG_INFO("set local pps interrupt enable: %u\n", enable); + + return 0; +} +EXPORT_SYMBOL(zxdh_set_local_pps_interrupt_enable); + +int32_t zxdh_set_ext_pps_interrupt_enable(struct zxdh_en_device *en_dev, 
+ uint32_t pps_src, uint32_t enable) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + uint32_t reg_val; + if (NULL == en_dev) + return -1; + + if (enable != ENABLE && enable != DISABLE) + return -1; + /* + pps_src: + 0: trigger in + 1: trigger out + 2: FIFO no empty + 3: FIFO almost full + 4: pps in + */ + if (4 < pps_src) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + + if (adapter == NULL) { + PTP_LOG_ERR( + "zxdh_set_ext_pps_interrupt_enable ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + reg_val = zxdh_read_reg(base_addr, INTERRUPT_MASK); + //PTP_LOG_INFO("reg_val: 0x%08x\n", reg_val); + reg_val &= ~(1 << pps_src); + //PTP_LOG_INFO("reg_val: 0x%08x\n", reg_val); + reg_val |= enable << pps_src; + //PTP_LOG_INFO("reg_val: 0x%08x\n", reg_val); + zxdh_write_reg(base_addr, INTERRUPT_MASK, reg_val); + + if (enable == ENABLE) { + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + //PTP_LOG_INFO("reg_val: 0x%08x\n", reg_val); + reg_val |= (1 << PPS_INPUT_SEL_BIT); + //PTP_LOG_INFO("reg_val: 0x%08x\n", reg_val); + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + } + + PTP_LOG_INFO("set ext pps interrupt pps_src: %u, enable: %u\n", pps_src, + enable); + + return 0; +} +EXPORT_SYMBOL(zxdh_set_ext_pps_interrupt_enable); + +int32_t zxdh_set_pd_sel_shift(struct zxdh_en_device *en_dev, uint32_t pd_index, + uint32_t sel, uint32_t shift) +{ + struct zxdh_ptp_private *adapter = NULL; + uint64_t base_addr; + uint32_t reg_addr; + + if (NULL == en_dev) + return -1; + + if (PHASE_DETECTION1 != pd_index && PHASE_DETECTION2 != pd_index) + return -1; + + if (PD_SEL_1 != sel && PD_SEL_2 != sel) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + base_addr = adapter->ptptop_addr; + + if (PHASE_DETECTION1 == pd_index) { + reg_addr = (PD_SEL_1 == sel) ? PD_U1_PD0_SHIFT : + PD_U1_PD1_SHIFT; + } else { + reg_addr = (PD_SEL_1 == sel) ? 
PD_U2_PD0_SHIFT : + PD_U2_PD1_SHIFT; + } + + zxdh_write_reg(base_addr, reg_addr, shift); + + PTP_LOG_INFO("set ext pd%u sel%u shift: %u\n", pd_index, sel, shift); + + return 0; +} +EXPORT_SYMBOL(zxdh_set_pd_sel_shift); + +int get_hw_timestamp(struct zxdh_en_device *en_dev, u32 *hwts) +{ + u32 reg_val; + unsigned long flags; + uint64_t base_addr; + struct zxdh_ptp_private *adapter = NULL; + + if (NULL == en_dev || NULL == hwts) + return -1; + + adapter = zxdh_ptp_get_ptp_private(en_dev); + + if (adapter == NULL) { + PTP_LOG_ERR("get_hw_timestamp ptp adapter fail\n"); + return -1; + } + base_addr = adapter->ptpm_addr; + + spin_lock_irqsave(&global_ptpm_lock, flags); + + reg_val = zxdh_read_reg(base_addr, PTP_CONFIGURATION); + reg_val &= ~(0x3 << PPS_RUN_MODE_BIT); + reg_val |= (NORMAL_MODE << PPS_RUN_MODE_BIT); + zxdh_write_reg(base_addr, PTP_CONFIGURATION, reg_val); + + // config latch hw timer + zxdh_write_reg(base_addr, TIMER_LACTH_SEL, 1 << LATCH_HW_TIMER); + + // enable latch + zxdh_write_reg(base_addr, TIMER_LATCH_EN, 1); + + *hwts = zxdh_read_reg(base_addr, LATCH_HARDWARE_TIME_LOW); + + spin_unlock_irqrestore(&global_ptpm_lock, flags); + // PTP_LOG_INFO("ts[]: 0x%x.%x, hwts: 0x%x\n", ts->s, ts->ns, *hwts); + + return 0; +} +EXPORT_SYMBOL(get_hw_timestamp); + +int zxdh_ptp_init(struct dh_core_dev *zxdev) +{ + struct zxdh_ptp_private *zxp; + int err = -ENOMEM; + int i; + + uint32_t reg; + int size; + uint16_t ep_no; + uint64_t pci_addr; + uint64_t ptptop_paddr; + uint64_t ptpm_paddr; + uint64_t ptps_paddr; + struct zxdh_pf_device *pf_dev = NULL; + + PTP_COMM_CHECK_POINT_RET(zxdev, PTP_PARA_CHK_POINT_NULL); + + pf_dev = dh_core_priv(zxdev); + PTP_COMM_CHECK_POINT_RET(pf_dev, PTP_PARA_CHK_POINT_NULL); + + PTP_LOG_DEBUG("enter\n"); + + zxp = kzalloc(sizeof(*zxp), GFP_KERNEL); + if (!zxp) { + PTP_LOG_ERR("zxp kzalloc failed\n"); + goto no_memory; + } + + err = -ENODEV; + + zxp->ptp_caps[0].owner = THIS_MODULE; + strlcpy(zxp->ptp_caps[0].name, "ptp0", 
sizeof(zxp->ptp_caps[0].name)); + + zxp->ptp_caps[0].max_adj = 999999999; + zxp->ptp_caps[0].n_alarm = 0; + zxp->ptp_caps[0].n_ext_ts = + 2; // ptp_chardev.c: ptp_ioctl: if (req.extts.index >= ops->n_ext_ts) 需要改初值 + zxp->ptp_caps[0].n_per_out = 0; + zxp->ptp_caps[0].n_pins = 0; // + if (zxdh_pf_is_evb(pf_dev)) { + zxp->ptp_caps[0].pps = 1; + } else { + zxp->ptp_caps[0].pps = 0; + } + zxp->ptp_caps[0].adjfine = zx_ptp_adjfine; + zxp->ptp_caps[0].adjtime = zxdh_ptp_adjtime; + zxp->ptp_caps[0].gettime64 = zxdh_ptp_gettime; + zxp->ptp_caps[0].settime64 = zxdh_ptp_settime; + zxp->ptp_caps[0].enable = zxdh_ptp_enable; + + zxp->ptp_clock[0] = + ptp_clock_register(&zxp->ptp_caps[0], &zxdev->pdev->dev); + if (IS_ERR(zxp->ptp_clock[0])) { + zxp->ptp_clock[0] = NULL; + PTP_LOG_ERR("ptp_clock_register ptp0 failed\n"); + goto no_ptp_clock; + } + + for (i = 0; i < ZX_TSN_TIMER_NUM; i++) { + zxp->ptp_caps[i + 1].owner = THIS_MODULE; + size = snprintf(zxp->ptp_caps[i + 1].name, + sizeof(zxp->ptp_caps[i + 1].name), "tsn%d", i); + if (size >= sizeof(zxp->ptp_caps[i + 1].name)) + zxp->ptp_caps[i + 1] + .name[sizeof(zxp->ptp_caps[i + 1].name) - 1] = + '\0'; + + zxp->ptp_caps[i + 1].max_adj = 999999999; + zxp->ptp_caps[i + 1].n_alarm = 0; + zxp->ptp_caps[i + 1].n_ext_ts = 2; + zxp->ptp_caps[i + 1].n_per_out = 0; + zxp->ptp_caps[i + 1].n_pins = 0; + zxp->ptp_caps[i + 1].pps = 0; + zxp->ptp_caps[i + 1].adjfine = zxdh_tsn_adjfine; + zxp->ptp_caps[i + 1].adjtime = zxdh_tsn_adjtime; + zxp->ptp_caps[i + 1].gettime64 = zxdh_tsn_gettime; + zxp->ptp_caps[i + 1].settime64 = zxdh_tsn_settime; + zxp->ptp_caps[i + 1].enable = zxdh_tsn_enable; + zxp->ptp_clock[i + 1] = ptp_clock_register( + &zxp->ptp_caps[i + 1], &zxdev->pdev->dev); + if (IS_ERR(zxp->ptp_clock[i + 1])) { + zxp->ptp_clock[i + 1] = NULL; + PTP_LOG_ERR("ptp_clock_register tsn%d failed\n", i); + goto no_tsn_clock; + } + } + /* not support pps interrupt by default */ + zxp->pps_intr_support = 0; + + spin_lock_init(&zxp->tmreg_lock); + 
mutex_init(&zxp->ptp_clk_mutex); + if (ptpm_lock_init_stat == 0) { + PTP_LOG_INFO("global_ptpm_lock init\n"); + spin_lock_init(&global_ptpm_lock); + ptpm_lock_init_stat = 1; + } + + ep_no = EPID(pf_dev->vport); + pci_addr = pci_resource_start(zxdev->pdev, 0); + PTP_LOG_DEBUG("ep_no: %u, pci_addr: 0x%llx\n", ep_no, pci_addr); + + if (ep_no == EPID_4) { + ptptop_paddr = pci_addr + PTPTOP_ZF_BAR_OFFSET; + ptpm_paddr = pci_addr + PTPM_ZF_BAR_OFFSET; + ptps_paddr = pci_addr + PTPS_ZF_BAR_OFFSET; + } else { + ptptop_paddr = pci_addr + PTPTOP_HOST_BAR_OFFSET; + ptpm_paddr = pci_addr + PTPM_HOST_BAR_OFFSET; + ptps_paddr = pci_addr + PTPS_HOST_BAR_OFFSET; + } + + zxp->ptptop_addr = (uint64_t)ioremap(ptptop_paddr, PTPTOP_REGS_LEN); + if (zxp->ptptop_addr == 0) { + PTP_LOG_ERR("ptptop ioremap failed\n"); + goto ptptop_ioremap_fail; + } + + zxp->ptpm_addr = (uint64_t)ioremap(ptpm_paddr, PTPM_REGS_LEN); + if (zxp->ptpm_addr == 0) { + PTP_LOG_ERR("ptpm ioremap failed\n"); + goto ptpm_ioremap_fail; + } + + zxp->ptps_addr = (uint64_t)ioremap(ptps_paddr, PTPS_REGS_LEN); + if (zxp->ptps_addr == 0) { + PTP_LOG_ERR("ptps ioremap failed\n"); + goto ptps_ioremap_fail; + } + + pf_dev->ptp = zxp; + zxp->pdev = pf_dev; + + reg = zxdh_read_reg(zxp->ptpm_addr, PTP_CONFIGURATION); + reg |= (1 << 15); + zxdh_write_reg(zxp->ptpm_addr, PTP_CONFIGURATION, reg); + // enable four tsn timer and tsn pps enable + zxdh_write_reg(zxp->ptpm_addr, TSN_TIME_CONFIGURATION, 0xff000); + + // timesync delay + zxdh_write_reg(zxp->ptptop_addr, TSN_GROUP_NANO_SEC_DELAY0, 0x1); + zxdh_write_reg(zxp->ptptop_addr, TSN_GROUP_NANO_SEC_DELAY1, 0x1); + zxdh_write_reg(zxp->ptptop_addr, TSN_GROUP_NANO_SEC_DELAY2, 0x1); + zxdh_write_reg(zxp->ptptop_addr, TSN_GROUP_NANO_SEC_DELAY3, 0x1); + zxdh_write_reg(zxp->ptptop_addr, PTP1588_NP_NANO_SEC_DELAY, 0x1); + zxdh_write_reg(zxp->ptptop_addr, PTP1588_NVME_NANO_SEC_DELAY1, 0x13); + zxdh_write_reg(zxp->ptptop_addr, PTP1588_NVME_NANO_SEC_DELAY2, 0x13); + 
zxdh_write_reg(zxp->ptptop_addr, PTP1588_RDMA_NANO_SEC_DELAY, 0xC); + + return 0; + +ptps_ioremap_fail: + iounmap((void *)zxp->ptpm_addr); +ptpm_ioremap_fail: + iounmap((void *)zxp->ptptop_addr); +ptptop_ioremap_fail: +no_tsn_clock: + for (i = 0; i < ZX_CLOCK_TIMER_NUM; i++) { + if (zxp->ptp_clock[i]) + ptp_clock_unregister(zxp->ptp_clock[i]); + } +no_ptp_clock: + kfree(zxp); +no_memory: + return err; +} +EXPORT_SYMBOL(zxdh_ptp_init); + +void zxdh_ptp_stop(struct dh_core_dev *zxdev) +{ + int i; + + struct zxdh_pf_device *pf_dev; + struct zxdh_ptp_private *zxp = NULL; + PTP_COMM_CHECK_POINT(zxdev); + + pf_dev = dh_core_priv(zxdev); + PTP_COMM_CHECK_POINT(pf_dev); + + zxp = pf_dev->ptp; + + if (NULL == zxp) + return; + + iounmap((void *)zxp->ptptop_addr); + iounmap((void *)zxp->ptpm_addr); + iounmap((void *)zxp->ptps_addr); + + for (i = 0; i < ZX_CLOCK_TIMER_NUM; i++) { + if (zxp->ptp_clock[i]) + ptp_clock_unregister(zxp->ptp_clock[i]); + } + + kfree(zxp); +} +EXPORT_SYMBOL(zxdh_ptp_stop); diff --git a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.h b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.h new file mode 100644 index 000000000000..31528787271e --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.h @@ -0,0 +1,148 @@ +#ifndef _ZX_PTP_H +#define _ZX_PTP_H + +#include +#include "../en_pf.h" + +#define PTP_PARA_CHK_POINT_NULL (-1) + +#define ZX_CLOCK_TIMER_NUM 5 // 1st: ptp, other: tsn +#define ZX_TSN_TIMER_NUM (ZX_CLOCK_TIMER_NUM - 1) + +#define PTP_REG_INFO_NUM 32 +#define PTP_ENCRYPTED_MESG_MAX_NUM 64 + +#define PTP_DRIVER_UNINIT 0 +#define PTP_DRIVER_INITED 1 + +#define X86_ADDR_2_ARRCH64(X86_ADDR) \ + (((X86_ADDR & (~0xFFFF)) << 4) | (X86_ADDR & 0xFFFF)) + +#define PTPTOP_HOST_BAR_OFFSET 0xc000 +#define PTPM_HOST_BAR_OFFSET 0x10000 +#define PTPS_HOST_BAR_OFFSET 0x34000 + +#define PTPTOP_ZF_BAR_OFFSET PTPTOP_HOST_BAR_OFFSET +#define PTPM_ZF_BAR_OFFSET X86_ADDR_2_ARRCH64(PTPM_HOST_BAR_OFFSET) +#define PTPS_ZF_BAR_OFFSET 
X86_ADDR_2_ARRCH64(PTPS_HOST_BAR_OFFSET) + +#define PTPTOP_REGS_LEN 0x4000 +#define PTPM_REGS_LEN 0x4000 +#define PTPS_REGS_LEN 0x1000 + +#define EPID(VPORT) ((VPORT & 0x7000) >> 12) +#define EPID_4 (4) + +#define PHASE_DETECTION1 1 +#define PHASE_DETECTION2 2 + +typedef enum { + PD_SEL_INVALID, // no use + PD_SEL_1, + PD_SEL_2 +} PD_SEL; +#define PTPM_INTERRUPT_BIT_NUM 5 + +#define ENABLE 1 +#define DISABLE 0 + +#define TSN_TIMER_NAME_MIN_NO 0 +#define TSN_TIMER_NAME_MAX_NO 3 + +#define INTERRUPT_CAP_TIMER_MIN_NO 0 +#define INTERRUPT_CAP_TIMER_MAX_NO 4 +#define PTP_CHECK_RANGE_WITH_RETURN(val, min, max, ret) \ + do { \ + if (!(min <= val && val <= max)) \ + return ret; \ + } while (0) + +#define PTP_COMM_CHECK_POINT_RET(point, ret) \ + do { \ + if (NULL == point) { \ + PTP_LOG_ERR( \ + "\n %s:%d[Error:POINT NULL] ! FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return ret; \ + } \ + } while (0) + +#define PTP_COMM_CHECK_POINT(point) \ + do { \ + if (NULL == point) { \ + PTP_LOG_ERR( \ + "\n %s:%d[Error:POINT NULL] ! 
FUNCTION : %s!\n", \ + __FILE__, __LINE__, __FUNCTION__); \ + return; \ + } \ + } while (0) + +struct time_stamps { + u64 s; + u32 ns; +}; + +struct pkt_hw_ts { + struct time_stamps ts[2]; +}; + +enum { + PP1S_OUT, + PP1S_TEST, + PP1S_EXTERNAL, // maybe use PTP_CLK_REQ_EXTTS +}; + +typedef enum { + // PPS_OUT / TEST_PP1S / EXTERNAL_PP1S selection: + PP1S_REF0, + PP1S_REF1, + // PPS_OUT / TEST_PP1S selection: + PP1S_LOCAL, + PP1S_1588, + // TEST PP1S selection only: + PP1S_TSN0, + PP1S_TSN1, + PP1S_TSN2, + PP1S_TSN3 +} PPS_SELECT; + +typedef enum { LOCAL_PPS, + EXTERNAL_PPS } PPS_INTERRUPT_TYPE; + +struct zxdh_ptp_private { + // void __iomem *ptpm_regs; + // void __iomem *ptp_top_regs; + // void __iomem *ptps0_regs; + // void __iomem *ptps1_regs; + // void __iomem *ptps2_regs; + + struct mutex ptp_clk_mutex; + struct zxdh_pf_device *pdev; + struct ptp_clock *ptp_clock[ZX_CLOCK_TIMER_NUM]; + struct ptp_clock_info ptp_caps[ZX_CLOCK_TIMER_NUM]; + unsigned int pps_channel; // externel pps0/pps1 + unsigned int interrupt_capture_timer; + unsigned int pps_intr_support; + uint64_t ptptop_addr; + uint64_t ptpm_addr; + uint64_t ptps_addr; + + spinlock_t tmreg_lock; +}; + +struct ptp_reg_info { + uint32_t cfVal[2]; + uint32_t matchInfo; +}; + +struct ptp_buff { + uint32_t cfCount; + struct ptp_reg_info ptpRegInfo[PTP_REG_INFO_NUM]; +}; + +int zxdh_ptp_init(struct dh_core_dev *zxdev); +void zxdh_ptp_stop(struct dh_core_dev *zxdev); +irqreturn_t msix_extern_pps_irq_from_risc_handler(struct zxdh_pf_device *dev); +irqreturn_t msix_local_pps_irq_from_risc_handler(struct zxdh_pf_device *dev); + +#endif /* _ZX_PTP_H */ diff --git a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_common.h b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_common.h new file mode 100644 index 000000000000..6839bfc40bc4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_common.h @@ -0,0 +1,11 @@ +#ifndef _ZX_PTP_COMMON_H +#define _ZX_PTP_COMMON_H + +#include + +#define PTP_LOG_ERR(fmt, arg...) 
DH_LOG_ERR(MODULE_PTP, fmt, ##arg); +#define PTP_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_PTP, fmt, ##arg); +#define PTP_LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_PTP, fmt, ##arg); +#define PTP_LOG_WARN(fmt, arg...) DH_LOG_WARNING(MODULE_PTP, fmt, ##arg); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_regs.h b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_regs.h new file mode 100644 index 000000000000..5b8e10f7e1f8 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp_regs.h @@ -0,0 +1,473 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ZX_REGS_H +#define _ZX_REGS_H + +/**************************** ptpm start **************************/ + +#define PTPM_OFFSET_WITH_TOP 0x4000 + +#define EXTERNAL_PPS_BIT 4 +#define TRIGGER_OUT_BIT 1 +#define TRIGGER_IN_BIT 0 +/* bit4: pps_in_status + * 0: not receive pps + * 1: receive pps + */ +#define INTERRUPT_STATUS 0x00000010 +/* bit4: pps income event, writing 1 to the bit clearing it + * 0: not receive pps + * 1: receive pps + */ +#define INTERRUPT_EVENT 0x00000014 +/* bit4: pps income event mask + * 0: mask + * 1: no mask + */ +#define INTERRUPT_MASK 0x00000018 +/* bit4: trigger pps income event test + * 0: not occur + * 1: occur + */ +#define INTERRUPT_TEST 0x0000001C + +/* adjust clock cycle, for 1588 and hw timer */ +#define PTP_CLOCK_CYCLE_INTEGER 0x00000030 +#define PTP_CLOCK_CYCLE_FRACTION 0x00000034 + +/* BIT18: trig oe + * 1: trig out + * 0: trig in + * bit17: hw timer update enable, used when timer run mode is update,increment or decrement + * 0: update disable + * 1: update enable + * bit16: 1588 timer update enable, used when timer run mode is update,increment or decrement + * 0: update disable + * 1: update enable + * bit15: 1588 and hw time timer enable + * 0: timer disable + * 1: timer enable + * bit8: pps input select + * 0: select internal pps + * 1: select external pps + * bit5:4 1588 tod and hw timer run mode + * 0: normal + * 1: update + * 2: 
increment + * 3: decrement + * bit2: trigger out enable + * 0:disable + * 1:enable + * bit1: trigger in enable + * 0:disable + * 1:enable + * bit0: 1588 tod timer slave/capture mode + * 0:capture mode, capture 1588 tod timer when input pps pulse + * 1:slave mode, the timer make the input pps as reference + */ +#define PTP_CONFIGURATION 0x00000040 + +#define PPS_TRIGGER_IN_BIT 1 +#define PPS_TRIGGER_OUT_BIT 2 +#define PPS_RUN_MODE_BIT 4 +#define PPS_INPUT_SEL_BIT 8 +#define TIMER_EN_BIT 15 +#define TIMER_1588_UPT_EN_BIT 16 +#define TIMER_HW_UPT_EN_BIT 17 +#define TRIG_OE 18 +enum timer_run_mode { + NORMAL_MODE, + UPDATE_MODE, + INCRE_MODE, + DECRE_MODE, +}; + +/* bit1: adjust the 1588 tod and hw timer + * 0: not adjust + * 1: adjust the timer with add/sub/ or update + */ +#define TIMER_CONTROL 0x00000044 +#define ADJ_TIMER_BIT 1 + +/* bit4: tsn3 clock cycle update enable + * bit3: tsn2 clock cycle update enable + * bit2: tsn1 clock cycle update enable + * bit1: tsn0 clock cycle update enable + * bit0: 1588 clock cycle update enable + */ +#define CLOCK_CYCLE_UPDATE 0x0000004C + +/* bit31~16: nanosecond 0~0xffff + * bit15~0 : frac nanosecond 0~0xffff + */ +#define PPS_INCOME_DELAY 0x00000048 +/* bit0, 0:not latch, 1:latch the timer */ +#define TIMER_LATCH_EN 0x00000058 +/* bit5:0, select the latch timer + * bit0: 1588 timer + * bit1: hw timer + * bit2: tsn0 timer + * bit3: tsn1 timer + * bit4: tsn2 timer + * bit5: tsn3 timer + */ +#define TIMER_LACTH_SEL 0x0000005C + +enum latch_timer_type { + LATCH_1588_TIMER, + LATCH_HW_TIMER, + LATCH_TSN0_TIMER, + LATCH_TSN1_TIMER, + LATCH_TSN2_TIMER, + LATCH_TSN3_TIMER, +}; + +// trigger in +#define TRIGGER_IN_TOD_NANO_SECOND 0x00000060 +#define TRIGGER_IN_LOWER_TOD_SECOND 0x00000064 +#define TRIGGER_IN_HIGH_TOD_SECOND 0x00000068 +#define TRIGGER_IN_FRAC_NANO_SECOND 0x0000006C + +#define TRIGGER_IN_HARDWARE_TIME_LOW 0x00000070 +#define TRIGGER_IN_HARDWARE_TIME_HIGH 0x00000074 + +// trigger out +#define TRIGGER_OUT_TOD_NANO_SECOND 
0x00000080 +#define TRIGGER_OUT_LOWER_TOD_SECOND 0x00000084 +#define TRIGGER_OUT_HIGH_TOD_SECOND 0x00000088 +// #define TRIGGER_OUT_FRAC_NANO_SECOND 0x0000008C // none + +#define TRIGGER_OUT_HARDWARE_TIME_LOW 0x00000090 +#define TRIGGER_OUT_HARDWARE_TIME_HIGH 0x00000094 + +/* adjust 1588 timer */ +#define ADJUST_TOD_NANO_SECOND 0x000000A0 +#define ADJUST_LOWER_TOD_SECOND 0x000000A4 +#define ADJUST_HIGH_TOD_SECOND 0x000000A8 +#define ADJUST_FRAC_NANO_SECOND 0x000000AC +/* adjust hardware timer */ +#define ADJUST_HARDWARE_TIME_LOW 0x000000B0 +#define ADJUST_HARDWARE_TIME_HIGH 0x000000B4 + +/* 1588 timer latched time */ +#define LATCH_TOD_NANO_SECOND 0x000000C0 +#define LATCH_LOWER_TOD_SECOND 0x000000C4 +#define LATCH_HIGH_TOD_SECOND 0x000000C8 +#define LATCH_FRAC_NANO_SECOND 0x000000CC +/* hw timer latched time */ +#define LATCH_HARDWARE_TIME_LOW 0x000000D0 +#define LATCH_HARDWARE_TIME_HIGH 0x000000D4 + +/* pps capture tod time*/ +#define PPS_LATCH_TOD_NANO_SECOND 0x00000120 +#define PPS_LATCH_LOWER_TOD_SECOND 0x00000124 +#define PPS_LATCH_HIGH_TOD_SECOND 0x00000128 +#define PPS_LATCH_FRAC_NANO_SECOND 0x0000012C + +/* bit19~16 tsn pps enable + * bit19: tsn3 + * bit18: tsn2 + * bit17: tsn1 + * bit16: tsn0 + * bit15~12 tsn timer enable + * bit15: tsn3 + * bit14: tsn2 + * bit13: tsn1 + * bit12: tsn0 + * bit11~10 tsn0 timer run mode + * bit9~8 tsn1 timer run mode, + * bit7~6 tsn2 timer run mode, + * bit5~4 tsn3 timer run mode, + * 0: normal + * 1: update + * 2: increment + * 3: decrement + * bit3~0: tsn timer slave/capture mode, 0 is capture mode, 1 is slave mode + * bit3: tsn3 + * bit2: tsn2 + * bit1: tsn1 + * bit0: tsn0 + */ +#define TSN_TIME_CONFIGURATION 0x00000140 +/* bit3: adjust tsn3 timer + * bit2: adjust tsn2 timer + * bit1: adjust tsn1 timer + * bit0: adjust tsn0 timer + */ +#define TSN_TIMER_CONTROL 0x00000144 +#define TSN0_ADJ_EN_BIT 0 +#define TSN1_ADJ_EN_BIT 1 +#define TSN2_ADJ_EN_BIT 2 +#define TSN3_ADJ_EN_BIT 3 + +/* adjust clock cycle, for four tsn timer 
*/ +#define TSN0_CLOCK_CYCLE_INTEGER 0x00000148 +#define TSN0_CLOCK_CYCLE_FRACTION 0x0000014C +#define TSN1_CLOCK_CYCLE_INTEGER 0x00000150 +#define TSN1_CLOCK_CYCLE_FRACTION 0x00000154 +#define TSN2_CLOCK_CYCLE_INTEGER 0x00000158 +#define TSN2_CLOCK_CYCLE_FRACTION 0x0000015C +#define TSN3_CLOCK_CYCLE_INTEGER 0x00000160 +#define TSN3_CLOCK_CYCLE_FRACTION 0x00000164 + +#define TSN_CLOCK_CYCLE_INTEGER(tsn_no) TSN##tsn_no##_CLOCK_CYCLE_INTEGER +#define TSN_CLOCK_CYCLE_FRACTION(tsn_no) TSN##tsn_no##_CLOCK_CYCLE_FRACTION + +/* adjust tsn timer */ +#define TSN0_ADJUST_TOD_NANO_SECOND 0x00000180 +#define TSN0_ADJUST_LOWER_TOD_SECOND 0x00000184 +#define TSN0_ADJUST_HIGH_TOD_SECOND 0x00000188 +#define TSN0_ADJUST_FRAC_NANO_SECOND 0x0000018C + +#define TSN1_ADJUST_TOD_NANO_SECOND 0x00000190 +#define TSN1_ADJUST_LOWER_TOD_SECOND 0x00000194 +#define TSN1_ADJUST_HIGH_TOD_SECOND 0x00000198 +#define TSN1_ADJUST_FRAC_NANO_SECOND 0x0000019C + +#define TSN2_ADJUST_TOD_NANO_SECOND 0x000001A0 +#define TSN2_ADJUST_LOWER_TOD_SECOND 0x000001A4 +#define TSN2_ADJUST_HIGH_TOD_SECOND 0x000001A8 +#define TSN2_ADJUST_FRAC_NANO_SECOND 0x000001AC + +#define TSN3_ADJUST_TOD_NANO_SECOND 0x000001B0 +#define TSN3_ADJUST_LOWER_TOD_SECOND 0x000001B4 +#define TSN3_ADJUST_HIGH_TOD_SECOND 0x000001B8 +#define TSN3_ADJUST_FRAC_NANO_SECOND 0x000001BC + +#define TSN_ADJUST_NANO_SEC(tsn_no) TSN##tsn_no##_ADJUST_TOD_NANO_SECOND +#define TSN_ADJUST_LOW_SECOND(tsn_no) TSN##tsn_no##_ADJUST_LOWER_TOD_SECOND +#define TSN_ADJUST_HIGH_SECOND(tsn_no) TSN##tsn_no##_ADJUST_HIGH_TOD_SECOND +#define TSN_ADJUST_FRAC_NANO_SEC(tsn_no) TSN##tsn_no##_ADJUST_FRAC_NANO_SECOND + +/* tsn0 timer latched time */ +#define TSN0_LATCH_TOD_NANO_SECOND 0x000001C0 +#define TSN0_LATCH_LOWER_TOD_SECOND 0x000001C4 +#define TSN0_LATCH_HIGH_TOD_SECOND 0x000001C8 +#define TSN0_LATCH_FRAC_NANO_SECOND 0x000001CC +/* tsn1 timer latched time */ +#define TSN1_LATCH_TOD_NANO_SECOND 0x000001D0 +#define TSN1_LATCH_LOWER_TOD_SECOND 0x000001D4 +#define 
TSN1_LATCH_HIGH_TOD_SECOND 0x000001D8 +#define TSN1_LATCH_FRAC_NANO_SECOND 0x000001DC +/* tsn2 timer latched time */ +#define TSN2_LATCH_TOD_NANO_SECOND 0x000001E0 +#define TSN2_LATCH_LOWER_TOD_SECOND 0x000001E4 +#define TSN2_LATCH_HIGH_TOD_SECOND 0x000001E8 +#define TSN2_LATCH_FRAC_NANO_SECOND 0x000001EC +/* tsn3 timer latched time */ +#define TSN3_LATCH_TOD_NANO_SECOND 0x000001F0 +#define TSN3_LATCH_LOWER_TOD_SECOND 0x000001F4 +#define TSN3_LATCH_HIGH_TOD_SECOND 0x000001F8 +#define TSN3_LATCH_FRAC_NANO_SECOND 0x000001FC + +#define TSN_LATCH_NANO_SEC(tsn_no) TSN##tsn_no##_LATCH_TOD_NANO_SECOND +#define TSN_LATCH_LOW_SECOND(tsn_no) TSN##tsn_no##_LATCH_LOWER_TOD_SECOND +#define TSN_LATCH_HIGH_SECOND(tsn_no) TSN##tsn_no##_LATCH_HIGH_TOD_SECOND +#define TSN_LATCH_FRAC_NANO_SEC(tsn_no) TSN##tsn_no##_LATCH_FRAC_NANO_SECOND + +/* pps capture tsn0 time*/ +#define PPS_LATCH_TSN0_NANO_SECOND 0x00000200 +#define PPS_LATCH_TSN0_LOWER_SECOND 0x00000204 +#define PPS_LATCH_TSN0_HIGH_SECOND 0x00000208 +#define PPS_LATCH_TSN0_FRAC_NANO 0x0000020C +/* pps capture tsn1 time*/ +#define PPS_LATCH_TSN1_NANO_SECOND 0x00000210 +#define PPS_LATCH_TSN1_LOWER_SECOND 0x00000214 +#define PPS_LATCH_TSN1_HIGH_SECOND 0x00000218 +#define PPS_LATCH_TSN1_FRAC_NANO 0x0000021C +/* pps capture tsn2 time*/ +#define PPS_LATCH_TSN2_NANO_SECOND 0x00000220 +#define PPS_LATCH_TSN2_LOWER_SECOND 0x00000224 +#define PPS_LATCH_TSN2_HIGH_SECOND 0x00000228 +#define PPS_LATCH_TSN2_FRAC_NANO 0x0000022C +/* pps capture tsn3 time*/ +#define PPS_LATCH_TSN3_NANO_SECOND 0x00000230 +#define PPS_LATCH_TSN3_LOWER_SECOND 0x00000234 +#define PPS_LATCH_TSN3_HIGH_SECOND 0x00000238 +#define PPS_LATCH_TSN3_FRAC_NANO 0x0000023C + +#define PPS_LATCH_TSN_NANO_SEC(tsn_no) PPS_LATCH_TSN##tsn_no##_NANO_SECOND +#define PPS_LATCH_TSN_LOW_SECOND(tsn_no) PPS_LATCH_TSN##tsn_no##_LOWER_SECOND +#define PPS_LATCH_TSN_HIGH_SECOND(tsn_no) PPS_LATCH_TSN##tsn_no##_HIGH_SECOND +#define PPS_LATCH_TSN_FRAC_NANO_SEC(tsn_no) 
PPS_LATCH_TSN##tsn_no##_FRAC_NANO + +/**************************** ptpm end **************************/ + +/**************************** ptp_top start **************************/ + +/* bit3: local pp1s status + * 0: not generate local pp1s + * 1: generate local pp1s + * bit2: trigger local pp1s event test + * 0: not occur + * 1: occur + * bit1: writing 1 to the bit clearing local pp1s status + * 0: not clear + * 1: clear + * bit0: local pp1s event enable + * 0: no enable + * 1: enable + */ +#define LOCAL_PPS_INTERRUPT 0x00000000 +// bit0: 0 ref0 1 ref1 +#define PP1S_EXTERNAL_SEL 0x00000004 +/* bit1~0: select pp1s out: + * 00: pp1s ref0 + * 01: pp1s ref1 + * 10: local pp1s + * 11: 1588 pp1s + */ +#define PP1S_OUT_SEL 0x00000008 +/* bit2~0: select test pp1s: + * 000: pp1s ref0 + * 001: pp1s ref1 + * 010: local pp1s + * 011: 1588 pp1s + * 100: tsn0 pp1s + * 101: tsn1 pp1s + * 110: tsn2 pp1s + * 111: tsn3 pp1s + */ +#define TEST_PP1S_SEL 0x0000000C + +#define LOCAL_PP1S_EN 0x00000010 // use local_pp1s need enable this reg +/* bit2~1: select local pp1s adjust reference + * 00: pp1s ref0 + * 01: pp1s ref1 + * 10,11: 1588 pp1s + * bit0: adjust local pp1s phase once + */ +#define LOCAL_PP1S_ADJUST 0x00000014 +/* bit29~0: adjust local pp1s value, must below 10e9, 1bit is 1 nanosecond */ +#define LOCAL_PP1S_ADJUST_VALUE 0x00000018 + +/* bit5~3: Pd_U1_Sel1 + * 000: pp1s ref0 + * 001: pp1s ref1 + * 010: local pp1s + * 011: 1588 pp1s + * 100: tsn0 pp1s + * 101: tsn1 pp1s + * 110: tsn2 pp1s + * 111: tsn3 pp1s + * bit2~0: Pd_U1_Sel0 + * 000: pp1s ref0 + * 001: pp1s ref1 + * 010: local pp1s + * 011: 1588 pp1s + * 100: tsn0 pp1s + * 101: tsn1 pp1s + * 110: tsn2 pp1s + * 111: tsn3 pp1s + */ +#define PD_U1_SEL 0x00000040 +// bit29~0: phase detector module 1 select0 input pp1s shift value. +#define PD_U1_PD0_SHIFT 0x00000044 +// bit29~0: phase detector module 1 select1 input pp1s shift value. 
+#define PD_U1_PD1_SHIFT 0x00000048 + +/* bit30: Pd_U1_Result_sign + * 1: positive + * 0: negative + * bit29: Pd_U1_Overflow: the interval between two pp1s pluse is greater than + * 0x1FFF_FFFF, 1 is overflow + * bit28~0: phase detector module 1 resule value, must below 5*10e8, 1 bit is 1 nanosecond. + */ +#define PD_U1_RESULT 0x0000004C + +/* bit5~3: Pd_U2_Sel1 + * 000: pp1s ref0 + * 001: pp1s ref1 + * 010: local pp1s + * 011: 1588 pp1s + * 100: tsn0 pp1s + * 101: tsn1 pp1s + * 110: tsn2 pp1s + * 111: tsn3 pp1s + * bit2~0: Pd_U2_Sel0 + * 000: pp1s ref0 + * 001: pp1s ref1 + * 010: local pp1s + * 011: 1588 pp1s + * 100: tsn0 pp1s + * 101: tsn1 pp1s + * 110: tsn2 pp1s + * 111: tsn3 pp1s + */ +#define PD_U2_SEL 0x00000050 +// bit29~0: phase detector module 2 select0 input pp1s shift value. +#define PD_U2_PD0_SHIFT 0x00000054 +// bit29~0: phase detector module 2 select1 input pp1s shift value. +#define PD_U2_PD1_SHIFT 0x00000058 +/* bit30: Pd_U2_Result_sign + * 1: positive + * 0: negative + * bit29: Pd_U2_Overflow: the interval between two pp1s pluse is greater than + * 0x1FFF_FFFF, 1 is overflow + * bit28~0: phase detector module 2 resule value, must below 5*10e8, 1 bit is 1 nanosecond. 
+ */ +#define PD_U2_RESULT 0x0000005C + +#define TSN_GROUP_NANO_SEC_DELAY0 0x00000080 +#define TSN_GROUP_FRAC_NANO_SEC_DELAY0 0x00000084 +#define TSN_GROUP_NANO_SEC_DELAY1 0x00000088 +#define TSN_GROUP_FRAC_NANO_SEC_DELAY1 0x0000008C +#define TSN_GROUP_NANO_SEC_DELAY2 0x00000090 +#define TSN_GROUP_FRAC_NANO_SEC_DELAY2 0x00000094 +#define TSN_GROUP_NANO_SEC_DELAY3 0x00000098 +#define TSN_GROUP_FRAC_NANO_SEC_DELAY3 0x0000009C +#define PTP1588_RDMA_NANO_SEC_DELAY 0x000000A0 +#define PTP1588_RDMA_FRAC_NANO_SEC_DELAY 0x000000A4 +#define PTP1588_NP_NANO_SEC_DELAY 0x000000A8 +#define PTP1588_NP_FRAC_NANO_SEC_DELAY 0x000000AC +#define PTP1588_NVME_NANO_SEC_DELAY1 0x000000C0 +#define PTP1588_NVME_FRAC_NANO_SEC_DELAY1 0x000000C4 +#define PTP1588_NVME_NANO_SEC_DELAY2 0x000000C8 +#define PTP1588_NVME_FRAC_NANO_SEC_DELAY2 0x000000CC + +/**************************** ptp_top end **************************/ + +/**************************** ptps start **************************/ +/* bit0: enable writing timestamps to the FIFO + * 0: disable + * 1: enable + */ +#define PTPS_CONFIGURATION 0x00000020 + +/* bit0: PTP1588 FIFO read command + * 0: not read + * 1: read + */ +#define PTPS_TIMER_CONTROL 0x00000024 + +/* bit15~0: integral nanosecond of sync Hardware Time compensaion, 1 bit is 1 nanosecond */ +#define SYNC_HW_TIME_COMPENSATION 0x00000038 + +/* 对于加密报文,先读 PTP event timestamps count,然后读时间戳和匹配信息, + * 读时间戳和匹配信息之前,都要先配置TIMER_CONTROL. 
+ * 匹配信息是在PTP1588_EVENT_MESSAGE_TS_LOW里,具体对应关系: + * BIT[23:0]: messageType, sourcePortIdentity[3:0],sequenceId[15:0] + */ +/* ptp1588 event message timestamp[31:0] */ +#define PTP1588_EVENT_MESSAGE_TS_LOW 0x00000084 +/* ptp1588 event message timestamp[63:32] */ +#define PTP1588_EVENT_MESSAGE_TS_HIGH 0x00000088 + +/* bit9: FIFO full + * bit8: FIFO empty + * bit7~0: PTP event timestamps count + */ +#define PTP1588_EVENT_MESSAGE_FIFO_STATUS 0x0000008C + +struct event_ts_info { + u32 ts_low; + u32 ts_high; + unsigned char messageType; + unsigned char srcPortId; + short sequenceId; +}; + +#define EVENT_MESSAGE_MAX_NUM 32 + +/**************************** ptps end **************************/ + +#endif /* _ZX_REGS_H */ diff --git a/drivers/net/ethernet/dinghai/en_sf.c b/drivers/net/ethernet/dinghai/en_sf.c new file mode 100644 index 000000000000..94fa5a26fead --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf.c @@ -0,0 +1,1158 @@ +#include +#include +#ifdef HAVE_DEV_PRINTK_OPS +#include +#endif +#include +#include +#include +#include +#include "en_aux.h" +#include "en_sf.h" +#include "./en_sf/en_sf_eq.h" +#ifdef CONFIG_DINGHAI_EN_AUX +#include +#endif + +extern struct devlink_ops dh_sf_devlink_ops; +extern struct dh_core_devlink_ops dh_sf_core_devlink_ops; + +int32_t zxdh_aux_plug_aux_dev(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type); +void zxdh_aux_unplug_aux_dev_one(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type); + +int32_t zxdh_en_sf_get_vq_lock(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_vq_lock(dh_dev->parent); +} + +int32_t zxdh_en_sf_release_vq_lock(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_release_vq_lock(dh_dev->parent); +} + +int32_t zxdh_en_sf_find_valid_vqs(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[]) +{ + struct zxdh_en_sf_device 
*en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_find_valid_vqs(dh_dev->parent, vqs_cnt, + vq_index); +} + +int32_t zxdh_en_sf_write_vqs_bit(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[]) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_write_vqs_bit(dh_dev->parent, vqs_cnt, + vq_index); +} + +int32_t zxdh_en_sf_write_queue_tlb(struct dh_core_dev *dh_dev, uint16_t vqs_cnt, + uint32_t vq_index[], bool need_msgq) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_write_queue_tlb(dh_dev->parent, vqs_cnt, + vq_index, need_msgq); +} + +uint16_t zxdh_en_sf_get_fw_patch(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_fw_patch(dh_dev->parent); +} + +bool zxdh_en_sf_is_bond(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_bond(dh_dev->parent); +} + +bool zxdh_en_sf_is_upf(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_upf(dh_dev->parent); +} + +void zxdh_en_sf_set_status(struct dh_core_dev *dh_dev, uint8_t status) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_status(dh_dev->parent, status); + + return; +} + +uint8_t zxdh_en_sf_get_status(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + uint8_t status = 0; + + status = en_sf_dev->sf_ops->en_sf_get_status(dh_dev->parent); + + return status; +} + +uint8_t zxdh_en_sf_get_cfg_gen(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_cfg_gen(dh_dev->parent); +} + +bool zxdh_en_sf_get_rp_link_status(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device 
*en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_rp_link_status(dh_dev->parent); +} + +void zxdh_en_sf_set_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, + int32_t vf_id) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_vf_mac(dh_dev->parent, mac, vf_id); + + return; +} + +void zxdh_en_sf_get_vf_mac(struct dh_core_dev *dh_dev, uint8_t *mac, + int32_t vf_id) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_get_vf_mac(dh_dev->parent, mac, vf_id); + + return; +} + +void zxdh_en_sf_set_mac(struct dh_core_dev *dh_dev, uint8_t *mac) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_mac(dh_dev->parent, mac); + + return; +} + +void zxdh_en_sf_get_mac(struct dh_core_dev *dh_dev, uint8_t *mac) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_get_mac(dh_dev->parent, mac); + + return; +} + +uint64_t zxdh_en_sf_get_features(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + uint64_t device_feature = 0; + + device_feature = en_sf_dev->sf_ops->en_sf_get_features(dh_dev->parent); + + return device_feature; +} + +void zxdh_en_sf_set_features(struct dh_core_dev *dh_dev, uint64_t features) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_features(dh_dev->parent, features); + + return; +} + +uint16_t zxdh_en_sf_get_queue_num(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + uint16_t qnum = 0; + + qnum = en_sf_dev->sf_ops->en_sf_get_queue_num(dh_dev->parent); + + return qnum; +} + +uint16_t zxdh_en_sf_get_queue_size(struct dh_core_dev *dh_dev, uint32_t index) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + uint16_t queue_size = 0; + + queue_size = + en_sf_dev->sf_ops->en_sf_get_queue_size(dh_dev->parent, index); + + 
return queue_size; +} + +void zxdh_en_sf_set_queue_size(struct dh_core_dev *dh_dev, uint32_t index, + uint16_t size) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_queue_size(dh_dev->parent, index, size); +} + +struct pci_dev *zxdh_en_sf_get_pdev(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_pdev(dh_dev->parent); +} + +uint64_t zxdh_en_sf_get_bar_virt_addr(struct dh_core_dev *dh_dev, + uint8_t bar_num) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_bar_virt_addr(dh_dev->parent, + bar_num); +} + +uint64_t zxdh_en_sf_get_bar_phy_addr(struct dh_core_dev *dh_dev, + uint8_t bar_num) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_bar_phy_addr(dh_dev->parent, + bar_num); +} + +uint64_t zxdh_en_sf_get_bar_size(struct dh_core_dev *dh_dev, uint8_t bar_num) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_bar_size(dh_dev->parent, bar_num); +} + +int32_t zxdh_en_sf_msg_send_cmd(struct dh_core_dev *dh_dev, uint16_t module_id, + void *msg, void *ack, + struct zxdh_bar_extra_para *para) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_msg_send_cmd(dh_dev->parent, module_id, + msg, ack, para); +} + +int32_t zxdh_en_sf_async_eq_enable(struct dh_core_dev *dh_dev, + struct dh_eq_async *eq, const char *name, + bool attach) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_async_eq_enable(dh_dev->parent, eq, + name, attach); +} + +void zxdh_en_sf_nh_attach(struct dh_core_dev *dh_dev, struct dh_nb *nb, + bool attach) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_nh_attach(dh_dev->parent, nb, attach); +} + +void 
zxdh_en_sf_set_pf_link_up(struct dh_core_dev *dh_dev, bool link_up) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_pf_link_up(dh_dev->parent, link_up); + return; +} + +bool zxdh_en_sf_get_pf_link_up(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_pf_link_up(dh_dev->parent); +} + +void zxdh_en_sf_update_pf_link_info(struct dh_core_dev *dh_dev, + struct link_info_struct *link_info_val) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_update_pf_link_info(dh_dev->parent, + link_info_val); + return; +} + +int32_t zxdh_en_sf_get_pf_drv_msg(struct dh_core_dev *dh_dev, + uint8_t *drv_version, + uint8_t *drv_version_len) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_drv_msg(dh_dev, drv_version, + drv_version_len); +} + +void zxdh_en_sf_set_vepa(struct dh_core_dev *dh_dev, bool setting) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_vepa(dh_dev->parent, setting); + return; +} + +bool zxdh_en_sf_get_vepa(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_vepa(dh_dev->parent); +} + +void zxdh_en_sf_get_link_info_from_vqm(struct dh_core_dev *dh_dev, + uint8_t *link_up) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_get_link_info_from_vqm(dh_dev->parent, + link_up); + return; +} + +void zxdh_en_sf_set_vf_link_info(struct dh_core_dev *dh_dev, uint16_t vf_idx, + uint8_t link_up) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_vf_link_info(dh_dev->parent, vf_idx, + link_up); + return; +} + +bool zxdh_en_sf_get_vf_is_probe(struct dh_core_dev *dh_dev, uint16_t vf_idx) +{ + struct zxdh_en_sf_device *en_sf_dev = 
dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_vf_is_probe(dh_dev->parent, vf_idx); +} + +void zxdh_en_sf_set_pf_phy_port(struct dh_core_dev *dh_dev, uint8_t phy_port) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_pf_phy_port(dh_dev->parent, phy_port); + return; +} + +uint8_t zxdh_en_sf_get_pf_phy_port(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_pf_phy_port(dh_dev->parent); +} + +void zxdh_en_sf_set_rdma_netdev(struct dh_core_dev *dh_dev, void *data) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->netdev = data; + return; +} + +void *zxdh_en_sf_get_rdma_netdev(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->netdev; +} + +struct zxdh_rdma_if rdma_ops = { + .get_rdma_netdev = zxdh_en_sf_get_rdma_netdev, +}; + +void zxdh_en_sf_set_sec_info(struct dh_core_dev *dh_dev, void *data) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sec_info = data; +} + +void *zxdh_en_sf_get_sec_info(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sec_info; +} + +struct zxdh_sec_if sec_ops = { + .get_sec_info = zxdh_en_sf_get_sec_info, +}; + +bool zxdh_en_sf_is_drs_sec_enable(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_drs_sec_enable(dh_dev->parent); +} + +bool zxdh_en_sf_is_fw_feature_support(struct dh_core_dev *dh_dev, + uint32_t feature) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_fw_feature_support(dh_dev->parent, + feature); +} + +uint16_t zxdh_en_sf_get_ovs_pf_vfid(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return 
en_sf_dev->sf_ops->en_sf_get_ovs_pf_vfid(dh_dev->parent); +} + +bool zxdh_en_sf_is_hwbond(struct dh_core_dev *dh_dev, bool is_hwbond, + bool update_pf) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_hwbond(dh_dev->parent, is_hwbond, + update_pf); +} + +bool zxdh_en_sf_is_rdma_aux_plug(struct dh_core_dev *dh_dev, + bool is_rdma_aux_plug, bool update_pf) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_rdma_aux_plug( + dh_dev->parent, is_rdma_aux_plug, update_pf); +} + +bool zxdh_en_sf_is_primary_port(struct dh_core_dev *dh_dev, + bool is_primary_port, bool update_pf) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_primary_port( + dh_dev->parent, is_primary_port, update_pf); +} + +void zxdh_en_sf_optim_hardware_bond_time(struct dh_core_dev *dh_dev, + bool enable) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_optim_hardware_bond_time(dh_dev->parent, + enable); +} + +int32_t zxdh_en_sf_update_hb_file_val(struct dh_core_dev *dh_dev, + uint64_t spec_sbdf, const char *file_name, + bool flag) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_update_hb_file_val( + dh_dev->parent, spec_sbdf, file_name, flag); +} + +bool zxdh_en_sf_is_rdma_enable(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_rdma_enable(dh_dev->parent); +} + +static int32_t zxdh_en_sf_request_port(struct dh_core_dev *dh_dev, void *data) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_request_port_info(dh_dev->parent, data); +} + +static int32_t zxdh_en_sf_release_port(struct dh_core_dev *dh_dev, + uint32_t pnl_id) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return 
en_sf_dev->sf_ops->en_sf_release_port_info(dh_dev->parent, + pnl_id); +} + +static void zxdh_en_sf_set_bond_num(struct dh_core_dev *dh_dev, bool add) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_set_bond_num(dh_dev->parent, add); +} + +static bool zxdh_en_sf_if_init(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_if_init(dh_dev->parent); +} + +void zxdh_en_sf_set_init_comp_flag(struct dh_core_dev *dh_dev, uint8_t flag) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_init_comp_flag(dh_dev->parent, flag); + return; +} + +struct zxdh_ipv6_mac_tbl *zxdh_en_sf_get_ip6mac_tbl(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + return en_sf_dev->sf_ops->en_sf_get_ip6mac_tbl(dh_dev->parent); +} + +static int32_t zxdh_en_sf_get_cpl_timeout_if_mask(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + return en_sf_dev->sf_ops->en_sf_get_cpl_timeout_if_mask(dh_dev->parent); +} + +static int32_t zxdh_en_sf_set_cpl_timeout_mask(struct dh_core_dev *dh_dev, + uint32_t mask) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + return en_sf_dev->sf_ops->en_sf_set_cpl_timeout_mask(dh_dev->parent, + mask); +} + +static int32_t zxdh_en_sf_get_hp_irq_ctrl_status(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + return en_sf_dev->sf_ops->en_sf_get_hp_irq_ctrl_status(dh_dev->parent); +} + +static int32_t zxdh_en_sf_set_hp_irq_ctrl_status(struct dh_core_dev *dh_dev, + uint32_t status) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + return en_sf_dev->sf_ops->en_sf_set_hp_irq_ctrl_status(dh_dev->parent, + status); +} + +struct device *zxdh_en_sf_get_dma_dev(struct dh_core_dev *dh_dev) +{ + return dh_dev->parent->device; +} + +bool 
zxdh_en_sf_is_nic(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_nic(dh_dev->parent); +} + +bool zxdh_en_sf_is_special_bond(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_special_bond(dh_dev->parent); +} + +bool zxdh_en_sf_suport_np_ext_stats(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_pf_suport_np_ext_stats(dh_dev->parent); +} + +struct zxdh_np_ext_stats * +zxdh_en_sf_get_np_ext_stats(struct dh_core_dev *dh_dev, uint8_t panel_id) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_np_ext_stats(dh_dev->parent, + panel_id); +} + +uint8_t zxdh_en_sf_get_queue_pairs(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_queue_pairs(dh_dev->parent); +} + +static int32_t zxdh_aux_events_call_chain(struct dh_core_dev *dh_dev, + unsigned long type, void *data) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_events_call_chain(dh_dev->parent, type, + data); +} + +uint32_t zxdh_en_sf_get_dev_type(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_dev_type(dh_dev->parent); +} + +bool zxdh_en_sf_is_pf_rate_enable(struct dh_core_dev *dh_dev, + uint32_t *pf_fc_val) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev->parent); + struct firmware_capability *fwcap = &pf_dev->fwcap; + + if (FW_FEATURE_GET(fwcap->fw_feature, FW_FEATURE_PFM) == 1) { + if (pf_fc_val != NULL) { + *pf_fc_val = fwcap->pf_rate_default; + } else { + LOG_ERR("NULL pointer!\n"); + } + return true; + } + + return false; +} + +uint8_t zxdh_en_sf_get_board_type(struct dh_core_dev *dh_dev) +{ + 
struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_board_type(dh_dev->parent); +} + +bool zxdh_en_sf_is_multi_ep(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_is_multi_ep(dh_dev->parent); +} + +struct zxdh_en_if en_ops = { + .get_channels_num = zxdh_en_sf_get_vqs_channels_num, + .create_vqs_channels = zxdh_en_sf_create_vqs_channels, + .destroy_vqs_channels = zxdh_en_sf_destroy_vqs_channels, + .switch_vqs_channel = zxdh_en_sf_switch_vqs_channel, + .vqs_channel_bind_handler = zxdh_en_sf_vqs_channel_bind_handler, + .vqs_channel_unbind_handler = zxdh_en_sf_vqs_channel_unbind_handler, + .vq_bind_channel = zxdh_en_sf_vq_bind_channel, + .vq_unbind_channel = zxdh_en_sf_vq_unbind_channel, + .vqs_bind_eqs = zxdh_en_sf_vqs_bind_eqs, + .vqs_unbind_eqs = zxdh_en_sf_vqs_unbind_eqs, + .vp_modern_map_vq_notify = zxdh_en_sf_map_vq_notify, + .vp_modern_unmap_vq_notify = zxdh_en_sf_unmap_vq_notify, + .activate_phy_vq = zxdh_en_sf_activate_phy_vq, + .get_vq_lock = zxdh_en_sf_get_vq_lock, + .find_valid_vqs = zxdh_en_sf_find_valid_vqs, + .write_vqs_bit = zxdh_en_sf_write_vqs_bit, + .write_queue_tlb = zxdh_en_sf_write_queue_tlb, + .get_fw_patch = zxdh_en_sf_get_fw_patch, + .release_vq_lock = zxdh_en_sf_release_vq_lock, + .set_status = zxdh_en_sf_set_status, + .get_status = zxdh_en_sf_get_status, + .get_cfg_gen = zxdh_en_sf_get_cfg_gen, + .get_rp_link_status = zxdh_en_sf_get_rp_link_status, + .set_vf_mac = zxdh_en_sf_set_vf_mac, + .get_vf_mac = zxdh_en_sf_get_vf_mac, + .set_mac = zxdh_en_sf_set_mac, + .get_mac = zxdh_en_sf_get_mac, + .get_features = zxdh_en_sf_get_features, + .set_features = zxdh_en_sf_set_features, + .get_queue_num = zxdh_en_sf_get_queue_num, + .get_queue_size = zxdh_en_sf_get_queue_size, + .set_queue_size = zxdh_en_sf_set_queue_size, + .set_queue_enable = zxdh_en_sf_set_queue_enable, + .get_epbdf = zxdh_en_sf_get_epbdf, + .get_spec_sbdf = 
zxdh_en_sf_get_spec_sbdf, + .get_vport = zxdh_en_sf_get_vport, + .get_pcie_id = zxdh_en_sf_get_pcie_id, + .get_slot_id = zxdh_en_sf_get_slot_id, + .is_bond = zxdh_en_sf_is_bond, + .is_upf = zxdh_en_sf_is_upf, + .get_coredev_type = zxdh_en_sf_get_coredev_type, + .get_pdev = zxdh_en_sf_get_pdev, + .get_bar_virt_addr = zxdh_en_sf_get_bar_virt_addr, + .get_bar_phy_addr = zxdh_en_sf_get_bar_phy_addr, + .get_bar_size = zxdh_en_sf_get_bar_size, + .msg_send_cmd = zxdh_en_sf_msg_send_cmd, + .async_eq_enable = zxdh_en_sf_async_eq_enable, + .aux_nh_attach = zxdh_en_sf_nh_attach, + .get_vf_item = zxdh_en_sf_get_vf_item, + .set_pf_link_up = zxdh_en_sf_set_pf_link_up, + .get_pf_link_up = zxdh_en_sf_get_pf_link_up, + .update_pf_link_info = zxdh_en_sf_update_pf_link_info, + .get_pf_drv_msg = zxdh_en_sf_get_pf_drv_msg, + .set_vepa = zxdh_en_sf_set_vepa, + .get_vepa = zxdh_en_sf_get_vepa, + .get_link_info_from_vqm = zxdh_en_sf_get_link_info_from_vqm, + .set_vf_link_info = zxdh_en_sf_set_vf_link_info, + .get_vf_is_probe = zxdh_en_sf_get_vf_is_probe, + .request_port = zxdh_en_sf_request_port, + .release_port = zxdh_en_sf_release_port, + .set_bond_num = zxdh_en_sf_set_bond_num, + .if_init = zxdh_en_sf_if_init, + .set_pf_phy_port = zxdh_en_sf_set_pf_phy_port, + .set_rdma_netdev = zxdh_en_sf_set_rdma_netdev, + .get_pf_phy_port = zxdh_en_sf_get_pf_phy_port, + .set_init_comp_flag = zxdh_en_sf_set_init_comp_flag, + .get_ip6mac_tbl = zxdh_en_sf_get_ip6mac_tbl, + .get_dma_dev = zxdh_en_sf_get_dma_dev, + .unplug_adev = zxdh_aux_unplug_aux_dev_one, + .plug_adev = zxdh_aux_plug_aux_dev, + .is_nic = zxdh_en_sf_is_nic, + .is_special_bond = zxdh_en_sf_is_special_bond, + .get_qpairs = zxdh_en_sf_get_queue_pairs, + .events_call_chain = zxdh_aux_events_call_chain, + .get_cpl_timeout_if_mask = zxdh_en_sf_get_cpl_timeout_if_mask, + .set_cpl_timeout_mask = zxdh_en_sf_set_cpl_timeout_mask, + .get_hp_irq_ctrl_status = zxdh_en_sf_get_hp_irq_ctrl_status, + .set_hp_irq_ctrl_status = 
zxdh_en_sf_set_hp_irq_ctrl_status, + .get_dev_type = zxdh_en_sf_get_dev_type, + .if_suport_np_ext_stats = zxdh_en_sf_suport_np_ext_stats, + .get_np_ext_stats = zxdh_en_sf_get_np_ext_stats, + .set_sec_info = zxdh_en_sf_set_sec_info, + .is_drs_sec_enable = zxdh_en_sf_is_drs_sec_enable, + .is_fw_feature_support = zxdh_en_sf_is_fw_feature_support, + .is_pf_rate_enable = zxdh_en_sf_is_pf_rate_enable, + .get_ovs_pf_vfid = zxdh_en_sf_get_ovs_pf_vfid, + .get_board_type = zxdh_en_sf_get_board_type, + .is_hwbond = zxdh_en_sf_is_hwbond, + .is_rdma_aux_plug = zxdh_en_sf_is_rdma_aux_plug, + .is_primary_port = zxdh_en_sf_is_primary_port, + .optim_hardware_bond_time = zxdh_en_sf_optim_hardware_bond_time, + .update_hb_file_val = zxdh_en_sf_update_hb_file_val, + .is_rdma_enable = zxdh_en_sf_is_rdma_enable, + .is_multi_ep = zxdh_en_sf_is_multi_ep, +}; + +void zxdh_aux_adev_release(struct device *dev) +{ + return; +} + +int32_t zxdh_rdma_infos_request_reset(struct zxdh_rdma_dev_info *rdma_infos, + enum zxdh_rdma_reset_type reset_type) +{ + return 0; +} + +static struct zxdh_rdma_dev_ops rdma_handle_ops = { + .request_reset = zxdh_rdma_infos_request_reset, +}; + +struct zxdh_rdma_dev_info * +zxdh_rdma_infos_init(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_container *en_container = + container_of(adev, struct zxdh_en_container, adev); + struct zxdh_rdma_dev_info *rdma_infos = NULL; + + en_container->rdma_infos = + kzalloc(sizeof(*en_container->rdma_infos), GFP_KERNEL); + if (unlikely(en_container->rdma_infos == NULL)) { + LOG_ERR("en_container->rdma_infos kzalloc failed\n"); + return NULL; + } + + rdma_infos = en_container->rdma_infos; + rdma_infos->pdev = zxdh_en_sf_get_pdev(dh_dev); + rdma_infos->hw_addr = + (uint8_t __iomem *)zxdh_en_sf_get_bar_virt_addr(dh_dev, 0); + rdma_infos->ver.major = ZXDH_MAJOR_VER; + rdma_infos->ver.minor = ZXDH_MINOR_VER; + rdma_infos->ver.support = ZXDH_NET_MAJOR_VER + + (ZXDH_NET_MINOR_VER << ZXDH_HIGH_8BIT) + + 
(ZXDH_RDMA_MINOR_VER << ZXDH_HIGH_16BIT); + rdma_infos->rdma_protocol = ZXDH_RDMA_PROTOCOL_IWARP; + rdma_infos->ops = &rdma_handle_ops; + rdma_infos->ftype = ZXDH_FUNCTION_TYPE_PF; + rdma_infos->vport_id = zxdh_en_sf_get_vport(dh_dev); + rdma_infos->slot_id = zxdh_en_sf_get_slot_id(dh_dev); + if (zxdh_en_sf_get_coredev_type(dh_dev) == DH_COREDEV_VF) { + rdma_infos->ftype = ZXDH_FUNCTION_TYPE_VF; + } + + rdma_infos->msix_count = ZXDH_RDMA_CHANNELS_NUM; + rdma_infos->msix_entries.entry = ZXDH_RDMA_IRQ_START_IDX; + rdma_infos->msix_entries.vector = + pci_irq_vector(rdma_infos->pdev, ZXDH_RDMA_IRQ_START_IDX); + if (zxdh_en_sf_get_coredev_type(dh_dev) == DH_COREDEV_VF) { + rdma_infos->msix_entries.entry = ZXDH_VF_RDMA_IRQ_START_IDX; + rdma_infos->msix_entries.vector = pci_irq_vector( + rdma_infos->pdev, ZXDH_VF_RDMA_IRQ_START_IDX); + } + + return rdma_infos; +} + +int32_t zxdh_net_adev_handle(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_container *en_container = + container_of(adev, struct zxdh_en_container, adev); + + adev->name = ZXDH_EN_DEV_ID_NAME; + adev->dev.parent = dh_dev->device; + en_container->rdma_infos = NULL; + en_container->ops = &en_ops; + + return 0; +} + +int32_t zxdh_rdma_adev_handle(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_container *en_container = + container_of(adev, struct zxdh_en_container, adev); + struct zxdh_rdma_dev_info *rdma_infos = NULL; + + adev->name = ZXDH_RDMA_DEV_NAME; + rdma_infos = zxdh_rdma_infos_init(dh_dev, adev); + if (unlikely(rdma_infos == NULL)) { + LOG_ERR("zxdh_rdma_infos_init failed, return NULL\n"); + return -1; + } + rdma_infos->adev = adev; + adev->dev.parent = &rdma_infos->pdev->dev; + en_container->rdma_ops = &rdma_ops; + + return 0; +} + +int32_t zxdh_sec_adev_handle(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_container *en_container = + container_of(adev, struct zxdh_en_container, adev); + 
+ adev->name = ZXDH_SEC_DEV_NAME; + + adev->dev.parent = dh_dev->device; + en_container->sec_ops = &sec_ops; + + return 0; +} + +struct zxdh_adev_handle_table zxdh_adev_handle_table[] = { + { NET_AUX_DEVICE, zxdh_net_adev_handle }, + { RDMA_AUX_DEVICE, zxdh_rdma_adev_handle }, + { SEC_AUX_DEVICE, zxdh_sec_adev_handle }, +}; + +int32_t zxdh_adev_handle(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev, + enum AUX_DEVICE_TYPE adev_type) +{ + uint32_t i = 0; + int32_t ret = 0; + + for (i = 0; i < ARRAY_SIZE(zxdh_adev_handle_table); i++) { + if ((zxdh_adev_handle_table[i].adev_type == adev_type) && + (zxdh_adev_handle_table[i].cb_fn)) { + ret = zxdh_adev_handle_table[i].cb_fn(dh_dev, adev); + } + } + + return ret; +} + +static DEFINE_IDA(zxdh_aux_adev_ida); + +int32_t zxdh_aux_plug_aux_dev(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type) +{ + struct zxdh_auxiliary_device *adev = NULL; + struct zxdh_en_sf_device *en_sf_dev = NULL; + struct zxdh_en_container *en_container = NULL; + int32_t ret = 0; + en_sf_dev = dh_core_priv(dh_dev); + + en_container = kzalloc(sizeof(struct zxdh_en_container), GFP_KERNEL); + if (unlikely(en_container == NULL)) { + LOG_ERR("sf_con kzalloc is null\n"); + return -ENOMEM; + } + + en_container->aux_id = ida_alloc(&zxdh_aux_adev_ida, GFP_KERNEL); + if (en_container->aux_id < 0) { + LOG_ERR("failed to allocate device id for aux drvs\n"); + goto free_kzalloc; + } + + adev = &en_container->adev; + + adev->id = en_container->aux_id; + adev->dev.release = zxdh_aux_adev_release; + ret = zxdh_adev_handle(dh_dev, adev, adev_type); + if (ret != 0) { + LOG_ERR("zxdh_adev_handle failed: %d\n", ret); + goto free_ida_alloc; + } + + if (en_sf_dev->aux_idx < 0) + goto free_rdma_infos_alloc; + en_sf_dev->adev[en_sf_dev->aux_idx] = adev; + en_sf_dev->adev[en_sf_dev->aux_idx]->adev_type = adev_type; + en_sf_dev->aux_idx++; + + en_container->parent = dh_dev; + + ret = zxdh_auxiliary_device_init(adev); + if (ret != 0) { + 
LOG_ERR("zxdh_auxiliary_device_init failed: %d\n", ret); + goto free_rdma_infos_alloc; + } + + ret = zxdh_auxiliary_device_add(adev); + if (ret != 0) { + LOG_ERR("zxdh_auxiliary_device_add failed: %d\n", ret); + goto release_aux_init; + } + + return 0; + +release_aux_init: + zxdh_auxiliary_device_uninit(adev); +free_rdma_infos_alloc: + if (adev_type == RDMA_AUX_DEVICE) { + if (en_container->rdma_infos != NULL) { + kfree(en_container->rdma_infos); + en_container->rdma_infos = NULL; + } + } +free_ida_alloc: + ida_simple_remove(&zxdh_aux_adev_ida, en_container->aux_id); + en_container->aux_id = -1; +free_kzalloc: + kfree(en_container); + en_container = NULL; + return ret; +} + +void zxdh_aux_unplug_aux_dev(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = NULL; + struct zxdh_en_container *en_container = NULL; + int16_t i = 0; + + en_sf_dev = dh_core_priv(dh_dev); + for (i = en_sf_dev->aux_idx - 1; i >= 0; i--) { + en_container = container_of(en_sf_dev->adev[i], + struct zxdh_en_container, adev); + + zxdh_auxiliary_device_delete(en_sf_dev->adev[i]); + zxdh_auxiliary_device_uninit(en_sf_dev->adev[i]); + if (en_container->rdma_infos != NULL) { + kfree(en_container->rdma_infos); + en_container->rdma_infos = NULL; + } + ida_simple_remove(&zxdh_aux_adev_ida, en_container->aux_id); + en_container->aux_id = -1; + kfree(en_container); + en_container = NULL; + } + + return; +} + +void zxdh_aux_unplug_aux_dev_one(struct dh_core_dev *dh_dev, + enum AUX_DEVICE_TYPE adev_type) +{ + struct zxdh_en_sf_device *en_sf_dev = NULL; + struct zxdh_en_container *en_container = NULL; + int16_t i = 0; + int16_t aux_idx = 0; + + en_sf_dev = dh_core_priv(dh_dev); + aux_idx = en_sf_dev->aux_idx - 1; + for (i = aux_idx; i >= 0; i--) { + if (adev_type != en_sf_dev->adev[i]->adev_type) { + continue; + } + + en_container = container_of(en_sf_dev->adev[i], + struct zxdh_en_container, adev); + + zxdh_auxiliary_device_delete(en_sf_dev->adev[i]); + 
zxdh_auxiliary_device_uninit(en_sf_dev->adev[i]); + if (en_container->rdma_infos != NULL) { + kfree(en_container->rdma_infos); + en_container->rdma_infos = NULL; + } + + ida_simple_remove(&zxdh_aux_adev_ida, en_container->aux_id); + en_container->aux_id = -1; + + kfree(en_container); + en_container = NULL; + + /* 针对末尾的adev删除没有问题,如果是前面的adev删除,则当前处理有问题 */ + /* TODO: 后续优化 */ + en_sf_dev->aux_idx--; + } + + return; +} + +static int32_t zxdh_en_sf_dev_probe(struct zxdh_auxiliary_device *adev, + const struct zxdh_auxiliary_device_id *id) +{ + int32_t err = 0; + struct zxdh_en_sf_container *sf_con = + container_of(adev, struct zxdh_en_sf_container, adev); + struct dh_core_dev *dh_dev = NULL; + struct devlink *devlink = NULL; + struct zxdh_en_sf_device *en_sf_dev = NULL; + struct zxdh_en_sf_if *sf_ops = sf_con->ops; + + LOG_INFO("sf level start\n"); + + devlink = zxdh_devlink_alloc(&adev->dev, &dh_sf_devlink_ops, + sizeof(struct zxdh_en_sf_device)); + if (devlink == NULL) { + LOG_ERR("devlink alloc failed\n"); + return -ENOMEM; + } + + dh_dev = devlink_priv(devlink); + en_sf_dev = dh_core_priv(dh_dev); + dh_dev->parent = sf_con->dh_dev; + dh_dev->device = &adev->dev; + dh_dev->irq_table = dh_dev->parent->irq_table; + en_sf_dev->max_channels = sf_con->max_channels; + en_sf_dev->sf_ops = sf_ops; + en_sf_dev->aux_idx = 0; + sf_con->cdev = dh_dev; + dh_dev->devlink = devlink; + dh_dev->devlink_ops = &dh_sf_core_devlink_ops; + mutex_init(&dh_dev->lock); + + err = dh_en_sf_eq_table_init(dh_dev); + if (err != 0) { + LOG_ERR("Failed to alloc IRQs\n"); + goto err_eq_table_init; + } + + dh_en_sf_eq_table_create(dh_dev, sf_ops); + + zxdh_devlink_register(devlink); + + zxdh_aux_plug_aux_dev(dh_dev, NET_AUX_DEVICE); + + if (sf_ops->en_sf_is_rdma_enable(dh_dev->parent) && + sf_ops->en_sf_is_primary_port(dh_dev->parent, TRUE, FALSE)) { + zxdh_aux_plug_aux_dev(dh_dev, RDMA_AUX_DEVICE); + LOG_INFO("sf set rdma done\n"); + } + if (sf_ops->en_sf_is_drs_sec_enable(dh_dev->parent)) { + 
zxdh_aux_plug_aux_dev(dh_dev, SEC_AUX_DEVICE); + LOG_INFO("sf set sec done\n"); + } + + LOG_INFO("sf level completed\n"); + + return 0; + +err_eq_table_init: + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + return -EPERM; +} + +static int32_t zxdh_en_sf_dev_remove(struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_sf_container *sf_con = + container_of(adev, struct zxdh_en_sf_container, adev); + struct dh_core_dev *dh_dev = NULL; + struct devlink *devlink = NULL; + + LOG_INFO("sf level start\n"); + dh_dev = sf_con->cdev; + devlink = dh_dev->devlink; + + zxdh_aux_unplug_aux_dev(dh_dev); + zxdh_devlink_unregister(devlink); + dh_sf_eq_table_destroy(dh_dev); + dh_eq_table_cleanup(dh_dev); + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + LOG_INFO("sf level completed\n"); + + return 0; +} + +static void zxdh_en_sf_dev_shutdown(struct zxdh_auxiliary_device *adev) +{ + struct zxdh_en_sf_container *sf_con = + container_of(adev, struct zxdh_en_sf_container, adev); + struct dh_core_dev *dh_dev = NULL; + struct devlink *devlink = NULL; + + LOG_INFO("sf level start\n"); + dh_dev = sf_con->cdev; + devlink = dh_dev->devlink; + + zxdh_devlink_unregister(devlink); + dh_sf_eq_table_destroy(dh_dev); + dh_eq_table_cleanup(dh_dev); + mutex_destroy(&dh_dev->lock); + zxdh_devlink_free(devlink); + LOG_INFO("sf level completed\n"); +} + +static const struct zxdh_auxiliary_device_id zxdh_en_sf_dev_id_table[] = { + { + .name = ZXDH_PF_NAME "." 
ZXDH_PF_EN_SF_DEV_ID_NAME, + }, + {}, +}; + +MODULE_DEVICE_TABLE(zxdh_auxiliary, zxdh_en_sf_dev_id_table); + +static struct zxdh_auxiliary_driver zxdh_en_sf_driver = { + .name = ZXDH_PF_EN_SF_DEV_ID_NAME, + .probe = zxdh_en_sf_dev_probe, + .remove = zxdh_en_sf_dev_remove, + .shutdown = zxdh_en_sf_dev_shutdown, + .id_table = zxdh_en_sf_dev_id_table, +}; + +int32_t zxdh_en_sf_driver_register(void) +{ + int32_t err = 0; + + err = zxdh_auxiliary_driver_register(&zxdh_en_sf_driver); + if (err != 0) { + LOG_ERR("zxdh_auxiliary_driver_register failed: %d\n", err); + goto err_auxiliary_driver_register; + } + +#ifdef CONFIG_DINGHAI_EN_AUX + err = zxdh_en_driver_register(); + if (err != 0) { + LOG_ERR("zxdh_en_driver_register failed: %d\n", err); + goto err_en_driver_register; + } +#endif + + return 0; + +#ifdef CONFIG_DINGHAI_EN_AUX +err_en_driver_register: + zxdh_auxiliary_driver_unregister(&zxdh_en_sf_driver); +#endif +err_auxiliary_driver_register: + return err; +} + +void zxdh_en_sf_driver_unregister(void) +{ +#ifdef CONFIG_DINGHAI_EN_AUX + zxdh_en_driver_unregister(); +#endif + + zxdh_auxiliary_driver_unregister(&zxdh_en_sf_driver); +} diff --git a/drivers/net/ethernet/dinghai/en_sf.h b/drivers/net/ethernet/dinghai/en_sf.h new file mode 100644 index 000000000000..35b68a388e2f --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf.h @@ -0,0 +1,108 @@ +#ifndef __ZXDH_PF_EN_SF_H__ +#define __ZXDH_PF_EN_SF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include + +#define ZXDH_MAJOR_VER 10 +#define ZXDH_MINOR_VER 1 +#define ZXDH_NET_MAJOR_VER 0 //0-255, bit0-7 +#define ZXDH_NET_MINOR_VER 0 //0-255, bit8-15 +#define ZXDH_RDMA_MINOR_VER 0 //0-255, bit16-23 +#define ZXDH_HIGH_8BIT 8 +#define ZXDH_HIGH_16BIT 16 +#define ZXDH_SF_ADEV_NUM 32 + +struct zxdh_rdma_dev_info; + +enum AUX_DEVICE_TYPE { + NET_AUX_DEVICE, + RDMA_AUX_DEVICE, + SEC_AUX_DEVICE, +}; + +struct zxdh_adev_handle_table { + enum AUX_DEVICE_TYPE adev_type; + 
int32_t (*cb_fn)(struct dh_core_dev *dh_dev, + struct zxdh_auxiliary_device *adev); +}; + +struct zxdh_ver_info { + uint16_t major; + uint16_t minor; + uint64_t support; +}; + +enum zxdh_function_type { + ZXDH_FUNCTION_TYPE_PF, + ZXDH_FUNCTION_TYPE_VF, +}; + +enum zxdh_rdma_protocol { + ZXDH_RDMA_PROTOCOL_IWARP = BIT(0), + ZXDH_RDMA_PROTOCOL_ROCEV2 = BIT(1), +}; + +struct zxdh_rdma_qos_params { + uint8_t reserve; +}; + +enum zxdh_rdma_reset_type { + ZXDH_RESET_MTU_CHANGE, + ZXDH_RESET_HW_ERROR, +}; + +struct zxdh_rdma_dev_ops { + int32_t (*request_reset)(struct zxdh_rdma_dev_info *rdma_infos, + enum zxdh_rdma_reset_type reset_type); +}; + +/* auxiliary driver tailored information about the core PCI dev */ +struct zxdh_rdma_dev_info { + struct pci_dev *pdev; + struct zxdh_auxiliary_device *adev; + + uint8_t __iomem *hw_addr; + int32_t adev_info_id; + struct zxdh_ver_info ver; + + void *auxiliary_priv; + + enum zxdh_function_type ftype; + uint16_t vport_id; + uint16_t slot_id; + /* Current active RDMA protocol */ + enum zxdh_rdma_protocol rdma_protocol; + + struct zxdh_rdma_qos_params qos_info; + + struct msix_entry msix_entries; + /* How many vectors are reserved for this device */ + uint16_t msix_count; + /* function pointers to be initialized by core PCI driver and called by auxiliary driver */ + struct zxdh_rdma_dev_ops *ops; +}; + +struct zxdh_en_sf_device { + int32_t max_channels; + struct zxdh_en_sf_if *sf_ops; + void *netdev; + void *sec_info; + + struct zxdh_auxiliary_device *adev[ZXDH_SF_ADEV_NUM]; + int32_t aux_idx; +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.c b/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.c new file mode 100644 index 000000000000..83da86e4d699 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.c @@ -0,0 +1,126 @@ +#include +#include +#include "en_sf_devlink.h" + +struct devlink_ops dh_sf_devlink_ops = { + +}; + +enum { + 
DH_SF_PARAMS_MAX, +}; + +static int32_t __attribute__((unused)) sample_check(struct dh_core_dev *dev) +{ + return 1; +} + +enum dh_sf_devlink_param_id { + DH_SF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + DH_SF_DEVLINK_PARAM_ID_SAMPLE, +}; + +static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_REGISTER +static const struct devlink_params { + const char *name; + int32_t (*check)(struct dh_core_dev *dev); + struct devlink_param param; +} devlink_params[] = { [DH_SF_PARAMS_MAX] = { + .name = "sample", + .check = &sample_check, + .param = DEVLINK_PARAM_DRIVER( + DH_SF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, + dh_devlink_sample_set, NULL), + } }; + +static int32_t params_register(struct devlink *devlink) +{ + int32_t i = 0; + int32_t err = 0; + struct dh_core_dev *dh_dev = devlink_priv(devlink); + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + if (devlink_params[i].check(dh_dev)) { + err = devlink_param_register(devlink, + &devlink_params[i].param); + if (err) { + goto rollback; + } + } + } + + return 0; + +rollback: + if (i == 0) { + return err; + } + + for (; i > 0; i--) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return err; +} + +static int32_t params_unregister(struct devlink *devlink) +{ + int32_t i = 0; + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return 0; +} +#else +static struct devlink_param devlink_params[] = { + [DH_SF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER( + 
DH_SF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, dh_devlink_sample_set, NULL), +}; + +static int32_t params_register(struct devlink *devlink) +{ + struct dh_core_dev *__attribute__((unused)) + dh_dev = devlink_priv(devlink); + int32_t err = 0; + + err = devlink_params_register(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return err; +} +static int32_t params_unregister(struct devlink *devlink) +{ + devlink_params_unregister(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return 0; +} +#endif + +struct dh_core_devlink_ops dh_sf_core_devlink_ops = { + .params_register = params_register, + .params_unregister = params_unregister +}; \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.h b/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.h new file mode 100644 index 000000000000..e838bf822227 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf/en_sf_devlink.h @@ -0,0 +1,14 @@ +#ifndef __EN_SF_DEVLINK_H__ +#define __EN_SF_DEVLINK_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.c b/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.c new file mode 100644 index 000000000000..1f67fbdf7ab7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.c @@ -0,0 +1,414 @@ +#include +#include +#include +#include +#include +#include +#include "en_sf_irq.h" +#include "en_sf_eq.h" +#include "../en_sf.h" + +static int32_t create_async_eqs(struct dh_core_dev *dev) +{ + return 0; +} + +int32_t dh_en_sf_eq_table_create(struct dh_core_dev *dev, + struct zxdh_en_sf_if *ops) +{ + int32_t err; + + err = create_async_eqs(dev); + + return err; +} + +void dh_sf_eq_table_destroy(struct dh_core_dev *dev) +{ + return; +} + +void zxdh_set_queue_size(struct dh_core_dev *dh_dev, uint32_t index, + uint16_t size) +{ + struct 
zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_queue_size(dh_dev->parent, index, size); +} + +void zxdh_queue_address(struct dh_core_dev *dh_dev, uint32_t index, + uint64_t desc_addr, uint64_t driver_addr, + uint64_t device_addr) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_queue_address( + dh_dev->parent, index, desc_addr, driver_addr, device_addr); +} + +void zxdh_en_sf_activate_phy_vq(struct dh_core_dev *dh_dev, uint32_t phy_index, + int32_t queue_size, uint64_t desc_addr, + uint64_t avail_addr, uint64_t used_addr) +{ + zxdh_set_queue_size(dh_dev, phy_index, queue_size); + zxdh_queue_address(dh_dev, phy_index, desc_addr, avail_addr, used_addr); +} + +int32_t dh_en_sf_eq_table_init(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + struct dh_en_sf_eq_table *table_priv = NULL; + int32_t err = 0; + + table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL); + if (unlikely(table_priv == NULL)) { + LOG_ERR("dh_en_sf_eq_table kvzalloc failed\n"); + err = -ENOMEM; + goto err_table_priv; + } + dh_eq_table_init(dev, table_priv); + + return 0; + +err_table_priv: + kvfree(eq_table); + return err; +} + +static void vqs_irqs_release(struct dh_core_dev *dh_dev) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + int32_t vqs_channel_num = 0; + + vqs_channel_num = zxdh_en_sf_get_vqs_channels_num(dh_dev); + + en_sf_dev->sf_ops->en_sf_affinity_irqs_release( + dh_dev->parent, sf_eq_table->vq_irqs, vqs_channel_num); + + dh_irqs_release_vectors(sf_eq_table->vq_irqs, sf_eq_table->vq_irq_num); +} + +static void clean_vqs_eqs(struct dh_core_dev *dh_dev) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct dh_eq_vqs *eq; + struct dh_eq_vqs *n; + + list_for_each_entry_safe(eq, n, 
&sf_eq_table->vqs_eqs_list, list) { + list_del(&eq->list); + kfree(eq); + } +} + +static void destroy_vqs_eqs(struct dh_core_dev *dh_dev, int32_t vqs_channel_num) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct dh_eq_vqs *eq; + struct dh_eq_vqs *n; + int32_t i = 0; + + list_for_each_entry_safe(eq, n, &sf_eq_table->vqs_eqs_list, list) { + if (i <= vqs_channel_num) { + dh_eq_disable(dh_dev, &eq->vq_s.core, &eq->vq_s.irq_nb); + } + i++; + } +} + +uint16_t zxdh_en_sf_get_vqs_channels_num(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + uint16_t channels_num = 0; + + channels_num = + en_sf_dev->sf_ops->en_sf_get_channels_num(dh_dev->parent); + + return channels_num; +} + +static int32_t create_map_eq(struct dh_core_dev *dev, struct dh_eq *eq, + struct dh_eq_param *param) +{ + eq->irq = param->irq; + + return 0; +} + +void zxdh_en_sf_destroy_vqs_channels(struct dh_core_dev *dh_dev) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + + clean_vqs_eqs(dh_dev); + vqs_irqs_release(dh_dev); + kfree(sf_eq_table->vq_irqs); +} + +void zxdh_en_sf_switch_vqs_channel(struct dh_core_dev *dh_dev, int32_t channel, + int32_t op) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct dh_irq *irq = sf_eq_table->vq_irqs[channel]; + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_switch_irq(dh_dev->parent, irq->irqn, op); +} + +int32_t zxdh_en_sf_create_vqs_channels(struct dh_core_dev *dh_dev, void *data) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + int32_t vqs_channel_num = 0; + int32_t i = 0; + struct dh_eq_vqs *eq_vqs = NULL; + int32_t err = 0; + + vqs_channel_num = 
zxdh_en_sf_get_vqs_channels_num(dh_dev); + + sf_eq_table->vq_irqs = kcalloc( + vqs_channel_num, sizeof(*sf_eq_table->vq_irqs), GFP_KERNEL); + if (unlikely(sf_eq_table->vq_irqs == NULL)) { + LOG_ERR("sf_eq_table->vq_irqs kcalloc null\n"); + return -ENOMEM; + } + + vqs_channel_num = en_sf_dev->sf_ops->en_sf_vq_irqs_request( + dh_dev->parent, sf_eq_table->vq_irqs, vqs_channel_num, data); + if (vqs_channel_num < 0) { + LOG_ERR("en_sf_vq_irqs_request failed: %d\n", vqs_channel_num); + kfree(sf_eq_table->vq_irqs); + return vqs_channel_num; + } + + sf_eq_table->vq_irq_num = vqs_channel_num; + + INIT_LIST_HEAD(&sf_eq_table->vqs_eqs_list); + + for (i = 0; i < vqs_channel_num; i++) { + eq_vqs = kzalloc(sizeof(struct dh_eq_vqs), GFP_KERNEL); + if (unlikely(eq_vqs == NULL)) { + LOG_ERR("eq_vqs %d kzalloc null\n", i); + err = -ENOMEM; + goto clean; + } + + INIT_LIST_HEAD(&eq_vqs->vqs); + + list_add_tail(&eq_vqs->list, &sf_eq_table->vqs_eqs_list); + } + + return vqs_channel_num; + +clean: + zxdh_en_sf_destroy_vqs_channels(dh_dev); + return err; +} + +void zxdh_en_sf_vqs_unbind_eqs(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct dh_eq_vqs *eq; + struct dh_eq_vqs *n; + int32_t i = 0; + + list_for_each_entry_safe(eq, n, &sf_eq_table->vqs_eqs_list, list) { + if (i++ <= vqs_channel_num) { + list_del(&eq->vqs); + } + } + + return; +} + +int32_t zxdh_en_sf_vqs_bind_eqs(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct list_head *vq_node) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + struct dh_eq_vqs *eq; + struct dh_eq_vqs *n; + int32_t i = 0; + + list_for_each_entry_safe(eq, n, &sf_eq_table->vqs_eqs_list, list) { + if (i++ == vqs_channel_num) { + list_add_tail(vq_node, &eq->vqs); + return 0; + } + } + + return -ENOENT; +} + +void __iomem *zxdh_en_sf_map_vq_notify(struct dh_core_dev 
*dh_dev, + uint32_t index, resource_size_t *pa) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + void __iomem *notify_addr = NULL; + + notify_addr = en_sf_dev->sf_ops->en_sf_map_vq_notify(dh_dev->parent, + index, pa); + + return notify_addr; +} + +void zxdh_en_sf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_unmap_vq_notify(dh_dev->parent, priv); +} + +void zxdh_en_sf_set_queue_enable(struct dh_core_dev *dh_dev, uint16_t index, + bool enable) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_set_queue_enable(dh_dev->parent, index, + enable); +} + +uint16_t zxdh_en_sf_get_queue_vector(struct dh_core_dev *dh_dev, + uint16_t channel, uint16_t queue_index, + uint16_t vq_idx) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + int32_t msix_vec = ZXDH_MSI_NO_VECTOR; + + msix_vec = en_sf_dev->sf_ops->en_sf_get_queue_vector( + dh_dev->parent, channel, &sf_eq_table->vqs_eqs_list, + queue_index, vq_idx); + + return msix_vec; +} + +void zxdh_en_sf_vq_unbind_channel(struct dh_core_dev *dh_dev, + int32_t queue_index) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + en_sf_dev->sf_ops->en_sf_release_queue_vector(dh_dev->parent, + queue_index); +} + +int32_t zxdh_en_sf_vq_bind_channel(struct dh_core_dev *dh_dev, + int32_t channel_num, int32_t queue_index, + uint16_t vq_idx) +{ + int32_t msix_vec = ZXDH_MSI_NO_VECTOR; + + msix_vec = zxdh_en_sf_get_queue_vector(dh_dev, channel_num, queue_index, + vq_idx); + + if (msix_vec == ZXDH_MSI_NO_VECTOR) { + return -EBUSY; + } + + return msix_vec; +} + +void zxdh_en_sf_vqs_channel_unbind_handler(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num) +{ + destroy_vqs_eqs(dh_dev, vqs_channel_num); + return; +} + +int32_t 
zxdh_en_sf_vqs_channel_bind_handler(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct dh_vq_handler *handler) +{ + struct dh_eq_table *table = &dh_dev->eq_table; + struct dh_en_sf_eq_table *sf_eq_table = table->priv; + int32_t i = 0; + struct dh_eq_vqs *eq_vqs; + struct dh_eq_vqs *n; + int32_t err = 0; + + list_for_each_entry_safe(eq_vqs, n, &sf_eq_table->vqs_eqs_list, list) { + if (i == vqs_channel_num) { + struct dh_eq_param param = {}; + + eq_vqs->vq_s.irq_nb.notifier_call = handler->callback; + eq_vqs->vq_s.para = handler->para; + param = (struct dh_eq_param){ + .irq = sf_eq_table->vq_irqs[i], + .nent = 0, + }; + create_map_eq(dh_dev, &eq_vqs->vq_s.core, ¶m); + + err = dh_eq_enable(dh_dev, &eq_vqs->vq_s.core, + &eq_vqs->vq_s.irq_nb); + if (err != 0) { + LOG_ERR("dh_eq_enable failed: %d\n", err); + goto clean_eq; + } + return 0; + } + i++; + } + +clean_eq: + destroy_vqs_eqs(dh_dev, vqs_channel_num); + return err; +} + +uint16_t zxdh_en_sf_get_epbdf(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_epbdf(dh_dev->parent); +} + +uint64_t zxdh_en_sf_get_spec_sbdf(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_spec_sbdf(dh_dev->parent); +} + +uint16_t zxdh_en_sf_get_vport(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_vport(dh_dev->parent); +} + +enum dh_coredev_type zxdh_en_sf_get_coredev_type(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_coredev_type(dh_dev->parent); +} + +uint16_t zxdh_en_sf_get_pcie_id(struct dh_core_dev *dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_pcie_id(dh_dev->parent); +} + +uint16_t zxdh_en_sf_get_slot_id(struct dh_core_dev 
*dh_dev) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_slot_id(dh_dev->parent); +} + +struct zxdh_vf_item *zxdh_en_sf_get_vf_item(struct dh_core_dev *dh_dev, + uint16_t vf_idx) +{ + struct zxdh_en_sf_device *en_sf_dev = dh_core_priv(dh_dev); + + return en_sf_dev->sf_ops->en_sf_get_vf_item(dh_dev->parent, vf_idx); +} diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.h b/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.h new file mode 100644 index 000000000000..a30d4eac477b --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf/en_sf_eq.h @@ -0,0 +1,77 @@ +#ifndef __EN_SF_EQ_H__ +#define __EN_SF_EQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include + +void dh_en_sf_eq_table_destroy(struct dh_core_dev *dev); +int32_t dh_en_sf_eq_table_init(struct dh_core_dev *dev); +int32_t dh_en_sf_eq_table_create(struct dh_core_dev *dev, + struct zxdh_en_sf_if *ops); +void dh_sf_eq_table_destroy(struct dh_core_dev *dev); + +uint16_t zxdh_en_sf_get_vqs_channels_num(struct dh_core_dev *dh_dev); +int32_t zxdh_en_sf_create_vqs_channels(struct dh_core_dev *dh_dev, void *data); +void zxdh_en_sf_destroy_vqs_channels(struct dh_core_dev *dh_dev); +void zxdh_en_sf_switch_vqs_channel(struct dh_core_dev *dh_dev, int32_t channel, + int32_t op); +int32_t zxdh_en_sf_vqs_channel_bind_handler(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct dh_vq_handler *handler); +void zxdh_en_sf_vqs_channel_unbind_handler(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num); +int32_t zxdh_en_sf_vq_bind_channel(struct dh_core_dev *dh_dev, + int32_t channel_num, int32_t queue_index, + uint16_t vq_idx); +void zxdh_en_sf_vq_unbind_channel(struct dh_core_dev *dh_dev, + int32_t queue_index); +int32_t zxdh_en_sf_vqs_bind_eqs(struct dh_core_dev *dh_dev, + int32_t vqs_channel_num, + struct list_head *vq_node); +void zxdh_en_sf_vqs_unbind_eqs(struct dh_core_dev *dh_dev, + int32_t 
vqs_channel_num); +void __iomem *zxdh_en_sf_map_vq_notify(struct dh_core_dev *dh_dev, + uint32_t index, resource_size_t *pa); +void zxdh_en_sf_unmap_vq_notify(struct dh_core_dev *dh_dev, void *priv); +void zxdh_en_sf_activate_phy_vq(struct dh_core_dev *dh_dev, uint32_t phy_index, + int32_t queue_size, uint64_t desc_addr, + uint64_t avail_addr, uint64_t used_addr); +void zxdh_en_sf_set_queue_enable(struct dh_core_dev *dh_dev, uint16_t index, + bool enable); +uint16_t zxdh_en_sf_get_epbdf(struct dh_core_dev *dh_dev); +uint64_t zxdh_en_sf_get_spec_sbdf(struct dh_core_dev *dh_dev); +uint16_t zxdh_en_sf_get_vport(struct dh_core_dev *dh_dev); +uint16_t zxdh_en_sf_get_pcie_id(struct dh_core_dev *dh_dev); +uint16_t zxdh_en_sf_get_slot_id(struct dh_core_dev *dh_dev); +enum dh_coredev_type zxdh_en_sf_get_coredev_type(struct dh_core_dev *dh_dev); +void zxdh_en_sf_dpp_np_init(struct dh_core_dev *dh_dev, uint32_t vport); +struct pci_dev *zxdh_en_sf_get_pdev(struct dh_core_dev *dh_dev); +uint64_t zxdh_en_sf_get_bar_virt_addr(struct dh_core_dev *dh_dev, + uint8_t bar_num); +int32_t zxdh_en_sf_do_cmd_exec(struct dh_core_dev *dh_dev, uint32_t dst, + uint32_t id, uint32_t len, void *payload, + void *ack); +struct zxdh_vf_item *zxdh_en_sf_get_vf_item(struct dh_core_dev *dh_dev, + uint16_t vf_idx); + +struct dh_en_sf_eq_table { + struct dh_irq **vq_irqs; + struct dh_irq *async_irq; + struct dh_eq_async async_eq; + int32_t vq_irq_num; + struct list_head vqs_eqs_list; +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.c b/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.c new file mode 100644 index 000000000000..95b72dda9beb --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.c @@ -0,0 +1,4 @@ +#include +#include +#include +#include "en_sf_irq.h" diff --git a/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.h b/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.h new file mode 100644 index 000000000000..f0c390151856 --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/en_sf/en_sf_irq.h @@ -0,0 +1,24 @@ +#ifndef __EN_SF_IRQ_H__ +#define __EN_SF_IRQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +struct dh_irq *dh_pf_async_irq_request(struct dh_core_dev *dev); +int32_t dh_pf_irq_table_create(struct dh_core_dev *dev); +void dh_pf_irq_table_destroy(struct dh_core_dev *dev); +int32_t dh_pf_irq_table_init(struct dh_core_dev *dev); + +struct dh_en_sf_irq_table { + struct dh_irq_pool *sf_vq_pool; +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c new file mode 100644 index 000000000000..3f7e6bbe2a50 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c @@ -0,0 +1,73 @@ +#include +#include +#include +#include "en_pf.h" +#include "zxdh_tsn.h" +#include "zxdh_tsn_reg.h" +#include "zxdh_tsn_comm.h" +#include "zxdh_tsn_ioctl.h" + +int32_t zxdh_tsn_init(struct dh_core_dev *dh_dev) +{ + struct zxdh_tsn_private *tsn = NULL; + struct zxdh_pf_device *pf_dev = NULL; + + ZXDH_TSN_COMM_CHECK_POINT(dh_dev); + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_RETURN_OK(dh_dev->coredev_type, + DH_COREDEV_PF); + + pf_dev = dh_core_priv(dh_dev); + ZXDH_TSN_COMM_CHECK_POINT(pf_dev); + + tsn = kzalloc(sizeof(struct zxdh_tsn_private), GFP_KERNEL); + ZXDH_TSN_COMM_CHECK_POINT(tsn); + + pf_dev->tsn = tsn; + + memset(tsn, 0x00, sizeof(struct zxdh_tsn_private)); + + tsn->tsn_qbv_cap.ct_min = TSN_CYCLE_TIME_MIN; + tsn->tsn_qbv_cap.ct_max = TSN_CYCLE_TIME_MAX; + tsn->tsn_qbv_cap.it_min = TSN_INTERVAL_TIME_MIN; + tsn->tsn_qbv_cap.it_max = TSN_INTERVAL_TIME_MAX; + tsn->tsn_qbv_cap.gcl_num = TSN_PORT_GCL_NUM; + tsn->tsn_port_id.port_id = TSN_PORT_PORT_ID_DEF; + tsn->pci_ioremap_addr = pf_dev->pci_ioremap_addr[0]; + + spin_lock_init(&tsn->tsn_spin_lock); + + hrtimer_init(&tsn->tsn_qbv_change_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + tsn->tsn_qbv_change_timer.function = 
zxdh_tsn_qbv_change_timer_callback; + + return TSN_OK; +} +EXPORT_SYMBOL(zxdh_tsn_init); + +void zxdh_tsn_exit(struct dh_core_dev *dh_dev) +{ + struct zxdh_tsn_private *tsn = NULL; + struct zxdh_pf_device *pf_dev = NULL; + + ZXDH_TSN_COMM_CHECK_POINT_RETURN_NONE(dh_dev); + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_RETURN_NONE(dh_dev->coredev_type, + DH_COREDEV_PF); + + pf_dev = dh_core_priv(dh_dev); + ZXDH_TSN_COMM_CHECK_POINT_RETURN_NONE(pf_dev); + + tsn = pf_dev->tsn; + ZXDH_TSN_COMM_CHECK_POINT_RETURN_NONE(tsn); + + hrtimer_cancel(&tsn->tsn_qbv_change_timer); + + if (!IS_ERR_OR_NULL((void *)(tsn->tsn_reg_base_addr))) { + tsn_port_disable_set(tsn); + tsn_port_phy_port_set(tsn, TSN_PORT_PORT_ID_DEF); + } + + kfree(tsn); +} +EXPORT_SYMBOL(zxdh_tsn_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.h b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.h new file mode 100644 index 000000000000..8fe2de7f1759 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.h @@ -0,0 +1,104 @@ +#ifndef __ZXDH_TSN_H__ +#define __ZXDH_TSN_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +#define TSN_PORT_RAM_NUM (2) +#define TSN_PORT_RAM_MAX (TSN_PORT_RAM_NUM - 1) +#define TSN_PORT_GCL_NUM (250) +#define TSN_PORT_GCL_EXT_NUM (6) +#define TSN_PORT_GCL_MAX (TSN_PORT_GCL_NUM + TSN_PORT_GCL_EXT_NUM - 1) +#define TSN_PORT_QUEUE_NUM (8) +#define TSN_PORT_QUEUE_MAX (TSN_PORT_QUEUE_NUM - 1) +#define TSN_PORT_PORT_ID_NUM (4) +#define TSN_PORT_PORT_ID_MAX (TSN_PORT_PORT_ID_NUM - 1) +#define TSN_PORT_PORT_ID_DEF (15) +#define TSN_PORT_TIMER_ID_NUM (4) +#define TSN_PORT_TIMER_ID_MAX (TSN_PORT_TIMER_ID_NUM - 1) + +#define TSN_PORT_GATE_ENABLE (1) +#define TSN_PORT_GATE_DISABLE (0) +#define TSN_PORT_INIT_ENABLE (1) +#define TSN_PORT_INIT_DISABLE (0) +#define TSN_PORT_CHANGE_ENABLE (1) +#define TSN_PORT_CHANGE_DISABLE (0) + +#define TSN_PORT_GATE_IDLE (0) +#define TSN_PORT_GATE_RUNNING (1) +#define 
TSN_PORT_GATE_CHANGING (2) +#define TSN_PORT_GATE_PENDING (3) + +#define TSN_CYCLE_TIME_MIN (500000) +#define TSN_CYCLE_TIME_MAX (4000000000) +#define TSN_INTERVAL_TIME_MIN (1000) +#define TSN_INTERVAL_TIME_MAX (16000000) + +struct zxdh_tsn_port_id { + uint32_t port_id; +}; + +struct zxdh_tsn_timer_id { + uint32_t timer_id; +}; + +struct zxdh_tsn_qbv_cap { + uint64_t ct_min; + uint64_t ct_max; + uint32_t it_min; + uint32_t it_max; + uint32_t gcl_num; +}; + +struct zxdh_tsn_qbv_entry { + uint32_t gate_state; + uint32_t time_interval; +}; + +struct zxdh_tsn_qbv_basic { + uint64_t base_time; + uint64_t cycle_time; + uint32_t maxsdu[TSN_PORT_QUEUE_NUM]; + uint32_t guard_band_time[TSN_PORT_QUEUE_NUM]; + uint32_t control_list_length; + struct zxdh_tsn_qbv_entry control_list[TSN_PORT_GCL_NUM]; +}; + +struct zxdh_tsn_qbv_conf { + uint32_t enable; + struct zxdh_tsn_qbv_basic admin; +}; + +struct zxdh_tsn_qbv_status { + uint64_t current_time; + uint32_t current_status; + struct zxdh_tsn_qbv_basic oper; +}; + +struct zxdh_tsn_private { + uint32_t phy_port_id; + uint64_t pci_ioremap_addr; + uint64_t tsn_reg_base_addr; + + struct zxdh_tsn_port_id tsn_port_id; + struct zxdh_tsn_qbv_cap tsn_qbv_cap; + struct zxdh_tsn_qbv_conf tsn_qbv_conf[TSN_PORT_RAM_NUM]; + struct hrtimer tsn_qbv_change_timer; + + spinlock_t tsn_spin_lock; +}; + +int32_t zxdh_tsn_init(struct dh_core_dev *dh_dev); +void zxdh_tsn_exit(struct dh_core_dev *dh_dev); + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_TSN_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_comm.h b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_comm.h new file mode 100644 index 000000000000..3ecadc23f475 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_comm.h @@ -0,0 +1,223 @@ +#ifndef __ZXDH_TSN_COMM_H__ +#define __ZXDH_TSN_COMM_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "log.h" + +#ifndef TSN_OK +#define TSN_OK (0) +#endif + +#define ZXDH_TSN_COMM_CHECK_RC(rc) \ + do { \ + if 
(TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + return rc; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_RETURN_NONE(rc) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_UNLOCK_RETURN_NONE(rc, lock) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + spin_unlock(lock); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_UNLOCK_RETURN_VALUE(rc, lock, value) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + spin_unlock(lock); \ + return value; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_UNLOCKIRQ_MEMORY_FREE(rc, lock, flags, ptr) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + spin_unlock_irqrestore(lock, flags); \ + kfree(ptr); \ + return rc; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_NONE(rc, lock, ptr) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + spin_unlock(lock); \ + kfree(ptr); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE(rc, lock, ptr, \ + value) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + spin_unlock(lock); \ + kfree(ptr); \ + return value; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_RC_MEMORY_FREE(rc, ptr) \ + do { \ + if (TSN_OK != (rc)) { \ + DH_LOG_ERR(MODULE_TSN, "[ErrorCode: %d] !\n", rc); \ + kfree(ptr); \ + return rc; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT(point) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] !\n"); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT_RETURN_NONE(point) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] 
!\n"); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT_RETURN_VALUE(point, value) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] !\n"); \ + return value; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT_UNLOCK_RETURN_NONE(point, lock) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] !\n"); \ + spin_unlock(lock); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT_UNLOCK_RETURN_VALUE(point, lock, value) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] !\n"); \ + spin_unlock(lock); \ + return value; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_POINT_UNLOCK_MEMORY_FREE_RETURN_NONE(point, lock, \ + ptr) \ + do { \ + if (NULL == (point)) { \ + DH_LOG_ERR(MODULE_TSN, "[Error: POINT NULL] !\n"); \ + spin_unlock(lock); \ + kfree(ptr); \ + return; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + DH_LOG_ERR( \ + MODULE_TSN, \ + "[Error: VALUE %u INVALID] [MIN %u MAX %u] !\n", \ + val, min, max); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_64(val, min, max) \ + do { \ + if (((val) < (min)) || ((val) > (max))) { \ + DH_LOG_ERR( \ + MODULE_TSN, \ + "[Error: VALUE %llu INVALID] [MIN %llu MAX %llu] !\n", \ + val, min, max); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_MAX(val, max) \ + do { \ + if ((val) > (max)) { \ + DH_LOG_ERR(MODULE_TSN, \ + "[Error: VALUE %u INVALID] [MAX %u] !\n", \ + val, max); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_MAX_MEMORY_FREE(val, max, ptr) \ + do { \ + if ((val) > (max)) { \ + DH_LOG_ERR(MODULE_TSN, \ + "[Error: VALUE %u INVALID] [MAX %u] !\n", \ + val, max); \ + kfree(ptr); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_EQUAL(val, equal) \ + do { \ + if ((val) != 
(equal)) { \ + DH_LOG_ERR(MODULE_TSN, \ + "[Error: VALUE %u INVALID] [EQUAL %u] !\n", \ + val, equal); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_64(val, equal) \ + do { \ + if ((val) != (equal)) { \ + DH_LOG_ERR( \ + MODULE_TSN, \ + "[Error: VALUE %llu INVALID] [EQUAL %llu] !\n", \ + val, equal); \ + return -EINVAL; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_RETURN_OK(val, equal) \ + do { \ + if ((val) != (equal)) { \ + return TSN_OK; \ + } \ + } while (0) + +#define ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_RETURN_NONE(val, equal) \ + do { \ + if ((val) != (equal)) { \ + return; \ + } \ + } while (0) + +#ifdef __cplusplus +} +#endif +#endif /* __ZXDH_TSN_COMM_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.c b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.c new file mode 100644 index 000000000000..d5d428471db5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.c @@ -0,0 +1,837 @@ +#include +#include "log.h" +#include "en_pf.h" +#include "en_aux.h" +#include "zxdh_tsn.h" +#include "zxdh_tsn_reg.h" +#include "zxdh_tsn_ioctl.h" +#include "zxdh_tsn_comm.h" +#ifndef HAVE_IOPOLL_OPS +#include +#endif + +static int32_t zxdh_tsn_qbv_disable(struct zxdh_tsn_private *tsn) +{ + int32_t ret = 0; + + ret = tsn_port_disable_set(tsn); + ZXDH_TSN_COMM_CHECK_RC(ret); + + memset(tsn->tsn_qbv_conf, 0x00, sizeof(tsn->tsn_qbv_conf)); + + hrtimer_cancel(&tsn->tsn_qbv_change_timer); + + DH_LOG_INFO(MODULE_TSN, "tsn port id %u is disable.\n", + tsn->tsn_port_id.port_id); + + return TSN_OK; +} + +static int32_t zxdh_tsn_port_id_set(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + int32_t ret = 0; + uint64_t reg_base_addr = 0; + struct zxdh_tsn_port_id *tsn_port_id = NULL; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_port_id)); + + tsn_port_id = (struct zxdh_tsn_port_id *)msg->data; + ZXDH_TSN_COMM_CHECK_POINT(tsn_port_id); + + 
ZXDH_TSN_COMM_CHECK_INDEX_MAX(tsn_port_id->port_id, + TSN_PORT_PORT_ID_MAX); + + if (!IS_ERR_OR_NULL((void *)(tsn->tsn_reg_base_addr))) { + ret = tsn_port_disable_set(tsn); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_phy_port_set(tsn, TSN_PORT_PORT_ID_DEF); + ZXDH_TSN_COMM_CHECK_RC(ret); + } + + reg_base_addr = tsn->pci_ioremap_addr + TSN_PORT_REG_BAR_OFFSET + + ((tsn_port_id->port_id) * TSN_PORT_REG_BAR_SIZE); + ret = tsn_write(reg_base_addr, TSN_PORT_PHY_PORT_SEL, tsn->phy_port_id); + ZXDH_TSN_COMM_CHECK_RC(ret); + + DH_LOG_INFO(MODULE_TSN, "tsn port id %u is bound to phy port id %u.\n", + tsn_port_id->port_id, tsn->phy_port_id); + tsn->tsn_reg_base_addr = reg_base_addr; + tsn->tsn_port_id.port_id = tsn_port_id->port_id; + + ret = zxdh_tsn_qbv_disable(tsn); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +static int32_t zxdh_tsn_port_id_get(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + struct zxdh_tsn_port_id tsn_port_id; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_port_id)); + + tsn_port_id.port_id = tsn->tsn_port_id.port_id; + + memcpy(msg->data, &tsn_port_id, sizeof(struct zxdh_tsn_port_id)); + + return TSN_OK; +} + +static int32_t zxdh_tsn_timer_id_set(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + int32_t ret = 0; + struct zxdh_tsn_timer_id *tsn_timer_id = NULL; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_timer_id)); + + tsn_timer_id = (struct zxdh_tsn_timer_id *)msg->data; + ZXDH_TSN_COMM_CHECK_POINT(tsn_timer_id); + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(tsn_timer_id->timer_id, + TSN_PORT_TIMER_ID_MAX); + + ret = tsn_port_timer_id_set(tsn, tsn_timer_id->timer_id); + ZXDH_TSN_COMM_CHECK_RC(ret); + + DH_LOG_INFO(MODULE_TSN, "tsn port id %u is bound to timer id %u.\n", + tsn->tsn_port_id.port_id, tsn_timer_id->timer_id); + + return TSN_OK; +} + +static int32_t zxdh_tsn_timer_id_get(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + 
int32_t ret = 0; + struct zxdh_tsn_timer_id tsn_timer_id; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_timer_id)); + + ret = tsn_port_timer_id_get(tsn, &tsn_timer_id.timer_id); + ZXDH_TSN_COMM_CHECK_RC(ret); + + memcpy(msg->data, &tsn_timer_id, sizeof(struct zxdh_tsn_timer_id)); + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_conf_check(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf) +{ + uint32_t index = 0; + uint64_t time_interval_sum = 0; + struct zxdh_tsn_qbv_basic *tsn_qbv_basic = &tsn_qbv_conf->admin; + struct zxdh_tsn_qbv_entry *tsn_qbv_entry = + tsn_qbv_conf->admin.control_list; + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(tsn_qbv_conf->enable, + TSN_PORT_GATE_ENABLE); + + ZXDH_TSN_COMM_CHECK_INDEX_64(tsn_qbv_basic->cycle_time, + tsn->tsn_qbv_cap.ct_min, + tsn->tsn_qbv_cap.ct_max); + + ZXDH_TSN_COMM_CHECK_INDEX(tsn_qbv_basic->control_list_length, 1, + tsn->tsn_qbv_cap.gcl_num); + + for (index = 0; index < tsn_qbv_basic->control_list_length; index++) { + ZXDH_TSN_COMM_CHECK_INDEX(tsn_qbv_entry[index].time_interval, + tsn->tsn_qbv_cap.it_min, + tsn->tsn_qbv_cap.it_max); + time_interval_sum += tsn_qbv_entry[index].time_interval; + } + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL_64(tsn_qbv_basic->cycle_time, + time_interval_sum); + + return TSN_OK; +} + +static int32_t +zxdh_tsn_qbv_base_time_cal(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf_oper, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf_admin, + uint64_t real_tod_time, uint32_t status) +{ + uint64_t oper_base_time = 0; + uint64_t oper_cycle_time = 0; + uint64_t admin_base_time = 0; + uint64_t admin_cycle_time = 0; + uint64_t cycle_time_extension = 0; + + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_conf_oper); + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_conf_admin); + + oper_base_time = tsn_qbv_conf_oper->admin.base_time; + oper_cycle_time = tsn_qbv_conf_oper->admin.cycle_time; + + admin_base_time = tsn_qbv_conf_admin->admin.base_time; + admin_cycle_time = 
tsn_qbv_conf_admin->admin.cycle_time; + + if (real_tod_time >= admin_base_time) { + admin_base_time += + admin_cycle_time * (((real_tod_time - admin_base_time) / + admin_cycle_time) + + 1); + if ((admin_base_time - real_tod_time) < + TSN_SOFT_RESERVED_TIME) { + admin_base_time += TSN_RESERVED_TIME(admin_cycle_time); + } + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u admin_base_time change to %llu real_tod_time %llu diff %llu.\n", + tsn->tsn_port_id.port_id, admin_base_time, + real_tod_time, admin_base_time - real_tod_time); + } else if ((admin_base_time - real_tod_time) < TSN_SOFT_RESERVED_TIME) { + admin_base_time += TSN_RESERVED_TIME(admin_cycle_time); + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u admin_base_time change to %llu real_tod_time %llu diff %llu.\n", + tsn->tsn_port_id.port_id, admin_base_time, + real_tod_time, admin_base_time - real_tod_time); + } + + if (status != TSN_PORT_GATE_IDLE) { + if (real_tod_time >= oper_base_time) { + cycle_time_extension = + (admin_base_time - oper_base_time) % + oper_cycle_time; + if (cycle_time_extension < + TSN_CYCLE_TIME_EXTENSION_MIN) { + cycle_time_extension += oper_cycle_time; + } + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u cycle_time_extension %llu .\n", + tsn->tsn_port_id.port_id, cycle_time_extension); + + if (cycle_time_extension < oper_cycle_time) { + oper_base_time += + oper_cycle_time * + (((real_tod_time - oper_base_time) / + oper_cycle_time) + + 1); + if ((oper_base_time - real_tod_time) < + TSN_SOFT_RESERVED_TIME) { + oper_base_time += TSN_RESERVED_TIME( + oper_cycle_time); + } + } else { + oper_base_time += + oper_cycle_time * + (((real_tod_time - oper_base_time) / + oper_cycle_time) + + 2); + if ((oper_base_time - oper_cycle_time - + real_tod_time) < TSN_SOFT_RESERVED_TIME) { + oper_base_time += TSN_RESERVED_TIME( + oper_cycle_time); + } + } + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u oper_base_time change to %llu real_tod_time %llu diff %llu.\n", + tsn->tsn_port_id.port_id, oper_base_time, + 
real_tod_time, oper_base_time - real_tod_time); + } + + if (oper_base_time > admin_base_time) { + if (cycle_time_extension == oper_cycle_time) { + admin_base_time += + admin_cycle_time * + (((oper_base_time - admin_base_time) / + admin_cycle_time)); + } else { + admin_base_time += + admin_cycle_time * + (((oper_base_time - admin_base_time) / + admin_cycle_time) + + 1); + } + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u admin_base_time change to %llu oper_base_time %llu diff %llu.\n", + tsn->tsn_port_id.port_id, admin_base_time, + oper_base_time, + admin_base_time - oper_base_time); + } + } + + tsn_qbv_conf_admin->admin.base_time = admin_base_time; + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_cycle_time_extension_cal( + struct zxdh_tsn_private *tsn, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf_oper, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf_admin, + uint64_t *cycle_time_extension) +{ + uint64_t admin_base_time = 0; + uint64_t oper_base_time = 0; + uint64_t oper_cycle_time = 0; + + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_conf_oper); + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_conf_admin); + ZXDH_TSN_COMM_CHECK_POINT(cycle_time_extension); + + oper_base_time = tsn_qbv_conf_oper->admin.base_time; + oper_cycle_time = tsn_qbv_conf_oper->admin.cycle_time; + + admin_base_time = tsn_qbv_conf_admin->admin.base_time; + + if ((admin_base_time > oper_base_time) && (oper_cycle_time != 0)) { + oper_base_time += + oper_cycle_time * + ((admin_base_time - oper_base_time) / oper_cycle_time); + if (admin_base_time >= oper_base_time) { + *cycle_time_extension = + admin_base_time - oper_base_time; + if ((*cycle_time_extension) < + TSN_CYCLE_TIME_EXTENSION_MIN) { + *cycle_time_extension += oper_cycle_time; + } + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u cycle_time_extension %llu.\n", + tsn->tsn_port_id.port_id, + *cycle_time_extension); + return TSN_OK; + } + } + + return -EINVAL; +} + +static int32_t zxdh_tsn_qbv_gate_status_get(struct zxdh_tsn_private *tsn, + uint32_t *p_ram_n_idle, + uint32_t 
*p_status) +{ + int32_t ret = 0; + uint32_t init_finish = 0; + uint32_t change_en = 0; + uint32_t ram_n = 0; + uint32_t status = 0; + uint32_t enable = 0; + uint64_t admin_base_time = 0; + uint64_t real_tod_time = 0; + uint64_t cycle_time_extension = 0; + + ret = tsn_port_status_get(tsn, &ram_n, &status); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_init_finish_get(tsn, &init_finish); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_change_en_get(tsn, &change_en); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_enable_get(tsn, &enable); + ZXDH_TSN_COMM_CHECK_RC(ret); + + *p_ram_n_idle = + (ram_n == 0) ? 0 : ((ram_n == 1) ? 1 : ((ram_n == 2) ? 0 : 2)); + ZXDH_TSN_COMM_CHECK_INDEX_MAX(*p_ram_n_idle, TSN_PORT_RAM_MAX); + + if ((status == 0) && (init_finish == 0) && (change_en == 0) && + (enable == TSN_PORT_GATE_DISABLE)) { + *p_status = TSN_PORT_GATE_IDLE; + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u idle status %u init_finish %u change_en %u enable %u.\n", + tsn->tsn_port_id.port_id, status, init_finish, + change_en, enable); + return TSN_OK; + } + + if (((status >= 3) && (status <= 8)) && (init_finish == 0) && + (change_en == 0) && (enable == TSN_PORT_GATE_ENABLE)) { + *p_status = TSN_PORT_GATE_RUNNING; + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u running status %u init_finish %u change_en %u enable %u.\n", + tsn->tsn_port_id.port_id, status, init_finish, + change_en, enable); + return TSN_OK; + } + + if (((status >= 3) && (status <= 8)) && (init_finish == 0) && + (change_en == 1) && (enable == TSN_PORT_GATE_ENABLE)) { + admin_base_time = + tsn->tsn_qbv_conf[*p_ram_n_idle].admin.base_time; + ret = zxdh_tsn_qbv_cycle_time_extension_cal( + tsn, + &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(*p_ram_n_idle)], + &tsn->tsn_qbv_conf[*p_ram_n_idle], + &cycle_time_extension); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_real_tod_time_get(tsn, &real_tod_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + if ((admin_base_time - cycle_time_extension - + 
TSN_SOFT_RESERVED_TIME) > real_tod_time) { + *p_status = TSN_PORT_GATE_CHANGING; + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u changing status %u init_finish %u change_en %u enable %u.\n", + tsn->tsn_port_id.port_id, status, init_finish, + change_en, enable); + return TSN_OK; + } + } + + *p_status = TSN_PORT_GATE_PENDING; + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u pending status %u init_finish %u change_en %u enable %u.\n", + tsn->tsn_port_id.port_id, status, init_finish, change_en, + enable); + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_basic_set(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf, + uint32_t ram_n_idle) +{ + int32_t ret = 0; + uint32_t index = 0; + + ret = tsn_port_default_gate_set(tsn, 0x00); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_change_gate_set(tsn, 0xFF); + ZXDH_TSN_COMM_CHECK_RC(ret); + + for (index = 0; index < TSN_PORT_QUEUE_NUM; index++) { + ret = tsn_port_guard_band_time_set( + tsn, index, tsn_qbv_conf->admin.guard_band_time[index]); + ZXDH_TSN_COMM_CHECK_RC(ret); + } + + ret = tsn_port_gcl_num_set(tsn, ram_n_idle, + tsn_qbv_conf->admin.control_list_length); + ZXDH_TSN_COMM_CHECK_RC(ret); + + for (index = 0; index < tsn_qbv_conf->admin.control_list_length; + index++) { + ret = tsn_port_gcl_control_set( + tsn, ram_n_idle, index, + tsn_qbv_conf->admin.control_list[index].gate_state, + tsn_qbv_conf->admin.control_list[index].time_interval); + ZXDH_TSN_COMM_CHECK_RC(ret); + } + + ret = tsn_port_enable_set(tsn, TSN_PORT_GATE_ENABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_cycle_time_set(tsn, tsn_qbv_conf->admin.cycle_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_base_time_set(tsn, tsn_qbv_conf->admin.base_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_change_set(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_qbv_conf *tsn_qbv_conf, + uint32_t ram_n_idle) +{ + int32_t ret = 0; + + ret = tsn_port_change_en_set(tsn, 
TSN_PORT_CHANGE_DISABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = zxdh_tsn_qbv_basic_set(tsn, tsn_qbv_conf, ram_n_idle); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_change_en_set(tsn, TSN_PORT_CHANGE_ENABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_set(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + int32_t ret = 0; + uint32_t ram_n_idle = 0; + uint32_t status = 0; + uint64_t cycle_time_extension = 0; + uint64_t real_tod_time = 0; + uint64_t expires_in_nanosecond = 0; + struct zxdh_tsn_qbv_conf *tsn_qbv_conf = NULL; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_qbv_conf)); + + tsn_qbv_conf = (struct zxdh_tsn_qbv_conf *)msg->data; + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_conf); + + if (tsn_qbv_conf->enable == TSN_PORT_GATE_DISABLE) { + ret = zxdh_tsn_qbv_disable(tsn); + ZXDH_TSN_COMM_CHECK_RC(ret); + return TSN_OK; + } + + ret = zxdh_tsn_qbv_conf_check(tsn, tsn_qbv_conf); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = zxdh_tsn_qbv_gate_status_get(tsn, &ram_n_idle, &status); + ZXDH_TSN_COMM_CHECK_RC(ret); + + if (status == TSN_PORT_GATE_PENDING) { + DH_LOG_ERR(MODULE_TSN, "tsn port id %u is pending.\n", + tsn->tsn_port_id.port_id); + return -EBUSY; + } + + ret = tsn_port_real_tod_time_get(tsn, &real_tod_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = zxdh_tsn_qbv_base_time_cal( + tsn, &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)], + tsn_qbv_conf, real_tod_time, status); + ZXDH_TSN_COMM_CHECK_RC(ret); + + if (status == TSN_PORT_GATE_IDLE) { + ret = zxdh_tsn_qbv_basic_set(tsn, tsn_qbv_conf, ram_n_idle); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_init_finish_set(tsn, TSN_PORT_INIT_ENABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + memcpy(&tsn->tsn_qbv_conf[ram_n_idle], tsn_qbv_conf, + sizeof(struct zxdh_tsn_qbv_conf)); + + DH_LOG_INFO(MODULE_TSN, "tsn port id %u ram %u is enable.\n", + tsn->tsn_port_id.port_id, ram_n_idle); + } else { + ret = 
zxdh_tsn_qbv_cycle_time_extension_cal( + tsn, + &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)], + tsn_qbv_conf, &cycle_time_extension); + ZXDH_TSN_COMM_CHECK_RC(ret); + + // if (cycle_time_extension < TSN_CYCLE_TIME_EXTENSION_MIN) + // { + // ret = zxdh_tsn_qbv_change_set(tsn, tsn_qbv_conf, ram_n_idle); + // ZXDH_TSN_COMM_CHECK_RC(ret); + + // memcpy(&tsn->tsn_qbv_conf[ram_n_idle], tsn_qbv_conf, sizeof(struct zxdh_tsn_qbv_conf)); + + // hrtimer_cancel(&tsn->tsn_qbv_change_timer); + + // DH_LOG_INFO(MODULE_TSN, "tsn port id %u ram %u is going to change.\n", tsn->tsn_port_id.port_id, ram_n_idle); + // } + // else + // { + memcpy(&tsn->tsn_qbv_conf[ram_n_idle], tsn_qbv_conf, + sizeof(struct zxdh_tsn_qbv_conf)); + + expires_in_nanosecond = + tsn->tsn_qbv_conf[ram_n_idle].admin.base_time - + cycle_time_extension - TSN_TIMER_RESERVED_TIME - + real_tod_time; + hrtimer_start(&tsn->tsn_qbv_change_timer, + ns_to_ktime(expires_in_nanosecond), + HRTIMER_MODE_REL); + + DH_LOG_INFO(MODULE_TSN, + "tsn port id %u timer wake up in %llu ns later.\n", + tsn->tsn_port_id.port_id, expires_in_nanosecond); + // } + } + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_status_get(struct zxdh_tsn_private *tsn, + struct zxdh_tsn_msg *msg) +{ + int32_t ret = 0; + uint32_t ram_n_idle = 0; + struct zxdh_tsn_qbv_status *tsn_qbv_status; + + ZXDH_TSN_COMM_CHECK_INDEX_EQUAL( + msg->len, (uint32_t)sizeof(struct zxdh_tsn_qbv_status)); + + tsn_qbv_status = (struct zxdh_tsn_qbv_status *)msg->data; + ZXDH_TSN_COMM_CHECK_POINT(tsn_qbv_status); + + ret = zxdh_tsn_qbv_gate_status_get(tsn, &ram_n_idle, + &tsn_qbv_status->current_status); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_real_tod_time_get(tsn, &tsn_qbv_status->current_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + memcpy(&tsn_qbv_status->oper, + &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)].admin, + sizeof(struct zxdh_tsn_qbv_basic)); + + return TSN_OK; +} + +static int32_t zxdh_tsn_qbv_cycle_time_extension_set( + struct 
zxdh_tsn_private *tsn, struct zxdh_tsn_qbv_conf *tsn_qbv_conf, + uint64_t cycle_time_extension, uint32_t ram_n_idle) +{ + int32_t ret = 0; + uint32_t index = 0; + uint32_t gate_state = 0; + uint32_t change_gate_status = 0; + uint32_t time_interval = 0; + uint64_t real_tod_time = 0; + uint64_t cycle_time_reserved = 0; + + ret = tsn_port_real_tod_time_get(tsn, &real_tod_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + memcpy(tsn_qbv_conf, + &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)], + sizeof(struct zxdh_tsn_qbv_conf)); + + if (cycle_time_extension > tsn_qbv_conf->admin.cycle_time) { + cycle_time_reserved = + cycle_time_extension - tsn_qbv_conf->admin.cycle_time; + + for (index = 0; index < TSN_PORT_GCL_EXT_NUM; index++) { + gate_state = tsn_qbv_conf->admin.control_list[index] + .gate_state; + time_interval = tsn_qbv_conf->admin.control_list[index] + .time_interval; + + ret = tsn_port_gcl_control_set( + tsn, ram_n_idle, + tsn_qbv_conf->admin.control_list_length + index, + gate_state, time_interval); + ZXDH_TSN_COMM_CHECK_RC(ret); + + if (cycle_time_reserved > time_interval) { + cycle_time_reserved = + cycle_time_reserved - time_interval; + continue; + } + break; + } + } + + tsn_qbv_conf->admin.base_time = + tsn->tsn_qbv_conf[ram_n_idle].admin.base_time - + cycle_time_extension + TSN_HW_RESERVED_TIME; + tsn_qbv_conf->admin.cycle_time = + cycle_time_extension - (2 * TSN_HW_RESERVED_TIME); + tsn_qbv_conf->admin.control_list[0].time_interval -= + TSN_HW_RESERVED_TIME; + + ret = zxdh_tsn_qbv_change_set(tsn, tsn_qbv_conf, ram_n_idle); + ZXDH_TSN_COMM_CHECK_RC(ret); + + change_gate_status = tsn_qbv_conf->admin.control_list[0].gate_state; + ret = tsn_port_change_gate_set(tsn, change_gate_status); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_gcl_num_set(tsn, ram_n_idle, + tsn_qbv_conf->admin.control_list_length + + index + 1); + ZXDH_TSN_COMM_CHECK_RC(ret); + + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u admin_base_time %llu real_tod_time %llu diff %llu.\n", + 
tsn->tsn_port_id.port_id, tsn_qbv_conf->admin.base_time, + real_tod_time, tsn_qbv_conf->admin.base_time - real_tod_time); + DH_LOG_INFO(MODULE_TSN, + "tsn port id %u ram %u is going to change in timer.\n", + tsn->tsn_port_id.port_id, ram_n_idle); + + return TSN_OK; +} + +enum hrtimer_restart zxdh_tsn_qbv_change_timer_callback(struct hrtimer *t) +{ + int32_t ret = 0; + uint32_t change_en = 0; + uint32_t change_gate_status = 0; + uint32_t ram_n_idle = 0; + uint32_t status = 0; + uint64_t real_tod_time = 0; + uint64_t cycle_time_extension = 0; + struct zxdh_tsn_private *tsn = NULL; + struct zxdh_tsn_qbv_conf *tsn_qbv_conf = NULL; + + tsn = container_of(t, struct zxdh_tsn_private, tsn_qbv_change_timer); + ZXDH_TSN_COMM_CHECK_POINT_RETURN_VALUE(tsn, HRTIMER_NORESTART); + + spin_lock(&tsn->tsn_spin_lock); + + tsn_qbv_conf = kzalloc(sizeof(struct zxdh_tsn_qbv_conf), GFP_KERNEL); + ZXDH_TSN_COMM_CHECK_POINT_UNLOCK_RETURN_VALUE( + tsn_qbv_conf, &tsn->tsn_spin_lock, HRTIMER_NORESTART); + + ret = zxdh_tsn_qbv_gate_status_get(tsn, &ram_n_idle, &status); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + ret = zxdh_tsn_qbv_cycle_time_extension_cal( + tsn, &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)], + &tsn->tsn_qbv_conf[ram_n_idle], &cycle_time_extension); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + ret = zxdh_tsn_qbv_cycle_time_extension_set( + tsn, tsn_qbv_conf, cycle_time_extension, ram_n_idle); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + ret = readx_poll_timeout_atomic( + readl, + ((const volatile void *)((tsn->tsn_reg_base_addr) + + TSN_PORT_CHANGE_EN)), + change_en, (change_en == 0), 1, (TSN_TIMER_RESERVED_TIME * 2)); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, 
HRTIMER_NORESTART); + + change_gate_status = + tsn_qbv_conf->admin + .control_list[tsn_qbv_conf->admin.control_list_length - + 1] + .gate_state; + + ret = tsn_port_real_tod_time_get(tsn, &real_tod_time); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + memcpy(tsn_qbv_conf, &tsn->tsn_qbv_conf[ram_n_idle], + sizeof(struct zxdh_tsn_qbv_conf)); + memcpy(&tsn->tsn_qbv_conf[ram_n_idle], + &tsn->tsn_qbv_conf[TSN_RAM_N_IN_SERVICE(ram_n_idle)], + sizeof(struct zxdh_tsn_qbv_conf)); + + ret = zxdh_tsn_qbv_gate_status_get(tsn, &ram_n_idle, &status); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + ret = zxdh_tsn_qbv_change_set(tsn, tsn_qbv_conf, ram_n_idle); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + ret = tsn_port_change_gate_set(tsn, change_gate_status); + ZXDH_TSN_COMM_CHECK_RC_UNLOCK_MEMORY_FREE_RETURN_VALUE( + ret, &tsn->tsn_spin_lock, tsn_qbv_conf, HRTIMER_NORESTART); + + memcpy(&tsn->tsn_qbv_conf[ram_n_idle], tsn_qbv_conf, + sizeof(struct zxdh_tsn_qbv_conf)); + + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u admin_base_time %llu real_tod_time %llu diff %llu.\n", + tsn->tsn_port_id.port_id, tsn_qbv_conf->admin.base_time, + real_tod_time, tsn_qbv_conf->admin.base_time - real_tod_time); + + kfree(tsn_qbv_conf); + + spin_unlock(&tsn->tsn_spin_lock); + + DH_LOG_INFO(MODULE_TSN, + "tsn port id %u ram %u is going to change in timer.\n", + tsn->tsn_port_id.port_id, ram_n_idle); + + return HRTIMER_NORESTART; +} + +static struct zxdh_tsn_ioctl_table tsn_ioctl_table[] = { + { TSN_PORT_ID_SET, zxdh_tsn_port_id_set }, + { TSN_PORT_ID_GET, zxdh_tsn_port_id_get }, + { TSN_TIMER_ID_SET, zxdh_tsn_timer_id_set }, + { TSN_TIMER_ID_GET, zxdh_tsn_timer_id_get }, + { TSN_QBV_CONF_SET, zxdh_tsn_qbv_set }, + { TSN_QBV_STATUS_GET, zxdh_tsn_qbv_status_get }, +}; + 
+int32_t zxdh_en_tsn_func(struct net_device *netdev, struct ifreq *ifr) +{ + int32_t ret = 0; + uint32_t index = 0; + uint32_t table_size = 0; + uint64_t start_time = 0; + uint64_t end_time = 0; + unsigned long flags = 0; + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_pf_device *pf_dev = NULL; + struct zxdh_tsn_private *tsn = NULL; + struct zxdh_tsn_msg *msg = NULL; + + ZXDH_TSN_COMM_CHECK_POINT(netdev); + ZXDH_TSN_COMM_CHECK_POINT(ifr); + + en_priv = netdev_priv(netdev); + ZXDH_TSN_COMM_CHECK_POINT(en_priv); + + en_dev = &en_priv->edev; + ZXDH_TSN_COMM_CHECK_POINT(en_dev); + + pf_dev = dh_core_priv(en_dev->parent->parent); + ZXDH_TSN_COMM_CHECK_POINT(pf_dev); + + tsn = pf_dev->tsn; + ZXDH_TSN_COMM_CHECK_POINT(tsn); + + msg = kzalloc(sizeof(struct zxdh_tsn_msg), GFP_KERNEL); + ZXDH_TSN_COMM_CHECK_POINT(msg); + + ret = unlikely(copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tsn_msg))); + ZXDH_TSN_COMM_CHECK_RC_MEMORY_FREE(ret, msg); + + table_size = (uint32_t)(sizeof(tsn_ioctl_table) / + sizeof(struct zxdh_tsn_ioctl_table)); + + for (index = 0; index < table_size; index++) { + if ((msg->cmd == tsn_ioctl_table[index].cmd) && + (tsn_ioctl_table[index].func != NULL)) { + spin_lock_irqsave(&tsn->tsn_spin_lock, flags); + + start_time = ktime_get_ns(); + + ret = tsn_ioctl_table[index].func(tsn, msg); + ZXDH_TSN_COMM_CHECK_RC_UNLOCKIRQ_MEMORY_FREE( + ret, &tsn->tsn_spin_lock, flags, msg); + + end_time = ktime_get_ns(); + + spin_unlock_irqrestore(&tsn->tsn_spin_lock, flags); + + DH_LOG_INFO( + MODULE_TSN, + "tsn port id %u cmd %u total take up %lld ns.\n", + tsn->tsn_port_id.port_id, msg->cmd, + end_time - start_time); + break; + } + } + + ZXDH_TSN_COMM_CHECK_INDEX_MAX_MEMORY_FREE(index, table_size - 1, msg); + + ret = unlikely(copy_to_user(ifr->ifr_ifru.ifru_data, msg, + sizeof(struct zxdh_tsn_msg))); + ZXDH_TSN_COMM_CHECK_RC_MEMORY_FREE(ret, msg); + + kfree(msg); + + return TSN_OK; +} + 
+EXPORT_SYMBOL_GPL(zxdh_en_tsn_func); diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.h b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.h new file mode 100644 index 000000000000..55a3da0da7cf --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_ioctl.h @@ -0,0 +1,49 @@ +#ifndef __ZXDH_TSN_IOCTL_H__ +#define __ZXDH_TSN_IOCTL_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define TSN_MSG_LEN (4096 - 8) + +#define TSN_PORT_ID_SET (0) +#define TSN_PORT_ID_GET (1) +#define TSN_TIMER_ID_SET (2) +#define TSN_TIMER_ID_GET (3) +#define TSN_QBV_CONF_SET (4) +#define TSN_QBV_STATUS_GET (5) + +#define TSN_SOFT_RESERVED_TIME (500000) +#define TSN_HW_RESERVED_TIME (200) +#define TSN_TIMER_RESERVED_TIME (300000) +#define TSN_RESERVED_TIME(CT) \ + ((((CT) < TSN_SOFT_RESERVED_TIME) ? \ + ((TSN_SOFT_RESERVED_TIME / (CT)) + 1) : \ + 1) * \ + (CT)) +#define TSN_RAM_N_IN_SERVICE(RAM_N_IDLE) ((~(RAM_N_IDLE)) & 1) + +#define TSN_CYCLE_TIME_EXTENSION_MIN (500000) + +struct zxdh_tsn_msg { + uint32_t cmd; + uint32_t len; + uint8_t data[TSN_MSG_LEN]; +}; + +struct zxdh_tsn_ioctl_table { + int32_t cmd; + int32_t (*func)(struct zxdh_tsn_private *tsn, struct zxdh_tsn_msg *msg); +}; + +enum hrtimer_restart zxdh_tsn_qbv_change_timer_callback(struct hrtimer *t); +int32_t zxdh_en_tsn_func(struct net_device *netdev, struct ifreq *ifr); + +#ifdef __cplusplus +} +#endif +#endif /* __ZXDH_TSN_IOCTL_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.c b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.c new file mode 100644 index 000000000000..14f8c94e6639 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.c @@ -0,0 +1,472 @@ +#include +#include "zxdh_tsn.h" +#include "zxdh_tsn_reg.h" +#include "zxdh_tsn_comm.h" + +int32_t tsn_read(uint64_t base_addr, uint32_t offset, uint32_t *p_val) +{ + if (IS_ERR_OR_NULL((void *)(base_addr))) { + DH_LOG_ERR(MODULE_TSN, "base_addr 0x%llx invalid.\n", + base_addr); + return 
-EINVAL; + } + + *p_val = readl((const volatile void *)(base_addr + offset)); + + return TSN_OK; +} + +int32_t tsn_write(uint64_t base_addr, uint32_t offset, uint32_t val) +{ + if (IS_ERR_OR_NULL((void *)(base_addr))) { + DH_LOG_ERR(MODULE_TSN, "base_addr 0x%llx invalid.\n", + base_addr); + return -EINVAL; + } + + writel(val, (volatile void *)(base_addr + offset)); + + return TSN_OK; +} + +int32_t tsn_reg_read(struct zxdh_tsn_private *tsn, uint32_t offset, + uint32_t *p_val) +{ + int32_t ret = 0; + + ZXDH_TSN_COMM_CHECK_POINT(tsn); + ZXDH_TSN_COMM_CHECK_POINT(p_val); + + ret = tsn_read(tsn->tsn_reg_base_addr, offset, p_val); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_reg_write(struct zxdh_tsn_private *tsn, uint32_t offset, + uint32_t val) +{ + int32_t ret = 0; + + ZXDH_TSN_COMM_CHECK_POINT(tsn); + + ret = tsn_write(tsn->tsn_reg_base_addr, offset, val); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_enable_set(struct zxdh_tsn_private *tsn, uint32_t enable) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_QBV_ENABLE, enable); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_enable_get(struct zxdh_tsn_private *tsn, uint32_t *p_enable) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_QBV_ENABLE, p_enable); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_phy_port_set(struct zxdh_tsn_private *tsn, uint32_t phy_port) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_PHY_PORT_SEL, phy_port); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_phy_port_get(struct zxdh_tsn_private *tsn, + uint32_t *p_phy_port) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_PHY_PORT_SEL, p_phy_port); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_timer_id_set(struct zxdh_tsn_private *tsn, uint32_t timer_id) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_TIME_SEL, timer_id); + 
ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_timer_id_get(struct zxdh_tsn_private *tsn, uint32_t *p_time_id) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_TIME_SEL, p_time_id); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_status_get(struct zxdh_tsn_private *tsn, uint32_t *p_ram_n, + uint32_t *p_status) +{ + int32_t ret = 0; + uint32_t val = 0; + + ZXDH_TSN_COMM_CHECK_POINT(p_ram_n); + ZXDH_TSN_COMM_CHECK_POINT(p_status); + + ret = tsn_reg_read(tsn, TSN_PORT_READ_RAM_N, &val); + ZXDH_TSN_COMM_CHECK_RC(ret); + + *p_ram_n = val & 0x3; + *p_status = (val & 0x3C) >> 2; + + return TSN_OK; +} + +int32_t tsn_port_base_time_l_set(struct zxdh_tsn_private *tsn, + uint32_t base_time) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_BASE_TIME_L, base_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_base_time_h_set(struct zxdh_tsn_private *tsn, + uint32_t base_time) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_BASE_TIME_H, base_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_base_time_set(struct zxdh_tsn_private *tsn, uint64_t base_time) +{ + int32_t ret = 0; + uint32_t base_time_l = (uint32_t)((base_time)&0xffffffff); + uint32_t base_time_h = (uint32_t)((base_time >> 32) & 0xffffffff); + + ret = tsn_port_base_time_l_set(tsn, base_time_l); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_base_time_h_set(tsn, base_time_h); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_base_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_base_time) +{ + int32_t ret = 0; + uint32_t base_time_l = 0; + uint32_t base_time_h = 0; + + ZXDH_TSN_COMM_CHECK_POINT(p_base_time); + + ret = tsn_reg_read(tsn, TSN_PORT_BASE_TIME_L, &base_time_l); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_reg_read(tsn, TSN_PORT_BASE_TIME_H, &base_time_h); + ZXDH_TSN_COMM_CHECK_RC(ret); + + *p_base_time = (uint64_t)((((uint64_t)(base_time_h)) 
<< 32) | + ((uint64_t)(base_time_l))); + + return TSN_OK; +} + +int32_t tsn_port_cycle_time_l_set(struct zxdh_tsn_private *tsn, + uint32_t cycle_time) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_CYCLE_TIME_L, cycle_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_cycle_time_h_set(struct zxdh_tsn_private *tsn, + uint32_t cycle_time) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_CYCLE_TIME_H, cycle_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_cycle_time_set(struct zxdh_tsn_private *tsn, + uint64_t cycle_time) +{ + int32_t ret = 0; + uint32_t cycle_time_l = (uint32_t)((cycle_time)&0x000fffff); + uint32_t cycle_time_h = (uint32_t)((cycle_time >> 20) & 0x000fffff); + + ret = tsn_port_cycle_time_l_set(tsn, cycle_time_l); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_cycle_time_h_set(tsn, cycle_time_h); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_cycle_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_cycle_time) +{ + int32_t ret = 0; + uint32_t cycle_time_l = 0; + uint32_t cycle_time_h = 0; + + ZXDH_TSN_COMM_CHECK_POINT(p_cycle_time); + + ret = tsn_reg_read(tsn, TSN_PORT_CYCLE_TIME_L, &cycle_time_l); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_reg_read(tsn, TSN_PORT_CYCLE_TIME_H, &cycle_time_h); + ZXDH_TSN_COMM_CHECK_RC(ret); + + *p_cycle_time = (uint64_t)((((uint64_t)(cycle_time_h)) << 20) | + ((uint64_t)(cycle_time_l))); + + return TSN_OK; +} + +int32_t tsn_port_guard_band_time_set(struct zxdh_tsn_private *tsn, uint32_t cos, + uint32_t band_time) +{ + int32_t ret = 0; + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(cos, TSN_PORT_QUEUE_MAX); + + ret = tsn_reg_write(tsn, TSN_PORT_GUARD_BAND_TIME + (cos * 4), + band_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_guard_band_time_get(struct zxdh_tsn_private *tsn, uint32_t cos, + uint32_t *p_band_time) +{ + int32_t ret = 0; + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(cos, 
TSN_PORT_QUEUE_MAX); + + ret = tsn_reg_read(tsn, TSN_PORT_GUARD_BAND_TIME + (cos * 4), + p_band_time); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_default_gate_set(struct zxdh_tsn_private *tsn, + uint32_t gate_state) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_DEFAULT_GATE_EN, gate_state); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_default_gate_get(struct zxdh_tsn_private *tsn, + uint32_t *p_gate_state) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_DEFAULT_GATE_EN, p_gate_state); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_change_gate_set(struct zxdh_tsn_private *tsn, + uint32_t gate_state) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_CHANGE_GATE_EN, gate_state); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_init_finish_set(struct zxdh_tsn_private *tsn, + uint32_t init_finish) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_INIT_FINISH, init_finish); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_init_finish_get(struct zxdh_tsn_private *tsn, + uint32_t *p_init_finish) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_INIT_FINISH, p_init_finish); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_change_en_set(struct zxdh_tsn_private *tsn, uint32_t change_en) +{ + int32_t ret = 0; + + ret = tsn_reg_write(tsn, TSN_PORT_CHANGE_EN, change_en); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_change_en_get(struct zxdh_tsn_private *tsn, + uint32_t *p_change_en) +{ + int32_t ret = 0; + + ret = tsn_reg_read(tsn, TSN_PORT_CHANGE_EN, p_change_en); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_gcl_num_set(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t gcl_num) +{ + int32_t ret = 0; + uint32_t tsn_port_gcl_num[TSN_PORT_RAM_NUM] = { TSN_PORT_GCL_NUM0, + TSN_PORT_GCL_NUM1 }; + + 
ZXDH_TSN_COMM_CHECK_INDEX_MAX(ram_n, TSN_PORT_RAM_MAX); + + ret = tsn_reg_write(tsn, tsn_port_gcl_num[ram_n], gcl_num); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_gcl_num_get(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t *p_gcl_num) +{ + int32_t ret = 0; + uint32_t tsn_port_gcl_num[TSN_PORT_RAM_NUM] = { TSN_PORT_GCL_NUM0, + TSN_PORT_GCL_NUM1 }; + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(ram_n, TSN_PORT_RAM_MAX); + + ret = tsn_reg_read(tsn, tsn_port_gcl_num[ram_n], p_gcl_num); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_gcl_control_set(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t index, uint32_t gate_state, + uint32_t internal) +{ + int32_t ret = 0; + uint32_t tsn_port_gcl_value[TSN_PORT_RAM_NUM] = { TSN_PORT_GCL_VALUE0, + TSN_PORT_GCL_VALUE1 }; + + ZXDH_TSN_COMM_CHECK_INDEX_MAX(ram_n, TSN_PORT_RAM_MAX); + ZXDH_TSN_COMM_CHECK_INDEX_MAX(index, TSN_PORT_GCL_MAX); + + ret = tsn_reg_write(tsn, tsn_port_gcl_value[ram_n] + (index * 4), + (gate_state << 24) | internal); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_disable_set(struct zxdh_tsn_private *tsn) +{ + int32_t ret = 0; + + ret = tsn_port_enable_set(tsn, TSN_PORT_GATE_DISABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_init_finish_set(tsn, TSN_PORT_INIT_DISABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_port_change_en_set(tsn, TSN_PORT_CHANGE_DISABLE); + ZXDH_TSN_COMM_CHECK_RC(ret); + + return TSN_OK; +} + +int32_t tsn_port_real_tod_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_tod_time) +{ + int32_t ret = 0; + uint32_t tsn_timer_id = 0; + uint32_t tod_second_h = 0; + uint32_t tod_second_l = 0; + uint32_t tod_nanosecond = 0; + + uint32_t tsn_real_tod_nanosecond_reg_offset[TSN_PORT_TIMER_ID_NUM] = { + TSN0_REAL_TOD_NANOSECOND, TSN1_REAL_TOD_NANOSECOND, + TSN2_REAL_TOD_NANOSECOND, TSN3_REAL_TOD_NANOSECOND + }; + uint32_t tsn_real_high_tod_second_reg_offset[TSN_PORT_TIMER_ID_NUM] = { + 
TSN0_REAL_HIGH_TOD_SECOND, TSN1_REAL_HIGH_TOD_SECOND, + TSN2_REAL_HIGH_TOD_SECOND, TSN3_REAL_HIGH_TOD_SECOND + }; + uint32_t tsn_real_lower_tod_second_reg_offset[TSN_PORT_TIMER_ID_NUM] = { + TSN0_REAL_LOWER_TOD_SECOND, TSN1_REAL_LOWER_TOD_SECOND, + TSN2_REAL_LOWER_TOD_SECOND, TSN3_REAL_LOWER_TOD_SECOND + }; + + ZXDH_TSN_COMM_CHECK_POINT(p_tod_time); + + ret = tsn_port_timer_id_get(tsn, &tsn_timer_id); + ZXDH_TSN_COMM_CHECK_RC(ret); + ZXDH_TSN_COMM_CHECK_INDEX_MAX(tsn_timer_id, TSN_PORT_TIMER_ID_MAX); + + ret = tsn_read(tsn->pci_ioremap_addr + 0xC000, + tsn_real_high_tod_second_reg_offset[tsn_timer_id], + &tod_second_h); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_read(tsn->pci_ioremap_addr + 0xC000, + tsn_real_lower_tod_second_reg_offset[tsn_timer_id], + &tod_second_l); + ZXDH_TSN_COMM_CHECK_RC(ret); + + ret = tsn_read(tsn->pci_ioremap_addr + 0xC000, + tsn_real_tod_nanosecond_reg_offset[tsn_timer_id], + &tod_nanosecond); + ZXDH_TSN_COMM_CHECK_RC(ret); + + *p_tod_time = + ((((uint64_t)tod_second_h << 32) | (uint64_t)tod_second_l) * + NSEC_PER_SEC) + + tod_nanosecond; + + return TSN_OK; +} diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.h b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.h new file mode 100644 index 000000000000..acf400a93d98 --- /dev/null +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn_reg.h @@ -0,0 +1,113 @@ +#ifndef __ZXDH_TSN_REG_H__ +#define __ZXDH_TSN_REG_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define TSN_PORT_REG_BAR_SIZE (0x4000) +#define TSN_PORT_REG_BAR_OFFSET (0x14000) + +#define TSN_PORT_QBV_ENABLE (0x0004) +#define TSN_PORT_PHY_PORT_SEL (0x0008) +#define TSN_PORT_TIME_SEL (0x000C) +#define TSN_PORT_CLK_FREQ (0x0014) +#define TSN_PORT_READ_RAM_N (0x0018) +#define TSN_PORT_EXE_TIME (0x001C) +#define TSN_PORT_ITR_SHIFT (0x0020) +#define TSN_PORT_BASE_TIME_H (0x0024) +#define TSN_PORT_BASE_TIME_L (0x0028) +#define TSN_PORT_CYCLE_TIME_H (0x0030) +#define TSN_PORT_CYCLE_TIME_L (0x0034) 
+#define TSN_PORT_GUARD_BAND_TIME (0x0040) +#define TSN_PORT_DEFAULT_GATE_EN (0x0060) +#define TSN_PORT_CHANGE_GATE_EN (0x0064) +#define TSN_PORT_INIT_FINISH (0x0068) +#define TSN_PORT_CHANGE_EN (0x006C) +#define TSN_PORT_GCL_NUM0 (0x0070) +#define TSN_PORT_GCL_NUM1 (0x0074) +#define TSN_PORT_GCL_VALUE0 (0x1000) +#define TSN_PORT_GCL_VALUE1 (0x2000) + +#define TSN0_REAL_TOD_NANOSECOND (0x4240) +#define TSN0_REAL_LOWER_TOD_SECOND (0x4244) +#define TSN0_REAL_HIGH_TOD_SECOND (0x4248) +#define TSN1_REAL_TOD_NANOSECOND (0x424C) +#define TSN1_REAL_LOWER_TOD_SECOND (0x4250) +#define TSN1_REAL_HIGH_TOD_SECOND (0x4254) +#define TSN2_REAL_TOD_NANOSECOND (0x4258) +#define TSN2_REAL_LOWER_TOD_SECOND (0x425C) +#define TSN2_REAL_HIGH_TOD_SECOND (0x4260) +#define TSN3_REAL_TOD_NANOSECOND (0x4264) +#define TSN3_REAL_LOWER_TOD_SECOND (0x4268) +#define TSN3_REAL_HIGH_TOD_SECOND (0x426C) + +int32_t tsn_read(uint64_t base_addr, uint32_t offset, uint32_t *p_val); +int32_t tsn_write(uint64_t base_addr, uint32_t offset, uint32_t val); +int32_t tsn_reg_read(struct zxdh_tsn_private *tsn, uint32_t offset, + uint32_t *p_val); +int32_t tsn_reg_write(struct zxdh_tsn_private *tsn, uint32_t offset, + uint32_t val); +int32_t tsn_port_enable_set(struct zxdh_tsn_private *tsn, uint32_t enable); +int32_t tsn_port_enable_get(struct zxdh_tsn_private *tsn, uint32_t *p_enable); +int32_t tsn_port_phy_port_set(struct zxdh_tsn_private *tsn, uint32_t phy_port); +int32_t tsn_port_phy_port_get(struct zxdh_tsn_private *tsn, + uint32_t *p_phy_port); +int32_t tsn_port_timer_id_set(struct zxdh_tsn_private *tsn, uint32_t timer_id); +int32_t tsn_port_timer_id_get(struct zxdh_tsn_private *tsn, + uint32_t *p_time_id); +int32_t tsn_port_status_get(struct zxdh_tsn_private *tsn, uint32_t *p_ram_n, + uint32_t *p_status); +int32_t tsn_port_base_time_l_set(struct zxdh_tsn_private *tsn, + uint32_t base_time); +int32_t tsn_port_base_time_h_set(struct zxdh_tsn_private *tsn, + uint32_t base_time); +int32_t 
tsn_port_base_time_set(struct zxdh_tsn_private *tsn, + uint64_t base_time); +int32_t tsn_port_base_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_base_time); +int32_t tsn_port_cycle_time_l_set(struct zxdh_tsn_private *tsn, + uint32_t cycle_time); +int32_t tsn_port_cycle_time_h_set(struct zxdh_tsn_private *tsn, + uint32_t cycle_time); +int32_t tsn_port_cycle_time_set(struct zxdh_tsn_private *tsn, + uint64_t cycle_time); +int32_t tsn_port_cycle_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_cycle_time); +int32_t tsn_port_guard_band_time_set(struct zxdh_tsn_private *tsn, uint32_t cos, + uint32_t band_time); +int32_t tsn_port_guard_band_time_get(struct zxdh_tsn_private *tsn, uint32_t cos, + uint32_t *p_band_time); +int32_t tsn_port_default_gate_set(struct zxdh_tsn_private *tsn, + uint32_t gate_state); +int32_t tsn_port_default_gate_get(struct zxdh_tsn_private *tsn, + uint32_t *p_gate_state); +int32_t tsn_port_change_gate_set(struct zxdh_tsn_private *tsn, + uint32_t gate_state); +int32_t tsn_port_init_finish_set(struct zxdh_tsn_private *tsn, + uint32_t init_finish); +int32_t tsn_port_init_finish_get(struct zxdh_tsn_private *tsn, + uint32_t *p_init_finish); +int32_t tsn_port_change_en_set(struct zxdh_tsn_private *tsn, + uint32_t change_en); +int32_t tsn_port_change_en_get(struct zxdh_tsn_private *tsn, + uint32_t *p_change_en); +int32_t tsn_port_gcl_num_set(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t gcl_num); +int32_t tsn_port_gcl_num_get(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t *p_gcl_num); +int32_t tsn_port_gcl_control_set(struct zxdh_tsn_private *tsn, uint32_t ram_n, + uint32_t index, uint32_t gate_state, + uint32_t internal); +int32_t tsn_port_disable_set(struct zxdh_tsn_private *tsn); +int32_t tsn_port_real_tod_time_get(struct zxdh_tsn_private *tsn, + uint64_t *p_tod_time); + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_TSN_REG_H__ */ diff --git a/drivers/net/ethernet/dinghai/en_vf.c 
b/drivers/net/ethernet/dinghai/en_vf.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/net/ethernet/dinghai/eq.c b/drivers/net/ethernet/dinghai/eq.c new file mode 100644 index 000000000000..ece7b5b6d4cd --- /dev/null +++ b/drivers/net/ethernet/dinghai/eq.c @@ -0,0 +1,120 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int32_t dh_eq_enable(struct dh_core_dev *dev, struct dh_eq *eq, + struct notifier_block *nb) +{ + return dh_irq_attach_nb(eq->irq, nb); +} + +int32_t setup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq, + struct dh_eq_param *param, notifier_fn_t dh_eq_async_int, + const char *name, void *priv) +{ + struct dh_eq *eq_core = NULL; + int32_t err = 0; + + eq->irq_nb.notifier_call = dh_eq_async_int; + eq->priv = priv; + spin_lock_init(&eq->lock); //unused + + eq_core = &eq->core; + eq_core->irq = param->irq; + + err = dh_eq_enable(dev, &eq->core, &eq->irq_nb); + if (err != 0) { + LOG_WARN("failed to enable %s EQ %d\n", name, err); + } + + return err; +} + +void dh_eq_disable(struct dh_core_dev *dev, struct dh_eq *eq, + struct notifier_block *nb) +{ + dh_irq_detach_nb(eq->irq, nb); +} + +void dh_eq_table_cleanup(struct dh_core_dev *dev) +{ + kvfree(dev->eq_table.priv); +} + +int32_t dh_inet6_addr_change_notifier_register( + struct notifier_block *inet6_addr_change_notifier) +{ + return register_inet6addr_notifier(inet6_addr_change_notifier); +} + +int32_t dh_vxlan_netdev_change_notifier_register( + struct notifier_block *vxlan_netdev_change_notifier) +{ + return register_netdevice_notifier(vxlan_netdev_change_notifier); +} + +int32_t dh_inet6_addr_change_notifier_unregister( + struct notifier_block *inet6_addr_change_notifier) +{ + return unregister_inet6addr_notifier(inet6_addr_change_notifier); +} + +int32_t dh_vxlan_netdev_change_notifier_unregister( + struct notifier_block *vxlan_netdev_change_notifier) +{ + return 
unregister_netdevice_notifier(vxlan_netdev_change_notifier); +} + +int32_t dh_eq_notifier_register(struct dh_eq_table *eqt, struct dh_nb *nb) +{ + return atomic_notifier_chain_register(&eqt->nh[nb->event_type], + &nb->nb); +} + +int32_t dh_eq_notifier_unregister(struct dh_eq_table *eqt, struct dh_nb *nb) +{ + return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], + &nb->nb); +} + +void dh_eq_table_init(struct dh_core_dev *dev, void *table_priv) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + int32_t i; + + eq_table->priv = table_priv; + + mutex_init(&eq_table->lock); + for (i = 0; i < DH_EVENT_TYPE_MAX; i++) { + ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); + } + + eq_table->irq_table = &dev->irq_table; +} + +static uint16_t event_type_map[MSG_MODULE_NUM] = { + [MODULE_VF_BAR_MSG_TO_PF] = DH_EVENT_TYPE_NOTIFY_VF_TO_PF, + [MODULE_RISC_READY] = DH_EVENT_TYPE_RISCV_READY, + [MODULE_PF_BAR_MSG_TO_VF] = DH_EVENT_TYPE_NOTIFY_PF_TO_VF, + [MODULE_VIRTIO] = DH_EVENT_TYPE_NOTIFY_ANY, + [MODULE_DHTOOL] = DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX, + [MODULE_VQMB] = DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX, + [MODULE_RESET_MSG] = DH_EVENT_TYPE_NOTIFY_ANY, + [MODULE_DEMO] = DH_EVENT_TYPE_NOTIFY_ANY, +}; + +uint16_t dh_eq_event_type_get(uint16_t event_id) +{ + if (event_id >= MSG_MODULE_NUM) { + LOG_ERR("event_id %d is out of range\n", event_id); + return 0; + } + + return event_type_map[event_id]; +} diff --git a/drivers/net/ethernet/dinghai/events.c b/drivers/net/ethernet/dinghai/events.c new file mode 100644 index 000000000000..bb3920b1f183 --- /dev/null +++ b/drivers/net/ethernet/dinghai/events.c @@ -0,0 +1,12 @@ +#include + +void zxdh_events_work_enqueue(struct dh_core_dev *dev, struct work_struct *work) +{ + queue_work(dev->events->wq, work); +} + +void zxdh_events_cleanup(struct dh_core_dev *dev) +{ + destroy_workqueue(dev->events->wq); + kfree(dev->events); +} diff --git a/drivers/net/ethernet/dinghai/health.c b/drivers/net/ethernet/dinghai/health.c new file mode 100644 
index 000000000000..a6eba82a2851 --- /dev/null +++ b/drivers/net/ethernet/dinghai/health.c @@ -0,0 +1,1226 @@ +#include +#include +#include "en_pf.h" +#include +#include "msg_common.h" +#include "en_pf/en_pf_eq.h" +#include "en_np/init/include/dpp_np_init.h" + +#define ZXDH_RISCV_HB_OFFSET 0x5300 +#define ZXDH_M7_HB_OFFSET 0x5350 +#define ZXDH_M7_ZIOS_LOG_OFFSET 0x10b000 +#define ZXDH_M7_CGEL_LOG_OFFSET 0x3e0000 +#define ZXDH_RISCV_FWLOG_OFFSET 0x100000 +#define ZXDH_M7_LOG_SIZE 0x4010 +#define ZXDH_ZIOS_LOG_SIZE 0x700000 +#define ZXDH_CGEL_LOG_SIZE 0x120000 + +#define ZXDH_FOUR_BYTE_FF 0xffffffff + +static void zxdh_start_health_poll(struct dh_core_dev *dh_dev); + +#ifdef NEED_SYSFS_EMIT +int sysfs_emit(char *buf, const char *fmt, ...); +#endif + +enum { + ZXDH_HEALTH_POLL_INTERVAL = 1 * HZ, + M7_LOGDUMP = 6, + RISCV_LOG_DUMP = 10, + M7_MAX_MISSES = 20, + RISCV_BBX_DUMP = 40, + RISCV_MAX_MISSES = 60, +}; + +#define INVALID_SYND 0xff +enum { + RISCV_FW_EXCEPTION, + RISCV_CORE_EXCEPTION, + RISCV_COUNTER_MISSED, + BAR_ERROR, + VQM_FATAL, + BTTL_FATAL, + DRR_FATAL, + OCM_FATAL, + PCIE_FATAL, + RDMA_FATAL, + FLR_RESET, + RISCV_SYND_COUNT_MAX, + M7_COUNTER_MISSED = 32, + SYND_COUNT_MAX, +}; + +static const char *synd_name[] = { + "RISCV_FW_EXCEPTION", + "RISCV_CORE_EXCEPTION", + "RISCV_COUNTER_MISSED", + "BAR_ERROR", + "VQM_FATAL", + "BTTL_FATAL", + "DRR_FATAL", + "OCM_FATAL", + "PCIE_FATAL", + "RDMA_FATAL", + "FLR_RESET", + "M7_COUNTER_MISSED", +}; + +static void dh_health_version_get(struct zxdh_core_health *health) +{ + struct health_buffer __iomem *hb = health->riscv.hb; + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + health->health_version = ioread8(&hb->health_version); + HEAL_INFO("%s health_version: %d\n", pci_name(dh_dev->pdev), + health->health_version); +} + +struct health_attribute { + const char *name; + umode_t mode; + 
ssize_t (*store)(struct kobject *, struct kobj_attribute *, + const char *, size_t); +}; + +enum { + HEALTH_FATAL = 0, + HEALTH_SYND, + HEALTH_RECOVERY_CNT, + HEALTH_ACTION, + HEALTH_SELFHEALING, +}; + +static ssize_t health_action_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count); + +static ssize_t health_selfhealing_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct zxdh_core_health *health = container_of( + attr, struct zxdh_core_health, attrs[HEALTH_SELFHEALING]); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + int err = 0; + int selfhealing; + + err = kstrtoint(buf, 10, &selfhealing); + if (err) + return err; + + health->selfhealing = selfhealing; + HEAL_INFO("%s selfhealing = %d\n", pci_name(dh_dev->pdev), + selfhealing); + + return count; +} + +struct health_attribute health_attrs[DH_HEALTH_ATTR_NUM] = { + { "fatal", 0440, NULL }, + { "synd", 0440, NULL }, + { "recovery_cnt", 0440, NULL }, + { "action", 0640, health_action_store }, + { "selfhealing", 0640, health_selfhealing_store }, +}; + +static ssize_t health_attrs_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct zxdh_core_health *health; + int i = 0; + + for (i = 0; i < DH_HEALTH_ATTR_NUM; ++i) { + if (strcmp(attr->attr.name, health_attrs[i].name) == 0) { + break; + } + } + + HEAL_DEBUG("attr->attr.name = %s, i = %d\n", attr->attr.name, i); + if (i == DH_HEALTH_ATTR_NUM) + return -1; + + health = container_of(attr, struct zxdh_core_health, attrs[i]); + + switch (i) { + case HEALTH_FATAL: + return sysfs_emit(buf, "%d\n", health->fatal == 0 ? 
0 : 1); + case HEALTH_SYND: + return sysfs_emit(buf, "0x%lx\n", health->synd); + case HEALTH_RECOVERY_CNT: + return sysfs_emit(buf, "%d\n", health->recovery_cnt); + case HEALTH_ACTION: + return sysfs_emit(buf, "[%d] act_health_info_show, \ + [%d] act_bbx_log_dump, \ + [%d] act_reset, \ + [%d] act_reload \n", + act_health_info_show, act_bbx_log_dump, + act_reset, act_reload); + case HEALTH_SELFHEALING: + return sysfs_emit(buf, "%d\n", health->selfhealing); + } + + return -1; +} + +static void get_m7_and_riscv_counter(struct zxdh_core_health *dh_health) +{ + struct health_buffer __iomem *riscv_hb = dh_health->riscv.hb; + struct health_buffer __iomem *m7_hb = dh_health->m7.hb; + uint32_t riscv_count; + uint32_t m7_count; + + m7_count = ioread32(&m7_hb->health_counter); + riscv_count = ioread32(&riscv_hb->health_counter); + printk(KERN_INFO "** m7_health_counter: 0x%x\n", m7_count); + printk(KERN_INFO "** riscv_health_counter: 0x%x\n", riscv_count); +} + +static void zxdh_health_info_show(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + uint8_t i = 0; + + printk(KERN_INFO "***************** %s nic info *****************\n", + pci_name(dh_dev->pdev)); + printk(KERN_INFO "** board_type: %d\n", pf_dev->board_type); + printk(KERN_INFO "** health_version: %d\n", health->health_version); + printk(KERN_INFO "** health_supported: %d\n", health->health_supported); + if (!health->health_supported) + return; + printk(KERN_INFO "** bar_chan_valid: %d\n", pf_dev->bar_chan_valid); + printk(KERN_INFO "** fast_unload: %d\n", pf_dev->fast_unload); + printk(KERN_INFO "** fatal: %d\n", health->fatal); + printk(KERN_INFO + "** health->flags: %ld, 1 means ZXDH_DROP_NEW_HEALTH_WORK\n", + health->flags); + printk(KERN_INFO "** recovery_cnt: %d\n", health->recovery_cnt); + get_m7_and_riscv_counter(health); + + printk(KERN_INFO "****************** health config ***************\n"); + printk(KERN_INFO 
"** DH_HEALTH_ATTR_NUM: %d\n", DH_HEALTH_ATTR_NUM); + printk(KERN_INFO "** health->riscv.hb: 0x%p, health->m7.hb: 0x%p\n", + health->riscv.hb, health->m7.hb); + printk(KERN_INFO + "** m7_log_offset: 0x%llx, riscv_crdump_size: 0x%llx\n", + health->m7_log_offset, health->riscv_crdump_size); + printk(KERN_INFO "** timer_poll: %d\n", ZXDH_HEALTH_POLL_INTERVAL); + printk(KERN_INFO "** M7_MAX_MISSES: %d\n", M7_MAX_MISSES); + printk(KERN_INFO "** RISCV_LOG_DUMP: %d\n", RISCV_LOG_DUMP); + printk(KERN_INFO "** RISCV_MAX_MISSES: %d\n", RISCV_MAX_MISSES); + + printk(KERN_INFO "************ dh_dev->device_state: 0x%x **********\n", + dh_dev->device_state); + printk(KERN_INFO "** [%d]: ZXDH_DEVICE_STATE_UNINITIALIZED\n", + ZXDH_DEVICE_STATE_UNINITIALIZED); + printk(KERN_INFO "** [%d]: ZXDH_DEVICE_STATE_UP\n", + ZXDH_DEVICE_STATE_UP); + printk(KERN_INFO "** [%d]: ZXDH_DEVICE_STATE_INTERNAL_ERROR\n", + ZXDH_DEVICE_STATE_INTERNAL_ERROR); + + printk(KERN_INFO "******************** synd: 0x%lx *************\n", + health->synd); + for (i = 0; i < SYND_COUNT_MAX; ++i) { + if (i < RISCV_SYND_COUNT_MAX) + printk(KERN_INFO "** bit[%d]: %s set %d times\n", i, + synd_name[i], health->synd_statics[i]); + else if (i >= M7_COUNTER_MISSED) + printk(KERN_INFO "** bit[%d]: %s set %d times\n", i, + synd_name[i - 32 + RISCV_SYND_COUNT_MAX], + health->synd_statics[i]); + } + printk(KERN_INFO + "****************************************************\n"); +} + +static int32_t zxdh_pf_dh_reset_request(struct dh_core_dev *dh_dev); +static ssize_t health_action_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + struct zxdh_core_health *health = container_of( + attr, struct zxdh_core_health, attrs[HEALTH_ACTION]); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + int err = 0; + int action; + + err = kstrtoint(buf, 10, &action); + if 
(err) + return err; + + HEAL_DEBUG("action = %d\n", action); + switch (action) { + case act_health_info_show: + zxdh_health_info_show(dh_dev); + break; + case act_bbx_log_dump: + queue_work(health->wq, &health->riscv_log_saving_work); + queue_work(health->wq, &health->riscv_bbx_saving_work); + queue_work(health->wq, &health->m7_bbx_saving_work); + break; + case act_reset: + zxdh_pf_dh_reset_request(dh_dev); + break; + case act_reload: + if (!zxdh_load_one(dh_dev)) + zxdh_start_health_poll(dh_dev); + break; + } + + return count; +} + +static int zxdh_health_attr_create(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + struct kobj_attribute *attr = NULL; + int err = 0; + int i = 0; + int j = 0; + + for (i = 0; i < DH_HEALTH_ATTR_NUM; ++i) { + attr = &health->attrs[i]; + attr->attr.name = health_attrs[i].name; + attr->attr.mode = health_attrs[i].mode; + attr->show = health_attrs_show; + attr->store = health_attrs[i].store; + err = sysfs_create_file(&dh_dev->device->kobj, &attr->attr); + if (err != 0) { + HEAL_ERR("%s %s sysfs_create_file failed!\n", + pci_name(dh_dev->pdev), health_attrs[i].name); + goto cleanup; + } + } + + return 0; + +cleanup: + for (j = --i; j >= 0; --j) { + attr = &health->attrs[j]; + sysfs_remove_file(&dh_dev->device->kobj, &attr->attr); + } + return err; +} + +static void zxdh_health_attr_remove(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + struct kobj_attribute *attr = NULL; + int i = 0; + + for (i = 0; i < DH_HEALTH_ATTR_NUM; ++i) { + attr = &health->attrs[i]; + sysfs_remove_file(&dh_dev->device->kobj, &attr->attr); + } +} + +struct bbox_hdr __iomem { + uint32_t magic; + uint16_t start_offset; + uint16_t end_offset; + bool wrap; + uint8_t rsv[3]; +}; + +enum { + ZIOS_M7_LOG, + CGEL_M7_LOG, + ZIOS_RISCV_LOG1, + ZIOS_RISCV_LOG2, + CGEL_RISCV_LOG1, + 
CGEL_RISCV_LOG2, +}; + +static uint8_t log_name[6][32] = { + "zios_m7_log", + "cgel_m7_log", + "zios_riscv_log1", + "zios_riscv_log2", + "cgel_riscv_log1", + "cgel_riscv_log2", +}; + +#define EP4_DUMP_SIZE_MAX (0x10000) +static void fw_log_dump(struct dh_core_dev *dh_dev, uint8_t type) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + const char *dev_name = pci_name(dh_dev->pdev); + char filename[64]; + struct file *file; + uint8_t *buf; + uint64_t offset; + uint64_t real_offset; + uint64_t dump_size; + uint8_t i = 0; + + HEAL_DEBUG("%s_%s dump start\n", log_name[type], dev_name); + snprintf(filename, sizeof(filename), "/var/log/%s_%s.txt", + log_name[type], dev_name); + file = filp_open(filename, O_WRONLY | O_CREAT, 0640); + if (!file || IS_ERR(file)) { + HEAL_ERR("%s Error opening file %s\n", pci_name(dh_dev->pdev), + filename); + return; + } + + if ((type == ZIOS_M7_LOG) || (type == CGEL_M7_LOG)) { + offset = health->m7_log_offset; + dump_size = ZXDH_M7_LOG_SIZE; + } else { + offset = ZXDH_RISCV_FWLOG_OFFSET; + dump_size = health->riscv_crdump_size; + } + + buf = vmalloc(dump_size); + if (buf == NULL) { + HEAL_ERR("%s vmalloc buf failed\n", pci_name(dh_dev->pdev)); + goto out; + } + + memset(buf, 0, dump_size); + + if ((pf_dev->pcie_id & BIT(14)) != 0) { + for (i = 0; (i * EP4_DUMP_SIZE_MAX) < dump_size; ++i) { + real_offset = TO_EP4_ADDR(offset); + memcpy(buf + i * EP4_DUMP_SIZE_MAX, + (void __iomem *)(pf_dev->pci_ioremap_addr[0] + + real_offset), + EP4_DUMP_SIZE_MAX); + offset += EP4_DUMP_SIZE_MAX; + } + } else + memcpy(buf, + (void __iomem *)(pf_dev->pci_ioremap_addr[0] + offset), + dump_size); + + kernel_write(file, buf, dump_size, &file->f_pos); + + vfree(buf); + HEAL_DEBUG("%s_%s dump success\n", log_name[type], dev_name); +out: + filp_close(file, NULL); +} + +static void zxdh_m7_bbx_log_dump_work(struct work_struct *work) +{ + struct zxdh_core_health *health = + container_of(work, struct 
zxdh_core_health, m7_bbx_saving_work); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return; + + if (IS_STD_BOARD(pf_dev->board_type)) + fw_log_dump(dh_dev, ZIOS_M7_LOG); + else + fw_log_dump(dh_dev, CGEL_M7_LOG); +} + +static void zxdh_riscv_fw_log_dump_work(struct work_struct *work) +{ + struct zxdh_core_health *health = container_of( + work, struct zxdh_core_health, riscv_log_saving_work); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return; + + if (IS_STD_BOARD(pf_dev->board_type)) + fw_log_dump(dh_dev, ZIOS_RISCV_LOG1); + else + fw_log_dump(dh_dev, CGEL_RISCV_LOG1); +} + +static void zxdh_riscv_bbx_log_dump_work(struct work_struct *work) +{ + struct zxdh_core_health *health = container_of( + work, struct zxdh_core_health, riscv_bbx_saving_work); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return; + + if (IS_STD_BOARD(pf_dev->board_type)) + fw_log_dump(dh_dev, ZIOS_RISCV_LOG2); + else + fw_log_dump(dh_dev, CGEL_RISCV_LOG2); +} + +static void zxdh_trigger_health_work(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + unsigned long flags; + + if (!health->selfhealing) { + HEAL_INFO("%s selfhealing is not permitted\n", + pci_name(dh_dev->pdev)); + return; + } + + spin_lock_irqsave(&health->wq_lock, flags); + if (test_bit(ZXDH_DROP_NEW_HEALTH_WORK, &health->flags)) { + HEAL_ERR( + "%s new health works are not permitted at this stage\n", + 
pci_name(dh_dev->pdev)); + } else + queue_work(health->wq, &health->fw_fatal_err_work); + + spin_unlock_irqrestore(&health->wq_lock, flags); +} + +static void zxdh_riscv_cnt_check(struct core_health *health) +{ + struct zxdh_core_health *dh_health = + container_of(health, struct zxdh_core_health, riscv); + struct zxdh_pf_device *pf_dev = + container_of(dh_health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct health_buffer __iomem *hb = health->hb; + uint32_t count; + + count = ioread32(&hb->health_counter); + if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = count; + if (health->miss_counter == RISCV_MAX_MISSES) { + HEAL_ERR("%s riscv health compromised - reached miss count\n", + pci_name(dh_dev->pdev)); + set_bit(RISCV_COUNTER_MISSED, &dh_health->synd); + } else if (health->miss_counter == RISCV_LOG_DUMP) { + queue_work(dh_health->wq, &dh_health->riscv_log_saving_work); + } else if (health->miss_counter == RISCV_BBX_DUMP) { + queue_work(dh_health->wq, &dh_health->riscv_bbx_saving_work); + } +} + +static void zxdh_m7_cnt_check(struct core_health *health) +{ + struct health_buffer __iomem *hb = health->hb; + struct zxdh_core_health *dh_health = + container_of(health, struct zxdh_core_health, m7); + struct zxdh_pf_device *pf_dev = + container_of(dh_health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + uint32_t count; + + count = ioread32(&hb->health_counter); + if (count == 0) + return; + else if (count == health->prev) + ++health->miss_counter; + else + health->miss_counter = 0; + + health->prev = count; + if (health->miss_counter == M7_MAX_MISSES) { + HEAL_ERR("%s m7 health compromised - reached miss count\n", + pci_name(dh_dev->pdev)); + set_bit(M7_COUNTER_MISSED, &dh_health->synd); + } else if (health->miss_counter == M7_LOGDUMP) { + 
queue_work(dh_health->wq, &dh_health->m7_bbx_saving_work); + } +} + +#define MAX_DETECT_CNT 3 +static bool sensor_bar_error(struct zxdh_core_health *health) +{ + struct health_buffer __iomem *hb = health->riscv.hb; + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + if (ioread32(&hb->fw_version) == ZXDH_FOUR_BYTE_FF) { + health->fatal_detect_cnt++; + HEAL_INFO("%s bar_err_detect_cnt: %d\n", pci_name(dh_dev->pdev), + health->fatal_detect_cnt); + } else + health->fatal_detect_cnt = 0; + + if (health->fatal_detect_cnt == MAX_DETECT_CNT) { + health->reset_done = true; + return true; + } + + return false; +} + +static inline bool sensor_fw_synd_rfr(struct zxdh_core_health *health) +{ + struct health_buffer __iomem *hb = health->riscv.hb; + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + if (ioread8(&hb->rfr) != 1) + return false; + + if (dh_dev->coredev_type == DH_COREDEV_PF) + queue_work(health->wq, &health->dh_reset_work); + + return true; +} + +static inline bool sensor_fw_exception(const struct core_health *health) +{ + struct health_buffer __iomem *hb = health->hb; + + return (ioread8(&hb->fw_exception) == 1); +} + +static bool sensor_dh_fw_exception(struct zxdh_core_health *health) +{ + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + int32_t i = 0; + struct { + struct core_health *core; + struct work_struct *work; + const char *name; + } cores[] = { { &health->riscv, &health->riscv_bbx_saving_work, + "riscv" }, + { &health->m7, &health->m7_bbx_saving_work, "m7" } }; + + for (i = 0; i < ARRAY_SIZE(cores); i++) { + if (sensor_fw_exception(cores[i].core)) { + HEAL_ERR("%s %s 
fw_exception\n", pci_name(dh_dev->pdev), + cores[i].name); + if (!queue_work(health->wq, cores[i].work)) { + HEAL_ERR("%s Failed to queue work for %s\n", + pci_name(dh_dev->pdev), cores[i].name); + continue; + } + return 1; + } + } + return 0; +} + +static inline bool sensor_dh_reset_ok(struct core_health *health) +{ + struct health_buffer __iomem *hb = health->hb; + + return (ioread8(&hb->riscv_power_on) == 1); +} + +static bool sensor_flr_reset(struct zxdh_core_health *health) +{ + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + + if (ioread8(&pf_dev->common->device_status) != 0xb) + return false; + + health->reset_done = true; + return true; +} + +static bool sensor_cnt_missed(struct zxdh_core_health *health) +{ + if (test_bit(RISCV_COUNTER_MISSED, &health->synd)) + return true; + + if (test_bit(M7_COUNTER_MISSED, &health->synd)) + return true; + + return false; +} + +static void update_synd_statics(struct zxdh_core_health *health, uint64_t synd) +{ + int32_t i = 0; + + for (i = 0; i < 64; i++) { + if ((synd >> i) & 1) { + health->synd_statics[i]++; + } + } +} + +static void zxdh_synd_detect(struct zxdh_core_health *health) +{ + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct core_health *riscv = &health->riscv; + struct core_health *m7 = &health->m7; + struct health_buffer __iomem *hb = NULL; + uint32_t prev_synd; + uint64_t total_synd; + uint64_t prev_total_synd; + + prev_total_synd = health->synd; + + hb = riscv->hb; + prev_synd = riscv->synd; + riscv->synd = ioread32(&hb->synd); + if (riscv->synd && riscv->synd != ZXDH_FOUR_BYTE_FF && + riscv->synd != prev_synd) { + HEAL_ERR("%s riscv->synd 0x%x\n", pci_name(dh_dev->pdev), + riscv->synd); + health->synd |= riscv->synd; + } + + if (health->health_version != 1) + goto out; + + hb = m7->hb; + prev_synd = m7->synd; + m7->synd = 
ioread32(&hb->synd); + if (m7->synd && m7->synd != ZXDH_FOUR_BYTE_FF && + m7->synd != prev_synd) { + HEAL_ERR("%s m7->synd 0x%x\n", pci_name(dh_dev->pdev), + m7->synd); + total_synd = m7->synd; + health->synd |= (total_synd << 32); + } + +out: + if (health->synd && health->synd != prev_total_synd) + update_synd_statics(health, health->synd ^ prev_total_synd); +} + +static void zxdh_health_sync(struct zxdh_core_health *health, uint8_t synd) +{ + set_bit(synd, &health->synd); + update_synd_statics(health, (uint64_t)1 << (synd)); +} + +struct sensor_check { + bool (*func)(struct zxdh_core_health *); + const char *error_msg; + uint32_t sync_code; +} const checks[] = { + { sensor_bar_error, "bar error", BAR_ERROR }, + { sensor_flr_reset, "sensor_flr_reset", FLR_RESET }, + { sensor_fw_synd_rfr, "fw need rfr", INVALID_SYND }, + { sensor_dh_fw_exception, "sensor_dh_fw_exception", INVALID_SYND }, + { sensor_cnt_missed, "sensor_cnt_missed", INVALID_SYND }, +}; + +static uint8_t zxdh_health_check_fatal_sensors(struct zxdh_core_health *health) +{ + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + uint8_t i = 0; + uint8_t check_num = ARRAY_SIZE(checks); + + if (health->fatal) + check_num = 2; + + for (i = 0; i < check_num; i++) { + if (checks[i].func(health)) { + HEAL_ERR("%s %s\n", pci_name(dh_dev->pdev), + checks[i].error_msg); + if (checks[i].sync_code == INVALID_SYND) { + zxdh_synd_detect(health); + } else { + zxdh_health_sync(health, checks[i].sync_code); + } + return health->fatal++; + } + } + + return health->fatal; +} + +#define ZXDH_HEALTH_MAX_WAIT_MSECS 600000 +#define ZXDH_WAIT_CONDITION(condition, stop_valid) \ + do { \ + unsigned long end = \ + jiffies + \ + msecs_to_jiffies(ZXDH_HEALTH_MAX_WAIT_MSECS); \ + while (!(condition)) { \ + if ((stop_valid) == ZXDH_REMOVE) \ + return -ETIMEDOUT; \ + if (time_after(jiffies, end)) \ + return 
-ETIMEDOUT; \ + msleep(1000); \ + } \ + } while (0) + +static int zxdh_health_wait_dh_ok(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + + ZXDH_WAIT_CONDITION(sensor_dh_reset_ok(&health->riscv), + dh_dev->driver_process); + HEAL_INFO("%s dh is ok\n", pci_name(dh_dev->pdev)); + return 0; +} + +static int wait_vital(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + struct health_buffer __iomem *hb = health->riscv.hb; + const int niter = 600; + u32 last_count = 0; + u32 count; + int i; + + for (i = 0; i < niter; i++) { + count = ioread32(&hb->health_counter); + if (count && count != 0xffffffff) { + if (last_count && last_count != count) { + HEAL_INFO( + "%s wait vital counter value 0x%x after %d iterations\n", + pci_name(dh_dev->pdev), count, i); + return 0; + } + last_count = count; + } + if (dh_dev->driver_process == ZXDH_REMOVE) + return -ETIMEDOUT; + + msleep(1000); + } + + return -ETIMEDOUT; +} + +static inline bool dh_reload_confirm(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + DPP_PF_INFO_T pf_info = { .slot = pf_dev->slot_id, + .vport = pf_dev->vport }; + + if (!sensor_flr_reset(health)) { + pf_dev->bar_chan_valid = true; + dpp_dev_status_set(&pf_info, 1); + dh_dev->device_state = ZXDH_DEVICE_STATE_UP; + zxdh_pf_call_aux_events_with_data( + dh_dev, DH_EVENT_TYPE_AUX_STATE, &dh_dev->device_state); + return false; + } + + return true; +} + +int dh_pf_wait_riscv_ready(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + int i = 0; + + health->riscv.hb = + (struct health_buffer __iomem *)(pf_dev->pci_ioremap_addr[0] + + ZXDH_RISCV_HB_OFFSET); + health->m7.hb = + (struct health_buffer __iomem 
*)(pf_dev->pci_ioremap_addr[0] + + ZXDH_M7_HB_OFFSET); + + dh_health_version_get(health); + + if ((health->health_version != 1) && + (pf_dev->fw_compat.patch < DH_HPIRQ_PATCH)) { + HEAL_INFO("%s riscv_power_on not valid\n", + pci_name(dh_dev->pdev)); + return 0; + } + + for (i = 0; i < 200; ++i) { + if (sensor_dh_reset_ok(&health->riscv)) { + HEAL_INFO("%s wait %ds\n", pci_name(dh_dev->pdev), i); + return 0; + } + + if (dh_dev->driver_process == ZXDH_REMOVE) + return -ETIMEDOUT; + + msleep(1000); + } + + return -1; +} + +int zxdh_vf_wait_pf_ok(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + struct health_buffer __iomem *hb = health->riscv.hb; + uint8_t ep_id = (pf_dev->pcie_id >> 12) & 0x7; + uint8_t pf_id = (pf_dev->pcie_id >> 8) & 0x7; + uint8_t pf_ok = 1 << pf_id; + ZXDH_WAIT_CONDITION((ioread8(&hb->pf_status[ep_id]) & pf_ok) != 0, + dh_dev->driver_process); + return 0; +} + +static int zxdh_pf_health_msg_send(struct dh_core_dev *dh_dev, + union zxdh_msg *msg) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (pf_dev->health.health_version != 1) + return -1; + + return zxdh_pf_msg_send_cmd(dh_dev, MODULE_HEALTH, msg, msg, ¶); +} + +int zxdh_pf_status_ok(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + union zxdh_msg *msg = NULL; + int32_t err = 0; + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + HEAL_ERR("%s kzalloc(%lu, GFP_KERNEL) failed\n", + pci_name(dh_dev->pdev), sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.health_hdr.opcode = 1; + msg->payload.pf_status_msg.pcie_id = pf_dev->pcie_id; + msg->payload.health_hdr.sum_check = + sum_func(&msg->payload.pf_status_msg, 2); + + err = 
zxdh_pf_health_msg_send(dh_dev, msg); + kfree(msg); + return err; +} + +#define PCIE_CONFIG_STORE (0x1c) +int zxdh_pf_pcie_config_store(struct dh_core_dev *dh_dev) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + HEAL_ERR("%s kzalloc(%lu, GFP_KERNEL) failed\n", + pci_name(dh_dev->pdev), sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.health_hdr.opcode = 0; + msg->payload.health_config_msg.act = PCIE_CONFIG_STORE; + msg->payload.health_hdr.sum_check = PCIE_CONFIG_STORE; + + err = zxdh_pf_health_msg_send(dh_dev, msg); + kfree(msg); + return err; +} + +#define DH_RESET_REQUEST (0x1a) +static int32_t zxdh_pf_dh_reset_request(struct dh_core_dev *dh_dev) +{ + union zxdh_msg *msg = NULL; + int32_t err = 0; + + if (dh_dev->coredev_type == DH_COREDEV_VF) + return 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + HEAL_ERR("%s kzalloc(%lu, GFP_KERNEL) failed\n", + pci_name(dh_dev->pdev), sizeof(union zxdh_msg)); + return -1; + } + + msg->payload.health_hdr.opcode = 0; + msg->payload.health_config_msg.act = DH_RESET_REQUEST; + msg->payload.health_hdr.sum_check = DH_RESET_REQUEST; + + err = zxdh_pf_health_msg_send(dh_dev, msg); + HEAL_INFO("%s dh reset request, err = %d\n", pci_name(dh_dev->pdev), + err); + kfree(msg); + return err; +} + +static void poll_health(struct timer_list *t) +{ + struct zxdh_core_health *health = from_timer(health, t, timer); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + uint8_t fatal = 0; + + if (pf_dev->aux_comp_flag == 0) + goto out; + + fatal = zxdh_health_check_fatal_sensors(health); + if (fatal != health->fatal) { + HEAL_ERR("%s Fatal error detected: %d\n", + pci_name(dh_dev->pdev), health->fatal); + dh_dev->device_state = 
ZXDH_DEVICE_STATE_INTERNAL_ERROR; + zxdh_pf_call_aux_events_with_data( + dh_dev, DH_EVENT_TYPE_AUX_STATE, &dh_dev->device_state); + if (health->reset_done) + return zxdh_trigger_health_work(dh_dev); + } + + zxdh_riscv_cnt_check(&health->riscv); + if (health->health_version == 1) + zxdh_m7_cnt_check(&health->m7); + zxdh_synd_detect(health); + +out: + mod_timer(&health->timer, jiffies + ZXDH_HEALTH_POLL_INTERVAL); +} + +static void zxdh_start_health_poll(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + + clear_bit(ZXDH_DROP_NEW_HEALTH_WORK, &health->flags); + health->synd = 0; + health->fatal = 0; + health->reset_done = false; + health->fatal_detect_cnt = 0; + mod_timer(&health->timer, jiffies + ZXDH_HEALTH_POLL_INTERVAL); +} + +static int zxdh_health_try_recover(struct dh_core_dev *dh_dev) +{ + HEAL_INFO("%s handling bad device here\n", pci_name(dh_dev->pdev)); + if (wait_vital(dh_dev)) { + HEAL_ERR("%s wait_vital time out\n", pci_name(dh_dev->pdev)); + return -EIO; + } + if (zxdh_health_wait_dh_ok(dh_dev)) { + HEAL_ERR("%s zxdh_health_wait_dh_ok time out\n", + pci_name(dh_dev->pdev)); + return -EIO; + } + if (!dh_reload_confirm(dh_dev)) { + HEAL_INFO("%s no need to reload\n", pci_name(dh_dev->pdev)); + goto out; + } + + zxdh_unload_one(dh_dev); + HEAL_INFO("%s zxdh_unload_one finish\n", pci_name(dh_dev->pdev)); + if (zxdh_load_one(dh_dev)) { + HEAL_ERR("%s zxdh_load_one failed\n", pci_name(dh_dev->pdev)); + return -EIO; + } + +out: + zxdh_start_health_poll(dh_dev); + return 0; +} + +static void zxdh_dh_reset_work(struct work_struct *work) +{ + struct zxdh_core_health *health = + container_of(work, struct zxdh_core_health, dh_reset_work); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + zxdh_pf_dh_reset_request(dh_dev); +} + +static void 
zxdh_fw_fatal_err_work(struct work_struct *work) +{ + struct zxdh_core_health *health = + container_of(work, struct zxdh_core_health, fw_fatal_err_work); + struct zxdh_pf_device *pf_dev = + container_of(health, struct zxdh_pf_device, health); + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + DPP_PF_INFO_T pf_info = { .slot = pf_dev->slot_id, + .vport = pf_dev->vport }; + + HEAL_INFO("%s zxdh_fw_fatal_err_work start\n", pci_name(dh_dev->pdev)); + if (dh_dev->coredev_type == DH_COREDEV_PF) + dpp_dev_status_set(&pf_info, 0); + + pf_dev->bar_chan_valid = false; + if (health->health_version != 1) + return; + + zxdh_health_try_recover(dh_dev); +} + +void zxdh_drain_health_wq(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + unsigned long flags; + + if (!health->health_supported) + return; + + spin_lock_irqsave(&health->wq_lock, flags); + set_bit(ZXDH_DROP_NEW_HEALTH_WORK, &health->flags); + spin_unlock_irqrestore(&health->wq_lock, flags); + cancel_work_sync(&health->fw_fatal_err_work); + cancel_work_sync(&health->dh_reset_work); + cancel_work_sync(&health->m7_bbx_saving_work); + cancel_work_sync(&health->riscv_bbx_saving_work); + cancel_work_sync(&health->riscv_log_saving_work); +} + +static void zxdh_stop_health_poll(struct dh_core_dev *dh_dev, + bool disable_health) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + unsigned long flags; + + if (disable_health) { + spin_lock_irqsave(&health->wq_lock, flags); + set_bit(ZXDH_DROP_NEW_HEALTH_WORK, &health->flags); + spin_unlock_irqrestore(&health->wq_lock, flags); + } + + del_timer_sync(&health->timer); +} + +void zxdh_health_cleanup(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + + if (!health->health_supported) + return; + + 
zxdh_stop_health_poll(dh_dev, true); + destroy_workqueue(health->wq); + zxdh_health_attr_remove(dh_dev); +} + +int zxdh_crdump_size_get(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + uint8_t board_type = pf_dev->board_type; + struct zxdh_core_health *health = &pf_dev->health; + + if (IS_STD_BOARD(board_type)) { + health->m7_log_offset = ZXDH_M7_ZIOS_LOG_OFFSET; + health->riscv_crdump_size = ZXDH_ZIOS_LOG_SIZE; + } else if (IS_INIC_BOARD(board_type)) { + health->m7_log_offset = ZXDH_M7_CGEL_LOG_OFFSET; + health->riscv_crdump_size = ZXDH_CGEL_LOG_SIZE; + } else { + HEAL_INFO("%s invalid board_type: %d\n", pci_name(dh_dev->pdev), + board_type); + return -1; + } + + if ((dh_dev->coredev_type == DH_COREDEV_PF) && + (ZXDH_RISCV_FWLOG_OFFSET + health->riscv_crdump_size > + pci_resource_len(dh_dev->pdev, 0))) { + HEAL_ERR("%s pci_resource_len: %llx\n", pci_name(dh_dev->pdev), + pci_resource_len(dh_dev->pdev, 0)); + return -1; + } + + return 0; +} + +int zxdh_health_init(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + struct zxdh_core_health *health = &pf_dev->health; + char *name; + int err = 0; + + if (health->health_version > 1) + goto out; + + if (zxdh_crdump_size_get(dh_dev)) + goto out; + + name = kmalloc(64, GFP_KERNEL); + if (!name) + return -ENOMEM; + + strcpy(name, "zxdh_health"); + strcat(name, dev_name(dh_dev->device)); + health->wq = create_singlethread_workqueue(name); + kfree(name); + + if (!health->wq) + return -ENOMEM; + + spin_lock_init(&health->wq_lock); + INIT_WORK(&health->fw_fatal_err_work, zxdh_fw_fatal_err_work); + INIT_WORK(&health->dh_reset_work, zxdh_dh_reset_work); + INIT_WORK(&health->m7_bbx_saving_work, zxdh_m7_bbx_log_dump_work); + INIT_WORK(&health->riscv_log_saving_work, zxdh_riscv_fw_log_dump_work); + INIT_WORK(&health->riscv_bbx_saving_work, zxdh_riscv_bbx_log_dump_work); + + if (health->health_version == 1) { + err = zxdh_pf_rp_config_init(dh_dev); + if 
(err != 0) { + HEAL_ERR("%s zxdh_pf_rp_config_init failed: %d\n", + pci_name(dh_dev->pdev), err); + goto destroy_wq; + } + err = zxdh_pf_status_ok(dh_dev); + if (err != 0) { + HEAL_ERR("%s zxdh_pf_status_ok failed: %d\n", + pci_name(dh_dev->pdev), err); + goto destroy_wq; + } + } + + if (zxdh_health_attr_create(dh_dev) != 0) + goto destroy_wq; + + timer_setup(&health->timer, poll_health, 0); + health->health_supported = true; + health->selfhealing = 1; + zxdh_start_health_poll(dh_dev); + return 0; + +destroy_wq: + destroy_workqueue(health->wq); + return -ENOEXEC; +out: + health->health_supported = false; + HEAL_INFO("%s health buffer not supported\n", pci_name(dh_dev->pdev)); + return 0; +} diff --git a/drivers/net/ethernet/dinghai/irq_affinity.c b/drivers/net/ethernet/dinghai/irq_affinity.c new file mode 100644 index 000000000000..b577a7365c5e --- /dev/null +++ b/drivers/net/ethernet/dinghai/irq_affinity.c @@ -0,0 +1,319 @@ +#include +#include +#include + +static void cpu_put(struct dh_irq_pool *pool, int32_t cpu) +{ + pool->irqs_per_cpu[cpu]--; +} + +static void cpu_get(struct dh_irq_pool *pool, int32_t cpu) +{ + pool->irqs_per_cpu[cpu]++; +} + +/* Gets the least loaded CPU. e.g.: the CPU with least IRQs bound to it */ +static int32_t cpu_get_least_loaded(struct dh_irq_pool *pool, + const struct cpumask *req_mask) +{ + int32_t best_cpu = -1; + int32_t cpu; + + for_each_cpu_and(cpu, req_mask, cpu_online_mask) { + /* CPU has zero IRQs on it. No need to search any more CPUs. 
*/ + if (!pool->irqs_per_cpu[cpu]) { + best_cpu = cpu; + break; + } + if (best_cpu < 0) { + best_cpu = cpu; + } + if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu]) { + best_cpu = cpu; + } + } + + if (best_cpu == -1) { + /* There isn't online CPUs in req_mask */ + LOG_ERR("NO online CPUs in req_mask (%*pbl)\n", + cpumask_pr_args(req_mask)); + best_cpu = cpumask_first(cpu_online_mask); + } + pool->irqs_per_cpu[best_cpu]++; + + return best_cpu; +} + +/* Creating an IRQ from irq_pool */ +struct dh_irq *irq_pool_request_irq(struct dh_irq_pool *pool, + const struct cpumask *req_mask) +{ + cpumask_var_t auto_mask; + struct dh_irq *irq = NULL; + u32 irq_index = 0; + int32_t err = 0; + + if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL)) { + LOG_ERR("zalloc_cpumask_var failed, ERR_PTR(-ENOMEM)=0x%llx", + (unsigned long long)ERR_PTR(-ENOMEM)); + return ERR_PTR(-ENOMEM); + } + + err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, + GFP_KERNEL); + if (err) { + if (err == -EBUSY) { + err = -EUSERS; + } + LOG_ERR("xa_alloc failed, ERR_PTR(err)=0x%llx", + (unsigned long long)ERR_PTR(err)); + return ERR_PTR(err); + } + + if (pool->irqs_per_cpu) { + if (cpumask_weight(req_mask) > 1) { + /* if req_mask contain more then one CPU, set the least loadad CPU + * of req_mask + */ + cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), + auto_mask); + } else { + cpu_get(pool, cpumask_first(req_mask)); + } + } + + irq = dh_irq_alloc(pool, irq_index, + cpumask_empty(auto_mask) ? req_mask : auto_mask); + if (IS_ERR_OR_NULL(irq)) { + LOG_ERR("dh_irq_alloc failed, irq=%p\n", irq); + return irq; + } + free_cpumask_var(auto_mask); + + return irq; +} + +/* Looking for the IRQ with the smallest refcount that fits req_mask. + * If pool is sf_comp_pool, then we are looking for an IRQ with any of the + * requested CPUs in req_mask. + * for example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask + * isn't subset of req_mask, so we will skip it. 
irq1_mask is subset of req_mask, + * we don't skip it. + * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will + * fit. And since mask is subset of itself, we will pass the first if bellow. + */ +static struct dh_irq *irq_pool_find_least_loaded(struct dh_irq_pool *pool, + const struct cpumask *req_mask) +{ + int32_t start = pool->xa_num_irqs.min; + int32_t end = pool->xa_num_irqs.max; + struct dh_irq *irq = NULL; + struct dh_irq *iter; + int32_t irq_refcount = 0; + unsigned long index; + + lockdep_assert_held(&pool->lock); + xa_for_each_range(&pool->irqs, index, iter, start, end) { + struct cpumask *iter_mask = dh_irq_get_affinity_mask(iter); + int32_t iter_refcount = dh_irq_read_locked(iter); + + if (!cpumask_subset(iter_mask, req_mask)) { + /* skip IRQs with a mask which is not subset of req_mask */ + continue; + } + if (iter_refcount < pool->min_threshold) { + /* If we found an IRQ with less than min_thres, return it */ + return iter; + } + if (!irq || iter_refcount < irq_refcount) { + /* In case we won't find an IRQ with less than min_thres, + * keep a pointer to the least used IRQ + */ + irq_refcount = iter_refcount; + irq = iter; + } + } + + return irq; +} + +/** + * dh_irq_affinity_request - request an IRQ according to the given mask. + * @pool: IRQ pool to request from. + * @req_mask: cpumask requested for this IRQ. + * + * This function returns a pointer to IRQ, or ERR_PTR in case of error. 
+ */ +struct dh_irq *dh_irq_affinity_request(struct dh_irq_pool *pool, + const struct cpumask *req_mask) +{ + struct dh_irq *least_loaded_irq = NULL; + struct dh_irq *new_irq = NULL; + + mutex_lock(&pool->lock); + + least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask); + if (least_loaded_irq && + dh_irq_read_locked(least_loaded_irq) < pool->min_threshold) { + LOG_ERR("least_loaded_irq error: pool->min_threshold=%d\r\n", + pool->min_threshold); + goto out; + } + + /* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */ + new_irq = irq_pool_request_irq(pool, req_mask); + if (IS_ERR_OR_NULL(new_irq)) { + if (!least_loaded_irq) { + /* We failed to create an IRQ and we didn't find an IRQ */ + LOG_ERR("Didn't find a matching IRQ. err = %ld\n", + PTR_ERR(new_irq)); + mutex_unlock(&pool->lock); + return new_irq; + } + /* We failed to create a new IRQ for the requested affinity, + * sharing existing IRQ. + */ + LOG_ERR("new_irq error\r\n"); + goto out; + } + + least_loaded_irq = new_irq; + goto unlock; + +out: + dh_irq_get_locked(least_loaded_irq); + if (dh_irq_read_locked(least_loaded_irq) > pool->max_threshold) + LOG_DEBUG( + "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", + pci_irq_vector(pool->dev->pdev, + dh_irq_get_index(least_loaded_irq)), + pool->name, + dh_irq_read_locked(least_loaded_irq) / + DH_EQ_REFS_PER_IRQ); +unlock: + mutex_unlock(&pool->lock); + return least_loaded_irq; +} + +void dh_irq_affinity_irqs_release(struct dh_irq_pool *pool, + struct dh_irq **irqs, int32_t num_irqs) +{ + int32_t i; + + for (i = 0; i < num_irqs; i++) { + int32_t cpu = cpumask_first(dh_irq_get_affinity_mask(irqs[i])); + + synchronize_irq(pci_irq_vector(pool->dev->pdev, + dh_irq_get_index(irqs[i]))); + + if (pool->irqs_per_cpu) { + cpu_put(pool, cpu); + } + } +} + +/** + * dh_irq_affinity_irqs_request_auto - request one or more IRQs for zxdh device. + * @pool: requesting the IRQs from the irqs pool. + * @num_irqs: number of IRQs to request. 
+ * @irqs: an output array of IRQs pointers. + * @numa: NUMA node of the CPU that handles the IRQS. + * + * Each IRQ is bounded to at most 1 CPU. + * This function is requesting IRQs according to the default assignment. + * The default assignment policy is: + * - in each iteration, request the least loaded IRQ which is not bound to any + * CPU of the previous IRQs requested. + * + * This function returns the number of IRQs requested, (which might be smaller than + * @nirqs), if successful, or a negative error code in case of an error. + */ +int32_t dh_irq_affinity_irqs_request_auto(struct dh_irq_pool *pool, + struct dh_irq **irqs, + int32_t num_irqs, int numa) +{ + cpumask_var_t req_mask; + struct dh_irq *irq = NULL; + int32_t i, j; + int num_cpus; + int *cpus = NULL; + int cpu_index; + int pair_offset; + int irq_idx = 0; + + if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) { + LOG_ERR("zalloc_cpumask_var failed for req_mask\n"); + return -ENOMEM; + } + + // 生成 NUMA 节点的在线 CPU 掩码 + if (numa == NUMA_NO_NODE) { + LOG_INFO("NUMA_NO_NODE\n"); + cpumask_copy(req_mask, cpu_online_mask); + } else { + cpumask_copy( + req_mask, + cpumask_of_node(numa)); // 获取 NUMA 节点的 CPU 掩码 + cpumask_and(req_mask, req_mask, + cpu_online_mask); // 过滤在线 CPU + } + num_cpus = cpumask_weight(req_mask); + if (num_cpus == 0) { + LOG_ERR("NUMA node %d has no online CPUs!\n", numa); + goto clean_req_mask; + } + + // 将掩码转换为 CPU 编号数组 + cpus = kcalloc(num_cpus, sizeof(*cpus), GFP_KERNEL); + if (!cpus) { + LOG_ERR("cpus kcalloc failed!\n"); + goto clean_req_mask; + } + + j = 0; + for_each_cpu(i, req_mask) + cpus[j++] = i; + + // 绑定 IRQ 对到循环分配的 CPU + for (i = 0; i < num_irqs; i += 2) { + // 计算 CPU 索引 + cpu_index = (i / 2) % num_cpus; + + // 设置 CPU 掩码 + cpumask_clear(req_mask); + cpumask_set_cpu(cpus[cpu_index], req_mask); + + // 处理当前 IRQ 对 + for (pair_offset = 0; pair_offset < 2; pair_offset++) { + irq_idx = i + pair_offset; + if (irq_idx >= num_irqs) + goto out; + irq = irq_pool_request_irq(pool, 
req_mask); + if (IS_ERR_OR_NULL(irq)) { + LOG_ERR("irq_pool_request_irq %d failed, req_mask=%p, irq=%p", + irq_idx, req_mask, irq); + goto cleanup; + } + irqs[irq_idx] = irq; + LOG_DEBUG( + "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n", + pci_irq_vector(pool->dev->pdev, + dh_irq_get_index(irq)), + cpumask_pr_args(dh_irq_get_affinity_mask(irq)), + dh_irq_read_locked(irq) / DH_EQ_REFS_PER_IRQ); + } + } + +out: + free_cpumask_var(req_mask); + kfree(cpus); + return num_irqs; + +cleanup: + kfree(cpus); + dh_irq_affinity_irqs_release(pool, irqs, irq_idx); + dh_irqs_release_vectors(irqs, irq_idx); +clean_req_mask: + free_cpumask_var(req_mask); + return PTR_ERR(irq); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/lag/lag.c b/drivers/net/ethernet/dinghai/lag/lag.c new file mode 100644 index 000000000000..823d53d06f65 --- /dev/null +++ b/drivers/net/ethernet/dinghai/lag/lag.c @@ -0,0 +1,869 @@ +#include +#include +#include +#include +#include +#include "lag.h" +#include "en_pf.h" +#include "dpp_tbl_api.h" +#include "../en_aux.h" + +static DEFINE_SPINLOCK(lag_lock); +static DEFINE_MUTEX(zxdh_intf_mutex); + +void zxdh_dev_list_lock(void) +{ + mutex_lock(&zxdh_intf_mutex); +} + +void zxdh_dev_list_unlock(void) +{ + mutex_unlock(&zxdh_intf_mutex); +} + +int zxdh_dev_list_trylock(void) +{ + return mutex_trylock(&zxdh_intf_mutex); +} + +struct zxdh_lag *lag_get_ldev(struct dh_core_dev *dh_dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + return pf_dev->ldev; +} + +void lag_set_ldev(struct dh_core_dev *dh_dev, struct zxdh_lag *ldev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + pf_dev->ldev = ldev; +} + +void ldev_kref_free(struct kref *ref) +{ + struct zxdh_lag *ldev = container_of(ref, struct zxdh_lag, ref); + + if (ldev->nb.notifier_call) { + unregister_netdevice_notifier_net(&init_net, &ldev->nb); + } + + cancel_delayed_work_sync(&ldev->bond_work); + destroy_workqueue(ldev->wq); + + lag_set_ldev(ldev->parent, 
NULL); + kfree(ldev); + + LAG_LOG_INFO("pf lag device alloced memory free \n"); +} + +static void zxdh_queue_bond_work(struct zxdh_lag *ldev, unsigned long delay) +{ + queue_delayed_work(ldev->wq, &ldev->bond_work, delay); +} + +void lag_dev_release(struct dh_core_dev *dh_dev, struct zxdh_lag *ldev) +{ + int i; + struct dh_core_dev *parent; + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + if (!ldev->lagfunc[i].dev) { + continue; + } + + parent = ldev->lagfunc[i].dev->parent; + if (parent == dh_dev) { + ldev->lagfunc[i].dev = NULL; + } + } +} + +/** + * lag_get_netdev_idx - get the index of netdev generated by en_aux + * @ldev: zxdh lag dev + * @ndev: netdev to search for + **/ +int32_t lag_get_netdev_idx(struct zxdh_lag *ldev, struct net_device *ndev) +{ + int i; + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + if (ldev->lagfunc[i].netdev == ndev) { + return i; + } + } + + return -ENOENT; +} + +void ldev_remove_netdev(struct dh_core_dev *dh_dev, struct net_device *netdev) +{ + int32_t index = 0; + struct zxdh_lag *ldev; + + ldev = lag_get_ldev(dh_dev->parent); + if (!ldev) { + return; + } + + spin_lock(&lag_lock); + + index = lag_get_netdev_idx(ldev, netdev); + if (index < 0) { + goto out; + } + + ldev->lagfunc[index].netdev = NULL; +out: + spin_unlock(&lag_lock); +} + +/** + * ldev_add_netdev - add net_device to lag dev + * @dev: zxdh core device + * @id: the index that joined to ldev + * @netdev: netdev to adding for + * @attr: some configs info from aux + **/ +static void ldev_add_netdev(struct dh_core_dev *dh_dev, uint16_t id, + struct net_device *netdev, + struct zxdh_lag_attrs *attr) +{ + struct zxdh_lag *ldev; + + if (id >= ZXDH_MAX_PORTS) { + LAG_LOG_ERR("%s: index[%hu] out of range. 
\n", + netdev_name(netdev), id); + return; + } + + ldev = lag_get_ldev(dh_dev->parent); + + spin_lock(&lag_lock); + ldev->lagfunc[id].dev = dh_dev; + ldev->lagfunc[id].netdev = netdev; + ldev->lagfunc[id].attrs = *attr; + ldev->lagfunc[id].valid = true; + ldev->tracker.netdev_state[id].link_up = 0; + ldev->tracker.netdev_state[id].tx_enabled = 0; + ldev->lag_func_index++; + spin_unlock(&lag_lock); + + zxdh_queue_bond_work(ldev, 0); +} + +static bool zxdh_netdev_belongs(struct zxdh_lag *ldev, + struct net_device *netdev) +{ + int32_t idx = 0; + + idx = lag_get_netdev_idx(ldev, netdev); + if (idx < 0) { + return false; + } + + return true; +} + +/** + * handle_changeupper_event - handle change upper device event + * @ldev: zxdh lag dev + * @tracker: the tracker of zxdh lag dev + * @ndev: upper netdev + * @info: upper dev attribute passed from bonding + **/ +static int +handle_changeupper_event(struct zxdh_lag *ldev, struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *upper = info->upper_dev, *ndev_tmp, *slave_dev; + struct netdev_lag_upper_info *lag_upper_info = NULL; + bool is_bonded = false; + bool mode_supported = false; + int changed = 0; + int bond_status = 0; + int num_slaves = 0; + int idx = 0; + + if (!netif_is_lag_master(upper)) { + LAG_LOG_INFO("changeupper received, but not master. \n"); + return 0; + } + + slave_dev = netdev_notifier_info_to_dev((void *)info); + if (!zxdh_netdev_belongs(ldev, slave_dev)) { + LAG_LOG_INFO("%s is not dinghai bond, ignore. \n", upper->name); + return 0; + } + + if (info->linking) { + lag_upper_info = info->upper_info; + } + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + idx = lag_get_netdev_idx(ldev, ndev_tmp); + if (idx >= 0) { + bond_status |= (1 << idx); + } + + num_slaves++; + } + rcu_read_unlock(); + + ldev->slaves = bond_status; + + /* None of this lagdev's netdevs are slaves of this master. 
*/ + if (!bond_status) { + is_bonded = false; + goto out; + } + + if (lag_upper_info) { + tracker->tx_type = lag_upper_info->tx_type; + tracker->hash_type = lag_upper_info->hash_type; + } + + LAG_LOG_INFO( + "lag tx type: %d [%d-active-backup %d-hash], hash type %d. \n", + tracker->tx_type, NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, + NETDEV_LAG_TX_TYPE_HASH, tracker->hash_type); + + /* Lag mode must be activebackup or hash. */ + mode_supported = + (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) || + (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH); + + /* if slaves exist */ + is_bonded = !!bond_status && mode_supported; + +out: + if ((tracker->is_bonded != is_bonded)) { + tracker->is_bonded = is_bonded; + changed = 1; + } + + LAG_LOG_INFO( + "master: %s slaves num: %d, mask: 0x%x, changed: %d, mode supported: %s\n", + upper->name, num_slaves, bond_status, changed, + mode_supported ? "true" : "false"); + + return changed; +} + +/** + * handle_changelowerstate_event - handle changed lower device state event + * @ldev: zxdh lag dev + * @tracker: the tracker of zxdh lag dev + * @ndev: lower netdev + * @info: lower dev attribute passed from bonding + **/ +static int handle_changelowerstate_event( + struct zxdh_lag *ldev, struct lag_tracker *tracker, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + int idx; + + if (!netif_is_lag_port(ndev)) { + return 0; + } + + idx = lag_get_netdev_idx(ldev, ndev); + if (idx < 0) { + return 0; + } + + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) { + return 0; + } + + /* update netdev state from bonding */ + tracker->netdev_state[idx] = *lag_lower_info; + + LAG_LOG_INFO("%s: link up %u tx enable %u \n", ndev->name, + (uint32_t)lag_lower_info->link_up, + (uint32_t)lag_lower_info->tx_enabled); + + return 1; +} + +bool lag_check_aux_netdev_ready(struct zxdh_lag *ldev) +{ + int ix = 0; + int lag_netdev_cnt = 0; + int lag_pannel_num = 
zxdh_pf_get_pannel_port_num(ldev->parent); + + for (ix = 0; ix < ZXDH_MAX_PORTS; ix++) { + if (ldev->lagfunc[ix].netdev != NULL) { + lag_netdev_cnt++; + } + } + + return (lag_pannel_num == lag_netdev_cnt) ? true : false; +} + +static int lag_netdev_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct lag_tracker tracker; + struct zxdh_lag *ldev = NULL; + int changed = 0; + + if ((event != NETDEV_CHANGEUPPER) && + (event != NETDEV_CHANGELOWERSTATE)) { + return NOTIFY_DONE; + } + + ldev = container_of(this, struct zxdh_lag, nb); + if (!lag_check_aux_netdev_ready(ldev)) { + return NOTIFY_DONE; + } + + LAG_LOG_INFO("received %s\n", netdev_cmd_to_name(event)); + + tracker = ldev->tracker; + + switch (event) { + case NETDEV_CHANGEUPPER: { + changed = handle_changeupper_event(ldev, &tracker, ndev, ptr); + break; + } + case NETDEV_CHANGELOWERSTATE: { + changed = handle_changelowerstate_event(ldev, &tracker, ndev, + ptr); + break; + } + } + + ldev->tracker = tracker; + + if (changed) { + zxdh_queue_bond_work(ldev, 0); + } + + return NOTIFY_DONE; +} + +static int32_t lag_find_first_netdev_pf_info(struct zxdh_lag *ldev, + DPP_PF_INFO_T *pf_info) +{ + int i; + struct lag_func *func; + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + func = &ldev->lagfunc[i]; + + if (func->netdev) { + pf_info->vport = func->attrs.vport; + pf_info->slot = func->attrs.slot_id; + return 0; + } + } + + return -EOPNOTSUPP; +} + +static int32_t zxdh_disable_lag(struct zxdh_lag *ldev) +{ + int i; + uint8_t phy_port = 0; + struct lag_func *func; + DPP_PF_INFO_T pf_info = { 0 }; + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + func = &ldev->lagfunc[i]; + + if (!func->netdev) { + continue; + } + + pf_info.slot = func->attrs.slot_id; + pf_info.vport = func->attrs.vport; + phy_port = func->attrs.phy_port; + + dpp_lag_group_member_del(&pf_info, ldev->lag_id, phy_port); + dpp_uplink_phy_attr_set(&pf_info, phy_port, + 
UPLINK_PHY_PORT_BOND_LINK_UP, + LAG_FLAGS_DISABLE); + } + + ldev->flags &= ~ZXDH_LAG_MODE_FLAGS; + LAG_LOG_INFO("zxdh lag disabled. vport 0x%hx lag id %d.\n", + pf_info.vport, ldev->lag_id); + + return 0; +} + +static int32_t zxdh_lag_active_backup(struct zxdh_lag *ldev) +{ + int i; + bool porten = false; + uint8_t phy_port = ZXDH_ACTIVE_PHY_PORT_NA; + uint8_t active_phy_port = ZXDH_ACTIVE_PHY_PORT_NA; + int32_t err = 0; + struct lag_func *func; + struct lag_tracker *tracker = &ldev->tracker; + DPP_PF_INFO_T pf_info = { 0 }; + + err = lag_find_first_netdev_pf_info(ldev, &pf_info); + if (err != 0) { + LAG_LOG_ERR("vport %d is incorrect\n", pf_info.vport); + return -EOPNOTSUPP; + } + + dpp_lag_mode_set(&pf_info, ldev->lag_id, LAG_MODE_ACTIVE_BACKUP); + + for (i = ZXDH_MAX_PORTS - 1; i >= 0; i--) { + func = &ldev->lagfunc[i]; + + if (!func->netdev) { + continue; + } + + phy_port = func->attrs.phy_port; + pf_info.slot = func->attrs.slot_id; + pf_info.vport = func->attrs.vport; + + dpp_lag_group_member_del(&pf_info, ldev->lag_id, phy_port); + dpp_uplink_phy_attr_set(&pf_info, phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, + LAG_FLAGS_DISABLE); + + porten = tracker->netdev_state[i].tx_enabled && + tracker->netdev_state[i].link_up; + if (porten && (ldev->slaves & (1 << i))) { + active_phy_port = phy_port; + LAG_LOG_INFO( + "lag_id %u: [backup] select acitve phyport %hu pannel port %hu \n", + ldev->lag_id, active_phy_port, + func->attrs.pannel_id); + } + + LAG_LOG_INFO( + "lag_id %u: [backup] phyport %hhu pannel port %hhu tx enable %d link up %d\n", + ldev->lag_id, phy_port, func->attrs.pannel_id, + (int32_t)tracker->netdev_state[i].tx_enabled, + (int32_t)tracker->netdev_state[i].link_up); + } + + if (active_phy_port != ZXDH_ACTIVE_PHY_PORT_NA) { + dpp_lag_group_member_add(&pf_info, ldev->lag_id, + active_phy_port); + dpp_uplink_phy_attr_set(&pf_info, active_phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, + LAG_FLAGS_ENABLE); + } + + ldev->flags |= ZXDH_LAG_FLAG_BACKUP; + + return 0; 
+} + +uint32_t zxdh_covert_hash_type_2_np(uint32_t hash_type) +{ + uint32_t np_hash_type = 0; + + switch (hash_type) { + case NETDEV_LAG_HASH_L2: { + np_hash_type = ZXDH_NP_HASH_TYPE_L2; + break; + } + case NETDEV_LAG_HASH_L23: { + np_hash_type = ZXDH_NP_HASH_TYPE_L23; + break; + } + case NETDEV_LAG_HASH_L34: { + np_hash_type = ZXDH_NP_HASH_TYPE_L34; + break; + } + default: { + np_hash_type = ZXDH_NP_HASH_TYPE_DEFAULT; + break; + } + } + + return np_hash_type; +} + +static int32_t zxdh_lag_hash(struct zxdh_lag *ldev) +{ + int i; + bool porten = false; + uint8_t phy_port = 0; + uint8_t flags = 0; + uint32_t np_hash_type = 0; + int32_t err = 0; + struct lag_func *func; + struct lag_tracker *tracker = &ldev->tracker; + DPP_PF_INFO_T pf_info = { 0 }; + + err = lag_find_first_netdev_pf_info(ldev, &pf_info); + if (err != 0) { + LAG_LOG_ERR("vport %d is incorrect\n", pf_info.vport); + return -EOPNOTSUPP; + } + +#if 0 + np_hash_type = zxdh_covert_hash_type_2_np((uint32_t)tracker->hash_type); +#else + /* 顾剑总要求,HASH模式直接写死Layer3+4 */ + np_hash_type = ZXDH_NP_HASH_TYPE_L34; +#endif + + dpp_lag_group_hash_factor_set(&pf_info, ldev->lag_id, + (uint8_t)np_hash_type); + dpp_lag_mode_set(&pf_info, ldev->lag_id, LAG_MODE_802_3AD); + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + func = &ldev->lagfunc[i]; + + if (!func->netdev) { + continue; + } + + phy_port = func->attrs.phy_port; + pf_info.slot = func->attrs.slot_id; + pf_info.vport = func->attrs.vport; + + porten = tracker->netdev_state[i].tx_enabled && + tracker->netdev_state[i].link_up; + if (porten && (ldev->slaves & (1 << i))) { + dpp_lag_group_member_add(&pf_info, ldev->lag_id, + phy_port); + dpp_uplink_phy_attr_set(&pf_info, phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, + LAG_FLAGS_ENABLE); + flags = LAG_FLAGS_ENABLE; + } else { + dpp_lag_group_member_del(&pf_info, ldev->lag_id, + phy_port); + dpp_uplink_phy_attr_set(&pf_info, phy_port, + UPLINK_PHY_PORT_BOND_LINK_UP, + LAG_FLAGS_DISABLE); + flags = LAG_FLAGS_DISABLE; + } + + 
LAG_LOG_INFO( + "lag_id %u: [hash] %s, vport %hu, pannel port %hu, phy port %hu, slave mask 0x%x, tx enable %d, link up %d \n", + ldev->lag_id, + (flags == LAG_FLAGS_ENABLE) ? "enable" : "disable", + pf_info.vport, func->attrs.pannel_id, phy_port, + ldev->slaves, + (int32_t)tracker->netdev_state[i].tx_enabled, + (int32_t)tracker->netdev_state[i].link_up); + } + + ldev->flags |= ZXDH_LAG_FLAG_HASH; + + return 0; +} + +static void zxdh_lagtracker_info(struct lag_tracker *tracker) +{ + const char *bond_tx_mode, *bonded, *hash_type; + + bonded = tracker->is_bonded ? "BONDED" : "UNBONDED"; + + switch (tracker->tx_type) { + case NETDEV_LAG_TX_TYPE_HASH: { + bond_tx_mode = "HASH"; + break; + } + case NETDEV_LAG_TX_TYPE_ACTIVEBACKUP: { + bond_tx_mode = "ACTIVE-BACKUP"; + break; + } + default: { + bond_tx_mode = "UNKOWN"; + } + } + + switch (tracker->hash_type) { + case ZXDH_NP_HASH_TYPE_L2: { + hash_type = "layer2"; + break; + } + case ZXDH_NP_HASH_TYPE_L23: { + hash_type = "layer2+3"; + break; + } + case ZXDH_NP_HASH_TYPE_L34: { + hash_type = "layer2+3"; + break; + } + default: { + hash_type = "layer2"; + } + } + + LAG_LOG_INFO("%s: tx type: %s, hash type %s.\n", bonded, bond_tx_mode, + hash_type); +} + +static void do_bond(struct zxdh_lag *ldev) +{ + bool do_bond; + struct lag_tracker *tracker = &ldev->tracker; + struct lag_func *func; + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + int32_t i = 0; + + for (i = 0; i < ZXDH_MAX_PORTS; i++) { + func = &ldev->lagfunc[i]; + + if (func->netdev) { + en_priv = netdev_priv(func->netdev); + en_dev = &en_priv->edev; + if (en_dev->device_state == + ZXDH_DEVICE_STATE_INTERNAL_ERROR) + return; + } + } + + zxdh_lagtracker_info(tracker); + + do_bond = tracker->is_bonded; + + if (do_bond && lag_is_backup(ldev)) { + zxdh_lag_active_backup(ldev); + } else if (do_bond && lag_is_hash(ldev)) { + zxdh_lag_hash(ldev); + } else { + zxdh_disable_lag(ldev); + } +} + +static void zxdh_do_bond_work(struct work_struct 
*work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct zxdh_lag *ldev = + container_of(delayed_work, struct zxdh_lag, bond_work); + int status; + + status = zxdh_dev_list_trylock(); + if (!status) { + zxdh_queue_bond_work(ldev, HZ); + return; + } + + do_bond(ldev); + zxdh_dev_list_unlock(); +} + +static struct zxdh_lag *lag_dev_alloc(struct dh_core_dev *dev) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_lag *ldev = NULL; + char name[ETH_GSTRING_LEN]; + + ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); + if (!ldev) { + LAG_LOG_ERR("Failed to alloc ldev\n"); + return NULL; + } + + ldev->parent = dev; + ldev->lag_id = pf_dev->port_resource.bond_idx; + if (snprintf(name, ETH_GSTRING_LEN, "zxdh_lag%d", ldev->lag_id) < 0) { + LAG_LOG_ERR("Failed to snprintf ldev name!\n"); + kfree(ldev); + return NULL; + } + + ldev->wq = create_singlethread_workqueue(name); + if (!ldev->wq) { + kfree(ldev); + return NULL; + } + + kref_init(&ldev->ref); + INIT_DELAYED_WORK(&ldev->bond_work, zxdh_do_bond_work); + + ldev->nb.notifier_call = lag_netdev_event; + if (register_netdevice_notifier_net(&init_net, &ldev->nb)) { + ldev->nb.notifier_call = NULL; + LAG_LOG_ERR("Failed to register LAG netdev notifier\n"); + } + + return ldev; +} + +static int lag_dev_create(struct dh_core_dev *dh_dev) +{ + struct zxdh_lag *ldev = NULL; + + ldev = lag_get_ldev(dh_dev); + if (!ldev) { + ldev = lag_dev_alloc(dh_dev); + if (!ldev) { + LAG_LOG_ERR("Failed to alloc lag dev.\n"); + return 0; + } + } else { + get_ldev_kref(ldev); + } + + lag_set_ldev(dh_dev, ldev); + + return 0; +} + +int32_t lag_dpp_pannel_init(struct dh_core_dev *dh_dev, + struct zxdh_lag_attrs *attr) +{ + struct zxdh_lag *ldev = lag_get_ldev(dh_dev->parent); + uint16_t pannel_id = attr->pannel_id; + uint16_t pf_id = (attr->pcie_id >> 8) & PCIE_ID_PF_INDEX_MASK; + uint16_t ep_id = (attr->pcie_id >> 12) & PCIE_ID_EP_INDEX_MASK; + uint16_t bond_vfid = ZXDH_PF_VFID(ep_id, pf_id); + uint16_t ovs_vfid = 
ZXDH_PF_VFID(ep_id, (pf_id + 1)); + uint8_t phy_port = attr->phy_port; + DPP_PF_INFO_T pf_info = { 0 }; + int32_t ret = 0; + + pf_info.slot = attr->slot_id; + pf_info.vport = attr->vport; + dpp_uplink_phy_bond_vport(&pf_info, phy_port); + dpp_uplink_phy_lacp_pf_vqm_vfid_set(&pf_info, phy_port, bond_vfid); + dpp_uplink_phy_lacp_pf_memport_qid_set(&pf_info, phy_port, + attr->qid[0]); + dpp_uplink_phy_attr_set(&pf_info, phy_port, UPLINK_PHY_PORT_PF_VQM_VFID, + ovs_vfid); + + dpp_uplink_phy_hardware_bond_set(&pf_info, phy_port, LAG_FLAGS_ENABLE); + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_HW_BOND_EN_OFF, + LAG_FLAGS_ENABLE); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_HW_BOND_EN_OFF LAG_FLAGS_ENABLE failed\n"); + return ret; + } + + dpp_lag_group_create(&pf_info, ldev->lag_id); + + LAG_LOG_INFO( + "vport 0x%hx, pannel id %hu, phy port %hu, rx qid %hu tx qid %hu bond vfid %d, ovs vfid %d, lag_id %d\n", + pf_info.vport, pannel_id, phy_port, attr->qid[0], attr->qid[1], + bond_vfid, ovs_vfid, ldev->lag_id); + + return 0; +} + +int32_t lag_dpp_pannel_release(struct zxdh_lag_attrs *attr) +{ + uint8_t phy_port = attr->phy_port; + DPP_PF_INFO_T pf_info = { 0 }; + int32_t ret = 0; + + pf_info.slot = attr->slot_id; + pf_info.vport = attr->vport; + dpp_uplink_phy_hardware_bond_set(&pf_info, phy_port, LAG_FLAGS_DISABLE); + ret = dpp_vport_attr_set(&pf_info, SRIOV_VPORT_HW_BOND_EN_OFF, + LAG_FLAGS_DISABLE); + if (ret != 0) { + LOG_ERR("dpp_vport_attr_set SRIOV_VPORT_HW_BOND_EN_OFF LAG_FLAGS_DISABLE failed\n"); + return ret; + } + + return 0; +} + +int32_t lag_netdev_input_check(struct dh_core_dev *dh_dev, + struct zxdh_lag_attrs *attr) +{ + struct zxdh_lag *ldev; + + if (!dh_core_is_pf(dh_dev->parent)) { + LAG_LOG_ERR("dh_core_dev type PF needs to be used\n"); + return -EINVAL; + } + + ldev = lag_get_ldev(dh_dev->parent); + if (!ldev) { + LAG_LOG_ERR("ldev can not be null\n"); + return -EINVAL; + } + + if (attr->pannel_id >= ZXDH_MAX_PORTS) { + 
LAG_LOG_ERR("pannel id %hu out of range.\n", attr->pannel_id); + return -EINVAL; + } + + return 0; +} + +/** + * zxdh_regitster_ldev - create lagdev and add to dh core dev + * @dh_dev: zxdh lag dev + * the function called by pf driver + **/ +void zxdh_regitster_ldev(struct dh_core_dev *dh_dev) +{ + zxdh_dev_list_lock(); + lag_dev_create(dh_dev); + zxdh_dev_list_unlock(); +} + +void zxdh_unregitster_ldev(struct dh_core_dev *dh_dev) +{ + struct zxdh_lag *ldev; + + zxdh_dev_list_lock(); + ldev = lag_get_ldev(dh_dev); + if (!ldev) { + zxdh_dev_list_unlock(); + return; + } + + lag_dev_release(dh_dev, ldev); + put_ldev_kref(ldev); + + zxdh_dev_list_unlock(); +} + +/** + * zxdh_ldev_add_netdev - add net device to lag device + * @dh_dev: sf core dev + * @ida: netdev idx + * @netdev: the net device which en aux created + * @attr: netdev info + * this function may called by en aux + **/ +int32_t zxdh_ldev_add_netdev(struct dh_core_dev *dh_dev, uint16_t ida, + struct net_device *netdev, + struct zxdh_lag_attrs *attr) +{ + int32_t ret = 0; + + zxdh_dev_list_lock(); + ret = lag_netdev_input_check(dh_dev, attr); + if (ret != 0) { + zxdh_dev_list_unlock(); + return -EINVAL; + } + + lag_dpp_pannel_init(dh_dev, attr); + ldev_add_netdev(dh_dev, ida, netdev, attr); + zxdh_dev_list_unlock(); + + return 0; +} +EXPORT_SYMBOL(zxdh_ldev_add_netdev); + +void zxdh_ldev_remove_netdev(struct dh_core_dev *dh_dev, + struct net_device *netdev, + struct zxdh_lag_attrs *attr) +{ + zxdh_dev_list_lock(); + + lag_dpp_pannel_release(attr); + ldev_remove_netdev(dh_dev, netdev); + zxdh_dev_list_unlock(); +} +EXPORT_SYMBOL(zxdh_ldev_remove_netdev); diff --git a/drivers/net/ethernet/dinghai/lag/lag.h b/drivers/net/ethernet/dinghai/lag/lag.h new file mode 100644 index 000000000000..4aff24a3208d --- /dev/null +++ b/drivers/net/ethernet/dinghai/lag/lag.h @@ -0,0 +1,114 @@ +#ifndef _ZXDH_ETH_LAG_H_ +#define _ZXDH_ETH_LAG_H_ + +#include +#include +#include +#include +#include + +#define LAG_LOG_ERR(fmt, 
arg...) DH_LOG_ERR(MODULE_LAG, fmt, ##arg); +#define LAG_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_LAG, fmt, ##arg); +#define LAG_LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_LAG, fmt, ##arg); +#define LAG_LOG_WARN(fmt, arg...) DH_LOG_WARNING(MODULE_LAG, fmt, ##arg); + +/* max panel port */ +#define ZXDH_MAX_PORTS (10) + +#define ZXDH_ACTIVE_PHY_PORT_NA (0xFF) + +#define PCIE_ID_PF_INDEX_MASK (0x7) +#define PCIE_ID_EP_INDEX_MASK (0x7) + +enum { + ZXDH_LAG_FLAG_BACKUP = 1 << 0, + ZXDH_LAG_FLAG_HASH = 1 << 1, + ZXDH_LAG_FLAG_READY = 1 << 2, +}; + +#define ZXDH_LAG_MODE_FLAGS (ZXDH_LAG_FLAG_BACKUP | ZXDH_LAG_FLAG_HASH) + +enum { + LAG_FLAGS_DISABLE = 0, + LAG_FLAGS_ENABLE = 1, +}; + +enum { + LAG_MODE_ACTIVE_BACKUP = 1, + LAG_MODE_802_3AD = 2, +}; + +/* define hash type for NP SDK */ +enum { + ZXDH_NP_HASH_TYPE_DEFAULT = 0, + ZXDH_NP_HASH_TYPE_L2 = 1, + ZXDH_NP_HASH_TYPE_L23 = 2, + ZXDH_NP_HASH_TYPE_L34 = 4, +}; + +struct lag_func { + bool valid; + struct dh_core_dev *dev; + struct net_device *netdev; + struct zxdh_lag_attrs attrs; +}; + +struct lag_tracker { + enum netdev_lag_tx_type tx_type; + struct netdev_lag_lower_state_info netdev_state[ZXDH_MAX_PORTS]; + bool is_bonded; + enum netdev_lag_hash hash_type; + char master_name[IFNAMSIZ]; +}; + +struct zxdh_lag { + uint32_t flags; + uint32_t lag_func_index; + uint32_t slaves; + int32_t mode_changes_in_progress; + uint8_t lag_id; + struct kref ref; + struct lag_func lagfunc[ZXDH_MAX_PORTS]; + struct workqueue_struct *wq; + struct delayed_work bond_work; + struct notifier_block nb; + struct lag_tracker tracker; + struct dh_core_dev *parent; + + struct zxdh_lag_if *ops; +}; + +static inline bool lag_is_ready(struct zxdh_lag *ldev) +{ + return true; +} + +static inline bool lag_is_backup(struct zxdh_lag *ldev) +{ + return ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ? true : + false; +} + +static inline bool lag_is_hash(struct zxdh_lag *ldev) +{ + return ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_HASH ? 
true : false; +} + +static inline bool lag_is_port_invalid(struct zxdh_lag *ldev, uint32_t index) +{ + BUG_ON(index >= ZXDH_MAX_PORTS); + return !ldev->lagfunc[index].dev || !ldev->lagfunc[index].netdev; +} + +void ldev_kref_free(struct kref *ref); +static inline void get_ldev_kref(struct zxdh_lag *ldev) +{ + kref_get(&ldev->ref); +} + +static inline void put_ldev_kref(struct zxdh_lag *ldev) +{ + kref_put(&ldev->ref, ldev_kref_free); +} + +#endif /* _ZXDH_ETH_LAG_H_ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/lag/lag_procfs.c b/drivers/net/ethernet/dinghai/lag/lag_procfs.c new file mode 100644 index 000000000000..af6a69fb9af1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/lag/lag_procfs.c @@ -0,0 +1,58 @@ +#include +#include +#include + +#include "dh_procfs.h" +#include "lag.h" + +void *lag_info_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos == 0) { + return SEQ_START_TOKEN; + } + + return NULL; +} + +void *lag_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ +#if 0 + struct test *tst = PDE_DATA(file_inode(seq->file)); + + ++*pos; + if (v == SEQ_START_TOKEN) + pr_info("%s first *pos = %u\n", __FUNCTION__, (uint32_t)*pos); + + pr_info("%s *pos = %u\n", __FUNCTION__, (uint32_t)*pos); + if (*pos < 5) { + return tst; + } +#endif + return NULL; +} + +void lag_info_seq_stop(struct seq_file *seq, void *v) +{ + pr_info("%s \n", __FUNCTION__); +} + +int lag_info_seq_show(struct seq_file *seq, void *v) +{ +#if 0 + struct zxdh_lag *lag = PDE_DATA(file_inode(seq->file)); + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, "Port Mode: %u", tst->port); + seq_printf(seq, "Num Mode: %u", tst->age); + seq_printf(seq, "\n"); + } +#endif + return 0; +} + +struct seq_operations lag_info_seq_ops = { + .start = lag_info_seq_start, + .next = lag_info_seq_next, + .stop = lag_info_seq_stop, + .show = lag_info_seq_show, +}; \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/log.c b/drivers/net/ethernet/dinghai/log.c new 
file mode 100644 index 000000000000..befc13021be0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/log.c @@ -0,0 +1,4 @@ +#include + +int debug_print; +module_param(debug_print, int, 0644); \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/msg_common.h b/drivers/net/ethernet/dinghai/msg_common.h new file mode 100644 index 000000000000..a8e59505b7d0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/msg_common.h @@ -0,0 +1,1147 @@ +#ifndef __ZXDH_MSG_COMMON_H__ +#define __ZXDH_MSG_COMMON_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "en_np/table/include/dpp_tbl_api.h" +#include "en_aux/queue.h" +#include "en_pf.h" + +#define ZXDH_VLAN_TCI_GEN(vid, qos) ((vid) | ((qos) << 12)) +#define FW_VERSION_LEN 32 +#define ZXDH_REPS_BEYOND_MAC 0xfe /* 转发域mac超过上限 */ +#define ZXDH_REPS_EXIST_MAC 0xfd /* 存在重复的单播mac */ +#define MAX_QUE_CNT 60 +#define ZXDH_REPS_MAX_SIZE_BEFORE57 \ + 1032 //zxdh_msg_op_code=57之前的最大sizeof(union zxdh_msg) +#define BAR_MSG_RETRY_CNT_MAX 100 + +#define ZXDH_BAR_DUALTOR_LABEL_ON (0xaaaaaaaa) +#define ZXDH_DUALTOR_LABEL_OFFSET (0x5000 + 1920) + +#define DEFAULT_ADD_INDEX 0XFFFFFFFF /* 默认申请index */ +typedef enum { + ZXDH_NULL = 0, + + ZXDH_VF_PORT_INIT = 1, + ZXDH_VF_PORT_UNINIT = 2, + ZXDH_MAC_ADD = 3, + ZXDH_MAC_DEL = 4, + ZXDH_MAC_GET = 5, + + ZXDH_RSS_EN_SET = 7, + ZXDH_RXFH_SET = 8, + ZXDH_RXFH_GET = 9, + ZXDH_RXFH_DEL = 10, + ZXDH_THASH_KEY_SET = 11, + ZXDH_THASH_KEY_GET = 12, + ZXDH_HASH_FUNC_SET = 13, + ZXDH_HASH_FUNC_GET = 14, + ZXDH_RX_FLOW_HASH_SET = 15, + ZXDH_RX_FLOW_HASH_GET = 16, + + ZXDH_VLAN_FILTER_SET = 17, + ZXDH_VLAN_FILTER_ADD = 18, + ZXDH_VLAN_FILTER_DEL = 19, + ZXDH_VLAN_OFFLOAD_SET = 21, + + ZXDH_PORT_ATTRS_GET = 22, + ZXDH_SET_TPID = 23, + ZXDH_VXLAN_OFFLOAD_ADD = 24, + ZXDH_PORT_ATTRS_SET = 25, + ZXDH_PROMISC_SET = 26, + + /*sriov msg type*/ + ZXDH_SRIOV_RESET = 27, + + ZXDH_SET_VF_LINK_STATE = 28, + ZXDH_PF_SET_VF_VLAN = 29, + ZXDH_SET_VF_RESET = 30, + ZXDH_GET_NP_STATS = 31, + + 
ZXDH_VF_RATE_LIMIT_SET = 32, + ZXDH_PLCR_UNINIT = 33, + ZXDH_MAP_PLCR_FLOWID = 34, + ZXDH_PLCR_FLOW_INIT = 35, + ZXDH_PLCR_CAR_PROFILE_ID_ADD = 36, + ZXDH_PLCR_CAR_PROFILE_ID_DELETE = 37, + ZXDH_PLCR_CAR_PROFILE_CFG_SET = 38, + ZXDH_PLCR_CAR_PROFILE_CFG_GET = 39, + ZXDH_PLCR_CAR_QUEUE_CFG_SET = 40, + ZXDH_PORT_METER_STAT_CLR = 41, + ZXDH_PORT_METER_STAT_GET = 42, + ZXDH_PF_GET_VF_QUEUE_INFO = 43, + ZXDH_PLCR_GET_MODE = 44, + ZXDH_PLCR_SET_MODE = 45, + ZXDH_FLOW_HW_ADD = 46, + ZXDH_FLOW_HW_DEL = 47, + ZXDH_FLOW_HW_GET = 48, + ZXDH_FLOW_HW_FLUSH = 49, + + ZXDH_VF_1588_CALL_NP = 50, + ZXDH_VF_SLOT_ID_GET = 51, + + ZXDH_IPV6_MAC_ADD = 52, + ZXDH_IPV6_MAC_DEL = 53, + ZXDH_MAC_DUMP = 54, + ZXDH_MC_CMPAT_VERINFO = 55, + ZXDH_GET_K_CMPAT_VERINFO = 56, + ZXDH_GET_SW_STATS = 57, + ZXDH_LACP_MAC_ADD = 58, + ZXDH_LACP_MAC_DEL = 59, + ZXDH_VXLAN_OFFLOAD_DEL = 60, + ZXDH_VF_PORT_RELOAD = 61, + ZXDH_VF_1588_ENABLE = 62, + ZXDH_VF_GET_UDP_STATS = 63, + ZXDH_FD_ADD = 64, + ZXDH_FD_GET = 65, + ZXDH_FD_DEL = 66, + ZXDH_FD_EN_SET = 67, + ZXDH_MSG_TYPE_CNT_MAX, /* should be at last */ +} __attribute__((packed)) zxdh_msg_op_code; + +enum dh_flow_type { + FLOW_TYPE_FLOW = 0, + FLOW_TYPE_FD_TCAM, + FLOW_TYPE_FD_SW, +}; + +enum { + FD_ACTION_VXLAN_ENCAP = 0, + FD_ACTION_VXLAN_DECAP = 1, + FD_ACTION_RSS_BIT = 2, + FD_ACTION_COUNT_BIT = 3, + FD_ACTION_DROP_BIT = 4, + FD_ACTION_MARK_BIT = 5, + FD_ACTION_QUEUE_BIT = 6, +}; + +enum vqm_msg_opcode { + MSIX_MODE_GET = 0, + MSIX_MODE_SET = 1, + OPCODE_GET = 0, + OPCODE_SET = 1, +}; + +enum vqm_msg_cmd { + VQM_VF_FC_CMD = 0x9, + MSIX_MODE_CMD = 0x10, + OVS_VQM_CTRL_RESET_QIDS = 0x11, + COALESCE_USECS_CMD = 0xf, +}; + +struct vqm_wr_used_t { + uint16_t rx_used_ring_t; /* rx超时写已用环时间查询结果或配置结果,单位us */ + uint16_t tx_used_ring_t; /* tx超时写已用环时间查询结果或配置结果,单位us */ +} __attribute__((packed)); + +struct vqm_msix_mode { + uint16_t rx_msix_mode; /* 查询不填写,配置时填写rx msix配置模式值 */ + uint16_t tx_msix_mode; /* 查询不填写,配置时填写tx msix配置模式值 */ +} __attribute__((packed)); + 
+struct vqm_phy_qid { +#define ZXDH_VNET_ZTE (0x6) + uint8_t version; + uint8_t qnum; +#define MAX_QNUM ((ZXDH_QUEUE_PAIRS_MAX + 1) * 2) + uint32_t qid[MAX_QNUM]; +} __attribute__((packed)); + +struct vqm_flow_cfg { + uint32_t pps; + uint32_t kbps; +} __attribute__((packed)); + +typedef struct { + uint16_t vqm_vfid; /* 设备号,填写任意值 */ + uint16_t opcode; /* 查询 0, 配置 1 */ + uint16_t cmd; /* cmd - 0x10 */ + + union { + struct vqm_wr_used_t wr_used_t; + struct vqm_msix_mode msix_mode_sel; + struct vqm_phy_qid qid_reset_msg; + struct vqm_flow_cfg vqm_vf_fc; + } __attribute__((packed)); +} __attribute__((packed)) host_to_vqm_msg; + +typedef struct { + uint32_t check_result; /* 执行结果, 如果成功为0xaa, 其他失败*/ + union { + struct vqm_wr_used_t wr_used_t; + struct vqm_msix_mode msix_mode_sel; + struct vqm_flow_cfg vqm_vf_fc; + } __attribute__((packed)); +} __attribute__((packed)) vqm_rsp_host_data; + +struct fd_flow_key { + uint8_t mac_dst[ZXDH_MAC_NUM]; + uint8_t mac_src[ZXDH_MAC_NUM]; + uint16_t ether_type; + union { + struct { + uint16_t cvlan_pri : 4; + uint16_t cvlan_vlanid : 12; /* vlanid 0xfff is valid */ + }; + uint16_t vlan_tci; + }; + + uint8_t src_ip[16]; + uint8_t dst_ip[16]; + uint8_t rsv0; + union { + uint8_t tos; + uint8_t tc; + }; + uint8_t nw_proto; + uint8_t frag_flag; + uint16_t tp_src; + uint16_t tp_dst; + + uint8_t rsv1; + uint8_t vni[3]; + + uint16_t vfid; + uint8_t rsvs[18]; +} __attribute__((packed)); + +struct fd_flow_result { + uint16_t qid; + uint8_t rsv0; + + uint8_t action_idx : 7; + uint8_t hit_flag : 1; + + uint32_t mark_fd_id; + uint32_t countid : 20; + uint32_t sriov_tunnel_encap1_index : 12; + + uint16_t sriov_tunnel_encap0_index : 12; + uint16_t rsv1 : 4; + uint8_t rss_hash_factor; + uint8_t rss_hash_alg; +} __attribute__((packed)); + +struct fd_flow_entry { + struct fd_flow_key key; + struct fd_flow_key key_mask; + struct fd_flow_result result; +} __attribute__((packed)); + +struct zxdh_flow_info { + enum dh_flow_type flowtype; + uint16_t hw_idx; + 
uint16_t rsv; + union { + struct fd_flow_entry fd_flow; + }; +} __attribute__((packed)); + +struct zxdh_flow { + uint8_t direct; + uint8_t group; + uint8_t pri; + uint8_t hash_search_idx; + struct zxdh_flow_info flowentry; +} __attribute__((packed)); + +typedef struct { + zxdh_msg_op_code op_code; + uint16_t vport; + uint16_t vf_id; + uint16_t pcie_id; +} __attribute__((packed)) zxdh_msg_head_to_pf; + +typedef struct { + zxdh_msg_op_code op_code; + uint16_t dst_pcie_id; +} __attribute__((packed)) zxdh_msg_head_to_vf; + +typedef struct { + bool link_up; + bool is_upf; + uint16_t base_qid; + uint8_t mac_addr[ZXDH_MAC_NUM]; + uint32_t speed; + uint32_t autoneg_enable; + uint32_t sup_link_modes; + uint32_t adv_link_modes; + uint8_t hash_search_idx; + uint8_t duplex; + uint8_t phy_port; + uint8_t rss_enable; + uint16_t vlan_id; + uint16_t tpid; + uint8_t vlan_qos; + uint8_t addr_assign_type; +} __attribute__((packed)) zxdh_vf_init_msg; + +typedef struct { + bool is_upf; + bool uc_promisc; + bool mc_promisc; + uint8_t hash_search_idx; + uint16_t base_qid; + uint8_t vlan_qos; + uint8_t hash_func; + uint32_t hash_mode; + uint32_t queue_map[ZXDH_INDIR_RQT_SIZE]; + uint8_t link_up; + uint8_t speed; + uint8_t duplex; +#define VLAN_BITMAP_BYTE_SIZE (512) + uint8_t vlan_trunk_bitmap[VLAN_BITMAP_BYTE_SIZE]; +} __attribute__((packed)) zxdh_vf_reload_msg; + +typedef struct { + uint32_t vfid; + uint32_t call_np_interface_num; + uint32_t ptp_tc_enable_opt; +} __attribute__((packed)) zxdh_vf_1588_call_np; + +typedef struct { + uint8_t rss_enable; +} __attribute__((packed)) zxdh_rss_enable_msg; + +typedef struct { + uint8_t fd_enable; +} __attribute__((packed)) zxdh_fd_enable_msg; + +typedef struct { + struct ethtool_rx_flow_spec fs; + uint32_t index; +} __attribute__((packed)) zxdh_vf_fd_cfg_msg; +typedef struct { + bool enable; +#define VLAN_STRIP_MSG_TYPE 0 +#define QINQ_STRIP_MSG_TYPE 1 + uint8_t flag; +} __attribute__((packed)) zxdh_strip_enable_msg; + +typedef struct { + 
uint16_t vf_idx; + uint16_t vlan_id; + uint8_t qos; + uint8_t rsv; + uint16_t protocl; +} __attribute__((packed)) zxdh_set_vf_vlan_msg; + +typedef struct { + uint16_t tpid; +} __attribute__((packed)) zxdh_qinq_tpid_cfg_msg; + +typedef struct { + uint32_t queue_map[ZXDH_INDIR_RQT_SIZE]; +} __attribute__((packed)) zxdh_rxfh_set_msg; + +typedef struct { + uint8_t key_map[ZXDH_NET_HASH_KEY_SIZE]; +} __attribute__((packed)) zxdh_thash_key_set_msg; + +typedef struct { + uint16_t slot_id; +} __attribute__((packed)) zxdh_slot_id_msg; + +typedef struct { + uint8_t func; +} __attribute__((packed)) zxdh_hfunc_set_msg; + +typedef struct { + uint32_t hash_mode; +} __attribute__((packed)) zxdh_rx_flow_hash_set_msg; + +typedef struct { + bool mac_flag; + uint8_t filter_flag; /* 0xaa表示过滤,0Xff表示其他*/ + uint8_t mac_addr[ZXDH_MAC_NUM]; +} __attribute__((packed)) zxdh_mac_addr_msg; + +typedef struct { + uint8_t mac_addr[ZXDH_MAC_NUM]; +} __attribute__((packed)) zxdh_ipv6_mac_addr_msg; + +typedef struct { + uint32_t mode; + uint32_t value; + uint8_t allmulti_follow; +} __attribute__((packed)) zxdh_port_attr_set_msg; + +#define ZXDH_PROMISC_MODE 1 +#define ZXDH_ALLMULTI_MODE 2 +typedef struct { + uint8_t mode; + uint8_t value; + uint8_t mc_follow; +} __attribute__((packed)) zxdh_promisc_set_msg; + +typedef struct { + uint8_t rsv2; + uint16_t read_bytes; + uint8_t value; +} __attribute__((packed)) common_recv_msg; + +typedef struct { + uint8_t rsv2; + uint16_t read_bytes; + uint16_t queue_nums; + uint16_t phy_qidx[256]; +} __attribute__((packed)) common_vq_msg; + +typedef enum { + AGENT_MAC_STATS_CLEAR = 11, + AGENT_MAC_PHYPORT_INIT, + AGENT_MAC_AUTONEG_SET, + AGENT_MAC_LINK_INFO_GET, + AGENT_MAC_LED_BLINK, + AGENT_MAC_FEC_MODE_SET, + AGENT_MAC_FEC_MODE_GET, + AGENT_MAC_FC_MODE_SET, + AGENT_MAC_FC_MODE_GET, + AGENT_MAC_MODULE_EEPROM_READ, + AGENT_VQM_DEVICE_STATS_GET, + AGENT_VQM_STATS_CLEAR, + AGENT_FLASH_FIR_VERSION_GET = 23, + AGENT_DEV_STATUS_NOTIFY, + AGENT_DEBUG_LLDP_ENABLE_SET, + 
AGENT_DEBUG_LLDP_ENABLE_GET, + AGENT_SSHD_START, + AGENT_SSHD_STOP, + AGENT_FLASH_MAC_READ, + AGENT_FLASH_MAC_WRITE, + AGENT_FLASH_MAC_ERASE, + AGENT_MAC_RECOVERY_CLK_SET, + AGENT_MAC_SYNCE_CLK_STATS_GET, + AGENT_MAC_PORT_TSTAMP_ENABLE_SET, + AGENT_MAC_PORT_TSTAMP_ENABLE_GET, + AGENT_MAC_PORT_TSTAMP_MODE_SET, + AGENT_MAC_PORT_TSTAMP_MODE_GET, + AGENT_MAC_PORT_DELAY_VALUE_GET, + AGENT_MAC_PORT_DELAY_VALUE_CLR, + AGENT_SLOT_INFO_SEND = 40, + AGENT_OS_TYPE_GET = 41, + AGENT_DTP_STATS_GET, + AGENT_SPM_PORT_ENABLE_SET, + AGENT_MAC_MSG_NUM_MAX, /* should be at last */ +} __attribute__((packed)) agent_msg_op_code; + +typedef struct { + agent_msg_op_code op_code; + uint8_t port_id; + uint8_t phyport; + uint8_t is_upf; + uint16_t vf_id; + uint16_t pcie_id; +} __attribute__((packed)) agent_msg_hdr; + +typedef struct { + uint8_t autoneg; + uint8_t link_state; + uint8_t blink_enable; + uint8_t duplex; + uint32_t speed_modes; + uint32_t speed; +} __attribute__((packed)) agent_mac_autoneg_msg; + +typedef struct { + uint64_t rx_total; + uint64_t tx_total; + uint64_t rx_total_bytes; + uint64_t tx_total_bytes; + uint64_t rx_error; + uint64_t tx_error; + uint64_t rx_drop; + uint64_t tx_drop; + uint64_t rx_good_bytes; + uint64_t tx_good_bytes; + uint64_t rx_unicast; + uint64_t tx_unicast; + uint64_t rx_multicast; + uint64_t tx_multicast; + uint64_t rx_broadcast; + uint64_t tx_broadcast; + uint64_t rx_undersize; + uint64_t rx_size_64; + uint64_t rx_size_65_127; + uint64_t rx_size_128_255; + uint64_t rx_size_256_511; + uint64_t rx_size_512_1023; + uint64_t rx_size_1024_1518; + uint64_t rx_size_1519_mru; + uint64_t rx_oversize; + uint64_t tx_undersize; + uint64_t tx_size_64; + uint64_t tx_size_65_127; + uint64_t tx_size_128_255; + uint64_t tx_size_256_511; + uint64_t tx_size_512_1023; + uint64_t tx_size_1024_1518; + uint64_t tx_size_1519_mtu; + uint64_t tx_oversize; + uint64_t rx_pause; + uint64_t tx_pause; + uint64_t rx_fcs_error; + uint64_t tx_fcs_error; + uint64_t rx_mac_control; + 
uint64_t tx_mac_control; + uint64_t rx_fragment; + uint64_t tx_fragment; + uint64_t rx_jabber; + uint64_t tx_jabber; + uint64_t rx_vlan; + uint64_t tx_vlan; + uint64_t rx_eee; + uint64_t tx_eee; +} __attribute__((packed)) agent_stats; + +typedef struct { + uint64_t np_rx_vport_unicast_packets; + uint64_t np_tx_vport_unicast_packets; + uint64_t np_rx_vport_unicast_bytes; + uint64_t np_tx_vport_unicast_bytes; + uint64_t np_rx_vport_multicast_packets; + uint64_t np_tx_vport_multicast_packets; + uint64_t np_rx_vport_multicast_bytes; + uint64_t np_tx_vport_multicast_bytes; + uint64_t np_rx_vport_broadcast_packets; + uint64_t np_tx_vport_broadcast_packets; + uint64_t np_rx_vport_broadcast_bytes; + uint64_t np_tx_vport_broadcast_bytes; + uint64_t np_rx_vport_mtu_drop_packets; + uint64_t np_tx_vport_mtu_drop_packets; + uint64_t np_rx_vport_mtu_drop_bytes; + uint64_t np_tx_vport_mtu_drop_bytes; + uint64_t np_rx_vport_plcr_drop_packets; + uint64_t np_tx_vport_plcr_drop_packets; + uint64_t np_rx_vport_plcr_drop_bytes; + uint64_t np_tx_vport_plcr_drop_bytes; + uint64_t np_tx_vport_ssvpc_packets; // switch security violation packet count, only for PF. + uint64_t rx_vport_idma_drop_packets; // port to np drop (idma point not enough). 
+} __attribute__((packed)) np_stats; + +typedef struct { + uint8_t fec_cfg; + uint8_t fec_cap; + uint8_t fec_link; +} __attribute__((packed)) agent_mac_fec_mode_msg; + +typedef struct { + uint8_t fc_mode; +} __attribute__((packed)) agent_mac_fc_mode_msg; + +typedef struct { + uint16_t index; +} __attribute__((packed)) agent_flash_read_msg; + +typedef struct { + uint8_t i2c_addr; + uint8_t bank; + uint8_t page; + uint8_t offset; + uint8_t length; + uint8_t data[128]; +} __attribute__((packed)) agent_mac_module_eeprom_msg; + +typedef struct { + bool is_link_force_set; + bool link_forced; + bool link_up; + uint32_t speed; + uint32_t autoneg_enable; + uint32_t supported_speed_modes; + uint32_t advertising_speed_modes; + uint8_t duplex; +} __attribute__((packed)) zxdh_link_state_msg; + +typedef struct { + bool enable; +} __attribute__((packed)) zxdh_vlan_filter_set_msg; + +typedef struct { + uint16_t vlan_id; +} __attribute__((packed)) zxdh_rx_vid_add_msg; + +typedef struct { + uint16_t vlan_id; +} __attribute__((packed)) zxdh_rx_vid_del_msg; + +typedef struct { + uint8_t type; + uint8_t field; + uint16_t pcie_id; + uint16_t write_bytes; + uint16_t rsv; +} __attribute__((packed)) zxdh_common_tbl_hdr; + +typedef struct { + uint8_t tmmng_type; + uint8_t dir; + uint16_t year; + uint8_t month; + uint8_t day; + uint8_t hour; + uint8_t min; + uint8_t sec; +} __attribute__((packed)) zxdh_cfg_time_msg; + +typedef struct { + uint16_t pcie_id; + uint16_t write_bytes; +} __attribute__((packed)) zxdh_common_time_hdr; + +typedef struct { + uint8_t clk_speed; + uint8_t clk_stats; +} __attribute__((packed)) zxdh_synce_clk_msg; + +typedef struct { + uint32_t tx_enable; + uint32_t rx_enable; + uint32_t tx_mode; + uint32_t rx_mode; +} __attribute__((packed)) zxdh_mac_tstamp_msg; + +typedef struct { + uint64_t min_delay; + uint64_t max_delay; +} __attribute__((packed)) zxdh_delay_statistics_val; + +typedef struct { + uint16_t pcieid; // 发送者自己的pcie id + uint16_t extern_pps_vector; // 
外部pps中断向量 + uint16_t local_pps_vector; // local pps中断向量 + uint16_t pps_intr_support; // riscv侧返回的是否支持pps中断 +} __attribute__((packed)) zxdh_bar_msg_pps; + +typedef struct { + uint8_t dev_id; //用dbg module的id, 为0 + uint8_t type; //区分PXE下和正常的np配表流程, 0: PXE 7: 配np + uint8_t operate_mode; //区分PXE开始和结束, 0: 开始 1: 结束 + uint8_t pfNum; + uint32_t portNum[10]; + uint32_t evid[10]; + uint32_t qid[10]; +} __attribute__((packed)) zxdh_cfg_np_msg; + +#define MAX_HDR_LEN 8 + +typedef struct { + char ifname[IFNAMSIZ]; + uint8_t mac[ETH_ALEN]; + uint16_t pannel_id; + uint16_t ctl; + uint16_t rsv; +} __attribute__((packed)) zxdh_pf_cfg_mac_msg; + +#define MAX_VF_NUM 256 +typedef struct { + uint16_t num; + uint16_t func_no[MAX_VF_NUM]; +} __attribute__((packed)) agent_pcie_msix_msg; + +typedef struct { + bool lldp_enable; +} __attribute__((packed)) zxdh_lldp_enable_msg; + +typedef struct { + uint32_t flowid; + uint32_t car_type; + uint32_t is_packet; + uint32_t max_rate; + uint32_t min_rate; +} __attribute__((packed)) zxdh_rate_limit_set_msg; + +/*vf send message to pf to map flow id between CARS*/ +typedef struct { + uint32_t car_type; + uint32_t flowid; + uint32_t map_flowid; + uint32_t sp; +} __attribute__((packed)) zxdh_plcr_flowid_map_msg; + +typedef struct { + uint32_t car_type; + uint32_t flowid; + uint32_t profile_id; +} __attribute__((packed)) zxdh_plcr_flow_init_msg; + +typedef struct { + uint32_t vir_queue_start; + uint32_t vir_queue_num; +} __attribute__((packed)) zxdh_plcr_pf_get_vf_queue_info_msg; + +typedef struct { + uint16_t vport; + uint16_t mode; +} __attribute__((packed)) zxdh_plcr_work_mode_msg; + +/*用户态QOS申请限速模板*/ +typedef struct { + uint8_t car_type; +} __attribute__((packed)) zxdh_vf_plcr_profile_id_add_msg; + +/*用户态QOS删除限速模板*/ +typedef struct { + uint8_t car_type; + uint8_t rsvd; + uint16_t profile_id; +} __attribute__((packed)) zxdh_vf_plcr_profile_id_delete_msg; + +/*用户态QOS配置限速模板*/ +typedef struct { + uint8_t car_type; + uint8_t pkt_mode; + uint16_t 
profile_id; + union zxdh_plcr_profile_cfg profile_cfg; +} __attribute__((packed)) zxdh_vf_plcr_profile_cfg_set_msg; + +/*用户态QOS获取限速模板*/ +typedef struct { + uint8_t car_type; + uint8_t pkt_mode; + uint16_t profile_id; +} __attribute__((packed)) zxdh_vf_plcr_profile_cfg_get_msg; + +/*用户态QOS绑定flow和profile*/ +typedef struct { + uint8_t car_type; + uint8_t drop_flag; + uint8_t plcr_en; + uint8_t rsvd; + uint16_t flow_id; + uint16_t profile_id; +} __attribute__((packed)) zxdh_vf_plcr_queue_cfg_set_msg; + +/*用户态QOS获取plcr丢包统计*/ +typedef struct { + uint8_t direction; //取值为1:获取tx方向的统计计数;取值为0:获取rx方向的统计计数 + uint8_t is_clr; //取值为1:读取之后,计数器清零,取值为0:读取之后,计数器不清零 +} __attribute__((packed)) zxdh_vf_plcr_port_meter_stat_get_msg; + +typedef struct { + uint16_t port; +} __attribute__((packed)) zxdh_vf_vxlan_port_msg; + +typedef struct { + uint32_t clear_mode; + bool is_init_get; +} __attribute__((packed)) zxdh_np_stats_get_msg; + +typedef struct { + uint8_t slot_info; //槽位号 +} __attribute__((packed)) zxdh_slot_info; + +struct zxdh_port_msg { + uint16_t pcie_id; + uint8_t rsv[2]; +} __attribute__((packed)); + +struct port_message_recv { + uint8_t hdr[4]; + uint8_t port_num; //bond组中端口个数 + uint8_t bond_num; //bond组个数 + uint8_t bond_idx; + uint8_t rsv; + struct zxdh_pannle_port data[16]; +} __attribute__((packed)); + +typedef struct { + uint32_t enable; +} __attribute__((packed)) zxdh_spm_port_enable; + +struct sriov_tunnel_encap_ip_dip { + uint8_t ip_addr[16]; +} __attribute__((packed)); + +struct sriov_tunnel_encap0 { + uint8_t tos; + uint8_t rsv2[2]; + uint8_t rsv1 : 6; + uint8_t eth_type : 1; + uint8_t hit_flag : 1; + uint16_t dst_mac1; + uint16_t tp_dst; + uint32_t dst_mac2; + + uint32_t encap_ttl : 8; + uint32_t vni : 24; + struct sriov_tunnel_encap_ip_dip dip; +}; + +struct sriov_tunnel_encap_ip_sip { + uint8_t ip_addr[16]; +} __attribute__((packed)); + +struct sriov_tunnel_encap1 { + uint32_t rsv1 : 31; + uint32_t hit_flag : 1; + + uint16_t src_mac1; + uint16_t vlan_tci; + uint32_t 
src_mac2; + uint32_t rsv; + struct sriov_tunnel_encap_ip_dip sip; +}; + +typedef struct { + struct zxdh_flow dh_flow; + struct sriov_tunnel_encap0 encap0; + struct sriov_tunnel_encap1 encap1; +} __attribute__((packed)) zxdh_flow_op_msg; + +typedef struct { + uint32_t dev_id; + uint32_t index; +} __attribute__((packed)) zxdh_mcode_feature_msg; + +typedef struct { + uint32_t vfid; +} __attribute__((packed)) zxdh_k_cmpat_msg; + +typedef struct { + uint16_t sum_check; + uint8_t opcode; + uint8_t rsv; +} __attribute__((packed)) health_msg_hdr; + +typedef struct { + uint16_t pcie_id; + uint16_t vector; +} __attribute__((packed)) pf_status_msg; + +typedef struct { + uint8_t act; +} __attribute__((packed)) health_config_msg; + +typedef struct { + uint8_t err_stat_flag : 1; /*1: 取异常统计,0:取所有统计*/ + uint8_t que_stat_flag : 1; /*1:取各队列统计 0: 取端口统计*/ + uint8_t rd_clr : 1; /* 读清标记,0:不读清 1:读清*/ + uint8_t rsv0 : 5; + uint16_t vf_idx; /* 指定vf */ + uint16_t start_index; /* 指定获取的起始队列号0~223 */ + uint16_t queue_num; /* 每次获取的最大队列数,最大值32个*/ +} __attribute__((packed)) zxdh_get_sw_stats; + +typedef enum { + ZXDH_VF_1588_ENABLE_SET, + ZXDH_VF_1588_ENABLE_GET, + ZXDH_VF_1588_CMD_CNT_MAX, /* should be at last */ +} __attribute__((packed)) zxdh_1588_vf_op_code; + +typedef struct { + uint32_t proc_cmd; + uint32_t enable_1588_vf; +} __attribute__((packed)) zxdh_vf_1588_enable; + +typedef struct { + uint32_t enable_1588_vf_rsp; +} __attribute__((packed)) zxdh_vf_1588_enable_rsp; + +typedef struct { + uint32_t vfid; + uint16_t pcie_id; + uint16_t rsv; + uint64_t bits; +} __attribute__((packed)) zxdh_vqmb_hdr; + +typedef struct { + uint16_t port_enable; + uint16_t version; +} __attribute__((packed)) zxdh_vqmb_port_ctrl_msg; + +typedef struct { + zxdh_vqmb_hdr vqmb_hdr; + zxdh_vqmb_port_ctrl_msg vqmb_port_ctrl_msg; +} __attribute__((packed)) vqmb_to_host_msg; + +typedef struct { + union { + uint8_t len[MAX_HDR_LEN]; + zxdh_msg_head_to_pf hdr; + zxdh_msg_head_to_vf hdr_vf; + agent_msg_hdr hdr_to_agt; + 
zxdh_common_tbl_hdr hdr_to_cmn; + zxdh_common_time_hdr hdr_time_to_cmn; + health_msg_hdr health_hdr; + }; /* should be no more than MAX_HDR_LEN */ + + union { + zxdh_vf_reload_msg vf_reload_msg; + zxdh_rss_enable_msg rss_enable_msg; + zxdh_rxfh_set_msg rxfh_set_msg; + zxdh_thash_key_set_msg thash_key_set_msg; + zxdh_hfunc_set_msg hfunc_set_msg; + zxdh_rx_flow_hash_set_msg rx_flow_hash_set_msg; + zxdh_mac_addr_msg mac_addr_set_msg; + zxdh_port_attr_set_msg port_attr_set_msg; + zxdh_promisc_set_msg promisc_set_msg; + zxdh_link_state_msg link_state_msg; + zxdh_vlan_filter_set_msg vlan_filter_set_msg; + zxdh_rx_vid_add_msg rx_vid_add_msg; + zxdh_rx_vid_del_msg rx_vid_del_msg; + agent_mac_autoneg_msg mac_set_msg; + agent_mac_fec_mode_msg mac_fec_mode_msg; + agent_mac_fc_mode_msg mac_fc_mode_msg; + agent_mac_module_eeprom_msg module_eeprom_msg; + agent_flash_read_msg flash_read_msg; + zxdh_vf_init_msg vf_init_msg; + zxdh_strip_enable_msg vlan_strip_msg; + zxdh_set_vf_vlan_msg vf_vlan_msg; + zxdh_qinq_tpid_cfg_msg tpid_cfg_msg; + zxdh_pf_cfg_mac_msg mac_cfg_msg; + zxdh_cfg_time_msg time_cfg_msg; + agent_pcie_msix_msg pcie_msix_msg; + zxdh_lldp_enable_msg lldp_msg; + zxdh_rate_limit_set_msg rate_limit_set_msg; + zxdh_plcr_flowid_map_msg plcr_flowid_map_msg; + zxdh_plcr_flow_init_msg plcr_flow_init_msg; + zxdh_plcr_pf_get_vf_queue_info_msg plcr_pf_get_vf_queue_info_msg; + zxdh_plcr_work_mode_msg plcr_work_mode_msg; + zxdh_slot_info debug_ip_send; + + /*用户态QOS申请限速模板*/ + zxdh_vf_plcr_profile_id_add_msg vf_plcr_profile_id_add_msg; + /*用户态QOS删除限速模板*/ + zxdh_vf_plcr_profile_id_delete_msg vf_plcr_profile_id_delete_msg; + /*用户态QOS配置限速模板*/ + zxdh_vf_plcr_profile_cfg_set_msg vf_plcr_profile_cfg_set_msg; + /*用户态QOS获取限速模板*/ + zxdh_vf_plcr_profile_cfg_get_msg vf_plcr_profile_cfg_get_msg; + /*用户态QOS绑定flow和profile*/ + zxdh_vf_plcr_queue_cfg_set_msg vf_plcr_queue_cfg_set_msg; + /*用户态QOS获取plcr丢包统计*/ + zxdh_vf_plcr_port_meter_stat_get_msg + vf_plcr_port_meter_stat_get_msg; + 
zxdh_vf_vxlan_port_msg vf_vxlan_port; + zxdh_np_stats_get_msg np_stats_get_msg; + + zxdh_vf_1588_call_np vf_1588_call_np; + zxdh_synce_clk_msg synce_clk_recovery_port; + zxdh_mac_tstamp_msg mac_tstamp_msg; + zxdh_bar_msg_pps msg_pps; + uint16_t cmn_tbl_msg[257]; + zxdh_spm_port_enable spm_port_enable_set; + zxdh_flow_op_msg flow_msg; + zxdh_mcode_feature_msg mcode_feature_msg; + zxdh_k_cmpat_msg kernel_cmpat_msg; + health_config_msg health_config_msg; + pf_status_msg pf_status_msg; + zxdh_get_sw_stats vf_sw_stats; + zxdh_vf_1588_enable vf_1588_enable; + zxdh_vf_fd_cfg_msg vf_fd_cfg_msg; + zxdh_fd_enable_msg vf_fd_enable_msg; + }; +} zxdh_msg_info; + +typedef enum { + ZXDH_REPS_FAIL, + ZXDH_REPS_SUCC = 0xaa, + ZXDH_INVALID_OP_CODE = 0xee, +} __attribute__((packed)) zxdh_reps_flag; + +typedef enum { + GET_STAT_SUCCESS = 0, + GET_STAT_FAILED = 1, + VF_ERR = 2, + ACTION_IS_NOT_SUPPORTED = 3, +} __attribute__((packed)) zxdh_get_sw_stats_flag; + +typedef struct { + uint8_t lldp_status; +} __attribute__((packed)) agent_debug_lldp_msg; + +typedef struct { + uint8_t firmware_version[FW_VERSION_LEN]; +} __attribute__((packed)) agent_flash_msg; + +typedef struct { + uint8_t mac[ETH_ALEN]; +} __attribute__((packed)) agent_flash_mac_read_msg; + +typedef struct { + uint32_t phy_queue_num; + uint16_t phy_rxq[16]; + uint16_t phy_txq[16]; +} __attribute__((packed)) zxdh_plcr_pf_get_vf_queue_info_rsp; + +typedef struct { + int32_t err_code; +} __attribute__((packed)) zxdh_rate_limit_set_rsp; + +typedef struct { + uint8_t mode; +} __attribute__((packed)) zxdh_plcr_work_mode_rsp; + +/*用户态QOS申请限速模板,返回模板profile_id*/ +typedef struct { + uint16_t profile_id; +} __attribute__((packed)) zxdh_vf_plcr_profile_id_add_rsp; + +/*用户态QOS获取限速模板,返回模板参数*/ +typedef struct { + union zxdh_plcr_profile_cfg profile_cfg; +} __attribute__((packed)) zxdh_vf_plcr_profile_cfg_get_rsp; + +/*用户态QOS获取丢包统计*/ +typedef struct { + uint64_t drop_pkB_cnt; //丢数据包:包数统计 + uint64_t drop_pk_cnt; //丢数据包:字节数计数 +} 
__attribute__((packed)) zxdh_vf_plcr_port_meter_stat_get_rsp; + +typedef struct { + ZXDH_SRIOV_VPORT_T port_attr_entry; +} __attribute__((packed)) zxdh_port_attr_get_msg; + +typedef struct { + uint8_t mac_err_flag; +} __attribute__((packed)) zxdh_port_mac_set_rsp; + +typedef struct { + uint32_t index; +} __attribute__((packed)) zxdh_fd_cfg_reply; +struct rte_flow_query_count { + uint32_t reset : 1; + uint32_t hits_set : 1; + uint32_t bytes_set : 1; + uint32_t reserved : 29; + uint64_t hits; + uint64_t bytes; +}; + +struct err_reason { + uint8_t err_type; + uint8_t rsv[3]; + char reason[512]; +} __attribute__((packed)); + +typedef struct { + struct zxdh_flow dh_flow; + union { + struct rte_flow_query_count count; + struct err_reason error; + }; +} __attribute__((packed)) zxdh_flow_op_rsp; + +typedef struct { + uint64_t len; + uint64_t feature; +} __attribute__((packed)) zxdh_mcode_feature_rsp; + +typedef struct { + uint64_t k_msg_idmax; +} __attribute__((packed)) zxdh_k_cmpat_rsp; + +typedef struct { + uint64_t truncated_err; + uint64_t offload_cfg_err; + uint64_t invalid_hdr_len_err; + uint64_t no_segs_err; +} __attribute__((packed)) err_stats; + +typedef struct { + err_stats rx_stats; + err_stats tx_stats; +} __attribute__((packed)) sw_stats; + +typedef struct { + uint8_t queue_state; /*0:rx 1:tx*/ + err_stats stats; +} __attribute__((packed)) que_err_stats; + +typedef struct { + union { + que_err_stats que_stats[MAX_QUE_CNT]; + sw_stats port_stats; + }; +} __attribute__((packed)) zxdh_sw_stats_reply; + +typedef struct { + uint64_t rx_arn_phy; + uint64_t tx_psn_phy; + uint64_t rx_psn_phy; + uint64_t tx_psn_ack_phy; + uint64_t rx_psn_ack_phy; +} __attribute__((packed)) udp_phy_stats; + +typedef struct { + zxdh_reps_flag flag; + union { + zxdh_vf_reload_msg vf_reload_msg; + zxdh_thash_key_set_msg thash_key_set_msg; + zxdh_rx_flow_hash_set_msg rx_flow_hash_set_msg; + zxdh_link_state_msg link_state_msg; + agent_mac_autoneg_msg mac_set_msg; + agent_stats stats_msg; + 
np_stats np_stats_msg; + udp_phy_stats udp_phy_stats_msg; + agent_mac_fec_mode_msg mac_fec_mode_msg; + agent_mac_fc_mode_msg mac_fc_mode_msg; + agent_mac_module_eeprom_msg module_eeprom_msg; + common_recv_msg cmn_recv_msg; + common_vq_msg cmn_vq_msg; + zxdh_port_mac_set_rsp vf_mac_set_msg; + zxdh_mac_addr_msg vf_mac_addr_get_msg; + zxdh_vf_init_msg vf_init_msg; + agent_flash_msg flash_msg; + agent_flash_mac_read_msg flash_mac_read_msg; + agent_debug_lldp_msg debug_lldp_msg; + zxdh_plcr_pf_get_vf_queue_info_rsp plcr_pf_get_vf_queue_info_rsp; + zxdh_rate_limit_set_rsp rate_limit_set_rsp; + zxdh_plcr_work_mode_rsp plcr_work_mode_rsp; + + /*用户态QOS申请限速模板返回profile_id*/ + zxdh_vf_plcr_profile_id_add_rsp vf_plcr_profile_id_add_rsp; + /*用户态QOS获取限速模板*/ + zxdh_vf_plcr_profile_cfg_get_rsp vf_plcr_profile_cfg_get_rsp; + /*用户态QOS获取丢包统计*/ + zxdh_vf_plcr_port_meter_stat_get_rsp + vf_plcr_port_meter_stat_get_rsp; + zxdh_synce_clk_msg synce_clk_recovery_port; + zxdh_mac_tstamp_msg mac_tstamp_msg; + zxdh_delay_statistics_val delay_statistics_val; + zxdh_port_attr_get_msg port_attr_get_msg; + zxdh_rxfh_set_msg rxfh_get_msg; + zxdh_bar_msg_pps msg_pps; + zxdh_slot_id_msg slot_info; + zxdh_flow_op_rsp flow_rsp; + zxdh_mcode_feature_rsp mcode_feature_rsp; + zxdh_k_cmpat_rsp kernel_cmpat_rsp; + zxdh_sw_stats_reply vf_sw_stats_rsp; + zxdh_vf_1588_enable_rsp vf_1588_enable_rsp; + zxdh_fd_cfg_reply fd_cfg_resp; + }; +} zxdh_reps_info; + +union zxdh_msg { + zxdh_msg_info payload; + zxdh_reps_info reps; + vqm_rsp_host_data vqm_reps; + host_to_vqm_msg vqm_msg; +}; + +static inline uint16_t sum_func(void *data, uint16_t len) +{ + uint64_t result = 0; + int idx = 0; + uint16_t ret = 0; + + if (data == NULL) { + return 0; + } + + for (idx = 0; idx < len; idx++) { + result += *((uint8_t *)data + idx); + } + + ret = (uint16_t)result; + return ret; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/pci_irq.c b/drivers/net/ethernet/dinghai/pci_irq.c new file mode 
100644 index 000000000000..c66ff5b239aa --- /dev/null +++ b/drivers/net/ethernet/dinghai/pci_irq.c @@ -0,0 +1,331 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DH_PF_IRQ_CTRL_NUM (1) + +#define DH_SFS_PER_CTRL_IRQ 64 +#define DH_IRQ_CTRL_SF_MAX 8 +/* min num of vectors for SFs to be enabled */ +#define DH_IRQ_VEC_COMP_BASE_SF 2 + +#define DH_EQ_SHARE_IRQ_MAX_COMP (8) +#define DH_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX) +#define DH_EQ_SHARE_IRQ_MIN_COMP (1) +#define DH_EQ_SHARE_IRQ_MIN_CTRL (4) + +static void irq_release(struct dh_irq *irq) +{ + struct dh_irq_pool *pool = irq->pool; + + xa_erase(&pool->irqs, irq->index); + /* free_irq requires that affinity_hint and rmap will be cleared + * before calling it. This is why there is asymmetry with set_rmap + * which should be called after alloc_irq but before request_irq. + */ + // irq_update_affinity_hint(irq->irqn, NULL); + irq_set_affinity_hint(irq->irqn, NULL); + free_cpumask_var(irq->mask); + free_irq(irq->irqn, &irq->nh); + kfree(irq); +} + +int32_t dh_irq_put(struct dh_irq *irq) +{ + struct dh_irq_pool *pool = irq->pool; + int32_t ret = 0; + + mutex_lock(&pool->lock); + irq->refcount--; + if (!irq->refcount) { + irq_release(irq); + ret = 1; + } + mutex_unlock(&pool->lock); + + return ret; +} + +int32_t dh_irq_read_locked(struct dh_irq *irq) +{ + lockdep_assert_held(&irq->pool->lock); + + return irq->refcount; +} + +int32_t dh_irq_get_locked(struct dh_irq *irq) +{ + lockdep_assert_held(&irq->pool->lock); + if (WARN_ON_ONCE(!irq->refcount)) { + return 0; + } + + irq->refcount++; + + return 1; +} + +static int32_t irq_get(struct dh_irq *irq) +{ + int32_t err = 0; + + mutex_lock(&irq->pool->lock); + err = dh_irq_get_locked(irq); + mutex_unlock(&irq->pool->lock); + + return err; +} + +static irqreturn_t irq_int_handler(int32_t irq, void *data) +{ + struct dh_irq *dh_irq = NULL; + + dh_irq = (struct dh_irq *)data; + 
atomic_notifier_call_chain(&dh_irq->nh, 0, dh_irq->pool->data); + + return IRQ_HANDLED; +} + +static void irq_set_name(struct dh_irq_pool *pool, char *name, int32_t vecidx) +{ + if (!strcmp(pool->name, "zxdh_pf_vq")) { + int type = (vecidx - pool->xa_num_irqs.min) % 2; + snprintf(name, DH_MAX_IRQ_NAME, "vq_%s_%d", + type ? "output" : "input", vecidx); + return; + } else if (!strcmp(pool->name, "zxdh_pf_async")) { + snprintf(name, DH_MAX_IRQ_NAME, "async_%d", vecidx); + return; + } else if (!strcmp(pool->name, "zxdh_mpf_gdma")) { + snprintf(name, DH_MAX_IRQ_NAME, "gdma_%d", vecidx); + return; + } +} + +struct dh_irq *dh_irq_alloc(struct dh_irq_pool *pool, int32_t i, + const struct cpumask *affinity) +{ + struct dh_core_dev *dev = pool->dev; + char name[DH_MAX_IRQ_NAME] = {}; + struct dh_irq *irq = NULL; + int32_t err = 0; + int32_t num_cpu = 0; + int32_t cpu_loop = 0; + + irq = kzalloc(sizeof(*irq), GFP_KERNEL); + if (unlikely(irq == NULL)) { + LOG_ERR("irq kzalloc failed\n"); + return ERR_PTR(-ENOMEM); + } + + irq->pool = pool; + irq->irqn = pci_irq_vector(dev->pdev, i); + irq_set_name(pool, name, i); + ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); + snprintf(irq->name, DH_MAX_IRQ_NAME, "%s@pci:%s", name, + pci_name(dev->pdev)); + LOG_DEBUG("i=%d, irqn=%d, name=%s\r\n", i, irq->irqn, irq->name); + + err = request_irq(irq->irqn, irq_int_handler, 0, irq->name, irq); + if (err != 0) { + LOG_ERR("Failed to request irq. 
err = %d\n", err); + goto err_req_irq; + } + + if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) { + LOG_WARN("zalloc_cpumask_var failed\n"); + err = -ENOMEM; + goto err_cpumask; + } + + if (affinity != NULL) { + cpumask_copy(irq->mask, affinity); + irq_set_affinity_hint(irq->irqn, irq->mask); + } else { + num_cpu = num_online_cpus(); + for (cpu_loop = 0; cpu_loop < num_cpu; cpu_loop++) { + cpumask_set_cpu(cpu_loop, irq->mask); + } + irq_set_affinity_hint(irq->irqn, irq->mask); + } + + irq->refcount = 1; + irq->index = i; + err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL)); + if (err != 0) { + LOG_ERR("Failed to alloc xa entry for irq(%u). err = %d\n", + irq->index, err); + goto err_xa; + } + + return irq; + +err_xa: + irq_set_affinity_hint(irq->irqn, NULL); + free_cpumask_var(irq->mask); +err_cpumask: + free_irq(irq->irqn, &irq->nh); +err_req_irq: + kfree(irq); + return ERR_PTR(err); +} + +int32_t dh_irq_attach_nb(struct dh_irq *irq, struct notifier_block *nb) +{ + int32_t ret = 0; + + ret = irq_get(irq); + if (ret == 0) { + return -ENOENT; + } + + ret = atomic_notifier_chain_register(&irq->nh, nb); + if (ret != 0) { + dh_irq_put(irq); + } + + return ret; +} + +int32_t dh_irq_detach_nb(struct dh_irq *irq, struct notifier_block *nb) +{ + int32_t err = 0; + + err = atomic_notifier_chain_unregister(&irq->nh, nb); + dh_irq_put(irq); + + return err; +} + +struct cpumask *dh_irq_get_affinity_mask(struct dh_irq *irq) +{ + return irq->mask; +} + +int32_t dh_irq_get_index(struct dh_irq *irq) +{ + return irq->index; +} + +/** + * dh_irqs_release - release one or more IRQs back to the system. + * @irqs: IRQs to be released. + * @nirqs: number of IRQs to be released. 
+ */ +static void dh_irqs_release(struct dh_irq **irqs, int32_t nirqs) +{ + int32_t i; + + for (i = 0; i < nirqs; i++) { + synchronize_irq(irqs[i]->irqn); + dh_irq_put(irqs[i]); + } +} + +/* get a irq from pool*/ +struct dh_irq *zxdh_get_irq_of_pool(struct dh_core_dev *dev, + struct dh_irq_pool *pool) +{ + cpumask_var_t req_mask; + struct dh_irq *irq = NULL; + + if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) { + LOG_ERR("zalloc_cpumask_var failed\n"); + return ERR_PTR(-ENOMEM); + } + cpumask_copy(req_mask, cpu_online_mask); + + irq = dh_irq_affinity_request(pool, req_mask); + + free_cpumask_var(req_mask); + if (IS_ERR_OR_NULL(irq)) { + LOG_ERR("irq=0x%llx dh_irq_affinity_request failed\n", + (unsigned long long)(uintptr_t)irq); + } + + return irq; +} + +/** + * dh_irqs_release_vectors - release one or more IRQs back to the system. + * @irqs: IRQs to be released. + * @nirqs: number of IRQs to be released. + */ +void dh_irqs_release_vectors(struct dh_irq **irqs, int32_t nirqs) +{ + dh_irqs_release(irqs, nirqs); +} + +struct dh_irq_pool *irq_pool_alloc(struct dh_core_dev *dev, int32_t start, + int32_t size, char *name, u32 min_threshold, + u32 max_threshold) +{ + struct dh_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL); + + if (unlikely(pool == NULL)) { + LOG_ERR("pool kvzalloc failed\n"); + return ERR_PTR(-ENOMEM); + } + + pool->dev = dev; + mutex_init(&pool->lock); + xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC); + pool->xa_num_irqs.min = start; + pool->xa_num_irqs.max = start + size - 1; + + if (name) { + snprintf(pool->name, DH_MAX_IRQ_NAME - DH_MAX_IRQ_IDX_CHARS, + "%s", name); + } + + pool->min_threshold = min_threshold * DH_EQ_REFS_PER_IRQ; + pool->max_threshold = max_threshold * DH_EQ_REFS_PER_IRQ; + + return pool; +} + +void irq_pool_free(struct dh_irq_pool *pool) +{ + struct dh_irq *irq = NULL; + unsigned long index; + uint32_t cpu; + + /* There are cases in which we are destrying the irq_table before + * freeing all the IRQs, fast teardown for example. 
Hence, free the irqs + * which might not have been freed. + */ + xa_for_each(&pool->irqs, index, irq) { + irq_release(irq); + } + xa_destroy(&pool->irqs); + mutex_destroy(&pool->lock); + + if (pool->irqs_per_cpu) { + for_each_online_cpu(cpu) { + WARN_ON(pool->irqs_per_cpu[cpu]); + } + kfree(pool->irqs_per_cpu); + } + + kvfree(pool); +} + +void dh_irq_table_cleanup(struct dh_core_dev *dev) +{ + if (dh_core_is_sf(dev)) { + return; + } + + kvfree(dev->irq_table.priv); +} diff --git a/drivers/net/ethernet/dinghai/plcr.c b/drivers/net/ethernet/dinghai/plcr.c new file mode 100644 index 000000000000..ff6b0d40e9c1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/plcr.c @@ -0,0 +1,2849 @@ +#include +#include +#include +#include +#include +#include +#include +#include "en_aux.h" +#include "en_ethtool/ethtool.h" +#include +#include +#include +#include "en_np/table/include/dpp_tbl_api.h" +#include "en_np/table/include/dpp_tbl_plcr.h" +#include "en_aux/en_aux_events.h" +#include "en_aux/en_aux_eq.h" +#include "en_aux/en_aux_cmd.h" +#include "msg_common.h" +#include "cmd/msg_chan_priv.h" +#include "en_pf.h" +#include +#include "en_aux/en_aux_ioctl.h" +#ifdef TIME_STAMP_1588 +#include "en_aux/en_1588_pkt_proc.h" +#endif + +#define VQM_BAR_MSG 36 + +#define OPCODE_GET 0 +#define OPCODE_SET 1 + +#define CMD_MAC 1 +#define CMD_ENABLED_QP 4 +#define CMD_FEATURES 5 +#define CMD_DRIVER_STATUS 6 +#define CMD_VF_STATS 7 +#define CMD_VF_FLAG 8 +#define CMD_VF_QOS 9 +#define CMD_VF_POLL 10 +#define CMD_GLOBAL_FEATURES 11 + +const uint32_t gaudPlcrCarxProfileNum[E_PLCR_CAR_NUM] = { + PLCR_CAR_A_PROFILE_RES_NUM, //一级CAR:512个限速模板 + PLCR_CAR_B_PROFILE_RES_NUM, //二级CAR:128个限速模板 + PLCR_CAR_C_PROFILE_RES_NUM, //三级CAR:32个限速模板 +}; + +const uint32_t gaudPlcrCarxFlowIdNum[E_PLCR_CAR_NUM] = { + PLCR_CAR_A_FLOWID_RES_NUM, //一级CAR:包含内核和dpdk的id + PLCR_CAR_B_FLOWID_RES_NUM, //二级CAR:前2304个分配给vf,后64个个分配给pf + PLCR_CAR_C_FLOWID_RES_NUM, //三级CAR +}; + +struct zxdh_plcr_cbs gat_carA_byte_rate_limit_cbs[] = { + 
{ 0, 500, 4 * 1024 * 1024 }, + { 500, 800, 10 * 1024 * 1024 }, + { 800, 1500, 12 * 1024 * 1024 }, + { 1500, 3000, 15 * 1024 * 1024 }, + { 3000, 12000, 20 * 1024 * 1024 }, + { 12000, 20000, 30 * 1024 * 1024 }, + { 20000, 500000, 50 * 1024 * 1024 }, +}; + +struct zxdh_plcr_cbs gat_carB_byte_rate_limit_cbs[] = { + { 0, 4000, 8 * 1024 * 1024 }, + { 4000, 8000, 16 * 1024 * 1024 }, + { 8000, 16000, 64 * 1024 * 1024 }, + { 16000, 500000, 128 * 1024 * 1024 - 1 }, +}; + +static inline struct zxdh_en_device * +pf_dev_get_edev(struct zxdh_pf_device *pf_dev) +{ + struct zxdh_auxiliary_device *adev = NULL; + struct zxdh_en_sf_container *sf_con = NULL; + struct zxdh_en_sf_device *en_sf_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + + adev = pf_dev->adevs_table[0].adev; //sf adev + sf_con = container_of(adev, struct zxdh_en_sf_container, adev); + en_sf_dev = dh_core_priv(sf_con->cdev); //sf cdev + en_priv = en_sf_dev->adev[0]->dev.driver_data; //en adev + if (en_priv == NULL) + return ERR_PTR(-ENODEV); + + en_dev = &en_priv->edev; + if (en_dev == NULL && !en_dev->init_comp_flag) { + LOG_ERR("en_device not initialized!\n"); + return ERR_PTR(-ENODEV); + } + + return en_dev; +} + +/* +todo: +rsvd字段为1标识失败,为0标识成功 +修改原有的限速值,只用修改限速模板,不用再调用关联函数; +还有代码中在配置限速模板的时候引用+1了,会影响现有的流程的(结果可能没问题,需要考虑是不是将+1和-1更换位置); +如果找到共享模板,在配置队列失败的时候会减一,加一和减一的位置不对称,会导致计数值不正确。 +*/ + +/* +函数功能:将用户输入的速率,转换成限速模板配置寄存器的格式 +入参: + ---max_rate : 单位是Mbit/s + +返回值:返回vqm中的发送队列号,发送队列号为奇数 +*/ +uint32_t zxdh_plcr_user_maxrate_2_reg(uint32_t user_max_rate) +{ + uint64_t reg_maxrate; + + PLCR_FUNC_DBG_ENTER(); + reg_maxrate = ((uint64_t)user_max_rate << 10 / PLCR_STEP_SIZE); + return (uint32_t)reg_maxrate; +} + +/* +函数功能:将寄存器中的配置值,转换成用户的限速值() +将用户输入的速率,转换成限速模板配置寄存器的格式 +入参: + ---maxrate_cfg : 单位是61Kb/s +出参: + ---user_max_rate : 单位是Mbit/s + +返回值:返回vqm中的发送队列号,发送队列号为奇数 +*/ +uint32_t zxdh_plcr_reg_maxrate_user(uint32_t reg_maxrate) +{ + uint32_t user_max_rate; + + PLCR_FUNC_DBG_ENTER(); + 
+ user_max_rate = reg_maxrate * PLCR_STEP_SIZE / 1024; + + return user_max_rate; +} + +/* +函数功能:查找是否有共享的限速模板 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,取值:CAR_A,CAR_B,CAR_C + ---profile_cfg: 待查询的限速模板参数,可能是字节限速模板或者包限速模板 + ---profile_id : 保存查询到的限速模板的id + +返回值:0表示查询成功,其它值表示查询失败 +*/ +static int32_t zxdh_plcr_match_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg, + uint16_t *profile_id) +{ + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + struct zxdh_plcr_profile *profile; + unsigned long index; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *pkt_profile_cfg = + (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)(profile_cfg); + uint32_t profile_max_num = gaudPlcrCarxProfileNum[car_type]; + + PLCR_FUNC_DBG_ENTER(); + + /*遍历vf申请的某一级CAR所有限速模板,是否有指定速率的限速模板存在*/ + xa_for_each_range(xarray_profile, index, profile, 0, profile_max_num) { + if (0 == profile->ref_cnt) { + continue; + } + + /*包限速模板比较*/ + if (E_RATE_LIMIT_PACKET == profile_cfg->pkt_sign) { + if ((pkt_profile_cfg->pkt_sign == + (((DPP_STAT_CAR_PKT_PROFILE_CFG_T + *)(&profile->profile_cfg)) + ->pkt_sign)) && + (pkt_profile_cfg->cir == + (((DPP_STAT_CAR_PKT_PROFILE_CFG_T + *)(&profile->profile_cfg)) + ->cir)) && + (pkt_profile_cfg->cbs == + (((DPP_STAT_CAR_PKT_PROFILE_CFG_T + *)(&profile->profile_cfg)) + ->cbs))) { + *profile_id = profile->profile_id; + PLCR_LOG_INFO("profile_id = %d\n", *profile_id); + + return 0; + } + } + //字节限速模板比较 + else if (E_RATE_LIMIT_BYTE == profile_cfg->pkt_sign) { + if ((profile->profile_cfg.pkt_sign == + profile_cfg->pkt_sign) && + (profile->profile_cfg.cd == + profile_cfg + ->cd) && /**< @brief CD算法标志/令牌桶算法标志 0:srtcm 1:trtcm 2:MEF10.1*/ + (profile->profile_cfg.cf == + profile_cfg + ->cf) && /**< @brief CF溢出耦合标志,0:不溢出,1:溢出*/ + (profile->profile_cfg.cm == + profile_cfg + ->cm) && /**< @brief CM色盲/色敏标志,0:色盲模式,1:色敏模式 */ + (profile->profile_cfg.cir == + profile_cfg + ->cir) && /**< @brief C令牌桶添加速率(0~X, X 
Gbps/64kbps),最小值为64Kbps,步长为64Kbps*/ + (profile->profile_cfg.cbs == + profile_cfg + ->cbs) && /**< @brief C桶桶深(XM),配置范围为0~XMByte-1,步长为1Byte*/ + (profile->profile_cfg.eir == + profile_cfg + ->eir) && /**< @brief E令牌桶添加速率(0~X, XGbps/64kbps),最小值为64Kbps,步长为64Kbps*/ + (profile->profile_cfg.ebs == + profile_cfg + ->ebs)) { /**< @brief E桶桶深(XM),配置范围为0~XMByte-1,步长为1Byte*/ + *profile_id = profile->profile_id; + PLCR_LOG_INFO("profile_id = %d\n", *profile_id); + + return 0; + } + } + } + + /* 未搜索到匹配项*/ + return -ERANGE; +} + +/* +函数功能:为flowid申请一个xarray成员 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---flow_id : 作为xarray的索引 +返回值:返回创建的zxdh_plcr_flow *指针 +*/ +int32_t zxdh_plcr_req_flow(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t flow_id, + struct zxdh_plcr_flow **flow) +{ + struct zxdh_plcr_flow *flow_old; + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + /*1. malloc一个flow结构体*/ + *flow = kzalloc(sizeof(struct zxdh_plcr_flow), GFP_KERNEL); + if (unlikely(NULL == *flow)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + + /*2. 存储到xarray*/ + flow_old = xa_store(xarray_flow, flow_id, *flow, GFP_KERNEL); + if (flow_old) { + /* 正常情况下,这里应该都是空的*/ + kfree(flow_old); + } + + return 0; +} + +/* +函数功能:释放一个xarray下的flowid成员 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---flow_id : 作为xarray的索引 +返回值:返回创建的zxdh_plcr_flow *指针 +*/ +int32_t zxdh_plcr_release_flow(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t flow_id) +{ + struct zxdh_plcr_flow *flow; + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + /*1. 检查xarray里是否有该flow*/ + flow = xa_load(xarray_flow, flow_id); + if (NULL == flow) { + PLCR_LOG_ERR("failed to release an invalid flow_id=%d\n", + flow_id); + return EINVAL; + } + + /*2. 从xarray删除该成员*/ + xa_erase(xarray_flow, flow_id); + + /*3. 
释放flow*/ + kfree(flow); + + return 0; +} + +/* +函数功能:更新flow的成员信息 +入参: + ---flow : xarray的成员 + ---vport : + ---max_rate : + ---min_rate : +返回值:无 +*/ +void zxdh_plcr_update_flow(struct zxdh_plcr_flow *flow, uint16_t vport, + uint32_t max_rate, uint32_t min_rate) +{ + flow->vport = vport; + flow->max_rate = max_rate; + flow->min_rate = min_rate; +} + +/* +函数功能:申请一个指定CAR的限速模板 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---vport : vf端口号 + ---profile_id_out : 返回申请到的限速模板的profile_id +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_req_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t *profile_id_out) +{ + int rtn = 0; + struct zxdh_plcr_profile *profile; + struct zxdh_plcr_profile *profile_old; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + uint16_t profile_id = 0; + uint64_t cred_id = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + PLCR_FUNC_DBG_ENTER(); + + /*在指定的CAR申请一个新的限速模板*/ + rtn = dpp_car_profile_id_add(&pf_info, (uint32_t)car_type, &cred_id); + if (rtn) { + /*判断消息交互是否正常*/ + PLCR_LOG_ERR("failed to request a new profile\n"); + return -EINVAL; + } + + /*判断riscv是否成功返回了有效的profile:bit[56 - 63]为0标识成功,为1标识失败*/ + if (0 != ((cred_id >> 56) & 0xFF)) { + PLCR_LOG_ERR("failed to request a new profile\n"); + return -EINVAL; + } + + /*提取profile id*/ + profile_id = PROFILE_ID(cred_id); + *profile_id_out = profile_id; + PLCR_LOG_INFO( + "dpp_car_profile_id_add: pf_info.vport = 0x%x, car_type = %d, profile_id = %d, cred_id = 0x%llx\n", + pf_info.vport, car_type, profile_id, cred_id); + + /*申请一个限速模板结构体,保存限速模板信息*/ + profile = kzalloc(sizeof(struct zxdh_plcr_profile), GFP_KERNEL); + if (unlikely(NULL == profile)) { + dpp_car_profile_id_delete(&pf_info, (uint32_t)car_type, + cred_id); + PLCR_LOG_ERR("failed to kzalloc profile\n"); + + return -ENOMEM; + } + profile->ref_cnt = 0; + profile->max_rate = 0; + profile->min_rate = 0; + profile->cred_id = 
cred_id; + profile->profile_id = profile_id; + profile->vport = pf_dev->vport; + + /*将申请到的限速模板资源存储起来*/ + profile_old = xa_store(xarray_profile, profile_id, profile, GFP_KERNEL); + if (profile_old) { //正常情况下,这里应该都是空的 + PLCR_LOG_ERR("failed to unreachable branch\n"); + kfree(profile_old); + } + + return rtn; +} + +/* +函数功能:释放一个指定CAR的限速模板 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---Profile_id : 限速模板的profile_id + ---flag : 热插拔标记 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_release_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id, + uint32_t flag) +{ + int rtn = 0; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + struct zxdh_plcr_profile *profile; + DPP_PF_INFO_T pf_info = { 0 }; + + PLCR_FUNC_DBG_ENTER(); + + /*判断是有是有效成员*/ + profile = xa_load(xarray_profile, profile_id); + if (NULL == profile) { + PLCR_LOG_ERR("failed to release an invalid profile=%d\n", + profile_id); + return EINVAL; + } + + /*如果引用计数为0,则可以释放所有资源*/ + if (0 == profile->ref_cnt) { + pf_info.slot = pf_dev->slot_id; + pf_info.vport = profile->vport; + PLCR_LOG_INFO( + "dpp_car_profile_id_delete: pf_info.vport = 0x%x, car_type = %d, profile_id = %d, cred_id = 0x%llx\n", + pf_info.vport, car_type, profile_id, profile->cred_id); + + /*归还限速模板资源(注意,引用计数为0表示没有关联的flow了)*/ + if (!flag) { + rtn = dpp_car_profile_id_delete(&pf_info, car_type, + profile->cred_id); + if (rtn) { + PLCR_LOG_ERR( + "failed to call dpp_car_profile_id_delete, car_type=%d,profile_id=%d)\n", + car_type, profile_id); + rtn = EINVAL; + } + } + + /*删除xarray中的元素*/ + xa_erase(xarray_profile, profile_id); + + /*释放profile指针*/ + kfree(profile); + } + + /*如果引用计数不为0,就不释放任何资源*/ + return rtn; +} + +/* +函数功能:内核态使用的接口,根据car_type & is_byte_rate_limit & max_rate & min_rate 这4个参数生成限速模板配置参数 + 内核态下各限速场景使用的参数应该是固定的,我们自己根据这4个参数生成完整的结构体参数:DPP_STAT_CAR_PROFILE_CFG_T + 用户态的场景下,会通过消息直接传递过来,不需要组装 +入参: + ---is_pkt_mode: 包限速还是字节限速 + ---car_type : plcr CAR层级,一级,二级或三级 + 
---is_byte_rate_limit : 是否是字节限速,为后续包限速预留参数,这个值传参的时候暂时固定为1, + 后续可能会有一个全局变量进行指示,会提供对应的接口来获取这个入参值进行传递 + ---max_rate : 用户指定的最大限速值 + ---min_rate : 用户指定的最小承诺速率 + ---profile_cfg : 返回值,填充好的限速模板参数 +返回值:成功返回0,失败返回其它值 +*/ +static int zxdh_plcr_gen_profile(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_PKT_BYTE is_pkt_mode, + E_PLCR_CAR_TYPE car_type, uint32_t max_rate, + uint32_t min_rate, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg) +{ + int rtn = 0; + int pri = 0; + uint32_t cbs = 0; + uint32_t ebs = 0; + DPP_STAT_CAR_PKT_PROFILE_CFG_T *pkt_profile_cfg = + (DPP_STAT_CAR_PKT_PROFILE_CFG_T *)(profile_cfg); + + PLCR_FUNC_DBG_ENTER(); + + //入参检测 + if ((E_RATE_LIMIT_PACKET == is_pkt_mode) && + (E_PLCR_CAR_A != car_type)) { + PLCR_LOG_ERR( + "failed and only CAR A supports packet rate limit\n"); + rtn = EINVAL; + } + + /*2. 重新生成参数*/ + + /*填充限速模板的参数,准备将这些参数配置到寄存器*/ + memset(profile_cfg, 0, sizeof(*profile_cfg)); + if (E_RATE_LIMIT_BYTE == is_pkt_mode) { + if (USER_MAX_BYTE_RATE < max_rate) + PLCR_COMM_ASSERT(-EINVAL); + + profile_cfg->pkt_sign = E_RATE_LIMIT_BYTE; + profile_cfg->cf = 1; //溢出标志,默认使能 + + if (pf_dev->plcr_table.burst_size) { + cbs = pf_dev->plcr_table.burst_size; + ebs = pf_dev->plcr_table.burst_size; + } else { + cbs = DPP_CAR_MAX_CBS_VALUE; + ebs = DPP_CAR_MAX_EBS_VALUE; + } + + profile_cfg->cbs = cbs; + profile_cfg->ebs = ebs; + profile_cfg->random_disc_c = 0; + profile_cfg->random_disc_e = 0; + + if (E_PLCR_CAR_A == car_type) { + profile_cfg->cm = 0; //色盲模式 + profile_cfg->cd = 0; //0: srTCM,单速率;1:双速率 + profile_cfg->cir = + zxdh_plcr_user_maxrate_2_reg(max_rate); + profile_cfg->eir = 0; + } else if (E_PLCR_CAR_B == car_type) { + profile_cfg->cm = 1; //色敏模式 + profile_cfg->cd = 1; //0: srTCM,单速率;1:双速率 + profile_cfg->cir = + zxdh_plcr_user_maxrate_2_reg(min_rate); + profile_cfg->eir = + zxdh_plcr_user_maxrate_2_reg(max_rate); + } else if (E_PLCR_CAR_C == car_type) { + //todo:端口组限速,待调试确认 + profile_cfg->cm = 1; //色敏模式 + profile_cfg->cd = 0; //0: srTCM,单速率;1:双速率 + 
profile_cfg->cir = + zxdh_plcr_user_maxrate_2_reg(max_rate); + profile_cfg->eir = 0; + } + + for (pri = 0; pri < DPP_CAR_PRI_MAX; pri++) { + profile_cfg->c_pri[pri] = 0; + profile_cfg->e_green_pri[pri] = 0; + profile_cfg->e_yellow_pri[pri] = 0; + } + + PLCR_LOG_INFO( + "cir = 0x%x, eir = 0x%x, cbs = 0x%x, ebs = 0x%x\n", + profile_cfg->cir, profile_cfg->eir, profile_cfg->cbs, + profile_cfg->ebs); + } else { + //超过最大值就报错 + if (USER_MAX_PKT_RATE < max_rate) + PLCR_COMM_ASSERT(-EINVAL); + + if (pf_dev->plcr_table.burst_size) { + cbs = pf_dev->plcr_table.burst_size; + } else { + cbs = DPP_CAR_MAX_PKT_CBS_VALUE; + } + + pkt_profile_cfg->pkt_sign = E_RATE_LIMIT_PACKET; + pkt_profile_cfg->cbs = cbs; + pkt_profile_cfg->cir = max_rate; + + PLCR_LOG_INFO("pkt_type = 0x%x, cir = 0x%x, cbs = 0x%x\n", + pkt_profile_cfg->pkt_sign, pkt_profile_cfg->cir, + pkt_profile_cfg->cbs); + } + + return rtn; +} + +/* +函数功能:更新限速模板参数 +入参: + ---profile_cfg: 限速模板参数结构体 + ---Profile_id : 限速模板的profile_id +返回值:无 +*/ +static void zxdh_plcr_update_profile(DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg, + u_int16_t profile_id) +{ + PLCR_FUNC_DBG_ENTER(); + + /*1. 重新生成参数*/ + profile_cfg->profile_id = profile_id; +} + +/* +函数功能:将限速模板参数,存储到profile下 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---max_rate : 用户原始的限速速率,单位是Mbit/s + ---min_rate : 用户原始的限速速率,单位是Mbit/s + ---profile_cfg: 要存储到profile中的,限速模板参数 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_store_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t user_max_rate, + uint32_t user_min_rate, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg) +{ + int rtn = 0; + uint16_t profile_id; + struct zxdh_plcr_profile *profile; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + /*1. 从限速模板结构体,获取profile_id*/ + profile_id = profile_cfg->profile_id; + + /*2. 
从xarray获取profile*/ + profile = xa_load(xarray_profile, profile_id); + if (NULL == profile) { + PLCR_LOG_ERR( + "failed to specify an invalid profile, profile_id=%d\n", + profile_id); + return -EINVAL; + } + + /*3. 更新profile的参数*/ + profile->max_rate = user_max_rate; + profile->min_rate = user_min_rate; + + /*4. 将完整的限速模板参数,存储到profile结构体下*/ + memcpy(&profile->profile_cfg, profile_cfg, + sizeof(DPP_STAT_CAR_PROFILE_CFG_T)); + + return rtn; +} + +/* +函数功能:将限速模板参数,配置到plcr寄存器中去 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---profile_cfg: 要配置个plcr寄存器的限速模板参数 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_cfg_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg) +{ + int rtn = 0; + uint16_t profile_id = 0; + uint32_t pkt_sign = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + PLCR_FUNC_DBG_ENTER(); + + /*1. 根据限速模板参数,获取profile_id*/ + profile_id = profile_cfg->profile_id; + + /*2. 根据限速模板参数,获取包/字节模式*/ + pkt_sign = profile_cfg->pkt_sign; + + /*3. 
将限速模板参数,配置到寄存器中去*/ + rtn = dpp_car_profile_cfg_set(&pf_info, (uint32_t)car_type, pkt_sign, + profile_id, profile_cfg); + if (rtn) { + PLCR_LOG_ERR( + "failed to configure the profile registers, car_type=%d,profile_id=%d\n", + car_type, profile_id); + return -EINVAL; + } + PLCR_LOG_INFO( + "dpp_car_profile_cfg_set: pf_info.vport = 0x%x, car_type = %d, profile_id = %d, pkt_sign = %d\n", + pf_info.vport, car_type, profile_id, pkt_sign); + + return rtn; +} + +/* +函数功能:获取寄存器中限速模板的参数 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---profile_cfg: 要配置个plcr寄存器的限速模板参数 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_get_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t pkt_sign, + uint16_t profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg) +{ + int rtn = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + PLCR_FUNC_DBG_ENTER(); + + /*3. 将限速模板参数,配置到寄存器中去*/ + PLCR_LOG_INFO( + "dpp_car_profile_cfg_get: pf_info.vport = 0x%x, car_type = %d, profile_id = %d, pkt_sign = %d\n", + pf_info.vport, car_type, profile_id, pkt_sign); + rtn = dpp_car_profile_cfg_get(&pf_info, car_type, pkt_sign, profile_id, + profile_cfg); + if (rtn) { + PLCR_LOG_ERR( + "failed to call dpp_car_profile_cfg_get(), car_type=%d,profile_id=%d\n", + car_type, profile_id); + return -EINVAL; + } + + return rtn; +} + +/* +函数功能:配置指定CAR的限速模板:考虑plcr三级CAR能共享接口 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---flowid : 指定CAR的flow编号 + ---profile_id : 申请到的指定CAR层级的profile资源的id +返回值:成功返回0,失败返回其它值 +*/ +static int zxdh_plcr_bind_flow_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, + uint32_t flowid, uint16_t profile_id) +{ + int rtn = 0; + struct zxdh_plcr_flow *plcr_flow; + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + PLCR_FUNC_DBG_ENTER(); + + /*1. 
获取plcr*/ + plcr_flow = xa_load(xarray_flow, flowid); + if (NULL == plcr_flow) { + PLCR_LOG_ERR( + "failed to xa_load an invalid element,car_type=%d,flowid=%d,profile_id=%d\n", + car_type, flowid, profile_id); + return -EINVAL; + } + + /*2. 调用接口,将flow与profile进行绑定*/ + PLCR_LOG_INFO( + "dpp_car_queue_cfg_set: pf_info.vport = 0x%x, car_type = %d, flowid = %d, profile_id = %d\n", + pf_info.vport, car_type, flowid, profile_id); + rtn = dpp_car_queue_cfg_set(&pf_info, (uint32_t)car_type, flowid, + DROP_DISABLE, PLCR_ENABLE, profile_id); + if (rtn) { + PLCR_LOG_ERR( + "failed to call dpp_car_queue_cfg_set(),car_type=%d,flowid=%d,profile_id=%d\n", + car_type, flowid, profile_id); + return -EINVAL; + } + PLCR_LOG_INFO("Bind profile_%d to flow_%d complete\n", profile_id, + flowid); + + /*3. 将profile_id更新到plcr中*/ + plcr_flow->profile_id = profile_id; + + return rtn; +} + +/* +函数功能:解除flow和profile之间的绑定 & 删除xarray中的元素 & 释放flow指针 +入参: + ---pf_dev : pf设备结构体 + ---car_type : plcr CAR层级,一级,二级或三级 + ---flowid : 指定CAR的flow编号 + ---profile_id : 申请到的指定CAR层级的profile资源的id + ---flag : 热插拔标记 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_unbind_flow_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint16_t profile_id, uint32_t flag) +{ + int rtn = 0; + struct zxdh_plcr_flow *plcr_flow; + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + PLCR_FUNC_DBG_ENTER(); + + /*1. 
检查flow是否与profile是否已绑定*/ + plcr_flow = xa_load(xarray_flow, flowid); + if (NULL == plcr_flow) { + PLCR_LOG_ERR( + "xa_load an invalid element, flowid=%d,profile_id=%d\n", + flowid, profile_id); + return -EINVAL; + } + if (profile_id != plcr_flow->profile_id) { + PLCR_LOG_ERR( + "xa_load an invalid element, profile_id=%d,plcr_flow->profile_id=%d\n", + profile_id, plcr_flow->profile_id); + return -EINVAL; + } + + /*调用接口,将flow与profile进行解除绑定:配置flow,将其不要指向profile*/ + PLCR_LOG_INFO( + "dpp_car_queue_cfg_set: pf_info.vport = 0x%x, car_type = %d, flowid = %d, profile_id = %d\n", + pf_info.vport, car_type, flowid, profile_id); + if (!flag) { + rtn = dpp_car_queue_cfg_set(&pf_info, (uint32_t)car_type, + flowid, DROP_DISABLE, PLCR_DISABLE, + profile_id); + if (rtn) { + PLCR_LOG_ERR( + "failed to call dpp_car_queue_cfg_set(),car_type=%d,flowid=%d,profile_id=%d\n", + car_type, flowid, profile_id); + return rtn; + } + } + + return rtn; +} + +int zxdh_plcr_count_up_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id) +{ + int rtn = 0; + struct zxdh_plcr_profile *plcr_profile; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + plcr_profile = xa_load(xarray_profile, profile_id); + if (NULL == plcr_profile) { + PLCR_LOG_ERR( + "failed to load element form xarray_profile, car_type=%d,profile_id=%d\n", + car_type, profile_id); + return -EINVAL; + } + + plcr_profile->ref_cnt++; + + return rtn; +} + +int zxdh_plcr_count_down_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id) +{ + int rtn = 0; + struct zxdh_plcr_profile *plcr_profile; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + plcr_profile = xa_load(xarray_profile, profile_id); + if (NULL == plcr_profile) { + PLCR_LOG_ERR( + "failed to load element form xarray_profile, car_type=%d,profile_id=%d\n", + car_type, profile_id); + return 
-EINVAL; + } + + //不能对计数为0的profile进行减操作 + if (0 == plcr_profile->ref_cnt) { + PLCR_LOG_ERR("failed and plcr_profile->ref_cnt=0\n"); + return -EINVAL; + } + + plcr_profile->ref_cnt--; + + return rtn; +} + +static int +zxdh_plcr_get_profile_by_flowid(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + struct zxdh_plcr_profile **pplcr_profile) +{ + int rtn = 0; + uint16_t profile_id = 0; + struct zxdh_plcr_profile *plcr_profile; + struct zxdh_plcr_flow *plcr_flow; + struct xarray *xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_type]); + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + + PLCR_FUNC_DBG_ENTER(); + + plcr_flow = xa_load(xarray_flow, flowid); + if (NULL == plcr_flow) { + PLCR_LOG_ERR( + "failed to load element form xarray_flow, car_type=%d,flowid=%d\n", + car_type, flowid); + return -EINVAL; + } + profile_id = plcr_flow->profile_id; + + plcr_profile = xa_load(xarray_profile, profile_id); + if (NULL == plcr_profile) { + PLCR_LOG_ERR( + "failed to load element form xarray_profile,car_type=%d,profile_id=%d\n", + car_type, profile_id); + return -EINVAL; + } + + *pplcr_profile = plcr_profile; + + return rtn; +} + +/*******************************下面是新实现的代码*******************************/ +/* +函数功能:将car之间flowid的映射关系存储起来 + +入参: + --- +出参: + --- + +返回值:成功返回0,失败返回其它值 +*/ +int32_t zxdh_plcr_stroe_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t map_flowid) +{ + int32_t rtn = 0; + + struct xarray *xarray_map = &(pf_dev->plcr_table.plcr_maps[car_type]); + if ((E_PLCR_CAR_A == car_type) || (E_PLCR_CAR_B == car_type)) { + xa_store(xarray_map, flowid, + (void *)(uintptr_t)(FLOWID_2_XARRAY(map_flowid)), + GFP_KERNEL); + } + + return rtn; +} + +int32_t zxdh_plcr_clear_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid) +{ + int32_t rtn = 0; + void *xarray_element; + + struct xarray *xarray_map = &(pf_dev->plcr_table.plcr_maps[car_type]); + if 
((E_PLCR_CAR_A == car_type) || (E_PLCR_CAR_B == car_type)) { + xarray_element = xa_load(xarray_map, flowid); + if (NULL != xarray_element) { + xa_erase(xarray_map, flowid); + } + } + + return rtn; +} + +/* +函数功能:指定前一级的flowid,查询下一级映射的flowid + +入参: + --- +出参: + --- + +返回值:成功返回0,失败返回其它值 +*/ +int32_t zxdh_plcr_get_next_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t *map_flowid) +{ + int32_t rtn = 0; + void *xarray_element; + + struct xarray *xarray_map = &(pf_dev->plcr_table.plcr_maps[car_type]); + if ((E_PLCR_CAR_A == car_type) || (E_PLCR_CAR_B == car_type)) { + xarray_element = xa_load(xarray_map, flowid); + if (NULL == xarray_element) { + rtn = -EINVAL; + } else { + *map_flowid = XARRAY_2_FLOWID( + (uint32_t)(uintptr_t)xarray_element); + } + } else { + rtn = -ERANGE; + } + + return rtn; +} + +/* +函数功能:检查指定的car_type所在的三级car flowid链是否全都没有限速,如果是就进行资源清理 +入参: + ---pf_dev : 设备结构体 + ---vport : 标识vf端口 +场景说明: + 1. 为什么要引入这个接口? + 新的方案引入了mode 0,mode 1,mode 2三种模式; + vport在三级car上有一条完整的flowid映射链; + 用户在解除某一级car指定flowid的限速之后,驱动程序需要检查这个链上是不是没有限速了,且car C是不是处于group 0,如果是这样的话就要切换到模式0; + 2. 切换到模式0的必要性 + 驱动程序中很多限速要需要先判断当前的限速模式; + 如果当前链上已经没有限速,且car C还是处于group 0,就必须切换回模式0,这也是从模式1和模式2切换回模式0的唯一途径 + 3. 在什么时候需要调用这个接口? + 用户在解除某一级car指定flowid的限速之后,需要调用这个接口。 + 4.流程: + 3.1 如果是对car C的group解除限速,说明group是非0的,函数直接返回(废弃,因为有可能是将vf端口从非0group移动到group0) + 3.2 根据vport得到car B的flowid + 3.3 根据car B的flowid,查询得到car c的flowid + 3.4.1 如果car C的flowid处于group 0,就进行模式切换 + 3.4.2 如果car C的flowid不处于group 0,就不进行模式切换,函数返回 + 5.plcr_maps是否需要清除? 
+ 这个资源不需要清除:只有本函数会用到这个记录,且进行限速配置的时候会重新进行映射,且存储的成员不是动态分配的内存,没有必要清理资源; + +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_check_release_flow_chain(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE e_car_type, + uint16_t vport) +{ + int rtn = 0; + E_PLCR_CAR_TYPE car_type; + uint32_t flag1 = 0; + uint32_t flag2 = 0; + unsigned long flow_index; + uint32_t flowid_car_B; + uint32_t flowid_car_C; + struct zxdh_plcr_flow *flow; + struct xarray *xarray_flow; + + PLCR_FUNC_DBG_ENTER(); + + //如果是解除端口组的字节限速:不需要模式切换,因为操作端口组使用的是pf的vport + if (E_PLCR_CAR_C == e_car_type) { + PLCR_LOG_INFO( + "It is not necessary to change mode for vf group limit!\n"); + return rtn; + } + + //统计car A和car B上是否有限速 + for (car_type = E_PLCR_CAR_A, flag1 = 0; car_type < E_PLCR_CAR_C; + car_type++) { + xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + + xa_for_each_range(xarray_flow, flow_index, flow, 0, + gaudPlcrCarxFlowIdNum[car_type]) { + if (vport == flow->vport) { + flag1 = 1; + break; + } + } + if (1 == flag1) { + PLCR_LOG_INFO("flow->flowid = 0x%x, vport = 0x%x\n", + flow->flowid, vport); + break; + } + } + + //检查是否处于group 0 + if (0 == flag1) { + //如果是vf,就要检查group是否为0 + if (VF_ACTIVE(vport)) { + flowid_car_B = VQM_VFID(vport) * 2; + rtn = zxdh_plcr_get_next_map(pf_dev, E_PLCR_CAR_B, + flowid_car_B, + &flowid_car_C); + PLCR_COMM_ASSERT(rtn); + PLCR_LOG_INFO("flowid_car_B = 0x%x\n", flowid_car_B); + PLCR_LOG_INFO("flowid_car_C = 0x%x\n", flowid_car_C); + + //4个EP * 8PF * 2收发方向,每个pf占32个car c flowid,前面2个映射到group 0 + if (0 == (flowid_car_C % (PLCR_CAR_C_FLOWIDS_PER_PF))) { + flag2 = 1; + } + } else { + //如果是pf(队列限速),就没有car B和Car C + flag2 = 1; + } + } + + if (0 != flag2) { + //执行清理操作 + PLCR_LOG_INFO( + "Change to mode0: e_car_type = 0x%x, vport = 0x%x, \n", + e_car_type, vport); + + //1.清理级间映射 + // for (index=0; indexplcr_table.plcr_flows[car_type]); + struct zxdh_plcr_flow *plcr_flow = xa_load(xarray_flowid, flowid); + struct zxdh_plcr_profile *profile_old = NULL; + + PLCR_FUNC_DBG_ENTER(); + + /*1. 
如果二次修改的限速值和原来的一样,这个配置操作无意义,直接结束*/ + if ((E_PLCR_CAR_A == car_type) || (E_PLCR_CAR_C == car_type)) { + if (plcr_flow->max_rate == max_rate) { + PLCR_LOG_INFO("duplicate max_rate=%d on flowid=%d\n", + max_rate, flowid); + return PLCR_DUPLICATE_RATE; + } + } else if (E_PLCR_CAR_B == car_type) { + if ((plcr_flow->max_rate == max_rate) && + (plcr_flow->min_rate == min_rate)) { + PLCR_LOG_INFO( + "duplicate max_rate=%d, min_rate=%d on flowid=%d\n", + max_rate, min_rate, flowid); + return PLCR_DUPLICATE_RATE; + } + } else { + return -EINVAL; + } + + /*2. 根据新的限速值,生成限速模板参数*/ + rtn = zxdh_plcr_gen_profile(pf_dev, is_pkt_mode, car_type, max_rate, + min_rate, &profile_cfg); + PLCR_COMM_ASSERT(rtn); + + /*3. 获取原来关联的限速模板*/ + rtn = zxdh_plcr_get_profile_by_flowid(pf_dev, car_type, flowid, + &profile_old); + PLCR_COMM_ASSERT(rtn); + + /*3. 先查询有没有相同速率的限速模板*/ + rtn = zxdh_plcr_match_profile(pf_dev, car_type, &profile_cfg, + &profile_id); + if (rtn) { + /*3.1 没有找到能共享的限速模板*/ + + /*3.2 原来的模板不是共享模板:直接修改限速模板的限速值*/ + if (1 == profile_old->ref_cnt) { + /*3.2.1 将profile_id更新到限速模板参数中去*/ + zxdh_plcr_update_profile(&profile_cfg, + profile_old->profile_id); + + /*3.2.2 将限速模板的参数,配置到寄存器中去*/ + rtn = zxdh_plcr_cfg_profile(pf_dev, car_type, + &profile_cfg); + PLCR_COMM_ASSERT(rtn); + + /*3.2.3 将限速模板配置参数,保存到zxdh_plcr_profile结构体*/ + rtn = zxdh_plcr_store_profile(pf_dev, car_type, + max_rate, min_rate, + &profile_cfg); + PLCR_COMM_ASSERT(rtn); + + /* 更新flow中记录的用户原始限速值,todo:是否使用bind函数,要考虑后期在哪里加锁*/ + zxdh_plcr_update_flow(plcr_flow, plcr_flow->vport, + max_rate, min_rate); + + /*这种情况只修改限速模板的寄存器,flowid先前已经与profile绑定了*/ + return rtn; + } + + /*3.3 原来的限速模板是共享模板,所以要申请新的限速模板*/ + rtn = zxdh_plcr_req_profile(pf_dev, car_type, &profile_id); + PLCR_COMM_ASSERT(rtn); + + /*3.4 将profile_id更新到限速模板参数中去*/ + zxdh_plcr_update_profile(&profile_cfg, profile_id); + + /*3.5 将限速模板的参数,配置到寄存器中去*/ + rtn = zxdh_plcr_cfg_profile(pf_dev, car_type, &profile_cfg); + if (rtn) { + PLCR_LOG_ERR( + "failed to call 
zxdh_plcr_cfg_profile()\n"); + goto err4; + } + + /*3.6 将限速模板配置参数,保存到zxdh_plcr_profile结构体*/ + rtn = zxdh_plcr_store_profile(pf_dev, car_type, max_rate, + min_rate, &profile_cfg); + if (rtn) { + PLCR_LOG_ERR( + "failed to call zxdh_plcr_store_profile()\n"); + goto err4; + } + + /*3.7 接下来的绑定流程,和下面是共享的*/ + } + /*4. 查询到共享的限速模板:直接进行绑定即可*/ + rtn = zxdh_plcr_bind_flow_profile(pf_dev, car_type, flowid, profile_id); + if (rtn) { + PLCR_LOG_ERR("failed to call zxdh_plcr_bind_flow_profile()\n"); + goto err4; + } + + /*5. 更新flow中记录的用户原始限速值*/ + zxdh_plcr_update_flow(plcr_flow, plcr_flow->vport, max_rate, min_rate); + + /*6. 新模板使用计数+1*/ + rtn = zxdh_plcr_count_up_profile(pf_dev, car_type, profile_id); + if (rtn) { + PLCR_LOG_ERR("failed to call zxdh_plcr_count_up_profile()\n"); + goto err4; + } + + /*7. 旧的计数模板-1*/ + rtn = zxdh_plcr_count_down_profile(pf_dev, car_type, + profile_old->profile_id); + if (rtn) { + PLCR_LOG_ERR("failed to call zxdh_plcr_count_up_profile()\n"); + goto err4; + } + + zxdh_plcr_release_profile(pf_dev, car_type, profile_old->profile_id, 0); + + return rtn; + +err4: + zxdh_plcr_release_profile(pf_dev, car_type, profile_id, 0); + return rtn; +} + +int zxdh_plcr_remove_rate_limit(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t flag) +{ + int rtn = 0; + struct zxdh_plcr_profile *profile_old = NULL; + + PLCR_FUNC_DBG_ENTER(); + + PLCR_LOG_INFO("car_type=%d,flowid=%d\n", car_type, flowid); + + /* 获取原来关联的限速模板*/ + rtn = zxdh_plcr_get_profile_by_flowid(pf_dev, car_type, flowid, + &profile_old); + PLCR_COMM_ASSERT(rtn); + + /*解除绑定*/ + rtn = zxdh_plcr_unbind_flow_profile(pf_dev, car_type, flowid, + profile_old->profile_id, flag); + //PLCR_COMM_ASSERT(rtn); + + /*限速模板引用计数 -1*/ + rtn = zxdh_plcr_count_down_profile(pf_dev, car_type, + profile_old->profile_id); + //PLCR_COMM_ASSERT(rtn); + + /*释放限速模板:如果引用计数为0,归还模板资源 & 删除xarray元素 & 释放profile指针*/ + rtn = zxdh_plcr_release_profile(pf_dev, car_type, + profile_old->profile_id, flag); + 
//PLCR_COMM_ASSERT(rtn); + + /*释放掉flow*/ + rtn = zxdh_plcr_release_flow(pf_dev, car_type, flowid); + //PLCR_COMM_ASSERT(rtn); + + return rtn; +} + +void zxdh_plcr_count_profiles(struct zxdh_pf_device *pf_dev) +{ + struct zxdh_plcr_profile *profile; + unsigned long index; + uint32_t count = 0; + E_PLCR_CAR_TYPE car_type; + + PLCR_FUNC_DBG_ENTER(); + + for (car_type = E_PLCR_CAR_A; car_type < E_PLCR_CAR_NUM; car_type++) { + count = 0; + xa_for_each_range(&(pf_dev->plcr_table.plcr_profiles[car_type]), + index, profile, 0, + gaudPlcrCarxProfileNum[car_type]) { + count++; + } + PLCR_LOG_INFO("car_type = %d, profiles_num = %d\n", car_type, + count); + } +} + +int zxdh_plcr_set_rate_limit(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_PKT_BYTE is_pkt_mode, + E_PLCR_CAR_TYPE car_type, uint16_t vport, + uint32_t flowid, uint32_t max_rate, + uint32_t min_rate) +{ + int rtn = 0; + struct xarray *xarray_flow = &(pf_dev->plcr_table.plcr_flows[car_type]); + struct zxdh_plcr_flow *flow = NULL; + + PLCR_FUNC_DBG_ENTER(); + + /*1. 
判断该队列先前是否已经配置过限速值*/ + flow = xa_load(xarray_flow, flowid); + if (NULL == flow) { + /*1.1 初次配置限速值*/ + rtn = zxdh_plcr_create_rate_limit(pf_dev, is_pkt_mode, car_type, + vport, flowid, max_rate, + min_rate); + PLCR_COMM_ASSERT(rtn); + } else if ((max_rate != 0) || (min_rate != 0)) { + /*1.2 修改限速值*/ + rtn = zxdh_plcr_modify_rate_limit(pf_dev, is_pkt_mode, car_type, + flowid, max_rate, min_rate); + PLCR_COMM_ASSERT(rtn); + } else { + /*1.3.1 解除限速:即,第二次配置,且max_rate=0,表示用户要解除限速*/ + rtn = zxdh_plcr_remove_rate_limit(pf_dev, car_type, flowid, 0); + PLCR_COMM_ASSERT(rtn); + + zxdh_plcr_check_release_flow_chain(pf_dev, car_type, vport); + + //不是错误码,用来标记是进行了解除限速的操作 + rtn = PLCR_REMOVE_RATE_LIMIT; + } + + zxdh_plcr_count_profiles(pf_dev); + + return rtn; +} + +int zxdh_pf_plcr_set_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE mode) +{ + int rtn = 0; + uint32_t enable = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = vport; + PLCR_LOG_INFO("slot: %d, vport: 0x%x, mode: 0x%x\n", pf_info.slot, + pf_info.vport, mode); + + //Check if the vport attribute table exists. 
+ rtn = dpp_vport_egress_meter_en_get(&pf_info, &enable); + if (ZXIC_PAR_CHK_INVALID_INDEX == rtn) { + PLCR_LOG_INFO( + "Write vport attribute table which does not exist!\n"); + return 0; + } else { + PLCR_COMM_ASSERT(rtn); + } + + //modify the vport attribute table + if (E_RATE_LIMIT_MODE0 == mode) { + rtn = dpp_vport_egress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + } else if (E_RATE_LIMIT_MODE1 == mode) { + rtn = dpp_vport_egress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + + rtn = dpp_vport_egress_meter_mode_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_mode_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + + rtn = dpp_vport_egress_meter_en_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_en_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + } else if (E_RATE_LIMIT_MODE2 == mode) { + rtn = dpp_vport_egress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_en_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + + rtn = dpp_vport_egress_meter_mode_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_mode_set(&pf_info, 0); + PLCR_COMM_ASSERT(rtn); + + rtn = dpp_vport_egress_meter_en_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + rtn = dpp_vport_ingress_meter_en_set(&pf_info, 1); + PLCR_COMM_ASSERT(rtn); + } else { + return -ERANGE; + } + + return rtn; +} + +int zxdh_pf_plcr_get_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE *p_mode) +{ + int rtn = 0; + uint32_t enable = 0; + uint32_t mode = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + PLCR_FUNC_DBG_ENTER(); + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = vport; + PLCR_LOG_INFO("pf_info.slot = %d, pf_info.vport = 0x%x\n", pf_info.slot, + pf_info.vport); + + rtn = dpp_vport_egress_meter_en_get(&pf_info, &enable); + if 
(ZXIC_PAR_CHK_INVALID_INDEX == rtn) { + PLCR_LOG_INFO( + "Read vport attribute table which does not exist!\n"); + *p_mode = E_RATE_LIMIT_MODE3; + return 0; + } else { + PLCR_COMM_ASSERT(rtn); + } + + if (0 == enable) { + *p_mode = E_RATE_LIMIT_MODE0; + } else { + rtn = dpp_vport_egress_meter_mode_get(&pf_info, &mode); + PLCR_COMM_ASSERT(rtn); + + if (1 == mode) { + *p_mode = E_RATE_LIMIT_MODE1; + } else { + *p_mode = E_RATE_LIMIT_MODE2; + } + } + + PLCR_LOG_INFO("mode = %d\n", *p_mode); + + return rtn; +} + +int zxdh_plcr_set_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE mode) +{ + int32_t rtn = 0; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + rtn = zxdh_pf_plcr_set_mode(pf_dev, vport, mode); + PLCR_COMM_ASSERT(rtn); + } else { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_PLCR_SET_MODE; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + msg->payload.plcr_work_mode_msg.vport = vport; + msg->payload.plcr_work_mode_msg.mode = mode; + + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + kfree(msg); + PLCR_COMM_ASSERT(rtn); + } + + return rtn; +} +EXPORT_SYMBOL(zxdh_plcr_set_mode); + +int zxdh_plcr_get_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE *mode) +{ + int32_t rtn = 0; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + union zxdh_msg *msg = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + if (dh_dev->coredev_type == 
DH_COREDEV_PF) { + rtn = zxdh_pf_plcr_get_mode(pf_dev, vport, mode); + PLCR_COMM_ASSERT(rtn); + } else { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_PLCR_GET_MODE; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + msg->payload.plcr_work_mode_msg.vport = vport; + PLCR_LOG_INFO("msg->payload.hdr.vf_id %u\n", + msg->payload.hdr.vf_id); + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (rtn) { + PLCR_LOG_ERR("failed and rtn=%d\n", rtn); + kfree(msg); + return rtn; + } + + *mode = msg->reps.plcr_work_mode_rsp.mode; + kfree(msg); + + PLCR_LOG_INFO("mode = %d\n", *mode); + } + + return rtn; +} + +int zxdh_plcr_show_rate_limit_paras(zxdh_plcr_rate_limit_paras *rate_limit_paras) +{ + PLCR_LOG_INFO("rate_limit_paras->req_type = 0x%x\n", + rate_limit_paras->req_type); + PLCR_LOG_INFO("rate_limit_paras->direction = 0x%x\n", + rate_limit_paras->direction); + PLCR_LOG_INFO("rate_limit_paras->mode = 0x%x\n", + rate_limit_paras->mode); + PLCR_LOG_INFO("rate_limit_paras->max_rate = 0x%x\n", + rate_limit_paras->max_rate); + PLCR_LOG_INFO("rate_limit_paras->min_rate = 0x%x\n", + rate_limit_paras->min_rate); + PLCR_LOG_INFO("rate_limit_paras->queue_id = 0x%x\n", + rate_limit_paras->queue_id); + PLCR_LOG_INFO("rate_limit_paras->vf_idx = 0x%x\n", + rate_limit_paras->vf_idx); + PLCR_LOG_INFO("rate_limit_paras->vfid = 0x%x\n", + rate_limit_paras->vfid); + PLCR_LOG_INFO("rate_limit_paras->vport = 0x%x\n", + rate_limit_paras->vport); + PLCR_LOG_INFO("rate_limit_paras->group_id = 0x%x\n", + rate_limit_paras->group_id); + + return 0; +} + +int zxdh_plcr_check_req_type(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_MODE mode, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + E_RATE_LIMIT_REQ_TYPE *req_type) +{ + PLCR_FUNC_DBG_ENTER(); + 
+ //队列字节限速 + if ((rate_limit_paras->req_type == E_RATE_LIMIT_REQ_QUEUE_BYTE) && + (rate_limit_paras->mode == E_RATE_LIMIT_BYTE) && + ((rate_limit_paras->min_rate != PLCR_INVALID_PARAM) || + (rate_limit_paras->max_rate != PLCR_INVALID_PARAM)) && + (rate_limit_paras->direction == E_RATE_LIMIT_TX) && + (rate_limit_paras->vf_idx == PLCR_INVALID_PARAM) && + (rate_limit_paras->group_id == PLCR_INVALID_PARAM) && + (rate_limit_paras->queue_id < PLCR_MAX_QUEUE_PAIRS)) { + *req_type = E_RATE_LIMIT_REQ_QUEUE_BYTE; + + //mode2模式下不支持队列字节限速 + if (E_RATE_LIMIT_MODE2 == mode) { + PLCR_LOG_ERR( + "E_RATE_LIMIT_REQ_QUEUE_BYTE is not supported under E_RATE_LIMIT_MODE2\n"); + return -EPERM; + } + + //端口组默认为0 + rate_limit_paras->group_id = 0; + + return 0; + } + //vf端口字节限速 + else if ((rate_limit_paras->req_type == E_RATE_LIMIT_REQ_VF_BYTE) && + (rate_limit_paras->mode == E_RATE_LIMIT_BYTE) && + ((rate_limit_paras->min_rate != PLCR_INVALID_PARAM) || + (rate_limit_paras->max_rate != PLCR_INVALID_PARAM)) && + ((rate_limit_paras->direction == E_RATE_LIMIT_RX) || + (rate_limit_paras->direction == E_RATE_LIMIT_TX)) && + (rate_limit_paras->group_id == PLCR_INVALID_PARAM) && + (rate_limit_paras->vf_idx != PLCR_INVALID_PARAM)) { + //端口组默认为0: + rate_limit_paras->group_id = 0; + + *req_type = E_RATE_LIMIT_REQ_VF_BYTE; + return 0; + } + //vf端口包限速 + else if ((rate_limit_paras->req_type == E_RATE_LIMIT_REQ_VF_PKT) && + (rate_limit_paras->mode == E_RATE_LIMIT_PACKET) && + ((rate_limit_paras->min_rate != PLCR_INVALID_PARAM) || + (rate_limit_paras->max_rate != PLCR_INVALID_PARAM)) && + ((rate_limit_paras->direction == E_RATE_LIMIT_RX) || + (rate_limit_paras->direction == E_RATE_LIMIT_TX)) && + (rate_limit_paras->group_id == PLCR_INVALID_PARAM) && + (rate_limit_paras->vf_idx != PLCR_INVALID_PARAM)) { + *req_type = E_RATE_LIMIT_REQ_VF_PKT; + //mode1模式下不支持端口包限速 + if (E_RATE_LIMIT_MODE1 == mode) { + PLCR_LOG_ERR( + "E_RATE_LIMIT_REQ_VF_PKT is not supported under E_RATE_LIMIT_MODE1\n"); + return -EPERM; + } 
+ + //端口组默认为0 + rate_limit_paras->group_id = 0; + + return 0; + } + //端口组字节限速 + else if ((rate_limit_paras->req_type == + E_RATE_LIMIT_REQ_VF_GROUP_BYTE) && + (rate_limit_paras->mode == E_RATE_LIMIT_BYTE) && + ((rate_limit_paras->min_rate != PLCR_INVALID_PARAM) || + (rate_limit_paras->max_rate != PLCR_INVALID_PARAM)) && + ((rate_limit_paras->direction == E_RATE_LIMIT_RX) || + (rate_limit_paras->direction == E_RATE_LIMIT_TX)) && + (rate_limit_paras->vf_idx == + PLCR_INVALID_PARAM) && //不需要flowid级间映射,不需要vf_idx,不需要Vfid + (rate_limit_paras->group_id != PLCR_INVALID_PARAM)) { + *req_type = E_RATE_LIMIT_REQ_VF_GROUP_BYTE; + return 0; + } + //移动vf端口组 + else if ((rate_limit_paras->req_type == + E_RATE_LIMIT_REQ_MOVE_VF_GROUP) && + (rate_limit_paras->mode == PLCR_INVALID_PARAM) && + ((rate_limit_paras->min_rate == PLCR_INVALID_PARAM) && + (rate_limit_paras->max_rate == PLCR_INVALID_PARAM)) && + ((rate_limit_paras->direction == E_RATE_LIMIT_RX) || + (rate_limit_paras->direction == E_RATE_LIMIT_TX)) && + (rate_limit_paras->vf_idx != PLCR_INVALID_PARAM) && + (rate_limit_paras->group_id != PLCR_INVALID_PARAM)) { + *req_type = E_RATE_LIMIT_REQ_MOVE_VF_GROUP; + return 0; + } else { + //将入参中的请求信息打印出来 + zxdh_plcr_show_rate_limit_paras(rate_limit_paras); + + return PLCR_GET_REQ_TYPE_INVALID_ERR; + } +} + +int32_t zxdh_pf_get_vf_queue_info(struct zxdh_pf_device *pf_dev, int32_t vf_idx, + int32_t *phy_queue_num, int32_t *phy_rx_queue, + int32_t *phy_tx_queue) +{ + int32_t rtn = 0; + int32_t i; + union zxdh_msg *msg = NULL; + int32_t queue_pair_index; + int32_t queue_num; + int32_t queue_pair = 0; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_en_device *en_dev; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + PLCR_FUNC_DBG_ENTER(); + + en_dev = pf_dev_get_edev(pf_dev); + if (IS_ERR(en_dev)) + return PTR_ERR(en_dev); + + msg = kzalloc(sizeof(union zxdh_msg), 
GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr_vf.op_code = ZXDH_PF_GET_VF_QUEUE_INFO; + msg->payload.hdr_vf.dst_pcie_id = + FIND_VF_PCIE_ID(pf_dev->pcie_id, vf_idx); + + for (queue_pair_index = 0; queue_pair_index < PLCR_MAX_QUEUE_PAIRS;) { + msg->payload.plcr_pf_get_vf_queue_info_msg.vir_queue_start = + queue_pair_index; + msg->payload.plcr_pf_get_vf_queue_info_msg.vir_queue_num = 16; + + //get rx&tx phy queue + PLCR_LOG_INFO("vir_queue_start = 0x%x\n", + msg->payload.plcr_pf_get_vf_queue_info_msg + .vir_queue_start); + PLCR_LOG_INFO("vir_queue_num = 0x%x\n", + msg->payload.plcr_pf_get_vf_queue_info_msg + .vir_queue_num); + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_PF_BAR_MSG_TO_VF, msg, + msg, ¶); + if (rtn) { + PLCR_LOG_ERR("failed and rtn=%d\n", rtn); + goto free_msg; + } + + queue_num = + msg->reps.plcr_pf_get_vf_queue_info_rsp.phy_queue_num; + for (i = 0; i < queue_num; i++) { + phy_rx_queue[queue_pair_index * 16 + i] = + msg->reps.plcr_pf_get_vf_queue_info_rsp + .phy_rxq[i]; + phy_tx_queue[queue_pair_index * 16 + i] = + msg->reps.plcr_pf_get_vf_queue_info_rsp + .phy_txq[i]; + + PLCR_LOG_INFO("rxq: 0x%x - 0x%x\n", + queue_pair_index * 16 + i, + phy_rx_queue[queue_pair_index * 16 + i]); + PLCR_LOG_INFO("txq: 0x%x - 0x%x\n", + queue_pair_index * 16 + i, + phy_tx_queue[queue_pair_index * 16 + i]); + } + + queue_pair += queue_num; + + if (queue_num < 16) { + *phy_queue_num = queue_pair; + PLCR_LOG_INFO("phy_queue_num = 0x%x\n", *phy_queue_num); + goto free_msg; + } else { + queue_pair_index += 16; + } + } + +free_msg: + kfree(msg); + return rtn; +} + +int zxdh_plcr_map_flowid(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t map_flowid) +{ + int rtn = 0; + uint32_t map_sp = 0; //priority + union zxdh_msg *msg = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct 
zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + PLCR_FUNC_DBG_ENTER(); + PLCR_LOG_INFO("car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + car_type, flowid, map_flowid); + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + //前面的流程会判断是否需要进行映射,不会出现vf端口原来是非0group,现在会被group 0覆盖的情况 + PLCR_LOG_INFO("flowid=0x%x, map_flowid=0x%x\n", flowid, + map_flowid); + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + PLCR_LOG_INFO( + "dpp_car_queue_map_set: pf_info.vport = 0x%x, car_type = %d, flowid = %d, map_flowid = %d\n", + pf_info.vport, car_type, flowid, map_flowid); + rtn = dpp_car_queue_map_set(&pf_info, car_type, flowid, + map_flowid, map_sp); + PLCR_COMM_ASSERT(rtn); + + rtn = zxdh_plcr_stroe_map(pf_dev, car_type, flowid, map_flowid); + } else { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_MAP_PLCR_FLOWID; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + msg->payload.plcr_flowid_map_msg.car_type = car_type; + msg->payload.plcr_flowid_map_msg.flowid = flowid; + msg->payload.plcr_flowid_map_msg.map_flowid = map_flowid; + msg->payload.plcr_flowid_map_msg.sp = map_sp; + PLCR_LOG_INFO("flowid=0x%x, map_flowid=0x%x\n", flowid, + map_flowid); + + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + kfree(msg); + PLCR_COMM_ASSERT(rtn); + } + + return rtn; +} + +int zxdh_plcr_mode_init(struct zxdh_pf_device *pf_dev) +{ + int32_t rtn = 0; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + + rtn = zxdh_plcr_set_mode(pf_dev, pf_dev->vport, E_RATE_LIMIT_MODE0); + PLCR_COMM_ASSERT(rtn); + + return rtn; +} +int32_t zxdh_plcr_recover_cfg(struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev, int32_t 
vf_idx) +{ + int32_t rtn = 0; + zxdh_plcr_rate_limit_paras rate_limit_paras; + + PLCR_LOG_INFO("zxdh_plcr_recover_cfg zxdh_vf_item %p\n", vf_item); + if (vf_item == NULL) { + PLCR_LOG_INFO("plcr init vfid %u vfitem null\n", vf_idx); + return 0; + } + PLCR_LOG_INFO("zxdh_plcr_recover_cfg vfid %u maxrate %u\n", vf_idx, + vf_item->max_tx_rate); + if (vf_item->max_tx_rate != 0 || vf_item->min_tx_rate != 0) { + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE; + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = vf_item->max_tx_rate; + rate_limit_paras.min_rate = vf_item->min_tx_rate; + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = vf_idx; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, + &rate_limit_paras); + } + + return rtn; +} +int32_t zxdh_plcr_init(struct zxdh_en_priv *en_priv) +{ + int32_t rtn = 0; + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + xa_init(&pf_dev->plcr_table.plcr_profiles[E_PLCR_CAR_A]); + xa_init(&pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_A]); + xa_init(&pf_dev->plcr_table.plcr_maps[E_PLCR_CAR_A]); + + xa_init(&pf_dev->plcr_table.plcr_profiles[E_PLCR_CAR_B]); + xa_init(&pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_B]); + xa_init(&pf_dev->plcr_table.plcr_maps[E_PLCR_CAR_B]); + + xa_init(&pf_dev->plcr_table.plcr_profiles[E_PLCR_CAR_C]); + xa_init(&pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_C]); + + pf_dev->plcr_table.burst_size = 0; + } + + //vf需要设置到mode0模式 + if (dh_dev->coredev_type != DH_COREDEV_VF) { + rtn = zxdh_plcr_mode_init(pf_dev); + PLCR_COMM_ASSERT(rtn); + } + pf_dev->plcr_table.is_init = true; + + return rtn; +} +EXPORT_SYMBOL(zxdh_plcr_init); + +/*释放PF/VF*/ +int32_t 
zxdh_plcr_uninit(struct zxdh_en_priv *en_priv) +{ + int rtn = 0; + union zxdh_msg *msg = NULL; + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dh_core_dev *dh_dev = en_dev->parent->parent; + struct zxdh_pf_device *pf_dev = dh_core_priv(dh_dev); + + unsigned long flow_id; + E_PLCR_CAR_TYPE car_index; + struct xarray *xarray_flow; + struct xarray *xarray_profile; + struct zxdh_plcr_flow *flow = NULL; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + PLCR_FUNC_DBG_ENTER(); + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + for (car_index = E_PLCR_CAR_A; car_index <= E_PLCR_CAR_C; + car_index++) { + xarray_flow = + &(pf_dev->plcr_table.plcr_flows[car_index]); + xarray_profile = + &(pf_dev->plcr_table.plcr_profiles[car_index]); + xa_for_each_range(xarray_flow, flow_id, flow, 0, + gaudPlcrCarxFlowIdNum[car_index]) { + zxdh_plcr_remove_rate_limit( + pf_dev, car_index, flow_id, + en_dev->quick_remove); + + //clear all vport mappings between car B and car C. 
+ if (E_PLCR_CAR_B == car_index) { + zxdh_plcr_clear_map(pf_dev, car_index, + flow_id); + } + } + xa_destroy(xarray_flow); + xa_destroy(xarray_profile); + } + + pf_dev->plcr_table.is_init = false; + } else if (dh_dev->coredev_type == DH_COREDEV_VF) { + if (!en_dev->quick_remove) { + //解除一二级flowid与profile之间的绑定关系,并释放profile + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_PLCR_UNINIT; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + rtn = zxdh_pf_msg_send_cmd(dh_dev, + MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + kfree(msg); + PLCR_COMM_ASSERT(rtn); + } + } + return rtn; +} +EXPORT_SYMBOL(zxdh_plcr_uninit); + +/*******************************下面是新实现的代码*******************************/ +/* +函数功能:获取car A的flowid +入参: + --- + --- +场景: + ---1. 如果处于mode0模式,用户请求队列字节限速,则car A的flowid就是队列 + 这种场景下,pf和vf调用各自的钩子函数,所以通过en_dev直接获取队列信息 + ---2. 如果处于mode0模式,用户请求非队列字节限速,则car A的flowid就是vf端口 + ---3. 如果处于mode1模式,则不管用户是什么请求,没有必要获取car A的flowid,因为场景1已经进行了映射 + ---4. 
如果处于mode2模式,则不管用户是什么请求,没有必要获取car A的flowid,因为场景2已经进行了映射 +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_get_car_a_flowid(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_MODE mode, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + zxdh_plcr_flowids *flowids) +{ + int rtn = 0; + uint32_t queue_pair_index; + struct zxdh_en_device *en_dev; + + PLCR_FUNC_DBG_ENTER(); + + PLCR_LOG_INFO("mode = %d, rate_limit_paras->req_type = %d\n", mode, + rate_limit_paras->req_type); + + en_dev = pf_dev_get_edev(pf_dev); + if (IS_ERR(en_dev)) + return PTR_ERR(en_dev); + + if (PLCR_MAX_QUEUE_PAIRS < (en_dev->curr_queue_pairs)) { + PLCR_COMM_ASSERT( + PLCR_DEV_ALL_QID_2_FLOWID_QUEUE_PAIRS_OVERFLOW); + } + + PLCR_LOG_INFO("rate_limit_paras->req_type = 0x%x\n", + rate_limit_paras->req_type); + + if (E_RATE_LIMIT_REQ_VF_GROUP_BYTE == rate_limit_paras->req_type) { + PLCR_LOG_INFO( + "E_RATE_LIMIT_REQ_VF_GROUP_BYTE does not need car A flowid!\n"); + return 0; + } else if ((((E_RATE_LIMIT_MODE0 == mode)) || + ((E_RATE_LIMIT_MODE1 == mode))) && + (E_RATE_LIMIT_REQ_QUEUE_BYTE == + rate_limit_paras->req_type)) { + //队列字节限速调用的是pf和vf各自的钩子,所以这里直接使用en_dev->curr_queue_pairs + for (queue_pair_index = 0; + queue_pair_index < en_dev->curr_queue_pairs; + queue_pair_index++) { + //rx + flowids->flowids_A[0][queue_pair_index] = + en_dev->rq[queue_pair_index].vq->phy_index; + + //tx + flowids->flowids_A[1][queue_pair_index] = + en_dev->sq[queue_pair_index].vq->phy_index; + + PLCR_LOG_INFO("flowids->flowids_A[0][%d] = 0x%x\n", + queue_pair_index, + flowids->flowids_A[0][queue_pair_index]); + PLCR_LOG_INFO("flowids->flowids_A[1][%d] = 0x%x\n", + queue_pair_index, + flowids->flowids_A[1][queue_pair_index]); + } + flowids->queue_pairs = en_dev->curr_queue_pairs; + + PLCR_LOG_INFO("flowids->queue_pairs = 0x%x\n", + flowids->queue_pairs); + } else if (((E_RATE_LIMIT_MODE0 == mode) && + (E_RATE_LIMIT_REQ_QUEUE_BYTE != + rate_limit_paras->req_type)) || + ((E_RATE_LIMIT_MODE2 == mode) && + (E_RATE_LIMIT_REQ_VF_PKT == 
rate_limit_paras->req_type))) { + flowids->flowid_A[0] = rate_limit_paras->vfid * 2 + + PLCR_CAR_A_DPDK_FLOWID_OFFSET; + flowids->flowid_A[1] = rate_limit_paras->vfid * 2 + 1 + + PLCR_CAR_A_DPDK_FLOWID_OFFSET; + PLCR_LOG_INFO("flowids->flowid_A[0] = 0x%x\n", + flowids->flowid_A[0]); + PLCR_LOG_INFO("flowids->flowid_A[1] = 0x%x\n", + flowids->flowid_A[1]); + } else { + PLCR_LOG_INFO("Car A's flowid is not needed!\n"); + return 0; + } + + return rtn; +} + +int zxdh_plcr_get_car_b_flowid(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_MODE mode, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + zxdh_plcr_flowids *flowids) +{ + int rtn = 0; + + PLCR_FUNC_DBG_ENTER(); + + //pf队列字节限速不需要指定vf_idx,视作一个特殊vf端口,同样根据vqm_vfid分配flowid + //端口组字节限速的前提是先建立端口组,在后面流程会直接退出 + flowids->flowid_B[0] = rate_limit_paras->vfid * 2; + flowids->flowid_B[1] = rate_limit_paras->vfid * 2 + 1; + + PLCR_LOG_INFO("flowids->flowid_B[0] = 0x%x\n", flowids->flowid_B[0]); + PLCR_LOG_INFO("flowids->flowid_B[1] = 0x%x\n", flowids->flowid_B[1]); + + return rtn; +} + +/* +函数功能:获取car c的flowid +资源分配: + ---car c有1024个flowid资源 + ---4个EP * 8个PF * 2个方向(接收和发送)= 64 + ---1024 / 64 = 16,即每个pf分配16个group + +背景描述: + ---除了队列限速,其它所有限速都是操作pf下的文件系统,en_priv对应的肯定是pf设备 + ---rate_limit_paras->vport,这个指向的是vf_idx对应的vf设备 + ---所以,我们需要通过en_priv获取ep和pf func num + +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_get_car_c_flowid(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_MODE mode, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + zxdh_plcr_flowids *flowids) +{ + uint16_t vport = 0; + uint32_t epid = 0; + uint32_t pf_num = 0; + + PLCR_FUNC_DBG_ENTER(); + + vport = pf_dev->vport; + PLCR_LOG_INFO("pf's info : vport = 0x%x\n0", vport); + + if (rate_limit_paras->group_id >= 16) { + PLCR_LOG_ERR("group_id must be less than 16!\n"); + return -ERANGE; + } + + //用户设置vf的队列限速的时候,这里的vport是vf端口,不是pf端口 + //vf的vport和pf的vport一样,都包含了相同的epid和pf function + epid = EPID(vport); + pf_num = FUNC_NUM(vport); + PLCR_LOG_INFO("pf's info : epid = 0x%x, pf_num = 0x%x\n", 
epid, pf_num); + + //ZF group0和EP0共用flowid + epid = epid == 4 ? 0 : epid; + //端口组要么是0,要么是用户指定(端口组字节限速,移动端口组) + flowids->flowid_C[0] = epid * PLCR_CAR_C_FLOWIDS_PER_EP + + pf_num * PLCR_CAR_C_FLOWIDS_PER_PF + + rate_limit_paras->group_id * 2; + flowids->flowid_C[1] = epid * PLCR_CAR_C_FLOWIDS_PER_EP + + pf_num * PLCR_CAR_C_FLOWIDS_PER_PF + + rate_limit_paras->group_id * 2 + 1; + + PLCR_LOG_INFO("flowids->flowid_C[0] = 0x%x\n", flowids->flowid_C[0]); + PLCR_LOG_INFO("flowids->flowid_C[1] = 0x%x\n", flowids->flowid_C[1]); + + return 0; +} + +/* +函数功能:获取对应的Vfi +入参: + ---vf_idx : pf内vf的编号,vf_idx是查找vport的索引,是中间工具,最终使用Vfid来定位vport + ---vfid : vport的Vfid +场景: + ---1. pf队列字节限速: 用户使用echo, 调用pf的钩子函数,走if分支, 不需要指定vf_idx,不需要Vfid(不需要进行级间映射) + ---2. vf队列字节限速: 用户命令使用echo, 调用vf的否子函数,走else分支, 不需要指定vf_idx + ---3. vf端口字节限速: 用户命令使用ip link,调用pf的钩子函数,走if分支, 需要指定vf_idx + ---3. vf端口字节限速: 后期会支持echo, 调用pf的钩子函数,走if分支, 需要指定vf_idx + ---4. vf端口包限速: 用户命令使用echo, 调用pf的钩子函数,走if分支, 需要指定vf_idx + ---5. vf端口组字节限速:用户命令使用echo, 调用pf的钩子函数,走if分支, 不需要指定vf_idx,不需要Vfid(不需要进行级间映射) + ---6. vf端口移动组: 用户命令使用echo, 调用pf的钩子函数,走if分支, 需要指定vf_idx +返回值:成功返回0,失败返回其它值 +*/ +int zxdh_plcr_get_vport_vfid(struct zxdh_pf_device *pf_dev, uint32_t vf_idx, + uint32_t *vport, uint32_t *vfid) +{ + int32_t rtn = 0; + struct zxdh_vf_item *vf_item; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + + PLCR_FUNC_DBG_ENTER(); + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + PLCR_LOG_INFO( + "pf: pf_dev->vport = 0x%x, pf_dev->pcie_id = 0x%x\n", + pf_dev->vport, pf_dev->pcie_id); + //场景中有不需要指定vf_idx的情况,不会生成vf_id + if (PLCR_INVALID_PARAM == vf_idx) { + //pf队列限速和vf端口组字节限速,都不需要指定vf_idx + //但vf端口组字节限速必须要用到vport, pf队列也需分配一个carb flowid + *vport = pf_dev->vport; + *vfid = VQM_VFID(pf_dev->vport); + + PLCR_LOG_INFO( + "vf_idx is not specified! 
vport = %x, vfid = %x\n", + pf_dev->vport, *vfid); + return rtn; + } else { + vf_item = &pf_dev->vf_item[vf_idx]; + if (ERR_PTR(-EINVAL) == vf_item) { + return -EINVAL; + } + + *vport = vf_item->vport; + *vfid = VQM_VFID(vf_item->vport); + + PLCR_LOG_INFO( + "vf_idx = 0x%x, vf_item->vport = 0x%x, vfid = 0x%x\n", + vf_idx, vf_item->vport, *vfid); + PLCR_LOG_INFO("mac address = %x %x %x %x %x %x\n", + vf_item->mac[0], vf_item->mac[1], + vf_item->mac[2], vf_item->mac[3], + vf_item->mac[4], vf_item->mac[5]); + } + } else { + PLCR_LOG_INFO( + "vf: pf_dev->vport = 0x%x, pf_dev->pcie_id = 0x%x\n", + pf_dev->vport, pf_dev->pcie_id); + + *vport = pf_dev->vport; + *vfid = VQM_VFID(pf_dev->vport); + + PLCR_LOG_INFO( + "vf_idx = 0x%x, pf_dev->vport = 0x%x, vfid = 0x%x\n", + vf_idx, pf_dev->vport, *vfid); + } + + return rtn; +} + +int zxdh_plcr_get_cars_flowid(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_MODE mode, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + zxdh_plcr_flowids *flowids) +{ + int rtn = 0; + + PLCR_FUNC_DBG_ENTER(); + + //init the pointer flowids with invalid value + memset(flowids, 0xff, sizeof(zxdh_plcr_flowids)); + + //get car A flowid + rtn = zxdh_plcr_get_car_a_flowid(pf_dev, mode, rate_limit_paras, + flowids); + PLCR_COMM_ASSERT(rtn); + + //get car B flowid + rtn = zxdh_plcr_get_car_b_flowid(pf_dev, mode, rate_limit_paras, + flowids); + PLCR_COMM_ASSERT(rtn); + + //get car C flowid + rtn = zxdh_plcr_get_car_c_flowid(pf_dev, mode, rate_limit_paras, + flowids); + PLCR_COMM_ASSERT(rtn); + + return rtn; +} + +int zxdh_plcr_get_next_mode(struct zxdh_pf_device *pf_dev, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + uint32_t *next_mode) +{ + int rtn = 0; + E_RATE_LIMIT_MODE cur_mode; + + //get vport current mode + rtn = zxdh_plcr_get_mode(pf_dev, rate_limit_paras->vport, &cur_mode); + PLCR_COMM_ASSERT(rtn); + + if (E_RATE_LIMIT_MODE0 == cur_mode) { + if (E_RATE_LIMIT_REQ_QUEUE_BYTE == rate_limit_paras->req_type) { + *next_mode = E_RATE_LIMIT_MODE1; + } 
else { + *next_mode = E_RATE_LIMIT_MODE2; + } + + return 0; + } else { + return -EINVAL; + } + + return rtn; +} + +int zxdh_plcr_init_flow(struct zxdh_pf_device *pf_dev, E_PLCR_CAR_TYPE car_type, + uint32_t flowid) +{ + int rtn = 0; + uint32_t vf_idx; + uint32_t vport; + uint32_t flowid_offset; + uint32_t vfid; + uint32_t map_flowid; + union zxdh_msg *msg = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + PLCR_FUNC_DBG_ENTER(); + + if (dh_dev->coredev_type == DH_COREDEV_PF) { + vport = pf_dev->vport; + flowid_offset = EPID(vport) * PLCR_CAR_C_FLOWIDS_PER_EP + + FUNC_NUM(vport) * PLCR_CAR_C_FLOWIDS_PER_PF; + + if ((car_type == E_PLCR_CAR_C) && + (flowid - flowid_offset > + 1)) { //group_id为0时,不进行检验,直接初始化;只有移动group才需要检验 + //对于carC flow的初始化,需检查目标group中num_vfs是否为0,确定其是否是本次移动前新建 + for (vf_idx = 0; vf_idx < pf_dev->num_vfs; vf_idx++) { + map_flowid = 0xffff; + rtn = zxdh_plcr_get_vport_vfid(pf_dev, vf_idx, + &vport, &vfid); + PLCR_COMM_ASSERT(rtn); + + if (0 == (flowid % 2)) { + rtn = zxdh_plcr_get_next_map( + pf_dev, E_PLCR_CAR_B, vfid * 2, + &map_flowid); + } else { + rtn = zxdh_plcr_get_next_map( + pf_dev, E_PLCR_CAR_B, + vfid * 2 + 1, &map_flowid); + } + if ((!rtn) && (flowid == map_flowid)) { + PLCR_LOG_INFO( + "Group is currently being used by at least one VF\n"); + return 0; + } + } + } + pf_info.slot = pf_dev->slot_id; + pf_info.vport = pf_dev->vport; + PLCR_LOG_INFO( + "dpp_car_queue_cfg_set: vport = 0x%x, car_type = %d, flowid = %d, plcr_en = 0\n", + pf_dev->vport, car_type, flowid); + rtn = dpp_car_queue_cfg_set(&pf_info, (uint32_t)car_type, + flowid, DROP_DISABLE, PLCR_DISABLE, + 0); + if (rtn) { + PLCR_LOG_ERR( + "failed to call dpp_car_queue_cfg_set()\n"); + } + } else { + //针对vf队列限速的场景 + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { 
+ PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_PLCR_FLOW_INIT; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + msg->payload.plcr_flow_init_msg.car_type = car_type; + msg->payload.plcr_flow_init_msg.flowid = flowid; + + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + kfree(msg); + PLCR_COMM_ASSERT(rtn); + } + + return rtn; +} + +/* +函数功能:根据不同的场景进行三级映射,下面枚举出所有场景。 + + 原始模式 限速请求 转换模式 映射 + + mode0 队列字节限速 mode1 (car A -> car B) & (car B -> car C) + mode0 端口字节限速 mode2 (car A -> car B) & (car B -> car C) + mode0 端口包限速 mode2 (car A -> car B) & (car B -> car C) + mode0 移动组 mode2 (car A -> car B) & (car B -> car C) + + mode1 队列字节限速 mode1 不需要 + mode1 端口字节限速 mode1 不需要 + mode1 移动组 mode1 (car B -> car C) + + mode2 端口包限速 mode2 不需要 + mode2 端口字节限速 mode2 不需要 + mode2 移动组 mode2 (car B -> car C) + + x 端口组字节限速 无端口,无模式 不需要映射,直接配置限速 +*/ +int zxdh_plcr_set_cars_map(struct zxdh_pf_device *pf_dev, + zxdh_plcr_rate_limit_paras *rate_limit_paras, + zxdh_plcr_flowids *flowids) +{ + int rtn = 0; + uint32_t flowid; + uint32_t map_flowid; + E_RATE_LIMIT_MODE mode; + E_RATE_LIMIT_REQ_TYPE req_type; + int queue_pair_index; + + PLCR_FUNC_DBG_ENTER(); + + //get vport and vfid + rtn = zxdh_plcr_get_vport_vfid(pf_dev, rate_limit_paras->vf_idx, + &rate_limit_paras->vport, + &rate_limit_paras->vfid); + PLCR_COMM_ASSERT(rtn); + + //get vport current mode + rtn = zxdh_plcr_get_mode(pf_dev, rate_limit_paras->vport, &mode); + PLCR_COMM_ASSERT(rtn); + + //get rate limit type + rtn = zxdh_plcr_check_req_type(pf_dev, mode, rate_limit_paras, + &req_type); + PLCR_COMM_ASSERT(rtn); + + //获取三级car的flowid + rtn = zxdh_plcr_get_cars_flowid(pf_dev, mode, rate_limit_paras, + flowids); + PLCR_COMM_ASSERT(rtn); + + if ((E_RATE_LIMIT_MODE0 == mode) && + (E_RATE_LIMIT_REQ_VF_GROUP_BYTE != req_type)) { + if (E_RATE_LIMIT_REQ_QUEUE_BYTE == req_type) { + 
for (queue_pair_index = 0; + queue_pair_index < flowids->queue_pairs; + queue_pair_index++) { + //rxq:初始化flow,避免有复位前的配置遗留,将car A flowid映射到car B flowid + flowid = + flowids->flowids_A[0][queue_pair_index]; + map_flowid = flowids->flowid_B[0]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_A, flowid, map_flowid); + + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_A, + flowid); + PLCR_COMM_ASSERT(rtn); + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_A, + flowid, map_flowid); + PLCR_COMM_ASSERT(rtn); + + //txq:初始化car A flow,映射到car B flowid + flowid = + flowids->flowids_A[1][queue_pair_index]; + map_flowid = flowids->flowid_B[1]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_A, flowid, map_flowid); + + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_A, + flowid); + PLCR_COMM_ASSERT(rtn); + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_A, + flowid, map_flowid); + PLCR_COMM_ASSERT(rtn); + } + } else if ((E_RATE_LIMIT_REQ_VF_PKT == req_type) || + (E_RATE_LIMIT_REQ_VF_BYTE == req_type) || + (E_RATE_LIMIT_REQ_MOVE_VF_GROUP == req_type)) { + //vfid's rx :初始化car A flow,映射到car B flowid + flowid = flowids->flowid_A[0]; + map_flowid = flowids->flowid_B[0]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_A, flowid, map_flowid); + + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_A, flowid); + PLCR_COMM_ASSERT(rtn); + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_A, flowid, + map_flowid); + PLCR_COMM_ASSERT(rtn); + + //vfid's tx :初始化car A flow,映射到car B flowid + flowid = flowids->flowid_A[1]; + map_flowid = flowids->flowid_B[1]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_A, flowid, map_flowid); + + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_A, flowid); + PLCR_COMM_ASSERT(rtn); + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_A, flowid, + map_flowid); + PLCR_COMM_ASSERT(rtn); + } + } + + /* + 功能:car B的flowid映射到car C的flowid + 场景: 
+ ---1.当前mode0模式,用户请求队列字节限速, 用户未指定group:group默认为0 + ---2.当前mode0模式,用户请求vf端口字节限速,用户未指定group:group默认为0 + ---3.当前mode0模式,用户请求vf端口包限速, 用户未指定group:group默认为0 + ---4.当前mode0模式,用户请求端口组字节限速, 用户指定group:不需要car B到car C的映射,直接设置限速 + + ---5.任意模式, 用户请求移动group, 用户指定group:按照用户指定的group + ---其它场景下:不需要进行映射 + */ + if (((E_RATE_LIMIT_MODE0 == mode) && + (E_RATE_LIMIT_REQ_VF_GROUP_BYTE != req_type)) || + (E_RATE_LIMIT_REQ_MOVE_VF_GROUP == req_type)) { + //car B rx flowid is mapped to car C rx flowid + flowid = flowids->flowid_B[0]; + map_flowid = flowids->flowid_C[0]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_B, flowid, map_flowid); + + if (E_RATE_LIMIT_MODE0 == mode) { + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_B, flowid); + PLCR_COMM_ASSERT(rtn); + } + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_C, map_flowid); + PLCR_COMM_ASSERT(rtn); + + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_B, flowid, + map_flowid); + PLCR_COMM_ASSERT(rtn); + + //car B tx flowid is mapped to car C tx flowid + flowid = flowids->flowid_B[1]; + map_flowid = flowids->flowid_C[1]; + PLCR_LOG_INFO( + "car_type = 0x%x, flowid = 0x%x, map_flowid = 0x%x\n", + E_PLCR_CAR_B, flowid, map_flowid); + + if (E_RATE_LIMIT_MODE0 == mode) { + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_B, flowid); + PLCR_COMM_ASSERT(rtn); + } + rtn = zxdh_plcr_init_flow(pf_dev, E_PLCR_CAR_C, map_flowid); + PLCR_COMM_ASSERT(rtn); + + rtn = zxdh_plcr_map_flowid(pf_dev, E_PLCR_CAR_B, flowid, + map_flowid); + PLCR_COMM_ASSERT(rtn); + PLCR_LOG_INFO("Successfull to map!\n"); + } + + return rtn; +} + +int zxdh_plcr_unified_set_rate_limit( + struct zxdh_pf_device *pf_dev, + zxdh_plcr_rate_limit_paras *rate_limit_paras) +{ + int rtn = 0; + union zxdh_msg *msg = NULL; + zxdh_plcr_flowids flowids; + uint32_t next_mode; + E_RATE_LIMIT_MODE cur_mode; + E_RATE_LIMIT_REQ_TYPE req_type; + //uint32_t vport = 0; + uint32_t flowid = 0; + uint32_t car_type = 0; + uint32_t is_packet = 0; + uint32_t max_rate = 0; + 
uint32_t min_rate = 0; + struct dh_core_dev *dh_dev = + container_of((void *)pf_dev, struct dh_core_dev, priv); + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + + PLCR_FUNC_DBG_ENTER(); + + //对三级car进行映射关联 + rtn = zxdh_plcr_set_cars_map(pf_dev, rate_limit_paras, &flowids); + PLCR_COMM_ASSERT(rtn); + + req_type = rate_limit_paras->req_type; + + //移动vf端口组:不需要设置限速,只需要级间映射(上面已经完成映射);但是需要切换模式 + if (E_RATE_LIMIT_REQ_MOVE_VF_GROUP == req_type) { + rtn = zxdh_plcr_get_mode(pf_dev, rate_limit_paras->vport, + &cur_mode); + + //如果当前是模式0,且vf端口移动到非0 group,则切换到模式2 + if ((E_RATE_LIMIT_MODE0 == cur_mode) && + (0 != rate_limit_paras->group_id)) { + rtn = zxdh_plcr_set_mode(pf_dev, + rate_limit_paras->vport, + E_RATE_LIMIT_MODE2); + PLCR_COMM_ASSERT(rtn); + } + //如果当前是非模式0移动到group 0,则要检查三级car上是否都没有限速,则切换到模式0 + else if ((E_RATE_LIMIT_MODE0 != cur_mode) && + (0 == rate_limit_paras->group_id)) { + zxdh_plcr_check_release_flow_chain( + pf_dev, E_PLCR_CAR_B, rate_limit_paras->vport); + ; + } + + return rtn; + } //队列字节限速 + else if (E_RATE_LIMIT_REQ_QUEUE_BYTE == req_type) { + if (E_RATE_LIMIT_RX == rate_limit_paras->direction) { + flowid = + flowids.flowids_A[0][rate_limit_paras->queue_id]; + } else { + flowid = + flowids.flowids_A[1][rate_limit_paras->queue_id]; + } + + max_rate = rate_limit_paras->max_rate; + min_rate = 0; + car_type = E_PLCR_CAR_A; + is_packet = E_RATE_LIMIT_BYTE; + } //vf端口字节限速 + else if (E_RATE_LIMIT_REQ_VF_BYTE == req_type) { + if (E_RATE_LIMIT_RX == rate_limit_paras->direction) { + flowid = flowids.flowid_B[0]; + } else { + flowid = flowids.flowid_B[1]; + } + + max_rate = rate_limit_paras->max_rate; + min_rate = rate_limit_paras->min_rate; + car_type = E_PLCR_CAR_B; + is_packet = E_RATE_LIMIT_BYTE; + } //vf端口包限速 + else if (E_RATE_LIMIT_REQ_VF_PKT == req_type) { + if (E_RATE_LIMIT_RX == rate_limit_paras->direction) { + flowid = flowids.flowid_A[0]; + } else { + flowid = flowids.flowid_A[1]; + } + + 
max_rate = rate_limit_paras->max_rate; + min_rate = rate_limit_paras->min_rate; + car_type = E_PLCR_CAR_A; + is_packet = E_RATE_LIMIT_PACKET; + } //端口组字节限速 + else if (E_RATE_LIMIT_REQ_VF_GROUP_BYTE == req_type) { + if (E_RATE_LIMIT_RX == rate_limit_paras->direction) { + flowid = flowids.flowid_C[0]; + } else { + flowid = flowids.flowid_C[1]; + } + + max_rate = rate_limit_paras->max_rate; + min_rate = 0; + car_type = E_PLCR_CAR_C; + is_packet = E_RATE_LIMIT_BYTE; + } else { + return -EINVAL; + } + + PLCR_LOG_INFO("rate_limit_paras vport = 0x%x\n", + rate_limit_paras->vport); + PLCR_LOG_INFO("rate_limit_paras vfid = 0x%x\n", + rate_limit_paras->vfid); + PLCR_LOG_INFO("flowid = 0x%x\n", flowid); + PLCR_LOG_INFO("car_type = 0x%x\n", car_type); + PLCR_LOG_INFO("is_packet = 0x%x\n", is_packet); + PLCR_LOG_INFO("max_rate = 0x%x\n", max_rate); + PLCR_LOG_INFO("min_rate = 0x%x\n", min_rate); + + //set rate limit + if (dh_dev->coredev_type == DH_COREDEV_PF) { + rtn = zxdh_plcr_set_rate_limit(pf_dev, is_packet, car_type, + rate_limit_paras->vport, flowid, + max_rate, min_rate); + } else if (dh_dev->coredev_type == DH_COREDEV_VF) { + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (unlikely(NULL == msg)) { + PLCR_LOG_ERR("failed to kzalloc\n"); + return -ENOMEM; + } + msg->payload.hdr.op_code = ZXDH_VF_RATE_LIMIT_SET; + msg->payload.hdr.vport = pf_dev->vport; + msg->payload.hdr.pcie_id = pf_dev->pcie_id; + msg->payload.hdr.vf_id = pf_dev->pcie_id & (0xff); + + msg->payload.rate_limit_set_msg.flowid = flowid; + msg->payload.rate_limit_set_msg.car_type = car_type; + msg->payload.rate_limit_set_msg.is_packet = is_packet; + msg->payload.rate_limit_set_msg.max_rate = max_rate; + msg->payload.rate_limit_set_msg.min_rate = min_rate; + + PLCR_LOG_INFO("rate_limit_set_msg.flowid = 0x%x\n", + msg->payload.rate_limit_set_msg.flowid); + PLCR_LOG_INFO("rate_limit_set_msg.car_type = 0x%x\n", + msg->payload.rate_limit_set_msg.car_type); + PLCR_LOG_INFO("rate_limit_set_msg.is_packet = 
0x%x\n", + msg->payload.rate_limit_set_msg.is_packet); + PLCR_LOG_INFO("rate_limit_set_msg.max_rate = 0x%x\n", + msg->payload.rate_limit_set_msg.max_rate); + PLCR_LOG_INFO("rate_limit_set_msg.min_rate = 0x%x\n", + msg->payload.rate_limit_set_msg.min_rate); + + rtn = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VF_BAR_MSG_TO_PF, msg, + msg, ¶); + if (rtn) { + PLCR_LOG_ERR("failed and rtn=%d\n", rtn); + kfree(msg); + return rtn; + } + + rtn = msg->reps.rate_limit_set_rsp.err_code; + kfree(msg); + } + //0:限速设置成功 + //PLCR_REMOVE_RATE_LIMIT:解除限速成功 + //其它值:错误码 + if (rtn && (PLCR_REMOVE_RATE_LIMIT != rtn)) { + PLCR_LOG_ERR("failed and rtn=%d\n", rtn); + return (rtn == PLCR_DUPLICATE_RATE) ? 0 : -EPERM; + } + + /*************下面2种场景需要模式切换************* + * 1. mode 0 -> mode 1 + * 2. mode 0 -> mode 2 + * 3. vport处于mode 1或者mode 2保持不变,只有在解除限速的时候可能会跳转到mode 0 + * 4. 设置端口组字节限速时,不需要进行模式切换(端口组不直接与pf或vf关联) + ****************************************/ + if ((0 == rtn) && (E_RATE_LIMIT_REQ_VF_GROUP_BYTE != req_type)) { + rtn = zxdh_plcr_get_next_mode(pf_dev, rate_limit_paras, + &next_mode); + if (0 == rtn) { + rtn = zxdh_plcr_set_mode( + pf_dev, rate_limit_paras->vport, next_mode); + PLCR_COMM_ASSERT(rtn); + } + } + + return 0; +} +EXPORT_SYMBOL(zxdh_plcr_unified_set_rate_limit); + +static int zxdh_vqm_send_rate_msg(struct zxdh_pf_device *pf_dev, + uint16_t vqm_vfid, void *in_payload, + uint16_t in_len, struct bar_recv_msg *out) +{ + int rtn = 0; + uint16_t pcie_id = 0; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + + pcie_id = FIND_VF_PCIE_ID(pf_dev->pcie_id, vqm_vfid); + + in.virt_addr = (uint64_t)ZXDH_BAR_MSG_BASE(pf_dev->pci_ioremap_addr[0]); + in.payload_addr = in_payload; + in.payload_len = in_len; + in.emec = 0; + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = VQM_BAR_MSG; + in.src_pcieid = pcie_id; + in.dst_pcieid = 0, + + result.recv_buffer = (void *)out; + result.buffer_len = sizeof(struct bar_recv_msg); + rtn = 
zxdh_bar_chan_sync_msg_send(&in, &result); + if (rtn != BAR_MSG_OK) { + PLCR_LOG_ERR("zxdh_vqm_send_rate_msg failed\n"); + } + return BAR_MSG_OK; +} + +int zxdh_vqm_vf_set_rate_limit(struct zxdh_pf_device *pf_dev, uint16_t vqm_vfid, + uint32_t vf_rate) +{ + int rtn = 0; + uint32_t index = 0; + struct zxdh_vqm_param param = { 0 }; + struct bar_recv_msg *recv_msg = NULL; + + if (NULL == pf_dev) { + PLCR_LOG_ERR("pf_dev NULL ptr\n"); + return -1; + } + + index = VQM_VFID(vqm_vfid); + + param.vqm_vfid = index, param.opcode = OPCODE_SET, + param.cmd = CMD_VF_QOS, param.vqm_rate.pack_rate = 0, + param.vqm_rate.rate = (uint32_t)(((uint64_t)1000 * vf_rate * 106) / + 100); //配置上浮6%,提高整体精度 + + recv_msg = kzalloc(sizeof(struct bar_recv_msg), GFP_KERNEL); + if (NULL == recv_msg) { + PLCR_LOG_ERR("recv_msg NULL ptr\n"); + return -1; + } + + rtn = zxdh_vqm_send_rate_msg(pf_dev, vqm_vfid, ¶m, + (uint16_t)sizeof(param), recv_msg); + if (0 != rtn) { + //考虑兼容性,返回值错误,不影响plcr限速,限速系统正常工作 + PLCR_LOG_ERR("zxdh_vqm_send_rate_msg failed\n"); + } + + PLCR_LOG_INFO( + "The Rate of VF vqm_vfid:0x%x index: %d has been set to: Max Tx Rate: %dMbit/s\n", + vqm_vfid, index, vf_rate); + + return 0; +} +EXPORT_SYMBOL(zxdh_vqm_vf_set_rate_limit); diff --git a/drivers/net/ethernet/dinghai/plcr.h b/drivers/net/ethernet/dinghai/plcr.h new file mode 100644 index 000000000000..f03e436582a0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/plcr.h @@ -0,0 +1,413 @@ +#ifndef __ZXDH_QOS_H__ +#define __ZXDH_QOS_H__ + +#ifdef __cplusplus +extern "C" { +#endif +#include +#include +#include +#include +#include +#include "dpp_drv_qos.h" +#include "en_sf.h" +// #include "./en_aux/queue.h" + +#define ZXDH_PLCR_OPEN +#define ZXDH_SRIOV_SYSFS_EN +#ifdef ZXDH_PLCR_OPEN +#define ZXDH_PLCR_DEBUG +#endif + +#define DROP_ENABLE 1 +#define DROP_DISABLE 0 +#define PLCR_ENABLE 1 +#define PLCR_DISABLE 0 + +#define PLCR_STEP_SIZE 1u +#define PLCR_MIN_RATE 1u +#define PLCR_MAX_RATE (400 * (1 << 20)) +#define PLCR_MAX_PKT_RATE 
DPP_CAR_MAX_PKT_CIR_VALUE + +#define USER_MAX_BYTE_RATE ((PLCR_MAX_RATE * PLCR_STEP_SIZE) >> 10) +#define USER_MAX_PKT_RATE PLCR_MAX_PKT_RATE + +#define CRED_ID(vport, car_type, profile_id) \ + (((vport & 0xffff) << 20) | ((car_type & 0xf) << 16) | \ + (profile_id & 0x1FF)); +#define PROFILE_ID(cred_id) ((cred_id & 0x1FF)) +#define VQM_QUEUE_PAIRS_MAX_NUM 2048 + +#define PLCR_CAR_A_PROFILE_RES_NUM 512 +#define PLCR_CAR_B_PROFILE_RES_NUM 128 +#define PLCR_CAR_C_PROFILE_RES_NUM 32 + +#define PLCR_CAR_A_FLOWID_RES_NUM 10240 +#define PLCR_CAR_B_FLOWID_RES_NUM 2368 // 2304 + 64 +#define PLCR_CAR_C_FLOWID_RES_NUM 1024 +#define PLCR_CAR_B_PF_DEFAULT_FLOWID 2048 + +#define PLCR_CAR_C_FLOWIDS_PER_EP 256 +#define PLCR_CAR_C_FLOWIDS_PER_PF 32 + +/********************************** definition of types **********************************/ +#define FLOWID_2_XARRAY(flowid) ((flowid + 1) * 8) +#define XARRAY_2_FLOWID(flowid) ((flowid / 8) - 1) + +#define PLCR_INVALID_PARAM 0xffffffff +#define PLCR_CAR_A_DPDK_FLOWID_OFFSET 8192 +#define PLCR_MAX_QUEUE_PAIRS 128 //ZXDH_MAX_PAIRS_NUM +#define PLCR_DEBUG + +#ifdef PLCR_DEBUG +#define PLCR_FUNC_DBG_ENTER() \ + LOG_INFO("%s-%d:enter !\n", __FUNCTION__, __LINE__) + +#define PLCR_LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_PF, fmt, ##arg) + +#define PLCR_LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_PF, fmt, ##arg) + +#define PLCR_COMM_ASSERT(rtn) \ + do { \ + if (0 != rtn) { \ + PLCR_LOG_ERR("failed and rtn=%d\n", rtn) \ + return rtn; \ + } \ + } while (0) +#else +#define PLCR_FUNC_DBG_ENTER() + +#define PLCR_LOG_INFO(fmt, arg...) + +#define PLCR_LOG_ERR(fmt, arg...) 
\ + do { \ + LOG_INFO("%s-%d: ", __FUNCTION__, __LINE__); \ + DH_LOG_ERR(MODULE_PF, fmt, ##arg); \ + } while (0) + +#define PLCR_COMM_ASSERT(rtn) \ + do { \ + if (0 != rtn) { \ + return rtn; \ + } \ + } while (0) +#endif + +typedef enum { + PLCR_DEV_ALL_QID_2_FLOWID_QUEUE_PAIRS_OVERFLOW = 1, + PLCR_GET_REQ_TYPE_INVALID_ERR, + PLCR_DUPLICATE_RATE, + PLCR_REMOVE_RATE_LIMIT, + + PLCR_ERROR_NUM +} E_PLCR_ERR_CODE; + +// Define the type of PLCR card +typedef enum { + E_PLCR_CAR_A = 0, // PLCR card type A + E_PLCR_CAR_B = 1, // PLCR card type B + E_PLCR_CAR_C = 2, // PLCR card type C + E_PLCR_CAR_NUM = 3, // Number of PLCR card types +} E_PLCR_CAR_TYPE; + +// Define the union for PLCR profile configuration +union zxdh_plcr_profile_cfg { + DPP_STAT_CAR_PROFILE_CFG_T + byte_profile_cfg; // Byte rate limit profile configuration + DPP_STAT_CAR_PKT_PROFILE_CFG_T + pkt_profile_cfg; // Packet rate limit profile configuration +}; + +typedef enum { + E_RATE_LIMIT_MODE0 = 0, // mode 0:no limit + E_RATE_LIMIT_MODE1, // mode 1:queue is mapped to car A flowid + E_RATE_LIMIT_MODE2, // mode 2:vfid is mapped to car A flowid + E_RATE_LIMIT_MODE3, // mode 3:vf is uninstalled and vport table does not exist. 
+} E_RATE_LIMIT_MODE; + +typedef enum { + E_RATE_LIMIT_REQ_QUEUE_BYTE = 0, // queue byte rate limit + E_RATE_LIMIT_REQ_VF_BYTE, // vf byte rate limit + E_RATE_LIMIT_REQ_VF_GROUP_BYTE, // vf group byte rate limit + E_RATE_LIMIT_REQ_VF_PKT, // vf packet rate limit + E_RATE_LIMIT_REQ_MOVE_VF_GROUP, //just to move vf to other group + E_RATE_LIMIT_REQ_TYPE_NUM, // +} E_RATE_LIMIT_REQ_TYPE; + +/* Define rate limit direction */ +typedef enum { + E_RATE_LIMIT_RX = 0, // Receive direction + E_RATE_LIMIT_TX, // Send direction +} E_RATE_LIMIT_DIRECTION; + +/* Define rate limit type */ +typedef enum { + E_RATE_LIMIT_BYTE = 0, // Byte rate limit + E_RATE_LIMIT_PACKET, // Packet rate limit +} E_RATE_LIMIT_PKT_BYTE; + +/* Define the structure for rate limit parameters */ +typedef struct { + E_RATE_LIMIT_REQ_TYPE req_type; // Limit scope + E_RATE_LIMIT_DIRECTION direction; // Limit direction + E_RATE_LIMIT_PKT_BYTE mode; // Limit mode + uint32_t max_rate; // Maximum rate limit + uint32_t min_rate; // Minimum rate limit + uint32_t queue_id; // Queue id + uint32_t vf_idx; // VF index in PF + uint32_t vfid; // VF id is global + uint32_t vport; // VF id is global + uint32_t group_id; // Group id +} zxdh_plcr_rate_limit_paras; + +typedef struct { + uint16_t queue_pairs; + uint16_t flowids_A[2][PLCR_MAX_QUEUE_PAIRS]; // flowid in car A + uint16_t flowid_A[2]; // flowid in car A + uint16_t flowid_B[2]; // flowid in car B + uint16_t flowid_C[2]; // flowid in car C +} zxdh_plcr_flowids; + +// Define the structure for PLCR profile +struct zxdh_plcr_profile { + uint16_t ref_cnt; // Reference count + uint16_t profile_id; // Profile ID + uint16_t vport; // Virtual port + uint32_t max_rate; // Maximum rate + uint32_t min_rate; // Minimum rate + uint64_t cred_id; // Credit ID + DPP_STAT_CAR_PROFILE_CFG_T profile_cfg; // Profile configuration +}; + +// Define the structure for PLCR flow +struct zxdh_plcr_flow { + uint16_t vport; // Virtual port + uint16_t vf_id; // VF ID + uint16_t profile_id; // 
Profile ID + uint16_t flowid; // flowid + uint16_t map_flowid; // net car's flowid + uint16_t next_flowid; // next car's flowid + uint32_t max_rate; // Maximum rate + uint32_t min_rate; // Minimum rate +}; + +struct dh_core_dev; +struct zxdh_pf_device; +struct zxdh_en_priv; + +typedef enum { + ZXDH_GROUP_RX_RATE = 0, + ZXDH_GROUP_TX_RATE = 1, +} ZXDH_GROUP_DATA_TYPE; + +struct zxdh_group_obj { + struct zxdh_pf_device *pf_dev; + struct kobject kobj; + struct completion free_group_comp; + + struct list_head list; + int32_t group_id; + int32_t num_vfs; + uint32_t max_tx_rate; + uint32_t max_rx_rate; +}; + +struct zxdh_group_work { + struct work_struct work; + struct zxdh_group_obj *group_obj; +}; + +typedef enum { + ZXDH_VF_MIN_RATE = 0, + ZXDH_VF_MAX_RATE = 1, +} ZXDH_VF_METER_DATA_TYPE; + +typedef enum { + VF_METER_RX_BPS = 0, + VF_METER_RX_PPS = 1, + VF_METER_TX_BPS = 2, + VF_METER_TX_PPS = 3, + VF_METER_TYPE_NUM, +} ZXDH_VF_METER_TYPE; + +#define IS_TX_METER(meter_type) \ + (meter_type == VF_METER_TX_BPS || meter_type == VF_METER_TX_PPS) +#define IS_PPS_METER(meter_type) \ + (meter_type == VF_METER_RX_PPS || meter_type == VF_METER_TX_PPS) + +struct zxdh_vf_meter_obj { + struct zxdh_pf_device *pf_dev; + struct zxdh_vf_obj *vf_obj; + struct kobject kobj; + uint32_t meter_type; + uint32_t min_rate; + uint32_t max_rate; +}; + +struct zxdh_vf_meters { + struct kobject *kobj; + struct kobject *rx_obj; + struct kobject *tx_obj; + struct zxdh_vf_meter_obj xps[4]; +}; + +struct zxdh_vf_file_stats { + uint64_t tx_packets; + uint64_t tx_bytes; + uint64_t rx_packets; + uint64_t rx_bytes; + uint64_t rx_broadcast; + uint64_t rx_multicast; + uint64_t tx_broadcast; + uint64_t tx_multicast; + uint64_t rx_dropped; + uint64_t tx_error; + uint64_t rx_error; +}; + +struct zxdh_vf_obj { + struct zxdh_pf_device *pf_dev; + struct kobject kobj; + uint16_t vport; + uint16_t vf_idx; + struct zxdh_group_obj *group; + struct zxdh_vf_meters *meters; +}; + +struct zxdh_sriov_sysfs { + struct 
kobject *sriov_obj; +#ifdef ZXDH_PLCR_DEBUG + struct kobj_attribute burst_attr; + struct kobj_attribute profile_attr; + struct kobj_attribute all_vf_stats_attr; +#endif + struct kobject *groups_obj; + struct zxdh_group_obj *group_0; + struct list_head groups_head; + struct zxdh_vf_obj *vfs; +}; + +struct zxdh_plcr_table { + struct xarray plcr_profiles + [E_PLCR_CAR_NUM]; // Array of PLCR profiles(index = prfile id) + struct xarray + plcr_flows[E_PLCR_CAR_NUM]; // Array of PLCR flows(index = flowid) + struct xarray plcr_maps + [E_PLCR_CAR_NUM]; // Array of PLCR flows mapping relationship + uint32_t burst_size; + bool is_init; +}; + +struct zxdh_plcr_cbs { + uint32_t min_rate; + uint32_t max_rate; + uint32_t cbs; +}; + +struct vqm_rate { + uint32_t pack_rate; //pps + uint32_t rate; //kbps +} __attribute__((packed)); + +struct vqm_poll { + uint16_t poll_mode; /* bit0:rx, bit1:tx, 1:poll, 0:kick */ + uint16_t poll_time; /* 0:not cfg, other:cfg, unit:ms */ +} __attribute__((packed)); + +struct vqm_global_feature { + uint16_t version; + uint64_t features; +} __attribute__((packed)); + +struct zxdh_vqm_param { + uint16_t vqm_vfid; + uint16_t opcode; // 0: get, 1: set + uint16_t cmd; + union { + uint8_t mac[6]; + uint8_t enable_flag; + struct vqm_rate vqm_rate; + struct vqm_poll vqm_poll; + struct vqm_global_feature vqm_global_feature; + }; +} __attribute__((packed)); + +extern const uint32_t gaudPlcrCarxProfileNum[E_PLCR_CAR_NUM]; +extern const uint32_t gaudPlcrCarxFlowIdNum[E_PLCR_CAR_NUM]; +int zxdh_plcr_remove_rate_limit(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t flag); +void zxdh_plcr_count_profiles(struct zxdh_pf_device *pf_dev); +int zxdh_plcr_set_rate_limit(struct zxdh_pf_device *pf_dev, + E_RATE_LIMIT_PKT_BYTE is_pkt_mode, + E_PLCR_CAR_TYPE car_type, uint16_t vport, + uint32_t flowid, uint32_t max_rate, + uint32_t min_rate); +int32_t zxdh_plcr_get_next_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, 
uint32_t flowid, + uint32_t *map_flowid); +int32_t zxdh_plcr_init(struct zxdh_en_priv *en_priv); +int32_t zxdh_plcr_uninit(struct zxdh_en_priv *en_priv); +int zxdh_plcr_get_vport_vfid(struct zxdh_pf_device *pf_dev, uint32_t vf_idx, + uint32_t *vport, uint32_t *vfid); +int zxdh_plcr_req_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t *profile_id_out); +int zxdh_plcr_release_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id, + uint32_t flag); +int zxdh_plcr_count_up_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id); +int zxdh_plcr_count_down_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t profile_id); +int zxdh_plcr_cfg_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg); +int zxdh_plcr_get_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t pkt_sign, + uint16_t profile_id, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg); +uint32_t zxdh_plcr_reg_maxrate_user(uint32_t reg_maxrate); +int32_t zxdh_plcr_req_flow(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t flow_id, + struct zxdh_plcr_flow **flow); +int32_t zxdh_plcr_release_flow(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint16_t flow_id); +void zxdh_plcr_update_flow(struct zxdh_plcr_flow *flow, uint16_t vport, + uint32_t max_rate, uint32_t min_rate); +int zxdh_plcr_store_profile(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t user_max_rate, + uint32_t user_min_rate, + DPP_STAT_CAR_PROFILE_CFG_T *profile_cfg); +int32_t zxdh_plcr_stroe_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid, + uint32_t map_flowid); +int32_t zxdh_plcr_clear_map(struct zxdh_pf_device *pf_dev, + E_PLCR_CAR_TYPE car_type, uint32_t flowid); +int zxdh_pf_plcr_get_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE *p_mode); +int 
zxdh_pf_plcr_set_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE mode); +int zxdh_plcr_get_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE *mode); +int zxdh_plcr_set_mode(struct zxdh_pf_device *pf_dev, uint16_t vport, + E_RATE_LIMIT_MODE mode); +int zxdh_plcr_unified_set_rate_limit( + struct zxdh_pf_device *pf_dev, + zxdh_plcr_rate_limit_paras *rate_limit_paras); +int32_t zxdh_plcr_recover_cfg(struct zxdh_vf_item *vf_item, + struct zxdh_pf_device *pf_dev, int32_t vf_idx); +int zxdh_vqm_vf_set_rate_limit(struct zxdh_pf_device *pf_dev, uint16_t vqm_vfid, + uint32_t vf_rate); + +int zxdh_vf_update_sysfs_group(struct zxdh_pf_device *pf_dev, + struct zxdh_vf_obj *vf, int32_t group_id); +int zxdh_create_vfs_sysfs(struct dh_core_dev *dev, int32_t num_vfs); +void zxdh_destroy_vfs_sysfs(struct dh_core_dev *dev, int32_t num_vfs); +int zxdh_sriov_sysfs_init(struct dh_core_dev *dev); +void zxdh_sriov_sysfs_exit(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/slib.c b/drivers/net/ethernet/dinghai/slib.c new file mode 100644 index 000000000000..1fb0d4c517af --- /dev/null +++ b/drivers/net/ethernet/dinghai/slib.c @@ -0,0 +1,168 @@ +#include +#include "slib.h" + +void *zte_memcpy_s(void *dest, const void *src, size_t n) +{ + return memcpy(dest, src, n); +} + +char *zte_strncpy_s(char *dest, const char *src, size_t count) +{ + return strncpy(dest, src, count); +} + +void *zte_memset_s(void *s, int c, size_t n) +{ + return memset(s, c, n); +} + +int zte_snprintf_s(char *buf, size_t size, const char *format, ...) +{ + va_list args; + int i; + + if (buf == NULL || size == 0) { + return 0; + } + + va_start(args, format); + i = vsnprintf(buf, size, format, args); + va_end(args); + + return i; +} + +int zte_sprintf_s(char *buf, const char *format, ...) 
+{ + va_list args; + int i; + + if (buf == NULL) { + return 0; + } + + va_start(args, format); + i = vsprintf(buf, format, args); + va_end(args); + return i; +} + +int zte_sscanf_s(const char *buf, const char *format, ...) +{ + va_list args; + int i; + + va_start(args, format); + i = vsscanf(buf, format, args); + va_end(args); + + return i; +} + +size_t zte_strlen_s(const char *s) +{ + return strlen(s); +} + +char *zte_strncat_s(char *dest, const char *src, size_t n) +{ + return strncat(dest, src, n); +} + +/* 测试用例 */ +void test_zte_memcpy_s(void) +{ + char src[] = "Hello"; + char dest[10]; + zte_memcpy_s(dest, src, zte_strlen_s(src) + 1); + if (strcmp(dest, src) == 0) { + LOG_INFO("test_zte_memcpy_s success\n"); + } +} + +void test_zte_memset_s(void) +{ + char buf[10]; + zte_memset_s(buf, 'A', 5); + buf[5] = '\0'; + if (strcmp(buf, "AAAAA") == 0) { + LOG_INFO("test_zte_memset_s success\n"); + } +} + +void test_zte_snprintf_s(void) +{ + char buf[20]; + int result = zte_snprintf_s(buf, sizeof(buf), "Number: %d", 123); + if (result > 0) { + if (strcmp(buf, "Number: 123") == 0) { + LOG_INFO("test_zte_snprintf_s success\n"); + } + } +} + +void test_zte_sprintf_s(void) +{ + char buf[20]; + int result = zte_sprintf_s(buf, "Text: %s", "Test"); + if (result > 0) { + if (strcmp(buf, "Text: Test") == 0) { + LOG_INFO("test_zte_sprintf_s success\n"); + } + } +} + +void test_zte_strlen_s(void) +{ + char str[] = "Length"; + size_t len = zte_strlen_s(str); + LOG_INFO("test_zte_strlen_s success, len = %ld\n", len); +} + +void test_zte_strncat_s(void) +{ + char dest[20] = "Hello"; + char src[] = " World"; + zte_strncat_s(dest, src, 6); + if (strcmp(dest, "Hello World") == 0) { + LOG_INFO("test_zte_strncat success\n"); + } +} + +void test_zte_strncpy_s(void) +{ + char src[] = "World"; + char dest[10]; + unsigned int src_len; + + src_len = strlen(src); + zte_strncpy_s(dest, src, src_len + 1); + dest[src_len] = '\0'; + if (strcmp(dest, src) == 0) { + LOG_INFO("test_zte_strncpy_s 
success\n"); + } +} + +void test_zte_sscanf_s(void) +{ + char input[] = "123"; + int value; + int result = zte_sscanf_s(input, "%d", &value); + if (result == 1 && value == 123) { + LOG_INFO("test_zte_sscanf_s success\n"); + } +} + +void recording_not_safe_func(void) +{ + test_zte_memcpy_s(); + test_zte_memset_s(); + test_zte_snprintf_s(); + test_zte_sprintf_s(); + test_zte_strlen_s(); + test_zte_strncat_s(); + test_zte_strncpy_s(); + test_zte_sscanf_s(); + + LOG_INFO("All tests passed!\n"); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/slib.h b/drivers/net/ethernet/dinghai/slib.h new file mode 100644 index 000000000000..f77cc3dc03ab --- /dev/null +++ b/drivers/net/ethernet/dinghai/slib.h @@ -0,0 +1,25 @@ +#ifndef __SLIB_H__ +#define __SLIB_H__ +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +void *zte_memcpy_s(void *dest, const void *src, size_t n); +void *zte_memset_s(void *s, int c, size_t n); +int zte_snprintf_s(char *buf, size_t size, const char *format, ...); +int zte_sprintf_s(char *buf, const char *format, ...); +size_t zte_strlen_s(const char *s); +char *zte_strncat_s(char *dest, const char *src, size_t n); +char *zte_strncpy_s(char *dest, const char *src, size_t count); +int zte_sscanf_s(const char *buf, const char *format, ...); +void recording_not_safe_func(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/sriov_sysfs.c b/drivers/net/ethernet/dinghai/sriov_sysfs.c new file mode 100644 index 000000000000..47f87dbb35ef --- /dev/null +++ b/drivers/net/ethernet/dinghai/sriov_sysfs.c @@ -0,0 +1,1839 @@ +#include +#include +#include +#include +#include +#include "en_aux.h" +#include +#include "en_np/table/include/dpp_tbl_api.h" +#include "en_np/table/include/dpp_tbl_comm.h" +#include "en_aux/en_aux_cmd.h" +#include "msg_common.h" +#include "en_pf.h" +#include "en_pf/en_pf_eq.h" +#include +#ifdef TIME_STAMP_1588 +#include "en_aux/en_1588_pkt_proc.h" +#endif 
+#include +#include +#include +#include +#include +#include + +#define ZXDH_DEFAULT_VF_GROUP_ID 0 +#define ZXDH_MAX_VF_GROUP_OBJ_ID 255 +#define ZXDH_MAX_VF_GROUP_OBJ_NUM (ZXDH_MAX_VF_GROUP_OBJ_ID + 1) +#define ZXDH_MAX_VF_OBJ_ID 255 +#define ZXDH_MAX_VF_OBJ_NUM (ZXDH_MAX_VF_OBJ_ID + 1) + +#ifdef NEED_SYSFS_EMIT +int sysfs_emit(char *buf, const char *fmt, ...) +{ + int ret; + va_list args; + + if (WARN(!buf || offset_in_page(buf), "invalid sysfs_emit: buf:%p\n", + buf)) + return 0; + + va_start(args, fmt); + ret = vscnprintf(buf, PAGE_SIZE, fmt, args); + va_end(args); + return ret; +} +#endif + +/************************************************ + * sriov attr: + * burst: WO,配置当前PF的流量突发尺寸 + * profile: RO,打印当前PF对三级car的profile占用情况 +************************************************/ + +/************************************************ + * group attr: + * config: RO,打印当前vf端口组所有配置 + * max_rx_rate: WO,配置当前vf端口组最大接收速率 + * max_tx_rate: WO,配置当前vf端口组最大发送速率 +************************************************/ + +/************************************************ + * vf attr: + * config: RO,打印当前vf所有配置 + * group: WO,指定当前vf端口所属group,默认group-0 + * meter + * rx + * bps + * max_rate: WO,配置当前vf端口接收方向字节限速 + * min_rate: WO,配置当前vf端口接收方向字节最小保障带宽 + * pkt_dropped: RO, 查询丢包数量/字节数 + * pps + * rate: WO,配置当前vf端口接收方向包限速 + * pkt_dropped: RO, 查询丢包数量/字节数 + * tx + * bps + * max_rate: WO,配置当前vf端口发送方向字节限速 + * max_rate: WO,配置当前vf端口发送方向字节最小保障带宽 + * pkt_dropped: RO, 查询丢包数量/字节数 + + * pps + * pps_rate: WO,配置当前vf端口发送方向包限速 + * pkt_dropped: RO, 查询丢包数量/字节数 +************************************************/ + +struct zxdh_group_attribute { + struct attribute attr; + ssize_t (*show)(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, char *buf); + ssize_t (*store)(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, const char *buf, + size_t count); +}; + +#define to_zxdh_group_attr(x) container_of(x, struct zxdh_group_attribute, attr) +#define to_zxdh_group_obj(x) container_of(x, 
struct zxdh_group_obj, kobj) + +struct zxdh_vf_attribute { + struct attribute attr; + ssize_t (*show)(struct zxdh_vf_obj *vf, struct zxdh_vf_attribute *attr, + char *buf); + ssize_t (*store)(struct zxdh_vf_obj *vf, struct zxdh_vf_attribute *attr, + const char *buf, size_t count); +}; + +#define to_zxdh_vf_attr(x) container_of(x, struct zxdh_vf_attribute, attr) +#define to_zxdh_vf_obj(x) container_of(x, struct zxdh_vf_obj, kobj) + +struct zxdh_vf_meter_attribute { + struct attribute attr; + ssize_t (*show)(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, char *buf); + ssize_t (*store)(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, const char *buf, + size_t count); +}; + +#define to_zxdh_vf_meter_attr(x) \ + container_of(x, struct zxdh_vf_meter_attribute, attr) +#define to_zxdh_vf_meter_obj(x) container_of(x, struct zxdh_vf_meter_obj, kobj) + +/* + * The default show function that must be passed to sysfs. This will be + * called by sysfs for whenever a show function is called by the user on a + * sysfs file associated with the kobjects we have registered. We need to + * transpose back from a "default" kobject to our custom struct foo_obj and + * then call the show function for that specific object. + */ +static ssize_t zxdh_group_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct zxdh_group_attribute *attribute; + struct zxdh_group_obj *group; + + attribute = to_zxdh_group_attr(attr); + group = to_zxdh_group_obj(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(group, attribute, buf); +} + +/* + * Just like the default show function above, but this one is for when the + * sysfs "store" is requested (when a value is written to a file.) 
+ */ +static ssize_t zxdh_group_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t len) +{ + struct zxdh_group_attribute *attribute; + struct zxdh_group_obj *group; + + attribute = to_zxdh_group_attr(attr); + group = to_zxdh_group_obj(kobj); + + if (!attribute->store) + return -EIO; + + return attribute->store(group, attribute, buf, len); +} + +static ssize_t zxdh_vf_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct zxdh_vf_attribute *attribute; + struct zxdh_vf_obj *vf; + + attribute = to_zxdh_vf_attr(attr); + vf = to_zxdh_vf_obj(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(vf, attribute, buf); +} + +static ssize_t zxdh_vf_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t len) +{ + struct zxdh_vf_attribute *attribute; + struct zxdh_vf_obj *vf; + + attribute = to_zxdh_vf_attr(attr); + vf = to_zxdh_vf_obj(kobj); + + if (!attribute->store) + return -EIO; + + return attribute->store(vf, attribute, buf, len); +} + +static ssize_t zxdh_vf_meter_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct zxdh_vf_meter_attribute *attribute; + struct zxdh_vf_meter_obj *xps; + + attribute = to_zxdh_vf_meter_attr(attr); + xps = to_zxdh_vf_meter_obj(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(xps, attribute, buf); +} + +static ssize_t zxdh_vf_meter_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t len) +{ + struct zxdh_vf_meter_attribute *attribute; + struct zxdh_vf_meter_obj *xps; + + attribute = to_zxdh_vf_meter_attr(attr); + xps = to_zxdh_vf_meter_obj(kobj); + + if (!attribute->store) + return -EIO; + + return attribute->store(xps, attribute, buf, len); +} + +static ssize_t zxdh_vf_stats_update(DPP_PF_INFO_T pf_info, int vf_idx, + struct zxdh_pf_device *pf_dev, + struct zxdh_vf_file_stats *vf_file_stats) +{ + struct zxdh_vf_item *vf_item = NULL; + struct 
zxdh_en_vport_np_stats *np_stats = NULL; + union zxdh_msg *msg = NULL; + uint32_t vf_id = EPID(pf_info.vport) * 256 + VFUNC_NUM(pf_info.vport); + uint16_t vf_pcie_id = FIND_VF_PCIE_ID(pf_dev->pcie_id, vf_idx); + struct dh_core_dev *dh_dev = + container_of((void *)(pf_dev), struct dh_core_dev, priv); + int32_t err = 0; + struct zxdh_bar_extra_para para = { 0 }; + + para.is_sync = true; + para.retrycnt = 0; + + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !\n", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + //VQM统计 + msg->payload.hdr_to_agt.op_code = AGENT_VQM_DEVICE_STATS_GET; + msg->payload.hdr_to_agt.vf_id = vf_id; + msg->payload.hdr_to_agt.pcie_id = vf_pcie_id; + err = zxdh_pf_msg_send_cmd(dh_dev, MODULE_VQM, msg, msg, ¶); + if (err != 0) { + LOG_ERR("vfid %d zxdh_vqm_stats_get failed, err: %d\n", vf_id, + err); + kfree(msg); + return -1; + } + + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_idx); + np_stats = kzalloc(sizeof(struct zxdh_en_vport_np_stats), GFP_KERNEL); + if (np_stats == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !\n", + sizeof(struct zxdh_en_vport_np_stats)); + kfree(msg); + return -ENOMEM; + } + LOG_DEBUG( + "zxdh_vf_stats_update is called, vport: 0x%x, vf_id %d, pf_info.slot %u\n", + pf_info.vport, vf_id, pf_info.slot); + + dpp_stat_port_mc_packet_rx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->rx_vport_multicast_bytes), + &(np_stats->rx_vport_multicast_packets)); + dpp_stat_port_mc_packet_tx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->tx_vport_multicast_bytes), + &(np_stats->tx_vport_multicast_packets)); + dpp_stat_port_bc_packet_rx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->rx_vport_broadcast_bytes), + &(np_stats->rx_vport_broadcast_packets)); + dpp_stat_port_bc_packet_tx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->tx_vport_broadcast_bytes), + &(np_stats->tx_vport_broadcast_packets)); + 
dpp_stat_MTU_packet_msg_rx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->rx_vport_mtu_drop_bytes), + &(np_stats->rx_vport_mtu_drop_packets)); + dpp_stat_MTU_packet_msg_tx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->tx_vport_mtu_drop_bytes), + &(np_stats->tx_vport_mtu_drop_packets)); + dpp_stat_plcr_packet_drop_rx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->rx_vport_plcr_drop_bytes), + &(np_stats->rx_vport_plcr_drop_packets)); + dpp_stat_plcr_packet_drop_tx_cnt_get( + &pf_info, vf_id, NP_GET_PKT_CNT, + &(np_stats->tx_vport_plcr_drop_bytes), + &(np_stats->tx_vport_plcr_drop_packets)); + + vf_file_stats->tx_packets += msg->reps.stats_msg.tx_total; + vf_file_stats->tx_bytes += msg->reps.stats_msg.tx_total_bytes; + vf_file_stats->rx_packets += msg->reps.stats_msg.rx_total; + vf_file_stats->rx_bytes += msg->reps.stats_msg.rx_total_bytes; + vf_file_stats->rx_broadcast += np_stats->rx_vport_broadcast_packets; + vf_file_stats->rx_multicast += np_stats->rx_vport_multicast_packets; + vf_file_stats->tx_broadcast += np_stats->tx_vport_broadcast_packets; + vf_file_stats->tx_multicast += np_stats->tx_vport_multicast_packets; + vf_file_stats->rx_dropped += msg->reps.stats_msg.rx_drop; + vf_file_stats->tx_error += np_stats->tx_vport_mtu_drop_packets; + vf_file_stats->rx_error += np_stats->rx_vport_mtu_drop_packets; + kfree(np_stats); + kfree(msg); + return 0; +} + +#define _sprintf(p, buf, format, arg...) \ + ((PAGE_SIZE - (int)(p - buf)) <= 0 ? 
\ + 0 : \ + scnprintf(p, PAGE_SIZE - (int)(p - buf), format, ##arg)) + +static ssize_t zxdh_vf_stats_show(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, char *buf) +{ + char *p = buf; + struct zxdh_vf_file_stats *vf_file_stats = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + pf_info.slot = vf->pf_dev->slot_id; + pf_info.vport = vf->vport; + vf_file_stats = kzalloc(sizeof(struct zxdh_vf_file_stats), GFP_KERNEL); + if (vf_file_stats == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(struct zxdh_vf_file_stats)); + return -ENOMEM; + } + + if (zxdh_vf_stats_update(pf_info, vf->vf_idx, vf->pf_dev, + vf_file_stats) != 0) { + kfree(vf_file_stats); + return -ENOMEM; + } + + p += _sprintf(p, buf, "tx_packets : %llu\n", + vf_file_stats->tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", + vf_file_stats->tx_bytes); + p += _sprintf(p, buf, "rx_packets : %llu\n", + vf_file_stats->rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", + vf_file_stats->rx_bytes); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", + vf_file_stats->rx_broadcast); + p += _sprintf(p, buf, "rx_multicast : %llu\n", + vf_file_stats->rx_multicast); + p += _sprintf(p, buf, "tx_broadcast : %llu\n", + vf_file_stats->tx_broadcast); + p += _sprintf(p, buf, "tx_multicast : %llu\n", + vf_file_stats->tx_multicast); + p += _sprintf(p, buf, "rx_dropped : %llu\n", + vf_file_stats->rx_dropped); + p += _sprintf(p, buf, "tx_error : %llu\n", + vf_file_stats->tx_error); + p += _sprintf(p, buf, "rx_error : %llu\n", + vf_file_stats->rx_error); + kfree(vf_file_stats); + return (ssize_t)(p - buf); +} + +static ssize_t zxdh_vf_stats_store(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, + const char *buf, size_t count) +{ + return -ENOTSUPP; +} + +/* Our custom sysfs_ops that we will associate with our ktype later on */ +static const struct sysfs_ops zxdh_group_sysfs_ops = { + .show = zxdh_group_attr_show, + .store = zxdh_group_attr_store, +}; + +static const struct sysfs_ops 
zxdh_vf_sysfs_ops = { + .show = zxdh_vf_attr_show, + .store = zxdh_vf_attr_store, +}; + +static const struct sysfs_ops zxdh_vf_meter_sysfs_ops = { + .show = zxdh_vf_meter_attr_show, + .store = zxdh_vf_meter_attr_store, +}; + +/* + * The release function for our object. This is REQUIRED by the kernel to + * have. We free the memory held in our object here. + * + * NEVER try to get away with just a "blank" release function to try to be + * smarter than the kernel. Turns out, no one ever is... + */ +/*static void zxdh_group_release(struct kobject *kobj) +{ + struct zxdh_group_obj *group; + + LOG_INFO("enter\n"); + group = to_zxdh_group_obj(kobj); + kfree(group); +} + +static void zxdh_vf_release(struct kobject *kobj) +{ + struct zxdh_vf_obj *vf; + + LOG_INFO("enter\n"); + vf = to_zxdh_vf_obj(kobj); + kfree(vf); +} + +static void zxdh_vf_meter_release(struct kobject *kobj) +{ + struct zxdh_vf_meter_obj *xps; + + LOG_INFO("enter\n"); + xps = to_zxdh_vf_meter_obj(kobj); + kfree(xps); +}*/ + +#define _sprintf(p, buf, format, arg...) \ + ((PAGE_SIZE - (int)(p - buf)) <= 0 ? 
\ + 0 : \ + scnprintf(p, PAGE_SIZE - (int)(p - buf), format, ##arg)) + +#ifdef ZXDH_PLCR_DEBUG +static ssize_t zxdh_burst_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct zxdh_pf_device *pf_dev; + struct zxdh_sriov_sysfs *sriov; + + LOG_DEBUG("enter\n"); + sriov = container_of(attr, struct zxdh_sriov_sysfs, burst_attr); + pf_dev = container_of(sriov, struct zxdh_pf_device, sriov); + + return sysfs_emit(buf, "the burst size = %dByte\n", + pf_dev->plcr_table.burst_size); +} + +static ssize_t zxdh_burst_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + int rtn = 0; + uint32_t burst; + struct zxdh_pf_device *pf_dev; + struct zxdh_sriov_sysfs *sriov; + + LOG_DEBUG("enter\n"); + sriov = container_of(attr, struct zxdh_sriov_sysfs, burst_attr); + pf_dev = container_of(sriov, struct zxdh_pf_device, sriov); + + rtn = kstrtoint(buf, 10, &burst); + if (rtn) + return rtn; + + /* When the burst changes, the rate limit of the specified flow needs + to be reconfigured to update its burst. + The scope of the burst includes the current PF and its associated VFs. 
+ Write 0 to set the burst to the default value.*/ + + pf_dev->plcr_table.burst_size = burst; + + return count; +} + +static ssize_t zxdh_profile_stat_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct zxdh_sriov_sysfs *sriov = + container_of(attr, struct zxdh_sriov_sysfs, profile_attr); + struct zxdh_pf_device *pf_dev = + container_of(sriov, struct zxdh_pf_device, sriov); + struct zxdh_plcr_profile *profile; + unsigned long index; + uint32_t count = 0; + E_PLCR_CAR_TYPE car_type; + char *p = buf; + + LOG_DEBUG("enter\n"); + for (car_type = E_PLCR_CAR_A; car_type < E_PLCR_CAR_NUM; car_type++) { + count = 0; + p += _sprintf(p, buf, "\n"); + p += _sprintf(p, buf, "car_type : %d\n", car_type); + p += _sprintf(p, buf, "profile_id :"); + xa_for_each_range(&(pf_dev->plcr_table.plcr_profiles[car_type]), + index, profile, 0, + gaudPlcrCarxProfileNum[car_type]) { + p += _sprintf(p, buf, " %d", profile->profile_id); + count++; + } + p += _sprintf(p, buf, "\n"); + p += _sprintf(p, buf, "profiles_num : %d\n", count); + } + return (ssize_t)(p - buf); +} + +static ssize_t zxdh_all_vf_stats_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return -ENOTSUPP; +} + +static ssize_t zxdh_all_vf_stats_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + uint16_t vf_idx = 0; + struct zxdh_vf_item *vf_item = NULL; + struct zxdh_sriov_sysfs *sriov = + container_of(attr, struct zxdh_sriov_sysfs, all_vf_stats_attr); + struct zxdh_pf_device *pf_dev = + container_of(sriov, struct zxdh_pf_device, sriov); + struct dh_core_dev *dh_dev = + container_of((void *)(pf_dev), struct dh_core_dev, priv); + struct pci_dev *pdev = dh_dev->pdev; + int num_vfs = pci_num_vf(pdev); + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_vf_file_stats *vf_file_stats = NULL; + char *p = buf; + + vf_file_stats = kzalloc(sizeof(struct zxdh_vf_file_stats), GFP_KERNEL); + if (vf_file_stats == NULL) { + LOG_ERR("kzalloc(%lu, 
GFP_KERNEL) failed !", + sizeof(struct zxdh_vf_file_stats)); + return -ENOMEM; + } + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + LOG_DEBUG("current vf_idx %d\n", vf_idx); + vf_item = zxdh_pf_get_vf_item(dh_dev, vf_idx); + // 计算vf_idx的统计值(减去初始值),添加到vf_file_stats总和中 + pf_info.slot = pf_dev->slot_id; + pf_info.vport = vf_item->vport; + if (zxdh_vf_stats_update(pf_info, vf_idx, pf_dev, + vf_file_stats) != 0) { + kfree(vf_file_stats); + LOG_ERR("zxdh_vf_stats_update failed, vf %d\n", vf_idx); + return -ENOMEM; + } + } + + p += _sprintf(p, buf, "tx_packets : %llu\n", + vf_file_stats->tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", + vf_file_stats->tx_bytes); + p += _sprintf(p, buf, "rx_packets : %llu\n", + vf_file_stats->rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", + vf_file_stats->rx_bytes); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", + vf_file_stats->rx_broadcast); + p += _sprintf(p, buf, "rx_multicast : %llu\n", + vf_file_stats->rx_multicast); + p += _sprintf(p, buf, "tx_broadcast : %llu\n", + vf_file_stats->tx_broadcast); + p += _sprintf(p, buf, "tx_multicast : %llu\n", + vf_file_stats->tx_multicast); + p += _sprintf(p, buf, "rx_dropped : %llu\n", + vf_file_stats->rx_dropped); + p += _sprintf(p, buf, "tx_error : %llu\n", + vf_file_stats->tx_error); + p += _sprintf(p, buf, "rx_error : %llu\n", + vf_file_stats->rx_error); + kfree(vf_file_stats); + return (ssize_t)(p - buf); +} +#endif + +#ifdef ZXDH_PLCR_OPEN +static uint16_t zxdh_group_to_flowid(struct zxdh_group_obj *group, + int32_t data_type) +{ + uint16_t flowid; + uint16_t vport = group->pf_dev->vport; + int32_t group_id = group->group_id; + int32_t global_group_id; + + global_group_id = EPID(vport) * 128 + FUNC_NUM(vport) * 16 + group_id; + flowid = (data_type == ZXDH_GROUP_TX_RATE) ? 
(global_group_id * 2 + 1) : + (global_group_id * 2); + + return flowid; +} + +static uint16_t zxdh_vport_to_flowid(uint16_t vport, int32_t req_type, + int32_t direction) +{ + uint16_t vfid; + uint16_t flowid; + + LOG_DEBUG("enter\n"); + vfid = VQM_VFID(vport); + if (req_type == E_RATE_LIMIT_BYTE) { + flowid = direction ? (vfid * 2 + 1) : (vfid * 2); + } else if (req_type == E_RATE_LIMIT_PACKET) { + flowid = direction ? (vfid * 2 + 1) : + (vfid * 2) + PLCR_CAR_A_DPDK_FLOWID_OFFSET; + } else { + flowid = 0xffff; + } + + return flowid; +} + +static uint32_t zxdh_vf_meter_obj_to_flowid(struct zxdh_vf_meter_obj *xps) +{ + uint16_t vport; + uint16_t vfid; + uint16_t flowid; + + LOG_DEBUG("enter\n"); + vport = xps->vf_obj->vport; + vfid = VQM_VFID(vport); + flowid = IS_TX_METER(xps->meter_type) ? (vfid * 2 + 1) : (vfid * 2); + + return flowid; +} + +static int zxdh_set_vf_group_rate_limit(struct zxdh_group_obj *group, + int32_t direction, uint32_t max_rate) +{ + int rtn = 0; + zxdh_plcr_rate_limit_paras rate_limit_paras; + + LOG_DEBUG("enter\n"); + if (!group->pf_dev->plcr_table.is_init) + return 0; + + // en_priv = pf_dev_get_en_priv(group->pf_dev); + // if (IS_ERR(en_priv)) + // return PTR_ERR(en_priv); + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_GROUP_BYTE; + rate_limit_paras.direction = direction; // rx:0, tx:1 + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + rate_limit_paras.max_rate = max_rate; + rate_limit_paras.min_rate = PLCR_INVALID_PARAM; + + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = PLCR_INVALID_PARAM; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = group->group_id; + + rtn = zxdh_plcr_unified_set_rate_limit(group->pf_dev, + &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); + + return rtn; +} + +static int zxdh_move_vf_to_group(struct zxdh_vf_obj *vf, + struct zxdh_group_obj *group) +{ + int rtn = 0; + zxdh_plcr_rate_limit_paras rate_limit_paras; + + LOG_DEBUG("enter\n"); + if 
(!vf->pf_dev->plcr_table.is_init) + return 0; + + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_MOVE_VF_GROUP; + rate_limit_paras.direction = E_RATE_LIMIT_RX; + rate_limit_paras.mode = PLCR_INVALID_PARAM; + rate_limit_paras.max_rate = PLCR_INVALID_PARAM; + rate_limit_paras.min_rate = PLCR_INVALID_PARAM; + + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = vf->vf_idx; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = group->group_id; + + rtn = zxdh_plcr_unified_set_rate_limit(vf->pf_dev, &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); + +#if 0 + //移动group,只需要调用一次统一限速接口,会同时将tx和rx进行映射 + rate_limit_paras.direction = E_RATE_LIMIT_TX; + rtn = zxdh_plcr_unified_set_rate_limit(vf->pf_dev, &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); +#endif + + return rtn; +} +#endif + +static ssize_t zxdh_group_max_rate_store(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + const char *buf, size_t count, + int32_t direction) +{ + int32_t max_rate; + // uint16_t flowid; + int rtn = 0; + LOG_DEBUG("enter\n"); + + if (group == group->pf_dev->sriov.group_0) + return -EPERM; + + rtn = kstrtoint(buf, 10, &max_rate); + if (rtn < 0) + return -EINVAL; + LOG_DEBUG("max_%s_rate = %d\n", direction ? "tx" : "rx", max_rate); + + if ((direction ? group->max_tx_rate : group->max_rx_rate) == max_rate) + return count; + + // flowid = zxdh_group_to_flowid(group, direction); + +#ifdef ZXDH_PLCR_OPEN + //调用限速统一接口,配置vf group限速 + rtn = zxdh_set_vf_group_rate_limit(group, direction, max_rate); + if (rtn) + return rtn; + + PLCR_LOG_INFO("The Max %s Rate of group%d has been set to %d Mbit/s\n", + direction ? 
"Tx" : "Rx", group->group_id, max_rate); +#endif + + if (direction == ZXDH_GROUP_TX_RATE) + group->max_tx_rate = max_rate; + else + group->max_rx_rate = max_rate; + + return count; +} + +static ssize_t zxdh_group_max_rx_rate_show(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + char *buf) +{ + LOG_DEBUG("enter\n"); + return sysfs_emit( + buf, + "usage: write to set VF group max rx rate\n"); +} + +static ssize_t zxdh_group_max_rx_rate_store(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + const char *buf, size_t count) +{ + return zxdh_group_max_rate_store(group, attr, buf, count, + ZXDH_GROUP_RX_RATE); +} + +/* + * The "max_tx_rate" file where the .max_tx_rate variable is read from and written to. + */ +static ssize_t zxdh_group_max_tx_rate_show(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + char *buf) +{ + LOG_DEBUG("enter\n"); + return sysfs_emit( + buf, + "usage: write to set VF group max tx rate\n"); +} + +static ssize_t zxdh_group_max_tx_rate_store(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + const char *buf, size_t count) +{ + return zxdh_group_max_rate_store(group, attr, buf, count, + ZXDH_GROUP_TX_RATE); +} + +static ssize_t zxdh_group_config_show(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + char *buf) +{ + //打印当前group配置信息 + char *p = buf; + uint16_t vf_idx; + int32_t num_vfs = group->pf_dev->num_vfs; + struct zxdh_vf_obj *vf; + + LOG_DEBUG("enter\n"); + // if (!mutex_trylock(&esw->state_lock)) + // return -EBUSY; + + p += _sprintf(p, buf, "GroupID : %d\n", group->group_id); + p += _sprintf(p, buf, "Num VFs : %d\n", group->num_vfs); + p += _sprintf(p, buf, "MaxTxRate : %d\n", group->max_tx_rate); + p += _sprintf(p, buf, "MaxRxRate : %d\n", group->max_rx_rate); + + if (group->num_vfs) { + p += _sprintf(p, buf, "VFs : "); + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf = group->pf_dev->sriov.vfs + vf_idx; + if (vf->group == group) + p += 
_sprintf(p, buf, "VF%d ", vf_idx); + } + p += _sprintf(p, buf, "\n"); + } + + // mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t zxdh_group_config_store(struct zxdh_group_obj *group, + struct zxdh_group_attribute *attr, + const char *buf, size_t count) +{ + LOG_DEBUG("enter\n"); + return -ENOTSUPP; +} + +static ssize_t zxdh_vf_config_show(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, char *buf) +{ + //打印当前vf配置信息 +#ifdef ZXDH_PLCR_OPEN + struct zxdh_pf_device *pf_dev = vf->pf_dev; + struct xarray *xa_bps_flow, *xa_pps_flow; + struct zxdh_plcr_flow *flow_bps_rx, *flow_bps_tx, *flow_pps_rx, + *flow_pps_tx; + uint32_t id_bps_rx, id_bps_tx, id_pps_rx, id_pps_tx; + E_RATE_LIMIT_MODE mode; +#endif + char *p = buf; + + LOG_DEBUG("enter\n"); + // mutex_lock(&esw->state_lock); + +#ifdef ZXDH_PLCR_OPEN + xa_bps_flow = &(pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_B]); + zxdh_plcr_get_mode(pf_dev, vf->vport, &mode); + + id_bps_rx = zxdh_vport_to_flowid(vf->vport, E_RATE_LIMIT_BYTE, + E_RATE_LIMIT_RX); + id_bps_tx = zxdh_vport_to_flowid(vf->vport, E_RATE_LIMIT_BYTE, + E_RATE_LIMIT_TX); + flow_bps_rx = xa_load(xa_bps_flow, id_bps_rx); + flow_bps_tx = xa_load(xa_bps_flow, id_bps_tx); +#endif + + p += _sprintf(p, buf, "VF : VF%d\n", vf->vf_idx); + p += _sprintf(p, buf, "RateGroup : %d\n", vf->group->group_id); +#ifdef ZXDH_PLCR_OPEN + p += _sprintf(p, buf, "VportQosMode: %d\n", mode); + + if (flow_bps_rx) { + p += _sprintf(p, buf, "MinRxRate : %dMbit/s\n", + flow_bps_rx->min_rate); + p += _sprintf(p, buf, "MaxRxRate : %dMbit/s\n", + flow_bps_rx->max_rate); + } + + if (flow_bps_tx) { + p += _sprintf(p, buf, "MinTxRate : %dMbit/s\n", + flow_bps_tx->min_rate); + p += _sprintf(p, buf, "MaxTxRate : %dMbit/s\n", + flow_bps_tx->max_rate); + } + // if (mode == E_RATE_LIMIT_MODE0) + // { + + // } + // else if (mode == E_RATE_LIMIT_MODE1) + // { + + // } + if (mode == E_RATE_LIMIT_MODE2) { + xa_pps_flow = 
&(pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_A]); + id_pps_rx = zxdh_vport_to_flowid(vf->vport, E_RATE_LIMIT_PACKET, + E_RATE_LIMIT_RX); + id_pps_tx = zxdh_vport_to_flowid(vf->vport, E_RATE_LIMIT_PACKET, + E_RATE_LIMIT_TX); + flow_pps_rx = xa_load(xa_pps_flow, id_pps_rx); + flow_pps_tx = xa_load(xa_pps_flow, id_pps_tx); + + if (flow_pps_rx) + p += _sprintf(p, buf, "MaxRxRate : %dPackets/s\n", + flow_pps_rx->max_rate); + + if (flow_pps_tx) + p += _sprintf(p, buf, "MaxTxRate : %dPackets/s\n", + flow_pps_tx->min_rate); + } + +#endif + + // mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t zxdh_vf_config_store(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, + const char *buf, size_t count) +{ + LOG_DEBUG("enter\n"); + return -ENOTSUPP; +} + +static ssize_t zxdh_vf_group_show(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, char *buf) +{ + LOG_DEBUG("enter\n"); + + return sysfs_emit(buf, "usage: write <0-%d> to set VF vport group\n", + ZXDH_MAX_VF_GROUP_OBJ_ID); +} + +static ssize_t zxdh_vf_group_store(struct zxdh_vf_obj *vf, + struct zxdh_vf_attribute *attr, + const char *buf, size_t count) +{ + int32_t group_id; + int rtn = 0; + LOG_DEBUG("enter\n"); + + rtn = kstrtoint(buf, 10, &group_id); + if (rtn < 0) + return -EINVAL; + LOG_DEBUG("group = %d\n", group_id); + + rtn = zxdh_vf_update_sysfs_group(vf->pf_dev, vf, group_id); + if (rtn) { + LOG_ERR("zxdh_vf_update_sysfs_group failed\n"); + return rtn; + } + + PLCR_LOG_INFO("VF%d has been moved to group%d\n", vf->vf_idx, group_id); + + return count; +} + +static ssize_t zxdh_vf_meter_rate_store(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, + const char *buf, size_t count, + int32_t data_type) +{ +#ifdef ZXDH_PLCR_OPEN + struct zxdh_pf_device *pf_dev = xps->pf_dev; + struct zxdh_vf_item *vf_item; + struct xarray *xarray_flowid = + &(pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_B]); + struct zxdh_plcr_flow *plcr_flow; + zxdh_plcr_rate_limit_paras 
rate_limit_paras; + uint32_t flowid; + uint16_t vport; + const char *direction; +#endif + int32_t data; + int rtn = 0; + + LOG_DEBUG("enter\n"); + rtn = kstrtoint(buf, 10, &data); + if (rtn < 0) + return -EINVAL; + +#ifdef ZXDH_PLCR_OPEN + if (!pf_dev->plcr_table.is_init) + return count; + vport = xps->vf_obj->vport; + flowid = zxdh_vf_meter_obj_to_flowid(xps); + plcr_flow = xa_load(xarray_flowid, flowid); + + //调用限速统一接口,配置vf限速 + if (IS_PPS_METER(xps->meter_type)) { + LOG_DEBUG("max_rate = %d Packets/s\n", data); + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_PKT; + rate_limit_paras.mode = E_RATE_LIMIT_PACKET; + rate_limit_paras.min_rate = 0; + rate_limit_paras.max_rate = data; + } else { + rate_limit_paras.req_type = E_RATE_LIMIT_REQ_VF_BYTE; + rate_limit_paras.mode = E_RATE_LIMIT_BYTE; + + if (data_type == ZXDH_VF_MIN_RATE) { + LOG_DEBUG("min_rate = %d Mbit/s\n", data); + + rate_limit_paras.min_rate = data; + if (plcr_flow) + rate_limit_paras.max_rate = + max(plcr_flow->max_rate, + rate_limit_paras.min_rate); + else + rate_limit_paras.max_rate = data; + } else if (data_type == ZXDH_VF_MAX_RATE) { + LOG_DEBUG("max_rate = %d Mbit/s\n", data); + + rate_limit_paras.max_rate = data; + if (plcr_flow) + rate_limit_paras.min_rate = + min(plcr_flow->min_rate, + rate_limit_paras.max_rate); + else + rate_limit_paras.min_rate = 0; + } else + return -EINVAL; + } + + rate_limit_paras.direction = IS_TX_METER(xps->meter_type) ? + E_RATE_LIMIT_TX : + E_RATE_LIMIT_RX; + rate_limit_paras.queue_id = PLCR_INVALID_PARAM; + rate_limit_paras.vf_idx = xps->vf_obj->vf_idx; + rate_limit_paras.vfid = PLCR_INVALID_PARAM; + rate_limit_paras.group_id = PLCR_INVALID_PARAM; + + rtn = zxdh_plcr_unified_set_rate_limit(pf_dev, &rate_limit_paras); + PLCR_COMM_ASSERT(rtn); + + direction = IS_TX_METER(xps->meter_type) ? 
"Tx" : "Rx"; + if (IS_PPS_METER(xps->meter_type)) { + PLCR_LOG_INFO( + "The Max %s Rate of VF%d has been set to %d Packets/s\n", + direction, xps->vf_obj->vf_idx, data); + } else { + PLCR_LOG_INFO( + "The Rate of VF%d has been set to: Min %s Rate: %d Mbit/s, Max %s Rate: %d Mbit/s\n", + xps->vf_obj->vf_idx, direction, + rate_limit_paras.min_rate, direction, + rate_limit_paras.max_rate); + } + + if (xps->meter_type == VF_METER_TX_BPS) { + vf_item = &pf_dev->vf_item[rate_limit_paras.vf_idx]; + vf_item->min_tx_rate = rate_limit_paras.min_rate; + vf_item->max_tx_rate = rate_limit_paras.max_rate; + } + + if (E_RATE_LIMIT_TX == rate_limit_paras.direction) { + //引入vqm vf限速,提高小流量限速精度; + //条件1,4G以内满足配置能满足vqm限速周期全局共享,ECO回片可放宽限速值 + //条件2,每次配置新值需要将vqm vf限速清空再做配置 + rtn = zxdh_vqm_vf_set_rate_limit(pf_dev, rate_limit_paras.vfid, + 0); + PLCR_COMM_ASSERT(rtn); + if (rate_limit_paras.max_rate < 4000) { + rtn = zxdh_vqm_vf_set_rate_limit( + pf_dev, rate_limit_paras.vfid, + rate_limit_paras.max_rate); + PLCR_COMM_ASSERT(rtn); + PLCR_LOG_INFO( + "The Rate of VF id:%d has been set to: Max Tx Rate: %dMbit/s in vqm\n", + rate_limit_paras.vfid, + rate_limit_paras.max_rate); + } + } + +#endif + + if (data_type == ZXDH_VF_MIN_RATE) + xps->min_rate = data; + else + xps->max_rate = data; + + return count; +} + +static ssize_t zxdh_vf_meter_min_rate_show(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, + char *buf) +{ + uint32_t meter_type = xps->meter_type; + LOG_DEBUG("enter\n"); + + return sysfs_emit( + buf, "usage: write to set VF %s min rate\n", + IS_TX_METER(meter_type) ? 
"tx" : "rx"); +} + +static ssize_t +zxdh_vf_meter_min_rate_store(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, + const char *buf, size_t count) +{ + return zxdh_vf_meter_rate_store(xps, attr, buf, count, + ZXDH_VF_MIN_RATE); +} + +static ssize_t zxdh_vf_meter_max_rate_show(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, + char *buf) +{ + uint32_t meter_type = xps->meter_type; + LOG_DEBUG("enter\n"); + + return sysfs_emit(buf, + "usage: write to set VF %s max rate\n", + IS_PPS_METER(meter_type) ? "Packets/s" : "Mbit/s", + IS_TX_METER(meter_type) ? "tx" : "rx"); +} + +static ssize_t +zxdh_vf_meter_max_rate_store(struct zxdh_vf_meter_obj *xps, + struct zxdh_vf_meter_attribute *attr, + const char *buf, size_t count) +{ + return zxdh_vf_meter_rate_store(xps, attr, buf, count, + ZXDH_VF_MAX_RATE); +} + +#ifndef DEFAULT_GROUPS +#define ZXDH_RATE_GROUP_ATTR(_name) \ + static struct zxdh_group_attribute zxdh_group_##_name = \ + __ATTR(_name, 0644, zxdh_group_##_name##_show, \ + zxdh_group_##_name##_store) + +#define ZXDH_VF_ATTR(_name) \ + static struct zxdh_vf_attribute zxdh_vf_##_name = __ATTR( \ + _name, 0644, zxdh_vf_##_name##_show, zxdh_vf_##_name##_store) + +#define ZXDH_VF_METER_ATTR(_name) \ + static struct zxdh_vf_meter_attribute zxdh_vf_meter_##_name = \ + __ATTR(_name, 0644, zxdh_vf_meter_##_name##_show, \ + zxdh_vf_meter_##_name##_store) + +/* Sysfs attributes cannot be world-writable. 
*/ +ZXDH_RATE_GROUP_ATTR(max_tx_rate); +ZXDH_RATE_GROUP_ATTR(max_rx_rate); +ZXDH_RATE_GROUP_ATTR(config); +ZXDH_VF_ATTR(config); +ZXDH_VF_ATTR(group); +ZXDH_VF_ATTR(stats); +ZXDH_VF_METER_ATTR(min_rate); +ZXDH_VF_METER_ATTR(max_rate); +#else +static struct zxdh_group_attribute zxdh_group_max_tx_rate __ro_after_init = + __ATTR(max_tx_rate, 0644, zxdh_group_max_tx_rate_show, + zxdh_group_max_tx_rate_store); +static struct zxdh_group_attribute zxdh_group_max_rx_rate __ro_after_init = + __ATTR(max_rx_rate, 0644, zxdh_group_max_rx_rate_show, + zxdh_group_max_rx_rate_store); +static struct zxdh_group_attribute zxdh_group_config __ro_after_init = + __ATTR(config, 0644, zxdh_group_config_show, zxdh_group_config_store); + +static struct zxdh_vf_attribute zxdh_vf_config __ro_after_init = + __ATTR(config, 0644, zxdh_vf_config_show, zxdh_vf_config_store); +static struct zxdh_vf_attribute zxdh_vf_group __ro_after_init = + __ATTR(group, 0644, zxdh_vf_group_show, zxdh_vf_group_store); +static struct zxdh_vf_attribute zxdh_vf_stats __ro_after_init = + __ATTR(stats, 0644, zxdh_vf_stats_show, zxdh_vf_stats_store); + +static struct zxdh_vf_meter_attribute zxdh_vf_meter_min_rate __ro_after_init = + __ATTR(min_rate, 0644, zxdh_vf_meter_min_rate_show, + zxdh_vf_meter_min_rate_store); +static struct zxdh_vf_meter_attribute zxdh_vf_meter_max_rate __ro_after_init = + __ATTR(max_rate, 0644, zxdh_vf_meter_max_rate_show, + zxdh_vf_meter_max_rate_store); +#endif + +// static struct zxdh_group_attribute zxdh_group_max_tx_rate __ro_after_init +// = __ATTR(max_tx_rate, 0200, NULL, zxdh_group_max_tx_rate_store); + +// static struct zxdh_group_attribute zxdh_group_config __ro_after_init +// = __ATTR(config, 0444, zxdh_group_config_show, NULL); + +// static struct zxdh_vf_attribute zxdh_vf_group __ro_after_init +// = __ATTR(group, 0644, zxdh_vf_group_show, zxdh_vf_group_store); + +/* + * Create a group of attributes so that we can create and destroy them all + * at once. 
+ */ + +static struct attribute *zxdh_group_default_attrs[] = { + &zxdh_group_max_tx_rate.attr, &zxdh_group_max_rx_rate.attr, + &zxdh_group_config.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; +#ifndef DEFAULT_GROUPS +ATTRIBUTE_GROUPS(zxdh_group_default); +#endif + +static struct attribute *zxdh_vf_default_attrs[] = { + &zxdh_vf_config.attr, + &zxdh_vf_group.attr, + &zxdh_vf_stats.attr, + NULL, +}; +#ifndef DEFAULT_GROUPS +ATTRIBUTE_GROUPS(zxdh_vf_default); +#endif + +static struct attribute *zxdh_vf_meter_bps_attrs[] = { + &zxdh_vf_meter_min_rate.attr, + &zxdh_vf_meter_max_rate.attr, + NULL, +}; +#ifndef DEFAULT_GROUPS +ATTRIBUTE_GROUPS(zxdh_vf_meter_bps); +#endif + +static struct attribute *zxdh_vf_meter_pps_attrs[] = { + &zxdh_vf_meter_max_rate.attr, + NULL, +}; +#ifndef DEFAULT_GROUPS +ATTRIBUTE_GROUPS(zxdh_vf_meter_pps); +#endif + +/* + * Our own ktype for our kobjects. Here we specify our sysfs ops, the + * release function, and the set of default attributes we want created + * whenever a kobject of this type is registered with the kernel. 
+ */ +// static const struct kobj_type +static struct kobj_type zxdh_group_ktype = { + .sysfs_ops = &zxdh_group_sysfs_ops, +// .release = zxdh_group_release, +#ifndef DEFAULT_GROUPS + .default_groups = zxdh_group_default_groups, +#else + .default_attrs = zxdh_group_default_attrs, +#endif +}; + +// static const struct kobj_type +static struct kobj_type zxdh_vf_ktype = { + .sysfs_ops = &zxdh_vf_sysfs_ops, +// .release = zxdh_vf_release, +#ifndef DEFAULT_GROUPS + .default_groups = zxdh_vf_default_groups, +#else + .default_attrs = zxdh_vf_default_attrs, +#endif +}; + +static struct kobj_type zxdh_vf_meter_bps_ktype = { + .sysfs_ops = &zxdh_vf_meter_sysfs_ops, +// .release = zxdh_vf_meter_release, +#ifndef DEFAULT_GROUPS + .default_groups = zxdh_vf_meter_bps_groups, +#else + .default_attrs = zxdh_vf_meter_bps_attrs, +#endif +}; + +static struct kobj_type zxdh_vf_meter_pps_ktype = { + .sysfs_ops = &zxdh_vf_meter_sysfs_ops, +// .release = zxdh_vf_meter_release, +#ifndef DEFAULT_GROUPS + .default_groups = zxdh_vf_meter_pps_groups, +#else + .default_attrs = zxdh_vf_meter_pps_attrs, +#endif +}; + +static struct zxdh_group_obj * +zxdh_create_group_obj(struct zxdh_pf_device *pf_dev, int32_t group_id) +{ + struct kobject *groups_obj = pf_dev->sriov.groups_obj; + struct zxdh_group_obj *group; + int rtn = 0; + + LOG_DEBUG("enter\n"); + /* allocate the memory for the whole object */ + group = kzalloc(sizeof(struct zxdh_group_obj), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + group->pf_dev = pf_dev; + group->group_id = group_id; + + rtn = kobject_init_and_add(&group->kobj, &zxdh_group_ktype, groups_obj, + "group%d", group_id); + if (rtn) { + LOG_INFO("create group-%d kobject failed\n", group_id); + kobject_put(&group->kobj); + kfree(group); + return ERR_PTR(rtn); + } + + /* We are always responsible for sending the uevent that the kobject + * was added to the system. 
+ */ + kobject_uevent(&group->kobj, KOBJ_ADD); + + list_add_tail(&group->list, &pf_dev->sriov.groups_head); + + init_completion(&group->free_group_comp); + + return group; +} + +int zxdh_create_vf_obj(struct zxdh_pf_device *pf_dev, uint16_t vf_idx) +{ + struct zxdh_vf_obj *vf_obj; + int rtn; + + LOG_DEBUG("enter\n"); + + vf_obj = pf_dev->sriov.vfs + vf_idx; + vf_obj->pf_dev = pf_dev; + vf_obj->vport = pf_dev->vf_item[vf_idx].vport; + vf_obj->vf_idx = vf_idx; + vf_obj->group = pf_dev->sriov.group_0; + + LOG_DEBUG("p_sriov_obj = %p, vport = %d\n", pf_dev->sriov.sriov_obj, + pf_dev->vport); + LOG_DEBUG("vf_idx = %d, vport = %d, group_id = %d\n", + pf_dev->sriov.vfs[vf_idx].vf_idx, + pf_dev->sriov.vfs[vf_idx].vport, + pf_dev->sriov.vfs[vf_idx].group->group_id); + + rtn = kobject_init_and_add(&vf_obj->kobj, &zxdh_vf_ktype, + pf_dev->sriov.sriov_obj, "vf%d", vf_idx); + if (rtn) { + LOG_INFO("create vf-%d kobject failed\n", vf_idx); + kobject_put(&vf_obj->kobj); + return -ENOMEM; + } + + vf_obj->group->num_vfs++; + + kobject_uevent(&vf_obj->kobj, KOBJ_ADD); + + return rtn; +} + +static void zxdh_destroy_group_obj_work(struct work_struct *work) +{ + struct zxdh_group_work *group_work = + container_of(work, struct zxdh_group_work, work); + struct zxdh_group_obj *group_obj = group_work->group_obj; + + LOG_DEBUG("enter\n"); + kobject_put(&group_obj->kobj); + complete_all(&group_obj->free_group_comp); + kfree(group_work); +} + +void zxdh_destroy_group_obj(struct zxdh_group_obj *group_obj) +{ + struct zxdh_group_work *group_work; + + LOG_DEBUG("enter\n"); + group_work = kzalloc(sizeof *group_work, GFP_ATOMIC); + if (unlikely(NULL == group_work)) { + kobject_put(&group_obj->kobj); + complete_all(&group_obj->free_group_comp); + + list_del(&group_obj->list); + return; + } + + INIT_WORK(&group_work->work, zxdh_destroy_group_obj_work); + group_work->group_obj = group_obj; + queue_work(system_wq, &group_work->work); + + list_del(&group_obj->list); +} + +void 
zxdh_destroy_vf_obj(struct zxdh_pf_device *pf_dev, uint16_t vf_idx) +{ + struct zxdh_vf_obj *vf_obj; + + LOG_DEBUG("enter\n"); + vf_obj = pf_dev->sriov.vfs + vf_idx; + kobject_put(&vf_obj->kobj); +} + +/********************************************** **********************************************/ + +static struct zxdh_group_obj * +zxdh_find_sysfs_group(struct zxdh_pf_device *pf_dev, int32_t group_id) +{ + struct zxdh_group_obj *group; + + list_for_each_entry(group, &pf_dev->sriov.groups_head, list) { + if (group->group_id == group_id) + return group; + } + + return NULL; +} + +int zxdh_vf_update_sysfs_group(struct zxdh_pf_device *pf_dev, + struct zxdh_vf_obj *vf, int32_t group_id) +{ + struct zxdh_group_obj *curr_group; + struct zxdh_group_obj *new_group; + int rtn = 0; +#ifdef ZXDH_PLCR_OPEN + struct xarray *xarray_flowid; + struct zxdh_plcr_flow *plcr_rx_flow; + struct zxdh_plcr_flow *plcr_tx_flow; + int16_t curr_rx_id; + int16_t curr_tx_id; +#endif + + // mutex_lock(&esw->state_lock); + + curr_group = vf->group; + if (curr_group && curr_group->group_id == group_id) { + LOG_ERR("VF is already in the group%d\n", group_id); + goto out; + } + + if (group_id) { + new_group = zxdh_find_sysfs_group(pf_dev, group_id); + if (!new_group) { + new_group = zxdh_create_group_obj(pf_dev, group_id); + if (IS_ERR(new_group)) { + rtn = PTR_ERR(new_group); + LOG_ERR("create new sysfs group-%d failed (%d)\n", + group_id, rtn); + goto out; + } + } + } else { + new_group = pf_dev->sriov.group_0; + } + +#ifdef ZXDH_PLCR_OPEN + //Call the unified rate-limit interface to map the VF to the target group + rtn = zxdh_move_vf_to_group(vf, new_group); + if (rtn) { + PLCR_LOG_ERR("failed and rtn=0x%x\n", rtn) + goto err_update; + } +#endif + + vf->group = new_group; + new_group->num_vfs++; + if (!curr_group) + goto out; + curr_group->num_vfs--; + + if (curr_group != pf_dev->sriov.group_0 && !curr_group->num_vfs) { + zxdh_destroy_group_obj(curr_group); + +#ifdef ZXDH_PLCR_OPEN + //Unified rate-limit interface: remove the rate limits on the current CAR-C flows + curr_rx_id = + 
zxdh_group_to_flowid(curr_group, ZXDH_GROUP_RX_RATE); + curr_tx_id = + zxdh_group_to_flowid(curr_group, ZXDH_GROUP_TX_RATE); + + xarray_flowid = &(pf_dev->plcr_table.plcr_flows[E_PLCR_CAR_C]); + plcr_rx_flow = xa_load(xarray_flowid, curr_rx_id); + plcr_tx_flow = xa_load(xarray_flowid, curr_tx_id); + + if (xa_load(xarray_flowid, curr_rx_id)) + rtn = zxdh_set_vf_group_rate_limit( + curr_group, ZXDH_GROUP_RX_RATE, 0); + if (xa_load(xarray_flowid, curr_tx_id)) + rtn = zxdh_set_vf_group_rate_limit( + curr_group, ZXDH_GROUP_TX_RATE, 0); +#endif + + wait_for_completion(&curr_group->free_group_comp); + kfree(curr_group); + } + goto out; + +#ifdef ZXDH_PLCR_OPEN +err_update: + if (new_group != pf_dev->sriov.group_0 && !new_group->num_vfs) { + zxdh_destroy_group_obj(new_group); + + wait_for_completion(&new_group->free_group_comp); + kfree(new_group); + } +#endif +out: + // mutex_unlock(&esw->state_lock); + return rtn; +} + +static int zxdh_creat_vf_meter_obj(struct zxdh_vf_obj *vf, + struct zxdh_vf_meters *meters, + uint32_t meter_type) +{ + struct zxdh_vf_meter_obj *xps; + struct kobject *parent; + struct kobj_type *ktype; + const char *name; + int rtn; + + if (meter_type >= VF_METER_TYPE_NUM) + return -EINVAL; + + xps = &meters->xps[meter_type]; + + if (IS_TX_METER(meter_type)) + parent = meters->tx_obj; + else + parent = meters->rx_obj; + + if (IS_PPS_METER(meter_type)) { + ktype = &zxdh_vf_meter_pps_ktype; + name = "pps"; + } else { + ktype = &zxdh_vf_meter_bps_ktype; + name = "bps"; + } + + rtn = kobject_init_and_add(&xps->kobj, ktype, parent, name); + if (rtn) { + kobject_put(&xps->kobj); + return rtn; + } + + xps->pf_dev = vf->pf_dev; + xps->vf_obj = vf; + xps->meter_type = meter_type; + + return 0; +} + +int zxdh_create_vf_meters_sysfs(struct zxdh_pf_device *pf_dev, uint16_t vf_idx) +{ + int rtn = 0; + struct zxdh_vf_obj *vf; + struct zxdh_vf_meters *meters; + + LOG_DEBUG("enter\n"); + + vf = pf_dev->sriov.vfs + vf_idx; + + meters = kzalloc(sizeof(struct 
zxdh_vf_meters), GFP_KERNEL); + if (!meters) { + return -ENOMEM; + } + + meters->kobj = kobject_create_and_add("meters", &vf->kobj); + if (!meters->kobj) { + rtn = -EINVAL; + goto err_vf_meters; + } + + meters->rx_obj = kobject_create_and_add("rx", meters->kobj); + if (!meters->rx_obj) { + rtn = -EINVAL; + goto err_vf_meters; + } + + meters->tx_obj = kobject_create_and_add("tx", meters->kobj); + if (!meters->tx_obj) { + rtn = -EINVAL; + goto err_vf_meters; + } + + rtn = zxdh_creat_vf_meter_obj(vf, meters, VF_METER_RX_BPS); + if (rtn) + goto err_vf_meters; + + rtn = zxdh_creat_vf_meter_obj(vf, meters, VF_METER_RX_PPS); + if (rtn) + goto err_put_xps_0; + + rtn = zxdh_creat_vf_meter_obj(vf, meters, VF_METER_TX_BPS); + if (rtn) + goto err_put_xps_1; + + rtn = zxdh_creat_vf_meter_obj(vf, meters, VF_METER_TX_PPS); + if (rtn) + goto err_put_xps_2; + + vf->meters = meters; + + return 0; + +err_put_xps_2: + kobject_put(&meters->xps[VF_METER_TX_BPS].kobj); +err_put_xps_1: + kobject_put(&meters->xps[VF_METER_RX_PPS].kobj); +err_put_xps_0: + kobject_put(&meters->xps[VF_METER_RX_BPS].kobj); +err_vf_meters: + kobject_put(meters->rx_obj); + kobject_put(meters->tx_obj); + kobject_put(meters->kobj); + + kfree(meters); + + return rtn; +} + +static void zxdh_destroy_vf_meters_sysfs(struct zxdh_pf_device *pf_dev, + uint16_t vf_idx) +{ + struct zxdh_vf_obj *vf; + struct zxdh_vf_meters *meters; + uint32_t meter_type; + + LOG_DEBUG("enter\n"); + + vf = pf_dev->sriov.vfs + vf_idx; + meters = vf->meters; + if (!meters) + return; + + //Unified rate-limit interface: remove all rate limits of this VF. TODO: the EN-layer VF remove should already have done this + + for (meter_type = 0; meter_type < 4; meter_type++) + kobject_put(&meters->xps[meter_type].kobj); + + kobject_put(meters->rx_obj); + kobject_put(meters->tx_obj); + kobject_put(meters->kobj); + + kfree(meters); +} + +int zxdh_create_vfs_sysfs(struct dh_core_dev *dev, int32_t num_vfs) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int rtn = 0; + uint16_t vf_idx; + + LOG_DEBUG("enter\n"); + 
pf_dev->sriov.vfs = + kcalloc(num_vfs, sizeof(struct zxdh_vf_obj), GFP_KERNEL); + if (!pf_dev->sriov.vfs) { + LOG_ERR("kcalloc vfs failed\n"); + return -ENOMEM; + } + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + rtn = zxdh_create_vf_obj(pf_dev, vf_idx); + if (rtn) { + LOG_ERR("zxdh_create_vf_obj failed\n"); + goto err_vf; + } + + rtn = zxdh_create_vf_meters_sysfs(pf_dev, vf_idx); + if (rtn) { + zxdh_destroy_vf_obj(pf_dev, vf_idx); + LOG_ERR("zxdh_create_vf_meters_sysfs failed\n"); + goto err_vf; + } + } + + return 0; + +err_vf: + for (; vf_idx > 0; --vf_idx) { + zxdh_destroy_vf_meters_sysfs(pf_dev, vf_idx); + + zxdh_destroy_vf_obj(pf_dev, vf_idx); + } + + kfree(pf_dev->sriov.vfs); + pf_dev->sriov.vfs = NULL; + + return rtn; +} + +void zxdh_destroy_vfs_sysfs(struct dh_core_dev *dev, int32_t num_vfs) +{ + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + struct zxdh_vf_obj *vf; + uint16_t vf_idx; + + LOG_DEBUG("enter\n"); + if (!num_vfs || !pf_dev->sriov.vfs) + return; + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vf = pf_dev->sriov.vfs + vf_idx; + + if (vf->group != pf_dev->sriov.group_0) + zxdh_vf_update_sysfs_group(pf_dev, vf, 0); + + zxdh_destroy_vf_meters_sysfs(pf_dev, vf_idx); + + zxdh_destroy_vf_obj(pf_dev, vf_idx); + } + kfree(pf_dev->sriov.vfs); + pf_dev->sriov.vfs = NULL; +} + +void zxdh_cleanup_sysfs_group(struct zxdh_pf_device *pf_dev) +{ + struct zxdh_group_obj *group, *tmp; + + LOG_DEBUG("enter\n"); + list_for_each_entry_safe(group, tmp, &pf_dev->sriov.groups_head, list) { + list_del(&group->list); + kfree(group); + } +} + +#ifdef ZXDH_PLCR_DEBUG +int zxdh_sriov_attr_create(struct zxdh_pf_device *pf_dev) +{ + int rtn; + struct zxdh_sriov_sysfs *sriov = &pf_dev->sriov; + + LOG_DEBUG("enter\n"); + + sriov->burst_attr.attr.name = "burst"; + sriov->burst_attr.attr.mode = 0644; + sriov->burst_attr.show = zxdh_burst_show; + sriov->burst_attr.store = zxdh_burst_store; + rtn = sysfs_create_file(sriov->sriov_obj, &sriov->burst_attr.attr); + if 
(rtn) { + LOG_ERR("burst sysfs_create_file failed!") + return rtn; + } + + sriov->profile_attr.attr.name = "profiles_stat"; + sriov->profile_attr.attr.mode = 0444; + sriov->profile_attr.show = zxdh_profile_stat_show; + sriov->profile_attr.store = NULL; + rtn = sysfs_create_file(sriov->sriov_obj, &sriov->profile_attr.attr); + if (rtn) { + LOG_ERR("profiles_stat sysfs_create_file failed!") + goto err_profile_attr; + } + + sriov->all_vf_stats_attr.attr.name = "all_vf_stats"; + sriov->all_vf_stats_attr.attr.mode = 0644; + sriov->all_vf_stats_attr.show = zxdh_all_vf_stats_show; + sriov->all_vf_stats_attr.store = zxdh_all_vf_stats_store; + rtn = sysfs_create_file(sriov->sriov_obj, + &sriov->all_vf_stats_attr.attr); + if (rtn) { + LOG_ERR("all_vf_stats sysfs_create_file failed!") + goto err_all_vf_stats_attr; + } + + return rtn; +err_all_vf_stats_attr: + sysfs_remove_file(sriov->sriov_obj, &sriov->profile_attr.attr); +err_profile_attr: + sysfs_remove_file(sriov->sriov_obj, &sriov->burst_attr.attr); + return rtn; +} + +void zxdh_sriov_attr_remove(struct zxdh_pf_device *pf_dev) +{ + struct zxdh_sriov_sysfs *sriov = &pf_dev->sriov; + + LOG_DEBUG("enter\n"); + sysfs_remove_file(sriov->sriov_obj, &sriov->all_vf_stats_attr.attr); + sysfs_remove_file(sriov->sriov_obj, &sriov->profile_attr.attr); + sysfs_remove_file(sriov->sriov_obj, &sriov->burst_attr.attr); +} +#endif + +int zxdh_sriov_sysfs_init(struct dh_core_dev *dev) +{ + struct device *device = dev->device; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int rtn = 0; + + if (dev->coredev_type != DH_COREDEV_PF) + return rtn; + + pf_dev->sriov.sriov_obj = + kobject_create_and_add("sriov", &device->kobj); + if (!pf_dev->sriov.sriov_obj) { + LOG_ERR("zxdh create sriov sysfs failed (%d)\n", -ENOMEM); + return -ENOMEM; + } + +#ifdef ZXDH_PLCR_DEBUG + rtn = zxdh_sriov_attr_create(pf_dev); + if (rtn) { + LOG_ERR("zxdh_sriov_attr_create failed (%d)\n", rtn); + goto err_attr; + } +#endif + + pf_dev->sriov.groups_obj = + 
kobject_create_and_add("groups", pf_dev->sriov.sriov_obj); + if (!pf_dev->sriov.groups_obj) { + LOG_ERR("zxdh create groups sysfs failed (%d)\n", -ENOMEM); + rtn = -ENOMEM; + goto err_groups; + } + + INIT_LIST_HEAD(&pf_dev->sriov.groups_head); + pf_dev->sriov.group_0 = + zxdh_create_group_obj(pf_dev, ZXDH_DEFAULT_VF_GROUP_ID); + if (IS_ERR(pf_dev->sriov.group_0)) { + LOG_ERR("zxdh create rate group 0 failed (%ld)\n", + PTR_ERR(pf_dev->sriov.group_0)); + rtn = PTR_ERR(pf_dev->sriov.group_0); + goto err_group0; + } + + LOG_DEBUG("p_sriov_obj = %p, vport = 0x%x\n", pf_dev->sriov.sriov_obj, + pf_dev->vport); + + return rtn; + +err_group0: + kobject_put(pf_dev->sriov.groups_obj); + pf_dev->sriov.groups_obj = NULL; +err_groups: +#ifdef ZXDH_PLCR_DEBUG + zxdh_sriov_attr_remove(pf_dev); +err_attr: +#endif + kobject_put(pf_dev->sriov.sriov_obj); + pf_dev->sriov.sriov_obj = NULL; + + return rtn; +} + +void zxdh_sriov_sysfs_exit(struct dh_core_dev *dev) +{ + // struct device *device = &dev->pdev->dev; + struct pci_dev *pdev = dev->pdev; + struct zxdh_pf_device *pf_dev = dh_core_priv(dev); + int32_t num_vfs = pci_num_vf(pdev); + + if (dev->coredev_type != DH_COREDEV_PF) + return; + + LOG_DEBUG("enter\n"); + + /* Handle VFs that were not removed manually via sriov_config before the driver is unloaded directly; a cleanup check is required here */ + zxdh_destroy_vfs_sysfs(dev, num_vfs); + + zxdh_destroy_group_obj(pf_dev->sriov.group_0); + wait_for_completion(&pf_dev->sriov.group_0->free_group_comp); + kfree(pf_dev->sriov.group_0); + + zxdh_cleanup_sysfs_group(pf_dev); + + kobject_put(pf_dev->sriov.groups_obj); + pf_dev->sriov.groups_obj = NULL; +#ifdef ZXDH_PLCR_DEBUG + zxdh_sriov_attr_remove(pf_dev); +#endif + kobject_put(pf_dev->sriov.sriov_obj); + pf_dev->sriov.sriov_obj = NULL; +} diff --git a/drivers/net/ethernet/dinghai/xarray.c b/drivers/net/ethernet/dinghai/xarray.c new file mode 100644 index 000000000000..345f030775cf --- /dev/null +++ b/drivers/net/ethernet/dinghai/xarray.c @@ -0,0 +1,1933 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * XArray 
implementation + * Copyright (c) 2017-2018 Microsoft Corporation + * Copyright (c) 2018-2020 Oracle + * Author: Matthew Wilcox + */ + +#include +#ifdef NEED_XARRAY +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Coding conventions in this file: + * + * @xa is used to refer to the entire xarray. + * @xas is the 'xarray operation state'. It may be either a pointer to + * an xa_state, or an xa_state stored on the stack. This is an unfortunate + * ambiguity. + * @index is the index of the entry being operated on + * @mark is an xa_mark_t; a small number indicating one of the mark bits. + * @node refers to an xa_node; usually the primary one being operated on by + * this function. + * @offset is the index into the slots array inside an xa_node. + * @parent refers to the @xa_node closer to the head than @node. + * @entry refers to something stored in a slot in the xarray + */ + +static inline unsigned int xa_lock_type(const struct xarray *xa) +{ + return (__force unsigned int)xa->xa_flags & 3; +} + +static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) +{ + if (lock_type == DH_XA_LOCK_IRQ) + xas_lock_irq(xas); + else if (lock_type == DH_XA_LOCK_BH) + xas_lock_bh(xas); + else + xas_lock(xas); +} + +static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) +{ + if (lock_type == DH_XA_LOCK_IRQ) + xas_unlock_irq(xas); + else if (lock_type == DH_XA_LOCK_BH) + xas_unlock_bh(xas); + else + xas_unlock(xas); +} + +static inline bool xa_track_free(const struct xarray *xa) +{ + return xa->xa_flags & XA_FLAGS_TRACK_FREE; +} + +static inline bool xa_zero_busy(const struct xarray *xa) +{ + return xa->xa_flags & XA_FLAGS_ZERO_BUSY; +} + +static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) +{ + if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) + xa->xa_flags |= XA_FLAGS_MARK(mark); +} + +static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark) +{ + if 
(xa->xa_flags & XA_FLAGS_MARK(mark)) + xa->xa_flags &= ~(XA_FLAGS_MARK(mark)); +} + +static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark) +{ + return node->marks[(__force unsigned)mark]; +} + +static inline bool node_get_mark(struct xa_node *node, unsigned int offset, + xa_mark_t mark) +{ + return test_bit(offset, node_marks(node, mark)); +} + +/* returns true if the bit was set */ +static inline bool node_set_mark(struct xa_node *node, unsigned int offset, + xa_mark_t mark) +{ + return __test_and_set_bit(offset, node_marks(node, mark)); +} + +/* returns true if the bit was set */ +static inline bool node_clear_mark(struct xa_node *node, unsigned int offset, + xa_mark_t mark) +{ + return __test_and_clear_bit(offset, node_marks(node, mark)); +} + +static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark) +{ + return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE); +} + +static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) +{ + bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE); +} + +#define mark_inc(mark) \ + do { \ + mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \ + } while (0) + +/* + * xas_squash_marks() - Merge all marks to the first entry + * @xas: Array operation state. + * + * Set a mark on the first entry if any entry has it set. Clear marks on + * all sibling entries. 
+ */ +static void xas_squash_marks(const struct xa_state *xas) +{ + unsigned int mark = 0; + unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; + + if (!xas->xa_sibs) + return; + + do { + unsigned long *marks = xas->xa_node->marks[mark]; + if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) + continue; + __set_bit(xas->xa_offset, marks); + bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); + } while (mark++ != (__force unsigned)XA_MARK_MAX); +} + +/* extracts the offset within this node from the index */ +static unsigned int get_offset(unsigned long index, struct xa_node *node) +{ + return (index >> node->shift) & XA_CHUNK_MASK; +} + +static void xas_set_offset(struct xa_state *xas) +{ + xas->xa_offset = get_offset(xas->xa_index, xas->xa_node); +} + +/* move the index either forwards (find) or backwards (sibling slot) */ +static void xas_move_index(struct xa_state *xas, unsigned long offset) +{ + unsigned int shift = xas->xa_node->shift; + xas->xa_index &= ~XA_CHUNK_MASK << shift; + xas->xa_index += offset << shift; +} + +static void xas_advance(struct xa_state *xas) +{ + xas->xa_offset++; + xas_move_index(xas, xas->xa_offset); +} + +static void *set_bounds(struct xa_state *xas) +{ + xas->xa_node = XAS_BOUNDS; + return NULL; +} + +/* + * Starts a walk. If the @xas is already valid, we assume that it's on + * the right path and just return where we've got to. If we're in an + * error state, return NULL. If the index is outside the current scope + * of the xarray, return NULL without changing @xas->xa_node. Otherwise + * set @xas->xa_node to NULL and return the current head of the array. 
+ */ +static void *xas_start(struct xa_state *xas) +{ + void *entry; + + if (xas_valid(xas)) + return xas_reload(xas); + if (xas_error(xas)) + return NULL; + + entry = xa_head(xas->xa); + if (!xa_is_node(entry)) { + if (xas->xa_index) + return set_bounds(xas); + } else { + if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK) + return set_bounds(xas); + } + + xas->xa_node = NULL; + return entry; +} + +static void *xas_descend(struct xa_state *xas, struct xa_node *node) +{ + unsigned int offset = get_offset(xas->xa_index, node); + void *entry = xa_entry(xas->xa, node, offset); + + xas->xa_node = node; + if (xa_is_sibling(entry)) { + offset = xa_to_sibling(entry); + entry = xa_entry(xas->xa, node, offset); + } + + xas->xa_offset = offset; + return entry; +} + +/** + * xas_load() - Load an entry from the XArray (advanced). + * @xas: XArray operation state. + * + * Usually walks the @xas to the appropriate state to load the entry + * stored at xa_index. However, it will do nothing and return %NULL if + * @xas is in an error state. xas_load() will never expand the tree. + * + * If the xa_state is set up to operate on a multi-index entry, xas_load() + * may return %NULL or an internal entry, even if there are entries + * present within the range specified by @xas. + * + * Context: Any context. The caller should hold the xa_lock or the RCU lock. + * Return: Usually an entry in the XArray, but see description for exceptions. 
+ */ +void *xas_load(struct xa_state *xas) +{ + void *entry = xas_start(xas); + + while (xa_is_node(entry)) { + struct xa_node *node = xa_to_node(entry); + + if (xas->xa_shift > node->shift) + break; + entry = xas_descend(xas, node); + if (node->shift == 0) + break; + } + return entry; +} + +/* Move the radix tree node cache here */ +#ifdef NEED_XARRAY +static struct kmem_cache *radix_tree_node_cachep; +#else +extern struct kmem_cache *radix_tree_node_cachep; +#endif + +static void radix_tree_node_rcu_free(struct rcu_head *head) +{ + struct radix_tree_node *node = + container_of(head, struct radix_tree_node, rcu_head); + + /* + * Must only free zeroed nodes into the slab. We can be left with + * non-NULL entries by radix_tree_free_nodes, so clear the entries + * and tags here. + */ + memset(node->slots, 0, sizeof(node->slots)); + memset(node->tags, 0, sizeof(node->tags)); + INIT_LIST_HEAD(&node->private_list); + + kmem_cache_free(radix_tree_node_cachep, node); +} + +#define XA_RCU_FREE ((struct xarray *)1) + +static void xa_node_free(struct xa_node *node) +{ + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + node->array = XA_RCU_FREE; + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); +} + +/* + * xas_destroy() - Free any resources allocated during the XArray operation. + * @xas: XArray operation state. + * + * This function is now internal-only. + */ +static void xas_destroy(struct xa_state *xas) +{ + struct xa_node *next, *node = xas->xa_alloc; + + while (node) { + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + next = rcu_dereference_raw(node->parent); + radix_tree_node_rcu_free(&node->rcu_head); + xas->xa_alloc = node = next; + } +} + +/* + * __xas_nomem() - Drop locks and allocate memory if needed. + * @xas: XArray operation state. + * @gfp: Memory allocation flags. + * + * Internal variant of xas_nomem(). + * + * Return: true if memory was needed, and was successfully allocated. 
+ */ +static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) + __must_hold(xas->xa->xa_lock) +{ + unsigned int lock_type = xa_lock_type(xas->xa); + + if (xas->xa_node != XA_ERROR(-ENOMEM)) { + xas_destroy(xas); + return false; + } + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + if (gfpflags_allow_blocking(gfp)) { + xas_unlock_type(xas, lock_type); + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + xas_lock_type(xas, lock_type); + } else { + xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); + } + if (!xas->xa_alloc) + return false; + xas->xa_alloc->parent = NULL; + XA_NODE_BUG_ON(xas->xa_alloc, + !list_empty(&xas->xa_alloc->private_list)); + xas->xa_node = XAS_RESTART; + return true; +} + +static void xas_update(struct xa_state *xas, struct xa_node *node) +{ + if (xas->xa_update) + xas->xa_update(node); + else + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); +} + +static void *xas_alloc(struct xa_state *xas, unsigned int shift) +{ + struct xa_node *parent = xas->xa_node; + struct xa_node *node = xas->xa_alloc; + + if (xas_invalid(xas)) + return NULL; + + if (node) { + xas->xa_alloc = NULL; + } else { + gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; + + if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT) + gfp |= __GFP_ACCOUNT; + + node = kmem_cache_alloc(radix_tree_node_cachep, gfp); + if (!node) { + xas_set_err(xas, -ENOMEM); + return NULL; + } + } + + if (parent) { + node->offset = xas->xa_offset; + parent->count++; + XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); + xas_update(xas, parent); + } + XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); + XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); + node->shift = shift; + node->count = 0; + node->nr_values = 0; + RCU_INIT_POINTER(node->parent, xas->xa_node); + node->array = xas->xa; + + return node; +} + +#ifdef CONFIG_XARRAY_MULTI +/* Returns the number of indices covered by a given xa_state */ +static unsigned long xas_size(const struct xa_state *xas) +{ + return 
(xas->xa_sibs + 1UL) << xas->xa_shift; +} +#endif + +/* + * Use this to calculate the maximum index that will need to be created + * in order to add the entry described by @xas. Because we cannot store a + * multi-index entry at index 0, the calculation is a little more complex + * than you might expect. + */ +static unsigned long xas_max(struct xa_state *xas) +{ + unsigned long max = xas->xa_index; + +#ifdef CONFIG_XARRAY_MULTI + if (xas->xa_shift || xas->xa_sibs) { + unsigned long mask = xas_size(xas) - 1; + max |= mask; + if (mask == max) + max++; + } +#endif + + return max; +} + +/* The maximum index that can be contained in the array without expanding it */ +static unsigned long max_index(void *entry) +{ + if (!xa_is_node(entry)) + return 0; + return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; +} + +static void xas_shrink(struct xa_state *xas) +{ + struct xarray *xa = xas->xa; + struct xa_node *node = xas->xa_node; + + for (;;) { + void *entry; + + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + if (node->count != 1) + break; + entry = xa_entry_locked(xa, node, 0); + if (!entry) + break; + if (!xa_is_node(entry) && node->shift) + break; + if (xa_is_zero(entry) && xa_zero_busy(xa)) + entry = NULL; + xas->xa_node = XAS_BOUNDS; + + RCU_INIT_POINTER(xa->xa_head, entry); + if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) + xa_mark_clear(xa, XA_FREE_MARK); + + node->count = 0; + node->nr_values = 0; + if (!xa_is_node(entry)) + RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); + xas_update(xas, node); + xa_node_free(node); + if (!xa_is_node(entry)) + break; + node = xa_to_node(entry); + node->parent = NULL; + } +} + +/* + * xas_delete_node() - Attempt to delete an xa_node + * @xas: Array operation state. + * + * Attempts to delete the @xas->xa_node. This will fail if xa->node has + * a non-zero reference count. 
+ */ +static void xas_delete_node(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + for (;;) { + struct xa_node *parent; + + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + if (node->count) + break; + + parent = xa_parent_locked(xas->xa, node); + xas->xa_node = parent; + xas->xa_offset = node->offset; + xa_node_free(node); + + if (!parent) { + xas->xa->xa_head = NULL; + xas->xa_node = XAS_BOUNDS; + return; + } + + parent->slots[xas->xa_offset] = NULL; + parent->count--; + XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); + node = parent; + xas_update(xas, node); + } + + if (!node->parent) + xas_shrink(xas); +} + +/** + * xas_free_nodes() - Free this node and all nodes that it references + * @xas: Array operation state. + * @top: Node to free + * + * This node has been removed from the tree. We must now free it and all + * of its subnodes. There may be RCU walkers with references into the tree, + * so we must replace all entries with retry markers. + */ +static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) +{ + unsigned int offset = 0; + struct xa_node *node = top; + + for (;;) { + void *entry = xa_entry_locked(xas->xa, node, offset); + + if (node->shift && xa_is_node(entry)) { + node = xa_to_node(entry); + offset = 0; + continue; + } + if (entry) + RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); + offset++; + while (offset == XA_CHUNK_SIZE) { + struct xa_node *parent; + + parent = xa_parent_locked(xas->xa, node); + offset = node->offset + 1; + node->count = 0; + node->nr_values = 0; + xas_update(xas, node); + xa_node_free(node); + if (node == top) + return; + node = parent; + } + } +} + +/* + * xas_expand adds nodes to the head of the tree until it has reached + * sufficient height to be able to contain @xas->xa_index + */ +static int xas_expand(struct xa_state *xas, void *head) +{ + struct xarray *xa = xas->xa; + struct xa_node *node = NULL; + unsigned int shift = 0; + unsigned long max = xas_max(xas); + + if 
(!head) { + if (max == 0) + return 0; + while ((max >> shift) >= XA_CHUNK_SIZE) + shift += XA_CHUNK_SHIFT; + return shift + XA_CHUNK_SHIFT; + } else if (xa_is_node(head)) { + node = xa_to_node(head); + shift = node->shift + XA_CHUNK_SHIFT; + } + xas->xa_node = NULL; + + while (max > max_index(head)) { + xa_mark_t mark = 0; + + XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); + node = xas_alloc(xas, shift); + if (!node) + return -ENOMEM; + + node->count = 1; + if (xa_is_value(head)) + node->nr_values = 1; + RCU_INIT_POINTER(node->slots[0], head); + + /* Propagate the aggregated mark info to the new child */ + for (;;) { + if (xa_track_free(xa) && mark == XA_FREE_MARK) { + node_mark_all(node, XA_FREE_MARK); + if (!xa_marked(xa, XA_FREE_MARK)) { + node_clear_mark(node, 0, XA_FREE_MARK); + xa_mark_set(xa, XA_FREE_MARK); + } + } else if (xa_marked(xa, mark)) { + node_set_mark(node, 0, mark); + } + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } + + /* + * Now that the new node is fully initialised, we can add + * it to the tree + */ + if (xa_is_node(head)) { + xa_to_node(head)->offset = 0; + rcu_assign_pointer(xa_to_node(head)->parent, node); + } + head = xa_mk_node(node); + rcu_assign_pointer(xa->xa_head, head); + xas_update(xas, node); + + shift += XA_CHUNK_SHIFT; + } + + xas->xa_node = node; + return shift; +} + +/* + * xas_create() - Create a slot to store an entry in. + * @xas: XArray operation state. + * @allow_root: %true if we can store the entry in the root directly + * + * Most users will not need to call this function directly, as it is called + * by xas_store(). It is useful for doing conditional store operations + * (see the xa_cmpxchg() implementation for an example). + * + * Return: If the slot already existed, returns the contents of this slot. + * If the slot was newly created, returns %NULL. If it failed to create the + * slot, returns %NULL and indicates the error in @xas. 
+ */ +static void *xas_create(struct xa_state *xas, bool allow_root) +{ + struct xarray *xa = xas->xa; + void *entry; + void __rcu **slot; + struct xa_node *node = xas->xa_node; + int shift; + unsigned int order = xas->xa_shift; + + if (xas_top(node)) { + entry = xa_head_locked(xa); + xas->xa_node = NULL; + if (!entry && xa_zero_busy(xa)) + entry = XA_ZERO_ENTRY; + shift = xas_expand(xas, entry); + if (shift < 0) + return NULL; + if (!shift && !allow_root) + shift = XA_CHUNK_SHIFT; + entry = xa_head_locked(xa); + slot = &xa->xa_head; + } else if (xas_error(xas)) { + return NULL; + } else if (node) { + unsigned int offset = xas->xa_offset; + + shift = node->shift; + entry = xa_entry_locked(xa, node, offset); + slot = &node->slots[offset]; + } else { + shift = 0; + entry = xa_head_locked(xa); + slot = &xa->xa_head; + } + + while (shift > order) { + shift -= XA_CHUNK_SHIFT; + if (!entry) { + node = xas_alloc(xas, shift); + if (!node) + break; + if (xa_track_free(xa)) + node_mark_all(node, XA_FREE_MARK); + rcu_assign_pointer(*slot, xa_mk_node(node)); + } else if (xa_is_node(entry)) { + node = xa_to_node(entry); + } else { + break; + } + entry = xas_descend(xas, node); + slot = &node->slots[xas->xa_offset]; + } + + return entry; +} + +static void update_node(struct xa_state *xas, struct xa_node *node, int count, + int values) +{ + if (!node || (!count && !values)) + return; + + node->count += count; + node->nr_values += values; + XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); + XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); + xas_update(xas, node); + if (count < 0) + xas_delete_node(xas); +} + +/** + * xas_store() - Store this entry in the XArray. + * @xas: XArray operation state. + * @entry: New entry. + * + * If @xas is operating on a multi-index entry, the entry returned by this + * function is essentially meaningless (it may be an internal entry or it + * may be %NULL, even if there are non-NULL entries at some of the indices + * covered by the range). 
This is not a problem for any current users, + * and can be changed if needed. + * + * Return: The old entry at this index. + */ +void *xas_store(struct xa_state *xas, void *entry) +{ + struct xa_node *node; + void __rcu **slot = &xas->xa->xa_head; + unsigned int offset, max; + int count = 0; + int values = 0; + void *first, *next; + bool value = xa_is_value(entry); + + if (entry) { + bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry); + first = xas_create(xas, allow_root); + } else { + first = xas_load(xas); + } + + if (xas_invalid(xas)) + return first; + node = xas->xa_node; + if (node && (xas->xa_shift < node->shift)) + xas->xa_sibs = 0; + if ((first == entry) && !xas->xa_sibs) + return first; + + next = first; + offset = xas->xa_offset; + max = xas->xa_offset + xas->xa_sibs; + if (node) { + slot = &node->slots[offset]; + if (xas->xa_sibs) + xas_squash_marks(xas); + } + if (!entry) + xas_init_marks(xas); + + for (;;) { + /* + * Must clear the marks before setting the entry to NULL, + * otherwise xas_for_each_marked may find a NULL entry and + * stop early. rcu_assign_pointer contains a release barrier + * so the mark clearing will appear to happen before the + * entry is set to NULL. + */ + rcu_assign_pointer(*slot, entry); + if (xa_is_node(next) && (!node || node->shift)) + xas_free_nodes(xas, xa_to_node(next)); + if (!node) + break; + count += !next - !entry; + values += !xa_is_value(first) - !value; + if (entry) { + if (offset == max) + break; + if (!xa_is_sibling(entry)) + entry = xa_mk_sibling(xas->xa_offset); + } else { + if (offset == XA_CHUNK_MASK) + break; + } + next = xa_entry_locked(xas->xa, node, ++offset); + if (!xa_is_sibling(next)) { + if (!entry && (offset > max)) + break; + first = next; + } + slot++; + } + + update_node(xas, node, count, values); + return first; +} + +/** + * xas_get_mark() - Returns the state of this mark. + * @xas: XArray operation state. + * @mark: Mark number. 
+ * + * Return: true if the mark is set, false if the mark is clear or @xas + * is in an error state. + */ +bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) +{ + if (xas_invalid(xas)) + return false; + if (!xas->xa_node) + return xa_marked(xas->xa, mark); + return node_get_mark(xas->xa_node, xas->xa_offset, mark); +} + +/** + * xas_set_mark() - Sets the mark on this entry and its parents. + * @xas: XArray operation state. + * @mark: Mark number. + * + * Sets the specified mark on this entry, and walks up the tree setting it + * on all the ancestor entries. Does nothing if @xas has not been walked to + * an entry, or is in an error state. + */ +void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset = xas->xa_offset; + + if (xas_invalid(xas)) + return; + + while (node) { + if (node_set_mark(node, offset, mark)) + return; + offset = node->offset; + node = xa_parent_locked(xas->xa, node); + } + + if (!xa_marked(xas->xa, mark)) + xa_mark_set(xas->xa, mark); +} + +/** + * xas_clear_mark() - Clears the mark on this entry and its parents. + * @xas: XArray operation state. + * @mark: Mark number. + * + * Clears the specified mark on this entry, and walks back to the head + * attempting to clear it on all the ancestor entries. Does nothing if + * @xas has not been walked to an entry, or is in an error state. + */ +void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + unsigned int offset = xas->xa_offset; + + if (xas_invalid(xas)) + return; + + while (node) { + if (!node_clear_mark(node, offset, mark)) + return; + if (node_any_mark(node, mark)) + return; + + offset = node->offset; + node = xa_parent_locked(xas->xa, node); + } + + if (xa_marked(xas->xa, mark)) + xa_mark_clear(xas->xa, mark); +} + +/** + * xas_init_marks() - Initialise all marks for the entry + * @xas: Array operations state. 
+ * + * Initialise all marks for the entry specified by @xas. If we're tracking + * free entries with a mark, we need to set it on all entries. All other + * marks are cleared. + * + * This implementation is not as efficient as it could be; we may walk + * up the tree multiple times. + */ +void xas_init_marks(const struct xa_state *xas) +{ + xa_mark_t mark = 0; + + for (;;) { + if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) + xas_set_mark(xas, mark); + else + xas_clear_mark(xas, mark); + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } +} + +#ifdef CONFIG_XARRAY_MULTI +static unsigned int node_get_marks(struct xa_node *node, unsigned int offset) +{ + unsigned int marks = 0; + xa_mark_t mark = XA_MARK_0; + + for (;;) { + if (node_get_mark(node, offset, mark)) + marks |= 1 << (__force unsigned int)mark; + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } + + return marks; +} + +static void node_set_marks(struct xa_node *node, unsigned int offset, + struct xa_node *child, unsigned int marks) +{ + xa_mark_t mark = XA_MARK_0; + + for (;;) { + if (marks & (1 << (__force unsigned int)mark)) { + node_set_mark(node, offset, mark); + if (child) + node_mark_all(child, mark); + } + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } +} + +/** + * xas_split() - Split a multi-index entry into smaller entries. + * @xas: XArray operation state. + * @entry: New entry to store in the array. + * @order: New entry order. + * + * The value in the entry is copied to all the replacement entries. + * + * Context: Any context. The caller should hold the xa_lock. 
+ */ +void xas_split(struct xa_state *xas, void *entry, unsigned int order) +{ + unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + unsigned int offset, marks; + struct xa_node *node; + void *curr = xas_load(xas); + int values = 0; + + node = xas->xa_node; + if (xas_top(node)) + return; + + marks = node_get_marks(node, xas->xa_offset); + + offset = xas->xa_offset + sibs; + do { + if (xas->xa_shift < node->shift) { + struct xa_node *child = xas->xa_alloc; + + xas->xa_alloc = rcu_dereference_raw(child->parent); + child->shift = node->shift - XA_CHUNK_SHIFT; + child->offset = offset; + child->count = XA_CHUNK_SIZE; + child->nr_values = xa_is_value(entry) ? XA_CHUNK_SIZE : + 0; + RCU_INIT_POINTER(child->parent, node); + node_set_marks(node, offset, child, marks); + rcu_assign_pointer(node->slots[offset], + xa_mk_node(child)); + if (xa_is_value(curr)) + values--; + xas_update(xas, child); + } else { + unsigned int canon = offset - xas->xa_sibs; + + node_set_marks(node, canon, NULL, marks); + rcu_assign_pointer(node->slots[canon], entry); + while (offset > canon) + rcu_assign_pointer(node->slots[offset--], + xa_mk_sibling(canon)); + values += (xa_is_value(entry) - xa_is_value(curr)) * + (xas->xa_sibs + 1); + } + } while (offset-- > xas->xa_offset); + + node->nr_values += values; + xas_update(xas, node); +} +#endif + +/** + * xas_pause() - Pause a walk to drop a lock. + * @xas: XArray operation state. + * + * Some users need to pause a walk and drop the lock they're holding in + * order to yield to a higher priority thread or carry out an operation + * on an entry. Those users should call this function before they drop + * the lock. It resets the @xas to be suitable for the next iteration + * of the loop after the user has reacquired the lock. If most entries + * found during a walk require you to call xas_pause(), the xa_for_each() + * iterator may be more appropriate. + * + * Note that xas_pause() only works for forward iteration. 
If a user needs + * to pause a reverse iteration, we will need a xas_pause_rev(). + */ +void xas_pause(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (xas_invalid(xas)) + return; + + xas->xa_node = XAS_RESTART; + if (node) { + unsigned long offset = xas->xa_offset; + while (++offset < XA_CHUNK_SIZE) { + if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) + break; + } + xas->xa_index += (offset - xas->xa_offset) << node->shift; + if (xas->xa_index == 0) + xas->xa_node = XAS_BOUNDS; + } else { + xas->xa_index++; + } +} + +/* + * __xas_prev() - Find the previous entry in the XArray. + * @xas: XArray operation state. + * + * Helper function for xas_prev() which handles all the complex cases + * out of line. + */ +void *__xas_prev(struct xa_state *xas) +{ + void *entry; + + if (!xas_frozen(xas->xa_node)) + xas->xa_index--; + if (!xas->xa_node) + return set_bounds(xas); + if (xas_not_node(xas->xa_node)) + return xas_load(xas); + + if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) + xas->xa_offset--; + + while (xas->xa_offset == 255) { + xas->xa_offset = xas->xa_node->offset - 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + return set_bounds(xas); + } + + for (;;) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!xa_is_node(entry)) + return entry; + + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } +} + +/* + * __xas_next() - Find the next entry in the XArray. + * @xas: XArray operation state. + * + * Helper function for xas_next() which handles all the complex cases + * out of line. 
+ */ +void *__xas_next(struct xa_state *xas) +{ + void *entry; + + if (!xas_frozen(xas->xa_node)) + xas->xa_index++; + if (!xas->xa_node) + return set_bounds(xas); + if (xas_not_node(xas->xa_node)) + return xas_load(xas); + + if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) + xas->xa_offset++; + + while (xas->xa_offset == XA_CHUNK_SIZE) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + return set_bounds(xas); + } + + for (;;) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!xa_is_node(entry)) + return entry; + + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } +} + +/** + * xas_find() - Find the next present entry in the XArray. + * @xas: XArray operation state. + * @max: Highest index to return. + * + * If the @xas has not yet been walked to an entry, return the entry + * which has an index >= xas.xa_index. If it has been walked, the entry + * currently being pointed at has been processed, and so we move to the + * next entry. + * + * If no entry is found and the array is smaller than @max, the iterator + * is set to the smallest index not yet in the array. This allows @xas + * to be immediately passed to xas_store(). + * + * Return: The entry, if found, otherwise %NULL. 
+ */ +void *xas_find(struct xa_state *xas, unsigned long max) +{ + void *entry; + + if (xas_error(xas) || xas->xa_node == XAS_BOUNDS) + return NULL; + if (xas->xa_index > max) + return set_bounds(xas); + + if (!xas->xa_node) { + xas->xa_index = 1; + return set_bounds(xas); + } else if (xas->xa_node == XAS_RESTART) { + entry = xas_load(xas); + if (entry || xas_not_node(xas->xa_node)) + return entry; + } else if (!xas->xa_node->shift && + xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { + xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; + } + + xas_advance(xas); + + while (xas->xa_node && (xas->xa_index <= max)) { + if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + continue; + } + + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (xa_is_node(entry)) { + xas->xa_node = xa_to_node(entry); + xas->xa_offset = 0; + continue; + } + if (entry && !xa_is_sibling(entry)) + return entry; + + xas_advance(xas); + } + + if (!xas->xa_node) + xas->xa_node = XAS_BOUNDS; + return NULL; +} + +/** + * xas_find_marked() - Find the next marked entry in the XArray. + * @xas: XArray operation state. + * @max: Highest index to return. + * @mark: Mark number to search for. + * + * If the @xas has not yet been walked to an entry, return the marked entry + * which has an index >= xas.xa_index. If it has been walked, the entry + * currently being pointed at has been processed, and so we return the + * first marked entry with an index > xas.xa_index. + * + * If no marked entry is found and the array is smaller than @max, @xas is + * set to the bounds state and xas->xa_index is set to the smallest index + * not yet in the array. This allows @xas to be immediately passed to + * xas_store(). + * + * If no entry is found before @max is reached, @xas is set to the restart + * state. + * + * Return: The entry, if found, otherwise %NULL. 
+ */ +void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) +{ + bool advance = true; + unsigned int offset; + void *entry; + + if (xas_error(xas)) + return NULL; + if (xas->xa_index > max) + goto max; + + if (!xas->xa_node) { + xas->xa_index = 1; + goto out; + } else if (xas_top(xas->xa_node)) { + advance = false; + entry = xa_head(xas->xa); + xas->xa_node = NULL; + if (xas->xa_index > max_index(entry)) + goto out; + if (!xa_is_node(entry)) { + if (xa_marked(xas->xa, mark)) + return entry; + xas->xa_index = 1; + goto out; + } + xas->xa_node = xa_to_node(entry); + xas->xa_offset = xas->xa_index >> xas->xa_node->shift; + } + + while (xas->xa_index <= max) { + if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { + xas->xa_offset = xas->xa_node->offset + 1; + xas->xa_node = xa_parent(xas->xa, xas->xa_node); + if (!xas->xa_node) + break; + advance = false; + continue; + } + + if (!advance) { + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (xa_is_sibling(entry)) { + xas->xa_offset = xa_to_sibling(entry); + xas_move_index(xas, xas->xa_offset); + } + } + + offset = xas_find_chunk(xas, advance, mark); + if (offset > xas->xa_offset) { + advance = false; + xas_move_index(xas, offset); + /* Mind the wrap */ + if ((xas->xa_index - 1) >= max) + goto max; + xas->xa_offset = offset; + if (offset == XA_CHUNK_SIZE) + continue; + } + + entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); + if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK)) + continue; + if (!xa_is_node(entry)) + return entry; + xas->xa_node = xa_to_node(entry); + xas_set_offset(xas); + } + +out: + if (xas->xa_index > max) + goto max; + return set_bounds(xas); +max: + xas->xa_node = XAS_RESTART; + return NULL; +} + +/** + * xas_find_conflict() - Find the next present entry in a range. + * @xas: XArray operation state. + * + * The @xas describes both a range and a position within that range. + * + * Context: Any context. Expects xa_lock to be held. 
+ * Return: The next entry in the range covered by @xas or %NULL. + */ +void *xas_find_conflict(struct xa_state *xas) +{ + void *curr; + + if (xas_error(xas)) + return NULL; + + if (!xas->xa_node) + return NULL; + + if (xas_top(xas->xa_node)) { + curr = xas_start(xas); + if (!curr) + return NULL; + while (xa_is_node(curr)) { + struct xa_node *node = xa_to_node(curr); + curr = xas_descend(xas, node); + } + if (curr) + return curr; + } + + if (xas->xa_node->shift > xas->xa_shift) + return NULL; + + for (;;) { + if (xas->xa_node->shift == xas->xa_shift) { + if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) + break; + } else if (xas->xa_offset == XA_CHUNK_MASK) { + xas->xa_offset = xas->xa_node->offset; + xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); + if (!xas->xa_node) + break; + continue; + } + curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); + if (xa_is_sibling(curr)) + continue; + while (xa_is_node(curr)) { + xas->xa_node = xa_to_node(curr); + xas->xa_offset = 0; + curr = xa_entry_locked(xas->xa, xas->xa_node, 0); + } + if (curr) + return curr; + } + xas->xa_offset -= xas->xa_sibs; + return NULL; +} + +/** + * xa_load() - Load an entry from an XArray. + * @xa: XArray. + * @index: index into array. + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The entry at @index in @xa. + */ +void *xa_load(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + void *entry; + + rcu_read_lock(); + do { + entry = xas_load(&xas); + if (xa_is_zero(entry)) + entry = NULL; + } while (xas_retry(&xas, entry)); + rcu_read_unlock(); + + return entry; +} + +static void *xas_result(struct xa_state *xas, void *curr) +{ + if (xa_is_zero(curr)) + return NULL; + if (xas_error(xas)) + curr = xas->xa_node; + return curr; +} + +/** + * __xa_erase() - Erase this entry from the XArray while locked. + * @xa: XArray. + * @index: Index into array. + * + * After this function returns, loading from @index will return %NULL. 
+ * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. + * + * Context: Any context. Expects xa_lock to be held on entry. + * Return: The entry which used to be at this index. + */ +void *__xa_erase(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + return xas_result(&xas, xas_store(&xas, NULL)); +} + +/** + * xa_erase() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. + * + * Context: Any context. Takes and releases the xa_lock. + * Return: The entry which used to be at this index. + */ +void *xa_erase(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock(xa); + entry = __xa_erase(xa, index); + xa_unlock(xa); + + return entry; +} + +/** + * __xa_store() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * You must already be holding the xa_lock when calling this function. + * It will drop the lock if needed to allocate memory, and then reacquire + * it afterwards. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: The old entry at this index or xa_err() if an error happened. 
+ */ +void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) +{ + XA_STATE(xas, xa, index); + void *curr; + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return XA_ERROR(-EINVAL); + if (xa_track_free(xa) && !entry) + entry = XA_ZERO_ENTRY; + + do { + curr = xas_store(&xas, entry); + if (xa_track_free(xa)) + xas_clear_mark(&xas, XA_FREE_MARK); + } while (__xas_nomem(&xas, gfp)); + + return xas_result(&xas, curr); +} + +/** + * xa_store() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * After this function returns, loads from this index will return @entry. + * Storing into an existing multi-index entry updates the entry of every index. + * The marks associated with @index are unaffected unless @entry is %NULL. + * + * Context: Any context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry + * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation + * failed. 
+ */ +void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock(xa); + + return curr; +} + +#ifdef CONFIG_XARRAY_MULTI +static void xas_set_range(struct xa_state *xas, unsigned long first, + unsigned long last) +{ + unsigned int shift = 0; + unsigned long sibs = last - first; + unsigned int offset = XA_CHUNK_MASK; + + xas_set(xas, first); + + while ((first & XA_CHUNK_MASK) == 0) { + if (sibs < XA_CHUNK_MASK) + break; + if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) + break; + shift += XA_CHUNK_SHIFT; + if (offset == XA_CHUNK_MASK) + offset = sibs & XA_CHUNK_MASK; + sibs >>= XA_CHUNK_SHIFT; + first >>= XA_CHUNK_SHIFT; + } + + offset = first & XA_CHUNK_MASK; + if (offset + sibs > XA_CHUNK_MASK) + sibs = XA_CHUNK_MASK - offset; + if ((((first + sibs + 1) << shift) - 1) > last) + sibs -= 1; + + xas->xa_shift = shift; + xas->xa_sibs = sibs; +} + +/** + * xa_get_order() - Get the order of an entry. + * @xa: XArray. + * @index: Index of the entry. + * + * Return: A number between 0 and 63 indicating the order of the entry. + */ +int xa_get_order(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + void *entry; + int order = 0; + + rcu_read_lock(); + entry = xas_load(&xas); + + if (!entry) + goto unlock; + + if (!xas.xa_node) + goto unlock; + + for (;;) { + unsigned int slot = xas.xa_offset + (1 << order); + + if (slot >= XA_CHUNK_SIZE) + break; + if (!xa_is_sibling(xas.xa_node->slots[slot])) + break; + order++; + } + + order += xas.xa_node->shift; +unlock: + rcu_read_unlock(); + + return order; +} +#endif /* CONFIG_XARRAY_MULTI */ + +/** + * __xa_alloc() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @limit: Range for allocated ID. + * @entry: New entry. + * @gfp: Memory allocation flags. 
+ * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. + */ +int __xa_alloc(struct xarray *xa, u32 *id, void *entry, struct xa_limit limit, + gfp_t gfp) +{ + XA_STATE(xas, xa, 0); + + if (WARN_ON_ONCE(xa_is_advanced(entry))) + return -EINVAL; + if (WARN_ON_ONCE(!xa_track_free(xa))) + return -EINVAL; + + if (!entry) + entry = XA_ZERO_ENTRY; + + do { + xas.xa_index = limit.min; + xas_find_marked(&xas, limit.max, XA_FREE_MARK); + if (xas.xa_node == XAS_RESTART) + xas_set_err(&xas, -EBUSY); + else + *id = xas.xa_index; + xas_store(&xas, entry); + xas_clear_mark(&xas, XA_FREE_MARK); + } while (__xas_nomem(&xas, gfp)); + + return xas_error(&xas); +} + +/** + * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Expects xa_lock to be held on entry. May + * release and reacquire xa_lock if @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. 
+ */ +int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + u32 min = limit.min; + int ret; + + limit.min = max(min, *next); + ret = __xa_alloc(xa, id, entry, limit, gfp); + if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { + xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + + if (ret < 0 && limit.min > min) { + limit.min = min; + ret = __xa_alloc(xa, id, entry, limit, gfp); + if (ret == 0) + ret = 1; + } + + if (ret >= 0) { + *next = *id + 1; + if (*next == 0) + xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; + } + return ret; +} + +/** + * __xa_set_mark() - Set this mark on this entry while locked. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Attempting to set a mark on a %NULL entry does not succeed. + * + * Context: Any context. Expects xa_lock to be held on entry. + */ +void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + XA_STATE(xas, xa, index); + void *entry = xas_load(&xas); + + if (entry) + xas_set_mark(&xas, mark); +} + +/** + * __xa_clear_mark() - Clear this mark on this entry while locked. + * @xa: XArray. + * @index: Index of entry. + * @mark: Mark number. + * + * Context: Any context. Expects xa_lock to be held on entry. + */ +void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) +{ + XA_STATE(xas, xa, index); + void *entry = xas_load(&xas); + + if (entry) + xas_clear_mark(&xas, mark); +} + +/** + * xa_find() - Search the XArray for an entry. + * @xa: XArray. + * @indexp: Pointer to an index. + * @max: Maximum index to search to. + * @filter: Selection criterion. + * + * Finds the entry in @xa which matches the @filter, and has the lowest + * index that is at least @indexp and no more than @max. + * If an entry is found, @indexp is updated to be the index of the entry. + * This function is protected by the RCU read lock, so it may not find + * entries which are being simultaneously added. 
It will not return an + * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The entry, if found, otherwise %NULL. + */ +void *xa_find(struct xarray *xa, unsigned long *indexp, unsigned long max, + xa_mark_t filter) +{ + XA_STATE(xas, xa, *indexp); + void *entry; + + rcu_read_lock(); + do { + if ((__force unsigned int)filter < XA_MAX_MARKS) + entry = xas_find_marked(&xas, max, filter); + else + entry = xas_find(&xas, max); + } while (xas_retry(&xas, entry)); + rcu_read_unlock(); + + if (entry) + *indexp = xas.xa_index; + return entry; +} + +static bool xas_sibling(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + unsigned long mask; + + if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node) + return false; + mask = (XA_CHUNK_SIZE << node->shift) - 1; + return (xas->xa_index & mask) > + ((unsigned long)xas->xa_offset << node->shift); +} + +/** + * xa_find_after() - Search the XArray for a present entry. + * @xa: XArray. + * @indexp: Pointer to an index. + * @max: Maximum index to search to. + * @filter: Selection criterion. + * + * Finds the entry in @xa which matches the @filter and has the lowest + * index that is above @indexp and no more than @max. + * If an entry is found, @indexp is updated to be the index of the entry. + * This function is protected by the RCU read lock, so it may miss entries + * which are being simultaneously added. It will not return an + * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). + * + * Context: Any context. Takes and releases the RCU lock. + * Return: The pointer, if found, otherwise %NULL. 
+ */ +void *xa_find_after(struct xarray *xa, unsigned long *indexp, unsigned long max, + xa_mark_t filter) +{ + XA_STATE(xas, xa, *indexp + 1); + void *entry; + + if (xas.xa_index == 0) + return NULL; + + rcu_read_lock(); + for (;;) { + if ((__force unsigned int)filter < XA_MAX_MARKS) + entry = xas_find_marked(&xas, max, filter); + else + entry = xas_find(&xas, max); + + if (xas_invalid(&xas)) + break; + if (xas_sibling(&xas)) + continue; + if (!xas_retry(&xas, entry)) + break; + } + rcu_read_unlock(); + + if (entry) + *indexp = xas.xa_index; + return entry; +} + +/** + * xa_delete_node() - Private interface for workingset code. + * @node: Node to be removed from the tree. + * @update: Function to call to update ancestor nodes. + * + * Context: xa_lock must be held on entry and will not be released. + */ +void xa_delete_node(struct xa_node *node, xa_update_node_t update) +{ + struct xa_state xas = { + .xa = node->array, + .xa_index = (unsigned long)node->offset + << (node->shift + XA_CHUNK_SHIFT), + .xa_shift = node->shift + XA_CHUNK_SHIFT, + .xa_offset = node->offset, + .xa_node = xa_parent_locked(node->array, node), + .xa_update = update, + }; + + xas_store(&xas, NULL); +} + +/** + * xa_destroy() - Free all internal data structures. + * @xa: XArray. + * + * After calling this function, the XArray is empty and has freed all memory + * allocated for its internal data structures. You are responsible for + * freeing the objects referenced by the XArray. + * + * Context: Any context. Takes and releases the xa_lock, interrupt-safe. 
+ */ +void xa_destroy(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + unsigned long flags; + void *entry; + + xas.xa_node = NULL; + xas_lock_irqsave(&xas, flags); + entry = xa_head_locked(xa); + RCU_INIT_POINTER(xa->xa_head, NULL); + xas_init_marks(&xas); + if (xa_zero_busy(xa)) + xa_mark_clear(xa, XA_FREE_MARK); + /* lockdep checks we're still holding the lock in xas_free_nodes() */ + if (xa_is_node(entry)) + xas_free_nodes(&xas, xa_to_node(entry)); + xas_unlock_irqrestore(&xas, flags); +} + +#ifdef XA_DEBUG +void xa_dump_node(const struct xa_node *node) +{ + unsigned i, j; + + if (!node) + return; + if ((unsigned long)node & 3) { + pr_cont("node %px\n", node); + return; + } + + pr_cont("node %px %s %d parent %px shift %d count %d values %d " + "array %px list %px %px marks", + node, node->parent ? "offset" : "max", node->offset, + node->parent, node->shift, node->count, node->nr_values, + node->array, node->private_list.prev, node->private_list.next); + for (i = 0; i < XA_MAX_MARKS; i++) + for (j = 0; j < XA_MARK_LONGS; j++) + pr_cont(" %lx", node->marks[i][j]); + pr_cont("\n"); +} + +void xa_dump_index(unsigned long index, unsigned int shift) +{ + if (!shift) + pr_info("%lu: ", index); + else if (shift >= BITS_PER_LONG) + pr_info("0-%lu: ", ~0UL); + else + pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); +} + +void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) +{ + if (!entry) + return; + + xa_dump_index(index, shift); + + if (xa_is_node(entry)) { + if (shift == 0) { + pr_cont("%px\n", entry); + } else { + unsigned long i; + struct xa_node *node = xa_to_node(entry); + xa_dump_node(node); + for (i = 0; i < XA_CHUNK_SIZE; i++) + xa_dump_entry(node->slots[i], + index + (i << node->shift), + node->shift); + } + } else if (xa_is_value(entry)) + pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), + xa_to_value(entry), entry); + else if (!xa_is_internal(entry)) + pr_cont("%px\n", entry); + else if (xa_is_retry(entry)) + 
pr_cont("retry (%ld)\n", xa_to_internal(entry)); + else if (xa_is_sibling(entry)) + pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); + else if (xa_is_zero(entry)) + pr_cont("zero (%ld)\n", xa_to_internal(entry)); + else + pr_cont("UNKNOWN ENTRY (%px)\n", entry); +} + +void xa_dump(const struct xarray *xa) +{ + void *entry = xa->xa_head; + unsigned int shift = 0; + + pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, + xa->xa_flags, xa_marked(xa, XA_MARK_0), + xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); + if (xa_is_node(entry)) + shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; + xa_dump_entry(entry, 0, shift); +} +#endif + +static void radix_tree_node_ctor(void *arg) +{ + struct radix_tree_node *node = arg; + + memset(node, 0, sizeof(*node)); + INIT_LIST_HEAD(&node->private_list); +} + +struct radix_tree_preload { + unsigned nr; + /* nodes->parent points to next preallocated node */ + struct radix_tree_node *nodes; +}; + +static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; + +static __init unsigned long __maxindex(unsigned int height) +{ + unsigned int width = height * RADIX_TREE_MAP_SHIFT; + int shift = RADIX_TREE_INDEX_BITS - width; + + if (shift < 0) + return ~0UL; + if (shift >= BITS_PER_LONG) + return 0UL; + return ~0UL >> shift; +} + +static __init void radix_tree_init_maxnodes(void) +{ + unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) + height_to_maxindex[i] = __maxindex(i); + for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { + for (j = i; j > 0; j--) + height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; + } +} + +void __init dh_radix_tree_init(void) +{ + BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); + BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); + radix_tree_node_cachep = kmem_cache_create( + "radix_tree_node", sizeof(struct radix_tree_node), 0, + SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, 
radix_tree_node_ctor); + radix_tree_init_maxnodes(); //5.10不存在 +} +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/normal_send_eg.c b/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/normal_send_eg.c new file mode 100644 index 000000000000..2d43a9f4a752 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/normal_send_eg.c @@ -0,0 +1,113 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BAR_CHAN_PLOAD_SIZE (2036) +#define BAR_REPS_HDR_LEN (4) +#define DEVICE_FILE "/dev/bar_ioctl_dev" +#define BAR_IOCTL_CMD_NORMAL _IOW('a', 1, msg_entity_t) +struct zxdh_ioctl_send_in { + uint16_t pload_len; + uint16_t src; + uint16_t dst; + uint16_t event_id; +}; + +struct zxdh_ioctl_send_out { + int ioctl_state; //消息返回值ioctl状态 - 用户读 + int bar_state; //bar通道接口级别返回值 - 用户读 +}; + +struct zxdh_ioctl_recv_in { + uint16_t event_id; + uint16_t rsv1; + uint32_t rsv2; +}; + +struct zxdh_ioctl_recv_out { + uint16_t msg_len; + uint16_t rsv1; + uint32_t rsv2; +}; + +typedef struct normal_msg_entity { + union ioctl_ctrl_hdr //私有消息控制头 + { + struct zxdh_ioctl_send_in send_hdr_in; //普通消息发送头 + struct zxdh_ioctl_send_out send_hdr_out; //普通消息发送头 + } hdr; + uint8_t pload[BAR_CHAN_PLOAD_SIZE]; +} msg_entity_t; + +typedef enum BAR_DRIVER_TYPE { + MSG_CHAN_END_MPF = 0, + MSG_CHAN_END_PF, + MSG_CHAN_END_VF, + MSG_CHAN_END_RISC, + MSG_CHAN_END_ERR, +} BAR_DRIVER_TYPE; + +int main(void) +{ + int fd, ret; + struct normal_msg_entity entity = { 0 }; + uint8_t data[5] = { 0x12, 0x34, 0x53, 0x32, 0xaa }; + + /* 1 填写消息发送头, 主要包括消息长度, 消息源、消息目的、 事件id(调用到哪一个消息处理函数)*/ + entity.hdr.send_hdr_in.pload_len = sizeof(data); + entity.hdr.send_hdr_in.src = MSG_CHAN_END_PF; + entity.hdr.send_hdr_in.dst = MSG_CHAN_END_RISC; + entity.hdr.send_hdr_in.event_id = 5; + + /* 2 拷贝消息到pload, 这部分数据会传送到消息处理函数中去*/ + memcpy(entity.pload, data, sizeof(data)); + + /* 3 打开ioctl字符设备, ioctl命令码和设备名称看代码*/ + fd 
= open(DEVICE_FILE, O_RDWR); + if (fd < 0) { + perror("Failed to open the device."); + return 1; + } + printf("reps: 0x%llx.\n", *(uint64_t *)entity.pload); + + /* 4 发起ioctl请求发送消息, 消息传送过去后会根据event_id, 自动调用消息处理函数, 并且返回结果到pload字段*/ + ret = ioctl(fd, BAR_IOCTL_CMD_NORMAL, &entity); + if (ret < 0) { + perror("IOCTL command failed."); + ret = 1; + goto out; + } + + /* 5 根据前4字节校验ioctl是否正常*/ + /* 判断ioctl通信是否异常*/ + if (entity.hdr.send_hdr_out.ioctl_state != 0) { + printf("ioctl failed, state: %d\n", + entity.hdr.send_hdr_out.ioctl_state); + ret = -1; + goto out; + } + + /*6 根据后续4字节校验内核态接口是否调用正常*/ + /* 判断调用bar通道内核态接口是否错误*/ + if (entity.hdr.send_hdr_out.bar_state != 0) { + printf("bar send err, state: %d\n", + entity.hdr.send_hdr_out.bar_state); + ret = entity.hdr.send_hdr_out.bar_state; + goto out; + } + + /* 7 调用接口完毕, 从entity.pload中取消息处理函数的回复结果*/ + printf("the sum 2byes of data is 0x%x.\n", *(uint16_t *)entity.pload); + +out: + close(fd); + return 0; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/pci_res_query_eg.c b/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/pci_res_query_eg.c new file mode 100644 index 000000000000..b4d24b9518e1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/bar_chan_user/pci_res_query_eg.c @@ -0,0 +1,201 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEVICE_FILE "/dev/bar_ioctl_dev" +#define ZXDH_PF_DEV_NUM (40) +#define BAR_IOCTL_CMD_SINGLE_DEV _IOW('a', 2, pci_res_st) +#define BAR_IOCTL_CMD_ALL_DEV _IOW('a', 3, pci_res_st) +#define PF_PCIE_ID(pf_idx) \ + (((pf_idx % 8) << 8) | (1 << 11) | ((pf_idx / 8) << 12)) + +struct zxdh_mpf_pci_res_item { + uint16_t device_id; + uint16_t pcie_id; + uint16_t bdf; + uint8_t link_state; + uint8_t dev_type; + uint16_t total_vfs; + uint16_t initial_vfs; + uint16_t num_vfs; + uint8_t vf_stride; + uint8_t first_vf_offset; + int res; + uint8_t pad[4]; //预留字段 +}; + +/* 
zf内核态和risc通信的约定结构体*/ +struct zxdh_mpf_pci_res_list { + uint16_t num; + uint16_t verno; //版本号 + int res; //0表示返回成功, 其他表示失败, 包括消息发送失败 + struct zxdh_mpf_pci_res_item pci_res_lis[ZXDH_PF_DEV_NUM]; +}; + +struct zxdh_mpf_query_pci_res_msg { + uint16_t pcie_id; + uint8_t dev_type; + uint8_t pad[5]; + struct zxdh_mpf_pci_res_list reply; +}; + +typedef struct zxdh_mpf_query_bar_msg { + int ioctl_state; + int bar_state; + struct zxdh_mpf_query_pci_res_msg pci_res_msg; +} pci_res_st; + +void print_pf_pci_res(struct zxdh_mpf_pci_res_item *item) +{ + printf("device_id: 0x%x.\n", item->device_id); + printf("link_state: %d.\n", item->link_state); + printf("pcie_id: 0x%x.\n", item->pcie_id); + printf("bdf: 0x%x.\n", item->bdf); + printf("dev_type: %d.\n", item->dev_type); + printf("total_vfs: %d.\n", item->total_vfs); + printf("initial_vfs: %d.\n", item->initial_vfs); + printf("num_vfs: %d.\n", item->num_vfs); + printf("vf_stride: %d.\n", item->vf_stride); + printf("first_vf_offset: %d.\n", item->first_vf_offset); + return; +} + +static int zxdh_get_dev_pci_resource(uint16_t pcie_id, + struct zxdh_mpf_query_bar_msg *data, + int cmd) +{ + int fd, ret; + struct zxdh_mpf_query_bar_msg msg = { 0 }; + + msg.pci_res_msg.pcie_id = pcie_id; + msg.pci_res_msg.dev_type = 8; + + fd = open(DEVICE_FILE, O_RDWR); + if (fd < 0) { + perror("Failed to open the device."); + return 1; + } + + ret = ioctl(fd, cmd, &msg); + if (ret < 0) { + perror("IOCTL command failed."); + ret = 1; + goto out; + } + + /* ioctl通信异常*/ + if (msg.ioctl_state != 0) { + //return IOCTL_ERR + printf("ioctl failed, state: %d\n", msg.ioctl_state); + ret = -1; + goto out; + } + + /* 调用bar通道内核态接口错误*/ + if (msg.bar_state != 0) { + //return IOCTL_ERR + printf("bar send err, state: %d\n", msg.bar_state); + ret = msg.bar_state; + goto out; + } + + *data = msg; +out: + close(fd); + return ret; +} + +int zxdh_get_dev_pci_resource_single(uint16_t pcie_id, + struct zxdh_mpf_query_bar_msg *data) +{ + return 
zxdh_get_dev_pci_resource(pcie_id, data, + BAR_IOCTL_CMD_SINGLE_DEV); +} + +int zxdh_get_dev_pci_resource_all(struct zxdh_mpf_query_bar_msg *data) +{ + return zxdh_get_dev_pci_resource(0, data, BAR_IOCTL_CMD_ALL_DEV); +} + +void test_pci_res_query_single(uint16_t pcie_id) +{ + int ret = 0; + struct zxdh_mpf_query_bar_msg data = { 0 }; + + printf("**************pcie_id: 0x%x**************.\n", pcie_id); + ret = zxdh_get_dev_pci_resource_single(pcie_id, &data); + if (ret != 0) { + printf("ioctl msg failed, ret: %d.\n", ret); + return; + } + if (data.pci_res_msg.reply.res != 0) { + printf("data.pci_res_msg.reply.res is %d.\n", + data.pci_res_msg.reply.res); + return; + } + print_pf_pci_res(&data.pci_res_msg.reply.pci_res_lis[0]); + return; +} + +void test_pci_res_query_all(void) +{ + int pf_idx = 0; + int ret = 0; + uint16_t pcie_id = 0; + struct zxdh_mpf_query_bar_msg data = { 0 }; + + ret = zxdh_get_dev_pci_resource_all(&data); + if (ret != 0) { + printf("ioctl msg failed, ret:%d.\n", ret); + } + + printf("res: %d.\n", data.pci_res_msg.reply.res); + printf("num: %d.\n", data.pci_res_msg.reply.num); + + for (pf_idx = 0; pf_idx < 40; pf_idx++) { + printf("********%dth dev, ep:%d, pf: %d**********.\n", pf_idx, + pf_idx / 8, pf_idx % 8); + + if (data.pci_res_msg.reply.pci_res_lis[pf_idx].res != 0) { + printf("invalid res.\n"); + continue; + } + print_pf_pci_res(&data.pci_res_msg.reply.pci_res_lis[pf_idx]); + } +} + +int main(int argc, char *argv[]) +{ + uint16_t pcie_id = 0; + + if (argc < 2) { + goto help; + } + if (strcmp(argv[1], "all") == 0) { + test_pci_res_query_all(); + goto out; + } else if (strcmp(argv[1], "dev") == 0) { + if (argc < 3) { + goto help; + } + pcie_id = strtol(argv[2], NULL, 16); + test_pci_res_query_single(pcie_id); + goto out; + } else { + goto help; + } + +help: + printf("./test all ------------------print all pci_dev resources.\n"); + printf("./test dev [pcie_id] --------print pci_dev resource.\n"); +out: + return 0; +} diff --git 
a/drivers/net/ethernet/dinghai/zf_mpf/epc/Makefile b/drivers/net/ethernet/dinghai/zf_mpf/epc/Makefile new file mode 100644 index 000000000000..ad4897bc7ced --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/Makefile @@ -0,0 +1,11 @@ +KDIR=/home/ls/kernel-5.10.y + +obj-m += zte-epc.o +zte-epc-y += pcie-zte-zf-hdma.o pcie-zte-zf-epc.o virt-dma.o + + +all: + make -C $(KDIR) M=$(PWD) modules + +clean: + make -C $(KDIR) M=$(PWD) clean diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.c b/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.c new file mode 100644 index 000000000000..3beaa53f8a21 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.c @@ -0,0 +1,1632 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dmaengine.h" + +static DEFINE_MUTEX(dma_list_mutex); +static DEFINE_IDA(dma_ida); +static LIST_HEAD(dma_device_list); +static long dmaengine_ref_count; + +/* --- debugfs implementation --- */ +#ifdef CONFIG_DEBUG_FS +#include + +static struct dentry *rootdir; + +static void zxdh_dmaengine_debug_register(struct dma_device *dma_dev) +{ + dma_dev->dbg_dev_root = + debugfs_create_dir(dev_name(dma_dev->dev), rootdir); + if (IS_ERR(dma_dev->dbg_dev_root)) + dma_dev->dbg_dev_root = NULL; +} + +static void zxdh_dmaengine_debug_unregister(struct dma_device *dma_dev) +{ + debugfs_remove_recursive(dma_dev->dbg_dev_root); + dma_dev->dbg_dev_root = NULL; +} + +static void zxdh_dmaengine_dbg_summary_show(struct seq_file *s, + struct dma_device *dma_dev) +{ + struct dma_chan *chan; + + list_for_each_entry(chan, &dma_dev->channels, device_node) { + if (chan->client_count) { + seq_printf(s, " %-13s| %s", dma_chan_name(chan), + chan->dbg_client_name ?: "in-use"); + + if (chan->router) + seq_printf(s, " (via router: 
%s)\n", + dev_name(chan->router->dev)); + else + seq_puts(s, "\n"); + } + } +} + +static int zxdh_dmaengine_summary_show(struct seq_file *s, void *data) +{ + struct dma_device *dma_dev = NULL; + + mutex_lock(&dma_list_mutex); + list_for_each_entry(dma_dev, &dma_device_list, global_node) { + seq_printf(s, "dma%d (%s): number of channels: %u\n", + dma_dev->dev_id, dev_name(dma_dev->dev), + dma_dev->chancnt); + + if (dma_dev->dbg_summary_show) + dma_dev->dbg_summary_show(s, dma_dev); + else + zxdh_dmaengine_dbg_summary_show(s, dma_dev); + + if (!list_is_last(&dma_dev->global_node, &dma_device_list)) + seq_puts(s, "\n"); + } + mutex_unlock(&dma_list_mutex); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dmaengine_summary); + +static void __init zxdh_dmaengine_debugfs_init(void) +{ + rootdir = debugfs_create_dir("dmaengine", NULL); + + /* /sys/kernel/debug/dmaengine/summary */ + debugfs_create_file("summary", 0444, rootdir, NULL, + &dmaengine_summary_fops); +} +#else +static inline void zxdh_dmaengine_debugfs_init(void) +{ +} +static inline int zxdh_dmaengine_debug_register(struct dma_device *dma_dev) +{ + return 0; +} + +static inline void zxdh_dmaengine_debug_unregister(struct dma_device *dma_dev) +{ +} +#endif /* DEBUG_FS */ + +/* --- sysfs implementation --- */ + +#define DMA_SLAVE_NAME "slave" + +/** + * zxdh_dev_to_dma_chan - convert a device pointer to its sysfs container object + * @dev: device node + * + * Must be called under dma_list_mutex. 
+ */ +static struct dma_chan *zxdh_dev_to_dma_chan(struct device *dev) +{ + struct dma_chan_dev *chan_dev; + + chan_dev = container_of(dev, typeof(*chan_dev), device); + return chan_dev->chan; +} + +static ssize_t zxdh_memcpy_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_chan *chan; + unsigned long count = 0; + int i; + int err; + + mutex_lock(&dma_list_mutex); + chan = zxdh_dev_to_dma_chan(dev); + if (chan) { + for_each_possible_cpu(i) + count += per_cpu_ptr(chan->local, i)->memcpy_count; + err = sprintf(buf, "%lu\n", count); + } else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(memcpy_count); + +static ssize_t zxdh_bytes_transferred_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dma_chan *chan; + unsigned long count = 0; + int i; + int err; + + mutex_lock(&dma_list_mutex); + chan = zxdh_dev_to_dma_chan(dev); + if (chan) { + for_each_possible_cpu(i) + count += per_cpu_ptr(chan->local, i)->bytes_transferred; + err = sprintf(buf, "%lu\n", count); + } else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(bytes_transferred); + +static ssize_t zxdh_in_use_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dma_chan *chan; + int err; + + mutex_lock(&dma_list_mutex); + chan = zxdh_dev_to_dma_chan(dev); + if (chan) + err = sprintf(buf, "%d\n", chan->client_count); + else + err = -ENODEV; + mutex_unlock(&dma_list_mutex); + + return err; +} +static DEVICE_ATTR_RO(in_use); + +static struct attribute *dma_dev_attrs[] = { + &dev_attr_memcpy_count.attr, + &dev_attr_bytes_transferred.attr, + &dev_attr_in_use.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_dev); + +static void zxdh_chan_dev_release(struct device *dev) +{ + struct dma_chan_dev *chan_dev; + + chan_dev = container_of(dev, typeof(*chan_dev), device); + kfree(chan_dev); +} + +static struct class dma_devclass = { + .name = "dma", 
+ .dev_groups = dma_dev_groups, + .dev_release = zxdh_chan_dev_release, +}; + +/* --- client and device registration --- */ + +/* enable iteration over all operation types */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * struct dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan: associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/* percpu lookup table for memory-to-memory offload providers */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init zxdh_dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine zxdh_dma_channel_table_init failure: %d\n", + err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(zxdh_dma_channel_table_init); + +/** + * zxdh_dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU + * @chan: DMA channel to test + * @cpu: CPU index which the channel should be close to + * + * Returns true if the channel is in the same NUMA-node as the CPU. 
+ */ +static bool zxdh_dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * zxdh_min_chan - finds the channel with min count and in the same NUMA-node as the CPU + * @cap: capability to match + * @cpu: CPU index which the channel should be close to + * + * If some channels are close to the given CPU, the one with the lowest + * reference count is returned. Otherwise, CPU is ignored and only the + * reference count is taken into account. + * + * Must be called under dma_list_mutex. + */ +static struct dma_chan *zxdh_min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (zxdh_dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * zxdh_dma_channel_rebalance - redistribute the available channels + * + * Optimize for CPU isolation (each CPU gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. + * + * Must be called under dma_list_mutex. 
+ */ +static void zxdh_dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = zxdh_min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int zxdh_dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) +{ + dma_cap_mask_t has; + + bitmap_and(has.bits, want->bits, device->cap_mask.bits, + DMA_TX_TYPE_END); + return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); +} + +static struct module *zxdh_dma_chan_to_owner(struct dma_chan *chan) +{ + return chan->device->owner; +} + +/** + * zxdh_balance_ref_count - catch up the channel reference count + * @chan: channel to balance ->client_count versus dmaengine_ref_count + * + * Must be called under dma_list_mutex. 
+ */ +static void zxdh_balance_ref_count(struct dma_chan *chan) +{ + struct module *owner = zxdh_dma_chan_to_owner(chan); + + while (chan->client_count < dmaengine_ref_count) { + __module_get(owner); + chan->client_count++; + } +} + +static void zxdh_dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + zxdh_dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void zxdh_dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, zxdh_dma_device_release); +} + +/** + * zxdh_dma_chan_get - try to grab a DMA channel's parent driver module + * @chan: channel to grab + * + * Must be called under dma_list_mutex. + */ +static int zxdh_dma_chan_get(struct dma_chan *chan) +{ + struct module *owner = zxdh_dma_chan_to_owner(chan); + int ret; + + /* The channel is already in use, update client count */ + if (chan->client_count) { + __module_get(owner); + goto out; + } + + if (!try_module_get(owner)) + return -ENODEV; + + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + + /* allocate upon first client reference */ + if (chan->device->device_alloc_chan_resources) { + ret = chan->device->device_alloc_chan_resources(chan); + if (ret < 0) + goto err_out; + } + + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) + zxdh_balance_ref_count(chan); + +out: + chan->client_count++; + return 0; + +err_out: + zxdh_dma_device_put(chan->device); +module_put_out: + module_put(owner); + return ret; +} + +/** + * zxdh_dma_chan_put - drop a reference to a DMA channel's parent driver module + * @chan: channel to release + * + * Must be called under dma_list_mutex. 
+ */ +static void zxdh_dma_chan_put(struct dma_chan *chan) +{ + /* This channel is not in use, bail out */ + if (!chan->client_count) + return; + + chan->client_count--; + + /* This channel is not in use anymore, free it */ + if (!chan->client_count && chan->device->device_free_chan_resources) { + /* Make sure all operations have completed */ + dmaengine_synchronize(chan); + chan->device->device_free_chan_resources(chan); + } + + /* If the channel is used via a DMA request router, free the mapping */ + if (chan->router && chan->router->route_free) { + chan->router->route_free(chan->router->dev, chan->route_data); + chan->router = NULL; + chan->route_data = NULL; + } + + zxdh_dma_device_put(chan->device); + module_put(zxdh_dma_chan_to_owner(chan)); +} + +enum dma_status zxdh_dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) +{ + enum dma_status status; + unsigned long zxdh_dma_sync_wait_timeout = + jiffies + msecs_to_jiffies(5000); + + dma_async_issue_pending(chan); + do { + status = dma_async_is_tx_complete(chan, cookie); + if (time_after_eq(jiffies, zxdh_dma_sync_wait_timeout)) { + dev_err(chan->device->dev, "%s: timeout!\n", __func__); + return DMA_ERROR; + } + if (status != DMA_IN_PROGRESS) + break; + cpu_relax(); + } while (1); + + return status; +} + +/** + * zxdh_dma_find_channel - find a channel to carry out the operation + * @tx_type: transaction type + */ +struct dma_chan *zxdh_dma_find_channel(enum dma_transaction_type tx_type) +{ + return this_cpu_read(channel_table[tx_type]->chan); +} + +/** + * zxdh_dma_issue_pending_all - flush all pending operations across all channels + */ +void zxdh_dma_issue_pending_all(void) +{ + struct dma_device *device; + struct dma_chan *chan; + + rcu_read_lock(); + list_for_each_entry_rcu(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + if (chan->client_count) + device->device_issue_pending(chan); + } + 
rcu_read_unlock(); +} + +int zxdh_dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) +{ + struct dma_device *device; + + if (!chan || !caps) + return -EINVAL; + + device = chan->device; + + /* check if the channel supports slave transactions */ + if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || + test_bit(DMA_CYCLIC, device->cap_mask.bits))) + return -ENXIO; + + /* + * Check whether it reports it uses the generic slave + * capabilities, if not, that means it doesn't support any + * kind of slave capabilities reporting. + */ + if (!device->directions) + return -ENXIO; + + caps->src_addr_widths = device->src_addr_widths; + caps->dst_addr_widths = device->dst_addr_widths; + caps->directions = device->directions; + caps->min_burst = device->min_burst; + caps->max_burst = device->max_burst; + caps->max_sg_burst = device->max_sg_burst; + caps->residue_granularity = device->residue_granularity; + caps->descriptor_reuse = device->descriptor_reuse; + caps->cmd_pause = !!device->device_pause; + caps->cmd_resume = !!device->device_resume; + caps->cmd_terminate = !!device->device_terminate_all; + + /* + * DMA engine device might be configured with non-uniformly + * distributed slave capabilities per device channels. In this + * case the corresponding driver may provide the device_caps + * callback to override the generic capabilities with + * channel-specific ones. + */ + if (device->device_caps) + device->device_caps(chan, caps); + + return 0; +} + +static struct dma_chan *zxdh_private_candidate(const dma_cap_mask_t *mask, + struct dma_device *dev, + dma_filter_fn fn, void *fn_param) +{ + struct dma_chan *chan; + + if (mask && !zxdh_dma_device_satisfies_mask(dev, mask)) { + dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); + return NULL; + } + /* devices with multiple channels need special handling as we need to + * ensure that all channels are either private or public. 
+ */ + if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) + list_for_each_entry(chan, &dev->channels, device_node) { + /* some channels are already publicly allocated */ + if (chan->client_count) + return NULL; + } + + list_for_each_entry(chan, &dev->channels, device_node) { + if (chan->client_count) { + dev_dbg(dev->dev, "%s: %s busy\n", __func__, + dma_chan_name(chan)); + continue; + } + if (fn && !fn(chan, fn_param)) { + dev_dbg(dev->dev, "%s: %s filter said false\n", + __func__, dma_chan_name(chan)); + continue; + } + return chan; + } + + return NULL; +} + +static struct dma_chan *zxdh_find_candidate(struct dma_device *device, + const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) +{ + struct dma_chan *chan = + zxdh_private_candidate(mask, device, fn, fn_param); + int err; + + if (chan) { + /* Found a suitable channel, try to grab, prep, and return it. + * We first set DMA_PRIVATE to disable zxdh_balance_ref_count as this + * channel will not be published in the general-purpose + * allocator + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; + err = zxdh_dma_chan_get(chan); + + if (err) { + if (err == -ENODEV) { + dev_dbg(device->dev, "%s: %s module removed\n", + __func__, dma_chan_name(chan)); + list_del_rcu(&device->global_node); + } else + dev_dbg(device->dev, + "%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + + chan = ERR_PTR(err); + } + } + + return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); +} + +/** + * zxdh_dma_get_slave_channel - try to get specific channel exclusively + * @chan: target channel + */ +struct dma_chan *zxdh_dma_get_slave_channel(struct dma_chan *chan) +{ + int err = -EBUSY; + + /* lock against __zxdh__zxdh_dma_request_channel */ + mutex_lock(&dma_list_mutex); + + if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; + err = zxdh_dma_chan_get(chan); + if (err) { + dev_dbg(chan->device->dev, + "%s: failed to get %s: (%d)\n", __func__, + dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } + } else + chan = NULL; + + mutex_unlock(&dma_list_mutex); + + return chan; +} + +struct dma_chan *zxdh_dma_get_any_slave_channel(struct dma_device *device) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* lock against __zxdh__zxdh_dma_request_channel */ + mutex_lock(&dma_list_mutex); + + chan = zxdh_find_candidate(device, &mask, NULL, NULL); + + mutex_unlock(&dma_list_mutex); + + return IS_ERR(chan) ? NULL : chan; +} + +/** + * __zxdh__zxdh_dma_request_channel - try to allocate an exclusive channel + * @mask: capabilities that the channel must satisfy + * @fn: optional callback to disposition available channels + * @fn_param: opaque parameter to pass to dma_filter_fn() + * @np: device node to look for DMA channels + * + * Returns pointer to appropriate DMA channel on success or NULL. 
+ */ +struct dma_chan *__zxdh__zxdh_dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, + void *fn_param, + struct device_node *np) +{ + struct dma_device *device, *_d; + struct dma_chan *chan = NULL; + + /* Find a channel */ + mutex_lock(&dma_list_mutex); + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + /* Finds a DMA controller with matching device node */ + if (np && device->dev->of_node && np != device->dev->of_node) + continue; + + chan = zxdh_find_candidate(device, mask, fn, fn_param); + if (!IS_ERR(chan)) + break; + + chan = NULL; + } + mutex_unlock(&dma_list_mutex); + + pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", + chan ? dma_chan_name(chan) : NULL); + + return chan; +} + +static const struct dma_slave_map * +zxdh_dma_filter_match(struct dma_device *device, const char *name, + struct device *dev) +{ + int i; + + if (!device->filter.mapcnt) + return NULL; + + for (i = 0; i < device->filter.mapcnt; i++) { + const struct dma_slave_map *map = &device->filter.map[i]; + + if (!strcmp(map->devname, dev_name(dev)) && + !strcmp(map->slave, name)) + return map; + } + + return NULL; +} + +/** + * zxdh_dma_request_chan - try to allocate an exclusive slave channel + * @dev: pointer to client device structure + * @name: slave channel name + * + * Returns pointer to appropriate DMA channel on success or an error pointer. 
+ */ +struct dma_chan *zxdh_dma_request_chan(struct device *dev, const char *name) +{ + struct dma_device *d, *_d; + struct dma_chan *chan = NULL; + + /* If device-tree is present get slave info from here */ + if (dev->of_node) + chan = of_dma_request_slave_channel(dev->of_node, name); + + /* If device was enumerated by ACPI get slave info from here */ + if (has_acpi_companion(dev) && !chan) + chan = acpi_dma_request_slave_chan_by_name(dev, name); + + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; + + /* Try to find the channel via the DMA filter map(s) */ + mutex_lock(&dma_list_mutex); + list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { + dma_cap_mask_t mask; + const struct dma_slave_map *map = + zxdh_dma_filter_match(d, name, dev); + + if (!map) + continue; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = zxdh_find_candidate(d, &mask, d->filter.fn, map->param); + if (!IS_ERR(chan)) + break; + } + mutex_unlock(&dma_list_mutex); + + if (IS_ERR(chan)) + return chan; + if (!chan) + return ERR_PTR(-EPROBE_DEFER); + +found: +#ifdef CONFIG_DEBUG_FS + chan->dbg_client_name = + kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name); +#endif + + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return chan; + chan->slave = dev; + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name); + + return chan; +} + +/** + * zxdh_dma_request_chan_by_mask - allocate a channel satisfying certain capabilities + * @mask: capabilities that the channel must satisfy + * + * Returns pointer to appropriate DMA channel on success or an error pointer. 
+ */ +struct dma_chan *zxdh_dma_request_chan_by_mask(const dma_cap_mask_t *mask) +{ + struct dma_chan *chan; + + if (!mask) + return ERR_PTR(-ENODEV); + + chan = __zxdh__zxdh_dma_request_channel(mask, NULL, NULL, NULL); + if (!chan) { + mutex_lock(&dma_list_mutex); + if (list_empty(&dma_device_list)) + chan = ERR_PTR(-EPROBE_DEFER); + else + chan = ERR_PTR(-ENODEV); + mutex_unlock(&dma_list_mutex); + } + + return chan; +} + +void zxdh_dma_release_channel(struct dma_chan *chan) +{ + mutex_lock(&dma_list_mutex); + WARN_ONCE(chan->client_count != 1, "chan reference count %d != 1\n", + chan->client_count); + zxdh_dma_chan_put(chan); + /* drop PRIVATE cap enabled by __zxdh__zxdh_dma_request_channel() */ + if (--chan->device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + + if (chan->slave) { + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + +#ifdef CONFIG_DEBUG_FS + kfree(chan->dbg_client_name); + chan->dbg_client_name = NULL; +#endif + mutex_unlock(&dma_list_mutex); +} + +/** + * zxdh_dmaengine_get - register interest in dma_channels + */ +void zxdh_dmaengine_get(void) +{ + struct dma_device *device, *_d; + struct dma_chan *chan; + int err; + + mutex_lock(&dma_list_mutex); + dmaengine_ref_count++; + + /* try to grab channels */ + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + err = zxdh_dma_chan_get(chan); + if (err == -ENODEV) { + /* module removed before we could use it */ + list_del_rcu(&device->global_node); + break; + } else if (err) + dev_dbg(chan->device->dev, + "%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + } + } + + /* if this is the first reference and there were channels + * waiting we need to rebalance to get those channels + * 
incorporated into the channel table + */ + if (dmaengine_ref_count == 1) + zxdh_dma_channel_rebalance(); + mutex_unlock(&dma_list_mutex); +} + +/** + * zxdh_dmaengine_put - let DMA drivers be removed when ref_count == 0 + */ +void zxdh_dmaengine_put(void) +{ + struct dma_device *device, *_d; + struct dma_chan *chan; + + mutex_lock(&dma_list_mutex); + dmaengine_ref_count--; + BUG_ON(dmaengine_ref_count < 0); + /* drop channel references */ + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + zxdh_dma_chan_put(chan); + } + mutex_unlock(&dma_list_mutex); +} + +static bool zxdh_device_has_all_tx_types(struct dma_device *device) +{ +/* A device that satisfies this test has channels that will never cause + * an async_tx channel switch event as all possible operation types can + * be handled. + */ +#ifdef CONFIG_ASYNC_TX_DMA + if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) + return false; +#endif + +#if IS_ENABLED(CONFIG_ASYNC_MEMCPY) + if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) + return false; +#endif + +#if IS_ENABLED(CONFIG_ASYNC_XOR) + if (!dma_has_cap(DMA_XOR, device->cap_mask)) + return false; + +#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA + if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) + return false; +#endif +#endif + +#if IS_ENABLED(CONFIG_ASYNC_PQ) + if (!dma_has_cap(DMA_PQ, device->cap_mask)) + return false; + +#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA + if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) + return false; +#endif +#endif + + return true; +} + +static int zxdh_get_dma_id(struct dma_device *device) +{ + int rc = ida_alloc(&dma_ida, GFP_KERNEL); + + if (rc < 0) + return rc; + device->dev_id = rc; + return 0; +} + +static int __zxdh_dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + chan->local = alloc_percpu(typeof(*chan->local)); + if 
(!chan->local) + return -ENOMEM; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + rc = -ENOMEM; + goto err_free_local; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + mutex_lock(&device->chan_mutex); + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); + mutex_unlock(&device->chan_mutex); + if (chan->chan_id < 0) { + pr_err("%s: unable to alloc ida for chan: %d\n", __func__, + chan->chan_id); + rc = chan->chan_id; + goto err_free_dev; + } + + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->dev_id = device->dev_id; + dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, + chan->chan_id); + rc = device_register(&chan->dev->device); + if (rc) + goto err_out_ida; + chan->client_count = 0; + device->chancnt++; + + return 0; + +err_out_ida: + mutex_lock(&device->chan_mutex); + ida_free(&device->chan_ida, chan->chan_id); + mutex_unlock(&device->chan_mutex); +err_free_dev: + kfree(chan->dev); +err_free_local: + free_percpu(chan->local); + chan->local = NULL; + return rc; +} + +int zxdh_dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __zxdh_dma_async_device_channel_register(device, chan); + if (rc < 0) + return rc; + + zxdh_dma_channel_rebalance(); + return 0; +} + +static void +__zxdh__dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", __func__, + chan->client_count); + mutex_lock(&dma_list_mutex); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + mutex_lock(&device->chan_mutex); + ida_free(&device->chan_ida, chan->chan_id); + mutex_unlock(&device->chan_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); 
+} + +void zxdh_dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __zxdh__dma_async_device_channel_unregister(device, chan); + zxdh_dma_channel_rebalance(); +} + +/** + * dma_async_device_register - registers DMA devices found + * @device: pointer to &struct dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * zxdh_dma_async_device_unregister() is called and no further references are taken. + */ +int zxdh_dma_async_device_register(struct dma_device *device) +{ + int rc = 0; + struct dma_chan *chan = NULL; + + DH_LOG_INFO(MODULE_MPF, "enter\n"); + + if (!device) + return -ENODEV; + + /* validate device routines */ + if (!device->dev) { + pr_err("DMAdevice must have dev\n"); + return -EIO; + } + + device->owner = device->dev->driver->owner; + + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && + !device->device_prep_dma_memcpy) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_MEMCPY"); + return -EIO; + } + + if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && + !device->device_prep_dma_memcpy_sg) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_MEMCPY_SG"); + return -EIO; + } + + if (dma_has_cap(DMA_XOR, device->cap_mask) && + !device->device_prep_dma_xor) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_XOR"); + return -EIO; + } + + if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && + !device->device_prep_dma_xor_val) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_XOR_VAL"); + return -EIO; + } + + if (dma_has_cap(DMA_PQ, device->cap_mask) && + !device->device_prep_dma_pq) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_PQ"); + return -EIO; + } + + if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && + 
!device->device_prep_dma_pq_val) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_PQ_VAL"); + return -EIO; + } + + if (dma_has_cap(DMA_MEMSET, device->cap_mask) && + !device->device_prep_dma_memset) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_MEMSET"); + return -EIO; + } + + if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && + !device->device_prep_dma_interrupt) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_INTERRUPT"); + return -EIO; + } + + if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && + !device->device_prep_dma_cyclic) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_CYCLIC"); + return -EIO; + } + + if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && + !device->device_prep_interleaved_dma) { + dev_err(device->dev, + "Device claims capability %s, but op is not defined\n", + "DMA_INTERLEAVE"); + return -EIO; + } + + if (!device->device_tx_status) { + dev_err(device->dev, "Device tx_status is not defined\n"); + return -EIO; + } + + if (!device->device_issue_pending) { + dev_err(device->dev, "Device issue_pending is not defined\n"); + return -EIO; + } + + if (!device->device_release) + dev_dbg(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + + /* note: this only matters in the + * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case + */ + if (zxdh_device_has_all_tx_types(device)) + dma_cap_set(DMA_ASYNC_TX, device->cap_mask); + + rc = zxdh_get_dma_id(device); + if (rc != 0) + return rc; + + mutex_init(&device->chan_mutex); + ida_init(&device->chan_ida); + + /* represent channels in sysfs. 
Probably want devs too */ + list_for_each_entry(chan, &device->channels, device_node) { + rc = __zxdh_dma_async_device_channel_register(device, chan); + if (rc < 0) + goto err_out; + } + + mutex_lock(&dma_list_mutex); + /* take references on public channels */ + if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) + list_for_each_entry(chan, &device->channels, device_node) { + /* if clients are already waiting for channels we need + * to take references on their behalf + */ + if (zxdh_dma_chan_get(chan) == -ENODEV) { + /* note we can only get here for the first + * channel as the remaining channels are + * guaranteed to get a reference + */ + rc = -ENODEV; + mutex_unlock(&dma_list_mutex); + goto err_out; + } + } + list_add_tail_rcu(&device->global_node, &dma_device_list); + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + device->privatecnt++; /* Always private */ + zxdh_dma_channel_rebalance(); + mutex_unlock(&dma_list_mutex); + + zxdh_dmaengine_debug_register(device); + + return 0; + +err_out: + /* if we never registered a channel just release the idr */ + if (!device->chancnt) { + ida_free(&dma_ida, device->dev_id); + return rc; + } + + list_for_each_entry(chan, &device->channels, device_node) { + if (chan->local == NULL) + continue; + mutex_lock(&dma_list_mutex); + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); + } + return rc; +} + +/** + * zxdh_dma_async_device_unregister - unregister a DMA device + * @device: pointer to &struct dma_device + * + * This routine is called by dma driver exit routines, dmaengine holds module + * references to prevent it being called while channels are in use. 
+ */ +void zxdh_dma_async_device_unregister(struct dma_device *device) +{ + struct dma_chan *chan, *n; + + zxdh_dmaengine_debug_unregister(device); + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __zxdh__dma_async_device_channel_unregister(device, chan); + + mutex_lock(&dma_list_mutex); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); + zxdh_dma_channel_rebalance(); + ida_free(&dma_ida, device->dev_id); + zxdh_dma_device_put(device); + mutex_unlock(&dma_list_mutex); +} + +static void zxdh_dmam_device_release(struct device *dev, void *res) +{ + struct dma_device *device; + + device = *(struct dma_device **)res; + zxdh_dma_async_device_unregister(device); +} + +/** + * zxdh_dmaenginem_async_device_register - registers DMA devices found + * @device: pointer to &struct dma_device + * + * The operation is managed and will be undone on driver detach. + */ +int zxdh_dmaenginem_async_device_register(struct dma_device *device) +{ + void *p; + int ret; + + p = devres_alloc(zxdh_dmam_device_release, sizeof(void *), GFP_KERNEL); + if (!p) + return -ENOMEM; + + ret = dma_async_device_register(device); + if (!ret) { + *(struct dma_device **)p = device; + devres_add(device->dev, p); + } else { + devres_free(p); + } + + return ret; +} + +struct zxdh_dmaengine_unmap_pool { + struct kmem_cache *cache; + const char *name; + mempool_t *pool; + size_t size; +}; + +#define __UNMAP_POOL(x) \ + { \ + .size = x, .name = "dmaengine-unmap-" __stringify(x) \ + } +static struct zxdh_dmaengine_unmap_pool unmap_pool[] = { + __UNMAP_POOL(2), +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + __UNMAP_POOL(16), + __UNMAP_POOL(128), + __UNMAP_POOL(256), +#endif +}; + +static struct zxdh_dmaengine_unmap_pool *__zxdh_get_unmap_pool(int nr) +{ + int order = get_count_order(nr); + + switch (order) { + case 0 ... 
1: + return &unmap_pool[0]; +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + case 2 ... 4: + return &unmap_pool[1]; + case 5 ... 7: + return &unmap_pool[2]; + case 8: + return &unmap_pool[3]; +#endif + default: + BUG(); + return NULL; + } +} + +static void zxdh_dmaengine_unmap(struct kref *kref) +{ + struct dmaengine_unmap_data *unmap = + container_of(kref, typeof(*unmap), kref); + struct device *dev = unmap->dev; + int cnt, i; + + cnt = unmap->to_cnt; + for (i = 0; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_TO_DEVICE); + cnt += unmap->from_cnt; + for (; i < cnt; i++) + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_FROM_DEVICE); + cnt += unmap->bidi_cnt; + for (; i < cnt; i++) { + if (unmap->addr[i] == 0) + continue; + dma_unmap_page(dev, unmap->addr[i], unmap->len, + DMA_BIDIRECTIONAL); + } + cnt = unmap->map_cnt; + mempool_free(unmap, __zxdh_get_unmap_pool(cnt)->pool); +} + +void zxdh_dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ + if (unmap) + kref_put(&unmap->kref, zxdh_dmaengine_unmap); +} + +static void zxdh_dmaengine_destroy_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct zxdh_dmaengine_unmap_pool *p = &unmap_pool[i]; + + mempool_destroy(p->pool); + p->pool = NULL; + kmem_cache_destroy(p->cache); + p->cache = NULL; + } +} + +static int __init zxdh_dmaengine_init_unmap_pool(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { + struct zxdh_dmaengine_unmap_pool *p = &unmap_pool[i]; + size_t size; + + size = sizeof(struct dmaengine_unmap_data) + + sizeof(dma_addr_t) * p->size; + + p->cache = kmem_cache_create(p->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!p->cache) + break; + p->pool = mempool_create_slab_pool(1, p->cache); + if (!p->pool) + break; + } + + if (i == ARRAY_SIZE(unmap_pool)) + return 0; + + zxdh_dmaengine_destroy_unmap_pool(); + return -ENOMEM; +} + +struct dmaengine_unmap_data *zxdh_dmaengine_get_unmap_data(struct device *dev, + int nr, gfp_t 
flags) +{ + struct dmaengine_unmap_data *unmap; + + unmap = mempool_alloc(__zxdh_get_unmap_pool(nr)->pool, flags); + if (!unmap) + return NULL; + + memset(unmap, 0, sizeof(*unmap)); + kref_init(&unmap->kref); + unmap->dev = dev; + unmap->map_cnt = nr; + + return unmap; +} + +void zxdh_dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan) +{ + tx->chan = chan; +#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH + spin_lock_init(&tx->lock); +#endif +} + +static inline int +zxdh_desc_check_and_set_metadata_mode(struct dma_async_tx_descriptor *desc, + enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int zxdh_dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = zxdh_desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} + +void *zxdh_dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = zxdh_desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} + +int zxdh_dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = zxdh_desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + 
if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} + +/** + * zxdh_dma_wait_for_async_tx - spin wait for a transaction to complete + * @tx: in-flight transaction to wait on + */ +enum dma_status zxdh_dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) +{ + unsigned long zxdh_dma_sync_wait_timeout = + jiffies + msecs_to_jiffies(5000); + + if (!tx) + return DMA_COMPLETE; + + while (tx->cookie == -EBUSY) { + if (time_after_eq(jiffies, zxdh_dma_sync_wait_timeout)) { + dev_err(tx->chan->device->dev, + "%s timeout waiting for descriptor submission\n", + __func__); + return DMA_ERROR; + } + cpu_relax(); + } + return zxdh_dma_sync_wait(tx->chan, tx->cookie); +} + +/** + * zxdh_dma_run_dependencies - process dependent operations on the target channel + * @tx: transaction with dependencies + * + * Helper routine for DMA drivers to process (start) dependent operations + * on their target channel. 
+ */ +void zxdh_dma_run_dependencies(struct dma_async_tx_descriptor *tx) +{ + struct dma_async_tx_descriptor *dep = txd_next(tx); + struct dma_async_tx_descriptor *dep_next; + struct dma_chan *chan; + + if (!dep) + return; + + /* we'll submit tx->next now, so clear the link */ + txd_clear_next(tx); + chan = dep->chan; + + /* keep submitting up until a channel switch is detected + * in that case we will be called again as a result of + * processing the interrupt from async_tx_channel_switch + */ + for (; dep; dep = dep_next) { + txd_lock(dep); + txd_clear_parent(dep); + dep_next = txd_next(dep); + if (dep_next && dep_next->chan == chan) + txd_clear_next(dep); /* ->next will be submitted */ + else + dep_next = NULL; /* submit current dep and terminate */ + txd_unlock(dep); + + dep->tx_submit(dep); + } + + chan->device->device_issue_pending(chan); +} + +static int __init zxdh_dma_bus_init(void) +{ + int err = zxdh_dmaengine_init_unmap_pool(); + + if (err) + return err; + + err = class_register(&dma_devclass); + if (!err) + zxdh_dmaengine_debugfs_init(); + + return err; +} +arch_initcall(zxdh_dma_bus_init); diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.h b/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.h new file mode 100644 index 000000000000..22c28e797d42 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/dmaengine.h @@ -0,0 +1,205 @@ +#ifndef ZXDH_DMAENGINE_H +#define ZXDH_DMAENGINE_H + +#include +#include + +/** + * zxdh_dma_cookie_init - initialize the cookies for a DMA channel + * @chan: dma channel to initialize + */ +static inline void zxdh_dma_cookie_init(struct dma_chan *chan) +{ + chan->cookie = DMA_MIN_COOKIE; + chan->completed_cookie = DMA_MIN_COOKIE; +} + +/** + * zxdh_dma_cookie_assign - assign a DMA engine cookie to the descriptor + * @tx: descriptor needing cookie + * + * Assign a unique non-zero per-channel cookie to the descriptor. + * Note: caller is expected to hold a lock to prevent concurrency. 
+ */ +static inline dma_cookie_t +zxdh_dma_cookie_assign(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *chan = tx->chan; + dma_cookie_t cookie; + + cookie = chan->cookie + 1; + if (cookie < DMA_MIN_COOKIE) + cookie = DMA_MIN_COOKIE; + tx->cookie = chan->cookie = cookie; + + return cookie; +} + +/** + * zxdh_dma_cookie_complete - complete a descriptor + * @tx: descriptor to complete + * + * Mark this descriptor complete by updating the channels completed + * cookie marker. Zero the descriptors cookie to prevent accidental + * repeated completions. + * + * Note: caller is expected to hold a lock to prevent concurrency. + */ +static inline void zxdh_dma_cookie_complete(struct dma_async_tx_descriptor *tx) +{ + BUG_ON(tx->cookie < DMA_MIN_COOKIE); + tx->chan->completed_cookie = tx->cookie; + tx->cookie = 0; +} + +/** + * zxdh_dma_cookie_status - report cookie status + * @chan: dma channel + * @cookie: cookie we are interested in + * @state: dma_tx_state structure to return last/used cookies + * + * Report the status of the cookie, filling in the state structure if + * non-NULL. No locking is required. 
+ */ +static inline enum dma_status zxdh_dma_cookie_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + dma_cookie_t used, complete; + + used = chan->cookie; + complete = chan->completed_cookie; + barrier(); + if (state) { + state->residue = 0; + state->in_flight_bytes = 0; + } + + if (complete <= used) { + if ((cookie <= complete) || (cookie > used)) + return DMA_COMPLETE; + } else { + if ((cookie <= complete) && (cookie > used)) + return DMA_COMPLETE; + } + return DMA_IN_PROGRESS; +} + +static inline void zxdh_dma_set_residue(struct dma_tx_state *state, u32 residue) +{ + if (state) + state->residue = residue; +} + +static inline void zxdh_dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + +struct zxdh_dmaengine_desc_callback { + dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; + void *callback_param; +}; + +/** + * zxdh_dmaengine_desc_get_callback - get the passed in callback function + * @tx: tx descriptor + * @cb: temp struct to hold the callback info + * + * Fill the passed in cb struct with what's available in the passed in + * tx descriptor struct + * No locking is required. + */ +static inline void +zxdh_dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, + struct zxdh_dmaengine_desc_callback *cb) +{ + cb->callback = tx->callback; + cb->callback_result = tx->callback_result; + cb->callback_param = tx->callback_param; +} + +/** + * zxdh_dmaengine_desc_callback_invoke - call the callback function in cb struct + * @cb: temp struct that is holding the callback info + * @result: transaction result + * + * Call the callback function provided in the cb struct with the parameter + * in the cb struct. + * Locking is dependent on the driver. 
+ */ +static inline void +zxdh_dmaengine_desc_callback_invoke(struct zxdh_dmaengine_desc_callback *cb, + const struct dmaengine_result *result) +{ + struct dmaengine_result dummy_result = { .result = DMA_TRANS_NOERROR, + .residue = 0 }; + + if (cb->callback_result) { + if (!result) + result = &dummy_result; + cb->callback_result(cb->callback_param, result); + } else if (cb->callback) { + cb->callback(cb->callback_param); + } +} + +/** + * zxdh_dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and + * then immediately call the callback. + * @tx: dma async tx descriptor + * @result: transaction result + * + * Call zxdh_dmaengine_desc_get_callback() and zxdh_dmaengine_desc_callback_invoke() + * in a single function since no work is necessary in between for the driver. + * Locking is dependent on the driver. + */ +static inline void +zxdh_dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, + const struct dmaengine_result *result) +{ + struct zxdh_dmaengine_desc_callback cb; + + zxdh_dmaengine_desc_get_callback(tx, &cb); + zxdh_dmaengine_desc_callback_invoke(&cb, result); +} + +/** + * zxdh_dmaengine_desc_callback_valid - verify the callback is valid in cb + * @cb: callback info struct + * + * Return a bool that verifies whether callback in cb is valid or not. + * No locking is required. 
+ */ +static inline bool +zxdh_dmaengine_desc_callback_valid(struct zxdh_dmaengine_desc_callback *cb) +{ + return cb->callback || cb->callback_result; +} + +struct dma_chan *zxdh_dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *zxdh_dma_get_any_slave_channel(struct dma_device *device); +void zxdh_dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, + struct dma_chan *chan); + +#ifdef CONFIG_DEBUG_FS +#include + +static inline struct dentry * +zxdh_zxdh_dmaengine_get_debugfs_root(struct dma_device *dma_dev) +{ + return dma_dev->dbg_dev_root; +} +#else +struct dentry; +static inline struct dentry * +zxdh_zxdh_dmaengine_get_debugfs_root(struct dma_device *dma_dev) +{ + return NULL; +} +#endif /* CONFIG_DEBUG_FS */ + +#endif diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.c b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.c new file mode 100644 index 000000000000..bb6146c90339 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.c @@ -0,0 +1,1854 @@ +#include "pcie-zte-zf-epc.h" +#include "pcie-zte-zf-hdma.h" + +struct pcie_zf_ep *zf_ep; +static unsigned int epc_init_flag[4] = { 0 }; + +extern int pcie_zte_zf_cfg_file_init(struct dh_core_dev *core_dev); +extern void pcie_zte_zf_cfg_file_exit(void); + +// PRE_FUNC +#if 1 +int pcie_zf_read(void __iomem *addr, int size, u32 *val) +{ + *val = 0; + if (!IS_ALIGNED((unsigned long)addr, size)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + *val = readl(addr); + else if (size == 2) + *val = readw(addr); + else if (size == 1) + *val = readb(addr); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + + return PCIBIOS_SUCCESSFUL; +} +int pcie_zf_write(void __iomem *addr, int size, u32 val) +{ + if (!IS_ALIGNED((unsigned long)addr, size)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + if (size == 4) + writel(val, addr); + else if (size == 2) + writew(val, addr); + else if (size == 1) + writeb(val, addr); + else + return PCIBIOS_BAD_REGISTER_NUMBER; + 
+ return PCIBIOS_SUCCESSFUL; +} +u32 cfg_phy_rmw(u64 phy_addr, u32 value, u32 mask) +{ + u32 reg_val = 0; + void __iomem *virt_addr = NULL; + u64 tmp_addr = 0; + u64 offset = 0; + u64 size = 0; + int ret = 0; + + offset = phy_addr % PAGE_SIZE; + if (phy_addr < offset) { + DH_LOG_ERR(MODULE_MPF, + "data overflow! phy_addr=0x%llx, offset=0x%llx\n", + phy_addr, offset); + return PCIBIOS_BAD_REGISTER_NUMBER; + } else { + tmp_addr = phy_addr - offset; + } + if (offset <= (PAGE_SIZE - 4)) { + size = PAGE_SIZE; + } else { + size = 2 * PAGE_SIZE; + } + + virt_addr = ioremap(tmp_addr, size); + if (NULL == virt_addr) { + DH_LOG_ERR(MODULE_MPF, "cfg_phy_rmw ioremap failed!\n"); + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + ret = pcie_zf_read((virt_addr + offset), 4, ®_val); + if (ret) + goto err; + + reg_val &= (~mask); + reg_val |= (value & mask); + + ret = pcie_zf_write(virt_addr + offset, 4, reg_val); +err: + iounmap(virt_addr); + return ret; +} + +u8 pcie_zf_readb_dbi(struct pcie_dpu_ep *ep, u32 reg) +{ + u32 val = 0; + + pcie_zf_read(ep->dbi_base + reg, 0x1, &val); + + return val; +} + +u16 pcie_zf_readw_dbi(struct pcie_dpu_ep *ep, u32 reg) +{ + int ret; + u32 val; + + ret = pcie_zf_read(ep->dbi_base + reg, 0x2, &val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Read DBIw address failed\r\n"); + + return val; +} +u32 pcie_zf_readl_dbi(struct pcie_dpu_ep *ep, u32 reg) +{ + int ret = 0; + u32 val = 0; + + ret = pcie_zf_read(ep->dbi_base + reg, 0x4, &val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Read DBIl address failed\r\n"); + + return val; +} + +void pcie_zf_writeb_dbi(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + pcie_zf_write(ep->dbi_base + reg, 0x1, val); +} + +void pcie_zf_writew_dbi(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + int ret = 0; + + ret = pcie_zf_write(ep->dbi_base + reg, 0x2, val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Write DBI address failed\r\n"); +} +void pcie_zf_writel_dbi(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + int ret = 0; + + ret = 
pcie_zf_write(ep->dbi_base + reg, 0x4, val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Write DBI address failed\r\n"); +} + +void pcie_zf_writeb_dbi2(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + pcie_zf_write(ep->dbi_base + PCIE_DPU_EP_DBI2_OFFSET + reg, 0x1, val); +} + +void pcie_zf_writew_dbi2(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + int ret = 0; + + ret = pcie_zf_write(ep->dbi_base + PCIE_DPU_EP_DBI2_OFFSET + reg, 0x2, + val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Write DBI address failed\r\n"); +} +void pcie_zf_writel_dbi2(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + int ret; + + ret = pcie_zf_write(ep->dbi_base + PCIE_DPU_EP_DBI2_OFFSET + reg, 0x4, + val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Write DBI address failed\r\n"); +} +void pcie_zf_writel_atu(struct pcie_dpu_ep *ep, u32 reg, u32 val) +{ + int ret = 0; + + ret = pcie_zf_write(ep->atu_base + reg, 0x4, val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Write ATU address failed\r\n"); +} +static u32 pcie_zf_readl_atu(struct pcie_dpu_ep *ep, u32 reg) +{ + int ret = 0; + u32 val = 0; + + ret = pcie_zf_read(ep->atu_base + reg, 4, &val); + if (ret) + DH_LOG_ERR(MODULE_MPF, "Read ATU address failed\r\n"); + + return val; +} +static u64 zte_pcie_dma_atu_addr_remapping(u64 addr_input) +{ + u64 addr_output = 0; + // DMA and ATU地址重映射,将地址的bit12~18左移4位,设置bit15 = 0 + addr_output = + (((addr_input & (0x7F << 12)) << 4) | (addr_input & 0xFFF)) & + (~(1 << 15)); + + return addr_output; +} +static u32 pcie_zf_readl_ib_unroll(struct pcie_dpu_ep *ep, u32 index, u32 reg) +{ + u32 offset = zte_pcie_dma_atu_addr_remapping( + PCIE_GET_ATU_INB_UNR_REG_OFFSET(index)); + + return pcie_zf_readl_atu(ep, offset + reg); +} +static void pcie_zf_writel_ib_unroll(struct pcie_dpu_ep *ep, u32 index, u32 reg, + u32 val) +{ + u32 offset = zte_pcie_dma_atu_addr_remapping( + PCIE_GET_ATU_INB_UNR_REG_OFFSET(index)); + + pcie_zf_writel_atu(ep, offset + reg, val); +} +static u32 pcie_zf_readl_ob_unroll(struct pcie_dpu_ep *ep, u32 index, u32 reg) +{ 
+ u32 offset = zte_pcie_dma_atu_addr_remapping( + PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index)); + + return pcie_zf_readl_atu(ep, offset + reg); +} +static void pcie_zf_writel_ob_unroll(struct pcie_dpu_ep *ep, u32 index, u32 reg, + u32 val) +{ + u32 offset = zte_pcie_dma_atu_addr_remapping( + PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index)); + + pcie_zf_writel_atu(ep, offset + reg, val); +} +static void pcie_zf_dbi_ro_wr_en(struct pcie_dpu_ep *ep) +{ + u64 reg = 0; + u32 val = 0; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = pcie_zf_readl_dbi(ep, reg); + val |= PCIE_DBI_RO_WR_EN; + pcie_zf_writel_dbi(ep, reg, val); +} +static void pcie_zf_dbi_ro_wr_dis(struct pcie_dpu_ep *ep) +{ + u64 reg = 0; + u32 val = 0; + + reg = PCIE_MISC_CONTROL_1_OFF; + val = pcie_zf_readl_dbi(ep, reg); + val &= ~PCIE_DBI_RO_WR_EN; + pcie_zf_writel_dbi(ep, reg, val); +} +static void pcie_dpu_ep_sriov_enable(struct pcie_dpu_ep *ep, + u64 sriov_ecap_offset) +{ + u32 val = pcie_zf_readl_dbi(ep, sriov_ecap_offset + PCIE_SRIOV_CTRL); + val |= PCIE_SRIOV_CTRL_VFE; + pcie_zf_writel_dbi(ep, sriov_ecap_offset + PCIE_SRIOV_CTRL, val); +} +static void pcie_dpu_ep_sriov_disable(struct pcie_dpu_ep *ep, + u64 sriov_ecap_offset) +{ + u32 val = pcie_zf_readl_dbi(ep, sriov_ecap_offset + PCIE_SRIOV_CTRL); + val &= ~PCIE_SRIOV_CTRL_VFE; + pcie_zf_writel_dbi(ep, sriov_ecap_offset + PCIE_SRIOV_CTRL, val); +} +struct pcie_dpu_ep_func *pcie_dpu_ep_get_func_from_ep(struct pcie_dpu_ep *ep, + u8 func_no, u8 vfunc_no) +{ + struct pcie_dpu_ep_func *ep_func = NULL; + + list_for_each_entry(ep_func, &ep->func_list, list) { + if (ep_func->func_no == func_no && + ep_func->vfunc_no == vfunc_no) + return ep_func; + } + + return NULL; +} +static u32 pcie_dpu_ep_func_select(u8 func_no, u8 vfunc_no) +{ + u32 func_offset = 0; + + if (isPF(func_no)) { + func_offset = func_no & PCIE_DPU_EP_GET_PF_NO; + } else { + func_offset = (func_no & PCIE_DPU_EP_GET_PF_NO) + + (vfunc_no << DBI_VF_CFG_OFFSET_BIT) + VF_ACT_BIT; + } + + return func_offset * 
PCIE_DPU_EP_FUNC_CFG_SIZE; +} +static u8 pcie_dpu_ep_find_next_cap(struct pcie_dpu_ep *ep, u32 func_offset, + u8 cap_ptr, u8 capid) +{ + u8 now_cap_id = 0, next_cap_ptr = 0; + u16 reg = 0; + + if (!cap_ptr) + return 0; + + reg = pcie_zf_readl_dbi(ep, func_offset + cap_ptr); + now_cap_id = (reg & 0x00ff); + + if (now_cap_id > PCI_CAP_ID_MAX) + return 0; + + if (now_cap_id == capid) + return cap_ptr; + + next_cap_ptr = (reg & 0xff00) >> 8; + return pcie_dpu_ep_find_next_cap(ep, func_offset, next_cap_ptr, capid); +} +static u8 pcie_dpu_ep_find_cap(struct pcie_dpu_ep *ep, u32 func_offset, + u8 capid) +{ + u8 next_cap_ptr = 0; + u16 reg = 0; + + reg = pcie_zf_readl_dbi(ep, func_offset + PCI_CAPABILITY_LIST); + next_cap_ptr = (reg & 0x00ff); + + return pcie_dpu_ep_find_next_cap(ep, func_offset, next_cap_ptr, capid); +} +static int pcie_dpu_ep_find_extcap(struct pcie_dpu_ep *ep, u32 func_offset, + u8 ext_cap_id, u8 vsecid) +{ + u32 now_cap_id = 0; + u32 vsec_id = 0; + u32 ext_cap_offset = PCIE_ECAP_POINTER_OFF; + + now_cap_id = pcie_zf_readl_dbi(ep, func_offset + ext_cap_offset); + if (now_cap_id == 0x0 || now_cap_id == 0xFFFF) { + DH_LOG_ERR(MODULE_MPF, "pcie_zf_ep get extcap0 failed!\n"); + return -ENXIO; + } + + while (1) { + if ((now_cap_id & 0xFFFF) == ext_cap_id) { + if (ext_cap_id == PCIE_ECAP_VSEC_ID) { + vsec_id = pcie_zf_readl_dbi( + ep, func_offset + ext_cap_offset + 4); + if (vsec_id == vsecid) { + break; + } + } else { + break; + } + } + + ext_cap_offset = (now_cap_id >> 20) & 0xFFF; + if (0x0 == ext_cap_offset) { + DH_LOG_ERR(MODULE_MPF, + "pcie_zf_ep find extcap failed\n"); + return -ENXIO; + } + + now_cap_id = + pcie_zf_readl_dbi(ep, func_offset + ext_cap_offset); + } + + return ext_cap_offset; +} + +static int zf_atu_is_used(struct pcie_dpu_ep *ep, int ib_no) +{ + return (pcie_zf_readl_ib_unroll(ep, ib_no, PCIE_ATU_UNR_REGION_CTRL2) & + PCIE_ATU_ENABLE) ? 
+ 1 : + 0; +} + +static int zf_func_is_set_ib(struct pcie_dpu_ep *ep, u8 func_no, + enum pci_barno bar, int ib_no) +{ + int func_val = 0, bar_val = 0; + int ctl1_val = + pcie_zf_readl_ib_unroll(ep, ib_no, PCIE_ATU_UNR_REGION_CTRL1); + int ctl2_val = + pcie_zf_readl_ib_unroll(ep, ib_no, PCIE_ATU_UNR_REGION_CTRL2); + + func_val = PCIE_ATU_FUNC_NUM(func_no & PCIE_DPU_EP_GET_PF_NO); + if ((ctl1_val & PCIE_ATU_FUNC_NUM_MASK) != func_val) + return 0; + + if (isPF(func_no)) { + if (ctl2_val & PCIE_ATU_VFBAR_MATCH_MODE_ENABLE) + return 0; + } else { + if (!(ctl2_val & PCIE_ATU_VFBAR_MATCH_MODE_ENABLE)) + return 0; + } + bar_val = (bar << 8); + return ((ctl2_val & PCIE_ATU_BAR_NUM_MASK) == bar_val); +} + +static int pcie_zf_prog_inbound_atu(struct pcie_dpu_ep *ep, u8 func_no, + int index, int bar, dma_addr_t dpu_addr, + enum pcie_dpu_as_type as_type) +{ + int type = 0; + u32 retries = 0, val = 0; + int vf_flag = 0; + + dpu_addr = dpu_addr | ZF_PREFIX_ADDR; // dpu addr route + + if (!isPF(func_no)) { + vf_flag = 1; + } + + pcie_zf_writel_ib_unroll(ep, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(dpu_addr)); + pcie_zf_writel_ib_unroll(ep, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(dpu_addr)); + + switch (as_type) { + case PCIE_DPU_AS_MEM: + type = PCIE_ATU_TYPE_MEM; + break; + case PCIE_DPU_AS_IO: + if (vf_flag == 1) { + return -EINVAL; + } + type = PCIE_ATU_TYPE_IO; + break; + default: + return -EINVAL; + } + + pcie_zf_writel_ib_unroll( + ep, index, PCIE_ATU_UNR_REGION_CTRL1, + type | PCIE_ATU_INCREASE_REGION_SIZE | + PCIE_ATU_FUNC_NUM(func_no & PCIE_DPU_EP_GET_PF_NO)); + if (vf_flag) { + pcie_zf_writel_ib_unroll( + ep, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE | + PCIE_ATU_VFBAR_MATCH_MODE_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + } else { + pcie_zf_writel_ib_unroll( + ep, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_FUNC_NUM_MATCH_EN | PCIE_ATU_ENABLE | + PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); + } + + /* + * 
Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = pcie_zf_readl_ib_unroll(ep, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return 0; + + mdelay((u32)LINK_WAIT_IATU); + } + DH_LOG_ERR(MODULE_MPF, "Inbound iATU is not being enabled\r\n"); + + return -EBUSY; +} + +static int pcie_dpu_ep_inbound_atu(struct pcie_dpu_ep *ep, u8 func_no, + enum pci_barno bar, dma_addr_t dpu_addr, + enum pcie_dpu_as_type as_type) +{ + int ret = 0, free_win = -1, atu_id = 0; + u32 vf_bar_off = 0; + u32 is_pf = 0; + u32 bar_to_atu_index = 0; + + if ((func_no & PCIE_DPU_EP_GET_PF_NO) >= PCIE_DPU_PF_NUMS) { + DH_LOG_ERR(MODULE_MPF, "func_no is err!\n"); + return -EINVAL; + } + + is_pf = isPF(func_no); + if (!is_pf) { + vf_bar_off = PCIE_VF_BARS_OFF; + } + + spin_lock(&ep->ib_window_lock); + for (atu_id = 0; atu_id < PCIE_DPU_IATU_NUM; atu_id++) { + if (!zf_atu_is_used(ep, atu_id)) { + clear_bit(atu_id, ep->ib_window_map); + } else if (zf_func_is_set_ib(ep, func_no, bar, atu_id)) { + free_win = atu_id; + break; + } + } + + if (-1 == free_win) { + free_win = find_first_zero_bit(ep->ib_window_map, + ep->num_ib_windows); + if (free_win >= ep->num_ib_windows) { + spin_unlock(&ep->ib_window_lock); + DH_LOG_ERR(MODULE_MPF, "No free inbound window\r\n"); + return -EINVAL; + } + } + set_bit(free_win, ep->ib_window_map); + + ret = pcie_zf_prog_inbound_atu(ep, func_no, free_win, bar, dpu_addr, + as_type); + spin_unlock(&ep->ib_window_lock); + if (ret < 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to program IB window\r\n"); + return ret; + } + DH_LOG_INFO(MODULE_MPF, "ep%d func%d bar%d set aitu%d\n", ep->ep_id, + func_no, bar, free_win); + + bar_to_atu_index = (u32)bar + vf_bar_off; + if (bar_to_atu_index < (PCI_STD_NUM_BARS * 2 + 1)) { + ep->bar_to_atu[func_no & PCIE_DPU_EP_GET_PF_NO] + [bar_to_atu_index] = free_win; + } else { + DH_LOG_ERR(MODULE_MPF, "error bar_to_atu 
index %d\r\n", + bar_to_atu_index); + return -EINVAL; + } + + return 0; +} + +static void pcie_zf_prog_outbound_atu(struct pcie_dpu_ep *ep, u8 func_no, + u8 vfunc_no, int index, int type, + u64 dpu_addr, u64 host_addr, size_t size) +{ + u32 retries = 0, val = 0; + u64 limit_addr = 0; + u64 limit_addr_tmp = 0; + + if (size == 0) { + DH_LOG_ERR(MODULE_MPF, + "data error! dpu_addr=0x%llx, size=0x%lx\r\n", + dpu_addr, size); + return; + } else { + limit_addr_tmp = ULLONG_MAX - size + 1; + if (dpu_addr > limit_addr_tmp) { + DH_LOG_ERR(MODULE_MPF, "data overflow!\r\n"); + return; + } + limit_addr = dpu_addr + size - 1; + } + + DH_LOG_INFO( + MODULE_MPF, + "ep_id[0x%x] func_no:0x%x, vfunc_no:0x%x, index: %d, dpu_addr:0x%llx, host_addr:0x%llx, size:0x%lx\n", + ep->ep_id, func_no, vfunc_no, index, dpu_addr, host_addr, size); + + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_LOWER_BASE, + lower_32_bits(dpu_addr)); + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_UPPER_BASE, + upper_32_bits(dpu_addr)); + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_LOWER_LIMIT, + lower_32_bits(limit_addr)); + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_UPPER_LIMIT, + upper_32_bits(limit_addr)); + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_LOWER_TARGET, + lower_32_bits(host_addr)); + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_UPPER_TARGET, + upper_32_bits(host_addr)); + pcie_zf_writel_ob_unroll( + ep, index, PCIE_ATU_UNR_REGION_CTRL1, + type | PCIE_ATU_FUNC_NUM(func_no & PCIE_DPU_EP_GET_PF_NO)); + + if (type == 4 || type == 5) { + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE | + PCIE_ATU_CFG_SHIFT_MODE | + PCIE_ATU_DMA_BYPSS); + } else { + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL2, + PCIE_ATU_ENABLE | PCIE_ATU_DMA_BYPSS); + } + + if (isPF(func_no)) { + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL3, + 0x0); + } else { + pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL3, + 
PCIE_ATU_OB_VF_ACTIVE | vfunc_no); + } + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. + */ + for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { + val = pcie_zf_readl_ob_unroll(ep, index, + PCIE_ATU_UNR_REGION_CTRL2); + if (val & PCIE_ATU_ENABLE) + return; + + mdelay((u32)LINK_WAIT_IATU); + } + DH_LOG_ERR(MODULE_MPF, "Outbound iATU is not being enabled\r\n"); +} + +static int pcie_dpu_ep_outbound_atu(struct pcie_dpu_ep *ep, u8 func_no, + u8 vfunc_no, phys_addr_t dpu_offset, + u64 host_addr, size_t size) +{ + u32 free_win = 0; + + free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); + if (free_win >= ep->num_ob_windows) { + DH_LOG_ERR(MODULE_MPF, "No free outbound window\r\n"); + return -EINVAL; + } + + pcie_zf_prog_outbound_atu(ep, func_no, vfunc_no, free_win, + PCIE_ATU_TYPE_MEM, dpu_offset, host_addr, + size); + + set_bit(free_win, ep->ob_window_map); + ep->ob_src_addr[free_win] = dpu_offset; + + return 0; +} + +static void pcie_dpu_ep_reset_bar(struct pcie_dpu_ep *ep, u8 func_no, + u8 vfunc_no, enum pci_barno bar, int flags) +{ + int sriov_cap_offset = 0; + u32 reg = 0; + u32 func_offset = pcie_dpu_ep_func_select( + func_no & PCIE_DPU_EP_GET_PF_NO, vfunc_no); + + if (isPF(func_no)) { + reg = func_offset + (u32)(PCI_BASE_ADDRESS_0) + + (u32)(PCIE_NEXT_BAR_OFFSET * bar); + pcie_zf_dbi_ro_wr_en(ep); + // pcie_zf_writel_dbi2(ep, reg, 0x0); + pcie_zf_writel_dbi(ep, reg, 0xc); + // pcie_zf_writel_dbi2(ep, reg + PCIE_NEXT_BAR_OFFSET, 0x0); + // pcie_zf_writel_dbi(ep, reg + PCIE_NEXT_BAR_OFFSET, 0x0); + pcie_zf_dbi_ro_wr_dis(ep); + } else { + sriov_cap_offset = pcie_dpu_ep_find_extcap( + ep, func_offset, PCI_EXT_CAP_ID_SRIOV, 0); + if (sriov_cap_offset < 0) { + DH_LOG_ERR(MODULE_MPF, "find_extcap failed!!\n"); + } + + reg = func_offset + (u32)sriov_cap_offset + + (u32)(PCIE_SRIOV_ECAP_BAR0_OFFSET) + + (u32)(bar * PCIE_NEXT_BAR_OFFSET); + pcie_zf_writel_dbi(ep, reg, 0xc); + 
pcie_zf_writel_dbi(ep, reg + PCIE_NEXT_BAR_OFFSET, 0x0);
+
+		// reg = func_offset + sriov_cap_offset;
+		// pcie_dpu_ep_sriov_disable(ep, reg);
+		// pcie_zf_writel_dbi2(ep, reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + bar * PCIE_NEXT_BAR_OFFSET, 0x0);
+		// pcie_zf_writel_dbi2(ep, reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + (bar + 1) * PCIE_NEXT_BAR_OFFSET, 0x0);
+		// pcie_dpu_ep_sriov_enable(ep, reg);
+	}
+}
+
+/* Disable one iATU window by clearing PCIE_ATU_ENABLE in its CTRL2 register. */
+void pcie_zf_disable_atu(struct pcie_dpu_ep *ep, int index,
+			 enum pcie_dpu_region_type type)
+{
+	u32 val = 0;
+	switch (type) {
+	case PCIE_DPU_REGION_INBOUND:
+		val = pcie_zf_readl_ib_unroll(ep, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		val &= (u32)(~PCIE_ATU_ENABLE);
+		pcie_zf_writel_ib_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL2,
+					 val);
+		break;
+	case PCIE_DPU_REGION_OUTBOUND:
+		val = pcie_zf_readl_ob_unroll(ep, index,
+					      PCIE_ATU_UNR_REGION_CTRL2);
+		val &= (u32)(~PCIE_ATU_ENABLE);
+		pcie_zf_writel_ob_unroll(ep, index, PCIE_ATU_UNR_REGION_CTRL2,
+					 val);
+		break;
+	default: /* unknown region type: silently ignored by design */
+		return;
+	}
+}
+
+/* Look up the outbound window whose recorded source address matches
+ * dpu_offset; on success stores the window index in *atu_index and
+ * returns 0, otherwise returns -EINVAL (*atu_index left unchanged). */
+static int pcie_zf_find_index(struct pcie_dpu_ep *ep, phys_addr_t dpu_offset,
+			      u32 *atu_index)
+{
+	u32 index = 0;
+
+	for (index = 0; index < ep->num_ob_windows; index++) {
+		if (ep->ob_src_addr[index] != dpu_offset)
+			continue;
+		*atu_index = index;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+// if ep is link up, return 1
+int is_pcie_ep_link(int ep_id)
+{
+	u32 val = 0;
+	void __iomem *csr_base_addr = 0;
+
+	if (ep_id < 0 || ep_id >= PCIE_DPU_EP_NUM) {
+		DH_LOG_ERR(MODULE_MPF, "is_pcie_ep_link:err ep_id!\n");
+		return -1; /* NOTE(review): every caller tests !is_pcie_ep_link(),
+			    * so this -1 is truthy and reads as "link up";
+			    * returning 0 here would be safer — verify callers. */
+	}
+
+	csr_base_addr = zf_ep->mpf_vaddr +
+			PCIE_DPU_MPF_CSR_ADDR(PCIE_DPU_EP_CSR_SIZE * ep_id);
+	pcie_zf_read(csr_base_addr + PCIE_DPU_EP_CSR_LTSSM_ADDR, 4, &val);
+	if (LTSSM_EN_VAL != val) {
+		DH_LOG_ERR(MODULE_MPF, "read off:0x150 val:0x%x\n", val);
+	}
+
+	return (LTSSM_EN_VAL == val); /* 1 = LTSSM indicates link up */
+}
+EXPORT_SYMBOL_GPL(is_pcie_ep_link);
+
+/* Pulse the EP power-reset bit (clear then set bit 1 of the PRST register).
+ * NOTE(review): no ep_id range check here, unlike is_pcie_ep_link(). */
+void ep_power_reset(int ep_id)
+{
+	u64 csr_base_addr = 0;
+
+	csr_base_addr = zf_ep->mpf_paddr +
+			
PCIE_DPU_MPF_CSR_ADDR(PCIE_DPU_EP_CSR_SIZE * ep_id);
+	cfg_phy_rmw(csr_base_addr + PCIE_DPU_EP_CSR_PRST_ADDR, 0x0, 0x2);
+	cfg_phy_rmw(csr_base_addr + PCIE_DPU_EP_CSR_PRST_ADDR, 0x2, 0x2);
+}
+EXPORT_SYMBOL_GPL(ep_power_reset);
+
+/* Enable (en=1) or disable (en=0) the virtio module bit for one PF of one EP. */
+int ep_virtio_module_set(int ep_id, int pf_idx, int en)
+{
+	u64 csr_base_addr = 0;
+
+	csr_base_addr = zf_ep->mpf_paddr +
+			PCIE_DPU_MPF_CSR_ADDR(PCIE_DPU_EP_CSR_SIZE * ep_id);
+	if ((0 != en) & (1 != en)) { /* NOTE(review): bitwise '&' where '&&' is
+				      * meant; result happens to be identical for
+				      * these 0/1 operands, but fix for clarity. */
+		DH_LOG_ERR(MODULE_MPF, "err module!\n");
+		return -EINVAL;
+	}
+	cfg_phy_rmw(csr_base_addr + PCIE_DPU_EP_CSR_VIRT_ADDR, en << pf_idx,
+		    0x1 << pf_idx);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ep_virtio_module_set);
+
+/* Fold the EP port id into an outbound-window offset: keep the low 16 bits,
+ * place ep_id at bits [16..], and shift the remaining high bits up by
+ * EP_ID_LEN. Presumably matches the hardware outbound address decode —
+ * TODO confirm against EP_DPU_PA() users. */
+static phys_addr_t ob_addr_set(int ep_id, phys_addr_t phys_addr)
+{
+	phys_addr_t rel_addr;
+	u64 addr_mask = 0xffff;
+
+	rel_addr = (phys_addr & addr_mask) | ep_id << 16 |
+		   ((phys_addr & ~addr_mask) << EP_ID_LEN);
+	return rel_addr;
+}
+
+/* Read `size` bytes from the outbound (vsock) window at host address
+ * phys_addr into *val. Returns 0 on success; -ENOMEM on bad arguments.
+ * NOTE(review): -EINVAL would be the conventional code for the argument
+ * checks below; -ENOMEM misleads callers. */
+int pcie_zte_epc_ob_read(struct pci_epc *epc, phys_addr_t phys_addr,
+			 unsigned int size, unsigned int *val)
+{
+	void __iomem *vaddr = NULL;
+	struct pcie_dpu_ep *dpu_dev = NULL;
+
+	if (!epc) {
+		DH_LOG_ERR(MODULE_MPF, "epc is NULL!\n");
+		return -ENOMEM;
+	}
+
+	if (phys_addr < zf_ep->vsock_paddr ||
+	    phys_addr >= zf_ep->vsock_paddr + zf_ep->ob_size) {
+		DH_LOG_ERR(MODULE_MPF, "err:phys_addr out of range!\n");
+		return -ENOMEM;
+	}
+
+	dpu_dev = epc_get_drvdata(epc);
+	vaddr = zf_ep->vsock_vaddr +
+		ob_addr_set(dpu_dev->ep_id + 5, phys_addr - zf_ep->vsock_paddr);
+		/* NOTE(review): magic "+ 5" port-id bias also appears in
+		 * pci_dpu_ep_get_port_id(); lift to a named constant. */
+
+	return pcie_zf_read(vaddr, size, val);
+}
+EXPORT_SYMBOL_GPL(pcie_zte_epc_ob_read);
+
+/* Write counterpart of pcie_zte_epc_ob_read(); same range checks and the
+ * same -ENOMEM-for-bad-argument quirk. */
+int pcie_zte_epc_ob_write(struct pci_epc *epc, phys_addr_t phys_addr, int size,
+			  int val)
+{
+	void __iomem *vaddr = NULL;
+	struct pcie_dpu_ep *dpu_dev = NULL;
+
+	if (!epc) {
+		DH_LOG_ERR(MODULE_MPF, "epc is NULL\n");
+		return -ENOMEM;
+	}
+
+	if (phys_addr < zf_ep->vsock_paddr ||
+	    phys_addr >= zf_ep->vsock_paddr + zf_ep->ob_size) {
+		DH_LOG_ERR(MODULE_MPF, "err:phys_addr out of range\n");
+		return -ENOMEM;
+	}
+
+	dpu_dev = 
epc_get_drvdata(epc); + + vaddr = zf_ep->vsock_vaddr + + ob_addr_set(dpu_dev->ep_id + 5, phys_addr - zf_ep->vsock_paddr); + + return pcie_zf_write(vaddr, size, val); +} +EXPORT_SYMBOL_GPL(pcie_zte_epc_ob_write); + +/*################the ops of epc###################*/ +#if 1 +static int pcie_dpu_ep_write_header(struct pci_epc *epc, u8 func_no, + u8 vfunc_no, struct pci_epf_header *hdr) +{ + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + u32 func_offset = 0, sriov_offset = 0; + int sriov_cap_offset = 0; + int pf_no = func_no & PCIE_DPU_EP_GET_PF_NO; + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return -ENODEV; + } + + DH_LOG_INFO(MODULE_MPF, "func_no = 0x%x, vfunc_no = 0x%x\n", func_no, + vfunc_no); + + pcie_zf_dbi_ro_wr_en(ep); + if (isPF(func_no)) { + func_offset = pcie_dpu_ep_func_select(func_no, 0); + pcie_zf_writew_dbi(ep, func_offset + PCI_VENDOR_ID, + hdr->vendorid); + pcie_zf_writew_dbi(ep, func_offset + PCI_DEVICE_ID, + hdr->deviceid); + pcie_zf_writel_dbi(ep, func_offset + PCI_CLASS_REVISION, + hdr->revid | hdr->progif_code << 8 | + hdr->subclass_code << 16 | + hdr->baseclass_code << 24); + pcie_zf_writew_dbi(ep, func_offset + PCI_SUBSYSTEM_VENDOR_ID, + hdr->subsys_vendor_id); + pcie_zf_writew_dbi(ep, func_offset + PCI_SUBSYSTEM_ID, + hdr->subsys_id); + } else { + func_offset = pcie_dpu_ep_func_select(pf_no, 0); + sriov_cap_offset = pcie_dpu_ep_find_extcap( + ep, func_offset, PCI_EXT_CAP_ID_SRIOV, 0); + if (sriov_cap_offset < 0) { + DH_LOG_ERR(MODULE_MPF, "find_extcap failed!!\n"); + return -ENODEV; + } + sriov_offset = func_offset + (u32)sriov_cap_offset; + pcie_zf_writew_dbi(ep, sriov_offset + PCIE_SRIOV_ECAP_DEVICE_ID, + hdr->deviceid); + } + pcie_zf_dbi_ro_wr_dis(ep); + + return 0; +} + +static int pcie_dpu_ep_set_pf_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epf_bar *epf_bar) +{ + int ret = 0; + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + enum pci_barno barno = 
epf_bar->barno; + size_t size = epf_bar->size; + int flags = epf_bar->flags; + enum pcie_dpu_as_type as_type; + u32 reg = 0; + u32 func_offset = 0; + u64 dpu_addr = 0; + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return -ENODEV; + } + + dpu_addr = epf_bar->phys_addr | ZF_PREFIX_ADDR; + + if (!(flags & PCI_BASE_ADDRESS_SPACE)) + as_type = PCIE_DPU_AS_MEM; + else + as_type = PCIE_DPU_AS_IO; + + if (barno != BAR_4) { + ret = pcie_dpu_ep_inbound_atu(ep, func_no, barno, dpu_addr, + as_type); + if (ret) + return ret; + } else { + return 0; + } + + func_offset = pcie_dpu_ep_func_select(func_no, vfunc_no); + + if (barno != BAR_ROM) { + reg = PCI_BASE_ADDRESS_0 + (4 * barno) + func_offset; + } else { + reg = PCI_ROM_ADDRESS + func_offset; + } + + if (size) { + pcie_zf_dbi_ro_wr_en(ep); + + pcie_zf_writel_dbi2(ep, reg, 1); + pcie_zf_writel_dbi2(ep, reg, lower_32_bits(size - 1)); + pcie_zf_writel_dbi(ep, reg, flags | BIT(3)); + if (barno != BAR_ROM) { + pcie_zf_writel_dbi2(ep, reg + PCIE_NEXT_BAR_OFFSET, + upper_32_bits(size - 1)); + pcie_zf_writel_dbi(ep, reg + PCIE_NEXT_BAR_OFFSET, 0); + } + pcie_zf_dbi_ro_wr_dis(ep); + } + + return 0; +} + +static int pcie_dpu_ep_set_vf_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epf_bar *epf_bar) +{ + int ret = 0; + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + enum pci_barno barno = epf_bar->barno; + int flags = epf_bar->flags; + u32 reg = 0; + u32 func_offset = 0; + int sriov_cap_offset = 0; + u32 pf_func_no = func_no & PCIE_DPU_EP_GET_PF_NO; + u64 dpu_addr = epf_bar->phys_addr | ZF_PREFIX_ADDR; + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return -ENODEV; + } + + if (flags & PCI_BASE_ADDRESS_SPACE) { + DH_LOG_ERR(MODULE_MPF, "error:vf bar must be mem\n"); + return -EINVAL; + } + + DH_LOG_DEBUG(MODULE_MPF, "pf%x vf%x bar->flags:%d\n", pf_func_no, + vfunc_no, flags); + + if (barno != BAR_4) { + ret = 
pcie_dpu_ep_inbound_atu(ep, func_no, barno, dpu_addr, + PCIE_DPU_AS_MEM); + if (ret) + return ret; + } else { + return 0; + } + + func_offset = pcie_dpu_ep_func_select(pf_func_no, 0); + sriov_cap_offset = pcie_dpu_ep_find_extcap(ep, func_offset, + PCI_EXT_CAP_ID_SRIOV, 0); + if (sriov_cap_offset < 0) { + DH_LOG_ERR(MODULE_MPF, "find_extcap failed!!\n"); + return -ENXIO; + } + + reg = func_offset + sriov_cap_offset; + pcie_dpu_ep_sriov_disable(ep, reg); + if (epf_bar->size) { + pcie_zf_writel_dbi2(ep, + reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + + barno * PCIE_NEXT_BAR_OFFSET, + 1); + pcie_zf_writel_dbi2(ep, + reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + + barno * PCIE_NEXT_BAR_OFFSET, + lower_32_bits(epf_bar->size - 1)); + pcie_zf_writel_dbi2(ep, + reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + + (barno + 1) * PCIE_NEXT_BAR_OFFSET, + upper_32_bits(epf_bar->size - 1)); + } + pcie_zf_dbi_ro_wr_en(ep); + pcie_zf_writel_dbi(ep, + reg + PCIE_SRIOV_ECAP_BAR0_OFFSET + + barno * PCIE_NEXT_BAR_OFFSET, + epf_bar->flags | BIT(3)); + pcie_zf_dbi_ro_wr_dis(ep); + + pcie_dpu_ep_sriov_enable(ep, reg); + + return 0; +} + +static int pcie_dpu_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct pci_epf_bar *epf_bar) +{ + DH_LOG_DEBUG(MODULE_MPF, "func:0x%x vfunc:0x%x\n", func_no, vfunc_no); + if (isPF(func_no)) { + return pcie_dpu_ep_set_pf_bar(epc, func_no, vfunc_no, epf_bar); + } else if (vfunc_no == 0) { + return pcie_dpu_ep_set_vf_bar(epc, func_no, vfunc_no, epf_bar); + } + + return 0; +} + +static void pcie_dpu_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + bool clear_vf, struct pci_epf_bar *epf_bar) +{ + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + u32 vf_bar_off = 0; + enum pci_barno barno = epf_bar->barno; + int atu_index = 0; + u32 bar_to_atu_index = 0; + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return; + } + + if ((func_no & PCIE_DPU_EP_GET_PF_NO) >= PCIE_DPU_PF_NUMS) { + DH_LOG_ERR(MODULE_MPF, "func_no is 
err!\n"); + return; + } + + if ((barno == BAR_4) || (!isPF(func_no) && !vfunc_no)) { + return; + } + + if (!isPF(func_no)) { + vf_bar_off = PCIE_VF_BARS_OFF; + } + + bar_to_atu_index = (u32)barno + vf_bar_off; + if (bar_to_atu_index < (PCI_STD_NUM_BARS * 2 + 1)) { + atu_index = ep->bar_to_atu[func_no & PCIE_DPU_EP_GET_PF_NO] + [bar_to_atu_index]; + } else { + DH_LOG_ERR(MODULE_MPF, "error bar_to_atu index %d\r\n", + bar_to_atu_index); + return; + } + + pcie_dpu_ep_reset_bar(ep, func_no, vfunc_no, barno, epf_bar->flags); + + pcie_zf_disable_atu(ep, atu_index, PCIE_DPU_REGION_INBOUND); + spin_lock(&ep->ib_window_lock); + clear_bit(atu_index, ep->ib_window_map); + spin_unlock(&ep->ib_window_lock); +} + +static int pcie_dpu_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + phys_addr_t dpu_offset, u64 host_addr, + size_t size) +{ + int ret = 0; + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return -ENODEV; + } + + ret = pcie_dpu_ep_outbound_atu(ep, func_no, vfunc_no, + dpu_offset - zf_ep->vsock_paddr, + host_addr, size); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "Failed to enable address\r\n"); + return ret; + } + + return 0; +} + +static void pcie_dpu_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + phys_addr_t dpu_offset) +{ + int ret = 0; + u32 atu_index = 0; + struct pcie_dpu_ep *ep = epc_get_drvdata(epc); + + if (!is_pcie_ep_link(ep->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", ep->ep_id); + return; + } + + ret = pcie_zf_find_index(ep, dpu_offset - zf_ep->vsock_paddr, + &atu_index); + if (ret < 0) + return; + + pcie_zf_disable_atu(ep, atu_index, PCIE_DPU_REGION_OUTBOUND); + clear_bit(atu_index, ep->ob_window_map); +} + +static int pcie_dpu_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupts) +{ + DH_LOG_ERR(MODULE_MPF, "error:pcie_dpu_ep can't set msi#\n"); + return -ESRCH; +} + +static int 
pcie_dpu_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) +{ + DH_LOG_ERR(MODULE_MPF, "error:pcie_dpu_ep can't get msi#\n"); + return -ESRCH; +} + +static int pcie_dpu_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u16 interrupts, enum pci_barno bir, + u32 bar_offset) +{ + /* + * MSIX tables are fixed in BAR4 (mapped to PCIe IP), other configs + * will make MSIX unable to function. + */ + dev_warn(&epc->dev, "MSIX config is not supported by ZF epc\n"); + + return 0; +} + +int pcie_dpu_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) +{ + struct pcie_dpu_ep *dpu_dev = epc_get_drvdata(epc); + u32 val, reg; + u32 func_offset = 0; + struct pcie_dpu_ep_func *ep_func; + + if (!is_pcie_ep_link(dpu_dev->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", dpu_dev->ep_id); + return -ENODEV; + } + + ep_func = pcie_dpu_ep_get_func_from_ep(dpu_dev, func_no, vfunc_no); + if (!ep_func || !ep_func->msix_cap) + return -EINVAL; + + func_offset = pcie_dpu_ep_func_select(func_no, vfunc_no); + + reg = func_offset + ep_func->msix_cap + PCI_MSIX_FLAGS; + val = pcie_zf_readw_dbi(dpu_dev, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + + return val; +} + +int pcie_dpu_ep_raise_legacy_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no) +{ + DH_LOG_ERR(MODULE_MPF, "EP cannot trigger legacy IRQs\r\n"); + + return -EINVAL; +} + +int pcie_dpu_ep_raise_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupt_num) +{ + DH_LOG_ERR(MODULE_MPF, "EP cannot trigger msi IRQs\r\n"); + + return -EINVAL; +} + +int pcie_dpu_ep_raise_msix_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupt_num) +{ + struct pcie_dpu_ep *dpu_dev = epc_get_drvdata(epc); + u32 msg_data = 0; + + if (!is_pcie_ep_link(dpu_dev->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", dpu_dev->ep_id); + return -ENODEV; + } + + // DH_LOG_INFO(MODULE_MPF, "func_no = 0x%x, vfunc_no = 0x%x, interrupt_num = 0x%x\n", func_no, 
vfunc_no, interrupt_num); + + if (isPF(func_no)) { + msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | + (interrupt_num); + } else { + msg_data = ((func_no & PCIE_DPU_EP_GET_PF_NO) + << PCIE_MSIX_DOORBELL_PF_SHIFT) | + MSIX_DOORBELL_VF_ACTIVE | + (vfunc_no << PCIE_MSIX_DOORBELL_VF_SHIFT) | + (interrupt_num); + } + + pcie_zf_writel_dbi(dpu_dev, PCIE_MSIX_DOORBELL, msg_data); + + return 0; +} + +static int pcie_dpu_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return pcie_dpu_ep_raise_legacy_irq(epc, func_no, vfunc_no); + case PCI_EPC_IRQ_MSI: + return pcie_dpu_ep_raise_msi_irq(epc, func_no, vfunc_no, + interrupt_num - 1); + case PCI_EPC_IRQ_MSIX: + return pcie_dpu_ep_raise_msix_irq(epc, func_no, vfunc_no, + interrupt_num - 1); + default: + DH_LOG_ERR(MODULE_MPF, "UNKNOWN IRQ type\r\n"); + } + return 0; +} + +static int pcie_dpu_ep_get_max_vfs(struct pci_epc *epc, u8 func_no) +{ + struct pcie_dpu_ep *dpu_dev = NULL; + u32 vf_total_num = 0; + + if (epc == NULL) { + DH_LOG_ERR(MODULE_MPF, "epc is NULL!!!\n"); + return -EINVAL; + } + dpu_dev = epc_get_drvdata(epc); + + if (!is_pcie_ep_link(dpu_dev->ep_id)) { + DH_LOG_ERR(MODULE_MPF, "err: ep%d not link\n", dpu_dev->ep_id); + return -ENODEV; + } + + vf_total_num = func_no & PCIE_DPU_EP_GET_PF_NO; + if (vf_total_num >= PCIE_DPU_PF_NUMS) { + DH_LOG_ERR(MODULE_MPF, "error vf_total_num=%d\n", vf_total_num); + return -EINVAL; + } + + DH_LOG_INFO(MODULE_MPF, "get vf max_num:%d\n", + dpu_dev->vf_total_num[vf_total_num]); + return dpu_dev->vf_total_num[vf_total_num]; +} + +static void pcie_dpu_ep_stop(struct pci_epc *epc) +{ + DH_LOG_INFO(MODULE_MPF, "%s\n", __func__); + return; +} + +static int pcie_dpu_ep_start(struct pci_epc *epc) +{ + DH_LOG_INFO(MODULE_MPF, "%s\n", __func__); + return 0; +} + +static const struct pci_epc_features pcie_zf_epc_features = { + .linkup_notifier = false, + .msi_capable = false, + 
.msix_capable = true, + .reserved_bar = PCIE_DPU_EP_REAERVED_BAR, + .bar_fixed_64bit = PCIE_DPU_EP_BAR_FIXED_64BIT, + .align = PCIE_DPU_EP_ALIGN, +}; + +static const struct pci_epc_features * +pcie_dpu_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no) +{ + return &pcie_zf_epc_features; +} + +static int pci_dpu_ep_get_port_id(struct pci_epc *epc, + enum pci_epc_port_id_type *id) +{ + struct pcie_dpu_ep *dpu_dev = epc_get_drvdata(epc); + + *id = dpu_dev->ep_id + 5; + return 0; +} + +static int pci_dpu_ep_calc_pfns(struct pci_epc *epc, phys_addr_t phys, + size_t n_pfns, unsigned long *pfn) +{ + enum pci_epc_port_id_type port = 0; + phys_addr_t offset = 0, base = zf_ep->vsock_paddr; + size_t i = 0; + + if (phys < base || + phys - base + (n_pfns << PAGE_SHIFT) > zf_ep->ob_size) + return -EINVAL; + + pci_dpu_ep_get_port_id(epc, &port); + + for (i = 0; i < n_pfns; i++) { + offset = phys - base + (i << PAGE_SHIFT); + pfn[i] = (base + EP_DPU_PA(offset, port)) >> PAGE_SHIFT; + } + return 0; +} + +#endif + +static const struct pci_epc_ops epc_ops = { + .write_header = pcie_dpu_ep_write_header, + .set_bar = pcie_dpu_ep_set_bar, + .clear_bar = pcie_dpu_ep_clear_bar, + .map_addr = pcie_dpu_ep_map_addr, + .unmap_addr = pcie_dpu_ep_unmap_addr, + .set_msi = pcie_dpu_ep_set_msi, + .get_msi = pcie_dpu_ep_get_msi, + .set_msix = pcie_dpu_ep_set_msix, + .get_msix = pcie_dpu_ep_get_msix, + .raise_irq = pcie_dpu_ep_raise_irq, + .get_max_vfs = pcie_dpu_ep_get_max_vfs, + .start = pcie_dpu_ep_start, + .stop = pcie_dpu_ep_stop, + .get_features = pcie_dpu_ep_get_features, + .get_port_id = pci_dpu_ep_get_port_id, + .calc_pfns = pci_dpu_ep_calc_pfns, + .get_xdma_chan = zf_pcie_get_hdma_chan, +}; + +static void dpu_ep_default_set(int ep_id) +{ + u32 pf_idx = 0; + u32 func_offset = 0; + int sriov_cap_offset = 0; + u32 reg = 0; + struct pcie_dpu_ep *ep = zf_ep->dpu_ep_array[ep_id]; + + pcie_zf_dbi_ro_wr_en(ep); + for (pf_idx = 0; pf_idx < PCIE_DPU_PF_NUMS; ++pf_idx) { + if 
(!(ep->permissible_pf_map & (0x1 << pf_idx))) { + continue; + } + pcie_zf_writel_dbi(ep, pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE, + PCIE_DPU_PF_INITIAL_ID); + pcie_zf_writel_dbi(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_CLASS_REVISION, + PCIE_DPU_PF_DEFAUTL_CLASSCODE); + pcie_zf_writel_dbi2(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_ROM_ADDRESS, + ZF_DISABLE); + + pcie_zf_writel_dbi2(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_BASE_ADDRESS_4, + ZF_ENABLE); + pcie_zf_writel_dbi2(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_BASE_ADDRESS_4, + lower_32_bits(BAR4_DEFAULT_SIZE - 1)); + pcie_zf_writel_dbi2(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_BASE_ADDRESS_4 + + PCIE_NEXT_BAR_OFFSET, + upper_32_bits(BAR4_DEFAULT_SIZE - 1)); + pcie_zf_writel_dbi(ep, + pf_idx * PCIE_DPU_EP_FUNC_CFG_SIZE + + PCI_BASE_ADDRESS_4, + PCIE_DEFAULT_BAR_FLAG); + + func_offset = pcie_dpu_ep_func_select(pf_idx, 0); + sriov_cap_offset = pcie_dpu_ep_find_extcap( + ep, func_offset, PCI_EXT_CAP_ID_SRIOV, 0); + if (sriov_cap_offset < 0) { + DH_LOG_ERR(MODULE_MPF, "find_extcap failed!!\n"); + return; + } + + reg = func_offset + sriov_cap_offset; + pcie_dpu_ep_sriov_disable(ep, reg); + pcie_zf_writel_dbi(ep, reg + PCIE_SRIOV_ECAP_BAR4_OFFSET, + PCIE_DEFAULT_BAR_FLAG); + pcie_zf_writel_dbi2(ep, reg + PCIE_SRIOV_ECAP_BAR4_OFFSET, 0x1); + pcie_zf_writel_dbi2(ep, reg + PCIE_SRIOV_ECAP_BAR4_OFFSET, + lower_32_bits(BAR4_DEFAULT_SIZE - 1)); + pcie_zf_writel_dbi2(ep, + reg + PCIE_SRIOV_ECAP_BAR4_OFFSET + + PCIE_NEXT_BAR_OFFSET, + upper_32_bits(BAR4_DEFAULT_SIZE - 1)); + pcie_dpu_ep_sriov_enable(ep, reg); + } + pcie_zf_dbi_ro_wr_dis(ep); +} + +static int dpu_ep_iatu_init(struct device *dev, int id) +{ + int iatu_no = 0; + + zf_ep->dpu_ep_array[id]->num_ib_windows = PCIE_DPU_IATU_NUM; + zf_ep->dpu_ep_array[id]->num_ob_windows = PCIE_DPU_IATU_NUM; + zf_ep->dpu_ep_array[id]->ib_window_map = devm_kcalloc( + dev, BITS_TO_LONGS(zf_ep->dpu_ep_array[id]->num_ib_windows), + sizeof(long), 
GFP_KERNEL); + if (!zf_ep->dpu_ep_array[id]->ib_window_map) { + DH_LOG_ERR(MODULE_MPF, "get ib_map err\n"); + return -ENOMEM; + } + + zf_ep->dpu_ep_array[id]->ob_window_map = devm_kcalloc( + dev, BITS_TO_LONGS(zf_ep->dpu_ep_array[id]->num_ob_windows), + sizeof(long), GFP_KERNEL); + if (!zf_ep->dpu_ep_array[id]->ob_window_map) { + DH_LOG_ERR(MODULE_MPF, "get ob_map err\n"); + goto free_ib_map; + } + + zf_ep->dpu_ep_array[id]->ob_src_addr = + devm_kcalloc(dev, zf_ep->dpu_ep_array[id]->num_ob_windows, + sizeof(phys_addr_t), GFP_KERNEL); + if (!zf_ep->dpu_ep_array[id]->ob_src_addr) { + DH_LOG_ERR(MODULE_MPF, "get ob_src_addr err\n"); + goto free_ob_map; + } + + // 确定哪些inbound已经被使用 + for (iatu_no = 0; iatu_no < PCIE_DPU_IATU_NUM; iatu_no++) { + if (zf_atu_is_used(zf_ep->dpu_ep_array[id], iatu_no)) { + set_bit(iatu_no, + zf_ep->dpu_ep_array[id]->ib_window_map); + } + } + + spin_lock_init(&zf_ep->dpu_ep_array[id]->ib_window_lock); + + return 0; + +free_ob_map: + devm_kfree(dev, zf_ep->dpu_ep_array[id]->ob_window_map); +free_ib_map: + devm_kfree(dev, zf_ep->dpu_ep_array[id]->ib_window_map); + return -ENOMEM; +} + +static int dpu_ep_func_list_init(struct device *dev, int id) +{ + u8 func_no = 0, vfunc_no = 0; + u8 vf_total_num = 0; + u32 func_offset = 0; + struct pcie_dpu_ep_func *ep_func = NULL; + + INIT_LIST_HEAD(&zf_ep->dpu_ep_array[id]->func_list); + for (func_no = 0; func_no < PCIE_DPU_PF_NUMS; ++func_no) { + if (!(zf_ep->dpu_ep_array[id]->permissible_pf_map & + (0x1 << func_no))) { + continue; + } + ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); + if (!ep_func) { + return -ENOMEM; + } + + ep_func->func_no = func_no; + func_offset = pcie_dpu_ep_func_select(func_no, vfunc_no); + ep_func->msix_cap = pcie_dpu_ep_find_cap( + zf_ep->dpu_ep_array[id], func_offset, PCI_CAP_ID_MSIX); + + list_add_tail(&ep_func->list, + &zf_ep->dpu_ep_array[id]->func_list); + + vf_total_num = + (u8)zf_ep->dpu_ep_array[id]->vf_total_num[func_no]; + for (vfunc_no = 0; vfunc_no < 
vf_total_num; ++vfunc_no) { + ep_func = + devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); + if (!ep_func) { + return -ENOMEM; + } + + ep_func->func_no = PCIE_DPU_EP_FUNC_IS_VF | func_no; + ep_func->vfunc_no = vfunc_no; + func_offset = pcie_dpu_ep_func_select(ep_func->func_no, + vfunc_no); + ep_func->msix_cap = pcie_dpu_ep_find_cap( + zf_ep->dpu_ep_array[id], func_offset, + PCI_CAP_ID_MSIX); + + list_add_tail(&ep_func->list, + &zf_ep->dpu_ep_array[id]->func_list); + } + } + + return 0; +} + +static int dpu_ep_vf_total_num_get(int id) +{ + int func_no = 0, func_offset = 0, sriov_cap_offset = 0, + vf_total_num_addr = 0; + + for (func_no = 0; func_no < PCIE_DPU_PF_NUMS; ++func_no) { + func_offset = pcie_dpu_ep_func_select(func_no, 0); + sriov_cap_offset = pcie_dpu_ep_find_extcap( + zf_ep->dpu_ep_array[id], func_offset, + PCI_EXT_CAP_ID_SRIOV, 0); + if (sriov_cap_offset < 0) { + DH_LOG_ERR(MODULE_MPF, "find_extcap failed!!\n"); + return -ENXIO; + } + vf_total_num_addr = + func_offset + sriov_cap_offset + PCIE_SRIOV_TOTAL_VFS; + zf_ep->dpu_ep_array[id]->vf_total_num[func_no] = + pcie_zf_readw_dbi(zf_ep->dpu_ep_array[id], + vf_total_num_addr); + } + + return 0; +} + +static int dpu_ep_get_permissible_pf(int id) +{ + int func_no = 0, func_offset = 0; + u32 func_id = 0; + + for (func_no = 0; func_no < PCIE_DPU_PF_NUMS; ++func_no) { + func_offset = pcie_dpu_ep_func_select(func_no, 0); + func_id = + pcie_zf_readl_dbi(zf_ep->dpu_ep_array[id], func_offset); + if (PCIE_DPU_PF_DEFAUTL_ID1 == func_id || + PCIE_DPU_PF_DEFAUTL_ID2 == func_id || + PCIE_DPU_PF_DEFAUTL_ID3 == func_id || + PCIE_DPU_PF_DEFAUTL_ID4 == func_id) { + zf_ep->dpu_ep_array[id]->permissible_pf_map |= + (0x1 << func_no); + } + } + + return 0; +} + +static void epc_dev_release(struct device *dev) +{ +} + +static int pci_zte_epc_dev_init_one(struct pci_dev *pdev, int id) +{ + struct pci_epc *epc = NULL; + struct device *dev = NULL; + struct device_node *np = NULL; + struct platform_device *zf_pdev = NULL; + struct 
platform_device *zf_pdev_dma = NULL;
+	char class_name[PCIE_DPU_EP_CLASS_NAME] = { 0 };
+	int node = 0, i = 0;
+	int ret = -ENOMEM;
+
+	ret = snprintf(class_name, sizeof(class_name) - 1, "zf_epc_class%d",
+		       id);
+	if ((ret < 0) || (ret >= PCIE_DPU_EP_CLASS_NAME)) {
+		DH_LOG_ERR(MODULE_MPF, "get ep name failed\n");
+		return -ENOMEM;
+	}
+
+	ret = -ENOMEM;
+
+	zf_pdev = platform_device_register_simple(class_name, -1, NULL, 0);
+	if (!zf_pdev) { /* NOTE(review): platform_device_register_simple()
+			 * returns ERR_PTR() on failure, never NULL — this
+			 * check can never fire; use IS_ERR(zf_pdev). */
+		DH_LOG_ERR(MODULE_MPF,
+			   "Error platform_device_register zf_pdev failed\n");
+		return ret;
+	}
+
+	ret = snprintf(class_name, sizeof(class_name) - 1, "zf_epc_dma_rd%d",
+		       id);
+	if (ret < 0 || ret > sizeof(class_name)) {
+		/* NOTE(review): inconsistent with the first snprintf check
+		 * above (>= vs >); also signed/unsigned comparison, and on
+		 * truncation this returns a POSITIVE ret to the caller. */
+		platform_device_unregister(zf_pdev);
+		return ret;
+	}
+
+	zf_pdev_dma = platform_device_register_simple(class_name, -1, NULL, 0);
+	if (!zf_pdev_dma) { /* NOTE(review): same ERR_PTR-vs-NULL issue as above */
+		DH_LOG_ERR(
+			MODULE_MPF,
+			"Error platform_device_register zf_pdev_dma failed\n");
+		platform_device_unregister(zf_pdev);
+		return ret;
+	}
+
+	/* Borrow the PCI driver so devm_* allocations attach to these devs. */
+	zf_pdev->dev.driver = pdev->dev.driver;
+	zf_pdev_dma->dev.driver = pdev->dev.driver;
+
+	dev = &zf_pdev->dev;
+	np = dev->of_node;
+	epc = pci_epc_create(dev, &epc_ops);
+	if (IS_ERR_OR_NULL(epc)) {
+		DH_LOG_ERR(MODULE_MPF, "Failed %ld to create epc device\n",
+			   PTR_ERR(epc));
+		ret = -EPERM;
+		goto free_pdev;
+	}
+
+	epc->dev.release = epc_dev_release;
+	epc->max_functions = PCIE_DPU_PF_NUMS;
+	epc->is_dpu_epc = 1;
+
+	ret = pci_epc_mem_init(epc, zf_ep->vsock_paddr, zf_ep->ob_size,
+			       PAGE_SIZE);
+	if (ret < 0) {
+		DH_LOG_ERR(MODULE_MPF,
+			   "ep%d failed to initialize the memory space\n", id);
+		goto free_epc;
+	}
+
+	node = dev_to_node(dev); // multi-core: pick the device's NUMA node
+	if (node == NUMA_NO_NODE)
+		set_dev_node(dev, first_memory_node);
+
+	/*##################zf_dev init##################*/
+	zf_ep->dpu_ep_array[id] =
+		kzalloc_node(sizeof(struct pcie_dpu_ep), GFP_KERNEL, node);
+	if (!zf_ep->dpu_ep_array[id]) {
+		DH_LOG_ERR(MODULE_MPF, "Error kzalloc node\n");
+		ret = -ENOMEM;
+		goto free_epc_mem;
+	}
+
+	
zf_ep->dpu_ep_array[id]->ep_id = id; + zf_ep->dpu_ep_array[id]->epc = epc; + zf_ep->dpu_ep_array[id]->dbi_base = + zf_ep->dbi_vaddr + PCIE_DPU_EP_DBI_SIZE * id; + zf_ep->dpu_ep_array[id]->atu_base = + zf_ep->dpu_ep_array[id]->dbi_base + DEFAULT_DBI_ATU_OFFSET; + zf_ep->dpu_ep_array[id]->zf_pdev = zf_pdev; + zf_ep->dpu_ep_array[id]->zf_pdev_dma = zf_pdev_dma; + + dpu_ep_default_set(id); + + epc_set_drvdata(epc, zf_ep->dpu_ep_array[id]); + + dpu_ep_get_permissible_pf(id); + for (i = 0; i < PCIE_DPU_PF_NUMS; i++) { + if (!(zf_ep->dpu_ep_array[id]->permissible_pf_map & + (0x1 << i))) { + DH_LOG_INFO(MODULE_MPF, "ep%d pf%d can't uesd\n", id, + i); + set_bit(i, &epc->function_num_map); + } + } + + ret |= dpu_ep_iatu_init(&pdev->dev, id); + ret |= dpu_ep_vf_total_num_get(id); + ret |= dpu_ep_func_list_init(&pdev->dev, id); + + if (ret) + goto free_dpu_ep; + + return 0; + +free_dpu_ep: + kfree(zf_ep->dpu_ep_array[id]); +free_epc_mem: + pci_epc_mem_exit(epc); +free_epc: + pci_epc_destroy(epc); +free_pdev: + platform_device_unregister(zf_pdev); + platform_device_unregister(zf_pdev_dma); + return ret; +} + +static void pci_zte_epc_dev_free_one(struct pci_dev *pdev, int id) +{ + pcie_zf_dma_free(zf_ep->dpu_ep_array[id], pdev); + pci_epc_destroy(zf_ep->dpu_ep_array[id]->epc); + zf_ep->dpu_ep_array[id]->zf_pdev->dev.driver = NULL; + zf_ep->dpu_ep_array[id]->zf_pdev_dma->dev.driver = NULL; + platform_device_unregister(zf_ep->dpu_ep_array[id]->zf_pdev); + platform_device_unregister(zf_ep->dpu_ep_array[id]->zf_pdev_dma); + kfree(zf_ep->dpu_ep_array[id]); +} + +static void pci_zte_epc_dev_free(struct pci_dev *pdev) +{ + u8 ep_idx = 0; + for (ep_idx = 0; ep_idx < PCIE_DPU_EP_NUM; ep_idx++) { + pci_zte_epc_dev_free_one(pdev, ep_idx); + } +} + +static int zf_dev_map(struct pci_dev *pdev) +{ + zf_ep->dbi_vaddr = ioremap(zf_ep->dbi_paddr, + PCIE_DPU_EP_DBI_SIZE * PCIE_DPU_EP_NUM); + if (!zf_ep->dbi_vaddr) { + pci_release_mem_regions(pdev); + return -ENODEV; + } + + zf_ep->vsock_vaddr = + 
ioremap(zf_ep->vsock_paddr, pci_resource_len(pdev, BAR_0)); + if (!zf_ep->vsock_vaddr) { + iounmap(zf_ep->dbi_vaddr); + pci_release_mem_regions(pdev); + return -ENODEV; + } + + zf_ep->mpf_vaddr = + ioremap(zf_ep->mpf_paddr, pci_resource_len(pdev, BAR_0)); + if (!zf_ep->mpf_vaddr) { + iounmap(zf_ep->dbi_vaddr); + pci_release_mem_regions(pdev); + return -ENODEV; + } + + return 0; +} + +static void zf_dev_unmap(struct pci_dev *pdev) +{ + if (zf_ep->dbi_vaddr) + iounmap(zf_ep->dbi_vaddr); + if (zf_ep->vsock_vaddr) + iounmap(zf_ep->vsock_vaddr); + pci_release_mem_regions(pdev); +} + +int pcie_zte_zf_signal_epc_dev_init(uint32_t ep_idx) +{ + int ret = 0; + struct pci_dev *pdev = zf_ep->mpf_pdev; + + if (epc_init_flag[ep_idx]) { + DH_LOG_DEBUG(MODULE_MPF, "zf_mpf ep%d is already init\n", + ep_idx); + return ret; + } + + ret = pci_zte_epc_dev_init_one(pdev, ep_idx); + if (zf_ep->dpu_ep_array[ep_idx] == NULL) { + DH_LOG_ERR(MODULE_MPF, "pci_zte_epc_dev_init_one ep%d failed\n", + ep_idx); + return -ENODEV; + } + DH_LOG_INFO(MODULE_MPF, "pci_zte_epc_dev_init ep%d success!\n", ep_idx); + + ret = pcie_zf_dma_init(zf_ep->dpu_ep_array[ep_idx], pdev); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "pcie_zf_dma_init failed\n"); + return ret; + } + epc_init_flag[ep_idx] = 1; + + return ret; +} + +static int pci_zte_zf_epc_dev_init(void) +{ + int ret = 0; + int ep_idx = 0; + + for (ep_idx = 0; ep_idx < PCIE_DPU_EP_NUM; ep_idx++) { + if (epc_init_flag[ep_idx]) { + DH_LOG_ERR(MODULE_MPF, "ep%d is already init\n", + ep_idx); + continue; + } + + if (!is_pcie_ep_link(ep_idx)) { + DH_LOG_ERR(MODULE_MPF, "ep%d is not link\n", ep_idx); + continue; + } + + pcie_zte_zf_signal_epc_dev_init(ep_idx); + } + + return ret; +} + +int pcie_zte_zf_epc_module_init(struct dh_core_dev *dh_dev, + const struct pci_device_id *id) +{ + int ret = -ENXIO; + struct pci_dev *vsock_pdev = NULL; + + DH_LOG_INFO(MODULE_MPF, "enter\n"); + + if (IS_ERR_OR_NULL(dh_dev) || IS_ERR_OR_NULL(id)) { + DH_LOG_ERR(MODULE_MPF, "dh_dev 
or id is NULL\n"); + return -EINVAL; + } + + dh_dev->zf_ep = (struct pcie_zf_ep *)kzalloc(sizeof(struct pcie_zf_ep), + GFP_KERNEL); + if (!dh_dev->zf_ep) { + DH_LOG_ERR(MODULE_MPF, "kzalloc zf_ep err\n"); + return -ENODEV; + } + zf_ep = dh_dev->zf_ep; + + dh_dev->zf_ep->dpu_ep_array = kzalloc( + PCIE_DPU_EP_NUM * sizeof(struct pcie_dpu_ep *), GFP_KERNEL); + if (!dh_dev->zf_ep->dpu_ep_array) { + DH_LOG_ERR(MODULE_MPF, "kzalloc dpu_ep_array err\n"); + ret = -ENODEV; + goto free_zf_ep; + } + + ret = pci_enable_sriov(dh_dev->pdev, 1); // 启用mpf的vf0 + if (ret) { + DH_LOG_ERR(MODULE_MPF, "Failed to enable SR-IOV: %d\n", ret); + goto free_dpu_ep_array; + } + + vsock_pdev = + pci_get_device(PCI_VENDOR_ID_ZTE, PCI_DID_DPUA_VSOCK_VF, NULL); + if (!vsock_pdev) { + vsock_pdev = pci_get_device(PCI_VENDOR_ID_ZTE, + PCI_DID_DPUB_VSOCK_VF, NULL); + if (!vsock_pdev) { + DH_LOG_ERR(MODULE_MPF, + "Failed to find vsock_vf_dev %d\n", ret); + goto disable_sriov; + } + } + + dh_dev->zf_ep->mpf_pdev = dh_dev->pdev; + dh_dev->zf_ep->dbi_paddr = pci_resource_start(dh_dev->pdev, BAR_2); + dh_dev->zf_ep->mpf_paddr = pci_resource_start(dh_dev->pdev, BAR_0); + dh_dev->zf_ep->vsock_paddr = pci_resource_start(vsock_pdev, BAR_0); + dh_dev->zf_ep->ob_size = pci_resource_len(vsock_pdev, BAR_0) >> + EP_ID_LEN; + dh_dev->zf_ep->dpu_ep_num = PCIE_DPU_EP_NUM; + + ret = zf_dev_map(dh_dev->pdev); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "zf_dev_map err\n"); + goto disable_sriov; + } + + ret = pci_zte_zf_epc_dev_init(); + if (ret) { + goto unmap; + } + + if (pcie_zte_zf_cfg_file_init(dh_dev)) { + DH_LOG_ERR(MODULE_MPF, "pcie_zte_zf_cfg_file_init is err!\n"); + } + + DH_LOG_INFO(MODULE_MPF, "INFO:the EP0~3 pci_dev created successed!\n"); + return ret; +unmap: + zf_dev_unmap(dh_dev->pdev); +disable_sriov: + pci_disable_sriov(dh_dev->pdev); +free_dpu_ep_array: + kfree(dh_dev->zf_ep->dpu_ep_array); +free_zf_ep: + kfree(dh_dev->zf_ep); + return ret; +} + +void pcie_zte_zf_epc_free(struct dh_core_dev *dh_dev) +{ + 
//TODO:removed lose some step + pcie_zte_zf_cfg_file_exit(); + + if (IS_ERR_OR_NULL(dh_dev)) { + DH_LOG_ERR(MODULE_MPF, "dh_dev or id is NULL\n"); + return; + } + + pci_disable_sriov(dh_dev->pdev); + zf_dev_unmap(dh_dev->pdev); + pci_zte_epc_dev_free(dh_dev->pdev); + kfree(dh_dev->zf_ep->dpu_ep_array); + kfree(dh_dev->zf_ep); + DH_LOG_INFO(MODULE_MPF, "the EP0~3 pci_dev removed successed!\n"); +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.h b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.h new file mode 100644 index 000000000000..7c84a13cadf7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-epc.h @@ -0,0 +1,267 @@ +#ifndef __PCIE_ZTE_ZF_EPC_H +#define __PCIE_ZTE_ZF_EPC_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _cplusplus +extern "C" { +#endif + +#define ZF_DISABLE 0x0 +#define ZF_ENABLE 0x1 + +// ZF INFO +#define PCI_VENDOR_ID_ZTE 0x1cf2 +#define PCI_DID_DPUA_VSOCK_VF 0x8038 +#define PCI_DID_DPUB_VSOCK_VF 0x8039 +#define PCIE_DPU_EP_CLASS_NAME 32 + +#define PCIE_DPU_EP_NUM 4 +#define EP_ID_LEN 4 +#define PCIE_DPU_PF_NUMS 8 +#define PCIE_DPU_IATU_NUM 41 +#define PCIE_VF_BARS_OFF 7 +#define PCIE_BAR0_ADDR_SET(off) \ + (((off)&0xFFFF) | (((off)&0xFFFFFFFFFFFF0000) << EP_ID_LEN)) + +#define PCIE_DPU_PF_INITIAL_ID 0x80371CF2 +#define PCIE_DPU_PF_DEFAUTL_ID1 0x10011af4 +#define PCIE_DPU_PF_DEFAUTL_ID2 0x80531cf2 +#define PCIE_DPU_PF_DEFAUTL_ID3 0x80371CF2 +#define PCIE_DPU_PF_DEFAUTL_ID4 0x80331cf2 +#define PCIE_DPU_PF_DEFAUTL_CLASSCODE 0x02000000 + +// dpu func_no&addr set +#define PCIE_DPU_EP_FUNC_IS_VF BIT(7) +#define PCIE_DPU_EP_GET_PF_NO 0x7f +#define DBI_VF_CFG_OFFSET_BIT 4 +#define VF_ACT_BIT BIT(3) +#define isPF(func_no) ((func_no & PCIE_DPU_EP_FUNC_IS_VF) ? 
0 : 1) + +#define ZF_PREFIX_ADDR 0x9000000000000000 +#define DEFAULT_DBI_ATU_OFFSET 0x6000000 +#define PCIE_DPU_EP_DBI_SIZE 0x8000000 +#define PCIE_DPU_EP_OUTBOUND_SIZE 0x40000000 +#define PCIE_DPU_EP_DBI2_OFFSET 0x2000000 +#define PCIE_DPU_MPF_CSR_OFFSET 0x14000 +#define PCIE_DPU_MPF_CSR_ADDR(offset) \ + PCIE_BAR0_ADDR_SET(PCIE_DPU_MPF_CSR_OFFSET + offset) +#define PCIE_DPU_EP_FUNC_CFG_SIZE 0X1000 +#define PCIE_DPU_EP_CSR_SIZE 0x2000 +#define PCIE_DPU_EP_CSR_LTSSM_ADDR 0x150 +#define LTSSM_EN_VAL 0x11 +#define PCIE_DPU_EP_CSR_PRST_ADDR 0x448 +#define PCIE_DPU_EP_CSR_VIRT_ADDR 0x1200 +#define BAR4_DEFAULT_SIZE 0x10000 + +// epc features +#define PCIE_DPU_EP_REAERVED_BAR 0x30 +#define PCIE_DPU_EP_BAR_FIXED_64BIT 0x15 +#define PCIE_DPU_EP_ALIGN 0x1000 + +#define EP_ID_SHIFT 16 +#define EP_ADDR_MASK ((1 << EP_ID_SHIFT) - 1) +#define EP_DPU_PA(addr, ep_id) \ + ((addr & ~EP_ADDR_MASK) << EP_ID_LEN | (addr & EP_ADDR_MASK) | \ + ep_id << EP_ID_SHIFT) + +// PCIE Register +#if 1 +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 +#define LINK_WAIT_MAX_IATU_RETRIES 0x10 +#define LINK_WAIT_IATU 10 + +// PCIE_PORT_DEBUG1 cap +#define PCIE_PORT_DEBUG1 0x72C +#define PCIE_PORT_DEBUG1_ZTE_ZF_LINK_UP BIT(4) +#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29) + +// PCIE CAP +#define PCIE_ECAP_POINTER_OFF 0x100 +#define PCIE_ECAP_VSEC_ID 0x0B + +#define PCIE_NEXT_BAR_OFFSET 0x4 +#define PCIE_DEFAULT_BAR_FLAG (BIT(3) | BIT(2)) +#define PCIE_SRIOV_ECAP_DEVICE_ID 0x1a +#define PCIE_SRIOV_ECAP_BAR0_OFFSET 0x24 +#define PCIE_SRIOV_ECAP_BAR4_OFFSET 0x34 +#define PCIE_SRIOV_CTRL 0x08 +#define PCIE_SRIOV_TOTAL_VFS 0x0e +#define PCIE_SRIOV_CTRL_VFE 0x01 + +#define PCIE_MSI_ADDR_LO 0x820 +#define PCIE_MSI_ADDR_HI 0x824 +#define PCIE_MSI_INTR0_ENABLE 0x828 +#define PCIE_MSI_INTR0_MASK 0x82C +#define PCIE_MSI_INTR0_STATUS 0x830 + +#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0 +#define PORT_MLTI_UPCFG_SUPPORT BIT(7) + +/* ATU register*/ +#define 
PCIE_ATU_CR1 0x904 +#define PCIE_ATU_TYPE_MEM 0x0 +#define PCIE_ATU_TYPE_IO 0x2 +#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20) +#define PCIE_ATU_FUNC_NUM_MASK 0xF00000 +#define PCIE_ATU_BAR_NUM_MASK 0x700 +#define PCIE_ATU_ENABLE BIT(31) +#define PCIE_ATU_BAR_MODE_ENABLE BIT(30) +#define PCIE_ATU_CFG_SHIFT_MODE BIT(28) +#define PCIE_ATU_DMA_BYPSS BIT(27) +#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19) +#define PCIE_ATU_VFBAR_MATCH_MODE_ENABLE BIT(26) +#define PCIE_ATU_VF_MATCH_ENABLE BIT(20) +#define PCIE_ATU_OB_VF_ACTIVE BIT(31) + +/*MSIX register*/ +#define MSIX_ADDRESS_MATCH_LOW_OFF 0x940 +#define MSI_ADDRESS_MATCH_EN BIT(0) +#define MSIX_ADDRESS_MATCH_HIGH_OFF 0x944 +#define MSIX_DOORBELL_OFF 0x948 +#define MSIX_DOORBELL_PF 24 +#define MSIX_DOORBELL_PF_MASK 0x1F +#define MSIX_DOORBELL_VF 16 +#define MSIX_DOORBELL_VF_MASK 0xFF +#define MSIX_DOORBELL_VF_ACTIVE BIT(15) +#define MSIX_DOORBELL_VECTOR 0 +#define MSIX_DOORBELL_VECTOR_MASK 0x7FF + +#define LINK_WAIT_DMA 20 + +#define PCIE_MISC_CONTROL_1_OFF 0x8BC +#define PCIE_DBI_RO_WR_EN BIT(0) + +#define PCIE_MSIX_DOORBELL 0x948 +#define PCIE_MSIX_DOORBELL_PF_SHIFT 24 +#define PCIE_MSIX_DOORBELL_VF_SHIFT 16 +#define MSIX_DOORBELL_VF_ACTIVE BIT(15) + +/* Register address builder */ +#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((region) << 9) +#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) (((region) << 9) | BIT(8)) + +#define PCIE_ATU_UNR_REGION_CTRL1 0x00 +#define PCIE_ATU_UNR_REGION_CTRL2 0x04 +#define PCIE_ATU_UNR_LOWER_BASE 0x08 +#define PCIE_ATU_UNR_UPPER_BASE 0x0C +#define PCIE_ATU_UNR_LOWER_LIMIT 0x10 +#define PCIE_ATU_UNR_LOWER_TARGET 0x14 +#define PCIE_ATU_UNR_UPPER_TARGET 0x18 +#define PCIE_ATU_UNR_REGION_CTRL3 0x1c +#define PCIE_ATU_UNR_UPPER_LIMIT 0x20 +#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13) + +#endif + +enum pcie_dpu_func_type { + PCIE_FUNC_TYPE_PF = 0, + PCIE_FUNC_TYPE_VF, + PCIE_FUNC_TYPE_NUM +}; + +enum pcie_dpu_as_type { + PCIE_DPU_AS_UNKNOWN, + PCIE_DPU_AS_MEM, + PCIE_DPU_AS_IO, +}; + +enum 
pcie_dpu_region_type { + PCIE_DPU_REGION_UNKNOWN, + PCIE_DPU_REGION_INBOUND, + PCIE_DPU_REGION_OUTBOUND, +}; + +struct pcie_dpu_ep_func { + struct list_head list; + u8 func_no; + u8 vfunc_no; + u8 msi_cap; /* MSI capability offset */ + u8 msix_cap; /* MSI-X capability offset */ +}; + +struct pcie_pf_cfg_info { + u32 vendorid; + u32 deviceid; + u32 class_revision; + u32 subsys_vendor_id; + u32 subsys_id; +}; + +struct pcie_dpu_ep { + int ep_id; + int permissible_pf_map; + struct pci_epc *epc; + struct platform_device *zf_pdev; + struct platform_device *zf_pdev_dma; + struct dma_device *wr_dd; + struct dma_device *rd_dd; + + void __iomem *dbi_base; + void __iomem *atu_base; + + struct list_head func_list; + int bar_to_atu[PCIE_DPU_PF_NUMS][(PCI_STD_NUM_BARS * 2) + 1]; + int vf_total_num[PCIE_DPU_PF_NUMS]; + phys_addr_t *ob_src_addr; + unsigned long *ib_window_map; + unsigned long *ob_window_map; + spinlock_t ib_window_lock; + u32 num_ib_windows; + u32 num_ob_windows; + + // 配置信息保存 + struct pcie_pf_cfg_info cfg_info[PCIE_DPU_PF_NUMS]; +}; + +struct pcie_zf_ep { + struct pci_dev *mpf_pdev; + unsigned long dbi_paddr; + unsigned long mpf_paddr; + unsigned long vsock_paddr; + unsigned long ob_size; + void __iomem *dbi_vaddr; + void __iomem *vsock_vaddr; + void __iomem *mpf_vaddr; + int dpu_ep_num; + struct pcie_dpu_ep **dpu_ep_array; + struct device_driver *dma_driver; +}; + +int pcie_zte_zf_epc_module_init(struct dh_core_dev *dh_dev, + const struct pci_device_id *id); +int pcie_zf_dma_init(struct pcie_dpu_ep *dpu_dev, struct pci_dev *pdev); +void pcie_zte_zf_epc_free(struct dh_core_dev *dh_dev); +void pcie_zf_dma_free(struct pcie_dpu_ep *dpu_dev, struct pci_dev *pdev); +int zf_pcie_get_hdma_chan(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct dma_chan **rchan, struct dma_chan **wchan); +u32 cfg_phy_rmw(u64 phy_addr, u32 value, u32 mask); +int pcie_zte_epc_ob_read(struct pci_epc *epc, phys_addr_t phys_addr, + unsigned int size, unsigned int *val); + +#ifdef 
__cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.c b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.c new file mode 100644 index 000000000000..5f920a0c2299 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.c @@ -0,0 +1,752 @@ +#include "pcie-zte-zf-hdma.h" + +static struct task_struct *callback_thread[PCIE_DPU_EP_NUM]; + +static int __attribute__((unused)) cfg_phy_write(u32 value, u64 PhyAddr) +{ + void __iomem *virt_addr = NULL; + u64 tmp_addr = 0; + u64 offset = 0; + u64 size = 0; + + offset = PhyAddr % 0x1000; + if (PhyAddr < offset) { + DH_LOG_ERR(MODULE_MPF, + "data overflow! PhyAddr=0x%llx, offset=0x%llx\n", + PhyAddr, offset); + return PCIBIOS_BAD_REGISTER_NUMBER; + } else { + tmp_addr = PhyAddr - offset; + } + + if (offset <= (0x1000 - 4)) { + size = 0x1000; + } else { + size = 2 * 0x1000; + } + + virt_addr = ioremap(tmp_addr, size); + if (NULL == virt_addr) { + DH_LOG_ERR(MODULE_MPF, "cfg_write ioremap failed!\n"); + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + writel(value, (virt_addr + offset)); + + iounmap(virt_addr); + return 0; +} + +static inline struct zf_hdma_chan *to_zf_hdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct zf_hdma_chan, zxdh_vc.chan); +} + +static u64 zte_pcie_dma_atu_addr_remapping(u64 addr_input) +{ + u64 addr_output = 0; + // DMA and ATU地址重映射,将地址的bit12~18左移4位,设置bit15 = 0 + addr_output = + (((addr_input & (0x7F << 12)) << 4) | (addr_input & 0xFFF)) & + (~(1UL << 15)); + + return addr_output; +} + +static inline void read_ch(struct zf_hdma_chan *zf_chan, u32 is_read, + int offset, u32 *val) +{ + u64 register_offet = 0; + + void __iomem *addr = zf_chan->base_addr; + + register_offet = + zte_pcie_dma_atu_addr_remapping( + ((u64)zf_chan->id * (u64)ZF_HDMA_PER_CHANNEL_SIZE)) + + ((u64)is_read * (u64)ZF_HDMA_RDCH_OFFSET); + *val = readl(addr + register_offet + offset); +} + +static inline void 
write_ch(struct zf_hdma_chan *zf_chan, u32 is_read, + int offset, u32 val) +{ + u64 register_off = 0; + void __iomem *addr = zf_chan->base_addr; + + register_off = + zte_pcie_dma_atu_addr_remapping( + ((u64)zf_chan->id * (u64)ZF_HDMA_PER_CHANNEL_SIZE)) + + ((u64)is_read * (u64)ZF_HDMA_RDCH_OFFSET); + writel(val, addr + register_off + offset); +} + +static inline void rmw_ch(struct zf_hdma_chan *zf_chan, u32 is_read, int offset, + u32 val, u32 mask) +{ + u32 reg_val = 0; + + read_ch(zf_chan, is_read, offset, ®_val); + reg_val &= (~mask); + reg_val |= (val & mask); + write_ch(zf_chan, is_read, offset, reg_val); +} + +static int zf_hdma_alloc_chan_resources(struct dma_chan *chan) +{ + u32 is_read = 0; + // struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + + if (chan->device->directions == BIT(DMA_MEM_TO_DEV)) { + is_read = HDMA_RD; + } else if (chan->device->directions == BIT(DMA_DEV_TO_MEM)) { + is_read = HDMA_WR; + } else { + DH_LOG_ERR(MODULE_MPF, "err direct\n"); + return -EINVAL; + } + + /* 开启dma通道中断 */ + // rmw_ch(zf_chan, is_read, HDMA_INT_SETUP_OFF, 0x0 << HDMA_INT_MASK_BIT, HDMA_INT_MASK << HDMA_INT_MASK_BIT); + // rmw_ch(zf_chan, is_read, HDMA_INT_SETUP_OFF, 0x1 << HDMA_LSIE_BIT, HDMA_LSIE_MASK << HDMA_LSIE_BIT); + + return 0; +} + +static void zf_hdma_free_chan_resources(struct dma_chan *chan) +{ + u32 is_read = 0; + // struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + + if (chan->device->directions == BIT(DMA_MEM_TO_DEV)) { + is_read = HDMA_RD; + } else if (chan->device->directions == BIT(DMA_DEV_TO_MEM)) { + is_read = HDMA_WR; + } else { + DH_LOG_ERR(MODULE_MPF, "err direct\n"); + return; + } + + /* 释放dma通道中断 */ + // rmw_ch(zf_chan, is_read, HDMA_INT_SETUP_OFF, 0x7 << HDMA_INT_MASK_BIT, HDMA_INT_MASK << HDMA_INT_MASK_BIT); + // rmw_ch(zf_chan, is_read, HDMA_INT_SETUP_OFF, 0x0 << HDMA_LSIE_BIT, HDMA_LSIE_MASK << HDMA_LSIE_BIT); +} + +static int zf_hdma_device_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + // struct 
zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + // u32 is_read = HDMA_WR; + + return 0; +} + +static struct dma_async_tx_descriptor * +zf_hdma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + u32 is_read = HDMA_WR; + struct zf_hdma_sqe *sqe = NULL, *tem_sqe = NULL; + struct zf_hdma_tx *tx = NULL, *tem_tx = NULL; + struct dma_async_tx_descriptor *tx_desc = NULL; + struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + int tx_num = 0; + + DH_LOG_DEBUG(MODULE_MPF, "enter\n"); + if (chan->device->directions == BIT(DMA_MEM_TO_DEV)) { + is_read = HDMA_RD; + dest |= ZF_PREFIX_ADDR; + } else if (chan->device->directions == BIT(DMA_DEV_TO_MEM)) { + is_read = HDMA_WR; + src |= ZF_PREFIX_ADDR; + } else { + DH_LOG_ERR(MODULE_MPF, "err direct\n"); + return NULL; + } + + tem_sqe = zf_chan->sqe_list; + sqe = devm_kzalloc(&zf_chan->ep_pdev->dev, sizeof(struct zf_hdma_sqe), + GFP_KERNEL); + if (!sqe) { + DH_LOG_ERR(MODULE_MPF, "err alloc sqe\n"); + return NULL; + } + sqe->length = len; + sqe->src_addr = src; + sqe->dst_addr = dest; + + while (NULL != tem_sqe->next) { + tem_sqe = tem_sqe->next; + } + tem_sqe->next = sqe; + + tx_desc = + zxdh_vchan_tx_prep(&zf_chan->zxdh_vc, &zf_chan->zxdh_vd, flags); + tem_tx = zf_chan->tx_list; + tx = devm_kzalloc(&zf_chan->ep_pdev->dev, sizeof(struct zf_hdma_tx), + GFP_KERNEL); + if (!tx) { + DH_LOG_ERR(MODULE_MPF, "err alloc tx\n"); + devm_kfree(&zf_chan->ep_pdev->dev, sqe); + return NULL; + } + tx->tx_desc = tx_desc; + + while (NULL != tem_tx->next) { + tem_tx = tem_tx->next; + tx_num++; + } + tx->tx_id = tx_num; + tem_tx->next = tx; + + return tx_desc; +} + +static int zf_hdma_terminate_all(struct dma_chan *chan) +{ + u32 is_read = HDMA_WR; + struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + + if (chan->device->directions == BIT(DMA_MEM_TO_DEV)) { + is_read = HDMA_RD; + } + + write_ch(zf_chan, is_read, HDMA_DOORBELL_OFF, HDMA_DOORBELL_STOP); + + return 0; +} + +static enum 
dma_status zf_hdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return 0; +} + +static struct dma_async_tx_descriptor * +zf_hdma_device_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + u32 is_read = HDMA_WR; + + DH_LOG_INFO(MODULE_MPF, "enter\n"); + + if (chan->device->directions == BIT(DMA_MEM_TO_DEV)) { + is_read = HDMA_RD; + } else if (chan->device->directions == BIT(DMA_DEV_TO_MEM)) { + is_read = HDMA_WR; + } else { + DH_LOG_ERR(MODULE_MPF, "err direct\n"); + return NULL; + } + + return zxdh_vchan_tx_prep(&zf_chan->zxdh_vc, &zf_chan->zxdh_vd, flags); +} + +static struct dma_async_tx_descriptor *zf_hdma_device_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t paddr, size_t len, size_t count, + enum dma_transfer_direction direction, unsigned long flags) +{ + struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + + return zxdh_vchan_tx_prep(&zf_chan->zxdh_vc, &zf_chan->zxdh_vd, flags); +} + +static void free_used_sqe(struct zf_hdma_chan *zf_chan) +{ + struct zf_hdma_sqe *temp = NULL; + + temp = zf_chan->sqe_list->next; + zf_chan->sqe_list->next = zf_chan->sqe_list->next->next; + + devm_kfree(&zf_chan->ep_pdev->dev, temp); +} + +static void zf_hdma_register_set(struct zf_hdma_chan *zf_chan, u32 is_read, + struct zf_hdma_sqe *sqe) +{ + /* DMA Engine enable */ + write_ch(zf_chan, is_read, HDMA_EN_OFF, HDMA_EN); + + /* DMA transfer Size */ + write_ch(zf_chan, is_read, HDMA_XFERSIZE_OFF, (u32)sqe->length); + + /* DMA SAR & DAR */ + write_ch(zf_chan, is_read, HDMA_SAR_LOW_OFF, + (sqe->src_addr & 0xffffffff)); + write_ch(zf_chan, is_read, HDMA_SAR_HIGH_OFF, + ((sqe->src_addr >> 32) & 0xffffffff)); + write_ch(zf_chan, is_read, HDMA_DAR_LOW_OFF, + (sqe->dst_addr & 0xffffffff)); + write_ch(zf_chan, is_read, HDMA_DAR_HIGH_OFF, + ((sqe->dst_addr >> 32) & 
0xffffffff)); + + /* func_no */ + write_ch(zf_chan, is_read, HDMA_FUNC_NUM_OFF, + ((zf_chan->func_no & PCIE_DPU_EP_GET_PF_NO) | + (zf_chan->vfunc_no << HDMA_FUNC_NUM_OFF_VF) | + (!isPF(zf_chan->func_no) << HDMA_FUNC_NUM_OFF_VF_ENABLE))); + + /* DMA Doorbell */ + write_ch(zf_chan, is_read, HDMA_DOORBELL_OFF, HDMA_DOORBELL_START); + return; +} + +static void zf_hdma_issue_pending(struct dma_chan *chan) +{ + // u32 is_read = HDMA_WR; + unsigned long flags = 0; + struct zf_hdma_chan *zf_chan = NULL; + struct zf_hdma_tx *tx_temp = NULL; + + zf_chan = to_zf_hdma_chan(chan); + + tx_temp = zf_chan->tx_list->next; + while (tx_temp->next != NULL) { + tx_temp = tx_temp->next; + } + + tx_temp->callback = tx_temp->tx_desc->callback; + tx_temp->callback_param = tx_temp->tx_desc->callback_param; + + spin_lock_irqsave(&zf_chan->zxdh_vc.lock, flags); + zxdh_vchan_issue_pending(&zf_chan->zxdh_vc); + spin_unlock_irqrestore(&zf_chan->zxdh_vc.lock, flags); + + return; +} + +static void free_used_tx(struct zf_hdma_chan *zf_chan) +{ + struct zf_hdma_tx *temp = NULL; + + temp = zf_chan->tx_list->next; + zf_chan->tx_list->next = zf_chan->tx_list->next->next; + + devm_kfree(&zf_chan->ep_pdev->dev, temp); +} + +int zf_hdma_wr_handler(void *data) +{ + u32 chan_status, chan_int_status; + struct pcie_dpu_ep *dpu_dev = data; + struct dma_chan *chan = NULL; + struct zf_hdma_chan *zf_chan = NULL; + struct zf_hdma_tx *zf_tx_desc = NULL; + + list_for_each_entry(chan, &dpu_dev->wr_dd->channels, device_node) { + zf_chan = to_zf_hdma_chan(chan); + if (!zf_chan->tx_list->next || + !zf_chan->tx_list->next->tx_desc || + !zf_chan->tx_list->next->callback) { + DH_LOG_INFO(MODULE_MPF, "wr_zf_chan%d is not used!\n", + zf_chan->id); + continue; + } else { + zf_tx_desc = zf_chan->tx_list->next; + } + read_ch(zf_chan, HDMA_WR, HDMA_STATUS_OFF, &chan_status); + read_ch(zf_chan, HDMA_WR, HDMA_INT_STATUS_OFF, + &chan_int_status); + + // if (((chan_status & HDMA_STATUS_OFF_STATUS) & HDMA_STATUS_STOPPED) && 
(chan_int_status & HDMA_STOP_INT_STATUS)) + // { + spin_lock(&zf_chan->zxdh_vc.lock); + zf_tx_desc->callback(zf_tx_desc->callback_param); + free_used_tx(zf_chan); + zf_chan->is_busy = HDMA_CHAN_IDLE; + spin_unlock(&zf_chan->zxdh_vc.lock); + break; + // } + } + + return 0; +} + +int zf_hdma_rd_handler(void *data) +{ + u32 chan_status = 0, chan_int_status = 0; + struct pcie_dpu_ep *dpu_dev = data; + struct dma_chan *chan = NULL; + struct zf_hdma_chan *zf_chan = NULL; + struct zf_hdma_tx *zf_tx_desc = NULL; + + list_for_each_entry(chan, &dpu_dev->rd_dd->channels, device_node) { + zf_chan = to_zf_hdma_chan(chan); + if (!zf_chan->tx_list->next || + !zf_chan->tx_list->next->tx_desc || + !zf_chan->tx_list->next->callback) { + DH_LOG_INFO(MODULE_MPF, "rd_zf_chan%d is not used!\n", + zf_chan->id); + continue; + } else { + zf_tx_desc = zf_chan->tx_list->next; + } + read_ch(zf_chan, HDMA_RD, HDMA_STATUS_OFF, &chan_status); + read_ch(zf_chan, HDMA_RD, HDMA_INT_STATUS_OFF, + &chan_int_status); + + // if (((chan_status & HDMA_STATUS_OFF_STATUS) & HDMA_STATUS_STOPPED) && (chan_int_status & HDMA_STOP_INT_STATUS)) + // { + spin_lock(&zf_chan->zxdh_vc.lock); + zf_tx_desc->callback(zf_tx_desc->callback_param); + free_used_tx(zf_chan); + zf_chan->is_busy = HDMA_CHAN_IDLE; + spin_unlock(&zf_chan->zxdh_vc.lock); + break; + // } + } + + return 0; +} + +static void zf_hdma_desc_free(struct zxdh_virt_dma_desc *zxdh_vd) +{ + dma_descriptor_unmap(&zxdh_vd->tx); +} + +static bool zf_dma_filter_fn(struct dma_chan *chan, void *node) +{ + unsigned long dev_node = (unsigned long)dev_to_node(&chan->dev->device); + return (dev_node == (unsigned long)node); +} + +struct dma_chan *zte_get_chan_for_dma(struct pci_epc *epc, u32 is_read) +{ + int node = 0; + dma_cap_mask_t dma_mask; + struct dma_chan *chan = NULL; + struct pcie_dpu_ep *ep = NULL; + + if (IS_ERR_OR_NULL(epc)) { + DH_LOG_ERR(MODULE_MPF, "not found epc\n"); + return NULL; + } + + ep = epc_get_drvdata(epc); + if (!ep) { + 
DH_LOG_ERR(MODULE_MPF, " not found ep\n"); + return NULL; + } + + if (is_read) { + node = dev_to_node(ep->rd_dd->dev); + } else { + node = dev_to_node(ep->wr_dd->dev); + } + + dma_cap_zero(dma_mask); + dma_cap_set(DMA_MEMCPY, dma_mask); + chan = dma_request_channel(dma_mask, zf_dma_filter_fn, + (void *)(unsigned long)node); + + return chan; +} +EXPORT_SYMBOL_GPL(zte_get_chan_for_dma); + +void zte_zf_pcie_set_pfvf_no(struct dma_chan *chan, u8 func_no, u8 vfunc_no) +{ + struct zf_hdma_chan *zf_chan = to_zf_hdma_chan(chan); + + DH_LOG_INFO(MODULE_MPF, "func_no = 0x%x, vfunc_no = 0x%x\n", func_no, + vfunc_no); + + zf_chan->func_no = func_no; + zf_chan->vfunc_no = vfunc_no; +} +EXPORT_SYMBOL_GPL(zte_zf_pcie_set_pfvf_no); + +int zf_pcie_get_hdma_chan(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + struct dma_chan **rchan, struct dma_chan **wchan) +{ + struct dma_chan *rch, *wch; + + wch = zte_get_chan_for_dma(epc, HDMA_WR); + if (IS_ERR_OR_NULL(wch)) { + DH_LOG_ERR(MODULE_MPF, "failed to get write chan\n"); + return -EFAULT; + } + + rch = zte_get_chan_for_dma(epc, HDMA_RD); + if (IS_ERR_OR_NULL(rch)) { + DH_LOG_ERR(MODULE_MPF, "failed to get read chan\n"); + dma_release_channel(rch); + return -EFAULT; + } + + zte_zf_pcie_set_pfvf_no(rch, func_no, vfunc_no); + zte_zf_pcie_set_pfvf_no(wch, func_no, vfunc_no); + + *rchan = rch; + *wchan = wch; + return 0; +} + +static int zf_hdma_virtual_channels_init(struct dma_device *dma_dev, + struct pci_dev *pdev, + void __iomem *addr) +{ + struct zf_hdma_chan *zf_chan = NULL; + u32 i = 0; + + INIT_LIST_HEAD(&dma_dev->channels); + + /* + * Register as many memcpy as we have physical channels, + * we won't always be able to use all but the code will have + * to cope with that situation. 
+ */ + for (i = 0; i < (u32)ZF_HDMA_CHAN_NUM; i++) { + zf_chan = + devm_kzalloc(&pdev->dev, sizeof(*zf_chan), GFP_KERNEL); + if (!zf_chan) { + // TODO free + return -ENOMEM; + } + zf_chan->sqe_list = devm_kzalloc( + &pdev->dev, sizeof(struct zf_hdma_sqe), GFP_KERNEL); + if (!zf_chan->sqe_list) { + // TODO free + return -ENOMEM; + } + zf_chan->tx_list = devm_kzalloc( + &pdev->dev, sizeof(struct zf_hdma_tx), GFP_KERNEL); + if (!zf_chan->tx_list) { + // TODO free + return -ENOMEM; + } + zf_chan->tx_list->tx_id = 0; + + zf_chan->id = i + ZF_HDMA_CHAN_FIRST_IDX; + zf_chan->ep_pdev = pdev; + zf_chan->base_addr = addr + ZF_HDMA_ADDR_OFFSET; + zf_chan->is_busy = HDMA_CHAN_IDLE; + + zf_chan->name = kasprintf(GFP_KERNEL, "chan%d", i); + if (!zf_chan->name) + return -ENOMEM; + + zf_chan->zxdh_vc.desc_free = zf_hdma_desc_free; + + zxdh_vchan_init(&zf_chan->zxdh_vc, dma_dev); + } + + return i; +} + +void zf_hdma_device_init(struct device *dev, struct dma_device *dd, u32 is_read) +{ + dd->device_alloc_chan_resources = + zf_hdma_alloc_chan_resources; // 3,4,8,9 unneccessary + dd->device_free_chan_resources = zf_hdma_free_chan_resources; + dd->device_config = zf_hdma_device_config; + dd->device_tx_status = zf_hdma_tx_status; + dd->device_issue_pending = + zf_hdma_issue_pending; // push pending transactions to hardware + dd->device_prep_dma_memcpy = zf_hdma_prep_dma_memcpy; + dd->device_terminate_all = zf_hdma_terminate_all; + dd->device_prep_slave_sg = + zf_hdma_device_prep_slave_sg; // prepares a slave dma operation + dd->device_prep_dma_cyclic = + zf_hdma_device_prep_dma_cyclic; // prepare a cyclic dma operation suitable for audio. The function takes a buffer of size buf_len. The callback function will be called after period_len bytes have been transferred. 
+ + dd->chancnt = ZF_HDMA_CHAN_NUM; + dd->privatecnt = 0; + dd->copy_align = ZF_HDMA_ALIGN_SIZE; + dd->src_addr_widths = ZF_HDMA_DMA_BUSWIDTHS; + dd->dst_addr_widths = ZF_HDMA_DMA_BUSWIDTHS; + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + dd->dev = dev; + + if (is_read) { + dd->directions = BIT(DMA_MEM_TO_DEV); + } else { + dd->directions = BIT(DMA_DEV_TO_MEM); + } + + dma_cap_zero(dd->cap_mask); + dma_cap_set(DMA_MEMCPY, dd->cap_mask); +} + +int callback_thread_function(void *data) +{ + u32 chan_status = 0; + struct pcie_dpu_ep *dpu_dev = data; + struct dma_chan *chan = NULL; + struct zf_hdma_chan *zf_chan = NULL; + struct zf_hdma_tx *zf_tx_desc = NULL; + + DH_LOG_INFO(MODULE_MPF, "enter!\n"); + + while (!kthread_should_stop()) { + list_for_each_entry(chan, &dpu_dev->rd_dd->channels, + device_node) { + zf_chan = to_zf_hdma_chan(chan); + if (!zf_chan->tx_list->next || + !zf_chan->tx_list->next->tx_desc || + !zf_chan->tx_list->next->callback) { + continue; + } else { + if ((zf_chan->sqe_list->next != NULL) && + (zf_chan->is_busy == HDMA_CHAN_IDLE)) { + zf_hdma_register_set( + zf_chan, HDMA_RD, + zf_chan->sqe_list->next); + zf_chan->is_busy = HDMA_CHAN_USED; + // DH_LOG_DEBUG(MODULE_MPF, "zf_chan->id:%d is_pending\n", zf_chan->id); + free_used_sqe(zf_chan); + } + + zf_tx_desc = zf_chan->tx_list->next; + // DH_LOG_DEBUG(MODULE_MPF, "rd_zf_chan%d is used!\n", zf_chan->id); + + read_ch(zf_chan, HDMA_RD, HDMA_STATUS_OFF, + &chan_status); + + if ((chan_status & HDMA_STATUS_OFF_STATUS) == + HDMA_STATUS_STOPPED && + (zf_chan->is_busy == HDMA_CHAN_USED)) { + spin_lock(&zf_chan->zxdh_vc.lock); + zf_tx_desc->callback( + zf_tx_desc->callback_param); + free_used_tx(zf_chan); + spin_unlock(&zf_chan->zxdh_vc.lock); + zf_chan->is_busy = HDMA_CHAN_IDLE; + } + } + } + + list_for_each_entry(chan, &dpu_dev->wr_dd->channels, + device_node) { + zf_chan = to_zf_hdma_chan(chan); + if (!zf_chan->tx_list->next || + !zf_chan->tx_list->next->tx_desc || + 
!zf_chan->tx_list->next->callback) { + continue; + } else { + if ((zf_chan->sqe_list->next != NULL) && + (zf_chan->is_busy == HDMA_CHAN_IDLE)) { + zf_hdma_register_set( + zf_chan, HDMA_WR, + zf_chan->sqe_list->next); + zf_chan->is_busy = HDMA_CHAN_USED; + // DH_LOG_DEBUG(MODULE_MPF, "zf_chan->id:%d is_pending\n", zf_chan->id); + free_used_sqe(zf_chan); + } + + zf_tx_desc = zf_chan->tx_list->next; + // DH_LOG_DEBUG(MODULE_MPF, "wr_zf_chan%d is used!\n", zf_chan->id); + read_ch(zf_chan, HDMA_WR, HDMA_STATUS_OFF, + &chan_status); + + if ((chan_status & HDMA_STATUS_OFF_STATUS) == + HDMA_STATUS_STOPPED && + (zf_chan->is_busy == HDMA_CHAN_USED)) { + spin_lock(&zf_chan->zxdh_vc.lock); + zf_tx_desc->callback( + zf_tx_desc->callback_param); + free_used_tx(zf_chan); + spin_unlock(&zf_chan->zxdh_vc.lock); + zf_chan->is_busy = HDMA_CHAN_IDLE; + } + } + } + msleep(1); + } + + DH_LOG_INFO(MODULE_MPF, "Kernel thread is stopping\n"); + return 0; +} + +int pcie_zf_dma_init(struct pcie_dpu_ep *dpu_dev, struct pci_dev *pdev) +{ + int ret = 0; + int node = 0; + struct device *dev_wr = NULL, *dev_rd = NULL; + struct dma_device *wr_dd = NULL, *rd_dd = NULL; + + if (IS_ERR_OR_NULL(dpu_dev)) { + DH_LOG_ERR(MODULE_MPF, "err input\n"); + return -ENOENT; + } + + dev_wr = &(dpu_dev->zf_pdev->dev); + dev_rd = &(dpu_dev->zf_pdev_dma->dev); + + node = dev_to_node(dev_wr); + + wr_dd = kzalloc_node(sizeof(struct dma_device), GFP_KERNEL, node); + if (!wr_dd) { + DH_LOG_ERR(MODULE_MPF, "Error kzalloc node\n"); + return -ENODEV; + } + + rd_dd = kzalloc_node(sizeof(struct dma_device), GFP_KERNEL, node); + if (!rd_dd) { + DH_LOG_ERR(MODULE_MPF, "Error kzalloc node\n"); + kfree(wr_dd); + return -ENODEV; + } + + zf_hdma_device_init(dev_wr, wr_dd, HDMA_WR); + zf_hdma_device_init(dev_rd, rd_dd, HDMA_RD); + + zf_hdma_virtual_channels_init(wr_dd, pdev, dpu_dev->dbi_base); + zf_hdma_virtual_channels_init(rd_dd, pdev, dpu_dev->dbi_base); + + ret = dma_async_device_register(wr_dd); + ret |= 
dma_async_device_register(rd_dd); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "dma_async_device_register failed\n"); + goto free_dma_device; + } + + dpu_dev->wr_dd = wr_dd; + dpu_dev->rd_dd = rd_dd; + DH_LOG_INFO(MODULE_MPF, "success!\n"); + + if (dpu_dev->ep_id >= 0 && dpu_dev->ep_id < PCIE_DPU_EP_NUM) { + callback_thread[dpu_dev->ep_id] = kthread_run( + callback_thread_function, dpu_dev, "my_thread"); + if (callback_thread[dpu_dev->ep_id]) { + DH_LOG_INFO(MODULE_MPF, + "Thread created successfully\n"); + } else { + DH_LOG_ERR(MODULE_MPF, "Thread creation failed\n"); + } + } + + return 0; + +free_dma_device: + kfree(wr_dd); + kfree(rd_dd); + return ret; +} + +void pcie_zf_dma_free(struct pcie_dpu_ep *dpu_dev, struct pci_dev *pdev) +{ + if (dpu_dev == NULL || pdev == NULL) { + DH_LOG_ERR(MODULE_MPF, "dpu_dev or pdev is NULL\n"); + return; + } + + if (dpu_dev->ep_id >= 0 && dpu_dev->ep_id < PCIE_DPU_EP_NUM) { + if (callback_thread[dpu_dev->ep_id]) { + kthread_stop(callback_thread[dpu_dev->ep_id]); + DH_LOG_INFO(MODULE_MPF, + "Thread created successfully\n"); + } + } + + if (dpu_dev->wr_dd) { + dma_async_device_unregister(dpu_dev->wr_dd); + kfree(dpu_dev->wr_dd); + } + if (dpu_dev->rd_dd) { + dma_async_device_unregister(dpu_dev->rd_dd); + kfree(dpu_dev->rd_dd); + } +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.h b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.h new file mode 100644 index 000000000000..7f3766cf4e5e --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-hdma.h @@ -0,0 +1,157 @@ +#ifndef __PCIE_ZTE_ZF_HDMA_H +#define __PCIE_ZTE_ZF_HDMA_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pcie-zte-zf-epc.h" +#include "virt-dma.h" + +#define ZF_HDMA_DRIVER_NAME "dh_hdma" +#define ZF_HDMA_ADDR_OFFSET 
(0x6800000) +#define ZF_HDMA_PER_CHANNEL_SIZE (0x200) +#define ZF_HDMA_RDCH_OFFSET (0x100) +#define HDMA_RD 1 +#define HDMA_WR 0 + +/* ZF HDMA init*/ +#define DPU_HDMA_CHAN_NUM (36) +#define ZF_HDMA_CHAN_FIRST_IDX (18) +#define ZF_HDMA_CHAN_NUM (DPU_HDMA_CHAN_NUM - ZF_HDMA_CHAN_FIRST_IDX) +#define ZF_HDMA_VIRT_CHAN_NUM ZF_HDMA_CHAN_NUM +#define ZF_HDMA_ALIGN_SIZE (1) +#define ZF_HDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ZF_HDMA_WAIT_SLEEP_TIMES (10) +#define ZF_HDMA_MAX_WAIT_TIMES (100) + +/* HDMA register*/ +#define HDMA_EN_OFF (0x0) +#define HDMA_DOORBELL_OFF (0x4) +#define HDMA_ELEM_PF_OFF (0x8) +#define HDMA_HANDSHAKE_OFF (0xc) +#define HDMA_LLP_LOW_OFF (0x10) +#define HDMA_LLP_HIGH_OFF (0x14) +#define HDMA_CYCLE_OFF (0x18) +#define HDMA_XFERSIZE_OFF (0x1c) +#define HDMA_XFERSIZE_OFF_COMPLETE (0x0) +#define HDMA_SAR_LOW_OFF (0x20) +#define HDMA_SAR_HIGH_OFF (0x24) +#define HDMA_DAR_LOW_OFF (0x28) +#define HDMA_DAR_HIGH_OFF (0x2c) +#define HDMA_WATERMARK_EN_OFF (0x30) +#define HDMA_CONTROL1_OFF (0x34) +#define HDMA_FUNC_NUM_OFF (0x38) +#define HDMA_FUNC_NUM_OFF_VF_ENABLE (16) +#define HDMA_FUNC_NUM_OFF_VF (17) +#define HDMA_QOS_OFF (0x3c) + +#define HDMA_STATUS_OFF (0x80) +#define HDMA_STATUS_OFF_STATUS (0x3) +#define HDMA_STATUS_STOPPED (0x3) +#define HDMA_INT_STATUS_OFF (0x84) +#define HDMA_STOP_INT_STATUS (0x1) +#define HDMA_INT_SETUP_OFF (0x88) +#define HDMA_INT_MASK_BIT (0x0) +#define HDMA_INT_MASK (0x7) +#define HDMA_LSIE_BIT (0x4) +#define HDMA_LSIE_MASK (0x1) +#define HDMA_INT_CLEAR_OFF (0x8c) +#define HDMA_MSI_STOP_LOW_OFF (0x90) +#define HDMA_MSI_STOP_HIGH_OFF (0x94) +#define HDMA_MSI_WATERMARK_LOW_OFF (0x98) +#define HDMA_MSI_WATERMARK_HIGH_OFF (0x9c) +#define HDMA_MSI_ABORT_LOW_OFF (0xa0) +#define HDMA_MSI_ABORT_HIGH_OFF (0xa4) +#define HDMA_MSI_MSI_MSGD_OFF (0xa8) + +/* HDMA register val*/ +#define HDMA_EN (BIT(0)) +#define HDMA_DOORBELL_STOP (BIT(1)) +#define HDMA_DOORBELL_START (BIT(0)) +#define HDMA_TRANSFER_DONE (0x3) + +/*LL 
module*/ +#define HDMA_LL_CONTROL_OFFSET 0x0 +#define HDMA_LL_SIZE_OFFSET 0x4 +#define HDMA_LL_SAR_LOW_OFFSET 0x8 +#define HDMA_LL_SAR_HIGH_OFFSET 0xc +#define HDMA_LL_DAR_LOW_OFFSET 0x10 +#define HDMA_LL_DAR_HIGH_OFFSET 0x14 +#define HDMA_LL_NEXT_ELEMENT 0x18 +#define HDMA_LL_LINK_CONTROL_OFFSET 0x0 +#define HDMA_LL_LINK_EMPTY_OFFSET 0x4 +#define HDMA_LL_LINK_POINTER_LOW_OFFSET 0x8 +#define HDMA_LL_LINK_POINTER_HIGH_OFFSET 0xc +#define HDMA_LL_DATA_CONTROL 0x1 +#define HDMA_LL_LINK_CONTROL 0x6 +#define HDMA_LL_PREFETCH 0x8 +#define PF_DEPTH 0x1f + +enum hdma_mode { HDMA_MODE_LEGACY = 0, + HDMA_MODE_SLAVE, + HDMA_MODE_UNROLL }; + +enum hdma_chan_status { + HDMA_CHAN_IDLE = 0, + HDMA_CHAN_USED, + HDMA_CHAN_UNDEFINE +}; + +struct zf_hdma_sqe { + size_t length; + dma_addr_t src_addr; + dma_addr_t dst_addr; + struct zf_hdma_sqe *next; +}; + +struct zf_hdma_tx { + int tx_id; + struct dma_async_tx_descriptor *tx_desc; + dma_async_tx_callback callback; + void *callback_param; + struct zf_hdma_tx *next; +}; + +struct hdma_ll_element { + size_t length; + dma_addr_t src_addr; + dma_addr_t dst_addr; +}; + +struct zf_hdma_chan { + u32 id; + struct list_head list; + const char *name; + struct pci_dev *ep_pdev; + struct zxdh_virt_dma_chan zxdh_vc; + struct zxdh_virt_dma_desc zxdh_vd; + void __iomem *base_addr; // 对应HDMA寄存器基址 + struct zf_hdma_sqe *sqe_list; + struct zf_hdma_tx *tx_list; + int is_busy; + u8 func_no; + u8 vfunc_no; +}; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.c b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.c new file mode 100644 index 000000000000..c7412fbb8b3b --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.c @@ -0,0 +1,196 @@ +#include "pcie-zte-zf-json.h" + +static struct kobject *kobj_zxdh_cfg; +static char zxdh_pcie_cfg[PAGE_SIZE]; +static struct dh_core_dev *dh_core_dev; +static int pcie_zte_zf_get_dev_cfg(struct dh_core_dev *core_dev); + +extern int 
zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); + +static ssize_t read_pcie_cfg(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + int ret = 0; + + ret = pcie_zte_zf_get_dev_cfg(dh_core_dev); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "pcie_zte_zf_get_dev_cfg failed.\n"); + } + + return snprintf(buf, PAGE_SIZE, "%s", zxdh_pcie_cfg); +} + +/*使用__ATTR宏初始化zxdh_cfg结构体,该宏定义在include/linux/sysfs.h*/ +struct kobj_attribute zxdh_cfg = + __ATTR(zxdh_pcie_cfg, 0664, read_pcie_cfg, NULL); + +static int pcie_zte_zf_get_zxdh_info(struct dh_core_dev *core_dev, + struct dpu_pf_cfg *pf_cfg) +{ + int ret = 0; + char recv_buffer[RECV_BUFFER_SIZE] = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(core_dev); + + if (RECV_BUFFER_SIZE < + (sizeof(struct dpu_pf_cfg) + BAR_MSG_HEADER_SIZE)) { + DH_LOG_ERR( + MODULE_MPF, + "recv_buffer size is smaller than sizeof struct dpu_pf_cfg \n"); + return -ENOMEM; + } + + in.virt_addr = mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET; + in.payload_addr = pf_cfg; + in.payload_len = sizeof(struct dpu_pf_cfg); + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_MPF_PCIE_INFO; /* 事件号 */ + in.src_pcieid = mpf_dev->pcie_id; + + result.recv_buffer = recv_buffer; + result.buffer_len = sizeof(struct dpu_pf_cfg) + BAR_MSG_HEADER_SIZE; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret) { + DH_LOG_ERR(MODULE_MPF, + "%s zxdh_bar_chan_sync_msg_send failed.\n", + __func__); + return ret; + } + + memcpy(pf_cfg, (char *)(&recv_buffer) + BAR_MSG_HEADER_SIZE, + sizeof(struct dpu_pf_cfg)); + + return ret; +} + +static void pcie_zte_zf_write_pf_info(struct dpu_pf_cfg *pf_cfg, char flag) +{ + char pf_info[100] = { 0 }; + size_t json_cfg_len = 0; + int ret = 0; + + if (flag) { + ret = snprintf( + pf_info, sizeof(pf_info), + "ep:%d pf:%d pf_enable:%d dev_type:%d vid:0x%x did:0x%x 
max_vf:%d\n", + pf_cfg->ep_id, pf_cfg->pf_id, pf_cfg->pf_enable, + pf_cfg->dev_type, pf_cfg->vendor_id, pf_cfg->device_id, + pf_cfg->max_vf); + if (ret < 0 || ret > sizeof(pf_info)) { + return; + } + } else { + ret = snprintf(pf_info, sizeof(pf_info), + "get ep:%d pf:%d info failed\n", pf_cfg->ep_id, + pf_cfg->pf_id); + if (ret < 0 || ret > sizeof(pf_info)) { + return; + } + } + + json_cfg_len = strlen(zxdh_pcie_cfg); + + if (json_cfg_len + strlen(pf_info) < PAGE_SIZE - 1) { + strcat(zxdh_pcie_cfg, pf_info); + } else { + DH_LOG_ERR(MODULE_MPF, "error: zxdh_pcie_cfg len not enough\n"); + } +} + +static int pcie_zte_zf_get_dev_cfg(struct dh_core_dev *core_dev) +{ + int ret = 0; + u8 ep_idx = 0, pf_idx = 0; + struct dpu_pf_cfg dpu_pf[PCIE_DPU_EP_NUM][PCIE_DPU_PF_NUMS] = { 0 }; + + memset(zxdh_pcie_cfg, 0, sizeof(zxdh_pcie_cfg)); + + for (ep_idx = 0; ep_idx < PCIE_DPU_EP_NUM; ep_idx++) { + for (pf_idx = 0; pf_idx < PCIE_DPU_PF_NUMS; pf_idx++) { + // get info + dpu_pf[ep_idx][pf_idx].ep_id = ep_idx; + dpu_pf[ep_idx][pf_idx].pf_id = pf_idx; + ret = pcie_zte_zf_get_zxdh_info( + core_dev, &dpu_pf[ep_idx][pf_idx]); + if (ret) { + DH_LOG_ERR(MODULE_MPF, + "ep%d pf%d get info err!!!\n", + ep_idx, pf_idx); + pcie_zte_zf_write_pf_info( + &dpu_pf[ep_idx][pf_idx], ZF_DISABLE); + return ret; + } + // write zxdh_pcie_cfg + pcie_zte_zf_write_pf_info(&dpu_pf[ep_idx][pf_idx], + ZF_ENABLE); + } + } + + return ret; +} + +void delete_folder(char *path) +{ + int ret = 0; + struct path lookup_path; + struct dentry *dentry = NULL; + struct inode *dir = NULL; + + ret = kern_path(path, LOOKUP_DIRECTORY, &lookup_path); + if (ret) { + return; + } + + dentry = lookup_path.dentry; + dir = dentry->d_inode; + + ret = vfs_rmdir(dir, dentry); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "Failed to remove directory: %s\n", + path); + } + + dput(dentry); +} + +int pcie_zte_zf_cfg_file_init(struct dh_core_dev *core_dev) +{ + int ret = 0; + char path[] = ZXDH_SYSFS_PATH; + + delete_folder(path); + + 
kobj_zxdh_cfg = kobject_create_and_add(ZXDH_SYSFS_DIR, NULL); + if (kobj_zxdh_cfg == NULL) { + DH_LOG_ERR(MODULE_MPF, "zxdh_cfg sysfs create failed\n"); + return -ENOMEM; + } + + if (sysfs_create_file(kobj_zxdh_cfg, &zxdh_cfg.attr)) { + DH_LOG_ERR(MODULE_MPF, "zxdh_cfg file create failed.\n"); + goto error_sysfs; + } + + dh_core_dev = core_dev; + if (pcie_zte_zf_get_dev_cfg(dh_core_dev)) { + DH_LOG_ERR(MODULE_MPF, "pcie_zte_zf_get_dev_cfg failed.\n"); + } + + return ret; +error_sysfs: + kobject_put(kobj_zxdh_cfg); + kobj_zxdh_cfg = NULL; + return -ENOENT; +} + +void pcie_zte_zf_cfg_file_exit(void) +{ + sysfs_remove_file(kernel_kobj, &zxdh_cfg.attr); + kobject_put(kobj_zxdh_cfg); + dh_core_dev = NULL; + DH_LOG_INFO(MODULE_MPF, "PCIe device to JSON driver cleanup\n"); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.h b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.h new file mode 100644 index 000000000000..9b2a197bf4d0 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/pcie-zte-zf-json.h @@ -0,0 +1,40 @@ +#ifndef __PCIE_ZTE_ZF_JSON_H +#define __PCIE_ZTE_ZF_JSON_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../zf_mpf.h" +#include "pcie-zte-zf-epc.h" + +#ifdef _cplusplus +extern "C" { +#endif + +#define ZXDH_SYSFS_DIR "zxdh_sysfs" +#define ZXDH_SYSFS_PATH "/sys/zxdh_sysfs" +#define RECV_BUFFER_SIZE 30 +#define BAR_MSG_HEADER_SIZE 4 + +struct dpu_pf_cfg { + u8 ep_id; + u8 pf_id; + u8 pf_enable; + u8 dev_type; + u32 vendor_id; + u32 device_id; + u32 max_vf; +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.c b/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.c new file mode 100644 index 000000000000..5844b8c52698 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.c @@ -0,0 +1,137 @@ +#include +#include +#include +#include + +#include 
"virt-dma.h" + +static struct zxdh_virt_dma_desc * +zxdh_to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct zxdh_virt_dma_desc, tx); +} + +dma_cookie_t zxdh_vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct zxdh_virt_dma_chan *zxdh_vc = zxdh_to_virt_chan(tx->chan); + struct zxdh_virt_dma_desc *zxdh_vd = zxdh_to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&zxdh_vc->lock, flags); + cookie = zxdh_dma_cookie_assign(tx); + + list_move_tail(&zxdh_vd->node, &zxdh_vc->desc_submitted); + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + + DH_LOG_DEBUG(MODULE_MPF, "%s vchan %p: txd %p[%x]: submitted\n", + __func__, zxdh_vc, zxdh_vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(zxdh_vchan_tx_submit); + +/** + * zxdh_vchan_tx_desc_free - free a reusable descriptor + * @tx: the transfer + * + * This function frees a previously allocated reusable descriptor. The only + * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the + * transfer. 
+ * + * Returns 0 upon success + */ +int zxdh_vchan_tx_desc_free(struct dma_async_tx_descriptor *tx) +{ + struct zxdh_virt_dma_chan *zxdh_vc = zxdh_to_virt_chan(tx->chan); + struct zxdh_virt_dma_desc *zxdh_vd = zxdh_to_virt_desc(tx); + unsigned long flags; + + spin_lock_irqsave(&zxdh_vc->lock, flags); + list_del(&zxdh_vd->node); + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + + DH_LOG_DEBUG(MODULE_MPF, "%s vchan %p: txd %p[%x]: freeing\n", __func__, + zxdh_vc, zxdh_vd, zxdh_vd->tx.cookie); + + zxdh_vc->desc_free(zxdh_vd); + return 0; +} +EXPORT_SYMBOL_GPL(zxdh_vchan_tx_desc_free); + +struct zxdh_virt_dma_desc * +zxdh_vchan_find_desc(struct zxdh_virt_dma_chan *zxdh_vc, dma_cookie_t cookie) +{ + struct zxdh_virt_dma_desc *zxdh_vd; + + list_for_each_entry(zxdh_vd, &zxdh_vc->desc_issued, node) + if (zxdh_vd->tx.cookie == cookie) + return zxdh_vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(zxdh_vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
+ */ +static void zxdh_vchan_complete(struct tasklet_struct *t) +{ + struct zxdh_virt_dma_chan *zxdh_vc = from_tasklet(zxdh_vc, t, task); + struct zxdh_virt_dma_desc *zxdh_vd, *_vd; + struct zxdh_dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&zxdh_vc->lock); + list_splice_tail_init(&zxdh_vc->desc_completed, &head); + zxdh_vd = zxdh_vc->cyclic; + if (zxdh_vd) { + zxdh_vc->cyclic = NULL; + zxdh_dmaengine_desc_get_callback(&zxdh_vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&zxdh_vc->lock); + + zxdh_dmaengine_desc_callback_invoke(&cb, &zxdh_vd->tx_result); + + list_for_each_entry_safe(zxdh_vd, _vd, &head, node) { + zxdh_dmaengine_desc_get_callback(&zxdh_vd->tx, &cb); + + list_del(&zxdh_vd->node); + zxdh_dmaengine_desc_callback_invoke(&cb, &zxdh_vd->tx_result); + zxdh_vchan_vdesc_fini(zxdh_vd); + } +} + +void zxdh_vchan_dma_desc_free_list(struct zxdh_virt_dma_chan *zxdh_vc, + struct list_head *head) +{ + struct zxdh_virt_dma_desc *zxdh_vd, *_vd; + + list_for_each_entry_safe(zxdh_vd, _vd, head, node) { + list_del(&zxdh_vd->node); + zxdh_vchan_vdesc_fini(zxdh_vd); + } +} +EXPORT_SYMBOL_GPL(zxdh_vchan_dma_desc_free_list); + +void zxdh_vchan_init(struct zxdh_virt_dma_chan *zxdh_vc, + struct dma_device *dmadev) +{ + zxdh_dma_cookie_init(&zxdh_vc->chan); + + spin_lock_init(&zxdh_vc->lock); + INIT_LIST_HEAD(&zxdh_vc->desc_allocated); + INIT_LIST_HEAD(&zxdh_vc->desc_submitted); + INIT_LIST_HEAD(&zxdh_vc->desc_issued); + INIT_LIST_HEAD(&zxdh_vc->desc_completed); + INIT_LIST_HEAD(&zxdh_vc->desc_terminated); + + tasklet_setup(&zxdh_vc->task, zxdh_vchan_complete); + + zxdh_vc->chan.device = dmadev; + list_add_tail(&zxdh_vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(zxdh_vchan_init); diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.h b/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.h new file mode 100644 index 000000000000..5fdf5700efbc --- /dev/null +++ 
b/drivers/net/ethernet/dinghai/zf_mpf/epc/virt-dma.h @@ -0,0 +1,236 @@ +#ifndef ZXDH_VIRT_DMA_H +#define ZXDH_VIRT_DMA_H + +#include +#include +#include +#include "dmaengine.h" + +struct zxdh_virt_dma_desc { + struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; + /* protected by zxdh_vc.lock */ + struct list_head node; +}; + +struct zxdh_virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct zxdh_virt_dma_desc *); + + spinlock_t lock; + + /* protected by zxdh_vc.lock */ + struct list_head desc_allocated; + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + struct list_head desc_terminated; + + struct zxdh_virt_dma_desc *cyclic; +}; + +static inline struct zxdh_virt_dma_chan * +zxdh_to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct zxdh_virt_dma_chan, chan); +} + +void zxdh_vchan_dma_desc_free_list(struct zxdh_virt_dma_chan *zxdh_vc, + struct list_head *head); +void zxdh_vchan_init(struct zxdh_virt_dma_chan *zxdh_vc, + struct dma_device *dmadev); +struct zxdh_virt_dma_desc *zxdh_vchan_find_desc(struct zxdh_virt_dma_chan *, + dma_cookie_t); +extern dma_cookie_t zxdh_vchan_tx_submit(struct dma_async_tx_descriptor *); +extern int zxdh_vchan_tx_desc_free(struct dma_async_tx_descriptor *); + +/** + * zxdh_vchan_tx_prep - prepare a descriptor + * @zxdh_vc: virtual channel allocating this descriptor + * @zxdh_vd: virtual descriptor to prepare + * @tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor * +zxdh_vchan_tx_prep(struct zxdh_virt_dma_chan *zxdh_vc, + struct zxdh_virt_dma_desc *zxdh_vd, unsigned long tx_flags) +{ + unsigned long flags; + + dma_async_tx_descriptor_init(&zxdh_vd->tx, &zxdh_vc->chan); + zxdh_vd->tx.flags = tx_flags; + zxdh_vd->tx.tx_submit = zxdh_vchan_tx_submit; + zxdh_vd->tx.desc_free = zxdh_vchan_tx_desc_free; + + zxdh_vd->tx_result.result = DMA_TRANS_NOERROR; + 
zxdh_vd->tx_result.residue = 0; + + spin_lock_irqsave(&zxdh_vc->lock, flags); + list_add_tail(&zxdh_vd->node, &zxdh_vc->desc_allocated); + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + + return &zxdh_vd->tx; +} + +/** + * zxdh_vchan_issue_pending - move submitted descriptors to issued list + * @zxdh_vc: virtual channel to update + * + * zxdh_vc.lock must be held by caller + */ +static inline bool zxdh_vchan_issue_pending(struct zxdh_virt_dma_chan *zxdh_vc) +{ + list_splice_tail_init(&zxdh_vc->desc_submitted, &zxdh_vc->desc_issued); + return !list_empty(&zxdh_vc->desc_issued); +} + +/** + * zxdh_vchan_cookie_complete - report completion of a descriptor + * @zxdh_vd: virtual descriptor to update + * + * zxdh_vc.lock must be held by caller + */ +static inline void +zxdh_vchan_cookie_complete(struct zxdh_virt_dma_desc *zxdh_vd) +{ + struct zxdh_virt_dma_chan *zxdh_vc = + zxdh_to_virt_chan(zxdh_vd->tx.chan); + dma_cookie_t cookie; + + cookie = zxdh_vd->tx.cookie; + zxdh_dma_cookie_complete(&zxdh_vd->tx); + dev_vdbg(zxdh_vc->chan.device->dev, "txd %p[%x]: marked complete\n", + zxdh_vd, cookie); + list_add_tail(&zxdh_vd->node, &zxdh_vc->desc_completed); + + tasklet_schedule(&zxdh_vc->task); +} + +/** + * zxdh_vchan_vdesc_fini - Free or reuse a descriptor + * @zxdh_vd: virtual descriptor to free/reuse + */ +static inline void zxdh_vchan_vdesc_fini(struct zxdh_virt_dma_desc *zxdh_vd) +{ + struct zxdh_virt_dma_chan *zxdh_vc = + zxdh_to_virt_chan(zxdh_vd->tx.chan); + + if (dmaengine_desc_test_reuse(&zxdh_vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&zxdh_vc->lock, flags); + list_add(&zxdh_vd->node, &zxdh_vc->desc_allocated); + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + } else { + zxdh_vc->desc_free(zxdh_vd); + } +} + +/** + * zxdh_vchan_cyclic_callback - report the completion of a period + * @zxdh_vd: virtual descriptor + */ +static inline void +zxdh_vchan_cyclic_callback(struct zxdh_virt_dma_desc *zxdh_vd) +{ + struct zxdh_virt_dma_chan *zxdh_vc = + 
zxdh_to_virt_chan(zxdh_vd->tx.chan); + + zxdh_vc->cyclic = zxdh_vd; + tasklet_schedule(&zxdh_vc->task); +} + +/** + * zxdh_vchan_terminate_vdesc - Disable pending cyclic callback + * @zxdh_vd: virtual descriptor to be terminated + * + * zxdh_vc.lock must be held by caller + */ +static inline void +zxdh_vchan_terminate_vdesc(struct zxdh_virt_dma_desc *zxdh_vd) +{ + struct zxdh_virt_dma_chan *zxdh_vc = + zxdh_to_virt_chan(zxdh_vd->tx.chan); + + list_add_tail(&zxdh_vd->node, &zxdh_vc->desc_terminated); + + if (zxdh_vc->cyclic == zxdh_vd) + zxdh_vc->cyclic = NULL; +} + +/** + * zxdh_vchan_next_desc - peek at the next descriptor to be processed + * @zxdh_vc: virtual channel to obtain descriptor from + * + * zxdh_vc.lock must be held by caller + */ +static inline struct zxdh_virt_dma_desc * +zxdh_vchan_next_desc(struct zxdh_virt_dma_chan *zxdh_vc) +{ + return list_first_entry_or_null(&zxdh_vc->desc_issued, + struct zxdh_virt_dma_desc, node); +} + +/** + * zxdh_vchan_get_all_descriptors - obtain all submitted and issued descriptors + * @zxdh_vc: virtual channel to get descriptors from + * @head: list of descriptors found + * + * zxdh_vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void +zxdh_vchan_get_all_descriptors(struct zxdh_virt_dma_chan *zxdh_vc, + struct list_head *head) +{ + list_splice_tail_init(&zxdh_vc->desc_allocated, head); + list_splice_tail_init(&zxdh_vc->desc_submitted, head); + list_splice_tail_init(&zxdh_vc->desc_issued, head); + list_splice_tail_init(&zxdh_vc->desc_completed, head); + list_splice_tail_init(&zxdh_vc->desc_terminated, head); +} + +static inline void +zxdh_vchan_free_chan_resources(struct zxdh_virt_dma_chan *zxdh_vc) +{ + struct zxdh_virt_dma_desc *zxdh_vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&zxdh_vc->lock, flags); + zxdh_vchan_get_all_descriptors(zxdh_vc, &head); + 
list_for_each_entry(zxdh_vd, &head, node) + dmaengine_desc_clear_reuse(&zxdh_vd->tx); + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + + zxdh_vchan_dma_desc_free_list(zxdh_vc, &head); +} + +/** + * zxdh_vchan_synchronize() - synchronize callback execution to the current context + * @zxdh_vc: virtual channel to synchronize + * + * Makes sure that all scheduled or active callbacks have finished running. For + * proper operation the caller has to ensure that no new callbacks are scheduled + * after the invocation of this function started. + * Free up the terminated cyclic descriptor to prevent memory leakage. + */ +static inline void zxdh_vchan_synchronize(struct zxdh_virt_dma_chan *zxdh_vc) +{ + LIST_HEAD(head); + unsigned long flags; + + tasklet_kill(&zxdh_vc->task); + + spin_lock_irqsave(&zxdh_vc->lock, flags); + + list_splice_tail_init(&zxdh_vc->desc_terminated, &head); + + spin_unlock_irqrestore(&zxdh_vc->lock, flags); + + zxdh_vchan_dma_desc_free_list(zxdh_vc, &head); +} + +#endif diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.c b/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.c new file mode 100644 index 000000000000..933b7e3528c4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.c @@ -0,0 +1,991 @@ +#include "pcie-zte-zf-epf.h" +#include "./../epc/pcie-zte-zf-epc.h" + +#define EPF_MDEV_OPS 1 +#define MDEV_FOPS 1 + +extern void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, + enum pci_barno bar, size_t align); +extern void pci_epf_free_space(struct pci_epf *epf, void *addr, + enum pci_barno bar); +extern int ep_virtio_module_set(int ep_id, int pf_idx, int en); +extern int pcie_zte_epc_ob_read(struct pci_epc *epc, phys_addr_t phys_addr, + unsigned int size, unsigned int *val); + +#if EPF_MDEV_OPS +static int pci_epf_dev_set_pf_bar(struct pci_epf *epf) +{ + int bar_no = 0, add = 0; + int ret = 0; + struct pci_epf_bar *epf_bar = NULL; + struct pci_epc *epc = epf->epc; + struct device *dev = 
&epf->dev; + struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features = NULL; + + epc_features = epf_mdev_dev->epc_features; + + for (bar_no = 0; bar_no < PCI_STD_NUM_BARS; bar_no += add) { + epf_bar = &epf->bar[bar_no]; + epf_bar->flags |= BIT(3); + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; + if (!!(epc_features->reserved_bar & (1 << bar_no))) + continue; + + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, + epf_bar); + if (ret) { + dev_err(dev, "Failed to set BAR%d\n", bar_no); + } + } + + // set oprom bar + if (epf->header->pf_rom_size != 0) { + epf->bar[BAR_ROM].phys_addr = + page_to_phys(epf->header->pf_rom_page); + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, + &epf->bar[BAR_ROM]); + if (ret) { + dev_err(dev, "Failed to set BAR_ROM\n"); + } + } + + return 0; +} + +static int pci_epf_dev_set_vf_bar(struct pci_epf *epf) +{ + int bar_no = 0, add = 0; + int ret = 0; + struct pci_epf_bar *epf_bar; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features; + + epc_features = epf_mdev_dev->epc_features; + + for (bar_no = 0; bar_no < PCI_STD_NUM_BARS; bar_no += add) { + epf_bar = &epf->epf_pf->vf_bar[bar_no]; + epf_bar->flags |= BIT(3); + add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 
2 : 1; + if (!!(epc_features->reserved_bar & (1 << bar_no))) + continue; + + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, + epf_bar); + if (ret) { + dev_err(dev, "Failed to set BAR%d\n", bar_no); + } + } + return 0; +} + +// 根据epf来配置epc的bar、header、msi、msi-x +static int pci_epf_dev_core_init(struct pci_epf *epf) +{ + const struct pci_epc_features *epc_features; + struct device *dev = &epf->dev; + bool msix_capable = false; + int ret = 0; + + epc_features = + pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no); + if (epc_features) { + msix_capable = epc_features->msix_capable; + } + + ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, + epf->header); + if (ret) { + dev_err(dev, "Configuration header write failed\n"); + return ret; + } + + if (isPF(epf->func_no)) { + ret = pci_epf_dev_set_pf_bar(epf); + } else if (epf->vfunc_no == 0) { + ret = pci_epf_dev_set_vf_bar(epf); + } + + return ret; +} + +static int get_vf_number(struct pci_epf *epf) +{ + struct pcie_dpu_ep *dpu_ep = epc_get_drvdata(epf->epc); + int vf_total_num = 0; + + if (!dpu_ep) { + DH_LOG_ERR(MODULE_MPF, "epc is NULL!!!\n"); + return -EINVAL; + } + + vf_total_num = epf->func_no & PCIE_DPU_EP_GET_PF_NO; + if (vf_total_num >= PCIE_DPU_PF_NUMS) { + DH_LOG_ERR(MODULE_MPF, "error vf_total_num=%d\n", vf_total_num); + return -EINVAL; + } + return dpu_ep->vf_total_num[vf_total_num]; +} + +// 填充pci_epf_bar结构体内容 +static int alloc_bar_space(struct pci_epf *epf) +{ + u16 bar_no = 0; + int vf_num = 0; + struct pci_epf_mdev_dev *epf_mdev_dev = NULL; + + if (!epf) { + DH_LOG_ERR(MODULE_MPF, "epf is NULL!\n"); + return -EINVAL; + } + if (!epf->epc) { + DH_LOG_ERR(MODULE_MPF, "epf->epc is NULL!\n"); + return -EINVAL; + } + + if (!epf->epc->ops->get_max_vfs) { + DH_LOG_ERR(MODULE_MPF, "get_max_vfs is NULL!\n"); + return -EINVAL; + } + + epf_mdev_dev = epf_get_drvdata(epf); + vf_num = get_vf_number(epf); + + if (isPF(epf->func_no)) { + epf->bar[BAR_0].size = epf->header->pf_bar0_size; + 
epf->bar[BAR_2].size = epf->header->pf_bar2_size; + epf->bar[BAR_4].size = epf->header->pf_bar4_size; + } else { + epf->epf_pf->vf_bar[4].size = epf->epf_pf->header->vf_bar4_size; + } + + // 申请物理空间 + for (bar_no = 0; bar_no < PCI_STD_NUM_BARS - 2; bar_no += 2) { + epf->bar[bar_no].addr = pci_epf_alloc_space( + epf, epf->bar[bar_no].size, bar_no, GFP_KERNEL); + if (!epf->bar[bar_no].addr) { + DH_LOG_ERR(MODULE_MPF, "Don't have enough memory!\n"); + return -ENOMEM; + } + } + + for (bar_no = 0; bar_no < PCI_STD_NUM_BARS; bar_no += 2) { + // PF的信息获取 + epf->bar[bar_no].barno = bar_no; + epf->bar[bar_no].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + if (isPF(epf->func_no)) { + // PF下的VF的信息获取 + epf->vf_bar[bar_no].barno = bar_no; + epf->vf_bar[bar_no].flags |= + PCI_BASE_ADDRESS_MEM_TYPE_64; + } + } + + return 0; +} + +static void pci_epf_mdev_unbind(struct pci_epf *epf) +{ + int bar_no = 0; + struct pci_epc *epc = epf->epc; + + DH_LOG_ERR(MODULE_MPF, "pf = 0x%x, vf = 0x%x\n", epf->func_no, + epf->vfunc_no); + if (epf->vfunc_no == 0) { + for (bar_no = 0; bar_no < PCI_STD_NUM_BARS - 2; bar_no += 2) { + pci_epf_free_space(epf, epf->bar[bar_no].addr, bar_no); + } + } + + if (isPF(epf->func_no)) { + clear_bit(epf->func_no, &epc->function_num_map); + } +} + +static int pci_epf_mdev_bind(struct pci_epf *epf) +{ + int ret = 0; + struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features = NULL; + struct pci_epc *epc = epf->epc; + bool linkup_notifier = false; + bool core_init_notifier = false; + + if (!epc) { + DH_LOG_ERR(MODULE_MPF, "epc is NULL!!!\n"); + return -EINVAL; + } + + if (isPF(epf->func_no)) { + epf->is_vf = PCI_EPF_SRIOV_PF; + } else { + epf->is_vf = PCI_EPF_SRIOV_VF; + } + + ret = alloc_bar_space(epf); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "Alloc bar sapce error!\n"); + return ret; + } + + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); + if (!epc_features) { + dev_err(&epf->dev, "epc_features not 
implemented\n"); + return -EOPNOTSUPP; + } + + linkup_notifier = epc_features->linkup_notifier; + core_init_notifier = epc_features->core_init_notifier; + epf_mdev_dev->epc_features = epc_features; + if (!core_init_notifier) { + ret = pci_epf_dev_core_init(epf); + if (ret) { + return ret; + } + } + + return 0; +} + +static const struct pci_epf_device_id pci_epf_dev_ids[] = { + { + .name = "pci-epf-mdev", + }, + {}, +}; + +static struct pci_epf_ops epf_mdev_ops = { + .unbind = pci_epf_mdev_unbind, + .bind = pci_epf_mdev_bind, +}; +#endif + +#if MDEV_FOPS +// sample_pci_mdev_dev_show +static ssize_t sample_pci_mdev_dev_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "This is pci-epf-mdev device\n"); +} +static DEVICE_ATTR_RO(sample_pci_mdev_dev); + +static struct attribute *pci_mdev_dev_attrs[] = { + &dev_attr_sample_pci_mdev_dev.attr, + NULL, +}; + +static const struct attribute_group pci_mdev_dev_group = { + .name = "pcie_mdev_dev", + .attrs = pci_mdev_dev_attrs, +}; + +static const struct attribute_group *pci_mdev_dev_groups[] = { + &pci_mdev_dev_group, + NULL, +}; + +// sample_mdev_dev_show +static ssize_t sample_mdev_dev_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (mdev_from_dev(dev)) + return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); + + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(sample_mdev_dev); + +static struct attribute *mdev_dev_attrs[] = { + &dev_attr_sample_mdev_dev.attr, + NULL, +}; + +static const struct attribute_group mdev_dev_group = { + .name = "vendor", + .attrs = mdev_dev_attrs, +}; + +static const struct attribute_group *mdev_dev_groups[] = { + &mdev_dev_group, + NULL, +}; + +// name_show +++ device_api_show +static ssize_t name_show(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", "ZTE epf mdev"); +} +static MDEV_TYPE_ATTR_RO(name); + +static ssize_t device_api_show(struct mdev_type *mtype, 
+ struct mdev_type_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); +} + +static MDEV_TYPE_ATTR_RO(device_api); + +static struct attribute *mdev_types_attrs[] = { + &mdev_type_attr_name.attr, + &mdev_type_attr_device_api.attr, + NULL, +}; + +static struct attribute_group mdev_type_group = { + .name = "single", + .attrs = mdev_types_attrs, +}; + +static struct attribute_group *mdev_type_groups[] = { + &mdev_type_group, + NULL, +}; + +static int pci_mdev_create(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + struct device *dev = mdev->type->parent->dev; + struct pci_epf *epf = container_of(dev, struct pci_epf, dev); + struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + int ret = 0; + + if (epf_mdev_dev->created_flag == 1) { + DH_LOG_ERR(MODULE_MPF, "This mdev has been created!!!\n"); + return -EPERM; + } + + mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); + if (!mdev_state) { + return -ENOMEM; + } + + mdev_state->irq_index = -1; + mdev_state->epf_mdev_dev = epf_mdev_dev; + mdev_state->mdev = mdev; + mutex_init(&mdev_state->ops_lock); + mutex_init(&mdev_state->ioeventfds_lock); + INIT_LIST_HEAD(&mdev_state->ioeventfds_list); + + mdev_set_drvdata(mdev, mdev_state); + + epf_mdev_dev->created_flag = 1; + + return ret; +} + +static int pci_mdev_remove(struct mdev_device *mdev) +{ + int ret = 0; + // int bar_no = 0; + struct mdev_state *mdev_state = NULL; + struct epf_mdev_ioeventfd *ioeventfd = NULL, *temp = NULL; + struct device *dev = NULL; + struct pci_epf *epf = NULL; + struct pci_epf_mdev_dev *epf_mdev_dev = NULL; + + mdev_state = mdev_get_drvdata(mdev); + if (!mdev_state) { + DH_LOG_ERR(MODULE_MPF, "mdev_state NULL\n"); + return 0; + } + dev = mdev->type->parent->dev; + if (!dev) { + DH_LOG_ERR(MODULE_MPF, "dev NULL\n"); + return 0; + } + epf = container_of(dev, struct pci_epf, dev); + epf_mdev_dev = epf_get_drvdata(epf); + if (epf_mdev_dev == NULL) { + DH_LOG_ERR(MODULE_MPF, 
"epf_mdev_dev NULL\n"); + return 0; + } + + mutex_lock(&mdev_state->ioeventfds_lock); + list_for_each_entry_safe(ioeventfd, temp, &mdev_state->ioeventfds_list, + next) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + mdev_state->ioeventfds_nr--; + kfree(ioeventfd); + } + mutex_unlock(&mdev_state->ioeventfds_lock); + mutex_destroy(&mdev_state->ioeventfds_lock); + mutex_destroy(&mdev_state->ops_lock); + kfree(mdev_state); // todo LJP + + epf_mdev_dev->created_flag = 0; + + return ret; +} + +static int pci_mdev_open(struct mdev_device *mdev) +{ + return 0; +} + +static void pci_mdev_close(struct mdev_device *mdev) +{ +} + +static ssize_t pci_mdev_read(struct mdev_device *mdev, char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = 0; + + return ret; +} + +static ssize_t pci_mdev_write(struct mdev_device *mdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = 0; + struct mdev_state *mdev_state = mdev_get_drvdata(mdev); + + if (!mdev_state) { + DH_LOG_ERR(MODULE_MPF, "mdev_state is NULL!!!\n"); + return -EINVAL; + } + + return ret; +} + +static int pci_mdev_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) +{ + struct device *dev = mdev->type->parent->dev; + struct pci_epf *epf = container_of(dev, struct pci_epf, dev); + struct pci_epc *epc = epf->epc; + // struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + int ret = 0; + int bar_no = vma->vm_pgoff; + + if (!epc) { + DH_LOG_ERR(MODULE_MPF, "epc is NULL!!!\n"); + } + + if (vma->vm_end < vma->vm_start) { + goto parameter_error; + } + + if (vma->vm_end - vma->vm_start > epf->bar[bar_no].size) { + goto parameter_error; + } + + if ((vma->vm_flags & VM_SHARED) == 0) { + goto parameter_error; + } + + ret = remap_vmalloc_range_partial(vma, vma->vm_start, + epf->bar[bar_no].addr, 0, + vma->vm_end - vma->vm_start); + + if (ret) { + DH_LOG_ERR(MODULE_MPF, + "remap_vmalloc_range_partial error!!!\n"); + } + + return ret; + +parameter_error: + 
DH_LOG_ERR(MODULE_MPF, "vm area parameter error!!!\n"); + return -EINVAL; +} + +#if 1 // ioctl +static int zte_get_device_info(struct mdev_device *mdev, + struct vfio_device_info *dev_info) +{ + dev_info->flags = VFIO_DEVICE_FLAGS_PCI; + dev_info->num_regions = VFIO_PCI_NUM_REGIONS; + dev_info->num_irqs = VFIO_PCI_NUM_IRQS; + + return 0; +} + +static int zte_get_region_info(struct mdev_device *mdev, + struct vfio_region_info *region_info) +{ + struct mdev_state *mdev_state = NULL; + struct device *dev = mdev->type->parent->dev; + struct pci_epf *epf = container_of(dev, struct pci_epf, dev); + int bar_no = 0; + int bar_size = 0; + + mdev_state = mdev_get_drvdata(mdev); + if (!mdev_state) { + return -EINVAL; + } + + bar_no = region_info->index; + if (bar_no >= VFIO_PCI_BAR5_REGION_INDEX) { + DH_LOG_ERR(MODULE_MPF, "bar index invaild!\n"); + return -EINVAL; + } + + mutex_lock(&mdev_state->ops_lock); + + if (bar_no == VFIO_PCI_BAR0_REGION_INDEX || + bar_no == VFIO_PCI_BAR2_REGION_INDEX || + bar_no == VFIO_PCI_BAR4_REGION_INDEX) { + bar_size = epf->bar[bar_no].size; + } else { + DH_LOG_ERR(MODULE_MPF, "bar index invaild!\n"); + return -EINVAL; + } + + mdev_state->region_info[bar_no].size = bar_size; + + region_info->offset = (bar_no << 12); + region_info->size = bar_size; + region_info->flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + mutex_unlock(&mdev_state->ops_lock); + return 0; +} + +static void __attribute__((unused)) +epf_mdev_ioeventfd_thread(void *opaque, void *unused) +{ + // 打桩 +} + +int epf_mdev_ioeventfd_handler(void *opaque, void *unused) +{ + int ret = 0; + struct epf_mdev_ioeventfd *ioeventfd = opaque; + struct mdev_state *mdev_state = ioeventfd->mdev_state; + struct pci_epf_mdev_dev *epf_mdev_dev = mdev_state->epf_mdev_dev; + struct pci_epf *epf = epf_mdev_dev->epf; + struct pci_epc *epc = epf->epc; + enum pci_epc_irq_type irq_type = PCI_EPC_IRQ_MSIX; + + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, irq_type, + 
ioeventfd->offset + 1); + return ret; +} + +#if 0 +int epf_mdev_ioeventfd_func(struct mdev_state *mdev_state, loff_t offset, u64 data, int count, int fd) +{ + int ret = 0; + struct epf_mdev_ioeventfd *ioeventfd = NULL, *temp = NULL; + +#ifndef iowrite64 + if (count == 8) + return -EINVAL; +#endif + + mutex_lock(&mdev_state->ioeventfds_lock); + list_for_each_entry_safe(ioeventfd, temp, &mdev_state->ioeventfds_list, next) { + if (ioeventfd->offset == offset) { + if (fd == -1 || data == 0) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + mdev_state->ioeventfds_nr--; + kfree(ioeventfd); + ret = 0; + } else if (data == 1) { + vfio_virqfd_disable(&ioeventfd->virqfd); + list_del(&ioeventfd->next); + mdev_state->ioeventfds_nr--; + kfree(ioeventfd); + goto eventfds_set; + } else { + ret = -EEXIST; + } + + goto out_unlock; + } + } + + if (fd < 0) { + ret = -ENODEV; + goto out_unlock; + } + + if (mdev_state->ioeventfds_nr >= EPF_MDEV_IOEVENTFD_MAX) { + ret = -ENOSPC; + goto out_unlock; + } + +eventfds_set: + ioeventfd = kzalloc(sizeof(*ioeventfd), GFP_KERNEL); + if (!ioeventfd) { + ret = -ENOMEM; + goto out_unlock; + } + + ioeventfd->mdev_state = mdev_state; + ioeventfd->data = data; + ioeventfd->offset = offset; + ioeventfd->count = count; + + ret = vfio_virqfd_enable(ioeventfd, epf_mdev_ioeventfd_handler, + epf_mdev_ioeventfd_thread, NULL, + &ioeventfd->virqfd, fd); + if (ret) { + kfree(ioeventfd); + goto out_unlock; + } + + list_add(&ioeventfd->next, &mdev_state->ioeventfds_list); + mdev_state->ioeventfds_nr++; + +out_unlock: + mutex_unlock(&mdev_state->ioeventfds_lock); + + return ret; +} +#endif + +static int device_get_info(struct mdev_device *mdev, + struct mdev_state *mdev_state, unsigned long arg) +{ + unsigned long minsz = 0; + struct vfio_device_info info = { 0 }; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + if (copy_from_user(&info, (void __user *)arg, minsz)) { + return -EFAULT; + } + + if (info.argsz < minsz) { + 
return -EINVAL; + } + + zte_get_device_info(mdev, &info); + + memcpy(&mdev_state->dev_info, &info, sizeof(info)); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; +} + +static int device_get_region_info(struct mdev_device *mdev, + struct mdev_state *mdev_state, + unsigned long arg) +{ + struct vfio_region_info info = { 0 }; + unsigned long minsz = 0; + int ret = 0; + + minsz = offsetofend(struct vfio_region_info, offset); + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = zte_get_region_info(mdev, &info); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) { + return -EFAULT; + } + + return 0; +} + +static int vfio_device_ioeventfd(struct mdev_device *mdev, + struct mdev_state *mdev_state, + unsigned long arg) +{ + struct vfio_device_ioeventfd ioeventfd = { 0 }; + int ret = 0; + int count = 0; + unsigned long minsz = 0; + struct pci_epf_mdev_dev *epf_mdev_dev = mdev_state->epf_mdev_dev; + struct pci_epf *epf = epf_mdev_dev->epf; + + minsz = offsetofend(struct vfio_device_ioeventfd, fd); + if (copy_from_user(&ioeventfd, (void __user *)arg, minsz)) { + DH_LOG_ERR(MODULE_MPF, "copy from user failed! 
minsz=0x%lx\n", + minsz); + return -EFAULT; + } + + DH_LOG_INFO( + MODULE_MPF, + "func = 0x%x, vfunc_no = 0x%x, offset = 0x%llx, data = 0x%llx, minsz=0x%lx\n", + epf->func_no, epf->vfunc_no, ioeventfd.offset, ioeventfd.data, + minsz); + if (ioeventfd.argsz < minsz) { + return -EINVAL; + } + + if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK) { + return -EINVAL; + } + + count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK; + + if (hweight8(count) != 1 || ioeventfd.fd < -1) + return -EINVAL; + + // ret = epf_mdev_ioeventfd_func(mdev_state, (loff_t)ioeventfd.offset, (u64)ioeventfd.data, count, (int)ioeventfd.fd); + DH_LOG_ERR(MODULE_MPF, "error: epf_mdev_ioeventfd_func not called!\n"); + return ret; +} + +static int vfio_outbound_set(struct mdev_device *mdev, + struct mdev_state *mdev_state, unsigned long arg) +{ + int ret = 0; + struct pci_epf_mdev_dev *epf_mdev_dev = mdev_state->epf_mdev_dev; + struct pci_epf *epf = epf_mdev_dev->epf; + struct pci_epc *epc = epf->epc; + struct ioctl_ob_data ob = { 0 }; + void __iomem *dst_addr = NULL; + phys_addr_t dst_phys_addr = 0; + + if (copy_from_user(&ob, (void *)arg, sizeof(struct ioctl_ob_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_from_user failed!\n"); + return -EFAULT; + } + DH_LOG_INFO(MODULE_MPF, "ob->host = 0x%llx, ob->size = 0x%lxs\n", + ob.host_addr, ob.size); + + dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, ob.size); + if (!dst_addr) { + DH_LOG_ERR(MODULE_MPF, + "Failed to allocate destination address\n"); + return -ENOMEM; + } + + ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, + ob.host_addr, ob.size); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "pci_epc_map_addr err!!!\n"); + return ret; + } + ob.dpu_vaddr = (unsigned long long)dst_addr; + ob.dpu_paddr = dst_phys_addr; + if (copy_to_user((void *)arg, &ob, sizeof(struct ioctl_ob_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_to_user failed!\n"); + return -EFAULT; + } + + return 0; +} + +static int vfio_outbound_read(struct 
mdev_device *mdev, + struct mdev_state *mdev_state, unsigned long arg) +{ + struct pci_epf_mdev_dev *epf_mdev_dev = mdev_state->epf_mdev_dev; + struct pci_epf *epf = epf_mdev_dev->epf; + struct pci_epc *epc = epf->epc; + struct pci_ob_rw_data ob_rw_data = { 0 }; + + if (copy_from_user(&ob_rw_data, (void *)arg, + sizeof(struct pci_ob_rw_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_from_user failed!\n"); + return -EFAULT; + } + DH_LOG_INFO(MODULE_MPF, + "ob_rw_data.phys_addr = 0x%llx, ob_rw_data.size = 0x%x\n", + ob_rw_data.phys_addr, ob_rw_data.size); + + pcie_zte_epc_ob_read(epc, (phys_addr_t)ob_rw_data.phys_addr, + ob_rw_data.size, &ob_rw_data.val); + + if (copy_to_user((void *)arg, &ob_rw_data, + sizeof(struct pci_ob_rw_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_to_user failed!\n"); + return -EFAULT; + } + + return 0; +} + +static int vfio_outbound_clear(struct mdev_device *mdev, + struct mdev_state *mdev_state, unsigned long arg) +{ + struct pci_epf_mdev_dev *epf_mdev_dev = mdev_state->epf_mdev_dev; + struct pci_epf *epf = epf_mdev_dev->epf; + struct pci_epc *epc = epf->epc; + struct ioctl_ob_data ob = { 0 }; + + if (copy_from_user(&ob, (void *)arg, sizeof(struct ioctl_ob_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_from_user failed!\n"); + return -EFAULT; + } + + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, ob.dpu_vaddr); + pci_epc_mem_free_addr(epc, ob.dpu_paddr, (void *)ob.dpu_vaddr, ob.size); + + return 0; +} + +static int vfio_virtio_module_set(unsigned long arg) +{ + struct ioctl_virtio_data virtio_data = { 0 }; + + if (copy_from_user(&virtio_data, (void *)arg, + sizeof(struct ioctl_virtio_data))) { + DH_LOG_ERR(MODULE_MPF, "err:copy_from_user failed!\n"); + return -EFAULT; + } + + return ep_virtio_module_set(virtio_data.ep_id, virtio_data.pf_id, + virtio_data.en); +} + +static long pci_mdev_ioctl(struct mdev_device *mdev, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + struct mdev_state *mdev_state; + struct device *dev = NULL; + // 
struct pci_epf *epf = container_of(dev, struct pci_epf, dev); + // struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + + if (mdev == NULL) { + DH_LOG_ERR(MODULE_MPF, "err: mdev is NULL!\n"); + return -EINVAL; + } + dev = mdev->type->parent->dev; + + mdev_state = mdev_get_drvdata(mdev); + if (!mdev_state) + return -ENODEV; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + return device_get_info(mdev, mdev_state, arg); + case VFIO_DEVICE_GET_REGION_INFO: + return device_get_region_info(mdev, mdev_state, arg); + case VFIO_DEVICE_IOEVENTFD: + return vfio_device_ioeventfd(mdev, mdev_state, arg); + case VFIO_OUTBOUND_SET: + return vfio_outbound_set(mdev, mdev_state, arg); + case VFIO_OUTBOUND_CLEAR: + return vfio_outbound_clear(mdev, mdev_state, arg); + case VFIO_POWER_RESET: + ep_power_reset(arg); + return 0; + case VFIO_VIRTIO_MODULE_SET: + return vfio_virtio_module_set(arg); + case VFIO_LINKUP: + return is_pcie_ep_link(arg); + case VFIO_OUTBOUND_READ: + return vfio_outbound_read(mdev, mdev_state, arg); + // case VFIO_EP4_LINKUP: + // return is_ep4_link_up(); + default: + DH_LOG_ERR(MODULE_MPF, "zte-pci-epf-mdev ioctl cmd error!\n"); + ret = -ENOTTY; + } + + return ret; +} +#endif + +static const struct mdev_parent_ops mdev_fops = { + .owner = THIS_MODULE, + .dev_attr_groups = pci_mdev_dev_groups, + .mdev_attr_groups = mdev_dev_groups, + .supported_type_groups = mdev_type_groups, + .create = pci_mdev_create, + .remove = pci_mdev_remove, + .open = pci_mdev_open, + .release = pci_mdev_close, + .read = pci_mdev_read, + .write = pci_mdev_write, + .mmap = pci_mdev_mmap, + .ioctl = pci_mdev_ioctl, +}; +#endif + +static int pci_epf_mdev_probe(struct pci_epf *epf) +{ + struct pci_epf_mdev_dev *epf_mdev_dev; + struct device *dev = &epf->dev; + int ret = 0; + + epf_mdev_dev = devm_kzalloc(dev, sizeof(*epf_mdev_dev), GFP_KERNEL); + if (!epf_mdev_dev) + return -ENOMEM; + + epf_mdev_dev->epf = epf; + epf->header = + devm_kzalloc(dev, sizeof(struct pci_epf_header), 
GFP_KERNEL); + if (!epf->header) { + ret = -ENOMEM; + goto err; + } + + epf_mdev_dev->created_flag = 0; + + // DMA一致性设置(未必有效,待查) + dev->coherent_dma_mask = ~((u64)0x0); + + epf_set_drvdata(epf, epf_mdev_dev); + + ret = mdev_register_device(dev, &mdev_fops); + if (ret) { + DH_LOG_ERR(MODULE_MPF, "mdev_register_device failed %d\n", ret); + goto err; + } + + return 0; + +err: + devm_kfree(dev, epf->header); + devm_kfree(dev, epf_mdev_dev); + return ret; +} + +static int pci_epf_remove(struct pci_epf *epf) +{ + struct device *dev = &epf->dev; + struct pci_epf_mdev_dev *epf_mdev_dev = epf_get_drvdata(epf); + + mdev_unregister_device(&epf->dev); + if (epf->header) { + devm_kfree(dev, epf->header); + } + if (epf_mdev_dev) { + devm_kfree(dev, epf_mdev_dev); + } + + return 0; +} + +static struct pci_epf_driver pci_epf_mdev_driver = { + .driver.name = "pci-epf-mdev", + .probe = pci_epf_mdev_probe, + .remove = pci_epf_remove, + .id_table = pci_epf_dev_ids, + .ops = &epf_mdev_ops, + .owner = THIS_MODULE, +}; + +static int __init pci_epf_mdev_init(void) +{ + int ret = 0; + + ret = pci_epf_register_driver(&pci_epf_mdev_driver); + if (ret) { + pr_err("Failed to register pci epf test driver --> %d\n", ret); + return ret; + } + + DH_LOG_ERR(MODULE_MPF, "zte_epf driver regist successful\n"); + return ret; +} +module_init(pci_epf_mdev_init); + +static void __exit pci_epf_mdev_exit(void) +{ + pci_epf_unregister_driver(&pci_epf_mdev_driver); +} +module_exit(pci_epf_mdev_exit); + +MODULE_DESCRIPTION("PCI EPF MDEV DRIVER"); +MODULE_AUTHOR("ZTE"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.h b/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.h new file mode 100644 index 000000000000..d031e0068a8f --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/epf/pcie-zte-zf-epf.h @@ -0,0 +1,142 @@ +#ifndef __ZTE_EPF_H +#define __ZTE_EPF_H + +// #include + +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../epc/pcie-zte-zf-epc.h" + +#define EPF_MDEV_IOEVENTFD_MAX 20 + +/* ioctl cmd */ +#define VFIO_OUTBOUND_SET _IO(VFIO_TYPE, VFIO_BASE + 30) +#define VFIO_OUTBOUND_CLEAR _IO(VFIO_TYPE, VFIO_BASE + 31) +#define VFIO_POWER_RESET _IO(VFIO_TYPE, VFIO_BASE + 32) +#define VFIO_VIRTIO_MODULE_SET _IO(VFIO_TYPE, VFIO_BASE + 33) +#define VFIO_LINKUP _IO(VFIO_TYPE, VFIO_BASE + 34) +#define VFIO_OUTBOUND_READ _IO(VFIO_TYPE, VFIO_BASE + 35) +// #define VFIO_EP4_LINKUP _IO(VFIO_TYPE, VFIO_BASE + 35) + +extern int is_pcie_ep_link(int ep_id); +// extern int is_ep4_link_up(void); +extern void ep_power_reset(int ep_id); +extern int ep_virtio_module_set(int ep_id, int pf_id, int en); + +struct ioctl_virtio_data { + int ep_id; + int pf_id; + int en; +}; + +struct ioctl_ob_data { + unsigned long long dpu_paddr; + unsigned long long dpu_vaddr; + unsigned long long host_addr; + unsigned long size; +}; + +struct pci_epf_mdev_dev { + struct pci_epf *epf; + enum pci_barno epf_barno; + size_t msix_table_offset; + const struct pci_epc_features *epc_features; + int created_flag; // 0:Not created, 1:created + void *pf_bar_vaddr[PCI_STD_NUM_BARS + 1]; + void *vf_bar_vaddr[PCI_STD_NUM_BARS]; +}; + +struct epf_mdev_ioeventfd { + struct list_head next; + struct mdev_state *mdev_state; + struct virqfd *virqfd; + u64 data; + loff_t pos; + u64 offset; + int count; +}; + +struct mdev_region_info { + u64 start; + u64 phys_start; + u32 size; + u64 vfio_offset; + u32 argsz; +}; + +struct mdev_state { + int irq_fd; + struct eventfd_ctx *intx_evtfd; + struct eventfd_ctx *msi_evtfd; + struct eventfd_ctx *msix_evtfd; + int irq_index; + struct mutex ops_lock; + struct mdev_device *mdev; + struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; + u32 
bar_mask[VFIO_PCI_NUM_REGIONS]; + struct list_head next; + struct vfio_device_info dev_info; + struct pci_epf_mdev_dev *epf_mdev_dev; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + int ioeventfds_nr; +}; + +// mdev_private struct +struct mdev_parent { + struct device *dev; + const struct mdev_parent_ops *ops; + struct kref ref; + struct list_head next; + struct kset *mdev_types_kset; + struct list_head type_list; + struct rw_semaphore unreg_sem; +}; + +struct mdev_type { + struct kobject kobj; + struct kobject *devices_kobj; + struct mdev_parent *parent; + struct list_head next; + unsigned int type_group_id; +}; +// mdev_private struct end + +struct pci_ob_rw_data { + unsigned long long phys_addr; + unsigned int size; + unsigned int val; +}; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.c b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.c new file mode 100644 index 000000000000..9c4f11c6be57 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.c @@ -0,0 +1,228 @@ +#include +#include "fuc_hotplug.h" +#include "fuc_hotplug_commom.h" + +extern int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); +extern int get_fuc_hp_ret(void); +extern int reset_fuc_hp_ret(void); +static int fuc_hotplug_trigger(u64 arg); +static int fuc_hotplug_get_pf_state(u64 arg); +static int ep_hotplug(u64 arg); + +static struct func_sel ioctl_func_sel[] = { + { FUC_HP_IOCTL_CMD0, fuc_hotplug_trigger }, + { FUC_HP_IOCTL_CMD1, fuc_hotplug_get_pf_state }, + { FUC_HP_IOCTL_CMD2, ep_hotplug }, +}; + +static int pcie_mt_send_msg(void *msg_info, u32 msg_size, void *resp_msg, + u32 resp_size) +{ + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct pci_dev *pdev = NULL; + void __iomem *bar_virt_addr = NULL; + u16 ret = 0, pcie_id = 0; + u64 bar_addr = 0; + u64 bar_len = 0; + + if (msg_info 
== NULL) { + DH_LOG_ERR(MODULE_FUC_HP, "The msg_info is NULL\n"); + return -EINVAL; + } + + pdev = pci_get_device(FUC_HP_VENDOR_ID, FUC_HP_DEVICE_ID, NULL); + if (pdev == NULL) { + DH_LOG_ERR(MODULE_FUC_HP, + "Can not find devices: deviceID %x, VendorID: %x\n", + FUC_HP_VENDOR_ID, FUC_HP_DEVICE_ID); + return -EINVAL; + } + + bar_addr = pci_resource_start(pdev, 0); + bar_len = pci_resource_len(pdev, 0); + DH_LOG_INFO(MODULE_FUC_HP, "bar_addr->0x%llx\n", bar_addr); + bar_virt_addr = ioremap(bar_addr, bar_len); + + /* 填充用户参数in */ + in.virt_addr = (u64)bar_virt_addr + + FUC_HP_BAR_MSG_OFFSET; /* 使用PF1的bar0偏移8k */ + in.payload_addr = msg_info; /* 消息静荷buffer地址 */ + in.payload_len = msg_size; /* 消息长度 */ + in.src = MSG_CHAN_END_PF; /* 从mpf通道下发 */ + in.dst = MSG_CHAN_END_RISC; /* 消息发到risc */ + in.event_id = FUC_HP_EVENT_ID; /* 调用PCIE的消息处理函数 */ + in.src_pcieid = pcie_id; + + result.buffer_len = + BUF_SIZE; /* 用户准备一个存放消息回复的buffer, buffer长度 */ + result.recv_buffer = + kmalloc(result.buffer_len, GFP_KERNEL); /* 消息回复buffer地址 */ + if (!result.recv_buffer) { + DH_LOG_ERR(MODULE_FUC_HP, "Failed to allocate recv_buffer\n"); + return -EINVAL; + } + + memset(result.recv_buffer, 0, result.buffer_len); + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); /* 发送同步消息 */ + + iounmap(bar_virt_addr); + + /* 如果接口返回值不为0,则说明消息失败 */ + if (ret != 0) { + DH_LOG_ERR(MODULE_FUC_HP, "pcie send msg failed, ret:%d.\n", + ret); + goto exit; + } + + /* 如果消息发送成功, 从recv_buffer + 1的位置往后两字节取回复数据长度, recv_buffer + 4的位置开始取数据内容 */ + if (*((u8 *)(result.recv_buffer + 4)) == 0x1) { + memcpy(resp_msg, result.recv_buffer + 4, resp_size); + ret = 0; + } else { + DH_LOG_ERR(MODULE_FUC_HP, "pcie result failed!\n"); + ret = -EINVAL; + } + +exit: + kfree(result.recv_buffer); + result.recv_buffer = NULL; + return ret; +} + +static int fuc_hotplug_trigger(u64 arg) +{ + int ret = 0; + int resp_msg = 0; + struct fuc_hotplug_bar_msg fuc_hotplug_bar_msg = { 0 }; + + if (copy_from_user(&fuc_hotplug_bar_msg, (void __user *)arg, + 
sizeof(struct fuc_hotplug_bar_msg))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy from user\n"); + return -EFAULT; + } + + ret = pcie_mt_send_msg(&fuc_hotplug_bar_msg, + sizeof(struct fuc_hotplug_bar_msg), &resp_msg, + sizeof(int)); + if (ret != 0) { + DH_LOG_ERR(MODULE_FUC_HP, "send failed\n"); + fuc_hotplug_bar_msg.cpl_chk = FUC_HP_RET_FAILED; + } else { + fuc_hotplug_bar_msg.cpl_chk = FUC_HP_RET_FINISH; + } + + if (copy_to_user((void __user *)arg, &fuc_hotplug_bar_msg, + sizeof(struct fuc_hotplug_bar_msg))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy to user\n"); + ret = -EFAULT; + } + + return ret; +} + +static int fuc_hotplug_get_pf_state(u64 arg) +{ + int ret = FUC_HP_OK; + struct get_pf_state_resp get_pf_state_resp = { 0 }; + struct get_pf_state_info get_pf_state_info = { 0 }; + + if (copy_from_user(&get_pf_state_info, (void __user *)arg, + sizeof(struct get_pf_state_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy from user\n"); + return -EFAULT; + } + + ret = pcie_mt_send_msg(&get_pf_state_info, + sizeof(struct get_pf_state_info), + (void *)&get_pf_state_resp, + sizeof(struct get_pf_state_resp)); + if (ret) { + DH_LOG_ERR(MODULE_FUC_HP, "Remote test failed\n"); + goto finish; + } + + if (get_pf_state_info.ep_no >= MAX_FUCTION_HOTPLUG_EP_NUMS) { + DH_LOG_ERR(MODULE_FUC_HP, "Invalid ep_id\n"); + ret = -EINVAL; + goto finish; + } + + get_pf_state_info.cpl_chk = + (get_pf_state_resp.pf_state_of_ep[get_pf_state_info.ep_no] >> + get_pf_state_info.pf_no) & + 0x1; + if (copy_to_user((void __user *)arg, &get_pf_state_info, + sizeof(struct get_pf_state_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy to user\n"); + ret = -EFAULT; + } + + return ret; + +finish: + get_pf_state_info.cpl_chk = FUNCTION_INVALID_TYPE; + if (copy_to_user((void __user *)arg, &get_pf_state_info, + sizeof(struct get_pf_state_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy to user\n"); + ret = -EFAULT; + } + + return ret; +} + +static int ep_hotplug(u64 arg) +{ + int ret = FUC_HP_OK; 
+ struct ep_hotplug_resp ep_hotplug_resp = { 0 }; + struct ep_hotplug_info ep_hotplug_info = { 0 }; + + if (copy_from_user(&ep_hotplug_info, (void __user *)arg, + sizeof(struct ep_hotplug_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy from user\n"); + return -EFAULT; + } + + ret = pcie_mt_send_msg(&ep_hotplug_info, sizeof(struct ep_hotplug_info), + (void *)&ep_hotplug_resp, + sizeof(struct ep_hotplug_resp)); + if (ret) { + DH_LOG_ERR(MODULE_FUC_HP, "Remote test failed\n"); + goto finish; + } + + ep_hotplug_info.cpl_chk = FUC_HP_RET_FINISH; + if (copy_to_user((void __user *)arg, &ep_hotplug_info, + sizeof(struct ep_hotplug_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy to user\n"); + ret = -EFAULT; + } + + return ret; + +finish: + ep_hotplug_info.cpl_chk = FUC_HP_RET_FAILED; + if (copy_to_user((void __user *)arg, &ep_hotplug_info, + sizeof(struct ep_hotplug_info))) { + DH_LOG_ERR(MODULE_FUC_HP, "Can not copy to user\n"); + ret = -EFAULT; + } + + return ret; +} + +long fuc_hp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + u32 i = 0; + u32 ioctl_func_nums = sizeof(ioctl_func_sel) / sizeof(struct func_sel); + + for (i = 0; i < ioctl_func_nums; i++) { + if (ioctl_func_sel[i].cmd == cmd) { + return ioctl_func_sel[i].ioctl_func(arg); + } + } + + return -EINVAL; +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.h b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.h new file mode 100644 index 000000000000..faca3f791c57 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug.h @@ -0,0 +1,62 @@ +#ifndef _FUC_HOTPLUG_H_ +#define _FUC_HOTPLUG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FUC_HP_EVENT_ID 44 +#define BUF_SIZE 0x1000 + +/* bar消息通道使用pf信息 */ +#define FUC_HP_BAR_MSG_OFFSET 
(0x2000) +#define FUC_HP_VENDOR_ID (0x1cf2) +#define FUC_HP_DEVICE_ID (0x8044) +#define FUC_HP_IOREMAP_SIZE (0x3000) + +#define FUC_HP_POLLING_SPAN 100 +#define FUC_HP_TIMEOUT_TH 3000 + +/* data type */ +typedef unsigned long long int u64; +typedef signed long long int s64; + +typedef unsigned int u32; +typedef signed int s32; + +typedef unsigned short int u16; +typedef signed short int s16; + +typedef unsigned char u8; +typedef signed char s8; + +struct func_sel { + unsigned int cmd; + int (*ioctl_func)(unsigned long long arg); +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_commom.h b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_commom.h new file mode 100644 index 000000000000..c6aa9499b7ef --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_commom.h @@ -0,0 +1,77 @@ +#ifndef __FUC_HOTPLUG_COMMOM_H +#define __FUC_HOTPLUG_COMMOM_H + +#define ARG_START_NO 3 +#define ARG_TYPE_NO 1 +#define FUC_HP_OK 0 +#define FUC_HP_FAILED -1 + +#define FUC_HP_IOCTL_TYPE '>' +#define FUC_HP_IOCTL_MAGIC 117 /* random */ +#define FUC_HP_IOCTL_CMD0 (_IO(FUC_HP_IOCTL_TYPE, FUC_HP_IOCTL_MAGIC + 0)) +#define FUC_HP_IOCTL_CMD1 (_IO(FUC_HP_IOCTL_TYPE, FUC_HP_IOCTL_MAGIC + 1)) +#define FUC_HP_IOCTL_CMD2 (_IO(FUC_HP_IOCTL_TYPE, FUC_HP_IOCTL_MAGIC + 2)) + +#define MIN_EP_ID 5 +#define MAX_FUCTION_HOTPLUG_EP_NUMS 4 + +#define FUC_HOTPLUG_MEMBER_NUMS 10 +struct fuc_hotplug_bar_msg { + unsigned int cmd; + unsigned int fuc_hotplug_info; + unsigned int timeout; + unsigned int cpl_chk; +}; + +#define FUC_HOTPLUG_TIMEOUT_NUMS 1 +struct fuc_hotplug_set_timeout { + unsigned int timeout; + unsigned int cpl_chk; +}; + +struct get_pf_state_info { + unsigned int cmd; + unsigned int ep_no; + unsigned int pf_no; + unsigned int cpl_chk; +}; + +struct get_pf_state_resp { + uint8_t check_cpl; + uint8_t pf_state_of_ep[MAX_FUCTION_HOTPLUG_EP_NUMS]; +}; + +#define EP_HOTPLUG_MEMBER_NUMS 4 +struct 
ep_hotplug_info { + unsigned int cmd; + unsigned int ops_type; + unsigned int ep_no; + unsigned int cpl_chk; +}; + +struct ep_hotplug_resp { + uint8_t check_cpl; +}; + +typedef enum { + FUC_HP_RET_TIMEOUT = 0, + FUC_HP_RET_FINISH, + FUC_HP_RET_FAILED, + INVALID_FUC_HP_RETURE +} FUC_HP_RETURE; + +typedef enum { + FUNCTION_REMOVE = 1, + FUNCTION_INSERT, + FUNCTION_INVALID_TYPE, +} FUNCTION_HP_TYPE; + +typedef enum { + FUC_HP_BAR_MSG_CMD = 1, + USED_BY_HOST_HP, + GET_STATE_BAR_MSG_CMD, + EP_HP_BAR_MSG_CMD, + INVALID_CMD, +} HOTPLUG_CMD; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.c b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.c new file mode 100644 index 000000000000..dc8cb1407466 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.c @@ -0,0 +1,72 @@ +#include "fuc_hotplug_ioctl.h" +#include "fuc_hotplug_commom.h" + +static dev_t dev; +static struct cdev c_dev; +static struct class *cl; + +extern long fuc_hp_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +static int fuc_hp_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) { + file->private_data = inode->i_private; + } + + return FUC_HP_OK; +} + +static int fuc_hp_dev_release(struct inode *i, struct file *f) +{ + DH_LOG_INFO(MODULE_FUC_HP, "fuc_hp device released!\n"); + return FUC_HP_OK; +} + +static struct file_operations fuc_hp_fops = { + .owner = THIS_MODULE, + .open = fuc_hp_open, + .release = fuc_hp_dev_release, + .unlocked_ioctl = fuc_hp_ioctl, +}; + +int zxdh_host_fuc_hotplug_driver_init(void) +{ + int ret = FUC_HP_OK; + + if (alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME) < 0) { + return -EBUSY; + } + + cl = class_create(THIS_MODULE, CLASS_NAME); + if (cl == NULL) { + unregister_chrdev_region(dev, 1); + return -ENOMEM; + } + + if (device_create(cl, NULL, dev, NULL, DEVICE_NAME) == NULL) { + class_destroy(cl); + unregister_chrdev_region(dev, 
1); + return -ENOMEM; + } + + cdev_init(&c_dev, &fuc_hp_fops); + + if (cdev_add(&c_dev, dev, 1) == FUC_HP_FAILED) { + device_destroy(cl, dev); + class_destroy(cl); + unregister_chrdev_region(dev, 1); + return -ENOMEM; + } + DH_LOG_INFO(MODULE_FUC_HP, "fuction_hotplug device registered\n"); + return ret; +} + +void zxdh_host_fuc_hotplug_driver_exit(void) +{ + cdev_del(&c_dev); + device_destroy(cl, dev); + class_destroy(cl); + unregister_chrdev_region(dev, 1); + DH_LOG_INFO(MODULE_FUC_HP, "fuc_hp device unregistered\n"); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.h b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.h new file mode 100644 index 000000000000..30483806ef77 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hotplug_ioctl.h @@ -0,0 +1,20 @@ +#ifndef __DPU_FUC_HOTPLUG_IOCTL_H +#define __DPU_FUC_HOTPLUG_IOCTL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEVICE_NAME "fuc_hp_ioctl" +#define CLASS_NAME "fuc_hp_class" + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/build.sh b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/build.sh new file mode 100755 index 000000000000..724f8d5263f4 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/build.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# 按需选择是否需要静态编译 +gcc fuc_hp_app.c -o dpu_hotplug +# gcc -static gcc fuc_hp_app.c -o dpu_hotplug diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.c b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.c new file mode 100644 index 000000000000..dd63f2796872 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.c @@ -0,0 +1,257 @@ +#include "fuc_hp_app.h" +#include "../fuc_hotplug_commom.h" + +static int 
printf_usage(int argc, char *argv[]); +static int fuc_hp(int argc, char *argv[]); +static int ep_hp(int argc, char *argv[]); + +static int printf_usage(int argc, char *argv[]) +{ + printf("** dpu_hotplug -s fuc -e epid -p pfid -v vfid -o add/del -t time \n"); + printf("** -s 指定拔插场景,fuction级热插拔固定输入:fuc \n"); + printf("** -e 指定epid \n"); + printf("** -p 指定pfid \n"); + printf("** -v 指定vfid, vfid为0,表示pf \n"); + printf("** vfid > 0,表示vf,vfid_real = vfid - 1 \n"); + printf("** -o 指定插拔操作, add 插入,del 拔出 \n"); + printf("** -t 指定超时时间,单位:s \n"); + printf("** \n"); + printf("** dpu_hotplug -s ep -e epid -o add/del \n"); + printf("** -s 指定拔插场景,ep级热插拔固定输入:ep \n"); + printf("** -e 指定epid \n"); + printf("** -o 指定插拔操作, add 插入,del 拔出 \n"); + return FUC_HP_OK; +}; + +struct fuc_hp_app_input fuc_hp_app_input[] = { + /* ops_type ops_value*/ + { "-e", 0 }, + { "-p", 0 }, + { "-v", 0 }, + { "-o", 0 }, + { "-t", 0 } +}; + +struct fuc_hp_app_input ep_hp_app_input[] = { + /* ops_type ops_value*/ + { "-e", 0 }, + { "-o", 0 }, +}; + +static int parse_fuc_hp_input(struct fuc_hotplug_bar_msg *fuc_hotplug_bar_msg) +{ + fuc_hotplug_bar_msg->fuc_hotplug_info = + ((fuc_hp_app_input[EP_ID].input_value + MIN_EP_ID) + << FUNC_HP_EP_ID_START_BIT) | + (fuc_hp_app_input[PF_ID].input_value + << FUNC_HP_PF_ID_START_BIT); + + if (fuc_hp_app_input[VF_ID].input_value != 0) { + fuc_hotplug_bar_msg->fuc_hotplug_info |= + ((fuc_hp_app_input[2].input_value - 1) + << FUNC_HP_VF_ID_START_BIT); + fuc_hotplug_bar_msg->fuc_hotplug_info |= + (1 << FUNC_HP_FUNC_TYPE_START_BIT); + } + + if (fuc_hp_app_input[OPS_ID].input_value != 0) { + fuc_hotplug_bar_msg->fuc_hotplug_info |= + (fuc_hp_app_input[OPS_ID].input_value + << FUNC_HP_SCENE_CODE_START_BIT); + } else { + return FUC_HP_FAILED; + } + + fuc_hotplug_bar_msg->timeout = fuc_hp_app_input[TIMEOUT_ID].input_value; + + printf("value:0x%x timeout:%ds\n", + fuc_hotplug_bar_msg->fuc_hotplug_info, + fuc_hotplug_bar_msg->timeout); + + return FUC_HP_OK; +} + +static int 
fuc_hp(int argc, char *argv[]) +{ + int ret = FUC_HP_OK; + int fd = 0; + int arg_no = ARG_START_NO; + int input_no = 0; + char *stop_at = NULL; + struct fuc_hotplug_bar_msg *fuc_hotplug_bar_msg = NULL; + + fuc_hotplug_bar_msg = malloc(sizeof(struct fuc_hotplug_bar_msg)); + + fuc_hotplug_bar_msg->fuc_hotplug_info = 0; + + if (argc != FUC_HOTPLUG_MEMBER_NUMS + ARG_START_NO) { + printf("[%s]: Invalid argument count %d\n", __func__, argc); + printf_usage(argc, argv); + goto failed; + } + + while (arg_no < argc) { + if (strcmp(argv[arg_no++], + fuc_hp_app_input[input_no].input_type) == 0) { + if (input_no == OPS_ID) { + fuc_hp_app_input[input_no++].input_value = + strcmp(argv[arg_no], "add") == 0 ? + FUNCTION_INSERT : + strcmp(argv[arg_no], "del") == 0 ? + FUNCTION_REMOVE : + 0; + arg_no++; + } else { + fuc_hp_app_input[input_no++].input_value = + strtoul(argv[arg_no++], &stop_at, 0); + } + } else { + printf("[%s]: Invalid input %s\n", __func__, + fuc_hp_app_input[input_no].input_type); + printf_usage(argc, argv); + goto failed; + } + } + + ret = parse_fuc_hp_input(fuc_hotplug_bar_msg); + if (ret != FUC_HP_OK) { + printf_usage(argc, argv); + goto failed; + } + + fuc_hotplug_bar_msg->cmd = FUC_HP_BAR_MSG_CMD; + + fd = open(FUC_HP_IOCTRL_DEV_NAME, O_RDWR, 0); + if (fd < 0) { + printf("[%s]: Can not open %s \n", __func__, + FUC_HP_IOCTRL_DEV_NAME); + goto failed; + } + + ret = ioctl(fd, FUC_HP_IOCTL_CMD0, fuc_hotplug_bar_msg); + if (ret) { + printf("[%s]: ERR --> %d\n", __func__, ret); + goto finish; + } + + if (fuc_hotplug_bar_msg->cpl_chk == FUC_HP_RET_FINISH) { + printf("[%s] fuction hotplug finish!!\n", __func__); + } else if (fuc_hotplug_bar_msg->cpl_chk == FUC_HP_RET_FAILED) { + printf("[%s] fuction hotplug failed!\n", __func__); + } else { + printf("[%s] fuction hotplug timeout!!!\n", __func__); + } + +finish: + free(fuc_hotplug_bar_msg); + fuc_hotplug_bar_msg = NULL; + close(fd); + return ret; + +failed: + free(fuc_hotplug_bar_msg); + fuc_hotplug_bar_msg = NULL; + 
return ret; +} + +static int ep_hp(int argc, char *argv[]) +{ + int ret = FUC_HP_OK; + int fd = 0; + int arg_no = ARG_START_NO; + int input_no = 0; + char *stop_at = NULL; + struct ep_hotplug_info *ep_hotplug_info = NULL; + + ep_hotplug_info = malloc(sizeof(struct ep_hotplug_info)); + + if (argc != EP_HOTPLUG_MEMBER_NUMS + ARG_START_NO) { + printf("[%s]: Invalid argument count %d\n", __func__, argc); + printf_usage(argc, argv); + goto failed; + } + + while (arg_no < argc) { + if (strcmp(argv[arg_no++], + ep_hp_app_input[input_no].input_type) == 0) { + if (input_no == E_OPS_ID) { + ep_hp_app_input[input_no++].input_value = + strcmp(argv[arg_no], "add") == 0 ? + FUNCTION_INSERT : + strcmp(argv[arg_no], "del") == 0 ? + FUNCTION_REMOVE : + 0; + arg_no++; + } else { + ep_hp_app_input[input_no++].input_value = + strtoul(argv[arg_no++], &stop_at, 0); + } + } else { + printf("[%s]: Invalid input %s\n", __func__, + ep_hp_app_input[input_no].input_type); + printf_usage(argc, argv); + goto failed; + } + } + + ep_hotplug_info->cmd = EP_HP_BAR_MSG_CMD; + ep_hotplug_info->ep_no = ep_hp_app_input[E_EP_ID].input_value; + ep_hotplug_info->ops_type = ep_hp_app_input[E_OPS_ID].input_value; + + fd = open(FUC_HP_IOCTRL_DEV_NAME, O_RDWR, 0); + if (fd < 0) { + printf("[%s]: Can not open %s \n", __func__, + FUC_HP_IOCTRL_DEV_NAME); + goto failed; + } + + ret = ioctl(fd, FUC_HP_IOCTL_CMD2, ep_hotplug_info); + if (ret) { + printf("[%s]: ERR --> %d\n", __func__, ret); + goto finish; + } + + if (ep_hotplug_info->cpl_chk == FUC_HP_RET_FINISH) { + printf("[%s] ep hotplug finish!!\n", __func__); + } else if (ep_hotplug_info->cpl_chk == FUC_HP_RET_FAILED) { + printf("[%s] ep hotplug failed!\n", __func__); + } else { + printf("[%s] ep hotplug timeout!!!\n", __func__); + } + +finish: + free(ep_hotplug_info); + ep_hotplug_info = NULL; + close(fd); + return ret; + +failed: + free(ep_hotplug_info); + ep_hotplug_info = NULL; + return ret; +} + +int main(int argc, char *argv[]) +{ + int ret = 0; + int 
arg_no = ARG_TYPE_NO; + + if (argc < ARG_START_NO) { + printf("[%s]: Invalid argument count %d\n", __func__, argc); + printf_usage(argc, argv); + ret = FUC_HP_FAILED; + goto failed; + } + + printf("start\n"); + if (strcmp(argv[arg_no++], "-s") == 0) { + if (strcmp(argv[arg_no], "ep") == 0) { + ret = ep_hp(argc, argv); + } else if (strcmp(argv[arg_no], "fuc") == 0) { + ret = fuc_hp(argc, argv); + } + } + +failed: + return ret; +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.h b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.h new file mode 100644 index 000000000000..0a915b8db1dd --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/fuc_hotplug/fuc_hp_app/fuc_hp_app.h @@ -0,0 +1,48 @@ +#ifndef __DPMT_APP_H +#define __DPMT_APP_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sys/ioctl.h" + +#define FUC_HP_IOCTRL_DEV_NAME "/dev/fuc_hp_ioctl" + +#define FUNC_HP_SCENE_CODE_START_BIT 21 +#define FUNC_HP_FUNC_TYPE_START_BIT 20 +#define FUNC_HP_EP_ID_START_BIT 16 +#define FUNC_HP_PF_ID_START_BIT 12 +#define FUNC_HP_VF_ID_START_BIT 0 + +struct hp_app_func { + char *name; + int (*func)(int argc, char *argv[]); +}; + +struct fuc_hp_app_input { + char *input_type; + unsigned int input_value; +}; + +typedef enum { + EP_ID = 0, + PF_ID, + VF_ID, + OPS_ID, + TIMEOUT_ID, + INVALID_FUC_HP_INPUT +} FUC_HP_INPUT; + +typedef enum { E_EP_ID = 0, + E_OPS_ID, + E_INVALID_FUC_HP_INPUT } EP_HP_INPUT; + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/gdma.c b/drivers/net/ethernet/dinghai/zf_mpf/gdma.c new file mode 100644 index 000000000000..87052aec4a2f --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/gdma.c @@ -0,0 +1,551 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include + +#include "gdma.h" +#include "zf_mpf.h" + +/* + * User define: + * ep_id-bit[15:12] vfunc_num-bit[11:4] func_num-bit[3:1] vfunc_active-bit0 + * host ep_id:5~8 zf ep_id:9 + */ +#define ZF_GDMA_ZF_USER (0x9000) /* ep4 pf0 */ +#define ZF_GDMA_PF_NUM_SHIFT (1) +#define ZF_GDMA_VF_NUM_SHIFT (4) +#define ZF_GDMA_EP_ID_SHIFT (12) +#define ZF_GDMA_VF_EN (1) +#define ZF_GDMA_VF_MASK (1UL << 7) + +/* Register offset */ +#define ZF_GDMA_BASE_OFFSET (0x100000) +#define ZF_GDMA_CHAN_SHIFT (0x80) +#define ZF_GDMA_EXT_ADDR_OFFSET (0x218) +#define ZF_GDMA_SAR_LOW_OFFSET (0x200) +#define ZF_GDMA_DAR_LOW_OFFSET (0x204) +#define ZF_GDMA_SAR_HIGH_OFFSET (0x234) +#define ZF_GDMA_DAR_HIGH_OFFSET (0x238) +#define ZF_GDMA_XFERSIZE_OFFSET (0x208) +#define ZF_GDMA_CONTROL_OFFSET (0x230) +#define ZF_GDMA_TC_STATUS_OFFSET (0x0) +#define ZF_GDMA_STATUS_CLEAN_OFFSET (0x80) +#define ZF_GDMA_LINKADDR_LOW_OFFSET (0x21c) +#define ZF_GDMA_LINKADDR_HIGH_OFFSET (0x220) +#define ZF_GDMA_CHAN_CONTINUE_OFFSET (0x224) +#define ZF_GDMA_TC_CNT_OFFSET (0x23c) +#define ZF_GDMA_LLI_USER_OFFSET (0x228) +#define ZF_GDMA_PULSE_WIDTH_OFFSET (0x1ec) + +/* Control register */ +#define ZF_GDMA_CHAN_ENABLE (1UL) +#define ZF_GDMA_SOFT_CHAN (1UL << 1) +#define ZF_GDMA_TC_INTR_ENABLE (1UL << 4) +#define ZF_GDMA_ERR_INTR_ENABLE (1UL << 5) +#define ZF_GDMA_SBS_SHIFT (6) /* src burst size */ +#define ZF_GDMA_SBL_SHIFT (9) /* src burst length */ +#define ZF_GDMA_DBS_SHIFT (13) /* dest burst size */ +#define ZF_GDMA_BURST_SIZE_MIN (0x1) /* 1 byte */ +#define ZF_GDMA_BURST_SIZE_MEDIUM (0x4) /* 4 word */ +#define ZF_GDMA_BURST_SIZE_MAX (0x6) /* 16 word */ +#define ZF_GDMA_DEFAULT_BURST_LEN (0xf) /* 16 beats */ +#define ZF_GDMA_TC_CNT_ENABLE (1UL << 27) +#define ZF_GDMA_CHAN_FORCE_CLOSE (1UL << 31) + +/* TC count & Error interrupt status register */ +#define ZF_GDMA_SRC_LLI_ERR (1UL << 16) +#define ZF_GDMA_SRC_DATA_ERR (1UL << 17) +#define ZF_GDMA_DST_ADDR_ERR (1UL << 18) +#define ZF_GDMA_ERR_STATUS (1UL << 
19) +#define ZF_GDMA_ERR_RPT_ENABLE (1UL << 20) +#define ZF_GDMA_TC_CNT_CLEAN (1) + +#define ZF_GDMA_ALIGN_SIZE (1) +#define ZF_GDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ZF_GDMA_BUFF_SIZE_MAX (0xfffff) + +#define LOW16_MASK (0xffff) +#define LOW32_MASK (0xffffffff) + +void gchan_irq_tasklet_process(unsigned long data); +static void zf_gdma_enqueue_buff(struct zf_gdma_chan *gchan); + +static struct zf_gdma_chan *to_zf_gdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct zf_gdma_chan, vc.chan); +} + +static struct zf_gdma_desc *to_zf_gdma_desc(struct zxdh_virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct zf_gdma_desc, vd); +} + +static inline uint32_t zf_gdma_read_reg(struct zf_gdma_chan *gchan, + uint16_t chan_id, uint32_t offset) +{ + uint64_t addr = chan_id * ZF_GDMA_CHAN_SHIFT + offset; + + return *(volatile uint32_t *)(gchan->gdev->base_addr + addr); +} + +static inline void zf_gdma_write_reg(struct zf_gdma_chan *gchan, + uint16_t chan_id, uint32_t offset, + uint32_t val) +{ + uint64_t addr = chan_id * ZF_GDMA_CHAN_SHIFT + offset; + + *(volatile uint32_t *)(gchan->gdev->base_addr + addr) = val; +} + +static inline void zf_gdma_user_get(struct zf_rbp_info *rbp_info, + uint32_t *user) +{ + uint32_t pf_id = rbp_info->pfid; + + //host addr + if (rbp_info->host) { + if ((pf_id & ZF_GDMA_VF_MASK) != 0) { + pf_id &= ~ZF_GDMA_VF_MASK; + *user = (ZF_GDMA_VF_EN | + (rbp_info->vfid << ZF_GDMA_VF_NUM_SHIFT)); + } + *user |= ((rbp_info->epid << ZF_GDMA_EP_ID_SHIFT) | + (pf_id << ZF_GDMA_PF_NUM_SHIFT)); + } else { + *user = ZF_GDMA_ZF_USER; + } +} + +static inline void zf_gdma_cfg_get(uint32_t *val, uint8_t tc_enable) +{ + *val = (ZF_GDMA_CHAN_ENABLE | ZF_GDMA_SOFT_CHAN | + ZF_GDMA_TC_INTR_ENABLE | ZF_GDMA_ERR_INTR_ENABLE | + (ZF_GDMA_DEFAULT_BURST_LEN << ZF_GDMA_SBL_SHIFT) | + (ZF_GDMA_BURST_SIZE_MAX << ZF_GDMA_SBS_SHIFT) | + (ZF_GDMA_BURST_SIZE_MAX << ZF_GDMA_DBS_SHIFT)); + + if (tc_enable != 0) { + *val |= 
ZF_GDMA_TC_CNT_ENABLE; + } +} + +static void zf_gdma_desc_free(struct zxdh_virt_dma_desc *vd) +{ + if (vd != NULL) { + kfree(to_zf_gdma_desc(vd)); + } +} + +static int32_t zf_gdma_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static void zf_gdma_free_chan_resources(struct dma_chan *chan) +{ +} + +static int32_t zf_gdma_device_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + return 0; +} + +static struct dma_async_tx_descriptor * +zf_gdma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct zf_gdma_chan *gchan = NULL; + struct zf_gdma_desc *desc = NULL; + struct zf_dma_addr_rbp *srbp = NULL; + struct zf_dma_addr_rbp *drbp = NULL; + uint32_t src_user = 0; + uint32_t dst_user = 0; + unsigned long status = 0; + + if ((chan == NULL) || ((void *)dst == NULL) || ((void *)src == NULL)) { + printk(KERN_ERR "%s:param is invalid\n", __func__); + return ERR_PTR(-EINVAL); + } + + gchan = to_zf_gdma_chan(chan); + srbp = (struct zf_dma_addr_rbp *)src; + drbp = (struct zf_dma_addr_rbp *)dst; + + desc = kzalloc(sizeof(struct zf_gdma_desc), GFP_KERNEL); + if (desc == NULL) { + printk(KERN_ERR "%s: Failed to alloc gdma desc\n", __func__); + return ERR_PTR(-ENOMEM); + } + + zf_gdma_user_get(&srbp->rbp_info, &src_user); + zf_gdma_user_get(&drbp->rbp_info, &dst_user); + + desc->user = ((src_user & LOW16_MASK) | (dst_user << 16)); + desc->src = (uint64_t)(srbp->addr); + desc->dst = (uint64_t)(drbp->addr); + desc->len = (uint64_t)len; + desc->chan = gchan; + spin_lock_irqsave(&gchan->chan_lock, status); + list_add_tail(&desc->node, &gchan->desc_list); + spin_unlock_irqrestore(&gchan->chan_lock, status); + + return zxdh_vchan_tx_prep(&gchan->vc, &desc->vd, flags); +} + +static void zf_gdma_issue_pending(struct dma_chan *chan) +{ + struct zf_gdma_chan *gchan = NULL; + unsigned long flags = 0; + bool pending = false; + + if (chan == NULL) + return; + + gchan = to_zf_gdma_chan(chan); + + 
spin_lock_irqsave(&gchan->vc.lock, flags); + if (zxdh_vchan_issue_pending(&gchan->vc)) { + pending = true; + } + spin_unlock_irqrestore(&gchan->vc.lock, flags); + + if (pending) { + zf_gdma_enqueue_buff(gchan); + } + + return; +} + +static enum dma_status zf_gdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + return 0; +} + +static int32_t zf_gdma_terminate_all(struct dma_chan *chan) +{ + return 0; +} + +void zf_gdma_dev_init(struct device *dev, struct dma_device *dd) +{ + dd->device_alloc_chan_resources = zf_gdma_alloc_chan_resources; + dd->device_free_chan_resources = zf_gdma_free_chan_resources; + dd->device_config = zf_gdma_device_config; + dd->device_prep_dma_memcpy = zf_gdma_prep_dma_memcpy; + dd->device_issue_pending = zf_gdma_issue_pending; + dd->device_tx_status = zf_gdma_tx_status; + dd->device_terminate_all = zf_gdma_terminate_all; + + dd->chancnt = ZF_GDMA_CHAN_NUM; + dd->privatecnt = 0; + dd->copy_align = ZF_GDMA_ALIGN_SIZE; + dd->src_addr_widths = ZF_GDMA_DMA_BUSWIDTHS; + dd->dst_addr_widths = ZF_GDMA_DMA_BUSWIDTHS; + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + dd->dev = dev; + + dma_cap_zero(dd->cap_mask); + dma_cap_set(DMA_RBP, dd->cap_mask); +} + +static int32_t zf_gdma_virt_chan_init(struct zf_gdma_dev *gdev) +{ + struct zf_gdma_chan *gchan = NULL; + uint32_t val = 0; + uint16_t i = 0; + + INIT_LIST_HEAD(&gdev->dd->channels); + for (i = 0; i < ZF_GDMA_CHAN_NUM; i++) { + gchan = &gdev->chan[i]; + gchan->status = GDMA_CHAN_IDLE; + gchan->chan_id = ZF_GDMA_CHAN_BASE + i; + gchan->gdev = gdev; + gchan->vc.desc_free = zf_gdma_desc_free; + tasklet_init(&gchan->task, gchan_irq_tasklet_process, + (unsigned long)gchan); + + zxdh_vchan_init(&gchan->vc, gdev->dd); + spin_lock_init(&gchan->chan_lock); + INIT_LIST_HEAD(&gchan->desc_list); + + /* reset gdma channel */ + val = ZF_GDMA_CHAN_FORCE_CLOSE; + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_CONTROL_OFFSET, + val); + + val = 
ZF_GDMA_ERR_RPT_ENABLE | ZF_GDMA_ERR_STATUS | + ZF_GDMA_TC_CNT_CLEAN; + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_TC_CNT_OFFSET, + val); + } + + /* Configure interrupt pulse width to 8 cycle */ + zf_gdma_write_reg(gchan, 0, ZF_GDMA_PULSE_WIDTH_OFFSET, 7); + + return 0; +} + +static int32_t zf_gdma_xmit_done(struct zf_gdma_chan *gchan) +{ + struct zxdh_virt_dma_desc *vdesc = NULL; + uint32_t widx = gchan->chan_id / 32; + uint32_t bidx = gchan->chan_id % 32; + uint32_t val = 0; + unsigned long flags = 0; + + val = zf_gdma_read_reg( + gchan, 0, ZF_GDMA_TC_STATUS_OFFSET + (widx * sizeof(uint32_t))); + if ((val & (1UL << bidx)) == 0) { + printk(KERN_ERR "%s:chan%d tc status error\n", __func__, + gchan->chan_id); + spin_lock(&gchan->chan_lock); + gchan->status = GDMA_CHAN_ERR; + spin_unlock(&gchan->chan_lock); + return -1; + } + + pr_debug("chan%d transfer success\n", gchan->chan_id); + zf_gdma_write_reg(gchan, 0, + ZF_GDMA_STATUS_CLEAN_OFFSET + + (widx * sizeof(uint32_t)), + 1 << bidx); + + spin_lock(&gchan->chan_lock); + if (gchan->desc == NULL) { + printk(KERN_ERR "%s:chan%d descriptor missing\n", __func__, + gchan->chan_id); + gchan->status = GDMA_CHAN_ERR; + spin_unlock(&gchan->chan_lock); + return -1; + } + + if (gchan->desc->len == 0) { + list_del(&gchan->desc->node); + vdesc = &gchan->desc->vd; + gchan->desc = NULL; + + spin_lock_irqsave(&gchan->vc.lock, flags); + list_del(&vdesc->node); + zxdh_vchan_cookie_complete(vdesc); + spin_unlock_irqrestore(&gchan->vc.lock, flags); + } + gchan->status = GDMA_CHAN_IDLE; + spin_unlock(&gchan->chan_lock); + + return 0; +} + +static void zf_gdma_enqueue_buff(struct zf_gdma_chan *gchan) +{ + struct zf_gdma_desc *desc = NULL; + struct zxdh_virt_dma_desc *vdesc = NULL; + uint32_t val = 0; + uint64_t cur_len = 0; + unsigned long flags = 0; + + spin_lock_irqsave(&gchan->chan_lock, flags); + if (gchan->status != GDMA_CHAN_IDLE) { + goto out; + } + spin_lock(&gchan->vc.lock); + vdesc = zxdh_vchan_next_desc(&gchan->vc); + 
spin_unlock(&gchan->vc.lock); + if (vdesc == NULL) { + goto out; + } + gchan->status = GDMA_CHAN_BUSY; + spin_unlock_irqrestore(&gchan->chan_lock, flags); + + desc = to_zf_gdma_desc(vdesc); + gchan->desc = desc; + cur_len = (desc->len > ZF_GDMA_BUFF_SIZE_MAX) ? ZF_GDMA_BUFF_SIZE_MAX : + desc->len; + + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_SAR_LOW_OFFSET, + desc->src & LOW32_MASK); + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_SAR_HIGH_OFFSET, + (desc->src >> 32) & LOW32_MASK); + + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_DAR_LOW_OFFSET, + desc->dst & LOW32_MASK); + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_DAR_HIGH_OFFSET, + (desc->dst >> 32) & LOW32_MASK); + + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_XFERSIZE_OFFSET, + cur_len); + + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_EXT_ADDR_OFFSET, + desc->user); + + desc->src += cur_len; + desc->dst += cur_len; + desc->len -= cur_len; + smp_mb(); + + zf_gdma_cfg_get(&val, 1); + zf_gdma_write_reg(gchan, gchan->chan_id, ZF_GDMA_CONTROL_OFFSET, val); + + return; + +out: + spin_unlock_irqrestore(&gchan->chan_lock, flags); +} + +static void zf_gdma_free_channels(struct zf_gdma_dev *gdev) +{ + struct zf_gdma_chan *gchan = NULL; + struct zf_gdma_desc *desc = NULL; + struct zf_gdma_desc *tmp = NULL; + uint16_t i = 0; + unsigned long flags = 0; + + for (i = 0; i < ZF_GDMA_CHAN_NUM; i++) { + gchan = &gdev->chan[i]; + tasklet_kill(&gchan->task); + + spin_lock_irqsave(&gchan->chan_lock, flags); + spin_lock(&gchan->vc.lock); + list_for_each_entry_safe(desc, tmp, &gchan->desc_list, node) { + list_del(&desc->vd.node); + list_del(&desc->node); + kfree(desc); + } + spin_unlock(&gchan->vc.lock); + spin_unlock_irqrestore(&gchan->chan_lock, flags); + } +} + +int32_t dh_zf_mpf_gdma_init(struct dh_core_dev *dh_dev) +{ + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + struct zf_gdma_dev *gdev = NULL; + struct device *dev = NULL; + int32_t node = 0; + int32_t ret = 0; + + if (dh_dev->pdev == 
NULL) { + printk(KERN_ERR "%s: pdev is invalid\n", __func__); + return -ENODEV; + } + + if (mpf_dev->pci_ioremap_addr == 0) { + printk(KERN_ERR "%s: pci_ioremap_addr is invalid\n", __func__); + return -ENAVAIL; + } + + gdev = kzalloc(sizeof(struct zf_gdma_dev), GFP_KERNEL); + if (gdev == NULL) { + printk(KERN_ERR "%s: Failed to alloc gdev\n", __FUNCTION__); + return -ENOMEM; + } + mpf_dev->gdev = gdev; + + gdev->base_addr = mpf_dev->pci_ioremap_addr + ZF_GDMA_BASE_OFFSET; + gdev->pdev = dh_dev->pdev; + dev = &(dh_dev->pdev->dev); + node = dev_to_node(dev); + gdev->dd = kzalloc_node(sizeof(struct dma_device), GFP_KERNEL, node); + if (gdev->dd == NULL) { + printk(KERN_ERR "%s: Failed to alloc dma_device\n", + __FUNCTION__); + ret = -ENOMEM; + goto free_gdev; + } + + zf_gdma_dev_init(dev, gdev->dd); + zf_gdma_virt_chan_init(gdev); + + ret = dma_async_device_register(gdev->dd); + if (ret != 0) { + printk(KERN_ERR "%s: Failed to register gdma device\n", + __func__); + goto err_out; + } + + return 0; + +err_out: + zf_gdma_free_channels(gdev); + kfree(gdev->dd); +free_gdev: + kfree(gdev); + return ret; +} + +void dh_zf_mpf_gdma_uninit(struct dh_core_dev *dh_dev) +{ + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + struct zf_gdma_dev *gdev = NULL; + + if (mpf_dev->gdev == NULL) { + printk(KERN_ERR "%s:gdev is invalid\n", __func__); + return; + } + gdev = mpf_dev->gdev; + + if (gdev->dd != NULL) { + dma_async_device_unregister(gdev->dd); + kfree(gdev->dd); + } + zf_gdma_free_channels(gdev); + kfree(gdev); +} + +int32_t zf_gdma_err_irq_handle(struct notifier_block *nb, unsigned long action, + void *data) +{ + pr_debug("%s is called\n", __func__); + + return 0; +} + +int32_t zf_gdma_chan_irq_handle(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_async *eq = container_of(nb, struct dh_eq_async, irq_nb); + struct zf_gdma_chan *gchan = NULL; + + if (eq->priv == NULL) { + printk(KERN_ERR "%s:eq->priv is NULL\n", __func__); + return -1; + } 
+ gchan = (struct zf_gdma_chan *)eq->priv; + tasklet_hi_schedule(&gchan->task); + + return 0; +} + +void gchan_irq_tasklet_process(unsigned long data) +{ + struct zf_gdma_chan *gchan = NULL; + + if (unlikely(data == 0)) { + printk(KERN_ERR "%s:param is invalid\n", __func__); + return; + } + + gchan = (struct zf_gdma_chan *)data; + if (zf_gdma_xmit_done(gchan) != 0) + return; + + zf_gdma_enqueue_buff(gchan); +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/gdma.h b/drivers/net/ethernet/dinghai/zf_mpf/gdma.h new file mode 100644 index 000000000000..904258e0e580 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/gdma.h @@ -0,0 +1,63 @@ +#ifndef __GDMA_H +#define __GDMA_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "./epc/virt-dma.h" + +#define ZF_GDMA_CHAN_NUM (4) +#define ZF_GDMA_CHAN_BASE (58) + +enum zf_gdma_chan_status { GDMA_CHAN_IDLE = 0, + GDMA_CHAN_BUSY, + GDMA_CHAN_ERR }; + +struct zf_gdma_chan { + enum zf_gdma_chan_status status; + uint16_t chan_id; + + struct list_head desc_list; + spinlock_t chan_lock; + + struct zf_gdma_dev *gdev; + struct zxdh_virt_dma_chan vc; + struct zf_gdma_desc *desc; + struct tasklet_struct task; +}; + +struct zf_gdma_desc { + uint64_t src; /* src addr */ + uint64_t dst; + uint64_t len; + uint32_t user; + struct zxdh_virt_dma_desc vd; + struct list_head node; + struct zf_gdma_chan *chan; +}; + +struct zf_gdma_dev { + uint64_t base_addr; + struct dma_device *dd; + struct pci_dev *pdev; + struct zf_gdma_chan chan[ZF_GDMA_CHAN_NUM]; +}; + +int32_t dh_zf_mpf_gdma_init(struct dh_core_dev *dh_dev); +void dh_zf_mpf_gdma_uninit(struct dh_core_dev *dh_dev); +int32_t zf_gdma_err_irq_handle(struct notifier_block *nb, unsigned long action, + void *data); +int32_t zf_gdma_chan_irq_handle(struct notifier_block *nb, unsigned long action, + void *data); + +#endif \ No newline at end of file diff --git 
a/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.c new file mode 100644 index 000000000000..e13aba359f4a --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.c @@ -0,0 +1,549 @@ +#include +#include +#include +#include +#include +#include "zf_chan_ioctl.h" + +static dev_t dev; +static struct cdev c_dev; +static struct class *cl; + +#define BAR_IOCTL_CMD_NORMAL _IOW('a', 1, msg_entity_t) +#define BAR_IOCTL_CMD_SINGLE_DEV _IOW('a', 2, pci_res_st) +#define BAR_IOCTL_CMD_ALL_DEV _IOW('a', 3, pci_res_st) +#define BAR_IOCTL_CMD_SEND_REGISTER _IOW('a', 4, msg_entity_t) +#define BAR_IOCTL_CMD_SEND_UNREGISTER _IOW('a', 5, msg_entity_t) +#define BAR_IOCTL_CMD_RECV_MSG _IOW('a', 6, msg_entity_t) + +struct mpf_message_node { + struct normal_msg_entity msg; + struct list_head node; + uint16_t event_id; +}; +struct mpf_wait_queue { + wait_queue_head_t wq; + struct list_head msg_list; + spinlock_t lock; +}; +struct zxdh_bar_ioctl_dev { + uint16_t pcie_id; + uint64_t bar0_base_virt_addr; + struct mpf_wait_queue wait_queues; +} ioctl_dev = { 0 }; + +struct zxdh_bar_ioctl_dev *zxdh_get_bar_ioctl_dev(void) +{ + return &ioctl_dev; +} + +int zxdh_init_bar_ioctl_resource(struct dh_core_dev *core_dev) +{ + struct zxdh_bar_ioctl_dev *dev = zxdh_get_bar_ioctl_dev(); + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(core_dev); + + dev->pcie_id = mpf_dev->pcie_id; + dev->bar0_base_virt_addr = mpf_dev->pci_ioremap_addr; + + spin_lock_init(&dev->wait_queues.lock); + init_waitqueue_head(&dev->wait_queues.wq); + INIT_LIST_HEAD(&(dev->wait_queues.msg_list)); + return 0; +} + +static struct mpf_wait_queue *wait_queue_get(void) +{ + struct zxdh_bar_ioctl_dev *dev = zxdh_get_bar_ioctl_dev(); + + return &dev->wait_queues; +} + +void zxdh_remove_bar_ioctl_resource(struct dh_core_dev *core_dev) +{ + struct mpf_wait_queue *wait_queue = wait_queue_get(); + struct mpf_message_node *msg_node, *tmp; + + spin_lock(&wait_queue->lock); + + 
list_for_each_entry_safe(msg_node, tmp, &wait_queue->msg_list, node) { + list_del(&msg_node->node); + kfree(msg_node); + } + + spin_unlock(&wait_queue->lock); + return; +} + +int info_usr_by_err_code(uint16_t event_id, unsigned long arg, + uint16_t err_code) +{ + int ret = 0; + struct normal_msg_entity *msg_to_usr = NULL; + + msg_to_usr = kzalloc(sizeof(struct normal_msg_entity), GFP_KERNEL); + if (!msg_to_usr) { + return -EFAULT; + } + msg_to_usr->hdr.recv_hdr_out.event_id = event_id; + msg_to_usr->hdr.recv_hdr_out.state = err_code; + if (copy_to_user((struct normal_msg_entity __user *)arg, msg_to_usr, + sizeof(struct normal_msg_entity))) { + ret = -EFAULT; + } + + kfree(msg_to_usr); + return ret; +} + +static int zxdh_bar_recv_func_noop(void *pay_load, uint16_t len, + void *reps_buffer, uint16_t *reps_len, + void *dev) +{ + *reps_len = sizeof(uint16_t); + return 0; +} + +static uint16_t zxdh_err_code_convert(int bar_err_code) +{ + uint16_t err_code = 0; + + switch (bar_err_code) { + case BAR_MSG_ERR_MODULE: + err_code = IOCTRL_ERR_SEND_EVENTID_EXCCED; + break; + case BAR_MSG_ERR_REPEAT_REGISTER: + err_code = IOCTRL_ERR_RECV_REPEAT_REGISTER; + break; + case BAR_MSG_ERR_UNGISTER: + err_code = IOCTRL_ERR_RECV_NOT_REGISTER; + break; + default: + err_code = IOCTRL_OK; + break; + } + return err_code; +} + +static uint16_t zxdh_fill_host_recv_func(uint16_t event_id) +{ + int bar_err_code = 0; + + bar_err_code = zxdh_bar_chan_msg_recv_register(event_id, + zxdh_bar_recv_func_noop); + return zxdh_err_code_convert(bar_err_code); +} + +static int zxdh_strip_host_recv_func(uint16_t event_id) +{ + int bar_err_code = 0; + + bar_err_code = zxdh_bar_chan_msg_recv_unregister(event_id); + return zxdh_err_code_convert(bar_err_code); +} + +/******************************************************************** + * ioctl cmd + * 1 bar_chan_ioctl_register_eventid 接收消息方注册event_id + * 2 bar_chan_ioctl_unregister_eventid 接收消息方解注册event_id + * 3 bar_chan_ioctl_recv_msg 接收消息方主动监听接收消息 + * 4 
bar_chan_ioctl_send_msg 消息发送方发送同步消息 + * 5 bar_chan_ioctl_risc_send_simulation 模拟risc发送消息到wait_ques + * 6 bar_chan_ioctl_pci_res_get_single 单个设备pcie资源查询 + * 7 bar_chan_ioctl_pci_res_get_all 全量设备pcie资源查询 + * *******************************************************************/ +int bar_chan_ioctl_register_eventid(unsigned int cmd, unsigned long arg) +{ + uint16_t cmd_state = 0; + uint16_t event_id = 0; + + if (copy_from_user(&event_id, (uint16_t __user *)arg, + sizeof(uint16_t))) { + LOG_ERR("MPF_IOCTL_REGISTER: copy_from_user failed\n"); + return -EFAULT; + } + + cmd_state = zxdh_fill_host_recv_func(event_id); + return info_usr_by_err_code(event_id, arg, cmd_state); +} + +int bar_chan_ioctl_unregister_eventid(unsigned int cmd, unsigned long arg) +{ + uint16_t cmd_state = 0; + uint16_t event_id = 0; + + if (copy_from_user(&event_id, (uint16_t __user *)arg, + sizeof(uint16_t))) { + LOG_ERR(KERN_ERR "MPF_IOCTL_REGISTER: copy_from_user failed\n"); + return -EFAULT; + } + + cmd_state = zxdh_strip_host_recv_func(event_id); + return info_usr_by_err_code(event_id, arg, cmd_state); + ; +} + +/* 判断消息队列里面是佛有指定event_id消息*/ +bool is_msg_list_contain_event_id(uint16_t event_id) +{ + bool found = false; + unsigned long flags; + struct mpf_message_node *node; + struct mpf_wait_queue *wait_queue = wait_queue_get(); + + spin_lock_irqsave(&wait_queue->lock, flags); + + list_for_each_entry(node, &wait_queue->msg_list, node) { + if (node->event_id == event_id) { + found = true; + break; + } + } + spin_unlock_irqrestore(&wait_queue->lock, flags); + return found; +} + +int bar_chan_ioctl_recv_msg(unsigned int cmd, unsigned long arg) +{ + int func_state = 0; + uint16_t event_id; + struct mpf_message_node *msg_ptr; + unsigned long flags; + struct mpf_wait_queue *wait_queue = NULL; + + /* 1 用户态拷贝cmd中的event_id*/ + if (copy_from_user(&event_id, (uint16_t __user *)arg, + sizeof(uint16_t))) { + LOG_ERR(KERN_ERR "MPF_IOCTL_GET_MSG: copy_from_user failed\n"); + return -EFAULT; + } + + /* 2 
获得消息队列地址*/ + wait_queue = wait_queue_get(); + if (!wait_queue) { + LOG_ERR("wait_queue not found.\n"); + return info_usr_by_err_code(event_id, arg, + IOCTRL_ERR_SEND_EVENTID_EXCCED); + } + + /* 3 接收之前检查event_id是否注册*/ + func_state = zxdh_bar_callback_register_state(event_id); + if (func_state != BAR_MSG_OK) { + return info_usr_by_err_code(event_id, arg, + zxdh_err_code_convert(func_state)); + } + + /* 4 消息list不含对应event_id消息则阻塞, 被唤醒之后重新判断*/ + wait_event_interruptible(wait_queue->wq, + is_msg_list_contain_event_id(event_id)); + + /* 5 如果msg list中有对应event_id消息, 遍历链表, 从中取出来*/ + spin_lock_irqsave(&wait_queue->lock, flags); + list_for_each_entry(msg_ptr, &wait_queue->msg_list, node) { + if (msg_ptr->event_id == event_id) { + list_del(&msg_ptr->node); //摘链表 + + if (copy_to_user((struct normal_msg_entity __user *)arg, + msg_ptr, + sizeof(struct normal_msg_entity))) { + LOG_ERR(KERN_ERR + "MPF_IOCTL_GET_MSG: copy_to_user failed\n"); + kfree(msg_ptr); + spin_unlock_irqrestore(&wait_queue->lock, + flags); + return -EFAULT; + } + kfree(msg_ptr); + break; + } + } + spin_unlock_irqrestore(&wait_queue->lock, flags); + + return 0; +} + +int bar_chan_common_sync_send(unsigned int cmd, unsigned long arg) +{ + int ret = 0; + struct normal_msg_entity *user_msg = NULL; + struct zxdh_ioctl_send_in *send_paras = NULL; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_bar_ioctl_dev *dev = zxdh_get_bar_ioctl_dev(); + + /* 按照既定的申请内存*/ + user_msg = kzalloc(sizeof(struct normal_msg_entity), GFP_KERNEL); + if (NULL == user_msg) { + LOG_ERR("malloc failed.\n"); + return -1; + } + /* 初始化并且按照约定的大小从用户态拷贝数据,保证不拷贝到脏数据*/ + if (copy_from_user(user_msg, (msg_entity_t __user *)arg, + sizeof(struct normal_msg_entity))) { + user_msg->hdr.send_hdr_out.ioctl_state = IOCTRL_ERR_MSG_GET; + goto out; + } + + send_paras = &user_msg->hdr.send_hdr_in; + if (send_paras->pload_len > BAR_CHAN_PLOAD_SIZE) { + user_msg->hdr.send_hdr_out.bar_state = IOCTRL_ERR_MSG_GET; + 
goto out; + } + + in.virt_addr = dev->bar0_base_virt_addr + ZXDH_BAR1_CHAN_OFFSET; + in.payload_addr = user_msg->pload; + in.payload_len = send_paras->pload_len; + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = send_paras->event_id; + in.src_pcieid = dev->pcie_id; + + /* pload直接透传上去*/ + result.buffer_len = BAR_CHAN_PLOAD_SIZE; + result.recv_buffer = (void *)user_msg->pload; + + ret = zxdh_bar_send_without_reps_hdr(&in, &result); + if (ret != 0) { + LOG_ERR("pcie send msg failed, ret:%d.\n", ret); + } + + user_msg->hdr.send_hdr_out.bar_state = ret; + user_msg->hdr.send_hdr_out.ioctl_state = 0; +out: + ret = copy_to_user((int *)arg, user_msg, sizeof(*user_msg)); + if (0 != ret) { + /* msg may not reply to user, but we can locate by demsg*/ + LOG_ERR("reply ioctl msg failed, ret: %d.\n", ret); + } + /* free the user_msg_data*/ + if (NULL != user_msg) { + kfree(user_msg); + } + return 0; +} + +int bar_chan_pci_res_get(unsigned int cmd, unsigned long arg, uint16_t mode) +{ + int ret = 0; + struct zxdh_mpf_query_bar_msg *entity = NULL; + struct zxdh_pci_query_hdr query_hdr = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + struct zxdh_bar_ioctl_dev *dev = zxdh_get_bar_ioctl_dev(); + + entity = kmalloc(sizeof(*entity), GFP_KERNEL); + if (NULL == entity) { + LOG_ERR("malloc failed.\n"); + return -1; + } + memset(entity, 0, sizeof(*entity)); + if (copy_from_user(entity, (pci_res_st __user *)arg, sizeof(*entity))) { + entity->ioctl_state = IOCTRL_ERR_MSG_GET; + goto out; + } + + query_hdr.mode = mode; + query_hdr.pcie_id = entity->pci_res_msg.pcie_id; + + in.virt_addr = dev->bar0_base_virt_addr + ZXDH_BAR1_CHAN_OFFSET; + in.payload_addr = &query_hdr; + in.payload_len = sizeof(query_hdr); + in.src = MSG_CHAN_END_PF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_PCIE_RES_QUERY; + in.src_pcieid = dev->pcie_id; + + result.recv_buffer = &entity->pci_res_msg.reply; + ; + result.buffer_len = 
sizeof(entity->pci_res_msg.reply); + ; + + ret = zxdh_bar_send_without_reps_hdr(&in, &result); + if (ret != 0) { + LOG_ERR("pcie send msg failed, ret:%d.\n", ret); + goto out; + } + + entity->ioctl_state = 0; +out: + entity->bar_state = ret; + ret = copy_to_user((int *)arg, entity, sizeof(*entity)); + if (0 != ret) { + LOG_ERR("reply tp user failed.\n"); + } + if (NULL != entity) { + kfree(entity); + } + return 0; +} + +int bar_chan_pci_res_get_single(unsigned int cmd, unsigned long arg) +{ + return bar_chan_pci_res_get(cmd, arg, PCI_QUERY_TYPE_SINGLE); +} + +int bar_chan_pci_res_get_all(unsigned int cmd, unsigned long arg) +{ + return bar_chan_pci_res_get(cmd, arg, PCI_QUERY_TYPE_ALL); +} + +struct bar_chan_func_sel { + unsigned int cmd; + int (*ioctl_bar_chan_func)(unsigned int cmd, unsigned long arg); +} ioctl_func_arr[] = { + { BAR_IOCTL_CMD_NORMAL, bar_chan_common_sync_send }, + { BAR_IOCTL_CMD_SINGLE_DEV, bar_chan_pci_res_get_single }, + { BAR_IOCTL_CMD_ALL_DEV, bar_chan_pci_res_get_all }, + { BAR_IOCTL_CMD_SEND_REGISTER, bar_chan_ioctl_register_eventid }, + { BAR_IOCTL_CMD_SEND_UNREGISTER, bar_chan_ioctl_unregister_eventid }, + { BAR_IOCTL_CMD_RECV_MSG, bar_chan_ioctl_recv_msg }, +}; + +static long bar_msg_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int i, ret = 0; + int ioctl_func_nums = + sizeof(ioctl_func_arr) / sizeof(ioctl_func_arr[0]); + + for (i = 0; i < ioctl_func_nums; i++) { + if (ioctl_func_arr[i].cmd == cmd) { + ret = ioctl_func_arr[i].ioctl_bar_chan_func(cmd, arg); + break; + } + } + return 0; +} + +static struct file_operations fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = bar_msg_ioctl, +}; + +/* before called, get lock firstly*/ +static bool is_msg_list_over_limit(struct list_head *list, int max_limit) +{ + int count = 0; + struct list_head *pos; + + list_for_each(pos, list) { + if (++count > max_limit) + return true; + } + return false; +} + +/* +将长度为msg_len的msg放到event_id为对应的wait-queue中去 +如果没有注册, 就直接退出或者跳过 + 
+*/ +int push_usr_msg_to_wait_queue(uint16_t event_id, void *msg, uint16_t msg_len) +{ + struct mpf_message_node *msg_ptr; + struct mpf_wait_queue *wait_queue = NULL; + unsigned long flags; + + if (msg_len >= BAR_CHAN_PLOAD_SIZE) { + LOG_ERR("msg_len:%u is too long .\n", msg_len); + return -EINVAL; + } + + wait_queue = wait_queue_get(); + if (!wait_queue) { + LOG_ERR("wait_queue not found.\n"); + return -EINVAL; + } + + /* will be free in ioctl cmd "bar_chan_ioctl_recv_msg", or free in module exit func*/ + msg_ptr = kzalloc(sizeof(struct mpf_message_node), GFP_KERNEL); + if (!msg_ptr) { + return -ENOMEM; + } + msg_ptr->event_id = event_id; + msg_ptr->msg.hdr.recv_hdr_out.event_id = event_id; + memcpy(msg_ptr->msg.pload, msg, msg_len); + + /* 加入消息链表并唤醒等待队列*/ + spin_lock_irqsave(&wait_queue->lock, flags); + if (is_msg_list_over_limit(&wait_queue->msg_list, MSG_LIST_MAX_LEN)) { + spin_unlock_irqrestore(&wait_queue->lock, flags); + kfree(msg_ptr); + return -1; + } + list_add_tail(&msg_ptr->node, &wait_queue->msg_list); + spin_unlock_irqrestore(&wait_queue->lock, flags); + wake_up_interruptible(&wait_queue->wq); + return 0; +} + +int bar_msg_ioctl_dev_init(void) +{ + int ret = 0; + + if (alloc_chrdev_region(&dev, 0, 1, DEVICE_NAME) < 0) { + return -EBUSY; + } + + cl = class_create(THIS_MODULE, DEVICE_NAME); + if (cl == NULL) { + unregister_chrdev_region(dev, 1); + return -ENOMEM; + } + + if (device_create(cl, NULL, dev, NULL, DEVICE_NAME) == NULL) { + class_destroy(cl); + unregister_chrdev_region(dev, 1); + return -ENOMEM; + } + + cdev_init(&c_dev, &fops); + if (cdev_add(&c_dev, dev, 1) == -1) { + device_destroy(cl, dev); + class_destroy(cl); + unregister_chrdev_region(dev, 1); + return -ENOMEM; + } + + LOG_INFO("Custom device registered\n"); + return ret; +} + +void bar_msg_ioctl_dev_exit(void) +{ + cdev_del(&c_dev); + device_destroy(cl, dev); + class_destroy(cl); + unregister_chrdev_region(dev, 1); + LOG_INFO("Custom device unregistered\n"); +} + +int 
zxdh_bar_ioctl_msg_mdl_init(struct dh_core_dev *core_dev) +{ + int ret = 0; + + ret = zxdh_init_bar_ioctl_resource(core_dev); + if (ret != 0) { + return -1; + } + /* 在ioctl设备注册之前初始化了wait_queues, 后续的ioctl访问不会空指针*/ + ret = bar_msg_ioctl_dev_init(); + if (ret != 0) { + LOG_ERR("custom init failed, ret:%d.\n", ret); + zxdh_remove_bar_ioctl_resource(core_dev); + return -1; + } + /* 在资源初始化之后, 才挂上用户态接收钩子, risc消息不会访问空指针*/ + zxdh_usr_msg_cache_func_register(push_usr_msg_to_wait_queue); + return 0; +} + +void zxdh_bar_ioctl_msg_mdl_exit(struct dh_core_dev *core_dev) +{ + /* 用户态接收消息钩子置空后释放资源, risc来的消息访问wait_queues安全*/ + zxdh_usr_msg_cache_func_register(NULL); + + /* ioctl设备解注册之后释放资源, 所有的ioctl cmd访问wait-queus安全*/ + bar_msg_ioctl_dev_exit(); + /* 释放资源*/ + zxdh_remove_bar_ioctl_resource(core_dev); +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.h new file mode 100644 index 000000000000..bc97faf1658f --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_chan_ioctl.h @@ -0,0 +1,114 @@ +#ifndef _ZXDH_MSG_CHAN_IOCTL_H_ +#define _ZXDH_MSG_CHAN_IOCTL_H_ + +#ifdef __cplusplus +extern "C" { +#endif +#include "zf_mpf.h" + +#define DEVICE_NAME "bar_ioctl_dev" +#define BAR_CHAN_SIZE (1024 * 2) +#define BAR_CHAN_PLOAD_SIZE (BAR_CHAN_SIZE - 12) +#define BAR_REV_HDR_LEN (4) +#define ZXDH_PF_DEV_NUM (40) + +#define PCI_QUERY_TYPE_SINGLE (1) +#define PCI_QUERY_TYPE_ALL (2) + +#define MSG_LIST_MAX_LEN (10) + +enum { + IOCTRL_OK, + IOCTRL_ERR_MALLOC, + IOCTRL_ERR_MSG_GET, + IOCTRL_ERR_COPY_FROM_USR, + IOCTRL_ERR_SEND_LENGTH_EXCCED, + IOCTRL_ERR_SEND_EVENTID_EXCCED, + IOCTRL_ERR_RECV_NOT_REGISTER, + IOCTRL_ERR_RECV_REPEAT_REGISTER, + IOCTRL_ERR_SEND_EXCEED_QUEUE_SIZE, +}; +struct zxdh_ioctl_send_in { + uint16_t pload_len; + uint16_t src; + uint16_t dst; + uint16_t event_id; +}; + +struct zxdh_ioctl_send_out { + int ioctl_state; //ioctrl级别返回值 + int bar_state; //bar通道接口级别返回值 +}; + +struct zxdh_ioctl_recv_in { + uint16_t 
event_id; + uint16_t rsv1; + uint32_t rsv2; +}; + +struct zxdh_ioctl_recv_out { + uint16_t event_id; + uint16_t state; + uint32_t rsv2; +}; + +typedef struct normal_msg_entity { + union ioctl_ctrl_hdr //私有消息控制头 8 bytes + { + struct zxdh_ioctl_send_in send_hdr_in; //send消息入参 + struct zxdh_ioctl_send_out send_hdr_out; //send消息出参 + struct zxdh_ioctl_recv_in recv_hdr_in; //recv消息入参 + struct zxdh_ioctl_recv_out recv_hdr_out; //recv + uint8_t std[8]; + } hdr; + uint8_t pload[BAR_CHAN_PLOAD_SIZE]; +} msg_entity_t; + +struct zxdh_mpf_pci_res_item { + uint16_t device_id; + uint16_t pcie_id; + uint16_t bdf; + uint8_t link_state; + uint8_t dev_type; + uint16_t total_vfs; + uint16_t initial_vfs; + uint16_t num_vfs; + uint8_t vf_stride; + uint8_t first_vf_offset; + uint8_t pad[8]; //预留字段 +}; + +/* zf内核态和risc通信的约定结构体*/ +struct zxdh_mpf_pci_res_list { + uint16_t num; + uint16_t verno; //版本号 + int res; //0表示返回成功, 其他表示失败, 包括消息发送失败 + struct zxdh_mpf_pci_res_item pci_res_lis[ZXDH_PF_DEV_NUM]; +}; + +struct zxdh_mpf_query_pci_res_msg { + uint16_t pcie_id; + uint8_t dev_type; + uint8_t pad[5]; + struct zxdh_mpf_pci_res_list reply; +}; + +typedef struct zxdh_mpf_query_bar_msg { + int ioctl_state; + int bar_state; + struct zxdh_mpf_query_pci_res_msg pci_res_msg; +} pci_res_st; + +struct zxdh_pci_query_hdr { + uint16_t mode; + uint16_t pcie_id; +}; + +int zxdh_bar_ioctl_msg_mdl_init(struct dh_core_dev *core_dev); +void zxdh_bar_ioctl_msg_mdl_exit(struct dh_core_dev *core_dev); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZXDH_MSG_CHAN_IOCTL_H_ */ diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_events.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_events.c new file mode 100644 index 000000000000..2ae0b74412c7 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_events.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zf_events.h" +#include "../en_mpf.h" + +static int32_t zf_riscv_notifier(struct 
notifier_block *nb, unsigned long type, + void *data); +static int32_t zf_pf_notifier(struct notifier_block *nb, unsigned long type, + void *data); +static int32_t irq1_notifier(struct notifier_block *nb, unsigned long type, + void *data); +static int32_t fuc_hotplug_failed_notifier(struct notifier_block *nb, + unsigned long type, void *data); +static int32_t fuc_hotplug_finish_notifier(struct notifier_block *nb, + unsigned long type, void *data); +static int32_t irq4_notifier(struct notifier_block *nb, unsigned long type, + void *data); +int get_fuc_hp_ret(void); +int finish_flag; + +static struct dh_nb zf_mpf_events[] = { + { .nb.notifier_call = zf_riscv_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF }, + { .nb.notifier_call = zf_pf_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_PF_TO_MPF }, + { .nb.notifier_call = irq1_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_1 }, + { .nb.notifier_call = fuc_hotplug_failed_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_2 }, + { .nb.notifier_call = fuc_hotplug_finish_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_3 }, + { .nb.notifier_call = irq4_notifier, + .event_type = DH_EVENT_TYPE_NOTIFY_4 } +}; + +static int32_t zf_riscv_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + + DH_LOG_INFO(MODULE_MPF, "is called, type=%ld\n", type); + zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_risc); + + return NOTIFY_OK; +} + +static int32_t zf_pf_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + + DH_LOG_INFO(MODULE_MPF, "is called, type=%ld\n", type); + 
zxdh_events_work_enqueue(dh_dev, &mpf_dev->dh_np_sdk_from_pf); + + return NOTIFY_OK; +} + +static int32_t irq1_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + + DH_LOG_INFO(MODULE_MPF, "is called, ep_bdf=0x%x, pcie_id=%d\n", + mpf_dev->ep_bdf, mpf_dev->pcie_id); + + return zf_hdma_wr_handler((void *)dh_dev->zf_ep->dpu_ep_array[0]); +} + +static int32_t fuc_hotplug_failed_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + finish_flag = FUC_HP_RET_FAILED; + DH_LOG_ERR(MODULE_MPF, "hotplug failed\n"); + return NOTIFY_OK; +} +static int32_t fuc_hotplug_finish_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + finish_flag = FUC_HP_RET_FINISH; + DH_LOG_INFO(MODULE_MPF, "hotplug success\n"); + return NOTIFY_OK; +} + +static int32_t irq4_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct dh_event_nb *event_nb = dh_nb_cof(nb, struct dh_event_nb, nb); + struct dh_core_dev *dh_dev = (struct dh_core_dev *)event_nb->ctx; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + + DH_LOG_INFO(MODULE_MPF, "is called, ep_bdf=0x%x, pcie_id=%d\n", + mpf_dev->ep_bdf, mpf_dev->pcie_id); + + return NOTIFY_OK; +} + +static void zf_np_sdk_handler_from_risc(struct work_struct *p_work) +{ + struct dh_en_mpf_dev *mpf_dev = + container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_risc); + + DH_LOG_INFO(MODULE_MPF, "is called\n"); + zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_MPF, + mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET, + NULL); + return; +} + +static void zf_np_sdk_handler_from_pf(struct work_struct *p_work) +{ + struct dh_en_mpf_dev *mpf_dev = + container_of(p_work, struct dh_en_mpf_dev, dh_np_sdk_from_pf); + + DH_LOG_ERR(MODULE_MPF, "is called\n"); + 
zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_MPF, + mpf_dev->pci_ioremap_addr + ZXDH_BAR2_CHAN_OFFSET, + NULL); + return; +} + +void zxdh_zf_events_start(struct dh_core_dev *dev) +{ + struct dh_events *events = dev->events; + int32_t i = 0; + int32_t err = 0; + + for (i = 0; i < ARRAY_SIZE(zf_mpf_events); i++) { + events->notifiers[i].nb = zf_mpf_events[i]; + events->notifiers[i].ctx = dev; + err = dh_eq_notifier_register(&dev->eq_table, + &events->notifiers[i].nb); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "i: %d, err: %d.\n", i, err); + } + } +} + +void dh_zf_events_stop(struct dh_core_dev *dev) +{ + struct dh_events *events = dev->events; + int32_t i = 0; + + for (i = ARRAY_SIZE(zf_mpf_events) - 1; i >= 0; i--) { + dh_eq_notifier_unregister(&dev->eq_table, + &events->notifiers[i].nb); + } + + zxdh_events_cleanup(dev); +} + +int32_t dh_zf_mpf_events_init(struct dh_core_dev *dev) +{ + struct dh_events *events = NULL; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev); + int32_t ret = 0; + + events = kzalloc((sizeof(*events) + ARRAY_SIZE(zf_mpf_events) * + sizeof(struct dh_event_nb)), + GFP_KERNEL); + if (unlikely(events == NULL)) { + DH_LOG_ERR(MODULE_MPF, "events kzalloc failed: %p\n", events); + ret = -ENOMEM; + goto err_events_kzalloc; + } + + events->evt_num = ARRAY_SIZE(zf_mpf_events); + events->dev = dev; + dev->events = events; + events->wq = create_singlethread_workqueue("dh_zf_mpf_events"); + if (!events->wq) { + DH_LOG_ERR( + MODULE_MPF, + "events->wq create_singlethread_workqueue failed: %p\n", + events->wq); + ret = -ENOMEM; + goto err_create_wq; + } + + INIT_WORK(&mpf_dev->dh_np_sdk_from_risc, zf_np_sdk_handler_from_risc); + INIT_WORK(&mpf_dev->dh_np_sdk_from_pf, zf_np_sdk_handler_from_pf); + + zxdh_zf_events_start(dev); + + return 0; + +err_create_wq: + kfree(events); +err_events_kzalloc: + return ret; +} + +void dh_zf_mpf_events_uninit(struct dh_core_dev *dev) +{ + return dh_zf_events_stop(dev); +} + +void reset_fuc_hp_ret(void) +{ + 
finish_flag = 0; + return; +} + +int get_fuc_hp_ret(void) +{ + return finish_flag; +} \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_events.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_events.h new file mode 100644 index 000000000000..a7c3f928d90f --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_events.h @@ -0,0 +1,24 @@ +#ifndef __ZXDH_ZF_MPF_EVENTS_H__ +#define __ZXDH_ZF_MPF_EVENTS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "epc/pcie-zte-zf-epc.h" +#include "fuc_hotplug/fuc_hotplug_commom.h" + +int32_t dh_zf_mpf_events_init(struct dh_core_dev *dev); +void zxdh_zf_events_start(struct dh_core_dev *dev); +int32_t dh_zf_mpf_events_init(struct dh_core_dev *dev); +void dh_zf_events_stop(struct dh_core_dev *dev); +void dh_zf_mpf_events_uninit(struct dh_core_dev *dev); +int zf_hdma_wr_handler(void *data); +int zf_hdma_rd_handler(void *data); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.c new file mode 100644 index 000000000000..1749cc23c097 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.c @@ -0,0 +1,510 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "zf_events.h" +#include "zf_mpf_eq.h" +#include "zf_mpf_irq.h" +#include "zf_mpf_cfg_sf.h" +#include "zf_mpf.h" +#include "gdma.h" + +#ifdef PCIE_ZF_EPC_OPEN +#include "epc/pcie-zte-zf-epc.h" +#endif + +#ifdef DRIVER_VERSION_VAL +#define DRV_VERSION DRIVER_VERSION_VAL +#else +#define DRV_VERSION "1.0-1" +#endif + +const struct fw_compat_version zf_mpf_version = { + /*major fw_minor drv_minor patch*/ + 0, 0, 0, 0 +}; + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +uint32_t zf_dh_debug_mask; +module_param_named(debug_mask, zf_dh_debug_mask, uint, 0644); +MODULE_PARM_DESC( + debug_mask, + "debug mask: 1 = dump cmd data, 2 = 
dump cmd exec time, 3 = both. Default=0"); + +extern struct devlink_ops dh_mpf_devlink_ops; +extern struct dh_core_devlink_ops dh_mpf_core_devlink_ops; + +extern int zxdh_host_reset_driver_init(struct dh_core_dev *core_dev); +extern void zxdh_host_reset_driver_exit(struct dh_core_dev *core_dev); +extern int zxdh_host_fuc_hotplug_driver_init(void); +extern void zxdh_host_fuc_hotplug_driver_exit(void); +extern int zf_reset_finish_flag_init(struct dh_core_dev *dh_dev, + unsigned long ep_mpf_paddr); +extern void zf_reset_finish_flag_exit(void); +extern int zxdh_bar_ioctl_msg_mdl_init(struct dh_core_dev *core_dev); +extern void zxdh_bar_ioctl_msg_mdl_exit(struct dh_core_dev *core_dev); + +int is_zf_mpf_version_match_fw(struct pci_dev *pdev); + +static int32_t dh_zf_mpf_pci_init(struct dh_core_dev *dev) +{ + int32_t ret = 0; + struct dh_en_mpf_dev *mpf_dev = NULL; + + pci_set_drvdata(dev->pdev, dev); + + ret = pci_enable_device(dev->pdev); + if (ret) { + dev_err(dev->device, "pci_enable_device failed: %d\n", ret); + return -ENOMEM; + } + + if (is_zf_mpf_version_match_fw(dev->pdev)) { + DH_LOG_ERR(MODULE_MPF, "zf_mpf_version cant match fw\n"); + return -ENOMEM; + } + + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(64)); + if (ret != 0) { + ret = dma_set_mask_and_coherent(dev->device, DMA_BIT_MASK(32)); + if (ret != 0) { + dev_err(dev->device, + "dma_set_mask_and_coherent failed: %d\n", ret); + goto err_pci; + } + } + + ret = pci_request_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM), + "dh-mpf"); + if (ret != 0) { + dev_err(dev->device, + "pci_request_selected_regions failed: %d\n", ret); + goto err_pci; + } + + pci_enable_pcie_error_reporting(dev->pdev); + pci_set_master(dev->pdev); + ret = pci_save_state(dev->pdev); + if (ret != 0) { + dev_err(dev->device, "pci_save_state failed: %d\n", ret); + goto err_pci_save_state; + } + + mpf_dev = dh_core_priv(dev); + mpf_dev->pci_ioremap_addr = + 
(uint64_t)ioremap(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + DH_LOG_INFO(MODULE_MPF, + "pci_ioremap_addr=0x%llx, ioremap(0x%llx, 0x%llx)\n", + mpf_dev->pci_ioremap_addr, pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + if (mpf_dev->pci_ioremap_addr == 0) { + ret = -1; + DH_LOG_ERR(MODULE_MPF, "ioremap(0x%llx, 0x%llx) failed\n", + pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + goto err_pci_save_state; + } + + return 0; + +err_pci_save_state: + pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); +err_pci: + pci_disable_device(dev->pdev); + return ret; +} + +static const struct pci_device_id dh_zf_mpf_pci_table[] = { + { PCI_DEVICE(ZXDH_MPF_VENDOR_ID, ZXDH_MPF_DEVICE_ID0), 0 }, + { PCI_DEVICE(ZXDH_MPF_VENDOR_ID, ZXDH_MPF_DEVICE_ID1), 0 }, + { + 0, + } +}; + +MODULE_DEVICE_TABLE(pci, dh_zf_mpf_pci_table); + +void dh_zf_mpf_pci_close(struct dh_core_dev *dev) +{ + struct dh_en_mpf_dev *mpf_dev = NULL; + + mpf_dev = dh_core_priv(dev); + iounmap((void __iomem *)mpf_dev->pci_ioremap_addr); + pci_disable_pcie_error_reporting(dev->pdev); + pci_release_selected_regions( + dev->pdev, pci_select_bars(dev->pdev, IORESOURCE_MEM)); + pci_disable_device(dev->pdev); + + return; +} + +int32_t dh_zf_mpf_pcie_id_get(struct dh_core_dev *dh_dev) +{ + u8 pos = 0; + uint8_t type = 0; + uint16_t padding = 0; + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dh_dev); + struct pci_dev *pdev = dh_dev->pdev; + + for (pos = pci_find_capability(pdev, PCI_CAP_ID_VNDR); pos > 0; + pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_VNDR)) { + pci_read_config_byte( + pdev, pos + offsetof(struct zxdh_pf_pci_cap, cfg_type), + &type); + + if (type == ZXDH_PCI_CAP_PCI_CFG) { + pci_read_config_word( + pdev, + pos + offsetof(struct zxdh_pf_pci_cap, + padding[0]), + &padding); + mpf_dev->pcie_id = padding; + DH_LOG_INFO(MODULE_MPF, "pf_dev->pcie_id: 
0x%x\n", + mpf_dev->pcie_id); + return 0; + } + } + return -1; +} + +int is_zf_mpf_version_match_fw(struct pci_dev *pdev) +{ + int ret = 0; + u64 bar_addr = 0; + u64 bar_len = 0; + struct version_compat_reg fw_version = { 0 }; + void __iomem *bar_virt_addr = NULL; + u32 fw_feature = 0; + + bar_addr = pci_resource_start(pdev, 0); + bar_len = pci_resource_len(pdev, 0); + bar_virt_addr = (void __iomem *)ioremap(bar_addr, bar_len); + if (bar_virt_addr == NULL) { + DH_LOG_ERR(MODULE_MPF, "bar_virt_addr map failed\n"); + return -1; + } + + memcpy(&fw_feature, bar_virt_addr + FW_FEATURE_OF_ZF_MPF_OFFSET, + sizeof(u32)); + if (!(fw_feature & FW_FEATURE_SUPPORT_MASK)) { + DH_LOG_WARNING(MODULE_MPF, + "fw dont support feature function\n"); + ret = 0; + goto finish; + } + + memcpy(&fw_version, bar_virt_addr + VERSION_OF_ZF_MPF_OFFSET, + sizeof(struct version_compat_reg)); + if (fw_version.version_compat_item != ZF_MPF_COMPAT_ITEM) { + DH_LOG_ERR(MODULE_MPF, + "version_compat_item 0x%x is not zf_mpf(%x)! 
\n", + fw_version.version_compat_item, ZF_MPF_COMPAT_ITEM); + ret = -1; + goto finish; + } + + if (fw_version.major != zf_mpf_version.major || + fw_version.fw_minor < zf_mpf_version.fw_minor || + fw_version.drv_minor > zf_mpf_version.drv_minor) { + DH_LOG_ERR(MODULE_MPF, "fw_version: %x.%x.%x.%x\n", + fw_version.major, fw_version.fw_minor, + fw_version.drv_minor, fw_version.patch); + DH_LOG_ERR(MODULE_MPF, "zf_mpf_version: %x.%x.%x.%x\n", + zf_mpf_version.major, zf_mpf_version.fw_minor, + zf_mpf_version.drv_minor, zf_mpf_version.patch); + ret = -1; + } + +finish: + iounmap(bar_virt_addr); + return ret; +} + +static int32_t dh_zf_mpf_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct dh_core_dev *dh_dev = NULL; + struct devlink *devlink = NULL; + int32_t err = 0; + + DH_LOG_INFO(MODULE_MPF, "mpf driver start to probe\n"); + + devlink = zxdh_devlink_alloc(&pdev->dev, &dh_mpf_devlink_ops, + sizeof(struct dh_en_mpf_dev)); + if (devlink == NULL) { + dev_err(&pdev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + + dh_dev = devlink_priv(devlink); + dh_dev->device = &pdev->dev; + dh_dev->pdev = pdev; + dh_dev->devlink_ops = &dh_mpf_core_devlink_ops; + + err = dh_zf_mpf_pci_init(dh_dev); + if (err != 0) { + dev_err(&pdev->dev, "%s failed: %d\n", __func__, err); + goto err_devlink_cleanup; + } + +#ifdef CONFIG_ZF_GDMA + err = dh_zf_mpf_gdma_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to initialize gdma module\n"); + goto err_gdma_init; + } +#endif + + err = dh_zf_mpf_pcie_id_get(dh_dev); + if (err != 0) { + dev_err(&pdev->dev, "dh_pf_pcie_id_get failed: %d\n", err); + goto err_pci; + } + + err = dh_mpf_irq_table_init( + dh_dev); // dh_dev->irq_table.priv = kvzalloc(sizeof(struct dh_mpf_irq_table), GFP_KERNEL); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_pci; + } + + err = dh_mpf_eq_table_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_eq_table_init; + } + + err = 
dh_mpf_irq_table_create(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc IRQs\n"); + goto err_irq_table_create; + } + + err = dh_mpf_eq_table_create(dh_dev); + if (err != 0) { + dh_err(dh_dev, "Failed to alloc EQs\n"); + goto err_eq_table_create; + } + +#ifdef PCIE_ZF_EPC_OPEN + err = pcie_zte_zf_epc_module_init(dh_dev, id); + if (err != 0) { + dh_err(dh_dev, "failed to initialize epc\n"); + goto err_epc_init; + } + + if (dh_dev->zf_ep != NULL) { + if (dh_dev->zf_ep->mpf_paddr != 0) { + err = zf_reset_finish_flag_init( + dh_dev, dh_dev->zf_ep->mpf_paddr); + if (err != 0) { + dh_err(dh_dev, + "failed to initialize zf_reset_finish_flag_init\n"); + goto err_epc_init; + } + } + } +#endif + + err = dh_zf_mpf_events_init(dh_dev); + if (err != 0) { + dh_err(dh_dev, "failed to initialize events\n"); + goto err_events_init_cleanup; + } + + err = zxdh_host_reset_driver_init(dh_dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, + "zxdh_host_reset_driver_init failed: %d\n", err); + goto err_zf_host_reset; + } + + err = zxdh_bar_ioctl_msg_mdl_init(dh_dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, + "zxdh_bar_ioctl_msg_mdl_init failed: %d\n", err); + goto err_zf_host_reset; + } + + zxdh_devlink_register(devlink); + + DH_LOG_INFO(MODULE_MPF, "mpf driver probe completed\n"); + return 0; + +err_zf_host_reset: + zxdh_host_reset_driver_exit(dh_dev); +err_events_init_cleanup: +#ifdef PCIE_ZF_EPC_OPEN + zf_reset_finish_flag_exit(); + pcie_zte_zf_epc_free(dh_dev); +err_epc_init: +#endif + dh_mpf_eq_table_destroy(dh_dev); +err_eq_table_create: + dh_mpf_irq_table_destroy(dh_dev); +err_irq_table_create: + dh_eq_table_cleanup(dh_dev); +err_eq_table_init: + dh_irq_table_cleanup(dh_dev); +err_pci: +#ifdef CONFIG_ZF_GDMA + dh_zf_mpf_gdma_uninit(dh_dev); +err_gdma_init: +#endif + dh_zf_mpf_pci_close(dh_dev); +err_devlink_cleanup: + zxdh_devlink_free(devlink); + return err; +} + +static void dh_zf_mpf_remove(struct pci_dev *pdev) +{ + struct dh_core_dev *dh_dev = 
pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dh_dev); + + DH_LOG_INFO(MODULE_MPF, "mpf driver start to remove"); + + zxdh_devlink_unregister(devlink); +#ifdef PCIE_ZF_EPC_OPEN + zf_reset_finish_flag_exit(); + pcie_zte_zf_epc_free(dh_dev); +#endif + zxdh_bar_ioctl_msg_mdl_exit(dh_dev); + zxdh_host_reset_driver_exit(dh_dev); + dh_zf_mpf_events_uninit(dh_dev); + dh_mpf_eq_table_destroy(dh_dev); + dh_mpf_irq_table_destroy(dh_dev); + dh_eq_table_cleanup(dh_dev); + dh_irq_table_cleanup(dh_dev); +#ifdef CONFIG_ZF_GDMA + dh_zf_mpf_gdma_uninit(dh_dev); +#endif + dh_zf_mpf_pci_close(dh_dev); + zxdh_devlink_free(devlink); + + pci_set_drvdata(pdev, NULL); + + DH_LOG_INFO(MODULE_MPF, "mpf driver remove completed\n"); +} + +static int32_t dh_zf_mpf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + return 0; +} + +static int32_t dh_zf_mpf_resume(struct pci_dev *pdev) +{ + return 0; +} + +static void dh_zf_mpf_shutdown(struct pci_dev *pdev) +{ + dh_zf_mpf_remove(pdev); +} + +static pci_ers_result_t dh_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + DH_LOG_INFO(MODULE_MPF, "%s was called\n", __func__); + + return state == pci_channel_io_perm_failure ? 
+ PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t dh_zf_mpf_pci_slot_reset(struct pci_dev *pdev) +{ + DH_LOG_INFO(MODULE_MPF, "%s was called\n", __func__); + + return PCI_ERS_RESULT_RECOVERED; +} + +static void dh_zf_mpf_pci_resume(struct pci_dev *pdev) +{ + DH_LOG_INFO(MODULE_MPF, "%s was called\n", __func__); +} + +static const struct pci_error_handlers dh_zf_mpf_err_handler = { + .error_detected = dh_pci_err_detected, + .slot_reset = dh_zf_mpf_pci_slot_reset, + .resume = dh_zf_mpf_pci_resume +}; + +static struct pci_driver zf_dh_mpf_driver = { + .name = KBUILD_MODNAME, + .id_table = dh_zf_mpf_pci_table, + .probe = dh_zf_mpf_probe, + .remove = dh_zf_mpf_remove, + .suspend = dh_zf_mpf_suspend, + .resume = dh_zf_mpf_resume, + .shutdown = dh_zf_mpf_shutdown, + .err_handler = &dh_zf_mpf_err_handler, +}; + +static int32_t __init init(void) +{ + int32_t err = 0; + + err = pci_register_driver(&zf_dh_mpf_driver); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "pci_register_driver failed: %d\n", err); + return err; + } + +#ifdef CONFIG_ZXDH_SF + err = zxdh_mpf_sf_driver_register(); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, + "zxdh_en_sf_driver_register failed: %d\n", err); + goto err_sf; + } +#endif + + err = zxdh_host_fuc_hotplug_driver_init(); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "fuc_hotplug_driver_init failed: %d\n", + err); + goto err_fuc_hotplug; + } + DH_LOG_INFO(MODULE_MPF, "zxdh_mpf driver init success\n"); + + return 0; + +err_fuc_hotplug: +#ifdef CONFIG_ZXDH_SF + zxdh_mpf_sf_driver_uregister(); +err_sf: +#endif + pci_unregister_driver(&zf_dh_mpf_driver); + return err; +} + +static void __exit cleanup(void) +{ +#ifdef CONFIG_ZXDH_SF + zxdh_mpf_sf_driver_uregister(); +#endif + zxdh_host_fuc_hotplug_driver_exit(); + pci_unregister_driver(&zf_dh_mpf_driver); + +#ifdef PCIE_ZF_EPC_OPEN + zf_reset_finish_flag_exit(); +#endif + + DH_LOG_INFO(MODULE_MPF, "zxdh_mpf driver remove success\n"); +} + +module_init(init); +module_exit(cleanup); diff --git 
a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.h new file mode 100644 index 000000000000..2fe614119c77 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf.h @@ -0,0 +1,57 @@ +#ifndef __ZXDH_ZF_MPF_H__ +#define __ZXDH_ZF_MPF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include "gdma.h" + +#define ZXDH_MPF_VENDOR_ID 0x1cf2 +#define ZXDH_MPF_DEVICE_ID0 0x8044 +#define ZXDH_MPF_DEVICE_ID1 0x806a + +#define ZXDH_BAR1_CHAN_OFFSET 0x2000 //0x7801000 +#define ZXDH_BAR2_CHAN_OFFSET 0x3000 //0x7802000 +#define VERSION_OF_ZF_MPF_OFFSET 0x5438 +#define FW_FEATURE_OF_ZF_MPF_OFFSET 0x1004 +#define FW_FEATURE_SUPPORT_MASK 0x10000 +#define ZF_MPF_COMPAT_ITEM 8 + +struct dh_en_mpf_dev { + uint16_t ep_bdf; + uint16_t pcie_id; + uint16_t vport; + + uint64_t pci_ioremap_addr; + + struct work_struct dh_np_sdk_from_risc; + struct work_struct dh_np_sdk_from_pf; + + struct zf_gdma_dev *gdev; +}; + +struct fw_compat_version { + uint8_t major; + uint8_t fw_minor; + uint8_t drv_minor; + uint16_t patch; +}; + +struct version_compat_reg { + uint8_t version_compat_item; + uint8_t major; + uint8_t fw_minor; + uint8_t drv_minor; + uint16_t patch; + uint8_t rsv[2]; +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __ZXDH_EN_MPF_H__ */ \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.c new file mode 100644 index 000000000000..0ab91f61e9b1 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.c @@ -0,0 +1,59 @@ +#include +#include + +#include "zf_mpf_cfg_sf.h" + +static int32_t zxdh_cfg_resume(struct zxdh_auxiliary_device *adev) +{ + return 0; +} + +static int32_t zxdh_cfg_suspend(struct zxdh_auxiliary_device *adev, + pm_message_t state) +{ + return 0; +} + +static int32_t zxdh_cfg_probe(struct zxdh_auxiliary_device *adev, + const struct zxdh_auxiliary_device_id *id) +{ + struct cfg_sf_dev 
*__attribute__((unused)) + cfg_sf_dev = container_of(adev, struct cfg_sf_dev, adev); + + return 0; +} + +static int32_t zxdh_cfg_remove(struct zxdh_auxiliary_device *adev) +{ + return 0; +} + +static const struct zxdh_auxiliary_device_id zxdh_cfg_id_table[] = { + { + .name = ZXDH_EN_SF_NAME ".mpf_cfg", + }, + {}, +}; + +//MODULE_DEVICE_TABLE(auxiliary_zxdh_id_table, zxdh_cfg_id_table); + +static struct zxdh_auxiliary_driver zxdh_cfg_driver = { + .name = "mpf_cfg", + .probe = zxdh_cfg_probe, + .remove = zxdh_cfg_remove, + .suspend = zxdh_cfg_suspend, + .resume = zxdh_cfg_resume, + .id_table = zxdh_cfg_id_table, +}; + +int32_t zxdh_mpf_sf_driver_register(void) +{ + return zxdh_auxiliary_driver_register(&zxdh_cfg_driver); + ; +} + +void zxdh_mpf_sf_driver_uregister(void) +{ + zxdh_auxiliary_driver_unregister(&zxdh_cfg_driver); + ; +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.h new file mode 100644 index 000000000000..455932dc40cc --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_cfg_sf.h @@ -0,0 +1,27 @@ +#ifndef __ZF_MPF_CFG_SF_H__ +#define __ZF_MPF_CFG_SF_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +struct cfg_sf_ops { +}; + +struct cfg_sf_dev { + struct zxdh_auxiliary_device adev; + struct dh_core_dev *dh_dev; + struct cfg_sf_ops *ops; +}; + +int32_t zxdh_mpf_sf_driver_register(void); +void zxdh_mpf_sf_driver_uregister(void); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.c new file mode 100644 index 000000000000..faadd23f3586 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.c @@ -0,0 +1,126 @@ +#include +#include +#include "zf_mpf_devlink.h" + +struct devlink_ops dh_mpf_devlink_ops = { + +}; + +enum { + DH_MPF_PARAMS_MAX, +}; + +static int32_t __attribute__((unused)) sample_check(struct 
dh_core_dev *dev) +{ + return 1; +} + +enum dh_mpf_devlink_param_id { + DH_MPF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, +}; + +static int32_t dh_devlink_sample_set(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +static int32_t dh_devlink_sample_get(struct devlink *devlink, uint32_t id, + struct devlink_param_gset_ctx *ctx) +{ + struct dh_core_dev *__attribute__((unused)) dev = devlink_priv(devlink); + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_REGISTER +static const struct devlink_params { + const char *name; + int32_t (*check)(struct dh_core_dev *dev); + struct devlink_param param; +} devlink_params[] = { [DH_MPF_PARAMS_MAX] = { + .name = "sample", + .check = &sample_check, + .param = DEVLINK_PARAM_DRIVER( + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, + dh_devlink_sample_set, NULL), + } }; + +static int32_t params_register(struct devlink *devlink) +{ + int32_t i = 0; + int32_t err = 0; + struct dh_core_dev *dh_dev = devlink_priv(devlink); + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + if (devlink_params[i].check(dh_dev)) { + err = devlink_param_register(devlink, + &devlink_params[i].param); + if (err) { + goto rollback; + } + } + } + + return 0; + +rollback: + /* unwind only entries [0, i) that passed check and were registered */ + for (i--; i >= 0; i--) { + if (devlink_params[i].check(dh_dev)) { + devlink_param_unregister( + devlink, &devlink_params[i].param); + } + } + + return err; +} + +static int32_t params_unregister(struct devlink *devlink) +{ + int32_t i = 0; + + for (i = 0; i < ARRAY_SIZE(devlink_params); i++) { + devlink_param_unregister(devlink, &devlink_params[i].param); + } + + return 0; +} +#else +static struct devlink_param devlink_params[] = { + [DH_MPF_PARAMS_MAX] = DEVLINK_PARAM_DRIVER( + DH_MPF_DEVLINK_PARAM_ID_SAMPLE, "sample", + DEVLINK_PARAM_TYPE_BOOL, 
BIT(DEVLINK_PARAM_CMODE_RUNTIME), + dh_devlink_sample_get, dh_devlink_sample_set, NULL), +}; + +static int32_t params_register(struct devlink *devlink) +{ + struct dh_core_dev *__attribute__((unused)) + dh_dev = devlink_priv(devlink); + int32_t err = 0; + + err = devlink_params_register(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return err; +} +static int32_t params_unregister(struct devlink *devlink) +{ + devlink_params_unregister(devlink, devlink_params, + ARRAY_SIZE(devlink_params)); + + return 0; +} +#endif + +struct dh_core_devlink_ops dh_mpf_core_devlink_ops = { + .params_register = params_register, + .params_unregister = params_unregister +}; diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.h new file mode 100644 index 000000000000..675b68195563 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_devlink.h @@ -0,0 +1,14 @@ +#ifndef __ZF_MPF_DEVLINK_H__ +#define __ZF_MPF_DEVLINK_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.c new file mode 100644 index 000000000000..6550b1e71d3e --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.c @@ -0,0 +1,517 @@ +#include +#include +#include +#include +#include +#include "zf_mpf_irq.h" +#include "zf_mpf_eq.h" +#include "zf_mpf.h" +#include "gdma.h" + +struct dh_mpf_eq_table { + struct dh_irq **comp_irqs; + struct dh_irq *async_risc_irq; + struct dh_irq *async_pf_irq; + struct dh_irq *async_irq1; + struct dh_irq *async_irq2; + struct dh_irq *async_irq3; + struct dh_irq *async_irq4; + struct dh_irq **gdma_irqs; + struct dh_eq_async async_risc_eq; + struct dh_eq_async async_pf_eq; + struct dh_eq_async async_eq1; + struct dh_eq_async async_eq2; + struct dh_eq_async async_eq3; + struct dh_eq_async async_eq4; + struct 
dh_eq_async gdma_eq[ZXDH_MPF_GDMA_IRQ_NUM]; +}; + +static int32_t create_async_eqs(struct dh_core_dev *dev); + +#ifdef CONFIG_ZF_GDMA +static int32_t create_gdma_eqs(struct dh_core_dev *dev); +void cleanup_gdma_eq(struct dh_core_dev *dev, + struct dh_mpf_eq_table *table_priv, uint16_t num); +#endif + +static int32_t __attribute__((unused)) create_eq_map(struct dh_eq_param *param) +{ + int32_t err = 0; + + /* inform device*/ + return err; +} + +int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev) +{ + struct dh_mpf_eq_table *table_priv = NULL; + + table_priv = kvzalloc(sizeof(*table_priv), GFP_KERNEL); + if (unlikely(table_priv == NULL)) { + return -ENOMEM; + } + + dh_eq_table_init(dev, table_priv); + + return 0; +} + +/*todo*/ +int32_t dh_eq_get_comp_eqs(struct dh_core_dev *dev) +{ + return 0; +} + +static int32_t create_comp_eqs(struct dh_core_dev *dev) +{ + if (IS_ERR_OR_NULL(dev)) { + DH_LOG_ERR(MODULE_MPF, "error dev\n"); + return PTR_ERR(dev); + } + + return 0; +} + +static int32_t destroy_async_eq(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + + mutex_lock(&eq_table->lock); + /*unmap inform device*/ + mutex_unlock(&eq_table->lock); + + return 0; +} + +static void cleanup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq, + const char *name) +{ + dh_eq_disable(dev, &eq->core, &eq->irq_nb); +} + +static void destroy_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_mpf_eq_table *table_priv = table->priv; + + cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv"); + cleanup_async_eq(dev, &table_priv->async_pf_eq, "pf"); + cleanup_async_eq(dev, &table_priv->async_eq1, "eq1"); + cleanup_async_eq(dev, &table_priv->async_eq2, "eq2"); + cleanup_async_eq(dev, &table_priv->async_eq3, "eq3"); + cleanup_async_eq(dev, &table_priv->async_eq4, "eq4"); + destroy_async_eq(dev); + dh_irqs_release_vectors(&table_priv->async_risc_irq, 1); + dh_irqs_release_vectors(&table_priv->async_pf_irq, 
1);
+	dh_irqs_release_vectors(&table_priv->async_irq1, 1);
+	dh_irqs_release_vectors(&table_priv->async_irq2, 1);
+	dh_irqs_release_vectors(&table_priv->async_irq3, 1);
+	dh_irqs_release_vectors(&table_priv->async_irq4, 1);
+}
+
+void destroy_comp_eqs(struct dh_core_dev *dev)
+{
+}
+
+#ifdef CONFIG_ZF_GDMA
+/* Tear down the GDMA EQs and release their irq vectors and pointer array. */
+static void destroy_gdma_eqs(struct dh_core_dev *dev)
+{
+	struct dh_eq_table *eq_table = &dev->eq_table;
+	struct dh_mpf_eq_table *mpf_eq_table = eq_table->priv;
+	struct dh_irq_table *irq_table = &dev->irq_table;
+	struct dh_mpf_irq_table *mpf_irq_table = irq_table->priv;
+
+	cleanup_gdma_eq(dev, mpf_eq_table, ZXDH_MPF_GDMA_IRQ_NUM);
+
+	dh_irq_affinity_irqs_release(mpf_irq_table->mpf_gdma_pool,
+				     mpf_eq_table->gdma_irqs,
+				     ZXDH_MPF_GDMA_IRQ_NUM);
+	dh_irqs_release_vectors(mpf_eq_table->gdma_irqs, ZXDH_MPF_GDMA_IRQ_NUM);
+	/*
+	 * The irq pointer array itself is kcalloc()ed in create_gdma_eqs()
+	 * and was only freed on the create-error path; free it here too or
+	 * it leaks on every table destroy.
+	 */
+	kfree(mpf_eq_table->gdma_irqs);
+	mpf_eq_table->gdma_irqs = NULL;
+}
+#endif
+
+void dh_mpf_eq_table_destroy(struct dh_core_dev *dev)
+{
+	destroy_comp_eqs(dev);
+	destroy_async_eqs(dev);
+#ifdef CONFIG_ZF_GDMA
+	destroy_gdma_eqs(dev);
+#endif
+}
+
+/* Create async, completion and (optionally) GDMA event queues. */
+int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev)
+{
+	int32_t err = 0;
+
+	err = create_async_eqs(dev);
+	if (err != 0) {
+		DH_LOG_ERR(MODULE_MPF, "Failed to create async EQs\n");
+		goto err_async_eqs;
+	}
+
+	err = create_comp_eqs(dev);
+	if (err != 0) {
+		DH_LOG_ERR(MODULE_MPF, "Failed to create completion EQs\n");
+		goto err_comp_eqs;
+	}
+
+#ifdef CONFIG_ZF_GDMA
+	err = create_gdma_eqs(dev);
+	if (err != 0) {
+		DH_LOG_ERR(MODULE_MPF, "Failed to create gdma EQs\n");
+		goto err_comp_eqs;
+	}
+#endif
+
+	return 0;
+
+err_comp_eqs:
+	/* destroy_comp_eqs() is currently a no-op, so only async EQs to undo. */
+	destroy_async_eqs(dev);
+err_async_eqs:
+	return err;
+}
+
+/* create eventq */
+static int32_t create_async_eq(struct dh_core_dev *dev, struct dh_irq *risc,
+			       struct dh_irq *pf)
+{
+	struct dh_eq_table *eq_table = &dev->eq_table;
+	struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev);
+	struct msix_para in = { 0 };
+	int32_t err = 0;
+
+	in.vector_risc = risc->index;
+	in.vector_pfvf = pf->index;
+	in.vector_mpf = 0xff;
in.driver_type = MSG_CHAN_END_PF; //TODO + in.pdev = dev->pdev; + in.virt_addr = mpf_dev->pci_ioremap_addr + ZXDH_BAR1_CHAN_OFFSET; + in.pcie_id = mpf_dev->pcie_id; + DH_LOG_INFO(MODULE_MPF, "pcie_id = 0x%x\n", mpf_dev->pcie_id); + + mutex_lock(&eq_table->lock); + + err = zxdh_bar_enable_chan(&in, &mpf_dev->vport); + + mutex_unlock(&eq_table->lock); + + return err; +} + +static int32_t dh_eq_async_riscv_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + &eq_table->nh[DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF], + DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF, NULL); + + return 0; +} + +static int32_t dh_eq_async_mpf_int(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain( + &eq_table->nh[DH_EVENT_TYPE_NOTIFY_PF_TO_MPF], + DH_EVENT_TYPE_NOTIFY_PF_TO_MPF, NULL); + + return 0; +} + +static int32_t dh_eq_async_int1(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_1], + DH_EVENT_TYPE_NOTIFY_1, dev); + + return 0; +} + +static int32_t dh_eq_async_int2(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = 
&dev->eq_table; + + atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_2], + DH_EVENT_TYPE_NOTIFY_2, dev); + + return 0; +} + +static int32_t dh_eq_async_int3(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_3], + DH_EVENT_TYPE_NOTIFY_3, dev); + + return 0; +} + +static int32_t dh_eq_async_int4(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct dh_eq_async *eq_riscv_async = + container_of(nb, struct dh_eq_async, irq_nb); + struct dh_core_dev *dev = (struct dh_core_dev *)eq_riscv_async->priv; + struct dh_eq_table *eq_table = &dev->eq_table; + + atomic_notifier_call_chain(&eq_table->nh[DH_EVENT_TYPE_NOTIFY_4], + DH_EVENT_TYPE_NOTIFY_4, dev); + + return 0; +} + +static int32_t create_async_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *table = &dev->eq_table; + struct dh_mpf_eq_table *table_priv = table->priv; + struct dh_eq_param param = {}; + int32_t err = 0; + + DH_LOG_DEBUG(MODULE_MPF, "start\r\n"); + table_priv->async_risc_irq = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_risc_irq)) { + DH_LOG_ERR(MODULE_MPF, "Failed to get async_risc_irq\n"); + return PTR_ERR(table_priv->async_risc_irq); + } + + table_priv->async_pf_irq = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_pf_irq)) { + err = PTR_ERR(table_priv->async_pf_irq); + DH_LOG_ERR(MODULE_MPF, "Failed to get async_pf_irq\n"); + goto err_irq_request; + } + + table_priv->async_irq1 = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_irq1)) { + err = PTR_ERR(table_priv->async_irq1); + DH_LOG_ERR(MODULE_MPF, "Failed to get async_irq1\n"); + goto err_irq_request1; + } + + table_priv->async_irq2 = 
dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_irq2)) { + err = PTR_ERR(table_priv->async_irq2); + DH_LOG_ERR(MODULE_MPF, "Failed to get async_irq2\n"); + goto err_irq_request2; + } + + table_priv->async_irq3 = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_irq3)) { + err = PTR_ERR(table_priv->async_irq3); + DH_LOG_ERR(MODULE_MPF, "Failed to get async_irq3\n"); + goto err_irq_request3; + } + + table_priv->async_irq4 = dh_mpf_async_irq_request(dev); + if (IS_ERR_OR_NULL(table_priv->async_irq4)) { + err = PTR_ERR(table_priv->async_irq4); + DH_LOG_ERR(MODULE_MPF, "Failed to get async_irq4\n"); + goto err_irq_request4; + } + + err = create_async_eq(dev, table_priv->async_risc_irq, + table_priv->async_pf_irq); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to create async_eq\n"); + goto err_create_async_eq; + } + + param = (struct dh_eq_param){ + .irq = table_priv->async_risc_irq, + .nent = 10, + .event_type = + DH_EVENT_QUEUE_TYPE_RISCV /* used for inform dpu */ + }; + err = setup_async_eq(dev, &table_priv->async_risc_eq, ¶m, + dh_eq_async_riscv_int, "riscv", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_risc_eq\n"); + goto err_setup_async_risc_eq; + } + + param.irq = table_priv->async_pf_irq, + err = setup_async_eq(dev, &table_priv->async_pf_eq, ¶m, + dh_eq_async_mpf_int, "pf", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_pf_eq\n"); + goto err_setup_async_pf_eq; + } + + param.irq = table_priv->async_irq1, + err = setup_async_eq(dev, &table_priv->async_eq1, ¶m, + dh_eq_async_int1, "irq1", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_eq1\n"); + goto err_setup_async_eq1; + } + + param.irq = table_priv->async_irq2, + err = setup_async_eq(dev, &table_priv->async_eq2, ¶m, + dh_eq_async_int2, "irq2", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_eq1\n"); + goto err_setup_async_eq2; + } + + param.irq = 
table_priv->async_irq3, + err = setup_async_eq(dev, &table_priv->async_eq3, ¶m, + dh_eq_async_int3, "irq3", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_eq3\n"); + goto err_setup_async_eq3; + } + + param.irq = table_priv->async_irq4, + err = setup_async_eq(dev, &table_priv->async_eq4, ¶m, + dh_eq_async_int4, "irq4", dev); + if (err != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup async_eq4\n"); + goto err_setup_async_eq4; + } + + return 0; + +err_setup_async_eq4: + cleanup_async_eq(dev, &table_priv->async_eq3, "irq3"); +err_setup_async_eq3: + cleanup_async_eq(dev, &table_priv->async_eq2, "irq2"); +err_setup_async_eq2: + cleanup_async_eq(dev, &table_priv->async_eq1, "irq1"); +err_setup_async_eq1: + cleanup_async_eq(dev, &table_priv->async_pf_eq, "pf"); +err_setup_async_pf_eq: + cleanup_async_eq(dev, &table_priv->async_risc_eq, "riscv"); +err_setup_async_risc_eq: + destroy_async_eq(dev); +err_create_async_eq: + dh_irqs_release_vectors(&table_priv->async_irq4, 1); +err_irq_request4: + dh_irqs_release_vectors(&table_priv->async_irq3, 1); +err_irq_request3: + dh_irqs_release_vectors(&table_priv->async_irq2, 1); +err_irq_request2: + dh_irqs_release_vectors(&table_priv->async_irq1, 1); +err_irq_request1: + dh_irqs_release_vectors(&table_priv->async_pf_irq, 1); +err_irq_request: + dh_irqs_release_vectors(&table_priv->async_risc_irq, 1); + return err; +} + +#ifdef CONFIG_ZF_GDMA +void cleanup_gdma_eq(struct dh_core_dev *dev, + struct dh_mpf_eq_table *table_priv, uint16_t num) +{ + uint16_t i = 0; + + for (i = 0; i < num; i++) { + cleanup_async_eq(dev, &table_priv->gdma_eq[i], NULL); + } +} + +int32_t setup_gdma_eq(struct dh_core_dev *dev, + struct dh_mpf_eq_table *table_priv, uint16_t gdma_irq_num) +{ + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(dev); + struct zf_gdma_dev *gdev = mpf_dev->gdev; + struct dh_eq_param param = {}; + uint16_t i = 0; + int32_t ret = 0; + void *priv = NULL; + notifier_fn_t callback; + + if (gdma_irq_num > 
ZXDH_MPF_GDMA_IRQ_NUM) { + DH_LOG_ERR(MODULE_MPF, "gdma_irq_num %d is invalid\n", + gdma_irq_num); + return -1; + } + + for (i = 0; i < gdma_irq_num; i++) { + if (i < ZF_GDMA_CHAN_NUM) { + callback = zf_gdma_chan_irq_handle; + priv = &gdev->chan[i]; + } else { + callback = zf_gdma_err_irq_handle; + priv = dev; + } + + param.irq = table_priv->gdma_irqs[i]; + ret = setup_async_eq(dev, &table_priv->gdma_eq[i], ¶m, + callback, "gdma", priv); + if (ret != 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to setup gdma %d eq\n", + i); + cleanup_gdma_eq(dev, table_priv, i); + return ret; + } + } + + return 0; +} + +static int32_t create_gdma_eqs(struct dh_core_dev *dev) +{ + struct dh_eq_table *eq_table = &dev->eq_table; + struct dh_mpf_eq_table *mpf_eq_table = eq_table->priv; + struct dh_irq_table *irq_table = &dev->irq_table; + struct dh_mpf_irq_table *mpf_irq_table = irq_table->priv; + int32_t ret = 0; + int numa = dev_to_node(dev->device); + + mpf_eq_table->gdma_irqs = kcalloc(ZXDH_MPF_GDMA_IRQ_NUM, + sizeof(struct dh_irq *), GFP_KERNEL); + if (unlikely(mpf_eq_table->gdma_irqs == NULL)) { + DH_LOG_ERR(MODULE_MPF, + "Failed to alloc mpf_eq_table->gdma_irqs\n"); + return -ENOMEM; + } + + ret = dh_irq_affinity_irqs_request_auto(mpf_irq_table->mpf_gdma_pool, + mpf_eq_table->gdma_irqs, + ZXDH_MPF_GDMA_IRQ_NUM, numa); + if (ret < 0) { + DH_LOG_ERR(MODULE_MPF, "Failed to get gdma irq\n"); + goto err_gdma_irq; + } + + ret = setup_gdma_eq(dev, mpf_eq_table, ZXDH_MPF_GDMA_IRQ_NUM); + if (ret != 0) { + goto err_gdma_eq; + } + + return 0; + +err_gdma_eq: + dh_irq_affinity_irqs_release(mpf_irq_table->mpf_gdma_pool, + mpf_eq_table->gdma_irqs, + ZXDH_MPF_GDMA_IRQ_NUM); + dh_irqs_release_vectors(mpf_eq_table->gdma_irqs, ZXDH_MPF_GDMA_IRQ_NUM); +err_gdma_irq: + kfree(mpf_eq_table->gdma_irqs); + return ret; +} +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.h new file mode 100644 index 
000000000000..6997afe67345 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_eq.h @@ -0,0 +1,19 @@ +#ifndef __ZF_MPF_EQ_H__ +#define __ZF_MPF_EQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +int32_t dh_mpf_eq_table_init(struct dh_core_dev *dev); + +int32_t dh_mpf_eq_table_create(struct dh_core_dev *dev); +void dh_mpf_eq_table_destroy(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.c new file mode 100644 index 000000000000..25ffd6775f21 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.c @@ -0,0 +1,162 @@ +#include +#include +#include +#include "zf_mpf_irq.h" + +static struct dh_irq_range zxdh_get_mpf_range(struct dh_core_dev *dev) +{ + struct dh_irq_range tmp = { .start = 0, + .size = ZXDH_MPF_ASYNC_IRQ_NUM }; + + return tmp; +} +static struct dh_irq_range zxdh_get_comp_mpf_range(struct dh_core_dev *dev) +{ + struct dh_irq_range tmp = { .start = ZXDH_MPF_ASYNC_IRQ_NUM, + .size = ZXDH_MPF_COMP_IRQ_NUM }; + + return tmp; +} + +static struct dh_irq_range zxdh_get_gdma_irq_range(struct dh_core_dev *dev) +{ + struct dh_irq_range tmp = { .start = ZXDH_MPF_GDMA_MSIX_VEC_BASE, + .size = ZXDH_MPF_GDMA_IRQ_NUM }; + + return tmp; +} + +static int32_t irq_pools_init(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + int32_t err = 0; + struct dh_irq_range irq_range; + struct dh_mpf_irq_table *mpf_irq_table = table->priv; + + /* init mpf_pool */ + irq_range = zxdh_get_mpf_range(dev); + mpf_irq_table->mpf_async_pool = (struct dh_irq_pool *)irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_mpf_msg", + ZXDH_MPF_ASYNC_IRQ_MIN_COMP, ZXDH_MPF_ASYNC_IRQ_MAX_COMP); + if (IS_ERR_OR_NULL(mpf_irq_table->mpf_async_pool)) { + return PTR_ERR(mpf_irq_table->mpf_async_pool); + } + + /* init sf_comp_pool */ + irq_range = zxdh_get_comp_mpf_range(dev); + 
mpf_irq_table->mpf_comp_pool = (struct dh_irq_pool *)irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_mpf_comp", + ZXDH_MPF_COMP_IRQ_MIN_COMP, ZXDH_MPF_COMP_IRQ_MAX_COMP); + if (IS_ERR_OR_NULL(mpf_irq_table->mpf_comp_pool)) { + err = PTR_ERR(mpf_irq_table->mpf_comp_pool); + goto err_mpf_comp; + } + + mpf_irq_table->mpf_comp_pool->irqs_per_cpu = + kcalloc(nr_cpu_ids, sizeof(u16), GFP_KERNEL); + if (unlikely(mpf_irq_table->mpf_comp_pool->irqs_per_cpu == NULL)) { + err = -ENOMEM; + goto err_irqs_per_cpu; + } + + /* init gdma_irq_pool */ + irq_range = zxdh_get_gdma_irq_range(dev); + mpf_irq_table->mpf_gdma_pool = (struct dh_irq_pool *)irq_pool_alloc( + dev, irq_range.start, irq_range.size, "zxdh_mpf_gdma", + ZXDH_MPF_GDMA_IRQ_MIN, ZXDH_MPF_GDMA_IRQ_MAX); + if (IS_ERR_OR_NULL(mpf_irq_table->mpf_gdma_pool)) { + err = PTR_ERR(mpf_irq_table->mpf_gdma_pool); + goto err_irqs_per_cpu; + } + + return 0; + +err_irqs_per_cpu: + irq_pool_free(mpf_irq_table->mpf_comp_pool); +err_mpf_comp: + irq_pool_free(mpf_irq_table->mpf_async_pool); + return err; +} + +static void irq_pools_destroy(struct dh_irq_table *table) +{ + struct dh_mpf_irq_table *mpf_irq_table = + (struct dh_mpf_irq_table *)table->priv; + + irq_pool_free(mpf_irq_table->mpf_comp_pool); + irq_pool_free(mpf_irq_table->mpf_async_pool); + irq_pool_free(mpf_irq_table->mpf_gdma_pool); +} + +/*todo*/ +static int32_t zxdh_get_total_vec(struct dh_core_dev *dev) +{ + return ZXDH_ZF_MPF_IRQ_NUM_TOTAL; +} + +int32_t dh_mpf_irq_table_create(struct dh_core_dev *dev) +{ + int32_t total_vec = 0; + int32_t err = 0; + + total_vec = zxdh_get_total_vec(dev); + + total_vec = pci_alloc_irq_vectors(dev->pdev, total_vec, total_vec, + PCI_IRQ_MSIX); + if (total_vec < 0) { + DH_LOG_ERR(MODULE_MPF, "pci_alloc_irq_vectors failed: %d\n", + total_vec); + return total_vec; + } + + err = irq_pools_init(dev); + if (err != 0) { + pci_free_irq_vectors(dev->pdev); + } + + return err; +} + +void dh_mpf_irq_table_destroy(struct dh_core_dev 
*dev) +{ + struct dh_irq_table *table = &dev->irq_table; + + /* There are cases where IRQs still will be in used when we reaching + * to here. Hence, making sure all the irqs are released. + */ + irq_pools_destroy(table); + pci_free_irq_vectors(dev->pdev); +} + +struct dh_irq *dh_mpf_async_irq_request(struct dh_core_dev *dev) +{ + struct dh_irq_table *table = &dev->irq_table; + struct dh_mpf_irq_table *mpf_irq_table = + (struct dh_mpf_irq_table *)table->priv; + + struct dh_irq *irq = + zxdh_get_irq_of_pool(dev, mpf_irq_table->mpf_async_pool); + if (IS_ERR_OR_NULL(irq)) + DH_LOG_ERR(MODULE_MPF, "irq=0x%llx\r\n", + (unsigned long long)irq); + DH_LOG_DEBUG(MODULE_MPF, "end\r\n"); + return irq; +} + +/* irq_table API */ +int32_t dh_mpf_irq_table_init(struct dh_core_dev *dev) +{ + struct dh_irq_table *irq_table; + struct dh_mpf_irq_table *mpf_irq_table = NULL; + + irq_table = &dev->irq_table; + + mpf_irq_table = kvzalloc(sizeof(*mpf_irq_table), GFP_KERNEL); + if (unlikely(mpf_irq_table == NULL)) { + return -ENOMEM; + } + + irq_table->priv = mpf_irq_table; + + return 0; +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.h b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.h new file mode 100644 index 000000000000..e1d361cbe346 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_mpf_irq.h @@ -0,0 +1,49 @@ +#ifndef __ZF_MPF_IRQ_H__ +#define __ZF_MPF_IRQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#include "gdma.h" + +#define ZXDH_MPF_ASYNC_IRQ_NUM 6 +#define ZXDH_MPF_ASYNC_IRQ_MIN_COMP 0 +#define ZXDH_MPF_ASYNC_IRQ_MAX_COMP 1 + +#define ZXDH_MPF_COMP_IRQ_NUM 1 +#define ZXDH_MPF_COMP_IRQ_MIN_COMP 0 +#define ZXDH_MPF_COMP_IRQ_MAX_COMP 1 + +#define ZXDH_MPF_GDMA_IRQ_NUM (ZF_GDMA_CHAN_NUM + 1) +#define ZXDH_MPF_GDMA_MSIX_VEC_BASE 10 +#define ZXDH_MPF_GDMA_IRQ_MIN 0 +#define ZXDH_MPF_GDMA_IRQ_MAX 1 + +/* async irq:<0-5> comp irq:<6> gdma irq:<10-14> */ +#define ZXDH_ZF_MPF_IRQ_NUM_TOTAL 16 + +struct dh_mpf_irq_table { + struct 
dh_irq_pool *mpf_comp_pool; + struct dh_irq_pool *mpf_async_pool; + struct dh_irq_pool *mpf_gdma_pool; +}; + +struct dh_irq_range { + int32_t start; + int32_t size; +}; + +struct dh_irq *dh_mpf_async_irq_request(struct dh_core_dev *dev); +void dh_mpf_irq_table_destroy(struct dh_core_dev *dev); +int32_t dh_mpf_irq_table_create(struct dh_core_dev *dev); +int32_t dh_mpf_irq_table_init(struct dh_core_dev *dev); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zf_reset_finish_flag.c b/drivers/net/ethernet/dinghai/zf_mpf/zf_reset_finish_flag.c new file mode 100644 index 000000000000..789373d95fdd --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zf_reset_finish_flag.c @@ -0,0 +1,108 @@ +#include +#include +#include +#include +#include +#include +#include +#include "epc/pcie-zte-zf-epc.h" + +#define SET_BIT1_0 0x0 +#define SET_BIT1_1 0x1 +#define SYSFS_ZF_RESET_FINISH_FLAG_DIR "zf_reset_finish_flag" +#define ZF_RESET_FINISH_FLAG_MODE 0664 +#define ZF_RESET_FINISH_FLAG flag +#define NOTIFY_OFFEST \ + (PCIE_DPU_MPF_CSR_ADDR(PCIE_DPU_EP_CSR_SIZE * PCIE_DPU_EP_NUM) + 4) +#define NOTIFY_VALUE(a) (a << 1) +#define NOTIFY_MASK (1 << 1) + +static char flag_data[PAGE_SIZE]; +static struct kobject *zf_reset_finish_flag_kobj; +unsigned long op_paddr; + +static int notify_host_zf_reset_finished(unsigned long flag) +{ + int ret = 0; + + ret = cfg_phy_rmw(op_paddr + NOTIFY_OFFEST, NOTIFY_VALUE(flag), + NOTIFY_MASK); + + if (ret) { + DH_LOG_ERR( + MODULE_MPF, + "notify_host_zf_reset_finished, cfg_phy_rmw write failed!\n"); + } + + return ret; +} + +static ssize_t read_flag(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s", flag_data); +} + +static ssize_t write_flag(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int size = 0; + unsigned long flag = 0; + char *end = NULL; + + size = snprintf(flag_data, PAGE_SIZE, 
"%s", buf); + if ((size < 0) || (size >= PAGE_SIZE)) { + DH_LOG_ERR(MODULE_MPF, "get size failed\n"); + size = 0; + return size; + } + + flag = simple_strtoul(flag_data, &end, 16); + if ((SET_BIT1_0 == flag) || (SET_BIT1_1 == flag)) { + notify_host_zf_reset_finished(flag); + } + + return size; +} + +static struct kobj_attribute flag_attribute = __ATTR( + ZF_RESET_FINISH_FLAG, ZF_RESET_FINISH_FLAG_MODE, read_flag, write_flag); + +int zf_reset_finish_flag_init(struct dh_core_dev *dh_dev, + unsigned long ep_mpf_paddr) +{ + int ret = 0; + + op_paddr = ep_mpf_paddr; + if (zf_reset_finish_flag_kobj == NULL) { + zf_reset_finish_flag_kobj = kobject_create_and_add( + SYSFS_ZF_RESET_FINISH_FLAG_DIR, kernel_kobj); + if (zf_reset_finish_flag_kobj == NULL) { + return -ENOMEM; + } + + ret = sysfs_create_file(zf_reset_finish_flag_kobj, + &flag_attribute.attr); + if (ret) { + kobject_put(zf_reset_finish_flag_kobj); + zf_reset_finish_flag_kobj = NULL; + } + } else { + DH_LOG_ERR( + MODULE_MPF, + "zf_reset_finish_flag_kobj is not NULL!, can't create zf_reset_finish_flag!\n"); + ret = -EINVAL; + } + + return ret; +} + +void zf_reset_finish_flag_exit(void) +{ + if (zf_reset_finish_flag_kobj != NULL) { + sysfs_remove_file(zf_reset_finish_flag_kobj, + &flag_attribute.attr); + kobject_put(zf_reset_finish_flag_kobj); + zf_reset_finish_flag_kobj = NULL; + } +} diff --git a/drivers/net/ethernet/dinghai/zf_mpf/zxdh_reset_zf.c b/drivers/net/ethernet/dinghai/zf_mpf/zxdh_reset_zf.c new file mode 100644 index 000000000000..dd0787c65e45 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zf_mpf/zxdh_reset_zf.c @@ -0,0 +1,265 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include "zf_mpf.h" + +#define ZXDH_SYSFS_DIR "zxdh_host_reset" +#define ZXDH_SYSFS_FILE_EP_CHECK_REGISTER "ep_check_register" +#define ZXDH_SYSFS_FILE_EP_RESET_INFO "ep_reset_info" + +struct zxdh_reset_dev { + uint16_t pcie_id; + uint64_t bar0_base_virt_addr; + uint64_t is_valid; +} reset_dev = 
{ 0 }; + +struct zxdh_reset_dev *zxdh_get_reset_dev(void) +{ + return &reset_dev; +} + +int zxdh_init_reset_dev(struct dh_core_dev *core_dev) +{ + struct zxdh_reset_dev *dev = zxdh_get_reset_dev(); + + struct dh_en_mpf_dev *mpf_dev = dh_core_priv(core_dev); + dev->bar0_base_virt_addr = mpf_dev->pci_ioremap_addr; + dev->is_valid = 1; + dev->pcie_id = mpf_dev->pcie_id; + return 0; +} + +extern int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, + struct zxdh_msg_recviver_mem *result); +extern int +zxdh_bar_chan_msg_recv_register(uint8_t event_id, + zxdh_bar_chan_msg_recv_callback callback); +extern int zxdh_bar_chan_msg_recv_unregister(uint8_t event_id); +extern int pcie_zte_zf_signal_epc_dev_init(uint32_t ep_idx); + +enum e_reset_event { + EV_HOST_RESET = 0, + EV_ZF_RESET = 1, + EV_DINGHAI_RESET = 2, + EV_ZXDH_RESET_TEST = 3, + EV_MAX_RESET +}; + +struct host_reset_ev_info { + int ep_no; +}; + +struct zxdh_reset_priv { + enum e_reset_event e_reset_event; + union { + struct host_reset_ev_info host_reset_ev; + } ev_info; + struct work_struct work; +}; + +/* 本模块在ZF中insmod + +使用sysfs接口,主目录:/sys/zxdh_host_reset/,包含下述文件 +1、ep_check_register +功能:业务通知risc-v需要进行检测的ep +参数:16进制的字符串。例子:0xffffffff +字符串转为4个字节长度的数值,每个bit位代表一个ep。bit0~bit31代表ep0~ep31。 +当bit位设置为1时,表示该ep需要进行复位检测上报。 +当bit位设置为0时,表示该ep不需要进行复位检测上报。 +用户操作: +1)向 ep_check_register 写入 value + 含义:通知risc-v需要进行检测的ep +2)读取ep_check_register + 返回value,表示当前监测的ep +2、ep_reset_info +功能:ep复位后,业务通过这个文件查询 +参数:16进制的字符串。例子:0xffffffff +字符串转为4个字节长度的数值,每个bit位代表一个ep。bit0~bit31代表ep0~ep31。 +当bit位为1时,表示该ep发生了reset。 +当bit位为0时,表示该ep没有发生reset。 +用户操作: +1)读取ep_reset_info + 返回value,获取发生reset的ep信息 +2)向 ep_reset_info 写入 value + 将发生reset的ep信息清0 +*/ +unsigned int ep_check_register; +unsigned int ep_reset_info; + +struct kobject *kobj_zxdh_host_reset; + +/*该函数被调用在sysfs文件被读时*/ +static ssize_t sysfs_show_ep_check_register(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "0x%08x", ep_check_register); +} + +/* 
该函数被调用在sysfs文件被写时*/ +static ssize_t sysfs_store_ep_check_register(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + uint16_t ret = 0; + + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + uint8_t recv_buffer[20] = { 0 }; + uint16_t recv_buff_len = 20; + struct zxdh_reset_dev *dev = zxdh_get_reset_dev(); + + if (sscanf(buf, "0x%x", &ep_check_register) != 1) { + return ret; + } + + in.virt_addr = dev->bar0_base_virt_addr + ZXDH_BAR1_CHAN_OFFSET; + in.payload_addr = &ep_check_register; + in.payload_len = sizeof(ep_check_register); + in.src = MSG_CHAN_END_PF; // MSG_CHAN_END_MPF; + in.dst = MSG_CHAN_END_RISC; + in.event_id = MODULE_RESET_MSG; /* 事件号 */ + in.src_pcieid = dev->pcie_id; + + result.recv_buffer = recv_buffer; + result.buffer_len = recv_buff_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret) { + DH_LOG_ERR(MODULE_MPF, + " '%s' zxdh_bar_chan_sync_msg_send failed.\n", + __FUNCTION__); + } + return count; +} + +/*该函数被调用在sysfs文件被读时*/ +static ssize_t sysfs_show_ep_reset_info(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "0x%08x", ep_reset_info); +} + +/* 该函数被调用在sysfs文件被写时*/ +static ssize_t sysfs_store_ep_reset_info(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + if (sscanf(buf, "0x%x", &ep_reset_info) == 1) { + return (ssize_t)count; + } + return 0; +} + +/*使用__ATTR宏初始化zxdh_host_reset_attr结构体,该宏定义在include/linux/sysfs.h*/ +struct kobj_attribute zxdh_host_reset_attr_ep_check_register = + __ATTR(ep_check_register, 0664, sysfs_show_ep_check_register, + sysfs_store_ep_check_register); +struct kobj_attribute zxdh_host_reset_attr_ep_reset_info = + __ATTR(ep_reset_info, 0664, sysfs_show_ep_reset_info, + sysfs_store_ep_reset_info); + +int32_t zxdh_reset_zf_rec_risc(void *pay_load, uint16_t len, void *reps_buffer, + uint16_t *reps_len, void *dev) +{ + struct zxdh_reset_priv *priv = NULL; + 
uint32_t ep_no = 0; + uint32_t ep_reset_info_tmp = ep_reset_info; + + if (pay_load && len && reps_buffer && reps_len) { + DH_LOG_INFO(MODULE_MPF, + "%s: para check ok ok ok.(%p, %x, %p, %p)\n", + __func__, pay_load, len, reps_buffer, reps_len); + } else { + DH_LOG_ERR(MODULE_MPF, "%s: para error.(%p, %x, %p, %p)\n", + __func__, pay_load, len, reps_buffer, reps_len); + return (uint16_t)-1; + } + + priv = pay_load; + ep_no = priv->ev_info.host_reset_ev.ep_no; + pcie_zte_zf_signal_epc_dev_init(ep_no); // 此函数只能放到中断的下半部 + + DH_LOG_INFO( + MODULE_MPF, + " %s: received msg from RISC-V: event_id[%d] len 0x%x, ep_no=%u\n", + __FUNCTION__, MODULE_RESET_MSG, len, ep_no); + + ep_reset_info |= 1 << ep_no; + DH_LOG_INFO(MODULE_MPF, " %s: ep_reset_info 0x%08x -> 0x%08x\n", + __FUNCTION__, ep_reset_info_tmp, ep_reset_info); + + return 0; +} + +/*模块初始化函数*/ +int zxdh_host_reset_driver_init(struct dh_core_dev *core_dev) +{ + int32_t ret = 0; + /*创建一个目录在/sys下 */ + kobj_zxdh_host_reset = kobject_create_and_add(ZXDH_SYSFS_DIR, NULL); + + zxdh_init_reset_dev(core_dev); + + /*在ZXDH_SYSFS_DIR目录下创建文件*/ + if (sysfs_create_file(kobj_zxdh_host_reset, + &zxdh_host_reset_attr_ep_check_register.attr)) { + DH_LOG_ERR(MODULE_MPF, + " 'ep_check_register' sysfs create failed.\n"); + goto error_sysfs; + } + + if (sysfs_create_file(kobj_zxdh_host_reset, + &zxdh_host_reset_attr_ep_reset_info.attr)) { + DH_LOG_ERR(MODULE_MPF, + " 'ep_reset_info' sysfs create failed.\n"); + goto error_sysfs; + } + + ret = zxdh_bar_chan_msg_recv_register(MODULE_RESET_MSG, + zxdh_reset_zf_rec_risc); + if (ret != 0) { + DH_LOG_ERR( + MODULE_MPF, + " zxdh_bar_chan_msg_recv_register: event_id[%d] register failed: %d\n", + MODULE_RESET_MSG, ret); + // return ret; + } + + DH_LOG_INFO(MODULE_MPF, " zxdh host reset module init ok.\n"); + return 0; + +error_sysfs: + zxdh_bar_chan_msg_recv_unregister(MODULE_RESET_MSG); + sysfs_remove_file(kernel_kobj, + &zxdh_host_reset_attr_ep_reset_info.attr); + sysfs_remove_file(kernel_kobj, + 
&zxdh_host_reset_attr_ep_check_register.attr);
+	kobject_put(kobj_zxdh_host_reset);
+	return -1;
+}
+
+/* Module exit function: tear down in reverse order of init. */
+void zxdh_host_reset_driver_exit(struct dh_core_dev *core_dev)
+{
+	zxdh_bar_chan_msg_recv_unregister(MODULE_RESET_MSG);
+	/*
+	 * The attribute files were created on kobj_zxdh_host_reset, not on
+	 * kernel_kobj; remove them from the same kobject or the removal is
+	 * a no-op and the attrs dangle after kobject_put().
+	 */
+	sysfs_remove_file(kobj_zxdh_host_reset,
+			  &zxdh_host_reset_attr_ep_reset_info.attr);
+	sysfs_remove_file(kobj_zxdh_host_reset,
+			  &zxdh_host_reset_attr_ep_check_register.attr);
+	kobject_put(kobj_zxdh_host_reset);
+	DH_LOG_INFO(MODULE_MPF, " zxdh host reset module remove ok.\n");
+}
+#if 0
+module_init(zxdh_host_reset_driver_init);
+module_exit(zxdh_host_reset_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("zxdh");
+MODULE_DESCRIPTION("for zxdh host reset");
+#endif
diff --git a/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.c b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.c
new file mode 100644
index 000000000000..be3900f32892
--- /dev/null
+++ b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.c
@@ -0,0 +1,2500 @@
+#include
+#include
+#include "../en_aux.h"
+#include "../cmd/msg_chan_priv.h"
+#include "../en_aux/priv_queue.h"
+#include "zxdh_tools_ioctl.h"
+#include
+#include
+#include
+#include
+
+/* Create the packet-event workqueue once and hook up the save-file work. */
+uint8_t pkt_event_init(struct zxdh_en_device *en_dev)
+{
+	if (!en_dev->pkt_wq) {
+		en_dev->pkt_wq =
+			create_singlethread_workqueue("dh_aux_pkt_events");
+		if (!en_dev->pkt_wq) {
+			LOG_ERR("events->pkt_wq create_singlethread_workqueue failed: %p\n",
+				en_dev->pkt_wq);
+			return DHTOOL_ERROR;
+		}
+	}
+
+	INIT_WORK(&en_dev->capture_save_file_work,
+		  capture_save_file_work_handler);
+	return 0;
+}
+
+void pkt_event_uninit(struct zxdh_en_device *en_dev)
+{
+	if (en_dev->pkt_wq) {
+		destroy_workqueue(en_dev->pkt_wq);
+		en_dev->pkt_wq = NULL;
+	}
+}
+
+uint8_t pkt_packet_process(struct zxdh_en_device *en_dev, void *buf,
+			   uint32_t len, uint8_t pkt_flag)
+{
+	uint8_t flag = 0;
+
+	if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) {
+		return 0;
+	}
+
+	if (pkt_flag == 0) {
+		return 0;
+	}
+
+	if (len <=
ZXDH_PKT_HEDER_LENGTH) { + LOG_ERR("pkt error, packet len less than pkt_header_length\n"); + return 0; + } + + if (en_dev->pkt_cap_switch == 1) { + flag = 1; + } + + if ((en_dev->pkt_save_file.enable_pkt_num_mode == 1 && + (en_dev->pkt_save_file.pkt_cur_num < + en_dev->pkt_save_file.pkt_set_count)) || + (en_dev->pkt_save_file.enable_pkt_num_mode == 0 && + (en_dev->pkt_save_file.pkt_file_size > 0))) { + pkt_packet_to_file(en_dev, + (const char *)buf + ZXDH_PKT_HEDER_LENGTH, + len - ZXDH_PKT_HEDER_LENGTH); + flag = 1; + } + + return flag; +} + +uint8_t pkt_skb_packet_process(struct zxdh_en_device *en_dev, + struct sk_buff *skb, uint8_t pkt_flag) +{ + uint8_t flag = 0; + uint8_t ret = 0; + void *buffer = NULL; + unsigned int total_len = skb->len; + + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return 0; + } + + if (pkt_flag == 0) { + return 0; + } + + if (en_dev->pkt_cap_switch == 1) { + flag = 1; + } + + if ((en_dev->pkt_save_file.enable_pkt_num_mode == 1 && + (en_dev->pkt_save_file.pkt_cur_num < + en_dev->pkt_save_file.pkt_set_count)) || + (en_dev->pkt_save_file.enable_pkt_num_mode == 0 && + (en_dev->pkt_save_file.pkt_file_size > 0))) { + buffer = kmalloc(total_len, GFP_ATOMIC); + if (!buffer) { + LOG_ERR("pkt error, kmalloc failed\n"); + return 0; + } + + ret = skb_copy_bits(skb, 0, buffer, total_len); + if (ret) { + kfree(buffer); + LOG_ERR("pkt error, failed to copy skb data\n"); + return 0; + } + + pkt_packet_to_file(en_dev, buffer, total_len); + kfree(buffer); + flag = 1; + } + + return flag; +} + +uint8_t zxdh_get_status_flag(DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + struct zxdh_pkt_cap_enable_status cap_status = { 0 }; + + ret = dpp_pkt_capture_enable_status_get(pf_info, &cap_status); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_enable_status_get failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + + if (cap_status.panel_rx_enable_status == 1 || + cap_status.panel_tx_enable_status == 1 || + 
cap_status.vqm_rx_enable_status == 1 || + cap_status.vqm_tx_enable_status == 1 || + cap_status.rdma_rx_enable_status == 1 || + cap_status.rdma_tx_enable_status == 1) { + return zxdh_cap_enable; + } + + return ret; +} + +uint8_t zxdh_get_rule_flag(DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + struct zxdh_pkt_cap_rule *pkt_rule = NULL; + uint32_t entry_num = max_entry_num; + + pkt_rule = (struct zxdh_pkt_cap_rule *)kzalloc( + sizeof(struct zxdh_pkt_cap_rule) * entry_num, GFP_KERNEL); + if (pkt_rule == NULL) { + DHTOOLS_LOG_ERR( + " zxdh_pkt_capture_cmd_show kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + ret = dpp_pkt_capture_table_dump(pf_info, pkt_rule, &entry_num); + if (ret != 0) { + SAFE_KFREE(pkt_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_table_dump failed, ret:%d!!!\n", ret); + return DHTOOL_ERROR; + } + + if (entry_num != 0) { + SAFE_KFREE(pkt_rule); + return zxdh_cap_enable; + } + + SAFE_KFREE(pkt_rule); + return ret; +} + +uint32_t zxdh_pkt_capture_enable(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + uint8_t zxdh_pkt_cap_point = 0; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_pkt_cap_point = pkt_msg->payload[0]; + if (zxdh_pkt_cap_point > DH_PKT_CAP_POINT_MAX) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + en_dev->pkt_dev_flag = 1; + goto capture_enale; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return 
MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto capture_enale; + } + } else { + return DHTOOL_ERROR; + } + +capture_enale: + ret = dpp_pkt_capture_enable(pf_info, zxdh_pkt_cap_point); + if (ret != 0) { + DHTOOLS_LOG_ERR("dpp_pkt_capture_enable failed!!!\n"); + return DHTOOL_ERROR; + } + en_dev->pkt_cap_switch = 0; + return ret; +} + +uint32_t zxdh_pkt_capture_disable(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + uint8_t zxdh_pkt_cap_point = 0; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_pkt_cap_point = pkt_msg->payload[0]; + if (zxdh_pkt_cap_point > DH_PKT_CAP_POINT_MAX) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + goto capture_disable; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto capture_disable; + } + } else { + return DHTOOL_ERROR; + } +capture_disable: + ret = dpp_pkt_capture_disable(pf_info, zxdh_pkt_cap_point); + if (ret != 0) { + DHTOOLS_LOG_ERR("zxdh_pkt_capture_disable failed!!!\n"); + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + en_dev->pkt_dev_flag = 0; + ret = dpp_pkt_capture_speed_set(pf_info, ZXDH_PKT_INIT_SPEED); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_speed_set failed, ret:%d!!!\n", + 
ret); + return DHTOOL_ERROR; + } + en_dev->pkt_dev_speed = ZXDH_PKT_INIT_SPEED; + } + return ret; +} + +uint32_t zxdh_pkt_capture_disable_all(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + goto capture_disable_all; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto capture_disable_all; + } + } else { + return DHTOOL_ERROR; + } + +capture_disable_all: + ret = dpp_pkt_capture_disable_all(pf_info); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_disable_all failed, ret:%d!!!\n", ret); + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + en_dev->pkt_dev_flag = 0; + ret = dpp_pkt_capture_speed_set(pf_info, ZXDH_PKT_INIT_SPEED); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_speed_set failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + en_dev->pkt_dev_speed = ZXDH_PKT_INIT_SPEED; + } + en_dev->pkt_cap_switch = 1; + return ret; +} + +uint32_t zxdh_pkt_capture_rule_insert(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint32_t tcam_index = 0; + uint8_t 
zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + struct zxdh_pkt_cap_rule_rule_insert *zxdh_pkt_insert_rule = NULL; + struct zxdh_pkt_cap_rule *pkt_rule = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + en_dev->pkt_dev_flag = 1; + goto rule_insert; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto rule_insert; + } + } else { + return DHTOOL_ERROR; + } + +rule_insert: + zxdh_pkt_insert_rule = (struct zxdh_pkt_cap_rule_rule_insert *)kzalloc( + sizeof(struct zxdh_pkt_cap_rule_rule_insert), GFP_KERNEL); + if (zxdh_pkt_insert_rule == NULL) { + DHTOOLS_LOG_ERR( + " zxdh_pkt_insert_rule kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + pkt_rule = (struct zxdh_pkt_cap_rule *)kzalloc( + sizeof(struct zxdh_pkt_cap_rule), GFP_KERNEL); + if (pkt_rule == NULL) { + SAFE_KFREE(zxdh_pkt_insert_rule); + DHTOOLS_LOG_ERR(" pkt_rule kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + memcpy(zxdh_pkt_insert_rule, pkt_msg->payload, + sizeof(struct zxdh_pkt_cap_rule_rule_insert)); + + DHTOOLS_LOG_INFO("rule_index %d\n", zxdh_pkt_insert_rule->rule_index); + + ret = dpp_pkt_capture_rule_index_to_tcam_index( + zxdh_pkt_insert_rule->rule_index, + zxdh_pkt_insert_rule->cap_mode, zxdh_pkt_insert_rule->cap_point, + &tcam_index); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_rule_index_to_tcam_index failed, ret:%d!!!\n", + ret); + SAFE_KFREE(zxdh_pkt_insert_rule); + SAFE_KFREE(pkt_rule); + return DHTOOL_ERROR; + } + + DHTOOLS_LOG_INFO("tcam_index 
%d\n", tcam_index); + + pkt_rule->rule_config = zxdh_pkt_insert_rule->rule_config; + pkt_rule->pkt_cap_key = zxdh_pkt_insert_rule->pkt_cap_key; + pkt_rule->tcam_index = tcam_index; + pkt_rule->dst_vqm_vfid = VQM_VFID(pf_info->vport); + ret = dpp_pkt_capture_item_insert(pf_info, pkt_rule); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_item_insert failed, ret:%d!!!\n", ret); + } + SAFE_KFREE(zxdh_pkt_insert_rule); + SAFE_KFREE(pkt_rule); + return ret; +} + +uint32_t zxdh_pkt_capture_rule_delete(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint32_t tcam_index = 0; + uint32_t i = 0; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + struct zxdh_pkt_cap_rule_rule_delete *zxdh_pkt_delete_rule = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + goto del_rule; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto del_rule; + } + } else { + return DHTOOL_ERROR; + } + +del_rule: + zxdh_pkt_delete_rule = (struct zxdh_pkt_cap_rule_rule_delete *)kzalloc( + sizeof(struct zxdh_pkt_cap_rule_rule_delete), GFP_KERNEL); + if (zxdh_pkt_delete_rule == NULL) { + DHTOOLS_LOG_ERR( + " zxdh_pkt_capture_rule_delete kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + memcpy(zxdh_pkt_delete_rule, pkt_msg->payload, + sizeof(struct zxdh_pkt_cap_rule_rule_delete)); + DHTOOLS_LOG_INFO("rule_index %d, is_mode_all%d,is_all%d\n", + 
zxdh_pkt_delete_rule->rule_index, + zxdh_pkt_delete_rule->is_mode_all, + zxdh_pkt_delete_rule->is_all); + + if ((zxdh_pkt_delete_rule->is_mode_all == 0) && + (zxdh_pkt_delete_rule->is_all == 1)) { + ret = dpp_pkt_capture_table_flush(pf_info); + if (ret != 0) { + SAFE_KFREE(zxdh_pkt_delete_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_table_flush failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + } else if ((zxdh_pkt_delete_rule->is_mode_all == 1) && + (zxdh_pkt_delete_rule->is_all == 1)) { + if (zxdh_pkt_delete_rule->cap_mode == DH_PKT_CAP_MODE_NORMAL) { + for (i = 0; i < normal_tcam_index; i++) { + ret = dpp_pkt_capture_item_delete(pf_info, i); + if (ret != 0) { + SAFE_KFREE(zxdh_pkt_delete_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_item_delete failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + } + } + + if (zxdh_pkt_delete_rule->cap_mode == + DH_PKT_CAP_MODE_KEY_WORD) { + for (i = 60; i < key_tcam_index; i++) { + ret = dpp_pkt_capture_item_delete(pf_info, i); + if (ret != 0) { + SAFE_KFREE(zxdh_pkt_delete_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_item_delete failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + } + } + } else { + ret = dpp_pkt_capture_rule_index_to_tcam_index( + zxdh_pkt_delete_rule->rule_index, + zxdh_pkt_delete_rule->cap_mode, + zxdh_pkt_delete_rule->cap_point, &tcam_index); + if (ret != 0) { + SAFE_KFREE(zxdh_pkt_delete_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_rule_index_to_tcam_index failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + + DHTOOLS_LOG_INFO("tcam_index %d\n", tcam_index); + ret = dpp_pkt_capture_item_delete(pf_info, tcam_index); + if (ret != 0) { + SAFE_KFREE(zxdh_pkt_delete_rule); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_item_delete failed, ret:%d!!!\n", + ret); + return DHTOOL_ERROR; + } + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + 
en_dev->pkt_dev_flag = 0; + } + SAFE_KFREE(zxdh_pkt_delete_rule); + return ret; +} + +uint32_t zxdh_pkt_capture_cmd_show(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + uint32_t i = 0; + enum zxdh_pkt_cap_mode cap_mode = 0; + uint32_t rule_index = 0; + uint32_t entry_num = max_entry_num; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + struct zxdh_pkt_cap_cmd_show *rule_show_info = NULL; + struct zxdh_pkt_cap_rule *pkt_rule = NULL; + struct zxdh_pkt_cap_enable_status cap_status = { 0 }; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0 && + en_dev->pkt_save_file_flag == 0) { + goto rule_show; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1 || + en_dev->pkt_save_file_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto rule_show; + } + } else { + return DHTOOL_ERROR; + } + +rule_show: + rule_show_info = (struct zxdh_pkt_cap_cmd_show *)kzalloc( + sizeof(struct zxdh_pkt_cap_cmd_show), GFP_KERNEL); + if (rule_show_info == NULL) { + DHTOOLS_LOG_ERR("rule_show_info kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + pkt_rule = (struct zxdh_pkt_cap_rule *)kzalloc( + sizeof(struct zxdh_pkt_cap_rule) * entry_num, GFP_KERNEL); + if (pkt_rule == NULL) { + DHTOOLS_LOG_ERR( + " zxdh_pkt_capture_cmd_show kzalloc msg failed!!!\n"); + SAFE_KFREE(rule_show_info); + return DHTOOL_ERROR; + } + + ret = dpp_pkt_capture_enable_status_get(pf_info, &cap_status); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_enable_status_get failed, ret:%d!!!\n", + ret); + 
SAFE_KFREE(pkt_rule); + SAFE_KFREE(rule_show_info); + return DHTOOL_ERROR; + } + + ret = dpp_pkt_capture_table_dump(pf_info, pkt_rule, &entry_num); + if (ret != 0) { + SAFE_KFREE(pkt_rule); + SAFE_KFREE(rule_show_info); + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_table_dump failed, ret:%d!!!\n", ret); + return DHTOOL_ERROR; + } + + DHTOOLS_LOG_INFO("entry_num %d\n", entry_num); + for (i = 0; i < entry_num; i++) { + dpp_pkt_capture_tcam_index_to_rule_index( + pkt_rule[i].tcam_index, &cap_mode, &rule_index); + rule_show_info->entry_array[i].cap_mode = cap_mode; + rule_show_info->entry_array[i].rule_index = rule_index; + rule_show_info->entry_array[i].cap_point = + pkt_rule[i].pkt_cap_key.capture_pkt_flag; + rule_show_info->entry_array[i].pkt_cap_key = + pkt_rule[i].pkt_cap_key; + rule_show_info->entry_array[i].rule_config = + pkt_rule[i].rule_config; + + memcpy(rule_show_info->entry_array[i].dev_name, netdev->name, + IFNAMSIZ); + } + + rule_show_info->speed = en_dev->pkt_dev_speed; + rule_show_info->entry_num = entry_num; + rule_show_info->enable_status = cap_status; + if (en_dev->pkt_save_file.pkt_file_size != 0) { + rule_show_info->is_save_to_file = 1; + } + + memcpy(rule_show_info->file_path, en_dev->pkt_save_file.file_path, + sizeof(en_dev->pkt_save_file.file_path)); + rule_show_info->file_size = en_dev->pkt_save_file.pkt_file_size; + rule_show_info->pkt_count = en_dev->pkt_save_file.pkt_set_count; + + if (unlikely(copy_to_user((void __user *)tool_msg->msg_reps, + rule_show_info, + sizeof(struct zxdh_pkt_cap_cmd_show)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + SAFE_KFREE(pkt_rule); + SAFE_KFREE(rule_show_info); + return DHTOOL_ERROR; + } + + SAFE_KFREE(pkt_rule); + SAFE_KFREE(rule_show_info); + return ret; +} + +struct file *open_log_file(const char *path) +{ + struct file *filp = filp_open(path, O_CREAT | O_WRONLY, 0644); + if (IS_ERR(filp)) { + DHTOOLS_LOG_ERR("Failed to open log file: %ld\n", + PTR_ERR(filp)); + return NULL; + } + return filp; +} + +void 
close_log_file(struct file *filp) +{ + if (filp) { + filp_close(filp, NULL); + } +} + +void zxdh_pkt_clear_date(struct zxdh_en_device *en_dev) +{ + en_dev->pkt_save_file.pkt_rbuf_idx = 0; + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_cur_num = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + en_dev->pkt_save_file.total_written_bytes = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + en_dev->pkt_addr_marked = 0; + memset(en_dev->pkt_save_file.file_path, 0, + sizeof(en_dev->pkt_save_file.file_path)); + if (en_dev->pkt_file_info) { + SAFE_KFREE(en_dev->pkt_file_info); + } + if (en_dev->pkt_save_file.log_file != NULL) { + close_log_file(en_dev->pkt_save_file.log_file); + en_dev->pkt_save_file.log_file = NULL; + } +} + +void capture_save_file_work_handler(struct work_struct *work) +{ + struct timespec64 ts; + struct tm tm_info; + long int usec = 0; + size_t buffer_size = 0; + int32_t write_size = 0; + ssize_t ret; + size_t i = 0; + ssize_t filesize = 0; + uint32_t data_len = 0; + char *buf_ptr = NULL; + char *buffer = NULL; + uint8_t *pkt_buf_addr = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + struct zxdh_en_device *en_dev = container_of( + work, struct zxdh_en_device, capture_save_file_work); + + if (en_dev->pkt_file_info == NULL) { + DHTOOLS_LOG_ERR("pkt_work_handler pkt addr error, buf_idx:%d\n", + en_dev->pkt_save_file.pkt_ubuf_idx); + return; + } + + pkt_buf_addr = en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_ubuf_idx] + .pkt_addr_array; + if (pkt_buf_addr == NULL) { + DHTOOLS_LOG_ERR( + "pkt_work_handler buffer addr error, buf_idx:%d\n", + en_dev->pkt_save_file.pkt_ubuf_idx); + return; + } + + data_len = en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_ubuf_idx] + .pkt_buf_len; + buffer_size = data_len * 4; + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + buffer = kzalloc(buffer_size, GFP_KERNEL); + if (!buffer) { + 
DHTOOLS_LOG_ERR("Failed to allocate buffer\n"); + SAFE_KFREE( + en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_ubuf_idx] + .pkt_addr_array); + zxdh_pkt_clear_date(en_dev); + en_dev->pkt_file_num = 0; + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + return; + } + + buf_ptr = buffer; + ktime_get_real_ts64(&ts); + ts.tv_sec += EIGHT_HOURS_SECONDS; + time64_to_tm(ts.tv_sec, 0, &tm_info); + usec = ts.tv_nsec / 1000; + + write_size = snprintf(buf_ptr, buffer_size, + "%04ld-%02d-%02d %02d:%02d:%02d.%06lu 00000000 ", + tm_info.tm_year + 1900, // 年份 + tm_info.tm_mon + 1, // 月份 + tm_info.tm_mday, // 日期 + tm_info.tm_hour, // 小时 + tm_info.tm_min, // 分钟 + tm_info.tm_sec, // 秒 + usec); // 微秒 + + if (write_size < 0) { + DHTOOLS_LOG_ERR("snprintf error\n"); + SAFE_KFREE( + en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_ubuf_idx] + .pkt_addr_array); + zxdh_pkt_clear_date(en_dev); + en_dev->pkt_file_num = 0; + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + goto out_free; + } + + buf_ptr += write_size; + for (i = 0; i < data_len; i++) { + buf_ptr += snprintf(buf_ptr, 4, "%02x ", + (unsigned char)pkt_buf_addr[i]); + } + + *buf_ptr++ = '\n'; + SAFE_KFREE(en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_ubuf_idx] + .pkt_addr_array); + if (en_dev->pkt_save_file.log_file) { + filesize = + i_size_read(file_inode(en_dev->pkt_save_file.log_file)); + filesize = filesize + (buf_ptr - buffer); + if (filesize > + (en_dev->pkt_save_file.pkt_file_size * 1024 * 1024)) { + if (en_dev->pkt_save_file.enable_pkt_num_mode == 1) { + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + en_dev->pkt_file_num = 0; + en_dev->pkt_save_file_flag = 0; + while (en_dev->pkt_save_file.pkt_ubuf_idx != + en_dev->pkt_save_file.pkt_rbuf_idx) { + if (en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array) { + SAFE_KFREE( + en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array); + } + 
en_dev->pkt_save_file.pkt_ubuf_idx++; + if (en_dev->pkt_save_file.pkt_ubuf_idx >= + (ZXDH_MQ_PAIRS_NUM * + ZXDH_PF_MAX_DESC_NUM(en_dev))) { + en_dev->pkt_save_file + .pkt_ubuf_idx = 0; + } + } + + if (en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array) { + SAFE_KFREE( + en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array); + } + zxdh_pkt_clear_date(en_dev); + DHTOOLS_LOG_INFO( + "pkt size is reach, close file\n"); + goto out_free; + } + en_dev->pkt_save_file.total_written_bytes = 0; + en_dev->pkt_save_file.file_pos = 0; + en_dev->pkt_save_file.log_file->f_pos = 0; + } + } + + if (en_dev->pkt_save_file.log_file) { + ret = kernel_write(en_dev->pkt_save_file.log_file, buffer, + buf_ptr - buffer, + &en_dev->pkt_save_file.file_pos); + if (ret < 0) { + DHTOOLS_LOG_ERR("Failed to write to log file: %zd\n", + ret); + en_dev->pkt_file_num = 0; + zxdh_pkt_clear_date(en_dev); + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + goto out_free; + } + } else { + zxdh_pkt_clear_date(en_dev); + en_dev->pkt_file_num = 0; + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + goto out_free; + } + + en_dev->pkt_save_file.pkt_ubuf_idx++; + if (en_dev->pkt_save_file.pkt_ubuf_idx >= + (ZXDH_MQ_PAIRS_NUM * ZXDH_PF_MAX_DESC_NUM(en_dev))) { + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + } + + en_dev->pkt_save_file.total_written_bytes += ret; + + if (en_dev->pkt_save_file.enable_pkt_num_mode == 1) { + en_dev->pkt_save_file.pkt_cur_num++; + if (en_dev->pkt_save_file.pkt_cur_num == + en_dev->pkt_save_file.pkt_set_count) { + dpp_pkt_capture_disable_all(&pf_info); + en_dev->pkt_cap_switch = 1; + en_dev->pkt_save_file_flag = 0; + en_dev->pkt_file_num = 0; + while (en_dev->pkt_save_file.pkt_ubuf_idx != + en_dev->pkt_save_file.pkt_rbuf_idx) { + if (en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array) { + SAFE_KFREE( + en_dev->pkt_file_info + [en_dev->pkt_save_file + 
.pkt_ubuf_idx] + .pkt_addr_array); + } + en_dev->pkt_save_file.pkt_ubuf_idx++; + if (en_dev->pkt_save_file.pkt_ubuf_idx >= + (ZXDH_MQ_PAIRS_NUM * + ZXDH_PF_MAX_DESC_NUM(en_dev))) { + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + } + } + + if (en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array) { + SAFE_KFREE(en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array); + } + + en_dev->pkt_save_file.file_pos = 0; + en_dev->pkt_save_file.log_file->f_pos = 0; + zxdh_pkt_clear_date(en_dev); + DHTOOLS_LOG_INFO("pkt count is reach, close file\n"); + goto out_free; + } + } +out_free: + SAFE_KFREE(buffer); +} + +ssize_t pkt_packet_to_file(struct zxdh_en_device *en_dev, const char *data, + size_t len) +{ + struct zxdh_en_priv *en_priv = netdev_priv(en_dev->netdev); + if (!en_dev->pkt_file_info) { + DHTOOLS_LOG_ERR("dev not alloc memory\n"); + return 0; + } + + if (en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx] + .pkt_addr_array) { + if (!en_dev->pkt_addr_marked) { + DHTOOLS_LOG_ERR("pkt cap buffer drop\n"); + en_dev->pkt_addr_marked = 1; + } + return 0; + } else { + en_dev->pkt_addr_marked = 0; + } + + en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx] + .pkt_addr_array = kzalloc(len, GFP_ATOMIC); + if (en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx] + .pkt_addr_array == NULL) { + DHTOOLS_LOG_ERR("kzalloc pkt bufer error, buf len:%zu\n", len); + return 0; + } + + memcpy(en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx] + .pkt_addr_array, + data, len); + en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx].pkt_buf_len = + len; + + if (en_dev->pkt_wq) { + queue_work(en_dev->pkt_wq, + &en_priv->edev.capture_save_file_work); + en_dev->pkt_save_file.pkt_rbuf_idx++; + if (en_dev->pkt_save_file.pkt_rbuf_idx >= + (ZXDH_MQ_PAIRS_NUM * ZXDH_PF_MAX_DESC_NUM(en_dev))) { + en_dev->pkt_save_file.pkt_rbuf_idx = 0; + } + } + return 0; +} + +uint32_t zxdh_pkt_capture_save_to_file(struct net_device 
*netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t ret = 0; + struct zxdh_pkt_cap_cmd_save_to_file *pkt_save_file = NULL; + struct file *filp = NULL; + struct zxdh_en_priv *en_priv = NULL; + struct zxdh_en_device *en_dev = NULL; + uint32_t malloc_length = 0; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + if (en_dev->ops->get_coredev_type(en_dev->parent) != DH_COREDEV_PF) { + return DHTOOL_ERROR; + } + + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0) { + en_dev->pkt_dev_flag = 1; + goto save_file; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto save_file; + } + } else { + return DHTOOL_ERROR; + } +save_file: + pkt_save_file = (struct zxdh_pkt_cap_cmd_save_to_file *)kzalloc( + sizeof(struct zxdh_pkt_cap_cmd_save_to_file), GFP_KERNEL); + if (pkt_save_file == NULL) { + DHTOOLS_LOG_ERR( + "zxdh_pkt_capture_save_to_file kzalloc msg failed!!!\n"); + return DHTOOL_ERROR; + } + + memcpy(pkt_save_file, pkt_msg->payload, + sizeof(struct zxdh_pkt_cap_cmd_save_to_file)); + DHTOOLS_LOG_INFO("file_path:%s, size:%d,count:%d, is_stop:%d\n", + pkt_save_file->file_path, pkt_save_file->file_size, + pkt_save_file->pkt_count, pkt_save_file->is_stop); + + if (pkt_save_file->is_stop == 0) { + en_dev->pkt_file_num++; + if (en_dev->pkt_file_num < 2) { + filp = filp_open(pkt_save_file->file_path, O_RDONLY, + 0); //文件已存在 + if (!IS_ERR(filp)) { + en_dev->pkt_file_num = 0; + filp_close(filp, NULL); + DHTOOLS_LOG_ERR("File already exists: %s\n", + pkt_save_file->file_path); + SAFE_KFREE(pkt_save_file); + return MSG_RECV_PKT_FILE_EXIST_ERR; + } + + malloc_length = ZXDH_MQ_PAIRS_NUM * + ZXDH_PF_MAX_DESC_NUM(en_dev) * + sizeof(struct 
zxdh_pkt_file_info); + en_dev->pkt_file_info = + kzalloc(malloc_length, GFP_KERNEL); + if (en_dev->pkt_file_info == NULL) { + DHTOOLS_LOG_ERR("kzalloc failed, szie:%d!!!\n", + malloc_length); + en_dev->pkt_file_num = 0; + filp_close(filp, NULL); + SAFE_KFREE(pkt_save_file); + return DHTOOL_ERROR; + } + + ret = pkt_event_init(en_dev); + if (ret != 0) { + DHTOOLS_LOG_ERR("pkt event init failed\n"); + SAFE_KFREE(pkt_save_file); + SAFE_KFREE(en_dev->pkt_file_info); + return EINVAL; + } + + if (pkt_save_file->pkt_count != 0) { + en_dev->pkt_save_file.enable_pkt_num_mode = 1; + en_dev->pkt_save_file.pkt_cur_num = 0; + } else { + en_dev->pkt_save_file.enable_pkt_num_mode = 0; + en_dev->pkt_save_file.pkt_cur_num = 0; + } + + en_dev->pkt_cap_switch = 0; + en_dev->pkt_save_file_flag = 1; + memcpy(en_dev->pkt_save_file.file_path, + pkt_save_file->file_path, + sizeof(pkt_save_file->file_path)); + + en_dev->pkt_save_file.log_file = + open_log_file(en_dev->pkt_save_file.file_path); + if (!en_dev->pkt_save_file.log_file) { + DHTOOLS_LOG_ERR("cmd open pkt file fialed\n"); + en_dev->pkt_cap_switch = 1; + en_dev->pkt_save_file_flag = 0; + en_dev->pkt_file_num = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + memset(en_dev->pkt_save_file.file_path, 0, + sizeof(en_dev->pkt_save_file.file_path)); + SAFE_KFREE(pkt_save_file); + SAFE_KFREE(en_dev->pkt_file_info); + return MSG_RECV_PKT_FILE_PATH_ERR; + } + + en_dev->pkt_save_file.pkt_file_size = + pkt_save_file->file_size; + en_dev->pkt_save_file.pkt_set_count = + pkt_save_file->pkt_count; + } else { + DHTOOLS_LOG_ERR( + "another pkt file not close, please stop\n"); + SAFE_KFREE(pkt_save_file); + return MSG_RECV_PKT_FILE_IN_PROGRESS_ERR; + } + } else { + dpp_pkt_capture_disable_all(pf_info); + en_dev->pkt_cap_switch = 1; + en_dev->pkt_save_file_flag = 0; + en_dev->pkt_file_num = 0; + en_dev->pkt_save_file.pkt_file_size = 0; + en_dev->pkt_save_file.pkt_set_count = 0; + 
en_dev->pkt_save_file.total_written_bytes = 0; + en_dev->pkt_save_file.pkt_cur_num = 0; + en_dev->pkt_addr_marked = 0; + memset(en_dev->pkt_save_file.file_path, 0, + sizeof(en_dev->pkt_save_file.file_path)); + pkt_event_uninit(en_dev); + + while (en_dev->pkt_save_file.pkt_ubuf_idx != + en_dev->pkt_save_file.pkt_rbuf_idx) { + if (en_dev->pkt_file_info && + en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array) { + SAFE_KFREE(en_dev->pkt_file_info + [en_dev->pkt_save_file + .pkt_ubuf_idx] + .pkt_addr_array); + } + en_dev->pkt_save_file.pkt_ubuf_idx++; + if (en_dev->pkt_save_file.pkt_ubuf_idx >= + (ZXDH_MQ_PAIRS_NUM * + ZXDH_PF_MAX_DESC_NUM(en_dev))) { + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + } + } + + if (en_dev->pkt_file_info && + en_dev->pkt_file_info[en_dev->pkt_save_file.pkt_rbuf_idx] + .pkt_addr_array) { + SAFE_KFREE(en_dev->pkt_file_info[en_dev->pkt_save_file + .pkt_rbuf_idx] + .pkt_addr_array); + } + + if (en_dev->pkt_save_file.log_file != NULL) { + close_log_file(en_dev->pkt_save_file.log_file); + en_dev->pkt_save_file.log_file = NULL; + en_dev->pkt_save_file.file_pos = 0; + DHTOOLS_LOG_INFO("pkt file is close\n"); + } + + if (en_dev->pkt_file_info) { + SAFE_KFREE(en_dev->pkt_file_info); + } + + en_dev->pkt_save_file.pkt_ubuf_idx = 0; + en_dev->pkt_save_file.pkt_rbuf_idx = 0; + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0) { + en_dev->pkt_dev_flag = 0; + ret = dpp_pkt_capture_speed_set(pf_info, + ZXDH_PKT_INIT_SPEED); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "dpp_pkt_capture_speed_set failed, ret:%d!!!\n", + ret); + SAFE_KFREE(pkt_save_file); + return DHTOOL_ERROR; + } + en_dev->pkt_dev_speed = ZXDH_PKT_INIT_SPEED; + } + } + + SAFE_KFREE(pkt_save_file); + return ret; +} + +uint32_t zxdh_pkt_capture_set_speed(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T 
*pf_info) +{ + uint32_t ret = 0; + uint32_t *speed_date = NULL; + struct zxdh_en_device *en_dev = NULL; + struct zxdh_en_priv *en_priv = NULL; + uint8_t zxdh_config_flag = 0; + uint8_t zxdh_rule_flag = 0; + + if (!pkt_msg) { + DHTOOLS_LOG_ERR("Payload is NULL\n"); + return DHTOOL_ERROR; + } + + en_priv = netdev_priv(netdev); + en_dev = &en_priv->edev; + zxdh_config_flag = zxdh_get_status_flag(pf_info); + zxdh_rule_flag = zxdh_get_rule_flag(pf_info); + if (zxdh_config_flag == 0 && zxdh_rule_flag == 0) { + goto set_speed; + } else if (zxdh_config_flag == 1 || zxdh_rule_flag == 1) { + if (en_dev->pkt_dev_flag == 0) { + return MSG_RECV_PKT_CAP_PF_LOCK; + } else { + goto set_speed; + } + } else { + return DHTOOL_ERROR; + } + +set_speed: + speed_date = (uint32_t *)pkt_msg->payload; + ret = dpp_pkt_capture_speed_set(pf_info, *speed_date); + if (ret != 0) { + DHTOOLS_LOG_ERR("dpp_pkt_capture_speed_set failed, ret:%d!!!\n", + ret); + } + en_dev->pkt_dev_speed = *speed_date; + return ret; +} + +zxdh_pkt_capture_callback_entry_t callback_table[] = { + { DHTOOL_PKT_CAPTURE_CMD_ENABLE, zxdh_pkt_capture_enable }, + { DHTOOL_PKT_CAPTURE_CMD_DISABLE, zxdh_pkt_capture_disable }, + { DHTOOL_PKT_CAPTURE_CMD_DISABLE_ALL, zxdh_pkt_capture_disable_all }, + { DHTOOL_PKT_CAPTURE_CMD_RULE_INSERT, zxdh_pkt_capture_rule_insert }, + { DHTOOL_PKT_CAPTURE_CMD_RULE_DELETE, zxdh_pkt_capture_rule_delete }, + { DHTOOL_PKT_CAPTURE_CMD_SHOW, zxdh_pkt_capture_cmd_show }, + { DHTOOL_PKT_CAPTURE_CMD_SAVE_TO_FILE, zxdh_pkt_capture_save_to_file }, + { DHTOOL_PKT_CAPTURE_CMD_SET_SPEED, zxdh_pkt_capture_set_speed } +}; + +uint32_t zxdh_pkt_capture_process_message(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info) +{ + uint32_t i = 0; + uint32_t ret = 1; + for (i = 0; i < (sizeof(callback_table) / + sizeof(zxdh_pkt_capture_callback_entry_t)); + i++) { + if (callback_table[i].op_code == pkt_msg->op_code) { + ret = 
callback_table[i].callback(netdev, tool_msg, + pkt_msg, pf_info); + break; + } + } + + return ret; +} + +/* Started by AICoder, pid:5188ee7f11r7cdd140e40bf6f0fbff8eedb33772 */ +struct dhtool_eventpid_devbdf_array + eventpid_devbdf_array[MAX_DHTOOL_PID_NUMS] = { 0 }; + +void dhtool_eventpid_exited_set_invalid(uint32_t event_pid) +{ + int i = 0; + struct task_struct *task = NULL; + + for (i = 0; i < ARRAY_SIZE(eventpid_devbdf_array); i++) { + if (eventpid_devbdf_array[i].is_valid == false) { + continue; + } + task = pid_task(find_vpid(eventpid_devbdf_array[i].event_pid), + PIDTYPE_PID); + if (!task || + (event_pid == eventpid_devbdf_array[i].event_pid)) { + eventpid_devbdf_array[i].is_valid = false; + //DHTOOLS_LOG_INFO("dhtool with pid %d has exited, set invalid!\n", eventpid_devbdf_array[i].event_pid); + } + } +} + +int dhtool_eventpid_and_devbdf_register(uint16_t dev_pcieid, uint32_t dev_bdf, + uint32_t event_pid) +{ + int i = 0; + for (i = 0; i < ARRAY_SIZE(eventpid_devbdf_array); i++) { + if (eventpid_devbdf_array[i].is_valid) { + if (eventpid_devbdf_array[i].dev_bdf == dev_bdf) { + eventpid_devbdf_array[i].is_valid = + false; //dhtool文件锁可靠的前提下,解决再次运行dhtool,文件锁获取到,但驱动检测前一个dhtool还未退出 + //DHTOOLS_LOG_ERR(" dhtool with the dev_bdf %u is running, not allowed another!\n", dev_bdf); + break; + } + } + } + + for (i = 0; i < ARRAY_SIZE(eventpid_devbdf_array); i++) { + if (!eventpid_devbdf_array[i].is_valid) { + eventpid_devbdf_array[i].dev_pcieid = dev_pcieid; + eventpid_devbdf_array[i].dev_bdf = dev_bdf; + eventpid_devbdf_array[i].event_pid = event_pid; + eventpid_devbdf_array[i].is_valid = true; + return 0; + } + } + + DHTOOLS_LOG_ERR( + " dhtool eventpid_devbdf_array (%d) is full(all running), waiting a moment!\n", + MAX_DHTOOL_PID_NUMS); + return -1; +} +/* Ended by AICoder, pid:b188eq7f11f7cdd140e40bf6f0fbff8eedb33772 */ + +int dhtool_eventpid_devbdf_list_process(uint16_t dev_pcieid, uint32_t dev_bdf, + uint32_t event_pid) +{ + int ret = 0; + 
dhtool_eventpid_exited_set_invalid(event_pid); + ret = dhtool_eventpid_and_devbdf_register(dev_pcieid, dev_bdf, + event_pid); + return ret; +} + +/* Started by AICoder, pid:x7951lc26423ce61419e0a0a502a9b6cc494ddd1 */ + +int32_t zxdh_tools_mark_event_info(struct net_device *netdev, struct ifreq *ifr) +{ + int ret = 0; + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_tools_msg *msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + struct dhtool_dev_pcieid_get dev_pcieid_get = { 0 }; + struct pci_dev *pdev = NULL; + uint32_t domain_no = 0; + uint32_t bus_no = 0; + uint32_t device_no = 0; + uint32_t func_no = 0; + uint32_t dev_bdf = 0; + + //DHTOOLS_LOG_INFO("is called!\n"); + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR(" kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + tools_reps.status = MSG_RECV_OK; + + DHTOOLS_LOG_INFO("en_dev->pcie_id=0x%x\n", en_dev->pcie_id); + DHTOOLS_LOG_INFO("msg->event_pid=%d\n", msg->event_pid); + dev_pcieid_get.dev_pcieid = en_dev->pcie_id; + + pdev = en_dev->ops->get_pdev(en_dev->parent); + if (!pdev) { + DHTOOLS_LOG_ERR("pdev is NULL\n"); + kfree(msg); + return -EINVAL; + } + ret = sscanf(pci_name(pdev), "%x:%x:%x.%u", &domain_no, &bus_no, + &device_no, &func_no); + if (ret != 4) { + DHTOOLS_LOG_ERR( + "could not get dev domain_no、bus_no、device_no、func_no from pci_name(pdev)\n"); + kfree(msg); + return -1; + } + dev_bdf = DBDF_ECAM(domain_no, bus_no, device_no, func_no); + DHTOOLS_LOG_INFO("dev_bdf=%d\n", dev_bdf); + + ret = dhtool_eventpid_devbdf_list_process(en_dev->pcie_id, dev_bdf, + msg->event_pid); + if (ret != 0) { + kfree(msg); + return -1; + } + + if (unlikely(copy_to_user((void __user *)msg->msg_reps, 
&dev_pcieid_get, + sizeof(struct dhtool_dev_pcieid_get)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + kfree(msg); + return 0; +} +/* Ended by AICoder, pid:x7951lc26423ce61419e0a0a502a9b6cc494ddd1 */ + +int32_t zxdh_tools_ioctl_barchan_send(struct net_device *netdev, + struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_tools_msg *msg = NULL; + uint8_t *payload_addr = NULL; + uint8_t *msg_reps = NULL; + uint16_t msg_reps_len = 0; + struct zxdh_tools_reps tools_reps = { 0 }; + struct zxdh_pci_bar_msg in = { 0 }; + struct zxdh_msg_recviver_mem result = { 0 }; + int32_t ret = 0; + + //DHTOOLS_LOG_INFO("is called!\n"); + //DHTOOLS_LOG_INFO("en_dev->pcie_id=0x%x\n", en_dev->pcie_id); + + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (msg->payload_len == 0 || + msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n", + msg->payload_len); + kfree(msg); + return -1; + } + + payload_addr = vmalloc(msg->payload_len); + if (payload_addr == NULL) { + DHTOOLS_LOG_ERR("vmalloc payload_addr failed!!!\n"); + kfree(msg); + return -1; + } + + if (copy_from_user(payload_addr, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + msg->payload_len)) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + vfree(payload_addr); + kfree(msg); + return -EFAULT; + } + +/*调试*/ +#if 0 + 
DHTOOLS_LOG_INFO("msg->payload_len = %u\n", msg->payload_len); + int i; + for (i = 0; i < msg->payload_len; i += sizeof(uint32_t)) { + DHTOOLS_LOG_INFO("*(uint32_t *)(payload_addr + %d) = %d\n", i, *(uint32_t *)(payload_addr + i)); + } +#endif + + in.virt_addr = (uint64_t)ZXDH_BAR_MSG_BASE( + en_dev->ops->get_bar_virt_addr(en_dev->parent, 0)); + in.payload_addr = payload_addr; + in.payload_len = msg->payload_len; + in.src = MSG_CHAN_END_PF; + in.dst = msg->dst; + in.event_id = msg->event_id; + in.src_pcieid = en_dev->pcie_id; + in.dst_pcieid = msg->dst_pcieid; + + if (msg->msg_reps_len == 0 || + msg->msg_reps_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid msg_reps_len: %d!\n", + msg->msg_reps_len); + vfree(payload_addr); + kfree(msg); + return -1; + } + result.buffer_len = msg->msg_reps_len + REPS_HEADER_PAYLOAD_OFFSET; + result.recv_buffer = vmalloc(result.buffer_len); + if (result.recv_buffer == NULL) { + DHTOOLS_LOG_ERR("vmalloc result.recv_buffer failed!!!\n"); + vfree(payload_addr); + kfree(msg); + return -1; + } + msg_reps = (uint8_t *)result.recv_buffer + REPS_HEADER_PAYLOAD_OFFSET; + msg_reps_len = msg->msg_reps_len; + + ret = zxdh_bar_chan_sync_msg_send(&in, &result); + if (ret != BAR_MSG_OK) { + DHTOOLS_LOG_ERR( + "zxdh_bar_chan_sync_msg_send failed, ret=%d!!!\n", ret); + } + + tools_reps.bar_or_vq_chan_ret = ret; + tools_reps.status = MSG_RECV_OK; + + /*调试*/ + //DHTOOLS_LOG_INFO("result.recv_buffer 8 bytes: 0x%llx\n", *(uint64_t *)result.recv_buffer); + + if (unlikely(copy_to_user((void __user *)msg->msg_reps, msg_reps, + msg_reps_len))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + vfree(result.recv_buffer); + vfree(payload_addr); + kfree(msg); + return -EFAULT; + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + vfree(result.recv_buffer); + vfree(payload_addr); + kfree(msg); + return -EFAULT; + } + + 
#ifdef ZXDH_MSGQ
/*
 * zxdh_tools_ioctl_msgq_send() - forward a dhtool message to the RISC-V side
 * over the message queue (ZXDH_MSGQ builds; replaces the BAR-channel path).
 *
 * Payload is prefixed with PRIV_HEADER_LEN bytes of private header before
 * being queued.  The reply is copied to msg->msg_reps and the queue status
 * to msg->tools_reps.
 *
 * Returns 0 on success, -EADDRNOTAVAIL on null arguments, -1 otherwise
 * (this handler historically reports copy faults as -1, not -EFAULT).
 */
int32_t zxdh_tools_ioctl_msgq_send(struct net_device *netdev, struct ifreq *ifr)
{
	struct zxdh_en_device *en_dev = NULL;
	struct msgq_dev *msgq_dev = NULL;
	struct msgq_pkt_info pkt_info = { 0 };
	struct zxdh_tools_msg *msg = NULL;
	uint8_t *payload_addr = NULL;
	struct zxdh_tools_reps tools_reps = { 0 };
	struct reps_info msg_reps = { 0 };
	int32_t ret = 0;
	int32_t err = -1;

	CHECK_EQUAL_ERR(ifr, NULL, -EADDRNOTAVAIL, "ifr is null!\n");
	CHECK_EQUAL_ERR(netdev, NULL, -EADDRNOTAVAIL, "netdev is null!\n");

	en_dev = netdev_priv(netdev);
	if (en_dev == NULL) {
		LOG_ERR("en_dev is null!\n");
		return -1;
	}
	msgq_dev = (struct msgq_dev *)en_dev->msgq_dev;
	if (msgq_dev == NULL) {
		LOG_ERR("msgq_dev null!\n");
		return -1;
	}

	DHTOOLS_LOG_INFO("is called!\n");

	msg = kzalloc(sizeof(struct zxdh_tools_msg), GFP_KERNEL);
	if (msg == NULL) {
		DHTOOLS_LOG_ERR("kzalloc msg failed!!!\n");
		return -1;
	}

	if (copy_from_user(msg, ifr->ifr_ifru.ifru_data,
			   sizeof(struct zxdh_tools_msg))) {
		DHTOOLS_LOG_ERR("copy_from_user failed!!!\n");
		goto err_free_msg;
	}

	if (msg->payload_len == 0 || msg->payload_len > MAX_PACKET_LEN) {
		DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n",
				msg->payload_len);
		goto err_free_msg;
	}

	/* reserve PRIV_HEADER_LEN bytes in front of the user payload */
	payload_addr = kzalloc(msg->payload_len + PRIV_HEADER_LEN, GFP_KERNEL);
	if (payload_addr == NULL) {
		/* bug fix: this buffer comes from kzalloc, not vmalloc */
		DHTOOLS_LOG_ERR("kzalloc payload_addr failed!!!\n");
		goto err_free_msg;
	}

	if (copy_from_user(payload_addr + PRIV_HEADER_LEN,
			   ifr->ifr_ifru.ifru_data +
				   sizeof(struct zxdh_tools_msg),
			   msg->payload_len)) {
		DHTOOLS_LOG_ERR("copy_from_user failed!!!\n");
		goto err_free_payload;
	}

	pkt_info.event_id = msg->event_id;
	pkt_info.timeout_us = 400000; /* 400 ms queue timeout */
	pkt_info.is_async = msg->sync_or_async;
	pkt_info.len = msg->payload_len + PRIV_HEADER_LEN;
	pkt_info.addr = payload_addr;

	if (msg->msg_reps_len == 0 || msg->msg_reps_len > MAX_PACKET_LEN) {
		DHTOOLS_LOG_ERR("send para ERR: invalid msg_reps_len: %d!\n",
				msg->msg_reps_len);
		goto err_free_payload;
	}
	msg_reps.len = msg->msg_reps_len;
	msg_reps.addr = vmalloc(msg_reps.len);
	if (msg_reps.addr == NULL) {
		DHTOOLS_LOG_ERR("vmalloc msg_reps.addr failed!!!\n");
		goto err_free_payload;
	}

	/*
	 * NOTE(review): payload_addr is not freed after this call, matching
	 * the original code; if the queue does not take ownership of the
	 * buffer (at least in the sync case) this leaks -- confirm ownership
	 * semantics of zxdh_msgq_send_cmd() before freeing here.
	 */
	ret = zxdh_msgq_send_cmd(msgq_dev, &pkt_info, &msg_reps);
	if (ret != MSGQ_RET_OK) {
		DHTOOLS_LOG_ERR("zxdh_msgq_send_cmd failed: %d\n", ret);
	}

	/* queue status is reported to the tool via tools_reps */
	tools_reps.bar_or_vq_chan_ret = ret;
	tools_reps.status = MSG_RECV_OK;

	if (unlikely(copy_to_user((void __user *)msg->msg_reps, msg_reps.addr,
				  msg_reps.len))) {
		DHTOOLS_LOG_ERR("copy_to_user failed!!!\n");
		goto err_free_reps;
	}

	if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps,
				  sizeof(struct zxdh_tools_reps)))) {
		DHTOOLS_LOG_ERR("copy_to_user failed!!!\n");
		goto err_free_reps;
	}

	err = 0;
err_free_reps:
	vfree(msg_reps.addr);
	kfree(msg);
	return err;

err_free_payload:
	kfree(payload_addr);
err_free_msg:
	kfree(msg);
	return err;
}
#endif
dev_info_get = { 0 }; + int sscanf_ret = 0; + + //DHTOOLS_LOG_INFO("is called!\n"); + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR(" kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + tools_reps.status = MSG_RECV_OK; + + pdev = en_dev->ops->get_pdev(en_dev->parent); + if (!pdev) { + DHTOOLS_LOG_ERR("pdev is NULL\n"); + kfree(msg); + return -EINVAL; + } + + sscanf_ret = sscanf(pci_name(pdev), "%x:%x:%x.%u", + &dev_info_get.dev_info.domain_no, + &dev_info_get.dev_info.bus_no, + &dev_info_get.dev_info.device_no, + &dev_info_get.dev_info.func_no); + if (sscanf_ret != 4) { + DHTOOLS_LOG_ERR( + "could not get dev domain_no、bus_no、device_no、func_no from pci_name(pdev)\n"); + kfree(msg); + return -1; + } + DHTOOLS_LOG_INFO( + "dev_info, domain:bus:device.func = %04x:%02x:%02x.%x\n", + dev_info_get.dev_info.domain_no, dev_info_get.dev_info.bus_no, + dev_info_get.dev_info.device_no, dev_info_get.dev_info.func_no); + + swdsp_pdev = pci_upstream_bridge(pdev); + if (!swdsp_pdev) { + DHTOOLS_LOG_ERR("swdsp_pdev is NULL\n"); + kfree(msg); + return -EINVAL; + } + + if (swdsp_pdev->device == DH_SWITCH_DEVICE_ID && + swdsp_pdev->vendor == DH_SWITCH_VENDOR_ID) { + swusp_pdev = pci_upstream_bridge(swdsp_pdev); + if (!swusp_pdev) { + DHTOOLS_LOG_ERR("swusp_pdev is NULL\n"); + kfree(msg); + return -EINVAL; + } + + rp_pdev = pci_upstream_bridge(swusp_pdev); + if (!rp_pdev) { + DHTOOLS_LOG_ERR("rp_pdev is NULL\n"); + kfree(msg); + return -EINVAL; + } + dev_info_get.switch_or_noswitch = SWITCH; + sscanf_ret = sscanf(pci_name(rp_pdev), "%x:%x:%x.%u", + &dev_info_get.rp_info.domain_no, + &dev_info_get.rp_info.bus_no, + &dev_info_get.rp_info.device_no, + &dev_info_get.rp_info.func_no); + if (sscanf_ret != 4) { + DHTOOLS_LOG_ERR( + "could not get 
rp domain_no、bus_no、device_no、func_no from pci_name(rp_pdev)\n"); + kfree(msg); + return -1; + } + sscanf_ret = sscanf(pci_name(swusp_pdev), "%x:%x:%x.%u", + &dev_info_get.swusp_info.domain_no, + &dev_info_get.swusp_info.bus_no, + &dev_info_get.swusp_info.device_no, + &dev_info_get.swusp_info.func_no); + if (sscanf_ret != 4) { + DHTOOLS_LOG_ERR( + "could not get swusp domain_no、bus_no、device_no、func_no from pci_name(swusp_pdev)\n"); + kfree(msg); + return -1; + } + } else { + dev_info_get.switch_or_noswitch = NO_SWITCH; + sscanf_ret = sscanf(pci_name(swdsp_pdev), "%x:%x:%x.%u", + &dev_info_get.rp_info.domain_no, + &dev_info_get.rp_info.bus_no, + &dev_info_get.rp_info.device_no, + &dev_info_get.rp_info.func_no); + if (sscanf_ret != 4) { + DHTOOLS_LOG_ERR( + "could not get rp domain_no、bus_no、device_no、func_no from pci_name(swdsp_pdev)\n"); + kfree(msg); + return -1; + } + } + DHTOOLS_LOG_INFO( + "rp_info, domain:bus:device.func = %04x:%02x:%02x.%x\n", + dev_info_get.rp_info.domain_no, dev_info_get.rp_info.bus_no, + dev_info_get.rp_info.device_no, dev_info_get.rp_info.func_no); + + if (unlikely(copy_to_user((void __user *)msg->msg_reps, &dev_info_get, + sizeof(struct dhtool_dev_info_get_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + kfree(msg); + return 0; +} + +int32_t zxdh_set_vf_status(struct zxdh_en_device *en_dev, int start_vf_idx, + VF_SET_STATUS vf_status) +{ + int num_vfs = 0; + bool pf_link_up = en_dev->ops->get_pf_link_up(en_dev->parent); + union zxdh_msg *msg = NULL; + struct zxdh_vf_item *vf_item = NULL; + struct pci_dev *pdev = NULL; + int32_t err = 0; + uint16_t vf_idx = 0; + uint16_t func_no = 0; + uint16_t pf_no = FIND_PF_ID(en_dev->pcie_id); + struct zxdh_bar_extra_para para = { 0 }; + LOG_INFO("is called\n"); 
+ + para.is_sync = true; + para.retrycnt = BAR_MSG_RETRY_CNT_MAX; + msg = kzalloc(sizeof(union zxdh_msg), GFP_KERNEL); + if (msg == NULL) { + LOG_ERR("kzalloc(%lu, GFP_KERNEL) failed !", + sizeof(union zxdh_msg)); + return -ENOMEM; + } + msg->payload.hdr_to_agt.op_code = AGENT_DEV_STATUS_NOTIFY; + msg->payload.hdr_to_agt.pcie_id = en_dev->pcie_id; + pdev = en_dev->ops->get_pdev(en_dev->parent); + num_vfs = pci_num_vf(pdev); + for (vf_idx = start_vf_idx; vf_idx < num_vfs; vf_idx++) { + vf_item = en_dev->ops->get_vf_item(en_dev->parent, vf_idx); + switch (vf_status) { + case VF_STATUS_AUTO: + vf_item->link_forced = FALSE; + vf_item->link_up = pf_link_up; + break; + case VF_STATUS_ENABLE: + vf_item->link_forced = TRUE; + vf_item->link_up = TRUE; + break; + case VF_STATUS_DISABLE: + vf_item->link_forced = TRUE; + vf_item->link_up = FALSE; + break; + default: + err = -EINVAL; + goto free_msg; + } + if (en_dev->ops->get_vf_is_probe( + en_dev->parent, + vf_idx)) { //仅VF驱动加载后才配置状态寄存器、发中断 + func_no = GET_FUNC_NO(pf_no, vf_idx); + LOG_INFO( + "start set vf[%d], link_forced[%d], link_up[%d], probe[%d], func_no=0x%x\n", + vf_idx, vf_item->link_forced ? 1 : 0, + vf_item->link_up ? 1 : 0, + vf_item->is_probed ? 1 : 0, func_no); + msg->payload.pcie_msix_msg + .func_no[msg->payload.pcie_msix_msg.num++] = + func_no; + en_dev->ops->set_vf_link_info(en_dev->parent, vf_idx, + vf_item->link_up ? 
1 : 0); + } + } + if (msg->payload.pcie_msix_msg.num > 0) { + err = en_dev->ops->msg_send_cmd(en_dev->parent, MODULE_MAC, msg, + msg, ¶); + if (err != 0) { + LOG_ERR("failed to update VF link info, en_dev->ops->msg_send_cmd err:%d\n", + err); + } + } + +free_msg: + kfree(msg); + return err; +} + +int32_t zxdh_set_vf_mac(struct zxdh_en_device *en_dev, + struct dhtool_set_vf_mac_msg *msg) +{ + int32_t ret = 0; + DHTOOLS_LOG_INFO("is called\n"); + + if (msg->action == MAC_ADD) { /* 添加单播和组播 */ + if (msg->mac_config.unicast_add_count != 0) { + ret = zxdh_pf_add_vf_unicast_mac(en_dev, msg); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "zxdh_pf_add_vf_unicast_mac failed, ret:%d\n", + ret); + return ret; + } + } + + if (msg->mac_config.multicast_add_count != 0) { + ret = zxdh_pf_add_vf_multicast_mac(en_dev, msg); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "zxdh_pf_add_vf_multicast_mac failed\n"); + return ret; + } + } + } else if (msg->action == MAC_DEL) { /* 删除单播和组播 */ + if (msg->mac_config.unicast_del_count != 0) { + ret = zxdh_pf_del_vf_unicast_mac(en_dev, msg); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "zxdh_pf_del_vf_unicast_mac failed\n"); + return ret; + } + } + + if (msg->mac_config.multicast_del_count != 0) { + ret = zxdh_pf_del_vf_multicast_mac(en_dev, msg); + if (ret != 0) { + DHTOOLS_LOG_ERR( + "zxdh_pf_del_vf_multicast_mac failed\n"); + return ret; + } + } + } else if (msg->action == MAC_TRANSFER) { /* 迁移所有mac */ + ret = zxdh_pf_transfer_vf_mac(en_dev, msg->mac_transfer.src_vf, + msg->mac_transfer.dst_vf); + if (ret != 0) { + DHTOOLS_LOG_ERR("zxdh_pf_transfer_vf_mac failed\n"); + return ret; + } + } else { + DHTOOLS_LOG_ERR("unknown atcion:%d", msg->action); + return MAC_CONFIG_FAILED; + } + + return MAC_CONFIG_SUCCESS; +} + +int32_t zxdh_tools_set_vf_mac(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dhtool_set_vf_mac_msg *payload = NULL; + struct 
zxdh_tools_msg *tools_msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + int32_t res = 0; + + DHTOOLS_LOG_INFO("is called!\n"); + + tools_reps.status = MAC_CONFIG_SUCCESS; + if (en_dev->ops->is_bond(en_dev->parent)) { + tools_reps.status = MAC_CONFIG_FAILED; + DHTOOLS_LOG_ERR( + "zxdh_tools_set_vf_mac can't be used in bond_pf!\n"); + return -EINVAL; + } + + tools_msg = (struct zxdh_tools_msg *)kzalloc( + sizeof(struct zxdh_tools_msg), GFP_KERNEL); + if (tools_msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc tools_msg failed!!!\n"); + return -ENOMEM; + } + + if (copy_from_user(tools_msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(tools_msg); + return -EFAULT; + } + + if (tools_msg->payload_len == 0 || + tools_msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n", + tools_msg->payload_len); + kfree(tools_msg); + return -EINVAL; + } + + payload = (struct dhtool_set_vf_mac_msg *)kzalloc( + sizeof(struct dhtool_set_vf_mac_msg), GFP_KERNEL); + if (payload == NULL) { + DHTOOLS_LOG_ERR("kzalloc payload failed!!!\n"); + kfree(tools_msg); + return -ENOMEM; + } + + if (copy_from_user(payload, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + sizeof(struct dhtool_set_vf_mac_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(payload); + kfree(tools_msg); + return -EFAULT; + } + + /* 执行VF相关的操作 */ + res = zxdh_set_vf_mac(en_dev, payload); + if (res != 0) { + DHTOOLS_LOG_ERR("zxdh_set_vf_mac failed, err %d\n", res); + tools_reps.status = res; /* 返回错误码,用户态解析 */ + } + + if (unlikely(copy_to_user((void __user *)tools_msg->tools_reps, + &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + res = -EFAULT; + } + kfree(payload); + kfree(tools_msg); + return 0; +} + +int32_t zxdh_tools_get_sw_stat(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = 
netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_tools_msg *tools_msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + zxdh_sw_stats_reply *reply = NULL; + zxdh_get_sw_stats *payload = NULL; + int32_t res = 0; + + DHTOOLS_LOG_INFO("is called!\n"); + + tools_reps.status = GET_STAT_SUCCESS; + + tools_msg = (struct zxdh_tools_msg *)kzalloc( + sizeof(struct zxdh_tools_msg), GFP_KERNEL); + if (tools_msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc tools_msg failed!!!\n"); + return -ENOMEM; + } + + if (copy_from_user(tools_msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + res = -EFAULT; + goto err_tools_msg; + } + + if (tools_msg->payload_len == 0 || + tools_msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n", + tools_msg->payload_len); + res = -EINVAL; + goto err_tools_msg; + } + + payload = (zxdh_get_sw_stats *)kzalloc(sizeof(zxdh_get_sw_stats), + GFP_KERNEL); + if (payload == NULL) { + DHTOOLS_LOG_ERR("kzalloc payload failed!!!\n"); + res = -ENOMEM; + goto err_tools_msg; + } + + if (copy_from_user(payload, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + sizeof(zxdh_get_sw_stats))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + res = -EFAULT; + goto err_payload; + } + + reply = (zxdh_sw_stats_reply *)kzalloc(sizeof(zxdh_sw_stats_reply), + GFP_KERNEL); + if (reply == NULL) { + DHTOOLS_LOG_ERR("kzalloc zxdh_sw_stats_reply failed!!!\n"); + res = -ENOMEM; + goto err_payload; + } + + if (payload->vf_idx > ZXDH_VF_NUM_MAX) { + DHTOOLS_LOG_ERR("payload->vf_idx > 256\n"); + res = -EFAULT; + goto err_reply; + } + + /* 执行VF相关的操作 */ + res = zxdh_get_vf_err_stats(en_dev, payload, reply); + if (res != 0) { + DHTOOLS_LOG_ERR("zxdh_get_vf_err_stats failed, err %d\n", res); + tools_reps.status = res; /* 返回错误码,用户态解析 */ + } + + if (unlikely(copy_to_user((void __user *)tools_msg->tools_reps, + &tools_reps, + 
sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy tools_reps to user failed!!!\n"); + res = -EFAULT; + goto err_reply; + } + + if (unlikely(copy_to_user((void __user *)tools_msg->msg_reps, reply, + sizeof(zxdh_sw_stats_reply)))) { + DHTOOLS_LOG_ERR("copy reply to user failed!!!\n"); + res = -EFAULT; + goto err_reply; + } + +err_reply: + kfree(reply); +err_payload: + kfree(payload); +err_tools_msg: + kfree(tools_msg); + return res; +} + +int32_t zxdh_tools_set_vf_status(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct dhtool_set_vf_status_msg *payload = NULL; + struct zxdh_tools_msg *tools_msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + struct pci_dev *pdev = en_dev->ops->get_pdev(en_dev->parent); + int num_vfs = pci_num_vf(pdev); + int32_t res = 0; + + DHTOOLS_LOG_INFO("is called!\n"); + + tools_reps.status = MSG_RECV_OK; + if (en_dev->ops->is_bond(en_dev->parent)) { + tools_reps.status = MSG_RECV_FAILED; + DHTOOLS_LOG_ERR( + "zxdh_tools_set_vf_status can't be used in bond_pf!\n"); + return -EINVAL; + } + + tools_msg = (struct zxdh_tools_msg *)kzalloc( + sizeof(struct zxdh_tools_msg), GFP_KERNEL); + if (tools_msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc tools_msg failed!!!\n"); + return -ENOMEM; + } + + if (copy_from_user(tools_msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(tools_msg); + return -EFAULT; + } + + if (tools_msg->payload_len == 0 || + tools_msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n", + tools_msg->payload_len); + kfree(tools_msg); + return -EINVAL; + } + + payload = (struct dhtool_set_vf_status_msg *)kzalloc( + sizeof(struct dhtool_set_vf_status_msg), GFP_KERNEL); + if (payload == NULL) { + DHTOOLS_LOG_ERR("kzalloc payload failed!!!\n"); + kfree(tools_msg); + return -ENOMEM; + } + + if 
(copy_from_user(payload, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + sizeof(struct dhtool_set_vf_status_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(payload); + kfree(tools_msg); + return -EFAULT; + } + + //执行VF相关的操作 + DHTOOLS_LOG_INFO("payload->mode %d, payload->vf_status %d\n", + payload->mode, payload->vf_status); + + if (num_vfs <= 0) { + DHTOOLS_LOG_ERR( + "When the VF is %d, the set vf status function cannot be used.\n", + num_vfs); + tools_reps.status = MSG_RECV_FAILED; + goto out; + } + + if (payload->mode == VF3_MAX && num_vfs <= 2) { + DHTOOLS_LOG_ERR("VF3_MAX mode can't use when num_vfs %d ≤ 2\n", + num_vfs); + tools_reps.status = MSG_RECV_FAILED; + goto out; + } + res = zxdh_set_vf_status(en_dev, payload->mode == VF3_MAX ? 2 : 0, + payload->vf_status); + if (res != 0) { + DHTOOLS_LOG_ERR("zxdh_set_vf_status failed, err %d\n", res); + tools_reps.status = MSG_RECV_FAILED; + } + +out: + if (unlikely(copy_to_user((void __user *)tools_msg->tools_reps, + &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + res = -EFAULT; + } + kfree(payload); + kfree(tools_msg); + return res; +} + +int32_t dhtool_dev_phyport_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + struct zxdh_tools_msg *msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + struct dhtool_dev_phyport_get dev_phyport_get = { 0 }; + + tools_reps.status = MSG_RECV_OK; + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR(" kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + dev_phyport_get.phyport = en_dev->phy_port; + DHTOOLS_LOG_INFO("dev_phyport_get.phyport %u\n", + 
dev_phyport_get.phyport); + + if (unlikely(copy_to_user((void __user *)msg->msg_reps, + &dev_phyport_get, + sizeof(struct dhtool_dev_phyport_get)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + kfree(msg); + return 0; +} + +int32_t dhtool_pkt_capture(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_en_priv *en_priv = netdev_priv(netdev); + struct zxdh_en_device *en_dev = &en_priv->edev; + uint16_t fw_patch = en_dev->ops->get_fw_patch(en_dev->parent); + struct pci_dev *pdev = NULL; + uint32_t ret = 0; + uint32_t i = 0; + struct zxdh_tools_msg *msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + struct zxdh_pkt_capture_msg *pkt_cap_msg = NULL; + DPP_PF_INFO_T pf_info = { 0 }; + + tools_reps.status = MSG_RECV_OK; + + pdev = en_dev->ops->get_pdev(en_dev->parent); + if (!pdev) { + DHTOOLS_LOG_ERR("pdev is NULL\n"); + return -EINVAL; + } + + pf_info.slot = en_dev->slot_id; + pf_info.vport = en_dev->vport; + + //DHTOOLS_LOG_INFO("is called!\n"); + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR(" kzalloc msg failed!!!\n"); + return -1; + } + + if (fw_patch < 2) { + tools_reps.status = MSG_RECV_FAILED; + if (unlikely(copy_to_user((void __user *)msg->tools_reps, + &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + } + + kfree(msg); + DHTOOLS_LOG_ERR("fw version not match pkt func!!!\n"); + return -EFAULT; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (msg->payload_len == 0 || + msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: 
invalid payload len: %d!\n", + msg->payload_len); + kfree(msg); + return -EINVAL; + } + + pkt_cap_msg = (struct zxdh_pkt_capture_msg *)kzalloc(msg->payload_len, + GFP_KERNEL); + if (pkt_cap_msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc payload failed!!!\n"); + kfree(msg); + return -ENOMEM; + } + + if (copy_from_user(pkt_cap_msg, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + msg->payload_len)) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(pkt_cap_msg); + kfree(msg); + return -EFAULT; + } + + if (pkt_cap_msg->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) { + DHTOOLS_LOG_ERR("send para ERR: invalid payload len: %d!\n", + pkt_cap_msg->payload_len); + kfree(pkt_cap_msg); + kfree(msg); + return -EINVAL; + } + + for (i = 0; i < msg->payload_len; i++) { + if (pkt_cap_msg->payload[i] > PKT_PAYLIAD_VALUE) { + DHTOOLS_LOG_ERR( + "invalid payload value at index %u: %u\n", i, + pkt_cap_msg->payload[i]); + kfree(pkt_cap_msg); + kfree(msg); + return -EINVAL; + } + } + + ret = zxdh_pkt_capture_process_message(netdev, msg, pkt_cap_msg, + &pf_info); + if (ret != 0) { + switch (ret) { + case MSG_RECV_PKT_CAP_PF_LOCK: + case MSG_RECV_PKT_FILE_PATH_ERR: + case MSG_RECV_PKT_FILE_EXIST_ERR: + case MSG_RECV_PKT_FILE_IN_PROGRESS_ERR: + tools_reps.status = ret; + break; + default: + tools_reps.status = MSG_RECV_FAILED; + break; + } + + DHTOOLS_LOG_ERR("zxdh_pkt_capture_process_message failed!!!\n"); + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + kfree(pkt_cap_msg); + return -EFAULT; + } + kfree(pkt_cap_msg); + kfree(msg); + return 0; +} + +int32_t dhtool_get_compat_infor(struct net_device *netdev, struct ifreq *ifr) +{ + struct zxdh_tools_msg *msg = NULL; + struct zxdh_tools_reps tools_reps = { 0 }; + struct dhtool_compat_reg compat_reg = { 0 }; + struct dhtool_compat_reg tool_compat_reg = { 0 }; + + tools_reps.status = MSG_RECV_OK; + msg = 
(struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR(" kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (msg->payload_len != sizeof(struct dhtool_compat_reg)) { + DHTOOLS_LOG_ERR( + "msg->payload_len error, msg->payload_len=%d!!!\n", + msg->payload_len); + kfree(msg); + return -EFAULT; + } + + if (copy_from_user(&tool_compat_reg, + ifr->ifr_ifru.ifru_data + + sizeof(struct zxdh_tools_msg), + msg->payload_len)) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + /* 判断dhtool兼容性? + driver没有必要校验,就算检测不兼容也需要dhtool工具进行结果反馈。 + 所以dhtool对版本进行判断就行,这里先做打印方便调试和故障定位。 */ + DHTOOLS_LOG_INFO("DHTOOL_COMPAT_ITM=%d\n", DHTOOL_COMPAT_ITM); + DHTOOLS_LOG_INFO("DHTOOL_COMPAT_MAJOR=%d\n", DHTOOL_COMPAT_MAJOR); + DHTOOLS_LOG_INFO("DHTOOL_COMPAT_TOOL_MINOR=%d\n", + DHTOOL_COMPAT_TOOL_MINOR); + DHTOOLS_LOG_INFO("DHTOOL_COMPAT_DRIV_MINOR=%d\n", + DHTOOL_COMPAT_DRIV_MINOR); + DHTOOLS_LOG_INFO("DHTOOL_COMPAT_PATCH=%d\n", DHTOOL_COMPAT_PATCH); + + /* 将本端兼容版本信息传递给dhtool */ + compat_reg.version_compat_item = DHTOOL_COMPAT_ITM; + compat_reg.major = DHTOOL_COMPAT_MAJOR; + compat_reg.tool_minor = DHTOOL_COMPAT_TOOL_MINOR; + compat_reg.drv_minor = DHTOOL_COMPAT_DRIV_MINOR; + compat_reg.patch = DHTOOL_COMPAT_PATCH; + if (unlikely(copy_to_user((void __user *)msg->msg_reps, &compat_reg, + sizeof(struct dhtool_compat_reg)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + if (unlikely(copy_to_user((void __user *)msg->tools_reps, &tools_reps, + sizeof(struct zxdh_tools_reps)))) { + DHTOOLS_LOG_ERR("copy_to_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + kfree(msg); + return 0; +} + +struct zxdh_tools_ioctl_subcmd_info subcmd_table[] = { + { MSG_MARK_INFO, 
zxdh_tools_mark_event_info }, +#ifdef ZXDH_TOOLS_MSGQ + { MSG_SEND_TO_RISCV, zxdh_tools_ioctl_msgq_send }, +#else + { MSG_SEND_TO_RISCV, zxdh_tools_ioctl_barchan_send }, +#endif + { MSG_DEVICE_INFO_GET, dhtool_device_info_get }, + { MSG_SET_VF_STATUS, zxdh_tools_set_vf_status }, + { MSG_DEVICE_PHYPORT_GET, dhtool_dev_phyport_get }, + { MSG_PKT_CAPTURE, dhtool_pkt_capture }, + { MSG_GET_DRV_VERSION, dhtool_get_compat_infor }, + { MSG_SET_VF_MAC, zxdh_tools_set_vf_mac }, + { MSG_GET_SW_STAT, zxdh_tools_get_sw_stat } +}; + +int32_t zxdh_tools_ioctl_dispatcher(struct net_device *netdev, + struct ifreq *ifr) +{ + struct zxdh_tools_msg *msg = NULL; + int32_t ret = 0; + uint32_t i = 0; + + //DHTOOLS_LOG_INFO("is called!\n"); + msg = (struct zxdh_tools_msg *)kzalloc(sizeof(struct zxdh_tools_msg), + GFP_KERNEL); + if (msg == NULL) { + DHTOOLS_LOG_ERR("kzalloc msg failed!!!\n"); + return -1; + } + + if (copy_from_user(msg, ifr->ifr_ifru.ifru_data, + sizeof(struct zxdh_tools_msg))) { + DHTOOLS_LOG_ERR("copy_from_user failed!!!\n"); + kfree(msg); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(subcmd_table); i++) { + if ((subcmd_table[i].subcmd == msg->subcmd) && + (subcmd_table[i].subcmd_callback)) { + ret = subcmd_table[i].subcmd_callback(netdev, ifr); + break; + } + } + + if (i == ARRAY_SIZE(subcmd_table)) { + DHTOOLS_LOG_ERR("No the callback of msg->subcmd %d!", + msg->subcmd); + } + + kfree(msg); + return ret; +} diff --git a/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.h b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.h new file mode 100644 index 000000000000..76ea9c4c8065 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_ioctl.h @@ -0,0 +1,237 @@ +#ifndef ZXDH_TOOLS_IOCTL_H_ +#define ZXDH_TOOLS_IOCTL_H_ + +#include +#include "../en_aux.h" +#include "../en_np/table/include/dpp_tbl_pkt_cap.h" +#include "../en_aux/en_aux_cmd.h" +//#define ZXDH_TOOLS_MSGQ + +#define SAFE_KFREE(ptr) \ + do { \ + if ((ptr) != NULL) { \ + 
kfree(ptr); \ + (ptr) = NULL; \ + } \ + } while (0) + +/* dhtool与内核驱动有新的兼容问题时维护这些值 */ +#define DHTOOL_COMPAT_ITM (0) +#define DHTOOL_COMPAT_MAJOR (0) +#define DHTOOL_COMPAT_DRIV_MINOR (0) +#define DHTOOL_COMPAT_TOOL_MINOR (0) +#define DHTOOL_COMPAT_PATCH (0) + +#define DH_SWITCH_DEVICE_ID 0x8036 +#define DH_SWITCH_VENDOR_ID 0x1cf2 + +#define MAX_DHTOOL_PID_NUMS 15 +#define DHTOOL_ERROR 0x1a +#define max_entry_num 68 +#define key_entry_num 2 +#define normal_tcam_index 60 +#define key_tcam_index 68 +#define ZXDH_PKT_FLAG 0xbb +#define PKT_PAYLIAD_VALUE 255 +#define ZXDH_PKT_INIT_SPEED 10000 +#define ZXDH_PKT_HEDER_LENGTH 20 +#define EIGHT_HOURS_SECONDS (8 * 3600) + +/* Started by AICoder, pid:kd6be6da67t21ef1435f09dca0f04f1b6b946b44 */ +#define DBDF_ECAM(domain, bus, devid, func) \ + (((domain & 0xffff) << 16) | ((bus & 0xff) << 8) | \ + ((devid & 0x1f) << 3) | (func & 0x07)) +/* Ended by AICoder, pid:kd6be6da67t21ef1435f09dca0f04f1b6b946b44 */ + +typedef enum { + MSG_MARK_INFO = 0, + MSG_SEND_TO_RISCV, + MSG_DEVICE_INFO_GET, + MSG_SET_VF_STATUS, + MSG_DEVICE_PHYPORT_GET, + MSG_PKT_CAPTURE = 5, + MSG_GET_DRV_VERSION = 6, + MSG_SET_VF_MAC = 7, + MSG_GET_SW_STAT = 8, + SUBCMD_NUM = 10, +} MSG_SUBCMD; + +struct zxdh_pkt_capture_msg { + uint32_t op_code; + uint16_t payload_len; + uint8_t payload[0]; +} __attribute__((packed)); + +typedef enum dhtool_pkt_capture_main_cmd_index { + DHTOOL_PKT_CAPTURE_CMD_ENABLE = 0, + DHTOOL_PKT_CAPTURE_CMD_DISABLE, + DHTOOL_PKT_CAPTURE_CMD_DISABLE_ALL, + DHTOOL_PKT_CAPTURE_CMD_RULE_INSERT, + DHTOOL_PKT_CAPTURE_CMD_RULE_DELETE, + DHTOOL_PKT_CAPTURE_CMD_SHOW, + DHTOOL_PKT_CAPTURE_CMD_SAVE_TO_FILE, + DHTOOL_PKT_CAPTURE_CMD_SET_SPEED, + DHTOOL_PKT_CAPTURE_CMD_ERROR +} DHTOOL_PKT_CAPTURE_MAIN_CMD_INDEX; + +typedef enum { + MSG_RECV_OK = 1, + MSG_RECV_FAILED = 2, + MSG_RECV_PKT_CAP_PF_LOCK = 3, + MSG_RECV_PKT_FILE_PATH_ERR = 4, + MSG_RECV_PKT_FILE_EXIST_ERR = 5, + MSG_RECV_PKT_FILE_IN_PROGRESS_ERR = 6, +} DHTOOL_RESPONSE; + +typedef enum { + 
NO_SWITCH = 0, + SWITCH = 1, +} SWITCH_FLAG; + +struct zxdh_tools_msg { + uint32_t subcmd; //provider care + uint32_t event_pid; //provider care + void *tools_reps; //provider care + uint32_t event_id; //caller care + uint16_t dst; //caller care + uint16_t dst_pcieid; //caller care + void *msg_reps; //caller care + uint16_t msg_reps_len; //caller care + uint16_t sync_or_async; //caller care + uint16_t payload_len; //caller care + uint16_t reserved; //caller care + uint8_t payload[0]; +} __attribute__((packed)); + +struct zxdh_tools_reps { + uint32_t status; /* must be */ + int32_t bar_or_vq_chan_ret; + uint32_t data[15]; +}; + +struct zxdh_tools_ioctl_subcmd_info { + MSG_SUBCMD subcmd; + int32_t (*subcmd_callback)(struct net_device *netdev, + struct ifreq *ifr); +}; + +struct dhtool_dev_pcieid_get { + uint32_t dev_pcieid; +}; +struct dhtool_dev_info { + uint32_t domain_no; + uint32_t bus_no; + uint32_t device_no; + uint32_t func_no; +}; + +struct dhtool_dev_info_get_reps { + SWITCH_FLAG switch_or_noswitch; + struct dhtool_dev_info dev_info; + struct dhtool_dev_info rp_info; + union { + struct dhtool_dev_info swusp_info; + }; +}; + +struct dhtool_eventpid_devbdf_array { + bool is_valid; + uint16_t dev_pcieid; + uint32_t dev_bdf; + uint32_t event_pid; +}; + +typedef enum { ALL_VF, + VF3_MAX } VF_SET_MODE; + +typedef enum { + VF_STATUS_AUTO, + VF_STATUS_ENABLE, + VF_STATUS_DISABLE +} VF_SET_STATUS; + +struct dhtool_set_vf_status_msg { + VF_SET_MODE mode; // 存储 'all_vf' 或 'vf3_max' + VF_SET_STATUS vf_status; // 存储 'auto', 'enable', 'disable' +}; + +struct dhtool_dev_phyport_get { + uint8_t phyport; + uint8_t rsv[15]; +}; + +struct pkt_deve_name { + char dev_name[IFNAMSIZ]; +}; + +typedef struct zxdh_pkt_cap_cmd_save_to_file { + char file_path[150]; + uint32_t file_size; + uint32_t pkt_count; + uint32_t is_stop; +} ZXDH_PKT_CAP_CMD_SAVE_TO_FILE; + +typedef struct zxdh_pkt_cap_rule_rule_insert { + uint32_t rule_index; + ZXDH_PKT_CAP_POINT cap_point; + ZXDH_PKT_CAP_MODE 
cap_mode; + ZXDH_PKT_CAP_KEY pkt_cap_key; + ZXDH_PKT_CAP_NORMAL_CONFIG rule_config; + char dev_name[IFNAMSIZ]; +} ZXDH_PKT_CAP_RULE_INSERT; + +typedef struct zxdh_pkt_cap_rule_rule_delete { + uint32_t rule_index; + uint32_t is_mode_all; + uint32_t is_all; + ZXDH_PKT_CAP_POINT cap_point; + ZXDH_PKT_CAP_MODE cap_mode; +} ZXDH_PKT_CAP_RULE_DELETE; + +typedef struct zxdh_pkt_cap_cmd_show { + uint32_t speed; + uint32_t entry_num; + uint32_t file_size; + uint32_t pkt_count; + uint32_t is_save_to_file; + ZXDH_PKT_CAP_ENABLE_STATUS enable_status; + ZXDH_PKT_CAP_RULE_INSERT entry_array[68]; + char file_path[150]; +} ZXDH_PKT_CAP_CMD_SHOW; + +typedef uint32_t (*callback_t)(struct net_device *netdev, + struct zxdh_tools_msg *tool_msg, + struct zxdh_pkt_capture_msg *pkt_msg, + DPP_PF_INFO_T *pf_info); + +typedef struct { + uint32_t op_code; + callback_t callback; +} zxdh_pkt_capture_callback_entry_t; + +struct dhtool_compat_reg { + uint8_t version_compat_item; + uint8_t major; + uint8_t tool_minor; + uint8_t drv_minor; + uint16_t patch; + uint8_t rsv[2]; +} __attribute__((packed)); + +typedef enum { zxdh_cap_disable, + zxdh_cap_enable } zxdh_cap_status; + +int32_t zxdh_tools_ioctl_dispatcher(struct net_device *netdev, + struct ifreq *ifr); +ssize_t pkt_packet_to_file(struct zxdh_en_device *en_dev, const char *data, + size_t len); +uint8_t pkt_packet_process(struct zxdh_en_device *en_dev, void *buf, + uint32_t len, uint8_t pkt_flag); +uint8_t pkt_skb_packet_process(struct zxdh_en_device *en_dev, + struct sk_buff *skb, uint8_t pkt_flag); +void capture_save_file_work_handler(struct work_struct *work); +void close_log_file(struct file *filp); +#define DHTOOLS_LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_DHTOOLS, fmt, ##arg); +#define DHTOOLS_LOG_INFO(fmt, arg...) 
DH_LOG_INFO(MODULE_DHTOOLS, fmt, ##arg); + +#endif diff --git a/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.c b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.c new file mode 100644 index 000000000000..99535fa67ab5 --- /dev/null +++ b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.c @@ -0,0 +1,259 @@ +#include +#include +#include +#include "../en_aux.h" +#include "zxdh_tools_ioctl.h" +#include "zxdh_tools_netlink.h" + +int32_t zxdh_tools_genl_recv_doit(struct sk_buff *skb, struct genl_info *info); +/* operation definition */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)) +static struct nla_policy zxdh_tools_genl_policy[ZXDH_TOOLS_A_MAX + 1] = { + [ZXDH_TOOLS_A_MSG] = { .type = NLA_NUL_STRING }, +}; +#endif + +struct genl_ops zxdh_tools_gnl_ops[] = { { + .cmd = ZXDH_TOOLS_C_ECHO, + .flags = 0, +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0)) + .policy = zxdh_tools_genl_policy, +#endif + .doit = zxdh_tools_genl_recv_doit, + .dumpit = NULL, +} }; + +static struct genl_family zxdh_tools_msg_family = { + .hdrsize = 0, + .name = ZXDH_TOOLS_NETLINK_NAME, + .version = 1, + .maxattr = ZXDH_TOOLS_A_MAX, + .ops = zxdh_tools_gnl_ops, + .n_ops = 1, +}; + +/* +* genl_msg_prepare_usr_msg : 构建netlink及gennetlink首部 +* @cmd : genl_ops的cmd +* @size : gen_netlink用户数据的长度(包括用户定义的首部) +*/ +int32_t zxdh_tools_genl_msg_prepare_usr_msg(u8 cmd, size_t size, uint32_t pid, + struct sk_buff **skbp) +{ + void *ptr = NULL; + struct sk_buff *skb; + //DHTOOLS_LOG_INFO("is called!\n"); + /* create a new netlink msg */ + skb = genlmsg_new(size, GFP_KERNEL); + if (skb == NULL) { + DHTOOLS_LOG_ERR("genlmsg_new failed!!!\n"); + return -1; + } + /* Add a new netlink message to an skb */ + ptr = genlmsg_put(skb, pid, 0, &zxdh_tools_msg_family, 0, cmd); + if (ptr == NULL) { + DHTOOLS_LOG_ERR("genlmsg_put failed!!!\n"); + return -1; + } + *skbp = skb; + return 0; +} + +/* +* 添加用户数据,及添加一个netlink addribute +*@type : nlattr的type +*@len : nlattr中的len +*@data : 用户数据 
+*/ +int32_t zxdh_tools_genl_msg_mk_usr_msg(struct sk_buff *skb, int type, + void *data, int len) +{ + int ret = 0; + //DHTOOLS_LOG_INFO("is called!\n"); + /* add a netlink attribute to a socket buffer */ + ret = nla_put(skb, type, len, data); + if (ret != 0) { + DHTOOLS_LOG_ERR("nla_put failed, ret=%d!!!\n", ret); + return -1; + } + return 0; +} + +/** +* genl_msg_send_to_user - 通过generic netlink发送数据到netlink +* +* @data: 发送数据缓存 +* @len: 数据长度 单位:byte +* @pid: 发送到的客户端pid +*/ +int32_t zxdh_tools_genl_msg_send_to_user(void *data, uint16_t len, uint32_t pid) +{ + struct sk_buff *skb; + uint16_t size; + int ret = 0; + + //DHTOOLS_LOG_INFO("is called!\n"); + ret = nla_total_size( + len); /* total length of attribute including padding */ + if (ret <= 0) { + DHTOOLS_LOG_ERR("nla_total_size failed, ret=%d!\n", ret); + return -1; + } + size = ret; + + ret = zxdh_tools_genl_msg_prepare_usr_msg(ZXDH_TOOLS_C_ECHO, size, pid, + &skb); + if (ret) { + DHTOOLS_LOG_ERR( + "zxdh_tools_genl_msg_prepare_usr_msg failed, ret=%d!!!\n", + ret); + return -1; + } + + ret = zxdh_tools_genl_msg_mk_usr_msg(skb, ZXDH_TOOLS_A_MSG, data, len); + if (ret) { + DHTOOLS_LOG_ERR( + "zxdh_tools_genl_msg_mk_usr_msg failed, ret=%d!!!\n", + ret); + kfree_skb(skb); + return -1; + } + + ret = genlmsg_unicast(&init_net, skb, pid); + if (ret != 0) { + struct task_struct *task = NULL; + task = pid_task(find_vpid(pid), PIDTYPE_PID); + if (!task) { + DHTOOLS_LOG_ERR("dhtool with pid %d has exited!\n", + pid); + } + DHTOOLS_LOG_ERR("genlmsg_unicast failed, ret=%d!!!\n", ret); + return -1; + } + //DHTOOLS_LOG_INFO("genlnetlink msg send to user success.\n"); + return 0; +} + +/* Started by AICoder, pid:t59ebl8546g627314a890b5e207c778ad317015e */ +extern struct dhtool_eventpid_devbdf_array + eventpid_devbdf_array[MAX_DHTOOL_PID_NUMS]; +int32_t dhtool_find_eventpid_of_devbdf(uint32_t dev_bdf, uint32_t *event_pid) +{ + int i = 0; + for (i = 0; i < ARRAY_SIZE(eventpid_devbdf_array); i++) { + if 
(eventpid_devbdf_array[i].is_valid) { + if (dev_bdf == eventpid_devbdf_array[i].dev_bdf) { + *event_pid = eventpid_devbdf_array[i].event_pid; + //DHTOOLS_LOG_INFO("found event_pid = %u of dev_bdf %d.\n", eventpid_devbdf_array[i].event_pid, dev_bdf); + return 0; + } + } + } + + DHTOOLS_LOG_ERR("can not found the event_pid of dev_bdf %d.\n", + dev_bdf); + return -1; +} +/* Ended by AICoder, pid:t59ebl8546g627314a890b5e207c778ad317015e */ + +/* Started by AICoder, pid:d8c3fca75fx34f614b080ba560ac35630222a5da */ +int32_t zxdh_tools_sendto_user_netlink(void *pay_load, uint16_t len, + void *reps_buffer, uint16_t *reps_len, + void *dev) +{ + // 获取设备信息 + struct zxdh_en_device *en_dev = (struct zxdh_en_device *)dev; + int32_t ret = 0; + uint32_t event_pid = 0; + struct pci_dev *pdev = NULL; + uint32_t domain_no = 0; + uint32_t bus_no = 0; + uint32_t device_no = 0; + uint32_t func_no = 0; + uint32_t dev_bdf = 0; + + // 检查设备是否为空 + if (en_dev == NULL) { + DHTOOLS_LOG_ERR("dev is NULL\n"); + return -1; + } + + if (en_dev->ops->get_coredev_type(en_dev->parent) == DH_COREDEV_VF) { + return 0; + } + + // 检查负载和长度是否有效 + if ((pay_load == NULL) || (len == 0)) { + DHTOOLS_LOG_ERR("invalid para, pay_load = 0x%llx, len = %d\n", + (uint64_t)pay_load, len); + return -1; + } + + // 获取PCI设备 + pdev = en_dev->ops->get_pdev(en_dev->parent); + if (!pdev) { + DHTOOLS_LOG_ERR("pdev is NULL\n"); + return -1; + } + + // 解析PCI设备名称以获取域号、总线号、设备号和功能号 + ret = sscanf(pci_name(pdev), "%x:%x:%x.%u", &domain_no, &bus_no, + &device_no, &func_no); + if (ret != 4) { + DHTOOLS_LOG_ERR( + "could not get dev domain_no、bus_no、device_no、func_no from pci_name(pdev)\n"); + return -1; + } + + // 计算设备的BDF(基地址寄存器) + dev_bdf = DBDF_ECAM(domain_no, bus_no, device_no, func_no); + + // 查找事件PID + ret = dhtool_find_eventpid_of_devbdf(dev_bdf, &event_pid); + if (ret != 0) { + return -1; + } + + // 特殊处理操作码为 EVENT_OP_CODE_LOG_GET_FINISH_TO_H 的情况 + if ((*(uint32_t *)pay_load) == EVENT_OP_CODE_LOG_GET_FINISH_TO_H) { + 
//DHTOOLS_LOG_INFO("*(uint32_t *)pay_load(op_code)=%d\n", *(uint32_t *)pay_load); + } + + // 发送消息到用户空间 + ret = zxdh_tools_genl_msg_send_to_user(pay_load, len, event_pid); + if (ret) { + DHTOOLS_LOG_ERR( + "zxdh_tools_genl_msg_send_to_user failed, ret=%d!!!\n", + ret); + return -1; + } + + return 0; +} +/* Ended by AICoder, pid:d8c3fca75fx34f614b080ba560ac35630222a5da */ + +/* doit函数*/ +int32_t zxdh_tools_genl_recv_doit(struct sk_buff *skb, struct genl_info *info) +{ + DHTOOLS_LOG_INFO("is called!\n"); + return 0; +} + +int32_t zxdh_tools_netlink_register(void) +{ + int ret = 0; + ret = genl_register_family(&zxdh_tools_msg_family); + if (ret) { + DHTOOLS_LOG_ERR( + "zxdh_tools_netlink_family register failed, ret=%d!!!\n", + ret); + return -1; + } + + return 0; +} + +void zxdh_tools_netlink_unregister(void) +{ + genl_unregister_family(&zxdh_tools_msg_family); +} diff --git a/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.h b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.h new file mode 100644 index 000000000000..a5cec83ceebc --- /dev/null +++ b/drivers/net/ethernet/dinghai/zxdh_tools/zxdh_tools_netlink.h @@ -0,0 +1,62 @@ +#ifndef ZXDH_TOOLS_NETLINK_H_ +#define ZXDH_TOOLS_NETLINK_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN)) +#define ZXDH_TOOLS_NETLINK_NAME "tools_family" + +typedef enum { + EVENT_OP_CODE_DEV_PCIEID_TO_H = 0, + EVENT_OP_CODE_LOG_GET_TO_H = 1, + EVENT_OP_CODE_DIAG_TO_H = 2, + EVENT_OP_CODE_STAT_TO_H = 3, + EVENT_OP_CODE_REGSDUMP_TO_H = 4, + EVENT_OP_CODE_REGSMEM_TO_H = 5, + EVENT_OP_CODE_SN_MAC_SEND_TO_H = 6, + EVENT_OP_CODE_FWUPDATE_TO_H = 7, + EVENT_OP_CODE_DINGHAI_RESET_TO_H = 8, + EVENT_OP_CODE_FPUT_TO_H = 10, + EVENT_OP_CODE_LOG_GET_FINISH_TO_H = 11, + EVENT_OP_CODE_FPUT_FLASH_TO_H = 14, + EVENT_OP_CODE_NUM_TO_H = 100, +} EVENT_OP_CODE_TO_H; + +/* 属性类型*/ +enum { + ZXDH_TOOLS_A_UNSPEC, + ZXDH_TOOLS_A_MSG, + __ZXDH_TOOLS_A_MAX, +}; +#define ZXDH_TOOLS_A_MAX 
(__ZXDH_TOOLS_A_MAX - 1) + +/* 操作码*/ +enum { + ZXDH_TOOLS_C_UNSPEC, + ZXDH_TOOLS_C_ECHO, + __ZXDH_TOOLS_C_ECHO, +}; +#define ZXDH_TOOLS_C_MAX (__ZXDH_TOOLS_C_MAX - 1) + +typedef enum { + FWUPDATE = 27, +} event_op_code; + +struct zxdh_tools_recv_msg { + event_op_code op_code; + uint8_t status; +}; + +int32_t zxdh_tools_sendto_user_netlink(void *pay_load, uint16_t len, + void *reps_buffer, uint16_t *reps_len, + void *dev); +int zxdh_tools_netlink_register(void); +void zxdh_tools_netlink_unregister(void); + +#ifdef __cplusplus +} +#endif + +#endif /* ZXDH_TOOLS_NETLINK_H_ */ diff --git a/include/linux/dinghai/device.h b/include/linux/dinghai/device.h new file mode 100755 index 000000000000..d8e108da2117 --- /dev/null +++ b/include/linux/dinghai/device.h @@ -0,0 +1,183 @@ +#ifndef DINGHAI_DEVICE_H +#define DINGHAI_DEVICE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* helper macros */ +#define __dh_nullp(typ) ((struct dh_ifc_##typ##_bits *)0) +#define __dh_bit_sz(typ, fld) sizeof(__dh_nullp(typ)->fld) +#define __dh_bit_off(typ, fld) (offsetof(struct dh_ifc_##typ##_bits, fld)) +#define __dh_16_off(typ, fld) (__dh_bit_off(typ, fld) / 16) +#define __dh_dw_off(typ, fld) (__dh_bit_off(typ, fld) / 32) +#define __dh_64_off(typ, fld) (__dh_bit_off(typ, fld) / 64) +#define __dh_16_bit_off(typ, fld) (16 - __dh_bit_sz(typ, fld) - (__dh_bit_off(typ, fld) & 0xf)) +#define __dh_dw_bit_off(typ, fld) (32 - __dh_bit_sz(typ, fld) - (__dh_bit_off(typ, fld) & 0x1f)) +#define __dh_mask(typ, fld) ((u32)((1ull << __dh_bit_sz(typ, fld)) - 1)) +#define __dh_dw_mask(typ, fld) (__dh_mask(typ, fld) << __dh_dw_bit_off(typ, fld)) +#define __dh_mask16(typ, fld) ((u16)((1ull << __dh_bit_sz(typ, fld)) - 1)) +#define __dh_16_mask(typ, fld) (__dh_mask16(typ, fld) << __dh_16_bit_off(typ, fld)) +#define __dh_st_sz_bits(typ) sizeof(struct dh_ifc_##typ##_bits) + +#define DH_FLD_SZ_BYTES(typ, fld) (__dh_bit_sz(typ, fld) / 8) +#define DH_ST_SZ_BYTES(typ) (sizeof(struct dh_ifc_##typ##_bits) / 
8) +#define DH_ST_SZ_DW(typ) (sizeof(struct dh_ifc_##typ##_bits) / 32) +#define DH_ST_SZ_QW(typ) (sizeof(struct dh_ifc_##typ##_bits) / 64) +#define DH_UN_SZ_BYTES(typ) (sizeof(union dh_ifc_##typ##_bits) / 8) +#define DH_UN_SZ_DW(typ) (sizeof(union dh_ifc_##typ##_bits) / 32) +#define DH_BYTE_OFF(typ, fld) (__dh_bit_off(typ, fld) / 8) +#define DH_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + DH_BYTE_OFF(typ, fld))) + +/* insert a value to a struct */ +#define DH_SET(typ, p, fld, v) do { \ + u32 _v = v; \ + BUILD_BUG_ON(__dh_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __dh_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __dh_dw_off(typ, fld))) & \ + (~__dh_dw_mask(typ, fld))) | (((_v) & __dh_mask(typ, fld)) \ + << __dh_dw_bit_off(typ, fld))); \ +} while (0) + +#define DH_ARRAY_SET(typ, p, fld, idx, v) do { \ + BUILD_BUG_ON(__dh_bit_off(typ, fld) % 32); \ + DH_SET(typ, p, fld[idx], v); \ +} while (0) + +#define DH_SET_TO_ONES(typ, p, fld) do { \ + BUILD_BUG_ON(__dh_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __dh_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __dh_dw_off(typ, fld))) & \ + (~__dh_dw_mask(typ, fld))) | ((__dh_mask(typ, fld)) \ + << __dh_dw_bit_off(typ, fld))); \ +} while (0) + +#define DH_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) + \ +__dh_dw_off(typ, fld))) >> __dh_dw_bit_off(typ, fld)) & \ +__dh_mask(typ, fld)) + +#define DH_GET_PR(typ, p, fld) ({ \ + u32 ___t = DH_GET(typ, p, fld); \ + pr_debug(#fld " = 0x%x\n", ___t); \ + ___t; \ +}) + +#define __DH_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__dh_bit_sz(typ, fld) != 64); \ + *((__be64 *)(p) + __dh_64_off(typ, fld)) = cpu_to_be64(v); \ +} while (0) + +#define DH_SET64(typ, p, fld, v) do { \ + BUILD_BUG_ON(__dh_bit_off(typ, fld) % 64); \ + __DH_SET64(typ, p, fld, v); \ +} while (0) + +#define DH_ARRAY_SET64(typ, p, fld, idx, v) do { \ + BUILD_BUG_ON(__dh_bit_off(typ, fld) % 64); \ + __DH_SET64(typ, p, fld[idx], v); \ +} while (0) + +#define DH_GET64(typ, 
p, fld) be64_to_cpu(*((__be64 *)(p) + __dh_64_off(typ, fld))) + +#define DH_GET64_PR(typ, p, fld) ({ \ + u64 ___t = DH_GET64(typ, p, fld); \ + pr_debug(#fld " = 0x%llx\n", ___t); \ + ___t; \ +}) + +#define DH_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) + \ +__dh_16_off(typ, fld))) >> __dh_16_bit_off(typ, fld)) & \ +__dh_mask16(typ, fld)) + +#define DH_SET16(typ, p, fld, v) do { \ + u16 _v = v; \ + BUILD_BUG_ON(__dh_st_sz_bits(typ) % 16); \ + *((__be16 *)(p) + __dh_16_off(typ, fld)) = \ + cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __dh_16_off(typ, fld))) & \ + (~__dh_16_mask(typ, fld))) | (((_v) & __dh_mask16(typ, fld)) \ + << __dh_16_bit_off(typ, fld))); \ +} while (0) + +/* Big endian getters */ +#define DH_GET64_BE(typ, p, fld) (*((__be64 *)(p) + \ + __dh_64_off(typ, fld))) + +#define DH_GET_BE(type_t, typ, p, fld) ({ \ + type_t tmp; \ + switch (sizeof(tmp)) { \ + case sizeof(u8): \ + tmp = (__force type_t)DH_GET(typ, p, fld); \ + break; \ + case sizeof(u16): \ + tmp = (__force type_t)cpu_to_be16(DH_GET(typ, p, fld)); \ + break; \ + case sizeof(u32): \ + tmp = (__force type_t)cpu_to_be32(DH_GET(typ, p, fld)); \ + break; \ + case sizeof(u64): \ + tmp = (__force type_t)DH_GET64_BE(typ, p, fld); \ + break; \ + } \ + tmp; \ + }) + +enum dh_cap_type { + DH_CAP_GENERAL = 0, +}; +/* GET Dev Caps macros */ +#define DH_CAP_GEN(mdev, cap) \ + DH_GET(cmd_hca_cap, mdev->caps.hca[DH_CAP_GENERAL]->cur, cap) + +#define DH_CAP_GEN_64(mdev, cap) \ + DH_GET64(cmd_hca_cap, mdev->caps.hca[DH_CAP_GENERAL]->cur, cap) + +#define DH_CAP_GEN_MAX(mdev, cap) \ + DH_GET(cmd_hca_cap, mdev->caps.hca[DH_CAP_GENERAL]->max, cap) + + + + +enum dh_event_queue_type { + DH_EVENT_QUEUE_TYPE_SAMPLE, + DH_EVENT_QUEUE_TYPE_RISCV +}; + +struct dh_mpf_priv { + +}; + +enum dh_health_status { + IDLE, + HANDLING, + PENDING, + DONE +}; + +#define FW_VERISON_SIZE (32) +struct health_buffer { + uint32_t synd; + uint32_t health_counter; + uint8_t status; + uint8_t rfr; + uint8_t fw_exception; + uint8_t 
riscv_power_on; + uint8_t fw_version[FW_VERISON_SIZE]; + uint8_t pf_status[5]; + uint8_t health_version; + uint8_t rsv1[30]; +}; + +struct core_health { + struct health_buffer __iomem *hb; + uint32_t prev; + uint32_t miss_counter; + uint32_t synd; +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/devlink.h b/include/linux/dinghai/devlink.h new file mode 100755 index 000000000000..7129c6a21ab6 --- /dev/null +++ b/include/linux/dinghai/devlink.h @@ -0,0 +1,28 @@ +#ifndef __ZXDH_DEVLINK_H__ +#define __ZXDH_DEVLINK_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +struct devlink *zxdh_devlink_alloc(struct device *dev, struct devlink_ops *dh_devlink_ops, size_t priv_size); +void zxdh_devlink_free(struct devlink *devlink); + +int32_t zxdh_devlink_register(struct devlink *devlink); + +void zxdh_devlink_unregister(struct devlink *devlink); + +static inline struct net *dh_core_net(struct dh_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/linux/dinghai/dh_cmd.h b/include/linux/dinghai/dh_cmd.h new file mode 100644 index 000000000000..21ef9be7d74d --- /dev/null +++ b/include/linux/dinghai/dh_cmd.h @@ -0,0 +1,316 @@ +#ifndef _ZXDH_MSG_CHAN_PUB_H_ +#define _ZXDH_MSG_CHAN_PUB_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + + +struct zxdh_bar_extra_para +{ + bool is_sync; /* 是否同步消息 */ + uint16_t retrycnt; /* bar消息重传次数 */ +}__attribute__ ((packed)); + + +#define INVALID_NUM 0xff +#define ZXDH_MPF_PCIEID 0x800 +#define ZXDH_NET_ACK_OK 0 +#define RISCV_MAC_OK 0xaa +#define BAR_MSG_REPS_OK 0xff +#define COMMON_TBL_OK 0xaa +#define RISCV_DEBUG_OK 0xaa +#define PCIEID_PF_ID_MASK (0x0700) +#define PCIEID_PF_ID_OFFSET (8) + +#define FIND_PF_PCIE_ID(value) ((value & 0xff00) | BIT(11)) +#define FIND_VF_PCIE_ID(pf_pcie_id, vf_id) ((pf_pcie_id & (~BIT(11))) | (vf_id)) +#define 
FIND_PF_ID(pf_pcie_id) ((pf_pcie_id & PCIEID_PF_ID_MASK) >> PCIEID_PF_ID_OFFSET) +#define GET_FUNC_NO(pf_no, vf_idx) ((pf_no & 0xF) | ((vf_idx & 0xFF) << 8)) + +#define PFVF_FLAG_OFFSET 11 // 用于判断pcie_id的第11位为0或1 + +typedef enum +{ + MODULE_DBG = 0, /* 中断测试*/ + MODULE_TBL, /* 资源表交互*/ + MODULE_MSIX, /* Msix配置*/ + MODULE_SDA, /* Sda消息*/ + MODULE_RDMA, /* Rdma调试*/ + MODULE_DEMO, /* 通路测试*/ + MODULE_SMMU, /* Smmu调试*/ + MODULE_MAC, /* MAC相关*/ + MODULE_VDPA, /* vdpa热迁移*/ + MODULE_VQM, /* vqm消息*/ + MODULE_MSGQ, + MODULE_VPORT_GET, /* 获取vport接口*/ + MODULE_BDF_GET, /* 获取bdf接口*/ + MODULE_RISC_READY, /* risc ready信号*/ + MODULE_REVERSE, /* 字节流取反*/ + MODULE_NVME, /* NVME调试*/ + MODULE_NPSDK, /* Np配表*/ + MODULE_TOD, /* UART通信*/ + MODULE_VF_BAR_MSG_TO_PF, /* VF发送给PF的消息 */ + MODULE_PF_BAR_MSG_TO_VF, /* PF发送给VF的消息 */ + MODULE_DEBUG = 20, /* 调用debug接口 */ + MODULE_PPS = 23, /*pps中断相关消息*/ + MODULE_VIRTIO = 25, + MODULE_FLASH = 32, /* 读取flash信息 */ + MODULE_OFFSET_GET = 33, + MODULE_CFG_MAC = 34, + MODULE_CFG_VQM = 36, + MODULE_PHYPORT_QUERY = 37, /* BOND 获取phyport*/ + MODULE_DHTOOL = 39, /*dhtool,riscv发送给AUX层的消息,将其转发给user*/ + MODULE_RESET_MSG = 40, /* host复位消息的ID号 */ + MODULE_PF_TIMER_TO_RISC_MSG = 41, /* server rsicv time */ + MODULE_LOGIN_CTRL = 43, /* 控制riscv上sshd守护进程开启和关闭*/ + MODULE_PCIE_RES_QUERY = 52, /* pcie资源查询*/ + MODULE_DTP = 53, + MODULE_MPF_PCIE_INFO = 54, /* pcie资源查询*/ + MODULE_HEALTH = 55, /* 自愈相关 */ + MODULE_VQMB = 59, /* riscv->host */ + MSG_MODULE_NUM = 60, +} MSG_MODULE_ID; + + +/* 消息端*/ +typedef enum BAR_DRIVER_TYPE +{ + MSG_CHAN_END_MPF = 0, + MSG_CHAN_END_PF, + MSG_CHAN_END_VF, + MSG_CHAN_END_RISC, + MSG_CHAN_END_ERR, +} BAR_DRIVER_TYPE; + +#define BDF_ECAM(bus, devid, func) ((bus & 0xff) << 8) | (func & 0x07) | ((devid & 0x1f) << 3) +#define SBDF_ECAM(domain, bus, devid, func) ((domain & 0xffff) << 16) | \ + ((bus & 0xff) << 8) | (func & 0x07) | ((devid & 0x1f) << 3) + +/* bar通道错误码*/ +typedef enum BAR_MSG_RTN +{ + BAR_MSG_OK = 0, + BAR_MSG_ERR_NULL, /* 空指针*/ + 
BAR_MSG_ERR_TYPE, /* 消息类型异常 */ + BAR_MSG_ERR_MODULE , /* 模块ID异常 */ + BAR_MSG_ERR_BODY_NULL, /* 消息体异常 */ + BAR_MSG_ERR_LEN, /* 消息长度异常 */ + BAR_MSG_ERR_TIME_OUT, /* 消息发送超长 */ + BAR_MSG_ERR_NOT_READY, /* 消息发送条件异常,BUF不可以用*/ + BAR_MEG_ERR_NULL_FUNC, /* 空的接收处理函数指针*/ + BAR_MSG_ERR_REPEAT_REGISTER, /* 模块重复注册*/ + BAR_MSG_ERR_UNGISTER, /* 重复解注册*/ + BAR_MSG_ERR_NULL_PARA, /* 发送接口参数界结构体指针为空*/ + BAR_MSG_ERR_REPSBUFF_LEN, /* reps_buff的长度太短*/ + BAR_MSG_ERR_MODULE_NOEXIST, /*查找不到该模块对应的消息处理函数*/ + BAR_MSG_ERR_VIRTADDR_NULL, /*发送接口传入参数中的虚拟地址为空*/ + BAR_MSG_ERR_REPLY, /**< seq_id匹配失败>**/ + BAR_MSG_ERR_MSGID, /**< seq_id申请失败>**/ + BAR_MSG_ERR_MPF_NOT_SCANED, /**< MPF通道不可用>**/ + BAR_MSG_ERR_USR_RET_ERR, /**< 处理函数返回值错误>**/ + BAR_MSG_ERR_ERR_PCIEID, /**< pcieID错误>**/ + BAR_MSG_ERR_LOCK_FAILED, /**获取硬件锁失败**/ + BAR_MSG_ERR_BAR_ABNORMAL, /**bar全ff**/ + BAR_MSG_ERR_NOT_MATCH, +} BAR_MSG_RTN; + +enum pciebar_layout_type +{ + URI_VQM = 0, + URI_SPINLOCK = 1, + URI_FWCAP = 2, + URI_FWSHR = 3, + URI_DRS_SEC = 4, + URI_RSV = 5, + URI_CTRLCH = 6, + URI_1588 = 7, + URI_QBV = 8, + URI_MACPCS = 9, + URI_RDMA = 10, +/* DEBUG PF */ + URI_MNP = 11, + URI_MSPM = 12, + URI_MVQM = 13, + URI_MDPI = 14, + URI_NP = 15, +/* END DEBUG PF */ + URI_MAX, +}; + +typedef enum +{ + BAR_MSG_MSIX_FROM_VF = 0, + BAR_MSG_MSIX_FROM_MPF, + BAR_MSG_MSIX_FROM_RISCV, + BAR_MSG_MSIX_NUM_MAX +} bar_msg_msix_irq_type; + +/* msix消息参数结构体*/ +struct msix_para +{ + uint16_t vector_risc; + uint16_t vector_pfvf; + uint16_t vector_mpf; + uint16_t driver_type; + uint16_t pcie_id; + struct pci_dev *pdev; + uint64_t virt_addr; +}; + +struct bar_offset_params +{ + uint64_t virt_addr; + uint16_t pcie_id; + uint16_t type; +}; +struct bar_offset_res +{ + uint32_t bar_offset; + uint32_t bar_length; +}; + +struct zxdh_pci_bar_msg +{ + uint64_t virt_addr; /**< 4k空间地址, 若src为MPF该参数不生效>**/ + void *payload_addr; /**< 消息净荷地址>**/ + uint16_t payload_len; /**< 消息净荷长度>**/ + uint16_t emec; /**< 消息紧急类型>**/ + uint16_t src; /**< 消息发送源,参考BAR_DRIVER_TYPE>**/ + 
uint16_t dst; /**< 消息接收者,参考BAR_DRIVER_TYPE>**/ + uint32_t event_id; /**< 事件id>**/ + uint16_t src_pcieid; /**< 源 pcie_id>**/ + uint16_t dst_pcieid; /**< 目的pcie_id>**/ +}; + +struct link_info_struct +{ + uint32_t speed; + uint32_t autoneg_enable; + uint32_t supported_speed_modes; + uint32_t advertising_speed_modes; + uint8_t duplex; +}; + +struct zxdh_msg_recviver_mem +{ + void *recv_buffer; /**< 消息接收缓存>**/ + uint16_t buffer_len; /**< 消息缓存长度>**/ +}; + +/** + * zxdh_bar_chan_msg_recv_callback - 消息处理函数 + * @pay_load: 消息内容 + * @len: 消息长度 + * @reps_buffer: 回复消息 + * @reps_len: 回复消息长度 + * @dev: 私有数据 + */ +typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev); + +/** + * zxdh_bar_chan_sync_msg_send - 通过PCIE BAR空间发送同步消息 + * @in: 消息发送信息 + * @result: 消息结果反馈 + * @return: 0 成功,其他失败 + */ +int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result); + +/** + * zxdh_bar_send_without_reps_hdr - 回复不包括四字节头 + * @in: 消息发送信息 + * @result: 消息结果反馈 + * @return: 0 成功,其他失败 + */ +int zxdh_bar_send_without_reps_hdr(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result); + +/** + * zxdh_bar_chan_msg_recv_register - PCIE BAR空间消息方式,注册消息接收回调 + * @event_id: 注册模块id + * @callback: 模块实现的接收处理函数指针 + * @return: 0 成功,其他失败 + * 在驱动初始化时调用 + */ +int zxdh_bar_chan_msg_recv_register(uint8_t event_id, zxdh_bar_chan_msg_recv_callback callback); + +/** + * zxdh_bar_chan_msg_recv_unregister - PCIE BAR空间消息方式,解注册消息接收回调 + * @event_id: 内核PCIE设备地址 + * @return:0 成功,其他失败 + * 在驱动卸载时需要调用 + */ +int zxdh_bar_chan_msg_recv_unregister(uint8_t event_id); + +/** + * zxdh_bar_callback_register_state - 查看某一个消息处理函数状态 + * @event_id: 事件id + * @return:0 表示已经注册,其他表示未注册 + * 在驱动卸载时需要调用 + */ +int zxdh_bar_callback_register_state(uint16_t event_id); + +/** + * zxdh_bar_enable_chan - 驱动使能通道函数 + * @_msix_para: msix中断配置信息 + * @pcie_id: 查询到的pcie_id + * @vport: 查询到的vport + * @return: 0 成功,其他失败 + */ +int 
zxdh_bar_enable_chan(struct msix_para *_msix_para, uint16_t *vport); + +/** + * zxdh_get_bar_offset - 获取指定模块的偏移值 + * @bar_offset_params: 输入参数 + * @bar_offset_res: 模块偏移和长度 + */ +int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res); + +int32_t zxdh_send_command(uint64_t vaddr, uint16_t pcie_id, uint16_t module_id, \ + void *msg, void *ack ,bool is_sync_msg); + +int zxdh_bar_msg_chan_init(void); +int zxdh_bar_msg_chan_remove(void); + +/** + * zxdh_bar_reset_valid - 重置risc发来消息的valid + * @subchan_addr: 4k首地址 + * @return: 0 成功,其他失败 + */ +void zxdh_bar_reset_valid(uint64_t subchan_addr); + +/** + * zxdh_get_event_id - 获取risc发来消息的event_id + * @subchan_addr: 4k首地址 + * @return: event_id + */ +uint16_t zxdh_get_event_id(uint64_t subchan_addr, uint8_t src_type, uint8_t dst_type); + +int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev); +int32_t call_msg_recv_func_tbl(uint16_t event_id, void *pay_load, uint16_t len, void *reps_buffer, uint16_t *reps_len, void *dev); + +/** + * bar_chan_pf_init_spinlock - 自定义网络驱动清理硬件锁 + * @pcie_id: 设备pcie_id + * @bar_base_addr: 设备bar0虚拟基地址 + */ +int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr); + + +typedef int (*zxdh_usr_msg_cache_callback)(uint16_t event_id, void *msg, uint16_t msg_len); +/** + * zxdh_usr_msg_cache_func_register - 消息缓存函数注册接口 + * zxdh-zf-mpf调用, 当接收接口接收到用户态消息时, 调用钩子进行消息缓存 + * @msg_cache_func: 消息缓存函数, 参考格式msg_cache_func + */ +void zxdh_usr_msg_cache_func_register(zxdh_usr_msg_cache_callback func); + +int32_t zxdh_vqm_queue_cfg(uint64_t virt_addr, uint16_t pcie_id, uint32_t phy_queue_idx); + +#ifdef __cplusplus +} +#endif + +#endif /* _ZXDH_MSG_CHAN_PUB_H_ */ diff --git a/include/linux/dinghai/dh_ifc.h b/include/linux/dinghai/dh_ifc.h new file mode 100755 index 000000000000..e5d9086aed78 --- /dev/null +++ b/include/linux/dinghai/dh_ifc.h @@ -0,0 +1,51 @@ +#ifndef DINGHAI_IFC_H +#define DINGHAI_IFC_H + +#ifdef __cplusplus +extern "C" { +#endif + 
+#include + +struct dh_ifc_cmd_hca_cap_bits { + +}; +union dh_ifc_hca_cap_union_bits{ + +}; + +struct dh_ifc_mbox_in_bits { + uint8_t opcode[0x10]; + uint8_t uid[0x10]; + + uint8_t reserved_at_20[0x10]; + uint8_t op_mod[0x10]; + + uint8_t reserved_at_40[0x40]; +}; + +struct dh_ifc_mbox_out_bits { + uint8_t status[0x8]; + uint8_t reserved_at_8[0x18]; + + uint8_t syndrome[0x20]; + + uint8_t reserved_at_40[0x40]; +}; + +struct dh_ifc_query_hca_cap_out_bits { + uint8_t status[0x8]; + uint8_t reserved_at_8[0x18]; + + uint8_t syndrome[0x20]; + + uint8_t reserved_at_40[0x40]; + + union dh_ifc_hca_cap_union_bits capability; +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/linux/dinghai/dinghai_irq.h b/include/linux/dinghai/dinghai_irq.h new file mode 100755 index 000000000000..17fc37e81dda --- /dev/null +++ b/include/linux/dinghai/dinghai_irq.h @@ -0,0 +1,35 @@ +#ifndef __DINGHAI_IRQ_H__ +#define __DINGHAI_IRQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define DH_COMP_EQS_PER_SF 8 + +struct dh_irq; + +int32_t dh_irq_table_init(struct dh_core_dev *dev); +void dh_irq_table_cleanup(struct dh_core_dev *dev); +int32_t dh_irq_table_create(struct dh_core_dev *dev); +void dh_irq_table_destroy(struct dh_core_dev *dev); +void dh_irqs_release_vectors(struct dh_irq **irqs, int32_t nirqs); +int32_t dh_irq_attach_nb(struct dh_irq *irq, struct notifier_block *nb); +int32_t dh_irq_detach_nb(struct dh_irq *irq, struct notifier_block *nb); +struct cpumask *dh_irq_get_affinity_mask(struct dh_irq *irq); +int32_t dh_irq_get_index(struct dh_irq *irq); + +struct dh_irq_pool; + +int32_t dh_irq_affinity_irqs_request_auto(struct dh_irq_pool *pool, struct dh_irq **irqs, int32_t num_irqs, int numa); +struct dh_irq *dh_irq_affinity_request(struct dh_irq_pool *pool, const struct cpumask *req_mask); +void dh_irq_affinity_irqs_release(struct dh_irq_pool *pool, struct dh_irq **irqs, int32_t num_irqs); + +#ifdef __cplusplus +} 
+#endif + +#endif diff --git a/include/linux/dinghai/driver.h b/include/linux/dinghai/driver.h new file mode 100755 index 000000000000..1bde47f331d1 --- /dev/null +++ b/include/linux/dinghai/driver.h @@ -0,0 +1,416 @@ +#ifndef __DINGHAI_DRIVER_H__ +#define __DINGHAI_DRIVER_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include + +#define ZXDH_MODULE_ID 1 +#define ZXDH_MAJOR 1 //新固件与新驱动同时不兼容旧版本时+1 +#define ZXDH_FW_MINOR 0 //新固件兼容旧驱动,新驱动不兼容旧固件时+1 +#define ZXDH_DRV_MINOR 0 //新固件不兼容旧驱动,新驱动兼容旧固件时+1 +#define ZXDH_PATCH 0 + +#define DH_NEW_QUEEU_ALLOC_PATCH (3) +#define DH_HPIRQ_PATCH (4) + +#define ZXDH_CHECK_RET_RETURN(ret, fmt, arg...) \ +do \ +{ \ + if ((ret) != 0) \ + { \ + LOG_ERR(fmt, ##arg); \ + return (ret); \ + } \ +} while (0) + +#define ZXDH_CHECK_RET_GOTO_ERR(ret, err, fmt, arg...) \ +do \ +{ \ + if ((ret) != 0) \ + { \ + LOG_ERR(fmt, ##arg); \ + goto err; \ + } \ +} while (0) + +struct dh_irq_table { + void *priv; +}; + +/*core dev*/ +enum dh_coredev_type { + DH_COREDEV_PF, + DH_COREDEV_VF, + DH_COREDEV_SF, + DH_COREDEV_MPF +}; + +enum { + ZXDH_DROP_NEW_HEALTH_WORK, + ZXDH_UNLOAD, + ZXDH_LOAD, + ZXDH_REMOVE, +}; + +typedef void (*zxdh_cmd_cbk_t)(int32_t status, void *context); + +struct dh_core_dev; + +struct dh_core_devlink_ops { + int32_t (*params_register)(struct devlink *devlink); + int32_t (*params_unregister)(struct devlink *devlink); +}; + +enum zxdh_device_state { + ZXDH_DEVICE_STATE_UNINITIALIZED, + ZXDH_DEVICE_STATE_UP, + ZXDH_DEVICE_STATE_INTERNAL_ERROR, + ZXDH_DEVICE_STATE_OPENED, +}; + +struct dh_core_dev { + struct device *device; /* pdev->dev or zxdh auxiliary device*/ + enum dh_coredev_type coredev_type; + enum zxdh_device_state device_state; + uint8_t driver_process; + uint8_t rsv; + struct pci_dev *pdev; /* parent pdev*/ + struct pcie_zf_ep *zf_ep; + struct dh_eq_table eq_table; + struct dh_irq_table irq_table; + struct dh_core_dev *parent; + struct dh_events *events; + int32_t 
numa_node; + struct devlink *devlink; + struct mutex lock; + struct dh_core_devlink_ops *devlink_ops; + char priv[] __aligned(32); +}; + +#define VF_MAX_UNICAST_MAC 128 +#define VF_MAX_MULTICAST_MAC 32 + +typedef struct +{ + uint8_t mac_addr[ETH_ALEN]; + uint8_t dhtool_mac_set_flag; /* 标识是否为dhtool配置mac */ +}k_mac_addr; + +typedef struct +{ + k_mac_addr unicast_mac[VF_MAX_UNICAST_MAC]; + k_mac_addr multicast_mac[VF_MAX_MULTICAST_MAC]; + uint32_t current_unicast_num; + uint32_t current_multicast_num; +}device_mac; + +typedef struct +{ + uint8_t mac_addr[ETH_ALEN]; +}tmp_mac; + +struct recover_mac +{ + tmp_mac umac[VF_MAX_UNICAST_MAC]; + tmp_mac mmac[VF_MAX_MULTICAST_MAC]; + uint32_t umac_num; + uint32_t mmac_num; +}; + +struct zxdh_vf_item { + uint8_t mac[6]; + uint16_t vlan; + uint8_t qos; + bool spoofchk; + bool trusted; + bool pf_set_mac; + bool link_forced; + bool link_up; + bool promisc; + bool mc_promisc; + bool enable; + uint32_t min_tx_rate; + uint32_t max_tx_rate; + uint16_t vf_rate_mode; + uint16_t vport; + bool is_probed; + device_mac vf_mac_info; + uint16_t vlan_proto; + void *init_np_stats; + struct mutex lock; +}; + +struct zxdh_en_vport_np_stats +{ + uint64_t rx_vport_unicast_packets; + uint64_t tx_vport_unicast_packets; + uint64_t rx_vport_unicast_bytes; + uint64_t tx_vport_unicast_bytes; + uint64_t rx_vport_multicast_packets; + uint64_t tx_vport_multicast_packets; + uint64_t rx_vport_multicast_bytes; + uint64_t tx_vport_multicast_bytes; + uint64_t rx_vport_broadcast_packets; + uint64_t tx_vport_broadcast_packets; + uint64_t rx_vport_broadcast_bytes; + uint64_t tx_vport_broadcast_bytes; + uint64_t rx_vport_mtu_drop_packets; + uint64_t tx_vport_mtu_drop_packets; + uint64_t rx_vport_mtu_drop_bytes; + uint64_t tx_vport_mtu_drop_bytes; + uint64_t rx_vport_plcr_drop_packets; + uint64_t tx_vport_plcr_drop_packets; + uint64_t rx_vport_plcr_drop_bytes; + uint64_t tx_vport_plcr_drop_bytes; + uint64_t tx_vport_ssvpc_packets; // switch security violation packet 
count, only for PF. + uint64_t rx_vport_idma_drop_packets; // port to np drop (idma point not enough). +}; + +static inline bool dh_core_is_pf(const struct dh_core_dev *dev) +{ + return dev->coredev_type == DH_COREDEV_PF; +} + +static inline bool dh_core_is_vf(const struct dh_core_dev *dev) +{ + return dev->coredev_type == DH_COREDEV_VF; +} + +static inline void *dh_core_priv(struct dh_core_dev *dh_coredev) +{ + BUG_ON(!dh_coredev); + return &dh_coredev->priv; +} + +int32_t zxdh_cmd_exec(struct dh_core_dev *dev, void *in, int32_t in_size, void *out, + int32_t out_size); + +#define zxdh_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + zxdh_cmd_exec(dev, in, DH_ST_SZ_BYTES(ifc_cmd##_in), out, \ + DH_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define zxdh_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + uint32_t _out[DH_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + zxdh_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + +#define LOG_ERR(fmt, arg...) DH_LOG_ERR(MODULE_PF, fmt, ##arg); +#define LOG_INFO(fmt, arg...) DH_LOG_INFO(MODULE_PF, fmt, ##arg); +#define LOG_DEBUG(fmt, arg...) DH_LOG_DEBUG(MODULE_PF, fmt, ##arg); +#define LOG_WARN(fmt, arg...) DH_LOG_WARNING(MODULE_PF, fmt, ##arg); + +#define HEAL_ERR(fmt, arg...) DH_LOG_ERR(MODULE_HEAL, fmt, ##arg); +#define HEAL_INFO(fmt, arg...) DH_LOG_INFO(MODULE_HEAL, fmt, ##arg); +#define HEAL_DEBUG(fmt, arg...) 
DH_LOG_DEBUG(MODULE_HEAL, fmt, ##arg); + +#define ZXDH_EN_SF_NAME "zxdh_en" +#define ZXDH_EN_DEV_ID_NAME "en_aux" +#define ZXDH_PF_EN_SF_DEV_ID_NAME "pf_en_sf" +#define ZXDH_PF_NAME "dinghai10e" +#define ZXDH_MPF_EN_SF_DEV_ID_NAME "mpf_en_sf" +#define ZXDH_RDMA_DEV_NAME "rdma_aux" +#define ZXDH_SEC_DEV_NAME "sec_aux" + + +#define ZXDH_PF_BSI_VENDOR_ID 0x16c3 +#define ZXDH_PF_VENDOR_ID 0x1cf2 +#define ZXDH_PF_DEVICE_ID 0x8040 +#define ZXDH_VF_DEVICE_ID 0x8041 +#define ZXDH_INICA_BOND_DEVICE_ID 0x8045 +#define ZXDH_INICB_BOND_DEVICE_ID 0x8063 +#define ZXDH_INICC_BOND_DEVICE_ID 0x8066 +#define ZXDH_INICD_BOND0_DEVICE_ID 0x8075 +#define ZXDH_INICD_BOND1_DEVICE_ID 0x8078 +#define ZXDH_DPUA_BOND_DEVICE_ID 0x8047 +#define ZXDH_PF_DPUB_NOF_DEVICE_ID 0x804a +#define ZXDH_PF_DPUB_PF_DEVICE_ID 0x804b +#define ZXDH_PF_DPUB_INITIATOR1_DEVICE_ID 0x804c +#define ZXDH_PF_DPUB_INITIATOR2_DEVICE_ID 0x804d +#define ZXDH_PF_DPUB_RDMA_DEVICE_ID 0x806b +#define ZXDH_VF_DPUB_RDMA_DEVICE_ID 0x806c +#define ZXDH_PF_DPUB_SRIOV0_DEVICE_ID 0x8089 +#define ZXDH_PF_DPUB_SRIOV1_DEVICE_ID 0x808a + +#define ZXDH_PF_E310_DEVICE_ID 0x8061 +#define ZXDH_VF_E310_DEVICE_ID 0x8062 +#define ZXDH_PF_E310_CMCC_DEVICE_ID 0x80b0 +#define ZXDH_VF_E310_CMCC_DEVICE_ID 0x80b1 +#define ZXDH_PF_E312_DEVICE_ID 0x80a0 +#define ZXDH_VF_E312_DEVICE_ID 0x80a1 +#define ZXDH_UPF_PF_I512_DEVICE_ID 0x804e +#define ZXDH_UPF_VF_I512_DEVICE_ID 0x804f +#define ZXDH_INICA_RDMA_PF_DEVICE_ID 0x806d +#define ZXDH_INICA_RDMA_VF_DEVICE_ID 0x806e +#define ZXDH_INICA_UPF_BOND_DEVICE_ID 0x806f +#define ZXDH_PF_E316_DEVICE_ID 0x807e +#define ZXDH_VF_E316_DEVICE_ID 0x807f +#define ZXDH_PF_E311_DEVICE_ID 0x8080 +#define ZXDH_VF_E311_DEVICE_ID 0x8081 +/* Started by AICoder, pid:zd4a5r290f1c0c614f97090c20e5100a9f52dfca */ +#define ZXDH_PF_I511_DEVICE_ID 0x8082 +#define ZXDH_VF_I511_DEVICE_ID 0x8083 +/* Ended by AICoder, pid:zd4a5r290f1c0c614f97090c20e5100a9f52dfca */ +#define ZXDH_INICD_NE0_PF_DEVICE_ID 0x8076 +#define 
ZXDH_INICD_NE0_VF_DEVICE_ID 0x8077 +#define ZXDH_INICD_NE1_PF_DEVICE_ID 0x8079 +#define ZXDH_INICD_NE1_VF_DEVICE_ID 0x807A +/* Started by AICoder, pid:cb374346aeva52e1454b0bbf404b83093ba2d70f */ +#define ZXDH_INICD_NE2_PF_DEVICE_ID 0x807B +#define ZXDH_INICD_NE2_VF_DEVICE_ID 0x807C +/* Ended by AICoder, pid:cb374346aeva52e1454b0bbf404b83093ba2d70f */ + +/* e310 net rdma */ +#define ZXDH_PF_E310_RDMA_DEVICE_ID 0x8084 +#define ZXDH_VF_E310_RDMA_DEVICE_ID 0x8085 + +#define ZXDH_PF_E310S_DEVICE_ID 0x80b6 +#define ZXDH_VF_E310S_DEVICE_ID 0x80b7 + +#define ZXDH_PF_E312S_DEVICE_ID 0x807d +#define ZXDH_VF_E312S_DEVICE_ID 0x8088 + +#define ZXDH_PF_I510_SRIOV_SEC_DEVICE_ID 0x8086 +#define ZXDH_VF_I510_SRIOV_SEC_DEVICE_ID 0x8087 +/* e312 rdma */ +#define ZXDH_PF_E312_RDMA_DEVICE_ID 0x8049 +#define ZXDH_VF_E312_RDMA_DEVICE_ID 0x8060 +/* zxinic_i512_offload */ +#define ZXDH_PF_INICA_OFFLOAD_DEVICE_ID 0x80a4 + +/* B512Y-CTCZ100 */ +#define CTC_PF_B512Y_DEVICE_ID 0x1100 +#define CTC_VF_B512Y_DEVICE_ID 0x1101 + +/* B522Y-CTCZ100 */ +#define CTC_PF_B522Y_DEVICE_ID 0x1110 +#define CTC_VF_B522Y_DEVICE_ID 0x1111 + +/* CTC */ +#define CTC_PF_VENDOR_ID 0x1b18 + +/* XPU */ +#define ZXDH_PF_E316_XPU_DEVICE_ID 0x8601 +#define ZXDH_VF_E316_XPU_DEVICE_ID 0x8602 + +#define ZXDH_PF_E316_XPU_VENDER_ID 0x8848 + +#define ZXDH_PF_E312S_D_DEVICE_ID 0x80a2 +#define ZXDH_VF_E312S_D_DEVICE_ID 0x80a3 + +#define ZXDH_BAR_MSG_OFFSET 0x2000 +#define ZXDH_BAR_PFVF_MSG_OFFSET 0x1000 +#define ZXDH_BAR_MSG_BASE(vaddr) (ZXDH_BAR_MSG_OFFSET + vaddr) + +#define ZXDH_BAR_FWCAP(vaddr) (0x1000 + vaddr) +#define ZXDH_BOARD_TYPE (0X1) +#define ZXDH_PRODUCT_TYPE (0X2) +#define ZXDH_PANNEL_PORT_NUM (0X3) + +#define ZXDH_SWITCH_DEVICE_ID (0x8036) + +enum dh_board_type { + DH_DPUA,//x510 + DH_DPUB,//x512 + DH_INICA,//i512 + DH_INICB,//i510 + DH_STDA,//e312 e316 + DH_STDB,//e310 + DH_EVB_EP0, + DH_EVB_DPU, + DH_INICC,//i511 + DH_STDC,//e311 + DH_INICD,//vgcf + DH_STD_E312S, //e312s + DH_STD_E312S_D, //e312s_d + 
DH_STD_E310S, //e310s +}; + +#define IS_STD_BOARD(type) \ + ((type) == DH_STDA || (type) == DH_STDB || (type) == DH_STDC || (type) == DH_STD_E312S || \ + (type) == DH_STD_E312S_D || (type) == DH_STD_E310S) + +#define IS_INIC_BOARD(type) \ + ((type) == DH_INICA || (type) == DH_INICB || (type) == DH_INICC || \ + (type) == DH_DPUA || (type) == DH_DPUB) + +/* Warning: Must be modified together with firmware */ +enum +{ + ZXDH_PRODUCT_STD = 0, + ZXDH_PRODUCT_DPI = 1, + ZXDH_PRODUCT_NEO = 2, + ZXDH_PRODUCT_OVS = 3, + ZXDH_PRODUCT_EVB_EP0 = 4, + ZXDH_PRODUCT_EVB_EP0_EP4 = 5, +}; + +enum +{ + ZXDH_DEV_UNKNOW = 0, + ZXDH_DEV_UPF = 1, + ZXDH_DEV_NE0 = 2, + ZXDH_DEV_NE1 = 3, +}; + +#define ZXDH_VF_NUM_MAX 256 +#define ZXDH_PF_NUM_PER_EP 8 + +struct resource_range { + phys_addr_t base; + resource_size_t size; +}; + +struct dh_sf_dev { + struct zxdh_auxiliary_device adev; + struct dh_core_dev *parent_mdev; + struct dh_core_dev *mdev; + int32_t res_num; + struct resource_range *ranges; +}; + +struct zf_rbp_info +{ + bool host; /* true: host addr false: zf addr*/ + uint32_t pfid; /* bit7: 0-pf 1-vf */ + uint32_t vfid; + uint32_t epid; +}; + +struct zf_dma_addr_rbp +{ + dma_addr_t addr; /* src/dst addr */ + uint32_t flag; /* no support */ + struct zf_rbp_info rbp_info; +}; + +void zxdh_dev_list_lock(void); +void zxdh_dev_list_unlock(void); +int zxdh_dev_list_trylock(void); +int zxdh_health_init(struct dh_core_dev *dev); +int dh_pf_wait_riscv_ready(struct dh_core_dev *dh_dev); +void zxdh_health_cleanup(struct dh_core_dev *dev); +void zxdh_drain_health_wq(struct dh_core_dev *dev); + +enum { + act_health_info_show, + act_bbx_log_dump, + act_reset, + act_reload, +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/en_aux.h b/include/linux/dinghai/en_aux.h new file mode 100644 index 000000000000..ade219013782 --- /dev/null +++ b/include/linux/dinghai/en_aux.h @@ -0,0 +1,71 @@ +#ifndef _EN_ZUX_H_ +#define _EN_ZUX_H_ + +#ifdef __cplusplus +extern "C" { 
+#endif + + +struct eth_stats +{ + uint64_t rx_pkts; /* gorc */ + uint64_t rx_bytes; /* gorc */ + uint64_t rx_unicast; /* uprc */ + uint64_t rx_multicast; /* mprc */ + uint64_t rx_broadcast; /* bprc */ + uint64_t rx_discards; /* rdpc */ + uint64_t rx_errors; /* rupp */ + uint64_t tx_pkts; /* gorc */ + uint64_t tx_bytes; /* gotc */ + uint64_t tx_unicast; /* uptc */ + uint64_t tx_multicast; /* mptc */ + uint64_t tx_broadcast; /* bptc */ + uint64_t tx_discards; /* tdpc */ + uint64_t tx_errors; /* tepc */ + uint64_t rx_size_64; /* prc64 */ + uint64_t rx_size_127; /* prc127 */ + uint64_t rx_size_255; /* prc255 */ + uint64_t rx_size_511; /* prc511 */ + uint64_t rx_size_1023; /* prc1023 */ + uint64_t rx_size_1518; + uint64_t rx_size_1522; /* prc1522 */ + uint64_t rx_size_1548; + uint64_t rx_size_2047; + uint64_t rx_size_4095; + uint64_t rx_size_8191; + uint64_t rx_size_9215; + uint64_t rx_undersize; /* ruc */ + uint64_t rx_oversize; /* roc */ + uint64_t tx_size_64; /* ptc64 */ + uint64_t tx_size_127; /* ptc127 */ + uint64_t tx_size_255; /* ptc255 */ + uint64_t tx_size_511; /* ptc511 */ + uint64_t tx_size_1023; /* ptc1023 */ + uint64_t tx_size_1518; + uint64_t tx_size_1522; /* prc1522 */ + uint64_t tx_size_1548; + uint64_t tx_size_2047; + uint64_t tx_size_4095; + uint64_t tx_size_8191; + uint64_t tx_size_9215; + uint64_t tx_undersize; /* ruc */ + uint64_t tx_oversize; /* roc */ +}; + +struct zxdh_pf_eth_stats +{ + struct eth_stats eth_total_stat; + struct eth_stats mac0_stat; + struct eth_stats mac1_stat; + struct eth_stats mac2_stat; + struct eth_stats mac3_stat; +}; + +int32_t zxdh_en_driver_register(void); +void zxdh_en_driver_unregister(void); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/en_sf.h b/include/linux/dinghai/en_sf.h new file mode 100755 index 000000000000..c7a45e7610d9 --- /dev/null +++ b/include/linux/dinghai/en_sf.h @@ -0,0 +1,118 @@ +#ifndef __ZXDH_EN_SF_H__ +#define __ZXDH_EN_SF_H__ + +#ifdef __cplusplus +extern "C" { 
+#endif + +#include +#include + +struct zxdh_en_sf_if { + int (*en_sf_vq_irqs_request)(struct dh_core_dev *dh_dev, struct dh_irq **vq_irqs, int vq_channels, void *data); + void (*en_sf_affinity_irqs_release)(struct dh_core_dev *dh_dev, struct dh_irq **irqs, int32_t num_irqs); + void __iomem * (*en_sf_map_vq_notify)(struct dh_core_dev *dh_dev, uint32_t index, resource_size_t *pa); + void (*en_sf_unmap_vq_notify)(struct dh_core_dev *dh_dev, void *priv); + void (*en_sf_set_status)(struct dh_core_dev *dh_dev, uint8_t status); + uint8_t (*en_sf_get_status)(struct dh_core_dev *dh_dev); + uint8_t (*en_sf_get_cfg_gen)(struct dh_core_dev *dh_dev); + bool (*en_sf_get_rp_link_status)(struct dh_core_dev *dh_dev); + void (*en_sf_set_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id); + void (*en_sf_get_vf_mac)(struct dh_core_dev *dh_dev, uint8_t *mac, int32_t vf_id); + void (*en_sf_set_mac)(struct dh_core_dev *dh_dev, uint8_t *mac); + void (*en_sf_get_mac)(struct dh_core_dev *dh_dev, uint8_t *mac); + uint64_t (*en_sf_get_features)(struct dh_core_dev *dh_dev); + void (*en_sf_set_features)(struct dh_core_dev *dh_dev, uint64_t features); + void (*en_sf_set_queue_enable)(struct dh_core_dev *dh_dev, uint16_t index, bool enable); + uint16_t (*en_sf_get_channels_num)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_queue_num)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_queue_size)(struct dh_core_dev *dh_dev, uint16_t index); + uint16_t (*en_sf_get_queue_vector)(struct dh_core_dev *dh_dev, uint16_t channel, struct list_head *eqs_list, uint16_t queue_index, uint16_t vq_idx); + void (*en_sf_release_queue_vector)(struct dh_core_dev *dh_dev, int32_t queue_index); + void (*en_sf_set_queue_size)(struct dh_core_dev *dh_dev, uint32_t index, uint16_t size); + void (*en_sf_set_queue_address)(struct dh_core_dev *dh_dev, uint32_t index, uint64_t desc_addr, uint64_t driver_addr, uint64_t device_addr); + void (*en_sf_switch_irq)(struct dh_core_dev *dh_dev, int32_t i, int32_t 
op); + int32_t (*en_sf_get_vq_lock)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_release_vq_lock)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_find_valid_vqs)(struct dh_core_dev *dh_dev, uint16_t vq_cnt, int32_t *phy_index); + int32_t (*en_sf_write_vqs_bit)(struct dh_core_dev *dh_dev, uint16_t vq_cnt, uint32_t *phy_index); + int32_t (*en_sf_write_queue_tlb)(struct dh_core_dev *dh_dev, uint16_t vq_cnt, uint32_t *phy_index, bool need_msgq); + uint16_t (*en_sf_get_fw_patch)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_epbdf)(struct dh_core_dev *dh_dev); + uint64_t (*en_sf_get_spec_sbdf)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_multi_ep)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_vport)(struct dh_core_dev *dh_dev); + enum dh_coredev_type (*en_sf_get_coredev_type)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_pcie_id)(struct dh_core_dev *dh_dev); + uint16_t (*en_sf_get_slot_id)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_bond)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_upf)(struct dh_core_dev *dh_dev); + struct pci_dev * (*en_sf_get_pdev)(struct dh_core_dev *dh_dev); + uint64_t (*en_sf_get_bar_virt_addr)(struct dh_core_dev *dh_dev, uint8_t bar_num); + uint64_t (*en_sf_get_bar_phy_addr)(struct dh_core_dev *dh_dev, uint8_t bar_num); + uint64_t (*en_sf_get_bar_size)(struct dh_core_dev *dh_dev, uint8_t bar_num); + int32_t (*en_sf_msg_send_cmd)(struct dh_core_dev *dh_dev, uint16_t module_id, void *msg, \ + void *ack, struct zxdh_bar_extra_para *para); + int32_t (*en_sf_async_eq_enable)(struct dh_core_dev *dh_dev, struct dh_eq_async *eq, const char *name, bool attach); + void (*en_sf_nh_attach)(struct dh_core_dev *dev, struct dh_nb *nb, bool attach); + struct zxdh_vf_item *(*en_sf_get_vf_item)(struct dh_core_dev *dh_dev, uint16_t vf_idx); + void (*en_sf_set_pf_link_up)(struct dh_core_dev *dh_dev, bool link_up); + bool (*en_sf_get_pf_link_up)(struct dh_core_dev *dh_dev); + void (*en_sf_update_pf_link_info)(struct dh_core_dev *dh_dev, 
struct link_info_struct *link_info_val); + int32_t (*en_sf_get_drv_msg)(struct dh_core_dev *dh_dev, uint8_t *drv_version, uint8_t *drv_version_len); + void (*en_sf_set_vepa)(struct dh_core_dev *dh_dev, bool vepa); + bool (*en_sf_get_vepa)(struct dh_core_dev *dh_dev); + void (*en_sf_set_bond_num)(struct dh_core_dev *dh_dev, bool add); + bool (*en_sf_if_init)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_request_port_info)(struct dh_core_dev *dh_dev, void *data); + int32_t (*en_sf_release_port_info)(struct dh_core_dev *dh_dev, uint32_t port_id); + void (*en_sf_get_link_info_from_vqm)(struct dh_core_dev *dh_dev, uint8_t *link_up); + void (*en_sf_set_vf_link_info)(struct dh_core_dev *dh_dev, uint16_t vf_idx, uint8_t link_up); + bool (*en_sf_get_vf_is_probe)(struct dh_core_dev *dh_dev, uint16_t vf_idx); + void (*en_sf_set_pf_phy_port)(struct dh_core_dev *dh_dev, uint8_t phy_port); + uint8_t (*en_sf_get_pf_phy_port)(struct dh_core_dev *dh_dev); + void (*en_sf_set_init_comp_flag)(struct dh_core_dev *dh_dev, uint8_t flag); + int32_t (*en_sf_events_call_chain)(struct dh_core_dev *dh_dev, unsigned long type, void *data); + struct zxdh_ipv6_mac_tbl * (*en_sf_get_ip6mac_tbl)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_nic)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_special_bond)(struct dh_core_dev *dh_dev); + uint8_t (*en_sf_get_queue_pairs)(struct dh_core_dev *dh_dev); + struct zxdh_core_health * (*en_sf_get_core_health)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_get_cpl_timeout_if_mask)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_set_cpl_timeout_mask)(struct dh_core_dev *dh_dev, uint32_t mask); + int32_t (*en_sf_get_hp_irq_ctrl_status)(struct dh_core_dev *dh_dev); + int32_t (*en_sf_set_hp_irq_ctrl_status)(struct dh_core_dev *dh_dev, uint32_t status); + bool (*en_sf_is_rdma_enable)(struct dh_core_dev *dh_dev); + uint32_t (*en_sf_get_dev_type)(struct dh_core_dev *dh_dev); + bool (*en_sf_pf_suport_np_ext_stats)(struct dh_core_dev *dh_dev); + struct 
zxdh_np_ext_stats * (*en_sf_get_np_ext_stats)(struct dh_core_dev *dh_dev, uint8_t panel_id); + bool (*en_sf_is_drs_sec_enable)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_pf_rate_enable)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_fw_feature_support)(struct dh_core_dev *dh_dev, uint32_t feature); + uint16_t (*en_sf_get_ovs_pf_vfid)(struct dh_core_dev *dh_dev); + uint8_t (*en_sf_get_board_type)(struct dh_core_dev *dh_dev); + bool (*en_sf_is_hwbond)(struct dh_core_dev *dh_dev, bool is_hwbond, bool update_pf); + bool (*en_sf_is_rdma_aux_plug)(struct dh_core_dev *dh_dev, bool is_rdma_aux_plug, bool update_pf); + bool (*en_sf_is_primary_port)(struct dh_core_dev *dh_dev, bool is_primary_port, bool update_pf); + void (*en_sf_optim_hardware_bond_time)(struct dh_core_dev *dh_dev, bool enable); + int32_t (*en_sf_update_hb_file_val)(struct dh_core_dev *dh_dev, uint64_t spec_sbdf, const char *file_name, bool flag); + +}; + +struct zxdh_en_sf_container { + struct zxdh_auxiliary_device adev; + struct dh_core_dev *dh_dev; + struct dh_core_dev *cdev; + struct zxdh_en_sf_if *ops; + int max_channels; +}; + +int32_t zxdh_en_sf_driver_register(void); +void zxdh_en_sf_driver_unregister(void); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/eq.h b/include/linux/dinghai/eq.h new file mode 100755 index 000000000000..1a55b2afbf47 --- /dev/null +++ b/include/linux/dinghai/eq.h @@ -0,0 +1,117 @@ +#ifndef __DINGHAI_EQ_H__ +#define __DINGHAI_EQ_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +struct dh_irq; + +#define INVALID_EVENT_TYPE 0 + +enum DH_EVENT_TYPE { + DH_EVENT_TYPE_RISCV_READY = 13, + DH_EVENT_TYPE_NOTIFY_ANY = 14, + DH_EVENT_TYPE_NOTIFY_RISC_TO_MPF = 15, + DH_EVENT_TYPE_NOTIFY_PF_TO_MPF = 16, + DH_EVENT_TYPE_NOTIFY_VF_TO_PF = 18, + DH_EVENT_TYPE_NOTIFY_PF_TO_VF = 19, + DH_EVENT_TYPE_NOTIFY_1 = 20, + DH_EVENT_TYPE_NOTIFY_2 = 21, + DH_EVENT_TYPE_NOTIFY_3 = 22, + DH_EVENT_TYPE_NOTIFY_4 = 23, + 
DH_EVENT_TYPE_NOTIFY_RISCV_TO_AUX = 24, + DH_EVENT_TYPE_NOTIFY_RISC_EXT_PPS = 25, + DH_EVENT_TYPE_NOTIFY_RISC_LOCAL_PPS = 26, + DH_EVENT_TYPE_AUX_UNLOAD = 27, + DH_EVENT_TYPE_AUX_LOAD = 28, + DH_EVENT_TYPE_AUX_EVENT = 29, + DH_EVENT_TYPE_AUX_STATE = 30, + DH_EVENT_TYPE_MAX = 100, +}; + +/* eq core */ +struct dh_eq { + struct dh_core_dev *dev; + __be32 __iomem *doorbell; + uint32_t cons_index; + struct dh_irq *irq; /* interrupt core */ +}; + +/* asynchronous interrupt */ +struct dh_eq_async { + struct dh_eq core; + struct notifier_block irq_nb; /* interrupt: notification chain related to the event queue*/ + void *priv; + spinlock_t lock; /* To avoid irq EQ handle races with resiliency flows */ +}; + +struct dh_eq_param { + int32_t nent; /* queue depth */ + enum dh_event_queue_type event_type; + struct dh_irq *irq; /* interrupt associated with the event queue*/ +}; + +/* event type in the event queue */ +struct dh_nb { + struct notifier_block nb; + int32_t event_type; +}; + +struct dh_irq_table; + +struct dh_eq_table { + struct atomic_notifier_head nh[DH_EVENT_TYPE_MAX]; + struct mutex lock; + struct dh_irq_table *irq_table; + void *priv; +}; + +struct dh_eq_vq { + struct dh_eq core; + struct notifier_block irq_nb; + void *para; +}; + +struct dh_eq_vqs { + struct dh_eq_vq vq_s; + struct list_head vqs; + struct list_head list; +}; + +#define DH_NB_INIT(name, handler, event) do { \ + (name)->nb.notifier_call = handler; \ + (name)->event_type = DH_EVENT_TYPE_##event; \ +} while (0) + +void dh_eq_table_cleanup(struct dh_core_dev *dev); +void dh_eq_table_init(struct dh_core_dev *dev, void *table_priv); +int32_t setup_async_eq(struct dh_core_dev *dev, struct dh_eq_async *eq, + struct dh_eq_param *param, notifier_fn_t dh_eq_async_int, const char *name, void *priv); +int32_t dh_inet6_addr_change_notifier_register(struct notifier_block *inet6_addr_change_notifier); +int32_t dh_inet6_addr_change_notifier_unregister(struct notifier_block *inet6_addr_change_notifier); +int32_t 
dh_vxlan_netdev_change_notifier_register(struct notifier_block *vxlan_netdev_change_notifier); +int32_t dh_vxlan_netdev_change_notifier_unregister(struct notifier_block *vxlan_netdev_change_notifier); +int32_t dh_eq_notifier_register(struct dh_eq_table *eqt, struct dh_nb *nb); +void dh_eq_disable(struct dh_core_dev *dev, struct dh_eq *eq, struct notifier_block *nb); +int32_t dh_eq_enable(struct dh_core_dev *dev, struct dh_eq *eq, struct notifier_block *nb); +int32_t dh_eq_notifier_unregister(struct dh_eq_table *eqt, struct dh_nb *nb); +uint16_t dh_eq_event_type_get(uint16_t event_id); + +typedef int32_t (*zxdh_callchain_cbk_t)(struct notifier_block *nb, unsigned long action, void *data); + +struct dh_vq_handler { + zxdh_callchain_cbk_t callback; + void *para; +}; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/events.h b/include/linux/dinghai/events.h new file mode 100755 index 000000000000..ac10533fced1 --- /dev/null +++ b/include/linux/dinghai/events.h @@ -0,0 +1,35 @@ +#ifndef __ZXDH_EVENTS_H__ +#define __ZXDH_EVENTS_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +struct dh_event_nb { + struct dh_nb nb; + void *ctx; +}; + +struct dh_events { + struct dh_core_dev *dev; + struct workqueue_struct *wq; + int32_t evt_num; + struct dh_event_nb notifiers[0]; +}; + +void zxdh_events_work_enqueue(struct dh_core_dev *dev, struct work_struct *work); +void zxdh_events_cleanup(struct dh_core_dev *dev); + +#define dh_nb_cof(ptr, type, member) \ + (container_of(container_of(ptr, struct dh_nb, nb), type, member)) + + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/linux/dinghai/helper.h b/include/linux/dinghai/helper.h new file mode 100755 index 000000000000..9495e5215d10 --- /dev/null +++ b/include/linux/dinghai/helper.h @@ -0,0 +1,81 @@ +#ifndef DINGHAI_HELPER_H +#define DINGHAI_HELPER_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef HAVE_DEV_PRINTK_OPS 
+#include +#endif +#include +#include + +extern uint32_t dh_debug_mask; +#define dh_dbg(__dev, format, ...) \ + dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_dbg_once(__dev, format, ...) \ + dev_dbg_once((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_dbg_mask(__dev, mask, format, ...) \ +do { \ + if ((mask) & dh_debug_mask) \ + dh_dbg(__dev, format, ##__VA_ARGS__); \ +} while (0) + +#define dh_err(__dev, format, ...) \ + dev_err((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_err_rl(__dev, format, ...) \ + dev_err_ratelimited((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_warn_once(__dev, format, ...) \ + dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_warn_rl(__dev, format, ...) \ + dev_warn_ratelimited((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define dh_info(__dev, format, ...) \ + dev_info((__dev)->device, format, ##__VA_ARGS__) + +#define dh_info_rl(__dev, format, ...) 
\ + dev_info_ratelimited((__dev)->device, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +enum { + ZXDH_PCI_DEV_IS_VF = 1 << 0, +}; + +static inline bool dh_core_is_sf(const struct dh_core_dev *dev) +{ + return dev->coredev_type == DH_COREDEV_SF; +} + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/linux/dinghai/kcompat.h b/include/linux/dinghai/kcompat.h new file mode 100644 index 000000000000..40334f15022e --- /dev/null +++ b/include/linux/dinghai/kcompat.h @@ -0,0 +1,7330 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2021 Intel Corporation. */ + +#ifndef _KCOMPAT_H_ +#define _KCOMPAT_H_ + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifndef GCC_VERSION +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) +#endif /* GCC_VERSION */ + +/* Backport macros for controlling GCC diagnostics */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0) ) + +/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */ +#if GCC_VERSION >= 40600 +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) +#else +#define __diag(s) +#endif /* GCC_VERSION >= 4.6 */ +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) +#endif /* LINUX_VERSION < 4.18.0 */ + +#ifndef NSEC_PER_MSEC +#define NSEC_PER_MSEC 1000000L +#endif +#include +/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,33) ) +#include +#else +#include +#endif +#endif + +/* NAPI enable/disable flags here */ +#define NAPI + +#ifdef DISABLE_NET_POLL_CONTROLLER +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +/* generic boolean compatibility */ +#undef TRUE +#undef FALSE +#define TRUE true +#define FALSE false +#ifdef GCC_VERSION +#if ( GCC_VERSION < 3000 ) +#define _Bool char +#endif +#else +#define _Bool char +#endif + +#ifndef BIT +#define BIT(nr) (1UL << (nr)) +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#undef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) + +/* kernels less than 2.4.14 don't have this */ +#ifndef ETH_P_8021Q +#define ETH_P_8021Q 0x8100 +#endif + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef PCI_CAP_ID_EXP +#define PCI_CAP_ID_EXP 0x10 +#endif + +#ifndef uninitialized_var +#define uninitialized_var(x) x = x +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#define free_netdev(x) kfree(x) +#endif + +#ifdef HAVE_POLL_CONTROLLER +#define CONFIG_NET_POLL_CONTROLLER +#endif + +#ifndef SKB_DATAREF_SHIFT +/* if we do not have the infrastructure to detect if skb_header is cloned + just return false in all cases */ +#define skb_header_cloned(x) 0 +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef NETIF_F_GRO +#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ + vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) +#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) +#endif + +#ifndef NETIF_F_LRO +#define NETIF_F_LRO BIT(15) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE BIT(27) +#endif + +#ifndef 
NETIF_F_ALL_FCOE +#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ + NETIF_F_FSO) +#endif + +#ifndef IPPROTO_SCTP +#define IPPROTO_SCTP 132 +#endif + +#ifndef IPPROTO_UDPLITE +#define IPPROTO_UDPLITE 136 +#endif + +#ifndef CHECKSUM_PARTIAL +#define CHECKSUM_PARTIAL CHECKSUM_HW +#define CHECKSUM_COMPLETE CHECKSUM_HW +#endif + +#ifndef unlikely +#define unlikely(_x) _x +#define likely(_x) _x +#endif + +#ifndef WARN_ON +#define WARN_ON(x) +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +#ifndef node_online +#define node_online(node) ((node) == 0) +#endif + +// #ifndef cpu_online +// #define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) +// #endif + +#ifndef _LINUX_RANDOM_H +#include +#endif + +#ifndef BITS_PER_TYPE +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#endif + +#ifndef BITS_TO_LONGS +#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) +#endif + +#ifndef DECLARE_BITMAP +#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] +#endif + +#ifndef VLAN_HLEN +#define VLAN_HLEN 4 +#endif + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#if defined(__i386__) || defined(__x86_64__) +#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#endif +#endif + +/* taken from 2.6.24 definition in linux/kernel.h */ +#ifndef IS_ALIGNED +#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) +#endif + +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) +struct _kc_vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_ethhdr _kc_vlan_ethhdr +struct _kc_vlan_hdr { + __be16 h_vlan_TCI; + __be16 h_vlan_encapsulated_proto; +}; +#define vlan_hdr _kc_vlan_hdr +#define vlan_tx_tag_present(_skb) 0 +#define vlan_tx_tag_get(_skb) 0 +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_2_5GB +#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_5_0GB +#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 +#endif + +#ifndef PCI_EXP_LNKSTA_CLS_8_0GB +#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X1 +#define PCI_EXP_LNKSTA_NLW_X1 0x0010 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X2 +#define PCI_EXP_LNKSTA_NLW_X2 0x0020 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X4 +#define PCI_EXP_LNKSTA_NLW_X4 0x0040 +#endif + +#ifndef PCI_EXP_LNKSTA_NLW_X8 +#define PCI_EXP_LNKSTA_NLW_X8 0x0080 +#endif + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef IP_OFFSET +#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ +#endif + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef 
ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = BIT(0), + ETH_TEST_FL_FAILED = BIT(1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GWOL +#define ETHTOOL_GWOL 0x5 +#define ETHTOOL_SWOL 0x6 +#define SOPASS_MAX 6 +struct ethtool_wolinfo { + u32 cmd; + u32 supported; + u32 wolopts; + u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; +#endif /* ETHTOOL_GWOL */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ + +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; + + u32 rx_coalesce_usecs; + + u32 rx_max_coalesced_frames; + + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + u32 tx_coalesce_usecs; + + u32 tx_max_coalesced_frames; + + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + u32 stats_block_coalesce_usecs; + + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ + +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; + + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. 
*/ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ + +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. 
*/ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +#ifndef WAKE_FILTER +#define WAKE_FILTER BIT(7) +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif +#ifndef KYLIN_RELEASE_VERSION +#define KYLIN_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 +#endif + +#ifndef KYLIN_RELEASE_CODE +#define KYLIN_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. 
+ */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. + * + * Ex: + * + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * + * $uname -r + * 3.16.0-23-generic + * ABI is 23 + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) + +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif + +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. 
This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. + */ +#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) + +/* SuSE version macros are the same as Linux kernel version macro */ +#ifndef SLE_VERSION +#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) +#endif +#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) +#ifdef CONFIG_SUSE_KERNEL +#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,0,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) +/* SLES11 SP1 is 2.6.32 based */ +#define SLE_VERSION_CODE SLE_VERSION(11,1,0) +#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) +/* SLES11 SP2 GA is 3.0.13-0.27 */ +#define SLE_VERSION_CODE SLE_VERSION(11,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) +/* SLES11 SP3 GA is 3.0.76-0.11 */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) + #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) + /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ + #define SLE_VERSION_CODE SLE_VERSION(11,2,0) + #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) + /* most SLES11sp3 update kernels */ + #define SLE_VERSION_CODE SLE_VERSION(11,3,0) + #else + /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ + #define SLE_VERSION_CODE SLE_VERSION(11,4,0) + #endif +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) +/* SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy] */ +#define SLE_VERSION_CODE SLE_VERSION(12,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) +/* SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} */ +#define SLE_VERSION_CODE SLE_VERSION(12,1,0) +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,21) && \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(4,4,59))) || \ + 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,74) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(92,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(93,0,0))) +/* SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,2,0) +#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(4,4,73) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,82) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4,4,92)) || \ + (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,103) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,33,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(6,38,0))) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,114) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(94,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(95,0,0)) ) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y */ +#define SLE_VERSION_CODE SLE_VERSION(12,3,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(94,41,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(95,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(96,0,0)))) +/* SLES12 SP4 GM is 4.12.14-94.41 and update kernel is 4.12.14-95.x. 
*/ +#define SLE_VERSION_CODE SLE_VERSION(12,4,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + (SLE_LOCALVERSION_CODE == KERNEL_VERSION(23,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(2,0,0) || \ + SLE_LOCALVERSION_CODE == KERNEL_VERSION(136,0,0) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(26,0,0)) || \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(150,0,0) && \ + SLE_LOCALVERSION_CODE < KERNEL_VERSION(151,0,0)))) +/* SLES15 Beta1 is 4.12.14-2 + * SLES15 GM is 4.12.14-23 and update kernel is 4.12.14-{25,136}, + * and 4.12.14-150.14. + */ +#define SLE_VERSION_CODE SLE_VERSION(15,0,0) +#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,12,14) && \ + SLE_LOCALVERSION_CODE >= KERNEL_VERSION(25,23,0)) +/* SLES15 SP1 Beta1 is 4.12.14-25.23 */ +#define SLE_VERSION_CODE SLE_VERSION(15,1,0) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,13)) +/* SLES15 SP2 Beta1 is 5.3.13 */ +#define SLE_VERSION_CODE SLE_VERSION(15,2,0) + +/* new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ + +/* + * ADQ depends on __TC_MQPRIO_MODE_MAX and related kernel code + * added around 4.15. Some distributions (e.g. Oracle Linux 7.7) + * have done a partial back-port of that to their kernels based + * on older mainline kernels that did not include all the necessary + * kernel enablement to support ADQ. + * Undefine __TC_MQPRIO_MODE_MAX for all OSV distributions with + * kernels based on mainline kernels older than 4.15 except for + * RHEL, SLES and Ubuntu which are known to have good back-ports. 
+ */ +#if (!RHEL_RELEASE_CODE && !SLE_VERSION_CODE && !UBUNTU_VERSION_CODE) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + #undef __TC_MQPRIO_MODE_MAX + #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(4,15,0) */ +#endif /* if (NOT RHEL && NOT SLES && NOT UBUNTU) */ + +#ifdef __KLOCWORK__ +/* The following are not compiled into the binary driver; they are here + * only to tune Klocwork scans to workaround false-positive issues. + */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) +#define memset(dest, ch, len) memset_s(dest, len, ch, len) + +static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old & ~mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) + +static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old; + unsigned long flags = 0; + + _atomic_spin_lock_irqsave(p, flags); + old = *p; + *p = old | mask; + _atomic_spin_unlock_irqrestore(p, flags); + + return (old & mask) != 0; +} +#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) + +#ifdef CONFIG_DYNAMIC_DEBUG +#undef dev_dbg +#define dev_dbg(dev, format, arg...) dev_printk(KERN_DEBUG, dev, format, ##arg) +#undef pr_debug +#define pr_debug(format, arg...) 
printk(KERN_DEBUG format, ##arg) +#endif /* CONFIG_DYNAMIC_DEBUG */ + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (n = NULL, pos = hlist_entry_safe((head)->first, typeof(*(pos)), \ + member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif + +#ifdef WRITE_ONCE +#undef WRITE_ONCE +#define WRITE_ONCE(x, val) ((x) = (val)) +#endif /* WRITE_ONCE */ +#endif /* __KLOCWORK__ */ + +#include "kcompat_vfd.h" +struct vfd_objects *create_vfd_sysfs(struct pci_dev *pdev, int num_alloc_vfs); +void destroy_vfd_sysfs(struct pci_dev *pdev, struct vfd_objects *vfd_obj); + +/* Older versions of GCC will trigger -Wformat-nonliteral warnings for const + * char * strings. Unfortunately, the implementation of do_trace_printk does + * this, in order to add a storage attribute to the memory. This was fixed in + * GCC 5.1, but we still use older distributions built with GCC 4.x. + * + * The string pointer is only passed as a const char * to the __trace_bprintk + * function. Since that function has the __printf attribute, it will trigger + * the warnings. We can't remove the attribute, so instead we'll use the + * __diag macro to disable -Wformat-nonliteral around the call to + * __trace_bprintk. + */ +#if GCC_VERSION < 50100 +#define __trace_bprintk(ip, fmt, args...) 
({ \ + int err; \ + __diag_push(); \ + __diag(ignored "-Wformat-nonliteral"); \ + err = __trace_bprintk(ip, fmt, ##args); \ + __diag_pop(); \ + err; \ +}) +#endif /* GCC_VERSION < 5.1.0 */ + +/* Newer kernels removed */ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,3))) +#define HAVE_PCI_ASPM_H +#endif + +/*****************************************************************************/ +/* 2.4.3 => 2.4.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) + +/**************************************/ +/* PCI DRIVER API */ + +#ifndef pci_set_dma_mask +#define pci_set_dma_mask _kc_pci_set_dma_mask +int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); +#endif + +#ifndef pci_request_regions +#define pci_request_regions _kc_pci_request_regions +int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); +#endif + +#ifndef pci_release_regions +#define pci_release_regions _kc_pci_release_regions +void _kc_pci_release_regions(struct pci_dev *pdev); +#endif + +/**************************************/ +/* NETWORK DRIVER API */ + +#ifndef alloc_etherdev +#define alloc_etherdev _kc_alloc_etherdev +struct net_device * _kc_alloc_etherdev(int sizeof_priv); +#endif + +#ifndef is_valid_ether_addr +#define is_valid_ether_addr _kc_is_valid_ether_addr +int _kc_is_valid_ether_addr(u8 *addr); +#endif + +/**************************************/ +#ifndef INIT_TQUEUE +#define INIT_TQUEUE(_tq, _routine, _data) \ + do { \ + INIT_LIST_HEAD(&(_tq)->list); \ + (_tq)->sync = 0; \ + (_tq)->routine = _routine; \ + (_tq)->data = _data; \ + } while (0) +#endif + +#endif /* 2.4.3 => 2.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) +/* Generic MII registers. 
*/ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +/* Basic mode control register. */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +/* Advertisement control register. */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) +/* Expansion register for auto-negotiation. 
*/ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#endif + +/*****************************************************************************/ +/* 2.4.6 => 2.4.3 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) + +#ifndef pci_set_power_state +#define pci_set_power_state _kc_pci_set_power_state +int _kc_pci_set_power_state(struct pci_dev *dev, int state); +#endif + +#ifndef pci_enable_wake +#define pci_enable_wake _kc_pci_enable_wake +int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); +#endif + +#ifndef pci_disable_device +#define pci_disable_device _kc_pci_disable_device +void _kc_pci_disable_device(struct pci_dev *pdev); +#endif + +/* PCI PM entry point syntax changed, so don't support suspend/resume */ +#undef CONFIG_PM + +#endif /* 2.4.6 => 2.4.3 */ + +/*****************************************************************************/ +/* 2.4.10 => 2.4.9 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) + +/**************************************/ +/* MODULE API */ + +#ifndef MODULE_LICENSE + #define MODULE_LICENSE(X) +#endif + +/**************************************/ +/* OTHER */ + +#undef min +#define min(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#undef max +#define max(x,y) ({ \ + const typeof(x) _x = (x); \ + const typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? _x : _y; }) + +#define min_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x < _y ? _x : _y; }) + +#define max_t(type,x,y) ({ \ + type _x = (x); \ + type _y = (y); \ + _x > _y ? 
_x : _y; }) + +#ifndef list_for_each_safe +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) +#endif + +#ifndef ____cacheline_aligned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_aligned_in_smp ____cacheline_aligned +#else +#define ____cacheline_aligned_in_smp +#endif /* CONFIG_SMP */ +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) +int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); +#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) +int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) +#else /* 2.4.8 => 2.4.9 */ +int snprintf(char * buf, size_t size, const char *fmt, ...); +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +#endif +#endif /* 2.4.10 -> 2.4.6 */ + + +/*****************************************************************************/ +/* 2.4.12 => 2.4.10 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) +#ifndef HAVE_NETIF_MSG +#define HAVE_NETIF_MSG 1 +enum { + NETIF_MSG_DRV = 0x0001, + NETIF_MSG_PROBE = 0x0002, + NETIF_MSG_LINK = 0x0004, + NETIF_MSG_TIMER = 0x0008, + NETIF_MSG_IFDOWN = 0x0010, + NETIF_MSG_IFUP = 0x0020, + NETIF_MSG_RX_ERR = 0x0040, + NETIF_MSG_TX_ERR = 0x0080, + NETIF_MSG_TX_QUEUED = 0x0100, + NETIF_MSG_INTR = 0x0200, + NETIF_MSG_TX_DONE = 0x0400, + NETIF_MSG_RX_STATUS = 0x0800, + NETIF_MSG_PKTDATA = 0x1000, + NETIF_MSG_HW = 0x2000, + NETIF_MSG_WOL = 0x4000, +}; + +#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) +#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) +#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) +#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) +#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) +#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) +#define netif_msg_rx_err(p) 
((p)->msg_enable & NETIF_MSG_RX_ERR) +#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) +#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) +#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) +#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) +#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) +#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) +#endif /* !HAVE_NETIF_MSG */ +#endif /* 2.4.12 => 2.4.10 */ + +/*****************************************************************************/ +/* 2.4.13 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) + +/**************************************/ +/* PCI DMA MAPPING */ + +#ifndef virt_to_page + #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) +#endif + +#ifndef pci_map_page +#define pci_map_page _kc_pci_map_page +u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); +#endif + +#ifndef pci_unmap_page +#define pci_unmap_page _kc_pci_unmap_page +void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); +#endif + +/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ + +#undef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0xffffffff +#undef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffff + +/**************************************/ +/* OTHER */ + +#ifndef cpu_relax +#define cpu_relax() rep_nop() +#endif + +struct vlan_ethhdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_vlan_proto; + unsigned short h_vlan_TCI; + unsigned short h_vlan_encapsulated_proto; +}; +#endif /* 2.4.13 => 2.4.12 */ + +/*****************************************************************************/ +/* 2.4.17 => 2.4.12 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) + +#ifndef __devexit_p + #define __devexit_p(x) &(x) +#endif + +#endif /* 2.4.17 => 2.4.13 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) +#define NETIF_MSG_HW 0x2000 +#define NETIF_MSG_WOL 0x4000 + +#ifndef netif_msg_hw +#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) +#endif +#ifndef netif_msg_wol +#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) +#endif +#endif /* 2.4.18 */ + +/*****************************************************************************/ + +/*****************************************************************************/ +/* 2.4.20 => 2.4.19 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) + +/* we won't support NAPI on less than 2.4.20 */ +#ifdef NAPI +#undef NAPI +#endif + +#endif /* 2.4.20 => 2.4.19 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full BIT(12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full BIT(12) +#endif +#endif + +/*****************************************************************************/ +/*****************************************************************************/ +/* 2.4.23 => 2.4.22 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +/*****************************************************************************/ +#ifdef NAPI +#ifndef netif_poll_disable +#define netif_poll_disable(x) _kc_netif_poll_disable(x) +static inline void _kc_netif_poll_disable(struct net_device *netdev) +{ + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { + /* No hurry */ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(1); + } +} +#endif +#ifndef netif_poll_enable +#define netif_poll_enable(x) _kc_netif_poll_enable(x) +static inline void _kc_netif_poll_enable(struct net_device *netdev) +{ + clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); 
+} +#endif +#endif /* NAPI */ +#ifndef netif_tx_disable +#define netif_tx_disable(x) _kc_netif_tx_disable(x) +static inline void _kc_netif_tx_disable(struct net_device *dev) +{ + spin_lock_bh(&dev->xmit_lock); + netif_stop_queue(dev); + spin_unlock_bh(&dev->xmit_lock); +} +#endif +#else /* 2.4.23 => 2.4.22 */ +#define HAVE_SCTP +#endif /* 2.4.23 => 2.4.22 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + +/*****************************************************************************/ +/* 2.5.71 => 2.4.x */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) +#define sk_protocol protocol +#define pci_get_device pci_find_device +#endif /* 2.5.70 => 2.4.x */ + +/*****************************************************************************/ +/* < 2.4.27 or 2.6.0 <= 2.6.5 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) + +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) -1; +} +#endif + +#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ +/*****************************************************************************/ +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= 
KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +#define netdev_priv(x) x->priv +#endif + +/*****************************************************************************/ +/* <= 2.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) +#include +#undef pci_register_driver +#define pci_register_driver pci_module_init + +/* + * Most of the dma compat code is copied/modifed from the 2.4.37 + * /include/linux/libata-compat.h header file + */ +/* These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; + +struct device { + struct pci_dev pdev; +}; + +static inline struct pci_dev *to_pci_dev (struct device *dev) +{ + return (struct pci_dev *) dev; +} +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *) pdev; +} +#define pdev_printk(lvl, pdev, fmt, args...) \ + printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) +#define dev_err(dev, fmt, args...) \ + pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) +#define dev_info(dev, fmt, args...) \ + pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) +#define dev_warn(dev, fmt, args...) \ + pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) + +/* NOTE: dangerous! 
we ignore the 'gfp' argument */ +#define dma_alloc_coherent(dev,sz,dma,gfp) \ + pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) +#define dma_free_coherent(dev,sz,addr,dma_addr) \ + pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) + +#define dma_map_page(dev,a,b,c,d) \ + pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) +#define dma_unmap_page(dev,a,b,c) \ + pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_single(dev,a,b,c) \ + pci_map_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_unmap_single(dev,a,b,c) \ + pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) + +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + +#define dma_sync_single(dev,a,b,c) \ + pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) + +/* for range just sync everything, that's all the pci API can do */ +#define dma_sync_single_range(dev,addr,off,sz,dir) \ + pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) + +#define dma_set_mask(dev,mask) \ + pci_set_dma_mask(to_pci_dev(dev),(mask)) + +/* hlist_* code - double linked lists */ +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = NULL; + n->pprev = NULL; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct 
hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +#ifndef might_sleep +#define might_sleep() +#endif +#else +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} +#endif /* <= 2.5.0 */ + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#define cancel_work_sync(x) flush_scheduled_work() + +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + +#ifndef rcu_head +struct __kc_callback_head { + struct __kc_callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head __kc_callback_head +#endif + +#undef get_cpu +#define get_cpu() smp_processor_id() +#undef put_cpu +#define put_cpu() do { } while(0) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT +#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1 +#endif +#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT +#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 +#endif + +#define dma_set_coherent_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif 
+ +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#ifndef page_count +#define page_count(p) atomic_read(&(p)->count) +#endif + +#ifdef MAX_NUMNODES +#undef MAX_NUMNODES +#endif +#define MAX_NUMNODES 1 + +/* find_first_bit and find_next bit are not defined for most + * 2.4 kernels (except for the redhat 2.4.21 kernels + */ +#include +#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#define find_next_bit _kc_find_next_bit +unsigned long _kc_find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (strchr(dev->name, '%')) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#ifndef strlcpy +#define strlcpy _kc_strlcpy +size_t _kc_strlcpy(char *dest, const char *src, size_t size); +#endif /* strlcpy */ + +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ + +#endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define dma_sync_single_for_cpu dma_sync_single +#define dma_sync_single_for_device dma_sync_single +#define dma_sync_single_range_for_cpu dma_sync_single_range +#define dma_sync_single_range_for_device dma_sync_single_range +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); +#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) +#endif /* < 2.6.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) +/* taken from 2.6 include/linux/bitmap.h */ +#undef bitmap_zero +#define bitmap_zero _kc_bitmap_zero +static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + memset(dst, 0, len); + } +} +#define page_to_nid(x) 0 + +#endif /* < 2.6.6 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} + +#ifndef __force +#define __force +#endif +#endif /* < 2.6.7 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) +#ifndef PCI_EXP_DEVCTL +#define PCI_EXP_DEVCTL 8 +#endif +#ifndef PCI_EXP_DEVCTL_CERE +#define PCI_EXP_DEVCTL_CERE 0x0001 +#endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ +#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ + schedule_timeout((x * HZ)/1000 + 2); \ + } while (0) + +#endif /* < 2.6.8 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,9)) +#include +#define __iomem + +#ifndef kcalloc +#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) +void *_kc_kzalloc(size_t size, int flags); +#endif +#define MSEC_PER_SEC 1000L +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} + +#define msleep_interruptible _kc_msleep_interruptible +static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} + +/* Basic mode control register. */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +#ifndef __be16 +#define __be16 u16 +#endif +#ifndef __be32 +#define __be32 u32 +#endif +#ifndef __be64 +#define __be64 u64 +#endif + +static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->mac.raw; +} + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY BIT(0) +#define WAKE_UCAST BIT(1) +#define WAKE_MCAST BIT(2) +#define WAKE_BCAST BIT(3) +#define WAKE_ARP BIT(4) +#define WAKE_MAGIC BIT(5) +#define WAKE_MAGICSECURE BIT(6) /* only meaningful if WAKE_MAGIC */ + +#define skb_header_pointer _kc_skb_header_pointer +static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, + int offset, int len, void *buffer) +{ + int hlen = skb_headlen(skb); + + if (hlen - offset >= len) + return skb->data + offset; + +#ifdef MAX_SKB_FRAGS + if (skb_copy_bits(skb, offset, buffer, len) < 0) + return NULL; + + return buffer; +#else + return NULL; +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 +#endif +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 +#endif +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 +#endif +} + +#ifndef __bitwise +#define __bitwise +#endif +#endif /* < 2.6.9 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) +#ifdef module_param_array_named +#undef module_param_array_named +#define module_param_array_named(name, array, type, nump, perm) \ + static struct kparam_array __param_arr_##name \ + = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ + sizeof(array[0]), array }; \ + module_param_call(name, param_array_set, param_array_get, \ + &__param_arr_##name, perm) +#endif /* module_param_array_named */ +/* + * num_online is broken for all < 2.6.10 kernels. This is needed to support + * Node module parameter of ixgbe. 
+ */ +#undef num_online_nodes +#define num_online_nodes(n) 1 +extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); +#undef node_online_map +#define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class +#endif /* < 2.6.10 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) +#define PCI_D0 0 +#define PCI_D1 1 +#define PCI_D2 2 +#define PCI_D3hot 3 +#define PCI_D3cold 4 +typedef int pci_power_t; +#define pci_choose_state(pdev,state) state +#define PMSG_SUSPEND 3 +#define PCI_EXP_LNKCTL 16 + +#undef NETIF_F_LLTX + +#ifndef ARCH_HAS_PREFETCH +#define prefetch(X) +#endif + +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif + +#define KC_USEC_PER_SEC 1000000L +#define usecs_to_jiffies _kc_usecs_to_jiffies +static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) +{ +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (KC_USEC_PER_SEC / HZ) * j; +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); +#else + return (j * KC_USEC_PER_SEC) / HZ; +#endif +} +static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) + return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); +#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) + return m * (HZ / KC_USEC_PER_SEC); +#else + return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; +#endif +} + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ 
+#endif /* < 2.6.11 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) +#include +#define USE_REBOOT_NOTIFIER + +/* Generic MII registers. */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +/* Advertisement control register. */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. */ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + +#ifndef is_zero_ether_addr +#define is_zero_ether_addr _kc_is_zero_ether_addr +static inline int _kc_is_zero_ether_addr(const u8 *addr) +{ + return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); +} +#endif /* is_zero_ether_addr */ +#ifndef is_multicast_ether_addr +#define is_multicast_ether_addr _kc_is_multicast_ether_addr +static inline int _kc_is_multicast_ether_addr(const u8 *addr) +{ + return addr[0] & 0x01; +} +#endif /* is_multicast_ether_addr */ +#endif /* < 2.6.12 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) +#ifndef kstrdup +#define kstrdup _kc_kstrdup +char *_kc_kstrdup(const char *s, unsigned int gfp); +#endif +#endif /* < 2.6.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) +#define pm_message_t u32 +#ifndef kzalloc +#define kzalloc _kc_kzalloc +void 
*_kc_kzalloc(size_t size, int flags); +#endif + +/* Generic MII registers. */ +#define MII_ESTATUS 0x0f /* Extended Status */ +/* Basic mode status register. */ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +/* Extended status register. */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +#define SUPPORTED_Pause BIT(13) +#define SUPPORTED_Asym_Pause BIT(14) +#define ADVERTISED_Pause BIT(13) +#define ADVERTISED_Asym_Pause BIT(14) + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) +#define gfp_t unsigned +#else +typedef unsigned gfp_t; +#endif +#endif /* !RHEL4.3->RHEL5.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) +#ifdef CONFIG_X86_64 +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) +#endif +#endif +#endif /* < 2.6.14 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) +#ifndef kfree_rcu +/* this is placed here due to a lack of rcu_barrier in previous kernels */ +#define kfree_rcu(_ptr, _offset) kfree(_ptr) +#endif /* kfree_rcu */ +#ifndef vmalloc_node +#define vmalloc_node(a,b) vmalloc(a) +#endif /* vmalloc_node*/ + +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#ifndef device_can_wakeup +#define device_can_wakeup(dev) (1) +#endif +#ifndef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) do{}while(0) +#endif +#ifndef device_init_wakeup +#define device_init_wakeup(dev,val) do {} while 
(0) +#endif +static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) +{ + const u16 *a = (const u16 *) addr1; + const u16 *b = (const u16 *) addr2; + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; +} +#undef compare_ether_addr +#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) +#endif /* < 2.6.15 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) +#undef DEFINE_MUTEX +#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) +#define mutex_lock(x) down_interruptible(x) +#define mutex_unlock(x) up(x) + +#ifndef ____cacheline_internodealigned_in_smp +#ifdef CONFIG_SMP +#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp +#else +#define ____cacheline_internodealigned_in_smp +#endif /* CONFIG_SMP */ +#endif /* ____cacheline_internodealigned_in_smp */ +#undef HAVE_PCI_ERS +#else /* 2.6.16 and above */ +#undef HAVE_PCI_ERS +#define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ +#endif /* < 2.6.16 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) 
\ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + +#ifndef first_online_node +#define first_online_node 0 +#endif +#ifndef NET_SKB_PAD +#define NET_SKB_PAD 16 +#endif +#endif /* < 2.6.17 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) + +#ifndef IRQ_HANDLED +#define irqreturn_t void +#define IRQ_HANDLED +#define IRQ_NONE +#endif + +#ifndef IRQF_PROBE_SHARED +#ifdef SA_PROBEIRQ +#define IRQF_PROBE_SHARED SA_PROBEIRQ +#else +#define IRQF_PROBE_SHARED 0 +#endif +#endif + +#ifndef IRQF_SHARED +#define IRQF_SHARED SA_SHIRQ +#endif + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef skb_is_gso +#ifdef NETIF_F_TSO +#define skb_is_gso _kc_skb_is_gso +static inline int _kc_skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} +#else +#define skb_is_gso(a) 0 +#endif +#endif + +#ifndef resource_size_t +#define resource_size_t unsigned long +#endif + +#ifdef skb_pad +#undef skb_pad +#endif +#define skb_pad(x,y) _kc_skb_pad(x, y) +int _kc_skb_pad(struct sk_buff *skb, int pad); +#ifdef skb_padto +#undef skb_padto +#endif +#define skb_padto(x,y) _kc_skb_padto(x, y) +static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + if(likely(size >= len)) + return 0; + return _kc_skb_pad(skb, len - size); +} + +#ifndef DECLARE_PCI_UNMAP_ADDR +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + u32 LEN_NAME +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#endif /* DECLARE_PCI_UNMAP_ADDR */ +#endif /* < 2.6.18 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +enum pcie_link_width { + PCIE_LNK_WIDTH_RESRV = 0x00, + PCIE_LNK_X1 = 0x01, + PCIE_LNK_X2 = 0x02, + PCIE_LNK_X4 = 0x04, + PCIE_LNK_X8 = 0x08, + PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X16 = 0x10, + PCIE_LNK_X32 = 0x20, + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + +#ifndef DIV_ROUND_UP +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) +#endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) +#if (!((RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) +typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#undef CONFIG_INET_LRO +#undef CONFIG_INET_LRO_MODULE +#endif +typedef irqreturn_t (*new_handler_t)(int, void*); +static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#else /* 2.4.x */ +typedef void (*irq_handler_t)(int, void*, struct pt_regs *); +typedef void (*new_handler_t)(int, void*); +static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) +#endif /* >= 2.5.x */ +{ + irq_handler_t new_handler = (irq_handler_t) handler; + return request_irq(irq, new_handler, flags, devname, dev_id); +} + +#undef request_irq +#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) + +#define irq_handler_t new_handler_t + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) ) +#ifndef 
skb_checksum_help +static inline int __kc_skb_checksum_help(struct sk_buff *skb) +{ + return skb_checksum_help(skb, 0); +} +#define skb_checksum_help(skb) __kc_skb_checksum_help((skb)) +#endif +#endif /* < 2.6.19 && >= 2.6.11 */ + +/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) +#define PCIE_CONFIG_SPACE_LEN 256 +#define PCI_CONFIG_SPACE_LEN 64 +#define PCIE_LINK_STATUS 0x12 +#define pci_config_space_ich8lan() do {} while(0) +#undef pci_save_state +int _kc_pci_save_state(struct pci_dev *); +#define pci_save_state(pdev) _kc_pci_save_state(pdev) +#undef pci_restore_state +void _kc_pci_restore_state(struct pci_dev *); +#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) +#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ + +#ifdef HAVE_PCI_ERS +#undef free_netdev +void _kc_free_netdev(struct net_device *); +#define free_netdev(netdev) _kc_free_netdev(netdev) +#endif +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) +#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) + +void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); +#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) +#ifndef bool +#define bool _Bool +#define true 1 +#define false 0 +#endif +#else /* 2.6.19 */ +#include +#include + +#define NEW_SKB_CSUM_HELP +#endif /* < 2.6.19 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ +do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + (_work)->func = (void (*)(void *))_func; \ + (_work)->data = _work; \ + init_timer(&(_work)->timer); \ +} while (0) +#endif + +#ifndef PCI_VDEVICE +#define PCI_VDEVICE(ven, 
dev) \ + PCI_VENDOR_ID_##ven, (dev), \ + PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#endif + +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +#ifndef round_jiffies +#define round_jiffies(x) x +#endif + +#define csum_offset csum + +#define HAVE_EARLY_VMALLOC_NODE +#define dev_to_node(dev) -1 +#undef set_dev_node +/* remove compiler warning with b=b, for unused variable */ +#define set_dev_node(a, b) do { (b) = (b); } while(0) + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) +static inline __wsum csum_unfold(__sum16 n) +{ + return (__force __wsum)n; +} +#endif + +#else /* < 2.6.20 */ +#define HAVE_DEVICE_NUMA_NODE +#endif /* < 2.6.20 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define to_net_dev(class) container_of(class, struct net_device, class_dev) +#define NETDEV_CLASS_DEV +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) +#define vlan_group_set_device(vg, id, dev) \ + do { \ + if (vg) vg->vlan_devices[id] = dev; \ + } while (0) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define pci_channel_offline(pdev) (pdev->error_state && \ + pdev->error_state != pci_channel_io_normal) +#define pci_request_selected_regions(pdev, bars, name) \ + pci_request_regions(pdev, name) +#define 
pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) +#define devm_kfree(dev, p) kfree(p) +#else /* 2.6.21 */ +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + +#endif /* < 2.6.21 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define tcp_hdr(skb) (skb->h.th) +#define tcp_hdrlen(skb) (skb->h.th->doff << 2) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#define skb_transport_header(skb) (skb->h.raw) +#define ipv6_hdr(skb) (skb->nh.ipv6h) +#define ip_hdr(skb) (skb->nh.iph) +#define skb_network_offset(skb) (skb->nh.raw - skb->data) +#define skb_network_header(skb) (skb->nh.raw) +#define skb_tail_pointer(skb) skb->tail +#define skb_reset_tail_pointer(skb) \ + do { \ + skb->tail = skb->data; \ + } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) +#define skb_copy_to_linear_data(skb, from, len) \ + memcpy(skb->data, from, len) +#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ + memcpy(skb->data + offset, from, len) +#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) +#define pci_register_driver pci_module_init +#define skb_mac_header(skb) skb->mac.raw + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return 
(struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) +enum { + DUMP_PREFIX_NONE, + DUMP_PREFIX_ADDRESS, + DUMP_PREFIX_OFFSET +}; +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ +#ifndef hex_asc +#define hex_asc(x) "0123456789abcdef"[x] +#endif +#include +void _kc_print_hex_dump(const char *level, const char *prefix_str, + int prefix_type, int rowsize, int groupsize, + const void *buf, size_t len, bool ascii); +#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ + _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full BIT(15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full BIT(15) +#endif + +#ifndef ETH_P_PAUSE +#define ETH_P_PAUSE 0x8808 +#endif + +static inline int compound_order(struct page *page) +{ + return 0; +} + +#define __must_be_array(a) 0 + +#ifndef SKB_WITH_OVERHEAD +#define SKB_WITH_OVERHEAD(X) \ + ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#endif +#else /* 2.6.22 */ +#define ETH_TYPE_TRANS_SETS_DEV +#define HAVE_NETDEV_STATS_IN_NETDEV +#endif /* < 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) +#endif /* > 2.6.22 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) +#define netif_subqueue_stopped(_a, _b) 0 +#ifndef PTR_ALIGN +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#endif + +#ifndef CONFIG_PM_SLEEP +#define CONFIG_PM_SLEEP CONFIG_PM +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) +#define HAVE_ETHTOOL_GET_PERM_ADDR +#endif /* 2.6.14 through 2.6.22 */ + +static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ + int 
delta = 0; + + if (headroom > (skb->data - skb->head)) + delta = headroom - (skb->data - skb->head); + + if (delta || skb_header_cloned(skb)) + return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + return 0; +} +#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) +#endif /* < 2.6.23 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +#ifndef ETH_FLAG_LRO +#define ETH_FLAG_LRO NETIF_F_LRO +#endif + +#ifndef ACCESS_ONCE +#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#endif + +/* if GRO is supported then the napi struct must already exist */ +#ifndef NETIF_F_GRO +/* NAPI API changes in 2.6.24 break everything */ +struct napi_struct { + /* used to look up the real NAPI polling routine */ + int (*poll)(struct napi_struct *, int); + struct net_device *dev; + int weight; +}; +#endif + +#ifdef NAPI +int __kc_adapter_clean(struct net_device *, int *); +/* The following definitions are multi-queue aware, and thus we have a driver + * define list which determines which drivers support multiple queues, and + * thus need these stronger defines. If a driver does not support multi-queue + * functionality, you don't need to add it to this list. 
+ */ +struct net_device *napi_to_poll_dev(const struct napi_struct *napi); + +static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + poll_dev->poll = __kc_adapter_clean; + poll_dev->priv = napi; + poll_dev->weight = weight; + set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); + set_bit(__LINK_STATE_START, &poll_dev->state); + dev_hold(poll_dev); + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_add __kc_mq_netif_napi_add + +static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) +{ + struct net_device *poll_dev = napi_to_poll_dev(napi); + WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); + dev_put(poll_dev); + memset(poll_dev, 0, sizeof(struct net_device)); +} + +#define netif_napi_del __kc_mq_netif_napi_del + +static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) +{ + return netif_running(napi->dev) && + netif_rx_schedule_prep(napi_to_poll_dev(napi)); +} +#define napi_schedule_prep __kc_mq_napi_schedule_prep + +static inline void __kc_mq_napi_schedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) + __netif_rx_schedule(napi_to_poll_dev(napi)); +} +#define napi_schedule __kc_mq_napi_schedule + +#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) +#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. 
*/ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ +#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) +static inline void _kc_napi_complete(struct napi_struct *napi) +{ +#ifdef NETIF_F_GRO + napi_gro_flush(napi); +#endif + netif_rx_complete(napi_to_poll_dev(napi)); +} +#define napi_complete _kc_napi_complete +#else /* NAPI */ + +/* The following definitions are only used if we don't support NAPI at all. */ + +static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + dev->poll = poll; + dev->weight = weight; + napi->poll = poll; + napi->weight = weight; + napi->dev = dev; +} +#define netif_napi_del(_a) do {} while (0) +#endif /* NAPI */ + +#undef dev_get_by_name +#define dev_get_by_name(_a, _b) dev_get_by_name(_b) +#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) +#ifndef DMA_BIT_MASK +#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) +#endif + +#ifdef NETIF_F_TSO6 +#define skb_is_gso_v6 _kc_skb_is_gso_v6 +static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; +} +#endif /* NETIF_F_TSO6 */ + +#ifndef KERN_CONT +#define KERN_CONT "" +#endif +#ifndef pr_err +#define pr_err(fmt, arg...) \ + printk(KERN_ERR fmt, ##arg) +#endif + +#ifndef rounddown_pow_of_two +#define rounddown_pow_of_two(n) \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 
0 : \ + (1UL << ilog2(n))) : \ + (1UL << (fls_long(n) - 1)) +#endif + +#else /* < 2.6.24 */ +#define HAVE_ETHTOOL_GET_SSET_COUNT +#define HAVE_NETDEV_NAPI_LIST +#endif /* < 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#define INCLUDE_PM_QOS_PARAMS_H +#include +#else /* >= 3.2.0 */ +#include +#endif /* else >= 3.2.0 */ +#endif /* > 2.6.24 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) +#define PM_QOS_CPU_DMA_LATENCY 1 + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) +#include +#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY +#define pm_qos_add_requirement(pm_qos_class, name, value) \ + set_acceptable_latency(name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) \ + remove_acceptable_latency(name) +#define pm_qos_update_requirement(pm_qos_class, name, value) \ + modify_acceptable_latency(name, value) +#else +#define PM_QOS_DEFAULT_VALUE -1 +#define pm_qos_add_requirement(pm_qos_class, name, value) +#define pm_qos_remove_requirement(pm_qos_class, name) +#define pm_qos_update_requirement(pm_qos_class, name, value) { \ + if (value != PM_QOS_DEFAULT_VALUE) { \ + printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ + pci_name(adapter->pdev)); \ + } \ +} + +#endif /* > 2.6.18 */ + +#define pci_enable_device_mem(pdev) pci_enable_device(pdev) + +#ifndef DEFINE_PCI_DEVICE_TABLE +#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] +#endif /* DEFINE_PCI_DEVICE_TABLE */ + +#ifndef strict_strtol +#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) +static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) +{ + /* adapted from strict_strtoul() in 2.6.25 */ + char *tail; + long val; + size_t len; + + *res = 0; + len = strlen(buf); + if (!len) + return -EINVAL; + 
val = simple_strtol(buf, &tail, base); + if (tail == buf) + return -EINVAL; + if ((*tail == '\0') || + ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { + *res = val; + return 0; + } + + return -EINVAL; +} +#endif + +#else /* < 2.6.25 */ + +#endif /* < 2.6.25 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) +#ifndef clamp_t +#define clamp_t(type, val, min, max) ({ \ + type __val = (val); \ + type __min = (min); \ + type __max = (max); \ + __val = __val < __min ? __min : __val; \ + __val > __max ? __max : __val; }) +#endif /* clamp_t */ +#undef kzalloc_node +#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) + +void _kc_pci_disable_link_state(struct pci_dev *dev, int state); +#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) +#else /* < 2.6.26 */ +#define NETDEV_CAN_SET_GSO_MAX_SIZE +#ifdef HAVE_PCI_ASPM_H +#include +#endif +#define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ +#endif /* < 2.6.26 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)speed; + /* ep->speed_hi = (__u16)(speed >> 16); */ +} +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set + +static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + /* no speed_hi before 2.6.27, and probably no need for it yet */ + return (__u32)ep->speed; +} +#define ethtool_cmd_speed _kc_ethtool_cmd_speed + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) +#define ANCIENT_PM 1 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ + 
defined(CONFIG_PM_SLEEP)) +#define NEWER_PM 1 +#endif +#if defined(ANCIENT_PM) || defined(NEWER_PM) +#undef device_set_wakeup_enable +#define device_set_wakeup_enable(dev, val) \ + do { \ + u16 pmc = 0; \ + int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ + if (pm) { \ + pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ + &pmc); \ + } \ + (dev)->power.can_wakeup = !!(pmc >> 11); \ + (dev)->power.should_wakeup = (val && (pmc >> 11)); \ + } while (0) +#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ +#endif /* 2.6.15 through 2.6.27 */ +#ifndef netif_napi_del +#define netif_napi_del(_a) do {} while (0) +#ifdef NAPI +#ifdef CONFIG_NETPOLL +#undef netif_napi_del +#define netif_napi_del(_a) list_del(&(_a)->dev_list); +#endif +#endif +#endif /* netif_napi_del */ +#ifdef dma_mapping_error +#undef dma_mapping_error +#endif +#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) + +#ifdef CONFIG_NETDEVICES_MULTIQUEUE +#define HAVE_TX_MQ +#endif + +#ifndef DMA_ATTR_WEAK_ORDERING +#define DMA_ATTR_WEAK_ORDERING 0 +#endif + +#ifdef HAVE_TX_MQ +void _kc_netif_tx_stop_all_queues(struct net_device *); +void _kc_netif_tx_wake_all_queues(struct net_device *); +void _kc_netif_tx_start_all_queues(struct net_device *); +#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) +#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) +#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) +#undef netif_stop_subqueue +#define netif_stop_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_stop_subqueue((_ndev), (_qi)); \ + else \ + netif_stop_queue((_ndev)); \ + } while (0) +#undef netif_start_subqueue +#define netif_start_subqueue(_ndev,_qi) do { \ + if (netif_is_multiqueue((_ndev))) \ + netif_start_subqueue((_ndev), (_qi)); \ + else \ + netif_start_queue((_ndev)); \ + } while (0) +#else /* HAVE_TX_MQ */ +#define netif_tx_stop_all_queues(a) netif_stop_queue(a) 
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) +#define netif_tx_start_all_queues(a) netif_start_queue(a) +#else +#define netif_tx_start_all_queues(a) do {} while (0) +#endif +#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) +#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) +#endif /* HAVE_TX_MQ */ +#ifndef NETIF_F_MULTI_QUEUE +#define NETIF_F_MULTI_QUEUE 0 +#define netif_is_multiqueue(a) 0 +#define netif_wake_subqueue(a, b) +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef __WARN_printf +void __kc_warn_slowpath(const char *file, const int line, + const char *fmt, ...) __attribute__((format(printf, 3, 4))); +#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) +#endif /* __WARN_printf */ + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS +#define qdisc_reset_all_tx(a) +#else /* < 2.6.27 */ +#include +#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set +static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + ep->speed = (__u16)(speed & 0xFFFF); + ep->speed_hi = (__u16)(speed >> 16); +} +#define HAVE_TX_MQ +#define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ +#endif /* < 2.6.27 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) +#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ + pci_resource_len(pdev, bar)) +#define pci_wake_from_d3 _kc_pci_wake_from_d3 +#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep +int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); +int _kc_pci_prepare_to_sleep(struct 
pci_dev *dev); +#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) +#ifndef __skb_queue_head_init +static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) +{ + list->prev = list->next = (struct sk_buff *)list; + list->qlen = 0; +} +#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) +#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + +#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */ +#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ + +#endif /* < 2.6.28 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) +#ifndef swap +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) +#endif +#define pci_request_selected_regions_exclusive(pdev, bars, name) \ + pci_request_selected_regions(pdev, bars, name) +#ifndef CONFIG_NR_CPUS +#define CONFIG_NR_CPUS 1 +#endif /* CONFIG_NR_CPUS */ +#ifndef pcie_aspm_enabled +#define pcie_aspm_enabled() (1) +#endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ + +#ifndef PCI_EXP_LNKSTA_CLS +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#endif +#ifndef PCI_EXP_LNKSTA_NLW +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ +#endif + +#ifndef pci_clear_master +void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif + +#ifndef PCI_EXP_LNKCAP_MLW +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#endif + +#else /* < 2.6.29 */ +#ifndef HAVE_NET_DEVICE_OPS +#define HAVE_NET_DEVICE_OPS +#endif +#ifdef CONFIG_DCB +#define HAVE_PFC_MODE_ENABLE +#endif /* CONFIG_DCB */ +#endif /* < 2.6.29 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) +#define NO_PTP_SUPPORT +#define skb_rx_queue_recorded(a) false +#define skb_get_rx_queue(a) 0 +#define skb_record_rx_queue(a, b) do {} while (0) +#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) +#ifndef CONFIG_PCI_IOV +#undef pci_enable_sriov +#define pci_enable_sriov(a, b) -ENOTSUPP +#undef pci_disable_sriov +#define pci_disable_sriov(a) do {} while (0) +#endif /* CONFIG_PCI_IOV */ +#ifndef pr_cont +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) +#endif /* pr_cont */ +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#ifdef nr_cpus_node +#undef nr_cpus_node +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5)) +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* RHEL >= 5.5 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,5))) +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} +#endif + +#else /* < 2.6.30 */ +#define HAVE_ASPM_QUIRKS +#define HAVE_PCI_DEV_IS_VIRTFN_BIT +#endif /* < 2.6.30 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) +#define ETH_P_1588 0x88F7 +#define ETH_P_FIP 0x8914 +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc_count) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(uclist, dev) \ + for (uclist = dev->uc_list; uclist; uclist = uclist->next) +#endif +#ifndef PORT_OTHER +#define PORT_OTHER 0xff 
+#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full BIT(17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full BIT(18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full BIT(19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full BIT(17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full BIT(18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full BIT(19) +#endif + +static inline unsigned long dev_trans_start(struct net_device *dev) +{ + return dev->trans_start; +} +#else /* < 2.6.31 */ +#ifndef HAVE_NETDEV_STORAGE_ADDRESS +#define HAVE_NETDEV_STORAGE_ADDRESS +#endif +#ifndef HAVE_NETDEV_HW_ADDR +#define HAVE_NETDEV_HW_ADDR +#endif +#ifndef HAVE_TRANS_START_IN_QUEUE +#define HAVE_TRANS_START_IN_QUEUE +#endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif +#include +#endif /* < 2.6.31 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) +#undef netdev_tx_t +#define netdev_tx_t int +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef NETIF_F_FCOE_MTU +#define NETIF_F_FCOE_MTU BIT(26) +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) +{ + return 1; +} +#ifndef pm_runtime_get_sync +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) +#endif +#endif /* 2.6.0 => 2.6.32 */ +#ifndef 
pm_runtime_put +#define pm_runtime_put(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_sync +#define pm_runtime_put_sync(dev) do {} while (0) +#endif +#ifndef pm_runtime_resume +#define pm_runtime_resume(dev) do {} while (0) +#endif +#ifndef pm_schedule_suspend +#define pm_schedule_suspend(dev, t) do {} while (0) +#endif +#ifndef pm_runtime_set_suspended +#define pm_runtime_set_suspended(dev) do {} while (0) +#endif +#ifndef pm_runtime_disable +#define pm_runtime_disable(dev) do {} while (0) +#endif +#ifndef pm_runtime_put_noidle +#define pm_runtime_put_noidle(dev) do {} while (0) +#endif +#ifndef pm_runtime_set_active +#define pm_runtime_set_active(dev) do {} while (0) +#endif +#ifndef pm_runtime_enable +#define pm_runtime_enable(dev) do {} while (0) +#endif +#ifndef pm_runtime_get_noresume +#define pm_runtime_get_noresume(dev) do {} while (0) +#endif +#else /* < 2.6.32 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_EXTENDED +#endif /* RHEL >= 6.2 && RHEL < 7.0 */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NET_DEVICE_OPS_EXT +#define HAVE_NDO_SET_FEATURES +#endif /* RHEL >= 6.6 && RHEL < 7.0 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE +#define HAVE_NETDEV_OPS_FCOE_ENABLE +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_OPS_GETAPP +#define HAVE_DCBNL_OPS_GETAPP +#endif +#endif /* CONFIG_DCB */ +#include +/* IOV bad DMA target work arounds require at least this kernel rev support */ +#define HAVE_PCIE_TYPE +#endif /* < 2.6.32 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) +#ifndef pci_pcie_cap +#define pci_pcie_cap(pdev) 
pci_find_capability(pdev, PCI_CAP_ID_EXP) +#endif +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ +/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ +#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* RHEL6 or SLES11 SP1 */ +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif 
/* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) +#define HAVE_ETHTOOL_GSRSSH +#define HAVE_RHEL6_SRIOV_CONFIGURE +#define HAVE_RXFH_NONCONST +#endif /* RHEL > 6.5 */ +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#else /* < 2.6.33 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN +#define HAVE_NETDEV_OPS_FCOE_GETWWN +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#endif /* < 2.6.33 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef dev_is_pci +#define dev_is_pci(d) ((d)->bus == &pci_bus_type) +#endif + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define 
netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev,mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) +#endif +#ifndef pci_dev_run_wake +#define pci_dev_run_wake(pdev) (0) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_printk +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ +} while(0) +#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) +#define netdev_printk(level, netdev, format, args...) \ +do { \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + struct device *dev = pci_dev_to_dev(pdev); \ + dev_printk(level, dev, "%s: " format, \ + netdev_name(netdev), ##args); \ +} while(0) +#else /* 2.6.21 => 2.6.34 */ +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, (netdev)->dev.parent, \ + "%s: " format, \ + netdev_name(netdev), ##args) +#endif /* <2.6.0 <2.6.21 <2.6.34 */ +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) \ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) 
\ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ +do { \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args); \ +} while (0) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ +({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ +}) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_printk(level, (dev), fmt, ##args); \ +} while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) 
\ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifdef SET_SYSTEM_SLEEP_PM_OPS +#define HAVE_SYSTEM_SLEEP_PM_OPS +#endif + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN 
_KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#define HAVE_SYSTEM_SLEEP_PM_OPS +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif + +#endif /* < 2.6.34 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef HAVE_PCI_DEV_IS_VIRTFN_BIT +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif /* CONFIG_PCI_IOV */ +#endif /* HAVE_PCI_DEV_IS_VIRTFN_BIT */ + return dev; +} +#endif /* ! RHEL >= 6.4 */ + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef numa_node_id +#define numa_node_id() 0 +#endif +#ifndef numa_mem_id +#define numa_mem_id numa_node_id +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) +#ifdef HAVE_TX_MQ +#include +#ifndef CONFIG_NETDEVICES_MULTIQUEUE +int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); +#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->egress_subqueue_count = txq; + return 0; +} +#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ +#else /* HAVE_TX_MQ */ +static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused txq) +{ + return 0; +} +#endif /* HAVE_TX_MQ */ +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ +#ifndef ETH_FLAG_RXHASH +#define ETH_FLAG_RXHASH (1<<28) +#endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && 
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif +struct device_node; +#else /* < 2.6.35 */ +#define HAVE_STRUCT_DEVICE_OF_NODE +#define HAVE_PM_QOS_REQUEST_LIST +#define HAVE_IRQ_AFFINITY_HINT +#include +#endif /* < 2.6.35 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) +int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); +#define ethtool_op_set_flags _kc_ethtool_op_set_flags +u32 _kc_ethtool_op_get_flags(struct net_device *); +#define ethtool_op_get_flags _kc_ethtool_op_get_flags + +enum { + WQ_UNBOUND = 0, + WQ_RESCUER = 0, +}; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) 
\ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a,b) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) +{ + return; +} +#endif + +#else /* < 2.6.36 */ + +#define HAVE_PM_QOS_REQUEST_ACTIVE +#define HAVE_8021P_SUPPORT +#define HAVE_NDO_GET_STATS64 +#endif /* < 2.6.36 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#define HAVE_NON_CONST_PCI_DRIVER_NAME +#ifndef netif_set_real_num_tx_queues +static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + netif_set_real_num_tx_queues(dev, txq); + return 0; +} +#define netif_set_real_num_tx_queues(dev, txq) \ + _kc_netif_set_real_num_tx_queues(dev, txq) +#endif +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, + unsigned int __always_unused rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif +#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) +#endif +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ +#ifndef ETH_FLAG_TXVLAN +#define 
ETH_FLAG_TXVLAN BIT(7) +#endif /* ETH_FLAG_TXVLAN */ +#ifndef ETH_FLAG_RXVLAN +#define ETH_FLAG_RXVLAN BIT(8) +#endif /* ETH_FLAG_RXVLAN */ + +#define WQ_MEM_RECLAIM WQ_RESCUER + +static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) +{ + WARN_ON(skb->ip_summed != CHECKSUM_NONE); +} +#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) + +static inline void *_kc_vzalloc_node(unsigned long size, int node) +{ + void *addr = vmalloc_node(size, node); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *addr = vmalloc(size); + if (addr) + memset(addr, 0, size); + return addr; +} +#define vzalloc(_size) _kc_vzalloc(_size) + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ + (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb) || + skb->protocol != cpu_to_be16(ETH_P_8021Q)) + return skb->protocol; + + if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) + return 0; + + return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; +} +#endif /* !RHEL5.7+ || RHEL6.0 */ + +#ifdef HAVE_HW_TIME_STAMP +#define SKBTX_HW_TSTAMP BIT(0) +#define SKBTX_IN_PROGRESS BIT(2) +#define SKB_SHARED_TX_IS_UNION +#endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) +#ifndef HAVE_VLAN_RX_REGISTER +#define HAVE_VLAN_RX_REGISTER +#endif +#endif /* > 2.4.18 */ +#endif /* < 2.6.37 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) +#define skb_checksum_start_offset(skb) skb_transport_offset(skb) +#else /* 2.6.22 -> 2.6.37 */ +static inline int 
_kc_skb_checksum_start_offset(const struct sk_buff *skb) +{ + return skb->csum_start - skb_headroom(skb); +} +#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) +#endif /* 2.6.22 -> 2.6.37 */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_MAX_TCS +#define IEEE_8021QAZ_MAX_TCS 8 +#endif +#ifndef DCB_CAP_DCBX_HOST +#define DCB_CAP_DCBX_HOST 0x01 +#endif +#ifndef DCB_CAP_DCBX_LLD_MANAGED +#define DCB_CAP_DCBX_LLD_MANAGED 0x02 +#endif +#ifndef DCB_CAP_DCBX_VER_CEE +#define DCB_CAP_DCBX_VER_CEE 0x04 +#endif +#ifndef DCB_CAP_DCBX_VER_IEEE +#define DCB_CAP_DCBX_VER_IEEE 0x08 +#endif +#ifndef DCB_CAP_DCBX_STATIC +#define DCB_CAP_DCBX_STATIC 0x10 +#endif +#endif /* CONFIG_DCB */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ +#endif /* < 2.6.38 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); \ + skb = tmp, tmp = skb->prev) +#endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef FCOE_MTU +#define FCOE_MTU 2158 +#endif +#endif +#if IS_ENABLED(CONFIG_DCB) +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) +#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) +u16 
___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); +#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) +u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); +#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) +#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ +#if IS_ENABLED(CONFIG_DCB) +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#ifndef IEEE_8021QAZ_TSA_STRICT +#define IEEE_8021QAZ_TSA_STRICT 0 +#endif +#ifndef IEEE_8021QAZ_TSA_ETS +#define IEEE_8021QAZ_TSA_ETS 2 +#endif +#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE +#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 +#endif +#endif +#endif /* CONFIG_DCB */ +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { + csum = csum_add(csum, skb->csum); + } + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 */ +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET +#endif +#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif +#ifdef CONFIG_DCB +#ifndef HAVE_DCBNL_IEEE +#define HAVE_DCBNL_IEEE +#endif +#endif /* CONFIG_DCB */ +#ifndef 
HAVE_NDO_SET_FEATURES +#define HAVE_NDO_SET_FEATURES +#endif +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/*****************************************************************************/ +/* use < 2.6.40 because of a Fedora 15 kernel update where they + * updated the kernel version to 2.6.40.x and they back-ported 3.0 features + * like set_phys_id for ethtool. + */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) +#ifdef ETHTOOL_GRXRINGS +#ifndef FLOW_EXT +#define FLOW_EXT 0x80000000 +union _kc_ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[60]; +}; +struct _kc_ethtool_flow_ext { + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; +struct _kc_ethtool_rx_flow_spec { + __u32 flow_type; + union _kc_ethtool_flow_union h_u; + struct _kc_ethtool_flow_ext h_ext; + union _kc_ethtool_flow_union m_u; + struct _kc_ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; +#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec +#endif /* FLOW_EXT */ +#endif + +#define pci_disable_link_state_locked pci_disable_link_state + +#ifndef PCI_LTR_VALUE_MASK +#define PCI_LTR_VALUE_MASK 0x000003ff +#endif +#ifndef PCI_LTR_SCALE_MASK +#define PCI_LTR_SCALE_MASK 0x00001c00 +#endif +#ifndef PCI_LTR_SCALE_SHIFT +#define PCI_LTR_SCALE_SHIFT 10 +#endif + +#else /* < 2.6.40 */ +#define HAVE_ETHTOOL_SET_PHYS_ID +#endif /* < 2.6.40 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) do { \ + void __kc_kfree_rcu(struct rcu_head *rcu_head) \ + { \ + void *ptr = container_of(rcu_head, \ + typeof(*_ptr), \ + _rcu_head); \ + kfree(ptr); \ + } \ + call_rcu(&(_ptr)->_rcu_head, __kc_kfree_rcu); \ +} while (0) +#define HAVE_KFREE_RCU_BARRIER +#endif /* kfree_rcu */ +#ifndef kstrtol_from_user +#define kstrtol_from_user(s, c, b, 
r) _kc_kstrtol_from_user(s, c, b, r) +static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, + unsigned int base, long *res) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(long) * 8 + 1 + 1]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strict_strtol(buf, base, res); +} +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0) || \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,7))) +/* 20000base_blah_full Supported and Advertised Registers */ +#define SUPPORTED_20000baseMLD2_Full BIT(21) +#define SUPPORTED_20000baseKR2_Full BIT(22) +#define ADVERTISED_20000baseMLD2_Full BIT(21) +#define ADVERTISED_20000baseKR2_Full BIT(22) +#endif /* RHEL_RELEASE_CODE */ +#endif /* < 3.0.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) +#ifndef __netdev_alloc_skb_ip_align +#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) +#endif /* __netdev_alloc_skb_ip_align */ +#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) +#define dcb_ieee_delapp(dev, app) 0 +#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* kernels less than 3.0.0 don't have this */ +#ifndef ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 +#endif + +/* Stub definition for !CONFIG_OF is introduced later */ +#ifdef CONFIG_OF +static inline struct device_node * +pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) +{ +#ifdef HAVE_STRUCT_DEVICE_OF_NODE + return pdev ? 
pdev->dev.of_node : NULL; +#else + return NULL; +#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ +} +#endif /* CONFIG_OF */ +#else /* < 3.1.0 */ +#ifndef HAVE_DCBNL_IEEE_DELAPP +#define HAVE_DCBNL_IEEE_DELAPP +#endif +#endif /* < 3.1.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif +#ifdef ETHTOOL_GRXRINGS +#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS +#endif /* ETHTOOL_GRXRINGS */ + +#ifndef skb_frag_size +#define skb_frag_size(frag) _kc_skb_frag_size(frag) +static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) +static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) _kc_skb_frag_address(frag) +static inline void *_kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif +#define skb_frag_dma_map(dev,frag,offset,size,dir) \ + _kc_skb_frag_dma_map(dev,frag,offset,size,dir) +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, + const 
skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_VF_SPOOFCHK_CONFIGURE +#endif +#ifndef HAVE_SKB_L4_RXHASH +#define HAVE_SKB_L4_RXHASH +#endif +#define HAVE_IOMMU_PRESENT +#define HAVE_PM_QOS_REQUEST_LIST_NEW +#endif /* < 3.2.0 */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) +#undef ixgbe_get_netdev_tc_txq +#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) +#endif +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) +/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs + */ +static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) 
+{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) +static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) +{ + 
return index % n_rx_rings; +} +#endif + +#else /* ! < 3.3.0 */ +#define HAVE_ETHTOOL_GRXFHINDIR_SIZE +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define NUMTCS_RETURNS_U8 + +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff * skb, int i, struct page *page, + int off, int size, unsigned int truesize); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ + ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) +#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ + 
+/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) + +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + +#ifndef ether_addr_equal +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) +#endif + +/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ +#ifdef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif +#else +#include +#define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* < 3.5.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif + +#ifndef __GFP_MEMALLOC +#define __GFP_MEMALLOC 0 +#endif + +#ifndef eth_broadcast_addr +#define eth_broadcast_addr _kc_eth_broadcast_addr +static inline void _kc_eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef eth_random_addr +#define eth_random_addr _kc_eth_random_addr +static inline void _kc_eth_random_addr(u8 *addr) +{ + get_random_bytes(addr, ETH_ALEN); + 
addr[0] &= 0xfe; /* clear multicast */ + addr[0] |= 0x02; /* set local assignment */ +} +#endif /* eth_random_addr */ + +#ifndef DMA_ATTR_SKIP_CPU_SYNC +#define DMA_ATTR_SKIP_CPU_SYNC 0 +#endif +#else /* < 3.6.0 */ +#define HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#include +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full BIT(23) +#define SUPPORTED_40000baseCR4_Full BIT(24) +#define SUPPORTED_40000baseSR4_Full BIT(25) +#define SUPPORTED_40000baseLR4_Full BIT(26) +#define ADVERTISED_40000baseKR4_Full BIT(23) +#define ADVERTISED_40000baseCR4_Full BIT(24) +#define ADVERTISED_40000baseSR4_Full BIT(25) +#define ADVERTISED_40000baseLR4_Full BIT(26) +#endif + +#ifndef mmd_eee_cap_to_ethtool_sup_t +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. 
+ */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) +#endif /* mmd_eee_cap_to_ethtool_sup_t */ + +#ifndef mmd_eee_adv_to_ethtool_adv_t +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. 
+ */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} + +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) +#endif /* mmd_eee_adv_to_ethtool_adv_t */ + +#ifndef ethtool_adv_to_mmd_eee_adv_t +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) +#endif /* ethtool_adv_to_mmd_eee_adv_t */ + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + BUG_ON(!pos); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type 
+#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ + ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ + ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) +#define ptp_clock_register(caps, args...) ptp_clock_register(caps) +#endif + +#ifndef pcie_capability_read_word +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +#endif /* pcie_capability_read_word */ + +#ifndef pcie_capability_read_dword +int __kc_pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); +#define pcie_capability_read_dword(d,p,v) __kc_pcie_capability_read_dword(d,p,v) +#endif + +#ifndef pcie_capability_write_word +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +#endif /* pcie_capability_write_word */ + +#ifndef pcie_capability_clear_and_set_word +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) +#endif /* pcie_capability_clear_and_set_word */ + +#ifndef pcie_capability_clear_word +int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear); +#define pcie_capability_clear_word(d, p, c) \ + __kc_pcie_capability_clear_word(d, p, c) +#endif /* pcie_capability_clear_word */ + +#ifndef PCI_EXP_LNKSTA2 +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif + +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) +#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) +#endif /* !RHEL6.8+ */ + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#include +#else + 
+#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} +#endif /* RHEL >= 6.6 */ + +/* We don't have @flags support prior to 3.7, so we'll simply ignore the flags + * parameter on these older kernels. + */ +#define __setup_timer(_timer, _fn, _data, _flags) \ + setup_timer((_timer), (_fn), (_data)) \ + +#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) ) ) && \ + ( ! 
( SLE_VERSION_CODE >= SLE_VERSION(12,0,0) ) ) + +#ifndef mod_delayed_work +/** + * __mod_delayed_work - modify delay or queue delayed work + * @wq: workqueue to use + * @dwork: delayed work to queue + * @delay: number of jiffies to wait before queueing + * + * Return: %true if @dwork was pending and was rescheduled; + * %false if it wasn't pending + * + * Note: the dwork parameter was declared as a void* + * to avoid comptibility problems with early 2.6 kernels + * where struct delayed_work is not declared. Unlike the original + * implementation flags are not preserved and it shouldn't be + * used in the interrupt context. + */ +static inline bool __mod_delayed_work(struct workqueue_struct *wq, + void *dwork, + unsigned long delay) +{ + bool ret = cancel_delayed_work(dwork); + queue_delayed_work(wq, dwork, delay); + return ret; +} +#define mod_delayed_work(wq, dwork, delay) __mod_delayed_work(wq, dwork, delay) +#endif /* mod_delayed_work */ + +#endif /* !(RHEL >= 6.7) && !(SLE >= 12.0) */ +#else /* >= 3.7.0 */ +#include +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#define HAVE_NDO_FDB_ADD_NLATTR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#ifndef pci_sriov_set_totalvfs +static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) +{ + return 0; +} +#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) +#endif +#endif /* !(RHEL_RELEASE_CODE >= 6.5 && SLE_VERSION_CODE >= 11.4) */ +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved 
Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; + +#ifndef is_link_local_ether_addr +static inline bool __kc_is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) +#endif /* is_link_local_ether_addr */ + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif /* FLOW_MAC_EXT */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#define HAVE_SRIOV_CONFIGURE +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_5_0GB +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ +#endif + +#undef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_5_0GB +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ + +#undef PCI_EXP_LNKCAP2_SLS_8_0GB +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ + +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devinitconst +#define __devinitconst +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_ENCAP_CSUM_OFFLOAD +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef HAVE_GRE_ENCAP_OFFLOAD +#define HAVE_GRE_ENCAP_OFFLOAD +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef 
BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef BUILD_BUG_ON +#ifdef __CHECKER__ +#define BUILD_BUG_ON(condition) (0) +#else /* __CHECKER__ */ +#ifndef __compiletime_warning +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_warning(message) __attribute__((warning(message))) +#else /* __GNUC__ */ +#define __compiletime_warning(message) +#endif /* __GNUC__ */ +#endif /* __compiletime_warning */ +#ifndef __compiletime_error +#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) +#define __compiletime_error(message) __attribute__((error(message))) +#define __compiletime_error_fallback(condition) do { } while (0) +#else /* __GNUC__ */ +#define __compiletime_error(message) +#define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +#endif /* __GNUC__ */ +#else /* __compiletime_error */ +#define __compiletime_error_fallback(condition) do { } while (0) +#endif /* __compiletime_error */ +#define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else /* __OPTIMIZE__ */ +#define BUILD_BUG_ON(condition) \ + 
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif /* __OPTIMIZE__ */ +#endif /* __CHECKER__ */ + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ + }) + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#undef hlist_for_each_entry_continue +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_from +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hash_for_each +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +#undef hash_for_each_safe +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +#undef hash_for_each_possible +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +#undef hash_for_each_possible_safe +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ 
+ hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + +#ifdef CONFIG_XPS +int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define HAVE_FDB_DEL_NLATTR +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifndef NAPI_POLL_WEIGHT +#define NAPI_POLL_WEIGHT 64 +#endif +#ifdef CONFIG_PCI_IOV +int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef list_first_entry_or_null +#define list_first_entry_or_null(ptr, type, member) \ + (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) +#endif + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#ifdef HAVE_FDB_OPS +#if defined(HAVE_NDO_FDB_ADD_NLATTR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 flags); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr, u16 flags); +#else +int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr, u16 flags); +#endif /* HAVE_NDO_FDB_ADD_NLATTR */ +#if defined(HAVE_FDB_DEL_NLATTR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr); +#elif defined(USE_CONST_DEV_UC_CHAR) +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + const unsigned char *addr); +#else +int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, + unsigned char *addr); +#endif /* HAVE_FDB_DEL_NLATTR */ +#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add +#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del +#endif /* HAVE_FDB_OPS */ + +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif + +/* The definitions for these functions when CONFIG_OF_NET is defined are + * pulled in from . For kernels older than 3.5 we already have + * backports for when CONFIG_OF_NET is true. 
These are separated and + * duplicated in order to cover all cases so that all kernels get either the + * real definitions (when CONFIG_OF_NET is defined) or the stub definitions + * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real + * definitions). + */ +#ifndef CONFIG_OF_NET +static inline int of_get_phy_mode(struct device_node __always_unused *np) +{ + return -ENODEV; +} + +static inline const void * +of_get_mac_address(struct device_node __always_unused *np) +{ + return NULL; +} +#endif + +#else /* >= 3.10.0 */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define HAVE_GENEVE_RX_OFFLOAD +#endif /* RHEL < 7.5 */ +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_RHEL7_NET_DEVICE_OPS_EXT +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif /* !HAVE_UDP_ENC_TUNNEL && CONFIG_GENEVE */ +#endif /* RHEL >= 7.3 */ + +/* new hooks added to net_device_ops_extended in RHEL7.4 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* RHEL >= 7.4 */ +#endif /* RHEL >= 7.0 */ +#endif /* >= 3.10.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) +#define netdev_notifier_info_to_dev(ptr) ptr +#ifndef time_in_range64 +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) +#endif /* time_in_range64 */ +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#else /* >= 3.11.0 */ +#define 
HAVE_NDO_SET_VF_LINK_STATE +#endif /* >= 3.11.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) +int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width); +#ifndef pcie_get_minimum_link +#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,7)) +int _kc_pci_wait_for_pending_transaction(struct pci_dev *dev); +#define pci_wait_for_pending_transaction _kc_pci_wait_for_pending_transaction +#endif /* = 3.12.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define HAVE_VXLAN_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NDO_GET_PHYS_PORT_ID +#define HAVE_NETIF_SET_XPS_QUEUE_CONST_MASK +#endif /* >= 3.12.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) +#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) +int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) do { } while(0) +#endif +#undef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) + +#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))) +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif +#ifndef list_next_entry +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif +#ifndef 
list_prev_entry +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) +#endif + +#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) +#define devm_kcalloc(dev, cnt, size, flags) \ + devm_kzalloc(dev, cnt * size, flags) +#endif /* > 2.6.20 */ + +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define list_last_entry(ptr, type, member) list_entry((ptr)->prev, type, member) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +bool _kc_pci_device_is_present(struct pci_dev *pdev); +#define pci_device_is_present _kc_pci_device_is_present +#endif /* = 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#endif + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) + +#ifndef U16_MAX +#define U16_MAX ((u16)~0U) +#endif + +#ifndef U32_MAX +#define U32_MAX ((u32)~0U) +#endif + +#ifndef U64_MAX +#define U64_MAX ((u64)~0ULL) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +#define dev_consume_skb_any(x) dev_kfree_skb_any(x) +#define dev_consume_skb_irq(x) dev_kfree_skb_irq(x) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef skb_set_hash + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; +#define pkt_hash_types _kc_pkt_hash_types + +#define skb_set_hash __kc_skb_set_hash +static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, + int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type 
== PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* !skb_set_hash */ + +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,0)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE <= SLE_VERSION(12,1,0))) +/* GPLv2 code taken from 5.10-rc2 kernel source include/linux/pci.h, Copyright + * original authors. + */ +static inline int pci_enable_msix_exact(struct pci_dev *dev, + struct msix_entry *entries, int nvec) +{ + int rc = pci_enable_msix_range(dev, entries, nvec, nvec); + if (rc < 0) + return rc; + return 0; +} +#endif /* <=EL7.0 || <=SLES 12.1 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#ifndef HAVE_VXLAN_RX_OFFLOAD +#define HAVE_VXLAN_RX_OFFLOAD +#endif /* HAVE_VXLAN_RX_OFFLOAD */ +#endif + +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) +#define HAVE_UDP_ENC_TUNNEL +#endif + +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ + +#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) +#define HAVE_NDO_DFWD_OPS +#endif + +#ifndef pci_enable_msix_range +int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec); +#define pci_enable_msix_range __kc_pci_enable_msix_range +#endif + +#ifndef ether_addr_copy +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +#endif +} +#endif /* ether_addr_copy */ +int __kc_ipv6_find_hdr(const struct sk_buff *skb, 
unsigned int *offset, + int target, unsigned short *fragoff, int *flags); +#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) + +#ifndef OPTIMIZE_HIDE_VAR +#ifdef __GNUC__ +#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#else +#include +#define OPTIMIZE_HIDE_VAR(var) barrier() +#endif +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)) && \ + !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,4,0))) +static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) +{ +#ifdef NETIF_F_RXHASH + return skb->rxhash; +#else + return 0; +#endif /* NETIF_F_RXHASH */ +} +#endif /* !RHEL > 5.9 && !SLES >= 10.4 */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#define request_firmware_direct request_firmware +#endif /* !RHEL || RHEL < 7.5 */ + +#else /* >= 3.14.0 */ + +/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ +#ifndef HAVE_NDO_DFWD_OPS +#define HAVE_NDO_DFWD_OPS +#endif +#endif /* 3.14.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) +#define HAVE_SKBUFF_RXHASH +#endif /* >= 2.6.35 */ +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ + !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh +#endif + +char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); +#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) + +#else /* >= 3.15.0 */ +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_PTP_1588_CLOCK_PINS +#define HAVE_NETDEV_PORT +#endif /* 3.15.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) +#ifndef smp_mb__before_atomic 
+#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif +#ifndef __dev_uc_sync +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST +int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#ifndef NETDEV_HW_ADDR_T_MULTICAST +int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, const unsigned char *)); +void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, + struct net_device *dev, + int (*unsync)(struct net_device *, const unsigned char *)); +#endif +#endif /* HAVE_SET_RX_MODE */ + +static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_UNICAST + return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, + dev, sync, unsync); +#else + return 0; +#endif +} +#define __dev_uc_sync __kc_dev_uc_sync + +static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_UNICAST + __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_UNICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_uc_unsync __kc_dev_uc_unsync 
+ +static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, + int __maybe_unused (*sync)(struct net_device *, const unsigned char *), + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef NETDEV_HW_ADDR_T_MULTICAST + return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); +#elif defined(HAVE_SET_RX_MODE) + return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, + dev, sync, unsync); +#else + return 0; +#endif + +} +#define __dev_mc_sync __kc_dev_mc_sync + +static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, + int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) +{ +#ifdef HAVE_SET_RX_MODE +#ifdef NETDEV_HW_ADDR_T_MULTICAST + __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); +#else /* NETDEV_HW_ADDR_T_MULTICAST */ + __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); +#endif /* NETDEV_HW_ADDR_T_MULTICAST */ +#endif /* HAVE_SET_RX_MODE */ +} +#define __dev_mc_unsync __kc_dev_mc_unsync +#endif /* __dev_uc_sync */ + +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +/* if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes + */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif +void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, + gfp_t gfp); +#define devm_kmemdup __kc_devm_kmemdup + +#else +#if ( ( LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0) ) && \ + ! 
( SLE_VERSION_CODE && ( SLE_VERSION_CODE >= SLE_VERSION(12,4,0)) ) ) +#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY +#endif /* >= 3.16.0 && < 4.13.0 && !(SLES >= 12sp4) */ +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +#endif /* 3.16.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#ifndef timespec64 +#define timespec64 timespec +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} +#define timespec64_equal timespec_equal +#define timespec64_compare timespec_compare +#define set_normalized_timespec64 set_normalized_timespec +#define timespec64_add_safe timespec_add_safe +#define timespec64_add timespec_add +#define timespec64_sub timespec_sub +#define timespec64_valid timespec_valid +#define timespec64_valid_strict timespec_valid_strict +#define timespec64_to_ns timespec_to_ns +#define ns_to_timespec64 ns_to_timespec +#define ktime_to_timespec64 ktime_to_timespec +#define ktime_get_ts64 ktime_get_ts +#define ktime_get_real_ts64 ktime_get_real_ts +#define timespec64_add_ns timespec_add_ns +#endif /* timespec64 */ +#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) +static inline void ktime_get_real_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get_real()); +} + +static inline void ktime_get_ts64(struct timespec64 *ts) +{ + *ts = ktime_to_timespec64(ktime_get()); +} +#endif + +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) +#endif + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5)) +#endif /* RHEL_RELEASE_CODE < 
RHEL7.5 */ + +#if RHEL_RELEASE_CODE && \ + RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,3) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,3) +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} +#endif /* RHEL < 7.3 */ + +#else +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#include +#define HAVE_RHASHTABLE +#endif /* 3.17.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#ifndef NO_PTP_SUPPORT +#include +struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb); +void __kc_skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps); +#define skb_clone_sk __kc_skb_clone_sk +#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp +#endif +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +u32 __kc_eth_get_headlen(const struct net_device *dev, unsigned char *data, + unsigned int max_len); +#else +unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len); +#endif /* !RHEL >= 8.2 */ + +#define eth_get_headlen __kc_eth_get_headlen +#ifndef ETH_P_XDSA +#define ETH_P_XDSA 0x00F8 +#endif +/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) +#define HAVE_SKBUFF_CSUM_LEVEL +#endif /* >= RH 7.1 */ + +/* RHEL 7.3 backported xmit_more */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif /* >= RH 7.3 */ + +#undef GENMASK +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#undef GENMASK_ULL +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +#else /* 3.18.0 */ +#define 
HAVE_SKBUFF_CSUM_LEVEL +#ifndef HAVE_UOS_RELEASE_CODE +#define HAVE_SKB_XMIT_MORE +#endif +#define HAVE_SKB_INNER_PROTOCOL_TYPE +#endif /* 3.18.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) +#else +#define HAVE_NDO_FEATURES_CHECK +#endif /* 3.18.4 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,13) ) +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = (val); }) +#endif +#endif /* 3.18.13 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +/* netdev_phys_port_id renamed to netdev_phys_item_id */ +#define netdev_phys_item_id netdev_phys_port_id + +static inline void _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) { + napi_complete(napi); +} +/* don't use our backport if the distro kernels already have it */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +#define napi_complete_done _kc_napi_complete_done +#endif + +int _kc_bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); +#define bitmap_print_to_pagebuf _kc_bitmap_print_to_pagebuf + +#ifndef NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif +#if (!(RHEL_RELEASE_CODE && \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))))) +#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len) +#endif /* RHEL_RELEASE_CODE */ +void __kc_netdev_rss_key_fill(void *buffer, size_t len); +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif +#ifndef dev_alloc_pages +#ifndef NUMA_NO_NODE +#define 
NUMA_NO_NODE -1 +#endif +#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) +#endif +#ifndef dev_alloc_page +#define dev_alloc_page() dev_alloc_pages(0) +#endif +#if !defined(eth_skb_pad) && !defined(skb_put_padto) +/** + * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} +#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) + +static inline int __kc_eth_skb_pad(struct sk_buff *skb) +{ + return __kc_skb_put_padto(skb, ETH_ZLEN); +} +#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) +#endif /* eth_skb_pad && skb_put_padto */ + +#ifndef SKB_ALLOC_NAPI +/* RHEL 7.2 backported napi_alloc_skb and friends */ +static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) +{ + return netdev_alloc_skb_ip_align(napi->dev, length); +} +#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) +#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) +#endif /* SKB_ALLOC_NAPI */ +#define HAVE_CONFIG_PM_RUNTIME +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RXFH_HASHFUNC +#endif /* 6.7 < RHEL < 7.0 */ +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* RHEL > 7.1 */ +#ifndef napi_schedule_irqoff 
+#define napi_schedule_irqoff napi_schedule +#endif +#ifndef READ_ONCE +#define READ_ONCE(_x) ACCESS_ONCE(_x) +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_FDB_ADD_VID +#define HAVE_NDO_FDB_DEL_VID +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif +#ifndef writel_relaxed +#define writel_relaxed writel +#endif +#else /* 3.19.0 */ +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +#define HAVE_NDO_FDB_ADD_VID +#endif/*KYLIN_RELEASE_CODE */ +#define HAVE_NDO_FDB_DEL_VID +#define HAVE_RXFH_HASHFUNC +#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS +#endif /* 3.19.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#ifndef skb_vlan_tag_get +#define skb_vlan_tag_get vlan_tx_tag_get +#endif +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present vlan_tx_tag_present +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif +#else +#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif/*KYLIN_RELEASE_CODE */ +#endif /* 3.20.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) +/* Definition for CONFIG_OF was introduced earlier */ +#if !defined(CONFIG_OF) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +static inline struct 
device_node * +pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } +#else /* !CONFIG_OF && RHEL < 7.3 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* !CONFIG_OF && RHEL < 7.3 */ +#else /* < 4.0 */ +#define HAVE_DDP_PROFILE_UPLOAD_SUPPORT +#endif /* < 4.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) +#ifndef NO_PTP_SUPPORT +#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H +#include +#else +#include +#endif +static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) +{ + tc->nsec += delta; +} + +static inline struct net_device * +of_find_net_device_by_node(struct device_node __always_unused *np) +{ + return NULL; +} + +#define timecounter_adjtime __kc_timecounter_adjtime +#endif +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#endif +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_RHEL7_EXTENDED_NDO_SET_TX_MAXRATE +#define HAVE_NDO_SET_TX_MAXRATE +#endif +#if !((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,8) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + (SLE_VERSION_CODE > SLE_VERSION(12,1,0))) +unsigned int _kc_cpumask_local_spread(unsigned int i, int node); +#define cpumask_local_spread _kc_cpumask_local_spread +#endif +#ifdef HAVE_RHASHTABLE +#define rhashtable_loopup_fast(ht, key, params) \ + do { \ + (void)params; \ + rhashtable_lookup((ht), (key)); \ + } while (0) + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj), GFP_KERNEL); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + 
rhashtable_remove((ht), (obj), GFP_KERNEL); \ + } while (0) + +#else /* >= 3,19,0 */ +#define rhashtable_insert_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_insert((ht), (obj)); \ + } while (0) + +#define rhashtable_remove_fast(ht, obj, params) \ + do { \ + (void)params; \ + rhashtable_remove((ht), (obj)); \ + } while (0) + +#endif /* 3,19,0 */ +#endif /* HAVE_RHASHTABLE */ +#else /* >= 4,1,0 */ +#define HAVE_NDO_GET_PHYS_PORT_NAME +#define HAVE_PTP_CLOCK_INFO_GETTIME64 +#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS +#define HAVE_PASSTHRU_FEATURES_CHECK +#define HAVE_NDO_SET_VF_RSS_QUERY_EN +#define HAVE_NDO_SET_TX_MAXRATE +#endif /* 4,1,0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) +#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ + !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ + (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) +{ +#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC + return page->pfmemalloc; +#else + return false; +#endif +} +#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ +#else +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#endif /* 4.1.9 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; 
+ +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#endif + +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#if (!((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return bus->self && bus->self->ari_enabled; +} +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) +#define HAVE_VF_STATS +#endif /* (RHEL7.2+) */ +#endif /* !(RHEL6.8+ || RHEL7.2+) */ +#else +static inline bool pci_ari_enabled(struct pci_bus *bus) +{ + return false; +} +#endif /* 2.6.27 */ +#else +#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT +#define HAVE_VF_STATS +#endif /* 4.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +/** + * _kc_flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv4_addrs { + __be32 src; + __be32 dst; +}; + +/** + * _kc_flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct _kc_flow_dissector_key_ipv6_addrs { + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * _kc_flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct _kc_flow_dissector_key_addrs { + union { + struct _kc_flow_dissector_key_ipv4_addrs v4addrs; + struct _kc_flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + +/** + * _kc_flow_dissector_key_tp_ports: + * @ports: port numbers of 
Transport header + * src: source port number + * dst: destination port number + */ +struct _kc_flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + +/** + * _kc_flow_dissector_key_basic: + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + * @padding: padding for alignment + */ +struct _kc_flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct _kc_flow_keys { + struct _kc_flow_dissector_key_basic basic; + struct _kc_flow_dissector_key_ports ports; + struct _kc_flow_dissector_key_addrs addrs; +}; + +/* These are all the include files for kernels inside this #ifdef block that + * have any reference to the in kernel definition of struct flow_keys. The + * reason for putting them here is to make 100% sure that these files do not get + * included after re-defining flow_keys to _kc_flow_keys. This is done to + * prevent any possible ABI issues that this structure re-definition could case. + */ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) || \ + RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) || \ + SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) +#include +#endif /* (>= 3.3.0 && < 4.2.0) || >= RHEL 6.7 || >= SLE 11.4 */ +#if (LINUX_VERSION_CODE == KERNEL_VERSION(4,2,0)) +#include +#endif /* 4.2.0 */ +#include +#include +#include +#include + +#define flow_keys _kc_flow_keys +bool +_kc_skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow, + unsigned int __always_unused flags); +#define skb_flow_dissect_flow_keys _kc_skb_flow_dissect_flow_keys +#endif /* ! >= RHEL 7.4 && ! 
>= SLES 12.2 */ + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(12,2,0))) +#include +#endif /* >= RHEL7.3 || >= SLE12sp2 */ +#else /* >= 4.3.0 */ +#include +#endif /* 4.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_NDO_SET_VF_TRUST +#endif /* (RHEL_RELEASE >= 7.3) */ +#ifndef CONFIG_64BIT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) +#include /* 32-bit readq/writeq */ +#else /* 3.3.0 => 4.3.x */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) +#include +#endif /* 2.6.26 => 3.3.0 */ +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#define readq readq +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#define writeq writeq +#endif +#endif /* < 3.3.0 */ +#endif /* !CONFIG_64BIT */ +#else /* < 4.4.0 */ +#define HAVE_NDO_SET_VF_TRUST + +#ifndef CONFIG_64BIT +#include /* 32-bit readq/writeq */ +#endif /* !CONFIG_64BIT */ +#endif /* 4.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) +/* protect against a likely backport */ +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM +#endif /* NETIF_F_CSUM_MASK */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) +#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address +int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, + u8 *mac_addr __maybe_unused); +#endif /* !(RHEL_RELEASE >= 7.3) */ +#else /* 4.5.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) +#define 
HAVE_GENEVE_RX_OFFLOAD +#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) +#define HAVE_UDP_ENC_TUNNEL +#endif +#endif /* < 4.8.0 */ +#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD +#define HAVE_NETDEV_UPPER_INFO +#endif /* 4.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,3)) +static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) + return skb->head + skb->csum_start; +#else /* < 2.6.22 */ + return skb_transport_header(skb); +#endif +} +#endif + +#if !(UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ + !(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +static inline void napi_consume_skb(struct sk_buff *skb, + int __always_unused budget) +{ + dev_consume_skb_any(skb); +} + +#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ +#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +{ + * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); +} +#endif +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) +static inline void page_ref_inc(struct page *page) +{ + get_page(page); +} +#else +#define HAVE_PAGE_COUNT_BULK_UPDATE +#endif +#ifndef IPV4_USER_FLOW +#define IPV4_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#endif + +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_TC_SETUP_CLSFLOWER +#define HAVE_TC_FLOWER_ENC +#endif + +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) || \ + (SLE_VERSION_CODE >= 
SLE_VERSION(12,2,0))) +#define HAVE_TC_SETUP_CLSU32 +#endif + +#if (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)) +#define HAVE_TC_SETUP_CLSFLOWER +#endif + +#else /* >= 4.6.0 */ +#define HAVE_PAGE_COUNT_BULK_UPDATE +#define HAVE_ETHTOOL_FLOW_UNION_IP6_SPEC +#define HAVE_PTP_CROSSTIMESTAMP +#if !((KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,4) && !defined(HAVE_UOS_RELEASE_CODE)) || \ + (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4) && defined(HAVE_UOS_RELEASE_CODE))) +#define HAVE_TC_SETUP_CLSFLOWER +#endif/*KYLIN UOS*/ +#define HAVE_TC_SETUP_CLSU32 +#endif /* 4.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4))) +#define HAVE_NETIF_TRANS_UPDATE +#endif /* SLES12sp3+ || RHEL7.4+ */ +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ||\ + (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#endif /* RHEL7.3+ || SLES12sp3+ */ +#else /* 4.7.0 */ +#define HAVE_NETIF_TRANS_UPDATE +#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE +#define HAVE_ETHTOOL_25G_BITS +#define HAVE_ETHTOOL_50G_BITS +#define HAVE_ETHTOOL_100G_BITS +#define HAVE_TCF_MIRRED_REDIRECT +#endif /* 4.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +enum udp_parsable_tunnel_type { + UDP_TUNNEL_TYPE_VXLAN, + UDP_TUNNEL_TYPE_GENEVE, +}; +struct udp_tunnel_info { + unsigned short type; + sa_family_t sa_family; + __be16 port; +}; +#endif + +#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,8,0,0)) +#define tc_no_actions(_exts) true +#define tc_for_each_action(_a, _exts) while (0) +#endif +#if !(SLE_VERSION_CODE && 
(SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_io_regions(struct pci_dev *pdev, char *name) +#else +pci_request_io_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO), name); +} + +static inline void +pci_release_io_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_IO)); +} + +static inline int +#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME +pci_request_mem_regions(struct pci_dev *pdev, char *name) +#else +pci_request_mem_regions(struct pci_dev *pdev, const char *name) +#endif +{ + return pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), name); +} + +static inline void +pci_release_mem_regions(struct pci_dev *pdev) +{ + return pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +} +#endif /* !SLE_VERSION(12,3,0) */ +#else +#define HAVE_UDP_ENC_RX_OFFLOAD +#endif /* 4.8.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) +#ifdef HAVE_TC_SETUP_CLSFLOWER +#if (!(RHEL_RELEASE_CODE) && !(SLE_VERSION_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0)))) +#define HAVE_TC_FLOWER_VLAN_IN_TAGS +#endif /* !RHEL_RELEASE_CODE && !SLE_VERSION_CODE || = RHEL_RELEASE_VERSION(7,4)) +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* RHEL7.4+ */ +#if (!(SLE_VERSION_CODE) && !(RHEL_RELEASE_CODE)) || \ + SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0)) || \ + RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)) +#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a) +#endif /* !SLE_VERSION_CODE && !RHEL_RELEASE_CODE || (SLES <= 12.3.0) || (RHEL <= 7.5) */ +#if 
(RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,4)) +static inline void bitmap_from_u64(unsigned long *dst, u64 mask) +{ + dst[0] = mask & ULONG_MAX; + + if (sizeof(mask) > sizeof(unsigned long)) + dst[1] = mask >> 32; +} +#endif /* = RHEL_RELEASE_VERSION(7,4)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,3,0)) && \ + !(UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,13,0,16))) +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): +#ifdef ETH_P_8021AD + case htons(ETH_P_8021AD): +#endif + return true; + default: + return false; + } +} +#endif /* Linux < 4.9 || RHEL < 7.4 || SLES < 12.3 || Ubuntu < 4.3.0-16 */ +#else /* >=4.9 */ +#define HAVE_FLOW_DISSECTOR_KEY_VLAN_PRIO +#define HAVE_ETHTOOL_NEW_1G_BITS +#define HAVE_ETHTOOL_NEW_10G_BITS +#endif /* KERNEL_VERSION(4.9.0) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +/* SLES 12.3 and RHEL 7.5 backported this interface */ +#if (!SLE_VERSION_CODE && !RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done2(struct napi_struct *napi, + int __always_unused work_done) +{ + /* it was really hard to get napi_complete_done to be safe to call + * recursively without running into our own kcompat, so just use + * napi_complete + */ + napi_complete(napi); + + /* true means that the stack is telling the driver to go-ahead and + * re-enable interrupts + */ + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done2 +#endif /* sles and rhel exclusion for < 4.10 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +#endif /* RHEL7.4+ */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE == 
SLE_VERSION(12,3,0))) +#define HAVE_STRUCT_DMA_ATTRS +#endif /* (SLES == 12.3.0) */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif /* (SLES >= 12.3.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_STRUCT_DMA_ATTRS +#define HAVE_RHEL7_EXTENDED_MIN_MAX_MTU +#define HAVE_NETDEVICE_MIN_MAX_MTU +#endif +#if (!(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif + +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif + +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ +#ifdef HAVE_PAGE_COUNT_BULK_UPDATE + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); +#else + BUG_ON(count > 1); + if (!count) + return; +#endif + __free_pages(page, compound_order(page)); +} +#endif /* !SLE_VERSION(12,3,0) && !RHEL_VERSION(7,5) */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) ||\ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#endif + +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE < SLE_VERSION(15,0,0))) ||\ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,4)))) +#define page_frag_free __free_page_frag +#endif +#ifndef ETH_MIN_MTU 
+#define ETH_MIN_MTU 68 +#endif /* ETH_MIN_MTU */ +#else /* >= 4.10 */ +#define HAVE_TC_FLOWER_ENC +#define HAVE_NETDEVICE_MIN_MAX_MTU +#define HAVE_SWIOTLB_SKIP_CPU_SYNC +#define HAVE_NETDEV_TC_RESETS_XPS +#define HAVE_XPS_QOS_SUPPORT +#define HAVE_DEV_WALK_API +#define HAVE_ETHTOOL_NEW_2500MB_BITS +#define HAVE_ETHTOOL_5G_BITS +/* kernel 4.10 onwards, as part of busy_poll rewrite, new state were added + * which is part of NAPI:state. If NAPI:state=NAPI_STATE_IN_BUSY_POLL, + * it means napi_poll is invoked in busy_poll context + */ +#define HAVE_NAPI_STATE_IN_BUSY_POLL +#define HAVE_TCF_MIRRED_EGRESS_REDIRECT +#endif /* 4.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif /* CONFIG_NET_RX_BUSY_POLL */ +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#define HAVE_VOID_NDO_GET_STATS64 +#endif /* (SLES >= 12.3.0) && (RHEL >= 7.5) */ + +static inline void _kc_dev_kfree_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_irq(skb); +} + +#undef dev_kfree_skb_irq +#define dev_kfree_skb_irq _kc_dev_kfree_skb_irq + +static inline void _kc_dev_consume_skb_irq(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_irq(skb); +} + +#undef dev_consume_skb_irq +#define dev_consume_skb_irq _kc_dev_consume_skb_irq + +static inline void _kc_dev_kfree_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_kfree_skb_any(skb); +} + +#undef dev_kfree_skb_any +#define dev_kfree_skb_any _kc_dev_kfree_skb_any + +static inline void _kc_dev_consume_skb_any(struct sk_buff *skb) +{ + if (!skb) + return; + dev_consume_skb_any(skb); +} + +#undef dev_consume_skb_any +#define dev_consume_skb_any _kc_dev_consume_skb_any + +#else /* > 4.11 */ +#define HAVE_VOID_NDO_GET_STATS64 +#define HAVE_VM_OPS_FAULT_NO_VMA 
+#endif /* 4.11.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) +/* The RHEL 7.7+ NL_SET_ERR_MSG_MOD triggers unused parameter warnings */ +#undef NL_SET_ERR_MSG_MOD +#endif +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) \ + do { \ + uninitialized_var(extack); \ + pr_err(KBUILD_MODNAME ": " msg); \ + } while (0) +#endif /* !NL_SET_ERR_MSG_MOD */ +#endif /* 4.12 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#if ((SLE_VERSION_CODE && (SLE_VERSION_CODE > SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_TCF_EXTS_HAS_ACTION +#endif +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#endif /* SLES >= 12sp4 */ +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; +#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_t) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2) +{ + return memcmp(u1, u2, sizeof(uuid_t)) == 0; +} +#else +#define HAVE_METADATA_PORT_INFO +#endif /* !(RHEL >= 7.5) && !(SLES >= 12.4) */ +#else /* > 4.13 */ +#define HAVE_METADATA_PORT_INFO +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_NDO_SETUP_TC_CHAIN_INDEX +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define 
HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef ethtool_link_ksettings_del_link_mode +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +#endif +#endif /* ETHTOOL_GLINKSETTINGS */ +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,4,0))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#endif + +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SETUP_TC +#endif + +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + +#ifndef xdp_do_flush_map +#define xdp_do_flush_map() do {} while (0) +#endif +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +struct _kc_bpf_prog { +}; +#define bpf_prog _kc_bpf_prog +#ifndef DIV_ROUND_DOWN_ULL +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) +#endif /* DIV_ROUND_DOWN_ULL */ +#else /* > 4.14 */ +#define HAVE_XDP_SUPPORT +#define HAVE_NDO_SETUP_TC_REMOVE_TC_TO_NETDEV +#define HAVE_TCF_EXTS_HAS_ACTION +#endif /* 4.14.0 */ + +/*****************************************************************************/ +#ifndef ETHTOOL_GLINKSETTINGS + +#define __ETHTOOL_LINK_MODE_MASK_NBITS 32 +#define ETHTOOL_LINK_MASK_SIZE BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS) + +/** + * struct ethtool_link_ksettings + * @link_modes: supported and advertising, single 
item arrays + * @link_modes.supported: bitmask of supported link speeds + * @link_modes.advertising: bitmask of currently advertised speeds + * @base: base link details + * @base.speed: current link speed + * @base.port: current port type + * @base.duplex: current duplex mode + * @base.autoneg: current autonegotiation settings + * + * This struct and the following macros provide a way to support the old + * ethtool get/set_settings API on older kernels, but in the style of the new + * GLINKSETTINGS API. In this way, the same code can be used to support both + * APIs as seemlessly as possible. + * + * It should be noted the old API only has support up to the first 32 bits. + */ +struct ethtool_link_ksettings { + struct { + u32 speed; + u8 port; + u8 duplex; + u8 autoneg; + } base; + struct { + unsigned long supported[ETHTOOL_LINK_MASK_SIZE]; + unsigned long advertising[ETHTOOL_LINK_MASK_SIZE]; + } link_modes; +}; + +#define ETHTOOL_LINK_NAME_advertising(mode) ADVERTISED_ ## mode +#define ETHTOOL_LINK_NAME_supported(mode) SUPPORTED_ ## mode +#define ETHTOOL_LINK_NAME(name) ETHTOOL_LINK_NAME_ ## name +#define ETHTOOL_LINK_CONVERT(name, mode) ETHTOOL_LINK_NAME(name)(mode) + +/** + * ethtool_link_ksettings_zero_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + */ +#define ethtool_link_ksettings_zero_link_mode(ptr, name)\ + (*((ptr)->link_modes.name) = 0x0) + +/** + * ethtool_link_ksettings_add_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_add_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) |= (typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_del_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to delete + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode)\ + (*((ptr)->link_modes.name) &= 
~(typeof(*((ptr)->link_modes.name)))ETHTOOL_LINK_CONVERT(name, mode)) + +/** + * ethtool_link_ksettings_test_link_mode + * @ptr: ptr to ksettings struct + * @name: supported or advertising + * @mode: link mode to add + */ +#define ethtool_link_ksettings_test_link_mode(ptr, name, mode)\ + (!!(*((ptr)->link_modes.name) & ETHTOOL_LINK_CONVERT(name, mode))) + +/** + * _kc_ethtool_ksettings_to_cmd - Convert ethtool_link_ksettings to ethtool_cmd + * @ks: ethtool_link_ksettings struct + * @cmd: ethtool_cmd struct + * + * Convert an ethtool_link_ksettings structure into the older ethtool_cmd + * structure. We provide this in kcompat.h so that drivers can easily + * implement the older .{get|set}_settings as wrappers around the new api. + * Hence, we keep it prefixed with _kc_ to make it clear this isn't actually + * a real function in the kernel. + */ +static inline void +_kc_ethtool_ksettings_to_cmd(struct ethtool_link_ksettings *ks, + struct ethtool_cmd *cmd) +{ + cmd->supported = (u32)ks->link_modes.supported[0]; + cmd->advertising = (u32)ks->link_modes.advertising[0]; + ethtool_cmd_speed_set(cmd, ks->base.speed); + cmd->duplex = ks->base.duplex; + cmd->autoneg = ks->base.autoneg; + cmd->port = ks->base.port; +} + +#endif /* !ETHTOOL_GLINKSETTINGS */ + +/*****************************************************************************/ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE <= SLE_VERSION(12,3,0))) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(7,5)))) +#define phy_speed_to_str _kc_phy_speed_to_str +const char *_kc_phy_speed_to_str(int speed); +#else /* (LINUX >= 4.14.0) || (SLES > 12.3.0) || (RHEL > 7.5) */ +#include +#endif /* (LINUX < 4.14.0) || (SLES <= 12.3.0) || (RHEL <= 7.5) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(7,6))) || \ + (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#else /* RHEL >= 7.6 || SLES >= 15.1 */ +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +void _kc_ethtool_intersect_link_masks(struct ethtool_link_ksettings *dst, + struct ethtool_link_ksettings *src); +#define ethtool_intersect_link_masks _kc_ethtool_intersect_link_masks +#else /* >= 4.15 */ +#define HAVE_NDO_BPF +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#define HAVE_TCF_BLOCK +#endif /* 4.15.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,4,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +/* The return value of the strscpy() and strlcpy() functions is different. + * This could be potentially hazard for the future. + * To avoid this the void result is forced. + * So it is not possible use this function with the return value. + * Return value is required in kernel 4.3 through 4.15 + */ +#define strscpy(...) (void)(strlcpy(__VA_ARGS__)) +#endif /* !RHEL >= 7.7 && !SLES12sp4+ && !SLES15sp1+ */ + +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) 
dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + +#ifndef array_index_nospec +static inline unsigned long _kc_array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. + */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} + +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = _kc_array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + (typeof(_i)) (_i & _mask); \ +}) +#endif /* array_index_nospec */ +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6))) && \ + !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0)))) +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + + return true; +} +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#endif /* !(RHEL >= 7.6) && !(SLES >= 15.1) */ +#ifndef sizeof_field +#define sizeof_field(TYPE, MEMBER) (sizeof((((TYPE *)0)->MEMBER))) +#endif /* sizeof_field */ +#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0) || \ + SLE_VERSION_CODE >= SLE_VERSION(15,1,0)) +/* + * Copy bitmap and clear tail bits in last word. 
+ */ +static inline void +bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits) +{ + bitmap_copy(dst, src, nbits); + if (nbits % BITS_PER_LONG) + dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); +} + +/* + * On 32-bit systems bitmaps are represented as u32 arrays internally, and + * therefore conversion is not needed when copying data from/to arrays of u32. + */ +#if BITS_PER_LONG == 64 +void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits); +#else +#define bitmap_from_arr32(bitmap, buf, nbits) \ + bitmap_copy_clear_tail((unsigned long *) (bitmap), \ + (const unsigned long *) (buf), (nbits)) +#endif /* BITS_PER_LONG == 64 */ +#endif /* !(RHEL >= 8.0) && !(SLES >= 12.5 && SLES < 15.0 || SLES >= 15.1) */ +#else /* >= 4.16 */ +#include +#define HAVE_XDP_BUFF_RXQ +#define HAVE_TC_FLOWER_OFFLOAD_COMMON_EXTACK +#define HAVE_TCF_MIRRED_DEV +#define HAVE_VF_STATS_DROPPED +#endif /* 4.16.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0)) +#include +#include +#define PCIE_SPEED_16_0GT 0x17 +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +void _kc_pcie_print_link_status(struct pci_dev *dev); +#define pcie_print_link_status _kc_pcie_print_link_status +#else /* >= 4.17.0 */ +#define HAVE_XDP_BUFF_IN_XDP_H +#endif /* 4.17.0 */ + +/*****************************************************************************/ +#if IS_ENABLED(CONFIG_NET_DEVLINK) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#include +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) && \ + (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7))) +enum devlink_port_flavour { + DEVLINK_PORT_FLAVOUR_PHYSICAL, + 
DEVLINK_PORT_FLAVOUR_CPU, + DEVLINK_PORT_FLAVOUR_DSA, + DEVLINK_PORT_FLAVOUR_PCI_PF, + DEVLINK_PORT_FLAVOUR_PCI_VF, +}; +#endif /* <4.18.0 && +#ifndef macvlan_supports_dest_filter +#define macvlan_supports_dest_filter _kc_macvlan_supports_dest_filter +static inline bool _kc_macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#if (!SLE_VERSION_CODE || (SLE_VERSION_CODE < SLE_VERSION(15,1,0))) +#ifndef macvlan_accel_priv +#define macvlan_accel_priv _kc_macvlan_accel_priv +static inline void *_kc_macvlan_accel_priv(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->fwd_priv; +} +#endif + +#ifndef macvlan_release_l2fw_offload +#define macvlan_release_l2fw_offload _kc_macvlan_release_l2fw_offload +static inline int _kc_macvlan_release_l2fw_offload(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + macvlan->fwd_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); +} +#endif +#endif /* !SLES || SLES < 15.1 */ +#endif /* NETIF_F_HW_L2FW_DOFFLOAD */ +#include "kcompat_overflow.h" + +#if (SLE_VERSION_CODE < SLE_VERSION(15,1,0)) +#define firmware_request_nowarn request_firmware_direct + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,7)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + if (attrs->split) + devlink_port_split_set(devlink_port, attrs->phys.port_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* +#include +#define HAVE_AF_XDP_SUPPORT +#endif /* 4.18.0 */ + +/*****************************************************************************/ 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0)) && \ + (RHEL_RELEASE_CODE <= RHEL_RELEASE_VERSION(8,2))) +#define HAVE_DEVLINK_REGIONS +#endif /* RHEL >= 8.0 && RHEL <= 8.2 */ +#define bitmap_alloc(nbits, flags) \ + kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags) +#define bitmap_zalloc(nbits, flags) bitmap_alloc(nbits, ((flags) | __GFP_ZERO)) +#define bitmap_free(bitmap) kfree(bitmap) +#ifdef ETHTOOL_GLINKSETTINGS +#define ethtool_ks_clear(ptr, name) \ + ethtool_link_ksettings_zero_link_mode(ptr, name) +#define ethtool_ks_add_mode(ptr, name, mode) \ + ethtool_link_ksettings_add_link_mode(ptr, name, mode) +#define ethtool_ks_del_mode(ptr, name, mode) \ + ethtool_link_ksettings_del_link_mode(ptr, name, mode) +#define ethtool_ks_test(ptr, name, mode) \ + ethtool_link_ksettings_test_link_mode(ptr, name, mode) +#endif /* ETHTOOL_GLINKSETTINGS */ +#define HAVE_NETPOLL_CONTROLLER +#define REQUIRE_PCI_CLEANUP_AER_ERROR_STATUS +#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_TCF_MIRRED_DEV +#endif + +static inline void __kc_metadata_dst_free(void *md_dst) +{ + kfree(md_dst); +} + +#define metadata_dst_free(md_dst) __kc_metadata_dst_free(md_dst) +#elif (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,0)) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +#define HAVE_DEVLINK_REGIONS +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4,19,91)) +#define PPM_TO_PPB +#define NEED_XARRAY +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,19,90)) +#define HAVE_DEV_PRINTK_OPS +#define HAVE_IOPOLL_OPS +#endif /* <4.19.90 */ +#endif /* KYLIN_RELEASE_VERSION */ +#endif /* >4.19.0 */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) && LINUX_VERSION_CODE <= KERNEL_VERSION(5,4,119)) +#if (LINUX_VERSION_CODE != KERNEL_VERSION(4,19,0)) +#define NETDEVICE_OPS +#define NO_XA_FOR_EACH_RANGE +#define DEFAULT_GROUPS +#endif /* !=4.19.0 */ +#if (RHEL_RELEASE_CODE == 
RHEL_RELEASE_VERSION(8,7)) +#define HAVE_DEVLINK_REGISTER_GET_1_PARAMS +#undef HAVE_DEVLINK_ALLOC_GET_1_PARAMS +#undef NETDEVICE_OPS +#define HAVE_ETHTOOL_RING_PARAM +#define ETHTOOL_COALESCE_CFG +#endif +#define NEED_SYSFS_EMIT +#ifndef ETH_MODULE_SFF_8636_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#endif +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif +#endif /* >=4.18.0 <=5.4.119 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#ifndef napi_if_scheduled_mark_missed +static inline bool __kc_napi_if_scheduled_mark_missed(struct napi_struct *n) +{ + unsigned long val, new; + + do { + val = READ_ONCE(n->state); + if (val & NAPIF_STATE_DISABLE) + return true; + + if (!(val & NAPIF_STATE_SCHED)) + return false; + + new = val | NAPIF_STATE_MISSED; + } while (cmpxchg(&n->state, val, new) != val); + + return true; +} + +#define napi_if_scheduled_mark_missed __kc_napi_if_scheduled_mark_missed +#endif /* !napi_if_scheduled_mark_missed */ +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* RHEL >= 8.0 */ +#if ((SLE_VERSION_CODE >= SLE_VERSION(12,5,0) && \ + SLE_VERSION_CODE < SLE_VERSION(15,0,0)) || \ + (SLE_VERSION_CODE >= SLE_VERSION(15,1,0))) +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#endif /* SLE == 12sp5 || SLE >= 15sp1 */ +#else /* >= 4.20.0 */ +#define HAVE_DEVLINK_ESWITCH_OPS_EXTACK +#define HAVE_AF_XDP_ZC_SUPPORT +#define HAVE_VXLAN_TYPE +#define HAVE_ETF_SUPPORT /* Earliest TxTime First */ +#endif /* 4.20.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,0))) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define 
NETLINK_MAX_COOKIE_LEN 20 +struct netlink_ext_ack { + const char *_msg; + const struct nlattr *bad_attr; + u8 cookie[NETLINK_MAX_COOKIE_LEN]; + u8 cookie_len; +}; + +#endif /* < 4.12 */ +static inline int _kc_dev_open(struct net_device *netdev, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_open(netdev); +} + +#define dev_open _kc_dev_open + +static inline int +_kc_dev_change_flags(struct net_device *netdev, unsigned int flags, + struct netlink_ext_ack __always_unused *extack) +{ + return dev_change_flags(netdev, flags); +} + +#define dev_change_flags _kc_dev_change_flags +#endif /* !(RHEL_RELEASE_CODE && RHEL > RHEL(8,0)) */ +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,7) && \ + RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)) || \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#else /* RHEL >= 7.7 && RHEL < 8.0 || RHEL >= 8.1 */ +#if !((KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,4) && !defined(HAVE_UOS_RELEASE_CODE)) || \ + (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4) && defined(HAVE_UOS_RELEASE_CODE))) +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + +static inline void +ptp_read_system_prets(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} + +static inline void +ptp_read_system_postts(struct ptp_system_timestamp __always_unused *sts) +{ + ; +} +#endif/*KYLIN_RELEASE_CODE */ +#endif /* !(RHEL >= 7.7 && RHEL != 8.0) */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif /* RHEL 8.1 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#define HAVE_TC_INDIR_BLOCK +#endif /* RHEL 8.2 */ +#else /* >= 5.0.0 */ +#define HAVE_PTP_SYS_OFFSET_EXTENDED_IOCTL +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_DMA_ALLOC_COHERENT_ZEROES_MEM +#define HAVE_GENEVE_TYPE +#define HAVE_TC_INDIR_BLOCK +#endif /* 5.0.0 */ + 
+/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#else /* RHEL < 8.1 */ +#ifdef HAVE_TC_SETUP_CLSFLOWER +#include + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +#ifdef HAVE_TC_FLOWER_ENC +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; +}; +#endif + +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; +#endif + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_rule { + struct flow_match match; +#if 0 + /* In 5.1+ kernels, action is a member of struct flow_rule but is + * not compatible with how we kcompat tc_cls_flower_offload_flow_rule + * below. By not declaring it here, any driver that attempts to use + * action as an element of struct flow_rule will fail to compile + * instead of silently trying to access memory that shouldn't be. 
+ */ + struct flow_action action; +#endif +}; + +void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out); +void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out); +#ifndef HAVE_TC_FLOWER_VLAN_IN_TAGS +void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out); +#endif +void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +#ifdef HAVE_TC_FLOWER_ENC +void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out); +void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out); +void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out); +void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out); +void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out); +#endif + +static inline struct flow_rule * +tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd) +{ + return (struct flow_rule *)&tc_flow_cmd->dissector; +} + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} +#endif /* HAVE_TC_SETUP_CLSFLOWER */ + +#endif /* RHEL < 8.1 */ + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define devlink_params_publish(devlink) do { } while (0) +#define devlink_params_unpublish(devlink) do { } while (0) +#endif + +#else /* >= 5.1.0 */ +#define HAVE_NDO_FDB_ADD_EXTACK +#define NO_XDP_QUERY_XSK_UMEM 
+#define HAVE_AF_XDP_NETDEV_UMEM +#define HAVE_TC_FLOW_RULE_INFRASTRUCTURE +#define HAVE_TC_FLOWER_ENC_IP +#define HAVE_DEVLINK_INFO_GET +#define HAVE_DEVLINK_FLASH_UPDATE +#define HAVE_DEVLINK_PORT_PARAMS +#endif /* 5.1.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#if (defined HAVE_SKB_XMIT_MORE) && \ +(!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +#define netdev_xmit_more() (skb->xmit_more) +#endif +#else +#define netdev_xmit_more() (0) +#endif + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)))) +#ifndef eth_get_headlen +#if !((KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,4) && !defined(HAVE_UOS_RELEASE_CODE)) || \ + (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4) && defined(HAVE_UOS_RELEASE_CODE))) +static inline u32 +__kc_eth_get_headlen(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) __kc_eth_get_headlen(dev, data, len) +#endif/*KYLIN UOS*/ +#endif /* !eth_get_headlen */ +#endif /* !RHEL >= 8.2 */ + +#ifndef mmiowb +#ifdef CONFIG_IA64 +#define mmiowb() asm volatile ("mf.a" ::: "memory") +#else +#define mmiowb() +#endif +#endif /* mmiowb */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* RHEL_RELEASE_VERSION(8,1)) +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* RHEL > 8.1 */ 
+ +#else /* >= 5.2.0 */ +#define HAVE_NDO_SELECT_QUEUE_FALLBACK_REMOVED +#define SPIN_UNLOCK_IMPLIES_MMIOWB +#define HAVE_NDO_GET_DEVLINK_PORT +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define flow_block_offload tc_block_offload +#define flow_block_command tc_block_command +#define flow_cls_offload tc_cls_flower_offload +#define flow_block_binder_type tcf_block_binder_type +#define flow_cls_common_offload tc_cls_common_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#define FLOW_CLS_TMPLT_CREATE TC_CLSFLOWER_TMPLT_CREATE +#define FLOW_CLS_TMPLT_DESTROY TC_CLSFLOWER_TMPLT_DESTROY +#define FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS \ + TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS +#define FLOW_BLOCK_BIND TC_BLOCK_BIND +#define FLOW_BLOCK_UNBIND TC_BLOCK_UNBIND + +#ifdef HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO +#include + +int _kc_flow_block_cb_setup_simple(struct flow_block_offload *f, + struct list_head *driver_list, + tc_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + bool ingress_only); + +#define flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) \ + _kc_flow_block_cb_setup_simple(f, driver_list, cb, cb_ident, cb_priv, \ + ingress_only) +#endif /* HAVE_TC_CB_AND_SETUP_QDISC_MQPRIO */ +#else /* RHEL >= 8.2 */ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#endif /* RHEL >= 8.2 */ + +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif /* !ETH_P_LLDP */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,2)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +static inline void +devlink_flash_update_begin_notify(struct devlink __always_unused *devlink) +{ 
+} + +static inline void +devlink_flash_update_end_notify(struct devlink __always_unused *devlink) +{ +} + +static inline void +devlink_flash_update_status_notify(struct devlink __always_unused *devlink, + const char __always_unused *status_msg, + const char __always_unused *component, + unsigned long __always_unused done, + unsigned long __always_unused total) +{ +} +#endif/*KYLIN_RELEASE_CODE */ +#endif /* CONFIG_NET_DEVLINK */ +#endif /* = 5.3.0 */ +#define XSK_UMEM_RETURNS_XDP_DESC +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#define HAVE_XSK_UMEM_HAS_ADDRS +#endif /* < 5.8.0*/ +#define HAVE_FLOW_BLOCK_API +#define HAVE_DEVLINK_PORT_ATTR_PCI_VF +#if IS_ENABLED(CONFIG_DIMLIB) +#define HAVE_CONFIG_DIMLIB +#endif +#endif /* 5.3.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) && \ + !(SLE_VERSION_CODE >= SLE_VERSION(15,2,0))) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +static inline unsigned int skb_frag_off(const skb_frag_t *frag) +{ + return frag->page_offset; +} + +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif/*KYLIN_RELEASE_CODE */ +#define __flow_indr_block_cb_register __tc_indr_block_cb_register +#define __flow_indr_block_cb_unregister __tc_indr_block_cb_unregister +#endif /* !(RHEL >= 8.2) && !(SLES >= 15sp2) */ +#if (SLE_VERSION_CODE >= SLE_VERSION(15,2,0)) +#define HAVE_NDO_XSK_WAKEUP +#endif /* SLES15sp2 */ +#else /* >= 5.4.0 */ +#define HAVE_NDO_XSK_WAKEUP +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,5,0)) +static inline unsigned long _kc_bitmap_get_value8(const unsigned long *map, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + return (map[index] >> 
offset) & 0xFF; +} +#define bitmap_get_value8 _kc_bitmap_get_value8 + +static inline void _kc_bitmap_set_value8(unsigned long *map, + unsigned long value, + unsigned long start) +{ + const size_t index = BIT_WORD(start); + const unsigned long offset = start % BITS_PER_LONG; + + map[index] &= ~(0xFFUL << offset); + map[index] |= value << offset; +} +#define bitmap_set_value8 _kc_bitmap_set_value8 + +#endif /* 5.5.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#ifdef HAVE_AF_XDP_SUPPORT +#define xsk_umem_release_addr xsk_umem_discard_addr +#define xsk_umem_release_addr_rq xsk_umem_discard_addr_rq +#endif /* HAVE_AF_XDP_SUPPORT */ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif +#else /* >= 5.6.0 */ +#define HAVE_TX_TIMEOUT_TXQUEUE +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) +u64 _kc_pci_get_dsn(struct pci_dev *dev); +#define pci_get_dsn(dev) _kc_pci_get_dsn(dev) +#if !(SLE_VERSION_CODE > SLE_VERSION(15,2,0)) && \ + !((LINUX_VERSION_CODE == KERNEL_VERSION(5,3,18)) && \ + (SLE_LOCALVERSION_CODE >= KERNEL_VERSION(14,0,0))) && \ + !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#endif + +#define cpu_latency_qos_update_request pm_qos_update_request +#define cpu_latency_qos_add_request(arg1, arg2) pm_qos_add_request(arg1, PM_QOS_CPU_DMA_LATENCY, arg2) +#define cpu_latency_qos_remove_request pm_qos_remove_request + +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID +#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id" +#endif + +#ifdef HAVE_DEVLINK_REGIONS +#if IS_ENABLED(CONFIG_NET_DEVLINK) +struct devlink_region_ops { + const char *name; + void (*destructor)(const void *data); +}; + +#ifndef 
devlink_region_create +static inline struct devlink_region * +_kc_devlink_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, u64 region_size) +{ + return devlink_region_create(devlink, ops->name, region_max_snapshots, + region_size); +} + +#define devlink_region_create _kc_devlink_region_create +#endif /* devlink_region_create */ +#endif /* CONFIG_NET_DEVLINK */ +#define HAVE_DEVLINK_SNAPSHOT_CREATE_DESTRUCTOR +#endif /* HAVE_DEVLINK_REGIONS */ +#else /* >= 5.7.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT +#define HAVE_ETHTOOL_COALESCE_PARAMS_SUPPORT +#endif /* 5.7.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif/*KYLIN_RELEASE_CODE */ +#endif /* (RHEL < 8.4) */ +#define flex_array_size(p, member, count) \ + array_size(count, sizeof(*(p)->member) + __must_be_array((p)->member)) +#ifdef HAVE_AF_XDP_ZC_SUPPORT +#ifndef xsk_umem_get_rx_frame_size +static inline u32 _xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return umem->chunk_size_nohr - XDP_PACKET_HEADROOM; +} + +#define xsk_umem_get_rx_frame_size _xsk_umem_get_rx_frame_size +#endif /* xsk_umem_get_rx_frame_size */ +#endif /* HAVE_AF_XDP_ZC_SUPPORT */ +#else /* >= 5.8.0 */ +#define HAVE_TC_FLOW_INDIR_DEV +#define HAVE_TC_FLOW_INDIR_BLOCK_CLEANUP +#define HAVE_XDP_BUFF_FRAME_SZ +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif /* 5.8.0 */ + +/*****************************************************************************/ +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +#undef devlink_port_attrs +#define devlink_port_attrs_set devlink_port_attrs_set +#endif /* 
CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#endif /* (RHEL >= 8.4) */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if IS_ENABLED(CONFIG_NET_DEVLINK) && !defined(devlink_port_attrs_set) +static inline void +_kc_devlink_port_attrs_set(struct devlink_port *devlink_port, + struct _kc_devlink_port_attrs *attrs) +{ + devlink_port_attrs_set(devlink_port, attrs->flavour, + attrs->phys.port_number, attrs->split, + attrs->phys.split_subport_number, + attrs->switch_id.id, attrs->switch_id.id_len); +} + +#define devlink_port_attrs_set _kc_devlink_port_attrs_set +#endif /* CONFIG_NET_DEVLINK && !devlink_port_attrs_set */ +#define HAVE_XDP_QUERY_PROG +#else /* >= 5.9.0 */ +#define HAVE_FLOW_INDIR_BLOCK_QDISC +#define HAVE_UDP_TUNNEL_NIC_INFO +#endif /* 5.9.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(8,7)) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +struct devlink_flash_update_params { + const char *file_name; + const char *component; + u32 overwrite_mask; +}; +#endif/*KYLIN_RELEASE_CODE */ +#endif +#ifndef DEVLINK_FLASH_OVERWRITE_SETTINGS +#define DEVLINK_FLASH_OVERWRITE_SETTINGS BIT(0) +#endif + +#ifndef DEVLINK_FLASH_OVERWRITE_IDENTIFIERS +#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS BIT(1) +#endif + +#if IS_ENABLED(CONFIG_NET_DEVLINK) +#include +#if (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(8,7)) +#if (KYLIN_RELEASE_CODE != KYLIN_RELEASE_VERSION(10,4)) +static inline void +devlink_flash_update_timeout_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long __always_unused timeout) +{ + devlink_flash_update_status_notify(devlink, status_msg, component, 0, 0); +} +#endif/*KYLIN_RELEASE_CODE */ +#endif +#endif /* CONFIG_NET_DEVLINK */ + +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= 
RHEL_RELEASE_VERSION(8,4))) +static inline void net_prefetch(void *p) +{ + prefetch(p); +#if L1_CACHE_BYTES < 128 + prefetch((u8 *)p + L1_CACHE_BYTES); +#endif +} +#endif /* (RHEL < 8.4) */ + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +static inline void +_kc_xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + _kc_xsk_buff_dma_sync_for_cpu(xdp, pool) +#endif /* HAVE_MEM_TYPE_XSK_BUFF_POOL */ +#else /* >= 5.10.0 */ +#define HAVE_DEVLINK_REGION_OPS_SNAPSHOT_OPS +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#define HAVE_NETDEV_BPF_XSK_POOL +#endif /* 5.10.0 */ + +/*****************************************************************************/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#ifdef HAVE_XDP_BUFF_RXQ +#include +#if (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(8,7)) +static inline int +_kc_xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + _kc_xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) +#endif +#endif /* HAVE_XDP_BUFF_RXQ */ +#define HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY +#else /* >= 5.11.0 */ +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#endif /* 5.11.0 */ + 
+/*****************************************************************************/ + +static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) +{ + return 0; +} +#define pci_disable_pcie_error_reporting(dev) do {} while (0) + +#endif /* _KCOMPAT_H_ */ diff --git a/include/linux/dinghai/kcompat_vfd.h b/include/linux/dinghai/kcompat_vfd.h new file mode 100644 index 000000000000..a0c6c7b2d62f --- /dev/null +++ b/include/linux/dinghai/kcompat_vfd.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2021 Intel Corporation. */ + +#ifndef _KCOMPAT_VFD_H_ +#define _KCOMPAT_VFD_H_ + +#define VFD_PROMISC_OFF 0x00 +#define VFD_PROMISC_UNICAST 0x01 +#define VFD_PROMISC_MULTICAST 0x02 + +#define VFD_LINKSTATE_OFF 0x00 +#define VFD_LINKSTATE_ON 0x01 +#define VFD_LINKSTATE_AUTO 0x02 + +#define VFD_EGRESS_MIRROR_OFF -1 +#define VFD_INGRESS_MIRROR_OFF -1 + +#define VFD_QUEUE_TYPE_RSS 0x00 +#define VFD_QUEUE_TYPE_QOS 0x01 + +#define VFD_NUM_TC 0x8 + +/** + * struct vfd_objects - VF-d kobjects information struct + * @num_vfs: number of VFs allocated + * @sriov_kobj: pointer to the top sriov kobject + * @vf_kobj: array of pointer to each VF's kobjects + */ +struct vfd_objects { + int num_vfs; + struct kobject *sriov_kobj; + struct vfd_vf_obj *vfs; + struct vfd_qos_objects *qos; +}; + +/** + * struct vfd_vf_obj - VF-d VF kobjects information struct + * @vf_kobj: pointer to VF qos kobject + * @vf_qos_kobj: pointer to VF kobject + * @vf_tc_kobj: pointer to VF TC kobjects + */ +struct vfd_vf_obj { + struct kobject *vf_qos_kobj; + struct kobject *vf_kobj; + struct kobject *vf_tc_kobjs[VFD_NUM_TC]; +}; + +/** + * struct vfd_qos_objects - VF-d qos kobjects information struct + * @qos_kobj: pointer to PF qos kobject + * @pf_qos_kobj: pointer to PF TC kobjects + */ +struct vfd_qos_objects { + struct kobject *qos_kobj; + struct kobject *pf_qos_kobjs[VFD_NUM_TC]; +}; + +struct vfd_macaddr { + u8 mac[ETH_ALEN]; + struct list_head list; 
+}; + +#define VFD_LINK_SPEED_2_5GB_SHIFT 0x0 +#define VFD_LINK_SPEED_100MB_SHIFT 0x1 +#define VFD_LINK_SPEED_1GB_SHIFT 0x2 +#define VFD_LINK_SPEED_10GB_SHIFT 0x3 +#define VFD_LINK_SPEED_40GB_SHIFT 0x4 +#define VFD_LINK_SPEED_20GB_SHIFT 0x5 +#define VFD_LINK_SPEED_25GB_SHIFT 0x6 +#define VFD_LINK_SPEED_5GB_SHIFT 0x7 + + +enum vfd_link_speed { + VFD_LINK_SPEED_UNKNOWN = 0, + VFD_LINK_SPEED_100MB = BIT(VFD_LINK_SPEED_100MB_SHIFT), + VFD_LINK_SPEED_1GB = BIT(VFD_LINK_SPEED_1GB_SHIFT), + VFD_LINK_SPEED_2_5GB = BIT(VFD_LINK_SPEED_2_5GB_SHIFT), + VFD_LINK_SPEED_5GB = BIT(VFD_LINK_SPEED_5GB_SHIFT), + VFD_LINK_SPEED_10GB = BIT(VFD_LINK_SPEED_10GB_SHIFT), + VFD_LINK_SPEED_40GB = BIT(VFD_LINK_SPEED_40GB_SHIFT), + VFD_LINK_SPEED_20GB = BIT(VFD_LINK_SPEED_20GB_SHIFT), + VFD_LINK_SPEED_25GB = BIT(VFD_LINK_SPEED_25GB_SHIFT), +}; + +struct vfd_ops { + int (*get_trunk)(struct pci_dev *pdev, int vf_id, unsigned long *buff); + int (*set_trunk)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_vlan_mirror)(struct pci_dev *pdev, int vf_id, + unsigned long *buff); + int (*set_vlan_mirror)(struct pci_dev *pdev, int vf_id, + const unsigned long *buff); + int (*get_egress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_egress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_ingress_mirror)(struct pci_dev *pdev, int vf_id, int *data); + int (*set_ingress_mirror)(struct pci_dev *pdev, int vf_id, + const int data); + int (*get_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_mac_anti_spoof)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vlan_anti_spoof)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_allow_untagged)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_allow_untagged)(struct pci_dev *pdev, int vf_id, + const bool data); + int (*get_loopback)(struct pci_dev *pdev, int vf_id, 
bool *data); + int (*set_loopback)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_mac)(struct pci_dev *pdev, int vf_id, u8 *macaddr); + int (*set_mac)(struct pci_dev *pdev, int vf_id, const u8 *macaddr); + int (*get_mac_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*add_macs_to_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*rem_macs_from_list)(struct pci_dev *pdev, int vf_id, + struct list_head *mac_list); + int (*get_promisc)(struct pci_dev *pdev, int vf_id, u8 *data); + int (*set_promisc)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_vlan_strip)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vlan_strip)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_link_state)(struct pci_dev *pdev, int vf_id, bool *enabled, + enum vfd_link_speed *link_speed); + int (*set_link_state)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate); + int (*set_max_tx_rate)(struct pci_dev *pdev, int vf_id, + unsigned int *max_tx_rate); + int (*get_min_tx_rate)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_min_tx_rate)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_spoofcheck)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_spoofcheck)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_trust)(struct kobject *, + struct kobj_attribute *, char *); + int (*set_trust)(struct kobject *, struct kobj_attribute *, + const char *, size_t); + int (*get_vf_enable)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_vf_enable)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_rx_bytes) (struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_rx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_rx_packets)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_bytes) 
(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_dropped)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_packets)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_spoofed)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*get_tx_errors)(struct pci_dev *pdev, int vf_id, u64 *data); + int (*reset_stats)(struct pci_dev *pdev, int vf_id); + int (*set_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 bw_share); + int (*get_vf_bw_share)(struct pci_dev *pdev, int vf_id, u8 *bw_share); + int (*set_pf_qos_apply)(struct pci_dev *pdev); + int (*get_pf_ingress_mirror)(struct pci_dev *pdev, int *data); + int (*set_pf_ingress_mirror)(struct pci_dev *pdev, const int data); + int (*get_pf_egress_mirror)(struct pci_dev *pdev, int *data); + int (*set_pf_egress_mirror)(struct pci_dev *pdev, const int data); + int (*get_pf_tpid)(struct pci_dev *pdev, u16 *data); + int (*set_pf_tpid)(struct pci_dev *pdev, const u16 data); + int (*get_num_queues)(struct pci_dev *pdev, int vf_id, int *num_queues); + int (*set_num_queues)(struct pci_dev *pdev, int vf_id, const int num_queues); + int (*get_trust_state)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_trust_state)(struct pci_dev *pdev, int vf_id, bool data); + int (*get_queue_type)(struct pci_dev *pdev, int vf_id, u8 *data); + int (*set_queue_type)(struct pci_dev *pdev, int vf_id, const u8 data); + int (*get_allow_bcast)(struct pci_dev *pdev, int vf_id, bool *data); + int (*set_allow_bcast)(struct pci_dev *pdev, int vf_id, const bool data); + int (*get_pf_qos_tc_max_bw)(struct pci_dev *pdev, int tc, u16 *req_bw); + int (*set_pf_qos_tc_max_bw)(struct pci_dev *pdev, int tc, u16 req_bw); + int (*get_pf_qos_tc_lsp)(struct pci_dev *pdev, int tc, bool *on); + int (*set_pf_qos_tc_lsp)(struct pci_dev *pdev, int tc, bool on); + int (*get_pf_qos_tc_priority)(struct pci_dev *pdev, int tc, + char *tc_bitmap); + int (*set_pf_qos_tc_priority)(struct pci_dev *pdev, int tc, + char tc_bitmap); + int 
(*get_vf_qos_tc_share)(struct pci_dev *pdev, int vf_id, int tc, + u8 *share); + int (*set_vf_qos_tc_share)(struct pci_dev *pdev, int vf_id, int tc, + u8 share); + int (*get_vf_max_tc_tx_rate)(struct pci_dev *pdev, int vf_id, int tc, + int *rate); + int (*set_vf_max_tc_tx_rate)(struct pci_dev *pdev, int vf_id, int tc, + int rate); +}; + +extern const struct vfd_ops *vfd_ops; + +#endif /* _KCOMPAT_VFD_H_ */ diff --git a/include/linux/dinghai/lag.h b/include/linux/dinghai/lag.h new file mode 100644 index 000000000000..6887d9abfb08 --- /dev/null +++ b/include/linux/dinghai/lag.h @@ -0,0 +1,55 @@ +#ifndef __DINGHAI_LAG_H__ +#define __DINGHAI_LAG_H__ + +#include +#include + +#define ZXDH_PF_VFID(ep, pf) (1152 + ep * 8 + pf) + + +enum zxdh_netdev_lag_tx_type +{ + ZXDH_NETDEV_LAG_TX_TYPE_UNKNOWN = 0, + ZXDH_NETDEV_LAG_TX_TYPE_ACTIVEBACKUP = 1, + ZXDH_NETDEV_LAG_TX_TYPE_HASH = 2, + /* the following types are not supported */ + ZXDH_NETDEV_LAG_TX_TYPE_RANDOM, + ZXDH_NETDEV_LAG_TX_TYPE_BROADCAST, + ZXDH_NETDEV_LAG_TX_TYPE_ROUNDROBIN, +}; + +enum zxdh_netdev_lag_hash +{ + ZXDH_NETDEV_LAG_HASH_NONE = 0, /* L2 default */ + ZXDH_NETDEV_LAG_HASH_L2 = 1, + ZXDH_NETDEV_LAG_HASH_L23 = 2, + ZXDH_NETDEV_LAG_HASH_L34 = 4, + // ZXDH_NETDEV_LAG_HASH_E23, + // ZXDH_NETDEV_LAG_HASH_E34, + // ZXDH_NETDEV_LAG_HASH_VLAN_SRCMAC, + ZXDH_NETDEV_LAG_HASH_UNKNOWN, +}; + +struct zxdh_lag_attrs +{ + uint16_t pannel_id; + uint16_t vport; + uint32_t qid[2]; + uint16_t slot_id; + uint16_t pcie_id; + uint8_t phy_port; + uint8_t rsv; +}; + +void zxdh_regitster_ldev(struct dh_core_dev *dh_devs); +void zxdh_unregitster_ldev(struct dh_core_dev *dh_dev); + +int32_t zxdh_ldev_add_netdev(struct dh_core_dev *dev, uint16_t ida, struct net_device *netdev, struct zxdh_lag_attrs *attr); +void zxdh_ldev_remove_netdev(struct dh_core_dev *dh_dev, struct net_device *netdev, struct zxdh_lag_attrs *attr); + +/* sriov netdev hardware bond */ +int32_t zxdh_hardware_bond_init(struct net_device *netdev); +void 
zxdh_hardware_bond_uninit(struct net_device *netdev); +int32_t zxdh_recover_hwbond_in_reload(struct net_device *netdev); + +#endif /* __DINGHAI_LAG_H__ */ \ No newline at end of file diff --git a/include/linux/dinghai/log.h b/include/linux/dinghai/log.h new file mode 100644 index 000000000000..6de7a1e58239 --- /dev/null +++ b/include/linux/dinghai/log.h @@ -0,0 +1,45 @@ +#ifndef __KERNEL_LOG_H__ +#define __KERNEL_LOG_H__ + +#include +#include + +#define MODULE_CMD "zxdh_cmd" +#define MODULE_NP "zxdh_np" +#define MODULE_PF "zxdh_pf" +#define MODULE_PTP "zxdh_ptp" +#define MODULE_TSN "zxdh_tsn" +#define MODULE_LAG "zxdh_lag" +#define MODULE_DHTOOLS "zxdh_tool" +#define MODULE_SEC "zxdh_sec" +#define MODULE_MPF "zxdh_mpf" +#define MODULE_FUC_HP "zxdh_func_hp" +#define MODULE_UACCE "zxdh_uacce" +#define MODULE_HEAL "zxdh_health" + +extern int debug_print; +#define DH_LOG_EMERG(module, fmt, arg...) \ + printk(KERN_EMERG "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_ALERT(module, fmt, arg...) \ + printk(KERN_ALERT "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_CRIT(module, fmt, arg...) \ + printk(KERN_CRIT "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_ERR(module, fmt, arg...) \ + printk(KERN_ERR "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_WARNING(module, fmt, arg...) \ + printk(KERN_WARNING "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_INFO(module, fmt, arg...) \ + printk(KERN_INFO "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); + +#define DH_LOG_DEBUG(module, fmt, arg...) 
\ +do { \ + if (debug_print) \ + printk(KERN_DEBUG "[%s][%s][%d] "fmt"", module, __func__, __LINE__, ##arg); \ +} while (0) + +#endif /* __KERNEL_LOG_H__ */ diff --git a/include/linux/dinghai/pci_irq.h b/include/linux/dinghai/pci_irq.h new file mode 100755 index 000000000000..9baead529fa9 --- /dev/null +++ b/include/linux/dinghai/pci_irq.h @@ -0,0 +1,81 @@ +#ifndef DINGHAI_PCI_IRQ_H +#define DINGHAI_PCI_IRQ_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define DH_MAX_IRQ_NAME 100 +#define DH_FW_RESERVED_EQS 16 +/* max irq_index is 2047, so four chars */ +#define DH_MAX_IRQ_IDX_CHARS (4) +#define DH_EQ_REFS_PER_IRQ (2) + +#ifdef CONFIG_AARCH +#define ZXDH_VQS_CHANNELS_NUM (16+2) +#else +#define ZXDH_VQS_CHANNELS_NUM (64+2) +#endif + +#define ZXDH_BOND_VQS_CHANNELS_NUM (2+2) +#define ZXDH_ASYNC_CHANNELS_NUM 8 +#define ZXDH_RDMA_CHANNELS_NUM 6 +#define ZXDH_RDMA_IRQ_START_IDX ZXDH_ASYNC_CHANNELS_NUM +#define ZXDH_VQS_IRQ_START_IDX (ZXDH_ASYNC_CHANNELS_NUM + ZXDH_RDMA_CHANNELS_NUM) + +#define ZXDH_VF_VQS_CHANNELS_NUM 16 +#define ZXDH_VF_ASYNC_CHANNELS_NUM 6 +#define ZXDH_VF_RDMA_IRQ_START_IDX ZXDH_VF_ASYNC_CHANNELS_NUM +#define ZXDH_VF_VQS_IRQ_START_IDX (ZXDH_RDMA_CHANNELS_NUM + ZXDH_VF_ASYNC_CHANNELS_NUM) + +struct dh_irq; + +struct dh_irq_pool { + char name[DH_MAX_IRQ_NAME]; + struct xa_limit xa_num_irqs; + struct mutex lock; /* sync IRQs creations */ + struct xarray irqs; + uint32_t max_threshold; + uint32_t min_threshold; + uint16_t *irqs_per_cpu; + struct dh_core_dev *dev; + void *data; +}; + +static inline bool dh_irq_pool_is_sf_pool(struct dh_irq_pool *pool) +{ + return !strncmp("dh_mpf_sf", pool->name, strlen("dh_mpf_sf")); +} + +struct dh_irq *zxdh_get_irq_of_pool(struct dh_core_dev *dev, struct dh_irq_pool *pool); + +struct dh_irq *dh_irq_alloc(struct dh_irq_pool *pool, int32_t i, + const struct cpumask *affinity); + +int32_t dh_irq_read_locked(struct dh_irq *irq); +int32_t dh_irq_get_locked(struct dh_irq *irq); +int32_t 
dh_irq_put(struct dh_irq *irq); + +void irq_pool_free(struct dh_irq_pool *pool); +struct dh_irq_pool * +irq_pool_alloc(struct dh_core_dev *dev, int32_t start, int32_t size, char *name, + uint32_t min_threshold, uint32_t max_threshold); + +struct dh_irq { + struct atomic_notifier_head nh; /* notification chain invoked when an interruption is triggered. the invocation environment is the atomic context */ + cpumask_var_t mask; /* interrupt affinity */ + char name[DH_MAX_IRQ_NAME]; /* interrupt name */ + struct dh_irq_pool *pool; /* interrupt pool */ + int32_t refcount; + uint32_t index; /* interrupt vec index */ + int32_t irqn; /* interrupt number */ +}; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/linux/dinghai/queue.h b/include/linux/dinghai/queue.h new file mode 100644 index 000000000000..22b5b1bc44de --- /dev/null +++ b/include/linux/dinghai/queue.h @@ -0,0 +1,78 @@ +#ifndef __DINGHAI_QUEUE_H__ +#define __DINGHAI_QUEUE_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Vector value used to disable MSI for queue */ +#define ZXDH_MSI_NO_VECTOR 0xff + +/* Status byte for guest to report progress, and synchronize features */ +/* We have seen device and processed generic fields */ +#define ZXDH_CONFIG_S_ACKNOWLEDGE 1 +/* We have found a driver for the device. */ +#define ZXDH_CONFIG_S_DRIVER 2 +/* Driver has used its parts of the config, and is happy */ +#define ZXDH_CONFIG_S_DRIVER_OK 4 +/* Driver has finished configuring features */ +#define ZXDH_CONFIG_S_FEATURES_OK 8 +/* Device entered invalid state, driver must reset it */ +#define ZXDH_CONFIG_S_NEEDS_RESET 0x40 +/* We've given up on this device */ +#define ZXDH_CONFIG_S_FAILED 0x80 + + +/* This is the PCI capability header: */ +struct zxdh_pf_pci_cap +{ + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 cfg_type; /* Identifies the structure. 
*/ + __u8 bar; /* Where to find it. */ + __u8 id; /* Multiple capabilities of the same type */ + __u8 padding[2]; /* Pad to full dword. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length of the structure, in bytes. */ +}; + +/* Fields in ZXDH_PF_PCI_CAP_COMMON_CFG: */ +struct zxdh_pf_pci_common_cfg +{ + /* About the whole device. */ + __le32 device_feature_select; /* read-write */ + __le32 device_feature; /* read-only */ + __le32 guest_feature_select; /* read-write */ + __le32 guest_feature; /* read-write */ + __le16 msix_config; /* read-write */ + __le16 num_queues; /* read-only */ + __u8 device_status; /* read-write */ + __u8 config_generation; /* read-only */ + + /* About a specific virtqueue. */ + __le16 queue_select; /* read-write */ + __le16 queue_size; /* read-write, power of 2. */ + __le16 queue_msix_vector; /* read-write */ + __le16 queue_enable; /* read-write */ + __le16 queue_notify_off; /* read-only */ + __le32 queue_desc_lo; /* read-write */ + __le32 queue_desc_hi; /* read-write */ + __le32 queue_avail_lo; /* read-write */ + __le32 queue_avail_hi; /* read-write */ + __le32 queue_used_lo; /* read-write */ + __le32 queue_used_hi; /* read-write */ +}; + +struct zxdh_pf_pci_notify_cap +{ + struct zxdh_pf_pci_cap cap; + __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/dinghai/xarray.h b/include/linux/dinghai/xarray.h new file mode 100644 index 000000000000..ab4cf42fc652 --- /dev/null +++ b/include/linux/dinghai/xarray.h @@ -0,0 +1,1733 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef DINGHAI_XARRAY_H +#define DINGHAI_XARRAY_H +/* + * eXtensible Arrays + * Copyright (c) 2017 Microsoft Corporation + * Author: Matthew Wilcox + * + * See Documentation/core-api/xarray.rst for how to use the XArray. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef NEED_XARRAY +#include +#else +#include + +/* + * The bottom two bits of the entry determine how the XArray interprets + * the contents: + * + * 00: Pointer entry + * 10: Internal entry + * x1: Value entry or tagged pointer + * + * Attempting to store internal entries in the XArray is a bug. + * + * Most internal entries are pointers to the next node in the tree. + * The following internal entries have a special meaning: + * + * 0-62: Sibling entries + * 256: Retry entry + * 257: Zero entry + * + * Errors are also represented as internal entries, but use the negative + * space (-4094 to -2). They're never stored in the slots array; only + * returned by the normal API. + */ + +#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1) + +/** + * xa_to_value() - Get value stored in an XArray entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value stored in the XArray entry. + */ +static inline unsigned long xa_to_value(const void *entry) +{ + return (unsigned long)entry >> 1; +} + +/** + * xa_is_value() - Determine if an entry is a value. + * @entry: XArray entry. + * + * Context: Any context. + * Return: True if the entry is a value, false if it is a pointer. + */ +static inline bool xa_is_value(const void *entry) +{ + return (unsigned long)entry & 1; +} + +/* + * xa_mk_internal() - Create an internal entry. + * @v: Value to turn into an internal entry. + * + * Internal entries are used for a number of purposes. Entries 0-255 are + * used for sibling entries (only 0-62 are used by the current code). 256 + * is used for the retry entry. 257 is used for the reserved / zero entry. + * Negative internal entries are used to represent errnos. Node pointers + * are also tagged as internal entries in some situations. + * + * Context: Any context. + * Return: An XArray internal entry corresponding to this value. 
+ */ +static inline void *xa_mk_internal(unsigned long v) +{ + return (void *)((v << 2) | 2); +} + +/* + * xa_to_internal() - Extract the value from an internal entry. + * @entry: XArray entry. + * + * Context: Any context. + * Return: The value which was stored in the internal entry. + */ +static inline unsigned long xa_to_internal(const void *entry) +{ + return (unsigned long)entry >> 2; +} + +/* + * xa_is_internal() - Is the entry an internal entry? + * @entry: XArray entry. + * + * Context: Any context. + * Return: %true if the entry is an internal entry. + */ +static inline bool xa_is_internal(const void *entry) +{ + return ((unsigned long)entry & 3) == 2; +} + +#define XA_ZERO_ENTRY xa_mk_internal(257) + +/** + * xa_is_zero() - Is the entry a zero entry? + * @entry: Entry retrieved from the XArray + * + * The normal API will return NULL as the contents of a slot containing + * a zero entry. You can only see zero entries by using the advanced API. + * + * Return: %true if the entry is a zero entry. + */ +static inline bool xa_is_zero(const void *entry) +{ + return unlikely(entry == XA_ZERO_ENTRY); +} + +/** + * xa_is_err() - Report whether an XArray operation returned an error + * @entry: Result from calling an XArray function + * + * If an XArray operation cannot complete an operation, it will return + * a special value indicating an error. This function tells you + * whether an error occurred; xa_err() tells you which error occurred. + * + * Context: Any context. + * Return: %true if the entry indicates an error. + */ +static inline bool xa_is_err(const void *entry) +{ + return unlikely(xa_is_internal(entry) && + entry >= xa_mk_internal(-MAX_ERRNO)); +} + +/** + * xa_err() - Turn an XArray result into an errno. + * @entry: Result from calling an XArray function. + * + * If an XArray operation cannot complete an operation, it will return + * a special pointer value which encodes an errno. 
This function extracts + * the errno from the pointer value, or returns 0 if the pointer does not + * represent an errno. + * + * Context: Any context. + * Return: A negative errno or 0. + */ +static inline int xa_err(void *entry) +{ + /* xa_to_internal() would not do sign extension. */ + if (xa_is_err(entry)) + return (long)entry >> 2; + return 0; +} + +/** + * struct xa_limit - Represents a range of IDs. + * @min: The lowest ID to allocate (inclusive). + * @max: The maximum ID to allocate (inclusive). + * + * This structure is used either directly or via the XA_LIMIT() macro + * to communicate the range of IDs that are valid for allocation. + * Two common ranges are predefined for you: + * * xa_limit_32b - [0 - UINT_MAX] + * * xa_limit_31b - [0 - INT_MAX] + */ +struct xa_limit { + u32 max; + u32 min; +}; + +#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } + +#define xa_limit_32b XA_LIMIT(0, UINT_MAX) +#define xa_limit_31b XA_LIMIT(0, INT_MAX) + +typedef unsigned __bitwise xa_mark_t; +#define XA_MARK_0 ((__force xa_mark_t)0U) +#define XA_MARK_1 ((__force xa_mark_t)1U) +#define XA_MARK_2 ((__force xa_mark_t)2U) +#define XA_PRESENT ((__force xa_mark_t)8U) +#define XA_MARK_MAX XA_MARK_2 +#define XA_FREE_MARK XA_MARK_0 + +enum xa_lock_type { + DH_XA_LOCK_IRQ = 1, + DH_XA_LOCK_BH = 2, +}; + +/* + * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags, + * and we remain compatible with that. + */ +#define XA_FLAGS_LOCK_IRQ ((__force gfp_t)DH_XA_LOCK_IRQ) +#define XA_FLAGS_LOCK_BH ((__force gfp_t)DH_XA_LOCK_BH) +#define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) +#define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) +#define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) +#define XA_FLAGS_ACCOUNT ((__force gfp_t)32U) +#define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ + (__force unsigned)(mark))) + +/* ALLOC is for a normal 0-based alloc. 
ALLOC1 is for an 1-based alloc */ +#define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) +#define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) + +/** + * struct xarray - The anchor of the XArray. + * @xa_lock: Lock that protects the contents of the XArray. + * + * To use the xarray, define it statically or embed it in your data structure. + * It is a very small data structure, so it does not usually make sense to + * allocate it separately and keep a pointer to it in your data structure. + * + * You may use the xa_lock to protect your own data structures as well. + */ +/* + * If all of the entries in the array are NULL, @xa_head is a NULL pointer. + * If the only non-NULL entry in the array is at index 0, @xa_head is that + * entry. If any other entry in the array is non-NULL, @xa_head points + * to an @xa_node. + */ +struct xarray { + spinlock_t xa_lock; +/* private: The rest of the data structure is not to be used directly. */ + gfp_t xa_flags; + void __rcu * xa_head; +}; + +#define XARRAY_INIT(name, flags) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ + .xa_flags = flags, \ + .xa_head = NULL, \ +} + +/** + * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags. + * @name: A string that names your XArray. + * @flags: XA_FLAG values. + * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name and flags. It is + * equivalent to calling xa_init_flags() on the array, but it does the + * initialisation at compiletime instead of runtime. + */ +#define DEFINE_XARRAY_FLAGS(name, flags) \ + struct xarray name = XARRAY_INIT(name, flags) + +/** + * DEFINE_XARRAY() - Define an XArray. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of XArrays. It declares + * and initialises an empty XArray with the chosen name. 
It is equivalent + * to calling xa_init() on the array, but it does the initialisation at + * compiletime instead of runtime. + */ +#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) + +/** + * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) + +/** + * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. + * @name: A string that names your XArray. + * + * This is intended for file scope definitions of allocating XArrays. + * See also DEFINE_XARRAY(). + */ +#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) + +void *xa_load(struct xarray *, unsigned long index); +void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *xa_erase(struct xarray *, unsigned long index); +void *xa_find(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +void *xa_find_after(struct xarray *xa, unsigned long *index, + unsigned long max, xa_mark_t) __attribute__((nonnull(2))); +void xa_destroy(struct xarray *); +void __init dh_radix_tree_init(void); + +/** + * xa_init_flags() - Initialise an empty XArray with flags. + * @xa: XArray. + * @flags: XA_FLAG values. + * + * If you need to initialise an XArray with special flags (eg you need + * to take the lock from interrupt context), use this function instead + * of xa_init(). + * + * Context: Any context. + */ +static inline void xa_init_flags(struct xarray *xa, gfp_t flags) +{ + spin_lock_init(&xa->xa_lock); + xa->xa_flags = flags; + xa->xa_head = NULL; +} + +/** + * xa_init() - Initialise an empty XArray. + * @xa: XArray. + * + * An empty XArray is full of NULL entries. + * + * Context: Any context. 
+ */ +static inline void xa_init(struct xarray *xa) +{ + xa_init_flags(xa, 0); +} + +/** + * xa_empty() - Determine if an array has any present entries. + * @xa: XArray. + * + * Context: Any context. + * Return: %true if the array contains only NULL pointers. + */ +static inline bool xa_empty(const struct xarray *xa) +{ + return xa->xa_head == NULL; +} + +/** + * xa_marked() - Inquire whether any entry in this array has a mark set + * @xa: Array + * @mark: Mark value + * + * Context: Any context. + * Return: %true if any entry has this mark set. + */ +static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) +{ + return xa->xa_flags & XA_FLAGS_MARK(mark); +} + +/** + * xa_for_each_start() - Iterate over a portion of an XArray. + * @xa: XArray. + * @index: Index of @entry. + * @entry: Entry retrieved from array. + * @start: First index to retrieve from array. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you + * want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set + * to NULL and @index will have a value less than or equal to max. + * + * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). + * xa_for_each_start() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each() iterator instead. + * The xas_for_each() iterator will expand into more inline code than + * xa_for_each_start(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_start(xa, index, entry, start) \ + xa_for_each_range(xa, index, entry, start, ULONG_MAX) + +/** + * xa_for_each() - Iterate over present entries in an XArray. + * @xa: XArray. 
+ * @index: Index of @entry. + * @entry: Entry retrieved from array. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. You may modify @index during the iteration if you want + * to skip or reprocess indices. It is safe to modify the array during the + * iteration. At the end of the iteration, @entry will be set to NULL and + * @index will have a value less than or equal to max. + * + * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have + * to handle your own locking with xas_for_each(), and if you have to unlock + * after each iteration, it will also end up being O(n.log(n)). xa_for_each() + * will spin if it hits a retry entry; if you intend to see retry entries, + * you should use the xas_for_each() iterator instead. The xas_for_each() + * iterator will expand into more inline code than xa_for_each(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each(xa, index, entry) \ + xa_for_each_start(xa, index, entry, 0) + +/** + * xa_for_each_marked() - Iterate over marked entries in an XArray. + * @xa: XArray. + * @index: Index of @entry. + * @entry: Entry retrieved from array. + * @filter: Selection criterion. + * + * During the iteration, @entry will have the value of the entry stored + * in @xa at @index. The iteration will skip all entries in the array + * which do not match @filter. You may modify @index during the iteration + * if you want to skip or reprocess indices. It is safe to modify the array + * during the iteration. At the end of the iteration, @entry will be set to + * NULL and @index will have a value less than or equal to max. + * + * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n). + * You have to handle your own locking with xas_for_each(), and if you have + * to unlock after each iteration, it will also end up being O(n.log(n)). 
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to + * see retry entries, you should use the xas_for_each_marked() iterator + * instead. The xas_for_each_marked() iterator will expand into more inline + * code than xa_for_each_marked(). + * + * Context: Any context. Takes and releases the RCU lock. + */ +#define xa_for_each_marked(xa, index, entry, filter) \ + for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \ + entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter)) + +#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) +#define xa_lock(xa) spin_lock(&(xa)->xa_lock) +#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock) +#define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock) +#define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock) +#define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock) +#define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock) +#define xa_lock_irqsave(xa, flags) \ + spin_lock_irqsave(&(xa)->xa_lock, flags) +#define xa_unlock_irqrestore(xa, flags) \ + spin_unlock_irqrestore(&(xa)->xa_lock, flags) +#define xa_lock_nested(xa, subclass) \ + spin_lock_nested(&(xa)->xa_lock, subclass) +#define xa_lock_bh_nested(xa, subclass) \ + spin_lock_bh_nested(&(xa)->xa_lock, subclass) +#define xa_lock_irq_nested(xa, subclass) \ + spin_lock_irq_nested(&(xa)->xa_lock, subclass) +#define xa_lock_irqsave_nested(xa, flags, subclass) \ + spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass) + +/* + * Versions of the normal API which require the caller to hold the + * xa_lock. If the GFP flags allow it, they will drop the lock to + * allocate memory, then reacquire it afterwards. These functions + * may also re-enable interrupts if the XArray flags indicate the + * locking should be interrupt safe. 
+ */ +void *__xa_erase(struct xarray *, unsigned long index); +void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); +void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, + void *entry, gfp_t); +int __must_check __xa_insert(struct xarray *, unsigned long index, + void *entry, gfp_t); +int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, + struct xa_limit, gfp_t); +int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, + struct xa_limit, u32 *next, gfp_t); +void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); +void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); + +/** + * xa_store_bh() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The old entry at this index or xa_err() if an error happened. + */ +static inline void *xa_store_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_store_irq() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The old entry at this index or xa_err() if an error happened. 
+ */ +static inline void *xa_store_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** + * xa_erase_bh() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_bh(xa); + entry = __xa_erase(xa, index); + xa_unlock_bh(xa); + + return entry; +} + +/** + * xa_erase_irq() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. + * + * After this function returns, loading from @index will return %NULL. + * If the index is part of a multi-index entry, all indices will be erased + * and none of the entries will be part of a multi-index entry. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The entry which used to be at this index. + */ +static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) +{ + void *entry; + + xa_lock_irq(xa); + entry = __xa_erase(xa, index); + xa_unlock_irq(xa); + + return entry; +} + +/** + * xa_cmpxchg() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * If the entry at @index is the same as @old, replace it with @entry. + * If the return value is equal to @old, then the exchange was successful. + * + * Context: Any context. Takes and releases the xa_lock. 
May sleep + * if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock(xa); + + return curr; +} + +/** + * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. 
+ */ +static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** + * xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int __must_check xa_insert(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_insert_bh() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. + * -ENOMEM if memory could not be allocated. 
+ */ +static inline int __must_check xa_insert_bh(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_insert_irq() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * Inserting a NULL entry will store a reserved entry (like xa_reserve()) + * if no entry is present. Inserting will fail if a reserved entry is + * present, even though loading from this index will return NULL. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EBUSY if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int __must_check xa_insert_irq(struct xarray *xa, + unsigned long index, void *entry, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_insert(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return err; +} + +/** + * xa_alloc() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. 
+ */ +static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc(xa, id, entry, limit, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. + */ +static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc(xa, id, entry, limit, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 on success, -ENOMEM if memory could not be allocated or + * -EBUSY if there are no free entries in @limit. 
+ */ +static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, + void *entry, struct xa_limit limit, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc(xa, id, entry, limit, gfp); + xa_unlock_irq(xa); + + return err; +} + +/** + * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. Takes and releases the xa_lock. May sleep if + * the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock(xa); + + return err; +} + +/** + * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Any context. 
Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_bh(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_bh(xa); + + return err; +} + +/** + * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray. + * @xa: XArray. + * @id: Pointer to ID. + * @entry: New entry. + * @limit: Range of allocated ID. + * @next: Pointer to next ID to allocate. + * @gfp: Memory allocation flags. + * + * Finds an empty entry in @xa between @limit.min and @limit.max, + * stores the index into the @id pointer, then stores the entry at + * that index. A concurrent lookup will not see an uninitialised @id. + * The search for an empty entry will start at @next and will wrap + * around if necessary. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: 0 if the allocation succeeded without wrapping. 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated or -EBUSY if there are no free entries in @limit. + */ +static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, + struct xa_limit limit, u32 *next, gfp_t gfp) +{ + int err; + + xa_lock_irq(xa); + err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); + xa_unlock_irq(xa); + + return err; +} + +/** + * xa_reserve() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * Ensures there is somewhere to store an entry at @index in the array. 
+ * If there is already something stored at @index, this function does + * nothing. If there was nothing there, the entry is marked as reserved. + * Loading from a reserved entry returns a %NULL pointer. + * + * If you do not use the entry that you have reserved, call xa_release() + * or xa_erase() to free any unnecessary memory. + * + * Context: Any context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline __must_check +int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); +} + +/** + * xa_reserve_bh() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * A softirq-disabling version of xa_reserve(). + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline __must_check +int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); +} + +/** + * xa_reserve_irq() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * An interrupt-disabling version of xa_reserve(). + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline __must_check +int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); +} + +/** + * xa_release() - Release a reserved entry. + * @xa: XArray. + * @index: Index of entry. + * + * After calling xa_reserve(), you can call this function to release the + * reservation. 
If the entry at @index has been stored to, this function + * will do nothing. + */ +static inline void xa_release(struct xarray *xa, unsigned long index) +{ + xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); +} + +/* Everything below here is the Advanced API. Proceed with caution. */ + +/* + * The xarray is constructed out of a set of 'chunks' of pointers. Choosing + * the best chunk size requires some tradeoffs. A power of two recommends + * itself so that we can walk the tree based purely on shifts and masks. + * Generally, the larger the better; as the number of slots per level of the + * tree increases, the less tall the tree needs to be. But that needs to be + * balanced against the memory consumption of each node. On a 64-bit system, + * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we + * doubled the number of slots per node, we'd get only 3 nodes per 4kB page. + */ +#ifndef XA_CHUNK_SHIFT +#define XA_CHUNK_SHIFT (CONFIG_BASE_SMALL ? 4 : 6) +#endif +#define XA_CHUNK_SIZE (1UL << XA_CHUNK_SHIFT) +#define XA_CHUNK_MASK (XA_CHUNK_SIZE - 1) +#define XA_MAX_MARKS 3 +#define XA_MARK_LONGS DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG) + +/* + * @count is the count of every non-NULL element in the ->slots array + * whether that is a value entry, a retry entry, a user pointer, + * a sibling entry or a pointer to the next level of the tree. + * @nr_values is the count of every element in ->slots which is + * either a value entry or a sibling of a value entry. 
+ */ +struct xa_node { + unsigned char shift; /* Bits remaining in each slot */ + unsigned char offset; /* Slot offset in parent */ + unsigned char count; /* Total entry count */ + unsigned char nr_values; /* Value entry count */ + struct xa_node __rcu *parent; /* NULL at top of tree */ + struct xarray *array; /* The array we belong to */ + union { + struct list_head private_list; /* For tree user */ + struct rcu_head rcu_head; /* Used when freeing node */ + }; + void __rcu *slots[XA_CHUNK_SIZE]; + union { + unsigned long tags[XA_MAX_MARKS][XA_MARK_LONGS]; + unsigned long marks[XA_MAX_MARKS][XA_MARK_LONGS]; + }; +}; + +void xa_dump(const struct xarray *); +void xa_dump_node(const struct xa_node *); + +#ifdef XA_DEBUG +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + xa_dump(xa); \ + BUG(); \ + } \ + } while (0) +#define XA_NODE_BUG_ON(node, x) do { \ + if (x) { \ + if (node) xa_dump_node(node); \ + BUG(); \ + } \ + } while (0) +#else +#define XA_BUG_ON(xa, x) do { } while (0) +#define XA_NODE_BUG_ON(node, x) do { } while (0) +#endif + +/* Private */ +static inline void *xa_head(const struct xarray *xa) +{ + return rcu_dereference_check(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_head_locked(const struct xarray *xa) +{ + return rcu_dereference_protected(xa->xa_head, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_check(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_entry_locked(const struct xarray *xa, + const struct xa_node *node, unsigned int offset) +{ + XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE); + return rcu_dereference_protected(node->slots[offset], + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent(const struct xarray *xa, + const 
struct xa_node *node) +{ + return rcu_dereference_check(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline struct xa_node *xa_parent_locked(const struct xarray *xa, + const struct xa_node *node) +{ + return rcu_dereference_protected(node->parent, + lockdep_is_held(&xa->xa_lock)); +} + +/* Private */ +static inline void *xa_mk_node(const struct xa_node *node) +{ + return (void *)((unsigned long)node | 2); +} + +/* Private */ +static inline struct xa_node *xa_to_node(const void *entry) +{ + return (struct xa_node *)((unsigned long)entry - 2); +} + +/* Private */ +static inline bool xa_is_node(const void *entry) +{ + return xa_is_internal(entry) && (unsigned long)entry > 4096; +} + +/* Private */ +static inline void *xa_mk_sibling(unsigned int offset) +{ + return xa_mk_internal(offset); +} + +/* Private */ +static inline unsigned long xa_to_sibling(const void *entry) +{ + return xa_to_internal(entry); +} + +/** + * xa_is_sibling() - Is the entry a sibling entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a sibling entry. + */ +static inline bool xa_is_sibling(const void *entry) +{ + return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) && + (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); +} + +#define XA_RETRY_ENTRY xa_mk_internal(256) + +/** + * xa_is_retry() - Is the entry a retry entry? + * @entry: Entry retrieved from the XArray + * + * Return: %true if the entry is a retry entry. + */ +static inline bool xa_is_retry(const void *entry) +{ + return unlikely(entry == XA_RETRY_ENTRY); +} + +/** + * xa_is_advanced() - Is the entry only permitted for the advanced API? + * @entry: Entry to be stored in the XArray. + * + * Return: %true if the entry cannot be stored by the normal API. + */ +static inline bool xa_is_advanced(const void *entry) +{ + return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY); +} + +/** + * typedef xa_update_node_t - A callback function from the XArray. 
+ * @node: The node which is being processed + * + * This function is called every time the XArray updates the count of + * present and value entries in a node. It allows advanced users to + * maintain the private_list in the node. + * + * Context: The xa_lock is held and interrupts may be disabled. + * Implementations should not drop the xa_lock, nor re-enable + * interrupts. + */ +typedef void (*xa_update_node_t)(struct xa_node *node); + +void xa_delete_node(struct xa_node *, xa_update_node_t); + +/* + * The xa_state is opaque to its users. It contains various different pieces + * of state involved in the current operation on the XArray. It should be + * declared on the stack and passed between the various internal routines. + * The various elements in it should not be accessed directly, but only + * through the provided accessor functions. The below documentation is for + * the benefit of those working on the code, not for users of the XArray. + * + * @xa_node usually points to the xa_node containing the slot we're operating + * on (and @xa_offset is the offset in the slots array). If there is a + * single entry in the array at index 0, there are no allocated xa_nodes to + * point to, and so we store %NULL in @xa_node. @xa_node is set to + * the value %XAS_RESTART if the xa_state is not walked to the correct + * position in the tree of nodes for this operation. If an error occurs + * during an operation, it is set to an %XAS_ERROR value. If we run off the + * end of the allocated nodes, it is set to %XAS_BOUNDS. + */ +struct xa_state { + struct xarray *xa; + unsigned long xa_index; + unsigned char xa_shift; + unsigned char xa_sibs; + unsigned char xa_offset; + unsigned char xa_pad; /* Helps gcc generate better code */ + struct xa_node *xa_node; + struct xa_node *xa_alloc; + xa_update_node_t xa_update; +}; + +/* + * We encode errnos in the xas->xa_node. 
If an error has happened, we need to + * drop the lock to fix it, and once we've done so the xa_state is invalid. + */ +#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL)) +#define XAS_BOUNDS ((struct xa_node *)1UL) +#define XAS_RESTART ((struct xa_node *)3UL) + +#define __XA_STATE(array, index, shift, sibs) { \ + .xa = array, \ + .xa_index = index, \ + .xa_shift = shift, \ + .xa_sibs = sibs, \ + .xa_offset = 0, \ + .xa_pad = 0, \ + .xa_node = XAS_RESTART, \ + .xa_alloc = NULL, \ + .xa_update = NULL \ +} + +/** + * XA_STATE() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. + * + * Declare and initialise an xa_state on the stack. + */ +#define XA_STATE(name, array, index) \ + struct xa_state name = __XA_STATE(array, index, 0, 0) + +/** + * XA_STATE_ORDER() - Declare an XArray operation state. + * @name: Name of this operation state (usually xas). + * @array: Array to operate on. + * @index: Initial index of interest. + * @order: Order of entry. + * + * Declare and initialise an xa_state on the stack. 
This variant of + * XA_STATE() allows you to specify the 'order' of the element you + * want to operate on. + */ +#define XA_STATE_ORDER(name, array, index, order) \ + struct xa_state name = __XA_STATE(array, \ + (index >> order) << order, \ + order - (order % XA_CHUNK_SHIFT), \ + (1U << (order % XA_CHUNK_SHIFT)) - 1) + +#define xas_marked(xas, mark) xa_marked((xas)->xa, (mark)) +#define xas_trylock(xas) xa_trylock((xas)->xa) +#define xas_lock(xas) xa_lock((xas)->xa) +#define xas_unlock(xas) xa_unlock((xas)->xa) +#define xas_lock_bh(xas) xa_lock_bh((xas)->xa) +#define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa) +#define xas_lock_irq(xas) xa_lock_irq((xas)->xa) +#define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa) +#define xas_lock_irqsave(xas, flags) \ + xa_lock_irqsave((xas)->xa, flags) +#define xas_unlock_irqrestore(xas, flags) \ + xa_unlock_irqrestore((xas)->xa, flags) + +/** + * xas_error() - Return an errno stored in the xa_state. + * @xas: XArray operation state. + * + * Return: 0 if no error has been noted. A negative errno if one has. + */ +static inline int xas_error(const struct xa_state *xas) +{ + return xa_err(xas->xa_node); +} + +/** + * xas_set_err() - Note an error in the xa_state. + * @xas: XArray operation state. + * @err: Negative error number. + * + * Only call this function with a negative @err; zero or positive errors + * will probably not behave the way you think they should. If you want + * to clear the error from an xa_state, use xas_reset(). + */ +static inline void xas_set_err(struct xa_state *xas, long err) +{ + xas->xa_node = XA_ERROR(err); +} + +/** + * xas_invalid() - Is the xas in a retry or error state? + * @xas: XArray operation state. + * + * Return: %true if the xas cannot be used for operations. + */ +static inline bool xas_invalid(const struct xa_state *xas) +{ + return (unsigned long)xas->xa_node & 3; +} + +/** + * xas_valid() - Is the xas a valid cursor into the array? + * @xas: XArray operation state.
+ * + * Return: %true if the xas can be used for operations. + */ +static inline bool xas_valid(const struct xa_state *xas) +{ + return !xas_invalid(xas); +} + +/** + * xas_is_node() - Does the xas point to a node? + * @xas: XArray operation state. + * + * Return: %true if the xas currently references a node. + */ +static inline bool xas_is_node(const struct xa_state *xas) +{ + return xas_valid(xas) && xas->xa_node; +} + +/* True if the pointer is something other than a node */ +static inline bool xas_not_node(struct xa_node *node) +{ + return ((unsigned long)node & 3) || !node; +} + +/* True if the node represents RESTART or an error */ +static inline bool xas_frozen(struct xa_node *node) +{ + return (unsigned long)node & 2; +} + +/* True if the node represents head-of-tree, RESTART or BOUNDS */ +static inline bool xas_top(struct xa_node *node) +{ + return node <= XAS_RESTART; +} + +/** + * xas_reset() - Reset an XArray operation state. + * @xas: XArray operation state. + * + * Resets the error or walk state of the @xas so future walks of the + * array will start from the root. Use this if you have dropped the + * xarray lock and want to reuse the xa_state. + * + * Context: Any context. + */ +static inline void xas_reset(struct xa_state *xas) +{ + xas->xa_node = XAS_RESTART; +} + +/** + * xas_retry() - Retry the operation if appropriate. + * @xas: XArray operation state. + * @entry: Entry from xarray. + * + * The advanced functions may sometimes return an internal entry, such as + * a retry entry or a zero entry. This function sets up the @xas to restart + * the walk from the head of the array if needed. + * + * Context: Any context. + * Return: true if the operation needs to be retried. 
+ */ +static inline bool xas_retry(struct xa_state *xas, const void *entry) +{ + if (xa_is_zero(entry)) + return true; + if (!xa_is_retry(entry)) + return false; + xas_reset(xas); + return true; +} + +void *xas_load(struct xa_state *); +void *xas_store(struct xa_state *, void *entry); +void *xas_find(struct xa_state *, unsigned long max); +void *xas_find_conflict(struct xa_state *); + +bool xas_get_mark(const struct xa_state *, xa_mark_t); +void xas_set_mark(const struct xa_state *, xa_mark_t); +void xas_clear_mark(const struct xa_state *, xa_mark_t); +void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t); +void xas_init_marks(const struct xa_state *); + +bool xas_nomem(struct xa_state *, gfp_t); +void xas_pause(struct xa_state *); + +void xas_create_range(struct xa_state *); + +#ifdef CONFIG_XARRAY_MULTI +int xa_get_order(struct xarray *, unsigned long index); +void xas_split(struct xa_state *, void *entry, unsigned int order); +void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); +#else +static inline int xa_get_order(struct xarray *xa, unsigned long index) +{ + return 0; +} + +static inline void xas_split(struct xa_state *xas, void *entry, + unsigned int order) +{ + xas_store(xas, entry); +} + +static inline void xas_split_alloc(struct xa_state *xas, void *entry, + unsigned int order, gfp_t gfp) +{ +} +#endif + +/** + * xas_reload() - Refetch an entry from the xarray. + * @xas: XArray operation state. + * + * Use this function to check that a previously loaded entry still has + * the same value. This is useful for the lockless pagecache lookup where + * we walk the array with only the RCU lock to protect us, lock the page, + * then check that the page hasn't moved since we looked it up. + * + * The caller guarantees that @xas is still valid. If it may be in an + * error or restart state, call xas_load() instead. + * + * Return: The entry at this location in the xarray. 
+ */ +static inline void *xas_reload(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + void *entry; + char offset; + + if (!node) + return xa_head(xas->xa); + if (IS_ENABLED(CONFIG_XARRAY_MULTI)) { + offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK; + entry = xa_entry(xas->xa, node, offset); + if (!xa_is_sibling(entry)) + return entry; + offset = xa_to_sibling(entry); + } else { + offset = xas->xa_offset; + } + return xa_entry(xas->xa, node, offset); +} + +/** + * xas_set() - Set up XArray operation state for a different index. + * @xas: XArray operation state. + * @index: New index into the XArray. + * + * Move the operation state to refer to a different index. This will + * have the effect of starting a walk from the top; see xas_next() + * to move to an adjacent index. + */ +static inline void xas_set(struct xa_state *xas, unsigned long index) +{ + xas->xa_index = index; + xas->xa_node = XAS_RESTART; +} + +/** + * xas_set_order() - Set up XArray operation state for a multislot entry. + * @xas: XArray operation state. + * @index: Target of the operation. + * @order: Entry occupies 2^@order indices. + */ +static inline void xas_set_order(struct xa_state *xas, unsigned long index, + unsigned int order) +{ +#ifdef CONFIG_XARRAY_MULTI + xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0; + xas->xa_shift = order - (order % XA_CHUNK_SHIFT); + xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1; + xas->xa_node = XAS_RESTART; +#else + BUG_ON(order > 0); + xas_set(xas, index); +#endif +} + +/** + * xas_set_update() - Set up XArray operation state for a callback. + * @xas: XArray operation state. + * @update: Function to call when updating a node. + * + * The XArray can notify a caller after it has updated an xa_node. + * This is advanced functionality and is only needed by the page cache. 
+ */ +static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update) +{ + xas->xa_update = update; +} + +/** + * xas_next_entry() - Advance iterator to next present entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * + * xas_next_entry() is an inline function to optimise xarray traversal for + * speed. It is equivalent to calling xas_find(), and will call xas_find() + * for all the hard cases. + * + * Return: The next present entry after the one currently referred to by @xas. + */ +static inline void *xas_next_entry(struct xa_state *xas, unsigned long max) +{ + struct xa_node *node = xas->xa_node; + void *entry; + + if (unlikely(xas_not_node(node) || node->shift || + xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK))) + return xas_find(xas, max); + + do { + if (unlikely(xas->xa_index >= max)) + return xas_find(xas, max); + if (unlikely(xas->xa_offset == XA_CHUNK_MASK)) + return xas_find(xas, max); + entry = xa_entry(xas->xa, node, xas->xa_offset + 1); + if (unlikely(xa_is_internal(entry))) + return xas_find(xas, max); + xas->xa_offset++; + xas->xa_index++; + } while (!entry); + + return entry; +} + +/* Private */ +static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance, + xa_mark_t mark) +{ + unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark]; + unsigned int offset = xas->xa_offset; + + if (advance) + offset++; + if (XA_CHUNK_SIZE == BITS_PER_LONG) { + if (offset < XA_CHUNK_SIZE) { + unsigned long data = *addr & (~0UL << offset); + if (data) + return __ffs(data); + } + return XA_CHUNK_SIZE; + } + + return find_next_bit(addr, XA_CHUNK_SIZE, offset); +} + +/** + * xas_next_marked() - Advance iterator to next marked entry. + * @xas: XArray operation state. + * @max: Highest index to return. + * @mark: Mark to search for. + * + * xas_next_marked() is an inline function to optimise xarray traversal for + * speed. 
It is equivalent to calling xas_find_marked(), and will call + * xas_find_marked() for all the hard cases. + * + * Return: The next marked entry after the one currently referred to by @xas. + */ +static inline void *xas_next_marked(struct xa_state *xas, unsigned long max, + xa_mark_t mark) +{ + struct xa_node *node = xas->xa_node; + void *entry; + unsigned int offset; + + if (unlikely(xas_not_node(node) || node->shift)) + return xas_find_marked(xas, max, mark); + offset = xas_find_chunk(xas, true, mark); + xas->xa_offset = offset; + xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset; + if (xas->xa_index > max) + return NULL; + if (offset == XA_CHUNK_SIZE) + return xas_find_marked(xas, max, mark); + entry = xa_entry(xas->xa, node, offset); + if (!entry) + return xas_find_marked(xas, max, mark); + return entry; +} + +/* + * If iterating while holding a lock, drop the lock and reschedule + * every %XA_CHECK_SCHED loops. + */ +enum { + XA_CHECK_SCHED = 4096, +}; + +/** + * xas_for_each() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * @max: Maximum index to retrieve from array. + * + * The loop body will be executed for each entry present in the xarray + * between the current xas position and @max. @entry will be set to + * the entry retrieved from the xarray. It is safe to delete entries + * from the array in the loop body. You should hold either the RCU lock + * or the xa_lock while iterating. If you need to drop the lock, call + * xas_pause() first. + */ +#define xas_for_each(xas, entry, max) \ + for (entry = xas_find(xas, max); entry; \ + entry = xas_next_entry(xas, max)) + +/** + * xas_for_each_marked() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * @max: Maximum index to retrieve from array. + * @mark: Mark to search for. 
+ * + * The loop body will be executed for each marked entry in the xarray + * between the current xas position and @max. @entry will be set to + * the entry retrieved from the xarray. It is safe to delete entries + * from the array in the loop body. You should hold either the RCU lock + * or the xa_lock while iterating. If you need to drop the lock, call + * xas_pause() first. + */ +#define xas_for_each_marked(xas, entry, max, mark) \ + for (entry = xas_find_marked(xas, max, mark); entry; \ + entry = xas_next_marked(xas, max, mark)) + +/** + * xas_for_each_conflict() - Iterate over a range of an XArray. + * @xas: XArray operation state. + * @entry: Entry retrieved from the array. + * + * The loop body will be executed for each entry in the XArray that + * lies within the range specified by @xas. If the loop terminates + * normally, @entry will be %NULL. The user may break out of the loop, + * which will leave @entry set to the conflicting entry. The caller + * may also call xa_set_err() to exit the loop while setting an error + * to record the reason. + */ +#define xas_for_each_conflict(xas, entry) \ + while ((entry = xas_find_conflict(xas))) + +void *__xas_next(struct xa_state *); +void *__xas_prev(struct xa_state *); + +/** + * xas_prev() - Move iterator to previous index. + * @xas: XArray operation state. + * + * If the @xas was in an error state, it will remain in an error state + * and this function will return %NULL. If the @xas has never been walked, + * it will have the effect of calling xas_load(). Otherwise one will be + * subtracted from the index and the state will be walked to the correct + * location in the array for the next operation. + * + * If the iterator was referencing index 0, this function wraps + * around to %ULONG_MAX. + * + * Return: The entry at the new index. This may be %NULL or an internal + * entry. 
+ */ +static inline void *xas_prev(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (unlikely(xas_not_node(node) || node->shift || + xas->xa_offset == 0)) + return __xas_prev(xas); + + xas->xa_index--; + xas->xa_offset--; + return xa_entry(xas->xa, node, xas->xa_offset); +} + +/** + * xas_next() - Move state to next index. + * @xas: XArray operation state. + * + * If the @xas was in an error state, it will remain in an error state + * and this function will return %NULL. If the @xas has never been walked, + * it will have the effect of calling xas_load(). Otherwise one will be + * added to the index and the state will be walked to the correct + * location in the array for the next operation. + * + * If the iterator was referencing index %ULONG_MAX, this function wraps + * around to 0. + * + * Return: The entry at the new index. This may be %NULL or an internal + * entry. + */ +static inline void *xas_next(struct xa_state *xas) +{ + struct xa_node *node = xas->xa_node; + + if (unlikely(xas_not_node(node) || node->shift || + xas->xa_offset == XA_CHUNK_MASK)) + return __xas_next(xas); + + xas->xa_index++; + xas->xa_offset++; + return xa_entry(xas->xa, node, xas->xa_offset); +} +#endif + +#endif /* DINGHAI_XARRAY_H */ diff --git a/include/linux/dinghai/zxdh_auxiliary_bus.h b/include/linux/dinghai/zxdh_auxiliary_bus.h new file mode 100644 index 000000000000..25e854f08849 --- /dev/null +++ b/include/linux/dinghai/zxdh_auxiliary_bus.h @@ -0,0 +1,140 @@ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +#define ZXDH_AUXILIARY_NAME_SIZE 32 +#define ZXDH_AUXILIARY_MODULE_PREFIX "dinghai10e_auxiliary:" + +struct zxdh_auxiliary_device_id +{ + char name[ZXDH_AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + +struct zxdh_auxiliary_device { + struct device dev; + const char *name; + uint32_t id; + uint32_t adev_type; +}; + +/** + * struct zxdh_auxiliary_driver - 
Definition of an auxiliary bus driver + * @probe: Called when a matching device is added to the bus. + * @remove: Called when device is removed from the bus. + * @shutdown: Called at shut-down time to quiesce the device. + * @suspend: Called to put the device to sleep mode. Usually to a power state. + * @resume: Called to bring a device from sleep mode. + * @name: Driver name. + * @driver: Core driver structure. + * @id_table: Table of devices this driver should match on the bus. + * + * Auxiliary drivers follow the standard driver model convention, where + * discovery/enumeration is handled by the core, and drivers provide probe() + * and remove() methods. They support power management and shutdown + * notifications using the standard conventions. + * + * Auxiliary drivers register themselves with the bus by calling + * zxdh_auxiliary_driver_register(). The id_table contains the match_names of + * auxiliary devices that a driver can bind with. + * + * .. code-block:: c + * + * static const struct zxdh_auxiliary_device_id my_auxiliary_id_table[] = { + * { .name = "foo_mod.foo_dev" }, + * {}, + * }; + * + * MODULE_DEVICE_TABLE(zxdh_auxiliary, my_auxiliary_id_table); + * + * struct zxdh_auxiliary_driver my_drv = { + * .name = "myauxiliarydrv", + * .id_table = my_auxiliary_id_table, + * .probe = my_drv_probe, + * .remove = my_drv_remove + * }; + */ +struct zxdh_auxiliary_driver { + int32_t (*probe)(struct zxdh_auxiliary_device *auxdev, const struct zxdh_auxiliary_device_id *id); + int32_t (*remove)(struct zxdh_auxiliary_device *auxdev); + void (*shutdown)(struct zxdh_auxiliary_device *auxdev); + int32_t (*suspend)(struct zxdh_auxiliary_device *auxdev, pm_message_t state); + int32_t (*resume)(struct zxdh_auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct zxdh_auxiliary_device_id *id_table; +}; + +static inline void *zxdh_auxiliary_get_drvdata(struct zxdh_auxiliary_device *auxdev) +{ + return dev_get_drvdata(&auxdev->dev); +} + 
+static inline void zxdh_auxiliary_set_drvdata(struct zxdh_auxiliary_device *auxdev, void *data) +{ + dev_set_drvdata(&auxdev->dev, data); +} + +static inline struct zxdh_auxiliary_device *zxdh_to_auxiliary_dev(struct device *dev) +{ + return container_of(dev, struct zxdh_auxiliary_device, dev); +} + +static inline struct zxdh_auxiliary_driver *zxdh_to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct zxdh_auxiliary_driver, driver); +} + +int32_t zxdh_auxiliary_device_init(struct zxdh_auxiliary_device *auxdev); +int32_t zxdh_aux_dev_add(struct zxdh_auxiliary_device *auxdev, const char *modname); +#define zxdh_auxiliary_device_add(auxdev) zxdh_aux_dev_add(auxdev, KBUILD_MODNAME) + +static inline void zxdh_auxiliary_device_uninit(struct zxdh_auxiliary_device *auxdev) +{ + put_device(&auxdev->dev); +} + +static inline void zxdh_auxiliary_device_delete(struct zxdh_auxiliary_device *auxdev) +{ + device_del(&auxdev->dev); +} + +int32_t zxdh_aux_drv_register(struct zxdh_auxiliary_driver *auxdrv, struct module *owner, + const char *modname); +#define zxdh_auxiliary_driver_register(auxdrv) \ + zxdh_aux_drv_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void zxdh_auxiliary_driver_unregister(struct zxdh_auxiliary_driver *auxdrv); + +/** + * module_auxiliary_driver() - Helper macro for registering an auxiliary driver + * @__auxiliary_driver: auxiliary driver struct + * + * Helper macro for auxiliary drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + * + * .. 
code-block:: c + * + * module_auxiliary_driver(my_drv); + */ +#define module_auxiliary_driver(__auxiliary_driver) \ + module_driver(__auxiliary_driver, zxdh_auxiliary_driver_register, zxdh_auxiliary_driver_unregister) + +struct zxdh_auxiliary_device *zxdh_auxiliary_find_device(struct device *start, + const void *data, + int32_t (*match)(struct device *dev, const void *data)); + +#ifdef __cplusplus +} +#endif + +#endif /* _AUXILIARY_BUS_H_ */ -- Gitee From bbdab8847274ebe0589791858819a2aa3fae8eb5 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Fri, 3 May 2024 12:46:22 +0530 Subject: [PATCH 129/231] perf vendor events amd: Add Zen 5 mapping commit 77a70f80751da3a673232e7bff3f71d8c3995eff upstream Add a regular expression in the map file so that appropriate JSON event files are used for AMD Zen 5 processors belonging to Family 1Ah. Reviewed-by: Ian Rogers Signed-off-by: Sandipan Das Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian Link: https://lore.kernel.org/r/862a6b683755601725f9081897a850127d085ace.1714717230.git.sandipan.das@amd.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: priyanka-mani Signed-off-by: PvsNarasimha --- tools/perf/pmu-events/arch/x86/mapfile.csv | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index 2e7a89c99d76..da3cd1763d8e 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -39,3 +39,4 @@ AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core AuthenticAMD-25-([245][[:xdigit:]]|[[:xdigit:]]),v1,amdzen3,core AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen4,core +AuthenticAMD-26-[[:xdigit:]]+,v1,amdzen5,core -- Gitee From e9d00dc7c6e18ca2b60a34486aef21aae3957644 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 8 Jan 
2026 13:22:17 +0530 Subject: [PATCH 130/231] perf vendor events amd: Add Zen 6 mapping commit 2c3cd43d27c1148fae05b50870f970ab24464fd5 upstream Add a regular expression in the map file so that appropriate JSON event files are used for AMD Zen 6 processors. Restrict the regular expression for AMD Zen 5 processors to known model ranges since they also belong to Family 1Ah. Reviewed-by: Ian Rogers Signed-off-by: Sandipan Das Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Caleb Biggers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian [ Moved this one to the front of the series to keep the tree bisectable, as per Ian Rogers suggestion ] Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: priyanka-mani Signed-off-by: PvsNarasimha --- tools/perf/pmu-events/arch/x86/mapfile.csv | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index da3cd1763d8e..25112fa1300a 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -39,4 +39,5 @@ AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core AuthenticAMD-25-([245][[:xdigit:]]|[[:xdigit:]]),v1,amdzen3,core AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen4,core -AuthenticAMD-26-[[:xdigit:]]+,v1,amdzen5,core +AuthenticAMD-26-([12467][[:xdigit:]]|[[:xdigit:]]),v1,amdzen5,core +AuthenticAMD-26-[[:xdigit:]]+,v1,amdzen6,core -- Gitee From d20c854b1dfb49ae820cefa9bf9cd9c7a19cae89 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 8 Jan 2026 13:22:14 +0530 Subject: [PATCH 131/231] perf vendor events amd: Add Zen 6 core events commit 2f42fb0661d9a979800a506b6a91dc3a7d1fb162 upstream Add core events taken from Section 1.5 "Core Performance Monitor Counters" of the Performance Monitor Counters for AMD Family 1Ah Model 50h-57h 
Processors document available at the link below. This constitutes events which capture information on op dispatch, execution and retirement, branch prediction, L1 and L2 cache activity, TLB activity, etc. Reviewed-by: Ian Rogers Signed-off-by: Sandipan Das Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Caleb Biggers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian Link: https://bugzilla.kernel.org/attachment.cgi?id=309149 Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: priyanka-mani Signed-off-by: PvsNarasimha --- .../arch/x86/amdzen6/branch-prediction.json | 93 ++ .../pmu-events/arch/x86/amdzen6/decode.json | 139 +++ .../arch/x86/amdzen6/execution.json | 192 +++ .../arch/x86/amdzen6/floating-point.json | 1106 +++++++++++++++++ .../arch/x86/amdzen6/inst-cache.json | 120 ++ .../pmu-events/arch/x86/amdzen6/l2-cache.json | 326 +++++ .../arch/x86/amdzen6/load-store.json | 523 ++++++++ 7 files changed, 2499 insertions(+) create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/branch-prediction.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/decode.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/execution.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/floating-point.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/inst-cache.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/l2-cache.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/load-store.json diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/branch-prediction.json b/tools/perf/pmu-events/arch/x86/amdzen6/branch-prediction.json new file mode 100644 index 000000000000..dd70069f68ed --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/branch-prediction.json @@ -0,0 +1,93 @@ +[ + { + "EventName": "bp_l1_tlb_miss_l2_tlb_hit", + "EventCode": "0x84", + "BriefDescription": "Instruction fetches that miss 
in the L1 ITLB but hit in the L2 ITLB." + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks requested) for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks requested) for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks requested) for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks requested) for coalesced pages (16k pages created from four adjacent 4k pages).", + "UMask": "0x08" + }, + { + "EventName": "bp_l1_tlb_miss_l2_tlb_miss.all", + "EventCode": "0x85", + "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks requested) for all page sizes.", + "UMask": "0x0f" + }, + { + "EventName": "bp_pipe_correct", + "EventCode": "0x8b", + "BriefDescription": "Branch predictor pipeline flushes due to internal conditions such as a second level prediction structure." + }, + { + "EventName": "bp_var_target_pred", + "EventCode": "0x8e", + "BriefDescription": "Indirect predictions (branch used the indirect predictor to make a prediction)." + }, + { + "EventName": "bp_early_redir", + "EventCode": "0x91", + "BriefDescription": "Early redirects sent to branch predictor. This happens when either the decoder or dispatch logic is able to detect that the branch predictor needs to be redirected." 
+ }, + { + "EventName": "bp_l1_tlb_fetch_hit.if4k", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 4k or coalesced pages (16k pages created from four adjacent 4k pages).", + "UMask": "0x01" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if2m", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 2M pages.", + "UMask": "0x02" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.if1g", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 1G pages.", + "UMask": "0x04" + }, + { + "EventName": "bp_l1_tlb_fetch_hit.all", + "EventCode": "0x94", + "BriefDescription": "Instruction fetches that hit in the L1 ITLB for all page sizes.", + "UMask": "0x07" + }, + { + "EventName": "bp_fe_redir.resync", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the pipeline frontend caused by resyncs. These are retire time pipeline restarts.", + "UMask": "0x01" + }, + { + "EventName": "bp_fe_redir.ex_redir", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the pipeline frontend caused by mispredicts. These are used for branch direction correction and handling indirect branch target mispredicts.", + "UMask": "0x02" + }, + { + "EventName": "bp_fe_redir.all", + "EventCode": "0x9f", + "BriefDescription": "Redirects of the pipeline frontend caused by any reason." + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/decode.json b/tools/perf/pmu-events/arch/x86/amdzen6/decode.json new file mode 100644 index 000000000000..c5d37fbac948 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/decode.json @@ -0,0 +1,139 @@ +[ + { + "EventName": "de_op_queue_empty", + "EventCode": "0xa9", + "BriefDescription": "Cycles where the op queue is empty. Such cycles indicate that the frontend is not delivering instructions fast enough." 
+ }, + { + "EventName": "de_src_op_disp.x86_decoder", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from x86 decoder.", + "UMask": "0x01" + }, + { + "EventName": "de_src_op_disp.op_cache", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from op cache.", + "UMask": "0x02" + }, + { + "EventName": "de_src_op_disp.all", + "EventCode": "0xaa", + "BriefDescription": "Ops dispatched from any source.", + "UMask": "0x07" + }, + { + "EventName": "de_dis_ops_from_decoder.any_fp", + "EventCode": "0xab", + "BriefDescription": "Ops dispatched from the decoder to a floating-point unit.", + "UMask": "0x04" + }, + { + "EventName": "de_dis_ops_from_decoder.any_int", + "EventCode": "0xab", + "BriefDescription": "Ops dispatched from the decoder to an integer unit.", + "UMask": "0x08" + }, + { + "EventName": "de_disp_stall_cycles_dynamic_tokens_part1.int_phy_reg_file_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to integer physical register file resource stalls.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.load_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to load queue token stalls.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.store_queue_rsrc_stall", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to store queue token stalls.", + "UMask": "0x04" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.taken_brnch_buffer_rsrc", + "EventCode": "0xae", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to taken branch buffer resource stalls.", + "UMask": "0x10" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part1.fp_sch_rsrc_stall", + "EventCode": "0xae", + 
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to floating-point non-schedulable queue token stalls.", + "UMask": "0x40" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq0", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 0 tokens.", + "UMask": "0x01" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq1", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 1 tokens.", + "UMask": "0x02" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq2", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 2 tokens.", + "UMask": "0x04" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq3", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 3 tokens.", + "UMask": "0x08" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq4", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 4 tokens.", + "UMask": "0x10" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.int_sq5", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of integer scheduler 5 tokens.", + "UMask": "0x20" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.ret_q", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to unavailability of retire queue 
tokens.", + "UMask": "0x80" + }, + { + "EventName": "de_dispatch_stall_cycle_dynamic_tokens_part2.all", + "EventCode": "0xaf", + "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to any token stalls.", + "UMask": "0xbf" + }, + { + "EventName": "de_no_dispatch_per_slot.no_ops_from_frontend", + "EventCode": "0x1a0", + "BriefDescription": "Dispatch slots in each cycle that were empty because the frontend did not supply ops.", + "UMask": "0x01" + }, + { + "EventName": "de_no_dispatch_per_slot.backend_stalls", + "EventCode": "0x1a0", + "BriefDescription": "Dispatch slots in each cycle that were unused because of backend stalls.", + "UMask": "0x1e" + }, + { + "EventName": "de_no_dispatch_per_slot.smt_contention", + "EventCode": "0x1a0", + "BriefDescription": "Dispatch slots in each cycle that were unused because the dispatch cycle was granted to the other SMT thread.", + "UMask": "0x60" + }, + { + "EventName": "de_additional_resource_stalls.dispatch_stalls", + "EventCode": "0x1a2", + "BriefDescription": "Counts additional cycles where dispatch is stalled due to a lack of dispatch resources.", + "UMask": "0x30" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/execution.json b/tools/perf/pmu-events/arch/x86/amdzen6/execution.json new file mode 100644 index 000000000000..1b80acc89b6f --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/execution.json @@ -0,0 +1,192 @@ +[ + { + "EventName": "ex_ret_instr", + "EventCode": "0xc0", + "BriefDescription": "Retired instructions." + }, + { + "EventName": "ex_ret_ops", + "EventCode": "0xc1", + "BriefDescription": "Retired macro-ops." + }, + { + "EventName": "ex_ret_brn", + "EventCode": "0xc2", + "BriefDescription": "Retired branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_misp", + "EventCode": "0xc3", + "BriefDescription": "Retired branch instructions that were mispredicted." 
+ }, + { + "EventName": "ex_ret_brn_tkn", + "EventCode": "0xc4", + "BriefDescription": "Retired taken branch instructions (all types of architectural control flow changes, including exceptions and interrupts)." + }, + { + "EventName": "ex_ret_brn_tkn_misp", + "EventCode": "0xc5", + "BriefDescription": "Retired taken branch instructions that were mispredicted." + }, + { + "EventName": "ex_ret_brn_far", + "EventCode": "0xc6", + "BriefDescription": "Retired far control transfers (far call, far jump, far return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts). Far control transfers are not subject to branch prediction." + }, + { + "EventName": "ex_ret_near_ret", + "EventCode": "0xc8", + "BriefDescription": "Retired near returns (RET or RET Iw)." + }, + { + "EventName": "ex_ret_near_ret_mispred", + "EventCode": "0xc9", + "BriefDescription": "Retired near returns that were mispredicted. Each misprediction incurs the same penalty as that of a mispredicted conditional branch instruction." + }, + { + "EventName": "ex_ret_brn_ind_misp", + "EventCode": "0xca", + "BriefDescription": "Retired indirect branch instructions that were mispredicted (only EX mispredicts). Each misprediction incurs the same penalty as that of a mispredicted conditional branch instruction." + }, + { + "EventName": "ex_ret_brn_ind", + "EventCode": "0xcc", + "BriefDescription": "Retired indirect branch instructions." + }, + { + "EventName": "ex_ret_brn_cond", + "EventCode": "0xd1", + "BriefDescription": "Retired conditional branch instructions." + }, + { + "EventName": "ex_div_busy", + "EventCode": "0xd3", + "BriefDescription": "Cycles where the divider is busy." + }, + { + "EventName": "ex_div_count", + "EventCode": "0xd4", + "BriefDescription": "Divide ops executed." 
+ }, + { + "EventName": "ex_no_retire.empty", + "EventCode": "0xd6", + "BriefDescription": "Cycles where the thread does not retire any ops due to a lack of valid ops in the retire queue (may be caused by front-end bottlenecks or pipeline redirects).", + "UMask": "0x01" + }, + { + "EventName": "ex_no_retire.not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles where the thread does not retire any ops as the oldest retire slot is waiting to be marked as completed.", + "UMask": "0x02" + }, + { + "EventName": "ex_no_retire.other", + "EventCode": "0xd6", + "BriefDescription": "Cycles where the thread does not retire any ops due to other reasons (retire breaks, traps, faults, etc.).", + "UMask": "0x08" + }, + { + "EventName": "ex_no_retire.thread_not_selected", + "EventCode": "0xd6", + "BriefDescription": "Cycles where the thread does not retire any ops as thread arbitration did not select the current thread.", + "UMask": "0x10" + }, + { + "EventName": "ex_no_retire.load_not_complete", + "EventCode": "0xd6", + "BriefDescription": "Cycles where the thread does not retire any ops due to missing load completion.", + "UMask": "0xa2" + }, + { + "EventName": "ex_ret_ucode_instr", + "EventCode": "0x1c1", + "BriefDescription": "Retired microcoded instructions." + }, + { + "EventName": "ex_ret_ucode_ops", + "EventCode": "0x1c2", + "BriefDescription": "Retired microcode ops." + }, + { + "EventName": "ex_ret_brn_cond_misp", + "EventCode": "0x1c7", + "BriefDescription": "Retired conditional branch instructions that were mispredicted due to direction mismatch." + }, + { + "EventName": "ex_ret_brn_uncond_ind_near_misp", + "EventCode": "0x1c8", + "BriefDescription": "Retired unconditional indirect near branch instructions that were mispredicted." + }, + { + "EventName": "ex_ret_brn_uncond", + "EventCode": "0x1c9", + "BriefDescription": "Retired unconditional branch instructions." 
+ }, + { + "EventName": "ex_tagged_ibs_ops.tagged", + "EventCode": "0x1cf", + "BriefDescription": "Execution IBS tagged ops.", + "UMask": "0x01" + }, + { + "EventName": "ex_tagged_ibs_ops.tagged_ret", + "EventCode": "0x1cf", + "BriefDescription": "Execution IBS tagged ops that retired.", + "UMask": "0x02" + }, + { + "EventName": "ex_tagged_ibs_ops.rollovers", + "EventCode": "0x1cf", + "BriefDescription": "Execution IBS periodic counter rollovers due to a previous tagged op not being IBS complete.", + "UMask": "0x04" + }, + { + "EventName": "ex_tagged_ibs_ops.filtered", + "EventCode": "0x1cf", + "BriefDescription": "Execution IBS tagged ops that retired but were discarded due to IBS filtering.", + "UMask": "0x08" + }, + { + "EventName": "ex_tagged_ibs_ops.valid", + "EventCode": "0x1cf", + "BriefDescription": "Execution IBS tagged ops that resulted in a valid sample and an IBS interrupt.", + "UMask": "0x10" + }, + { + "EventName": "ex_ret_fused_instr", + "EventCode": "0x1d0", + "BriefDescription": "Retired fused instructions." 
+ }, + { + "EventName": "ex_mprof_ibs_ops.tagged", + "EventCode": "0x2c0", + "BriefDescription": "Memory Profiler IBS tagged ops.", + "UMask": "0x01" + }, + { + "EventName": "ex_mprof_ibs_ops.tagged_ret", + "EventCode": "0x2c0", + "BriefDescription": "Memory Profiler IBS tagged ops that retired.", + "UMask": "0x02" + }, + { + "EventName": "ex_mprof_ibs_ops.rollovers", + "EventCode": "0x2c0", + "BriefDescription": "Memory Profiler IBS periodic counter rollovers due to a previous tagged op not being IBS complete.", + "UMask": "0x04" + }, + { + "EventName": "ex_mprof_ibs_ops.filtered", + "EventCode": "0x2c0", + "BriefDescription": "Memory Profiler IBS tagged ops that retired but were discarded due to IBS filtering.", + "UMask": "0x08" + }, + { + "EventName": "ex_mprof_ibs_ops.valid", + "EventCode": "0x2c0", + "BriefDescription": "Memory Profiler IBS tagged ops that resulted in a valid sample and an IBS interrupt.", + "UMask": "0x10" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen6/floating-point.json new file mode 100644 index 000000000000..03cb039434de --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/floating-point.json @@ -0,0 +1,1106 @@ +[ + { + "EventName": "fp_ret_x87_fp_ops.add_sub_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point add and subtract uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_x87_fp_ops.mul_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point multiply uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_x87_fp_ops.div_sqrt_ops", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point divide and square root uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_x87_fp_ops.all", + "EventCode": "0x02", + "BriefDescription": "Retired x87 floating-point uops of all types.", + "UMask": "0x07" + }, + { + "EventName": "fp_ret_sse_avx_ops.add_sub_flops", + "EventCode": "0x03", + 
"BriefDescription": "Retired SSE and AVX add and subtract FLOPs.", + "UMask": "0x01" + }, + { + "EventName": "fp_ret_sse_avx_ops.mult_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX multiply FLOPs.", + "UMask": "0x02" + }, + { + "EventName": "fp_ret_sse_avx_ops.div_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX divide and square root FLOPs.", + "UMask": "0x04" + }, + { + "EventName": "fp_ret_sse_avx_ops.mac_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX multiply-accumulate FLOPs (each operation is counted as 2 FLOPs, bfloat operations are not included).", + "UMask": "0x08" + }, + { + "EventName": "fp_ret_sse_avx_ops.bfloat16_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX bfloat16 FLOPs.", + "UMask": "0x20" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX scalar single-precision (FP32) FLOPs.", + "UMask": "0x40" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_single_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX packed single-precision (FP32) FLOPs.", + "UMask": "0x60" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX scalar double-precision (FP64) FLOPs.", + "UMask": "0x80" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_double_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX packed double-precision (FP64) FLOPs.", + "UMask": "0xa0" + }, + { + "EventName": "fp_ret_sse_avx_ops.scalar_half_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX scalar half-precision (FP16) FLOPs.", + "UMask": "0xa0" + }, + { + "EventName": "fp_ret_sse_avx_ops.packed_half_flops", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX packed half-precision (FP16) FLOPs.", + "UMask": "0xa0" + }, + { + "EventName": 
"fp_ret_sse_avx_ops.all", + "EventCode": "0x03", + "BriefDescription": "Retired SSE and AVX FLOPs of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_ret_by_width.x87", + "EventCode": "0x08", + "BriefDescription": "Retired x87 floating-point uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_ret_by_width.mmx", + "EventCode": "0x08", + "BriefDescription": "Retired MMX floating-point uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_ret_by_width.scalar", + "EventCode": "0x08", + "BriefDescription": "Retired scalar floating-point uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_ret_by_width.pack_128", + "EventCode": "0x08", + "BriefDescription": "Retired packed 128-bit floating-point uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_ops_ret_by_width.pack_256", + "EventCode": "0x08", + "BriefDescription": "Retired packed 256-bit floating-point uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_ret_by_width.pack_512", + "EventCode": "0x08", + "BriefDescription": "Retired packed 512-bit floating-point uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_ret_by_width.all", + "EventCode": "0x08", + "BriefDescription": "Retired floating-point uops of all widths.", + "UMask": "0x3f" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_add", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point add uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point subtract uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply uops.", + "UMask": "0x03" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point multiply-accumulate uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_div", + "EventCode": 
"0x0a", + "BriefDescription": "Retired scalar floating-point divide uops.", + "UMask": "0x05" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point square root uops.", + "UMask": "0x06" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_cmp", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point compare uops.", + "UMask": "0x07" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point convert uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point blend uops.", + "UMask": "0x09" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_move", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point move uops.", + "UMask": "0x0a" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_shuffle", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_bfloat", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point bfloat uops.", + "UMask": "0x0c" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_logical", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point logical uops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_other", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point uops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_ops_ret_by_type.scalar_all", + "EventCode": "0x0a", + "BriefDescription": "Retired scalar floating-point uops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_ops_ret_by_type.vector_add", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point add uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_ops_ret_by_type.vector_sub", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point subtract uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_ops_ret_by_type.vector_mul", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply uops.", + "UMask": "0x30" + }, + { + "EventName": "fp_ops_ret_by_type.vector_mac", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point multiply-accumulate uops.", + "UMask": "0x40" + }, + { + "EventName": "fp_ops_ret_by_type.vector_div", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point divide uops.", + "UMask": "0x50" + }, + { + "EventName": "fp_ops_ret_by_type.vector_sqrt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point square root uops.", + "UMask": "0x60" + }, + { + "EventName": "fp_ops_ret_by_type.vector_cmp", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point compare uops.", + "UMask": "0x70" + }, + { + "EventName": 
"fp_ops_ret_by_type.vector_cvt", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point convert uops.", + "UMask": "0x80" + }, + { + "EventName": "fp_ops_ret_by_type.vector_blend", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point blend uops.", + "UMask": "0x90" + }, + { + "EventName": "fp_ops_ret_by_type.vector_move", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point move uops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_ops_ret_by_type.vector_shuffle", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_ops_ret_by_type.vector_bfloat", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point bfloat uops.", + "UMask": "0xc0" + }, + { + "EventName": "fp_ops_ret_by_type.vector_logical", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point logical uops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_ops_ret_by_type.vector_other", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point uops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_ops_ret_by_type.vector_all", + "EventCode": "0x0a", + "BriefDescription": "Retired vector floating-point uops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_ops_ret_by_type.all", + "EventCode": "0x0a", + "BriefDescription": "Retired floating-point uops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer add uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer subtract uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_mul", + 
"EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply uops.", + "UMask": "0x03" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer multiply-accumulate uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_aes", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer AES uops.", + "UMask": "0x05" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_sha", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer SHA uops.", + "UMask": "0x06" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer compare uops.", + "UMask": "0x07" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_cvt", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer convert or pack uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shift or rotate uops.", + "UMask": "0x09" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer move uops.", + "UMask": "0x0a" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_vnni", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer VNNI uops.", + "UMask": "0x0c" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer logical uops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer uops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_sse_avx_ops_ret.mmx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX integer uops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_add", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer add uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_sub", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer subtract uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_mul", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply uops.", + "UMask": "0x30" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_mac", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer multiply-accumulate uops.", + "UMask": "0x40" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_aes", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer AES uops.", + "UMask": "0x50" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_sha", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer SHA uops.", + "UMask": "0x60" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_cmp", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer compare uops.", + "UMask": "0x70" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_cvt", + "EventCode": "0x0b", + "BriefDescription": "Retired 
SSE and AVX integer convert or pack uops.", + "UMask": "0x80" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_shift", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shift or rotate uops.", + "UMask": "0x90" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_mov", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer move uops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_shuffle", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_vnni", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer VNNI uops.", + "UMask": "0xc0" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_logical", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer logical uops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_other", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer uops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_sse_avx_ops_ret.sse_avx_all", + "EventCode": "0x0b", + "BriefDescription": "Retired SSE and AVX integer uops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_sse_avx_ops_ret.all", + "EventCode": "0x0b", + "BriefDescription": "Retired MMX, SSE and AVX integer uops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_pack_ops_ret.fp128_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point add uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_pack_ops_ret.fp128_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point subtract uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_pack_ops_ret.fp128_mul", + "EventCode": "0x0c", + "BriefDescription": 
"Retired 128-bit packed floating-point multiply uops.", + "UMask": "0x03" + }, + { + "EventName": "fp_pack_ops_ret.fp128_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point multiply-accumulate uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_pack_ops_ret.fp128_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point divide uops.", + "UMask": "0x05" + }, + { + "EventName": "fp_pack_ops_ret.fp128_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point square root uops.", + "UMask": "0x06" + }, + { + "EventName": "fp_pack_ops_ret.fp128_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point compare uops.", + "UMask": "0x07" + }, + { + "EventName": "fp_pack_ops_ret.fp128_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point convert uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_pack_ops_ret.fp128_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point blend uops.", + "UMask": "0x09" + }, + { + "EventName": "fp_pack_ops_ret.fp128_mov", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point move uops.", + "UMask": "0x0a" + }, + { + "EventName": "fp_pack_ops_ret.fp128_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_pack_ops_ret.fp128_bfloat", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point bfloat uops.", + "UMask": "0x0c" + }, + { + "EventName": "fp_pack_ops_ret.fp128_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point logical uops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_pack_ops_ret.fp128_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point uops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_pack_ops_ret.fp128_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 128-bit packed floating-point uops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_ops_ret.fp256_add", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point add uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_pack_ops_ret.fp256_sub", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point subtract uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_pack_ops_ret.fp256_mul", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply uops.", + "UMask": "0x30" + }, + { + "EventName": "fp_pack_ops_ret.fp256_mac", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point multiply-accumulate uops.", + "UMask": "0x40" + }, + { + "EventName": "fp_pack_ops_ret.fp256_div", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point divide uops.", + "UMask": "0x50" + }, + { + "EventName": "fp_pack_ops_ret.fp256_sqrt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point square root uops.", + "UMask": "0x60" + }, + { + "EventName": "fp_pack_ops_ret.fp256_cmp", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point compare uops.", + "UMask": "0x70" + }, 
+ { + "EventName": "fp_pack_ops_ret.fp256_cvt", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point convert uops.", + "UMask": "0x80" + }, + { + "EventName": "fp_pack_ops_ret.fp256_blend", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point blend uops.", + "UMask": "0x90" + }, + { + "EventName": "fp_pack_ops_ret.fp256_mov", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point move uops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_pack_ops_ret.fp256_shuffle", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_pack_ops_ret.fp256_logical", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point logical uops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_pack_ops_ret.fp256_other", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point uops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_pack_ops_ret.fp256_all", + "EventCode": "0x0c", + "BriefDescription": "Retired 256-bit packed floating-point uops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_pack_ops_ret.fp_all", + "EventCode": "0x0c", + "BriefDescription": "Retired packed floating-point uops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer add uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer subtract uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply 
uops.", + "UMask": "0x03" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer multiply-accumulate uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_aes", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer AES uops.", + "UMask": "0x05" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_sha", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer SHA uops.", + "UMask": "0x06" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer compare uops.", + "UMask": "0x07" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_cvt", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer convert or pack uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shift or rotate uops.", + "UMask": "0x09" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer move uops.", + "UMask": "0x0a" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. 
horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_vnni", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer VNNI uops.", + "UMask": "0x0c" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer logical uops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer uops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_pack_int_ops_ret.int128_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 128-bit packed integer uops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_add", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer add uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_sub", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer subtract uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_mul", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply uops.", + "UMask": "0x30" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_mac", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer multiply-accumulate uops.", + "UMask": "0x40" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_cmp", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer compare uops.", + "UMask": "0x70" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_shift", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shift or rotate uops.", + "UMask": "0x90" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_mov", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer move uops.", + "UMask": "0xa0" + }, + { + "EventName": 
"fp_pack_int_ops_ret.int256_shuffle", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_vnni", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer VNNI uops.", + "UMask": "0xc0" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_logical", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer logical uops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_other", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer uops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_pack_int_ops_ret.int256_all", + "EventCode": "0x0d", + "BriefDescription": "Retired 256-bit packed integer uops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_pack_int_ops_ret.int_all", + "EventCode": "0x0d", + "BriefDescription": "Retired packed integer uops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_disp_faults.x87_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for x87 fills.", + "UMask": "0x01" + }, + { + "EventName": "fp_disp_faults.xmm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for XMM fills.", + "UMask": "0x02" + }, + { + "EventName": "fp_disp_faults.ymm_fill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM fills.", + "UMask": "0x04" + }, + { + "EventName": "fp_disp_faults.ymm_spill_fault", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults for YMM spills.", + "UMask": "0x08" + }, + { + "EventName": "fp_disp_faults.sse_avx_all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types for SSE and AVX ops.", + "UMask": 
"0x0e" + }, + { + "EventName": "fp_disp_faults.all", + "EventCode": "0x0e", + "BriefDescription": "Floating-point dispatch faults of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_add", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point add uops.", + "UMask": "0x01" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_sub", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point subtract uops.", + "UMask": "0x02" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_mul", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point multiply uops.", + "UMask": "0x03" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_mac", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point multiply-accumulate uops.", + "UMask": "0x04" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_div", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point divide uops.", + "UMask": "0x05" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_sqrt", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point square root uops.", + "UMask": "0x06" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_cmp", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point compare uops.", + "UMask": "0x07" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_cvt", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point convert uops.", + "UMask": "0x08" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_blend", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point blend uops.", + "UMask": "0x09" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_mov", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point move uops.", + "UMask": "0x0a" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_shuffle", + 
"EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0x0b" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_bfloat", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point bfloat uops.", + "UMask": "0x0c" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_logical", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point logical uops.", + "UMask": "0x0d" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_other", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point uops of other types.", + "UMask": "0x0e" + }, + { + "EventName": "fp_pack_512b_ops_ret.fp512_all", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed floating-point uops of all types.", + "UMask": "0x0f" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_add", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer add uops.", + "UMask": "0x10" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_sub", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer subtract uops.", + "UMask": "0x20" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_mul", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer multiply uops.", + "UMask": "0x30" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_mac", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer multiply-accumulate uops.", + "UMask": "0x40" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_aes", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer AES uops.", + "UMask": "0x50" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_sha", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer SHA uops.", + "UMask": 
"0x60" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_cmp", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer compare uops.", + "UMask": "0x70" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_cvt", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer convert or pack uops.", + "UMask": "0x80" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_shift", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer shift or rotate uops.", + "UMask": "0x90" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_mov", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer move uops.", + "UMask": "0xa0" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_shuffle", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer shuffle uops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).", + "UMask": "0xb0" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_vnni", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer VNNI uops.", + "UMask": "0xc0" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_logical", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer logical uops.", + "UMask": "0xd0" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_other", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer uops of other types.", + "UMask": "0xe0" + }, + { + "EventName": "fp_pack_512b_ops_ret.int512_all", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed integer uops of all types.", + "UMask": "0xf0" + }, + { + "EventName": "fp_pack_512b_ops_ret.512b_all", + "EventCode": "0x0f", + "BriefDescription": "Retired 512-bit packed uops of all types.", + "UMask": "0xff" + }, + { + "EventName": "fp_nsq_read_stalls.fp_prf", + "EventCode": "0x13", + "BriefDescription": "Cycles when reads of the 
NSQ and writes to the floating-point or SIMD schedulers are stalled due to insufficient free physical register file (FP-PRF) entries.", + "UMask": "0x0e" + }, + { + "EventName": "fp_nsq_read_stalls.k_prf", + "EventCode": "0x13", + "BriefDescription": "Cycles when reads of the NSQ and writes to the floating-point or SIMD schedulers are stalled due to insufficient free mask physical register file (K-PRF) entries.", + "UMask": "0x0e" + }, + { + "EventName": "fp_nsq_read_stalls.fp_sq", + "EventCode": "0x13", + "BriefDescription": "Cycles when reads of the NSQ and writes to the floating-point or SIMD schedulers are stalled due to insufficient free scheduler entries.", + "UMask": "0x0e" + }, + { + "EventName": "fp_nsq_read_stalls.all", + "EventCode": "0x13", + "BriefDescription": "Cycles when reads of the NSQ and writes to the floating-point or SIMD schedulers are stalled due to any reason.", + "UMask": "0x0e" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/inst-cache.json b/tools/perf/pmu-events/arch/x86/amdzen6/inst-cache.json new file mode 100644 index 000000000000..5ab6766f8940 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/inst-cache.json @@ -0,0 +1,120 @@ +[ + { + "EventName": "ic_cache_fill_l2", + "EventCode": "0x82", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from the L2 cache." + }, + { + "EventName": "ic_cache_fill_sys", + "EventCode": "0x83", + "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from system memory or another cache." + }, + { + "EventName": "ic_fetch_ibs_events.tagged", + "EventCode": "0x188", + "BriefDescription": "Fetch IBS tagged fetches. 
Not all tagged fetches result in a valid sample and an IBS interrupt.", + "UMask": "0x02" + }, + { + "EventName": "ic_fetch_ibs_events.filtered", + "EventCode": "0x188", + "BriefDescription": "Fetch IBS tagged fetches that were discarded due to IBS filtering.", + "UMask": "0x08" + }, + { + "EventName": "ic_fetch_ibs_events.valid", + "EventCode": "0x188", + "BriefDescription": "Fetch IBS tagged fetches that resulted in a valid sample and an IBS interrupt.", + "UMask": "0x10" + }, + { + "EventName": "op_cache_hit_miss.hit", + "EventCode": "0x28f", + "BriefDescription": "Op cache fetch hits.", + "UMask": "0x03" + }, + { + "EventName": "op_cache_hit_miss.miss", + "EventCode": "0x28f", + "BriefDescription": "Op cache fetch misses.", + "UMask": "0x04" + }, + { + "EventName": "op_cache_hit_miss.all", + "EventCode": "0x28f", + "BriefDescription": "Op cache fetches of all types.", + "UMask": "0x07" + }, + { + "EventName": "ic_fills_from_sys.local_l2", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ic_fills_from_sys.local_ccx", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ic_fills_from_sys.local_all", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ic_fills_from_sys.near_cache", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ic_fills_from_sys.dram_io_near", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { 
+ "EventName": "ic_fills_from_sys.far_cache", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ic_fills_from_sys.remote_cache", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from cache of another CCX in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ic_fills_from_sys.dram_io_far", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "ic_fills_from_sys.dram_io_all", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from either DRAM or MMIO in the same or a different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "ic_fills_from_sys.far_all", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from either cache of another CCX, DRAM or MMIO in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "ic_fills_from_sys.alt_mem", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "ic_fills_from_sys.all", + "EventCode": "0x29c", + "BriefDescription": "Instruction cache fills where data is returned from all types of sources.", + "UMask": "0xdf" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/l2-cache.json b/tools/perf/pmu-events/arch/x86/amdzen6/l2-cache.json new file mode 100644 index 000000000000..b0b2090fb920 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/l2-cache.json @@ -0,0 +1,326 @@ +[ + { + "EventName": "l2_request_g1.group2", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of non-cacheable type (non-cached data and instructions reads, self-modifying code 
checks).", + "UMask": "0x01" + }, + { + "EventName": "l2_request_g1.l2_hwpf", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests from hardware prefetchers to prefetch directly into L2 (hit or miss).", + "UMask": "0x02" + }, + { + "EventName": "l2_request_g1.prefetch_l2_cmd", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests to prefetch directly into L2.", + "UMask": "0x04" + }, + { + "EventName": "l2_request_g1.cacheable_ic_read", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests for instruction cache reads.", + "UMask": "0x10" + }, + { + "EventName": "l2_request_g1.ls_rd_blk_c_s", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests for data cache shared reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g1.rd_blk_x", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests for data cache stores.", + "UMask": "0x40" + }, + { + "EventName": "l2_request_g1.rd_blk_l", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests for data cache reads (includes hardware and software prefetches).", + "UMask": "0x80" + }, + { + "EventName": "l2_request_g1.dc_all", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types from data cache (includes prefetches).", + "UMask": "0xe0" + }, + { + "EventName": "l2_request_g1.no_pf_all", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of common types not including prefetches.", + "UMask": "0xf1" + }, + { + "EventName": "l2_request_g1.all", + "EventCode": "0x60", + "BriefDescription": "L2 cache requests of all types.", + "UMask": "0xf7" + }, + { + "EventName": "l2_request_g2.ls_rd_sized_nc", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests for non-coherent, non-cacheable LS sized reads.", + "UMask": "0x20" + }, + { + "EventName": "l2_request_g2.ls_rd_sized", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests for coherent, non-cacheable LS sized reads.", + "UMask": "0x40" + }, + { + "EventName": 
"l2_request_g2.all", + "EventCode": "0x61", + "BriefDescription": "L2 cache requests of all rare types.", + "UMask": "0x40" + }, + { + "EventName": "l2_wcb_req.wcb_close", + "EventCode": "0x63", + "BriefDescription": "Write Combining Buffer (WCB) closures.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_miss", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the instruction cache that result in L2 misses.", + "UMask": "0x01" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the instruction cache that result in L2 hits on non-modifiable lines.", + "UMask": "0x02" + }, + { + "EventName": "l2_cache_req_stat.ic_fill_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the instruction cache that result in L2 hits on modifiable lines.", + "UMask": "0x04" + }, + { + "EventName": "l2_cache_req_stat.ic_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the instruction cache that result in L2 hits.", + "UMask": "0x06" + }, + { + "EventName": "l2_cache_req_stat.ic_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the instruction cache that result in L2 accesses.", + "UMask": "0x07" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_c", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 misses.", + "UMask": "0x08" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache and the instruction cache that result in L2 misses.", + "UMask": "0x09" + }, + { + "EventName": 
"l2_cache_req_stat.ls_rd_blk_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) that result in data cache stores or L2 state change hits.", + "UMask": "0x10" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 hits on non-modifiable lines.", + "UMask": "0x20" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 hits on modifiable lines.", + "UMask": "0x40" + }, + { + "EventName": "l2_cache_req_stat.ls_rd_blk_cs", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 read hits on shared lines.", + "UMask": "0x80" + }, + { + "EventName": "l2_cache_req_stat.dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 hits.", + "UMask": "0xf0" + }, + { + "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache and the instruction cache that result in L2 hits.", + "UMask": "0xf6" + }, + { + "EventName": "l2_cache_req_stat.dc_access_in_l2", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache that result in L2 accesses.", + "UMask": "0xf8" + }, + { + "EventName": "l2_cache_req_stat.all", + "EventCode": "0x64", + "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) from the data cache and the instruction cache that result in L2 accesses.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_hit_l2.l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 
prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_hit_l2.l1_dc_l2_hwpf", + "EventCode": "0x70", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf", + "EventCode": "0x71", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache but hit in the L3 cache and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_pf_miss_l2_l3.l2_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L2 hardware prefetchers.", + "UMask": "0x1f" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_hwpf", + "EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data hardware prefetchers.", + "UMask": "0xe0" + }, + { + "EventName": "l2_pf_miss_l2_l3.l1_dc_l2_hwpf", + 
"EventCode": "0x72", + "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 as well as the L3 caches and are generated from L1 data and L2 hardware prefetchers.", + "UMask": "0xff" + }, + { + "EventName": "l2_fill_rsp_src.local_ccx", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "l2_fill_rsp_src.near_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_near", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "l2_fill_rsp_src.far_cache", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_far", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "l2_fill_rsp_src.dram_io_all", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from either DRAM or MMIO in the same or a different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "l2_fill_rsp_src.far_all", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from either cache of another CCX, DRAM or MMIO in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "l2_fill_rsp_src.alt_mem", + "EventCode": "0x165", + "BriefDescription": "L2 cache fills where data is returned from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "l2_fill_rsp_src.all", + "EventCode": "0x165", + "BriefDescription": "L2 cache 
fills where data is returned from all types of sources.", + "UMask": "0xde" + }, + { + "EventName": "l2_sys_bw.local_dram_fill", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for fill events that target the same NUMA node and return from DRAM in the same NUMA node.", + "UMask": "0x01" + }, + { + "EventName": "l2_sys_bw.remote_dram_fill", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for fill events that target a different NUMA node and return from DRAM in a different NUMA node.", + "UMask": "0x02" + }, + { + "EventName": "l2_sys_bw.nt_write", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for non-temporal write events that target all NUMA nodes.", + "UMask": "0x04" + }, + { + "EventName": "l2_sys_bw.local_scm_fill", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for fill events that target the same NUMA node and return from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "l2_sys_bw.remote_scm_fill", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for fill events that target a different NUMA node and return from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20" + }, + { + "EventName": "l2_sys_bw.victim", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for cache victim events that target all NUMA nodes.", + "UMask": "0x40" + }, + { + "EventName": "l2_sys_bw.all", + "EventCode": "0x175", + "BriefDescription": "System bandwidth utilization for all types of events (total utilization).", + "UMask": "0xff" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/load-store.json b/tools/perf/pmu-events/arch/x86/amdzen6/load-store.json new file mode 100644 index 000000000000..4291eb59426f --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/load-store.json @@ -0,0 +1,523 @@ +[ + { + "EventName": "ls_bad_status2.stli_other", + 
"EventCode": "0x24", + "BriefDescription": "Store-to-load conflicts (loads unable to complete due to a non-forwardable conflict with an older store).", + "UMask": "0x02" + }, + { + "EventName": "ls_locks.bus_lock", + "EventCode": "0x25", + "BriefDescription": "Retired lock instructions which caused a bus lock (non-cacheable or cache-misaligned lock).", + "UMask": "0x01" + }, + { + "EventName": "ls_locks.all", + "EventCode": "0x25", + "BriefDescription": "Retired lock instructions of all types.", + "UMask": "0x1f" + }, + { + "EventName": "ls_ret_cl_flush", + "EventCode": "0x26", + "BriefDescription": "Retired CLFLUSH instructions." + }, + { + "EventName": "ls_ret_cpuid", + "EventCode": "0x27", + "BriefDescription": "Retired CPUID instructions." + }, + { + "EventName": "ls_dispatch.pure_ld", + "EventCode": "0x29", + "BriefDescription": "Memory load operations dispatched to the load-store unit.", + "UMask": "0x01" + }, + { + "EventName": "ls_dispatch.pure_st", + "EventCode": "0x29", + "BriefDescription": "Memory store operations dispatched to the load-store unit.", + "UMask": "0x02" + }, + { + "EventName": "ls_dispatch.ld_st", + "EventCode": "0x29", + "BriefDescription": "Memory load-store operations (load from and store to the same memory address) dispatched to the load-store unit.", + "UMask": "0x04" + }, + { + "EventName": "ls_dispatch.all", + "EventCode": "0x29", + "BriefDescription": "Memory operations dispatched to the load-store unit of all types.", + "UMask": "0x07" + }, + { + "EventName": "ls_smi_rx", + "EventCode": "0x2b", + "BriefDescription": "System Management Interrupts (SMIs) received." + }, + { + "EventName": "ls_int_taken", + "EventCode": "0x2c", + "BriefDescription": "Interrupts taken." + }, + { + "EventName": "ls_stlf", + "EventCode": "0x35", + "BriefDescription": "Store-to-load-forward (STLF) hits." 
+ }, + { + "EventName": "ls_st_commit_cancel.older_st_vis_dep", + "EventCode": "0x37", + "BriefDescription": "Store commits cancelled due to an older store, that the thread was waiting on to become globally visible, was unable to become globally visible.", + "UMask": "0x01" + }, + { + "EventName": "ls_mab_alloc.ls", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for load-store allocations.", + "UMask": "0x07" + }, + { + "EventName": "ls_mab_alloc.hwpf", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for hardware prefetcher allocations.", + "UMask": "0x08" + }, + { + "EventName": "ls_mab_alloc.all", + "EventCode": "0x41", + "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for all types of allocations.", + "UMask": "0x0f" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_l2", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_ccx", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_dmnd_fills_from_sys.local_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_dmnd_fills_from_sys.near_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_near", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", 
+ "UMask": "0x08" + }, + { + "EventName": "ls_dmnd_fills_from_sys.far_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_dmnd_fills_from_sys.remote_cache", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from cache of another CCX in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_far", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "ls_dmnd_fills_from_sys.dram_io_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from either DRAM or MMIO in the same or a different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "ls_dmnd_fills_from_sys.far_all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from either cache of another CCX, DRAM or MMIO in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "ls_dmnd_fills_from_sys.alt_mem", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "ls_dmnd_fills_from_sys.all", + "EventCode": "0x43", + "BriefDescription": "Demand data cache fills where data is returned from all types of sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_any_fills_from_sys.local_l2", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_any_fills_from_sys.local_ccx", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + 
}, + { + "EventName": "ls_any_fills_from_sys.local_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_any_fills_from_sys.near_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_near", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_any_fills_from_sys.far_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_any_fills_from_sys.remote_cache", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from cache of another CCX in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_far", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "ls_any_fills_from_sys.dram_io_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from either DRAM or MMIO in the same or a different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "ls_any_fills_from_sys.far_all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "ls_any_fills_from_sys.alt_mem", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned 
from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "ls_any_fills_from_sys.all", + "EventCode": "0x44", + "BriefDescription": "Any data cache fills where data is returned from all types of data sources.", + "UMask": "0xff" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 4k pages.", + "UMask": "0x01" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for coalesced pages (16k pages created from four adjacent 4k pages).", + "UMask": "0x02" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 2M pages.", + "UMask": "0x04" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 1G pages.", + "UMask": "0x08" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks requested) for 4k pages.", + "UMask": "0x10" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks requested) for coalesced pages (16k pages created from four adjacent 4k pages).", + "UMask": "0x20" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks requested) for 2M pages.", + "UMask": "0x40" + }, + { + "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks requested) for 1G pages.", + "UMask": "0x80" + }, + { + "EventName": "ls_l1_d_tlb_miss.l2_miss_all", + "EventCode": 
"0x45", + "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks requested) for all page sizes.", + "UMask": "0xf0" + }, + { + "EventName": "ls_l1_d_tlb_miss.all", + "EventCode": "0x45", + "BriefDescription": "L1 DTLB misses for all page sizes.", + "UMask": "0xff" + }, + { + "EventName": "ls_misal_loads.ma64", + "EventCode": "0x47", + "BriefDescription": "64B misaligned (cacheline crossing) loads.", + "UMask": "0x01" + }, + { + "EventName": "ls_misal_loads.ma4k", + "EventCode": "0x47", + "BriefDescription": "4kB misaligned (page crossing) loads.", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchT0 (move data to all cache levels), T1 (move data to all cache levels except L1) and T2 (move data to all cache levels except L1 and L2).", + "UMask": "0x01" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_w", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchW (move data to L1 cache and mark it modifiable).", + "UMask": "0x02" + }, + { + "EventName": "ls_pref_instr_disp.prefetch_nta", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchNTA (move data with minimum cache pollution i.e. 
non-temporal access).", + "UMask": "0x04" + }, + { + "EventName": "ls_pref_instr_disp.all", + "EventCode": "0x4b", + "BriefDescription": "Software prefetch instructions dispatched (speculative) of all types.", + "UMask": "0x07" + }, + { + "EventName": "wcb_close.full_line_64b", + "EventCode": "0x50", + "BriefDescription": "Events that caused a Write Combining Buffer (WCB) entry to close because all 64 bytes of the entry have been written to.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.dc_hit", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a data cache hit.", + "UMask": "0x01" + }, + { + "EventName": "ls_inef_sw_pref.mab_hit", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a match on an already allocated miss request (MAB).", + "UMask": "0x02" + }, + { + "EventName": "ls_inef_sw_pref.all", + "EventCode": "0x52", + "BriefDescription": "Software prefetches that did not fetch data outside of the processor core for any reason.", + "UMask": "0x03" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_l2", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_ccx", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_sw_pf_dc_fills.local_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_sw_pf_dc_fills.near_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache 
fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_near", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_sw_pf_dc_fills.far_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_sw_pf_dc_fills.remote_cache", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from cache of another CCX in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_far", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "ls_sw_pf_dc_fills.dram_io_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from either DRAM or MMIO in the same or a different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "ls_sw_pf_dc_fills.far_all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from either cache of another CCX, DRAM or MMIO in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "ls_sw_pf_dc_fills.alt_mem", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "ls_sw_pf_dc_fills.all", + "EventCode": "0x59", + "BriefDescription": "Software prefetch data cache fills where data is returned from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_l2", + 
"EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from local L2 cache.", + "UMask": "0x01" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_ccx", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from L3 cache or different L2 cache in the same CCX.", + "UMask": "0x02" + }, + { + "EventName": "ls_hw_pf_dc_fills.local_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from local L2 cache, L3 cache or different L2 cache in the same CCX.", + "UMask": "0x03" + }, + { + "EventName": "ls_hw_pf_dc_fills.near_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_near", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from either DRAM or MMIO in the same NUMA node.", + "UMask": "0x08" + }, + { + "EventName": "ls_hw_pf_dc_fills.far_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x10" + }, + { + "EventName": "ls_hw_pf_dc_fills.remote_cache", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from cache of another CCX in the same or a different NUMA node.", + "UMask": "0x14" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_far", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from either DRAM or MMIO in a different NUMA node.", + "UMask": "0x40" + }, + { + "EventName": "ls_hw_pf_dc_fills.dram_io_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from either DRAM or MMIO in the same or a 
different NUMA node.", + "UMask": "0x48" + }, + { + "EventName": "ls_hw_pf_dc_fills.far_all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from either cache of another CCX, DRAM or MMIO in a different NUMA node.", + "UMask": "0x50" + }, + { + "EventName": "ls_hw_pf_dc_fills.alt_mem", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from extension memory (CXL).", + "UMask": "0x80" + }, + { + "EventName": "ls_hw_pf_dc_fills.all", + "EventCode": "0x5a", + "BriefDescription": "Hardware prefetch data cache fills where data is returned from all types of data sources.", + "UMask": "0xdf" + }, + { + "EventName": "ls_alloc_mab_count", + "EventCode": "0x5f", + "BriefDescription": "In-flight L1 data cache misses i.e. Miss Address Buffer (MAB) allocations each cycle." + }, + { + "EventName": "ls_not_halted_cyc", + "EventCode": "0x76", + "BriefDescription": "Core cycles where the thread is not in halted state." + }, + { + "EventName": "ls_tlb_flush.all", + "EventCode": "0x78", + "BriefDescription": "All TLB flushes.", + "UMask": "0xff" + }, + { + "EventName": "ls_not_halted_p0_cyc.p0_freq_cyc", + "EventCode": "0x120", + "BriefDescription": "Reference cycles (P0 frequency) where the thread is not in halted state.", + "UMask": "0x1" + } +] -- Gitee From 227f2e44106269f3e2bfe27e882b317770f9516e Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 8 Jan 2026 13:22:15 +0530 Subject: [PATCH 132/231] perf vendor events amd: Add Zen 6 uncore events commit de18394f8f69e4cb86e1561f3dd86e9f724b8f25 upstream Add uncore events taken from Section 1.6 "L3 Cache Performance Monitor Counters" and Section 2.2 "UMC Performance Monitor Events" of the Performance Monitor Counters for AMD Family 1Ah Model 50h-57h Processors document available at the link below. This constitutes events which capture L3 cache and UMC command activity. 
Reviewed-by: Ian Rogers Signed-off-by: Sandipan Das Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Caleb Biggers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian Link: https://bugzilla.kernel.org/attachment.cgi?id=309149 Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: priyanka-mani Signed-off-by: PvsNarasimha --- .../pmu-events/arch/x86/amdzen6/l3-cache.json | 177 ++++++++++++++++++ .../arch/x86/amdzen6/memory-controller.json | 101 ++++++++++ 2 files changed, 278 insertions(+) create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/l3-cache.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/memory-controller.json diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/l3-cache.json b/tools/perf/pmu-events/arch/x86/amdzen6/l3-cache.json new file mode 100644 index 000000000000..9b9804317da7 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/l3-cache.json @@ -0,0 +1,177 @@ +[ + { + "EventName": "l3_lookup_state.l3_miss", + "EventCode": "0x04", + "BriefDescription": "L3 cache misses.", + "UMask": "0x01", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.l3_hit", + "EventCode": "0x04", + "BriefDescription": "L3 cache hits.", + "UMask": "0xfe", + "Unit": "L3PMC" + }, + { + "EventName": "l3_lookup_state.all_coherent_accesses_to_l3", + "EventCode": "0x04", + "BriefDescription": "L3 cache requests for all coherent accesses.", + "UMask": "0xff", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.dram_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 
requests where data is returned from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.near_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.far_cache", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_near", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.ext_far", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency.all", + "EventCode": "0xac", + "BriefDescription": "Average sampled latency for L3 requests where data is returned from all types of sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_near", + "EventCode": "0xad", + 
"BriefDescription": "Average sampled L3 requests where data is returned from DRAM in the same NUMA node.", + "UMask": "0x01", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.dram_far", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from DRAM in a different NUMA node.", + "UMask": "0x02", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.near_cache", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from cache of another CCX in the same NUMA node.", + "UMask": "0x04", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.far_cache", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from cache of another CCX in a different NUMA node.", + "UMask": "0x08", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_near", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from extension memory (CXL) in the same NUMA node.", + "UMask": "0x10", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": "l3_xi_sampled_latency_requests.ext_far", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from extension memory (CXL) in a different NUMA node.", + "UMask": "0x20", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + }, + { + "EventName": 
"l3_xi_sampled_latency_requests.all", + "EventCode": "0xad", + "BriefDescription": "Average sampled L3 requests where data is returned from all types of sources.", + "UMask": "0x3f", + "EnAllCores": "0x1", + "EnAllSlices": "0x1", + "SliceId": "0x3", + "ThreadMask": "0x3", + "Unit": "L3PMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/memory-controller.json b/tools/perf/pmu-events/arch/x86/amdzen6/memory-controller.json new file mode 100644 index 000000000000..649a60b09e1b --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/memory-controller.json @@ -0,0 +1,101 @@ +[ + { + "EventName": "umc_mem_clk", + "PublicDescription": "Memory clock (MEMCLK) cycles.", + "EventCode": "0x00", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.all", + "PublicDescription": "ACTIVATE commands sent.", + "EventCode": "0x05", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.rd", + "PublicDescription": "ACTIVATE commands sent for reads.", + "EventCode": "0x05", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.wr", + "PublicDescription": "ACTIVATE commands sent for writes.", + "EventCode": "0x05", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.all", + "PublicDescription": "PRECHARGE commands sent.", + "EventCode": "0x06", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.rd", + "PublicDescription": "PRECHARGE commands sent for reads.", + "EventCode": "0x06", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.wr", + "PublicDescription": "PRECHARGE commands sent for writes.", + "EventCode": "0x06", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.all", + "PublicDescription": "CAS commands sent.", + "EventCode": "0x0a", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.rd", + "PublicDescription": "CAS 
commands sent for reads.", + "EventCode": "0x0a", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.wr", + "PublicDescription": "CAS commands sent for writes.", + "EventCode": "0x0a", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.all", + "PublicDescription": "Clock cycles where the data bus is utilized.", + "EventCode": "0x14", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.rd", + "PublicDescription": "Clock cycles where the data bus is utilized for reads.", + "EventCode": "0x14", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.wr", + "PublicDescription": "Clock cycles where the data bus is utilized for writes.", + "EventCode": "0x14", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + } +] -- Gitee From a01627ad3c303cd9cd700f3096384b86188cb501 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 8 Jan 2026 13:22:16 +0530 Subject: [PATCH 133/231] perf vendor events amd: Add Zen 6 metrics commit d0a3df886d777180322a254176c40fd4a4a23cbe upstream Add metrics taken from Section 1.2 "Performance Measurement" of the Performance Monitor Counters for AMD Family 1Ah Model 50h-57h Processors document available at the link below. The recommended metrics are sourced from Table 1 "Guidance for Common Performance Statistics with Complex Event Selects". The pipeline utilization metrics are sourced from Table 2 "Guidance for Pipeline Utilization Analysis Statistics". These are useful for finding performance bottlenecks by analyzing activity at different stages of the pipeline. There are metric groups available for Level 1 and Level 2 analysis. 
Reviewed-by: Ian Rogers Signed-off-by: Sandipan Das Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Caleb Biggers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian Link: https://bugzilla.kernel.org/attachment.cgi?id=309149 Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: priyanka-mani Signed-off-by: PvsNarasimha --- .../pmu-events/arch/x86/amdzen6/pipeline.json | 99 +++++ .../arch/x86/amdzen6/recommended.json | 339 ++++++++++++++++++ 2 files changed, 438 insertions(+) create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/pipeline.json create mode 100644 tools/perf/pmu-events/arch/x86/amdzen6/recommended.json diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/pipeline.json b/tools/perf/pmu-events/arch/x86/amdzen6/pipeline.json new file mode 100644 index 000000000000..48c501d8a097 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/pipeline.json @@ -0,0 +1,99 @@ +[ + { + "MetricName": "total_dispatch_slots", + "BriefDescription": "Total dispatch slots (up to 8 instructions can be dispatched in each cycle).", + "MetricExpr": "8 * ls_not_halted_cyc", + "ScaleUnit": "1slots" + }, + { + "MetricName": "frontend_bound", + "BriefDescription": "Percentage of dispatch slots that remained unused because the frontend did not supply enough instructions/ops.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation", + "BriefDescription": "Percentage of dispatched ops that did not retire.", + "MetricExpr": "d_ratio(de_src_op_disp.all - ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound", + "BriefDescription": "Percentage of dispatch slots that remained unused because of backend stalls.", + "MetricExpr": 
"d_ratio(de_no_dispatch_per_slot.backend_stalls, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "smt_contention", + "BriefDescription": "Percentage of dispatch slots that remained unused because the other thread was selected.", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.smt_contention, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring", + "BriefDescription": "Percentage of dispatch slots used by ops that retired.", + "MetricExpr": "d_ratio(ex_ret_ops, total_dispatch_slots)", + "MetricGroup": "PipelineL1", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_latency", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a latency bottleneck in the frontend (such as instruction cache or TLB misses).", + "MetricExpr": "d_ratio((8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "frontend_bound_by_bandwidth", + "BriefDescription": "Percentage of dispatch slots that remained unused because of a bandwidth bottleneck in the frontend (such as decode or op cache fetch bandwidth).", + "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend - (8 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x8@), total_dispatch_slots)", + "MetricGroup": "PipelineL2;frontend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "bad_speculation_from_mispredicts", + "BriefDescription": "Percentage of dispatched ops that were flushed due to branch mispredicts.", + "MetricExpr": "d_ratio(bad_speculation * ex_ret_brn_misp, ex_ret_brn_misp + bp_fe_redir.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "bad_speculation_from_pipeline_restarts", + "BriefDescription": 
"Percentage of dispatched ops that were flushed due to pipeline restarts (resyncs).", + "MetricExpr": "d_ratio(bad_speculation * bp_fe_redir.resync, ex_ret_brn_misp + bp_fe_redir.resync)", + "MetricGroup": "PipelineL2;bad_speculation_group", + "ScaleUnit": "100%ops" + }, + { + "MetricName": "backend_bound_by_memory", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls due to the memory subsystem.", + "MetricExpr": "backend_bound * d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete)", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "backend_bound_by_cpu", + "BriefDescription": "Percentage of dispatch slots that remained unused because of stalls not related to the memory subsystem.", + "MetricExpr": "backend_bound * (1 - d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete))", + "MetricGroup": "PipelineL2;backend_bound_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_fastpath", + "BriefDescription": "Percentage of dispatch slots used by fastpath ops that retired.", + "MetricExpr": "retiring * (1 - d_ratio(ex_ret_ucode_ops, ex_ret_ops))", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + }, + { + "MetricName": "retiring_from_microcode", + "BriefDescription": "Percentage of dispatch slots used by microcode ops that retired.", + "MetricExpr": "retiring * d_ratio(ex_ret_ucode_ops, ex_ret_ops)", + "MetricGroup": "PipelineL2;retiring_group", + "ScaleUnit": "100%slots" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen6/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen6/recommended.json new file mode 100644 index 000000000000..2849a8c159f6 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen6/recommended.json @@ -0,0 +1,339 @@ +[ + { + "MetricName": "branch_misprediction_rate", + "BriefDescription": "Execution-time branch misprediction rate (non-speculative).", + "MetricExpr": 
"d_ratio(ex_ret_brn_misp, ex_ret_brn)", + "MetricGroup": "branch_prediction", + "ScaleUnit": "1per_branch" + }, + { + "MetricName": "all_data_cache_accesses_pti", + "BriefDescription": "All data cache accesses per thousand instructions.", + "MetricExpr": "ls_dispatch.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_accesses_pti", + "BriefDescription": "All L2 cache accesses per thousand instructions.", + "MetricExpr": "(l2_request_g1.no_pf_all + l2_pf_hit_l2.l2_hwpf + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_ic_misses_pti", + "BriefDescription": "L2 cache accesses from L1 instruction cache misses (including prefetch) per thousand instructions.", + "MetricExpr": "l2_request_g1.cacheable_ic_read / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l1_dc_misses_pti", + "BriefDescription": "L2 cache accesses from L1 data cache misses (including prefetch) per thousand instructions.", + "MetricExpr": "l2_request_g1.dc_all / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_accesses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache accesses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_hit_l2.l1_dc_l2_hwpf + l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_misses_pti", + "BriefDescription": "All L2 cache misses per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3.l2_hwpf + l2_pf_miss_l2_l3.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + 
"MetricName": "l2_cache_misses_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache misses from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_fill_miss / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache misses from L1 data cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ls_rd_blk_c / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_misses_from_l2_hwpf_pti", + "BriefDescription": "L2 cache misses from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "(l2_pf_miss_l2_hit_l3.l1_dc_l2_hwpf + l2_pf_miss_l2_l3.l1_dc_l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l2_cache_hits_pti", + "BriefDescription": "All L2 cache hits per thousand instructions.", + "MetricExpr": "(l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2.l2_hwpf) / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_ic_miss_pti", + "BriefDescription": "L2 cache hits from L1 instruction cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.ic_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l1_dc_miss_pti", + "BriefDescription": "L2 cache hits from L1 data cache misses per thousand instructions.", + "MetricExpr": "l2_cache_req_stat.dc_hit_in_l2 / instructions", + "MetricGroup": "l2_cache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_cache_hits_from_l2_hwpf_pti", + "BriefDescription": "L2 cache hits from L2 cache hardware prefetcher per thousand instructions.", + "MetricExpr": "l2_pf_hit_l2.l1_dc_l2_hwpf / instructions", + "MetricGroup": "l2_cache", + 
"ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l3_cache_accesses", + "BriefDescription": "L3 cache accesses.", + "MetricExpr": "l3_lookup_state.all_coherent_accesses_to_l3", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_misses", + "BriefDescription": "L3 misses (including cacheline state change requests).", + "MetricExpr": "l3_lookup_state.l3_miss", + "MetricGroup": "l3_cache" + }, + { + "MetricName": "l3_read_miss_latency", + "BriefDescription": "Average L3 read miss latency (in core clocks).", + "MetricExpr": "(l3_xi_sampled_latency.all * 10) / l3_xi_sampled_latency_requests.all", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_local_dram", + "BriefDescription": "Average L3 read miss latency (in core clocks) for local DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_near * 10) / l3_xi_sampled_latency_requests.dram_near", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "l3_read_miss_latency_for_remote_dram", + "BriefDescription": "Average L3 read miss latency (in core clocks) for remote DRAM.", + "MetricExpr": "(l3_xi_sampled_latency.dram_far * 10) / l3_xi_sampled_latency_requests.dram_far", + "MetricGroup": "l3_cache", + "ScaleUnit": "1ns" + }, + { + "MetricName": "op_cache_fetch_miss_ratio", + "BriefDescription": "Op cache miss ratio for all fetches.", + "MetricExpr": "d_ratio(op_cache_hit_miss.miss, op_cache_hit_miss.all)", + "ScaleUnit": "100%" + }, + { + "MetricName": "l1_data_cache_fills_from_memory_pti", + "BriefDescription": "L1 data cache fills from DRAM or MMIO in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.dram_io_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_remote_node_pti", + "BriefDescription": "L1 data cache fills from a different NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.far_all / 
instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.local_all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_data_cache_fills_from_different_ccx_pti", + "BriefDescription": "L1 data cache fills from another CCX cache in any NUMA node per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.remote_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_l1_data_cache_fills_pti", + "BriefDescription": "All L1 data cache fills per thousand instructions.", + "MetricExpr": "ls_any_fills_from_sys.all / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_local_l2_pti", + "BriefDescription": "L1 demand data cache fills from local L2 cache per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_l2 / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_same_ccx_pti", + "BriefDescription": "L1 demand data cache fills from within the same CCX per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.local_ccx / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.near_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_near_memory_pti", + "BriefDescription": "L1 demand data cache 
fills from DRAM or MMIO in the same NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_near / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_cache_pti", + "BriefDescription": "L1 demand data cache fills from another CCX cache in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.far_cache / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_demand_data_cache_fills_from_far_memory_pti", + "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in a different NUMA node per thousand instructions.", + "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_far / instructions", + "MetricGroup": "l1_dcache", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_itlb_misses_pti", + "BriefDescription": "L1 instruction TLB misses per thousand instructions.", + "MetricExpr": "(bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss.all) / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_itlb_misses_pti", + "BriefDescription": "L2 instruction TLB misses and instruction page walks per thousand instructions.", + "MetricExpr": "bp_l1_tlb_miss_l2_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l1_dtlb_misses_pti", + "BriefDescription": "L1 data TLB misses per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "l2_dtlb_misses_pti", + "BriefDescription": "L2 data TLB misses and data page walks per thousand instructions.", + "MetricExpr": "ls_l1_d_tlb_miss.l2_miss_all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "all_tlbs_flushed_pti", + "BriefDescription": "All TLBs flushed per 
thousand instructions.", + "MetricExpr": "ls_tlb_flush.all / instructions", + "MetricGroup": "tlb", + "ScaleUnit": "1e3per_1k_instr" + }, + { + "MetricName": "macro_ops_dispatched", + "BriefDescription": "Macro-ops dispatched.", + "MetricExpr": "de_src_op_disp.all", + "MetricGroup": "decoder" + }, + { + "MetricName": "sse_avx_stalls", + "BriefDescription": "Mixed SSE/AVX stalls.", + "MetricExpr": "fp_disp_faults.sse_avx_all" + }, + { + "MetricName": "macro_ops_retired", + "BriefDescription": "Macro-ops retired.", + "MetricExpr": "ex_ret_ops" + }, + { + "MetricName": "umc_data_bus_utilization", + "BriefDescription": "Memory controller data bus utilization.", + "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_rate", + "BriefDescription": "Memory controller CAS command rate.", + "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_cas_cmd_read_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for reads.", + "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_write_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for writes.", + "MetricExpr": "d_ratio(umc_cas_cmd.wr, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_mem_read_bandwidth", + "BriefDescription": "Estimated memory read bandwidth.", + "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_write_bandwidth", + "BriefDescription": "Estimated memory write bandwidth.", + "MetricExpr": "(umc_cas_cmd.wr * 64) / 
1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_bandwidth", + "BriefDescription": "Estimated combined memory bandwidth.", + "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_activate_cmd_rate", + "BriefDescription": "Memory controller ACTIVATE command rate.", + "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + }, + { + "MetricName": "umc_precharge_cmd_rate", + "BriefDescription": "Memory controller PRECHARGE command rate.", + "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1per_memclk" + } +] -- Gitee From dbd48f50ce0eb0ef510b851cd5336d2d289fc67a Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 29 May 2024 16:32:09 +0200 Subject: [PATCH 134/231] PCI: pciehp: Detect device replacement during system sleep mainline inclusion from v6.11-rc1 commit 9d573d19547b3fae0c1d4e5fce52bdad3fda3664 upstream. category: feature ------------------- Ricky reports that replacing a device in a hotplug slot during ACPI sleep state S3 does not cause re-enumeration on resume, as one would expect. Instead, the new device is treated as if it was the old one. There is no bulletproof way to detect device replacement, but as a heuristic, check whether the device identity in config space matches cached data in struct pci_dev (Vendor ID, Device ID, Class Code, Revision ID, Subsystem Vendor ID, Subsystem ID). Additionally, cache and compare the Device Serial Number (PCIe r6.2 sec 7.9.3). If a mismatch is detected, mark the old device disconnected (to prevent its driver from accessing the new device) and synthesize a Presence Detect Changed event. 
The device identity in config space which is compared here is the same as the one included in the signed Subject Alternative Name per PCIe r6.1 sec 6.31.3. Thus, the present commit prevents attacks where a valid device is replaced with a malicious device during system sleep and the valid device's driver obliviously accesses the malicious device. This is about as much as can be done at the PCI layer. Drivers may have additional ways to identify devices (such as reading a WWID from some register) and may trigger re-enumeration when detecting an identity change on resume. Link: https://lore.kernel.org/r/a1afaa12f341d146ecbea27c1743661c71683833.1716992815.git.lukas@wunner.de Reported-by: Ricky Wu Closes: https://lore.kernel.org/r/a608b5930d0a48f092f717c0e137454b@realtek.com Tested-by: Ricky Wu Signed-off-by: Lukas Wunner Signed-off-by: Bjorn Helgaas --- drivers/pci/hotplug/pciehp.h | 4 +++ drivers/pci/hotplug/pciehp_core.c | 42 ++++++++++++++++++++++++++++++- drivers/pci/hotplug/pciehp_hpc.c | 5 ++++ drivers/pci/hotplug/pciehp_pci.c | 4 +++ 4 files changed, 54 insertions(+), 1 deletion(-) diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index e0a614acee05..273dd8c66f4e 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -46,6 +46,9 @@ extern int pciehp_poll_time; /** * struct controller - PCIe hotplug controller * @pcie: pointer to the controller's PCIe port service device + * @dsn: cached copy of Device Serial Number of Function 0 in the hotplug slot + * (PCIe r6.2 sec 7.9.3); used to determine whether a hotplugged device + * was replaced with a different one during system sleep * @slot_cap: cached copy of the Slot Capabilities register * @inband_presence_disabled: In-Band Presence Detect Disable supported by * controller and disabled per spec recommendation (PCIe r5.0, appendix I @@ -87,6 +90,7 @@ extern int pciehp_poll_time; */ struct controller { struct pcie_device *pcie; + u64 dsn; u32 slot_cap; /* capabilities 
and quirks */ unsigned int inband_presence_disabled:1; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 4042d87d539d..c69e7e8d5618 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -283,6 +283,32 @@ static int pciehp_suspend(struct pcie_device *dev) return 0; } +static bool pciehp_device_replaced(struct controller *ctrl) +{ + struct pci_dev *pdev __free(pci_dev_put); + u32 reg; + + pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); + if (!pdev) + return true; + + if (pci_read_config_dword(pdev, PCI_VENDOR_ID, ®) || + reg != (pdev->vendor | (pdev->device << 16)) || + pci_read_config_dword(pdev, PCI_CLASS_REVISION, ®) || + reg != (pdev->revision | (pdev->class << 8))) + return true; + + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL && + (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, ®) || + reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16)))) + return true; + + if (pci_get_dsn(pdev) != ctrl->dsn) + return true; + + return false; +} + static int pciehp_resume_noirq(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); @@ -292,9 +318,23 @@ static int pciehp_resume_noirq(struct pcie_device *dev) ctrl->cmd_busy = true; /* clear spurious events from rediscovery of inserted card */ - if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) + if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) { pcie_clear_hotplug_events(ctrl); + /* + * If hotplugged device was replaced with a different one + * during system sleep, mark the old device disconnected + * (to prevent its driver from accessing the new device) + * and synthesize a Presence Detect Changed event. 
+ */ + if (pciehp_device_replaced(ctrl)) { + ctrl_dbg(ctrl, "device replaced during system sleep\n"); + pci_walk_bus(ctrl->pcie->port->subordinate, + pci_dev_set_disconnected, NULL); + pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); + } + } + return 0; } #endif diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b0bccc4d0da2..c5b591830406 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -1056,6 +1056,11 @@ struct controller *pcie_init(struct pcie_device *dev) } } + pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0)); + if (pdev) + ctrl->dsn = pci_get_dsn(pdev); + pci_dev_put(pdev); + return ctrl; } diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index ad12515a4a12..65e50bee1a8c 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -72,6 +72,10 @@ int pciehp_configure_device(struct controller *ctrl) pci_bus_add_devices(parent); down_read_nested(&ctrl->reset_lock, ctrl->depth); + dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); + ctrl->dsn = pci_get_dsn(dev); + pci_dev_put(dev); + out: pci_unlock_rescan_remove(); return ret; -- Gitee From b6c296b7cab6425db37c1a43ef671651bb99c3be Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 22 Oct 2024 15:48:48 -0700 Subject: [PATCH 135/231] PCI: Make pci_destroy_dev() concurrent safe mainline inclusion from v6.13-rc1 commit e3f30d563a388220a7c4e3b9a7b52ac0b0324b26 upstream. category: feature ------------------- Use an atomic flag instead of the racy check against the device's kobj parent. We shouldn't be poking into device implementation details at this level anyway. 
Link: https://lore.kernel.org/r/20241022224851.340648-3-kbusch@meta.com Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas Reviewed-by: Jonathan Cameron --- drivers/pci/pci.h | 6 ++++++ drivers/pci/remove.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index cac029e7140e..fbe22537f756 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -382,6 +382,7 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 +#define PCI_DEV_REMOVED 3 static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) { @@ -393,6 +394,11 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev) return test_bit(PCI_DEV_ADDED, &dev->priv_flags); } +static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) +{ + return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags); +} + #ifdef CONFIG_PCIEAER #include diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index d749ea8250d6..2b866742516a 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -30,7 +30,7 @@ static void pci_stop_dev(struct pci_dev *dev) static void pci_destroy_dev(struct pci_dev *dev) { - if (!dev->dev.kobj.parent) + if (pci_dev_test_and_set_removed(dev)) return; device_del(&dev->dev); -- Gitee From 6b3965e4fbf41a5ede5ae6d8f648602be2082918 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 10 Apr 2025 17:27:11 +0200 Subject: [PATCH 136/231] PCI: pciehp: Ignore Presence Detect Changed caused by DPC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mainline inclusion from v6.16-rc1 commit c3be50f7547ccb533284b22f74baf37d3379843e upstream. 
category: feature ------------------- Commit a97396c6eb13 ("PCI: pciehp: Ignore Link Down/Up caused by DPC") amended PCIe hotplug to not bring down the slot upon Data Link Layer State Changed events caused by Downstream Port Containment. However Keith reports off-list that if the slot uses in-band presence detect (i.e. Presence Detect State is derived from Data Link Layer Link Active), DPC also causes a spurious Presence Detect Changed event. This needs to be ignored as well. Unfortunately there's no register indicating that in-band presence detect is used. PCIe r5.0 sec 7.5.3.10 introduced the In-Band PD Disable bit in the Slot Control Register. The PCIe hotplug driver sets this bit on ports supporting it. But older ports may still use in-band presence detect. If in-band presence detect can be disabled, Presence Detect Changed events occurring during DPC must not be ignored because they signal device replacement. On all other ports, device replacement cannot be detected reliably because the Presence Detect Changed event could be a side effect of DPC. On those (older) ports, perform a best-effort device replacement check by comparing the Vendor ID, Device ID and other data in Config Space with the values cached in struct pci_dev. Use the existing helper pciehp_device_replaced() to accomplish this. It is currently #ifdef'ed to CONFIG_PM_SLEEP in pciehp_core.c, so move it to pciehp_hpc.c where most other functions accessing config space reside. 
Reported-by: Keith Busch Signed-off-by: Lukas Wunner Signed-off-by: Bjorn Helgaas Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Ilpo Järvinen Link: https://patch.msgid.link/fa264ff71952915c4e35a53c89eb0cde8455a5c5.1744298239.git.lukas@wunner.de --- drivers/pci/hotplug/pciehp.h | 1 + drivers/pci/hotplug/pciehp_core.c | 26 ---------------- drivers/pci/hotplug/pciehp_hpc.c | 49 ++++++++++++++++++++++++++----- 3 files changed, 43 insertions(+), 33 deletions(-) diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 273dd8c66f4e..debc79b0adfb 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -187,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl); int pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); int pciehp_check_link_active(struct controller *ctrl); +bool pciehp_device_replaced(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index c69e7e8d5618..575403ef06db 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -283,32 +283,6 @@ static int pciehp_suspend(struct pcie_device *dev) return 0; } -static bool pciehp_device_replaced(struct controller *ctrl) -{ - struct pci_dev *pdev __free(pci_dev_put); - u32 reg; - - pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); - if (!pdev) - return true; - - if (pci_read_config_dword(pdev, PCI_VENDOR_ID, ®) || - reg != (pdev->vendor | (pdev->device << 16)) || - pci_read_config_dword(pdev, PCI_CLASS_REVISION, ®) || - reg != (pdev->revision | (pdev->class << 8))) - return true; - - if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL && - (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, ®) || - reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16)))) - return true; - 
- if (pci_get_dsn(pdev) != ctrl->dsn) - return true; - - return false; -} - static int pciehp_resume_noirq(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index c5b591830406..04ba0fac031b 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -559,18 +559,48 @@ void pciehp_power_off_slot(struct controller *ctrl) PCI_EXP_SLTCTL_PWR_OFF); } +bool pciehp_device_replaced(struct controller *ctrl) +{ + struct pci_dev *pdev __free(pci_dev_put) = NULL; + u32 reg; + + if (pci_dev_is_disconnected(ctrl->pcie->port)) + return false; + + pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); + if (!pdev) + return true; + + if (pci_read_config_dword(pdev, PCI_VENDOR_ID, ®) || + reg != (pdev->vendor | (pdev->device << 16)) || + pci_read_config_dword(pdev, PCI_CLASS_REVISION, ®) || + reg != (pdev->revision | (pdev->class << 8))) + return true; + + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL && + (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, ®) || + reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16)))) + return true; + + if (pci_get_dsn(pdev) != ctrl->dsn) + return true; + + return false; +} + static void pciehp_ignore_dpc_link_change(struct controller *ctrl, - struct pci_dev *pdev, int irq) + struct pci_dev *pdev, int irq, + u16 ignored_events) { /* * Ignore link changes which occurred while waiting for DPC recovery. * Could be several if DPC triggered multiple times consecutively. 
*/ synchronize_hardirq(irq); - atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events); + atomic_and(~ignored_events, &ctrl->pending_events); if (pciehp_poll_mode) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, - PCI_EXP_SLTSTA_DLLSC); + ignored_events); ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n", slot_name(ctrl)); @@ -580,8 +610,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl, * Synthesize it to ensure that it is acted on. */ down_read_nested(&ctrl->reset_lock, ctrl->depth); - if (!pciehp_check_link_active(ctrl)) - pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); + if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl)) + pciehp_request(ctrl, ignored_events); up_read(&ctrl->reset_lock); } @@ -732,8 +762,13 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) */ if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) && ctrl->state == ON_STATE) { - events &= ~PCI_EXP_SLTSTA_DLLSC; - pciehp_ignore_dpc_link_change(ctrl, pdev, irq); + u16 ignored_events = PCI_EXP_SLTSTA_DLLSC; + + if (!ctrl->inband_presence_disabled) + ignored_events |= events & PCI_EXP_SLTSTA_PDC; + + events &= ~ignored_events; + pciehp_ignore_dpc_link_change(ctrl, pdev, irq, ignored_events); } /* -- Gitee From 5abdb64680f8fb560d17da11f79820a8d9fb8fde Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 10 Apr 2025 17:27:12 +0200 Subject: [PATCH 137/231] PCI: pciehp: Ignore Link Down/Up caused by Secondary Bus Reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mainline inclusion from v6.16-rc1 commit 2af781a9edc4ef5f6684c0710cc3542d9be48b31 upstream. category: feature ------------------- When a Secondary Bus Reset is issued at a hotplug port, it causes a Data Link Layer State Changed event as a side effect. On hotplug ports using in-band presence detect, it additionally causes a Presence Detect Changed event. 
These spurious events should not result in teardown and re-enumeration of the device in the slot. Hence commit 2e35afaefe64 ("PCI: pciehp: Add reset_slot() method") masked the Presence Detect Changed Enable bit in the Slot Control register during a Secondary Bus Reset. Commit 06a8d89af551 ("PCI: pciehp: Disable link notification across slot reset") additionally masked the Data Link Layer State Changed Enable bit. However masking those bits only disables interrupt generation (PCIe r6.2 sec 6.7.3.1). The events are still visible in the Slot Status register and picked up by the IRQ handler if it runs during a Secondary Bus Reset. This can happen if the interrupt is shared or if an unmasked hotplug event occurs, e.g. Attention Button Pressed or Power Fault Detected. The likelihood of this happening used to be small, so it wasn't much of a problem in practice. That has changed with the recent introduction of bandwidth control in v6.13-rc1 with commit 665745f27487 ("PCI/bwctrl: Re-add BW notification portdrv as PCIe BW controller"): Bandwidth control shares the interrupt with PCIe hotplug. A Secondary Bus Reset causes a Link Bandwidth Notification, so the hotplug IRQ handler runs, picks up the masked events and tears down the device in the slot. As a result, Joel reports VFIO passthrough failure of a GPU, which Ilpo root-caused to the incorrect handling of masked hotplug events. Clearly, a more reliable way is needed to ignore spurious hotplug events. For Downstream Port Containment, a new ignore mechanism was introduced by commit a97396c6eb13 ("PCI: pciehp: Ignore Link Down/Up caused by DPC"). It has been working reliably for the past four years. Adapt it for Secondary Bus Resets. Introduce two helpers to annotate code sections which cause spurious link changes: pci_hp_ignore_link_change() and pci_hp_unignore_link_change() Use those helpers in lieu of masking interrupts in the Slot Control register. 
Introduce a helper to check whether such a code section is executing
concurrently and if so, await it:

  pci_hp_spurious_link_change()

Invoke the helper in the hotplug IRQ thread pciehp_ist().  Re-use the
IRQ thread's existing code which ignores DPC-induced link changes unless
the link is unexpectedly down after reset recovery or the device was
replaced during the bus reset.

That code block in pciehp_ist() was previously only executed if a Data
Link Layer State Changed event has occurred.  Additionally execute it
for Presence Detect Changed events.  That's necessary for compatibility
with PCIe r1.0 hotplug ports because Data Link Layer State Changed
didn't exist before PCIe r1.1.  DPC was added with PCIe r3.1 and thus
DPC-capable hotplug ports always support Data Link Layer State Changed
events.  But the same cannot be assumed for Secondary Bus Reset, which
already existed in PCIe r1.0.

Secondary Bus Reset is only one of many causes of spurious link changes.
Others include runtime suspend to D3cold, firmware updates or FPGA
reconfiguration.  The new pci_hp_{,un}ignore_link_change() helpers may
be used by all kinds of drivers to annotate such code sections, hence
their declarations are publicly visible in <linux/pci.h>.  A case in
point is the Mellanox Ethernet driver which disables a firmware reset
feature if the Ethernet card is attached to a hotplug port, see commit
3d7a3f2612d7 ("net/mlx5: Nack sync reset request when HotPlug is
enabled").  Going forward, PCIe hotplug will be able to cope gracefully
with all such use cases once the code sections are properly annotated.

The new helpers internally use two bits in struct pci_dev's priv_flags
as well as a wait_queue.  This mirrors what was done for DPC by commit
a97396c6eb13 ("PCI: pciehp: Ignore Link Down/Up caused by DPC").  That
may be insufficient if spurious link changes are caused by multiple
sources simultaneously.  An example might be a Secondary Bus Reset
issued by AER during FPGA reconfiguration.
If this turns out to happen in real life, support for it can easily be added by replacing the PCI_LINK_CHANGING flag with an atomic_t counter incremented by pci_hp_ignore_link_change() and decremented by pci_hp_unignore_link_change(). Instead of awaiting a zero PCI_LINK_CHANGING flag, the pci_hp_spurious_link_change() helper would then simply await a zero counter. Fixes: 665745f27487 ("PCI/bwctrl: Re-add BW notification portdrv as PCIe BW controller") Reported-by: Joel Mathew Thomas Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219765 Signed-off-by: Lukas Wunner Signed-off-by: Bjorn Helgaas Tested-by: Joel Mathew Thomas Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Ilpo Järvinen Link: https://patch.msgid.link/d04deaf49d634a2edf42bf3c06ed81b4ca54d17b.1744298239.git.lukas@wunner.de Signed-off-by: leoliu-oc --- drivers/pci/hotplug/pci_hotplug_core.c | 69 ++++++++++++++++++++++++++ drivers/pci/hotplug/pciehp_hpc.c | 35 +++++-------- drivers/pci/pci.h | 3 ++ include/linux/pci.h | 8 +++ 4 files changed, 92 insertions(+), 23 deletions(-) diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 058d5937d8a9..5606ceaffba4 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -558,6 +558,75 @@ void pci_hp_destroy(struct hotplug_slot *slot) } EXPORT_SYMBOL_GPL(pci_hp_destroy); +static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq); + +/** + * pci_hp_ignore_link_change - begin code section causing spurious link changes + * @pdev: PCI hotplug bridge + * + * Mark the beginning of a code section causing spurious link changes on the + * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset, + * D3cold transition, firmware update or FPGA reconfiguration. + * + * Hotplug drivers can thus check whether such a code section is executing + * concurrently, await it with pci_hp_spurious_link_change() and ignore the + * resulting link change events. 
+ * + * Must be paired with pci_hp_unignore_link_change(). May be called both + * from the PCI core and from Endpoint drivers. May be called for bridges + * which are not hotplug-capable, in which case it has no effect because + * no hotplug driver is bound to the bridge. + */ +void pci_hp_ignore_link_change(struct pci_dev *pdev) +{ + set_bit(PCI_LINK_CHANGING, &pdev->priv_flags); + smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */ +} + +/** + * pci_hp_unignore_link_change - end code section causing spurious link changes + * @pdev: PCI hotplug bridge + * + * Mark the end of a code section causing spurious link changes on the + * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change(). + */ +void pci_hp_unignore_link_change(struct pci_dev *pdev) +{ + set_bit(PCI_LINK_CHANGED, &pdev->priv_flags); + mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */ + clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags); + wake_up_all(&pci_hp_link_change_wq); +} + +/** + * pci_hp_spurious_link_change - check for spurious link changes + * @pdev: PCI hotplug bridge + * + * Check whether a code section is executing concurrently which is causing + * spurious link changes on the Secondary Bus of @pdev. Await the end of the + * code section if so. + * + * May be called by hotplug drivers to check whether a link change is spurious + * and can be ignored. + * + * Because a genuine link change may have occurred in-between a spurious link + * change and the invocation of this function, hotplug drivers should perform + * sanity checks such as retrieving the current link state and bringing down + * the slot if the link is down. + * + * Return: %true if such a code section has been executing concurrently, + * otherwise %false. Also return %true if such a code section has not been + * executing concurrently, but at least once since the last invocation of this + * function. 
+ */ +bool pci_hp_spurious_link_change(struct pci_dev *pdev) +{ + wait_event(pci_hp_link_change_wq, + !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags)); + + return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags); +} + static int __init pci_hotplug_init(void) { int result; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 04ba0fac031b..d1a42e888b02 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -588,21 +588,21 @@ bool pciehp_device_replaced(struct controller *ctrl) return false; } -static void pciehp_ignore_dpc_link_change(struct controller *ctrl, - struct pci_dev *pdev, int irq, - u16 ignored_events) +static void pciehp_ignore_link_change(struct controller *ctrl, + struct pci_dev *pdev, int irq, + u16 ignored_events) { /* * Ignore link changes which occurred while waiting for DPC recovery. * Could be several if DPC triggered multiple times consecutively. + * Also ignore link changes caused by Secondary Bus Reset, etc. */ synchronize_hardirq(irq); atomic_and(~ignored_events, &ctrl->pending_events); if (pciehp_poll_mode) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, ignored_events); - ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n", - slot_name(ctrl)); + ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl)); /* * If the link is unexpectedly down after successful recovery, @@ -758,9 +758,11 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) /* * Ignore Link Down/Up events caused by Downstream Port Containment - * if recovery from the error succeeded. + * if recovery succeeded, or caused by Secondary Bus Reset, + * suspend to D3cold, firmware update, FPGA reconfiguration, etc. 
*/ - if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) && + if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) && + (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) && ctrl->state == ON_STATE) { u16 ignored_events = PCI_EXP_SLTSTA_DLLSC; @@ -768,7 +770,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) ignored_events |= events & PCI_EXP_SLTSTA_PDC; events &= ~ignored_events; - pciehp_ignore_dpc_link_change(ctrl, pdev, irq, ignored_events); + pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events); } /* @@ -933,7 +935,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); - u16 stat_mask = 0, ctrl_mask = 0; int rc; if (probe) @@ -941,23 +942,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) down_write_nested(&ctrl->reset_lock, ctrl->depth); - if (!ATTN_BUTTN(ctrl)) { - ctrl_mask |= PCI_EXP_SLTCTL_PDCE; - stat_mask |= PCI_EXP_SLTSTA_PDC; - } - ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE; - stat_mask |= PCI_EXP_SLTSTA_DLLSC; - - pcie_write_cmd(ctrl, 0, ctrl_mask); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0); + pci_hp_ignore_link_change(pdev); rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port); - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); - pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask); + pci_hp_unignore_link_change(pdev); up_write(&ctrl->reset_lock); return rc; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index fbe22537f756..7312b508d1e9 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -152,6 +152,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } /* Functions for PCI Hotplug drivers to use */ int pci_hp_add_bridge(struct pci_dev *dev); +bool 
pci_hp_spurious_link_change(struct pci_dev *pdev); #ifdef HAVE_PCI_LEGACY void pci_create_legacy_files(struct pci_bus *bus); @@ -383,6 +384,8 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 #define PCI_DEV_REMOVED 3 +#define PCI_LINK_CHANGED 4 +#define PCI_LINK_CHANGING 5 static inline void pci_dev_assign_added(struct pci_dev *dev, bool added) { diff --git a/include/linux/pci.h b/include/linux/pci.h index 71d3507e91f6..8854a2b24a80 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1875,6 +1875,14 @@ static inline bool pcie_aspm_support_enabled(void) { return false; } static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } #endif +#ifdef CONFIG_HOTPLUG_PCI +void pci_hp_ignore_link_change(struct pci_dev *pdev); +void pci_hp_unignore_link_change(struct pci_dev *pdev); +#else +static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { } +static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { } +#endif + #ifdef CONFIG_PCIEAER bool pci_aer_available(void); #else -- Gitee From e337139af4e0066de9c736f43cab0782ed60552a Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 18 Jun 2025 16:38:25 +0200 Subject: [PATCH 138/231] PCI: pciehp: Ignore belated Presence Detect Changed caused by DPC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mainline inclusion from v6.16-rc3 commit bbf10cd686835d5a4b8566dc73a3b00b4cd7932a upstream. category: feature ------------------- Commit c3be50f7547c ("PCI: pciehp: Ignore Presence Detect Changed caused by DPC") sought to ignore Presence Detect Changed events occurring as a side effect of Downstream Port Containment. The commit awaits recovery from DPC and then clears events which occurred in the meantime. However if the first event seen after DPC is Data Link Layer State Changed, only that event is cleared and not Presence Detect Changed. 
The object of the commit is thus defeated. That's because pciehp_ist() computes the events to clear based on the local "events" variable instead of "ctrl->pending_events". The former contains the events that had occurred when pciehp_ist() was entered, whereas the latter also contains events that have accumulated while awaiting DPC recovery. In practice, the order of PDC and DLLSC events is arbitrary and the delay in-between can be several milliseconds. So change the logic to always clear PDC events, even if they come after an initial DLLSC event. Fixes: c3be50f7547c ("PCI: pciehp: Ignore Presence Detect Changed caused by DPC") Reported-by: Lương Việt Hoàng Reported-by: Joel Mathew Thomas Closes: https://bugzilla.kernel.org/show_bug.cgi?id=219765#c165 Signed-off-by: Lukas Wunner Signed-off-by: Bjorn Helgaas Tested-by: Lương Việt Hoàng Tested-by: Joel Mathew Thomas Link: https://patch.msgid.link/d9c4286a16253af7e93eaf12e076e3ef3546367a.1750257164.git.lukas@wunner.de --- drivers/pci/hotplug/pciehp_hpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index d1a42e888b02..3b230b384e87 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -767,7 +767,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) u16 ignored_events = PCI_EXP_SLTSTA_DLLSC; if (!ctrl->inband_presence_disabled) - ignored_events |= events & PCI_EXP_SLTSTA_PDC; + ignored_events |= PCI_EXP_SLTSTA_PDC; events &= ~ignored_events; pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events); -- Gitee From 5b47d56677055e39855c9832f76ee67e14e6c18e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 9 Mar 2026 14:33:02 +0800 Subject: [PATCH 139/231] PCI: dpc: Increase pciehp waiting time for DPC recovery zhaoxin inclusion category: feature -------------------- Commit a97396c6eb13 ("PCI: pciehp: Ignore Link Down/Up caused by DPC") amended PCIe hotplug to not bring down the slot upon Data 
Link Layer State Changed events caused by Downstream Port Containment. However, PCIe hotplug (pciehp) waits up to 4 seconds before assuming that DPC recovery has failed and disabling the slot. This timeout period is insufficient for some PCIe devices. For example, the E810 dual-port network card driver needs to take over 10 seconds to execute its err_detected() callback. Since this exceeds the maximum wait time allowed for DPC recovery by the hotplug IRQ threads, a race condition occurs between the hotplug thread and the dpc_handler() thread. Signed-off-by: leoliu-oc --- drivers/pci/pcie/dpc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index e1923b944a02..74b8b458b7ef 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -115,10 +115,10 @@ bool pci_dpc_recovered(struct pci_dev *pdev) /* * Need a timeout in case DPC never completes due to failure of * dpc_wait_rp_inactive(). The spec doesn't mandate a time limit, - * but reports indicate that DPC completes within 4 seconds. + * but reports indicate that DPC completes within 16 seconds. */ wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev), - msecs_to_jiffies(4000)); + msecs_to_jiffies(16000)); return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags); } -- Gitee From 280da6426c2aa885021725de8caffe13b8210fa7 Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Mon, 16 Mar 2026 20:06:32 +0800 Subject: [PATCH 140/231] ahci: guard Zhaoxin enclosure quirk fields behind CONFIG_X86 zhaoxin inclusion category: feature -------------------- The AHCI host struct previously always contained members used only by the x86-only Zhaoxin enclosure management quirk (p1_mmio, sx_index, px_index, has_p0_p1). These members are not referenced on non-x86 builds, so keep them inside a CONFIG_X86 guard to avoid unnecessary struct bloat and to better reflect their platform-specific use. 
Signed-off-by: Your Name Signed-off-by: LeoLiu-oc --- drivers/ata/ahci.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8ba831020d77..37b793b8939f 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -383,11 +383,13 @@ struct ahci_host_priv { /* only required for per-port MSI(-X) support */ int (*get_irq_vector)(struct ata_host *host, int port); +#ifdef CONFIG_X86 /* fix zhaoxin Enclosure Management quirk */ void __iomem *p1_mmio; u8 sx_index; u8 px_index; bool has_p0_p1; +#endif }; extern int ahci_ignore_sss; -- Gitee From 9e82de74b3627a44eae24db0ffec854b376a2bb6 Mon Sep 17 00:00:00 2001 From: Min Fanlei Date: Thu, 18 Dec 2025 15:23:13 +0800 Subject: [PATCH 141/231] sw64: kvm: fix srcu lock leak in pv steal time Some early return paths in kvm_sw64_record_steal_time() fail to release the SRCU read lock, which can lead to resource leak. Fix it. Signed-off-by: Min Fanlei Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kvm/pvtime.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/sw_64/kvm/pvtime.c b/arch/sw_64/kvm/pvtime.c index 767617d4c5e0..806e662b1100 100644 --- a/arch/sw_64/kvm/pvtime.c +++ b/arch/sw_64/kvm/pvtime.c @@ -27,19 +27,19 @@ void kvm_sw64_record_steal_time(struct kvm_vcpu *vcpu) hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); if (WARN_ON(kvm_is_error_hva(hva))) { vcpu->arch.steal.base = INVALID_GPA; - return; + goto out_unlock; } steal_ptr = (__u64 __user *)(hva + offset_in_page(base) + offset_s); version_ptr = (__u32 __user *)(hva + offset_in_page(base) + offset_v); if (WARN_ON(get_user(version, version_ptr))) - return; + goto out_unlock; version += 1; if (WARN_ON(put_user(version, version_ptr))) - return; + goto out_unlock; if (!WARN_ON(get_user(steal, steal_ptr))) { vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay); @@ -54,6 +54,8 @@ void kvm_sw64_record_steal_time(struct kvm_vcpu *vcpu) WARN_ON(put_user(version, version_ptr)); 
kvm_vcpu_mark_page_dirty(vcpu, gfn); + +out_unlock: srcu_read_unlock(&kvm->srcu, idx); } -- Gitee From 8cf6465977d22991bd95a2892b7e6bbcd4a94c89 Mon Sep 17 00:00:00 2001 From: Xiao Ziwang Date: Tue, 23 Dec 2025 12:07:30 +0800 Subject: [PATCH 142/231] sw64: kvm: add qemu-gdb watchpoint support We add kvm support for qemu-gdb watchpoint. When qemu inserts a watchpoint, kvm receives the information and converts it. We save and restore the debug CSRs before entering VM and after exiting VM. If a qemu-gdb watchpoint is triggered, we do VM exit to handle it. We also create a file when host kernel booting to indicate that host kernel supports qemu-gdb watchpoint. Signed-off-by: Xiao Ziwang Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/cpu.h | 1 + arch/sw_64/include/asm/kvm_host.h | 5 +++++ arch/sw_64/include/asm/vcpu.h | 4 +++- arch/sw_64/include/uapi/asm/kvm.h | 4 ++++ arch/sw_64/kernel/setup.c | 14 +++++++++++++ arch/sw_64/kvm/handle_exit.c | 8 +++++++- arch/sw_64/kvm/kvm_core4.c | 18 +++++++++++++++++ arch/sw_64/kvm/sw64.c | 33 +++++++++++++++++++++++++++++++ 8 files changed, 85 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h index 9fc739dacfab..cfb6090baded 100644 --- a/arch/sw_64/include/asm/cpu.h +++ b/arch/sw_64/include/asm/cpu.h @@ -25,6 +25,7 @@ enum hmcall_cpuid_cmd { #define CPU_FEAT_SIMD 0x2 #define CPU_FEAT_UNA 0x4 #define CPU_FEAT_VINT 0x8 +#define CPU_FEAT_WP 0x20 enum sunway_cpu_model { CPU_SW3231 = 0x31, diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h index 3d1ec0a2ea5d..6f02a6fb5386 100644 --- a/arch/sw_64/include/asm/kvm_host.h +++ b/arch/sw_64/include/asm/kvm_host.h @@ -119,6 +119,10 @@ struct kvm_vcpu_arch { /* Don't run the guest (internal implementation need) */ bool pause; + /* vcpu debug state */ + struct kvm_guest_debug_arch host_debug_state; + struct kvm_guest_debug_arch guest_debug_state; + struct kvm_decode mmio_decode; /* Cache 
some mmu pages needed inside spinlock regions */ @@ -207,6 +211,7 @@ void kvm_sw64_destroy_vm(struct kvm *kvm); int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu); long kvm_sw64_set_vcb(struct file *filp, unsigned long arg); long kvm_sw64_get_vcb(struct file *filp, unsigned long arg); +void kvm_sw64_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); void update_aptp(unsigned long pgd); void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu); diff --git a/arch/sw_64/include/asm/vcpu.h b/arch/sw_64/include/asm/vcpu.h index 38066427afdb..2026aea49b3e 100644 --- a/arch/sw_64/include/asm/vcpu.h +++ b/arch/sw_64/include/asm/vcpu.h @@ -113,7 +113,9 @@ struct vcpucb { unsigned long reserved1[10]; /* USE IN HMCODE */ DECLARE_BITMAP(irqs_pending, CORE4VM_IRQS); /* Pending virtual interrupts */ unsigned long irqs_addr; - unsigned long reserved2[30]; + unsigned long reserved2[3]; /* USE IN HMCODE */ + unsigned long watchpoint_in_use; + unsigned long reserved3[26]; }; #endif diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h index 2b220eed1441..cd180f705647 100644 --- a/arch/sw_64/include/uapi/asm/kvm.h +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -47,10 +47,14 @@ struct kvm_fpu { struct kvm_debug_exit_arch { unsigned long epc; + unsigned long reason;//indicate breakpoint or watchpoint }; /* for KVM_SET_GUEST_DEBUG */ struct kvm_guest_debug_arch { + uint64_t addr; + uint64_t mask; + uint64_t ctl; }; /* definition of registers in kvm_run */ diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index f8925a8a1b61..b7e12d9bdeff 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -831,6 +831,20 @@ static int __init debugfs_mclk_init(void) return 0; } late_initcall(debugfs_mclk_init); + +static int __init debugfs_watchpoint_init(void) +{ + struct dentry *dir = sw64_debugfs_dir; + static u64 feature_wp; + + feature_wp = (cpuid(GET_FEATURES, 0) & CPU_FEAT_WP); + if (feature_wp) { + 
debugfs_create_u64("watchpoint", 0644, dir, &feature_wp); + } + + return 0; +} +late_initcall(debugfs_watchpoint_init); #endif #ifdef CONFIG_OF diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c index 3a623c59895a..33849d981427 100644 --- a/arch/sw_64/kvm/handle_exit.c +++ b/arch/sw_64/kvm/handle_exit.c @@ -61,7 +61,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, case SW64_KVM_EXIT_DEBUG: vcpu->stat.debug_exits++; vcpu->run->exit_reason = KVM_EXIT_DEBUG; - vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + /* hargs-arg0 is assigned in hmcode to indicate qemu-gdb watchpoint */ + if (hargs->arg0 == 2) { + vcpu->run->debug.arch.epc = hargs->arg2; + vcpu->run->debug.arch.reason = hargs->arg0; + } else { + vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + } return 0; #ifdef CONFIG_SUBARCH_C4 case SW64_KVM_EXIT_APT_FAULT: diff --git a/arch/sw_64/kvm/kvm_core4.c b/arch/sw_64/kvm/kvm_core4.c index a13e9fd57249..9f777e434b83 100644 --- a/arch/sw_64/kvm/kvm_core4.c +++ b/arch/sw_64/kvm/kvm_core4.c @@ -102,6 +102,24 @@ long kvm_sw64_set_vcb(struct file *filp, unsigned long arg) return 0; } +void kvm_sw64_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) +{ + uint64_t match_ctl, match_ctl_mode; + vcpu->guest_debug = dbg->control; + if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) { + vcpu->guest_debug = 0; + } else { + vcpu->arch.guest_debug_state = dbg->arch; + match_ctl_mode = (vcpu->arch.guest_debug_state.ctl >> 8) & 0x3; + match_ctl = sw64_read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | + (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | + (0x2UL << DPM_MATCH); + vcpu->arch.guest_debug_state.ctl = match_ctl; + } +} + int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { if (feature_vint) diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c index 665e11656706..e9fe07cce1f1 100644 --- 
a/arch/sw_64/kvm/sw64.c +++ b/arch/sw_64/kvm/sw64.c @@ -398,6 +398,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { trace_kvm_set_guest_debug(vcpu, dbg->control); +#ifdef CONFIG_SUBARCH_C4 + kvm_sw64_set_guest_debug(vcpu, dbg); +#endif return 0; } @@ -408,6 +411,28 @@ void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat) vcpu_stat->gtime = current->gtime; } +void kvm_sw64_switch_debug_state_pre_run(struct kvm_vcpu *vcpu) +{ + vcpu->arch.host_debug_state.addr = sw64_read_csr(CSR_DA_MATCH); + vcpu->arch.host_debug_state.mask = sw64_read_csr(CSR_DA_MASK); + vcpu->arch.host_debug_state.ctl = sw64_read_csr(CSR_DC_CTLP); + + sw64_write_csr(vcpu->arch.guest_debug_state.addr, CSR_DA_MATCH); + sw64_write_csr(vcpu->arch.guest_debug_state.mask, CSR_DA_MASK); + sw64_write_csr(vcpu->arch.guest_debug_state.ctl, CSR_DC_CTLP); +} + +void kvm_sw64_switch_debug_state_post_run(struct kvm_vcpu *vcpu) +{ + vcpu->arch.guest_debug_state.addr = sw64_read_csr(CSR_DA_MATCH); + vcpu->arch.guest_debug_state.mask = sw64_read_csr(CSR_DA_MASK); + vcpu->arch.guest_debug_state.ctl = sw64_read_csr(CSR_DC_CTLP); + + sw64_write_csr(vcpu->arch.host_debug_state.addr, CSR_DA_MATCH); + sw64_write_csr(vcpu->arch.host_debug_state.mask, CSR_DA_MASK); + sw64_write_csr(vcpu->arch.host_debug_state.ctl, CSR_DC_CTLP); +} + /* * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on * proper exit to userspace. 
@@ -477,6 +502,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* update aptp before the guest runs */ update_aptp((unsigned long)vcpu->kvm->arch.pgd); + if (vcpu->guest_debug) { + kvm_sw64_switch_debug_state_pre_run(vcpu); + } + /* Enter the guest */ trace_kvm_sw64_entry(vcpu->vcpu_id, vcpu->arch.regs.pc); vcpu->mode = IN_GUEST_MODE; @@ -492,6 +521,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trace_kvm_sw64_exit(ret, vcpu->arch.regs.pc); + if (vcpu->guest_debug) { + kvm_sw64_switch_debug_state_post_run(vcpu); + } + preempt_enable(); /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ -- Gitee From 5c0fe034c82590e8707986eb527171166ddc995d Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Tue, 6 Jan 2026 10:28:01 +0800 Subject: [PATCH 143/231] sw64: add definitions for kernel-required CPU capabilities Add definitions for CPU capabilities that are required by the kernel. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/cpucaps.h | 8 ++++++ arch/sw_64/include/asm/cpufeature.h | 30 +++++++++++++++++++++++ arch/sw_64/include/asm/elf.h | 4 +-- arch/sw_64/include/asm/hwcap.h | 14 +++++++++++ arch/sw_64/include/uapi/asm/hwcap.h | 7 ++++++ arch/sw_64/kernel/Makefile | 2 +- arch/sw_64/kernel/cpu.c | 3 ++- arch/sw_64/kernel/cpufeature.c | 38 +++++++++++++++++++++++++++++ arch/sw_64/kernel/setup.c | 5 +++- 9 files changed, 106 insertions(+), 5 deletions(-) create mode 100644 arch/sw_64/include/asm/cpucaps.h create mode 100644 arch/sw_64/include/asm/cpufeature.h create mode 100644 arch/sw_64/include/asm/hwcap.h create mode 100644 arch/sw_64/include/uapi/asm/hwcap.h create mode 100644 arch/sw_64/kernel/cpufeature.c diff --git a/arch/sw_64/include/asm/cpucaps.h b/arch/sw_64/include/asm/cpucaps.h new file mode 100644 index 000000000000..b39abe2c53c2 --- /dev/null +++ b/arch/sw_64/include/asm/cpucaps.h @@ -0,0 +1,8 @@ +#ifndef __ASM_CPUCAPS_H +#define __ASM_CPUCAPS_H + +#define CPU_FEATURE_HWUNA 0 
+ +#define SW64_NCAPS 1 + +#endif /* __ASM_CPUCAPS_H */ diff --git a/arch/sw_64/include/asm/cpufeature.h b/arch/sw_64/include/asm/cpufeature.h new file mode 100644 index 000000000000..44105b53f483 --- /dev/null +++ b/arch/sw_64/include/asm/cpufeature.h @@ -0,0 +1,30 @@ +#ifndef __ASM_CPUFEATURE_H +#define __ASM_CPUFEATURE_H + +#include +#include +#include +#include + +#include +#include + +#define MAX_CPU_FEATURES 64 +#define cpu_feature(x) KERNEL_HWCAP_SW64_ ## x +#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name)) +#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name)) + +extern DECLARE_BITMAP(system_cpucaps, SW64_NCAPS); + +void __init setup_cpu_features(void); +void cpu_set_feature(unsigned int num); +bool cpu_have_feature(unsigned int num); + +static inline bool cpus_have_cap(unsigned int num) +{ + if (num >= SW64_NCAPS) + return false; + return test_bit(num, system_cpucaps); +} + +#endif diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h index 7aec6327901f..6e26ec976367 100644 --- a/arch/sw_64/include/asm/elf.h +++ b/arch/sw_64/include/asm/elf.h @@ -136,8 +136,8 @@ do { \ * This yields a mask that user programs can use to figure out what * instruction set this CPU supports. 
*/ - -#define ELF_HWCAP 0 +#define ELF_HWCAP (elf_hwcap) +extern unsigned long elf_hwcap; /* * This yields a string that ld.so will use to load implementation diff --git a/arch/sw_64/include/asm/hwcap.h b/arch/sw_64/include/asm/hwcap.h new file mode 100644 index 000000000000..6e1cb87caa47 --- /dev/null +++ b/arch/sw_64/include/asm/hwcap.h @@ -0,0 +1,14 @@ +#ifndef __ASM_HWCAP_H +#define __ASM_HWCAP_H + +#include +#include + +#ifndef __ASSEMBLY__ +#include +#define __khwcap_feature(x) const_ilog2(HWCAP_SW64_ ## x) + +#define KERNEL_HWCAP_SW64_HWUNA __khwcap_feature(HWUNA) + +#endif +#endif diff --git a/arch/sw_64/include/uapi/asm/hwcap.h b/arch/sw_64/include/uapi/asm/hwcap.h new file mode 100644 index 000000000000..36b247e92b20 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/hwcap.h @@ -0,0 +1,7 @@ +#ifndef _UAPI__ASM_HWCAP_H +#define _UAPI__ASM_HWCAP_H + +/* HWCAP flags */ +#define HWCAP_SW64_HWUNA (1 << 0) + +#endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile index caf6de81dbde..8d195f6df15e 100644 --- a/arch/sw_64/kernel/Makefile +++ b/arch/sw_64/kernel/Makefile @@ -23,7 +23,7 @@ obj-y := fpu.o traps.o process.o sys_sw64.o irq.o cpu.o \ systbls.o dup_print.o chip_setup.o \ insn.o early_init.o topology.o cacheinfo.o \ vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \ - head.o termios.o trap_unalign.o + head.o termios.o trap_unalign.o cpufeature.o obj-$(CONFIG_SUBARCH_C3B) += entry_c3.o tc.o obj-$(CONFIG_SUBARCH_C4) += entry_c4.o diff --git a/arch/sw_64/kernel/cpu.c b/arch/sw_64/kernel/cpu.c index 7683fedda70f..63095e0f58bf 100644 --- a/arch/sw_64/kernel/cpu.c +++ b/arch/sw_64/kernel/cpu.c @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -266,7 +267,7 @@ static int show_cpuinfo(struct seq_file *f, void *slot) (loops_per_jiffy / (5000/HZ)) % 100); seq_printf(f, "flags\t\t: fpu simd vpn upn cpuid%s\n", - (cpuid(GET_FEATURES, 0) & CPU_FEAT_UNA) ? " una" : ""); + (cpu_have_named_feature(HWUNA)) ? 
" una" : ""); seq_printf(f, "page size\t: %d\n", 8192); seq_printf(f, "cache_alignment\t: %d\n", l3_cachline_size); seq_printf(f, "address sizes\t: %u bits physical, %u bits virtual\n\n", diff --git a/arch/sw_64/kernel/cpufeature.c b/arch/sw_64/kernel/cpufeature.c new file mode 100644 index 000000000000..611d14b0ddcd --- /dev/null +++ b/arch/sw_64/kernel/cpufeature.c @@ -0,0 +1,38 @@ +#include + +#include +#include +#include + +unsigned long elf_hwcap __read_mostly; + +DECLARE_BITMAP(system_cpucaps, SW64_NCAPS); +EXPORT_SYMBOL(system_cpucaps); + +void cpu_set_feature(unsigned int num) +{ + WARN_ON(num >= MAX_CPU_FEATURES); + elf_hwcap |= BIT(num); +} +EXPORT_SYMBOL_GPL(cpu_set_feature); + +bool cpu_have_feature(unsigned int num) +{ + WARN_ON(num >= MAX_CPU_FEATURES); + return elf_hwcap & BIT(num); +} +EXPORT_SYMBOL_GPL(cpu_have_feature); + +static void setup_cpu_features_common(void) +{ + elf_hwcap = 0; + if (cpuid(GET_FEATURES, 0) & CPU_FEAT_UNA) { + cpu_set_named_feature(HWUNA); + set_bit(CPU_FEATURE_HWUNA, system_cpucaps); + } +} + +void __init setup_cpu_features(void) +{ + setup_cpu_features_common(); +} diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index b7e12d9bdeff..4407c0ef9aec 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -578,7 +579,7 @@ static void __init setup_firmware_fdt(void) static void __init setup_cpu_caps(void) { - if (cpuid(GET_FEATURES, 0) & CPU_FEAT_UNA) + if (cpu_have_named_feature(HWUNA)) static_branch_enable(&hw_una_enabled); } @@ -673,6 +674,8 @@ setup_arch(char **cmdline_p) */ trap_init(); + setup_cpu_features(); + jump_label_init(); #ifdef CONFIG_SUBARCH_C3B -- Gitee From be1794974f52b310d30fe7206c12e6cb3a9a84d5 Mon Sep 17 00:00:00 2001 From: Xu Yiwei Date: Mon, 5 Jan 2026 07:30:20 +0000 Subject: [PATCH 144/231] sw64: pci: add sunway pci controller for Command Completed errata Sunway PCI controller does not set the 
Command Completed bit unless writes to the Slot Command register change "Control" bits. Add a quirk on sunway pci controller. Signed-off-by: Xu Yiwei Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/pci/pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/sw_64/pci/pci.c b/arch/sw_64/pci/pci.c index 48aff446e3a7..87a34cad3f2b 100644 --- a/arch/sw_64/pci/pci.c +++ b/arch/sw_64/pci/pci.c @@ -232,6 +232,10 @@ static void fixup_root_complex(struct pci_dev *dev) } dev->no_msi = 1; + +#ifdef CONFIG_HOTPLUG_PCI_PCIE + dev->broken_cmd_compl = 1; +#endif } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_SW64_ROOT_BRIDGE, fixup_root_complex); -- Gitee From da509fa3cb2c32432e16feec6cffe7ee9f69f5b0 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 8 Jan 2026 16:35:19 +0800 Subject: [PATCH 145/231] sw64: use ioremap to map spaces for cpufreq and S3 Map SPBU, INTPU and GPIO spaces to fix page faults during cpufreq and S3. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/uncore_io_junzhang.h | 4 ++++ arch/sw_64/include/asm/uncore_io_xuelang.h | 4 ++++ drivers/platform/sw64/misc-platform.c | 24 +++++++++++++++------ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h index 24c1e3be32f3..6f78eb766801 100644 --- a/arch/sw_64/include/asm/uncore_io_junzhang.h +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -62,6 +62,10 @@ #define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) #define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) +#define SPBU_SIZE 0xe000 +#define INTPU_SIZE 0x1900 +#define GPIO_SIZE 0x4000 + #define CORE0_CID (rcid_to_domain_id(cpu_to_rcid(0)) << 7 | \ rcid_to_thread_id(cpu_to_rcid(0)) << 6 | \ rcid_to_core_id(cpu_to_rcid(0))) diff --git a/arch/sw_64/include/asm/uncore_io_xuelang.h b/arch/sw_64/include/asm/uncore_io_xuelang.h index c8f01cb01e52..243b4d34c291 100644 --- 
a/arch/sw_64/include/asm/uncore_io_xuelang.h +++ b/arch/sw_64/include/asm/uncore_io_xuelang.h @@ -67,6 +67,10 @@ #define DLI_PHY_CTL (0x10UL << 24) #define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) +#define SPBU_SIZE 0x8f00 +#define INTPU_SIZE 0x1680 +#define GPIO_SIZE 0x3c00 + #define CORE0_CID (rcid_to_domain_id(cpu_to_rcid(0)) << 6 | \ rcid_to_core_id(cpu_to_rcid(0))) #define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10 | CORE0_CID) diff --git a/drivers/platform/sw64/misc-platform.c b/drivers/platform/sw64/misc-platform.c index af426429ef23..8959b6d3cdd4 100644 --- a/drivers/platform/sw64/misc-platform.c +++ b/drivers/platform/sw64/misc-platform.c @@ -102,16 +102,28 @@ static int misc_platform_probe(struct platform_device *pdev) gpio_base = __va(SW64_IO_BASE(node) | GPIO_BASE); if (!device_property_read_u64(dev, "sunway,spbu_base", - &base_address)) - spbu_base = __va(base_address); + &base_address)) { + if (is_junzhang_v1() || is_junzhang_v2()) + spbu_base = ioremap(base_address, SPBU_SIZE); + else + spbu_base = __va(base_address); + } if (!device_property_read_u64(dev, "sunway,intpu_base", - &base_address)) - intpu_base = __va(base_address); + &base_address)) { + if (is_junzhang_v1() || is_junzhang_v2()) + intpu_base = ioremap(base_address, INTPU_SIZE); + else + intpu_base = __va(base_address); + } if (!device_property_read_u64(dev, "sunway,gpio_base", - &base_address)) - gpio_base = __va(base_address); + &base_address)) { + if (is_junzhang_v1() || is_junzhang_v2()) + gpio_base = ioremap(base_address, GPIO_SIZE); + else + gpio_base = __va(base_address); + } misc_platform_devices[node].spbu_base = spbu_base; misc_platform_devices[node].intpu_base = intpu_base; -- Gitee From d17c7bcc53d24473f72e6bb3760343bfbaf8f4f1 Mon Sep 17 00:00:00 2001 From: Xu Yiwei Date: Mon, 22 Dec 2025 07:17:34 +0000 Subject: [PATCH 146/231] sw64: pci: fix IO_BASE if numa_off The current implementation assembles a wrong PCI_IO_BASE address when numa is off because 
hose->node is always all 0 in this case. This will further interfere with device usage on other nodes. Signed-off-by: Xu Yiwei Signed-off-by: Jinyu Tang Signed-off-by: Yizhou Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- drivers/usb/host/pci-quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 88c493b60d9a..f382d1712bb9 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1328,7 +1328,7 @@ fixup_usb_xhci_reset(struct pci_dev *dev) if (offset == 0) return; - base = ioremap(SW64_PCI_IO_BASE(hose->node, hose->index) | offset, SZ_8K); + base = ioremap(hose->dense_mem_base | offset, SZ_8K); ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); if (!ext_cap_offset) -- Gitee From 1b173aac35ac4ae4169bff7c85a8947deac15093 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Tue, 6 Jan 2026 14:56:00 +0800 Subject: [PATCH 147/231] sw64: add alternative runtime patching Introduce the alternative mechanism to sw64. In future, we can use this mechanism to optimize hotspot functions according to cpu features. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/alternative-asm.h | 80 +++++++++++ arch/sw_64/include/asm/alternative.h | 105 +++++++++++++++ arch/sw_64/include/asm/insn.h | 9 ++ arch/sw_64/include/asm/module.h | 15 +++ arch/sw_64/kernel/Makefile | 3 +- arch/sw_64/kernel/alternative.c | 164 +++++++++++++++++++++++ arch/sw_64/kernel/module.c | 16 +++ arch/sw_64/kernel/setup.c | 3 + arch/sw_64/kernel/vdso/vdso.lds.S | 5 + arch/sw_64/kernel/vmlinux.lds.S | 7 + 10 files changed, 406 insertions(+), 1 deletion(-) create mode 100644 arch/sw_64/include/asm/alternative-asm.h create mode 100644 arch/sw_64/include/asm/alternative.h create mode 100644 arch/sw_64/kernel/alternative.c diff --git a/arch/sw_64/include/asm/alternative-asm.h b/arch/sw_64/include/asm/alternative-asm.h new file mode 100644 index 000000000000..088ed1987283 --- /dev/null +++ b/arch/sw_64/include/asm/alternative-asm.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ALTERNATIVE_ASM_H +#define _ASM_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +/* + * Issue one struct alt_instr descriptor entry (need to put it into + * the section .altinstructions, see below). This entry contains + * enough information for the alternatives patching code to patch an + * instruction. See apply_alternatives(). + */ +.macro altinstruction_entry orig alt feature orig_len alt_len + .long \orig - . + .long \alt - . + .short \feature + .byte \orig_len + .byte \alt_len +.endm + +/* + * Define an alternative between two instructions. If @feature is + * present, early code in apply_alternatives() replaces @oldinstr with + * @newinstr. ".fill" directive takes care of proper instruction padding + * in case @newinstr is longer than @oldinstr. 
+ */ +.macro ALTERNATIVE oldinstr, newinstr, feature +140 : + \oldinstr +141 : + .fill - (((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)) / 4, 4, 0x43ff075f +142 : + + .pushsection .altinstructions, "a" + altinstruction_entry 140b, 143f, \feature, 142b-140b, 144f-143f + .popsection + + .subsection 1 +143 : + \newinstr +144 : + .previous +.endm + +#define old_len (141b-140b) +#define new_len1 (144f-143f) +#define new_len2 (145f-144f) + +#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) + +/* + * Same as ALTERNATIVE macro above but for two alternatives. If CPU + * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has + * @feature2, it replaces @oldinstr with @feature2. + */ +.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 +140 : + \oldinstr +141 : + .fill - ((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \ + (alt_max_short(new_len1, new_len2) - (old_len)) / 4, 4, 0x43ff075f +142 : + + .pushsection .altinstructions, "a" + altinstruction_entry 140b, 143f, \feature1, 142b-140b, 144f-143f, 142b-141b + altinstruction_entry 140b, 144f, \feature2, 142b-140b, 145f-144f, 142b-141b + .popsection + + .subsection 1 +143 : + \newinstr1 +144 : + \newinstr2 +145 : + .previous +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/sw_64/include/asm/alternative.h b/arch/sw_64/include/asm/alternative.h new file mode 100644 index 000000000000..81138e21472b --- /dev/null +++ b/arch/sw_64/include/asm/alternative.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ALTERNATIVE_H +#define _ASM_ALTERNATIVE_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +#include + +struct alt_instr { + s32 instr_offset; /* offset to original instruction */ + s32 replace_offset; /* offset to replacement instruction */ + u16 feature; /* feature bit set for replacement */ + u8 instrlen; /* length of original instruction */ + u8 replacementlen; /* length of new 
instruction */ +} __packed; + +extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); +extern void apply_alternatives_all(void); + +#define b_replacement(num) "664"#num +#define e_replacement(num) "665"#num + +#define alt_end_marker "663" +#define alt_slen "662b-661b" +#define alt_total_slen alt_end_marker"b-661b" +#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" + +#define __OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ + "((" alt_rlen(num) ")-(" alt_slen ")) / 4, 4, 0x43ff075f\n" + +#define OLDINSTR(oldinstr, num) \ + __OLDINSTR(oldinstr, num) \ + alt_end_marker ":\n" + +#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))" + +/* + * Pad the second replacement alternative with additional NOPs if it is + * additionally longer than the first replacement alternative. + */ +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".fill -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) / 4, " \ + "4, 0x43ff075f\n" \ + alt_end_marker ":\n" + +#define ALTINSTR_ENTRY(feature, num) \ + " .long 661b - .\n" /* label */ \ + " .long " b_replacement(num)"f - .\n" /* new instruction */ \ + " .short " __stringify(feature) "\n" /* feature bit */ \ + " .byte " alt_total_slen "\n" /* source len */ \ + " .byte " alt_rlen(num) "\n" /* replacement len */ + +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, newinstr, feature) \ + OLDINSTR(oldinstr, 1) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature, 1) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ + ".previous\n" + +#define 
ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ + OLDINSTR_2(oldinstr, 1, 2) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature1, 1) \ + ALTINSTR_ENTRY(feature2, 2) \ + ".popsection\n" \ + ".subsection 1\n" \ + ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ + ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ + ".previous\n" + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. + */ +#define alternative(oldinstr, newinstr, feature) \ + (asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")) + +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ + (asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ALTERNATIVE_H */ diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h index 437cb48d1e93..7bd24d6f6598 100644 --- a/arch/sw_64/include/asm/insn.h +++ b/arch/sw_64/include/asm/insn.h @@ -77,6 +77,7 @@ SW64_INSN(imemb, 0x18000001, 0xfc00ffff); SW64_INSN(rtc, 0x18000020, 0xfc00ffff); SW64_INSN(halt, 0x18000080, 0xfc00ffff); SW64_INSN(rd_f, 0x18001000, 0xfc00ffff); +SW64_INSN(lbr, 0x74000000, 0xfc000000); SW64_INSN(beq, 0xc0000000, 0xfc000000); SW64_INSN(bne, 0xc4000000, 0xfc000000); SW64_INSN(blt, 0xc8000000, 0xfc000000); @@ -94,4 +95,12 @@ SW64_INSN(fbge, 0xf4000000, 0xfc000000); SW64_INSN(lldw, 0x20000000, 0xfc00f000); SW64_INSN(lldl, 0x20001000, 0xfc00f000); +static inline bool sw64_insn_is_branch(u32 insn) +{ + bool is_branch = ((insn & 0xfc000000) >= 0x10000000 && (insn & 0xfc000000) <= 0x14000000) || \ + ((insn & 0xfc000000) >= 0xc0000000 && (insn & 0xfc000000) 
<= 0xf4000000) || \ + ((insn & 0xfc000000) == 0x74000000); + return is_branch; +} + #endif /* _ASM_SW64_INSN_H */ diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h index d1663aab4097..708adc93bca7 100644 --- a/arch/sw_64/include/asm/module.h +++ b/arch/sw_64/include/asm/module.h @@ -14,4 +14,19 @@ struct mod_arch_specific { asm(".section .got, \"aw\", @progbits; .align 3; .previous"); #endif +static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +{ + const Elf_Shdr *s, *se; + const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + if (strcmp(name, secstrs + s->sh_name) == 0) + return s; + } + + return NULL; +} + #endif /* _ASM_SW64_MODULE_H */ diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile index 8d195f6df15e..fd166b642113 100644 --- a/arch/sw_64/kernel/Makefile +++ b/arch/sw_64/kernel/Makefile @@ -23,7 +23,8 @@ obj-y := fpu.o traps.o process.o sys_sw64.o irq.o cpu.o \ systbls.o dup_print.o chip_setup.o \ insn.o early_init.o topology.o cacheinfo.o \ vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \ - head.o termios.o trap_unalign.o cpufeature.o + head.o termios.o trap_unalign.o alternative.o \ + cpufeature.o obj-$(CONFIG_SUBARCH_C3B) += entry_c3.o tc.o obj-$(CONFIG_SUBARCH_C4) += entry_c4.o diff --git a/arch/sw_64/kernel/alternative.c b/arch/sw_64/kernel/alternative.c new file mode 100644 index 000000000000..87b1d76acc2d --- /dev/null +++ b/arch/sw_64/kernel/alternative.c @@ -0,0 +1,164 @@ +#define pr_fmt(fmt) "alternatives: " fmt + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define MAX_PATCH_SIZE (((u8)(-1)) / SW64_INSN_SIZE) + +extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; + +/* Use this to add nops to a buffer, then text_poke the whole buffer. 
*/ +static void __init_or_module add_nops(u32 *insn, int count) +{ + while (count--) { + *insn = SW64_NOP; + insn++; + } +} + +/* Is the jump addr in local .altinstructions */ +static inline bool in_alt_jump(unsigned long jump, void *start, void *end) +{ + return jump >= (unsigned long)start && jump < (unsigned long)end; +} + +static void __init_or_module recompute_jump(u32 *buf, u32 *dest, u32 *src, + void *start, void *end) +{ + unsigned long si_lo21, si_lo26, disp; + unsigned long cur_pc, jump_addr, pc; + + cur_pc = (unsigned long)src; + pc = (unsigned long)dest; + + si_lo21 = *src & 0x1fffff; + si_lo26 = *src & 0x3ffffff; + *buf = *src; + + if (sw64_insn_is_lbr(*src)) { + jump_addr = cur_pc + SW64_INSN_SIZE * si_lo26; + if (in_alt_jump(jump_addr, start, end)) + return; + disp = (jump_addr - pc) / SW64_INSN_SIZE; + *buf = (*buf & ~0x3ffffff) | disp; + } else { + jump_addr = cur_pc + SW64_INSN_SIZE * si_lo21; + if (in_alt_jump(jump_addr, start, end)) + return; + disp = (jump_addr - pc) / SW64_INSN_SIZE; + *buf = (*buf & ~0x1fffff) | disp; + } + + return; +} + +/* Not support pcrel instruction at present! */ +static int __init_or_module copy_alt_insns(u32 *buf, + u32 *dest, u32 *src, int nr) +{ + int i; + + for (i = 0; i < nr; i++) { + buf[i] = src[i]; + + if (sw64_insn_is_branch(src[i])) { + recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr); + } + } + + return 0; +} + +static void *__init_or_module text_poke_early(u32 *insn, u32 *buf, unsigned int nr) +{ + int i; + unsigned long flags; + + local_irq_save(flags); + + for (i = 0; i < nr; i++) + insn[i] = buf[i]; + + local_irq_restore(flags); + + tbiv(); + + return insn; +} + +void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end) +{ + struct alt_instr *a; + unsigned int nr_instr, nr_repl, nr_insnbuf; + u32 *instr, *replacement; + u32 insnbuf[MAX_PATCH_SIZE]; + /* + * The scan order should be from start to end. 
A later scanned + * alternative code can overwrite previously scanned alternative code. + * Some kernel functions (e.g. memcpy, memset, etc) use this order to + * patch code. + * + * So be careful if you want to change the scan order to any other + * order. + */ + for (a = start; a < end; a++) { + nr_insnbuf = 0; + + instr = (void *)&a->instr_offset + a->instr_offset; + replacement = (void *)&a->replace_offset + a->replace_offset; + + nr_instr = a->instrlen / SW64_INSN_SIZE; + nr_repl = a->replacementlen / SW64_INSN_SIZE; + + if (!cpus_have_cap(a->feature)) + continue; + + copy_alt_insns(insnbuf, instr, replacement, nr_repl); + nr_insnbuf = nr_repl; + + if (nr_instr > nr_repl) { + add_nops(insnbuf + nr_repl, nr_instr - nr_repl); + nr_insnbuf += nr_instr - nr_repl; + } + + text_poke_early(instr, insnbuf, nr_insnbuf); + } +} + +static void __init apply_vdso_alternatives(void) +{ + const Elf_Ehdr *hdr; + const Elf_Shdr *shdr; + const Elf_Shdr *alt; + struct alt_instr *begin, *end; + + hdr = (Elf_Ehdr *)vdso_start; + shdr = (void *)hdr + hdr->e_shoff; + alt = find_section(hdr, shdr, ".altinstructions"); + if (!alt) + return; + + begin = (void *)hdr + alt->sh_offset, + end = (void *)hdr + alt->sh_offset + alt->sh_size, + + apply_alternatives((struct alt_instr *)begin, + (struct alt_instr *)end); +} + +void __init apply_alternatives_all(void) +{ + apply_vdso_alternatives(); + + apply_alternatives(__alt_instructions, __alt_instructions_end); +} diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c index fb7c61c1b481..56bd1ba07f23 100644 --- a/arch/sw_64/kernel/module.c +++ b/arch/sw_64/kernel/module.c @@ -5,6 +5,9 @@ #include #include +#include +#include + #define DEBUGP(fmt...) /* Allocate the GOT at the end of the core sections. 
*/ @@ -287,3 +290,16 @@ void *module_alloc(unsigned long size) GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __builtin_return_address(0)); } + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + + s = find_section(hdr, sechdrs, ".altinstructions"); + if (s) + apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size); + + return 0; +} diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index 4407c0ef9aec..dcfd0acf89e1 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -676,6 +677,8 @@ setup_arch(char **cmdline_p) setup_cpu_features(); + apply_alternatives_all(); + jump_label_init(); #ifdef CONFIG_SUBARCH_C3B diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S index 0dde95a9e912..212b9d10d2a3 100644 --- a/arch/sw_64/kernel/vdso/vdso.lds.S +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -48,6 +48,11 @@ SECTIONS PROVIDE (_etext = .); PROVIDE (etext = .); + . = ALIGN(4); + .altinstructions : { + *(.altinstructions) + } + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text diff --git a/arch/sw_64/kernel/vmlinux.lds.S b/arch/sw_64/kernel/vmlinux.lds.S index 8bbce3e743fe..6e2b59bf4292 100644 --- a/arch/sw_64/kernel/vmlinux.lds.S +++ b/arch/sw_64/kernel/vmlinux.lds.S @@ -53,6 +53,13 @@ SECTIONS } PERCPU_SECTION(L1_CACHE_BYTES) + . 
= ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + /* * Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page * needed for the THREAD_SIZE aligned init_task gets freed after init -- Gitee From da3069809d3826678b7558e4b02d721129ce5a14 Mon Sep 17 00:00:00 2001 From: Jing Li Date: Wed, 21 Jan 2026 12:28:07 +0800 Subject: [PATCH 148/231] sw64: cache: fix shared_cpu_map when PPTT is invalid Add check to the code to prevent access to null pointer. Signed-off-by: Jing Li Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kernel/cacheinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c index a6e8298cff50..9aa1890420d1 100644 --- a/arch/sw_64/kernel/cacheinfo.c +++ b/arch/sw_64/kernel/cacheinfo.c @@ -131,7 +131,7 @@ static void setup_shared_cpu_map(unsigned int cpu) struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); if ((rcid_to_domain_id(sib_rcid) != rcid_to_domain_id(rcid)) || - (i == cpu)) + (i == cpu) || !sib_cpu_ci->info_list) continue; sib_leaf = sib_cpu_ci->info_list + index; -- Gitee From 6ebb11d1cf489c8f0f92c6f471ba8be12d29b883 Mon Sep 17 00:00:00 2001 From: Jing Li Date: Fri, 5 Dec 2025 19:02:23 +0800 Subject: [PATCH 149/231] sw64: iommu: refactor iommu v2 initialization Move the early initialization code of Sunway IOMMU version 2 to the PCI driver and add device tree description for the IOMMU. 
Signed-off-by: Jing Li Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/pci/acpi.c | 7 + drivers/iommu/sw64/iommu.c | 4 +- drivers/iommu/sw64/iommu.h | 4 +- drivers/iommu/sw64/iommu_v2.c | 382 +++++++++++----------------- drivers/pci/controller/pci-sunway.c | 7 + 5 files changed, 172 insertions(+), 232 deletions(-) diff --git a/arch/sw_64/pci/acpi.c b/arch/sw_64/pci/acpi.c index 1b748074fdf8..a45a4e232c26 100644 --- a/arch/sw_64/pci/acpi.c +++ b/arch/sw_64/pci/acpi.c @@ -173,6 +173,11 @@ static int pci_acpi_prepare_root_resources(struct acpi_pci_root_info *ci) return status; } +int __weak sunway_iommu_early_init(struct pci_controller *hose) +{ + return -ENOENT; +} + /** * This function is called from ACPI code and used to * setup PCI host controller. @@ -229,6 +234,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) pcie_bus_configure_settings(child); } + sunway_iommu_early_init(pci_bus_to_pci_controller(bus)); + return bus; setup_ecam_err: diff --git a/drivers/iommu/sw64/iommu.c b/drivers/iommu/sw64/iommu.c index de135a7e648b..a161aec1069c 100644 --- a/drivers/iommu/sw64/iommu.c +++ b/drivers/iommu/sw64/iommu.c @@ -739,7 +739,7 @@ struct syscore_ops iommu_cpu_syscore_ops = { static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); -static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +static struct sunway_iommu *iommu_early_init(struct pci_controller *hose) { struct sunway_iommu *iommu; struct page *page; @@ -805,7 +805,7 @@ static int sunway_iommu_init(void) if (hose->iommu_enable) continue; - iommu = sunway_iommu_early_init(hose); + iommu = iommu_early_init(hose); if (!iommu) { pr_err("Allocating sunway_iommu failed\n"); hose->iommu_enable = false; diff --git a/drivers/iommu/sw64/iommu.h b/drivers/iommu/sw64/iommu.h index a8b49139fade..5a64943e8e9a 100644 --- a/drivers/iommu/sw64/iommu.h +++ b/drivers/iommu/sw64/iommu.h @@ -21,7 +21,7 @@ struct sunway_iommu { unsigned long *iommu_dtbr; 
spinlock_t dt_lock; /* Device Table Lock */ - struct pci_controller *hose_pt; + struct pci_controller *hose; struct iommu_device iommu; /* IOMMU core code handle */ struct list_head list; }; @@ -85,3 +85,5 @@ struct sunway_iommu_group { #define PAGE_SIZE_IOMMU (_AC(1, UL) << PAGE_SHIFT_IOMMU) #define PCACHE_FLUSHPADDR_MASK 0xffffffffff80UL + +extern int sunway_pci_init_iommu(struct pci_controller *hose); diff --git a/drivers/iommu/sw64/iommu_v2.c b/drivers/iommu/sw64/iommu_v2.c index 7e363bfbdaaf..d93d5fff6001 100644 --- a/drivers/iommu/sw64/iommu_v2.c +++ b/drivers/iommu/sw64/iommu_v2.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -26,9 +27,8 @@ #include #include #include + #include -#include -#include #include "iommu.h" #include "../dma-iommu.h" @@ -57,7 +57,7 @@ struct acpi_table_header *dmar_tbl; #define MAX_NR_IOMMU_PER_NODE 16 -LIST_HEAD(iommu_list); +static LIST_HEAD(iommu_list); /* IOMMU Exceptional Status */ enum exceptype { @@ -565,54 +565,6 @@ static struct sunway_iommu_dev *search_dev_data(u16 devid) return NULL; } -/********************************************************************** - * - * Following functions describe IOMMU init ops - * - **********************************************************************/ - -static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) -{ - struct sunway_iommu *iommu; - struct page *page; - unsigned long base; - int ret = 0; - int node; - - iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); - if (!iommu) { - ret = -ENOMEM; - goto out; - } - - spin_lock_init(&iommu->dt_lock); - - iommu->node = hose->node; - iommu->index = hose->index; - - node = node_online(iommu->node) ? 
iommu->node : NUMA_NO_NODE; - page = alloc_pages_node(node, __GFP_ZERO, get_order(PAGE_SIZE)); - if (!page) { - ret = -ENOMEM; - goto free_iommu; - } - - iommu->iommu_dtbr = page_address(page); - base = __pa(iommu->iommu_dtbr) & PAGE_MASK; - iommu->reg_base_addr = hose->piu_ior0_base; - writeq(base, iommu->reg_base_addr + DTBASEADDR); - - hose->pci_iommu = iommu; - iommu->enabled = true; - - return iommu; - -free_iommu: - kfree(iommu); -out: - return ERR_PTR(ret); -} - unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, enum exceptype type) { @@ -802,30 +754,27 @@ struct irqaction iommu_irqaction = { .name = "sunway_iommu", }; -void sunway_enable_iommu_func(struct pci_controller *hose) +static void sunway_enable_iommu_func(struct sunway_iommu *iommu) { - struct sunway_iommu *iommu; + struct pci_controller *hose = iommu->hose; unsigned int iommu_irq, err; unsigned long iommu_conf, iommu_ctrl; iommu_irq = hose->int_irq; - pr_debug("%s node %ld rc %ld iommu_irq %d\n", - __func__, hose->node, hose->index, iommu_irq); err = request_irq(iommu_irq, iommu_interrupt, IRQF_SHARED, "sunway_iommu", hose); if (err < 0) pr_info("sw iommu request irq failed!\n"); - iommu = hose->pci_iommu; iommu_ctrl = (1UL << 63) | (0x100UL << 10); writeq(iommu_ctrl, iommu->reg_base_addr + IOMMUEXCPT_CTRL); iommu_conf = readq(iommu->reg_base_addr + PIUCONFIG0); iommu_conf = iommu_conf | (0x3 << 7); writeq(iommu_conf, iommu->reg_base_addr + PIUCONFIG0); writeq(0xf, iommu->reg_base_addr + TIMEOUT_CONFIG); - iommu_conf = readq(iommu->reg_base_addr + PIUCONFIG0); - pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", - hose->node, hose->index, iommu_conf); + + iommu->enabled = true; + hose->iommu_enable = true; } /* iommu cpu syscore ops */ @@ -844,219 +793,213 @@ struct syscore_ops iommu_cpu_syscore_ops = { .resume = iommu_cpu_resume, }; -static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); - -/* Init functions */ -static int do_detect(void) 
+static bool sunway_iommu_is_enabled(unsigned long pnode, unsigned long index) { - acpi_status status = AE_OK; - - status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl); - - if (ACPI_SUCCESS(status) && !dmar_tbl) { - pr_warn("No DMAR found!\n"); - status = AE_NOT_FOUND; - } + unsigned long which_iommu = MAX_NR_IOMMU_PER_NODE * pnode + index; - return ACPI_SUCCESS(status) ? 0 : -ENOENT; + return test_bit(which_iommu, iommu_bitmap); } -static struct pci_controller *find_hose_by_rcid(int node, int index) +#ifdef CONFIG_ACPI +static const struct acpi_dmar_sw_hardware_unit * +find_dmar_entry(unsigned long pnode, unsigned long index) { - struct pci_controller *hose; + struct acpi_table_header *dmar_header; + struct acpi_table_sw_dmar *dmar; + const struct acpi_sw_dmar_header *entry, *start, *end; + const struct acpi_dmar_sw_hardware_unit *unit; + size_t len; + u16 iommu_index = ((u16)pnode << 8) | ((u16)index); + + acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_header); + if (!dmar_header) { + pr_warn("No DMAR table found\n"); + return NULL; + } - for (hose = hose_head; hose; hose = hose->next) - if (hose->node == node && hose->index == index) - return hose; + dmar = (struct acpi_table_sw_dmar *)dmar_header; + len = dmar->header.length - sizeof(*dmar); + entry = start = (struct acpi_sw_dmar_header *)(dmar + 1); + end = (void *)start + len; - return NULL; -} + for (; entry < end; entry = (void *)entry + entry->length) { + unit = (const struct acpi_dmar_sw_hardware_unit *)entry; -static int parse_one_drhd_unit(struct acpi_sw_dmar_header *header) -{ - struct acpi_dmar_sw_hardware_unit *drhd; - struct sunway_iommu *iommu; - struct pci_controller *hose; - struct page *page; - unsigned long base; - int cmdline_enabled; - int rc_mask, ret, node; - int rc_node, rc_index; + if (!entry->length || + (entry->type >= ACPI_SW_DMAR_TYPE_RESERVED)) { + pr_err(FW_BUG "Invalid DMAR table\n"); + acpi_put_table(dmar_header); + return NULL; + } - drhd = (struct acpi_dmar_sw_hardware_unit *)header; - 
if (!drhd->enable) - return 0; + if (unit->index == iommu_index) + break; + } - rc_node = (drhd->index >> 8) & 0xff; - rc_index = drhd->index & 0xff; + acpi_put_table(dmar_header); - hose = find_hose_by_rcid(rc_node, rc_index); - if (!hose) - return 0; + return (entry < end) ? unit : NULL; +} - iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); - if (!iommu) - return -ENOMEM; +static int pci_acpi_init_iommu(struct pci_controller *hose) +{ + struct pci_config_window *cfg; + struct device *dev; + struct acpi_device *adev; + unsigned long long pxm; + acpi_status status; + const struct acpi_dmar_sw_hardware_unit *entry; - iommu->node = rc_node; - iommu->index = rc_index; - iommu->reg_base_addr = ioremap(drhd->address, drhd->size); + cfg = hose->bus->sysdata; + dev = cfg->parent; + adev = to_acpi_device(dev); - rc_mask = MAX_NR_IOMMU_PER_NODE * iommu->node + iommu->index; - cmdline_enabled = test_bit(rc_mask, iommu_bitmap); - if (!cmdline_enabled) { - iommu->enabled = false; - ret = 0; - goto free_iommu; + status = acpi_evaluate_integer(acpi_device_handle(adev), + "_PXM", NULL, &pxm); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to retrieve _PXM\n"); + return -EINVAL; } - node = node_online(iommu->node) ? 
iommu->node : NUMA_NO_NODE; - page = alloc_pages_node(node, __GFP_ZERO, get_order(PAGE_SIZE)); - if (!page) { - ret = -ENOMEM; - goto free_iommu; + entry = find_dmar_entry((unsigned long)pxm, hose->index); + if (!entry) { + dev_err(dev, "failed to find dmar entry\n"); + return -ENODEV; } - iommu->iommu_dtbr = page_address(page); - base = __pa(iommu->iommu_dtbr) & PAGE_MASK; - writeq(base, iommu->reg_base_addr + DTBASEADDR); + if (!entry->enable) { + dev_info(dev, "IOMMU disabled by firmware\n"); + return -ENODEV; + } - list_add(&iommu->list, &iommu_list); - iommu->enabled = true; + if (!sunway_iommu_is_enabled((unsigned long)pxm, hose->index)) { + dev_info(dev, "IOMMU disabled by cmdline\n"); + return -ENODEV; + } - hose->pci_iommu = iommu; + dev_info(dev, "IOMMU with physical node %llu index %lu enabled\n", + pxm, hose->index); - pr_info("iommu: node: %ld index: %ld IOMMU enabled!\n", - iommu->node, iommu->index); return 0; - -free_iommu: - kfree(iommu); - return ret; } +#endif /* CONFIG_ACPI */ -static int parse_drhd_units(struct acpi_table_sw_dmar *dmar) +#ifdef CONFIG_OF +static int pci_of_init_iommu(struct pci_controller *hose) { - struct acpi_sw_dmar_header *iter, *start, *next, *end; - size_t len = dmar->header.length - sizeof(*dmar); - int ret, count = 0; + struct pci_config_window *cfg; + struct device *dev; + struct device_node *np; + u32 pnode = NUMA_NO_NODE, enable, width; - /* Skip DMAR table, point to first DRHD table. 
*/ - start = (struct acpi_sw_dmar_header *)(dmar + 1); - end = ((void *)start) + len; + cfg = hose->bus->sysdata; + dev = cfg->parent; - for (iter = start; iter < end; iter = next) { - next = (void *)iter + iter->length; - if (iter->length == 0) { - pr_warn(FW_BUG "Invalid 0-length structure\n"); - break; - } else if (next > end) { - pr_warn(FW_BUG "Record passes table end\n"); - return -EINVAL; - } + np = of_node_get(dev->of_node); - if (iter->type >= ACPI_SW_DMAR_TYPE_RESERVED) { - pr_info("Unknown DMAR structure type %d\n", - iter->type); - } else if (iter->type == 0) { - ret = parse_one_drhd_unit(iter); - if (ret) - return ret; - } - count++; + of_property_read_u32(np, "numa-node-id", &pnode); + if (pnode == NUMA_NO_NODE) { + dev_err(dev, "failed to retrieve numa-node-id\n"); + return -EINVAL; } - return 0; -} + if (of_property_read_u32(np, "sunway,iommu-width", &width)) + width = 42; // Backward compatibility -static int sunway_iommu_acpi_early_init(void) -{ - int ret; + if (of_property_read_u32(np, "sunway,iommu-enable", &enable)) + enable = 1; // Backward compatibility - struct acpi_table_sw_dmar *dmar; + of_node_put(np); - ret = do_detect(); - if (ret) - return ret; - - dmar = (struct acpi_table_sw_dmar *)dmar_tbl; - if (!dmar) + if (!enable) { + dev_info(dev, "IOMMU disabled by firmware\n"); return -ENODEV; + } - if (dmar->width < 42) { - pr_warn("Invalid DMAR haw\n"); - return -EINVAL; + if (!sunway_iommu_is_enabled(pnode, hose->index)) { + dev_info(dev, "IOMMU disabled by cmdline\n"); + return -ENODEV; } - pr_info("Host address width: %d\n", dmar->width); - ret = parse_drhd_units(dmar); + dev_info(dev, "IOMMU with physical node %u index %lu enabled\n", + pnode, hose->index); - return ret; + return 0; +} +#endif /* CONFIG_OF */ + +static int pci_init_iommu(struct pci_controller *hose) +{ +#ifdef CONFIG_OF + if (acpi_disabled) + return pci_of_init_iommu(hose); +#endif + +#ifdef CONFIG_ACPI + if (!acpi_disabled) + return pci_acpi_init_iommu(hose); +#endif + 
+ return -EINVAL; } -static int sunway_iommu_acpi_init(void) +int sunway_iommu_early_init(struct pci_controller *hose) { struct sunway_iommu *iommu; - struct pci_controller *hose; - int iommu_index = 0; - int ret; + struct page *page; + unsigned long base; + int ret, node; - ret = sunway_iommu_acpi_early_init(); + ret = pci_init_iommu(hose); if (ret) return ret; - for_each_iommu(iommu) { - hose = find_hose_by_rcid(iommu->node, iommu->index); - if (!hose) - continue; + iommu = kzalloc_node(sizeof(struct sunway_iommu), GFP_KERNEL, + hose->node); + if (!iommu) + return -ENOMEM; - if (!iommu->enabled || hose->iommu_enable) - continue; + iommu->node = hose->node; + iommu->index = hose->index; - sunway_enable_iommu_func(hose); - hose->iommu_enable = true; - iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", - iommu_index); - iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); - iommu_index++; - piu_flush_all(iommu); + node = node_online(iommu->node) ? iommu->node : NUMA_NO_NODE; + page = alloc_pages_node(node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + kfree(iommu); + return -ENOMEM; } - ret = iova_cache_get(); - if (ret) - return ret; + iommu->iommu_dtbr = page_address(page); + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + iommu->reg_base_addr = hose->piu_ior0_base; + writeq(base, iommu->reg_base_addr + DTBASEADDR); - register_syscore_ops(&iommu_cpu_syscore_ops); + hose->pci_iommu = iommu; + iommu->hose = hose; + + list_add(&iommu->list, &iommu_list); return 0; } -static int sunway_iommu_legacy_init(void) +static int sunway_iommu_init(void) { - struct pci_controller *hose; struct sunway_iommu *iommu; - unsigned long rc_mask; - int iommu_index = 0; - int ret; + int iommu_index = 0, ret = 0; - /* Do the loop */ - for (hose = hose_head; hose; hose = hose->next) { - rc_mask = MAX_NR_IOMMU_PER_NODE * hose->node + hose->index; - if (!test_bit(rc_mask, iommu_bitmap)) { - hose->iommu_enable = false; - continue; - } + sunway_iommu_domain_bitmap = + (void 
*)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (!sunway_iommu_domain_bitmap) + return -ENOMEM; - if (hose->iommu_enable) - continue; + __set_bit(0, sunway_iommu_domain_bitmap); - iommu = sunway_iommu_early_init(hose); - sunway_enable_iommu_func(hose); - hose->iommu_enable = true; + for_each_iommu(iommu) { + sunway_enable_iommu_func(iommu); iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", - iommu_index); + iommu_index++); iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); - iommu_index++; piu_flush_all(iommu); } @@ -1068,25 +1011,6 @@ static int sunway_iommu_legacy_init(void) return 0; } - -static int sunway_iommu_init(void) -{ - int ret; - - sunway_iommu_domain_bitmap = - (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(MAX_DOMAIN_NUM / 8)); - if (!sunway_iommu_domain_bitmap) - return 0; - __set_bit(0, sunway_iommu_domain_bitmap); - - if (!acpi_disabled) - ret = sunway_iommu_acpi_init(); - else - ret = sunway_iommu_legacy_init(); - - return ret; -} subsys_initcall_sync(sunway_iommu_init); /******************************************************************************* diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c index e3487e2ea43c..d8fc6b280120 100644 --- a/drivers/pci/controller/pci-sunway.c +++ b/drivers/pci/controller/pci-sunway.c @@ -1058,6 +1058,11 @@ static struct pci_host_bridge *sunway_pcie_of_init(struct platform_device *pdev) return bridge; } +int __weak sunway_iommu_early_init(struct pci_controller *hose) +{ + return -ENOENT; +} + static int sunway_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1095,6 +1100,8 @@ static int sunway_pcie_probe(struct platform_device *pdev) pci_bus_add_devices(bus); + sunway_iommu_early_init(pci_bus_to_pci_controller(bus)); + return 0; } -- Gitee From 7f18154dfabc23e96661185366a89c9095190908 Mon Sep 17 00:00:00 2001 From: Wu Liliu Date: Thu, 5 Feb 2026 14:06:38 +0800 Subject: [PATCH 
150/231] sw64: fix: getcpu passed a null pointer error The parameters of the getcpu system call can be NULL, during processing, it's necessary to check whether the parameters are NULL before assigning values. Signed-off-by: Wu Liliu Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kernel/vdso/vgetcpu.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/sw_64/kernel/vdso/vgetcpu.c b/arch/sw_64/kernel/vdso/vgetcpu.c index d17f1b16ccb8..6398abd16736 100644 --- a/arch/sw_64/kernel/vdso/vgetcpu.c +++ b/arch/sw_64/kernel/vdso/vgetcpu.c @@ -27,12 +27,16 @@ static void __getcpu(unsigned int *cpu, unsigned int *node, "mov $0, %0\n" : "=&r"(cpuid) : "i"(HMC_uwhami)); - *cpu = data->vdso_whami_to_cpu[cpuid]; - *node = data->vdso_whami_to_node[cpuid]; + if (cpu) + *cpu = data->vdso_whami_to_cpu[cpuid]; + if (node) + *node = data->vdso_whami_to_node[cpuid]; #else asm volatile ("csrr %0, %1" : "=&r"(cpuid) : "i"(CSR_SOFTCID)); - *cpu = cpuid; - *node = data->vdso_cpu_to_node[*cpu]; + if (cpu) + *cpu = cpuid; + if (node) + *node = data->vdso_cpu_to_node[*cpu]; #endif } -- Gitee From 7461ca31125ade7b30f2aadcde35d1a8bd8a55ed Mon Sep 17 00:00:00 2001 From: He Chuyue Date: Wed, 24 Dec 2025 09:53:39 +0800 Subject: [PATCH 151/231] sw64: add EDAC support with dynamic node and MC detection Add EDAC support for the SW64 platform. The driver dynamically detects the presence of nodes and memory controllers, and supports both ACPI and Device Tree based firmware descriptions. 
Signed-off-by: He Chuyue Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 1 + arch/sw_64/include/asm/uncore_io_junzhang.h | 24 + .../include/asm/uncore_io_ops_junzhang.h | 2 + drivers/edac/Kconfig | 7 + drivers/edac/Makefile | 2 + drivers/edac/sw64_edac.c | 483 ++++++++++++++++++ drivers/edac/sw64_edac.h | 5 + 7 files changed, 524 insertions(+) create mode 100644 drivers/edac/sw64_edac.c create mode 100644 drivers/edac/sw64_edac.h diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index ac645c8eda64..c0e293b0bbc9 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -63,6 +63,7 @@ config SW64 select AUDIT_ARCH select COMMON_CLK select DMA_OPS if PCI + select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h index 6f78eb766801..2cb23480ecd8 100644 --- a/arch/sw_64/include/asm/uncore_io_junzhang.h +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -75,4 +75,28 @@ #define PIUCONFIG0_INIT_VAL 0x38016 +/* MC IO REG */ +enum { + CFGDEC = 0x400UL, + CFGCR = 0x480UL, + INIT_CTRL = 0x580UL, + CFGERR = 0xd00UL, + FSMSTAT = 0xe00UL, + PUB_INTERFACE = 0x1000UL, + POWERCTRL = 0x1080UL, + CFGMR0 = 0x1280UL, + CFGMR1 = 0x1300UL, + CFGMR2 = 0x1380UL, + CFGMR3 = 0x1400UL, + PERF_CTRL = 0x1480UL, + MC_PERF0 = 0x1500UL, + CFGMR4 = 0x1800UL, + CFGMR5 = 0x1880UL, + CFGMR6 = 0x1900UL, + MC_CTRL = 0x1c00UL, + MEMSERR_P = 0x1c80UL, + MEMSERR = 0x1d00UL, + MERRADDR = 0x2280UL, +}; + #endif /* _ASM_SW64_UNCORE_IO_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h index eff4f40886d8..472c495ebcdf 100644 --- a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -3,6 +3,8 @@ #define _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H #define OFFSET_CFG_INFO 0x1100UL +#define 
OFFSET_FAULT_SOURCE 0xb00UL +#define OFFSET_MC_ONLINE 0x3780UL static inline int __get_cpu_nums(void) { diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 2484f0795925..daca7959a3db 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -571,4 +571,11 @@ config EDAC_LOONGSON errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000 are compatible. +config EDAC_SW64 + tristate "SW64 UNCORE_JUNZHANG" + depends on SW64 && SUBARCH_C4 + help + Support for error detection and correction on the + UNCORE_JUNZHANG chipsets. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 699b818ac7cb..aeb56cf0eba8 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -45,6 +45,8 @@ obj-$(CONFIG_EDAC_X38) += x38_edac.o obj-$(CONFIG_EDAC_I82860) += i82860_edac.o obj-$(CONFIG_EDAC_R82600) += r82600_edac.o obj-$(CONFIG_EDAC_AMD64) += amd64_edac.o +obj-$(CONFIG_EDAC_SW64) += sw64_edac.o + obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o diff --git a/drivers/edac/sw64_edac.c b/drivers/edac/sw64_edac.c new file mode 100644 index 000000000000..2f6529c00542 --- /dev/null +++ b/drivers/edac/sw64_edac.c @@ -0,0 +1,483 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "edac_module.h" + +#define EDAC_MOD_STR "sw64_edac" + +struct sw64_edac { + struct device *dev; + void __iomem *spbu_base; + struct list_head mc; + int node; + int edac_idx; +}; + +struct sw64_edac_mc { + struct fwnode_handle *fwnode; + struct list_head next; + char *name; + struct mem_ctl_info *mci; + struct sw64_edac *edac; + void __iomem *mc_vbase; + u32 mc_id; + int irq; +}; + +struct sw64_platform_data { + struct sw64_edac_mc *properties; + unsigned int nports; +}; + +static int edac_mc_idx; +static const char *sw64_ctl_name = "SW64"; + +/*********************** DRAM err device **********************************/ + +static void 
sw64_edac_mc_check(struct mem_ctl_info *mci) +{ + struct sw64_edac_mc *mc = mci->pvt_info; + u32 reg; + u32 err_addr; + + reg = readq(mc->mc_vbase + MEMSERR) >> 32; + err_addr = readq(mc->mc_vbase + MERRADDR); + + /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ + if (reg & 0x1) { + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + err_addr >> PAGE_SHIFT, + err_addr & PAGE_MASK, 0, + 0, 0, -1, + mci->ctl_name, ""); + /* clear the error */ + writeq(0x1, mc->mc_vbase + MEMSERR); + writeq(1UL << 32, mc->mc_vbase + MEMSERR); + } + if (reg & 0x2) { /* 2 bit error, UE */ + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + err_addr >> PAGE_SHIFT, + err_addr & PAGE_MASK, 0, + 0, 0, -1, + mci->ctl_name, ""); + /* clear the error */ + writeq(1UL << 33, mc->mc_vbase + MEMSERR); + } + +} + +static irqreturn_t sw64_edac_isr(int irq, void *dev_id) +{ + struct sw64_edac *edac = dev_id; + struct sw64_edac_mc *mc; + u32 cause; + void __iomem *spbu_base = edac->spbu_base; + + cause = (readq(spbu_base + OFFSET_FAULT_SOURCE) << 2) & 0x1; + if (!cause) + return IRQ_NONE; + + /* writing 0's to the ECC err addr in check function clears irq */ + list_for_each_entry(mc, &edac->mc, next) + sw64_edac_mc_check(mc->mci); + + return IRQ_HANDLED; +} + +static unsigned long get_total_mem(struct sw64_edac *edac) +{ + unsigned long total_mem; + void __iomem *spbu_base = edac->spbu_base; + + total_mem = readq(spbu_base + OFFSET_CFG_INFO) >> 3; + total_mem = (total_mem & 0xffff) << 28; + + return total_mem; +} + +static void sw64_init_csrows(struct mem_ctl_info *mci, + struct sw64_edac_mc *mc) +{ + struct csrow_info *csrow; + struct dimm_info *dimm; + unsigned long total_mem; + + u32 devtype; + + total_mem = get_total_mem(mc->edac); + + csrow = mci->csrows[0]; + dimm = csrow->channels[0]->dimm; + + dimm->nr_pages = total_mem >> PAGE_SHIFT; + dimm->grain = 8; + + dimm->mtype = MEM_DDR4; + + devtype = readq(mc->mc_vbase + MC_CTRL) >> 20; + switch (devtype & 0x3) { + case 
0x0: + dimm->dtype = DEV_X4; + break; + case 0x2: + dimm->dtype = DEV_X8; + break; + case 0x3: + dimm->dtype = DEV_X16; + break; + default: + dimm->dtype = DEV_UNKNOWN; + break; + } + + dimm->edac_mode = EDAC_SECDED; +} + +#ifdef CONFIG_NUMA +static void sw64_edac_get_node(struct sw64_edac *edac, + struct device *dev) +{ + if (numa_off) + return; + + if (acpi_disabled) { + if (device_property_read_u32(dev, "numa-node-id", &edac->node)) + dev_warn(dev, "sw64_edac: node ID unknown\n"); + } else + edac->node = dev_to_node(dev); + + /** + * If numa_off is not set, we expect a valid node ID. + * If not, fallback to node 0. + */ + if (edac->node == NUMA_NO_NODE) { + pr_warn("Invalid node ID\n"); + edac->node = 0; + } +} +#endif + +static int sw64_edac_mc_add(struct device *dev, struct fwnode_handle *fwnode, + struct sw64_platform_data *pdata, int i) +{ + struct sw64_edac *edac; + struct mem_ctl_info *mci; + struct edac_mc_layer layers[2]; + struct sw64_edac_mc tmp_mc; + struct sw64_edac_mc *mc; + struct resource res; + struct resource *r; + int ret; + int numa_node; + struct platform_device *child_pdev; + acpi_status status; + unsigned long long sta; + + edac = dev_get_drvdata(dev); + memset(&tmp_mc, 0, sizeof(tmp_mc)); + + if (!devres_open_group(edac->dev, sw64_edac_mc_add, GFP_KERNEL)) + return -ENOMEM; + + tmp_mc.fwnode = fwnode; + tmp_mc = pdata->properties[i++]; + + if (fwnode_property_read_u32(fwnode, "memory-controller", + &tmp_mc.mc_id)) { + dev_err(dev, "Failed to get memory-controller ID\n"); + ret = -ENODEV; + goto err_group; + } + + if (acpi_disabled) { + unsigned long mc_online; + void __iomem *spbu_base; + + numa_node = edac->node; + spbu_base = edac->spbu_base; + mc_online = readq(spbu_base + OFFSET_MC_ONLINE) & 0xff; + + if (!(mc_online & (1 << tmp_mc.mc_id))) { + pr_info("mc %d.%d is offline, skip init\n", numa_node, tmp_mc.mc_id); + goto err_group; + } + } else { + if (fwnode_property_read_u32(fwnode, "numa-node-id", + &numa_node)) { + 
dev_info(edac->dev, "Failed to get numa node ID\n"); + ret = -ENODEV; + goto err_group; + } + + status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) + goto err_group; + if (!sta) { + pr_info("mc %d.%d is offline, skip init\n", numa_node, tmp_mc.mc_id); + goto err_group; + } + } + + + if (acpi_disabled) { + u64 regs[2]; + + if (fwnode_property_read_u64_array(fwnode, "reg", regs, 2)) { + dev_err(dev, "Failed to get MC registers\n"); + fwnode_handle_put(fwnode); + ret = -ENODEV; + } + res.start = regs[0]; + res.end = regs[0] + regs[1] - 1; + res.flags = IORESOURCE_MEM; + tmp_mc.mc_vbase = devm_ioremap_resource(edac->dev, &res); + } else { + child_pdev = to_platform_device(fwnode->dev); + r = platform_get_resource(child_pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(dev, "Failed to get MC registers\n"); + ret = -ENODEV; + } + tmp_mc.mc_vbase = devm_ioremap_resource(edac->dev, r); + } + + + if (IS_ERR(tmp_mc.mc_vbase)) { + dev_err(dev, "unable to map MCU resource\n"); + devm_kfree(dev, &mc); + ret = -ENODEV; + goto err_group; + } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = 2; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = 1; + layers[1].is_virt_csrow = false; + mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, + sizeof(struct sw64_edac_mc)); + if (!mci) { + ret = -ENOMEM; + goto err_group; + } + + mc = mci->pvt_info; + *mc = tmp_mc; /* Copy over resource value */ + mc->edac = edac; + mc->name = "sw64_edac_mc_err"; + mc->mci = mci; + mci->pdev = &mci->dev; + mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = EDAC_MOD_STR; + mci->ctl_name = sw64_ctl_name; + mci->dev_name = mc->name; + mci->edac_check = sw64_edac_mc_check; + mci->ctl_page_to_phys = NULL; + mci->scrub_mode = SCRUB_SW_SRC; + edac->edac_idx = edac_mc_idx++; + + 
sw64_init_csrows(mci, mc); + + if (edac_mc_add_mc(mci)) { + dev_err(dev, "edac_mc_add_mc failed\n"); + ret = -EINVAL; + goto err_free; + } + + list_add(&mc->next, &edac->mc); + + devres_remove_group(edac->dev, sw64_edac_mc_add); + + dev_info(dev, "SW64 EDAC MC registered\n"); + + return 0; + +err_free: + edac_mc_free(mci); +err_group: + devres_release_group(edac->dev, sw64_edac_mc_add); + return ret; +} + +static int sw64_edac_probe(struct platform_device *pdev) +{ + struct sw64_edac *edac; + struct device *dev = &pdev->dev; + struct resource *res; + int ret; + acpi_status status; + unsigned long long sta; + struct fwnode_handle *fwnode; + struct sw64_platform_data *pdata; + struct sw64_edac_mc *mc; + int nports; + int i; + + edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL); + if (!edac) + return -ENOMEM; + +#ifdef CONFIG_NUMA + sw64_edac_get_node(edac, dev); +#endif + + edac->spbu_base = misc_platform_get_spbu_base(edac->node); + if (IS_ERR(edac->spbu_base)) + return PTR_ERR(edac->spbu_base); + + edac->dev = &pdev->dev; + platform_set_drvdata(pdev, edac); + INIT_LIST_HEAD(&edac->mc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENOENT; + goto out_err; + } + + if (!acpi_disabled) { + status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_STA", NULL, &sta); + if (ACPI_FAILURE(status)) { + ret = -EIO; + goto out_err; + } + if (!sta) { + ret = -EIO; + goto out_err; + } + } + + if (edac_op_state == EDAC_OPSTATE_INT) { + int irq; + /* acquire interrupt that reports errors */ + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, + irq, + sw64_edac_isr, + IRQF_SHARED, + "[EDAC] MC err", + edac); + if (ret < 0) { + ret = -ENODEV; + goto out_err; + } + } + + i = 0; + nports = device_get_child_node_count(dev); + + if (nports == 0) { + ret = -ENODEV; + goto out_err; + } + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto out_err; + } + + pdata->properties = 
devm_kcalloc(dev, nports, sizeof(*mc), GFP_KERNEL); + if (!pdata->properties) { + ret = -ENOMEM; + goto out_err; + } + + pdata->nports = nports; + + i = 0; + device_for_each_child_node(dev, fwnode) { + sw64_edac_mc_add(dev, fwnode, pdata, i); + } + + return 0; + +out_err: + return ret; +} + +static int sw64_edac_mc_remove(struct sw64_edac_mc *mc) +{ + edac_mc_del_mc(&mc->mci->dev); + edac_mc_free(mc->mci); + return 0; +} + +static int sw64_edac_remove(struct platform_device *pdev) +{ + struct sw64_edac *edac = dev_get_drvdata(&pdev->dev); + struct sw64_edac_mc *mc, *tmp_mc; + + list_for_each_entry_safe(mc, tmp_mc, &edac->mc, next) + sw64_edac_mc_remove(mc); + + return 0; +} + +static const struct of_device_id sw64_edac_of_match[] = { + { .compatible = "sunway,edac", .data = (void *)0 }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sw64_edac_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id sw64_edac_acpi_match[] = { + { "SUNW0201", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, sw64_edac_acpi_match); +#endif + +static struct platform_driver sw64_edac_driver = { + .probe = sw64_edac_probe, + .remove = sw64_edac_remove, + .driver = { + .name = "sw64-edac", + .of_match_table = of_match_ptr(sw64_edac_of_match), + .acpi_match_table = ACPI_PTR(sw64_edac_acpi_match), + } +}; + +static int __init sw64_edac_init(void) +{ + /* make sure error reporting method is sane */ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_INT: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + break; + } + + return platform_driver_register(&sw64_edac_driver); +} +module_init(sw64_edac_init); + +static void __exit sw64_edac_exit(void) +{ + platform_driver_unregister(&sw64_edac_driver); +} +module_exit(sw64_edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("He Chuyue "); +MODULE_DESCRIPTION("SW64 EDAC driver"); +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, + "EDAC Error Reporting state: 0=Poll, 2=Interrupt"); diff 
--git a/drivers/edac/sw64_edac.h b/drivers/edac/sw64_edac.h new file mode 100644 index 000000000000..b48b506bb7c9 --- /dev/null +++ b/drivers/edac/sw64_edac.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_EDAC_H_ +#define _SW64_EDAC_H_ + +#endif -- Gitee From 053ffad625b38f1e22aae119a3bb2995490dcbde Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Tue, 10 Feb 2026 16:25:41 +0800 Subject: [PATCH 152/231] sw64: add memb before/after atomic To fix a potential atomicity issue, this patch removes the architecture-specific implementations of __smp_mb__before_atomic and __smp_mb__after_atomic and replaces them with the common implementations using memb. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/barrier.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/sw_64/include/asm/barrier.h b/arch/sw_64/include/asm/barrier.h index bff199126c9f..ceadeb323f38 100644 --- a/arch/sw_64/include/asm/barrier.h +++ b/arch/sw_64/include/asm/barrier.h @@ -22,9 +22,6 @@ #define __ASM_SMP_MB #endif -#define __smp_mb__before_atomic() barrier() -#define __smp_mb__after_atomic() barrier() - #include #endif /* _ASM_SW64_BARRIER_H */ -- Gitee From 4b03a26939aa6aab2e7eb4c761d16f1373763aea Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Mon, 2 Mar 2026 09:25:24 +0800 Subject: [PATCH 153/231] sw64: fix mmap_base exceeding Java assertion address According to commit 4fd4e1ddbb83 ("sw64: fix random mmap base range"), the mmap_base address where Java dynamic libraries are mapped exceeded Java's assertion address of 0x7fff7ff8000. This commit fixes it. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/processor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h index 4360140e9535..03980e7cde72 100644 --- a/arch/sw_64/include/asm/processor.h +++ b/arch/sw_64/include/asm/processor.h @@ -45,7 +45,7 @@ #define TASK_SIZE_MAX TASK_SIZE_64 #define TASK_SIZE TASK_SIZE_64 -#define DEFAULT_MAP_WINDOW DEFAULT_MAP_WINDOW_64 +#define DEFAULT_MAP_WINDOW (DEFAULT_MAP_WINDOW_64 - SZ_8G) #ifdef CONFIG_SW64_FORCE_52BIT #define STACK_TOP_MAX TASK_SIZE -- Gitee From d4c35ad44fd73077490ab88d0288d497f8625444 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 17 Jul 2025 17:24:04 +0800 Subject: [PATCH 154/231] sw64: add fixmap support Allocate virtual address space and establish page tables for fixmap, enabling runtime modification of kernel page table contents. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 8 ++++++ arch/sw_64/include/asm/fixmap.h | 42 ++++++++++++++++++++++++++++++++ arch/sw_64/include/asm/pgtable.h | 6 +++++ arch/sw_64/mm/init.c | 29 ++++++++++++++++++++++ 4 files changed, 85 insertions(+) create mode 100644 arch/sw_64/include/asm/fixmap.h diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index c0e293b0bbc9..f1438771a6d2 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -294,6 +294,14 @@ config SW64_CPUAUTOPLUG Turns on the interface for SW64_CPU CPUAUTOPLUG. endmenu + +config SW64_KERNEL_PAGE_TABLE + bool "sw64 kernel page table" + depends on SUBARCH_C4 + default y + help + Map the kernel and the memory by page table. + # clear all implied options (don't want default values for those): # Most of these machines have ISA slots; not exactly sure which don't, # and this doesn't activate hordes of code, so do it always. 
diff --git a/arch/sw_64/include/asm/fixmap.h b/arch/sw_64/include/asm/fixmap.h new file mode 100644 index 000000000000..e39b831666b6 --- /dev/null +++ b/arch/sw_64/include/asm/fixmap.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FIXMAP_H +#define _ASM_SW64_FIXMAP_H + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + +#include +#include +#include +#include +#include + +enum fixed_addresses { + FIX_HOLE, + FIX_PTE, + FIX_PMD, + FIX_PUD, + FIX_TEXT_POKE0, + FIX_EARLYCON_MEM_BASE, + + __end_of_permanent_fixed_addresses, +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) + + FIX_BTMAP_END = __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + + __end_of_fixed_addresses +}; + +#define FIXMAP_PAGE_IO PAGE_KERNEL + +#define __early_set_fixmap __set_fixmap + +#define __late_set_fixmap __set_fixmap +#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR) +extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); + +#include + +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ +#endif /* _ASM_SW64_FIXMAP_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h index 2614b47d25dc..849828140aa7 100644 --- a/arch/sw_64/include/asm/pgtable.h +++ b/arch/sw_64/include/asm/pgtable.h @@ -89,6 +89,12 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) #define VMALLOC_END ((unsigned long)vmemmap) #endif +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +#define FIXADDR_TOP MODULES_VADDR +#define FIXADDR_SIZE SZ_8M +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#endif + /* * HMcode-imposed page table bits */ diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index d5e1baf23f72..5e6dd219f377 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include struct mem_desc_t mem_desc; #ifndef CONFIG_NUMA @@ -43,6 +45,14 @@ static pud_t vmalloc_pud[1024] 
__aligned(PAGE_SIZE); static phys_addr_t mem_start; static phys_addr_t mem_size_limit; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +pgd_t early_pg_dir[1024] __initdata __attribute__((__aligned__(PAGE_SIZE))); + +pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; +pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; +pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss; +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ + #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE unsigned long memory_block_size_bytes(void) { @@ -142,6 +152,25 @@ void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *ptep; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + ptep = &fixmap_pte[pte_index(addr)]; + + if (pgprot_val(prot)) + set_pte(ptep, pfn_pte(PHYS_PFN(phys), prot)); + else + pte_clear(&init_mm, addr, ptep); + + local_flush_tlb_all(); +} +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ + /* * paging_init() sets up the memory map. */ -- Gitee From eef24fadac5930cebf97a747af62bbea9a28d669 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 17 Jul 2025 17:30:44 +0800 Subject: [PATCH 155/231] sw64: map the early and the final page table Create temporary page tables in early_paging_init() and final page tables in paging_init(). When kernel page table support is detected via cpuid(), write CSR_ATC to switch the core to paging mode after setting up the temporary page tables. 
Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/cpu.h | 2 + arch/sw_64/include/asm/hmcall.h | 8 + arch/sw_64/include/asm/mmu.h | 7 + arch/sw_64/include/asm/pgtable.h | 81 ++++++-- arch/sw_64/include/asm/platform.h | 11 + arch/sw_64/kernel/dup_print.c | 6 - arch/sw_64/kernel/early_init.c | 3 + arch/sw_64/kernel/setup.c | 10 +- arch/sw_64/kernel/smp.c | 15 ++ arch/sw_64/mm/init.c | 335 +++++++++++++++++++++++++++++- 10 files changed, 446 insertions(+), 32 deletions(-) diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h index cfb6090baded..1697c2a3369e 100644 --- a/arch/sw_64/include/asm/cpu.h +++ b/arch/sw_64/include/asm/cpu.h @@ -71,4 +71,6 @@ static inline unsigned long get_cpu_freq(unsigned int cpu) } #endif +extern bool sunway_support_kpt; + #endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h index 60f68e31c568..24ef9e4b768b 100644 --- a/arch/sw_64/include/asm/hmcall.h +++ b/arch/sw_64/include/asm/hmcall.h @@ -22,6 +22,7 @@ #define HMC_mtinten 0x0F #define HMC_wrap_asid 0x10 #define HMC_load_mm 0x11 +#define HMC_rwatc 0x12 #define HMC_tbisasid 0x14 #define HMC_tbivpn 0x19 #define HMC_ret 0x1A @@ -172,6 +173,8 @@ __CALL_HMC_R0(rdksp, unsigned long); __CALL_HMC_W1(wrksp, unsigned long); __CALL_HMC_R0(rdhtctl, unsigned long); +__CALL_HMC_RW2(rwatc, unsigned long, unsigned long, unsigned long); + /* * Load a mm context. This is needed when we change the page * table pointer(CSR:PTBR) or when we update the ASID. 
@@ -253,6 +256,11 @@ static inline void wrap_asid(unsigned long asid, unsigned long ptbr) #define set_nmi(irq) setup_nmi(1, (irq)) #define clear_nmi(irq) setup_nmi(0, (irq)) +#define ATC_PAGE 1 +#define ATC_KSEG 3 +#define set_atc(val) rwatc(1, (val)) +#define get_atc() rwatc(0, 0) + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h index f24219fac654..395df786d114 100644 --- a/arch/sw_64/include/asm/mmu.h +++ b/arch/sw_64/include/asm/mmu.h @@ -7,4 +7,11 @@ typedef struct { unsigned long asid[NR_CPUS]; void *vdso; } mm_context_t; + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +void create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot, + void *(*pgtable_alloc)(void)); +#endif + #endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h index 849828140aa7..e0fef676dd7c 100644 --- a/arch/sw_64/include/asm/pgtable.h +++ b/arch/sw_64/include/asm/pgtable.h @@ -23,26 +23,6 @@ struct mm_struct; struct vm_area_struct; -static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) -{ - *pmdp = pmd; -} - -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, pmd_t pmdval) -{ - set_pmd(pmdp, pmdval); -} - -static inline void set_pud(pud_t *pudp, pud_t pud) -{ - *pudp = pud; -} - -static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) -{ - *p4dp = p4d; -} /* PGDIR_SHIFT determines what a forth-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) @@ -196,7 +176,11 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) #define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_LEAF | _PAGE_PROTNONE) #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF | (x)) -#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) 
+ +/* prot for kernel page table */ +#define PAGE_KERNEL_NOEXEC __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF | _PAGE_FOE) +#define PAGE_KERNEL_READONLY __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF | _PAGE_FOW | _PAGE_FOE) +#define PAGE_KERNEL_READONLY_EXEC __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF | _PAGE_FOW) #define page_valid_kern(x) ((x & (_PAGE_VALID | _PAGE_KERN)) == (_PAGE_VALID | _PAGE_KERN)) #endif @@ -227,6 +211,8 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) #define PAGE_COPY PAGE_COPY_EXEC #define PAGE_SHARED PAGE_SHARED_EXEC +#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) + /* * The hardware can handle write-only mappings, but as the sw64 * architecture does byte-wide writes with a read-modify-write @@ -252,6 +238,12 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC +#define cont_pmd_addr_end(addr, end) \ +({ \ + unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + /* * pgprot_noncached() is only for infiniband pci support, and a real * implementation for RAM would be more complicated. 
@@ -282,6 +274,38 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pteval); } +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; + + if (page_valid_kern(pmd_val(pmd))) { + mb(); + if ((pmd_val(pmd) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + set_pmd(pmdp, pmdval); +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + *pudp = pud; + + if (page_valid_kern(pud_val(pud))) { + mb(); + if ((pud_val(pud) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + *p4dp = p4d; +} #define pud_write pud_write static inline int pud_write(pud_t pud) { @@ -400,6 +424,13 @@ static inline int pmd_none(pmd_t pmd) return !pmd_val(pmd); } +#define pmd_leaf pmd_leaf +static inline int pmd_leaf(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + static inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; @@ -505,6 +536,13 @@ static inline int pud_none(pud_t pud) return !pud_val(pud); } +#define pud_leaf pud_leaf +static inline int pud_leaf(pud_t pud) +{ + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + static inline int pud_bad(pud_t pud) { return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; @@ -838,6 +876,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) pr_err("%s: %d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) #define pgd_ERROR(e) \ pr_err("%s: %d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +extern void early_paging_init(void); extern void paging_init(void); #define HAVE_ARCH_UNMAPPED_AREA diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h index 227f8eeb7f95..a1164258c640 100644 --- a/arch/sw_64/include/asm/platform.h +++ b/arch/sw_64/include/asm/platform.h @@ -9,6 +9,17 @@ #include #endif 
+#ifdef CONFIG_SW64_RRU +#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) +#define USER_PRINT_BUFF_LEN 0x100000UL +#define USER_MESSAGE_MAX_LEN 0x100000UL +#endif + +#ifdef CONFIG_SW64_RRK +#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) +#define PRINTK_SIZE 0x100000UL +#endif + extern struct boot_params *sunway_boot_params; extern unsigned long sunway_boot_magic; extern unsigned long sunway_dtb_address; diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c index 2d77ccae30d2..f15dbacd1c1b 100644 --- a/arch/sw_64/kernel/dup_print.c +++ b/arch/sw_64/kernel/dup_print.c @@ -8,12 +8,9 @@ #ifdef CONFIG_SW64_RRK -#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) - static DEFINE_SPINLOCK(printk_lock); static unsigned long sw64_printk_offset; -#define PRINTK_SIZE 0x100000UL static bool rrk_last_newline_end; static unsigned long rrk_last_id; @@ -113,9 +110,6 @@ void sw64_rrk_store(const char *text, u16 text_len, u64 ts_nsec, int level, #include static DEFINE_SPINLOCK(printf_lock); -#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) -#define USER_PRINT_BUFF_LEN 0x100000UL -#define USER_MESSAGE_MAX_LEN 0x100000UL unsigned long sw64_printf_offset; int sw64_user_printf(const char __user *buf, int len) { diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c index 2ec7a3e99443..8fb15a9e385c 100644 --- a/arch/sw_64/kernel/early_init.c +++ b/arch/sw_64/kernel/early_init.c @@ -2,6 +2,9 @@ #include #include +#include + +bool sunway_support_kpt; asmlinkage __visible void __init sw64_start_kernel(void) { diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index dcfd0acf89e1..8cb5e97fc280 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -675,6 +675,8 @@ setup_arch(char **cmdline_p) */ trap_init(); + early_paging_init(); + setup_cpu_features(); apply_alternatives_all(); @@ -701,8 +703,6 @@ setup_arch(char **cmdline_p) 
atomic_notifier_chain_register(&panic_notifier_list, &sw64_panic_block); - callback_init(); - /* * Process command-line arguments. */ @@ -724,6 +724,10 @@ setup_arch(char **cmdline_p) sw64_memblock_init(); + paging_init(); + + callback_init(); + /* Try to upgrade ACPI tables via initrd */ acpi_table_upgrade(); @@ -743,8 +747,6 @@ setup_arch(char **cmdline_p) zone_sizes_init(); - paging_init(); - kexec_control_page_init(); /* diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 968772b7013f..34f9d0ebbc82 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -186,6 +186,14 @@ void smp_callin(void) complete(&cpu_running); +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + /* switch to paging mode */ + if (sunway_support_kpt) { + set_atc(ATC_PAGE); + tbiv(); + } +#endif + /* Must have completely accurate bogos. */ local_irq_enable(); @@ -256,12 +264,19 @@ static void __init process_nr_cpu_ids(void) nr_cpu_ids = num_possible_cpus(); } +extern void * __init pgtable_alloc_fixmap(void); + void __init smp_rcb_init(struct smp_rcb_struct *smp_rcb_base_addr) { if (smp_rcb != NULL) return; smp_rcb = smp_rcb_base_addr; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + create_pgd_mapping((&init_mm)->pgd, (unsigned long)smp_rcb, __pa(smp_rcb), + CONFIG_PHYSICAL_START - __pa(smp_rcb), + PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); +#endif memset(smp_rcb, 0, sizeof(struct smp_rcb_struct)); /* Setup SMP_RCB fields that uses to activate secondary CPU */ smp_rcb->restart_entry = __smp_callin; diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 5e6dd219f377..8e8b14fcaea9 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -48,6 +48,13 @@ static phys_addr_t mem_size_limit; #ifdef CONFIG_SW64_KERNEL_PAGE_TABLE pgd_t early_pg_dir[1024] __initdata __attribute__((__aligned__(PAGE_SIZE))); +pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); +pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata 
__aligned(PAGE_SIZE); +pud_t early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); +pmd_t early_printk_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +pud_t early_printk_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE); + pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss; @@ -117,7 +124,9 @@ pgd_alloc(struct mm_struct *mm) static inline void switch_to_system_map(void) { +#ifndef CONFIG_SW64_KERNEL_PAGE_TABLE memset(swapper_pg_dir, 0, PAGE_SIZE); +#endif update_ptbr_sys(virt_to_phys(swapper_pg_dir)); #ifdef CONFIG_SUBARCH_C4 update_ptbr_usr(__pa_symbol(empty_zero_page)); @@ -169,13 +178,337 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) local_flush_tlb_all(); } + +static pte_t *__init get_pte_virt_fixmap(phys_addr_t phys) +{ + clear_fixmap(FIX_PTE); + return (pte_t *)set_fixmap_offset(FIX_PTE, phys); +} + +static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t phys) +{ + clear_fixmap(FIX_PMD); + return (pmd_t *)set_fixmap_offset(FIX_PMD, phys); +} + +static pud_t *__init get_pud_virt_fixmap(phys_addr_t phys) +{ + clear_fixmap(FIX_PUD); + return (pud_t *)set_fixmap_offset(FIX_PUD, phys); +} + +void * __init pgtable_alloc_fixmap(void) +{ + return (void *)__va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); +} + +static void __init +create_pte_mapping(pte_t *pte_first, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot) +{ + pte_t *pte; + unsigned long addr, next, end, pfn; + + addr = virt; + end = virt + size; + pte_first = get_pte_virt_fixmap(__pa(pte_first)); + for (; addr < end; addr = next) { + next = (addr + PAGE_SIZE) & PAGE_MASK; + pte = pte_first + pte_index(addr); + pfn = PHYS_PFN(phys); + set_pte(pte, pfn_pte(pfn, prot)); + phys += next - addr; + } +} + +static void __init +create_pmd_mapping(pmd_t *pmd_first, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot, + void 
*(*pgtable_alloc)(void)) +{ + pmd_t *pmd; + pte_t *pte; + unsigned long addr, next, end, pfn; + + addr = virt; + end = virt + size; + for (; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pmd = pmd_first + pmd_index(addr); + + if (next - addr == PMD_SIZE) { + pfn = PHYS_PFN(phys); + set_pmd(pmd, pfn_pmd(pfn, prot)); + } else { + if (!pmd_none(*pmd)) + pte = pte_offset_kernel(pmd, 0); + else { + pte = (pte_t *)pgtable_alloc(); + memset(get_pte_virt_fixmap(__pa(pte)), 0, + PAGE_SIZE); + pmd_populate(NULL, pmd, + virt_to_page((unsigned long)pte)); + } + create_pte_mapping(pte, addr, phys, next - addr, prot); + } + phys += next - addr; + } +} + +static void __init +create_cont_pmd_mapping(pmd_t *pmd_first, unsigned long virt, + unsigned long phys, unsigned long size, pgprot_t prot, + void *(*pgtable_alloc)(void)) +{ + pmd_t *pmd; + unsigned long addr, next, end, pfn, i; + + addr = virt; + end = virt + size; + pmd_first = get_pmd_virt_fixmap(__pa(pmd_first)); + for (; addr < end; addr = next) { + next = cont_pmd_addr_end(addr, end); + pmd = pmd_first + pmd_index(addr); + + if ((next - addr == CONT_PMD_SIZE) && + (PTRS_PER_PMD - pmd_index(addr) >= CONT_PMDS)) { + pfn = PHYS_PFN(phys); + for (i = 0; i < CONT_PMDS; i++) + set_pmd(pmd + i, pfn_pmd(pfn, + __pgprot(pgprot_val(prot) | + _PAGE_CONT))); + } else + create_pmd_mapping(pmd_first, addr, phys, next - addr, + prot, pgtable_alloc); + phys += next - addr; + } +} + +static void __init +create_pud_mapping(pud_t *pud_first, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot, + void *(*pgtable_alloc)(void)) +{ + pud_t *pud; + pmd_t *pmd; + unsigned long addr, next, end, pfn; + + addr = virt; + end = virt + size; + pud_first = get_pud_virt_fixmap(__pa(pud_first)); + for (; addr < end; addr = next) { + next = pud_addr_end(addr, end); + pud = pud_first + pud_index(addr); + if (next - addr == PUD_SIZE) { + pfn = PHYS_PFN(phys); + set_pud(pud, pfn_pud(pfn, prot)); + } else { + if 
(!pud_none(*pud)) + pmd = pmd_offset(pud, 0); + else { + pmd = (pmd_t *)pgtable_alloc(); + memset(get_pmd_virt_fixmap(__pa(pmd)), 0, + PAGE_SIZE); + pud_populate(NULL, pud, pmd); + } + create_cont_pmd_mapping(pmd, addr, phys, next - addr, + prot, pgtable_alloc); + } + phys += next - addr; + } +} + +void __init +create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot, + void *(*pgtable_alloc)(void)) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + unsigned long addr, next, end; + + addr = virt & PAGE_MASK; + phys &= PAGE_MASK; + end = PAGE_ALIGN(virt + size); + for (; addr < end; addr = next) { + next = pgd_addr_end(addr, end); + pgd = pgd_offset_pgd(pgdir, addr); + p4d = p4d_offset(pgd, addr); + + if (!p4d_none(*p4d)) + pud = pud_offset(p4d, 0); + else { + pud = (pud_t *)pgtable_alloc(); + memset(get_pud_virt_fixmap(__pa(pud)), 0, PAGE_SIZE); + p4d_populate(NULL, p4d, pud); + } + create_pud_mapping(pud, addr, phys, next - addr, prot, + pgtable_alloc); + phys += next - addr; + } + clear_fixmap(FIX_PTE); + clear_fixmap(FIX_PMD); + clear_fixmap(FIX_PUD); +} + +static void __init early_create_pmd(pgd_t *pgdir, pud_t *pud, pmd_t *pmd, + unsigned long start_va, unsigned long size, unsigned long pa) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + unsigned long addr, end_va; + int pmd_num, i; + + addr = start_va & PMD_MASK; + end_va = start_va + size; + pmd_num = (end_va - addr) / PMD_SIZE; + if (end_va % PMD_SIZE) + pmd_num += 1; + + pgdp = pgd_offset_pgd(pgdir, addr); + p4dp = p4d_offset(pgdp, addr); + if (p4d_none(*p4dp)) { + BUG_ON(!pud); + p4d_populate(NULL, p4dp, pud); + } + pudp = pud_offset(p4dp, addr); + if (pud_none(*pudp)) { + BUG_ON(!pmd); + pud_populate(NULL, pudp, pmd); + } + + for (i = 0; i < pmd_num; i++) { + pmdp = pmd_offset(pudp, addr); + set_pmd(pmdp, pfn_pmd(PHYS_PFN(pa), PAGE_KERNEL)); + addr += PMD_SIZE; + pa += PMD_SIZE; + } +} + +static void __init fixmap_init(pgd_t *pgdir) +{ + 
pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + unsigned long addr = FIXADDR_START & PMD_MASK; + + pgdp = pgd_offset_pgd(pgdir, addr); + p4dp = p4d_offset(pgdp, addr); + if (p4d_none(*p4dp)) + p4d_populate(NULL, p4dp, (pud_t *)fixmap_pud); + pudp = pud_offset(p4dp, addr); + if (pud_none(*pudp)) + pud_populate(NULL, pudp, (pmd_t *)fixmap_pmd); + pmdp = pmd_offset(pudp, addr); + if (pmd_none(*pmdp)) + pmd_populate(NULL, pmdp, virt_to_page(fixmap_pte)); +} #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ /* - * paging_init() sets up the memory map. + * early_paging_init sets up a temporary memory map. + */ +void __init early_paging_init(void) +{ +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + unsigned long img_start, img_size; + unsigned long dtb_start, dtb_size = 0; + + img_start = (unsigned long)(KERNEL_START_PHYS + __START_KERNEL_map); + img_size = (unsigned long)_end - img_start; + dtb_start = sunway_dtb_address; + + fixmap_init(early_pg_dir); + +#ifdef CONFIG_SW64_RRK + early_create_pmd(early_pg_dir, (pud_t *)early_printk_pud, + (pmd_t *)early_printk_pmd, KERNEL_PRINTK_BUFF_BASE, + PRINTK_SIZE, __pa(KERNEL_PRINTK_BUFF_BASE)); +#endif + early_create_pmd(early_pg_dir, (pud_t *)early_pud, (pmd_t *)early_pmd, + img_start, img_size, __pa(img_start)); + if (dtb_start) { + dtb_size = (unsigned long)fdt_totalsize((void *)dtb_start); + early_create_pmd(early_pg_dir, (pud_t *)early_dtb_pud, + (pmd_t *)early_dtb_pmd, dtb_start, dtb_size, + __pa(dtb_start)); + } + update_ptbr_sys(virt_to_phys(early_pg_dir)); + + /* switch to paging mode */ + if (sunway_support_kpt) { + pr_info("SW64 kernel page table enabled\n"); + set_atc(ATC_PAGE); + } + + tbiv(); +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ +} + +/* + * paging_init() sets up the final memory map. 
*/ void __init paging_init(void) { +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + unsigned long sw64_vcpucb_start = PAGE_OFFSET + 0x20000; + unsigned long sw64_vcpucb_size = 0x60000; + unsigned long sw64_reserve_start = CONFIG_PHYSICAL_START + PAGE_OFFSET; + unsigned long sw64_reserve_size = (unsigned long)_stext - sw64_reserve_start; + unsigned long text_start = (unsigned long)_stext; + unsigned long text_size = (unsigned long)_etext - text_start; + unsigned long ro_start = (unsigned long)__start_rodata; + unsigned long ro_size = (unsigned long)__init_begin - ro_start; + unsigned long init_start = (unsigned long)__init_begin; + unsigned long init_size = (unsigned long)__init_end - init_start; + unsigned long data_start = (unsigned long)_sdata; + unsigned long data_size = (unsigned long)_end - data_start; + pgd_t *pgdir = (&init_mm)->pgd; + phys_addr_t start, end; + u64 i; + + fixmap_init(pgdir); + + create_pgd_mapping(pgdir, sw64_vcpucb_start, __pa(sw64_vcpucb_start), + sw64_vcpucb_size, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); +#ifdef CONFIG_SW64_RRU + create_pgd_mapping(pgdir, USER_PRINT_BUFF_BASE, __pa(USER_PRINT_BUFF_BASE), + USER_PRINT_BUFF_LEN, PAGE_KERNEL_NOEXEC, + pgtable_alloc_fixmap); +#endif +#ifdef CONFIG_SW64_RRK + create_pgd_mapping(pgdir, KERNEL_PRINTK_BUFF_BASE, __pa(KERNEL_PRINTK_BUFF_BASE), + PRINTK_SIZE, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); +#endif + create_pgd_mapping(pgdir, sw64_reserve_start, __pa(sw64_reserve_start), + sw64_reserve_size, PAGE_KERNEL_NOEXEC, + pgtable_alloc_fixmap); + create_pgd_mapping(pgdir, text_start, __pa(text_start), text_size, + PAGE_KERNEL_READONLY_EXEC, pgtable_alloc_fixmap); + create_pgd_mapping(pgdir, ro_start, __pa(ro_start), ro_size, + PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + create_pgd_mapping(pgdir, init_start, __pa(init_start), init_size, + PAGE_KERNEL, pgtable_alloc_fixmap); + create_pgd_mapping(pgdir, data_start, __pa(data_start), data_size, + PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + + 
memblock_mark_nomap(__pa(sw64_reserve_start), + __pa((unsigned long)_end - sw64_reserve_start)); + for_each_mem_range(i, &start, &end) { + if (start >= end) + break; + create_pgd_mapping(pgdir, (unsigned long)__va(start), + (unsigned long)start, + (unsigned long)(end - start), + PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + } + memblock_clear_nomap(__pa(sw64_reserve_start), + __pa((unsigned long)_end - sw64_reserve_start)); +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ } static void __init setup_socket_info(void) -- Gitee From c4f03809aa58f2c9574adf804817c73d07a67971 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Tue, 22 Jul 2025 10:54:30 +0800 Subject: [PATCH 156/231] sw64: modify codes with fixmap Create a temporary fixmap mapping for a text page, modify through this mapping, then remove the mapping. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/insn.h | 3 ++ arch/sw_64/kernel/ftrace.c | 16 ++++---- arch/sw_64/kernel/insn.c | 67 +++++++++++++++++++++++++++------- arch/sw_64/kernel/jump_label.c | 4 ++ 4 files changed, 67 insertions(+), 23 deletions(-) diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h index 7bd24d6f6598..901df28338c5 100644 --- a/arch/sw_64/include/asm/insn.h +++ b/arch/sw_64/include/asm/insn.h @@ -56,6 +56,9 @@ extern unsigned int sw64_insn_nop(void); extern unsigned int sw64_insn_call(unsigned int ra, unsigned int rb); extern unsigned int sw64_insn_sys_call(unsigned int num); extern unsigned int sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc); +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +extern int sw64_patch_text_nosync(void *addr, u32 insn); +#endif #define SW64_OPCODE_RA(opcode) ((opcode >> 21) & 0x1f) diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c index 84ba10d93c9f..2f36ef89df70 100644 --- a/arch/sw_64/kernel/ftrace.c +++ b/arch/sw_64/kernel/ftrace.c @@ -77,10 +77,9 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 
insn[1] = SW64_CALL(R28, R28, 0); insn[2] = SW64_NOP; - *((u32 *)pc) = insn[0]; - mb(); - *((u32 *)(pc + 4)) = insn[1]; - *((u32 *)(pc + 8)) = insn[2]; + ftrace_modify_code(pc, insn[0]); + ftrace_modify_code(pc + 4, insn[1]); + ftrace_modify_code(pc + 8, insn[2]); return 0; } @@ -94,10 +93,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; unsigned int insn[3] = {SW64_NOP, SW64_NOP, SW64_NOP}; - *((u32 *)(pc + 8)) = insn[2]; - *((u32 *)(pc + 4)) = insn[1]; - mb(); - *((u32 *)pc) = insn[0]; + ftrace_modify_code(pc + 8, insn[2]); + ftrace_modify_code(pc + 4, insn[1]); + ftrace_modify_code(pc, insn[0]); return 0; } @@ -135,7 +133,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, /* ldl r28,(ftrace_addr_offset)(r8) */ insn[0] = (0x23U << 26) | (28U << 21) | (8U << 16) | offset; - copy_to_kernel_nofault((void *)pc, insn, SW64_INSN_SIZE); + ftrace_modify_code(pc, insn[0]); return 0; } diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c index 281578e1bfc0..1ef13ff18f27 100644 --- a/arch/sw_64/kernel/insn.c +++ b/arch/sw_64/kernel/insn.c @@ -17,7 +17,38 @@ #include #include -//static DEFINE_RAW_SPINLOCK(patch_lock); +#include + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +static DEFINE_RAW_SPINLOCK(patch_lock); + +static void __kprobes *patch_map(void *addr, int fixmap) +{ + unsigned long uintaddr = (uintptr_t)addr; + struct page *page; + + if (core_kernel_text((unsigned long)addr)) + page = pfn_to_page(PHYS_PFN(__pa(addr))); + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) + page = vmalloc_to_page(addr); + else + return addr; + + BUG_ON(!page); + return (void *)set_fixmap_offset(fixmap, page_to_pa(page) + + (uintaddr & ~PAGE_MASK)); +} + +static void __kprobes patch_unmap(int fixmap) +{ + clear_fixmap(fixmap); +} + +int __kprobes sw64_patch_text_nosync(void *addr, u32 insn) +{ + return sw64_insn_write(addr, insn); +} +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ int __kprobes 
sw64_insn_read(void *addr, u32 *insnp) { @@ -33,30 +64,37 @@ int __kprobes sw64_insn_read(void *addr, u32 *insnp) static int __kprobes __sw64_insn_write(void *addr, __le32 insn) { - void *waddr = addr; int ret; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + void *waddr; + unsigned long flags = 0; - //raw_spin_lock_irqsave(&patch_lock, flags); - + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); ret = copy_to_kernel_nofault(waddr, &insn, SW64_INSN_SIZE); - - //raw_spin_unlock_irqrestore(&patch_lock, flags); - + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); +#else + ret = copy_to_kernel_nofault(addr, &insn, SW64_INSN_SIZE); +#endif return ret; } static int __kprobes __sw64_insn_double_write(void *addr, __le64 insn) { - void *waddr = addr; - //unsigned long flags = 0; int ret; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + void *waddr; + unsigned long flags = 0; - //raw_spin_lock_irqsave(&patch_lock, flags); - + raw_spin_lock_irqsave(&patch_lock, flags); + waddr = patch_map(addr, FIX_TEXT_POKE0); ret = copy_to_kernel_nofault(waddr, &insn, 2 * SW64_INSN_SIZE); - - //raw_spin_unlock_irqrestore(&patch_lock, flags); - + patch_unmap(FIX_TEXT_POKE0); + raw_spin_unlock_irqrestore(&patch_lock, flags); +#else + ret = copy_to_kernel_nofault(addr, &insn, 2 * SW64_INSN_SIZE); +#endif return ret; } @@ -77,6 +115,7 @@ int __kprobes sw64_insn_double_write(void *addr, u64 insn) return -EINVAL; return __sw64_insn_double_write(addr, cpu_to_le64(insn)); } + unsigned int __kprobes sw64_insn_nop(void) { return SW64_BIS(R31, R31, R31); diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c index f3bc40370e4d..4e448dea4221 100644 --- a/arch/sw_64/kernel/jump_label.c +++ b/arch/sw_64/kernel/jump_label.c @@ -18,7 +18,11 @@ void arch_jump_label_transform(struct jump_entry *entry, insn = sw64_insn_nop(); } +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + sw64_patch_text_nosync(insnp, insn); +#else *insnp = insn; +#endif 
flush_icache_range(entry->code, entry->code + SW64_INSN_SIZE); } -- Gitee From c55fe751b434e262f77656a475478ebd1ca609ff Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Tue, 22 Jul 2025 15:53:00 +0800 Subject: [PATCH 157/231] sw64: set protection for kernel page table Use mark_rodata_ro() to configure distinct page permissions for rodata section in the kernel page table. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 4 +- arch/sw_64/include/asm/set_memory.h | 13 +++ arch/sw_64/mm/Makefile | 1 + arch/sw_64/mm/init.c | 12 +++ arch/sw_64/mm/pageattr.c | 128 ++++++++++++++++++++++++++++ 5 files changed, 157 insertions(+), 1 deletion(-) create mode 100644 arch/sw_64/include/asm/set_memory.h create mode 100644 arch/sw_64/mm/pageattr.c diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index f1438771a6d2..620699b359b2 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -17,7 +17,9 @@ config SW64 select ARCH_HAS_PMEM_API select ARCH_HAS_PTE_DEVMAP select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SET_MEMORY if SW64_KERNEL_PAGE_TABLE select ARCH_HAS_SG_CHAIN + select ARCH_HAS_STRICT_KERNEL_RWX if SW64_KERNEL_PAGE_TABLE select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_VM_GET_PAGE_PROT select ARCH_HAS_ZONE_DEVICE @@ -67,7 +69,7 @@ config SW64 select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO - select GENERIC_IOREMAP if SUBARCH_C4 + select GENERIC_IOREMAP if SW64_KERNEL_PAGE_TABLE select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP select GENERIC_IRQ_LEGACY select GENERIC_IRQ_MIGRATION if SMP diff --git a/arch/sw_64/include/asm/set_memory.h b/arch/sw_64/include/asm/set_memory.h new file mode 100644 index 000000000000..6429975a7ad7 --- /dev/null +++ b/arch/sw_64/include/asm/set_memory.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_SET_MEMORY_H +#define _ASM_SW64_SET_MEMORY_H + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +int set_memory_ro(unsigned long 
addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +#endif + +#endif /* _ASM_SW64_SET_MEMORY_H */ diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile index 8b9d6e4d2ebf..1f6cd3965633 100644 --- a/arch/sw_64/mm/Makefile +++ b/arch/sw_64/mm/Makefile @@ -7,6 +7,7 @@ obj-y := init.o fault.o physaddr.o mmap.o extable.o +obj-$(CONFIG_SW64_KERNEL_PAGE_TABLE) += pageattr.o obj-$(CONFIG_NUMA) += numa.o ifeq ($(CONFIG_SUBARCH_C4),y) obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage_c4.o diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 8e8b14fcaea9..433b5be03573 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -511,6 +512,17 @@ void __init paging_init(void) #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ } +#ifdef CONFIG_STRICT_KERNEL_RWX +void mark_rodata_ro(void) +{ + unsigned long ro_start = (unsigned long)__start_rodata; + unsigned long ro_size = (unsigned long)__init_begin - ro_start; + + if (sunway_support_kpt) + set_memory_ro(ro_start, PAGE_ALIGN(ro_size) >> PAGE_SHIFT); +} +#endif + static void __init setup_socket_info(void) { int i; diff --git a/arch/sw_64/mm/pageattr.c b/arch/sw_64/mm/pageattr.c new file mode 100644 index 000000000000..2949fa7b42dd --- /dev/null +++ b/arch/sw_64/mm/pageattr.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include + +struct pageattr_masks { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +static unsigned long +set_pageattr_masks(unsigned long val, struct mm_walk *walk) +{ + struct pageattr_masks *masks = walk->private; + unsigned long new_val = val; + + new_val &= ~(pgprot_val(masks->clear_mask)); + new_val |= (pgprot_val(masks->set_mask)); + + return new_val; +} + +static int pageattr_pud_entry(pud_t *pud, unsigned long addr, + unsigned long next, 
struct mm_walk *walk) +{ + pud_t val = READ_ONCE(*pud); + + if (pud_huge(val)) { + val = __pud(set_pageattr_masks(pud_val(val), walk)); + set_pud(pud, val); + } + + return 0; +} + +static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pmd_t val = READ_ONCE(*pmd); + + if (pmd_huge(val)) { + val = __pmd(set_pageattr_masks(pmd_val(val), walk)); + set_pmd(pmd, val); + } + + return 0; +} + +static int pageattr_pte_entry(pte_t *pte, unsigned long addr, + unsigned long next, struct mm_walk *walk) +{ + pte_t val = READ_ONCE(*pte); + + val = __pte(set_pageattr_masks(pte_val(val), walk)); + set_pte(pte, val); + + return 0; +} + +static const struct mm_walk_ops pageattr_ops = { + .pud_entry = pageattr_pud_entry, + .pmd_entry = pageattr_pmd_entry, + .pte_entry = pageattr_pte_entry, +}; + +static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, + pgprot_t clear_mask) +{ + int ret; + unsigned long start = addr; + unsigned long end = start + PAGE_SIZE * numpages; + struct pageattr_masks masks = { + .set_mask = set_mask, + .clear_mask = clear_mask + }; + + if (!numpages) + return 0; + + mmap_read_lock(&init_mm); + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, + &masks); + mmap_read_unlock(&init_mm); + + local_flush_tlb_all(); + + return ret; +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + if (sunway_support_kpt) + return __set_memory(addr, numpages, __pgprot(_PAGE_FOW), + __pgprot(_PAGE_FOR)); + else + return 0; +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + if (sunway_support_kpt) + return __set_memory(addr, numpages, __pgprot(0), + __pgprot(_PAGE_FOR | _PAGE_FOW)); + else + return 0; +} + +int set_memory_x(unsigned long addr, int numpages) +{ + if (sunway_support_kpt) + return __set_memory(addr, numpages, __pgprot(0), + __pgprot(_PAGE_FOE)); + else + return 0; +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + if (sunway_support_kpt) + 
return __set_memory(addr, numpages, __pgprot(_PAGE_FOE), + __pgprot(0)); + else + return 0; +} -- Gitee From fbaa5e7b58550004ceff24df202658c2ffffb61a Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 18 Sep 2025 11:28:12 +0800 Subject: [PATCH 158/231] sw64: setup mapping for efi_mm Create page table mappings in efi_mm for all efi memory descs. During efi_call_virt, first switch to efi_mm to perform the callback, then switch back to init_mm. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/efi.h | 11 +++++ arch/sw_64/kernel/efi.c | 43 +++++++++++++++++++ drivers/firmware/efi/sunway-runtime.c | 59 ++++++++++++++++++++++++++- 3 files changed, 112 insertions(+), 1 deletion(-) diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h index f2a470127c47..63d1d3d32c50 100644 --- a/arch/sw_64/include/asm/efi.h +++ b/arch/sw_64/include/asm/efi.h @@ -3,6 +3,9 @@ #ifndef _ASM_SW64_EFI_H #define _ASM_SW64_EFI_H +#include + +#include #include #ifdef CONFIG_EFI @@ -21,8 +24,16 @@ extern unsigned long sunway_bios_version; #define sunway_bios_version (0) #endif /* CONFIG_EFI */ +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); +#define arch_efi_call_virt_setup() efi_virtmap_load() +#define arch_efi_call_virt_teardown() efi_virtmap_unload() +void efi_virtmap_load(void); +void efi_virtmap_unload(void); +#else #define arch_efi_call_virt_setup() #define arch_efi_call_virt_teardown() +#endif #define ARCH_EFI_IRQ_FLAGS_MASK 0x00000001 diff --git a/arch/sw_64/kernel/efi.c b/arch/sw_64/kernel/efi.c index 7955fa840a43..ac6d8dd42a71 100644 --- a/arch/sw_64/kernel/efi.c +++ b/arch/sw_64/kernel/efi.c @@ -2,8 +2,10 @@ #include #include +#include #include +#include bool efi_poweroff_required(void) { @@ -17,3 +19,44 @@ bool efi_poweroff_required(void) return efi_enabled(EFI_RUNTIME_SERVICES); } + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +static __init pgprot_t 
create_mapping_protection(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_RUNTIME_SERVICES_CODE: + case EFI_PAL_CODE: + return PAGE_KERNEL_READONLY_EXEC; + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_DATA: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_MEMORY_NVS: + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + case EFI_MAX_MEMORY_TYPE: + return PAGE_KERNEL_NOEXEC; + default: + break; + } + + return PAGE_KERNEL; +} + +static void * __init pgtable_alloc_late(void) +{ + return (void *)__get_free_page(GFP_KERNEL); +} + +int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) +{ + pgprot_t prot = create_mapping_protection(md); + unsigned long start, size; + + start = (unsigned long)__va(md->phys_addr); + size = (unsigned long)(md->num_pages << EFI_PAGE_SHIFT); + create_pgd_mapping(mm->pgd, start, (unsigned long)md->phys_addr, size, + prot, pgtable_alloc_late); + + return 0; +} +#endif diff --git a/drivers/firmware/efi/sunway-runtime.c b/drivers/firmware/efi/sunway-runtime.c index 6bd96cff7d5d..489792adc6b1 100644 --- a/drivers/firmware/efi/sunway-runtime.c +++ b/drivers/firmware/efi/sunway-runtime.c @@ -29,6 +29,57 @@ #include #include #include +#include + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + +DEFINE_PER_CPU(unsigned long, atc_state); + +static bool __init efi_virtmap_init(void) +{ + efi_memory_desc_t *md; + + efi_mm.pgd = pgd_alloc(&efi_mm); + memcpy(efi_mm.pgd + USER_PTRS_PER_PGD, + init_mm.pgd + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + mm_init_cpumask(&efi_mm); + init_new_context(NULL, &efi_mm); + + for_each_efi_memory_desc(md) { + phys_addr_t phys = md->phys_addr; + int ret; + + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + + ret = efi_create_mapping(&efi_mm, md); + if (ret) { + pr_warn("EFI remap %pa: failed to create mapping (%d)\n", + &phys, ret); + return false; + } + } + + return true; +} + +void efi_virtmap_load(void) +{ + preempt_disable(); + 
update_ptbr_sys(virt_to_phys(efi_mm.pgd)); + /* switch CSR_ATC for bios compatibility */ + this_cpu_write(atc_state, get_atc()); + set_atc(ATC_KSEG); +} + +void efi_virtmap_unload(void) +{ + set_atc(this_cpu_read(atc_state)); + update_ptbr_sys(virt_to_phys(init_mm.pgd)); + preempt_enable(); +} +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ /* * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., @@ -63,6 +114,13 @@ static int __init sunway_enable_runtime_services(void) return 0; } +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + if (!efi_virtmap_init()) { + pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n"); + return -ENOMEM; + } +#endif + /* Set up runtime services function pointers */ efi_native_runtime_setup(); set_bit(EFI_RUNTIME_SERVICES, &efi.flags); @@ -71,7 +129,6 @@ static int __init sunway_enable_runtime_services(void) } early_initcall(sunway_enable_runtime_services); - static int __init sunway_dmi_init(void) { /* -- Gitee From 01beecb44da94eb44fe8cf9c194e49fdf03b034a Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Wed, 23 Jul 2025 09:35:20 +0800 Subject: [PATCH 159/231] sw64: reset ATC for kvm Reset vcb.atc to "3" when kvm restarts. 
Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kvm/sw64.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c index e9fe07cce1f1..90031e61b095 100644 --- a/arch/sw_64/kvm/sw64.c +++ b/arch/sw_64/kvm/sw64.c @@ -481,6 +481,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (vcpu->arch.restart == 1) { /* handle reset vCPU */ vcpu->arch.regs.pc = GUEST_RESET_PC; +#ifdef CONFIG_SUBARCH_C4 + vcpu->arch.vcb.atc = 3; +#endif vcpu->arch.restart = 0; } -- Gitee From 940734b40feb7c479ddcf9d86c53276f06182fb2 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Wed, 23 Jul 2025 11:02:58 +0800 Subject: [PATCH 160/231] sw64: setup early_ioremap Select GENERIC_EARLY_IOREMAP for C4, which map IO space by fixmap. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 1 + arch/sw_64/include/asm/Kbuild | 1 + arch/sw_64/include/asm/dmi.h | 4 ++-- arch/sw_64/include/asm/early_ioremap.h | 8 ++++++++ arch/sw_64/kernel/setup.c | 8 ++++++++ 5 files changed, 20 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index 620699b359b2..db5ebfe253be 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -68,6 +68,7 @@ config SW64 select EDAC_SUPPORT select GENERIC_ARCH_TOPOLOGY select GENERIC_CLOCKEVENTS + select GENERIC_EARLY_IOREMAP if SW64_KERNEL_PAGE_TABLE select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO select GENERIC_IOREMAP if SW64_KERNEL_PAGE_TABLE select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild index bbd410bda0a4..beeffdb0188d 100644 --- a/arch/sw_64/include/asm/Kbuild +++ b/arch/sw_64/include/asm/Kbuild @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +generic-$(CONFIG_SW64_KERNEL_PAGE_TABLE) += early_ioremap.h generic-y += clkdev.h generic-y += export.h generic-y += mcs_spinlock.h diff --git a/arch/sw_64/include/asm/dmi.h b/arch/sw_64/include/asm/dmi.h 
index 05e80c9a3a76..6cf70a01475d 100644 --- a/arch/sw_64/include/asm/dmi.h +++ b/arch/sw_64/include/asm/dmi.h @@ -23,8 +23,8 @@ /* Use early IO mappings for DMI because it's initialized early */ #define dmi_early_remap(x, l) early_ioremap(x, l) #define dmi_early_unmap(x, l) early_iounmap(x, l) -#define dmi_remap(x, l) early_ioremap(x, l) -#define dmi_unmap(x) early_iounmap(x, 0) +#define dmi_remap(x, l) ioremap(x, l) +#define dmi_unmap(x) iounmap(x) #define dmi_alloc(l) kzalloc(l, GFP_KERNEL) #endif /* _ASM_SW64_DMI_H */ diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h index 172b96a401cb..dd57d34b26f4 100644 --- a/arch/sw_64/include/asm/early_ioremap.h +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -5,6 +5,8 @@ #include #include +#ifndef CONFIG_GENERIC_EARLY_IOREMAP + static inline void __iomem * early_ioremap(unsigned long phys_addr, unsigned long size) { @@ -26,4 +28,10 @@ static inline void early_iounmap(volatile void __iomem *addr, unsigned long size } #define early_memunmap(addr, size) early_iounmap(addr, size) +#else + +#include + +#endif /* !CONFIG_GENERIC_EARLY_IOREMAP */ + #endif /* _ASM_SW64_EARLY_IOREMAP_H */ diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index 8cb5e97fc280..f06204022c9f 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -676,6 +677,9 @@ setup_arch(char **cmdline_p) trap_init(); early_paging_init(); +#ifdef CONFIG_GENERIC_EARLY_IOREMAP + early_ioremap_setup(); +#endif setup_cpu_features(); @@ -737,6 +741,10 @@ setup_arch(char **cmdline_p) if (acpi_disabled) device_tree_init(); +#ifdef CONFIG_GENERIC_EARLY_IOREMAP + early_ioremap_reset(); +#endif + setup_smp(); sw64_numa_init(); -- Gitee From 9d6a671bbada45296e8146ae5afde2177d24ef4b Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Mon, 13 Oct 2025 15:07:49 +0800 Subject: [PATCH 161/231] sw64: map legacy I/O to K segment Map 
legacy I/O regions for LPC and PCI into the K segment to allow access via __va(). Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/mm/init.c | 31 +++++++++++++++++++++++++++++ drivers/pci/controller/pci-sunway.c | 4 ++++ 2 files changed, 35 insertions(+) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 433b5be03573..b61b29f276b7 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -408,6 +408,35 @@ static void __init fixmap_init(pgd_t *pgdir) if (pmd_none(*pmdp)) pmd_populate(NULL, pmdp, virt_to_page(fixmap_pte)); } + +/* + * Map legacy io to K segmemt in advance. + */ +extern unsigned long legacy_io_base; +extern unsigned long legacy_io_shift; +static void __init map_legacy_io(pgd_t *pgdir) +{ + unsigned long pci_io_start; + unsigned long lpc_legacy_io_start = LPC_LEGACY_IO; + unsigned long legacy_io_start = legacy_io_base; + unsigned long size = 0x10000; + unsigned long i, j; + pgprot_t prot_none; + + prot_none = __pgprot(pgprot_val(PAGE_KERNEL_READONLY) | _PAGE_FOW); + + for (i = 0; i < 2; i++) { + for (j = 0; j < 6; j++) { + pci_io_start = SW64_PCI_IO_BASE(i, j) | PCI_LEGACY_IO; + create_pgd_mapping(pgdir, (unsigned long)__va(pci_io_start), pci_io_start, + size, prot_none, pgtable_alloc_fixmap); + } + } + create_pgd_mapping(pgdir, (unsigned long)__va(legacy_io_start), legacy_io_start, + size << legacy_io_shift, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + create_pgd_mapping(pgdir, (unsigned long)__va(lpc_legacy_io_start), lpc_legacy_io_start, + size, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); +} #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ /* @@ -474,6 +503,8 @@ void __init paging_init(void) fixmap_init(pgdir); + map_legacy_io(pgdir); + create_pgd_mapping(pgdir, sw64_vcpucb_start, __pa(sw64_vcpucb_start), sw64_vcpucb_size, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); #ifdef CONFIG_SW64_RRU diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c index 
d8fc6b280120..7225f6f48903 100644 --- a/drivers/pci/controller/pci-sunway.c +++ b/drivers/pci/controller/pci-sunway.c @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -865,6 +866,9 @@ static int pci_prepare_controller(struct pci_controller *hose, hose->sparse_io_base = 0; hose->dense_mem_base = props[PROP_PCIE_IO_BASE]; hose->dense_io_base = props[PROP_EP_IO_BASE]; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + set_memory_rw((unsigned long)__va(hose->dense_io_base), 0x10000 >> PAGE_SHIFT); +#endif if (!is_guest_or_emul()) { hose->rc_config_space_base = ioremap(props[PROP_RC_CONFIG_BASE], SUNWAY_RC_SIZE); -- Gitee From 34f7286a904f6a384b1a4629b7315a4088716f74 Mon Sep 17 00:00:00 2001 From: Lei Yilong Date: Thu, 16 Oct 2025 13:42:02 +0800 Subject: [PATCH 162/231] sw64: use ioremap to map IO address Readq is no longer used in function get_vt_smp_info(). Use ioremap to map the IO physical address before accessing the IO virtual address. Signed-off-by: Lei Yilong Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kernel/topology.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c index 76b80593a9d1..605a3e73953b 100644 --- a/arch/sw_64/kernel/topology.c +++ b/arch/sw_64/kernel/topology.c @@ -22,13 +22,16 @@ static void __init get_vt_smp_info(void) { unsigned long smp_info; void __iomem *spbu_base = misc_platform_get_spbu_base(0); + unsigned long *smp_info_addr; - smp_info = readq(spbu_base + OFFSET_SMP_INFO); + smp_info_addr = ioremap((phys_addr_t)(__pa(spbu_base) + OFFSET_SMP_INFO), 0x80); + smp_info = *smp_info_addr; if (smp_info == -1UL) smp_info = 0; topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK; topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK; topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK; + iounmap(smp_info_addr); } static void __init init_topo_threads(void) -- Gitee From 
3f274152ef85a47e5c62c7e038465b17b2368d05 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Fri, 31 Oct 2025 17:41:27 +0800 Subject: [PATCH 163/231] sw64: move non-boot core's kernel page table enabling logic to hmcode Currently, the enabling of non-boot core's kernel page table is in smp_callin. Accessing __smp_callin can result in a PTINVALID error because kernel page table is not enabled for non-boot cores. Move non-boot core's kernel page table enabling logic before calling __smp_callin to hmcode, so that non-boot core is already running in kernel page table mode when jumping to __smp_callin. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/smp.h | 1 + arch/sw_64/kernel/smp.c | 14 ++++++-------- arch/sw_64/mm/init.c | 5 +++++ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h index 0b1ebf2143f2..cdd9b29a0792 100644 --- a/arch/sw_64/include/asm/smp.h +++ b/arch/sw_64/include/asm/smp.h @@ -35,6 +35,7 @@ struct smp_rcb_struct { unsigned long ready; unsigned long init_done; unsigned long feat_vint; + unsigned long ptbr; }; extern bool __init is_rcid_duplicate(int rcid); diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 34f9d0ebbc82..0c11735d39b1 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -186,14 +186,6 @@ void smp_callin(void) complete(&cpu_running); -#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE - /* switch to paging mode */ - if (sunway_support_kpt) { - set_atc(ATC_PAGE); - tbiv(); - } -#endif - /* Must have completely accurate bogos. 
*/ local_irq_enable(); @@ -281,6 +273,12 @@ void __init smp_rcb_init(struct smp_rcb_struct *smp_rcb_base_addr) /* Setup SMP_RCB fields that uses to activate secondary CPU */ smp_rcb->restart_entry = __smp_callin; smp_rcb->init_done = 0xDEADBEEFUL; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + if (sunway_support_kpt) { + smp_rcb->init_done = 0x2025DEADBEEFUL; + smp_rcb->ptbr = virt_to_phys(init_mm.pgd); + } +#endif mb(); } diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index b61b29f276b7..8bd24cdfeef4 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -497,6 +497,8 @@ void __init paging_init(void) unsigned long init_size = (unsigned long)__init_end - init_start; unsigned long data_start = (unsigned long)_sdata; unsigned long data_size = (unsigned long)_end - data_start; + unsigned long sw64_guest_reset_start = (unsigned long)(__va(0x10000)); + unsigned long sw64_guest_reset_size = PAGE_SIZE; pgd_t *pgdir = (&init_mm)->pgd; phys_addr_t start, end; u64 i; @@ -527,6 +529,9 @@ void __init paging_init(void) PAGE_KERNEL, pgtable_alloc_fixmap); create_pgd_mapping(pgdir, data_start, __pa(data_start), data_size, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + if (is_in_guest()) + create_pgd_mapping(pgdir, sw64_guest_reset_start, __pa(sw64_guest_reset_start), sw64_guest_reset_size, + PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); memblock_mark_nomap(__pa(sw64_reserve_start), __pa((unsigned long)_end - sw64_reserve_start)); -- Gitee From 890abcf72b1f92c7d650cc37b8a5249fe5178c53 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Fri, 31 Oct 2025 18:32:14 +0800 Subject: [PATCH 164/231] sw64: use ioremap to map IO address in functions like __get_cpu_nums() Use ioremap to map the IO physical address in functions like __get_cpu_nums() before accessing the IO virtual address. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/uncore_io_ops_junzhang.h | 10 ++++++++-- arch/sw_64/kernel/smp.c | 10 ++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h index 472c495ebcdf..74fe919f4528 100644 --- a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -11,13 +11,16 @@ static inline int __get_cpu_nums(void) int cpus; unsigned long cfg_info; void __iomem *spbu_base; + unsigned long *cfg_info_addr; spbu_base = misc_platform_get_spbu_base(0); - cfg_info = readq(spbu_base + OFFSET_CFG_INFO); + cfg_info_addr = ioremap((phys_addr_t)(__pa(spbu_base) + OFFSET_CFG_INFO), 0x8); + cfg_info = readq(cfg_info_addr); cfg_info = (cfg_info >> 33) & 0x3; cpus = 1 << cfg_info; + iounmap(cfg_info_addr); return cpus; } @@ -26,13 +29,16 @@ static inline unsigned long __get_node_mem(int node) unsigned long node_mem; unsigned long total_mem; void __iomem *spbu_base; + unsigned long *cfg_info_addr; spbu_base = misc_platform_get_spbu_base(node); - total_mem = readq(spbu_base + OFFSET_CFG_INFO) >> 3; + cfg_info_addr = ioremap((phys_addr_t)(__pa(spbu_base) + OFFSET_CFG_INFO), 0x8); + total_mem = readq(cfg_info_addr) >> 3; total_mem = (total_mem & 0xffff) << 28; node_mem = total_mem / __get_cpu_nums(); + iounmap(cfg_info_addr); return node_mem; } diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 0c11735d39b1..10b063ae881b 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -68,6 +68,8 @@ static void upshift_freq(void) { int i, cpu_num; void __iomem *spbu_base; + unsigned long *clu_lv2_selh_addr; + unsigned long *clu_lv2_sell_addr; if (is_guest_or_emul()) return; @@ -78,10 +80,14 @@ static void upshift_freq(void) cpu_num = sw64_chip->get_cpu_num(); for (i = 0; i < cpu_num; i++) { spbu_base = misc_platform_get_spbu_base(i); - 
writeq(-1UL, spbu_base + OFFSET_CLU_LV2_SELH); - writeq(-1UL, spbu_base + OFFSET_CLU_LV2_SELL); + clu_lv2_selh_addr = ioremap((phys_addr_t)(__pa(spbu_base) + OFFSET_CLU_LV2_SELH), 0x8); + clu_lv2_sell_addr = ioremap((phys_addr_t)(__pa(spbu_base) + OFFSET_CLU_LV2_SELL), 0x8); + writeq(-1UL, clu_lv2_selh_addr); + writeq(-1UL, clu_lv2_sell_addr); udelay(1000); } + iounmap(clu_lv2_selh_addr); + iounmap(clu_lv2_sell_addr); } static void downshift_freq(void) -- Gitee From 9785e935d9bcc7ab21d6211b13ff0b5b89998a51 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 24 Jul 2025 16:27:44 +0800 Subject: [PATCH 165/231] sw64: enable kernel page table Check if we support sunway kernel page table by cpuid(). Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/cpu.h | 9 +++++++++ arch/sw_64/kernel/early_init.c | 1 + 2 files changed, 10 insertions(+) diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h index 1697c2a3369e..8f0b7f3d31d8 100644 --- a/arch/sw_64/include/asm/cpu.h +++ b/arch/sw_64/include/asm/cpu.h @@ -25,6 +25,7 @@ enum hmcall_cpuid_cmd { #define CPU_FEAT_SIMD 0x2 #define CPU_FEAT_UNA 0x4 #define CPU_FEAT_VINT 0x8 +#define CPU_FEAT_KPT 0x10 #define CPU_FEAT_WP 0x20 enum sunway_cpu_model { @@ -73,4 +74,12 @@ static inline unsigned long get_cpu_freq(unsigned int cpu) extern bool sunway_support_kpt; +static inline void check_sunway_kpt_support(void) +{ + if (cpuid(GET_FEATURES, 0) & CPU_FEAT_KPT) + sunway_support_kpt = true; + else + sunway_support_kpt = false; +} + #endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c index 8fb15a9e385c..d5e3d578211d 100644 --- a/arch/sw_64/kernel/early_init.c +++ b/arch/sw_64/kernel/early_init.c @@ -8,6 +8,7 @@ bool sunway_support_kpt; asmlinkage __visible void __init sw64_start_kernel(void) { + check_sunway_kpt_support(); fixup_hmcall(); save_ktp(); start_kernel(); -- Gitee From 05c4d0dad895893c2441c958daf158cede9ab81d 
Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Mon, 26 Jan 2026 08:46:11 +0800 Subject: [PATCH 166/231] sw64: fix an inappropriate lock in __set_memory Using mmap_read_lock() in __set_memory is inappropriate, and this patch modifies it to mmap_write_lock(). Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/mm/pageattr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/mm/pageattr.c b/arch/sw_64/mm/pageattr.c index 2949fa7b42dd..0d55065fbe54 100644 --- a/arch/sw_64/mm/pageattr.c +++ b/arch/sw_64/mm/pageattr.c @@ -81,10 +81,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, if (!numpages) return 0; - mmap_read_lock(&init_mm); + mmap_write_lock(&init_mm); ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); - mmap_read_unlock(&init_mm); + mmap_write_unlock(&init_mm); local_flush_tlb_all(); -- Gitee From bd75dcb3c6837fa776d6b941602c33f3508f2974 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Tue, 27 Jan 2026 13:41:03 +0800 Subject: [PATCH 167/231] sw64: adapt kernel page table for S4 Fix kernel page table adaptation for hibernation with the following adjustmemts: - PCI reserved space and memmap reserved space are not saved. - Use restore code page to prevent self-overwriting of the code during resume. - Reserve extra soft_csrs, gp, uts_version, header_entry and csr_atc in hibernate header. - Switch csr_atc to KSEG to ensure access permission during resume. 
Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 3 + arch/sw_64/include/asm/csr.h | 159 ++++++++++++++++++++++++++++++ arch/sw_64/include/asm/suspend.h | 2 + arch/sw_64/kernel/asm-offsets.c | 1 + arch/sw_64/kernel/hibernate.c | 104 ++++++++++++++++--- arch/sw_64/kernel/hibernate_asm.S | 20 ++-- arch/sw_64/kernel/setup.c | 22 +++++ 7 files changed, 292 insertions(+), 19 deletions(-) diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index db5ebfe253be..a6987ccf1949 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -702,6 +702,9 @@ config ARCH_HIBERNATION_POSSIBLE depends on SW64 def_bool y +config ARCH_HIBERNATION_HEADER + def_bool HIBERNATION + config SW64_POWERCAP bool "Sunway powercap driver" select IPMI_HANDLER diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h index 39688acbda36..b879d15d9f80 100644 --- a/arch/sw_64/include/asm/csr.h +++ b/arch/sw_64/include/asm/csr.h @@ -82,6 +82,55 @@ #endif +#define SOFTCSR0 0xe0 +#define SOFTCSR1 0xe1 +#define SOFTCSR2 0xe2 +#define SOFTCSR3 0xe3 +#define SOFTCSR4 0xe4 +#define SOFTCSR5 0xe5 +#define SOFTCSR6 0xe6 +#define SOFTCSR7 0xe7 +#define SOFTCSR8 0xe8 +#define SOFTCSR9 0xe9 +#define SOFTCSR10 0xea +#define SOFTCSR11 0xeb +#define SOFTCSR12 0xec +#define SOFTCSR13 0xed +#define SOFTCSR14 0xee +#define SOFTCSR15 0xef +#define SOFTCSR16 0xf0 +#define SOFTCSR17 0xf1 +#define SOFTCSR18 0xf2 +#define SOFTCSR19 0xf3 +#define SOFTCSR20 0xf4 +#define SOFTCSR21 0xf5 +#define SOFTCSR22 0xf6 +#define SOFTCSR23 0xf7 +#define SOFTCSR24 0xf8 +#define SOFTCSR25 0xf9 +#define SOFTCSR26 0xfa +#define SOFTCSR27 0xfb +#define SOFTCSR28 0xfc +#define SOFTCSR29 0xfd +#define SOFTCSR30 0xfe +#define SOFTCSR31 0xff + +#define SOFTCSR32 0xd0 +#define SOFTCSR33 0xd1 +#define SOFTCSR34 0xd2 +#define SOFTCSR35 0xd3 +#define SOFTCSR36 0xd4 +#define SOFTCSR37 0xd5 +#define SOFTCSR38 0xd6 +#define SOFTCSR39 0xd7 +#define SOFTCSR40 0xd8 +#define SOFTCSR41 0xd9 +#define 
SOFTCSR42 0xda +#define SOFTCSR43 0xdb +#define SOFTCSR44 0xdc +#define SOFTCSR45 0xdd +#define SOFTCSR46 0xde +#define SOFTCSR47 0xdf #ifdef CONFIG_HAVE_CSRRW #ifndef __ASSEMBLY__ @@ -130,6 +179,116 @@ static inline void update_ptbr_sys(unsigned long ptbr) wrptbr(ptbr); } #endif +#endif /* CONFIG_HAVE_CSRRW */ + +#ifndef __ASSEMBLY__ +struct soft_csrs { + unsigned long sc[48]; +}; +static inline void save_all_soft_csrs(struct soft_csrs *sc) +{ + sc->sc[0] = sw64_read_csr(SOFTCSR0); + sc->sc[1] = sw64_read_csr(SOFTCSR1); + sc->sc[2] = sw64_read_csr(SOFTCSR2); + sc->sc[3] = sw64_read_csr(SOFTCSR3); + sc->sc[4] = sw64_read_csr(SOFTCSR4); + sc->sc[5] = sw64_read_csr(SOFTCSR5); + sc->sc[6] = sw64_read_csr(SOFTCSR6); + sc->sc[7] = sw64_read_csr(SOFTCSR7); + sc->sc[8] = sw64_read_csr(SOFTCSR8); + sc->sc[9] = sw64_read_csr(SOFTCSR9); + sc->sc[10] = sw64_read_csr(SOFTCSR10); + sc->sc[11] = sw64_read_csr(SOFTCSR11); + sc->sc[12] = sw64_read_csr(SOFTCSR12); + sc->sc[13] = sw64_read_csr(SOFTCSR13); + sc->sc[14] = sw64_read_csr(SOFTCSR14); + sc->sc[15] = sw64_read_csr(SOFTCSR15); + sc->sc[16] = sw64_read_csr(SOFTCSR16); + sc->sc[17] = sw64_read_csr(SOFTCSR17); + sc->sc[18] = sw64_read_csr(SOFTCSR18); + sc->sc[19] = sw64_read_csr(SOFTCSR19); + sc->sc[20] = sw64_read_csr(SOFTCSR20); + sc->sc[21] = sw64_read_csr(SOFTCSR21); + sc->sc[22] = sw64_read_csr(SOFTCSR22); + sc->sc[23] = sw64_read_csr(SOFTCSR23); + sc->sc[24] = sw64_read_csr(SOFTCSR24); + sc->sc[25] = sw64_read_csr(SOFTCSR25); + sc->sc[26] = sw64_read_csr(SOFTCSR26); + sc->sc[27] = sw64_read_csr(SOFTCSR27); + sc->sc[28] = sw64_read_csr(SOFTCSR28); + sc->sc[29] = sw64_read_csr(SOFTCSR29); + sc->sc[30] = sw64_read_csr(SOFTCSR30); + sc->sc[31] = sw64_read_csr(SOFTCSR31); + sc->sc[32] = sw64_read_csr(SOFTCSR32); + sc->sc[33] = sw64_read_csr(SOFTCSR33); + sc->sc[34] = sw64_read_csr(SOFTCSR34); + sc->sc[35] = sw64_read_csr(SOFTCSR35); + sc->sc[36] = sw64_read_csr(SOFTCSR36); + sc->sc[37] = sw64_read_csr(SOFTCSR37); + sc->sc[38] 
= sw64_read_csr(SOFTCSR38); + sc->sc[39] = sw64_read_csr(SOFTCSR39); + sc->sc[40] = sw64_read_csr(SOFTCSR40); + sc->sc[41] = sw64_read_csr(SOFTCSR41); + sc->sc[42] = sw64_read_csr(SOFTCSR42); + sc->sc[43] = sw64_read_csr(SOFTCSR43); + sc->sc[44] = sw64_read_csr(SOFTCSR44); + sc->sc[45] = sw64_read_csr(SOFTCSR45); + sc->sc[46] = sw64_read_csr(SOFTCSR46); + sc->sc[47] = sw64_read_csr(SOFTCSR47); +} + +static inline void restore_all_soft_csrs(struct soft_csrs *sc) +{ + sw64_write_csr(sc->sc[0], SOFTCSR0); + sw64_write_csr(sc->sc[1], SOFTCSR1); + sw64_write_csr(sc->sc[2], SOFTCSR2); + sw64_write_csr(sc->sc[3], SOFTCSR3); + sw64_write_csr(sc->sc[4], SOFTCSR4); + sw64_write_csr(sc->sc[5], SOFTCSR5); + sw64_write_csr(sc->sc[6], SOFTCSR6); + sw64_write_csr(sc->sc[7], SOFTCSR7); + sw64_write_csr(sc->sc[8], SOFTCSR8); + sw64_write_csr(sc->sc[9], SOFTCSR9); + sw64_write_csr(sc->sc[10], SOFTCSR10); + sw64_write_csr(sc->sc[11], SOFTCSR11); + sw64_write_csr(sc->sc[12], SOFTCSR12); + sw64_write_csr(sc->sc[13], SOFTCSR13); + sw64_write_csr(sc->sc[14], SOFTCSR14); + sw64_write_csr(sc->sc[15], SOFTCSR15); + sw64_write_csr(sc->sc[16], SOFTCSR16); + sw64_write_csr(sc->sc[17], SOFTCSR17); + sw64_write_csr(sc->sc[18], SOFTCSR18); + sw64_write_csr(sc->sc[19], SOFTCSR19); + sw64_write_csr(sc->sc[20], SOFTCSR20); + sw64_write_csr(sc->sc[21], SOFTCSR21); + sw64_write_csr(sc->sc[22], SOFTCSR22); + sw64_write_csr(sc->sc[23], SOFTCSR23); + sw64_write_csr(sc->sc[24], SOFTCSR24); + sw64_write_csr(sc->sc[25], SOFTCSR25); + sw64_write_csr(sc->sc[26], SOFTCSR26); + sw64_write_csr(sc->sc[27], SOFTCSR27); + sw64_write_csr(sc->sc[28], SOFTCSR28); + sw64_write_csr(sc->sc[29], SOFTCSR29); + sw64_write_csr(sc->sc[30], SOFTCSR30); + sw64_write_csr(sc->sc[31], SOFTCSR31); + sw64_write_csr(sc->sc[32], SOFTCSR32); + sw64_write_csr(sc->sc[33], SOFTCSR33); + sw64_write_csr(sc->sc[34], SOFTCSR34); + sw64_write_csr(sc->sc[35], SOFTCSR35); + sw64_write_csr(sc->sc[36], SOFTCSR36); + sw64_write_csr(sc->sc[37], 
SOFTCSR37); + sw64_write_csr(sc->sc[38], SOFTCSR38); + sw64_write_csr(sc->sc[39], SOFTCSR39); + sw64_write_csr(sc->sc[40], SOFTCSR40); + sw64_write_csr(sc->sc[41], SOFTCSR41); + sw64_write_csr(sc->sc[42], SOFTCSR42); + sw64_write_csr(sc->sc[43], SOFTCSR43); + sw64_write_csr(sc->sc[44], SOFTCSR44); + sw64_write_csr(sc->sc[45], SOFTCSR45); + sw64_write_csr(sc->sc[46], SOFTCSR46); + sw64_write_csr(sc->sc[47], SOFTCSR47); +} #endif + #endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h index b76db1580543..127a7481c34f 100644 --- a/arch/sw_64/include/asm/suspend.h +++ b/arch/sw_64/include/asm/suspend.h @@ -41,7 +41,9 @@ struct processor_state { unsigned long ktp; #ifdef CONFIG_HIBERNATION unsigned long sp; + unsigned long gp; struct vcpucb vcb; + struct soft_csrs sc; #endif }; diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c index 1a68486da270..dd728be0820f 100644 --- a/arch/sw_64/kernel/asm-offsets.c +++ b/arch/sw_64/kernel/asm-offsets.c @@ -38,6 +38,7 @@ void foo(void) OFFSET(PSTATE_KTP, processor_state, ktp); #ifdef CONFIG_HIBERNATION OFFSET(PSTATE_SP, processor_state, sp); + OFFSET(PSTATE_GP, processor_state, gp); #endif OFFSET(PBE_ADDR, pbe, address); OFFSET(PBE_ORIG_ADDR, pbe, orig_address); diff --git a/arch/sw_64/kernel/hibernate.c b/arch/sw_64/kernel/hibernate.c index e84f93762f13..eaef5b4a76d2 100644 --- a/arch/sw_64/kernel/hibernate.c +++ b/arch/sw_64/kernel/hibernate.c @@ -1,16 +1,42 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include +#include + #include #include +#include struct processor_state hibernate_state; /* Defined in hibernate_asm.S */ extern int restore_image(void); +extern int trampoline_restore_image(unsigned long code, + unsigned long processor_state_addr); + +/** + * struct arch_hibernate_hdr_invariants - container to store kernel build version. + * @uts_version: put the build number and date so that we do not resume with a + * different kernel. 
+ */ +struct arch_hibernate_hdr_invariants { + char uts_version[__NEW_UTS_LEN + 1]; +}; + +/** + * struct arch_hibernate_hdr - helper parameters that help us to restore the image. + * @invariants: container to store kernel build version. + * @processor_state_addr: address of image where the processor_state is saved. + */ +static struct arch_hibernate_hdr { + struct arch_hibernate_hdr_invariants invariants; + unsigned long processor_state_addr; +} resume_hdr; void save_processor_state(void) { struct vcpucb *vcb = &(hibernate_state.vcb); + struct soft_csrs *sc = &(hibernate_state.sc); vcb->ksp = rdksp(); vcb->usp = rdusp(); @@ -20,12 +46,15 @@ void save_processor_state(void) #elif defined(CONFIG_SUBARCH_C4) vcb->ptbr_usr = sw64_read_csr(CSR_PTBR_USR); vcb->ptbr_sys = sw64_read_csr(CSR_PTBR_SYS); + vcb->atc = get_atc(); + save_all_soft_csrs(sc); #endif } void restore_processor_state(void) { struct vcpucb *vcb = &(hibernate_state.vcb); + struct soft_csrs *sc = &(hibernate_state.sc); wrksp(vcb->ksp); wrusp(vcb->usp); @@ -35,16 +64,53 @@ void restore_processor_state(void) #elif defined(CONFIG_SUBARCH_C4) sw64_write_csr_imb(vcb->ptbr_usr, CSR_PTBR_USR); sw64_write_csr_imb(vcb->ptbr_sys, CSR_PTBR_SYS); + set_atc(vcb->atc); + restore_all_soft_csrs(sc); #endif sflush(); tbiv(); } +static unsigned long prepare_restore_code_page(void) +{ + void *page; + + /* + * Memory allocated by get_safe_page() will be handled by the hibernate code, so + * there is no need to free it here. + */ + page = (void *)get_safe_page(GFP_ATOMIC); + if (!page) + return -ENOMEM; + copy_page(page, restore_image); + + /* Make sure this page is executable. */ + set_memory_x((unsigned long)page, 1); + + return (unsigned long)page; +} + int swsusp_arch_resume(void) { - restore_image(); + unsigned long relocated_restore_code; + +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + /* + * To ensure executable and writeable permission for the pages during resuming, + * set CSR_ATC to KSEG. 
Then restore_processor_state() will restore CSR_ATC to + * the value saved in the image. + */ + set_atc(ATC_KSEG); +#endif + + /* Prepare a restore code page so that it doesn't get overwritten by itself. */ + relocated_restore_code = prepare_restore_code_page(); + + trampoline_restore_image(relocated_restore_code, resume_hdr.processor_state_addr); + return 0; } + /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; int pfn_is_nosave(unsigned long pfn) @@ -55,26 +121,30 @@ int pfn_is_nosave(unsigned long pfn) return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } -struct restore_data_record { - unsigned long magic; -}; - -#define RESTORE_MAGIC 0x0123456789ABCDEFUL +static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) +{ + memset(i, 0, sizeof(*i)); + memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); +} /** * arch_hibernation_header_save - populate the architecture specific part - * of a hibernation image header + * of a hibernation image header, and save helper parameters. * @addr: address to save the data at */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { - struct restore_data_record *rdr = addr; + struct arch_hibernate_hdr *hdr = addr; - if (max_size < sizeof(struct restore_data_record)) + if (max_size < sizeof(*hdr)) return -EOVERFLOW; - rdr->magic = RESTORE_MAGIC; + + arch_hdr_invariants(&hdr->invariants); + hdr->processor_state_addr = (unsigned long)&hibernate_state; + return 0; } +EXPORT_SYMBOL(arch_hibernation_header_save); /** * arch_hibernation_header_restore - read the architecture specific data @@ -83,7 +153,17 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size) */ int arch_hibernation_header_restore(void *addr) { - struct restore_data_record *rdr = addr; + struct arch_hibernate_hdr_invariants invariants; + struct arch_hibernate_hdr *hdr = addr; - return (rdr->magic == RESTORE_MAGIC) ? 
0 : -EINVAL; + arch_hdr_invariants(&invariants); + if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { + pr_crit("Hibernate image not generated by this kernel!\n"); + return -EINVAL; + } + + resume_hdr = *hdr; + + return 0; } +EXPORT_SYMBOL(arch_hibernation_header_restore); diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S index 0655efc59a25..134bc0d2a212 100644 --- a/arch/sw_64/kernel/hibernate_asm.S +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -26,6 +26,7 @@ ENTRY(swsusp_arch_suspend) stl $14, CALLEE_R14($1) stl $15, CALLEE_R15($1) stl $26, CALLEE_RA($1) + /* SIMD-FP */ ldi $1, PSTATE_FPREGS($16) vstd $f2, CALLEE_F2($1) @@ -41,6 +42,7 @@ ENTRY(swsusp_arch_suspend) stl $8, PSTATE_KTP($16) stl sp, PSTATE_SP($16) + stl gp, PSTATE_GP($16) call swsusp_save ldi $16, hibernate_state ldi $1, PSTATE_REGS($16) @@ -50,6 +52,7 @@ ENTRY(swsusp_arch_suspend) ret END(swsusp_arch_suspend) +/* The restore_image code will be copied to a 'safe' page and executed there. 
*/ ENTRY(restore_image) /* prepare to copy image data to their original locations */ ldi t0, restore_pblist @@ -74,13 +77,11 @@ $cpyloop: ldl t0, PBE_NEXT(t0) bne t0, $loop $done: - /* tell the hibernation core that we've just restored the memory */ ldi $0, in_suspend stl $31, 0($0) - ldi $16, hibernate_state - ldi $1, PSTATE_REGS($16) + ldi $1, PSTATE_REGS($17) ldl $9, CALLEE_R9($1) ldl $10, CALLEE_R10($1) @@ -91,7 +92,7 @@ $done: ldl $15, CALLEE_R15($1) ldl $26, CALLEE_RA($1) /* SIMD-FP */ - fldd $f0, PSTATE_FPCR($16) + fldd $f0, PSTATE_FPCR($17) wfpcr $f0 fimovd $f0, $2 and $2, 0x3, $2 @@ -111,7 +112,7 @@ $hibernate_setfpec_1: $hibernate_setfpec_2: setfpec2 $hibernate_setfpec_over: - ldi $1, PSTATE_FPREGS($16) + ldi $1, PSTATE_FPREGS($17) vldd $f2, CALLEE_F2($1) vldd $f3, CALLEE_F3($1) vldd $f4, CALLEE_F4($1) @@ -121,10 +122,15 @@ $hibernate_setfpec_over: vldd $f8, CALLEE_F8($1) vldd $f9, CALLEE_F9($1) - ldl sp, PSTATE_SP($16) - ldl $8, PSTATE_KTP($16) + ldl sp, PSTATE_SP($17) + ldl gp, PSTATE_GP($17) + ldl $8, PSTATE_KTP($17) SAVE_KTP ldi $0, 0($31) ret END(restore_image) + +ENTRY(trampoline_restore_image) + call $26, ($16) +END(trampoline_restore_image) diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index f06204022c9f..655b3b851e08 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -333,6 +334,21 @@ int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type) return 0; } +static void __init memmap_nosave_init(void) +{ + int i; + phys_addr_t start, end; + + for (i = 0; i < memmap_nr; i++) { + if (memmap_map[i].type == memmap_reserved || + memmap_map[i].type == memmap_pci) { + start = memmap_map[i].addr; + end = start + memmap_map[i].size; + register_nosave_region(PFN_DOWN(start), PFN_UP(end)); + } + } +} + static struct resource* __init insert_ram_resource(u64 start, u64 end, bool reserved) { @@ -732,6 +748,12 @@ setup_arch(char 
**cmdline_p) callback_init(); + /* + * After linear mapping is established, register no-save regions to ensure + * these spaces are unsaveable during hibernation. + */ + memmap_nosave_init(); + /* Try to upgrade ACPI tables via initrd */ acpi_table_upgrade(); -- Gitee From 222a0fad6da3611a11421876298c1d10bc36a9ff Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Tue, 27 Jan 2026 09:28:29 +0800 Subject: [PATCH 168/231] sw64: adjust the marking and definitions of some kernel mapping functions Currently, the create_pgd_mapping function is marked as __init type, making it inaccessible after the initialization phase. Add a non-__init interface __create_pgd_mapping. The same applies to the pgtable_alloc_late function, and its declaration is moved to mmu.h. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/mmu.h | 4 ++++ arch/sw_64/kernel/efi.c | 5 ----- arch/sw_64/kernel/smp.c | 3 +-- arch/sw_64/mm/init.c | 31 ++++++++++++++++++++++--------- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h index 395df786d114..da9f268baeee 100644 --- a/arch/sw_64/include/asm/mmu.h +++ b/arch/sw_64/include/asm/mmu.h @@ -12,6 +12,10 @@ typedef struct { void create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot, void *(*pgtable_alloc)(void)); + +void * __init pgtable_alloc_fixmap(void); +void *pgtable_alloc_late(void); + #endif #endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/kernel/efi.c b/arch/sw_64/kernel/efi.c index ac6d8dd42a71..4120b53a14ac 100644 --- a/arch/sw_64/kernel/efi.c +++ b/arch/sw_64/kernel/efi.c @@ -42,11 +42,6 @@ static __init pgprot_t create_mapping_protection(efi_memory_desc_t *md) return PAGE_KERNEL; } -static void * __init pgtable_alloc_late(void) -{ - return (void *)__get_free_page(GFP_KERNEL); -} - int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) { pgprot_t prot = 
create_mapping_protection(md); diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 10b063ae881b..07047d0d0ae1 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -262,8 +263,6 @@ static void __init process_nr_cpu_ids(void) nr_cpu_ids = num_possible_cpus(); } -extern void * __init pgtable_alloc_fixmap(void); - void __init smp_rcb_init(struct smp_rcb_struct *smp_rcb_base_addr) { if (smp_rcb != NULL) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 8bd24cdfeef4..d95403371a72 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -180,19 +180,19 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) local_flush_tlb_all(); } -static pte_t *__init get_pte_virt_fixmap(phys_addr_t phys) +static pte_t *get_pte_virt_fixmap(phys_addr_t phys) { clear_fixmap(FIX_PTE); return (pte_t *)set_fixmap_offset(FIX_PTE, phys); } -static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t phys) +static pmd_t *get_pmd_virt_fixmap(phys_addr_t phys) { clear_fixmap(FIX_PMD); return (pmd_t *)set_fixmap_offset(FIX_PMD, phys); } -static pud_t *__init get_pud_virt_fixmap(phys_addr_t phys) +static pud_t *get_pud_virt_fixmap(phys_addr_t phys) { clear_fixmap(FIX_PUD); return (pud_t *)set_fixmap_offset(FIX_PUD, phys); @@ -203,7 +203,12 @@ void * __init pgtable_alloc_fixmap(void) return (void *)__va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); } -static void __init +void *pgtable_alloc_late(void) +{ + return (void *)__get_free_page(GFP_KERNEL); +} + +static void create_pte_mapping(pte_t *pte_first, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot) { @@ -222,7 +227,7 @@ create_pte_mapping(pte_t *pte_first, unsigned long virt, unsigned long phys, } } -static void __init +static void create_pmd_mapping(pmd_t *pmd_first, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot, void *(*pgtable_alloc)(void)) @@ -256,7 
+261,7 @@ create_pmd_mapping(pmd_t *pmd_first, unsigned long virt, unsigned long phys, } } -static void __init +static void create_cont_pmd_mapping(pmd_t *pmd_first, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot, void *(*pgtable_alloc)(void)) @@ -285,7 +290,7 @@ create_cont_pmd_mapping(pmd_t *pmd_first, unsigned long virt, } } -static void __init +static void create_pud_mapping(pud_t *pud_first, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot, void *(*pgtable_alloc)(void)) @@ -319,8 +324,8 @@ create_pud_mapping(pud_t *pud_first, unsigned long virt, unsigned long phys, } } -void __init -create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, +static void +__create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, unsigned long size, pgprot_t prot, void *(*pgtable_alloc)(void)) { @@ -353,6 +358,14 @@ create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, clear_fixmap(FIX_PUD); } +void __init +create_pgd_mapping(pgd_t *pgdir, unsigned long virt, unsigned long phys, + unsigned long size, pgprot_t prot, + void *(*pgtable_alloc)(void)) +{ + __create_pgd_mapping(pgdir, virt, phys, size, prot, pgtable_alloc); +} + static void __init early_create_pmd(pgd_t *pgdir, pud_t *pud, pmd_t *pmd, unsigned long start_va, unsigned long size, unsigned long pa) { -- Gitee From 0dbeaa2cc7c08189982dc91ef6c33885167d6182 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Thu, 20 Nov 2025 11:04:04 +0800 Subject: [PATCH 169/231] sw64: set execute permission for virtual machine's hmcode text section The execute permission for the virtual machine's hmcode section is incorrectly set to non-executable before. This commit updates its permission to read and execute. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index d95403371a72..9a28c84d218b 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -544,7 +544,7 @@ void __init paging_init(void) PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); if (is_in_guest()) create_pgd_mapping(pgdir, sw64_guest_reset_start, __pa(sw64_guest_reset_start), sw64_guest_reset_size, - PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); + PAGE_KERNEL_READONLY_EXEC, pgtable_alloc_fixmap); memblock_mark_nomap(__pa(sw64_reserve_start), __pa((unsigned long)_end - sw64_reserve_start)); -- Gitee From a6760af9304021f815cd84624cf12957647ba7f0 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Wed, 26 Nov 2025 10:22:46 +0800 Subject: [PATCH 170/231] sw64: set PTBR_SYS before accessing vmalloc area In commit c054792f93b3 ("sw64: use ioremap to map IO address in functions like __get_cpu_nums()"), some IO addresses are mapped to vmalloc area via ioremap() before being accessed. Move update_ptbr_sys() to the beginning of smp_callin() to make sure PTBR_SYS is set before accessing vmalloc area. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kernel/smp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 07047d0d0ae1..072165260321 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -142,6 +142,8 @@ void smp_callin(void) unsigned long __maybe_unused nmi_stack; save_ktp(); + /* update csr:ptbr */ + update_ptbr_sys(virt_to_phys(init_mm.pgd)); upshift_freq(); cpuid = smp_processor_id(); WARN_ON_ONCE(!irqs_disabled()); @@ -163,8 +165,6 @@ void smp_callin(void) /* All kernel threads share the same mm context. 
*/ mmgrab(&init_mm); current->active_mm = &init_mm; - /* update csr:ptbr */ - update_ptbr_sys(virt_to_phys(init_mm.pgd)); #ifdef CONFIG_SUBARCH_C4 update_ptbr_usr(__pa_symbol(empty_zero_page)); #endif -- Gitee From b618fc263ba00f2bb21b640297eb4826481df0a7 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Wed, 4 Feb 2026 13:57:30 +0800 Subject: [PATCH 171/231] sw64: adapt kernel page table for memory hotplug Add the mapping operation for the memory block to the memory hot-add function, and the unmapping operation to the memory hot-remove function, so as to enable the kernel page table to support memory hot-plug. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/pgtable-4level.h | 1 + arch/sw_64/mm/init.c | 336 ++++++++++++++++++++++++ 2 files changed, 337 insertions(+) diff --git a/arch/sw_64/include/asm/pgtable-4level.h b/arch/sw_64/include/asm/pgtable-4level.h index 719e2c5377e3..bbe93ee82bbe 100644 --- a/arch/sw_64/include/asm/pgtable-4level.h +++ b/arch/sw_64/include/asm/pgtable-4level.h @@ -27,6 +27,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif /* !__ASSEMBLY__ */ #define PAGE_OFFSET 0xfff0000000000000 +#define PAGE_END 0xfff07fffffffffff #endif #endif /* _ASM_SW64_PGTABLE_4LEVEL_H */ diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 9a28c84d218b..4ed79bc92617 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -872,16 +872,349 @@ void vmemmap_free(unsigned long start, unsigned long end, #endif #ifdef CONFIG_MEMORY_HOTPLUG +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE +static void free_hotplug_page_range(struct page *page, size_t size, + struct vmem_altmap *altmap) +{ + if (altmap) { + vmem_altmap_free(altmap, size >> PAGE_SHIFT); + } else { + WARN_ON(PageReserved(page)); + free_pages((unsigned long)page_address(page), get_order(size)); + } +} + +static void free_hotplug_pgtable_page(struct page *page) +{ + free_hotplug_page_range(page, PAGE_SIZE, NULL); +} + +static bool 
pgtable_range_aligned(unsigned long start, unsigned long end, + unsigned long floor, unsigned long ceiling, + unsigned long mask) +{ + start &= mask; + if (start < floor) + return false; + + if (ceiling) { + ceiling &= mask; + if (!ceiling) + return false; + } + + if (end - 1 > ceiling - 1) + return false; + return true; +} + +static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + pte_t *ptep, pte; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + if (pte_none(pte)) + continue; + + WARN_ON(!pte_present(pte)); + pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pte_page(pte), + PAGE_SIZE, altmap); + } while (addr += PAGE_SIZE, addr < end); +} + +static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + pmd_t *pmdp, pmd; + + for (; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + if (next - addr == PMD_SIZE) { + pmd_clear(pmdp); + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + if (free_mapped) + free_hotplug_page_range(pmd_page(pmd), + PMD_SIZE, altmap); + continue; + } + unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap); + } +} + +static void unmap_hotplug_cont_pmd_range(pud_t *pudp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next, i; + pmd_t *pmdp, pmd; + + for (; addr < end; addr = next) { + next = cont_pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + if ((next - addr == CONT_PMD_SIZE) && + (PTRS_PER_PMD - pmd_index(addr) >= CONT_PMDS)) { + for (i = 0; i < 
CONT_PMDS; i++) { + pmd_clear(pmdp + i); + if (free_mapped) + free_hotplug_page_range(pmd_page(pmd), + PMD_SIZE, altmap); + } + flush_tlb_kernel_range(addr, addr + CONT_PMD_SIZE); + continue; + } + unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap); + } +} + +static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + pud_t *pudp, pud; + + for (; addr < end; addr = next) { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + if (next - addr == PUD_SIZE) { + pud_clear(pudp); + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + if (free_mapped) + free_hotplug_page_range(pud_page(pud), + PUD_SIZE, altmap); + continue; + } + unmap_hotplug_cont_pmd_range(pudp, addr, next, free_mapped, altmap); + } +} + +static void unmap_hotplug_range(unsigned long addr, unsigned long end, + bool free_mapped, struct vmem_altmap *altmap) +{ + unsigned long next; + pgd_t *pgdp; + p4d_t *p4dp, p4d; + + /* + * altmap can only be used as vmemmap mapping backing memory. + * In case the backing memory itself is not being freed, then + * altmap is irrelevant. Warn about this inconsistency when + * encountered. 
+ */ + WARN_ON(!free_mapped && altmap); + + for (; addr < end; addr = next) { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap); + } +} + +static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pte_t *ptep, pte; + unsigned long i, start = addr; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + /* + * This is just a sanity check here which verifies that + * pte clearing has been done by earlier unmap loops. + */ + WARN_ON(!pte_none(pte)); + } while (addr += PAGE_SIZE, addr < end); + + if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) + return; + + /* + * Check whether we can free the pte page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. + */ + ptep = pte_offset_kernel(pmdp, 0UL); + for (i = 0; i < PTRS_PER_PTE; i++) { + if (!pte_none(READ_ONCE(ptep[i]))) + return; + } + + pmd_clear(pmdp); + + /* invalidate the walk caches */ + flush_tlb_all(); + free_hotplug_pgtable_page(virt_to_page(ptep)); +} + +static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pmd_t *pmdp, pmd; + unsigned long i, next, start = addr; + + for (; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + free_empty_pte_table(pmdp, addr, next, floor, ceiling); + }; + + if (CONFIG_PGTABLE_LEVELS <= 2) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) + return; + + /* + * Check whether we can free the pmd page if the rest of the + * entries are empty. 
Overlap with other regions have been + * handled by the floor/ceiling check. + */ + pmdp = pmd_offset(pudp, 0UL); + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(READ_ONCE(pmdp[i]))) + return; + } + + pud_clear(pudp); + + /* invalidate the walk caches */ + flush_tlb_all(); + free_hotplug_pgtable_page(virt_to_page(pmdp)); +} + +static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pud_t *pudp, pud; + unsigned long i, next, start = addr; + + for (; addr < end; addr = next) { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + free_empty_pmd_table(pudp, addr, next, floor, ceiling); + } + + if (CONFIG_PGTABLE_LEVELS <= 3) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) + return; + + /* + * Check whether we can free the pud page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. 
+ */ + pudp = pud_offset(p4dp, 0UL); + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_none(READ_ONCE(pudp[i]))) + return; + } + + p4d_clear(p4dp); + + /* invalidate the walk caches */ + flush_tlb_all(); + free_hotplug_pgtable_page(virt_to_page(pudp)); +} + +static void free_empty_tables(unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + unsigned long next; + pgd_t *pgdp; + p4d_t *p4dp, p4d; + + for (; addr < end; addr = next) { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + free_empty_pud_table(p4dp, addr, next, floor, ceiling); + } +} + +static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) +{ + unsigned long end = start + size; + + WARN_ON(pgdir != init_mm.pgd); + WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); + + unmap_hotplug_range(start, end, false, NULL); + free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); +} +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ + int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + __create_pgd_mapping(swapper_pg_dir, (unsigned long)__va(start), start, + size, params->pgprot, pgtable_alloc_late); +#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ + ret = __add_pages(nid, start_pfn, nr_pages, params); if (ret) pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", __func__, ret); + else { + max_pfn = PFN_UP(start + size); + max_low_pfn = max_pfn; + } return ret; } @@ -892,6 +1225,9 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) unsigned long nr_pages = size >> PAGE_SHIFT; __remove_pages(start_pfn, nr_pages, altmap); +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + __remove_pgd_mapping(swapper_pg_dir, (unsigned long)__va(start), size); 
+#endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ } #endif -- Gitee From 2ac468840e1c0c09ca448c01387eea3c9c409fc5 Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Thu, 5 Feb 2026 09:29:45 +0800 Subject: [PATCH 172/231] sw64: adapt kernel page table for kdump This patch fixes the adaptation of kernel page table for kdump with the following adjustments: - Move kexec_start_address and kexec_indirection_page out of the text section to ensure they have write permission. - Add execute permission to the page of reboot_code_buffer. - Set csr_atc to KSEG before jumping to the second kernel. - Clear soft_csrs before jumping to the second kernel. - Create mapping for the sunway FDT. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/csr.h | 52 +++++++++++++++++++ arch/sw_64/include/asm/setup.h | 3 +- arch/sw_64/kernel/machine_kexec.c | 15 +++++- arch/sw_64/kernel/relocate_kernel.S | 10 ---- arch/sw_64/mm/init.c | 79 +++++++++++++++++++++++++++-- 5 files changed, 142 insertions(+), 17 deletions(-) diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h index b879d15d9f80..b7b2a3fb9467 100644 --- a/arch/sw_64/include/asm/csr.h +++ b/arch/sw_64/include/asm/csr.h @@ -289,6 +289,58 @@ static inline void restore_all_soft_csrs(struct soft_csrs *sc) sw64_write_csr(sc->sc[46], SOFTCSR46); sw64_write_csr(sc->sc[47], SOFTCSR47); } + +static inline void clear_soft_csrs(void) +{ + sw64_write_csr_imb(0, SOFTCSR0); + sw64_write_csr_imb(0, SOFTCSR1); + sw64_write_csr_imb(0, SOFTCSR2); + sw64_write_csr_imb(0, SOFTCSR3); + sw64_write_csr_imb(0, SOFTCSR4); + sw64_write_csr_imb(0, SOFTCSR5); + sw64_write_csr_imb(0, SOFTCSR6); + sw64_write_csr_imb(0, SOFTCSR7); + sw64_write_csr_imb(0, SOFTCSR8); + sw64_write_csr_imb(0, SOFTCSR9); + sw64_write_csr_imb(0, SOFTCSR10); + sw64_write_csr_imb(0, SOFTCSR11); + sw64_write_csr_imb(0, SOFTCSR12); + sw64_write_csr_imb(0, SOFTCSR13); + sw64_write_csr_imb(0, SOFTCSR14); + sw64_write_csr_imb(0, SOFTCSR15); 
+ sw64_write_csr_imb(0, SOFTCSR16); + sw64_write_csr_imb(0, SOFTCSR17); + sw64_write_csr_imb(0, SOFTCSR18); + sw64_write_csr_imb(0, SOFTCSR19); + sw64_write_csr_imb(0, SOFTCSR20); + sw64_write_csr_imb(0, SOFTCSR21); + sw64_write_csr_imb(0, SOFTCSR22); + sw64_write_csr_imb(0, SOFTCSR23); + sw64_write_csr_imb(0, SOFTCSR24); + sw64_write_csr_imb(0, SOFTCSR25); + sw64_write_csr_imb(0, SOFTCSR26); + sw64_write_csr_imb(0, SOFTCSR27); + sw64_write_csr_imb(0, SOFTCSR28); + sw64_write_csr_imb(0, SOFTCSR29); + sw64_write_csr_imb(0, SOFTCSR30); + sw64_write_csr_imb(0, SOFTCSR31); + sw64_write_csr_imb(0, SOFTCSR32); + sw64_write_csr_imb(0, SOFTCSR33); + sw64_write_csr_imb(0, SOFTCSR34); + sw64_write_csr_imb(0, SOFTCSR35); + sw64_write_csr_imb(0, SOFTCSR36); + sw64_write_csr_imb(0, SOFTCSR37); + sw64_write_csr_imb(0, SOFTCSR38); + sw64_write_csr_imb(0, SOFTCSR39); + sw64_write_csr_imb(0, SOFTCSR40); + sw64_write_csr_imb(0, SOFTCSR41); + sw64_write_csr_imb(0, SOFTCSR42); + sw64_write_csr_imb(0, SOFTCSR43); + sw64_write_csr_imb(0, SOFTCSR44); + sw64_write_csr_imb(0, SOFTCSR45); + sw64_write_csr_imb(0, SOFTCSR46); + sw64_write_csr_imb(0, SOFTCSR47); +} #endif #endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/asm/setup.h b/arch/sw_64/include/asm/setup.h index 0a2edf9af3ca..fdcbb9debbd2 100644 --- a/arch/sw_64/include/asm/setup.h +++ b/arch/sw_64/include/asm/setup.h @@ -32,7 +32,8 @@ #define INITRD_SIZE (*(unsigned long *)(PARAM + 0x108)) #define DTB_START (*(unsigned long *)(PARAM + 0x118)) -#define _TEXT_START (KERNEL_START + 0x10000) +#define TEXT_OFFSET 0x10000 +#define _TEXT_START (KERNEL_START + TEXT_OFFSET) #define COMMAND_LINE_OFF (0x10000UL - 0xB000UL) #define INITRD_START_OFF (0x10000UL - 0xA100UL) diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c index 9547f12a5299..9d8f3a6a7ba9 100644 --- a/arch/sw_64/kernel/machine_kexec.c +++ b/arch/sw_64/kernel/machine_kexec.c @@ -15,6 +15,7 @@ #include #include #include +#include #include 
#include @@ -22,8 +23,8 @@ extern const unsigned char relocate_new_kernel[]; extern const size_t relocate_new_kernel_size; -extern unsigned long kexec_start_address; -extern unsigned long kexec_indirection_page; +unsigned long kexec_start_address; +unsigned long kexec_indirection_page; static atomic_t waiting_for_crash_ipi; static void *kexec_control_page; @@ -346,6 +347,8 @@ void machine_kexec(struct kimage *image) unsigned long *ptr; reboot_code_buffer = kexec_control_page; + /* Since we do not have kimg, use set_memory to add exec_prot. */ + set_memory_x((unsigned long)reboot_code_buffer, 1); pr_info("reboot_code_buffer = %px\n", reboot_code_buffer); kexec_start_address = phys_to_ktext(image->start); pr_info("kexec_start_address = %#lx\n", kexec_start_address); @@ -383,6 +386,14 @@ void machine_kexec(struct kimage *image) pr_info("Will call new kernel at %08lx\n", image->start); pr_info("Bye ...\n"); +#ifdef CONFIG_SW64_KERNEL_PAGE_TABLE + /* + * We will jump directly instead of following the full reboot + * path, so softcsrs and CSR_ATC needs to be reseted here. 
+ */ + clear_soft_csrs(); + set_atc(ATC_KSEG); +#endif smp_wmb(); ((noretfun_t) reboot_code_buffer)(sunway_boot_magic, sunway_dtb_address); diff --git a/arch/sw_64/kernel/relocate_kernel.S b/arch/sw_64/kernel/relocate_kernel.S index a4b0d27778b9..793ab7d548d9 100644 --- a/arch/sw_64/kernel/relocate_kernel.S +++ b/arch/sw_64/kernel/relocate_kernel.S @@ -120,16 +120,6 @@ kexec_flag: .quad 0x1 #endif -kexec_start_address: - .globl kexec_start_address - .quad 0x0 - .size kexec_start_address, 8 - -kexec_indirection_page: - .globl kexec_indirection_page - .quad 0 - .size kexec_indirection_page, 8 - relocate_new_kernel_end: relocate_new_kernel_size: diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 4ed79bc92617..b2edfe37fff5 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -450,6 +450,75 @@ static void __init map_legacy_io(pgd_t *pgdir) create_pgd_mapping(pgdir, (unsigned long)__va(lpc_legacy_io_start), lpc_legacy_io_start, size, PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); } + +static bool check_present(pgd_t *pgdir, unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + bool ret; + + if (!addr) + return false; + + pgd = pgd_offset_pgd(pgdir, addr); + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + + pud = pud_offset(p4d, addr); + pud = get_pud_virt_fixmap(__pa(pud)); + if (pud_none(*pud)) { + ret = false; + goto out; + } + if (pud_leaf(*pud)) { + ret = true; + goto out; + } + + pmd = pmd_offset(pud, addr); + pmd = get_pmd_virt_fixmap(__pa(pmd)); + if (!pmd_present(*pmd)) { + ret = false; + goto out; + } + if (pmd_leaf(*pmd)) { + ret = true; + goto out; + } + + pte = pte_offset_kernel(pmd, addr); + pte = get_pte_virt_fixmap(__pa(pte)); + if (pte_none(*pte)) + ret = false; + else + ret = true; + +out: + clear_fixmap(FIX_PTE); + clear_fixmap(FIX_PMD); + clear_fixmap(FIX_PUD); + + return ret; +} + +static void __init map_fdt(pgd_t *pgdir) +{ + unsigned long dtb_size = 0; + + /* + * If 
sunway_dtb_address is not included in the memory mapping, create a mapping + * for it. + */ + if (!check_present(pgdir, sunway_dtb_address)) { + dtb_size = (unsigned long)fdt_totalsize((void *)sunway_dtb_address); + create_pgd_mapping(pgdir, sunway_dtb_address, __pa(sunway_dtb_address), + dtb_size, PAGE_KERNEL_READONLY, pgtable_alloc_fixmap); + } +} #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ /* @@ -461,7 +530,7 @@ void __init early_paging_init(void) unsigned long img_start, img_size; unsigned long dtb_start, dtb_size = 0; - img_start = (unsigned long)(KERNEL_START_PHYS + __START_KERNEL_map); + img_start = (unsigned long)_text - TEXT_OFFSET; img_size = (unsigned long)_end - img_start; dtb_start = sunway_dtb_address; @@ -500,9 +569,9 @@ void __init paging_init(void) #ifdef CONFIG_SW64_KERNEL_PAGE_TABLE unsigned long sw64_vcpucb_start = PAGE_OFFSET + 0x20000; unsigned long sw64_vcpucb_size = 0x60000; - unsigned long sw64_reserve_start = CONFIG_PHYSICAL_START + PAGE_OFFSET; - unsigned long sw64_reserve_size = (unsigned long)_stext - sw64_reserve_start; - unsigned long text_start = (unsigned long)_stext; + unsigned long sw64_reserve_start = (unsigned long)_text - TEXT_OFFSET; + unsigned long sw64_reserve_size = (unsigned long)_text - sw64_reserve_start; + unsigned long text_start = (unsigned long)_text; unsigned long text_size = (unsigned long)_etext - text_start; unsigned long ro_start = (unsigned long)__start_rodata; unsigned long ro_size = (unsigned long)__init_begin - ro_start; @@ -558,6 +627,8 @@ void __init paging_init(void) } memblock_clear_nomap(__pa(sw64_reserve_start), __pa((unsigned long)_end - sw64_reserve_start)); + + map_fdt(pgdir); #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ } -- Gitee From 5920dad14999271ca40d2a2a5730f614f7f1a36f Mon Sep 17 00:00:00 2001 From: Gao Chen Date: Fri, 6 Feb 2026 09:16:56 +0800 Subject: [PATCH 173/231] sw64: add barriers and tlb flush for set_atc Add barriers and TLB flush before and after switching CSR_ATC to ensure 
correctness. Signed-off-by: Gao Chen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/hmcall.h | 2 +- arch/sw_64/mm/init.c | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h index 24ef9e4b768b..e0cd6bbb17d2 100644 --- a/arch/sw_64/include/asm/hmcall.h +++ b/arch/sw_64/include/asm/hmcall.h @@ -258,7 +258,7 @@ static inline void wrap_asid(unsigned long asid, unsigned long ptbr) #define ATC_PAGE 1 #define ATC_KSEG 3 -#define set_atc(val) rwatc(1, (val)) +#define set_atc(val) ({mb(); imemb(); rwatc(1, (val)); tbiv(); imemb(); }) #define get_atc() rwatc(0, 0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index b2edfe37fff5..837725a1b116 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -556,8 +556,6 @@ void __init early_paging_init(void) pr_info("SW64 kernel page table enabled\n"); set_atc(ATC_PAGE); } - - tbiv(); #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ } -- Gitee From 3e532e6c610d591523c1c52ee36f25a80fc00594 Mon Sep 17 00:00:00 2001 From: Wang Yicheng Date: Thu, 5 Mar 2026 14:42:40 +0800 Subject: [PATCH 174/231] sw64: cpuidle: refine C6 state stability This commit refines the sw64 C6 deep idle state by setting CPUIDLE_FLAG_RCU_IDLE for manual RCU management and updating clock events to recalibrate timer interrupts during complex frequency transitions. However, software compensations cannot fully eliminate hardware-level timekeeping drift, so the cpuidle driver is disabled under tickless systems to guarantee overall system stability.
Signed-off-by: Wang Yicheng Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- drivers/clocksource/timer-sw64.c | 9 +++++++++ drivers/cpuidle/Kconfig.sw64 | 5 +++++ drivers/cpuidle/cpuidle-sw64.c | 28 +++++++++++++++++++++------- 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/drivers/clocksource/timer-sw64.c b/drivers/clocksource/timer-sw64.c index d925d2650bde..2840b0e2dbe2 100644 --- a/drivers/clocksource/timer-sw64.c +++ b/drivers/clocksource/timer-sw64.c @@ -384,6 +384,15 @@ static void sw64_update_clockevents(void *data) clockevents_update_freq(swevt, freqs->new * 1000); } +void sw64_cpuidle_updatevents(int freq) +{ + + struct clock_event_device *swevt = this_cpu_ptr(&timer_events); + + clockevents_update_freq(swevt, freq * 1000); + +} + static int sw64_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { diff --git a/drivers/cpuidle/Kconfig.sw64 b/drivers/cpuidle/Kconfig.sw64 index 0f23175ad36a..f36ace6e2a01 100644 --- a/drivers/cpuidle/Kconfig.sw64 +++ b/drivers/cpuidle/Kconfig.sw64 @@ -4,3 +4,8 @@ # config SW64_CPUIDLE bool "Generic SW64 CPU idle Driver" + depends on !NO_HZ + depends on !NO_HZ_IDLE + help + Enable cpuidle support for SW64 architecture. + Disabled when tickless system (NO_HZ or NO_HZ_IDLE) is active. diff --git a/drivers/cpuidle/cpuidle-sw64.c b/drivers/cpuidle/cpuidle-sw64.c index 7af9d75185c1..59635640c93a 100644 --- a/drivers/cpuidle/cpuidle-sw64.c +++ b/drivers/cpuidle/cpuidle-sw64.c @@ -38,9 +38,13 @@ static bool can_switch_freq(int cpu_sibling) * Handles frequency scaling by writing specific bits to * CLU_LV2_SEL when CPU core wants a deeper idle state. 
*/ +extern unsigned int cpufreq_quick_get(unsigned int cpu); +extern void sw64_cpuidle_updatevents(int freq); + static void sw64_do_deeper_idle(int cpu) { int core_id, node_id, cpu_sibling; + int cur_freq, downshift_freq = 200000; unsigned long flags; u64 freq_scaling; @@ -48,6 +52,8 @@ static void sw64_do_deeper_idle(int cpu) node_id = rcid_to_domain_id(cpu_to_rcid(cpu)); cpu_sibling = cpu_siblings[cpu]; + cur_freq = cpufreq_quick_get(cpu); + /* downshift frequency before idle if possible*/ spin_lock_irqsave(&per_core_lock[node_id][core_id], flags); cpu_deeper_states[cpu] = true; @@ -55,15 +61,23 @@ static void sw64_do_deeper_idle(int cpu) freq_scaling = 0x1UL << (2 * (core_id & CORE_ID_BITMASK)); writeq(freq_scaling, spbu_base[node_id] + OFFSET_CLU_LV2(core_id)); } + + sw64_cpuidle_updatevents(downshift_freq); + spin_unlock_irqrestore(&per_core_lock[node_id][core_id], flags); + rcu_idle_enter(); arch_cpu_idle(); + local_irq_disable(); + rcu_idle_exit(); + local_irq_enable(); /* upshift frequency after idle */ spin_lock_irqsave(&per_core_lock[node_id][core_id], flags); cpu_deeper_states[cpu] = false; freq_scaling = 0x3UL << (2 * (core_id & CORE_ID_BITMASK)); writeq(freq_scaling, spbu_base[node_id] + OFFSET_CLU_LV2(core_id)); + sw64_cpuidle_updatevents(cur_freq); spin_unlock_irqrestore(&per_core_lock[node_id][core_id], flags); } @@ -117,21 +131,21 @@ static struct cpuidle_driver sw64_idle_driver = { * be unavailable. 
*/ .states[0] = { - .name = "idle0", - .desc = "sw64 idle 0", + .name = "C1", + .desc = "halt", .exit_latency = 1, .target_residency = 1, .enter = sw64_idle_enter, }, .states[1] = { - .name = "idle1", - .desc = "sw64 idle 1", - .exit_latency = 100, - .target_residency = 100, + .name = "C6", + .desc = "freq downshift", + .exit_latency = 200, + .target_residency = 200, .enter = sw64_idle_enter, + .flags = CPUIDLE_FLAG_RCU_IDLE, }, .state_count = 2, - .cpumask = (struct cpumask *) cpu_possible_mask, }; static int get_sibling_cpu(int cpu) -- Gitee From c28ffcc29627c57dfee6dae248e60d8cc0542648 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Sat, 28 Feb 2026 16:10:02 +0800 Subject: [PATCH 175/231] sw64: define get_cycles macro Define get_cycles macro. Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/include/asm/timex.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/sw_64/include/asm/timex.h b/arch/sw_64/include/asm/timex.h index a5760bf8abd4..2564bbf59f3e 100644 --- a/arch/sw_64/include/asm/timex.h +++ b/arch/sw_64/include/asm/timex.h @@ -19,5 +19,6 @@ static inline cycles_t get_cycles(void) { return rdtc(); } +#define get_cycles get_cycles #endif /* _ASM_SW64_TIMEX_H */ -- Gitee From d64d996419f382407180cd424cf54e65f0af74da Mon Sep 17 00:00:00 2001 From: Jinyu Tang Date: Mon, 2 Mar 2026 08:19:20 +0800 Subject: [PATCH 176/231] sw64: add HUGE_VMAP support for c4 Enable HAVE_ARCH_HUGE_VMALLOC and HAVE_ARCH_HUGE_VMAP for the sw64 c4 architecture to allow ioremap() and vmalloc() to use huge pages. This significantly reduces TLB pressure and page table memory overhead when mapping large contiguous physical regions like PCIe BARs. 
Signed-off-by: Jinyu Tang Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 2 + arch/sw_64/include/asm/vmalloc.h | 19 +++++++ arch/sw_64/mm/Makefile | 2 +- arch/sw_64/mm/pgtable.c | 90 ++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 arch/sw_64/mm/pgtable.c diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index a6987ccf1949..5e1bdeb07679 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -93,6 +93,8 @@ config SW64 select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP + select HAVE_ARCH_HUGE_VMAP if SUBARCH_C4 select HAVE_ASM_MODVERSIONS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_BUGVERBOSE diff --git a/arch/sw_64/include/asm/vmalloc.h b/arch/sw_64/include/asm/vmalloc.h index a76d1133d6c6..548e32d159b6 100644 --- a/arch/sw_64/include/asm/vmalloc.h +++ b/arch/sw_64/include/asm/vmalloc.h @@ -2,4 +2,23 @@ #ifndef _ASM_SW64_VMALLOC_H #define _ASM_SW64_VMALLOC_H +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#include + +#define IOREMAP_MAX_ORDER (PUD_SHIFT) + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return true; +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return true; +} + +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #endif /* _ASM_SW64_VMALLOC_H */ diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile index 1f6cd3965633..700345bd3c40 100644 --- a/arch/sw_64/mm/Makefile +++ b/arch/sw_64/mm/Makefile @@ -5,7 +5,7 @@ #ccflags-y := -Werror -obj-y := init.o fault.o physaddr.o mmap.o extable.o +obj-y := init.o fault.o physaddr.o mmap.o extable.o pgtable.o obj-$(CONFIG_SW64_KERNEL_PAGE_TABLE) += pageattr.o obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/sw_64/mm/pgtable.c b/arch/sw_64/mm/pgtable.c new file mode 100644 
index 000000000000..b47ec8c96b7a --- /dev/null +++ b/arch/sw_64/mm/pgtable.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) +{ + pmd_t new_pmd = pfn_pmd(phys >> PAGE_SHIFT, prot); + + new_pmd = __pmd(pmd_val(new_pmd) | _PAGE_LEAF); + set_pmd(pmd, new_pmd); + + return 1; +} + +int pmd_clear_huge(pmd_t *pmd) +{ + if (!(pmd_val(*pmd) & _PAGE_LEAF)) + return 0; + + pmd_clear(pmd); + return 1; +} + +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + pte_t *pte; + + if (pmd_val(*pmd) & _PAGE_LEAF) + return 0; + + pte = (pte_t *)pmd_page_vaddr(*pmd); + + pmd_clear(pmd); + + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + + pte_free_kernel(NULL, pte); + + return 1; +} + +int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) +{ + pud_t new_pud = pfn_pud(phys >> PAGE_SHIFT, prot); + + new_pud = __pud(pud_val(new_pud) | _PAGE_LEAF); + + set_pud(pud, new_pud); + return 1; +} + +int pud_clear_huge(pud_t *pud) +{ + if (!(pud_val(*pud) & _PAGE_LEAF)) + return 0; + + pud_clear(pud); + return 1; +} + +int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + pmd_t *pmd; + int i; + + if (pud_val(*pud) & _PAGE_LEAF) + return 0; + + pmd = (pmd_t *)pud_page_vaddr(*pud); + pud_clear(pud); + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(pmd[i])) { + pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]); + + pte_free_kernel(NULL, pte); + } + } + + pmd_free(NULL, pmd); + + return 1; +} +#endif -- Gitee From af158ca19444c1de516989d25dac1dbf77aa1c97 Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Thu, 12 Mar 2026 16:34:16 +0800 Subject: [PATCH 177/231] sw64: fix the parameter passing logic of memblock_mark_nomap Fix the parameter passing logic of memblock_mark_nomap and memblock_clear_nomap. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/mm/init.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index 837725a1b116..ec9125914a71 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -613,8 +613,7 @@ void __init paging_init(void) create_pgd_mapping(pgdir, sw64_guest_reset_start, __pa(sw64_guest_reset_start), sw64_guest_reset_size, PAGE_KERNEL_READONLY_EXEC, pgtable_alloc_fixmap); - memblock_mark_nomap(__pa(sw64_reserve_start), - __pa((unsigned long)_end - sw64_reserve_start)); + memblock_mark_nomap(__pa(sw64_reserve_start), (unsigned long)_end - sw64_reserve_start); for_each_mem_range(i, &start, &end) { if (start >= end) break; @@ -623,8 +622,7 @@ void __init paging_init(void) (unsigned long)(end - start), PAGE_KERNEL_NOEXEC, pgtable_alloc_fixmap); } - memblock_clear_nomap(__pa(sw64_reserve_start), - __pa((unsigned long)_end - sw64_reserve_start)); + memblock_clear_nomap(__pa(sw64_reserve_start), (unsigned long)_end - sw64_reserve_start); map_fdt(pgdir); #endif /* CONFIG_SW64_KERNEL_PAGE_TABLE */ -- Gitee From eab52a48c7ac7f9990b4751d4f75eadf6df1768d Mon Sep 17 00:00:00 2001 From: Jinyu Tang Date: Wed, 4 Mar 2026 17:30:01 +0800 Subject: [PATCH 178/231] sw64: add support for batched TLB flushing during unmap This patch enables ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH for sw64 architecture. By deferring TLB flushes during memory reclamation, we can significantly reduce the number of Inter-Processor Interrupts (IPIs) sent between cores. This is highly beneficial in multi-threaded workloads sharing the same memory space, preventing IPI storms when swapping out pages actively cached in multiple CPUs. 
Signed-off-by: Jinyu Tang Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/Kconfig | 1 + arch/sw_64/include/asm/tlbbatch.h | 11 +++++++++++ arch/sw_64/include/asm/tlbflush.h | 24 ++++++++++++++++++++++++ arch/sw_64/kernel/smp.c | 18 ++++++++++++++++++ 4 files changed, 54 insertions(+) create mode 100644 arch/sw_64/include/asm/tlbbatch.h diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index 5e1bdeb07679..968b1edb50f7 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -59,6 +59,7 @@ config SW64 select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sw_64/include/asm/tlbbatch.h b/arch/sw_64/include/asm/tlbbatch.h new file mode 100644 index 000000000000..b48a199f92aa --- /dev/null +++ b/arch/sw_64/include/asm/tlbbatch.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_SW_64_TLBBATCH_H +#define _ASM_SW_64_TLBBATCH_H + +#include + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +#endif /* _ASM_SW_64_TLBBATCH_H */ diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h index 73995d9663a6..ce9b9f42e673 100644 --- a/arch/sw_64/include/asm/tlbflush.h +++ b/arch/sw_64/include/asm/tlbflush.h @@ -91,4 +91,28 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); #endif /* CONFIG_SMP */ + +static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) +{ + bool should_defer = false; + + /* If remote CPUs need to be flushed then defer batch the flush */ + if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) + should_defer = true; + put_cpu(); + + return should_defer; +} + +static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) +{ + flush_tlb_mm(mm); +} + +extern void arch_tlbbatch_add_pending(struct 
arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, + unsigned long uaddr); +extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); + + #endif /* _ASM_SW64_TLBFLUSH_H */ diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 072165260321..0a9326ec8cc7 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -821,6 +822,23 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) } EXPORT_SYMBOL(flush_tlb_kernel_range); +void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, + unsigned long uaddr) +{ + cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); + + mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); +} + +void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) +{ + if (!cpumask_empty(&batch->cpumask)) { + on_each_cpu_mask(&batch->cpumask, ipi_flush_tlb_all, NULL, 1); + cpumask_clear(&batch->cpumask); + } +} + #ifdef CONFIG_HOTPLUG_CPU extern int can_unplug_cpu(void); int __cpu_disable(void) -- Gitee From 6d4e1f2a3957faa98884129623671284be6a4a7b Mon Sep 17 00:00:00 2001 From: Gu Yuchen Date: Thu, 12 Mar 2026 16:47:24 +0800 Subject: [PATCH 179/231] sw64: fix the compilation error when CONFIG_DEBUG_VIRTUAL is enabled Compiling with CONFIG_DEBUG_VIRTUAL=y caused the following error: implicit declaration of function 'phys_addr_valid' This commit fixes the compilation error. 
Signed-off-by: Gu Yuchen Reviewed-by: He Sheng Signed-off-by: Gu Zitao --- arch/sw_64/kernel/setup.c | 2 +- arch/sw_64/mm/physaddr.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index 655b3b851e08..fd3a24dc3643 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -46,7 +46,7 @@ DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; static DEFINE_PER_CPU(struct cpu, cpu_devices); -static inline int phys_addr_valid(unsigned long addr) +inline int phys_addr_valid(unsigned long addr) { /* * At this point memory probe has not been done such that max_pfn diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c index 3c6ecb8ee86a..bdbfbe9c9920 100644 --- a/arch/sw_64/mm/physaddr.c +++ b/arch/sw_64/mm/physaddr.c @@ -4,6 +4,7 @@ #include #include +extern int phys_addr_valid(unsigned long); unsigned long __phys_addr(unsigned long addr) { VIRTUAL_BUG_ON(addr < PAGE_OFFSET); -- Gitee From b40409023018564be65076729f271a3d7745acd2 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 17 Mar 2026 14:21:22 +0800 Subject: [PATCH 180/231] sw64: always define pxx_pgprot() according to upstream Signed-off-by: Gu Zitao Reviewed-by: He Sheng --- arch/sw_64/mm/hugetlbpage_c4.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/sw_64/mm/hugetlbpage_c4.c b/arch/sw_64/mm/hugetlbpage_c4.c index 40094107eff2..93fdffde9f90 100644 --- a/arch/sw_64/mm/hugetlbpage_c4.c +++ b/arch/sw_64/mm/hugetlbpage_c4.c @@ -40,6 +40,7 @@ EXPORT_SYMBOL(pud_huge); /* * Select all bits except the pfn */ +#define pte_pgprot pte_pgprot static inline pgprot_t pte_pgprot(pte_t pte) { unsigned long pfn = pte_pfn(pte); -- Gitee From 45581e9c2ae8a276719969841a1d48c07627c4ae Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 13 Jan 2025 21:37:17 +0800 Subject: [PATCH 181/231] LoongArch: KVM: Clear LLBCTL if secondary mmu mapping is changed commit 4d38d0416ece7bab532e89a49f988a9954f12ee9 upstream LLBCTL is a separated 
guest CSR register from host, host exception ERET instruction will clear the host LLBCTL CSR register, and guest exception will clear the guest LLBCTL CSR register. VCPU0 atomic64_fetch_add_unless VCPU1 atomic64_fetch_add_unless ll.d %[p], %[c] beq %[p], %[u], 1f Here secondary mmu mapping is changed, host hpa page is replaced with a new page. And VCPU1 will execute atomic instruction on the new page. ll.d %[p], %[c] beq %[p], %[u], 1f add.d %[rc], %[p], %[a] sc.d %[rc], %[c] add.d %[rc], %[p], %[a] sc.d %[rc], %[c] LLBCTL is set on VCPU0 and it represents the memory is not modified by other VCPUs, sc.d will modify the memory directly. So clear WCLLB of the guest LLBCTL register when mapping is the changed. Fixes: ("LoongArch:LSVZ: Clear LLBCTL if secondary mmu mapping is changed") Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Change-Id: I6db0b913a2a56021153078b896396b825e544545 Signed-off-by: Ming Wang --- arch/loongarch/kvm/main.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index ef2bad612c00..3dddf3c8f222 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -246,20 +246,22 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) vcpu->cpu = cpu; kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); /* - * LLBCTL_WCLLB is separated CSR register from host - * eret instruction in host mode clears host LLBCTL_WCLLB - * register, and clears guest register in guest mode + * LLBCTL is a separated guest CSR register from host, a general + * exception ERET instruction clears the host LLBCTL register in + * host mode, and clears the guest LLBCTL register in guest mode. + * ERET in tlb refill exception does not clear LLBCTL register. 
* - * When gpa --> hpa mapping is changed, guest does not know - * even if the content is changed with new address + * When secondary mmu mapping is changed, guest OS does not know + * even if the content is changed after mapping is changed. * - * Here clear guest LLBCTL_WCLLB register when mapping is - * changed, else if mapping is changed when guest is executing - * LL/SC pair, LL loads old address, SC store new address - * successfully since LLBCTL_WCLLB is on, even if memory - * with new address is changed with other VCPUs. + * Here clear WCLLB of the guest LLBCTL register when mapping is + * changed. Otherwise, if mmu mapping is changed while guest is + * executing LL/SC pair, LL loads with the old address and set + * the LLBCTL flag, SC checks the LLBCTL flag and will store the + * new address successfully since LLBCTL_WCLLB is on, even if + * memory with new address is changed on other VCPUs. */ - set_gcsr_llbctl(LOONGARCH_CSR_LLBCTL); + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); } /* Restore GSTAT(0x50).vpid */ -- Gitee From 075d7348c3d26c94370cc4dff1fd5c8acfc75133 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 26 Dec 2025 18:35:40 +0800 Subject: [PATCH 182/231] LoongArch: Fix AP booting issue in VM mode commit 6ce031e5d6f475d476bab55ab7d8ea168fedc4c1 upstream Native IPI is used for AP booting, because it is the booting interface between OS and BIOS firmware. The paravirt IPI is only used inside OS, and native IPI is necessary to boot AP. When booting AP, we write the kernel entry address in the HW mailbox of AP and send IPI interrupt to it. AP executes idle instruction and waits for interrupts or SW events, then clears IPI interrupt and jumps to the kernel entry from HW mailbox. Between writing HW mailbox and sending IPI, AP can be woken up by SW events and jumps to the kernel entry, so ACTION_BOOT_CPU IPI interrupt will keep pending during AP booting. 
And native IPI interrupt handler needs be registered so that it can clear pending native IPI, else there will be endless interrupts during AP booting stage. Here native IPI interrupt is initialized even if paravirt IPI is used. Cc: stable@vger.kernel.org Fixes: ("LoongArch: KVM: Add PV IPI support on guest side") Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li --- arch/loongarch/kernel/paravirt.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 9cc27c3feb69..56690e025258 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -50,11 +50,18 @@ static u64 paravt_steal_clock(int cpu) } #ifdef CONFIG_SMP +static struct smp_ops native_ops; + static void pv_send_ipi_single(int cpu, unsigned int action) { int min, old; irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + if (unlikely(action == ACTION_BOOT_CPU)) { + native_ops.send_ipi_single(cpu, action); + return; + } + old = atomic_fetch_or(BIT(action), &info->message); if (old) return; @@ -74,6 +81,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) if (cpumask_empty(mask)) return; + if (unlikely(action == ACTION_BOOT_CPU)) { + native_ops.send_ipi_mask(mask, action); + return; + } + action = BIT(action); for_each_cpu(i, mask) { info = &per_cpu(irq_stat, i); @@ -106,7 +118,7 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) if (bitmap) kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, - (unsigned long)(bitmap >> BITS_PER_LONG), min); + (unsigned long)(bitmap >> BITS_PER_LONG), min); } static irqreturn_t pv_ipi_interrupt(int irq, void *dev) @@ -129,6 +141,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev) info->ipi_irqs[IPI_CALL_FUNCTION]++; } + if (action & SMP_CLEAR_VECTOR) { + complete_irq_moving(); + info->ipi_irqs[IPI_CLEAR_VECTOR]++; + } + return IRQ_HANDLED; } @@ 
-136,6 +153,8 @@ static void pv_init_ipi(void) { int r, swi; + /* Init native ipi irq for ACTION_BOOT_CPU */ + native_ops.init_ipi(); swi = get_percpu_irq(INT_SWI0); if (swi < 0) panic("SWI0 IRQ mapping failed\n"); @@ -182,6 +201,7 @@ int __init pv_ipi_init(void) return 0; #ifdef CONFIG_SMP + native_ops = smp_ops; smp_ops.init_ipi = pv_init_ipi; smp_ops.send_ipi_single = pv_send_ipi_single; smp_ops.send_ipi_mask = pv_send_ipi_mask; -- Gitee From 32ad8243342f106c7bde4858dc848b29ff753a3a Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Thu, 25 Dec 2025 10:50:10 +0800 Subject: [PATCH 183/231] LoongArch: Disable instrumentation for setup_ptwalker() Upstream: no Conflict: none Checkpatch: pass According to Documentation/dev-tools/kasan.rst, software KASAN modes use compiler instrumentation to insert validity checks. Such instrumentation might be incompatible with some parts of the kernel, and therefore needs to be disabled, just use the attribute __no_sanitize_address to disable instrumentation for the low level function setup_ptwalker(). 
Otherwise bringing up the secondary CPUs failed when CONFIG_KASAN is set on the Loongson-3C6000 server machine, here are the call chains: smpboot_entry() start_secondary() cpu_probe() per_cpu_trap_init() tlb_init() setup_tlb_handler() setup_ptwalker() Signed-off-by: Tiezhu Yang Signed-off-by: Ming Wang --- arch/loongarch/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 5503d4e4b096..1f9a7bbc0562 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -202,7 +202,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep local_irq_restore(flags); } -static void setup_ptwalker(void) +static void __no_sanitize_address setup_ptwalker(void) { unsigned long pwctl0, pwctl1; unsigned long pgd_i = 0, pgd_w = 0; -- Gitee From 5f6e07b52e04122e5811385974c5b9eaa0e45146 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Thu, 25 Dec 2025 10:30:00 +0800 Subject: [PATCH 184/231] LoongArch: Remove some code if kasan_arch_is_ready() is false Upstream: no Conflict: none Checkpatch: pass In the current kasan_init(), kasan_arch_is_ready() is false before setting kasan_early_stage as false, kasan_mem_to_shadow() always return the same address (void *)(kasan_early_shadow_page) no matter what the parameter is, kasan_populate_early_shadow() does nothing due to its parameters are the same addresses, just remove the related code. 
Signed-off-by: Tiezhu Yang Signed-off-by: Ming Wang --- arch/loongarch/mm/kasan_init.c | 49 +++++++++++++++------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index 7277b7583e1b..853a004a45cf 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -44,34 +44,30 @@ bool kasan_early_stage = true; void *kasan_mem_to_shadow(const void *addr) { - if (!kasan_arch_is_ready()) { + unsigned long maddr = (unsigned long)addr; + unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff; + unsigned long offset = 0; + + if (maddr >= FIXADDR_START) return (void *)(kasan_early_shadow_page); - } else { - unsigned long maddr = (unsigned long)addr; - unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff; - unsigned long offset = 0; - - if (maddr >= FIXADDR_START) - return (void *)(kasan_early_shadow_page); - - maddr &= XRANGE_SHADOW_MASK; - switch (xrange) { - case XKPRANGE_CC_SEG: - offset = XKPRANGE_CC_SHADOW_OFFSET; - break; - case XKPRANGE_UC_SEG: - offset = XKPRANGE_UC_SHADOW_OFFSET; - break; - case XKVRANGE_VC_SEG: - offset = XKVRANGE_VC_SHADOW_OFFSET; - break; - default: - WARN_ON(1); - return NULL; - } - return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset); + maddr &= XRANGE_SHADOW_MASK; + switch (xrange) { + case XKPRANGE_CC_SEG: + offset = XKPRANGE_CC_SHADOW_OFFSET; + break; + case XKPRANGE_UC_SEG: + offset = XKPRANGE_UC_SHADOW_OFFSET; + break; + case XKVRANGE_VC_SEG: + offset = XKVRANGE_VC_SHADOW_OFFSET; + break; + default: + WARN_ON(1); + return NULL; } + + return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset); } const void *kasan_shadow_to_mem(const void *shadow_addr) @@ -290,9 +286,6 @@ void __init kasan_init(void) /* Maps everything to a single page of zeroes */ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); - kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), - 
kasan_mem_to_shadow((void *)KFENCE_AREA_END)); - kasan_early_stage = false; /* Populate the linear mapping */ -- Gitee From 70fdd76b501ab6d76566f3acdaba2e76e1e60670 Mon Sep 17 00:00:00 2001 From: Ojaswin Mujoo Date: Tue, 18 Mar 2025 13:22:55 +0530 Subject: [PATCH 185/231] ext4: define ext4_journal_destroy wrapper commit 5a02a6204ca37e7c22fbb55a789c503f05e8e89a upstream Define an ext4 wrapper over jbd2_journal_destroy to make sure we have consistent behavior during journal destruction. This will also come useful in the next patch where we add some ext4 specific logic in the destroy path. Reviewed-by: Jan Kara Reviewed-by: Baokun Li Signed-off-by: Ojaswin Mujoo Link: https://patch.msgid.link/c3ba78c5c419757e6d5f2d8ebb4a8ce9d21da86a.1742279837.git.ojaswin@linux.ibm.com Signed-off-by: Theodore Ts'o Signed-off-by: Xianglai Li Change-Id: I08deb6b882360e24425c9fe660f46657a84c1871 --- fs/ext4/ext4_jbd2.h | 14 ++++++++++++++ fs/ext4/super.c | 16 ++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 0c77697d5e90..930778e507cc 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -513,4 +513,18 @@ static inline int ext4_should_dioread_nolock(struct inode *inode) return 1; } +/* + * Pass journal explicitly as it may not be cached in the sbi->s_journal in some + * cases + */ +static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal) +{ + int err = 0; + + err = jbd2_journal_destroy(journal); + sbi->s_journal = NULL; + + return err; +} + #endif /* _EXT4_JBD2_H */ diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b83b9603347d..959ac242848f 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1322,8 +1322,7 @@ static void ext4_put_super(struct super_block *sb) if (sbi->s_journal) { aborted = is_journal_aborted(sbi->s_journal); - err = jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + err = ext4_journal_destroy(sbi, sbi->s_journal); if ((err < 0) && 
!aborted) { ext4_abort(sb, -err, "Couldn't clean up the journal"); } @@ -4994,8 +4993,7 @@ static int ext4_load_and_init_journal(struct super_block *sb, out: /* flush s_sb_upd_work before destroying the journal. */ flush_work(&sbi->s_sb_upd_work); - jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + ext4_journal_destroy(sbi, sbi->s_journal); return -EINVAL; } @@ -5684,8 +5682,7 @@ failed_mount8: __maybe_unused if (sbi->s_journal) { /* flush s_sb_upd_work before journal destroy. */ flush_work(&sbi->s_sb_upd_work); - jbd2_journal_destroy(sbi->s_journal); - sbi->s_journal = NULL; + ext4_journal_destroy(sbi, sbi->s_journal); } failed_mount3a: ext4_es_unregister_shrinker(sbi); @@ -5993,7 +5990,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb, return journal; out_journal: - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); out_bdev: blkdev_put(journal_bdev, sb); return ERR_PTR(errno); @@ -6110,8 +6107,7 @@ static int ext4_load_journal(struct super_block *sb, EXT4_SB(sb)->s_journal = journal; err = ext4_clear_journal_err(sb, es); if (err) { - EXT4_SB(sb)->s_journal = NULL; - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); return err; } @@ -6129,7 +6125,7 @@ static int ext4_load_journal(struct super_block *sb, return 0; err_out: - jbd2_journal_destroy(journal); + ext4_journal_destroy(EXT4_SB(sb), journal); return err; } -- Gitee From 720925a63b6d9206db81ed91f15716669a874ae1 Mon Sep 17 00:00:00 2001 From: Ojaswin Mujoo Date: Tue, 18 Mar 2025 13:22:57 +0530 Subject: [PATCH 186/231] ext4: Make sb update interval tunable commit 896b02d0b9e7deb4a4eb365e13dd912b49916519 upstream Currently, outside error paths, we auto commit the super block after 1 hour has passed and 16MB worth of updates have been written since last commit. This is a policy decision so make this tunable while keeping the defaults same. 
This is useful if user wants to tweak the superblock behavior or for debugging the codepath by allowing to trigger it more frequently. We can now tweak the super block update using sb_update_sec and sb_update_kb files in /sys/fs/ext4// Reviewed-by: Jan Kara Reviewed-by: Ritesh Harjani (IBM) Reviewed-by: Baokun Li Signed-off-by: Ojaswin Mujoo Link: https://patch.msgid.link/950fb8c9b2905620e16f02a3b9eeea5a5b6cb87e.1742279837.git.ojaswin@linux.ibm.com Signed-off-by: Theodore Ts'o Signed-off-by: Xianglai Li Change-Id: I45e75285943798aa2fd61b473b28c4f9d22e8a9a --- fs/ext4/ext4.h | 9 +++++++++ fs/ext4/super.c | 15 ++++++++------- fs/ext4/sysfs.c | 4 ++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 7fb143a3e1a1..6a5a1813858f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1601,6 +1601,8 @@ struct ext4_sb_info { unsigned int s_mb_prefetch; unsigned int s_mb_prefetch_limit; unsigned int s_mb_best_avail_max_trim_order; + unsigned int s_sb_update_sec; + unsigned int s_sb_update_kb; /* stats for buddy allocator */ atomic_t s_bal_reqs; /* number of reqs with len > 1 */ @@ -2281,6 +2283,13 @@ static inline int ext4_forced_shutdown(struct super_block *sb) #define EXT4_DEF_MIN_BATCH_TIME 0 #define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */ +/* + * Default values for superblock update + */ +#define EXT4_DEF_SB_UPDATE_INTERVAL_SEC (3600) /* seconds (1 hour) */ +#define EXT4_DEF_SB_UPDATE_INTERVAL_KB (16384) /* kilobytes (16MB) */ + + /* * Minimum number of groups in a flexgroup before we separate out * directories into the first block group of a flexgroup diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 959ac242848f..4ac60119b3a4 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -442,9 +442,6 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi) #define ext4_get_tstamp(es, tstamp) \ __ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi) -#define EXT4_SB_REFRESH_INTERVAL_SEC (3600) /* seconds (1 hour) */ 
-#define EXT4_SB_REFRESH_INTERVAL_KB (16384) /* kilobytes (16MB) */ - /* * The ext4_maybe_update_superblock() function checks and updates the * superblock if needed. @@ -452,8 +449,10 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi) * This function is designed to update the on-disk superblock only under * certain conditions to prevent excessive disk writes and unnecessary * waking of the disk from sleep. The superblock will be updated if: - * 1. More than an hour has passed since the last superblock update, and - * 2. More than 16MB have been written since the last superblock update. + * 1. More than sbi->s_sb_update_sec (def: 1 hour) has passed since the last + * superblock update + * 2. More than sbi->s_sb_update_kb (def: 16MB) kbs have been written since the + * last superblock update. * * @sb: The superblock */ @@ -474,7 +473,7 @@ static void ext4_maybe_update_superblock(struct super_block *sb) now = ktime_get_real_seconds(); last_update = ext4_get_tstamp(es, s_wtime); - if (likely(now - last_update < EXT4_SB_REFRESH_INTERVAL_SEC)) + if (likely(now - last_update < sbi->s_sb_update_sec)) return; lifetime_write_kbytes = sbi->s_kbytes_written + @@ -489,7 +488,7 @@ static void ext4_maybe_update_superblock(struct super_block *sb) */ diff_size = lifetime_write_kbytes - le64_to_cpu(es->s_kbytes_written); - if (diff_size > EXT4_SB_REFRESH_INTERVAL_KB) + if (diff_size > sbi->s_sb_update_kb) schedule_work(&EXT4_SB(sb)->s_sb_upd_work); } @@ -5280,6 +5279,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; + sbi->s_sb_update_kb = EXT4_DEF_SB_UPDATE_INTERVAL_KB; + sbi->s_sb_update_sec = EXT4_DEF_SB_UPDATE_INTERVAL_SEC; /* * set default s_li_wait_mult for lazyinit, for the case there is diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index d65dccb44ed5..00133f4c6d94 100644 --- 
a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -248,6 +248,8 @@ EXT4_ATTR(journal_task, 0444, journal_task); EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch); EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit); EXT4_RW_ATTR_SBI_UL(last_trim_minblks, s_last_trim_minblks); +EXT4_RW_ATTR_SBI_UI(sb_update_sec, s_sb_update_sec); +EXT4_RW_ATTR_SBI_UI(sb_update_kb, s_sb_update_kb); static unsigned int old_bump_val = 128; EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val); @@ -299,6 +301,8 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_prefetch), ATTR_LIST(mb_prefetch_limit), ATTR_LIST(last_trim_minblks), + ATTR_LIST(sb_update_sec), + ATTR_LIST(sb_update_kb), NULL, }; ATTRIBUTE_GROUPS(ext4); -- Gitee From 8be9de76c1648f7ecbc0e89cefb1069dd686da1d Mon Sep 17 00:00:00 2001 From: Ojaswin Mujoo Date: Tue, 18 Mar 2025 13:22:56 +0530 Subject: [PATCH 187/231] ext4: avoid journaling sb update on error if journal is destroying commit ce2f26e73783b4a7c46a86e3af5b5c8de0971790 upstream Presently we always BUG_ON if trying to start a transaction on a journal marked with JBD2_UNMOUNT, since this should never happen. However, while ltp running stress tests, it was observed that in case of some error handling paths, it is possible for update_super_work to start a transaction after the journal is destroyed eg: (umount) ext4_kill_sb kill_block_super generic_shutdown_super sync_filesystem /* commits all txns */ evict_inodes /* might start a new txn */ ext4_put_super flush_work(&sbi->s_sb_upd_work) /* flush the workqueue */ jbd2_journal_destroy journal_kill_thread journal->j_flags |= JBD2_UNMOUNT; jbd2_journal_commit_transaction jbd2_journal_get_descriptor_buffer jbd2_journal_bmap ext4_journal_bmap ext4_map_blocks ... 
ext4_inode_error ext4_handle_error schedule_work(&sbi->s_sb_upd_work) /* work queue kicks in */ update_super_work jbd2_journal_start start_this_handle BUG_ON(journal-> j_flags & JBD2_UNMOUNT) Hence, introduce a new mount flag to indicate journal is destroying and only do a journaled (and deferred) update of sb if this flag is not set. Otherwise, just fallback to an un-journaled commit. Further, in the journal destroy path, we have the following sequence: 1. Set mount flag indicating journal is destroying 2. force a commit and wait for it 3. flush pending sb updates This sequence is important as it ensures that, after this point, there is no sb update that might be journaled so it is safe to update the sb outside the journal. (To avoid race discussed in 2d01ddc86606) Also, we don't need a similar check in ext4_grp_locked_error since it is only called from mballoc and AFAICT it would be always valid to schedule work here. Fixes: 2d01ddc86606 ("ext4: save error info to sb through journal if available") Signed-off-by: Ojaswin Mujoo Reviewed-by: Jan Kara Link: https://patch.msgid.link/9613c465d6ff00cd315602f99283d5f24018c3f7.1742279837.git.ojaswin@linux.ibm.com Signed-off-by: Theodore Ts'o Signed-off-by: Xianglai Li Change-Id: Ie3d5a8670e765452edbfcfe87e4f332f2a0f6c53 Signed-off-by: Ming Wang --- fs/ext4/ext4.h | 3 ++- fs/ext4/ext4_jbd2.h | 15 +++++++++++++++ fs/ext4/super.c | 16 ++++++++-------- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 6a5a1813858f..9fa4f9cbb801 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1817,7 +1817,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) */ enum { EXT4_MF_MNTDIR_SAMPLED, - EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */ + EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */ + EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */ }; static inline void ext4_set_mount_flag(struct super_block *sb, int bit) diff --git 
a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index 930778e507cc..ada46189b086 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -521,6 +521,21 @@ static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *jour { int err = 0; + /* + * At this point only two things can be operating on the journal. + * JBD2 thread performing transaction commit and s_sb_upd_work + * issuing sb update through the journal. Once we set + * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not + * queue s_sb_upd_work and ext4_force_commit() makes sure any + * ext4_handle_error() calls from the running transaction commit are + * finished. Hence no new s_sb_upd_work can be queued after we + * flush it here. + */ + ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY); + + ext4_force_commit(sbi->s_sb); + flush_work(&sbi->s_sb_upd_work); + err = jbd2_journal_destroy(journal); sbi->s_journal = NULL; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4ac60119b3a4..1752f1748d6d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -728,9 +728,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error, * In case the fs should keep running, we need to writeout * superblock through the journal. Due to lock ordering * constraints, it may not be safe to do it right here so we - * defer superblock flushing to a workqueue. + * defer superblock flushing to a workqueue. We just need to be + * careful when the journal is already shutting down. If we get + * here in that case, just update the sb directly as the last + * transaction won't commit anyway. 
*/ - if (continue_fs && journal) + if (continue_fs && journal && + !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY)) schedule_work(&EXT4_SB(sb)->s_sb_upd_work); else ext4_commit_super(sb); @@ -1315,7 +1319,6 @@ static void ext4_put_super(struct super_block *sb) ext4_unregister_li_request(sb); ext4_quotas_off(sb, EXT4_MAXQUOTAS); - flush_work(&sbi->s_sb_upd_work); destroy_workqueue(sbi->rsv_conversion_wq); ext4_release_orphan_info(sb); @@ -1325,7 +1328,8 @@ static void ext4_put_super(struct super_block *sb) if ((err < 0) && !aborted) { ext4_abort(sb, -err, "Couldn't clean up the journal"); } - } + } else + flush_work(&sbi->s_sb_upd_work); ext4_es_unregister_shrinker(sbi); timer_shutdown_sync(&sbi->s_err_report); @@ -4990,8 +4994,6 @@ static int ext4_load_and_init_journal(struct super_block *sb, return 0; out: - /* flush s_sb_upd_work before destroying the journal. */ - flush_work(&sbi->s_sb_upd_work); ext4_journal_destroy(sbi, sbi->s_journal); return -EINVAL; } @@ -5681,8 +5683,6 @@ failed_mount8: __maybe_unused sbi->s_ea_block_cache = NULL; if (sbi->s_journal) { - /* flush s_sb_upd_work before journal destroy. */ - flush_work(&sbi->s_sb_upd_work); ext4_journal_destroy(sbi, sbi->s_journal); } failed_mount3a: -- Gitee From 6d88511432e88f8ab53b45f687966d4baaf8c9b4 Mon Sep 17 00:00:00 2001 From: Haowei Zheng Date: Wed, 20 Nov 2024 19:50:53 +0800 Subject: [PATCH 188/231] cpufreq: loongson3-acpi-cpufreq: Expand cpu id field for 3C6000 Upstream: no Conflict: none Checkpatch: pass The Loongson 3C6000 processor supports a higher number of cores, making the original CPU ID encoding insufficient for frequency and voltage control messages. Expand the CPU ID field in the command message payload. This is achieved by encoding the upper bits of the CPU ID into bits [11:8] of the message, while retaining the lower bits in bits [3:0]. This ensures cpufreq functions correctly on multi-core 3C6000 platforms. 
Signed-off-by: Haowei Zheng --- drivers/cpufreq/loongson3-acpi-cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c index b5ffcb5c89b7..8a8633526afa 100644 --- a/drivers/cpufreq/loongson3-acpi-cpufreq.c +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -57,7 +57,6 @@ MODULE_LICENSE("GPL"); #define FACTOR (0xeac0c6e8) #define BOOST_THRESHOLD (900) #define MAX_CORES_PER_PACKAGE 64 -#define CPU_ID_FIELD 0xf #define VOLTAGE_COMMAND 0x21 #define MAX_READY_TIMEOUT 300000000 #define RESERVED_FREQ 3 @@ -410,6 +409,7 @@ static void do_set_freq_level(int cpu, int freq_level) uint32_t val; message = (0 << 31) | (VOLTAGE_COMMAND << 24) + | (((cpu >> 4) & CPU_ID_FIELD) << 8) | ((uint32_t)freq_level << 4) | (cpu & CPU_ID_FIELD); iocsr_write32(message, 0x51c); -- Gitee From bd2a1c10385a6c26bfb6472d489269bcdc49ad1d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Thu, 13 Nov 2025 20:28:05 +0800 Subject: [PATCH 189/231] pci/quirks: 2k3000: set discrete GPU as default boot device if present Upstream: no Conflict: none Checkpatch: pass According to 7A1000 and 7A2000 platform default gpu setting, The 2k3000 should also support this. Signed-off-by: Tianrui Zhao Signed-off-by: Ming Wang --- drivers/pci/quirks.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fa6ec8f6893c..881e1de38744 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -35,6 +35,9 @@ #ifdef CONFIG_PSWIOTLB #include #endif +#ifdef CONFIG_LOONGARCH +#include +#endif /* * Retrain the link of a downstream PCIe port by hand if necessary. 
@@ -413,6 +416,35 @@ static void loongson_pcie_msi_quirk(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a59, loongson_pcie_msi_quirk); #ifdef CONFIG_LOONGARCH +#define DEV_LS7A1000_DC 0x7a06 +#define DEV_LS7A2000_DC 0x7a36 +#define DEV_LS2K3000_DC 0x7a46 +static void loongson_vgadev_quirk(struct pci_dev *pdev) +{ + struct pci_dev *devp = NULL; + + while ((devp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, devp))) { + /* If the graphics card is SM750, set it as a slave */ + if (devp->vendor == 0x126f && devp->device == 0x0750) { + vga_set_default_device(pdev); + dev_info(&pdev->dev, + "Overriding boot device as %X:%X\n", + pdev->vendor, pdev->device); + break; + } + + if (devp->vendor != PCI_VENDOR_ID_LOONGSON) { + vga_set_default_device(devp); + dev_info(&pdev->dev, + "Overriding boot device as %X:%X\n", + devp->vendor, devp->device); + } + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A1000_DC, loongson_vgadev_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A2000_DC, loongson_vgadev_quirk); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS2K3000_DC, loongson_vgadev_quirk); + #define DEV_PCIE_PORT_4 0x7a39 #define DEV_PCIE_PORT_5 0x7a49 #define DEV_PCIE_PORT_6 0x7a59 -- Gitee From bb58110909f7bbcf7b4881608cb2f01c9d46845c Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Wed, 8 Nov 2023 14:12:15 +0800 Subject: [PATCH 190/231] LoongArch: Relax memory ordering for atomic operations commit affef66b65889a0ea0060e13e5f7fe569897d787 upstream This patch relaxes the implementation while satisfying the memory ordering requirements for atomic operations, which will help improve performance on LA664+. 
Unixbench with full threads (8) before after Dhrystone 2 using register variables 203910714.2 203909539.8 0.00% Double-Precision Whetstone 37930.9 37931 0.00% Execl Throughput 29431.5 29545.8 0.39% File Copy 1024 bufsize 2000 maxblocks 6645759.5 6676320 0.46% File Copy 256 bufsize 500 maxblocks 2138772.4 2144182.4 0.25% File Copy 4096 bufsize 8000 maxblocks 11640698.4 11602703 -0.33% Pipe Throughput 8849077.7 8917009.4 0.77% Pipe-based Context Switching 1255108.5 1287277.3 2.56% Process Creation 50825.9 50442.1 -0.76% Shell Scripts (1 concurrent) 25795.8 25942.3 0.57% Shell Scripts (8 concurrent) 3812.6 3835.2 0.59% System Call Overhead 9248212.6 9353348.6 1.14% ======= System Benchmarks Index Score 8076.6 8114.4 0.47% Signed-off-by: WANG Rui Signed-off-by: Huacai Chen Signed-off-by: Ming Wang --- arch/loongarch/include/asm/atomic.h | 88 ++++++++++++++++++++++------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index 2143202cb380..c86f0ab922ec 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -38,19 +38,19 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ __asm__ __volatile__( \ - "am"#asm_op"_db.w" " $zero, %1, %0 \n" \ + "am"#asm_op".w" " $zero, %1, %0 \n" \ : "+ZB" (v->counter) \ : "r" (I) \ : "memory"); \ } -#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ -static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \ { \ int result; \ \ __asm__ __volatile__( \ - "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ : "+ZB" (v->counter), "=&r" (result) \ : "r" (I) \ : "memory"); \ @@ -58,13 +58,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ return result c_op I; \ } -#define ATOMIC_FETCH_OP(op, I, asm_op) \ 
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \ { \ int result; \ \ __asm__ __volatile__( \ - "am"#asm_op"_db.w" " %1, %2, %0 \n" \ + "am"#asm_op#mb".w" " %1, %2, %0 \n" \ : "+ZB" (v->counter), "=&r" (result) \ : "r" (I) \ : "memory"); \ @@ -74,29 +74,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ #define ATOMIC_OPS(op, I, asm_op, c_op) \ ATOMIC_OP(op, I, asm_op) \ - ATOMIC_OP_RETURN(op, I, asm_op, c_op) \ - ATOMIC_FETCH_OP(op, I, asm_op) + ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) ATOMIC_OPS(add, i, add, +) ATOMIC_OPS(sub, -i, add, +) +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return arch_atomic_sub_return +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed #undef ATOMIC_OPS #define ATOMIC_OPS(op, I, asm_op) \ ATOMIC_OP(op, I, asm_op) \ - ATOMIC_FETCH_OP(op, I, asm_op) + ATOMIC_FETCH_OP(op, I, 
asm_op, _db, ) \ + ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed) ATOMIC_OPS(and, i, and) ATOMIC_OPS(or, i, or) ATOMIC_OPS(xor, i, xor) +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed #undef ATOMIC_OPS @@ -174,18 +198,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v) static inline void arch_atomic64_##op(long i, atomic64_t *v) \ { \ __asm__ __volatile__( \ - "am"#asm_op"_db.d " " $zero, %1, %0 \n" \ + "am"#asm_op".d " " $zero, %1, %0 \n" \ : "+ZB" (v->counter) \ : "r" (I) \ : "memory"); \ } -#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ -static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \ +#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \ +static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \ { \ long result; \ __asm__ __volatile__( \ - "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ : "+ZB" (v->counter), "=&r" (result) \ : "r" (I) \ : "memory"); \ @@ -193,13 +217,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \ return result c_op I; \ } -#define ATOMIC64_FETCH_OP(op, I, asm_op) \ -static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \ +static inline long 
arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \ { \ long result; \ \ __asm__ __volatile__( \ - "am"#asm_op"_db.d " " %1, %2, %0 \n" \ + "am"#asm_op#mb".d " " %1, %2, %0 \n" \ : "+ZB" (v->counter), "=&r" (result) \ : "r" (I) \ : "memory"); \ @@ -209,29 +233,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ #define ATOMIC64_OPS(op, I, asm_op, c_op) \ ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \ - ATOMIC64_FETCH_OP(op, I, asm_op) + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \ + ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \ + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) ATOMIC64_OPS(add, i, add, +) ATOMIC64_OPS(sub, -i, add, +) +#define arch_atomic64_add_return arch_atomic64_add_return +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release arch_atomic64_add_return #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return arch_atomic64_sub_return +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed #undef ATOMIC64_OPS #define ATOMIC64_OPS(op, I, asm_op) \ ATOMIC64_OP(op, I, asm_op) \ - ATOMIC64_FETCH_OP(op, I, asm_op) + ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \ + 
ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed) ATOMIC64_OPS(and, i, and) ATOMIC64_OPS(or, i, or) ATOMIC64_OPS(xor, i, xor) +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed #undef ATOMIC64_OPS -- Gitee From 3c3688a74d4479d737084895e47c515b53638470 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Wed, 18 Mar 2026 11:42:06 +0800 Subject: [PATCH 191/231] LoongArch: Enable CONFIG_PSI_DEFAULT_DISABLED=y Upstream: no Conflict: none Checkpatch: pass Enable this configuration to prevent PSI from working by default, thereby improving system performance. 
Signed-off-by: Ming Wang --- arch/loongarch/configs/loongson3_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 019a90a465d0..5d48a0d1b0e6 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -15,6 +15,7 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=18 -- Gitee From fbbb9dd34d3ab584ca8605b1193763b2e337bb05 Mon Sep 17 00:00:00 2001 From: Phillip Potter Date: Wed, 23 Jul 2025 00:19:00 +0100 Subject: [PATCH 192/231] cdrom: Call cdrom_mrw_exit from cdrom_release function commit 5ec9d26b78c4eb7c2fab54dcec6c0eb845302a98 upstream Remove the cdrom_mrw_exit call from unregister_cdrom, as it invokes block commands that can fail due to a NULL pointer dereference from the call happening too late, during the unloading of the driver (e.g. unplugging of USB optical drives). Instead perform the call inside cdrom_release, thus also removing the need for the exit function pointer inside the cdrom_device_info struct. 
Reported-by: Sergey Senozhatsky Closes: https://lore.kernel.org/linux-block/uxgzea5ibqxygv3x7i4ojbpvcpv2wziorvb3ns5cdtyvobyn7h@y4g4l5ezv2ec Suggested-by: Jens Axboe Link: https://lore.kernel.org/linux-block/6686fe78-a050-4a1d-aa27-b7bf7ca6e912@kernel.dk Tested-by: Phillip Potter Signed-off-by: Phillip Potter Link: https://lore.kernel.org/r/20250722231900.1164-2-phil@philpotter.co.uk Signed-off-by: Jens Axboe --- Documentation/cdrom/cdrom-standard.rst | 1 - drivers/cdrom/cdrom.c | 8 ++------ include/linux/cdrom.h | 1 - 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst index 6c1303cff159..b97a4e9b9bd3 100644 --- a/Documentation/cdrom/cdrom-standard.rst +++ b/Documentation/cdrom/cdrom-standard.rst @@ -273,7 +273,6 @@ The drive-specific, minor-like information that is registered with __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ unsigned short mmc3_profile; /* current MMC3 profile */ int for_data; /* unknown:TBD */ - int (*exit)(struct cdrom_device_info *);/* unknown:TBD */ int mrw_mode_page; /* which MRW mode page is in use */ }; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 54b80911f3e2..f2530873ee29 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi) if (check_media_type == 1) cdi->options |= (int) CDO_CHECK_TYPE; - if (CDROM_CAN(CDC_MRW_W)) - cdi->exit = cdrom_mrw_exit; - if (cdi->ops->read_cdda_bpc) cdi->cdda_method = CDDA_BPC_FULL; else @@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi) list_del(&cdi->list); mutex_unlock(&cdrom_mutex); - if (cdi->exit) - cdi->exit(cdi); - cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); } EXPORT_SYMBOL(unregister_cdrom); @@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi) cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); 
cdrom_dvd_rw_close_write(cdi); + if (CDROM_CAN(CDC_MRW_W)) + cdrom_mrw_exit(cdi); if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) { cd_dbg(CD_CLOSE, "Unlocking door!\n"); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index fdfb61ccf55a..b907e6c2307d 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -62,7 +62,6 @@ struct cdrom_device_info { __u8 last_sense; __u8 media_written; /* dirty flag, DVD+RW bookkeeping */ unsigned short mmc3_profile; /* current MMC3 profile */ - int (*exit)(struct cdrom_device_info *); int mrw_mode_page; bool opened_for_data; __s64 last_media_change_ms; -- Gitee From c59e2e4c30f809f2fca26190b0e188db4a5589e5 Mon Sep 17 00:00:00 2001 From: Jianping Liu Date: Sun, 22 Mar 2026 23:14:12 +0800 Subject: [PATCH 193/231] ethernet/Kconfig: do not source dinghai/Kconfig DingHai NIC driver will cause compile error, so for now, we are not referencing the Kconfig of the dinghai network card driver. Signed-off-by: Jianping Liu --- drivers/net/ethernet/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 182562249c4c..bcc21234a57b 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -198,6 +198,6 @@ source "drivers/net/ethernet/wangxun/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/northlink/Kconfig" -source "drivers/net/ethernet/dinghai/Kconfig" +# source "drivers/net/ethernet/dinghai/Kconfig" endif # ETHERNET -- Gitee From df7da8988791a121ba564a423dcd4f2be0db23e3 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 13 Dec 2024 12:50:30 -0800 Subject: [PATCH 194/231] x86/cpu: Remove unnecessary MwAIT leaf checks commit 262fba55708b60a063b30d103963477dc5026f8c upstream. The CPUID leaf dependency checker will remove X86_FEATURE_MWAIT if the CPUID level is below the required level (CPUID_MWAIT_LEAF). 
Thus, if you check X86_FEATURE_MWAIT you do not need to also check the CPUID level. Intel-SIG: commit 262fba55708b x86/cpu: Remove unnecessary MwAIT leaf checks Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Dave Hansen Link: https://lore.kernel.org/all/20241213205030.9B42B458%40davehans-spike.ostc.intel.com [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/hpet.c | 3 --- arch/x86/kernel/smpboot.c | 2 -- drivers/acpi/acpi_pad.c | 2 -- drivers/idle/intel_idle.c | 3 --- 4 files changed, 10 deletions(-) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index e1dcb241b2e1..c51e103bca79 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -942,9 +942,6 @@ static bool __init mwait_pc10_supported(void) if (!cpu_feature_enabled(X86_FEATURE_MWAIT)) return false; - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return false; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) && diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c3ac9e602558..88100d16d637 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1308,8 +1308,6 @@ static inline void mwait_play_dead(void) return; if (!this_cpu_has(X86_FEATURE_CLFLUSH)) return; - if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) - return; eax = CPUID_MWAIT_LEAF; ecx = 0; diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 71e25c798976..5af09a6f84e4 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -41,8 +41,6 @@ static void power_saving_mwait_init(void) if (!boot_cpu_has(X86_FEATURE_MWAIT)) return; - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 21dfe0d73ef4..60e1f0afffbc 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -2242,9 
+2242,6 @@ static int __init intel_idle_init(void) return -ENODEV; } - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return -ENODEV; - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || -- Gitee From e1be53553b81d3d11e86cb2c293d930c04026f26 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 15 Nov 2024 21:58:31 +0100 Subject: [PATCH 195/231] cpuidle: Do not return from cpuidle_play_dead() on callback failures commit f65ee094eda6e5897172a1276ffee88cf5489928 upstream. If the :enter_dead() idle state callback fails for a certain state, there may be still a shallower state for which it will work. Because the only caller of cpuidle_play_dead(), native_play_dead(), falls back to hlt_play_dead() if it returns an error, it should better try all of the idle states for which :enter_dead() is present before failing, so change it accordingly. Also notice that the :enter_dead() state callback is not expected to return on success (the CPU should be "dead" then), so make cpuidle_play_dead() ignore its return value. Intel-SIG: commit f65ee094eda6 cpuidle: Do not return from cpuidle_play_dead() on callback failures Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. Wysocki Reviewed-by: Mario Limonciello Tested-by: Mario Limonciello # 6.12-rc7 Reviewed-by: Gautham R. 
Shenoy Link: https://patch.msgid.link/3318440.aeNJFYEL58@rjwysocki.net [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/cpuidle/cpuidle.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 6704d610573a..c075e0e1ab6a 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -69,11 +69,15 @@ int cpuidle_play_dead(void) if (!drv) return -ENODEV; - /* Find lowest-power state that supports long-term idle */ - for (i = drv->state_count - 1; i >= 0; i--) + for (i = drv->state_count - 1; i >= 0; i--) { if (drv->states[i].enter_dead) - return drv->states[i].enter_dead(dev, i); + drv->states[i].enter_dead(dev, i); + } + /* + * If :enter_dead() is successful, it will never return, so reaching + * here means that all of them failed above or were not present. + */ return -ENODEV; } -- Gitee From 655d52019a7b30e93a1c5a924303ac784a5cb745 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 15 Nov 2024 22:00:25 +0100 Subject: [PATCH 196/231] cpuidle: Change :enter_dead() driver callback return type to void commit 9cf9f2e70bea4e66a2c8b8c4743489beb21258a8 upstream. After a previous change, cpuidle_play_dead(), which is the only caller of idle state :enter_dead() callbacks, ignores their return values, so they may as well be void. Intel-SIG: commit 9cf9f2e70bea cpuidle: Change :enter_dead() driver callback return type to void Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Suggested-by: Peter Zijlstra Signed-off-by: Rafael J. Wysocki Reviewed-by: Gautham R. 
Shenoy Reviewed-by: Mario Limonciello Link: https://patch.msgid.link/2285569.iZASKD2KPV@rjwysocki.net [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/acpi/processor_idle.c | 7 ++----- include/linux/cpuidle.h | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 86e026c49aeb..4ba9fe2400a1 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -588,7 +588,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx) * @dev: the target CPU * @index: the index of suggested state */ -static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) +static void acpi_idle_play_dead(struct cpuidle_device *dev, int index) { struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); @@ -601,11 +601,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { io_idle(cx->address); } else - return -ENODEV; + return; } - - /* Never reached */ - return 0; } static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3183aeb7f5b4..a9ee4fe55dcf 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -61,7 +61,7 @@ struct cpuidle_state { struct cpuidle_driver *drv, int index); - int (*enter_dead) (struct cpuidle_device *dev, int index); + void (*enter_dead) (struct cpuidle_device *dev, int index); /* * CPUs execute ->enter_s2idle with the local tick or entire timekeeping -- Gitee From 84f6452ed2bbf8f8417d89bb3fa9df0c9318fe0c Mon Sep 17 00:00:00 2001 From: Patryk Wlazlyn Date: Wed, 5 Feb 2025 17:52:08 +0200 Subject: [PATCH 197/231] x86/smp: Allow calling mwait_play_dead with an arbitrary hint commit a7dd183f0b3848c056bbeed78ef5d5c52fe94d83 upstream. Introduce a helper function to allow offlined CPUs to enter idle states with a specific MWAIT hint. 
The new helper will be used in subsequent patches by the acpi_idle and intel_idle drivers. No functional change intended. Intel-SIG: commit a7dd183f0b38 x86/smp: Allow calling mwait_play_dead with an arbitrary hint Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Patryk Wlazlyn Signed-off-by: Artem Bityutskiy Signed-off-by: Dave Hansen Reviewed-by: Gautham R. Shenoy Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/all/20250205155211.329780-2-artem.bityutskiy%40linux.intel.com [ Zhang Rui: resolve conflict (use CPUID_MWAIT_LEAF) and amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/include/asm/smp.h | 3 ++ arch/x86/kernel/smpboot.c | 88 ++++++++++++++++++++------------------ 2 files changed, 50 insertions(+), 41 deletions(-) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index b36e8785c199..3d201d78455b 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -122,6 +122,7 @@ void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); void smp_kick_mwait_play_dead(void); +void mwait_play_dead(unsigned int eax_hint); void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); @@ -172,6 +173,8 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu) { return (struct cpumask *)cpumask_of(0); } + +static inline void mwait_play_dead(unsigned int eax_hint) { } #endif /* CONFIG_SMP */ #ifdef CONFIG_DEBUG_NMI_SELFTEST diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 88100d16d637..6cb3e821d864 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1289,47 +1289,9 @@ void play_dead_common(void) local_irq_disable(); } -/* - * We need to flush the caches before going to sleep, lest we have - * dirty data in our caches when we come back up. 
- */ -static inline void mwait_play_dead(void) +void __noreturn mwait_play_dead(unsigned int eax_hint) { struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); - unsigned int eax, ebx, ecx, edx; - unsigned int highest_cstate = 0; - unsigned int highest_subcstate = 0; - int i; - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return; - if (!this_cpu_has(X86_FEATURE_MWAIT)) - return; - if (!this_cpu_has(X86_FEATURE_CLFLUSH)) - return; - - eax = CPUID_MWAIT_LEAF; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - - /* - * eax will be 0 if EDX enumeration is not valid. - * Initialized below to cstate, sub_cstate value when EDX is valid. - */ - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { - eax = 0; - } else { - edx >>= MWAIT_SUBSTATE_SIZE; - for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { - if (edx & MWAIT_SUBSTATE_MASK) { - highest_cstate = i; - highest_subcstate = edx & MWAIT_SUBSTATE_MASK; - } - } - eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | - (highest_subcstate - 1); - } /* Set up state for the kexec() hack below */ md->status = CPUDEAD_MWAIT_WAIT; @@ -1350,7 +1312,7 @@ static inline void mwait_play_dead(void) mb(); __monitor(md, 0, 0); mb(); - __mwait(eax, 0); + __mwait(eax_hint, 0); if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) { /* @@ -1372,6 +1334,50 @@ static inline void mwait_play_dead(void) } } +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. 
+ */ +static inline void mwait_play_dead_cpuid_hint(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return; + if (!this_cpu_has(X86_FEATURE_MWAIT)) + return; + if (!this_cpu_has(X86_FEATURE_CLFLUSH)) + return; + + eax = CPUID_MWAIT_LEAF; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* + * eax will be 0 if EDX enumeration is not valid. + * Initialized below to cstate, sub_cstate value when EDX is valid. + */ + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { + eax = 0; + } else { + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + } + + mwait_play_dead(eax); +} + /* * Kick all "offline" CPUs out of mwait on kexec(). See comment in * mwait_play_dead(). @@ -1415,7 +1421,7 @@ void native_play_dead(void) play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - mwait_play_dead(); + mwait_play_dead_cpuid_hint(); if (cpuidle_play_dead()) hlt_play_dead(); } -- Gitee From b0c1428720df6b37e180395129a5f76d72d07091 Mon Sep 17 00:00:00 2001 From: Patryk Wlazlyn Date: Wed, 5 Feb 2025 17:52:09 +0200 Subject: [PATCH 198/231] ACPI/processor_idle: Add FFH state handling commit 541ddf31e30022b8e6f44b3a943964e8f0989d15 upstream. Recent Intel platforms will depend on the idle driver to pass the correct hint for playing dead via mwait_play_dead_with_hint(). Expand the existing enter_dead interface with handling for FFH states and pass the MWAIT hint to the mwait_play_dead code. Intel-SIG: commit 541ddf31e300 ACPI/processor_idle: Add FFH state handling Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Suggested-by: Gautham R. 
Shenoy Signed-off-by: Patryk Wlazlyn Signed-off-by: Artem Bityutskiy Signed-off-by: Dave Hansen Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/all/20250205155211.329780-3-artem.bityutskiy%40linux.intel.com [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/acpi/cstate.c | 10 ++++++++++ drivers/acpi/processor_idle.c | 2 ++ include/acpi/processor.h | 5 +++++ 3 files changed, 17 insertions(+) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index a851e580689a..b047da0eb842 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -15,6 +15,7 @@ #include #include #include +#include /* * Initialize bm_flags based on the CPU cache properties @@ -204,6 +205,15 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); +void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) +{ + unsigned int cpu = smp_processor_id(); + struct cstate_entry *percpu_entry; + + percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); + mwait_play_dead(percpu_entry->states[cx->index].eax); +} + void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 4ba9fe2400a1..bd088cff4917 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -600,6 +600,8 @@ static void acpi_idle_play_dead(struct cpuidle_device *dev, int index) raw_safe_halt(); else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { io_idle(cx->address); + } else if (cx->entry_method == ACPI_CSTATE_FFH) { + acpi_processor_ffh_play_dead(cx); } else return; } diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 3f34ebb27525..ce5ab2e20a99 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -280,6 +280,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct 
acpi_power_register *reg); void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate); +void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx); #else static inline void acpi_processor_power_init_bm_check(struct acpi_processor_flags @@ -300,6 +301,10 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx { return; } +static inline void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) +{ + return; +} #endif static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg, -- Gitee From 85f58e80f572c3c0dae4a9c7eeb50c785f08bb2e Mon Sep 17 00:00:00 2001 From: Patryk Wlazlyn Date: Wed, 5 Feb 2025 17:52:10 +0200 Subject: [PATCH 199/231] intel_idle: Provide the default enter_dead() handler commit fc4ca9537bc4e3141ba7e058700369ea242703df upstream. Recent Intel platforms require idle driver to provide information about the MWAIT hint used to enter the deepest idle state in the play_dead code. Provide the default enter_dead() handler for all of the platforms and allow overwriting with a custom handler for each platform if needed. Intel-SIG: commit fc4ca9537bc4 intel_idle: Provide the default enter_dead() handler Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Patryk Wlazlyn Signed-off-by: Artem Bityutskiy Signed-off-by: Dave Hansen Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/all/20250205155211.329780-4-artem.bityutskiy%40linux.intel.com [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/idle/intel_idle.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 60e1f0afffbc..1b3c6e6ee843 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -58,6 +58,7 @@ #include #include #include +#include #define INTEL_IDLE_VERSION "0.5.1" @@ -228,6 +229,15 @@ static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev, return 0; } +static void intel_idle_enter_dead(struct cpuidle_device *dev, int index) +{ + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + + mwait_play_dead(eax); +} + /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. @@ -1727,6 +1737,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) mark_tsc_unstable("TSC halts in idle"); state->enter = intel_idle; + state->enter_dead = intel_idle_enter_dead; state->enter_s2idle = intel_idle_s2idle; } } @@ -2073,6 +2084,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) !cpuidle_state_table[cstate].enter_s2idle) break; + if (!cpuidle_state_table[cstate].enter_dead) + cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead; + /* If marked as unusable, skip this state. */ if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) { pr_debug("state %s is disabled\n", -- Gitee From f9f9a2c9216cacf9fe1910a22259e11ce6feae0e Mon Sep 17 00:00:00 2001 From: Patryk Wlazlyn Date: Wed, 5 Feb 2025 17:52:11 +0200 Subject: [PATCH 200/231] x86/smp: Eliminate mwait_play_dead_cpuid_hint() commit 96040f7273e2bc0be1871ad9ed4da7b504da9410 upstream. 
Currently, mwait_play_dead_cpuid_hint() looks up the MWAIT hint of the deepest idle state by inspecting CPUID leaf 0x5 with the assumption that, if the number of sub-states for a given major C-state is nonzero, those sub-states are always represented by consecutive numbers starting from 0. This assumption is not based on the documented platform behavior and in fact it is not met on recent Intel platforms. For example, Intel's Sierra Forest report two C-states with two substates each in cpuid leaf 0x5: Name* target cstate target subcstate (mwait hint) =========================================================== C1 0x00 0x00 C1E 0x00 0x01 -- 0x10 ---- C6S 0x20 0x22 C6P 0x20 0x23 -- 0x30 ---- /* No more (sub)states all the way down to the end. */ =========================================================== * Names of the cstates are not included in the CPUID leaf 0x5, they are taken from the product specific documentation. Notice that hints 0x20 and 0x21 are not defined for C-state 0x20 (C6), so the existing MWAIT hint lookup in mwait_play_dead_cpuid_hint() based on the CPUID leaf 0x5 contents does not work in this case. Instead of using MWAIT hint lookup that is not guaranteed to work, make native_play_dead() rely on the idle driver for the given platform to put CPUs going offline into appropriate idle state and, if that fails, fall back to hlt_play_dead(). Accordingly, drop mwait_play_dead_cpuid_hint() altogether and make native_play_dead() call cpuidle_play_dead() instead of it unconditionally with the assumption that it will not return if it is successful. Still, in case cpuidle_play_dead() fails, call hlt_play_dead() at the end. Intel-SIG: commit 96040f7273e2 x86/smp: Eliminate mwait_play_dead_cpuid_hint() Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Patryk Wlazlyn Signed-off-by: Artem Bityutskiy Signed-off-by: Dave Hansen Reviewed-by: Gautham R. Shenoy Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/all/20250205155211.329780-5-artem.bityutskiy%40linux.intel.com [ Zhang Rui: resolve conflict (use CPUID_MWAIT_LEAF) and amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/smpboot.c | 54 +++++---------------------------------- 1 file changed, 7 insertions(+), 47 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6cb3e821d864..9b818af170ed 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1289,6 +1289,10 @@ void play_dead_common(void) local_irq_disable(); } +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. + */ void __noreturn mwait_play_dead(unsigned int eax_hint) { struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); @@ -1334,50 +1338,6 @@ void __noreturn mwait_play_dead(unsigned int eax_hint) } } -/* - * We need to flush the caches before going to sleep, lest we have - * dirty data in our caches when we come back up. - */ -static inline void mwait_play_dead_cpuid_hint(void) -{ - unsigned int eax, ebx, ecx, edx; - unsigned int highest_cstate = 0; - unsigned int highest_subcstate = 0; - int i; - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return; - if (!this_cpu_has(X86_FEATURE_MWAIT)) - return; - if (!this_cpu_has(X86_FEATURE_CLFLUSH)) - return; - - eax = CPUID_MWAIT_LEAF; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - - /* - * eax will be 0 if EDX enumeration is not valid. - * Initialized below to cstate, sub_cstate value when EDX is valid. 
- */ - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { - eax = 0; - } else { - edx >>= MWAIT_SUBSTATE_SIZE; - for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { - if (edx & MWAIT_SUBSTATE_MASK) { - highest_cstate = i; - highest_subcstate = edx & MWAIT_SUBSTATE_MASK; - } - } - eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | - (highest_subcstate - 1); - } - - mwait_play_dead(eax); -} - /* * Kick all "offline" CPUs out of mwait on kexec(). See comment in * mwait_play_dead(). @@ -1421,9 +1381,9 @@ void native_play_dead(void) play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - mwait_play_dead_cpuid_hint(); - if (cpuidle_play_dead()) - hlt_play_dead(); + /* Below returns only on error. */ + cpuidle_play_dead(); + hlt_play_dead(); } #else /* ... !CONFIG_HOTPLUG_CPU */ -- Gitee From 79019ed508e1e395fc55b3397e20b62590a9faeb Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 16 Feb 2025 14:26:14 +0200 Subject: [PATCH 201/231] ACPI/processor_idle: Export acpi_processor_ffh_play_dead() commit 64aad4749d7911f8c5e69d93a929a269605dd3cb upstream. The kernel test robot reported the following build error: >> ERROR: modpost: "acpi_processor_ffh_play_dead" [drivers/acpi/processor.ko] undefined! Caused by this recently merged commit: 541ddf31e300 ("ACPI/processor_idle: Add FFH state handling") The build failure is due to an oversight in the 'CONFIG_ACPI_PROCESSOR=m' case, the function export is missing. Add it. Intel-SIG: commit 64aad4749d79 ACPI/processor_idle: Export acpi_processor_ffh_play_dead() Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. 
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202502151207.FA9UO1iX-lkp@intel.com/ Fixes: 541ddf31e300 ("ACPI/processor_idle: Add FFH state handling") Signed-off-by: Artem Bityutskiy Signed-off-by: Ingo Molnar Cc: Dave Hansen Link: https://lore.kernel.org/r/de5bf4f116779efde315782a15146fdc77a4a044.camel@linux.intel.com [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/acpi/cstate.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index b047da0eb842..ea45b2d5454c 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -213,6 +213,7 @@ void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); mwait_play_dead(percpu_entry->states[cx->index].eax); } +EXPORT_SYMBOL_GPL(acpi_processor_ffh_play_dead); void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) { -- Gitee From 130d11e9065cd337a5429192ae2201aa60b9ae34 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Sun, 2 Mar 2025 16:48:51 -0800 Subject: [PATCH 202/231] x86/smp: Fix mwait_play_dead() and acpi_processor_ffh_play_dead() noreturn behavior commit 4e32645cd8f97a308300623f81c902747df6b97b upstream. Fix some related issues (done in a single patch to avoid introducing intermediate bisect warnings): 1) The SMP version of mwait_play_dead() doesn't return, but its !SMP counterpart does. Make its calling behavior consistent by resolving the !SMP version to a BUG(). It should never be called anyway, this just enforces that at runtime and enables its callers to be marked as __noreturn. 2) While the SMP definition of mwait_play_dead() is annotated as __noreturn, the declaration isn't. Nor is it listed in tools/objtool/noreturns.h. Fix that. 3) Similar to #1, the SMP version of acpi_processor_ffh_play_dead() doesn't return but its !SMP counterpart does. Make the !SMP version a BUG(). 
It should never be called. 4) acpi_processor_ffh_play_dead() doesn't return, but is lacking any __noreturn annotations. Fix that. This fixes the following objtool warnings: vmlinux.o: warning: objtool: acpi_processor_ffh_play_dead+0x67: mwait_play_dead() is missing a __noreturn annotation vmlinux.o: warning: objtool: acpi_idle_play_dead+0x3c: acpi_processor_ffh_play_dead() is missing a __noreturn annotation Intel-SIG: commit 4e32645cd8f9 x86/smp: Fix mwait_play_dead() and acpi_processor_ffh_play_dead() noreturn behavior Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Fixes: a7dd183f0b38 ("x86/smp: Allow calling mwait_play_dead with an arbitrary hint") Fixes: 541ddf31e300 ("ACPI/processor_idle: Add FFH state handling") Reported-by: Paul E. McKenney Signed-off-by: Josh Poimboeuf Signed-off-by: Ingo Molnar Tested-by: Paul E. McKenney Link: https://lore.kernel.org/r/e885c6fa9e96a61471b33e48c2162d28b15b14c5.1740962711.git.jpoimboe@kernel.org [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/include/asm/smp.h | 4 ++-- arch/x86/kernel/acpi/cstate.c | 2 +- include/acpi/processor.h | 6 +++--- tools/objtool/noreturns.h | 2 ++ 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 3d201d78455b..a6de0a8c1dc6 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -122,7 +122,7 @@ void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); void smp_kick_mwait_play_dead(void); -void mwait_play_dead(unsigned int eax_hint); +void __noreturn mwait_play_dead(unsigned int eax_hint); void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); @@ -174,7 +174,7 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu) return (struct cpumask *)cpumask_of(0); } -static inline void mwait_play_dead(unsigned int eax_hint) { } +static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); } #endif /* 
CONFIG_SMP */ #ifdef CONFIG_DEBUG_NMI_SELFTEST diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index ea45b2d5454c..8d4c9cff0b5b 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -205,7 +205,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, } EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); -void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) +void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) { unsigned int cpu = smp_processor_id(); struct cstate_entry *percpu_entry; diff --git a/include/acpi/processor.h b/include/acpi/processor.h index ce5ab2e20a99..034edf4178be 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -280,7 +280,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, struct acpi_processor_cx *cx, struct acpi_power_register *reg); void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate); -void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx); +void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx); #else static inline void acpi_processor_power_init_bm_check(struct acpi_processor_flags @@ -301,9 +301,9 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx { return; } -static inline void acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) +static inline void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx) { - return; + BUG(); } #endif diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h index 80a3e6acf31e..57cb18636e34 100644 --- a/tools/objtool/noreturns.h +++ b/tools/objtool/noreturns.h @@ -12,6 +12,7 @@ NORETURN(__reiserfs_panic) NORETURN(__stack_chk_fail) NORETURN(__ubsan_handle_builtin_unreachable) NORETURN(arch_call_rest_init) +NORETURN(acpi_processor_ffh_play_dead) NORETURN(arch_cpu_idle_dead) NORETURN(cpu_bringup_and_idle) NORETURN(cpu_startup_entry) @@ -28,6 +29,7 @@ NORETURN(kunit_try_catch_throw) 
NORETURN(machine_real_restart) NORETURN(make_task_dead) NORETURN(mpt_halt_firmware) +NORETURN(mwait_play_dead) NORETURN(nmi_panic_self_stop) NORETURN(panic) NORETURN(panic_smp_self_stop) -- Gitee From 5aeee8a32c9dad689828051d7acb161225500a35 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 29 May 2025 15:40:43 +0200 Subject: [PATCH 203/231] Revert "x86/smp: Eliminate mwait_play_dead_cpuid_hint()" commit 70523f335734b0b42f97647556d331edf684c7dc upstream. Revert commit 96040f7273e2 ("x86/smp: Eliminate mwait_play_dead_cpuid_hint()") because it introduced a significant power regression on systems that start with "nosmt" in the kernel command line. Namely, on such systems, SMT siblings permanently go offline early, when cpuidle has not been initialized yet, so after the above commit, hlt_play_dead() is called for them. Later on, when the processor attempts to enter a deep package C-state, including PC10 which is requisite for reaching minimum power in suspend-to-idle, it is not able to do that because of the SMT siblings staying in C1 (which they have been put into by HLT). As a result, the idle power (including power in suspend-to-idle) rises quite dramatically on those systems with all of the possible consequences, which (needless to say) may not be expected by their users. This issue is hard to debug and potentially dangerous, so it needs to be addressed as soon as possible in a way that will work for 6.15.y, hence the revert. Of course, after this revert, the issue that commit 96040f7273e2 attempted to address will be back and it will need to be fixed again later. Intel-SIG: commit 70523f335734 Revert "x86/smp: Eliminate mwait_play_dead_cpuid_hint()" Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Fixes: 96040f7273e2 ("x86/smp: Eliminate mwait_play_dead_cpuid_hint()") Reported-by: Todd Brandt Tested-by: Todd Brandt Cc: 6.15+ # 6.15+ Signed-off-by: Rafael J. 
Wysocki Acked-by: Dave Hansen Link: https://patch.msgid.link/12674167.O9o76ZdvQC@rjwysocki.net [ Zhang Rui: resolve conflict (use CPUID_MWAIT_LEAF) and amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/smpboot.c | 54 ++++++++++++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9b818af170ed..6cb3e821d864 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1289,10 +1289,6 @@ void play_dead_common(void) local_irq_disable(); } -/* - * We need to flush the caches before going to sleep, lest we have - * dirty data in our caches when we come back up. - */ void __noreturn mwait_play_dead(unsigned int eax_hint) { struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); @@ -1338,6 +1334,50 @@ void __noreturn mwait_play_dead(unsigned int eax_hint) } } +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. + */ +static inline void mwait_play_dead_cpuid_hint(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return; + if (!this_cpu_has(X86_FEATURE_MWAIT)) + return; + if (!this_cpu_has(X86_FEATURE_CLFLUSH)) + return; + + eax = CPUID_MWAIT_LEAF; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* + * eax will be 0 if EDX enumeration is not valid. + * Initialized below to cstate, sub_cstate value when EDX is valid. 
+ */ + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { + eax = 0; + } else { + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + } + + mwait_play_dead(eax); +} + /* * Kick all "offline" CPUs out of mwait on kexec(). See comment in * mwait_play_dead(). @@ -1381,9 +1421,9 @@ void native_play_dead(void) play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - /* Below returns only on error. */ - cpuidle_play_dead(); - hlt_play_dead(); + mwait_play_dead_cpuid_hint(); + if (cpuidle_play_dead()) + hlt_play_dead(); } #else /* ... !CONFIG_HOTPLUG_CPU */ -- Gitee From c0f33044cb406cd381248f385cdba8369b6fe1ed Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 5 Jun 2025 17:04:11 +0200 Subject: [PATCH 204/231] intel_idle: Use subsys_initcall_sync() for initialization commit c0f691388992c708436ab5f6e810865be6ddf5c6 upstream. It is not necessary to wait until the device_initcall() stage with intel_idle initialization. All of its dependencies are met after all subsys_initcall()s have run, so subsys_initcall_sync() can be used for initializing it. It is also better to ensure that intel_idle will always initialize before the ACPI processor driver that uses module_init() for its initialization. Intel-SIG: commit c0f691388992 intel_idle: Use subsys_initcall_sync() for initialization Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. 
Wysocki Tested-by: Artem Bityutskiy Link: https://patch.msgid.link/2994397.e9J7NaK4W3@rjwysocki.net [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/idle/intel_idle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 1b3c6e6ee843..4dbec64cc600 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -2312,7 +2312,7 @@ static int __init intel_idle_init(void) return retval; } -device_initcall(intel_idle_init); +subsys_initcall_sync(intel_idle_init); /* * We are not really modular, but we used to support that. Meaning we also -- Gitee From 46386c190f662f5858ce61de7990f8a1ea26b408 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sat, 7 Jun 2025 14:22:56 +0200 Subject: [PATCH 205/231] x86/smp: PM/hibernate: Split arch_resume_nosmt() commit 4c529a4a7260776bb4abe264498857b4537aa70d upstream. Move the inner part of the arch_resume_nosmt() code into a separate function called arch_cpu_rescan_dead_smt_siblings(), so it can be used in other places where "dead" SMT siblings may need to be taken online and offline again in order to get into deep idle states. No intentional functional impact. Intel-SIG: commit 4c529a4a7260 x86/smp: PM/hibernate: Split arch_resume_nosmt() Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. Wysocki Acked-by: Dave Hansen Tested-by: Artem Bityutskiy Link: https://patch.msgid.link/3361688.44csPzL39Z@rjwysocki.net [ rjw: Prevent build issues with CONFIG_SMP unset ] Signed-off-by: Rafael J. 
Wysocki [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/smp.c | 24 ++++++++++++++++++++++++ arch/x86/power/hibernate.c | 19 ++++++------------- include/linux/cpu.h | 3 +++ 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 96a771f9f930..1e8e6efe8ddf 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -295,3 +295,27 @@ struct smp_ops smp_ops = { .send_call_func_single_ipi = native_send_call_func_single_ipi, }; EXPORT_SYMBOL_GPL(smp_ops); + +int arch_cpu_rescan_dead_smt_siblings(void) +{ + enum cpuhp_smt_control old = cpu_smt_control; + int ret; + + /* + * If SMT has been disabled and SMT siblings are in HLT, bring them back + * online and offline them again so that they end up in MWAIT proper. + * + * Called with hotplug enabled. + */ + if (old != CPU_SMT_DISABLED && old != CPU_SMT_FORCE_DISABLED) + return 0; + + ret = cpuhp_smt_enable(); + if (ret) + return ret; + + ret = cpuhp_smt_disable(old); + + return ret; +} +EXPORT_SYMBOL_GPL(arch_cpu_rescan_dead_smt_siblings); diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c index d8af46e67750..ee3c56af548e 100644 --- a/arch/x86/power/hibernate.c +++ b/arch/x86/power/hibernate.c @@ -188,7 +188,8 @@ int relocate_restore_code(void) int arch_resume_nosmt(void) { - int ret = 0; + int ret; + /* * We reached this while coming out of hibernation. This means * that SMT siblings are sleeping in hlt, as mwait is not safe @@ -202,18 +203,10 @@ int arch_resume_nosmt(void) * Called with hotplug disabled. 
*/ cpu_hotplug_enable(); - if (cpu_smt_control == CPU_SMT_DISABLED || - cpu_smt_control == CPU_SMT_FORCE_DISABLED) { - enum cpuhp_smt_control old = cpu_smt_control; - - ret = cpuhp_smt_enable(); - if (ret) - goto out; - ret = cpuhp_smt_disable(old); - if (ret) - goto out; - } -out: + + ret = arch_cpu_rescan_dead_smt_siblings(); + cpu_hotplug_disable(); + return ret; } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 8e596dc11a0f..afbaf4762371 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -115,6 +115,7 @@ extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); int bringup_hibernate_cpu(unsigned int sleep_cpu); void bringup_nonboot_cpus(unsigned int setup_max_cpus); +int arch_cpu_rescan_dead_smt_siblings(void); #else /* CONFIG_SMP */ #define cpuhp_tasks_frozen 0 @@ -129,6 +130,8 @@ static inline void cpu_maps_update_done(void) static inline int add_cpu(unsigned int cpu) { return 0;} +static inline int arch_cpu_rescan_dead_smt_siblings(void) { return 0; } + #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; -- Gitee From eab8d8529f1057d480653e4e367ae42386ccb858 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 5 Jun 2025 17:06:08 +0200 Subject: [PATCH 206/231] intel_idle: Rescan "dead" SMT siblings during initialization commit a430c11f401589a0f4f57fd398271a5d85142c7a upstream. Make intel_idle_init() call arch_cpu_rescan_dead_smt_siblings() after successfully registering intel_idle as the cpuidle driver so as to allow the "dead" SMT siblings (if any) to go into deep idle states. This is necessary for the processor to be able to reach deep package C-states (like PC10) going forward which is requisite for reducing power sufficiently in suspend-to-idle, among other things. Intel-SIG: commit a430c11f4015 intel_idle: Rescan "dead" SMT siblings during initialization Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. 
Wysocki Tested-by: Artem Bityutskiy Link: https://patch.msgid.link/10669885.nUPlyArG6x@rjwysocki.net [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/idle/intel_idle.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 4dbec64cc600..c576d614a8e5 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -2302,6 +2302,8 @@ static int __init intel_idle_init(void) pr_debug("Local APIC timer is reliable in %s\n", boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1"); + arch_cpu_rescan_dead_smt_siblings(); + return 0; hp_setup_fail: -- Gitee From 8feaa879a0fd286d91202785861da83fa2429dd3 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 5 Jun 2025 17:07:31 +0200 Subject: [PATCH 207/231] ACPI: processor: Rescan "dead" SMT siblings during initialization commit f694481b1d3177144fcac4242eb750cfcb9f7bd5 upstream. Make acpi_processor_driver_init() call arch_cpu_rescan_dead_smt_siblings(), via a new wrapper function called acpi_idle_rescan_dead_smt_siblings(), after successfully initializing the driver, to allow the "dead" SMT siblings to go into deep idle states, which is necessary for the processor to be able to reach deep package C-states (like PC10) going forward, so that power can be reduced sufficiently in suspend-to-idle, among other things. However, do it only if the ACPI idle driver is the current cpuidle driver (otherwise it is assumed that another cpuidle driver will take care of this) and avoid doing it on architectures other than x86. Intel-SIG: commit f694481b1d31 ACPI: processor: Rescan "dead" SMT siblings during initialization Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. 
Wysocki Tested-by: Artem Bityutskiy Link: https://patch.msgid.link/2005721.PYKUYFuaPT@rjwysocki.net [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- drivers/acpi/internal.h | 6 ++++++ drivers/acpi/processor_driver.c | 3 +++ drivers/acpi/processor_idle.c | 8 ++++++++ 3 files changed, 17 insertions(+) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 1e8ee97fc85f..a7fb8a3038bd 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -155,6 +155,12 @@ bool processor_physically_present(acpi_handle handle); static inline void acpi_early_processor_control_setup(void) {} #endif +#ifdef CONFIG_ACPI_PROCESSOR_CSTATE +void acpi_idle_rescan_dead_smt_siblings(void); +#else +static inline void acpi_idle_rescan_dead_smt_siblings(void) {} +#endif + /* -------------------------------------------------------------------------- Embedded Controller -------------------------------------------------------------------------- */ diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 4bd16b3f0781..126643dde2d4 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -283,6 +283,9 @@ static int __init acpi_processor_driver_init(void) NULL, acpi_soft_cpu_dead); acpi_processor_throttling_init(); + + acpi_idle_rescan_dead_smt_siblings(); + return 0; err: driver_unregister(&acpi_processor_driver); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index bd088cff4917..24b56b074860 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -24,6 +24,8 @@ #include #include +#include "internal.h" + /* * Include the apic definitions for x86 to have the APIC timer related defines * available also for UP (on SMP it gets magically included via linux/smp.h). 
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = { }; #ifdef CONFIG_ACPI_PROCESSOR_CSTATE +void acpi_idle_rescan_dead_smt_siblings(void) +{ + if (cpuidle_get_driver() == &acpi_idle_driver) + arch_cpu_rescan_dead_smt_siblings(); +} + static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate); -- Gitee From 161b5b02d296983f0ae6922ba3d634f167a5414e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 5 Jun 2025 17:09:35 +0200 Subject: [PATCH 208/231] Reapply "x86/smp: Eliminate mwait_play_dead_cpuid_hint()" commit a18d098f2aab82abde1d59c56aef9beca01c892b upstream. Revert commit 70523f335734 ("Revert "x86/smp: Eliminate mwait_play_dead_cpuid_hint()"") to reapply the changes from commit 96040f7273e2 ("x86/smp: Eliminate mwait_play_dead_cpuid_hint()") reverted by it. Previously, these changes caused idle power to rise on systems booting with "nosmt" in the kernel command line because they effectively caused "dead" SMT siblings to remain in idle state C1 after executing the HLT instruction, which prevented the processor from reaching package idle states deeper than PC2 going forward. Now, the "dead" SMT siblings are rescanned after initializing a proper cpuidle driver for the processor (either intel_idle or ACPI idle), at which point they are able to enter a sufficiently deep idle state in native_play_dead() via cpuidle, so the code changes in question can be reapplied. Intel-SIG: commit a18d098f2aab Reapply "x86/smp: Eliminate mwait_play_dead_cpuid_hint()" Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Signed-off-by: Rafael J. 
Wysocki Acked-by: Dave Hansen Tested-by: Artem Bityutskiy Link: https://patch.msgid.link/7813065.EvYhyI6sBW@rjwysocki.net [ Zhang Rui: resolve conflict (use CPUID_MWAIT_LEAF) and amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/kernel/smpboot.c | 54 +++++---------------------------------- 1 file changed, 7 insertions(+), 47 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6cb3e821d864..9b818af170ed 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1289,6 +1289,10 @@ void play_dead_common(void) local_irq_disable(); } +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. + */ void __noreturn mwait_play_dead(unsigned int eax_hint) { struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); @@ -1334,50 +1338,6 @@ void __noreturn mwait_play_dead(unsigned int eax_hint) } } -/* - * We need to flush the caches before going to sleep, lest we have - * dirty data in our caches when we come back up. - */ -static inline void mwait_play_dead_cpuid_hint(void) -{ - unsigned int eax, ebx, ecx, edx; - unsigned int highest_cstate = 0; - unsigned int highest_subcstate = 0; - int i; - - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return; - if (!this_cpu_has(X86_FEATURE_MWAIT)) - return; - if (!this_cpu_has(X86_FEATURE_CLFLUSH)) - return; - - eax = CPUID_MWAIT_LEAF; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - - /* - * eax will be 0 if EDX enumeration is not valid. - * Initialized below to cstate, sub_cstate value when EDX is valid. 
- */ - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { - eax = 0; - } else { - edx >>= MWAIT_SUBSTATE_SIZE; - for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { - if (edx & MWAIT_SUBSTATE_MASK) { - highest_cstate = i; - highest_subcstate = edx & MWAIT_SUBSTATE_MASK; - } - } - eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | - (highest_subcstate - 1); - } - - mwait_play_dead(eax); -} - /* * Kick all "offline" CPUs out of mwait on kexec(). See comment in * mwait_play_dead(). @@ -1421,9 +1381,9 @@ void native_play_dead(void) play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - mwait_play_dead_cpuid_hint(); - if (cpuidle_play_dead()) - hlt_play_dead(); + /* Below returns only on error. */ + cpuidle_play_dead(); + hlt_play_dead(); } #else /* ... !CONFIG_HOTPLUG_CPU */ -- Gitee From dff95b9d7bb26fae86cfd2b8f9f620ec7d136f33 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 21 Sep 2025 10:56:40 +0200 Subject: [PATCH 209/231] x86/topology: Implement topology_is_core_online() to address SMT regression commit 2066f00e5b2dc061fb6d8c88fadaebc97f11feaa upstream. Christian reported that commit a430c11f4015 ("intel_idle: Rescan "dead" SMT siblings during initialization") broke the use case in which both 'nosmt' and 'maxcpus' are on the kernel command line because it onlines primary threads, which were offline due to the maxcpus limit. The initially proposed fix to skip primary threads in the loop is inconsistent. While it prevents the primary thread to be onlined, it then onlines the corresponding hyperthread(s), which does not really make sense. The CPU iterator in cpuhp_smt_enable() contains a check which excludes all threads of a core, when the primary thread is offline. The default implementation is a NOOP and therefore not effective on x86. Implement topology_is_core_online() on x86 to address this issue. This makes the behaviour consistent between x86 and PowerPC. 
Intel-SIG: commit 2066f00e5b2d x86/topology: Implement topology_is_core_online() to address SMT regression Fix the incorrect power/idle state of the offlined cpus. For GRR/SRF/CWF. Fixes: a430c11f4015 ("intel_idle: Rescan "dead" SMT siblings during initialization") Fixes: f694481b1d31 ("ACPI: processor: Rescan "dead" SMT siblings during initialization") Closes: https://lore.kernel.org/linux-pm/724616a2-6374-4ba3-8ce3-ea9c45e2ae3b@arm.com/ Reported-by: Christian Loehle Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Rafael J. Wysocki (Intel) Tested-by: Christian Loehle Cc: stable@vger.kernel.org Link: https://lore.kernel.org/12740505.O9o76ZdvQC@rafael.j.wysocki [ Zhang Rui: amend commit log ] Signed-off-by: Zhang Rui --- arch/x86/include/asm/topology.h | 10 ++++++++++ arch/x86/kernel/cpu/topology.c | 13 +++++++++++++ 2 files changed, 23 insertions(+) diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 76b1d87f1531..ff09685f00ba 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -220,6 +220,16 @@ static inline bool topology_is_primary_thread(unsigned int cpu) return cpumask_test_cpu(cpu, cpu_primary_thread_mask); } +int topology_get_primary_thread(unsigned int cpu); + +static inline bool topology_is_core_online(unsigned int cpu) +{ + int pcpu = topology_get_primary_thread(cpu); + + return pcpu >= 0 ? 
cpu_online(pcpu) : false; +} +#define topology_is_core_online topology_is_core_online + #else /* CONFIG_SMP */ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } static inline int topology_max_smt_threads(void) { return 1; } diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index a8d1cc6d223d..cd0cfd42ea8b 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -371,6 +371,19 @@ unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_uni return topo_unit_count(lvlid, at_level, apic_maps[which_units].map); } +#ifdef CONFIG_SMP +int topology_get_primary_thread(unsigned int cpu) +{ + u32 apic_id = cpuid_to_apicid[cpu]; + + /* + * Get the core domain level APIC id, which is the primary thread + * and return the CPU number assigned to it. + */ + return topo_lookup_cpuid(topo_apicid(apic_id, TOPO_CORE_DOMAIN)); +} +#endif + #ifdef CONFIG_ACPI_HOTPLUG_CPU /** * topology_hotplug_apic - Handle a physical hotplugged APIC after boot -- Gitee From 36562b1c8d79f103401bf00a9d9510673d342cda Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Tue, 24 Mar 2026 09:12:17 +0800 Subject: [PATCH 210/231] LoongArch: config: enable QLogic and QED-related drivers as modules Upstream: no Conflict: none Checkpatch: pass This patch enables a series of QLogic and QED-related network, storage, and InfiniBand drivers as modules in loongson3_defconfig. 
Specifically, the following configs are enabled: - Storage: CONFIG_QEDI (iSCSI) and CONFIG_QEDF (FCoE) - Network: CONFIG_QED, CONFIG_QEDE, CONFIG_QLA3XXX, and CONFIG_NETXEN_NIC - InfiniBand: CONFIG_INFINIBAND_QEDR Signed-off-by: Ming Wang --- arch/loongarch/configs/loongson3_defconfig | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 5d48a0d1b0e6..f7f5ccc4f0dc 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -726,6 +726,8 @@ CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_CHELSIO_FCOE=m CONFIG_SCSI_DH=y @@ -873,7 +875,10 @@ CONFIG_YT6801=m # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set CONFIG_ETHOC=m -# CONFIG_NET_VENDOR_QLOGIC is not set +CONFIG_QLA3XXX=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QEDE=m # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set @@ -1848,6 +1853,7 @@ CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_QEDR=m CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_RDMA_RXE=m CONFIG_INFINIBAND_IPOIB=m -- Gitee From c28fe093da13422e8a392c7413769d657e2f7ef9 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 29 Jan 2026 17:34:33 +0800 Subject: [PATCH 211/231] EDAC/amd64: Convert Hygon family 18h model checks to switch-case commit 41369fd13d4c2193c8ecb92a8072b705e38aabac anolis ANBZ: #30343 Replaces a chain of if-else statements with a switch statement for handling Hygon family 18h models processors. 
Hygon-SIG: commit none hygon anolis: EDAC/amd64: Convert Hygon family 18h model checks to switch-case Hygon-SIG: commit 41369fd13d4c anolis anolis: EDAC/amd64: Convert Hygon family 18h model checks to switch-case Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Xiaochen Shen Reviewed-by: Ruidong Tian Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6454 [ YuntongJin : amend commit log ] Signed-off-by: YuntongJin --- drivers/edac/amd64_edac.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 18ea110a1d74..28ebbade564c 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4481,28 +4481,31 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: - if (pvt->model == 0x4) { + switch (pvt->model) { + case 0x4: pvt->ctl_name = "F18h_M04h"; pvt->max_mcs = 3; break; - } else if (pvt->model == 0x5) { + case 0x5: pvt->ctl_name = "F18h_M05h"; pvt->max_mcs = 1; break; - } else if (pvt->model == 0x6) { + case 0x6: pvt->ctl_name = "F18h_M06h"; break; - } else if (pvt->model == 0x7) { + case 0x7: pvt->ctl_name = "F18h_M07h"; break; - } else if (pvt->model == 0x8) { + case 0x8: pvt->ctl_name = "F18h_M08h"; break; - } else if (pvt->model == 0x10) { + case 0x10: pvt->ctl_name = "F18h_M10h"; break; + default: + pvt->ctl_name = "F18h"; + break; } - pvt->ctl_name = "F18h"; break; case 0x19: -- Gitee From 442588c90c5dbbd17b1017f723a928d522e6097d Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 15:08:42 +0800 Subject: [PATCH 212/231] ALSA: hda: Add support for Hygon family 18h model 18h HD-Audio commit f8f6147fddc76fe44c4ca6a82241b5772afc0860 anolis. ANBZ: #30096 Add the new PCI ID 0x1d94 0x2007 for Hygon family 18h model 18h HDA controller. 
Hygon-SIG: commit none hygon anolis: ALSA: hda: Add support for Hygon family 18h model 18h HD-Audio Hygon-SIG: commit f8f6147fddc7 anolis anolis: ALSA: hda: Add support for Hygon family 18h model 18h HD-Audio Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Xiaochen Shen Reviewed-by Guixin Liu Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/6459 [ YuntongJin : amend commit log and add PCI_DEVICE_ID_HYGON_18H_M18H_HDA in pci_ids.h ] Signed-off-by: YuntongJin --- include/linux/pci_ids.h | 1 + sound/pci/hda/hda_intel.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 71e84110fcdc..b830bae07582 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2603,6 +2603,7 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 #define PCI_DEVICE_ID_HYGON_18H_M10H_HDA 0x14c9 +#define PCI_DEVICE_ID_HYGON_18H_M18H_HDA 0x2007 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3 0x14d3 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 11d8d0134a26..133a5610b3f3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2870,6 +2870,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_HDA), .driver_data = AZX_DRIVER_HYGON }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M18H_HDA), + .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); -- Gitee From c9e032460ecdc3e5dad4944aa08e9135deb495e8 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 11:42:48 +0800 Subject: [PATCH 213/231] iommu/hygon: Add support for Hygon family 18h model 18h IOAPIC commit ec8b426005d02f1cff53e599d9a5aa1f7a85f920 anolis. 
ANBZ: #30095 The SB IOAPIC for Hygon family 18h model 18h processors is also on the device 0xb. Hygon-SIG: commit none hygon anolis: iommu/hygon: Add support for Hygon family 18h model 18h IOAPIC Hygon-SIG: commit ec8b426005d0 anolis anolis: iommu/hygon: Add support for Hygon family 18h model 18h IOAPIC Backport to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6458 [ YuntongJin : amend commit log ] Signed-off-by: YuntongJin --- drivers/iommu/amd/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 24bdc26f354b..4738c8e7f007 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3074,7 +3074,7 @@ static bool __init check_ioapic_information(void) (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model >= 0x4 && - boot_cpu_data.x86_model <= 0x10 && + boot_cpu_data.x86_model <= 0x18 && devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; -- Gitee From 6e26304153e07e48c4053c69c8cf687a0859b1e5 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 11:20:51 +0800 Subject: [PATCH 214/231] hwmon/k10temp: Add support for Hygon family 18h model 18h commit 017d6e72deb966de1cbc6b608f1386a07f26ae2d anolis. ANBZ: #30094 Add 18H_M18H DF F3 device ID to get the temperature for Hygon family 18h model 18h processor. 
Hygon-SIG: commit none hygon anolis: hwmon/k10temp: Add support for Hygon family 18h model 18h Hygon-SIG: commit 017d6e72deb9 anolis anolis: hwmon/k10temp: Add support for Hygon family 18h model 18h Backport to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Guixin Liu Reviewed-by: Xiaochen Shen Link: https://gitee.com/anolis/cloud-kernel/pulls/6457 [ YuntongJin: amend commit log ] Signed-off-by: YuntongJin --- drivers/hwmon/k10temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index cc3ae64e25ae..52bfeb8713e1 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -653,6 +653,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M18H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From 64dddb0588b8d83a174d97c686774f015d1713ba Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Mon, 26 Jan 2026 16:23:14 +0800 Subject: [PATCH 215/231] x86/amd_nb: Add Hygon family 18h model 18h PCI IDs commit 903e3d7834e2c901fe6371d79d45493c9ca23f9a anolis. ANBZ: #30093 Add the PCI device IDs for Hygon family 18h model 18h processors. 
Hygon-SIG: commit none hygon anolis: x86/amd_nb: Add Hygon family 18h model 18h PCI IDs Hygon-SIG: commit 903e3d7834e2 anolis anolis: x86/amd_nb: Add Hygon family 18h model 18h PCI IDs Backport to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6456 [ YuntongJin : amend commit log ] Signed-off-by: YuntongJin --- arch/x86/kernel/amd_nb.c | 5 +++++ include/linux/pci_ids.h | 1 + 2 files changed, 6 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 30750716814a..826e7ea041e0 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -23,10 +23,12 @@ #define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 #define PCI_DEVICE_ID_HYGON_18H_M10H_ROOT 0x14c0 +#define PCI_DEVICE_ID_HYGON_18H_M18H_ROOT 0x2000 #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 #define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M18H_DF_F4 0x2014 #define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 static u32 *flush_words; @@ -49,6 +51,7 @@ static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M18H_ROOT) }, {} }; @@ -57,6 +60,7 @@ static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M18H_DF_F3) }, {} }; @@ -65,6 +69,7 @@ static const struct pci_device_id 
hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M18H_DF_F4) }, {} }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 71e84110fcdc..971a362e9a04 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2605,6 +2605,7 @@ #define PCI_DEVICE_ID_HYGON_18H_M10H_HDA 0x14c9 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3 0x14d3 +#define PCI_DEVICE_ID_HYGON_18H_M18H_DF_F3 0x2013 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad -- Gitee From b7311432b700c9d8205b0ea79dd3a393f0d265a7 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Mon, 16 Mar 2026 11:42:42 +0800 Subject: [PATCH 216/231] EDAC/amd64: Add support for Hygon family 18h model 18h commit eb162a81e5007122e42bab182a9807dbe34fc029 anolis. ANBZ: #30343 Add Hygon family 18h model 18h processor support for amd64_edac. For Hygon family 18h model 0x18h-0x1fh processors, the UMC base are identical, so modify them uniformly. Meanwhile, code for Hygon UMC base calculations has been moved to a dedicated function. 
Hygon-SIG: commit none hygon anolis: EDAC/amd64: Add support for Hygon family 18h model 18h Hygon-SIG: commit eb162a81e500 anolis anolis: EDAC/amd64: Add support for Hygon family 18h model 18h Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Xiaochen Shen Reviewed-by: Ruidong Tian Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6454 [ YuntongJin : amend commit log, fix conflict ] Signed-off-by: YuntongJin --- drivers/edac/amd64_edac.c | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 28ebbade564c..a69d9ebf9472 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -107,6 +107,29 @@ static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); } +static u32 get_umc_base_f18h_m18h(u8 channel) +{ + return 0x70000000 + (channel << 20); +} + +static u32 hygon_get_umc_base(struct amd64_pvt *pvt, u8 channel) +{ + u32 umc_base; + + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, channel); + /* + * For Hygon family 18h model 0x18h-0x1fh processors, the UMC base + * are identical. 
+ */ + else if (hygon_f18h_m10h() && boot_cpu_data.x86_model >= 0x18) + umc_base = get_umc_base_f18h_m18h(channel); + else + umc_base = get_umc_base(channel); + + return umc_base; +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -2053,8 +2076,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) if (!hygon_umc_channel_enabled(pvt, umc)) continue; - if (hygon_f18h_m4h()) - umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + umc_base = hygon_get_umc_base(pvt, umc); else umc_base = get_umc_base(umc); @@ -3543,8 +3566,8 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) if (!hygon_umc_channel_enabled(pvt, i)) continue; - if (hygon_f18h_m4h()) - umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + umc_base = hygon_get_umc_base(pvt, i); else umc_base = get_umc_base(i); -- Gitee From 78a448517f4b9364667818966dfb24d9250c2f73 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 17:17:57 +0800 Subject: [PATCH 217/231] mmc: sdhci-pci: Add support for Hygon SD controller commit 40cf687b108d607faea96aa28265a77d7b9aa38a anolis. ANBZ: #30151 Add PCI ID for Hygon SD controller. 
Hygon-SIG: commit none hygon anolis: mmc: sdhci-pci: Add support for Hygon SD controller Hygon-SIG: commit 40cf687b108d anolis anolis: mmc: sdhci-pci: Add support for Hygon SD controller Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6465 [ YuntongJin: amend commit log and fix conflict ] Signed-off-by: YuntongJin --- drivers/mmc/host/sdhci-pci-core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 2ea5357e3bf0..47d3e6640b7a 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1942,6 +1942,7 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e), SDHCI_PCI_DEVICE(GLI, 9767, gl9767), SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), + SDHCI_PCI_DEVICE_CLASS(HYGON, SYSTEM_SDHCI, PCI_CLASS_MASK, amd), /* Generic SD host controller */ {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)}, { /* end: all zeroes */ }, -- Gitee From 4c18fd82c0b7ceeeec491e68d9d268984326567a Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 16:51:57 +0800 Subject: [PATCH 218/231] perf/x86/uncore: Add DF PMU support for Hygon family 18h model 18h commit dbab1265fda0034466e3d7b1d7c4e1877fbf84d2 anolis. ANBZ: #30150 Adjust the DF PMU event and umask for Hygon family 18h model 18h processor. 
Hygon-SIG: commit none hygon anolis: perf/x86/uncore: Add DF PMU support for Hygon family 18h model 18h Hygon-SIG: commit dbab1265fda0 anolis anolis: perf/x86/uncore: Add DF PMU support for Hygon family 18h model 18h Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6464 [ YuntongJin: amend commit log, fix conflict ] Signed-off-by: YuntongJin --- arch/x86/events/amd/uncore.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index cb0aa2e93d06..c7831fdef5cc 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -742,10 +742,8 @@ static int __init amd_uncore_init(void) if (boot_cpu_data.x86_model == 0x4 || boot_cpu_data.x86_model == 0x5) *df_attr++ = &format_attr_umask10f18h.attr; - else if (boot_cpu_data.x86_model == 0x6 || - boot_cpu_data.x86_model == 0x7 || - boot_cpu_data.x86_model == 0x8 || - boot_cpu_data.x86_model == 0x10) + else if (boot_cpu_data.x86_model >= 0x6 && + boot_cpu_data.x86_model <= 0x18) *df_attr++ = &format_attr_umask12f18h.attr; } -- Gitee From ddf7e78c1c71d55ae3038c8383bb9846b81a0d68 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 15:31:37 +0800 Subject: [PATCH 219/231] mmc: sdhci-acpi: Add device HID for Hygon eMMC controller commit 42c9284c729ea71b3d363c8c4160b796b738c9f7 anolis. ANBZ: #30151 Add HID HYGO0040 to support Hygon eMMC controller. 
Hygon-SIG: commit none hygon anolis: mmc: sdhci-acpi: Add device HID for Hygon eMMC controller Hygon-SIG: commit 42c9284c729e anolis anolis: mmc: sdhci-acpi: Add device HID for Hygon eMMC controller Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6462 [ YuntongJin : amend commit log,fix conflict ] Signed-off-by: YuntongJin --- drivers/mmc/host/sdhci-acpi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index eb8f427f9770..2f24859580b4 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -700,6 +700,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd }, { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc }, { "AMDI0041", NULL, &sdhci_acpi_slot_amd_emmc }, + { "HYGO0040", NULL, &sdhci_acpi_slot_amd_emmc }, { }, }; @@ -718,6 +719,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { { "QCOM8052" }, { "AMDI0040" }, { "AMDI0041" }, + { "HYGO0040" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); -- Gitee From f1161e235f053aa5d4511ccaf845a69044f0d805 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 27 Jan 2026 16:43:41 +0800 Subject: [PATCH 220/231] spi: dw: Add device HID for Hygon SPI controller commit 4bf4507e1cf3c59031c12d2b7c5e93043dd11c69 anolis. ANBZ: #30152 Add HID HYGO0062 to support Hygon SPI controller. 
Hygon-SIG: commit none hygon anolis: spi: dw: Add device HID for Hygon SPI controller Hygon-SIG: commit 4bf4507e1cf3 anolis anolis: spi: dw: Add device HID for Hygon SPI controller Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6463 [ YuntongJin : amend commit log, fix conflict ] Signed-off-by: YuntongJin --- drivers/spi/spi-dw-mmio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 805264c9c65c..6a2222290b52 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -439,6 +439,7 @@ MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id dw_spi_mmio_acpi_match[] = { {"HISI0173", (kernel_ulong_t)dw_spi_pssi_init}, + {"HYGO0062", (kernel_ulong_t)dw_spi_hssi_init}, {}, }; MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match); -- Gitee From 6efff4c0a1eb39fefaeab8d9ec281ddab569fce8 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Wed, 25 Mar 2026 16:52:02 +0800 Subject: [PATCH 221/231] anolis: EDAC/amd64: Adjust the maximum number of memory controllers per node to 1 for Hygon family 18h model 18h commit 301061b5a9bf685af46b38f3ec512ae690d22f9e anolis. ANBZ: #31970 Adjust max_mcs for Hygon Family 18h Model 18h processors to reflect the correct number of memory controllers per node. Set to 1 as this CPU model hasa single memory controller per node. 
Hygon-SIG: commit none hygon anolis: EDAC/amd64: Adjust the maximum number of memory controllers per node to 1 for Hygon family 18h model 18h Hygon-SIG: commit 301061b5a9bf anolis anolis: EDAC/amd64: Adjust the maximum number of memory controllers per node to 1 for Hygon family 18h model 18h Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Xiaochen Shen Reviewed-by: Yuanhe Shu Reviewed-by: Ruidong Tian Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/6671 [ YuntongJin : amend commit log,fix conflict ] Signed-off-by: YuntongJin --- drivers/edac/amd64_edac.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index a69d9ebf9472..a68d5f2ac7c7 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4525,6 +4525,10 @@ static int per_family_init(struct amd64_pvt *pvt) case 0x10: pvt->ctl_name = "F18h_M10h"; break; + case 0x18: + pvt->ctl_name = "F18h_M18h"; + pvt->max_mcs = 1; + break; default: pvt->ctl_name = "F18h"; break; -- Gitee From 675bab3beb64534d3558aa411651a01dc7ba62ac Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 31 Oct 2024 15:02:13 -0700 Subject: [PATCH 222/231] x86/resctrl: Support Sub-NUMA cluster mode SNC6 commit 9bce6e94c4b39b6baa649784d92f908aa9168a45 upstream. Support Sub-NUMA cluster mode with 6 nodes per L3 cache (SNC6) on some Intel platforms. Intel-SIG: commit 9bce6e94c4b3 x86/resctrl: Support Sub-NUMA cluster mode SNC6 Backport the support for Sub-NUMA cluster mode SNC6. 
Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Reinette Chatre Link: https://lore.kernel.org/r/20241031220213.17991-1-tony.luck@intel.com [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Signed-off-by: Jason Zeng --- arch/x86/kernel/cpu/resctrl/monitor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 1caf4cdda774..d6ecd7a5503c 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1162,11 +1162,12 @@ static __init int snc_get_config(void) ret = cpus_per_l3 / cpus_per_node; - /* sanity check: Only valid results are 1, 2, 3, 4 */ + /* sanity check: Only valid results are 1, 2, 3, 4, 6 */ switch (ret) { case 1: break; case 2 ... 4: + case 6: pr_info("Sub-NUMA Cluster mode detected with %d nodes per L3 cache\n", ret); rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_L3_NODE; break; -- Gitee From fbb3ae2494e615e1980cf80ed758974d2830339e Mon Sep 17 00:00:00 2001 From: Chen Yu Date: Fri, 26 Sep 2025 12:17:22 +0800 Subject: [PATCH 223/231] x86/resctrl: Support Sub-NUMA Cluster (SNC) mode on Clearwater Forest commit a0a0999507752574b80d7fbd179cce052c92791b upstream. Clearwater Forest supports SNC mode. Add it to the snc_cpu_ids[] table. 
Intel-SIG: commit a0a099950775 x86/resctrl: Support Sub-NUMA Cluster (SNC) mode on Clearwater Forest Backport Resctrl/RDT-SNC for Intel platforms Signed-off-by: Chen Yu Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Reinette Chatre Acked-by: Tony Luck Conflicts: arch/x86/kernel/cpu/resctrl/monitor.c [jz: resolve context conflict] Signed-off-by: Jason Zeng --- arch/x86/kernel/cpu/resctrl/monitor.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index d6ecd7a5503c..0b36960254cd 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1124,6 +1124,7 @@ static const struct x86_cpu_id snc_cpu_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, 0), X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, 0), X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_DARKMONT_X, 0), {} }; -- Gitee From b1f001b202a1b425cd43d3c8ec2a8a62946c3264 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Wed, 1 Apr 2026 16:51:29 +0800 Subject: [PATCH 224/231] LoongArch: config: enable tracer snapshot support Upstream: no Conflict: none Checkpatch: pass This patch enables CONFIG_TRACER_SNAPSHOT in loongson3_defconfig so that the kernel provides snapshot buffer support for ftrace. With this config enabled, trace-cmd snapshot -s and related snapshot operations can work correctly on LoongArch systems. 
Signed-off-by: Ming Wang --- arch/loongarch/configs/loongson3_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 5d48a0d1b0e6..628fbc071020 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -2229,6 +2229,7 @@ CONFIG_DEBUG_LIST=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set CONFIG_FUNCTION_TRACER=y +CONFIG_TRACER_SNAPSHOT=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_SAMPLES=y -- Gitee From b71a405f0e38b9cecb633fd24a14626c662940e0 Mon Sep 17 00:00:00 2001 From: Kanglong Wang Date: Tue, 7 Apr 2026 10:41:29 +0800 Subject: [PATCH 225/231] LoongArch: Set kasan_early_stage as false at end of kasan_init() commit 5ec5ac4ca27e4daa234540ac32f9fc5219377d53 upstream kasan_arch_is_ready() indicates that kasan is fully initialized, so it should set kasan_early_stage as false to make kasan_arch_is_ready() returns true at end of kasan_init() like the other archs. 
Otherwise bringing up the master CPU failed when CONFIG_KASAN is set on the Loongson-3C6000 server machine, here are the call chains: kernel_entry() start_kernel() setup_arch() kasan_init() kasan_early_stage = false kasan_arch_is_ready() Signed-off-by: Tiezhu Yang Signed-off-by: Kanglong Wang --- arch/loongarch/mm/kasan_init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index 853a004a45cf..b098bbd80953 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -286,8 +286,6 @@ void __init kasan_init(void) /* Maps everything to a single page of zeroes */ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); - kasan_early_stage = false; - /* Populate the linear mapping */ for_each_mem_range(i, &pa_start, &pa_end) { void *start = (void *)phys_to_virt(pa_start); @@ -317,5 +315,6 @@ void __init kasan_init(void) /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; + kasan_early_stage = false; pr_info("KernelAddressSanitizer initialized.\n"); } -- Gitee From da32db7c92d67a5435beab0ec3a787ca09d1c68b Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Wed, 1 Apr 2026 17:10:45 +0800 Subject: [PATCH 226/231] KVM: x86: Add support for CPUID leaf 0x8C860000 CPUID leaf 0x8C860000 on Hygon processors advertises support for the SM3 and SM4 cryptographic algorithms. Expose this leaf to KVM guests to allow the guest OS to detect these features and enable hardware-accelerated SM3/SM4 operations. 
Hygon-SIG: commit none hygon KVM: x86: Add support for CPUID leaf 0x8C860000 Signed-off-by: Wei Wang Tested-by: Yabin Li Tested-by: Liyang Han Tested-by: Yongwei Xu Reviewed-by: Bin Guo --- arch/x86/kvm/cpuid.c | 16 ++++++++++++++++ arch/x86/kvm/reverse_cpuid.h | 1 + 2 files changed, 17 insertions(+) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9c1dccd156e1..49303e21496e 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -847,6 +847,9 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE); kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR); + kvm_cpu_cap_mask(CPUID_8C86_0000_EDX, + 0 | F(HYGON_SM3) | F(HYGON_SM4)); + kvm_cpu_cap_mask(CPUID_C000_0001_EDX, F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | @@ -1416,6 +1419,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ebx = ebx.full; break; } + case 0x8C860000: + entry->eax = 0x8C860000; + cpuid_entry_override(entry, CPUID_8C86_0000_EDX); + break; /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: /* Extended to 0xC0000006 */ @@ -1473,6 +1480,15 @@ static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, return r; limit = array->entries[array->nent - 1].eax; + + if ((func == 0x80000000) && + (boot_cpu_has(X86_FEATURE_HYGON_SM3) || + boot_cpu_has(X86_FEATURE_HYGON_SM4))) { + r = do_cpuid_func(array, 0x8C860000, type); + if (r) + return r; + } + for (func = func + 1; func <= limit; ++func) { r = do_cpuid_func(array, func, type); if (r) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 5fe7761bb97c..cb7f37502709 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -106,6 +106,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, [CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX}, [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, + [CPUID_8C86_0000_EDX] = {0x8c860000, 0, 
CPUID_EDX}, }; /* -- Gitee From 6c814f95904481cf3e62291008aac54459acdc7c Mon Sep 17 00:00:00 2001 From: Zhang Qiao Date: Wed, 25 Feb 2026 19:10:26 +0800 Subject: [PATCH 227/231] sched/soft_domain: Increasing NR_MAX_CLUSTER to 32 commit b0230a93098ca7d4dd3271acac70bd626a0f3cde openEuler Bumped the maximum number of clusters per node from 16 to 32 to resolve an out-of-bounds/array overflow issue. Fixes: 645a1ba256ef ("sched: topology: Build soft domain for LLC") Signed-off-by: Zhang Qiao --- kernel/sched/soft_domain.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kernel/sched/soft_domain.c b/kernel/sched/soft_domain.c index 3c52680ca82f..7b569fdb2c6e 100644 --- a/kernel/sched/soft_domain.c +++ b/kernel/sched/soft_domain.c @@ -17,6 +17,8 @@ #include +#define NR_MAX_CLUSTER 32 + static DEFINE_STATIC_KEY_TRUE(__soft_domain_switch); static int __init soft_domain_switch_setup(char *str) @@ -50,6 +52,7 @@ static int build_soft_sub_domain(int nid, struct cpumask *cpus) const struct cpumask *span = cpumask_of_node(nid); struct soft_domain *sf_d = NULL; int i; + int cls_cnt = 0; sf_d = kzalloc_node(sizeof(struct soft_domain) + cpumask_size(), GFP_KERNEL, nid); @@ -63,6 +66,12 @@ static int build_soft_sub_domain(int nid, struct cpumask *cpus) for_each_cpu_and(i, span, cpus) { struct soft_subdomain *sub_d = NULL; + cls_cnt++; + if (cls_cnt > NR_MAX_CLUSTER) { + pr_info("clsuter number > %d, unsupport soft domain.\n", NR_MAX_CLUSTER); + return -EINVAL; + } + sub_d = kzalloc_node(sizeof(struct soft_subdomain) + cpumask_size(), GFP_KERNEL, nid); if (!sub_d) { @@ -138,8 +147,6 @@ void build_soft_domain(void) static DEFINE_MUTEX(soft_domain_mutex); -#define NR_MAX_CLUSTER 16 - struct domain_node { struct soft_subdomain *sud_d; unsigned int attached; -- Gitee From 1f2154b609947c4869e775970ca61c0a936faa9c Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 26 Feb 2026 14:48:14 +0800 Subject: [PATCH 228/231] pwm: dwc: Add support for Hygon pwm controller 
commit 0a8df9c4bd74291069be933be93c81a479f456ed anolis. ANBZ: #30097 Add PCI ID for Hygon pwm controller. Hygon-SIG: commit none hygon anolis: pwm: dwc: Add support for Hygon pwm controller Hygon-SIG: commit 0a8df9c4bd74 anolis anolis: pwm: dwc: Add support for Hygon pwm controller Backport from anolis to support Hygon family 18h model 18h Signed-off-by: Liao Xuan Cc: hygon-arch@list.openanolis.cn Reviewed-by: Xiaochen Shen Reviewed-by: Yuanhe Shu Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/6545 [ YuntongJin : amend commit log ] Signed-off-by: YuntongJin --- drivers/pwm/pwm-dwc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c index 3bbb26c862c3..8d2d5cd53113 100644 --- a/drivers/pwm/pwm-dwc.c +++ b/drivers/pwm/pwm-dwc.c @@ -301,6 +301,7 @@ static SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume); static const struct pci_device_id dwc_pwm_id_table[] = { { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */ + { PCI_VDEVICE(HYGON, 0x7910) }, { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(pci, dwc_pwm_id_table); -- Gitee From cba21faadb9db7e39f8108f4ec41dac11fc749c3 Mon Sep 17 00:00:00 2001 From: Han Chengfei Date: Sat, 25 Apr 2026 13:11:57 +0800 Subject: [PATCH 229/231] ethernet,dinghai: enable dinghai CONFIG Enable dinghai CONFIG and fix compile when using defconfig and allyesconfig. 
Signed-off-by: Han Chengfei Signed-off-by: Jianping Liu --- drivers/net/ethernet/Kconfig | 2 +- drivers/net/ethernet/dinghai/Kconfig | 6 +- drivers/net/ethernet/dinghai/Makefile | 13 ++- drivers/net/ethernet/dinghai/dh_cmd.c | 3 + drivers/net/ethernet/dinghai/en_aux.c | 14 +-- .../ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c | 4 + .../ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h | 2 + .../ethernet/dinghai/en_aux/en_aux_ioctl.c | 10 +-- .../net/ethernet/dinghai/en_aux/priv_queue.c | 22 ++--- drivers/net/ethernet/dinghai/en_aux/queue.c | 88 +++++++++---------- drivers/net/ethernet/dinghai/en_aux/queue.h | 14 +-- drivers/net/ethernet/dinghai/en_np/Makefile | 1 - .../dinghai/en_np/init/source/dpp_np_init.c | 5 ++ drivers/net/ethernet/dinghai/en_pf.c | 3 + .../net/ethernet/dinghai/en_ptp/zxdh_ptp.c | 3 + .../net/ethernet/dinghai/en_tsn/zxdh_tsn.c | 3 + drivers/net/ethernet/dinghai/log.c | 4 - 17 files changed, 109 insertions(+), 88 deletions(-) delete mode 100644 drivers/net/ethernet/dinghai/log.c diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index bcc21234a57b..182562249c4c 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -198,6 +198,6 @@ source "drivers/net/ethernet/wangxun/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/northlink/Kconfig" -# source "drivers/net/ethernet/dinghai/Kconfig" +source "drivers/net/ethernet/dinghai/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/dinghai/Kconfig b/drivers/net/ethernet/dinghai/Kconfig index 86831c41842a..d3930fa024f1 100644 --- a/drivers/net/ethernet/dinghai/Kconfig +++ b/drivers/net/ethernet/dinghai/Kconfig @@ -1,6 +1,7 @@ config NET_VENDOR_DINGHAI tristate "Dinghai Devices" depends on (X86 || ARM64) + select NET_DEVLINK default y help If you have a Ethernet DINGHAI device belonging to this @@ -18,7 +19,7 @@ config DINGHAI_DH_CMD config DINGHAI_EN_AUX tristate "Dinghai Auxiliary 
Support" depends on (X86 || ARM64) - default y + default m help Auxiliary functionality for Dinghai devices. @@ -54,7 +55,7 @@ config DINGHAI_TSN_M config ZXDH_SF tristate "Dinghai SF Support" depends on (X86 || ARM64) - default y + default m help Sub-Function (SF) support. @@ -68,6 +69,7 @@ config ZXDH_MSGQ config DINGHAI_SEC tristate "Dinghai SEC Support" depends on (X86 || ARM64) + depends on XFRM_OFFLOAD default n help Security (SEC) module support. diff --git a/drivers/net/ethernet/dinghai/Makefile b/drivers/net/ethernet/dinghai/Makefile index 1f0b65ed6694..d4dbc10ea823 100644 --- a/drivers/net/ethernet/dinghai/Makefile +++ b/drivers/net/ethernet/dinghai/Makefile @@ -19,11 +19,11 @@ EXTRA_CFLAGS += -DDRIVER_VERSION_VAL=\"$(CONFIG_DRIVER_VERSION)\" endif obj-$(CONFIG_DINGHAI_DH_CMD) += dinghai10e_cmd.o -dinghai10e_cmd-y := dh_cmd.o cmd/msg_main.o cmd/msg_chan_lock.o cmd/msg_chan_test.o log.o +dinghai10e_cmd-y := dh_cmd.o cmd/msg_main.o cmd/msg_chan_lock.o cmd/msg_chan_test.o obj-$(CONFIG_DINGHAI_PF) += dinghai10e.o dinghai10e-y := events.o en_pf.o eq.o pci_irq.o en_pf/en_pf_irq.o en_pf/en_pf_eq.o devlink.o en_pf/en_pf_devlink.o en_pf/en_pf_events.o \ - en_pf/msg_func.o dh_procfs.o lag/lag.o lag/lag_procfs.o plcr.o sriov_sysfs.o xarray.o health.o log.o slib.o + en_pf/msg_func.o dh_procfs.o lag/lag.o lag/lag_procfs.o plcr.o sriov_sysfs.o xarray.o health.o slib.o dinghai10e-$(CONFIG_ZXDH_SF) += irq_affinity.o en_sf.o en_sf/en_sf_eq.o en_sf/en_sf_irq.o en_sf/en_sf_devlink.o @@ -31,8 +31,8 @@ dinghai10e-$(CONFIG_DINGHAI_EN_AUX) += en_aux.o eq.o pci_irq.o irq_affinity.o en en_aux/en_aux_events.o en_ethtool/ethtool.o en_aux/en_aux_ioctl.o \ en_aux/dcbnl/en_dcbnl.o en_aux/dcbnl/en_dcbnl_api.o \ zxdh_tools/zxdh_tools_ioctl.o zxdh_tools/zxdh_tools_netlink.o \ - en_aux/en_1588_pkt_proc.o en_aux/en_1588_pkt_proc_func.o xarray.o en_aux/selq.o log.o slib.o -dinghai10e-y += en_aux/drs_sec_dtb.o + en_aux/en_1588_pkt_proc.o en_aux/en_1588_pkt_proc_func.o xarray.o en_aux/selq.o 
slib.o +dinghai10e-$(CONFIG_DINGHAI_SEC) += en_aux/drs_sec_dtb.o dinghai10e-y += en_aux/priv_queue.o dinghai10e-y += bonding/zxdh_lag.o bonding/rdma_ops.o @@ -40,10 +40,9 @@ obj-$(CONFIG_DINGHAI_AUXILIARY) += dinghai10e_auxiliary.o dinghai10e_auxiliary-y += en_auxiliary.o obj-$(CONFIG_DINGHAI_PTP) += dinghai10e_ptp.o -dinghai10e_ptp-y :=en_ptp/tod_driver.o en_ptp/tod_driver_stub.o en_ptp/zxdh_ptp.o log.o +dinghai10e_ptp-y :=en_ptp/tod_driver.o en_ptp/tod_driver_stub.o en_ptp/zxdh_ptp.o obj-$(CONFIG_DINGHAI_TSN_M) += dinghai10e_tsn.o -dinghai10e_tsn-y :=en_tsn/zxdh_tsn.o en_tsn/zxdh_tsn_reg.o en_tsn/zxdh_tsn_ioctl.o log.o +dinghai10e_tsn-y :=en_tsn/zxdh_tsn.o en_tsn/zxdh_tsn_reg.o en_tsn/zxdh_tsn_ioctl.o include $(src)/en_np/Makefile - diff --git a/drivers/net/ethernet/dinghai/dh_cmd.c b/drivers/net/ethernet/dinghai/dh_cmd.c index 5bd24ecd9694..3a946f6cd137 100644 --- a/drivers/net/ethernet/dinghai/dh_cmd.c +++ b/drivers/net/ethernet/dinghai/dh_cmd.c @@ -9,6 +9,9 @@ #include "en_aux/en_aux_cmd.h" #include "msg_common.h" +int debug_print; +module_param(debug_print, int, 0644); + /***************************************** [src/dst]时应该将消息发到低2k(0)还是高2K(1) src/dst: TO_RISC, TO_PFVF, TO_MPF diff --git a/drivers/net/ethernet/dinghai/en_aux.c b/drivers/net/ethernet/dinghai/en_aux.c index ca9cbae4d89a..2ae9ff4a1bd3 100644 --- a/drivers/net/ethernet/dinghai/en_aux.c +++ b/drivers/net/ethernet/dinghai/en_aux.c @@ -747,7 +747,7 @@ int32_t xmit_skb(struct net_device *netdev, struct send_queue *sq, num_sg++; } - return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); + return zxdh_virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev) @@ -764,13 +764,13 @@ netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev) /* Free up any pending old buffers before queueing new ones. 
*/ do { if (use_napi) { - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); } free_old_xmit_skbs(netdev, sq, false); } while (use_napi && kick && - unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + unlikely(!zxdh_virtqueue_enable_cb_delayed(sq->vq))); /* timestamp packet in software */ skb_tx_timestamp(skb); @@ -806,19 +806,19 @@ netdev_tx_t zxdh_en_xmit(struct sk_buff *skb, struct net_device *netdev) netif_stop_subqueue(netdev, qnum); en_dev->hw_stats.q_stats[qnum].q_tx_stopped++; if (!use_napi && - unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + unlikely(!zxdh_virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ free_old_xmit_skbs(netdev, sq, false); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { netif_start_subqueue(netdev, qnum); - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); } } } if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare_packed(sq->vq) && - virtqueue_notify(sq->vq)) { + zxdh_virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); @@ -5409,7 +5409,9 @@ void zxdh_netdev_features_init(struct net_device *netdev) return; } +#ifdef ZXDH_SEC extern const struct xfrmdev_ops zxdh_xfrmdev_ops; +#endif static void zxdh_build_nic_netdev(struct net_device *netdev) { struct zxdh_en_priv *en_priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c index 292ce4819772..f8fd69cdfa9f 100644 --- a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.c @@ -823,7 +823,9 @@ uint32_t zxdh_dcbnl_initialize(struct net_device *netdev) zxdh_dcbnl_printk_ets_tree(en_priv); en_dev->dcb_para.init_flag = ZXDH_DCBNL_INIT_FLAG; +#ifdef CONFIG_DCB netdev->dcbnl_ops = &zxdh_dcbnl_ops; +#endif //zxdh_dcbnl_set_tm_pport_mcode_gate_open(netdev); LOG_INFO("%s dcbnl init ok ", 
netdev->name); @@ -842,7 +844,9 @@ uint32_t zxdh_dcbnl_ets_uninit(struct net_device *netdev) LOG_INFO("%s dcbnl uninit begin\n", netdev->name); en_dev->dcb_para.init_flag = 0; +#ifdef CONFIG_DCB netdev->dcbnl_ops = NULL; +#endif zxdh_dcbnl_set_tm_pport_mcode_gate_close(netdev); zxdh_dcbnl_free_flow_resources(en_priv); diff --git a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h index fd6810133a7b..fb2f53d40bd6 100644 --- a/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h +++ b/drivers/net/ethernet/dinghai/en_aux/dcbnl/en_dcbnl.h @@ -6,6 +6,8 @@ extern "C" { #endif #include +#include +#include /* 启用dcb会大幅度增加初始化时间,暂时先注释 */ #define ZXDH_DCBNL_OPEN diff --git a/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c index 841786f537ac..4a4843fd1a40 100644 --- a/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c +++ b/drivers/net/ethernet/dinghai/en_aux/en_aux_ioctl.c @@ -300,8 +300,8 @@ int32_t zxdh_tx_file_pkts(struct zxdh_en_priv *en_priv, struct zxdh_en_reg *reg) uint32_t pktLen = reg->num; uint32_t buffLen = 4096; - while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { - LOG_ERR("virtqueue_get_buf() != NULL, ptr=0x%llx, len=0x%x\n", + while ((ptr = zxdh_virtqueue_get_buf(sq->vq, &len)) != NULL) { + LOG_ERR("zxdh_virtqueue_get_buf() != NULL, ptr=0x%llx, len=0x%x\n", (uint64_t)ptr, len); }; @@ -342,13 +342,13 @@ int32_t zxdh_tx_file_pkts(struct zxdh_en_priv *en_priv, struct zxdh_en_reg *reg) } } - if (unlikely(virtqueue_add_outbuf(sq->vq, sg, total_sg, data_pkt, + if (unlikely(zxdh_virtqueue_add_outbuf(sq->vq, sg, total_sg, data_pkt, GFP_ATOMIC) != 0)) { - LOG_ERR("virtqueue_add_outbuf failure!\n"); + LOG_ERR("zxdh_virtqueue_add_outbuf failure!\n"); goto err1; } - if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) { + if (virtqueue_kick_prepare_packed(sq->vq) && zxdh_virtqueue_notify(sq->vq)) { 
u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); diff --git a/drivers/net/ethernet/dinghai/en_aux/priv_queue.c b/drivers/net/ethernet/dinghai/en_aux/priv_queue.c index 5a8f58b93bad..78723295fc85 100644 --- a/drivers/net/ethernet/dinghai/en_aux/priv_queue.c +++ b/drivers/net/ethernet/dinghai/en_aux/priv_queue.c @@ -85,7 +85,7 @@ static int32_t msgq_add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) sg_init_one(rq->sg, buf, len); ctx = (void *)(unsigned long)len; - err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); + err = zxdh_virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { put_page(virt_to_head_page(buf)); } @@ -107,7 +107,7 @@ static bool msgq_try_fill_recv(struct receive_queue *rq, gfp_t gfp) } } while (rq->vq->num_free); - if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) { + if (virtqueue_kick_prepare_packed(rq->vq) && zxdh_virtqueue_notify(rq->vq)) { flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); rq->stats.kicks++; u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); @@ -119,7 +119,7 @@ static bool msgq_try_fill_recv(struct receive_queue *rq, gfp_t gfp) uint32_t msgq_mergeable_min_buf_len(struct virtqueue *vq) { const uint32_t hdr_len = PRIV_HEADER_LEN; - uint32_t rq_size = virtqueue_get_vring_size(vq); + uint32_t rq_size = zxdh_virtqueue_get_vring_size(vq); uint32_t min_buf_len = DIV_ROUND_UP(BUFF_LEN, rq_size); return max(max(min_buf_len, hdr_len) - hdr_len, @@ -330,11 +330,11 @@ static int32_t page_send_cmd(struct send_queue *sq, uint8_t *buf, } } - err = virtqueue_add_outbuf(sq->vq, sq->sg, total_sg, buf, GFP_ATOMIC); + err = zxdh_virtqueue_add_outbuf(sq->vq, sq->sg, total_sg, buf, GFP_ATOMIC); ZXDH_CHECK_RET_GOTO_ERR(err, free_addr, - "virtqueue_add_outbuf failed: %d\n", err); + "zxdh_virtqueue_add_outbuf failed: %d\n", err); - if (virtqueue_kick_prepare_packed(sq->vq) && virtqueue_notify(sq->vq)) { + if 
(virtqueue_kick_prepare_packed(sq->vq) && zxdh_virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); @@ -354,7 +354,7 @@ static int32_t zxdh_msgq_pkt_send(struct msgq_dev *msgq_dev, uint32_t len = 0; if (spin_trylock(&msgq_dev->tx_lock)) { - while ((buf = virtqueue_get_buf(msgq_dev->sq_priv->vq, &len)) != + while ((buf = zxdh_virtqueue_get_buf(msgq_dev->sq_priv->vq, &len)) != NULL) { ZXDH_FREE_PTR(buf); }; @@ -550,7 +550,7 @@ static int32_t zxdh_response_msg_handle(struct msgq_dev *msgq_dev, memcpy(*tmp_buff->data, (uint8_t *)buf + PRIV_HEADER_LEN, pkt_len); while (--num_buf != 0) { rx_free_pages(msgq_dev, buf, len); - buf = virtqueue_get_buf(msgq_dev->rq_priv->vq, &len); + buf = zxdh_virtqueue_get_buf(msgq_dev->rq_priv->vq, &len); if (unlikely(buf == NULL)) { LOG_ERR("msgq rx error: %dth buffers missing\n", num_buf); @@ -707,7 +707,7 @@ static int32_t zxdh_msgq_receive(struct receive_queue *rq, int32_t budget) } if (rq->vq->num_free > - min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) { + min((uint32_t)budget, zxdh_virtqueue_get_vring_size(rq->vq)) / 2) { if (!msgq_try_fill_recv(rq, GFP_ATOMIC)) { LOG_ERR("msgq_try_fill_recv failed\n"); } @@ -732,7 +732,7 @@ static void free_old_xmit_bufs(struct send_queue *sq) uint32_t bytes = 0; void *buf = NULL; - while ((buf = virtqueue_get_buf(sq->vq, &len)) != NULL) + while ((buf = zxdh_virtqueue_get_buf(sq->vq, &len)) != NULL) { bytes += len; packets++; @@ -761,7 +761,7 @@ static void msgq_poll_cleantx(struct receive_queue *rq) } if (spin_trylock(&msgq_dev->tx_lock)) { - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); //free_old_xmit_bufs(sq); spin_unlock(&msgq_dev->tx_lock); } diff --git a/drivers/net/ethernet/dinghai/en_aux/queue.c b/drivers/net/ethernet/dinghai/en_aux/queue.c index 51312678867d..1d902afc7fd6 100644 --- a/drivers/net/ethernet/dinghai/en_aux/queue.c +++ b/drivers/net/ethernet/dinghai/en_aux/queue.c @@ 
-85,7 +85,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, u16 *num_buf, void *buf; int off; - buf = virtqueue_get_buf(rq->vq, &buflen); + buf = zxdh_virtqueue_get_buf(rq->vq, &buflen); if (unlikely(!buf)) goto err_buf; @@ -303,7 +303,7 @@ inline void vqm_wmb(bool weak_barriers) } } -void vring_del_virtqueue(struct virtqueue *_vq) +static void vring_del_virtqueue(struct virtqueue *_vq) { struct zxdh_en_device *en_dev = _vq->en_dev; struct vring_virtqueue *vq = to_vvq(_vq); @@ -375,7 +375,7 @@ void vp_detach_vqs(void *para) } } -void vp_del_vqs(struct net_device *netdev) +static void vp_del_vqs(struct net_device *netdev) { struct zxdh_en_priv *en_priv = netdev_priv(netdev); struct zxdh_en_device *en_dev = &en_priv->edev; @@ -387,20 +387,20 @@ void vp_del_vqs(struct net_device *netdev) } /** - * virtqueue_get_vring_size - return the size of the virtqueue's vring + * zxdh_virtqueue_get_vring_size - return the size of the virtqueue's vring * @_vq: the struct virtqueue containing the vring of interest. * * Returns the size of the vring. This is mainly used for boasting to * userspace. Unlike other operations, this need not be serialized. 
*/ -uint32_t virtqueue_get_vring_size(struct virtqueue *_vq) +uint32_t zxdh_virtqueue_get_vring_size(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); return vq->packed.vring.num; } -dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) +static dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -409,7 +409,7 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq) return vq->packed.ring_dma_addr; } -dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) +static dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -418,7 +418,7 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq) return vq->packed.driver_event_dma_addr; } -dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq) +static dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -719,7 +719,7 @@ void zxdh_vvq_reset(struct zxdh_en_device *en_dev) } /* the notify function used when creating a virt queue */ -bool vp_notify(struct virtqueue *vq) +static bool vp_notify(struct virtqueue *vq) { /* we write the queue's selector into the notification register to * signal the other end */ @@ -766,7 +766,7 @@ struct virtqueue *vp_setup_vq(struct net_device *netdev, unsigned index, /* activate the queue */ en_dev->ops->activate_phy_vq(en_dev->parent, en_dev->phy_index[index], - virtqueue_get_vring_size(vq), + zxdh_virtqueue_get_vring_size(vq), virtqueue_get_desc_addr(vq), virtqueue_get_avail_addr(vq), virtqueue_get_used_addr(vq)); @@ -1284,7 +1284,7 @@ int32_t virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[], } /** - * virtqueue_add_inbuf_ctx - expose input buffers to other end + * zxdh_virtqueue_add_inbuf_ctx - expose input buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) 
* @num: the number of entries in @sg writable by other side @@ -1297,7 +1297,7 @@ int32_t virtqueue_add_packed(struct virtqueue *_vq, struct scatterlist *sgs[], * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ -int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, +int32_t zxdh_virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, void *ctx, gfp_t gfp) { return virtqueue_add_packed(vq, &sg, num, 0, 1, data, ctx, gfp); @@ -1338,7 +1338,7 @@ bool virtqueue_poll_packed(struct virtqueue *_vq, uint16_t off_wrap) * * This does not need to be serialized. */ -bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +static bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -1383,7 +1383,7 @@ unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq) return vq->last_used_idx; } -int32_t virtqueue_enable_cb_prepare(struct virtqueue *_vq) +static int32_t virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -1586,7 +1586,7 @@ void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq, uint32_t *len, return ret; } -void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len) +void *zxdh_virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len) { return virtqueue_get_buf_ctx_packed(_vq, len, NULL); } @@ -1614,7 +1614,7 @@ void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, uint32_t bytes = 0; void *ptr = NULL; - while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + while ((ptr = zxdh_virtqueue_get_buf(sq->vq, &len)) != NULL) { if (likely(!is_xdp_frame(ptr))) { struct sk_buff *skb = ptr; @@ -1642,7 +1642,7 @@ void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, u64_stats_update_end(&sq->stats.syncp); } -void virtqueue_disable_cb_packed(struct virtqueue *_vq) +void zxdh_virtqueue_disable_cb_packed(struct virtqueue *_vq) { struct 
vring_virtqueue *vq = to_vvq(_vq); @@ -1654,7 +1654,7 @@ void virtqueue_disable_cb_packed(struct virtqueue *_vq) } /** - * virtqueue_disable_cb - disable callbacks + * zxdh_virtqueue_disable_cb - disable callbacks * @_vq: the struct virtqueue we're talking about. * * Note that this is not necessarily synchronous, hence unreliable and only @@ -1662,7 +1662,7 @@ void virtqueue_disable_cb_packed(struct virtqueue *_vq) * * Unlike other operations, this need not be serialized. */ -void virtqueue_disable_cb(struct virtqueue *_vq) +void zxdh_virtqueue_disable_cb(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -1673,13 +1673,13 @@ void virtqueue_disable_cb(struct virtqueue *_vq) return; } - virtqueue_disable_cb_packed(_vq); + zxdh_virtqueue_disable_cb_packed(_vq); } void virtqueue_napi_schedule(struct napi_struct *napi, struct virtqueue *vq) { if (napi_schedule_prep(napi)) { - virtqueue_disable_cb(vq); + zxdh_virtqueue_disable_cb(vq); __napi_schedule(napi); } } @@ -1743,7 +1743,7 @@ int virtnet_poll_tx(struct napi_struct *napi, int budget) txq = netdev_get_tx_queue(en_dev->netdev, index); __netif_tx_lock(txq, raw_smp_processor_id()); - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(en_dev->netdev, sq, true); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { @@ -1755,7 +1755,7 @@ int virtnet_poll_tx(struct napi_struct *napi, int budget) done = napi_complete_done(napi, 0); if (!done) { - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); } __netif_tx_unlock(txq); @@ -1764,7 +1764,7 @@ int virtnet_poll_tx(struct napi_struct *napi, int budget) if (unlikely(virtqueue_poll(sq->vq, opaque))) { if (napi_schedule_prep(napi)) { __netif_tx_lock(txq, raw_smp_processor_id()); - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); __netif_tx_unlock(txq); __napi_schedule(napi); } @@ -1774,7 +1774,7 @@ int virtnet_poll_tx(struct napi_struct *napi, int budget) return 0; } -bool 
virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) +bool zxdh_virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); uint16_t used_idx = 0; @@ -1878,7 +1878,7 @@ uint32_t mergeable_ctx_to_truesize(void *mrg_ctx) } /** - * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. + * zxdh_virtqueue_enable_cb_delayed - restart callbacks after disable_cb. * @_vq: the struct virtqueue we're talking about. * * This re-enables callbacks but hints to the other side to delay @@ -1890,7 +1890,7 @@ uint32_t mergeable_ctx_to_truesize(void *mrg_ctx) * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). */ -bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) +bool zxdh_virtqueue_enable_cb_delayed(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -1898,7 +1898,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) vq->event_triggered = false; } - return virtqueue_enable_cb_delayed_packed(_vq); + return zxdh_virtqueue_enable_cb_delayed_packed(_vq); } void virtnet_poll_cleantx(struct receive_queue *rq) @@ -1914,9 +1914,9 @@ void virtnet_poll_cleantx(struct receive_queue *rq) if (__netif_tx_trylock(txq)) { do { - virtqueue_disable_cb(sq->vq); + zxdh_virtqueue_disable_cb(sq->vq); free_old_xmit_skbs(en_dev->netdev, sq, true); - } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + } while (unlikely(!zxdh_virtqueue_enable_cb_delayed(sq->vq))); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { netif_tx_wake_queue(txq); @@ -2026,7 +2026,7 @@ struct sk_buff *page_to_skb(struct zxdh_en_device *en_dev, } /** - * virtqueue_add_outbuf - expose output buffers to other end + * zxdh_virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. * @sg: scatterlist (must be well-formed and terminated!) 
* @num: the number of entries in @sg readable by other side @@ -2038,7 +2038,7 @@ struct sk_buff *page_to_skb(struct zxdh_en_device *en_dev, * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ -int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, +int32_t zxdh_virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, gfp_t gfp) { return virtqueue_add_packed(vq, &sg, num, 1, 0, data, NULL, gfp); @@ -2064,7 +2064,7 @@ static int __zxdh_en_xdp_xmit_one(struct zxdh_en_device *en_dev, sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + err = zxdh_virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -2144,7 +2144,7 @@ int zxdh_en_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } /* Free up any pending old buffers before queueing new ones. */ - while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + while ((ptr = zxdh_virtqueue_get_buf(sq->vq, &len)) != NULL) { if (likely(is_xdp_frame(ptr))) { struct xdp_frame *frame = ptr_to_xdp(ptr); @@ -2170,7 +2170,7 @@ int zxdh_en_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (flags & XDP_XMIT_FLUSH) { if (virtqueue_kick_prepare_packed(sq->vq) && - virtqueue_notify(sq->vq)) + zxdh_virtqueue_notify(sq->vq)) kicks = 1; } out: @@ -2465,7 +2465,7 @@ struct sk_buff *receive_mergeable(struct net_device *netdev, err_skb: put_page(page); while (num_buf-- > 1) { - buf = virtqueue_get_buf(rq->vq, &len); + buf = zxdh_virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { LOG_ERR("%s: rx error: %d buffers missing\n", netdev->name, num_buf); @@ -2673,14 +2673,14 @@ void receive_buf(struct zxdh_en_device *en_dev, struct receive_queue *rq, } /** - * virtqueue_notify - second half of split virtqueue_kick call. + * zxdh_virtqueue_notify - second half of split virtqueue_kick call. 
* @_vq: the struct virtqueue * * This does not need to be serialized. * * Returns false if host notify failed or queue is broken, otherwise true. */ -bool virtqueue_notify(struct virtqueue *_vq) +bool zxdh_virtqueue_notify(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); @@ -2769,7 +2769,7 @@ int32_t add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) sg_init_one(rq->sg, buf, len); ctx = mergeable_len_to_ctx(len, headroom); - err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); + err = zxdh_virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { put_page(virt_to_head_page(buf)); } @@ -2866,7 +2866,7 @@ bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) } } while (rq->vq->num_free); - if (virtqueue_kick_prepare_packed(rq->vq) && virtqueue_notify(rq->vq)) { + if (virtqueue_kick_prepare_packed(rq->vq) && zxdh_virtqueue_notify(rq->vq)) { flags = u64_stats_update_begin_irqsave(&rq->stats.syncp); rq->stats.kicks++; u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); @@ -2892,7 +2892,7 @@ int32_t virtnet_receive(struct receive_queue *rq, int32_t budget, } if (rq->vq->num_free > - min((uint32_t)budget, virtqueue_get_vring_size(rq->vq)) / 2) { + min((uint32_t)budget, zxdh_virtqueue_get_vring_size(rq->vq)) / 2) { if (!try_fill_recv(rq, GFP_ATOMIC)) { schedule_delayed_work(&en_dev->refill, 0); } @@ -2922,7 +2922,7 @@ void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, virtqueue_napi_schedule(napi, vq); } } else { - virtqueue_disable_cb(vq); + zxdh_virtqueue_disable_cb(vq); } } @@ -2955,7 +2955,7 @@ int virtnet_poll(struct napi_struct *napi, int budget) if (xdp_xmit & ZXDH_XDP_TX) { sq = zxdh_en_xdp_get_sq(en_dev); if (virtqueue_kick_prepare_packed(sq->vq) && - virtqueue_notify(sq->vq)) { + zxdh_virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); sq->stats.kicks++; u64_stats_update_end(&sq->stats.syncp); @@ -3262,7 +3262,7 @@ void zxdh_en_xmit_pkts(struct virtqueue 
*tvq) struct napi_struct *napi = &en_dev->sq[vq2txq(tvq)].napi; /* Suppress further interrupts. */ - virtqueue_disable_cb(tvq); + zxdh_virtqueue_disable_cb(tvq); if (napi->weight) { virtqueue_napi_schedule(napi, tvq); diff --git a/drivers/net/ethernet/dinghai/en_aux/queue.h b/drivers/net/ethernet/dinghai/en_aux/queue.h index 09f833d3ea01..b43ad93c1698 100644 --- a/drivers/net/ethernet/dinghai/en_aux/queue.h +++ b/drivers/net/ethernet/dinghai/en_aux/queue.h @@ -870,14 +870,14 @@ void vring_free_queue(struct zxdh_en_device *en_dev, size_t size, void *queue, netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *netdev); bool try_fill_recv(struct receive_queue *rq, gfp_t gfp); inline struct zxdh_net_hdr_rx *skb_vnet_hdr(struct sk_buff *skb); -int32_t virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, +int32_t zxdh_virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, gfp_t gfp); -void virtqueue_disable_cb(struct virtqueue *_vq); +void zxdh_virtqueue_disable_cb(struct virtqueue *_vq); void free_old_xmit_skbs(struct net_device *netdev, struct send_queue *sq, bool in_napi); -bool virtqueue_enable_cb_delayed(struct virtqueue *_vq); +bool zxdh_virtqueue_enable_cb_delayed(struct virtqueue *_vq); bool virtqueue_kick_prepare_packed(struct virtqueue *_vq); -bool virtqueue_notify(struct virtqueue *_vq); +bool zxdh_virtqueue_notify(struct virtqueue *_vq); void zxdh_pf_features_init(struct net_device *netdev); bool zxdh_has_feature(struct zxdh_en_device *en_dev, uint32_t fbit); bool zxdh_has_status(struct net_device *netdev, uint32_t sbit); @@ -890,13 +890,13 @@ int32_t zxdh_vqs_init(struct net_device *netdev); int32_t dh_eq_vqs_vring_int(struct notifier_block *nb, unsigned long action, void *data); int32_t vq2rxq(struct virtqueue *vq); -void *virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len); +void *zxdh_virtqueue_get_buf(struct virtqueue *_vq, uint32_t *len); void *virtqueue_get_buf_ctx_packed(struct virtqueue 
*_vq, uint32_t *len, void **ctx); -uint32_t virtqueue_get_vring_size(struct virtqueue *_vq); +uint32_t zxdh_virtqueue_get_vring_size(struct virtqueue *_vq); void virtqueue_napi_complete(struct napi_struct *napi, struct virtqueue *vq, int32_t processed); -int32_t virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, +int32_t zxdh_virtqueue_add_inbuf_ctx(struct virtqueue *vq, struct scatterlist *sg, uint32_t num, void *data, void *ctx, gfp_t gfp); bool dh_skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp); diff --git a/drivers/net/ethernet/dinghai/en_np/Makefile b/drivers/net/ethernet/dinghai/en_np/Makefile index 08c650ab45ff..8bd2e4b36af8 100644 --- a/drivers/net/ethernet/dinghai/en_np/Makefile +++ b/drivers/net/ethernet/dinghai/en_np/Makefile @@ -20,5 +20,4 @@ obj_files := $(src_files:.c=.o) obj-$(CONFIG_DINGHAI_NP) += dinghai10e_np.o dinghai10e_np-y := $(obj_files) -dinghai10e_np-y += log.o # dinghai10e_np-$(CONFIG_DINGHAI_DH_CMD) += dh_cmd.o cmd/msg_chan_netlink.o cmd/msg_chan_lock.o diff --git a/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c b/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c index 8a1b9ebe714f..66b8a40b112a 100644 --- a/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c +++ b/drivers/net/ethernet/dinghai/en_np/init/source/dpp_np_init.c @@ -16,6 +16,11 @@ #include "dpp_tbl_pkt_cap.h" #include "dpp_drv_sdt.h" #include "dpp_tbl_api.h" +#include + +__weak int debug_print; +module_param(debug_print, int, 0644); + extern DPP_DEV_MGR_T *dpp_dev_mgr_get(ZXIC_VOID); ZXIC_UINT32 dpp_vport_register(DPP_PF_INFO_T *pf_info, struct pci_dev *p_dev) diff --git a/drivers/net/ethernet/dinghai/en_pf.c b/drivers/net/ethernet/dinghai/en_pf.c index d835f7eaccb3..ee2922a54ccc 100644 --- a/drivers/net/ethernet/dinghai/en_pf.c +++ b/drivers/net/ethernet/dinghai/en_pf.c @@ -54,6 +54,9 @@ MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("Dual BSD/GPL"); +__weak int 
debug_print; +module_param(debug_print, int, 0644); + uint32_t dh_debug_mask; struct slot_id_array dh_slot[DPP_PCIE_SLOT_MAX]; module_param_named(debug_mask, dh_debug_mask, uint, 0644); diff --git a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c index 9fbcbce5930c..0018e9405e58 100644 --- a/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c +++ b/drivers/net/ethernet/dinghai/en_ptp/zxdh_ptp.c @@ -13,6 +13,9 @@ #include "zxdh_ptp_regs.h" #include "zxdh_ptp_common.h" +__weak int debug_print; +module_param(debug_print, int, 0644); + #define ZXDH_PF_BAR0 0 char pps[3][15] = { "pp1s_out", "pp1s_1588", "pp1s_external" }; diff --git a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c index 3f7e6bbe2a50..c29dca8a6454 100644 --- a/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c +++ b/drivers/net/ethernet/dinghai/en_tsn/zxdh_tsn.c @@ -7,6 +7,9 @@ #include "zxdh_tsn_comm.h" #include "zxdh_tsn_ioctl.h" +__weak int debug_print; +module_param(debug_print, int, 0644); + int32_t zxdh_tsn_init(struct dh_core_dev *dh_dev) { struct zxdh_tsn_private *tsn = NULL; diff --git a/drivers/net/ethernet/dinghai/log.c b/drivers/net/ethernet/dinghai/log.c deleted file mode 100644 index befc13021be0..000000000000 --- a/drivers/net/ethernet/dinghai/log.c +++ /dev/null @@ -1,4 +0,0 @@ -#include - -int debug_print; -module_param(debug_print, int, 0644); \ No newline at end of file -- Gitee From 04990a99e8959d4e384da4cfab9b746528def719 Mon Sep 17 00:00:00 2001 From: Jianping Liu Date: Sat, 25 Apr 2026 14:51:14 +0800 Subject: [PATCH 230/231] ethernet,dinghai: fix out-of-tree build of en_np When building via `make dist-rpm` (or any out-of-tree build using O=), compiling drivers/net/ethernet/dinghai/dh_cmd.c fails with: drivers/net/ethernet/dinghai/en_np/table/include/dpp_tbl_api.h:23:10: fatal error: zxic_common.h: No such file or directory In drivers/net/ethernet/dinghai/en_np/Makefile we have: dinghai_root := 
$(src) include $(dinghai_root)/en_np/Makefile.include and Makefile.include builds header search paths from $(dinghai_root): abs_include += -I$(dinghai_root)/en_np/comm/include ... ccflags-y += $(abs_include) ... In kbuild, $(src) is a path *relative to* $(srctree) (see scripts/Makefile.build: "src := $(obj)"). For an in-tree build srctree and objtree are the same directory, so the relative path happens to resolve correctly from gcc's cwd. For an out-of-tree build (which is what dist-rpm uses, building under $_KernBuild while sources live in $_KernSrc), gcc runs in the build directory and the relative -Idrivers/net/ethernet/dinghai/en_np/comm/include points at a non-existent path, so zxic_common.h cannot be found. The kbuild auto-added "-I $(srctree)/$(src) -I $(objtree)/$(obj)" (scripts/Makefile.lib) only covers the current obj directory, not the deeper en_np/{comm,table,...}/include subtrees that this driver adds manually, so dist-rpm builds break while plain in-tree "make -j" builds happen to work. Use the canonical kbuild idiom $(srctree)/$(src) so dinghai_root is an absolute path that works for both in-tree and out-of-tree builds. This also fixes the $(wildcard $(dinghai_root)/...*.c) lookups in the en_np/*/source/Kbuild.include files, which silently returned empty under out-of-tree builds. 
Signed-off-by: Jianping Liu --- drivers/net/ethernet/dinghai/en_np/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dinghai/en_np/Makefile b/drivers/net/ethernet/dinghai/en_np/Makefile index 8bd2e4b36af8..211c18c8b07a 100644 --- a/drivers/net/ethernet/dinghai/en_np/Makefile +++ b/drivers/net/ethernet/dinghai/en_np/Makefile @@ -11,7 +11,7 @@ subdirs += en_np/fc/ subdirs += en_np/flow/ #dinghai_root := $(CWD)/drivers/net/ethernet/dinghai -dinghai_root := $(src) +dinghai_root := $(srctree)/$(src) include $(dinghai_root)/en_np/Makefile.include src_files := -- Gitee From 403cfd6625dde430d2f6aa57eab583e8bf04d195 Mon Sep 17 00:00:00 2001 From: Jianping Liu Date: Sat, 25 Apr 2026 18:00:07 +0800 Subject: [PATCH 231/231] ethernet,dinghai: fix header collision in out-of-tree build When building via `make dist-rpm` (or any out-of-tree build using O=), en_aux/en_aux_events.c fails with: drivers/net/ethernet/dinghai/en_aux/../en_np/table/include/dpp_tbl_comm.h:55: error: 'DPP_DEV_SDT_ID_MAX' undeclared here (not in a function) drivers/net/ethernet/dinghai/en_aux/../en_np/table/include/dpp_tbl_comm.h:72: error: unknown type name 'DPP_PF_INFO_T' drivers/net/ethernet/dinghai/en_aux/en_aux_events.h:36: error: 'struct zxdh_en_priv' declared inside parameter list ... ... (cc1: all warnings being treated as errors) There are four header files whose basename collides between the driver's local directory and the public include directory: drivers/net/ethernet/dinghai/{en_aux,en_sf,lag,queue}.h include/linux/dinghai/{en_aux,en_sf,lag,queue}.h The two trees of files are completely different. The driver wants its local en_aux.h (which defines struct zxdh_en_priv and pulls in the en_np/driver/.../dpp_drv_hash.h -> dpp_apt_se_api.h -> dpp_dev.h chain), not the one under include/linux/dinghai/ (which only defines a struct eth_stats). 
drivers/net/ethernet/dinghai/Makefile sets: subdir-ccflags-y += -I$(src) subdir-ccflags-y += -I$(srctree)/include/linux/dinghai/ In kbuild, $(src) is a path relative to $(srctree). For an in-tree build srctree and objtree are the same directory, so gcc's cwd is the source tree and the relative -Idrivers/net/ethernet/dinghai resolves correctly, finding the local en_aux.h first. For an out-of-tree build gcc runs in the build directory and the relative path does not resolve, so gcc continues down the -I list and matches en_aux.h under -I$(srctree)/include/linux/dinghai/ instead - the wrong file. Without struct zxdh_en_priv and the dpp_dev.h chain, dpp_tbl_comm.h cannot parse and the en_aux_events.h prototypes lose their struct types, which -Werror then turns into a hard build failure. The kbuild auto-added "-I $(srctree)/$(src) -I $(objtree)/$(obj)" (scripts/Makefile.lib) is appended after include/linux/dinghai/, so it does not save us in this case. Use the canonical kbuild idiom $(srctree)/$(src) so the local include directory is an absolute path that resolves in both in-tree and out-of-tree builds, and that comes before -I$(srctree)/include/linux/dinghai/ on the gcc command line so the correct local headers win the lookup. Signed-off-by: Jianping Liu --- drivers/net/ethernet/dinghai/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dinghai/Makefile b/drivers/net/ethernet/dinghai/Makefile index d4dbc10ea823..6049bfe4edef 100644 --- a/drivers/net/ethernet/dinghai/Makefile +++ b/drivers/net/ethernet/dinghai/Makefile @@ -1,4 +1,4 @@ -subdir-ccflags-y += -I$(src) +subdir-ccflags-y += -I$(srctree)/$(src) subdir-ccflags-y += -I$(srctree)/include/linux/dinghai/ ccflags-y += -Werror -- Gitee